path: root/include/sound/wm0010.h
/*
 * wm0010.h -- Platform data for WM0010 DSP Driver
 *
 * Copyright 2012 Wolfson Microelectronics PLC.
 *
 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#ifndef WM0010_PDATA_H
#define WM0010_PDATA_H

struct wm0010_pdata {
	/* GPIO wired to the device reset line */
	int gpio_reset;

	/* Set if there is an inverter between the GPIO controlling
	 * the reset signal and the device.
	 */
	int reset_active_high;
	/* IRQ trigger flags used when requesting the device interrupt */
	int irq_flags;
};

#endif
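
The structure above is filled in by board code and handed to the WM0010 SPI driver as platform data. Below is a minimal sketch of how a board file might register such a device; the GPIO number, IRQ trigger, SPI bus, chip select and clock rate are illustrative assumptions, not values taken from any real board file.

/* Illustrative board-file sketch only; all values below are assumptions. */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/spi/spi.h>
#include <sound/wm0010.h>

static struct wm0010_pdata example_wm0010_pdata = {
	.gpio_reset		= 42,			/* assumed GPIO wired to reset */
	.reset_active_high	= 0,			/* no inverter on the reset line */
	.irq_flags		= IRQF_TRIGGER_FALLING,	/* assumed interrupt polarity */
};

static struct spi_board_info example_board_spi[] __initdata = {
	{
		.modalias	= "wm0010",
		.max_speed_hz	= 26000000,		/* assumed SPI clock */
		.bus_num	= 0,
		.chip_select	= 0,
		.platform_data	= &example_wm0010_pdata,
	},
};

static int __init example_board_register_wm0010(void)
{
	/* Register the device so the wm0010 SPI driver can bind to it. */
	return spi_register_board_info(example_board_spi,
				       ARRAY_SIZE(example_board_spi));
}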
-rw-r--r--arch/powerpc/include/asm/ppc_asm.h16
-rw-r--r--arch/powerpc/include/asm/prom.h5
-rw-r--r--arch/powerpc/include/asm/pte-walk.h35
-rw-r--r--arch/powerpc/include/asm/reg.h99
-rw-r--r--arch/powerpc/include/asm/reg_booke.h3
-rw-r--r--arch/powerpc/include/asm/setup.h1
-rw-r--r--arch/powerpc/include/asm/smp.h6
-rw-r--r--arch/powerpc/include/asm/spinlock.h36
-rw-r--r--arch/powerpc/include/asm/sstep.h95
-rw-r--r--arch/powerpc/include/asm/string.h26
-rw-r--r--arch/powerpc/include/asm/timex.h6
-rw-r--r--arch/powerpc/include/asm/tlb.h11
-rw-r--r--arch/powerpc/include/asm/topology.h2
-rw-r--r--arch/powerpc/include/asm/vas.h159
-rw-r--r--arch/powerpc/include/asm/xive.h5
-rw-r--r--arch/powerpc/include/uapi/asm/ioctls.h2
-rw-r--r--arch/powerpc/include/uapi/asm/mman.h16
-rw-r--r--arch/powerpc/kernel/Makefile4
-rw-r--r--arch/powerpc/kernel/align.c774
-rw-r--r--arch/powerpc/kernel/asm-offsets.c8
-rw-r--r--arch/powerpc/kernel/btext.c2
-rw-r--r--arch/powerpc/kernel/cacheinfo.c34
-rw-r--r--arch/powerpc/kernel/cputable.c8
-rw-r--r--arch/powerpc/kernel/eeh.c20
-rw-r--r--arch/powerpc/kernel/eeh_dev.c7
-rw-r--r--arch/powerpc/kernel/eeh_driver.c2
-rw-r--r--arch/powerpc/kernel/eeh_pe.c90
-rw-r--r--arch/powerpc/kernel/eeh_sysfs.c3
-rw-r--r--arch/powerpc/kernel/entry_32.S22
-rw-r--r--arch/powerpc/kernel/entry_64.S69
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S75
-rw-r--r--arch/powerpc/kernel/fadump.c31
-rw-r--r--arch/powerpc/kernel/head_32.S6
-rw-r--r--arch/powerpc/kernel/head_64.S8
-rw-r--r--arch/powerpc/kernel/head_8xx.S109
-rw-r--r--arch/powerpc/kernel/idle_book3s.S151
-rw-r--r--arch/powerpc/kernel/io-workarounds.c9
-rw-r--r--arch/powerpc/kernel/iommu.c7
-rw-r--r--arch/powerpc/kernel/irq.c78
-rw-r--r--arch/powerpc/kernel/isa-bridge.c32
-rw-r--r--arch/powerpc/kernel/kgdb.c4
-rw-r--r--arch/powerpc/kernel/kvm.c7
-rw-r--r--arch/powerpc/kernel/l2cr_6xx.S4
-rw-r--r--arch/powerpc/kernel/legacy_serial.c12
-rw-r--r--arch/powerpc/kernel/mce.c33
-rw-r--r--arch/powerpc/kernel/of_platform.c2
-rw-r--r--arch/powerpc/kernel/optprobes_head.S8
-rw-r--r--arch/powerpc/kernel/paca.c13
-rw-r--r--arch/powerpc/kernel/pci-common.c15
-rw-r--r--arch/powerpc/kernel/pci_32.c4
-rw-r--r--arch/powerpc/kernel/pci_64.c4
-rw-r--r--arch/powerpc/kernel/pci_dn.c22
-rw-r--r--arch/powerpc/kernel/pci_of_scan.c24
-rw-r--r--arch/powerpc/kernel/process.c104
-rw-r--r--arch/powerpc/kernel/prom_init.c34
-rw-r--r--arch/powerpc/kernel/ptrace.c55
-rw-r--r--arch/powerpc/kernel/reloc_64.S6
-rw-r--r--arch/powerpc/kernel/rtas_pci.c33
-rw-r--r--arch/powerpc/kernel/setup-common.c36
-rw-r--r--arch/powerpc/kernel/setup_32.c7
-rw-r--r--arch/powerpc/kernel/setup_64.c39
-rw-r--r--arch/powerpc/kernel/smp.c240
-rw-r--r--arch/powerpc/kernel/swsusp_asm64.S2
-rw-r--r--arch/powerpc/kernel/systbl.S14
-rw-r--r--arch/powerpc/kernel/traps.c305
-rw-r--r--arch/powerpc/kernel/uprobes.c9
-rw-r--r--arch/powerpc/kernel/vdso32/gettimeofday.S12
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S2
-rw-r--r--arch/powerpc/kernel/watchdog.c52
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c9
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_radix.c32
-rw-r--r--arch/powerpc/kvm/book3s_64_vio.c56
-rw-r--r--arch/powerpc/kvm/book3s_64_vio_hv.c12
-rw-r--r--arch/powerpc/kvm/book3s_hv.c15
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_mmu.c18
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S70
-rw-r--r--arch/powerpc/kvm/book3s_xive_template.c68
-rw-r--r--arch/powerpc/kvm/e500_mmu_host.c3
-rw-r--r--arch/powerpc/lib/Makefile3
-rw-r--r--arch/powerpc/lib/copy_32.S44
-rw-r--r--arch/powerpc/lib/copypage_power7.S14
-rw-r--r--arch/powerpc/lib/copyuser_power7.S66
-rw-r--r--arch/powerpc/lib/ldstfp.S299
-rw-r--r--arch/powerpc/lib/mem_64.S19
-rw-r--r--arch/powerpc/lib/memcpy_power7.S66
-rw-r--r--arch/powerpc/lib/quad.S62
-rw-r--r--arch/powerpc/lib/sstep.c2232
-rw-r--r--arch/powerpc/lib/string_64.S2
-rw-r--r--arch/powerpc/mm/8xx_mmu.c29
-rw-r--r--arch/powerpc/mm/Makefile4
-rw-r--r--arch/powerpc/mm/dump_hashpagetable.c2
-rw-r--r--arch/powerpc/mm/dump_linuxpagetables.c2
-rw-r--r--arch/powerpc/mm/fault.c562
-rw-r--r--arch/powerpc/mm/hash_low_32.S2
-rw-r--r--arch/powerpc/mm/hash_utils_64.c16
-rw-r--r--arch/powerpc/mm/hugetlbpage.c219
-rw-r--r--arch/powerpc/mm/icswx.c292
-rw-r--r--arch/powerpc/mm/icswx.h68
-rw-r--r--arch/powerpc/mm/icswx_pid.c87
-rw-r--r--arch/powerpc/mm/init_32.c8
-rw-r--r--arch/powerpc/mm/init_64.c6
-rw-r--r--arch/powerpc/mm/mem.c3
-rw-r--r--arch/powerpc/mm/mmu_context.c99
-rw-r--r--arch/powerpc/mm/mmu_context_book3s64.c25
-rw-r--r--arch/powerpc/mm/mmu_decl.h10
-rw-r--r--arch/powerpc/mm/pgtable-book3s64.c24
-rw-r--r--arch/powerpc/mm/pgtable-hash64.c56
-rw-r--r--arch/powerpc/mm/pgtable-radix.c95
-rw-r--r--arch/powerpc/mm/pgtable_32.c66
-rw-r--r--arch/powerpc/mm/pgtable_64.c10
-rw-r--r--arch/powerpc/mm/slb_low.S23
-rw-r--r--arch/powerpc/mm/subpage-prot.c2
-rw-r--r--arch/powerpc/mm/tlb-radix.c153
-rw-r--r--arch/powerpc/mm/tlb_hash64.c13
-rw-r--r--arch/powerpc/mm/tlb_nohash_low.S2
-rw-r--r--arch/powerpc/net/bpf_jit.h1
-rw-r--r--arch/powerpc/net/bpf_jit_comp64.c26
-rw-r--r--arch/powerpc/perf/Makefile1
-rw-r--r--arch/powerpc/perf/callchain.c3
-rw-r--r--arch/powerpc/perf/core-book3s.c3
-rw-r--r--arch/powerpc/perf/imc-pmu.c1306
-rw-r--r--arch/powerpc/perf/isa207-common.c31
-rw-r--r--arch/powerpc/perf/isa207-common.h5
-rw-r--r--arch/powerpc/perf/power8-pmu.c33
-rw-r--r--arch/powerpc/perf/power9-events-list.h9
-rw-r--r--arch/powerpc/perf/power9-pmu.c23
-rw-r--r--arch/powerpc/platforms/44x/Makefile4
-rw-r--r--arch/powerpc/platforms/44x/machine_check.c89
-rw-r--r--arch/powerpc/platforms/4xx/Makefile8
-rw-r--r--arch/powerpc/platforms/4xx/cpm.c (renamed from arch/powerpc/sysdev/ppc4xx_cpm.c)10
-rw-r--r--arch/powerpc/platforms/4xx/gpio.c (renamed from arch/powerpc/sysdev/ppc4xx_gpio.c)3
-rw-r--r--arch/powerpc/platforms/4xx/hsta_msi.c (renamed from arch/powerpc/sysdev/ppc4xx_hsta_msi.c)0
-rw-r--r--arch/powerpc/platforms/4xx/machine_check.c26
-rw-r--r--arch/powerpc/platforms/4xx/msi.c (renamed from arch/powerpc/sysdev/ppc4xx_msi.c)3
-rw-r--r--arch/powerpc/platforms/4xx/ocm.c (renamed from arch/powerpc/sysdev/ppc4xx_ocm.c)0
-rw-r--r--arch/powerpc/platforms/4xx/pci.c (renamed from arch/powerpc/sysdev/ppc4xx_pci.c)118
-rw-r--r--arch/powerpc/platforms/4xx/pci.h (renamed from arch/powerpc/sysdev/ppc4xx_pci.h)0
-rw-r--r--arch/powerpc/platforms/4xx/soc.c (renamed from arch/powerpc/sysdev/ppc4xx_soc.c)5
-rw-r--r--arch/powerpc/platforms/4xx/uic.c (renamed from arch/powerpc/sysdev/uic.c)14
-rw-r--r--arch/powerpc/platforms/512x/clock-commonclk.c4
-rw-r--r--arch/powerpc/platforms/512x/mpc512x_shared.c12
-rw-r--r--arch/powerpc/platforms/52xx/efika.c8
-rw-r--r--arch/powerpc/platforms/52xx/media5200.c2
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_gpt.c4
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_pci.c8
-rw-r--r--arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c3
-rw-r--r--arch/powerpc/platforms/83xx/mpc832x_rdb.c4
-rw-r--r--arch/powerpc/platforms/83xx/suspend.c4
-rw-r--r--arch/powerpc/platforms/85xx/p1022_ds.c8
-rw-r--r--arch/powerpc/platforms/85xx/xes_mpc85xx.c4
-rw-r--r--arch/powerpc/platforms/8xx/Kconfig3
-rw-r--r--arch/powerpc/platforms/8xx/Makefile2
-rw-r--r--arch/powerpc/platforms/8xx/m8xx_setup.c2
-rw-r--r--arch/powerpc/platforms/8xx/machine_check.c37
-rw-r--r--arch/powerpc/platforms/8xx/pic.c (renamed from arch/powerpc/sysdev/mpc8xx_pic.c)2
-rw-r--r--arch/powerpc/platforms/8xx/pic.h (renamed from arch/powerpc/sysdev/mpc8xx_pic.h)0
-rw-r--r--arch/powerpc/platforms/Kconfig.cputype45
-rw-r--r--arch/powerpc/platforms/Makefile1
-rw-r--r--arch/powerpc/platforms/amigaone/setup.c6
-rw-r--r--arch/powerpc/platforms/cell/axon_msi.c36
-rw-r--r--arch/powerpc/platforms/cell/interrupt.c4
-rw-r--r--arch/powerpc/platforms/cell/iommu.c24
-rw-r--r--arch/powerpc/platforms/cell/ras.c4
-rw-r--r--arch/powerpc/platforms/cell/spider-pci.c4
-rw-r--r--arch/powerpc/platforms/cell/spider-pic.c4
-rw-r--r--arch/powerpc/platforms/cell/spu_manage.c26
-rw-r--r--arch/powerpc/platforms/cell/spufs/file.c2
-rw-r--r--arch/powerpc/platforms/chrp/pci.c18
-rw-r--r--arch/powerpc/platforms/chrp/pegasos_eth.c2
-rw-r--r--arch/powerpc/platforms/embedded6xx/linkstation.c6
-rw-r--r--arch/powerpc/platforms/embedded6xx/mvme5100.c2
-rw-r--r--arch/powerpc/platforms/embedded6xx/storcenter.c2
-rw-r--r--arch/powerpc/platforms/maple/pci.c10
-rw-r--r--arch/powerpc/platforms/pasemi/pci.c2
-rw-r--r--arch/powerpc/platforms/powermac/feature.c14
-rw-r--r--arch/powerpc/platforms/powermac/low_i2c.c50
-rw-r--r--arch/powerpc/platforms/powermac/pci.c6
-rw-r--r--arch/powerpc/platforms/powermac/pfunc_base.c24
-rw-r--r--arch/powerpc/platforms/powermac/pfunc_core.c6
-rw-r--r--arch/powerpc/platforms/powermac/pic.c8
-rw-r--r--arch/powerpc/platforms/powermac/setup.c2
-rw-r--r--arch/powerpc/platforms/powernv/Kconfig22
-rw-r--r--arch/powerpc/platforms/powernv/Makefile5
-rw-r--r--arch/powerpc/platforms/powernv/copy-paste.h46
-rw-r--r--arch/powerpc/platforms/powernv/eeh-powernv.c29
-rw-r--r--arch/powerpc/platforms/powernv/idle.c75
-rw-r--r--arch/powerpc/platforms/powernv/memtrace.c282
-rw-r--r--arch/powerpc/platforms/powernv/npu-dma.c22
-rw-r--r--arch/powerpc/platforms/powernv/opal-async.c4
-rw-r--r--arch/powerpc/platforms/powernv/opal-flash.c2
-rw-r--r--arch/powerpc/platforms/powernv/opal-hmi.c22
-rw-r--r--arch/powerpc/platforms/powernv/opal-imc.c226
-rw-r--r--arch/powerpc/platforms/powernv/opal-powercap.c244
-rw-r--r--arch/powerpc/platforms/powernv/opal-prd.c13
-rw-r--r--arch/powerpc/platforms/powernv/opal-psr.c175
-rw-r--r--arch/powerpc/platforms/powernv/opal-sensor-groups.c212
-rw-r--r--arch/powerpc/platforms/powernv/opal-wrappers.S11
-rw-r--r--arch/powerpc/platforms/powernv/opal-xscom.c8
-rw-r--r--arch/powerpc/platforms/powernv/opal.c154
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c26
-rw-r--r--arch/powerpc/platforms/powernv/pci.c75
-rw-r--r--arch/powerpc/platforms/powernv/pci.h5
-rw-r--r--arch/powerpc/platforms/powernv/powernv.h2
-rw-r--r--arch/powerpc/platforms/powernv/rng.c45
-rw-r--r--arch/powerpc/platforms/powernv/smp.c11
-rw-r--r--arch/powerpc/platforms/powernv/vas-window.c1134
-rw-r--r--arch/powerpc/platforms/powernv/vas.c151
-rw-r--r--arch/powerpc/platforms/powernv/vas.h467
-rw-r--r--arch/powerpc/platforms/ps3/repository.c22
-rw-r--r--arch/powerpc/platforms/ps3/setup.c15
-rw-r--r--arch/powerpc/platforms/pseries/Kconfig1
-rw-r--r--arch/powerpc/platforms/pseries/dlpar.c9
-rw-r--r--arch/powerpc/platforms/pseries/eeh_pseries.c4
-rw-r--r--arch/powerpc/platforms/pseries/event_sources.c6
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-cpu.c17
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-memory.c57
-rw-r--r--arch/powerpc/platforms/pseries/hvCall.S2
-rw-r--r--arch/powerpc/platforms/pseries/ibmebus.c5
-rw-r--r--arch/powerpc/platforms/pseries/iommu.c58
-rw-r--r--arch/powerpc/platforms/pseries/kexec.c6
-rw-r--r--arch/powerpc/platforms/pseries/mobility.c2
-rw-r--r--arch/powerpc/platforms/pseries/msi.c23
-rw-r--r--arch/powerpc/platforms/pseries/pci_dlpar.c2
-rw-r--r--arch/powerpc/platforms/pseries/pseries.h2
-rw-r--r--arch/powerpc/platforms/pseries/pseries_energy.c7
-rw-r--r--arch/powerpc/platforms/pseries/ras.c15
-rw-r--r--arch/powerpc/platforms/pseries/reconfig.c16
-rw-r--r--arch/powerpc/platforms/pseries/setup.c9
-rw-r--r--arch/powerpc/platforms/pseries/smp.c27
-rw-r--r--arch/powerpc/platforms/pseries/vio.c10
-rw-r--r--arch/powerpc/purgatory/trampoline.S6
-rw-r--r--arch/powerpc/sysdev/Makefile12
-rw-r--r--arch/powerpc/sysdev/axonram.c48
-rw-r--r--arch/powerpc/sysdev/dcr.c4
-rw-r--r--arch/powerpc/sysdev/fsl_85xx_cache_sram.c12
-rw-r--r--arch/powerpc/sysdev/fsl_gtm.c14
-rw-r--r--arch/powerpc/sysdev/fsl_msi.c16
-rw-r--r--arch/powerpc/sysdev/fsl_pci.c47
-rw-r--r--arch/powerpc/sysdev/fsl_rio.c36
-rw-r--r--arch/powerpc/sysdev/fsl_rmu.c12
-rw-r--r--arch/powerpc/sysdev/fsl_soc.c2
-rw-r--r--arch/powerpc/sysdev/fsl_soc.h2
-rw-r--r--arch/powerpc/sysdev/ipic.c1
-rw-r--r--arch/powerpc/sysdev/mpic.c4
-rw-r--r--arch/powerpc/sysdev/mpic_msgr.c2
-rw-r--r--arch/powerpc/sysdev/mpic_msi.c2
-rw-r--r--arch/powerpc/sysdev/mpic_timer.c19
-rw-r--r--arch/powerpc/sysdev/msi_bitmap.c4
-rw-r--r--arch/powerpc/sysdev/mv64x60_dev.c20
-rw-r--r--arch/powerpc/sysdev/mv64x60_pci.c6
-rw-r--r--arch/powerpc/sysdev/of_rtc.c12
-rw-r--r--arch/powerpc/sysdev/scom.c5
-rw-r--r--arch/powerpc/sysdev/simple_gpio.c3
-rw-r--r--arch/powerpc/sysdev/tsi108_pci.c4
-rw-r--r--arch/powerpc/sysdev/xive/Kconfig5
-rw-r--r--arch/powerpc/sysdev/xive/Makefile1
-rw-r--r--arch/powerpc/sysdev/xive/common.c90
-rw-r--r--arch/powerpc/sysdev/xive/native.c22
-rw-r--r--arch/powerpc/sysdev/xive/spapr.c662
-rw-r--r--arch/powerpc/sysdev/xive/xive-internal.h7
-rw-r--r--arch/powerpc/xmon/Makefile4
-rw-r--r--arch/powerpc/xmon/xmon.c86
-rw-r--r--arch/s390/Kconfig18
-rw-r--r--arch/s390/Makefile6
-rw-r--r--arch/s390/include/asm/Kbuild1
-rw-r--r--arch/s390/include/asm/bug.h4
-rw-r--r--arch/s390/include/asm/compat.h5
-rw-r--r--arch/s390/include/asm/cpcmd.h7
-rw-r--r--arch/s390/include/asm/ebcdic.h4
-rw-r--r--arch/s390/include/asm/elf.h2
-rw-r--r--arch/s390/include/asm/futex.h23
-rw-r--r--arch/s390/include/asm/ipl.h2
-rw-r--r--arch/s390/include/asm/lowcore.h48
-rw-r--r--arch/s390/include/asm/mman.h11
-rw-r--r--arch/s390/include/asm/mmu_context.h38
-rw-r--r--arch/s390/include/asm/nmi.h2
-rw-r--r--arch/s390/include/asm/page-states.h1
-rw-r--r--arch/s390/include/asm/page.h37
-rw-r--r--arch/s390/include/asm/pgalloc.h18
-rw-r--r--arch/s390/include/asm/pgtable.h197
-rw-r--r--arch/s390/include/asm/qdio.h2
-rw-r--r--arch/s390/include/asm/setup.h17
-rw-r--r--arch/s390/include/asm/spinlock.h16
-rw-r--r--arch/s390/include/asm/timex.h40
-rw-r--r--arch/s390/include/asm/tlb.h23
-rw-r--r--arch/s390/include/asm/tlbflush.h7
-rw-r--r--arch/s390/include/asm/topology.h6
-rw-r--r--arch/s390/include/asm/types.h11
-rw-r--r--arch/s390/include/asm/unaligned.h13
-rw-r--r--arch/s390/include/uapi/asm/Kbuild1
-rw-r--r--arch/s390/include/uapi/asm/dasd.h6
-rw-r--r--arch/s390/include/uapi/asm/socket.h2
-rw-r--r--arch/s390/include/uapi/asm/swab.h89
-rw-r--r--arch/s390/include/uapi/asm/vmcp.h (renamed from drivers/s390/char/vmcp.h)20
-rw-r--r--arch/s390/kernel/asm-offsets.c1
-rw-r--r--arch/s390/kernel/cpcmd.c13
-rw-r--r--arch/s390/kernel/debug.c9
-rw-r--r--arch/s390/kernel/dumpstack.c2
-rw-r--r--arch/s390/kernel/early.c17
-rw-r--r--arch/s390/kernel/head.S3
-rw-r--r--arch/s390/kernel/head64.S4
-rw-r--r--arch/s390/kernel/irq.c3
-rw-r--r--arch/s390/kernel/perf_cpum_sf.c2
-rw-r--r--arch/s390/kernel/relocate_kernel.S5
-rw-r--r--arch/s390/kernel/setup.c14
-rw-r--r--arch/s390/kernel/smp.c1
-rw-r--r--arch/s390/kernel/suspend.c24
-rw-r--r--arch/s390/kernel/time.c67
-rw-r--r--arch/s390/kernel/vdso.c2
-rw-r--r--arch/s390/kernel/vdso32/vdso32.lds.S4
-rw-r--r--arch/s390/kernel/vdso64/vdso64.lds.S4
-rw-r--r--arch/s390/kvm/diag.c8
-rw-r--r--arch/s390/kvm/gaccess.c35
-rw-r--r--arch/s390/kvm/kvm-s390.c8
-rw-r--r--arch/s390/kvm/priv.c8
-rw-r--r--arch/s390/kvm/sthyi.c7
-rw-r--r--arch/s390/kvm/vsie.c2
-rw-r--r--arch/s390/lib/delay.c2
-rw-r--r--arch/s390/lib/spinlock.c87
-rw-r--r--arch/s390/lib/uaccess.c38
-rw-r--r--arch/s390/mm/fault.c10
-rw-r--r--arch/s390/mm/gmap.c163
-rw-r--r--arch/s390/mm/init.c60
-rw-r--r--arch/s390/mm/mmap.c6
-rw-r--r--arch/s390/mm/page-states.c192
-rw-r--r--arch/s390/mm/pageattr.c5
-rw-r--r--arch/s390/mm/pgalloc.c12
-rw-r--r--arch/s390/mm/pgtable.c160
-rw-r--r--arch/s390/mm/vmem.c47
-rw-r--r--arch/s390/net/bpf_jit_comp.c27
-rw-r--r--arch/s390/pci/pci_clp.c10
-rw-r--r--arch/s390/tools/gen_facilities.c5
-rw-r--r--arch/sh/configs/se7751_defconfig1
-rw-r--r--arch/sh/include/asm/bug.h4
-rw-r--r--arch/sh/include/asm/futex.h26
-rw-r--r--arch/sh/include/asm/spinlock-cas.h5
-rw-r--r--arch/sh/include/asm/spinlock-llsc.h5
-rw-r--r--arch/sh/include/asm/tlb.h8
-rw-r--r--arch/sh/include/uapi/asm/ioctls.h2
-rw-r--r--arch/sparc/configs/sparc32_defconfig4
-rw-r--r--arch/sparc/configs/sparc64_defconfig4
-rw-r--r--arch/sparc/crypto/aes_glue.c3
-rw-r--r--arch/sparc/include/asm/atomic_32.h2
-rw-r--r--arch/sparc/include/asm/futex_64.h26
-rw-r--r--arch/sparc/include/asm/mmu_context_64.h14
-rw-r--r--arch/sparc/include/asm/page_32.h2
-rw-r--r--arch/sparc/include/asm/spinlock_32.h5
-rw-r--r--arch/sparc/include/asm/spitfire.h16
-rw-r--r--arch/sparc/include/asm/trap_block.h1
-rw-r--r--arch/sparc/include/uapi/asm/ioctls.h2
-rw-r--r--arch/sparc/include/uapi/asm/socket.h2
-rw-r--r--arch/sparc/kernel/cpu.c6
-rw-r--r--arch/sparc/kernel/cpumap.c1
-rw-r--r--arch/sparc/kernel/head_64.S22
-rw-r--r--arch/sparc/kernel/pci_sun4v.c14
-rw-r--r--arch/sparc/kernel/pcic.c2
-rw-r--r--arch/sparc/kernel/setup_64.c15
-rw-r--r--arch/sparc/kernel/smp_64.c185
-rw-r--r--arch/sparc/kernel/sun4v_ivec.S15
-rw-r--r--arch/sparc/kernel/traps_64.c1
-rw-r--r--arch/sparc/kernel/tsb.S12
-rw-r--r--arch/sparc/lib/U3memcpy.S4
-rw-r--r--arch/sparc/lib/multi3.S24
-rw-r--r--arch/sparc/mm/init_64.c39
-rw-r--r--arch/sparc/net/bpf_jit_comp_64.c34
-rw-r--r--arch/sparc/power/hibernate.c3
-rw-r--r--arch/tile/include/asm/atomic_32.h2
-rw-r--r--arch/tile/include/asm/futex.h40
-rw-r--r--arch/tile/include/asm/spinlock_32.h2
-rw-r--r--arch/tile/include/asm/spinlock_64.h2
-rw-r--r--arch/tile/lib/spinlock_32.c23
-rw-r--r--arch/tile/lib/spinlock_64.c22
-rw-r--r--arch/um/include/asm/tlb.h13
-rw-r--r--arch/um/include/asm/unwind.h8
-rw-r--r--arch/x86/Kbuild5
-rw-r--r--arch/x86/Kconfig76
-rw-r--r--arch/x86/Kconfig.debug60
-rw-r--r--arch/x86/Makefile17
-rw-r--r--arch/x86/boot/Makefile5
-rw-r--r--arch/x86/boot/compressed/Makefile1
-rw-r--r--arch/x86/boot/compressed/eboot.c13
-rw-r--r--arch/x86/boot/compressed/head_32.S129
-rw-r--r--arch/x86/boot/compressed/head_64.S112
-rw-r--r--arch/x86/boot/compressed/kaslr.c147
-rw-r--r--arch/x86/boot/compressed/misc.c3
-rw-r--r--arch/x86/boot/compressed/pagetable.c7
-rw-r--r--arch/x86/boot/header.S8
-rw-r--r--arch/x86/boot/string.c9
-rw-r--r--arch/x86/configs/i386_defconfig3
-rw-r--r--arch/x86/configs/tiny.config2
-rw-r--r--arch/x86/configs/x86_64_defconfig3
-rw-r--r--arch/x86/crypto/aesni-intel_glue.c4
-rw-r--r--arch/x86/crypto/blowfish_glue.c3
-rw-r--r--arch/x86/crypto/cast5_avx_glue.c3
-rw-r--r--arch/x86/crypto/des3_ede_glue.c3
-rw-r--r--arch/x86/crypto/sha1_avx2_x86_64_asm.S67
-rw-r--r--arch/x86/crypto/sha1_ssse3_glue.c2
-rw-r--r--arch/x86/entry/Makefile1
-rw-r--r--arch/x86/entry/calling.h5
-rw-r--r--arch/x86/entry/common.c3
-rw-r--r--arch/x86/entry/entry_32.S20
-rw-r--r--arch/x86/entry/entry_64.S240
-rw-r--r--arch/x86/entry/entry_64_compat.S12
-rw-r--r--arch/x86/entry/vdso/vma.c2
-rw-r--r--arch/x86/events/amd/uncore.c21
-rw-r--r--arch/x86/events/core.c84
-rw-r--r--arch/x86/events/intel/Makefile2
-rw-r--r--arch/x86/events/intel/bts.c4
-rw-r--r--arch/x86/events/intel/core.c271
-rw-r--r--arch/x86/events/intel/cqm.c1766
-rw-r--r--arch/x86/events/intel/cstate.c26
-rw-r--r--arch/x86/events/intel/ds.c80
-rw-r--r--arch/x86/events/intel/lbr.c56
-rw-r--r--arch/x86/events/intel/p4.c2
-rw-r--r--arch/x86/events/intel/pt.c5
-rw-r--r--arch/x86/events/intel/rapl.c2
-rw-r--r--arch/x86/events/intel/uncore.c2
-rw-r--r--arch/x86/events/intel/uncore_nhmex.c12
-rw-r--r--arch/x86/events/intel/uncore_snb.c6
-rw-r--r--arch/x86/events/intel/uncore_snbep.c93
-rw-r--r--arch/x86/events/perf_event.h12
-rw-r--r--arch/x86/hyperv/Makefile2
-rw-r--r--arch/x86/hyperv/hv_init.c90
-rw-r--r--arch/x86/hyperv/mmu.c272
-rw-r--r--arch/x86/ia32/ia32_signal.c2
-rw-r--r--arch/x86/include/asm/acpi.h13
-rw-r--r--arch/x86/include/asm/asm.h6
-rw-r--r--arch/x86/include/asm/atomic.h69
-rw-r--r--arch/x86/include/asm/atomic64_32.h81
-rw-r--r--arch/x86/include/asm/atomic64_64.h73
-rw-r--r--arch/x86/include/asm/bug.h4
-rw-r--r--arch/x86/include/asm/cmdline.h2
-rw-r--r--arch/x86/include/asm/cmpxchg.h2
-rw-r--r--arch/x86/include/asm/cpufeatures.h5
-rw-r--r--arch/x86/include/asm/desc.h248
-rw-r--r--arch/x86/include/asm/desc_defs.h122
-rw-r--r--arch/x86/include/asm/disabled-features.h4
-rw-r--r--arch/x86/include/asm/dma-mapping.h5
-rw-r--r--arch/x86/include/asm/dmi.h8
-rw-r--r--arch/x86/include/asm/e820/api.h2
-rw-r--r--arch/x86/include/asm/elf.h23
-rw-r--r--arch/x86/include/asm/entry_arch.h15
-rw-r--r--arch/x86/include/asm/fixmap.h20
-rw-r--r--arch/x86/include/asm/fpu/internal.h6
-rw-r--r--arch/x86/include/asm/futex.h40
-rw-r--r--arch/x86/include/asm/hardirq.h1
-rw-r--r--arch/x86/include/asm/hw_irq.h20
-rw-r--r--arch/x86/include/asm/hypervisor.h10
-rw-r--r--arch/x86/include/asm/init.h1
-rw-r--r--arch/x86/include/asm/intel_rdt.h286
-rw-r--r--arch/x86/include/asm/intel_rdt_common.h27
-rw-r--r--arch/x86/include/asm/intel_rdt_sched.h92
-rw-r--r--arch/x86/include/asm/io.h110
-rw-r--r--arch/x86/include/asm/irq.h4
-rw-r--r--arch/x86/include/asm/irq_vectors.h3
-rw-r--r--arch/x86/include/asm/irq_work.h8
-rw-r--r--arch/x86/include/asm/kexec.h11
-rw-r--r--arch/x86/include/asm/kprobes.h8
-rw-r--r--arch/x86/include/asm/kvm_host.h5
-rw-r--r--arch/x86/include/asm/lguest.h91
-rw-r--r--arch/x86/include/asm/lguest_hcall.h74
-rw-r--r--arch/x86/include/asm/mem_encrypt.h80
-rw-r--r--arch/x86/include/asm/mmu.h25
-rw-r--r--arch/x86/include/asm/mmu_context.h21
-rw-r--r--arch/x86/include/asm/module.h9
-rw-r--r--arch/x86/include/asm/mpx.h9
-rw-r--r--arch/x86/include/asm/mshyperv.h149
-rw-r--r--arch/x86/include/asm/msr-index.h2
-rw-r--r--arch/x86/include/asm/orc_lookup.h46
-rw-r--r--arch/x86/include/asm/orc_types.h107
-rw-r--r--arch/x86/include/asm/page_64.h4
-rw-r--r--arch/x86/include/asm/page_types.h3
-rw-r--r--arch/x86/include/asm/paravirt.h5
-rw-r--r--arch/x86/include/asm/paravirt_types.h19
-rw-r--r--arch/x86/include/asm/pgtable.h28
-rw-r--r--arch/x86/include/asm/pgtable_types.h58
-rw-r--r--arch/x86/include/asm/processor-flags.h13
-rw-r--r--arch/x86/include/asm/processor.h27
-rw-r--r--arch/x86/include/asm/proto.h3
-rw-r--r--arch/x86/include/asm/ptrace.h43
-rw-r--r--arch/x86/include/asm/realmode.h12
-rw-r--r--arch/x86/include/asm/refcount.h109
-rw-r--r--arch/x86/include/asm/rmwcc.h37
-rw-r--r--arch/x86/include/asm/segment.h4
-rw-r--r--arch/x86/include/asm/set_memory.h3
-rw-r--r--arch/x86/include/asm/setup.h1
-rw-r--r--arch/x86/include/asm/thread_info.h5
-rw-r--r--arch/x86/include/asm/tlb.h14
-rw-r--r--arch/x86/include/asm/tlbflush.h89
-rw-r--r--arch/x86/include/asm/topology.h6
-rw-r--r--arch/x86/include/asm/trace/common.h16
-rw-r--r--arch/x86/include/asm/trace/exceptions.h8
-rw-r--r--arch/x86/include/asm/trace/hyperv.h40
-rw-r--r--arch/x86/include/asm/trace/irq_vectors.h51
-rw-r--r--arch/x86/include/asm/traps.h50
-rw-r--r--arch/x86/include/asm/uaccess.h7
-rw-r--r--arch/x86/include/asm/unwind.h76
-rw-r--r--arch/x86/include/asm/unwind_hints.h105
-rw-r--r--arch/x86/include/asm/vga.h14
-rw-r--r--arch/x86/include/asm/xen/hypercall.h6
-rw-r--r--arch/x86/include/asm/xen/page.h5
-rw-r--r--arch/x86/include/uapi/asm/bootparam.h2
-rw-r--r--arch/x86/include/uapi/asm/hyperv.h17
-rw-r--r--arch/x86/include/uapi/asm/mman.h3
-rw-r--r--arch/x86/kernel/Makefile11
-rw-r--r--arch/x86/kernel/acpi/boot.c25
-rw-r--r--arch/x86/kernel/alternative.c22
-rw-r--r--arch/x86/kernel/apic/apic.c76
-rw-r--r--arch/x86/kernel/apic/io_apic.c4
-rw-r--r--arch/x86/kernel/apic/vector.c2
-rw-r--r--arch/x86/kernel/asm-offsets_32.c20
-rw-r--r--arch/x86/kernel/asm-offsets_64.c1
-rw-r--r--arch/x86/kernel/cpu/Makefile2
-rw-r--r--arch/x86/kernel/cpu/amd.c56
-rw-r--r--arch/x86/kernel/cpu/aperfmperf.c43
-rw-r--r--arch/x86/kernel/cpu/bugs.c8
-rw-r--r--arch/x86/kernel/cpu/common.c64
-rw-r--r--arch/x86/kernel/cpu/intel_cacheinfo.c32
-rw-r--r--arch/x86/kernel/cpu/intel_rdt.c375
-rw-r--r--arch/x86/kernel/cpu/intel_rdt.h440
-rw-r--r--arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c (renamed from arch/x86/kernel/cpu/intel_rdt_schemata.c)67
-rw-r--r--arch/x86/kernel/cpu/intel_rdt_monitor.c499
-rw-r--r--arch/x86/kernel/cpu/intel_rdt_rdtgroup.c1117
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c43
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_amd.c25
-rw-r--r--arch/x86/kernel/cpu/mcheck/therm_throt.c22
-rw-r--r--arch/x86/kernel/cpu/mcheck/threshold.c16
-rw-r--r--arch/x86/kernel/cpu/microcode/amd.c5
-rw-r--r--arch/x86/kernel/cpu/microcode/core.c4
-rw-r--r--arch/x86/kernel/cpu/microcode/intel.c27
-rw-r--r--arch/x86/kernel/cpu/mshyperv.c22
-rw-r--r--arch/x86/kernel/cpu/mtrr/main.c18
-rw-r--r--arch/x86/kernel/cpu/scattered.c1
-rw-r--r--arch/x86/kernel/devicetree.c3
-rw-r--r--arch/x86/kernel/dumpstack.c14
-rw-r--r--arch/x86/kernel/dumpstack_32.c4
-rw-r--r--arch/x86/kernel/dumpstack_64.c4
-rw-r--r--arch/x86/kernel/e820.c26
-rw-r--r--arch/x86/kernel/early-quirks.c5
-rw-r--r--arch/x86/kernel/eisa.c19
-rw-r--r--arch/x86/kernel/espfix_64.c2
-rw-r--r--arch/x86/kernel/head32.c4
-rw-r--r--arch/x86/kernel/head64.c106
-rw-r--r--arch/x86/kernel/head_32.S66
-rw-r--r--arch/x86/kernel/head_64.S40
-rw-r--r--arch/x86/kernel/hpet.c27
-rw-r--r--arch/x86/kernel/idt.c371
-rw-r--r--arch/x86/kernel/irq.c45
-rw-r--r--arch/x86/kernel/irq_work.c20
-rw-r--r--arch/x86/kernel/irqinit.c100
-rw-r--r--arch/x86/kernel/kdebugfs.c34
-rw-r--r--arch/x86/kernel/kprobes/core.c10
-rw-r--r--arch/x86/kernel/kprobes/opt.c9
-rw-r--r--arch/x86/kernel/ksysfs.c32
-rw-r--r--arch/x86/kernel/kvm.c10
-rw-r--r--arch/x86/kernel/ldt.c21
-rw-r--r--arch/x86/kernel/machine_kexec_32.c14
-rw-r--r--arch/x86/kernel/machine_kexec_64.c25
-rw-r--r--arch/x86/kernel/module.c11
-rw-r--r--arch/x86/kernel/mpparse.c108
-rw-r--r--arch/x86/kernel/nmi.c18
-rw-r--r--arch/x86/kernel/paravirt.c3
-rw-r--r--arch/x86/kernel/pci-dma.c11
-rw-r--r--arch/x86/kernel/pci-nommu.c2
-rw-r--r--arch/x86/kernel/pci-swiotlb.c15
-rw-r--r--arch/x86/kernel/platform-quirks.c1
-rw-r--r--arch/x86/kernel/process.c17
-rw-r--r--arch/x86/kernel/process_32.c4
-rw-r--r--arch/x86/kernel/process_64.c244
-rw-r--r--arch/x86/kernel/quirks.c10
-rw-r--r--arch/x86/kernel/reboot.c10
-rw-r--r--arch/x86/kernel/relocate_kernel_64.S14
-rw-r--r--arch/x86/kernel/setup.c18
-rw-r--r--arch/x86/kernel/setup_percpu.c9
-rw-r--r--arch/x86/kernel/signal.c2
-rw-r--r--arch/x86/kernel/smp.c81
-rw-r--r--arch/x86/kernel/smpboot.c30
-rw-r--r--arch/x86/kernel/step.c2
-rw-r--r--arch/x86/kernel/sys_x86_64.c30
-rw-r--r--arch/x86/kernel/tls.c2
-rw-r--r--arch/x86/kernel/tracepoint.c57
-rw-r--r--arch/x86/kernel/traps.c107
-rw-r--r--arch/x86/kernel/unwind_frame.c41
-rw-r--r--arch/x86/kernel/unwind_guess.c5
-rw-r--r--arch/x86/kernel/unwind_orc.c582
-rw-r--r--arch/x86/kernel/vmlinux.lds.S3
-rw-r--r--arch/x86/kvm/Kconfig3
-rw-r--r--arch/x86/kvm/cpuid.c2
-rw-r--r--arch/x86/kvm/hyperv.c7
-rw-r--r--arch/x86/kvm/kvm_cache_regs.h5
-rw-r--r--arch/x86/kvm/lapic.c17
-rw-r--r--arch/x86/kvm/mmu.c41
-rw-r--r--arch/x86/kvm/mmu.h2
-rw-r--r--arch/x86/kvm/svm.c54
-rw-r--r--arch/x86/kvm/vmx.c335
-rw-r--r--arch/x86/kvm/x86.c55
-rw-r--r--arch/x86/lguest/Kconfig14
-rw-r--r--arch/x86/lguest/Makefile2
-rw-r--r--arch/x86/lguest/boot.c1558
-rw-r--r--arch/x86/lguest/head_32.S192
-rw-r--r--arch/x86/lib/cmdline.c105
-rw-r--r--arch/x86/math-emu/Makefile4
-rw-r--r--arch/x86/math-emu/div_Xsig.S1
-rw-r--r--arch/x86/math-emu/div_small.S2
-rw-r--r--arch/x86/math-emu/fpu_emu.h2
-rw-r--r--arch/x86/math-emu/fpu_entry.c11
-rw-r--r--arch/x86/math-emu/fpu_system.h48
-rw-r--r--arch/x86/math-emu/get_address.c17
-rw-r--r--arch/x86/math-emu/mul_Xsig.S4
-rw-r--r--arch/x86/math-emu/polynom_Xsig.S1
-rw-r--r--arch/x86/math-emu/reg_compare.c16
-rw-r--r--arch/x86/math-emu/reg_norm.S2
-rw-r--r--arch/x86/math-emu/reg_round.S2
-rw-r--r--arch/x86/math-emu/reg_u_add.S1
-rw-r--r--arch/x86/math-emu/reg_u_div.S2
-rw-r--r--arch/x86/math-emu/reg_u_mul.S1
-rw-r--r--arch/x86/math-emu/reg_u_sub.S1
-rw-r--r--arch/x86/math-emu/round_Xsig.S4
-rw-r--r--arch/x86/math-emu/shr_Xsig.S1
-rw-r--r--arch/x86/math-emu/wm_shrx.S2
-rw-r--r--arch/x86/math-emu/wm_sqrt.S1
-rw-r--r--arch/x86/mm/Makefile2
-rw-r--r--arch/x86/mm/dump_pagetables.c93
-rw-r--r--arch/x86/mm/extable.c44
-rw-r--r--arch/x86/mm/fault.c75
-rw-r--r--arch/x86/mm/hugetlbpage.c27
-rw-r--r--arch/x86/mm/ident_map.c12
-rw-r--r--arch/x86/mm/init.c5
-rw-r--r--arch/x86/mm/ioremap.c287
-rw-r--r--arch/x86/mm/kasan_init_64.c6
-rw-r--r--arch/x86/mm/mem_encrypt.c593
-rw-r--r--arch/x86/mm/mem_encrypt_boot.S149
-rw-r--r--arch/x86/mm/mmap.c19
-rw-r--r--arch/x86/mm/mpx.c33
-rw-r--r--arch/x86/mm/numa_emulation.c55
-rw-r--r--arch/x86/mm/pageattr.c67
-rw-r--r--arch/x86/mm/pat.c9
-rw-r--r--arch/x86/mm/pgtable.c8
-rw-r--r--arch/x86/mm/tlb.c365
-rw-r--r--arch/x86/net/bpf_jit_comp.c35
-rw-r--r--arch/x86/pci/common.c4
-rw-r--r--arch/x86/pci/intel_mid_pci.c12
-rw-r--r--arch/x86/platform/efi/efi.c19
-rw-r--r--arch/x86/platform/efi/efi_64.c15
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_bt.c2
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_max7315.c6
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c6
-rw-r--r--arch/x86/platform/intel-mid/intel-mid.c2
-rw-r--r--arch/x86/platform/intel-mid/pwr.c4
-rw-r--r--arch/x86/platform/uv/tlb_uv.c33
-rw-r--r--arch/x86/power/cpu.c1
-rw-r--r--arch/x86/realmode/init.c12
-rw-r--r--arch/x86/realmode/rm/trampoline_64.S24
-rw-r--r--arch/x86/um/user-offsets.c2
-rw-r--r--arch/x86/xen/Kconfig5
-rw-r--r--arch/x86/xen/enlighten_hvm.c59
-rw-r--r--arch/x86/xen/enlighten_pv.c176
-rw-r--r--arch/x86/xen/irq.c3
-rw-r--r--arch/x86/xen/mmu.c2
-rw-r--r--arch/x86/xen/mmu_pv.c25
-rw-r--r--arch/x86/xen/p2m.c25
-rw-r--r--arch/x86/xen/setup.c5
-rw-r--r--arch/x86/xen/smp_pv.c3
-rw-r--r--arch/x86/xen/time.c1
-rw-r--r--arch/x86/xen/xen-asm.S26
-rw-r--r--arch/x86/xen/xen-asm.h12
-rw-r--r--arch/x86/xen/xen-asm_32.S27
-rw-r--r--arch/x86/xen/xen-asm_64.S102
-rw-r--r--arch/x86/xen/xen-head.S2
-rw-r--r--arch/x86/xen/xen-ops.h16
-rw-r--r--arch/xtensa/include/asm/Kbuild2
-rw-r--r--arch/xtensa/include/asm/device.h15
-rw-r--r--arch/xtensa/include/asm/futex.h27
-rw-r--r--arch/xtensa/include/asm/param.h18
-rw-r--r--arch/xtensa/include/asm/spinlock.h5
-rw-r--r--arch/xtensa/include/uapi/asm/ioctls.h2
-rw-r--r--arch/xtensa/include/uapi/asm/mman.h14
-rw-r--r--arch/xtensa/include/uapi/asm/socket.h2
-rw-r--r--arch/xtensa/kernel/setup.c6
-rw-r--r--arch/xtensa/kernel/xtensa_ksyms.c2
-rw-r--r--arch/xtensa/mm/cache.c16
-rw-r--r--block/Kconfig5
-rw-r--r--block/Makefile1
-rw-r--r--block/bfq-iosched.c185
-rw-r--r--block/bfq-iosched.h49
-rw-r--r--block/bfq-wf2q.c148
-rw-r--r--block/bio-integrity.c30
-rw-r--r--block/bio.c32
-rw-r--r--block/blk-cgroup.c8
-rw-r--r--block/blk-core.c157
-rw-r--r--block/blk-flush.c26
-rw-r--r--block/blk-lib.c8
-rw-r--r--block/blk-merge.c6
-rw-r--r--block/blk-mq-cpumap.c4
-rw-r--r--block/blk-mq-debugfs.c7
-rw-r--r--block/blk-mq-pci.c8
-rw-r--r--block/blk-mq-rdma.c52
-rw-r--r--block/blk-mq-tag.c23
-rw-r--r--block/blk-mq.c70
-rw-r--r--block/blk-mq.h3
-rw-r--r--block/blk-settings.c1
-rw-r--r--block/blk-softirq.c2
-rw-r--r--block/blk-sysfs.c2
-rw-r--r--block/blk-tag.c1
-rw-r--r--block/blk-throttle.c31
-rw-r--r--block/blk-zoned.c4
-rw-r--r--block/blk.h3
-rw-r--r--block/bsg-lib.c74
-rw-r--r--block/bsg.c7
-rw-r--r--block/cfq-iosched.c31
-rw-r--r--block/compat_ioctl.c2
-rw-r--r--block/deadline-iosched.c9
-rw-r--r--block/elevator.c4
-rw-r--r--block/genhd.c109
-rw-r--r--block/mq-deadline.c10
-rw-r--r--block/partition-generic.c23
-rw-r--r--crypto/Kconfig2
-rw-r--r--crypto/af_alg.c691
-rw-r--r--crypto/ahash.c29
-rw-r--r--crypto/algapi.c25
-rw-r--r--crypto/algif_aead.c864
-rw-r--r--crypto/algif_skcipher.c829
-rw-r--r--crypto/authencesn.c5
-rw-r--r--crypto/chacha20_generic.c9
-rw-r--r--crypto/ctr.c3
-rw-r--r--crypto/ecdh.c51
-rw-r--r--crypto/pcbc.c12
-rw-r--r--crypto/rng.c6
-rw-r--r--crypto/scompress.c55
-rw-r--r--crypto/serpent_generic.c77
-rw-r--r--crypto/tcrypt.c8
-rw-r--r--crypto/testmgr.h7
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/acpi/Makefile1
-rw-r--r--drivers/acpi/acpi_apd.c4
-rw-r--r--drivers/acpi/acpi_lpat.c6
-rw-r--r--drivers/acpi/acpi_lpss.c17
-rw-r--r--drivers/acpi/acpi_processor.c2
-rw-r--r--drivers/acpi/acpi_watchdog.c7
-rw-r--r--drivers/acpi/acpica/Makefile1
-rw-r--r--drivers/acpi/acpica/acapps.h2
-rw-r--r--drivers/acpi/acpica/acdispat.h13
-rw-r--r--drivers/acpi/acpica/aclocal.h7
-rw-r--r--drivers/acpi/acpica/acobject.h15
-rw-r--r--drivers/acpi/acpica/actables.h5
-rw-r--r--drivers/acpi/acpica/acutils.h9
-rw-r--r--drivers/acpi/acpica/dbdisply.c37
-rw-r--r--drivers/acpi/acpica/dsfield.c2
-rw-r--r--drivers/acpi/acpica/dsobject.c398
-rw-r--r--drivers/acpi/acpica/dsopcode.c9
-rw-r--r--drivers/acpi/acpica/dspkginit.c496
-rw-r--r--drivers/acpi/acpica/evgpeblk.c30
-rw-r--r--drivers/acpi/acpica/evxfgpe.c8
-rw-r--r--drivers/acpi/acpica/excreate.c62
-rw-r--r--drivers/acpi/acpica/exdump.c34
-rw-r--r--drivers/acpi/acpica/exmisc.c9
-rw-r--r--drivers/acpi/acpica/exoparg2.c3
-rw-r--r--drivers/acpi/acpica/hwregs.c2
-rw-r--r--drivers/acpi/acpica/hwxfsleep.c9
-rw-r--r--drivers/acpi/acpica/nsaccess.c28
-rw-r--r--drivers/acpi/acpica/nsarguments.c21
-rw-r--r--drivers/acpi/acpica/nsinit.c14
-rw-r--r--drivers/acpi/acpica/nsnames.c9
-rw-r--r--drivers/acpi/acpica/nsprepkg.c2
-rw-r--r--drivers/acpi/acpica/nsxfeval.c10
-rw-r--r--drivers/acpi/acpica/psloop.c14
-rw-r--r--drivers/acpi/acpica/psobject.c26
-rw-r--r--drivers/acpi/acpica/rsxface.c7
-rw-r--r--drivers/acpi/acpica/tbdata.c230
-rw-r--r--drivers/acpi/acpica/tbinstal.c161
-rw-r--r--drivers/acpi/acpica/tbxface.c39
-rw-r--r--drivers/acpi/acpica/tbxfload.c2
-rw-r--r--drivers/acpi/acpica/uthex.c4
-rw-r--r--drivers/acpi/acpica/utmath.c222
-rw-r--r--drivers/acpi/acpica/utmisc.c10
-rw-r--r--drivers/acpi/acpica/utobject.c5
-rw-r--r--drivers/acpi/acpica/utprint.c8
-rw-r--r--drivers/acpi/acpica/utresrc.c7
-rw-r--r--drivers/acpi/acpica/utstate.c2
-rw-r--r--drivers/acpi/acpica/utstrtoul64.c9
-rw-r--r--drivers/acpi/acpica/uttrack.c9
-rw-r--r--drivers/acpi/apei/apei-internal.h5
-rw-r--r--drivers/acpi/apei/einj.c2
-rw-r--r--drivers/acpi/apei/ghes.c10
-rw-r--r--drivers/acpi/apei/hest.c13
-rw-r--r--drivers/acpi/arm64/iort.c197
-rw-r--r--drivers/acpi/battery.c2
-rw-r--r--drivers/acpi/blacklist.c83
-rw-r--r--drivers/acpi/bus.c3
-rw-r--r--drivers/acpi/device_pm.c175
-rw-r--r--drivers/acpi/dock.c2
-rw-r--r--drivers/acpi/ec.c102
-rw-r--r--drivers/acpi/internal.h11
-rw-r--r--drivers/acpi/nfit/core.c12
-rw-r--r--drivers/acpi/numa.c2
-rw-r--r--drivers/acpi/osi.c37
-rw-r--r--drivers/acpi/pci_root.c4
-rw-r--r--drivers/acpi/pmic/intel_pmic_xpower.c21
-rw-r--r--drivers/acpi/processor_driver.c2
-rw-r--r--drivers/acpi/processor_idle.c25
-rw-r--r--drivers/acpi/property.c239
-rw-r--r--drivers/acpi/resource.c82
-rw-r--r--drivers/acpi/sbs.c27
-rw-r--r--drivers/acpi/scan.c108
-rw-r--r--drivers/acpi/sleep.c210
-rw-r--r--drivers/acpi/spcr.c72
-rw-r--r--drivers/acpi/sysfs.c91
-rw-r--r--drivers/acpi/tables.c4
-rw-r--r--drivers/acpi/utils.c36
-rw-r--r--drivers/acpi/video_detect.c14
-rw-r--r--drivers/acpi/x86/apple.c141
-rw-r--r--drivers/android/Kconfig14
-rw-r--r--drivers/android/Makefile3
-rw-r--r--drivers/android/binder.c3767
-rw-r--r--drivers/android/binder_alloc.c1009
-rw-r--r--drivers/android/binder_alloc.h187
-rw-r--r--drivers/android/binder_alloc_selftest.c310
-rw-r--r--drivers/android/binder_trace.h96
-rw-r--r--drivers/ata/Kconfig14
-rw-r--r--drivers/ata/Makefile1
-rw-r--r--drivers/ata/ahci.c9
-rw-r--r--drivers/ata/ahci_da850.c8
-rw-r--r--drivers/ata/ahci_mtk.c196
-rw-r--r--drivers/ata/ahci_platform.c1
-rw-r--r--drivers/ata/libahci_platform.c34
-rw-r--r--drivers/ata/libata-core.c9
-rw-r--r--drivers/ata/libata-eh.c15
-rw-r--r--drivers/ata/libata-scsi.c6
-rw-r--r--drivers/ata/libata-zpodd.c4
-rw-r--r--drivers/ata/pata_amd.c1
-rw-r--r--drivers/ata/pata_cs5536.c1
-rw-r--r--drivers/ata/pata_octeon_cf.c10
-rw-r--r--drivers/ata/sata_gemini.c67
-rw-r--r--drivers/ata/sata_rcar.c8
-rw-r--r--drivers/ata/sata_svw.c2
-rw-r--r--drivers/atm/adummy.c4
-rw-r--r--drivers/atm/ambassador.c2
-rw-r--r--drivers/atm/atmtcp.c2
-rw-r--r--drivers/atm/eni.c2
-rw-r--r--drivers/atm/firestream.c2
-rw-r--r--drivers/atm/fore200e.c2
-rw-r--r--drivers/atm/he.c4
-rw-r--r--drivers/atm/horizon.c2
-rw-r--r--drivers/atm/idt77252.c4
-rw-r--r--drivers/atm/iphase.c2
-rw-r--r--drivers/atm/lanai.c2
-rw-r--r--drivers/atm/nicstar.c4
-rw-r--r--drivers/atm/solos-pci.c8
-rw-r--r--drivers/atm/zatm.c4
-rw-r--r--drivers/auxdisplay/panel.c6
-rw-r--r--drivers/base/arch_topology.c86
-rw-r--r--drivers/base/base.h5
-rw-r--r--drivers/base/bus.c2
-rw-r--r--drivers/base/core.c141
-rw-r--r--drivers/base/dd.c32
-rw-r--r--drivers/base/dma-coherent.c164
-rw-r--r--drivers/base/dma-mapping.c2
-rw-r--r--drivers/base/firmware_class.c62
-rw-r--r--drivers/base/memory.c30
-rw-r--r--drivers/base/power/domain.c259
-rw-r--r--drivers/base/power/main.c103
-rw-r--r--drivers/base/power/opp/of.c37
-rw-r--r--drivers/base/power/wakeup.c10
-rw-r--r--drivers/base/property.c131
-rw-r--r--drivers/base/regmap/regmap-w1.c4
-rw-r--r--drivers/base/topology.c2
-rw-r--r--drivers/bcma/Kconfig9
-rw-r--r--drivers/bcma/driver_gpio.c1
-rw-r--r--drivers/block/DAC960.c12
-rw-r--r--drivers/block/Kconfig3
-rw-r--r--drivers/block/brd.c11
-rw-r--r--drivers/block/drbd/drbd_actlog.c2
-rw-r--r--drivers/block/drbd/drbd_bitmap.c2
-rw-r--r--drivers/block/drbd/drbd_int.h31
-rw-r--r--drivers/block/drbd/drbd_main.c113
-rw-r--r--drivers/block/drbd/drbd_nl.c60
-rw-r--r--drivers/block/drbd/drbd_proc.c10
-rw-r--r--drivers/block/drbd/drbd_receiver.c60
-rw-r--r--drivers/block/drbd/drbd_req.c86
-rw-r--r--drivers/block/drbd/drbd_req.h6
-rw-r--r--drivers/block/drbd/drbd_state.c48
-rw-r--r--drivers/block/drbd/drbd_state.h8
-rw-r--r--drivers/block/drbd/drbd_worker.c48
-rw-r--r--drivers/block/floppy.c2
-rw-r--r--drivers/block/loop.c56
-rw-r--r--drivers/block/loop.h1
-rw-r--r--drivers/block/nbd.c35
-rw-r--r--drivers/block/null_blk.c1311
-rw-r--r--drivers/block/pktcdvd.c11
-rw-r--r--drivers/block/ps3vram.c10
-rw-r--r--drivers/block/rsxx/dev.c6
-rw-r--r--drivers/block/skd_main.c3164
-rw-r--r--drivers/block/skd_s1120.h38
-rw-r--r--drivers/block/sunvdc.c61
-rw-r--r--drivers/block/virtio_blk.c25
-rw-r--r--drivers/block/xen-blkback/blkback.c9
-rw-r--r--drivers/block/xen-blkback/xenbus.c13
-rw-r--r--drivers/block/xen-blkfront.c33
-rw-r--r--drivers/block/zram/Kconfig12
-rw-r--r--drivers/block/zram/zram_drv.c549
-rw-r--r--drivers/block/zram/zram_drv.h11
-rw-r--r--drivers/bluetooth/Kconfig2
-rw-r--r--drivers/bluetooth/ath3k.c3
-rw-r--r--drivers/bluetooth/bluecard_cs.c58
-rw-r--r--drivers/bluetooth/bt3c_cs.c8
-rw-r--r--drivers/bluetooth/btbcm.c69
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c6
-rw-r--r--drivers/bluetooth/btqca.c2
-rw-r--r--drivers/bluetooth/btrtl.c2
-rw-r--r--drivers/bluetooth/btsdio.c3
-rw-r--r--drivers/bluetooth/btuart_cs.c8
-rw-r--r--drivers/bluetooth/btusb.c68
-rw-r--r--drivers/bluetooth/btwilink.c8
-rw-r--r--drivers/bluetooth/hci_bcm.c133
-rw-r--r--drivers/bluetooth/hci_h4.c2
-rw-r--r--drivers/bluetooth/hci_ldisc.c3
-rw-r--r--drivers/bluetooth/hci_ll.c11
-rw-r--r--drivers/bluetooth/hci_nokia.c10
-rw-r--r--drivers/bluetooth/hci_serdev.c13
-rw-r--r--drivers/bluetooth/hci_uart.h1
-rw-r--r--drivers/bus/uniphier-system-bus.c14
-rw-r--r--drivers/char/Kconfig2
-rw-r--r--drivers/char/agp/ali-agp.c2
-rw-r--r--drivers/char/agp/amd-k7-agp.c4
-rw-r--r--drivers/char/agp/amd64-agp.c2
-rw-r--r--drivers/char/agp/ati-agp.c2
-rw-r--r--drivers/char/agp/efficeon-agp.c2
-rw-r--r--drivers/char/agp/intel-agp.c2
-rw-r--r--drivers/char/agp/nvidia-agp.c2
-rw-r--r--drivers/char/agp/sis-agp.c2
-rw-r--r--drivers/char/agp/uninorth-agp.c2
-rw-r--r--drivers/char/applicom.c2
-rw-r--r--drivers/char/hw_random/Kconfig20
-rw-r--r--drivers/char/hw_random/Makefile1
-rw-r--r--drivers/char/hw_random/core.c42
-rw-r--r--drivers/char/hw_random/imx-rngc.c331
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c12
-rw-r--r--drivers/char/mwave/smapi.c48
-rw-r--r--drivers/char/ppdev.c3
-rw-r--r--drivers/char/random.c2
-rw-r--r--drivers/char/tlclk.c2
-rw-r--r--drivers/char/tpm/tpm-chip.c11
-rw-r--r--drivers/char/virtio_console.c4
-rw-r--r--drivers/char/xilinx_hwicap/xilinx_hwicap.c39
-rw-r--r--drivers/char/xilinx_hwicap/xilinx_hwicap.h13
-rw-r--r--drivers/clk/clk-gemini.c14
-rw-r--r--drivers/clk/keystone/sci-clk.c66
-rw-r--r--drivers/clk/meson/clk-mpll.c7
-rw-r--r--drivers/clk/meson/clkc.h1
-rw-r--r--drivers/clk/meson/gxbb.c5
-rw-r--r--drivers/clk/meson/meson8b.c5
-rw-r--r--drivers/clk/samsung/clk-exynos5420.c16
-rw-r--r--drivers/clk/sunxi-ng/Makefile1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun5i.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a83t.c10
-rw-r--r--drivers/clk/sunxi-ng/ccu_common.h4
-rw-r--r--drivers/clk/sunxi-ng/ccu_mmc_timing.c70
-rw-r--r--drivers/clk/sunxi-ng/ccu_mp.c80
-rw-r--r--drivers/clk/sunxi-ng/ccu_mp.h30
-rw-r--r--drivers/clk/sunxi-ng/ccu_reset.c12
-rw-r--r--drivers/clk/x86/clk-pmc-atom.c7
-rw-r--r--drivers/clocksource/Kconfig10
-rw-r--r--drivers/clocksource/Makefile1
-rw-r--r--drivers/clocksource/arm_arch_timer.c8
-rw-r--r--drivers/clocksource/bcm2835_timer.c1
-rw-r--r--drivers/clocksource/em_sti.c11
-rw-r--r--drivers/clocksource/tango_xtal.c6
-rw-r--r--drivers/clocksource/timer-imx-tpm.c239
-rw-r--r--drivers/clocksource/timer-of.c27
-rw-r--r--drivers/clocksource/timer-probe.c3
-rw-r--r--drivers/clocksource/timer-stm32.c8
-rw-r--r--drivers/cpufreq/Kconfig.arm21
-rw-r--r--drivers/cpufreq/Makefile4
-rw-r--r--drivers/cpufreq/arm_big_little.c10
-rw-r--r--drivers/cpufreq/cppc_cpufreq.c1
-rw-r--r--drivers/cpufreq/cpufreq-dt-platdev.c65
-rw-r--r--drivers/cpufreq/cpufreq-dt.c1
-rw-r--r--drivers/cpufreq/cpufreq-nforce2.c2
-rw-r--r--drivers/cpufreq/cpufreq.c41
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c6
-rw-r--r--drivers/cpufreq/cpufreq_governor.c20
-rw-r--r--drivers/cpufreq/cpufreq_governor.h3
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c12
-rw-r--r--drivers/cpufreq/dbx500-cpufreq.c103
-rw-r--r--drivers/cpufreq/elanfreq.c4
-rw-r--r--drivers/cpufreq/gx-suspmod.c2
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c9
-rw-r--r--drivers/cpufreq/intel_pstate.c414
-rw-r--r--drivers/cpufreq/longrun.c1
-rw-r--r--drivers/cpufreq/loongson2_cpufreq.c2
-rw-r--r--drivers/cpufreq/mediatek-cpufreq.c (renamed from drivers/cpufreq/mt8173-cpufreq.c)29
-rw-r--r--drivers/cpufreq/pmac32-cpufreq.c7
-rw-r--r--drivers/cpufreq/pmac64-cpufreq.c2
-rw-r--r--drivers/cpufreq/s5pv210-cpufreq.c3
-rw-r--r--drivers/cpufreq/sa1100-cpufreq.c5
-rw-r--r--drivers/cpufreq/sa1110-cpufreq.c5
-rw-r--r--drivers/cpufreq/sh-cpufreq.c3
-rw-r--r--drivers/cpufreq/speedstep-ich.c2
-rw-r--r--drivers/cpufreq/speedstep-lib.c4
-rw-r--r--drivers/cpufreq/speedstep-smi.c2
-rw-r--r--drivers/cpufreq/sti-cpufreq.c8
-rw-r--r--drivers/cpufreq/tango-cpufreq.c38
-rw-r--r--drivers/cpufreq/ti-cpufreq.c4
-rw-r--r--drivers/cpufreq/unicore2-cpufreq.c3
-rw-r--r--drivers/cpuidle/Makefile1
-rw-r--r--drivers/cpuidle/coupled.c10
-rw-r--r--drivers/cpuidle/cpuidle-powernv.c10
-rw-r--r--drivers/cpuidle/cpuidle.c18
-rw-r--r--drivers/cpuidle/driver.c32
-rw-r--r--drivers/cpuidle/dt_idle_states.c24
-rw-r--r--drivers/cpuidle/governors/ladder.c14
-rw-r--r--drivers/cpuidle/governors/menu.c13
-rw-r--r--drivers/cpuidle/poll_state.c37
-rw-r--r--drivers/crypto/Kconfig51
-rw-r--r--drivers/crypto/Makefile4
-rw-r--r--drivers/crypto/atmel-ecc.c781
-rw-r--r--drivers/crypto/atmel-ecc.h128
-rw-r--r--drivers/crypto/atmel-sha.c2
-rw-r--r--drivers/crypto/atmel-tdes.c2
-rw-r--r--drivers/crypto/axis/Makefile1
-rw-r--r--drivers/crypto/axis/artpec6_crypto.c3192
-rw-r--r--drivers/crypto/bcm/cipher.c114
-rw-r--r--drivers/crypto/bcm/cipher.h13
-rw-r--r--drivers/crypto/bcm/spu2.c1
-rw-r--r--drivers/crypto/caam/caamalg.c66
-rw-r--r--drivers/crypto/caam/caamalg_desc.c5
-rw-r--r--drivers/crypto/caam/caamalg_qi.c55
-rw-r--r--drivers/crypto/caam/caamhash.c7
-rw-r--r--drivers/crypto/caam/caamrng.c6
-rw-r--r--drivers/crypto/caam/ctrl.c127
-rw-r--r--drivers/crypto/caam/ctrl.h2
-rw-r--r--drivers/crypto/caam/error.c40
-rw-r--r--drivers/crypto/caam/error.h4
-rw-r--r--drivers/crypto/caam/intern.h11
-rw-r--r--drivers/crypto/caam/jr.c7
-rw-r--r--drivers/crypto/caam/qi.c30
-rw-r--r--drivers/crypto/caam/qi.h3
-rw-r--r--drivers/crypto/caam/regs.h1
-rw-r--r--drivers/crypto/caam/sg_sw_qm2.h81
-rw-r--r--drivers/crypto/caam/sg_sw_sec4.h43
-rw-r--r--drivers/crypto/cavium/cpt/cptpf_main.c13
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_main.c7
-rw-r--r--drivers/crypto/ccp/Kconfig22
-rw-r--r--drivers/crypto/ccp/Makefile7
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes-galois.c2
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes-xts.c96
-rw-r--r--drivers/crypto/ccp/ccp-crypto-des3.c2
-rw-r--r--drivers/crypto/ccp/ccp-crypto-main.c21
-rw-r--r--drivers/crypto/ccp/ccp-crypto-rsa.c299
-rw-r--r--drivers/crypto/ccp/ccp-crypto-sha.c2
-rw-r--r--drivers/crypto/ccp/ccp-crypto.h36
-rw-r--r--drivers/crypto/ccp/ccp-debugfs.c15
-rw-r--r--drivers/crypto/ccp/ccp-dev-v3.c20
-rw-r--r--drivers/crypto/ccp/ccp-dev-v5.c28
-rw-r--r--drivers/crypto/ccp/ccp-dev.c134
-rw-r--r--drivers/crypto/ccp/ccp-dev.h30
-rw-r--r--drivers/crypto/ccp/ccp-dmaengine.c2
-rw-r--r--drivers/crypto/ccp/ccp-ops.c133
-rw-r--r--drivers/crypto/ccp/ccp-pci.c356
-rw-r--r--drivers/crypto/ccp/ccp-platform.c293
-rw-r--r--drivers/crypto/ccp/sp-dev.c277
-rw-r--r--drivers/crypto/ccp/sp-dev.h133
-rw-r--r--drivers/crypto/ccp/sp-pci.c276
-rw-r--r--drivers/crypto/ccp/sp-platform.c256
-rw-r--r--drivers/crypto/geode-aes.c17
-rw-r--r--drivers/crypto/img-hash.c2
-rw-r--r--drivers/crypto/inside-secure/safexcel.c10
-rw-r--r--drivers/crypto/inside-secure/safexcel_hash.c8
-rw-r--r--drivers/crypto/ixp4xx_crypto.c6
-rw-r--r--drivers/crypto/mediatek/mtk-platform.c2
-rw-r--r--drivers/crypto/mxc-scc.c4
-rw-r--r--drivers/crypto/mxs-dcp.c8
-rw-r--r--drivers/crypto/n2_core.c60
-rw-r--r--drivers/crypto/nx/Kconfig1
-rw-r--r--drivers/crypto/nx/nx-842-powernv.c514
-rw-r--r--drivers/crypto/nx/nx-842.c2
-rw-r--r--drivers/crypto/nx/nx-842.h13
-rw-r--r--drivers/crypto/omap-aes.c1
-rw-r--r--drivers/crypto/omap-des.c3
-rw-r--r--drivers/crypto/omap-sham.c2
-rw-r--r--drivers/crypto/qat/qat_common/adf_aer.c2
-rw-r--r--drivers/crypto/rockchip/rk3288_crypto.c74
-rw-r--r--drivers/crypto/rockchip/rk3288_crypto.h15
-rw-r--r--drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c103
-rw-r--r--drivers/crypto/rockchip/rk3288_crypto_ahash.c119
-rw-r--r--drivers/crypto/sahara.c14
-rw-r--r--drivers/crypto/stm32/Kconfig19
-rw-r--r--drivers/crypto/stm32/Makefile4
-rw-r--r--drivers/crypto/stm32/stm32-hash.c1575
-rw-r--r--drivers/crypto/stm32/stm32_crc32.c17
-rw-r--r--drivers/crypto/sunxi-ss/Makefile1
-rw-r--r--drivers/crypto/sunxi-ss/sun4i-ss-core.c30
-rw-r--r--drivers/crypto/sunxi-ss/sun4i-ss-prng.c56
-rw-r--r--drivers/crypto/sunxi-ss/sun4i-ss.h11
-rw-r--r--drivers/crypto/virtio/virtio_crypto_algs.c109
-rw-r--r--drivers/crypto/virtio/virtio_crypto_common.h22
-rw-r--r--drivers/crypto/virtio/virtio_crypto_core.c37
-rw-r--r--drivers/crypto/vmx/aes_ctr.c3
-rw-r--r--drivers/dax/device-dax.h2
-rw-r--r--drivers/dax/device.c33
-rw-r--r--drivers/dax/pmem.c12
-rw-r--r--drivers/dax/super.c6
-rw-r--r--drivers/devfreq/Kconfig1
-rw-r--r--drivers/devfreq/devfreq-event.c4
-rw-r--r--drivers/devfreq/devfreq.c5
-rw-r--r--drivers/devfreq/governor.h4
-rw-r--r--drivers/dma-buf/dma-fence.c21
-rw-r--r--drivers/dma-buf/reservation.c99
-rw-r--r--drivers/dma-buf/sw_sync.c201
-rw-r--r--drivers/dma-buf/sync_debug.c21
-rw-r--r--drivers/dma-buf/sync_debug.h26
-rw-r--r--drivers/dma-buf/sync_file.c13
-rw-r--r--drivers/dma/tegra210-adma.c4
-rw-r--r--drivers/edac/altera_edac.c6
-rw-r--r--drivers/edac/amd64_edac.c1
-rw-r--r--drivers/edac/amd76x_edac.c2
-rw-r--r--drivers/edac/cpc925_edac.c3
-rw-r--r--drivers/edac/e752x_edac.c2
-rw-r--r--drivers/edac/e7xxx_edac.c2
-rw-r--r--drivers/edac/edac_mc_sysfs.c14
-rw-r--r--drivers/edac/ghes_edac.c3
-rw-r--r--drivers/edac/highbank_mc_edac.c1
-rw-r--r--drivers/edac/i3000_edac.c3
-rw-r--r--drivers/edac/i3200_edac.c3
-rw-r--r--drivers/edac/i5000_edac.c1
-rw-r--r--drivers/edac/i5100_edac.c1
-rw-r--r--drivers/edac/i5400_edac.c1
-rw-r--r--drivers/edac/i7300_edac.c1
-rw-r--r--drivers/edac/i7core_edac.c9
-rw-r--r--drivers/edac/i82443bxgx_edac.c3
-rw-r--r--drivers/edac/i82860_edac.c2
-rw-r--r--drivers/edac/i82875p_edac.c2
-rw-r--r--drivers/edac/i82975x_edac.c2
-rw-r--r--drivers/edac/ie31200_edac.c2
-rw-r--r--drivers/edac/mce_amd.c36
-rw-r--r--drivers/edac/mv64x60_edac.c1
-rw-r--r--drivers/edac/pnd2_edac.c106
-rw-r--r--drivers/edac/ppc4xx_edac.c9
-rw-r--r--drivers/edac/r82600_edac.c2
-rw-r--r--drivers/edac/sb_edac.c64
-rw-r--r--drivers/edac/skx_edac.c3
-rw-r--r--drivers/edac/synopsys_edac.c1
-rw-r--r--drivers/edac/thunderx_edac.c6
-rw-r--r--drivers/edac/x38_edac.c3
-rw-r--r--drivers/edac/xgene_edac.c1
-rw-r--r--drivers/extcon/Kconfig7
-rw-r--r--drivers/extcon/Makefile1
-rw-r--r--drivers/extcon/devres.c50
-rw-r--r--drivers/extcon/extcon-intel-int3496.c2
-rw-r--r--drivers/extcon/extcon-max77693.c5
-rw-r--r--drivers/extcon/extcon-usbc-cros-ec.c417
-rw-r--r--drivers/extcon/extcon.c279
-rw-r--r--drivers/firmware/dcdbas.c2
-rw-r--r--drivers/firmware/dmi-sysfs.c5
-rw-r--r--drivers/firmware/efi/Kconfig10
-rw-r--r--drivers/firmware/efi/apple-properties.c5
-rw-r--r--drivers/firmware/efi/arm-init.c8
-rw-r--r--drivers/firmware/efi/cper.c12
-rw-r--r--drivers/firmware/efi/efi-bgrt.c22
-rw-r--r--drivers/firmware/efi/efi.c75
-rw-r--r--drivers/firmware/efi/esrt.c2
-rw-r--r--drivers/firmware/efi/libstub/Makefile3
-rw-r--r--drivers/firmware/efi/libstub/arm-stub.c3
-rw-r--r--drivers/firmware/efi/libstub/arm64-stub.c16
-rw-r--r--drivers/firmware/efi/libstub/efi-stub-helper.c4
-rw-r--r--drivers/firmware/efi/libstub/random.c10
-rw-r--r--drivers/firmware/efi/libstub/tpm.c58
-rw-r--r--drivers/firmware/efi/reboot.c12
-rw-r--r--drivers/firmware/google/vpd.c10
-rw-r--r--drivers/firmware/pcdp.c4
-rw-r--r--drivers/fmc/Makefile1
-rw-r--r--drivers/fmc/fmc-chardev.c3
-rw-r--r--drivers/fmc/fmc-core.c95
-rw-r--r--drivers/fmc/fmc-debug.c173
-rw-r--r--drivers/fmc/fmc-dump.c41
-rw-r--r--drivers/fmc/fmc-match.c2
-rw-r--r--drivers/fmc/fmc-private.h9
-rw-r--r--drivers/fmc/fmc-sdb.c119
-rw-r--r--drivers/fmc/fmc-trivial.c20
-rw-r--r--drivers/fmc/fmc-write-eeprom.c8
-rw-r--r--drivers/fmc/fru-parse.c3
-rw-r--r--drivers/fpga/Kconfig20
-rw-r--r--drivers/fpga/Makefile2
-rw-r--r--drivers/fpga/altera-cvp.c500
-rw-r--r--drivers/input/serio/ps2-gpio.c453
-rw-r--r--drivers/input/serio/serio.c4
-rw-r--r--drivers/input/serio/serio_raw.c2
-rw-r--r--drivers/input/serio/xilinx_ps2.c12
-rw-r--r--drivers/input/tablet/acecad.c2
-rw-r--r--drivers/input/tablet/aiptek.c2
-rw-r--r--drivers/input/tablet/kbtab.c2
-rw-r--r--drivers/input/tablet/wacom_serial4.c2
-rw-r--r--drivers/input/touchscreen/ads7846.c4
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c46
-rw-r--r--drivers/input/touchscreen/dynapro.c2
-rw-r--r--drivers/input/touchscreen/elants_i2c.c2
-rw-r--r--drivers/input/touchscreen/elo.c2
-rw-r--r--drivers/input/touchscreen/fujitsu_ts.c2
-rw-r--r--drivers/input/touchscreen/gunze.c2
-rw-r--r--drivers/input/touchscreen/hampshire.c2
-rw-r--r--drivers/input/touchscreen/inexio.c2
-rw-r--r--drivers/input/touchscreen/mtouch.c2
-rw-r--r--drivers/input/touchscreen/mxs-lradc-ts.c8
-rw-r--r--drivers/input/touchscreen/penmount.c2
-rw-r--r--drivers/input/touchscreen/raydium_i2c_ts.c2
-rw-r--r--drivers/input/touchscreen/sun4i-ts.c2
-rw-r--r--drivers/input/touchscreen/sur40.c46
-rw-r--r--drivers/input/touchscreen/touchit213.c2
-rw-r--r--drivers/input/touchscreen/touchright.c2
-rw-r--r--drivers/input/touchscreen/touchwin.c2
-rw-r--r--drivers/input/touchscreen/tsc40.c2
-rw-r--r--drivers/input/touchscreen/wacom_w8001.c2
-rw-r--r--drivers/iommu/amd_iommu.c31
-rw-r--r--drivers/iommu/amd_iommu_init.c36
-rw-r--r--drivers/iommu/amd_iommu_proto.h10
-rw-r--r--drivers/iommu/amd_iommu_types.h6
-rw-r--r--drivers/iommu/amd_iommu_v2.c8
-rw-r--r--drivers/iommu/arm-smmu.c23
-rw-r--r--drivers/iommu/intel-iommu.c4
-rw-r--r--drivers/iommu/intel-svm.c9
-rw-r--r--drivers/iommu/io-pgtable-arm-v7s.c6
-rw-r--r--drivers/iommu/io-pgtable-arm.c7
-rw-r--r--drivers/iommu/io-pgtable.h9
-rw-r--r--drivers/iommu/iommu-sysfs.c32
-rw-r--r--drivers/iommu/mtk_iommu.c6
-rw-r--r--drivers/iommu/mtk_iommu.h1
-rw-r--r--drivers/irqchip/Kconfig15
-rw-r--r--drivers/irqchip/Makefile3
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c5
-rw-r--r--drivers/irqchip/irq-atmel-aic-common.c13
-rw-r--r--drivers/irqchip/irq-atmel-aic-common.h4
-rw-r--r--drivers/irqchip/irq-atmel-aic.c14
-rw-r--r--drivers/irqchip/irq-atmel-aic5.c4
-rw-r--r--drivers/irqchip/irq-bcm2835.c9
-rw-r--r--drivers/irqchip/irq-bcm2836.c5
-rw-r--r--drivers/irqchip/irq-bcm6345-l1.c3
-rw-r--r--drivers/irqchip/irq-bcm7038-l1.c3
-rw-r--r--drivers/irqchip/irq-bcm7120-l2.c10
-rw-r--r--drivers/irqchip/irq-brcmstb-l2.c1
-rw-r--r--drivers/irqchip/irq-crossbar.c6
-rw-r--r--drivers/irqchip/irq-digicolor.c10
-rw-r--r--drivers/irqchip/irq-dw-apb-ictl.c12
-rw-r--r--drivers/irqchip/irq-gic-realview.c2
-rw-r--r--drivers/irqchip/irq-gic-v3-its-pci-msi.c2
-rw-r--r--drivers/irqchip/irq-gic-v3-its-platform-msi.c1
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c1536
-rw-r--r--drivers/irqchip/irq-gic-v3.c125
-rw-r--r--drivers/irqchip/irq-gic-v4.c225
-rw-r--r--drivers/irqchip/irq-gic.c19
-rw-r--r--drivers/irqchip/irq-hip04.c3
-rw-r--r--drivers/irqchip/irq-imx-gpcv2.c4
-rw-r--r--drivers/irqchip/irq-lpc32xx.c2
-rw-r--r--drivers/irqchip/irq-ls-scfg-msi.c256
-rw-r--r--drivers/irqchip/irq-metag-ext.c4
-rw-r--r--drivers/irqchip/irq-mips-cpu.c2
-rw-r--r--drivers/irqchip/irq-mips-gic.c17
-rw-r--r--drivers/irqchip/irq-mmp.c4
-rw-r--r--drivers/irqchip/irq-mtk-sysirq.c3
-rw-r--r--drivers/irqchip/irq-mxs.c4
-rw-r--r--drivers/irqchip/irq-stm32-exti.c8
-rw-r--r--drivers/irqchip/irq-sun4i.c6
-rw-r--r--drivers/irqchip/irq-tegra.c16
-rw-r--r--drivers/irqchip/irq-uniphier-aidet.c261
-rw-r--r--drivers/irqchip/irq-xilinx-intc.c4
-rw-r--r--drivers/irqchip/irq-xtensa-mx.c6
-rw-r--r--drivers/isdn/capi/kcapi.c2
-rw-r--r--drivers/isdn/divert/isdn_divert.c25
-rw-r--r--drivers/isdn/hardware/avm/c4.c2
-rw-r--r--drivers/isdn/hardware/eicon/divacapi.h16
-rw-r--r--drivers/isdn/hardware/eicon/divasmain.c2
-rw-r--r--drivers/isdn/hardware/eicon/message.c247
-rw-r--r--drivers/isdn/hardware/mISDN/avmfritz.c2
-rw-r--r--drivers/isdn/hardware/mISDN/hfcmulti.c2
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c2
-rw-r--r--drivers/isdn/hardware/mISDN/hfcsusb.h2
-rw-r--r--drivers/isdn/hardware/mISDN/netjet.c2
-rw-r--r--drivers/isdn/hardware/mISDN/w6692.c2
-rw-r--r--drivers/isdn/hisax/config.c2
-rw-r--r--drivers/isdn/hisax/hfc4s8s_l1.c2
-rw-r--r--drivers/isdn/hisax/hfc_usb.c2
-rw-r--r--drivers/isdn/hisax/hisax_fcpcipnp.c2
-rw-r--r--drivers/isdn/hysdn/hysdn_proclog.c28
-rw-r--r--drivers/isdn/i4l/isdn_common.c1
-rw-r--r--drivers/isdn/i4l/isdn_net.c5
-rw-r--r--drivers/isdn/mISDN/fsm.c5
-rw-r--r--drivers/isdn/mISDN/fsm.h2
-rw-r--r--drivers/isdn/mISDN/layer1.c3
-rw-r--r--drivers/isdn/mISDN/layer2.c15
-rw-r--r--drivers/isdn/mISDN/tei.c20
-rw-r--r--drivers/leds/Kconfig9
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/leds-aat1290.c4
-rw-r--r--drivers/leds/leds-as3645a.c763
-rw-r--r--drivers/leds/leds-max77693.c4
-rw-r--r--drivers/lguest/Kconfig13
-rw-r--r--drivers/lguest/Makefile26
-rw-r--r--drivers/lguest/README47
-rw-r--r--drivers/lguest/core.c398
-rw-r--r--drivers/lguest/hypercalls.c304
-rw-r--r--drivers/lguest/interrupts_and_traps.c706
-rw-r--r--drivers/lguest/lg.h258
-rw-r--r--drivers/lguest/lguest_user.c446
-rw-r--r--drivers/lguest/page_tables.c1239
-rw-r--r--drivers/lguest/segments.c228
-rw-r--r--drivers/lguest/x86/core.c724
-rw-r--r--drivers/lguest/x86/switcher_32.S388
-rw-r--r--drivers/lightnvm/pblk-rb.c4
-rw-r--r--drivers/lightnvm/pblk-read.c23
-rw-r--r--drivers/lightnvm/pblk.h2
-rw-r--r--drivers/macintosh/macio_sysfs.c2
-rw-r--r--drivers/macintosh/rack-meter.c14
-rw-r--r--drivers/macintosh/smu.c8
-rw-r--r--drivers/macintosh/via-cuda.c4
-rw-r--r--drivers/macintosh/windfarm_cpufreq_clamp.c2
-rw-r--r--drivers/macintosh/windfarm_fcu_controls.c4
-rw-r--r--drivers/macintosh/windfarm_lm75_sensor.c2
-rw-r--r--drivers/macintosh/windfarm_lm87_sensor.c6
-rw-r--r--drivers/macintosh/windfarm_max6690_sensor.c2
-rw-r--r--drivers/macintosh/windfarm_rm31.c4
-rw-r--r--drivers/macintosh/windfarm_smu_controls.c2
-rw-r--r--drivers/macintosh/windfarm_smu_sat.c4
-rw-r--r--drivers/macintosh/windfarm_smu_sensors.c10
-rw-r--r--drivers/mailbox/bcm-flexrm-mailbox.c252
-rw-r--r--drivers/mailbox/pcc.c6
-rw-r--r--drivers/mcb/mcb-core.c20
-rw-r--r--drivers/mcb/mcb-lpc.c15
-rw-r--r--drivers/mcb/mcb-parse.c6
-rw-r--r--drivers/md/bcache/debug.c2
-rw-r--r--drivers/md/bcache/io.c2
-rw-r--r--drivers/md/bcache/journal.c6
-rw-r--r--drivers/md/bcache/request.c21
-rw-r--r--drivers/md/bcache/super.c6
-rw-r--r--drivers/md/bcache/writeback.c5
-rw-r--r--drivers/md/bitmap.c12
-rw-r--r--drivers/md/dm-bio-record.h9
-rw-r--r--drivers/md/dm-bufio.c5
-rw-r--r--drivers/md/dm-cache-target.c4
-rw-r--r--drivers/md/dm-crypt.c18
-rw-r--r--drivers/md/dm-delay.c4
-rw-r--r--drivers/md/dm-era-target.c2
-rw-r--r--drivers/md/dm-flakey.c2
-rw-r--r--drivers/md/dm-integrity.c33
-rw-r--r--drivers/md/dm-io.c2
-rw-r--r--drivers/md/dm-linear.c2
-rw-r--r--drivers/md/dm-log-writes.c8
-rw-r--r--drivers/md/dm-mpath.c4
-rw-r--r--drivers/md/dm-raid.c29
-rw-r--r--drivers/md/dm-raid1.c12
-rw-r--r--drivers/md/dm-snap.c16
-rw-r--r--drivers/md/dm-stripe.c10
-rw-r--r--drivers/md/dm-switch.c2
-rw-r--r--drivers/md/dm-table.c35
-rw-r--r--drivers/md/dm-thin.c6
-rw-r--r--drivers/md/dm-verity-fec.c21
-rw-r--r--drivers/md/dm-verity-target.c2
-rw-r--r--drivers/md/dm-zoned-metadata.c18
-rw-r--r--drivers/md/dm-zoned-reclaim.c2
-rw-r--r--drivers/md/dm-zoned-target.c12
-rw-r--r--drivers/md/dm.c28
-rw-r--r--drivers/md/faulty.c4
-rw-r--r--drivers/md/linear.c6
-rw-r--r--drivers/md/md.c37
-rw-r--r--drivers/md/md.h68
-rw-r--r--drivers/md/multipath.c8
-rw-r--r--drivers/md/raid0.c11
-rw-r--r--drivers/md/raid1-10.c81
-rw-r--r--drivers/md/raid1.c124
-rw-r--r--drivers/md/raid10.c110
-rw-r--r--drivers/md/raid5-cache.c79
-rw-r--r--drivers/md/raid5-ppl.c179
-rw-r--r--drivers/md/raid5.c47
-rw-r--r--drivers/media/Kconfig20
-rw-r--r--drivers/media/cec/Makefile4
-rw-r--r--drivers/media/cec/cec-adap.c286
-rw-r--r--drivers/media/cec/cec-api.c92
-rw-r--r--drivers/media/cec/cec-core.c27
-rw-r--r--drivers/media/cec/cec-notifier.c6
-rw-r--r--drivers/media/cec/cec-pin.c802
-rw-r--r--drivers/media/common/saa7146/saa7146_i2c.c2
-rw-r--r--drivers/media/common/saa7146/saa7146_vbi.c2
-rw-r--r--drivers/media/common/saa7146/saa7146_video.c2
-rw-r--r--drivers/media/common/siano/smsir.c6
-rw-r--r--drivers/media/common/v4l2-tpg/v4l2-tpg-colors.c150
-rw-r--r--drivers/media/common/v4l2-tpg/v4l2-tpg-core.c2
-rw-r--r--drivers/media/dvb-core/demux.h2
-rw-r--r--drivers/media/dvb-core/dmxdev.c24
-rw-r--r--drivers/media/dvb-core/dvb-usb-ids.h1
-rw-r--r--drivers/media/dvb-core/dvb_ca_en50221.c1056
-rw-r--r--drivers/media/dvb-core/dvb_ca_en50221.h13
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c15
-rw-r--r--drivers/media/dvb-frontends/Kconfig27
-rw-r--r--drivers/media/dvb-frontends/Makefile3
-rw-r--r--drivers/media/dvb-frontends/cx24123.c2
-rw-r--r--drivers/media/dvb-frontends/cxd2841er.c80
-rw-r--r--drivers/media/dvb-frontends/dib0090.c11
-rw-r--r--drivers/media/dvb-frontends/dib7000p.c2
-rw-r--r--drivers/media/dvb-frontends/dib8000.c20
-rw-r--r--drivers/media/dvb-frontends/dib8000.h1
-rw-r--r--drivers/media/dvb-frontends/dib9000.c22
-rw-r--r--drivers/media/dvb-frontends/dib9000.h7
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drx_driver.h15
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drxj.c35
-rw-r--r--drivers/media/dvb-frontends/drxd_hard.c6
-rw-r--r--drivers/media/dvb-frontends/isl6421.c76
-rw-r--r--drivers/media/dvb-frontends/lnbh25.c6
-rw-r--r--drivers/media/dvb-frontends/mb86a16.c25
-rw-r--r--drivers/media/dvb-frontends/mn88472.c4
-rw-r--r--drivers/media/dvb-frontends/mn88473.c4
-rw-r--r--drivers/media/dvb-frontends/mxl5xx.c1873
-rw-r--r--drivers/media/dvb-frontends/mxl5xx.h41
-rw-r--r--drivers/media/dvb-frontends/mxl5xx_defs.h731
-rw-r--r--drivers/media/dvb-frontends/mxl5xx_regs.h367
-rw-r--r--drivers/media/dvb-frontends/s5h1420.c2
-rw-r--r--drivers/media/dvb-frontends/stv0367.c340
-rw-r--r--drivers/media/dvb-frontends/stv0910.c1813
-rw-r--r--drivers/media/dvb-frontends/stv0910.h32
-rw-r--r--drivers/media/dvb-frontends/stv0910_regs.h4760
-rw-r--r--drivers/media/dvb-frontends/stv6111.c681
-rw-r--r--drivers/media/dvb-frontends/stv6111.h21
-rw-r--r--drivers/media/dvb-frontends/zd1301_demod.c2
-rw-r--r--drivers/media/i2c/Kconfig36
-rw-r--r--drivers/media/i2c/Makefile3
-rw-r--r--drivers/media/i2c/ad9389b.c2
-rw-r--r--drivers/media/i2c/adv7180.c2
-rw-r--r--drivers/media/i2c/adv748x/Makefile7
-rw-r--r--drivers/media/i2c/adv748x/adv748x-afe.c552
-rw-r--r--drivers/media/i2c/adv748x/adv748x-core.c833
-rw-r--r--drivers/media/i2c/adv748x/adv748x-csi2.c326
-rw-r--r--drivers/media/i2c/adv748x/adv748x-hdmi.c768
-rw-r--r--drivers/media/i2c/adv748x/adv748x.h425
-rw-r--r--drivers/media/i2c/adv7511.c5
-rw-r--r--drivers/media/i2c/adv7604.c7
-rw-r--r--drivers/media/i2c/adv7842.c5
-rw-r--r--drivers/media/i2c/dw9714.c26
-rw-r--r--drivers/media/i2c/et8ek8/et8ek8_driver.c27
-rw-r--r--drivers/media/i2c/ir-kbd-i2c.c59
-rw-r--r--drivers/media/i2c/m5mols/m5mols_core.c2
-rw-r--r--drivers/media/i2c/max2175.c2
-rw-r--r--drivers/media/i2c/mt9m111.c6
-rw-r--r--drivers/media/i2c/mt9t001.c8
-rw-r--r--drivers/media/i2c/ov13858.c101
-rw-r--r--drivers/media/i2c/ov5640.c3
-rw-r--r--drivers/media/i2c/ov5645.c49
-rw-r--r--drivers/media/i2c/ov5670.c2601
-rw-r--r--drivers/media/i2c/ov6650.c (renamed from drivers/media/i2c/soc_camera/ov6650.c)77
-rw-r--r--drivers/media/i2c/ov7670.c6
-rw-r--r--drivers/media/i2c/ov9650.c67
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-core.c3
-rw-r--r--drivers/media/i2c/s5k5baf.c9
-rw-r--r--drivers/media/i2c/saa7127.c2
-rw-r--r--drivers/media/i2c/saa717x.c2
-rw-r--r--drivers/media/i2c/smiapp/smiapp-core.c16
-rw-r--r--drivers/media/i2c/smiapp/smiapp-quirk.c8
-rw-r--r--drivers/media/i2c/soc_camera/Kconfig6
-rw-r--r--drivers/media/i2c/soc_camera/Makefile1
-rw-r--r--drivers/media/i2c/soc_camera/mt9t031.c2
-rw-r--r--drivers/media/i2c/tc358743.c2
-rw-r--r--drivers/media/i2c/ths8200.c2
-rw-r--r--drivers/media/i2c/tvp5150.c25
-rw-r--r--drivers/media/i2c/vs6624.c2
-rw-r--r--drivers/media/media-device.c16
-rw-r--r--drivers/media/media-entity.c2
-rw-r--r--drivers/media/pci/b2c2/flexcop-pci.c2
-rw-r--r--drivers/media/pci/bt8xx/bt878.c2
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c4
-rw-r--r--drivers/media/pci/bt8xx/bttv-i2c.c2
-rw-r--r--drivers/media/pci/bt8xx/bttv-input.c18
-rw-r--r--drivers/media/pci/bt8xx/dst_ca.c70
-rw-r--r--drivers/media/pci/cobalt/cobalt-alsa-pcm.c4
-rw-r--r--drivers/media/pci/cobalt/cobalt-driver.c2
-rw-r--r--drivers/media/pci/cobalt/cobalt-i2c.c2
-rw-r--r--drivers/media/pci/cx18/cx18-alsa-mixer.c2
-rw-r--r--drivers/media/pci/cx18/cx18-alsa-pcm.c2
-rw-r--r--drivers/media/pci/cx18/cx18-driver.c2
-rw-r--r--drivers/media/pci/cx18/cx18-i2c.c8
-rw-r--r--drivers/media/pci/cx18/cx18-streams.c4
-rw-r--r--drivers/media/pci/cx23885/cx23885-417.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-alsa.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-cards.c6
-rw-r--r--drivers/media/pci/cx23885/cx23885-core.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-dvb.c10
-rw-r--r--drivers/media/pci/cx23885/cx23885-i2c.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-input.c16
-rw-r--r--drivers/media/pci/cx25821/cx25821-alsa.c2
-rw-r--r--drivers/media/pci/cx25821/cx25821-core.c5
-rw-r--r--drivers/media/pci/cx25821/cx25821-i2c.c2
-rw-r--r--drivers/media/pci/cx25821/cx25821.h2
-rw-r--r--drivers/media/pci/cx88/cx88-alsa.c2
-rw-r--r--drivers/media/pci/cx88/cx88-blackbird.c2
-rw-r--r--drivers/media/pci/cx88/cx88-input.c30
-rw-r--r--drivers/media/pci/ddbridge/Kconfig21
-rw-r--r--drivers/media/pci/ddbridge/Makefile3
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-core.c4066
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-hw.c376
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-hw.h43
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-i2c.c230
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-i2c.h112
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-io.h71
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-main.c346
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-maxs8.c444
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-maxs8.h29
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-regs.h159
-rw-r--r--drivers/media/pci/ddbridge/ddbridge.h341
-rw-r--r--drivers/media/pci/dm1105/dm1105.c8
-rw-r--r--drivers/media/pci/dt3155/dt3155.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-alsa-mixer.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-alsa-pcm.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-i2c.c18
-rw-r--r--drivers/media/pci/mantis/hopper_cards.c2
-rw-r--r--drivers/media/pci/mantis/mantis_cards.c2
-rw-r--r--drivers/media/pci/mantis/mantis_common.h2
-rw-r--r--drivers/media/pci/mantis/mantis_i2c.c2
-rw-r--r--drivers/media/pci/mantis/mantis_input.c6
-rw-r--r--drivers/media/pci/meye/meye.c4
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_core.c2
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c2
-rw-r--r--drivers/media/pci/ngene/ngene-core.c32
-rw-r--r--drivers/media/pci/ngene/ngene-i2c.c8
-rw-r--r--drivers/media/pci/ngene/ngene.h6
-rw-r--r--drivers/media/pci/pluto2/pluto2.c2
-rw-r--r--drivers/media/pci/pt1/pt1.c2
-rw-r--r--drivers/media/pci/pt3/pt3.c11
-rw-r--r--drivers/media/pci/saa7134/saa7134-alsa.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-empress.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-i2c.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-input.c81
-rw-r--r--drivers/media/pci/saa7146/hexium_gemini.c2
-rw-r--r--drivers/media/pci/saa7146/hexium_orion.c2
-rw-r--r--drivers/media/pci/saa7146/mxb.c2
-rw-r--r--drivers/media/pci/saa7164/saa7164-core.c2
-rw-r--r--drivers/media/pci/saa7164/saa7164-i2c.c2
-rw-r--r--drivers/media/pci/smipcie/smipcie-ir.c6
-rw-r--r--drivers/media/pci/smipcie/smipcie.h2
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-g723.c2
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-gpio.c97
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-tw28.c3
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-v4l2.c2
-rw-r--r--drivers/media/pci/solo6x10/solo6x10.h5
-rw-r--r--drivers/media/pci/sta2x11/sta2x11_vip.c2
-rw-r--r--drivers/media/pci/ttpci/av7110.c2
-rw-r--r--drivers/media/pci/ttpci/av7110.h2
-rw-r--r--drivers/media/pci/ttpci/av7110_ca.c12
-rw-r--r--drivers/media/pci/ttpci/av7110_v4l.c2
-rw-r--r--drivers/media/pci/ttpci/budget-av.c2
-rw-r--r--drivers/media/pci/ttpci/budget-ci.c9
-rw-r--r--drivers/media/pci/ttpci/budget-patch.c2
-rw-r--r--drivers/media/pci/ttpci/budget.c2
-rw-r--r--drivers/media/pci/tw5864/tw5864-video.c1
-rw-r--r--drivers/media/pci/tw68/tw68-video.c2
-rw-r--r--drivers/media/pci/zoran/zoran_card.c2
-rw-r--r--drivers/media/platform/Kconfig25
-rw-r--r--drivers/media/platform/Makefile4
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe.c4
-rw-r--r--drivers/media/platform/atmel/atmel-isc.c6
-rw-r--r--drivers/media/platform/blackfin/bfin_capture.c4
-rw-r--r--drivers/media/platform/coda/coda-bit.c35
-rw-r--r--drivers/media/platform/coda/coda-common.c82
-rw-r--r--drivers/media/platform/coda/coda.h2
-rw-r--r--drivers/media/platform/coda/coda_regs.h1
-rw-r--r--drivers/media/platform/coda/imx-vdoa.c2
-rw-r--r--drivers/media/platform/davinci/ccdc_hw_device.h10
-rw-r--r--drivers/media/platform/davinci/dm355_ccdc.c92
-rw-r--r--drivers/media/platform/davinci/dm644x_ccdc.c151
-rw-r--r--drivers/media/platform/davinci/vpbe.c2
-rw-r--r--drivers/media/platform/davinci/vpbe_display.c4
-rw-r--r--drivers/media/platform/davinci/vpbe_osd.c2
-rw-r--r--drivers/media/platform/davinci/vpbe_venc.c2
-rw-r--r--drivers/media/platform/davinci/vpfe_capture.c95
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c30
-rw-r--r--drivers/media/platform/davinci/vpif_display.c4
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-m2m.c2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-i2c.c2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is.c8
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp.c2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-lite.c5
-rw-r--r--drivers/media/platform/exynos4-is/fimc-m2m.c2
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c8
-rw-r--r--drivers/media/platform/exynos4-is/mipi-csis.c4
-rw-r--r--drivers/media/platform/fsl-viu.c6
-rw-r--r--drivers/media/platform/m2m-deinterlace.c4
-rw-r--r--drivers/media/platform/marvell-ccic/cafe-driver.c4
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.c2
-rw-r--r--drivers/media/platform/meson/Makefile1
-rw-r--r--drivers/media/platform/meson/ao-cec.c744
-rw-r--r--drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c4
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_comp.c10
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_core.c8
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c4
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c37
-rw-r--r--drivers/media/platform/mx2_emmaprp.c6
-rw-r--r--drivers/media/platform/omap/omap_vout_vrfb.c134
-rw-r--r--drivers/media/platform/omap/omap_voutdef.h6
-rw-r--r--drivers/media/platform/omap3isp/isp.c161
-rw-r--r--drivers/media/platform/omap3isp/isp.h4
-rw-r--r--drivers/media/platform/omap3isp/ispccdc.c22
-rw-r--r--drivers/media/platform/omap3isp/ispccp2.c18
-rw-r--r--drivers/media/platform/omap3isp/ispcsi2.c6
-rw-r--r--drivers/media/platform/omap3isp/ispcsiphy.c91
-rw-r--r--drivers/media/platform/omap3isp/ispcsiphy.h7
-rw-r--r--drivers/media/platform/omap3isp/ispreg.h4
-rw-r--r--drivers/media/platform/omap3isp/omap3isp.h6
-rw-r--r--drivers/media/platform/pxa_camera.c9
-rw-r--r--drivers/media/platform/qcom/camss-8x16/Makefile11
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss-csid.c1092
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss-csid.h82
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss-csiphy.c890
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss-csiphy.h77
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss-ispif.c1175
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss-ispif.h85
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss-vfe.c3088
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss-vfe.h123
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss-video.c860
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss-video.h70
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss.c746
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss.h106
-rw-r--r--drivers/media/platform/qcom/venus/core.c18
-rw-r--r--drivers/media/platform/qcom/venus/core.h1
-rw-r--r--drivers/media/platform/qcom/venus/firmware.c76
-rw-r--r--drivers/media/platform/qcom/venus/firmware.h5
-rw-r--r--drivers/media/platform/qcom/venus/helpers.c51
-rw-r--r--drivers/media/platform/qcom/venus/helpers.h1
-rw-r--r--drivers/media/platform/qcom/venus/hfi_msgs.c11
-rw-r--r--drivers/media/platform/qcom/venus/vdec.c31
-rw-r--r--drivers/media/platform/qcom/venus/venc.c47
-rw-r--r--drivers/media/platform/rcar-vin/rcar-core.c4
-rw-r--r--drivers/media/platform/rcar_fdp1.c2
-rw-r--r--drivers/media/platform/rcar_jpu.c2
-rw-r--r--drivers/media/platform/s3c-camif/camif-core.c1
-rw-r--r--drivers/media/platform/s5p-cec/s5p_cec.c7
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.c4
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.c200
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.h8
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c9
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-regs.h2
-rw-r--r--drivers/media/platform/soc_camera/soc_camera.c8
-rw-r--r--drivers/media/platform/soc_camera/soc_mediabus.c3
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-debug.c14
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-v4l2.c2
-rw-r--r--drivers/media/platform/sti/cec/stih-cec.c4
-rw-r--r--drivers/media/platform/sti/delta/delta-v4l2.c6
-rw-r--r--drivers/media/platform/stm32/stm32-cec.c4
-rw-r--r--drivers/media/platform/stm32/stm32-dcmi.c495
-rw-r--r--drivers/media/platform/ti-vpe/cal.c4
-rw-r--r--drivers/media/platform/ti-vpe/vpe.c4
-rw-r--r--drivers/media/platform/via-camera.c2
-rw-r--r--drivers/media/platform/video-mux.c53
-rw-r--r--drivers/media/platform/vim2m.c4
-rw-r--r--drivers/media/platform/vimc/vimc-capture.c15
-rw-r--r--drivers/media/platform/vimc/vimc-debayer.c17
-rw-r--r--drivers/media/platform/vimc/vimc-scaler.c17
-rw-r--r--drivers/media/platform/vimc/vimc-sensor.c17
-rw-r--r--drivers/media/platform/vivid/vivid-cec.c66
-rw-r--r--drivers/media/platform/vivid/vivid-core.c8
-rw-r--r--drivers/media/platform/vsp1/vsp1.h7
-rw-r--r--drivers/media/platform/vsp1/vsp1_bru.c45
-rw-r--r--drivers/media/platform/vsp1/vsp1_bru.h4
-rw-r--r--drivers/media/platform/vsp1/vsp1_dl.c205
-rw-r--r--drivers/media/platform/vsp1/vsp1_dl.h1
-rw-r--r--drivers/media/platform/vsp1/vsp1_drm.c286
-rw-r--r--drivers/media/platform/vsp1/vsp1_drm.h38
-rw-r--r--drivers/media/platform/vsp1/vsp1_drv.c115
-rw-r--r--drivers/media/platform/vsp1/vsp1_entity.c40
-rw-r--r--drivers/media/platform/vsp1/vsp1_entity.h12
-rw-r--r--drivers/media/platform/vsp1/vsp1_lif.c5
-rw-r--r--drivers/media/platform/vsp1/vsp1_lif.h2
-rw-r--r--drivers/media/platform/vsp1/vsp1_pipe.c49
-rw-r--r--drivers/media/platform/vsp1/vsp1_pipe.h48
-rw-r--r--drivers/media/platform/vsp1/vsp1_regs.h60
-rw-r--r--drivers/media/platform/vsp1/vsp1_rpf.c27
-rw-r--r--drivers/media/platform/vsp1/vsp1_sru.c26
-rw-r--r--drivers/media/platform/vsp1/vsp1_uds.c57
-rw-r--r--drivers/media/platform/vsp1/vsp1_video.c251
-rw-r--r--drivers/media/platform/vsp1/vsp1_wpf.c28
-rw-r--r--drivers/media/platform/xilinx/xilinx-vipp.c52
-rw-r--r--drivers/media/radio/dsbr100.c2
-rw-r--r--drivers/media/radio/radio-cadet.c2
-rw-r--r--drivers/media/radio/radio-gemtek.c2
-rw-r--r--drivers/media/radio/radio-keene.c2
-rw-r--r--drivers/media/radio/radio-ma901.c2
-rw-r--r--drivers/media/radio/radio-maxiradio.c2
-rw-r--r--drivers/media/radio/radio-mr800.c2
-rw-r--r--drivers/media/radio/radio-raremono.c2
-rw-r--r--drivers/media/radio/radio-sf16fmr2.c2
-rw-r--r--drivers/media/radio/radio-shark.c2
-rw-r--r--drivers/media/radio/radio-shark2.c2
-rw-r--r--drivers/media/radio/radio-tea5764.c2
-rw-r--r--drivers/media/radio/radio-wl1273.c17
-rw-r--r--drivers/media/radio/si470x/radio-si470x-usb.c2
-rw-r--r--drivers/media/radio/si4713/radio-platform-si4713.c2
-rw-r--r--drivers/media/radio/si4713/radio-usb-si4713.c4
-rw-r--r--drivers/media/radio/wl128x/fmdrv_v4l2.c2
-rw-r--r--drivers/media/rc/Kconfig53
-rw-r--r--drivers/media/rc/Makefile3
-rw-r--r--drivers/media/rc/ati_remote.c7
-rw-r--r--drivers/media/rc/ene_ir.c6
-rw-r--r--drivers/media/rc/fintek-cir.c4
-rw-r--r--drivers/media/rc/gpio-ir-recv.c31
-rw-r--r--drivers/media/rc/gpio-ir-tx.c176
-rw-r--r--drivers/media/rc/igorplugusb.c11
-rw-r--r--drivers/media/rc/iguanair.c4
-rw-r--r--drivers/media/rc/img-ir/img-ir-hw.c6
-rw-r--r--drivers/media/rc/img-ir/img-ir-hw.h4
-rw-r--r--drivers/media/rc/img-ir/img-ir-jvc.c4
-rw-r--r--drivers/media/rc/img-ir/img-ir-nec.c20
-rw-r--r--drivers/media/rc/img-ir/img-ir-raw.c6
-rw-r--r--drivers/media/rc/img-ir/img-ir-rc5.c4
-rw-r--r--drivers/media/rc/img-ir/img-ir-rc6.c4
-rw-r--r--drivers/media/rc/img-ir/img-ir-sanyo.c4
-rw-r--r--drivers/media/rc/img-ir/img-ir-sharp.c4
-rw-r--r--drivers/media/rc/img-ir/img-ir-sony.c27
-rw-r--r--drivers/media/rc/imon.c55
-rw-r--r--drivers/media/rc/ir-hix5hd2.c4
-rw-r--r--drivers/media/rc/ir-jvc-decoder.c6
-rw-r--r--drivers/media/rc/ir-lirc-codec.c2
-rw-r--r--drivers/media/rc/ir-mce_kbd-decoder.c12
-rw-r--r--drivers/media/rc/ir-nec-decoder.c57
-rw-r--r--drivers/media/rc/ir-rc5-decoder.c25
-rw-r--r--drivers/media/rc/ir-rc6-decoder.c30
-rw-r--r--drivers/media/rc/ir-sanyo-decoder.c16
-rw-r--r--drivers/media/rc/ir-sharp-decoder.c6
-rw-r--r--drivers/media/rc/ir-sony-decoder.c23
-rw-r--r--drivers/media/rc/ir-spi.c1
-rw-r--r--drivers/media/rc/ir-xmp-decoder.c4
-rw-r--r--drivers/media/rc/ite-cir.c4
-rw-r--r--drivers/media/rc/keymaps/Makefile3
-rw-r--r--drivers/media/rc/keymaps/rc-adstech-dvb-t-pci.c8
-rw-r--r--drivers/media/rc/keymaps/rc-alink-dtu-m.c8
-rw-r--r--drivers/media/rc/keymaps/rc-anysee.c8
-rw-r--r--drivers/media/rc/keymaps/rc-apac-viewcomp.c8
-rw-r--r--drivers/media/rc/keymaps/rc-asus-pc39.c8
-rw-r--r--drivers/media/rc/keymaps/rc-asus-ps3-100.c8
-rw-r--r--drivers/media/rc/keymaps/rc-ati-tv-wonder-hd-600.c8
-rw-r--r--drivers/media/rc/keymaps/rc-ati-x10.c8
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia-a16d.c8
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia-cardbus.c8
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia-dvbt.c8
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia-m135a.c8
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia-m733a-rm-k6.c8
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia-rm-ks.c8
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia.c8
-rw-r--r--drivers/media/rc/keymaps/rc-avertv-303.c8
-rw-r--r--drivers/media/rc/keymaps/rc-azurewave-ad-tu700.c8
-rw-r--r--drivers/media/rc/keymaps/rc-behold-columbus.c8
-rw-r--r--drivers/media/rc/keymaps/rc-behold.c8
-rw-r--r--drivers/media/rc/keymaps/rc-budget-ci-old.c8
-rw-r--r--drivers/media/rc/keymaps/rc-cec.c2
-rw-r--r--drivers/media/rc/keymaps/rc-cinergy-1400.c8
-rw-r--r--drivers/media/rc/keymaps/rc-cinergy.c8
-rw-r--r--drivers/media/rc/keymaps/rc-d680-dmb.c8
-rw-r--r--drivers/media/rc/keymaps/rc-delock-61959.c8
-rw-r--r--drivers/media/rc/keymaps/rc-dib0700-nec.c8
-rw-r--r--drivers/media/rc/keymaps/rc-dib0700-rc5.c8
-rw-r--r--drivers/media/rc/keymaps/rc-digitalnow-tinytwin.c8
-rw-r--r--drivers/media/rc/keymaps/rc-digittrade.c8
-rw-r--r--drivers/media/rc/keymaps/rc-dm1105-nec.c8
-rw-r--r--drivers/media/rc/keymaps/rc-dntv-live-dvb-t.c8
-rw-r--r--drivers/media/rc/keymaps/rc-dntv-live-dvbt-pro.c8
-rw-r--r--drivers/media/rc/keymaps/rc-dtt200u.c8
-rw-r--r--drivers/media/rc/keymaps/rc-dvbsky.c8
-rw-r--r--drivers/media/rc/keymaps/rc-dvico-mce.c8
-rw-r--r--drivers/media/rc/keymaps/rc-dvico-portable.c8
-rw-r--r--drivers/media/rc/keymaps/rc-em-terratec.c8
-rw-r--r--drivers/media/rc/keymaps/rc-encore-enltv-fm53.c8
-rw-r--r--drivers/media/rc/keymaps/rc-encore-enltv.c8
-rw-r--r--drivers/media/rc/keymaps/rc-encore-enltv2.c8
-rw-r--r--drivers/media/rc/keymaps/rc-evga-indtube.c8
-rw-r--r--drivers/media/rc/keymaps/rc-eztv.c8
-rw-r--r--drivers/media/rc/keymaps/rc-flydvb.c8
-rw-r--r--drivers/media/rc/keymaps/rc-flyvideo.c8
-rw-r--r--drivers/media/rc/keymaps/rc-fusionhdtv-mce.c8
-rw-r--r--drivers/media/rc/keymaps/rc-gadmei-rm008z.c8
-rw-r--r--drivers/media/rc/keymaps/rc-geekbox.c8
-rw-r--r--drivers/media/rc/keymaps/rc-genius-tvgo-a11mce.c8
-rw-r--r--drivers/media/rc/keymaps/rc-gotview7135.c8
-rw-r--r--drivers/media/rc/keymaps/rc-hauppauge.c8
-rw-r--r--drivers/media/rc/keymaps/rc-imon-mce.c8
-rw-r--r--drivers/media/rc/keymaps/rc-imon-pad.c8
-rw-r--r--drivers/media/rc/keymaps/rc-iodata-bctv7e.c8
-rw-r--r--drivers/media/rc/keymaps/rc-it913x-v1.c8
-rw-r--r--drivers/media/rc/keymaps/rc-it913x-v2.c8
-rw-r--r--drivers/media/rc/keymaps/rc-kaiomy.c8
-rw-r--r--drivers/media/rc/keymaps/rc-kworld-315u.c8
-rw-r--r--drivers/media/rc/keymaps/rc-kworld-pc150u.c8
-rw-r--r--drivers/media/rc/keymaps/rc-kworld-plus-tv-analog.c8
-rw-r--r--drivers/media/rc/keymaps/rc-leadtek-y04g0051.c8
-rw-r--r--drivers/media/rc/keymaps/rc-lme2510.c8
-rw-r--r--drivers/media/rc/keymaps/rc-manli.c8
-rw-r--r--drivers/media/rc/keymaps/rc-medion-x10-digitainer.c8
-rw-r--r--drivers/media/rc/keymaps/rc-medion-x10-or2x.c8
-rw-r--r--drivers/media/rc/keymaps/rc-medion-x10.c8
-rw-r--r--drivers/media/rc/keymaps/rc-msi-digivox-ii.c8
-rw-r--r--drivers/media/rc/keymaps/rc-msi-digivox-iii.c8
-rw-r--r--drivers/media/rc/keymaps/rc-msi-tvanywhere-plus.c8
-rw-r--r--drivers/media/rc/keymaps/rc-msi-tvanywhere.c8
-rw-r--r--drivers/media/rc/keymaps/rc-nebula.c8
-rw-r--r--drivers/media/rc/keymaps/rc-nec-terratec-cinergy-xs.c8
-rw-r--r--drivers/media/rc/keymaps/rc-norwood.c8
-rw-r--r--drivers/media/rc/keymaps/rc-npgtech.c8
-rw-r--r--drivers/media/rc/keymaps/rc-pctv-sedna.c8
-rw-r--r--drivers/media/rc/keymaps/rc-pinnacle-color.c8
-rw-r--r--drivers/media/rc/keymaps/rc-pinnacle-grey.c8
-rw-r--r--drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c8
-rw-r--r--drivers/media/rc/keymaps/rc-pixelview-002t.c8
-rw-r--r--drivers/media/rc/keymaps/rc-pixelview-mk12.c8
-rw-r--r--drivers/media/rc/keymaps/rc-pixelview-new.c8
-rw-r--r--drivers/media/rc/keymaps/rc-pixelview.c8
-rw-r--r--drivers/media/rc/keymaps/rc-powercolor-real-angel.c8
-rw-r--r--drivers/media/rc/keymaps/rc-proteus-2309.c8
-rw-r--r--drivers/media/rc/keymaps/rc-purpletv.c8
-rw-r--r--drivers/media/rc/keymaps/rc-pv951.c8
-rw-r--r--drivers/media/rc/keymaps/rc-rc6-mce.c8
-rw-r--r--drivers/media/rc/keymaps/rc-real-audio-220-32-keys.c8
-rw-r--r--drivers/media/rc/keymaps/rc-reddo.c8
-rw-r--r--drivers/media/rc/keymaps/rc-snapstream-firefly.c8
-rw-r--r--drivers/media/rc/keymaps/rc-streamzap.c8
-rw-r--r--drivers/media/rc/keymaps/rc-su3000.c8
-rw-r--r--drivers/media/rc/keymaps/rc-tbs-nec.c8
-rw-r--r--drivers/media/rc/keymaps/rc-technisat-ts35.c8
-rw-r--r--drivers/media/rc/keymaps/rc-technisat-usb2.c8
-rw-r--r--drivers/media/rc/keymaps/rc-terratec-cinergy-c-pci.c8
-rw-r--r--drivers/media/rc/keymaps/rc-terratec-cinergy-s2-hd.c8
-rw-r--r--drivers/media/rc/keymaps/rc-terratec-cinergy-xs.c8
-rw-r--r--drivers/media/rc/keymaps/rc-terratec-slim-2.c8
-rw-r--r--drivers/media/rc/keymaps/rc-terratec-slim.c8
-rw-r--r--drivers/media/rc/keymaps/rc-tevii-nec.c8
-rw-r--r--drivers/media/rc/keymaps/rc-tivo.c8
-rw-r--r--drivers/media/rc/keymaps/rc-total-media-in-hand-02.c8
-rw-r--r--drivers/media/rc/keymaps/rc-total-media-in-hand.c8
-rw-r--r--drivers/media/rc/keymaps/rc-trekstor.c8
-rw-r--r--drivers/media/rc/keymaps/rc-tt-1500.c8
-rw-r--r--drivers/media/rc/keymaps/rc-twinhan-dtv-cab-ci.c8
-rw-r--r--drivers/media/rc/keymaps/rc-twinhan1027.c8
-rw-r--r--drivers/media/rc/keymaps/rc-videomate-m1f.c8
-rw-r--r--drivers/media/rc/keymaps/rc-videomate-s350.c8
-rw-r--r--drivers/media/rc/keymaps/rc-videomate-tv-pvr.c8
-rw-r--r--drivers/media/rc/keymaps/rc-winfast-usbii-deluxe.c8
-rw-r--r--drivers/media/rc/keymaps/rc-winfast.c8
-rw-r--r--drivers/media/rc/keymaps/rc-zx-irdec.c79
-rw-r--r--drivers/media/rc/lirc_dev.c4
-rw-r--r--drivers/media/rc/mceusb.c40
-rw-r--r--drivers/media/rc/meson-ir.c4
-rw-r--r--drivers/media/rc/mtk-cir.c246
-rw-r--r--drivers/media/rc/nuvoton-cir.c120
-rw-r--r--drivers/media/rc/nuvoton-cir.h24
-rw-r--r--drivers/media/rc/pwm-ir-tx.c138
-rw-r--r--drivers/media/rc/rc-core-priv.h5
-rw-r--r--drivers/media/rc/rc-ir-raw.c68
-rw-r--r--drivers/media/rc/rc-loopback.c6
-rw-r--r--drivers/media/rc/rc-main.c265
-rw-r--r--drivers/media/rc/redrat3.c4
-rw-r--r--drivers/media/rc/serial_ir.c46
-rw-r--r--drivers/media/rc/sir_ir.c6
-rw-r--r--drivers/media/rc/st_rc.c6
-rw-r--r--drivers/media/rc/streamzap.c4
-rw-r--r--drivers/media/rc/sunxi-cir.c6
-rw-r--r--drivers/media/rc/ttusbir.c4
-rw-r--r--drivers/media/rc/winbond-cir.c37
-rw-r--r--drivers/media/rc/zx-irdec.c184
-rw-r--r--drivers/media/tuners/fc0011.c1
-rw-r--r--drivers/media/tuners/fc0012.c2
-rw-r--r--drivers/media/tuners/fc0013.c2
-rw-r--r--drivers/media/tuners/mxl5005s.c2
-rw-r--r--drivers/media/tuners/tda18271-maps.c4
-rw-r--r--drivers/media/tuners/tuner-simple.c2
-rw-r--r--drivers/media/usb/airspy/airspy.c4
-rw-r--r--drivers/media/usb/as102/as102_usb_drv.c2
-rw-r--r--drivers/media/usb/au0828/Kconfig1
-rw-r--r--drivers/media/usb/au0828/au0828-core.c2
-rw-r--r--drivers/media/usb/au0828/au0828-i2c.c4
-rw-r--r--drivers/media/usb/au0828/au0828-input.c8
-rw-r--r--drivers/media/usb/au0828/au0828-video.c2
-rw-r--r--drivers/media/usb/b2c2/flexcop-usb.c2
-rw-r--r--drivers/media/usb/cpia2/cpia2_usb.c2
-rw-r--r--drivers/media/usb/cpia2/cpia2_v4l.c2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-417.c4
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-audio.c2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-core.c3
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-dvb.c6
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-i2c.c10
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-input.c6
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-video.c2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx.h4
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9015.c11
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c16
-rw-r--r--drivers/media/usb/dvb-usb-v2/anysee.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/az6007.c13
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb_core.c5
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvbsky.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/lmedm04.c16
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c13
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.c30
-rw-r--r--drivers/media/usb/dvb-usb/dib0700.h2
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_core.c66
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_devices.c152
-rw-r--r--drivers/media/usb/dvb-usb/dtt200u.c12
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-remote.c2
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb.h2
-rw-r--r--drivers/media/usb/dvb-usb/dw2102.c74
-rw-r--r--drivers/media/usb/dvb-usb/m920x.c4
-rw-r--r--drivers/media/usb/dvb-usb/pctv452e.c6
-rw-r--r--drivers/media/usb/dvb-usb/technisat-usb2.c2
-rw-r--r--drivers/media/usb/dvb-usb/ttusb2.c4
-rw-r--r--drivers/media/usb/em28xx/em28xx-audio.c4
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c18
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c1
-rw-r--r--drivers/media/usb/em28xx/em28xx-i2c.c4
-rw-r--r--drivers/media/usb/em28xx/em28xx-input.c128
-rw-r--r--drivers/media/usb/em28xx/em28xx.h1
-rw-r--r--drivers/media/usb/go7007/go7007-v4l2.c4
-rw-r--r--drivers/media/usb/go7007/snd-go7007.c2
-rw-r--r--drivers/media/usb/gspca/gspca.c2
-rw-r--r--drivers/media/usb/gspca/xirlink_cit.c2
-rw-r--r--drivers/media/usb/hackrf/hackrf.c4
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-core.c2
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-i2c.c5
-rw-r--r--drivers/media/usb/msi2500/msi2500.c4
-rw-r--r--drivers/media/usb/pulse8-cec/pulse8-cec.c9
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-encoder.c6
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c14
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-v4l2.c2
-rw-r--r--drivers/media/usb/pwc/pwc-if.c2
-rw-r--r--drivers/media/usb/rainshadow-cec/rainshadow-cec.c25
-rw-r--r--drivers/media/usb/s2255/s2255drv.c4
-rw-r--r--drivers/media/usb/stk1160/stk1160-core.c2
-rw-r--r--drivers/media/usb/stk1160/stk1160-i2c.c2
-rw-r--r--drivers/media/usb/stk1160/stk1160-v4l.c4
-rw-r--r--drivers/media/usb/stkwebcam/stk-sensor.c32
-rw-r--r--drivers/media/usb/stkwebcam/stk-webcam.c76
-rw-r--r--drivers/media/usb/stkwebcam/stk-webcam.h6
-rw-r--r--drivers/media/usb/tm6000/tm6000-alsa.c2
-rw-r--r--drivers/media/usb/tm6000/tm6000-cards.c2
-rw-r--r--drivers/media/usb/tm6000/tm6000-input.c40
-rw-r--r--drivers/media/usb/tm6000/tm6000-video.c4
-rw-r--r--drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c4
-rw-r--r--drivers/media/usb/ttusb-dec/ttusb_dec.c2
-rw-r--r--drivers/media/usb/usbtv/usbtv-audio.c2
-rw-r--r--drivers/media/usb/usbtv/usbtv-core.c2
-rw-r--r--drivers/media/usb/usbtv/usbtv-video.c2
-rw-r--r--drivers/media/usb/usbvision/usbvision-i2c.c11
-rw-r--r--drivers/media/usb/usbvision/usbvision-video.c19
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c7
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c28
-rw-r--r--drivers/media/usb/uvc/uvc_entity.c2
-rw-r--r--drivers/media/usb/uvc/uvc_queue.c9
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h4
-rw-r--r--drivers/media/usb/zr364xx/zr364xx.c6
-rw-r--r--drivers/media/v4l2-core/tuner-core.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-async.c7
-rw-r--r--drivers/media/v4l2-core/v4l2-clk.c3
-rw-r--r--drivers/media/v4l2-core/v4l2-compat-ioctl32.c13
-rw-r--r--drivers/media/v4l2-core/v4l2-flash-led-class.c139
-rw-r--r--drivers/media/v4l2-core/v4l2-fwnode.c139
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c12
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c27
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-contig.c5
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-sg.c8
-rw-r--r--drivers/media/v4l2-core/videobuf2-vmalloc.c8
-rw-r--r--drivers/memory/atmel-ebi.c23
-rw-r--r--drivers/mfd/Kconfig57
-rw-r--r--drivers/mfd/Makefile3
-rw-r--r--drivers/mfd/ab8500-core.c6
-rw-r--r--drivers/mfd/atmel-smc.c69
-rw-r--r--drivers/mfd/axp20x-rsb.c1
-rw-r--r--drivers/mfd/axp20x.c32
-rw-r--r--drivers/mfd/bd9571mwv.c230
-rw-r--r--drivers/mfd/da9052-core.c28
-rw-r--r--drivers/mfd/da9052-spi.c2
-rw-r--r--drivers/mfd/da9055-i2c.c2
-rw-r--r--drivers/mfd/da9062-core.c6
-rw-r--r--drivers/mfd/db8500-prcmu.c62
-rw-r--r--drivers/mfd/dm355evm_msp.c2
-rw-r--r--drivers/mfd/hi6421-pmic-core.c89
-rw-r--r--drivers/mfd/intel-lpss-pci.c1
-rw-r--r--drivers/mfd/intel-lpss.c8
-rw-r--r--drivers/mfd/intel_soc_pmic_core.c34
-rw-r--r--drivers/mfd/intel_soc_pmic_core.h3
-rw-r--r--drivers/mfd/intel_soc_pmic_crc.c27
-rw-r--r--drivers/mfd/lp87565.c7
-rw-r--r--drivers/mfd/lpc_ich.c10
-rw-r--r--drivers/mfd/max8925-i2c.c2
-rw-r--r--drivers/mfd/max8998.c6
-rw-r--r--drivers/mfd/omap-usb-tll.c4
-rw-r--r--drivers/mfd/retu-mfd.c12
-rw-r--r--drivers/mfd/rk808.c147
-rw-r--r--drivers/mfd/rtsx_pcr.c4
-rw-r--r--drivers/mfd/stm32-lptimer.c107
-rw-r--r--drivers/mfd/t7l66xb.c17
-rw-r--r--drivers/mfd/tps6105x.c8
-rw-r--r--drivers/mfd/tps65010.c2
-rw-r--r--drivers/mfd/tps68470.c106
-rw-r--r--drivers/mfd/twl-core.c10
-rw-r--r--drivers/mfd/twl4030-audio.c2
-rw-r--r--drivers/mfd/twl4030-irq.c2
-rw-r--r--drivers/mfd/twl4030-power.c2
-rw-r--r--drivers/mfd/twl6030-irq.c2
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/apds9802als.c4
-rw-r--r--drivers/misc/apds990x.c2
-rw-r--r--drivers/misc/aspeed-lpc-snoop.c34
-rw-r--r--drivers/misc/bh1770glc.c2
-rw-r--r--drivers/misc/cxl/api.c4
-rw-r--r--drivers/misc/cxl/fault.c16
-rw-r--r--drivers/misc/cxl/file.c8
-rw-r--r--drivers/misc/ds1682.c2
-rw-r--r--drivers/misc/eeprom/eeprom.c2
-rw-r--r--drivers/misc/eeprom/eeprom_93xx46.c24
-rw-r--r--drivers/misc/eeprom/idt_89hpesx.c126
-rw-r--r--drivers/misc/eeprom/max6875.c2
-rw-r--r--drivers/misc/hmc6352.c2
-rw-r--r--drivers/misc/hpilo.c2
-rw-r--r--drivers/misc/ioc4.c2
-rw-r--r--drivers/misc/isl29020.c4
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d.c2
-rw-r--r--drivers/misc/lkdtm.h30
-rw-r--r--drivers/misc/lkdtm_bugs.c134
-rw-r--r--drivers/misc/lkdtm_core.c28
-rw-r--r--drivers/misc/lkdtm_refcount.c400
-rw-r--r--drivers/misc/mei/bus.c2
-rw-r--r--drivers/misc/mei/hw-me.c45
-rw-r--r--drivers/misc/mei/hw-me.h39
-rw-r--r--drivers/misc/mei/pci-me.c115
-rw-r--r--drivers/misc/mei/pci-txe.c6
-rw-r--r--drivers/misc/mic/scif/scif_dma.c11
-rw-r--r--drivers/misc/pch_phub.c4
-rw-r--r--drivers/misc/sgi-gru/grutlbpurge.c12
-rw-r--r--drivers/misc/sram.c12
-rw-r--r--drivers/misc/ti-st/st_kim.c2
-rw-r--r--drivers/misc/tifm_7xx1.c2
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c10
-rw-r--r--drivers/mmc/Kconfig7
-rw-r--r--drivers/mmc/Makefile2
-rw-r--r--drivers/mmc/core/block.c329
-rw-r--r--drivers/mmc/core/core.c68
-rw-r--r--drivers/mmc/core/core.h6
-rw-r--r--drivers/mmc/core/debugfs.c89
-rw-r--r--drivers/mmc/core/host.c6
-rw-r--r--drivers/mmc/core/host.h1
-rw-r--r--drivers/mmc/core/mmc.c33
-rw-r--r--drivers/mmc/core/mmc_ops.c3
-rw-r--r--drivers/mmc/core/mmc_test.c97
-rw-r--r--drivers/mmc/core/queue.h6
-rw-r--r--drivers/mmc/core/sd.c12
-rw-r--r--drivers/mmc/host/Kconfig34
-rw-r--r--drivers/mmc/host/Makefile13
-rw-r--r--drivers/mmc/host/android-goldfish.c8
-rw-r--r--drivers/mmc/host/atmel-mci.c10
-rw-r--r--drivers/mmc/host/bcm2835.c2
-rw-r--r--drivers/mmc/host/cavium-octeon.c13
-rw-r--r--drivers/mmc/host/cavium.c6
-rw-r--r--drivers/mmc/host/davinci_mmc.c2
-rw-r--r--drivers/mmc/host/dw_mmc-k3.c298
-rw-r--r--drivers/mmc/host/dw_mmc.c64
-rw-r--r--drivers/mmc/host/dw_mmc.h4
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c714
-rw-r--r--drivers/mmc/host/mmci.c2
-rw-r--r--drivers/mmc/host/moxart-mmc.c2
-rw-r--r--drivers/mmc/host/mtk-sd.c3
-rw-r--r--drivers/mmc/host/mxcmmc.c34
-rw-r--r--drivers/mmc/host/of_mmc_spi.c2
-rw-r--r--drivers/mmc/host/omap_hsmmc.c21
-rw-r--r--drivers/mmc/host/renesas_sdhi.h2
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c31
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c287
-rw-r--r--drivers/mmc/host/renesas_sdhi_sys_dmac.c34
-rw-r--r--drivers/mmc/host/rtsx_usb_sdmmc.c2
-rw-r--r--drivers/mmc/host/s3cmci.c2
-rw-r--r--drivers/mmc/host/sdhci-acpi.c7
-rw-r--r--drivers/mmc/host/sdhci-bcm-kona.c4
-rw-r--r--drivers/mmc/host/sdhci-brcmstb.c39
-rw-r--r--drivers/mmc/host/sdhci-cadence.c88
-rw-r--r--drivers/mmc/host/sdhci-esdhc.h3
-rw-r--r--drivers/mmc/host/sdhci-msm.c5
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c8
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c212
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c23
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c50
-rw-r--r--drivers/mmc/host/sdhci-pic32.c2
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c28
-rw-r--r--drivers/mmc/host/sdhci-pltfm.h2
-rw-r--r--drivers/mmc/host/sdhci-pxav2.c30
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c2
-rw-r--r--drivers/mmc/host/sdhci-s3c.c34
-rw-r--r--drivers/mmc/host/sdhci-sirf.c43
-rw-r--r--drivers/mmc/host/sdhci-st.c28
-rw-r--r--drivers/mmc/host/sdhci-tegra.c26
-rw-r--r--drivers/mmc/host/sdhci-xenon-phy.c34
-rw-r--r--drivers/mmc/host/sdhci-xenon.c121
-rw-r--r--drivers/mmc/host/sdhci-xenon.h2
-rw-r--r--drivers/mmc/host/sdhci.c94
-rw-r--r--drivers/mmc/host/sdhci.h5
-rw-r--r--drivers/mmc/host/sdricoh_cs.c2
-rw-r--r--drivers/mmc/host/sh_mmcif.c2
-rw-r--r--drivers/mmc/host/sunxi-mmc.c101
-rw-r--r--drivers/mmc/host/tmio_mmc.h9
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c32
-rw-r--r--drivers/mmc/host/toshsd.c2
-rw-r--r--drivers/mmc/host/usdhi6rol0.c2
-rw-r--r--drivers/mmc/host/via-sdmmc.c2
-rw-r--r--drivers/mmc/host/vub300.c4
-rw-r--r--drivers/mmc/host/wbsd.c2
-rw-r--r--drivers/mmc/host/wmt-sdmmc.c8
-rw-r--r--drivers/mtd/mtd_blkdevs.c1
-rw-r--r--drivers/mtd/nand/atmel/nand-controller.c25
-rw-r--r--drivers/mtd/nand/atmel/pmecc.c21
-rw-r--r--drivers/mtd/nand/nand_base.c13
-rw-r--r--drivers/mtd/nand/nand_timings.c6
-rw-r--r--drivers/mtd/nand/nandsim.c1
-rw-r--r--drivers/mtd/nand/sunxi_nand.c4
-rw-r--r--drivers/mux/Kconfig19
-rw-r--r--drivers/mux/Makefile5
-rw-r--r--drivers/mux/adg792a.c (renamed from drivers/mux/mux-adg792a.c)0
-rw-r--r--drivers/mux/core.c (renamed from drivers/mux/mux-core.c)16
-rw-r--r--drivers/mux/gpio.c (renamed from drivers/mux/mux-gpio.c)0
-rw-r--r--drivers/mux/mmio.c (renamed from drivers/mux/mux-mmio.c)0
-rw-r--r--drivers/net/Kconfig2
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/appletalk/ipddp.c4
-rw-r--r--drivers/net/arcnet/arcdevice.h2
-rw-r--r--drivers/net/arcnet/com20020-pci.c2
-rw-r--r--drivers/net/bonding/bond_main.c17
-rw-r--r--drivers/net/bonding/bond_sysfs.c2
-rw-r--r--drivers/net/can/at91_can.c2
-rw-r--r--drivers/net/can/c_can/c_can_platform.c2
-rw-r--r--drivers/net/can/janz-ican3.c2
-rw-r--r--drivers/net/dsa/b53/b53_common.c84
-rw-r--r--drivers/net/dsa/b53/b53_priv.h16
-rw-r--r--drivers/net/dsa/bcm_sf2.c52
-rw-r--r--drivers/net/dsa/bcm_sf2.h13
-rw-r--r--drivers/net/dsa/bcm_sf2_cfp.c8
-rw-r--r--drivers/net/dsa/bcm_sf2_regs.h3
-rw-r--r--drivers/net/dsa/dsa_loop.c42
-rw-r--r--drivers/net/dsa/lan9303-core.c137
-rw-r--r--drivers/net/dsa/lan9303.h11
-rw-r--r--drivers/net/dsa/lan9303_i2c.c2
-rw-r--r--drivers/net/dsa/lan9303_mdio.c23
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c124
-rw-r--r--drivers/net/dsa/mt7530.c81
-rw-r--r--drivers/net/dsa/mt7530.h1
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c417
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h146
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.c104
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.h41
-rw-r--r--drivers/net/dsa/mv88e6xxx/phy.c1
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.h3
-rw-r--r--drivers/net/dsa/qca8k.c112
-rw-r--r--drivers/net/dsa/qca8k.h1
-rw-r--r--drivers/net/dummy.c2
-rw-r--r--drivers/net/ethernet/3com/3c509.c4
-rw-r--r--drivers/net/ethernet/3com/3c59x.c2
-rw-r--r--drivers/net/ethernet/8390/ax88796.c2
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.c2
-rw-r--r--drivers/net/ethernet/amd/a2065.c2
-rw-r--r--drivers/net/ethernet/amd/ariadne.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h33
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c25
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c207
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c501
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c86
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-main.c97
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-mdio.c81
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-pci.c4
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c54
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c352
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h92
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.c7
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c49
-rw-r--r--drivers/net/ethernet/apple/mace.c8
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw.h3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c92
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c8
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.h5
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_utils.h1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_vec.c12
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c10
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h3
-rw-r--r--drivers/net/ethernet/arc/emac_main.c13
-rw-r--r--drivers/net/ethernet/aurora/nb8800.c9
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig12
-rw-r--r--drivers/net/ethernet/broadcom/b44.c1
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c2
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c117
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h24
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-platform.c21
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c70
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c14
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c7
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/Makefile2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c478
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h95
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c17
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c38
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h500
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c15
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c834
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h158
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c513
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h89
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c4
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c378
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h18
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c263
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c12
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c8
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c2
-rw-r--r--drivers/net/ethernet/cadence/macb_pci.c2
-rwxr-xr-xdrivers/net/ethernet/cadence/macb_ptp.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c82
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_core.c728
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_ethtool.c354
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c956
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_main.c598
-rw-r--r--drivers/net/ethernet/cavium/liquidio/liquidio_common.h11
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_config.h13
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_console.c153
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.c27
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.h20
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_droq.c10
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_droq.h2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_main.h6
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_network.h35
-rw-r--r--drivers/net/ethernet/cavium/liquidio/request_manager.c11
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.h1
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c29
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h59
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c192
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c211
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c9
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sched.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c978
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h177
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/adapter.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c68
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h86
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c456
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c2
-rw-r--r--drivers/net/ethernet/ec_bhf.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_roce.c3
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c13
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c95
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.h2
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c3
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c118
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c13
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c4
-rw-r--r--drivers/net/ethernet/freescale/fman/Makefile2
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.c114
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.h77
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_dtsec.c118
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_keygen.c783
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_keygen.h46
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_port.c63
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_port.h7
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.c52
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c2
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c20
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c2
-rw-r--r--drivers/net/ethernet/hisilicon/Kconfig27
-rw-r--r--drivers/net/ethernet/hisilicon/Makefile1
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.h15
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c58
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c135
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c9
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/Makefile7
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.c300
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h444
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile11
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c356
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h740
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c4265
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h519
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c213
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h17
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c1015
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h106
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c2891
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h593
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c493
-rw-r--r--drivers/net/ethernet/hp/hp100.c2
-rw-r--r--drivers/net/ethernet/huawei/Kconfig19
-rw-r--r--drivers/net/ethernet/huawei/Makefile5
-rw-r--r--drivers/net/ethernet/huawei/hinic/Kconfig12
-rw-r--r--drivers/net/ethernet/huawei/hinic/Makefile6
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_common.c80
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_common.h38
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_dev.h64
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c978
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h208
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c946
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h187
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h149
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c1013
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h239
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c886
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h265
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_if.c351
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_if.h272
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_io.c533
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_io.h97
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c597
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h153
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c887
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h201
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h214
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c878
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h117
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h368
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_main.c1112
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_port.c379
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_port.h198
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_rx.c509
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_rx.h55
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_tx.c504
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_tx.h62
-rw-r--r--drivers/net/ethernet/i825xx/lasi_82596.c10
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c5
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c63
-rw-r--r--drivers/net/ethernet/ibm/emac/core.h1
-rw-r--r--drivers/net/ethernet/ibm/emac/debug.h2
-rw-r--r--drivers/net/ethernet/ibm/emac/mal.c8
-rw-r--r--drivers/net/ethernet/ibm/emac/rgmii.c18
-rw-r--r--drivers/net/ethernet/ibm/emac/tah.c12
-rw-r--r--drivers/net/ethernet/ibm/emac/zmii.c17
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c2
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c229
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h17
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h4
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c4
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_netdev.c14
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h44
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c8
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c160
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c318
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c134
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c11
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c98
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c124
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_common.c6
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_osdep.h4
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c75
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.h7
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_type.h1
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf.h31
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c45
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c116
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c44
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c6
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h1
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_hw.h18
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mbx.c57
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mbx.h14
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c23
-rw-r--r--drivers/net/ethernet/intel/igbvf/ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/igbvf/mbx.c4
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c47
-rw-r--r--drivers/net/ethernet/intel/igbvf/vf.c12
-rw-r--r--drivers/net/ethernet/intel/igbvf/vf.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c30
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c132
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c9
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c102
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c4
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c8
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c5
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c1591
-rw-r--r--drivers/net/ethernet/marvell/skge.c2
-rw-r--r--drivers/net/ethernet/mediatek/Kconfig6
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c188
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h80
-rw-r--r--drivers/net/ethernet/mellanox/Kconfig5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/alloc.c39
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c43
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_resources.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c44
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw_qos.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw_qos.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/icm.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/icm.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/intf.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c273
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c56
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/srq.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/alloc.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c73
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/Makefile1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c261
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h282
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h79
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_clock.c222
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c80
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c298
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c377
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c80
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c229
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h50
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c88
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c43
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c212
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h78
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c476
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c135
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c201
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h95
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c169
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qp.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sriov.c52
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/srq.c162
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c62
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Makefile5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h422
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c200
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c80
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c578
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h17
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c78
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c214
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h79
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c3228
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h51
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c52
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchib.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchx2.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/trap.h26
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c15
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.h8
-rw-r--r--drivers/net/ethernet/neterion/s2io.c45
-rw-r--r--drivers/net/ethernet/netronome/Kconfig1
-rw-r--r--drivers/net/ethernet/netronome/nfp/Makefile1
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/jit.c24
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.c30
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/offload.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/verifier.c24
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.c75
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.h22
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.c144
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.h11
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/match.c139
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c75
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.c22
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.h45
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app_nic.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.c64
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.h4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c37
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h1
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c13
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c593
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_main.c42
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c88
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c243
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h86
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_port.c39
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_port.h60
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c18
-rw-r--r--drivers/net/ethernet/netronome/nfp/nic/main.c14
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c5
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c2
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c145
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev_api.h43
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h49
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.c115
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.h18
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c58
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c68
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.h37
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c157
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.h3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.c75
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.h53
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h19
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c205
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_filter.c483
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c9
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c10
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c8
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_dbg.c49
-rw-r--r--drivers/net/ethernet/qualcomm/Kconfig2
-rw-r--r--drivers/net/ethernet/qualcomm/Makefile2
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c10
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/Kconfig12
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/Makefile10
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c356
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h55
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c271
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h26
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h86
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c106
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c104
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h44
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c174
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h29
-rw-r--r--drivers/net/ethernet/realtek/r8169.c5
-rw-r--r--drivers/net/ethernet/renesas/ravb.h2
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c131
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c2
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c5
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c10
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c2
-rw-r--r--drivers/net/ethernet/sfc/efx.h4
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.h4
-rw-r--r--drivers/net/ethernet/sfc/falcon/tx.c13
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c232
-rw-r--r--drivers/net/ethernet/sfc/tx.c13
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c152
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c193
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c25
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c2
-rw-r--r--drivers/net/ethernet/sun/ldmvsw.c2
-rw-r--r--drivers/net/ethernet/sun/niu.c28
-rw-r--r--drivers/net/ethernet/sun/sunhme.h6
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c2
-rw-r--r--drivers/net/ethernet/sun/sunvnet_common.c90
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-net.c1
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw-common.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw.c59
-rw-r--r--drivers/net/ethernet/ti/cpts.c113
-rw-r--r--drivers/net/ethernet/ti/cpts.h2
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c4
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c10
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c13
-rw-r--r--drivers/net/ethernet/tile/tilegx.c2
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c2
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet.h4
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c48
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c2
-rw-r--r--drivers/net/fddi/defxx.c2
-rw-r--r--drivers/net/geneve.c322
-rw-r--r--drivers/net/gtp.c2
-rw-r--r--drivers/net/hamradio/baycom_par.c2
-rw-r--r--drivers/net/hamradio/baycom_ser_fdx.c2
-rw-r--r--drivers/net/hamradio/baycom_ser_hdx.c2
-rw-r--r--drivers/net/hamradio/dmascc.c2
-rw-r--r--drivers/net/hyperv/hyperv_net.h77
-rw-r--r--drivers/net/hyperv/netvsc.c462
-rw-r--r--drivers/net/hyperv/netvsc_drv.c866
-rw-r--r--drivers/net/hyperv/rndis_filter.c176
-rw-r--r--drivers/net/ieee802154/ca8210.c6
-rw-r--r--drivers/net/ieee802154/mrf24j40.c3
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c6
-rw-r--r--drivers/net/ipvlan/ipvtap.c2
-rw-r--r--drivers/net/macsec.c1
-rw-r--r--drivers/net/macvlan.c5
-rw-r--r--drivers/net/macvtap.c2
-rw-r--r--drivers/net/phy/Kconfig87
-rw-r--r--drivers/net/phy/Makefile7
-rw-r--r--drivers/net/phy/bcm7xxx.c2
-rw-r--r--drivers/net/phy/dp83640.c7
-rw-r--r--drivers/net/phy/marvell.c320
-rw-r--r--drivers/net/phy/mdio-bcm-unimac.c103
-rw-r--r--drivers/net/phy/mdio-gpio.c2
-rw-r--r--drivers/net/phy/mdio-i2c.c109
-rw-r--r--drivers/net/phy/mdio-i2c.h19
-rw-r--r--drivers/net/phy/mdio-mux-bcm-iproc.c2
-rw-r--r--drivers/net/phy/mdio-mux-gpio.c2
-rw-r--r--drivers/net/phy/mdio-mux-mmioreg.c21
-rw-r--r--drivers/net/phy/mdio-mux.c38
-rw-r--r--drivers/net/phy/mdio_bus.c6
-rw-r--r--drivers/net/phy/phy-core.c180
-rw-r--r--drivers/net/phy/phy.c235
-rw-r--r--drivers/net/phy/phy_device.c43
-rw-r--r--drivers/net/phy/phylink.c1462
-rw-r--r--drivers/net/phy/rockchip.c233
-rw-r--r--drivers/net/phy/sfp-bus.c475
-rw-r--r--drivers/net/phy/sfp.c915
-rw-r--r--drivers/net/phy/sfp.h28
-rw-r--r--drivers/net/ppp/ppp_generic.c44
-rw-r--r--drivers/net/ppp/pptp.c2
-rw-r--r--drivers/net/tap.c11
-rw-r--r--drivers/net/team/team.c8
-rw-r--r--drivers/net/tun.c280
-rw-r--r--drivers/net/usb/asix.h1
-rw-r--r--drivers/net/usb/asix_common.c53
-rw-r--r--drivers/net/usb/asix_devices.c1
-rw-r--r--drivers/net/usb/catc.c2
-rw-r--r--drivers/net/usb/cdc-phonet.c2
-rw-r--r--drivers/net/usb/cdc_ncm.c37
-rw-r--r--drivers/net/usb/huawei_cdc_ncm.c6
-rw-r--r--drivers/net/usb/ipheth.c2
-rw-r--r--drivers/net/usb/kaweth.c2
-rw-r--r--drivers/net/usb/lan78xx.c18
-rw-r--r--drivers/net/usb/qmi_wwan.c7
-rw-r--r--drivers/net/usb/r8152.c2
-rw-r--r--drivers/net/usb/rtl8150.c2
-rw-r--r--drivers/net/usb/smsc95xx.c1
-rw-r--r--drivers/net/virtio_net.c357
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h2
-rw-r--r--drivers/net/vrf.c139
-rw-r--r--drivers/net/vxlan.c163
-rw-r--r--drivers/net/wan/dscc4.c129
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/Kconfig7
-rw-r--r--drivers/net/wireless/ath/ath10k/Makefile3
-rw-r--r--drivers/net/wireless/ath/ath10k/ahb.c18
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c299
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.h30
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c29
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h9
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c8
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c31
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c150
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h14
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c105
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h14
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.c12
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/usb.c1106
-rw-r--r--drivers/net/wireless/ath/ath10k/usb.h128
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c166
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h271
-rw-r--r--drivers/net/wireless/ath/ath10k/wow.c14
-rw-r--r--drivers/net/wireless/ath/ath10k/wow.h1
-rw-r--r--drivers/net/wireless/ath/ath6kl/usb.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c5
-rw-r--r--drivers/net/wireless/ath/carl9170/usb.c2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.c5
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c52
-rw-r--r--drivers/net/wireless/ath/wcn36xx/wcn36xx.h3
-rw-r--r--drivers/net/wireless/ath/wil6210/Kconfig12
-rw-r--r--drivers/net/wireless/ath/wil6210/Makefile2
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c84
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c27
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c14
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c42
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c3
-rw-r--r--drivers/net/wireless/ath/wil6210/pm.c27
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c6
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h20
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c14
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h720
-rw-r--r--drivers/net/wireless/atmel/at76c50x-usb.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c18
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c11
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h3
-rw-r--r--drivers/net/wireless/cisco/airo.c2
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.c34
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c17
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-mac.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/8000.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/a000.c44
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/commands.h24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tx.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/alive.h206
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/binding.h144
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/cmdhdr.h (renamed from drivers/net/wireless/intel/iwlwifi/fw/api.h)78
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/coex.h (renamed from drivers/net/wireless/intel/iwlwifi/mvm/fw-api-coex.h)73
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h657
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/config.h184
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/context.h94
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/d3.h (renamed from drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h)11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h127
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/debug.h345
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/filter.h183
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/led.h71
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h152
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac.h (renamed from drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h)33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h386
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/offload.h101
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/paging.h108
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h164
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/phy.h258
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/power.h (renamed from drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h)13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rs.h (renamed from drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h)13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h (renamed from drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h)31
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/scan.h (renamed from drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h)11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/sf.h138
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/sta.h (renamed from drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h)15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/stats.h (renamed from drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h)13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h208
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h386
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tof.h (renamed from drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tof.h)9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h (renamed from drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h)66
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/txq.h163
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/common_rx.c88
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c (renamed from drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c)474
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.h (renamed from drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.h)125
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/init.c75
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/nvm.c162
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/paging.c414
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h158
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/smem.c155
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c41
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c138
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/coex.c310
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/constants.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c75
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h2846
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c559
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/led.c59
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c413
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c237
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h160
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/nvm.c200
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c163
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/power.c25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c148
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c580
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c54
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tof.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tof.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c176
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c48
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c42
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c63
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c25
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_main.c4
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco_usb.c2
-rw-r--r--drivers/net/wireless/intersil/p54/p54usb.c2
-rw-r--r--drivers/net/wireless/marvell/libertas/if_usb.c2
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/if_usb.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c35
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfp.c4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cmdevt.c15
-rw-r--r--drivers/net/wireless/marvell/mwifiex/debugfs.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/init.c32
-rw-r--r--drivers/net/wireless/marvell/mwifiex/join.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c173
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c126
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c15
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmd.c19
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_ioctl.c121
-rw-r--r--drivers/net/wireless/marvell/mwifiex/tdls.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_cmd.c34
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.c5
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/dma.c5
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/usb.c2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/Makefile4
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/bus.h1
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c315
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.h4
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.c486
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.h5
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.c5
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.h27
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/event.c67
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c408
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h15
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_ipc.h11
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_regs_pearl.h2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink.h202
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink_util.c26
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink_util.h10
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2500usb.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c7
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800mmio.c13
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800usb.c17
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt73usb.c2
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c22
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbt_precomp.h16
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c17
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c28
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rc.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c365
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c17
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c192
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.h10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c15
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c43
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c21
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h50
-rw-r--r--drivers/net/wireless/rsi/Makefile1
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_core.c80
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_debugfs.c3
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_hal.c368
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c495
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_main.c5
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mgmt.c741
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_ps.c146
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio.c157
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio_ops.c84
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb.c138
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb_ops.c6
-rw-r--r--drivers/net/wireless/rsi/rsi_common.h1
-rw-r--r--drivers/net/wireless/rsi/rsi_hal.h66
-rw-r--r--drivers/net/wireless/rsi/rsi_main.h88
-rw-r--r--drivers/net/wireless/rsi/rsi_mgmt.h258
-rw-r--r--drivers/net/wireless/rsi/rsi_ps.h64
-rw-r--r--drivers/net/wireless/rsi/rsi_sdio.h7
-rw-r--r--drivers/net/wireless/rsi/rsi_usb.h6
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c1
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c23
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c1
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c1
-rw-r--r--drivers/net/wireless/ti/wlcore/sysfs.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h3
-rw-r--r--drivers/net/wireless/wl3501_cs.c2
-rw-r--r--drivers/net/wireless/zydas/zd1201.c2
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_rf_rf2959.c2
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_usb.c2
-rw-r--r--drivers/net/xen-netback/interface.c4
-rw-r--r--drivers/net/xen-netfront.c2
-rw-r--r--drivers/ntb/ntb_transport.c6
-rw-r--r--drivers/ntb/test/ntb_tool.c2
-rw-r--r--drivers/nvdimm/btt.c4
-rw-r--r--drivers/nvdimm/core.c7
-rw-r--r--drivers/nvdimm/nd.h9
-rw-r--r--drivers/nvdimm/pmem.c41
-rw-r--r--drivers/nvme/host/core.c52
-rw-r--r--drivers/nvme/host/fabrics.c3
-rw-r--r--drivers/nvme/host/fc.c125
-rw-r--r--drivers/nvme/host/lightnvm.c15
-rw-r--r--drivers/nvme/host/pci.c94
-rw-r--r--drivers/nvme/host/rdma.c53
-rw-r--r--drivers/nvme/target/admin-cmd.c22
-rw-r--r--drivers/nvme/target/configfs.c30
-rw-r--r--drivers/nvme/target/core.c5
-rw-r--r--drivers/nvme/target/fc.c326
-rw-r--r--drivers/nvme/target/io-cmd.c6
-rw-r--r--drivers/nvme/target/nvmet.h2
-rw-r--r--drivers/nvme/target/rdma.c5
-rw-r--r--drivers/nvmem/core.c43
-rw-r--r--drivers/nvmem/lpc18xx_eeprom.c2
-rw-r--r--drivers/nvmem/rockchip-efuse.c2
-rw-r--r--drivers/of/device.c8
-rw-r--r--drivers/of/irq.c2
-rw-r--r--drivers/of/of_mdio.c39
-rw-r--r--drivers/of/property.c83
-rw-r--r--drivers/parisc/asp.c4
-rw-r--r--drivers/parisc/ccio-dma.c4
-rw-r--r--drivers/parisc/ccio-rm-dma.c6
-rw-r--r--drivers/parisc/dino.c6
-rw-r--r--drivers/parisc/eisa.c4
-rw-r--r--drivers/parisc/hppb.c6
-rw-r--r--drivers/parisc/lasi.c4
-rw-r--r--drivers/parisc/lba_pci.c46
-rw-r--r--drivers/parisc/pdc_stable.c8
-rw-r--r--drivers/parisc/sba_iommu.c6
-rw-r--r--drivers/parisc/superio.c4
-rw-r--r--drivers/parisc/wax.c4
-rw-r--r--drivers/parport/daisy.c2
-rw-r--r--drivers/parport/parport_atari.c2
-rw-r--r--drivers/parport/parport_ax88796.c6
-rw-r--r--drivers/parport/parport_gsc.c10
-rw-r--r--drivers/parport/parport_ip32.c2
-rw-r--r--drivers/parport/parport_mfc3.c2
-rw-r--r--drivers/parport/parport_pc.c24
-rw-r--r--drivers/pci/host/pci-hyperv.c54
-rw-r--r--drivers/pci/msi.c13
-rw-r--r--drivers/pci/pci-acpi.c4
-rw-r--r--drivers/pci/pci-driver.c5
-rw-r--r--drivers/pci/pci.c44
-rw-r--r--drivers/pci/probe.c43
-rw-r--r--drivers/pci/quirks.c94
-rw-r--r--drivers/perf/arm_pmu.c47
-rw-r--r--drivers/perf/arm_pmu_platform.c9
-rw-r--r--drivers/perf/qcom_l2_pmu.c2
-rw-r--r--drivers/perf/xgene_pmu.c74
-rw-r--r--drivers/phy/Kconfig10
-rw-r--r--drivers/phy/Makefile3
-rw-r--r--drivers/phy/allwinner/phy-sun4i-usb.c112
-rw-r--r--drivers/phy/broadcom/Kconfig2
-rw-r--r--drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c8
-rw-r--r--drivers/phy/broadcom/phy-brcm-sata.c2
-rw-r--r--drivers/phy/marvell/Kconfig11
-rw-r--r--drivers/phy/marvell/Makefile1
-rw-r--r--drivers/phy/marvell/phy-mvebu-cp110-comphy.c644
-rw-r--r--drivers/phy/mediatek/Kconfig14
-rw-r--r--drivers/phy/mediatek/Makefile5
-rw-r--r--drivers/phy/mediatek/phy-mtk-tphy.c (renamed from drivers/phy/phy-mt65xx-usb3.c)557
-rw-r--r--drivers/phy/motorola/phy-cpcap-usb.c2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.c162
-rw-r--r--drivers/phy/qualcomm/phy-qcom-usb-hs.c14
-rw-r--r--drivers/phy/ralink/Kconfig11
-rw-r--r--drivers/phy/ralink/Makefile1
-rw-r--r--drivers/phy/ralink/phy-ralink-usb.c249
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-usb2.c225
-rw-r--r--drivers/phy/rockchip/phy-rockchip-typec.c3
-rw-r--r--drivers/phy/samsung/phy-exynos-dp-video.c5
-rw-r--r--drivers/phy/samsung/phy-exynos5-usbdrd.c7
-rw-r--r--drivers/phy/samsung/phy-samsung-usb2.c9
-rw-r--r--drivers/phy/ti/phy-ti-pipe3.c10
-rw-r--r--drivers/phy/ti/phy-twl4030-usb.c4
-rw-r--r--drivers/pinctrl/Kconfig17
-rw-r--r--drivers/pinctrl/Makefile3
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c70
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c64
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed.c21
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed.h1
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm281xx.c2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c31
-rw-r--r--drivers/pinctrl/berlin/berlin.c4
-rw-r--r--drivers/pinctrl/core.c17
-rw-r--r--drivers/pinctrl/core.h6
-rw-r--r--drivers/pinctrl/devicetree.c9
-rw-r--r--drivers/pinctrl/freescale/Kconfig7
-rw-r--r--drivers/pinctrl/freescale/Makefile1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c131
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.h20
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx23.c2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx28.c2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx7ulp.c364
-rw-r--r--drivers/pinctrl/freescale/pinctrl-vf610.c25
-rw-r--r--drivers/pinctrl/intel/Kconfig19
-rw-r--r--drivers/pinctrl/intel/Makefile2
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c4
-rw-r--r--drivers/pinctrl/intel/pinctrl-cannonlake.c424
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c7
-rw-r--r--drivers/pinctrl/intel/pinctrl-denverton.c302
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c32
-rw-r--r--drivers/pinctrl/intel/pinctrl-lewisburg.c343
-rw-r--r--drivers/pinctrl/intel/pinctrl-merrifield.c6
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-mt2701.h12
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-37xx.c31
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-abx500.c2
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c2
-rw-r--r--drivers/pinctrl/pinconf-generic.c9
-rw-r--r--drivers/pinctrl/pinconf.c14
-rw-r--r--drivers/pinctrl/pinconf.h24
-rw-r--r--drivers/pinctrl/pinctrl-adi2.c4
-rw-r--r--drivers/pinctrl/pinctrl-amd.c4
-rw-r--r--drivers/pinctrl/pinctrl-artpec6.c2
-rw-r--r--drivers/pinctrl/pinctrl-at91-pio4.c11
-rw-r--r--drivers/pinctrl/pinctrl-coh901.c2
-rw-r--r--drivers/pinctrl/pinctrl-digicolor.c4
-rw-r--r--drivers/pinctrl/pinctrl-gemini.c2359
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c6
-rw-r--r--drivers/pinctrl/pinctrl-rk805.c493
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c315
-rw-r--r--drivers/pinctrl/pinctrl-rza1.c10
-rw-r--r--drivers/pinctrl/pinctrl-st.c12
-rw-r--r--drivers/pinctrl/pinctrl-tb10x.c8
-rw-r--r--drivers/pinctrl/pinctrl-tz1090-pdc.c6
-rw-r--r--drivers/pinctrl/pinctrl-tz1090.c6
-rw-r--r--drivers/pinctrl/pinctrl-zynq.c34
-rw-r--r--drivers/pinctrl/pinmux.c16
-rw-r--r--drivers/pinctrl/pinmux.h29
-rw-r--r--drivers/pinctrl/qcom/pinctrl-apq8064.c42
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq4019.c432
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c27
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.h16
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c323
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c2
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c32
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.h1
-rw-r--r--drivers/pinctrl/samsung/pinctrl-s3c24xx.c37
-rw-r--r--drivers/pinctrl/samsung/pinctrl-s3c64xx.c40
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c18
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.h15
-rw-r--r--drivers/pinctrl/sh-pfc/Kconfig5
-rw-r--r--drivers/pinctrl/sh-pfc/Makefile1
-rw-r--r--drivers/pinctrl/sh-pfc/core.c6
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7791.c15
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7795.c1082
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7796.c146
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77995.c1812
-rw-r--r--drivers/pinctrl/sh-pfc/pinctrl.c11
-rw-r--r--drivers/pinctrl/sh-pfc/sh_pfc.h23
-rw-r--r--drivers/pinctrl/sirf/pinctrl-atlas7.c13
-rw-r--r--drivers/pinctrl/sirf/pinctrl-sirf.c10
-rw-r--r--drivers/pinctrl/sprd/Kconfig17
-rw-r--r--drivers/pinctrl/sprd/Makefile2
-rw-r--r--drivers/pinctrl/sprd/pinctrl-sprd-sc9860.c972
-rw-r--r--drivers/pinctrl/sprd/pinctrl-sprd.c1113
-rw-r--r--drivers/pinctrl/sprd/pinctrl-sprd.h67
-rw-r--r--drivers/pinctrl/stm32/Kconfig9
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c2
-rw-r--r--drivers/pinctrl/sunxi/Kconfig2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c274
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c26
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c6
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c6
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-h3-r.c4
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-v3s.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c3
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra-xusb.c2
-rw-r--r--drivers/pinctrl/ti/pinctrl-ti-iodelay.c4
-rw-r--r--drivers/pinctrl/uniphier/Kconfig4
-rw-r--r--drivers/pinctrl/uniphier/Makefile1
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-core.c279
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c665
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c714
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-ld4.c273
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-ld6b.c386
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c455
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-pro5.c458
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-pxs2.c386
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-pxs3.c989
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-sld8.c273
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier.h40
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wmt.c8
-rw-r--r--drivers/pinctrl/zte/pinctrl-zx.c18
-rw-r--r--drivers/platform/x86/Kconfig1
-rw-r--r--drivers/platform/x86/dell-wmi.c12
-rw-r--r--drivers/platform/x86/intel-hid.c17
-rw-r--r--drivers/platform/x86/intel-vbtn.c4
-rw-r--r--drivers/platform/x86/wmi.c6
-rw-r--r--drivers/power/avs/rockchip-io-domain.c38
-rw-r--r--drivers/power/supply/twl4030_charger.c2
-rw-r--r--drivers/power/supply/wm831x_power.c72
-rw-r--r--drivers/ptp/ptp_clock.c42
-rw-r--r--drivers/ptp/ptp_dte.c2
-rw-r--r--drivers/ptp/ptp_ixp46x.c2
-rw-r--r--drivers/ptp/ptp_kvm.c2
-rw-r--r--drivers/ptp/ptp_pch.c2
-rw-r--r--drivers/ptp/ptp_private.h3
-rw-r--r--drivers/pwm/Kconfig10
-rw-r--r--drivers/pwm/Makefile1
-rw-r--r--drivers/pwm/pwm-stm32-lp.c246
-rw-r--r--drivers/pwm/pwm-twl-led.c2
-rw-r--r--drivers/pwm/pwm-twl.c2
-rw-r--r--drivers/regulator/Kconfig29
-rw-r--r--drivers/regulator/Makefile2
-rw-r--r--drivers/regulator/axp20x-regulator.c6
-rw-r--r--drivers/regulator/core.c16
-rw-r--r--drivers/regulator/cpcap-regulator.c21
-rw-r--r--drivers/regulator/da9063-regulator.c2
-rw-r--r--drivers/regulator/fan53555.c15
-rw-r--r--drivers/regulator/ltc3589.c2
-rw-r--r--drivers/regulator/max1586.c2
-rw-r--r--drivers/regulator/mt6380-regulator.c352
-rw-r--r--drivers/regulator/of_regulator.c4
-rw-r--r--drivers/regulator/pv88090-regulator.c11
-rw-r--r--drivers/regulator/pv88090-regulator.h8
-rw-r--r--drivers/regulator/pwm-regulator.c6
-rw-r--r--drivers/regulator/qcom_rpm-regulator.c5
-rw-r--r--drivers/regulator/qcom_smd-regulator.c5
-rw-r--r--drivers/regulator/rk808-regulator.c130
-rw-r--r--drivers/regulator/rn5t618-regulator.c35
-rw-r--r--drivers/regulator/s5m8767.c4
-rw-r--r--drivers/regulator/stm32-vrefbuf.c202
-rw-r--r--drivers/regulator/twl-regulator.c2
-rw-r--r--drivers/regulator/twl6030-regulator.c2
-rw-r--r--drivers/reset/reset-socfpga.c4
-rw-r--r--drivers/rtc/rtc-dm355evm.c2
-rw-r--r--drivers/rtc/rtc-ds1307.c1
-rw-r--r--drivers/rtc/rtc-twl.c2
-rw-r--r--drivers/s390/block/dasd.c55
-rw-r--r--drivers/s390/block/dasd_3990_erp.c2
-rw-r--r--drivers/s390/block/dasd_devmap.c3
-rw-r--r--drivers/s390/block/dasd_diag.c2
-rw-r--r--drivers/s390/block/dasd_eckd.c8
-rw-r--r--drivers/s390/block/dasd_eckd.h2
-rw-r--r--drivers/s390/block/dasd_erp.c2
-rw-r--r--drivers/s390/block/dasd_fba.c202
-rw-r--r--drivers/s390/block/dasd_int.h19
-rw-r--r--drivers/s390/block/dasd_proc.c2
-rw-r--r--drivers/s390/block/dcssblk.c4
-rw-r--r--drivers/s390/block/scm_blk.c13
-rw-r--r--drivers/s390/block/xpram.c2
-rw-r--r--drivers/s390/char/Kconfig11
-rw-r--r--drivers/s390/char/raw3270.c2
-rw-r--r--drivers/s390/char/sclp_cmd.c1
-rw-r--r--drivers/s390/char/sclp_config.c2
-rw-r--r--drivers/s390/char/sclp_early.c6
-rw-r--r--drivers/s390/char/sclp_ocf.c2
-rw-r--r--drivers/s390/char/tape_core.c2
-rw-r--r--drivers/s390/char/vmcp.c112
-rw-r--r--drivers/s390/cio/chp.c5
-rw-r--r--drivers/s390/cio/device.c4
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.c2
-rw-r--r--drivers/s390/crypto/zcrypt_card.c2
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c2
-rw-r--r--drivers/s390/crypto/zcrypt_queue.c2
-rw-r--r--drivers/s390/net/ctcm_main.c2
-rw-r--r--drivers/s390/net/lcs.c28
-rw-r--r--drivers/s390/net/netiucv.c4
-rw-r--r--drivers/s390/net/qeth_core.h17
-rw-r--r--drivers/s390/net/qeth_core_main.c205
-rw-r--r--drivers/s390/net/qeth_core_sys.c2
-rw-r--r--drivers/s390/net/qeth_l2_main.c343
-rw-r--r--drivers/s390/net/qeth_l3_main.c71
-rw-r--r--drivers/s390/net/qeth_l3_sys.c25
-rw-r--r--drivers/sbus/char/display7seg.c4
-rw-r--r--drivers/sbus/char/flash.c4
-rw-r--r--drivers/sbus/char/uctrl.c4
-rw-r--r--drivers/scsi/Kconfig13
-rw-r--r--drivers/scsi/aacraid/aachba.c16
-rw-r--r--drivers/scsi/aacraid/aacraid.h2
-rw-r--r--drivers/scsi/aic7xxx/Makefile12
-rw-r--r--drivers/scsi/aic7xxx/aicasm/Makefile53
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c68
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c45
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c64
-rw-r--r--drivers/scsi/csiostor/csio_hw.c4
-rw-r--r--drivers/scsi/csiostor/csio_init.c12
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c3
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c7
-rw-r--r--drivers/scsi/cxlflash/main.c11
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c10
-rw-r--r--drivers/scsi/hpsa.c2
-rw-r--r--drivers/scsi/ipr.c34
-rw-r--r--drivers/scsi/ipr.h2
-rw-r--r--drivers/scsi/isci/request.c14
-rw-r--r--drivers/scsi/lasi700.c6
-rw-r--r--drivers/scsi/libfc/fc_disc.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c5
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c30
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.h1
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c6
-rw-r--r--drivers/scsi/qedf/qedf.h3
-rw-r--r--drivers/scsi/qedf/qedf_els.c14
-rw-r--r--drivers/scsi/qedf/qedf_main.c22
-rw-r--r--drivers/scsi/qedi/Kconfig1
-rw-r--r--drivers/scsi/qedi/qedi.h17
-rw-r--r--drivers/scsi/qedi/qedi_fw.c2
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.c8
-rw-r--r--drivers/scsi/qedi/qedi_main.c419
-rw-r--r--drivers/scsi/qedi/qedi_nvm_iscsi_cfg.h210
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.c12
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c30
-rw-r--r--drivers/scsi/scsi.c4
-rw-r--r--drivers/scsi/scsi_transport_fc.c6
-rw-r--r--drivers/scsi/sd.c3
-rw-r--r--drivers/scsi/sd_zbc.c9
-rw-r--r--drivers/scsi/ses.c2
-rw-r--r--drivers/scsi/sg.c27
-rw-r--r--drivers/scsi/smartpqi/smartpqi.h2
-rw-r--r--drivers/scsi/st.c4
-rw-r--r--drivers/scsi/virtio_scsi.c1
-rw-r--r--drivers/scsi/zalon.c8
-rw-r--r--drivers/sfi/sfi_core.c23
-rw-r--r--drivers/soc/imx/gpcv2.c15
-rw-r--r--drivers/soc/ti/knav_qmss_queue.c3
-rw-r--r--drivers/soc/ti/ti_sci_pm_domains.c2
-rw-r--r--drivers/soc/zte/Kconfig1
-rw-r--r--drivers/spi/Kconfig5
-rw-r--r--drivers/spi/spi-altera.c163
-rw-r--r--drivers/spi/spi-ath79.c13
-rw-r--r--drivers/spi/spi-bcm-qspi.c89
-rw-r--r--drivers/spi/spi-bcm63xx-hsspi.c10
-rw-r--r--drivers/spi/spi-bcm63xx.c4
-rw-r--r--drivers/spi/spi-cadence.c4
-rw-r--r--drivers/spi/spi-ep93xx.c501
-rw-r--r--drivers/spi/spi-falcon.c5
-rw-r--r--drivers/spi/spi-imx.c218
-rw-r--r--drivers/spi/spi-loopback-test.c34
-rw-r--r--drivers/spi/spi-omap2-mcspi.c4
-rw-r--r--drivers/spi/spi-orion.c4
-rw-r--r--drivers/spi/spi-pic32.c4
-rw-r--r--drivers/spi/spi-pl022.c2
-rw-r--r--drivers/spi/spi-pxa2xx.c35
-rw-r--r--drivers/spi/spi-pxa2xx.h2
-rw-r--r--drivers/spi/spi-qup.c564
-rw-r--r--drivers/spi/spi-rockchip.c60
-rw-r--r--drivers/spi/spi-sh-msiof.c32
-rw-r--r--drivers/spi/spi-sh.c4
-rw-r--r--drivers/spi/spi-stm32.c2
-rw-r--r--drivers/spi/spi-sun6i.c2
-rw-r--r--drivers/spi/spi-tegra114.c2
-rw-r--r--drivers/spi/spi-tegra20-sflash.c2
-rw-r--r--drivers/spi/spi-tegra20-slink.c2
-rw-r--r--drivers/spi/spi-xlp.c4
-rw-r--r--drivers/spi/spi.c142
-rw-r--r--drivers/spmi/spmi-pmic-arb.c848
-rw-r--r--drivers/spmi/spmi.c26
-rw-r--r--drivers/staging/Kconfig8
-rw-r--r--drivers/staging/Makefile5
-rw-r--r--drivers/staging/android/ion/ion.h12
-rw-r--r--drivers/staging/android/ion/ion_cma_heap.c5
-rw-r--r--drivers/staging/android/ion/ion_system_heap.c2
-rw-r--r--drivers/staging/ccree/Kconfig9
-rw-r--r--drivers/staging/ccree/Makefile2
-rw-r--r--drivers/staging/ccree/cc_hw_queue_defs.h3
-rw-r--r--drivers/staging/ccree/ssi_aead.c246
-rw-r--r--drivers/staging/ccree/ssi_aead.h12
-rw-r--r--drivers/staging/ccree/ssi_buffer_mgr.c473
-rw-r--r--drivers/staging/ccree/ssi_cipher.c185
-rw-r--r--drivers/staging/ccree/ssi_driver.c64
-rw-r--r--drivers/staging/ccree/ssi_driver.h1
-rw-r--r--drivers/staging/ccree/ssi_fips.c119
-rw-r--r--drivers/staging/ccree/ssi_fips.h58
-rw-r--r--drivers/staging/ccree/ssi_fips_data.h306
-rw-r--r--drivers/staging/ccree/ssi_fips_ext.c92
-rw-r--r--drivers/staging/ccree/ssi_fips_ll.c1649
-rw-r--r--drivers/staging/ccree/ssi_fips_local.c357
-rw-r--r--drivers/staging/ccree/ssi_fips_local.h67
-rw-r--r--drivers/staging/ccree/ssi_hash.c276
-rw-r--r--drivers/staging/ccree/ssi_ivgen.c13
-rw-r--r--drivers/staging/ccree/ssi_pm.c4
-rw-r--r--drivers/staging/ccree/ssi_request_mgr.c54
-rw-r--r--drivers/staging/ccree/ssi_sram_mgr.c6
-rw-r--r--drivers/staging/ccree/ssi_sysfs.c80
-rw-r--r--drivers/staging/comedi/comedi_buf.c2
-rw-r--r--drivers/staging/comedi/comedi_fops.c5
-rw-r--r--drivers/staging/comedi/drivers.c4
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c6
-rw-r--r--drivers/staging/fbtft/fb_st7789v.c2
-rw-r--r--drivers/staging/fbtft/fbtft-core.c4
-rw-r--r--drivers/staging/fsl-dpaa2/Kconfig2
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c6
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h4
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c2
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/dpni.c2
-rw-r--r--drivers/staging/fsl-mc/bus/Kconfig4
-rw-r--r--drivers/staging/fsl-mc/bus/dpio/qbman-portal.c24
-rw-r--r--drivers/staging/fsl-mc/bus/dprc-driver.c5
-rw-r--r--drivers/staging/fsl-mc/bus/fsl-mc-allocator.c6
-rw-r--r--drivers/staging/fsl-mc/bus/fsl-mc-bus.c4
-rw-r--r--drivers/staging/fsl-mc/bus/fsl-mc-msi.c9
-rw-r--r--drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c11
-rw-r--r--drivers/staging/fsl-mc/bus/mc-io.c11
-rw-r--r--drivers/staging/fsl-mc/bus/mc-sys.c36
-rw-r--r--drivers/staging/fsl-mc/include/dpaa2-io.h1
-rw-r--r--drivers/staging/goldfish/goldfish_nand.c24
-rw-r--r--drivers/staging/greybus/arche-platform.c14
-rw-r--r--drivers/staging/greybus/audio_codec.c2
-rw-r--r--drivers/staging/greybus/gbphy.c2
-rw-r--r--drivers/staging/greybus/interface.c40
-rw-r--r--drivers/staging/greybus/light.c46
-rw-r--r--drivers/staging/greybus/spilib.h3
-rw-r--r--drivers/staging/greybus/tools/loopback_test.c48
-rw-r--r--drivers/staging/greybus/usb.c2
-rw-r--r--drivers/staging/greybus/vibrator.c8
-rw-r--r--drivers/staging/gs_fpgaboot/gs_fpgaboot.c90
-rw-r--r--drivers/staging/gs_fpgaboot/gs_fpgaboot.h2
-rw-r--r--drivers/staging/gs_fpgaboot/io.c4
-rw-r--r--drivers/staging/iio/adc/ad7280a.c21
-rw-r--r--drivers/staging/iio/adc/ad7606_par.c4
-rw-r--r--drivers/staging/iio/light/tsl2x7x.c372
-rw-r--r--drivers/staging/iio/resolver/ad2s1210.c2
-rw-r--r--drivers/staging/irda/TODO4
-rw-r--r--drivers/staging/irda/drivers/Kconfig (renamed from drivers/net/irda/Kconfig)0
-rw-r--r--drivers/staging/irda/drivers/Makefile (renamed from drivers/net/irda/Makefile)2
-rw-r--r--drivers/staging/irda/drivers/act200l-sir.c (renamed from drivers/net/irda/act200l-sir.c)0
-rw-r--r--drivers/staging/irda/drivers/actisys-sir.c (renamed from drivers/net/irda/actisys-sir.c)0
-rw-r--r--drivers/staging/irda/drivers/ali-ircc.c (renamed from drivers/net/irda/ali-ircc.c)0
-rw-r--r--drivers/staging/irda/drivers/ali-ircc.h (renamed from drivers/net/irda/ali-ircc.h)0
-rw-r--r--drivers/staging/irda/drivers/au1k_ir.c (renamed from drivers/net/irda/au1k_ir.c)0
-rw-r--r--drivers/staging/irda/drivers/bfin_sir.c (renamed from drivers/net/irda/bfin_sir.c)0
-rw-r--r--drivers/staging/irda/drivers/bfin_sir.h (renamed from drivers/net/irda/bfin_sir.h)0
-rw-r--r--drivers/staging/irda/drivers/donauboe.c (renamed from drivers/net/irda/donauboe.c)0
-rw-r--r--drivers/staging/irda/drivers/donauboe.h (renamed from drivers/net/irda/donauboe.h)0
-rw-r--r--drivers/staging/irda/drivers/esi-sir.c (renamed from drivers/net/irda/esi-sir.c)0
-rw-r--r--drivers/staging/irda/drivers/girbil-sir.c (renamed from drivers/net/irda/girbil-sir.c)0
-rw-r--r--drivers/staging/irda/drivers/irda-usb.c (renamed from drivers/net/irda/irda-usb.c)2
-rw-r--r--drivers/staging/irda/drivers/irda-usb.h (renamed from drivers/net/irda/irda-usb.h)0
-rw-r--r--drivers/staging/irda/drivers/irtty-sir.c (renamed from drivers/net/irda/irtty-sir.c)0
-rw-r--r--drivers/staging/irda/drivers/irtty-sir.h (renamed from drivers/net/irda/irtty-sir.h)0
-rw-r--r--drivers/staging/irda/drivers/kingsun-sir.c (renamed from drivers/net/irda/kingsun-sir.c)2
-rw-r--r--drivers/staging/irda/drivers/ks959-sir.c (renamed from drivers/net/irda/ks959-sir.c)2
-rw-r--r--drivers/staging/irda/drivers/ksdazzle-sir.c (renamed from drivers/net/irda/ksdazzle-sir.c)2
-rw-r--r--drivers/staging/irda/drivers/litelink-sir.c (renamed from drivers/net/irda/litelink-sir.c)0
-rw-r--r--drivers/staging/irda/drivers/ma600-sir.c (renamed from drivers/net/irda/ma600-sir.c)0
-rw-r--r--drivers/staging/irda/drivers/mcp2120-sir.c (renamed from drivers/net/irda/mcp2120-sir.c)0
-rw-r--r--drivers/staging/irda/drivers/mcs7780.c (renamed from drivers/net/irda/mcs7780.c)18
-rw-r--r--drivers/staging/irda/drivers/mcs7780.h (renamed from drivers/net/irda/mcs7780.h)0
-rw-r--r--drivers/staging/irda/drivers/nsc-ircc.c (renamed from drivers/net/irda/nsc-ircc.c)0
-rw-r--r--drivers/staging/irda/drivers/nsc-ircc.h (renamed from drivers/net/irda/nsc-ircc.h)0
-rw-r--r--drivers/staging/irda/drivers/old_belkin-sir.c (renamed from drivers/net/irda/old_belkin-sir.c)0
-rw-r--r--drivers/staging/irda/drivers/pxaficp_ir.c (renamed from drivers/net/irda/pxaficp_ir.c)0
-rw-r--r--drivers/staging/irda/drivers/sa1100_ir.c (renamed from drivers/net/irda/sa1100_ir.c)0
-rw-r--r--drivers/staging/irda/drivers/sh_sir.c (renamed from drivers/net/irda/sh_sir.c)0
-rw-r--r--drivers/staging/irda/drivers/sir-dev.h (renamed from drivers/net/irda/sir-dev.h)0
-rw-r--r--drivers/staging/irda/drivers/sir_dev.c (renamed from drivers/net/irda/sir_dev.c)0
-rw-r--r--drivers/staging/irda/drivers/sir_dongle.c (renamed from drivers/net/irda/sir_dongle.c)0
-rw-r--r--drivers/staging/irda/drivers/smsc-ircc2.c (renamed from drivers/net/irda/smsc-ircc2.c)0
-rw-r--r--drivers/staging/irda/drivers/smsc-ircc2.h (renamed from drivers/net/irda/smsc-ircc2.h)0
-rw-r--r--drivers/staging/irda/drivers/smsc-sio.h (renamed from drivers/net/irda/smsc-sio.h)0
-rw-r--r--drivers/staging/irda/drivers/stir4200.c (renamed from drivers/net/irda/stir4200.c)2
-rw-r--r--drivers/staging/irda/drivers/tekram-sir.c (renamed from drivers/net/irda/tekram-sir.c)0
-rw-r--r--drivers/staging/irda/drivers/toim3232-sir.c (renamed from drivers/net/irda/toim3232-sir.c)0
-rw-r--r--drivers/staging/irda/drivers/via-ircc.c (renamed from drivers/net/irda/via-ircc.c)0
-rw-r--r--drivers/staging/irda/drivers/via-ircc.h (renamed from drivers/net/irda/via-ircc.h)0
-rw-r--r--drivers/staging/irda/drivers/vlsi_ir.c (renamed from drivers/net/irda/vlsi_ir.c)0
-rw-r--r--drivers/staging/irda/drivers/vlsi_ir.h (renamed from drivers/net/irda/vlsi_ir.h)0
-rw-r--r--drivers/staging/irda/drivers/w83977af.h (renamed from drivers/net/irda/w83977af.h)0
-rw-r--r--drivers/staging/irda/drivers/w83977af_ir.c (renamed from drivers/net/irda/w83977af_ir.c)0
-rw-r--r--drivers/staging/irda/drivers/w83977af_ir.h (renamed from drivers/net/irda/w83977af_ir.h)0
-rw-r--r--drivers/staging/irda/include/net/irda/af_irda.h (renamed from include/net/irda/af_irda.h)0
-rw-r--r--drivers/staging/irda/include/net/irda/crc.h (renamed from include/net/irda/crc.h)0
-rw-r--r--drivers/staging/irda/include/net/irda/discovery.h (renamed from include/net/irda/discovery.h)0
-rw-r--r--drivers/staging/irda/include/net/irda/ircomm_core.h (renamed from include/net/irda/ircomm_core.h)0
-rw-r--r--drivers/staging/irda/include/net/irda/ircomm_event.h (renamed from include/net/irda/ircomm_event.h)0
-rw-r--r--drivers/staging/irda/include/net/irda/ircomm_lmp.h (renamed from include/net/irda/ircomm_lmp.h)0
-rw-r--r--drivers/staging/irda/include/net/irda/ircomm_param.h (renamed from include/net/irda/ircomm_param.h)0
-rw-r--r--drivers/staging/irda/include/net/irda/ircomm_ttp.h (renamed from include/net/irda/ircomm_ttp.h)0
-rw-r--r--drivers/staging/irda/include/net/irda/ircomm_tty.h (renamed from include/net/irda/ircomm_tty.h)0
-rw-r--r--drivers/staging/irda/include/net/irda/ircomm_tty_attach.h (renamed from include/net/irda/ircomm_tty_attach.h)0
-rw-r--r--drivers/staging/irda/include/net/irda/irda.h (renamed from include/net/irda/irda.h)0
-rw-r--r--drivers/staging/irda/include/net/irda/irda_device.h (renamed from include/net/irda/irda_device.h)0
-rw-r--r--drivers/staging/irda/include/net/irda/iriap.h (renamed from include/net/irda/iriap.h)0
-rw-r--r--drivers/staging/irda/include/net/irda/iriap_event.h (renamed from include/net/irda/iriap_event.h)0
-rw-r--r--drivers/staging/irda/include/net/irda/irias_object.h (renamed from include/net/irda/irias_object.h)0
-rw-r--r--drivers/staging/irda/include/net/irda/irlan_client.h (renamed from include/net/irda/irlan_client.h)0
-rw-r--r--drivers/staging/irda/include/net/irda/irlan_common.h (renamed from include/net/irda/irlan_common.h)0
-rw-r--r--drivers/staging/irda/include/net/irda/irlan_eth.h (renamed from include/net/irda/irlan_eth.h)0
-rw-r--r--drivers/staging/irda/include/net/irda/irlan_event.h (renamed from include/net/irda/irlan_event.h)0
-rw-r--r--drivers/staging/irda/include/net/irda/irlan_filter.h (renamed from include/net/irda/irlan_filter.h)0
-rw-r--r--drivers/staging/irda/include/net/irda/irlan_provider.h (renamed from include/net/irda/irlan_provider.h)0
-rw-r--r--drivers/staging/irda/include/net/irda/irlap.h (renamed from include/net/irda/irlap.h)0
-rw-r--r--drivers/staging/irda/include/net/irda/irlap_event.h (renamed from include/net/irda/irlap_event.h)0
-rw-r--r--drivers/staging/irda/include/net/irda/irlap_frame.h (renamed from include/net/irda/irlap_frame.h)0
-rw-r--r--drivers/staging/irda/include/net/irda/irlmp.h (renamed from include/net/irda/irlmp.h)0
-rw-r--r--drivers/staging/irda/include/net/irda/irlmp_event.h (renamed from include/net/irda/irlmp_event.h)0
-rw-r--r--drivers/staging/irda/include/net/irda/irlmp_frame.h (renamed from include/net/irda/irlmp_frame.h)0
-rw-r--r--drivers/staging/irda/include/net/irda/irmod.h (renamed from include/net/irda/irmod.h)0
-rw-r--r--drivers/staging/irda/include/net/irda/irqueue.h (renamed from include/net/irda/irqueue.h)0
-rw-r--r--drivers/staging/irda/include/net/irda/irttp.h (renamed from include/net/irda/irttp.h)0
-rw-r--r--drivers/staging/irda/include/net/irda/parameters.h (renamed from include/net/irda/parameters.h)0
-rw-r--r--drivers/staging/irda/include/net/irda/qos.h (renamed from include/net/irda/qos.h)0
-rw-r--r--drivers/staging/irda/include/net/irda/timer.h (renamed from include/net/irda/timer.h)0
-rw-r--r--drivers/staging/irda/include/net/irda/wrapper.h (renamed from include/net/irda/wrapper.h)0
-rw-r--r--drivers/staging/irda/net/Kconfig (renamed from net/irda/Kconfig)8
-rw-r--r--drivers/staging/irda/net/Makefile (renamed from net/irda/Makefile)2
-rw-r--r--drivers/staging/irda/net/af_irda.c (renamed from net/irda/af_irda.c)2
-rw-r--r--drivers/staging/irda/net/discovery.c (renamed from net/irda/discovery.c)0
-rw-r--r--drivers/staging/irda/net/ircomm/Kconfig (renamed from net/irda/ircomm/Kconfig)0
-rw-r--r--drivers/staging/irda/net/ircomm/Makefile (renamed from net/irda/ircomm/Makefile)0
-rw-r--r--drivers/staging/irda/net/ircomm/ircomm_core.c (renamed from net/irda/ircomm/ircomm_core.c)0
-rw-r--r--drivers/staging/irda/net/ircomm/ircomm_event.c (renamed from net/irda/ircomm/ircomm_event.c)0
-rw-r--r--drivers/staging/irda/net/ircomm/ircomm_lmp.c (renamed from net/irda/ircomm/ircomm_lmp.c)0
-rw-r--r--drivers/staging/irda/net/ircomm/ircomm_param.c (renamed from net/irda/ircomm/ircomm_param.c)0
-rw-r--r--drivers/staging/irda/net/ircomm/ircomm_ttp.c (renamed from net/irda/ircomm/ircomm_ttp.c)0
-rw-r--r--drivers/staging/irda/net/ircomm/ircomm_tty.c (renamed from net/irda/ircomm/ircomm_tty.c)0
-rw-r--r--drivers/staging/irda/net/ircomm/ircomm_tty_attach.c (renamed from net/irda/ircomm/ircomm_tty_attach.c)0
-rw-r--r--drivers/staging/irda/net/ircomm/ircomm_tty_ioctl.c (renamed from net/irda/ircomm/ircomm_tty_ioctl.c)0
-rw-r--r--drivers/staging/irda/net/irda_device.c (renamed from net/irda/irda_device.c)0
-rw-r--r--drivers/staging/irda/net/iriap.c (renamed from net/irda/iriap.c)0
-rw-r--r--drivers/staging/irda/net/iriap_event.c (renamed from net/irda/iriap_event.c)0
-rw-r--r--drivers/staging/irda/net/irias_object.c (renamed from net/irda/irias_object.c)0
-rw-r--r--drivers/staging/irda/net/irlan/Kconfig (renamed from net/irda/irlan/Kconfig)0
-rw-r--r--drivers/staging/irda/net/irlan/Makefile (renamed from net/irda/irlan/Makefile)0
-rw-r--r--drivers/staging/irda/net/irlan/irlan_client.c (renamed from net/irda/irlan/irlan_client.c)0
-rw-r--r--drivers/staging/irda/net/irlan/irlan_client_event.c (renamed from net/irda/irlan/irlan_client_event.c)0
-rw-r--r--drivers/staging/irda/net/irlan/irlan_common.c (renamed from net/irda/irlan/irlan_common.c)0
-rw-r--r--drivers/staging/irda/net/irlan/irlan_eth.c (renamed from net/irda/irlan/irlan_eth.c)0
-rw-r--r--drivers/staging/irda/net/irlan/irlan_event.c (renamed from net/irda/irlan/irlan_event.c)0
-rw-r--r--drivers/staging/irda/net/irlan/irlan_filter.c (renamed from net/irda/irlan/irlan_filter.c)0
-rw-r--r--drivers/staging/irda/net/irlan/irlan_provider.c (renamed from net/irda/irlan/irlan_provider.c)0
-rw-r--r--drivers/staging/irda/net/irlan/irlan_provider_event.c (renamed from net/irda/irlan/irlan_provider_event.c)0
-rw-r--r--drivers/staging/irda/net/irlap.c (renamed from net/irda/irlap.c)0
-rw-r--r--drivers/staging/irda/net/irlap_event.c (renamed from net/irda/irlap_event.c)0
-rw-r--r--drivers/staging/irda/net/irlap_frame.c (renamed from net/irda/irlap_frame.c)0
-rw-r--r--drivers/staging/irda/net/irlmp.c (renamed from net/irda/irlmp.c)0
-rw-r--r--drivers/staging/irda/net/irlmp_event.c (renamed from net/irda/irlmp_event.c)0
-rw-r--r--drivers/staging/irda/net/irlmp_frame.c (renamed from net/irda/irlmp_frame.c)0
-rw-r--r--drivers/staging/irda/net/irmod.c (renamed from net/irda/irmod.c)2
-rw-r--r--drivers/staging/irda/net/irnet/Kconfig (renamed from net/irda/irnet/Kconfig)0
-rw-r--r--drivers/staging/irda/net/irnet/Makefile (renamed from net/irda/irnet/Makefile)0
-rw-r--r--drivers/staging/irda/net/irnet/irnet.h (renamed from net/irda/irnet/irnet.h)0
-rw-r--r--drivers/staging/irda/net/irnet/irnet_irda.c (renamed from net/irda/irnet/irnet_irda.c)0
-rw-r--r--drivers/staging/irda/net/irnet/irnet_irda.h (renamed from net/irda/irnet/irnet_irda.h)0
-rw-r--r--drivers/staging/irda/net/irnet/irnet_ppp.c (renamed from net/irda/irnet/irnet_ppp.c)0
-rw-r--r--drivers/staging/irda/net/irnet/irnet_ppp.h (renamed from net/irda/irnet/irnet_ppp.h)0
-rw-r--r--drivers/staging/irda/net/irnetlink.c (renamed from net/irda/irnetlink.c)0
-rw-r--r--drivers/staging/irda/net/irproc.c (renamed from net/irda/irproc.c)0
-rw-r--r--drivers/staging/irda/net/irqueue.c (renamed from net/irda/irqueue.c)0
-rw-r--r--drivers/staging/irda/net/irsysctl.c (renamed from net/irda/irsysctl.c)0
-rw-r--r--drivers/staging/irda/net/irttp.c (renamed from net/irda/irttp.c)0
-rw-r--r--drivers/staging/irda/net/parameters.c (renamed from net/irda/parameters.c)0
-rw-r--r--drivers/staging/irda/net/qos.c (renamed from net/irda/qos.c)0
-rw-r--r--drivers/staging/irda/net/timer.c (renamed from net/irda/timer.c)0
-rw-r--r--drivers/staging/irda/net/wrapper.c (renamed from net/irda/wrapper.c)0
-rw-r--r--drivers/staging/ks7010/ks7010_sdio.c4
-rw-r--r--drivers/staging/ks7010/ks_hostif.c4
-rw-r--r--drivers/staging/ks7010/ks_wlan_net.c2
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs.h28
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h104
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_private.h26
-rw-r--r--drivers/staging/lustre/include/linux/lnet/api.h2
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lib-lnet.h15
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lib-types.h50
-rw-r--r--drivers/staging/lustre/include/linux/lnet/socklnd.h15
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lnet/libcfs_debug.h149
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lnet/libcfs_ioctl.h (renamed from drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h)0
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lnet/lnet-dlc.h (renamed from drivers/staging/lustre/include/linux/lnet/lib-dlc.h)4
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lnet/lnet-types.h (renamed from drivers/staging/lustre/include/linux/lnet/types.h)0
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lnet/lnetctl.h (renamed from drivers/staging/lustre/include/linux/lnet/lnetctl.h)51
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lnet/lnetst.h (renamed from drivers/staging/lustre/include/linux/lnet/lnetst.h)129
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lnet/nidstr.h (renamed from drivers/staging/lustre/include/linux/lnet/nidstr.h)2
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lnet/socklnd.h (renamed from drivers/staging/lustre/include/linux/lnet/lnet.h)24
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lustre/lustre_cfg.h (renamed from drivers/staging/lustre/lustre/include/lustre_cfg.h)188
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lustre/lustre_fid.h293
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lustre/lustre_fiemap.h (renamed from drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h)6
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lustre/lustre_idl.h (renamed from drivers/staging/lustre/lustre/include/lustre/lustre_idl.h)689
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lustre/lustre_ioctl.h (renamed from drivers/staging/lustre/lustre/include/lustre/lustre_ioctl.h)203
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lustre/lustre_kernelcomm.h (renamed from drivers/staging/lustre/lustre/include/uapi_kernelcomm.h)6
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lustre/lustre_ostid.h236
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lustre/lustre_param.h94
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lustre/lustre_user.h (renamed from drivers/staging/lustre/lustre/include/lustre/lustre_user.h)15
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lustre/lustre_ver.h (renamed from drivers/staging/lustre/lustre/include/lustre_ver.h)0
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/Makefile3
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c2
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h5
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c19
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/Makefile3
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h18
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c20
-rw-r--r--drivers/staging/lustre/lnet/libcfs/Makefile3
-rw-r--r--drivers/staging/lustre/lnet/libcfs/debug.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/fail.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/hash.c49
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_lock.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_mem.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_string.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c4
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-curproc.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-module.c4
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/module.c9
-rw-r--r--drivers/staging/lustre/lnet/libcfs/prng.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/tracefile.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/tracefile.h2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/workitem.c2
-rw-r--r--drivers/staging/lustre/lnet/lnet/Makefile3
-rw-r--r--drivers/staging/lustre/lnet/lnet/acceptor.c2
-rw-r--r--drivers/staging/lustre/lnet/lnet/api-ni.c4
-rw-r--r--drivers/staging/lustre/lnet/lnet/config.c2
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-eq.c3
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-md.c2
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-me.c2
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-move.c6
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-msg.c2
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-ptl.c2
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-socket.c4
-rw-r--r--drivers/staging/lustre/lnet/lnet/lo.c3
-rw-r--r--drivers/staging/lustre/lnet/lnet/module.c5
-rw-r--r--drivers/staging/lustre/lnet/lnet/net_fault.c4
-rw-r--r--drivers/staging/lustre/lnet/lnet/nidstrings.c4
-rw-r--r--drivers/staging/lustre/lnet/lnet/peer.c4
-rw-r--r--drivers/staging/lustre/lnet/lnet/router.c3
-rw-r--r--drivers/staging/lustre/lnet/lnet/router_proc.c5
-rw-r--r--drivers/staging/lustre/lnet/selftest/Makefile3
-rw-r--r--drivers/staging/lustre/lnet/selftest/conctl.c8
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.c9
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.h7
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.c4
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.h7
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.h2
-rw-r--r--drivers/staging/lustre/lnet/selftest/selftest.h9
-rw-r--r--drivers/staging/lustre/lustre/Kconfig10
-rw-r--r--drivers/staging/lustre/lustre/fid/Makefile3
-rw-r--r--drivers/staging/lustre/lustre/fid/fid_internal.h4
-rw-r--r--drivers/staging/lustre/lustre/fid/fid_lib.c5
-rw-r--r--drivers/staging/lustre/lustre/fid/fid_request.c12
-rw-r--r--drivers/staging/lustre/lustre/fid/lproc_fid.c12
-rw-r--r--drivers/staging/lustre/lustre/fld/Makefile3
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_cache.c19
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_internal.h8
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_request.c18
-rw-r--r--drivers/staging/lustre/lustre/fld/lproc_fld.c14
-rw-r--r--drivers/staging/lustre/lustre/include/cl_object.h6
-rw-r--r--drivers/staging/lustre/lustre/include/interval_tree.h4
-rw-r--r--drivers/staging/lustre/lustre/include/llog_swab.h3
-rw-r--r--drivers/staging/lustre/lustre/include/lprocfs_status.h10
-rw-r--r--drivers/staging/lustre/lustre/include/lu_object.h6
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_compat.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_debug.h4
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_disk.h23
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_dlm.h25
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_errno.h (renamed from drivers/staging/lustre/lustre/include/lustre/lustre_errno.h)0
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_export.h6
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_fid.h84
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_fld.h5
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_handles.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_import.h4
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_kernelcomm.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_lib.h12
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_linkea.h15
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_lmv.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_log.h4
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_mdc.h15
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_mds.h11
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_net.h39
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_nrs.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_obdo.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_param.h109
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_swab.h8
-rw-r--r--drivers/staging/lustre/lustre/include/obd.h20
-rw-r--r--drivers/staging/lustre/lustre/include/obd_cksum.h6
-rw-r--r--drivers/staging/lustre/lustre/include/obd_class.h44
-rw-r--r--drivers/staging/lustre/lustre/include/obd_support.h6
-rw-r--r--drivers/staging/lustre/lustre/include/seq_range.h2
-rw-r--r--drivers/staging/lustre/lustre/ldlm/interval_tree.c46
-rw-r--r--drivers/staging/lustre/lustre/ldlm/l_lock.c6
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_extent.c12
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_flock.c14
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c6
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_internal.h9
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lib.c12
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lock.c17
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c10
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_plain.c6
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_pool.c8
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_request.c36
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_resource.c53
-rw-r--r--drivers/staging/lustre/lustre/llite/Makefile3
-rw-r--r--drivers/staging/lustre/lustre/llite/dcache.c6
-rw-r--r--drivers/staging/lustre/lustre/llite/dir.c22
-rw-r--r--drivers/staging/lustre/lustre/llite/file.c15
-rw-r--r--drivers/staging/lustre/lustre/llite/glimpse.c16
-rw-r--r--drivers/staging/lustre/lustre/llite/lcommon_cl.c35
-rw-r--r--drivers/staging/lustre/lustre/llite/lcommon_misc.c8
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_internal.h20
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_lib.c27
-rw-r--r--drivers/staging/lustre/lustre/llite/lproc_llite.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/namei.c9
-rw-r--r--drivers/staging/lustre/lustre/llite/range_lock.c2
-rw-r--r--drivers/staging/lustre/lustre/llite/range_lock.h4
-rw-r--r--drivers/staging/lustre/lustre/llite/rw.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/statahead.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/super25.c6
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_dev.c7
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_internal.h4
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_io.c2
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_lock.c2
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_object.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr.c6
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr_cache.c5
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr_security.c23
-rw-r--r--drivers/staging/lustre/lustre/lmv/Makefile3
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_fld.c15
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_intent.c22
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_internal.h8
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_obd.c184
-rw-r--r--drivers/staging/lustre/lustre/lmv/lproc_lmv.c6
-rw-r--r--drivers/staging/lustre/lustre/lov/Makefile5
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_cl_internal.h96
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_dev.c87
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_ea.c29
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_internal.h31
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_io.c152
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_lock.c15
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_merge.c4
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_obd.c43
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_object.c469
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_offset.c4
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pack.c13
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_page.c1
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pool.c4
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_request.c46
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_dev.c1
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_io.c51
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_lock.c2
-rw-r--r--drivers/staging/lustre/lustre/lov/lproc_lov.c6
-rw-r--r--drivers/staging/lustre/lustre/mdc/Makefile3
-rw-r--r--drivers/staging/lustre/lustre/mdc/lproc_mdc.c8
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_internal.h2
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_lib.c10
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_locks.c26
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_reint.c4
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_request.c25
-rw-r--r--drivers/staging/lustre/lustre/mgc/Makefile3
-rw-r--r--drivers/staging/lustre/lustre/mgc/lproc_mgc.c4
-rw-r--r--drivers/staging/lustre/lustre/mgc/mgc_internal.h11
-rw-r--r--drivers/staging/lustre/lustre/mgc/mgc_request.c39
-rw-r--r--drivers/staging/lustre/lustre/obdclass/Makefile3
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_io.c8
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_lock.c8
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_object.c14
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_page.c8
-rw-r--r--drivers/staging/lustre/lustre/obdclass/class_obd.c41
-rw-r--r--drivers/staging/lustre/lustre/obdclass/debug.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/genops.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/kernelcomm.c4
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linkea.c75
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-module.c102
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c8
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_cat.c2
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_internal.h2
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_obd.c4
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_swab.c4
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lprocfs_counters.c4
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lprocfs_status.c8
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lu_object.c29
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lu_ref.c10
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lustre_handles.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lustre_peer.c14
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_config.c74
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_mount.c44
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obdo.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/statfs_pack.c8
-rw-r--r--drivers/staging/lustre/lustre/obdclass/uuid.c6
-rw-r--r--drivers/staging/lustre/lustre/obdecho/Makefile3
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo_client.c33
-rw-r--r--drivers/staging/lustre/lustre/osc/Makefile3
-rw-r--r--drivers/staging/lustre/lustre/osc/lproc_osc.c8
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cache.c31
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cl_internal.h6
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_dev.c2
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_internal.h2
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_io.c2
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_lock.c4
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_object.c9
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_quota.c2
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_request.c32
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/Makefile3
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/client.c47
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/connection.c6
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/errno.c4
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/events.c8
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/import.c18
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/layout.c18
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/llog_client.c8
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/llog_net.c6
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c27
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/niobuf.c10
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/nrs.c10
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c6
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pack_generic.c20
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pers.c10
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pinger.c4
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c8
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c18
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/recover.c18
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec.c16
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c20
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_config.c13
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_gc.c12
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c16
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_null.c10
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_plain.c10
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/service.c26
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/wiretest.c24
-rw-r--r--drivers/staging/media/atomisp/i2c/ap1302.c7
-rw-r--r--drivers/staging/media/atomisp/i2c/ap1302.h4
-rw-r--r--drivers/staging/media/atomisp/i2c/gc0310.c5
-rw-r--r--drivers/staging/media/atomisp/i2c/gc0310.h2
-rw-r--r--drivers/staging/media/atomisp/i2c/gc2235.c4
-rw-r--r--drivers/staging/media/atomisp/i2c/gc2235.h8
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/ad5816g.c11
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/drv201.c11
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/dw9714.c14
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/dw9718.c5
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/dw9719.c11
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/imx.c48
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/imx.h31
-rw-r--r--drivers/staging/media/atomisp/i2c/lm3554.c2
-rw-r--r--drivers/staging/media/atomisp/i2c/mt9m114.c12
-rw-r--r--drivers/staging/media/atomisp/i2c/ov2680.c19
-rw-r--r--drivers/staging/media/atomisp/i2c/ov2680.h3
-rw-r--r--drivers/staging/media/atomisp/i2c/ov2722.c2
-rw-r--r--drivers/staging/media/atomisp/i2c/ov2722.h2
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/ov5693.c10
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/ov5693.h2
-rw-r--r--drivers/staging/media/atomisp/i2c/ov8858.c2
-rw-r--r--drivers/staging/media/atomisp/i2c/ov8858.h5
-rw-r--r--drivers/staging/media/atomisp/i2c/ov8858_btns.h5
-rw-r--r--drivers/staging/media/atomisp/include/linux/atomisp.h6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/Makefile10
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.c50
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.h3
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.c8
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_internal.h15
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.c3
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.c6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.c35
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/ibuf_ctrl_rmgr.c27
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css.c1
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_firmware.c8
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm.c139
-rw-r--r--drivers/staging/media/bcm2048/radio-bcm2048.c3
-rw-r--r--drivers/staging/media/cxd2099/cxd2099.c178
-rw-r--r--drivers/staging/media/cxd2099/cxd2099.h6
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.c2
-rw-r--r--drivers/staging/media/imx/Kconfig3
-rw-r--r--drivers/staging/media/imx/imx-ic-prpencvf.c57
-rw-r--r--drivers/staging/media/imx/imx-media-capture.c4
-rw-r--r--drivers/staging/media/imx/imx-media-csi.c37
-rw-r--r--drivers/staging/media/imx/imx-media-dev.c4
-rw-r--r--drivers/staging/media/imx/imx-media-of.c50
-rw-r--r--drivers/staging/media/imx/imx-media-vdic.c37
-rw-r--r--drivers/staging/media/lirc/lirc_zilog.c18
-rw-r--r--drivers/staging/media/omap4iss/iss_video.c2
-rw-r--r--drivers/staging/most/hdm-dim2/dim2_hdm.c10
-rw-r--r--drivers/staging/most/hdm-usb/hdm_usb.c22
-rw-r--r--drivers/staging/nvec/nvec.c2
-rw-r--r--drivers/staging/octeon/ethernet-rx.c79
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.c2
-rw-r--r--drivers/staging/pi433/Documentation/devicetree/pi433-overlay.dts53
-rw-r--r--drivers/staging/pi433/Documentation/devicetree/pi433.txt62
-rw-r--r--drivers/staging/pi433/Documentation/pi433.txt274
-rw-r--r--drivers/staging/pi433/Kconfig16
-rw-r--r--drivers/staging/pi433/Makefile3
-rw-r--r--drivers/staging/pi433/TODO5
-rw-r--r--drivers/staging/pi433/pi433_if.c1338
-rw-r--r--drivers/staging/pi433/pi433_if.h152
-rw-r--r--drivers/staging/pi433/rf69.c1032
-rw-r--r--drivers/staging/pi433/rf69.h82
-rw-r--r--drivers/staging/pi433/rf69_enum.h201
-rw-r--r--drivers/staging/pi433/rf69_registers.h489
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ap.c16
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_cmd.c4
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ioctl_set.c6
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme.c14
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c6
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_ioctl.h3
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c2
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c4
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c78
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c2
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c23
-rw-r--r--drivers/staging/rtl8192u/r8192U_hw.h11
-rw-r--r--drivers/staging/rtl8192u/r819xU_cmdpkt.c2
-rw-r--r--drivers/staging/rtl8712/mlme_linux.c4
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmd.c2
-rw-r--r--drivers/staging/rtl8712/rtl8712_efuse.c2
-rw-r--r--drivers/staging/rtl8712/rtl8712_xmit.c8
-rw-r--r--drivers/staging/rtl8712/usb_intf.c2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_btcoex.c2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_cmd.c2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_efuse.c2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ioctl_set.c1
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme.c7
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme_ext.c4
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_odm.c38
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_pwrctrl.c6
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_recv.c4
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_security.c14
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_xmit.c6
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_btcoex.c14
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com.c4
-rw-r--r--drivers/staging/rtl8723bs/hal/odm.c46
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c20
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_rxdesc.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/sdio_ops.c2
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_linux.c1008
-rw-r--r--drivers/staging/rtl8723bs/os_dep/mlme_linux.c6
-rw-r--r--drivers/staging/rtl8723bs/os_dep/osdep_service.c2
-rw-r--r--drivers/staging/rtl8723bs/os_dep/sdio_intf.c2
-rw-r--r--drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c4
-rw-r--r--drivers/staging/rtl8723bs/os_dep/xmit_linux.c10
-rw-r--r--drivers/staging/rtlwifi/Kconfig22
-rw-r--r--drivers/staging/rtlwifi/Makefile70
-rw-r--r--drivers/staging/rtlwifi/TODO11
-rw-r--r--drivers/staging/rtlwifi/base.c2826
-rw-r--r--drivers/staging/rtlwifi/base.h186
-rw-r--r--drivers/staging/rtlwifi/btcoexist/Makefile8
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbt_precomp.h85
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtc8822b1ant.c5244
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtc8822b1ant.h444
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtc8822b2ant.c5225
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtc8822b2ant.h498
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtc8822bwifionly.c65
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtc8822bwifionly.h35
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtcoutsrc.c1881
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtcoutsrc.h802
-rw-r--r--drivers/staging/rtlwifi/btcoexist/rtl_btc.c528
-rw-r--r--drivers/staging/rtlwifi/btcoexist/rtl_btc.h75
-rw-r--r--drivers/staging/rtlwifi/cam.c326
-rw-r--r--drivers/staging/rtlwifi/cam.h50
-rw-r--r--drivers/staging/rtlwifi/core.c2046
-rw-r--r--drivers/staging/rtlwifi/core.h86
-rw-r--r--drivers/staging/rtlwifi/debug.c636
-rw-r--r--drivers/staging/rtlwifi/debug.h234
-rw-r--r--drivers/staging/rtlwifi/efuse.c1342
-rw-r--r--drivers/staging/rtlwifi/efuse.h120
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_2_platform.h52
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_cfg.h132
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_phy.c106
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_pwr_seq.c563
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_pwr_seq.h40
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b.c343
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b.h44
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_pcie.c323
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_pcie.h53
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_sdio.c184
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_sdio.h42
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_usb.c185
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_usb.h45
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_func_8822b.c414
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_func_8822b.h38
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_88xx_cfg.h171
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.c5979
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.h396
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_pcie.c329
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_pcie.h71
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_sdio.c974
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_sdio.h84
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_usb.c554
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_usb.h73
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c4494
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.h321
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_api.c426
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_api.h82
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_bit2.h13407
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_bit_8822b.h12103
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_fw_info.h122
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_fw_offload_c2h_nic.h184
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_fw_offload_h2c_nic.h515
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_h2c_extra_info_nic.h115
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_intf_phy_cmd.h54
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_original_c2h_nic.h403
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_original_h2c_nic.h1011
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_pcie_reg.h28
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_pwr_seq_cmd.h116
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_reg2.h1132
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_reg_8822b.h728
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_rx_bd_chip.h48
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_rx_bd_nic.h48
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_rx_desc_chip.h118
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_rx_desc_nic.h133
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_sdio_reg.h62
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_tx_bd_chip.h118
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_tx_bd_nic.h123
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_tx_desc_chip.h444
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_tx_desc_nic.h506
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_type.h1934
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_usb_reg.h28
-rw-r--r--drivers/staging/rtlwifi/halmac/rtl_halmac.c1384
-rw-r--r--drivers/staging/rtlwifi/halmac/rtl_halmac.h94
-rw-r--r--drivers/staging/rtlwifi/pci.c2508
-rw-r--r--drivers/staging/rtlwifi/pci.h329
-rw-r--r--drivers/staging/rtlwifi/phydm/halphyrf_ce.c965
-rw-r--r--drivers/staging/rtlwifi/phydm/halphyrf_ce.h85
-rw-r--r--drivers/staging/rtlwifi/phydm/mp_precomp.h24
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm.c1986
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm.h946
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_acs.c200
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_acs.h57
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_adaptivity.c941
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_adaptivity.h119
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_adc_sampling.c628
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_adc_sampling.h96
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_antdiv.c83
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_antdiv.h301
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_beamforming.h48
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_ccx.c457
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_ccx.h83
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_cfotracking.c343
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_cfotracking.h60
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_debug.c2910
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_debug.h175
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dfs.h59
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dig.c1535
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dig.h241
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dynamic_rx_path.h37
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dynamicbbpowersaving.c129
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dynamicbbpowersaving.h50
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dynamictxpower.c102
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dynamictxpower.h64
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_edcaturbocheck.c139
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_edcaturbocheck.h44
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_features.h33
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_hwconfig.c1928
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_hwconfig.h510
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_interface.c341
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_interface.h205
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_iqk.h76
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_kfree.c228
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_kfree.h42
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_noisemonitor.c330
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_noisemonitor.h46
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_powertracking_ce.c644
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_powertracking_ce.h293
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_pre_define.h613
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_precomp.h85
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_psd.c422
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_psd.h67
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_rainfo.c1208
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_rainfo.h269
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_reg.h151
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_regdefine11ac.h94
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_regdefine11n.h213
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_types.h130
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_bb.c1969
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_bb.h54
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_mac.c222
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_mac.h38
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_rf.c4744
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_rf.h129
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halphyrf_8822b.c351
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halphyrf_8822b.h45
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/phydm_hal_api8822b.c1815
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/phydm_hal_api8822b.h84
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/phydm_iqk_8822b.c1410
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/phydm_iqk_8822b.h48
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/phydm_regconfig8822b.c168
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/phydm_regconfig8822b.h54
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/phydm_rtl8822b.c225
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/phydm_rtl8822b.h30
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/version_rtl8822b.h34
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl_phydm.c874
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl_phydm.h45
-rw-r--r--drivers/staging/rtlwifi/phydm/txbf/halcomtxbf.h67
-rw-r--r--drivers/staging/rtlwifi/phydm/txbf/haltxbf8822b.h39
-rw-r--r--drivers/staging/rtlwifi/phydm/txbf/haltxbfinterface.h38
-rw-r--r--drivers/staging/rtlwifi/phydm/txbf/haltxbfjaguar.h36
-rw-r--r--drivers/staging/rtlwifi/phydm/txbf/phydm_hal_txbf_api.h41
-rw-r--r--drivers/staging/rtlwifi/ps.c1007
-rw-r--r--drivers/staging/rtlwifi/ps.h50
-rw-r--r--drivers/staging/rtlwifi/pwrseqcmd.h94
-rw-r--r--drivers/staging/rtlwifi/rc.c322
-rw-r--r--drivers/staging/rtlwifi/rc.h49
-rw-r--r--drivers/staging/rtlwifi/regd.c469
-rw-r--r--drivers/staging/rtlwifi/regd.h63
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/Makefile7
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/def.h82
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/fw.c968
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/fw.h198
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/hw.c2441
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/hw.h66
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/led.c127
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/led.h34
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/phy.c2233
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/phy.h145
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/reg.h1653
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/sw.c481
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/sw.h32
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/trx.c1015
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/trx.h165
-rw-r--r--drivers/staging/rtlwifi/stats.c260
-rw-r--r--drivers/staging/rtlwifi/stats.h42
-rw-r--r--drivers/staging/rtlwifi/wifi.h3375
-rw-r--r--drivers/staging/rts5208/ms.c5
-rw-r--r--drivers/staging/rts5208/rtsx.c4
-rw-r--r--drivers/staging/rts5208/rtsx_chip.c4
-rw-r--r--drivers/staging/rts5208/rtsx_scsi.c2
-rw-r--r--drivers/staging/rts5208/sd.c25
-rw-r--r--drivers/staging/rts5208/spi.c8
-rw-r--r--drivers/staging/rts5208/xd.c17
-rw-r--r--drivers/staging/skein/skein_block.c323
-rw-r--r--drivers/staging/skein/skein_block.h323
-rw-r--r--drivers/staging/sm750fb/ddk750_chip.c2
-rw-r--r--drivers/staging/sm750fb/sm750.c24
-rw-r--r--drivers/staging/speakup/main.c2
-rw-r--r--drivers/staging/speakup/spk_priv.h2
-rw-r--r--drivers/staging/speakup/spk_ttyio.c97
-rw-r--r--drivers/staging/typec/fusb302/Kconfig2
-rw-r--r--drivers/staging/typec/fusb302/TODO4
-rw-r--r--drivers/staging/typec/fusb302/fusb302.c144
-rw-r--r--drivers/staging/typec/pd.h2
-rw-r--r--drivers/staging/typec/tcpm.c471
-rw-r--r--drivers/staging/typec/tcpm.h12
-rw-r--r--drivers/staging/unisys/Documentation/overview.txt14
-rw-r--r--drivers/staging/unisys/include/channel.h361
-rw-r--r--drivers/staging/unisys/include/iochannel.h554
-rw-r--r--drivers/staging/unisys/include/visorbus.h44
-rw-r--r--drivers/staging/unisys/visorbus/controlvmchannel.h715
-rw-r--r--drivers/staging/unisys/visorbus/vbuschannel.h96
-rw-r--r--drivers/staging/unisys/visorbus/visorbus_main.c469
-rw-r--r--drivers/staging/unisys/visorbus/visorbus_private.h38
-rw-r--r--drivers/staging/unisys/visorbus/visorchannel.c219
-rw-r--r--drivers/staging/unisys/visorbus/visorchipset.c607
-rw-r--r--drivers/staging/unisys/visorbus/vmcallinterface.h54
-rw-r--r--drivers/staging/unisys/visorhba/visorhba_main.c438
-rw-r--r--drivers/staging/unisys/visorinput/ultrainputreport.h68
-rw-r--r--drivers/staging/unisys/visorinput/visorinput.c138
-rw-r--r--drivers/staging/unisys/visornic/visornic_main.c736
-rw-r--r--drivers/staging/vboxvideo/Kconfig15
-rw-r--r--drivers/staging/vboxvideo/Makefile7
-rw-r--r--drivers/staging/vboxvideo/TODO9
-rw-r--r--drivers/staging/vboxvideo/hgsmi_base.c246
-rw-r--r--drivers/staging/vboxvideo/hgsmi_ch_setup.h66
-rw-r--r--drivers/staging/vboxvideo/hgsmi_channels.h53
-rw-r--r--drivers/staging/vboxvideo/hgsmi_defs.h92
-rw-r--r--drivers/staging/vboxvideo/modesetting.c142
-rw-r--r--drivers/staging/vboxvideo/vbox_drv.c285
-rw-r--r--drivers/staging/vboxvideo/vbox_drv.h296
-rw-r--r--drivers/staging/vboxvideo/vbox_err.h50
-rw-r--r--drivers/staging/vboxvideo/vbox_fb.c270
-rw-r--r--drivers/staging/vboxvideo/vbox_hgsmi.c115
-rw-r--r--drivers/staging/vboxvideo/vbox_irq.c197
-rw-r--r--drivers/staging/vboxvideo/vbox_main.c534
-rw-r--r--drivers/staging/vboxvideo/vbox_mode.c867
-rw-r--r--drivers/staging/vboxvideo/vbox_prime.c74
-rw-r--r--drivers/staging/vboxvideo/vbox_ttm.c472
-rw-r--r--drivers/staging/vboxvideo/vboxvideo.h491
-rw-r--r--drivers/staging/vboxvideo/vboxvideo_guest.h95
-rw-r--r--drivers/staging/vboxvideo/vboxvideo_vbe.h84
-rw-r--r--drivers/staging/vboxvideo/vbva_base.c233
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c8
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c4
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c10
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c2
-rw-r--r--drivers/staging/vt6655/card.c6
-rw-r--r--drivers/staging/vt6655/mac.c2
-rw-r--r--drivers/staging/vt6656/device.h2
-rw-r--r--drivers/staging/vt6656/firmware.c2
-rw-r--r--drivers/staging/vt6656/key.h2
-rw-r--r--drivers/staging/vt6656/main_usb.c5
-rw-r--r--drivers/staging/vt6656/power.c6
-rw-r--r--drivers/staging/vt6656/rf.c6
-rw-r--r--drivers/staging/vt6656/usbpipe.c4
-rw-r--r--drivers/staging/wilc1000/host_interface.c4
-rw-r--r--drivers/staging/wilc1000/linux_wlan.c5
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_cfgoperations.c65
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_netdevice.h4
-rw-r--r--drivers/staging/wlan-ng/hfa384x.h100
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c16
-rw-r--r--drivers/staging/wlan-ng/p80211conv.c1
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.c2
-rw-r--r--drivers/staging/wlan-ng/prism2fw.c27
-rw-r--r--drivers/staging/wlan-ng/prism2sta.c37
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_cm.c16
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_target.c12
-rw-r--r--drivers/target/iscsi/iscsi_target.c6
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c7
-rw-r--r--drivers/target/target_core_iblock.c4
-rw-r--r--drivers/target/target_core_tpg.c4
-rw-r--r--drivers/target/target_core_transport.c4
-rw-r--r--drivers/target/target_core_user.c13
-rw-r--r--drivers/thunderbolt/ctl.c2
-rw-r--r--drivers/thunderbolt/eeprom.c9
-rw-r--r--drivers/thunderbolt/icm.c22
-rw-r--r--drivers/thunderbolt/switch.c31
-rw-r--r--drivers/thunderbolt/tb.c4
-rw-r--r--drivers/thunderbolt/tb.h4
-rw-r--r--drivers/thunderbolt/tb_msgs.h12
-rw-r--r--drivers/tty/Kconfig3
-rw-r--r--drivers/tty/ehv_bytechan.c2
-rw-r--r--drivers/tty/goldfish.c234
-rw-r--r--drivers/tty/hvc/Kconfig2
-rw-r--r--drivers/tty/hvc/hvc_opal.c16
-rw-r--r--drivers/tty/hvc/hvc_vio.c20
-rw-r--r--drivers/tty/hvc/hvcs.c4
-rw-r--r--drivers/tty/isicom.c2
-rw-r--r--drivers/tty/mips_ejtag_fdc.c2
-rw-r--r--drivers/tty/moxa.c2
-rw-r--r--drivers/tty/mxser.c2
-rw-r--r--drivers/tty/n_gsm.c11
-rw-r--r--drivers/tty/pty.c127
-rw-r--r--drivers/tty/serdev/core.c2
-rw-r--r--drivers/tty/serial/21285.c2
-rw-r--r--drivers/tty/serial/8250/8250_aspeed_vuart.c7
-rw-r--r--drivers/tty/serial/8250/8250_core.c39
-rw-r--r--drivers/tty/serial/8250/8250_dw.c2
-rw-r--r--drivers/tty/serial/8250/8250_early.c8
-rw-r--r--drivers/tty/serial/8250/8250_exar.c6
-rw-r--r--drivers/tty/serial/8250/8250_gsc.c8
-rw-r--r--drivers/tty/serial/8250/8250_ingenic.c8
-rw-r--r--drivers/tty/serial/8250/8250_men_mcb.c118
-rw-r--r--drivers/tty/serial/8250/8250_mtk.c5
-rw-r--r--drivers/tty/serial/8250/8250_of.c60
-rw-r--r--drivers/tty/serial/8250/8250_pci.c43
-rw-r--r--drivers/tty/serial/8250/8250_port.c81
-rw-r--r--drivers/tty/serial/8250/8250_uniphier.c63
-rw-r--r--drivers/tty/serial/8250/Kconfig11
-rw-r--r--drivers/tty/serial/8250/Makefile1
-rw-r--r--drivers/tty/serial/Kconfig4
-rw-r--r--drivers/tty/serial/amba-pl010.c2
-rw-r--r--drivers/tty/serial/amba-pl011.c43
-rw-r--r--drivers/tty/serial/apbuart.c2
-rw-r--r--drivers/tty/serial/arc_uart.c4
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_core.c2
-rw-r--r--drivers/tty/serial/earlycon.c7
-rw-r--r--drivers/tty/serial/fsl_lpuart.c100
-rw-r--r--drivers/tty/serial/imx.c43
-rw-r--r--sound/pci/ctxfi/cthw20k2.c2
-rw-r--r--sound/pci/ctxfi/ctpcm.c48
-rw-r--r--sound/pci/ctxfi/ctresource.c6
-rw-r--r--sound/pci/ctxfi/ctsrc.c6
-rw-r--r--sound/pci/echoaudio/echoaudio.c8
-rw-r--r--sound/pci/emu10k1/emu10k1x.c4
-rw-r--r--sound/pci/emu10k1/emufx.c14
-rw-r--r--sound/pci/emu10k1/emupcm.c10
-rw-r--r--sound/pci/emu10k1/p16v.c4
-rw-r--r--sound/pci/ens1370.c6
-rw-r--r--sound/pci/es1938.c4
-rw-r--r--sound/pci/es1968.c4
-rw-r--r--sound/pci/fm801.c8
-rw-r--r--sound/pci/hda/dell_wmi_helper.c87
-rw-r--r--sound/pci/hda/hda_bind.c5
-rw-r--r--sound/pci/hda/hda_codec.c4
-rw-r--r--sound/pci/hda/hda_codec.h1
-rw-r--r--sound/pci/hda/hda_intel.c3
-rw-r--r--sound/pci/hda/patch_analog.c4
-rw-r--r--sound/pci/hda/patch_ca0132.c8
-rw-r--r--sound/pci/hda/patch_conexant.c2
-rw-r--r--sound/pci/hda/patch_hdmi.c41
-rw-r--r--sound/pci/hda/patch_realtek.c163
-rw-r--r--sound/pci/hda/patch_sigmatel.c2
-rw-r--r--sound/pci/ice1712/delta.c24
-rw-r--r--sound/pci/ice1712/ews.c12
-rw-r--r--sound/pci/ice1712/hoontech.c43
-rw-r--r--sound/pci/ice1712/hoontech.h1
-rw-r--r--sound/pci/ice1712/juli.c2
-rw-r--r--sound/pci/ice1712/phase.c4
-rw-r--r--sound/pci/ice1712/quartet.c2
-rw-r--r--sound/pci/ice1712/revo.c20
-rw-r--r--sound/pci/intel8x0.c28
-rw-r--r--sound/pci/intel8x0m.c10
-rw-r--r--sound/pci/korg1212/korg1212.c4
-rw-r--r--sound/pci/lola/lola_pcm.c2
-rw-r--r--sound/pci/lx6464es/lx6464es.c2
-rw-r--r--sound/pci/maestro3.c4
-rw-r--r--sound/pci/mixart/mixart.c18
-rw-r--r--sound/pci/mixart/mixart.h4
-rw-r--r--sound/pci/nm256/nm256.c2
-rw-r--r--sound/pci/pcxhr/pcxhr.c25
-rw-r--r--sound/pci/pcxhr/pcxhr.h3
-rw-r--r--sound/pci/pcxhr/pcxhr_mixer.c4
-rw-r--r--sound/pci/riptide/riptide.c4
-rw-r--r--sound/pci/rme32.c8
-rw-r--r--sound/pci/rme96.c51
-rw-r--r--sound/pci/rme9652/hdsp.c17
-rw-r--r--sound/pci/rme9652/hdspm.c90
-rw-r--r--sound/pci/rme9652/rme9652.c18
-rw-r--r--sound/pci/sis7019.c8
-rw-r--r--sound/pci/sonicvibes.c4
-rw-r--r--sound/pci/trident/trident_main.c20
-rw-r--r--sound/pci/via82xx.c2
-rw-r--r--sound/pci/via82xx_modem.c2
-rw-r--r--sound/pci/ymfpci/ymfpci_main.c4
-rw-r--r--sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c4
-rw-r--r--sound/ppc/pmac.c8
-rw-r--r--sound/ppc/snd_ps3.c2
-rw-r--r--sound/sh/aica.c4
-rw-r--r--sound/sh/sh_dac_audio.c6
-rw-r--r--sound/soc/atmel/atmel-classd.c1
-rw-r--r--sound/soc/atmel/atmel-pdmic.c1
-rw-r--r--sound/soc/au1x/db1200.c2
-rw-r--r--sound/soc/au1x/dbdma2.c2
-rw-r--r--sound/soc/au1x/dma.c2
-rw-r--r--sound/soc/au1x/psc-ac97.c2
-rw-r--r--sound/soc/au1x/psc-i2s.c2
-rw-r--r--sound/soc/bcm/cygnus-ssp.c237
-rw-r--r--sound/soc/blackfin/bf5xx-ac97-pcm.c2
-rw-r--r--sound/soc/blackfin/bf5xx-i2s-pcm.c2
-rw-r--r--sound/soc/blackfin/bf6xx-i2s.c2
-rw-r--r--sound/soc/blackfin/bf6xx-sport.c38
-rw-r--r--sound/soc/cirrus/edb93xx.c2
-rw-r--r--sound/soc/cirrus/snappercl15.c2
-rw-r--r--sound/soc/codecs/88pm860x-codec.c2
-rw-r--r--sound/soc/codecs/Kconfig20
-rw-r--r--sound/soc/codecs/Makefile8
-rw-r--r--sound/soc/codecs/ab8500-codec.c2
-rw-r--r--sound/soc/codecs/ac97.c2
-rw-r--r--sound/soc/codecs/ad1836.c2
-rw-r--r--sound/soc/codecs/ad193x.c2
-rw-r--r--sound/soc/codecs/ad1980.c2
-rw-r--r--sound/soc/codecs/ad73311.c2
-rw-r--r--sound/soc/codecs/adau1373.c2
-rw-r--r--sound/soc/codecs/adau1701.c2
-rw-r--r--sound/soc/codecs/adau1977.c8
-rw-r--r--sound/soc/codecs/adav80x.c2
-rw-r--r--sound/soc/codecs/ads117x.c2
-rw-r--r--sound/soc/codecs/ak4104.c2
-rw-r--r--sound/soc/codecs/ak4535.c2
-rw-r--r--sound/soc/codecs/ak4554.c2
-rw-r--r--sound/soc/codecs/ak4613.c2
-rw-r--r--sound/soc/codecs/ak4641.c2
-rw-r--r--sound/soc/codecs/ak4642.c2
-rw-r--r--sound/soc/codecs/ak4671.c2
-rw-r--r--sound/soc/codecs/ak5386.c2
-rw-r--r--sound/soc/codecs/alc5623.c2
-rw-r--r--sound/soc/codecs/arizona.c4
-rw-r--r--sound/soc/codecs/bt-sco.c2
-rw-r--r--sound/soc/codecs/cq93vc.c2
-rw-r--r--sound/soc/codecs/cs35l33.c14
-rw-r--r--sound/soc/codecs/cs35l34.c5
-rw-r--r--sound/soc/codecs/cs35l35.c2
-rw-r--r--sound/soc/codecs/cs4271.c2
-rw-r--r--sound/soc/codecs/cs42l42.c11
-rw-r--r--sound/soc/codecs/cs42l51.c2
-rw-r--r--sound/soc/codecs/cs43130.c2694
-rw-r--r--sound/soc/codecs/cs43130.h546
-rw-r--r--sound/soc/codecs/cs4349.c2
-rw-r--r--sound/soc/codecs/cs47l24.c6
-rw-r--r--sound/soc/codecs/cs53l30.c17
-rw-r--r--sound/soc/codecs/cx20442.c2
-rw-r--r--sound/soc/codecs/da7210.c2
-rw-r--r--sound/soc/codecs/da7213.c2
-rw-r--r--sound/soc/codecs/da7218.c2
-rw-r--r--sound/soc/codecs/da7219.c2
-rw-r--r--sound/soc/codecs/da732x.c2
-rw-r--r--sound/soc/codecs/da9055.c2
-rw-r--r--sound/soc/codecs/dmic.c54
-rw-r--r--sound/soc/codecs/es7134.c2
-rw-r--r--sound/soc/codecs/es8316.c4
-rw-r--r--sound/soc/codecs/es8328.c2
-rw-r--r--sound/soc/codecs/hdac_hdmi.c43
-rw-r--r--sound/soc/codecs/hdmi-codec.c47
-rw-r--r--sound/soc/codecs/ics43432.c2
-rw-r--r--sound/soc/codecs/inno_rk3036.c4
-rw-r--r--sound/soc/codecs/isabelle.c2
-rw-r--r--sound/soc/codecs/jz4740.c2
-rw-r--r--sound/soc/codecs/lm4857.c2
-rw-r--r--sound/soc/codecs/lm49453.c2
-rw-r--r--sound/soc/codecs/max9768.c2
-rw-r--r--sound/soc/codecs/max98088.c2
-rw-r--r--sound/soc/codecs/max98090.c2
-rw-r--r--sound/soc/codecs/max98095.c2
-rw-r--r--sound/soc/codecs/max98357a.c2
-rw-r--r--sound/soc/codecs/max98371.c14
-rw-r--r--sound/soc/codecs/max9850.c2
-rw-r--r--sound/soc/codecs/max9860.c2
-rw-r--r--sound/soc/codecs/max9867.c4
-rw-r--r--sound/soc/codecs/max98926.c8
-rw-r--r--sound/soc/codecs/max98927.c87
-rw-r--r--sound/soc/codecs/mc13783.c2
-rw-r--r--sound/soc/codecs/ml26124.c2
-rw-r--r--sound/soc/codecs/msm8916-wcd-analog.c417
-rw-r--r--sound/soc/codecs/msm8916-wcd-digital.c39
-rw-r--r--sound/soc/codecs/nau8540.c2
-rw-r--r--sound/soc/codecs/nau8810.c2
-rw-r--r--sound/soc/codecs/nau8824.c2
-rw-r--r--sound/soc/codecs/nau8825.c28
-rw-r--r--sound/soc/codecs/pcm1681.c2
-rw-r--r--sound/soc/codecs/pcm179x.c2
-rw-r--r--sound/soc/codecs/pcm3008.c2
-rw-r--r--sound/soc/codecs/pcm512x.c2
-rw-r--r--sound/soc/codecs/rt274.c1229
-rw-r--r--sound/soc/codecs/rt274.h217
-rw-r--r--sound/soc/codecs/rt286.c2
-rw-r--r--sound/soc/codecs/rt298.c2
-rw-r--r--sound/soc/codecs/rt5514-spi.c146
-rw-r--r--sound/soc/codecs/rt5514-spi.h7
-rw-r--r--sound/soc/codecs/rt5514.c174
-rw-r--r--sound/soc/codecs/rt5514.h22
-rw-r--r--sound/soc/codecs/rt5616.c4
-rw-r--r--sound/soc/codecs/rt5631.c2
-rw-r--r--sound/soc/codecs/rt5640.c2
-rw-r--r--sound/soc/codecs/rt5645.c45
-rw-r--r--sound/soc/codecs/rt5651.c2
-rw-r--r--sound/soc/codecs/rt5659.c4
-rw-r--r--sound/soc/codecs/rt5660.c2
-rw-r--r--sound/soc/codecs/rt5663.c282
-rw-r--r--sound/soc/codecs/rt5663.h2
-rw-r--r--sound/soc/codecs/rt5665.c59
-rw-r--r--sound/soc/codecs/rt5665.h25
-rw-r--r--sound/soc/codecs/rt5670.c64
-rw-r--r--sound/soc/codecs/rt5677.c81
-rw-r--r--sound/soc/codecs/rt5677.h30
-rw-r--r--sound/soc/codecs/sgtl5000.c6
-rw-r--r--sound/soc/codecs/si476x.c2
-rw-r--r--sound/soc/codecs/sirf-audio-codec.c12
-rw-r--r--sound/soc/codecs/sn95031.c2
-rw-r--r--sound/soc/codecs/spdif_receiver.c2
-rw-r--r--sound/soc/codecs/spdif_transmitter.c2
-rw-r--r--sound/soc/codecs/ssm2518.c2
-rw-r--r--sound/soc/codecs/ssm2602.c2
-rw-r--r--sound/soc/codecs/ssm4567.c2
-rw-r--r--sound/soc/codecs/sta32x.c3
-rw-r--r--sound/soc/codecs/stac9766.c2
-rw-r--r--sound/soc/codecs/tas2552.c13
-rw-r--r--sound/soc/codecs/tas5086.c2
-rw-r--r--sound/soc/codecs/tas5720.c4
-rw-r--r--sound/soc/codecs/tlv320aic23.c2
-rw-r--r--sound/soc/codecs/tlv320aic26.c2
-rw-r--r--sound/soc/codecs/tlv320aic31xx.c2
-rw-r--r--sound/soc/codecs/tlv320aic32x4-i2c.c2
-rw-r--r--sound/soc/codecs/tlv320aic32x4-spi.c2
-rw-r--r--sound/soc/codecs/tlv320aic32x4.c208
-rw-r--r--sound/soc/codecs/tlv320aic32x4.h3
-rw-r--r--sound/soc/codecs/tlv320aic3x.c47
-rw-r--r--sound/soc/codecs/tlv320aic3x.h8
-rw-r--r--sound/soc/codecs/tlv320dac33.c2
-rw-r--r--sound/soc/codecs/twl4030.c4
-rw-r--r--sound/soc/codecs/twl6040.c6
-rw-r--r--sound/soc/codecs/uda134x.c2
-rw-r--r--sound/soc/codecs/uda1380.c2
-rw-r--r--sound/soc/codecs/wl1273.c2
-rw-r--r--sound/soc/codecs/wm5102.c4
-rw-r--r--sound/soc/codecs/wm5110.c4
-rw-r--r--sound/soc/codecs/wm8523.c6
-rw-r--r--sound/soc/codecs/wm8524.c261
-rw-r--r--sound/soc/codecs/wm8804.c3
-rw-r--r--sound/soc/codecs/zx_aud96p22.c6
-rw-r--r--sound/soc/davinci/davinci-mcasp.c6
-rw-r--r--sound/soc/davinci/davinci-vcif.c5
-rw-r--r--sound/soc/dwc/dwc-i2s.c6
-rw-r--r--sound/soc/fsl/fsl-asoc-card.c2
-rw-r--r--sound/soc/fsl/fsl_asrc.c2
-rw-r--r--sound/soc/fsl/fsl_asrc_dma.c6
-rw-r--r--sound/soc/fsl/fsl_dma.c11
-rw-r--r--sound/soc/fsl/fsl_esai.c2
-rw-r--r--sound/soc/fsl/fsl_spdif.c2
-rw-r--r--sound/soc/fsl/fsl_ssi.c4
-rw-r--r--sound/soc/fsl/imx-audmux.c16
-rw-r--r--sound/soc/fsl/imx-es8328.c3
-rw-r--r--sound/soc/fsl/imx-pcm-fiq.c6
-rw-r--r--sound/soc/fsl/imx-ssi.c4
-rw-r--r--sound/soc/fsl/mpc5200_dma.c4
-rw-r--r--sound/soc/generic/audio-graph-card.c13
-rw-r--r--sound/soc/generic/audio-graph-scu-card.c18
-rw-r--r--sound/soc/generic/simple-card-utils.c19
-rw-r--r--sound/soc/generic/simple-card.c10
-rw-r--r--sound/soc/generic/simple-scu-card.c5
-rw-r--r--sound/soc/hisilicon/hi6210-i2s.c2
-rw-r--r--sound/soc/img/img-i2s-in.c2
-rw-r--r--sound/soc/img/img-i2s-out.c2
-rw-r--r--sound/soc/img/img-parallel-out.c2
-rw-r--r--sound/soc/img/img-spdif-in.c2
-rw-r--r--sound/soc/img/img-spdif-out.c2
-rw-r--r--sound/soc/intel/Kconfig3
-rw-r--r--sound/soc/intel/atom/sst-mfld-platform-pcm.c10
-rw-r--r--sound/soc/intel/atom/sst/sst_drv_interface.c4
-rw-r--r--sound/soc/intel/atom/sst/sst_pci.c2
-rw-r--r--sound/soc/intel/baytrail/sst-baytrail-pcm.c4
-rw-r--r--sound/soc/intel/boards/bxt_rt298.c86
-rw-r--r--sound/soc/intel/boards/byt-max98090.c12
-rw-r--r--sound/soc/intel/boards/cht_bsw_rt5672.c10
-rw-r--r--sound/soc/intel/boards/kbl_rt5663_max98927.c310
-rw-r--r--sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c94
-rw-r--r--sound/soc/intel/boards/mfld_machine.c4
-rw-r--r--sound/soc/intel/haswell/sst-haswell-pcm.c2
-rw-r--r--sound/soc/intel/skylake/Makefile5
-rw-r--r--sound/soc/intel/skylake/bxt-sst.c14
-rw-r--r--sound/soc/intel/skylake/cnl-sst-dsp.c274
-rw-r--r--sound/soc/intel/skylake/cnl-sst-dsp.h112
-rw-r--r--sound/soc/intel/skylake/cnl-sst.c497
-rw-r--r--sound/soc/intel/skylake/skl-messages.c167
-rw-r--r--sound/soc/intel/skylake/skl-pcm.c82
-rw-r--r--sound/soc/intel/skylake/skl-sst-dsp.c6
-rw-r--r--sound/soc/intel/skylake/skl-sst-ipc.c6
-rw-r--r--sound/soc/intel/skylake/skl-sst-ipc.h12
-rw-r--r--sound/soc/intel/skylake/skl-sst-utils.c6
-rw-r--r--sound/soc/intel/skylake/skl-sst.c12
-rw-r--r--sound/soc/intel/skylake/skl-topology.c741
-rw-r--r--sound/soc/intel/skylake/skl-topology.h83
-rw-r--r--sound/soc/intel/skylake/skl.c34
-rw-r--r--sound/soc/intel/skylake/skl.h3
-rw-r--r--sound/soc/jz4740/jz4740-i2s.c21
-rw-r--r--sound/soc/kirkwood/kirkwood-dma.c2
-rw-r--r--sound/soc/kirkwood/kirkwood-i2s.c11
-rw-r--r--sound/soc/mediatek/mt2701/mt2701-afe-clock-ctrl.c2
-rw-r--r--sound/soc/mediatek/mt2701/mt2701-afe-pcm.c15
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c1
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c1
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-rt5650.c1
-rw-r--r--sound/soc/mxs/mxs-saif.c13
-rw-r--r--sound/soc/mxs/mxs-sgtl5000.c1
-rw-r--r--sound/soc/nuc900/nuc900-pcm.c4
-rw-r--r--sound/soc/omap/ams-delta.c10
-rw-r--r--sound/soc/omap/omap-hdmi-audio.c9
-rw-r--r--sound/soc/omap/omap-pcm.c4
-rw-r--r--sound/soc/omap/omap-twl4030.c13
-rw-r--r--sound/soc/omap/rx51.c9
-rw-r--r--sound/soc/pxa/Kconfig1
-rw-r--r--sound/soc/pxa/hx4700.c8
-rw-r--r--sound/soc/pxa/mmp-pcm.c4
-rw-r--r--sound/soc/pxa/mmp-sspa.c2
-rw-r--r--sound/soc/pxa/pxa2xx-pcm.c4
-rw-r--r--sound/soc/qcom/apq8016_sbc.c58
-rw-r--r--sound/soc/qcom/lpass-platform.c4
-rw-r--r--sound/soc/qcom/storm.c1
-rw-r--r--sound/soc/rockchip/Kconfig2
-rw-r--r--sound/soc/rockchip/rk3288_hdmi_analog.c4
-rw-r--r--sound/soc/rockchip/rk3399_gru_sound.c270
-rw-r--r--sound/soc/rockchip/rockchip_i2s.c4
-rw-r--r--sound/soc/rockchip/rockchip_pdm.c2
-rw-r--r--sound/soc/samsung/h1940_uda1380.c9
-rw-r--r--sound/soc/samsung/i2s.c45
-rw-r--r--sound/soc/samsung/idma.c4
-rw-r--r--sound/soc/samsung/jive_wm8750.c2
-rw-r--r--sound/soc/samsung/odroid.c50
-rw-r--r--sound/soc/samsung/pcm.c8
-rw-r--r--sound/soc/samsung/rx1950_uda1380.c10
-rw-r--r--sound/soc/samsung/s3c-i2s-v2.c14
-rw-r--r--sound/soc/samsung/s3c-i2s-v2.h7
-rw-r--r--sound/soc/samsung/s3c2412-i2s.c15
-rw-r--r--sound/soc/samsung/s3c24xx-i2s.c11
-rw-r--r--sound/soc/samsung/s3c24xx_simtec.c2
-rw-r--r--sound/soc/samsung/s3c24xx_uda134x.c3
-rw-r--r--sound/soc/samsung/smartq_wm8987.c9
-rw-r--r--sound/soc/samsung/smdk_spdif.c2
-rw-r--r--sound/soc/samsung/spdif.c8
-rw-r--r--sound/soc/samsung/tm2_wm5110.c4
-rw-r--r--sound/soc/sh/dma-sh7760.c6
-rw-r--r--sound/soc/sh/fsi.c12
-rw-r--r--sound/soc/sh/hac.c2
-rw-r--r--sound/soc/sh/migor.c2
-rw-r--r--sound/soc/sh/rcar/adg.c11
-rw-r--r--sound/soc/sh/rcar/core.c109
-rw-r--r--sound/soc/sh/rcar/ctu.c5
-rw-r--r--sound/soc/sh/rcar/dma.c2
-rw-r--r--sound/soc/sh/rcar/dvc.c5
-rw-r--r--sound/soc/sh/rcar/gen.c4
-rw-r--r--sound/soc/sh/rcar/mix.c5
-rw-r--r--sound/soc/sh/rcar/rsnd.h1
-rw-r--r--sound/soc/sh/rcar/src.c14
-rw-r--r--sound/soc/sh/rcar/ssi.c141
-rw-r--r--sound/soc/sh/rcar/ssiu.c2
-rw-r--r--sound/soc/sh/siu_dai.c2
-rw-r--r--sound/soc/sh/siu_pcm.c2
-rw-r--r--sound/soc/soc-compress.c23
-rw-r--r--sound/soc/soc-core.c350
-rw-r--r--sound/soc/soc-jack.c76
-rw-r--r--sound/soc/soc-pcm.c79
-rw-r--r--sound/soc/soc-utils.c2
-rw-r--r--sound/soc/spear/spdif_in.c12
-rw-r--r--sound/soc/spear/spdif_out.c4
-rw-r--r--sound/soc/stm/stm32_i2s.c2
-rw-r--r--sound/soc/stm/stm32_sai.c2
-rw-r--r--sound/soc/stm/stm32_spdifrx.c2
-rw-r--r--sound/soc/sunxi/sun4i-codec.c15
-rw-r--r--sound/soc/sunxi/sun4i-i2s.c455
-rw-r--r--sound/soc/sunxi/sun4i-spdif.c14
-rw-r--r--sound/soc/sunxi/sun8i-codec.c4
-rw-r--r--sound/soc/tegra/Kconfig42
-rw-r--r--sound/soc/tegra/tegra30_ahub.c4
-rw-r--r--sound/soc/tegra/tegra30_i2s.c2
-rw-r--r--sound/soc/tegra/tegra_alc5632.c14
-rw-r--r--sound/soc/tegra/tegra_max98090.c19
-rw-r--r--sound/soc/tegra/tegra_rt5640.c14
-rw-r--r--sound/soc/tegra/tegra_rt5677.c19
-rw-r--r--sound/soc/tegra/tegra_sgtl5000.c1
-rw-r--r--sound/soc/tegra/tegra_wm8753.c1
-rw-r--r--sound/soc/tegra/tegra_wm8903.c7
-rw-r--r--sound/soc/tegra/tegra_wm9712.c1
-rw-r--r--sound/soc/tegra/trimslice.c1
-rw-r--r--sound/soc/txx9/txx9aclc.c4
-rw-r--r--sound/soc/ux500/mop500.c5
-rw-r--r--sound/soc/ux500/ux500_msp_dai.c2
-rw-r--r--sound/soc/zte/zx-i2s.c2
-rw-r--r--sound/soc/zte/zx-spdif.c2
-rw-r--r--sound/soc/zte/zx-tdm.c2
-rw-r--r--sound/sparc/amd7930.c6
-rw-r--r--sound/sparc/cs4231.c8
-rw-r--r--sound/sparc/dbri.c4
-rw-r--r--sound/spi/at73c213.c2
-rw-r--r--sound/synth/emux/emux_seq.c20
-rw-r--r--sound/usb/6fire/chip.c2
-rw-r--r--sound/usb/6fire/pcm.c2
-rw-r--r--sound/usb/bcd2000/bcd2000.c2
-rw-r--r--sound/usb/caiaq/audio.c5
-rw-r--r--sound/usb/caiaq/device.c2
-rw-r--r--sound/usb/card.c4
-rw-r--r--sound/usb/hiface/pcm.c2
-rw-r--r--sound/usb/midi.c26
-rw-r--r--sound/usb/misc/ua101.c6
-rw-r--r--sound/usb/mixer.c22
-rw-r--r--sound/usb/mixer.h1
-rw-r--r--sound/usb/mixer_quirks.c6
-rw-r--r--sound/usb/pcm.c6
-rw-r--r--sound/usb/quirks.c19
-rw-r--r--sound/usb/stream.c4
-rw-r--r--sound/usb/usx2y/us122l.c2
-rw-r--r--sound/usb/usx2y/usb_stream.c4
-rw-r--r--sound/usb/usx2y/usbusx2y.c2
-rw-r--r--sound/usb/usx2y/usbusx2yaudio.c11
-rw-r--r--sound/usb/usx2y/usx2yhwdeppcm.c2
-rw-r--r--tools/Makefile15
-rw-r--r--tools/arch/arm/include/uapi/asm/kvm.h8
-rw-r--r--tools/arch/arm64/include/uapi/asm/kvm.h3
-rw-r--r--tools/arch/parisc/include/uapi/asm/mman.h2
-rw-r--r--tools/arch/powerpc/include/uapi/asm/kvm.h6
-rw-r--r--tools/arch/s390/include/uapi/asm/kvm.h12
-rw-r--r--tools/arch/x86/include/asm/cpufeatures.h3
-rw-r--r--tools/arch/x86/include/asm/unistd_32.h3
-rw-r--r--tools/arch/x86/include/asm/unistd_64.h3
-rw-r--r--tools/arch/x86/include/uapi/asm/unistd.h17
-rw-r--r--tools/build/Makefile.feature3
-rw-r--r--tools/build/feature/Makefile6
-rw-r--r--tools/build/feature/test-all.c5
-rw-r--r--tools/build/feature/test-bpf.c2
-rw-r--r--tools/build/feature/test-setns.c7
-rw-r--r--tools/build/tests/ex/Makefile2
-rwxr-xr-xtools/hv/bondvf.sh232
-rw-r--r--tools/hv/hv_fcopy_daemon.c32
-rw-r--r--tools/hv/hv_kvp_daemon.c2
-rw-r--r--tools/hv/hv_vss_daemon.c7
-rw-r--r--tools/iio/Build3
-rw-r--r--tools/iio/Makefile76
-rw-r--r--tools/include/linux/coresight-pmu.h6
-rw-r--r--tools/include/linux/string.h12
-rw-r--r--tools/include/uapi/asm-generic/fcntl.h220
-rw-r--r--tools/include/uapi/asm-generic/ioctls.h118
-rw-r--r--tools/include/uapi/drm/drm.h933
-rw-r--r--tools/include/uapi/drm/i915_drm.h1474
-rw-r--r--tools/include/uapi/linux/bpf.h76
-rw-r--r--tools/include/uapi/linux/fcntl.h21
-rw-r--r--tools/include/uapi/linux/kvm.h1419
-rw-r--r--tools/include/uapi/linux/perf_event.h57
-rw-r--r--tools/include/uapi/linux/sched.h52
-rw-r--r--tools/include/uapi/linux/vhost.h209
-rw-r--r--tools/include/uapi/sound/asound.h1026
-rwxr-xr-xtools/kvm/kvm_stat/kvm_stat22
-rw-r--r--tools/lguest/.gitignore2
-rw-r--r--tools/lguest/Makefile14
-rw-r--r--tools/lguest/extract58
-rw-r--r--tools/lguest/lguest.c3420
-rw-r--r--tools/lguest/lguest.txt125
-rw-r--r--tools/lib/api/Makefile8
-rw-r--r--tools/lib/bpf/Makefile12
-rw-r--r--tools/lib/bpf/bpf.c39
-rw-r--r--tools/lib/bpf/bpf.h8
-rw-r--r--tools/lib/bpf/libbpf.c32
-rw-r--r--tools/lib/bpf/libbpf.h2
-rw-r--r--tools/lib/string.c41
-rw-r--r--tools/lib/subcmd/Makefile2
-rw-r--r--tools/lib/subcmd/help.c2
-rw-r--r--tools/lib/subcmd/parse-options.c18
-rw-r--r--tools/objtool/Build3
-rw-r--r--tools/objtool/Documentation/stack-validation.txt56
-rw-r--r--tools/objtool/Makefile6
-rw-r--r--tools/objtool/arch.h5
-rw-r--r--tools/objtool/arch/x86/decode.c91
-rw-r--r--tools/objtool/builtin-check.c7
-rw-r--r--tools/objtool/builtin-orc.c70
-rw-r--r--tools/objtool/builtin.h1
-rw-r--r--tools/objtool/cfi.h2
-rw-r--r--tools/objtool/check.c498
-rw-r--r--tools/objtool/check.h22
-rw-r--r--tools/objtool/elf.c212
-rw-r--r--tools/objtool/elf.h15
-rw-r--r--tools/objtool/objtool.c3
-rw-r--r--tools/objtool/orc.h30
-rw-r--r--tools/objtool/orc_dump.c212
-rw-r--r--tools/objtool/orc_gen.c214
-rw-r--r--tools/objtool/orc_types.h107
-rw-r--r--tools/perf/Build2
-rw-r--r--tools/perf/Documentation/Makefile2
-rw-r--r--tools/perf/Documentation/intel-pt.txt6
-rw-r--r--tools/perf/Documentation/perf-annotate.txt6
-rw-r--r--tools/perf/Documentation/perf-buildid-cache.txt5
-rw-r--r--tools/perf/Documentation/perf-probe.txt14
-rw-r--r--tools/perf/Documentation/perf-record.txt1
-rw-r--r--tools/perf/Documentation/perf-stat.txt4
-rw-r--r--tools/perf/Documentation/perf-top.txt4
-rw-r--r--tools/perf/Documentation/perf.data-file-format.txt10
-rw-r--r--tools/perf/MANIFEST8
-rw-r--r--tools/perf/Makefile.config25
-rw-r--r--tools/perf/Makefile.perf101
-rw-r--r--tools/perf/arch/arm/util/cs-etm.c28
-rw-r--r--tools/perf/arch/powerpc/util/sym-handling.c2
-rw-r--r--tools/perf/arch/s390/util/Build1
-rw-r--r--tools/perf/arch/s390/util/sym-handling.c29
-rw-r--r--tools/perf/arch/x86/Makefile2
-rw-r--r--tools/perf/arch/x86/annotate/instructions.c46
-rw-r--r--tools/perf/arch/x86/include/arch-tests.h11
-rw-r--r--tools/perf/arch/x86/tests/insn-x86.c2
-rw-r--r--tools/perf/arch/x86/tests/intel-cqm.c2
-rw-r--r--tools/perf/arch/x86/tests/perf-time-to-tsc.c2
-rw-r--r--tools/perf/arch/x86/tests/rdpmc.c2
-rw-r--r--tools/perf/arch/x86/util/intel-pt.c3
-rw-r--r--tools/perf/builtin-annotate.c23
-rw-r--r--tools/perf/builtin-buildid-cache.c54
-rw-r--r--tools/perf/builtin-config.c3
-rw-r--r--tools/perf/builtin-data.c2
-rw-r--r--tools/perf/builtin-ftrace.c2
-rw-r--r--tools/perf/builtin-help.c6
-rw-r--r--tools/perf/builtin-inject.c1
-rw-r--r--tools/perf/builtin-probe.c45
-rw-r--r--tools/perf/builtin-record.c9
-rw-r--r--tools/perf/builtin-report.c63
-rw-r--r--tools/perf/builtin-script.c11
-rw-r--r--tools/perf/builtin-stat.c30
-rw-r--r--tools/perf/builtin-top.c22
-rw-r--r--tools/perf/builtin-trace.c744
-rwxr-xr-xtools/perf/check-headers.sh20
-rw-r--r--tools/perf/perf-sys.h28
-rw-r--r--tools/perf/perf.c16
-rw-r--r--tools/perf/perf.h1
-rw-r--r--tools/perf/pmu-events/README4
-rw-r--r--tools/perf/pmu-events/arch/powerpc/mapfile.csv16
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power9/cache.json137
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power9/floating-point.json32
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power9/frontend.json377
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power9/marked.json647
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power9/memory.json132
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power9/other.json2512
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power9/pipeline.json557
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power9/pmc.json127
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power9/translation.json232
-rw-r--r--tools/perf/pmu-events/arch/x86/mapfile.csv1
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/cache.json1672
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/floating-point.json88
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/frontend.json482
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/memory.json1396
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/other.json72
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/pipeline.json950
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/uncore-memory.json172
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/uncore-other.json1156
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/virtual-memory.json284
-rw-r--r--tools/perf/pmu-events/jevents.c23
-rw-r--r--tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py1
-rw-r--r--tools/perf/scripts/python/bin/export-to-sqlite-record8
-rw-r--r--tools/perf/scripts/python/bin/export-to-sqlite-report29
-rw-r--r--tools/perf/scripts/python/call-graph-from-sql.py (renamed from tools/perf/scripts/python/call-graph-from-postgresql.py)70
-rw-r--r--tools/perf/scripts/python/export-to-postgresql.py5
-rw-r--r--tools/perf/scripts/python/export-to-sqlite.py451
-rw-r--r--tools/perf/tests/Build1
-rw-r--r--tools/perf/tests/attr.c14
-rw-r--r--tools/perf/tests/attr.py50
-rw-r--r--tools/perf/tests/attr/base-record6
-rw-r--r--tools/perf/tests/attr/base-stat4
-rw-r--r--tools/perf/tests/attr/test-record-C01
-rw-r--r--tools/perf/tests/attr/test-record-basic1
-rw-r--r--tools/perf/tests/attr/test-record-branch-any2
-rw-r--r--tools/perf/tests/attr/test-record-branch-filter-any2
-rw-r--r--tools/perf/tests/attr/test-record-branch-filter-any_call2
-rw-r--r--tools/perf/tests/attr/test-record-branch-filter-any_ret2
-rw-r--r--tools/perf/tests/attr/test-record-branch-filter-hv2
-rw-r--r--tools/perf/tests/attr/test-record-branch-filter-ind_call2
-rw-r--r--tools/perf/tests/attr/test-record-branch-filter-k2
-rw-r--r--tools/perf/tests/attr/test-record-branch-filter-u2
-rw-r--r--tools/perf/tests/attr/test-record-count1
-rw-r--r--tools/perf/tests/attr/test-record-data3
-rw-r--r--tools/perf/tests/attr/test-record-freq1
-rw-r--r--tools/perf/tests/attr/test-record-graph-default1
-rw-r--r--tools/perf/tests/attr/test-record-graph-dwarf4
-rw-r--r--tools/perf/tests/attr/test-record-graph-fp1
-rw-r--r--tools/perf/tests/attr/test-record-group1
-rw-r--r--tools/perf/tests/attr/test-record-group-sampling1
-rw-r--r--tools/perf/tests/attr/test-record-group11
-rw-r--r--tools/perf/tests/attr/test-record-no-buffering (renamed from tools/perf/tests/attr/test-record-no-delay)4
-rw-r--r--tools/perf/tests/attr/test-record-no-inherit1
-rw-r--r--tools/perf/tests/attr/test-record-no-samples1
-rw-r--r--tools/perf/tests/attr/test-record-period1
-rw-r--r--tools/perf/tests/attr/test-record-raw2
-rw-r--r--tools/perf/tests/attr/test-stat-C04
-rw-r--r--tools/perf/tests/attr/test-stat-default2
-rw-r--r--tools/perf/tests/attr/test-stat-detailed-12
-rw-r--r--tools/perf/tests/attr/test-stat-detailed-23
-rw-r--r--tools/perf/tests/attr/test-stat-detailed-35
-rw-r--r--tools/perf/tests/backward-ring-buffer.c2
-rw-r--r--tools/perf/tests/bitmap.c2
-rw-r--r--tools/perf/tests/bp_signal.c2
-rw-r--r--tools/perf/tests/bp_signal_overflow.c2
-rw-r--r--tools/perf/tests/bpf-script-test-prologue.c4
-rw-r--r--tools/perf/tests/bpf.c20
-rw-r--r--tools/perf/tests/builtin-test.c188
-rw-r--r--tools/perf/tests/clang.c4
-rw-r--r--tools/perf/tests/code-reading.c2
-rw-r--r--tools/perf/tests/cpumap.c4
-rw-r--r--tools/perf/tests/dso-data.c6
-rw-r--r--tools/perf/tests/dwarf-unwind.c2
-rw-r--r--tools/perf/tests/event-times.c2
-rw-r--r--tools/perf/tests/event_update.c2
-rw-r--r--tools/perf/tests/evsel-roundtrip-name.c2
-rw-r--r--tools/perf/tests/evsel-tp-sched.c2
-rw-r--r--tools/perf/tests/expr.c7
-rw-r--r--tools/perf/tests/fdarray.c4
-rw-r--r--tools/perf/tests/hists_cumulate.c2
-rw-r--r--tools/perf/tests/hists_filter.c2
-rw-r--r--tools/perf/tests/hists_link.c2
-rw-r--r--tools/perf/tests/hists_output.c2
-rw-r--r--tools/perf/tests/is_printable_array.c2
-rw-r--r--tools/perf/tests/keep-tracking.c2
-rw-r--r--tools/perf/tests/kmod-path.c2
-rw-r--r--tools/perf/tests/llvm.c2
-rw-r--r--tools/perf/tests/mem.c56
-rw-r--r--tools/perf/tests/mmap-basic.c2
-rw-r--r--tools/perf/tests/mmap-thread-lookup.c2
-rw-r--r--tools/perf/tests/openat-syscall-all-cpus.c4
-rw-r--r--tools/perf/tests/openat-syscall-tp-fields.c2
-rw-r--r--tools/perf/tests/openat-syscall.c4
-rw-r--r--tools/perf/tests/parse-events.c2
-rw-r--r--tools/perf/tests/parse-no-sample-id-all.c2
-rw-r--r--tools/perf/tests/perf-hooks.c2
-rw-r--r--tools/perf/tests/perf-record.c2
-rw-r--r--tools/perf/tests/pmu.c2
-rw-r--r--tools/perf/tests/python-use.c2
-rw-r--r--tools/perf/tests/sample-parsing.c2
-rw-r--r--tools/perf/tests/sdt.c12
-rw-r--r--tools/perf/tests/shell/lib/probe.sh6
-rw-r--r--tools/perf/tests/shell/lib/probe_vfs_getname.sh23
-rwxr-xr-xtools/perf/tests/shell/probe_vfs_getname.sh14
-rwxr-xr-xtools/perf/tests/shell/record+script_probe_vfs_getname.sh41
-rwxr-xr-xtools/perf/tests/shell/trace+probe_libc_inet_pton.sh43
-rwxr-xr-xtools/perf/tests/shell/trace+probe_vfs_getname.sh35
-rw-r--r--tools/perf/tests/stat.c6
-rw-r--r--tools/perf/tests/sw-clock.c2
-rw-r--r--tools/perf/tests/switch-tracking.c2
-rw-r--r--tools/perf/tests/task-exit.c2
-rw-r--r--tools/perf/tests/tests.h114
-rw-r--r--tools/perf/tests/thread-map.c6
-rw-r--r--tools/perf/tests/thread-mg-share.c2
-rw-r--r--tools/perf/tests/topology.c2
-rw-r--r--tools/perf/tests/unit_number__scnprintf.c2
-rw-r--r--tools/perf/tests/vmlinux-kallsyms.c2
-rw-r--r--tools/perf/trace/beauty/Build6
-rw-r--r--tools/perf/trace/beauty/beauty.h74
-rw-r--r--tools/perf/trace/beauty/clone.c75
-rwxr-xr-xtools/perf/trace/beauty/drm_ioctl.sh13
-rw-r--r--tools/perf/trace/beauty/fcntl.c100
-rw-r--r--tools/perf/trace/beauty/ioctl.c162
-rwxr-xr-xtools/perf/trace/beauty/kvm_ioctl.sh11
-rw-r--r--tools/perf/trace/beauty/mmap.c3
-rw-r--r--tools/perf/trace/beauty/open_flags.c29
-rwxr-xr-xtools/perf/trace/beauty/perf_ioctl.sh10
-rw-r--r--tools/perf/trace/beauty/pid.c4
-rw-r--r--tools/perf/trace/beauty/pkey_alloc.c50
-rwxr-xr-xtools/perf/trace/beauty/pkey_alloc_access_rights.sh10
-rwxr-xr-xtools/perf/trace/beauty/sndrv_ctl_ioctl.sh8
-rwxr-xr-xtools/perf/trace/beauty/sndrv_pcm_ioctl.sh8
-rwxr-xr-xtools/perf/trace/beauty/vhost_virtio_ioctl.sh17
-rw-r--r--tools/perf/ui/browser.c34
-rw-r--r--tools/perf/ui/browser.h2
-rw-r--r--tools/perf/ui/browsers/annotate.c94
-rw-r--r--tools/perf/ui/browsers/hists.c3
-rw-r--r--tools/perf/ui/gtk/annotate.c6
-rw-r--r--tools/perf/ui/stdio/hist.c6
-rw-r--r--tools/perf/util/Build6
-rw-r--r--tools/perf/util/annotate.c137
-rw-r--r--tools/perf/util/annotate.h22
-rw-r--r--tools/perf/util/bpf-loader.c2
-rw-r--r--tools/perf/util/bpf-prologue.c49
-rw-r--r--tools/perf/util/branch.c147
-rw-r--r--tools/perf/util/branch.h25
-rw-r--r--tools/perf/util/build-id.c129
-rw-r--r--tools/perf/util/build-id.h16
-rw-r--r--tools/perf/util/callchain.c249
-rw-r--r--tools/perf/util/callchain.h5
-rw-r--r--tools/perf/util/cgroup.c8
-rw-r--r--tools/perf/util/config.c13
-rw-r--r--tools/perf/util/counts.h1
-rw-r--r--tools/perf/util/data-convert-bt.c127
-rw-r--r--tools/perf/util/dso.c21
-rw-r--r--tools/perf/util/dso.h3
-rw-r--r--tools/perf/util/event.c1
-rw-r--r--tools/perf/util/event.h11
-rw-r--r--tools/perf/util/evlist.c16
-rw-r--r--tools/perf/util/evlist.h14
-rw-r--r--tools/perf/util/evsel.c221
-rw-r--r--tools/perf/util/evsel.h12
-rw-r--r--tools/perf/util/expr.h2
-rw-r--r--tools/perf/util/expr.y76
-rw-r--r--tools/perf/util/header.c1018
-rw-r--r--tools/perf/util/header.h16
-rw-r--r--tools/perf/util/hist.c7
-rw-r--r--tools/perf/util/intel-pt-decoder/Build2
-rw-r--r--tools/perf/util/llvm-utils.c2
-rw-r--r--tools/perf/util/machine.c52
-rw-r--r--tools/perf/util/map.c23
-rw-r--r--tools/perf/util/map.h2
-rw-r--r--tools/perf/util/mem-events.c54
-rw-r--r--tools/perf/util/namespaces.c211
-rw-r--r--tools/perf/util/namespaces.h38
-rw-r--r--tools/perf/util/parse-branch-options.c1
-rw-r--r--tools/perf/util/parse-events.c98
-rw-r--r--tools/perf/util/parse-events.h19
-rw-r--r--tools/perf/util/parse-events.l23
-rw-r--r--tools/perf/util/parse-events.y94
-rw-r--r--tools/perf/util/probe-event.c88
-rw-r--r--tools/perf/util/probe-event.h10
-rw-r--r--tools/perf/util/probe-file.c19
-rw-r--r--tools/perf/util/probe-file.h4
-rw-r--r--tools/perf/util/python-ext-sources1
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c246
-rw-r--r--tools/perf/util/session.c29
-rw-r--r--tools/perf/util/setns.c8
-rw-r--r--tools/perf/util/smt.c44
-rw-r--r--tools/perf/util/smt.h6
-rw-r--r--tools/perf/util/sort.c2
-rw-r--r--tools/perf/util/srcline.c6
-rw-r--r--tools/perf/util/stat-shadow.c6
-rw-r--r--tools/perf/util/stat.c4
-rw-r--r--tools/perf/util/stat.h5
-rw-r--r--tools/perf/util/symbol-elf.c39
-rw-r--r--tools/perf/util/symbol-minimal.c2
-rw-r--r--tools/perf/util/symbol.c113
-rw-r--r--tools/perf/util/symbol.h7
-rw-r--r--tools/perf/util/thread.c3
-rw-r--r--tools/perf/util/thread.h1
-rw-r--r--tools/perf/util/tool.h10
-rw-r--r--tools/perf/util/util.c40
-rw-r--r--tools/perf/util/util.h8
-rw-r--r--tools/perf/util/values.c17
-rw-r--r--tools/perf/util/xyarray.c2
-rw-r--r--tools/perf/util/xyarray.h12
-rw-r--r--tools/power/acpi/os_specific/service_layers/oslinuxtbl.c105
-rw-r--r--tools/power/acpi/tools/acpidump/apfiles.c2
-rw-r--r--tools/power/acpi/tools/acpidump/apmain.c2
-rw-r--r--tools/power/cpupower/utils/cpupower.c15
-rw-r--r--tools/power/cpupower/utils/helpers/cpuid.c4
-rw-r--r--tools/power/cpupower/utils/helpers/helpers.h5
-rw-r--r--tools/power/cpupower/utils/helpers/misc.c2
-rw-r--r--tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c4
-rw-r--r--tools/power/cpupower/utils/idle_monitor/mperf_monitor.c3
-rw-r--r--tools/power/cpupower/utils/idle_monitor/nhm_idle.c8
-rw-r--r--tools/power/cpupower/utils/idle_monitor/snb_idle.c4
-rw-r--r--tools/power/pm-graph/Makefile19
-rwxr-xr-xtools/power/pm-graph/analyze_boot.py586
-rwxr-xr-xtools/power/pm-graph/analyze_suspend.py534
-rw-r--r--tools/power/pm-graph/bootgraph.861
-rw-r--r--tools/power/pm-graph/sleepgraph.848
-rw-r--r--tools/scripts/Makefile.include4
-rw-r--r--tools/spi/Build2
-rw-r--r--tools/spi/Makefile64
-rw-r--r--tools/testing/selftests/bpf/Makefile4
-rw-r--r--tools/testing/selftests/bpf/bpf_helpers.h9
-rw-r--r--tools/testing/selftests/bpf/sockmap_parse_prog.c38
-rw-r--r--tools/testing/selftests/bpf/sockmap_verdict_prog.c68
-rw-r--r--tools/testing/selftests/bpf/test_align.c464
-rw-r--r--tools/testing/selftests/bpf/test_maps.c393
-rw-r--r--tools/testing/selftests/bpf/test_pkt_md_access.c11
-rw-r--r--tools/testing/selftests/bpf/test_progs.c79
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c1375
-rw-r--r--tools/testing/selftests/bpf/test_xdp_redirect.c28
-rwxr-xr-xtools/testing/selftests/bpf/test_xdp_redirect.sh59
-rwxr-xr-xtools/testing/selftests/firmware/fw_fallback.sh31
-rwxr-xr-xtools/testing/selftests/firmware/fw_filesystem.sh241
-rw-r--r--tools/testing/selftests/futex/Makefile2
-rwxr-xr-x[-rw-r--r--]tools/testing/selftests/kmod/kmod.sh4
-rw-r--r--tools/testing/selftests/memfd/Makefile2
-rw-r--r--tools/testing/selftests/memfd/memfd_test.c372
-rw-r--r--tools/testing/selftests/memfd/run_tests.sh69
-rw-r--r--tools/testing/selftests/net/.gitignore1
-rw-r--r--tools/testing/selftests/net/Makefile4
-rw-r--r--tools/testing/selftests/net/msg_zerocopy.c697
-rwxr-xr-xtools/testing/selftests/net/msg_zerocopy.sh112
-rwxr-xr-xtools/testing/selftests/net/rtnetlink.sh272
-rw-r--r--tools/testing/selftests/networking/timestamping/.gitignore1
-rw-r--r--tools/testing/selftests/networking/timestamping/Makefile4
-rw-r--r--tools/testing/selftests/networking/timestamping/rxtimestamp.c389
-rwxr-xr-xtools/testing/selftests/ntb/ntb_test.sh4
-rw-r--r--tools/testing/selftests/powerpc/ptrace/Makefile2
-rw-r--r--tools/testing/selftests/powerpc/switch_endian/switch_endian_test.S2
-rw-r--r--tools/testing/selftests/powerpc/tm/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-resched-dscr.c12
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/config_override.sh61
-rw-r--r--tools/testing/selftests/rcutorture/bin/functions.sh27
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-build.sh11
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh58
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm.sh34
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/BUSTED.boot2
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/SRCU-C.boot1
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/SRCU-u3
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE01.boot2
-rw-r--r--tools/testing/selftests/rcutorture/doc/TREE_RCU-kconfig.txt2
-rwxr-xr-x[-rw-r--r--]tools/testing/selftests/sysctl/sysctl.sh0
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/tests.json50
-rw-r--r--tools/testing/selftests/timers/freq-step.c15
-rw-r--r--tools/testing/selftests/timers/set-timer-lat.c103
-rw-r--r--tools/testing/selftests/vm/userfaultfd.c279
-rw-r--r--tools/testing/selftests/x86/fsgsbase.c41
-rw-r--r--tools/usb/usbip/src/usbip_attach.c35
-rw-r--r--virt/kvm/arm/mmu.c4
-rw-r--r--virt/kvm/arm/pmu.c43
-rw-r--r--virt/kvm/arm/vgic/vgic-init.c3
-rw-r--r--virt/kvm/arm/vgic/vgic-its.c1
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio-v3.c4
-rw-r--r--virt/kvm/kvm_main.c88
9181 files changed, 503008 insertions, 184811 deletions
diff --git a/Documentation/ABI/stable/sysfs-bus-nvmem b/Documentation/ABI/stable/sysfs-bus-nvmem
new file mode 100644
index 000000000000..5923ab4620c5
--- /dev/null
+++ b/Documentation/ABI/stable/sysfs-bus-nvmem
@@ -0,0 +1,19 @@
+What: /sys/bus/nvmem/devices/.../nvmem
+Date: July 2015
+KernelVersion: 4.2
+Contact: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+Description:
+ This file allows the user to read/write the raw NVMEM contents.
+ Permissions for writing to this file depend on the nvmem
+ provider configuration.
+
+ ex:
+ hexdump /sys/bus/nvmem/devices/qfprom0/nvmem
+
+ 0000000 0000 0000 0000 0000 0000 0000 0000 0000
+ *
+ 00000a0 db10 2240 0000 e000 0c00 0c00 0000 0c00
+ 0000000 0000 0000 0000 0000 0000 0000 0000 0000
+ ...
+ *
+ 0001000
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-rndis b/Documentation/ABI/testing/configfs-usb-gadget-rndis
index e32879b84b4d..137399095d74 100644
--- a/Documentation/ABI/testing/configfs-usb-gadget-rndis
+++ b/Documentation/ABI/testing/configfs-usb-gadget-rndis
@@ -12,3 +12,6 @@ Description:
Ethernet over USB link
dev_addr - MAC address of device's end of this
Ethernet over USB link
+ class - USB interface class, default is 02 (hex)
+ subclass - USB interface subclass, default is 06 (hex)
+ protocol - USB interface protocol, default is 00 (hex)
diff --git a/Documentation/ABI/testing/ppc-memtrace b/Documentation/ABI/testing/ppc-memtrace
new file mode 100644
index 000000000000..2e8b93741270
--- /dev/null
+++ b/Documentation/ABI/testing/ppc-memtrace
@@ -0,0 +1,45 @@
+What: /sys/kernel/debug/powerpc/memtrace
+Date: Aug 2017
+KernelVersion: 4.14
+Contact: linuxppc-dev@lists.ozlabs.org
+Description: This folder contains the relevant debugfs files for the
+ hardware trace macro to use. CONFIG_PPC64_HARDWARE_TRACING
+ must be set.
+
+What: /sys/kernel/debug/powerpc/memtrace/enable
+Date: Aug 2017
+KernelVersion: 4.14
+Contact: linuxppc-dev@lists.ozlabs.org
+Description: Write an integer containing the size in bytes of the memory
+ you want removed from each NUMA node to this file - it must be
+ aligned to the memblock size. This amount of RAM will be removed
+ from the kernel mappings and the following debugfs files will be
+ created. This can only be successfully done once per boot. Once
+ memory is successfully removed from each node, the following
+ files are created.
+
+What: /sys/kernel/debug/powerpc/memtrace/<node-id>
+Date: Aug 2017
+KernelVersion: 4.14
+Contact: linuxppc-dev@lists.ozlabs.org
+Description: This directory contains information about the removed memory
+ from the specific NUMA node.
+
+What: /sys/kernel/debug/powerpc/memtrace/<node-id>/size
+Date: Aug 2017
+KernelVersion: 4.14
+Contact: linuxppc-dev@lists.ozlabs.org
+Description: This contains the size of the memory removed from the node.
+
+What: /sys/kernel/debug/powerpc/memtrace/<node-id>/start
+Date: Aug 2017
+KernelVersion: 4.14
+Contact: linuxppc-dev@lists.ozlabs.org
+Description: This contains the start address of the removed memory.
+
+What: /sys/kernel/debug/powerpc/memtrace/<node-id>/trace
+Date: Aug 2017
+KernelVersion: 4.14
+Contact: linuxppc-dev@lists.ozlabs.org
+Description: This is where the hardware trace macro will output the trace
+ it generates.
diff --git a/Documentation/ABI/testing/procfs-smaps_rollup b/Documentation/ABI/testing/procfs-smaps_rollup
new file mode 100644
index 000000000000..0a54ed0d63c9
--- /dev/null
+++ b/Documentation/ABI/testing/procfs-smaps_rollup
@@ -0,0 +1,31 @@
+What: /proc/pid/smaps_rollup
+Date: August 2017
+Contact: Daniel Colascione <dancol@google.com>
+Description:
+ This file provides pre-summed memory information for a
+ process. The format is identical to /proc/pid/smaps,
+ except instead of an entry for each VMA in a process,
+ smaps_rollup has a single entry (tagged "[rollup]")
+ for which each field is the sum of the corresponding
+ fields from all the maps in /proc/pid/smaps.
+ For more details, see the procfs man page.
+
+ Typical output looks like this:
+
+ 00100000-ff709000 ---p 00000000 00:00 0 [rollup]
+ Rss: 884 kB
+ Pss: 385 kB
+ Shared_Clean: 696 kB
+ Shared_Dirty: 0 kB
+ Private_Clean: 120 kB
+ Private_Dirty: 68 kB
+ Referenced: 884 kB
+ Anonymous: 68 kB
+ LazyFree: 0 kB
+ AnonHugePages: 0 kB
+ ShmemPmdMapped: 0 kB
+ Shared_Hugetlb: 0 kB
+ Private_Hugetlb: 0 kB
+ Swap: 0 kB
+ SwapPss: 0 kB
+ Locked: 385 kB
diff --git a/Documentation/ABI/testing/sysfs-block-zram b/Documentation/ABI/testing/sysfs-block-zram
index 451b6d882b2c..c1513c756af1 100644
--- a/Documentation/ABI/testing/sysfs-block-zram
+++ b/Documentation/ABI/testing/sysfs-block-zram
@@ -90,3 +90,11 @@ Description:
device's debugging info useful for kernel developers. Its
format is not documented intentionally and may change
anytime without any notice.
+
+What: /sys/block/zram<id>/backing_dev
+Date: June 2017
+Contact: Minchan Kim <minchan@kernel.org>
+Description:
+ The backing_dev file is read-write and sets up the backing
+ device for zram to write incompressible pages to.
+ To use it, the user should enable CONFIG_ZRAM_WRITEBACK.
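A minimal sketch, assuming CONFIG_ZRAM_WRITEBACK is enabled and /dev/sda5
is a spare partition (the device names are hypothetical); the backing
device has to be set before the zram device is initialized:

	echo /dev/sda5 > /sys/block/zram0/backing_dev
	cat /sys/block/zram0/backing_dev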
diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
index 2db2cdf42d54..7eead5f97e02 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio
+++ b/Documentation/ABI/testing/sysfs-bus-iio
@@ -119,6 +119,15 @@ Description:
unique to allow association with event codes. Units after
application of scale and offset are milliamps.
+What: /sys/bus/iio/devices/iio:deviceX/in_powerY_raw
+KernelVersion: 4.5
+Contact: linux-iio@vger.kernel.org
+Description:
+ Raw (unscaled no bias removal etc.) power measurement from
+ channel Y. The number must always be specified and
+ unique to allow association with event codes. Units after
+ application of scale and offset are milliwatts.
+
What: /sys/bus/iio/devices/iio:deviceX/in_capacitanceY_raw
KernelVersion: 3.2
Contact: linux-iio@vger.kernel.org
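As with the other raw channels, the value in milliwatts is obtained by
applying the channel's scale (and offset, if present). A sketch, assuming
device and channel index 0 and the generic attribute names:

	cat /sys/bus/iio/devices/iio:device0/in_power0_raw
	cat /sys/bus/iio/devices/iio:device0/in_power_scale
	# milliwatts = (raw + offset) * scale, with offset taken as 0 if absent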
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-lptimer-stm32 b/Documentation/ABI/testing/sysfs-bus-iio-lptimer-stm32
new file mode 100644
index 000000000000..ad2cc63e4bf8
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-iio-lptimer-stm32
@@ -0,0 +1,57 @@
+What: /sys/bus/iio/devices/iio:deviceX/in_count0_preset
+KernelVersion: 4.13
+Contact: fabrice.gasnier@st.com
+Description:
+ Reading returns the current preset value. Writing sets the
+ preset value. Encoder counts continuously from 0 to preset
+ value, depending on direction (up/down).
+
+What: /sys/bus/iio/devices/iio:deviceX/in_count_quadrature_mode_available
+KernelVersion: 4.13
+Contact: fabrice.gasnier@st.com
+Description:
+ Reading returns the list of possible quadrature modes.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_count0_quadrature_mode
+KernelVersion: 4.13
+Contact: fabrice.gasnier@st.com
+Description:
+ Configure the device counter quadrature modes:
+ - non-quadrature:
+ Encoder IN1 input serves as the count input (up
+ direction).
+ - quadrature:
+ Encoder IN1 and IN2 inputs are mixed to get direction
+ and count.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_count_polarity_available
+KernelVersion: 4.13
+Contact: fabrice.gasnier@st.com
+Description:
+ Reading returns the list of possible active edges.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_count0_polarity
+KernelVersion: 4.13
+Contact: fabrice.gasnier@st.com
+Description:
+ Configure the device encoder/counter active edge:
+ - rising-edge
+ - falling-edge
+ - both-edges
+
+ In non-quadrature mode, device counts up on active edge.
+ In quadrature mode, encoder counting scenarios are as follows:
+ ----------------------------------------------------------------
+ | Active | Level on | IN1 signal | IN2 signal |
+ | edge | opposite |------------------------------------------
+ | | signal | Rising | Falling | Rising | Falling |
+ ----------------------------------------------------------------
+ | Rising | High -> | Down | - | Up | - |
+ | edge | Low -> | Up | - | Down | - |
+ ----------------------------------------------------------------
+ | Falling | High -> | - | Up | - | Down |
+ | edge | Low -> | - | Down | - | Up |
+ ----------------------------------------------------------------
+ | Both | High -> | Down | Up | Up | Down |
+ | edges | Low -> | Up | Down | Down | Up |
+ ----------------------------------------------------------------
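A configuration sketch, assuming the low-power timer registers as
iio:device0 (the index is hypothetical):

	cd /sys/bus/iio/devices/iio:device0
	cat in_count_quadrature_mode_available
	echo quadrature > in_count0_quadrature_mode
	echo rising-edge > in_count0_polarity
	echo 65535 > in_count0_preset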
diff --git a/Documentation/ABI/testing/sysfs-bus-thunderbolt b/Documentation/ABI/testing/sysfs-bus-thunderbolt
index 2a98149943ea..392bef5bd399 100644
--- a/Documentation/ABI/testing/sysfs-bus-thunderbolt
+++ b/Documentation/ABI/testing/sysfs-bus-thunderbolt
@@ -45,6 +45,8 @@ Contact: thunderbolt-software@lists.01.org
Description: When a device supports Thunderbolt secure connect it will
have this attribute. Writing 32 byte hex string changes
authorization to use the secure connection method instead.
+ Writing an empty string clears the key and the regular connection
+ method can be used again.
What: /sys/bus/thunderbolt/devices/.../device
Date: Sep 2017
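A sketch of the key handling described above, assuming the attribute is the
device's "key" file and "0-1" is the device of interest (both are assumed
names here); the key is 32 bytes written as a 64-character hex string:

	echo 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef \
		> /sys/bus/thunderbolt/devices/0-1/key
	echo "" > /sys/bus/thunderbolt/devices/0-1/key	# clear the key again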
diff --git a/Documentation/ABI/testing/sysfs-bus-usb-lvstest b/Documentation/ABI/testing/sysfs-bus-usb-lvstest
index 5151290cf8e7..ee0046dc4192 100644
--- a/Documentation/ABI/testing/sysfs-bus-usb-lvstest
+++ b/Documentation/ABI/testing/sysfs-bus-usb-lvstest
@@ -45,3 +45,16 @@ Contact: Pratyush Anand <pratyush.anand@gmail.com>
Description:
Write to this node to issue "U3 exit" for Link Layer
Validation device. It is needed for TD.7.36.
+
+What: /sys/bus/usb/devices/.../enable_compliance
+Date: July 2017
+Description:
+ Write to this node to set the port to compliance mode to test
+ with Link Layer Validation device. It is needed for TD.7.34.
+
+What: /sys/bus/usb/devices/.../warm_reset
+Date: July 2017
+Description:
+ Write to this node to issue "Warm Reset" for Link Layer Validation
+ device. It may be needed to properly reset an xHCI 1.1 host port if
+ compliance mode needed to be explicitly enabled.
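A sketch, assuming the Link Layer Validation host port enumerates as
device "3-1" (the path is hypothetical):

	echo 1 > /sys/bus/usb/devices/3-1/enable_compliance
	echo 1 > /sys/bus/usb/devices/3-1/warm_reset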
diff --git a/Documentation/ABI/testing/sysfs-driver-altera-cvp b/Documentation/ABI/testing/sysfs-driver-altera-cvp
new file mode 100644
index 000000000000..8cde64a71edb
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-altera-cvp
@@ -0,0 +1,8 @@
+What: /sys/bus/pci/drivers/altera-cvp/chkcfg
+Date: May 2017
+Kernel Version: 4.13
+Contact: Anatolij Gustschin <agust@denx.de>
+Description:
+ Contains either 1 or 0 and controls whether configuration
+ error checking in the altera-cvp driver is turned on or
+ off.
diff --git a/Documentation/ABI/testing/sysfs-firmware-opal-powercap b/Documentation/ABI/testing/sysfs-firmware-opal-powercap
new file mode 100644
index 000000000000..c9b66ec4f165
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-firmware-opal-powercap
@@ -0,0 +1,31 @@
+What: /sys/firmware/opal/powercap
+Date: August 2017
+Contact: Linux for PowerPC mailing list <linuxppc-dev@ozlabs.org>
+Description: Powercap directory for Powernv (P8, P9) servers
+
+ Each folder in this directory contains a
+ power-cappable component.
+
+What: /sys/firmware/opal/powercap/system-powercap
+ /sys/firmware/opal/powercap/system-powercap/powercap-min
+ /sys/firmware/opal/powercap/system-powercap/powercap-max
+ /sys/firmware/opal/powercap/system-powercap/powercap-current
+Date: August 2017
+Contact: Linux for PowerPC mailing list <linuxppc-dev@ozlabs.org>
+Description: System powercap directory and attributes applicable for
+ Powernv (P8, P9) servers
+
+ This directory provides powercap information. It
+ contains the following sysfs attributes:
+
+ - powercap-min : This file provides the minimum
+ possible powercap in Watt units
+
+ - powercap-max : This file provides the maximum
+ possible powercap in Watt units
+
+ - powercap-current : This file provides the current
+ powercap set on the system. Writing to this file
+ creates a request for setting a new-powercap. The
+ powercap requested must be between powercap-min
+ and powercap-max.
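A sketch of setting a new cap, with 2000 W as a hypothetical value that
lies between the reported minimum and maximum:

	cd /sys/firmware/opal/powercap/system-powercap
	cat powercap-min powercap-max
	echo 2000 > powercap-current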
diff --git a/Documentation/ABI/testing/sysfs-firmware-opal-psr b/Documentation/ABI/testing/sysfs-firmware-opal-psr
new file mode 100644
index 000000000000..cc2ece70e365
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-firmware-opal-psr
@@ -0,0 +1,18 @@
+What: /sys/firmware/opal/psr
+Date: August 2017
+Contact: Linux for PowerPC mailing list <linuxppc-dev@ozlabs.org>
+Description: Power-Shift-Ratio directory for Powernv P9 servers
+
+ Power-Shift-Ratio allows providing hints to the firmware
+ to shift/throttle power between different entities in
+ the system. Each attribute in this directory indicates
+ a settable PSR.
+
+What: /sys/firmware/opal/psr/cpu_to_gpu_X
+Date: August 2017
+Contact: Linux for PowerPC mailing list <linuxppc-dev@ozlabs.org>
+Description: PSR sysfs attributes for Powernv P9 servers
+
+ Power-Shift-Ratio between CPU and GPU for a given chip
+ with chip-id X. This file gives the ratio (0-100)
+ which is used by OCC for power-capping.
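A sketch, assuming chip-id 0 and a hypothetical ratio of 50:

	echo 50 > /sys/firmware/opal/psr/cpu_to_gpu_0
	cat /sys/firmware/opal/psr/cpu_to_gpu_0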
diff --git a/Documentation/ABI/testing/sysfs-kernel-mm-swap b/Documentation/ABI/testing/sysfs-kernel-mm-swap
new file mode 100644
index 000000000000..587db52084c7
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-kernel-mm-swap
@@ -0,0 +1,26 @@
+What: /sys/kernel/mm/swap/
+Date: August 2017
+Contact: Linux memory management mailing list <linux-mm@kvack.org>
+Description: Interface for swapping
+
+What: /sys/kernel/mm/swap/vma_ra_enabled
+Date: August 2017
+Contact: Linux memory management mailing list <linux-mm@kvack.org>
+Description: Enable/disable VMA based swap readahead.
+
+ If set to true, the VMA based swap readahead algorithm
+ will be used for swappable anonymous pages mapped in a
+ VMA, and the global swap readahead algorithm will be
+ still used for tmpfs etc. other users. If set to
+ false, the global swap readahead algorithm will be
+ used for all swappable pages.
+
+What: /sys/kernel/mm/swap/vma_ra_max_order
+Date: August 2017
+Contact: Linux memory management mailing list <linux-mm@kvack.org>
+Description: The max readahead size in order for VMA based swap readahead
+
+ The VMA based swap readahead algorithm will read ahead at
+ most 1 << max_order pages for each readahead. The
+ real readahead size for each readahead will be scaled
+ according to the estimation algorithm.
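A sketch of enabling VMA based readahead and capping the window at
1 << 3 = 8 pages (the order value is an arbitrary example):

	echo true > /sys/kernel/mm/swap/vma_ra_enabled
	echo 3 > /sys/kernel/mm/swap/vma_ra_max_order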
diff --git a/Documentation/ABI/testing/sysfs-power b/Documentation/ABI/testing/sysfs-power
index f523e5a3ac33..713cab1d5f12 100644
--- a/Documentation/ABI/testing/sysfs-power
+++ b/Documentation/ABI/testing/sysfs-power
@@ -273,3 +273,15 @@ Description:
This output is useful for system wakeup diagnostics of spurious
wakeup interrupts.
+
+What: /sys/power/pm_debug_messages
+Date: July 2017
+Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
+Description:
+ The /sys/power/pm_debug_messages file controls the printing
+ of debug messages from the system suspend/hibernation
+ infrastructure to the kernel log.
+
+ Writing a "1" to this file enables the debug messages and
+ writing a "0" (default) to it disables them. Reads from
+ this file return the current value.
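A sketch of turning the messages on around a suspend cycle, assuming
suspend-to-RAM is triggered through /sys/power/state:

	echo 1 > /sys/power/pm_debug_messages
	echo mem > /sys/power/state
	dmesg | grep -i pm
	echo 0 > /sys/power/pm_debug_messages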
diff --git a/Documentation/Makefile b/Documentation/Makefile
index a42320385df3..85f7856f0092 100644
--- a/Documentation/Makefile
+++ b/Documentation/Makefile
@@ -22,6 +22,8 @@ ifeq ($(HAVE_SPHINX),0)
.DEFAULT:
$(warning The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed and in PATH, or set the SPHINXBUILD make variable to point to the full path of the '$(SPHINXBUILD)' executable.)
+ @echo
+ @./scripts/sphinx-pre-install
@echo " SKIP Sphinx $@ target."
else # HAVE_SPHINX
@@ -95,16 +97,6 @@ endif # HAVE_SPHINX
# The following targets are independent of HAVE_SPHINX, and the rules should
# work or silently pass without Sphinx.
-# no-ops for the Sphinx toolchain
-sgmldocs:
- @:
-psdocs:
- @:
-mandocs:
- @:
-installmandocs:
- @:
-
cleandocs:
$(Q)rm -rf $(BUILDDIR)
$(Q)$(MAKE) BUILDDIR=$(abspath $(BUILDDIR)) $(build)=Documentation/media clean
diff --git a/Documentation/RCU/Design/Requirements/Requirements.html b/Documentation/RCU/Design/Requirements/Requirements.html
index 95b30fa25d56..62e847bcdcdd 100644
--- a/Documentation/RCU/Design/Requirements/Requirements.html
+++ b/Documentation/RCU/Design/Requirements/Requirements.html
@@ -2080,6 +2080,8 @@ Some of the relevant points of interest are as follows:
<li> <a href="#Scheduler and RCU">Scheduler and RCU</a>.
<li> <a href="#Tracing and RCU">Tracing and RCU</a>.
<li> <a href="#Energy Efficiency">Energy Efficiency</a>.
+<li> <a href="#Scheduling-Clock Interrupts and RCU">
+ Scheduling-Clock Interrupts and RCU</a>.
<li> <a href="#Memory Efficiency">Memory Efficiency</a>.
<li> <a href="#Performance, Scalability, Response Time, and Reliability">
Performance, Scalability, Response Time, and Reliability</a>.
@@ -2532,6 +2534,134 @@ I learned of many of these requirements via angry phone calls:
Flaming me on the Linux-kernel mailing list was apparently not
sufficient to fully vent their ire at RCU's energy-efficiency bugs!
+<h3><a name="Scheduling-Clock Interrupts and RCU">
+Scheduling-Clock Interrupts and RCU</a></h3>
+
+<p>
+The kernel transitions between in-kernel non-idle execution, userspace
+execution, and the idle loop.
+Depending on kernel configuration, RCU handles these states differently:
+
+<table border=3>
+<tr><th><tt>HZ</tt> Kconfig</th>
+ <th>In-Kernel</th>
+ <th>Usermode</th>
+ <th>Idle</th></tr>
+<tr><th align="left"><tt>HZ_PERIODIC</tt></th>
+ <td>Can rely on scheduling-clock interrupt.</td>
+ <td>Can rely on scheduling-clock interrupt and its
+ detection of interrupt from usermode.</td>
+ <td>Can rely on RCU's dyntick-idle detection.</td></tr>
+<tr><th align="left"><tt>NO_HZ_IDLE</tt></th>
+ <td>Can rely on scheduling-clock interrupt.</td>
+ <td>Can rely on scheduling-clock interrupt and its
+ detection of interrupt from usermode.</td>
+ <td>Can rely on RCU's dyntick-idle detection.</td></tr>
+<tr><th align="left"><tt>NO_HZ_FULL</tt></th>
+ <td>Can only sometimes rely on scheduling-clock interrupt.
+ In other cases, it is necessary to bound kernel execution
+ times and/or use IPIs.</td>
+ <td>Can rely on RCU's dyntick-idle detection.</td>
+ <td>Can rely on RCU's dyntick-idle detection.</td></tr>
+</table>
+
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+ Why can't <tt>NO_HZ_FULL</tt> in-kernel execution rely on the
+ scheduling-clock interrupt, just like <tt>HZ_PERIODIC</tt>
+ and <tt>NO_HZ_IDLE</tt> do?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+ Because, as a performance optimization, <tt>NO_HZ_FULL</tt>
+ does not necessarily re-enable the scheduling-clock interrupt
+ on entry to each and every system call.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
+
+<p>
+However, RCU must be reliably informed as to whether any given
+CPU is currently in the idle loop, and, for <tt>NO_HZ_FULL</tt>,
+also whether that CPU is executing in usermode, as discussed
+<a href="#Energy Efficiency">earlier</a>.
+It also requires that the scheduling-clock interrupt be enabled when
+RCU needs it to be:
+
+<ol>
+<li> If a CPU is either idle or executing in usermode, and RCU believes
+ it is non-idle, the scheduling-clock tick had better be running.
+ Otherwise, you will get RCU CPU stall warnings. Or at best,
+ very long (11-second) grace periods, with a pointless IPI waking
+ the CPU from time to time.
+<li> If a CPU is in a portion of the kernel that executes RCU read-side
+ critical sections, and RCU believes this CPU to be idle, you will get
+ random memory corruption. <b>DON'T DO THIS!!!</b>
+
+ <br>This is one reason to test with lockdep, which will complain
+ about this sort of thing.
+<li> If a CPU is in a portion of the kernel that is absolutely
+ positively no-joking guaranteed to never execute any RCU read-side
+	critical sections, and RCU believes this CPU to be idle,
+ no problem. This sort of thing is used by some architectures
+ for light-weight exception handlers, which can then avoid the
+ overhead of <tt>rcu_irq_enter()</tt> and <tt>rcu_irq_exit()</tt>
+ at exception entry and exit, respectively.
+ Some go further and avoid the entireties of <tt>irq_enter()</tt>
+ and <tt>irq_exit()</tt>.
+
+ <br>Just make very sure you are running some of your tests with
+ <tt>CONFIG_PROVE_RCU=y</tt>, just in case one of your code paths
+ was in fact joking about not doing RCU read-side critical sections.
+<li> If a CPU is executing in the kernel with the scheduling-clock
+ interrupt disabled and RCU believes this CPU to be non-idle,
+ and if the CPU goes idle (from an RCU perspective) every few
+ jiffies, no problem. It is usually OK for there to be the
+ occasional gap between idle periods of up to a second or so.
+
+ <br>If the gap grows too long, you get RCU CPU stall warnings.
+<li> If a CPU is either idle or executing in usermode, and RCU believes
+ it to be idle, of course no problem.
+<li> If a CPU is executing in the kernel, the kernel code
+ path is passing through quiescent states at a reasonable
+ frequency (preferably about once per few jiffies, but the
+ occasional excursion to a second or so is usually OK) and the
+ scheduling-clock interrupt is enabled, of course no problem.
+
+ <br>If the gap between a successive pair of quiescent states grows
+ too long, you get RCU CPU stall warnings.
+</ol>
+
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+ But what if my driver has a hardware interrupt handler
+ that can run for many seconds?
+	I cannot invoke <tt>schedule()</tt> from a hardware
+ interrupt handler, after all!
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+ One approach is to do <tt>rcu_irq_exit();rcu_irq_enter();</tt>
+ every so often.
+ But given that long-running interrupt handlers can cause
+ other problems, not least for response time, shouldn't you
+ work to keep your interrupt handler's runtime within reasonable
+ bounds?
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
+
+<p>
+But as long as RCU is properly informed of kernel state transitions between
+in-kernel execution, usermode execution, and idle, and as long as the
+scheduling-clock interrupt is enabled when RCU needs it to be, you
+can rest assured that the bugs you encounter will be in some other
+part of RCU or some other part of the kernel!
+
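[A minimal sketch of the approach mentioned in the Quick Quiz answer above,
not taken from the patch itself: a hypothetical long-running hardware
interrupt handler that periodically tells RCU this CPU is momentarily idle.
The mydev structure and work helpers are assumed names.]

	static irqreturn_t mydev_irq_handler(int irq, void *dev_id)
	{
		struct mydev *dev = dev_id;		/* hypothetical device */

		while (mydev_more_work(dev)) {		/* hypothetical helper */
			mydev_do_some_work(dev);	/* hypothetical helper */

			/* Let RCU see a momentary idle period so that
			 * grace periods are not held up for many seconds. */
			rcu_irq_exit();
			rcu_irq_enter();
		}
		return IRQ_HANDLED;
	}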
<h3><a name="Memory Efficiency">Memory Efficiency</a></h3>
<p>
diff --git a/Documentation/RCU/checklist.txt b/Documentation/RCU/checklist.txt
index 6beda556faf3..49747717d905 100644
--- a/Documentation/RCU/checklist.txt
+++ b/Documentation/RCU/checklist.txt
@@ -23,6 +23,14 @@ over a rather long period of time, but improvements are always welcome!
Yet another exception is where the low real-time latency of RCU's
read-side primitives is critically important.
+ One final exception is where RCU readers are used to prevent
+ the ABA problem (https://en.wikipedia.org/wiki/ABA_problem)
+ for lockless updates. This does result in the mildly
+ counter-intuitive situation where rcu_read_lock() and
+	rcu_read_unlock() are used to protect updates; however, this
+ approach provides the same potential simplifications that garbage
+ collectors do.
+
1. Does the update code have proper mutual exclusion?
RCU does allow -readers- to run (almost) naked, but -writers- must
@@ -40,7 +48,9 @@ over a rather long period of time, but improvements are always welcome!
explain how this single task does not become a major bottleneck on
big multiprocessor machines (for example, if the task is updating
information relating to itself that other tasks can read, there
- by definition can be no bottleneck).
+ by definition can be no bottleneck). Note that the definition
+ of "large" has changed significantly: Eight CPUs was "large"
+ in the year 2000, but a hundred CPUs was unremarkable in 2017.
2. Do the RCU read-side critical sections make proper use of
rcu_read_lock() and friends? These primitives are needed
@@ -55,6 +65,12 @@ over a rather long period of time, but improvements are always welcome!
Disabling of preemption can serve as rcu_read_lock_sched(), but
is less readable.
+ Letting RCU-protected pointers "leak" out of an RCU read-side
+	critical section is every bit as bad as letting them leak out
+ from under a lock. Unless, of course, you have arranged some
+ other means of protection, such as a lock or a reference count
+ -before- letting them out of the RCU read-side critical section.
+
3. Does the update code tolerate concurrent accesses?
The whole point of RCU is to permit readers to run without
@@ -78,10 +94,10 @@ over a rather long period of time, but improvements are always welcome!
This works quite well, also.
- c. Make updates appear atomic to readers. For example,
+ c. Make updates appear atomic to readers. For example,
pointer updates to properly aligned fields will
appear atomic, as will individual atomic primitives.
- Sequences of perations performed under a lock will -not-
+ Sequences of operations performed under a lock will -not-
appear to be atomic to RCU readers, nor will sequences
of multiple atomic primitives.
@@ -168,8 +184,8 @@ over a rather long period of time, but improvements are always welcome!
5. If call_rcu(), or a related primitive such as call_rcu_bh(),
call_rcu_sched(), or call_srcu() is used, the callback function
- must be written to be called from softirq context. In particular,
- it cannot block.
+ will be called from softirq context. In particular, it cannot
+ block.
6. Since synchronize_rcu() can block, it cannot be called from
any sort of irq context. The same rule applies for
@@ -178,11 +194,14 @@ over a rather long period of time, but improvements are always welcome!
synchronize_sched_expedite(), and synchronize_srcu_expedited().
The expedited forms of these primitives have the same semantics
- as the non-expedited forms, but expediting is both expensive
- and unfriendly to real-time workloads. Use of the expedited
- primitives should be restricted to rare configuration-change
- operations that would not normally be undertaken while a real-time
- workload is running.
+ as the non-expedited forms, but expediting is both expensive and
+ (with the exception of synchronize_srcu_expedited()) unfriendly
+ to real-time workloads. Use of the expedited primitives should
+ be restricted to rare configuration-change operations that would
+ not normally be undertaken while a real-time workload is running.
+ However, real-time workloads can use rcupdate.rcu_normal kernel
+ boot parameter to completely disable expedited grace periods,
+ though this might have performance implications.
In particular, if you find yourself invoking one of the expedited
primitives repeatedly in a loop, please do everyone a favor:
@@ -193,11 +212,6 @@ over a rather long period of time, but improvements are always welcome!
of the system, especially to real-time workloads running on
the rest of the system.
- In addition, it is illegal to call the expedited forms from
- a CPU-hotplug notifier, or while holding a lock that is acquired
- by a CPU-hotplug notifier. Failing to observe this restriction
- will result in deadlock.
-
7. If the updater uses call_rcu() or synchronize_rcu(), then the
corresponding readers must use rcu_read_lock() and
rcu_read_unlock(). If the updater uses call_rcu_bh() or
@@ -321,7 +335,7 @@ over a rather long period of time, but improvements are always welcome!
Similarly, disabling preemption is not an acceptable substitute
for rcu_read_lock(). Code that attempts to use preemption
disabling where it should be using rcu_read_lock() will break
- in real-time kernel builds.
+ in CONFIG_PREEMPT=y kernel builds.
If you want to wait for interrupt handlers, NMI handlers, and
code under the influence of preempt_disable(), you instead
@@ -356,23 +370,22 @@ over a rather long period of time, but improvements are always welcome!
not the case, a self-spawning RCU callback would prevent the
victim CPU from ever going offline.)
-14. SRCU (srcu_read_lock(), srcu_read_unlock(), srcu_dereference(),
- synchronize_srcu(), synchronize_srcu_expedited(), and call_srcu())
- may only be invoked from process context. Unlike other forms of
- RCU, it -is- permissible to block in an SRCU read-side critical
- section (demarked by srcu_read_lock() and srcu_read_unlock()),
- hence the "SRCU": "sleepable RCU". Please note that if you
- don't need to sleep in read-side critical sections, you should be
- using RCU rather than SRCU, because RCU is almost always faster
- and easier to use than is SRCU.
-
- Also unlike other forms of RCU, explicit initialization
- and cleanup is required via init_srcu_struct() and
- cleanup_srcu_struct(). These are passed a "struct srcu_struct"
- that defines the scope of a given SRCU domain. Once initialized,
- the srcu_struct is passed to srcu_read_lock(), srcu_read_unlock()
- synchronize_srcu(), synchronize_srcu_expedited(), and call_srcu().
- A given synchronize_srcu() waits only for SRCU read-side critical
+14. Unlike other forms of RCU, it -is- permissible to block in an
+ SRCU read-side critical section (demarked by srcu_read_lock()
+ and srcu_read_unlock()), hence the "SRCU": "sleepable RCU".
+ Please note that if you don't need to sleep in read-side critical
+ sections, you should be using RCU rather than SRCU, because RCU
+ is almost always faster and easier to use than is SRCU.
+
+ Also unlike other forms of RCU, explicit initialization and
+ cleanup is required either at build time via DEFINE_SRCU()
+ or DEFINE_STATIC_SRCU() or at runtime via init_srcu_struct()
+ and cleanup_srcu_struct(). These last two are passed a
+ "struct srcu_struct" that defines the scope of a given
+ SRCU domain. Once initialized, the srcu_struct is passed
+ to srcu_read_lock(), srcu_read_unlock() synchronize_srcu(),
+ synchronize_srcu_expedited(), and call_srcu(). A given
+ synchronize_srcu() waits only for SRCU read-side critical
sections governed by srcu_read_lock() and srcu_read_unlock()
calls that have been passed the same srcu_struct. This property
is what makes sleeping read-side critical sections tolerable --
@@ -390,10 +403,16 @@ over a rather long period of time, but improvements are always welcome!
Therefore, SRCU should be used in preference to rw_semaphore
only in extremely read-intensive situations, or in situations
requiring SRCU's read-side deadlock immunity or low read-side
- realtime latency.
+ realtime latency. You should also consider percpu_rw_semaphore
+ when you need lightweight readers.
- Note that, rcu_assign_pointer() relates to SRCU just as it does
- to other forms of RCU.
+ SRCU's expedited primitive (synchronize_srcu_expedited())
+ never sends IPIs to other CPUs, so it is easier on
+ real-time workloads than is synchronize_rcu_expedited(),
+ synchronize_rcu_bh_expedited() or synchronize_sched_expedited().
+
+ Note that rcu_dereference() and rcu_assign_pointer() relate to
+ SRCU just as they do to other forms of RCU.
15. The whole point of call_rcu(), synchronize_rcu(), and friends
is to wait until all pre-existing readers have finished before
@@ -435,3 +454,33 @@ over a rather long period of time, but improvements are always welcome!
These debugging aids can help you find problems that are
otherwise extremely difficult to spot.
+
+18. If you register a callback using call_rcu(), call_rcu_bh(),
+ call_rcu_sched(), or call_srcu(), and pass in a function defined
+	within a loadable module, then it is necessary to wait for
+ all pending callbacks to be invoked after the last invocation
+ and before unloading that module. Note that it is absolutely
+ -not- sufficient to wait for a grace period! The current (say)
+ synchronize_rcu() implementation waits only for all previous
+ callbacks registered on the CPU that synchronize_rcu() is running
+ on, but it is -not- guaranteed to wait for callbacks registered
+ on other CPUs.
+
+ You instead need to use one of the barrier functions:
+
+ o call_rcu() -> rcu_barrier()
+ o call_rcu_bh() -> rcu_barrier_bh()
+ o call_rcu_sched() -> rcu_barrier_sched()
+ o call_srcu() -> srcu_barrier()
+
+ However, these barrier functions are absolutely -not- guaranteed
+ to wait for a grace period. In fact, if there are no call_rcu()
+ callbacks waiting anywhere in the system, rcu_barrier() is within
+ its rights to return immediately.
+
+ So if you need to wait for both an RCU grace period and for
+ all pre-existing call_rcu() callbacks, you will need to execute
+ both rcu_barrier() and synchronize_rcu(), if necessary, using
+	something like workqueues to execute them concurrently.
+
+ See rcubarrier.txt for more information.
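A minimal sketch of item 18 above, using a hypothetical module-local
structure and reclaim function: the module exit handler invokes
rcu_barrier() so that no call_rcu() callback pointing into module text
is still pending at unload time.

	struct foo {				/* hypothetical structure */
		struct rcu_head rcu;
		/* payload */
	};

	static void foo_reclaim(struct rcu_head *head)
	{
		kfree(container_of(head, struct foo, rcu));
	}

	static void foo_delete(struct foo *p)
	{
		call_rcu(&p->rcu, foo_reclaim);	/* callback lives in this module */
	}

	static void __exit foo_exit(void)
	{
		/* Wait for all pending foo_reclaim() invocations;
		 * synchronize_rcu() alone would not be sufficient. */
		rcu_barrier();
	}
	module_exit(foo_exit);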
diff --git a/Documentation/RCU/rcu.txt b/Documentation/RCU/rcu.txt
index 745f429fda79..7d4ae110c2c9 100644
--- a/Documentation/RCU/rcu.txt
+++ b/Documentation/RCU/rcu.txt
@@ -76,15 +76,12 @@ o I hear that RCU is patented? What is with that?
Of these, one was allowed to lapse by the assignee, and the
others have been contributed to the Linux kernel under GPL.
There are now also LGPL implementations of user-level RCU
- available (http://lttng.org/?q=node/18).
+ available (http://liburcu.org/).
o I hear that RCU needs work in order to support realtime kernels?
- This work is largely completed. Realtime-friendly RCU can be
- enabled via the CONFIG_PREEMPT_RCU kernel configuration
- parameter. However, work is in progress for enabling priority
- boosting of preempted RCU read-side critical sections. This is
- needed if you have CPU-bound realtime threads.
+ Realtime-friendly RCU can be enabled via the CONFIG_PREEMPT_RCU
+ kernel configuration parameter.
o Where can I find more information on RCU?
diff --git a/Documentation/RCU/rcu_dereference.txt b/Documentation/RCU/rcu_dereference.txt
index b2a613f16d74..1acb26b09b48 100644
--- a/Documentation/RCU/rcu_dereference.txt
+++ b/Documentation/RCU/rcu_dereference.txt
@@ -25,35 +25,35 @@ o You must use one of the rcu_dereference() family of primitives
for an example where the compiler can in fact deduce the exact
value of the pointer, and thus cause misordering.
+o You are only permitted to use rcu_dereference on pointer values.
+ The compiler simply knows too much about integral values to
+ trust it to carry dependencies through integer operations.
+ There are a very few exceptions, namely that you can temporarily
+ cast the pointer to uintptr_t in order to:
+
+ o Set bits and clear bits down in the must-be-zero low-order
+ bits of that pointer. This clearly means that the pointer
+ must have alignment constraints, for example, this does
+ -not- work in general for char* pointers.
+
+ o XOR bits to translate pointers, as is done in some
+ classic buddy-allocator algorithms.
+
+	It is important to cast the value back to a pointer before
+ doing much of anything else with it.
+
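A minimal sketch, assuming a hypothetical pointer-aligned structure and a
hypothetical RCU-protected global pointer gp, of the uintptr_t exception
described above:

	struct foo *p, *marked;		/* struct foo assumed pointer-aligned */

	rcu_read_lock();
	p = rcu_dereference(gp);
	marked = (struct foo *)((uintptr_t)p | 0x1);	/* set a must-be-zero bit */
	p = (struct foo *)((uintptr_t)marked & ~(uintptr_t)0x1); /* cast back */
	do_something_with(p);		/* hypothetical use of the pointer */
	rcu_read_unlock();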
o Avoid cancellation when using the "+" and "-" infix arithmetic
operators. For example, for a given variable "x", avoid
- "(x-x)". There are similar arithmetic pitfalls from other
- arithmetic operators, such as "(x*0)", "(x/(x+1))" or "(x%1)".
- The compiler is within its rights to substitute zero for all of
- these expressions, so that subsequent accesses no longer depend
- on the rcu_dereference(), again possibly resulting in bugs due
- to misordering.
+ "(x-(uintptr_t)x)" for char* pointers. The compiler is within its
+ rights to substitute zero for this sort of expression, so that
+ subsequent accesses no longer depend on the rcu_dereference(),
+ again possibly resulting in bugs due to misordering.
Of course, if "p" is a pointer from rcu_dereference(), and "a"
and "b" are integers that happen to be equal, the expression
"p+a-b" is safe because its value still necessarily depends on
the rcu_dereference(), thus maintaining proper ordering.
-o Avoid all-zero operands to the bitwise "&" operator, and
- similarly avoid all-ones operands to the bitwise "|" operator.
- If the compiler is able to deduce the value of such operands,
- it is within its rights to substitute the corresponding constant
- for the bitwise operation. Once again, this causes subsequent
- accesses to no longer depend on the rcu_dereference(), causing
- bugs due to misordering.
-
- Please note that single-bit operands to bitwise "&" can also
- be dangerous. At this point, the compiler knows that the
- resulting value can only take on one of two possible values.
- Therefore, a very small amount of additional information will
- allow the compiler to deduce the exact value, which again can
- result in misordering.
-
o If you are using RCU to protect JITed functions, so that the
"()" function-invocation operator is applied to a value obtained
(directly or indirectly) from rcu_dereference(), you may need to
@@ -61,25 +61,6 @@ o If you are using RCU to protect JITed functions, so that the
This issue arises on some systems when a newly JITed function is
using the same memory that was used by an earlier JITed function.
-o Do not use the results from the boolean "&&" and "||" when
- dereferencing. For example, the following (rather improbable)
- code is buggy:
-
- int *p;
- int *q;
-
- ...
-
- p = rcu_dereference(gp)
- q = &global_q;
- q += p != &oom_p1 && p != &oom_p2;
- r1 = *q; /* BUGGY!!! */
-
- The reason this is buggy is that "&&" and "||" are often compiled
- using branches. While weak-memory machines such as ARM or PowerPC
- do order stores after such branches, they can speculate loads,
- which can result in misordering bugs.
-
o Do not use the results from relational operators ("==", "!=",
">", ">=", "<", or "<=") when dereferencing. For example,
the following (quite strange) code is buggy:
diff --git a/Documentation/RCU/rcubarrier.txt b/Documentation/RCU/rcubarrier.txt
index b10cfe711e68..5d7759071a3e 100644
--- a/Documentation/RCU/rcubarrier.txt
+++ b/Documentation/RCU/rcubarrier.txt
@@ -263,6 +263,11 @@ Quick Quiz #2: What happens if CPU 0's rcu_barrier_func() executes
are delayed for a full grace period? Couldn't this result in
rcu_barrier() returning prematurely?
+The current rcu_barrier() implementation is more complex, due to the need
+to avoid disturbing idle CPUs (especially on battery-powered systems)
+and the need to minimally disturb non-idle CPUs in real-time systems.
+However, the code above illustrates the concepts.
+
rcu_barrier() Summary
diff --git a/Documentation/RCU/torture.txt b/Documentation/RCU/torture.txt
index 278f6a9383b6..55918b54808b 100644
--- a/Documentation/RCU/torture.txt
+++ b/Documentation/RCU/torture.txt
@@ -276,15 +276,17 @@ o "Free-Block Circulation": Shows the number of torture structures
somehow gets incremented farther than it should.
Different implementations of RCU can provide implementation-specific
-additional information. For example, SRCU provides the following
+additional information. For example, Tree SRCU provides the following
additional line:
- srcu-torture: per-CPU(idx=1): 0(0,1) 1(0,1) 2(0,0) 3(0,1)
+ srcud-torture: Tree SRCU per-CPU(idx=0): 0(35,-21) 1(-4,24) 2(1,1) 3(-26,20) 4(28,-47) 5(-9,4) 6(-10,14) 7(-14,11) T(1,6)
-This line shows the per-CPU counter state. The numbers in parentheses are
-the values of the "old" and "current" counters for the corresponding CPU.
-The "idx" value maps the "old" and "current" values to the underlying
-array, and is useful for debugging.
+This line shows the per-CPU counter state, in this case for Tree SRCU
+using a dynamically allocated srcu_struct (hence "srcud-" rather than
+"srcu-"). The numbers in parentheses are the values of the "old" and
+"current" counters for the corresponding CPU. The "idx" value maps the
+"old" and "current" values to the underlying array, and is useful for
+debugging. The final "T" entry contains the totals of the counters.
USAGE
@@ -304,3 +306,9 @@ checked for such errors. The "rmmod" command forces a "SUCCESS",
"FAILURE", or "RCU_HOTPLUG" indication to be printk()ed. The first
two are self-explanatory, while the last indicates that while there
were no RCU failures, CPU-hotplug problems were detected.
+
+However, the tools/testing/selftests/rcutorture/bin/kvm.sh script
+provides better automation, including automatic failure analysis.
+It assumes a qemu/kvm-enabled platform, and runs guest OSes out of initrd.
+See tools/testing/selftests/rcutorture/doc/initrd.txt for instructions
+on setting up such an initrd.
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
index 8ed6c9f6133c..df62466da4e0 100644
--- a/Documentation/RCU/whatisRCU.txt
+++ b/Documentation/RCU/whatisRCU.txt
@@ -890,6 +890,8 @@ SRCU: Critical sections Grace period Barrier
srcu_read_lock_held
SRCU: Initialization/cleanup
+ DEFINE_SRCU
+ DEFINE_STATIC_SRCU
init_srcu_struct
cleanup_srcu_struct
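A minimal usage sketch, with hypothetical names, of the build-time
initialization listed above together with the basic SRCU read and update
primitives:

	DEFINE_STATIC_SRCU(my_srcu);		/* build-time SRCU domain */
	static int shared_val;

	int my_read(void)
	{
		int idx, val;

		idx = srcu_read_lock(&my_srcu);
		val = READ_ONCE(shared_val);	/* readers may sleep here */
		srcu_read_unlock(&my_srcu, idx);
		return val;
	}

	void my_update(int new_val)
	{
		WRITE_ONCE(shared_val, new_val);
		synchronize_srcu(&my_srcu);	/* wait for pre-existing readers */
	}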
@@ -913,7 +915,8 @@ a. Will readers need to block? If so, you need SRCU.
b. What about the -rt patchset? If readers would need to block
in an non-rt kernel, you need SRCU. If readers would block
in a -rt kernel, but not in a non-rt kernel, SRCU is not
- necessary.
+ necessary. (The -rt patchset turns spinlocks into sleeplocks,
+ hence this distinction.)
c. Do you need to treat NMI handlers, hardirq handlers,
and code segments with preemption disabled (whether
diff --git a/Documentation/admin-guide/devices.txt b/Documentation/admin-guide/devices.txt
index 6b71852dadc2..4ec843123cc3 100644
--- a/Documentation/admin-guide/devices.txt
+++ b/Documentation/admin-guide/devices.txt
@@ -3081,3 +3081,8 @@
1 = /dev/osd1 Second OSD Device
...
255 = /dev/osd255 256th OSD Device
+
+ 384-511 char RESERVED FOR DYNAMIC ASSIGNMENT
+ Character devices that request a dynamic allocation of major
+ number will take numbers starting from 511 and downward,
+ once the 234-254 range is full.
diff --git a/Documentation/admin-guide/kernel-parameters.rst b/Documentation/admin-guide/kernel-parameters.rst
index d76ab3907e2b..b2598cc9834c 100644
--- a/Documentation/admin-guide/kernel-parameters.rst
+++ b/Documentation/admin-guide/kernel-parameters.rst
@@ -138,6 +138,7 @@ parameter is applicable::
PPT Parallel port support is enabled.
PS2 Appropriate PS/2 support is enabled.
RAM RAM disk support is enabled.
+ RDT Intel Resource Director Technology.
S390 S390 architecture is enabled.
SCSI Appropriate SCSI support is enabled.
A lot of drivers have their options described inside
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index d9c171ce4190..86b0e8ec8ad7 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -2233,6 +2233,17 @@
memory contents and reserves bad memory
regions that are detected.
+ mem_encrypt= [X86-64] AMD Secure Memory Encryption (SME) control
+ Valid arguments: on, off
+ Default (depends on kernel configuration option):
+ on (CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT=y)
+ off (CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT=n)
+ mem_encrypt=on: Activate SME
+ mem_encrypt=off: Do not activate SME
+
+ Refer to Documentation/x86/amd-memory-encryption.txt
+ for details on when memory encryption can be activated.
+
mem_sleep_default= [SUSPEND] Default system suspend mode:
s2idle - Suspend-To-Idle
shallow - Power-On Suspend or equivalent (if supported)
@@ -2633,9 +2644,10 @@
In kernels built with CONFIG_NO_HZ_FULL=y, set
the specified list of CPUs whose tick will be stopped
whenever possible. The boot CPU will be forced outside
- the range to maintain the timekeeping.
- The CPUs in this range must also be included in the
- rcu_nocbs= set.
+ the range to maintain the timekeeping. Any CPUs
+ in this list will have their RCU callbacks offloaded,
+ just as if they had also been called out in the
+ rcu_nocbs= boot parameter.
noiotrap [SH] Disables trapped I/O port accesses.
@@ -2696,6 +2708,8 @@
nopat [X86] Disable PAT (page attribute table extension of
pagetables) support.
+ nopcid [X86-64] Disable the PCID cpu feature.
+
norandmaps Don't use address space randomization. Equivalent to
echo 0 > /proc/sys/kernel/randomize_va_space
@@ -2769,7 +2783,7 @@
Allowed values are enable and disable
numa_zonelist_order= [KNL, BOOT] Select zonelist order for NUMA.
- one of ['zone', 'node', 'default'] can be specified
+ 'node', 'default' can be specified
This can be set from sysctl after boot.
See Documentation/sysctl/vm.txt for details.
@@ -3598,6 +3612,12 @@
Run specified binary instead of /init from the ramdisk,
used for early userspace startup. See initrd.
+ rdt= [HW,X86,RDT]
+ Turn on/off individual RDT features. List is:
+ cmt, mbmtotal, mbmlocal, l3cat, l3cdp, l2cat, mba.
+ E.g. to turn on cmt and turn off mba use:
+ rdt=cmt,!mba
+
reboot= [KNL]
Format (x86 or x86_64):
[w[arm] | c[old] | h[ard] | s[oft] | g[pio]] \
@@ -4375,6 +4395,10 @@
decrease the size and leave more room for directly
mapped kernel RAM.
+ vmcp_cma=nn[MG] [KNL,S390]
+ Sets the memory size reserved for contiguous memory
+ allocations for the vmcp device driver.
+
vmhalt= [KNL,S390] Perform z/VM CP command after system halt.
Format: <command>
diff --git a/Documentation/admin-guide/pm/cpufreq.rst b/Documentation/admin-guide/pm/cpufreq.rst
index 463cf7e73db8..47153e64dfb5 100644
--- a/Documentation/admin-guide/pm/cpufreq.rst
+++ b/Documentation/admin-guide/pm/cpufreq.rst
@@ -237,6 +237,14 @@ are the following:
This attribute is not present if the scaling driver in use does not
support it.
+``cpuinfo_cur_freq``
+ Current frequency of the CPUs belonging to this policy as obtained from
+ the hardware (in KHz).
+
+ This is expected to be the frequency the hardware actually runs at.
+ If that frequency cannot be determined, this attribute should not
+ be present.
+
``cpuinfo_max_freq``
Maximum possible operating frequency the CPUs belonging to this policy
can run at (in kHz).
@@ -471,14 +479,6 @@ This governor exposes the following tunables:
# echo `$(($(cat cpuinfo_transition_latency) * 750 / 1000)) > ondemand/sampling_rate
-
-``min_sampling_rate``
- The minimum value of ``sampling_rate``.
-
- Equal to 10000 (10 ms) if :c:macro:`CONFIG_NO_HZ_COMMON` and
- :c:data:`tick_nohz_active` are both set or to 20 times the value of
- :c:data:`jiffies` in microseconds otherwise.
-
``up_threshold``
If the estimated CPU load is above this value (in percent), the governor
will set the frequency to the maximum value allowed for the policy.
diff --git a/Documentation/admin-guide/pm/index.rst b/Documentation/admin-guide/pm/index.rst
index 7f148f76f432..49237ac73442 100644
--- a/Documentation/admin-guide/pm/index.rst
+++ b/Documentation/admin-guide/pm/index.rst
@@ -5,12 +5,6 @@ Power Management
.. toctree::
:maxdepth: 2
- cpufreq
- intel_pstate
-
-.. only:: subproject and html
-
- Indices
- =======
-
- * :ref:`genindex`
+ strategies
+ system-wide
+ working-state
diff --git a/Documentation/admin-guide/pm/intel_pstate.rst b/Documentation/admin-guide/pm/intel_pstate.rst
index 1d6249825efc..d2b6fda3d67b 100644
--- a/Documentation/admin-guide/pm/intel_pstate.rst
+++ b/Documentation/admin-guide/pm/intel_pstate.rst
@@ -167,35 +167,17 @@ is set.
``powersave``
.............
-Without HWP, this P-state selection algorithm generally depends on the
-processor model and/or the system profile setting in the ACPI tables and there
-are two variants of it.
-
-One of them is used with processors from the Atom line and (regardless of the
-processor model) on platforms with the system profile in the ACPI tables set to
-"mobile" (laptops mostly), "tablet", "appliance PC", "desktop", or
-"workstation". It is also used with processors supporting the HWP feature if
-that feature has not been enabled (that is, with the ``intel_pstate=no_hwp``
-argument in the kernel command line). It is similar to the algorithm
+Without HWP, this P-state selection algorithm is similar to the algorithm
implemented by the generic ``schedutil`` scaling governor except that the
utilization metric used by it is based on numbers coming from feedback
registers of the CPU. It generally selects P-states proportional to the
-current CPU utilization, so it is referred to as the "proportional" algorithm.
-
-The second variant of the ``powersave`` P-state selection algorithm, used in all
-of the other cases (generally, on processors from the Core line, so it is
-referred to as the "Core" algorithm), is based on the values read from the APERF
-and MPERF feedback registers and the previously requested target P-state.
-It does not really take CPU utilization into account explicitly, but as a rule
-it causes the CPU P-state to ramp up very quickly in response to increased
-utilization which is generally desirable in server environments.
-
-Regardless of the variant, this algorithm is run by the driver's utilization
-update callback for the given CPU when it is invoked by the CPU scheduler, but
-not more often than every 10 ms (that can be tweaked via ``debugfs`` in `this
-particular case <Tuning Interface in debugfs_>`_). Like in the ``performance``
-case, the hardware configuration is not touched if the new P-state turns out to
-be the same as the current one.
+current CPU utilization.
+
+This algorithm is run by the driver's utilization update callback for the
+given CPU when it is invoked by the CPU scheduler, but not more often than
+every 10 ms. Like in the ``performance`` case, the hardware configuration
+is not touched if the new P-state turns out to be the same as the current
+one.
This is the default P-state selection algorithm if the
:c:macro:`CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE` kernel configuration option
@@ -720,34 +702,7 @@ P-state is called, the ``ftrace`` filter can be set to to
gnome-shell-3409 [001] ..s. 2537.650850: intel_pstate_set_pstate <-intel_pstate_timer_func
<idle>-0 [000] ..s. 2537.654843: intel_pstate_set_pstate <-intel_pstate_timer_func
-Tuning Interface in ``debugfs``
--------------------------------
-
-The ``powersave`` algorithm provided by ``intel_pstate`` for `the Core line of
-processors in the active mode <powersave_>`_ is based on a `PID controller`_
-whose parameters were chosen to address a number of different use cases at the
-same time. However, it still is possible to fine-tune it to a specific workload
-and the ``debugfs`` interface under ``/sys/kernel/debug/pstate_snb/`` is
-provided for this purpose. [Note that the ``pstate_snb`` directory will be
-present only if the specific P-state selection algorithm matching the interface
-in it actually is in use.]
-
-The following files present in that directory can be used to modify the PID
-controller parameters at run time:
-
-| ``deadband``
-| ``d_gain_pct``
-| ``i_gain_pct``
-| ``p_gain_pct``
-| ``sample_rate_ms``
-| ``setpoint``
-
-Note, however, that achieving desirable results this way generally requires
-expert-level understanding of the power vs performance tradeoff, so extra care
-is recommended when attempting to do that.
-
.. _LCEU2015: http://events.linuxfoundation.org/sites/events/files/slides/LinuxConEurope_2015.pdf
.. _SDM: http://www.intel.com/content/www/us/en/architecture-and-technology/64-ia-32-architectures-software-developer-system-programming-manual-325384.html
.. _ACPI specification: http://www.uefi.org/sites/default/files/resources/ACPI_6_1.pdf
-.. _PID controller: https://en.wikipedia.org/wiki/PID_controller
diff --git a/Documentation/admin-guide/pm/sleep-states.rst b/Documentation/admin-guide/pm/sleep-states.rst
new file mode 100644
index 000000000000..1e5c0f00cb2f
--- /dev/null
+++ b/Documentation/admin-guide/pm/sleep-states.rst
@@ -0,0 +1,245 @@
+===================
+System Sleep States
+===================
+
+::
+
+ Copyright (c) 2017 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+Sleep states are global low-power states of the entire system in which user
+space code cannot be executed and the overall system activity is significantly
+reduced.
+
+
+Sleep States That Can Be Supported
+==================================
+
+Depending on its configuration and the capabilities of the platform it runs on,
+the Linux kernel can support up to four system sleep states, including
+hibernation and up to three variants of system suspend. The sleep states that
+can be supported by the kernel are listed below.
+
+.. _s2idle:
+
+Suspend-to-Idle
+---------------
+
+This is a generic, pure software, light-weight variant of system suspend (also
+referred to as S2I or S2Idle). It allows more energy to be saved relative to
+runtime idle by freezing user space, suspending the timekeeping and putting all
+I/O devices into low-power states (possibly lower-power than available in the
+working state), such that the processors can spend time in their deepest idle
+states while the system is suspended.
+
+The system is woken up from this state by in-band interrupts, so theoretically
+any devices that can cause interrupts to be generated in the working state can
+also be set up as wakeup devices for S2Idle.
+
+This state can be used on platforms without support for :ref:`standby <standby>`
+or :ref:`suspend-to-RAM <s2ram>`, or it can be used in addition to any of the
+deeper system suspend variants to provide reduced resume latency. It is always
+supported if the :c:macro:`CONFIG_SUSPEND` kernel configuration option is set.
+
+.. _standby:
+
+Standby
+-------
+
+This state, if supported, offers moderate, but real, energy savings, while
+providing a relatively straightforward transition back to the working state. No
+operating state is lost (the system core logic retains power), so the system can
+go back to where it left off easily enough.
+
+In addition to freezing user space, suspending the timekeeping and putting all
+I/O devices into low-power states, which is done for :ref:`suspend-to-idle
+<s2idle>` too, nonboot CPUs are taken offline and all low-level system functions
+are suspended during transitions into this state. For this reason, it should
+allow more energy to be saved relative to :ref:`suspend-to-idle <s2idle>`, but
+the resume latency will generally be greater than for that state.
+
+The set of devices that can wake up the system from this state usually is
+reduced relative to :ref:`suspend-to-idle <s2idle>` and it may be necessary to
+rely on the platform for setting up the wakeup functionality as appropriate.
+
+This state is supported if the :c:macro:`CONFIG_SUSPEND` kernel configuration
+option is set and the support for it is registered by the platform with the
+core system suspend subsystem. On ACPI-based systems this state is mapped to
+the S1 system state defined by ACPI.
+
+.. _s2ram:
+
+Suspend-to-RAM
+--------------
+
+This state (also referred to as STR or S2RAM), if supported, offers significant
+energy savings as everything in the system is put into a low-power state, except
+for memory, which should be placed into the self-refresh mode to retain its
+contents. All of the steps carried out when entering :ref:`standby <standby>`
+are also carried out during transitions to S2RAM. Additional operations may
+take place depending on the platform capabilities. In particular, on ACPI-based
+systems the kernel passes control to the platform firmware (BIOS) as the last
+step during S2RAM transitions and that usually results in powering down some
+more low-level components that are not directly controlled by the kernel.
+
+The state of devices and CPUs is saved and held in memory. All devices are
+suspended and put into low-power states. In many cases, all peripheral buses
+lose power when entering S2RAM, so devices must be able to handle the transition
+back to the "on" state.
+
+On ACPI-based systems S2RAM requires some minimal boot-strapping code in the
+platform firmware to resume the system from it. This may be the case on other
+platforms too.
+
+The set of devices that can wake up the system from S2RAM usually is reduced
+relative to :ref:`suspend-to-idle <s2idle>` and :ref:`standby <standby>` and it
+may be necessary to rely on the platform for setting up the wakeup functionality
+as appropriate.
+
+S2RAM is supported if the :c:macro:`CONFIG_SUSPEND` kernel configuration option
+is set and the support for it is registered by the platform with the core system
+suspend subsystem. On ACPI-based systems it is mapped to the S3 system state
+defined by ACPI.
+
+.. _hibernation:
+
+Hibernation
+-----------
+
+This state (also referred to as Suspend-to-Disk or STD) offers the greatest
+energy savings and can be used even in the absence of low-level platform support
+for system suspend. However, it requires some low-level code for resuming the
+system to be present for the underlying CPU architecture.
+
+Hibernation is significantly different from any of the system suspend variants.
+It takes three system state changes to put it into hibernation and two system
+state changes to resume it.
+
+First, when hibernation is triggered, the kernel stops all system activity and
+creates a snapshot image of memory to be written into persistent storage. Next,
+the system goes into a state in which the snapshot image can be saved, the image
+is written out and finally the system goes into the target low-power state in
+which power is cut from almost all of its hardware components, including memory,
+except for a limited set of wakeup devices.
+
+Once the snapshot image has been written out, the system may either enter a
+special low-power state (like ACPI S4), or it may simply power down itself.
+Powering down means minimum power draw and it allows this mechanism to work on
+any system. However, entering a special low-power state may allow additional
+means of system wakeup to be used (e.g. pressing a key on the keyboard or
+opening a laptop lid).
+
+After wakeup, control goes to the platform firmware that runs a boot loader
+which boots a fresh instance of the kernel (control may also go directly to
+the boot loader, depending on the system configuration, but either way it causes
+a fresh instance of the kernel to be booted). That new instance of the kernel
+(referred to as the ``restore kernel``) looks for a hibernation image in
+persistent storage and if one is found, it is loaded into memory. Next, all
+activity in the system is stopped and the restore kernel overwrites itself with
+the image contents and jumps into a special trampoline area in the original
+kernel stored in the image (referred to as the ``image kernel``), which is where
+the special architecture-specific low-level code is needed. Finally, the
+image kernel restores the system to the pre-hibernation state and allows user
+space to run again.
+
+Hibernation is supported if the :c:macro:`CONFIG_HIBERNATION` kernel
+configuration option is set. However, this option can only be set if support
+for the given CPU architecture includes the low-level code for system resume.
+
+
+Basic ``sysfs`` Interfaces for System Suspend and Hibernation
+=============================================================
+
+The following files located in the :file:`/sys/power/` directory can be used by
+user space for sleep states control.
+
+``state``
+ This file contains a list of strings representing sleep states supported
+ by the kernel. Writing one of these strings into it causes the kernel
+ to start a transition of the system into the sleep state represented by
+ that string.
+
+ In particular, the strings "disk", "freeze" and "standby" represent the
+ :ref:`hibernation <hibernation>`, :ref:`suspend-to-idle <s2idle>` and
+ :ref:`standby <standby>` sleep states, respectively. The string "mem"
+ is interpreted in accordance with the contents of the ``mem_sleep`` file
+ described below.
+
+ If the kernel does not support any system sleep states, this file is
+ not present.
+
+``mem_sleep``
+ This file contains a list of strings representing supported system
+ suspend variants and allows user space to select the variant to be
+ associated with the "mem" string in the ``state`` file described above.
+
+ The strings that may be present in this file are "s2idle", "shallow"
+ and "deep". The string "s2idle" always represents :ref:`suspend-to-idle
+ <s2idle>` and, by convention, "shallow" and "deep" represent
+ :ref:`standby <standby>` and :ref:`suspend-to-RAM <s2ram>`,
+ respectively.
+
+ Writing one of the listed strings into this file causes the system
+ suspend variant represented by it to be associated with the "mem" string
+ in the ``state`` file. The string representing the suspend variant
+ currently associated with the "mem" string in the ``state`` file
+ is listed in square brackets.
+
+ If the kernel does not support system suspend, this file is not present.
+
+``disk``
+ This file contains a list of strings representing different operations
+ that can be carried out after the hibernation image has been saved. The
+ possible options are as follows:
+
+ ``platform``
+ Put the system into a special low-power state (e.g. ACPI S4) to
+ make additional wakeup options available and possibly allow the
+ platform firmware to take a simplified initialization path after
+ wakeup.
+
+ ``shutdown``
+ Power off the system.
+
+ ``reboot``
+ Reboot the system (useful for diagnostics mostly).
+
+ ``suspend``
+ Hybrid system suspend. Put the system into the suspend sleep
+ state selected through the ``mem_sleep`` file described above.
+ If the system is successfully woken up from that state, discard
+ the hibernation image and continue. Otherwise, use the image
+ to restore the previous state of the system.
+
+ ``test_resume``
+ Diagnostic operation. Load the image as though the system had
+ just woken up from hibernation and the currently running kernel
+ instance was a restore kernel and follow up with full system
+ resume.
+
+ Writing one of the listed strings into this file causes the option
+ represented by it to be selected.
+
+ The currently selected option is shown in square brackets which means
+ that the operation represented by it will be carried out after creating
+ and saving the image next time hibernation is triggered by writing
+ ``disk`` to :file:`/sys/power/state`.
+
+ If the kernel does not support hibernation, this file is not present.
+
+According to the above, there are two ways to make the system go into the
+:ref:`suspend-to-idle <s2idle>` state. The first one is to write "freeze"
+directly to :file:`/sys/power/state`. The second one is to write "s2idle" to
+:file:`/sys/power/mem_sleep` and then to write "mem" to
+:file:`/sys/power/state`. Likewise, there are two ways to make the system go
+into the :ref:`standby <standby>` state (the strings to write to the control
+files in that case are "standby" or "shallow" and "mem", respectively) if that
+state is supported by the platform. However, there is only one way to make the
+system go into the :ref:`suspend-to-RAM <s2ram>` state (write "deep" into
+:file:`/sys/power/mem_sleep` and "mem" into :file:`/sys/power/state`).
+
+The default suspend variant (i.e. the one to be used without writing anything
+into :file:`/sys/power/mem_sleep`) is either "deep" (on the majority of systems
+supporting :ref:`suspend-to-RAM <s2ram>`) or "s2idle", but it can be overridden
+by the value of the "mem_sleep_default" parameter in the kernel command line.
+On some ACPI-based systems, depending on the information in the ACPI tables, the
+default may be "s2idle" even if :ref:`suspend-to-RAM <s2ram>` is supported.
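As a minimal user-space sketch of the interfaces described above (file paths
taken from this document, error handling reduced to a bare minimum), a
program run with sufficient privileges could request suspend-to-idle as
follows:

	#include <stdio.h>

	int main(void)
	{
		FILE *f;

		f = fopen("/sys/power/mem_sleep", "w");
		if (!f)
			return 1;
		fputs("s2idle", f);	/* associate "mem" with suspend-to-idle */
		fclose(f);

		f = fopen("/sys/power/state", "w");
		if (!f)
			return 1;
		fputs("mem", f);	/* start the transition */
		fclose(f);
		return 0;
	}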
diff --git a/Documentation/admin-guide/pm/strategies.rst b/Documentation/admin-guide/pm/strategies.rst
new file mode 100644
index 000000000000..afe4d3f831fe
--- /dev/null
+++ b/Documentation/admin-guide/pm/strategies.rst
@@ -0,0 +1,52 @@
+===========================
+Power Management Strategies
+===========================
+
+::
+
+ Copyright (c) 2017 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+The Linux kernel supports two major high-level power management strategies.
+
+One of them is based on using global low-power states of the whole system in
+which user space code cannot be executed and the overall system activity is
+significantly reduced, referred to as :doc:`sleep states <sleep-states>`. The
+kernel puts the system into one of these states when requested by user space
+and the system stays in it until a special signal is received from one of
+designated devices, triggering a transition to the ``working state`` in which
+user space code can run. Because sleep states are global and the whole system
+is affected by the state changes, this strategy is referred to as the
+:doc:`system-wide power management <system-wide>`.
+
+The other strategy, referred to as the :doc:`working-state power management
+<working-state>`, is based on adjusting the power states of individual hardware
+components of the system, as needed, in the working state. In consequence, if
+this strategy is in use, the working state of the system usually does not
+correspond to any particular physical configuration of it, but can be treated as
+a metastate covering a range of different power states of the system in which
+the individual components of it can be either ``active`` (in use) or
+``inactive`` (idle). If they are active, they have to be in power states
+allowing them to process data and to be accessed by software. In turn, if they
+are inactive, ideally, they should be in low-power states in which they may not
+be accessible.
+
+If all of the system components are active, the system as a whole is regarded as
+"runtime active" and that situation typically corresponds to the maximum power
+draw (or maximum energy usage) of it. If all of them are inactive, the system
+as a whole is regarded as "runtime idle" which may be very close to a sleep
+state from the physical system configuration and power draw perspective, but
+then it takes much less time and effort to start executing user space code than
+for the same system in a sleep state. However, transitions from sleep states
+back to the working state can only be started by a limited set of devices, so
+typically the system can spend much more time in a sleep state than it can be
+runtime idle in one go. For this reason, systems usually use less energy in
+sleep states than when they are runtime idle most of the time.
+
+Moreover, the two power management strategies address different usage scenarios.
+Namely, if the user indicates that the system will not be in use going forward,
+for example by closing its lid (if the system is a laptop), it probably should
+go into a sleep state at that point. On the other hand, if the user simply goes
+away from the laptop keyboard, it probably should stay in the working state and
+use the working-state power management in case it becomes idle, because the user
+may come back to it at any time and then may want the system to be immediately
+accessible.
diff --git a/Documentation/admin-guide/pm/system-wide.rst b/Documentation/admin-guide/pm/system-wide.rst
new file mode 100644
index 000000000000..0c81e4c5de39
--- /dev/null
+++ b/Documentation/admin-guide/pm/system-wide.rst
@@ -0,0 +1,8 @@
+============================
+System-Wide Power Management
+============================
+
+.. toctree::
+ :maxdepth: 2
+
+ sleep-states
diff --git a/Documentation/admin-guide/pm/working-state.rst b/Documentation/admin-guide/pm/working-state.rst
new file mode 100644
index 000000000000..fa01bf083dfe
--- /dev/null
+++ b/Documentation/admin-guide/pm/working-state.rst
@@ -0,0 +1,9 @@
+==============================
+Working-State Power Management
+==============================
+
+.. toctree::
+ :maxdepth: 2
+
+ cpufreq
+ intel_pstate
diff --git a/Documentation/arm/firmware.txt b/Documentation/arm/firmware.txt
index da6713adac8a..7f175dbb427e 100644
--- a/Documentation/arm/firmware.txt
+++ b/Documentation/arm/firmware.txt
@@ -60,7 +60,7 @@ Example of using a firmware operation:
/* some platform code, e.g. SMP initialization */
- __raw_writel(virt_to_phys(exynos4_secondary_startup),
+ __raw_writel(__pa_symbol(exynos4_secondary_startup),
CPU1_BOOT_REG);
/* Call Exynos specific smc call */
diff --git a/Documentation/arm64/cpu-feature-registers.txt b/Documentation/arm64/cpu-feature-registers.txt
index d1c97f9f51cc..dad411d635d8 100644
--- a/Documentation/arm64/cpu-feature-registers.txt
+++ b/Documentation/arm64/cpu-feature-registers.txt
@@ -179,6 +179,8 @@ infrastructure:
| FCMA | [19-16] | y |
|--------------------------------------------------|
| JSCVT | [15-12] | y |
+ |--------------------------------------------------|
+ | DPB | [3-0] | y |
x--------------------------------------------------x
Appendix I: Example
diff --git a/Documentation/atomic_bitops.txt b/Documentation/atomic_bitops.txt
new file mode 100644
index 000000000000..5550bfdcce5f
--- /dev/null
+++ b/Documentation/atomic_bitops.txt
@@ -0,0 +1,66 @@
+
+On atomic bitops.
+
+
+While our bitmap_{}() functions are non-atomic, we have a number of operations
+operating on single bits in a bitmap that are atomic.
+
+
+API
+---
+
+The single bit operations are:
+
+Non-RMW ops:
+
+ test_bit()
+
+RMW atomic operations without return value:
+
+ {set,clear,change}_bit()
+ clear_bit_unlock()
+
+RMW atomic operations with return value:
+
+ test_and_{set,clear,change}_bit()
+ test_and_set_bit_lock()
+
+Barriers:
+
+ smp_mb__{before,after}_atomic()
+
+
+All RMW atomic operations have a '__' prefixed variant which is non-atomic.
+
+
+SEMANTICS
+---------
+
+Non-atomic ops:
+
+In particular __clear_bit_unlock() suffers the same issue as atomic_set(),
+which is why the generic version maps to clear_bit_unlock(), see atomic_t.txt.
+
+
+RMW ops:
+
+The test_and_{}_bit() operations return the original value of the bit.
+
+
+ORDERING
+--------
+
+Like with atomic_t, the rule of thumb is:
+
+ - non-RMW operations are unordered;
+
+ - RMW operations that have no return value are unordered;
+
+ - RMW operations that have a return value are fully ordered.
+
+Except for test_and_set_bit_lock() which has ACQUIRE semantics and
+clear_bit_unlock() which has RELEASE semantics.
+
+Since a platform only has a single means of achieving atomic operations
+the same barriers as for atomic_t are used, see atomic_t.txt.
+
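A minimal sketch, using a hypothetical per-device flag word, of the
ACQUIRE/RELEASE pair called out above:

	#define MYDEV_BUSY	0		/* hypothetical bit number */

	static unsigned long mydev_flags;

	static bool mydev_try_claim(void)
	{
		/* ACQUIRE on success: later accesses stay after this. */
		return !test_and_set_bit_lock(MYDEV_BUSY, &mydev_flags);
	}

	static void mydev_release(void)
	{
		/* RELEASE: earlier accesses stay before this. */
		clear_bit_unlock(MYDEV_BUSY, &mydev_flags);
	}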
diff --git a/Documentation/atomic_t.txt b/Documentation/atomic_t.txt
new file mode 100644
index 000000000000..913396ac5824
--- /dev/null
+++ b/Documentation/atomic_t.txt
@@ -0,0 +1,242 @@
+
+On atomic types (atomic_t atomic64_t and atomic_long_t).
+
+The atomic type provides an interface to the architecture's means of atomic
+RMW operations between CPUs (atomic operations on MMIO are not supported and
+can lead to fatal traps on some platforms).
+
+API
+---
+
+The 'full' API consists of (atomic64_ and atomic_long_ prefixes omitted for
+brevity):
+
+Non-RMW ops:
+
+ atomic_read(), atomic_set()
+ atomic_read_acquire(), atomic_set_release()
+
+
+RMW atomic operations:
+
+Arithmetic:
+
+ atomic_{add,sub,inc,dec}()
+ atomic_{add,sub,inc,dec}_return{,_relaxed,_acquire,_release}()
+ atomic_fetch_{add,sub,inc,dec}{,_relaxed,_acquire,_release}()
+
+
+Bitwise:
+
+ atomic_{and,or,xor,andnot}()
+ atomic_fetch_{and,or,xor,andnot}{,_relaxed,_acquire,_release}()
+
+
+Swap:
+
+ atomic_xchg{,_relaxed,_acquire,_release}()
+ atomic_cmpxchg{,_relaxed,_acquire,_release}()
+ atomic_try_cmpxchg{,_relaxed,_acquire,_release}()
+
+
+Reference count (but please see refcount_t):
+
+ atomic_add_unless(), atomic_inc_not_zero()
+ atomic_sub_and_test(), atomic_dec_and_test()
+
+
+Misc:
+
+ atomic_inc_and_test(), atomic_add_negative()
+ atomic_dec_unless_positive(), atomic_inc_unless_negative()
+
+
+Barriers:
+
+ smp_mb__{before,after}_atomic()
+
+
+
+SEMANTICS
+---------
+
+Non-RMW ops:
+
+The non-RMW ops are (typically) regular LOADs and STOREs and are canonically
+implemented using READ_ONCE(), WRITE_ONCE(), smp_load_acquire() and
+smp_store_release() respectively.
+
+The one detail to this is that atomic_set{}() should be observable to the RMW
+ops. That is:
+
+ C atomic-set
+
+ {
+ atomic_set(v, 1);
+ }
+
+ P1(atomic_t *v)
+ {
+ atomic_add_unless(v, 1, 0);
+ }
+
+ P2(atomic_t *v)
+ {
+ atomic_set(v, 0);
+ }
+
+ exists
+ (v=2)
+
+In this case we would expect the atomic_set() from CPU1 to either happen
+before the atomic_add_unless(), in which case that latter one would no-op, or
+_after_ in which case we'd overwrite its result. In no case is "2" a valid
+outcome.
+
+This is typically true on 'normal' platforms, where a regular competing STORE
+will invalidate a LL/SC or fail a CMPXCHG.
+
+The obvious case where this is not so is when we need to implement atomic ops
+with a lock:
+
+ CPU0 CPU1
+
+ atomic_add_unless(v, 1, 0);
+ lock();
+ ret = READ_ONCE(v->counter); // == 1
+ atomic_set(v, 0);
+ if (ret != u) WRITE_ONCE(v->counter, 0);
+ WRITE_ONCE(v->counter, ret + 1);
+ unlock();
+
+the typical solution is to then implement atomic_set{}() with atomic_xchg().
+
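A minimal sketch of that solution, assuming a hypothetical lock-based
architecture on which atomic_xchg() takes the same internal lock as the
other RMW operations:

	static inline void atomic_set(atomic_t *v, int i)
	{
		/* Turn the store into an RMW so that it serializes with
		 * concurrent atomic_add_unless() and friends. */
		(void)atomic_xchg(v, i);
	}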
+
+RMW ops:
+
+These come in various forms:
+
+ - plain operations without return value: atomic_{}()
+
+ - operations which return the modified value: atomic_{}_return()
+
+ these are limited to the arithmetic operations because those are
+ reversible. Bitops are irreversible and therefore the modified value
+ is of dubious utility.
+
+ - operations which return the original value: atomic_fetch_{}()
+
+ - swap operations: xchg(), cmpxchg() and try_cmpxchg()
+
+ - misc; the special purpose operations that are commonly used and would,
+ given the interface, normally be implemented using (try_)cmpxchg loops but
+ are time critical and can, (typically) on LL/SC architectures, be more
+ efficiently implemented.
+
+All these operations are SMP atomic; that is, the operations (for a single
+atomic variable) can be fully ordered and no intermediate state is lost or
+visible.
+
+
+ORDERING (go read memory-barriers.txt first)
+--------
+
+The rule of thumb:
+
+ - non-RMW operations are unordered;
+
+ - RMW operations that have no return value are unordered;
+
+ - RMW operations that have a return value are fully ordered;
+
+ - RMW operations that are conditional are unordered on FAILURE,
+ otherwise the above rules apply.
+
+Except of course when an operation has an explicit ordering like:
+
+ {}_relaxed: unordered
+ {}_acquire: the R of the RMW (or atomic_read) is an ACQUIRE
+ {}_release: the W of the RMW (or atomic_set) is a RELEASE
+
+Where 'unordered' is against other memory locations. Address dependencies are
+not defeated.
+
+Fully ordered primitives are ordered against everything prior and everything
+subsequent. Therefore a fully ordered primitive is like having an smp_mb()
+before and an smp_mb() after the primitive.
+
+
+The barriers:
+
+ smp_mb__{before,after}_atomic()
+
+only apply to the RMW ops and can be used to augment/upgrade the ordering
+inherent to the used atomic op. These barriers provide a full smp_mb().
+
+These helper barriers exist because architectures have varying implicit
+ordering on their SMP atomic primitives. For example our TSO architectures
+provide full ordered atomics and these barriers are no-ops.
+
+Thus:
+
+ atomic_fetch_add();
+
+is equivalent to:
+
+ smp_mb__before_atomic();
+ atomic_fetch_add_relaxed();
+ smp_mb__after_atomic();
+
+However the atomic_fetch_add() might be implemented more efficiently.
+
+Further, while something like:
+
+ smp_mb__before_atomic();
+ atomic_dec(&X);
+
+is a 'typical' RELEASE pattern, the barrier is strictly stronger than
+a RELEASE. Similarly for something like:
+
+ atomic_inc(&X);
+ smp_mb__after_atomic();
+
+is an ACQUIRE pattern (though very much not typical), but again the barrier is
+strictly stronger than ACQUIRE. As illustrated:
+
+ C strong-acquire
+
+ {
+ }
+
+ P1(int *x, atomic_t *y)
+ {
+ r0 = READ_ONCE(*x);
+ smp_rmb();
+ r1 = atomic_read(y);
+ }
+
+ P2(int *x, atomic_t *y)
+ {
+ atomic_inc(y);
+ smp_mb__after_atomic();
+ WRITE_ONCE(*x, 1);
+ }
+
+ exists
+ (r0=1 /\ r1=0)
+
+This should not happen; but a hypothetical atomic_inc_acquire() --
+(void)atomic_fetch_inc_acquire() for instance -- would allow the outcome,
+since then:
+
+ P1 P2
+
+ t = LL.acq *y (0)
+ t++;
+ *x = 1;
+ r0 = *x (1)
+ RMB
+ r1 = *y (0)
+ SC *y, t;
+
+is allowed.
diff --git a/Documentation/blockdev/zram.txt b/Documentation/blockdev/zram.txt
index 4fced8a21307..257e65714c6a 100644
--- a/Documentation/blockdev/zram.txt
+++ b/Documentation/blockdev/zram.txt
@@ -168,6 +168,7 @@ max_comp_streams RW the number of possible concurrent compress operations
comp_algorithm RW show and change the compression algorithm
compact WO trigger memory compaction
debug_stat RO this file is used for zram debugging purposes
+backing_dev RW set up backend storage for zram to write out
User space is advised to use the following files to read the device statistics.
@@ -231,5 +232,15 @@ line of text and contains the following stats separated by whitespace:
resets the disksize to zero. You must set the disksize again
before reusing the device.
+* Optional Feature
+
+= writeback
+
+With incompressible pages, there is no memory saving with zram.
+Instead, with CONFIG_ZRAM_WRITEBACK, zram can write incompressible pages
+to backing storage rather than keeping them in memory.
+The user should set up the backing device via /sys/block/zramX/backing_dev
+before setting the disksize.
+
Nitin Gupta
ngupta@vflare.org
diff --git a/Documentation/cgroup-v2.txt b/Documentation/cgroup-v2.txt
index bde177103567..dc44785dc0fa 100644
--- a/Documentation/cgroup-v2.txt
+++ b/Documentation/cgroup-v2.txt
@@ -18,7 +18,9 @@ v1 is available under Documentation/cgroup-v1/.
1-2. What is cgroup?
2. Basic Operations
2-1. Mounting
- 2-2. Organizing Processes
+ 2-2. Organizing Processes and Threads
+ 2-2-1. Processes
+ 2-2-2. Threads
2-3. [Un]populated Notification
2-4. Controlling Controllers
2-4-1. Enabling and Disabling
@@ -167,8 +169,11 @@ cgroup v2 currently supports the following mount options.
Delegation section for details.
-Organizing Processes
---------------------
+Organizing Processes and Threads
+--------------------------------
+
+Processes
+~~~~~~~~~
Initially, only the root cgroup exists to which all processes belong.
A child cgroup can be created by creating a sub-directory::
@@ -219,6 +224,105 @@ is removed subsequently, " (deleted)" is appended to the path::
0::/test-cgroup/test-cgroup-nested (deleted)
+Threads
+~~~~~~~
+
+cgroup v2 supports thread granularity for a subset of controllers to
+support use cases requiring hierarchical resource distribution across
+the threads of a group of processes. By default, all threads of a
+process belong to the same cgroup, which also serves as the resource
+domain to host resource consumptions which are not specific to a
+process or thread. The thread mode allows threads to be spread across
+a subtree while still maintaining the common resource domain for them.
+
+Controllers which support thread mode are called threaded controllers.
+The ones which don't are called domain controllers.
+
+Marking a cgroup threaded makes it join the resource domain of its
+parent as a threaded cgroup. The parent may be another threaded
+cgroup whose resource domain is further up in the hierarchy. The root
+of a threaded subtree, that is, the nearest ancestor which is not
+threaded, is called threaded domain or thread root interchangeably and
+serves as the resource domain for the entire subtree.
+
+Inside a threaded subtree, threads of a process can be put in
+different cgroups and are not subject to the no internal process
+constraint - threaded controllers can be enabled on non-leaf cgroups
+whether they have threads in them or not.
+
+As the threaded domain cgroup hosts all the domain resource
+consumptions of the subtree, it is considered to have internal
+resource consumptions whether there are processes in it or not and
+can't have populated child cgroups which aren't threaded. Because the
+root cgroup is not subject to the no internal process constraint, it can
+serve both as a threaded domain and a parent to domain cgroups.
+
+The current operation mode or type of the cgroup is shown in the
+"cgroup.type" file which indicates whether the cgroup is a normal
+domain, a domain which is serving as the domain of a threaded subtree,
+or a threaded cgroup.
+
+On creation, a cgroup is always a domain cgroup and can be made
+threaded by writing "threaded" to the "cgroup.type" file. The
+operation is one-way::
+
+ # echo threaded > cgroup.type
+
+Once threaded, the cgroup can't be made a domain again. To enable the
+thread mode, the following conditions must be met.
+
+- As the cgroup will join the parent's resource domain, the parent
+ must either be a valid (threaded) domain or a threaded cgroup.
+
+- When the parent is an unthreaded domain, it must not have any domain
+ controllers enabled or populated domain children. The root is
+ exempt from this requirement.
+
+Topology-wise, a cgroup can be in an invalid state. Please consider
+the following topology::
+
+ A (threaded domain) - B (threaded) - C (domain, just created)
+
+C is created as a domain but isn't connected to a parent which can
+host child domains. C can't be used until it is turned into a
+threaded cgroup. "cgroup.type" file will report "domain (invalid)" in
+these cases. Operations which fail due to invalid topology use
+EOPNOTSUPP as the errno.
+
+A domain cgroup is turned into a threaded domain when one of its child
+cgroups becomes threaded or when threaded controllers are enabled in the
+"cgroup.subtree_control" file while there are processes in the cgroup.
+A threaded domain reverts to a normal domain when the conditions
+clear.
+
+When read, "cgroup.threads" contains the list of the thread IDs of all
+threads in the cgroup. Except that the operations are per-thread
+instead of per-process, "cgroup.threads" has the same format and
+behaves the same way as "cgroup.procs". While "cgroup.threads" can be
+written to in any cgroup, as it can only move threads inside the same
+threaded domain, its operations are confined inside each threaded
+subtree.
+
+The threaded domain cgroup serves as the resource domain for the whole
+subtree, and, while the threads can be scattered across the subtree,
+all the processes are considered to be in the threaded domain cgroup.
+"cgroup.procs" in a threaded domain cgroup contains the PIDs of all
+processes in the subtree and is not readable in the subtree proper.
+However, "cgroup.procs" can be written to from anywhere in the subtree
+to migrate all threads of the matching process to the cgroup.
+
+Only threaded controllers can be enabled in a threaded subtree. When
+a threaded controller is enabled inside a threaded subtree, it only
+accounts for and controls resource consumptions associated with the
+threads in the cgroup and its descendants. All consumptions which
+aren't tied to a specific thread belong to the threaded domain cgroup.
+
+Because a threaded subtree is exempt from the no internal process
+constraint, a threaded controller must be able to handle competition
+between threads in a non-leaf cgroup and its child cgroups. Each
+threaded controller defines how such competitions are handled.
+
+
[Un]populated Notification
--------------------------
@@ -302,15 +406,15 @@ disabled if one or more children have it enabled.
No Internal Process Constraint
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Non-root cgroups can only distribute resources to their children when
-they don't have any processes of their own. In other words, only
-cgroups which don't contain any processes can have controllers enabled
-in their "cgroup.subtree_control" files.
+Non-root cgroups can distribute domain resources to their children
+only when they don't have any processes of their own. In other words,
+only domain cgroups which don't contain any processes can have domain
+controllers enabled in their "cgroup.subtree_control" files.
-This guarantees that, when a controller is looking at the part of the
-hierarchy which has it enabled, processes are always only on the
-leaves. This rules out situations where child cgroups compete against
-internal processes of the parent.
+This guarantees that, when a domain controller is looking at the part
+of the hierarchy which has it enabled, processes are always only on
+the leaves. This rules out situations where child cgroups compete
+against internal processes of the parent.
The root cgroup is exempt from this restriction. Root contains
processes and anonymous resource consumption which can't be associated
@@ -334,10 +438,10 @@ Model of Delegation
~~~~~~~~~~~~~~~~~~~
A cgroup can be delegated in two ways. First, to a less privileged
-user by granting write access of the directory and its "cgroup.procs"
-and "cgroup.subtree_control" files to the user. Second, if the
-"nsdelegate" mount option is set, automatically to a cgroup namespace
-on namespace creation.
+user by granting write access of the directory and its "cgroup.procs",
+"cgroup.threads" and "cgroup.subtree_control" files to the user.
+Second, if the "nsdelegate" mount option is set, automatically to a
+cgroup namespace on namespace creation.
Because the resource control interface files in a given directory
control the distribution of the parent's resources, the delegatee
@@ -644,6 +748,29 @@ Core Interface Files
All cgroup core files are prefixed with "cgroup."
+ cgroup.type
+
+ A read-write single value file which exists on non-root
+ cgroups.
+
+ When read, it indicates the current type of the cgroup, which
+ can be one of the following values.
+
+ - "domain" : A normal valid domain cgroup.
+
+ - "domain threaded" : A threaded domain cgroup which is
+ serving as the root of a threaded subtree.
+
+ - "domain invalid" : A cgroup which is in an invalid state.
+ It can't be populated or have controllers enabled. It may
+ be allowed to become a threaded cgroup.
+
+ - "threaded" : A threaded cgroup which is a member of a
+ threaded subtree.
+
+ A cgroup can be turned into a threaded cgroup by writing
+ "threaded" to this file.
+
cgroup.procs
A read-write new-line separated values file which exists on
all cgroups.
@@ -658,9 +785,6 @@ All cgroup core files are prefixed with "cgroup."
the PID to the cgroup. The writer should match all of the
following conditions.
- - Its euid is either root or must match either uid or suid of
- the target process.
-
- It must have write access to the "cgroup.procs" file.
- It must have write access to the "cgroup.procs" file of the
@@ -669,6 +793,35 @@ All cgroup core files are prefixed with "cgroup."
When delegating a sub-hierarchy, write access to this file
should be granted along with the containing directory.
+ In a threaded cgroup, reading this file fails with EOPNOTSUPP
+ as all the processes belong to the thread root. Writing is
+ supported and moves every thread of the process to the cgroup.
+
+ cgroup.threads
+ A read-write new-line separated values file which exists on
+ all cgroups.
+
+ When read, it lists the TIDs of all threads which belong to
+ the cgroup one-per-line. The TIDs are not ordered and the
+ same TID may show up more than once if the thread got moved to
+ another cgroup and then back or the TID got recycled while
+ reading.
+
+ A TID can be written to migrate the thread associated with the
+ TID to the cgroup. The writer should match all of the
+ following conditions.
+
+ - It must have write access to the "cgroup.threads" file.
+
+ - The cgroup that the thread is currently in must be in the
+ same resource domain as the destination cgroup.
+
+ - It must have write access to the "cgroup.procs" file of the
+ common ancestor of the source and destination cgroups.
+
+ When delegating a sub-hierarchy, write access to this file
+ should be granted along with the containing directory.
+
cgroup.controllers
A read-only space separated values file which exists on all
cgroups.
@@ -701,6 +854,38 @@ All cgroup core files are prefixed with "cgroup."
1 if the cgroup or its descendants contains any live
processes; otherwise, 0.
+ cgroup.max.descendants
+ A read-write single value file. The default is "max".
+
+ Maximum allowed number of descendant cgroups.
+ If the actual number of descendants is equal to or larger,
+ an attempt to create a new cgroup in the hierarchy will fail.
+
+ cgroup.max.depth
+ A read-write single value file. The default is "max".
+
+ Maximum allowed descent depth below the current cgroup.
+ If the actual descent depth is equal to or larger,
+ an attempt to create a new child cgroup will fail.
+
+ cgroup.stat
+ A read-only flat-keyed file with the following entries:
+
+ nr_descendants
+ Total number of visible descendant cgroups.
+
+ nr_dying_descendants
+ Total number of dying descendant cgroups. A cgroup becomes
+ dying after being deleted by a user. The cgroup will remain
+ in the dying state for some undefined time (which can depend
+ on system load) before being completely destroyed.
+
+ A process can't enter a dying cgroup under any circumstances,
+ and a dying cgroup can't revive.
+
+ A dying cgroup can consume system resources not exceeding the
+ limits that were active at the moment of cgroup deletion.
+
Controllers
===========
diff --git a/Documentation/conf.py b/Documentation/conf.py
index 71b032bb44fd..f9054ab60cb1 100644
--- a/Documentation/conf.py
+++ b/Documentation/conf.py
@@ -29,7 +29,7 @@ from load_config import loadConfig
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
-needs_sphinx = '1.2'
+needs_sphinx = '1.3'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
@@ -344,8 +344,8 @@ if major == 1 and minor > 3:
if major == 1 and minor <= 4:
latex_elements['preamble'] += '\\usepackage[margin=0.5in, top=1in, bottom=1in]{geometry}'
elif major == 1 and (minor > 5 or (minor == 5 and patch >= 3)):
- latex_elements['sphinxsetup'] = 'hmargin=0.5in, vmargin=0.5in'
-
+ latex_elements['sphinxsetup'] = 'hmargin=0.5in, vmargin=1in'
+ latex_elements['preamble'] += '\\fvset{fontsize=auto}\n'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
diff --git a/Documentation/core-api/genalloc.rst b/Documentation/core-api/genalloc.rst
new file mode 100644
index 000000000000..6b38a39fab24
--- /dev/null
+++ b/Documentation/core-api/genalloc.rst
@@ -0,0 +1,144 @@
+The genalloc/genpool subsystem
+==============================
+
+There are a number of memory-allocation subsystems in the kernel, each
+aimed at a specific need. Sometimes, however, a kernel developer needs to
+implement a new allocator for a specific range of special-purpose memory;
+often that memory is located on a device somewhere. The author of the
+driver for that device can certainly write a little allocator to get the
+job done, but that is the way to fill the kernel with dozens of poorly
+tested allocators. Back in 2005, Jes Sorensen lifted one of those
+allocators from the sym53c8xx_2 driver and posted_ it as a generic module
+for the creation of ad hoc memory allocators. This code was merged
+for the 2.6.13 release; it has been modified considerably since then.
+
+.. _posted: https://lwn.net/Articles/125842/
+
+Code using this allocator should include <linux/genalloc.h>. The action
+begins with the creation of a pool using one of:
+
+.. kernel-doc:: lib/genalloc.c
+ :functions: gen_pool_create
+
+.. kernel-doc:: lib/genalloc.c
+ :functions: devm_gen_pool_create
+
+A call to :c:func:`gen_pool_create` will create a pool. The granularity of
+allocations is set with min_alloc_order; it is a log-base-2 number like
+those used by the page allocator, but it refers to bytes rather than pages.
+So, if min_alloc_order is passed as 3, then all allocations will be a
+multiple of eight bytes. Increasing min_alloc_order decreases the memory
+required to track the memory in the pool. The nid parameter specifies
+which NUMA node should be used for the allocation of the housekeeping
+structures; it can be -1 if the caller doesn't care.
+
+The "managed" interface :c:func:`devm_gen_pool_create` ties the pool to a
+specific device. Among other things, it will automatically clean up the
+pool when the given device is destroyed.
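+
+As a brief, hypothetical sketch (the device, pool name and order are made up
+for illustration), a driver's probe routine might create a managed pool like
+this::
+
+	static int foo_probe(struct platform_device *pdev)
+	{
+		struct gen_pool *pool;
+
+		/* Eight-byte granularity (order 3), no NUMA preference. */
+		pool = devm_gen_pool_create(&pdev->dev, 3, -1, "foo-sram");
+		if (IS_ERR(pool))
+			return PTR_ERR(pool);
+
+		/* The pool is torn down automatically with the device. */
+		return 0;
+	}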
+
+A pool is shut down with:
+
+.. kernel-doc:: lib/genalloc.c
+ :functions: gen_pool_destroy
+
+It's worth noting that, if there are still allocations outstanding from the
+given pool, this function will take the rather extreme step of invoking
+BUG(), crashing the entire system. You have been warned.
+
+A freshly created pool has no memory to allocate. It is fairly useless in
+that state, so one of the first orders of business is usually to add memory
+to the pool. That can be done with one of:
+
+.. kernel-doc:: include/linux/genalloc.h
+ :functions: gen_pool_add
+
+.. kernel-doc:: lib/genalloc.c
+ :functions: gen_pool_add_virt
+
+A call to :c:func:`gen_pool_add` will place the size bytes of memory
+starting at addr (in the kernel's virtual address space) into the given
+pool, once again using nid as the node ID for ancillary memory allocations.
+The :c:func:`gen_pool_add_virt` variant associates an explicit physical
+address with the memory; this is only necessary if the pool will be used
+for DMA allocations.
+
+The functions for allocating memory from the pool (and putting it back)
+are:
+
+.. kernel-doc:: lib/genalloc.c
+ :functions: gen_pool_alloc
+
+.. kernel-doc:: lib/genalloc.c
+ :functions: gen_pool_dma_alloc
+
+.. kernel-doc:: lib/genalloc.c
+ :functions: gen_pool_free
+
+As one would expect, :c:func:`gen_pool_alloc` will allocate size bytes
+from the given pool. The :c:func:`gen_pool_dma_alloc` variant allocates
+memory for use with DMA operations, returning the associated physical
+address in the space pointed to by dma. This will only work if the memory
+was added with :c:func:`gen_pool_add_virt`. Note that this function
+departs from the usual genpool pattern of using unsigned long values to
+represent kernel addresses; it returns a void * instead.
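+
+Putting the creation, addition, allocation and freeing steps together, a
+minimal hypothetical sequence (names and sizes invented for illustration)
+might look like::
+
+	static int foo_pool_demo(void *region)
+	{
+		struct gen_pool *pool;
+		unsigned long addr;
+		int ret;
+
+		pool = gen_pool_create(3, -1);	/* eight-byte granularity */
+		if (!pool)
+			return -ENOMEM;
+
+		/* Hand a 64KB chunk of kernel virtual memory to the pool. */
+		ret = gen_pool_add(pool, (unsigned long)region, SZ_64K, -1);
+		if (ret)
+			goto out;
+
+		addr = gen_pool_alloc(pool, 256);
+		if (!addr) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		/* ... use the 256 bytes at addr ... */
+
+		gen_pool_free(pool, addr, 256);
+	out:
+		gen_pool_destroy(pool);
+		return ret;
+	}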
+
+That all seems relatively simple; indeed, some developers clearly found it
+to be too simple. After all, the interface above provides no control over
+how the allocation functions choose which specific piece of memory to
+return. If that sort of control is needed, the following functions will be
+of interest:
+
+.. kernel-doc:: lib/genalloc.c
+ :functions: gen_pool_alloc_algo
+
+.. kernel-doc:: lib/genalloc.c
+ :functions: gen_pool_set_algo
+
+Allocations with :c:func:`gen_pool_alloc_algo` specify an algorithm to be
+used to choose the memory to be allocated; the default algorithm can be set
+with :c:func:`gen_pool_set_algo`. The data value is passed to the
+algorithm; most ignore it, but it is occasionally needed. One can,
+naturally, write a special-purpose algorithm, but there is a fair set
+already available:
+
+- gen_pool_first_fit is a simple first-fit allocator; this is the default
+ algorithm if none other has been specified.
+
+- gen_pool_first_fit_align forces the allocation to have a specific
+ alignment (passed via data in a genpool_data_align structure).
+
+- gen_pool_first_fit_order_align aligns the allocation to the order of the
+ size. A 60-byte allocation will thus be 64-byte aligned, for example.
+
+- gen_pool_best_fit, as one would expect, is a simple best-fit allocator.
+
+- gen_pool_fixed_alloc allocates at a specific offset (passed in a
+ genpool_data_fixed structure via the data parameter) within the pool.
+ If the indicated memory is not available the allocation fails.
+
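+As a small, hypothetical example (the pool is assumed to have been created
+and populated as above), a caller needing a 4096-byte-aligned region could
+use the alignment algorithm for a single allocation::
+
+	struct genpool_data_align align_data = { .align = 4096 };
+	unsigned long addr;
+
+	addr = gen_pool_alloc_algo(pool, 4096, gen_pool_first_fit_align,
+				   &align_data);
+	if (!addr)
+		return -ENOMEM;
+
+Alternatively, :c:func:`gen_pool_set_algo` can install
+gen_pool_first_fit_align as the pool's default algorithm so that plain
+:c:func:`gen_pool_alloc` calls honor the alignment.
+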
+There is a handful of other functions, mostly for purposes like querying
+the space available in the pool or iterating through chunks of memory.
+Most users, however, should not need much beyond what has been described
+above. With luck, wider awareness of this module will help to prevent the
+writing of special-purpose memory allocators in the future.
+
+.. kernel-doc:: lib/genalloc.c
+ :functions: gen_pool_virt_to_phys
+
+.. kernel-doc:: lib/genalloc.c
+ :functions: gen_pool_for_each_chunk
+
+.. kernel-doc:: lib/genalloc.c
+ :functions: addr_in_gen_pool
+
+.. kernel-doc:: lib/genalloc.c
+ :functions: gen_pool_avail
+
+.. kernel-doc:: lib/genalloc.c
+ :functions: gen_pool_size
+
+.. kernel-doc:: lib/genalloc.c
+ :functions: gen_pool_get
+
+.. kernel-doc:: lib/genalloc.c
+ :functions: of_gen_pool_get
diff --git a/Documentation/core-api/index.rst b/Documentation/core-api/index.rst
index 0606be3a3111..d5bbe035316d 100644
--- a/Documentation/core-api/index.rst
+++ b/Documentation/core-api/index.rst
@@ -20,6 +20,7 @@ Core utilities
genericirq
flexible-arrays
librs
+ genalloc
Interfaces for kernel debugging
===============================
diff --git a/Documentation/core-api/kernel-api.rst b/Documentation/core-api/kernel-api.rst
index 17b00914c6ab..8282099e0cbf 100644
--- a/Documentation/core-api/kernel-api.rst
+++ b/Documentation/core-api/kernel-api.rst
@@ -344,3 +344,52 @@ codecs, and devices with strict requirements for interface clocking.
.. kernel-doc:: include/linux/clk.h
:internal:
+
+Synchronization Primitives
+==========================
+
+Read-Copy Update (RCU)
+----------------------
+
+.. kernel-doc:: include/linux/rcupdate.h
+ :external:
+
+.. kernel-doc:: include/linux/rcupdate_wait.h
+ :external:
+
+.. kernel-doc:: include/linux/rcutree.h
+ :external:
+
+.. kernel-doc:: kernel/rcu/tree.c
+ :external:
+
+.. kernel-doc:: kernel/rcu/tree_plugin.h
+ :external:
+
+.. kernel-doc:: kernel/rcu/tree_exp.h
+ :external:
+
+.. kernel-doc:: kernel/rcu/update.c
+ :external:
+
+.. kernel-doc:: include/linux/srcu.h
+ :external:
+
+.. kernel-doc:: kernel/rcu/srcutree.c
+ :external:
+
+.. kernel-doc:: include/linux/rculist_bl.h
+ :external:
+
+.. kernel-doc:: include/linux/rculist.h
+ :external:
+
+.. kernel-doc:: include/linux/rculist_nulls.h
+ :external:
+
+.. kernel-doc:: include/linux/rcu_sync.h
+ :external:
+
+.. kernel-doc:: kernel/rcu/sync.c
+ :external:
+
diff --git a/Documentation/core-api/workqueue.rst b/Documentation/core-api/workqueue.rst
index ffdec94fbca1..3943b5bfa8cf 100644
--- a/Documentation/core-api/workqueue.rst
+++ b/Documentation/core-api/workqueue.rst
@@ -243,11 +243,15 @@ throttling the number of active work items, specifying '0' is
recommended.
Some users depend on the strict execution ordering of ST wq. The
-combination of ``@max_active`` of 1 and ``WQ_UNBOUND`` is used to
-achieve this behavior. Work items on such wq are always queued to the
-unbound worker-pools and only one work item can be active at any given
+combination of ``@max_active`` of 1 and ``WQ_UNBOUND`` used to
+achieve this behavior. Work items on such wq were always queued to the
+unbound worker-pools and only one work item could be active at any given
time thus achieving the same ordering property as ST wq.
+In the current implementation the above configuration only guarantees
+ST behavior within a given NUMA node. Instead, alloc_ordered_workqueue() should
+be used to achieve system-wide ST behavior.
+
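+A rough sketch, with invented names, of what that looks like for a user that
+needs strict system-wide ordering::
+
+	static struct workqueue_struct *foo_wq;
+
+	static void foo_work_fn(struct work_struct *work)
+	{
+		/* deferred processing happens here, one item at a time */
+	}
+	static DECLARE_WORK(foo_work, foo_work_fn);
+
+	static int __init foo_init(void)
+	{
+		foo_wq = alloc_ordered_workqueue("foo_ordered", 0);
+		if (!foo_wq)
+			return -ENOMEM;
+
+		/* Items queued to foo_wq execute strictly in queueing order. */
+		queue_work(foo_wq, &foo_work);
+		return 0;
+	}
+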
Example Execution Scenarios
===========================
diff --git a/Documentation/dev-tools/gdb-kernel-debugging.rst b/Documentation/dev-tools/gdb-kernel-debugging.rst
index 5e93c9bc6619..19df79286f00 100644
--- a/Documentation/dev-tools/gdb-kernel-debugging.rst
+++ b/Documentation/dev-tools/gdb-kernel-debugging.rst
@@ -31,11 +31,13 @@ Setup
CONFIG_DEBUG_INFO_REDUCED off. If your architecture supports
CONFIG_FRAME_POINTER, keep it enabled.
-- Install that kernel on the guest.
+- Install that kernel on the guest and turn off KASLR if necessary by adding
+ "nokaslr" to the kernel command line.
Alternatively, QEMU allows to boot the kernel directly using -kernel,
-append, -initrd command line switches. This is generally only useful if
you do not depend on modules. See QEMU documentation for more details on
- this mode.
+ this mode. In this case, you should build the kernel with
+ CONFIG_RANDOMIZE_BASE disabled if the architecture supports KASLR.
- Enable the gdb stub of QEMU/KVM, either
diff --git a/Documentation/dev-tools/kgdb.rst b/Documentation/dev-tools/kgdb.rst
index 75273203a35a..d38be58f872a 100644
--- a/Documentation/dev-tools/kgdb.rst
+++ b/Documentation/dev-tools/kgdb.rst
@@ -348,6 +348,15 @@ default behavior is always set to 0.
- ``echo 1 > /sys/module/debug_core/parameters/kgdbreboot``
- Enter the debugger on reboot notify.
+Kernel parameter: ``nokaslr``
+-----------------------------
+
+If the architecture that you are using enables KASLR by default,
+you should consider turning it off. KASLR randomizes the
+virtual address where the kernel image is mapped and confuses
+gdb, which resolves kernel symbol addresses from the symbol table
+of vmlinux.
+
Using kdb
=========
@@ -358,7 +367,7 @@ This is a quick example of how to use kdb.
1. Configure kgdboc at boot using kernel parameters::
- console=ttyS0,115200 kgdboc=ttyS0,115200
+ console=ttyS0,115200 kgdboc=ttyS0,115200 nokaslr
OR
diff --git a/Documentation/device-mapper/dm-raid.txt b/Documentation/device-mapper/dm-raid.txt
index 7e06e65586d4..4a0a7469fdd7 100644
--- a/Documentation/device-mapper/dm-raid.txt
+++ b/Documentation/device-mapper/dm-raid.txt
@@ -343,3 +343,4 @@ Version History
1.11.0 Fix table line argument order
(wrong raid10_copies/raid10_format sequence)
1.11.1 Add raid4/5/6 journal write-back support via journal_mode option
+1.12.1 fix for MD deadlock between mddev_suspend() and md_write_start() available
diff --git a/Documentation/devicetree/bindings/arm/coresight.txt b/Documentation/devicetree/bindings/arm/coresight.txt
index fcbae6a5e6c1..15ac8e8dcfdf 100644
--- a/Documentation/devicetree/bindings/arm/coresight.txt
+++ b/Documentation/devicetree/bindings/arm/coresight.txt
@@ -34,8 +34,8 @@ its hardware characteristcs.
- Embedded Trace Macrocell (version 4.x):
"arm,coresight-etm4x", "arm,primecell";
- - Qualcomm Configurable Replicator (version 1.x):
- "qcom,coresight-replicator1x", "arm,primecell";
+ - Coresight programmable Replicator:
+ "arm,coresight-dynamic-replicator", "arm,primecell";
- System Trace Macrocell:
"arm,coresight-stm", "arm,primecell"; [1]
diff --git a/Documentation/devicetree/bindings/arm/pmu.txt b/Documentation/devicetree/bindings/arm/pmu.txt
index 61c8b4620415..13611a8199bb 100644
--- a/Documentation/devicetree/bindings/arm/pmu.txt
+++ b/Documentation/devicetree/bindings/arm/pmu.txt
@@ -9,9 +9,11 @@ Required properties:
- compatible : should be one of
"apm,potenza-pmu"
"arm,armv8-pmuv3"
+ "arm,cortex-a73-pmu"
"arm,cortex-a72-pmu"
"arm,cortex-a57-pmu"
"arm,cortex-a53-pmu"
+ "arm,cortex-a35-pmu"
"arm,cortex-a17-pmu"
"arm,cortex-a15-pmu"
"arm,cortex-a12-pmu"
diff --git a/Documentation/devicetree/bindings/ata/ahci-mtk.txt b/Documentation/devicetree/bindings/ata/ahci-mtk.txt
new file mode 100644
index 000000000000..d2aa696b161b
--- /dev/null
+++ b/Documentation/devicetree/bindings/ata/ahci-mtk.txt
@@ -0,0 +1,51 @@
+MediaTek Serial ATA controller
+
+Required properties:
+ - compatible : Must be "mediatek,<chip>-ahci", "mediatek,mtk-ahci".
+ When using "mediatek,mtk-ahci" compatible strings, you
+ need SoC specific ones in addition, one of:
+ - "mediatek,mt7622-ahci"
+ - reg : Physical base addresses and length of register sets.
+ - interrupts : Interrupt associated with the SATA device.
+ - interrupt-names : Associated name must be: "hostc".
+ - clocks : A list of phandle and clock specifier pairs, one for each
+ entry in clock-names.
+ - clock-names : Associated names must be: "ahb", "axi", "asic", "rbc", "pm".
+ - phys : A phandle and PHY specifier pair for the PHY port.
+ - phy-names : Associated name must be: "sata-phy".
+ - ports-implemented : See ./ahci-platform.txt for details.
+
+Optional properties:
+ - power-domains : A phandle and power domain specifier pair to the power
+ domain which is responsible for collapsing and restoring
+ power to the peripheral.
+ - resets : Must contain an entry for each entry in reset-names.
+ See ../reset/reset.txt for details.
+ - reset-names : Associated names must be: "axi", "sw", "reg".
+ - mediatek,phy-mode : A phandle to the system controller, used to enable
+ SATA function.
+
+Example:
+
+ sata: sata@1a200000 {
+ compatible = "mediatek,mt7622-ahci",
+ "mediatek,mtk-ahci";
+ reg = <0 0x1a200000 0 0x1100>;
+ interrupts = <GIC_SPI 233 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hostc";
+ clocks = <&pciesys CLK_SATA_AHB_EN>,
+ <&pciesys CLK_SATA_AXI_EN>,
+ <&pciesys CLK_SATA_ASIC_EN>,
+ <&pciesys CLK_SATA_RBC_EN>,
+ <&pciesys CLK_SATA_PM_EN>;
+ clock-names = "ahb", "axi", "asic", "rbc", "pm";
+ phys = <&u3port1 PHY_TYPE_SATA>;
+ phy-names = "sata-phy";
+ ports-implemented = <0x1>;
+ power-domains = <&scpsys MT7622_POWER_DOMAIN_HIF0>;
+ resets = <&pciesys MT7622_SATA_AXI_BUS_RST>,
+ <&pciesys MT7622_SATA_PHY_SW_RST>,
+ <&pciesys MT7622_SATA_PHY_REG_RST>;
+ reset-names = "axi", "sw", "reg";
+ mediatek,phy-mode = <&pciesys>;
+ };
diff --git a/Documentation/devicetree/bindings/ata/sata_rcar.txt b/Documentation/devicetree/bindings/ata/sata_rcar.txt
index 0764f9ab63dc..e20eac7a3087 100644
--- a/Documentation/devicetree/bindings/ata/sata_rcar.txt
+++ b/Documentation/devicetree/bindings/ata/sata_rcar.txt
@@ -1,14 +1,22 @@
* Renesas R-Car SATA
Required properties:
-- compatible : should contain one of the following:
+- compatible : should contain one or more of the following:
- "renesas,sata-r8a7779" for R-Car H1
- ("renesas,rcar-sata" is deprecated)
- "renesas,sata-r8a7790-es1" for R-Car H2 ES1
- "renesas,sata-r8a7790" for R-Car H2 other than ES1
- "renesas,sata-r8a7791" for R-Car M2-W
- "renesas,sata-r8a7793" for R-Car M2-N
- "renesas,sata-r8a7795" for R-Car H3
+ - "renesas,rcar-gen2-sata" for a generic R-Car Gen2 compatible device
+ - "renesas,rcar-gen3-sata" for a generic R-Car Gen3 compatible device
+ - "renesas,rcar-sata" is deprecated
+
+ When compatible with the generic version, nodes
+ must list the SoC-specific version corresponding
+ to the platform first, followed by the generic
+ version.
+
- reg : address and length of the SATA registers;
- interrupts : must consist of one interrupt specifier.
- clocks : must contain a reference to the functional clock.
@@ -16,7 +24,7 @@ Required properties:
Example:
sata0: sata@ee300000 {
- compatible = "renesas,sata-r8a7791";
+ compatible = "renesas,sata-r8a7791", "renesas,rcar-gen2-sata";
reg = <0 0xee300000 0 0x2000>;
interrupt-parent = <&gic>;
interrupts = <0 105 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/Documentation/devicetree/bindings/clock/mt8173-cpu-dvfs.txt b/Documentation/devicetree/bindings/clock/mt8173-cpu-dvfs.txt
deleted file mode 100644
index 52b457c23eed..000000000000
--- a/Documentation/devicetree/bindings/clock/mt8173-cpu-dvfs.txt
+++ /dev/null
@@ -1,83 +0,0 @@
-Device Tree Clock bindins for CPU DVFS of Mediatek MT8173 SoC
-
-Required properties:
-- clocks: A list of phandle + clock-specifier pairs for the clocks listed in clock names.
-- clock-names: Should contain the following:
- "cpu" - The multiplexer for clock input of CPU cluster.
- "intermediate" - A parent of "cpu" clock which is used as "intermediate" clock
- source (usually MAINPLL) when the original CPU PLL is under
- transition and not stable yet.
- Please refer to Documentation/devicetree/bindings/clk/clock-bindings.txt for
- generic clock consumer properties.
-- proc-supply: Regulator for Vproc of CPU cluster.
-
-Optional properties:
-- sram-supply: Regulator for Vsram of CPU cluster. When present, the cpufreq driver
- needs to do "voltage tracking" to step by step scale up/down Vproc and
- Vsram to fit SoC specific needs. When absent, the voltage scaling
- flow is handled by hardware, hence no software "voltage tracking" is
- needed.
-
-Example:
---------
- cpu0: cpu@0 {
- device_type = "cpu";
- compatible = "arm,cortex-a53";
- reg = <0x000>;
- enable-method = "psci";
- cpu-idle-states = <&CPU_SLEEP_0>;
- clocks = <&infracfg CLK_INFRA_CA53SEL>,
- <&apmixedsys CLK_APMIXED_MAINPLL>;
- clock-names = "cpu", "intermediate";
- };
-
- cpu1: cpu@1 {
- device_type = "cpu";
- compatible = "arm,cortex-a53";
- reg = <0x001>;
- enable-method = "psci";
- cpu-idle-states = <&CPU_SLEEP_0>;
- clocks = <&infracfg CLK_INFRA_CA53SEL>,
- <&apmixedsys CLK_APMIXED_MAINPLL>;
- clock-names = "cpu", "intermediate";
- };
-
- cpu2: cpu@100 {
- device_type = "cpu";
- compatible = "arm,cortex-a57";
- reg = <0x100>;
- enable-method = "psci";
- cpu-idle-states = <&CPU_SLEEP_0>;
- clocks = <&infracfg CLK_INFRA_CA57SEL>,
- <&apmixedsys CLK_APMIXED_MAINPLL>;
- clock-names = "cpu", "intermediate";
- };
-
- cpu3: cpu@101 {
- device_type = "cpu";
- compatible = "arm,cortex-a57";
- reg = <0x101>;
- enable-method = "psci";
- cpu-idle-states = <&CPU_SLEEP_0>;
- clocks = <&infracfg CLK_INFRA_CA57SEL>,
- <&apmixedsys CLK_APMIXED_MAINPLL>;
- clock-names = "cpu", "intermediate";
- };
-
- &cpu0 {
- proc-supply = <&mt6397_vpca15_reg>;
- };
-
- &cpu1 {
- proc-supply = <&mt6397_vpca15_reg>;
- };
-
- &cpu2 {
- proc-supply = <&da9211_vcpu_reg>;
- sram-supply = <&mt6397_vsramca7_reg>;
- };
-
- &cpu3 {
- proc-supply = <&da9211_vcpu_reg>;
- sram-supply = <&mt6397_vsramca7_reg>;
- };
diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-mediatek.txt b/Documentation/devicetree/bindings/cpufreq/cpufreq-mediatek.txt
new file mode 100644
index 000000000000..f6403089edcf
--- /dev/null
+++ b/Documentation/devicetree/bindings/cpufreq/cpufreq-mediatek.txt
@@ -0,0 +1,247 @@
+Binding for MediaTek's CPUFreq driver
+=====================================
+
+Required properties:
+- clocks: A list of phandle + clock-specifier pairs for the clocks listed in clock names.
+- clock-names: Should contain the following:
+ "cpu" - The multiplexer for clock input of CPU cluster.
+ "intermediate" - A parent of "cpu" clock which is used as "intermediate" clock
+ source (usually MAINPLL) when the original CPU PLL is under
+ transition and not stable yet.
+ Please refer to Documentation/devicetree/bindings/clk/clock-bindings.txt for
+ generic clock consumer properties.
+- operating-points-v2: Please refer to Documentation/devicetree/bindings/opp/opp.txt
+ for detail.
+- proc-supply: Regulator for Vproc of CPU cluster.
+
+Optional properties:
+- sram-supply: Regulator for Vsram of CPU cluster. When present, the cpufreq driver
+ needs to do "voltage tracking" to step by step scale up/down Vproc and
+ Vsram to fit SoC specific needs. When absent, the voltage scaling
+ flow is handled by hardware, hence no software "voltage tracking" is
+ needed.
+- #cooling-cells:
+- cooling-min-level:
+- cooling-max-level:
+ Please refer to Documentation/devicetree/bindings/thermal/thermal.txt
+ for detail.
+
+Example 1 (MT7623 SoC):
+
+ cpu_opp_table: opp_table {
+ compatible = "operating-points-v2";
+ opp-shared;
+
+ opp-598000000 {
+ opp-hz = /bits/ 64 <598000000>;
+ opp-microvolt = <1050000>;
+ };
+
+ opp-747500000 {
+ opp-hz = /bits/ 64 <747500000>;
+ opp-microvolt = <1050000>;
+ };
+
+ opp-1040000000 {
+ opp-hz = /bits/ 64 <1040000000>;
+ opp-microvolt = <1150000>;
+ };
+
+ opp-1196000000 {
+ opp-hz = /bits/ 64 <1196000000>;
+ opp-microvolt = <1200000>;
+ };
+
+ opp-1300000000 {
+ opp-hz = /bits/ 64 <1300000000>;
+ opp-microvolt = <1300000>;
+ };
+ };
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a7";
+ reg = <0x0>;
+ clocks = <&infracfg CLK_INFRA_CPUSEL>,
+ <&apmixedsys CLK_APMIXED_MAINPLL>;
+ clock-names = "cpu", "intermediate";
+ operating-points-v2 = <&cpu_opp_table>;
+ #cooling-cells = <2>;
+ cooling-min-level = <0>;
+ cooling-max-level = <7>;
+ };
+ cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a7";
+ reg = <0x1>;
+ operating-points-v2 = <&cpu_opp_table>;
+ };
+ cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a7";
+ reg = <0x2>;
+ operating-points-v2 = <&cpu_opp_table>;
+ };
+ cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a7";
+ reg = <0x3>;
+ operating-points-v2 = <&cpu_opp_table>;
+ };
+
+Example 2 (MT8173 SoC):
+ cpu_opp_table_a: opp_table_a {
+ compatible = "operating-points-v2";
+ opp-shared;
+
+ opp-507000000 {
+ opp-hz = /bits/ 64 <507000000>;
+ opp-microvolt = <859000>;
+ };
+
+ opp-702000000 {
+ opp-hz = /bits/ 64 <702000000>;
+ opp-microvolt = <908000>;
+ };
+
+ opp-1001000000 {
+ opp-hz = /bits/ 64 <1001000000>;
+ opp-microvolt = <983000>;
+ };
+
+ opp-1105000000 {
+ opp-hz = /bits/ 64 <1105000000>;
+ opp-microvolt = <1009000>;
+ };
+
+ opp-1183000000 {
+ opp-hz = /bits/ 64 <1183000000>;
+ opp-microvolt = <1028000>;
+ };
+
+ opp-1404000000 {
+ opp-hz = /bits/ 64 <1404000000>;
+ opp-microvolt = <1083000>;
+ };
+
+ opp-1508000000 {
+ opp-hz = /bits/ 64 <1508000000>;
+ opp-microvolt = <1109000>;
+ };
+
+ opp-1573000000 {
+ opp-hz = /bits/ 64 <1573000000>;
+ opp-microvolt = <1125000>;
+ };
+ };
+
+ cpu_opp_table_b: opp_table_b {
+ compatible = "operating-points-v2";
+ opp-shared;
+
+ opp-507000000 {
+ opp-hz = /bits/ 64 <507000000>;
+ opp-microvolt = <828000>;
+ };
+
+ opp-702000000 {
+ opp-hz = /bits/ 64 <702000000>;
+ opp-microvolt = <867000>;
+ };
+
+ opp-1001000000 {
+ opp-hz = /bits/ 64 <1001000000>;
+ opp-microvolt = <927000>;
+ };
+
+ opp-1209000000 {
+ opp-hz = /bits/ 64 <1209000000>;
+ opp-microvolt = <968000>;
+ };
+
+ opp-1404000000 {
+ opp-hz = /bits/ 64 <1404000000>;
+ opp-microvolt = <1028000>;
+ };
+
+ opp-1612000000 {
+ opp-hz = /bits/ 64 <1612000000>;
+ opp-microvolt = <1049000>;
+ };
+
+ opp-1807000000 {
+ opp-hz = /bits/ 64 <1807000000>;
+ opp-microvolt = <1089000>;
+ };
+
+ opp-1989000000 {
+ opp-hz = /bits/ 64 <1989000000>;
+ opp-microvolt = <1125000>;
+ };
+ };
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x000>;
+ enable-method = "psci";
+ cpu-idle-states = <&CPU_SLEEP_0>;
+ clocks = <&infracfg CLK_INFRA_CA53SEL>,
+ <&apmixedsys CLK_APMIXED_MAINPLL>;
+ clock-names = "cpu", "intermediate";
+ operating-points-v2 = <&cpu_opp_table_a>;
+ };
+
+ cpu1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x001>;
+ enable-method = "psci";
+ cpu-idle-states = <&CPU_SLEEP_0>;
+ clocks = <&infracfg CLK_INFRA_CA53SEL>,
+ <&apmixedsys CLK_APMIXED_MAINPLL>;
+ clock-names = "cpu", "intermediate";
+ operating-points-v2 = <&cpu_opp_table_a>;
+ };
+
+ cpu2: cpu@100 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57";
+ reg = <0x100>;
+ enable-method = "psci";
+ cpu-idle-states = <&CPU_SLEEP_0>;
+ clocks = <&infracfg CLK_INFRA_CA57SEL>,
+ <&apmixedsys CLK_APMIXED_MAINPLL>;
+ clock-names = "cpu", "intermediate";
+ operating-points-v2 = <&cpu_opp_table_b>;
+ };
+
+ cpu3: cpu@101 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57";
+ reg = <0x101>;
+ enable-method = "psci";
+ cpu-idle-states = <&CPU_SLEEP_0>;
+ clocks = <&infracfg CLK_INFRA_CA57SEL>,
+ <&apmixedsys CLK_APMIXED_MAINPLL>;
+ clock-names = "cpu", "intermediate";
+ operating-points-v2 = <&cpu_opp_table_b>;
+ };
+
+ &cpu0 {
+ proc-supply = <&mt6397_vpca15_reg>;
+ };
+
+ &cpu1 {
+ proc-supply = <&mt6397_vpca15_reg>;
+ };
+
+ &cpu2 {
+ proc-supply = <&da9211_vcpu_reg>;
+ sram-supply = <&mt6397_vsramca7_reg>;
+ };
+
+ &cpu3 {
+ proc-supply = <&da9211_vcpu_reg>;
+ sram-supply = <&mt6397_vsramca7_reg>;
+ };
diff --git a/Documentation/devicetree/bindings/crypto/artpec6-crypto.txt b/Documentation/devicetree/bindings/crypto/artpec6-crypto.txt
new file mode 100644
index 000000000000..d9cca4875bd6
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/artpec6-crypto.txt
@@ -0,0 +1,16 @@
+Axis crypto engine with PDMA interface.
+
+Required properties:
+- compatible : Should be one of the following strings:
+ "axis,artpec6-crypto" for the version in the Axis ARTPEC-6 SoC
+ "axis,artpec7-crypto" for the version in the Axis ARTPEC-7 SoC.
+- reg: Base address and size for the PDMA register area.
+- interrupts: Interrupt handle for the PDMA interrupt line.
+
+Example:
+
+crypto@f4264000 {
+ compatible = "axis,artpec6-crypto";
+ reg = <0xf4264000 0x1000>;
+ interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
+};
diff --git a/Documentation/devicetree/bindings/crypto/atmel-crypto.txt b/Documentation/devicetree/bindings/crypto/atmel-crypto.txt
index f2aab3dc2b52..7de1a9674c70 100644
--- a/Documentation/devicetree/bindings/crypto/atmel-crypto.txt
+++ b/Documentation/devicetree/bindings/crypto/atmel-crypto.txt
@@ -66,3 +66,16 @@ sha@f8034000 {
dmas = <&dma1 2 17>;
dma-names = "tx";
};
+
+* Elliptic Curve Cryptography (I2C)
+
+Required properties:
+- compatible : must be "atmel,atecc508a".
+- reg: I2C bus address of the device.
+- clock-frequency: must be present in the i2c controller node.
+
+Example:
+atecc508a@C0 {
+ compatible = "atmel,atecc508a";
+ reg = <0xC0>;
+};
diff --git a/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt b/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt
index f69773f4252b..941bb6a6fb13 100644
--- a/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt
+++ b/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt
@@ -8,7 +8,6 @@ Required properties:
Optional properties:
- clocks: Reference to the crypto engine clock.
-- dma-mask: The address mask limitation. Defaults to 64.
Example:
@@ -24,6 +23,5 @@ Example:
interrupt-names = "mem", "ring0", "ring1", "ring2", "ring3",
"eip";
clocks = <&cpm_syscon0 1 26>;
- dma-mask = <0xff 0xffffffff>;
status = "disabled";
};
diff --git a/Documentation/devicetree/bindings/crypto/st,stm32-hash.txt b/Documentation/devicetree/bindings/crypto/st,stm32-hash.txt
new file mode 100644
index 000000000000..04fc246f02f7
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/st,stm32-hash.txt
@@ -0,0 +1,30 @@
+* STMicroelectronics STM32 HASH
+
+Required properties:
+- compatible: Should contain entries for this and backward compatible
+ HASH versions:
+ - "st,stm32f456-hash" for stm32 F456.
+ - "st,stm32f756-hash" for stm32 F756.
+- reg: The address and length of the peripheral registers space
+- interrupts: the interrupt specifier for the HASH
+- clocks: The input clock of the HASH instance
+
+Optional properties:
+- resets: The input reset of the HASH instance
+- dmas: DMA specifiers for the HASH. See the DMA client binding,
+ Documentation/devicetree/bindings/dma/dma.txt
+- dma-names: DMA request name. Should be "in" if a dma is present.
+- dma-maxburst: Set number of maximum dma burst supported
+
+Example:
+
+hash1: hash@50060400 {
+ compatible = "st,stm32f756-hash";
+ reg = <0x50060400 0x400>;
+ interrupts = <80>;
+ clocks = <&rcc 0 STM32F7_AHB2_CLOCK(HASH)>;
+ resets = <&rcc STM32F7_AHB2_RESET(HASH)>;
+ dmas = <&dma2 7 2 0x400 0x0>;
+ dma-names = "in";
+ dma-maxburst = <0>;
+};
diff --git a/Documentation/devicetree/bindings/display/bridge/dw_mipi_dsi.txt b/Documentation/devicetree/bindings/display/bridge/dw_mipi_dsi.txt
new file mode 100644
index 000000000000..b13adf30b8d3
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/bridge/dw_mipi_dsi.txt
@@ -0,0 +1,32 @@
+Synopsys DesignWare MIPI DSI host controller
+============================================
+
+This document defines device tree properties for the Synopsys DesignWare MIPI
+DSI host controller. It doesn't constitute a device tree binding specification
+by itself but is meant to be referenced by platform-specific device tree
+bindings.
+
+When referenced from platform device tree bindings the properties defined in
+this document are defined as follows. The platform device tree bindings are
+responsible for defining whether each optional property is used or not.
+
+- reg: Memory mapped base address and length of the DesignWare MIPI DSI
+ host controller registers. (mandatory)
+
+- clocks: References to all the clocks specified in the clock-names property
+ as specified in [1]. (mandatory)
+
+- clock-names:
+ - "pclk" is the peripheral clock for either AHB and APB. (mandatory)
+ - "px_clk" is the pixel clock for the DPI/RGB input. (optional)
+
+- resets: References to all the resets specified in the reset-names property
+ as specified in [2]. (optional)
+
+- reset-names: string reset name, must be "apb" if used. (optional)
+
+- panel or bridge node: see [3]. (mandatory)
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+[2] Documentation/devicetree/bindings/reset/reset.txt
+[3] Documentation/devicetree/bindings/display/mipi-dsi-bus.txt
diff --git a/Documentation/devicetree/bindings/display/exynos/exynos5433-decon.txt b/Documentation/devicetree/bindings/display/exynos/exynos5433-decon.txt
index 549c538b38a5..fc2588292a68 100644
--- a/Documentation/devicetree/bindings/display/exynos/exynos5433-decon.txt
+++ b/Documentation/devicetree/bindings/display/exynos/exynos5433-decon.txt
@@ -25,12 +25,6 @@ Required properties:
size-cells must 1 and 0, respectively.
- port: contains an endpoint node which is connected to the endpoint in the mic
node. The reg value muset be 0.
-- i80-if-timings: specify whether the panel which is connected to decon uses
- i80 lcd interface or mipi video interface. This node contains
- no timing information as that of fimd does. Because there is
- no register in decon to specify i80 interface timing value,
- it is not needed, but make it remain to use same kind of node
- in fimd and exynos7 decon.
Example:
SoC specific DT entry:
@@ -59,9 +53,3 @@ decon: decon@13800000 {
};
};
};
-
-Board specific DT entry:
-&decon {
- i80-if-timings {
- };
-};
diff --git a/Documentation/devicetree/bindings/display/repaper.txt b/Documentation/devicetree/bindings/display/repaper.txt
new file mode 100644
index 000000000000..f5f9f9cf6a25
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/repaper.txt
@@ -0,0 +1,52 @@
+Pervasive Displays RePaper branded e-ink displays
+
+Required properties:
+- compatible: "pervasive,e1144cs021" for 1.44" display
+ "pervasive,e1190cs021" for 1.9" display
+ "pervasive,e2200cs021" for 2.0" display
+ "pervasive,e2271cs021" for 2.7" display
+
+- panel-on-gpios: Timing controller power control
+- discharge-gpios: Discharge control
+- reset-gpios: RESET pin
+- busy-gpios: BUSY pin
+
+Required property for e2271cs021:
+- border-gpios: Border control
+
+The node for this driver must be a child node of a SPI controller, hence
+all mandatory properties described in ../spi/spi-bus.txt must be specified.
+
+Optional property:
+- pervasive,thermal-zone: name of thermometer's thermal zone
+
+Example:
+
+ display_temp: lm75@48 {
+ compatible = "lm75b";
+ reg = <0x48>;
+ #thermal-sensor-cells = <0>;
+ };
+
+ thermal-zones {
+ display {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&display_temp>;
+ };
+ };
+
+ papirus27@0{
+ compatible = "pervasive,e2271cs021";
+ reg = <0>;
+
+ spi-max-frequency = <8000000>;
+
+ panel-on-gpios = <&gpio 23 0>;
+ border-gpios = <&gpio 14 0>;
+ discharge-gpios = <&gpio 15 0>;
+ reset-gpios = <&gpio 24 0>;
+ busy-gpios = <&gpio 25 0>;
+
+ pervasive,thermal-zone = "display";
+ };
diff --git a/Documentation/devicetree/bindings/display/rockchip/dw_hdmi-rockchip.txt b/Documentation/devicetree/bindings/display/rockchip/dw_hdmi-rockchip.txt
index 046076c6b277..fad8b7619647 100644
--- a/Documentation/devicetree/bindings/display/rockchip/dw_hdmi-rockchip.txt
+++ b/Documentation/devicetree/bindings/display/rockchip/dw_hdmi-rockchip.txt
@@ -11,7 +11,9 @@ following device-specific properties.
Required properties:
-- compatible: Shall contain "rockchip,rk3288-dw-hdmi".
+- compatible: should be one of the following:
+ "rockchip,rk3288-dw-hdmi"
+ "rockchip,rk3399-dw-hdmi"
- reg: See dw_hdmi.txt.
- reg-io-width: See dw_hdmi.txt. Shall be 4.
- interrupts: HDMI interrupt number
@@ -30,7 +32,8 @@ Optional properties
I2C master controller.
- clock-names: See dw_hdmi.txt. The "cec" clock is optional.
- clock-names: May contain "cec" as defined in dw_hdmi.txt.
-
+- clock-names: May contain "grf", power for grf io.
+- clock-names: May contain "vpll", external clock for some hdmi phy.
Example:
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt b/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt
index 9eb3f0a2a078..5d835d9c1ba8 100644
--- a/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt
+++ b/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt
@@ -8,8 +8,12 @@ Required properties:
- compatible: value should be one of the following
"rockchip,rk3036-vop";
"rockchip,rk3288-vop";
+ "rockchip,rk3368-vop";
+ "rockchip,rk3366-vop";
"rockchip,rk3399-vop-big";
"rockchip,rk3399-vop-lit";
+ "rockchip,rk3228-vop";
+ "rockchip,rk3328-vop";
- interrupts: should contain a list of all VOP IP block interrupts in the
order: VSYNC, LCD_SYSTEM. The interrupt specifier
diff --git a/Documentation/devicetree/bindings/display/sitronix,st7586.txt b/Documentation/devicetree/bindings/display/sitronix,st7586.txt
new file mode 100644
index 000000000000..1d0dad1210d3
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/sitronix,st7586.txt
@@ -0,0 +1,22 @@
+Sitronix ST7586 display panel
+
+Required properties:
+- compatible: "lego,ev3-lcd".
+- a0-gpios: The A0 signal (since this binding is for serial mode, this is
+ the pin labeled D1 on the controller, not the pin labeled A0)
+- reset-gpios: Reset pin
+
+The node for this driver must be a child node of a SPI controller, hence
+all mandatory properties described in ../spi/spi-bus.txt must be specified.
+
+Optional properties:
+- rotation: panel rotation in degrees counter clockwise (0,90,180,270)
+
+Example:
+ display@0{
+ compatible = "lego,ev3-lcd";
+ reg = <0>;
+ spi-max-frequency = <10000000>;
+ a0-gpios = <&gpio 43 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio 80 GPIO_ACTIVE_HIGH>;
+ };
diff --git a/Documentation/devicetree/bindings/display/st,stm32-ltdc.txt b/Documentation/devicetree/bindings/display/st,stm32-ltdc.txt
index 8e1476941c0f..74b5ac7b26d6 100644
--- a/Documentation/devicetree/bindings/display/st,stm32-ltdc.txt
+++ b/Documentation/devicetree/bindings/display/st,stm32-ltdc.txt
@@ -1,7 +1,6 @@
* STMicroelectronics STM32 lcd-tft display controller
- ltdc: lcd-tft display controller host
- must be a sub-node of st-display-subsystem
Required properties:
- compatible: "st,stm32-ltdc"
- reg: Physical base address of the IP registers and length of memory mapped region.
@@ -13,8 +12,40 @@
Required nodes:
- Video port for RGB output.
-Example:
+* STMicroelectronics STM32 DSI controller specific extensions to Synopsys
+ DesignWare MIPI DSI host controller
+The STMicroelectronics STM32 DSI controller uses the Synopsys DesignWare MIPI
+DSI host controller. For all mandatory properties & nodes, please refer
+to the related documentation in [5].
+
+Mandatory properties specific to STM32 DSI:
+- #address-cells: Should be <1>.
+- #size-cells: Should be <0>.
+- compatible: "st,stm32-dsi".
+- clock-names:
+ - phy pll reference clock string name, must be "ref".
+- resets: see [5].
+- reset-names: see [5].
+
+Mandatory nodes specific to STM32 DSI:
+- ports: A node containing DSI input & output port nodes with endpoint
+ definitions as documented in [3] & [4].
+ - port@0: DSI input port node, connected to the ltdc rgb output port.
+ - port@1: DSI output port node, connected to a panel or a bridge input port.
+- panel or bridge node: A node containing the panel or bridge description as
+ documented in [6].
+ - port: panel or bridge port node, connected to the DSI output port (port@1).
+
+Note: You can find more documentation in the following references
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+[2] Documentation/devicetree/bindings/reset/reset.txt
+[3] Documentation/devicetree/bindings/media/video-interfaces.txt
+[4] Documentation/devicetree/bindings/graph.txt
+[5] Documentation/devicetree/bindings/display/bridge/dw_mipi_dsi.txt
+[6] Documentation/devicetree/bindings/display/mipi-dsi-bus.txt
+
+Example 1: RGB panel
/ {
...
soc {
@@ -34,3 +65,73 @@ Example:
};
};
};
+
+Example 2: DSI panel
+
+/ {
+ ...
+ soc {
+ ...
+ ltdc: display-controller@40016800 {
+ compatible = "st,stm32-ltdc";
+ reg = <0x40016800 0x200>;
+ interrupts = <88>, <89>;
+ resets = <&rcc STM32F4_APB2_RESET(LTDC)>;
+ clocks = <&rcc 1 CLK_LCD>;
+ clock-names = "lcd";
+
+ port {
+ ltdc_out_dsi: endpoint {
+ remote-endpoint = <&dsi_in>;
+ };
+ };
+ };
+
+
+ dsi: dsi@40016c00 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32-dsi";
+ reg = <0x40016c00 0x800>;
+ clocks = <&rcc 1 CLK_F469_DSI>, <&clk_hse>;
+ clock-names = "ref", "pclk";
+ resets = <&rcc STM32F4_APB2_RESET(DSI)>;
+ reset-names = "apb";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ dsi_in: endpoint {
+ remote-endpoint = <&ltdc_out_dsi>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ dsi_out: endpoint {
+ remote-endpoint = <&dsi_in_panel>;
+ };
+ };
+
+ };
+
+ panel-dsi@0 {
+ reg = <0>; /* dsi virtual channel (0..3) */
+ compatible = ...;
+ enable-gpios = ...;
+
+ port {
+ dsi_in_panel: endpoint {
+ remote-endpoint = <&dsi_out>;
+ };
+ };
+
+ };
+
+ };
+
+ };
+};
diff --git a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
index b83e6018041d..2ee6ff0ef98e 100644
--- a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
+++ b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
@@ -4,15 +4,33 @@ Allwinner A10 Display Pipeline
The Allwinner A10 Display pipeline is composed of several components
that are going to be documented below:
-For the input port of all components up to the TCON in the display
-pipeline, if there are multiple components, the local endpoint IDs
-must correspond to the index of the upstream block. For example, if
-the remote endpoint is Frontend 1, then the local endpoint ID must
-be 1.
-
-Conversely, for the output ports of the same group, the remote endpoint
-ID must be the index of the local hardware block. If the local backend
-is backend 1, then the remote endpoint ID must be 1.
+For all connections between components up to the TCONs in the display
+pipeline, when there are multiple components of the same type at the
+same depth, the local endpoint ID must be the same as the remote
+component's index. For example, if the remote endpoint is Frontend 1,
+then the local endpoint ID must be 1.
+
+ Frontend 0 [0] ------- [0] Backend 0 [0] ------- [0] TCON 0
+ [1] -- -- [1] [1] -- -- [1]
+ \ / \ /
+ X X
+ / \ / \
+ [0] -- -- [0] [0] -- -- [0]
+ Frontend 1 [1] ------- [1] Backend 1 [1] ------- [1] TCON 1
+
+For a two pipeline system such as the one depicted above, the lines
+represent the connections between the components, while the numbers
+within the square brackets corresponds to the ID of the local endpoint.
+
+The same rule also applies to DE 2.0 mixer-TCON connections:
+
+ Mixer 0 [0] ----------- [0] TCON 0
+ [1] ---- ---- [1]
+ \ /
+ X
+ / \
+ [0] ---- ---- [0]
+ Mixer 1 [1] ----------- [1] TCON 1
HDMI Encoder
------------
diff --git a/Documentation/devicetree/bindings/extcon/extcon-usbc-cros-ec.txt b/Documentation/devicetree/bindings/extcon/extcon-usbc-cros-ec.txt
new file mode 100644
index 000000000000..8e8625c00dfa
--- /dev/null
+++ b/Documentation/devicetree/bindings/extcon/extcon-usbc-cros-ec.txt
@@ -0,0 +1,24 @@
+ChromeOS EC USB Type-C cable and accessories detection
+
+On ChromeOS systems with USB Type C ports, the ChromeOS Embedded Controller is
+able to detect the state of external accessories such as display adapters
+or USB devices when said accessories are attached or detached.
+
+The node for this device must be under a cros-ec node like google,cros-ec-spi
+or google,cros-ec-i2c.
+
+Required properties:
+- compatible: Should be "google,extcon-usbc-cros-ec".
+- google,usb-port-id: Specifies the USB port ID to use.
+
+Example:
+ cros-ec@0 {
+ compatible = "google,cros-ec-i2c";
+
+ ...
+
+ extcon {
+ compatible = "google,extcon-usbc-cros-ec";
+ google,usb-port-id = <0>;
+ };
+ }
diff --git a/Documentation/devicetree/bindings/fpga/altera-passive-serial.txt b/Documentation/devicetree/bindings/fpga/altera-passive-serial.txt
new file mode 100644
index 000000000000..48478bc07e29
--- /dev/null
+++ b/Documentation/devicetree/bindings/fpga/altera-passive-serial.txt
@@ -0,0 +1,29 @@
+Altera Passive Serial SPI FPGA Manager
+
+Altera FPGAs support a method of loading the bitstream over what is
+referred to as "passive serial".
+The passive serial link is not technically SPI, and might require extra
+circuits in order to play nicely with other SPI slaves on the same bus.
+
+See https://www.altera.com/literature/hb/cyc/cyc_c51013.pdf
+
+Required properties:
+- compatible: Must be one of the following:
+ "altr,fpga-passive-serial",
+ "altr,fpga-arria10-passive-serial"
+- reg: SPI chip select of the FPGA
+- nconfig-gpios: config pin (referred to as nCONFIG in the manual)
+- nstat-gpios: status pin (referred to as nSTATUS in the manual)
+
+Optional properties:
+- confd-gpios: confd pin (referred to as CONF_DONE in the manual)
+
+Example:
+ fpga: fpga@0 {
+ compatible = "altr,fpga-passive-serial";
+ spi-max-frequency = <20000000>;
+ reg = <0>;
+ nconfig-gpios = <&gpio4 9 GPIO_ACTIVE_LOW>;
+ nstat-gpios = <&gpio4 11 GPIO_ACTIVE_LOW>;
+ confd-gpios = <&gpio4 12 GPIO_ACTIVE_LOW>;
+ };
diff --git a/Documentation/devicetree/bindings/fpga/xilinx-pr-decoupler.txt b/Documentation/devicetree/bindings/fpga/xilinx-pr-decoupler.txt
new file mode 100644
index 000000000000..8dcfba926bc7
--- /dev/null
+++ b/Documentation/devicetree/bindings/fpga/xilinx-pr-decoupler.txt
@@ -0,0 +1,36 @@
+Xilinx LogiCORE Partial Reconfig Decoupler Softcore
+
+The Xilinx LogiCORE Partial Reconfig Decoupler manages one or more
+decouplers / fpga bridges.
+The controller can decouple/disable the bridges which prevents signal
+changes from passing through the bridge. The controller can also
+couple / enable the bridges which allows traffic to pass through the
+bridge normally.
+
+The driver supports only MMIO handling. A PR region can have multiple
+PR Decouplers which can be handled independently or chained via decouple/
+decouple_status signals.
+
+Required properties:
+- compatible : Should contain "xlnx,pr-decoupler-1.00" followed by
+ "xlnx,pr-decoupler"
+- regs : base address and size for decoupler module
+- clocks : input clock to IP
+- clock-names : should contain "aclk"
+
+Optional properties:
+- bridge-enable : 0 if driver should disable bridge at startup
+ 1 if driver should enable bridge at startup
+ Default is to leave bridge in current state.
+
+See Documentation/devicetree/bindings/fpga/fpga-region.txt for generic bindings.
+
+Example:
+ fpga-bridge@100000450 {
+ compatible = "xlnx,pr-decoupler-1.00",
+ "xlnx-pr-decoupler";
+ regs = <0x10000045 0x10>;
+ clocks = <&clkc 15>;
+ clock-names = "aclk";
+ bridge-enable = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/gpio/gpio-74x164.txt b/Documentation/devicetree/bindings/gpio/gpio-74x164.txt
index ce1b2231bf5d..2a97553d8d76 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-74x164.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-74x164.txt
@@ -12,6 +12,9 @@ Required properties:
1 = active low
- registers-number: Number of daisy-chained shift registers
+Optional properties:
+- enable-gpios: GPIO connected to the OE (Output Enable) pin.
+
Example:
gpio5: gpio5@0 {
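The example above is truncated by the diff context; a fuller sketch showing the new
enable-gpios property might look like this (the compatible string, SPI settings and
GPIO specifier are illustrative assumptions, not taken from the patch):

 gpio5: gpio5@0 {
	compatible = "fairchild,74hc595";
	reg = <0>;
	gpio-controller;
	#gpio-cells = <2>;
	registers-number = <4>;
	/* OE pin driven through an external GPIO line */
	enable-gpios = <&gpio4 10 GPIO_ACTIVE_LOW>;
	spi-max-frequency = <100000>;
 };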
diff --git a/Documentation/devicetree/bindings/gpio/gpio-aspeed.txt b/Documentation/devicetree/bindings/gpio/gpio-aspeed.txt
index c756afa88cc6..fc6378c778c5 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-aspeed.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-aspeed.txt
@@ -18,7 +18,7 @@ Required properties:
Optional properties:
- interrupt-parent : The parent interrupt controller, optional if inherited
-- clocks : A phandle to the HPLL clock node for debounce timings
+- clocks : A phandle to the clock to use for debounce timings
The gpio and interrupt properties are further described in their respective
bindings documentation:
diff --git a/Documentation/devicetree/bindings/gpio/gpio-davinci.txt b/Documentation/devicetree/bindings/gpio/gpio-davinci.txt
index 5079ba7d6568..8beb0539b6d8 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-davinci.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-davinci.txt
@@ -1,7 +1,10 @@
Davinci/Keystone GPIO controller bindings
Required Properties:
-- compatible: should be "ti,dm6441-gpio", "ti,keystone-gpio"
+- compatible: should be "ti,dm6441-gpio": for Davinci da850 SoCs
+ "ti,keystone-gpio": for Keystone 2 66AK2H/K, 66AK2L,
+ 66AK2E SoCs
+ "ti,k2g-gpio", "ti,keystone-gpio": for 66AK2G
- reg: Physical base address of the controller and the size of memory mapped
registers.
@@ -20,7 +23,21 @@ Required Properties:
- ti,ngpio: The number of GPIO pins supported.
- ti,davinci-gpio-unbanked: The number of GPIOs that have an individual interrupt
- line to processor.
+ line to processor.
+
+- clocks: Should contain the device's input clock, and should be defined as per
+ the appropriate clock bindings consumer usage in,
+
+ Documentation/devicetree/bindings/clock/keystone-gate.txt
+ for 66AK2HK/66AK2L/66AK2E SoCs or,
+
+ Documentation/devicetree/bindings/clock/ti,sci-clk.txt
+ for 66AK2G SoCs
+
+- clock-names: Name should be "gpio";
+
+Currently clock-names and clocks are needed for all Keystone 2 platforms.
+Davinci platforms do not have DT clocks as of now.
The GPIO controller also acts as an interrupt controller. It uses the default
two cells specifier as described in Documentation/devicetree/bindings/
@@ -60,3 +77,73 @@ leds {
...
};
};
+
+Example for 66AK2G:
+
+gpio0: gpio@2603000 {
+ compatible = "ti,k2g-gpio", "ti,keystone-gpio";
+ reg = <0x02603000 0x100>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupts = <GIC_SPI 432 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 433 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 434 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 435 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 436 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 437 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 438 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 439 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 440 IRQ_TYPE_EDGE_RISING>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ ti,ngpio = <144>;
+ ti,davinci-gpio-unbanked = <0>;
+ clocks = <&k2g_clks 0x001b 0x0>;
+ clock-names = "gpio";
+};
+
+Example for 66AK2HK/66AK2L/66AK2E:
+
+gpio0: gpio@260bf00 {
+ compatible = "ti,keystone-gpio";
+ reg = <0x0260bf00 0x100>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ /* HW Interrupts mapped to GPIO pins */
+ interrupts = <GIC_SPI 120 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 121 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 122 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 123 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 124 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 125 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 126 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 127 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 128 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 129 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 130 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 131 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 132 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 133 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 134 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 135 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 136 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 137 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 138 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 139 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 140 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 141 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 142 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 143 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 144 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 145 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 146 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 147 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 148 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 149 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 150 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 151 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&clkgpio>;
+ clock-names = "gpio";
+ ti,ngpio = <32>;
+ ti,davinci-gpio-unbanked = <32>;
+};
diff --git a/Documentation/devicetree/bindings/gpio/gpio-exar.txt b/Documentation/devicetree/bindings/gpio/gpio-exar.txt
new file mode 100644
index 000000000000..4540d61824af
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/gpio-exar.txt
@@ -0,0 +1,5 @@
+Exportable MPIO interface of Exar UART chips
+
+Required properties of the device:
+ - exar,first-pin: first exportable pins (0..15)
+ - ngpios: number of exportable pins (1..16)
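The binding above does not include an example; a minimal sketch of such a node
might look as follows (the node name and values are assumptions, and the node
would normally sit under the Exar UART device that provides the MPIO pins):

 gpio-exar {
	exar,first-pin = <0>;
	ngpios = <16>;
 };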
diff --git a/Documentation/devicetree/bindings/gpio/gpio-vf610.txt b/Documentation/devicetree/bindings/gpio/gpio-vf610.txt
index 436cc99c6598..0ccbae44019c 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-vf610.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-vf610.txt
@@ -5,7 +5,9 @@ functionality. Each pair serves 32 GPIOs. The VF610 has 5 instances of
each, and each PORT module has its own interrupt.
Required properties for GPIO node:
-- compatible : Should be "fsl,<soc>-gpio", currently "fsl,vf610-gpio"
+- compatible : Should be "fsl,<soc>-gpio", below is supported list:
+ "fsl,vf610-gpio"
+ "fsl,imx7ulp-gpio"
- reg : The first reg tuple represents the PORT module, the second tuple
the GPIO module.
- interrupts : Should be the port interrupt shared by all 32 pins.
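As a rough sketch of a node using the newly added i.MX7ULP compatible (the
register addresses and interrupt number below are placeholders, not values from
a real SoC description):

 gpio0: gpio@4103d000 {
	compatible = "fsl,imx7ulp-gpio";
	/* first tuple: PORT module, second tuple: GPIO module */
	reg = <0x4103d000 0x1000>, <0x400f0000 0x1000>;
	gpio-controller;
	#gpio-cells = <2>;
	interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
	interrupt-controller;
	#interrupt-cells = <2>;
 };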
diff --git a/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt b/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
index 6826a371fb69..51c86f69995e 100644
--- a/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
+++ b/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
@@ -2,8 +2,9 @@
Required Properties:
- - compatible: should contain one of the following.
+ - compatible: should contain one or more of the following:
- "renesas,gpio-r8a7743": for R8A7743 (RZ/G1M) compatible GPIO controller.
+ - "renesas,gpio-r8a7745": for R8A7745 (RZ/G1E) compatible GPIO controller.
- "renesas,gpio-r8a7778": for R8A7778 (R-Mobile M1) compatible GPIO controller.
- "renesas,gpio-r8a7779": for R8A7779 (R-Car H1) compatible GPIO controller.
- "renesas,gpio-r8a7790": for R8A7790 (R-Car H2) compatible GPIO controller.
@@ -13,7 +14,14 @@ Required Properties:
- "renesas,gpio-r8a7794": for R8A7794 (R-Car E2) compatible GPIO controller.
- "renesas,gpio-r8a7795": for R8A7795 (R-Car H3) compatible GPIO controller.
- "renesas,gpio-r8a7796": for R8A7796 (R-Car M3-W) compatible GPIO controller.
- - "renesas,gpio-rcar": for generic R-Car GPIO controller.
+ - "renesas,rcar-gen1-gpio": for a generic R-Car Gen1 GPIO controller.
+ - "renesas,rcar-gen2-gpio": for a generic R-Car Gen2 or RZ/G1 GPIO controller.
+ - "renesas,rcar-gen3-gpio": for a generic R-Car Gen3 GPIO controller.
+ - "renesas,gpio-rcar": deprecated.
+
+ When compatible with the generic version nodes must list the
+ SoC-specific version corresponding to the platform first followed by
+ the generic version.
- reg: Base address and length of each memory resource used by the GPIO
controller hardware module.
@@ -43,7 +51,7 @@ interrupt-controller/interrupts.txt.
Example: R8A7779 (R-Car H1) GPIO controller nodes
gpio0: gpio@ffc40000 {
- compatible = "renesas,gpio-r8a7779", "renesas,gpio-rcar";
+ compatible = "renesas,gpio-r8a7779", "renesas,rcar-gen1-gpio";
reg = <0xffc40000 0x2c>;
interrupt-parent = <&gic>;
interrupts = <0 141 0x4>;
@@ -55,7 +63,7 @@ Example: R8A7779 (R-Car H1) GPIO controller nodes
};
...
gpio6: gpio@ffc46000 {
- compatible = "renesas,gpio-r8a7779", "renesas,gpio-rcar";
+ compatible = "renesas,gpio-r8a7779", "renesas,rcar-gen1-gpio";
reg = <0xffc46000 0x2c>;
interrupt-parent = <&gic>;
interrupts = <0 147 0x4>;
diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-midgard.txt b/Documentation/devicetree/bindings/gpu/arm,mali-midgard.txt
index d3b6e1a4713a..5aa5926029ee 100644
--- a/Documentation/devicetree/bindings/gpu/arm,mali-midgard.txt
+++ b/Documentation/devicetree/bindings/gpu/arm,mali-midgard.txt
@@ -40,7 +40,7 @@ Optional properties:
Example for a Mali-T760:
gpu@ffa30000 {
- compatible = "rockchip,rk3288-mali", "arm,mali-t760", "arm,mali-midgard";
+ compatible = "rockchip,rk3288-mali", "arm,mali-t760";
reg = <0xffa30000 0x10000>;
interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/Documentation/devicetree/bindings/hwmon/aspeed-pwm-tacho.txt b/Documentation/devicetree/bindings/hwmon/aspeed-pwm-tacho.txt
index cf4460564adb..367c8203213b 100644
--- a/Documentation/devicetree/bindings/hwmon/aspeed-pwm-tacho.txt
+++ b/Documentation/devicetree/bindings/hwmon/aspeed-pwm-tacho.txt
@@ -11,6 +11,8 @@ Required properties for pwm-tacho node:
- #size-cells : should be 1.
+- #cooling-cells: should be 2.
+
- reg : address and length of the register set for the device.
- pinctrl-names : a pinctrl state named "default" must be defined.
@@ -28,12 +30,17 @@ fan subnode format:
Under the fan subnode there can be up to 8 child nodes, with each child node
representing a fan. If there are 8 fans each fan can have one PWM port and
one/two Fan tach inputs.
+A PWM port can be configured with cooling-levels to create a cooling device.
+The cooling device can be bound to a thermal zone for thermal control.
Required properties for each child node:
- reg : should specify PWM source port.
integer value in the range 0 to 7 with 0 indicating PWM port A and
7 indicating PWM port H.
+- cooling-levels: PWM duty cycle values in a range from 0 to 255
+ which correspond to thermal cooling states.
+
- aspeed,fan-tach-ch : should specify the Fan tach input channel.
integer value in the range 0 through 15, with 0 indicating
Fan tach channel 0 and 15 indicating Fan tach channel 15.
@@ -50,6 +57,7 @@ pwm_tacho_fixed_clk: fixedclk {
pwm_tacho: pwmtachocontroller@1e786000 {
#address-cells = <1>;
#size-cells = <1>;
+ #cooling-cells = <2>;
reg = <0x1E786000 0x1000>;
compatible = "aspeed,ast2500-pwm-tacho";
clocks = <&pwm_tacho_fixed_clk>;
@@ -58,6 +66,7 @@ pwm_tacho: pwmtachocontroller@1e786000 {
fan@0 {
reg = <0x00>;
+ cooling-levels = /bits/ 8 <125 151 177 203 229 255>;
aspeed,fan-tach-ch = /bits/ 8 <0x00>;
};
diff --git a/Documentation/devicetree/bindings/hwmon/ibm,cffps1.txt b/Documentation/devicetree/bindings/hwmon/ibm,cffps1.txt
new file mode 100644
index 000000000000..f68a0a68fc52
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/ibm,cffps1.txt
@@ -0,0 +1,21 @@
+Device-tree bindings for IBM Common Form Factor Power Supply Version 1
+----------------------------------------------------------------------
+
+Required properties:
+ - compatible = "ibm,cffps1";
+ - reg = < I2C bus address >; : Address of the power supply on the
+ I2C bus.
+
+Example:
+
+ i2c-bus@100 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #interrupt-cells = <1>;
+ < more properties >
+
+ power-supply@68 {
+ compatible = "ibm,cffps1";
+ reg = <0x68>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/hwmon/ltq-cputemp.txt b/Documentation/devicetree/bindings/hwmon/ltq-cputemp.txt
new file mode 100644
index 000000000000..33fd00a987c7
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/ltq-cputemp.txt
@@ -0,0 +1,10 @@
+Lantiq CPU temperature sensor
+
+Required node properties:
+- compatible value :
+ "lantiq,cputemp"
+
+Example:
+ cputemp@0 {
+ compatible = "lantiq,cputemp";
+ };
diff --git a/Documentation/devicetree/bindings/i2c/i2c-cbus-gpio.txt b/Documentation/devicetree/bindings/i2c/i2c-cbus-gpio.txt
index 8ce9cd2855b5..c143948b2a37 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-cbus-gpio.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-cbus-gpio.txt
@@ -20,8 +20,8 @@ i2c@0 {
#address-cells = <1>;
#size-cells = <0>;
- retu-mfd: retu@1 {
- compatible = "retu-mfd";
+ retu: retu@1 {
+ compatible = "nokia,retu";
reg = <0x1>;
};
};
diff --git a/Documentation/devicetree/bindings/iio/adc/at91-sama5d2_adc.txt b/Documentation/devicetree/bindings/iio/adc/at91-sama5d2_adc.txt
index 3223684a643b..552e7a83951d 100644
--- a/Documentation/devicetree/bindings/iio/adc/at91-sama5d2_adc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/at91-sama5d2_adc.txt
@@ -11,6 +11,11 @@ Required properties:
- atmel,min-sample-rate-hz: Minimum sampling rate, it depends on SoC.
- atmel,max-sample-rate-hz: Maximum sampling rate, it depends on SoC.
- atmel,startup-time-ms: Startup time expressed in ms, it depends on SoC.
+ - atmel,trigger-edge-type: One of the possible edge types for the ADTRG hardware
+ trigger pin. When the specified edge type is detected, the conversion will
+ start. Possible values are rising, falling, or both.
+ This property uses the IRQ edge type values: IRQ_TYPE_EDGE_RISING,
+ IRQ_TYPE_EDGE_FALLING or IRQ_TYPE_EDGE_BOTH.
Example:
@@ -25,4 +30,5 @@ adc: adc@fc030000 {
atmel,startup-time-ms = <4>;
vddana-supply = <&vdd_3v3_lp_reg>;
vref-supply = <&vdd_3v3_lp_reg>;
+ atmel,trigger-edge-type = <IRQ_TYPE_EDGE_BOTH>;
}
diff --git a/Documentation/devicetree/bindings/iio/adc/mt6577_auxadc.txt b/Documentation/devicetree/bindings/iio/adc/mt6577_auxadc.txt
index 68c45cbbe3d9..64dc4843c180 100644
--- a/Documentation/devicetree/bindings/iio/adc/mt6577_auxadc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/mt6577_auxadc.txt
@@ -12,6 +12,7 @@ for the Thermal Controller which holds a phandle to the AUXADC.
Required properties:
- compatible: Should be one of:
- "mediatek,mt2701-auxadc": For MT2701 family of SoCs
+ - "mediatek,mt7622-auxadc": For MT7622 family of SoCs
- "mediatek,mt8173-auxadc": For MT8173 family of SoCs
- reg: Address range of the AUXADC unit.
- clocks: Should contain a clock specifier for each entry in clock-names
diff --git a/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt b/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt
index e0a9b9d6d6fd..c2c50b59873d 100644
--- a/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt
@@ -6,6 +6,7 @@ Required properties:
- "rockchip,rk3066-tsadc": for rk3036
- "rockchip,rk3328-saradc", "rockchip,rk3399-saradc": for rk3328
- "rockchip,rk3399-saradc": for rk3399
+ - "rockchip,rv1108-saradc", "rockchip,rk3399-saradc": for rv1108
- reg: physical base address of the controller and length of memory mapped
region.
diff --git a/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt b/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt
index 8310073f14e1..48bfcaa3ffcd 100644
--- a/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt
@@ -74,6 +74,11 @@ Optional properties:
* can be 6, 8, 10 or 12 on stm32f4
* can be 8, 10, 12, 14 or 16 on stm32h7
Default is maximum resolution if unset.
+- st,min-sample-time-nsecs: Minimum sampling time in nanoseconds.
+ Depending on the hardware (board), e.g. high/low analog input source
+ impedance, fine tuning of the ADC sampling time may be recommended.
+ This can be either one value, applied to all channels, or an array that
+ matches the 'st,adc-channels' list, to set the sample time independently
+ for each channel.
Example:
adc: adc@40012000 {
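The example above is cut off by the diff context; a sketch of how the new property
could be used in a child ADC node (the channel list and sampling times are
illustrative assumptions):

 adc1: adc@0 {
	st,adc-channels = <0 1>;
	/* one sampling time per channel listed in st,adc-channels */
	st,min-sample-time-nsecs = <5000 10000>;
 };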
diff --git a/Documentation/devicetree/bindings/iio/counter/stm32-lptimer-cnt.txt b/Documentation/devicetree/bindings/iio/counter/stm32-lptimer-cnt.txt
new file mode 100644
index 000000000000..a04aa5c04103
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/counter/stm32-lptimer-cnt.txt
@@ -0,0 +1,27 @@
+STMicroelectronics STM32 Low-Power Timer quadrature encoder and counter
+
+STM32 Low-Power Timer provides several counter modes. It can be used as:
+- quadrature encoder to detect angular position and direction of rotary
+ elements, from IN1 and IN2 input signals.
+- simple counter from IN1 input signal.
+
+Must be a sub-node of an STM32 Low-Power Timer device tree node.
+See ../mfd/stm32-lptimer.txt for details about the parent node.
+
+Required properties:
+- compatible: Must be "st,stm32-lptimer-counter".
+- pinctrl-names: Set to "default".
+- pinctrl-0: List of phandles pointing to pin configuration nodes,
+ to set IN1/IN2 pins in mode of operation for Low-Power
+ Timer input on external pin.
+
+Example:
+ timer@40002400 {
+ compatible = "st,stm32-lptimer";
+ ...
+ counter {
+ compatible = "st,stm32-lptimer-counter";
+ pinctrl-names = "default";
+ pinctrl-0 = <&lptim1_in_pins>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iio/dac/st,stm32-dac.txt b/Documentation/devicetree/bindings/iio/dac/st,stm32-dac.txt
index bcee71f808d0..bf2925c671c6 100644
--- a/Documentation/devicetree/bindings/iio/dac/st,stm32-dac.txt
+++ b/Documentation/devicetree/bindings/iio/dac/st,stm32-dac.txt
@@ -10,7 +10,9 @@ current.
Contents of a stm32 dac root node:
-----------------------------------
Required properties:
-- compatible: Must be "st,stm32h7-dac-core".
+- compatible: Should be one of:
+ "st,stm32f4-dac-core"
+ "st,stm32h7-dac-core"
- reg: Offset and length of the device's register set.
- clocks: Must contain an entry for pclk (which feeds the peripheral bus
interface)
diff --git a/Documentation/devicetree/bindings/iio/humidity/hdc100x.txt b/Documentation/devicetree/bindings/iio/humidity/hdc100x.txt
new file mode 100644
index 000000000000..c52333bdfd19
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/humidity/hdc100x.txt
@@ -0,0 +1,17 @@
+* HDC100x temperature + humidity sensors
+
+Required properties:
+ - compatible: Should contain one of the following:
+ ti,hdc1000
+ ti,hdc1008
+ ti,hdc1010
+ ti,hdc1050
+ ti,hdc1080
+ - reg: i2c address of the sensor
+
+Example:
+
+hdc100x@40 {
+ compatible = "ti,hdc1000";
+ reg = <0x40>;
+};
diff --git a/Documentation/devicetree/bindings/iio/humidity/hts221.txt b/Documentation/devicetree/bindings/iio/humidity/hts221.txt
index b20ab9c12080..10adeb0d703d 100644
--- a/Documentation/devicetree/bindings/iio/humidity/hts221.txt
+++ b/Documentation/devicetree/bindings/iio/humidity/hts221.txt
@@ -5,9 +5,18 @@ Required properties:
- reg: i2c address of the sensor / spi cs line
Optional properties:
+- drive-open-drain: the interrupt/data ready line will be configured
+ as open drain, which is useful if several sensors share the same
+ interrupt line. This is a boolean property.
+ If the requested interrupt is configured as IRQ_TYPE_LEVEL_HIGH or
+ IRQ_TYPE_EDGE_RISING a pull-down resistor is needed to drive the line
+ when it is not active, whereas a pull-up one is needed when interrupt
+ line is configured as IRQ_TYPE_LEVEL_LOW or IRQ_TYPE_EDGE_FALLING.
+ Refer to pinctrl/pinctrl-bindings.txt for the property description.
- interrupt-parent: should be the phandle for the interrupt controller
- interrupts: interrupt mapping for IRQ. It should be configured with
- flags IRQ_TYPE_LEVEL_HIGH or IRQ_TYPE_EDGE_RISING.
+ flags IRQ_TYPE_LEVEL_HIGH, IRQ_TYPE_EDGE_RISING, IRQ_TYPE_LEVEL_LOW or
+ IRQ_TYPE_EDGE_FALLING.
Refer to interrupt-controller/interrupts.txt for generic interrupt
client node bindings.
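As a sketch of how the new drive-open-drain property fits into a sensor node
(the I2C address and interrupt routing are assumptions):

 hts221@5f {
	compatible = "st,hts221";
	reg = <0x5f>;
	/* interrupt line shared with another sensor, hence open drain */
	drive-open-drain;
	interrupt-parent = <&gpio0>;
	interrupts = <0 IRQ_TYPE_EDGE_RISING>;
 };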
diff --git a/Documentation/devicetree/bindings/iio/humidity/htu21.txt b/Documentation/devicetree/bindings/iio/humidity/htu21.txt
new file mode 100644
index 000000000000..97d79636f7ae
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/humidity/htu21.txt
@@ -0,0 +1,13 @@
+*HTU21 - Measurement-Specialties htu21 temperature & humidity sensor and humidity part of MS8607 sensor
+
+Required properties:
+
+ - compatible: should be "meas,htu21" or "meas,ms8607-humidity"
+ - reg: I2C address of the sensor
+
+Example:
+
+htu21@40 {
+ compatible = "meas,htu21";
+ reg = <0x40>;
+};
diff --git a/Documentation/devicetree/bindings/iio/imu/st_lsm6dsx.txt b/Documentation/devicetree/bindings/iio/imu/st_lsm6dsx.txt
index 6f28ff55f3ec..1ff1af799c76 100644
--- a/Documentation/devicetree/bindings/iio/imu/st_lsm6dsx.txt
+++ b/Documentation/devicetree/bindings/iio/imu/st_lsm6dsx.txt
@@ -11,6 +11,14 @@ Required properties:
Optional properties:
- st,drdy-int-pin: the pin on the package that will be used to signal
"data ready" (valid values: 1 or 2).
+- drive-open-drain: the interrupt/data ready line will be configured
+ as open drain, which is useful if several sensors share the same
+ interrupt line. This is a boolean property.
+ (This binding is taken from pinctrl/pinctrl-bindings.txt)
+ If the requested interrupt is configured as IRQ_TYPE_LEVEL_HIGH or
+ IRQ_TYPE_EDGE_RISING a pull-down resistor is needed to drive the line
+ when it is not active, whereas a pull-up one is needed when interrupt
+ line is configured as IRQ_TYPE_LEVEL_LOW or IRQ_TYPE_EDGE_FALLING.
- interrupt-parent: should be the phandle for the interrupt controller
- interrupts: interrupt mapping for IRQ. It should be configured with
flags IRQ_TYPE_LEVEL_HIGH, IRQ_TYPE_EDGE_RISING, IRQ_TYPE_LEVEL_LOW or
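A similar sketch for an LSM6DSx node with the open-drain interrupt option (the
compatible string, address and interrupt routing are assumptions):

 lsm6ds3@6b {
	compatible = "st,lsm6ds3";
	reg = <0x6b>;
	st,drdy-int-pin = <1>;
	/* shared interrupt line, configured as open drain */
	drive-open-drain;
	interrupt-parent = <&gpio0>;
	interrupts = <0 IRQ_TYPE_EDGE_RISING>;
 };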
diff --git a/Documentation/devicetree/bindings/iio/pressure/ms5637.txt b/Documentation/devicetree/bindings/iio/pressure/ms5637.txt
new file mode 100644
index 000000000000..1f43ffa068ac
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/pressure/ms5637.txt
@@ -0,0 +1,17 @@
+* MS5637 - Measurement-Specialties MS5637, MS5805, MS5837 and MS8607 pressure & temperature sensor
+
+Required properties:
+
+ -compatible: should be one of the following
+ meas,ms5637
+ meas,ms5805
+ meas,ms5837
+ meas,ms8607-temppressure
+ -reg: I2C address of the sensor
+
+Example:
+
+ms5637@76 {
+ compatible = "meas,ms5637";
+ reg = <0x76>;
+};
diff --git a/Documentation/devicetree/bindings/iio/st-sensors.txt b/Documentation/devicetree/bindings/iio/st-sensors.txt
index eaa8fbba34e2..9ec6f5ce54fc 100644
--- a/Documentation/devicetree/bindings/iio/st-sensors.txt
+++ b/Documentation/devicetree/bindings/iio/st-sensors.txt
@@ -45,6 +45,7 @@ Accelerometers:
- st,lis2dh12-accel
- st,h3lis331dl-accel
- st,lng2dm-accel
+- st,lis3l02dq
Gyroscopes:
- st,l3g4200d-gyro
@@ -52,6 +53,7 @@ Gyroscopes:
- st,lsm330dl-gyro
- st,lsm330dlc-gyro
- st,l3gd20-gyro
+- st,l3gd20h-gyro
- st,l3g4is-gyro
- st,lsm330-gyro
- st,lsm9ds0-gyro
@@ -62,6 +64,7 @@ Magnetometers:
- st,lsm303dlhc-magn
- st,lsm303dlm-magn
- st,lis3mdl-magn
+- st,lis2mdl
Pressure sensors:
- st,lps001wp-press
diff --git a/Documentation/devicetree/bindings/iio/temperature/tsys01.txt b/Documentation/devicetree/bindings/iio/temperature/tsys01.txt
new file mode 100644
index 000000000000..0d5cc5595d0c
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/temperature/tsys01.txt
@@ -0,0 +1,19 @@
+* TSYS01 - Measurement Specialties temperature sensor
+
+Required properties:
+
+ - compatible: should be "meas,tsys01"
+ - reg: I2C address of the sensor (changeable via CSB pin)
+
+ ------------------------
+ | CSB | Device Address |
+ ------------------------
+ 1 0x76
+ 0 0x77
+
+Example:
+
+tsys01@76 {
+ compatible = "meas,tsys01";
+ reg = <0x76>;
+};
diff --git a/Documentation/devicetree/bindings/iio/timer/stm32-lptimer-trigger.txt b/Documentation/devicetree/bindings/iio/timer/stm32-lptimer-trigger.txt
new file mode 100644
index 000000000000..85e6806b17d7
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/timer/stm32-lptimer-trigger.txt
@@ -0,0 +1,23 @@
+STMicroelectronics STM32 Low-Power Timer Trigger
+
+STM32 Low-Power Timer provides trigger source (LPTIM output) that can be used
+by STM32 internal ADC and/or DAC.
+
+Must be a sub-node of an STM32 Low-Power Timer device tree node.
+See ../mfd/stm32-lptimer.txt for details about the parent node.
+
+Required properties:
+- compatible: Must be "st,stm32-lptimer-trigger".
+- reg: Identify trigger hardware block. Must be 0, 1 or 2
+ respectively for lptimer1, lptimer2 or lptimer3
+ trigger output.
+
+Example:
+ timer@40002400 {
+ compatible = "st,stm32-lptimer";
+ ...
+ trigger@0 {
+ compatible = "st,stm32-lptimer-trigger";
+ reg = <0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iio/timer/stm32-timer-trigger.txt b/Documentation/devicetree/bindings/iio/timer/stm32-timer-trigger.txt
index 55a653d15303..b8e8c769d434 100644
--- a/Documentation/devicetree/bindings/iio/timer/stm32-timer-trigger.txt
+++ b/Documentation/devicetree/bindings/iio/timer/stm32-timer-trigger.txt
@@ -4,7 +4,9 @@ Must be a sub-node of an STM32 Timers device tree node.
See ../mfd/stm32-timers.txt for details about the parent node.
Required parameters:
-- compatible: Must be "st,stm32-timer-trigger".
+- compatible: Must be one of:
+ "st,stm32-timer-trigger"
+ "st,stm32h7-timer-trigger"
- reg: Identify trigger hardware block.
Example:
@@ -14,7 +16,7 @@ Example:
compatible = "st,stm32-timers";
reg = <0x40010000 0x400>;
clocks = <&rcc 0 160>;
- clock-names = "clk_int";
+ clock-names = "int";
timer@0 {
compatible = "st,stm32-timer-trigger";
diff --git a/Documentation/devicetree/bindings/input/atmel,maxtouch.txt b/Documentation/devicetree/bindings/input/atmel,maxtouch.txt
index 1852906517ab..23e3abc3fdef 100644
--- a/Documentation/devicetree/bindings/input/atmel,maxtouch.txt
+++ b/Documentation/devicetree/bindings/input/atmel,maxtouch.txt
@@ -22,6 +22,8 @@ Optional properties for main touchpad device:
experiment to determine which bit corresponds to which input. Use
KEY_RESERVED for unused padding values.
+- reset-gpios: GPIO specifier for the touchscreen's reset pin (active low)
+
Example:
touch@4b {
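The example is truncated here by the diff; a sketch of the full node with the new
reset-gpios property might be (the interrupt and GPIO specifiers are assumptions):

 touch@4b {
	compatible = "atmel,maxtouch";
	reg = <0x4b>;
	interrupt-parent = <&gpio>;
	interrupts = <26 IRQ_TYPE_EDGE_FALLING>;
	/* active-low reset line to the controller */
	reset-gpios = <&gpio 27 GPIO_ACTIVE_LOW>;
 };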
diff --git a/Documentation/devicetree/bindings/interrupt-controller/fsl,ls-scfg-msi.txt b/Documentation/devicetree/bindings/interrupt-controller/fsl,ls-scfg-msi.txt
index 9e389493203f..49ccabbfa6f3 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/fsl,ls-scfg-msi.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/fsl,ls-scfg-msi.txt
@@ -4,8 +4,10 @@ Required properties:
- compatible: should be "fsl,<soc-name>-msi" to identify
Layerscape PCIe MSI controller block such as:
- "fsl,1s1021a-msi"
- "fsl,1s1043a-msi"
+ "fsl,ls1021a-msi"
+ "fsl,ls1043a-msi"
+ "fsl,ls1046a-msi"
+ "fsl,ls1043a-v1.1-msi"
- msi-controller: indicates that this is a PCIe MSI controller node
- reg: physical base address of the controller and length of memory mapped.
- interrupts: an interrupt to the parent interrupt controller.
@@ -23,7 +25,7 @@ MSI controller node
Examples:
msi1: msi-controller@1571000 {
- compatible = "fsl,1s1043a-msi";
+ compatible = "fsl,ls1043a-msi";
reg = <0x0 0x1571000 0x0 0x8>,
msi-controller;
interrupts = <0 116 0x4>;
diff --git a/Documentation/devicetree/bindings/interrupt-controller/socionext,uniphier-aidet.txt b/Documentation/devicetree/bindings/interrupt-controller/socionext,uniphier-aidet.txt
new file mode 100644
index 000000000000..48e71d3ac2ad
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/socionext,uniphier-aidet.txt
@@ -0,0 +1,32 @@
+UniPhier AIDET
+
+UniPhier AIDET (ARM Interrupt Detector) is an add-on block for ARM GIC (Generic
+Interrupt Controller). GIC itself can handle only high level and rising edge
+interrupts. The AIDET provides logic inverter to support low level and falling
+edge interrupts.
+
+Required properties:
+- compatible: Should be one of the following:
+ "socionext,uniphier-ld4-aidet" - for LD4 SoC
+ "socionext,uniphier-pro4-aidet" - for Pro4 SoC
+ "socionext,uniphier-sld8-aidet" - for sLD8 SoC
+ "socionext,uniphier-pro5-aidet" - for Pro5 SoC
+ "socionext,uniphier-pxs2-aidet" - for PXs2/LD6b SoC
+ "socionext,uniphier-ld11-aidet" - for LD11 SoC
+ "socionext,uniphier-ld20-aidet" - for LD20 SoC
+ "socionext,uniphier-pxs3-aidet" - for PXs3 SoC
+- reg: Specifies offset and length of the register set for the device.
+- interrupt-controller: Identifies the node as an interrupt controller
+- #interrupt-cells : Specifies the number of cells needed to encode an interrupt
+ source. The value should be 2. The first cell defines the interrupt number
+ (corresponds to the SPI interrupt number of GIC). The second cell specifies
+ the trigger type as defined in interrupts.txt in this directory.
+
+Example:
+
+ aidet: aidet@5fc20000 {
+ compatible = "socionext,uniphier-pro4-aidet";
+ reg = <0x5fc20000 0x200>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
diff --git a/Documentation/devicetree/bindings/leds/ams,as3645a.txt b/Documentation/devicetree/bindings/leds/ams,as3645a.txt
new file mode 100644
index 000000000000..12c5ef26ec73
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/ams,as3645a.txt
@@ -0,0 +1,71 @@
+Analog devices AS3645A device tree bindings
+
+The AS3645A flash LED controller can drive two LEDs, one high current
+flash LED and one indicator LED. The high current flash LED can be
+used in torch mode as well.
+
+Ranges below noted as [a, b] are closed ranges between a and b, i.e. a
+and b are included in the range.
+
+Please also see common.txt in the same directory.
+
+
+Required properties
+===================
+
+compatible : Must be "ams,as3645a".
+reg : The I2C address of the device. Typically 0x30.
+
+
+Required properties of the "flash" child node
+=============================================
+
+flash-timeout-us: Flash timeout in microseconds. The value must be in
+ the range [100000, 850000] and divisible by 50000.
+flash-max-microamp: Maximum flash current in microamperes. The value must
+ be in the range [200000, 500000] and divisible
+ by 20000.
+led-max-microamp: Maximum torch (assist) current in microamperes. The
+ value must be in the range [20000, 160000] and
+ divisible by 20000.
+ams,input-max-microamp: Maximum flash controller input current. The
+ value must be in the range [1250000, 2000000]
+ and divisible by 50000.
+
+
+Optional properties of the "flash" child node
+=============================================
+
+label : The label of the flash LED.
+
+
+Required properties of the "indicator" child node
+=================================================
+
+led-max-microamp: Maximum indicator current. The allowed values are
+ 2500, 5000, 7500 and 10000.
+
+Optional properties of the "indicator" child node
+=================================================
+
+label : The label of the indicator LED.
+
+
+Example
+=======
+
+ as3645a@30 {
+ reg = <0x30>;
+ compatible = "ams,as3645a";
+ flash {
+ flash-timeout-us = <150000>;
+ flash-max-microamp = <320000>;
+ led-max-microamp = <60000>;
+ ams,input-max-microamp = <1750000>;
+ label = "as3645a:flash";
+ };
+ indicator {
+ led-max-microamp = <10000>;
+ label = "as3645a:indicator";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/leds/irled/gpio-ir-tx.txt b/Documentation/devicetree/bindings/leds/irled/gpio-ir-tx.txt
new file mode 100644
index 000000000000..cbe8dfd29715
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/irled/gpio-ir-tx.txt
@@ -0,0 +1,14 @@
+Device tree bindings for IR LED connected through gpio pin which is used as
+remote controller transmitter.
+
+Required properties:
+ - compatible: should be "gpio-ir-tx".
+ - gpios : Should specify the IR LED GPIO, see "gpios property" in
+ Documentation/devicetree/bindings/gpio/gpio.txt. Active low LEDs
+ should be indicated using flags in the GPIO specifier.
+
+Example:
+ irled@0 {
+ compatible = "gpio-ir-tx";
+ gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
+ };
diff --git a/Documentation/devicetree/bindings/leds/irled/pwm-ir-tx.txt b/Documentation/devicetree/bindings/leds/irled/pwm-ir-tx.txt
new file mode 100644
index 000000000000..66e5672c2e3d
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/irled/pwm-ir-tx.txt
@@ -0,0 +1,13 @@
+Device tree bindings for IR LED connected through pwm pin which is used as
+remote controller transmitter.
+
+Required properties:
+ - compatible: should be "pwm-ir-tx".
+ - pwms : PWM property to point to the PWM device (phandle)/port (id)
+ and to specify the period time to be used: <&phandle id period_ns>;
+
+Example:
+ irled {
+ compatible = "pwm-ir-tx";
+ pwms = <&pwm0 0 10000000>;
+ };
diff --git a/Documentation/devicetree/bindings/media/i2c/adv748x.txt b/Documentation/devicetree/bindings/media/i2c/adv748x.txt
new file mode 100644
index 000000000000..21ffb5ed8183
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/adv748x.txt
@@ -0,0 +1,95 @@
+* Analog Devices ADV748X video decoder with HDMI receiver
+
+The ADV7481 and ADV7482 are multi format video decoders with an integrated
+HDMI receiver. They can output CSI-2 on two independent outputs TXA and TXB
+from three input sources HDMI, analog and TTL.
+
+Required Properties:
+
+ - compatible: Must contain one of the following
+ - "adi,adv7481" for the ADV7481
+ - "adi,adv7482" for the ADV7482
+
+ - reg: I2C slave address
+
+Optional Properties:
+
+ - interrupt-names: Should specify the interrupts as "intrq1", "intrq2" and/or
+ "intrq3". All interrupts are optional. The "intrq3" interrupt
+ is only available on the adv7481
+ - interrupts: Specify the interrupt lines for the ADV748x
+
+The device node must contain one 'port' child node per device input and output
+port, in accordance with the video interface bindings defined in
+Documentation/devicetree/bindings/media/video-interfaces.txt. The port nodes
+are numbered as follows.
+
+ Name Type Port
+ ---------------------------------------
+ AIN0 sink 0
+ AIN1 sink 1
+ AIN2 sink 2
+ AIN3 sink 3
+ AIN4 sink 4
+ AIN5 sink 5
+ AIN6 sink 6
+ AIN7 sink 7
+ HDMI sink 8
+ TTL sink 9
+ TXA source 10
+ TXB source 11
+
+The digital output port nodes must contain at least one endpoint.
+
+Ports are optional if they are not connected to anything at the hardware level.
+
+Example:
+
+ video-receiver@70 {
+ compatible = "adi,adv7482";
+ reg = <0x70>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ interrupt-parent = <&gpio6>;
+ interrupt-names = "intrq1", "intrq2";
+ interrupts = <30 IRQ_TYPE_LEVEL_LOW>,
+ <31 IRQ_TYPE_LEVEL_LOW>;
+
+ port@7 {
+ reg = <7>;
+
+ adv7482_ain7: endpoint {
+ remote-endpoint = <&cvbs_in>;
+ };
+ };
+
+ port@8 {
+ reg = <8>;
+
+ adv7482_hdmi: endpoint {
+ remote-endpoint = <&hdmi_in>;
+ };
+ };
+
+ port@10 {
+ reg = <10>;
+
+ adv7482_txa: endpoint {
+ clock-lanes = <0>;
+ data-lanes = <1 2 3 4>;
+ remote-endpoint = <&csi40_in>;
+ };
+ };
+
+ port@11 {
+ reg = <11>;
+
+ adv7482_txb: endpoint {
+ clock-lanes = <0>;
+ data-lanes = <1>;
+ remote-endpoint = <&csi20_in>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/i2c/dongwoon,dw9714.txt b/Documentation/devicetree/bindings/media/i2c/dongwoon,dw9714.txt
new file mode 100644
index 000000000000..b88dcdd41def
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/dongwoon,dw9714.txt
@@ -0,0 +1,9 @@
+Dongwoon Anatech DW9714 camera voice coil lens driver
+
+DW9714 is a 10-bit DAC with current sink capability. It is intended
+for driving voice coil lenses in camera modules.
+
+Mandatory properties:
+
+- compatible: "dongwoon,dw9714"
+- reg: I²C slave address
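The binding above does not give an example; a minimal sketch might be (the I2C
address is an assumption):

 camera-lens@c {
	compatible = "dongwoon,dw9714";
	reg = <0x0c>;
 };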
diff --git a/Documentation/devicetree/bindings/media/meson-ao-cec.txt b/Documentation/devicetree/bindings/media/meson-ao-cec.txt
new file mode 100644
index 000000000000..8671bdb08080
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/meson-ao-cec.txt
@@ -0,0 +1,28 @@
+* Amlogic Meson AO-CEC driver
+
+The Amlogic Meson AO-CEC module is present in Amlogic SoCs and its purpose is
+to handle communication between HDMI connected devices over the CEC bus.
+
+Required properties:
+ - compatible : value should be following
+ "amlogic,meson-gx-ao-cec"
+
+ - reg : Physical base address of the IP registers and length of memory
+ mapped region.
+
+ - interrupts : AO-CEC interrupt number to the CPU.
+ - clocks : from common clock binding: handle to AO-CEC clock.
+ - clock-names : from common clock binding: must contain "core",
+ corresponding to entry in the clocks property.
+ - hdmi-phandle: phandle to the HDMI controller
+
+Example:
+
+cec_AO: cec@100 {
+ compatible = "amlogic,meson-gx-ao-cec";
+ reg = <0x0 0x00100 0x0 0x14>;
+ interrupts = <GIC_SPI 199 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&clkc_AO CLKID_AO_CEC_32K>;
+ clock-names = "core";
+ hdmi-phandle = <&hdmi_tx>;
+};
diff --git a/Documentation/devicetree/bindings/media/mtk-cir.txt b/Documentation/devicetree/bindings/media/mtk-cir.txt
index 2be2005577d6..5e18087ce11f 100644
--- a/Documentation/devicetree/bindings/media/mtk-cir.txt
+++ b/Documentation/devicetree/bindings/media/mtk-cir.txt
@@ -2,10 +2,14 @@ Device-Tree bindings for Mediatek consumer IR controller
found in Mediatek SoC family
Required properties:
-- compatible : "mediatek,mt7623-cir"
+- compatible : Should be
+ "mediatek,mt7623-cir": for MT7623 SoC
+ "mediatek,mt7622-cir": for MT7622 SoC
- clocks : list of clock specifiers, corresponding to
entries in clock-names property;
-- clock-names : should contain "clk" entries;
+- clock-names : should contain
+ - "clk" entries: for MT7623 SoC
+ - "clk", "bus" entries: for MT7622 SoC
- interrupts : should contain IR IRQ number;
- reg : should contain IO map address for IR.
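A sketch of an MT7622 node using the two clock entries (the register address,
interrupt and clock phandles are placeholders):

 cir: cir@10009000 {
	compatible = "mediatek,mt7622-cir";
	reg = <0x10009000 0x1000>;
	interrupts = <GIC_SPI 175 IRQ_TYPE_LEVEL_LOW>;
	clocks = <&clk_ir>, <&clk_bus>;
	clock-names = "clk", "bus";
 };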
diff --git a/Documentation/devicetree/bindings/media/qcom,camss.txt b/Documentation/devicetree/bindings/media/qcom,camss.txt
new file mode 100644
index 000000000000..cadecebc73f7
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/qcom,camss.txt
@@ -0,0 +1,197 @@
+Qualcomm Camera Subsystem
+
+* Properties
+
+- compatible:
+ Usage: required
+ Value type: <stringlist>
+ Definition: Should contain:
+ - "qcom,msm8916-camss"
+- reg:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Register ranges as listed in the reg-names property.
+- reg-names:
+ Usage: required
+ Value type: <stringlist>
+ Definition: Should contain the following entries:
+ - "csiphy0"
+ - "csiphy0_clk_mux"
+ - "csiphy1"
+ - "csiphy1_clk_mux"
+ - "csid0"
+ - "csid1"
+ - "ispif"
+ - "csi_clk_mux"
+ - "vfe0"
+- interrupts:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Interrupts as listed in the interrupt-names property.
+- interrupt-names:
+ Usage: required
+ Value type: <stringlist>
+ Definition: Should contain the following entries:
+ - "csiphy0"
+ - "csiphy1"
+ - "csid0"
+ - "csid1"
+ - "ispif"
+ - "vfe0"
+- power-domains:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: A phandle and power domain specifier pairs to the
+ power domain which is responsible for collapsing
+ and restoring power to the peripheral.
+- clocks:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: A list of phandle and clock specifier pairs as listed
+ in clock-names property.
+- clock-names:
+ Usage: required
+ Value type: <stringlist>
+ Definition: Should contain the following entries:
+ - "camss_top_ahb"
+ - "ispif_ahb"
+ - "csiphy0_timer"
+ - "csiphy1_timer"
+ - "csi0_ahb"
+ - "csi0"
+ - "csi0_phy"
+ - "csi0_pix"
+ - "csi0_rdi"
+ - "csi1_ahb"
+ - "csi1"
+ - "csi1_phy"
+ - "csi1_pix"
+ - "csi1_rdi"
+ - "camss_ahb"
+ - "camss_vfe_vfe"
+ - "camss_csi_vfe"
+ - "iface"
+ - "bus"
+- vdda-supply:
+ Usage: required
+ Value type: <phandle>
+ Definition: A phandle to voltage supply for CSI2.
+- iommus:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: A list of phandle and IOMMU specifier pairs.
+
+* Nodes
+
+- ports:
+ Usage: required
+ Definition: As described in video-interfaces.txt in same directory.
+ Properties:
+ - reg:
+ Usage: required
+ Value type: <u32>
+ Definition: Selects CSI2 PHY interface - PHY0 or PHY1.
+ Endpoint node properties:
+ - clock-lanes:
+ Usage: required
+ Value type: <u32>
+ Definition: The physical clock lane index. The value
+ must always be <1> as the physical clock
+ lane is lane 1.
+ - data-lanes:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: An array of physical data lanes indexes.
+ Position of an entry determines the logical
+ lane number, while the value of an entry
+ indicates physical lane index. Lane swapping
+ is supported.
+
+* An Example
+
+ camss: camss@1b00000 {
+ compatible = "qcom,msm8916-camss";
+ reg = <0x1b0ac00 0x200>,
+ <0x1b00030 0x4>,
+ <0x1b0b000 0x200>,
+ <0x1b00038 0x4>,
+ <0x1b08000 0x100>,
+ <0x1b08400 0x100>,
+ <0x1b0a000 0x500>,
+ <0x1b00020 0x10>,
+ <0x1b10000 0x1000>;
+ reg-names = "csiphy0",
+ "csiphy0_clk_mux",
+ "csiphy1",
+ "csiphy1_clk_mux",
+ "csid0",
+ "csid1",
+ "ispif",
+ "csi_clk_mux",
+ "vfe0";
+ interrupts = <GIC_SPI 78 0>,
+ <GIC_SPI 79 0>,
+ <GIC_SPI 51 0>,
+ <GIC_SPI 52 0>,
+ <GIC_SPI 55 0>,
+ <GIC_SPI 57 0>;
+ interrupt-names = "csiphy0",
+ "csiphy1",
+ "csid0",
+ "csid1",
+ "ispif",
+ "vfe0";
+ power-domains = <&gcc VFE_GDSC>;
+ clocks = <&gcc GCC_CAMSS_TOP_AHB_CLK>,
+ <&gcc GCC_CAMSS_ISPIF_AHB_CLK>,
+ <&gcc GCC_CAMSS_CSI0PHYTIMER_CLK>,
+ <&gcc GCC_CAMSS_CSI1PHYTIMER_CLK>,
+ <&gcc GCC_CAMSS_CSI0_AHB_CLK>,
+ <&gcc GCC_CAMSS_CSI0_CLK>,
+ <&gcc GCC_CAMSS_CSI0PHY_CLK>,
+ <&gcc GCC_CAMSS_CSI0PIX_CLK>,
+ <&gcc GCC_CAMSS_CSI0RDI_CLK>,
+ <&gcc GCC_CAMSS_CSI1_AHB_CLK>,
+ <&gcc GCC_CAMSS_CSI1_CLK>,
+ <&gcc GCC_CAMSS_CSI1PHY_CLK>,
+ <&gcc GCC_CAMSS_CSI1PIX_CLK>,
+ <&gcc GCC_CAMSS_CSI1RDI_CLK>,
+ <&gcc GCC_CAMSS_AHB_CLK>,
+ <&gcc GCC_CAMSS_VFE0_CLK>,
+ <&gcc GCC_CAMSS_CSI_VFE0_CLK>,
+ <&gcc GCC_CAMSS_VFE_AHB_CLK>,
+ <&gcc GCC_CAMSS_VFE_AXI_CLK>;
+ clock-names = "camss_top_ahb",
+ "ispif_ahb",
+ "csiphy0_timer",
+ "csiphy1_timer",
+ "csi0_ahb",
+ "csi0",
+ "csi0_phy",
+ "csi0_pix",
+ "csi0_rdi",
+ "csi1_ahb",
+ "csi1",
+ "csi1_phy",
+ "csi1_pix",
+ "csi1_rdi",
+ "camss_ahb",
+ "camss_vfe_vfe",
+ "camss_csi_vfe",
+ "iface",
+ "bus";
+ vdda-supply = <&pm8916_l2>;
+ iommus = <&apps_iommu 3>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ csiphy0_ep: endpoint {
+ clock-lanes = <1>;
+ data-lanes = <0 2>;
+ remote-endpoint = <&ov5645_ep>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/renesas,drif.txt b/Documentation/devicetree/bindings/media/renesas,drif.txt
index 39516b94c28f..0d8974aa8b38 100644
--- a/Documentation/devicetree/bindings/media/renesas,drif.txt
+++ b/Documentation/devicetree/bindings/media/renesas,drif.txt
@@ -40,6 +40,7 @@ To summarize,
Required properties of an internal channel:
-------------------------------------------
- compatible: "renesas,r8a7795-drif" if DRIF controller is a part of R8A7795 SoC.
+ "renesas,r8a7796-drif" if DRIF controller is a part of R8A7796 SoC.
"renesas,rcar-gen3-drif" for a generic R-Car Gen3 compatible device.
When compatible with the generic version, nodes must list the
diff --git a/Documentation/devicetree/bindings/media/video-interfaces.txt b/Documentation/devicetree/bindings/media/video-interfaces.txt
index 9cd2a369125d..852041a7480c 100644
--- a/Documentation/devicetree/bindings/media/video-interfaces.txt
+++ b/Documentation/devicetree/bindings/media/video-interfaces.txt
@@ -76,6 +76,11 @@ Optional endpoint properties
mode horizontal and vertical synchronization signals are provided to the
slave device (data source) by the master device (data sink). In the master
mode the data source device is also the source of the synchronization signals.
+- bus-type: data bus type. Possible values are:
+ 0 - autodetect based on other properties (MIPI CSI-2 D-PHY, parallel or Bt656)
+ 1 - MIPI CSI-2 C-PHY
+ 2 - MIPI CSI1
+ 3 - CCP2
- bus-width: number of data lines actively used, valid for the parallel busses.
- data-shift: on the parallel data busses, if bus-width is used to specify the
number of data lines, data-shift can be used to specify which data lines are
@@ -112,7 +117,8 @@ Optional endpoint properties
should be the combined length of data-lanes and clock-lanes properties.
If the lane-polarities property is omitted, the value must be interpreted
as 0 (normal). This property is valid for serial busses only.
-
+- strobe: Whether the clock signal is used as clock (0) or strobe (1). Used
+ with CCP2, for instance.
Example
-------
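As an illustration of the new bus-type and strobe endpoint properties, a
hypothetical CCP2 sensor endpoint could look like this (node names and lane
numbers are assumptions):

 sensor_out: endpoint {
	bus-type = <3>;		/* CCP2 */
	clock-lanes = <0>;
	data-lanes = <1>;
	strobe = <1>;		/* clock line carries a strobe signal */
	remote-endpoint = <&ccp2_in>;
 };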
diff --git a/Documentation/devicetree/bindings/media/zx-irdec.txt b/Documentation/devicetree/bindings/media/zx-irdec.txt
new file mode 100644
index 000000000000..295b9fab593e
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/zx-irdec.txt
@@ -0,0 +1,14 @@
+IR Decoder (IRDEC) on ZTE ZX family SoCs
+
+Required properties:
+ - compatible: Should be "zte,zx296718-irdec".
+ - reg: Physical base address and length of IRDEC registers.
+ - interrupts: Interrupt number of IRDEC.
+
+Example:
+
+ irdec: ir-decoder@111000 {
+ compatible = "zte,zx296718-irdec";
+ reg = <0x111000 0x1000>;
+ interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
+ };
diff --git a/Documentation/devicetree/bindings/mfd/atmel-smc.txt b/Documentation/devicetree/bindings/mfd/atmel-smc.txt
index 26eeed373934..1103ce2030fb 100644
--- a/Documentation/devicetree/bindings/mfd/atmel-smc.txt
+++ b/Documentation/devicetree/bindings/mfd/atmel-smc.txt
@@ -8,6 +8,7 @@ Required properties:
- compatible: Should be one of the following
"atmel,at91sam9260-smc", "syscon"
"atmel,sama5d3-smc", "syscon"
+ "atmel,sama5d2-smc", "syscon"
- reg: Contains offset/length value of the SMC memory
region.
diff --git a/Documentation/devicetree/bindings/mfd/axp20x.txt b/Documentation/devicetree/bindings/mfd/axp20x.txt
index aca09af66514..9455503b0299 100644
--- a/Documentation/devicetree/bindings/mfd/axp20x.txt
+++ b/Documentation/devicetree/bindings/mfd/axp20x.txt
@@ -7,7 +7,14 @@ axp209 (X-Powers)
axp221 (X-Powers)
axp223 (X-Powers)
axp803 (X-Powers)
+axp806 (X-Powers)
axp809 (X-Powers)
+axp813 (X-Powers)
+
+The AXP813 is two chips packaged into one. The two chips share nothing
+other than the packaging; their pins are routed separately, so they should
+be treated as separate entities. The other half is an AC100 RTC/codec
+combo chip. Please see ./ac100.txt for its bindings.
Required properties:
- compatible: should be one of:
@@ -19,6 +26,7 @@ Required properties:
* "x-powers,axp803"
* "x-powers,axp806"
* "x-powers,axp809"
+ * "x-powers,axp813"
- reg: The I2C slave address or RSB hardware address for the AXP chip
- interrupt-parent: The parent interrupt controller
- interrupts: SoC NMI / GPIO interrupt connected to the PMIC's IRQ pin
@@ -28,12 +36,14 @@ Required properties:
Optional properties:
- x-powers,dcdc-freq: defines the work frequency of DC-DC in KHz
AXP152/20X: range: 750-1875, Default: 1.5 MHz
- AXP22X/80X: range: 1800-4050, Default: 3 MHz
+ AXP22X/8XX: range: 1800-4050, Default: 3 MHz
-- x-powers,drive-vbus-en: axp221 / axp223 only boolean, set this when the
- N_VBUSEN pin is used as an output pin to control an external
- regulator to drive the OTG VBus, rather then as an input pin
- which signals whether the board is driving OTG VBus or not.
+- x-powers,drive-vbus-en: boolean, set this when the N_VBUSEN pin is
+ used as an output pin to control an external
+ regulator to drive the OTG VBus, rather than
+ as an input pin which signals whether the
+ board is driving OTG VBus or not.
+ (axp221 / axp223 / axp813 only)
- x-powers,master-mode: Boolean (axp806 only). Set this when the PMIC is
wired for master mode. The default is slave mode.
@@ -171,6 +181,36 @@ LDO_IO1 : LDO : ips-supply : GPIO 1
RTC_LDO : LDO : ips-supply : always on
SW : On/Off Switch : swin-supply
+AXP813 regulators, type, and corresponding input supply names:
+
+Regulator Type Supply Name Notes
+--------- ---- ----------- -----
+DCDC1 : DC-DC buck : vin1-supply
+DCDC2 : DC-DC buck : vin2-supply : poly-phase capable
+DCDC3 : DC-DC buck : vin3-supply : poly-phase capable
+DCDC4 : DC-DC buck : vin4-supply
+DCDC5 : DC-DC buck : vin5-supply : poly-phase capable
+DCDC6 : DC-DC buck : vin6-supply : poly-phase capable
+DCDC7 : DC-DC buck : vin7-supply
+ALDO1 : LDO : aldoin-supply : shared supply
+ALDO2 : LDO : aldoin-supply : shared supply
+ALDO3 : LDO : aldoin-supply : shared supply
+DLDO1 : LDO : dldoin-supply : shared supply
+DLDO2 : LDO : dldoin-supply : shared supply
+DLDO3 : LDO : dldoin-supply : shared supply
+DLDO4 : LDO : dldoin-supply : shared supply
+ELDO1 : LDO : eldoin-supply : shared supply
+ELDO2 : LDO : eldoin-supply : shared supply
+ELDO3 : LDO : eldoin-supply : shared supply
+FLDO1 : LDO : fldoin-supply : shared supply
+FLDO2 : LDO : fldoin-supply : shared supply
+FLDO3 : LDO : fldoin-supply : shared supply
+LDO_IO0 : LDO : ips-supply : GPIO 0
+LDO_IO1 : LDO : ips-supply : GPIO 1
+RTC_LDO : LDO : ips-supply : always on
+SW : On/Off Switch : swin-supply
+DRIVEVBUS : Enable output : drivevbus-supply : external regulator
+
Example:
axp209: pmic@34 {
diff --git a/Documentation/devicetree/bindings/mfd/bd9571mwv.txt b/Documentation/devicetree/bindings/mfd/bd9571mwv.txt
new file mode 100644
index 000000000000..9ab216a851d5
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/bd9571mwv.txt
@@ -0,0 +1,49 @@
+* ROHM BD9571MWV Power Management Integrated Circuit (PMIC) bindings
+
+Required properties:
+ - compatible : Should be "rohm,bd9571mwv".
+ - reg : I2C slave address.
+ - interrupt-parent : Phandle to the parent interrupt controller.
+ - interrupts : The interrupt line the device is connected to.
+ - interrupt-controller : Marks the device node as an interrupt controller.
+ - #interrupt-cells : The number of cells to describe an IRQ, should be 2.
+ The first cell is the IRQ number.
+ The second cell is the flags, encoded as trigger
+ masks from ../interrupt-controller/interrupts.txt.
+ - gpio-controller : Marks the device node as a GPIO Controller.
+ - #gpio-cells : Should be two. The first cell is the pin number and
+ the second cell is used to specify flags.
+ See ../gpio/gpio.txt for more information.
+ - regulators: : List of child nodes that specify the regulator
+ initialization data. Child nodes must be named
+ after their hardware counterparts:
+ - vd09
+ - vd18
+ - vd25
+ - vd33
+ - dvfs
+ Each child node is defined using the standard
+ binding for regulators.
+
+Example:
+
+ pmic: pmic@30 {
+ compatible = "rohm,bd9571mwv";
+ reg = <0x30>;
+ interrupt-parent = <&gpio2>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ regulators {
+ dvfs: dvfs {
+ regulator-name = "dvfs";
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <1030000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mfd/da9052-i2c.txt b/Documentation/devicetree/bindings/mfd/da9052-i2c.txt
index 9554292dc6cb..57fa74e65664 100644
--- a/Documentation/devicetree/bindings/mfd/da9052-i2c.txt
+++ b/Documentation/devicetree/bindings/mfd/da9052-i2c.txt
@@ -4,6 +4,14 @@ Required properties:
- compatible : Should be "dlg,da9052", "dlg,da9053-aa",
"dlg,da9053-ab", or "dlg,da9053-bb"
+Optional properties:
+- dlg,tsi-as-adc : Boolean, if set the X+, X-, Y+, Y- touchscreen
+ input lines are used as general purpose analogue
+ input.
+- tsiref-supply: Phandle to the regulator, which provides the reference
+ voltage for the TSIREF pin. Must be provided when the
+ touchscreen pins are used for ADC purposes.
+
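A sketch of how the new properties might be used on a DA9053 node (the I2C
address and regulator phandle are assumptions):

 pmic@48 {
	compatible = "dlg,da9053-aa";
	reg = <0x48>;
	/* use the touchscreen pins as general purpose ADC inputs */
	dlg,tsi-as-adc;
	tsiref-supply = <&ldo2_reg>;
 };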
Sub-nodes:
- regulators : Contain the regulator nodes. The DA9052/53 regulators are
bound using their names as listed below:
diff --git a/Documentation/devicetree/bindings/mfd/retu.txt b/Documentation/devicetree/bindings/mfd/retu.txt
new file mode 100644
index 000000000000..876242394a16
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/retu.txt
@@ -0,0 +1,25 @@
+* Device tree bindings for Nokia Retu and Tahvo multi-function device
+
+Retu and Tahvo are multi-function devices found on Nokia Internet
+Tablets (770, N800 and N810). The Retu chip provides watchdog timer
+and power button control functionality, while the Tahvo chip provides
+USB transceiver functionality.
+
+Required properties:
+- compatible: "nokia,retu" or "nokia,tahvo"
+- reg: Specifies the CBUS slave address of the ASIC chip
+- interrupts: The interrupt line the device is connected to
+- interrupt-parent: The parent interrupt controller
+
+Example:
+
+cbus0 {
+ compatible = "i2c-cbus-gpio";
+ ...
+ retu: retu@1 {
+ compatible = "nokia,retu";
+ interrupt-parent = <&gpio4>;
+ interrupts = <12 IRQ_TYPE_EDGE_RISING>;
+ reg = <0x1>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/mfd/rk808.txt b/Documentation/devicetree/bindings/mfd/rk808.txt
index 9636ae8d8d41..91b65227afeb 100644
--- a/Documentation/devicetree/bindings/mfd/rk808.txt
+++ b/Documentation/devicetree/bindings/mfd/rk808.txt
@@ -1,11 +1,14 @@
RK8XX Power Management Integrated Circuit
The rk8xx family current members:
+rk805
rk808
rk818
Required properties:
-- compatible: "rockchip,rk808", "rockchip,rk818"
+- compatible: "rockchip,rk805"
+- compatible: "rockchip,rk808"
+- compatible: "rockchip,rk818"
- reg: I2C slave address
- interrupt-parent: The parent interrupt controller.
- interrupts: the interrupt outputs of the controller.
@@ -18,6 +21,14 @@ Optional properties:
- rockchip,system-power-controller: Telling whether or not this pmic is controlling
the system power.
+Optional RK805 properties:
+- vcc1-supply: The input supply for DCDC_REG1
+- vcc2-supply: The input supply for DCDC_REG2
+- vcc3-supply: The input supply for DCDC_REG3
+- vcc4-supply: The input supply for DCDC_REG4
+- vcc5-supply: The input supply for LDO_REG1 and LDO_REG2
+- vcc6-supply: The input supply for LDO_REG3
+
Optional RK808 properties:
- vcc1-supply: The input supply for DCDC_REG1
- vcc2-supply: The input supply for DCDC_REG2
@@ -56,6 +67,15 @@ by a child node of the 'regulators' node.
/* standard regulator bindings here */
};
+The following regulators of the RK805 PMIC are supported. Note that
+the 'n' in regulator name, as in DCDC_REGn or LDOn, represents the DCDC or LDO
+number as described in RK805 datasheet.
+
+ - DCDC_REGn
+ - valid values for n are 1 to 4.
+ - LDO_REGn
+ - valid values for n are 1 to 3
+
Following regulators of the RK808 PMIC block are supported. Note that
the 'n' in regulator name, as in DCDC_REGn or LDOn, represents the DCDC or LDO
number as described in RK808 datasheet.
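A sketch of an RK805 node wired up with the new supply properties (the address,
supply names and the single regulator shown are illustrative assumptions):

 rk805: pmic@18 {
	compatible = "rockchip,rk805";
	reg = <0x18>;
	interrupt-parent = <&gpio2>;
	interrupts = <6 IRQ_TYPE_LEVEL_LOW>;
	vcc1-supply = <&vcc_sys>;
	vcc2-supply = <&vcc_sys>;
	vcc3-supply = <&vcc_sys>;
	vcc4-supply = <&vcc_sys>;
	vcc5-supply = <&vcc_io>;
	vcc6-supply = <&vcc_io>;

	regulators {
		vdd_logic: DCDC_REG1 {
			regulator-name = "vdd_logic";
			regulator-min-microvolt = <712500>;
			regulator-max-microvolt = <1450000>;
		};
	};
 };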
diff --git a/Documentation/devicetree/bindings/mfd/stm32-lptimer.txt b/Documentation/devicetree/bindings/mfd/stm32-lptimer.txt
new file mode 100644
index 000000000000..2a9ff29db9c9
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/stm32-lptimer.txt
@@ -0,0 +1,48 @@
+STMicroelectronics STM32 Low-Power Timer
+
+The STM32 Low-Power Timer (LPTIM) is a 16-bit timer that provides several
+functions:
+- PWM output (with programmable prescaler, configurable polarity)
+- Quadrature encoder, counter
+- Trigger source for STM32 ADC/DAC (LPTIM_OUT)
+
+Required properties:
+- compatible: Must be "st,stm32-lptimer".
+- reg: Offset and length of the device's register set.
+- clocks: Phandle to the clock used by the LP Timer module.
+- clock-names: Must be "mux".
+- #address-cells: Should be '<1>'.
+- #size-cells: Should be '<0>'.
+
+Optional subnodes:
+- pwm: See ../pwm/pwm-stm32-lp.txt
+- counter: See ../iio/timer/stm32-lptimer-cnt.txt
+- trigger: See ../iio/timer/stm32-lptimer-trigger.txt
+
+Example:
+
+ timer@40002400 {
+ compatible = "st,stm32-lptimer";
+ reg = <0x40002400 0x400>;
+ clocks = <&timer_clk>;
+ clock-names = "mux";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pwm {
+ compatible = "st,stm32-pwm-lp";
+ pinctrl-names = "default";
+ pinctrl-0 = <&lppwm1_pins>;
+ };
+
+ trigger@0 {
+ compatible = "st,stm32-lptimer-trigger";
+ reg = <0>;
+ };
+
+ counter {
+ compatible = "st,stm32-lptimer-counter";
+ pinctrl-names = "default";
+ pinctrl-0 = <&lptim1_in_pins>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mfd/tps6105x.txt b/Documentation/devicetree/bindings/mfd/tps6105x.txt
new file mode 100644
index 000000000000..93602c7a19c8
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/tps6105x.txt
@@ -0,0 +1,17 @@
+* Device tree bindings for TI TPS61050/61052 Boost Converters
+
+The TPS61050/TPS61052 is a high-power "white LED driver". The
+device provides LED, GPIO and regulator functionalities.
+
+Required properties:
+- compatible: "ti,tps61050" or "ti,tps61052"
+- reg: Specifies the I2C slave address
+
+Example:
+
+i2c0 {
+ tps61052@33 {
+ compatible = "ti,tps61052";
+ reg = <0x33>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/mfd/wm831x.txt b/Documentation/devicetree/bindings/mfd/wm831x.txt
index 9f8b7430673c..505709403d3f 100644
--- a/Documentation/devicetree/bindings/mfd/wm831x.txt
+++ b/Documentation/devicetree/bindings/mfd/wm831x.txt
@@ -31,6 +31,7 @@ Required properties:
../interrupt-controller/interrupts.txt
Optional sub-nodes:
+ - phys : Contains a phandle to the USB PHY.
- regulators : Contains sub-nodes for each of the regulators supplied by
the device. The regulators are bound using their names listed below:
diff --git a/Documentation/devicetree/bindings/mfd/zii,rave-sp.txt b/Documentation/devicetree/bindings/mfd/zii,rave-sp.txt
new file mode 100644
index 000000000000..088eff9ddb78
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/zii,rave-sp.txt
@@ -0,0 +1,39 @@
+Zodiac Inflight Innovations RAVE Supervisory Processor
+
+The RAVE Supervisory Processor communicates with the SoC over UART. It is
+expected that its Device Tree node is specified as a child of the node
+corresponding to the UART controller used for communication.
+
+Required parent device properties:
+
+ - compatible: Should be one of:
+ - "zii,rave-sp-niu"
+ - "zii,rave-sp-mezz"
+ - "zii,rave-sp-esb"
+ - "zii,rave-sp-rdu1"
+ - "zii,rave-sp-rdu2"
+
+ - current-speed: Should be set to baud rate SP device is using
+
+RAVE SP consists of the following sub-devices:
+
+Device Description
+------ -----------
+rave-sp-wdt : Watchdog
+rave-sp-nvmem : Interface to onboard EEPROM
+rave-sp-backlight : Display backlight
+rave-sp-hwmon : Interface to onboard hardware sensors
+rave-sp-leds : Interface to onboard LEDs
+rave-sp-input : Interface to onboard power button
+
+Example of usage:
+
+ rdu {
+ compatible = "zii,rave-sp-rdu2";
+ current-speed = <1000000>;
+
+ watchdog {
+ compatible = "zii,rave-sp-watchdog";
+ };
+ };
+
diff --git a/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt
index aad98442788b..a58c173b7ab9 100644
--- a/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/exynos-dw-mshc.txt
@@ -78,7 +78,6 @@ Example:
};
dwmmc0@12200000 {
- num-slots = <1>;
cap-mmc-highspeed;
cap-sd-highspeed;
broken-cd;
diff --git a/Documentation/devicetree/bindings/mmc/img-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/img-dw-mshc.txt
index 85de99fcaa2f..c54e577eea07 100644
--- a/Documentation/devicetree/bindings/mmc/img-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/img-dw-mshc.txt
@@ -24,6 +24,5 @@ Example:
fifo-depth = <0x20>;
bus-width = <4>;
- num-slots = <1>;
disable-wp;
};
diff --git a/Documentation/devicetree/bindings/mmc/k3-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/k3-dw-mshc.txt
index 8af1afcb86dc..07242d141773 100644
--- a/Documentation/devicetree/bindings/mmc/k3-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/k3-dw-mshc.txt
@@ -36,7 +36,6 @@ Example:
/* Board portion */
dwmmc0@fcd03000 {
- num-slots = <1>;
vmmc-supply = <&ldo12>;
fifo-depth = <0x100>;
pinctrl-names = "default";
@@ -52,7 +51,6 @@ Example:
dwmmc_1: dwmmc1@f723e000 {
compatible = "hisilicon,hi6220-dw-mshc";
- num-slots = <0x1>;
bus-width = <0x4>;
disable-wp;
cap-sd-highspeed;
diff --git a/Documentation/devicetree/bindings/mmc/renesas,mmcif.txt b/Documentation/devicetree/bindings/mmc/renesas,mmcif.txt
index c32dc5a9dbe6..5ff1e12c655a 100644
--- a/Documentation/devicetree/bindings/mmc/renesas,mmcif.txt
+++ b/Documentation/devicetree/bindings/mmc/renesas,mmcif.txt
@@ -11,6 +11,8 @@ Required properties:
- "renesas,mmcif-r7s72100" for the MMCIF found in r7s72100 SoCs
- "renesas,mmcif-r8a73a4" for the MMCIF found in r8a73a4 SoCs
- "renesas,mmcif-r8a7740" for the MMCIF found in r8a7740 SoCs
+ - "renesas,mmcif-r8a7743" for the MMCIF found in r8a7743 SoCs
+ - "renesas,mmcif-r8a7745" for the MMCIF found in r8a7745 SoCs
- "renesas,mmcif-r8a7778" for the MMCIF found in r8a7778 SoCs
- "renesas,mmcif-r8a7790" for the MMCIF found in r8a7790 SoCs
- "renesas,mmcif-r8a7791" for the MMCIF found in r8a7791 SoCs
@@ -21,7 +23,7 @@ Required properties:
- interrupts: Some SoCs have only 1 shared interrupt, while others have either
2 or 3 individual interrupts (error, int, card detect). Below is the number
of interrupts for each SoC:
- 1: r8a73a4, r8a7778, r8a7790, r8a7791, r8a7793, r8a7794
+ 1: r8a73a4, r8a7743, r8a7745, r8a7778, r8a7790, r8a7791, r8a7793, r8a7794
2: r8a7740, sh73a0
3: r7s72100
diff --git a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
index 49ed3ad2524a..c6558785e61b 100644
--- a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
@@ -15,6 +15,7 @@ Required Properties:
- "rockchip,rk3288-dw-mshc": for Rockchip RK3288
- "rockchip,rv1108-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RV1108
- "rockchip,rk3036-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3036
+ - "rockchip,rk3228-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK322x
- "rockchip,rk3328-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3328
- "rockchip,rk3368-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3368
- "rockchip,rk3399-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3399
diff --git a/Documentation/devicetree/bindings/mmc/sunxi-mmc.txt b/Documentation/devicetree/bindings/mmc/sunxi-mmc.txt
index 7d53a799f140..63b57e2a10fb 100644
--- a/Documentation/devicetree/bindings/mmc/sunxi-mmc.txt
+++ b/Documentation/devicetree/bindings/mmc/sunxi-mmc.txt
@@ -12,6 +12,7 @@ Required properties:
* "allwinner,sun4i-a10-mmc"
* "allwinner,sun5i-a13-mmc"
* "allwinner,sun7i-a20-mmc"
+ * "allwinner,sun8i-a83t-emmc"
* "allwinner,sun9i-a80-mmc"
* "allwinner,sun50i-a64-emmc"
* "allwinner,sun50i-a64-mmc"
diff --git a/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
index 9cb55ca57461..ef3e5f14067a 100644
--- a/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/synopsys-dw-mshc.txt
@@ -12,12 +12,12 @@ Required Properties:
* #address-cells: should be 1.
* #size-cells: should be 0.
-# Slots: The slot specific information are contained within child-nodes with
- each child-node representing a supported slot. There should be atleast one
- child node representing a card slot. The name of the child node representing
- the slot is recommended to be slot@n where n is the unique number of the slot
- connected to the controller. The following are optional properties which
- can be included in the slot child node.
+# Slots (DEPRECATED): The slot specific information is contained within
+ child-nodes with each child-node representing a supported slot. There should
+ be at least one child node representing a card slot. The name of the child node
+ representing the slot is recommended to be slot@n where n is the unique number
+ of the slot connected to the controller. The following are optional properties
+ which can be included in the slot child node.
* reg: specifies the physical slot number. The valid values of this
property is 0 to (num-slots -1), where num-slots is the value
@@ -63,7 +63,7 @@ Optional properties:
clock(cclk_out). If it's not specified, max is 200MHZ and min is 400KHz by default.
(Use the "max-frequency" instead of "clock-freq-min-max".)
-* num-slots: specifies the number of slots supported by the controller.
+* num-slots (DEPRECATED): specifies the number of slots supported by the controller.
The number of physical slots actually used could be equal or less than the
value specified by num-slots. If this property is not specified, the value
of num-slot property is assumed to be 1.
@@ -124,7 +124,6 @@ board specific portions as listed below.
dwmmc0@12200000 {
clock-frequency = <400000000>;
clock-freq-min-max = <400000 200000000>;
- num-slots = <1>;
broken-cd;
fifo-depth = <0x80>;
card-detect-delay = <200>;
@@ -139,7 +138,6 @@ board specific portions as listed below.
dwmmc0@12200000 {
clock-frequency = <400000000>;
clock-freq-min-max = <400000 200000000>;
- num-slots = <1>;
broken-cd;
fifo-depth = <0x80>;
card-detect-delay = <200>;
diff --git a/Documentation/devicetree/bindings/mmc/tmio_mmc.txt b/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
index 4fd8b7acc510..54ef642f23a0 100644
--- a/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
+++ b/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
@@ -15,6 +15,8 @@ Required properties:
"renesas,sdhi-r7s72100" - SDHI IP on R7S72100 SoC
"renesas,sdhi-r8a73a4" - SDHI IP on R8A73A4 SoC
"renesas,sdhi-r8a7740" - SDHI IP on R8A7740 SoC
+ "renesas,sdhi-r8a7743" - SDHI IP on R8A7743 SoC
+ "renesas,sdhi-r8a7745" - SDHI IP on R8A7745 SoC
"renesas,sdhi-r8a7778" - SDHI IP on R8A7778 SoC
"renesas,sdhi-r8a7779" - SDHI IP on R8A7779 SoC
"renesas,sdhi-r8a7790" - SDHI IP on R8A7790 SoC
@@ -33,10 +35,8 @@ Required properties:
If 2 clocks are specified by the hardware, you must name them as
"core" and "cd". If the controller only has 1 clock, naming is not
required.
- Below is the number clocks for each supported SoC:
- 1: SH73A0, R8A73A4, R8A7740, R8A7778, R8A7779, R8A7790
- R8A7791, R8A7792, R8A7793, R8A7794, R8A7795, R8A7796
- 2: R7S72100
+ Devices which have more than 1 clock are listed below:
+ 2: R7S72100
Optional properties:
- toshiba,mmc-wrprotect-disable: write-protect detection is unavailable
diff --git a/Documentation/devicetree/bindings/mmc/zx-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/zx-dw-mshc.txt
index eaade0e5adeb..906819a90c2b 100644
--- a/Documentation/devicetree/bindings/mmc/zx-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/zx-dw-mshc.txt
@@ -25,7 +25,6 @@ Example:
clock-frequency = <50000000>;
clocks = <&topcrm SD0_AHB>, <&topcrm SD0_WCLK>;
clock-names = "biu", "ciu";
- num-slots = <1>;
max-frequency = <50000000>;
cap-sdio-irq;
cap-sd-highspeed;
diff --git a/Documentation/devicetree/bindings/net/anarion-gmac.txt b/Documentation/devicetree/bindings/net/anarion-gmac.txt
new file mode 100644
index 000000000000..fe678965ae69
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/anarion-gmac.txt
@@ -0,0 +1,25 @@
+* Adaptrum Anarion ethernet controller
+
+This device is a platform glue layer for stmmac.
+Please see stmmac.txt for the other unchanged properties.
+
+Required properties:
+ - compatible: Should be "adaptrum,anarion-gmac", "snps,dwmac"
+ - phy-mode: Should be "rgmii". Other modes are not currently supported.
+
+
+Examples:
+
+ gmac1: ethernet@f2014000 {
+ compatible = "adaptrum,anarion-gmac", "snps,dwmac";
+ reg = <0xf2014000 0x4000>, <0xf2018100 8>;
+
+ interrupt-parent = <&core_intc>;
+ interrupts = <21>;
+ interrupt-names = "macirq";
+
+ clocks = <&core_clk>;
+ clock-names = "stmmaceth";
+
+ phy-mode = "rgmii";
+ };
diff --git a/Documentation/devicetree/bindings/net/brcm,amac.txt b/Documentation/devicetree/bindings/net/brcm,amac.txt
index 2fefa1a44afd..ad16c1f481f7 100644
--- a/Documentation/devicetree/bindings/net/brcm,amac.txt
+++ b/Documentation/devicetree/bindings/net/brcm,amac.txt
@@ -11,6 +11,7 @@ Required properties:
- reg-names: Names of the registers.
"amac_base": Address and length of the GMAC registers
"idm_base": Address and length of the GMAC IDM registers
+ (required for NSP and Northstar2)
"nicpm_base": Address and length of the NIC Port Manager
registers (required for Northstar2)
- interrupts: Interrupt number
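
A hedged sketch of an AMAC node using the three register ranges described
above; the compatible string, unit addresses and interrupt number are
illustrative assumptions, not taken from a real board file:

    ethernet@61000000 {
        compatible = "brcm,ns2-amac";
        reg = <0x61000000 0x1000>,
              <0x61090000 0x1000>,
              <0x61030000 0x1000>;
        reg-names = "amac_base", "idm_base", "nicpm_base";
        interrupts = <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>;
    };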
diff --git a/Documentation/devicetree/bindings/net/brcm,bgmac-nsp.txt b/Documentation/devicetree/bindings/net/brcm,bgmac-nsp.txt
deleted file mode 100644
index 022946caa7e2..000000000000
--- a/Documentation/devicetree/bindings/net/brcm,bgmac-nsp.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-Broadcom GMAC Ethernet Controller Device Tree Bindings
--------------------------------------------------------------
-
-Required properties:
- - compatible: "brcm,bgmac-nsp"
- - reg: Address and length of the GMAC registers,
- Address and length of the GMAC IDM registers
- - reg-names: Names of the registers. Must have both "gmac_base" and
- "idm_base"
- - interrupts: Interrupt number
-
-Optional properties:
-- mac-address: See ethernet.txt file in the same directory
-
-Examples:
-
-gmac0: ethernet@18022000 {
- compatible = "brcm,bgmac-nsp";
- reg = <0x18022000 0x1000>,
- <0x18110000 0x1000>;
- reg-names = "gmac_base", "idm_base";
- interrupts = <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>;
- status = "disabled";
-};
diff --git a/Documentation/devicetree/bindings/net/broadcom-bluetooth.txt b/Documentation/devicetree/bindings/net/broadcom-bluetooth.txt
new file mode 100644
index 000000000000..4194ff7e6ee6
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/broadcom-bluetooth.txt
@@ -0,0 +1,35 @@
+Broadcom Bluetooth Chips
+------------------------
+
+This documents the binding structure and common properties for serial
+attached Broadcom devices.
+
+Serial attached Broadcom devices shall be represented as a child node of the
+host UART device to which the slave device is attached.
+
+Required properties:
+
+ - compatible: should contain one of the following:
+ * "brcm,bcm43438-bt"
+
+Optional properties:
+
+ - max-speed: see Documentation/devicetree/bindings/serial/slave-device.txt
+ - shutdown-gpios: GPIO specifier, used to enable the BT module
+ - device-wakeup-gpios: GPIO specifier, used to wakeup the controller
+ - host-wakeup-gpios: GPIO specifier, used to wakeup the host processor
+ - clocks: clock specifier if external clock provided to the controller
+ - clock-names: should be "extclk"
+
+
+Example:
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart2_pins>;
+
+ bluetooth {
+ compatible = "brcm,bcm43438-bt";
+ max-speed = <921600>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/net/dwmac-sun8i.txt b/Documentation/devicetree/bindings/net/dwmac-sun8i.txt
deleted file mode 100644
index 725f3b187886..000000000000
--- a/Documentation/devicetree/bindings/net/dwmac-sun8i.txt
+++ /dev/null
@@ -1,84 +0,0 @@
-* Allwinner sun8i GMAC ethernet controller
-
-This device is a platform glue layer for stmmac.
-Please see stmmac.txt for the other unchanged properties.
-
-Required properties:
-- compatible: should be one of the following string:
- "allwinner,sun8i-a83t-emac"
- "allwinner,sun8i-h3-emac"
- "allwinner,sun8i-v3s-emac"
- "allwinner,sun50i-a64-emac"
-- reg: address and length of the register for the device.
-- interrupts: interrupt for the device
-- interrupt-names: should be "macirq"
-- clocks: A phandle to the reference clock for this device
-- clock-names: should be "stmmaceth"
-- resets: A phandle to the reset control for this device
-- reset-names: should be "stmmaceth"
-- phy-mode: See ethernet.txt
-- phy-handle: See ethernet.txt
-- #address-cells: shall be 1
-- #size-cells: shall be 0
-- syscon: A phandle to the syscon of the SoC with one of the following
- compatible string:
- - allwinner,sun8i-h3-system-controller
- - allwinner,sun8i-v3s-system-controller
- - allwinner,sun50i-a64-system-controller
- - allwinner,sun8i-a83t-system-controller
-
-Optional properties:
-- allwinner,tx-delay-ps: TX clock delay chain value in ps. Range value is 0-700. Default is 0)
-- allwinner,rx-delay-ps: RX clock delay chain value in ps. Range value is 0-3100. Default is 0)
-Both delay properties need to be a multiple of 100. They control the delay for
-external PHY.
-
-Optional properties for the following compatibles:
- - "allwinner,sun8i-h3-emac",
- - "allwinner,sun8i-v3s-emac":
-- allwinner,leds-active-low: EPHY LEDs are active low
-
-Required child node of emac:
-- mdio bus node: should be named mdio
-
-Required properties of the mdio node:
-- #address-cells: shall be 1
-- #size-cells: shall be 0
-
-The device node referenced by "phy" or "phy-handle" should be a child node
-of the mdio node. See phy.txt for the generic PHY bindings.
-
-Required properties of the phy node with the following compatibles:
- - "allwinner,sun8i-h3-emac",
- - "allwinner,sun8i-v3s-emac":
-- clocks: a phandle to the reference clock for the EPHY
-- resets: a phandle to the reset control for the EPHY
-
-Example:
-
-emac: ethernet@1c0b000 {
- compatible = "allwinner,sun8i-h3-emac";
- syscon = <&syscon>;
- reg = <0x01c0b000 0x104>;
- interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "macirq";
- resets = <&ccu RST_BUS_EMAC>;
- reset-names = "stmmaceth";
- clocks = <&ccu CLK_BUS_EMAC>;
- clock-names = "stmmaceth";
- #address-cells = <1>;
- #size-cells = <0>;
-
- phy-handle = <&int_mii_phy>;
- phy-mode = "mii";
- allwinner,leds-active-low;
- mdio: mdio {
- #address-cells = <1>;
- #size-cells = <0>;
- int_mii_phy: ethernet-phy@1 {
- reg = <1>;
- clocks = <&ccu CLK_BUS_EPHY>;
- resets = <&ccu RST_BUS_EPHY>;
- };
- };
-};
diff --git a/Documentation/devicetree/bindings/net/marvell-pp2.txt b/Documentation/devicetree/bindings/net/marvell-pp2.txt
index 6b4956beff8c..c78f3187dfea 100644
--- a/Documentation/devicetree/bindings/net/marvell-pp2.txt
+++ b/Documentation/devicetree/bindings/net/marvell-pp2.txt
@@ -41,6 +41,11 @@ Optional properties (port):
- marvell,loopback: port is loopback mode
- phy: a phandle to a phy node defining the PHY address (as the reg
property, a single integer).
+- interrupt-names: if more than a single interrupt for rx is given, must
+ be the name associated to the interrupts listed. Valid
+ names are: "tx-cpu0", "tx-cpu1", "tx-cpu2", "tx-cpu3",
+ "rx-shared", "link".
+- marvell,system-controller: a phandle to the system controller.
Example for marvell,armada-375-pp2:
@@ -80,19 +85,37 @@ cpm_ethernet: ethernet@0 {
clock-names = "pp_clk", "gop_clk", "gp_clk";
eth0: eth0 {
- interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <ICU_GRP_NSR 39 IRQ_TYPE_LEVEL_HIGH>,
+ <ICU_GRP_NSR 43 IRQ_TYPE_LEVEL_HIGH>,
+ <ICU_GRP_NSR 47 IRQ_TYPE_LEVEL_HIGH>,
+ <ICU_GRP_NSR 51 IRQ_TYPE_LEVEL_HIGH>,
+ <ICU_GRP_NSR 55 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tx-cpu0", "tx-cpu1", "tx-cpu2",
+ "tx-cpu3", "rx-shared";
port-id = <0>;
gop-port-id = <0>;
};
eth1: eth1 {
- interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <ICU_GRP_NSR 40 IRQ_TYPE_LEVEL_HIGH>,
+ <ICU_GRP_NSR 44 IRQ_TYPE_LEVEL_HIGH>,
+ <ICU_GRP_NSR 48 IRQ_TYPE_LEVEL_HIGH>,
+ <ICU_GRP_NSR 52 IRQ_TYPE_LEVEL_HIGH>,
+ <ICU_GRP_NSR 56 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tx-cpu0", "tx-cpu1", "tx-cpu2",
+ "tx-cpu3", "rx-shared";
port-id = <1>;
gop-port-id = <2>;
};
eth2: eth2 {
- interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <ICU_GRP_NSR 41 IRQ_TYPE_LEVEL_HIGH>,
+ <ICU_GRP_NSR 45 IRQ_TYPE_LEVEL_HIGH>,
+ <ICU_GRP_NSR 49 IRQ_TYPE_LEVEL_HIGH>,
+ <ICU_GRP_NSR 53 IRQ_TYPE_LEVEL_HIGH>,
+ <ICU_GRP_NSR 57 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tx-cpu0", "tx-cpu1", "tx-cpu2",
+ "tx-cpu3", "rx-shared";
port-id = <2>;
gop-port-id = <3>;
};
diff --git a/Documentation/devicetree/bindings/net/mediatek-net.txt b/Documentation/devicetree/bindings/net/mediatek-net.txt
index c7194e87d5f4..1d1168b805cc 100644
--- a/Documentation/devicetree/bindings/net/mediatek-net.txt
+++ b/Documentation/devicetree/bindings/net/mediatek-net.txt
@@ -7,24 +7,30 @@ have dual GMAC each represented by a child node..
* Ethernet controller node
Required properties:
-- compatible: Should be "mediatek,mt2701-eth"
+- compatible: Should be
+ "mediatek,mt2701-eth": for MT2701 SoC
+ "mediatek,mt7623-eth", "mediatek,mt2701-eth": for MT7623 SoC
+ "mediatek,mt7622-eth": for MT7622 SoC
- reg: Address and length of the register set for the device
- interrupts: Should contain the three frame engines interrupts in numeric
order. These are fe_int0, fe_int1 and fe_int2.
- clocks: the clock used by the core
- clock-names: the names of the clock listed in the clocks property. These are
- "ethif", "esw", "gp2", "gp1"
+ "ethif", "esw", "gp2", "gp1" : For MT2701 and MT7623 SoC
+ "ethif", "esw", "gp0", "gp1", "gp2", "sgmii_tx250m", "sgmii_rx250m",
+ "sgmii_cdr_ref", "sgmii_cdr_fb", "sgmii_ck", "eth2pll" : For MT7622 SoC
- power-domains: phandle to the power domain that the ethernet is part of
- resets: Should contain a phandle to the ethsys reset signal
- reset-names: Should contain the reset signal name "eth"
- mediatek,ethsys: phandle to the syscon node that handles the port setup
+- mediatek,sgmiisys: phandle to the syscon node that handles the SGMII setup
+ which is required for those SoCs equipped with SGMII such as MT7622 SoC.
- mediatek,pctl: phandle to the syscon node that handles the ports slew rate
and driver current
Optional properties:
- interrupt-parent: Should be the phandle for the interrupt controller
that services interrupts for this device
-
* Ethernet MAC node
Required properties:
diff --git a/Documentation/devicetree/bindings/net/phy.txt b/Documentation/devicetree/bindings/net/phy.txt
index b55857696fc3..d3c24d5ffa9a 100644
--- a/Documentation/devicetree/bindings/net/phy.txt
+++ b/Documentation/devicetree/bindings/net/phy.txt
@@ -52,6 +52,11 @@ Optional Properties:
Mark the corresponding energy efficient ethernet mode as broken and
request the ethernet to stop advertising it.
+- phy-is-integrated: If set, indicates that the PHY is integrated into the same
+ physical package as the Ethernet MAC. If needed, muxers should be configured
+ to ensure the integrated PHY is used. The absence of this property indicates
+ the muxers should be configured so that the external PHY is used.
+
Example:
ethernet-phy@0 {
diff --git a/Documentation/devicetree/bindings/net/renesas,ravb.txt b/Documentation/devicetree/bindings/net/renesas,ravb.txt
index b519503be51a..16723535e1aa 100644
--- a/Documentation/devicetree/bindings/net/renesas,ravb.txt
+++ b/Documentation/devicetree/bindings/net/renesas,ravb.txt
@@ -4,19 +4,25 @@ This file provides information on what the device node for the Ethernet AVB
interface contains.
Required properties:
-- compatible: "renesas,etheravb-r8a7790" if the device is a part of R8A7790 SoC.
- "renesas,etheravb-r8a7791" if the device is a part of R8A7791 SoC.
- "renesas,etheravb-r8a7792" if the device is a part of R8A7792 SoC.
- "renesas,etheravb-r8a7793" if the device is a part of R8A7793 SoC.
- "renesas,etheravb-r8a7794" if the device is a part of R8A7794 SoC.
- "renesas,etheravb-r8a7795" if the device is a part of R8A7795 SoC.
- "renesas,etheravb-r8a7796" if the device is a part of R8A7796 SoC.
- "renesas,etheravb-rcar-gen2" for generic R-Car Gen 2 compatible interface.
- "renesas,etheravb-rcar-gen3" for generic R-Car Gen 3 compatible interface.
+- compatible: Must contain one or more of the following:
+ - "renesas,etheravb-r8a7743" for the R8A7743 SoC.
+ - "renesas,etheravb-r8a7745" for the R8A7745 SoC.
+ - "renesas,etheravb-r8a7790" for the R8A7790 SoC.
+ - "renesas,etheravb-r8a7791" for the R8A7791 SoC.
+ - "renesas,etheravb-r8a7792" for the R8A7792 SoC.
+ - "renesas,etheravb-r8a7793" for the R8A7793 SoC.
+ - "renesas,etheravb-r8a7794" for the R8A7794 SoC.
+ - "renesas,etheravb-rcar-gen2" as a fallback for the above
+ R-Car Gen2 and RZ/G1 devices.
- When compatible with the generic version, nodes must list the
- SoC-specific version corresponding to the platform first
- followed by the generic version.
+ - "renesas,etheravb-r8a7795" for the R8A7795 SoC.
+ - "renesas,etheravb-r8a7796" for the R8A7796 SoC.
+ - "renesas,etheravb-rcar-gen3" as a fallback for the above
+ R-Car Gen3 devices.
+
+ When compatible with the generic version, nodes must list the
+ SoC-specific version corresponding to the platform first followed by
+ the generic version.
- reg: offset and length of (1) the register block and (2) the stream buffer.
- interrupts: A list of interrupt-specifiers, one for each entry in
diff --git a/Documentation/devicetree/bindings/net/rockchip-dwmac.txt b/Documentation/devicetree/bindings/net/rockchip-dwmac.txt
index 8f427550720a..c1325387632c 100644
--- a/Documentation/devicetree/bindings/net/rockchip-dwmac.txt
+++ b/Documentation/devicetree/bindings/net/rockchip-dwmac.txt
@@ -10,6 +10,7 @@ Required properties:
"rockchip,rk3366-gmac": found on RK3366 SoCs
"rockchip,rk3368-gmac": found on RK3368 SoCs
"rockchip,rk3399-gmac": found on RK3399 SoCs
+ "rockchip,rv1108-gmac": found on RV1108 SoCs
- reg: addresses and length of the register sets for the device.
- interrupts: Should contain the GMAC interrupts.
- interrupt-names: Should contain the interrupt names "macirq".
diff --git a/Documentation/devicetree/bindings/net/xilinx_axienet.txt b/Documentation/devicetree/bindings/net/xilinx_axienet.txt
new file mode 100644
index 000000000000..38f9ec076743
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/xilinx_axienet.txt
@@ -0,0 +1,55 @@
+XILINX AXI ETHERNET Device Tree Bindings
+--------------------------------------------------------
+
+Also called the AXI 1G/2.5G Ethernet Subsystem, the Xilinx AXI Ethernet IP core
+provides connectivity to an external ethernet PHY supporting different
+interfaces: MII, GMII, RGMII, SGMII, 1000BaseX. It also includes two
+segments of memory for buffering TX and RX, as well as the capability of
+offloading TX/RX checksum calculation from the processor.
+
+Management configuration is done through the AXI interface, while the payload is
+sent and received by means of an AXI DMA controller. This driver
+includes the DMA driver code, so it is incompatible with the AXI DMA
+driver.
+
+For more details about MDIO, please refer to the phy.txt file in the same directory.
+
+Required properties:
+- compatible : Must be one of "xlnx,axi-ethernet-1.00.a",
+ "xlnx,axi-ethernet-1.01.a", "xlnx,axi-ethernet-2.01.a"
+- reg : Address and length of the IO space.
+- interrupts : Should be a list of two interrupts, TX and RX.
+- phy-handle : Should point to the external phy device.
+ See ethernet.txt file in the same directory.
+- xlnx,rxmem : Set to allocated memory buffer for Rx/Tx in the hardware
+
+Optional properties:
+- phy-mode : See ethernet.txt
+- xlnx,phy-type : Deprecated, do not use, but still accepted in preference
+ to phy-mode.
+- xlnx,txcsum : 0 or empty for disabling TX checksum offload,
+ 1 to enable partial TX checksum offload,
+ 2 to enable full TX checksum offload
+- xlnx,rxcsum : Same values as xlnx,txcsum but for RX checksum offload
+
+Example:
+ axi_ethernet_eth: ethernet@40c00000 {
+ compatible = "xlnx,axi-ethernet-1.00.a";
+ device_type = "network";
+ interrupt-parent = <&microblaze_0_axi_intc>;
+ interrupts = <2 0>;
+ phy-mode = "mii";
+ reg = <0x40c00000 0x40000>;
+ xlnx,rxcsum = <0x2>;
+ xlnx,rxmem = <0x800>;
+ xlnx,txcsum = <0x2>;
+ phy-handle = <&phy0>;
+ axi_ethernetlite_0_mdio: mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ phy0: phy@0 {
+ device_type = "ethernet-phy";
+ reg = <1>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/nvmem/rockchip-efuse.txt b/Documentation/devicetree/bindings/nvmem/rockchip-efuse.txt
index 194926f77194..1ff02afdc55a 100644
--- a/Documentation/devicetree/bindings/nvmem/rockchip-efuse.txt
+++ b/Documentation/devicetree/bindings/nvmem/rockchip-efuse.txt
@@ -4,7 +4,7 @@ Required properties:
- compatible: Should be one of the following.
- "rockchip,rk3066a-efuse" - for RK3066a SoCs.
- "rockchip,rk3188-efuse" - for RK3188 SoCs.
- - "rockchip,rk322x-efuse" - for RK322x SoCs.
+ - "rockchip,rk3228-efuse" - for RK3228 SoCs.
- "rockchip,rk3288-efuse" - for RK3288 SoCs.
- "rockchip,rk3399-efuse" - for RK3399 SoCs.
- reg: Should contain the registers location and exact eFuse size
diff --git a/Documentation/devicetree/bindings/phy/phy-mt65xx-usb.txt b/Documentation/devicetree/bindings/phy/phy-mtk-tphy.txt
index 0acc5a99fb79..faf18084a33a 100644
--- a/Documentation/devicetree/bindings/phy/phy-mt65xx-usb.txt
+++ b/Documentation/devicetree/bindings/phy/phy-mtk-tphy.txt
@@ -1,13 +1,18 @@
-mt65xx USB3.0 PHY binding
+MediaTek T-PHY binding
--------------------------
-This binding describes a usb3.0 phy for mt65xx platforms of Medaitek SoC.
+The T-PHY controller supports physical layer functionality for a number of
+controllers on MediaTek SoCs, such as USB2.0, USB3.0, PCIe, and SATA.
Required properties (controller (parent) node):
- compatible : should be one of
- "mediatek,mt2701-u3phy"
- "mediatek,mt2712-u3phy"
- "mediatek,mt8173-u3phy"
+ "mediatek,generic-tphy-v1"
+ "mediatek,generic-tphy-v2"
+ "mediatek,mt2701-u3phy" (deprecated)
+ "mediatek,mt2712-u3phy" (deprecated)
+ "mediatek,mt8173-u3phy";
+ make use of "mediatek,generic-tphy-v1" on mt2701 instead and
+ "mediatek,generic-tphy-v2" on mt2712 instead.
- clocks : (deprecated, use port's clocks instead) a list of phandle +
clock-specifier pairs, one for each entry in clock-names
- clock-names : (deprecated, use port's one instead) must contain
@@ -35,6 +40,8 @@ Required properties (port (child) node):
cell after port phandle is phy type from:
- PHY_TYPE_USB2
- PHY_TYPE_USB3
+ - PHY_TYPE_PCIE
+ - PHY_TYPE_SATA
Example:
diff --git a/Documentation/devicetree/bindings/phy/phy-mvebu-comphy.txt b/Documentation/devicetree/bindings/phy/phy-mvebu-comphy.txt
new file mode 100644
index 000000000000..bfcf80341657
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/phy-mvebu-comphy.txt
@@ -0,0 +1,43 @@
+mvebu comphy driver
+-------------------
+
+A comphy controller can be found on Marvell Armada 7k/8k on the CP110. It
+provides a number of shared PHYs used by various interfaces (network, sata,
+usb, PCIe...).
+
+Required properties:
+
+- compatible: should be "marvell,comphy-cp110"
+- reg: should contain the comphy register location and length.
+- marvell,system-controller: should contain a phandle to the
+ system controller node.
+- #address-cells: should be 1.
+- #size-cells: should be 0.
+
+A sub-node is required for each comphy lane provided by the comphy.
+
+Required properties (child nodes):
+
+- reg: comphy lane number.
+- #phy-cells : from the generic phy bindings, must be 1. Defines the
+ input port to use for a given comphy lane.
+
+Example:
+
+ cpm_comphy: phy@120000 {
+ compatible = "marvell,comphy-cp110";
+ reg = <0x120000 0x6000>;
+ marvell,system-controller = <&cpm_syscon0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpm_comphy0: phy@0 {
+ reg = <0>;
+ #phy-cells = <1>;
+ };
+
+ cpm_comphy1: phy@1 {
+ reg = <1>;
+ #phy-cells = <1>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt b/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt
index 84d59b0db8df..a67ef2a3874f 100644
--- a/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt
+++ b/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt
@@ -6,6 +6,7 @@ Required properties (phy (parent) node):
* "rockchip,rk3328-usb2phy"
* "rockchip,rk3366-usb2phy"
* "rockchip,rk3399-usb2phy"
+ * "rockchip,rv1108-usb2phy"
- reg : the address offset of grf for usb-phy configuration.
- #clock-cells : should be 0.
- clock-output-names : specify the 480m output clock name.
@@ -18,6 +19,10 @@ Optional properties:
usb-phy output 480m and xin24m.
Refer to clk/clock-bindings.txt for generic clock
consumer properties.
+ - rockchip,usbgrf : phandle to the syscon managing the "usb general
+ register files". When set driver will request its
+ phandle as one companion-grf for some special SoCs
+ (e.g RV1108).
Required nodes : a sub-node is required for each port the phy provides.
The sub-node name is used to identify host or otg port,
@@ -28,10 +33,14 @@ Required nodes : a sub-node is required for each port the phy provides.
Required properties (port (child) node):
- #phy-cells : must be 0. See ./phy-bindings.txt for details.
- interrupts : specify an interrupt for each entry in interrupt-names.
- - interrupt-names : a list which shall be the following entries:
+ - interrupt-names : a list which should be one of the following cases:
+ Regular case:
* "otg-id" : for the otg id interrupt.
* "otg-bvalid" : for the otg vbus interrupt.
* "linestate" : for the host/otg linestate interrupt.
+ Some SoCs use one interrupt with the above muxed together, so for these
+ * "otg-mux" : otg-port interrupt, which mux otg-id/otg-bvalid/linestate
+ to one.
Optional properties:
- phy-supply : phandle to a regulator that provides power to VBUS.
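
A hedged sketch of how the new RV1108 support above might be used: the grf
offset, interrupt number and node labels are illustrative assumptions only,
combining the "rockchip,usbgrf" property with the single "otg-mux" interrupt:

    u2phy: usb2-phy@100 {
        compatible = "rockchip,rv1108-usb2phy";
        reg = <0x100 0x0c>;
        rockchip,usbgrf = <&usbgrf>;
        #clock-cells = <0>;
        clock-output-names = "usbphy";

        u2phy_otg: otg-port {
            #phy-cells = <0>;
            interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
            interrupt-names = "otg-mux";
        };
    };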
diff --git a/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt b/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
index e11c563a65ec..b6a9f2b92bab 100644
--- a/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
+++ b/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
@@ -6,6 +6,7 @@ controllers on Qualcomm chipsets, such as, PCIe, UFS, and USB.
Required properties:
- compatible: compatible list, contains:
+ "qcom,ipq8074-qmp-pcie-phy" for PCIe phy on IPQ8074
"qcom,msm8996-qmp-pcie-phy" for 14nm PCIe phy on msm8996,
"qcom,msm8996-qmp-usb3-phy" for 14nm USB3 phy on msm8996.
@@ -38,6 +39,8 @@ Required properties:
"phy", "common", "cfg".
For "qcom,msm8996-qmp-usb3-phy" must contain
"phy", "common".
+ For "qcom,ipq8074-qmp-pcie-phy" must contain:
+ "phy", "common".
- vdda-phy-supply: Phandle to a regulator supply to PHY core block.
- vdda-pll-supply: Phandle to 1.8V regulator supply to PHY refclk pll block.
@@ -60,6 +63,13 @@ Required properties for child node:
one for each entry in clock-names.
- clock-names: Must contain following for pcie and usb qmp phys:
"pipe<lane-number>" for pipe clock specific to each lane.
+ - clock-output-names: Name of the PHY clock that will be the parent for
+ the above pipe clock.
+
+ For "qcom,ipq8074-qmp-pcie-phy":
+ - "pcie20_phy0_pipe_clk" Pipe Clock parent
+ (or)
+ "pcie20_phy1_pipe_clk"
- resets: a list of phandles and reset controller specifier pairs,
one for each entry in reset-names.
@@ -96,6 +106,7 @@ Example:
clocks = <&gcc GCC_PCIE_0_PIPE_CLK>;
clock-names = "pipe0";
+ clock-output-names = "pcie_0_pipe_clk_src";
resets = <&gcc GCC_PCIE_0_PHY_BCR>;
reset-names = "lane0";
};
diff --git a/Documentation/devicetree/bindings/phy/ralink-usb-phy.txt b/Documentation/devicetree/bindings/phy/ralink-usb-phy.txt
new file mode 100644
index 000000000000..9d2868a437ab
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/ralink-usb-phy.txt
@@ -0,0 +1,23 @@
+Mediatek/Ralink USB PHY
+
+Required properties:
+ - compatible: "ralink,rt3352-usbphy"
+ "mediatek,mt7620-usbphy"
+ "mediatek,mt7628-usbphy"
+ - reg: required for "mediatek,mt7628-usbphy", unused otherwise
+ - #phy-cells: should be 0
+ - ralink,sysctl: a phandle to a ralink syscon register region
+ - resets: the two reset controllers for host and device
+ - reset-names: the names of the 2 reset controllers
+
+Example:
+
+usbphy: phy {
+ compatible = "mediatek,mt7628-usbphy";
+ reg = <0x10120000 0x1000>;
+ #phy-cells = <0>;
+
+ ralink,sysctl = <&sysc>;
+ resets = <&rstctrl 22 &rstctrl 25>;
+ reset-names = "host", "device";
+};
diff --git a/Documentation/devicetree/bindings/phy/sun4i-usb-phy.txt b/Documentation/devicetree/bindings/phy/sun4i-usb-phy.txt
index 005bc22938ff..cbc7847dbf6c 100644
--- a/Documentation/devicetree/bindings/phy/sun4i-usb-phy.txt
+++ b/Documentation/devicetree/bindings/phy/sun4i-usb-phy.txt
@@ -9,6 +9,7 @@ Required properties:
* allwinner,sun7i-a20-usb-phy
* allwinner,sun8i-a23-usb-phy
* allwinner,sun8i-a33-usb-phy
+ * allwinner,sun8i-a83t-usb-phy
* allwinner,sun8i-h3-usb-phy
* allwinner,sun8i-v3s-usb-phy
* allwinner,sun50i-a64-usb-phy
@@ -17,18 +18,22 @@ Required properties:
* "phy_ctrl"
* "pmu0" for H3, V3s and A64
* "pmu1"
- * "pmu2" for sun4i, sun6i or sun7i
+ * "pmu2" for sun4i, sun6i, sun7i, sun8i-a83t or sun8i-h3
+ * "pmu3" for sun8i-h3
- #phy-cells : from the generic phy bindings, must be 1
- clocks : phandle + clock specifier for the phy clocks
- clock-names :
* "usb_phy" for sun4i, sun5i or sun7i
* "usb0_phy", "usb1_phy" and "usb2_phy" for sun6i
* "usb0_phy", "usb1_phy" for sun8i
+ * "usb0_phy", "usb1_phy", "usb2_phy" and "usb2_hsic_12M" for sun8i-a83t
+ * "usb0_phy", "usb1_phy", "usb2_phy" and "usb3_phy" for sun8i-h3
- resets : a list of phandle + reset specifier pairs
- reset-names :
* "usb0_reset"
* "usb1_reset"
- * "usb2_reset" for sun4i, sun6i or sun7i
+ * "usb2_reset" for sun4i, sun6i, sun7i, sun8i-a83t or sun8i-h3
+ * "usb3_reset" for sun8i-h3
Optional properties:
- usb0_id_det-gpios : gpio phandle for reading the otg id pin value
@@ -37,6 +42,7 @@ Optional properties:
- usb0_vbus-supply : regulator phandle for controller usb0 vbus
- usb1_vbus-supply : regulator phandle for controller usb1 vbus
- usb2_vbus-supply : regulator phandle for controller usb2 vbus
+- usb3_vbus-supply : regulator phandle for controller usb3 vbus
Example:
usbphy: phy@0x01c13400 {
diff --git a/Documentation/devicetree/bindings/pinctrl/cortina,gemini-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/cortina,gemini-pinctrl.txt
new file mode 100644
index 000000000000..61466c58faae
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/cortina,gemini-pinctrl.txt
@@ -0,0 +1,59 @@
+Cortina Systems Gemini pin controller
+
+This pin controller is found in the Cortina Systems Gemini SoC family,
+see further arm/gemini.txt. It is a purely group-based multiplexing pin
+controller.
+
+The pin controller node must be a subnode of the system controller node.
+
+Required properties:
+- compatible: "cortina,gemini-pinctrl"
+
+Subnodes of the pin controller contain pin control multiplexing set-up.
+Please refer to pinctrl-bindings.txt for generic pin multiplexing nodes.
+
+Example:
+
+
+syscon {
+ compatible = "cortina,gemini-syscon";
+ ...
+ pinctrl {
+ compatible = "cortina,gemini-pinctrl";
+ pinctrl-names = "default";
+ pinctrl-0 = <&dram_default_pins>, <&system_default_pins>,
+ <&vcontrol_default_pins>;
+
+ dram_default_pins: pinctrl-dram {
+ mux {
+ function = "dram";
+ groups = "dramgrp";
+ };
+ };
+ rtc_default_pins: pinctrl-rtc {
+ mux {
+ function = "rtc";
+ groups = "rtcgrp";
+ };
+ };
+ power_default_pins: pinctrl-power {
+ mux {
+ function = "power";
+ groups = "powergrp";
+ };
+ };
+ system_default_pins: pinctrl-system {
+ mux {
+ function = "system";
+ groups = "systemgrp";
+ };
+ };
+ (...)
+ uart_default_pins: pinctrl-uart {
+ mux {
+ function = "uart";
+ groups = "uartrxtxgrp";
+ };
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,imx7ulp-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/fsl,imx7ulp-pinctrl.txt
new file mode 100644
index 000000000000..44ad670ae11e
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/fsl,imx7ulp-pinctrl.txt
@@ -0,0 +1,61 @@
+* Freescale i.MX7ULP IOMUX Controller
+
+i.MX 7ULP has three IOMUXC instances: IOMUXC0 for M4 ports, IOMUXC1 for A7
+ports and IOMUXC DDR for DDR interface.
+
+Note:
+This binding doc is only for the IOMUXC1 support in A7 Domain and it only
+supports generic pin config.
+
+Please also refer pinctrl-bindings.txt in this directory for generic pinctrl
+binding.
+
+=== Pin Controller Node ===
+
+Required properties:
+- compatible: "fsl,imx7ulp-iomuxc1"
+- reg: Should contain the base physical address and size of the iomuxc
+ registers.
+
+=== Pin Configuration Node ===
+- pinmux: An array of integers representing the mux settings for a group of pins.
+ The format is pinmux = <PIN_FUNC_ID>, where PIN_FUNC_ID is a pin working on
+ a specific function.
+
+ NOTE: i.MX7ULP PIN_FUNC_ID consists of 4 integers as it shares one mux
+ and config register as follows:
+ <mux_conf_reg input_reg mux_mode input_val>
+
+ Refer to imx7ulp-pinfunc.h in the device tree source folder for all
+ available imx7ulp PIN_FUNC_ID.
+
+Optional Properties:
+- drive-strength Integer. Controls Drive Strength
+ 0: Standard
+ 1: Hi Driver
+- drive-push-pull Bool. Enable Pin Push-pull
+- drive-open-drain Bool. Enable Pin Open-drain
+- slew-rate: Integer. Controls Slew Rate
+ 0: Standard
+ 1: Slow
+- bias-disable: Bool. Pull disabled
+- bias-pull-down: Bool. Pull down on pin
+- bias-pull-up: Bool. Pull up on pin
+
+Examples:
+#include "imx7ulp-pinfunc.h"
+
+/* Pin Controller Node */
+iomuxc1: iomuxc@40ac0000 {
+ compatible = "fsl,imx7ulp-iomuxc1";
+ reg = <0x40ac0000 0x1000>;
+
+ /* Pin Configuration Node */
+ pinctrl_lpuart4: lpuart4grp {
+ pinmux = <
+ IMX7ULP_PAD_PTC3__LPUART4_RX
+ IMX7ULP_PAD_PTC2__LPUART4_TX
+ >;
+ bias-pull-up;
+ };
+};
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-aspeed.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-aspeed.txt
index ca01710ee29a..3b7266c7c438 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-aspeed.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-aspeed.txt
@@ -69,8 +69,9 @@ PWM1 PWM2 PWM3 PWM4 PWM5 PWM6 PWM7 RGMII1 RGMII2 RMII1 RMII2 ROM16 ROM8 ROMCS1
ROMCS2 ROMCS3 ROMCS4 RXD1 RXD2 RXD3 RXD4 SALT1 SALT2 SALT3 SALT4 SD1 SD2 SGPMCK
SGPMI SGPMLD SGPMO SGPSCK SGPSI0 SGPSI1 SGPSLD SIOONCTRL SIOPBI SIOPBO SIOPWREQ
SIOPWRGD SIOS3 SIOS5 SIOSCI SPI1 SPI1DEBUG SPI1PASSTHRU SPICS1 TIMER3 TIMER4
-TIMER5 TIMER6 TIMER7 TIMER8 TXD1 TXD2 TXD3 TXD4 UART6 USBCKI VGABIOS_ROM VGAHS
-VGAVS VPI18 VPI24 VPI30 VPO12 VPO24 WDTRST1 WDTRST2
+TIMER5 TIMER6 TIMER7 TIMER8 TXD1 TXD2 TXD3 TXD4 UART6 USB11D1 USB11H2 USB2D1
+USB2H1 USBCKI VGABIOS_ROM VGAHS VGAVS VPI18 VPI24 VPI30 VPO12 VPO24 WDTRST1
+WDTRST2
aspeed,ast2500-pinctrl, aspeed,g5-pinctrl:
@@ -86,7 +87,8 @@ SALT11 SALT12 SALT13 SALT14 SALT2 SALT3 SALT4 SALT5 SALT6 SALT7 SALT8 SALT9
SCL1 SCL2 SD1 SD2 SDA1 SDA2 SGPS1 SGPS2 SIOONCTRL SIOPBI SIOPBO SIOPWREQ
SIOPWRGD SIOS3 SIOS5 SIOSCI SPI1 SPI1CS1 SPI1DEBUG SPI1PASSTHRU SPI2CK SPI2CS0
SPI2CS1 SPI2MISO SPI2MOSI TIMER3 TIMER4 TIMER5 TIMER6 TIMER7 TIMER8 TXD1 TXD2
-TXD3 TXD4 UART6 USBCKI VGABIOSROM VGAHS VGAVS VPI24 VPO WDTRST1 WDTRST2
+TXD3 TXD4 UART6 USB11BHID USB2AD USB2AH USB2BD USB2BH USBCKI VGABIOSROM VGAHS
+VGAVS VPI24 VPO WDTRST1 WDTRST2
Examples
========
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
index 62d0f33fa65e..4483cc31e531 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
@@ -268,6 +268,8 @@ output-enable - enable output on a pin without actively driving it
(such as enabling an output buffer)
output-low - set the pin to output mode with low level
output-high - set the pin to output mode with high level
+sleep-hardware-state - indicate this is sleep related state which will be programmed
+ into the registers for the sleep state.
slew-rate - set the slew rate
For example:
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-mt65xx.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-mt65xx.txt
index 17631d0a9af7..37d744750579 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-mt65xx.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-mt65xx.txt
@@ -5,6 +5,7 @@ The Mediatek's Pin controller is used to control SoC pins.
Required properties:
- compatible: value should be one of the following.
"mediatek,mt2701-pinctrl", compatible with mt2701 pinctrl.
+ "mediatek,mt2712-pinctrl", compatible with mt2712 pinctrl.
"mediatek,mt6397-pinctrl", compatible with mt6397 pinctrl.
"mediatek,mt7623-pinctrl", compatible with mt7623 pinctrl.
"mediatek,mt8127-pinctrl", compatible with mt8127 pinctrl.
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-rk805.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-rk805.txt
new file mode 100644
index 000000000000..eee3dc260934
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-rk805.txt
@@ -0,0 +1,63 @@
+Pin control driver for the RK805 power management IC.
+
+RK805 has 2 pins which can be configured as GPIO output only.
+
+Please refer file <devicetree/bindings/pinctrl/pinctrl-bindings.txt>
+for details of the common pinctrl bindings used by client devices,
+including the meaning of the phrase "pin configuration node".
+
+Optional Pinmux properties:
+--------------------------
+The following properties are required if a default pin configuration is
+required at boot.
+- pinctrl-names: A pinctrl state named per <pinctrl-binding.txt>.
+- pinctrl[0...n]: Properties to contain the phandle for pinctrl states per
+ <pinctrl-binding.txt>.
+
+The pin configurations are defined as children of the pinctrl states node. Each
+sub-node has the following properties:
+
+Required properties:
+------------------
+- #gpio-cells: Should be two. The first cell is the pin number and the
+ second is the GPIO flags.
+
+- gpio-controller: Marks the device node as a GPIO controller.
+
+- pins: List of pins. Valid values of pins properties are: gpio0, gpio1.
+
+The first 2 properties must be added to the RK805 PMIC node, documented in
+Documentation/devicetree/bindings/mfd/rk808.txt
+
+Optional properties:
+-------------------
+The following are optional properties defined in the pinmux DT binding document
+<pinctrl-bindings.txt>. Absence of these properties will leave the configuration
+at its default.
+ function,
+ output-low,
+ output-high.
+
+Valid values for function properties are: gpio.
+
+There are also no customised properties for any GPIO.
+
+Example:
+--------
+rk805: rk805@18 {
+ compatible = "rockchip,rk805";
+ ...
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_int_l>, <&rk805_default>;
+
+ rk805_default: pinmux {
+ gpio01 {
+ pins = "gpio0", "gpio1";
+ function = "gpio";
+ output-high;
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt
index a7bde64798c7..a752a4716486 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt
@@ -46,7 +46,8 @@ Valid values for pins are:
gpio0-gpio89
Valid values for function are:
- cam_mclk, codec_mic_i2s, codec_spkr_i2s, gpio, gsbi1, gsbi2, gsbi3, gsbi4,
+ cam_mclk, codec_mic_i2s, codec_spkr_i2s, gp_clk_0a, gp_clk_0b, gp_clk_1a,
+ gp_clk_1b, gp_clk_2a, gp_clk_2b, gpio, gsbi1, gsbi2, gsbi3, gsbi4,
gsbi4_cam_i2c, gsbi5, gsbi5_spi_cs1, gsbi5_spi_cs2, gsbi5_spi_cs3, gsbi6,
gsbi6_spi_cs1, gsbi6_spi_cs2, gsbi6_spi_cs3, gsbi7, gsbi7_spi_cs1,
gsbi7_spi_cs2, gsbi7_spi_cs3, gsbi_cam_i2c, hdmi, mi2s, riva_bt, riva_fm,
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,ipq4019-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,ipq4019-pinctrl.txt
index cfb8500dd56b..93374f478b9e 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,ipq4019-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,ipq4019-pinctrl.txt
@@ -50,7 +50,11 @@ Valid values for qcom,pins are:
Supports mux, bias and drive-strength
Valid values for qcom,function are:
-gpio, blsp_uart1, blsp_i2c0, blsp_i2c1, blsp_uart0, blsp_spi1, blsp_spi0
+aud_pin, audio_pwm, blsp_i2c0, blsp_i2c1, blsp_spi0, blsp_spi1, blsp_uart0,
+blsp_uart1, chip_rst, gpio, i2s_rx, i2s_spdif_in, i2s_spdif_out, i2s_td, i2s_tx,
+jtag, led0, led1, led2, led3, led4, led5, led6, led7, led8, led9, led10, led11,
+mdc, mdio, pcie, pmu, prng_rosc, qpic, rgmii, rmii, sdio, smart0, smart1,
+smart2, smart3, tm, wifi0, wifi1
Example:
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt
index 8d893a874634..5b12c57e7f02 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt
@@ -16,6 +16,7 @@ PMIC's from Qualcomm.
"qcom,pm8941-gpio"
"qcom,pm8994-gpio"
"qcom,pma8084-gpio"
+ "qcom,pmi8994-gpio"
And must contain either "qcom,spmi-gpio" or "qcom,ssbi-gpio"
if the device is on an spmi bus or an ssbi bus respectively
@@ -85,6 +86,7 @@ to specify in a pin configuration subnode:
gpio1-gpio36 for pm8941
gpio1-gpio22 for pm8994
gpio1-gpio22 for pma8084
+ gpio1-gpio10 for pmi8994
- function:
Usage: required
@@ -98,7 +100,10 @@ to specify in a pin configuration subnode:
"dtest1",
"dtest2",
"dtest3",
- "dtest4"
+ "dtest4",
+ And following values are supported by LV/MV GPIO subtypes:
+ "func3",
+ "func4"
- bias-disable:
Usage: optional
@@ -183,6 +188,25 @@ to specify in a pin configuration subnode:
Value type: <none>
Definition: The specified pins are configured in open-source mode.
+- qcom,analog-pass:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins are configured in analog-pass-through mode.
+
+- qcom,atest:
+ Usage: optional
+ Value type: <u32>
+ Definition: Selects ATEST rail to route to GPIO when it's configured
+ in analog-pass-through mode.
+ Valid values are 1-4 corresponding to ATEST1 to ATEST4.
+
+- qcom,dtest-buffer:
+ Usage: optional
+ Value type: <u32>
+ Definition: Selects DTEST rail to route to GPIO when it's configured
+ as digital input.
+ Valid values are 1-4 corresponding to DTEST1 to DTEST4.
+
Example:
pm8921_gpio: gpio@150 {
diff --git a/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
index 645082f03259..f4d127df980d 100644
--- a/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
@@ -24,6 +24,7 @@ Required Properties:
- "renesas,pfc-r8a7794": for R8A7794 (R-Car E2) compatible pin-controller.
- "renesas,pfc-r8a7795": for R8A7795 (R-Car H3) compatible pin-controller.
- "renesas,pfc-r8a7796": for R8A7796 (R-Car M3-W) compatible pin-controller.
+ - "renesas,pfc-r8a77995": for R8A77995 (R-Car D3) compatible pin-controller.
- "renesas,pfc-sh73a0": for SH73A0 (SH-Mobile AG5) compatible pin-controller.
- reg: Base address and length of each memory resource used by the pin
diff --git a/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt
index ee01ab58224d..58b7921b4fed 100644
--- a/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.txt
@@ -24,6 +24,7 @@ Required properties for iomux controller:
"rockchip,rk2928-pinctrl": for Rockchip RK2928
"rockchip,rk3066a-pinctrl": for Rockchip RK3066a
"rockchip,rk3066b-pinctrl": for Rockchip RK3066b
+ "rockchip,rk3128-pinctrl": for Rockchip RK3128
"rockchip,rk3188-pinctrl": for Rockchip RK3188
"rockchip,rk3228-pinctrl": for Rockchip RK3228
"rockchip,rk3288-pinctrl": for Rockchip RK3288
diff --git a/Documentation/devicetree/bindings/pinctrl/sprd,pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/sprd,pinctrl.txt
new file mode 100644
index 000000000000..b1cea7a3a071
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/sprd,pinctrl.txt
@@ -0,0 +1,83 @@
+* Spreadtrum Pin Controller
+
+The Spreadtrum pin controller is organized in 3 blocks (types).
+
+The first block comprises some global control registers, and each
+register contains several bit fields with one bit or several bits
+to configure for some global common configuration, such as domain
+pad driving level, system control select and so on ("domain pad
+driving level": One pin can output 3.0v or 1.8v, depending on the
+related domain pad driving selection; if the related domain pad
+selects 3.0v, then the pin can output 3.0v. "system control" is used
+to choose which system a function (like UART0) belongs to, since we
+have several systems (AP/CP/CM4) on one SoC.).
+
+There are too many different configurations to list them all,
+so we can not make every Spreadtrum-specific configuration
+into one generic configuration, and more unusual global
+configurations may be added in future. Thus we add one "sprd,control"
+property to set these various global control configurations, and we
+need to use magic numbers for this property.
+
+Moreover we recognise every field comprising one bit or several
+bits in one global control register as one pin, thus we should
+record every pin's bit offset, bit width and register offset to
+configure this field (pin).
+
+The second block comprises some common registers which have unified
+register definition, and each register describing one pin is used
+to configure the pin sleep mode, function select and sleep related
+configuration.
+
+Now we have 4 systems for sleep mode on SC9860 SoC: AP system,
+PUBCP system, TGLDSP system and AGDSP system. And the pin sleep
+related configurations are:
+- input-enable
+- input-disable
+- output-high
+- output-low
+- bias-pull-up
+- bias-pull-down
+
+In some situations we need to set the pin sleep mode and pin sleep related
+configuration, so that the pin sleep related configuration is applied
+automatically by hardware when the system specified by the sleep mode goes
+into deep sleep mode. For example, if we set the pin sleep mode to PUBCP_SLEEP
+and set the pin sleep related configuration to "input-enable", this means that
+when the PUBCP system goes into deep sleep mode, this pin will be set to
+input enable automatically.
+
+Moreover we can not use the "sleep" state, since some systems (like:
+PUBCP system) do not run linux kernel OS (only AP system run linux
+kernel on SC9860 platform), then we can not select "sleep" state
+when the PUBCP system goes into deep sleep mode. Thus we introduce
+"sprd,sleep-mode" property to set pin sleep mode.
+
+The last block comprises some misc registers which also have a unified
+register definition, and each register describing one pin is used to
+configure drive strength, pull up/down and so on. Especially for pull
+up, we have two kinds of pull-up resistors: 20K and 4.7K.
+
+Required properties for Spreadtrum pin controller:
+- compatible: "sprd,<soc>-pinctrl"
+ Please refer to each sprd,<soc>-pinctrl.txt binding doc for supported SoCs.
+- reg: The register address of pin controller device.
+- pins : An array of pin names.
+
+Optional properties:
+- function: Specified the function name.
+- drive-strength: Drive strength in mA.
+- input-schmitt-disable: Disable schmitt-trigger mode.
+- input-schmitt-enable: Enable schmitt-trigger mode.
+- bias-disable: Disable pin bias.
+- bias-pull-down: Pull down on pin.
+- bias-pull-up: Pull up on pin.
+- input-enable: Enable pin input.
+- input-disable: Enable pin output.
+- output-high: Set the pin as an output level high.
+- output-low: Set the pin as an output level low.
+- sleep-hardware-state: Indicate these configs in this state are sleep related.
+- sprd,control: Control values referring to databook for global control pins.
+- sprd,sleep-mode: Sleep mode selection.
+
+Please refer to each sprd,<soc>-pinctrl.txt binding doc for supported values.
diff --git a/Documentation/devicetree/bindings/pinctrl/sprd,sc9860-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/sprd,sc9860-pinctrl.txt
new file mode 100644
index 000000000000..5a628333d52f
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/sprd,sc9860-pinctrl.txt
@@ -0,0 +1,70 @@
+* Spreadtrum SC9860 Pin Controller
+
+Please refer to sprd,pinctrl.txt in this directory for common binding part
+and usage.
+
+Required properties:
+- compatible: Must be "sprd,sc9860-pinctrl".
+- reg: The register address of pin controller device.
+- pins : An array of strings, each string containing the name of a pin.
+
+Optional properties:
+- function: A string containing the name of the function, values must be
+ one of: "func1", "func2", "func3" and "func4".
+- drive-strength: Drive strength in mA. Supported values: 2, 4, 6, 8, 10,
+ 12, 14, 16, 20, 21, 24, 25, 27, 29, 31 and 33.
+- input-schmitt-disable: Disable schmitt-trigger mode.
+- input-schmitt-enable: Enable schmitt-trigger mode.
+- bias-disable: Disable pin bias.
+- bias-pull-down: Pull down on pin.
+- bias-pull-up: Pull up on pin. Supported values: 20000 for a 20K pull-up
+ resistor and 4700 for a 4.7K pull-up resistor.
+- input-enable: Enable pin input.
+- input-disable: Enable pin output.
+- output-high: Set the pin as an output level high.
+- output-low: Set the pin as an output level low.
+- sleep-hardware-state: Indicate these configs in this state are sleep related.
+- sprd,control: Control values referring to databook for global control pins.
+- sprd,sleep-mode: Choose the pin sleep mode, and supported values are:
+ AP_SLEEP, PUBCP_SLEEP, TGLDSP_SLEEP and AGDSP_SLEEP.
+
+Pin sleep mode definition:
+enum pin_sleep_mode {
+ AP_SLEEP = BIT(0),
+ PUBCP_SLEEP = BIT(1),
+ TGLDSP_SLEEP = BIT(2),
+ AGDSP_SLEEP = BIT(3),
+};
+
+Example:
+pin_controller: pinctrl@402a0000 {
+ compatible = "sprd,sc9860-pinctrl";
+ reg = <0x402a0000 0x10000>;
+
+ grp1: sd0 {
+ pins = "SC9860_VIO_SD2_IRTE", "SC9860_VIO_SD0_IRTE";
+ sprd,control = <0x1>;
+ };
+
+ grp2: rfctl_33 {
+ pins = "SC9860_RFCTL33";
+ function = "func2";
+ sprd,sleep-mode = <AP_SLEEP | PUBCP_SLEEP>;
+ grp2_sleep_mode: rfctl_33_sleep {
+ pins = "SC9860_RFCTL33";
+ sleep-hardware-state;
+ output-low;
+ }
+ };
+
+ grp3: rfctl_misc_20 {
+ pins = "SC9860_RFCTL20_MISC";
+ drive-strength = <10>;
+ bias-pull-up = <4700>;
+ grp3_sleep_mode: rfctl_misc_sleep {
+ pins = "SC9860_RFCTL20_MISC";
+ sleep-hardware-state;
+ bias-pull-up;
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/power/rockchip-io-domain.txt b/Documentation/devicetree/bindings/power/rockchip-io-domain.txt
index 43c21fb04564..4a4766e9c254 100644
--- a/Documentation/devicetree/bindings/power/rockchip-io-domain.txt
+++ b/Documentation/devicetree/bindings/power/rockchip-io-domain.txt
@@ -39,6 +39,8 @@ Required properties:
- "rockchip,rk3368-pmu-io-voltage-domain" for rk3368 pmu-domains
- "rockchip,rk3399-io-voltage-domain" for rk3399
- "rockchip,rk3399-pmu-io-voltage-domain" for rk3399 pmu-domains
+ - "rockchip,rv1108-io-voltage-domain" for rv1108
+ - "rockchip,rv1108-pmu-io-voltage-domain" for rv1108 pmu-domains
Deprecated properties:
- rockchip,grf: phandle to the syscon managing the "general register files"
diff --git a/Documentation/devicetree/bindings/powerpc/ibm,vas.txt b/Documentation/devicetree/bindings/powerpc/ibm,vas.txt
new file mode 100644
index 000000000000..bf11d2faf7b8
--- /dev/null
+++ b/Documentation/devicetree/bindings/powerpc/ibm,vas.txt
@@ -0,0 +1,22 @@
+* IBM Powerpc Virtual Accelerator Switchboard (VAS)
+
+VAS is a hardware mechanism that allows kernel subsystems and user processes
+to directly submit compression and other requests to Nest accelerators (NX)
+or other coprocessor functions.
+
+Required properties:
+- compatible : should be "ibm,vas".
+- ibm,vas-id : A unique identifier for each instance of VAS in the system
+- reg : Should contain 4 pairs of 64-bit fields specifying the Hypervisor
+ window context start and length, OS/User window context start and length,
+ "Paste address" start and length, "Paste window id" start bit and number
+ of bits.
+
+Example:
+
+ vas@6019100000000 {
+ compatible = "ibm,vas", "ibm,power9-vas";
+ reg = <0x6019100000000 0x2000000 0x6019000000000 0x100000000 0x8000000000000 0x100000000 0x20 0x10>;
+ name = "vas";
+ ibm,vas-id = <0x1>;
+ };
diff --git a/Documentation/devicetree/bindings/powerpc/opal/sensor-groups.txt b/Documentation/devicetree/bindings/powerpc/opal/sensor-groups.txt
new file mode 100644
index 000000000000..6ad881cbffda
--- /dev/null
+++ b/Documentation/devicetree/bindings/powerpc/opal/sensor-groups.txt
@@ -0,0 +1,27 @@
+IBM OPAL Sensor Groups Binding
+-------------------------------
+
+Node: /ibm,opal/sensor-groups
+
+Description: Contains sensor groups available in the Powernv P9
+servers. Each child node indicates a sensor group.
+
+- compatible : Should be "ibm,opal-sensor-group"
+
+Each child node contains below properties:
+
+- type : String to indicate the type of sensor-group
+
+- sensor-group-id: Abstract unique identifier provided by firmware of
+ type <u32> which is used for sensor-group
+ operations like clearing the min/max history of all
+ sensors belonging to the group.
+
+- ibm,chip-id : Chip ID
+
+- sensors : Phandle array of child nodes of /ibm,opal/sensor/
+ belonging to this group
+
+- ops : Array of opal-call numbers indicating available operations on
+ sensor groups like clearing min/max, enabling/disabling sensor
+ group.
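+
+Example (an illustrative sketch only, following the property placement
+described above; real node names, sensor-group ids, phandles and opal-call
+numbers are provided by firmware):
+
+ibm,opal {
+	sensor-groups {
+		compatible = "ibm,opal-sensor-group";
+
+		occ-csm@1c00020 {
+			type = "csm";
+			sensor-group-id = <0x01c00020>;
+			ibm,chip-id = <0x10>;
+			sensors = <&sensor1 &sensor2>;
+			ops = <0x56>;
+		};
+	};
+};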
diff --git a/Documentation/devicetree/bindings/ptp/brcm,ptp-dte.txt b/Documentation/devicetree/bindings/ptp/brcm,ptp-dte.txt
index 07590bcdad15..7c04e22a5d6a 100644
--- a/Documentation/devicetree/bindings/ptp/brcm,ptp-dte.txt
+++ b/Documentation/devicetree/bindings/ptp/brcm,ptp-dte.txt
@@ -1,13 +1,20 @@
-* Broadcom Digital Timing Engine(DTE) based PTP clock driver
+* Broadcom Digital Timing Engine(DTE) based PTP clock
Required properties:
-- compatible: should be "brcm,ptp-dte"
+- compatible: should contain the core compatibility string
+ and the SoC compatibility string. The SoC
+ compatibility string is to handle SoC specific
+ hardware differences.
+ Core compatibility string:
+ "brcm,ptp-dte"
+ SoC compatibility strings:
+ "brcm,iproc-ptp-dte" - for iProc based SoCs
- reg: address and length of the DTE block's NCO registers
Example:
-ptp_dte: ptp_dte@180af650 {
- compatible = "brcm,ptp-dte";
+ptp: ptp-dte@180af650 {
+ compatible = "brcm,iproc-ptp-dte", "brcm,ptp-dte";
reg = <0x180af650 0x10>;
status = "okay";
};
diff --git a/Documentation/devicetree/bindings/pwm/pwm-stm32-lp.txt b/Documentation/devicetree/bindings/pwm/pwm-stm32-lp.txt
new file mode 100644
index 000000000000..f8338d11fd2b
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/pwm-stm32-lp.txt
@@ -0,0 +1,24 @@
+STMicroelectronics STM32 Low-Power Timer PWM
+
+STM32 Low-Power Timer provides single channel PWM.
+
+Must be a sub-node of an STM32 Low-Power Timer device tree node.
+See ../mfd/stm32-lptimer.txt for details about the parent node.
+
+Required properties:
+- compatible: Must be "st,stm32-pwm-lp".
+
+Optional properties:
+- pinctrl-names: Set to "default".
+- pinctrl-0: Phandle pointing to pin configuration node for PWM.
+
+Example:
+ timer@40002400 {
+ compatible = "st,stm32-lptimer";
+ ...
+ pwm {
+ compatible = "st,stm32-pwm-lp";
+ pinctrl-names = "default";
+ pinctrl-0 = <&lppwm1_pins>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/regulator/mt6311-regulator.txt b/Documentation/devicetree/bindings/regulator/mt6311-regulator.txt
index 02649d8b3f5a..84d544d8c1b1 100644
--- a/Documentation/devicetree/bindings/regulator/mt6311-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/mt6311-regulator.txt
@@ -1,4 +1,4 @@
-Mediatek MT6311 Regulator Driver
+Mediatek MT6311 Regulator
Required properties:
- compatible: "mediatek,mt6311-regulator"
diff --git a/Documentation/devicetree/bindings/regulator/mt6323-regulator.txt b/Documentation/devicetree/bindings/regulator/mt6323-regulator.txt
index c35d878b0960..a48749db4df3 100644
--- a/Documentation/devicetree/bindings/regulator/mt6323-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/mt6323-regulator.txt
@@ -1,4 +1,4 @@
-Mediatek MT6323 Regulator Driver
+Mediatek MT6323 Regulator
All voltage regulators are defined as subnodes of the regulators node. A list
of regulators provided by this controller are defined as subnodes of the
diff --git a/Documentation/devicetree/bindings/regulator/mt6380-regulator.txt b/Documentation/devicetree/bindings/regulator/mt6380-regulator.txt
new file mode 100644
index 000000000000..0058441f16d2
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/mt6380-regulator.txt
@@ -0,0 +1,89 @@
+MediaTek MT6380 Regulator
+
+All voltage regulators provided by the MT6380 PMIC are described as the
+subnodes of the MT6380 regulators node. Each regulator is named according
+to its regulator type, buck-<name> and ldo-<name>. The definition for each
+of these nodes is defined using the standard binding for regulators at
+Documentation/devicetree/bindings/regulator/regulator.txt.
+
+The valid names for regulators are:
+BUCK:
+ buck-core1, buck-vcore, buck-vrf
+LDO:
+ ldo-vm, ldo-va, ldo-vphy, ldo-vddr, ldo-vt
+
+Example:
+
+ regulators {
+ compatible = "mediatek,mt6380-regulator";
+
+ mt6380_vcpu_reg: buck-vcore1 {
+ regulator-name = "vcore1";
+ regulator-min-microvolt = < 600000>;
+ regulator-max-microvolt = <1393750>;
+ regulator-ramp-delay = <6250>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ mt6380_vcore_reg: buck-vcore {
+ regulator-name = "vcore";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <1393750>;
+ regulator-ramp-delay = <6250>;
+ };
+
+ mt6380_vrf_reg: buck-vrf {
+ regulator-name = "vrf";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1575000>;
+ regulator-ramp-delay = <0>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ mt6380_vm_reg: ldo-vm {
+ regulator-name = "vm";
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-ramp-delay = <0>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ mt6380_va_reg: ldo-va {
+ regulator-name = "va";
+ regulator-min-microvolt = <2200000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-ramp-delay = <0>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ mt6380_vphy_reg: ldo-vphy {
+ regulator-name = "vphy";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-ramp-delay = <0>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ mt6380_vddr_reg: ldo-vddr {
+ regulator-name = "vddr";
+ regulator-min-microvolt = <1240000>;
+ regulator-max-microvolt = <1840000>;
+ regulator-ramp-delay = <0>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ mt6380_vt_reg: ldo-vt {
+ regulator-name = "vt";
+ regulator-min-microvolt = <2200000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-ramp-delay = <0>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/regulator/mt6397-regulator.txt b/Documentation/devicetree/bindings/regulator/mt6397-regulator.txt
index a42b1d6e9863..01141fb00875 100644
--- a/Documentation/devicetree/bindings/regulator/mt6397-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/mt6397-regulator.txt
@@ -1,4 +1,4 @@
-Mediatek MT6397 Regulator Driver
+Mediatek MT6397 Regulator
Required properties:
- compatible: "mediatek,mt6397-regulator"
diff --git a/Documentation/devicetree/bindings/regulator/pwm-regulator.txt b/Documentation/devicetree/bindings/regulator/pwm-regulator.txt
index bf85aa9ad6a7..3d78d507e29f 100644
--- a/Documentation/devicetree/bindings/regulator/pwm-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/pwm-regulator.txt
@@ -71,7 +71,7 @@ Continuous Voltage With Enable GPIO Example:
* Inverted PWM logic, and the duty cycle range is limited
* to 30%-70%.
*/
- pwm-dutycycle-range <700 300>; /* */
+ pwm-dutycycle-range = <700 300>; /* */
};
Voltage Table Example:
diff --git a/Documentation/devicetree/bindings/regulator/st,stm32-vrefbuf.txt b/Documentation/devicetree/bindings/regulator/st,stm32-vrefbuf.txt
new file mode 100644
index 000000000000..3944ee3e731e
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/st,stm32-vrefbuf.txt
@@ -0,0 +1,20 @@
+STM32 VREFBUF - Voltage reference buffer
+
+Some STM32 devices embed a voltage reference buffer which can be used as
+voltage reference for ADCs, DACs and also as voltage reference for external
+components through the dedicated VREF+ pin.
+
+Required properties:
+- compatible: Must be "st,stm32-vrefbuf".
+- reg: Offset and length of VREFBUF register set.
+- clocks: Must contain an entry for peripheral clock.
+
+Example:
+ vrefbuf: regulator@58003C00 {
+ compatible = "st,stm32-vrefbuf";
+ reg = <0x58003C00 0x8>;
+ clocks = <&rcc VREF_CK>;
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <2500000>;
+ vdda-supply = <&vdda>;
+ };
diff --git a/Documentation/devicetree/bindings/rng/imx-rngc.txt b/Documentation/devicetree/bindings/rng/imx-rngc.txt
new file mode 100644
index 000000000000..93c7174a7bed
--- /dev/null
+++ b/Documentation/devicetree/bindings/rng/imx-rngc.txt
@@ -0,0 +1,21 @@
+Freescale RNGC (Random Number Generator Version C)
+
+The driver also supports version B, which is mostly compatible
+to version C.
+
+Required properties:
+- compatible : should be one of
+ "fsl,imx25-rngb"
+ "fsl,imx35-rngc"
+- reg : offset and length of the register set of this block
+- interrupts : the interrupt number for the RNGC block
+- clocks : the RNGC clk source
+
+Example:
+
+rng@53fb0000 {
+ compatible = "fsl,imx25-rngb";
+ reg = <0x53fb0000 0x4000>;
+ interrupts = <22>;
+ clocks = <&trng_clk>;
+};
diff --git a/Documentation/devicetree/bindings/serial/8250.txt b/Documentation/devicetree/bindings/serial/8250.txt
index 419ff6c0a47f..dad3b2ec66d4 100644
--- a/Documentation/devicetree/bindings/serial/8250.txt
+++ b/Documentation/devicetree/bindings/serial/8250.txt
@@ -14,6 +14,8 @@ Required properties:
tegra132, or tegra210.
- "nxp,lpc3220-uart"
- "ralink,rt2880-uart"
+ - For MediaTek BTIF, must contain '"mediatek,<chip>-btif",
+ "mediatek,mtk-btif"' where <chip> is mt7622, mt7623.
- "altr,16550-FIFO32"
- "altr,16550-FIFO64"
- "altr,16550-FIFO128"
diff --git a/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt b/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt
index e6b572409cf5..574c3a2c77d5 100644
--- a/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt
+++ b/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt
@@ -9,7 +9,6 @@ Optional properties:
- fsl,irda-mode : Indicate the uart supports irda mode
- fsl,dte-mode : Indicate the uart works in DTE mode. The uart works
in DCE mode by default.
-- fsl,dma-size : Indicate the size of the DMA buffer and its periods
Please check Documentation/devicetree/bindings/serial/serial.txt
for the complete list of generic properties.
@@ -29,5 +28,4 @@ uart1: serial@73fbc000 {
interrupts = <31>;
uart-has-rtscts;
fsl,dte-mode;
- fsl,dma-size = <1024 4>;
};
diff --git a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
index 8d27d1a603e7..4fc96946f81d 100644
--- a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
+++ b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
@@ -41,6 +41,8 @@ Required properties:
- "renesas,hscif-r8a7795" for R8A7795 (R-Car H3) HSCIF compatible UART.
- "renesas,scif-r8a7796" for R8A7796 (R-Car M3-W) SCIF compatible UART.
- "renesas,hscif-r8a7796" for R8A7796 (R-Car M3-W) HSCIF compatible UART.
+ - "renesas,scif-r8a77995" for R8A77995 (R-Car D3) SCIF compatible UART.
+ - "renesas,hscif-r8a77995" for R8A77995 (R-Car D3) HSCIF compatible UART.
- "renesas,scifa-sh73a0" for SH73A0 (SH-Mobile AG5) SCIFA compatible UART.
- "renesas,scifb-sh73a0" for SH73A0 (SH-Mobile AG5) SCIFB compatible UART.
- "renesas,rcar-gen1-scif" for R-Car Gen1 SCIF compatible UART,
diff --git a/Documentation/devicetree/bindings/serial/rs485.txt b/Documentation/devicetree/bindings/serial/rs485.txt
index 32b1fa1f2a5b..b8415936dfdb 100644
--- a/Documentation/devicetree/bindings/serial/rs485.txt
+++ b/Documentation/devicetree/bindings/serial/rs485.txt
@@ -5,14 +5,13 @@ the built-in half-duplex mode.
The properties described hereafter shall be given to a half-duplex capable
UART node.
-Required properties:
+Optional properties:
- rs485-rts-delay: prop-encoded-array <a b> where:
* a is the delay between rts signal and beginning of data sent in milliseconds.
it corresponds to the delay before sending data.
* b is the delay between end of data sent and rts signal in milliseconds
it corresponds to the delay after sending data and actual release of the line.
-
-Optional properties:
+ If this property is not specified, <0 0> is assumed.
- linux,rs485-enabled-at-boot-time: empty property telling to enable the rs485
feature at boot time. It can be disabled later with proper ioctl.
- rs485-rx-during-tx: empty property that enables the receiving of data even
diff --git a/Documentation/devicetree/bindings/serial/st,stm32-usart.txt b/Documentation/devicetree/bindings/serial/st,stm32-usart.txt
index 85ec5f2b1996..3657f9f9d17a 100644
--- a/Documentation/devicetree/bindings/serial/st,stm32-usart.txt
+++ b/Documentation/devicetree/bindings/serial/st,stm32-usart.txt
@@ -1,12 +1,19 @@
* STMicroelectronics STM32 USART
Required properties:
-- compatible: Can be either "st,stm32-usart", "st,stm32-uart",
-"st,stm32f7-usart" or "st,stm32f7-uart" depending on whether
-the device supports synchronous mode and is compatible with
-stm32(f4) or stm32f7.
+- compatible: can be either:
+ - "st,stm32-usart",
+ - "st,stm32-uart",
+ - "st,stm32f7-usart",
+ - "st,stm32f7-uart",
+ - "st,stm32h7-usart"
+ - "st,stm32h7-uart".
+ depending on whether the device supports synchronous mode
+ and is compatible with stm32(f4), stm32f7 or stm32h7.
- reg: The address and length of the peripheral registers space
-- interrupts: The interrupt line of the USART instance
+- interrupts:
+ - The interrupt line for the USART instance,
+ - An optional wake-up interrupt.
- clocks: The input clock of the USART instance
Optional properties:
diff --git a/Documentation/devicetree/bindings/serio/ps2-gpio.txt b/Documentation/devicetree/bindings/serio/ps2-gpio.txt
new file mode 100644
index 000000000000..7b7bc9cdf986
--- /dev/null
+++ b/Documentation/devicetree/bindings/serio/ps2-gpio.txt
@@ -0,0 +1,23 @@
+Device-Tree binding for ps/2 gpio device
+
+Required properties:
+ - compatible = "ps2-gpio"
+ - data-gpios: the data pin
+ - clk-gpios: the clock pin
+ - interrupts: Should trigger on the falling edge of the clock line.
+
+Optional properties:
+ - write-enable: Indicates that the write function is provided to the
+   serio device. Providing the write function may not work because of
+   the tight timing requirements.
+
+Example nodes:
+
+ps2@0 {
+ compatible = "ps2-gpio";
+ interrupt-parent = <&gpio>;
+ interrupts = <23 IRQ_TYPE_EDGE_FALLING>;
+ data-gpios = <&gpio 24 GPIO_ACTIVE_HIGH>;
+ clk-gpios = <&gpio 23 GPIO_ACTIVE_HIGH>;
+ write-enable;
+};
diff --git a/Documentation/devicetree/bindings/sound/cs43130.txt b/Documentation/devicetree/bindings/sound/cs43130.txt
new file mode 100644
index 000000000000..8b1dd5aeb004
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/cs43130.txt
@@ -0,0 +1,67 @@
+CS43130 DAC
+
+Required properties:
+
+ - compatible : "cirrus,cs43130", "cirrus,cs4399", "cirrus,cs43131",
+ "cirrus,cs43198"
+
+ - reg : the I2C address of the device for I2C
+
+ - VA-supply, VP-supply, VL-supply, VCP-supply, VD-supply:
+ power supplies for the device, as covered in
+ Documentation/devicetree/bindings/regulator/regulator.txt.
+
+
+Optional properties:
+
+ - reset-gpios : Active low GPIO used to reset the device
+
+ - cirrus,xtal-ibias:
+ When external MCLK is generated by external crystal
+ oscillator, CS43130 can be used to provide bias current
+ for external crystal. Amount of bias current sent is
+ set as:
+ 1 = 7.5uA
+ 2 = 12.5uA
+ 3 = 15uA
+
+ - cirrus,dc-measure:
+ Boolean, define to enable headphone DC impedance measurement.
+
+ - cirrus,ac-measure:
+ Boolean, define to enable headphone AC impedance measurement.
+ DC impedance must also be enabled for AC impedance measurement.
+
+ - cirrus,dc-threshold:
+ Define 2 DC impedance thresholds in ohms for HP output control.
+ Default values are 50 and 120 Ohms.
+
+ - cirrus,ac-freq:
+ Define the frequencies at which to measure HP AC impedance.
+ Only used if "cirrus,dc-measure" is defined.
+ Exactly 10 frequencies must be defined.
+ If this property is undefined, the following
+ frequencies are used by default:
+ <24 43 93 200 431 928 2000 4309 9283 20000>
+ The above frequencies are logarithmically equally spaced.
+ Log base is 10.
+
+Example:
+
+cs43130: audio-codec@30 {
+ compatible = "cirrus,cs43130";
+ reg = <0x30>;
+ reset-gpios = <&axi_gpio 54 0>;
+ VA-supply = <&dummy_vreg>;
+ VP-supply = <&dummy_vreg>;
+ VL-supply = <&dummy_vreg>;
+ VCP-supply = <&dummy_vreg>;
+ VD-supply = <&dummy_vreg>;
+ cirrus,xtal-ibias = <2>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <55 8>;
+ cirrus,dc-measure;
+ cirrus,ac-measure;
+ cirrus,dc-threshold = /bits/ 16 <20 100>;
+ cirrus,ac-freq = /bits/ 16 <24 43 93 200 431 928 2000 4309 9283 20000>;
+};
diff --git a/Documentation/devicetree/bindings/sound/dmic.txt b/Documentation/devicetree/bindings/sound/dmic.txt
new file mode 100644
index 000000000000..54c8ef6498a8
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/dmic.txt
@@ -0,0 +1,16 @@
+Device-Tree bindings for Digital microphone (DMIC) codec
+
+This device supports generic PDM digital microphones.
+
+Required properties:
+ - compatible: should be "dmic-codec".
+
+Optional properties:
+ - dmicen-gpios: GPIO specifier for dmic to control start and stop
+
+Example node:
+
+ dmic_codec: dmic@0 {
+ compatible = "dmic-codec";
+ dmicen-gpios = <&gpio4 3 GPIO_ACTIVE_HIGH>;
+ };
diff --git a/Documentation/devicetree/bindings/sound/mt2701-afe-pcm.txt b/Documentation/devicetree/bindings/sound/mt2701-afe-pcm.txt
index 9800a560e0c2..77a57f84bed4 100644
--- a/Documentation/devicetree/bindings/sound/mt2701-afe-pcm.txt
+++ b/Documentation/devicetree/bindings/sound/mt2701-afe-pcm.txt
@@ -3,7 +3,8 @@ Mediatek AFE PCM controller for mt2701
Required properties:
- compatible = "mediatek,mt2701-audio";
- reg: register location and size
-- interrupts: Should contain AFE interrupt
+- interrupts: should contain AFE and ASYS interrupts
+- interrupt-names: should be "afe" and "asys"
- power-domains: should define the power domain
- clock-names: should have these clock names:
"infra_sys_audio_clk",
@@ -59,6 +60,7 @@ Example:
<0 0x112A0000 0 0x20000>;
interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_LOW>,
<GIC_SPI 132 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "afe", "asys";
power-domains = <&scpsys MT2701_POWER_DOMAIN_IFR_MSC>;
clocks = <&infracfg CLK_INFRA_AUDIO>,
<&topckgen CLK_TOP_AUD_MUX1_SEL>,
diff --git a/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-analog.txt b/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-analog.txt
index ccb401cfef9d..551ecab67efe 100644
--- a/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-analog.txt
+++ b/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-analog.txt
@@ -31,8 +31,22 @@ Required properties
- vdd-cdc-io-supply: phandle to VDD_CDC_IO regulator DT node.
- vdd-cdc-tx-rx-cx-supply: phandle to VDD_CDC_TX/RX/CX regulator DT node.
- vdd-micbias-supply: phandle of VDD_MICBIAS supply's regulator DT node.
-
Optional Properties:
+ - qcom,mbhc-vthreshold-low: Array of 5 threshold voltages in mV for 5-button
+   detection on the headset when the MBHC is powered up by the internal
+   current source; this is a low-power mode.
+ - qcom,mbhc-vthreshold-high: Array of 5 threshold voltages in mV for 5-button
+   detection on the headset when the MBHC is powered up from micbias.
+- qcom,micbias-lvl: Voltage (mV) for Mic Bias
+- qcom,hphl-jack-type-normally-open: boolean, present if the hphl pin on the
+   jack is NO (Normally Open). If not specified, it is assumed that the
+   hphl pin on the jack is NC (Normally Closed).
+- qcom,gnd-jack-type-normally-open: boolean, present if the gnd pin on the
+   jack is NO (Normally Open). If not specified, it is assumed that the
+   gnd pin on the jack is NC (Normally Closed).
- qcom,micbias1-ext-cap: boolean, present if micbias1 has external capacitor
connected.
- qcom,micbias2-ext-cap: boolean, present if micbias2 has external capacitor
@@ -48,6 +62,8 @@ spmi_bus {
reg-names = "pmic-codec-core";
clocks = <&gcc GCC_CODEC_DIGCODEC_CLK>;
clock-names = "mclk";
+ qcom,mbhc-vthreshold-low = <75 150 237 450 500>;
+ qcom,mbhc-vthreshold-high = <75 150 237 450 500>;
interrupt-parent = <&spmi_bus>;
interrupts = <0x1 0xf0 0x0 IRQ_TYPE_NONE>,
<0x1 0xf0 0x1 IRQ_TYPE_NONE>,
diff --git a/Documentation/devicetree/bindings/sound/renesas,rsnd.txt b/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
index 7246bb268bf9..a1536fdc60e6 100644
--- a/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
+++ b/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
@@ -199,10 +199,10 @@ Ex)
sound {
compatible = "simple-scu-audio-card";
...
- simple-audio-card,cpu@0 {
+ simple-audio-card,cpu-0 {
sound-dai = <&rcar_sound 0>;
};
- simple-audio-card,cpu@1 {
+ simple-audio-card,cpu-1 {
sound-dai = <&rcar_sound 1>;
};
simple-audio-card,codec {
@@ -441,79 +441,79 @@ rcar_sound: sound@ec500000 {
"clk_a", "clk_b", "clk_c", "clk_i";
rcar_sound,dvc {
- dvc0: dvc@0 {
+ dvc0: dvc-0 {
dmas = <&audma0 0xbc>;
dma-names = "tx";
};
- dvc1: dvc@1 {
+ dvc1: dvc-1 {
dmas = <&audma0 0xbe>;
dma-names = "tx";
};
};
rcar_sound,mix {
- mix0: mix@0 { };
- mix1: mix@1 { };
+ mix0: mix-0 { };
+ mix1: mix-1 { };
};
rcar_sound,ctu {
- ctu00: ctu@0 { };
- ctu01: ctu@1 { };
- ctu02: ctu@2 { };
- ctu03: ctu@3 { };
- ctu10: ctu@4 { };
- ctu11: ctu@5 { };
- ctu12: ctu@6 { };
- ctu13: ctu@7 { };
+ ctu00: ctu-0 { };
+ ctu01: ctu-1 { };
+ ctu02: ctu-2 { };
+ ctu03: ctu-3 { };
+ ctu10: ctu-4 { };
+ ctu11: ctu-5 { };
+ ctu12: ctu-6 { };
+ ctu13: ctu-7 { };
};
rcar_sound,src {
- src0: src@0 {
+ src0: src-0 {
interrupts = <0 352 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x85>, <&audma1 0x9a>;
dma-names = "rx", "tx";
};
- src1: src@1 {
+ src1: src-1 {
interrupts = <0 353 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x87>, <&audma1 0x9c>;
dma-names = "rx", "tx";
};
- src2: src@2 {
+ src2: src-2 {
interrupts = <0 354 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x89>, <&audma1 0x9e>;
dma-names = "rx", "tx";
};
- src3: src@3 {
+ src3: src-3 {
interrupts = <0 355 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x8b>, <&audma1 0xa0>;
dma-names = "rx", "tx";
};
- src4: src@4 {
+ src4: src-4 {
interrupts = <0 356 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x8d>, <&audma1 0xb0>;
dma-names = "rx", "tx";
};
- src5: src@5 {
+ src5: src-5 {
interrupts = <0 357 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x8f>, <&audma1 0xb2>;
dma-names = "rx", "tx";
};
- src6: src@6 {
+ src6: src-6 {
interrupts = <0 358 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x91>, <&audma1 0xb4>;
dma-names = "rx", "tx";
};
- src7: src@7 {
+ src7: src-7 {
interrupts = <0 359 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x93>, <&audma1 0xb6>;
dma-names = "rx", "tx";
};
- src8: src@8 {
+ src8: src-8 {
interrupts = <0 360 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x95>, <&audma1 0xb8>;
dma-names = "rx", "tx";
};
- src9: src@9 {
+ src9: src-9 {
interrupts = <0 361 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x97>, <&audma1 0xba>;
dma-names = "rx", "tx";
@@ -521,52 +521,52 @@ rcar_sound: sound@ec500000 {
};
rcar_sound,ssi {
- ssi0: ssi@0 {
+ ssi0: ssi-0 {
interrupts = <0 370 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x01>, <&audma1 0x02>, <&audma0 0x15>, <&audma1 0x16>;
dma-names = "rx", "tx", "rxu", "txu";
};
- ssi1: ssi@1 {
+ ssi1: ssi-1 {
interrupts = <0 371 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x03>, <&audma1 0x04>, <&audma0 0x49>, <&audma1 0x4a>;
dma-names = "rx", "tx", "rxu", "txu";
};
- ssi2: ssi@2 {
+ ssi2: ssi-2 {
interrupts = <0 372 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x05>, <&audma1 0x06>, <&audma0 0x63>, <&audma1 0x64>;
dma-names = "rx", "tx", "rxu", "txu";
};
- ssi3: ssi@3 {
+ ssi3: ssi-3 {
interrupts = <0 373 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x07>, <&audma1 0x08>, <&audma0 0x6f>, <&audma1 0x70>;
dma-names = "rx", "tx", "rxu", "txu";
};
- ssi4: ssi@4 {
+ ssi4: ssi-4 {
interrupts = <0 374 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x09>, <&audma1 0x0a>, <&audma0 0x71>, <&audma1 0x72>;
dma-names = "rx", "tx", "rxu", "txu";
};
- ssi5: ssi@5 {
+ ssi5: ssi-5 {
interrupts = <0 375 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x0b>, <&audma1 0x0c>, <&audma0 0x73>, <&audma1 0x74>;
dma-names = "rx", "tx", "rxu", "txu";
};
- ssi6: ssi@6 {
+ ssi6: ssi-6 {
interrupts = <0 376 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x0d>, <&audma1 0x0e>, <&audma0 0x75>, <&audma1 0x76>;
dma-names = "rx", "tx", "rxu", "txu";
};
- ssi7: ssi@7 {
+ ssi7: ssi-7 {
interrupts = <0 377 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x0f>, <&audma1 0x10>, <&audma0 0x79>, <&audma1 0x7a>;
dma-names = "rx", "tx", "rxu", "txu";
};
- ssi8: ssi@8 {
+ ssi8: ssi-8 {
interrupts = <0 378 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x11>, <&audma1 0x12>, <&audma0 0x7b>, <&audma1 0x7c>;
dma-names = "rx", "tx", "rxu", "txu";
};
- ssi9: ssi@9 {
+ ssi9: ssi-9 {
interrupts = <0 379 IRQ_TYPE_LEVEL_HIGH>;
dmas = <&audma0 0x13>, <&audma1 0x14>, <&audma0 0x7d>, <&audma1 0x7e>;
dma-names = "rx", "tx", "rxu", "txu";
diff --git a/Documentation/devicetree/bindings/sound/rockchip,pdm.txt b/Documentation/devicetree/bindings/sound/rockchip,pdm.txt
index 921729de7346..2ad66f649a28 100644
--- a/Documentation/devicetree/bindings/sound/rockchip,pdm.txt
+++ b/Documentation/devicetree/bindings/sound/rockchip,pdm.txt
@@ -29,11 +29,14 @@ pdm: pdm@ff040000 {
dma-names = "rx";
pinctrl-names = "default", "sleep";
pinctrl-0 = <&pdmm0_clk
- &pdmm0_fsync
&pdmm0_sdi0
&pdmm0_sdi1
&pdmm0_sdi2
&pdmm0_sdi3>;
- pinctrl-1 = <&pdmm0_sleep>;
+ pinctrl-1 = <&pdmm0_clk_sleep
+ &pdmm0_sdi0_sleep
+ &pdmm0_sdi1_sleep
+ &pdmm0_sdi2_sleep
+ &pdmm0_sdi3_sleep>;
status = "disabled";
};
diff --git a/Documentation/devicetree/bindings/sound/rockchip,rk3399-gru-sound.txt b/Documentation/devicetree/bindings/sound/rockchip,rk3399-gru-sound.txt
index eac91db07178..72d3cf4c2606 100644
--- a/Documentation/devicetree/bindings/sound/rockchip,rk3399-gru-sound.txt
+++ b/Documentation/devicetree/bindings/sound/rockchip,rk3399-gru-sound.txt
@@ -4,7 +4,7 @@ Required properties:
- compatible: "rockchip,rk3399-gru-sound"
- rockchip,cpu: The phandle of the Rockchip I2S controller that's
connected to the codecs
-- rockchip,codec: The phandle of the MAX98357A/RT5514/DA7219 codecs
+- rockchip,codec: The phandle of the audio codecs
Optional properties:
- dmic-wakeup-delay-ms : specify delay time (ms) for DMIC ready.
diff --git a/Documentation/devicetree/bindings/sound/rockchip-i2s.txt b/Documentation/devicetree/bindings/sound/rockchip-i2s.txt
index 206aba1b34bb..b208a752576c 100644
--- a/Documentation/devicetree/bindings/sound/rockchip-i2s.txt
+++ b/Documentation/devicetree/bindings/sound/rockchip-i2s.txt
@@ -7,8 +7,12 @@ Required properties:
- compatible: should be one of the following:
- "rockchip,rk3066-i2s": for rk3066
+ - "rockchip,rk3036-i2s", "rockchip,rk3066-i2s": for rk3036
- "rockchip,rk3188-i2s", "rockchip,rk3066-i2s": for rk3188
+ - "rockchip,rk3228-i2s", "rockchip,rk3066-i2s": for rk3228
- "rockchip,rk3288-i2s", "rockchip,rk3066-i2s": for rk3288
+ - "rockchip,rk3328-i2s", "rockchip,rk3066-i2s": for rk3328
+ - "rockchip,rk3366-i2s", "rockchip,rk3066-i2s": for rk3366
- "rockchip,rk3368-i2s", "rockchip,rk3066-i2s": for rk3368
- "rockchip,rk3399-i2s", "rockchip,rk3066-i2s": for rk3399
- reg: physical base address of the controller and length of memory mapped
diff --git a/Documentation/devicetree/bindings/sound/rt274.txt b/Documentation/devicetree/bindings/sound/rt274.txt
new file mode 100644
index 000000000000..e9a6178c78cf
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/rt274.txt
@@ -0,0 +1,33 @@
+RT274 audio CODEC
+
+This device supports I2C only.
+
+Required properties:
+
+- compatible : "realtek,rt274".
+
+- reg : The I2C address of the device.
+
+Optional properties:
+
+- interrupts : The CODEC's interrupt output.
+
+
+Pins on the device (for linking into audio routes) for RT274:
+
+ * DMIC1 Pin
+ * DMIC2 Pin
+ * MIC
+ * LINE1
+ * LINE2
+ * HPO Pin
+ * SPDIF
+ * LINE3
+
+Example:
+
+codec: rt274@1c {
+ compatible = "realtek,rt274";
+ reg = <0x1c>;
+ interrupts = <7 IRQ_TYPE_EDGE_FALLING>;
+};
diff --git a/Documentation/devicetree/bindings/sound/rt5663.txt b/Documentation/devicetree/bindings/sound/rt5663.txt
index 70eaeaed2b18..ff381718c517 100644
--- a/Documentation/devicetree/bindings/sound/rt5663.txt
+++ b/Documentation/devicetree/bindings/sound/rt5663.txt
@@ -12,6 +12,14 @@ Required properties:
Optional properties:
+- "realtek,dc_offset_l_manual"
+- "realtek,dc_offset_r_manual"
+- "realtek,dc_offset_l_manual_mic"
+- "realtek,dc_offset_r_manual_mic"
+ Depending on the PCB layout, these manual offset values can be added to
+ compensate for the DC offset of the L and R channels; the values differ
+ between headphone and headset use.
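+
+For instance, a board could set these offsets as follows (a sketch only; the
+numeric values are placeholders and must be tuned for the actual PCB):
+
+	rt5663: codec@12 {
+		compatible = "realtek,rt5663";
+		reg = <0x12>;
+		realtek,dc_offset_l_manual = <0xfd30>;
+		realtek,dc_offset_r_manual = <0xfd30>;
+		realtek,dc_offset_l_manual_mic = <0xfd30>;
+		realtek,dc_offset_r_manual_mic = <0xfd30>;
+	};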
+
Pins on the device (for linking into audio routes) for RT5663:
* IN1P
diff --git a/Documentation/devicetree/bindings/sound/samsung,odroid.txt b/Documentation/devicetree/bindings/sound/samsung,odroid.txt
index c30934dd975b..625b1b18fd02 100644
--- a/Documentation/devicetree/bindings/sound/samsung,odroid.txt
+++ b/Documentation/devicetree/bindings/sound/samsung,odroid.txt
@@ -7,9 +7,6 @@ Required properties:
- model - the user-visible name of this sound complex
- clocks - should contain entries matching clock names in the clock-names
property
- - clock-names - should contain following entries:
- - "epll" - indicating the EPLL output clock
- - "i2s_rclk" - indicating the RCLK (root) clock of the I2S0 controller
- samsung,audio-widgets - this property specifies off-codec audio elements
like headphones or speakers, for details see widgets.txt
- samsung,audio-routing - a list of the connections between audio
@@ -46,9 +43,6 @@ sound {
"IN1", "Mic Jack",
"Mic Jack", "MICBIAS";
- clocks = <&clock CLK_FOUT_EPLL>, <&i2s0 CLK_I2S_RCLK_SRC>;
- clock-names = "epll", "sclk_i2s";
-
cpu {
sound-dai = <&i2s0 0>;
};
diff --git a/Documentation/devicetree/bindings/sound/simple-card.txt b/Documentation/devicetree/bindings/sound/simple-card.txt
index c7a93931fad2..166f2290233b 100644
--- a/Documentation/devicetree/bindings/sound/simple-card.txt
+++ b/Documentation/devicetree/bindings/sound/simple-card.txt
@@ -86,6 +86,9 @@ Optional CPU/CODEC subnodes properties:
in dai startup() and disabled with
clk_disable_unprepare() in dai
shutdown().
+- system-clock-direction-out : specifies clock direction as 'out' on
+                                initialization. It is useful for some audio
+                                CPUs with fixed clocks.
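+
+A minimal sketch of a CPU subnode using this flag (the node, phandle and
+frequency value below are illustrative only):
+
+	simple-audio-card,cpu {
+		sound-dai = <&i2s0>;
+		system-clock-frequency = <24576000>;
+		system-clock-direction-out;
+	};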
Example 1 - single DAI link:
diff --git a/Documentation/devicetree/bindings/sound/simple-scu-card.txt b/Documentation/devicetree/bindings/sound/simple-scu-card.txt
index 327d229a51b2..32f8dbce5241 100644
--- a/Documentation/devicetree/bindings/sound/simple-scu-card.txt
+++ b/Documentation/devicetree/bindings/sound/simple-scu-card.txt
@@ -24,6 +24,7 @@ Optional subnode properties:
- simple-audio-card,convert-rate : platform specified sampling rate convert
- simple-audio-card,convert-channels : platform specified converted channel size (2 - 8 ch)
- simple-audio-card,prefix : see routing
+- simple-audio-card,widgets : Please refer to widgets.txt.
- simple-audio-card,routing : A list of the connections between audio components.
Each entry is a pair of strings, the first being the connection's sink,
the second being the connection's source. Valid names for sources.
diff --git a/Documentation/devicetree/bindings/sound/sun4i-i2s.txt b/Documentation/devicetree/bindings/sound/sun4i-i2s.txt
index ee21da865771..fc5da6080759 100644
--- a/Documentation/devicetree/bindings/sound/sun4i-i2s.txt
+++ b/Documentation/devicetree/bindings/sound/sun4i-i2s.txt
@@ -8,6 +8,7 @@ Required properties:
- compatible: should be one of the following:
- "allwinner,sun4i-a10-i2s"
- "allwinner,sun6i-a31-i2s"
+ - "allwinner,sun8i-h3-i2s"
- reg: physical base address of the controller and length of memory mapped
region.
- interrupts: should contain the I2S interrupt.
@@ -22,6 +23,7 @@ Required properties:
Required properties for the following compatibles:
- "allwinner,sun6i-a31-i2s"
+ - "allwinner,sun8i-h3-i2s"
- resets: phandle to the reset line for this codec
Example:
diff --git a/Documentation/devicetree/bindings/sound/tlv320aic32x4.txt b/Documentation/devicetree/bindings/sound/tlv320aic32x4.txt
index 5e2741af27be..ca75890f0d07 100644
--- a/Documentation/devicetree/bindings/sound/tlv320aic32x4.txt
+++ b/Documentation/devicetree/bindings/sound/tlv320aic32x4.txt
@@ -3,7 +3,9 @@ Texas Instruments - tlv320aic32x4 Codec module
The tlv320aic32x4 serial control bus communicates through I2C protocols
Required properties:
- - compatible: Should be "ti,tlv320aic32x4"
+ - compatible - "string" - One of:
+ "ti,tlv320aic32x4" TLV320AIC3204
+ "ti,tlv320aic32x6" TLV320AIC3206, TLV320AIC3256
- reg: I2C slave address
- supply-*: Required supply regulators are:
"iov" - digital IO power supply
@@ -18,6 +20,8 @@ Optional properties:
- reset-gpios: Reset-GPIO phandle with args as described in gpio/gpio.txt
- clocks/clock-names: Clock named 'mclk' for the master clock of the codec.
See clock/clock-bindings.txt for information about the detailed format.
+ - aic32x4-gpio-func - <array of 5 int>
+ - Types are defined in include/sound/tlv320aic32x4.h
Example:
@@ -27,4 +31,11 @@ codec: tlv320aic32x4@18 {
reg = <0x18>;
clocks = <&clks 201>;
clock-names = "mclk";
+ aic32x4-gpio-func = <
+ 0xff /* AIC32X4_MFPX_DEFAULT_VALUE */
+ 0xff /* AIC32X4_MFPX_DEFAULT_VALUE */
+ 0x04 /* MFP3 AIC32X4_MFP3_GPIO_ENABLED */
+ 0xff /* AIC32X4_MFPX_DEFAULT_VALUE */
+ 0x08 /* MFP5 AIC32X4_MFP5_GPIO_INPUT */
+ >;
};
diff --git a/Documentation/devicetree/bindings/sound/tlv320aic3x.txt b/Documentation/devicetree/bindings/sound/tlv320aic3x.txt
index 47a213c411ce..ba5b45c483f5 100644
--- a/Documentation/devicetree/bindings/sound/tlv320aic3x.txt
+++ b/Documentation/devicetree/bindings/sound/tlv320aic3x.txt
@@ -26,6 +26,11 @@ Optional properties:
3 - MICBIAS output is connected to AVDD,
If this node is not mentioned or if the value is incorrect, then MicBias
is powered down.
+- ai3x-ocmv - Output Common-Mode Voltage selection:
+ 0 - 1.35V,
+ 1 - 1.5V,
+ 2 - 1.65V,
+ 3 - 1.8V
- AVDD-supply, IOVDD-supply, DRVDD-supply, DVDD-supply : power supplies for the
device as covered in Documentation/devicetree/bindings/regulator/regulator.txt
diff --git a/Documentation/devicetree/bindings/sound/wm8524.txt b/Documentation/devicetree/bindings/sound/wm8524.txt
new file mode 100644
index 000000000000..20c62002cbcd
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/wm8524.txt
@@ -0,0 +1,16 @@
+WM8524 audio CODEC
+
+This device does not use I2C or SPI but a simple Hardware Control Interface.
+
+Required properties:
+
+ - compatible : "wlf,wm8524"
+
+ - wlf,mute-gpios: a GPIO spec for the MUTE pin.
+
+Example:
+
+codec: wm8524@0 {
+ compatible = "wlf,wm8524";
+ wlf,mute-gpios = <&gpio1 8 GPIO_ACTIVE_LOW>;
+};
diff --git a/Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt b/Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt
index 31b5b21598ff..5bf13960f7f4 100644
--- a/Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt
+++ b/Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt
@@ -9,6 +9,7 @@ Required properties:
- "fsl,imx31-cspi" for SPI compatible with the one integrated on i.MX31
- "fsl,imx35-cspi" for SPI compatible with the one integrated on i.MX35
- "fsl,imx51-ecspi" for SPI compatible with the one integrated on i.MX51
+ - "fsl,imx53-ecspi" for SPI compatible with the one integrated on i.MX53 and later SoCs
- reg : Offset and length of the register set for the device
- interrupts : Should contain CSPI/eCSPI interrupt
- cs-gpios : Specifies the gpio pins to be used for chipselects.
diff --git a/Documentation/devicetree/bindings/spi/sh-msiof.txt b/Documentation/devicetree/bindings/spi/sh-msiof.txt
index 64ee489571c4..39e5ef7c5e71 100644
--- a/Documentation/devicetree/bindings/spi/sh-msiof.txt
+++ b/Documentation/devicetree/bindings/spi/sh-msiof.txt
@@ -6,6 +6,7 @@ Required properties:
"renesas,msiof-r8a7792" (R-Car V2H)
"renesas,msiof-r8a7793" (R-Car M2-N)
"renesas,msiof-r8a7794" (R-Car E2)
+ "renesas,msiof-r8a7795" (R-Car H3)
"renesas,msiof-r8a7796" (R-Car M3-W)
"renesas,msiof-sh73a0" (SH-Mobile AG5)
"renesas,sh-mobile-msiof" (generic SH-Mobile compatibile device)
diff --git a/Documentation/devicetree/bindings/spi/spi-rockchip.txt b/Documentation/devicetree/bindings/spi/spi-rockchip.txt
index 83da4931d832..6e3ffacbba32 100644
--- a/Documentation/devicetree/bindings/spi/spi-rockchip.txt
+++ b/Documentation/devicetree/bindings/spi/spi-rockchip.txt
@@ -6,6 +6,7 @@ and display controllers using the SPI communication interface.
Required Properties:
- compatible: should be one of the following.
+ "rockchip,rv1108-spi" for rv1108 SoCs.
"rockchip,rk3036-spi" for rk3036 SoCS.
"rockchip,rk3066-spi" for rk3066 SoCs.
"rockchip,rk3188-spi" for rk3188 SoCs.
diff --git a/Documentation/devicetree/bindings/timer/nxp,tpm-timer.txt b/Documentation/devicetree/bindings/timer/nxp,tpm-timer.txt
new file mode 100644
index 000000000000..b4aa7ddb5b13
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/nxp,tpm-timer.txt
@@ -0,0 +1,28 @@
+NXP Low Power Timer/Pulse Width Modulation Module (TPM)
+
+The Timer/PWM Module (TPM) supports input capture, output compare,
+and the generation of PWM signals to control electric motor and power
+management applications. The counter, compare and capture registers
+are clocked by an asynchronous clock that can remain enabled in low
+power modes. TPM can support global counter bus where one TPM drives
+the counter bus for the others, provided bit width is the same.
+
+Required properties:
+
+- compatible : should be "fsl,imx7ulp-tpm"
+- reg : Specifies base physical address and size of the register sets
+ for the clock event device and clock source device.
+- interrupts : Should be the clock event device interrupt.
+- clocks : The clocks provided by the SoC to drive the timer, must contain
+ an entry for each entry in clock-names.
+- clock-names : Must include the following entries: "ipg" and "per".
+
+Example:
+tpm5: tpm@40260000 {
+ compatible = "fsl,imx7ulp-tpm";
+ reg = <0x40260000 0x1000>;
+ interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7ULP_CLK_NIC1_BUS_DIV>,
+ <&clks IMX7ULP_CLK_LPTPM5>;
+ clock-names = "ipg", "per";
+};
diff --git a/Documentation/devicetree/bindings/timer/renesas,cmt.txt b/Documentation/devicetree/bindings/timer/renesas,cmt.txt
index 1a05c1b243c1..6ca6b9e582a0 100644
--- a/Documentation/devicetree/bindings/timer/renesas,cmt.txt
+++ b/Documentation/devicetree/bindings/timer/renesas,cmt.txt
@@ -12,46 +12,29 @@ datasheets.
Required Properties:
- compatible: must contain one or more of the following:
- - "renesas,cmt-32-r8a7740" for the r8a7740 32-bit CMT
- (CMT0)
- - "renesas,cmt-32-sh7372" for the sh7372 32-bit CMT
- (CMT0)
- - "renesas,cmt-32-sh73a0" for the sh73a0 32-bit CMT
- (CMT0)
- - "renesas,cmt-32" for all 32-bit CMT without fast clock support
- (CMT0 on sh7372, sh73a0 and r8a7740)
- This is a fallback for the above renesas,cmt-32-* entries.
-
- - "renesas,cmt-32-fast-r8a7740" for the r8a7740 32-bit CMT with fast
- clock support (CMT[234])
- - "renesas,cmt-32-fast-sh7372" for the sh7372 32-bit CMT with fast
- clock support (CMT[234])
- - "renesas,cmt-32-fast-sh73a0" for the sh73A0 32-bit CMT with fast
- clock support (CMT[234])
- - "renesas,cmt-32-fast" for all 32-bit CMT with fast clock support
- (CMT[234] on sh7372, sh73a0 and r8a7740)
- This is a fallback for the above renesas,cmt-32-fast-* entries.
-
- - "renesas,cmt-48-sh7372" for the sh7372 48-bit CMT
- (CMT1)
- "renesas,cmt-48-sh73a0" for the sh73A0 48-bit CMT
(CMT1)
- "renesas,cmt-48-r8a7740" for the r8a7740 48-bit CMT
(CMT1)
- "renesas,cmt-48" for all non-second generation 48-bit CMT
- (CMT1 on sh7372, sh73a0 and r8a7740)
+ (CMT1 on sh73a0 and r8a7740)
This is a fallback for the above renesas,cmt-48-* entries.
- - "renesas,cmt-48-r8a73a4" for the r8a73a4 48-bit CMT
- (CMT[01])
- - "renesas,cmt-48-r8a7790" for the r8a7790 48-bit CMT
- (CMT[01])
- - "renesas,cmt-48-r8a7791" for the r8a7791 48-bit CMT
- (CMT[01])
- - "renesas,cmt-48-gen2" for all second generation 48-bit CMT
- (CMT[01] on r8a73a4, r8a7790 and r8a7791)
- This is a fallback for the renesas,cmt-48-r8a73a4,
- renesas,cmt-48-r8a7790 and renesas,cmt-48-r8a7791 entries.
+ - "renesas,cmt0-r8a73a4" for the 32-bit CMT0 device included in r8a73a4.
+ - "renesas,cmt1-r8a73a4" for the 48-bit CMT1 device included in r8a73a4.
+ - "renesas,cmt0-r8a7790" for the 32-bit CMT0 device included in r8a7790.
+ - "renesas,cmt1-r8a7790" for the 48-bit CMT1 device included in r8a7790.
+ - "renesas,cmt0-r8a7791" for the 32-bit CMT0 device included in r8a7791.
+ - "renesas,cmt1-r8a7791" for the 48-bit CMT1 device included in r8a7791.
+ - "renesas,cmt0-r8a7793" for the 32-bit CMT0 device included in r8a7793.
+ - "renesas,cmt1-r8a7793" for the 48-bit CMT1 device included in r8a7793.
+ - "renesas,cmt0-r8a7794" for the 32-bit CMT0 device included in r8a7794.
+ - "renesas,cmt1-r8a7794" for the 48-bit CMT1 device included in r8a7794.
+
+ - "renesas,rcar-gen2-cmt0" for 32-bit CMT0 devices included in R-Car Gen2.
+ - "renesas,rcar-gen2-cmt1" for 48-bit CMT1 devices included in R-Car Gen2.
+ These are fallbacks for r8a73a4 and all the R-Car Gen2
+ entries listed above.
- reg: base address and length of the registers block for the timer module.
- interrupts: interrupt-specifier for the timer, one per channel.
@@ -59,21 +42,29 @@ Required Properties:
in clock-names.
- clock-names: must contain "fck" for the functional clock.
- - renesas,channels-mask: bitmask of the available channels.
-
-Example: R8A7790 (R-Car H2) CMT0 node
-
- CMT0 on R8A7790 implements hardware channels 5 and 6 only and names
- them channels 0 and 1 in the documentation.
+Example: R8A7790 (R-Car H2) CMT0 and CMT1 nodes
cmt0: timer@ffca0000 {
- compatible = "renesas,cmt-48-r8a7790", "renesas,cmt-48-gen2";
+ compatible = "renesas,cmt0-r8a7790", "renesas,rcar-gen2-cmt0";
reg = <0 0xffca0000 0 0x1004>;
interrupts = <0 142 IRQ_TYPE_LEVEL_HIGH>,
<0 142 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp1_clks R8A7790_CLK_CMT0>;
clock-names = "fck";
+ };
- renesas,channels-mask = <0x60>;
+ cmt1: timer@e6130000 {
+ compatible = "renesas,cmt1-r8a7790", "renesas,rcar-gen2-cmt1";
+ reg = <0 0xe6130000 0 0x1004>;
+ interrupts = <0 120 IRQ_TYPE_LEVEL_HIGH>,
+ <0 121 IRQ_TYPE_LEVEL_HIGH>,
+ <0 122 IRQ_TYPE_LEVEL_HIGH>,
+ <0 123 IRQ_TYPE_LEVEL_HIGH>,
+ <0 124 IRQ_TYPE_LEVEL_HIGH>,
+ <0 125 IRQ_TYPE_LEVEL_HIGH>,
+ <0 126 IRQ_TYPE_LEVEL_HIGH>,
+ <0 127 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp3_clks R8A7790_CLK_CMT1>;
+ clock-names = "fck";
};
diff --git a/Documentation/devicetree/bindings/usb/brcm,bdc.txt b/Documentation/devicetree/bindings/usb/brcm,bdc.txt
new file mode 100644
index 000000000000..63e63af3bf59
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/brcm,bdc.txt
@@ -0,0 +1,29 @@
+Broadcom USB Device Controller (BDC)
+====================================
+
+Required properties:
+
+- compatible: must be one of:
+ "brcm,bdc-v0.16"
+ "brcm,bdc"
+- reg: the base register address and length
+- interrupts: the interrupt line for this controller
+
+Optional properties:
+
+On Broadcom STB platforms, these properties are required:
+
+- phys: phandle to one or two USB PHY blocks
+ NOTE: Some SoCs have a single PHY and some have
+ separate USB 2.0 and USB 3.0 PHYs
+- clocks: phandle to the functional clock of this block
+
+Example:
+
+ bdc@f0b02000 {
+ compatible = "brcm,bdc-v0.16";
+ reg = <0xf0b02000 0xfc4>;
+ interrupts = <0x0 0x60 0x0>;
+ phys = <&usbphy_0 0x0>;
+ clocks = <&sw_usbd>;
+ };
diff --git a/Documentation/devicetree/bindings/usb/fcs,fusb302.txt b/Documentation/devicetree/bindings/usb/fcs,fusb302.txt
new file mode 100644
index 000000000000..472facfa5a71
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/fcs,fusb302.txt
@@ -0,0 +1,29 @@
+Fairchild FUSB302 Type-C Port controllers
+
+Required properties :
+- compatible : "fcs,fusb302"
+- reg : I2C slave address
+- interrupts : Interrupt specifier
+
+Optional properties :
+- fcs,max-sink-microvolt : Maximum voltage to negotiate when configured as sink
+- fcs,max-sink-microamp : Maximum current to negotiate when configured as sink
+- fcs,max-sink-microwatt : Maximum power to negotiate when configured as sink
+ If this is less than max-sink-microvolt *
+ max-sink-microamp then the configured current will
+ be clamped.
+- fcs,operating-sink-microwatt :
+ Minimum amount of power accepted from a sink
+ when negotiating
+
+Example:
+
+fusb302: typec-portc@54 {
+ compatible = "fcs,fusb302";
+ reg = <0x54>;
+ interrupt-parent = <&nmi_intc>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ fcs,max-sink-microvolt = <12000000>;
+ fcs,max-sink-microamp = <3000000>;
+ fcs,max-sink-microwatt = <36000000>;
+};
diff --git a/Documentation/devicetree/bindings/usb/keystone-usb.txt b/Documentation/devicetree/bindings/usb/keystone-usb.txt
index 60527d335b58..2d1bef16f149 100644
--- a/Documentation/devicetree/bindings/usb/keystone-usb.txt
+++ b/Documentation/devicetree/bindings/usb/keystone-usb.txt
@@ -12,8 +12,21 @@ Required properties:
MPU.
- ranges: allows valid 1:1 translation between child's address space and
parent's address space.
- - clocks: Clock IDs array as required by the controller.
- - clock-names: names of clocks correseponding to IDs in the clock property.
+
+SoC-specific Required Properties:
+The following are mandatory properties for Keystone 2 66AK2HK, 66AK2L and 66AK2E
+SoCs only:
+
+- clocks: Clock ID for USB functional clock.
+- clock-names: Must be "usb".
+
+
+The following are mandatory properties for Keystone 2 66AK2G SoCs only:
+
+- power-domains: Should contain a phandle to a PM domain provider node
+ and an args specifier containing the USB device id
+ value. This property is as per the binding,
+ Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt
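+
+A 66AK2G sketch (the PM domain provider label and the USB device id value
+below are placeholders, not taken from a real board):
+
+	keystone_usb0: usb@2680000 {
+		...
+		power-domains = <&k2g_pds 0x0008>;
+	};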
Sub-nodes:
The dwc3 core should be added as subnode to Keystone DWC3 glue.
diff --git a/Documentation/devicetree/bindings/usb/mt8173-xhci.txt b/Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.txt
index 0acfc8acbea1..5611a2e4ddf0 100644
--- a/Documentation/devicetree/bindings/usb/mt8173-xhci.txt
+++ b/Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.txt
@@ -11,7 +11,11 @@ into two parts.
------------------------------------------------------------------------
Required properties:
- - compatible : should contain "mediatek,mt8173-xhci"
+ - compatible : should be "mediatek,<soc-model>-xhci", "mediatek,mtk-xhci",
+   where <soc-model> is the name of the SoC, such as mt8173, mt2712 etc. When
+   using the "mediatek,mtk-xhci" compatible string, you also need an
+   SoC-specific one, one of:
+   - "mediatek,mt8173-xhci"
- reg : specifies physical base address and size of the registers
- reg-names: should be "mac" for xHCI MAC and "ippc" for IP port control
- interrupts : interrupt used by the controller
@@ -68,10 +72,14 @@ usb30: usb@11270000 {
In the case, xhci is added as subnode to mtu3. An example and the DT binding
details of mtu3 can be found in:
-Documentation/devicetree/bindings/usb/mt8173-mtu3.txt
+Documentation/devicetree/bindings/usb/mediatek,mtu3.txt
Required properties:
- - compatible : should contain "mediatek,mt8173-xhci"
+ - compatible : should be "mediatek,<soc-model>-xhci", "mediatek,mtk-xhci",
+   where <soc-model> is the name of the SoC, such as mt8173, mt2712 etc. When
+   using the "mediatek,mtk-xhci" compatible string, you also need an
+   SoC-specific one, one of:
+   - "mediatek,mt8173-xhci"
- reg : specifies physical base address and size of the registers
- reg-names: should be "mac" for xHCI MAC
- interrupts : interrupt used by the host controller
diff --git a/Documentation/devicetree/bindings/usb/mt8173-mtu3.txt b/Documentation/devicetree/bindings/usb/mediatek,mtu3.txt
index 1d7c3bc677f7..838ae48eafc1 100644
--- a/Documentation/devicetree/bindings/usb/mt8173-mtu3.txt
+++ b/Documentation/devicetree/bindings/usb/mediatek,mtu3.txt
@@ -1,7 +1,11 @@
The device node for Mediatek USB3.0 DRD controller
Required properties:
- - compatible : should be "mediatek,mt8173-mtu3"
+ - compatible : should be "mediatek,<soc-model>-mtu3", "mediatek,mtu3",
+   where <soc-model> is the name of the SoC, such as mt8173, mt2712 etc. When
+   using the "mediatek,mtu3" compatible string, you also need an SoC-specific
+   one, one of:
+   - "mediatek,mt8173-mtu3"
- reg : specifies physical base address and size of the registers
- reg-names: should be "mac" for device IP and "ippc" for IP port control
- interrupts : interrupt used by the device IP
@@ -44,7 +48,7 @@ Optional properties:
Sub-nodes:
The xhci should be added as subnode to mtu3 as shown in the following example
if host mode is enabled. The DT binding details of xhci can be found in:
-Documentation/devicetree/bindings/usb/mt8173-xhci.txt
+Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.txt
Example:
ssusb: usb@11271000 {
diff --git a/Documentation/devicetree/bindings/usb/renesas_usb3.txt b/Documentation/devicetree/bindings/usb/renesas_usb3.txt
index 8d52766f07b9..e28025883b79 100644
--- a/Documentation/devicetree/bindings/usb/renesas_usb3.txt
+++ b/Documentation/devicetree/bindings/usb/renesas_usb3.txt
@@ -3,20 +3,30 @@ Renesas Electronics USB3.0 Peripheral driver
Required properties:
- compatible: Must contain one of the following:
- "renesas,r8a7795-usb3-peri"
+ - "renesas,r8a7796-usb3-peri"
+ - "renesas,rcar-gen3-usb3-peri" for a generic R-Car Gen3 compatible
+ device
+
+ When compatible with the generic version, nodes must list the
+ SoC-specific version corresponding to the platform first
+ followed by the generic version.
+
- reg: Base address and length of the register for the USB3.0 Peripheral
- interrupts: Interrupt specifier for the USB3.0 Peripheral
- clocks: clock phandle and specifier pair
-Example:
+Example of R-Car H3 ES1.x:
usb3_peri0: usb@ee020000 {
- compatible = "renesas,r8a7795-usb3-peri";
+ compatible = "renesas,r8a7795-usb3-peri",
+ "renesas,rcar-gen3-usb3-peri";
reg = <0 0xee020000 0 0x400>;
interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 328>;
};
usb3_peri1: usb@ee060000 {
- compatible = "renesas,r8a7795-usb3-peri";
+ compatible = "renesas,r8a7795-usb3-peri",
+ "renesas,rcar-gen3-usb3-peri";
reg = <0 0xee060000 0 0x400>;
interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 327>;
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index daf465bef758..f58c9323b92b 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -88,6 +88,7 @@ dlg Dialog Semiconductor
dlink D-Link Corporation
dmo Data Modul AG
domintech Domintech Co., Ltd.
+dongwoon Dongwoon Anatech
dptechnics DPTechnics
dragino Dragino Technology Co., Limited
ea Embedded Artists AB
@@ -175,6 +176,7 @@ kosagi Sutajio Ko-Usagi PTE Ltd.
kyo Kyocera Corporation
lacie LaCie
lantiq Lantiq Semiconductor
+lattice Lattice Semiconductor
lego LEGO Systems A/S
lenovo Lenovo Group Ltd.
lg LG Corporation
@@ -249,6 +251,7 @@ oxsemi Oxford Semiconductor, Ltd.
panasonic Panasonic Corporation
parade Parade Technologies Inc.
pericom Pericom Technology Inc.
+pervasive Pervasive Displays, Inc.
phytec PHYTEC Messtechnik GmbH
picochip Picochip Ltd
pine64 Pine64
diff --git a/Documentation/devicetree/bindings/xilinx.txt b/Documentation/devicetree/bindings/xilinx.txt
index 299d0923537b..1d11b9002ef8 100644
--- a/Documentation/devicetree/bindings/xilinx.txt
+++ b/Documentation/devicetree/bindings/xilinx.txt
@@ -281,6 +281,8 @@
capabilities of the underlying ICAP hardware
differ between different families. May be
'virtex2p', 'virtex4', or 'virtex5'.
+ - compatible : should contain "xlnx,xps-hwicap-1.00.a" or
+ "xlnx,opb-hwicap-1.00.b".
vi) Xilinx Uart 16550
diff --git a/Documentation/doc-guide/sphinx.rst b/Documentation/doc-guide/sphinx.rst
index 84e8e8a9cbdb..a2417633fdd8 100644
--- a/Documentation/doc-guide/sphinx.rst
+++ b/Documentation/doc-guide/sphinx.rst
@@ -19,6 +19,110 @@ Finally, there are thousands of plain text documentation files scattered around
``Documentation``. Some of these will likely be converted to reStructuredText
over time, but the bulk of them will remain in plain text.
+.. _sphinx_install:
+
+Sphinx Install
+==============
+
+The ReST markups currently used by the Documentation/ files are meant to be
+built with ``Sphinx`` version 1.3 or higher. If you want to build
+PDF output, it is recommended to use version 1.4.6 or higher.
+
+There's a script that checks for the Sphinx requirements. Please see
+:ref:`sphinx-pre-install` for further details.
+
+Most distributions ship with Sphinx, but its toolchain is fragile,
+and it is not uncommon that upgrading it or some other Python packages
+on your machine causes the documentation build to break.
+
+A way to avoid that is to use a version different from the one shipped
+with your distribution. In order to do that, it is recommended to install
+Sphinx inside a virtual environment, using ``virtualenv-3``
+or ``virtualenv``, depending on how your distribution packaged Python 3.
+
+.. note::
+
+ #) Sphinx versions below 1.5 don't work properly with Python's
+ docutils version 0.13.1 or higher. So, if you want to use
+ those Sphinx versions, you should run ``pip install 'docutils==0.12'``.
+
+ #) It is recommended to use the RTD theme for html output. Depending
+ on the Sphinx version, it may need to be installed separately,
+ with ``pip install sphinx_rtd_theme``.
+
+ #) Some ReST pages contain math expressions. Due to the way Sphinx works,
+ those expressions are written using LaTeX notation. They need texlive
+ installed with amsfonts and amsmath in order to be evaluated.
+
+In summary, if you want to install Sphinx version 1.4.9, you should do::
+
+ $ virtualenv sphinx_1.4
+ $ . sphinx_1.4/bin/activate
+ (sphinx_1.4) $ pip install -r Documentation/sphinx/requirements.txt
+
+After running ``. sphinx_1.4/bin/activate``, the prompt will change,
+indicating that you're using the new environment. If you open a new
+shell, you need to rerun this command to re-enter the virtual
+environment before building the documentation.
+
+Image output
+------------
+
+The kernel documentation build system contains an extension that
+handles images in both GraphViz and SVG formats (see
+:ref:`sphinx_kfigure`).
+
+For it to work, you need to install both GraphViz and ImageMagick
+packages. If those packages are not installed, the build system will
+still build the documentation, but won't include any images in the
+output.
+
+PDF and LaTeX builds
+--------------------
+
+Such builds are currently supported only with Sphinx versions 1.4 and higher.
+
+For PDF and LaTeX output, you'll also need ``XeLaTeX`` version 3.14159265.
+
+Depending on the distribution, you may also need to install a series of
+``texlive`` packages that provide the minimal set of functionalities
+required for ``XeLaTeX`` to work.
+
+.. _sphinx-pre-install:
+
+Checking for Sphinx dependencies
+--------------------------------
+
+There's a script that automatically checks for Sphinx dependencies. If it can
+recognize your distribution, it will also give a hint about the install
+command line options for your distro::
+
+ $ ./scripts/sphinx-pre-install
+ Checking if the needed tools for Fedora release 26 (Twenty Six) are available
+ Warning: better to also install "texlive-luatex85".
+ You should run:
+
+ sudo dnf install -y texlive-luatex85
+ /usr/bin/virtualenv sphinx_1.4
+ . sphinx_1.4/bin/activate
+ pip install -r Documentation/sphinx/requirements.txt
+
+ Can't build as 1 mandatory dependency is missing at ./scripts/sphinx-pre-install line 468.
+
+By default, it checks all the requirements for both html and PDF, including
+the requirements for images, math expressions and LaTeX build, and assumes
+that a virtual Python environment will be used. The ones needed for html
+builds are assumed to be mandatory; the others to be optional.
+
+It supports two optional parameters:
+
+``--no-pdf``
+ Disable checks for PDF;
+
+``--no-virtualenv``
+ Use OS packaging for Sphinx instead of Python virtual environment.
+
+
Sphinx Build
============
@@ -118,7 +222,7 @@ Here are some specific guidelines for the kernel documentation:
the C domain
------------
-The `Sphinx C Domain`_ (name c) is suited for documentation of C API. E.g. a
+The **Sphinx C Domain** (name c) is suited for documentation of C API. E.g. a
function prototype:
.. code-block:: rst
@@ -229,6 +333,7 @@ Rendered as:
- column 3
+.. _sphinx_kfigure:
Figures & Images
================
diff --git a/Documentation/driver-api/basics.rst b/Documentation/driver-api/basics.rst
index ab82250c7727..73fa7d42bbba 100644
--- a/Documentation/driver-api/basics.rst
+++ b/Documentation/driver-api/basics.rst
@@ -4,7 +4,7 @@ Driver Basics
Driver Entry and Exit points
----------------------------
-.. kernel-doc:: include/linux/init.h
+.. kernel-doc:: include/linux/module.h
:internal:
Driver device table
@@ -103,9 +103,6 @@ Kernel utility functions
.. kernel-doc:: kernel/panic.c
:export:
-.. kernel-doc:: kernel/sys.c
- :export:
-
.. kernel-doc:: kernel/rcu/tree.c
:export:
diff --git a/Documentation/driver-api/dma-buf.rst b/Documentation/driver-api/dma-buf.rst
index 31671b469627..dc384f2f7f34 100644
--- a/Documentation/driver-api/dma-buf.rst
+++ b/Documentation/driver-api/dma-buf.rst
@@ -139,9 +139,6 @@ DMA Fences
Seqno Hardware Fences
~~~~~~~~~~~~~~~~~~~~~
-.. kernel-doc:: drivers/dma-buf/seqno-fence.c
- :export:
-
.. kernel-doc:: include/linux/seqno-fence.h
:internal:
diff --git a/Documentation/driver-api/gpio.rst b/Documentation/driver-api/gpio.rst
new file mode 100644
index 000000000000..6dd4aa647f27
--- /dev/null
+++ b/Documentation/driver-api/gpio.rst
@@ -0,0 +1,45 @@
+===================================
+General Purpose Input/Output (GPIO)
+===================================
+
+Core
+====
+
+.. kernel-doc:: include/linux/gpio/driver.h
+ :internal:
+
+.. kernel-doc:: drivers/gpio/gpiolib.c
+ :export:
+
+Legacy API
+==========
+
+The functions listed in this section are deprecated. The GPIO descriptor based
+API described above should be used in new code.
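+
+As a quick orientation, a minimal consumer sketch of the preferred
+descriptor-based API might look like the following (assuming a driver that
+already has a ``struct device *dev`` and a hypothetical "reset" GPIO
+described in its firmware node)::
+
+    #include <linux/err.h>
+    #include <linux/gpio/consumer.h>
+
+    struct gpio_desc *reset;
+
+    /* Request the GPIO named "reset" for this device, initially driven low. */
+    reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+    if (IS_ERR(reset))
+        return PTR_ERR(reset);
+
+    /* Logical values: gpiolib handles any active-low inversion itself. */
+    gpiod_set_value(reset, 1);
+    gpiod_set_value(reset, 0);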
+
+.. kernel-doc:: drivers/gpio/gpiolib-legacy.c
+ :export:
+
+ACPI support
+============
+
+.. kernel-doc:: drivers/gpio/gpiolib-acpi.c
+ :export:
+
+Device tree support
+===================
+
+.. kernel-doc:: drivers/gpio/gpiolib-of.c
+ :export:
+
+Device-managed API
+==================
+
+.. kernel-doc:: drivers/gpio/devres.c
+ :export:
+
+sysfs helpers
+=============
+
+.. kernel-doc:: drivers/gpio/gpiolib-sysfs.c
+ :export:
diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst
index 7c94ab50afed..9c20624842b7 100644
--- a/Documentation/driver-api/index.rst
+++ b/Documentation/driver-api/index.rst
@@ -44,6 +44,7 @@ available subsections can be seen below.
uio-howto
firmware/index
pinctl
+ gpio
misc_devices
.. only:: subproject and html
diff --git a/Documentation/driver-api/miscellaneous.rst b/Documentation/driver-api/miscellaneous.rst
index 8da7d115bafc..304ffb146cf9 100644
--- a/Documentation/driver-api/miscellaneous.rst
+++ b/Documentation/driver-api/miscellaneous.rst
@@ -47,4 +47,3 @@ used by one consumer at a time.
.. kernel-doc:: drivers/pwm/core.c
:export:
-
diff --git a/Documentation/driver-api/s390-drivers.rst b/Documentation/driver-api/s390-drivers.rst
index 7060da136095..ecf8851d3565 100644
--- a/Documentation/driver-api/s390-drivers.rst
+++ b/Documentation/driver-api/s390-drivers.rst
@@ -75,7 +75,7 @@ The channel-measurement facility provides a means to collect measurement
data which is made available by the channel subsystem for each channel
attached device.
-.. kernel-doc:: arch/s390/include/asm/cmb.h
+.. kernel-doc:: arch/s390/include/uapi/asm/cmb.h
:internal:
.. kernel-doc:: drivers/s390/cio/cmf.c
diff --git a/Documentation/driver-api/scsi.rst b/Documentation/driver-api/scsi.rst
index 859fb672319f..5a2aa7a377d9 100644
--- a/Documentation/driver-api/scsi.rst
+++ b/Documentation/driver-api/scsi.rst
@@ -224,14 +224,6 @@ mid to lowlevel SCSI driver interface
.. kernel-doc:: drivers/scsi/hosts.c
:export:
-drivers/scsi/constants.c
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-mid to lowlevel SCSI driver interface
-
-.. kernel-doc:: drivers/scsi/constants.c
- :export:
-
Transport classes
-----------------
diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt
index 30e04f7a690d..69f08c0f23a8 100644
--- a/Documentation/driver-model/devres.txt
+++ b/Documentation/driver-model/devres.txt
@@ -312,6 +312,7 @@ IRQ
devm_irq_alloc_descs_from()
devm_irq_alloc_generic_chip()
devm_irq_setup_generic_chip()
+ devm_irq_sim_init()
LED
devm_led_classdev_register()
diff --git a/Documentation/errseq.rst b/Documentation/errseq.rst
new file mode 100644
index 000000000000..4c29bd5afbc5
--- /dev/null
+++ b/Documentation/errseq.rst
@@ -0,0 +1,149 @@
+The errseq_t datatype
+=====================
+An errseq_t is a way of recording errors in one place, and allowing any
+number of "subscribers" to tell whether it has changed since a previous
+point where it was sampled.
+
+The initial use case for this is tracking errors for file
+synchronization syscalls (fsync, fdatasync, msync and sync_file_range),
+but it may be usable in other situations.
+
+It's implemented as an unsigned 32-bit value. The low order bits are
+designated to hold an error code (between 1 and MAX_ERRNO). The upper bits
+are used as a counter. This is done with atomics instead of locking so that
+these functions can be called from any context.
+
+Note that there is a risk of collisions if new errors are being recorded
+frequently, since we have so few bits to use as a counter.
+
+To mitigate this, the bit between the error value and counter is used as
+a flag to tell whether the value has been sampled since a new value was
+recorded. That allows us to avoid bumping the counter if no one has
+sampled it since the last time an error was recorded.
+
+Thus we end up with a value that looks something like this::
+
+ bit: 31..13 12 11..0
+ +-----------------+----+----------------+
+ | counter | SF | errno |
+ +-----------------+----+----------------+
+
+The general idea is for "watchers" to sample an errseq_t value and keep
+it as a running cursor. That value can later be used to tell whether
+any new errors have occurred since that sampling was done, and atomically
+record the state at the time that it was checked. This allows us to
+record errors in one place, and then have a number of "watchers" that
+can tell whether the value has changed since they last checked it.
+
+A new errseq_t should always be zeroed out. An errseq_t value of all zeroes
+is the special (but common) case where there has never been an error. An all
+zero value thus serves as the "epoch" if one wishes to know whether there
+has ever been an error set since it was first initialized.
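+
+For orientation, the API described below is deliberately small. A rough
+sketch of the prototypes (the authoritative declarations live in
+include/linux/errseq.h) looks like this::
+
+    /* Record an error; returns the new errseq_t value. */
+    errseq_t errseq_set(errseq_t *eseq, int err);
+
+    /* Take a snapshot to be used as a private cursor later. */
+    errseq_t errseq_sample(errseq_t *eseq);
+
+    /* Report any error recorded since "since", without advancing it. */
+    int errseq_check(errseq_t *eseq, errseq_t since);
+
+    /* Same, but also advance the cursor so the error is reported once. */
+    int errseq_check_and_advance(errseq_t *eseq, errseq_t *since);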
+
+API usage
+=========
+Let me tell you a story about a worker drone. Now, he's a good worker
+overall, but the company is a little...management heavy. He has to
+report to 77 supervisors today, and tomorrow the "big boss" is coming in
+from out of town and he's sure to test the poor fellow too.
+
+They're all handing him work to do -- so much he can't keep track of who
+handed him what, but that's not really a big problem. The supervisors
+just want to know when he's finished all of the work they've handed him so
+far and whether he made any mistakes since they last asked.
+
+He might have made the mistake on work they didn't actually hand him,
+but he can't keep track of things at that level of detail, all he can
+remember is the most recent mistake that he made.
+
+Here's our worker_drone representation::
+
+ struct worker_drone {
+ errseq_t wd_err; /* for recording errors */
+ };
+
+Every day, the worker_drone starts out with a blank slate::
+
+ struct worker_drone wd;
+
+ wd.wd_err = (errseq_t)0;
+
+The supervisors come in and get an initial read for the day. They
+don't care about anything that happened before their watch begins::
+
+ struct supervisor {
+ errseq_t s_wd_err; /* private "cursor" for wd_err */
+ spinlock_t s_wd_err_lock; /* protects s_wd_err */
+ };
+
+ struct supervisor su;
+
+ su.s_wd_err = errseq_sample(&wd.wd_err);
+ spin_lock_init(&su.s_wd_err_lock);
+
+Now they start handing him tasks to do. Every few minutes they ask him to
+finish up all of the work they've handed him so far. Then they ask him
+whether he made any mistakes on any of it::
+
+ spin_lock(&su.s_wd_err_lock);
+ err = errseq_check_and_advance(&wd.wd_err, &su.s_wd_err);
+ spin_unlock(&su.s_wd_err_lock);
+
+Up to this point, that just keeps returning 0.
+
+Now, the owners of this company are quite miserly and have given him
+substandard equipment with which to do his job. Occasionally it
+glitches and he makes a mistake. He sighs a heavy sigh, and marks it
+down::
+
+ errseq_set(&wd.wd_err, -EIO);
+
+...and then gets back to work. The supervisors eventually poll again
+and they each get the error when they next check. Subsequent calls will
+return 0, until another error is recorded, at which point it's reported
+to each of them once.
+
+Note that the supervisors can't tell how many mistakes he made, only
+whether one was made since they last checked, and the latest value
+recorded.
+
+Occasionally the big boss comes in for a spot check and asks the worker
+to do a one-off job for him. He's not really watching the worker
+full-time like the supervisors, but he does need to know whether a
+mistake occurred while his job was processing.
+
+He can just sample the current errseq_t in the worker, and then use that
+to tell whether an error has occurred later::
+
+ errseq_t since = errseq_sample(&wd.wd_err);
+ /* submit some work and wait for it to complete */
+ err = errseq_check(&wd.wd_err, since);
+
+Since he's just going to discard "since" after that point, he doesn't
+need to advance it here. He also doesn't need any locking since it's
+not usable by anyone else.
+
+Serializing errseq_t cursor updates
+===================================
+Note that the errseq_t API does not protect the errseq_t cursor during a
+check_and_advance operation. Only the canonical error code is handled
+atomically. In a situation where more than one task might be using the
+same errseq_t cursor at the same time, it's important to serialize
+updates to that cursor.
+
+If that's not done, then it's possible for the cursor to go backward
+in which case the same error could be reported more than once.
+
+Because of this, it's often advantageous to first do an errseq_check to
+see if anything has changed, and only later do an
+errseq_check_and_advance after taking the lock. e.g.::
+
+ if (errseq_check(&wd.wd_err, READ_ONCE(su.s_wd_err))) {
+ /* su.s_wd_err is protected by s_wd_err_lock */
+ spin_lock(&su.s_wd_err_lock);
+ err = errseq_check_and_advance(&wd.wd_err, &su.s_wd_err);
+ spin_unlock(&su.s_wd_err_lock);
+ }
+
+That avoids the spinlock in the common case where nothing has changed
+since the last time it was checked.
diff --git a/Documentation/fb/efifb.txt b/Documentation/fb/efifb.txt
index a59916c29b33..1a85c1bdaf38 100644
--- a/Documentation/fb/efifb.txt
+++ b/Documentation/fb/efifb.txt
@@ -27,5 +27,11 @@ You have to add the following kernel parameters in your elilo.conf:
Macbook Pro 17", iMac 20" :
video=efifb:i20
+Accepted options:
+
+nowc Don't map the framebuffer write combined. This can be used
+ to work around side-effects and slowdowns on other CPU cores
+ when large amounts of console data are written.
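+
+For example, to turn off the write-combined mapping you would presumably
+append the option to the video parameter, along the lines of:
+
+	video=efifb:nowc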
+
--
Edgar Hucek <gimli@dark-green.com>
diff --git a/Documentation/features/core/tracehook/arch-support.txt b/Documentation/features/core/tracehook/arch-support.txt
index 5e97a89420ef..dfb638c2f842 100644
--- a/Documentation/features/core/tracehook/arch-support.txt
+++ b/Documentation/features/core/tracehook/arch-support.txt
@@ -25,7 +25,7 @@
| mn10300: | ok |
| nios2: | ok |
| openrisc: | ok |
- | parisc: | TODO |
+ | parisc: | ok |
| powerpc: | ok |
| s390: | ok |
| score: | TODO |
diff --git a/Documentation/filesystems/caching/netfs-api.txt b/Documentation/filesystems/caching/netfs-api.txt
index aed6b94160b1..0eb31de3a2c1 100644
--- a/Documentation/filesystems/caching/netfs-api.txt
+++ b/Documentation/filesystems/caching/netfs-api.txt
@@ -151,8 +151,6 @@ To define an object, a structure of the following type should be filled out:
void (*mark_pages_cached)(void *cookie_netfs_data,
struct address_space *mapping,
struct pagevec *cached_pvec);
-
- void (*now_uncached)(void *cookie_netfs_data);
};
This has the following fields:
diff --git a/Documentation/filesystems/dax.txt b/Documentation/filesystems/dax.txt
index a7e6e14aeb08..3be3b266be41 100644
--- a/Documentation/filesystems/dax.txt
+++ b/Documentation/filesystems/dax.txt
@@ -63,9 +63,8 @@ Filesystem support consists of
- implementing an mmap file operation for DAX files which sets the
VM_MIXEDMAP and VM_HUGEPAGE flags on the VMA, and setting the vm_ops to
include handlers for fault, pmd_fault, page_mkwrite, pfn_mkwrite. These
- handlers should probably call dax_iomap_fault() (for fault and page_mkwrite
- handlers), dax_iomap_pmd_fault(), dax_pfn_mkwrite() passing the appropriate
- iomap operations.
+ handlers should probably call dax_iomap_fault() passing the appropriate
+ fault size and iomap operations.
- calling iomap_zero_range() passing appropriate iomap operations instead of
block_truncate_page() for DAX files
- ensuring that there is sufficient locking between reads, writes,
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 73e7d91f03dc..405a3df759b3 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -829,9 +829,7 @@ struct address_space_operations {
swap_activate: Called when swapon is used on a file to allocate
space if necessary and pin the block lookup information in
memory. A return value of zero indicates success,
- in which case this file can be used to back swapspace. The
- swapspace operations will be proxied to this address space's
- ->swap_{out,in} methods.
+ in which case this file can be used to back swapspace.
swap_deactivate: Called during swapoff on files where swap_activate
was successful.
diff --git a/Documentation/gpio/drivers-on-gpio.txt b/Documentation/gpio/drivers-on-gpio.txt
index 306513251713..9a78d385b92e 100644
--- a/Documentation/gpio/drivers-on-gpio.txt
+++ b/Documentation/gpio/drivers-on-gpio.txt
@@ -84,6 +84,11 @@ hardware descriptions such as device tree or ACPI:
NAND flash MTD subsystem and provides chip access and partition parsing like
any other NAND driving hardware.
+- ps2-gpio: drivers/input/serio/ps2-gpio.c is used to drive a PS/2 (IBM) serio
+ bus, data and clock lines, by bit-banging two GPIO lines. It will appear like
+ any other serio bus to the system and makes it possible to connect drivers
+ for e.g. keyboards and other PS/2 protocol-based devices.
+
Apart from this there are special GPIO drivers in subsystems like MMC/SD to
read card detect and write protect GPIO lines, and in the TTY serial subsystem
to emulate MCTRL (modem control) signals CTS/RTS by using two GPIO lines. The
diff --git a/Documentation/gpio/gpio-legacy.txt b/Documentation/gpio/gpio-legacy.txt
index b34fd94f7089..5eacc147ea87 100644
--- a/Documentation/gpio/gpio-legacy.txt
+++ b/Documentation/gpio/gpio-legacy.txt
@@ -459,7 +459,7 @@ pin controller?
This is done by registering "ranges" of pins, which are essentially
cross-reference tables. These are described in
-Documentation/pinctrl.txt
+Documentation/driver-api/pinctl.rst
While the pin allocation is totally managed by the pinctrl subsystem,
gpio (under gpiolib) is still maintained by gpio drivers. It may happen
diff --git a/Documentation/gpu/drm-internals.rst b/Documentation/gpu/drm-internals.rst
index 0d936c67bf7d..5ee9674fb9e9 100644
--- a/Documentation/gpu/drm-internals.rst
+++ b/Documentation/gpu/drm-internals.rst
@@ -201,6 +201,8 @@ drivers.
Open/Close, File Operations and IOCTLs
======================================
+.. _drm_driver_fops:
+
File Operations
---------------
diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst
index 7c5e2549a58a..13dd237418cc 100644
--- a/Documentation/gpu/drm-kms-helpers.rst
+++ b/Documentation/gpu/drm-kms-helpers.rst
@@ -296,3 +296,12 @@ Auxiliary Modeset Helpers
.. kernel-doc:: drivers/gpu/drm/drm_modeset_helper.c
:export:
+
+Framebuffer GEM Helper Reference
+================================
+
+.. kernel-doc:: drivers/gpu/drm/drm_gem_framebuffer_helper.c
+ :doc: overview
+
+.. kernel-doc:: drivers/gpu/drm/drm_gem_framebuffer_helper.c
+ :export:
diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst
index 2d77c9580164..307284125d7a 100644
--- a/Documentation/gpu/drm-kms.rst
+++ b/Documentation/gpu/drm-kms.rst
@@ -523,9 +523,6 @@ Color Management Properties
.. kernel-doc:: drivers/gpu/drm/drm_color_mgmt.c
:doc: overview
-.. kernel-doc:: include/drm/drm_color_mgmt.h
- :internal:
-
.. kernel-doc:: drivers/gpu/drm/drm_color_mgmt.c
:export:
@@ -554,60 +551,8 @@ various modules/drivers.
Vertical Blanking
=================
-Vertical blanking plays a major role in graphics rendering. To achieve
-tear-free display, users must synchronize page flips and/or rendering to
-vertical blanking. The DRM API offers ioctls to perform page flips
-synchronized to vertical blanking and wait for vertical blanking.
-
-The DRM core handles most of the vertical blanking management logic,
-which involves filtering out spurious interrupts, keeping race-free
-blanking counters, coping with counter wrap-around and resets and
-keeping use counts. It relies on the driver to generate vertical
-blanking interrupts and optionally provide a hardware vertical blanking
-counter. Drivers must implement the following operations.
-
-- int (\*enable_vblank) (struct drm_device \*dev, int crtc); void
- (\*disable_vblank) (struct drm_device \*dev, int crtc);
- Enable or disable vertical blanking interrupts for the given CRTC.
-
-- u32 (\*get_vblank_counter) (struct drm_device \*dev, int crtc);
- Retrieve the value of the vertical blanking counter for the given
- CRTC. If the hardware maintains a vertical blanking counter its value
- should be returned. Otherwise drivers can use the
- :c:func:`drm_vblank_count()` helper function to handle this
- operation.
-
-Drivers must initialize the vertical blanking handling core with a call
-to :c:func:`drm_vblank_init()` in their load operation.
-
-Vertical blanking interrupts can be enabled by the DRM core or by
-drivers themselves (for instance to handle page flipping operations).
-The DRM core maintains a vertical blanking use count to ensure that the
-interrupts are not disabled while a user still needs them. To increment
-the use count, drivers call :c:func:`drm_vblank_get()`. Upon
-return vertical blanking interrupts are guaranteed to be enabled.
-
-To decrement the use count drivers call
-:c:func:`drm_vblank_put()`. Only when the use count drops to zero
-will the DRM core disable the vertical blanking interrupts after a delay
-by scheduling a timer. The delay is accessible through the
-vblankoffdelay module parameter or the ``drm_vblank_offdelay`` global
-variable and expressed in milliseconds. Its default value is 5000 ms.
-Zero means never disable, and a negative value means disable
-immediately. Drivers may override the behaviour by setting the
-:c:type:`struct drm_device <drm_device>`
-vblank_disable_immediate flag, which when set causes vblank interrupts
-to be disabled immediately regardless of the drm_vblank_offdelay
-value. The flag should only be set if there's a properly working
-hardware vblank counter present.
-
-When a vertical blanking interrupt occurs drivers only need to call the
-:c:func:`drm_handle_vblank()` function to account for the
-interrupt.
-
-Resources allocated by :c:func:`drm_vblank_init()` must be freed
-with a call to :c:func:`drm_vblank_cleanup()` in the driver unload
-operation handler.
+.. kernel-doc:: drivers/gpu/drm/drm_vblank.c
+ :doc: vblank handling
Vertical Blanking and Interrupt Handling Functions Reference
------------------------------------------------------------
diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst
index 9412798645c1..b08e9dcd9177 100644
--- a/Documentation/gpu/drm-mm.rst
+++ b/Documentation/gpu/drm-mm.rst
@@ -191,7 +191,7 @@ acquired and release by :c:func:`calling drm_gem_object_get()` and
holding the lock.
When the last reference to a GEM object is released the GEM core calls
-the :c:type:`struct drm_driver <drm_driver>` gem_free_object
+the :c:type:`struct drm_driver <drm_driver>` gem_free_object_unlocked
operation. That operation is mandatory for GEM-enabled drivers and must
free the GEM object and all associated resources.
@@ -492,7 +492,7 @@ DRM Sync Objects
:doc: Overview
.. kernel-doc:: include/drm/drm_syncobj.h
- :export:
+ :internal:
.. kernel-doc:: drivers/gpu/drm/drm_syncobj.c
:export:
diff --git a/Documentation/gpu/drm-uapi.rst b/Documentation/gpu/drm-uapi.rst
index 858457567d3d..679373b4a03f 100644
--- a/Documentation/gpu/drm-uapi.rst
+++ b/Documentation/gpu/drm-uapi.rst
@@ -160,6 +160,8 @@ other hand, a driver requires shared state between clients which is
visible to user-space and accessible beyond open-file boundaries, they
cannot support render nodes.
+.. _drm_driver_ioctl:
+
IOCTL Support on Device Nodes
=============================
diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst
index 9c7ed3e3f1e9..2e7ee0313c1c 100644
--- a/Documentation/gpu/i915.rst
+++ b/Documentation/gpu/i915.rst
@@ -417,6 +417,10 @@ integrate with drm/i915 and to handle the `DRM_I915_PERF_OPEN` ioctl.
:functions: i915_perf_open_ioctl
.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
:functions: i915_perf_release
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+ :functions: i915_perf_add_config_ioctl
+.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
+ :functions: i915_perf_remove_config_ioctl
i915 Perf Stream
----------------
@@ -477,4 +481,16 @@ specific details than found in the more high-level sections.
.. kernel-doc:: drivers/gpu/drm/i915/i915_perf.c
:internal:
-.. WARNING: DOCPROC directive not supported: !Cdrivers/gpu/drm/i915/i915_irq.c
+Style
+=====
+
+The drm/i915 driver codebase has some style rules in addition to (and, in some
+cases, deviating from) the kernel coding style.
+
+Register macro definition style
+-------------------------------
+
+The style guide for ``i915_reg.h``.
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_reg.h
+ :doc: The i915 register macro definition style guide
diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
index 1ae42006deea..22af55d06ab8 100644
--- a/Documentation/gpu/todo.rst
+++ b/Documentation/gpu/todo.rst
@@ -108,8 +108,8 @@ This would be especially useful for tinydrm:
crtc state, clear that to the max values, x/y = 0 and w/h = MAX_INT, in
__drm_atomic_helper_crtc_duplicate_state().
-- Move tinydrm_merge_clips into drm_framebuffer.c, dropping the tinydrm_
- prefix ofc and using drm_fb_. drm_framebuffer.c makes sense since this
+- Move tinydrm_merge_clips into drm_framebuffer.c, dropping the tinydrm\_
+ prefix ofc and using drm_fb\_. drm_framebuffer.c makes sense since this
is a function useful to implement the fb->dirty function.
- Create a new drm_fb_dirty function which does essentially what e.g.
diff --git a/Documentation/hwmon/ftsteutates b/Documentation/hwmon/ftsteutates
index 8c10a916de20..af54db92391b 100644
--- a/Documentation/hwmon/ftsteutates
+++ b/Documentation/hwmon/ftsteutates
@@ -18,6 +18,10 @@ enhancements. It can monitor up to 4 voltages, 16 temperatures and
8 fans. It also contains an integrated watchdog which is currently
implemented in this driver.
+To clear a temperature or fan alarm, execute the following command with the
+correct path to the alarm file:
+ echo 0 >XXXX_alarm
+
Specification of the chip can be found here:
ftp://ftp.ts.fujitsu.com/pub/Mainboard-OEM-Sales/Services/Software&Tools/Linux_SystemMonitoring&Watchdog&GPIO/BMC-Teutates_Specification_V1.21.pdf
ftp://ftp.ts.fujitsu.com/pub/Mainboard-OEM-Sales/Services/Software&Tools/Linux_SystemMonitoring&Watchdog&GPIO/Fujitsu_mainboards-1-Sensors_HowTo-en-US.pdf
diff --git a/Documentation/hwmon/ibm-cffps b/Documentation/hwmon/ibm-cffps
new file mode 100644
index 000000000000..e05ecd8ecfcf
--- /dev/null
+++ b/Documentation/hwmon/ibm-cffps
@@ -0,0 +1,54 @@
+Kernel driver ibm-cffps
+=======================
+
+Supported chips:
+ * IBM Common Form Factor power supply
+
+Author: Eddie James <eajames@us.ibm.com>
+
+Description
+-----------
+
+This driver supports IBM Common Form Factor (CFF) power supplies. This driver
+is a client to the core PMBus driver.
+
+Usage Notes
+-----------
+
+This driver does not auto-detect devices. You will have to instantiate the
+devices explicitly. Please see Documentation/i2c/instantiating-devices for
+details.
+
+Sysfs entries
+-------------
+
+The following attributes are supported:
+
+curr1_alarm Output current over-current alarm.
+curr1_input Measured output current in mA.
+curr1_label "iout1"
+
+fan1_alarm Fan 1 warning.
+fan1_fault Fan 1 fault.
+fan1_input Fan 1 speed in RPM.
+fan2_alarm Fan 2 warning.
+fan2_fault Fan 2 fault.
+fan2_input Fan 2 speed in RPM.
+
+in1_alarm Input voltage under-voltage alarm.
+in1_input Measured input voltage in mV.
+in1_label "vin"
+in2_alarm Output voltage over-voltage alarm.
+in2_input Measured output voltage in mV.
+in2_label "vout1"
+
+power1_alarm Input fault or alarm.
+power1_input Measured input power in uW.
+power1_label "pin"
+
+temp1_alarm PSU inlet ambient temperature over-temperature alarm.
+temp1_input Measured PSU inlet ambient temp in millidegrees C.
+temp2_alarm Secondary rectifier temp over-temperature alarm.
+temp2_input Measured secondary rectifier temp in millidegrees C.
+temp3_alarm ORing FET temperature over-temperature alarm.
+temp3_input Measured ORing FET temperature in millidegrees C.
diff --git a/Documentation/hwmon/lm25066 b/Documentation/hwmon/lm25066
index 2cb20ebb234d..3fa6bf820c88 100644
--- a/Documentation/hwmon/lm25066
+++ b/Documentation/hwmon/lm25066
@@ -29,6 +29,11 @@ Supported chips:
Addresses scanned: -
Datasheet:
http://www.national.com/pf/LM/LM5066.html
+ * Texas Instruments LM5066I
+ Prefix: 'lm5066i'
+ Addresses scanned: -
+ Datasheet:
+ http://www.ti.com/product/LM5066I
Author: Guenter Roeck <linux@roeck-us.net>
@@ -37,8 +42,8 @@ Description
-----------
This driver supports hardware monitoring for National Semiconductor / TI LM25056,
-LM25063, LM25066, LM5064, and LM5066 Power Management, Monitoring, Control, and
-Protection ICs.
+LM25063, LM25066, LM5064, and LM5066/LM5066I Power Management, Monitoring,
+Control, and Protection ICs.
The driver is a client driver to the core PMBus driver. Please see
Documentation/hwmon/pmbus for details on PMBus client drivers.
diff --git a/Documentation/iio/ep93xx_adc.txt b/Documentation/iio/ep93xx_adc.txt
new file mode 100644
index 000000000000..23053e7817bd
--- /dev/null
+++ b/Documentation/iio/ep93xx_adc.txt
@@ -0,0 +1,29 @@
+Cirrus Logic EP93xx ADC driver.
+
+1. Overview
+
+The driver is intended to work on both low-end (EP9301, EP9302) devices with
+5-channel ADC and high-end (EP9307, EP9312, EP9315) devices with 10-channel
+touchscreen/ADC module.
+
+2. Channel numbering
+
+Numbering scheme for channels 0..4 is defined in EP9301 and EP9302 datasheets.
+EP9307, EP9312 and EP9315 have 3 more channels (8 in total), but their
+numbering is not defined, so the last three are numbered arbitrarily here.
+
+Assuming ep93xx_adc is IIO device0, you'd find the following entries under
+/sys/bus/iio/devices/iio:device0/:
+
+ +-----------------+---------------+
+ | sysfs entry | ball/pin name |
+ +-----------------+---------------+
+ | in_voltage0_raw | YM |
+ | in_voltage1_raw | SXP |
+ | in_voltage2_raw | SXM |
+ | in_voltage3_raw | SYP |
+ | in_voltage4_raw | SYM |
+ | in_voltage5_raw | XP |
+ | in_voltage6_raw | XM |
+ | in_voltage7_raw | YP |
+ +-----------------+---------------+
diff --git a/Documentation/infiniband/tag_matching.txt b/Documentation/infiniband/tag_matching.txt
new file mode 100644
index 000000000000..d2a3bf819226
--- /dev/null
+++ b/Documentation/infiniband/tag_matching.txt
@@ -0,0 +1,64 @@
+Tag matching logic
+
+The MPI standard defines a set of rules, known as tag-matching, for matching
+source send operations to destination receives. The following parameters must
+match the following source and destination parameters:
+* Communicator
+* User tag - wild card may be specified by the receiver
+* Source rank – wild card may be specified by the receiver
+* Destination rank – wild
+The ordering rules require that when more than one pair of send and receive
+message envelopes may match, the pair that includes the earliest posted-send
+and the earliest posted-receive is the pair that must be used to satisfy the
+matching operation. However, this doesn’t imply that tags are consumed in
+the order they are created, e.g., a later generated tag may be consumed, if
+earlier tags can’t be used to satisfy the matching rules.
+
+When a message is sent from the sender to the receiver, the communication
+library may attempt to process the operation either after or before the
+corresponding matching receive is posted. If a matching receive is posted,
+this is an expected message, otherwise it is called an unexpected message.
+Implementations frequently use different matching schemes for these two
+different matching instances.
+
+To keep MPI library memory footprint down, MPI implementations typically use
+two different protocols for this purpose:
+
+1. The Eager protocol - the complete message is sent when the send is
+processed by the sender. A send completion is received in the send_cq,
+notifying that the buffer can be reused.
+
+2. The Rendezvous Protocol - the sender sends the tag-matching header,
+and perhaps a portion of data when first notifying the receiver. When the
+corresponding buffer is posted, the responder will use the information from
+the header to initiate an RDMA READ operation directly to the matching buffer.
+A fin message needs to be received in order for the buffer to be reused.
+
+Tag matching implementation
+
+There are two types of matching objects used, the posted receive list and the
+unexpected message list. The application posts receive buffers through calls
+to the MPI receive routines in the posted receive list and posts send messages
+using the MPI send routines. The head of the posted receive list may be
+maintained by the hardware, with the software expected to shadow this list.
+
+When a send is initiated and arrives at the receive side, if there is no
+pre-posted receive for this arriving message, it is passed to the software and
+placed in the unexpected message list. Otherwise the match is processed,
+including rendezvous processing, if appropriate, delivering the data to the
+specified receive buffer. This allows overlapping receive-side MPI tag
+matching with computation.
+
+When a receive-message is posted, the communication library will first check
+the software unexpected message list for a matching receive. If a match is
+found, data is delivered to the user buffer, using a software controlled
+protocol. The UCX implementation uses either an eager or rendezvous protocol,
+depending on data size. If no match is found, and if the entire pre-posted
+receive list is maintained by the hardware and there is space to add one more
+pre-posted receive to this list, this receive is passed to the hardware.
+Software is expected to shadow this list, to help with processing MPI cancel
+operations. In addition, because hardware and software are not expected to be
+tightly synchronized with respect to the tag-matching operation, this shadow
+list is used to detect the case that a pre-posted receive is passed to the
+hardware, as the matching unexpected message is being passed from the hardware
+to the software.
diff --git a/Documentation/input/input.rst b/Documentation/input/input.rst
index 3b3a22975106..47f86a4bf16c 100644
--- a/Documentation/input/input.rst
+++ b/Documentation/input/input.rst
@@ -109,7 +109,7 @@ evdev nodes are created with minors starting with 256.
keyboard
~~~~~~~~
-``keyboard`` is in-kernel input handler ad is a part of VT code. It
+``keyboard`` is in-kernel input handler and is a part of VT code. It
consumes keyboard keystrokes and handles user input for VT consoles.
mousedev
diff --git a/Documentation/input/joydev/index.rst b/Documentation/input/joydev/index.rst
index 8d9666c7561c..ebcff43056e2 100644
--- a/Documentation/input/joydev/index.rst
+++ b/Documentation/input/joydev/index.rst
@@ -12,7 +12,6 @@ Linux Joystick support
.. toctree::
:maxdepth: 3
- :numbered:
joystick
joystick-api
diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
index 7003141a6d4f..329e740adea7 100644
--- a/Documentation/kbuild/makefiles.txt
+++ b/Documentation/kbuild/makefiles.txt
@@ -297,9 +297,9 @@ more details, with real examples.
ccflags-y specifies options for compiling with $(CC).
Example:
- # drivers/acpi/Makefile
- ccflags-y := -Os
- ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT
+ # drivers/acpi/acpica/Makefile
+ ccflags-y := -Os -D_LINUX -DBUILDING_ACPICA
+ ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT
This variable is necessary because the top Makefile owns the
variable $(KBUILD_CFLAGS) and uses it for compilation flags for the
diff --git a/Documentation/locking/crossrelease.txt b/Documentation/locking/crossrelease.txt
new file mode 100644
index 000000000000..bdf1423d5f99
--- /dev/null
+++ b/Documentation/locking/crossrelease.txt
@@ -0,0 +1,874 @@
+Crossrelease
+============
+
+Started by Byungchul Park <byungchul.park@lge.com>
+
+Contents:
+
+ (*) Background
+
+ - What causes deadlock
+ - How lockdep works
+
+ (*) Limitation
+
+ - Limit lockdep
+ - Pros from the limitation
+ - Cons from the limitation
+ - Relax the limitation
+
+ (*) Crossrelease
+
+ - Introduce crossrelease
+ - Introduce commit
+
+ (*) Implementation
+
+ - Data structures
+ - How crossrelease works
+
+ (*) Optimizations
+
+ - Avoid duplication
+ - Lockless for hot paths
+
+ (*) APPENDIX A: What lockdep does to work aggressively
+
+ (*) APPENDIX B: How to avoid adding false dependencies
+
+
+==========
+Background
+==========
+
+What causes deadlock
+--------------------
+
+A deadlock occurs when a context is waiting for an event to happen,
+which is impossible because another (or the same) context that can trigger
+the event is also waiting for another (or the same) event to happen, which
+is also impossible for the same reason.
+
+For example:
+
+ A context going to trigger event C is waiting for event A to happen.
+ A context going to trigger event A is waiting for event B to happen.
+ A context going to trigger event B is waiting for event C to happen.
+
+A deadlock occurs when these three wait operations run at the same time,
+because event C cannot be triggered if event A does not happen, which in
+turn cannot be triggered if event B does not happen, which in turn
+cannot be triggered if event C does not happen. After all, no event can
+be triggered since none of them ever meets its condition to wake up.
+
+A dependency might exist between two waiters and a deadlock might happen
+due to an incorrect relationship between dependencies. Thus, we must
+first define what a dependency is. A dependency exists between two waiters if:
+
+ 1. There are two waiters waiting for each event at a given time.
+ 2. The only way to wake up each waiter is to trigger its event.
+ 3. Whether one can be woken up depends on whether the other can.
+
+Each wait in the example creates its dependency like:
+
+ Event C depends on event A.
+ Event A depends on event B.
+ Event B depends on event C.
+
+ NOTE: Precisely speaking, a dependency is one between whether a
+ waiter for an event can be woken up and whether another waiter for
+ another event can be woken up. However from now on, we will describe
+ a dependency as if it's one between an event and another event for
+ simplicity.
+
+And they form circular dependencies like:
+
+ -> C -> A -> B -
+ / \
+ \ /
+ ----------------
+
+ where 'A -> B' means that event A depends on event B.
+
+Such circular dependencies lead to a deadlock since no waiter can meet
+its condition to wake up as described.
+
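+For instance, the classic AB-BA pattern with two kernel mutexes is a
+minimal concrete case of such a circle (an illustrative C sketch only,
+with made-up lock names):
+
+	static DEFINE_MUTEX(lock_a);
+	static DEFINE_MUTEX(lock_b);
+
+	CONTEXT X			CONTEXT Y
+	---------			---------
+	mutex_lock(&lock_a);
+					mutex_lock(&lock_b);
+	mutex_lock(&lock_b);		/* waits for Y to release lock_b */
+					mutex_lock(&lock_a); /* DEADLOCK */
+
+Releasing lock_a depends on acquiring lock_b, and releasing lock_b
+depends on acquiring lock_a, so neither event can ever be triggered once
+both contexts reach their second mutex_lock().
+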
+CONCLUSION
+
+Circular dependencies cause a deadlock.
+
+
+How lockdep works
+-----------------
+
+Lockdep tries to detect a deadlock by checking dependencies created by
+lock operations, acquire and release. Waiting for a lock corresponds to
+waiting for an event, and releasing a lock corresponds to triggering an
+event in the previous section.
+
+In short, lockdep does:
+
+ 1. Detect a new dependency.
+ 2. Add the dependency into a global graph.
+ 3. Check if that makes dependencies circular.
+ 4. Report a deadlock or its possibility if so.
+
+For example, consider a graph built by lockdep that looks like:
+
+ A -> B -
+ \
+ -> E
+ /
+ C -> D -
+
+ where A, B,..., E are different lock classes.
+
+Lockdep will add a dependency into the graph on detection of a new
+dependency. For example, it will add a dependency 'E -> C' when a new
+dependency between lock E and lock C is detected. Then the graph will be:
+
+ A -> B -
+ \
+ -> E -
+ / \
+ -> C -> D - \
+ / /
+ \ /
+ ------------------
+
+ where A, B,..., E are different lock classes.
+
+This graph contains a subgraph which demonstrates circular dependencies:
+
+ -> E -
+ / \
+ -> C -> D - \
+ / /
+ \ /
+ ------------------
+
+ where C, D and E are different lock classes.
+
+This is the condition under which a deadlock might occur. Lockdep
+reports it on detection after adding a new dependency. This is how
+lockdep works.
+
+CONCLUSION
+
+Lockdep detects a deadlock or its possibility by checking if circular
+dependencies were created after adding each new dependency.
+
+
+==========
+Limitation
+==========
+
+Limit lockdep
+-------------
+
+By limiting lockdep to work only on typical locks, e.g. spin locks and
+mutexes, which are released within the acquire context, the
+implementation becomes simple but its capacity for detection becomes
+limited. Let's check the pros and cons in the next sections.
+
+
+Pros from the limitation
+------------------------
+
+Given the limitation, when a context acquires a lock, none of the locks
+in its held_locks can be released while the context has to wait for the
+new lock, which means all waiters for the locks in the held_locks are
+stuck. This is exactly the case that creates dependencies between each
+lock in the held_locks and the lock to acquire.
+
+For example:
+
+ CONTEXT X
+ ---------
+ acquire A
+ acquire B /* Add a dependency 'A -> B' */
+ release B
+ release A
+
+ where A and B are different lock classes.
+
+When acquiring lock A, the held_locks of CONTEXT X is empty thus no
+dependency is added. But when acquiring lock B, lockdep detects and adds
+a new dependency 'A -> B' between lock A in the held_locks and lock B.
+They can be simply added whenever acquiring each lock.
+
+And the data required by lockdep exists in a local structure, held_locks
+embedded in task_struct. By forcing the data to be accessed only within
+its owning context, lockdep can avoid races without explicit locks while
+handling the local data.
+
+Lastly, lockdep only needs to keep the locks currently being held in
+order to build a dependency graph. However, once the limitation is
+relaxed, it needs to keep even locks that have already been released,
+because the decision whether they created dependencies might be
+long-deferred.
+
+To sum up, we can expect several advantages from the limitation:
+
+ 1. Lockdep can easily identify a dependency when acquiring a lock.
+ 2. Races are avoidable while accessing local locks in a held_locks.
+ 3. Lockdep only needs to keep locks currently being held.
+
+CONCLUSION
+
+Given the limitation, the implementation becomes simple and efficient.
+
+
+Cons from the limitation
+------------------------
+
+Given the limitation, lockdep is applicable only to typical locks. For
+example, page locks for page access or completions for synchronization
+cannot work with lockdep.
+
+Can we detect deadlocks below, under the limitation?
+
+Example 1:
+
+ CONTEXT X CONTEXT Y CONTEXT Z
+ --------- --------- ----------
+ mutex_lock A
+ lock_page B
+ lock_page B
+ mutex_lock A /* DEADLOCK */
+ unlock_page B held by X
+ unlock_page B
+ mutex_unlock A
+ mutex_unlock A
+
+ where A and B are different lock classes.
+
+No, we cannot.
+
+Example 2:
+
+ CONTEXT X CONTEXT Y
+ --------- ---------
+ mutex_lock A
+ mutex_lock A
+ wait_for_complete B /* DEADLOCK */
+ complete B
+ mutex_unlock A
+ mutex_unlock A
+
+ where A is a lock class and B is a completion variable.
+
+No, we cannot.
+
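+In real kernel primitives, Example 2 corresponds roughly to the sketch
+below (illustrative only; the mutex and completion names are made up):
+
+	static DEFINE_MUTEX(a);
+	static DECLARE_COMPLETION(done);
+
+	CONTEXT X			CONTEXT Y
+	---------			---------
+	mutex_lock(&a);
+					mutex_lock(&a);	/* waits for X */
+	wait_for_completion(&done);	/* waits for Y, DEADLOCK */
+					complete(&done);	/* never reached */
+					mutex_unlock(&a);
+	mutex_unlock(&a);
+
+Lockdep, limited to locks released within their acquire context, cannot
+see the dependency created by wait_for_completion() here.
+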
+CONCLUSION
+
+Given the limitation, lockdep cannot detect a deadlock or its
+possibility caused by page locks or completions.
+
+
+Relax the limitation
+--------------------
+
+Under the limitation, the things that can create dependencies are limited
+to typical locks. However, synchronization primitives like page locks and
+completions, which are allowed to be released in any context, also
+create dependencies and can cause a deadlock. So lockdep should track
+these locks to do a better job. We have to relax the limitation for
+these locks to work with lockdep.
+
+Detecting dependencies is very important for lockdep to work because
+adding a dependency means adding an opportunity to check whether it
+causes a deadlock. The more dependencies lockdep adds, the more
+thoroughly it works. Thus lockdep has to do its best to detect and add
+as many true dependencies into the graph as possible.
+
+For example, considering only typical locks, lockdep builds a graph like:
+
+ A -> B -
+ \
+ -> E
+ /
+ C -> D -
+
+ where A, B,..., E are different lock classes.
+
+On the other hand, under the relaxation, additional dependencies might
+be created and added. Assuming additional 'FX -> C' and 'E -> GX' are
+added thanks to the relaxation, the graph will be:
+
+ A -> B -
+ \
+ -> E -> GX
+ /
+ FX -> C -> D -
+
+ where A, B,..., E, FX and GX are different lock classes, and a suffix
+ 'X' is added on non-typical locks.
+
+The latter graph gives us more chances to check circular dependencies
+than the former. However, it might suffer performance degradation, since
+relaxing the limitation, which is what lets the design and implementation
+of lockdep be efficient, inevitably introduces some inefficiency. So
+lockdep should provide two options, strong detection and efficient
+detection.
+
+Choosing efficient detection:
+
+ Lockdep works only with locks restricted to be released within the
+ acquire context. However, lockdep works efficiently.
+
+Choosing strong detection:
+
+ Lockdep works with all synchronization primitives. However, lockdep
+ suffers performance degradation.
+
+CONCLUSION
+
+By relaxing the limitation, lockdep can add additional dependencies, giving
+additional opportunities to check for circular dependencies.
+
+
+============
+Crossrelease
+============
+
+Introduce crossrelease
+----------------------
+
+In order to allow lockdep to handle additional dependencies created by
+what might be released in any context, namely a 'crosslock', we have to
+be able to identify those created by crosslocks. The proposed
+'crossrelease' feature provides a way to do that.
+
+Crossrelease feature has to do:
+
+ 1. Identify dependencies created by crosslocks.
+ 2. Add the dependencies into a dependency graph.
+
+That's all. Once a meaningful dependency is added into the graph, lockdep
+works with the graph as it always did. The most important thing the
+crossrelease feature has to do is to correctly identify and add true
+dependencies into the global graph.
+
+A dependency, e.g. 'A -> B', can be identified only in A's release
+context, because the decision required to identify the dependency can be
+made only in the release context. That decision is whether A can be
+released so that a waiter for A can be woken up. It cannot be made
+anywhere other than A's release context.
+
+This does not matter for typical locks because each acquire context is
+the same as its release context, thus lockdep can decide whether a lock
+can be released in the acquire context. However for crosslocks, lockdep
+cannot make the decision in the acquire context but has to wait until
+the release context is identified.
+
+Therefore, a deadlock caused by crosslocks cannot be detected just when
+it happens, because it cannot be identified until the crosslocks are
+released. However, deadlock possibilities can be detected, and that is
+very worthwhile. See the 'APPENDIX A' section to check why.
+
+CONCLUSION
+
+Using crossrelease feature, lockdep can work with what might be released
+in any context, namely crosslock.
+
+
+Introduce commit
+----------------
+
+Since crossrelease defers the work of adding true dependencies of
+crosslocks until they are actually released, crossrelease has to queue
+all acquisitions which might create dependencies with the crosslocks.
+Then it identifies dependencies using the queued data in batches at a
+proper time. We call it 'commit'.
+
+There are four types of dependencies:
+
+1. TT type: 'typical lock A -> typical lock B'
+
+ Just when acquiring B, lockdep can see it's in the A's release
+ context. So the dependency between A and B can be identified
+ immediately. Commit is unnecessary.
+
+2. TC type: 'typical lock A -> crosslock BX'
+
+ Just when acquiring BX, lockdep can see it's in the A's release
+ context. So the dependency between A and BX can be identified
+ immediately. Commit is unnecessary, too.
+
+3. CT type: 'crosslock AX -> typical lock B'
+
+ When acquiring B, lockdep cannot identify the dependency because
+ there's no way to know if it's in the AX's release context. It has
+ to wait until the decision can be made. Commit is necessary.
+
+4. CC type: 'crosslock AX -> crosslock BX'
+
+ When acquiring BX, lockdep cannot identify the dependency because
+ there's no way to know if it's in the AX's release context. It has
+ to wait until the decision can be made. Commit is necessary.
+ But, handling CC type is not implemented yet. It's a future work.
+
+Lockdep can work without commit for typical locks, but the commit step is
+necessary once crosslocks are involved. With commit introduced, lockdep
+performs three steps. What lockdep does in each step is:
+
+1. Acquisition: For typical locks, lockdep does what it originally did
+ and queues the lock so that CT type dependencies can be checked using
+ it at the commit step. For crosslocks, it saves data which will be
+ used at the commit step and increases a reference count for it.
+
+2. Commit: No action is required for typical locks. For crosslocks,
+ lockdep adds CT type dependencies using the data saved at the
+ acquisition step.
+
+3. Release: No changes are required for typical locks. When a crosslock
+ is released, it decreases a reference count for it.
+
+CONCLUSION
+
+Crossrelease introduces commit step to handle dependencies of crosslocks
+in batches at a proper time.
+
+
+==============
+Implementation
+==============
+
+Data structures
+---------------
+
+Crossrelease introduces two main data structures.
+
+1. hist_lock
+
+ This is an array embedded in task_struct, for keeping lock history so
+ that dependencies can be added using them at the commit step. Since
+ it's local data, it can be accessed locklessly in the owner context.
+ The array is filled at the acquisition step and consumed at the
+ commit step. And it's managed in circular manner.
+
+2. cross_lock
+
+ One per lockdep_map exists. This is for keeping data of crosslocks
+ and used at the commit step.
+
+
+How crossrelease works
+----------------------
+
+The key to how crossrelease works is to defer the necessary work to an
+appropriate point in time and perform it all at once at the commit step.
+Let's take a look at examples step by step, starting from how lockdep
+works without crossrelease for typical locks.
+
+ acquire A /* Push A onto held_locks */
+ acquire B /* Push B onto held_locks and add 'A -> B' */
+ acquire C /* Push C onto held_locks and add 'B -> C' */
+ release C /* Pop C from held_locks */
+ release B /* Pop B from held_locks */
+ release A /* Pop A from held_locks */
+
+ where A, B and C are different lock classes.
+
+ NOTE: This document assumes that readers already understand how
+ lockdep works without crossrelease thus omits details. But there's
+ one thing to note. Lockdep pretends to pop a lock from held_locks
+ when releasing it. But it's subtly different from the original pop
+ operation because lockdep allows entries other than the top to be popped.
+
+In this case, lockdep adds 'the top of held_locks -> the lock to acquire'
+dependency every time acquiring a lock.
+
+After adding 'A -> B', a dependency graph will be:
+
+ A -> B
+
+ where A and B are different lock classes.
+
+And after adding 'B -> C', the graph will be:
+
+ A -> B -> C
+
+ where A, B and C are different lock classes.
+
+Let's perform the commit step even for typical locks to add dependencies.
+Of course, the commit step is not necessary for them, however, it works
+just as well because this is a more general way.
+
+ acquire A
+ /*
+ * Queue A into hist_locks
+ *
+ * In hist_locks: A
+ * In graph: Empty
+ */
+
+ acquire B
+ /*
+ * Queue B into hist_locks
+ *
+ * In hist_locks: A, B
+ * In graph: Empty
+ */
+
+ acquire C
+ /*
+ * Queue C into hist_locks
+ *
+ * In hist_locks: A, B, C
+ * In graph: Empty
+ */
+
+ commit C
+ /*
+ * Add 'C -> ?'
+ * Answer the following to decide '?'
+ * What has been queued since acquire C: Nothing
+ *
+ * In hist_locks: A, B, C
+ * In graph: Empty
+ */
+
+ release C
+
+ commit B
+ /*
+ * Add 'B -> ?'
+ * Answer the following to decide '?'
+ * What has been queued since acquire B: C
+ *
+ * In hist_locks: A, B, C
+ * In graph: 'B -> C'
+ */
+
+ release B
+
+ commit A
+ /*
+ * Add 'A -> ?'
+ * Answer the following to decide '?'
+ * What has been queued since acquire A: B, C
+ *
+ * In hist_locks: A, B, C
+ * In graph: 'B -> C', 'A -> B', 'A -> C'
+ */
+
+ release A
+
+ where A, B and C are different lock classes.
+
+In this case, dependencies are added at the commit step as described.
+
+After commits for A, B and C, the graph will be:
+
+ A -> B -> C
+
+ where A, B and C are different lock classes.
+
+ NOTE: A dependency 'A -> C' is optimized out.
+
+We can see that the former graph, built without the commit step, is the
+same as the latter graph built using commit steps. Of course the former
+way finishes building the graph earlier, which means we can detect a
+deadlock or its possibility sooner. So the former way would be preferred
+when possible. But we cannot avoid using the latter way for crosslocks.
+
+Let's look at how commit steps work for crosslocks. In this case, the
+commit step is performed only on crosslock BX for real. And it assumes
+that the BX release context is different from the BX acquire context.
+
+ BX RELEASE CONTEXT BX ACQUIRE CONTEXT
+ ------------------ ------------------
+ acquire A
+ /*
+ * Push A onto held_locks
+ * Queue A into hist_locks
+ *
+ * In held_locks: A
+ * In hist_locks: A
+ * In graph: Empty
+ */
+
+ acquire BX
+ /*
+ * Add 'the top of held_locks -> BX'
+ *
+ * In held_locks: A
+ * In hist_locks: A
+ * In graph: 'A -> BX'
+ */
+
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ It must be guaranteed that the following operations are globally seen
+ after acquiring BX. This can be done by things like a barrier.
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ acquire C
+ /*
+ * Push C onto held_locks
+ * Queue C into hist_locks
+ *
+ * In held_locks: C
+ * In hist_locks: C
+ * In graph: 'A -> BX'
+ */
+
+ release C
+ /*
+ * Pop C from held_locks
+ *
+ * In held_locks: Empty
+ * In hist_locks: C
+ * In graph: 'A -> BX'
+ */
+ acquire D
+ /*
+ * Push D onto held_locks
+ * Queue D into hist_locks
+ * Add 'the top of held_locks -> D'
+ *
+ * In held_locks: A, D
+ * In hist_locks: A, D
+ * In graph: 'A -> BX', 'A -> D'
+ */
+ acquire E
+ /*
+ * Push E onto held_locks
+ * Queue E into hist_locks
+ *
+ * In held_locks: E
+ * In hist_locks: C, E
+ * In graph: 'A -> BX', 'A -> D'
+ */
+
+ release E
+ /*
+ * Pop E from held_locks
+ *
+ * In held_locks: Empty
+ * In hist_locks: D, E
+ * In graph: 'A -> BX', 'A -> D'
+ */
+ release D
+ /*
+ * Pop D from held_locks
+ *
+ * In held_locks: A
+ * In hist_locks: A, D
+ * In graph: 'A -> BX', 'A -> D'
+ */
+ commit BX
+ /*
+ * Add 'BX -> ?'
+ * What has been queued since acquire BX: C, E
+ *
+ * In held_locks: Empty
+ * In hist_locks: D, E
+ * In graph: 'A -> BX', 'A -> D',
+ * 'BX -> C', 'BX -> E'
+ */
+
+ release BX
+ /*
+ * In held_locks: Empty
+ * In hist_locks: D, E
+ * In graph: 'A -> BX', 'A -> D',
+ * 'BX -> C', 'BX -> E'
+ */
+ release A
+ /*
+ * Pop A from held_locks
+ *
+ * In held_locks: Empty
+ * In hist_locks: A, D
+ * In graph: 'A -> BX', 'A -> D',
+ * 'BX -> C', 'BX -> E'
+ */
+
+ where A, BX, C,..., E are different lock classes, and a suffix 'X' is
+ added on crosslocks.
+
+Crossrelease considers all acquisitions after acquiring BX as
+candidates which might create dependencies with BX. True dependencies
+will be determined when identifying the release context of BX. Meanwhile,
+all typical locks are queued so that they can be used at the commit step.
+And then two dependencies 'BX -> C' and 'BX -> E' are added at the
+commit step when identifying the release context.
+
+The final graph will be, with crossrelease:
+
+ -> C
+ /
+ -> BX -
+ / \
+ A - -> E
+ \
+ -> D
+
+ where A, BX, C,..., E are different lock classes, and a suffix 'X' is
+ added on crosslocks.
+
+However, the final graph will be, without crossrelease:
+
+ A -> D
+
+ where A and D are different lock classes.
+
+The former graph has three more dependencies, 'A -> BX', 'BX -> C' and
+'BX -> E' giving additional opportunities to check if they cause
+deadlocks. This way lockdep can detect a deadlock or its possibility
+caused by crosslocks.
+
+CONCLUSION
+
+We checked how crossrelease works with several examples.
+
+
+=============
+Optimizations
+=============
+
+Avoid duplication
+-----------------
+
+The crossrelease feature uses a cache like the one lockdep already uses
+for dependency chains, but this time it's for caching CT type
+dependencies. Once a dependency is cached, the same dependency will
+never be added again.
+
+
+Lockless for hot paths
+----------------------
+
+To keep all locks for later use at the commit step, crossrelease adopts
+a local array embedded in task_struct, which makes access to the data
+lockless by forcing it to happen only within the owner context. It's
+like how lockdep handles held_locks. A lockless implementation is important
+since typical locks are very frequently acquired and released.
+
+
+==================================================
+APPENDIX A: What lockdep does to work aggressively
+==================================================
+
+A deadlock actually occurs when all wait operations creating circular
+dependencies run at the same time. Even though they don't, a potential
+deadlock exists if the problematic dependencies exist. Thus it's
+meaningful to detect not only an actual deadlock but also its potential
+possibility. The latter is rather more valuable. When a deadlock actually
+occurs, we can identify what happens in the system by some means or
+other even without lockdep. However, there's no way to detect the
+possibility without lockdep unless the whole code is parsed in one's
+head, which is terrible. Lockdep does both, and crossrelease only
+focuses on the latter.
+
+Whether or not a deadlock actually occurs depends on several factors.
+For example, the order in which contexts are switched is one such
+factor. Assuming circular dependencies exist, a deadlock would occur
+when contexts are switched so that all wait operations creating the
+dependencies run simultaneously. Thus, to detect a deadlock possibility
+even in the case that it has not occurred yet, lockdep should consider
+all possible combinations of dependencies, trying to:
+
+1. Use a global dependency graph.
+
+ Lockdep combines all dependencies into one global graph and uses them,
+ regardless of which context generates them or what order contexts are
+ switched in. Only the aggregated dependencies are considered, so they
+ are prone to form a cycle whenever a problem exists.
+
+2. Check dependencies between classes instead of instances.
+
+ What actually causes a deadlock are lock instances. However,
+ lockdep checks dependencies between classes instead of instances.
+ This way lockdep can detect a deadlock which has not happened yet, but
+ might be caused in the future by other instances of the same class.
+
+3. Assume all acquisitions lead to waiting.
+
+ Although a lock might be acquired without waiting, even though waiting
+ is what actually creates a dependency, lockdep assumes all acquisitions
+ lead to waiting, since any of them might wait at some time or another.
+
+CONCLUSION
+
+Lockdep detects not only an actual deadlock but also its possibility,
+and the latter is more valuable.
+
+
+==================================================
+APPENDIX B: How to avoid adding false dependencies
+==================================================
+
+Recall what a dependency is. A dependency exists if:
+
+ 1. There are two waiters waiting for each event at a given time.
+ 2. The only way to wake up each waiter is to trigger its event.
+ 3. Whether one can be woken up depends on whether the other can.
+
+For example:
+
+ acquire A
+ acquire B /* A dependency 'A -> B' exists */
+ release B
+ release A
+
+ where A and B are different lock classes.
+
+A dependency 'A -> B' exists since:
+
+ 1. A waiter for A and a waiter for B might exist when acquiring B.
+ 2. The only way to wake up each is to release what it waits for.
+ 3. Whether the waiter for A can be woken up depends on whether the
+ other can. IOW, the task cannot release A if it fails to acquire B.
+
+For another example:
+
+   TASK X                          TASK Y
+   ------                          ------
+                                   acquire AX
+   acquire B /* A dependency 'AX -> B' exists */
+   release B
+   release AX held by Y
+
+ where AX and B are different lock classes, and a suffix 'X' is added
+ on crosslocks.
+
+Even in this case involving crosslocks, the same rule can be applied. A
+dependency 'AX -> B' exists since:
+
+ 1. A waiter for AX and a waiter for B might exist when acquiring B.
+ 2. The only way to wake up each is to release what it waits for.
+ 3. Whether the waiter for AX can be woken up depends on whether the
+ other can. IOW, TASK X cannot release AX if it fails to acquire B.
+
+Let's take a look at a more complicated example:
+
+   TASK X                          TASK Y
+   ------                          ------
+   acquire B
+   release B
+   fork Y
+                                   acquire AX
+   acquire C /* A dependency 'AX -> C' exists */
+   release C
+   release AX held by Y
+
+ where AX, B and C are different lock classes, and a suffix 'X' is
+ added on crosslocks.
+
+Does a dependency 'AX -> B' exist? Nope.
+
+Two waiters are essential to create a dependency. However, waiters for
+AX and B to create 'AX -> B' cannot exist at the same time in this
+example. Thus the dependency 'AX -> B' cannot be created.
+
+It would be ideal if the full set of true dependencies could be
+considered. But we can be sure of nothing but what actually happened.
+Relying on what actually happens at runtime, we can anyway add only
+true dependencies, though they might be just a subset of all true ones.
+It's similar to how lockdep works for typical locks: there might be
+more true dependencies than what lockdep has detected at runtime, but
+lockdep has no choice but to rely on what actually happens. Crossrelease
+also relies on it.
+
+CONCLUSION
+
+Relying on what actually happens, lockdep can avoid adding false
+dependencies.
diff --git a/Documentation/locking/rt-mutex-design.txt b/Documentation/locking/rt-mutex-design.txt
index 8666070d3189..6c6e8c2410de 100644
--- a/Documentation/locking/rt-mutex-design.txt
+++ b/Documentation/locking/rt-mutex-design.txt
@@ -97,9 +97,9 @@ waiter - A waiter is a struct that is stored on the stack of a blocked
a process being blocked on the mutex, it is fine to allocate
the waiter on the process's stack (local variable). This
structure holds a pointer to the task, as well as the mutex that
- the task is blocked on. It also has the plist node structures to
- place the task in the waiter_list of a mutex as well as the
- pi_list of a mutex owner task (described below).
+ the task is blocked on. It also has rbtree node structures to
+ place the task in the waiters rbtree of a mutex as well as the
+ pi_waiters rbtree of a mutex owner task (described below).
waiter is sometimes used in reference to the task that is waiting
on a mutex. This is the same as waiter->task.
@@ -179,53 +179,34 @@ again.
|
F->L5-+
+If process G has the highest priority in the chain, then all the tasks up
+the chain (A and B in this example) must have their priorities increased
+to that of G.
-Plist
------
-
-Before I go further and talk about how the PI chain is stored through lists
-on both mutexes and processes, I'll explain the plist. This is similar to
-the struct list_head functionality that is already in the kernel.
-The implementation of plist is out of scope for this document, but it is
-very important to understand what it does.
-
-There are a few differences between plist and list, the most important one
-being that plist is a priority sorted linked list. This means that the
-priorities of the plist are sorted, such that it takes O(1) to retrieve the
-highest priority item in the list. Obviously this is useful to store processes
-based on their priorities.
-
-Another difference, which is important for implementation, is that, unlike
-list, the head of the list is a different element than the nodes of a list.
-So the head of the list is declared as struct plist_head and nodes that will
-be added to the list are declared as struct plist_node.
-
-
-Mutex Waiter List
+Mutex Waiters Tree
-----------------
-Every mutex keeps track of all the waiters that are blocked on itself. The mutex
-has a plist to store these waiters by priority. This list is protected by
-a spin lock that is located in the struct of the mutex. This lock is called
-wait_lock. Since the modification of the waiter list is never done in
-interrupt context, the wait_lock can be taken without disabling interrupts.
+Every mutex keeps track of all the waiters that are blocked on itself. The
+mutex has a rbtree to store these waiters by priority. This tree is protected
+by a spin lock that is located in the struct of the mutex. This lock is called
+wait_lock.
-Task PI List
+Task PI Tree
------------
-To keep track of the PI chains, each process has its own PI list. This is
-a list of all top waiters of the mutexes that are owned by the process.
-Note that this list only holds the top waiters and not all waiters that are
+To keep track of the PI chains, each process has its own PI rbtree. This is
+a tree of all top waiters of the mutexes that are owned by the process.
+Note that this tree only holds the top waiters and not all waiters that are
blocked on mutexes owned by the process.
-The top of the task's PI list is always the highest priority task that
+The top of the task's PI tree is always the highest priority task that
is waiting on a mutex that is owned by the task. So if the task has
inherited a priority, it will always be the priority of the task that is
-at the top of this list.
+at the top of this tree.
-This list is stored in the task structure of a process as a plist called
-pi_list. This list is protected by a spin lock also in the task structure,
+This tree is stored in the task structure of a process as a rbtree called
+pi_waiters. It is protected by a spin lock also in the task structure,
called pi_lock. This lock may also be taken in interrupt context, so when
locking the pi_lock, interrupts must be disabled.
@@ -312,15 +293,12 @@ Mutex owner and flags
The mutex structure contains a pointer to the owner of the mutex. If the
mutex is not owned, this owner is set to NULL. Since all architectures
-have the task structure on at least a four byte alignment (and if this is
-not true, the rtmutex.c code will be broken!), this allows for the two
-least significant bits to be used as flags. This part is also described
-in Documentation/rt-mutex.txt, but will also be briefly described here.
-
-Bit 0 is used as the "Pending Owner" flag. This is described later.
-Bit 1 is used as the "Has Waiters" flags. This is also described later
- in more detail, but is set whenever there are waiters on a mutex.
+have the task structure on at least a two byte alignment (and if this is
+not true, the rtmutex.c code will be broken!), this allows for the least
+significant bit to be used as a flag. Bit 0 is used as the "Has Waiters"
+flag. It's set whenever there are waiters on a mutex.
+See Documentation/locking/rt-mutex.txt for further details.
cmpxchg Tricks
--------------
@@ -359,40 +337,31 @@ Priority adjustments
--------------------
The implementation of the PI code in rtmutex.c has several places that a
-process must adjust its priority. With the help of the pi_list of a
+process must adjust its priority. With the help of the pi_waiters of a
process this is rather easy to know what needs to be adjusted.
-The functions implementing the task adjustments are rt_mutex_adjust_prio,
-__rt_mutex_adjust_prio (same as the former, but expects the task pi_lock
-to already be taken), rt_mutex_getprio, and rt_mutex_setprio.
+The functions implementing the task adjustments are rt_mutex_adjust_prio
+and rt_mutex_setprio. rt_mutex_setprio is only used in rt_mutex_adjust_prio.
-rt_mutex_getprio and rt_mutex_setprio are only used in __rt_mutex_adjust_prio.
+rt_mutex_adjust_prio examines the priority of the task, and the highest
+priority process that is waiting any of mutexes owned by the task. Since
+the pi_waiters of a task holds an order by priority of all the top waiters
+of all the mutexes that the task owns, we simply need to compare the top
+pi waiter to its own normal/deadline priority and take the higher one.
+Then rt_mutex_setprio is called to adjust the priority of the task to the
+new priority. Note that rt_mutex_setprio is defined in kernel/sched/core.c
+to implement the actual change in priority.
-rt_mutex_getprio returns the priority that the task should have. Either the
-task's own normal priority, or if a process of a higher priority is waiting on
-a mutex owned by the task, then that higher priority should be returned.
-Since the pi_list of a task holds an order by priority list of all the top
-waiters of all the mutexes that the task owns, rt_mutex_getprio simply needs
-to compare the top pi waiter to its own normal priority, and return the higher
-priority back.
+(Note: For the "prio" field in task_struct, the lower the number, the
+ higher the priority. A "prio" of 5 is of higher priority than a
+ "prio" of 10.)
-(Note: if looking at the code, you will notice that the lower number of
- prio is returned. This is because the prio field in the task structure
- is an inverse order of the actual priority. So a "prio" of 5 is
- of higher priority than a "prio" of 10.)
-
-__rt_mutex_adjust_prio examines the result of rt_mutex_getprio, and if the
-result does not equal the task's current priority, then rt_mutex_setprio
-is called to adjust the priority of the task to the new priority.
-Note that rt_mutex_setprio is defined in kernel/sched/core.c to implement the
-actual change in priority.
-
-It is interesting to note that __rt_mutex_adjust_prio can either increase
+It is interesting to note that rt_mutex_adjust_prio can either increase
or decrease the priority of the task. In the case that a higher priority
-process has just blocked on a mutex owned by the task, __rt_mutex_adjust_prio
+process has just blocked on a mutex owned by the task, rt_mutex_adjust_prio
would increase/boost the task's priority. But if a higher priority task
were for some reason to leave the mutex (timeout or signal), this same function
-would decrease/unboost the priority of the task. That is because the pi_list
+would decrease/unboost the priority of the task. That is because the pi_waiters
always contains the highest priority task that is waiting on a mutex owned
by the task, so we only need to compare the priority of that top pi waiter
to the normal priority of the given task.
@@ -412,9 +381,10 @@ priorities.
rt_mutex_adjust_prio_chain is called with a task to be checked for PI
(de)boosting (the owner of a mutex that a process is blocking on), a flag to
-check for deadlocking, the mutex that the task owns, and a pointer to a waiter
+check for deadlocking, the mutex that the task owns, a pointer to a waiter
that is the process's waiter struct that is blocked on the mutex (although this
-parameter may be NULL for deboosting).
+parameter may be NULL for deboosting), a pointer to the mutex on which the task
+is blocked, and a top_task as the top waiter of the mutex.
For this explanation, I will not mention deadlock detection. This explanation
will try to stay at a high level.
@@ -424,133 +394,14 @@ that the state of the owner and lock can change when entered into this function.
Before this function is called, the task has already had rt_mutex_adjust_prio
performed on it. This means that the task is set to the priority that it
-should be at, but the plist nodes of the task's waiter have not been updated
-with the new priorities, and that this task may not be in the proper locations
-in the pi_lists and wait_lists that the task is blocked on. This function
+should be at, but the rbtree nodes of the task's waiter have not been updated
+with the new priorities, and this task may not be in the proper locations
+in the pi_waiters and waiters trees that the task is blocked on. This function
solves all that.
-A loop is entered, where task is the owner to be checked for PI changes that
-was passed by parameter (for the first iteration). The pi_lock of this task is
-taken to prevent any more changes to the pi_list of the task. This also
-prevents new tasks from completing the blocking on a mutex that is owned by this
-task.
-
-If the task is not blocked on a mutex then the loop is exited. We are at
-the top of the PI chain.
-
-A check is now done to see if the original waiter (the process that is blocked
-on the current mutex) is the top pi waiter of the task. That is, is this
-waiter on the top of the task's pi_list. If it is not, it either means that
-there is another process higher in priority that is blocked on one of the
-mutexes that the task owns, or that the waiter has just woken up via a signal
-or timeout and has left the PI chain. In either case, the loop is exited, since
-we don't need to do any more changes to the priority of the current task, or any
-task that owns a mutex that this current task is waiting on. A priority chain
-walk is only needed when a new top pi waiter is made to a task.
-
-The next check sees if the task's waiter plist node has the priority equal to
-the priority the task is set at. If they are equal, then we are done with
-the loop. Remember that the function started with the priority of the
-task adjusted, but the plist nodes that hold the task in other processes
-pi_lists have not been adjusted.
-
-Next, we look at the mutex that the task is blocked on. The mutex's wait_lock
-is taken. This is done by a spin_trylock, because the locking order of the
-pi_lock and wait_lock goes in the opposite direction. If we fail to grab the
-lock, the pi_lock is released, and we restart the loop.
-
-Now that we have both the pi_lock of the task as well as the wait_lock of
-the mutex the task is blocked on, we update the task's waiter's plist node
-that is located on the mutex's wait_list.
-
-Now we release the pi_lock of the task.
-
-Next the owner of the mutex has its pi_lock taken, so we can update the
-task's entry in the owner's pi_list. If the task is the highest priority
-process on the mutex's wait_list, then we remove the previous top waiter
-from the owner's pi_list, and replace it with the task.
-
-Note: It is possible that the task was the current top waiter on the mutex,
- in which case the task is not yet on the pi_list of the waiter. This
- is OK, since plist_del does nothing if the plist node is not on any
- list.
-
-If the task was not the top waiter of the mutex, but it was before we
-did the priority updates, that means we are deboosting/lowering the
-task. In this case, the task is removed from the pi_list of the owner,
-and the new top waiter is added.
-
-Lastly, we unlock both the pi_lock of the task, as well as the mutex's
-wait_lock, and continue the loop again. On the next iteration of the
-loop, the previous owner of the mutex will be the task that will be
-processed.
-
-Note: One might think that the owner of this mutex might have changed
- since we just grab the mutex's wait_lock. And one could be right.
- The important thing to remember is that the owner could not have
- become the task that is being processed in the PI chain, since
- we have taken that task's pi_lock at the beginning of the loop.
- So as long as there is an owner of this mutex that is not the same
- process as the tasked being worked on, we are OK.
-
- Looking closely at the code, one might be confused. The check for the
- end of the PI chain is when the task isn't blocked on anything or the
- task's waiter structure "task" element is NULL. This check is
- protected only by the task's pi_lock. But the code to unlock the mutex
- sets the task's waiter structure "task" element to NULL with only
- the protection of the mutex's wait_lock, which was not taken yet.
- Isn't this a race condition if the task becomes the new owner?
-
- The answer is No! The trick is the spin_trylock of the mutex's
- wait_lock. If we fail that lock, we release the pi_lock of the
- task and continue the loop, doing the end of PI chain check again.
-
- In the code to release the lock, the wait_lock of the mutex is held
- the entire time, and it is not let go when we grab the pi_lock of the
- new owner of the mutex. So if the switch of a new owner were to happen
- after the check for end of the PI chain and the grabbing of the
- wait_lock, the unlocking code would spin on the new owner's pi_lock
- but never give up the wait_lock. So the PI chain loop is guaranteed to
- fail the spin_trylock on the wait_lock, release the pi_lock, and
- try again.
-
- If you don't quite understand the above, that's OK. You don't have to,
- unless you really want to make a proof out of it ;)
-
-
-Pending Owners and Lock stealing
---------------------------------
-
-One of the flags in the owner field of the mutex structure is "Pending Owner".
-What this means is that an owner was chosen by the process releasing the
-mutex, but that owner has yet to wake up and actually take the mutex.
-
-Why is this important? Why can't we just give the mutex to another process
-and be done with it?
-
-The PI code is to help with real-time processes, and to let the highest
-priority process run as long as possible with little latencies and delays.
-If a high priority process owns a mutex that a lower priority process is
-blocked on, when the mutex is released it would be given to the lower priority
-process. What if the higher priority process wants to take that mutex again.
-The high priority process would fail to take that mutex that it just gave up
-and it would need to boost the lower priority process to run with full
-latency of that critical section (since the low priority process just entered
-it).
-
-There's no reason a high priority process that gives up a mutex should be
-penalized if it tries to take that mutex again. If the new owner of the
-mutex has not woken up yet, there's no reason that the higher priority process
-could not take that mutex away.
-
-To solve this, we introduced Pending Ownership and Lock Stealing. When a
-new process is given a mutex that it was blocked on, it is only given
-pending ownership. This means that it's the new owner, unless a higher
-priority process comes in and tries to grab that mutex. If a higher priority
-process does come along and wants that mutex, we let the higher priority
-process "steal" the mutex from the pending owner (only if it is still pending)
-and continue with the mutex.
-
+The main operation of this function is summarized by Thomas Gleixner in
+rtmutex.c. See the 'Chain walk basics and protection scope' comment for further
+details.
Taking of a mutex (The walk through)
------------------------------------
@@ -563,14 +414,14 @@ done when we have CMPXCHG enabled (otherwise the fast taking automatically
fails). Only when the owner field of the mutex is NULL can the lock be
taken with the CMPXCHG and nothing else needs to be done.
-If there is contention on the lock, whether it is owned or pending owner
-we go about the slow path (rt_mutex_slowlock).
+If there is contention on the lock, we go about the slow path
+(rt_mutex_slowlock).
The slow path function is where the task's waiter structure is created on
the stack. This is because the waiter structure is only needed for the
scope of this function. The waiter structure holds the nodes to store
-the task on the wait_list of the mutex, and if need be, the pi_list of
-the owner.
+the task on the waiters tree of the mutex, and if need be, the pi_waiters
+tree of the owner.
The wait_lock of the mutex is taken since the slow path of unlocking the
mutex also takes this lock.
@@ -581,102 +432,45 @@ contention).
try_to_take_rt_mutex is used every time the task tries to grab a mutex in the
slow path. The first thing that is done here is an atomic setting of
-the "Has Waiters" flag of the mutex's owner field. Yes, this could really
-be false, because if the mutex has no owner, there are no waiters and
-the current task also won't have any waiters. But we don't have the lock
-yet, so we assume we are going to be a waiter. The reason for this is to
-play nice for those architectures that do have CMPXCHG. By setting this flag
-now, the owner of the mutex can't release the mutex without going into the
-slow unlock path, and it would then need to grab the wait_lock, which this
-code currently holds. So setting the "Has Waiters" flag forces the owner
-to synchronize with this code.
-
-Now that we know that we can't have any races with the owner releasing the
-mutex, we check to see if we can take the ownership. This is done if the
-mutex doesn't have a owner, or if we can steal the mutex from a pending
-owner. Let's look at the situations we have here.
-
- 1) Has owner that is pending
- ----------------------------
-
- The mutex has a owner, but it hasn't woken up and the mutex flag
- "Pending Owner" is set. The first check is to see if the owner isn't the
- current task. This is because this function is also used for the pending
- owner to grab the mutex. When a pending owner wakes up, it checks to see
- if it can take the mutex, and this is done if the owner is already set to
- itself. If so, we succeed and leave the function, clearing the "Pending
- Owner" bit.
-
- If the pending owner is not current, we check to see if the current priority is
- higher than the pending owner. If not, we fail the function and return.
-
- There's also something special about a pending owner. That is a pending owner
- is never blocked on a mutex. So there is no PI chain to worry about. It also
- means that if the mutex doesn't have any waiters, there's no accounting needed
- to update the pending owner's pi_list, since we only worry about processes
- blocked on the current mutex.
-
- If there are waiters on this mutex, and we just stole the ownership, we need
- to take the top waiter, remove it from the pi_list of the pending owner, and
- add it to the current pi_list. Note that at this moment, the pending owner
- is no longer on the list of waiters. This is fine, since the pending owner
- would add itself back when it realizes that it had the ownership stolen
- from itself. When the pending owner tries to grab the mutex, it will fail
- in try_to_take_rt_mutex if the owner field points to another process.
-
- 2) No owner
- -----------
-
- If there is no owner (or we successfully stole the lock), we set the owner
- of the mutex to current, and set the flag of "Has Waiters" if the current
- mutex actually has waiters, or we clear the flag if it doesn't. See, it was
- OK that we set that flag early, since now it is cleared.
-
- 3) Failed to grab ownership
- ---------------------------
-
- The most interesting case is when we fail to take ownership. This means that
- there exists an owner, or there's a pending owner with equal or higher
- priority than the current task.
-
-We'll continue on the failed case.
-
-If the mutex has a timeout, we set up a timer to go off to break us out
-of this mutex if we failed to get it after a specified amount of time.
-
-Now we enter a loop that will continue to try to take ownership of the mutex, or
-fail from a timeout or signal.
-
-Once again we try to take the mutex. This will usually fail the first time
-in the loop, since it had just failed to get the mutex. But the second time
-in the loop, this would likely succeed, since the task would likely be
-the pending owner.
-
-If the mutex is TASK_INTERRUPTIBLE a check for signals and timeout is done
-here.
-
-The waiter structure has a "task" field that points to the task that is blocked
-on the mutex. This field can be NULL the first time it goes through the loop
-or if the task is a pending owner and had its mutex stolen. If the "task"
-field is NULL then we need to set up the accounting for it.
+the "Has Waiters" flag of the mutex's owner field. By setting this flag
+now, the current owner of the mutex being contended for can't release the mutex
+without going into the slow unlock path, and it would then need to grab the
+wait_lock, which this code currently holds. So setting the "Has Waiters" flag
+forces the current owner to synchronize with this code.
+
+The lock is taken if the following are true:
+ 1) The lock has no owner
+ 2) The current task is the highest priority against all other
+ waiters of the lock
+
+If the task succeeds to acquire the lock, then the task is set as the
+owner of the lock, and if the lock still has waiters, the top_waiter
+(highest priority task waiting on the lock) is added to this task's
+pi_waiters tree.
+
+If the lock is not taken by try_to_take_rt_mutex(), then the
+task_blocks_on_rt_mutex() function is called. This will add the task to
+the lock's waiter tree and propagate the pi chain of the lock as well
+as the lock's owner's pi_waiters tree. This is described in the next
+section.
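+
+As a very rough sketch of that decision (illustrative C only, with
+made-up names; not the actual try_to_take_rt_mutex() code):
+
+    struct waiter;
+
+    struct lock_sketch {
+        void *owner;                   /* NULL when the lock is free */
+        struct waiter *top_waiter;     /* highest priority waiter, if any */
+    };
+
+    /* Caller is assumed to hold the lock's wait_lock. */
+    static int can_take(struct lock_sketch *lock, struct waiter *me)
+    {
+        if (lock->owner)
+            return 0;      /* somebody already owns it */
+
+        /* Only the highest priority waiter may take a free lock. */
+        return !lock->top_waiter || lock->top_waiter == me;
+    }
+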
Task blocks on mutex
--------------------
The accounting of a mutex and process is done with the waiter structure of
the process. The "task" field is set to the process, and the "lock" field
-to the mutex. The plist nodes are initialized to the processes current
-priority.
+to the mutex. The rbtree node of the waiter is initialized to the process's
+current priority.
Since the wait_lock was taken at the entry of the slow lock, we can safely
-add the waiter to the wait_list. If the current process is the highest
-priority process currently waiting on this mutex, then we remove the
-previous top waiter process (if it exists) from the pi_list of the owner,
-and add the current process to that list. Since the pi_list of the owner
+add the waiter to the waiters tree of the mutex. If the current process is the
+highest priority process currently waiting on this mutex, then we remove the
+previous top waiter process (if it exists) from the pi_waiters of the owner,
+and add the current process to that tree. Since the pi_waiters of the owner
has changed, we call rt_mutex_adjust_prio on the owner to see if the owner
should adjust its priority accordingly.
-If the owner is also blocked on a lock, and had its pi_list changed
+If the owner is also blocked on a lock, and had its pi_waiters changed
(or deadlock checking is on), we unlock the wait_lock of the mutex and go ahead
and run rt_mutex_adjust_prio_chain on the owner, as described earlier.
@@ -686,30 +480,23 @@ mutex (waiter "task" field is not NULL), then we go to sleep (call schedule).
Waking up in the loop
---------------------
-The schedule can then wake up for a few reasons.
- 1) we were given pending ownership of the mutex.
- 2) we received a signal and was TASK_INTERRUPTIBLE
- 3) we had a timeout and was TASK_INTERRUPTIBLE
+The task can then wake up for a couple of reasons:
+ 1) The previous lock owner released the lock, and the task is now the top_waiter
+ 2) The task received a signal or a timeout occurred
-In any of these cases, we continue the loop and once again try to grab the
-ownership of the mutex. If we succeed, we exit the loop, otherwise we continue
-and on signal and timeout, will exit the loop, or if we had the mutex stolen
-we just simply add ourselves back on the lists and go back to sleep.
+In both cases, the task will try again to acquire the lock. If it
+does, then it will take itself off the waiters tree and set itself back
+to the TASK_RUNNING state.
-Note: For various reasons, because of timeout and signals, the steal mutex
- algorithm needs to be careful. This is because the current process is
- still on the wait_list. And because of dynamic changing of priorities,
- especially on SCHED_OTHER tasks, the current process can be the
- highest priority task on the wait_list.
-
-Failed to get mutex on Timeout or Signal
-----------------------------------------
+In the first case, if the lock was acquired by another task before this task
+could get the lock, then it will go back to sleep and wait to be woken again.
-If a timeout or signal occurred, the waiter's "task" field would not be
-NULL and the task needs to be taken off the wait_list of the mutex and perhaps
-pi_list of the owner. If this process was a high priority process, then
-the rt_mutex_adjust_prio_chain needs to be executed again on the owner,
-but this time it will be lowering the priorities.
+The second case is only applicable to tasks that are grabbing a mutex
+that can wake up before getting the lock, either due to a signal or
+a timeout (i.e. rt_mutex_timed_futex_lock()). When woken, the task will try
+to take the lock again; if it succeeds, the task will return with the
+lock held, otherwise it will return -EINTR if the task was woken
+by a signal, or -ETIMEDOUT if it timed out.
Unlocking the Mutex
@@ -739,25 +526,12 @@ owner still needs to make this check. If there are no waiters then the mutex
owner field is set to NULL, the wait_lock is released and nothing more is
needed.
-If there are waiters, then we need to wake one up and give that waiter
-pending ownership.
+If there are waiters, then we need to wake one up.
On the wake up code, the pi_lock of the current owner is taken. The top
-waiter of the lock is found and removed from the wait_list of the mutex
-as well as the pi_list of the current owner. The task field of the new
-pending owner's waiter structure is set to NULL, and the owner field of the
-mutex is set to the new owner with the "Pending Owner" bit set, as well
-as the "Has Waiters" bit if there still are other processes blocked on the
-mutex.
-
-The pi_lock of the previous owner is released, and the new pending owner's
-pi_lock is taken. Remember that this is the trick to prevent the race
-condition in rt_mutex_adjust_prio_chain from adding itself as a waiter
-on the mutex.
-
-We now clear the "pi_blocked_on" field of the new pending owner, and if
-the mutex still has waiters pending, we add the new top waiter to the pi_list
-of the pending owner.
+waiter of the lock is found and removed from the waiters tree of the mutex
+as well as the pi_waiters tree of the current owner. The "Has Waiters" bit is
+marked to prevent lower priority tasks from stealing the lock.
Finally we unlock the pi_lock of the pending owner and wake it up.
@@ -772,10 +546,14 @@ Credits
-------
Author: Steven Rostedt <rostedt@goodmis.org>
+Updated: Alex Shi <alex.shi@linaro.org> - 7/6/2017
-Reviewers: Ingo Molnar, Thomas Gleixner, Thomas Duetsch, and Randy Dunlap
+Original Reviewers: Ingo Molnar, Thomas Gleixner, Thomas Duetsch, and
+ Randy Dunlap
+Update (7/6/2017) Reviewers: Steven Rostedt and Sebastian Siewior
Updates
-------
This document was originally written for 2.6.17-rc3-mm1
+and was updated for 4.12
diff --git a/Documentation/locking/rt-mutex.txt b/Documentation/locking/rt-mutex.txt
index 243393d882ee..35793e003041 100644
--- a/Documentation/locking/rt-mutex.txt
+++ b/Documentation/locking/rt-mutex.txt
@@ -28,14 +28,13 @@ magic bullet for poorly designed applications, but it allows
well-designed applications to use userspace locks in critical parts of
an high priority thread, without losing determinism.
-The enqueueing of the waiters into the rtmutex waiter list is done in
+The enqueueing of the waiters into the rtmutex waiter tree is done in
priority order. For same priorities FIFO order is chosen. For each
rtmutex, only the top priority waiter is enqueued into the owner's
-priority waiters list. This list too queues in priority order. Whenever
+priority waiters tree. This tree too queues in priority order. Whenever
the top priority waiter of a task changes (for example it timed out or
-got a signal), the priority of the owner task is readjusted. [The
-priority enqueueing is handled by "plists", see include/linux/plist.h
-for more details.]
+got a signal), the priority of the owner task is readjusted. The
+priority enqueueing is handled by "pi_waiters".
RT-mutexes are optimized for fastpath operations and have no internal
locking overhead when locking an uncontended mutex or unlocking a mutex
@@ -46,34 +45,29 @@ is used]
The state of the rt-mutex is tracked via the owner field of the rt-mutex
structure:
-rt_mutex->owner holds the task_struct pointer of the owner. Bit 0 and 1
-are used to keep track of the "owner is pending" and "rtmutex has
-waiters" state.
+lock->owner holds the task_struct pointer of the owner. Bit 0 is used to
+keep track of the "lock has waiters" state.
- owner bit1 bit0
- NULL 0 0 mutex is free (fast acquire possible)
- NULL 0 1 invalid state
- NULL 1 0 Transitional state*
- NULL 1 1 invalid state
- taskpointer 0 0 mutex is held (fast release possible)
- taskpointer 0 1 task is pending owner
- taskpointer 1 0 mutex is held and has waiters
- taskpointer 1 1 task is pending owner and mutex has waiters
+ owner bit0
+ NULL 0 lock is free (fast acquire possible)
+ NULL 1 lock is free and has waiters and the top waiter
+ is going to take the lock*
+ taskpointer 0 lock is held (fast release possible)
+ taskpointer 1 lock is held and has waiters**
-Pending-ownership handling is a performance optimization:
-pending-ownership is assigned to the first (highest priority) waiter of
-the mutex, when the mutex is released. The thread is woken up and once
-it starts executing it can acquire the mutex. Until the mutex is taken
-by it (bit 0 is cleared) a competing higher priority thread can "steal"
-the mutex which puts the woken up thread back on the waiters list.
+The fast atomic compare exchange based acquire and release is only
+possible when bit 0 of lock->owner is 0.
-The pending-ownership optimization is especially important for the
-uninterrupted workflow of high-prio tasks which repeatedly
-takes/releases locks that have lower-prio waiters. Without this
-optimization the higher-prio thread would ping-pong to the lower-prio
-task [because at unlock time we always assign a new owner].
+(*) It also can be a transitional state when grabbing the lock
+with ->wait_lock held. To prevent any fast path cmpxchg on the lock,
+we need to set bit 0 before looking at the lock, and the owner may be
+NULL during this small window, hence this can be a transitional state.
-(*) The "mutex has waiters" bit gets set to take the lock. If the lock
-doesn't already have an owner, this bit is quickly cleared if there are
-no waiters. So this is a transitional state to synchronize with looking
-at the owner field of the mutex and the mutex owner releasing the lock.
+(**) There is a small time when bit 0 is set but there are no
+waiters. This can happen when grabbing the lock in the slow path.
+To prevent a cmpxchg of the owner releasing the lock, we need to
+set this bit before looking at the lock.
+
+BTW, there is still technically a "Pending Owner"; it's just not called
+that anymore. The pending owner happens to be the top_waiter of a lock
+that has no owner and has been woken up to grab the lock.
diff --git a/Documentation/media/ca.h.rst.exceptions b/Documentation/media/ca.h.rst.exceptions
index d7c9fed8c004..553559cc6ad7 100644
--- a/Documentation/media/ca.h.rst.exceptions
+++ b/Documentation/media/ca.h.rst.exceptions
@@ -16,7 +16,6 @@ replace define CA_NDS :c:type:`ca_descr_info`
replace define CA_DSS :c:type:`ca_descr_info`
# some typedefs should point to struct/enums
-replace typedef ca_pid_t :c:type:`ca_pid`
replace typedef ca_slot_info_t :c:type:`ca_slot_info`
replace typedef ca_descr_info_t :c:type:`ca_descr_info`
replace typedef ca_caps_t :c:type:`ca_caps`
diff --git a/Documentation/media/cec-drivers/index.rst b/Documentation/media/cec-drivers/index.rst
new file mode 100644
index 000000000000..7ef204823422
--- /dev/null
+++ b/Documentation/media/cec-drivers/index.rst
@@ -0,0 +1,34 @@
+.. -*- coding: utf-8; mode: rst -*-
+
+.. include:: <isonum.txt>
+
+.. _cec-drivers:
+
+#################################
+CEC driver-specific documentation
+#################################
+
+**Copyright** |copy| 2017 : LinuxTV Developers
+
+This documentation is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the Free
+Software Foundation version 2 of the License.
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+
+For more details see the file COPYING in the source distribution of Linux.
+
+.. only:: html
+
+ .. class:: toc-title
+
+ Table of Contents
+
+.. toctree::
+ :maxdepth: 5
+ :numbered:
+
+ pulse8-cec
diff --git a/Documentation/media/cec-drivers/pulse8-cec.rst b/Documentation/media/cec-drivers/pulse8-cec.rst
new file mode 100644
index 000000000000..99551c6a9bc5
--- /dev/null
+++ b/Documentation/media/cec-drivers/pulse8-cec.rst
@@ -0,0 +1,11 @@
+Pulse-Eight CEC Adapter driver
+==============================
+
+The pulse8-cec driver implements the following module option:
+
+``persistent_config``
+---------------------
+
+By default this is off, but when set to 1 the driver will store the current
+settings to the device's internal eeprom and restore them the next time the
+device is connected to the USB port.
diff --git a/Documentation/media/dmx.h.rst.exceptions b/Documentation/media/dmx.h.rst.exceptions
index 2fdb458564ba..629db384104a 100644
--- a/Documentation/media/dmx.h.rst.exceptions
+++ b/Documentation/media/dmx.h.rst.exceptions
@@ -40,27 +40,17 @@ replace enum dmx_input :c:type:`dmx_input`
replace symbol DMX_IN_FRONTEND :c:type:`dmx_input`
replace symbol DMX_IN_DVR :c:type:`dmx_input`
-# dmx_source_t symbols
-replace enum dmx_source :c:type:`dmx_source`
-replace symbol DMX_SOURCE_FRONT0 :c:type:`dmx_source`
-replace symbol DMX_SOURCE_FRONT1 :c:type:`dmx_source`
-replace symbol DMX_SOURCE_FRONT2 :c:type:`dmx_source`
-replace symbol DMX_SOURCE_FRONT3 :c:type:`dmx_source`
-replace symbol DMX_SOURCE_DVR0 :c:type:`dmx_source`
-replace symbol DMX_SOURCE_DVR1 :c:type:`dmx_source`
-replace symbol DMX_SOURCE_DVR2 :c:type:`dmx_source`
-replace symbol DMX_SOURCE_DVR3 :c:type:`dmx_source`
-
-
# Flags for struct dmx_sct_filter_params
replace define DMX_CHECK_CRC :c:type:`dmx_sct_filter_params`
replace define DMX_ONESHOT :c:type:`dmx_sct_filter_params`
replace define DMX_IMMEDIATE_START :c:type:`dmx_sct_filter_params`
-replace define DMX_KERNEL_CLIENT :c:type:`dmx_sct_filter_params`
# some typedefs should point to struct/enums
-replace typedef dmx_caps_t :c:type:`dmx_caps`
replace typedef dmx_filter_t :c:type:`dmx_filter`
replace typedef dmx_pes_type_t :c:type:`dmx_pes_type`
replace typedef dmx_input_t :c:type:`dmx_input`
-replace typedef dmx_source_t :c:type:`dmx_source`
+
+ignore symbol DMX_OUT_DECODER
+ignore symbol DMX_OUT_TAP
+ignore symbol DMX_OUT_TS_TAP
+ignore symbol DMX_OUT_TSDEMUX_TAP
diff --git a/Documentation/media/dvb-drivers/ci.rst b/Documentation/media/dvb-drivers/ci.rst
index 69b07e9d1816..87f3748c49b9 100644
--- a/Documentation/media/dvb-drivers/ci.rst
+++ b/Documentation/media/dvb-drivers/ci.rst
@@ -143,7 +143,6 @@ All these ioctls are also valid for the High level CI interface
#define CA_GET_MSG _IOR('o', 132, ca_msg_t)
#define CA_SEND_MSG _IOW('o', 133, ca_msg_t)
#define CA_SET_DESCR _IOW('o', 134, ca_descr_t)
-#define CA_SET_PID _IOW('o', 135, ca_pid_t)
On querying the device, the device yields information thus:
diff --git a/Documentation/media/dvb-drivers/index.rst b/Documentation/media/dvb-drivers/index.rst
index ea0da6d63299..376141143ae9 100644
--- a/Documentation/media/dvb-drivers/index.rst
+++ b/Documentation/media/dvb-drivers/index.rst
@@ -19,7 +19,9 @@ more details.
For more details see the file COPYING in the source distribution of Linux.
-.. class:: toc-title
+.. only:: html
+
+ .. class:: toc-title
Table of Contents
diff --git a/Documentation/media/frontend.h.rst.exceptions b/Documentation/media/frontend.h.rst.exceptions
index 7656770f1936..f7c4df620a52 100644
--- a/Documentation/media/frontend.h.rst.exceptions
+++ b/Documentation/media/frontend.h.rst.exceptions
@@ -25,19 +25,9 @@ ignore define DTV_MAX_COMMAND
ignore define MAX_DTV_STATS
ignore define DTV_IOCTL_MAX_MSGS
-# Stats enum is documented altogether
-replace enum fecap_scale_params :ref:`frontend-stat-properties`
-replace symbol FE_SCALE_COUNTER frontend-stat-properties
-replace symbol FE_SCALE_DECIBEL frontend-stat-properties
-replace symbol FE_SCALE_NOT_AVAILABLE frontend-stat-properties
-replace symbol FE_SCALE_RELATIVE frontend-stat-properties
-
# the same reference is used for both get and set ioctls
replace ioctl FE_SET_PROPERTY :c:type:`FE_GET_PROPERTY`
-# Ignore struct used only internally at Kernel
-ignore struct dtv_cmds_h
-
# Typedefs that use the enum reference
replace typedef fe_sec_voltage_t :c:type:`fe_sec_voltage`
@@ -45,3 +35,178 @@ replace typedef fe_sec_voltage_t :c:type:`fe_sec_voltage`
replace define FE_TUNE_MODE_ONESHOT :c:func:`FE_SET_FRONTEND_TUNE_MODE`
replace define LNA_AUTO dtv-lna
replace define NO_STREAM_ID_FILTER dtv-stream-id
+
+# Those enums are defined at the frontend.h header, and not externally
+
+ignore symbol FE_IS_STUPID
+ignore symbol FE_CAN_INVERSION_AUTO
+ignore symbol FE_CAN_FEC_1_2
+ignore symbol FE_CAN_FEC_2_3
+ignore symbol FE_CAN_FEC_3_4
+ignore symbol FE_CAN_FEC_4_5
+ignore symbol FE_CAN_FEC_5_6
+ignore symbol FE_CAN_FEC_6_7
+ignore symbol FE_CAN_FEC_7_8
+ignore symbol FE_CAN_FEC_8_9
+ignore symbol FE_CAN_FEC_AUTO
+ignore symbol FE_CAN_QPSK
+ignore symbol FE_CAN_QAM_16
+ignore symbol FE_CAN_QAM_32
+ignore symbol FE_CAN_QAM_64
+ignore symbol FE_CAN_QAM_128
+ignore symbol FE_CAN_QAM_256
+ignore symbol FE_CAN_QAM_AUTO
+ignore symbol FE_CAN_TRANSMISSION_MODE_AUTO
+ignore symbol FE_CAN_BANDWIDTH_AUTO
+ignore symbol FE_CAN_GUARD_INTERVAL_AUTO
+ignore symbol FE_CAN_HIERARCHY_AUTO
+ignore symbol FE_CAN_8VSB
+ignore symbol FE_CAN_16VSB
+ignore symbol FE_HAS_EXTENDED_CAPS
+ignore symbol FE_CAN_MULTISTREAM
+ignore symbol FE_CAN_TURBO_FEC
+ignore symbol FE_CAN_2G_MODULATION
+ignore symbol FE_NEEDS_BENDING
+ignore symbol FE_CAN_RECOVER
+ignore symbol FE_CAN_MUTE_TS
+
+ignore symbol QPSK
+ignore symbol QAM_16
+ignore symbol QAM_32
+ignore symbol QAM_64
+ignore symbol QAM_128
+ignore symbol QAM_256
+ignore symbol QAM_AUTO
+ignore symbol VSB_8
+ignore symbol VSB_16
+ignore symbol PSK_8
+ignore symbol APSK_16
+ignore symbol APSK_32
+ignore symbol DQPSK
+ignore symbol QAM_4_NR
+
+ignore symbol SEC_VOLTAGE_13
+ignore symbol SEC_VOLTAGE_18
+ignore symbol SEC_VOLTAGE_OFF
+
+ignore symbol SEC_TONE_ON
+ignore symbol SEC_TONE_OFF
+
+ignore symbol SEC_MINI_A
+ignore symbol SEC_MINI_B
+
+ignore symbol FE_NONE
+ignore symbol FE_HAS_SIGNAL
+ignore symbol FE_HAS_CARRIER
+ignore symbol FE_HAS_VITERBI
+ignore symbol FE_HAS_SYNC
+ignore symbol FE_HAS_LOCK
+ignore symbol FE_REINIT
+ignore symbol FE_TIMEDOUT
+
+ignore symbol FEC_NONE
+ignore symbol FEC_1_2
+ignore symbol FEC_2_3
+ignore symbol FEC_3_4
+ignore symbol FEC_4_5
+ignore symbol FEC_5_6
+ignore symbol FEC_6_7
+ignore symbol FEC_7_8
+ignore symbol FEC_8_9
+ignore symbol FEC_AUTO
+ignore symbol FEC_3_5
+ignore symbol FEC_9_10
+ignore symbol FEC_2_5
+
+ignore symbol TRANSMISSION_MODE_AUTO
+ignore symbol TRANSMISSION_MODE_1K
+ignore symbol TRANSMISSION_MODE_2K
+ignore symbol TRANSMISSION_MODE_8K
+ignore symbol TRANSMISSION_MODE_4K
+ignore symbol TRANSMISSION_MODE_16K
+ignore symbol TRANSMISSION_MODE_32K
+ignore symbol TRANSMISSION_MODE_C1
+ignore symbol TRANSMISSION_MODE_C3780
+ignore symbol TRANSMISSION_MODE_2K
+ignore symbol TRANSMISSION_MODE_8K
+
+ignore symbol GUARD_INTERVAL_AUTO
+ignore symbol GUARD_INTERVAL_1_128
+ignore symbol GUARD_INTERVAL_1_32
+ignore symbol GUARD_INTERVAL_1_16
+ignore symbol GUARD_INTERVAL_1_8
+ignore symbol GUARD_INTERVAL_1_4
+ignore symbol GUARD_INTERVAL_19_128
+ignore symbol GUARD_INTERVAL_19_256
+ignore symbol GUARD_INTERVAL_PN420
+ignore symbol GUARD_INTERVAL_PN595
+ignore symbol GUARD_INTERVAL_PN945
+
+ignore symbol HIERARCHY_NONE
+ignore symbol HIERARCHY_AUTO
+ignore symbol HIERARCHY_1
+ignore symbol HIERARCHY_2
+ignore symbol HIERARCHY_4
+
+ignore symbol INTERLEAVING_NONE
+ignore symbol INTERLEAVING_AUTO
+ignore symbol INTERLEAVING_240
+ignore symbol INTERLEAVING_720
+
+ignore symbol PILOT_ON
+ignore symbol PILOT_OFF
+ignore symbol PILOT_AUTO
+
+ignore symbol ROLLOFF_35
+ignore symbol ROLLOFF_20
+ignore symbol ROLLOFF_25
+ignore symbol ROLLOFF_AUTO
+
+ignore symbol INVERSION_ON
+ignore symbol INVERSION_OFF
+ignore symbol INVERSION_AUTO
+
+ignore symbol SYS_UNDEFINED
+ignore symbol SYS_DVBC_ANNEX_A
+ignore symbol SYS_DVBC_ANNEX_B
+ignore symbol SYS_DVBC_ANNEX_C
+ignore symbol SYS_ISDBC
+ignore symbol SYS_DVBT
+ignore symbol SYS_DVBT2
+ignore symbol SYS_ISDBT
+ignore symbol SYS_ATSC
+ignore symbol SYS_ATSCMH
+ignore symbol SYS_DTMB
+ignore symbol SYS_DVBS
+ignore symbol SYS_DVBS2
+ignore symbol SYS_TURBO
+ignore symbol SYS_ISDBS
+ignore symbol SYS_DAB
+ignore symbol SYS_DSS
+ignore symbol SYS_CMMB
+ignore symbol SYS_DVBH
+
+ignore symbol ATSCMH_SCCC_BLK_SEP
+ignore symbol ATSCMH_SCCC_BLK_COMB
+ignore symbol ATSCMH_SCCC_BLK_RES
+
+ignore symbol ATSCMH_SCCC_CODE_HLF
+ignore symbol ATSCMH_SCCC_CODE_QTR
+ignore symbol ATSCMH_SCCC_CODE_RES
+
+ignore symbol ATSCMH_RSFRAME_ENS_PRI
+ignore symbol ATSCMH_RSFRAME_ENS_SEC
+
+ignore symbol ATSCMH_RSFRAME_PRI_ONLY
+ignore symbol ATSCMH_RSFRAME_PRI_SEC
+ignore symbol ATSCMH_RSFRAME_RES
+
+ignore symbol ATSCMH_RSCODE_211_187
+ignore symbol ATSCMH_RSCODE_223_187
+ignore symbol ATSCMH_RSCODE_235_187
+ignore symbol ATSCMH_RSCODE_RES
+
+ignore symbol FE_SCALE_NOT_AVAILABLE
+ignore symbol FE_SCALE_DECIBEL
+ignore symbol FE_SCALE_RELATIVE
+ignore symbol FE_SCALE_COUNTER
diff --git a/Documentation/media/index.rst b/Documentation/media/index.rst
index 7f8f0af620ce..1cf5316c8ff8 100644
--- a/Documentation/media/index.rst
+++ b/Documentation/media/index.rst
@@ -1,7 +1,11 @@
Linux Media Subsystem Documentation
===================================
-Contents:
+.. only:: html
+
+ .. class:: toc-title
+
+ Table of Contents
.. toctree::
:maxdepth: 2
@@ -10,6 +14,7 @@ Contents:
media_kapi
dvb-drivers/index
v4l-drivers/index
+ cec-drivers/index
.. only:: subproject
diff --git a/Documentation/media/kapi/cec-core.rst b/Documentation/media/kapi/cec-core.rst
index 8a65c69ed071..28866259998c 100644
--- a/Documentation/media/kapi/cec-core.rst
+++ b/Documentation/media/kapi/cec-core.rst
@@ -107,6 +107,7 @@ your driver:
int (*adap_transmit)(struct cec_adapter *adap, u8 attempts,
u32 signal_free_time, struct cec_msg *msg);
void (*adap_status)(struct cec_adapter *adap, struct seq_file *file);
+ void (*adap_free)(struct cec_adapter *adap);
/* High-level callbacks */
...
@@ -184,6 +185,14 @@ To log the current CEC hardware status:
This optional callback can be used to show the status of the CEC hardware.
The status is available through debugfs: cat /sys/kernel/debug/cec/cecX/status
+To free any resources when the adapter is deleted:
+
+.. c:function::
+ void (*adap_free)(struct cec_adapter *adap);
+
+This optional callback can be used to free any resources that might have been
+allocated by the driver. It's called from cec_delete_adapter.
+
Your adapter driver will also have to react to events (typically interrupt
driven) by calling into the framework in the following situations:
@@ -336,3 +345,34 @@ log_addrs->num_log_addrs set to 0. The block argument is ignored when
unconfiguring. This function will just return if the physical address is
invalid. Once the physical address becomes valid, then the framework will
attempt to claim these logical addresses.
+
+CEC Pin framework
+-----------------
+
+Most CEC hardware operates on full CEC messages where the software provides
+the message and the hardware handles the low-level CEC protocol. But some
+hardware only drives the CEC pin and software has to handle the low-level
+CEC protocol. The CEC pin framework was created to handle such devices.
+
+Note that due to the close-to-realtime requirements it can never be guaranteed
+to work 100%. This framework uses highres timers internally, but if a
+timer goes off more than 300 microseconds late, wrong results can
+occur. In reality it appears to be fairly reliable.
+
+One advantage of this low-level implementation is that it can be used as
+a cheap CEC analyser, especially if interrupts can be used to detect
+CEC pin transitions from low to high or vice versa.
+
+.. kernel-doc:: include/media/cec-pin.h
+
+CEC Notifier framework
+----------------------
+
+Most drm HDMI implementations have an integrated CEC implementation and no
+notifier support is needed. But some have independent CEC implementations
+that have their own driver. This could be an IP block for an SoC or a
+completely separate chip that deals with the CEC pin. For those cases a
+drm driver can install a notifier and use the notifier to inform the
+CEC driver about changes in the physical address.
+
+.. kernel-doc:: include/media/cec-notifier.h
diff --git a/Documentation/media/kapi/csi2.rst b/Documentation/media/kapi/csi2.rst
index e33fcb967922..0560100efca2 100644
--- a/Documentation/media/kapi/csi2.rst
+++ b/Documentation/media/kapi/csi2.rst
@@ -51,6 +51,16 @@ not active. Some transmitters do this automatically but some have to
be explicitly programmed to do so, and some are unable to do so
altogether due to hardware constraints.
+Stopping the transmitter
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+A transmitter stops sending the stream of images as a result of
+calling the ``.s_stream()`` callback. Some transmitters may stop the
+stream at a frame boundary whereas others stop immediately,
+effectively leaving the current frame unfinished. The receiver driver
+should not make assumptions either way, but function properly in both
+cases.
+
Receiver drivers
----------------
diff --git a/Documentation/media/kapi/dtv-core.rst b/Documentation/media/kapi/dtv-core.rst
index ff86bf0abeae..de9a228aca8a 100644
--- a/Documentation/media/kapi/dtv-core.rst
+++ b/Documentation/media/kapi/dtv-core.rst
@@ -1,6 +1,31 @@
Digital TV (DVB) devices
------------------------
+Digital TV devices are implemented by several different drivers:
+
+- A bridge driver that is responsible for talking with the bus where the other
+ devices are connected (PCI, USB, SPI), binding to the other drivers and
+ implementing the digital demux logic (either in software or in hardware);
+
+- Frontend drivers that are usually implemented as two separate drivers:
+
+ - A tuner driver that implements the logic which commands the part of the
+ hardware responsible for tuning into a digital TV transponder or
+ physical channel. The output of a tuner is usually a baseband or
+ Intermediate Frequency (IF) signal;
+
+ - A demodulator driver (a.k.a. "demod") that implements the logic which
+ commands the digital TV decoding hardware. The output of a demod is
+ a digital stream, with multiple audio, video and data channels typically
+ multiplexed using MPEG Transport Stream [#f1]_.
+
+On most hardware, the frontend drivers talk with the bridge driver using an
+I2C bus.
+
+.. [#f1] Some standards use TCP/IP for multiplexing data, like DVB-H (an
+ abandoned standard, not used anymore) and ATSC version 3.0 current
+ proposals. Currently, the DVB subsystem doesn't implement those standards.
+
Digital TV Common functions
---------------------------
@@ -55,8 +80,141 @@ Digital TV Frontend
The Digital TV Frontend kABI defines a driver-internal interface for
registering low-level, hardware specific driver to a hardware independent
frontend layer. It is only of interest for Digital TV device driver writers.
-The header file for this API is named dvb_frontend.h and located in
-drivers/media/dvb-core.
+The header file for this API is named ``dvb_frontend.h`` and located in
+``drivers/media/dvb-core``.
+
+Demodulator driver
+^^^^^^^^^^^^^^^^^^
+
+The demodulator driver is responsible for talking with the decoding part of the
+hardware. Such a driver should implement :c:type:`dvb_frontend_ops`, which
+tells which digital TV standards are supported, and points to a
+series of functions that allow the DVB core to command the hardware via
+the code under ``drivers/media/dvb-core/dvb_frontend.c``.
+
+A typical example of such struct in a driver ``foo`` is::
+
+ static struct dvb_frontend_ops foo_ops = {
+ .delsys = { SYS_DVBT, SYS_DVBT2, SYS_DVBC_ANNEX_A },
+ .info = {
+ .name = "foo DVB-T/T2/C driver",
+ .caps = FE_CAN_FEC_1_2 |
+ FE_CAN_FEC_2_3 |
+ FE_CAN_FEC_3_4 |
+ FE_CAN_FEC_5_6 |
+ FE_CAN_FEC_7_8 |
+ FE_CAN_FEC_AUTO |
+ FE_CAN_QPSK |
+ FE_CAN_QAM_16 |
+ FE_CAN_QAM_32 |
+ FE_CAN_QAM_64 |
+ FE_CAN_QAM_128 |
+ FE_CAN_QAM_256 |
+ FE_CAN_QAM_AUTO |
+ FE_CAN_TRANSMISSION_MODE_AUTO |
+ FE_CAN_GUARD_INTERVAL_AUTO |
+ FE_CAN_HIERARCHY_AUTO |
+ FE_CAN_MUTE_TS |
+ FE_CAN_2G_MODULATION,
+ .frequency_min = 42000000, /* Hz */
+ .frequency_max = 1002000000, /* Hz */
+ .symbol_rate_min = 870000,
+ .symbol_rate_max = 11700000
+ },
+ .init = foo_init,
+ .sleep = foo_sleep,
+ .release = foo_release,
+ .set_frontend = foo_set_frontend,
+ .get_frontend = foo_get_frontend,
+ .read_status = foo_get_status_and_stats,
+ .tune = foo_tune,
+ .i2c_gate_ctrl = foo_i2c_gate_ctrl,
+ .get_frontend_algo = foo_get_algo,
+ };
+
+A typical example of such struct in a driver ``bar`` meant to be used on
+Satellite TV reception is::
+
+ static const struct dvb_frontend_ops bar_ops = {
+ .delsys = { SYS_DVBS, SYS_DVBS2 },
+ .info = {
+ .name = "Bar DVB-S/S2 demodulator",
+ .frequency_min = 500000, /* KHz */
+ .frequency_max = 2500000, /* KHz */
+ .frequency_stepsize = 0,
+ .symbol_rate_min = 1000000,
+ .symbol_rate_max = 45000000,
+ .symbol_rate_tolerance = 500,
+ .caps = FE_CAN_INVERSION_AUTO |
+ FE_CAN_FEC_AUTO |
+ FE_CAN_QPSK,
+ },
+ .init = bar_init,
+ .sleep = bar_sleep,
+ .release = bar_release,
+ .set_frontend = bar_set_frontend,
+ .get_frontend = bar_get_frontend,
+ .read_status = bar_get_status_and_stats,
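+
+A tiny illustration of that comparison (illustrative C only, not the
+kernel code), remembering that a numerically lower "prio" wins:
+
+    /* Lower number == higher priority, so take the numeric minimum. */
+    static int effective_prio(int normal_prio, int top_pi_waiter_prio)
+    {
+        return top_pi_waiter_prio < normal_prio ?
+               top_pi_waiter_prio : normal_prio;
+    }
+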
+ .i2c_gate_ctrl = bar_i2c_gate_ctrl,
+ .get_frontend_algo = bar_get_algo,
+ .tune = bar_tune,
+
+ /* Satellite-specific */
+ .diseqc_send_master_cmd = bar_send_diseqc_msg,
+ .diseqc_send_burst = bar_send_burst,
+ .set_tone = bar_set_tone,
+ .set_voltage = bar_set_voltage,
+ };
+
+.. note::
+
+ #) For satellite digital TV standards (DVB-S, DVB-S2, ISDB-S), the
+ frequencies are specified in kHz, while, for terrestrial and cable
+ standards, they're specified in Hz. Due to that, if the same frontend
+ supports both types, you'll need to have two separate
+ :c:type:`dvb_frontend_ops` structures, one for each standard.
+ #) The ``.i2c_gate_ctrl`` field is present only when the hardware
+ allows controlling an I2C gate (either directly or via some GPIO pin),
+ in order to remove the tuner from the I2C bus after a channel is
+ tuned.
+ #) All new drivers should implement the
+ :ref:`DVBv5 statistics <dvbv5_stats>` via ``.read_status``.
+ Yet, there are a number of callbacks meant to get statistics for
+ signal strength, S/N and UCB. Those are there to provide backward
+ compatibility with legacy applications that don't support the DVBv5
+ API. Implementing those callbacks are optional. Those callbacks may be
+ removed in the future, after we have all existing drivers supporting
+ DVBv5 stats.
+ #) Other callbacks are required for satellite TV standards, in order to
+ control LNBf and DiSEqC: ``.diseqc_send_master_cmd``,
+ ``.diseqc_send_burst``, ``.set_tone``, ``.set_voltage``.
+
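+As an illustration, a simplified sketch of a ``.set_voltage`` callback for the
+hypothetical ``bar`` driver above, assuming the demod selects the LNBf voltage
+via a register at address 0x20 written through a made-up ``bar_write_u8()``
+helper, could look like::
+
+    static int bar_set_voltage(struct dvb_frontend *fe,
+                               enum fe_sec_voltage voltage)
+    {
+        struct bar_state *state = fe->demodulator_priv;
+
+        switch (voltage) {
+        case SEC_VOLTAGE_13:    /* vertical/right-hand polarization */
+            return bar_write_u8(state, 0x20, 0x01);
+        case SEC_VOLTAGE_18:    /* horizontal/left-hand polarization */
+            return bar_write_u8(state, 0x20, 0x02);
+        case SEC_VOLTAGE_OFF:
+        default:
+            return bar_write_u8(state, 0x20, 0x00);
+        }
+    }
+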
+.. |delta| unicode:: U+00394
+
+The ``drivers/media/dvb-core/dvb_frontend.c`` file has a kernel thread which is
+responsible for tuning the device. It supports multiple algorithms to
+detect a channel, as defined by enum :c:type:`dvbfe_algo`.
+
+The algorithm to be used is obtained via ``.get_frontend_algo``. If the driver
+doesn't fill this field in struct :c:type:`dvb_frontend_ops`, it will default to
+``DVBFE_ALGO_SW``, meaning that the dvb-core will do a zigzag when tuning,
+i.e. it will first try the specified center frequency ``f``,
+then ``f`` + |delta|, ``f`` - |delta|, ``f`` + 2 x |delta|,
+``f`` - 2 x |delta| and so on.
+
+If the hardware internally has some sort of zigzag algorithm, you should
+define a ``.get_frontend_algo`` function that returns ``DVBFE_ALGO_HW``.
+
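+A minimal sketch of such a callback, for the hypothetical driver ``foo`` used
+in the examples above, could be::
+
+    static enum dvbfe_algo foo_get_algo(struct dvb_frontend *fe)
+    {
+        /* The demod zigzags internally, so don't let dvb-core do it */
+        return DVBFE_ALGO_HW;
+    }
+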
+.. note::
+
+   The frontend core also supports
+   a third type (``DVBFE_ALGO_CUSTOM``), in order to allow the driver to
+   define its own hardware-assisted algorithm. Very few devices need to
+   use it nowadays. Using ``DVBFE_ALGO_CUSTOM`` requires providing other
+   function callbacks at struct :c:type:`dvb_frontend_ops`.
+
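+In practice, ``DVBFE_ALGO_CUSTOM`` typically means implementing the ``.search``
+callback, which runs the driver's own channel search. A simplified sketch,
+reusing the hypothetical ``bar`` driver and assuming a ``bar_state`` private
+struct and a made-up ``bar_hw_search()`` helper, could look like::
+
+    static enum dvbfe_search bar_search(struct dvb_frontend *fe)
+    {
+        struct bar_state *state = fe->demodulator_priv;
+
+        /* Program the hardware and let it hunt for the carrier itself */
+        if (bar_hw_search(state) < 0)
+            return DVBFE_ALGO_SEARCH_ERROR;
+
+        return DVBFE_ALGO_SEARCH_SUCCESS;
+    }
+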
+Attaching frontend driver to the bridge driver
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Before using the Digital TV frontend core, the bridge driver should attach
the frontend demod, tuner and SEC devices and call
@@ -74,6 +232,287 @@ part of their handler for :c:type:`device_driver`.\ ``resume()``.
A few other optional functions are provided to handle some special cases.
+.. _dvbv5_stats:
+
+Digital TV Frontend statistics
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Introduction
+^^^^^^^^^^^^
+
+Digital TV frontends provide a range of
+:ref:`statistics <frontend-stat-properties>` meant to help tuning the device
+and measuring the quality of service.
+
+For each statistics measurement, the driver should set the type of scale used,
+or ``FE_SCALE_NOT_AVAILABLE`` if the statistic is not available at a given
+time. Drivers should also provide the number of statistics for each type;
+that's usually 1 for most video standards [#f2]_.
+
+Drivers should initialize each statistic counter with length and
+scale in their init code. For example, if the frontend provides signal
+strength, it should have, in its init code::
+
+    struct dtv_frontend_properties *c = &state->fe.dtv_property_cache;
+
+    c->strength.len = 1;
+    c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+
+And, when the statistics get updated, set the scale::
+
+    c->strength.stat[0].scale = FE_SCALE_DECIBEL;
+    c->strength.stat[0].uvalue = strength;
+
+.. [#f2] For ISDB-T, it may provide both a global statistic and a per-layer
+   set of statistics. In such cases, ``len`` should be equal to 4. The first
+   value corresponds to the global stat; the other ones to each layer, e.g.:
+
+   - ``c->cnr.stat[0]`` for the global S/N carrier ratio,
+   - ``c->cnr.stat[1]`` for the Layer A S/N carrier ratio,
+   - ``c->cnr.stat[2]`` for the Layer B S/N carrier ratio,
+   - ``c->cnr.stat[3]`` for the Layer C S/N carrier ratio.
+
+.. note:: Please prefer to use ``FE_SCALE_DECIBEL`` instead of
+          ``FE_SCALE_RELATIVE`` for signal strength and CNR measurements.
+
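+As an illustration, and assuming the DVBv5 convention that ``FE_SCALE_DECIBEL``
+values are signed and expressed in 0.001 dB steps, a CNR of 12.3 dB would be
+stored as::
+
+    c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+    c->cnr.stat[0].svalue = 12300; /* 12.3 dB, in 0.001 dB units */
+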
+Groups of statistics
+^^^^^^^^^^^^^^^^^^^^
+
+There are several groups of statistics currently supported:
+
+Signal strength (:ref:`DTV-STAT-SIGNAL-STRENGTH`)
+  - Measures the signal strength level at the analog part of the tuner or
+    demod.
+
+  - Typically obtained from the gain applied to the tuner and/or frontend
+    in order to detect the carrier. When no carrier is detected, the gain is
+    at its maximum value (so, the strength is at its minimum).
+
+  - As the gain is visible through the set of registers that adjust the gain,
+    typically, this statistic is always available [#f3]_.
+
+  - Drivers should try to make it available at all times, as this statistic
+    can be used when adjusting an antenna position and to check for problems
+    in the cabling.
+
+  .. [#f3] On a few devices, the gain keeps floating if there is no carrier.
+     On such devices, the strength report should first check whether a carrier
+     is detected at the tuner (``FE_HAS_CARRIER``, see :c:type:`fe_status`),
+     and otherwise return the lowest possible value.
+
+Carrier Signal to Noise ratio (:ref:`DTV-STAT-CNR`)
+  - Signal to Noise ratio for the main carrier.
+
+  - Signal to Noise measurement depends on the device. On some hardware, it is
+    available when the main carrier is detected. On such hardware, the CNR
+    measurement usually comes from the tuner (e.g. after ``FE_HAS_CARRIER``,
+    see :c:type:`fe_status`).
+
+    On other devices, it requires inner FEC decoding,
+    as the frontend measures it indirectly from other parameters (e.g. after
+    ``FE_HAS_VITERBI``, see :c:type:`fe_status`).
+
+    Having it available after inner FEC is more common.
+
+Bit counts post-FEC (:ref:`DTV-STAT-POST-ERROR-BIT-COUNT` and :ref:`DTV-STAT-POST-TOTAL-BIT-COUNT`)
+  - Those counters measure the number of bits and bit errors after
+    the forward error correction (FEC) on the inner coding block
+    (after Viterbi, LDPC or other inner code).
+
+  - Due to its nature, those statistics depend on full coding lock
+    (e.g. after ``FE_HAS_SYNC`` or after ``FE_HAS_LOCK``,
+    see :c:type:`fe_status`).
+
+Bit counts pre-FEC (:ref:`DTV-STAT-PRE-ERROR-BIT-COUNT` and :ref:`DTV-STAT-PRE-TOTAL-BIT-COUNT`)
+  - Those counters measure the number of bits and bit errors before
+    the forward error correction (FEC) on the inner coding block
+    (before Viterbi, LDPC or other inner code).
+
+  - Not all frontends provide this kind of statistics.
+
+  - Due to its nature, those statistics depend on inner coding lock (e.g.
+    after ``FE_HAS_VITERBI``, see :c:type:`fe_status`).
+
+Block counts (:ref:`DTV-STAT-ERROR-BLOCK-COUNT` and :ref:`DTV-STAT-TOTAL-BLOCK-COUNT`)
+  - Those counters measure the number of blocks and block errors after
+    the forward error correction (FEC) on the inner coding block
+    (after Viterbi, LDPC or other inner code).
+
+  - Due to its nature, those statistics depend on full coding lock
+    (e.g. after ``FE_HAS_SYNC`` or after
+    ``FE_HAS_LOCK``, see :c:type:`fe_status`).
+
+.. note:: All counters should be monotonically increased as they're
+          collected from the hardware.
+
+A typical example of the logic that handles status and statistics is::
+
+    static int foo_get_status_and_stats(struct dvb_frontend *fe)
+    {
+        struct foo_state *state = fe->demodulator_priv;
+        struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+
+        int rc;
+        enum fe_status status;
+
+        /* Both status and strength are always available */
+        rc = foo_read_status(fe, &status);
+        if (rc < 0)
+            return rc;
+
+        rc = foo_read_strength(fe);
+        if (rc < 0)
+            return rc;
+
+        /* Check if CNR is available */
+        if (!(status & FE_HAS_CARRIER))
+            return 0;
+
+        rc = foo_read_cnr(fe);
+        if (rc < 0)
+            return rc;
+
+        /* Check if pre-BER stats are available */
+        if (!(status & FE_HAS_VITERBI))
+            return 0;
+
+        rc = foo_get_pre_ber(fe);
+        if (rc < 0)
+            return rc;
+
+        /* Check if post-BER stats are available */
+        if (!(status & FE_HAS_SYNC))
+            return 0;
+
+        rc = foo_get_post_ber(fe);
+        if (rc < 0)
+            return rc;
+
+        return 0;
+    }
+
+    static const struct dvb_frontend_ops ops = {
+        /* ... */
+        .read_status = foo_get_status_and_stats,
+    };
+
+Statistics collection
+^^^^^^^^^^^^^^^^^^^^^
+
+On almost all frontend hardware, the bit and byte counts are stored by
+the hardware after a certain amount of time or after the total bit/block
+counter reaches a certain value (usually programmable), for example,
+every 1000 ms or after receiving 1,000,000 bits.
+
+So, if you read the registers too soon, you'll end up reading the same
+value as in the previous reading, causing the monotonic value to be
+incremented too often.
+
+Drivers should take the responsibility to avoid reading the registers too
+often. That can be done using one of two approaches:
+
+If the driver has a bit that indicates when collected data is ready
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+The driver should check such a bit before making the statistics available.
+
+An example of such behavior can be found in this code snippet (adapted
+from the mb86a20s driver's logic)::
+
+    static int foo_get_pre_ber(struct dvb_frontend *fe)
+    {
+        struct foo_state *state = fe->demodulator_priv;
+        struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+        int rc, bit_error;
+
+        /* Check if the BER measures are already available */
+        rc = foo_read_u8(state, 0x54);
+        if (rc < 0)
+            return rc;
+
+        if (!rc)
+            return 0;
+
+        /* Read Bit Error Count */
+        bit_error = foo_read_u32(state, 0x55);
+        if (bit_error < 0)
+            return bit_error;
+
+        /* Read Total Bit Count */
+        rc = foo_read_u32(state, 0x51);
+        if (rc < 0)
+            return rc;
+
+        c->pre_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+        c->pre_bit_error.stat[0].uvalue += bit_error;
+        c->pre_bit_count.stat[0].scale = FE_SCALE_COUNTER;
+        c->pre_bit_count.stat[0].uvalue += rc;
+
+        return 0;
+    }
+
+If the driver doesn't provide a statistics-available check bit
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+A few devices, however, may not provide a way to check if the stats are
+available (or the way to check it is unknown). They may not even provide
+a way to directly read the total number of bits or blocks.
+
+On those devices, the driver needs to ensure that it won't read the
+registers too often and/or has to estimate the total number of bits/blocks.
+
+On such drivers, a typical routine to get statistics would look like
+this (adapted from the dib8000 driver's logic)::
+
+    struct foo_state {
+        /* ... */
+
+        unsigned long per_jiffies_stats;
+    };
+
+    static int foo_get_pre_ber(struct dvb_frontend *fe)
+    {
+        struct foo_state *state = fe->demodulator_priv;
+        struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+        int bit_error;
+        u64 bits;
+
+        /* Check if the time for stats has elapsed */
+        if (!time_after(jiffies, state->per_jiffies_stats))
+            return 0;
+
+        /* Next stat should be collected in 1000 ms */
+        state->per_jiffies_stats = jiffies + msecs_to_jiffies(1000);
+
+        /* Read Bit Error Count */
+        bit_error = foo_read_u32(state, 0x55);
+        if (bit_error < 0)
+            return bit_error;
+
+        /*
+         * On this particular frontend, there's no register that
+         * would provide the number of bits per 1000ms sample. So,
+         * some function would calculate it based on DTV properties
+         */
+        bits = get_number_of_bits_per_1000ms(fe);
+
+        c->pre_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+        c->pre_bit_error.stat[0].uvalue += bit_error;
+        c->pre_bit_count.stat[0].scale = FE_SCALE_COUNTER;
+        c->pre_bit_count.stat[0].uvalue += bits;
+
+        return 0;
+    }
+
+Please notice that, in both cases, we're getting the statistics using the
+:c:type:`dvb_frontend_ops` ``.read_status`` callback. The rationale is that
+the frontend core will automatically call this function periodically
+(usually, 3 times per second, when the frontend is locked).
+
+That ensures that we won't miss collecting a counter and that the
+monotonic stats are incremented at the right time.
+
+Digital TV Frontend functions and types
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
.. kernel-doc:: drivers/media/dvb-core/dvb_frontend.h
diff --git a/Documentation/media/kapi/v4l2-event.rst b/Documentation/media/kapi/v4l2-event.rst
index 9a5e31546ae3..9938d21ef4d1 100644
--- a/Documentation/media/kapi/v4l2-event.rst
+++ b/Documentation/media/kapi/v4l2-event.rst
@@ -67,6 +67,8 @@ type).
The ops argument allows the driver to specify a number of callbacks:
+.. tabularcolumns:: |p{1.5cm}|p{16.0cm}|
+
======== ==============================================================
Callback Description
======== ==============================================================
diff --git a/Documentation/media/media_kapi.rst b/Documentation/media/media_kapi.rst
index bc0638956a43..83da736fad72 100644
--- a/Documentation/media/media_kapi.rst
+++ b/Documentation/media/media_kapi.rst
@@ -20,7 +20,9 @@ more details.
For more details see the file COPYING in the source distribution of Linux.
-.. class:: toc-title
+.. only:: html
+
+ .. class:: toc-title
Table of Contents
diff --git a/Documentation/media/media_uapi.rst b/Documentation/media/media_uapi.rst
index fd8ebe002cd2..28eb35a1f965 100644
--- a/Documentation/media/media_uapi.rst
+++ b/Documentation/media/media_uapi.rst
@@ -14,7 +14,9 @@ any later version published by the Free Software Foundation. A copy of
the license is included in the chapter entitled "GNU Free Documentation
License".
-.. class:: toc-title
+.. only:: html
+
+ .. class:: toc-title
Table of Contents
diff --git a/Documentation/media/typical_media_device.svg b/Documentation/media/typical_media_device.svg
index 0c8abd69f39a..d6fad90ec199 100644
--- a/Documentation/media/typical_media_device.svg
+++ b/Documentation/media/typical_media_device.svg
@@ -1,2948 +1,106 @@
-  [original 2948-line Inkscape SVG removed: the "typical media device" block diagram showing Tuner FM/TV, Demod, Demux, Conditional Access Module, Satellite Equipment Control (SEC), audio/video decoders and encoders, Button Key/IR input logic, EEPROM, Sensor, Processing blocks, Webcam, the I2C Bus (control bus), the System Bus and the "Digital TV Frontend" grouping; the patch replaces it with a 106-line simplified SVG]
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 9202.3859,4235.929 53.795,0"
- id="path707"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 9289.6759,4235.929 53.795,0"
- id="path709"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 9375.9509,4235.929 53.795,0"
- id="path711"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 9463.2409,4235.929 53.795,0"
- id="path713"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 9549.5159,4235.929 53.795,0"
- id="path715"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 9635.7909,4235.929 54.81,0"
- id="path717"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 9723.0809,4235.929 53.795,0"
- id="path719"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 9809.3559,4235.929 53.795,0"
- id="path721"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 9896.6459,4235.929 53.795,0"
- id="path723"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 9982.9209,4235.929 53.7951,0"
- id="path725"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 10070.211,4235.929 53.795,0"
- id="path727"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 10156.486,4235.929 53.795,0"
- id="path729"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 10242.761,4235.929 54.81,0"
- id="path731"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 10330.051,4235.929 53.795,0"
- id="path733"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 10416.326,4235.929 53.795,0"
- id="path735"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 10503.616,4235.929 53.795,0"
- id="path737"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 10589.891,4235.929 53.795,0"
- id="path739"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 10677.181,4235.929 53.795,0"
- id="path741"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 10763.456,4235.929 53.795,0"
- id="path743"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 10849.731,4235.929 54.81,0"
- id="path745"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 10937.021,4235.929 53.795,0"
- id="path747"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 11023.296,4235.929 53.795,0"
- id="path749"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 11110.586,4235.929 53.795,0"
- id="path751"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 11196.861,4235.929 53.795,0"
- id="path753"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 11284.151,4235.929 53.795,0"
- id="path755"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 11370.426,4235.929 53.795,0"
- id="path757"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 11457.716,4235.929 53.795,0"
- id="path759"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 11543.991,4235.929 53.795,0"
- id="path761"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 11630.266,4235.929 53.795,0"
- id="path763"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 11717.556,4235.929 53.795,0"
- id="path765"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 11803.831,4235.929 53.795,0"
- id="path767"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 11891.121,4235.929 53.795,0"
- id="path769"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 11977.396,4235.929 53.795,0"
- id="path771"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 12064.686,4235.929 53.795,0"
- id="path773"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 12150.961,4235.929 53.795,0"
- id="path775"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 12237.236,4235.929 53.795,0"
- id="path777"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 12324.526,4235.929 53.795,0"
- id="path779"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 12410.801,4235.929 53.795,0"
- id="path781"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 12498.091,4235.929 53.795,0"
- id="path783"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 12584.366,4235.929 53.795,0"
- id="path785"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 12671.656,4235.929 53.795,0"
- id="path787"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 12757.931,4235.929 53.795,0"
- id="path789"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 12844.206,4235.929 54.81,0"
- id="path791"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 12931.496,4235.929 53.795,0"
- id="path793"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 13017.771,4235.929 53.795,0"
- id="path795"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 13105.061,4235.929 53.795,0"
- id="path797"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 13191.336,4235.929 53.795,0"
- id="path799"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 13278.626,4235.929 53.795,0"
- id="path801"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 13364.901,4235.929 53.795,0"
- id="path803"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 13451.176,4235.929 54.81,0"
- id="path805"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 13538.466,4235.929 53.795,0"
- id="path807"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 13624.741,4235.929 53.795,0"
- id="path809"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 13712.031,4235.929 53.795,0"
- id="path811"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 13798.306,4235.929 53.795,0"
- id="path813"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 13885.596,4235.929 53.795,0"
- id="path815"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 13971.871,4235.929 53.795,0"
- id="path817"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 14058.146,4235.929 54.81,0"
- id="path819"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 14145.436,4235.929 53.795,0"
- id="path821"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 14231.711,4235.929 53.795,0"
- id="path823"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 14319.001,4235.929 53.795,0"
- id="path825"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 14405.276,4235.929 53.795,0"
- id="path827"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 14492.566,4235.929 53.795,0"
- id="path829"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 14578.841,4235.929 53.795,0"
- id="path831"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 14665.116,4235.929 54.81,0"
- id="path833"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 14752.406,4235.929 53.795,0"
- id="path835"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 14838.681,4235.929 53.795,0"
- id="path837"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 14925.971,4235.929 53.795,0"
- id="path839"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 15012.246,4235.929 53.795,0"
- id="path841"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 15099.536,4235.929 53.795,0"
- id="path843"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 15185.811,4235.929 53.795,0"
- id="path845"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 15272.086,4235.929 54.81,0"
- id="path847"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 15359.376,4235.929 53.795,0"
- id="path849"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 15445.651,4235.929 53.795,0"
- id="path851"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 15532.941,4235.929 53.795,0"
- id="path853"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 15619.216,4235.929 53.795,0"
- id="path855"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 15706.506,4235.929 53.795,0"
- id="path857"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 15792.781,4235.929 53.795,0"
- id="path859"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 15880.071,4235.929 53.795,0"
- id="path861"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 15966.346,4235.929 53.795,0"
- id="path863"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 16052.621,4235.929 53.795,0"
- id="path865"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 16139.911,4235.929 53.795,0"
- id="path867"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 16226.186,4235.929 53.795,0"
- id="path869"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 16313.476,4235.929 53.795,0"
- id="path871"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 16399.751,4235.929 53.795,0"
- id="path873"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 16487.041,4235.929 53.795,0"
- id="path875"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 16573.316,4235.929 53.795,0"
- id="path877"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 16659.591,4235.929 53.795,0"
- id="path879"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 16746.881,4235.929 53.795,0"
- id="path881"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 16833.156,4235.929 53.795,0"
- id="path883"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 16920.446,4235.929 53.795,0"
- id="path885"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 17006.721,4235.929 53.795,0"
- id="path887"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 17094.011,4235.929 53.795,0"
- id="path889"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 17180.286,4235.929 53.795,0"
- id="path891"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 17266.561,4235.929 54.81,0"
- id="path893"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 17353.851,4235.929 53.795,0"
- id="path895"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 17440.126,4235.929 53.795,0"
- id="path897"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 17527.416,4235.929 53.795,0"
- id="path899"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 17613.691,4235.929 53.795,0"
- id="path901"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 17700.981,4235.929 53.795,0"
- id="path903"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 17787.256,4235.929 53.795,0"
- id="path905"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 17873.531,4235.929 54.81,0"
- id="path907"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 17960.821,4235.929 53.795,0"
- id="path909"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 18047.096,4235.929 53.795,0"
- id="path911"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 18134.386,4235.929 53.795,0"
- id="path913"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 18220.661,4235.929 53.795,0"
- id="path915"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 18307.951,4235.929 53.795,0"
- id="path917"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 18394.226,4235.929 53.795,0"
- id="path919"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 18480.501,4235.929 54.81,0"
- id="path921"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 18567.791,4235.929 53.795,0"
- id="path923"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 18654.066,4235.929 53.795,0"
- id="path925"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 18741.356,4235.929 c 17.255,-0.9999 35.525,-1.9999 53.795,-4.9997"
- id="path927"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 18827.631,4225.9296 c 17.255,-3.9998 34.51,-8.9995 51.765,-13.9992"
- id="path929"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 18910.861,4200.9311 c 16.24,-5.9996 32.48,-12.9992 48.72,-20.9987"
- id="path931"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 18989.016,4164.9333 c 15.225,-7.9996 31.465,-16.999 45.675,-26.9984"
- id="path933"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 19062.096,4118.936 c 14.21,-9.9994 28.42,-20.9987 42.63,-31.9981"
- id="path935"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 19129.086,4064.9393 c 13.195,-11.9993 25.375,-24.9985 37.555,-37.9978"
- id="path937"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 19188.971,4002.943 c 11.165,-13.9992 22.33,-27.9983 33.495,-41.9975"
- id="path939"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 19240.736,3933.9471 c 10.15,-14.9991 19.285,-29.9982 27.405,-44.9973"
- id="path941"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 19283.366,3859.9516 c 7.105,-15.9991 14.21,-32.9981 20.3,-48.9971"
- id="path943"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 19314.831,3779.9564 c 5.075,-16.999 9.135,-33.998 13.195,-50.997"
- id="path945"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 19333.101,3696.9613 c 2.03,-17.9989 4.06,-34.9979 4.06,-52.9968"
- id="path947"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 19337.161,3611.9664 0,-53.9967"
- id="path949"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 19337.161,3525.9716 0,-52.9968"
- id="path951"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 19337.161,3440.9767 0,-52.9968"
- id="path953"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 19337.161,3354.9819 0,-52.9969"
- id="path955"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 19337.161,3269.987 0,-52.9969"
- id="path957"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 19337.161,3183.9921 0,-52.9968"
- id="path959"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 19337.161,3098.9972 0,-52.9968"
- id="path961"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 19337.161,3014.0023 0,-53.9967"
- id="path963"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 19337.161,2928.0075 0,-52.9968"
- id="path965"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 19337.161,2843.0126 0,-52.9968"
- id="path967"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 19337.161,2757.0177 0,-52.9968"
- id="path969"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 19337.161,2672.0228 0,-52.9968"
- id="path971"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 19337.161,2586.028 0,-52.9968"
- id="path973"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 19337.161,2501.0331 0,-52.9968"
- id="path975"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 19337.161,2415.0383 0,-52.9969"
- id="path977"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 19337.161,2330.0434 0,-52.9969"
- id="path979"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 19337.161,2245.0485 0,-52.9969"
- id="path981"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 19337.161,2159.0536 0,-52.9968"
- id="path983"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 19337.161,2074.0587 0,-52.9968"
- id="path985"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 19337.161,1988.0639 0,-52.9968"
- id="path987"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 19337.161,1903.069 0,-52.9968"
- id="path989"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 19337.161,1817.0741 0,-52.9968"
- id="path991"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 19337.161,1732.0792 0,-52.9968"
- id="path993"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 19337.161,1647.0843 0,-52.9968"
- id="path995"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 19337.161,1561.0895 0,-52.9968"
- id="path997"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 19337.161,1476.0946 0,-52.9968"
- id="path999"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 19337.161,1390.0998 0,-52.9969"
- id="path1001"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 19337.161,1305.1049 0,-52.9969"
- id="path1003"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 19337.161,1219.11 c -1.015,-16.999 -3.045,-34.9979 -5.075,-51.9969"
- id="path1005"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 19325.996,1135.1151 c -4.06,-16.999 -8.12,-34.9979 -14.21,-50.997"
- id="path1007"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 19300.621,1053.12 c -6.09,-15.9991 -13.195,-32.998 -21.315,-48.9971"
- id="path1009"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 19264.081,976.1246 c -9.135,-15.99904 -18.27,-30.99814 -28.42,-45.99724"
- id="path1011"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 19216.376,904.12892 c -10.15,-13.99916 -21.315,-27.99832 -33.495,-41.99748"
- id="path1013"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 19160.551,838.13288 c -12.18,-12.99922 -24.36,-24.9985 -37.555,-36.99778"
- id="path1015"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 19097.621,780.13636 c -14.21,-11.99928 -28.42,-21.99868 -42.63,-32.99802"
- id="path1017"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 19027.586,729.13942 c -15.225,-8.99946 -30.45,-17.99892 -46.69,-26.99838"
- id="path1019"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 18951.461,688.14188 c -16.24,-7.99952 -32.48,-13.99916 -49.735,-19.9988"
- id="path1021"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 18870.261,657.14374 c -17.255,-4.9997 -34.51,-8.99946 -51.765,-11.99928"
- id="path1023"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 18786.016,640.14476 c -18.27,-2.99982 -35.525,-3.99976 -53.795,-3.99976"
- id="path1025"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 18699.741,636.145 -53.795,0"
- id="path1027"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 18612.451,636.145 -53.795,0"
- id="path1029"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 18526.176,636.145 -53.795,0"
- id="path1031"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 18438.886,636.145 -53.795,0"
- id="path1033"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 18352.611,636.145 -53.795,0"
- id="path1035"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 18266.336,636.145 -54.81,0"
- id="path1037"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 18179.046,636.145 -53.795,0"
- id="path1039"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 18092.771,636.145 -53.795,0"
- id="path1041"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 18005.481,636.145 -53.795,0"
- id="path1043"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 17919.206,636.145 -53.795,0"
- id="path1045"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 17831.916,636.145 -53.795,0"
- id="path1047"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 17745.641,636.145 -53.795,0"
- id="path1049"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 17659.366,636.145 -54.81,0"
- id="path1051"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 17572.076,636.145 -53.795,0"
- id="path1053"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 17485.801,636.145 -53.795,0"
- id="path1055"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 17398.511,636.145 -53.795,0"
- id="path1057"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 17312.236,636.145 -53.795,0"
- id="path1059"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 17224.946,636.145 -53.795,0"
- id="path1061"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 17138.671,636.145 -53.795,0"
- id="path1063"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 17052.396,636.145 -54.81,0"
- id="path1065"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 16965.106,636.145 -53.795,0"
- id="path1067"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 16878.831,636.145 -53.795,0"
- id="path1069"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 16791.541,636.145 -53.795,0"
- id="path1071"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 16705.266,636.145 -53.795,0"
- id="path1073"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 16617.976,636.145 -53.795,0"
- id="path1075"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 16531.701,636.145 -53.795,0"
- id="path1077"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 16445.426,636.145 -54.81,0"
- id="path1079"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 16358.136,636.145 -53.795,0"
- id="path1081"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 16271.861,636.145 -53.795,0"
- id="path1083"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 16184.571,636.145 -53.795,0"
- id="path1085"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 16098.296,636.145 -53.795,0"
- id="path1087"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 16011.006,636.145 -53.795,0"
- id="path1089"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 15924.731,636.145 -53.795,0"
- id="path1091"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 15837.441,636.145 -53.795,0"
- id="path1093"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 15751.166,636.145 -53.795,0"
- id="path1095"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 15664.891,636.145 -53.795,0"
- id="path1097"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 15577.601,636.145 -53.795,0"
- id="path1099"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 15491.326,636.145 -53.795,0"
- id="path1101"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 15404.036,636.145 -53.795,0"
- id="path1103"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 15317.761,636.145 -53.795,0"
- id="path1105"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 15230.471,636.145 -53.795,0"
- id="path1107"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 15144.196,636.145 -53.795,0"
- id="path1109"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 15057.921,636.145 -53.795,0"
- id="path1111"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 14970.631,636.145 -53.795,0"
- id="path1113"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 14884.356,636.145 -53.795,0"
- id="path1115"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 14797.066,636.145 -53.795,0"
- id="path1117"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 14710.791,636.145 -53.795,0"
- id="path1119"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 14623.501,636.145 -53.795,0"
- id="path1121"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 14537.226,636.145 -53.795,0"
- id="path1123"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 14450.951,636.145 -54.81,0"
- id="path1125"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 14363.661,636.145 -53.795,0"
- id="path1127"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 14277.386,636.145 -53.795,0"
- id="path1129"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 14190.096,636.145 -53.795,0"
- id="path1131"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 14103.821,636.145 -53.795,0"
- id="path1133"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 14016.531,636.145 -53.795,0"
- id="path1135"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 13930.256,636.145 -53.795,0"
- id="path1137"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 13843.981,636.145 -54.81,0"
- id="path1139"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 13756.691,636.145 -53.795,0"
- id="path1141"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 13670.416,636.145 -53.795,0"
- id="path1143"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 13583.126,636.145 -53.795,0"
- id="path1145"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 13496.851,636.145 -53.795,0"
- id="path1147"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 13409.561,636.145 -53.795,0"
- id="path1149"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 13323.286,636.145 -53.795,0"
- id="path1151"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 13237.011,636.145 -54.81,0"
- id="path1153"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 13149.721,636.145 -53.795,0"
- id="path1155"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 13063.446,636.145 -53.795,0"
- id="path1157"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 12976.156,636.145 -53.795,0"
- id="path1159"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 12889.881,636.145 -53.795,0"
- id="path1161"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 12802.591,636.145 -53.795,0"
- id="path1163"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 12716.316,636.145 -53.795,0"
- id="path1165"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 12630.041,636.145 -54.81,0"
- id="path1167"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 12542.751,636.145 -53.795,0"
- id="path1169"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 12456.476,636.145 -53.795,0"
- id="path1171"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 12369.186,636.145 -53.795,0"
- id="path1173"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 12282.911,636.145 -53.795,0"
- id="path1175"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 12195.621,636.145 -53.795,0"
- id="path1177"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 12109.346,636.145 -53.795,0"
- id="path1179"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 12022.056,636.145 -53.795,0"
- id="path1181"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 11935.781,636.145 -53.795,0"
- id="path1183"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 11849.506,636.145 -53.795,0"
- id="path1185"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 11762.216,636.145 -53.795,0"
- id="path1187"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 11675.941,636.145 -53.795,0"
- id="path1189"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 11588.651,636.145 -53.795,0"
- id="path1191"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 11502.376,636.145 -53.795,0"
- id="path1193"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 11415.086,636.145 -53.795,0"
- id="path1195"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 11328.811,636.145 -53.795,0"
- id="path1197"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 11242.536,636.145 -53.795,0"
- id="path1199"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 11155.246,636.145 -53.795,0"
- id="path1201"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 11068.971,636.145 -53.795,0"
- id="path1203"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 10981.681,636.145 -53.795,0"
- id="path1205"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 10895.406,636.145 -53.795,0"
- id="path1207"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 10808.116,636.145 -53.795,0"
- id="path1209"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 10721.841,636.145 -53.795,0"
- id="path1211"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 10635.566,636.145 -53.795,0"
- id="path1213"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 10548.276,636.145 -53.795,0"
- id="path1215"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 10462.001,636.145 -53.795,0"
- id="path1217"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 10374.711,636.145 -53.795,0"
- id="path1219"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 10288.436,636.145 -53.795,0"
- id="path1221"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 10201.146,636.145 -53.795,0"
- id="path1223"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 10114.871,636.145 -53.795,0"
- id="path1225"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 10028.596,636.145 -54.8101,0"
- id="path1227"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 9941.3059,636.145 -53.795,0"
- id="path1229"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 9855.0309,636.145 -53.795,0"
- id="path1231"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 9767.7409,636.145 -53.795,0"
- id="path1233"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 9681.4659,636.145 -53.795,0"
- id="path1235"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 9594.1759,636.145 -53.795,0"
- id="path1237"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 9507.9009,636.145 -53.795,0"
- id="path1239"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 9421.6259,636.145 -54.81,0"
- id="path1241"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 9334.3359,636.145 -53.795,0"
- id="path1243"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 9248.0609,636.145 -53.795,0"
- id="path1245"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 9160.7709,636.145 -53.795,0"
- id="path1247"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 9074.4959,636.145 -53.795,0"
- id="path1249"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 8987.2059,636.145 -53.795,0"
- id="path1251"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 8900.9309,636.145 -53.795,0"
- id="path1253"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 8814.6559,636.145 -54.81,0"
- id="path1255"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 8727.3659,636.145 -53.795,0"
- id="path1257"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 8641.0909,636.145 -53.795,0"
- id="path1259"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 8553.8009,636.145 -53.795,0"
- id="path1261"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 8467.5259,636.145 -53.795,0"
- id="path1263"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 8380.2359,636.145 -53.795,0"
- id="path1265"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 8293.9609,636.145 -53.795,0"
- id="path1267"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 8207.6859,636.145 -54.81,0"
- id="path1269"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 8120.3959,636.145 -53.795,0"
- id="path1271"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 8034.1209,636.145 -53.795,0"
- id="path1273"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 7946.8309,636.145 -53.795,0"
- id="path1275"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 7860.5559,636.145 -53.795,0"
- id="path1277"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 7773.2659,636.145 -53.795,0"
- id="path1279"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 7686.9909,636.145 -53.795,0"
- id="path1281"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 7599.7009,636.145 -53.795,0"
- id="path1283"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 7513.4259,636.145 -53.795,0"
- id="path1285"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 7427.1509,636.145 -53.795,0"
- id="path1287"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 7339.8609,636.145 -53.795,0"
- id="path1289"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 7253.5859,636.145 -53.795,0"
- id="path1291"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 7166.2959,636.145 -53.795,0"
- id="path1293"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 7080.0209,636.145 -53.795,0"
- id="path1295"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 6992.7309,636.145 -53.795,0"
- id="path1297"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 6906.4559,636.145 -53.795,0"
- id="path1299"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 6820.1809,636.145 -53.795,0"
- id="path1301"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 6732.8909,636.145 -53.795,0"
- id="path1303"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 6646.6159,636.145 -53.795,0"
- id="path1305"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 6559.3259,636.145 -53.795,0"
- id="path1307"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 6473.0509,636.145 -53.795,0"
- id="path1309"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 6385.7609,636.145 -53.795,0"
- id="path1311"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 6299.4859,636.145 -53.795,0"
- id="path1313"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 6213.2109,636.145 -54.81,0"
- id="path1315"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 6125.9209,636.145 -53.795,0"
- id="path1317"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 6039.6459,636.145 -53.795,0"
- id="path1319"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 5952.3559,636.145 -53.795,0"
- id="path1321"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 5866.0809,636.145 -53.795,0"
- id="path1323"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 5778.7909,636.145 -53.795,0"
- id="path1325"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 5692.5159,636.145 -53.795,0"
- id="path1327"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 5606.2409,636.145 -54.81,0"
- id="path1329"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 5518.9509,636.145 -53.795,0"
- id="path1331"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 5432.6759,636.145 -53.795,0"
- id="path1333"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 5345.3859,636.145 -53.795,0"
- id="path1335"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 5259.1109,636.145 -53.795,0"
- id="path1337"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 5171.8209,636.145 -53.795,0"
- id="path1339"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 5085.5459,636.145 -53.795,0"
- id="path1341"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 4999.2709,636.145 -54.81,0"
- id="path1343"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 4911.9809,636.145 -53.795,0"
- id="path1345"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 4825.7059,636.145 -53.795,0"
- id="path1347"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 4738.4159,636.145 -53.795,0"
- id="path1349"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 4652.1409,636.145 -53.795,0"
- id="path1351"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 4564.8509,636.145 -53.795,0"
- id="path1353"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 4478.5759,636.145 -53.795,0"
- id="path1355"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 4392.3009,636.145 -54.81,0"
- id="path1357"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 4305.0109,636.145 -53.795,0"
- id="path1359"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 4218.7359,636.145 -53.795,0"
- id="path1361"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 4131.4459,636.145 -53.795,0"
- id="path1363"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 4045.1709,636.145 -53.795,0"
- id="path1365"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 3957.8809,636.145 -53.795,0"
- id="path1367"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 3871.6059,636.145 -53.795,0"
- id="path1369"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 3785.3309,636.145 -54.81,0"
- id="path1371"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 3698.0409,636.145 -53.795,0"
- id="path1373"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 3611.7659,636.145 -53.795,0"
- id="path1375"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 3524.4759,636.145 -53.795,0"
- id="path1377"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 3438.2009,636.145 -53.795,0"
- id="path1379"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 3350.9109,636.145 -53.795,0"
- id="path1381"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 3264.6359,636.145 -53.795,0"
- id="path1383"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 3177.3459,636.145 -53.795,0"
- id="path1385"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 3091.0709,636.145 -53.795,0"
- id="path1387"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 3004.7959,636.145 -53.795,0"
- id="path1389"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 2917.5059,636.145 -53.795,0"
- id="path1391"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 2831.2309,636.145 -53.795,0"
- id="path1393"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 2743.9409,636.145 -53.795,0"
- id="path1395"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 2657.6659,636.145 -53.795,0"
- id="path1397"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 2570.3759,636.145 -53.795,0"
- id="path1399"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 2484.1009,636.145 -53.795,0"
- id="path1401"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 2397.8259,636.145 -53.795,0"
- id="path1403"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 2310.5359,636.145 -53.795,0"
- id="path1405"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 2224.2609,636.145 -53.795,0"
- id="path1407"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 2136.9709,636.145 -53.795,0"
- id="path1409"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 2050.6959,636.145 -53.795,0"
- id="path1411"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 1963.4059,636.145 -53.795,0"
- id="path1413"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 1877.1309,636.145 -53.795,0"
- id="path1415"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 1790.8559,636.145 -54.81,0"
- id="path1417"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 1703.5659,636.145 -53.795,0"
- id="path1419"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 1617.2909,636.145 -53.795,0"
- id="path1421"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 1530.0009,636.145 -53.795,0"
- id="path1423"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 1443.7259,636.145 -53.795,0"
- id="path1425"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 1356.4359,636.145 -53.795,0"
- id="path1427"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 1270.1609,636.145 -53.795,0"
- id="path1429"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 1183.8859,636.145 -54.81,0"
- id="path1431"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 1096.5959,636.145 -53.795,0"
- id="path1433"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 1010.3209,636.145 -53.795,0"
- id="path1435"
- inkscape:connector-curvature="0" /><path
- style="fill:none;stroke:#3465af;stroke-width:28.432024"
- d="m 923.0309,636.145 -53.795,0"
- id="path1437"
- inkscape:connector-curvature="0" /><g
- id="g4044"><rect
- height="1100.7"
- width="1213.6"
- y="4753.1133"
- x="21109.146"
- id="rect1441"
- style="fill:#f3e777" /><path
- d="m 20656.146,5536.413 0,-405.46 150.7,-169.16 c 82.886,-93.039 170.53,-186.62 194.77,-207.96 l 44.069,-38.798 783.23,-0.086 783.23,-0.086 0,613.5 0,613.5 -978,0 -978,0 0,-405.46 z m 1027.7,136.98 0,-78.372 -169.91,4.925 -169.91,4.9249 -5.09,45.854 c -8.249,74.303 46.711,101.04 207.69,101.04 l 137.21,0 0,-78.372 z m 235.86,-262.94 4.495,-341.31 207.2,-8.6408 207.2,-8.6408 5.144,-46.443 c 9.596,-86.615 -41.863,-102.05 -322.02,-96.607 l -246.71,4.7956 -4.438,419.08 -4.439,419.08 74.537,0 74.538,0 4.494,-341.31 z m 391.3,313.72 c 26.41,-19.286 36.255,-41.399 32.697,-73.447 l -5.09,-45.854 -174.05,0 -174.05,0 -5.38,48.984 c -9.97,90.771 0.993,97.91 150.36,97.91 99.305,0 148.27,-7.6982 175.52,-27.594 z m -627.16,-274.84 0,-77.768 -174.05,0 -174.05,0 0,66.246 c 0,36.436 4.973,71.431 11.051,77.768 6.078,6.3366 84.401,11.521 174.05,11.521 l 163,0 0,-77.768 z m 659.89,-4.9154 5.125,-74.042 -179.18,4.9155 -179.18,4.9155 -5.38,48.984 c -10.473,95.348 -2.259,99.57 183.28,94.197 l 170.2,-4.9284 5.125,-74.042 z m -659.89,-237.63 0,-78.372 -169.91,4.925 -169.91,4.925 -5.097,73.447 -5.097,73.447 175,0 175,0 0,-78.372 z m 659.86,4.925 -5.097,-73.447 -174.05,0 -174.05,0 -5.38,48.984 c -10.289,93.673 -2.146,97.91 188.15,97.91 l 175.52,0 -5.097,-73.447 z m -659.86,-228.98 0,-77.768 -137.21,0 c -97.358,0 -147.91,7.8138 -174.05,26.902 -34.952,25.523 -49.645,92.242 -25.79,117.11 6.078,6.3366 84.401,11.521 174.05,11.521 l 163,0 0,-77.768 z"
- id="path1443"
- inkscape:connector-curvature="0"
- style="fill:#ca4677" /></g><text
- style="font-size:9.10937119px;fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round"
- class="TextShape"
- id="text1489"
- transform="scale(1.1035537,0.9061634)"
- x="171.41566"
- y="9913.7109"><tspan
- font-size="635px"
- font-weight="400"
- class="TextParagraph"
- id="tspan1491"
- style="font-weight:400;font-size:482.03753662px;font-family:'Times New Roman', serif" /></text>
-<g
- id="g4048"><rect
- height="2342.4341"
- width="2320.7097"
- y="13737.451"
- x="18796.941"
- id="rect1447"
- style="fill:#6076b3" /><rect
- id="rect1451"
- height="137.78799"
- x="18532.135"
- width="302.70312"
- y="13817.405"
- style="fill:#e0ee2c;fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round" /><rect
- id="rect1453"
- height="137.78799"
- x="18532.135"
- width="302.70312"
- y="14075.544"
- style="fill:#e0ee2c;fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round" /><rect
- id="rect1455"
- height="137.78799"
- x="18532.135"
- width="302.70312"
- y="14334.443"
- style="fill:#e0ee2c;fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round" /><rect
- id="rect1457"
- height="137.78799"
- x="18532.135"
- width="302.70312"
- y="14592.582"
- style="fill:#e0ee2c;fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round" /><rect
- id="rect1459"
- height="137.78799"
- x="18532.135"
- width="302.70312"
- y="14850.721"
- style="fill:#e0ee2c;fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round" /><rect
- id="rect1461"
- height="137.78799"
- x="18532.135"
- width="302.70312"
- y="15109.62"
- style="fill:#e0ee2c;fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round" /><rect
- id="rect1463"
- height="137.78799"
- x="18532.135"
- width="302.70312"
- y="15367.759"
- style="fill:#e0ee2c;fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round" /><rect
- id="rect1465"
- height="137.78799"
- x="18532.135"
- width="302.70312"
- y="15625.896"
- style="fill:#e0ee2c;fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round" /><rect
- id="rect1467"
- height="137.78799"
- x="18532.135"
- width="302.70312"
- y="15884.035"
- style="fill:#e0ee2c;fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round" /><rect
- id="rect1469"
- height="137.78799"
- x="21080.053"
- width="302.70312"
- y="13783.14"
- style="fill:#e0ee2c;fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round" /><rect
- id="rect1471"
- height="137.78799"
- x="21080.053"
- width="302.70312"
- y="14041.277"
- style="fill:#e0ee2c;fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round" /><rect
- id="rect1473"
- height="137.78799"
- x="21080.053"
- width="302.70312"
- y="14299.416"
- style="fill:#e0ee2c;fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round" /><rect
- id="rect1475"
- height="137.78799"
- x="21080.053"
- width="302.70312"
- y="14558.315"
- style="fill:#e0ee2c;fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round" /><rect
- id="rect1477"
- height="137.78799"
- x="21080.053"
- width="302.70312"
- y="14816.454"
- style="fill:#e0ee2c;fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round" /><rect
- id="rect1479"
- height="137.78799"
- x="21080.053"
- width="302.70312"
- y="15074.593"
- style="fill:#e0ee2c;fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round" /><rect
- id="rect1481"
- height="137.78799"
- x="21080.053"
- width="302.70312"
- y="15333.492"
- style="fill:#e0ee2c;fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round" /><rect
- id="rect1483"
- height="137.78799"
- x="21080.053"
- width="302.70312"
- y="15591.631"
- style="fill:#e0ee2c;fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round" /><rect
- id="rect1485"
- height="137.78799"
- x="21080.053"
- width="302.70312"
- y="15849.769"
- style="fill:#e0ee2c;fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round" /><text
- transform="scale(1.1035537,0.9061634)"
- sodipodi:linespacing="125%"
- id="text1493"
- line-height="125%"
- x="17205.688"
- y="16777.641"
- font-size="1128.9px"
- xml:space="preserve"
- style="font-size:856.96411133px;line-height:125%;font-family:Sans;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round"><tspan
- id="tspan1495"
- x="17205.688"
- y="16777.641">CPU</tspan></text>
-</g><text
- style="fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round"
- id="text1499"
- class="TextShape"
- x="-11700.553"
- y="565.61298"><tspan
- style="font-weight:400;font-size:706px;font-family:'Times New Roman', serif"
- id="tspan1501"
- class="TextParagraph"
- font-weight="400"
- font-size="706px"><tspan
- id="tspan1503"
- transform="matrix(0,-1,1,0,8509,40173)"
- class="TextPosition"
- x="12640.447"
- y="16397.613"><tspan
- style="fill:#000000"
- id="tspan1505">PCI, USB, SPI, I2C, ...</tspan></tspan></tspan></text>
-<path
- d="m 12408.066,15561.578 -1115.084,0 0,-1420.331 2230.169,0 0,1420.331 -1115.085,0 z"
- id="path1511"
- inkscape:connector-curvature="0"
- style="fill:#cfe7f5;fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round" /><path
- d="m 12408.066,15561.578 -1115.084,0 0,-1420.331 2230.169,0 0,1420.331 -1115.085,0 z"
- id="path1513"
- inkscape:connector-curvature="0"
- style="fill:none;fill-rule:evenodd;stroke:#3465af;stroke-width:19.84712601;stroke-linejoin:round" /><text
- style="fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round"
- id="text1515"
- class="TextShape"
- x="-1394.0863"
- y="590.73016"><tspan
- style="font-weight:400;font-size:635px;font-family:'Times New Roman', serif"
- id="tspan1517"
- class="TextParagraph"
- font-weight="400"
- font-size="635px"><tspan
- id="tspan1519"
- class="TextPosition"
- x="11487.915"
- y="14672.743"><tspan
- style="fill:#000000"
- id="tspan1521">Bridge</tspan></tspan></tspan></text>
-<text
- style="fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round"
- id="text1523"
- class="TextShape"
- x="-1450.5308"
- y="1324.5078"><tspan
- style="font-weight:400;font-size:635px;font-family:'Times New Roman', serif"
- id="tspan1525"
- class="TextParagraph"
- font-weight="400"
- font-size="635px"><tspan
- id="tspan1527"
- class="TextPosition"
- x="11431.471"
- y="15406.52"><tspan
- style="fill:#000000"
- id="tspan1529"> DMA</tspan></tspan></tspan></text>
-</svg>
\ No newline at end of file
+<?xml version="1.0" encoding="UTF-8"?>
+<svg id="svg2" width="235mm" height="179mm" clip-path="url(#a)" fill-rule="evenodd" stroke-linejoin="round" stroke-width="28.222" preserveAspectRatio="xMidYMid" version="1.2" viewBox="0 0 22648.239 17899.829" xml:space="preserve" xmlns="http://www.w3.org/2000/svg" xmlns:cc="http://creativecommons.org/ns#" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"><metadata id="metadata1533"><rdf:RDF><cc:Work rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type rdf:resource="http://purl.org/dc/dcmitype/StillImage"/><dc:title/></cc:Work></rdf:RDF></metadata><defs id="defs4"><clipPath id="a"><rect id="rect7" width="28000" height="21000"/></clipPath></defs><path id="path11" d="m10146 2636c-518.06 0-1035.1 515-1035.1 1031v4124c0 516 517.06 1032 1035.1 1032h8572.2c518.06 0 1036.1-516 1036.1-1032v-4124c0-516-518.06-1031-1036.1-1031h-8572.2z"
+fill="#fcf" style=""/><path id="path15" d="m1505.5 13443c-293 0-585 292-585 585v2340c0 293 292 586 585 586h3275c293 0 586-293 586-586v-2340c0-293-293-585-586-585h-3275z" fill="#ffc" style=""/><path id="path19" d="m517.15 22.013c-461 0-922 461-922 922v11169c0 461 461 923 922 923h3692c461 0 922-462 922-923v-11169c0-461-461-922-922-922h-3692z" fill="#e6e6e6" style=""/><path id="path23" d="m2371.5 6438h-2260v-1086h4520v1086h-2260z" fill="#ff8080" style=""/><path id="path25" d="m2371.5 6438h-2260v-1086h4520v1086h-2260z" fill="none" stroke="#3465af" style=""/><text id="text27" class="TextShape" x="-2089.4541" y="-2163.9871" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan29" class="TextParagraph" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan31" class="TextPosition" x="489.5459" y="6111.0132" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan33"
+fill="#000000" font-family="Serif, serif" font-size="493.88px">Audio decoder</tspan></tspan></tspan></text>
+<path id="path37" d="m2371.5 9608h-2260v-1270h4520v1270h-2260z" fill="#ff8080" style=""/><path id="path39" d="m2371.5 9608h-2260v-1270h4520v1270h-2260z" fill="none" stroke="#3465af" style=""/><text id="text41" class="TextShape" x="-2089.4541" y="-2163.9871" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan43" class="TextParagraph" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan45" class="TextPosition" x="527.5459" y="9189.0127" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan47" fill="#000000" font-family="Serif, serif" font-size="493.88px">Video decoder</tspan></tspan></tspan></text>
+<path id="path51" d="m2363.5 8053h-2269v-1224h4537v1224h-2268z" fill="#ff8080" style=""/><path id="path53" d="m2363.5 8053h-2269v-1224h4537v1224h-2268z" fill="none" stroke="#3465af" style=""/><text id="text55" class="TextShape" x="-2089.4541" y="-2163.9871" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan57" class="TextParagraph" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan59" class="TextPosition" x="481.5459" y="7657.0132" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan61" fill="#000000" font-family="Serif, serif" font-size="493.88px">Audio encoder</tspan></tspan></tspan></text>
+<path id="path65" d="m13622 10386h-3810v-1281h7620v1281h-3810z" fill="#cfc" style=""/><path id="path67" d="m13622 10386h-3810v-1281h7620v1281h-3810z" fill="none" stroke="#3465af" style=""/><text id="text69" class="TextShape" x="-2089.4541" y="-2446.187" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan71" class="TextParagraph" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan73" class="TextPosition" x="10287.546" y="9960.8135" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan75" fill="#000000" font-family="Serif, serif" font-size="493.88px">Button Key/IR input logic</tspan></tspan></tspan></text>
+<path id="path79" d="m12080 12182h-2268v-1412h4536v1412h-2268z" fill="#cfe7f5" style=""/><path id="path81" d="m12080 12182h-2268v-1412h4536v1412h-2268z" fill="none" stroke="#3465af" style=""/><text id="text83" class="TextShape" x="-2089.4541" y="-2389.7871" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan85" class="TextParagraph" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan87" class="TextPosition" x="10792.546" y="11692.213" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan89" fill="#000000" font-family="Serif, serif" font-size="493.88px">EEPROM</tspan></tspan></tspan></text>
+<path id="path93" d="m3050.5 15498h-1563v-1715h3126v1715h-1563z" fill="#fc9" style=""/><path id="path95" d="m3050.5 15498h-1563v-1715h3126v1715h-1563z" fill="none" stroke="#3465af" style=""/><text id="text97" class="TextShape" x="-2089.4541" y="-2163.9871" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan99" class="TextParagraph" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan101" class="TextPosition" x="2186.5459" y="14856.013" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan103" fill="#000000" font-family="Serif, serif" font-size="493.88px">Sensor</tspan></tspan></tspan></text>
+<path id="path107" d="m4629.5 5866 385-353v176h1167v-176l386 353-386 354v-177h-1167v177l-385-354z" fill="#729fcf" style=""/><path id="path109" d="m4629.5 5866 385-353v176h1167v-176l386 353-386 354v-177h-1167v177l-385-354z" fill="none" stroke="#3465af" style=""/><path id="path113" d="m4629.5 7448 385-353v176h1167v-176l386 353-386 354v-177h-1167v177l-385-354z" fill="#729fcf" style=""/><path id="path115" d="m4629.5 7448 385-353v176h1167v-176l386 353-386 354v-177h-1167v177l-385-354z" fill="none" stroke="#3465af" style=""/><path id="path119" d="m4631.5 8936 385-353v176h1166v-176l386 353-386 354v-177h-1166v177l-385-354z" fill="#729fcf" style=""/><path id="path121" d="m4631.5 8936 385-353v176h1166v-176l386 353-386 354v-177h-1166v177l-385-354z" fill="none" stroke="#3465af" style=""/><path id="path125" d="m7872.5 11464 385-353v176h1166v-176l386 353-386 354v-177h-1166v177l-385-354z"
+fill="#729fcf" style=""/><path id="path127" d="m7872.5 11464 385-353v176h1166v-176l386 353-386 354v-177h-1166v177l-385-354z" fill="none" stroke="#3465af" style=""/><path id="path131" d="m7872.5 9716.8 385-353v176h1166v-176l386 353-386 354v-177h-1166v177l-385-354z" fill="#729fcf" style=""/><path id="path133" d="m7872.5 9716.8 385-353v176h1166v-176l386 353-386 354v-177h-1166v177l-385-354z" fill="none" stroke="#3465af" style=""/><path id="path137" d="m7872.5 14994 670-353v176h2028v-176l671 353-671 354v-177h-2028v177l-670-354z" fill="#729fcf" style=""/><path id="path139" d="m7872.5 14994 670-353v176h2028v-176l671 353-671 354v-177h-2028v177l-670-354z" fill="none" stroke="#3465af" style=""/><path id="path143" d="m17534 14105 978.49 840.89-978.49 840.89v-420.86h-2960.5v420.86l-979.49-840.89 979.49-840.89v420.03h2960.5v-420.03z" fill="#729fcf" style=""/><path id="path145" d="m17534 14105 978.49
+840.89-978.49 840.89v-420.86h-2960.5v420.86l-979.49-840.89 979.49-840.89v420.03h2960.5v-420.03z" fill="none" stroke="#3465af" stroke-width="25.77" style=""/><text id="text149" class="TextShape" x="-9922.1533" y="-644.58704" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan151" class="TextParagraph" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan153" class="TextPosition" transform="matrix(0,-1,1,0,8509,40173)" x="14418.847" y="15187.413" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan155" fill="#000000" font-family="Serif, serif" font-size="493.88px">System Bus</tspan></tspan></tspan></text>
+<path id="path159" d="m11062 7098h-1250v-875h2499v875h-1249z" fill="#cff" style=""/><path id="path161" d="m11062 7098h-1250v-875h2499v875h-1249z" fill="none" stroke="#3465af" style=""/><text id="text163" class="TextShape" x="-2089.4541" y="-2163.9871" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan165" class="TextParagraph" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan167" class="TextPosition" x="10125.546" y="6876.0132" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan169" fill="#000000" font-family="Serif, serif" font-size="493.88px">Demux</tspan></tspan></tspan></text>
+<path id="path173" d="m7906.5 6601 373-357v178h1130v-178l374 357-374 358v-179h-1130v179l-373-358z" fill="#729fcf" style=""/><path id="path175" d="m7906.5 6601 373-357v178h1130v-178l374 357-374 358v-179h-1130v179l-373-358z" fill="none" stroke="#3465af" style=""/><path id="path179" d="m7906.5 5214 373-358v179h1130v-179l374 358-374 358v-179h-1130v179l-373-358z" fill="#729fcf" style=""/><path id="path181" d="m7906.5 5214 373-358v179h1130v-179l374 358-374 358v-179h-1130v179l-373-358z" fill="none" stroke="#3465af" style=""/><path id="path185" d="m14233 5828h-4421v-1270h8841v1270h-4420z" fill="#cff" style=""/><path id="path187" d="m14233 5828h-4421v-1270h8841v1270h-4420z" fill="none" stroke="#3465af" style=""/><text id="text189" class="TextShape" x="-2089.4541" y="-2163.9871" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan191" class="TextParagraph" font-family="Serif, serif"
+font-size="493.88px"><tspan id="tspan193" class="TextPosition" x="10696.546" y="5409.0132" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan195" fill="#000000" font-family="Serif, serif" font-size="493.88px">Conditional Access Module</tspan></tspan></tspan></text>
+<path id="path199" d="m2355.5 11123h-2269v-1224h4537v1224h-2268z" fill="#ff8080" style=""/><path id="path201" d="m2355.5 11123h-2269v-1224h4537v1224h-2268z" fill="none" stroke="#3465af" style=""/><text id="text203" class="TextShape" x="-2089.4541" y="-2163.9871" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan205" class="TextParagraph" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan207" class="TextPosition" x="511.5459" y="10727.013" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan209" fill="#000000" font-family="Serif, serif" font-size="493.88px">Video encoder</tspan></tspan></tspan></text>
+<path id="path213" d="m4631.5 10470 385-353v176h1166v-176l386 353-386 354v-177h-1166v177l-385-354z" fill="#729fcf" style=""/><path id="path215" d="m4631.5 10470 385-353v176h1166v-176l386 353-386 354v-177h-1166v177l-385-354z" fill="none" stroke="#3465af" style=""/><path id="path219" d="m18702 5381 385-353v176h1166v-176l386 353-386 354v-177h-1166v177l-385-354z" fill="#729fcf" style=""/><path id="path221" d="m18702 5381 385-353v176h1166v-176l386 353-386 354v-177h-1166v177l-385-354z" fill="none" stroke="#3465af" style=""/><text id="text225" class="TextShape" x="-1976.5541" y="-2163.9871" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan227" class="TextParagraph" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan229" class="TextPosition" x="13.4459" y="12314.013" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan231" fill="#000000"
+font-family="Serif, serif" font-size="493.88px">Radio / Analog TV</tspan></tspan></tspan></text>
+<text id="text235" class="TextShape" x="-2089.4541" y="-2163.9871" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan237" class="TextParagraph" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan239" class="TextPosition" x="12866.546" y="8560.0127" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan241" fill="#000000" font-family="Serif, serif" font-size="493.88px">Digital TV</tspan></tspan></tspan></text>
+<text id="text245" class="TextShape" x="-8919.0537" y="-1373.787" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan247" class="TextParagraph" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan249" class="TextPosition" x="5804.9458" y="17793.213" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan251" fill="#000000" font-family="Serif, serif" font-size="493.88px">PS.: picture is not complete: other blocks may be present</tspan></tspan></tspan></text>
+<text id="text255" class="TextShape" x="-2089.4541" y="-2163.9871" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan257" class="TextParagraph" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan259" class="TextPosition" x="2109.5459" y="16397.014" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan261" fill="#000000" font-family="Serif, serif" font-size="493.88px">Webcam</tspan></tspan></tspan></text>
+<path id="path265" d="m12463 13926h-2650v-1412h5299v1412h-2649z" fill="#f90" style=""/><path id="path267" d="m12463 13926h-2650v-1412h5299v1412h-2649z" fill="none" stroke="#3465af" style=""/><text id="text269" class="TextShape" x="-2089.4541" y="-2446.187" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan271" class="TextParagraph" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan273" class="TextPosition" x="10175.546" y="13435.813" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan275" fill="#000000" font-family="Serif, serif" font-size="493.88px">Processing blocks</tspan></tspan></tspan></text>
+<path id="path279" d="m7872.5 13208 385-353v176h1166v-176l386 353-386 354v-177h-1166v177l-385-354z" fill="#729fcf" style=""/><path id="path281" d="m7872.5 13208 385-353v176h1166v-176l386 353-386 354v-177h-1166v177l-385-354z" fill="none" stroke="#3465af" style=""/><path id="path285" d="m4612.5 14790 397-353v176h1201v-176l398 353-398 354v-177h-1201v177l-397-354z" fill="#729fcf" style=""/><path id="path287" d="m4612.5 14790 397-353v176h1201v-176l398 353-398 354v-177h-1201v177l-397-354z" fill="none" stroke="#3465af" style=""/><text id="text291" class="TextShape" x="-2428.0542" y="-2163.9871" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan293" class="TextParagraph" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan295" class="TextPosition" x="20421.945" y="6628.0132" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan297" fill="#000000"
+font-family="Serif, serif" font-size="493.88px">Smartcard</tspan></tspan></tspan></text>
+<path id="path301" d="m623.32 436.01c-334.6 0-669.2 333-669.2 666v2668c0 333 334.6 666 669.2 666h18456c334.6 0 670.2-333 670.2-666v-2668c0-333-335.6-666-670.2-666h-18456z" fill="#fcf" style=""/><path id="path305" d="m3031.5 2991h-1614v-1816h3227v1816h-1613z" fill="#ff8080" style=""/><path id="path307" d="m3031.5 2991h-1614v-1816h3227v1816h-1613z" fill="none" stroke="#3465af" style=""/><text id="text309" class="TextShape" x="-2089.4541" y="-2163.9871" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan311" class="TextParagraph"><tspan id="tspan313" class="TextPosition" x="2284.5459" y="1947.0129" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan315" fill="#000000" font-family="Serif, serif" font-size="493.88px">Tuner</tspan></tspan></tspan><tspan id="tspan317" class="TextParagraph"><tspan id="tspan319" class="TextPosition" x="2061.5459" y="2650.0129"
+font-family="Serif, serif" font-size="493.88px"><tspan id="tspan321" fill="#000000" font-family="Serif, serif" font-size="493.88px">FM/TV</tspan></tspan></tspan></text>
+<path id="path325" d="m812.55 1538c0 111 40 202 88 202h530c48 0 89-91 89-202 0-110-41-202-89-202h-530c-48 0-88 92-88 202z" fill="#ff8080" style=""/><path id="path327" d="m812.55 1538c0 111 40 202 88 202h530c48 0 89-91 89-202 0-110-41-202-89-202h-530c-48 0-88 92-88 202z" fill="none" stroke="#3465af" style=""/><path id="path329" d="m812.55 1538c0 111 40 202 88 202s88-91 88-202c0-110-40-202-88-202s-88 92-88 202z" fill="#ffb3b3" style=""/><path id="path331" d="m812.55 1538c0 111 40 202 88 202s88-91 88-202c0-110-40-202-88-202s-88 92-88 202z" fill="none" stroke="#3465af" style=""/><path id="path335" d="m813.55 2103c0 110 40 202 88 202h530c48 0 89-92 89-202s-41-203-89-203h-530c-48 0-88 93-88 203z" fill="#ff8080" style=""/><path id="path337" d="m813.55 2103c0 110 40 202 88 202h530c48 0 89-92 89-202s-41-203-89-203h-530c-48 0-88 93-88 203z" fill="none" stroke="#3465af" style=""/><path
+id="path339" d="m813.55 2103c0 110 40 202 88 202s88-92 88-202-40-203-88-203-88 93-88 203z" fill="#ffb3b3" style=""/><path id="path341" d="m813.55 2103c0 110 40 202 88 202s88-92 88-202-40-203-88-203-88 93-88 203z" fill="none" stroke="#3465af" style=""/><path id="path345" d="m4629.5 2032 385-353v176h1167v-176l386 353-386 354v-177h-1167v177l-385-354z" fill="#729fcf" style=""/><path id="path347" d="m4629.5 2032 385-353v176h1167v-176l386 353-386 354v-177h-1167v177l-385-354z" fill="none" stroke="#3465af" style=""/><path id="path351" d="m7889.5 1986 402-368v184h1217v-184l403 368-403 369v-185h-1217v185l-402-369z" fill="#729fcf" style=""/><path id="path353" d="m7889.5 1986 402-368v184h1217v-184l403 368-403 369v-185h-1217v185l-402-369z" fill="none" stroke="#3465af" style=""/><path id="path357" d="m14411 4025h-4500v-1389h9e3v1389h-4500z" fill="#cff" style=""/><path id="path359" d="m14411
+4025h-4500v-1389h9e3v1389h-4500z" fill="none" stroke="#3465af" style=""/><text id="text361" class="TextShape" x="-2089.4541" y="-2163.9871" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan363" class="TextParagraph" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan365" class="TextPosition" x="9961.5459" y="3546.0129" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan367" fill="#000000" font-family="Serif, serif" font-size="493.88px">Satellite Equipment Control (SEC)</tspan></tspan></tspan></text>
+<path id="path371" d="m11311 2436h-1400v-1e3h2800v1e3h-1400z" fill="#cff" style=""/><path id="path373" d="m11311 2436h-1400v-1e3h2800v1e3h-1400z" fill="none" stroke="#3465af" style=""/><text id="text375" class="TextShape" x="-2089.4541" y="-2163.9871" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan377" class="TextParagraph" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan379" class="TextPosition" x="10375.546" y="2152.0129" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan381" fill="#000000" font-family="Serif, serif" font-size="493.88px">Demod</tspan></tspan></tspan></text>
+<path id="path385" d="m7889.5 3287 402-368v184h1217v-184l403 368-403 369v-185h-1217v185l-402-369z" fill="#729fcf" style=""/><path id="path387" d="m7889.5 3287 402-368v184h1217v-184l403 368-403 369v-185h-1217v185l-402-369z" fill="none" stroke="#3465af" style=""/><path id="path389" d="m7906.5 9121v7302h-1270v-14605h1270v7303z" fill="#ff9" style=""/><path id="path391" d="m7906.5 9121v7302h-1270v-14605h1270v7303z" fill="none" stroke="#3465af" style=""/><text id="text393" class="TextShape" transform="rotate(-90)" x="-20792.584" y="-6589.021" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan395" class="TextParagraph" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan397" class="TextPosition" transform="matrix(0,-1,1,0,-4473,23627)" x="-11215.646" y="7460.9849" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan399" fill="#000000" font-family="Serif,
+serif" font-size="493.88px">I2C Bus (control bus)</tspan></tspan></tspan></text>
+<text id="text403" class="TextShape" x="-2145.854" y="-2163.9871" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan405" class="TextParagraph" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan407" class="TextPosition" x="7245.146" y="1114.0129" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan409" fill="#000000" font-family="Serif, serif" font-size="493.88px">Digital TV Frontend</tspan></tspan></tspan></text>
+<path id="path415" d="m863.15 636.14c-18.27 0-35.525 0.99994-53.795 2.9998" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path417" d="m776.87 644.14c-17.255 2.9998-35.525 6.9996-52.78 11.999" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path419" d="m692.63 666.14c-16.24 5.9996-33.495 11.999-49.735 19.999" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path421" d="m613.46 700.14c-15.225 7.9995-31.465 16.999-46.69 26.998" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path423" d="m539.36 745.14c-14.21 9.9994-28.42 20.999-42.63 31.998" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path425" d="m471.36 798.14c-13.195 11.999-26.39 23.999-38.57 36.998" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path427" d="m410.46 859.13c-11.165 12.999-22.33
+26.998-33.495 40.998" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path429" d="m357.68 927.13c-10.15 13.999-19.285 28.998-28.42 44.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path431" d="m314.03 1000.1c-8.12 15.999-15.225 31.998-22.33 48.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path433" d="m280.54 1079.1c-5.075 16.999-10.15 33.998-14.21 50.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path435" d="m260.24 1162.1c-3.045 17.999-5.075 34.998-6.09 52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path437" d="m254.15 1247.1v52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path439" d="m254.15 1333.1v52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path441" d="m254.15 1418.1v52.997"
+fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path443" d="m254.15 1504.1v52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path445" d="m254.15 1589.1v52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path447" d="m254.15 1675.1v52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path449" d="m254.15 1760.1v52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path451" d="m254.15 1845.1v53.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path453" d="m254.15 1931.1v52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path455" d="m254.15 2016.1v52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path457" d="m254.15 2102.1v52.997" fill="none" stroke="#3465af" stroke-width="28.432"
+style=""/><path id="path459" d="m254.15 2187.1v52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path461" d="m254.15 2273v52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path463" d="m254.15 2358v52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path465" d="m254.15 2443v53.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path467" d="m254.15 2529v52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path469" d="m254.15 2614v52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path471" d="m254.15 2700v52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path473" d="m254.15 2785v52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path475" d="m254.15 2871v52.997" fill="none"
+stroke="#3465af" stroke-width="28.432" style=""/><path id="path477" d="m254.15 2956v52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path479" d="m254.15 3041v53.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path481" d="m254.15 3127v52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path483" d="m254.15 3212v52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path485" d="m254.15 3298v52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path487" d="m254.15 3383v52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path489" d="m254.15 3469v52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path491" d="m254.15 3554v52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path493"
+d="m254.15 3639c0 17.999 1.015 35.998 3.045 52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path495" d="m262.27 3724c4.06 17.999 8.12 34.998 13.195 51.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path497" d="m285.61 3807c6.09 15.999 13.195 32.998 20.3 48.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path499" d="m321.14 3885c8.12 14.999 17.255 30.998 27.405 45.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path501" d="m366.81 3957.9c10.15 13.999 21.315 27.998 32.48 41.998" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path503" d="m420.61 4023.9c12.18 12.999 25.375 25.998 38.57 37.998" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path505" d="m483.54 4083.9c13.195 10.999 27.405 22.999 41.615 32.998" fill="none"
+stroke="#3465af" stroke-width="28.432" style=""/><path id="path507" d="m552.56 4135.9c14.21 9.9994 29.435 18.999 45.675 26.998" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path509" d="m627.67 4178.9c15.225 6.9996 32.48 14.999 48.72 20.999" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path511" d="m707.85 4210.9c17.255 4.9997 34.51 9.9994 51.765 13.999" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path513" d="m792.1 4230.9c17.255 1.9999 35.525 3.9998 53.795 4.9997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path515" d="m878.37 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path517" d="m964.65 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path519" d="m1051.9 4235.9h53.795" fill="none" stroke="#3465af"
+stroke-width="28.432" style=""/><path id="path521" d="m1138.2 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path523" d="m1225.5 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path525" d="m1311.8 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path527" d="m1398.1 4235.9h54.81" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path529" d="m1485.3 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path531" d="m1571.6 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path533" d="m1658.9 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path535" d="m1745.2 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path537"
+d="m1832.5 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path539" d="m1918.7 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path541" d="m2005 4235.9h54.81" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path543" d="m2092.3 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path545" d="m2178.6 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path547" d="m2265.9 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path549" d="m2352.2 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path551" d="m2439.4 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path553" d="m2525.7 4235.9h53.795" fill="none" stroke="#3465af"
+stroke-width="28.432" style=""/><path id="path555" d="m2612 4235.9h54.81" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path557" d="m2699.3 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path559" d="m2785.6 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path561" d="m2872.8 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path563" d="m2959.1 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path565" d="m3046.4 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path567" d="m3132.7 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path569" d="m3220 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path571"
+d="m3306.3 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path573" d="m3392.5 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path575" d="m3479.8 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path577" d="m3566.1 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path579" d="m3653.4 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path581" d="m3739.7 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path583" d="m3826.9 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path585" d="m3913.2 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path587" d="m3999.5 4235.9h53.795" fill="none"
+stroke="#3465af" stroke-width="28.432" style=""/><path id="path589" d="m4086.8 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path591" d="m4173.1 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path593" d="m4260.4 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path595" d="m4346.6 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path597" d="m4433.9 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path599" d="m4520.2 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path601" d="m4606.5 4235.9h54.81" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path603" d="m4693.8 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432"
+style=""/><path id="path605" d="m4780 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path607" d="m4867.3 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path609" d="m4953.6 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path611" d="m5040.9 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path613" d="m5127.2 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path615" d="m5213.4 4235.9h54.81" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path617" d="m5300.7 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path619" d="m5387 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path621" d="m5474.3 4235.9h53.795"
+fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path623" d="m5560.6 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path625" d="m5647.9 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path627" d="m5734.1 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path629" d="m5820.4 4235.9h54.81" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path631" d="m5907.7 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path633" d="m5994 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path635" d="m6081.3 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path637" d="m6167.5 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432"
+style=""/><path id="path639" d="m6254.8 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path641" d="m6341.1 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path643" d="m6427.4 4235.9h54.81" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path645" d="m6514.7 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path647" d="m6600.9 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path649" d="m6688.2 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path651" d="m6774.5 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path653" d="m6861.8 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path655" d="m6948.1
+4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path657" d="m7035.4 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path659" d="m7121.6 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path661" d="m7207.9 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path663" d="m7295.2 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path665" d="m7381.5 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path667" d="m7468.8 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path669" d="m7555 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path671" d="m7642.3 4235.9h53.795" fill="none" stroke="#3465af"
+stroke-width="28.432" style=""/><path id="path673" d="m7728.6 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path675" d="m7814.9 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path677" d="m7902.2 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path679" d="m7988.4 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path681" d="m8075.7 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path683" d="m8162 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path685" d="m8249.3 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path687" d="m8335.6 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path689"
+d="m8421.9 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path691" d="m8509.1 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path693" d="m8595.4 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path695" d="m8682.7 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path697" d="m8769 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path699" d="m8856.3 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path701" d="m8942.5 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path703" d="m9028.8 4235.9h54.81" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path705" d="m9116.1 4235.9h53.795" fill="none" stroke="#3465af"
+stroke-width="28.432" style=""/><path id="path707" d="m9202.4 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path709" d="m9289.7 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path711" d="m9376 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path713" d="m9463.2 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path715" d="m9549.5 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path717" d="m9635.8 4235.9h54.81" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path719" d="m9723.1 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path721" d="m9809.4 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path723"
+d="m9896.6 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path725" d="m9982.9 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path727" d="m10070 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path729" d="m10156 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path731" d="m10243 4235.9h54.81" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path733" d="m10330 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path735" d="m10416 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path737" d="m10504 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path739" d="m10590 4235.9h53.795" fill="none" stroke="#3465af"
+stroke-width="28.432" style=""/><path id="path741" d="m10677 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path743" d="m10763 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path745" d="m10850 4235.9h54.81" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path747" d="m10937 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path749" d="m11023 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path751" d="m11111 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path753" d="m11197 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path755" d="m11284 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path757" d="m11370
+4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path759" d="m11458 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path761" d="m11544 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path763" d="m11630 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path765" d="m11718 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path767" d="m11804 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path769" d="m11891 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path771" d="m11977 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path773" d="m12065 4235.9h53.795" fill="none" stroke="#3465af"
+stroke-width="28.432" style=""/><path id="path775" d="m12151 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path777" d="m12237 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path779" d="m12325 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path781" d="m12411 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path783" d="m12498 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path785" d="m12584 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path787" d="m12672 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path789" d="m12758 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path791"
+d="m12844 4235.9h54.81" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path793" d="m12931 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path795" d="m13018 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path797" d="m13105 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path799" d="m13191 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path801" d="m13279 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path803" d="m13365 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path805" d="m13451 4235.9h54.81" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path807" d="m13538 4235.9h53.795" fill="none" stroke="#3465af"
+stroke-width="28.432" style=""/><path id="path809" d="m13625 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path811" d="m13712 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path813" d="m13798 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path815" d="m13886 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path817" d="m13972 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path819" d="m14058 4235.9h54.81" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path821" d="m14145 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path823" d="m14232 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path825" d="m14319
+4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path827" d="m14405 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path829" d="m14493 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path831" d="m14579 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path833" d="m14665 4235.9h54.81" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path835" d="m14752 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path837" d="m14839 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path839" d="m14926 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path841" d="m15012 4235.9h53.795" fill="none" stroke="#3465af"
+stroke-width="28.432" style=""/><path id="path843" d="m15100 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path845" d="m15186 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path847" d="m15272 4235.9h54.81" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path849" d="m15359 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path851" d="m15446 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path853" d="m15533 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path855" d="m15619 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path857" d="m15707 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path859" d="m15793
+4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path861" d="m15880 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path863" d="m15966 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path865" d="m16053 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path867" d="m16140 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path869" d="m16226 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path871" d="m16313 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path873" d="m16400 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path875" d="m16487 4235.9h53.795" fill="none" stroke="#3465af"
+stroke-width="28.432" style=""/><path id="path877" d="m16573 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path879" d="m16660 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path881" d="m16747 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path883" d="m16833 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path885" d="m16920 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path887" d="m17007 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path889" d="m17094 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path891" d="m17180 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path893"
+d="m17267 4235.9h54.81" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path895" d="m17354 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path897" d="m17440 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path899" d="m17527 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path901" d="m17614 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path903" d="m17701 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path905" d="m17787 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path907" d="m17874 4235.9h54.81" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path909" d="m17961 4235.9h53.795" fill="none" stroke="#3465af"
+stroke-width="28.432" style=""/><path id="path911" d="m18047 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path913" d="m18134 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path915" d="m18221 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path917" d="m18308 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path919" d="m18394 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path921" d="m18481 4235.9h54.81" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path923" d="m18568 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path925" d="m18654 4235.9h53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path927" d="m18741
+4235.9c17.255-0.9999 35.525-1.9999 53.795-4.9997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path929" d="m18828 4225.9c17.255-3.9998 34.51-8.9995 51.765-13.999" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path931" d="m18911 4200.9c16.24-5.9996 32.48-12.999 48.72-20.999" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path933" d="m18989 4164.9c15.225-7.9996 31.465-16.999 45.675-26.998" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path935" d="m19062 4118.9c14.21-9.9994 28.42-20.999 42.63-31.998" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path937" d="m19129 4064.9c13.195-11.999 25.375-24.998 37.555-37.998" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path939" d="m19189 4002.9c11.165-13.999 22.33-27.998 33.495-41.998" fill="none"
+stroke="#3465af" stroke-width="28.432" style=""/><path id="path941" d="m19241 3933.9c10.15-14.999 19.285-29.998 27.405-44.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path943" d="m19283 3860c7.105-15.999 14.21-32.998 20.3-48.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path945" d="m19315 3780c5.075-16.999 9.135-33.998 13.195-50.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path947" d="m19333 3697c2.03-17.999 4.06-34.998 4.06-52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path949" d="m19337 3612v-53.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path951" d="m19337 3526v-52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path953" d="m19337 3441v-52.997" fill="none" stroke="#3465af" stroke-width="28.432"
+style=""/><path id="path955" d="m19337 3355v-52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path957" d="m19337 3270v-52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path959" d="m19337 3184v-52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path961" d="m19337 3099v-52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path963" d="m19337 3014v-53.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path965" d="m19337 2928v-52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path967" d="m19337 2843v-52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path969" d="m19337 2757v-52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path971" d="m19337 2672v-52.997" fill="none"
+stroke="#3465af" stroke-width="28.432" style=""/><path id="path973" d="m19337 2586v-52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path975" d="m19337 2501v-52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path977" d="m19337 2415v-52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path979" d="m19337 2330v-52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path981" d="m19337 2245v-52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path983" d="m19337 2159.1v-52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path985" d="m19337 2074.1v-52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path987" d="m19337 1988.1v-52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path
+id="path989" d="m19337 1903.1v-52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path991" d="m19337 1817.1v-52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path993" d="m19337 1732.1v-52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path995" d="m19337 1647.1v-52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path997" d="m19337 1561.1v-52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path999" d="m19337 1476.1v-52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1001" d="m19337 1390.1v-52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1003" d="m19337 1305.1v-52.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1005" d="m19337
+1219.1c-1.015-16.999-3.045-34.998-5.075-51.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1007" d="m19326 1135.1c-4.06-16.999-8.12-34.998-14.21-50.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1009" d="m19301 1053.1c-6.09-15.999-13.195-32.998-21.315-48.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1011" d="m19264 976.12c-9.135-15.999-18.27-30.998-28.42-45.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1013" d="m19216 904.13c-10.15-13.999-21.315-27.998-33.495-41.997" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1015" d="m19161 838.13c-12.18-12.999-24.36-24.998-37.555-36.998" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1017" d="m19098 780.14c-14.21-11.999-28.42-21.999-42.63-32.998" fill="none"
+stroke="#3465af" stroke-width="28.432" style=""/><path id="path1019" d="m19028 729.14c-15.225-8.9995-30.45-17.999-46.69-26.998" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1021" d="m18951 688.14c-16.24-7.9995-32.48-13.999-49.735-19.999" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1023" d="m18870 657.14c-17.255-4.9997-34.51-8.9995-51.765-11.999" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1025" d="m18786 640.14c-18.27-2.9998-35.525-3.9998-53.795-3.9998" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1027" d="m18700 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1029" d="m18612 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1031" d="m18526 636.14h-53.795" fill="none" stroke="#3465af"
+stroke-width="28.432" style=""/><path id="path1033" d="m18439 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1035" d="m18353 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1037" d="m18266 636.14h-54.81" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1039" d="m18179 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1041" d="m18093 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1043" d="m18005 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1045" d="m17919 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1047" d="m17832 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path
+id="path1049" d="m17746 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1051" d="m17659 636.14h-54.81" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1053" d="m17572 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1055" d="m17486 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1057" d="m17399 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1059" d="m17312 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1061" d="m17225 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1063" d="m17139 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1065" d="m17052 636.14h-54.81"
+fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1067" d="m16965 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1069" d="m16879 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1071" d="m16792 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1073" d="m16705 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1075" d="m16618 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1077" d="m16532 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1079" d="m16445 636.14h-54.81" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1081" d="m16358 636.14h-53.795" fill="none" stroke="#3465af"
+stroke-width="28.432" style=""/><path id="path1083" d="m16272 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1085" d="m16185 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1087" d="m16098 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1089" d="m16011 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1091" d="m15925 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1093" d="m15837 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1095" d="m15751 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1097" d="m15665 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path
+id="path1099" d="m15578 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1101" d="m15491 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1103" d="m15404 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1105" d="m15318 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1107" d="m15230 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1109" d="m15144 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1111" d="m15058 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1113" d="m14971 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1115" d="m14884 636.14h-53.795"
+fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1117" d="m14797 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1119" d="m14711 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1121" d="m14624 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1123" d="m14537 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1125" d="m14451 636.14h-54.81" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1127" d="m14364 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1129" d="m14277 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1131" d="m14190 636.14h-53.795" fill="none" stroke="#3465af"
+stroke-width="28.432" style=""/><path id="path1133" d="m14104 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1135" d="m14017 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1137" d="m13930 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1139" d="m13844 636.14h-54.81" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1141" d="m13757 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1143" d="m13670 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1145" d="m13583 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1147" d="m13497 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path
+id="path1149" d="m13410 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1151" d="m13323 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1153" d="m13237 636.14h-54.81" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1155" d="m13150 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1157" d="m13063 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1159" d="m12976 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1161" d="m12890 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1163" d="m12803 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1165" d="m12716 636.14h-53.795"
+fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1167" d="m12630 636.14h-54.81" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1169" d="m12543 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1171" d="m12456 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1173" d="m12369 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1175" d="m12283 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1177" d="m12196 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1179" d="m12109 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1181" d="m12022 636.14h-53.795" fill="none" stroke="#3465af"
+stroke-width="28.432" style=""/><path id="path1183" d="m11936 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1185" d="m11850 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1187" d="m11762 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1189" d="m11676 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1191" d="m11589 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1193" d="m11502 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1195" d="m11415 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1197" d="m11329 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path
+id="path1199" d="m11243 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1201" d="m11155 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1203" d="m11069 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1205" d="m10982 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1207" d="m10895 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1209" d="m10808 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1211" d="m10722 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1213" d="m10636 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1215" d="m10548 636.14h-53.795"
+fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1217" d="m10462 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1219" d="m10375 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1221" d="m10288 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1223" d="m10201 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1225" d="m10115 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1227" d="m10029 636.14h-54.81" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1229" d="m9941.3 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1231" d="m9855 636.14h-53.795" fill="none" stroke="#3465af"
+stroke-width="28.432" style=""/><path id="path1233" d="m9767.7 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1235" d="m9681.5 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1237" d="m9594.2 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1239" d="m9507.9 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1241" d="m9421.6 636.14h-54.81" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1243" d="m9334.3 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1245" d="m9248.1 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1247" d="m9160.8 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432"
+style=""/><path id="path1249" d="m9074.5 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1251" d="m8987.2 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1253" d="m8900.9 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1255" d="m8814.7 636.14h-54.81" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1257" d="m8727.4 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1259" d="m8641.1 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1261" d="m8553.8 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1263" d="m8467.5 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1265"
+d="m8380.2 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1267" d="m8294 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1269" d="m8207.7 636.14h-54.81" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1271" d="m8120.4 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1273" d="m8034.1 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1275" d="m7946.8 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1277" d="m7860.6 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1279" d="m7773.3 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1281" d="m7687 636.14h-53.795" fill="none"
+stroke="#3465af" stroke-width="28.432" style=""/><path id="path1283" d="m7599.7 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1285" d="m7513.4 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1287" d="m7427.2 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1289" d="m7339.9 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1291" d="m7253.6 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1293" d="m7166.3 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1295" d="m7080 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1297" d="m6992.7 636.14h-53.795" fill="none" stroke="#3465af"
+stroke-width="28.432" style=""/><path id="path1299" d="m6906.5 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1301" d="m6820.2 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1303" d="m6732.9 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1305" d="m6646.6 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1307" d="m6559.3 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1309" d="m6473.1 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1311" d="m6385.8 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1313" d="m6299.5 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432"
+style=""/><path id="path1315" d="m6213.2 636.14h-54.81" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1317" d="m6125.9 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1319" d="m6039.6 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1321" d="m5952.4 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1323" d="m5866.1 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1325" d="m5778.8 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1327" d="m5692.5 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1329" d="m5606.2 636.14h-54.81" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1331"
+d="m5519 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1333" d="m5432.7 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1335" d="m5345.4 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1337" d="m5259.1 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1339" d="m5171.8 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1341" d="m5085.5 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1343" d="m4999.3 636.14h-54.81" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1345" d="m4912 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1347" d="m4825.7 636.14h-53.795" fill="none"
+stroke="#3465af" stroke-width="28.432" style=""/><path id="path1349" d="m4738.4 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1351" d="m4652.1 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1353" d="m4564.9 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1355" d="m4478.6 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1357" d="m4392.3 636.14h-54.81" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1359" d="m4305 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1361" d="m4218.7 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1363" d="m4131.4 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432"
+style=""/><path id="path1365" d="m4045.2 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1367" d="m3957.9 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1369" d="m3871.6 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1371" d="m3785.3 636.14h-54.81" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1373" d="m3698 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1375" d="m3611.8 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1377" d="m3524.5 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1379" d="m3438.2 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1381"
+d="m3350.9 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1383" d="m3264.6 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1385" d="m3177.3 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1387" d="m3091.1 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1389" d="m3004.8 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1391" d="m2917.5 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1393" d="m2831.2 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1395" d="m2743.9 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1397" d="m2657.7 636.14h-53.795"
+fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1399" d="m2570.4 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1401" d="m2484.1 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1403" d="m2397.8 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1405" d="m2310.5 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1407" d="m2224.3 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1409" d="m2137 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1411" d="m2050.7 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1413" d="m1963.4 636.14h-53.795" fill="none" stroke="#3465af"
+stroke-width="28.432" style=""/><path id="path1415" d="m1877.1 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1417" d="m1790.9 636.14h-54.81" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1419" d="m1703.6 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1421" d="m1617.3 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1423" d="m1530 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1425" d="m1443.7 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1427" d="m1356.4 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1429" d="m1270.2 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path
+id="path1431" d="m1183.9 636.14h-54.81" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1433" d="m1096.6 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1435" d="m1010.3 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><path id="path1437" d="m923.03 636.14h-53.795" fill="none" stroke="#3465af" stroke-width="28.432" style=""/><g id="g4044" style=""><rect id="rect1441" x="21109" y="4753.1" width="1213.6" height="1100.7" fill="#f3e777" style=""/><path id="path1443" d="m20656 5536.4v-405.46l150.7-169.16c82.886-93.039 170.53-186.62 194.77-207.96l44.069-38.798 783.23-0.086 783.23-0.086v1227h-1956v-405.46zm1027.7 136.98v-78.372l-169.91 4.925-169.91 4.9249-5.09 45.854c-8.249 74.303 46.711 101.04 207.69 101.04h137.21v-78.372zm235.86-262.94 4.495-341.31 207.2-8.6408 207.2-8.6408
+5.144-46.443c9.596-86.615-41.863-102.05-322.02-96.607l-246.71 4.7956-4.438 419.08-4.439 419.08h149.08l4.494-341.31zm391.3 313.72c26.41-19.286 36.255-41.399 32.697-73.447l-5.09-45.854h-348.1l-5.38 48.984c-9.97 90.771 0.993 97.91 150.36 97.91 99.305 0 148.27-7.6982 175.52-27.594zm-627.16-274.84v-77.768h-348.1v66.246c0 36.436 4.973 71.431 11.051 77.768 6.078 6.3366 84.401 11.521 174.05 11.521h163v-77.768zm659.89-4.9154 5.125-74.042-179.18 4.9155-179.18 4.9155-5.38 48.984c-10.473 95.348-2.259 99.57 183.28 94.197l170.2-4.9284 5.125-74.042zm-659.89-237.63v-78.372l-169.91 4.925-169.91 4.925-5.097 73.447-5.097 73.447h350v-78.372zm659.86 4.925-5.097-73.447h-348.1l-5.38 48.984c-10.289 93.673-2.146 97.91 188.15 97.91h175.52l-5.097-73.447zm-659.86-228.98v-77.768h-137.21c-97.358 0-147.91 7.8138-174.05 26.902-34.952 25.523-49.645 92.242-25.79 117.11 6.078 6.3366 84.401 11.521 174.05
+11.521h163v-77.768z" fill="#ca4677" style=""/></g><text id="text1489" class="TextShape" transform="scale(1.1036 .90616)" x="171.41566" y="9913.7109" fill-rule="evenodd" font-family="Serif, serif" font-size="493.87px" stroke-linejoin="round" stroke-width="28.222"><tspan id="tspan1491" class="TextParagraph" font-family="Serif, serif" font-size="493.87px"/></text>
+<g id="g4048" style=""><rect id="rect1447" x="18797" y="13737" width="2320.7" height="2342.4" fill="#6076b3" style=""/><rect id="rect1451" x="18532" y="13817" width="302.7" height="137.79" fill="#e0ee2c" fill-rule="evenodd" stroke-linejoin="round" stroke-width="28.222" style=""/><rect id="rect1453" x="18532" y="14076" width="302.7" height="137.79" fill="#e0ee2c" fill-rule="evenodd" stroke-linejoin="round" stroke-width="28.222" style=""/><rect id="rect1455" x="18532" y="14334" width="302.7" height="137.79" fill="#e0ee2c" fill-rule="evenodd" stroke-linejoin="round" stroke-width="28.222" style=""/><rect id="rect1457" x="18532" y="14593" width="302.7" height="137.79" fill="#e0ee2c" fill-rule="evenodd" stroke-linejoin="round" stroke-width="28.222" style=""/><rect id="rect1459" x="18532" y="14851" width="302.7" height="137.79" fill="#e0ee2c" fill-rule="evenodd" stroke-linejoin="round"
+stroke-width="28.222" style=""/><rect id="rect1461" x="18532" y="15110" width="302.7" height="137.79" fill="#e0ee2c" fill-rule="evenodd" stroke-linejoin="round" stroke-width="28.222" style=""/><rect id="rect1463" x="18532" y="15368" width="302.7" height="137.79" fill="#e0ee2c" fill-rule="evenodd" stroke-linejoin="round" stroke-width="28.222" style=""/><rect id="rect1465" x="18532" y="15626" width="302.7" height="137.79" fill="#e0ee2c" fill-rule="evenodd" stroke-linejoin="round" stroke-width="28.222" style=""/><rect id="rect1467" x="18532" y="15884" width="302.7" height="137.79" fill="#e0ee2c" fill-rule="evenodd" stroke-linejoin="round" stroke-width="28.222" style=""/><rect id="rect1469" x="21080" y="13783" width="302.7" height="137.79" fill="#e0ee2c" fill-rule="evenodd" stroke-linejoin="round" stroke-width="28.222" style=""/><rect id="rect1471" x="21080" y="14041" width="302.7"
+height="137.79" fill="#e0ee2c" fill-rule="evenodd" stroke-linejoin="round" stroke-width="28.222" style=""/><rect id="rect1473" x="21080" y="14299" width="302.7" height="137.79" fill="#e0ee2c" fill-rule="evenodd" stroke-linejoin="round" stroke-width="28.222" style=""/><rect id="rect1475" x="21080" y="14558" width="302.7" height="137.79" fill="#e0ee2c" fill-rule="evenodd" stroke-linejoin="round" stroke-width="28.222" style=""/><rect id="rect1477" x="21080" y="14816" width="302.7" height="137.79" fill="#e0ee2c" fill-rule="evenodd" stroke-linejoin="round" stroke-width="28.222" style=""/><rect id="rect1479" x="21080" y="15075" width="302.7" height="137.79" fill="#e0ee2c" fill-rule="evenodd" stroke-linejoin="round" stroke-width="28.222" style=""/><rect id="rect1481" x="21080" y="15333" width="302.7" height="137.79" fill="#e0ee2c" fill-rule="evenodd" stroke-linejoin="round"
+stroke-width="28.222" style=""/><rect id="rect1483" x="21080" y="15592" width="302.7" height="137.79" fill="#e0ee2c" fill-rule="evenodd" stroke-linejoin="round" stroke-width="28.222" style=""/><rect id="rect1485" x="21080" y="15850" width="302.7" height="137.79" fill="#e0ee2c" fill-rule="evenodd" stroke-linejoin="round" stroke-width="28.222" style=""/><text id="text1493" transform="scale(1.1036 .90616)" x="17205.688" y="16777.641" fill="#000000" fill-rule="evenodd" font-family="Sans" font-size="856.96px" letter-spacing="0px" stroke-linejoin="round" stroke-width="28.222" word-spacing="0px" style="line-height:125%" line-height="125%" xml:space="preserve"><tspan id="tspan1495" x="17205.688" y="16777.641" style="">CPU</tspan></text>
+</g><text id="text1499" class="TextShape" x="-11700.553" y="565.61298" fill-rule="evenodd" font-family="Serif, serif" font-size="493.88px" stroke-linejoin="round" stroke-width="28.222"><tspan id="tspan1501" class="TextParagraph" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan1503" class="TextPosition" transform="matrix(0,-1,1,0,8509,40173)" x="12640.447" y="16397.613" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan1505" fill="#000000" font-family="Serif, serif" font-size="493.88px">PCI, USB, SPI, I2C, ...</tspan></tspan></tspan></text>
+<path id="path1511" d="m12408 15562h-1115.1v-1420.3h2230.2v1420.3h-1115.1z" fill="#cfe7f5" fill-rule="evenodd" stroke-linejoin="round" stroke-width="28.222" style=""/><path id="path1513" d="m12408 15562h-1115.1v-1420.3h2230.2v1420.3h-1115.1z" fill="none" stroke="#3465af" stroke-linejoin="round" stroke-width="19.847" style=""/><text id="text1515" class="TextShape" x="-1394.0863" y="590.73016" fill-rule="evenodd" font-family="Serif, serif" font-size="493.88px" stroke-linejoin="round" stroke-width="28.222"><tspan id="tspan1517" class="TextParagraph" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan1519" class="TextPosition" x="11487.915" y="14672.743" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan1521" fill="#000000" font-family="Serif, serif" font-size="493.88px">Bridge</tspan></tspan></tspan></text>
+<text id="text1523" class="TextShape" x="-1450.5308" y="1324.5078" fill-rule="evenodd" font-family="Serif, serif" font-size="493.88px" stroke-linejoin="round" stroke-width="28.222"><tspan id="tspan1525" class="TextParagraph" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan1527" class="TextPosition" x="11431.471" y="15406.52" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan1529" fill="#000000" font-family="Serif, serif" font-size="493.88px"> DMA</tspan></tspan></tspan></text>
+</svg>
diff --git a/Documentation/media/uapi/cec/cec-api.rst b/Documentation/media/uapi/cec/cec-api.rst
index bb018709970c..b68ca9c1d2e0 100644
--- a/Documentation/media/uapi/cec/cec-api.rst
+++ b/Documentation/media/uapi/cec/cec-api.rst
@@ -10,7 +10,10 @@ Part V - Consumer Electronics Control API
This part describes the CEC: Consumer Electronics Control
-.. class:: toc-title
+
+.. only:: html
+
+ .. class:: toc-title
Table of Contents
diff --git a/Documentation/media/uapi/cec/cec-func-close.rst b/Documentation/media/uapi/cec/cec-func-close.rst
index 895d9c2d1c04..334358dfa72e 100644
--- a/Documentation/media/uapi/cec/cec-func-close.rst
+++ b/Documentation/media/uapi/cec/cec-func-close.rst
@@ -40,7 +40,7 @@ freed. The device configuration remain unchanged.
Return Value
============
-:c:func:`close()` returns 0 on success. On error, -1 is returned, and
+:c:func:`close() <cec-close>` returns 0 on success. On error, -1 is returned, and
``errno`` is set appropriately. Possible error codes are:
``EBADF``
diff --git a/Documentation/media/uapi/cec/cec-func-ioctl.rst b/Documentation/media/uapi/cec/cec-func-ioctl.rst
index 22fb6304a2df..e2b6260b0086 100644
--- a/Documentation/media/uapi/cec/cec-func-ioctl.rst
+++ b/Documentation/media/uapi/cec/cec-func-ioctl.rst
@@ -39,7 +39,7 @@ Arguments
Description
===========
-The :c:func:`ioctl()` function manipulates cec device parameters. The
+The :c:func:`ioctl() <cec-ioctl>` function manipulates cec device parameters. The
argument ``fd`` must be an open file descriptor.
The ioctl ``request`` code specifies the cec function to be called. It
diff --git a/Documentation/media/uapi/cec/cec-func-open.rst b/Documentation/media/uapi/cec/cec-func-open.rst
index 18dfb62f2efe..5d6663a649bd 100644
--- a/Documentation/media/uapi/cec/cec-func-open.rst
+++ b/Documentation/media/uapi/cec/cec-func-open.rst
@@ -46,7 +46,7 @@ Arguments
Description
===========
-To open a cec device applications call :c:func:`open()` with the
+To open a cec device applications call :c:func:`open() <cec-open>` with the
desired device name. The function has no side effects; the device
configuration remains unchanged.
@@ -58,7 +58,7 @@ EBADF.
Return Value
============
-:c:func:`open()` returns the new file descriptor on success. On error,
+:c:func:`open() <cec-open>` returns the new file descriptor on success. On error,
-1 is returned, and ``errno`` is set appropriately. Possible error codes
include:
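
The calls above follow ordinary file-descriptor semantics. As a minimal, hypothetical sketch (the node name ``/dev/cec0`` is an assumption; any CEC device node works), opening and closing an adapter looks like this::

    #include <fcntl.h>
    #include <unistd.h>
    #include <stdio.h>
    #include <string.h>
    #include <errno.h>

    int main(void)
    {
        /* Open a CEC adapter node; the exact name is system dependent. */
        int fd = open("/dev/cec0", O_RDWR);

        if (fd < 0) {
            fprintf(stderr, "cannot open /dev/cec0: %s\n", strerror(errno));
            return 1;
        }

        /* ... issue CEC ioctls on fd here ... */

        close(fd);
        return 0;
    }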
diff --git a/Documentation/media/uapi/cec/cec-func-poll.rst b/Documentation/media/uapi/cec/cec-func-poll.rst
index fa0abd8fb160..d49f1ee0742d 100644
--- a/Documentation/media/uapi/cec/cec-func-poll.rst
+++ b/Documentation/media/uapi/cec/cec-func-poll.rst
@@ -39,10 +39,10 @@ Arguments
Description
===========
-With the :c:func:`poll()` function applications can wait for CEC
+With the :c:func:`poll() <cec-poll>` function applications can wait for CEC
events.
-On success :c:func:`poll()` returns the number of file descriptors
+On success :c:func:`poll() <cec-poll>` returns the number of file descriptors
that have been selected (that is, file descriptors for which the
``revents`` field of the respective struct :c:type:`pollfd`
is non-zero). CEC devices set the ``POLLIN`` and ``POLLRDNORM`` flags in
@@ -53,13 +53,13 @@ then the ``POLLPRI`` flag is set. When the function times out it returns
a value of zero, on failure it returns -1 and the ``errno`` variable is
set appropriately.
-For more details see the :c:func:`poll()` manual page.
+For more details see the :c:func:`poll() <cec-poll>` manual page.
Return Value
============
-On success, :c:func:`poll()` returns the number structures which have
+On success, :c:func:`poll() <cec-poll>` returns the number of structures which have
non-zero ``revents`` fields, or zero if the call timed out. On error -1
is returned, and the ``errno`` variable is set appropriately:
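
As an illustration of the semantics above (``fd`` is assumed to be an open CEC file descriptor and the five second timeout is arbitrary), a caller would typically wait for messages and events like this::

    #include <poll.h>
    #include <stdio.h>

    /* Wait up to five seconds for a pending CEC message or event. */
    static int wait_for_cec(int fd)
    {
        struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLPRI };
        int ret = poll(&pfd, 1, 5000);

        if (ret < 0)
            perror("poll");
        else if (ret == 0)
            printf("timed out\n");
        else if (pfd.revents & POLLPRI)
            printf("an event is pending (dequeue with CEC_DQEVENT)\n");
        else if (pfd.revents & POLLIN)
            printf("a message is pending (read with CEC_RECEIVE)\n");
        return ret;
    }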
diff --git a/Documentation/media/uapi/cec/cec-funcs.rst b/Documentation/media/uapi/cec/cec-funcs.rst
index 5b7630f2e076..6d696cead5cb 100644
--- a/Documentation/media/uapi/cec/cec-funcs.rst
+++ b/Documentation/media/uapi/cec/cec-funcs.rst
@@ -7,7 +7,6 @@ Function Reference
.. toctree::
:maxdepth: 1
- :numbered:
cec-func-open
cec-func-close
diff --git a/Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst b/Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst
index 6d7bf7bef3eb..6c1f6efb822e 100644
--- a/Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst
@@ -21,7 +21,7 @@ Arguments
=========
``fd``
- File descriptor returned by :ref:`open() <cec-func-open>`.
+ File descriptor returned by :c:func:`open() <cec-open>`.
``argp``
@@ -121,6 +121,13 @@ returns the information to the application. The ioctl never fails.
high. This makes it impossible to use CEC to wake up displays that
set the HPD pin low when in standby mode, but keep the CEC bus
alive.
+ * .. _`CEC-CAP-MONITOR-PIN`:
+
+ - ``CEC_CAP_MONITOR_PIN``
+ - 0x00000080
+ - The CEC hardware can monitor CEC pin changes from low to high voltage
+ and vice versa. When in pin monitoring mode the application will
+ receive ``CEC_EVENT_PIN_CEC_LOW`` and ``CEC_EVENT_PIN_CEC_HIGH`` events.
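
A minimal sketch of querying these capabilities (``fd`` is an assumed open CEC file descriptor; ``CEC_CAP_MONITOR_PIN`` is only defined by headers that carry this change)::

    #include <sys/ioctl.h>
    #include <string.h>
    #include <stdio.h>
    #include <linux/cec.h>

    /* Return non-zero if the adapter can monitor the CEC pin. */
    static int can_monitor_pin(int fd)
    {
        struct cec_caps caps;

        memset(&caps, 0, sizeof(caps));
        if (ioctl(fd, CEC_ADAP_G_CAPS, &caps))
            return 0;

        printf("driver %s, adapter %s\n", caps.driver, caps.name);
        return (caps.capabilities & CEC_CAP_MONITOR_PIN) != 0;
    }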
diff --git a/Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst b/Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst
index fcf863ab6f43..84f431a022ad 100644
--- a/Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst
@@ -48,7 +48,9 @@ can only be called by a file descriptor in initiator mode (see :ref:`CEC_S_MODE`
the ``EBUSY`` error code will be returned.
To clear existing logical addresses set ``num_log_addrs`` to 0. All other fields
-will be ignored in that case. The adapter will go to the unconfigured state.
+will be ignored in that case. The adapter will go to the unconfigured state and the
+``cec_version``, ``vendor_id`` and ``osd_name`` fields are all reset to their default
+values (CEC version 2.0, no vendor ID and an empty OSD name).
If the physical address is valid (see :ref:`ioctl CEC_ADAP_S_PHYS_ADDR <CEC_ADAP_S_PHYS_ADDR>`),
then this ioctl will block until all requested logical
@@ -63,7 +65,7 @@ logical address types are already defined will return with error ``EBUSY``.
.. c:type:: cec_log_addrs
-.. tabularcolumns:: |p{1.0cm}|p{7.5cm}|p{8.0cm}|
+.. tabularcolumns:: |p{1.0cm}|p{8.0cm}|p{7.5cm}|
.. cssclass:: longtable
@@ -146,6 +148,9 @@ logical address types are already defined will return with error ``EBUSY``.
give the CEC framework more information about the device type, even
though the framework won't use it directly in the CEC message.
+
+.. tabularcolumns:: |p{7.8cm}|p{1.0cm}|p{8.7cm}|
+
.. _cec-log-addrs-flags:
.. flat-table:: Flags for struct cec_log_addrs
@@ -173,7 +178,7 @@ logical address types are already defined will return with error ``EBUSY``.
to avoid trivial snooping of the keystrokes.
* .. _`CEC-LOG-ADDRS-FL-CDC-ONLY`:
- - `CEC_LOG_ADDRS_FL_CDC_ONLY`
+ - ``CEC_LOG_ADDRS_FL_CDC_ONLY``
- 4
- If this flag is set, then the device is CDC-Only. CDC-Only CEC devices
are CEC devices that can only handle CDC messages.
@@ -181,7 +186,7 @@ logical address types are already defined will return with error ``EBUSY``.
All other messages are ignored.
-.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
+.. tabularcolumns:: |p{7.8cm}|p{1.0cm}|p{8.7cm}|
.. _cec-versions:
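
Clearing the logical addresses as described above amounts to passing a zeroed struct; a minimal sketch (``fd`` is an assumed open CEC file descriptor)::

    #include <sys/ioctl.h>
    #include <string.h>
    #include <linux/cec.h>

    /* With num_log_addrs set to 0 all other fields are ignored and the
     * adapter goes back to the unconfigured state, resetting cec_version,
     * vendor_id and osd_name to their defaults. */
    static int clear_logical_addresses(int fd)
    {
        struct cec_log_addrs laddrs;

        memset(&laddrs, 0, sizeof(laddrs));
        return ioctl(fd, CEC_ADAP_S_LOG_ADDRS, &laddrs);
    }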
diff --git a/Documentation/media/uapi/cec/cec-ioc-dqevent.rst b/Documentation/media/uapi/cec/cec-ioc-dqevent.rst
index 4d3570c2e0b3..a5c821809cc6 100644
--- a/Documentation/media/uapi/cec/cec-ioc-dqevent.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-dqevent.rst
@@ -22,7 +22,7 @@ Arguments
=========
``fd``
- File descriptor returned by :ref:`open() <cec-func-open>`.
+ File descriptor returned by :c:func:`open() <cec-open>`.
``argp``
@@ -87,7 +87,7 @@ it is guaranteed that the state did change in between the two events.
this is more than enough.
-.. tabularcolumns:: |p{1.0cm}|p{4.2cm}|p{2.5cm}|p{8.8cm}|
+.. tabularcolumns:: |p{1.0cm}|p{4.4cm}|p{2.5cm}|p{9.6cm}|
.. c:type:: cec_event
@@ -98,10 +98,11 @@ it is guaranteed that the state did change in between the two events.
* - __u64
- ``ts``
- - :cspan:`1` Timestamp of the event in ns.
+ - :cspan:`1`\ Timestamp of the event in ns.
- The timestamp has been taken from the ``CLOCK_MONOTONIC`` clock. To access
- the same clock from userspace use :c:func:`clock_gettime`.
+ The timestamp has been taken from the ``CLOCK_MONOTONIC`` clock.
+
+ To access the same clock from userspace use :c:func:`clock_gettime`.
* - __u32
- ``event``
- :cspan:`1` The CEC event type, see :ref:`cec-events`.
@@ -146,6 +147,20 @@ it is guaranteed that the state did change in between the two events.
- 2
- Generated if one or more CEC messages were lost because the
application didn't dequeue CEC messages fast enough.
+ * .. _`CEC-EVENT-PIN-CEC-LOW`:
+
+ - ``CEC_EVENT_PIN_CEC_LOW``
+ - 3
+ - Generated if the CEC pin goes from a high voltage to a low voltage.
+ Only applies to adapters that have the ``CEC_CAP_MONITOR_PIN``
+ capability set.
+ * .. _`CEC-EVENT-PIN-CEC-HIGH`:
+
+ - ``CEC_EVENT_PIN_CEC_HIGH``
+ - 4
+ - Generated if the CEC pin goes from a low voltage to a high voltage.
+ Only applies to adapters that have the ``CEC_CAP_MONITOR_PIN``
+ capability set.
.. tabularcolumns:: |p{6.0cm}|p{0.6cm}|p{10.9cm}|
@@ -165,6 +180,12 @@ it is guaranteed that the state did change in between the two events.
opened. See the table above for which events do this. This allows
applications to learn the initial state of the CEC adapter at
open() time.
+ * .. _`CEC-EVENT-FL-DROPPED-EVENTS`:
+
+ - ``CEC_EVENT_FL_DROPPED_EVENTS``
+ - 2
+ - Set if one or more events of the given event type have been dropped.
+ This is an indication that the application cannot keep up.
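
A sketch of dequeuing and classifying events (``fd`` is an assumed open CEC file descriptor; the pin events and the dropped-events flag are only defined by headers that carry this change)::

    #include <sys/ioctl.h>
    #include <string.h>
    #include <stdio.h>
    #include <linux/cec.h>

    /* Dequeue one pending event and report pin transitions. */
    static int handle_one_event(int fd)
    {
        struct cec_event ev;

        memset(&ev, 0, sizeof(ev));
        if (ioctl(fd, CEC_DQEVENT, &ev))
            return -1;

        if (ev.flags & CEC_EVENT_FL_DROPPED_EVENTS)
            fprintf(stderr, "events of type %u were dropped\n", ev.event);

        switch (ev.event) {
        case CEC_EVENT_PIN_CEC_LOW:
            printf("%llu ns: CEC pin went low\n", (unsigned long long)ev.ts);
            break;
        case CEC_EVENT_PIN_CEC_HIGH:
            printf("%llu ns: CEC pin went high\n", (unsigned long long)ev.ts);
            break;
        default:
            break;
        }
        return 0;
    }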
diff --git a/Documentation/media/uapi/cec/cec-ioc-g-mode.rst b/Documentation/media/uapi/cec/cec-ioc-g-mode.rst
index 664f0d47bbcd..508e2e325683 100644
--- a/Documentation/media/uapi/cec/cec-ioc-g-mode.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-g-mode.rst
@@ -108,6 +108,8 @@ Available follower modes are:
.. _cec-mode-follower_e:
+.. cssclass:: longtable
+
.. flat-table:: Follower Modes
:header-rows: 0
:stub-columns: 0
@@ -149,13 +151,28 @@ Available follower modes are:
code. You cannot become a follower if :ref:`CEC_CAP_TRANSMIT <CEC-CAP-TRANSMIT>`
is not set or if :ref:`CEC_MODE_NO_INITIATOR <CEC-MODE-NO-INITIATOR>` was specified,
the ``EINVAL`` error code is returned in that case.
+ * .. _`CEC-MODE-MONITOR-PIN`:
+
+ - ``CEC_MODE_MONITOR_PIN``
+ - 0xd0
+ - Put the file descriptor into pin monitoring mode. Can only be used in
+ combination with :ref:`CEC_MODE_NO_INITIATOR <CEC-MODE-NO-INITIATOR>`,
+ otherwise the ``EINVAL`` error code will be returned.
+ This mode requires that the :ref:`CEC_CAP_MONITOR_PIN <CEC-CAP-MONITOR-PIN>`
+ capability is set, otherwise the ``EINVAL`` error code is returned.
+ While in pin monitoring mode this file descriptor can receive the
+ ``CEC_EVENT_PIN_CEC_LOW`` and ``CEC_EVENT_PIN_CEC_HIGH`` events to see the
+ low-level CEC pin transitions. This is very useful for debugging.
+ This mode is only allowed if the process has the ``CAP_NET_ADMIN``
+ capability. If that is not set, then the ``EPERM`` error code is returned.
* .. _`CEC-MODE-MONITOR`:
- ``CEC_MODE_MONITOR``
- 0xe0
- Put the file descriptor into monitor mode. Can only be used in
- combination with :ref:`CEC_MODE_NO_INITIATOR <CEC-MODE-NO-INITIATOR>`, otherwise EINVAL error
- code will be returned. In monitor mode all messages this CEC
+      combination with :ref:`CEC_MODE_NO_INITIATOR <CEC-MODE-NO-INITIATOR>`,
+ otherwise the ``EINVAL`` error code will be returned.
+ In monitor mode all messages this CEC
device transmits and all messages it receives (both broadcast
messages and directed messages for one of its logical addresses) will
be reported. This is very useful for debugging. This is only
@@ -191,55 +208,68 @@ Core message processing details:
* .. _`CEC-MSG-GET-CEC-VERSION`:
- ``CEC_MSG_GET_CEC_VERSION``
- - When in passthrough mode this message has to be handled by
- userspace, otherwise the core will return the CEC version that was
- set with :ref:`ioctl CEC_ADAP_S_LOG_ADDRS <CEC_ADAP_S_LOG_ADDRS>`.
+ - The core will return the CEC version that was set with
+ :ref:`ioctl CEC_ADAP_S_LOG_ADDRS <CEC_ADAP_S_LOG_ADDRS>`,
+ except when in passthrough mode. In passthrough mode the core
+ does nothing and this message has to be handled by a follower
+ instead.
* .. _`CEC-MSG-GIVE-DEVICE-VENDOR-ID`:
- ``CEC_MSG_GIVE_DEVICE_VENDOR_ID``
- - When in passthrough mode this message has to be handled by
- userspace, otherwise the core will return the vendor ID that was
- set with :ref:`ioctl CEC_ADAP_S_LOG_ADDRS <CEC_ADAP_S_LOG_ADDRS>`.
+ - The core will return the vendor ID that was set with
+ :ref:`ioctl CEC_ADAP_S_LOG_ADDRS <CEC_ADAP_S_LOG_ADDRS>`,
+ except when in passthrough mode. In passthrough mode the core
+ does nothing and this message has to be handled by a follower
+ instead.
* .. _`CEC-MSG-ABORT`:
- ``CEC_MSG_ABORT``
- - When in passthrough mode this message has to be handled by
- userspace, otherwise the core will return a feature refused
- message as per the specification.
+ - The core will return a Feature Abort message with reason
+ 'Feature Refused' as per the specification, except when in
+ passthrough mode. In passthrough mode the core does nothing
+ and this message has to be handled by a follower instead.
* .. _`CEC-MSG-GIVE-PHYSICAL-ADDR`:
- ``CEC_MSG_GIVE_PHYSICAL_ADDR``
- - When in passthrough mode this message has to be handled by
- userspace, otherwise the core will report the current physical
- address.
+ - The core will report the current physical address, except when
+ in passthrough mode. In passthrough mode the core does nothing
+ and this message has to be handled by a follower instead.
* .. _`CEC-MSG-GIVE-OSD-NAME`:
- ``CEC_MSG_GIVE_OSD_NAME``
- - When in passthrough mode this message has to be handled by
- userspace, otherwise the core will report the current OSD name as
- was set with :ref:`ioctl CEC_ADAP_S_LOG_ADDRS <CEC_ADAP_S_LOG_ADDRS>`.
+ - The core will report the current OSD name that was set with
+ :ref:`ioctl CEC_ADAP_S_LOG_ADDRS <CEC_ADAP_S_LOG_ADDRS>`,
+ except when in passthrough mode. In passthrough mode the core
+ does nothing and this message has to be handled by a follower
+ instead.
* .. _`CEC-MSG-GIVE-FEATURES`:
- ``CEC_MSG_GIVE_FEATURES``
- - When in passthrough mode this message has to be handled by
- userspace, otherwise the core will report the current features as
- was set with :ref:`ioctl CEC_ADAP_S_LOG_ADDRS <CEC_ADAP_S_LOG_ADDRS>`
- or the message is ignored if the CEC version was older than 2.0.
+ - The core will do nothing if the CEC version is older than 2.0,
+ otherwise it will report the current features that were set with
+ :ref:`ioctl CEC_ADAP_S_LOG_ADDRS <CEC_ADAP_S_LOG_ADDRS>`,
+ except when in passthrough mode. In passthrough mode the core
+ does nothing (for any CEC version) and this message has to be handled
+ by a follower instead.
* .. _`CEC-MSG-USER-CONTROL-PRESSED`:
- ``CEC_MSG_USER_CONTROL_PRESSED``
- - If :ref:`CEC_CAP_RC <CEC-CAP-RC>` is set, then generate a remote control key
- press. This message is always passed on to userspace.
+ - If :ref:`CEC_CAP_RC <CEC-CAP-RC>` is set and if
+ :ref:`CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU <CEC-LOG-ADDRS-FL-ALLOW-RC-PASSTHRU>`
+ is set, then generate a remote control key
+ press. This message is always passed on to the follower(s).
* .. _`CEC-MSG-USER-CONTROL-RELEASED`:
- ``CEC_MSG_USER_CONTROL_RELEASED``
- - If :ref:`CEC_CAP_RC <CEC-CAP-RC>` is set, then generate a remote control key
- release. This message is always passed on to userspace.
+ - If :ref:`CEC_CAP_RC <CEC-CAP-RC>` is set and if
+ :ref:`CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU <CEC-LOG-ADDRS-FL-ALLOW-RC-PASSTHRU>`
+ is set, then generate a remote control key
+ release. This message is always passed on to the follower(s).
* .. _`CEC-MSG-REPORT-PHYSICAL-ADDR`:
- ``CEC_MSG_REPORT_PHYSICAL_ADDR``
- The CEC framework will make note of the reported physical address
- and then just pass the message on to userspace.
+ and then just pass the message on to the follower(s).
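
As a minimal sketch of entering the new pin monitoring mode (``fd`` is an assumed open CEC file descriptor; ``CEC_MODE_MONITOR_PIN`` is only defined by headers that carry this change, and the call needs ``CAP_NET_ADMIN`` plus ``CEC_CAP_MONITOR_PIN`` as described above)::

    #include <sys/ioctl.h>
    #include <linux/cec.h>

    /* Put the file descriptor into pin monitoring mode; it must not be
     * an initiator, so combine the mode with CEC_MODE_NO_INITIATOR. */
    static int enter_pin_monitoring(int fd)
    {
        __u32 mode = CEC_MODE_NO_INITIATOR | CEC_MODE_MONITOR_PIN;

        return ioctl(fd, CEC_S_MODE, &mode);
    }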
diff --git a/Documentation/media/uapi/cec/cec-ioc-receive.rst b/Documentation/media/uapi/cec/cec-ioc-receive.rst
index 267044f7ac30..0f397c535a4c 100644
--- a/Documentation/media/uapi/cec/cec-ioc-receive.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-receive.rst
@@ -195,6 +195,8 @@ View On' messages from initiator 0xf ('Unregistered') to destination 0 ('TV').
valid if the :ref:`CEC_TX_STATUS_ERROR <CEC-TX-STATUS-ERROR>` status bit is set.
+.. tabularcolumns:: |p{6.2cm}|p{1.0cm}|p{10.3cm}|
+
.. _cec-msg-flags:
.. flat-table:: Flags for struct cec_msg
diff --git a/Documentation/media/uapi/dvb/audio-channel-select.rst b/Documentation/media/uapi/dvb/audio-channel-select.rst
index 2ceb4efebdf0..8cab3d7abff5 100644
--- a/Documentation/media/uapi/dvb/audio-channel-select.rst
+++ b/Documentation/media/uapi/dvb/audio-channel-select.rst
@@ -44,7 +44,7 @@ Arguments
Description
-----------
-This ioctl is for DVB devices only. To control a V4L2 decoder use the
+This ioctl is for Digital TV devices only. To control a V4L2 decoder use the
V4L2 ``V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK`` control instead.
This ioctl call asks the Audio Device to select the requested channel if
diff --git a/Documentation/media/uapi/dvb/audio-fclose.rst b/Documentation/media/uapi/dvb/audio-fclose.rst
index 4df24c8d74ed..58d351a3af4b 100644
--- a/Documentation/media/uapi/dvb/audio-fclose.rst
+++ b/Documentation/media/uapi/dvb/audio-fclose.rst
@@ -2,14 +2,14 @@
.. _audio_fclose:
-=================
-DVB audio close()
-=================
+========================
+Digital TV audio close()
+========================
Name
----
-DVB audio close()
+Digital TV audio close()
.. attention:: This ioctl is deprecated
diff --git a/Documentation/media/uapi/dvb/audio-fopen.rst b/Documentation/media/uapi/dvb/audio-fopen.rst
index a802c2e0dc6a..4a174640bf11 100644
--- a/Documentation/media/uapi/dvb/audio-fopen.rst
+++ b/Documentation/media/uapi/dvb/audio-fopen.rst
@@ -2,14 +2,14 @@
.. _audio_fopen:
-================
-DVB audio open()
-================
+=======================
+Digital TV audio open()
+=======================
Name
----
-DVB audio open()
+Digital TV audio open()
.. attention:: This ioctl is deprecated
diff --git a/Documentation/media/uapi/dvb/audio-fwrite.rst b/Documentation/media/uapi/dvb/audio-fwrite.rst
index 8882cad7d165..4980ae7953ef 100644
--- a/Documentation/media/uapi/dvb/audio-fwrite.rst
+++ b/Documentation/media/uapi/dvb/audio-fwrite.rst
@@ -2,14 +2,14 @@
.. _audio_fwrite:
-=================
-DVB audio write()
-=================
+=========================
+Digital TV audio write()
+=========================
Name
----
-DVB audio write()
+Digital TV audio write()
.. attention:: This ioctl is deprecated
diff --git a/Documentation/media/uapi/dvb/audio-set-av-sync.rst b/Documentation/media/uapi/dvb/audio-set-av-sync.rst
index 0cef4917d2cf..cf621f3a3037 100644
--- a/Documentation/media/uapi/dvb/audio-set-av-sync.rst
+++ b/Documentation/media/uapi/dvb/audio-set-av-sync.rst
@@ -38,7 +38,7 @@ Arguments
- boolean state
- - Tells the DVB subsystem if A/V synchronization shall be ON or OFF.
+ - Tells the Digital TV subsystem if A/V synchronization shall be ON or OFF.
TRUE: AV-sync ON
diff --git a/Documentation/media/uapi/dvb/audio-set-bypass-mode.rst b/Documentation/media/uapi/dvb/audio-set-bypass-mode.rst
index b063c496c2eb..f0db1fbdb066 100644
--- a/Documentation/media/uapi/dvb/audio-set-bypass-mode.rst
+++ b/Documentation/media/uapi/dvb/audio-set-bypass-mode.rst
@@ -38,7 +38,7 @@ Arguments
- boolean mode
- Enables or disables the decoding of the current Audio stream in
- the DVB subsystem.
+ the Digital TV subsystem.
TRUE: Bypass is disabled
@@ -50,8 +50,8 @@ Description
This ioctl call asks the Audio Device to bypass the Audio decoder and
forward the stream without decoding. This mode shall be used if streams
-that can’t be handled by the DVB system shall be decoded. Dolby
-DigitalTM streams are automatically forwarded by the DVB subsystem if
+that can’t be handled by the Digital TV system shall be decoded. Dolby
+DigitalTM streams are automatically forwarded by the Digital TV subsystem if
the hardware can handle it.
diff --git a/Documentation/media/uapi/dvb/audio-set-mute.rst b/Documentation/media/uapi/dvb/audio-set-mute.rst
index 897e7228f4d8..0af105a8ddcc 100644
--- a/Documentation/media/uapi/dvb/audio-set-mute.rst
+++ b/Documentation/media/uapi/dvb/audio-set-mute.rst
@@ -48,7 +48,7 @@ Arguments
Description
-----------
-This ioctl is for DVB devices only. To control a V4L2 decoder use the
+This ioctl is for Digital TV devices only. To control a V4L2 decoder use the
V4L2 :ref:`VIDIOC_DECODER_CMD` with the
``V4L2_DEC_CMD_START_MUTE_AUDIO`` flag instead.
diff --git a/Documentation/media/uapi/dvb/audio.rst b/Documentation/media/uapi/dvb/audio.rst
index 155622185ea4..e9f9e589c486 100644
--- a/Documentation/media/uapi/dvb/audio.rst
+++ b/Documentation/media/uapi/dvb/audio.rst
@@ -2,15 +2,16 @@
.. _dvb_audio:
-################
-DVB Audio Device
-################
-The DVB audio device controls the MPEG2 audio decoder of the DVB
-hardware. It can be accessed through ``/dev/dvb/adapter?/audio?``. Data
+#######################
+Digital TV Audio Device
+#######################
+
+The Digital TV audio device controls the MPEG2 audio decoder of the Digital
+TV hardware. It can be accessed through ``/dev/dvb/adapter?/audio?``. Data
types and and ioctl definitions can be accessed by including
``linux/dvb/audio.h`` in your application.
-Please note that some DVB cards don’t have their own MPEG decoder, which
+Please note that some Digital TV cards don’t have their own MPEG decoder, which
results in the omission of the audio and video device.
These ioctls were also used by V4L2 to control MPEG decoders implemented
diff --git a/Documentation/media/uapi/dvb/audio_h.rst b/Documentation/media/uapi/dvb/audio_h.rst
deleted file mode 100644
index e00c3010fdf9..000000000000
--- a/Documentation/media/uapi/dvb/audio_h.rst
+++ /dev/null
@@ -1,9 +0,0 @@
-.. -*- coding: utf-8; mode: rst -*-
-
-.. _audio_h:
-
-*********************
-DVB Audio Header File
-*********************
-
-.. kernel-include:: $BUILDDIR/audio.h.rst
diff --git a/Documentation/media/uapi/dvb/ca-fclose.rst b/Documentation/media/uapi/dvb/ca-fclose.rst
index 5ecefa4abc3d..e84bbfcfa184 100644
--- a/Documentation/media/uapi/dvb/ca-fclose.rst
+++ b/Documentation/media/uapi/dvb/ca-fclose.rst
@@ -2,14 +2,14 @@
.. _ca_fclose:
-==============
-DVB CA close()
-==============
+=====================
+Digital TV CA close()
+=====================
Name
----
-DVB CA close()
+Digital TV CA close()
Synopsis
@@ -34,13 +34,10 @@ This system call closes a previously opened CA device.
Return Value
------------
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
+On success 0 is returned.
+On error -1 is returned, and the ``errno`` variable is set
+appropriately.
- - .. row 1
-
- - ``EBADF``
-
- - fd is not a valid open file descriptor.
+Generic error codes are described at the
+:ref:`Generic Error Codes <gen-errors>` chapter.
diff --git a/Documentation/media/uapi/dvb/ca-fopen.rst b/Documentation/media/uapi/dvb/ca-fopen.rst
index 3d2819751446..056c71b53a70 100644
--- a/Documentation/media/uapi/dvb/ca-fopen.rst
+++ b/Documentation/media/uapi/dvb/ca-fopen.rst
@@ -2,14 +2,14 @@
.. _ca_fopen:
-=============
-DVB CA open()
-=============
+====================
+Digital TV CA open()
+====================
Name
----
-DVB CA open()
+Digital TV CA open()
Synopsis
@@ -23,25 +23,25 @@ Arguments
---------
``name``
- Name of specific DVB CA device.
+ Name of specific Digital TV CA device.
``flags``
A bit-wise OR of the following flags:
+.. tabularcolumns:: |p{2.5cm}|p{15.0cm}|
+
.. flat-table::
:header-rows: 0
:stub-columns: 0
+ :widths: 1 16
- -
- - O_RDONLY
+ - - ``O_RDONLY``
- read-only access
- -
- - O_RDWR
+ - - ``O_RDWR``
- read/write access
- -
- - O_NONBLOCK
+ - - ``O_NONBLOCK``
- open in non-blocking mode
(blocking mode is the default)
@@ -49,50 +49,29 @@ Arguments
Description
-----------
-This system call opens a named ca device (e.g. /dev/ost/ca) for
-subsequent use.
+This system call opens a named ca device (e.g. ``/dev/dvb/adapter?/ca?``)
+for subsequent use.
-When an open() call has succeeded, the device will be ready for use. The
+When an ``open()`` call has succeeded, the device will be ready for use. The
significance of blocking or non-blocking mode is described in the
documentation for functions where there is a difference. It does not
-affect the semantics of the open() call itself. A device opened in
+affect the semantics of the ``open()`` call itself. A device opened in
blocking mode can later be put into non-blocking mode (and vice versa)
-using the F_SETFL command of the fcntl system call. This is a standard
-system call, documented in the Linux manual page for fcntl. Only one
-user can open the CA Device in O_RDWR mode. All other attempts to open
-the device in this mode will fail, and an error code will be returned.
+using the ``F_SETFL`` command of the ``fcntl`` system call. This is a
+standard system call, documented in the Linux manual page for fcntl.
+Only one user can open the CA Device in ``O_RDWR`` mode. All other
+attempts to open the device in this mode will fail, and an error code
+will be returned.
Return Value
------------
-.. tabularcolumns:: |p{2.5cm}|p{15.0cm}|
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
-
- - .. row 1
-
- - ``ENODEV``
-
- - Device driver not loaded/available.
-
- - .. row 2
-
- - ``EINTERNAL``
-
- - Internal error.
-
- - .. row 3
-
- - ``EBUSY``
-
- - Device or resource busy.
- - .. row 4
+On success 0 is returned.
- - ``EINVAL``
+On error -1 is returned, and the ``errno`` variable is set
+appropriately.
- - Invalid argument.
+Generic error codes are described at the
+:ref:`Generic Error Codes <gen-errors>` chapter.
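
A minimal sketch of the open sequence described above; the adapter and
device numbers are illustrative placeholders:

.. code-block:: c

    #include <fcntl.h>
    #include <stdio.h>

    /* Open the CA device in read/write, non-blocking mode. Only one
     * user may hold it in O_RDWR mode at a time. */
    static int open_ca(void)
    {
        int fd = open("/dev/dvb/adapter0/ca0", O_RDWR | O_NONBLOCK);

        if (fd < 0)
            perror("open /dev/dvb/adapter0/ca0");
        return fd;
    }
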
diff --git a/Documentation/media/uapi/dvb/ca-get-cap.rst b/Documentation/media/uapi/dvb/ca-get-cap.rst
index fbf7e359cb8a..d2d5c1355396 100644
--- a/Documentation/media/uapi/dvb/ca-get-cap.rst
+++ b/Documentation/media/uapi/dvb/ca-get-cap.rst
@@ -28,43 +28,19 @@ Arguments
``caps``
Pointer to struct :c:type:`ca_caps`.
-.. c:type:: struct ca_caps
-
-.. flat-table:: struct ca_caps
- :header-rows: 1
- :stub-columns: 0
-
- -
- - type
- - name
- - description
- -
- - unsigned int
- - slot_num
- - total number of CA card and module slots
- -
- - unsigned int
- - slot_type
- - bitmask with all supported slot types
- -
- - unsigned int
- - descr_num
- - total number of descrambler slots (keys)
- -
- - unsigned int
- - descr_type
- - bit mask with all supported descr types
-
-
Description
-----------
-.. note:: This ioctl is undocumented. Documentation is welcome.
-
+Queries the Kernel for information about the available CA and descrambler
+slots, and their types.
Return Value
------------
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
+On success 0 is returned and :c:type:`ca_caps` is filled.
+
+On error, -1 is returned and the ``errno`` variable is set
+appropriately.
+
+The generic error codes are described at the
:ref:`Generic Error Codes <gen-errors>` chapter.
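
A short sketch of how the ioctl described above might be used, assuming
``fd`` refers to an already-open CA device node:

.. code-block:: c

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/dvb/ca.h>

    /* Report how many CA and descrambler slots the hardware exposes. */
    static int print_ca_caps(int fd)
    {
        struct ca_caps caps = {0};

        if (ioctl(fd, CA_GET_CAP, &caps) < 0) {
            perror("CA_GET_CAP");
            return -1;
        }
        printf("%u CA slot(s), %u descrambler slot(s)\n",
               caps.slot_num, caps.descr_num);
        return 0;
    }
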
diff --git a/Documentation/media/uapi/dvb/ca-get-descr-info.rst b/Documentation/media/uapi/dvb/ca-get-descr-info.rst
index 7bf327a3d0e3..e564fbb8d524 100644
--- a/Documentation/media/uapi/dvb/ca-get-descr-info.rst
+++ b/Documentation/media/uapi/dvb/ca-get-descr-info.rst
@@ -27,37 +27,16 @@ Arguments
``desc``
Pointer to struct :c:type:`ca_descr_info`.
-.. c:type:: struct ca_descr_info
-
-.. flat-table:: struct ca_descr_info
- :header-rows: 1
- :stub-columns: 0
-
- -
- - type
- - name
- - description
-
- -
- - unsigned int
- - num
- - number of available descramblers (keys)
- -
- - unsigned int
- - type
- - type of supported scrambling system. Valid values are:
- ``CA_ECD``, ``CA_NDS`` and ``CA_DSS``.
-
-
Description
-----------
-.. note:: This ioctl is undocumented. Documentation is welcome.
-
+Returns information about all descrambler slots.
Return Value
------------
-On success 0 is returned, on error -1 and the ``errno`` variable is set
+On success 0 is returned, and :c:type:`ca_descr_info` is filled.
+
+On error -1 is returned, and the ``errno`` variable is set
appropriately. The generic error codes are described at the
:ref:`Generic Error Codes <gen-errors>` chapter.
diff --git a/Documentation/media/uapi/dvb/ca-get-msg.rst b/Documentation/media/uapi/dvb/ca-get-msg.rst
index 121588da3ef1..ceeda623ce93 100644
--- a/Documentation/media/uapi/dvb/ca-get-msg.rst
+++ b/Documentation/media/uapi/dvb/ca-get-msg.rst
@@ -28,47 +28,25 @@ Arguments
``msg``
Pointer to struct :c:type:`ca_msg`.
+Description
+-----------
-.. c:type:: struct ca_msg
-
-.. flat-table:: struct ca_msg
- :header-rows: 1
- :stub-columns: 0
-
- -
- - type
- - name
- - description
- -
- - unsigned int
- - index
- -
-
- -
- - unsigned int
- - type
- -
+Receives a message via a CI CA module.
- -
- - unsigned int
- - length
- -
+.. note::
- -
- - unsigned char
- - msg[256]
- -
+   Please notice that, on most drivers, this is done by reading from
+   the ``/dev/dvb/adapter?/ca?`` device node.
-Description
------------
+Return Value
+------------
-.. note:: This ioctl is undocumented. Documentation is welcome.
+On success 0 is returned.
-Return Value
-------------
+On error -1 is returned, and the ``errno`` variable is set
+appropriately.
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
+Generic error codes are described at the
:ref:`Generic Error Codes <gen-errors>` chapter.
diff --git a/Documentation/media/uapi/dvb/ca-get-slot-info.rst b/Documentation/media/uapi/dvb/ca-get-slot-info.rst
index 54e5dc78a2dc..1a1d6f0c71b9 100644
--- a/Documentation/media/uapi/dvb/ca-get-slot-info.rst
+++ b/Documentation/media/uapi/dvb/ca-get-slot-info.rst
@@ -26,100 +26,32 @@ Arguments
File descriptor returned by a previous call to :c:func:`open() <cec-open>`.
``info``
- Pointer to struct c:type:`ca_slot_info`.
+ Pointer to struct :c:type:`ca_slot_info`.
-.. _ca_slot_info_type:
+Description
+-----------
-.. flat-table:: ca_slot_info types
- :header-rows: 1
- :stub-columns: 0
+Returns information about a CA slot identified by
+:c:type:`ca_slot_info`.num.
- -
- - type
- - name
- - description
- -
- - CA_CI
- - 1
- - CI high level interface
-
- -
- - CA_CI_LINK
- - 2
- - CI link layer level interface
-
- -
- - CA_CI_PHYS
- - 4
- - CI physical layer level interface
-
- -
- - CA_DESCR
- - 8
- - built-in descrambler
-
- -
- - CA_SC
- - 128
- - simple smart card interface
-
-.. _ca_slot_info_flag:
-
-.. flat-table:: ca_slot_info flags
- :header-rows: 1
- :stub-columns: 0
- -
- - type
- - name
- - description
+Return Value
+------------
- -
- - CA_CI_MODULE_PRESENT
- - 1
- - module (or card) inserted
+On success 0 is returned, and :c:type:`ca_slot_info` is filled.
- -
- - CA_CI_MODULE_READY
- - 2
- -
+On error -1 is returned, and the ``errno`` variable is set
+appropriately.
-.. c:type:: ca_slot_info
+.. tabularcolumns:: |p{2.5cm}|p{15.0cm}|
-.. flat-table:: struct ca_slot_info
- :header-rows: 1
+.. flat-table::
+ :header-rows: 0
:stub-columns: 0
+ :widths: 1 16
- -
- - type
- - name
- - description
-
- -
- - int
- - num
- - slot number
-
- -
- - int
- - type
- - CA interface this slot supports, as defined at :ref:`ca_slot_info_type`.
-
- -
- - unsigned int
- - flags
- - flags as defined at :ref:`ca_slot_info_flag`.
-
-
-Description
------------
-
-.. note:: This ioctl is undocumented. Documentation is welcome.
-
-
-Return Value
-------------
+ - - ``ENODEV``
+ - the slot is not available.
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
+The generic error codes are described at the
:ref:`Generic Error Codes <gen-errors>` chapter.
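
An illustrative sketch of querying slot 0, assuming ``fd`` is an open CA
device node:

.. code-block:: c

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/dvb/ca.h>

    /* Check whether a CAM is inserted (and ready) in slot 0. */
    static int check_slot0(int fd)
    {
        struct ca_slot_info info = { .num = 0 };

        if (ioctl(fd, CA_GET_SLOT_INFO, &info) < 0) {
            perror("CA_GET_SLOT_INFO");    /* e.g. ENODEV: slot not available */
            return -1;
        }
        if (info.flags & CA_CI_MODULE_PRESENT)
            printf("slot %d: module inserted%s\n", info.num,
                   (info.flags & CA_CI_MODULE_READY) ? " and ready" : "");
        return 0;
    }
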
diff --git a/Documentation/media/uapi/dvb/ca-reset.rst b/Documentation/media/uapi/dvb/ca-reset.rst
index 477313121a65..29788325f90e 100644
--- a/Documentation/media/uapi/dvb/ca-reset.rst
+++ b/Documentation/media/uapi/dvb/ca-reset.rst
@@ -28,12 +28,17 @@ Arguments
Description
-----------
-.. note:: This ioctl is undocumented. Documentation is welcome.
+Puts the Conditional Access hardware in its initial state. It should
+be called before starting to use the CA hardware.
Return Value
------------
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
+On success 0 is returned.
+
+On error -1 is returned, and the ``errno`` variable is set
+appropriately.
+
+Generic error codes are described at the
:ref:`Generic Error Codes <gen-errors>` chapter.
diff --git a/Documentation/media/uapi/dvb/ca-send-msg.rst b/Documentation/media/uapi/dvb/ca-send-msg.rst
index 532ef5f9d6ac..9e91287b7bbc 100644
--- a/Documentation/media/uapi/dvb/ca-send-msg.rst
+++ b/Documentation/media/uapi/dvb/ca-send-msg.rst
@@ -32,12 +32,20 @@ Arguments
Description
-----------
-.. note:: This ioctl is undocumented. Documentation is welcome.
+Sends a message via a CI CA module.
+.. note::
+
+   Please notice that, on most drivers, this is done by writing
+   to the ``/dev/dvb/adapter?/ca?`` device node.
Return Value
------------
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
+On success 0 is returned.
+
+On error -1 is returned, and the ``errno`` variable is set
+appropriately.
+
+Generic error codes are described at the
:ref:`Generic Error Codes <gen-errors>` chapter.
diff --git a/Documentation/media/uapi/dvb/ca-set-descr.rst b/Documentation/media/uapi/dvb/ca-set-descr.rst
index 70f7b3cf12ad..a6c47205ffd8 100644
--- a/Documentation/media/uapi/dvb/ca-set-descr.rst
+++ b/Documentation/media/uapi/dvb/ca-set-descr.rst
@@ -28,16 +28,19 @@ Arguments
``msg``
Pointer to struct :c:type:`ca_descr`.
-
Description
-----------
-.. note:: This ioctl is undocumented. Documentation is welcome.
-
+CA_SET_DESCR is used for feeding descrambler CA slots with descrambling
+keys (referred to as control words).
Return Value
------------
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
+On success 0 is returned.
+
+On error -1 is returned, and the ``errno`` variable is set
+appropriately.
+
+Generic error codes are described at the
:ref:`Generic Error Codes <gen-errors>` chapter.
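
A sketch of loading a control word into the first descrambler slot; the
parity convention (0 = even, 1 = odd control word) follows the comments
in ``linux/dvb/ca.h``:

.. code-block:: c

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/dvb/ca.h>

    /* Feed an 8-byte control word into descrambler slot 0. */
    static int set_control_word(int fd, const unsigned char cw[8],
                                unsigned int parity)
    {
        struct ca_descr descr = {
            .index = 0,
            .parity = parity,
        };

        memcpy(descr.cw, cw, sizeof(descr.cw));
        return ioctl(fd, CA_SET_DESCR, &descr);
    }
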
diff --git a/Documentation/media/uapi/dvb/ca-set-pid.rst b/Documentation/media/uapi/dvb/ca-set-pid.rst
deleted file mode 100644
index 891c1c72ef24..000000000000
--- a/Documentation/media/uapi/dvb/ca-set-pid.rst
+++ /dev/null
@@ -1,60 +0,0 @@
-.. -*- coding: utf-8; mode: rst -*-
-
-.. _CA_SET_PID:
-
-==========
-CA_SET_PID
-==========
-
-Name
-----
-
-CA_SET_PID
-
-
-Synopsis
---------
-
-.. c:function:: int ioctl(fd, CA_SET_PID, struct ca_pid *pid)
- :name: CA_SET_PID
-
-
-Arguments
----------
-
-``fd``
- File descriptor returned by a previous call to :c:func:`open() <dvb-ca-open>`.
-
-``pid``
- Pointer to struct :c:type:`ca_pid`.
-
-.. c:type:: ca_pid
-
-.. flat-table:: struct ca_pid
- :header-rows: 1
- :stub-columns: 0
-
- -
- - unsigned int
- - pid
- - Program ID
-
- -
- - int
- - index
- - PID index. Use -1 to disable.
-
-
-
-Description
------------
-
-.. note:: This ioctl is undocumented. Documentation is welcome.
-
-
-Return Value
-------------
-
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
-:ref:`Generic Error Codes <gen-errors>` chapter.
diff --git a/Documentation/media/uapi/dvb/ca.rst b/Documentation/media/uapi/dvb/ca.rst
index 14b14abda1ae..deac72d89e93 100644
--- a/Documentation/media/uapi/dvb/ca.rst
+++ b/Documentation/media/uapi/dvb/ca.rst
@@ -2,14 +2,20 @@
.. _dvb_ca:
-#############
-DVB CA Device
-#############
-The DVB CA device controls the conditional access hardware. It can be
-accessed through ``/dev/dvb/adapter?/ca?``. Data types and and ioctl
+####################
+Digital TV CA Device
+####################
+
+The Digital TV CA device controls the conditional access hardware. It
+can be accessed through ``/dev/dvb/adapter?/ca?``. Data types and ioctl
definitions can be accessed by including ``linux/dvb/ca.h`` in your
application.
+.. note::
+
+   There are three ioctls in this API that aren't documented:
+   :ref:`CA_GET_MSG`, :ref:`CA_SEND_MSG` and :ref:`CA_SET_DESCR`.
+   Documentation for them is welcome.
.. toctree::
:maxdepth: 1
diff --git a/Documentation/media/uapi/dvb/ca_data_types.rst b/Documentation/media/uapi/dvb/ca_data_types.rst
index d9e27c77426c..ac7cbd76ddd5 100644
--- a/Documentation/media/uapi/dvb/ca_data_types.rst
+++ b/Documentation/media/uapi/dvb/ca_data_types.rst
@@ -6,105 +6,4 @@
CA Data Types
*************
-
-.. c:type:: ca_slot_info
-
-ca_slot_info_t
-==============
-
-
-.. code-block:: c
-
- typedef struct ca_slot_info {
- int num; /* slot number */
-
- int type; /* CA interface this slot supports */
- #define CA_CI 1 /* CI high level interface */
- #define CA_CI_LINK 2 /* CI link layer level interface */
- #define CA_CI_PHYS 4 /* CI physical layer level interface */
- #define CA_DESCR 8 /* built-in descrambler */
- #define CA_SC 128 /* simple smart card interface */
-
- unsigned int flags;
- #define CA_CI_MODULE_PRESENT 1 /* module (or card) inserted */
- #define CA_CI_MODULE_READY 2
- } ca_slot_info_t;
-
-
-.. c:type:: ca_descr_info
-
-ca_descr_info_t
-===============
-
-
-.. code-block:: c
-
- typedef struct ca_descr_info {
- unsigned int num; /* number of available descramblers (keys) */
- unsigned int type; /* type of supported scrambling system */
- #define CA_ECD 1
- #define CA_NDS 2
- #define CA_DSS 4
- } ca_descr_info_t;
-
-
-.. c:type:: ca_caps
-
-ca_caps_t
-=========
-
-
-.. code-block:: c
-
- typedef struct ca_caps {
- unsigned int slot_num; /* total number of CA card and module slots */
- unsigned int slot_type; /* OR of all supported types */
- unsigned int descr_num; /* total number of descrambler slots (keys) */
- unsigned int descr_type;/* OR of all supported types */
- } ca_cap_t;
-
-
-.. c:type:: ca_msg
-
-ca_msg_t
-========
-
-
-.. code-block:: c
-
- /* a message to/from a CI-CAM */
- typedef struct ca_msg {
- unsigned int index;
- unsigned int type;
- unsigned int length;
- unsigned char msg[256];
- } ca_msg_t;
-
-
-.. c:type:: ca_descr
-
-ca_descr_t
-==========
-
-
-.. code-block:: c
-
- typedef struct ca_descr {
- unsigned int index;
- unsigned int parity;
- unsigned char cw[8];
- } ca_descr_t;
-
-
-.. c:type:: ca_pid
-
-ca-pid
-======
-
-
-.. code-block:: c
-
- typedef struct ca_pid {
- unsigned int pid;
- int index; /* -1 == disable*/
- } ca_pid_t;
+.. kernel-doc:: include/uapi/linux/dvb/ca.h
diff --git a/Documentation/media/uapi/dvb/ca_function_calls.rst b/Documentation/media/uapi/dvb/ca_function_calls.rst
index c085a0ebbc05..87d697851e82 100644
--- a/Documentation/media/uapi/dvb/ca_function_calls.rst
+++ b/Documentation/media/uapi/dvb/ca_function_calls.rst
@@ -18,4 +18,3 @@ CA Function Calls
ca-get-msg
ca-send-msg
ca-set-descr
- ca-set-pid
diff --git a/Documentation/media/uapi/dvb/ca_h.rst b/Documentation/media/uapi/dvb/ca_h.rst
deleted file mode 100644
index f513592ef529..000000000000
--- a/Documentation/media/uapi/dvb/ca_h.rst
+++ /dev/null
@@ -1,9 +0,0 @@
-.. -*- coding: utf-8; mode: rst -*-
-
-.. _ca_h:
-
-**********************************
-DVB Conditional Access Header File
-**********************************
-
-.. kernel-include:: $BUILDDIR/ca.h.rst
diff --git a/Documentation/media/uapi/dvb/demux.rst b/Documentation/media/uapi/dvb/demux.rst
index b12b5a2dac94..45c3d6405c46 100644
--- a/Documentation/media/uapi/dvb/demux.rst
+++ b/Documentation/media/uapi/dvb/demux.rst
@@ -2,10 +2,15 @@
.. _dvb_demux:
-################
-DVB Demux Device
-################
-The DVB demux device controls the filters of the DVB hardware/software.
+#######################
+Digital TV Demux Device
+#######################
+
+The Digital TV demux device controls the MPEG-TS filters for the
+digital TV. If the driver and hardware support it, those filters are
+implemented in hardware. Otherwise, the Kernel provides a software
+emulation.
+
It can be accessed through ``/dev/adapter?/demux?``. Data types and and
ioctl definitions can be accessed by including ``linux/dvb/dmx.h`` in
your application.
diff --git a/Documentation/media/uapi/dvb/dmx-add-pid.rst b/Documentation/media/uapi/dvb/dmx-add-pid.rst
index 689cd1fc9142..4d5632dfb43e 100644
--- a/Documentation/media/uapi/dvb/dmx-add-pid.rst
+++ b/Documentation/media/uapi/dvb/dmx-add-pid.rst
@@ -33,13 +33,17 @@ Description
-----------
This ioctl call allows to add multiple PIDs to a transport stream filter
-previously set up with DMX_SET_PES_FILTER and output equal to
-DMX_OUT_TSDEMUX_TAP.
+previously set up with :ref:`DMX_SET_PES_FILTER` and output equal to
+:c:type:`DMX_OUT_TSDEMUX_TAP <dmx_output>`.
Return Value
------------
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
+On success 0 is returned.
+
+On error -1 is returned, and the ``errno`` variable is set
+appropriately.
+
+Generic error codes are described at the
:ref:`Generic Error Codes <gen-errors>` chapter.
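
A sketch of the multi-PID setup this ioctl extends; the PID values are
illustrative placeholders:

.. code-block:: c

    #include <sys/ioctl.h>
    #include <linux/dvb/dmx.h>

    /* Start a TS tap on one demux fd and then add a second PID to it. */
    static int tap_two_pids(int fd)
    {
        struct dmx_pes_filter_params f = {
            .pid      = 0x0100,
            .input    = DMX_IN_FRONTEND,
            .output   = DMX_OUT_TSDEMUX_TAP,
            .pes_type = DMX_PES_OTHER,
            .flags    = DMX_IMMEDIATE_START,
        };
        __u16 extra_pid = 0x0101;

        if (ioctl(fd, DMX_SET_PES_FILTER, &f) < 0)
            return -1;
        return ioctl(fd, DMX_ADD_PID, &extra_pid);
    }
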
diff --git a/Documentation/media/uapi/dvb/dmx-fclose.rst b/Documentation/media/uapi/dvb/dmx-fclose.rst
index ca93c23cde6d..578e929f4bde 100644
--- a/Documentation/media/uapi/dvb/dmx-fclose.rst
+++ b/Documentation/media/uapi/dvb/dmx-fclose.rst
@@ -2,14 +2,14 @@
.. _dmx_fclose:
-=================
-DVB demux close()
-=================
+========================
+Digital TV demux close()
+========================
Name
----
-DVB demux close()
+Digital TV demux close()
Synopsis
@@ -23,25 +23,23 @@ Arguments
---------
``fd``
- File descriptor returned by a previous call to :c:func:`open() <dvb-ca-open>`.
+ File descriptor returned by a previous call to
+ :c:func:`open() <dvb-dmx-open>`.
Description
-----------
This system call deactivates and deallocates a filter that was
-previously allocated via the open() call.
+previously allocated via the :c:func:`open() <dvb-dmx-open>` call.
Return Value
------------
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
+On success 0 is returned.
+On error, -1 is returned and the ``errno`` variable is set
+appropriately.
- - .. row 1
-
- - ``EBADF``
-
- - fd is not a valid open file descriptor.
+The generic error codes are described at the
+:ref:`Generic Error Codes <gen-errors>` chapter.
diff --git a/Documentation/media/uapi/dvb/dmx-fopen.rst b/Documentation/media/uapi/dvb/dmx-fopen.rst
index a697e33c32ea..55628a18ba67 100644
--- a/Documentation/media/uapi/dvb/dmx-fopen.rst
+++ b/Documentation/media/uapi/dvb/dmx-fopen.rst
@@ -2,14 +2,14 @@
.. _dmx_fopen:
-================
-DVB demux open()
-================
+=======================
+Digital TV demux open()
+=======================
Name
----
-DVB demux open()
+Digital TV demux open()
Synopsis
@@ -22,25 +22,28 @@ Arguments
---------
``name``
- Name of specific DVB demux device.
+ Name of specific Digital TV demux device.
``flags``
A bit-wise OR of the following flags:
+.. tabularcolumns:: |p{2.5cm}|p{15.0cm}|
+
.. flat-table::
:header-rows: 0
:stub-columns: 0
+ :widths: 1 16
-
- - O_RDONLY
+ - ``O_RDONLY``
- read-only access
-
- - O_RDWR
+ - ``O_RDWR``
- read/write access
-
- - O_NONBLOCK
+ - ``O_NONBLOCK``
- open in non-blocking mode
(blocking mode is the default)
@@ -48,52 +51,41 @@ Arguments
Description
-----------
-This system call, used with a device name of /dev/dvb/adapter0/demux0,
+This system call, used with a device name of ``/dev/dvb/adapter?/demux?``,
allocates a new filter and returns a handle which can be used for
subsequent control of that filter. This call has to be made for each
filter to be used, i.e. every returned file descriptor is a reference to
-a single filter. /dev/dvb/adapter0/dvr0 is a logical device to be used
+a single filter. ``/dev/dvb/adapter?/dvr?`` is a logical device to be used
for retrieving Transport Streams for digital video recording. When
reading from this device a transport stream containing the packets from
all PES filters set in the corresponding demux device
-(/dev/dvb/adapter0/demux0) having the output set to DMX_OUT_TS_TAP. A
-recorded Transport Stream is replayed by writing to this device.
+(``/dev/dvb/adapter?/demux?``) having the output set to ``DMX_OUT_TS_TAP``
+is returned. A recorded Transport Stream is replayed by writing to this device.
The significance of blocking or non-blocking mode is described in the
documentation for functions where there is a difference. It does not
-affect the semantics of the open() call itself. A device opened in
-blocking mode can later be put into non-blocking mode (and vice versa)
-using the F_SETFL command of the fcntl system call.
+affect the semantics of the ``open()`` call itself. A device opened
+in blocking mode can later be put into non-blocking mode (and vice versa)
+using the ``F_SETFL`` command of the fcntl system call.
Return Value
------------
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
-
- - .. row 1
-
- - ``ENODEV``
-
- - Device driver not loaded/available.
+On success 0 is returned.
- - .. row 2
+On error -1 is returned, and the ``errno`` variable is set
+appropriately.
- - ``EINVAL``
+.. tabularcolumns:: |p{2.5cm}|p{15.0cm}|
- - Invalid argument.
-
- - .. row 3
-
- - ``EMFILE``
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 16
+ - - ``EMFILE``
- “Too many open files”, i.e. no more filters available.
- - .. row 4
-
- - ``ENOMEM``
-
- - The driver failed to allocate enough memory.
+The generic error codes are described at the
+:ref:`Generic Error Codes <gen-errors>` chapter.
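
A minimal sketch of allocating one filter and toggling its blocking mode,
with placeholder adapter/demux numbers:

.. code-block:: c

    #include <fcntl.h>
    #include <stdio.h>

    /* Each successful open() allocates one independent filter. */
    static int open_demux_filter(void)
    {
        int fd = open("/dev/dvb/adapter0/demux0", O_RDWR | O_NONBLOCK);

        if (fd < 0) {
            perror("open demux");
            return -1;
        }
        /* Put the filter back into blocking mode, if that is preferred. */
        fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) & ~O_NONBLOCK);
        return fd;
    }
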
diff --git a/Documentation/media/uapi/dvb/dmx-fread.rst b/Documentation/media/uapi/dvb/dmx-fread.rst
index e8c7f4db353f..488bdc4ba178 100644
--- a/Documentation/media/uapi/dvb/dmx-fread.rst
+++ b/Documentation/media/uapi/dvb/dmx-fread.rst
@@ -2,14 +2,14 @@
.. _dmx_fread:
-================
-DVB demux read()
-================
+=======================
+Digital TV demux read()
+=======================
Name
----
-DVB demux read()
+Digital TV demux read()
Synopsis
@@ -33,62 +33,48 @@ Arguments
Description
-----------
-This system call returns filtered data, which might be section or PES
-data. The filtered data is transferred from the driver’s internal
-circular buffer to buf. The maximum amount of data to be transferred is
-implied by count.
+This system call returns filtered data, which might be section or Packetized
+Elementary Stream (PES) data. The filtered data is transferred from
+the driver’s internal circular buffer to ``buf``. The maximum amount of data
+to be transferred is implied by count.
+
+.. note::
+
+   if a section filter is created with the
+   :c:type:`DMX_CHECK_CRC <dmx_sct_filter_params>` flag set,
+   data that fails the CRC check will be silently ignored.
+
Return Value
------------
+On success 0 is returned.
+
+On error -1 is returned, and the ``errno`` variable is set
+appropriately.
+
.. tabularcolumns:: |p{2.5cm}|p{15.0cm}|
.. flat-table::
:header-rows: 0
:stub-columns: 0
+ :widths: 1 16
+ - - ``EWOULDBLOCK``
+ - No data to return and ``O_NONBLOCK`` was specified.
- - .. row 1
-
- - ``EWOULDBLOCK``
-
- - No data to return and O_NONBLOCK was specified.
-
- - .. row 2
-
- - ``EBADF``
-
- - fd is not a valid open file descriptor.
-
- - .. row 3
-
- - ``ECRC``
-
- - Last section had a CRC error - no data returned. The buffer is
- flushed.
-
- - .. row 4
-
- - ``EOVERFLOW``
-
- -
-
- - .. row 5
-
- -
+ - - ``EOVERFLOW``
- The filtered data was not read from the buffer in due time,
resulting in non-read data being lost. The buffer is flushed.
- - .. row 6
-
- - ``ETIMEDOUT``
-
- - The section was not loaded within the stated timeout period. See
- ioctl DMX_SET_FILTER for how to set a timeout.
+ - - ``ETIMEDOUT``
+ - The section was not loaded within the stated timeout period.
+ See ioctl :ref:`DMX_SET_FILTER` for how to set a timeout.
- - .. row 7
+ - - ``EFAULT``
+ - The driver failed to write to the callers buffer due to an
+ invalid \*buf pointer.
- - ``EFAULT``
- - The driver failed to write to the callers buffer due to an invalid
- \*buf pointer.
+The generic error codes are described at the
+:ref:`Generic Error Codes <gen-errors>` chapter.
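
A sketch of draining one filtered section from an open demux filter,
showing how the error codes above may be handled:

.. code-block:: c

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Read up to one maximum-sized section into buf. */
    static ssize_t read_section(int fd, uint8_t *buf, size_t len)
    {
        ssize_t n = read(fd, buf, len);

        if (n < 0) {
            if (errno == EWOULDBLOCK)
                return 0;        /* nothing buffered yet (O_NONBLOCK) */
            if (errno == EOVERFLOW)
                fprintf(stderr, "demux buffer overflowed, data lost\n");
            else
                perror("read");
        }
        return n;
    }
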
diff --git a/Documentation/media/uapi/dvb/dmx-fwrite.rst b/Documentation/media/uapi/dvb/dmx-fwrite.rst
index 8a90dfe28307..519e5733e53b 100644
--- a/Documentation/media/uapi/dvb/dmx-fwrite.rst
+++ b/Documentation/media/uapi/dvb/dmx-fwrite.rst
@@ -2,14 +2,14 @@
.. _dmx_fwrite:
-=================
-DVB demux write()
-=================
+========================
+Digital TV demux write()
+========================
Name
----
-DVB demux write()
+Digital TV demux write()
Synopsis
@@ -34,42 +34,39 @@ Description
-----------
This system call is only provided by the logical device
-/dev/dvb/adapter0/dvr0, associated with the physical demux device that
+``/dev/dvb/adapter?/dvr?``, associated with the physical demux device that
provides the actual DVR functionality. It is used for replay of a
digitally recorded Transport Stream. Matching filters have to be defined
-in the corresponding physical demux device, /dev/dvb/adapter0/demux0.
+in the corresponding physical demux device, ``/dev/dvb/adapter?/demux?``.
The amount of data to be transferred is implied by count.
Return Value
------------
+On success 0 is returned.
+
+On error -1 is returned, and the ``errno`` variable is set
+appropriately.
+
.. tabularcolumns:: |p{2.5cm}|p{15.0cm}|
.. flat-table::
:header-rows: 0
:stub-columns: 0
+ :widths: 1 16
- - .. row 1
-
- - ``EWOULDBLOCK``
-
- - No data was written. This might happen if O_NONBLOCK was
+ - - ``EWOULDBLOCK``
+ - No data was written. This might happen if ``O_NONBLOCK`` was
specified and there is no more buffer space available (if
- O_NONBLOCK is not specified the function will block until buffer
+ ``O_NONBLOCK`` is not specified the function will block until buffer
space is available).
- - .. row 2
-
- - ``EBUSY``
-
+ - - ``EBUSY``
- This error code indicates that there are conflicting requests. The
corresponding demux device is setup to receive data from the
front- end. Make sure that these filters are stopped and that the
- filters with input set to DMX_IN_DVR are started.
-
- - .. row 3
-
- - ``EBADF``
+ filters with input set to ``DMX_IN_DVR`` are started.
- - fd is not a valid open file descriptor.
+The generic error codes are described at the
+:ref:`Generic Error Codes <gen-errors>` chapter.
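
A sketch of replaying a recorded Transport Stream through the DVR node;
the file name and device path are placeholders, and matching filters with
input set to ``DMX_IN_DVR`` are assumed to be running already:

.. code-block:: c

    #include <errno.h>
    #include <fcntl.h>
    #include <stdint.h>
    #include <unistd.h>

    static void replay_ts(const char *recording)
    {
        uint8_t pkt[188 * 348];    /* a burst of 188-byte TS packets */
        int in = open(recording, O_RDONLY);
        int dvr = open("/dev/dvb/adapter0/dvr0", O_WRONLY);
        ssize_t n;

        if (in >= 0 && dvr >= 0)
            while ((n = read(in, pkt, sizeof(pkt))) > 0)
                if (write(dvr, pkt, n) < 0 && errno == EBUSY)
                    break;    /* demux still wired to the frontend */
        if (in >= 0)
            close(in);
        if (dvr >= 0)
            close(dvr);
    }
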
diff --git a/Documentation/media/uapi/dvb/dmx-get-caps.rst b/Documentation/media/uapi/dvb/dmx-get-caps.rst
deleted file mode 100644
index 145fb520d779..000000000000
--- a/Documentation/media/uapi/dvb/dmx-get-caps.rst
+++ /dev/null
@@ -1,41 +0,0 @@
-.. -*- coding: utf-8; mode: rst -*-
-
-.. _DMX_GET_CAPS:
-
-============
-DMX_GET_CAPS
-============
-
-Name
-----
-
-DMX_GET_CAPS
-
-
-Synopsis
---------
-
-.. c:function:: int ioctl(fd, DMX_GET_CAPS, struct dmx_caps *caps)
- :name: DMX_GET_CAPS
-
-Arguments
----------
-
-``fd``
- File descriptor returned by :c:func:`open() <dvb-dmx-open>`.
-
-``caps``
- Pointer to struct :c:type:`dmx_caps`
-
-
-Description
------------
-
-.. note:: This ioctl is undocumented. Documentation is welcome.
-
-Return Value
-------------
-
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
-:ref:`Generic Error Codes <gen-errors>` chapter.
diff --git a/Documentation/media/uapi/dvb/dmx-get-event.rst b/Documentation/media/uapi/dvb/dmx-get-event.rst
deleted file mode 100644
index 8be626c29158..000000000000
--- a/Documentation/media/uapi/dvb/dmx-get-event.rst
+++ /dev/null
@@ -1,60 +0,0 @@
-.. -*- coding: utf-8; mode: rst -*-
-
-.. _DMX_GET_EVENT:
-
-=============
-DMX_GET_EVENT
-=============
-
-Name
-----
-
-DMX_GET_EVENT
-
-
-Synopsis
---------
-
-.. c:function:: int ioctl( int fd, DMX_GET_EVENT, struct dmx_event *ev)
- :name: DMX_GET_EVENT
-
-
-Arguments
----------
-
-``fd``
- File descriptor returned by :c:func:`open() <dvb-dmx-open>`.
-
-``ev``
- Pointer to the location where the event is to be stored.
-
-
-Description
------------
-
-This ioctl call returns an event if available. If an event is not
-available, the behavior depends on whether the device is in blocking or
-non-blocking mode. In the latter case, the call fails immediately with
-errno set to ``EWOULDBLOCK``. In the former case, the call blocks until an
-event becomes available.
-
-
-Return Value
-------------
-
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
-:ref:`Generic Error Codes <gen-errors>` chapter.
-
-
-
-.. flat-table::
- :header-rows: 0
- :stub-columns: 0
-
-
- - .. row 1
-
- - ``EWOULDBLOCK``
-
- - There is no event pending, and the device is in non-blocking mode.
diff --git a/Documentation/media/uapi/dvb/dmx-get-pes-pids.rst b/Documentation/media/uapi/dvb/dmx-get-pes-pids.rst
index b31634a1cca4..fbdbc12869d1 100644
--- a/Documentation/media/uapi/dvb/dmx-get-pes-pids.rst
+++ b/Documentation/media/uapi/dvb/dmx-get-pes-pids.rst
@@ -25,18 +25,40 @@ Arguments
File descriptor returned by :c:func:`open() <dvb-dmx-open>`.
``pids``
- Undocumented.
+ Array used to store 5 Program IDs.
Description
-----------
-.. note:: This ioctl is undocumented. Documentation is welcome.
+This ioctl allows querying a DVB device to return the first PID used
+by audio, video, teletext, subtitle and PCR programs on a given service.
+They're stored as:
+
+======================= ======== =======================================
+PID element position content
+======================= ======== =======================================
+pids[DMX_PES_AUDIO] 0 first audio PID
+pids[DMX_PES_VIDEO] 1 first video PID
+pids[DMX_PES_TELETEXT] 2 first teletext PID
+pids[DMX_PES_SUBTITLE] 3 first subtitle PID
+pids[DMX_PES_PCR] 4 first Program Clock Reference PID
+======================= ======== =======================================
+
+
+.. note::
+
+ A value equal to 0xffff means that the PID was not filled by the
+ Kernel.
Return Value
------------
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
+On success 0 is returned.
+
+On error -1 is returned, and the ``errno`` variable is set
+appropriately.
+
+The generic error codes are described at the
:ref:`Generic Error Codes <gen-errors>` chapter.
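
A sketch of reading the PID table described above, assuming ``fd`` is an
open demux node:

.. code-block:: c

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/dvb/dmx.h>

    /* Print the first audio and video PIDs, skipping unfilled entries. */
    static void print_pes_pids(int fd)
    {
        __u16 pids[5];

        if (ioctl(fd, DMX_GET_PES_PIDS, pids) < 0) {
            perror("DMX_GET_PES_PIDS");
            return;
        }
        if (pids[DMX_PES_AUDIO] != 0xffff)
            printf("audio PID: 0x%04x\n", pids[DMX_PES_AUDIO]);
        if (pids[DMX_PES_VIDEO] != 0xffff)
            printf("video PID: 0x%04x\n", pids[DMX_PES_VIDEO]);
    }
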
diff --git a/Documentation/media/uapi/dvb/dmx-get-stc.rst b/Documentation/media/uapi/dvb/dmx-get-stc.rst
index 9fc501e8128a..604031f7904b 100644
--- a/Documentation/media/uapi/dvb/dmx-get-stc.rst
+++ b/Documentation/media/uapi/dvb/dmx-get-stc.rst
@@ -25,34 +25,42 @@ Arguments
File descriptor returned by :c:func:`open() <dvb-dmx-open>`.
``stc``
- Pointer to the location where the stc is to be stored.
+ Pointer to :c:type:`dmx_stc` where the stc data is to be stored.
Description
-----------
This ioctl call returns the current value of the system time counter
-(which is driven by a PES filter of type DMX_PES_PCR). Some hardware
-supports more than one STC, so you must specify which one by setting the
-num field of stc before the ioctl (range 0...n). The result is returned
-in form of a ratio with a 64 bit numerator and a 32 bit denominator, so
-the real 90kHz STC value is stc->stc / stc->base .
+(which is driven by a PES filter of type :c:type:`DMX_PES_PCR <dmx_ts_pes>`).
+Some hardware supports more than one STC, so you must specify which one by
+setting the :c:type:`num <dmx_stc>` field of stc before the ioctl (range 0...n).
+The result is returned in form of a ratio with a 64 bit numerator
+and a 32 bit denominator, so the real 90kHz STC value is
+``stc->stc / stc->base``.
Return Value
------------
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
-:ref:`Generic Error Codes <gen-errors>` chapter.
+On success 0 is returned.
+
+On error -1 is returned, and the ``errno`` variable is set
+appropriately.
+
+.. tabularcolumns:: |p{2.5cm}|p{15.0cm}|
.. flat-table::
:header-rows: 0
:stub-columns: 0
-
+ :widths: 1 16
- .. row 1
- ``EINVAL``
- Invalid stc number.
+
+
+The generic error codes are described at the
+:ref:`Generic Error Codes <gen-errors>` chapter.
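
A sketch of the ``stc->stc / stc->base`` conversion described above:

.. code-block:: c

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/dvb/dmx.h>

    /* Read STC number 0 and print it in 90 kHz units. */
    static int print_stc(int fd)
    {
        struct dmx_stc stc = { .num = 0 };

        if (ioctl(fd, DMX_GET_STC, &stc) < 0) {
            perror("DMX_GET_STC");    /* EINVAL: invalid stc number */
            return -1;
        }
        printf("STC: %llu (90 kHz ticks)\n",
               (unsigned long long)(stc.stc / stc.base));
        return 0;
    }
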
diff --git a/Documentation/media/uapi/dvb/dmx-remove-pid.rst b/Documentation/media/uapi/dvb/dmx-remove-pid.rst
index e411495c619c..456cc2ded2c0 100644
--- a/Documentation/media/uapi/dvb/dmx-remove-pid.rst
+++ b/Documentation/media/uapi/dvb/dmx-remove-pid.rst
@@ -34,13 +34,17 @@ Description
This ioctl call allows to remove a PID when multiple PIDs are set on a
transport stream filter, e. g. a filter previously set up with output
-equal to DMX_OUT_TSDEMUX_TAP, created via either
-DMX_SET_PES_FILTER or DMX_ADD_PID.
+equal to :c:type:`DMX_OUT_TSDEMUX_TAP <dmx_output>`, created via either
+:ref:`DMX_SET_PES_FILTER` or :ref:`DMX_ADD_PID`.
Return Value
------------
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
+On success 0 is returned.
+
+On error -1 is returned, and the ``errno`` variable is set
+appropriately.
+
+The generic error codes are described at the
:ref:`Generic Error Codes <gen-errors>` chapter.
diff --git a/Documentation/media/uapi/dvb/dmx-set-buffer-size.rst b/Documentation/media/uapi/dvb/dmx-set-buffer-size.rst
index f2f7379f29ed..74fd076a9b90 100644
--- a/Documentation/media/uapi/dvb/dmx-set-buffer-size.rst
+++ b/Documentation/media/uapi/dvb/dmx-set-buffer-size.rst
@@ -33,13 +33,18 @@ Description
This ioctl call is used to set the size of the circular buffer used for
filtered data. The default size is two maximum sized sections, i.e. if
-this function is not called a buffer size of 2 \* 4096 bytes will be
+this function is not called a buffer size of ``2 * 4096`` bytes will be
used.
Return Value
------------
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
+
+On success 0 is returned.
+
+On error -1 is returned, and the ``errno`` variable is set
+appropriately.
+
+The generic error codes are described at the
:ref:`Generic Error Codes <gen-errors>` chapter.
diff --git a/Documentation/media/uapi/dvb/dmx-set-filter.rst b/Documentation/media/uapi/dvb/dmx-set-filter.rst
index 1d50c803d69a..88594b8d3846 100644
--- a/Documentation/media/uapi/dvb/dmx-set-filter.rst
+++ b/Documentation/media/uapi/dvb/dmx-set-filter.rst
@@ -40,13 +40,18 @@ state whether a section should be CRC-checked, whether the filter should
be a ”one-shot” filter, i.e. if the filtering operation should be
stopped after the first section is received, and whether the filtering
operation should be started immediately (without waiting for a
-DMX_START ioctl call). If a filter was previously set-up, this filter
-will be canceled, and the receive buffer will be flushed.
+:ref:`DMX_START` ioctl call). If a filter was previously set-up, this
+filter will be canceled, and the receive buffer will be flushed.
Return Value
------------
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
+
+On success 0 is returned.
+
+On error -1 is returned, and the ``errno`` variable is set
+appropriately.
+
+The generic error codes are described at the
:ref:`Generic Error Codes <gen-errors>` chapter.
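
A sketch of a one-shot, CRC-checked section filter for the PAT; the PID,
mask and timeout values are illustrative:

.. code-block:: c

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/dvb/dmx.h>

    /* Filter PID 0 for table_id 0x00; DMX_IMMEDIATE_START makes a
     * separate DMX_START call unnecessary. */
    static int filter_pat(int fd)
    {
        struct dmx_sct_filter_params p;

        memset(&p, 0, sizeof(p));
        p.pid = 0x0000;
        p.filter.filter[0] = 0x00;    /* table_id == 0x00 */
        p.filter.mask[0] = 0xff;
        p.timeout = 5000;             /* in milliseconds */
        p.flags = DMX_CHECK_CRC | DMX_ONESHOT | DMX_IMMEDIATE_START;

        return ioctl(fd, DMX_SET_FILTER, &p);
    }
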
diff --git a/Documentation/media/uapi/dvb/dmx-set-pes-filter.rst b/Documentation/media/uapi/dvb/dmx-set-pes-filter.rst
index 145451d04f7d..d70e7bf96a41 100644
--- a/Documentation/media/uapi/dvb/dmx-set-pes-filter.rst
+++ b/Documentation/media/uapi/dvb/dmx-set-pes-filter.rst
@@ -42,15 +42,17 @@ capability is supported.
Return Value
------------
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
-:ref:`Generic Error Codes <gen-errors>` chapter.
+On success 0 is returned.
+
+On error -1 is returned, and the ``errno`` variable is set
+appropriately.
.. tabularcolumns:: |p{2.5cm}|p{15.0cm}|
.. flat-table::
:header-rows: 0
:stub-columns: 0
+ :widths: 1 16
- .. row 1
@@ -61,3 +63,7 @@ appropriately. The generic error codes are described at the
There are active filters filtering data from another input source.
Make sure that these filters are stopped before starting this
filter.
+
+
+The generic error codes are described at the
+:ref:`Generic Error Codes <gen-errors>` chapter.
diff --git a/Documentation/media/uapi/dvb/dmx-set-source.rst b/Documentation/media/uapi/dvb/dmx-set-source.rst
deleted file mode 100644
index ac7f77b25e06..000000000000
--- a/Documentation/media/uapi/dvb/dmx-set-source.rst
+++ /dev/null
@@ -1,44 +0,0 @@
-.. -*- coding: utf-8; mode: rst -*-
-
-.. _DMX_SET_SOURCE:
-
-==============
-DMX_SET_SOURCE
-==============
-
-Name
-----
-
-DMX_SET_SOURCE
-
-
-Synopsis
---------
-
-.. c:function:: int ioctl(fd, DMX_SET_SOURCE, struct dmx_source *src)
- :name: DMX_SET_SOURCE
-
-
-Arguments
----------
-
-
-``fd``
- File descriptor returned by :c:func:`open() <dvb-dmx-open>`.
-
-``src``
- Undocumented.
-
-
-Description
------------
-
-.. note:: This ioctl is undocumented. Documentation is welcome.
-
-
-Return Value
-------------
-
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
-:ref:`Generic Error Codes <gen-errors>` chapter.
diff --git a/Documentation/media/uapi/dvb/dmx-start.rst b/Documentation/media/uapi/dvb/dmx-start.rst
index 641f3e017fb1..36700e775296 100644
--- a/Documentation/media/uapi/dvb/dmx-start.rst
+++ b/Documentation/media/uapi/dvb/dmx-start.rst
@@ -29,15 +29,16 @@ Description
-----------
This ioctl call is used to start the actual filtering operation defined
-via the ioctl calls DMX_SET_FILTER or DMX_SET_PES_FILTER.
+via the ioctl calls :ref:`DMX_SET_FILTER` or :ref:`DMX_SET_PES_FILTER`.
Return Value
------------
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
-:ref:`Generic Error Codes <gen-errors>` chapter.
+On success 0 is returned.
+
+On error -1 is returned, and the ``errno`` variable is set
+appropriately.
.. tabularcolumns:: |p{2.5cm}|p{15.0cm}|
@@ -51,7 +52,7 @@ appropriately. The generic error codes are described at the
- ``EINVAL``
- Invalid argument, i.e. no filtering parameters provided via the
- DMX_SET_FILTER or DMX_SET_PES_FILTER functions.
+ :ref:`DMX_SET_FILTER` or :ref:`DMX_SET_PES_FILTER` ioctls.
- .. row 2
@@ -61,3 +62,7 @@ appropriately. The generic error codes are described at the
There are active filters filtering data from another input source.
Make sure that these filters are stopped before starting this
filter.
+
+
+The generic error codes are described at the
+:ref:`Generic Error Codes <gen-errors>` chapter.
diff --git a/Documentation/media/uapi/dvb/dmx-stop.rst b/Documentation/media/uapi/dvb/dmx-stop.rst
index 569a3df44923..6d9c927bcd5f 100644
--- a/Documentation/media/uapi/dvb/dmx-stop.rst
+++ b/Documentation/media/uapi/dvb/dmx-stop.rst
@@ -29,13 +29,17 @@ Description
-----------
This ioctl call is used to stop the actual filtering operation defined
-via the ioctl calls DMX_SET_FILTER or DMX_SET_PES_FILTER and
-started via the DMX_START command.
+via the ioctl calls :ref:`DMX_SET_FILTER` or :ref:`DMX_SET_PES_FILTER` and
+started via the :ref:`DMX_START` command.
Return Value
------------
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
+On success 0 is returned.
+
+On error -1 is returned, and the ``errno`` variable is set
+appropriately.
+
+The generic error codes are described at the
:ref:`Generic Error Codes <gen-errors>` chapter.
diff --git a/Documentation/media/uapi/dvb/dmx_fcalls.rst b/Documentation/media/uapi/dvb/dmx_fcalls.rst
index 77a1554d9834..a17289143220 100644
--- a/Documentation/media/uapi/dvb/dmx_fcalls.rst
+++ b/Documentation/media/uapi/dvb/dmx_fcalls.rst
@@ -18,10 +18,7 @@ Demux Function Calls
dmx-set-filter
dmx-set-pes-filter
dmx-set-buffer-size
- dmx-get-event
dmx-get-stc
dmx-get-pes-pids
- dmx-get-caps
- dmx-set-source
dmx-add-pid
dmx-remove-pid
diff --git a/Documentation/media/uapi/dvb/dmx_h.rst b/Documentation/media/uapi/dvb/dmx_h.rst
deleted file mode 100644
index 4fd1704a0833..000000000000
--- a/Documentation/media/uapi/dvb/dmx_h.rst
+++ /dev/null
@@ -1,9 +0,0 @@
-.. -*- coding: utf-8; mode: rst -*-
-
-.. _dmx_h:
-
-*********************
-DVB Demux Header File
-*********************
-
-.. kernel-include:: $BUILDDIR/dmx.h.rst
diff --git a/Documentation/media/uapi/dvb/dmx_types.rst b/Documentation/media/uapi/dvb/dmx_types.rst
index 80dd659860d7..2a023a4f516c 100644
--- a/Documentation/media/uapi/dvb/dmx_types.rst
+++ b/Documentation/media/uapi/dvb/dmx_types.rst
@@ -6,227 +6,4 @@
Demux Data Types
****************
-Output for the demux
-====================
-
-.. c:type:: dmx_output
-
-.. tabularcolumns:: |p{5.0cm}|p{12.5cm}|
-
-.. flat-table:: enum dmx_output
- :header-rows: 1
- :stub-columns: 0
-
-
- - .. row 1
-
- - ID
-
- - Description
-
- - .. row 2
-
- - .. _DMX-OUT-DECODER:
-
- DMX_OUT_DECODER
-
- - Streaming directly to decoder.
-
- - .. row 3
-
- - .. _DMX-OUT-TAP:
-
- DMX_OUT_TAP
-
- - Output going to a memory buffer (to be retrieved via the read
- command). Delivers the stream output to the demux device on which
- the ioctl is called.
-
- - .. row 4
-
- - .. _DMX-OUT-TS-TAP:
-
- DMX_OUT_TS_TAP
-
- - Output multiplexed into a new TS (to be retrieved by reading from
- the logical DVR device). Routes output to the logical DVR device
- ``/dev/dvb/adapter?/dvr?``, which delivers a TS multiplexed from
- all filters for which ``DMX_OUT_TS_TAP`` was specified.
-
- - .. row 5
-
- - .. _DMX-OUT-TSDEMUX-TAP:
-
- DMX_OUT_TSDEMUX_TAP
-
- - Like :ref:`DMX_OUT_TS_TAP <DMX-OUT-TS-TAP>` but retrieved
- from the DMX device.
-
-
-dmx_input_t
-===========
-
-.. c:type:: dmx_input
-
-.. code-block:: c
-
- typedef enum
- {
- DMX_IN_FRONTEND, /* Input from a front-end device. */
- DMX_IN_DVR /* Input from the logical DVR device. */
- } dmx_input_t;
-
-
-dmx_pes_type_t
-==============
-
-.. c:type:: dmx_pes_type
-
-
-.. code-block:: c
-
- typedef enum
- {
- DMX_PES_AUDIO0,
- DMX_PES_VIDEO0,
- DMX_PES_TELETEXT0,
- DMX_PES_SUBTITLE0,
- DMX_PES_PCR0,
-
- DMX_PES_AUDIO1,
- DMX_PES_VIDEO1,
- DMX_PES_TELETEXT1,
- DMX_PES_SUBTITLE1,
- DMX_PES_PCR1,
-
- DMX_PES_AUDIO2,
- DMX_PES_VIDEO2,
- DMX_PES_TELETEXT2,
- DMX_PES_SUBTITLE2,
- DMX_PES_PCR2,
-
- DMX_PES_AUDIO3,
- DMX_PES_VIDEO3,
- DMX_PES_TELETEXT3,
- DMX_PES_SUBTITLE3,
- DMX_PES_PCR3,
-
- DMX_PES_OTHER
- } dmx_pes_type_t;
-
-
-struct dmx_filter
-=================
-
-.. c:type:: dmx_filter
-
-.. code-block:: c
-
- typedef struct dmx_filter
- {
- __u8 filter[DMX_FILTER_SIZE];
- __u8 mask[DMX_FILTER_SIZE];
- __u8 mode[DMX_FILTER_SIZE];
- } dmx_filter_t;
-
-
-.. c:type:: dmx_sct_filter_params
-
-struct dmx_sct_filter_params
-============================
-
-
-.. code-block:: c
-
- struct dmx_sct_filter_params
- {
- __u16 pid;
- dmx_filter_t filter;
- __u32 timeout;
- __u32 flags;
- #define DMX_CHECK_CRC 1
- #define DMX_ONESHOT 2
- #define DMX_IMMEDIATE_START 4
- #define DMX_KERNEL_CLIENT 0x8000
- };
-
-
-struct dmx_pes_filter_params
-============================
-
-.. c:type:: dmx_pes_filter_params
-
-.. code-block:: c
-
- struct dmx_pes_filter_params
- {
- __u16 pid;
- dmx_input_t input;
- dmx_output_t output;
- dmx_pes_type_t pes_type;
- __u32 flags;
- };
-
-
-struct dmx_event
-================
-
-.. c:type:: dmx_event
-
-.. code-block:: c
-
- struct dmx_event
- {
- dmx_event_t event;
- time_t timeStamp;
- union
- {
- dmx_scrambling_status_t scrambling;
- } u;
- };
-
-
-struct dmx_stc
-==============
-
-.. c:type:: dmx_stc
-
-.. code-block:: c
-
- struct dmx_stc {
- unsigned int num; /* input : which STC? 0..N */
- unsigned int base; /* output: divisor for stc to get 90 kHz clock */
- __u64 stc; /* output: stc in 'base'*90 kHz units */
- };
-
-
-struct dmx_caps
-===============
-
-.. c:type:: dmx_caps
-
-.. code-block:: c
-
- typedef struct dmx_caps {
- __u32 caps;
- int num_decoders;
- } dmx_caps_t;
-
-
-enum dmx_source
-===============
-
-.. c:type:: dmx_source
-
-.. code-block:: c
-
- typedef enum dmx_source {
- DMX_SOURCE_FRONT0 = 0,
- DMX_SOURCE_FRONT1,
- DMX_SOURCE_FRONT2,
- DMX_SOURCE_FRONT3,
- DMX_SOURCE_DVR0 = 16,
- DMX_SOURCE_DVR1,
- DMX_SOURCE_DVR2,
- DMX_SOURCE_DVR3
- } dmx_source_t;
+.. kernel-doc:: include/uapi/linux/dvb/dmx.h
diff --git a/Documentation/media/uapi/dvb/dtv-fe-stats.rst b/Documentation/media/uapi/dvb/dtv-fe-stats.rst
deleted file mode 100644
index e8a02a1f138d..000000000000
--- a/Documentation/media/uapi/dvb/dtv-fe-stats.rst
+++ /dev/null
@@ -1,17 +0,0 @@
-.. -*- coding: utf-8; mode: rst -*-
-
-.. c:type:: dtv_fe_stats
-
-*******************
-struct dtv_fe_stats
-*******************
-
-
-.. code-block:: c
-
- #define MAX_DTV_STATS 4
-
- struct dtv_fe_stats {
- __u8 len;
- struct dtv_stats stat[MAX_DTV_STATS];
- } __packed;
diff --git a/Documentation/media/uapi/dvb/dtv-properties.rst b/Documentation/media/uapi/dvb/dtv-properties.rst
deleted file mode 100644
index 48c4e834ad11..000000000000
--- a/Documentation/media/uapi/dvb/dtv-properties.rst
+++ /dev/null
@@ -1,15 +0,0 @@
-.. -*- coding: utf-8; mode: rst -*-
-
-.. c:type:: dtv_properties
-
-*********************
-struct dtv_properties
-*********************
-
-
-.. code-block:: c
-
- struct dtv_properties {
- __u32 num;
- struct dtv_property *props;
- };
diff --git a/Documentation/media/uapi/dvb/dtv-property.rst b/Documentation/media/uapi/dvb/dtv-property.rst
deleted file mode 100644
index 3ddc3474b00e..000000000000
--- a/Documentation/media/uapi/dvb/dtv-property.rst
+++ /dev/null
@@ -1,31 +0,0 @@
-.. -*- coding: utf-8; mode: rst -*-
-
-.. c:type:: dtv_property
-
-*******************
-struct dtv_property
-*******************
-
-
-.. code-block:: c
-
- /* Reserved fields should be set to 0 */
-
- struct dtv_property {
- __u32 cmd;
- __u32 reserved[3];
- union {
- __u32 data;
- struct dtv_fe_stats st;
- struct {
- __u8 data[32];
- __u32 len;
- __u32 reserved1[3];
- void *reserved2;
- } buffer;
- } u;
- int result;
- } __attribute__ ((packed));
-
- /* num of properties cannot exceed DTV_IOCTL_MAX_MSGS per ioctl */
- #define DTV_IOCTL_MAX_MSGS 64
diff --git a/Documentation/media/uapi/dvb/dtv-stats.rst b/Documentation/media/uapi/dvb/dtv-stats.rst
deleted file mode 100644
index 35239e72bf74..000000000000
--- a/Documentation/media/uapi/dvb/dtv-stats.rst
+++ /dev/null
@@ -1,18 +0,0 @@
-.. -*- coding: utf-8; mode: rst -*-
-
-.. c:type:: dtv_stats
-
-****************
-struct dtv_stats
-****************
-
-
-.. code-block:: c
-
- struct dtv_stats {
- __u8 scale; /* enum fecap_scale_params type */
- union {
- __u64 uvalue; /* for counters and relative scales */
- __s64 svalue; /* for 1/1000 dB measures */
- };
- } __packed;
diff --git a/Documentation/media/uapi/dvb/dvb-fe-read-status.rst b/Documentation/media/uapi/dvb/dvb-fe-read-status.rst
index 76c20612b274..212f032cad8b 100644
--- a/Documentation/media/uapi/dvb/dvb-fe-read-status.rst
+++ b/Documentation/media/uapi/dvb/dvb-fe-read-status.rst
@@ -20,6 +20,6 @@ Signal statistics are provided via
.. note::
Most statistics require the demodulator to be fully locked
- (e. g. with FE_HAS_LOCK bit set). See
+ (e. g. with :c:type:`FE_HAS_LOCK <fe_status>` bit set). See
:ref:`Frontend statistics indicators <frontend-stat-properties>` for
more details.
diff --git a/Documentation/media/uapi/dvb/dvb-frontend-parameters.rst b/Documentation/media/uapi/dvb/dvb-frontend-parameters.rst
index 899fd5c3545e..b152166f8fa7 100644
--- a/Documentation/media/uapi/dvb/dvb-frontend-parameters.rst
+++ b/Documentation/media/uapi/dvb/dvb-frontend-parameters.rst
@@ -24,7 +24,7 @@ instead, in order to be able to support the newer System Delivery like
DVB-S2, DVB-T2, DVB-C2, ISDB, etc.
All kinds of parameters are combined as a union in the
-FrontendParameters structure:
+``dvb_frontend_parameters`` structure:
.. code-block:: c
diff --git a/Documentation/media/uapi/dvb/dvbapi.rst b/Documentation/media/uapi/dvb/dvbapi.rst
index 37680137e3f2..18c86b3a3af1 100644
--- a/Documentation/media/uapi/dvb/dvbapi.rst
+++ b/Documentation/media/uapi/dvb/dvbapi.rst
@@ -10,12 +10,27 @@ Part II - Digital TV API
.. note::
- This API is also known as **DVB API**, although it is generic
- enough to support all digital TV standards.
+ This API is also known as Linux **DVB API**.
+
+   It was originally written to support the European digital TV
+ standard (DVB), and later extended to support all digital TV standards.
+
+   In order to avoid confusion, within this document, the API and its
+   associated hardware are referred to as **Digital TV**.
+
+ The word **DVB** is reserved to be used for:
+
+ - the Digital TV API version
+ (e. g. DVB API version 3 or DVB API version 5);
+ - digital TV data types (enums, structs, defines, etc);
+ - digital TV device nodes (``/dev/dvb/...``);
+ - the European DVB standard.
**Version 5.10**
-.. class:: toc-title
+.. only:: html
+
+ .. class:: toc-title
Table of Contents
@@ -30,12 +45,7 @@ Part II - Digital TV API
net
legacy_dvb_apis
examples
- audio_h
- ca_h
- dmx_h
- frontend_h
- net_h
- video_h
+ headers
**********************
@@ -46,11 +56,11 @@ Authors:
- J. K. Metzler, Ralph <rjkm@metzlerbros.de>
- - Original author of the DVB API documentation.
+ - Original author of the Digital TV API documentation.
- O. C. Metzler, Marcus <rjkm@metzlerbros.de>
- - Original author of the DVB API documentation.
+ - Original author of the Digital TV API documentation.
- Carvalho Chehab, Mauro <m.chehab@kernel.org>
@@ -58,21 +68,26 @@ Authors:
**Copyright** |copy| 2002-2003 : Convergence GmbH
-**Copyright** |copy| 2009-2016 : Mauro Carvalho Chehab
+**Copyright** |copy| 2009-2017 : Mauro Carvalho Chehab
****************
Revision History
****************
+:revision: 2.2.0 / 2017-09-01 (*mcc*)
+
+Most gaps between the uAPI document and the Kernel implementation
+got fixed for the non-legacy API.
+
:revision: 2.1.0 / 2015-05-29 (*mcc*)
DocBook improvements and cleanups, in order to document the system calls
on a more standard way and provide more description about the current
-DVB API.
+Digital TV API.
:revision: 2.0.4 / 2011-05-06 (*mcc*)
-Add more information about DVB APIv5, better describing the frontend
+Add more information about DVBv5 API, better describing the frontend
GET/SET props ioctl's.
diff --git a/Documentation/media/uapi/dvb/dvbproperty-006.rst b/Documentation/media/uapi/dvb/dvbproperty-006.rst
deleted file mode 100644
index 3343a0f306fe..000000000000
--- a/Documentation/media/uapi/dvb/dvbproperty-006.rst
+++ /dev/null
@@ -1,12 +0,0 @@
-.. -*- coding: utf-8; mode: rst -*-
-
-**************
-Property types
-**************
-
-On :ref:`FE_GET_PROPERTY and FE_SET_PROPERTY <FE_GET_PROPERTY>`,
-the actual action is determined by the dtv_property cmd/data pairs.
-With one single ioctl, is possible to get/set up to 64 properties. The
-actual meaning of each property is described on the next sections.
-
-The available frontend property types are shown on the next section.
diff --git a/Documentation/media/uapi/dvb/dvbproperty.rst b/Documentation/media/uapi/dvb/dvbproperty.rst
index dd2d71ce43fa..1a56c1724e59 100644
--- a/Documentation/media/uapi/dvb/dvbproperty.rst
+++ b/Documentation/media/uapi/dvb/dvbproperty.rst
@@ -2,63 +2,76 @@
.. _frontend-properties:
-DVB Frontend properties
-=======================
+**************
+Property types
+**************
Tuning into a Digital TV physical channel and starting decoding it
requires changing a set of parameters, in order to control the tuner,
the demodulator, the Linear Low-noise Amplifier (LNA) and to set the
-antenna subsystem via Satellite Equipment Control (SEC), on satellite
-systems. The actual parameters are specific to each particular digital
+antenna subsystem via Satellite Equipment Control - SEC (on satellite
+systems). The actual parameters are specific to each particular digital
TV standards, and may change as the digital TV specs evolves.
-In the past, the strategy used was to have a union with the parameters
-needed to tune for DVB-S, DVB-C, DVB-T and ATSC delivery systems grouped
-there. The problem is that, as the second generation standards appeared,
-those structs were not big enough to contain the additional parameters.
-Also, the union didn't have any space left to be expanded without
-breaking userspace. So, the decision was to deprecate the legacy
-union/struct based approach, in favor of a properties set approach.
+In the past (up to DVB API version 3 - DVBv3), the strategy was to group
+the parameters needed to tune DVB-S, DVB-C, DVB-T and ATSC delivery
+systems into a single union. The problem is that, as the second
+generation standards appeared, that union was not big enough to hold
+the structs required by those new standards. Also, extending it would
+break userspace.
+
+So, the legacy union/struct based approach was deprecated, in favor
+of a properties set approach. With this approach,
+:ref:`FE_GET_PROPERTY and FE_SET_PROPERTY <FE_GET_PROPERTY>` are used
+to set up the frontend and read its status.
+
+The actual action is determined by a set of dtv_property cmd/data pairs.
+With a single ioctl, it is possible to get/set up to 64 properties.
+
+This section describes the new and recommended way to set the frontend,
+which supports all digital TV delivery systems.
.. note::
- On Linux DVB API version 3, setting a frontend were done via
- struct :c:type:`dvb_frontend_parameters`.
- This got replaced on version 5 (also called "S2API", as this API were
- added originally_enabled to provide support for DVB-S2), because the
- old API has a very limited support to new standards and new hardware.
- This section describes the new and recommended way to set the frontend,
- with suppports all digital TV delivery systems.
-
-Example: with the properties based approach, in order to set the tuner
-to a DVB-C channel at 651 kHz, modulated with 256-QAM, FEC 3/4 and
-symbol rate of 5.217 Mbauds, those properties should be sent to
+ 1. On Linux DVB API version 3, setting a frontend was done via
+ struct :c:type:`dvb_frontend_parameters`.
+
+ 2. Don't use DVB API version 3 calls on hardware which supports
+ newer standards. That API provides no support, or only very limited
+ support, for new standards and/or new hardware.
+
+ 3. Nowadays, most frontends support multiple delivery systems.
+ Only with DVB API version 5 calls is it possible to switch between
+ the multiple delivery systems supported by a frontend.
+
+ 4. DVB API version 5 is also called *S2API*, as the first
+ new standard added to it was DVB-S2.
+
+**Example**: in order to set the hardware to tune into a DVB-C channel
+at 651 MHz, modulated with 256-QAM, FEC 3/4 and a symbol rate of 5.217
+Mbauds, the following properties should be sent to the
:ref:`FE_SET_PROPERTY <FE_GET_PROPERTY>` ioctl:
-- :ref:`DTV_DELIVERY_SYSTEM <DTV-DELIVERY-SYSTEM>` =
- SYS_DVBC_ANNEX_A
+ :ref:`DTV_DELIVERY_SYSTEM <DTV-DELIVERY-SYSTEM>` = SYS_DVBC_ANNEX_A
-- :ref:`DTV_FREQUENCY <DTV-FREQUENCY>` = 651000000
+ :ref:`DTV_FREQUENCY <DTV-FREQUENCY>` = 651000000
-- :ref:`DTV_MODULATION <DTV-MODULATION>` = QAM_256
+ :ref:`DTV_MODULATION <DTV-MODULATION>` = QAM_256
-- :ref:`DTV_INVERSION <DTV-INVERSION>` = INVERSION_AUTO
+ :ref:`DTV_INVERSION <DTV-INVERSION>` = INVERSION_AUTO
-- :ref:`DTV_SYMBOL_RATE <DTV-SYMBOL-RATE>` = 5217000
+ :ref:`DTV_SYMBOL_RATE <DTV-SYMBOL-RATE>` = 5217000
-- :ref:`DTV_INNER_FEC <DTV-INNER-FEC>` = FEC_3_4
+ :ref:`DTV_INNER_FEC <DTV-INNER-FEC>` = FEC_3_4
-- :ref:`DTV_TUNE <DTV-TUNE>`
+ :ref:`DTV_TUNE <DTV-TUNE>`
The code that would do the above is shown in
:ref:`dtv-prop-example`.
-.. _dtv-prop-example:
-
-Example: Setting digital TV frontend properties
-===============================================
-
.. code-block:: c
+ :caption: Example: Setting digital TV frontend properties
+ :name: dtv-prop-example
#include <stdio.h>
#include <fcntl.h>
@@ -102,17 +115,12 @@ Example: Setting digital TV frontend properties
provides methods for usual operations like program scanning and to
read/write channel descriptor files.
-
.. toctree::
:maxdepth: 1
- dtv-stats
- dtv-fe-stats
- dtv-property
- dtv-properties
- dvbproperty-006
fe_property_parameters
frontend-stat-properties
frontend-property-terrestrial-systems
frontend-property-cable-systems
frontend-property-satellite-systems
+ frontend-header
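
Since only the first lines of the ``dtv-prop-example`` listing appear in the
hunk above, here is a minimal, hedged sketch of the properties-set approach it
illustrates; the ``<linux/dvb/frontend.h>`` uAPI header, the adapter path and
the reduced error handling are assumptions made only for illustration:

.. code-block:: c

    #include <stdio.h>
    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <linux/dvb/frontend.h>

    int main(void)
    {
        /* The DVB-C example above: 651 MHz, 256-QAM, FEC 3/4, 5.217 Mbauds */
        struct dtv_property props[] = {
            { .cmd = DTV_DELIVERY_SYSTEM, .u.data = SYS_DVBC_ANNEX_A },
            { .cmd = DTV_FREQUENCY,       .u.data = 651000000 },
            { .cmd = DTV_MODULATION,      .u.data = QAM_256 },
            { .cmd = DTV_INVERSION,       .u.data = INVERSION_AUTO },
            { .cmd = DTV_SYMBOL_RATE,     .u.data = 5217000 },
            { .cmd = DTV_INNER_FEC,       .u.data = FEC_3_4 },
            { .cmd = DTV_TUNE },          /* no data: triggers the actual tuning */
        };
        struct dtv_properties dtv_prop = {
            .num = sizeof(props) / sizeof(props[0]),
            .props = props,
        };
        int fd = open("/dev/dvb/adapter0/frontend0", O_RDWR);

        if (fd < 0 || ioctl(fd, FE_SET_PROPERTY, &dtv_prop) < 0) {
            perror("FE_SET_PROPERTY");
            return 1;
        }
        return 0;
    }
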
diff --git a/Documentation/media/uapi/dvb/dvbstb.svg b/Documentation/media/uapi/dvb/dvbstb.svg
index c4140fb518af..f6fe2f837373 100644
--- a/Documentation/media/uapi/dvb/dvbstb.svg
+++ b/Documentation/media/uapi/dvb/dvbstb.svg
@@ -1,651 +1,16 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<svg
- xmlns:dc="http://purl.org/dc/elements/1.1/"
- xmlns:cc="http://creativecommons.org/ns#"
- xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
- xmlns:svg="http://www.w3.org/2000/svg"
- xmlns="http://www.w3.org/2000/svg"
- xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
- xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
- version="1.2"
- width="237.70221mm"
- height="126.28221mm"
- viewBox="0 0 23770.221 12628.221"
- preserveAspectRatio="xMidYMid"
- xml:space="preserve"
- id="svg2"
- inkscape:version="0.91 r13725"
- sodipodi:docname="dvbstb.svg"
- style="fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round"><metadata
- id="metadata519"><rdf:RDF><cc:Work
- rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title /></cc:Work></rdf:RDF></metadata><sodipodi:namedview
- pagecolor="#ffffff"
- bordercolor="#666666"
- borderopacity="1"
- objecttolerance="10"
- gridtolerance="10"
- guidetolerance="10"
- inkscape:pageopacity="0"
- inkscape:pageshadow="2"
- inkscape:window-width="1920"
- inkscape:window-height="997"
- id="namedview517"
- showgrid="false"
- inkscape:zoom="1.0818519"
- inkscape:cx="411.31718"
- inkscape:cy="274.87517"
- inkscape:window-x="1920"
- inkscape:window-y="30"
- inkscape:window-maximized="1"
- inkscape:current-layer="svg2"
- fit-margin-top="0"
- fit-margin-left="0"
- fit-margin-right="0"
- fit-margin-bottom="0" /><defs
- class="ClipPathGroup"
- id="defs4" /><defs
- id="defs9" /><defs
- id="defs90" /><defs
- id="defs113" /><defs
- class="TextShapeIndex"
- id="defs124" /><defs
- class="EmbeddedBulletChars"
- id="defs128" /><defs
- class="TextEmbeddedBitmaps"
- id="defs157" /><rect
- class="BoundingBox"
- x="5355.1108"
- y="13.111"
- width="18403"
- height="9603"
- id="rect197"
- style="fill:none;stroke:none" /><path
- d="m 14556.111,9614.111 -9200,0 0,-9600 18400,0 0,9600 -9200,0 z"
- id="path199"
- inkscape:connector-curvature="0"
- style="fill:#ffffff;stroke:none" /><path
- d="m 14556.111,9614.111 -9200,0 0,-9600 18400,0 0,9600 -9200,0 z"
- id="path201"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><rect
- class="BoundingBox"
- x="13.111"
- y="4013.1111"
- width="4544"
- height="2403"
- id="rect206"
- style="fill:none;stroke:none" /><path
- d="m 2285.111,6414.111 -2271,0 0,-2400 4541,0 0,2400 -2270,0 z"
- id="path208"
- inkscape:connector-curvature="0"
- style="fill:#ffffff;stroke:none" /><path
- d="m 2285.111,6414.111 -2271,0 0,-2400 4541,0 0,2400 -2270,0 z"
- id="path210"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><text
- class="TextShape"
- id="text212"
- y="-4585.8892"
- x="-2443.8889"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan214"
- style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
- class="TextPosition"
- x="1281.111"
- y="5435.1108"
- id="tspan216"><tspan
- id="tspan218"
- style="fill:#000000;stroke:none">Antena</tspan></tspan></tspan></text>
-<rect
- class="BoundingBox"
- x="6213.1108"
- y="1813.111"
- width="4544"
- height="2403"
- id="rect223"
- style="fill:none;stroke:none" /><path
- d="m 8485.111,4214.111 -2271,0 0,-2400 4541,0 0,2400 -2270,0 z"
- id="path225"
- inkscape:connector-curvature="0"
- style="fill:#ffffff;stroke:none" /><path
- d="m 8485.111,4214.111 -2271,0 0,-2400 4541,0 0,2400 -2270,0 z"
- id="path227"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><text
- class="TextShape"
- id="text229"
- x="-2443.8889"
- y="-4585.8892"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan231"
- style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
- class="TextPosition"
- x="7217.1108"
- y="3235.1111"
- id="tspan233"><tspan
- id="tspan235"
- style="fill:#000000;stroke:none">Frontend</tspan></tspan></tspan></text>
-<rect
- class="BoundingBox"
- x="12113.111"
- y="1813.111"
- width="4544"
- height="2403"
- id="rect240"
- style="fill:none;stroke:none" /><path
- d="m 14385.111,4214.111 -2271,0 0,-2400 4541,0 0,2400 -2270,0 z"
- id="path242"
- inkscape:connector-curvature="0"
- style="fill:#ffffff;stroke:none" /><path
- d="m 14385.111,4214.111 -2271,0 0,-2400 4541,0 0,2400 -2270,0 z"
- id="path244"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><text
- class="TextShape"
- id="text246"
- x="-2443.8889"
- y="-4585.8892"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan248"
- style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
- class="TextPosition"
- x="13944.111"
- y="3235.1111"
- id="tspan250"><tspan
- id="tspan252"
- style="fill:#000000;stroke:none">CA</tspan></tspan></tspan></text>
-<rect
- class="BoundingBox"
- x="18113.111"
- y="1813.111"
- width="4544"
- height="2403"
- id="rect257"
- style="fill:none;stroke:none" /><path
- d="m 20385.111,4214.111 -2271,0 0,-2400 4541,0 0,2400 -2270,0 z"
- id="path259"
- inkscape:connector-curvature="0"
- style="fill:#ffffff;stroke:none" /><path
- d="m 20385.111,4214.111 -2271,0 0,-2400 4541,0 0,2400 -2270,0 z"
- id="path261"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><text
- class="TextShape"
- id="text263"
- x="-2443.8889"
- y="-4585.8892"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan265"
- style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
- class="TextPosition"
- x="19384.111"
- y="3235.1111"
- id="tspan267"><tspan
- id="tspan269"
- style="fill:#000000;stroke:none">Demux</tspan></tspan></tspan></text>
-<rect
- class="BoundingBox"
- x="6113.1108"
- y="5813.1108"
- width="4544"
- height="2403"
- id="rect274"
- style="fill:none;stroke:none" /><path
- d="m 8385.111,8214.111 -2271,0 0,-2400 4541,0 0,2400 -2270,0 z"
- id="path276"
- inkscape:connector-curvature="0"
- style="fill:#ffffff;stroke:none" /><path
- d="m 8385.111,8214.111 -2271,0 0,-2400 4541,0 0,2400 -2270,0 z"
- id="path278"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><text
- class="TextShape"
- id="text280"
- x="-2443.8889"
- y="-4585.8892"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan282"
- style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
- class="TextPosition"
- x="7733.1108"
- y="7235.1108"
- id="tspan284"><tspan
- id="tspan286"
- style="fill:#000000;stroke:none">SEC</tspan></tspan></tspan></text>
-<rect
- class="BoundingBox"
- x="12213.111"
- y="5813.1108"
- width="4544"
- height="2403"
- id="rect291"
- style="fill:none;stroke:none" /><path
- d="m 14485.111,8214.111 -2271,0 0,-2400 4541,0 0,2400 -2270,0 z"
- id="path293"
- inkscape:connector-curvature="0"
- style="fill:#ffffff;stroke:none" /><path
- d="m 14485.111,8214.111 -2271,0 0,-2400 4541,0 0,2400 -2270,0 z"
- id="path295"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><text
- class="TextShape"
- id="text297"
- x="-2443.8889"
- y="-4585.8892"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan299"
- style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
- class="TextPosition"
- x="13676.111"
- y="7235.1108"
- id="tspan301"><tspan
- id="tspan303"
- style="fill:#000000;stroke:none">Audio</tspan></tspan></tspan></text>
-<rect
- class="BoundingBox"
- x="18113.111"
- y="5813.1108"
- width="4544"
- height="2403"
- id="rect308"
- style="fill:none;stroke:none" /><path
- d="m 20385.111,8214.111 -2271,0 0,-2400 4541,0 0,2400 -2270,0 z"
- id="path310"
- inkscape:connector-curvature="0"
- style="fill:#ffffff;stroke:none" /><path
- d="m 20385.111,8214.111 -2271,0 0,-2400 4541,0 0,2400 -2270,0 z"
- id="path312"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><text
- class="TextShape"
- id="text314"
- x="-2443.8889"
- y="-4585.8892"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan316"
- style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
- class="TextPosition"
- x="19583.111"
- y="7235.1108"
- id="tspan318"><tspan
- id="tspan320"
- style="fill:#000000;stroke:none">Video</tspan></tspan></tspan></text>
-<rect
- class="BoundingBox"
- x="15213.111"
- y="10213.111"
- width="4544"
- height="2403"
- id="rect325"
- style="fill:none;stroke:none" /><path
- d="m 17485.111,12614.111 -2271,0 0,-2400 4541,0 0,2400 -2270,0 z"
- id="path327"
- inkscape:connector-curvature="0"
- style="fill:#ffffff;stroke:none" /><path
- d="m 17485.111,12614.111 -2271,0 0,-2400 4541,0 0,2400 -2270,0 z"
- id="path329"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><text
- class="TextShape"
- id="text331"
- x="-2443.8889"
- y="-4585.8892"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan333"
- style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
- class="TextPosition"
- x="17076.111"
- y="11635.111"
- id="tspan335"><tspan
- id="tspan337"
- style="fill:#000000;stroke:none">TV</tspan></tspan></tspan></text>
-<rect
- class="BoundingBox"
- x="4555.1108"
- y="3014.1111"
- width="1661"
- height="2202"
- id="rect342"
- style="fill:none;stroke:none" /><path
- d="m 4556.111,5214.111 1400,-1857"
- id="path344"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 6215.111,3014.111 -391,269 240,181 151,-450 z"
- id="path346"
- inkscape:connector-curvature="0"
- style="fill:#000000;stroke:none" /><rect
- class="BoundingBox"
- x="4555.1108"
- y="5213.1108"
- width="1561"
- height="1802"
- id="rect351"
- style="fill:none;stroke:none" /><path
- d="m 4556.111,5214.111 1277,1475"
- id="path353"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 6115.111,7014.111 -181,-438 -227,196 408,242 z"
- id="path355"
- inkscape:connector-curvature="0"
- style="fill:#000000;stroke:none" /><rect
- class="BoundingBox"
- x="10755.111"
- y="2864.1111"
- width="1361"
- height="301"
- id="rect360"
- style="fill:none;stroke:none" /><path
- d="m 10756.111,3014.111 929,0"
- id="path362"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 12115.111,3014.111 -450,-150 0,300 450,-150 z"
- id="path364"
- inkscape:connector-curvature="0"
- style="fill:#000000;stroke:none" /><rect
- class="BoundingBox"
- x="16655.111"
- y="2864.1111"
- width="1461"
- height="301"
- id="rect369"
- style="fill:none;stroke:none" /><path
- d="m 16656.111,3014.111 1029,0"
- id="path371"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 18115.111,3014.111 -450,-150 0,300 450,-150 z"
- id="path373"
- inkscape:connector-curvature="0"
- style="fill:#000000;stroke:none" /><rect
- class="BoundingBox"
- x="20235.111"
- y="4213.1108"
- width="301"
- height="1602"
- id="rect378"
- style="fill:none;stroke:none" /><path
- d="m 20385.111,4214.111 0,1170"
- id="path380"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 20385.111,5814.111 150,-450 -300,0 150,450 z"
- id="path382"
- inkscape:connector-curvature="0"
- style="fill:#000000;stroke:none" /><rect
- class="BoundingBox"
- x="17485.111"
- y="8213.1113"
- width="2902"
- height="2002"
- id="rect387"
- style="fill:none;stroke:none" /><path
- d="m 20385.111,8214.111 -2546,1756"
- id="path389"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 17485.111,10214.111 456,-132 -171,-247 -285,379 z"
- id="path391"
- inkscape:connector-curvature="0"
- style="fill:#000000;stroke:none" /><rect
- class="BoundingBox"
- x="14484.111"
- y="8213.1113"
- width="3002"
- height="2002"
- id="rect396"
- style="fill:none;stroke:none" /><path
- d="m 14485.111,8214.111 2642,1761"
- id="path398"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 17485.111,10214.111 -291,-374 -167,249 458,125 z"
- id="path400"
- inkscape:connector-curvature="0"
- style="fill:#000000;stroke:none" /><rect
- class="BoundingBox"
- x="14485.111"
- y="4213.1108"
- width="5902"
- height="1629"
- id="rect405"
- style="fill:none;stroke:none" /><path
- d="m 20385.111,4214.111 -51,14"
- id="path407"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 20283.111,4242.111 -52,14"
- id="path409"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 20180.111,4270.111 -51,13"
- id="path411"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 20078.111,4297.111 -52,14"
- id="path413"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 19975.111,4325.111 -51,14"
- id="path415"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 19873.111,4353.111 -52,14"
- id="path417"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 19770.111,4381.111 -51,14"
- id="path419"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 19668.111,4409.111 -52,13"
- id="path421"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 19565.111,4436.111 -51,14"
- id="path423"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 19463.111,4464.111 -52,14"
- id="path425"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 19360.111,4492.111 -51,14"
- id="path427"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 19258.111,4520.111 -52,14"
- id="path429"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 19155.111,4547.111 -51,14"
- id="path431"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 19053.111,4575.111 -52,14"
- id="path433"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 18950.111,4603.111 -51,14"
- id="path435"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 18848.111,4631.111 -51,14"
- id="path437"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 18745.111,4659.111 -51,14"
- id="path439"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 18643.111,4686.111 -51,14"
- id="path441"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 18540.111,4714.111 -51,14"
- id="path443"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 18438.111,4742.111 -51,14"
- id="path445"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 18335.111,4770.111 -51,14"
- id="path447"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 18233.111,4798.111 -51,14"
- id="path449"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 18130.111,4825.111 -51,14"
- id="path451"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 18028.111,4853.111 -51,14"
- id="path453"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 17925.111,4881.111 -51,14"
- id="path455"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 17823.111,4909.111 -51,14"
- id="path457"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 17720.111,4937.111 -51,13"
- id="path459"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 17618.111,4964.111 -51,14"
- id="path461"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 17516.111,4992.111 -52,14"
- id="path463"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 17413.111,5020.111 -51,14"
- id="path465"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 17311.111,5048.111 -52,14"
- id="path467"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 17208.111,5076.111 -51,13"
- id="path469"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 17106.111,5103.111 -52,14"
- id="path471"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 17003.111,5131.111 -51,14"
- id="path473"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 16901.111,5159.111 -52,14"
- id="path475"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 16798.111,5187.111 -51,14"
- id="path477"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 16696.111,5214.111 -52,14"
- id="path479"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 16593.111,5242.111 -51,14"
- id="path481"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 16491.111,5270.111 -52,14"
- id="path483"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 16388.111,5298.111 -51,14"
- id="path485"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 16286.111,5326.111 -52,14"
- id="path487"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 16183.111,5353.111 -51,14"
- id="path489"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 16081.111,5381.111 -51,14"
- id="path491"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 15978.111,5409.111 -51,14"
- id="path493"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 15876.111,5437.111 -51,14"
- id="path495"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 15773.111,5465.111 -51,14"
- id="path497"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 15671.111,5492.111 -51,14"
- id="path499"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 15568.111,5520.111 -51,14"
- id="path501"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 15466.111,5548.111 -51,14"
- id="path503"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 15363.111,5576.111 -51,14"
- id="path505"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 15261.111,5604.111 -51,13"
- id="path507"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 15158.111,5631.111 -51,14"
- id="path509"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 15056.111,5659.111 -51,14"
- id="path511"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 14953.111,5687.111 -51,14"
- id="path513"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000" /><path
- d="m 14485.111,5814.111 474,27 -79,-290 -395,263 z"
- id="path515"
- inkscape:connector-curvature="0"
- style="fill:#000000;stroke:none" /></svg> \ No newline at end of file
+<?xml version="1.0" encoding="UTF-8"?>
+<svg id="svg2" width="15.847cm" height="8.4187cm" fill-rule="evenodd" stroke-linejoin="round" stroke-width="28.222" preserveAspectRatio="xMidYMid" version="1.2" viewBox="0 0 23770.123 12628.122" xml:space="preserve" xmlns="http://www.w3.org/2000/svg" xmlns:cc="http://creativecommons.org/ns#" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"><defs id="defs142"><marker id="Arrow1Lend" overflow="visible" orient="auto"><path id="path954" transform="matrix(-.8 0 0 -.8 -10 0)" d="m0 0 5-5-17.5 5 17.5 5z" fill-rule="evenodd" stroke="#000" stroke-width="1pt"/></marker><marker id="marker1243" overflow="visible" orient="auto"><path id="path1241" transform="matrix(-.8 0 0 -.8 -10 0)" d="m0 0 5-5-17.5 5 17.5 5z" fill-rule="evenodd" stroke="#000" stroke-width="1pt"/></marker></defs><metadata id="metadata519"><rdf:RDF><cc:Work
+rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type rdf:resource="http://purl.org/dc/dcmitype/StillImage"/><dc:title/></cc:Work></rdf:RDF></metadata><rect id="rect197" class="BoundingBox" x="5355.1" y="13.122" width="18403" height="9603" fill="none"/><path id="path199" d="m14556 9614.1h-9200v-9600h18400v9600z" fill="#fff"/><path id="path201" d="m14556 9614.1h-9200v-9600h18400v9600z" fill="none" stroke="#000"/><rect id="rect206" class="BoundingBox" x="13.122" y="4013.1" width="4544" height="2403" fill="none"/><path id="path208" d="m2285.1 6414.1h-2271v-2400h4541v2400z" fill="#fff"/><path id="path210" d="m2285.1 6414.1h-2271v-2400h4541v2400z" fill="none" stroke="#000"/><text id="text212" class="TextShape" x="-2443.8779" y="-4585.8779"><tspan id="tspan214" class="TextParagraph" font-family="sans-serif" font-size="635px" font-weight="400"><tspan id="tspan216" class="TextPosition"
+x="1281.1219" y="5435.1221"><tspan id="tspan218" fill="#000000">Antena</tspan></tspan></tspan></text>
+<rect id="rect223" class="BoundingBox" x="6213.1" y="1813.1" width="4544" height="2403" fill="none"/><path id="path225" d="m8485.1 4214.1h-2271v-2400h4541v2400z" fill="#fff"/><path id="path227" d="m8485.1 4214.1h-2271v-2400h4541v2400z" fill="none" stroke="#000"/><text id="text229" class="TextShape" x="-2443.8779" y="-4585.8779"><tspan id="tspan231" class="TextParagraph" font-family="sans-serif" font-size="635px" font-weight="400"><tspan id="tspan233" class="TextPosition" x="7217.1221" y="3235.1221"><tspan id="tspan235" fill="#000000">Frontend</tspan></tspan></tspan></text>
+<rect id="rect240" class="BoundingBox" x="12113" y="1813.1" width="4544" height="2403" fill="none"/><path id="path242" d="m14385 4214.1h-2271v-2400h4541v2400z" fill="#fff"/><path id="path244" d="m14385 4214.1h-2271v-2400h4541v2400z" fill="none" stroke="#000"/><text id="text246" class="TextShape" x="-2443.8779" y="-4585.8779"><tspan id="tspan248" class="TextParagraph" font-family="sans-serif" font-size="635px" font-weight="400"><tspan id="tspan250" class="TextPosition" x="13944.122" y="3235.1221"><tspan id="tspan252" fill="#000000">CA</tspan></tspan></tspan></text>
+<rect id="rect257" class="BoundingBox" x="18113" y="1813.1" width="4544" height="2403" fill="none"/><path id="path259" d="m20385 4214.1h-2271v-2400h4541v2400z" fill="#fff"/><path id="path261" d="m20385 4214.1h-2271v-2400h4541v2400z" fill="none" stroke="#000"/><text id="text263" class="TextShape" x="-2443.8779" y="-4585.8779"><tspan id="tspan265" class="TextParagraph" font-family="sans-serif" font-size="635px" font-weight="400"><tspan id="tspan267" class="TextPosition" x="19384.123" y="3235.1221"><tspan id="tspan269" fill="#000000">Demux</tspan></tspan></tspan></text>
+<rect id="rect274" class="BoundingBox" x="6113.1" y="5813.1" width="4544" height="2403" fill="none"/><path id="path276" d="m8385.1 8214.1h-2271v-2400h4541v2400z" fill="#fff"/><path id="path278" d="m8385.1 8214.1h-2271v-2400h4541v2400z" fill="none" stroke="#000"/><text id="text280" class="TextShape" x="-2443.8779" y="-4585.8779"><tspan id="tspan282" class="TextParagraph" font-family="sans-serif" font-size="635px" font-weight="400"><tspan id="tspan284" class="TextPosition" x="7733.1221" y="7235.1221"><tspan id="tspan286" fill="#000000">SEC</tspan></tspan></tspan></text>
+<rect id="rect291" class="BoundingBox" x="12213" y="5813.1" width="4544" height="2403" fill="none"/><path id="path293" d="m14485 8214.1h-2271v-2400h4541v2400z" fill="#fff"/><path id="path295" d="m14485 8214.1h-2271v-2400h4541v2400z" fill="none" stroke="#000" stroke-dasharray="28.22200113,56.44400226" stroke-width="28.222"/><text id="text297" class="TextShape" x="-2443.8779" y="-4903.3779"><tspan id="tspan299" class="TextParagraph" font-family="sans-serif" font-size="635px" font-weight="400"><tspan id="tspan301" class="TextPosition" x="13676.122" y="6917.6221"><tspan id="tspan303" fill="#000000">Audio</tspan></tspan></tspan></text>
+<rect id="rect308" class="BoundingBox" x="18113" y="5813.1" width="4544" height="2403" fill="none"/><path id="path310" d="m20385 8214.1h-2271v-2400h4541v2400z" fill="#fff"/><path id="path312" d="m20385 8214.1h-2271v-2400h4541v2400z" fill="none" stroke="#000" stroke-dasharray="28.22200113,56.44400226" stroke-width="28.222"/><text id="text314" class="TextShape" x="-2443.8779" y="-4903.3779"><tspan id="tspan316" class="TextParagraph" font-family="sans-serif" font-size="635px" font-weight="400"><tspan id="tspan318" class="TextPosition" x="19583.123" y="6917.6221"><tspan id="tspan320" fill="#000000">Video</tspan></tspan></tspan></text>
+<rect id="rect325" class="BoundingBox" x="15213" y="10213" width="4544" height="2403" fill="none"/><path id="path327" d="m17485 12614h-2271v-2400h4541v2400z" fill="#fff"/><path id="path329" d="m17485 12614h-2271v-2400h4541v2400z" fill="none" stroke="#000" stroke-dasharray="28.22200113,56.44400226" stroke-width="28.222"/><text id="text331" class="TextShape" x="-2443.8779" y="-4585.8779"><tspan id="tspan333" class="TextParagraph" font-family="sans-serif" font-size="635px" font-weight="400"><tspan id="tspan335" class="TextPosition" x="17076.123" y="11635.122"><tspan id="tspan337" fill="#000000">TV</tspan></tspan></tspan></text>
+<rect id="rect342" class="BoundingBox" x="4555.1" y="3014.1" width="1661" height="2202" fill="none"/><path id="path344" d="m4556.1 5214.1 1400-1857" fill="none" stroke="#000"/><path id="path346" d="m6215.1 3014.1-391 269 240 181z"/><rect id="rect351" class="BoundingBox" x="4555.1" y="5213.1" width="1561" height="1802" fill="none"/><path id="path353" d="m4556.1 5214.1 1277 1475" fill="none" stroke="#000"/><path id="path355" d="m6115.1 7014.1-181-438-227 196z"/><rect id="rect360" class="BoundingBox" x="10755" y="2864.1" width="1361" height="301" fill="none"/><path id="path362" d="m10756 3014.1h929" fill="none" stroke="#000"/><path id="path364" d="m12115 3014.1-450-150v300z"/><rect id="rect369" class="BoundingBox" x="16655" y="2864.1" width="1461" height="301" fill="none"/><path id="path371" d="m16656 3014.1h1029" fill="none" stroke="#000"/><path id="path373" d="m18115
+3014.1-450-150v300z"/><rect id="rect378" class="BoundingBox" x="20235" y="4213.1" width="301" height="1602" fill="none"/><rect id="rect387" class="BoundingBox" x="17485" y="8213.1" width="2902" height="2002" fill="none"/><path id="path389" d="m20385 8214.1-2546 1756" fill="none" stroke="#000" stroke-dasharray="28.22200113,56.44400226" stroke-width="28.222"/><path id="path391" d="m17485 10214 456-132-171-247z"/><rect id="rect396" class="BoundingBox" x="14484" y="8213.1" width="3002" height="2002" fill="none"/><path id="path398" d="m14485 8214.1 2642 1761" fill="none" stroke="#000" stroke-dasharray="28.22200113,56.44400226" stroke-width="28.222"/><path id="path400" d="m17485 10214-291-374-167 249z"/><rect id="rect405" class="BoundingBox" x="14485" y="4213.1" width="5902" height="1629" fill="none"/><path id="path949" d="m20387 4213.1v1629" fill="none" marker-end="url(#Arrow1Lend)"
+stroke="#000" stroke-dasharray="26.45879596, 52.91759192999999328" stroke-linejoin="miter" stroke-width="26.459"/><path id="path1233" d="m20385 4214.1-3628 1599" fill="none" marker-end="url(#marker1243)" stroke="#000" stroke-dasharray="26.45879596, 52.91759193" stroke-linejoin="miter" stroke-width="26.459"/><text id="text297-3" class="TextShape" x="-2911.9202" y="-4257.2061" font-size="12px" stroke-width="1"><tspan id="tspan299-6" class="TextParagraph" font-family="sans-serif" font-size="635px" font-weight="400" stroke-width="1"><tspan id="tspan301-7" class="TextPosition" x="13208.079" y="7563.793" stroke-width="1"><tspan id="tspan303-5" fill="#000000" stroke-width="1">Decoder</tspan></tspan></tspan></text>
+<text id="text297-3-3" class="TextShape" x="2950.9287" y="-4259.5928" font-size="12px" stroke-width="1"><tspan id="tspan299-6-5" class="TextParagraph" font-family="sans-serif" font-size="635px" font-weight="400" stroke-width="1"><tspan id="tspan301-7-6" class="TextPosition" x="19070.928" y="7561.4053" stroke-width="1"><tspan id="tspan303-5-2" fill="#000000" stroke-width="1">Decoder</tspan></tspan></tspan></text>
+</svg>
diff --git a/Documentation/media/uapi/dvb/examples.rst b/Documentation/media/uapi/dvb/examples.rst
index 1a94966312c0..e0f627ca2e4d 100644
--- a/Documentation/media/uapi/dvb/examples.rst
+++ b/Documentation/media/uapi/dvb/examples.rst
@@ -6,8 +6,8 @@
Examples
********
-In this section we would like to present some examples for using the DVB
-API.
+In this section we would like to present some examples for using the Digital
+TV API.
.. note::
diff --git a/Documentation/media/uapi/dvb/fe-diseqc-recv-slave-reply.rst b/Documentation/media/uapi/dvb/fe-diseqc-recv-slave-reply.rst
index 302db2857f90..f220ee351e15 100644
--- a/Documentation/media/uapi/dvb/fe-diseqc-recv-slave-reply.rst
+++ b/Documentation/media/uapi/dvb/fe-diseqc-recv-slave-reply.rst
@@ -26,8 +26,7 @@ Arguments
File descriptor returned by :ref:`open() <frontend_f_open>`.
``argp``
- pointer to struct
- :c:type:`dvb_diseqc_slave_reply`
+ pointer to struct :c:type:`dvb_diseqc_slave_reply`.
Description
@@ -35,46 +34,15 @@ Description
Receives reply from a DiSEqC 2.0 command.
-.. c:type:: dvb_diseqc_slave_reply
-
-.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
-
-.. flat-table:: struct dvb_diseqc_slave_reply
- :header-rows: 0
- :stub-columns: 0
- :widths: 1 1 2
-
-
- - .. row 1
-
- - uint8_t
-
- - msg[4]
-
- - DiSEqC message (framing, data[3])
-
- - .. row 2
-
- - uint8_t
-
- - msg_len
-
- - Length of the DiSEqC message. Valid values are 0 to 4, where 0
- means no msg
-
- - .. row 3
-
- - int
-
- - timeout
-
- - Return from ioctl after timeout ms with errorcode when no message
- was received
-
+The received message is stored in the buffer pointed to by ``argp``.
Return Value
============
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
+On success 0 is returned.
+
+On error -1 is returned, and the ``errno`` variable is set
+appropriately.
+
+Generic error codes are described at the
:ref:`Generic Error Codes <gen-errors>` chapter.
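
A hedged sketch of the FE_DISEQC_RECV_SLAVE_REPLY call documented above,
using the ``msg``, ``msg_len`` and ``timeout`` fields listed in the table
removed by this hunk; ``fd`` is assumed to be a frontend device opened
read/write, with the same headers as in the earlier dvbproperty sketch:

.. code-block:: c

    struct dvb_diseqc_slave_reply reply = {
        .timeout = 100,    /* give up after 100 ms without a reply */
    };

    if (ioctl(fd, FE_DISEQC_RECV_SLAVE_REPLY, &reply) == 0) {
        /* msg_len is 0 to 4, where 0 means no message was received */
        for (int i = 0; i < reply.msg_len; i++)
            printf("reply byte %d: 0x%02x\n", i, reply.msg[i]);
    }
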
diff --git a/Documentation/media/uapi/dvb/fe-diseqc-reset-overload.rst b/Documentation/media/uapi/dvb/fe-diseqc-reset-overload.rst
index 75116f283faf..78476c1c7bf5 100644
--- a/Documentation/media/uapi/dvb/fe-diseqc-reset-overload.rst
+++ b/Documentation/media/uapi/dvb/fe-diseqc-reset-overload.rst
@@ -31,12 +31,16 @@ Description
If the bus has been automatically powered off due to power overload,
this ioctl call restores the power to the bus. The call requires
read/write access to the device. This call has no effect if the device
-is manually powered off. Not all DVB adapters support this ioctl.
+is manually powered off. Not all Digital TV adapters support this ioctl.
Return Value
============
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
+On success 0 is returned.
+
+On error -1 is returned, and the ``errno`` variable is set
+appropriately.
+
+Generic error codes are described at the
:ref:`Generic Error Codes <gen-errors>` chapter.
diff --git a/Documentation/media/uapi/dvb/fe-diseqc-send-burst.rst b/Documentation/media/uapi/dvb/fe-diseqc-send-burst.rst
index e962f6ec5aaf..a7e05914efae 100644
--- a/Documentation/media/uapi/dvb/fe-diseqc-send-burst.rst
+++ b/Documentation/media/uapi/dvb/fe-diseqc-send-burst.rst
@@ -26,7 +26,7 @@ Arguments
File descriptor returned by :ref:`open() <frontend_f_open>`.
``tone``
- an integer enumered value described at :c:type:`fe_sec_mini_cmd`
+ An integer enumerated value described at :c:type:`fe_sec_mini_cmd`.
Description
@@ -39,39 +39,14 @@ read/write permissions.
It provides support for what's specified at
`Digital Satellite Equipment Control (DiSEqC) - Simple "ToneBurst" Detection Circuit specification. <http://www.eutelsat.com/files/contributed/satellites/pdf/Diseqc/associated%20docs/simple_tone_burst_detec.pdf>`__
-.. c:type:: fe_sec_mini_cmd
-
-.. flat-table:: enum fe_sec_mini_cmd
- :header-rows: 1
- :stub-columns: 0
-
-
- - .. row 1
-
- - ID
-
- - Description
-
- - .. row 2
-
- - .. _SEC-MINI-A:
-
- ``SEC_MINI_A``
-
- - Sends a mini-DiSEqC 22kHz '0' Tone Burst to select satellite-A
-
- - .. row 3
-
- - .. _SEC-MINI-B:
-
- ``SEC_MINI_B``
-
- - Sends a mini-DiSEqC 22kHz '1' Data Burst to select satellite-B
-
Return Value
============
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
+On success 0 is returned.
+
+On error -1 is returned, and the ``errno`` variable is set
+appropriately.
+
+Generic error codes are described at the
:ref:`Generic Error Codes <gen-errors>` chapter.
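
As an illustrative sketch only, the ``SEC_MINI_A``/``SEC_MINI_B`` values
from the table removed above are passed directly as the third ioctl
argument; ``fd`` is again assumed to be an open frontend descriptor:

.. code-block:: c

    /* Send a mini-DiSEqC 22 kHz '0' tone burst to select satellite A */
    if (ioctl(fd, FE_DISEQC_SEND_BURST, SEC_MINI_A) < 0)
        perror("FE_DISEQC_SEND_BURST");
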
diff --git a/Documentation/media/uapi/dvb/fe-diseqc-send-master-cmd.rst b/Documentation/media/uapi/dvb/fe-diseqc-send-master-cmd.rst
index bbcab3df39b5..6bd3994edfc2 100644
--- a/Documentation/media/uapi/dvb/fe-diseqc-send-master-cmd.rst
+++ b/Documentation/media/uapi/dvb/fe-diseqc-send-master-cmd.rst
@@ -33,39 +33,17 @@ Arguments
Description
===========
-Sends a DiSEqC command to the antenna subsystem.
-
-
-.. c:type:: dvb_diseqc_master_cmd
-
-.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
-
-.. flat-table:: struct dvb_diseqc_master_cmd
- :header-rows: 0
- :stub-columns: 0
- :widths: 1 1 2
-
-
- - .. row 1
-
- - uint8_t
-
- - msg[6]
-
- - DiSEqC message (framing, address, command, data[3])
-
- - .. row 2
-
- - uint8_t
-
- - msg_len
-
- - Length of the DiSEqC message. Valid values are 3 to 6
+Sends a DiSEqC command, as described by struct :c:type:`dvb_diseqc_master_cmd`,
+to the antenna subsystem.
Return Value
============
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
+On success 0 is returned.
+
+On error -1 is returned, and the ``errno`` variable is set
+appropriately.
+
+Generic error codes are described at the
:ref:`Generic Error Codes <gen-errors>` chapter.
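
A hedged sketch of FE_DISEQC_SEND_MASTER_CMD, using the ``msg``/``msg_len``
layout (framing, address, command, data) from the table removed above; the
command bytes below are a common DiSEqC 1.0 committed-switch example and
depend entirely on the installation, not on the API:

.. code-block:: c

    struct dvb_diseqc_master_cmd cmd = {
        .msg = { 0xe0, 0x10, 0x38, 0xf0 },  /* framing, address, command, data */
        .msg_len = 4,                       /* valid values are 3 to 6 */
    };

    if (ioctl(fd, FE_DISEQC_SEND_MASTER_CMD, &cmd) < 0)
        perror("FE_DISEQC_SEND_MASTER_CMD");
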
diff --git a/Documentation/media/uapi/dvb/fe-dishnetwork-send-legacy-cmd.rst b/Documentation/media/uapi/dvb/fe-dishnetwork-send-legacy-cmd.rst
index f41371f12456..dcf2d20d460f 100644
--- a/Documentation/media/uapi/dvb/fe-dishnetwork-send-legacy-cmd.rst
+++ b/Documentation/media/uapi/dvb/fe-dishnetwork-send-legacy-cmd.rst
@@ -46,6 +46,10 @@ dishes were already legacy in 2004.
Return Value
============
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
+On success 0 is returned.
+
+On error -1 is returned, and the ``errno`` variable is set
+appropriately.
+
+Generic error codes are described at the
:ref:`Generic Error Codes <gen-errors>` chapter.
diff --git a/Documentation/media/uapi/dvb/fe-enable-high-lnb-voltage.rst b/Documentation/media/uapi/dvb/fe-enable-high-lnb-voltage.rst
index bacafbc462d2..b20cb360fe37 100644
--- a/Documentation/media/uapi/dvb/fe-enable-high-lnb-voltage.rst
+++ b/Documentation/media/uapi/dvb/fe-enable-high-lnb-voltage.rst
@@ -45,6 +45,10 @@ voltages.
Return Value
============
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
+On success 0 is returned.
+
+On error -1 is returned, and the ``errno`` variable is set
+appropriately.
+
+Generic error codes are described at the
:ref:`Generic Error Codes <gen-errors>` chapter.
diff --git a/Documentation/media/uapi/dvb/fe-get-event.rst b/Documentation/media/uapi/dvb/fe-get-event.rst
index 8a719c33073d..505db94bf183 100644
--- a/Documentation/media/uapi/dvb/fe-get-event.rst
+++ b/Documentation/media/uapi/dvb/fe-get-event.rst
@@ -44,10 +44,10 @@ an event becomes available.
Return Value
============
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
-:ref:`Generic Error Codes <gen-errors>` chapter.
+On success 0 is returned.
+On error -1 is returned, and the ``errno`` variable is set
+appropriately.
.. flat-table::
@@ -66,3 +66,6 @@ appropriately. The generic error codes are described at the
- ``EOVERFLOW``
- Overflow in event queue - one or more events were lost.
+
+Generic error codes are described at the
+:ref:`Generic Error Codes <gen-errors>` chapter.
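
A minimal sketch of FE_GET_EVENT, assuming ``fd`` was opened on the frontend
device without ``O_NONBLOCK`` so that the call blocks until an event becomes
available, as described above:

.. code-block:: c

    struct dvb_frontend_event event;

    if (ioctl(fd, FE_GET_EVENT, &event) == 0) {
        if (event.status & FE_HAS_LOCK)
            printf("frontend acquired lock\n");
    }
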
diff --git a/Documentation/media/uapi/dvb/fe-get-frontend.rst b/Documentation/media/uapi/dvb/fe-get-frontend.rst
index d53a3f8237c3..5db552cedd70 100644
--- a/Documentation/media/uapi/dvb/fe-get-frontend.rst
+++ b/Documentation/media/uapi/dvb/fe-get-frontend.rst
@@ -42,11 +42,10 @@ this command, read-only access to the device is sufficient.
Return Value
============
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
-:ref:`Generic Error Codes <gen-errors>` chapter.
-
+On success 0 is returned.
+On error -1 is returned, and the ``errno`` variable is set
+appropriately.
.. flat-table::
:header-rows: 0
@@ -58,3 +57,6 @@ appropriately. The generic error codes are described at the
- ``EINVAL``
- Maximum supported symbol rate reached.
+
+Generic error codes are described at the
+:ref:`Generic Error Codes <gen-errors>` chapter.
diff --git a/Documentation/media/uapi/dvb/fe-get-info.rst b/Documentation/media/uapi/dvb/fe-get-info.rst
index e3d64b251f61..49307c0abfee 100644
--- a/Documentation/media/uapi/dvb/fe-get-info.rst
+++ b/Documentation/media/uapi/dvb/fe-get-info.rst
@@ -9,7 +9,8 @@ ioctl FE_GET_INFO
Name
====
-FE_GET_INFO - Query DVB frontend capabilities and returns information about the - front-end. This call only requires read-only access to the device
+FE_GET_INFO - Query Digital TV frontend capabilities and return information
+about the front-end. This call only requires read-only access to the device.
Synopsis
@@ -33,119 +34,13 @@ Arguments
Description
===========
-All DVB frontend devices support the ``FE_GET_INFO`` ioctl. It is used
-to identify kernel devices compatible with this specification and to
+All Digital TV frontend devices support the :ref:`FE_GET_INFO` ioctl. It is
+used to identify kernel devices compatible with this specification and to
obtain information about driver and hardware capabilities. The ioctl
takes a pointer to dvb_frontend_info which is filled by the driver.
When the driver is not compatible with this specification the ioctl
returns an error.
-.. c:type:: dvb_frontend_info
-
-.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
-
-.. flat-table:: struct dvb_frontend_info
- :header-rows: 0
- :stub-columns: 0
- :widths: 1 1 2
-
-
- - .. row 1
-
- - char
-
- - name[128]
-
- - Name of the frontend
-
- - .. row 2
-
- - fe_type_t
-
- - type
-
- - **DEPRECATED**. DVBv3 type. Should not be used on modern programs,
- as a frontend may have more than one type. So, the DVBv5 API
- should be used instead to enumerate and select the frontend type.
-
- - .. row 3
-
- - uint32_t
-
- - frequency_min
-
- - Minimal frequency supported by the frontend
-
- - .. row 4
-
- - uint32_t
-
- - frequency_max
-
- - Maximal frequency supported by the frontend
-
- - .. row 5
-
- - uint32_t
-
- - frequency_stepsize
-
- - Frequency step - all frequencies are multiple of this value
-
- - .. row 6
-
- - uint32_t
-
- - frequency_tolerance
-
- - Tolerance of the frequency
-
- - .. row 7
-
- - uint32_t
-
- - symbol_rate_min
-
- - Minimal symbol rate (for Cable/Satellite systems), in bauds
-
- - .. row 8
-
- - uint32_t
-
- - symbol_rate_max
-
- - Maximal symbol rate (for Cable/Satellite systems), in bauds
-
- - .. row 9
-
- - uint32_t
-
- - symbol_rate_tolerance
-
- - Maximal symbol rate tolerance, in ppm
-
- - .. row 10
-
- - uint32_t
-
- - notifier_delay
-
- - **DEPRECATED**. Not used by any driver.
-
- - .. row 11
-
- - enum :c:type:`fe_caps`
-
- - caps
-
- - Capabilities supported by the frontend
-
-
-.. note::
-
- The frequencies are specified in Hz for Terrestrial and Cable
- systems. They're specified in kHz for Satellite systems
-
frontend capabilities
=====================
@@ -153,274 +48,16 @@ frontend capabilities
Capabilities describe what a frontend can do. Some capabilities are
supported only on some specific frontend types.
-.. c:type:: fe_caps
-
-.. tabularcolumns:: |p{6.5cm}|p{11.0cm}|
-
-.. flat-table:: enum fe_caps
- :header-rows: 1
- :stub-columns: 0
-
-
- - .. row 1
-
- - ID
-
- - Description
-
- - .. row 2
-
- - .. _FE-IS-STUPID:
-
- ``FE_IS_STUPID``
-
- - There's something wrong at the frontend, and it can't report its
- capabilities
-
- - .. row 3
-
- - .. _FE-CAN-INVERSION-AUTO:
-
- ``FE_CAN_INVERSION_AUTO``
-
- - The frontend is capable of auto-detecting inversion
-
- - .. row 4
-
- - .. _FE-CAN-FEC-1-2:
-
- ``FE_CAN_FEC_1_2``
-
- - The frontend supports FEC 1/2
-
- - .. row 5
-
- - .. _FE-CAN-FEC-2-3:
-
- ``FE_CAN_FEC_2_3``
-
- - The frontend supports FEC 2/3
-
- - .. row 6
-
- - .. _FE-CAN-FEC-3-4:
-
- ``FE_CAN_FEC_3_4``
-
- - The frontend supports FEC 3/4
-
- - .. row 7
-
- - .. _FE-CAN-FEC-4-5:
-
- ``FE_CAN_FEC_4_5``
-
- - The frontend supports FEC 4/5
-
- - .. row 8
-
- - .. _FE-CAN-FEC-5-6:
-
- ``FE_CAN_FEC_5_6``
-
- - The frontend supports FEC 5/6
-
- - .. row 9
-
- - .. _FE-CAN-FEC-6-7:
-
- ``FE_CAN_FEC_6_7``
-
- - The frontend supports FEC 6/7
-
- - .. row 10
-
- - .. _FE-CAN-FEC-7-8:
-
- ``FE_CAN_FEC_7_8``
-
- - The frontend supports FEC 7/8
-
- - .. row 11
-
- - .. _FE-CAN-FEC-8-9:
-
- ``FE_CAN_FEC_8_9``
-
- - The frontend supports FEC 8/9
-
- - .. row 12
-
- - .. _FE-CAN-FEC-AUTO:
-
- ``FE_CAN_FEC_AUTO``
-
- - The frontend can autodetect FEC.
-
- - .. row 13
-
- - .. _FE-CAN-QPSK:
-
- ``FE_CAN_QPSK``
-
- - The frontend supports QPSK modulation
-
- - .. row 14
-
- - .. _FE-CAN-QAM-16:
-
- ``FE_CAN_QAM_16``
-
- - The frontend supports 16-QAM modulation
-
- - .. row 15
-
- - .. _FE-CAN-QAM-32:
-
- ``FE_CAN_QAM_32``
-
- - The frontend supports 32-QAM modulation
-
- - .. row 16
-
- - .. _FE-CAN-QAM-64:
-
- ``FE_CAN_QAM_64``
-
- - The frontend supports 64-QAM modulation
-
- - .. row 17
-
- - .. _FE-CAN-QAM-128:
-
- ``FE_CAN_QAM_128``
-
- - The frontend supports 128-QAM modulation
-
- - .. row 18
-
- - .. _FE-CAN-QAM-256:
-
- ``FE_CAN_QAM_256``
-
- - The frontend supports 256-QAM modulation
-
- - .. row 19
-
- - .. _FE-CAN-QAM-AUTO:
-
- ``FE_CAN_QAM_AUTO``
-
- - The frontend can autodetect modulation
-
- - .. row 20
-
- - .. _FE-CAN-TRANSMISSION-MODE-AUTO:
-
- ``FE_CAN_TRANSMISSION_MODE_AUTO``
-
- - The frontend can autodetect the transmission mode
-
- - .. row 21
-
- - .. _FE-CAN-BANDWIDTH-AUTO:
-
- ``FE_CAN_BANDWIDTH_AUTO``
-
- - The frontend can autodetect the bandwidth
-
- - .. row 22
-
- - .. _FE-CAN-GUARD-INTERVAL-AUTO:
-
- ``FE_CAN_GUARD_INTERVAL_AUTO``
-
- - The frontend can autodetect the guard interval
-
- - .. row 23
-
- - .. _FE-CAN-HIERARCHY-AUTO:
-
- ``FE_CAN_HIERARCHY_AUTO``
-
- - The frontend can autodetect hierarch
-
- - .. row 24
-
- - .. _FE-CAN-8VSB:
-
- ``FE_CAN_8VSB``
-
- - The frontend supports 8-VSB modulation
-
- - .. row 25
-
- - .. _FE-CAN-16VSB:
-
- ``FE_CAN_16VSB``
-
- - The frontend supports 16-VSB modulation
-
- - .. row 26
-
- - .. _FE-HAS-EXTENDED-CAPS:
-
- ``FE_HAS_EXTENDED_CAPS``
-
- - Currently, unused
-
- - .. row 27
-
- - .. _FE-CAN-MULTISTREAM:
-
- ``FE_CAN_MULTISTREAM``
-
- - The frontend supports multistream filtering
-
- - .. row 28
-
- - .. _FE-CAN-TURBO-FEC:
-
- ``FE_CAN_TURBO_FEC``
-
- - The frontend supports turbo FEC modulation
-
- - .. row 29
-
- - .. _FE-CAN-2G-MODULATION:
-
- ``FE_CAN_2G_MODULATION``
-
- - The frontend supports "2nd generation modulation" (DVB-S2/T2)>
-
- - .. row 30
-
- - .. _FE-NEEDS-BENDING:
-
- ``FE_NEEDS_BENDING``
-
- - Not supported anymore, don't use it
-
- - .. row 31
-
- - .. _FE-CAN-RECOVER:
-
- ``FE_CAN_RECOVER``
-
- - The frontend can recover from a cable unplug automatically
-
- - .. row 32
-
- - .. _FE-CAN-MUTE-TS:
-
- ``FE_CAN_MUTE_TS``
-
- - The frontend can stop spurious TS data output
+The frontend capabilities are described at :c:type:`fe_caps`.
Return Value
============
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
+On success 0 is returned.
+
+On error -1 is returned, and the ``errno`` variable is set
+appropriately.
+
+Generic error codes are described at the
:ref:`Generic Error Codes <gen-errors>` chapter.
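
A hedged sketch of FE_GET_INFO; the capability test uses one of the
:c:type:`fe_caps` flags whose table this hunk moves to the header
documentation, and ``fd`` is assumed to be an open frontend descriptor:

.. code-block:: c

    struct dvb_frontend_info info;

    if (ioctl(fd, FE_GET_INFO, &info) == 0) {
        printf("frontend: %s\n", info.name);
        if (info.caps & FE_CAN_QAM_256)
            printf("256-QAM is supported\n");
    }
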
diff --git a/Documentation/media/uapi/dvb/fe-get-property.rst b/Documentation/media/uapi/dvb/fe-get-property.rst
index 015d4db597b5..948d2ba84f2c 100644
--- a/Documentation/media/uapi/dvb/fe-get-property.rst
+++ b/Documentation/media/uapi/dvb/fe-get-property.rst
@@ -29,13 +29,13 @@ Arguments
File descriptor returned by :ref:`open() <frontend_f_open>`.
``argp``
- pointer to struct :c:type:`dtv_properties`
+ Pointer to struct :c:type:`dtv_properties`.
Description
===========
-All DVB frontend devices support the ``FE_SET_PROPERTY`` and
+All Digital TV frontend devices support the ``FE_SET_PROPERTY`` and
``FE_GET_PROPERTY`` ioctls. The supported properties and statistics
depends on the delivery system and on the device:
@@ -64,6 +64,10 @@ depends on the delivery system and on the device:
Return Value
============
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
+On success 0 is returned.
+
+On error -1 is returned, and the ``errno`` variable is set
+appropriately.
+
+Generic error codes are described at the
:ref:`Generic Error Codes <gen-errors>` chapter.
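
A minimal FE_GET_PROPERTY sketch, mirroring the FE_SET_PROPERTY example in
dvbproperty.rst; it reads a single property (the current delivery system)
and is given only as an illustration:

.. code-block:: c

    struct dtv_property prop = { .cmd = DTV_DELIVERY_SYSTEM };
    struct dtv_properties props = { .num = 1, .props = &prop };

    if (ioctl(fd, FE_GET_PROPERTY, &props) == 0)
        printf("current delivery system: %u\n", prop.u.data);
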
diff --git a/Documentation/media/uapi/dvb/fe-read-ber.rst b/Documentation/media/uapi/dvb/fe-read-ber.rst
index e54972ad5250..1e6a79567a4c 100644
--- a/Documentation/media/uapi/dvb/fe-read-ber.rst
+++ b/Documentation/media/uapi/dvb/fe-read-ber.rst
@@ -41,6 +41,10 @@ access to the device is sufficient.
Return Value
============
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
+On success 0 is returned.
+
+On error -1 is returned, and the ``errno`` variable is set
+appropriately.
+
+Generic error codes are described at the
:ref:`Generic Error Codes <gen-errors>` chapter.
diff --git a/Documentation/media/uapi/dvb/fe-read-signal-strength.rst b/Documentation/media/uapi/dvb/fe-read-signal-strength.rst
index 4b13c4757744..198f6dfb53a1 100644
--- a/Documentation/media/uapi/dvb/fe-read-signal-strength.rst
+++ b/Documentation/media/uapi/dvb/fe-read-signal-strength.rst
@@ -41,6 +41,10 @@ to the device is sufficient.
Return Value
============
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
+On success 0 is returned.
+
+On error -1 is returned, and the ``errno`` variable is set
+appropriately.
+
+Generic error codes are described at the
:ref:`Generic Error Codes <gen-errors>` chapter.
diff --git a/Documentation/media/uapi/dvb/fe-read-snr.rst b/Documentation/media/uapi/dvb/fe-read-snr.rst
index 2aed487f5c99..6db22c043512 100644
--- a/Documentation/media/uapi/dvb/fe-read-snr.rst
+++ b/Documentation/media/uapi/dvb/fe-read-snr.rst
@@ -41,6 +41,10 @@ to the device is sufficient.
Return Value
============
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
+On success 0 is returned.
+
+On error -1 is returned, and the ``errno`` variable is set
+appropriately.
+
+Generic error codes are described at the
:ref:`Generic Error Codes <gen-errors>` chapter.
diff --git a/Documentation/media/uapi/dvb/fe-read-status.rst b/Documentation/media/uapi/dvb/fe-read-status.rst
index 812f086c20f5..4adb52f084ff 100644
--- a/Documentation/media/uapi/dvb/fe-read-status.rst
+++ b/Documentation/media/uapi/dvb/fe-read-status.rst
@@ -33,7 +33,7 @@ Arguments
Description
===========
-All DVB frontend devices support the ``FE_READ_STATUS`` ioctl. It is
+All Digital TV frontend devices support the ``FE_READ_STATUS`` ioctl. It is
used to check about the locking status of the frontend after being
tuned. The ioctl takes a pointer to an integer where the status will be
written.
@@ -52,85 +52,14 @@ The fe_status parameter is used to indicate the current state and/or
state changes of the frontend hardware. It is produced using the enum
:c:type:`fe_status` values on a bitmask
-.. c:type:: fe_status
-
-.. tabularcolumns:: |p{3.5cm}|p{14.0cm}|
-
-.. _fe-status:
-
-.. flat-table:: enum fe_status
- :header-rows: 1
- :stub-columns: 0
-
-
- - .. row 1
-
- - ID
-
- - Description
-
- - .. row 2
-
- - .. _FE-HAS-SIGNAL:
-
- ``FE_HAS_SIGNAL``
-
- - The frontend has found something above the noise level
-
- - .. row 3
-
- - .. _FE-HAS-CARRIER:
-
- ``FE_HAS_CARRIER``
-
- - The frontend has found a DVB signal
-
- - .. row 4
-
- - .. _FE-HAS-VITERBI:
-
- ``FE_HAS_VITERBI``
-
- - The frontend FEC inner coding (Viterbi, LDPC or other inner code)
- is stable
-
- - .. row 5
-
- - .. _FE-HAS-SYNC:
-
- ``FE_HAS_SYNC``
-
- - Synchronization bytes was found
-
- - .. row 6
-
- - .. _FE-HAS-LOCK:
-
- ``FE_HAS_LOCK``
-
- - The DVB were locked and everything is working
-
- - .. row 7
-
- - .. _FE-TIMEDOUT:
-
- ``FE_TIMEDOUT``
-
- - no lock within the last about 2 seconds
-
- - .. row 8
-
- - .. _FE-REINIT:
-
- ``FE_REINIT``
-
- - The frontend was reinitialized, application is recommended to
- reset DiSEqC, tone and parameters
-
Return Value
============
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
+On success 0 is returned.
+
+On error -1 is returned, and the ``errno`` variable is set
+appropriately.
+
+Generic error codes are described at the
:ref:`Generic Error Codes <gen-errors>` chapter.
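
A hedged sketch of FE_READ_STATUS, testing two of the :c:type:`fe_status`
bits from the table this hunk moves to the header documentation:

.. code-block:: c

    enum fe_status status;

    if (ioctl(fd, FE_READ_STATUS, &status) == 0) {
        if (status & FE_HAS_LOCK)
            printf("tuned and locked\n");
        else if (status & FE_HAS_SIGNAL)
            printf("signal detected, but no lock yet\n");
    }
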
diff --git a/Documentation/media/uapi/dvb/fe-read-uncorrected-blocks.rst b/Documentation/media/uapi/dvb/fe-read-uncorrected-blocks.rst
index 46687c123402..f2c688bcacb3 100644
--- a/Documentation/media/uapi/dvb/fe-read-uncorrected-blocks.rst
+++ b/Documentation/media/uapi/dvb/fe-read-uncorrected-blocks.rst
@@ -43,6 +43,10 @@ sufficient.
Return Value
============
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
+On success 0 is returned.
+
+On error -1 is returned, and the ``errno`` variable is set
+appropriately.
+
+Generic error codes are described at the
:ref:`Generic Error Codes <gen-errors>` chapter.
diff --git a/Documentation/media/uapi/dvb/fe-set-frontend-tune-mode.rst b/Documentation/media/uapi/dvb/fe-set-frontend-tune-mode.rst
index 1d5878da2f41..3c4bc179b313 100644
--- a/Documentation/media/uapi/dvb/fe-set-frontend-tune-mode.rst
+++ b/Documentation/media/uapi/dvb/fe-set-frontend-tune-mode.rst
@@ -30,7 +30,7 @@ Arguments
- 0 - normal tune mode
- - FE_TUNE_MODE_ONESHOT - When set, this flag will disable any
+ - ``FE_TUNE_MODE_ONESHOT`` - When set, this flag will disable any
zigzagging or other "normal" tuning behaviour. Additionally,
there will be no automatic monitoring of the lock status, and
hence no frontend events will be generated. If a frontend device
@@ -42,12 +42,16 @@ Description
===========
Allow setting tuner mode flags to the frontend, between 0 (normal) or
-FE_TUNE_MODE_ONESHOT mode
+``FE_TUNE_MODE_ONESHOT`` mode
Return Value
============
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
+On success 0 is returned.
+
+On error -1 is returned, and the ``errno`` variable is set
+appropriately.
+
+Generic error codes are described at the
:ref:`Generic Error Codes <gen-errors>` chapter.
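
A short sketch of how the one-shot tune mode described above might be used;
the actual tuning step is elided and assumed:

.. code-block:: c

    /* Disable zigzagging and automatic lock monitoring for the next tune */
    if (ioctl(fd, FE_SET_FRONTEND_TUNE_MODE, FE_TUNE_MODE_ONESHOT) < 0)
        perror("FE_SET_FRONTEND_TUNE_MODE");

    /* ... tune the frontend ... */

    /* Restore the normal tune mode afterwards */
    ioctl(fd, FE_SET_FRONTEND_TUNE_MODE, 0);
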
diff --git a/Documentation/media/uapi/dvb/fe-set-frontend.rst b/Documentation/media/uapi/dvb/fe-set-frontend.rst
index 7f97dce9aee6..4f3dcf338254 100644
--- a/Documentation/media/uapi/dvb/fe-set-frontend.rst
+++ b/Documentation/media/uapi/dvb/fe-set-frontend.rst
@@ -48,17 +48,24 @@ requires read/write access to the device.
Return Value
============
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
-:ref:`Generic Error Codes <gen-errors>` chapter.
+On success 0 is returned.
+
+On error -1 is returned, and the ``errno`` variable is set
+appropriately.
+
+.. tabularcolumns:: |p{2.5cm}|p{15.0cm}|
.. flat-table::
:header-rows: 0
:stub-columns: 0
-
+ :widths: 1 16
- .. row 1
- ``EINVAL``
- Maximum supported symbol rate reached.
+
+
+Generic error codes are described at the
+:ref:`Generic Error Codes <gen-errors>` chapter.
diff --git a/Documentation/media/uapi/dvb/fe-set-tone.rst b/Documentation/media/uapi/dvb/fe-set-tone.rst
index 84e4da3fd4c9..758efa11014c 100644
--- a/Documentation/media/uapi/dvb/fe-set-tone.rst
+++ b/Documentation/media/uapi/dvb/fe-set-tone.rst
@@ -45,40 +45,14 @@ this is done using the DiSEqC ioctls.
capability of selecting the band. So, it is recommended that applications
would change to SEC_TONE_OFF when the device is not used.
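+
+As a sketch of that recommendation (assuming ``fd`` is an already opened
+frontend file descriptor), an application could do:
+
+.. code-block:: c
+
+    #include <stdio.h>
+    #include <sys/ioctl.h>
+    #include <linux/dvb/frontend.h>
+
+    /* Stop sending the 22kHz continuous tone while the device is idle */
+    static int tone_off(int fd)
+    {
+            if (ioctl(fd, FE_SET_TONE, SEC_TONE_OFF) < 0) {
+                    perror("FE_SET_TONE");
+                    return -1;
+            }
+            return 0;
+    }
+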
-.. c:type:: fe_sec_tone_mode
-
-.. flat-table:: enum fe_sec_tone_mode
- :header-rows: 1
- :stub-columns: 0
-
-
- - .. row 1
-
- - ID
-
- - Description
-
- - .. row 2
-
- - .. _SEC-TONE-ON:
-
- ``SEC_TONE_ON``
-
- - Sends a 22kHz tone burst to the antenna
-
- - .. row 3
-
- - .. _SEC-TONE-OFF:
-
- ``SEC_TONE_OFF``
-
- - Don't send a 22kHz tone to the antenna (except if the
- FE_DISEQC_* ioctls are called)
-
Return Value
============
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
+On success 0 is returned.
+
+On error -1 is returned, and the ``errno`` variable is set
+appropriately.
+
+Generic error codes are described at the
:ref:`Generic Error Codes <gen-errors>` chapter.
diff --git a/Documentation/media/uapi/dvb/fe-set-voltage.rst b/Documentation/media/uapi/dvb/fe-set-voltage.rst
index 052f316bb4a3..38d4485290a0 100644
--- a/Documentation/media/uapi/dvb/fe-set-voltage.rst
+++ b/Documentation/media/uapi/dvb/fe-set-voltage.rst
@@ -53,6 +53,10 @@ power up the LNBf.
Return Value
============
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
+On success 0 is returned.
+
+On error -1 is returned, and the ``errno`` variable is set
+appropriately.
+
+Generic error codes are described at the
:ref:`Generic Error Codes <gen-errors>` chapter.
diff --git a/Documentation/media/uapi/dvb/fe-type-t.rst b/Documentation/media/uapi/dvb/fe-type-t.rst
index 548b965188d0..dee32ae104d7 100644
--- a/Documentation/media/uapi/dvb/fe-type-t.rst
+++ b/Documentation/media/uapi/dvb/fe-type-t.rst
@@ -78,7 +78,7 @@ parameter.
In the old days, struct :c:type:`dvb_frontend_info`
used to contain ``fe_type_t`` field to indicate the delivery systems,
-filled with either FE_QPSK, FE_QAM, FE_OFDM or FE_ATSC. While this
+filled with either ``FE_QPSK, FE_QAM, FE_OFDM`` or ``FE_ATSC``. While this
is still filled to keep backward compatibility, the usage of this field
is deprecated, as it can report just one delivery system, but some
devices support multiple delivery systems. Please use
diff --git a/Documentation/media/uapi/dvb/fe_property_parameters.rst b/Documentation/media/uapi/dvb/fe_property_parameters.rst
index 7bb7559c4500..6eef507fea50 100644
--- a/Documentation/media/uapi/dvb/fe_property_parameters.rst
+++ b/Documentation/media/uapi/dvb/fe_property_parameters.rst
@@ -6,6 +6,11 @@
Digital TV property parameters
******************************
+There are several different Digital TV parameters that can be used by
+:ref:`FE_SET_PROPERTY and FE_GET_PROPERTY ioctls<FE_GET_PROPERTY>`.
+This section describes each of them. Please notice, however, that only
+a subset of them is needed to set up a frontend.
+
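+For illustration only, the sketch below caches a small subset of properties
+and asks the frontend to tune; the device path, frequency, modulation and
+symbol rate are made-up example values, not requirements of the API:
+
+.. code-block:: c
+
+    #include <fcntl.h>
+    #include <sys/ioctl.h>
+    #include <linux/dvb/frontend.h>
+
+    static int tune_dvbc_example(void)
+    {
+            struct dtv_property props[] = {
+                    { .cmd = DTV_DELIVERY_SYSTEM, .u.data = SYS_DVBC_ANNEX_A },
+                    { .cmd = DTV_FREQUENCY,       .u.data = 474000000 },
+                    { .cmd = DTV_MODULATION,      .u.data = QAM_256 },
+                    { .cmd = DTV_SYMBOL_RATE,     .u.data = 6900000 },
+                    { .cmd = DTV_INNER_FEC,       .u.data = FEC_AUTO },
+                    { .cmd = DTV_TUNE,            .u.data = 0 },
+            };
+            struct dtv_properties cmds = {
+                    .num = sizeof(props) / sizeof(props[0]),
+                    .props = props,
+            };
+            int fd = open("/dev/dvb/adapter0/frontend0", O_RDWR);
+
+            if (fd < 0)
+                    return -1;
+
+            /* The frontend must stay open while the channel is in use */
+            return ioctl(fd, FE_SET_PROPERTY, &cmds);
+    }
+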
.. _DTV-UNDEFINED:
@@ -67,144 +72,36 @@ DTV_MODULATION
==============
Specifies the frontend modulation type for delivery systems that
-supports more than one modulation type. The modulation can be one of the
-types defined by enum :c:type:`fe_modulation`.
-
-
-.. c:type:: fe_modulation
-
-Modulation property
--------------------
-
-Most of the digital TV standards currently offers more than one possible
-modulation (sometimes called as "constellation" on some standards). This
-enum contains the values used by the Kernel. Please note that not all
-modulations are supported by a given standard.
-
-
-.. flat-table:: enum fe_modulation
- :header-rows: 1
- :stub-columns: 0
-
-
- - .. row 1
-
- - ID
-
- - Description
-
- - .. row 2
-
- - .. _QPSK:
-
- ``QPSK``
-
- - QPSK modulation
-
- - .. row 3
-
- - .. _QAM-16:
-
- ``QAM_16``
-
- - 16-QAM modulation
-
- - .. row 4
-
- - .. _QAM-32:
-
- ``QAM_32``
-
- - 32-QAM modulation
-
- - .. row 5
-
- - .. _QAM-64:
-
- ``QAM_64``
-
- - 64-QAM modulation
-
- - .. row 6
-
- - .. _QAM-128:
-
- ``QAM_128``
-
- - 128-QAM modulation
-
- - .. row 7
-
- - .. _QAM-256:
-
- ``QAM_256``
-
- - 256-QAM modulation
-
- - .. row 8
-
- - .. _QAM-AUTO:
+support multiple modulation types.
+
+The modulation can be one of the types defined by enum :c:type:`fe_modulation`.
+
+Most of the digital TV standards offer more than one possible
+modulation type.
+
+The table below presents a summary of the modulation types supported
+by each delivery system, as currently defined by their specs.
+
+======================= =======================================================
+Standard Modulation types
+======================= =======================================================
+ATSC (version 1) 8-VSB and 16-VSB.
+DTMB 4-QAM, 16-QAM, 32-QAM, 64-QAM and 4-QAM-NR.
+DVB-C Annex A/C 16-QAM, 32-QAM, 64-QAM and 256-QAM.
+DVB-C Annex B 64-QAM.
+DVB-T QPSK, 16-QAM and 64-QAM.
+DVB-T2 QPSK, 16-QAM, 64-QAM and 256-QAM.
+DVB-S No need to set. It supports only QPSK.
+DVB-S2 QPSK, 8-PSK, 16-APSK and 32-APSK.
+ISDB-T QPSK, DQPSK, 16-QAM and 64-QAM.
+ISDB-S 8-PSK, QPSK and BPSK.
+======================= =======================================================
- ``QAM_AUTO``
-
- - Autodetect QAM modulation
-
- - .. row 9
-
- - .. _VSB-8:
-
- ``VSB_8``
-
- - 8-VSB modulation
-
- - .. row 10
-
- - .. _VSB-16:
-
- ``VSB_16``
-
- - 16-VSB modulation
-
- - .. row 11
-
- - .. _PSK-8:
-
- ``PSK_8``
-
- - 8-PSK modulation
-
- - .. row 12
-
- - .. _APSK-16:
-
- ``APSK_16``
-
- - 16-APSK modulation
-
- - .. row 13
-
- - .. _APSK-32:
-
- ``APSK_32``
-
- - 32-APSK modulation
-
- - .. row 14
-
- - .. _DQPSK:
-
- ``DQPSK``
-
- - DQPSK modulation
-
- - .. row 15
-
- - .. _QAM-4-NR:
-
- ``QAM_4_NR``
-
- - 4-QAM-NR modulation
+.. note::
+ Please notice that some of the above modulation types may not be
+ currently defined in the Kernel. The reason is simple: no driver
+ has needed such a definition yet.
.. _DTV-BANDWIDTH-HZ:
@@ -214,32 +111,42 @@ DTV_BANDWIDTH_HZ
Bandwidth for the channel, in HZ.
+Should be set only for terrestrial delivery systems.
+
Possible values: ``1712000``, ``5000000``, ``6000000``, ``7000000``,
``8000000``, ``10000000``.
-.. note::
+======================= =======================================================
+Terrestrial Standard Possible values for bandwidth
+======================= =======================================================
+ATSC (version 1) No need to set. It is always 6MHz.
+DTMB No need to set. It is always 8MHz.
+DVB-T 6MHz, 7MHz and 8MHz.
+DVB-T2 1.712MHz, 5MHz, 6MHz, 7MHz, 8MHz and 10MHz.
+ISDB-T 5MHz, 6MHz, 7MHz and 8MHz, although most places
+ use 6MHz.
+======================= =======================================================
- #. DVB-T supports 6, 7 and 8MHz.
- #. DVB-T2 supports 1.172, 5, 6, 7, 8 and 10MHz.
+.. note::
- #. ISDB-T supports 5MHz, 6MHz, 7MHz and 8MHz, although most
- places use 6MHz.
- #. On DVB-C and DVB-S/S2, the bandwidth depends on the symbol rate.
- So, the Kernel will silently ignore setting :ref:`DTV-BANDWIDTH-HZ`.
+ #. For ISDB-Tsb, the bandwidth can vary depending on the number of
+ connected segments.
- #. For DVB-C and DVB-S/S2, the Kernel will return an estimation of the
- bandwidth, calculated from :ref:`DTV-SYMBOL-RATE` and from
- the rolloff, with is fixed for DVB-C and DVB-S.
+ It can be easily derived from other parameters
+ (DTV_ISDBT_SB_SEGMENT_IDX, DTV_ISDBT_SB_SEGMENT_COUNT).
- #. For DVB-S2, the bandwidth estimation will use :ref:`DTV-ROLLOFF`.
+ #. On Satellite and Cable delivery systems, the bandwidth depends on
+ the symbol rate. So, the Kernel will silently ignore any attempt to
+ set :ref:`DTV-BANDWIDTH-HZ`. It will, however, fill it back with a
+ bandwidth estimation.
- #. For ISDB-Tsb, it can vary depending on the number of connected
- segments.
+ Such bandwidth estimation takes into account the symbol rate set with
+ :ref:`DTV-SYMBOL-RATE`, and the rolloff factor, which is fixed for
+ DVB-C and DVB-S.
- #. Bandwidth in ISDB-Tsb can be easily derived from other parameters
- (DTV_ISDBT_SB_SEGMENT_IDX, DTV_ISDBT_SB_SEGMENT_COUNT).
+ For DVB-S2, the rolloff should also be set via :ref:`DTV-ROLLOFF`.
.. _DTV-INVERSION:
@@ -249,53 +156,7 @@ DTV_INVERSION
Specifies if the frontend should do spectral inversion or not.
-.. c:type:: fe_spectral_inversion
-
-enum fe_modulation: Frontend spectral inversion
------------------------------------------------
-
-This parameter indicates if spectral inversion should be presumed or
-not. In the automatic setting (``INVERSION_AUTO``) the hardware will try
-to figure out the correct setting by itself. If the hardware doesn't
-support, the DVB core will try to lock at the carrier first with
-inversion off. If it fails, it will try to enable inversion.
-
-
-.. flat-table:: enum fe_modulation
- :header-rows: 1
- :stub-columns: 0
-
-
- - .. row 1
-
- - ID
-
- - Description
-
- - .. row 2
-
- - .. _INVERSION-OFF:
-
- ``INVERSION_OFF``
-
- - Don't do spectral band inversion.
-
- - .. row 3
-
- - .. _INVERSION-ON:
-
- ``INVERSION_ON``
-
- - Do spectral band inversion.
-
- - .. row 4
-
- - .. _INVERSION-AUTO:
-
- ``INVERSION_AUTO``
-
- - Autodetect spectral band inversion.
-
+The acceptable values are defined by :c:type:`fe_spectral_inversion`.
.. _DTV-DISEQC-MASTER:
@@ -311,8 +172,9 @@ Currently not implemented.
DTV_SYMBOL_RATE
===============
-Digital TV symbol rate, in bauds (symbols/second). Used on cable
-standards.
+Used on cable and satellite delivery systems.
+
+Digital TV symbol rate, in bauds (symbols/second).
.. _DTV-INNER-FEC:
@@ -320,128 +182,9 @@ standards.
DTV_INNER_FEC
=============
-Used cable/satellite transmissions. The acceptable values are:
-
-.. c:type:: fe_code_rate
-
-enum fe_code_rate: type of the Forward Error Correction.
---------------------------------------------------------
-
-.. flat-table:: enum fe_code_rate
- :header-rows: 1
- :stub-columns: 0
-
-
- - .. row 1
-
- - ID
-
- - Description
-
- - .. row 2
-
- - .. _FEC-NONE:
-
- ``FEC_NONE``
-
- - No Forward Error Correction Code
-
- - .. row 3
-
- - .. _FEC-AUTO:
-
- ``FEC_AUTO``
-
- - Autodetect Error Correction Code
-
- - .. row 4
-
- - .. _FEC-1-2:
-
- ``FEC_1_2``
-
- - Forward Error Correction Code 1/2
-
- - .. row 5
-
- - .. _FEC-2-3:
-
- ``FEC_2_3``
-
- - Forward Error Correction Code 2/3
-
- - .. row 6
-
- - .. _FEC-3-4:
-
- ``FEC_3_4``
-
- - Forward Error Correction Code 3/4
-
- - .. row 7
-
- - .. _FEC-4-5:
-
- ``FEC_4_5``
-
- - Forward Error Correction Code 4/5
-
- - .. row 8
-
- - .. _FEC-5-6:
-
- ``FEC_5_6``
-
- - Forward Error Correction Code 5/6
-
- - .. row 9
-
- - .. _FEC-6-7:
-
- ``FEC_6_7``
-
- - Forward Error Correction Code 6/7
-
- - .. row 10
-
- - .. _FEC-7-8:
-
- ``FEC_7_8``
-
- - Forward Error Correction Code 7/8
-
- - .. row 11
-
- - .. _FEC-8-9:
-
- ``FEC_8_9``
-
- - Forward Error Correction Code 8/9
-
- - .. row 12
-
- - .. _FEC-9-10:
-
- ``FEC_9_10``
-
- - Forward Error Correction Code 9/10
-
- - .. row 13
-
- - .. _FEC-2-5:
-
- ``FEC_2_5``
-
- - Forward Error Correction Code 2/5
-
- - .. row 14
-
- - .. _FEC-3-5:
-
- ``FEC_3_5``
-
- - Forward Error Correction Code 3/5
+Used on cable and satellite delivery systems.
+The acceptable values are defined by :c:type:`fe_code_rate`.
.. _DTV-VOLTAGE:
@@ -449,49 +192,14 @@ enum fe_code_rate: type of the Forward Error Correction.
DTV_VOLTAGE
===========
+Used on satellite delivery systems.
+
The voltage is usually used with non-DiSEqC capable LNBs to switch the
polarzation (horizontal/vertical). When using DiSEqC epuipment this
voltage has to be switched consistently to the DiSEqC commands as
described in the DiSEqC spec.
-
-.. c:type:: fe_sec_voltage
-
-.. flat-table:: enum fe_sec_voltage
- :header-rows: 1
- :stub-columns: 0
-
-
- - .. row 1
-
- - ID
-
- - Description
-
- - .. row 2
-
- - .. _SEC-VOLTAGE-13:
-
- ``SEC_VOLTAGE_13``
-
- - Set DC voltage level to 13V
-
- - .. row 3
-
- - .. _SEC-VOLTAGE-18:
-
- ``SEC_VOLTAGE_18``
-
- - Set DC voltage level to 18V
-
- - .. row 4
-
- - .. _SEC-VOLTAGE-OFF:
-
- ``SEC_VOLTAGE_OFF``
-
- - Don't send any voltage to the antenna
-
+The acceptable values are defined by :c:type:`fe_sec_voltage`.
.. _DTV-TONE:
@@ -507,50 +215,11 @@ Currently not used.
DTV_PILOT
=========
-Sets DVB-S2 pilot
-
-
-.. c:type:: fe_pilot
-
-fe_pilot type
--------------
-
-
-.. flat-table:: enum fe_pilot
- :header-rows: 1
- :stub-columns: 0
-
-
- - .. row 1
-
- - ID
-
- - Description
-
- - .. row 2
-
- - .. _PILOT-ON:
-
- ``PILOT_ON``
-
- - Pilot tones enabled
-
- - .. row 3
-
- - .. _PILOT-OFF:
-
- ``PILOT_OFF``
+Used on DVB-S2.
- - Pilot tones disabled
-
- - .. row 4
-
- - .. _PILOT-AUTO:
-
- ``PILOT_AUTO``
-
- - Autodetect pilot tones
+Sets DVB-S2 pilot.
+The acceptable values are defined by :c:type:`fe_pilot`.
.. _DTV-ROLLOFF:
@@ -558,58 +227,11 @@ fe_pilot type
DTV_ROLLOFF
===========
-Sets DVB-S2 rolloff
-
-
-.. c:type:: fe_rolloff
-
-fe_rolloff type
----------------
-
-
-.. flat-table:: enum fe_rolloff
- :header-rows: 1
- :stub-columns: 0
-
-
- - .. row 1
-
- - ID
-
- - Description
-
- - .. row 2
-
- - .. _ROLLOFF-35:
-
- ``ROLLOFF_35``
+Used on DVB-S2.
- - Roloff factor: α=35%
-
- - .. row 3
-
- - .. _ROLLOFF-20:
-
- ``ROLLOFF_20``
-
- - Roloff factor: α=20%
-
- - .. row 4
-
- - .. _ROLLOFF-25:
-
- ``ROLLOFF_25``
-
- - Roloff factor: α=25%
-
- - .. row 5
-
- - .. _ROLLOFF-AUTO:
-
- ``ROLLOFF_AUTO``
-
- - Auto-detect the roloff factor.
+Sets DVB-S2 rolloff.
+The acceptable values are defined by :c:type:`fe_rolloff`.
.. _DTV-DISEQC-SLAVE-REPLY:
@@ -641,180 +263,9 @@ Currently not implemented.
DTV_DELIVERY_SYSTEM
===================
-Specifies the type of Delivery system
-
-
-.. c:type:: fe_delivery_system
-
-fe_delivery_system type
------------------------
-
-Possible values:
-
-
-.. flat-table:: enum fe_delivery_system
- :header-rows: 1
- :stub-columns: 0
-
-
- - .. row 1
-
- - ID
-
- - Description
-
- - .. row 2
-
- - .. _SYS-UNDEFINED:
-
- ``SYS_UNDEFINED``
-
- - Undefined standard. Generally, indicates an error
-
- - .. row 3
-
- - .. _SYS-DVBC-ANNEX-A:
-
- ``SYS_DVBC_ANNEX_A``
-
- - Cable TV: DVB-C following ITU-T J.83 Annex A spec
-
- - .. row 4
-
- - .. _SYS-DVBC-ANNEX-B:
-
- ``SYS_DVBC_ANNEX_B``
-
- - Cable TV: DVB-C following ITU-T J.83 Annex B spec (ClearQAM)
-
- - .. row 5
-
- - .. _SYS-DVBC-ANNEX-C:
-
- ``SYS_DVBC_ANNEX_C``
-
- - Cable TV: DVB-C following ITU-T J.83 Annex C spec
-
- - .. row 6
-
- - .. _SYS-ISDBC:
-
- ``SYS_ISDBC``
-
- - Cable TV: ISDB-C (no drivers yet)
-
- - .. row 7
-
- - .. _SYS-DVBT:
-
- ``SYS_DVBT``
-
- - Terrestral TV: DVB-T
-
- - .. row 8
-
- - .. _SYS-DVBT2:
-
- ``SYS_DVBT2``
-
- - Terrestral TV: DVB-T2
-
- - .. row 9
-
- - .. _SYS-ISDBT:
-
- ``SYS_ISDBT``
-
- - Terrestral TV: ISDB-T
-
- - .. row 10
-
- - .. _SYS-ATSC:
-
- ``SYS_ATSC``
-
- - Terrestral TV: ATSC
-
- - .. row 11
-
- - .. _SYS-ATSCMH:
-
- ``SYS_ATSCMH``
-
- - Terrestral TV (mobile): ATSC-M/H
-
- - .. row 12
-
- - .. _SYS-DTMB:
-
- ``SYS_DTMB``
-
- - Terrestrial TV: DTMB
-
- - .. row 13
-
- - .. _SYS-DVBS:
-
- ``SYS_DVBS``
-
- - Satellite TV: DVB-S
-
- - .. row 14
-
- - .. _SYS-DVBS2:
-
- ``SYS_DVBS2``
-
- - Satellite TV: DVB-S2
-
- - .. row 15
-
- - .. _SYS-TURBO:
-
- ``SYS_TURBO``
-
- - Satellite TV: DVB-S Turbo
-
- - .. row 16
-
- - .. _SYS-ISDBS:
-
- ``SYS_ISDBS``
-
- - Satellite TV: ISDB-S
-
- - .. row 17
-
- - .. _SYS-DAB:
-
- ``SYS_DAB``
-
- - Digital audio: DAB (not fully supported)
-
- - .. row 18
-
- - .. _SYS-DSS:
-
- ``SYS_DSS``
-
- - Satellite TV:"DSS (not fully supported)
-
- - .. row 19
-
- - .. _SYS-CMMB:
-
- ``SYS_CMMB``
-
- - Terrestral TV (mobile):CMMB (not fully supported)
-
- - .. row 20
-
- - .. _SYS-DVBH:
-
- ``SYS_DVBH``
-
- - Terrestral TV (mobile): DVB-H (standard deprecated)
+Specifies the type of the delivery system.
+The acceptable values are defined by :c:type:`fe_delivery_system`.
.. _DTV-ISDBT-PARTIAL-RECEPTION:
@@ -822,6 +273,8 @@ Possible values:
DTV_ISDBT_PARTIAL_RECEPTION
===========================
+Used only on ISDB.
+
If ``DTV_ISDBT_SOUND_BROADCASTING`` is '0' this bit-field represents
whether the channel is in partial reception mode or not.
@@ -840,6 +293,8 @@ Possible values: 0, 1, -1 (AUTO)
DTV_ISDBT_SOUND_BROADCASTING
============================
+Used only on ISDB.
+
This field represents whether the other DTV_ISDBT_*-parameters are
referring to an ISDB-T and an ISDB-Tsb channel. (See also
``DTV_ISDBT_PARTIAL_RECEPTION``).
@@ -852,6 +307,8 @@ Possible values: 0, 1, -1 (AUTO)
DTV_ISDBT_SB_SUBCHANNEL_ID
==========================
+Used only on ISDB.
+
This field only applies if ``DTV_ISDBT_SOUND_BROADCASTING`` is '1'.
(Note of the author: This might not be the correct description of the
@@ -887,6 +344,8 @@ Possible values: 0 .. 41, -1 (AUTO)
DTV_ISDBT_SB_SEGMENT_IDX
========================
+Used only on ISDB.
+
This field only applies if ``DTV_ISDBT_SOUND_BROADCASTING`` is '1'.
``DTV_ISDBT_SB_SEGMENT_IDX`` gives the index of the segment to be
@@ -903,6 +362,8 @@ Note: This value cannot be determined by an automatic channel search.
DTV_ISDBT_SB_SEGMENT_COUNT
==========================
+Used only on ISDB.
+
This field only applies if ``DTV_ISDBT_SOUND_BROADCASTING`` is '1'.
``DTV_ISDBT_SB_SEGMENT_COUNT`` gives the total count of connected
@@ -918,6 +379,8 @@ Note: This value cannot be determined by an automatic channel search.
DTV-ISDBT-LAYER[A-C] parameters
===============================
+Used only on ISDB.
+
ISDB-T channels can be coded hierarchically. As opposed to DVB-T in
ISDB-T hierarchical layers can be decoded simultaneously. For that
reason a ISDB-T demodulator has 3 Viterbi and 3 Reed-Solomon decoders.
@@ -934,6 +397,8 @@ There are 3 parameter sets, for Layers A, B and C.
DTV_ISDBT_LAYER_ENABLED
-----------------------
+Used only on ISDB.
+
Hierarchical reception in ISDB-T is achieved by enabling or disabling
layers in the decoding process. Setting all bits of
``DTV_ISDBT_LAYER_ENABLED`` to '1' forces all layers (if applicable) to
@@ -964,7 +429,13 @@ Only the values of the first 3 bits are used. Other bits will be silently ignore
DTV_ISDBT_LAYER[A-C]_FEC
------------------------
-Possible values: ``FEC_AUTO``, ``FEC_1_2``, ``FEC_2_3``, ``FEC_3_4``,
+Used only on ISDB.
+
+The Forward Error Correction mechanism used by a given ISDB Layer, as
+defined by :c:type:`fe_code_rate`.
+
+
+Possible values are: ``FEC_AUTO``, ``FEC_1_2``, ``FEC_2_3``, ``FEC_3_4``,
``FEC_5_6``, ``FEC_7_8``
@@ -973,11 +444,19 @@ Possible values: ``FEC_AUTO``, ``FEC_1_2``, ``FEC_2_3``, ``FEC_3_4``,
DTV_ISDBT_LAYER[A-C]_MODULATION
-------------------------------
-Possible values: ``QAM_AUTO``, QP\ ``SK, QAM_16``, ``QAM_64``, ``DQPSK``
+Used only on ISDB.
+
+The modulation used by a given ISDB Layer, as defined by
+:c:type:`fe_modulation`.
-Note: If layer C is ``DQPSK`` layer B has to be ``DQPSK``. If layer B is
-``DQPSK`` and ``DTV_ISDBT_PARTIAL_RECEPTION``\ =0 layer has to be
-``DQPSK``.
+Possible values are: ``QAM_AUTO``, ``QPSK``, ``QAM_16``, ``QAM_64``, ``DQPSK``
+
+.. note::
+
+ #. If layer C is ``DQPSK``, then layer B has to be ``DQPSK``.
+
+ #. If layer B is ``DQPSK`` and ``DTV_ISDBT_PARTIAL_RECEPTION``\ = 0,
+ then layer A has to be ``DQPSK``.
.. _DTV-ISDBT-LAYER-SEGMENT-COUNT:
@@ -985,6 +464,8 @@ Note: If layer C is ``DQPSK`` layer B has to be ``DQPSK``. If layer B is
DTV_ISDBT_LAYER[A-C]_SEGMENT_COUNT
----------------------------------
+Used only on ISDB.
+
Possible values: 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, -1 (AUTO)
Note: Truth table for ``DTV_ISDBT_SOUND_BROADCASTING`` and
@@ -993,15 +474,15 @@ Note: Truth table for ``DTV_ISDBT_SOUND_BROADCASTING`` and
.. _isdbt-layer_seg-cnt-table:
.. flat-table:: Truth table for ISDB-T Sound Broadcasting
- :header-rows: 0
+ :header-rows: 1
:stub-columns: 0
- .. row 1
- - PR
+ - Partial Reception
- - SB
+ - Sound Broadcasting
- Layer A width
@@ -1074,6 +555,8 @@ Note: Truth table for ``DTV_ISDBT_SOUND_BROADCASTING`` and
DTV_ISDBT_LAYER[A-C]_TIME_INTERLEAVING
--------------------------------------
+Used only on ISDB.
+
Valid values: 0, 1, 2, 4, -1 (AUTO)
when DTV_ISDBT_SOUND_BROADCASTING is active, value 8 is also valid.
@@ -1086,7 +569,7 @@ TMCC-structure, as shown in the table below.
.. c:type:: isdbt_layer_interleaving_table
.. flat-table:: ISDB-T time interleaving modes
- :header-rows: 0
+ :header-rows: 1
:stub-columns: 0
@@ -1147,6 +630,8 @@ TMCC-structure, as shown in the table below.
DTV_ATSCMH_FIC_VER
------------------
+Used only on ATSC-MH.
+
Version number of the FIC (Fast Information Channel) signaling data.
FIC is used for relaying information to allow rapid service acquisition
@@ -1160,6 +645,8 @@ Possible values: 0, 1, 2, 3, ..., 30, 31
DTV_ATSCMH_PARADE_ID
--------------------
+Used only on ATSC-MH.
+
Parade identification number
A parade is a collection of up to eight MH groups, conveying one or two
@@ -1173,6 +660,8 @@ Possible values: 0, 1, 2, 3, ..., 126, 127
DTV_ATSCMH_NOG
--------------
+Used only on ATSC-MH.
+
Number of MH groups per MH subframe for a designated parade.
Possible values: 1, 2, 3, 4, 5, 6, 7, 8
@@ -1183,6 +672,8 @@ Possible values: 1, 2, 3, 4, 5, 6, 7, 8
DTV_ATSCMH_TNOG
---------------
+Used only on ATSC-MH.
+
Total number of MH groups including all MH groups belonging to all MH
parades in one MH subframe.
@@ -1194,6 +685,8 @@ Possible values: 0, 1, 2, 3, ..., 30, 31
DTV_ATSCMH_SGN
--------------
+Used only on ATSC-MH.
+
Start group number.
Possible values: 0, 1, 2, 3, ..., 14, 15
@@ -1204,6 +697,8 @@ Possible values: 0, 1, 2, 3, ..., 14, 15
DTV_ATSCMH_PRC
--------------
+Used only on ATSC-MH.
+
Parade repetition cycle.
Possible values: 1, 2, 3, 4, 5, 6, 7, 8
@@ -1214,44 +709,11 @@ Possible values: 1, 2, 3, 4, 5, 6, 7, 8
DTV_ATSCMH_RS_FRAME_MODE
------------------------
-Reed Solomon (RS) frame mode.
-
-Possible values are:
-
-.. tabularcolumns:: |p{5.0cm}|p{12.5cm}|
-
-.. c:type:: atscmh_rs_frame_mode
-
-.. flat-table:: enum atscmh_rs_frame_mode
- :header-rows: 1
- :stub-columns: 0
-
-
- - .. row 1
-
- - ID
-
- - Description
-
- - .. row 2
-
- - .. _ATSCMH-RSFRAME-PRI-ONLY:
-
- ``ATSCMH_RSFRAME_PRI_ONLY``
-
- - Single Frame: There is only a primary RS Frame for all Group
- Regions.
-
- - .. row 3
-
- - .. _ATSCMH-RSFRAME-PRI-SEC:
-
- ``ATSCMH_RSFRAME_PRI_SEC``
+Used only on ATSC-MH.
- - Dual Frame: There are two separate RS Frames: Primary RS Frame for
- Group Region A and B and Secondary RS Frame for Group Region C and
- D.
+Reed Solomon (RS) frame mode.
+The acceptable values are defined by :c:type:`atscmh_rs_frame_mode`.
.. _DTV-ATSCMH-RS-FRAME-ENSEMBLE:
@@ -1259,48 +721,11 @@ Possible values are:
DTV_ATSCMH_RS_FRAME_ENSEMBLE
----------------------------
-Reed Solomon(RS) frame ensemble.
-
-Possible values are:
-
-
-.. c:type:: atscmh_rs_frame_ensemble
-
-.. flat-table:: enum atscmh_rs_frame_ensemble
- :header-rows: 1
- :stub-columns: 0
-
-
- - .. row 1
-
- - ID
-
- - Description
-
- - .. row 2
-
- - .. _ATSCMH-RSFRAME-ENS-PRI:
-
- ``ATSCMH_RSFRAME_ENS_PRI``
-
- - Primary Ensemble.
-
- - .. row 3
-
- - .. _ATSCMH-RSFRAME-ENS-SEC:
-
- ``AATSCMH_RSFRAME_PRI_SEC``
-
- - Secondary Ensemble.
-
- - .. row 4
-
- - .. _ATSCMH-RSFRAME-RES:
+Used only on ATSC-MH.
- ``AATSCMH_RSFRAME_RES``
-
- - Reserved. Shouldn't be used.
+Reed Solomon(RS) frame ensemble.
+The acceptable values are defined by :c:type:`atscmh_rs_frame_ensemble`.
.. _DTV-ATSCMH-RS-CODE-MODE-PRI:
@@ -1308,56 +733,11 @@ Possible values are:
DTV_ATSCMH_RS_CODE_MODE_PRI
---------------------------
-Reed Solomon (RS) code mode (primary).
-
-Possible values are:
-
-
-.. c:type:: atscmh_rs_code_mode
-
-.. flat-table:: enum atscmh_rs_code_mode
- :header-rows: 1
- :stub-columns: 0
-
-
- - .. row 1
-
- - ID
-
- - Description
-
- - .. row 2
-
- - .. _ATSCMH-RSCODE-211-187:
-
- ``ATSCMH_RSCODE_211_187``
-
- - Reed Solomon code (211,187).
-
- - .. row 3
-
- - .. _ATSCMH-RSCODE-223-187:
-
- ``ATSCMH_RSCODE_223_187``
-
- - Reed Solomon code (223,187).
-
- - .. row 4
-
- - .. _ATSCMH-RSCODE-235-187:
+Used only on ATSC-MH.
- ``ATSCMH_RSCODE_235_187``
-
- - Reed Solomon code (235,187).
-
- - .. row 5
-
- - .. _ATSCMH-RSCODE-RES:
-
- ``ATSCMH_RSCODE_RES``
-
- - Reserved. Shouldn't be used.
+Reed Solomon (RS) code mode (primary).
+The acceptable values are defined by :c:type:`atscmh_rs_code_mode`.
.. _DTV-ATSCMH-RS-CODE-MODE-SEC:
@@ -1365,10 +745,11 @@ Possible values are:
DTV_ATSCMH_RS_CODE_MODE_SEC
---------------------------
+Used only on ATSC-MH.
+
Reed Solomon (RS) code mode (secondary).
-Possible values are the same as documented on enum
-:c:type:`atscmh_rs_code_mode`:
+The acceptable values are defined by :c:type:`atscmh_rs_code_mode`.
.. _DTV-ATSCMH-SCCC-BLOCK-MODE:
@@ -1376,51 +757,11 @@ Possible values are the same as documented on enum
DTV_ATSCMH_SCCC_BLOCK_MODE
--------------------------
-Series Concatenated Convolutional Code Block Mode.
-
-Possible values are:
-
-.. tabularcolumns:: |p{4.5cm}|p{13.0cm}|
-
-.. c:type:: atscmh_sccc_block_mode
-
-.. flat-table:: enum atscmh_scc_block_mode
- :header-rows: 1
- :stub-columns: 0
-
-
- - .. row 1
-
- - ID
-
- - Description
-
- - .. row 2
-
- - .. _ATSCMH-SCCC-BLK-SEP:
-
- ``ATSCMH_SCCC_BLK_SEP``
-
- - Separate SCCC: the SCCC outer code mode shall be set independently
- for each Group Region (A, B, C, D)
-
- - .. row 3
-
- - .. _ATSCMH-SCCC-BLK-COMB:
-
- ``ATSCMH_SCCC_BLK_COMB``
-
- - Combined SCCC: all four Regions shall have the same SCCC outer
- code mode.
-
- - .. row 4
-
- - .. _ATSCMH-SCCC-BLK-RES:
-
- ``ATSCMH_SCCC_BLK_RES``
+Used only on ATSC-MH.
- - Reserved. Shouldn't be used.
+Series Concatenated Convolutional Code Block Mode.
+The acceptable values are defined by :c:type:`atscmh_sccc_block_mode`.
.. _DTV-ATSCMH-SCCC-CODE-MODE-A:
@@ -1428,55 +769,19 @@ Possible values are:
DTV_ATSCMH_SCCC_CODE_MODE_A
---------------------------
-Series Concatenated Convolutional Code Rate.
-
-Possible values are:
-
-
-.. c:type:: atscmh_sccc_code_mode
-
-.. flat-table:: enum atscmh_sccc_code_mode
- :header-rows: 1
- :stub-columns: 0
-
-
- - .. row 1
-
- - ID
-
- - Description
-
- - .. row 2
-
- - .. _ATSCMH-SCCC-CODE-HLF:
-
- ``ATSCMH_SCCC_CODE_HLF``
-
- - The outer code rate of a SCCC Block is 1/2 rate.
-
- - .. row 3
-
- - .. _ATSCMH-SCCC-CODE-QTR:
-
- ``ATSCMH_SCCC_CODE_QTR``
-
- - The outer code rate of a SCCC Block is 1/4 rate.
-
- - .. row 4
-
- - .. _ATSCMH-SCCC-CODE-RES:
-
- ``ATSCMH_SCCC_CODE_RES``
-
- - to be documented.
+Used only on ATSC-MH.
+Series Concatenated Convolutional Code Rate.
+The acceptable values are defined by :c:type:`atscmh_sccc_code_mode`.
.. _DTV-ATSCMH-SCCC-CODE-MODE-B:
DTV_ATSCMH_SCCC_CODE_MODE_B
---------------------------
+Used only on ATSC-MH.
+
Series Concatenated Convolutional Code Rate.
Possible values are the same as documented on enum
@@ -1488,6 +793,8 @@ Possible values are the same as documented on enum
DTV_ATSCMH_SCCC_CODE_MODE_C
---------------------------
+Used only on ATSC-MH.
+
Series Concatenated Convolutional Code Rate.
Possible values are the same as documented on enum
@@ -1499,6 +806,8 @@ Possible values are the same as documented on enum
DTV_ATSCMH_SCCC_CODE_MODE_D
---------------------------
+Used only on ATSC-MH.
+
Series Concatenated Convolutional Code Rate.
Possible values are the same as documented on enum
@@ -1510,7 +819,7 @@ Possible values are the same as documented on enum
DTV_API_VERSION
===============
-Returns the major/minor version of the DVB API
+Returns the major/minor version of the Digital TV API
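+
+As a sketch (assuming ``fd`` is an already opened frontend file descriptor,
+and assuming the usual encoding with the major version in the high byte and
+the minor version in the low byte):
+
+.. code-block:: c
+
+    #include <stdio.h>
+    #include <sys/ioctl.h>
+    #include <linux/dvb/frontend.h>
+
+    static void print_api_version(int fd)
+    {
+            struct dtv_property p = { .cmd = DTV_API_VERSION };
+            struct dtv_properties cmds = { .num = 1, .props = &p };
+
+            if (ioctl(fd, FE_GET_PROPERTY, &cmds) == 0)
+                    printf("Digital TV API version %u.%u\n",
+                           (p.u.data >> 8) & 0xff, p.u.data & 0xff);
+    }
+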
.. _DTV-CODE-RATE-HP:
@@ -1518,8 +827,9 @@ Returns the major/minor version of the DVB API
DTV_CODE_RATE_HP
================
-Used on terrestrial transmissions. The acceptable values are the ones
-described at :c:type:`fe_transmit_mode`.
+Used on terrestrial transmissions.
+
+The acceptable values are defined by :c:type:`fe_code_rate`.
.. _DTV-CODE-RATE-LP:
@@ -1527,8 +837,9 @@ described at :c:type:`fe_transmit_mode`.
DTV_CODE_RATE_LP
================
-Used on terrestrial transmissions. The acceptable values are the ones
-described at :c:type:`fe_transmit_mode`.
+Used on terrestrial transmissions.
+
+The acceptable values are defined by :c:type:`fe_code_rate`.
.. _DTV-GUARD-INTERVAL:
@@ -1536,242 +847,56 @@ described at :c:type:`fe_transmit_mode`.
DTV_GUARD_INTERVAL
==================
-Possible values are:
-
-
-.. c:type:: fe_guard_interval
-
-Modulation guard interval
--------------------------
-
-
-.. flat-table:: enum fe_guard_interval
- :header-rows: 1
- :stub-columns: 0
-
+The acceptable values are defined by :c:type:`fe_guard_interval`.
- - .. row 1
-
- - ID
-
- - Description
-
- - .. row 2
-
- - .. _GUARD-INTERVAL-AUTO:
-
- ``GUARD_INTERVAL_AUTO``
-
- - Autodetect the guard interval
-
- - .. row 3
-
- - .. _GUARD-INTERVAL-1-128:
-
- ``GUARD_INTERVAL_1_128``
-
- - Guard interval 1/128
-
- - .. row 4
-
- - .. _GUARD-INTERVAL-1-32:
-
- ``GUARD_INTERVAL_1_32``
-
- - Guard interval 1/32
-
- - .. row 5
-
- - .. _GUARD-INTERVAL-1-16:
-
- ``GUARD_INTERVAL_1_16``
-
- - Guard interval 1/16
-
- - .. row 6
-
- - .. _GUARD-INTERVAL-1-8:
-
- ``GUARD_INTERVAL_1_8``
-
- - Guard interval 1/8
-
- - .. row 7
-
- - .. _GUARD-INTERVAL-1-4:
-
- ``GUARD_INTERVAL_1_4``
-
- - Guard interval 1/4
-
- - .. row 8
-
- - .. _GUARD-INTERVAL-19-128:
-
- ``GUARD_INTERVAL_19_128``
-
- - Guard interval 19/128
-
- - .. row 9
-
- - .. _GUARD-INTERVAL-19-256:
-
- ``GUARD_INTERVAL_19_256``
-
- - Guard interval 19/256
-
- - .. row 10
-
- - .. _GUARD-INTERVAL-PN420:
-
- ``GUARD_INTERVAL_PN420``
-
- - PN length 420 (1/4)
-
- - .. row 11
-
- - .. _GUARD-INTERVAL-PN595:
-
- ``GUARD_INTERVAL_PN595``
-
- - PN length 595 (1/6)
-
- - .. row 12
-
- - .. _GUARD-INTERVAL-PN945:
-
- ``GUARD_INTERVAL_PN945``
-
- - PN length 945 (1/9)
-
-
-Notes:
-
-1) If ``DTV_GUARD_INTERVAL`` is set the ``GUARD_INTERVAL_AUTO`` the
-hardware will try to find the correct guard interval (if capable) and
-will use TMCC to fill in the missing parameters.
-
-2) Intervals 1/128, 19/128 and 19/256 are used only for DVB-T2 at
-present
-
-3) DTMB specifies PN420, PN595 and PN945.
+.. note::
+ #. If ``DTV_GUARD_INTERVAL`` is set to ``GUARD_INTERVAL_AUTO``, the
+ hardware will try to find the correct guard interval (if capable) and
+ will use TMCC to fill in the missing parameters.
+ #. Intervals ``GUARD_INTERVAL_1_128``, ``GUARD_INTERVAL_19_128``
+ and ``GUARD_INTERVAL_19_256`` are used only for DVB-T2 at
+ present.
+ #. Intervals ``GUARD_INTERVAL_PN420``, ``GUARD_INTERVAL_PN595`` and
+ ``GUARD_INTERVAL_PN945`` are used only for DTMB at present.
+ On that standard, only those intervals and ``GUARD_INTERVAL_AUTO``
+ are valid.
.. _DTV-TRANSMISSION-MODE:
DTV_TRANSMISSION_MODE
=====================
-Specifies the number of carriers used by the standard. This is used only
-on OFTM-based standards, e. g. DVB-T/T2, ISDB-T, DTMB
-
-
-.. c:type:: fe_transmit_mode
-
-enum fe_transmit_mode: Number of carriers per channel
------------------------------------------------------
-
-.. tabularcolumns:: |p{5.0cm}|p{12.5cm}|
-
-.. flat-table:: enum fe_transmit_mode
- :header-rows: 1
- :stub-columns: 0
-
-
- - .. row 1
-
- - ID
-
- - Description
-
- - .. row 2
-
- - .. _TRANSMISSION-MODE-AUTO:
-
- ``TRANSMISSION_MODE_AUTO``
-
- - Autodetect transmission mode. The hardware will try to find the
- correct FFT-size (if capable) to fill in the missing parameters.
-
- - .. row 3
-
- - .. _TRANSMISSION-MODE-1K:
-
- ``TRANSMISSION_MODE_1K``
-
- - Transmission mode 1K
-
- - .. row 4
-
- - .. _TRANSMISSION-MODE-2K:
-
- ``TRANSMISSION_MODE_2K``
-
- - Transmission mode 2K
-
- - .. row 5
-
- - .. _TRANSMISSION-MODE-8K:
-
- ``TRANSMISSION_MODE_8K``
- - Transmission mode 8K
+Used only on OFDM-based standards, e.g. DVB-T/T2, ISDB-T and DTMB.
- - .. row 6
+Specifies the FFT size (which corresponds to the approximate number of
+carriers) used by the standard.
- - .. _TRANSMISSION-MODE-4K:
-
- ``TRANSMISSION_MODE_4K``
-
- - Transmission mode 4K
-
- - .. row 7
-
- - .. _TRANSMISSION-MODE-16K:
-
- ``TRANSMISSION_MODE_16K``
-
- - Transmission mode 16K
-
- - .. row 8
-
- - .. _TRANSMISSION-MODE-32K:
-
- ``TRANSMISSION_MODE_32K``
-
- - Transmission mode 32K
-
- - .. row 9
-
- - .. _TRANSMISSION-MODE-C1:
-
- ``TRANSMISSION_MODE_C1``
-
- - Single Carrier (C=1) transmission mode (DTMB)
-
- - .. row 10
-
- - .. _TRANSMISSION-MODE-C3780:
-
- ``TRANSMISSION_MODE_C3780``
-
- - Multi Carrier (C=3780) transmission mode (DTMB)
+The acceptable values are defined by :c:type:`fe_transmit_mode`.
+.. note::
-Notes:
+ #. ISDB-T supports three carrier/symbol sizes: 8K, 4K and 2K. They are
+ called **modes** in that standard, and are numbered from 1 to 3:
-1) ISDB-T supports three carrier/symbol-size: 8K, 4K, 2K. It is called
-'mode' in the standard: Mode 1 is 2K, mode 2 is 4K, mode 3 is 8K
+ ==== ======== ========================
+ Mode FFT size Transmission mode
+ ==== ======== ========================
+ 1 2K ``TRANSMISSION_MODE_2K``
+ 2 4K ``TRANSMISSION_MODE_4K``
+ 3 8K ``TRANSMISSION_MODE_8K``
+ ==== ======== ========================
-2) If ``DTV_TRANSMISSION_MODE`` is set the ``TRANSMISSION_MODE_AUTO``
-the hardware will try to find the correct FFT-size (if capable) and will
-use TMCC to fill in the missing parameters.
+ #. If ``DTV_TRANSMISSION_MODE`` is set to ``TRANSMISSION_MODE_AUTO``,
+ the hardware will try to find the correct FFT-size (if capable) and
+ will use TMCC to fill in the missing parameters.
-3) DVB-T specifies 2K and 8K as valid sizes.
+ #. DVB-T specifies 2K and 8K as valid sizes.
-4) DVB-T2 specifies 1K, 2K, 4K, 8K, 16K and 32K.
+ #. DVB-T2 specifies 1K, 2K, 4K, 8K, 16K and 32K.
-5) DTMB specifies C1 and C3780.
+ #. DTMB specifies C1 and C3780.
.. _DTV-HIERARCHY:
@@ -1779,66 +904,11 @@ use TMCC to fill in the missing parameters.
DTV_HIERARCHY
=============
-Frontend hierarchy
-
-
-.. c:type:: fe_hierarchy
-
-Frontend hierarchy
-------------------
-
-
-.. flat-table:: enum fe_hierarchy
- :header-rows: 1
- :stub-columns: 0
-
-
- - .. row 1
-
- - ID
-
- - Description
-
- - .. row 2
-
- - .. _HIERARCHY-NONE:
-
- ``HIERARCHY_NONE``
-
- - No hierarchy
-
- - .. row 3
-
- - .. _HIERARCHY-AUTO:
-
- ``HIERARCHY_AUTO``
-
- - Autodetect hierarchy (if supported)
-
- - .. row 4
+Used only on DVB-T and DVB-T2.
- - .. _HIERARCHY-1:
-
- ``HIERARCHY_1``
-
- - Hierarchy 1
-
- - .. row 5
-
- - .. _HIERARCHY-2:
-
- ``HIERARCHY_2``
-
- - Hierarchy 2
-
- - .. row 6
-
- - .. _HIERARCHY-4:
-
- ``HIERARCHY_4``
-
- - Hierarchy 4
+Frontend hierarchy.
+The acceptable values are defined by :c:type:`fe_hierarchy`.
.. _DTV-STREAM-ID:
@@ -1846,8 +916,10 @@ Frontend hierarchy
DTV_STREAM_ID
=============
+Used on DVB-S2, DVB-T2 and ISDB-S.
+
DVB-S2, DVB-T2 and ISDB-S support the transmission of several streams on
-a single transport stream. This property enables the DVB driver to
+a single transport stream. This property enables the digital TV driver to
handle substream filtering, when supported by the hardware. By default,
substream filtering is disabled.
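+
+As an illustrative sketch only (stream number 3 below is a made-up example,
+and ``fd`` is assumed to be an already opened frontend file descriptor), the
+property can be cached before the tune request with:
+
+.. code-block:: c
+
+    #include <sys/ioctl.h>
+    #include <linux/dvb/frontend.h>
+
+    static int select_substream(int fd)
+    {
+            /* Hypothetical example: select sub-stream (e.g. PLP/ISI) number 3 */
+            struct dtv_property p = { .cmd = DTV_STREAM_ID, .u.data = 3 };
+            struct dtv_properties cmds = { .num = 1, .props = &p };
+
+            return ioctl(fd, FE_SET_PROPERTY, &cmds);
+    }
+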
@@ -1884,60 +956,17 @@ with it, rather than trying to use FE_GET_INFO. In the case of a
legacy frontend, the result is just the same as with FE_GET_INFO, but
in a more structured format
+The acceptable values are defined by :c:type:`fe_delivery_system`.
+
.. _DTV-INTERLEAVING:
DTV_INTERLEAVING
================
-Time interleaving to be used. Currently, used only on DTMB.
-
-
-.. c:type:: fe_interleaving
-
-.. flat-table:: enum fe_interleaving
- :header-rows: 1
- :stub-columns: 0
-
-
- - .. row 1
-
- - ID
-
- - Description
-
- - .. row 2
-
- - .. _INTERLEAVING-NONE:
-
- ``INTERLEAVING_NONE``
-
- - No interleaving.
-
- - .. row 3
-
- - .. _INTERLEAVING-AUTO:
-
- ``INTERLEAVING_AUTO``
-
- - Auto-detect interleaving.
-
- - .. row 4
-
- - .. _INTERLEAVING-240:
-
- ``INTERLEAVING_240``
-
- - Interleaving of 240 symbols.
-
- - .. row 5
-
- - .. _INTERLEAVING-720:
-
- ``INTERLEAVING_720``
-
- - Interleaving of 720 symbols.
+Time interleaving to be used.
+The acceptable values are defined by :c:type:`fe_interleaving`.
.. _DTV-LNA:
diff --git a/Documentation/media/uapi/dvb/frontend-header.rst b/Documentation/media/uapi/dvb/frontend-header.rst
new file mode 100644
index 000000000000..8d8433cf1e12
--- /dev/null
+++ b/Documentation/media/uapi/dvb/frontend-header.rst
@@ -0,0 +1,4 @@
+Frontend uAPI data types
+========================
+
+.. kernel-doc:: include/uapi/linux/dvb/frontend.h
diff --git a/Documentation/media/uapi/dvb/frontend-property-terrestrial-systems.rst b/Documentation/media/uapi/dvb/frontend-property-terrestrial-systems.rst
index dbc717cad9ee..0beb5cb3d729 100644
--- a/Documentation/media/uapi/dvb/frontend-property-terrestrial-systems.rst
+++ b/Documentation/media/uapi/dvb/frontend-property-terrestrial-systems.rst
@@ -100,7 +100,7 @@ to tune any ISDB-T/ISDB-Tsb hardware. Of course it is possible that some
very sophisticated devices won't need certain parameters to tune.
The information given here should help application writers to know how
-to handle ISDB-T and ISDB-Tsb hardware using the Linux DVB-API.
+to handle ISDB-T and ISDB-Tsb hardware using the Linux Digital TV API.
The details given here about ISDB-T and ISDB-Tsb are just enough to
basically show the dependencies between the needed parameter values, but
diff --git a/Documentation/media/uapi/dvb/frontend.rst b/Documentation/media/uapi/dvb/frontend.rst
index e051a9012540..4967c48d46ce 100644
--- a/Documentation/media/uapi/dvb/frontend.rst
+++ b/Documentation/media/uapi/dvb/frontend.rst
@@ -2,20 +2,22 @@
.. _dvb_frontend:
-################
-DVB Frontend API
-################
-The DVB frontend API was designed to support three types of delivery
-systems:
+#######################
+Digital TV Frontend API
+#######################
+
+The Digital TV frontend API was designed to support three groups of delivery
+systems: terrestrial, cable and satellite. Currently, the following
+delivery systems are supported:
- Terrestrial systems: DVB-T, DVB-T2, ATSC, ATSC M/H, ISDB-T, DVB-H,
DTMB, CMMB
-- Cable systems: DVB-C Annex A/C, ClearQAM (DVB-C Annex B), ISDB-C
+- Cable systems: DVB-C Annex A/C, ClearQAM (DVB-C Annex B)
- Satellite systems: DVB-S, DVB-S2, DVB Turbo, ISDB-S, DSS
-The DVB frontend controls several sub-devices including:
+The Digital TV frontend controls several sub-devices including:
- Tuner
@@ -23,7 +25,7 @@ The DVB frontend controls several sub-devices including:
- Low noise amplifier (LNA)
-- Satellite Equipment Control (SEC) hardware (only for Satellite).
+- Satellite Equipment Control (SEC) [#f1]_.
The frontend can be accessed through ``/dev/dvb/adapter?/frontend?``.
Data types and ioctl definitions can be accessed by including
@@ -31,16 +33,18 @@ Data types and ioctl definitions can be accessed by including
.. note::
- Transmission via the internet (DVB-IP) is not yet handled by this
- API but a future extension is possible.
+ Transmission via the internet (DVB-IP) and MMT (MPEG Media Transport)
+ is not yet handled by this API but a future extension is possible.
+
+.. [#f1]
-On Satellite systems, the API support for the Satellite Equipment
-Control (SEC) allows to power control and to send/receive signals to
-control the antenna subsystem, selecting the polarization and choosing
-the Intermediate Frequency IF) of the Low Noise Block Converter Feed
-Horn (LNBf). It supports the DiSEqC and V-SEC protocols. The DiSEqC
-(digital SEC) specification is available at
-`Eutelsat <http://www.eutelsat.com/satellites/4_5_5.html>`__.
+ On Satellite systems, the API support for the Satellite Equipment
+ Control (SEC) allows power control and sending/receiving signals to
+ control the antenna subsystem, selecting the polarization and choosing
+ the Intermediate Frequency (IF) of the Low Noise Block Converter Feed
+ Horn (LNBf). It supports the DiSEqC and V-SEC protocols. The DiSEqC
+ (digital SEC) specification is available at
+ `Eutelsat <http://www.eutelsat.com/satellites/4_5_5.html>`__.
.. toctree::
@@ -50,4 +54,3 @@ Horn (LNBf). It supports the DiSEqC and V-SEC protocols. The DiSEqC
dvb-fe-read-status
dvbproperty
frontend_fcalls
- frontend_legacy_dvbv3_api
diff --git a/Documentation/media/uapi/dvb/frontend_f_close.rst b/Documentation/media/uapi/dvb/frontend_f_close.rst
index f3b04b60246c..67958d73cf34 100644
--- a/Documentation/media/uapi/dvb/frontend_f_close.rst
+++ b/Documentation/media/uapi/dvb/frontend_f_close.rst
@@ -2,9 +2,9 @@
.. _frontend_f_close:
-********************
-DVB frontend close()
-********************
+***************************
+Digital TV frontend close()
+***************************
Name
====
@@ -41,8 +41,10 @@ down automatically.
Return Value
============
-The function returns 0 on success, -1 on failure and the ``errno`` is
-set appropriately. Possible error codes:
+On success 0 is returned.
-EBADF
- ``fd`` is not a valid open file descriptor.
+On error -1 is returned, and the ``errno`` variable is set
+appropriately.
+
+Generic error codes are described at the
+:ref:`Generic Error Codes <gen-errors>` chapter.
diff --git a/Documentation/media/uapi/dvb/frontend_f_open.rst b/Documentation/media/uapi/dvb/frontend_f_open.rst
index 690eb375bdc1..8e8cb466c24b 100644
--- a/Documentation/media/uapi/dvb/frontend_f_open.rst
+++ b/Documentation/media/uapi/dvb/frontend_f_open.rst
@@ -2,9 +2,9 @@
.. _frontend_f_open:
-*******************
-DVB frontend open()
-*******************
+***************************
+Digital TV frontend open()
+***************************
Name
====
@@ -79,24 +79,32 @@ On error, -1 is returned, and the ``errno`` variable is set appropriately.
Possible error codes are:
-EACCES
- The caller has no permission to access the device.
-EBUSY
- The the device driver is already in use.
+On success, the ``open()`` call returns the new file descriptor for the device.
-ENXIO
- No device corresponding to this device special file exists.
+On error -1 is returned, and the ``errno`` variable is set
+appropriately.
-ENOMEM
- Not enough kernel memory was available to complete the request.
+.. tabularcolumns:: |p{2.5cm}|p{15.0cm}|
-EMFILE
- The process already has the maximum number of files open.
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 16
-ENFILE
- The limit on the total number of files open on the system has been
- reached.
+ - - ``EPERM``
+ - The caller has no permission to access the device.
-ENODEV
- The device got removed.
+ - - ``EBUSY``
+ - The device driver is already in use.
+
+ - - ``EMFILE``
+ - The process already has the maximum number of files open.
+
+ - - ``ENFILE``
+ - The limit on the total number of files open on the system has been
+ reached.
+
+
+The generic error codes are described at the
+:ref:`Generic Error Codes <gen-errors>` chapter.
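+
+A minimal usage sketch (the adapter path below is just an example, and error
+handling is reduced to reporting ``errno``):
+
+.. code-block:: c
+
+    #include <fcntl.h>
+    #include <stdio.h>
+
+    static int open_frontend(void)
+    {
+            int fd = open("/dev/dvb/adapter0/frontend0", O_RDWR | O_NONBLOCK);
+
+            if (fd < 0)
+                    perror("frontend open");  /* errno: EPERM, EBUSY, ... */
+            return fd;
+    }
+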
diff --git a/Documentation/media/uapi/dvb/frontend_h.rst b/Documentation/media/uapi/dvb/frontend_h.rst
deleted file mode 100644
index 15fca04d1c32..000000000000
--- a/Documentation/media/uapi/dvb/frontend_h.rst
+++ /dev/null
@@ -1,9 +0,0 @@
-.. -*- coding: utf-8; mode: rst -*-
-
-.. _frontend_h:
-
-************************
-DVB Frontend Header File
-************************
-
-.. kernel-include:: $BUILDDIR/frontend.h.rst
diff --git a/Documentation/media/uapi/dvb/frontend_legacy_dvbv3_api.rst b/Documentation/media/uapi/dvb/frontend_legacy_dvbv3_api.rst
index 7d4a091b7d7f..a4d5319cb76b 100644
--- a/Documentation/media/uapi/dvb/frontend_legacy_dvbv3_api.rst
+++ b/Documentation/media/uapi/dvb/frontend_legacy_dvbv3_api.rst
@@ -2,9 +2,9 @@
.. _frontend_legacy_dvbv3_api:
-****************************************
-DVB Frontend legacy API (a. k. a. DVBv3)
-****************************************
+***********************************************
+Digital TV Frontend legacy API (a. k. a. DVBv3)
+***********************************************
The usage of this API is deprecated, as it doesn't support all digital
TV standards, doesn't provide good statistics measurements and provides
diff --git a/Documentation/media/uapi/dvb/headers.rst b/Documentation/media/uapi/dvb/headers.rst
new file mode 100644
index 000000000000..c13fd537fbff
--- /dev/null
+++ b/Documentation/media/uapi/dvb/headers.rst
@@ -0,0 +1,21 @@
+****************************
+Digital TV uAPI header files
+****************************
+
+Digital TV uAPI headers
+***********************
+
+.. kernel-include:: $BUILDDIR/frontend.h.rst
+
+.. kernel-include:: $BUILDDIR/dmx.h.rst
+
+.. kernel-include:: $BUILDDIR/ca.h.rst
+
+.. kernel-include:: $BUILDDIR/net.h.rst
+
+Legacy uAPI
+***********
+
+.. kernel-include:: $BUILDDIR/audio.h.rst
+
+.. kernel-include:: $BUILDDIR/video.h.rst
diff --git a/Documentation/media/uapi/dvb/intro.rst b/Documentation/media/uapi/dvb/intro.rst
index 652c4aacd2c6..79b4d0e4e920 100644
--- a/Documentation/media/uapi/dvb/intro.rst
+++ b/Documentation/media/uapi/dvb/intro.rst
@@ -13,15 +13,18 @@ What you need to know
=====================
The reader of this document is required to have some knowledge in the
-area of digital video broadcasting (DVB) and should be familiar with
+area of digital video broadcasting (Digital TV) and should be familiar with
part I of the MPEG2 specification ISO/IEC 13818 (aka ITU-T H.222), i.e
you should know what a program/transport stream (PS/TS) is and what is
meant by a packetized elementary stream (PES) or an I-frame.
-Various DVB standards documents are available from http://www.dvb.org
-and/or http://www.etsi.org.
+Various Digital TV standards documents are available for download at:
-It is also necessary to know how to access unix/linux devices and how to
+- European standards (DVB): http://www.dvb.org and/or http://www.etsi.org.
+- American standards (ATSC): https://www.atsc.org/standards/
+- Japanese standards (ISDB): http://www.dibeg.org/
+
+It is also necessary to know how to access Linux devices and how to
use ioctl calls. This also includes the knowledge of C or C++.
@@ -30,21 +33,25 @@ use ioctl calls. This also includes the knowledge of C or C++.
History
=======
-The first API for DVB cards we used at Convergence in late 1999 was an
+The first API for Digital TV cards we used at Convergence in late 1999 was an
extension of the Video4Linux API which was primarily developed for frame
-grabber cards. As such it was not really well suited to be used for DVB
-cards and their new features like recording MPEG streams and filtering
+grabber cards. As such it was not really well suited to be used for Digital
+TV cards and their new features like recording MPEG streams and filtering
several section and PES data streams at the same time.
-In early 2000, we were approached by Nokia with a proposal for a new
-standard Linux DVB API. As a commitment to the development of terminals
+In early 2000, Convergence was approached by Nokia with a proposal for a new
+standard Linux Digital TV API. As a commitment to the development of terminals
based on open standards, Nokia and Convergence made it available to all
Linux developers and published it on https://linuxtv.org in September
-2000. Convergence is the maintainer of the Linux DVB API. Together with
-the LinuxTV community (i.e. you, the reader of this document), the Linux
-DVB API will be constantly reviewed and improved. With the Linux driver
-for the Siemens/Hauppauge DVB PCI card Convergence provides a first
-implementation of the Linux DVB API.
+2000. With the Linux driver for the Siemens/Hauppauge DVB PCI card,
+Convergence provided a first implementation of the Linux Digital TV API.
+Convergence was the maintainer of the Linux Digital TV API in the early
+days.
+
+Now, the API is maintained by the LinuxTV community (i.e. you, the reader
+of this document). The Linux Digital TV API is constantly reviewed and
+improved together with the core of the subsystem inside the Kernel.
.. _overview:
@@ -59,61 +66,65 @@ Overview
:alt: dvbstb.svg
:align: center
- Components of a DVB card/STB
+ Components of a Digital TV card/STB
-A DVB PCI card or DVB set-top-box (STB) usually consists of the
+A Digital TV card or set-top-box (STB) usually consists of the
following main hardware components:
-- Frontend consisting of tuner and DVB demodulator
-
- Here the raw signal reaches the DVB hardware from a satellite dish or
+Frontend consisting of tuner and digital TV demodulator
+ Here the raw signal reaches the digital TV hardware from a satellite dish or
antenna or directly from cable. The frontend down-converts and
demodulates this signal into an MPEG transport stream (TS). In case
of a satellite frontend, this includes a facility for satellite
equipment control (SEC), which allows control of LNB polarization,
multi feed switches or dish rotors.
-- Conditional Access (CA) hardware like CI adapters and smartcard slots
-
+Conditional Access (CA) hardware like CI adapters and smartcard slots
The complete TS is passed through the CA hardware. Programs to which
the user has access (controlled by the smart card) are decoded in
real time and re-inserted into the TS.
-- Demultiplexer which filters the incoming DVB stream
+ .. note::
+ Not all digital TV hardware provides conditional access hardware.
+
+Demultiplexer which filters the incoming Digital TV MPEG-TS stream
The demultiplexer splits the TS into its components like audio and
video streams. Besides usually several of such audio and video
streams it also contains data streams with information about the
programs offered in this or other streams of the same provider.
-- MPEG2 audio and video decoder
+Audio and video decoder
+ The main targets of the demultiplexer are audio and video
+ decoders. After decoding, they pass on the uncompressed audio and
+ video to the computer screen or to a TV set.
+
+ .. note::
- The main targets of the demultiplexer are the MPEG2 audio and video
- decoders. After decoding they pass on the uncompressed audio and
- video to the computer screen or (through a PAL/NTSC encoder) to a TV
- set.
+ Modern hardware usually doesn't have separate decoder hardware, as
+ such functionality can be provided by the main CPU, by the graphics
+ adapter of the system or by signal processing hardware embedded in
+ a System on a Chip (SoC) integrated circuit.
+
+ It may also not be needed for certain usages (e.g. for data-only
+ uses like “internet over satellite”).
:ref:`stb_components` shows a crude schematic of the control and data
flow between those components.
-On a DVB PCI card not all of these have to be present since some
-functionality can be provided by the main CPU of the PC (e.g. MPEG
-picture and sound decoding) or is not needed (e.g. for data-only uses
-like “internet over satellite”). Also not every card or STB provides
-conditional access hardware.
.. _dvb_devices:
-Linux DVB Devices
-=================
+Linux Digital TV Devices
+========================
-The Linux DVB API lets you control these hardware components through
+The Linux Digital TV API lets you control these hardware components through
currently six Unix-style character devices for video, audio, frontend,
demux, CA and IP-over-DVB networking. The video and audio devices
control the MPEG2 decoder hardware, the frontend device the tuner and
-the DVB demodulator. The demux device gives you control over the PES and
-section filters of the hardware. If the hardware does not support
+the Digital TV demodulator. The demux device gives you control over the PES
+and section filters of the hardware. If the hardware does not support
filtering these filters can be implemented in software. Finally, the CA
device controls all the conditional access capabilities of the hardware.
It can depend on the individual security requirements of the platform,
@@ -137,9 +148,9 @@ individual devices are called:
- ``/dev/dvb/adapterN/caM``,
-where N enumerates the DVB PCI cards in a system starting from 0, and M
-enumerates the devices of each type within each adapter, starting
-from 0, too. We will omit the “ ``/dev/dvb/adapterN/``\ ” in the further
+where ``N`` enumerates the Digital TV cards in a system starting from 0, and
+``M`` enumerates the devices of each type within each adapter, starting
+from 0, too. We will omit the “``/dev/dvb/adapterN/``\ ” in the further
discussion of these devices.
More details about the data structures and function calls of all the
@@ -151,8 +162,8 @@ devices are described in the following chapters.
API include files
=================
-For each of the DVB devices a corresponding include file exists. The DVB
-API include files should be included in application sources with a
+For each of the Digital TV devices a corresponding include file exists. The
+Digital TV API include files should be included in application sources with a
partial path like:
diff --git a/Documentation/media/uapi/dvb/legacy_dvb_apis.rst b/Documentation/media/uapi/dvb/legacy_dvb_apis.rst
index 2957f5a988b0..e1b2c9c7b620 100644
--- a/Documentation/media/uapi/dvb/legacy_dvb_apis.rst
+++ b/Documentation/media/uapi/dvb/legacy_dvb_apis.rst
@@ -2,19 +2,31 @@
.. _legacy_dvb_apis:
-*******************
-DVB Deprecated APIs
-*******************
+***************************
+Digital TV Deprecated APIs
+***************************
-The APIs described here are kept only for historical reasons. There's
-just one driver for a very legacy hardware that uses this API. No modern
-drivers should use it. Instead, audio and video should be using the V4L2
-and ALSA APIs, and the pipelines should be set using the Media
-Controller API
+The APIs described here **should not** be used by new drivers or applications.
+
+The DVBv3 frontend API has issues with new delivery systems, including
+DVB-S2, DVB-T2, ISDB, etc.
+
+There's just one driver for a very legacy hardware using the Digital TV
+audio and video APIs. No modern drivers should use it. Instead, audio and
+video should be using the V4L2 and ALSA APIs, and the pipelines should
+be set via the Media Controller API.
+
+.. attention::
+
+ The APIs described here don't necessarily reflect the current
+ code implementation, as this section of the document was written
+ for DVB version 1, while the code reflects the DVB version 3
+ implementation.
.. toctree::
:maxdepth: 1
+ frontend_legacy_dvbv3_api
video
audio
diff --git a/Documentation/media/uapi/dvb/net-add-if.rst b/Documentation/media/uapi/dvb/net-add-if.rst
index 82ce2438213f..6749b70246c5 100644
--- a/Documentation/media/uapi/dvb/net-add-if.rst
+++ b/Documentation/media/uapi/dvb/net-add-if.rst
@@ -41,43 +41,13 @@ created.
The struct :c:type:`dvb_net_if`::ifnum field will be
filled with the number of the created interface.
-.. c:type:: dvb_net_if
-
-.. flat-table:: struct dvb_net_if
- :header-rows: 1
- :stub-columns: 0
-
-
- - .. row 1
-
- - ID
-
- - Description
-
- - .. row 2
-
- - pid
-
- - Packet ID (PID) of the MPEG-TS that contains data
-
- - .. row 3
-
- - ifnum
-
- - number of the DVB interface.
-
- - .. row 4
-
- - feedtype
-
- - Encapsulation type of the feed. It can be:
- ``DVB_NET_FEEDTYPE_MPE`` for MPE encoding or
- ``DVB_NET_FEEDTYPE_ULE`` for ULE encoding.
-
-
Return Value
============
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
+On success 0 is returned, and :c:type:`dvb_net_if` is filled.
+
+On error -1 is returned, and the ``errno`` variable is set
+appropriately.
+
+The generic error codes are described at the
:ref:`Generic Error Codes <gen-errors>` chapter.
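+
+As a sketch (adapter path and PID are made-up example values; the field is
+named ``if_num`` in the C header, while this document refers to it as
+``ifnum``):
+
+.. code-block:: c
+
+    #include <fcntl.h>
+    #include <stdio.h>
+    #include <sys/ioctl.h>
+    #include <linux/dvb/net.h>
+
+    static int add_mpe_interface(void)
+    {
+            int fd = open("/dev/dvb/adapter0/net0", O_RDWR);
+            struct dvb_net_if net = {
+                    .pid = 0x1234,                    /* example PID carrying IP data */
+                    .feedtype = DVB_NET_FEEDTYPE_MPE, /* MPE encapsulation */
+            };
+
+            if (fd < 0 || ioctl(fd, NET_ADD_IF, &net) < 0) {
+                    perror("NET_ADD_IF");
+                    return -1;
+            }
+            printf("created interface number %u\n", net.if_num);
+            return 0;
+    }
+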
diff --git a/Documentation/media/uapi/dvb/net-get-if.rst b/Documentation/media/uapi/dvb/net-get-if.rst
index 1bb8ee0cbced..3733b34da9db 100644
--- a/Documentation/media/uapi/dvb/net-get-if.rst
+++ b/Documentation/media/uapi/dvb/net-get-if.rst
@@ -43,6 +43,10 @@ the ``errno`` with ``EINVAL`` error code.
Return Value
============
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
+On success 0 is returned, and :c:type:`dvb_net_if` is filled.
+
+On error -1 is returned, and the ``errno`` variable is set
+appropriately.
+
+The generic error codes are described at the
:ref:`Generic Error Codes <gen-errors>` chapter.
diff --git a/Documentation/media/uapi/dvb/net-remove-if.rst b/Documentation/media/uapi/dvb/net-remove-if.rst
index 646af23a925a..4ebe07a6b79a 100644
--- a/Documentation/media/uapi/dvb/net-remove-if.rst
+++ b/Documentation/media/uapi/dvb/net-remove-if.rst
@@ -39,6 +39,10 @@ The NET_REMOVE_IF ioctl deletes an interface previously created via
Return Value
============
-On success 0 is returned, on error -1 and the ``errno`` variable is set
-appropriately. The generic error codes are described at the
+On success 0 is returned.
+
+On error -1 is returned, and the ``errno`` variable is set
+appropriately.
+
+The generic error codes are described at the
:ref:`Generic Error Codes <gen-errors>` chapter.
diff --git a/Documentation/media/uapi/dvb/net-types.rst b/Documentation/media/uapi/dvb/net-types.rst
new file mode 100644
index 000000000000..e1177bdcd623
--- /dev/null
+++ b/Documentation/media/uapi/dvb/net-types.rst
@@ -0,0 +1,9 @@
+.. -*- coding: utf-8; mode: rst -*-
+
+.. _net_types:
+
+**************
+Net Data Types
+**************
+
+.. kernel-doc:: include/uapi/linux/dvb/net.h
diff --git a/Documentation/media/uapi/dvb/net.rst b/Documentation/media/uapi/dvb/net.rst
index eca42dd53261..e0cd4e402627 100644
--- a/Documentation/media/uapi/dvb/net.rst
+++ b/Documentation/media/uapi/dvb/net.rst
@@ -2,10 +2,11 @@
.. _net:
-###############
-DVB Network API
-###############
-The DVB net device controls the mapping of data packages that are part
+######################
+Digital TV Network API
+######################
+
+The Digital TV net device controls the mapping of data packages that are part
of a transport stream to be mapped into a virtual network interface,
visible through the standard Linux network protocol stack.
@@ -28,13 +29,13 @@ header.
.. _net_fcalls:
-######################
-DVB net Function Calls
-######################
+Digital TV net Function Calls
+#############################
.. toctree::
:maxdepth: 1
+ net-types
net-add-if
net-remove-if
net-get-if
diff --git a/Documentation/media/uapi/dvb/net_h.rst b/Documentation/media/uapi/dvb/net_h.rst
deleted file mode 100644
index 7bcf5ba9d1eb..000000000000
--- a/Documentation/media/uapi/dvb/net_h.rst
+++ /dev/null
@@ -1,9 +0,0 @@
-.. -*- coding: utf-8; mode: rst -*-
-
-.. _net_h:
-
-***********************
-DVB Network Header File
-***********************
-
-.. kernel-include:: $BUILDDIR/net.h.rst
diff --git a/Documentation/media/uapi/dvb/query-dvb-frontend-info.rst b/Documentation/media/uapi/dvb/query-dvb-frontend-info.rst
index 81cd9b92a36c..51ec0b04b496 100644
--- a/Documentation/media/uapi/dvb/query-dvb-frontend-info.rst
+++ b/Documentation/media/uapi/dvb/query-dvb-frontend-info.rst
@@ -9,5 +9,5 @@ Querying frontend information
Usually, the first thing to do when the frontend is opened is to check
the frontend capabilities. This is done using
:ref:`FE_GET_INFO`. This ioctl will enumerate the
-DVB API version and other characteristics about the frontend, and can be
-opened either in read only or read/write mode.
+Digital TV API version and other characteristics about the frontend, and can
+be opened either in read only or read/write mode.
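
A minimal sketch of that first step follows, assuming the customary
/dev/dvb/adapter0/frontend0 node; the capability bit tested at the end is only
an example of how the caps mask can be inspected.

/* Minimal sketch: open the frontend and query its capabilities.
 * The device path is the customary one and may differ on other setups.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/dvb/frontend.h>

int main(void)
{
	struct dvb_frontend_info info;
	int fd = open("/dev/dvb/adapter0/frontend0", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	if (ioctl(fd, FE_GET_INFO, &info) < 0) {
		perror("FE_GET_INFO");
		close(fd);
		return 1;
	}

	printf("frontend: %s\n", info.name);
	printf("frequency range: %u .. %u\n", info.frequency_min, info.frequency_max);
	if (info.caps & FE_CAN_2G_MODULATION)	/* e.g. DVB-S2/DVB-T2 capable */
		printf("supports second generation modulation\n");

	close(fd);
	return 0;
}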
diff --git a/Documentation/media/uapi/dvb/video-continue.rst b/Documentation/media/uapi/dvb/video-continue.rst
index 030c2ec98869..e65e600be632 100644
--- a/Documentation/media/uapi/dvb/video-continue.rst
+++ b/Documentation/media/uapi/dvb/video-continue.rst
@@ -44,7 +44,7 @@ Arguments
Description
-----------
-This ioctl is for DVB devices only. To control a V4L2 decoder use the
+This ioctl is for Digital TV devices only. To control a V4L2 decoder use the
V4L2 :ref:`VIDIOC_DECODER_CMD` instead.
This ioctl call restarts decoding and playing processes of the video
diff --git a/Documentation/media/uapi/dvb/video-freeze.rst b/Documentation/media/uapi/dvb/video-freeze.rst
index 9cef65a02e8d..5a28bdc8badd 100644
--- a/Documentation/media/uapi/dvb/video-freeze.rst
+++ b/Documentation/media/uapi/dvb/video-freeze.rst
@@ -44,14 +44,14 @@ Arguments
Description
-----------
-This ioctl is for DVB devices only. To control a V4L2 decoder use the
+This ioctl is for Digital TV devices only. To control a V4L2 decoder use the
V4L2 :ref:`VIDIOC_DECODER_CMD` instead.
This ioctl call suspends the live video stream being played. Decoding
and playing are frozen. It is then possible to restart the decoding and
playing process of the video stream using the VIDEO_CONTINUE command.
If VIDEO_SOURCE_MEMORY is selected in the ioctl call
-VIDEO_SELECT_SOURCE, the DVB subsystem will not decode any more data
+VIDEO_SELECT_SOURCE, the Digital TV subsystem will not decode any more data
until the ioctl call VIDEO_CONTINUE or VIDEO_PLAY is performed.
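
A minimal sketch of the freeze/continue pair described above, assuming a card
with an on-board decoder and the usual /dev/dvb/adapter0/video0 node:

/* Minimal sketch: pause and resume decoding on a legacy Digital TV video
 * device. Assumes /dev/dvb/adapter0/video0 and a card with its own decoder.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/dvb/video.h>

int main(void)
{
	int fd = open("/dev/dvb/adapter0/video0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	if (ioctl(fd, VIDEO_FREEZE) < 0)	/* suspend the live stream */
		perror("VIDEO_FREEZE");

	sleep(2);				/* ...do something while frozen... */

	if (ioctl(fd, VIDEO_CONTINUE) < 0)	/* restart decoding and playback */
		perror("VIDEO_CONTINUE");

	close(fd);
	return 0;
}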
diff --git a/Documentation/media/uapi/dvb/video-get-event.rst b/Documentation/media/uapi/dvb/video-get-event.rst
index 6ad14cdb894a..b4f53616db9a 100644
--- a/Documentation/media/uapi/dvb/video-get-event.rst
+++ b/Documentation/media/uapi/dvb/video-get-event.rst
@@ -50,7 +50,7 @@ Arguments
Description
-----------
-This ioctl is for DVB devices only. To get events from a V4L2 decoder
+This ioctl is for Digital TV devices only. To get events from a V4L2 decoder
use the V4L2 :ref:`VIDIOC_DQEVENT` ioctl instead.
This ioctl call returns an event of type video_event if available. If
diff --git a/Documentation/media/uapi/dvb/video-play.rst b/Documentation/media/uapi/dvb/video-play.rst
index 3f66ae3b7e35..2124120aec22 100644
--- a/Documentation/media/uapi/dvb/video-play.rst
+++ b/Documentation/media/uapi/dvb/video-play.rst
@@ -44,7 +44,7 @@ Arguments
Description
-----------
-This ioctl is for DVB devices only. To control a V4L2 decoder use the
+This ioctl is for Digital TV devices only. To control a V4L2 decoder use the
V4L2 :ref:`VIDIOC_DECODER_CMD` instead.
This ioctl call asks the Video Device to start playing a video stream
diff --git a/Documentation/media/uapi/dvb/video-select-source.rst b/Documentation/media/uapi/dvb/video-select-source.rst
index 2f4fbf4b490c..cde6542723ca 100644
--- a/Documentation/media/uapi/dvb/video-select-source.rst
+++ b/Documentation/media/uapi/dvb/video-select-source.rst
@@ -50,7 +50,7 @@ Arguments
Description
-----------
-This ioctl is for DVB devices only. This ioctl was also supported by the
+This ioctl is for Digital TV devices only. This ioctl was also supported by the
V4L2 ivtv driver, but that has been replaced by the ivtv-specific
``IVTV_IOC_PASSTHROUGH_MODE`` ioctl.
diff --git a/Documentation/media/uapi/dvb/video-stop.rst b/Documentation/media/uapi/dvb/video-stop.rst
index fb827effb276..474309ad31c2 100644
--- a/Documentation/media/uapi/dvb/video-stop.rst
+++ b/Documentation/media/uapi/dvb/video-stop.rst
@@ -60,7 +60,7 @@ Arguments
Description
-----------
-This ioctl is for DVB devices only. To control a V4L2 decoder use the
+This ioctl is for Digital TV devices only. To control a V4L2 decoder use the
V4L2 :ref:`VIDIOC_DECODER_CMD` instead.
This ioctl call asks the Video Device to stop playing the current
diff --git a/Documentation/media/uapi/dvb/video.rst b/Documentation/media/uapi/dvb/video.rst
index 60d43fb7ce22..e7d68cd0cf23 100644
--- a/Documentation/media/uapi/dvb/video.rst
+++ b/Documentation/media/uapi/dvb/video.rst
@@ -2,20 +2,21 @@
.. _dvb_video:
-################
-DVB Video Device
-################
-The DVB video device controls the MPEG2 video decoder of the DVB
-hardware. It can be accessed through **/dev/dvb/adapter0/video0**. Data
+#######################
+Digital TV Video Device
+#######################
+
+The Digital TV video device controls the MPEG2 video decoder of the Digital
+TV hardware. It can be accessed through **/dev/dvb/adapter0/video0**. Data
types and ioctl definitions can be accessed by including
**linux/dvb/video.h** in your application.
-Note that the DVB video device only controls decoding of the MPEG video
+Note that the Digital TV video device only controls decoding of the MPEG video
stream, not its presentation on the TV or computer screen. On PCs this
is typically handled by an associated video4linux device, e.g.
**/dev/video**, which allows scaling and defining output windows.
-Some DVB cards don’t have their own MPEG decoder, which results in the
+Some Digital TV cards don’t have their own MPEG decoder, which results in the
omission of the audio and video device as well as the video4linux
device.
diff --git a/Documentation/media/uapi/dvb/video_h.rst b/Documentation/media/uapi/dvb/video_h.rst
deleted file mode 100644
index 3f39b0c4879c..000000000000
--- a/Documentation/media/uapi/dvb/video_h.rst
+++ /dev/null
@@ -1,9 +0,0 @@
-.. -*- coding: utf-8; mode: rst -*-
-
-.. _video_h:
-
-*********************
-DVB Video Header File
-*********************
-
-.. kernel-include:: $BUILDDIR/video.h.rst
diff --git a/Documentation/media/uapi/gen-errors.rst b/Documentation/media/uapi/gen-errors.rst
index d39e34d1b19d..689d3b101ede 100644
--- a/Documentation/media/uapi/gen-errors.rst
+++ b/Documentation/media/uapi/gen-errors.rst
@@ -17,9 +17,7 @@ Generic Error Codes
:widths: 1 16
- - .. row 1
-
- - ``EAGAIN`` (aka ``EWOULDBLOCK``)
+ - - ``EAGAIN`` (aka ``EWOULDBLOCK``)
- The ioctl can't be handled because the device is in a state where it
can't perform it. This could happen, for example, in a case where
@@ -27,15 +25,11 @@ Generic Error Codes
is also returned when the ioctl would need to wait for an event,
but the device was opened in non-blocking mode.
- - .. row 2
-
- - ``EBADF``
+ - - ``EBADF``
- The file descriptor is not valid.
- - .. row 3
-
- - ``EBUSY``
+ - - ``EBUSY``
- The ioctl can't be handled because the device is busy. This is
typically returned while the device is streaming, and an ioctl tried to
@@ -44,64 +38,53 @@ Generic Error Codes
ioctl must not be retried without performing another action to fix
the problem first (typically: stop the stream before retrying).
- - .. row 4
-
- - ``EFAULT``
+ - - ``EFAULT``
- There was a failure while copying data from/to userspace, probably
caused by an invalid pointer reference.
- - .. row 5
-
- - ``EINVAL``
+ - - ``EINVAL``
- One or more of the ioctl parameters are invalid or out of the
allowed range. This is a widely used error code. See the
individual ioctl requests for specific causes.
- - .. row 6
-
- - ``ENODEV``
+ - - ``ENODEV``
- Device not found or was removed.
- - .. row 7
-
- - ``ENOMEM``
+ - - ``ENOMEM``
- There's not enough memory to handle the desired operation.
- - .. row 8
-
- - ``ENOTTY``
+ - - ``ENOTTY``
- The ioctl is not supported by the driver, actually meaning that
the required functionality is not available, or the file
descriptor is not for a media device.
- - .. row 9
-
- - ``ENOSPC``
+ - - ``ENOSPC``
- On USB devices, the stream ioctls can return this error, meaning
that this request would overcommit the usb bandwidth reserved for
periodic transfers (up to 80% of the USB bandwidth).
- - .. row 10
-
- - ``EPERM``
+ - - ``EPERM``
- Permission denied. Can be returned if the device needs write
permission, or some special capability is needed (e.g. root)
- - .. row 11
-
- - ``EIO``
+ - - ``EIO``
- I/O error. Typically used when there are problems communicating with
a hardware device. This could indicate broken or flaky hardware.
It's a 'Something is wrong, I give up!' type of error.
+ - - ``ENXIO``
+
+ - No device corresponding to this device special file exists.
+
+
.. note::
#. This list is not exhaustive; ioctls may return other error codes.
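
All of these codes reach the application through the usual -1/errno convention,
so the handling pattern is the same for every request documented here. A sketch
follows, reusing NET_ADD_IF and an illustrative device path purely as example
choices.

/* Sketch of the common error handling pattern: -1 return, reason in errno.
 * NET_ADD_IF and the device path are only example choices; any media ioctl
 * documented here reports errors the same way.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/dvb/net.h>

int main(void)
{
	struct dvb_net_if net_if = { .pid = 0x1234, .feedtype = DVB_NET_FEEDTYPE_MPE };
	int fd = open("/dev/dvb/adapter0/net0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	if (ioctl(fd, NET_ADD_IF, &net_if) == -1) {
		switch (errno) {
		case EAGAIN:
		case EBUSY:
			/* transient: stop streaming or retry later */
			fprintf(stderr, "retry later: %s\n", strerror(errno));
			break;
		default:
			/* EINVAL, ENOTTY, EPERM, ... see the table above */
			fprintf(stderr, "giving up: %s\n", strerror(errno));
			break;
		}
		close(fd);
		return 1;
	}

	close(fd);
	return 0;
}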
diff --git a/Documentation/media/uapi/mediactl/media-controller.rst b/Documentation/media/uapi/mediactl/media-controller.rst
index 7ae38d48969e..0eea4f9a07d5 100644
--- a/Documentation/media/uapi/mediactl/media-controller.rst
+++ b/Documentation/media/uapi/mediactl/media-controller.rst
@@ -8,7 +8,9 @@
Part IV - Media Controller API
##############################
-.. class:: toc-title
+.. only:: html
+
+ .. class:: toc-title
Table of Contents
diff --git a/Documentation/media/uapi/mediactl/media-ioc-enum-entities.rst b/Documentation/media/uapi/mediactl/media-ioc-enum-entities.rst
index 0fd329279bef..b59ce149efb5 100644
--- a/Documentation/media/uapi/mediactl/media-ioc-enum-entities.rst
+++ b/Documentation/media/uapi/mediactl/media-ioc-enum-entities.rst
@@ -51,7 +51,7 @@ id's until they get an error.
.. c:type:: media_entity_desc
-.. tabularcolumns:: |p{1.5cm}|p{1.5cm}|p{1.5cm}|p{1.5cm}|p{11.5cm}|
+.. tabularcolumns:: |p{1.5cm}|p{1.7cm}|p{1.6cm}|p{1.5cm}|p{11.2cm}|
.. flat-table:: struct media_entity_desc
:header-rows: 0
diff --git a/Documentation/media/uapi/mediactl/media-ioc-g-topology.rst b/Documentation/media/uapi/mediactl/media-ioc-g-topology.rst
index add8281494f8..997e6b17440d 100644
--- a/Documentation/media/uapi/mediactl/media-ioc-g-topology.rst
+++ b/Documentation/media/uapi/mediactl/media-ioc-g-topology.rst
@@ -46,7 +46,7 @@ other values untouched.
If the ``topology_version`` remains the same, the ioctl should fill the
desired arrays with the media graph elements.
-.. tabularcolumns:: |p{1.6cm}|p{3.2cm}|p{12.7cm}|
+.. tabularcolumns:: |p{1.6cm}|p{3.4cm}|p{12.5cm}|
.. c:type:: media_v2_topology
diff --git a/Documentation/media/uapi/mediactl/media-types.rst b/Documentation/media/uapi/mediactl/media-types.rst
index 71078565d644..8d64b0c06ebc 100644
--- a/Documentation/media/uapi/mediactl/media-types.rst
+++ b/Documentation/media/uapi/mediactl/media-types.rst
@@ -5,7 +5,7 @@
Types and flags used to represent the media graph elements
==========================================================
-.. tabularcolumns:: |p{8.0cm}|p{10.5cm}|
+.. tabularcolumns:: |p{8.2cm}|p{10.3cm}|
.. _media-entity-type:
diff --git a/Documentation/media/uapi/rc/rc-sysfs-nodes.rst b/Documentation/media/uapi/rc/rc-sysfs-nodes.rst
index 3476ae29708f..2d01358d5504 100644
--- a/Documentation/media/uapi/rc/rc-sysfs-nodes.rst
+++ b/Documentation/media/uapi/rc/rc-sysfs-nodes.rst
@@ -34,9 +34,9 @@ receiver device where N is the number of the receiver.
/sys/class/rc/rcN/protocols
===========================
-Reading this file returns a list of available protocols, something like:
+Reading this file returns a list of available protocols, something like::
-``rc5 [rc6] nec jvc [sony]``
+ rc5 [rc6] nec jvc [sony]
Enabled protocols are shown in [] brackets.
@@ -90,11 +90,11 @@ This value may be reset to 0 if the current protocol is altered.
==================================
Reading this file returns a list of available protocols to use for the
-wakeup filter, something like:
+wakeup filter, something like::
-``rc-5 nec nec-x rc-6-0 rc-6-6a-24 [rc-6-6a-32] rc-6-mce``
+ rc-5 nec nec-x rc-6-0 rc-6-6a-24 [rc-6-6a-32] rc-6-mce
-Note that protocol variants are listed, so "nec", "sony", "rc-5", "rc-6"
+Note that protocol variants are listed, so ``nec``, ``sony``, ``rc-5``, ``rc-6``
have their different bit length encodings listed if available.
Note that all protocol variants are listed.
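
For completeness, a minimal sketch of reading one of these attributes from
userspace, using the protocols file of rc0 as the assumed example:

/* Minimal sketch: list the protocols known to the first RC receiver.
 * Assumes the rc0 device exists under /sys/class/rc/.
 */
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/class/rc/rc0/protocols", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}

	/* enabled protocols are printed inside [] brackets, e.g. "rc5 [rc6] nec" */
	if (fgets(line, sizeof(line), f))
		printf("%s", line);

	fclose(f);
	return 0;
}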
diff --git a/Documentation/media/uapi/rc/remote_controllers.rst b/Documentation/media/uapi/rc/remote_controllers.rst
index 3e25cc9f65e0..46a8acb82125 100644
--- a/Documentation/media/uapi/rc/remote_controllers.rst
+++ b/Documentation/media/uapi/rc/remote_controllers.rst
@@ -8,7 +8,9 @@
Part III - Remote Controller API
################################
-.. class:: toc-title
+.. only:: html
+
+ .. class:: toc-title
Table of Contents
diff --git a/Documentation/media/uapi/v4l/bayer.svg b/Documentation/media/uapi/v4l/bayer.svg
index fbd4cfb5e6bf..c395113d1876 100644
--- a/Documentation/media/uapi/v4l/bayer.svg
+++ b/Documentation/media/uapi/v4l/bayer.svg
@@ -1,984 +1,29 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<svg
- xmlns:dc="http://purl.org/dc/elements/1.1/"
- xmlns:cc="http://creativecommons.org/ns#"
- xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
- xmlns:svg="http://www.w3.org/2000/svg"
- xmlns="http://www.w3.org/2000/svg"
- xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
- xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
- version="1.2"
- width="164.15334mm"
- height="46.771107mm"
- viewBox="0 0 16415.333 4677.1107"
- preserveAspectRatio="xMidYMid"
- xml:space="preserve"
- id="svg2"
- inkscape:version="0.91 r13725"
- sodipodi:docname="bayer.svg"
- style="fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round"><metadata
- id="metadata652"><rdf:RDF><cc:Work
- rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title></dc:title></cc:Work></rdf:RDF></metadata><sodipodi:namedview
- pagecolor="#ffffff"
- bordercolor="#666666"
- borderopacity="1"
- objecttolerance="10"
- gridtolerance="10"
- guidetolerance="10"
- inkscape:pageopacity="0"
- inkscape:pageshadow="2"
- inkscape:window-width="1920"
- inkscape:window-height="997"
- id="namedview650"
- showgrid="false"
- fit-margin-top="0"
- fit-margin-left="0"
- fit-margin-right="0"
- fit-margin-bottom="0"
- inkscape:zoom="2.4000866"
- inkscape:cx="290.82284"
- inkscape:cy="82.862197"
- inkscape:window-x="1920"
- inkscape:window-y="30"
- inkscape:window-maximized="1"
- inkscape:current-layer="svg2" /><defs
- class="ClipPathGroup"
- id="defs4" /><defs
- id="defs9" /><defs
- id="defs82" /><defs
- id="defs105" /><defs
- class="TextShapeIndex"
- id="defs116" /><defs
- class="EmbeddedBulletChars"
- id="defs120" /><defs
- class="TextEmbeddedBitmaps"
- id="defs149" /><g
- class="com.sun.star.drawing.CustomShape"
- id="g186"
- transform="translate(-3285.889,-3185.889)"><g
- id="id6"><rect
- class="BoundingBox"
- x="3299"
- y="3199"
- width="1303"
- height="1203"
- id="rect189"
- style="fill:none;stroke:none" /><path
- d="m 3950,4400 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
- id="path191"
- inkscape:connector-curvature="0"
- style="fill:#0000ff;stroke:none" /><path
- d="m 3950,4400 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
- id="path193"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text195"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan197"
- style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
- class="TextPosition"
- x="3739"
- y="4021"
- id="tspan199"><tspan
- id="tspan201"
- style="fill:#ffffff;stroke:none">B</tspan></tspan></tspan></text>
-</g></g><g
- class="com.sun.star.drawing.CustomShape"
- id="g203"
- transform="translate(-3285.889,-3185.889)"><g
- id="id7"><rect
- class="BoundingBox"
- x="4599"
- y="3199"
- width="1303"
- height="1203"
- id="rect206"
- style="fill:none;stroke:none" /><path
- d="m 5250,4400 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
- id="path208"
- inkscape:connector-curvature="0"
- style="fill:#00cc00;stroke:none" /><path
- d="m 5250,4400 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
- id="path210"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text212"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan214"
- style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
- class="TextPosition"
- x="5003"
- y="4021"
- id="tspan216"><tspan
- id="tspan218"
- style="fill:#ffffff;stroke:none">G</tspan></tspan></tspan></text>
-</g></g><g
- class="com.sun.star.drawing.CustomShape"
- id="g220"
- transform="translate(-3285.889,-3185.889)"><g
- id="id8"><rect
- class="BoundingBox"
- x="3299"
- y="4399"
- width="1303"
- height="1203"
- id="rect223"
- style="fill:none;stroke:none" /><path
- d="m 3950,5600 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
- id="path225"
- inkscape:connector-curvature="0"
- style="fill:#00cc00;stroke:none" /><path
- d="m 3950,5600 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
- id="path227"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text229"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan231"
- style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
- class="TextPosition"
- x="3703"
- y="5221"
- id="tspan233"><tspan
- id="tspan235"
- style="fill:#ffffff;stroke:none">G</tspan></tspan></tspan></text>
-</g></g><g
- class="com.sun.star.drawing.CustomShape"
- id="g237"
- transform="translate(-3285.889,-3185.889)"><g
- id="id9"><rect
- class="BoundingBox"
- x="4599"
- y="4399"
- width="1303"
- height="1203"
- id="rect240"
- style="fill:none;stroke:none" /><path
- d="m 5250,5600 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
- id="path242"
- inkscape:connector-curvature="0"
- style="fill:#ff0000;stroke:none" /><path
- d="m 5250,5600 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
- id="path244"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text246"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan248"
- style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
- class="TextPosition"
- x="5022"
- y="5221"
- id="tspan250"><tspan
- id="tspan252"
- style="fill:#ffffff;stroke:none">R</tspan></tspan></tspan></text>
-</g></g><g
- class="com.sun.star.drawing.CustomShape"
- id="g254"
- transform="translate(-3285.889,-3185.889)"><g
- id="id10"><rect
- class="BoundingBox"
- x="5999"
- y="3299"
- width="1003"
- height="1003"
- id="rect257"
- style="fill:none;stroke:none" /><path
- d="m 6500,4300 -500,0 0,-1000 1000,0 0,1000 -500,0 z"
- id="path259"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#0000ff" /></g></g><g
- class="com.sun.star.drawing.CustomShape"
- id="g261"
- transform="translate(-3285.889,-3185.889)"><g
- id="id11"><rect
- class="BoundingBox"
- x="4699"
- y="5699"
- width="1003"
- height="1003"
- id="rect264"
- style="fill:none;stroke:none" /><path
- d="m 5200,6700 -500,0 0,-1000 1000,0 0,1000 -500,0 z"
- id="path266"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#00cc00" /></g></g><g
- class="com.sun.star.drawing.TextShape"
- id="g268"
- transform="translate(-3285.889,-3185.889)"><g
- id="id12"><rect
- class="BoundingBox"
- x="4000"
- y="6900"
- width="2374"
- height="963"
- id="rect271"
- style="fill:none;stroke:none" /><text
- class="TextShape"
- id="text273"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan275"
- style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
- class="TextPosition"
- x="4250"
- y="7601"
- id="tspan277"><tspan
- id="tspan279"
- style="fill:#000000;stroke:none">BGGR</tspan></tspan></tspan></text>
-</g></g><g
- class="com.sun.star.drawing.CustomShape"
- id="g281"
- transform="translate(-3285.889,-3185.889)"><g
- id="id13"><rect
- class="BoundingBox"
- x="8799"
- y="3199"
- width="1303"
- height="1203"
- id="rect284"
- style="fill:none;stroke:none" /><path
- d="m 9450,4400 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
- id="path286"
- inkscape:connector-curvature="0"
- style="fill:#0000ff;stroke:none" /><path
- d="m 9450,4400 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
- id="path288"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text290"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan292"
- style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
- class="TextPosition"
- x="9239"
- y="4021"
- id="tspan294"><tspan
- id="tspan296"
- style="fill:#ffffff;stroke:none">B</tspan></tspan></tspan></text>
-</g></g><g
- class="com.sun.star.drawing.CustomShape"
- id="g298"
- transform="translate(-3285.889,-3185.889)"><g
- id="id14"><rect
- class="BoundingBox"
- x="7499"
- y="3199"
- width="1303"
- height="1203"
- id="rect301"
- style="fill:none;stroke:none" /><path
- d="m 8150,4400 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
- id="path303"
- inkscape:connector-curvature="0"
- style="fill:#00cc00;stroke:none" /><path
- d="m 8150,4400 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
- id="path305"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text307"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan309"
- style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
- class="TextPosition"
- x="7903"
- y="4021"
- id="tspan311"><tspan
- id="tspan313"
- style="fill:#ffffff;stroke:none">G</tspan></tspan></tspan></text>
-</g></g><g
- class="com.sun.star.drawing.CustomShape"
- id="g315"
- transform="translate(-3285.889,-3185.889)"><g
- id="id15"><rect
- class="BoundingBox"
- x="8799"
- y="4399"
- width="1303"
- height="1203"
- id="rect318"
- style="fill:none;stroke:none" /><path
- d="m 9450,5600 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
- id="path320"
- inkscape:connector-curvature="0"
- style="fill:#00cc00;stroke:none" /><path
- d="m 9450,5600 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
- id="path322"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text324"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan326"
- style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
- class="TextPosition"
- x="9203"
- y="5221"
- id="tspan328"><tspan
- id="tspan330"
- style="fill:#ffffff;stroke:none">G</tspan></tspan></tspan></text>
-</g></g><g
- class="com.sun.star.drawing.CustomShape"
- id="g332"
- transform="translate(-3285.889,-3185.889)"><g
- id="id16"><rect
- class="BoundingBox"
- x="7499"
- y="4399"
- width="1303"
- height="1203"
- id="rect335"
- style="fill:none;stroke:none" /><path
- d="m 8150,5600 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
- id="path337"
- inkscape:connector-curvature="0"
- style="fill:#ff0000;stroke:none" /><path
- d="m 8150,5600 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
- id="path339"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text341"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan343"
- style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
- class="TextPosition"
- x="7922"
- y="5221"
- id="tspan345"><tspan
- id="tspan347"
- style="fill:#ffffff;stroke:none">R</tspan></tspan></tspan></text>
-</g></g><g
- class="com.sun.star.drawing.TextShape"
- id="g349"
- transform="translate(-3285.889,-3185.889)"><g
- id="id17"><rect
- class="BoundingBox"
- x="8200"
- y="6900"
- width="2374"
- height="963"
- id="rect352"
- style="fill:none;stroke:none" /><text
- class="TextShape"
- id="text354"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan356"
- style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
- class="TextPosition"
- x="8450"
- y="7601"
- id="tspan358"><tspan
- id="tspan360"
- style="fill:#000000;stroke:none">GBRG</tspan></tspan></tspan></text>
-</g></g><g
- class="com.sun.star.drawing.CustomShape"
- id="g362"
- transform="translate(-3285.889,-3185.889)"><g
- id="id18"><rect
- class="BoundingBox"
- x="17299"
- y="4399"
- width="1303"
- height="1203"
- id="rect365"
- style="fill:none;stroke:none" /><path
- d="m 17950,5600 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
- id="path367"
- inkscape:connector-curvature="0"
- style="fill:#0000ff;stroke:none" /><path
- d="m 17950,5600 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
- id="path369"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text371"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan373"
- style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
- class="TextPosition"
- x="17739"
- y="5221"
- id="tspan375"><tspan
- id="tspan377"
- style="fill:#ffffff;stroke:none">B</tspan></tspan></tspan></text>
-</g></g><g
- class="com.sun.star.drawing.CustomShape"
- id="g379"
- transform="translate(-3285.889,-3185.889)"><g
- id="id19"><rect
- class="BoundingBox"
- x="17299"
- y="3199"
- width="1303"
- height="1203"
- id="rect382"
- style="fill:none;stroke:none" /><path
- d="m 17950,4400 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
- id="path384"
- inkscape:connector-curvature="0"
- style="fill:#00cc00;stroke:none" /><path
- d="m 17950,4400 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
- id="path386"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text388"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan390"
- style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
- class="TextPosition"
- x="17703"
- y="4021"
- id="tspan392"><tspan
- id="tspan394"
- style="fill:#ffffff;stroke:none">G</tspan></tspan></tspan></text>
-</g></g><g
- class="com.sun.star.drawing.CustomShape"
- id="g396"
- transform="translate(-3285.889,-3185.889)"><g
- id="id20"><rect
- class="BoundingBox"
- x="15999"
- y="4399"
- width="1303"
- height="1203"
- id="rect399"
- style="fill:none;stroke:none" /><path
- d="m 16650,5600 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
- id="path401"
- inkscape:connector-curvature="0"
- style="fill:#00cc00;stroke:none" /><path
- d="m 16650,5600 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
- id="path403"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text405"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan407"
- style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
- class="TextPosition"
- x="16403"
- y="5221"
- id="tspan409"><tspan
- id="tspan411"
- style="fill:#ffffff;stroke:none">G</tspan></tspan></tspan></text>
-</g></g><g
- class="com.sun.star.drawing.CustomShape"
- id="g413"
- transform="translate(-3285.889,-3185.889)"><g
- id="id21"><rect
- class="BoundingBox"
- x="15999"
- y="3199"
- width="1303"
- height="1203"
- id="rect416"
- style="fill:none;stroke:none" /><path
- d="m 16650,4400 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
- id="path418"
- inkscape:connector-curvature="0"
- style="fill:#ff0000;stroke:none" /><path
- d="m 16650,4400 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
- id="path420"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text422"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan424"
- style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
- class="TextPosition"
- x="16422"
- y="4021"
- id="tspan426"><tspan
- id="tspan428"
- style="fill:#ffffff;stroke:none">R</tspan></tspan></tspan></text>
-</g></g><g
- class="com.sun.star.drawing.TextShape"
- id="g430"
- transform="translate(-3285.889,-3185.889)"><g
- id="id22"><rect
- class="BoundingBox"
- x="16700"
- y="6900"
- width="2374"
- height="963"
- id="rect433"
- style="fill:none;stroke:none" /><text
- class="TextShape"
- id="text435"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan437"
- style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
- class="TextPosition"
- x="16950"
- y="7601"
- id="tspan439"><tspan
- id="tspan441"
- style="fill:#000000;stroke:none">RGGB</tspan></tspan></tspan></text>
-</g></g><g
- class="com.sun.star.drawing.CustomShape"
- id="g443"
- transform="translate(-3285.889,-3185.889)"><g
- id="id23"><rect
- class="BoundingBox"
- x="11699"
- y="4399"
- width="1303"
- height="1203"
- id="rect446"
- style="fill:none;stroke:none" /><path
- d="m 12350,5600 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
- id="path448"
- inkscape:connector-curvature="0"
- style="fill:#0000ff;stroke:none" /><path
- d="m 12350,5600 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
- id="path450"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text452"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan454"
- style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
- class="TextPosition"
- x="12139"
- y="5221"
- id="tspan456"><tspan
- id="tspan458"
- style="fill:#ffffff;stroke:none">B</tspan></tspan></tspan></text>
-</g></g><g
- class="com.sun.star.drawing.CustomShape"
- id="g460"
- transform="translate(-3285.889,-3185.889)"><g
- id="id24"><rect
- class="BoundingBox"
- x="11699"
- y="3199"
- width="1303"
- height="1203"
- id="rect463"
- style="fill:none;stroke:none" /><path
- d="m 12350,4400 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
- id="path465"
- inkscape:connector-curvature="0"
- style="fill:#00cc00;stroke:none" /><path
- d="m 12350,4400 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
- id="path467"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text469"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan471"
- style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
- class="TextPosition"
- x="12103"
- y="4021"
- id="tspan473"><tspan
- id="tspan475"
- style="fill:#ffffff;stroke:none">G</tspan></tspan></tspan></text>
-</g></g><g
- class="com.sun.star.drawing.CustomShape"
- id="g477"
- transform="translate(-3285.889,-3185.889)"><g
- id="id25"><rect
- class="BoundingBox"
- x="12999"
- y="4399"
- width="1303"
- height="1203"
- id="rect480"
- style="fill:none;stroke:none" /><path
- d="m 13650,5600 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
- id="path482"
- inkscape:connector-curvature="0"
- style="fill:#00cc00;stroke:none" /><path
- d="m 13650,5600 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
- id="path484"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text486"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan488"
- style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
- class="TextPosition"
- x="13403"
- y="5221"
- id="tspan490"><tspan
- id="tspan492"
- style="fill:#ffffff;stroke:none">G</tspan></tspan></tspan></text>
-</g></g><g
- class="com.sun.star.drawing.CustomShape"
- id="g494"
- transform="translate(-3285.889,-3185.889)"><g
- id="id26"><rect
- class="BoundingBox"
- x="12999"
- y="3199"
- width="1303"
- height="1203"
- id="rect497"
- style="fill:none;stroke:none" /><path
- d="m 13650,4400 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
- id="path499"
- inkscape:connector-curvature="0"
- style="fill:#ff0000;stroke:none" /><path
- d="m 13650,4400 -650,0 0,-1200 1300,0 0,1200 -650,0 z"
- id="path501"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text503"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan505"
- style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
- class="TextPosition"
- x="13422"
- y="4021"
- id="tspan507"><tspan
- id="tspan509"
- style="fill:#ffffff;stroke:none">R</tspan></tspan></tspan></text>
-</g></g><g
- class="com.sun.star.drawing.TextShape"
- id="g511"
- transform="translate(-3285.889,-3185.889)"><g
- id="id27"><rect
- class="BoundingBox"
- x="12400"
- y="6900"
- width="2374"
- height="963"
- id="rect514"
- style="fill:none;stroke:none" /><text
- class="TextShape"
- id="text516"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan518"
- style="font-weight:400;font-size:635px;font-family:'Liberation Sans', sans-serif"><tspan
- class="TextPosition"
- x="12650"
- y="7601"
- id="tspan520"><tspan
- id="tspan522"
- style="fill:#000000;stroke:none">GRBG</tspan></tspan></tspan></text>
-</g></g><g
- class="com.sun.star.drawing.CustomShape"
- id="g524"
- transform="translate(-3285.889,-3185.889)"><g
- id="id28"><rect
- class="BoundingBox"
- x="5999"
- y="5699"
- width="1003"
- height="1003"
- id="rect527"
- style="fill:none;stroke:none" /><path
- d="m 6500,6700 -500,0 0,-1000 1000,0 0,1000 -500,0 z"
- id="path529"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#0000ff" /></g></g><g
- class="com.sun.star.drawing.CustomShape"
- id="g531"
- transform="translate(-3285.889,-3185.889)"><g
- id="id29"><rect
- class="BoundingBox"
- x="3399"
- y="5699"
- width="1003"
- height="1003"
- id="rect534"
- style="fill:none;stroke:none" /><path
- d="m 3900,6700 -500,0 0,-1000 1000,0 0,1000 -500,0 z"
- id="path536"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#0000ff" /></g></g><g
- class="com.sun.star.drawing.CustomShape"
- id="g538"
- transform="translate(-3285.889,-3185.889)"><g
- id="id30"><rect
- class="BoundingBox"
- x="5999"
- y="4499"
- width="1003"
- height="1003"
- id="rect541"
- style="fill:none;stroke:none" /><path
- d="m 6500,5500 -500,0 0,-1000 1000,0 0,1000 -500,0 z"
- id="path543"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#00cc00" /></g></g><g
- class="com.sun.star.drawing.CustomShape"
- id="g545"
- transform="translate(-3285.889,-3185.889)"><g
- id="id31"><rect
- class="BoundingBox"
- x="7599"
- y="5799"
- width="1003"
- height="1003"
- id="rect548"
- style="fill:none;stroke:none" /><path
- d="m 8100,6800 -500,0 0,-1000 1000,0 0,1000 -500,0 z"
- id="path550"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#00cc00" /></g></g><g
- class="com.sun.star.drawing.CustomShape"
- id="g552"
- transform="translate(-3285.889,-3185.889)"><g
- id="id32"><rect
- class="BoundingBox"
- x="10199"
- y="5799"
- width="1003"
- height="1003"
- id="rect555"
- style="fill:none;stroke:none" /><path
- d="m 10700,6800 -500,0 0,-1000 1000,0 0,1000 -500,0 z"
- id="path557"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#00cc00" /></g></g><g
- class="com.sun.star.drawing.CustomShape"
- id="g559"
- transform="translate(-3285.889,-3185.889)"><g
- id="id33"><rect
- class="BoundingBox"
- x="8899"
- y="5799"
- width="1003"
- height="1003"
- id="rect562"
- style="fill:none;stroke:none" /><path
- d="m 9400,6800 -500,0 0,-1000 1000,0 0,1000 -500,0 z"
- id="path564"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#0000ff" /></g></g><g
- class="com.sun.star.drawing.CustomShape"
- id="g566"
- transform="translate(-3285.889,-3185.889)"><g
- id="id34"><rect
- class="BoundingBox"
- x="10199"
- y="4499"
- width="1003"
- height="1003"
- id="rect569"
- style="fill:none;stroke:none" /><path
- d="m 10700,5500 -500,0 0,-1000 1000,0 0,1000 -500,0 z"
- id="path571"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#ff0000" /></g></g><g
- class="com.sun.star.drawing.CustomShape"
- id="g573"
- transform="translate(-3285.889,-3185.889)"><g
- id="id35"><rect
- class="BoundingBox"
- x="10199"
- y="3299"
- width="1003"
- height="1003"
- id="rect576"
- style="fill:none;stroke:none" /><path
- d="m 10700,4300 -500,0 0,-1000 1000,0 0,1000 -500,0 z"
- id="path578"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#00cc00" /></g></g><g
- class="com.sun.star.drawing.CustomShape"
- id="g580"
- transform="translate(-3285.889,-3185.889)"><g
- id="id36"><rect
- class="BoundingBox"
- x="14399"
- y="3299"
- width="1003"
- height="1003"
- id="rect583"
- style="fill:none;stroke:none" /><path
- d="m 14900,4300 -500,0 0,-1000 1000,0 0,1000 -500,0 z"
- id="path585"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#00cc00" /></g></g><g
- class="com.sun.star.drawing.CustomShape"
- id="g587"
- transform="translate(-3285.889,-3185.889)"><g
- id="id37"><rect
- class="BoundingBox"
- x="14399"
- y="5799"
- width="1003"
- height="1003"
- id="rect590"
- style="fill:none;stroke:none" /><path
- d="m 14900,6800 -500,0 0,-1000 1000,0 0,1000 -500,0 z"
- id="path592"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#00cc00" /></g></g><g
- class="com.sun.star.drawing.CustomShape"
- id="g594"
- transform="translate(-3285.889,-3185.889)"><g
- id="id38"><rect
- class="BoundingBox"
- x="11799"
- y="5799"
- width="1003"
- height="1003"
- id="rect597"
- style="fill:none;stroke:none" /><path
- d="m 12300,6800 -500,0 0,-1000 1000,0 0,1000 -500,0 z"
- id="path599"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#00cc00" /></g></g><g
- class="com.sun.star.drawing.CustomShape"
- id="g601"
- transform="translate(-3285.889,-3185.889)"><g
- id="id39"><rect
- class="BoundingBox"
- x="14399"
- y="4499"
- width="1003"
- height="1003"
- id="rect604"
- style="fill:none;stroke:none" /><path
- d="m 14900,5500 -500,0 0,-1000 1000,0 0,1000 -500,0 z"
- id="path606"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#0000ff" /></g></g><g
- class="com.sun.star.drawing.CustomShape"
- id="g608"
- transform="translate(-3285.889,-3185.889)"><g
- id="id40"><rect
- class="BoundingBox"
- x="13099"
- y="5799"
- width="1003"
- height="1003"
- id="rect611"
- style="fill:none;stroke:none" /><path
- d="m 13600,6800 -500,0 0,-1000 1000,0 0,1000 -500,0 z"
- id="path613"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#ff0000" /></g></g><g
- class="com.sun.star.drawing.CustomShape"
- id="g615"
- transform="translate(-3285.889,-3185.889)"><g
- id="id41"><rect
- class="BoundingBox"
- x="16099"
- y="5799"
- width="1003"
- height="1003"
- id="rect618"
- style="fill:none;stroke:none" /><path
- d="m 16600,6800 -500,0 0,-1000 1000,0 0,1000 -500,0 z"
- id="path620"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#ff0000" /></g></g><g
- class="com.sun.star.drawing.CustomShape"
- id="g622"
- transform="translate(-3398.7778,-3185.889)"><g
- id="id42"><rect
- class="BoundingBox"
- x="18799"
- y="5799"
- width="1003"
- height="1003"
- id="rect625"
- style="fill:none;stroke:none" /><path
- d="m 19300,6800 -500,0 0,-1000 1000,0 0,1000 -500,0 z"
- id="path627"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#ff0000" /></g></g><g
- class="com.sun.star.drawing.CustomShape"
- id="g629"
- transform="translate(-3398.7778,-3185.889)"><g
- id="id43"><rect
- class="BoundingBox"
- x="18799"
- y="3299"
- width="1003"
- height="1003"
- id="rect632"
- style="fill:none;stroke:none" /><path
- d="m 19300,4300 -500,0 0,-1000 1000,0 0,1000 -500,0 z"
- id="path634"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#ff0000" /></g></g><g
- class="com.sun.star.drawing.CustomShape"
- id="g636"
- transform="translate(-3285.889,-3185.889)"><g
- id="id44"><rect
- class="BoundingBox"
- x="17399"
- y="5799"
- width="1003"
- height="1003"
- id="rect639"
- style="fill:none;stroke:none" /><path
- d="m 17900,6800 -500,0 0,-1000 1000,0 0,1000 -500,0 z"
- id="path641"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#00cc00" /></g></g><g
- class="com.sun.star.drawing.CustomShape"
- id="g643"
- transform="translate(-3398.7778,-3185.889)"><g
- id="id45"><rect
- class="BoundingBox"
- x="18799"
- y="4499"
- width="1003"
- height="1003"
- id="rect646"
- style="fill:none;stroke:none" /><path
- d="m 19300,5500 -500,0 0,-1000 1000,0 0,1000 -500,0 z"
- id="path648"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#00cc00" /></g></g></svg> \ No newline at end of file
+<?xml version="1.0" encoding="UTF-8"?>
+<svg id="svg2" width="164.15mm" height="46.771mm" fill-rule="evenodd" stroke-linejoin="round" stroke-width="28.222" preserveAspectRatio="xMidYMid" version="1.2" viewBox="0 0 16415.333 4677.1107" xml:space="preserve" xmlns="http://www.w3.org/2000/svg" xmlns:cc="http://creativecommons.org/ns#" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"><metadata id="metadata652"><rdf:RDF><cc:Work rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type rdf:resource="http://purl.org/dc/dcmitype/StillImage"/><dc:title/></cc:Work></rdf:RDF></metadata><g id="g186" class="com.sun.star.drawing.CustomShape" transform="translate(-3285.9 -3185.9)"><g id="id6"><rect id="rect189" class="BoundingBox" x="3299" y="3199" width="1303" height="1203" fill="none"/><path id="path191" d="m3950 4400h-650v-1200h1300v1200h-650z" fill="#00f"/><path id="path193" d="m3950
+4400h-650v-1200h1300v1200h-650z" fill="none" stroke="#3465a4"/><text id="text195" class="TextShape"><tspan id="tspan197" class="TextParagraph" font-family="sans-serif" font-size="635px" font-weight="400"><tspan id="tspan199" class="TextPosition" x="3739" y="4021"><tspan id="tspan201" fill="#ffffff">B</tspan></tspan></tspan></text>
+</g></g><g id="g203" class="com.sun.star.drawing.CustomShape" transform="translate(-3285.9 -3185.9)"><g id="id7"><rect id="rect206" class="BoundingBox" x="4599" y="3199" width="1303" height="1203" fill="none"/><path id="path208" d="m5250 4400h-650v-1200h1300v1200h-650z" fill="#0c0"/><path id="path210" d="m5250 4400h-650v-1200h1300v1200h-650z" fill="none" stroke="#3465a4"/><text id="text212" class="TextShape"><tspan id="tspan214" class="TextParagraph" font-family="sans-serif" font-size="635px" font-weight="400"><tspan id="tspan216" class="TextPosition" x="5003" y="4021"><tspan id="tspan218" fill="#ffffff">G</tspan></tspan></tspan></text>
+</g></g><g id="g220" class="com.sun.star.drawing.CustomShape" transform="translate(-3285.9 -3185.9)"><g id="id8"><rect id="rect223" class="BoundingBox" x="3299" y="4399" width="1303" height="1203" fill="none"/><path id="path225" d="m3950 5600h-650v-1200h1300v1200h-650z" fill="#0c0"/><path id="path227" d="m3950 5600h-650v-1200h1300v1200h-650z" fill="none" stroke="#3465a4"/><text id="text229" class="TextShape"><tspan id="tspan231" class="TextParagraph" font-family="sans-serif" font-size="635px" font-weight="400"><tspan id="tspan233" class="TextPosition" x="3703" y="5221"><tspan id="tspan235" fill="#ffffff">G</tspan></tspan></tspan></text>
+</g></g><g id="g237" class="com.sun.star.drawing.CustomShape" transform="translate(-3285.9 -3185.9)"><g id="id9"><rect id="rect240" class="BoundingBox" x="4599" y="4399" width="1303" height="1203" fill="none"/><path id="path242" d="m5250 5600h-650v-1200h1300v1200h-650z" fill="#f00"/><path id="path244" d="m5250 5600h-650v-1200h1300v1200h-650z" fill="none" stroke="#3465a4"/><text id="text246" class="TextShape"><tspan id="tspan248" class="TextParagraph" font-family="sans-serif" font-size="635px" font-weight="400"><tspan id="tspan250" class="TextPosition" x="5022" y="5221"><tspan id="tspan252" fill="#ffffff">R</tspan></tspan></tspan></text>
+</g></g><g id="g254" class="com.sun.star.drawing.CustomShape" transform="translate(-3285.9 -3185.9)"><g id="id10" fill="none"><rect id="rect257" class="BoundingBox" x="5999" y="3299" width="1003" height="1003"/><path id="path259" d="m6500 4300h-500v-1e3h1e3v1e3h-500z" stroke="#00f"/></g></g><g id="g261" class="com.sun.star.drawing.CustomShape" transform="translate(-3285.9 -3185.9)"><g id="id11" fill="none"><rect id="rect264" class="BoundingBox" x="4699" y="5699" width="1003" height="1003"/><path id="path266" d="m5200 6700h-500v-1e3h1e3v1e3h-500z" stroke="#0c0"/></g></g><g id="g268" class="com.sun.star.drawing.TextShape" transform="translate(-3285.9 -3185.9)"><g id="id12"><rect id="rect271" class="BoundingBox" x="4e3" y="6900" width="2374" height="963" fill="none"/><text id="text273" class="TextShape"><tspan id="tspan275" class="TextParagraph" font-family="sans-serif" font-size="635px"
+font-weight="400"><tspan id="tspan277" class="TextPosition" x="4250" y="7601"><tspan id="tspan279" fill="#000000">BGGR</tspan></tspan></tspan></text>
+</g></g><g id="g281" class="com.sun.star.drawing.CustomShape" transform="translate(-3285.9 -3185.9)"><g id="id13"><rect id="rect284" class="BoundingBox" x="8799" y="3199" width="1303" height="1203" fill="none"/><path id="path286" d="m9450 4400h-650v-1200h1300v1200h-650z" fill="#00f"/><path id="path288" d="m9450 4400h-650v-1200h1300v1200h-650z" fill="none" stroke="#3465a4"/><text id="text290" class="TextShape"><tspan id="tspan292" class="TextParagraph" font-family="sans-serif" font-size="635px" font-weight="400"><tspan id="tspan294" class="TextPosition" x="9239" y="4021"><tspan id="tspan296" fill="#ffffff">B</tspan></tspan></tspan></text>
+</g></g><g id="g298" class="com.sun.star.drawing.CustomShape" transform="translate(-3285.9 -3185.9)"><g id="id14"><rect id="rect301" class="BoundingBox" x="7499" y="3199" width="1303" height="1203" fill="none"/><path id="path303" d="m8150 4400h-650v-1200h1300v1200h-650z" fill="#0c0"/><path id="path305" d="m8150 4400h-650v-1200h1300v1200h-650z" fill="none" stroke="#3465a4"/><text id="text307" class="TextShape"><tspan id="tspan309" class="TextParagraph" font-family="sans-serif" font-size="635px" font-weight="400"><tspan id="tspan311" class="TextPosition" x="7903" y="4021"><tspan id="tspan313" fill="#ffffff">G</tspan></tspan></tspan></text>
+</g></g><g id="g315" class="com.sun.star.drawing.CustomShape" transform="translate(-3285.9 -3185.9)"><g id="id15"><rect id="rect318" class="BoundingBox" x="8799" y="4399" width="1303" height="1203" fill="none"/><path id="path320" d="m9450 5600h-650v-1200h1300v1200h-650z" fill="#0c0"/><path id="path322" d="m9450 5600h-650v-1200h1300v1200h-650z" fill="none" stroke="#3465a4"/><text id="text324" class="TextShape"><tspan id="tspan326" class="TextParagraph" font-family="sans-serif" font-size="635px" font-weight="400"><tspan id="tspan328" class="TextPosition" x="9203" y="5221"><tspan id="tspan330" fill="#ffffff">G</tspan></tspan></tspan></text>
+</g></g><g id="g332" class="com.sun.star.drawing.CustomShape" transform="translate(-3285.9 -3185.9)"><g id="id16"><rect id="rect335" class="BoundingBox" x="7499" y="4399" width="1303" height="1203" fill="none"/><path id="path337" d="m8150 5600h-650v-1200h1300v1200h-650z" fill="#f00"/><path id="path339" d="m8150 5600h-650v-1200h1300v1200h-650z" fill="none" stroke="#3465a4"/><text id="text341" class="TextShape"><tspan id="tspan343" class="TextParagraph" font-family="sans-serif" font-size="635px" font-weight="400"><tspan id="tspan345" class="TextPosition" x="7922" y="5221"><tspan id="tspan347" fill="#ffffff">R</tspan></tspan></tspan></text>
+</g></g><g id="g349" class="com.sun.star.drawing.TextShape" transform="translate(-3285.9 -3185.9)"><g id="id17"><rect id="rect352" class="BoundingBox" x="8200" y="6900" width="2374" height="963" fill="none"/><text id="text354" class="TextShape"><tspan id="tspan356" class="TextParagraph" font-family="sans-serif" font-size="635px" font-weight="400"><tspan id="tspan358" class="TextPosition" x="8450" y="7601"><tspan id="tspan360" fill="#000000">GBRG</tspan></tspan></tspan></text>
+</g></g><g id="g362" class="com.sun.star.drawing.CustomShape" transform="translate(-3285.9 -3185.9)"><g id="id18"><rect id="rect365" class="BoundingBox" x="17299" y="4399" width="1303" height="1203" fill="none"/><path id="path367" d="m17950 5600h-650v-1200h1300v1200h-650z" fill="#00f"/><path id="path369" d="m17950 5600h-650v-1200h1300v1200h-650z" fill="none" stroke="#3465a4"/><text id="text371" class="TextShape"><tspan id="tspan373" class="TextParagraph" font-family="sans-serif" font-size="635px" font-weight="400"><tspan id="tspan375" class="TextPosition" x="17739" y="5221"><tspan id="tspan377" fill="#ffffff">B</tspan></tspan></tspan></text>
+</g></g><g id="g379" class="com.sun.star.drawing.CustomShape" transform="translate(-3285.9 -3185.9)"><g id="id19"><rect id="rect382" class="BoundingBox" x="17299" y="3199" width="1303" height="1203" fill="none"/><path id="path384" d="m17950 4400h-650v-1200h1300v1200h-650z" fill="#0c0"/><path id="path386" d="m17950 4400h-650v-1200h1300v1200h-650z" fill="none" stroke="#3465a4"/><text id="text388" class="TextShape"><tspan id="tspan390" class="TextParagraph" font-family="sans-serif" font-size="635px" font-weight="400"><tspan id="tspan392" class="TextPosition" x="17703" y="4021"><tspan id="tspan394" fill="#ffffff">G</tspan></tspan></tspan></text>
+</g></g><g id="g396" class="com.sun.star.drawing.CustomShape" transform="translate(-3285.9 -3185.9)"><g id="id20"><rect id="rect399" class="BoundingBox" x="15999" y="4399" width="1303" height="1203" fill="none"/><path id="path401" d="m16650 5600h-650v-1200h1300v1200h-650z" fill="#0c0"/><path id="path403" d="m16650 5600h-650v-1200h1300v1200h-650z" fill="none" stroke="#3465a4"/><text id="text405" class="TextShape"><tspan id="tspan407" class="TextParagraph" font-family="sans-serif" font-size="635px" font-weight="400"><tspan id="tspan409" class="TextPosition" x="16403" y="5221"><tspan id="tspan411" fill="#ffffff">G</tspan></tspan></tspan></text>
+</g></g><g id="g413" class="com.sun.star.drawing.CustomShape" transform="translate(-3285.9 -3185.9)"><g id="id21"><rect id="rect416" class="BoundingBox" x="15999" y="3199" width="1303" height="1203" fill="none"/><path id="path418" d="m16650 4400h-650v-1200h1300v1200h-650z" fill="#f00"/><path id="path420" d="m16650 4400h-650v-1200h1300v1200h-650z" fill="none" stroke="#3465a4"/><text id="text422" class="TextShape"><tspan id="tspan424" class="TextParagraph" font-family="sans-serif" font-size="635px" font-weight="400"><tspan id="tspan426" class="TextPosition" x="16422" y="4021"><tspan id="tspan428" fill="#ffffff">R</tspan></tspan></tspan></text>
+</g></g><g id="g430" class="com.sun.star.drawing.TextShape" transform="translate(-3285.9 -3185.9)"><g id="id22"><rect id="rect433" class="BoundingBox" x="16700" y="6900" width="2374" height="963" fill="none"/><text id="text435" class="TextShape"><tspan id="tspan437" class="TextParagraph" font-family="sans-serif" font-size="635px" font-weight="400"><tspan id="tspan439" class="TextPosition" x="16950" y="7601"><tspan id="tspan441" fill="#000000">RGGB</tspan></tspan></tspan></text>
+</g></g><g id="g443" class="com.sun.star.drawing.CustomShape" transform="translate(-3285.9 -3185.9)"><g id="id23"><rect id="rect446" class="BoundingBox" x="11699" y="4399" width="1303" height="1203" fill="none"/><path id="path448" d="m12350 5600h-650v-1200h1300v1200h-650z" fill="#00f"/><path id="path450" d="m12350 5600h-650v-1200h1300v1200h-650z" fill="none" stroke="#3465a4"/><text id="text452" class="TextShape"><tspan id="tspan454" class="TextParagraph" font-family="sans-serif" font-size="635px" font-weight="400"><tspan id="tspan456" class="TextPosition" x="12139" y="5221"><tspan id="tspan458" fill="#ffffff">B</tspan></tspan></tspan></text>
+</g></g><g id="g460" class="com.sun.star.drawing.CustomShape" transform="translate(-3285.9 -3185.9)"><g id="id24"><rect id="rect463" class="BoundingBox" x="11699" y="3199" width="1303" height="1203" fill="none"/><path id="path465" d="m12350 4400h-650v-1200h1300v1200h-650z" fill="#0c0"/><path id="path467" d="m12350 4400h-650v-1200h1300v1200h-650z" fill="none" stroke="#3465a4"/><text id="text469" class="TextShape"><tspan id="tspan471" class="TextParagraph" font-family="sans-serif" font-size="635px" font-weight="400"><tspan id="tspan473" class="TextPosition" x="12103" y="4021"><tspan id="tspan475" fill="#ffffff">G</tspan></tspan></tspan></text>
+</g></g><g id="g477" class="com.sun.star.drawing.CustomShape" transform="translate(-3285.9 -3185.9)"><g id="id25"><rect id="rect480" class="BoundingBox" x="12999" y="4399" width="1303" height="1203" fill="none"/><path id="path482" d="m13650 5600h-650v-1200h1300v1200h-650z" fill="#0c0"/><path id="path484" d="m13650 5600h-650v-1200h1300v1200h-650z" fill="none" stroke="#3465a4"/><text id="text486" class="TextShape"><tspan id="tspan488" class="TextParagraph" font-family="sans-serif" font-size="635px" font-weight="400"><tspan id="tspan490" class="TextPosition" x="13403" y="5221"><tspan id="tspan492" fill="#ffffff">G</tspan></tspan></tspan></text>
+</g></g><g id="g494" class="com.sun.star.drawing.CustomShape" transform="translate(-3285.9 -3185.9)"><g id="id26"><rect id="rect497" class="BoundingBox" x="12999" y="3199" width="1303" height="1203" fill="none"/><path id="path499" d="m13650 4400h-650v-1200h1300v1200h-650z" fill="#f00"/><path id="path501" d="m13650 4400h-650v-1200h1300v1200h-650z" fill="none" stroke="#3465a4"/><text id="text503" class="TextShape"><tspan id="tspan505" class="TextParagraph" font-family="sans-serif" font-size="635px" font-weight="400"><tspan id="tspan507" class="TextPosition" x="13422" y="4021"><tspan id="tspan509" fill="#ffffff">R</tspan></tspan></tspan></text>
+</g></g><g id="g511" class="com.sun.star.drawing.TextShape" transform="translate(-3285.9 -3185.9)"><g id="id27"><rect id="rect514" class="BoundingBox" x="12400" y="6900" width="2374" height="963" fill="none"/><text id="text516" class="TextShape"><tspan id="tspan518" class="TextParagraph" font-family="sans-serif" font-size="635px" font-weight="400"><tspan id="tspan520" class="TextPosition" x="12650" y="7601"><tspan id="tspan522" fill="#000000">GRBG</tspan></tspan></tspan></text>
+</g></g><g id="g524" class="com.sun.star.drawing.CustomShape" transform="translate(-3285.9 -3185.9)"><g id="id28" fill="none"><rect id="rect527" class="BoundingBox" x="5999" y="5699" width="1003" height="1003"/><path id="path529" d="m6500 6700h-500v-1e3h1e3v1e3h-500z" stroke="#00f"/></g></g><g id="g531" class="com.sun.star.drawing.CustomShape" transform="translate(-3285.9 -3185.9)"><g id="id29" fill="none"><rect id="rect534" class="BoundingBox" x="3399" y="5699" width="1003" height="1003"/><path id="path536" d="m3900 6700h-500v-1e3h1e3v1e3h-500z" stroke="#00f"/></g></g><g id="g538" class="com.sun.star.drawing.CustomShape" transform="translate(-3285.9 -3185.9)"><g id="id30" fill="none"><rect id="rect541" class="BoundingBox" x="5999" y="4499" width="1003" height="1003"/><path id="path543" d="m6500 5500h-500v-1e3h1e3v1e3h-500z" stroke="#0c0"/></g></g><g id="g545"
+class="com.sun.star.drawing.CustomShape" transform="translate(-3285.9 -3185.9)"><g id="id31" fill="none"><rect id="rect548" class="BoundingBox" x="7599" y="5799" width="1003" height="1003"/><path id="path550" d="m8100 6800h-500v-1e3h1e3v1e3h-500z" stroke="#0c0"/></g></g><g id="g552" class="com.sun.star.drawing.CustomShape" transform="translate(-3285.9 -3185.9)"><g id="id32" fill="none"><rect id="rect555" class="BoundingBox" x="10199" y="5799" width="1003" height="1003"/><path id="path557" d="m10700 6800h-500v-1e3h1e3v1e3h-500z" stroke="#0c0"/></g></g><g id="g559" class="com.sun.star.drawing.CustomShape" transform="translate(-3285.9 -3185.9)"><g id="id33" fill="none"><rect id="rect562" class="BoundingBox" x="8899" y="5799" width="1003" height="1003"/><path id="path564" d="m9400 6800h-500v-1e3h1e3v1e3h-500z" stroke="#00f"/></g></g><g id="g566" class="com.sun.star.drawing.CustomShape"
+transform="translate(-3285.9 -3185.9)"><g id="id34" fill="none"><rect id="rect569" class="BoundingBox" x="10199" y="4499" width="1003" height="1003"/><path id="path571" d="m10700 5500h-500v-1e3h1e3v1e3h-500z" stroke="#f00"/></g></g><g id="g573" class="com.sun.star.drawing.CustomShape" transform="translate(-3285.9 -3185.9)"><g id="id35" fill="none"><rect id="rect576" class="BoundingBox" x="10199" y="3299" width="1003" height="1003"/><path id="path578" d="m10700 4300h-500v-1e3h1e3v1e3h-500z" stroke="#0c0"/></g></g><g id="g580" class="com.sun.star.drawing.CustomShape" transform="translate(-3285.9 -3185.9)"><g id="id36" fill="none"><rect id="rect583" class="BoundingBox" x="14399" y="3299" width="1003" height="1003"/><path id="path585" d="m14900 4300h-500v-1e3h1e3v1e3h-500z" stroke="#0c0"/></g></g><g id="g587" class="com.sun.star.drawing.CustomShape" transform="translate(-3285.9 -3185.9)"><g
+id="id37" fill="none"><rect id="rect590" class="BoundingBox" x="14399" y="5799" width="1003" height="1003"/><path id="path592" d="m14900 6800h-500v-1e3h1e3v1e3h-500z" stroke="#0c0"/></g></g><g id="g594" class="com.sun.star.drawing.CustomShape" transform="translate(-3285.9 -3185.9)"><g id="id38" fill="none"><rect id="rect597" class="BoundingBox" x="11799" y="5799" width="1003" height="1003"/><path id="path599" d="m12300 6800h-500v-1e3h1e3v1e3h-500z" stroke="#0c0"/></g></g><g id="g601" class="com.sun.star.drawing.CustomShape" transform="translate(-3285.9 -3185.9)"><g id="id39" fill="none"><rect id="rect604" class="BoundingBox" x="14399" y="4499" width="1003" height="1003"/><path id="path606" d="m14900 5500h-500v-1e3h1e3v1e3h-500z" stroke="#00f"/></g></g><g id="g608" class="com.sun.star.drawing.CustomShape" transform="translate(-3285.9 -3185.9)"><g id="id40" fill="none"><rect id="rect611"
+class="BoundingBox" x="13099" y="5799" width="1003" height="1003"/><path id="path613" d="m13600 6800h-500v-1e3h1e3v1e3h-500z" stroke="#f00"/></g></g><g id="g615" class="com.sun.star.drawing.CustomShape" transform="translate(-3285.9 -3185.9)"><g id="id41" fill="none"><rect id="rect618" class="BoundingBox" x="16099" y="5799" width="1003" height="1003"/><path id="path620" d="m16600 6800h-500v-1e3h1e3v1e3h-500z" stroke="#f00"/></g></g><g id="g622" class="com.sun.star.drawing.CustomShape" transform="translate(-3398.8 -3185.9)"><g id="id42" fill="none"><rect id="rect625" class="BoundingBox" x="18799" y="5799" width="1003" height="1003"/><path id="path627" d="m19300 6800h-500v-1e3h1e3v1e3h-500z" stroke="#f00"/></g></g><g id="g629" class="com.sun.star.drawing.CustomShape" transform="translate(-3398.8 -3185.9)"><g id="id43" fill="none"><rect id="rect632" class="BoundingBox" x="18799" y="3299"
+width="1003" height="1003"/><path id="path634" d="m19300 4300h-500v-1e3h1e3v1e3h-500z" stroke="#f00"/></g></g><g id="g636" class="com.sun.star.drawing.CustomShape" transform="translate(-3285.9 -3185.9)"><g id="id44" fill="none"><rect id="rect639" class="BoundingBox" x="17399" y="5799" width="1003" height="1003"/><path id="path641" d="m17900 6800h-500v-1e3h1e3v1e3h-500z" stroke="#0c0"/></g></g><g id="g643" class="com.sun.star.drawing.CustomShape" transform="translate(-3398.8 -3185.9)"><g id="id45" fill="none"><rect id="rect646" class="BoundingBox" x="18799" y="4499" width="1003" height="1003"/><path id="path648" d="m19300 5500h-500v-1e3h1e3v1e3h-500z" stroke="#0c0"/></g></g></svg>
diff --git a/Documentation/media/uapi/v4l/pixfmt-006.rst b/Documentation/media/uapi/v4l/colorspaces-defs.rst
index 7ae7dcf73f63..410907fe9415 100644
--- a/Documentation/media/uapi/v4l/pixfmt-006.rst
+++ b/Documentation/media/uapi/v4l/colorspaces-defs.rst
@@ -76,6 +76,8 @@ whole range, 0-255, dividing the angular value by 1.41. The enum
.. c:type:: v4l2_xfer_func
+.. tabularcolumns:: |p{5.5cm}|p{12.0cm}|
+
.. flat-table:: V4L2 Transfer Function
:header-rows: 1
:stub-columns: 0
@@ -97,7 +99,7 @@ whole range, 0-255, dividing the angular value by 1.41. The enum
* - ``V4L2_XFER_FUNC_DCI_P3``
- Use the DCI-P3 transfer function.
* - ``V4L2_XFER_FUNC_SMPTE2084``
- - Use the SMPTE 2084 transfer function.
+ - Use the SMPTE 2084 transfer function. See :ref:`xf-smpte-2084`.
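For context, an application would normally request one of these transfer functions through the ``xfer_func`` field of struct v4l2_pix_format. The snippet below is only a rough sketch, assuming an output device that honours application-set colorspace information; the device path, function name and error handling are placeholders, not part of the documented API change.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Rough sketch: ask an output device for BT.2020 + SMPTE 2084 (PQ). */
int request_smpte2084(const char *dev)	/* e.g. "/dev/video0" (placeholder) */
{
	struct v4l2_format fmt;
	int fd = open(dev, O_RDWR);
	int ret;

	if (fd < 0)
		return -1;

	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	ret = ioctl(fd, VIDIOC_G_FMT, &fmt);
	if (!ret) {
		fmt.fmt.pix.colorspace = V4L2_COLORSPACE_BT2020;
		fmt.fmt.pix.xfer_func = V4L2_XFER_FUNC_SMPTE2084;
		/* mark the extended v4l2_pix_format fields as valid */
		fmt.fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
		ret = ioctl(fd, VIDIOC_S_FMT, &fmt);
	}

	close(fd);
	return ret;
}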
diff --git a/Documentation/media/uapi/v4l/pixfmt-007.rst b/Documentation/media/uapi/v4l/colorspaces-details.rst
index 0c30ee2577d3..b5d551b9cc8f 100644
--- a/Documentation/media/uapi/v4l/pixfmt-007.rst
+++ b/Documentation/media/uapi/v4l/colorspaces-details.rst
@@ -418,6 +418,11 @@ Inverse Transfer function:
L = \left( \frac{L' + 0.099}{1.099}\right) ^{\frac{1}{0.45} }\text{, for } L' \ge 0.081
+Please note that while Rec. 709 is defined as the default transfer function
+by the :ref:`itu2020` standard, in practice this colorspace is often used
+with the :ref:`xf-smpte-2084`. In particular, Ultra HD Blu-ray discs use
+this combination.
+
The luminance (Y') and color difference (Cb and Cr) are obtained with
the following ``V4L2_YCBCR_ENC_BT2020`` encoding:
@@ -758,3 +763,45 @@ scaled to [-128…128] and then clipped to [-128…127].
``V4L2_COLORSPACE_JPEG`` can be considered to be an abbreviation for
``V4L2_COLORSPACE_SRGB``, ``V4L2_YCBCR_ENC_601`` and
``V4L2_QUANTIZATION_FULL_RANGE``.
+
+***************************************
+Detailed Transfer Function Descriptions
+***************************************
+
+.. _xf-smpte-2084:
+
+Transfer Function SMPTE 2084 (V4L2_XFER_FUNC_SMPTE2084)
+=======================================================
+
+The :ref:`smpte2084` standard defines the transfer function used by
+High Dynamic Range content.
+
+Constants:
+ m1 = (2610 / 4096) / 4
+
+ m2 = (2523 / 4096) * 128
+
+ c1 = 3424 / 4096
+
+ c2 = (2413 / 4096) * 32
+
+ c3 = (2392 / 4096) * 32
+
+Transfer function:
+ L' = ((c1 + c2 * L\ :sup:`m1`) / (1 + c3 * L\ :sup:`m1`))\ :sup:`m2`
+
+Inverse Transfer function:
+ L = (max(L'\ :sup:`1/m2` - c1, 0) / (c2 - c3 *
+ L'\ :sup:`1/m2`))\ :sup:`1/m1`
+
+Take care when converting between this transfer function and non-HDR transfer
+functions: the linear RGB values [0…1] of HDR content map to a luminance range
+of 0 to 10000 cd/m\ :sup:`2`, whereas the linear RGB values of non-HDR content
+(aka Standard Dynamic Range or SDR) map to a luminance range of 0 to 100 cd/m\ :sup:`2`.
+
+To go from SDR to HDR, divide L by 100 first. To go in the other direction,
+multiply L by 100 and clip the result to [0…1]; this clamps all luminance
+values over 100 cd/m\ :sup:`2` to 100 cd/m\ :sup:`2`.
+
+There are better conversion methods; see e.g. :ref:`colimg` for more in-depth
+information about this.
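As a quick illustration, the constants and formulas above map directly to the minimal C sketch below (the function names are invented for the example, linear 1.0 is taken to correspond to 10000 cd/m\ :sup:`2`, and linking needs -lm):

#include <math.h>

static const double m1 = (2610.0 / 4096.0) / 4.0;	/* 0.1593017578125 */
static const double m2 = (2523.0 / 4096.0) * 128.0;	/* 78.84375 */
static const double c1 = 3424.0 / 4096.0;		/* 0.8359375 */
static const double c2 = (2413.0 / 4096.0) * 32.0;	/* 18.8515625 */
static const double c3 = (2392.0 / 4096.0) * 32.0;	/* 18.6875 */

/* Transfer function: linear light L in [0..1] to PQ-encoded L' */
static double smpte2084_encode(double L)
{
	double Lm1 = pow(L, m1);

	return pow((c1 + c2 * Lm1) / (1.0 + c3 * Lm1), m2);
}

/* Inverse transfer function: PQ-encoded L' in [0..1] back to linear L */
static double smpte2084_decode(double Lp)
{
	double x = pow(Lp, 1.0 / m2);

	return pow(fmax(x - c1, 0.0) / (c2 - c3 * x), 1.0 / m1);
}

/* Naive SDR <-> HDR scaling as described above (100 vs 10000 cd/m^2) */
static double sdr_to_hdr_linear(double L_sdr)
{
	return L_sdr / 100.0;
}

static double hdr_to_sdr_linear(double L_hdr)
{
	double L = L_hdr * 100.0;

	return L > 1.0 ? 1.0 : L;	/* clips everything above 100 cd/m^2 */
}

The clip in hdr_to_sdr_linear() is exactly the clamping to 100 cd/m\ :sup:`2` mentioned in the text above.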
diff --git a/Documentation/media/uapi/v4l/constraints.svg b/Documentation/media/uapi/v4l/constraints.svg
index f710ee46b1f8..7e5d7185ca49 100644
--- a/Documentation/media/uapi/v4l/constraints.svg
+++ b/Documentation/media/uapi/v4l/constraints.svg
@@ -1,346 +1,10 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<svg
- xmlns:dc="http://purl.org/dc/elements/1.1/"
- xmlns:cc="http://creativecommons.org/ns#"
- xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
- xmlns:svg="http://www.w3.org/2000/svg"
- xmlns="http://www.w3.org/2000/svg"
- xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
- xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
- version="1.2"
- width="249.00998mm"
- height="143.00999mm"
- viewBox="0 0 24900.998 14300.999"
- preserveAspectRatio="xMidYMid"
- xml:space="preserve"
- id="svg2"
- inkscape:version="0.91 r13725"
- sodipodi:docname="constraints.svg"
- style="fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round"><metadata
- id="metadata325"><rdf:RDF><cc:Work
- rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title /></cc:Work></rdf:RDF></metadata><sodipodi:namedview
- pagecolor="#ffffff"
- bordercolor="#666666"
- borderopacity="1"
- objecttolerance="10"
- gridtolerance="10"
- guidetolerance="10"
- inkscape:pageopacity="0"
- inkscape:pageshadow="2"
- inkscape:window-width="1920"
- inkscape:window-height="997"
- id="namedview323"
- showgrid="false"
- inkscape:zoom="1.0818519"
- inkscape:cx="270.29272"
- inkscape:cy="249.83854"
- inkscape:window-x="1920"
- inkscape:window-y="30"
- inkscape:window-maximized="1"
- inkscape:current-layer="svg2"
- fit-margin-top="0"
- fit-margin-left="0"
- fit-margin-right="0"
- fit-margin-bottom="0" /><defs
- class="ClipPathGroup"
- id="defs4"><marker
- inkscape:isstock="true"
- style="overflow:visible"
- id="marker6261"
- refX="0"
- refY="0"
- orient="auto"
- inkscape:stockid="Arrow1Mend"><path
- transform="matrix(-0.4,0,0,-0.4,-4,0)"
- style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:1pt;stroke-opacity:1"
- d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
- id="path6263"
- inkscape:connector-curvature="0" /></marker><marker
- inkscape:stockid="Arrow1Mend"
- orient="auto"
- refY="0"
- refX="0"
- id="marker6125"
- style="overflow:visible"
- inkscape:isstock="true"
- inkscape:collect="always"><path
- id="path6127"
- d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
- style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:1pt;stroke-opacity:1"
- transform="matrix(-0.4,0,0,-0.4,-4,0)"
- inkscape:connector-curvature="0" /></marker><marker
- inkscape:isstock="true"
- style="overflow:visible"
- id="marker6001"
- refX="0"
- refY="0"
- orient="auto"
- inkscape:stockid="Arrow1Mend"
- inkscape:collect="always"><path
- transform="matrix(-0.4,0,0,-0.4,-4,0)"
- style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:1pt;stroke-opacity:1"
- d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
- id="path6003"
- inkscape:connector-curvature="0" /></marker><marker
- inkscape:stockid="Arrow1Mend"
- orient="auto"
- refY="0"
- refX="0"
- id="marker5693"
- style="overflow:visible"
- inkscape:isstock="true"
- inkscape:collect="always"><path
- id="path5695"
- d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
- style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:1pt;stroke-opacity:1"
- transform="matrix(-0.4,0,0,-0.4,-4,0)"
- inkscape:connector-curvature="0" /></marker><marker
- inkscape:isstock="true"
- style="overflow:visible"
- id="marker5575"
- refX="0"
- refY="0"
- orient="auto"
- inkscape:stockid="Arrow1Mend"
- inkscape:collect="always"><path
- transform="matrix(-0.4,0,0,-0.4,-4,0)"
- style="fill:#000080;fill-opacity:1;fill-rule:evenodd;stroke:#000080;stroke-width:1pt;stroke-opacity:1"
- d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
- id="path5577"
- inkscape:connector-curvature="0" /></marker><marker
- inkscape:stockid="Arrow1Mend"
- orient="auto"
- refY="0"
- refX="0"
- id="marker5469"
- style="overflow:visible"
- inkscape:isstock="true"
- inkscape:collect="always"><path
- id="path5471"
- d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
- style="fill:#000080;fill-opacity:1;fill-rule:evenodd;stroke:#000080;stroke-width:1pt;stroke-opacity:1"
- transform="matrix(-0.4,0,0,-0.4,-4,0)"
- inkscape:connector-curvature="0" /></marker><marker
- inkscape:stockid="Arrow1Mend"
- orient="auto"
- refY="0"
- refX="0"
- id="marker5259"
- style="overflow:visible"
- inkscape:isstock="true"><path
- id="path5261"
- d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
- style="fill:#000080;fill-opacity:1;fill-rule:evenodd;stroke:#000080;stroke-width:1pt;stroke-opacity:1"
- transform="matrix(-0.4,0,0,-0.4,-4,0)"
- inkscape:connector-curvature="0" /></marker><marker
- inkscape:stockid="Arrow2Mend"
- orient="auto"
- refY="0"
- refX="0"
- id="Arrow2Mend"
- style="overflow:visible"
- inkscape:isstock="true"><path
- id="path4241"
- style="fill:#000080;fill-opacity:1;fill-rule:evenodd;stroke:#000080;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
- d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
- transform="scale(-0.6,-0.6)"
- inkscape:connector-curvature="0" /></marker></defs><defs
- id="defs9" /><defs
- id="defs100" /><defs
- id="defs123" /><defs
- class="TextShapeIndex"
- id="defs134" /><defs
- class="EmbeddedBulletChars"
- id="defs138" /><defs
- class="TextEmbeddedBitmaps"
- id="defs167" /><g
- class="com.sun.star.drawing.CustomShape"
- id="g204"
- transform="translate(-1350,-3250)"><g
- id="id6"><rect
- class="BoundingBox"
- x="1350"
- y="3250"
- width="24901"
- height="14301"
- id="rect207"
- style="fill:none;stroke:none" /><path
- d="m 13800,17500 -12400,0 0,-14200 24800,0 0,14200 -12400,0 z"
- id="path209"
- inkscape:connector-curvature="0"
- style="fill:#ffffff;stroke:none" /><path
- d="m 13800,17500 -12400,0 0,-14200 24800,0 0,14200 -12400,0 z"
- id="path211"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#ff0000;stroke-width:100;stroke-linejoin:round" /><text
- class="TextShape"
- id="text213"><tspan
- class="TextParagraph"
- font-size="846px"
- font-weight="400"
- id="tspan215"
- style="font-weight:400;font-size:846px;font-family:'Liberation Sans', sans-serif"><tspan
- class="TextPosition"
- x="1652"
- y="17093"
- id="tspan217"><tspan
- id="tspan219"
- style="fill:#ff0000;stroke:none" /><tspan
- id="tspan221"
- style="fill:#ff0000;stroke:none">V4L2_SEL_FLAG_GE</tspan></tspan></tspan></text>
-</g></g><rect
- class="BoundingBox"
- x="3000"
- y="2200"
- width="18101"
- height="10101"
- id="rect226"
- style="fill:none;stroke:none" /><path
- d="m 12050,12250 -9000,0 0,-10000 18000,0 0,10000 -9000,0 z"
- id="path228"
- inkscape:connector-curvature="0"
- style="fill:#ffffff;stroke:none" /><path
- d="m 12050,12250 -9000,0 0,-10000 18000,0 0,10000 -9000,0 z"
- id="path230"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:100;stroke-linejoin:round" /><text
- class="TextShape"
- id="text232"
- x="-1350"
- y="-3250"><tspan
- class="TextParagraph"
- font-size="987px"
- font-weight="400"
- id="tspan234"
- style="font-weight:400;font-size:987px;font-family:'Liberation Sans', sans-serif"><tspan
- class="TextPosition"
- x="3227"
- y="11503"
- id="tspan236"><tspan
- id="tspan238"
- style="fill:#000000;stroke:none" /><tspan
- id="tspan240"
- style="fill:#000000;stroke:none">ORIGINAL</tspan></tspan></tspan></text>
-<g
- class="com.sun.star.drawing.CustomShape"
- id="g242"
- transform="translate(-1350,-3250)"><g
- id="id8"><rect
- class="BoundingBox"
- x="7050"
- y="7950"
- width="7901"
- height="5501"
- id="rect245"
- style="fill:none;stroke:none" /><path
- d="m 11000,13400 -3900,0 0,-5400 7800,0 0,5400 -3900,0 z"
- id="path247"
- inkscape:connector-curvature="0"
- style="fill:#ffffff;stroke:none" /><path
- d="m 11000,13400 -3900,0 0,-5400 7800,0 0,5400 -3900,0 z"
- id="path249"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4;stroke-width:100;stroke-linejoin:round" /><text
- class="TextShape"
- id="text251"><tspan
- class="TextParagraph"
- font-size="776px"
- font-weight="400"
- id="tspan253"
- style="font-weight:400;font-size:776px;font-family:'Liberation Sans', sans-serif"><tspan
- class="TextPosition"
- x="7228"
- y="10969"
- id="tspan255"><tspan
- id="tspan257"
- style="fill:#000080;stroke:none">V4L2_SEL_FLAG_LE</tspan></tspan></tspan></text>
-</g></g><rect
- class="BoundingBox"
- x="13700"
- y="7100"
- width="7101"
- height="101"
- id="rect262"
- style="fill:none;stroke:none" /><path
- d="m 20750,7150 -7000,0"
- id="path264"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000080;stroke-width:99.99134064;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Mend)" /><rect
- class="BoundingBox"
- x="3400"
- y="7100"
- width="2101"
- height="101"
- id="rect269"
- style="fill:none;stroke:none" /><path
- d="m 3450,7150 2000,0"
- id="path271"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000080;stroke-width:100;stroke-linejoin:round;marker-end:url(#marker5575)" /><rect
- class="BoundingBox"
- x="9800"
- y="2900"
- width="101"
- height="1501"
- id="rect276"
- style="fill:none;stroke:none" /><path
- d="m 9850,2950 0,1400"
- id="path278"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000080;stroke-width:100;stroke-linejoin:round;marker-end:url(#marker5259)" /><rect
- class="BoundingBox"
- x="9600"
- y="10600"
- width="101"
- height="1301"
- id="rect283"
- style="fill:none;stroke:none" /><path
- d="m 9650,11850 0,-1200"
- id="path285"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000080;stroke-width:100;stroke-linejoin:round;marker-end:url(#marker5469)" /><rect
- class="BoundingBox"
- x="450"
- y="6850"
- width="2051"
- height="601"
- id="rect290"
- style="fill:none;stroke:none" /><path
- d="m 2450,7150 -2000.8696,0"
- id="path292"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#ff0000;stroke-width:132.48202515;stroke-linejoin:round;marker-end:url(#marker6125)" /><rect
- class="BoundingBox"
- x="21600"
- y="6750"
- width="2651"
- height="601"
- id="rect299"
- style="fill:none;stroke:none" /><path
- d="m 21650,7050 2522.609,0"
- id="path301"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#ff0000;stroke-width:120.40660858;stroke-linejoin:round;marker-end:url(#marker6001)" /><rect
- class="BoundingBox"
- x="9550"
- y="550"
- width="601"
- height="1451"
- id="rect308"
- style="fill:none;stroke:none" /><path
- d="m 9836.957,1950 0,-1453.0435"
- id="path310"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#ff0000;stroke-width:164.03721619;stroke-linejoin:round;marker-end:url(#marker6261)" /><rect
- class="BoundingBox"
- x="9350"
- y="12500"
- width="601"
- height="1451"
- id="rect317"
- style="fill:none;stroke:none" /><path
- d="m 9650,12550 0,1505.217"
- id="path319"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#ff0000;stroke-width:166.95626831;stroke-linejoin:round;marker-end:url(#marker5693)" /></svg> \ No newline at end of file
+<?xml version="1.0" encoding="UTF-8"?>
+<svg id="svg2" width="249.01mm" height="143.01mm" fill-rule="evenodd" stroke-linejoin="round" stroke-width="28.222" preserveAspectRatio="xMidYMid" version="1.2" viewBox="0 0 24900.998 14300.999" xml:space="preserve" xmlns="http://www.w3.org/2000/svg" xmlns:cc="http://creativecommons.org/ns#" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"><metadata id="metadata325"><rdf:RDF><cc:Work rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type rdf:resource="http://purl.org/dc/dcmitype/StillImage"/><dc:title/></cc:Work></rdf:RDF></metadata><defs id="defs4" class="ClipPathGroup"><marker id="marker6261" overflow="visible" orient="auto"><path id="path6263" transform="matrix(-.4 0 0 -.4 -4 0)" d="m0 0 5-5-17.5 5 17.5 5-5-5z" fill="#f00" fill-rule="evenodd" stroke="#f00" stroke-width="1pt"/></marker><marker id="marker6125" overflow="visible"
+orient="auto"><path id="path6127" transform="matrix(-.4 0 0 -.4 -4 0)" d="m0 0 5-5-17.5 5 17.5 5-5-5z" fill="#f00" fill-rule="evenodd" stroke="#f00" stroke-width="1pt"/></marker><marker id="marker6001" overflow="visible" orient="auto"><path id="path6003" transform="matrix(-.4 0 0 -.4 -4 0)" d="m0 0 5-5-17.5 5 17.5 5-5-5z" fill="#f00" fill-rule="evenodd" stroke="#f00" stroke-width="1pt"/></marker><marker id="marker5693" overflow="visible" orient="auto"><path id="path5695" transform="matrix(-.4 0 0 -.4 -4 0)" d="m0 0 5-5-17.5 5 17.5 5-5-5z" fill="#f00" fill-rule="evenodd" stroke="#f00" stroke-width="1pt"/></marker><marker id="marker5575" overflow="visible" orient="auto"><path id="path5577" transform="matrix(-.4 0 0 -.4 -4 0)" d="m0 0 5-5-17.5 5 17.5 5-5-5z" fill="#000080" fill-rule="evenodd" stroke="#000080" stroke-width="1pt"/></marker><marker id="marker5469" overflow="visible"
+orient="auto"><path id="path5471" transform="matrix(-.4 0 0 -.4 -4 0)" d="m0 0 5-5-17.5 5 17.5 5-5-5z" fill="#000080" fill-rule="evenodd" stroke="#000080" stroke-width="1pt"/></marker><marker id="marker5259" overflow="visible" orient="auto"><path id="path5261" transform="matrix(-.4 0 0 -.4 -4 0)" d="m0 0 5-5-17.5 5 17.5 5-5-5z" fill="#000080" fill-rule="evenodd" stroke="#000080" stroke-width="1pt"/></marker><marker id="Arrow2Mend" overflow="visible" orient="auto"><path id="path4241" transform="scale(-.6)" d="m8.7186 4.0337-10.926-4.0177 10.926-4.0177c-1.7455 2.3721-1.7354 5.6175-6e-7 8.0354z" fill="#000080" fill-rule="evenodd" stroke="#000080" stroke-linejoin="round" stroke-width=".625"/></marker></defs><g id="g204" class="com.sun.star.drawing.CustomShape" transform="translate(-1350,-3250)"><g id="id6"><rect id="rect207" class="BoundingBox" x="1350" y="3250" width="24901" height="14301"
+fill="none"/><path id="path209" d="m13800 17500h-12400v-14200h24800v14200h-12400z" fill="#fff"/><path id="path211" d="m13800 17500h-12400v-14200h24800v14200h-12400z" fill="none" stroke="#f00" stroke-linejoin="round" stroke-width="100"/><text id="text213" class="TextShape"><tspan id="tspan215" class="TextParagraph" font-family="sans-serif" font-size="846px" font-weight="400"><tspan id="tspan217" class="TextPosition" x="1652" y="17093" fill="#ff0000"><tspan id="tspan219"/><tspan id="tspan221">V4L2_SEL_FLAG_GE</tspan></tspan></tspan></text>
+</g></g><rect id="rect226" class="BoundingBox" x="3e3" y="2200" width="18101" height="10101" fill="none"/><path id="path228" d="m12050 12250h-9e3v-1e4h18000v1e4h-9e3z" fill="#fff"/><path id="path230" d="m12050 12250h-9e3v-1e4h18000v1e4h-9e3z" fill="none" stroke="#000" stroke-linejoin="round" stroke-width="100"/><text id="text232" class="TextShape" x="-1350" y="-3250"><tspan id="tspan234" class="TextParagraph" font-family="sans-serif" font-size="987px" font-weight="400"><tspan id="tspan236" class="TextPosition" x="3227" y="11503" fill="#000000"><tspan id="tspan238"/><tspan id="tspan240">ORIGINAL</tspan></tspan></tspan></text>
+<g id="g242" class="com.sun.star.drawing.CustomShape" transform="translate(-1350,-3250)"><g id="id8"><rect id="rect245" class="BoundingBox" x="7050" y="7950" width="7901" height="5501" fill="none"/><path id="path247" d="m11000 13400h-3900v-5400h7800v5400h-3900z" fill="#fff"/><path id="path249" d="m11000 13400h-3900v-5400h7800v5400h-3900z" fill="none" stroke="#3465a4" stroke-linejoin="round" stroke-width="100"/><text id="text251" class="TextShape"><tspan id="tspan253" class="TextParagraph" font-family="sans-serif" font-size="776px" font-weight="400"><tspan id="tspan255" class="TextPosition" x="7228" y="10969"><tspan id="tspan257" fill="#000080">V4L2_SEL_FLAG_LE</tspan></tspan></tspan></text>
+</g></g><rect id="rect262" class="BoundingBox" x="13700" y="7100" width="7101" height="101" fill="none"/><path id="path264" d="m20750 7150h-7e3" fill="none" marker-end="url(#Arrow2Mend)" stroke="#000080" stroke-linejoin="round" stroke-width="99.991"/><rect id="rect269" class="BoundingBox" x="3400" y="7100" width="2101" height="101" fill="none"/><path id="path271" d="m3450 7150h2e3" fill="none" marker-end="url(#marker5575)" stroke="#000080" stroke-linejoin="round" stroke-width="100"/><rect id="rect276" class="BoundingBox" x="9800" y="2900" width="101" height="1501" fill="none"/><path id="path278" d="m9850 2950v1400" fill="none" marker-end="url(#marker5259)" stroke="#000080" stroke-linejoin="round" stroke-width="100"/><rect id="rect283" class="BoundingBox" x="9600" y="10600" width="101" height="1301" fill="none"/><path id="path285" d="m9650 11850v-1200" fill="none"
+marker-end="url(#marker5469)" stroke="#000080" stroke-linejoin="round" stroke-width="100"/><rect id="rect290" class="BoundingBox" x="450" y="6850" width="2051" height="601" fill="none"/><path id="path292" d="m2450 7150h-2000.9" fill="none" marker-end="url(#marker6125)" stroke="#f00" stroke-linejoin="round" stroke-width="132.48"/><rect id="rect299" class="BoundingBox" x="21600" y="6750" width="2651" height="601" fill="none"/><path id="path301" d="m21650 7050h2522.6" fill="none" marker-end="url(#marker6001)" stroke="#f00" stroke-linejoin="round" stroke-width="120.41"/><rect id="rect308" class="BoundingBox" x="9550" y="550" width="601" height="1451" fill="none"/><path id="path310" d="m9837 1950v-1453" fill="none" marker-end="url(#marker6261)" stroke="#f00" stroke-linejoin="round" stroke-width="164.04"/><rect id="rect317" class="BoundingBox" x="9350" y="12500" width="601" height="1451"
+fill="none"/><path id="path319" d="m9650 12550v1505.2" fill="none" marker-end="url(#marker5693)" stroke="#f00" stroke-linejoin="round" stroke-width="166.96"/></svg>
diff --git a/Documentation/media/uapi/v4l/crop.svg b/Documentation/media/uapi/v4l/crop.svg
index dc9a471bae6b..3878fe4c49e9 100644
--- a/Documentation/media/uapi/v4l/crop.svg
+++ b/Documentation/media/uapi/v4l/crop.svg
@@ -18,33 +18,34 @@
viewBox="0 0 739.11388 339.6584"
sodipodi:docname="crop.svg"><metadata
id="metadata8"><rdf:RDF><cc:Work
- rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title /></cc:Work></rdf:RDF></metadata><defs
+ rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title /></cc:Work></rdf:RDF></metadata><defs
id="defs6"><clipPath
clipPathUnits="userSpaceOnUse"
id="clipPath44"><path
- d="m 0,0 0,1895 4118,0 L 4118,0 0,0 Z m 3051.62,250.48 8.19,17.01 -46.93,23.31 29.61,-25.515 -38.12,8.505 47.25,-23.31 z m -1559.25,800.73 -8.5,-17.01 46.93,-23.31 -29.29,25.2 37.8,-8.19 -46.94,23.31 z"
- id="path46"
- inkscape:connector-curvature="0"
- style="clip-rule:evenodd" /></clipPath><clipPath
+ d="m 0,0 0,1895 4118,0 L 4118,0 0,0 Z m 3051.62,250.48 8.19,17.01 -46.93,23.31 29.61,-25.515 -38.12,8.505 47.25,-23.31 z m -1559.25,800.73 -8.5,-17.01 46.93,-23.31 -29.29,25.2 37.8,-8.19 -46.94,23.31 z"
+ id="path46"
+ inkscape:connector-curvature="0"
+ style="clip-rule:evenodd" /></clipPath><clipPath
clipPathUnits="userSpaceOnUse"
id="clipPath64"><path
- d="m 0,0 0,1895 4118,0 0,-1626 -1,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -1,0 0,-1 1,0 0,-1 1,0 0,-1 2,0 0,-1 1,0 0,-1 1,0 0,-1 1,0 0,-1 1,0 0,-1 1,0 0,-1 2,0 0,-1 1,0 0,-1 1,0 0,-1 1,0 0,-1 1,0 0,-1 1,0 0,-1 1,0 0,-1 2,0 0,-1 1,0 0,-1 1,0 0,-1 1,0 0,-1 1,0 0,-1 1,0 0,-1 -4,0 0,1 -5,0 0,1 -4,0 0,1 -5,0 0,1 -4,0 0,1 -4,0 0,1 -4,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 1,0 0,3 1,0 0,2 1,0 0,2 1,0 L 4118,0 0,0 Z m 4074,272 0,-1 1,0 0,1 -1,0 z m -1486,743 0,-1 1,0 0,1 -1,0 z m -2,1 0,-1 1,0 0,1 -1,0 0,1 -1,0 0,1 -1,0 0,1 -1,0 0,1 -1,0 0,1 -1,0 0,1 -2,0 0,1 -1,0 0,1 -1,0 0,1 -1,0 0,1 -1,0 0,1 -1,0 0,1 -2,0 0,1 -1,0 0,1 -1,0 0,1 -1,0 0,1 -1,0 0,1 -1,0 0,1 -2,0 0,1 -1,0 0,2 2,0 0,-1 4,0 0,-1 5,0 0,-1 4,0 0,-1 5,0 0,-1 5,0 0,-1 4,0 0,-1 3,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -3,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,-2 -1,0 0,-2 -1,0 0,-2 -1,0 0,-2 -1,0 0,-2 -1,0 0,-2 -1,0 0,-2 -1,0 0,-2 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 z"
- id="path66"
- inkscape:connector-curvature="0"
- style="clip-rule:evenodd" /></clipPath><clipPath
+ d="m 0,0 0,1895 4118,0 0,-1626 -1,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -1,0 0,-1 1,0 0,-1 1,0 0,-1 2,0 0,-1 1,0 0,-1 1,0 0,-1 1,0 0,-1 1,0 0,-1 1,0 0,-1 2,0 0,-1 1,0 0,-1 1,0 0,-1 1,0 0,-1 1,0 0,-1 1,0 0,-1 1,0 0,-1 2,0 0,-1 1,0 0,-1 1,0 0,-1 1,0 0,-1 1,0 0,-1 1,0 0,-1 -4,0 0,1 -5,0 0,1 -4,0 0,1 -5,0 0,1 -4,0 0,1 -4,0 0,1 -4,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 1,0 0,3 1,0 0,2 1,0 0,2 1,0 L 4118,0 0,0 Z m 4074,272 0,-1 1,0 0,1 -1,0 z m -1486,743 0,-1 1,0 0,1 -1,0 z m -2,1 0,-1 1,0 0,1 -1,0 0,1 -1,0 0,1 -1,0 0,1 -1,0 0,1 -1,0 0,1 -1,0 0,1 -2,0 0,1 -1,0 0,1 -1,0 0,1 -1,0 0,1 -1,0 0,1 -1,0 0,1 -2,0 0,1 -1,0 0,1 -1,0 0,1
+-1,0 0,1 -1,0 0,1 -1,0 0,1 -2,0 0,1 -1,0 0,2 2,0 0,-1 4,0 0,-1 5,0 0,-1 4,0 0,-1 5,0 0,-1 5,0 0,-1 4,0 0,-1 3,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -3,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,1 -2,0 0,-2 -1,0 0,-2 -1,0 0,-2 -1,0 0,-2 -1,0 0,-2 -1,0 0,-2 -1,0 0,-2 -1,0 0,-2 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 0,-1 2,0 z"
+ id="path66"
+ inkscape:connector-curvature="0"
+ style="clip-rule:evenodd" /></clipPath><clipPath
clipPathUnits="userSpaceOnUse"
id="clipPath84"><path
- d="m 0,0 0,1895 4118,0 0,-136 -3,0 0,-1 -11,0 0,-1 -11,0 0,-1 -11,0 0,-1 -11,0 0,-1 5,0 0,-1 6,0 0,-1 7,0 0,-1 6,0 0,-1 6,0 0,-1 4,0 0,-1 -1,0 0,-1 -3,0 0,-1 -3,0 0,-1 -2,0 0,-1 -3,0 0,-1 -3,0 0,-1 -3,0 0,-1 -3,0 0,-1 -3,0 0,-1 -3,0 0,-1 -3,0 0,-1 7,0 0,1 11,0 0,1 11,0 0,1 11,0 0,1 3,0 L 4118,0 0,0 Z m 2552,1599 0,-1 2,0 0,1 11,0 0,1 11,0 0,1 11,0 0,1 11,0 0,1 -4,0 0,1 -7,0 0,1 -6,0 0,1 -7,0 0,1 -6,0 0,1 -3,0 0,1 2,0 0,1 2,0 0,1 3,0 0,1 3,0 0,1 3,0 0,1 3,0 0,1 3,0 0,1 2,0 0,1 3,0 0,1 3,0 0,1 3,0 0,1 -7,0 0,-1 -12,0 0,-1 -11,0 0,-1 -11,0 0,-1 -4,0 0,-1 1,0 0,-12 1,0 0,-4 z"
- id="path86"
- inkscape:connector-curvature="0"
- style="clip-rule:evenodd" /></clipPath><clipPath
+ d="m 0,0 0,1895 4118,0 0,-136 -3,0 0,-1 -11,0 0,-1 -11,0 0,-1 -11,0 0,-1 -11,0 0,-1 5,0 0,-1 6,0 0,-1 7,0 0,-1 6,0 0,-1 6,0 0,-1 4,0 0,-1 -1,0 0,-1 -3,0 0,-1 -3,0 0,-1 -2,0 0,-1 -3,0 0,-1 -3,0 0,-1 -3,0 0,-1 -3,0 0,-1 -3,0 0,-1 -3,0 0,-1 -3,0 0,-1 7,0 0,1 11,0 0,1 11,0 0,1 11,0 0,1 3,0 L 4118,0 0,0 Z m 2552,1599 0,-1 2,0 0,1 11,0 0,1 11,0 0,1 11,0 0,1 11,0 0,1 -4,0 0,1 -7,0 0,1 -6,0 0,1 -7,0 0,1 -6,0 0,1 -3,0 0,1 2,0 0,1 2,0 0,1 3,0 0,1 3,0 0,1 3,0 0,1 3,0 0,1 3,0 0,1 2,0 0,1 3,0 0,1 3,0 0,1 3,0 0,1 -7,0 0,-1 -12,0 0,-1 -11,0 0,-1 -11,0 0,-1 -4,0 0,-1 1,0 0,-12 1,0 0,-4 z"
+ id="path86"
+ inkscape:connector-curvature="0"
+ style="clip-rule:evenodd" /></clipPath><clipPath
clipPathUnits="userSpaceOnUse"
id="clipPath104"><path
- d="m 0,0 0,1895 4118,0 L 4118,0 0,0 Z m 3056.98,1740.43 -1.58,18.9 -52.6,-4.72 38.74,-6.3 -36.85,-12.6 52.29,4.72 z m -1570.28,-123.79 1.58,-18.9 52.6,4.72 -38.43,5.99 36.54,12.91 -52.29,-4.72 z"
- id="path106"
- inkscape:connector-curvature="0"
- style="clip-rule:evenodd" /></clipPath></defs><sodipodi:namedview
+ d="m 0,0 0,1895 4118,0 L 4118,0 0,0 Z m 3056.98,1740.43 -1.58,18.9 -52.6,-4.72 38.74,-6.3 -36.85,-12.6 52.29,4.72 z m -1570.28,-123.79 1.58,-18.9 52.6,4.72 -38.43,5.99 36.54,12.91 -52.29,-4.72 z"
+ id="path106"
+ inkscape:connector-curvature="0"
+ style="clip-rule:evenodd" /></clipPath></defs><sodipodi:namedview
pagecolor="#ffffff"
bordercolor="#666666"
borderopacity="1"
@@ -123,90 +124,90 @@
inkscape:connector-curvature="0" /><g
id="g40"
transform="matrix(0.14375794,0,0,0.14375794,-0.12334269,-0.08856738)"><g
- clip-path="url(#clipPath44)"
- id="g42"><path
- inkscape:connector-curvature="0"
- id="path48"
- style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="M 1492.37,1040.5 3051.62,260.875" /></g></g><g
+ clip-path="url(#clipPath44)"
+ id="g42"><path
+ inkscape:connector-curvature="0"
+ id="path48"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="M 1492.37,1040.5 3051.62,260.875" /></g></g><g
id="g50"
transform="matrix(0.14375794,0,0,0.14375794,-0.12334269,-0.08856738)"><path
- inkscape:connector-curvature="0"
- id="path52"
- style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
- d="m 1539.31,1027.9 -37.8,8.19 29.29,-25.2 8.51,17.01" /><path
- inkscape:connector-curvature="0"
- id="path54"
- style="fill:none;stroke:#000000;stroke-width:4.7249999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 1539.31,1027.9 -37.8,8.19 29.29,-25.2 8.51,17.01 z" /><path
- inkscape:connector-curvature="0"
- id="path56"
- style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
- d="m 3004.37,273.79 38.12,-8.505 -29.61,25.515 -8.51,-17.01" /><path
- inkscape:connector-curvature="0"
- id="path58"
- style="fill:none;stroke:#000000;stroke-width:4.7249999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 3004.37,273.79 38.12,-8.505 -29.61,25.515 -8.51,-17.01 z" /></g><g
+ inkscape:connector-curvature="0"
+ id="path52"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ d="m 1539.31,1027.9 -37.8,8.19 29.29,-25.2 8.51,17.01" /><path
+ inkscape:connector-curvature="0"
+ id="path54"
+ style="fill:none;stroke:#000000;stroke-width:4.7249999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1539.31,1027.9 -37.8,8.19 29.29,-25.2 8.51,17.01 z" /><path
+ inkscape:connector-curvature="0"
+ id="path56"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ d="m 3004.37,273.79 38.12,-8.505 -29.61,25.515 -8.51,-17.01" /><path
+ inkscape:connector-curvature="0"
+ id="path58"
+ style="fill:none;stroke:#000000;stroke-width:4.7249999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 3004.37,273.79 38.12,-8.505 -29.61,25.515 -8.51,-17.01 z" /></g><g
id="g60"
transform="matrix(0.14375794,0,0,0.14375794,-0.12334269,-0.08856738)"><g
- clip-path="url(#clipPath64)"
- id="g62"><path
- inkscape:connector-curvature="0"
- id="path68"
- style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="M 2555.5,1040.5 4114.75,260.875" /></g></g><g
+ clip-path="url(#clipPath64)"
+ id="g62"><path
+ inkscape:connector-curvature="0"
+ id="path68"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="M 2555.5,1040.5 4114.75,260.875" /></g></g><g
id="g70"
transform="matrix(0.14375794,0,0,0.14375794,-0.12334269,-0.08856738)"><path
- inkscape:connector-curvature="0"
- id="path72"
- style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
- d="m 2602.43,1027.9 -37.8,8.19 29.3,-25.2 8.5,17.01" /><path
- inkscape:connector-curvature="0"
- id="path74"
- style="fill:none;stroke:#000000;stroke-width:4.7249999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 2602.43,1027.9 -37.8,8.19 29.3,-25.2 8.5,17.01 z" /><path
- inkscape:connector-curvature="0"
- id="path76"
- style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
- d="m 4067.5,273.79 38.11,-8.505 -29.61,25.515 -8.5,-17.01" /><path
- inkscape:connector-curvature="0"
- id="path78"
- style="fill:none;stroke:#000000;stroke-width:4.7249999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 4067.5,273.79 38.11,-8.505 -29.61,25.515 -8.5,-17.01 z" /></g><g
+ inkscape:connector-curvature="0"
+ id="path72"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ d="m 2602.43,1027.9 -37.8,8.19 29.3,-25.2 8.5,17.01" /><path
+ inkscape:connector-curvature="0"
+ id="path74"
+ style="fill:none;stroke:#000000;stroke-width:4.7249999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 2602.43,1027.9 -37.8,8.19 29.3,-25.2 8.5,17.01 z" /><path
+ inkscape:connector-curvature="0"
+ id="path76"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ d="m 4067.5,273.79 38.11,-8.505 -29.61,25.515 -8.5,-17.01" /><path
+ inkscape:connector-curvature="0"
+ id="path78"
+ style="fill:none;stroke:#000000;stroke-width:4.7249999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 4067.5,273.79 38.11,-8.505 -29.61,25.515 -8.5,-17.01 z" /></g><g
id="g80"
transform="matrix(0.14375794,0,0,0.14375794,-0.12334269,-0.08856738)"><g
- clip-path="url(#clipPath84)"
- id="g82"><path
- inkscape:connector-curvature="0"
- id="path88"
- style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 2555.5,1607.5 1559.25,141.75" /></g></g><g
+ clip-path="url(#clipPath84)"
+ id="g82"><path
+ inkscape:connector-curvature="0"
+ id="path88"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 2555.5,1607.5 1559.25,141.75" /></g></g><g
id="g90"
transform="matrix(0.14375794,0,0,0.14375794,-0.12334269,-0.08856738)"><path
- inkscape:connector-curvature="0"
- id="path92"
- style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
- d="m 2602.12,1621.36 -36.54,-12.91 38.43,-5.99 -1.89,18.9" /><path
- inkscape:connector-curvature="0"
- id="path94"
- style="fill:none;stroke:#000000;stroke-width:4.7249999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 2602.12,1621.36 -36.54,-12.91 38.43,-5.99 -1.89,18.9 z" /><path
- inkscape:connector-curvature="0"
- id="path96"
- style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
- d="m 4067.81,1735.71 36.86,12.6 -38.75,6.3 1.89,-18.9" /><path
- inkscape:connector-curvature="0"
- id="path98"
- style="fill:none;stroke:#000000;stroke-width:4.7249999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 4067.81,1735.71 36.86,12.6 -38.75,6.3 1.89,-18.9 z" /></g><g
+ inkscape:connector-curvature="0"
+ id="path92"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ d="m 2602.12,1621.36 -36.54,-12.91 38.43,-5.99 -1.89,18.9" /><path
+ inkscape:connector-curvature="0"
+ id="path94"
+ style="fill:none;stroke:#000000;stroke-width:4.7249999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 2602.12,1621.36 -36.54,-12.91 38.43,-5.99 -1.89,18.9 z" /><path
+ inkscape:connector-curvature="0"
+ id="path96"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ d="m 4067.81,1735.71 36.86,12.6 -38.75,6.3 1.89,-18.9" /><path
+ inkscape:connector-curvature="0"
+ id="path98"
+ style="fill:none;stroke:#000000;stroke-width:4.7249999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 4067.81,1735.71 36.86,12.6 -38.75,6.3 1.89,-18.9 z" /></g><g
id="g100"
transform="matrix(0.14375794,0,0,0.14375794,-0.12334269,-0.08856738)"><g
- clip-path="url(#clipPath104)"
- id="g102"><path
- inkscape:connector-curvature="0"
- id="path108"
- style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 1492.37,1607.5 1559.25,141.75" /></g></g><path
+ clip-path="url(#clipPath104)"
+ id="g102"><path
+ inkscape:connector-curvature="0"
+ id="path108"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1492.37,1607.5 1559.25,141.75" /></g></g><path
d="m 221.11869,232.99481 -5.25292,-1.85592 5.52462,-0.86111 -0.2717,2.71703"
style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
id="path112"
@@ -225,43 +226,43 @@
inkscape:connector-curvature="0" /><g
id="g120"
transform="matrix(1.4375794,0,0,1.4375794,-0.12334269,-0.08856738)"><text
- transform="matrix(1,0,0,-1,204.52,9.07751)"
- style="font-variant:normal;font-weight:normal;font-size:6.61499977px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#d10000;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="text122"><tspan
- x="0 3.3074999 6.9854398 8.45397 12.13191 15.80985 19.11735 21.320145 24.998085 28.676025 31.983524 35.661465 39.339405 41.178375 44.856316 48.534256 52.212196 55.890137 59.568073"
- y="0"
- sodipodi:role="line"
- id="tspan124">v4l2_cropcap.bounds</tspan></text>
+ transform="matrix(1,0,0,-1,204.52,9.07751)"
+ style="font-variant:normal;font-weight:normal;font-size:6.61499977px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#d10000;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ id="text122"><tspan
+ x="0 3.3074999 6.9854398 8.45397 12.13191 15.80985 19.11735 21.320145 24.998085 28.676025 31.983524 35.661465 39.339405 41.178375 44.856316 48.534256 52.212196 55.890137 59.568073"
+ y="0"
+ sodipodi:role="line"
+ id="tspan124">v4l2_cropcap.bounds</tspan></text>
</g><g
id="g126"
transform="matrix(1.4375794,0,0,1.4375794,-0.12334269,-0.08856738)"><text
- transform="matrix(1,0,0,-1,58.5175,166.42)"
- style="font-variant:normal;font-weight:normal;font-size:6.61499977px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#0000d1;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="text128"><tspan
- x="0 3.3074999 6.9854398 8.45397 12.13191 15.80985 19.11735 21.320145 24.998085 28.676025 31.983524 35.661465 39.339405 41.178375 44.856316 48.534256 50.373226 52.576019 56.25396 59.561459"
- y="0"
- sodipodi:role="line"
- id="tspan130">v4l2_cropcap.defrect</tspan></text>
+ transform="matrix(1,0,0,-1,58.5175,166.42)"
+ style="font-variant:normal;font-weight:normal;font-size:6.61499977px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#0000d1;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ id="text128"><tspan
+ x="0 3.3074999 6.9854398 8.45397 12.13191 15.80985 19.11735 21.320145 24.998085 28.676025 31.983524 35.661465 39.339405 41.178375 44.856316 48.534256 50.373226 52.576019 56.25396 59.561459"
+ y="0"
+ sodipodi:role="line"
+ id="tspan130">v4l2_cropcap.defrect</tspan></text>
</g><g
id="g132"
transform="matrix(1.4375794,0,0,1.4375794,-0.12334269,-0.08856738)"><text
- transform="matrix(1,0,0,-1,153.49,152.245)"
- style="font-variant:normal;font-weight:normal;font-size:6.61499977px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#008f00;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="text134"><tspan
- x="0 3.3074999 6.9854398 8.45397 12.13191 15.80985 19.11735 21.320145 24.998085 28.676025 30.514996"
- y="0"
- sodipodi:role="line"
- id="tspan136">v4l2_crop.c</tspan></text>
+ transform="matrix(1,0,0,-1,153.49,152.245)"
+ style="font-variant:normal;font-weight:normal;font-size:6.61499977px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#008f00;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ id="text134"><tspan
+ x="0 3.3074999 6.9854398 8.45397 12.13191 15.80985 19.11735 21.320145 24.998085 28.676025 30.514996"
+ y="0"
+ sodipodi:role="line"
+ id="tspan136">v4l2_crop.c</tspan></text>
</g><g
id="g138"
transform="matrix(1.4375794,0,0,1.4375794,-0.12334269,-0.08856738)"><text
- transform="matrix(1,0,0,-1,309.415,30.34)"
- style="font-variant:normal;font-weight:normal;font-size:6.61499977px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#b000b0;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="text140"><tspan
- x="0 3.3074999 6.9854398 8.45397 12.13191 15.80985 17.648821 21.326759 23.529554 29.03985 32.717789"
- y="0"
- sodipodi:role="line"
- id="tspan142">v4l2_format</tspan></text>
+ transform="matrix(1,0,0,-1,309.415,30.34)"
+ style="font-variant:normal;font-weight:normal;font-size:6.61499977px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#b000b0;fill-opacity:1;fill-rule:nonzero;stroke:none"
+ id="text140"><tspan
+ x="0 3.3074999 6.9854398 8.45397 12.13191 15.80985 17.648821 21.326759 23.529554 29.03985 32.717789"
+ y="0"
+ sodipodi:role="line"
+ id="tspan142">v4l2_format</tspan></text>
</g><text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:32px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
@@ -270,12 +271,12 @@
id="text3396"
sodipodi:linespacing="125%"
transform="scale(1,-1)"><tspan
- sodipodi:role="line"
- id="tspan3398"
- x="-99.291145"
- y="-239.49893"></tspan><tspan
- sodipodi:role="line"
- x="-99.291145"
- y="-199.49893"
- id="tspan3400" /></text>
+ sodipodi:role="line"
+ id="tspan3398"
+ x="-99.291145"
+ y="-239.49893"></tspan><tspan
+ sodipodi:role="line"
+ x="-99.291145"
+ y="-199.49893"
+ id="tspan3400" /></text>
</g></svg>
diff --git a/Documentation/media/uapi/v4l/dev-meta.rst b/Documentation/media/uapi/v4l/dev-meta.rst
index 62518adfe37b..f7ac8d0d3af1 100644
--- a/Documentation/media/uapi/v4l/dev-meta.rst
+++ b/Documentation/media/uapi/v4l/dev-meta.rst
@@ -42,6 +42,8 @@ the :c:type:`v4l2_format` structure to 0.
.. _v4l2-meta-format:
+.. tabularcolumns:: |p{1.4cm}|p{2.2cm}|p{13.9cm}|
+
.. flat-table:: struct v4l2_meta_format
:header-rows: 0
:stub-columns: 0
diff --git a/Documentation/media/uapi/v4l/dev-sliced-vbi.rst b/Documentation/media/uapi/v4l/dev-sliced-vbi.rst
index 5f6d534ea73b..9d6c860271cb 100644
--- a/Documentation/media/uapi/v4l/dev-sliced-vbi.rst
+++ b/Documentation/media/uapi/v4l/dev-sliced-vbi.rst
@@ -105,7 +105,13 @@ which may return ``EBUSY`` can be the
struct v4l2_sliced_vbi_format
-----------------------------
-.. tabularcolumns:: |p{1.0cm}|p{4.5cm}|p{4.0cm}|p{4.0cm}|p{4.0cm}|
+.. raw:: latex
+
+ \begingroup
+ \scriptsize
+ \setlength{\tabcolsep}{2pt}
+
+.. tabularcolumns:: |p{.75cm}|p{3.3cm}|p{3.4cm}|p{3.4cm}|p{3.4cm}|
.. cssclass:: longtable
@@ -199,6 +205,9 @@ struct v4l2_sliced_vbi_format
Applications and drivers must set it to zero.
+.. raw:: latex
+
+ \endgroup
.. _vbi-services2:
@@ -207,9 +216,9 @@ Sliced VBI services
.. raw:: latex
- \begin{adjustbox}{width=\columnwidth}
+ \footnotesize
-.. tabularcolumns:: |p{5.0cm}|p{1.4cm}|p{3.0cm}|p{2.5cm}|p{9.0cm}|
+.. tabularcolumns:: |p{4.1cm}|p{1.1cm}|p{2.4cm}|p{2.0cm}|p{7.3cm}|
.. flat-table::
:header-rows: 1
@@ -263,7 +272,7 @@ Sliced VBI services
.. raw:: latex
- \end{adjustbox}\newline\newline
+ \normalsize
Drivers may return an ``EINVAL`` error code when applications attempt to
@@ -457,7 +466,7 @@ number).
struct v4l2_mpeg_vbi_fmt_ivtv
-----------------------------
-.. tabularcolumns:: |p{1.0cm}|p{3.5cm}|p{1.0cm}|p{11.5cm}|
+.. tabularcolumns:: |p{1.0cm}|p{3.8cm}|p{1.0cm}|p{11.2cm}|
.. flat-table::
:header-rows: 0
@@ -525,7 +534,7 @@ Magic Constants for struct v4l2_mpeg_vbi_fmt_ivtv magic field
structs v4l2_mpeg_vbi_itv0 and v4l2_mpeg_vbi_ITV0
-------------------------------------------------
-.. tabularcolumns:: |p{4.4cm}|p{2.4cm}|p{10.7cm}|
+.. tabularcolumns:: |p{4.9cm}|p{2.4cm}|p{10.2cm}|
.. flat-table::
:header-rows: 0
@@ -574,7 +583,7 @@ structs v4l2_mpeg_vbi_itv0 and v4l2_mpeg_vbi_ITV0
struct v4l2_mpeg_vbi_ITV0
-------------------------
-.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
+.. tabularcolumns:: |p{4.9cm}|p{4.4cm}|p{8.2cm}|
.. flat-table::
:header-rows: 0
diff --git a/Documentation/media/uapi/v4l/dev-subdev.rst b/Documentation/media/uapi/v4l/dev-subdev.rst
index f0e762167730..d20d945803a7 100644
--- a/Documentation/media/uapi/v4l/dev-subdev.rst
+++ b/Documentation/media/uapi/v4l/dev-subdev.rst
@@ -204,9 +204,9 @@ list entity names and pad numbers).
.. raw:: latex
- \begin{adjustbox}{width=\columnwidth}
+ \tiny
-.. tabularcolumns:: |p{4.5cm}|p{4.5cm}|p{4.5cm}|p{4.5cm}|p{4.5cm}|p{4.5cm}|p{4.5cm}|
+.. tabularcolumns:: |p{2.0cm}|p{2.3cm}|p{2.3cm}|p{2.3cm}|p{2.3cm}|p{2.3cm}|p{2.3cm}|
.. _sample-pipeline-config:
@@ -253,7 +253,7 @@ list entity names and pad numbers).
.. raw:: latex
- \end{adjustbox}\newline\newline
+ \normalsize
1. Initial state. The sensor source pad format is set to its native 3MP
size and V4L2_MBUS_FMT_SGRBG8_1X8 media bus code. Formats on the
@@ -370,7 +370,7 @@ circumstances. This may also cause the accessed rectangle to be adjusted
by the driver, depending on the properties of the underlying hardware.
The coordinates to a step always refer to the actual size of the
-previous step. The exception to this rule is the source compose
+previous step. The exception to this rule is the sink compose
rectangle, which refers to the sink compose bounds rectangle --- if it
is supported by the hardware.
diff --git a/Documentation/media/uapi/v4l/driver.rst b/Documentation/media/uapi/v4l/driver.rst
deleted file mode 100644
index 2319b383f0a4..000000000000
--- a/Documentation/media/uapi/v4l/driver.rst
+++ /dev/null
@@ -1,9 +0,0 @@
-.. -*- coding: utf-8; mode: rst -*-
-
-.. _driver:
-
-***********************
-V4L2 Driver Programming
-***********************
-
-to do
diff --git a/Documentation/media/uapi/v4l/extended-controls.rst b/Documentation/media/uapi/v4l/extended-controls.rst
index 9acc9cad49e2..a3e81c1d276b 100644
--- a/Documentation/media/uapi/v4l/extended-controls.rst
+++ b/Documentation/media/uapi/v4l/extended-controls.rst
@@ -942,21 +942,21 @@ enum v4l2_mpeg_video_mpeg4_level -
:header-rows: 0
:stub-columns: 0
- * - ``V4L2_MPEG_VIDEO_LEVEL_0``
+ * - ``V4L2_MPEG_VIDEO_MPEG4_LEVEL_0``
- Level 0
- * - ``V4L2_MPEG_VIDEO_LEVEL_0B``
+ * - ``V4L2_MPEG_VIDEO_MPEG4_LEVEL_0B``
- Level 0b
- * - ``V4L2_MPEG_VIDEO_LEVEL_1``
+ * - ``V4L2_MPEG_VIDEO_MPEG4_LEVEL_1``
- Level 1
- * - ``V4L2_MPEG_VIDEO_LEVEL_2``
+ * - ``V4L2_MPEG_VIDEO_MPEG4_LEVEL_2``
- Level 2
- * - ``V4L2_MPEG_VIDEO_LEVEL_3``
+ * - ``V4L2_MPEG_VIDEO_MPEG4_LEVEL_3``
- Level 3
- * - ``V4L2_MPEG_VIDEO_LEVEL_3B``
+ * - ``V4L2_MPEG_VIDEO_MPEG4_LEVEL_3B``
- Level 3b
- * - ``V4L2_MPEG_VIDEO_LEVEL_4``
+ * - ``V4L2_MPEG_VIDEO_MPEG4_LEVEL_4``
- Level 4
- * - ``V4L2_MPEG_VIDEO_LEVEL_5``
+ * - ``V4L2_MPEG_VIDEO_MPEG4_LEVEL_5``
- Level 5
@@ -1028,15 +1028,15 @@ enum v4l2_mpeg_video_mpeg4_profile -
:header-rows: 0
:stub-columns: 0
- * - ``V4L2_MPEG_VIDEO_PROFILE_SIMPLE``
+ * - ``V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE``
- Simple profile
- * - ``V4L2_MPEG_VIDEO_PROFILE_ADVANCED_SIMPLE``
+ * - ``V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE``
- Advanced Simple profile
- * - ``V4L2_MPEG_VIDEO_PROFILE_CORE``
+ * - ``V4L2_MPEG_VIDEO_MPEG4_PROFILE_CORE``
- Core profile
- * - ``V4L2_MPEG_VIDEO_PROFILE_SIMPLE_SCALABLE``
+ * - ``V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE_SCALABLE``
- Simple Scalable profile
- * - ``V4L2_MPEG_VIDEO_PROFILE_ADVANCED_CODING_EFFICIENCY``
+ * - ``V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_CODING_EFFICIENCY``
-
@@ -1922,9 +1922,9 @@ enum v4l2_vp8_golden_frame_sel -
.. raw:: latex
- \begin{adjustbox}{width=\columnwidth}
+ \footnotesize
-.. tabularcolumns:: |p{11.0cm}|p{10.0cm}|
+.. tabularcolumns:: |p{9.0cm}|p{8.0cm}|
.. flat-table::
:header-rows: 0
@@ -1940,7 +1940,7 @@ enum v4l2_vp8_golden_frame_sel -
.. raw:: latex
- \end{adjustbox}
+ \normalsize
``V4L2_CID_MPEG_VIDEO_VPX_MIN_QP (integer)``
diff --git a/Documentation/media/uapi/v4l/fieldseq_bt.svg b/Documentation/media/uapi/v4l/fieldseq_bt.svg
index b195301771ce..909d758f8543 100644
--- a/Documentation/media/uapi/v4l/fieldseq_bt.svg
+++ b/Documentation/media/uapi/v4l/fieldseq_bt.svg
@@ -42,15 +42,15 @@
inkscape:window-maximized="1"
inkscape:current-layer="g3627" /><metadata
id="metadata3625"><rdf:RDF><cc:Work
- rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title /></cc:Work></rdf:RDF></metadata><defs
+ rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title /></cc:Work></rdf:RDF></metadata><defs
id="defs3623"><clipPath
id="clipPath4301"
clipPathUnits="userSpaceOnUse"><path
- style="clip-rule:evenodd"
- inkscape:connector-curvature="0"
- id="path4303"
- d="M 0,6040 0,0 l 5650,0 0,6040 -5650,0 z m 4786.76,-99.89 103.92,0 0,56.69 -103.92,0 0,0 85.03,-28.35 -85.03,-28.34 z" /></clipPath></defs><g
+ style="clip-rule:evenodd"
+ inkscape:connector-curvature="0"
+ id="path4303"
+ d="M 0,6040 0,0 l 5650,0 0,6040 -5650,0 z m 4786.76,-99.89 103.92,0 0,56.69 -103.92,0 0,0 85.03,-28.35 -85.03,-28.34 z" /></clipPath></defs><g
transform="matrix(1.25,0,0,-1.25,-1.0537,751.94632)"
inkscape:label="fieldseq_bt"
inkscape:groupmode="layer"
@@ -1390,13 +1390,13 @@
transform="scale(0.1,0.1)"
id="g4297"
style=""><g
- id="g4299"
- clip-path="url(#clipPath4301)"
- style=""><path
- d="m 3778.18,5968.45 1105.42,0"
- style="fill:none;stroke:#000000;stroke-width:14.17199993;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- id="path4305"
- inkscape:connector-curvature="0" /></g></g><path
+ id="g4299"
+ clip-path="url(#clipPath4301)"
+ style=""><path
+ d="m 3778.18,5968.45 1105.42,0"
+ style="fill:none;stroke:#000000;stroke-width:14.17199993;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path4305"
+ inkscape:connector-curvature="0" /></g></g><path
d="m 478.676,594.011 8.503,2.834 -8.503,2.835 0,-5.669"
style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
id="path4307"
@@ -2466,148 +2466,148 @@
id="text4841"
style="font-variant:normal;font-weight:normal;font-size:11.9989996px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"><tspan
- id="tspan4843"
- sodipodi:role="line"
- y="-533.07098"
- x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 88.500237 97.835464">V4L2_FIELD_TOP</tspan><tspan
- id="tspan4845"
- sodipodi:role="line"
- y="-465.04559"
- x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 89.17218 98.507408 105.83879 113.17018 122.5054">V4L2_FIELD_BOTTOM</tspan><tspan
- id="tspan4847"
- sodipodi:role="line"
- y="-397.0202"
- x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 89.17218 95.843628 103.17502 111.17835 119.84163 128.5049 136.50824 143.83963">V4L2_FIELD_ALTERNATE</tspan></text>
+ id="tspan4843"
+ sodipodi:role="line"
+ y="-533.07098"
+ x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 88.500237 97.835464">V4L2_FIELD_TOP</tspan><tspan
+ id="tspan4845"
+ sodipodi:role="line"
+ y="-465.04559"
+ x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 89.17218 98.507408 105.83879 113.17018 122.5054">V4L2_FIELD_BOTTOM</tspan><tspan
+ id="tspan4847"
+ sodipodi:role="line"
+ y="-397.0202"
+ x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 89.17218 95.843628 103.17502 111.17835 119.84163 128.5049 136.50824 143.83963">V4L2_FIELD_ALTERNATE</tspan></text>
<text
y="-316.23969"
x="103.58983"
id="text4849"
style="font-variant:normal;font-weight:normal;font-size:8.2495203px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"><tspan
- id="tspan4851"
- sodipodi:role="line"
- y="-316.23969"
- x="103.58983 109.09226 113.67899 118.26572 122.85246 127.43919 132.47964 134.77301 140.27545 144.86218 150.81833 155.40506 160.44553 166.86365 188.62184 194.12427 198.711 203.29774 207.88448 212.47121 217.51166 219.80502 225.30746 229.8942 235.85034 240.43707 245.9395 252.35764 257.3981 262.43854 268.85669 375.69293 381.19534 385.78207 390.3688 394.95554 399.54227 404.58273 406.8761 412.37854 416.96527 422.92142 427.50815 433.01059 439.42871 444.46918 449.50961 455.92776 1.551828 7.0542617 11.640993 16.227724 20.814463 25.401194 30.441652 32.735016 38.237442 42.824177 48.780331 53.367065 58.869492 65.287621 70.328079 75.368538 81.786659">V4L2_FIELD_TOPV4L2_FIELD_BOTTOMV4L2_FIELD_BOTTOMV4L2_FIELD_BOTTOM</tspan><tspan
- id="tspan4853"
- sodipodi:role="line"
- y="-328.99481"
- x="10.054964 14.17972 18.766451 20.597849 25.18458 29.771311 34.358047 38.944778 41.238144 43.531509 48.118244 50.865334 53.158699 55.452068 57.283459 61.870193 63.701588 68.288322">v4l2_buffer.field:</tspan></text>
+ id="tspan4851"
+ sodipodi:role="line"
+ y="-316.23969"
+ x="103.58983 109.09226 113.67899 118.26572 122.85246 127.43919 132.47964 134.77301 140.27545 144.86218 150.81833 155.40506 160.44553 166.86365 188.62184 194.12427 198.711 203.29774 207.88448 212.47121 217.51166 219.80502 225.30746 229.8942 235.85034 240.43707 245.9395 252.35764 257.3981 262.43854 268.85669 375.69293 381.19534 385.78207 390.3688 394.95554 399.54227 404.58273 406.8761 412.37854 416.96527 422.92142 427.50815 433.01059 439.42871 444.46918 449.50961 455.92776 1.551828 7.0542617 11.640993 16.227724 20.814463 25.401194 30.441652 32.735016 38.237442 42.824177 48.780331 53.367065 58.869492 65.287621 70.328079 75.368538 81.786659">V4L2_FIELD_TOPV4L2_FIELD_BOTTOMV4L2_FIELD_BOTTOMV4L2_FIELD_BOTTOM</tspan><tspan
+ id="tspan4853"
+ sodipodi:role="line"
+ y="-328.99481"
+ x="10.054964 14.17972 18.766451 20.597849 25.18458 29.771311 34.358047 38.944778 41.238144 43.531509 48.118244 50.865334 53.158699 55.452068 57.283459 61.870193 63.701588 68.288322">v4l2_buffer.field:</tspan></text>
<text
y="-592.59381"
x="5.8034"
id="text4855"
style="font-variant:normal;font-weight:normal;font-size:11.9989996px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"><tspan
- id="tspan4857"
- sodipodi:role="line"
- y="-592.59381"
- x="5.8034 13.134789 19.806232 29.801399 36.472843 43.144287 47.139954 53.811398 56.475174 59.810898 66.482346 70.478004 77.149452 83.820892 87.816566 91.152283 94.488007 101.15945 107.83089 111.16662 114.50233 121.17377 131.16895 134.50468 137.84041 140.50418 147.17563 149.8394 156.51085 159.84657 163.1823 165.84607 169.84174 175.84123 179.17696 182.51268 185.8484 189.84407 196.51552 203.18695 209.18646 219.18163 221.8454 225.18112 228.51685 235.18829 241.85973 245.19545 249.19112 255.86256 259.19827 265.86972 269.20544 272.54117 282.53635 285.87207 294.53534 301.86673 309.87006 318.53336">Temporal order, bottom field first transmitted (e.g. M/NTSC)</tspan></text>
+ id="tspan4857"
+ sodipodi:role="line"
+ y="-592.59381"
+ x="5.8034 13.134789 19.806232 29.801399 36.472843 43.144287 47.139954 53.811398 56.475174 59.810898 66.482346 70.478004 77.149452 83.820892 87.816566 91.152283 94.488007 101.15945 107.83089 111.16662 114.50233 121.17377 131.16895 134.50468 137.84041 140.50418 147.17563 149.8394 156.51085 159.84657 163.1823 165.84607 169.84174 175.84123 179.17696 182.51268 185.8484 189.84407 196.51552 203.18695 209.18646 219.18163 221.8454 225.18112 228.51685 235.18829 241.85973 245.19545 249.19112 255.86256 259.19827 265.86972 269.20544 272.54117 282.53635 285.87207 294.53534 301.86673 309.87006 318.53336">Temporal order, bottom field first transmitted (e.g. M/NTSC)</tspan></text>
<text
y="-316.23981"
x="290.6604"
id="text4859"
style="font-variant:normal;font-weight:normal;font-size:8.2495203px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"><tspan
- id="tspan4861"
- sodipodi:role="line"
- y="-316.23981"
- x="290.6604 296.16284 300.74957 305.3363 309.92303 314.50977 319.55023 321.8436 327.34601 331.93274 337.88889 342.47565 347.51608 353.9342 477.73062 483.23306 487.81979 492.40652 496.99326 501.57999 506.62045 508.91382 514.41626 519.00299 524.95911 529.5459 534.5863 541.00446">V4L2_FIELD_TOPV4L2_FIELD_TOP</tspan></text>
+ id="tspan4861"
+ sodipodi:role="line"
+ y="-316.23981"
+ x="290.6604 296.16284 300.74957 305.3363 309.92303 314.50977 319.55023 321.8436 327.34601 331.93274 337.88889 342.47565 347.51608 353.9342 477.73062 483.23306 487.81979 492.40652 496.99326 501.57999 506.62045 508.91382 514.41626 519.00299 524.95911 529.5459 534.5863 541.00446">V4L2_FIELD_TOPV4L2_FIELD_TOP</tspan></text>
<text
y="-299.23349"
x="5.8034"
id="text4863"
style="font-variant:normal;font-weight:normal;font-size:11.9989996px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"><tspan
- id="tspan4865"
- sodipodi:role="line"
- y="-299.23349"
- x="5.8034 13.806733 20.478176 27.149622 33.821064 40.492508 47.823895 51.159618 59.162952 65.834396 74.497673 81.169121 84.504837 93.168114 100.4995 108.50284 117.16611 123.83755 131.8409 140.50418 148.50751 157.17079 160.50652 163.84224 167.17796 175.18129 181.85274 188.52419 195.19562 201.86707 209.19846 212.53418 220.53751 227.20895 235.87224 242.54367 245.87939 254.54268 261.87405 269.87741 278.54068 285.21213 293.21545 301.87872 309.88205 318.54535 325.2168 333.22012">V4L2_FIELD_INTERLACED / V4L2_FIELD_INTERLACED_BT</tspan><tspan
- id="tspan4867"
- sodipodi:role="line"
- y="-192.9435"
- x="1.5518398 9.5551729 16.226616 22.898062 29.569506 36.240948 43.572334 46.908058 54.911392 61.582836 70.246117 76.917557 80.253281 88.916557 96.247948 104.25128 112.91456 119.586 127.58932 136.25262 144.25595 152.91924 159.59067 166.92206 174.9254 178.26112 182.25679 192.25195 194.91573 200.91524 207.58667 210.25046 212.91423 219.58568 226.25713 232.92856 239.60001">V4L2_FIELD_INTERLACED_TB (misaligned)</tspan><tspan
- id="tspan4869"
- sodipodi:role="line"
- y="-86.653496"
- x="5.8034 13.806733 20.478176 27.149622 33.821064 40.492508 47.823895 51.159618 59.162952 65.834396 74.497673 81.169121 89.172447 97.175781 106.511 113.18245 121.18579">V4L2_FIELD_SEQ_BT</tspan></text>
+ id="tspan4865"
+ sodipodi:role="line"
+ y="-299.23349"
+ x="5.8034 13.806733 20.478176 27.149622 33.821064 40.492508 47.823895 51.159618 59.162952 65.834396 74.497673 81.169121 84.504837 93.168114 100.4995 108.50284 117.16611 123.83755 131.8409 140.50418 148.50751 157.17079 160.50652 163.84224 167.17796 175.18129 181.85274 188.52419 195.19562 201.86707 209.19846 212.53418 220.53751 227.20895 235.87224 242.54367 245.87939 254.54268 261.87405 269.87741 278.54068 285.21213 293.21545 301.87872 309.88205 318.54535 325.2168 333.22012">V4L2_FIELD_INTERLACED / V4L2_FIELD_INTERLACED_BT</tspan><tspan
+ id="tspan4867"
+ sodipodi:role="line"
+ y="-192.9435"
+ x="1.5518398 9.5551729 16.226616 22.898062 29.569506 36.240948 43.572334 46.908058 54.911392 61.582836 70.246117 76.917557 80.253281 88.916557 96.247948 104.25128 112.91456 119.586 127.58932 136.25262 144.25595 152.91924 159.59067 166.92206 174.9254 178.26112 182.25679 192.25195 194.91573 200.91524 207.58667 210.25046 212.91423 219.58568 226.25713 232.92856 239.60001">V4L2_FIELD_INTERLACED_TB (misaligned)</tspan><tspan
+ id="tspan4869"
+ sodipodi:role="line"
+ y="-86.653496"
+ x="5.8034 13.806733 20.478176 27.149622 33.821064 40.492508 47.823895 51.159618 59.162952 65.834396 74.497673 81.169121 89.172447 97.175781 106.511 113.18245 121.18579">V4L2_FIELD_SEQ_BT</tspan></text>
<text
y="-533.07098"
x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 88.500237 97.835464"
id="text4592"
style="font-variant:normal;font-weight:normal;font-size:11.9989996px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"><tspan
- id="tspan4594"
- sodipodi:role="line"
- y="-533.07098"
- x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 88.500237 97.835464">V4L2_FIELD_TOP</tspan></text>
+ id="tspan4594"
+ sodipodi:role="line"
+ y="-533.07098"
+ x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 88.500237 97.835464">V4L2_FIELD_TOP</tspan></text>
<text
y="-465.04559"
x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 89.17218 98.507408 105.83879 113.17018 122.5054"
id="text4596"
style="font-variant:normal;font-weight:normal;font-size:11.9989996px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"><tspan
- id="tspan4598"
- sodipodi:role="line"
- y="-465.04559"
- x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 89.17218 98.507408 105.83879 113.17018 122.5054">V4L2_FIELD_BOTTOM</tspan></text>
+ id="tspan4598"
+ sodipodi:role="line"
+ y="-465.04559"
+ x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 89.17218 98.507408 105.83879 113.17018 122.5054">V4L2_FIELD_BOTTOM</tspan></text>
<text
y="-397.0202"
x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 89.17218 95.843628 103.17502 111.17835 119.84163 128.5049 136.50824 143.83963"
id="text4600"
style="font-variant:normal;font-weight:normal;font-size:11.9989996px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"><tspan
- id="tspan4602"
- sodipodi:role="line"
- y="-397.0202"
- x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 89.17218 95.843628 103.17502 111.17835 119.84163 128.5049 136.50824 143.83963">V4L2_FIELD_ALTERNATE</tspan></text>
+ id="tspan4602"
+ sodipodi:role="line"
+ y="-397.0202"
+ x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 89.17218 95.843628 103.17502 111.17835 119.84163 128.5049 136.50824 143.83963">V4L2_FIELD_ALTERNATE</tspan></text>
<text
y="-299.23349"
x="5.8034 13.806733 20.478176 27.149622 33.821064 40.492508 47.823895 51.159618 59.162952 65.834396 74.497673 81.169121 84.504837 93.168114 100.4995 108.50284 117.16611 123.83755 131.8409 140.50418 148.50751 157.17079 160.50652 163.84224 167.17796 175.18129 181.85274 188.52419 195.19562 201.86707 209.19846 212.53418 220.53751 227.20895 235.87224 242.54367 245.87939 254.54268 261.87405 269.87741 278.54068 285.21213 293.21545 301.87872 309.88205 318.54535 325.2168 333.22012"
id="text5862"
style="font-variant:normal;font-weight:normal;font-size:11.9989996px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"><tspan
- id="tspan5864"
- sodipodi:role="line"
- y="-299.23349"
- x="5.8034 13.806733 20.478176 27.149622 33.821064 40.492508 47.823895 51.159618 59.162952 65.834396 74.497673 81.169121 84.504837 93.168114 100.4995 108.50284 117.16611 123.83755 131.8409 140.50418 148.50751 157.17079 160.50652 163.84224 167.17796 175.18129 181.85274 188.52419 195.19562 201.86707 209.19846 212.53418 220.53751 227.20895 235.87224 242.54367 245.87939 254.54268 261.87405 269.87741 278.54068 285.21213 293.21545 301.87872 309.88205 318.54535 325.2168 333.22012">V4L2_FIELD_INTERLACED / V4L2_FIELD_INTERLACED_BT</tspan></text>
+ id="tspan5864"
+ sodipodi:role="line"
+ y="-299.23349"
+ x="5.8034 13.806733 20.478176 27.149622 33.821064 40.492508 47.823895 51.159618 59.162952 65.834396 74.497673 81.169121 84.504837 93.168114 100.4995 108.50284 117.16611 123.83755 131.8409 140.50418 148.50751 157.17079 160.50652 163.84224 167.17796 175.18129 181.85274 188.52419 195.19562 201.86707 209.19846 212.53418 220.53751 227.20895 235.87224 242.54367 245.87939 254.54268 261.87405 269.87741 278.54068 285.21213 293.21545 301.87872 309.88205 318.54535 325.2168 333.22012">V4L2_FIELD_INTERLACED / V4L2_FIELD_INTERLACED_BT</tspan></text>
<text
y="-192.9435"
x="1.5518398 9.5551729 16.226616 22.898062 29.569506 36.240948 43.572334 46.908058 54.911392 61.582836 70.246117 76.917557 80.253281 88.916557 96.247948 104.25128 112.91456 119.586 127.58932 136.25262 144.25595 152.91924 159.59067 166.92206 174.9254 178.26112 182.25679 192.25195 194.91573 200.91524 207.58667 210.25046 212.91423 219.58568 226.25713 232.92856 239.60001"
id="text5866"
style="font-variant:normal;font-weight:normal;font-size:11.9989996px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"><tspan
- id="tspan5868"
- sodipodi:role="line"
- y="-192.9435"
- x="1.5518398 9.5551729 16.226616 22.898062 29.569506 36.240948 43.572334 46.908058 54.911392 61.582836 70.246117 76.917557 80.253281 88.916557 96.247948 104.25128 112.91456 119.586 127.58932 136.25262 144.25595 152.91924 159.59067 166.92206 174.9254 178.26112 182.25679 192.25195 194.91573 200.91524 207.58667 210.25046 212.91423 219.58568 226.25713 232.92856 239.60001">V4L2_FIELD_INTERLACED_TB (misaligned)</tspan></text>
+ id="tspan5868"
+ sodipodi:role="line"
+ y="-192.9435"
+ x="1.5518398 9.5551729 16.226616 22.898062 29.569506 36.240948 43.572334 46.908058 54.911392 61.582836 70.246117 76.917557 80.253281 88.916557 96.247948 104.25128 112.91456 119.586 127.58932 136.25262 144.25595 152.91924 159.59067 166.92206 174.9254 178.26112 182.25679 192.25195 194.91573 200.91524 207.58667 210.25046 212.91423 219.58568 226.25713 232.92856 239.60001">V4L2_FIELD_INTERLACED_TB (misaligned)</tspan></text>
<text
y="-86.653496"
x="5.8034 13.806733 20.478176 27.149622 33.821064 40.492508 47.823895 51.159618 59.162952 65.834396 74.497673 81.169121 89.172447 97.175781 106.511 113.18245 121.18579"
id="text5870"
style="font-variant:normal;font-weight:normal;font-size:11.9989996px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"><tspan
- id="tspan5872"
- sodipodi:role="line"
- y="-86.653496"
- x="5.8034 13.806733 20.478176 27.149622 33.821064 40.492508 47.823895 51.159618 59.162952 65.834396 74.497673 81.169121 89.172447 97.175781 106.511 113.18245 121.18579">V4L2_FIELD_SEQ_BT</tspan></text>
+ id="tspan5872"
+ sodipodi:role="line"
+ y="-86.653496"
+ x="5.8034 13.806733 20.478176 27.149622 33.821064 40.492508 47.823895 51.159618 59.162952 65.834396 74.497673 81.169121 89.172447 97.175781 106.511 113.18245 121.18579">V4L2_FIELD_SEQ_BT</tspan></text>
<text
y="-316.23969"
x="103.58983 109.09226 113.67899 118.26572 122.85246 127.43919 132.47964 134.77301 140.27545 144.86218 150.81833 155.40506 160.44553 166.86365 188.62184 194.12427 198.711 203.29774 207.88448 212.47121 217.51166 219.80502 225.30746 229.8942 235.85034 240.43707 245.9395 252.35764 257.3981 262.43854 268.85669 375.69293 381.19534 385.78207 390.3688 394.95554 399.54227 404.58273 406.8761 412.37854 416.96527 422.92142 427.50815 433.01059 439.42871 444.46918 449.50961 455.92776 1.551828 7.0542617 11.640993 16.227724 20.814463 25.401194 30.441652 32.735016 38.237442 42.824177 48.780331 53.367065 58.869492 65.287621 70.328079 75.368538 81.786659"
id="text7144"
style="font-variant:normal;font-weight:normal;font-size:8.2495203px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"><tspan
- id="tspan7146"
- sodipodi:role="line"
- y="-316.23969"
- x="103.58983 109.09226 113.67899 118.26572 122.85246 127.43919 132.47964 134.77301 140.27545 144.86218 150.81833 155.40506 160.44553 166.86365 188.62184 194.12427 198.711 203.29774 207.88448 212.47121 217.51166 219.80502 225.30746 229.8942 235.85034 240.43707 245.9395 252.35764 257.3981 262.43854 268.85669 375.69293 381.19534 385.78207 390.3688 394.95554 399.54227 404.58273 406.8761 412.37854 416.96527 422.92142 427.50815 433.01059 439.42871 444.46918 449.50961 455.92776 1.551828 7.0542617 11.640993 16.227724 20.814463 25.401194 30.441652 32.735016 38.237442 42.824177 48.780331 53.367065 58.869492 65.287621 70.328079 75.368538 81.786659">V4L2_FIELD_TOPV4L2_FIELD_BOTTOMV4L2_FIELD_BOTTOMV4L2_FIELD_BOTTOM</tspan></text>
+ id="tspan7146"
+ sodipodi:role="line"
+ y="-316.23969"
+ x="103.58983 109.09226 113.67899 118.26572 122.85246 127.43919 132.47964 134.77301 140.27545 144.86218 150.81833 155.40506 160.44553 166.86365 188.62184 194.12427 198.711 203.29774 207.88448 212.47121 217.51166 219.80502 225.30746 229.8942 235.85034 240.43707 245.9395 252.35764 257.3981 262.43854 268.85669 375.69293 381.19534 385.78207 390.3688 394.95554 399.54227 404.58273 406.8761 412.37854 416.96527 422.92142 427.50815 433.01059 439.42871 444.46918 449.50961 455.92776 1.551828 7.0542617 11.640993 16.227724 20.814463 25.401194 30.441652 32.735016 38.237442 42.824177 48.780331 53.367065 58.869492 65.287621 70.328079 75.368538 81.786659">V4L2_FIELD_TOPV4L2_FIELD_BOTTOMV4L2_FIELD_BOTTOMV4L2_FIELD_BOTTOM</tspan></text>
<text
y="-328.99481"
x="10.054964 14.17972 18.766451 20.597849 25.18458 29.771311 34.358047 38.944778 41.238144 43.531509 48.118244 50.865334 53.158699 55.452068 57.283459 61.870193 63.701588 68.288322"
id="text7148"
style="font-variant:normal;font-weight:normal;font-size:8.2495203px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"><tspan
- id="tspan7150"
- sodipodi:role="line"
- y="-328.99481"
- x="10.054964 14.17972 18.766451 20.597849 25.18458 29.771311 34.358047 38.944778 41.238144 43.531509 48.118244 50.865334 53.158699 55.452068 57.283459 61.870193 63.701588 68.288322">v4l2_buffer.field:</tspan></text>
+ id="tspan7150"
+ sodipodi:role="line"
+ y="-328.99481"
+ x="10.054964 14.17972 18.766451 20.597849 25.18458 29.771311 34.358047 38.944778 41.238144 43.531509 48.118244 50.865334 53.158699 55.452068 57.283459 61.870193 63.701588 68.288322">v4l2_buffer.field:</tspan></text>
</g></svg>
\ No newline at end of file
diff --git a/Documentation/media/uapi/v4l/fieldseq_tb.svg b/Documentation/media/uapi/v4l/fieldseq_tb.svg
index 6a7b10ad4ab8..7c74344e770f 100644
--- a/Documentation/media/uapi/v4l/fieldseq_tb.svg
+++ b/Documentation/media/uapi/v4l/fieldseq_tb.svg
@@ -42,15 +42,15 @@
inkscape:window-maximized="1"
inkscape:current-layer="g5551" /><metadata
id="metadata5549"><rdf:RDF><cc:Work
- rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title /></cc:Work></rdf:RDF></metadata><defs
+ rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title /></cc:Work></rdf:RDF></metadata><defs
id="defs5547"><clipPath
id="clipPath6753"
clipPathUnits="userSpaceOnUse"><path
- style="clip-rule:evenodd"
- inkscape:connector-curvature="0"
- id="path6755"
- d="M 0,6000 0,0 l 5660,0 0,6000 -5660,0 z m 4786.76,-102.89 103.92,0 0,56.69 -103.92,0 0,0 85.03,-28.35 -85.03,-28.34 z" /></clipPath></defs><g
+ style="clip-rule:evenodd"
+ inkscape:connector-curvature="0"
+ id="path6755"
+ d="M 0,6000 0,0 l 5660,0 0,6000 -5660,0 z m 4786.76,-102.89 103.92,0 0,56.69 -103.92,0 0,0 85.03,-28.35 -85.03,-28.34 z" /></clipPath></defs><g
transform="matrix(1.25,0,0,-1.25,-1.0537,746.57119)"
inkscape:label="fieldseq_tb"
inkscape:groupmode="layer"
@@ -2446,13 +2446,13 @@
transform="scale(0.1,0.1)"
id="g6749"
style=""><g
- id="g6751"
- clip-path="url(#clipPath6753)"
- style=""><path
- d="m 3778.18,5925.45 1105.42,0"
- style="fill:none;stroke:#000000;stroke-width:14.17199993;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- id="path6757"
- inkscape:connector-curvature="0" /></g></g><path
+ id="g6751"
+ clip-path="url(#clipPath6753)"
+ style=""><path
+ d="m 3778.18,5925.45 1105.42,0"
+ style="fill:none;stroke:#000000;stroke-width:14.17199993;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path6757"
+ inkscape:connector-curvature="0" /></g></g><path
d="m 478.676,589.711 8.503,2.834 -8.503,2.835 0,-5.669"
style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
id="path6759"
@@ -2466,142 +2466,145 @@
id="text6765"
style="font-variant:normal;font-weight:normal;font-size:11.9989996px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"><tspan
- id="tspan6767"
- sodipodi:role="line"
- y="-528.771"
- x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 88.500237 97.835464">V4L2_FIELD_TOP</tspan><tspan
- id="tspan6769"
- sodipodi:role="line"
- y="-460.74561"
- x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 89.17218 98.507408 105.83879 113.17018 122.5054">V4L2_FIELD_BOTTOM</tspan><tspan
- id="tspan6771"
- sodipodi:role="line"
- y="-392.72021"
- x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 89.17218 95.843628 103.17502 111.17835 119.84163 128.5049 136.50824 143.83963">V4L2_FIELD_ALTERNATE</tspan></text>
+ id="tspan6767"
+ sodipodi:role="line"
+ y="-528.771"
+ x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 88.500237 97.835464">V4L2_FIELD_TOP</tspan><tspan
+ id="tspan6769"
+ sodipodi:role="line"
+ y="-460.74561"
+ x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 89.17218 98.507408 105.83879 113.17018 122.5054">V4L2_FIELD_BOTTOM</tspan><tspan
+ id="tspan6771"
+ sodipodi:role="line"
+ y="-392.72021"
+ x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 89.17218 95.843628 103.17502 111.17835 119.84163 128.5049 136.50824 143.83963">V4L2_FIELD_ALTERNATE</tspan></text>
<text
y="-324.69479"
x="10.05469"
id="text6773"
style="font-variant:normal;font-weight:normal;font-size:8.2495203px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"><tspan
- id="tspan6775"
- sodipodi:role="line"
- y="-324.69479"
- x="10.05469 14.17945 18.766184 20.597576 25.184309 29.771042 34.357777 38.944508 41.237877 43.531242 48.117977 50.865067 53.158432 55.451801 57.283192 61.869926 63.701321 68.288048">v4l2_buffer.field:</tspan><tspan
- id="tspan6777"
- sodipodi:role="line"
- y="-311.9397"
- x="10.05469 15.55712 20.143852 24.730585 29.317318 33.904053 38.944508 41.237877 46.740307 51.327042 57.283192 61.869926 66.910378 73.328506 95.0867 100.58913 105.17586 109.7626 114.34933 118.93606 123.97652 126.26987 131.77232 136.35905 142.3152 146.90193 152.40436 158.82249 163.86295 168.9034 175.32153 197.12534 202.62778 207.21451 211.80124 216.38797 220.9747 226.01515 228.30853 233.81096 238.39769 244.35384 248.94058 253.98103 260.39917 282.15695 287.65936 292.24609 296.83282 301.41956 306.00629 311.04675 313.34012 318.84256 323.42929 329.38544 333.97217 339.47461 345.89273 350.9332 355.97363 362.39175 384.19559 389.698 394.28473 398.87149 403.45822 408.04495 413.08539 415.37875 420.8812 425.46793 431.42407 436.0108 441.05127 447.46939 469.2276 474.73001 479.31674 483.90347 488.49023 493.07697 498.1174 500.41077 505.91321 510.49994 516.45612 521.04285 526.54523 532.96338 538.00385 543.04431 549.4624">V4L2_FIELD_TOPV4L2_FIELD_BOTTOMV4L2_FIELD_TOPV4L2_FIELD_BOTTOMV4L2_FIELD_TOPV4L2_FIELD_BOTTOM</tspan></text>
+ id="tspan6775"
+ sodipodi:role="line"
+ y="-324.69479"
+ x="10.05469 14.17945 18.766184 20.597576 25.184309 29.771042 34.357777 38.944508 41.237877 43.531242 48.117977 50.865067 53.158432 55.451801 57.283192 61.869926 63.701321 68.288048">v4l2_buffer.field:</tspan><tspan
+ id="tspan6777"
+ sodipodi:role="line"
+ y="-311.9397"
+ x="10.05469 15.55712 20.143852 24.730585 29.317318 33.904053 38.944508 41.237877 46.740307 51.327042 57.283192 61.869926 66.910378 73.328506 95.0867 100.58913 105.17586 109.7626 114.34933 118.93606 123.97652 126.26987 131.77232 136.35905 142.3152 146.90193 152.40436 158.82249 163.86295 168.9034 175.32153 197.12534 202.62778 207.21451 211.80124 216.38797 220.9747 226.01515 228.30853 233.81096 238.39769 244.35384 248.94058 253.98103 260.39917 282.15695 287.65936 292.24609 296.83282 301.41956 306.00629 311.04675 313.34012 318.84256 323.42929 329.38544 333.97217 339.47461 345.89273 350.9332 355.97363 362.39175 384.19559 389.698 394.28473 398.87149 403.45822 408.04495 413.08539 415.37875 420.8812 425.46793 431.42407 436.0108 441.05127 447.46939 469.2276 474.73001 479.31674 483.90347 488.49023 493.07697 498.1174 500.41077 505.91321 510.49994 516.45612 521.04285 526.54523 532.96338
+538.00385 543.04431 549.4624">V4L2_FIELD_TOPV4L2_FIELD_BOTTOMV4L2_FIELD_TOPV4L2_FIELD_BOTTOMV4L2_FIELD_TOPV4L2_FIELD_BOTTOM</tspan></text>
<text
y="-588.2937"
x="5.8031301"
id="text6779"
style="font-variant:normal;font-weight:normal;font-size:11.9989996px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"><tspan
- id="tspan6781"
- sodipodi:role="line"
- y="-588.2937"
- x="5.8031301 13.134519 19.805964 29.801128 36.472572 43.14402 47.139687 53.811131 56.474907 59.810631 66.482071 70.477737 77.149185 83.820625 87.816299 91.152016 94.48774 97.823463 104.4949 111.16635 114.50207 117.83779 120.50157 127.17302 129.83679 136.50824 139.84396 143.17969 145.84346 149.83913 155.83862 159.17435 162.51007 165.84579 169.84146 176.51291 183.18434 189.18385 199.17902 201.84279 205.17851 208.51424 215.18568 221.85713 225.19284 229.18851 235.85995 239.19568 245.86713 249.20285 252.53857 260.5419 269.87714 273.21286 281.21619 289.21951 295.89096">Temporal order, top field first transmitted (e.g. BG/PAL)</tspan><tspan
- id="tspan6783"
- sodipodi:role="line"
- y="-86.604706"
- x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 89.17218 97.175514 106.51073 113.18218 120.51357">V4L2_FIELD_SEQ_TB</tspan><tspan
- id="tspan6785"
- sodipodi:role="line"
- y="-192.89471"
- x="10.05469 18.058023 24.729465 31.400909 38.072357 44.743801 52.075188 55.410912 63.414246 70.085686 78.748962 85.42041 88.756134 97.419411 104.7508 112.75413 121.41741 128.08885 136.09219 144.75546 152.7588 161.42207 168.09352 176.09685 183.42824 186.76396 190.75963 200.75479 203.41858 209.41808 216.08952 218.7533 221.41707 228.08852 234.75996 241.43141 248.10286">V4L2_FIELD_INTERLACED_BT (misaligned)</tspan><tspan
- id="tspan6787"
- sodipodi:role="line"
- y="-294.93271"
- x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 84.50457 93.167847 100.49924 108.50257 117.16585 123.83729 131.84062 140.50391 148.50723 157.17052 160.50624 163.84196 167.17769 175.18102 181.85246 188.52391 195.19534 201.86679 209.19818 212.53391 220.53723 227.20868 235.87196 242.5434 245.87912 254.5424 261.87378 269.87714 278.54041 285.21185 293.21518 301.87845 309.88177 318.54507 325.21652 332.54791">V4L2_FIELD_INTERLACED / V4L2_FIELD_INTERLACED_TB</tspan></text>
+ id="tspan6781"
+ sodipodi:role="line"
+ y="-588.2937"
+ x="5.8031301 13.134519 19.805964 29.801128 36.472572 43.14402 47.139687 53.811131 56.474907 59.810631 66.482071 70.477737 77.149185 83.820625 87.816299 91.152016 94.48774 97.823463 104.4949 111.16635 114.50207 117.83779 120.50157 127.17302 129.83679 136.50824 139.84396 143.17969 145.84346 149.83913 155.83862 159.17435 162.51007 165.84579 169.84146 176.51291 183.18434 189.18385 199.17902 201.84279 205.17851 208.51424 215.18568 221.85713 225.19284 229.18851 235.85995 239.19568 245.86713 249.20285 252.53857 260.5419 269.87714 273.21286 281.21619 289.21951 295.89096">Temporal order, top field first transmitted (e.g. BG/PAL)</tspan><tspan
+ id="tspan6783"
+ sodipodi:role="line"
+ y="-86.604706"
+ x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 89.17218 97.175514 106.51073 113.18218 120.51357">V4L2_FIELD_SEQ_TB</tspan><tspan
+ id="tspan6785"
+ sodipodi:role="line"
+ y="-192.89471"
+ x="10.05469 18.058023 24.729465 31.400909 38.072357 44.743801 52.075188 55.410912 63.414246 70.085686 78.748962 85.42041 88.756134 97.419411 104.7508 112.75413 121.41741 128.08885 136.09219 144.75546 152.7588 161.42207 168.09352 176.09685 183.42824 186.76396 190.75963 200.75479 203.41858 209.41808 216.08952 218.7533 221.41707 228.08852 234.75996 241.43141 248.10286">V4L2_FIELD_INTERLACED_BT (misaligned)</tspan><tspan
+ id="tspan6787"
+ sodipodi:role="line"
+ y="-294.93271"
+ x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 84.50457 93.167847 100.49924 108.50257 117.16585 123.83729 131.84062 140.50391 148.50723 157.17052 160.50624 163.84196 167.17769 175.18102 181.85246 188.52391 195.19534 201.86679 209.19818 212.53391 220.53723 227.20868 235.87196 242.5434 245.87912 254.5424 261.87378 269.87714 278.54041 285.21185 293.21518 301.87845 309.88177 318.54507 325.21652 332.54791">V4L2_FIELD_INTERLACED / V4L2_FIELD_INTERLACED_TB</tspan></text>
<text
y="-528.771"
x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 88.500237 97.835464"
id="text4583"
style="font-variant:normal;font-weight:normal;font-size:11.9989996px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"><tspan
- id="tspan4585"
- sodipodi:role="line"
- y="-528.771"
- x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 88.500237 97.835464">V4L2_FIELD_TOP</tspan></text>
+ id="tspan4585"
+ sodipodi:role="line"
+ y="-528.771"
+ x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 88.500237 97.835464">V4L2_FIELD_TOP</tspan></text>
<text
y="-460.74561"
x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 89.17218 98.507408 105.83879 113.17018 122.5054"
id="text4587"
style="font-variant:normal;font-weight:normal;font-size:11.9989996px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"><tspan
- id="tspan4589"
- sodipodi:role="line"
- y="-460.74561"
- x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 89.17218 98.507408 105.83879 113.17018 122.5054">V4L2_FIELD_BOTTOM</tspan></text>
+ id="tspan4589"
+ sodipodi:role="line"
+ y="-460.74561"
+ x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 89.17218 98.507408 105.83879 113.17018 122.5054">V4L2_FIELD_BOTTOM</tspan></text>
<text
y="-392.72021"
x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 89.17218 95.843628 103.17502 111.17835 119.84163 128.5049 136.50824 143.83963"
id="text4591"
style="font-variant:normal;font-weight:normal;font-size:11.9989996px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"><tspan
- id="tspan4593"
- sodipodi:role="line"
- y="-392.72021"
- x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 89.17218 95.843628 103.17502 111.17835 119.84163 128.5049 136.50824 143.83963">V4L2_FIELD_ALTERNATE</tspan></text>
+ id="tspan4593"
+ sodipodi:role="line"
+ y="-392.72021"
+ x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 89.17218 95.843628 103.17502 111.17835 119.84163 128.5049 136.50824 143.83963">V4L2_FIELD_ALTERNATE</tspan></text>
<text
y="-588.2937"
x="5.8031301 13.134519 19.805964 29.801128 36.472572 43.14402 47.139687 53.811131 56.474907 59.810631 66.482071 70.477737 77.149185 83.820625 87.816299 91.152016 94.48774 97.823463 104.4949 111.16635 114.50207 117.83779 120.50157 127.17302 129.83679 136.50824 139.84396 143.17969 145.84346 149.83913 155.83862 159.17435 162.51007 165.84579 169.84146 176.51291 183.18434 189.18385 199.17902 201.84279 205.17851 208.51424 215.18568 221.85713 225.19284 229.18851 235.85995 239.19568 245.86713 249.20285 252.53857 260.5419 269.87714 273.21286 281.21619 289.21951 295.89096"
id="text5847"
style="font-variant:normal;font-weight:normal;font-size:11.9989996px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"><tspan
- id="tspan5849"
- sodipodi:role="line"
- y="-588.2937"
- x="5.8031301 13.134519 19.805964 29.801128 36.472572 43.14402 47.139687 53.811131 56.474907 59.810631 66.482071 70.477737 77.149185 83.820625 87.816299 91.152016 94.48774 97.823463 104.4949 111.16635 114.50207 117.83779 120.50157 127.17302 129.83679 136.50824 139.84396 143.17969 145.84346 149.83913 155.83862 159.17435 162.51007 165.84579 169.84146 176.51291 183.18434 189.18385 199.17902 201.84279 205.17851 208.51424 215.18568 221.85713 225.19284 229.18851 235.85995 239.19568 245.86713 249.20285 252.53857 260.5419 269.87714 273.21286 281.21619 289.21951 295.89096">Temporal order, top field first transmitted (e.g. BG/PAL)</tspan></text>
+ id="tspan5849"
+ sodipodi:role="line"
+ y="-588.2937"
+ x="5.8031301 13.134519 19.805964 29.801128 36.472572 43.14402 47.139687 53.811131 56.474907 59.810631 66.482071 70.477737 77.149185 83.820625 87.816299 91.152016 94.48774 97.823463 104.4949 111.16635 114.50207 117.83779 120.50157 127.17302 129.83679 136.50824 139.84396 143.17969 145.84346 149.83913 155.83862 159.17435 162.51007 165.84579 169.84146 176.51291 183.18434 189.18385 199.17902 201.84279 205.17851 208.51424 215.18568 221.85713 225.19284 229.18851 235.85995 239.19568 245.86713 249.20285 252.53857 260.5419 269.87714 273.21286 281.21619 289.21951 295.89096">Temporal order, top field first transmitted (e.g. BG/PAL)</tspan></text>
<text
y="-86.604706"
x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 89.17218 97.175514 106.51073 113.18218 120.51357"
id="text5851"
style="font-variant:normal;font-weight:normal;font-size:11.9989996px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"><tspan
- id="tspan5853"
- sodipodi:role="line"
- y="-86.604706"
- x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 89.17218 97.175514 106.51073 113.18218 120.51357">V4L2_FIELD_SEQ_TB</tspan></text>
+ id="tspan5853"
+ sodipodi:role="line"
+ y="-86.604706"
+ x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 89.17218 97.175514 106.51073 113.18218 120.51357">V4L2_FIELD_SEQ_TB</tspan></text>
<text
y="-192.89471"
x="10.05469 18.058023 24.729465 31.400909 38.072357 44.743801 52.075188 55.410912 63.414246 70.085686 78.748962 85.42041 88.756134 97.419411 104.7508 112.75413 121.41741 128.08885 136.09219 144.75546 152.7588 161.42207 168.09352 176.09685 183.42824 186.76396 190.75963 200.75479 203.41858 209.41808 216.08952 218.7533 221.41707 228.08852 234.75996 241.43141 248.10286"
id="text5855"
style="font-variant:normal;font-weight:normal;font-size:11.9989996px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"><tspan
- id="tspan5857"
- sodipodi:role="line"
- y="-192.89471"
- x="10.05469 18.058023 24.729465 31.400909 38.072357 44.743801 52.075188 55.410912 63.414246 70.085686 78.748962 85.42041 88.756134 97.419411 104.7508 112.75413 121.41741 128.08885 136.09219 144.75546 152.7588 161.42207 168.09352 176.09685 183.42824 186.76396 190.75963 200.75479 203.41858 209.41808 216.08952 218.7533 221.41707 228.08852 234.75996 241.43141 248.10286">V4L2_FIELD_INTERLACED_BT (misaligned)</tspan></text>
+ id="tspan5857"
+ sodipodi:role="line"
+ y="-192.89471"
+ x="10.05469 18.058023 24.729465 31.400909 38.072357 44.743801 52.075188 55.410912 63.414246 70.085686 78.748962 85.42041 88.756134 97.419411 104.7508 112.75413 121.41741 128.08885 136.09219 144.75546 152.7588 161.42207 168.09352 176.09685 183.42824 186.76396 190.75963 200.75479 203.41858 209.41808 216.08952 218.7533 221.41707 228.08852 234.75996 241.43141 248.10286">V4L2_FIELD_INTERLACED_BT (misaligned)</tspan></text>
<text
y="-294.93271"
x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 84.50457 93.167847 100.49924 108.50257 117.16585 123.83729 131.84062 140.50391 148.50723 157.17052 160.50624 163.84196 167.17769 175.18102 181.85246 188.52391 195.19534 201.86679 209.19818 212.53391 220.53723 227.20868 235.87196 242.5434 245.87912 254.5424 261.87378 269.87714 278.54041 285.21185 293.21518 301.87845 309.88177 318.54507 325.21652 332.54791"
id="text5859"
style="font-variant:normal;font-weight:normal;font-size:11.9989996px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"><tspan
- id="tspan5861"
- sodipodi:role="line"
- y="-294.93271"
- x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 84.50457 93.167847 100.49924 108.50257 117.16585 123.83729 131.84062 140.50391 148.50723 157.17052 160.50624 163.84196 167.17769 175.18102 181.85246 188.52391 195.19534 201.86679 209.19818 212.53391 220.53723 227.20868 235.87196 242.5434 245.87912 254.5424 261.87378 269.87714 278.54041 285.21185 293.21518 301.87845 309.88177 318.54507 325.21652 332.54791">V4L2_FIELD_INTERLACED / V4L2_FIELD_INTERLACED_TB</tspan></text>
+ id="tspan5861"
+ sodipodi:role="line"
+ y="-294.93271"
+ x="5.8031301 13.806463 20.477907 27.149351 33.820797 40.492241 47.823627 51.159351 59.162685 65.834129 74.497406 81.168854 84.50457 93.167847 100.49924 108.50257 117.16585 123.83729 131.84062 140.50391 148.50723 157.17052 160.50624 163.84196 167.17769 175.18102 181.85246 188.52391 195.19534 201.86679 209.19818 212.53391 220.53723 227.20868 235.87196 242.5434 245.87912 254.5424 261.87378 269.87714 278.54041 285.21185 293.21518 301.87845 309.88177 318.54507 325.21652 332.54791">V4L2_FIELD_INTERLACED / V4L2_FIELD_INTERLACED_TB</tspan></text>
<text
y="-324.69479"
x="10.05469 14.17945 18.766184 20.597576 25.184309 29.771042 34.357777 38.944508 41.237877 43.531242 48.117977 50.865067 53.158432 55.451801 57.283192 61.869926 63.701321 68.288048"
id="text7131"
style="font-variant:normal;font-weight:normal;font-size:8.2495203px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"><tspan
- id="tspan7133"
- sodipodi:role="line"
- y="-324.69479"
- x="10.05469 14.17945 18.766184 20.597576 25.184309 29.771042 34.357777 38.944508 41.237877 43.531242 48.117977 50.865067 53.158432 55.451801 57.283192 61.869926 63.701321 68.288048">v4l2_buffer.field:</tspan></text>
+ id="tspan7133"
+ sodipodi:role="line"
+ y="-324.69479"
+ x="10.05469 14.17945 18.766184 20.597576 25.184309 29.771042 34.357777 38.944508 41.237877 43.531242 48.117977 50.865067 53.158432 55.451801 57.283192 61.869926 63.701321 68.288048">v4l2_buffer.field:</tspan></text>
<text
y="-311.9397"
- x="10.05469 15.55712 20.143852 24.730585 29.317318 33.904053 38.944508 41.237877 46.740307 51.327042 57.283192 61.869926 66.910378 73.328506 95.0867 100.58913 105.17586 109.7626 114.34933 118.93606 123.97652 126.26987 131.77232 136.35905 142.3152 146.90193 152.40436 158.82249 163.86295 168.9034 175.32153 197.12534 202.62778 207.21451 211.80124 216.38797 220.9747 226.01515 228.30853 233.81096 238.39769 244.35384 248.94058 253.98103 260.39917 282.15695 287.65936 292.24609 296.83282 301.41956 306.00629 311.04675 313.34012 318.84256 323.42929 329.38544 333.97217 339.47461 345.89273 350.9332 355.97363 362.39175 384.19559 389.698 394.28473 398.87149 403.45822 408.04495 413.08539 415.37875 420.8812 425.46793 431.42407 436.0108 441.05127 447.46939 469.2276 474.73001 479.31674 483.90347 488.49023 493.07697 498.1174 500.41077 505.91321 510.49994 516.45612 521.04285 526.54523 532.96338 538.00385 543.04431 549.4624"
+ x="10.05469 15.55712 20.143852 24.730585 29.317318 33.904053 38.944508 41.237877 46.740307 51.327042 57.283192 61.869926 66.910378 73.328506 95.0867 100.58913 105.17586 109.7626 114.34933 118.93606 123.97652 126.26987 131.77232 136.35905 142.3152 146.90193 152.40436 158.82249 163.86295 168.9034 175.32153 197.12534 202.62778 207.21451 211.80124 216.38797 220.9747 226.01515 228.30853 233.81096 238.39769 244.35384 248.94058 253.98103 260.39917 282.15695 287.65936 292.24609 296.83282 301.41956 306.00629 311.04675 313.34012 318.84256 323.42929 329.38544 333.97217 339.47461 345.89273 350.9332 355.97363 362.39175 384.19559 389.698 394.28473 398.87149 403.45822 408.04495 413.08539 415.37875 420.8812 425.46793 431.42407 436.0108 441.05127 447.46939 469.2276 474.73001 479.31674 483.90347 488.49023 493.07697 498.1174 500.41077 505.91321 510.49994 516.45612 521.04285 526.54523 532.96338
+538.00385 543.04431 549.4624"
id="text7135"
style="font-variant:normal;font-weight:normal;font-size:8.2495203px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"><tspan
- id="tspan7137"
- sodipodi:role="line"
- y="-311.9397"
- x="10.05469 15.55712 20.143852 24.730585 29.317318 33.904053 38.944508 41.237877 46.740307 51.327042 57.283192 61.869926 66.910378 73.328506 95.0867 100.58913 105.17586 109.7626 114.34933 118.93606 123.97652 126.26987 131.77232 136.35905 142.3152 146.90193 152.40436 158.82249 163.86295 168.9034 175.32153 197.12534 202.62778 207.21451 211.80124 216.38797 220.9747 226.01515 228.30853 233.81096 238.39769 244.35384 248.94058 253.98103 260.39917 282.15695 287.65936 292.24609 296.83282 301.41956 306.00629 311.04675 313.34012 318.84256 323.42929 329.38544 333.97217 339.47461 345.89273 350.9332 355.97363 362.39175 384.19559 389.698 394.28473 398.87149 403.45822 408.04495 413.08539 415.37875 420.8812 425.46793 431.42407 436.0108 441.05127 447.46939 469.2276 474.73001 479.31674 483.90347 488.49023 493.07697 498.1174 500.41077 505.91321 510.49994 516.45612 521.04285 526.54523 532.96338 538.00385 543.04431 549.4624">V4L2_FIELD_TOPV4L2_FIELD_BOTTOMV4L2_FIELD_TOPV4L2_FIELD_BOTTOMV4L2_FIELD_TOPV4L2_FIELD_BOTTOM</tspan></text>
+ id="tspan7137"
+ sodipodi:role="line"
+ y="-311.9397"
+ x="10.05469 15.55712 20.143852 24.730585 29.317318 33.904053 38.944508 41.237877 46.740307 51.327042 57.283192 61.869926 66.910378 73.328506 95.0867 100.58913 105.17586 109.7626 114.34933 118.93606 123.97652 126.26987 131.77232 136.35905 142.3152 146.90193 152.40436 158.82249 163.86295 168.9034 175.32153 197.12534 202.62778 207.21451 211.80124 216.38797 220.9747 226.01515 228.30853 233.81096 238.39769 244.35384 248.94058 253.98103 260.39917 282.15695 287.65936 292.24609 296.83282 301.41956 306.00629 311.04675 313.34012 318.84256 323.42929 329.38544 333.97217 339.47461 345.89273 350.9332 355.97363 362.39175 384.19559 389.698 394.28473 398.87149 403.45822 408.04495 413.08539 415.37875 420.8812 425.46793 431.42407 436.0108 441.05127 447.46939 469.2276 474.73001 479.31674 483.90347 488.49023 493.07697 498.1174 500.41077 505.91321 510.49994 516.45612 521.04285 526.54523 532.96338
+538.00385 543.04431 549.4624">V4L2_FIELD_TOPV4L2_FIELD_BOTTOMV4L2_FIELD_TOPV4L2_FIELD_BOTTOMV4L2_FIELD_TOPV4L2_FIELD_BOTTOM</tspan></text>
</g></svg>
\ No newline at end of file
diff --git a/Documentation/media/uapi/v4l/format.rst b/Documentation/media/uapi/v4l/format.rst
index 452c6d59cad5..3e3efb0e349e 100644
--- a/Documentation/media/uapi/v4l/format.rst
+++ b/Documentation/media/uapi/v4l/format.rst
@@ -78,7 +78,7 @@ output devices is available. [#f1]_
The :ref:`VIDIOC_ENUM_FMT` ioctl must be supported
by all drivers exchanging image data with applications.
- **Important**
+.. important::
Drivers are not supposed to convert image formats in kernel space.
They must enumerate only formats directly supported by the hardware.
diff --git a/Documentation/media/uapi/v4l/nv12mt.svg b/Documentation/media/uapi/v4l/nv12mt.svg
index 21fcccda9723..65d05606c04c 100644
--- a/Documentation/media/uapi/v4l/nv12mt.svg
+++ b/Documentation/media/uapi/v4l/nv12mt.svg
@@ -18,8 +18,8 @@
sodipodi:docname="nv12mt.svg"
style="fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round"><metadata
id="metadata383"><rdf:RDF><cc:Work
- rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title></dc:title></cc:Work></rdf:RDF></metadata><sodipodi:namedview
+ rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title></dc:title></cc:Work></rdf:RDF></metadata><sodipodi:namedview
pagecolor="#ffffff"
bordercolor="#666666"
borderopacity="1"
@@ -47,11 +47,11 @@
id="defs4"><clipPath
id="presentation_clip_path"
clipPathUnits="userSpaceOnUse"><rect
- x="0"
- y="0"
- width="28000"
- height="21000"
- id="rect7" /></clipPath></defs><defs
+ x="0"
+ y="0"
+ width="28000"
+ height="21000"
+ id="rect7" /></clipPath></defs><defs
id="defs9" /><defs
id="defs80" /><defs
id="defs103" /><defs
@@ -65,386 +65,386 @@
id="g177"
transform="translate(-3285.889,-3185.889)"><g
id="g179"><g
- id="id1"
- class="Slide"
- clip-path="url(#presentation_clip_path)"><g
- class="Page"
- id="g182"><g
- class="com.sun.star.drawing.CustomShape"
- id="g184"><g
- id="id6"><rect
- class="BoundingBox"
- x="3299"
- y="3199"
- width="2403"
- height="1403"
- id="rect187"
- style="fill:none;stroke:none" /><path
- d="m 4500,4600 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path189"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text191"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan193"
- style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
- class="TextPosition"
- x="4325"
- y="4121"
- id="tspan195"><tspan
- id="tspan197"
- style="fill:#000000;stroke:none">0</tspan></tspan></tspan></text>
+ id="id1"
+ class="Slide"
+ clip-path="url(#presentation_clip_path)"><g
+ class="Page"
+ id="g182"><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g184"><g
+ id="id6"><rect
+ class="BoundingBox"
+ x="3299"
+ y="3199"
+ width="2403"
+ height="1403"
+ id="rect187"
+ style="fill:none;stroke:none" /><path
+ d="m 4500,4600 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path189"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text191"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan193"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="4325"
+ y="4121"
+ id="tspan195"><tspan
+ id="tspan197"
+ style="fill:#000000;stroke:none">0</tspan></tspan></tspan></text>
</g></g><g
- class="com.sun.star.drawing.CustomShape"
- id="g199"><g
- id="id7"><rect
- class="BoundingBox"
- x="5699"
- y="3199"
- width="2403"
- height="1403"
- id="rect202"
- style="fill:none;stroke:none" /><path
- d="m 6900,4600 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path204"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /></g></g><g
- class="com.sun.star.drawing.CustomShape"
- id="g206"><g
- id="id8"><rect
- class="BoundingBox"
- x="8099"
- y="3199"
- width="2403"
- height="1403"
- id="rect209"
- style="fill:none;stroke:none" /><path
- d="m 9300,4600 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path211"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text213"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan215"
- style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
- class="TextPosition"
- x="9125"
- y="4121"
- id="tspan217"><tspan
- id="tspan219"
- style="fill:#000000;stroke:none">6</tspan></tspan></tspan></text>
+ class="com.sun.star.drawing.CustomShape"
+ id="g199"><g
+ id="id7"><rect
+ class="BoundingBox"
+ x="5699"
+ y="3199"
+ width="2403"
+ height="1403"
+ id="rect202"
+ style="fill:none;stroke:none" /><path
+ d="m 6900,4600 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path204"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /></g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g206"><g
+ id="id8"><rect
+ class="BoundingBox"
+ x="8099"
+ y="3199"
+ width="2403"
+ height="1403"
+ id="rect209"
+ style="fill:none;stroke:none" /><path
+ d="m 9300,4600 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path211"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text213"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan215"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="9125"
+ y="4121"
+ id="tspan217"><tspan
+ id="tspan219"
+ style="fill:#000000;stroke:none">6</tspan></tspan></tspan></text>
</g></g><g
- class="com.sun.star.drawing.CustomShape"
- id="g221"><g
- id="id9"><rect
- class="BoundingBox"
- x="5699"
- y="3199"
- width="2403"
- height="1403"
- id="rect224"
- style="fill:none;stroke:none" /><path
- d="m 6900,4600 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path226"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text228"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan230"
- style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
- class="TextPosition"
- x="6725"
- y="4121"
- id="tspan232"><tspan
- id="tspan234"
- style="fill:#000000;stroke:none">1</tspan></tspan></tspan></text>
+ class="com.sun.star.drawing.CustomShape"
+ id="g221"><g
+ id="id9"><rect
+ class="BoundingBox"
+ x="5699"
+ y="3199"
+ width="2403"
+ height="1403"
+ id="rect224"
+ style="fill:none;stroke:none" /><path
+ d="m 6900,4600 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path226"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text228"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan230"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="6725"
+ y="4121"
+ id="tspan232"><tspan
+ id="tspan234"
+ style="fill:#000000;stroke:none">1</tspan></tspan></tspan></text>
</g></g><g
- class="com.sun.star.drawing.CustomShape"
- id="g236"><g
- id="id10"><rect
- class="BoundingBox"
- x="10499"
- y="3199"
- width="2403"
- height="1403"
- id="rect239"
- style="fill:none;stroke:none" /><path
- d="m 11700,4600 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path241"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text243"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan245"
- style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
- class="TextPosition"
- x="11525"
- y="4121"
- id="tspan247"><tspan
- id="tspan249"
- style="fill:#000000;stroke:none">7</tspan></tspan></tspan></text>
+ class="com.sun.star.drawing.CustomShape"
+ id="g236"><g
+ id="id10"><rect
+ class="BoundingBox"
+ x="10499"
+ y="3199"
+ width="2403"
+ height="1403"
+ id="rect239"
+ style="fill:none;stroke:none" /><path
+ d="m 11700,4600 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path241"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text243"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan245"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="11525"
+ y="4121"
+ id="tspan247"><tspan
+ id="tspan249"
+ style="fill:#000000;stroke:none">7</tspan></tspan></tspan></text>
</g></g><g
- class="com.sun.star.drawing.CustomShape"
- id="g251"><g
- id="id11"><rect
- class="BoundingBox"
- x="3299"
- y="4599"
- width="2403"
- height="1403"
- id="rect254"
- style="fill:none;stroke:none" /><path
- d="m 4500,6000 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path256"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text258"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan260"
- style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
- class="TextPosition"
- x="4325"
- y="5521"
- id="tspan262"><tspan
- id="tspan264"
- style="fill:#000000;stroke:none">2</tspan></tspan></tspan></text>
+ class="com.sun.star.drawing.CustomShape"
+ id="g251"><g
+ id="id11"><rect
+ class="BoundingBox"
+ x="3299"
+ y="4599"
+ width="2403"
+ height="1403"
+ id="rect254"
+ style="fill:none;stroke:none" /><path
+ d="m 4500,6000 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path256"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text258"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan260"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="4325"
+ y="5521"
+ id="tspan262"><tspan
+ id="tspan264"
+ style="fill:#000000;stroke:none">2</tspan></tspan></tspan></text>
</g></g><g
- class="com.sun.star.drawing.CustomShape"
- id="g266"><g
- id="id12"><rect
- class="BoundingBox"
- x="5699"
- y="4599"
- width="2403"
- height="1403"
- id="rect269"
- style="fill:none;stroke:none" /><path
- d="m 6900,6000 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path271"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /></g></g><g
- class="com.sun.star.drawing.CustomShape"
- id="g273"><g
- id="id13"><rect
- class="BoundingBox"
- x="8099"
- y="4599"
- width="2403"
- height="1403"
- id="rect276"
- style="fill:none;stroke:none" /><path
- d="m 9300,6000 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path278"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text280"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan282"
- style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
- class="TextPosition"
- x="9125"
- y="5521"
- id="tspan284"><tspan
- id="tspan286"
- style="fill:#000000;stroke:none">4</tspan></tspan></tspan></text>
+ class="com.sun.star.drawing.CustomShape"
+ id="g266"><g
+ id="id12"><rect
+ class="BoundingBox"
+ x="5699"
+ y="4599"
+ width="2403"
+ height="1403"
+ id="rect269"
+ style="fill:none;stroke:none" /><path
+ d="m 6900,6000 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path271"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /></g></g><g
+ class="com.sun.star.drawing.CustomShape"
+ id="g273"><g
+ id="id13"><rect
+ class="BoundingBox"
+ x="8099"
+ y="4599"
+ width="2403"
+ height="1403"
+ id="rect276"
+ style="fill:none;stroke:none" /><path
+ d="m 9300,6000 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path278"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text280"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan282"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="9125"
+ y="5521"
+ id="tspan284"><tspan
+ id="tspan286"
+ style="fill:#000000;stroke:none">4</tspan></tspan></tspan></text>
</g></g><g
- class="com.sun.star.drawing.CustomShape"
- id="g288"><g
- id="id14"><rect
- class="BoundingBox"
- x="5699"
- y="4599"
- width="2403"
- height="1403"
- id="rect291"
- style="fill:none;stroke:none" /><path
- d="m 6900,6000 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path293"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text295"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan297"
- style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
- class="TextPosition"
- x="6725"
- y="5521"
- id="tspan299"><tspan
- id="tspan301"
- style="fill:#000000;stroke:none">3</tspan></tspan></tspan></text>
+ class="com.sun.star.drawing.CustomShape"
+ id="g288"><g
+ id="id14"><rect
+ class="BoundingBox"
+ x="5699"
+ y="4599"
+ width="2403"
+ height="1403"
+ id="rect291"
+ style="fill:none;stroke:none" /><path
+ d="m 6900,6000 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path293"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text295"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan297"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="6725"
+ y="5521"
+ id="tspan299"><tspan
+ id="tspan301"
+ style="fill:#000000;stroke:none">3</tspan></tspan></tspan></text>
</g></g><g
- class="com.sun.star.drawing.CustomShape"
- id="g303"><g
- id="id15"><rect
- class="BoundingBox"
- x="10499"
- y="4599"
- width="2403"
- height="1403"
- id="rect306"
- style="fill:none;stroke:none" /><path
- d="m 11700,6000 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path308"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text310"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan312"
- style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
- class="TextPosition"
- x="11525"
- y="5521"
- id="tspan314"><tspan
- id="tspan316"
- style="fill:#000000;stroke:none">5</tspan></tspan></tspan></text>
+ class="com.sun.star.drawing.CustomShape"
+ id="g303"><g
+ id="id15"><rect
+ class="BoundingBox"
+ x="10499"
+ y="4599"
+ width="2403"
+ height="1403"
+ id="rect306"
+ style="fill:none;stroke:none" /><path
+ d="m 11700,6000 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path308"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text310"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan312"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="11525"
+ y="5521"
+ id="tspan314"><tspan
+ id="tspan316"
+ style="fill:#000000;stroke:none">5</tspan></tspan></tspan></text>
</g></g><g
- class="com.sun.star.drawing.LineShape"
- id="g318"><g
- id="id16"><rect
- class="BoundingBox"
- x="5199"
- y="3850"
- width="1402"
- height="301"
- id="rect321"
- style="fill:none;stroke:none" /><path
- d="m 5200,4000 970,0"
- id="path323"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#ff3333" /><path
- d="m 6600,4000 -450,-150 0,300 450,-150 z"
- id="path325"
- inkscape:connector-curvature="0"
- style="fill:#ff3333;stroke:none" /></g></g><g
- class="com.sun.star.drawing.LineShape"
- id="g327"><g
- id="id17"><rect
- class="BoundingBox"
- x="5000"
- y="4299"
- width="1202"
- height="802"
- id="rect330"
- style="fill:none;stroke:none" /><path
- d="m 6200,4300 -842,561"
- id="path332"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#ff3333" /><path
- d="m 5000,5100 458,-125 -167,-249 -291,374 z"
- id="path334"
- inkscape:connector-curvature="0"
- style="fill:#ff3333;stroke:none" /></g></g><g
- class="com.sun.star.drawing.LineShape"
- id="g336"><g
- id="id18"><rect
- class="BoundingBox"
- x="5399"
- y="5250"
- width="1202"
- height="301"
- id="rect339"
- style="fill:none;stroke:none" /><path
- d="m 5400,5400 770,0"
- id="path341"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#ff3333" /><path
- d="m 6600,5400 -450,-150 0,300 450,-150 z"
- id="path343"
- inkscape:connector-curvature="0"
- style="fill:#ff3333;stroke:none" /></g></g><g
- class="com.sun.star.drawing.LineShape"
- id="g345"><g
- id="id19"><rect
- class="BoundingBox"
- x="7599"
- y="5250"
- width="1202"
- height="301"
- id="rect348"
- style="fill:none;stroke:none" /><path
- d="m 7600,5400 770,0"
- id="path350"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#ff3333" /><path
- d="m 8800,5400 -450,-150 0,300 450,-150 z"
- id="path352"
- inkscape:connector-curvature="0"
- style="fill:#ff3333;stroke:none" /></g></g><g
- class="com.sun.star.drawing.LineShape"
- id="g354"><g
- id="id20"><rect
- class="BoundingBox"
- x="9799"
- y="5250"
- width="1402"
- height="301"
- id="rect357"
- style="fill:none;stroke:none" /><path
- d="m 9800,5400 970,0"
- id="path359"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#ff3333" /><path
- d="m 11200,5400 -450,-150 0,300 450,-150 z"
- id="path361"
- inkscape:connector-curvature="0"
- style="fill:#ff3333;stroke:none" /></g></g><g
- class="com.sun.star.drawing.LineShape"
- id="g363"><g
- id="id21"><rect
- class="BoundingBox"
- x="9900"
- y="4200"
- width="1202"
- height="802"
- id="rect366"
- style="fill:none;stroke:none" /><path
- d="m 11100,5000 -842,-561"
- id="path368"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#ff3333" /><path
- d="m 9900,4200 291,374 167,-249 -458,-125 z"
- id="path370"
- inkscape:connector-curvature="0"
- style="fill:#ff3333;stroke:none" /></g></g><g
- class="com.sun.star.drawing.LineShape"
- id="g372"><g
- id="id22"><rect
- class="BoundingBox"
- x="9999"
- y="3850"
- width="1402"
- height="301"
- id="rect375"
- style="fill:none;stroke:none" /><path
- d="m 10000,4000 970,0"
- id="path377"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#ff3333" /><path
- d="m 11400,4000 -450,-150 0,300 450,-150 z"
- id="path379"
- inkscape:connector-curvature="0"
- style="fill:#ff3333;stroke:none" /></g></g></g></g></g></g></svg>
+ class="com.sun.star.drawing.LineShape"
+ id="g318"><g
+ id="id16"><rect
+ class="BoundingBox"
+ x="5199"
+ y="3850"
+ width="1402"
+ height="301"
+ id="rect321"
+ style="fill:none;stroke:none" /><path
+ d="m 5200,4000 970,0"
+ id="path323"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 6600,4000 -450,-150 0,300 450,-150 z"
+ id="path325"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
+ class="com.sun.star.drawing.LineShape"
+ id="g327"><g
+ id="id17"><rect
+ class="BoundingBox"
+ x="5000"
+ y="4299"
+ width="1202"
+ height="802"
+ id="rect330"
+ style="fill:none;stroke:none" /><path
+ d="m 6200,4300 -842,561"
+ id="path332"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 5000,5100 458,-125 -167,-249 -291,374 z"
+ id="path334"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
+ class="com.sun.star.drawing.LineShape"
+ id="g336"><g
+ id="id18"><rect
+ class="BoundingBox"
+ x="5399"
+ y="5250"
+ width="1202"
+ height="301"
+ id="rect339"
+ style="fill:none;stroke:none" /><path
+ d="m 5400,5400 770,0"
+ id="path341"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 6600,5400 -450,-150 0,300 450,-150 z"
+ id="path343"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
+ class="com.sun.star.drawing.LineShape"
+ id="g345"><g
+ id="id19"><rect
+ class="BoundingBox"
+ x="7599"
+ y="5250"
+ width="1202"
+ height="301"
+ id="rect348"
+ style="fill:none;stroke:none" /><path
+ d="m 7600,5400 770,0"
+ id="path350"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 8800,5400 -450,-150 0,300 450,-150 z"
+ id="path352"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
+ class="com.sun.star.drawing.LineShape"
+ id="g354"><g
+ id="id20"><rect
+ class="BoundingBox"
+ x="9799"
+ y="5250"
+ width="1402"
+ height="301"
+ id="rect357"
+ style="fill:none;stroke:none" /><path
+ d="m 9800,5400 970,0"
+ id="path359"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 11200,5400 -450,-150 0,300 450,-150 z"
+ id="path361"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
+ class="com.sun.star.drawing.LineShape"
+ id="g363"><g
+ id="id21"><rect
+ class="BoundingBox"
+ x="9900"
+ y="4200"
+ width="1202"
+ height="802"
+ id="rect366"
+ style="fill:none;stroke:none" /><path
+ d="m 11100,5000 -842,-561"
+ id="path368"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 9900,4200 291,374 167,-249 -458,-125 z"
+ id="path370"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
+ class="com.sun.star.drawing.LineShape"
+ id="g372"><g
+ id="id22"><rect
+ class="BoundingBox"
+ x="9999"
+ y="3850"
+ width="1402"
+ height="301"
+ id="rect375"
+ style="fill:none;stroke:none" /><path
+ d="m 10000,4000 970,0"
+ id="path377"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 11400,4000 -450,-150 0,300 450,-150 z"
+ id="path379"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g></g></g></g></g></svg>
diff --git a/Documentation/media/uapi/v4l/nv12mt_example.svg b/Documentation/media/uapi/v4l/nv12mt_example.svg
index d65d989ee73b..fc51fe8fda8b 100644
--- a/Documentation/media/uapi/v4l/nv12mt_example.svg
+++ b/Documentation/media/uapi/v4l/nv12mt_example.svg
@@ -18,8 +18,8 @@
sodipodi:docname="nv12mt_example.svg"
style="fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round"><metadata
id="metadata953"><rdf:RDF><cc:Work
- rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title></dc:title></cc:Work></rdf:RDF></metadata><sodipodi:namedview
+ rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title></dc:title></cc:Work></rdf:RDF></metadata><sodipodi:namedview
pagecolor="#ffffff"
bordercolor="#666666"
borderopacity="1"
@@ -58,1532 +58,1532 @@
id="g188"
transform="translate(-3285.889,-3185.889)"><g
id="id6"><rect
- class="BoundingBox"
- x="3299"
- y="3199"
- width="2403"
- height="1403"
- id="rect191"
- style="fill:none;stroke:none" /><path
- d="m 4500,4600 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path193"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text195"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan197"
- style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
- class="TextPosition"
- x="4325"
- y="4121"
- id="tspan199"><tspan
- id="tspan201"
- style="fill:#000000;stroke:none">0</tspan></tspan></tspan></text>
+ class="BoundingBox"
+ x="3299"
+ y="3199"
+ width="2403"
+ height="1403"
+ id="rect191"
+ style="fill:none;stroke:none" /><path
+ d="m 4500,4600 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path193"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text195"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan197"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="4325"
+ y="4121"
+ id="tspan199"><tspan
+ id="tspan201"
+ style="fill:#000000;stroke:none">0</tspan></tspan></tspan></text>
</g></g><g
class="com.sun.star.drawing.CustomShape"
id="g203"
transform="translate(-3285.889,-3185.889)"><g
id="id7"><rect
- class="BoundingBox"
- x="5699"
- y="3199"
- width="2403"
- height="1403"
- id="rect206"
- style="fill:none;stroke:none" /><path
- d="m 6900,4600 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path208"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /></g></g><g
+ class="BoundingBox"
+ x="5699"
+ y="3199"
+ width="2403"
+ height="1403"
+ id="rect206"
+ style="fill:none;stroke:none" /><path
+ d="m 6900,4600 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path208"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /></g></g><g
class="com.sun.star.drawing.CustomShape"
id="g210"
transform="translate(-3285.889,-3185.889)"><g
id="id8"><rect
- class="BoundingBox"
- x="8099"
- y="3199"
- width="2403"
- height="1403"
- id="rect213"
- style="fill:none;stroke:none" /><path
- d="m 9300,4600 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path215"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text217"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan219"
- style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
- class="TextPosition"
- x="9125"
- y="4121"
- id="tspan221"><tspan
- id="tspan223"
- style="fill:#000000;stroke:none">6</tspan></tspan></tspan></text>
+ class="BoundingBox"
+ x="8099"
+ y="3199"
+ width="2403"
+ height="1403"
+ id="rect213"
+ style="fill:none;stroke:none" /><path
+ d="m 9300,4600 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path215"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text217"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan219"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="9125"
+ y="4121"
+ id="tspan221"><tspan
+ id="tspan223"
+ style="fill:#000000;stroke:none">6</tspan></tspan></tspan></text>
</g></g><g
class="com.sun.star.drawing.CustomShape"
id="g225"
transform="translate(-3285.889,-3185.889)"><g
id="id9"><rect
- class="BoundingBox"
- x="5699"
- y="3199"
- width="2403"
- height="1403"
- id="rect228"
- style="fill:none;stroke:none" /><path
- d="m 6900,4600 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path230"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text232"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan234"
- style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
- class="TextPosition"
- x="6725"
- y="4121"
- id="tspan236"><tspan
- id="tspan238"
- style="fill:#000000;stroke:none">1</tspan></tspan></tspan></text>
+ class="BoundingBox"
+ x="5699"
+ y="3199"
+ width="2403"
+ height="1403"
+ id="rect228"
+ style="fill:none;stroke:none" /><path
+ d="m 6900,4600 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path230"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text232"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan234"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="6725"
+ y="4121"
+ id="tspan236"><tspan
+ id="tspan238"
+ style="fill:#000000;stroke:none">1</tspan></tspan></tspan></text>
</g></g><g
class="com.sun.star.drawing.CustomShape"
id="g240"
transform="translate(-3285.889,-3185.889)"><g
id="id10"><rect
- class="BoundingBox"
- x="10499"
- y="3199"
- width="2403"
- height="1403"
- id="rect243"
- style="fill:none;stroke:none" /><path
- d="m 11700,4600 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path245"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text247"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan249"
- style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
- class="TextPosition"
- x="11525"
- y="4121"
- id="tspan251"><tspan
- id="tspan253"
- style="fill:#000000;stroke:none">7</tspan></tspan></tspan></text>
+ class="BoundingBox"
+ x="10499"
+ y="3199"
+ width="2403"
+ height="1403"
+ id="rect243"
+ style="fill:none;stroke:none" /><path
+ d="m 11700,4600 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path245"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text247"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan249"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="11525"
+ y="4121"
+ id="tspan251"><tspan
+ id="tspan253"
+ style="fill:#000000;stroke:none">7</tspan></tspan></tspan></text>
</g></g><g
class="com.sun.star.drawing.CustomShape"
id="g255"
transform="translate(-3285.889,-3185.889)"><g
id="id11"><rect
- class="BoundingBox"
- x="12899"
- y="3199"
- width="2403"
- height="1403"
- id="rect258"
- style="fill:none;stroke:none" /><path
- d="m 14100,4600 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path260"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /></g></g><g
+ class="BoundingBox"
+ x="12899"
+ y="3199"
+ width="2403"
+ height="1403"
+ id="rect258"
+ style="fill:none;stroke:none" /><path
+ d="m 14100,4600 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path260"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /></g></g><g
class="com.sun.star.drawing.CustomShape"
id="g262"
transform="translate(-3285.889,-3185.889)"><g
id="id12"><rect
- class="BoundingBox"
- x="15299"
- y="3199"
- width="2403"
- height="1403"
- id="rect265"
- style="fill:none;stroke:none" /><path
- d="m 16500,4600 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path267"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text269"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan271"
- style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
- class="TextPosition"
- x="16325"
- y="4121"
- id="tspan273"><tspan
- id="tspan275"
- style="fill:#000000;stroke:none">9</tspan></tspan></tspan></text>
+ class="BoundingBox"
+ x="15299"
+ y="3199"
+ width="2403"
+ height="1403"
+ id="rect265"
+ style="fill:none;stroke:none" /><path
+ d="m 16500,4600 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path267"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text269"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan271"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="16325"
+ y="4121"
+ id="tspan273"><tspan
+ id="tspan275"
+ style="fill:#000000;stroke:none">9</tspan></tspan></tspan></text>
</g></g><g
class="com.sun.star.drawing.CustomShape"
id="g277"
transform="translate(-3285.889,-3185.889)"><g
id="id13"><rect
- class="BoundingBox"
- x="12899"
- y="3199"
- width="2403"
- height="1403"
- id="rect280"
- style="fill:none;stroke:none" /><path
- d="m 14100,4600 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path282"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text284"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan286"
- style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
- class="TextPosition"
- x="13925"
- y="4121"
- id="tspan288"><tspan
- id="tspan290"
- style="fill:#000000;stroke:none">8</tspan></tspan></tspan></text>
+ class="BoundingBox"
+ x="12899"
+ y="3199"
+ width="2403"
+ height="1403"
+ id="rect280"
+ style="fill:none;stroke:none" /><path
+ d="m 14100,4600 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path282"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text284"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan286"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="13925"
+ y="4121"
+ id="tspan288"><tspan
+ id="tspan290"
+ style="fill:#000000;stroke:none">8</tspan></tspan></tspan></text>
</g></g><g
class="com.sun.star.drawing.CustomShape"
id="g292"
transform="translate(-3285.889,-3185.889)"><g
id="id14"><rect
- class="BoundingBox"
- x="3299"
- y="4599"
- width="2403"
- height="1403"
- id="rect295"
- style="fill:none;stroke:none" /><path
- d="m 4500,6000 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path297"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text299"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan301"
- style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
- class="TextPosition"
- x="4325"
- y="5521"
- id="tspan303"><tspan
- id="tspan305"
- style="fill:#000000;stroke:none">2</tspan></tspan></tspan></text>
+ class="BoundingBox"
+ x="3299"
+ y="4599"
+ width="2403"
+ height="1403"
+ id="rect295"
+ style="fill:none;stroke:none" /><path
+ d="m 4500,6000 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path297"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text299"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan301"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="4325"
+ y="5521"
+ id="tspan303"><tspan
+ id="tspan305"
+ style="fill:#000000;stroke:none">2</tspan></tspan></tspan></text>
</g></g><g
class="com.sun.star.drawing.CustomShape"
id="g307"
transform="translate(-3285.889,-3185.889)"><g
id="id15"><rect
- class="BoundingBox"
- x="5699"
- y="4599"
- width="2403"
- height="1403"
- id="rect310"
- style="fill:none;stroke:none" /><path
- d="m 6900,6000 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path312"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /></g></g><g
+ class="BoundingBox"
+ x="5699"
+ y="4599"
+ width="2403"
+ height="1403"
+ id="rect310"
+ style="fill:none;stroke:none" /><path
+ d="m 6900,6000 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path312"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /></g></g><g
class="com.sun.star.drawing.CustomShape"
id="g314"
transform="translate(-3285.889,-3185.889)"><g
id="id16"><rect
- class="BoundingBox"
- x="8099"
- y="4599"
- width="2403"
- height="1403"
- id="rect317"
- style="fill:none;stroke:none" /><path
- d="m 9300,6000 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path319"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text321"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan323"
- style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
- class="TextPosition"
- x="9125"
- y="5521"
- id="tspan325"><tspan
- id="tspan327"
- style="fill:#000000;stroke:none">4</tspan></tspan></tspan></text>
+ class="BoundingBox"
+ x="8099"
+ y="4599"
+ width="2403"
+ height="1403"
+ id="rect317"
+ style="fill:none;stroke:none" /><path
+ d="m 9300,6000 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path319"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text321"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan323"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="9125"
+ y="5521"
+ id="tspan325"><tspan
+ id="tspan327"
+ style="fill:#000000;stroke:none">4</tspan></tspan></tspan></text>
</g></g><g
class="com.sun.star.drawing.CustomShape"
id="g329"
transform="translate(-3285.889,-3185.889)"><g
id="id17"><rect
- class="BoundingBox"
- x="5699"
- y="4599"
- width="2403"
- height="1403"
- id="rect332"
- style="fill:none;stroke:none" /><path
- d="m 6900,6000 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path334"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text336"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan338"
- style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
- class="TextPosition"
- x="6725"
- y="5521"
- id="tspan340"><tspan
- id="tspan342"
- style="fill:#000000;stroke:none">3</tspan></tspan></tspan></text>
+ class="BoundingBox"
+ x="5699"
+ y="4599"
+ width="2403"
+ height="1403"
+ id="rect332"
+ style="fill:none;stroke:none" /><path
+ d="m 6900,6000 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path334"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text336"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan338"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="6725"
+ y="5521"
+ id="tspan340"><tspan
+ id="tspan342"
+ style="fill:#000000;stroke:none">3</tspan></tspan></tspan></text>
</g></g><g
class="com.sun.star.drawing.CustomShape"
id="g344"
transform="translate(-3285.889,-3185.889)"><g
id="id18"><rect
- class="BoundingBox"
- x="10499"
- y="4599"
- width="2403"
- height="1403"
- id="rect347"
- style="fill:none;stroke:none" /><path
- d="m 11700,6000 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path349"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text351"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan353"
- style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
- class="TextPosition"
- x="11525"
- y="5521"
- id="tspan355"><tspan
- id="tspan357"
- style="fill:#000000;stroke:none">5</tspan></tspan></tspan></text>
+ class="BoundingBox"
+ x="10499"
+ y="4599"
+ width="2403"
+ height="1403"
+ id="rect347"
+ style="fill:none;stroke:none" /><path
+ d="m 11700,6000 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path349"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text351"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan353"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="11525"
+ y="5521"
+ id="tspan355"><tspan
+ id="tspan357"
+ style="fill:#000000;stroke:none">5</tspan></tspan></tspan></text>
</g></g><g
class="com.sun.star.drawing.CustomShape"
id="g359"
transform="translate(-3285.889,-3185.889)"><g
id="id19"><rect
- class="BoundingBox"
- x="12899"
- y="4599"
- width="2403"
- height="1403"
- id="rect362"
- style="fill:none;stroke:none" /><path
- d="m 14100,6000 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path364"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /></g></g><g
+ class="BoundingBox"
+ x="12899"
+ y="4599"
+ width="2403"
+ height="1403"
+ id="rect362"
+ style="fill:none;stroke:none" /><path
+ d="m 14100,6000 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path364"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /></g></g><g
class="com.sun.star.drawing.CustomShape"
id="g366"
transform="translate(-3285.889,-3185.889)"><g
id="id20"><rect
- class="BoundingBox"
- x="15299"
- y="4599"
- width="2403"
- height="1403"
- id="rect369"
- style="fill:none;stroke:none" /><path
- d="m 16500,6000 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path371"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text373"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan375"
- style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
- class="TextPosition"
- x="16149"
- y="5521"
- id="tspan377"><tspan
- id="tspan379"
- style="fill:#000000;stroke:none">11</tspan></tspan></tspan></text>
+ class="BoundingBox"
+ x="15299"
+ y="4599"
+ width="2403"
+ height="1403"
+ id="rect369"
+ style="fill:none;stroke:none" /><path
+ d="m 16500,6000 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path371"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text373"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan375"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="16149"
+ y="5521"
+ id="tspan377"><tspan
+ id="tspan379"
+ style="fill:#000000;stroke:none">11</tspan></tspan></tspan></text>
</g></g><g
class="com.sun.star.drawing.CustomShape"
id="g381"
transform="translate(-3285.889,-3185.889)"><g
id="id21"><rect
- class="BoundingBox"
- x="12899"
- y="4599"
- width="2403"
- height="1403"
- id="rect384"
- style="fill:none;stroke:none" /><path
- d="m 14100,6000 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path386"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text388"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan390"
- style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
- class="TextPosition"
- x="13749"
- y="5521"
- id="tspan392"><tspan
- id="tspan394"
- style="fill:#000000;stroke:none">10</tspan></tspan></tspan></text>
+ class="BoundingBox"
+ x="12899"
+ y="4599"
+ width="2403"
+ height="1403"
+ id="rect384"
+ style="fill:none;stroke:none" /><path
+ d="m 14100,6000 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path386"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text388"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan390"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="13749"
+ y="5521"
+ id="tspan392"><tspan
+ id="tspan394"
+ style="fill:#000000;stroke:none">10</tspan></tspan></tspan></text>
</g></g><g
class="com.sun.star.drawing.CustomShape"
id="g396"
transform="translate(-3285.889,-3185.889)"><g
id="id22"><rect
- class="BoundingBox"
- x="3299"
- y="5999"
- width="2403"
- height="1403"
- id="rect399"
- style="fill:none;stroke:none" /><path
- d="m 4500,7400 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path401"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text403"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan405"
- style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
- class="TextPosition"
- x="4149"
- y="6921"
- id="tspan407"><tspan
- id="tspan409"
- style="fill:#000000;stroke:none">12</tspan></tspan></tspan></text>
+ class="BoundingBox"
+ x="3299"
+ y="5999"
+ width="2403"
+ height="1403"
+ id="rect399"
+ style="fill:none;stroke:none" /><path
+ d="m 4500,7400 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path401"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text403"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan405"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="4149"
+ y="6921"
+ id="tspan407"><tspan
+ id="tspan409"
+ style="fill:#000000;stroke:none">12</tspan></tspan></tspan></text>
</g></g><g
class="com.sun.star.drawing.CustomShape"
id="g411"
transform="translate(-3285.889,-3185.889)"><g
id="id23"><rect
- class="BoundingBox"
- x="5699"
- y="5999"
- width="2403"
- height="1403"
- id="rect414"
- style="fill:none;stroke:none" /><path
- d="m 6900,7400 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path416"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /></g></g><g
+ class="BoundingBox"
+ x="5699"
+ y="5999"
+ width="2403"
+ height="1403"
+ id="rect414"
+ style="fill:none;stroke:none" /><path
+ d="m 6900,7400 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path416"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /></g></g><g
class="com.sun.star.drawing.CustomShape"
id="g418"
transform="translate(-3285.889,-3185.889)"><g
id="id24"><rect
- class="BoundingBox"
- x="8099"
- y="5999"
- width="2403"
- height="1403"
- id="rect421"
- style="fill:none;stroke:none" /><path
- d="m 9300,7400 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path423"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text425"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan427"
- style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
- class="TextPosition"
- x="8949"
- y="6921"
- id="tspan429"><tspan
- id="tspan431"
- style="fill:#000000;stroke:none">18</tspan></tspan></tspan></text>
+ class="BoundingBox"
+ x="8099"
+ y="5999"
+ width="2403"
+ height="1403"
+ id="rect421"
+ style="fill:none;stroke:none" /><path
+ d="m 9300,7400 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path423"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text425"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan427"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="8949"
+ y="6921"
+ id="tspan429"><tspan
+ id="tspan431"
+ style="fill:#000000;stroke:none">18</tspan></tspan></tspan></text>
</g></g><g
class="com.sun.star.drawing.CustomShape"
id="g433"
transform="translate(-3285.889,-3185.889)"><g
id="id25"><rect
- class="BoundingBox"
- x="5699"
- y="5999"
- width="2403"
- height="1403"
- id="rect436"
- style="fill:none;stroke:none" /><path
- d="m 6900,7400 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path438"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text440"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan442"
- style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
- class="TextPosition"
- x="6549"
- y="6921"
- id="tspan444"><tspan
- id="tspan446"
- style="fill:#000000;stroke:none">13</tspan></tspan></tspan></text>
+ class="BoundingBox"
+ x="5699"
+ y="5999"
+ width="2403"
+ height="1403"
+ id="rect436"
+ style="fill:none;stroke:none" /><path
+ d="m 6900,7400 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path438"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text440"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan442"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="6549"
+ y="6921"
+ id="tspan444"><tspan
+ id="tspan446"
+ style="fill:#000000;stroke:none">13</tspan></tspan></tspan></text>
</g></g><g
class="com.sun.star.drawing.CustomShape"
id="g448"
transform="translate(-3285.889,-3185.889)"><g
id="id26"><rect
- class="BoundingBox"
- x="10499"
- y="5999"
- width="2403"
- height="1403"
- id="rect451"
- style="fill:none;stroke:none" /><path
- d="m 11700,7400 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path453"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text455"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan457"
- style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
- class="TextPosition"
- x="11349"
- y="6921"
- id="tspan459"><tspan
- id="tspan461"
- style="fill:#000000;stroke:none">19</tspan></tspan></tspan></text>
+ class="BoundingBox"
+ x="10499"
+ y="5999"
+ width="2403"
+ height="1403"
+ id="rect451"
+ style="fill:none;stroke:none" /><path
+ d="m 11700,7400 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path453"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text455"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan457"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="11349"
+ y="6921"
+ id="tspan459"><tspan
+ id="tspan461"
+ style="fill:#000000;stroke:none">19</tspan></tspan></tspan></text>
</g></g><g
class="com.sun.star.drawing.CustomShape"
id="g463"
transform="translate(-3285.889,-3185.889)"><g
id="id27"><rect
- class="BoundingBox"
- x="12899"
- y="5999"
- width="2403"
- height="1403"
- id="rect466"
- style="fill:none;stroke:none" /><path
- d="m 14100,7400 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path468"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /></g></g><g
+ class="BoundingBox"
+ x="12899"
+ y="5999"
+ width="2403"
+ height="1403"
+ id="rect466"
+ style="fill:none;stroke:none" /><path
+ d="m 14100,7400 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path468"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /></g></g><g
class="com.sun.star.drawing.CustomShape"
id="g470"
transform="translate(-3285.889,-3185.889)"><g
id="id28"><rect
- class="BoundingBox"
- x="15299"
- y="5999"
- width="2403"
- height="1403"
- id="rect473"
- style="fill:none;stroke:none" /><path
- d="m 16500,7400 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path475"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text477"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan479"
- style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
- class="TextPosition"
- x="16149"
- y="6921"
- id="tspan481"><tspan
- id="tspan483"
- style="fill:#000000;stroke:none">21</tspan></tspan></tspan></text>
+ class="BoundingBox"
+ x="15299"
+ y="5999"
+ width="2403"
+ height="1403"
+ id="rect473"
+ style="fill:none;stroke:none" /><path
+ d="m 16500,7400 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path475"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text477"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan479"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="16149"
+ y="6921"
+ id="tspan481"><tspan
+ id="tspan483"
+ style="fill:#000000;stroke:none">21</tspan></tspan></tspan></text>
</g></g><g
class="com.sun.star.drawing.CustomShape"
id="g485"
transform="translate(-3285.889,-3185.889)"><g
id="id29"><rect
- class="BoundingBox"
- x="12899"
- y="5999"
- width="2403"
- height="1403"
- id="rect488"
- style="fill:none;stroke:none" /><path
- d="m 14100,7400 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path490"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text492"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan494"
- style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
- class="TextPosition"
- x="13749"
- y="6921"
- id="tspan496"><tspan
- id="tspan498"
- style="fill:#000000;stroke:none">20</tspan></tspan></tspan></text>
+ class="BoundingBox"
+ x="12899"
+ y="5999"
+ width="2403"
+ height="1403"
+ id="rect488"
+ style="fill:none;stroke:none" /><path
+ d="m 14100,7400 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path490"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text492"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan494"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="13749"
+ y="6921"
+ id="tspan496"><tspan
+ id="tspan498"
+ style="fill:#000000;stroke:none">20</tspan></tspan></tspan></text>
</g></g><g
class="com.sun.star.drawing.CustomShape"
id="g500"
transform="translate(-3285.889,-3185.889)"><g
id="id30"><rect
- class="BoundingBox"
- x="3299"
- y="7399"
- width="2403"
- height="1403"
- id="rect503"
- style="fill:none;stroke:none" /><path
- d="m 4500,8800 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path505"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text507"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan509"
- style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
- class="TextPosition"
- x="4149"
- y="8321"
- id="tspan511"><tspan
- id="tspan513"
- style="fill:#000000;stroke:none">14</tspan></tspan></tspan></text>
+ class="BoundingBox"
+ x="3299"
+ y="7399"
+ width="2403"
+ height="1403"
+ id="rect503"
+ style="fill:none;stroke:none" /><path
+ d="m 4500,8800 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path505"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text507"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan509"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="4149"
+ y="8321"
+ id="tspan511"><tspan
+ id="tspan513"
+ style="fill:#000000;stroke:none">14</tspan></tspan></tspan></text>
</g></g><g
class="com.sun.star.drawing.CustomShape"
id="g515"
transform="translate(-3285.889,-3185.889)"><g
id="id31"><rect
- class="BoundingBox"
- x="5699"
- y="7399"
- width="2403"
- height="1403"
- id="rect518"
- style="fill:none;stroke:none" /><path
- d="m 6900,8800 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path520"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /></g></g><g
+ class="BoundingBox"
+ x="5699"
+ y="7399"
+ width="2403"
+ height="1403"
+ id="rect518"
+ style="fill:none;stroke:none" /><path
+ d="m 6900,8800 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path520"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /></g></g><g
class="com.sun.star.drawing.CustomShape"
id="g522"
transform="translate(-3285.889,-3185.889)"><g
id="id32"><rect
- class="BoundingBox"
- x="8099"
- y="7399"
- width="2403"
- height="1403"
- id="rect525"
- style="fill:none;stroke:none" /><path
- d="m 9300,8800 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path527"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text529"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan531"
- style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
- class="TextPosition"
- x="8949"
- y="8321"
- id="tspan533"><tspan
- id="tspan535"
- style="fill:#000000;stroke:none">16</tspan></tspan></tspan></text>
+ class="BoundingBox"
+ x="8099"
+ y="7399"
+ width="2403"
+ height="1403"
+ id="rect525"
+ style="fill:none;stroke:none" /><path
+ d="m 9300,8800 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path527"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text529"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan531"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="8949"
+ y="8321"
+ id="tspan533"><tspan
+ id="tspan535"
+ style="fill:#000000;stroke:none">16</tspan></tspan></tspan></text>
</g></g><g
class="com.sun.star.drawing.CustomShape"
id="g537"
transform="translate(-3285.889,-3185.889)"><g
id="id33"><rect
- class="BoundingBox"
- x="5699"
- y="7399"
- width="2403"
- height="1403"
- id="rect540"
- style="fill:none;stroke:none" /><path
- d="m 6900,8800 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path542"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text544"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan546"
- style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
- class="TextPosition"
- x="6549"
- y="8321"
- id="tspan548"><tspan
- id="tspan550"
- style="fill:#000000;stroke:none">15</tspan></tspan></tspan></text>
+ class="BoundingBox"
+ x="5699"
+ y="7399"
+ width="2403"
+ height="1403"
+ id="rect540"
+ style="fill:none;stroke:none" /><path
+ d="m 6900,8800 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path542"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text544"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan546"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="6549"
+ y="8321"
+ id="tspan548"><tspan
+ id="tspan550"
+ style="fill:#000000;stroke:none">15</tspan></tspan></tspan></text>
</g></g><g
class="com.sun.star.drawing.CustomShape"
id="g552"
transform="translate(-3285.889,-3185.889)"><g
id="id34"><rect
- class="BoundingBox"
- x="10499"
- y="7399"
- width="2403"
- height="1403"
- id="rect555"
- style="fill:none;stroke:none" /><path
- d="m 11700,8800 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path557"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text559"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan561"
- style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
- class="TextPosition"
- x="11349"
- y="8321"
- id="tspan563"><tspan
- id="tspan565"
- style="fill:#000000;stroke:none">17</tspan></tspan></tspan></text>
+ class="BoundingBox"
+ x="10499"
+ y="7399"
+ width="2403"
+ height="1403"
+ id="rect555"
+ style="fill:none;stroke:none" /><path
+ d="m 11700,8800 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path557"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text559"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan561"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="11349"
+ y="8321"
+ id="tspan563"><tspan
+ id="tspan565"
+ style="fill:#000000;stroke:none">17</tspan></tspan></tspan></text>
</g></g><g
class="com.sun.star.drawing.CustomShape"
id="g567"
transform="translate(-3285.889,-3185.889)"><g
id="id35"><rect
- class="BoundingBox"
- x="12899"
- y="7399"
- width="2403"
- height="1403"
- id="rect570"
- style="fill:none;stroke:none" /><path
- d="m 14100,8800 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path572"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /></g></g><g
+ class="BoundingBox"
+ x="12899"
+ y="7399"
+ width="2403"
+ height="1403"
+ id="rect570"
+ style="fill:none;stroke:none" /><path
+ d="m 14100,8800 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path572"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /></g></g><g
class="com.sun.star.drawing.CustomShape"
id="g574"
transform="translate(-3285.889,-3185.889)"><g
id="id36"><rect
- class="BoundingBox"
- x="15299"
- y="7399"
- width="2403"
- height="1403"
- id="rect577"
- style="fill:none;stroke:none" /><path
- d="m 16500,8800 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path579"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text581"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan583"
- style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
- class="TextPosition"
- x="16149"
- y="8321"
- id="tspan585"><tspan
- id="tspan587"
- style="fill:#000000;stroke:none">23</tspan></tspan></tspan></text>
+ class="BoundingBox"
+ x="15299"
+ y="7399"
+ width="2403"
+ height="1403"
+ id="rect577"
+ style="fill:none;stroke:none" /><path
+ d="m 16500,8800 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path579"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text581"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan583"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="16149"
+ y="8321"
+ id="tspan585"><tspan
+ id="tspan587"
+ style="fill:#000000;stroke:none">23</tspan></tspan></tspan></text>
</g></g><g
class="com.sun.star.drawing.CustomShape"
id="g589"
transform="translate(-3285.889,-3185.889)"><g
id="id37"><rect
- class="BoundingBox"
- x="12899"
- y="7399"
- width="2403"
- height="1403"
- id="rect592"
- style="fill:none;stroke:none" /><path
- d="m 14100,8800 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path594"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text596"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan598"
- style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
- class="TextPosition"
- x="13749"
- y="8321"
- id="tspan600"><tspan
- id="tspan602"
- style="fill:#000000;stroke:none">22</tspan></tspan></tspan></text>
+ class="BoundingBox"
+ x="12899"
+ y="7399"
+ width="2403"
+ height="1403"
+ id="rect592"
+ style="fill:none;stroke:none" /><path
+ d="m 14100,8800 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path594"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text596"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan598"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="13749"
+ y="8321"
+ id="tspan600"><tspan
+ id="tspan602"
+ style="fill:#000000;stroke:none">22</tspan></tspan></tspan></text>
</g></g><g
class="com.sun.star.drawing.CustomShape"
id="g604"
transform="translate(-3285.889,-3185.889)"><g
id="id38"><rect
- class="BoundingBox"
- x="3299"
- y="8799"
- width="2403"
- height="1403"
- id="rect607"
- style="fill:none;stroke:none" /><path
- d="m 4500,10200 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path609"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text611"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan613"
- style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
- class="TextPosition"
- x="4149"
- y="9721"
- id="tspan615"><tspan
- id="tspan617"
- style="fill:#000000;stroke:none">24</tspan></tspan></tspan></text>
+ class="BoundingBox"
+ x="3299"
+ y="8799"
+ width="2403"
+ height="1403"
+ id="rect607"
+ style="fill:none;stroke:none" /><path
+ d="m 4500,10200 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path609"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text611"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan613"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="4149"
+ y="9721"
+ id="tspan615"><tspan
+ id="tspan617"
+ style="fill:#000000;stroke:none">24</tspan></tspan></tspan></text>
</g></g><g
class="com.sun.star.drawing.CustomShape"
id="g619"
transform="translate(-3285.889,-3185.889)"><g
id="id39"><rect
- class="BoundingBox"
- x="5699"
- y="8799"
- width="2403"
- height="1403"
- id="rect622"
- style="fill:none;stroke:none" /><path
- d="m 6900,10200 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path624"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /></g></g><g
+ class="BoundingBox"
+ x="5699"
+ y="8799"
+ width="2403"
+ height="1403"
+ id="rect622"
+ style="fill:none;stroke:none" /><path
+ d="m 6900,10200 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path624"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /></g></g><g
class="com.sun.star.drawing.CustomShape"
id="g626"
transform="translate(-3285.889,-3185.889)"><g
id="id40"><rect
- class="BoundingBox"
- x="8099"
- y="8799"
- width="2403"
- height="1403"
- id="rect629"
- style="fill:none;stroke:none" /><path
- d="m 9300,10200 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path631"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text633"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan635"
- style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
- class="TextPosition"
- x="8949"
- y="9721"
- id="tspan637"><tspan
- id="tspan639"
- style="fill:#000000;stroke:none">26</tspan></tspan></tspan></text>
+ class="BoundingBox"
+ x="8099"
+ y="8799"
+ width="2403"
+ height="1403"
+ id="rect629"
+ style="fill:none;stroke:none" /><path
+ d="m 9300,10200 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path631"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text633"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan635"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="8949"
+ y="9721"
+ id="tspan637"><tspan
+ id="tspan639"
+ style="fill:#000000;stroke:none">26</tspan></tspan></tspan></text>
</g></g><g
class="com.sun.star.drawing.CustomShape"
id="g641"
transform="translate(-3285.889,-3185.889)"><g
id="id41"><rect
- class="BoundingBox"
- x="5699"
- y="8799"
- width="2403"
- height="1403"
- id="rect644"
- style="fill:none;stroke:none" /><path
- d="m 6900,10200 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path646"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text648"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan650"
- style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
- class="TextPosition"
- x="6549"
- y="9721"
- id="tspan652"><tspan
- id="tspan654"
- style="fill:#000000;stroke:none">25</tspan></tspan></tspan></text>
+ class="BoundingBox"
+ x="5699"
+ y="8799"
+ width="2403"
+ height="1403"
+ id="rect644"
+ style="fill:none;stroke:none" /><path
+ d="m 6900,10200 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path646"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text648"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan650"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="6549"
+ y="9721"
+ id="tspan652"><tspan
+ id="tspan654"
+ style="fill:#000000;stroke:none">25</tspan></tspan></tspan></text>
</g></g><g
class="com.sun.star.drawing.CustomShape"
id="g656"
transform="translate(-3285.889,-3185.889)"><g
id="id42"><rect
- class="BoundingBox"
- x="10499"
- y="8799"
- width="2403"
- height="1403"
- id="rect659"
- style="fill:none;stroke:none" /><path
- d="m 11700,10200 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path661"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text663"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan665"
- style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
- class="TextPosition"
- x="11349"
- y="9721"
- id="tspan667"><tspan
- id="tspan669"
- style="fill:#000000;stroke:none">27</tspan></tspan></tspan></text>
+ class="BoundingBox"
+ x="10499"
+ y="8799"
+ width="2403"
+ height="1403"
+ id="rect659"
+ style="fill:none;stroke:none" /><path
+ d="m 11700,10200 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path661"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text663"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan665"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="11349"
+ y="9721"
+ id="tspan667"><tspan
+ id="tspan669"
+ style="fill:#000000;stroke:none">27</tspan></tspan></tspan></text>
</g></g><g
class="com.sun.star.drawing.CustomShape"
id="g671"
transform="translate(-3285.889,-3185.889)"><g
id="id43"><rect
- class="BoundingBox"
- x="12899"
- y="8799"
- width="2403"
- height="1403"
- id="rect674"
- style="fill:none;stroke:none" /><path
- d="m 14100,10200 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path676"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /></g></g><g
+ class="BoundingBox"
+ x="12899"
+ y="8799"
+ width="2403"
+ height="1403"
+ id="rect674"
+ style="fill:none;stroke:none" /><path
+ d="m 14100,10200 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path676"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /></g></g><g
class="com.sun.star.drawing.CustomShape"
id="g678"
transform="translate(-3285.889,-3185.889)"><g
id="id44"><rect
- class="BoundingBox"
- x="15299"
- y="8799"
- width="2403"
- height="1403"
- id="rect681"
- style="fill:none;stroke:none" /><path
- d="m 16500,10200 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path683"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text685"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan687"
- style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
- class="TextPosition"
- x="16149"
- y="9721"
- id="tspan689"><tspan
- id="tspan691"
- style="fill:#000000;stroke:none">29</tspan></tspan></tspan></text>
+ class="BoundingBox"
+ x="15299"
+ y="8799"
+ width="2403"
+ height="1403"
+ id="rect681"
+ style="fill:none;stroke:none" /><path
+ d="m 16500,10200 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path683"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text685"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan687"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="16149"
+ y="9721"
+ id="tspan689"><tspan
+ id="tspan691"
+ style="fill:#000000;stroke:none">29</tspan></tspan></tspan></text>
</g></g><g
class="com.sun.star.drawing.CustomShape"
id="g693"
transform="translate(-3285.889,-3185.889)"><g
id="id45"><rect
- class="BoundingBox"
- x="12899"
- y="8799"
- width="2403"
- height="1403"
- id="rect696"
- style="fill:none;stroke:none" /><path
- d="m 14100,10200 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
- id="path698"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#3465a4" /><text
- class="TextShape"
- id="text700"><tspan
- class="TextParagraph"
- font-size="635px"
- font-weight="400"
- id="tspan702"
- style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
- class="TextPosition"
- x="13749"
- y="9721"
- id="tspan704"><tspan
- id="tspan706"
- style="fill:#000000;stroke:none">28</tspan></tspan></tspan></text>
+ class="BoundingBox"
+ x="12899"
+ y="8799"
+ width="2403"
+ height="1403"
+ id="rect696"
+ style="fill:none;stroke:none" /><path
+ d="m 14100,10200 -1200,0 0,-1400 2400,0 0,1400 -1200,0 z"
+ id="path698"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#3465a4" /><text
+ class="TextShape"
+ id="text700"><tspan
+ class="TextParagraph"
+ font-size="635px"
+ font-weight="400"
+ id="tspan702"
+ style="font-weight:400;font-size:635px;font-family:sans-serif"><tspan
+ class="TextPosition"
+ x="13749"
+ y="9721"
+ id="tspan704"><tspan
+ id="tspan706"
+ style="fill:#000000;stroke:none">28</tspan></tspan></tspan></text>
</g></g><g
class="com.sun.star.drawing.LineShape"
id="g708"
transform="translate(-3285.889,-3185.889)"><g
id="id46"><rect
- class="BoundingBox"
- x="5199"
- y="3850"
- width="1402"
- height="301"
- id="rect711"
- style="fill:none;stroke:none" /><path
- d="m 5200,4000 970,0"
- id="path713"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#ff3333" /><path
- d="m 6600,4000 -450,-150 0,300 450,-150 z"
- id="path715"
- inkscape:connector-curvature="0"
- style="fill:#ff3333;stroke:none" /></g></g><g
+ class="BoundingBox"
+ x="5199"
+ y="3850"
+ width="1402"
+ height="301"
+ id="rect711"
+ style="fill:none;stroke:none" /><path
+ d="m 5200,4000 970,0"
+ id="path713"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 6600,4000 -450,-150 0,300 450,-150 z"
+ id="path715"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
class="com.sun.star.drawing.LineShape"
id="g717"
transform="translate(-3285.889,-3185.889)"><g
id="id47"><rect
- class="BoundingBox"
- x="5000"
- y="4299"
- width="1202"
- height="802"
- id="rect720"
- style="fill:none;stroke:none" /><path
- d="m 6200,4300 -842,561"
- id="path722"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#ff3333" /><path
- d="m 5000,5100 458,-125 -167,-249 -291,374 z"
- id="path724"
- inkscape:connector-curvature="0"
- style="fill:#ff3333;stroke:none" /></g></g><g
+ class="BoundingBox"
+ x="5000"
+ y="4299"
+ width="1202"
+ height="802"
+ id="rect720"
+ style="fill:none;stroke:none" /><path
+ d="m 6200,4300 -842,561"
+ id="path722"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 5000,5100 458,-125 -167,-249 -291,374 z"
+ id="path724"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
class="com.sun.star.drawing.LineShape"
id="g726"
transform="translate(-3285.889,-3185.889)"><g
id="id48"><rect
- class="BoundingBox"
- x="5399"
- y="5250"
- width="1202"
- height="301"
- id="rect729"
- style="fill:none;stroke:none" /><path
- d="m 5400,5400 770,0"
- id="path731"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#ff3333" /><path
- d="m 6600,5400 -450,-150 0,300 450,-150 z"
- id="path733"
- inkscape:connector-curvature="0"
- style="fill:#ff3333;stroke:none" /></g></g><g
+ class="BoundingBox"
+ x="5399"
+ y="5250"
+ width="1202"
+ height="301"
+ id="rect729"
+ style="fill:none;stroke:none" /><path
+ d="m 5400,5400 770,0"
+ id="path731"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 6600,5400 -450,-150 0,300 450,-150 z"
+ id="path733"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
class="com.sun.star.drawing.LineShape"
id="g735"
transform="translate(-3285.889,-3185.889)"><g
id="id49"><rect
- class="BoundingBox"
- x="7599"
- y="5250"
- width="1202"
- height="301"
- id="rect738"
- style="fill:none;stroke:none" /><path
- d="m 7600,5400 770,0"
- id="path740"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#ff3333" /><path
- d="m 8800,5400 -450,-150 0,300 450,-150 z"
- id="path742"
- inkscape:connector-curvature="0"
- style="fill:#ff3333;stroke:none" /></g></g><g
+ class="BoundingBox"
+ x="7599"
+ y="5250"
+ width="1202"
+ height="301"
+ id="rect738"
+ style="fill:none;stroke:none" /><path
+ d="m 7600,5400 770,0"
+ id="path740"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 8800,5400 -450,-150 0,300 450,-150 z"
+ id="path742"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
class="com.sun.star.drawing.LineShape"
id="g744"
transform="translate(-3285.889,-3185.889)"><g
id="id50"><rect
- class="BoundingBox"
- x="9799"
- y="5250"
- width="1402"
- height="301"
- id="rect747"
- style="fill:none;stroke:none" /><path
- d="m 9800,5400 970,0"
- id="path749"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#ff3333" /><path
- d="m 11200,5400 -450,-150 0,300 450,-150 z"
- id="path751"
- inkscape:connector-curvature="0"
- style="fill:#ff3333;stroke:none" /></g></g><g
+ class="BoundingBox"
+ x="9799"
+ y="5250"
+ width="1402"
+ height="301"
+ id="rect747"
+ style="fill:none;stroke:none" /><path
+ d="m 9800,5400 970,0"
+ id="path749"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 11200,5400 -450,-150 0,300 450,-150 z"
+ id="path751"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
class="com.sun.star.drawing.LineShape"
id="g753"
transform="translate(-3285.889,-3185.889)"><g
id="id51"><rect
- class="BoundingBox"
- x="9900"
- y="4200"
- width="1202"
- height="802"
- id="rect756"
- style="fill:none;stroke:none" /><path
- d="m 11100,5000 -842,-561"
- id="path758"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#ff3333" /><path
- d="m 9900,4200 291,374 167,-249 -458,-125 z"
- id="path760"
- inkscape:connector-curvature="0"
- style="fill:#ff3333;stroke:none" /></g></g><g
+ class="BoundingBox"
+ x="9900"
+ y="4200"
+ width="1202"
+ height="802"
+ id="rect756"
+ style="fill:none;stroke:none" /><path
+ d="m 11100,5000 -842,-561"
+ id="path758"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 9900,4200 291,374 167,-249 -458,-125 z"
+ id="path760"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
class="com.sun.star.drawing.LineShape"
id="g762"
transform="translate(-3285.889,-3185.889)"><g
id="id52"><rect
- class="BoundingBox"
- x="9999"
- y="3850"
- width="1402"
- height="301"
- id="rect765"
- style="fill:none;stroke:none" /><path
- d="m 10000,4000 970,0"
- id="path767"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#ff3333" /><path
- d="m 11400,4000 -450,-150 0,300 450,-150 z"
- id="path769"
- inkscape:connector-curvature="0"
- style="fill:#ff3333;stroke:none" /></g></g><g
+ class="BoundingBox"
+ x="9999"
+ y="3850"
+ width="1402"
+ height="301"
+ id="rect765"
+ style="fill:none;stroke:none" /><path
+ d="m 10000,4000 970,0"
+ id="path767"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 11400,4000 -450,-150 0,300 450,-150 z"
+ id="path769"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
class="com.sun.star.drawing.LineShape"
id="g771"
transform="translate(-3285.889,-3185.889)"><g
id="id53"><rect
- class="BoundingBox"
- x="12399"
- y="3850"
- width="1202"
- height="301"
- id="rect774"
- style="fill:none;stroke:none" /><path
- d="m 12400,4000 770,0"
- id="path776"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#ff3333" /><path
- d="m 13600,4000 -450,-150 0,300 450,-150 z"
- id="path778"
- inkscape:connector-curvature="0"
- style="fill:#ff3333;stroke:none" /></g></g><g
+ class="BoundingBox"
+ x="12399"
+ y="3850"
+ width="1202"
+ height="301"
+ id="rect774"
+ style="fill:none;stroke:none" /><path
+ d="m 12400,4000 770,0"
+ id="path776"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 13600,4000 -450,-150 0,300 450,-150 z"
+ id="path778"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
class="com.sun.star.drawing.LineShape"
id="g780"
transform="translate(-3285.889,-3185.889)"><g
id="id54"><rect
- class="BoundingBox"
- x="14799"
- y="3850"
- width="1202"
- height="301"
- id="rect783"
- style="fill:none;stroke:none" /><path
- d="m 14800,4000 770,0"
- id="path785"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#ff3333" /><path
- d="m 16000,4000 -450,-150 0,300 450,-150 z"
- id="path787"
- inkscape:connector-curvature="0"
- style="fill:#ff3333;stroke:none" /></g></g><g
+ class="BoundingBox"
+ x="14799"
+ y="3850"
+ width="1202"
+ height="301"
+ id="rect783"
+ style="fill:none;stroke:none" /><path
+ d="m 14800,4000 770,0"
+ id="path785"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 16000,4000 -450,-150 0,300 450,-150 z"
+ id="path787"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
class="com.sun.star.drawing.LineShape"
id="g789"
transform="translate(-3285.889,-3185.889)"><g
id="id55"><rect
- class="BoundingBox"
- x="14400"
- y="4399"
- width="1402"
- height="602"
- id="rect792"
- style="fill:none;stroke:none" /><path
- d="m 15800,4400 -1005,431"
- id="path794"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#ff3333" /><path
- d="m 14400,5000 473,-39 -118,-276 -355,315 z"
- id="path796"
- inkscape:connector-curvature="0"
- style="fill:#ff3333;stroke:none" /></g></g><g
+ class="BoundingBox"
+ x="14400"
+ y="4399"
+ width="1402"
+ height="602"
+ id="rect792"
+ style="fill:none;stroke:none" /><path
+ d="m 15800,4400 -1005,431"
+ id="path794"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 14400,5000 473,-39 -118,-276 -355,315 z"
+ id="path796"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
class="com.sun.star.drawing.LineShape"
id="g798"
transform="translate(-3285.889,-3185.889)"><g
id="id56"><rect
- class="BoundingBox"
- x="14599"
- y="5250"
- width="1402"
- height="301"
- id="rect801"
- style="fill:none;stroke:none" /><path
- d="m 14600,5400 970,0"
- id="path803"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#ff3333" /><path
- d="m 16000,5400 -450,-150 0,300 450,-150 z"
- id="path805"
- inkscape:connector-curvature="0"
- style="fill:#ff3333;stroke:none" /></g></g><g
+ class="BoundingBox"
+ x="14599"
+ y="5250"
+ width="1402"
+ height="301"
+ id="rect801"
+ style="fill:none;stroke:none" /><path
+ d="m 14600,5400 970,0"
+ id="path803"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 16000,5400 -450,-150 0,300 450,-150 z"
+ id="path805"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
class="com.sun.star.drawing.LineShape"
id="g807"
transform="translate(-3285.889,-3185.889)"><g
id="id57"><rect
- class="BoundingBox"
- x="5199"
- y="6550"
- width="1402"
- height="301"
- id="rect810"
- style="fill:none;stroke:none" /><path
- d="m 5200,6700 970,0"
- id="path812"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#ff3333" /><path
- d="m 6600,6700 -450,-150 0,300 450,-150 z"
- id="path814"
- inkscape:connector-curvature="0"
- style="fill:#ff3333;stroke:none" /></g></g><g
+ class="BoundingBox"
+ x="5199"
+ y="6550"
+ width="1402"
+ height="301"
+ id="rect810"
+ style="fill:none;stroke:none" /><path
+ d="m 5200,6700 970,0"
+ id="path812"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 6600,6700 -450,-150 0,300 450,-150 z"
+ id="path814"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
class="com.sun.star.drawing.LineShape"
id="g816"
transform="translate(-3285.889,-3129.4446)"><g
id="id58"><rect
- class="BoundingBox"
- x="5000"
- y="6999"
- width="1202"
- height="802"
- id="rect819"
- style="fill:none;stroke:none" /><path
- d="m 6200,7000 -842,561"
- id="path821"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#ff3333" /><path
- d="m 5000,7800 458,-125 -167,-249 -291,374 z"
- id="path823"
- inkscape:connector-curvature="0"
- style="fill:#ff3333;stroke:none" /></g></g><g
+ class="BoundingBox"
+ x="5000"
+ y="6999"
+ width="1202"
+ height="802"
+ id="rect819"
+ style="fill:none;stroke:none" /><path
+ d="m 6200,7000 -842,561"
+ id="path821"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 5000,7800 458,-125 -167,-249 -291,374 z"
+ id="path823"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
class="com.sun.star.drawing.LineShape"
id="g825"
transform="translate(-3285.889,-3185.889)"><g
id="id59"><rect
- class="BoundingBox"
- x="5399"
- y="7950"
- width="1202"
- height="301"
- id="rect828"
- style="fill:none;stroke:none" /><path
- d="m 5400,8100 770,0"
- id="path830"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#ff3333" /><path
- d="m 6600,8100 -450,-150 0,300 450,-150 z"
- id="path832"
- inkscape:connector-curvature="0"
- style="fill:#ff3333;stroke:none" /></g></g><g
+ class="BoundingBox"
+ x="5399"
+ y="7950"
+ width="1202"
+ height="301"
+ id="rect828"
+ style="fill:none;stroke:none" /><path
+ d="m 5400,8100 770,0"
+ id="path830"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 6600,8100 -450,-150 0,300 450,-150 z"
+ id="path832"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
class="com.sun.star.drawing.LineShape"
id="g834"
transform="translate(-3285.889,-3185.889)"><g
id="id60"><rect
- class="BoundingBox"
- x="7599"
- y="7950"
- width="1202"
- height="301"
- id="rect837"
- style="fill:none;stroke:none" /><path
- d="m 7600,8100 770,0"
- id="path839"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#ff3333" /><path
- d="m 8800,8100 -450,-150 0,300 450,-150 z"
- id="path841"
- inkscape:connector-curvature="0"
- style="fill:#ff3333;stroke:none" /></g></g><g
+ class="BoundingBox"
+ x="7599"
+ y="7950"
+ width="1202"
+ height="301"
+ id="rect837"
+ style="fill:none;stroke:none" /><path
+ d="m 7600,8100 770,0"
+ id="path839"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 8800,8100 -450,-150 0,300 450,-150 z"
+ id="path841"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
class="com.sun.star.drawing.LineShape"
id="g843"
transform="translate(-3285.889,-3185.889)"><g
id="id61"><rect
- class="BoundingBox"
- x="9799"
- y="7950"
- width="1402"
- height="301"
- id="rect846"
- style="fill:none;stroke:none" /><path
- d="m 9800,8100 970,0"
- id="path848"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#ff3333" /><path
- d="m 11200,8100 -450,-150 0,300 450,-150 z"
- id="path850"
- inkscape:connector-curvature="0"
- style="fill:#ff3333;stroke:none" /></g></g><g
+ class="BoundingBox"
+ x="9799"
+ y="7950"
+ width="1402"
+ height="301"
+ id="rect846"
+ style="fill:none;stroke:none" /><path
+ d="m 9800,8100 970,0"
+ id="path848"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 11200,8100 -450,-150 0,300 450,-150 z"
+ id="path850"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
class="com.sun.star.drawing.LineShape"
id="g852"
transform="translate(-3285.889,-3129.4446)"><g
id="id62"><rect
- class="BoundingBox"
- x="9900"
- y="6900"
- width="1202"
- height="802"
- id="rect855"
- style="fill:none;stroke:none" /><path
- d="m 11100,7700 -842,-561"
- id="path857"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#ff3333" /><path
- d="m 9900,6900 291,374 167,-249 -458,-125 z"
- id="path859"
- inkscape:connector-curvature="0"
- style="fill:#ff3333;stroke:none" /></g></g><g
+ class="BoundingBox"
+ x="9900"
+ y="6900"
+ width="1202"
+ height="802"
+ id="rect855"
+ style="fill:none;stroke:none" /><path
+ d="m 11100,7700 -842,-561"
+ id="path857"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 9900,6900 291,374 167,-249 -458,-125 z"
+ id="path859"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
class="com.sun.star.drawing.LineShape"
id="g861"
transform="translate(-3285.889,-3185.889)"><g
id="id63"><rect
- class="BoundingBox"
- x="9999"
- y="6550"
- width="1402"
- height="301"
- id="rect864"
- style="fill:none;stroke:none" /><path
- d="m 10000,6700 970,0"
- id="path866"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#ff3333" /><path
- d="m 11400,6700 -450,-150 0,300 450,-150 z"
- id="path868"
- inkscape:connector-curvature="0"
- style="fill:#ff3333;stroke:none" /></g></g><g
+ class="BoundingBox"
+ x="9999"
+ y="6550"
+ width="1402"
+ height="301"
+ id="rect864"
+ style="fill:none;stroke:none" /><path
+ d="m 10000,6700 970,0"
+ id="path866"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 11400,6700 -450,-150 0,300 450,-150 z"
+ id="path868"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
class="com.sun.star.drawing.LineShape"
id="g870"
transform="translate(-3285.889,-3185.889)"><g
id="id64"><rect
- class="BoundingBox"
- x="12399"
- y="6550"
- width="1202"
- height="301"
- id="rect873"
- style="fill:none;stroke:none" /><path
- d="m 12400,6700 770,0"
- id="path875"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#ff3333" /><path
- d="m 13600,6700 -450,-150 0,300 450,-150 z"
- id="path877"
- inkscape:connector-curvature="0"
- style="fill:#ff3333;stroke:none" /></g></g><g
+ class="BoundingBox"
+ x="12399"
+ y="6550"
+ width="1202"
+ height="301"
+ id="rect873"
+ style="fill:none;stroke:none" /><path
+ d="m 12400,6700 770,0"
+ id="path875"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 13600,6700 -450,-150 0,300 450,-150 z"
+ id="path877"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
class="com.sun.star.drawing.LineShape"
id="g879"
transform="translate(-3285.889,-3185.889)"><g
id="id65"><rect
- class="BoundingBox"
- x="14799"
- y="6550"
- width="1202"
- height="301"
- id="rect882"
- style="fill:none;stroke:none" /><path
- d="m 14800,6700 770,0"
- id="path884"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#ff3333" /><path
- d="m 16000,6700 -450,-150 0,300 450,-150 z"
- id="path886"
- inkscape:connector-curvature="0"
- style="fill:#ff3333;stroke:none" /></g></g><g
+ class="BoundingBox"
+ x="14799"
+ y="6550"
+ width="1202"
+ height="301"
+ id="rect882"
+ style="fill:none;stroke:none" /><path
+ d="m 14800,6700 770,0"
+ id="path884"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 16000,6700 -450,-150 0,300 450,-150 z"
+ id="path886"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
class="com.sun.star.drawing.LineShape"
id="g888"
transform="translate(-3285.889,-3129.4446)"><g
id="id66"><rect
- class="BoundingBox"
- x="14400"
- y="7099"
- width="1402"
- height="602"
- id="rect891"
- style="fill:none;stroke:none" /><path
- d="m 15800,7100 -1005,431"
- id="path893"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#ff3333" /><path
- d="m 14400,7700 473,-39 -118,-276 -355,315 z"
- id="path895"
- inkscape:connector-curvature="0"
- style="fill:#ff3333;stroke:none" /></g></g><g
+ class="BoundingBox"
+ x="14400"
+ y="7099"
+ width="1402"
+ height="602"
+ id="rect891"
+ style="fill:none;stroke:none" /><path
+ d="m 15800,7100 -1005,431"
+ id="path893"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 14400,7700 473,-39 -118,-276 -355,315 z"
+ id="path895"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
class="com.sun.star.drawing.LineShape"
id="g897"
transform="translate(-3285.889,-3185.889)"><g
id="id67"><rect
- class="BoundingBox"
- x="14599"
- y="7950"
- width="1402"
- height="301"
- id="rect900"
- style="fill:none;stroke:none" /><path
- d="m 14600,8100 970,0"
- id="path902"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#ff3333" /><path
- d="m 16000,8100 -450,-150 0,300 450,-150 z"
- id="path904"
- inkscape:connector-curvature="0"
- style="fill:#ff3333;stroke:none" /></g></g><g
+ class="BoundingBox"
+ x="14599"
+ y="7950"
+ width="1402"
+ height="301"
+ id="rect900"
+ style="fill:none;stroke:none" /><path
+ d="m 14600,8100 970,0"
+ id="path902"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 16000,8100 -450,-150 0,300 450,-150 z"
+ id="path904"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
class="com.sun.star.drawing.LineShape"
id="g906"
transform="translate(-3285.889,-3185.889)"><g
id="id68"><rect
- class="BoundingBox"
- x="5399"
- y="9450"
- width="1202"
- height="301"
- id="rect909"
- style="fill:none;stroke:none" /><path
- d="m 5400,9600 770,0"
- id="path911"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#ff3333" /><path
- d="m 6600,9600 -450,-150 0,300 450,-150 z"
- id="path913"
- inkscape:connector-curvature="0"
- style="fill:#ff3333;stroke:none" /></g></g><g
+ class="BoundingBox"
+ x="5399"
+ y="9450"
+ width="1202"
+ height="301"
+ id="rect909"
+ style="fill:none;stroke:none" /><path
+ d="m 5400,9600 770,0"
+ id="path911"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 6600,9600 -450,-150 0,300 450,-150 z"
+ id="path913"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
class="com.sun.star.drawing.LineShape"
id="g915"
transform="translate(-3285.889,-3185.889)"><g
id="id69"><rect
- class="BoundingBox"
- x="7599"
- y="9450"
- width="1202"
- height="301"
- id="rect918"
- style="fill:none;stroke:none" /><path
- d="m 7600,9600 770,0"
- id="path920"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#ff3333" /><path
- d="m 8800,9600 -450,-150 0,300 450,-150 z"
- id="path922"
- inkscape:connector-curvature="0"
- style="fill:#ff3333;stroke:none" /></g></g><g
+ class="BoundingBox"
+ x="7599"
+ y="9450"
+ width="1202"
+ height="301"
+ id="rect918"
+ style="fill:none;stroke:none" /><path
+ d="m 7600,9600 770,0"
+ id="path920"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 8800,9600 -450,-150 0,300 450,-150 z"
+ id="path922"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
class="com.sun.star.drawing.LineShape"
id="g924"
transform="translate(-3285.889,-3185.889)"><g
id="id70"><rect
- class="BoundingBox"
- x="9999"
- y="9450"
- width="1202"
- height="301"
- id="rect927"
- style="fill:none;stroke:none" /><path
- d="m 10000,9600 770,0"
- id="path929"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#ff3333" /><path
- d="m 11200,9600 -450,-150 0,300 450,-150 z"
- id="path931"
- inkscape:connector-curvature="0"
- style="fill:#ff3333;stroke:none" /></g></g><g
+ class="BoundingBox"
+ x="9999"
+ y="9450"
+ width="1202"
+ height="301"
+ id="rect927"
+ style="fill:none;stroke:none" /><path
+ d="m 10000,9600 770,0"
+ id="path929"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 11200,9600 -450,-150 0,300 450,-150 z"
+ id="path931"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
class="com.sun.star.drawing.LineShape"
id="g933"
transform="translate(-3285.889,-3185.889)"><g
id="id71"><rect
- class="BoundingBox"
- x="12399"
- y="9450"
- width="1202"
- height="301"
- id="rect936"
- style="fill:none;stroke:none" /><path
- d="m 12400,9600 770,0"
- id="path938"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#ff3333" /><path
- d="m 13600,9600 -450,-150 0,300 450,-150 z"
- id="path940"
- inkscape:connector-curvature="0"
- style="fill:#ff3333;stroke:none" /></g></g><g
+ class="BoundingBox"
+ x="12399"
+ y="9450"
+ width="1202"
+ height="301"
+ id="rect936"
+ style="fill:none;stroke:none" /><path
+ d="m 12400,9600 770,0"
+ id="path938"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 13600,9600 -450,-150 0,300 450,-150 z"
+ id="path940"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g><g
class="com.sun.star.drawing.LineShape"
id="g942"
transform="translate(-3285.889,-3185.889)"><g
id="id72"><rect
- class="BoundingBox"
- x="14799"
- y="9450"
- width="1202"
- height="301"
- id="rect945"
- style="fill:none;stroke:none" /><path
- d="m 14800,9600 770,0"
- id="path947"
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#ff3333" /><path
- d="m 16000,9600 -450,-150 0,300 450,-150 z"
- id="path949"
- inkscape:connector-curvature="0"
- style="fill:#ff3333;stroke:none" /></g></g></svg>
+ class="BoundingBox"
+ x="14799"
+ y="9450"
+ width="1202"
+ height="301"
+ id="rect945"
+ style="fill:none;stroke:none" /><path
+ d="m 14800,9600 770,0"
+ id="path947"
+ inkscape:connector-curvature="0"
+ style="fill:none;stroke:#ff3333" /><path
+ d="m 16000,9600 -450,-150 0,300 450,-150 z"
+ id="path949"
+ inkscape:connector-curvature="0"
+ style="fill:#ff3333;stroke:none" /></g></g></svg>
diff --git a/Documentation/media/uapi/v4l/pixfmt-008.rst b/Documentation/media/uapi/v4l/pixfmt-008.rst
deleted file mode 100644
index 4bec79784bdd..000000000000
--- a/Documentation/media/uapi/v4l/pixfmt-008.rst
+++ /dev/null
@@ -1,32 +0,0 @@
-.. -*- coding: utf-8; mode: rst -*-
-
-***************************************
-Detailed Transfer Function Descriptions
-***************************************
-
-
-.. _xf-smpte-2084:
-
-Transfer Function SMPTE 2084 (V4L2_XFER_FUNC_SMPTE2084)
-=======================================================
-
-The :ref:`smpte2084` standard defines the transfer function used by
-High Dynamic Range content.
-
-Constants:
- m1 = (2610 / 4096) / 4
-
- m2 = (2523 / 4096) * 128
-
- c1 = 3424 / 4096
-
- c2 = (2413 / 4096) * 32
-
- c3 = (2392 / 4096) * 32
-
-Transfer function:
- L' = ((c1 + c2 * L\ :sup:`m1`) / (1 + c3 * L\ :sup:`m1`))\ :sup:`m2`
-
-Inverse Transfer function:
- L = (max(L':sup:`1/m2` - c1, 0) / (c2 - c3 *
- L'\ :sup:`1/m2`))\ :sup:`1/m1`
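For reference, the SMPTE 2084 (PQ) transfer function being removed above can be evaluated directly from the constants listed in the file. The helper below is only a sketch in C; smpte2084_encode() is a name invented for this example and is not a kernel or V4L2 function.

#include <math.h>

/* Sketch: L is the normalized linear luminance in [0, 1]; the return
 * value is the non-linear signal L' as defined above. */
static double smpte2084_encode(double L)
{
	const double m1 = (2610.0 / 4096.0) / 4.0;
	const double m2 = (2523.0 / 4096.0) * 128.0;
	const double c1 = 3424.0 / 4096.0;
	const double c2 = (2413.0 / 4096.0) * 32.0;
	const double c3 = (2392.0 / 4096.0) * 32.0;
	double Lm1 = pow(L, m1);

	return pow((c1 + c2 * Lm1) / (1.0 + c3 * Lm1), m2);
}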
diff --git a/Documentation/media/uapi/v4l/pixfmt-013.rst b/Documentation/media/uapi/v4l/pixfmt-compressed.rst
index 728d7ede10fa..728d7ede10fa 100644
--- a/Documentation/media/uapi/v4l/pixfmt-013.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-compressed.rst
diff --git a/Documentation/media/uapi/v4l/pixfmt-004.rst b/Documentation/media/uapi/v4l/pixfmt-intro.rst
index 4bc116aa8193..4bc116aa8193 100644
--- a/Documentation/media/uapi/v4l/pixfmt-004.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-intro.rst
diff --git a/Documentation/media/uapi/v4l/pixfmt-inzi.rst b/Documentation/media/uapi/v4l/pixfmt-inzi.rst
index 9849e799f205..75272f80bc8a 100644
--- a/Documentation/media/uapi/v4l/pixfmt-inzi.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-inzi.rst
@@ -34,11 +34,12 @@ The second plane provides 16-bit per-pixel Depth data arranged in
Each cell is a 16-bit word with more significant data stored at higher
memory address (byte order is little-endian).
+
.. raw:: latex
- \newline\newline\begin{adjustbox}{width=\columnwidth}
+ \small
-.. tabularcolumns:: |p{4.0cm}|p{4.0cm}|p{4.0cm}|p{4.0cm}|p{4.0cm}|p{4.0cm}|
+.. tabularcolumns:: |p{2.5cm}|p{2.5cm}|p{2.5cm}|p{2.5cm}|p{2.5cm}|p{2.5cm}|
.. flat-table::
:header-rows: 0
@@ -78,4 +79,4 @@ memory address (byte order is little-endian).
.. raw:: latex
- \end{adjustbox}\newline\newline
+ \normalsize
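The depth plane described in this file is a plain array of 16-bit words stored little-endian, so extracting one sample only needs the usual two-byte assembly. The helper below is a sketch with invented names, not part of any V4L2 header.

#include <stddef.h>
#include <stdint.h>

/* Sketch: returns depth sample number 'index' from the second (depth)
 * plane of a V4L2_PIX_FMT_INZI buffer. */
static uint16_t inzi_depth_sample(const uint8_t *depth_plane, size_t index)
{
	/* Little-endian: the byte at the higher address is more significant. */
	return (uint16_t)depth_plane[2 * index] |
	       ((uint16_t)depth_plane[2 * index + 1] << 8);
}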
diff --git a/Documentation/media/uapi/v4l/pixfmt-m420.rst b/Documentation/media/uapi/v4l/pixfmt-m420.rst
index 7dd47c071e2f..6703f4079c3e 100644
--- a/Documentation/media/uapi/v4l/pixfmt-m420.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-m420.rst
@@ -66,7 +66,7 @@ Each cell is one byte.
- Cr\ :sub:`11`
-**Color Sample Location..**
+**Color Sample Location:**
diff --git a/Documentation/media/uapi/v4l/pixfmt-nv12.rst b/Documentation/media/uapi/v4l/pixfmt-nv12.rst
index 5b45a6d2ac95..2776b41377d5 100644
--- a/Documentation/media/uapi/v4l/pixfmt-nv12.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-nv12.rst
@@ -71,7 +71,7 @@ Each cell is one byte.
- Cr\ :sub:`11`
-**Color Sample Location..**
+**Color Sample Location:**
.. flat-table::
:header-rows: 0
diff --git a/Documentation/media/uapi/v4l/pixfmt-nv12m.rst b/Documentation/media/uapi/v4l/pixfmt-nv12m.rst
index de3051fd6b50..c1a2779f604c 100644
--- a/Documentation/media/uapi/v4l/pixfmt-nv12m.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-nv12m.rst
@@ -83,7 +83,7 @@ Each cell is one byte.
- Cr\ :sub:`11`
-**Color Sample Location..**
+**Color Sample Location:**
diff --git a/Documentation/media/uapi/v4l/pixfmt-nv16.rst b/Documentation/media/uapi/v4l/pixfmt-nv16.rst
index 8ceba79ff636..f0fdad3006cf 100644
--- a/Documentation/media/uapi/v4l/pixfmt-nv16.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-nv16.rst
@@ -79,7 +79,7 @@ Each cell is one byte.
- Cr\ :sub:`31`
-**Color Sample Location..**
+**Color Sample Location:**
diff --git a/Documentation/media/uapi/v4l/pixfmt-nv16m.rst b/Documentation/media/uapi/v4l/pixfmt-nv16m.rst
index 4d46ab39f9f1..c45f036763e7 100644
--- a/Documentation/media/uapi/v4l/pixfmt-nv16m.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-nv16m.rst
@@ -83,7 +83,7 @@ Each cell is one byte.
- Cr\ :sub:`32`
-**Color Sample Location..**
+**Color Sample Location:**
diff --git a/Documentation/media/uapi/v4l/pixfmt-packed-hsv.rst b/Documentation/media/uapi/v4l/pixfmt-packed-hsv.rst
index 3fdb34ce2f09..8edf65c80660 100644
--- a/Documentation/media/uapi/v4l/pixfmt-packed-hsv.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-packed-hsv.rst
@@ -17,11 +17,14 @@ cylinder: 0 being the smallest value and 255 the maximum.
The values are packed in 24 or 32 bit formats.
+
.. raw:: latex
- \newline\begin{adjustbox}{width=\columnwidth}
+ \begingroup
+ \tiny
+ \setlength{\tabcolsep}{2pt}
-.. tabularcolumns:: |p{4.2cm}|p{1.0cm}|p{0.7cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.2cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.2cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.2cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{1.7cm}|
+.. tabularcolumns:: |p{2.0cm}|p{0.54cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|
.. _packed-hsv-formats:
@@ -33,11 +36,8 @@ The values are packed in 24 or 32 bit formats.
- Code
-
- :cspan:`7` Byte 0 in memory
- -
- :cspan:`7` Byte 1
- -
- :cspan:`7` Byte 2
- -
- :cspan:`7` Byte 3
* -
-
@@ -50,7 +50,7 @@ The values are packed in 24 or 32 bit formats.
- 2
- 1
- 0
- -
+
- 7
- 6
- 5
@@ -59,7 +59,7 @@ The values are packed in 24 or 32 bit formats.
- 2
- 1
- 0
- -
+
- 7
- 6
- 5
@@ -68,7 +68,7 @@ The values are packed in 24 or 32 bit formats.
- 2
- 1
- 0
- -
+
- 7
- 6
- 5
@@ -90,7 +90,7 @@ The values are packed in 24 or 32 bit formats.
-
-
-
- -
+
- h\ :sub:`7`
- h\ :sub:`6`
- h\ :sub:`5`
@@ -99,7 +99,7 @@ The values are packed in 24 or 32 bit formats.
- h\ :sub:`2`
- h\ :sub:`1`
- h\ :sub:`0`
- -
+
- s\ :sub:`7`
- s\ :sub:`6`
- s\ :sub:`5`
@@ -108,7 +108,7 @@ The values are packed in 24 or 32 bit formats.
- s\ :sub:`2`
- s\ :sub:`1`
- s\ :sub:`0`
- -
+
- v\ :sub:`7`
- v\ :sub:`6`
- v\ :sub:`5`
@@ -130,7 +130,7 @@ The values are packed in 24 or 32 bit formats.
- h\ :sub:`2`
- h\ :sub:`1`
- h\ :sub:`0`
- -
+
- s\ :sub:`7`
- s\ :sub:`6`
- s\ :sub:`5`
@@ -139,7 +139,7 @@ The values are packed in 24 or 32 bit formats.
- s\ :sub:`2`
- s\ :sub:`1`
- s\ :sub:`0`
- -
+
- v\ :sub:`7`
- v\ :sub:`6`
- v\ :sub:`5`
@@ -149,9 +149,9 @@ The values are packed in 24 or 32 bit formats.
- v\ :sub:`1`
- v\ :sub:`0`
-
- -
+
.. raw:: latex
- \end{adjustbox}\newline\newline
+ \endgroup
Bit 7 is the most significant bit.
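The tables above reduce to H, S and V stored one byte each, with the 32-bit variant carrying an unused leading byte. The structs below only illustrate that per-pixel layout; the names are invented here and the exact FourCC-to-layout mapping is the one given by the table.

#include <stdint.h>

struct hsv24_pixel {	/* 24-bit packed HSV: three bytes per pixel */
	uint8_t h, s, v;
};

struct hsv32_pixel {	/* 32-bit packed HSV: four bytes per pixel */
	uint8_t unused;	/* byte 0 carries no data in the table above */
	uint8_t h, s, v;
};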
diff --git a/Documentation/media/uapi/v4l/pixfmt-packed-rgb.rst b/Documentation/media/uapi/v4l/pixfmt-packed-rgb.rst
index 84fcbcb74171..4938d9655a41 100644
--- a/Documentation/media/uapi/v4l/pixfmt-packed-rgb.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-packed-rgb.rst
@@ -16,9 +16,12 @@ next to each other in memory.
.. raw:: latex
- \begin{adjustbox}{width=\columnwidth}
+ \begingroup
+ \tiny
+ \setlength{\tabcolsep}{2pt}
+
+.. tabularcolumns:: |p{2.3cm}|p{1.6cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|
-.. tabularcolumns:: |p{4.5cm}|p{3.3cm}|p{0.7cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.2cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.2cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.2cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{1.7cm}|
.. _rgb-formats:
@@ -28,17 +31,12 @@ next to each other in memory.
* - Identifier
- Code
- -
- :cspan:`7` Byte 0 in memory
- -
- :cspan:`7` Byte 1
- -
- :cspan:`7` Byte 2
- -
- :cspan:`7` Byte 3
* -
-
- - Bit
- 7
- 6
- 5
@@ -47,7 +45,7 @@ next to each other in memory.
- 2
- 1
- 0
- -
+
- 7
- 6
- 5
@@ -56,7 +54,7 @@ next to each other in memory.
- 2
- 1
- 0
- -
+
- 7
- 6
- 5
@@ -65,7 +63,7 @@ next to each other in memory.
- 2
- 1
- 0
- -
+
- 7
- 6
- 5
@@ -78,7 +76,7 @@ next to each other in memory.
- ``V4L2_PIX_FMT_RGB332``
- 'RGB1'
- -
+
- r\ :sub:`2`
- r\ :sub:`1`
- r\ :sub:`0`
@@ -87,11 +85,12 @@ next to each other in memory.
- g\ :sub:`0`
- b\ :sub:`1`
- b\ :sub:`0`
+ -
* .. _V4L2-PIX-FMT-ARGB444:
- ``V4L2_PIX_FMT_ARGB444``
- 'AR12'
- -
+
- g\ :sub:`3`
- g\ :sub:`2`
- g\ :sub:`1`
@@ -100,7 +99,7 @@ next to each other in memory.
- b\ :sub:`2`
- b\ :sub:`1`
- b\ :sub:`0`
- -
+
- a\ :sub:`3`
- a\ :sub:`2`
- a\ :sub:`1`
@@ -109,11 +108,12 @@ next to each other in memory.
- r\ :sub:`2`
- r\ :sub:`1`
- r\ :sub:`0`
+ -
* .. _V4L2-PIX-FMT-XRGB444:
- ``V4L2_PIX_FMT_XRGB444``
- 'XR12'
- -
+
- g\ :sub:`3`
- g\ :sub:`2`
- g\ :sub:`1`
@@ -122,7 +122,7 @@ next to each other in memory.
- b\ :sub:`2`
- b\ :sub:`1`
- b\ :sub:`0`
- -
+
-
-
-
@@ -131,11 +131,12 @@ next to each other in memory.
- r\ :sub:`2`
- r\ :sub:`1`
- r\ :sub:`0`
+ -
* .. _V4L2-PIX-FMT-ARGB555:
- ``V4L2_PIX_FMT_ARGB555``
- 'AR15'
- -
+
- g\ :sub:`2`
- g\ :sub:`1`
- g\ :sub:`0`
@@ -144,7 +145,7 @@ next to each other in memory.
- b\ :sub:`2`
- b\ :sub:`1`
- b\ :sub:`0`
- -
+
- a
- r\ :sub:`4`
- r\ :sub:`3`
@@ -153,11 +154,12 @@ next to each other in memory.
- r\ :sub:`0`
- g\ :sub:`4`
- g\ :sub:`3`
+ -
* .. _V4L2-PIX-FMT-XRGB555:
- ``V4L2_PIX_FMT_XRGB555``
- 'XR15'
- -
+
- g\ :sub:`2`
- g\ :sub:`1`
- g\ :sub:`0`
@@ -166,7 +168,7 @@ next to each other in memory.
- b\ :sub:`2`
- b\ :sub:`1`
- b\ :sub:`0`
- -
+
-
- r\ :sub:`4`
- r\ :sub:`3`
@@ -175,11 +177,12 @@ next to each other in memory.
- r\ :sub:`0`
- g\ :sub:`4`
- g\ :sub:`3`
+ -
* .. _V4L2-PIX-FMT-RGB565:
- ``V4L2_PIX_FMT_RGB565``
- 'RGBP'
- -
+
- g\ :sub:`2`
- g\ :sub:`1`
- g\ :sub:`0`
@@ -188,7 +191,7 @@ next to each other in memory.
- b\ :sub:`2`
- b\ :sub:`1`
- b\ :sub:`0`
- -
+
- r\ :sub:`4`
- r\ :sub:`3`
- r\ :sub:`2`
@@ -197,11 +200,12 @@ next to each other in memory.
- g\ :sub:`5`
- g\ :sub:`4`
- g\ :sub:`3`
+ -
* .. _V4L2-PIX-FMT-ARGB555X:
- ``V4L2_PIX_FMT_ARGB555X``
- 'AR15' | (1 << 31)
- -
+
- a
- r\ :sub:`4`
- r\ :sub:`3`
@@ -210,7 +214,7 @@ next to each other in memory.
- r\ :sub:`0`
- g\ :sub:`4`
- g\ :sub:`3`
- -
+
- g\ :sub:`2`
- g\ :sub:`1`
- g\ :sub:`0`
@@ -219,11 +223,12 @@ next to each other in memory.
- b\ :sub:`2`
- b\ :sub:`1`
- b\ :sub:`0`
+ -
* .. _V4L2-PIX-FMT-XRGB555X:
- ``V4L2_PIX_FMT_XRGB555X``
- 'XR15' | (1 << 31)
- -
+
-
- r\ :sub:`4`
- r\ :sub:`3`
@@ -232,7 +237,7 @@ next to each other in memory.
- r\ :sub:`0`
- g\ :sub:`4`
- g\ :sub:`3`
- -
+
- g\ :sub:`2`
- g\ :sub:`1`
- g\ :sub:`0`
@@ -241,11 +246,12 @@ next to each other in memory.
- b\ :sub:`2`
- b\ :sub:`1`
- b\ :sub:`0`
+ -
* .. _V4L2-PIX-FMT-RGB565X:
- ``V4L2_PIX_FMT_RGB565X``
- 'RGBR'
- -
+
- r\ :sub:`4`
- r\ :sub:`3`
- r\ :sub:`2`
@@ -254,7 +260,7 @@ next to each other in memory.
- g\ :sub:`5`
- g\ :sub:`4`
- g\ :sub:`3`
- -
+
- g\ :sub:`2`
- g\ :sub:`1`
- g\ :sub:`0`
@@ -263,11 +269,12 @@ next to each other in memory.
- b\ :sub:`2`
- b\ :sub:`1`
- b\ :sub:`0`
+ -
* .. _V4L2-PIX-FMT-BGR24:
- ``V4L2_PIX_FMT_BGR24``
- 'BGR3'
- -
+
- b\ :sub:`7`
- b\ :sub:`6`
- b\ :sub:`5`
@@ -276,7 +283,7 @@ next to each other in memory.
- b\ :sub:`2`
- b\ :sub:`1`
- b\ :sub:`0`
- -
+
- g\ :sub:`7`
- g\ :sub:`6`
- g\ :sub:`5`
@@ -285,7 +292,7 @@ next to each other in memory.
- g\ :sub:`2`
- g\ :sub:`1`
- g\ :sub:`0`
- -
+
- r\ :sub:`7`
- r\ :sub:`6`
- r\ :sub:`5`
@@ -294,11 +301,12 @@ next to each other in memory.
- r\ :sub:`2`
- r\ :sub:`1`
- r\ :sub:`0`
+ -
* .. _V4L2-PIX-FMT-RGB24:
- ``V4L2_PIX_FMT_RGB24``
- 'RGB3'
- -
+
- r\ :sub:`7`
- r\ :sub:`6`
- r\ :sub:`5`
@@ -307,7 +315,7 @@ next to each other in memory.
- r\ :sub:`2`
- r\ :sub:`1`
- r\ :sub:`0`
- -
+
- g\ :sub:`7`
- g\ :sub:`6`
- g\ :sub:`5`
@@ -316,7 +324,7 @@ next to each other in memory.
- g\ :sub:`2`
- g\ :sub:`1`
- g\ :sub:`0`
- -
+
- b\ :sub:`7`
- b\ :sub:`6`
- b\ :sub:`5`
@@ -325,11 +333,12 @@ next to each other in memory.
- b\ :sub:`2`
- b\ :sub:`1`
- b\ :sub:`0`
+ -
* .. _V4L2-PIX-FMT-BGR666:
- ``V4L2_PIX_FMT_BGR666``
- 'BGRH'
- -
+
- b\ :sub:`5`
- b\ :sub:`4`
- b\ :sub:`3`
@@ -338,7 +347,7 @@ next to each other in memory.
- b\ :sub:`0`
- g\ :sub:`5`
- g\ :sub:`4`
- -
+
- g\ :sub:`3`
- g\ :sub:`2`
- g\ :sub:`1`
@@ -347,7 +356,7 @@ next to each other in memory.
- r\ :sub:`4`
- r\ :sub:`3`
- r\ :sub:`2`
- -
+
- r\ :sub:`1`
- r\ :sub:`0`
-
@@ -356,7 +365,7 @@ next to each other in memory.
-
-
-
- -
+
-
-
-
@@ -369,7 +378,7 @@ next to each other in memory.
- ``V4L2_PIX_FMT_ABGR32``
- 'AR24'
- -
+
- b\ :sub:`7`
- b\ :sub:`6`
- b\ :sub:`5`
@@ -378,7 +387,7 @@ next to each other in memory.
- b\ :sub:`2`
- b\ :sub:`1`
- b\ :sub:`0`
- -
+
- g\ :sub:`7`
- g\ :sub:`6`
- g\ :sub:`5`
@@ -387,7 +396,7 @@ next to each other in memory.
- g\ :sub:`2`
- g\ :sub:`1`
- g\ :sub:`0`
- -
+
- r\ :sub:`7`
- r\ :sub:`6`
- r\ :sub:`5`
@@ -396,7 +405,7 @@ next to each other in memory.
- r\ :sub:`2`
- r\ :sub:`1`
- r\ :sub:`0`
- -
+
- a\ :sub:`7`
- a\ :sub:`6`
- a\ :sub:`5`
@@ -409,7 +418,7 @@ next to each other in memory.
- ``V4L2_PIX_FMT_XBGR32``
- 'XR24'
- -
+
- b\ :sub:`7`
- b\ :sub:`6`
- b\ :sub:`5`
@@ -418,7 +427,7 @@ next to each other in memory.
- b\ :sub:`2`
- b\ :sub:`1`
- b\ :sub:`0`
- -
+
- g\ :sub:`7`
- g\ :sub:`6`
- g\ :sub:`5`
@@ -427,7 +436,7 @@ next to each other in memory.
- g\ :sub:`2`
- g\ :sub:`1`
- g\ :sub:`0`
- -
+
- r\ :sub:`7`
- r\ :sub:`6`
- r\ :sub:`5`
@@ -436,7 +445,7 @@ next to each other in memory.
- r\ :sub:`2`
- r\ :sub:`1`
- r\ :sub:`0`
- -
+
-
-
-
@@ -449,7 +458,7 @@ next to each other in memory.
- ``V4L2_PIX_FMT_ARGB32``
- 'BA24'
- -
+
- a\ :sub:`7`
- a\ :sub:`6`
- a\ :sub:`5`
@@ -458,7 +467,7 @@ next to each other in memory.
- a\ :sub:`2`
- a\ :sub:`1`
- a\ :sub:`0`
- -
+
- r\ :sub:`7`
- r\ :sub:`6`
- r\ :sub:`5`
@@ -467,7 +476,7 @@ next to each other in memory.
- r\ :sub:`2`
- r\ :sub:`1`
- r\ :sub:`0`
- -
+
- g\ :sub:`7`
- g\ :sub:`6`
- g\ :sub:`5`
@@ -476,7 +485,7 @@ next to each other in memory.
- g\ :sub:`2`
- g\ :sub:`1`
- g\ :sub:`0`
- -
+
- b\ :sub:`7`
- b\ :sub:`6`
- b\ :sub:`5`
@@ -489,6 +498,7 @@ next to each other in memory.
- ``V4L2_PIX_FMT_XRGB32``
- 'BX24'
+
-
-
-
@@ -497,8 +507,7 @@ next to each other in memory.
-
-
-
- -
- -
+
- r\ :sub:`7`
- r\ :sub:`6`
- r\ :sub:`5`
@@ -507,7 +516,7 @@ next to each other in memory.
- r\ :sub:`2`
- r\ :sub:`1`
- r\ :sub:`0`
- -
+
- g\ :sub:`7`
- g\ :sub:`6`
- g\ :sub:`5`
@@ -516,7 +525,7 @@ next to each other in memory.
- g\ :sub:`2`
- g\ :sub:`1`
- g\ :sub:`0`
- -
+
- b\ :sub:`7`
- b\ :sub:`6`
- b\ :sub:`5`
@@ -528,7 +537,7 @@ next to each other in memory.
.. raw:: latex
- \end{adjustbox}\newline\newline
+ \endgroup
.. note:: Bit 7 is the most significant bit.
@@ -562,9 +571,9 @@ Each cell is one byte.
.. raw:: latex
- \newline\newline\begin{adjustbox}{width=\columnwidth}
+ \small
-.. tabularcolumns:: |p{4.1cm}|p{1.1cm}|p{1.1cm}|p{1.1cm}|p{1.1cm}|p{1.1cm}|p{1.1cm}|p{1.1cm}|p{1.1cm}|p{1.1cm}|p{1.1cm}|p{1.1cm}|p{1.3cm}|
+.. tabularcolumns:: |p{3.1cm}|p{0.8cm}|p{0.8cm}|p{0.8cm}|p{0.8cm}|p{0.8cm}|p{0.8cm}|p{0.8cm}|p{0.8cm}|p{0.8cm}|p{0.8cm}|p{0.8cm}|p{0.8cm}|
.. flat-table:: RGB byte order
:header-rows: 0
@@ -626,19 +635,21 @@ Each cell is one byte.
.. raw:: latex
- \end{adjustbox}\newline\newline
+ \normalsize
Formats defined in :ref:`rgb-formats-deprecated` are deprecated and
must not be used by new drivers. They are documented here for reference.
-The meaning of their alpha bits (a) is ill-defined and interpreted as in
+The meaning of their alpha bits ``(a)`` is ill-defined and interpreted as in
either the corresponding ARGB or XRGB format, depending on the driver.
.. raw:: latex
- \begin{adjustbox}{width=\columnwidth}
+ \begingroup
+ \tiny
+ \setlength{\tabcolsep}{2pt}
-.. tabularcolumns:: |p{4.2cm}|p{1.0cm}|p{0.7cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.2cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.2cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.2cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{1.7cm}|
+.. tabularcolumns:: |p{2.2cm}|p{0.60cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|
.. _rgb-formats-deprecated:
@@ -648,17 +659,15 @@ either the corresponding ARGB or XRGB format, depending on the driver.
* - Identifier
- Code
- -
- :cspan:`7` Byte 0 in memory
- -
+
- :cspan:`7` Byte 1
- -
+
- :cspan:`7` Byte 2
- -
+
- :cspan:`7` Byte 3
* -
-
- - Bit
- 7
- 6
- 5
@@ -667,7 +676,7 @@ either the corresponding ARGB or XRGB format, depending on the driver.
- 2
- 1
- 0
- -
+
- 7
- 6
- 5
@@ -676,7 +685,7 @@ either the corresponding ARGB or XRGB format, depending on the driver.
- 2
- 1
- 0
- -
+
- 7
- 6
- 5
@@ -685,7 +694,7 @@ either the corresponding ARGB or XRGB format, depending on the driver.
- 2
- 1
- 0
- -
+
- 7
- 6
- 5
@@ -698,7 +707,7 @@ either the corresponding ARGB or XRGB format, depending on the driver.
- ``V4L2_PIX_FMT_RGB444``
- 'R444'
- -
+
- g\ :sub:`3`
- g\ :sub:`2`
- g\ :sub:`1`
@@ -707,7 +716,7 @@ either the corresponding ARGB or XRGB format, depending on the driver.
- b\ :sub:`2`
- b\ :sub:`1`
- b\ :sub:`0`
- -
+
- a\ :sub:`3`
- a\ :sub:`2`
- a\ :sub:`1`
@@ -716,11 +725,12 @@ either the corresponding ARGB or XRGB format, depending on the driver.
- r\ :sub:`2`
- r\ :sub:`1`
- r\ :sub:`0`
+ -
* .. _V4L2-PIX-FMT-RGB555:
- ``V4L2_PIX_FMT_RGB555``
- 'RGBO'
- -
+
- g\ :sub:`2`
- g\ :sub:`1`
- g\ :sub:`0`
@@ -729,7 +739,7 @@ either the corresponding ARGB or XRGB format, depending on the driver.
- b\ :sub:`2`
- b\ :sub:`1`
- b\ :sub:`0`
- -
+
- a
- r\ :sub:`4`
- r\ :sub:`3`
@@ -738,11 +748,12 @@ either the corresponding ARGB or XRGB format, depending on the driver.
- r\ :sub:`0`
- g\ :sub:`4`
- g\ :sub:`3`
+ -
* .. _V4L2-PIX-FMT-RGB555X:
- ``V4L2_PIX_FMT_RGB555X``
- 'RGBQ'
- -
+
- a
- r\ :sub:`4`
- r\ :sub:`3`
@@ -751,7 +762,7 @@ either the corresponding ARGB or XRGB format, depending on the driver.
- r\ :sub:`0`
- g\ :sub:`4`
- g\ :sub:`3`
- -
+
- g\ :sub:`2`
- g\ :sub:`1`
- g\ :sub:`0`
@@ -760,11 +771,12 @@ either the corresponding ARGB or XRGB format, depending on the driver.
- b\ :sub:`2`
- b\ :sub:`1`
- b\ :sub:`0`
+ -
* .. _V4L2-PIX-FMT-BGR32:
- ``V4L2_PIX_FMT_BGR32``
- 'BGR4'
- -
+
- b\ :sub:`7`
- b\ :sub:`6`
- b\ :sub:`5`
@@ -773,7 +785,7 @@ either the corresponding ARGB or XRGB format, depending on the driver.
- b\ :sub:`2`
- b\ :sub:`1`
- b\ :sub:`0`
- -
+
- g\ :sub:`7`
- g\ :sub:`6`
- g\ :sub:`5`
@@ -782,7 +794,7 @@ either the corresponding ARGB or XRGB format, depending on the driver.
- g\ :sub:`2`
- g\ :sub:`1`
- g\ :sub:`0`
- -
+
- r\ :sub:`7`
- r\ :sub:`6`
- r\ :sub:`5`
@@ -791,7 +803,7 @@ either the corresponding ARGB or XRGB format, depending on the driver.
- r\ :sub:`2`
- r\ :sub:`1`
- r\ :sub:`0`
- -
+
- a\ :sub:`7`
- a\ :sub:`6`
- a\ :sub:`5`
@@ -804,7 +816,7 @@ either the corresponding ARGB or XRGB format, depending on the driver.
- ``V4L2_PIX_FMT_RGB32``
- 'RGB4'
- -
+
- a\ :sub:`7`
- a\ :sub:`6`
- a\ :sub:`5`
@@ -813,7 +825,7 @@ either the corresponding ARGB or XRGB format, depending on the driver.
- a\ :sub:`2`
- a\ :sub:`1`
- a\ :sub:`0`
- -
+
- r\ :sub:`7`
- r\ :sub:`6`
- r\ :sub:`5`
@@ -822,7 +834,7 @@ either the corresponding ARGB or XRGB format, depending on the driver.
- r\ :sub:`2`
- r\ :sub:`1`
- r\ :sub:`0`
- -
+
- g\ :sub:`7`
- g\ :sub:`6`
- g\ :sub:`5`
@@ -831,7 +843,7 @@ either the corresponding ARGB or XRGB format, depending on the driver.
- g\ :sub:`2`
- g\ :sub:`1`
- g\ :sub:`0`
- -
+
- b\ :sub:`7`
- b\ :sub:`6`
- b\ :sub:`5`
@@ -843,7 +855,7 @@ either the corresponding ARGB or XRGB format, depending on the driver.
.. raw:: latex
- \end{adjustbox}\newline\newline
+ \endgroup
A test utility to determine which RGB formats a driver actually supports
is available from the LinuxTV v4l-dvb repository. See
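As an illustration of how these packed layouts are consumed, the sketch below splits one V4L2_PIX_FMT_RGB565 pixel, read as a little-endian 16-bit word, into its components exactly as the 'RGBP' row above lays them out. The struct and function names are invented for this example.

#include <stdint.h>

struct rgb565 { uint8_t r, g, b; };

static struct rgb565 rgb565_unpack(const uint8_t *p)
{
	uint16_t v = (uint16_t)p[0] | ((uint16_t)p[1] << 8);
	struct rgb565 px = {
		.r = (v >> 11) & 0x1f,	/* word bits 15..11 */
		.g = (v >> 5) & 0x3f,	/* word bits 10..5  */
		.b = v & 0x1f,		/* word bits 4..0   */
	};

	return px;
}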
diff --git a/Documentation/media/uapi/v4l/pixfmt-packed-yuv.rst b/Documentation/media/uapi/v4l/pixfmt-packed-yuv.rst
index ebc8fcc937ad..d7644b411ccc 100644
--- a/Documentation/media/uapi/v4l/pixfmt-packed-yuv.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-packed-yuv.rst
@@ -12,13 +12,16 @@ Description
Similar to the packed RGB formats these formats store the Y, Cb and Cr
component of each pixel in one 16 or 32 bit word.
+
.. raw:: latex
- \newline\newline\begin{adjustbox}{width=\columnwidth}
+ \begingroup
+ \tiny
+ \setlength{\tabcolsep}{2pt}
.. _packed-yuv-formats:
-.. tabularcolumns:: |p{4.5cm}|p{3.3cm}|p{0.7cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.2cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.2cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.2cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{1.7cm}|
+.. tabularcolumns:: |p{2.0cm}|p{0.67cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|
.. flat-table:: Packed YUV Image Formats
:header-rows: 2
@@ -26,17 +29,16 @@ component of each pixel in one 16 or 32 bit word.
* - Identifier
- Code
- -
+
- :cspan:`7` Byte 0 in memory
- -
+
- :cspan:`7` Byte 1
- -
+
- :cspan:`7` Byte 2
- -
+
- :cspan:`7` Byte 3
* -
-
- - Bit
- 7
- 6
- 5
@@ -45,7 +47,7 @@ component of each pixel in one 16 or 32 bit word.
- 2
- 1
- 0
- -
+
- 7
- 6
- 5
@@ -54,7 +56,7 @@ component of each pixel in one 16 or 32 bit word.
- 2
- 1
- 0
- -
+
- 7
- 6
- 5
@@ -63,7 +65,7 @@ component of each pixel in one 16 or 32 bit word.
- 2
- 1
- 0
- -
+
- 7
- 6
- 5
@@ -76,7 +78,7 @@ component of each pixel in one 16 or 32 bit word.
- ``V4L2_PIX_FMT_YUV444``
- 'Y444'
- -
+
- Cb\ :sub:`3`
- Cb\ :sub:`2`
- Cb\ :sub:`1`
@@ -85,7 +87,7 @@ component of each pixel in one 16 or 32 bit word.
- Cr\ :sub:`2`
- Cr\ :sub:`1`
- Cr\ :sub:`0`
- -
+
- a\ :sub:`3`
- a\ :sub:`2`
- a\ :sub:`1`
@@ -94,11 +96,12 @@ component of each pixel in one 16 or 32 bit word.
- Y'\ :sub:`2`
- Y'\ :sub:`1`
- Y'\ :sub:`0`
+ -
* .. _V4L2-PIX-FMT-YUV555:
- ``V4L2_PIX_FMT_YUV555``
- 'YUVO'
- -
+
- Cb\ :sub:`2`
- Cb\ :sub:`1`
- Cb\ :sub:`0`
@@ -107,7 +110,7 @@ component of each pixel in one 16 or 32 bit word.
- Cr\ :sub:`2`
- Cr\ :sub:`1`
- Cr\ :sub:`0`
- -
+
- a
- Y'\ :sub:`4`
- Y'\ :sub:`3`
@@ -116,11 +119,12 @@ component of each pixel in one 16 or 32 bit word.
- Y'\ :sub:`0`
- Cb\ :sub:`4`
- Cb\ :sub:`3`
+ -
* .. _V4L2-PIX-FMT-YUV565:
- ``V4L2_PIX_FMT_YUV565``
- 'YUVP'
- -
+
- Cb\ :sub:`2`
- Cb\ :sub:`1`
- Cb\ :sub:`0`
@@ -129,7 +133,7 @@ component of each pixel in one 16 or 32 bit word.
- Cr\ :sub:`2`
- Cr\ :sub:`1`
- Cr\ :sub:`0`
- -
+
- Y'\ :sub:`4`
- Y'\ :sub:`3`
- Y'\ :sub:`2`
@@ -138,11 +142,12 @@ component of each pixel in one 16 or 32 bit word.
- Cb\ :sub:`5`
- Cb\ :sub:`4`
- Cb\ :sub:`3`
+ -
* .. _V4L2-PIX-FMT-YUV32:
- ``V4L2_PIX_FMT_YUV32``
- 'YUV4'
- -
+
- a\ :sub:`7`
- a\ :sub:`6`
- a\ :sub:`5`
@@ -151,7 +156,7 @@ component of each pixel in one 16 or 32 bit word.
- a\ :sub:`2`
- a\ :sub:`1`
- a\ :sub:`0`
- -
+
- Y'\ :sub:`7`
- Y'\ :sub:`6`
- Y'\ :sub:`5`
@@ -160,7 +165,7 @@ component of each pixel in one 16 or 32 bit word.
- Y'\ :sub:`2`
- Y'\ :sub:`1`
- Y'\ :sub:`0`
- -
+
- Cb\ :sub:`7`
- Cb\ :sub:`6`
- Cb\ :sub:`5`
@@ -169,7 +174,7 @@ component of each pixel in one 16 or 32 bit word.
- Cb\ :sub:`2`
- Cb\ :sub:`1`
- Cb\ :sub:`0`
- -
+
- Cr\ :sub:`7`
- Cr\ :sub:`6`
- Cr\ :sub:`5`
@@ -181,7 +186,7 @@ component of each pixel in one 16 or 32 bit word.
.. raw:: latex
- \end{adjustbox}\newline\newline
+ \endgroup
.. note::
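The 32-bit packed YUV layout shown above is simply one byte per component. The struct below is only an illustration of that byte order; it is not a kernel or libv4l type.

#include <stdint.h>

struct yuv32_pixel {	/* V4L2_PIX_FMT_YUV32, per the table above */
	uint8_t alpha;	/* byte 0: a7..a0   */
	uint8_t y;	/* byte 1: Y'7..Y'0 */
	uint8_t cb;	/* byte 2: Cb7..Cb0 */
	uint8_t cr;	/* byte 3: Cr7..Cr0 */
};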
diff --git a/Documentation/media/uapi/v4l/pixfmt-rgb.rst b/Documentation/media/uapi/v4l/pixfmt-rgb.rst
index b0f35136021e..4cc27195dc79 100644
--- a/Documentation/media/uapi/v4l/pixfmt-rgb.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-rgb.rst
@@ -17,4 +17,5 @@ RGB Formats
pixfmt-srggb10alaw8
pixfmt-srggb10dpcm8
pixfmt-srggb12
+ pixfmt-srggb12p
pixfmt-srggb16
diff --git a/Documentation/media/uapi/v4l/pixfmt-srggb10p.rst b/Documentation/media/uapi/v4l/pixfmt-srggb10p.rst
index b6d426c70ccd..d9e07a4b8b31 100644
--- a/Documentation/media/uapi/v4l/pixfmt-srggb10p.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-srggb10p.rst
@@ -33,11 +33,7 @@ of a small V4L2_PIX_FMT_SBGGR10P image:
**Byte Order.**
Each cell is one byte.
-.. raw:: latex
-
- \newline\newline\begin{adjustbox}{width=\columnwidth}
-
-.. tabularcolumns:: |p{2.0cm}|p{1.3cm}|p{1.3cm}|p{1.3cm}|p{1.3cm}|p{10.9cm}|
+.. tabularcolumns:: |p{2.0cm}|p{1.0cm}|p{1.0cm}|p{1.0cm}|p{1.0cm}|p{5.4cm}|
.. flat-table::
:header-rows: 0
@@ -50,6 +46,7 @@ Each cell is one byte.
- B\ :sub:`02high`
- G\ :sub:`03high`
- G\ :sub:`03low`\ (bits 7--6) B\ :sub:`02low`\ (bits 5--4)
+
G\ :sub:`01low`\ (bits 3--2) B\ :sub:`00low`\ (bits 1--0)
* - start + 5:
- G\ :sub:`10high`
@@ -57,6 +54,7 @@ Each cell is one byte.
- G\ :sub:`12high`
- R\ :sub:`13high`
- R\ :sub:`13low`\ (bits 7--6) G\ :sub:`12low`\ (bits 5--4)
+
R\ :sub:`11low`\ (bits 3--2) G\ :sub:`10low`\ (bits 1--0)
* - start + 10:
- B\ :sub:`20high`
@@ -64,6 +62,7 @@ Each cell is one byte.
- B\ :sub:`22high`
- G\ :sub:`23high`
- G\ :sub:`23low`\ (bits 7--6) B\ :sub:`22low`\ (bits 5--4)
+
G\ :sub:`21low`\ (bits 3--2) B\ :sub:`20low`\ (bits 1--0)
* - start + 15:
- G\ :sub:`30high`
@@ -71,8 +70,5 @@ Each cell is one byte.
- G\ :sub:`32high`
- R\ :sub:`33high`
- R\ :sub:`33low`\ (bits 7--6) G\ :sub:`32low`\ (bits 5--4)
- R\ :sub:`31low`\ (bits 3--2) G\ :sub:`30low`\ (bits 1--0)
-.. raw:: latex
-
- \end{adjustbox}\newline\newline
+ R\ :sub:`31low`\ (bits 3--2) G\ :sub:`30low`\ (bits 1--0)
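In this 10-bit packed layout every four pixels occupy five bytes: the first four bytes hold the eight high order bits of each pixel, and the fifth byte holds the two low order bits of all four, least significant pixel first. The unpacking sketch below uses invented names and is not part of the kernel.

#include <stdint.h>

/* Sketch: expands one 5-byte group of V4L2_PIX_FMT_SBGGR10P pixels
 * into four 10-bit samples. */
static void sbggr10p_unpack(const uint8_t *src, uint16_t out[4])
{
	int i;

	for (i = 0; i < 4; i++)
		out[i] = ((uint16_t)src[i] << 2) |	/* 8 high order bits */
			 ((src[4] >> (2 * i)) & 0x3);	/* 2 low bits from byte 4 */
}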
diff --git a/Documentation/media/uapi/v4l/pixfmt-srggb12p.rst b/Documentation/media/uapi/v4l/pixfmt-srggb12p.rst
new file mode 100644
index 000000000000..59918a7913fe
--- /dev/null
+++ b/Documentation/media/uapi/v4l/pixfmt-srggb12p.rst
@@ -0,0 +1,86 @@
+.. -*- coding: utf-8; mode: rst -*-
+
+.. _V4L2-PIX-FMT-SRGGB12P:
+.. _v4l2-pix-fmt-sbggr12p:
+.. _v4l2-pix-fmt-sgbrg12p:
+.. _v4l2-pix-fmt-sgrbg12p:
+
+*******************************************************************************************************************************
+V4L2_PIX_FMT_SRGGB12P ('pRAA'), V4L2_PIX_FMT_SGRBG12P ('pgAA'), V4L2_PIX_FMT_SGBRG12P ('pGAA'), V4L2_PIX_FMT_SBGGR12P ('pBAA')
+*******************************************************************************************************************************
+
+
+12-bit packed Bayer formats
+
+
+Description
+===========
+
+These four pixel formats are packed raw sRGB / Bayer formats with 12
+bits per colour. Every two consecutive samples are packed into three
+bytes. Each of the first two bytes contains the 8 high order bits of
+one pixel, and the third byte contains the four least significant
+bits of each pixel, in the same order.
+
+Each n-pixel row contains n/2 green samples and n/2 blue or red
+samples, with alternating green-red and green-blue rows. They are
+conventionally described as GRGR... BGBG..., RGRG... GBGB..., etc.
+Below is an example of a small V4L2_PIX_FMT_SBGGR12P image:
+
+**Byte Order.**
+Each cell is one byte.
+
+.. tabularcolumns:: |p{2.0cm}|p{1.0cm}|p{1.0cm}|p{2.7cm}|p{1.0cm}|p{1.0cm}|p{2.7cm}|
+
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 2 1 1 1 1 1 1
+
+
+ - - start + 0:
+ - B\ :sub:`00high`
+ - G\ :sub:`01high`
+ - G\ :sub:`01low`\ (bits 7--4)
+
+ B\ :sub:`00low`\ (bits 3--0)
+ - B\ :sub:`02high`
+ - G\ :sub:`03high`
+ - G\ :sub:`03low`\ (bits 7--4)
+
+ B\ :sub:`02low`\ (bits 3--0)
+
+ - - start + 6:
+ - G\ :sub:`10high`
+ - R\ :sub:`11high`
+ - R\ :sub:`11low`\ (bits 7--4)
+
+ G\ :sub:`10low`\ (bits 3--0)
+ - G\ :sub:`12high`
+ - R\ :sub:`13high`
+ - R\ :sub:`13low`\ (bits 7--4)
+
+ G\ :sub:`12low`\ (bits 3--0)
+ - - start + 12:
+ - B\ :sub:`20high`
+ - G\ :sub:`21high`
+ - G\ :sub:`21low`\ (bits 7--4)
+
+ B\ :sub:`20low`\ (bits 3--0)
+ - B\ :sub:`22high`
+ - G\ :sub:`23high`
+ - G\ :sub:`23low`\ (bits 7--4)
+
+ B\ :sub:`22low`\ (bits 3--0)
+ - - start + 18:
+ - G\ :sub:`30high`
+ - R\ :sub:`31high`
+ - R\ :sub:`31low`\ (bits 7--4)
+
+ G\ :sub:`30low`\ (bits 3--0)
+ - G\ :sub:`32high`
+ - R\ :sub:`33high`
+ - R\ :sub:`33low`\ (bits 7--4)
+
+ G\ :sub:`32low`\ (bits 3--0)
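Unpacking the 12-bit layout added above follows the same pattern as the 10-bit case: two samples per 3-byte group, with the third byte holding the two low nibbles. The helper below is a sketch with invented names, not kernel code.

#include <stdint.h>

/* Sketch: expands one 3-byte group of V4L2_PIX_FMT_SBGGR12P samples
 * into two 12-bit values. */
static void sbggr12p_unpack(const uint8_t *src, uint16_t out[2])
{
	out[0] = ((uint16_t)src[0] << 4) | (src[2] & 0x0f);	/* low nibble in bits 3..0 */
	out[1] = ((uint16_t)src[1] << 4) | (src[2] >> 4);	/* low nibble in bits 7..4 */
}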
diff --git a/Documentation/media/uapi/v4l/pixfmt-uyvy.rst b/Documentation/media/uapi/v4l/pixfmt-uyvy.rst
index 30660e04dd0e..ecdc2d94c209 100644
--- a/Documentation/media/uapi/v4l/pixfmt-uyvy.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-uyvy.rst
@@ -65,7 +65,7 @@ Each cell is one byte.
- Y'\ :sub:`33`
-**Color Sample Location..**
+**Color Sample Location:**
diff --git a/Documentation/media/uapi/v4l/pixfmt-003.rst b/Documentation/media/uapi/v4l/pixfmt-v4l2-mplane.rst
index 337e8188caf1..337e8188caf1 100644
--- a/Documentation/media/uapi/v4l/pixfmt-003.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-v4l2-mplane.rst
diff --git a/Documentation/media/uapi/v4l/pixfmt-002.rst b/Documentation/media/uapi/v4l/pixfmt-v4l2.rst
index 2ee164c25637..2ee164c25637 100644
--- a/Documentation/media/uapi/v4l/pixfmt-002.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-v4l2.rst
diff --git a/Documentation/media/uapi/v4l/pixfmt-vyuy.rst b/Documentation/media/uapi/v4l/pixfmt-vyuy.rst
index a3f61f280b94..670c339c1714 100644
--- a/Documentation/media/uapi/v4l/pixfmt-vyuy.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-vyuy.rst
@@ -65,7 +65,7 @@ Each cell is one byte.
- Y'\ :sub:`33`
-**Color Sample Location..**
+**Color Sample Location:**
.. flat-table::
:header-rows: 0
diff --git a/Documentation/media/uapi/v4l/pixfmt-y41p.rst b/Documentation/media/uapi/v4l/pixfmt-y41p.rst
index 05d040c46a47..e1fe548807a4 100644
--- a/Documentation/media/uapi/v4l/pixfmt-y41p.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-y41p.rst
@@ -88,7 +88,7 @@ Each cell is one byte.
- Y'\ :sub:`37`
-**Color Sample Location..**
+**Color Sample Location:**
.. flat-table::
:header-rows: 0
diff --git a/Documentation/media/uapi/v4l/pixfmt-yuv410.rst b/Documentation/media/uapi/v4l/pixfmt-yuv410.rst
index 0c49915af850..b51a0d1c6108 100644
--- a/Documentation/media/uapi/v4l/pixfmt-yuv410.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-yuv410.rst
@@ -67,7 +67,7 @@ Each cell is one byte.
- Cb\ :sub:`00`
-**Color Sample Location..**
+**Color Sample Location:**
diff --git a/Documentation/media/uapi/v4l/pixfmt-yuv411p.rst b/Documentation/media/uapi/v4l/pixfmt-yuv411p.rst
index 2cf33fad7254..2582341972db 100644
--- a/Documentation/media/uapi/v4l/pixfmt-yuv411p.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-yuv411p.rst
@@ -75,7 +75,7 @@ Each cell is one byte.
- Cr\ :sub:`30`
-**Color Sample Location..**
+**Color Sample Location:**
diff --git a/Documentation/media/uapi/v4l/pixfmt-yuv420.rst b/Documentation/media/uapi/v4l/pixfmt-yuv420.rst
index fd98904058ed..a9b85c4b1dbc 100644
--- a/Documentation/media/uapi/v4l/pixfmt-yuv420.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-yuv420.rst
@@ -76,7 +76,7 @@ Each cell is one byte.
- Cb\ :sub:`11`
-**Color Sample Location..**
+**Color Sample Location:**
diff --git a/Documentation/media/uapi/v4l/pixfmt-yuv420m.rst b/Documentation/media/uapi/v4l/pixfmt-yuv420m.rst
index cce8c477fdfc..32c68c33f2b1 100644
--- a/Documentation/media/uapi/v4l/pixfmt-yuv420m.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-yuv420m.rst
@@ -85,7 +85,7 @@ Each cell is one byte.
- Cr\ :sub:`11`
-**Color Sample Location..**
+**Color Sample Location:**
diff --git a/Documentation/media/uapi/v4l/pixfmt-yuv422m.rst b/Documentation/media/uapi/v4l/pixfmt-yuv422m.rst
index d986393aa934..9e7028c4967c 100644
--- a/Documentation/media/uapi/v4l/pixfmt-yuv422m.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-yuv422m.rst
@@ -96,7 +96,7 @@ Each cell is one byte.
- Cr\ :sub:`31`
-**Color Sample Location..**
+**Color Sample Location:**
diff --git a/Documentation/media/uapi/v4l/pixfmt-yuv422p.rst b/Documentation/media/uapi/v4l/pixfmt-yuv422p.rst
index e6f5de546dba..a96f836c7fa5 100644
--- a/Documentation/media/uapi/v4l/pixfmt-yuv422p.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-yuv422p.rst
@@ -84,7 +84,7 @@ Each cell is one byte.
- Cr\ :sub:`31`
-**Color Sample Location..**
+**Color Sample Location:**
diff --git a/Documentation/media/uapi/v4l/pixfmt-yuv444m.rst b/Documentation/media/uapi/v4l/pixfmt-yuv444m.rst
index 830fbf6fcd1d..8605bfaee112 100644
--- a/Documentation/media/uapi/v4l/pixfmt-yuv444m.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-yuv444m.rst
@@ -106,7 +106,7 @@ Each cell is one byte.
- Cr\ :sub:`33`
-**Color Sample Location..**
+**Color Sample Location:**
diff --git a/Documentation/media/uapi/v4l/pixfmt-yuyv.rst b/Documentation/media/uapi/v4l/pixfmt-yuyv.rst
index e1bdd6b1aefc..53e876d053fb 100644
--- a/Documentation/media/uapi/v4l/pixfmt-yuyv.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-yuyv.rst
@@ -68,7 +68,7 @@ Each cell is one byte.
- Cr\ :sub:`31`
-**Color Sample Location..**
+**Color Sample Location:**
diff --git a/Documentation/media/uapi/v4l/pixfmt-yvyu.rst b/Documentation/media/uapi/v4l/pixfmt-yvyu.rst
index 0244ce6741a6..b9c31746e565 100644
--- a/Documentation/media/uapi/v4l/pixfmt-yvyu.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-yvyu.rst
@@ -65,7 +65,7 @@ Each cell is one byte.
- Cb\ :sub:`31`
-**Color Sample Location..**
+**Color Sample Location:**
.. flat-table::
:header-rows: 0
diff --git a/Documentation/media/uapi/v4l/pixfmt.rst b/Documentation/media/uapi/v4l/pixfmt.rst
index 00737152497b..2aa449e2da67 100644
--- a/Documentation/media/uapi/v4l/pixfmt.rst
+++ b/Documentation/media/uapi/v4l/pixfmt.rst
@@ -19,20 +19,19 @@ see also :ref:`VIDIOC_G_FBUF <VIDIOC_G_FBUF>`.)
.. toctree::
:maxdepth: 1
- pixfmt-002
- pixfmt-003
- pixfmt-004
- colorspaces
- pixfmt-006
- pixfmt-007
- pixfmt-008
+ pixfmt-v4l2
+ pixfmt-v4l2-mplane
+ pixfmt-intro
pixfmt-indexed
pixfmt-rgb
yuv-formats
hsv-formats
depth-formats
- pixfmt-013
+ pixfmt-compressed
sdr-formats
tch-formats
meta-formats
pixfmt-reserved
+ colorspaces
+ colorspaces-defs
+ colorspaces-details
diff --git a/Documentation/media/uapi/v4l/selection.svg b/Documentation/media/uapi/v4l/selection.svg
index d309187af967..a93e3b59786d 100644
--- a/Documentation/media/uapi/v4l/selection.svg
+++ b/Documentation/media/uapi/v4l/selection.svg
@@ -1,5812 +1,1151 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
 [The remainder of the removed text is the original Inkscape-generated SVG source of
 selection.svg (a drawing of roughly 4226 x 1687 px: sodipodi namedview settings plus
 several thousand lines of gradient, filter, marker, pattern and clipPath definitions).
 It is elided here; per the hunk header above, the patch replaces it with a smaller
 version, shrinking the file from 5812 to 1151 lines.]
- inkscape:collect="always"
- stdDeviation="1.7233839"
- id="feGaussianBlur7347" />
- </filter>
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath7421">
- <path
- sodipodi:type="inkscape:offset"
- inkscape:radius="0"
- inkscape:original="M 1111.4062 -285.9375 L 1107.4688 -284.0625 C 1107.4283 -284.05228 1107.3692 -284.04201 1107.3438 -284.03125 C 1106.925 -283.8184 1107.1791 -283.93067 1106.6875 -283.71875 C 1106.2014 -283.50919 1104.9499 -283.13456 1102.5938 -282.25 C 1099.2626 -280.99942 1096.7895 -280.10016 1095.5938 -279.1875 C 1094.0576 -279.16623 1091.8733 -278.95419 1089.9375 -278.46875 C 1086.956 -277.72108 1085.0823 -277.29474 1083.1875 -276.875 C 1081.2927 -276.45527 1081.512 -276.23281 1080.3125 -276 C 1079.0159 -275.74833 1078.5911 -276.00899 1074.875 -275.21875 C 1071.3851 -274.4766 1065.9802 -273.28768 1064.7188 -272.53125 C 1063.1348 -272.71203 1060.8513 -272.85303 1058.875 -272.5625 C 1055.8346 -272.11554 1053.9588 -271.88974 1052.0312 -271.65625 C 1051.3758 -271.57687 1050.9902 -271.45547 1050.6875 -271.375 C 1050.2613 -271.24334 1050.0017 -271.11498 1049.3125 -271.03125 C 1048.0009 -270.87188 1047.5503 -271.18808 1043.7812 -270.75 C 1040.2273 -270.33691 1034.7758 -269.47718 1033.5312 -268.8125 C 1031.9322 -269.10979 1029.6735 -269.34669 1027.6875 -269.15625 C 1024.6287 -268.86293 1022.7155 -268.67226 1020.7812 -268.5 C 1018.847 -268.32773 1019.0926 -268.07763 1017.875 -267.96875 C 1016.5588 -267.85105 1016.1152 -268.13238 1012.3438 -267.71875 C 1008.8017 -267.3303 1003.3359 -266.50948 1002.0625 -265.84375 C 1000.4636 -266.13844 998.1753 -266.35076 996.1875 -266.15625 C 993.12921 -265.857 991.2463 -265.67601 989.3125 -265.5 C 988.65501 -265.44015 988.27245 -265.32144 987.96875 -265.25 C 987.54105 -265.13104 987.28525 -265.03193 986.59375 -264.96875 C 985.27775 -264.84849 984.834 -265.16363 981.0625 -264.75 C 977.50631 -264.35998 972.0569 -263.51084 970.8125 -262.84375 C 969.21381 -263.13793 966.95265 -263.36747 964.96875 -263.15625 C 961.91305 -262.83092 959.9947 -262.63001 958.0625 -262.4375 C 956.13031 -262.24499 956.37275 -261.99662 955.15625 -261.875 C 953.84137 -261.74353 953.3932 -262.03954 949.625 -261.59375 C 946.08611 -261.17509 940.6473 -260.30158 939.375 -259.625 C 937.77741 -259.90604 935.51505 -260.04543 933.53125 -259.8125 C 930.47927 -259.45413 928.58625 -259.24464 926.65625 -259.03125 C 926.00007 -258.95869 925.6156 -258.85856 925.3125 -258.78125 C 924.88571 -258.65402 924.6276 -258.51405 923.9375 -258.4375 C 922.62411 -258.29181 922.17015 -258.61152 918.40625 -258.125 C 914.85737 -257.66624 909.4276 -256.70598 908.1875 -256 C 906.59441 -256.24424 904.3537 -256.38135 902.375 -256.125 C 899.32741 -255.73018 897.4243 -255.47655 895.5 -255.21875 C 893.57571 -254.96096 893.7739 -254.72522 892.5625 -254.5625 C 891.25301 -254.3866 890.8153 -254.66688 887.0625 -254.09375 C 883.53821 -253.55551 878.1393 -252.39458 876.875 -251.65625 C 875.28751 -251.85979 873.0295 -251.91098 871.0625 -251.5625 C 868.03631 -251.02638 866.1636 -250.70081 864.25 -250.375 C 863.59941 -250.26423 863.2363 -250.10406 862.9375 -250 C 862.51681 -249.83512 862.27405 -249.6687 861.59375 -249.53125 C 860.29905 -249.26966 859.86665 -249.53745 856.15625 -248.71875 C 852.65777 -247.9468 847.31035 -246.33582 846.09375 -245.5 C 844.53085 -245.57745 842.33625 -245.41472 840.40625 -244.90625 C 837.43387 -244.12312 835.58855 -243.67416 833.71875 -243.15625 C 831.84875 -242.63835 832.0521 -242.38897 830.875 -242.0625 C 829.60251 -241.7096 829.17795 -241.95541 825.53125 -240.875 C 822.10657 -239.86037 816.88185 -237.94183 815.65625 -237.03125 C 814.11747 -237.01851 811.93645 -236.75903 810.03125 -236.15625 C 807.10027 -235.22891 805.2809 -234.69783 803.4375 -234.09375 C 802.81071 -233.88837 802.44585 -233.70117 
802.15625 -233.5625 C 801.74867 -233.34889 801.50295 -233.15375 800.84375 -232.9375 C 799.58925 -232.52596 799.1576 -232.74846 795.5625 -231.5 C 792.17261 -230.32283 786.96755 -228.2863 785.78125 -227.34375 C 784.25737 -227.28408 782.1312 -226.94888 780.25 -226.28125 C 777.35261 -225.25296 775.55095 -224.60577 773.71875 -223.96875 C 771.88655 -223.33174 772.0909 -223.12021 770.9375 -222.71875 C 769.69071 -222.28479 769.27395 -222.51903 765.71875 -221.15625 C 762.38005 -219.87645 757.23165 -217.6737 756.03125 -216.6875 C 754.52407 -216.57981 752.39555 -216.1887 750.53125 -215.46875 C 747.66307 -214.36115 745.90735 -213.68719 744.09375 -213 C 743.47705 -212.76637 743.0973 -212.55797 742.8125 -212.40625 C 742.81251 -212.40625 742.8125 -212.37673 742.8125 -212.375 L 734.8125 -209.1875 L 736.625 -194.46875 C 736.36701 -194.52956 742.8125 -191.15625 742.8125 -191.15625 C 743.03891 -191.30093 743.26145 -191.42886 743.53125 -191.53125 C 744.61177 -191.94123 745.70285 -191.74702 749.53125 -193.21875 C 753.35977 -194.69049 754.7553 -195.22373 755.4375 -195.625 C 756.11711 -196.02478 757.04925 -196.50437 757.65625 -197.15625 C 759.48317 -197.294 761.22705 -197.64948 762.59375 -198.15625 C 765.56175 -199.25677 767.4691 -199.96244 769.375 -200.625 C 771.28081 -201.28754 771.72915 -202.03987 772.78125 -202.40625 C 773.87287 -202.78636 774.97635 -202.57163 778.84375 -203.9375 C 782.71115 -205.30336 784.1269 -205.76458 784.8125 -206.15625 C 785.51361 -206.55677 786.5133 -207.08923 787.125 -207.75 C 789.09581 -207.80466 790.94195 -208.13463 792.40625 -208.625 C 795.40777 -209.63008 797.3324 -210.24671 799.25 -210.875 C 800.78861 -211.3791 801.42415 -211.92177 802.15625 -212.3125 C 802.38647 -212.44681 802.63215 -212.56623 802.90625 -212.65625 C 804.00457 -213.01673 805.0877 -212.73762 809 -213.96875 C 812.91231 -215.19988 814.366 -215.6417 815.0625 -216 C 815.75641 -216.35697 816.6926 -216.79261 817.3125 -217.40625 C 819.17771 -217.42891 820.94835 -217.67308 822.34375 -218.09375 C 825.37415 -219.00729 827.33615 -219.52385 829.28125 -220.0625 C 831.22637 -220.60114 831.70745 -221.32702 832.78125 -221.625 C 833.89527 -221.93415 835.00125 -221.61761 838.96875 -222.65625 C 842.93625 -223.69488 844.38625 -224.08898 845.09375 -224.40625 C 845.82855 -224.73584 846.90765 -225.15997 847.53125 -225.78125 C 849.52907 -225.66525 851.3887 -225.80134 852.875 -226.15625 C 855.95311 -226.89125 857.9584 -227.25719 859.9375 -227.65625 C 861.52541 -227.97643 862.1818 -228.4468 862.9375 -228.75 C 863.17501 -228.8568 863.4044 -228.94276 863.6875 -229 C 864.82091 -229.22919 865.99215 -228.79107 870.03125 -229.5 C 874.07067 -230.20893 875.5315 -230.42709 876.25 -230.6875 C 876.96581 -230.94694 877.95435 -231.25474 878.59375 -231.78125 C 880.51795 -231.54176 882.34165 -231.55672 883.78125 -231.78125 C 886.90767 -232.26887 888.9358 -232.48192 890.9375 -232.75 C 892.93921 -233.01807 893.42625 -233.69514 894.53125 -233.84375 C 895.67767 -233.99793 896.8071 -233.54218 900.875 -234.0625 C 904.94281 -234.58282 906.43525 -234.75823 907.15625 -235 C 907.89337 -235.24714 908.95435 -235.58623 909.59375 -236.125 C 911.64375 -235.78947 913.56745 -235.72704 915.09375 -235.90625 C 918.23595 -236.27521 920.27375 -236.46561 922.28125 -236.6875 C 923.89207 -236.86552 924.5459 -237.2957 925.3125 -237.53125 C 925.55341 -237.61677 925.80655 -237.68685 926.09375 -237.71875 C 927.24345 -237.84647 928.39505 -237.3721 932.46875 -237.84375 C 936.54245 -238.3154 938.0278 -238.45435 938.75 -238.6875 C 939.46941 -238.91977 940.45025 -239.16096 941.09375 
-239.65625 C 943.03005 -239.32279 944.8638 -239.25201 946.3125 -239.40625 C 949.45851 -239.7412 951.49 -239.92484 953.5 -240.125 C 955.50991 -240.32514 955.98415 -240.95139 957.09375 -241.0625 C 958.24485 -241.17778 959.39025 -240.69744 963.46875 -241.125 C 967.54725 -241.55256 969.05765 -241.68709 969.78125 -241.90625 C 970.52047 -242.13011 971.57685 -242.4195 972.21875 -242.9375 C 974.27575 -242.53883 976.2206 -242.4441 977.75 -242.59375 C 980.89871 -242.90185 982.9258 -243.067 984.9375 -243.25 C 986.55151 -243.39682 987.20055 -243.81055 987.96875 -244.03125 C 988.21005 -244.11211 988.4623 -244.16116 988.75 -244.1875 C 989.90211 -244.29295 991.0429 -243.79475 995.125 -244.1875 C 999.20711 -244.58025 1000.7139 -244.71834 1001.4375 -244.9375 C 1002.1584 -245.15583 1003.1371 -245.3852 1003.7812 -245.875 C 1005.7193 -245.52501 1007.5501 -245.42062 1009 -245.5625 C 1012.1487 -245.8706 1014.1758 -246.03575 1016.1875 -246.21875 C 1018.1991 -246.40174 1018.7017 -247.05677 1019.8125 -247.15625 C 1020.9648 -247.25948 1022.1047 -246.77142 1026.1875 -247.15625 C 1030.2704 -247.54107 1031.7762 -247.65725 1032.5 -247.875 C 1033.2393 -248.09743 1034.2956 -248.38949 1034.9375 -248.90625 C 1036.9949 -248.50448 1038.9404 -248.40292 1040.4688 -248.5625 C 1043.6153 -248.89102 1045.6458 -249.0852 1047.6562 -249.28125 C 1049.2692 -249.43854 1049.9219 -249.91273 1050.6875 -250.15625 C 1050.9282 -250.24429 1051.1507 -250.27762 1051.4375 -250.3125 C 1052.5858 -250.4522 1053.7542 -249.97259 1057.8125 -250.5625 C 1061.8708 -251.15242 1063.3743 -251.33964 1064.0938 -251.59375 C 1064.8104 -251.84691 1065.7684 -252.15182 1066.4062 -252.6875 C 1068.3259 -252.47556 1070.1262 -252.53609 1071.5625 -252.78125 C 1074.6816 -253.31365 1076.6741 -253.70986 1078.6562 -254.09375 C 1080.6383 -254.47762 1081.1305 -255.1334 1082.2188 -255.375 C 1083.3475 -255.62566 1084.489 -255.25871 1088.4688 -256.25 C 1092.4483 -257.24127 1093.8983 -257.6693 1094.5938 -258.03125 C 1095.316 -258.40725 1096.3555 -258.90183 1096.9688 -259.5625 C 1098.9317 -259.57454 1100.7625 -259.85355 1102.1875 -260.40625 C 1105.1387 -261.55085 1107.0607 -262.27567 1108.875 -263.15625 C 1110.3307 -263.86277 1111.1941 -264.85828 1111.4062 -265.15625 C 1111.6185 -265.4542 1111.5051 -265.8848 1111.5312 -265.90625 C 1111.5742 -265.94148 1111.8716 -266.00028 1112.0312 -266.34375 C 1112.8902 -268.19082 1114.3544 -271.97139 1114.4688 -272.65625 C 1114.5825 -273.33839 1114.6368 -274.00902 1114.6875 -274.40625 C 1114.7169 -274.63575 1114.5404 -275.28515 1114.5625 -275.34375 C 1114.5934 -275.42579 1114.8508 -275.59432 1114.9062 -275.84375 C 1115.1725 -277.04206 1114.9953 -278.05111 1114.7812 -279.46875 C 1114.5673 -280.88638 1113.8096 -284.08338 1113.1562 -284.9375 C 1112.4973 -285.79922 1111.9314 -285.94801 1111.4062 -285.9375 z "
- style="display:inline;fill:#bcb786;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- id="path7423"
- d="m 1111.4062,-285.9375 -3.9374,1.875 c -0.041,0.0102 -0.1,0.0205 -0.125,0.0312 -0.4188,0.21285 -0.1647,0.10058 -0.6563,0.3125 -0.4861,0.20956 -1.7376,0.58419 -4.0937,1.46875 -3.3312,1.25058 -5.8043,2.14984 -7,3.0625 -1.5362,0.0213 -3.7205,0.23331 -5.6563,0.71875 -2.9815,0.74767 -4.8552,1.17401 -6.75,1.59375 -1.8948,0.41973 -1.6755,0.64219 -2.875,0.875 -1.2966,0.25167 -1.7214,-0.009 -5.4375,0.78125 -3.4899,0.74215 -8.8948,1.93107 -10.1562,2.6875 -1.584,-0.18078 -3.8675,-0.32178 -5.8438,-0.0312 -3.0404,0.44696 -4.9162,0.67276 -6.8438,0.90625 -0.6554,0.0794 -1.041,0.20078 -1.3437,0.28125 -0.4262,0.13166 -0.6858,0.26002 -1.375,0.34375 -1.3116,0.15937 -1.7622,-0.15683 -5.5313,0.28125 -3.5539,0.41309 -9.0054,1.27282 -10.25,1.9375 -1.599,-0.29729 -3.8577,-0.53419 -5.8437,-0.34375 -3.0588,0.29332 -4.972,0.48399 -6.9063,0.65625 -1.9342,0.17227 -1.6886,0.42237 -2.9062,0.53125 -1.3162,0.1177 -1.7598,-0.16363 -5.5312,0.25 -3.5421,0.38845 -9.0079,1.20927 -10.2813,1.875 -1.5989,-0.29469 -3.8872,-0.50701 -5.875,-0.3125 -3.05829,0.29925 -4.9412,0.48024 -6.875,0.65625 -0.65749,0.0598 -1.04005,0.17856 -1.34375,0.25 -0.4277,0.11896 -0.6835,0.21807 -1.375,0.28125 -1.316,0.12026 -1.75975,-0.19488 -5.53125,0.21875 -3.55619,0.39002 -9.0056,1.23916 -10.25,1.90625 -1.59869,-0.29418 -3.85985,-0.52372 -5.84375,-0.3125 -3.0557,0.32533 -4.97405,0.52624 -6.90625,0.71875 -1.93219,0.19251 -1.68975,0.44088 -2.90625,0.5625 -1.31488,0.13147 -1.76305,-0.16454 -5.53125,0.28125 -3.53889,0.41866 -8.9777,1.29217 -10.25,1.96875 -1.59759,-0.28104 -3.85995,-0.42043 -5.84375,-0.1875 -3.05198,0.35837 -4.945,0.56786 -6.875,0.78125 -0.65618,0.0726 -1.04065,0.17269 -1.34375,0.25 -0.42679,0.12723 -0.6849,0.2672 -1.375,0.34375 -1.31339,0.14569 -1.76735,-0.17402 -5.53125,0.3125 -3.54888,0.45876 -8.97865,1.41902 -10.21875,2.125 -1.59309,-0.24424 -3.8338,-0.38135 -5.8125,-0.125 -3.04759,0.39482 -4.9507,0.64845 -6.875,0.90625 -1.92429,0.25779 -1.7261,0.49353 -2.9375,0.65625 -1.30949,0.1759 -1.7472,-0.10438 -5.5,0.46875 -3.52429,0.53824 -8.9232,1.69917 -10.1875,2.4375 -1.58749,-0.20354 -3.8455,-0.25473 -5.8125,0.0937 -3.02619,0.53612 -4.8989,0.86169 -6.8125,1.1875 -0.65059,0.11077 -1.0137,0.27094 -1.3125,0.375 -0.42069,0.16488 -0.66345,0.3313 -1.34375,0.46875 -1.2947,0.26159 -1.7271,-0.006 -5.4375,0.8125 -3.49848,0.77195 -8.8459,2.38293 -10.0625,3.21875 -1.5629,-0.0774 -3.7575,0.0853 -5.6875,0.59375 -2.97238,0.78313 -4.8177,1.23209 -6.6875,1.75 -1.87,0.5179 -1.66665,0.76728 -2.84375,1.09375 -1.27249,0.3529 -1.69705,0.10709 -5.34375,1.1875 -3.42468,1.01463 -8.6494,2.93317 -9.875,3.84375 -1.53878,0.0127 -3.7198,0.27222 -5.625,0.875 -2.93098,0.92734 -4.75035,1.45842 -6.59375,2.0625 -0.62679,0.20538 -0.99165,0.39258 -1.28125,0.53125 -0.40758,0.21361 -0.6533,0.40875 -1.3125,0.625 -1.2545,0.41154 -1.68615,0.18904 -5.28125,1.4375 -3.38989,1.17717 -8.59495,3.2137 -9.78125,4.15625 -1.52388,0.0597 -3.65005,0.39487 -5.53125,1.0625 -2.89739,1.02829 -4.69905,1.67548 -6.53125,2.3125 -1.8322,0.63701 -1.62785,0.84854 -2.78125,1.25 -1.24679,0.43396 -1.66355,0.19972 -5.21875,1.5625 -3.3387,1.2798 -8.4871,3.48255 -9.6875,4.46875 -1.50718,0.10769 -3.6357,0.4988 -5.5,1.21875 -2.86818,1.1076 -4.6239,1.78156 -6.4375,2.46875 -0.6167,0.23363 -0.99645,0.44203 -1.28125,0.59375 10e-6,0 0,0.0295 0,0.0312 l -8,3.1875 1.8125,14.71875 c -0.25799,-0.0608 6.1875,3.3125 6.1875,3.3125 0.22641,-0.14468 0.44895,-0.27261 0.71875,-0.375 1.08052,-0.40998 2.1716,-0.21577 6,-1.6875 3.82852,-1.47174 5.22405,-2.00498 5.90625,-2.40625 0.67961,-0.39978 1.61175,-0.87937 2.21875,-1.53125 
1.82692,-0.13775 3.5708,-0.49323 4.9375,-1 2.968,-1.10052 4.87535,-1.80619 6.78125,-2.46875 1.90581,-0.66254 2.35415,-1.41487 3.40625,-1.78125 1.09162,-0.38011 2.1951,-0.16538 6.0625,-1.53125 3.8674,-1.36586 5.28315,-1.82708 5.96875,-2.21875 0.70111,-0.40052 1.7008,-0.93298 2.3125,-1.59375 1.97081,-0.0547 3.81695,-0.38463 5.28125,-0.875 3.00152,-1.00508 4.92615,-1.62171 6.84375,-2.25 1.53861,-0.5041 2.17415,-1.04677 2.90625,-1.4375 0.23022,-0.13431 0.4759,-0.25373 0.75,-0.34375 1.09832,-0.36048 2.18145,-0.0814 6.09375,-1.3125 3.91231,-1.23113 5.366,-1.67295 6.0625,-2.03125 0.69391,-0.35697 1.6301,-0.79261 2.25,-1.40625 1.86521,-0.0227 3.63585,-0.26683 5.03125,-0.6875 3.0304,-0.91354 4.9924,-1.4301 6.9375,-1.96875 1.94512,-0.53864 2.4262,-1.26452 3.5,-1.5625 1.11402,-0.30915 2.22,0.007 6.1875,-1.03125 3.9675,-1.03863 5.4175,-1.43273 6.125,-1.75 0.7348,-0.32959 1.8139,-0.75372 2.4375,-1.375 1.99782,0.116 3.85745,-0.0201 5.34375,-0.375 3.07811,-0.735 5.0834,-1.10094 7.0625,-1.5 1.58791,-0.32018 2.2443,-0.79055 3,-1.09375 0.23751,-0.1068 0.4669,-0.19276 0.75,-0.25 1.13341,-0.22919 2.30465,0.20893 6.34375,-0.5 4.03942,-0.70893 5.50025,-0.92709 6.21875,-1.1875 0.71581,-0.25944 1.70435,-0.56724 2.34375,-1.09375 1.9242,0.23949 3.7479,0.22453 5.1875,0 3.12642,-0.48762 5.15455,-0.70067 7.15625,-0.96875 2.00171,-0.26807 2.48875,-0.94514 3.59375,-1.09375 1.14642,-0.15418 2.27585,0.30157 6.34375,-0.21875 4.06781,-0.52032 5.56025,-0.69573 6.28125,-0.9375 0.73712,-0.24714 1.7981,-0.58623 2.4375,-1.125 2.05,0.33553 3.9737,0.39796 5.5,0.21875 3.1422,-0.36896 5.18,-0.55936 7.1875,-0.78125 1.61082,-0.17802 2.26465,-0.6082 3.03125,-0.84375 0.24091,-0.0855 0.49405,-0.1556 0.78125,-0.1875 1.1497,-0.12772 2.3013,0.34665 6.375,-0.125 4.0737,-0.47165 5.55905,-0.6106 6.28125,-0.84375 0.71941,-0.23227 1.70025,-0.47346 2.34375,-0.96875 1.9363,0.33346 3.77005,0.40424 5.21875,0.25 3.14601,-0.33495 5.1775,-0.51859 7.1875,-0.71875 2.00991,-0.20014 2.48415,-0.82639 3.59375,-0.9375 1.1511,-0.11528 2.2965,0.36506 6.375,-0.0625 4.0785,-0.42756 5.5889,-0.56209 6.3125,-0.78125 0.73922,-0.22386 1.7956,-0.51325 2.4375,-1.03125 2.057,0.39867 4.00185,0.4934 5.53125,0.34375 3.14871,-0.3081 5.1758,-0.47325 7.1875,-0.65625 1.61401,-0.14682 2.26305,-0.56055 3.03125,-0.78125 0.2413,-0.0809 0.49355,-0.12991 0.78125,-0.15625 1.15211,-0.10545 2.2929,0.39275 6.375,0 4.08211,-0.39275 5.5889,-0.53084 6.3125,-0.75 0.7209,-0.21833 1.6996,-0.4477 2.3437,-0.9375 1.9381,0.34999 3.7689,0.45438 5.2188,0.3125 3.1487,-0.3081 5.1758,-0.47325 7.1875,-0.65625 2.0116,-0.18299 2.5142,-0.83802 3.625,-0.9375 1.1523,-0.10323 2.2922,0.38483 6.375,0 4.0829,-0.38482 5.5887,-0.501 6.3125,-0.71875 0.7393,-0.22243 1.7956,-0.51449 2.4375,-1.03125 2.0574,0.40177 4.0029,0.50333 5.5313,0.34375 3.1465,-0.32852 5.177,-0.5227 7.1874,-0.71875 1.613,-0.15729 2.2657,-0.63148 3.0313,-0.875 0.2407,-0.088 0.4632,-0.12137 0.75,-0.15625 1.1483,-0.1397 2.3167,0.33991 6.375,-0.25 4.0583,-0.58992 5.5618,-0.77714 6.2813,-1.03125 0.7166,-0.25316 1.6746,-0.55807 2.3124,-1.09375 1.9197,0.21194 3.72,0.15141 5.1563,-0.0937 3.1191,-0.5324 5.1116,-0.92861 7.0937,-1.3125 1.9821,-0.38387 2.4743,-1.03965 3.5626,-1.28125 1.1287,-0.25066 2.2702,0.11629 6.25,-0.875 3.9795,-0.99127 5.4295,-1.4193 6.125,-1.78125 0.7222,-0.376 1.7617,-0.87058 2.375,-1.53125 1.9629,-0.012 3.7937,-0.29105 5.2187,-0.84375 2.9512,-1.1446 4.8732,-1.86942 6.6875,-2.75 1.4557,-0.70652 2.3191,-1.70203 2.5312,-2 0.2123,-0.29795 0.099,-0.72855 0.125,-0.75 0.043,-0.0352 0.3404,-0.094 0.5,-0.4375 0.859,-1.84707 2.3232,-5.62764 
2.4376,-6.3125 0.1137,-0.68214 0.168,-1.35277 0.2187,-1.75 0.029,-0.2295 -0.1471,-0.8789 -0.125,-0.9375 0.031,-0.082 0.2883,-0.25057 0.3437,-0.5 0.2663,-1.19831 0.089,-2.20736 -0.125,-3.625 -0.2139,-1.41763 -0.9716,-4.61463 -1.625,-5.46875 -0.6589,-0.86172 -1.2248,-1.01051 -1.75,-1 z"
- transform="translate(0.08004571,-0.03125)" />
- </clipPath>
- <filter
- inkscape:collect="always"
- id="filter7578"
- x="-0.08160872"
- width="1.1632174"
- y="-0.22659944"
- height="1.4531989">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="2.437399"
- id="feGaussianBlur7580" />
- </filter>
- <filter
- inkscape:collect="always"
- id="filter7594"
- x="-0.040804356"
- width="1.0816087"
- y="-0.11329972"
- height="1.2265995">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="1.2186995"
- id="feGaussianBlur7596" />
- </filter>
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath7606">
- <path
- id="path7608"
- d="m 1049.205,-282.26672 -0.09,0.008 c -1.3874,0.88445 -6.6033,1.6072 -6.629,9.52344 -0.024,7.42525 15.0129,17.09146 17.1563,18.09375 1.7302,0.80909 3.5916,1.40876 5.4063,1.71875 l 1.4374,0.21875 c 1.9197,0.21194 3.72,0.15141 5.1563,-0.0937 3.1191,-0.5324 5.1116,-0.92861 7.0937,-1.3125 1.9821,-0.38387 2.4743,-1.03965 3.5626,-1.28125 1.1287,-0.25066 2.2702,0.11629 6.25,-0.875 3.9795,-0.99128 5.4294,-1.4193 6.125,-1.78125 0.7222,-0.37601 1.7617,-0.87058 2.375,-1.53125 1.9629,-0.012 3.7937,-0.29105 5.2187,-0.84375 2.9512,-1.14461 4.8732,-1.86942 6.6875,-2.75 1.4557,-0.70653 2.3191,-1.70203 2.5312,-2 0.2123,-0.29796 0.099,-0.72855 0.125,-0.75 0.043,-0.0352 0.3405,-0.094 0.5,-0.4375 0.859,-1.84708 2.3232,-5.62764 2.4376,-6.3125 0.1137,-0.68215 0.168,-1.35277 0.2187,-1.75 0.029,-0.22951 -0.1471,-0.8789 -0.125,-0.9375 0.031,-0.082 0.2883,-0.25057 0.3437,-0.5 0.2663,-1.19832 0.089,-2.20736 -0.125,-3.625 -0.2139,-1.41764 -0.9716,-4.61463 -1.625,-5.46875 -0.4194,-0.54857 -0.7993,-0.7925 -1.1562,-0.90625 -0.067,-0.0173 -0.1239,-0.0467 -0.1875,-0.0625 -0.021,-0.004 -0.042,0.003 -0.062,0 -0.3116,-0.0755 -0.6085,-0.15867 -1.1562,-0.21875 -0.9855,-0.10812 -2.4247,-0.2594 -3.9688,-0.25 -0.5147,0.003 -1.0371,0.0476 -1.5625,0.0937 -3.5589,0.31228 -9.0098,0.99108 -10.2187,1.625 -1.6331,-0.33402 -3.9482,-0.61223 -5.9376,-0.46875 -3.064,0.22097 -4.9677,0.34219 -6.9062,0.46875 -1.9384,0.12655 -1.6861,0.38864 -2.9062,0.46875 -1.3191,0.0866 -1.7869,-0.22325 -5.5626,0.0937 -3.5457,0.29772 -8.9806,0.99317 -10.2187,1.625 -1.6334,-0.33451 -3.9459,-0.61239 -5.9375,-0.46875 -3.0642,0.22098 -4.9678,0.37344 -6.9062,0.5 -0.6592,0.043 -1.0424,0.12393 -1.3438,0.1875 z"
- style="display:inline;opacity:0.82448976;fill:#bcb786;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- inkscape:connector-curvature="0" />
- </clipPath>
- <filter
- inkscape:collect="always"
- id="filter7610"
- x="-0.021942979"
- width="1.0438859"
- y="-0.10017137"
- height="1.2003427">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="0.57530213"
- id="feGaussianBlur7612" />
- </filter>
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath7616">
- <path
- id="path7618"
- d="m 205.47016,-408.97318 -0.0901,0.002 c -1.44563,0.78566 -6.69921,1.14335 -7.27625,9.03857 -0.54134,7.40553 13.78595,18.09566 15.85433,19.24481 1.66964,0.92764 3.48475,1.65551 5.27345,2.09115 l 1.41867,0.31834 c 1.90027,0.34514 3.70042,0.41015 5.15031,0.26563 3.1486,-0.31384 5.16386,-0.57031 7.16789,-0.8152 2.00402,-0.24488 2.5407,-0.86478 3.64319,-1.02999 1.14342,-0.17143 2.25659,0.27414 6.29577,-0.43753 4.03888,-0.71169 5.51507,-1.03768 6.23419,-1.3503 0.74664,-0.32479 1.81806,-0.74575 2.47589,-1.3621 1.95897,0.12471 3.80476,-0.0261 5.2648,-0.47819 3.02376,-0.93627 4.99157,-1.52544 6.8628,-2.27751 1.50138,-0.60342 2.43202,-1.53636 2.66436,-1.81883 0.23254,-0.28245 0.14951,-0.71989 0.17694,-0.73948 0.0453,-0.0322 0.34622,-0.0701 0.52926,-0.40161 0.98557,-1.78276 2.70955,-5.45215 2.87137,-6.12738 0.16094,-0.67257 0.26182,-1.33778 0.34007,-1.73051 0.0453,-0.22691 -0.0855,-0.88701 -0.0594,-0.94393 0.0365,-0.0797 0.30505,-0.22988 0.37769,-0.47485 0.34913,-1.17686 0.24274,-2.19578 0.1278,-3.6249 -0.11463,-1.42909 -0.64781,-4.6711 -1.24013,-5.56865 -0.38017,-0.57646 -0.74215,-0.84625 -1.09026,-0.98459 -0.0657,-0.0219 -0.12035,-0.0553 -0.1827,-0.0754 -0.0207,-0.005 -0.0418,2.3e-4 -0.0623,-0.004 -0.30559,-0.097 -0.59597,-0.20067 -1.13816,-0.29875 -0.97557,-0.1765 -2.40074,-0.42766 -3.94175,-0.52584 -0.51366,-0.0327 -1.0379,-0.0247 -1.56523,-0.0153 -3.57201,0.0636 -9.05695,0.3611 -10.30707,0.90928 -1.60587,-0.44697 -3.89597,-0.88576 -5.89053,-0.8812 -3.07195,0.007 -4.97947,-0.005 -6.92207,-0.0134 -1.94251,-0.009 -1.70908,0.27025 -2.9318,0.26518 -1.32192,-0.005 -1.76701,-0.34717 -5.55562,-0.29393 -3.55782,0.05 -9.02796,0.36522 -10.30706,0.90927 -1.60614,-0.44747 -3.89367,-0.88575 -5.89043,-0.88118 -3.07215,0.007 -4.98175,0.0265 -6.92426,0.0177 -0.66059,-0.003 -1.0485,0.051 -1.35359,0.0934 z"
- style="display:inline;opacity:0.82448976;fill:#bcb786;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- inkscape:connector-curvature="0" />
- </clipPath>
- <linearGradient
- inkscape:collect="always"
- xlink:href="#linearGradient7622"
- id="linearGradient7708"
- gradientUnits="userSpaceOnUse"
- gradientTransform="translate(-19.091883,4.2426407)"
- x1="774.97668"
- y1="-211.87105"
- x2="755.11584"
- y2="-202.67865" />
- <mask
- maskUnits="userSpaceOnUse"
- id="mask7704">
- <path
- style="fill:url(#linearGradient7708);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
- d="m 718.40812,-224.31217 33.25,56 275.99998,-24 159.5,-48.25 -66.5,-82.75 -402.24998,99 z"
- id="path7706"
- inkscape:connector-curvature="0" />
- </mask>
- <radialGradient
- inkscape:collect="always"
- xlink:href="#linearGradient8430"
- id="radialGradient7904"
- gradientUnits="userSpaceOnUse"
- gradientTransform="matrix(-0.3324832,0.9022288,-0.9582407,-0.3531242,305.29227,19.909497)"
- cx="142.95833"
- cy="107.09234"
- fx="142.95833"
- fy="107.09234"
- r="66.981766" />
- <radialGradient
- inkscape:collect="always"
- xlink:href="#linearGradient3317"
- id="radialGradient7906"
- gradientUnits="userSpaceOnUse"
- gradientTransform="matrix(1.0036478,-1.0345492e-7,1.7124628e-7,1.6613125,-160.53487,-96.205369)"
- cx="317.78754"
- cy="129.65378"
- fx="317.78754"
- fy="129.65378"
- r="47.863216" />
- <radialGradient
- inkscape:collect="always"
- xlink:href="#linearGradient8398"
- id="radialGradient7908"
- gradientUnits="userSpaceOnUse"
- gradientTransform="matrix(2.0747661,-0.1577957,0.2382425,3.1325183,-550.77432,-65.728909)"
- cx="325.30847"
- cy="80.909554"
- fx="325.30847"
- fy="80.909554"
- r="26.937988" />
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath8209">
- <path
- sodipodi:nodetypes="czcc"
- id="path8211"
- d="m 734.03125,519.49186 c 0,0 16.75513,37.01806 28.70141,53.95395 11.94629,16.93589 52.72716,56.04605 52.72716,56.04605 l 0.59717,-138.58975"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#1a1a1a;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- inkscape:connector-curvature="0" />
- </clipPath>
- <filter
- inkscape:collect="always"
- id="filter8225">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="10.661912"
- id="feGaussianBlur8227" />
- </filter>
- <filter
- inkscape:collect="always"
- id="filter8333">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="7.18"
- id="feGaussianBlur8335" />
- </filter>
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath8338">
- <path
- sodipodi:nodetypes="czzzzzzcccccccccczczz"
- id="path8340"
- d="m 266.27183,924.57185 c -1.40727,18.80122 -1.1449,32.75104 2.08174,49.30328 3.22666,16.55238 16.40609,45.90737 20.33441,63.18377 3.92621,17.2671 2.69413,38.3097 -12.45944,51.1482 -15.31761,12.9775 -42.05127,21.599 -67.8323,15.7338 -25.78106,-5.8653 -69.54908,-49.2234 -88.59019,-70.2283 C 100.6939,1012.6293 56.045182,939.86193 41.867507,909.4368 27.689835,879.01168 29.207902,872.71823 33.747792,863.90708 24.38107,839.38658 21.33408,813.84026 0.03533448,788.33044 30.360814,791.44487 43.915624,815.28676 60.161024,835.47019 54.631128,787.39416 42.106309,771.05368 31.787072,744.74589 c 29.994295,6.08165 50.57936,31.87239 63.979783,72.7125 9.554155,-3.91792 18.237765,-9.37294 30.187415,-9.0612 -11.2975,-41.6958 -17.94947,-69.91585 -36.687256,-101.06994 53.441966,5.67032 83.657026,80.63932 78.971426,87.9608 9.97797,-2.24399 19.00565,-6.53038 30.43653,-5.65167 -11.24897,-38.34703 -21.04782,-76.8679 -3.65971,-118.64819 0,0 48.28678,65.43688 54.38965,85.80578 6.10288,20.3689 1.51882,38.70051 1.51882,38.70051 0,0 16.95957,31.0853 20.29392,51.09414 3.3731,20.24134 -3.53269,59.10331 -4.94582,77.98323 z"
- style="display:inline;opacity:1;fill:#ada469;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- inkscape:connector-curvature="0" />
- </clipPath>
- <filter
- inkscape:collect="always"
- id="filter8354">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="6.82"
- id="feGaussianBlur8356" />
- </filter>
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath8359">
- <path
- sodipodi:nodetypes="czzzzzzcccccccccczczz"
- id="path8361"
- d="m 266.27183,924.57185 c -1.40727,18.80122 -1.1449,32.75104 2.08174,49.30328 3.22666,16.55238 16.40609,45.90737 20.33441,63.18377 3.92621,17.2671 2.69413,38.3097 -12.45944,51.1482 -15.31761,12.9775 -42.05127,21.599 -67.8323,15.7338 -25.78106,-5.8653 -69.54908,-49.2234 -88.59019,-70.2283 C 100.6939,1012.6293 56.045182,939.86193 41.867507,909.4368 27.689835,879.01168 29.207902,872.71823 33.747792,863.90708 24.38107,839.38658 21.33408,813.84026 0.03533448,788.33044 30.360814,791.44487 43.915624,815.28676 60.161024,835.47019 54.631128,787.39416 42.106309,771.05368 31.787072,744.74589 c 29.994295,6.08165 50.57936,31.87239 63.979783,72.7125 9.554155,-3.91792 18.237765,-9.37294 30.187415,-9.0612 -11.2975,-41.6958 -17.94947,-69.91585 -36.687256,-101.06994 53.441966,5.67032 83.657026,80.63932 78.971426,87.9608 9.97797,-2.24399 19.00565,-6.53038 30.43653,-5.65167 -11.24897,-38.34703 -21.04782,-76.8679 -3.65971,-118.64819 0,0 48.28678,65.43688 54.38965,85.80578 6.10288,20.3689 1.51882,38.70051 1.51882,38.70051 0,0 16.95957,31.0853 20.29392,51.09414 3.3731,20.24134 -3.53269,59.10331 -4.94582,77.98323 z"
- style="display:inline;opacity:1;fill:#ada469;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- inkscape:connector-curvature="0" />
- </clipPath>
- <filter
- inkscape:collect="always"
- id="filter8379"
- x="-0.14413793"
- width="1.288276"
- y="-0.10278689"
- height="1.2055738">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="7.389266"
- id="feGaussianBlur8381" />
- </filter>
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath8392">
- <path
- sodipodi:nodetypes="czzzzzzcccccccccczczz"
- id="path8394"
- d="m 760.16396,935.83377 c 6.7941,18.90279 10.49369,33.29969 11.8903,51.21189 1.39662,17.91234 -3.78268,51.80084 -2.90046,70.65614 0.88175,18.8452 8.13369,40.099 27.34463,48.9689 19.41887,8.9658 49.31924,10.2113 74.11984,-3.1456 24.8006,-13.357 57.40102,-70.3255 70.97426,-97.3087 13.62385,-27.08394 38.76107,-114.49737 44.6608,-149.76859 5.89973,-35.27121 2.55054,-41.30077 -4.61748,-49.05549 2.6403,-27.84015 -1.49972,-54.93543 13.10969,-87.18618 -30.24901,11.8257 -37.38229,40.1607 -48.31889,65.50508 -8.00091,-50.93293 0.20916,-71.27319 3.31889,-101.21936 -29.06476,14.77791 -42.86151,47.11402 -45,92.85714 -10.92395,-1.3042 -21.39144,-4.43423 -33.57143,-0.71429 -0.26404,-46.02334 -1.46356,-76.88941 8.91063,-114.20649 -53.25547,21.02686 -62.94728,106.5941 -56.05349,112.77792 -10.88282,0.535 -21.37108,-1.2973 -32.85714,2.85715 0.63892,-42.57135 -0.26046,-84.90861 -30,-122.85715 0,0 -30.95806,80.92234 -31.42857,103.57143 -0.47051,22.64909 9.45159,40.16588 9.45159,40.16588 0,0 -8.56807,36.74051 -6.29859,58.23223 2.29585,21.74146 20.4429,59.67617 27.26542,78.65809 z"
- style="display:inline;opacity:1;fill:#ada469;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- inkscape:connector-curvature="0" />
- </clipPath>
- <filter
- inkscape:collect="always"
- id="filter8404"
- x="-0.090268657"
- width="1.1805373"
- y="-0.10250848"
- height="1.205017">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="5.3457272"
- id="feGaussianBlur8406" />
- </filter>
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath8417">
- <path
- sodipodi:nodetypes="czzzzzzcccccccccczczz"
- id="path8419"
- d="m 760.16396,935.83377 c 6.7941,18.90279 10.49369,33.29969 11.8903,51.21189 1.39662,17.91234 -3.78268,51.80084 -2.90046,70.65614 0.88175,18.8452 8.13369,40.099 27.34463,48.9689 19.41887,8.9658 49.31924,10.2113 74.11984,-3.1456 24.8006,-13.357 57.40102,-70.3255 70.97426,-97.3087 13.62385,-27.08394 38.76107,-114.49737 44.6608,-149.76859 5.89973,-35.27121 2.55054,-41.30077 -4.61748,-49.05549 2.6403,-27.84015 -1.49972,-54.93543 13.10969,-87.18618 -30.24901,11.8257 -37.38229,40.1607 -48.31889,65.50508 -8.00091,-50.93293 0.20916,-71.27319 3.31889,-101.21936 -29.06476,14.77791 -42.86151,47.11402 -45,92.85714 -10.92395,-1.3042 -21.39144,-4.43423 -33.57143,-0.71429 -0.26404,-46.02334 -1.46356,-76.88941 8.91063,-114.20649 -53.25547,21.02686 -62.94728,106.5941 -56.05349,112.77792 -10.88282,0.535 -21.37108,-1.2973 -32.85714,2.85715 0.63892,-42.57135 -0.26046,-84.90861 -30,-122.85715 0,0 -30.95806,80.92234 -31.42857,103.57143 -0.47051,22.64909 9.45159,40.16588 9.45159,40.16588 0,0 -8.56807,36.74051 -6.29859,58.23223 2.29585,21.74146 20.4429,59.67617 27.26542,78.65809 z"
- style="display:inline;opacity:1;fill:#ada469;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- inkscape:connector-curvature="0" />
- </clipPath>
- <filter
- inkscape:collect="always"
- x="-0.084654994"
- width="1.16931"
- y="-0.36592469"
- height="1.7318494"
- id="filter11361-3">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="4.5740586"
- id="feGaussianBlur11363-6" />
- </filter>
- <radialGradient
- inkscape:collect="always"
- xlink:href="#linearGradient8430"
- id="radialGradient7904-7"
- gradientUnits="userSpaceOnUse"
- gradientTransform="matrix(-0.3324832,0.9022288,-0.9582407,-0.3531242,305.29227,19.909497)"
- cx="142.95833"
- cy="107.09234"
- fx="142.95833"
- fy="107.09234"
- r="66.981766" />
- <radialGradient
- inkscape:collect="always"
- xlink:href="#linearGradient3317"
- id="radialGradient7906-6"
- gradientUnits="userSpaceOnUse"
- gradientTransform="matrix(1.0036478,-1.0345492e-7,1.7124628e-7,1.6613125,-160.53487,-96.205369)"
- cx="317.78754"
- cy="129.65378"
- fx="317.78754"
- fy="129.65378"
- r="47.863216" />
- <radialGradient
- inkscape:collect="always"
- xlink:href="#linearGradient8398"
- id="radialGradient7908-0"
- gradientUnits="userSpaceOnUse"
- gradientTransform="matrix(2.0747661,-0.1577957,0.2382425,3.1325183,-550.77432,-65.728909)"
- cx="325.30847"
- cy="80.909554"
- fx="325.30847"
- fy="80.909554"
- r="26.937988" />
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath8658-06">
- <path
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#0b0b0b;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- d="m 569.03125,1018.7776 c -4.28571,0.7143 -27.62815,3.6181 -57.85714,10 -30.22899,6.3819 -99.77493,25.9619 -142.85715,35.7143 -43.08222,9.7524 -117.26443,34.816 -156.91262,27.2654 -39.64818,-7.5506 -89.51595,-64.4083 -89.51595,-64.4083 l 4.28572,-94.28571 c 0,0 85.88551,-16.20094 112.14285,-33.57143 26.25735,-17.37049 45.58238,-49.66598 59.28572,-71.42857 13.70334,-21.76259 32.85714,-71.42858 32.85714,-71.42858 l 238.57143,262.14289 z"
- id="path8660-2"
- sodipodi:nodetypes="czzzcczzcc"
- inkscape:connector-curvature="0" />
- </clipPath>
- <filter
- inkscape:collect="always"
- id="filter8888-6"
- x="-0.2112188"
- width="1.4224375"
- y="-0.16808605"
- height="1.3361721">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="8.3693583"
- id="feGaussianBlur8890-1" />
- </filter>
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath2833-2">
- <path
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#292929;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- d="m 569.03125,1018.7776 c -4.28571,0.7143 -27.62815,3.6181 -57.85714,10 -30.22899,6.3819 -57.31395,4.9661 -135.78608,17.3296 -79.85178,12.5808 -94.06436,42.5423 -108.12225,47.0643 -14.70014,4.7286 -145.37739,-65.8225 -145.37739,-65.8225 l 4.28572,-94.28571 c 0,0 85.88551,-16.20094 112.14285,-33.57143 26.25735,-17.37049 45.58238,-49.66598 59.28572,-71.42857 13.70334,-21.76259 32.85714,-71.42858 32.85714,-71.42858 l 238.57143,262.14289 z"
- id="path2835-3"
- sodipodi:nodetypes="czzzcczzcc"
- inkscape:connector-curvature="0" />
- </clipPath>
- <filter
- inkscape:collect="always"
- id="filter8892-7"
- x="-0.18692794"
- width="1.3738559"
- y="-0.23646873"
- height="1.4729375">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="31.21228"
- id="feGaussianBlur8894-5" />
- </filter>
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath3665-9">
- <path
- sodipodi:nodetypes="czzcczcc"
- id="path3667-2"
- d="m 366.88839,504.13471 c 0,0 -29.55406,40.57305 -47.85714,74.28571 -18.30309,33.71267 -58.62109,126.35694 -70.35714,171.07143 -11.7594,44.80344 -62.5,123.57144 -62.5,123.57144 l 76.07143,18.21428 c 0,0 11.80712,-12.82335 31.07142,-46.07143 19.2643,-33.24808 60.35715,-138.57143 60.35715,-138.57143 l 13.21428,-202.5 z"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#0f0f0f;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- inkscape:connector-curvature="0" />
- </clipPath>
- <filter
- inkscape:collect="always"
- id="filter8856-2"
- x="-0.3253231"
- width="1.6506462"
- y="-0.19013336"
- height="1.3802667">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="28.712591"
- id="feGaussianBlur8858-8" />
- </filter>
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath8642-9">
- <path
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#0f0f0f;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- d="m 366.88839,504.13471 c 0,0 -29.55406,40.57305 -47.85714,74.28571 -18.30309,33.71267 -58.62109,126.35694 -70.35714,171.07143 -11.7594,44.80344 -62.5,123.57144 -62.5,123.57144 l 76.07143,18.21428 c 0,0 11.80712,-12.82335 31.07142,-46.07143 19.2643,-33.24808 60.35715,-138.57143 60.35715,-138.57143 l 13.21428,-202.5 z"
- id="path8644-7"
- sodipodi:nodetypes="czzcczcc"
- inkscape:connector-curvature="0" />
- </clipPath>
- <filter
- inkscape:collect="always"
- id="filter8860-3"
- x="-0.38093024"
- width="1.7618605"
- y="-0.17518716"
- height="1.3503743">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="19.304015"
- id="feGaussianBlur8862-6" />
- </filter>
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath7616-1">
- <path
- id="path7618-2"
- d="m 205.47016,-408.97318 -0.0901,0.002 c -1.44563,0.78566 -6.69921,1.14335 -7.27625,9.03857 -0.54134,7.40553 13.78595,18.09566 15.85433,19.24481 1.66964,0.92764 3.48475,1.65551 5.27345,2.09115 l 1.41867,0.31834 c 1.90027,0.34514 3.70042,0.41015 5.15031,0.26563 3.1486,-0.31384 5.16386,-0.57031 7.16789,-0.8152 2.00402,-0.24488 2.5407,-0.86478 3.64319,-1.02999 1.14342,-0.17143 2.25659,0.27414 6.29577,-0.43753 4.03888,-0.71169 5.51507,-1.03768 6.23419,-1.3503 0.74664,-0.32479 1.81806,-0.74575 2.47589,-1.3621 1.95897,0.12471 3.80476,-0.0261 5.2648,-0.47819 3.02376,-0.93627 4.99157,-1.52544 6.8628,-2.27751 1.50138,-0.60342 2.43202,-1.53636 2.66436,-1.81883 0.23254,-0.28245 0.14951,-0.71989 0.17694,-0.73948 0.0453,-0.0322 0.34622,-0.0701 0.52926,-0.40161 0.98557,-1.78276 2.70955,-5.45215 2.87137,-6.12738 0.16094,-0.67257 0.26182,-1.33778 0.34007,-1.73051 0.0453,-0.22691 -0.0855,-0.88701 -0.0594,-0.94393 0.0365,-0.0797 0.30505,-0.22988 0.37769,-0.47485 0.34913,-1.17686 0.24274,-2.19578 0.1278,-3.6249 -0.11463,-1.42909 -0.64781,-4.6711 -1.24013,-5.56865 -0.38017,-0.57646 -0.74215,-0.84625 -1.09026,-0.98459 -0.0657,-0.0219 -0.12035,-0.0553 -0.1827,-0.0754 -0.0207,-0.005 -0.0418,2.3e-4 -0.0623,-0.004 -0.30559,-0.097 -0.59597,-0.20067 -1.13816,-0.29875 -0.97557,-0.1765 -2.40074,-0.42766 -3.94175,-0.52584 -0.51366,-0.0327 -1.0379,-0.0247 -1.56523,-0.0153 -3.57201,0.0636 -9.05695,0.3611 -10.30707,0.90928 -1.60587,-0.44697 -3.89597,-0.88576 -5.89053,-0.8812 -3.07195,0.007 -4.97947,-0.005 -6.92207,-0.0134 -1.94251,-0.009 -1.70908,0.27025 -2.9318,0.26518 -1.32192,-0.005 -1.76701,-0.34717 -5.55562,-0.29393 -3.55782,0.05 -9.02796,0.36522 -10.30706,0.90927 -1.60614,-0.44747 -3.89367,-0.88575 -5.89043,-0.88118 -3.07215,0.007 -4.98175,0.0265 -6.92426,0.0177 -0.66059,-0.003 -1.0485,0.051 -1.35359,0.0934 z"
- style="display:inline;opacity:0.82448976;fill:#bcb786;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- inkscape:connector-curvature="0" />
- </clipPath>
- <filter
- inkscape:collect="always"
- id="filter7610-9"
- x="-0.021942979"
- width="1.0438859"
- y="-0.10017137"
- height="1.2003427">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="0.57530213"
- id="feGaussianBlur7612-3" />
- </filter>
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath7606-1">
- <path
- id="path7608-9"
- d="m 1049.205,-282.26672 -0.09,0.008 c -1.3874,0.88445 -6.6033,1.6072 -6.629,9.52344 -0.024,7.42525 15.0129,17.09146 17.1563,18.09375 1.7302,0.80909 3.5916,1.40876 5.4063,1.71875 l 1.4374,0.21875 c 1.9197,0.21194 3.72,0.15141 5.1563,-0.0937 3.1191,-0.5324 5.1116,-0.92861 7.0937,-1.3125 1.9821,-0.38387 2.4743,-1.03965 3.5626,-1.28125 1.1287,-0.25066 2.2702,0.11629 6.25,-0.875 3.9795,-0.99128 5.4294,-1.4193 6.125,-1.78125 0.7222,-0.37601 1.7617,-0.87058 2.375,-1.53125 1.9629,-0.012 3.7937,-0.29105 5.2187,-0.84375 2.9512,-1.14461 4.8732,-1.86942 6.6875,-2.75 1.4557,-0.70653 2.3191,-1.70203 2.5312,-2 0.2123,-0.29796 0.099,-0.72855 0.125,-0.75 0.043,-0.0352 0.3405,-0.094 0.5,-0.4375 0.859,-1.84708 2.3232,-5.62764 2.4376,-6.3125 0.1137,-0.68215 0.168,-1.35277 0.2187,-1.75 0.029,-0.22951 -0.1471,-0.8789 -0.125,-0.9375 0.031,-0.082 0.2883,-0.25057 0.3437,-0.5 0.2663,-1.19832 0.089,-2.20736 -0.125,-3.625 -0.2139,-1.41764 -0.9716,-4.61463 -1.625,-5.46875 -0.4194,-0.54857 -0.7993,-0.7925 -1.1562,-0.90625 -0.067,-0.0173 -0.1239,-0.0467 -0.1875,-0.0625 -0.021,-0.004 -0.042,0.003 -0.062,0 -0.3116,-0.0755 -0.6085,-0.15867 -1.1562,-0.21875 -0.9855,-0.10812 -2.4247,-0.2594 -3.9688,-0.25 -0.5147,0.003 -1.0371,0.0476 -1.5625,0.0937 -3.5589,0.31228 -9.0098,0.99108 -10.2187,1.625 -1.6331,-0.33402 -3.9482,-0.61223 -5.9376,-0.46875 -3.064,0.22097 -4.9677,0.34219 -6.9062,0.46875 -1.9384,0.12655 -1.6861,0.38864 -2.9062,0.46875 -1.3191,0.0866 -1.7869,-0.22325 -5.5626,0.0937 -3.5457,0.29772 -8.9806,0.99317 -10.2187,1.625 -1.6334,-0.33451 -3.9459,-0.61239 -5.9375,-0.46875 -3.0642,0.22098 -4.9678,0.37344 -6.9062,0.5 -0.6592,0.043 -1.0424,0.12393 -1.3438,0.1875 z"
- style="display:inline;opacity:0.82448976;fill:#bcb786;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- inkscape:connector-curvature="0" />
- </clipPath>
- <filter
- inkscape:collect="always"
- id="filter7578-4"
- x="-0.08160872"
- width="1.1632174"
- y="-0.22659944"
- height="1.4531989">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="2.437399"
- id="feGaussianBlur7580-7" />
- </filter>
- <filter
- inkscape:collect="always"
- id="filter7594-8"
- x="-0.040804356"
- width="1.0816087"
- y="-0.11329972"
- height="1.2265995">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="1.2186995"
- id="feGaussianBlur7596-4" />
- </filter>
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath8616-5">
- <path
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#262f2f;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- d="m 352.24553,211.99185 c -3.80443,-25.26423 -16.80972,-50.63802 -17.1568,-75.52523 -0.18626,-13.35552 3.27285,-26.57091 13.75553,-39.554046 36.34702,-65.295835 116.94091,-84.694685 185.93466,-91.465427 86.92239,-11.016801 184.91267,17.940072 233.37134,95.401283 54.12402,75.7333 56.67476,172.53912 80.61204,259.52795 29.43779,127.1276 54.77914,256.21414 60.39224,386.85035 -3.06348,78.18185 -8.42634,165.18415 -60.50321,228.13413 -48.02654,50.35744 -122.78647,50.05304 -187.06988,59.00234 -90.55539,4.655 -184.35153,-16.1458 -261.7839,-64.19824 -64.77564,-37.94001 -95.73019,-113.47863 -97.2794,-186.01958 -8.38917,-79.87516 26.39152,-153.80851 51.6204,-227.15961 7.47061,-82.76107 9.41286,-166.24775 9.65334,-249.38484 -0.83682,-32.19544 -7.08953,-63.81733 -11.54636,-95.60908 z"
- id="path8618-0"
- sodipodi:nodetypes="cscccccccccccc"
- inkscape:connector-curvature="0" />
- </clipPath>
- <filter
- inkscape:collect="always"
- id="filter8940-3"
- x="-0.25152978"
- width="1.5030596"
- y="-0.053035267"
- height="1.1060705">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="13.024603"
- id="feGaussianBlur8942-6" />
- </filter>
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath8209-6">
- <path
- sodipodi:nodetypes="czcc"
- id="path8211-3"
- d="m 734.03125,519.49186 c 0,0 16.75513,37.01806 28.70141,53.95395 11.94629,16.93589 52.72716,56.04605 52.72716,56.04605 l 0.59717,-138.58975"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#1a1a1a;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- inkscape:connector-curvature="0" />
- </clipPath>
- <filter
- inkscape:collect="always"
- id="filter8822-2"
- x="-0.2742857"
- width="1.5485713"
- y="-0.21333334"
- height="1.4266667">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="11.313708"
- id="feGaussianBlur8824-0" />
- </filter>
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath3998-6">
- <path
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#262f2f;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- d="m 178.21428,274.14789 c -3.80443,-25.26423 -16.80972,-50.63802 -17.1568,-75.52523 -0.18626,-13.35552 3.27285,-26.57091 13.75553,-39.55405 36.34702,-65.295835 116.94091,-84.694685 185.93466,-91.465427 86.92239,-11.016801 184.91267,17.940072 233.37134,95.401287 54.12402,75.7333 56.67476,172.53912 80.61204,259.52795 29.43779,127.1276 54.77914,256.21414 60.39224,386.85035 -3.06348,78.18185 -8.42634,165.18415 -60.50321,228.13413 -48.02654,50.3574 -122.78647,50.053 -187.06988,59.0023 -90.55539,4.655 -184.35153,-16.1458 -261.7839,-64.1982 -64.77564,-37.94001 -95.73019,-113.47863 -97.2794,-186.01958 -8.38917,-79.87516 26.39152,-153.80851 51.6204,-227.15961 7.47061,-82.76107 9.41286,-166.24775 9.65334,-249.38484 -0.83682,-32.19544 -7.08953,-63.81733 -11.54636,-95.60908 z"
- id="path4000-1"
- sodipodi:nodetypes="cscccccccccccc"
- inkscape:connector-curvature="0" />
- </clipPath>
- <filter
- inkscape:collect="always"
- id="filter3677-5">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="2.0397518"
- id="feGaussianBlur3679-5" />
- </filter>
- <filter
- id="filter3785-4"
- inkscape:label="White Fur">
- <feTurbulence
- id="feTurbulence3787-7"
- type="fractalNoise"
- baseFrequency="0.24044943820224721"
- numOctaves="10"
- seed="655"
- result="result0" />
- <feDisplacementMap
- id="feDisplacementMap3789-65"
- in="SourceGraphic"
- in2="result0"
- scale="62"
- xChannelSelector="B"
- yChannelSelector="G" />
- </filter>
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath8604-69">
- <path
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#262f2f;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- d="m 352.24553,211.99185 c -3.80443,-25.26423 -16.80972,-50.63802 -17.1568,-75.52523 -0.18626,-13.35552 3.27285,-26.57091 13.75553,-39.554046 36.34702,-65.295835 116.94091,-84.694685 185.93466,-91.465427 86.92239,-11.016801 184.91267,17.940072 233.37134,95.401283 54.12402,75.7333 56.67476,172.53912 80.61204,259.52795 29.43779,127.1276 54.77914,256.21414 60.39224,386.85035 -3.06348,78.18185 -8.42634,165.18415 -60.50321,228.13413 -48.02654,50.35744 -122.78647,50.05304 -187.06988,59.00234 -90.55539,4.655 -184.35153,-16.1458 -261.7839,-64.19824 -64.77564,-37.94001 -95.73019,-113.47863 -97.2794,-186.01958 -8.38917,-79.87516 26.39152,-153.80851 51.6204,-227.15961 7.47061,-82.76107 9.41286,-166.24775 9.65334,-249.38484 -0.83682,-32.19544 -7.08953,-63.81733 -11.54636,-95.60908 z"
- id="path8606-3"
- sodipodi:nodetypes="cscccccccccccc"
- inkscape:connector-curvature="0" />
- </clipPath>
- <filter
- inkscape:collect="always"
- id="filter8802-7"
- x="-0.35311759"
- width="1.7062352"
- y="-0.1817714"
- height="1.3635428">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="48.038491"
- id="feGaussianBlur8804-4" />
- </filter>
- <radialGradient
- inkscape:collect="always"
- xlink:href="#linearGradient3317"
- id="radialGradient3315-5"
- cx="543.6698"
- cy="147.3131"
- fx="543.6698"
- fy="147.3131"
- r="47.863216"
- gradientTransform="matrix(2.1382256,0,0,2.3382884,-77.03847,-101.68704)"
- gradientUnits="userSpaceOnUse" />
- <radialGradient
- inkscape:collect="always"
- xlink:href="#linearGradient3537"
- id="radialGradient3543-4"
- cx="385"
- cy="237.00504"
- fx="385"
- fy="237.00504"
- r="86.928574"
- gradientTransform="matrix(1,0,0,0.8562038,0,34.080427)"
- gradientUnits="userSpaceOnUse" />
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath4100-3">
- <path
- style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.9000755px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
- d="m 265.93541,126.68393 -18.76721,168.86308 174.10543,-73.12068 61.9544,88.65883 57.8844,-31.9903 -37.53442,-180.059677 -237.6426,27.648747 z"
- id="path4102-0"
- sodipodi:nodetypes="ccccccc"
- inkscape:connector-curvature="0" />
- </clipPath>
- <filter
- inkscape:collect="always"
- id="filter4120-7"
- x="-0.2770822"
- width="1.5541644"
- y="-0.32482043"
- height="1.6496409">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="19.956289"
- id="feGaussianBlur4122-8" />
- </filter>
- <radialGradient
- inkscape:collect="always"
- xlink:href="#linearGradient3961"
- id="radialGradient3915-6"
- cx="418.30365"
- cy="342.47794"
- fx="418.30365"
- fy="342.47794"
- r="131.4509"
- gradientTransform="matrix(1.3957347,0.6211056,-0.4244067,0.9537174,-15.061913,-227.96711)"
- gradientUnits="userSpaceOnUse" />
- <mask
- maskUnits="userSpaceOnUse"
- id="mask3684-3">
- <ellipse
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#linearGradient3688);fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:0.43724918px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- id="path3686-1"
- transform="translate(-174.03125,62.156036)"
- cx="579.474"
- cy="260.57516"
- rx="192.6866"
- ry="164.04877" />
- </mask>
- <radialGradient
- inkscape:collect="always"
- xlink:href="#linearGradient3951"
- id="radialGradient3933-8"
- cx="397.16388"
- cy="336.95245"
- fx="397.16388"
- fy="336.95245"
- r="36.75"
- gradientUnits="userSpaceOnUse"
- gradientTransform="matrix(1.9449972,2.4894837e-7,-2.4894833e-7,1.9449969,-375.31868,-318.41912)" />
- <filter
- inkscape:collect="always"
- id="filter8806-6"
- x="-0.61142862"
- width="2.2228572"
- y="-0.14930232"
- height="1.2986046">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="37.830213"
- id="feGaussianBlur8808-4" />
- </filter>
- <filter
- inkscape:collect="always"
- id="filter8826-9"
- x="-0.25894088"
- width="1.5178818"
- y="-0.2236412"
- height="1.4472824">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="19.631544"
- id="feGaussianBlur8828-5" />
- </filter>
- <radialGradient
- inkscape:collect="always"
- xlink:href="#linearGradient3985"
- id="radialGradient3991-0"
- cx="402.48898"
- cy="317.23578"
- fx="402.48898"
- fy="317.23578"
- r="23.714285"
- gradientUnits="userSpaceOnUse"
- gradientTransform="matrix(4.3776616,0,0,4.3776616,-1358.3025,-1070.7357)" />
- <filter
- inkscape:collect="always"
- id="filter3981-7"
- x="-0.30000001"
- width="1.6"
- y="-0.30000001"
- height="1.6">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="2"
- id="feGaussianBlur3983-1" />
- </filter>
- <radialGradient
- inkscape:collect="always"
- xlink:href="#linearGradient4106"
- id="radialGradient4112-7"
- cx="250.22678"
- cy="475.09763"
- fx="250.22678"
- fy="475.09763"
- r="95.98877"
- gradientTransform="matrix(1.2259004,-0.7077739,0.1413989,0.2449102,322.22326,608.91815)"
- gradientUnits="userSpaceOnUse" />
- <radialGradient
- inkscape:collect="always"
- xlink:href="#linearGradient4013"
- id="radialGradient3585-2"
- gradientUnits="userSpaceOnUse"
- gradientTransform="matrix(1.1323239,0.7659488,-1.4550286,2.1510098,588.75376,-711.79716)"
- cx="228.81355"
- cy="440.26971"
- fx="228.81355"
- fy="440.26971"
- r="119.17509" />
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath3999-0">
- <path
- style="display:inline;opacity:1;fill:#f5ff04;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:20.79999924;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
- d="m 179.64286,267.36218 c -22.41044,39.70292 -60.6161,115.78029 -69.28571,149.64286 -8.64721,33.7751 -8.77199,66.41654 -0.35715,86.42858 8.3602,19.88213 26.16398,35.6328 40.71428,41.42856 -0.59638,-14.37587 14.37295,-43.28583 72.85715,-72.5 58.62627,-29.28514 78.38163,-27.13086 103.57142,-47.14286 25.63006,-20.36176 12.61031,-67.04463 3.21429,-93.92857 -9.43424,-26.99328 -34.96741,-59.12448 -66.42857,-69.64285 -31.03327,-10.37532 -65.01776,-4.84837 -84.28571,5.71428 z"
- id="path4001-61"
- sodipodi:nodetypes="czzczzzzc"
- inkscape:connector-curvature="0" />
- </clipPath>
- <radialGradient
- inkscape:collect="always"
- xlink:href="#linearGradient4041"
- id="radialGradient4060-5"
- gradientUnits="userSpaceOnUse"
- gradientTransform="matrix(0.05911206,2.6869855,-0.7234268,0.01591495,408.72779,-424.56452)"
- cx="275.4422"
- cy="335.34866"
- fx="275.4422"
- fy="335.34866"
- r="36.75" />
- <radialGradient
- inkscape:collect="always"
- xlink:href="#linearGradient7622"
- id="radialGradient4062-9"
- gradientUnits="userSpaceOnUse"
- gradientTransform="matrix(0.05911206,2.6869855,-0.7234268,0.01591495,408.72779,-424.56452)"
- cx="275.4422"
- cy="335.34866"
- fx="275.4422"
- fy="335.34866"
- r="36.75" />
- <filter
- inkscape:collect="always"
- id="filter4079-1">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="6.5887624"
- id="feGaussianBlur4081-1" />
- </filter>
- <radialGradient
- inkscape:collect="always"
- xlink:href="#linearGradient4013"
- id="radialGradient4056-5"
- gradientUnits="userSpaceOnUse"
- gradientTransform="matrix(1.1323239,0.7659488,-1.4550286,2.1510098,588.75376,-711.79716)"
- cx="228.81355"
- cy="440.26971"
- fx="228.81355"
- fy="440.26971"
- r="119.17509" />
- <filter
- inkscape:collect="always"
- id="filter4083-9">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="1.5052066"
- id="feGaussianBlur4085-7" />
- </filter>
- <radialGradient
- inkscape:collect="always"
- xlink:href="#linearGradient4113"
- id="radialGradient4119-7"
- cx="296.33783"
- cy="427.17749"
- fx="296.33783"
- fy="427.17749"
- r="19.704132"
- gradientUnits="userSpaceOnUse"
- gradientTransform="matrix(2.9797125,0,0,2.9797125,-599.28727,-827.0855)" />
- <radialGradient
- inkscape:collect="always"
- xlink:href="#linearGradient4862"
- id="radialGradient4868-3"
- cx="429.56738"
- cy="377.42877"
- fx="429.56738"
- fy="377.42877"
- r="72.079735"
- gradientTransform="matrix(1,0,0,0.618034,0,144.16496)"
- gradientUnits="userSpaceOnUse" />
- <filter
- inkscape:collect="always"
- id="filter4002-6"
- x="-0.24334238"
- width="1.4866848"
- y="-0.39104807"
- height="1.7820961">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="14.589518"
- id="feGaussianBlur4004-3" />
- </filter>
- <radialGradient
- inkscape:collect="always"
- xlink:href="#linearGradient4870"
- id="radialGradient4876-9"
- cx="437.6991"
- cy="391.21735"
- fx="437.6991"
- fy="391.21735"
- r="36.611931"
- gradientTransform="matrix(1,0,0,0.618034,0,149.43174)"
- gradientUnits="userSpaceOnUse" />
- <filter
- inkscape:collect="always"
- id="filter4010-1"
- x="-0.14577261"
- width="1.2915452"
- y="-0.23523259"
- height="1.4704652">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="4.4442907"
- id="feGaussianBlur4012-2" />
- </filter>
- <filter
- inkscape:collect="always"
- id="filter4053-9">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="0.6062947"
- id="feGaussianBlur4055-3" />
- </filter>
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath8514-8">
- <path
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#262f2f;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- d="m 352.24553,211.99185 c -3.80443,-25.26423 -16.80972,-50.63802 -17.1568,-75.52523 -0.18626,-13.35552 3.27285,-26.57091 13.75553,-39.554046 36.34702,-65.295835 116.94091,-84.694685 185.93466,-91.465427 86.92239,-11.016801 184.91267,17.940072 233.37134,95.401283 54.12402,75.7333 56.67476,172.53912 80.61204,259.52795 29.43779,127.1276 54.77914,256.21414 60.39224,386.85035 -3.06348,78.18185 -8.42634,165.18415 -60.50321,228.13413 -48.02654,50.35744 -122.78647,50.05304 -187.06988,59.00234 -90.55539,4.655 -184.35153,-16.1458 -261.7839,-64.19824 -64.77564,-37.94001 -95.73019,-113.47863 -97.2794,-186.01958 -8.38917,-79.87516 26.39152,-153.80851 51.6204,-227.15961 7.47061,-82.76107 9.41286,-166.24775 9.65334,-249.38484 -0.83682,-32.19544 -7.08953,-63.81733 -11.54636,-95.60908 z"
- id="path8516-8"
- sodipodi:nodetypes="cscccccccccccc"
- inkscape:connector-curvature="0" />
- </clipPath>
- <filter
- inkscape:collect="always"
- id="filter8814-5"
- x="-0.20466694"
- width="1.4093339"
- y="-0.29007819"
- height="1.5801564">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="22.300169"
- id="feGaussianBlur8816-0" />
- </filter>
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath8610-9">
- <path
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#262f2f;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- d="m 352.24553,211.99185 c -3.80443,-25.26423 -16.80972,-50.63802 -17.1568,-75.52523 -0.18626,-13.35552 3.27285,-26.57091 13.75553,-39.554046 36.34702,-65.295835 116.94091,-84.694685 185.93466,-91.465427 86.92239,-11.016801 184.91267,17.940072 233.37134,95.401283 54.12402,75.7333 56.67476,172.53912 80.61204,259.52795 29.43779,127.1276 54.77914,256.21414 60.39224,386.85035 -3.06348,78.18185 -8.42634,165.18415 -60.50321,228.13413 -48.02654,50.35744 -122.78647,50.05304 -187.06988,59.00234 -90.55539,4.655 -184.35153,-16.1458 -261.7839,-64.19824 -64.77564,-37.94001 -95.73019,-113.47863 -97.2794,-186.01958 -8.38917,-79.87516 26.39152,-153.80851 51.6204,-227.15961 7.47061,-82.76107 9.41286,-166.24775 9.65334,-249.38484 -0.83682,-32.19544 -7.08953,-63.81733 -11.54636,-95.60908 z"
- id="path8612-6"
- sodipodi:nodetypes="cscccccccccccc"
- inkscape:connector-curvature="0" />
- </clipPath>
- <filter
- inkscape:collect="always"
- id="filter8810-3"
- x="-0.23519406"
- width="1.4703881"
- y="-0.24500646"
- height="1.4900129">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="58.328041"
- id="feGaussianBlur8812-8" />
- </filter>
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath8622-5">
- <path
- style="display:inline;opacity:1;fill:#202020;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 821.64329,477.88997 c 0,0 22.61947,-6.50681 35.74275,-5.87273 13.12328,0.63409 30.64158,1.93862 43.70885,12.18619 13.06727,10.24756 25.06774,27.14007 34.11239,58.36965 9.04465,31.22958 1.69832,99.25201 -6.17603,143.34735 -7.87435,44.09534 -28.2651,106.11298 -45,140 -16.7349,33.88702 -49.79771,77.4952 -60.56943,89.87616 -11.36422,13.06197 -56.20589,36.42617 -79.43057,42.26667 5.3033,-10.6066 48.89976,-50.58884 35,-60.71426 -14.01897,-10.21226 -45.76009,45.98236 -84.29315,29.03317 21.38231,-13.13212 41.7794,-51.18606 34.04061,-66.59445 -7.84025,-15.61039 -30.70493,48.75757 -93.53554,37.01288 30.05204,-27.52666 55.40706,-70.90401 41.2627,-82.9797 -14.41516,-12.30687 -60.46175,54.29315 -60.46175,54.29315 0,0 -2.8219,-41.70118 13.7732,-68.60732 16.63935,-26.97787 79.65297,-81.61527 99.55313,-111.70342 19.90015,-30.08814 33.61256,-66.00902 42.13542,-92.51794 8.52286,-26.50892 15.80094,-77.09954 15.80094,-77.09954"
- id="path8624-61"
- sodipodi:nodetypes="czzzzzzczczczczzzc"
- inkscape:connector-curvature="0" />
- </clipPath>
- <filter
- inkscape:collect="always"
- id="filter8818-1"
- x="-0.34381232"
- width="1.6876246"
- y="-0.18433961"
- height="1.3686792">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="34.542167"
- id="feGaussianBlur8820-5" />
- </filter>
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath8906-9">
- <path
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#262f2f;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- d="m 352.24553,211.99185 c -3.80443,-25.26423 -16.80972,-50.63802 -17.1568,-75.52523 -0.18626,-13.35552 3.27285,-26.57091 13.75553,-39.554046 36.34702,-65.295835 116.94091,-84.694685 185.93466,-91.465427 86.92239,-11.016801 184.91267,17.940072 233.37134,95.401283 54.12402,75.7333 56.67476,172.53912 80.61204,259.52795 29.43779,127.1276 54.77914,256.21414 60.39224,386.85035 -3.06348,78.18185 -8.42634,165.18415 -60.50321,228.13413 -48.02654,50.35744 -122.78647,50.05304 -187.06988,59.00234 -90.55539,4.655 -184.35153,-16.1458 -261.7839,-64.19824 -64.77564,-37.94001 -95.73019,-113.47863 -97.2794,-186.01958 -8.38917,-79.87516 26.39152,-153.80851 51.6204,-227.15961 7.47061,-82.76107 9.41286,-166.24775 9.65334,-249.38484 -0.83682,-32.19544 -7.08953,-63.81733 -11.54636,-95.60908 z"
- id="path8908-8"
- sodipodi:nodetypes="cscccccccccccc"
- inkscape:connector-curvature="0" />
- </clipPath>
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath3602-4">
- <path
- sodipodi:nodetypes="czzzzzzczczczczzzc"
- id="path3604-8"
- d="m 647.61204,540.04601 c 0,0 22.61947,-6.50681 35.74275,-5.87273 13.12328,0.63409 30.64158,1.93862 43.70885,12.18619 13.06727,10.24756 25.06774,27.14007 34.11239,58.36965 9.04465,31.22958 1.69832,99.25201 -6.17603,143.34735 -7.87435,44.09534 -28.2651,106.11298 -45,140 -16.7349,33.88702 -49.79771,77.4952 -60.56943,89.87616 -11.36422,13.06197 -56.20589,36.42617 -79.43057,42.26667 5.3033,-10.6066 48.89976,-50.58884 35,-60.71426 -14.01897,-10.21226 -45.76009,45.98236 -84.29315,29.03317 21.38231,-13.13212 41.7794,-51.18606 34.04061,-66.59445 -7.84025,-15.61039 -30.70493,48.75757 -93.53554,37.01288 30.05204,-27.52666 55.40706,-70.90401 41.2627,-82.9797 -14.41516,-12.30687 -60.46175,54.29315 -60.46175,54.29315 0,0 -2.8219,-41.70118 13.7732,-68.60732 16.63935,-26.97787 79.65297,-81.61527 99.55313,-111.70342 19.90015,-30.08814 33.61256,-66.00902 42.13542,-92.51794 8.52286,-26.50892 15.80094,-77.09954 15.80094,-77.09954"
- style="display:inline;opacity:1;fill:#202020;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- inkscape:connector-curvature="0" />
- </clipPath>
- <filter
- inkscape:collect="always"
- id="filter3587-1"
- x="-0.1">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="8.881432"
- id="feGaussianBlur3589-0" />
- </filter>
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath3992-4">
- <path
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#121212;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- d="m 709.28572,844.50504 c 54.28571,-1.42857 126.035,-15.05199 170,-26.78572 44.05271,-11.75714 125.88628,-36.34724 175.35708,-57.85714 49.3393,-21.45272 113.6037,-59.2816 154.2858,-92.14285 40.5081,-32.72069 52.3899,-55.81981 60.7143,-33.57143 8.3691,22.36779 -16.407,56.32562 -37.8572,81.07143 -21.6041,24.9234 -52.7313,52.70533 -98.9286,89.28571 C 1086.6598,841.08542 976.77458,906.08967 920,933.07647 c -57.06606,27.12536 -128.20334,58.23842 -172.14286,72.50003 -43.93952,14.2616 -131.42857,31.0714 -131.42857,31.0714 l 92.85715,-192.14286 z"
- id="path3994-4"
- sodipodi:nodetypes="czzzzzzzzcc"
- inkscape:connector-curvature="0" />
- </clipPath>
- <filter
- inkscape:collect="always"
- id="filter3779-4"
- x="-0.087980822"
- width="1.1759616"
- y="-0.17728332"
- height="1.3545666">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="16.340344"
- id="feGaussianBlur3781-4" />
- </filter>
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath3986-7">
- <path
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#121212;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- d="m 709.28572,844.50504 c 54.28571,-1.42857 126.035,-15.05199 170,-26.78572 44.05271,-11.75714 125.88628,-36.34724 175.35708,-57.85714 49.3393,-21.45272 113.6037,-59.2816 154.2858,-92.14285 40.5081,-32.72069 52.3899,-55.81981 60.7143,-33.57143 8.3691,22.36779 -16.407,56.32562 -37.8572,81.07143 -21.6041,24.9234 -52.7313,52.70533 -98.9286,89.28571 C 1086.6598,841.08542 976.77458,906.08967 920,933.07647 c -57.06606,27.12536 -128.20334,58.23842 -172.14286,72.50003 -43.93952,14.2616 -131.42857,31.0714 -131.42857,31.0714 l 92.85715,-192.14286 z"
- id="path3988-6"
- sodipodi:nodetypes="czzzzzzzzcc"
- inkscape:connector-curvature="0" />
- </clipPath>
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath3722-3">
- <path
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#121212;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- d="m 709.28572,844.50504 c 54.28571,-1.42857 126.035,-15.05199 170,-26.78572 44.05271,-11.75714 125.88628,-36.34724 175.35708,-57.85714 49.3393,-21.45272 113.6037,-59.2816 154.2858,-92.14285 40.5081,-32.72069 52.3899,-55.81981 60.7143,-33.57143 8.3691,22.36779 -16.407,56.32562 -37.8572,81.07143 -21.6041,24.9234 -52.7313,52.70533 -98.9286,89.28571 C 1086.6598,841.08542 976.77458,906.08967 920,933.07647 c -57.06606,27.12536 -128.20334,58.23842 -172.14286,72.50003 -43.93952,14.2616 -131.42857,31.0714 -131.42857,31.0714 l 92.85715,-192.14286 z"
- id="path3724-1"
- sodipodi:nodetypes="czzzzzzzzcc"
- inkscape:connector-curvature="0" />
- </clipPath>
- <filter
- inkscape:collect="always"
- id="filter8225-7">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="10.661912"
- id="feGaussianBlur8227-5" />
- </filter>
- <mask
- maskUnits="userSpaceOnUse"
- id="mask7704-9">
- <path
- style="fill:url(#linearGradient7708);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
- d="m 718.40812,-224.31217 33.25,56 275.99998,-24 159.5,-48.25 -66.5,-82.75 -402.24998,99 z"
- id="path7706-6"
- inkscape:connector-curvature="0" />
- </mask>
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath7421-7">
- <path
- sodipodi:type="inkscape:offset"
- inkscape:radius="0"
- inkscape:original="M 1111.4062 -285.9375 L 1107.4688 -284.0625 C 1107.4283 -284.05228 1107.3692 -284.04201 1107.3438 -284.03125 C 1106.925 -283.8184 1107.1791 -283.93067 1106.6875 -283.71875 C 1106.2014 -283.50919 1104.9499 -283.13456 1102.5938 -282.25 C 1099.2626 -280.99942 1096.7895 -280.10016 1095.5938 -279.1875 C 1094.0576 -279.16623 1091.8733 -278.95419 1089.9375 -278.46875 C 1086.956 -277.72108 1085.0823 -277.29474 1083.1875 -276.875 C 1081.2927 -276.45527 1081.512 -276.23281 1080.3125 -276 C 1079.0159 -275.74833 1078.5911 -276.00899 1074.875 -275.21875 C 1071.3851 -274.4766 1065.9802 -273.28768 1064.7188 -272.53125 C 1063.1348 -272.71203 1060.8513 -272.85303 1058.875 -272.5625 C 1055.8346 -272.11554 1053.9588 -271.88974 1052.0312 -271.65625 C 1051.3758 -271.57687 1050.9902 -271.45547 1050.6875 -271.375 C 1050.2613 -271.24334 1050.0017 -271.11498 1049.3125 -271.03125 C 1048.0009 -270.87188 1047.5503 -271.18808 1043.7812 -270.75 C 1040.2273 -270.33691 1034.7758 -269.47718 1033.5312 -268.8125 C 1031.9322 -269.10979 1029.6735 -269.34669 1027.6875 -269.15625 C 1024.6287 -268.86293 1022.7155 -268.67226 1020.7812 -268.5 C 1018.847 -268.32773 1019.0926 -268.07763 1017.875 -267.96875 C 1016.5588 -267.85105 1016.1152 -268.13238 1012.3438 -267.71875 C 1008.8017 -267.3303 1003.3359 -266.50948 1002.0625 -265.84375 C 1000.4636 -266.13844 998.1753 -266.35076 996.1875 -266.15625 C 993.12921 -265.857 991.2463 -265.67601 989.3125 -265.5 C 988.65501 -265.44015 988.27245 -265.32144 987.96875 -265.25 C 987.54105 -265.13104 987.28525 -265.03193 986.59375 -264.96875 C 985.27775 -264.84849 984.834 -265.16363 981.0625 -264.75 C 977.50631 -264.35998 972.0569 -263.51084 970.8125 -262.84375 C 969.21381 -263.13793 966.95265 -263.36747 964.96875 -263.15625 C 961.91305 -262.83092 959.9947 -262.63001 958.0625 -262.4375 C 956.13031 -262.24499 956.37275 -261.99662 955.15625 -261.875 C 953.84137 -261.74353 953.3932 -262.03954 949.625 -261.59375 C 946.08611 -261.17509 940.6473 -260.30158 939.375 -259.625 C 937.77741 -259.90604 935.51505 -260.04543 933.53125 -259.8125 C 930.47927 -259.45413 928.58625 -259.24464 926.65625 -259.03125 C 926.00007 -258.95869 925.6156 -258.85856 925.3125 -258.78125 C 924.88571 -258.65402 924.6276 -258.51405 923.9375 -258.4375 C 922.62411 -258.29181 922.17015 -258.61152 918.40625 -258.125 C 914.85737 -257.66624 909.4276 -256.70598 908.1875 -256 C 906.59441 -256.24424 904.3537 -256.38135 902.375 -256.125 C 899.32741 -255.73018 897.4243 -255.47655 895.5 -255.21875 C 893.57571 -254.96096 893.7739 -254.72522 892.5625 -254.5625 C 891.25301 -254.3866 890.8153 -254.66688 887.0625 -254.09375 C 883.53821 -253.55551 878.1393 -252.39458 876.875 -251.65625 C 875.28751 -251.85979 873.0295 -251.91098 871.0625 -251.5625 C 868.03631 -251.02638 866.1636 -250.70081 864.25 -250.375 C 863.59941 -250.26423 863.2363 -250.10406 862.9375 -250 C 862.51681 -249.83512 862.27405 -249.6687 861.59375 -249.53125 C 860.29905 -249.26966 859.86665 -249.53745 856.15625 -248.71875 C 852.65777 -247.9468 847.31035 -246.33582 846.09375 -245.5 C 844.53085 -245.57745 842.33625 -245.41472 840.40625 -244.90625 C 837.43387 -244.12312 835.58855 -243.67416 833.71875 -243.15625 C 831.84875 -242.63835 832.0521 -242.38897 830.875 -242.0625 C 829.60251 -241.7096 829.17795 -241.95541 825.53125 -240.875 C 822.10657 -239.86037 816.88185 -237.94183 815.65625 -237.03125 C 814.11747 -237.01851 811.93645 -236.75903 810.03125 -236.15625 C 807.10027 -235.22891 805.2809 -234.69783 803.4375 -234.09375 C 802.81071 -233.88837 802.44585 -233.70117 
802.15625 -233.5625 C 801.74867 -233.34889 801.50295 -233.15375 800.84375 -232.9375 C 799.58925 -232.52596 799.1576 -232.74846 795.5625 -231.5 C 792.17261 -230.32283 786.96755 -228.2863 785.78125 -227.34375 C 784.25737 -227.28408 782.1312 -226.94888 780.25 -226.28125 C 777.35261 -225.25296 775.55095 -224.60577 773.71875 -223.96875 C 771.88655 -223.33174 772.0909 -223.12021 770.9375 -222.71875 C 769.69071 -222.28479 769.27395 -222.51903 765.71875 -221.15625 C 762.38005 -219.87645 757.23165 -217.6737 756.03125 -216.6875 C 754.52407 -216.57981 752.39555 -216.1887 750.53125 -215.46875 C 747.66307 -214.36115 745.90735 -213.68719 744.09375 -213 C 743.47705 -212.76637 743.0973 -212.55797 742.8125 -212.40625 C 742.81251 -212.40625 742.8125 -212.37673 742.8125 -212.375 L 734.8125 -209.1875 L 736.625 -194.46875 C 736.36701 -194.52956 742.8125 -191.15625 742.8125 -191.15625 C 743.03891 -191.30093 743.26145 -191.42886 743.53125 -191.53125 C 744.61177 -191.94123 745.70285 -191.74702 749.53125 -193.21875 C 753.35977 -194.69049 754.7553 -195.22373 755.4375 -195.625 C 756.11711 -196.02478 757.04925 -196.50437 757.65625 -197.15625 C 759.48317 -197.294 761.22705 -197.64948 762.59375 -198.15625 C 765.56175 -199.25677 767.4691 -199.96244 769.375 -200.625 C 771.28081 -201.28754 771.72915 -202.03987 772.78125 -202.40625 C 773.87287 -202.78636 774.97635 -202.57163 778.84375 -203.9375 C 782.71115 -205.30336 784.1269 -205.76458 784.8125 -206.15625 C 785.51361 -206.55677 786.5133 -207.08923 787.125 -207.75 C 789.09581 -207.80466 790.94195 -208.13463 792.40625 -208.625 C 795.40777 -209.63008 797.3324 -210.24671 799.25 -210.875 C 800.78861 -211.3791 801.42415 -211.92177 802.15625 -212.3125 C 802.38647 -212.44681 802.63215 -212.56623 802.90625 -212.65625 C 804.00457 -213.01673 805.0877 -212.73762 809 -213.96875 C 812.91231 -215.19988 814.366 -215.6417 815.0625 -216 C 815.75641 -216.35697 816.6926 -216.79261 817.3125 -217.40625 C 819.17771 -217.42891 820.94835 -217.67308 822.34375 -218.09375 C 825.37415 -219.00729 827.33615 -219.52385 829.28125 -220.0625 C 831.22637 -220.60114 831.70745 -221.32702 832.78125 -221.625 C 833.89527 -221.93415 835.00125 -221.61761 838.96875 -222.65625 C 842.93625 -223.69488 844.38625 -224.08898 845.09375 -224.40625 C 845.82855 -224.73584 846.90765 -225.15997 847.53125 -225.78125 C 849.52907 -225.66525 851.3887 -225.80134 852.875 -226.15625 C 855.95311 -226.89125 857.9584 -227.25719 859.9375 -227.65625 C 861.52541 -227.97643 862.1818 -228.4468 862.9375 -228.75 C 863.17501 -228.8568 863.4044 -228.94276 863.6875 -229 C 864.82091 -229.22919 865.99215 -228.79107 870.03125 -229.5 C 874.07067 -230.20893 875.5315 -230.42709 876.25 -230.6875 C 876.96581 -230.94694 877.95435 -231.25474 878.59375 -231.78125 C 880.51795 -231.54176 882.34165 -231.55672 883.78125 -231.78125 C 886.90767 -232.26887 888.9358 -232.48192 890.9375 -232.75 C 892.93921 -233.01807 893.42625 -233.69514 894.53125 -233.84375 C 895.67767 -233.99793 896.8071 -233.54218 900.875 -234.0625 C 904.94281 -234.58282 906.43525 -234.75823 907.15625 -235 C 907.89337 -235.24714 908.95435 -235.58623 909.59375 -236.125 C 911.64375 -235.78947 913.56745 -235.72704 915.09375 -235.90625 C 918.23595 -236.27521 920.27375 -236.46561 922.28125 -236.6875 C 923.89207 -236.86552 924.5459 -237.2957 925.3125 -237.53125 C 925.55341 -237.61677 925.80655 -237.68685 926.09375 -237.71875 C 927.24345 -237.84647 928.39505 -237.3721 932.46875 -237.84375 C 936.54245 -238.3154 938.0278 -238.45435 938.75 -238.6875 C 939.46941 -238.91977 940.45025 -239.16096 941.09375 
-239.65625 C 943.03005 -239.32279 944.8638 -239.25201 946.3125 -239.40625 C 949.45851 -239.7412 951.49 -239.92484 953.5 -240.125 C 955.50991 -240.32514 955.98415 -240.95139 957.09375 -241.0625 C 958.24485 -241.17778 959.39025 -240.69744 963.46875 -241.125 C 967.54725 -241.55256 969.05765 -241.68709 969.78125 -241.90625 C 970.52047 -242.13011 971.57685 -242.4195 972.21875 -242.9375 C 974.27575 -242.53883 976.2206 -242.4441 977.75 -242.59375 C 980.89871 -242.90185 982.9258 -243.067 984.9375 -243.25 C 986.55151 -243.39682 987.20055 -243.81055 987.96875 -244.03125 C 988.21005 -244.11211 988.4623 -244.16116 988.75 -244.1875 C 989.90211 -244.29295 991.0429 -243.79475 995.125 -244.1875 C 999.20711 -244.58025 1000.7139 -244.71834 1001.4375 -244.9375 C 1002.1584 -245.15583 1003.1371 -245.3852 1003.7812 -245.875 C 1005.7193 -245.52501 1007.5501 -245.42062 1009 -245.5625 C 1012.1487 -245.8706 1014.1758 -246.03575 1016.1875 -246.21875 C 1018.1991 -246.40174 1018.7017 -247.05677 1019.8125 -247.15625 C 1020.9648 -247.25948 1022.1047 -246.77142 1026.1875 -247.15625 C 1030.2704 -247.54107 1031.7762 -247.65725 1032.5 -247.875 C 1033.2393 -248.09743 1034.2956 -248.38949 1034.9375 -248.90625 C 1036.9949 -248.50448 1038.9404 -248.40292 1040.4688 -248.5625 C 1043.6153 -248.89102 1045.6458 -249.0852 1047.6562 -249.28125 C 1049.2692 -249.43854 1049.9219 -249.91273 1050.6875 -250.15625 C 1050.9282 -250.24429 1051.1507 -250.27762 1051.4375 -250.3125 C 1052.5858 -250.4522 1053.7542 -249.97259 1057.8125 -250.5625 C 1061.8708 -251.15242 1063.3743 -251.33964 1064.0938 -251.59375 C 1064.8104 -251.84691 1065.7684 -252.15182 1066.4062 -252.6875 C 1068.3259 -252.47556 1070.1262 -252.53609 1071.5625 -252.78125 C 1074.6816 -253.31365 1076.6741 -253.70986 1078.6562 -254.09375 C 1080.6383 -254.47762 1081.1305 -255.1334 1082.2188 -255.375 C 1083.3475 -255.62566 1084.489 -255.25871 1088.4688 -256.25 C 1092.4483 -257.24127 1093.8983 -257.6693 1094.5938 -258.03125 C 1095.316 -258.40725 1096.3555 -258.90183 1096.9688 -259.5625 C 1098.9317 -259.57454 1100.7625 -259.85355 1102.1875 -260.40625 C 1105.1387 -261.55085 1107.0607 -262.27567 1108.875 -263.15625 C 1110.3307 -263.86277 1111.1941 -264.85828 1111.4062 -265.15625 C 1111.6185 -265.4542 1111.5051 -265.8848 1111.5312 -265.90625 C 1111.5742 -265.94148 1111.8716 -266.00028 1112.0312 -266.34375 C 1112.8902 -268.19082 1114.3544 -271.97139 1114.4688 -272.65625 C 1114.5825 -273.33839 1114.6368 -274.00902 1114.6875 -274.40625 C 1114.7169 -274.63575 1114.5404 -275.28515 1114.5625 -275.34375 C 1114.5934 -275.42579 1114.8508 -275.59432 1114.9062 -275.84375 C 1115.1725 -277.04206 1114.9953 -278.05111 1114.7812 -279.46875 C 1114.5673 -280.88638 1113.8096 -284.08338 1113.1562 -284.9375 C 1112.4973 -285.79922 1111.9314 -285.94801 1111.4062 -285.9375 z "
- style="display:inline;fill:#bcb786;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- id="path7423-8"
- d="m 1111.4062,-285.9375 -3.9374,1.875 c -0.041,0.0102 -0.1,0.0205 -0.125,0.0312 -0.4188,0.21285 -0.1647,0.10058 -0.6563,0.3125 -0.4861,0.20956 -1.7376,0.58419 -4.0937,1.46875 -3.3312,1.25058 -5.8043,2.14984 -7,3.0625 -1.5362,0.0213 -3.7205,0.23331 -5.6563,0.71875 -2.9815,0.74767 -4.8552,1.17401 -6.75,1.59375 -1.8948,0.41973 -1.6755,0.64219 -2.875,0.875 -1.2966,0.25167 -1.7214,-0.009 -5.4375,0.78125 -3.4899,0.74215 -8.8948,1.93107 -10.1562,2.6875 -1.584,-0.18078 -3.8675,-0.32178 -5.8438,-0.0312 -3.0404,0.44696 -4.9162,0.67276 -6.8438,0.90625 -0.6554,0.0794 -1.041,0.20078 -1.3437,0.28125 -0.4262,0.13166 -0.6858,0.26002 -1.375,0.34375 -1.3116,0.15937 -1.7622,-0.15683 -5.5313,0.28125 -3.5539,0.41309 -9.0054,1.27282 -10.25,1.9375 -1.599,-0.29729 -3.8577,-0.53419 -5.8437,-0.34375 -3.0588,0.29332 -4.972,0.48399 -6.9063,0.65625 -1.9342,0.17227 -1.6886,0.42237 -2.9062,0.53125 -1.3162,0.1177 -1.7598,-0.16363 -5.5312,0.25 -3.5421,0.38845 -9.0079,1.20927 -10.2813,1.875 -1.5989,-0.29469 -3.8872,-0.50701 -5.875,-0.3125 -3.05829,0.29925 -4.9412,0.48024 -6.875,0.65625 -0.65749,0.0598 -1.04005,0.17856 -1.34375,0.25 -0.4277,0.11896 -0.6835,0.21807 -1.375,0.28125 -1.316,0.12026 -1.75975,-0.19488 -5.53125,0.21875 -3.55619,0.39002 -9.0056,1.23916 -10.25,1.90625 -1.59869,-0.29418 -3.85985,-0.52372 -5.84375,-0.3125 -3.0557,0.32533 -4.97405,0.52624 -6.90625,0.71875 -1.93219,0.19251 -1.68975,0.44088 -2.90625,0.5625 -1.31488,0.13147 -1.76305,-0.16454 -5.53125,0.28125 -3.53889,0.41866 -8.9777,1.29217 -10.25,1.96875 -1.59759,-0.28104 -3.85995,-0.42043 -5.84375,-0.1875 -3.05198,0.35837 -4.945,0.56786 -6.875,0.78125 -0.65618,0.0726 -1.04065,0.17269 -1.34375,0.25 -0.42679,0.12723 -0.6849,0.2672 -1.375,0.34375 -1.31339,0.14569 -1.76735,-0.17402 -5.53125,0.3125 -3.54888,0.45876 -8.97865,1.41902 -10.21875,2.125 -1.59309,-0.24424 -3.8338,-0.38135 -5.8125,-0.125 -3.04759,0.39482 -4.9507,0.64845 -6.875,0.90625 -1.92429,0.25779 -1.7261,0.49353 -2.9375,0.65625 -1.30949,0.1759 -1.7472,-0.10438 -5.5,0.46875 -3.52429,0.53824 -8.9232,1.69917 -10.1875,2.4375 -1.58749,-0.20354 -3.8455,-0.25473 -5.8125,0.0937 -3.02619,0.53612 -4.8989,0.86169 -6.8125,1.1875 -0.65059,0.11077 -1.0137,0.27094 -1.3125,0.375 -0.42069,0.16488 -0.66345,0.3313 -1.34375,0.46875 -1.2947,0.26159 -1.7271,-0.006 -5.4375,0.8125 -3.49848,0.77195 -8.8459,2.38293 -10.0625,3.21875 -1.5629,-0.0774 -3.7575,0.0853 -5.6875,0.59375 -2.97238,0.78313 -4.8177,1.23209 -6.6875,1.75 -1.87,0.5179 -1.66665,0.76728 -2.84375,1.09375 -1.27249,0.3529 -1.69705,0.10709 -5.34375,1.1875 -3.42468,1.01463 -8.6494,2.93317 -9.875,3.84375 -1.53878,0.0127 -3.7198,0.27222 -5.625,0.875 -2.93098,0.92734 -4.75035,1.45842 -6.59375,2.0625 -0.62679,0.20538 -0.99165,0.39258 -1.28125,0.53125 -0.40758,0.21361 -0.6533,0.40875 -1.3125,0.625 -1.2545,0.41154 -1.68615,0.18904 -5.28125,1.4375 -3.38989,1.17717 -8.59495,3.2137 -9.78125,4.15625 -1.52388,0.0597 -3.65005,0.39487 -5.53125,1.0625 -2.89739,1.02829 -4.69905,1.67548 -6.53125,2.3125 -1.8322,0.63701 -1.62785,0.84854 -2.78125,1.25 -1.24679,0.43396 -1.66355,0.19972 -5.21875,1.5625 -3.3387,1.2798 -8.4871,3.48255 -9.6875,4.46875 -1.50718,0.10769 -3.6357,0.4988 -5.5,1.21875 -2.86818,1.1076 -4.6239,1.78156 -6.4375,2.46875 -0.6167,0.23363 -0.99645,0.44203 -1.28125,0.59375 10e-6,0 0,0.0295 0,0.0312 l -8,3.1875 1.8125,14.71875 c -0.25799,-0.0608 6.1875,3.3125 6.1875,3.3125 0.22641,-0.14468 0.44895,-0.27261 0.71875,-0.375 1.08052,-0.40998 2.1716,-0.21577 6,-1.6875 3.82852,-1.47174 5.22405,-2.00498 5.90625,-2.40625 0.67961,-0.39978 1.61175,-0.87937 2.21875,-1.53125 
1.82692,-0.13775 3.5708,-0.49323 4.9375,-1 2.968,-1.10052 4.87535,-1.80619 6.78125,-2.46875 1.90581,-0.66254 2.35415,-1.41487 3.40625,-1.78125 1.09162,-0.38011 2.1951,-0.16538 6.0625,-1.53125 3.8674,-1.36586 5.28315,-1.82708 5.96875,-2.21875 0.70111,-0.40052 1.7008,-0.93298 2.3125,-1.59375 1.97081,-0.0547 3.81695,-0.38463 5.28125,-0.875 3.00152,-1.00508 4.92615,-1.62171 6.84375,-2.25 1.53861,-0.5041 2.17415,-1.04677 2.90625,-1.4375 0.23022,-0.13431 0.4759,-0.25373 0.75,-0.34375 1.09832,-0.36048 2.18145,-0.0814 6.09375,-1.3125 3.91231,-1.23113 5.366,-1.67295 6.0625,-2.03125 0.69391,-0.35697 1.6301,-0.79261 2.25,-1.40625 1.86521,-0.0227 3.63585,-0.26683 5.03125,-0.6875 3.0304,-0.91354 4.9924,-1.4301 6.9375,-1.96875 1.94512,-0.53864 2.4262,-1.26452 3.5,-1.5625 1.11402,-0.30915 2.22,0.007 6.1875,-1.03125 3.9675,-1.03863 5.4175,-1.43273 6.125,-1.75 0.7348,-0.32959 1.8139,-0.75372 2.4375,-1.375 1.99782,0.116 3.85745,-0.0201 5.34375,-0.375 3.07811,-0.735 5.0834,-1.10094 7.0625,-1.5 1.58791,-0.32018 2.2443,-0.79055 3,-1.09375 0.23751,-0.1068 0.4669,-0.19276 0.75,-0.25 1.13341,-0.22919 2.30465,0.20893 6.34375,-0.5 4.03942,-0.70893 5.50025,-0.92709 6.21875,-1.1875 0.71581,-0.25944 1.70435,-0.56724 2.34375,-1.09375 1.9242,0.23949 3.7479,0.22453 5.1875,0 3.12642,-0.48762 5.15455,-0.70067 7.15625,-0.96875 2.00171,-0.26807 2.48875,-0.94514 3.59375,-1.09375 1.14642,-0.15418 2.27585,0.30157 6.34375,-0.21875 4.06781,-0.52032 5.56025,-0.69573 6.28125,-0.9375 0.73712,-0.24714 1.7981,-0.58623 2.4375,-1.125 2.05,0.33553 3.9737,0.39796 5.5,0.21875 3.1422,-0.36896 5.18,-0.55936 7.1875,-0.78125 1.61082,-0.17802 2.26465,-0.6082 3.03125,-0.84375 0.24091,-0.0855 0.49405,-0.1556 0.78125,-0.1875 1.1497,-0.12772 2.3013,0.34665 6.375,-0.125 4.0737,-0.47165 5.55905,-0.6106 6.28125,-0.84375 0.71941,-0.23227 1.70025,-0.47346 2.34375,-0.96875 1.9363,0.33346 3.77005,0.40424 5.21875,0.25 3.14601,-0.33495 5.1775,-0.51859 7.1875,-0.71875 2.00991,-0.20014 2.48415,-0.82639 3.59375,-0.9375 1.1511,-0.11528 2.2965,0.36506 6.375,-0.0625 4.0785,-0.42756 5.5889,-0.56209 6.3125,-0.78125 0.73922,-0.22386 1.7956,-0.51325 2.4375,-1.03125 2.057,0.39867 4.00185,0.4934 5.53125,0.34375 3.14871,-0.3081 5.1758,-0.47325 7.1875,-0.65625 1.61401,-0.14682 2.26305,-0.56055 3.03125,-0.78125 0.2413,-0.0809 0.49355,-0.12991 0.78125,-0.15625 1.15211,-0.10545 2.2929,0.39275 6.375,0 4.08211,-0.39275 5.5889,-0.53084 6.3125,-0.75 0.7209,-0.21833 1.6996,-0.4477 2.3437,-0.9375 1.9381,0.34999 3.7689,0.45438 5.2188,0.3125 3.1487,-0.3081 5.1758,-0.47325 7.1875,-0.65625 2.0116,-0.18299 2.5142,-0.83802 3.625,-0.9375 1.1523,-0.10323 2.2922,0.38483 6.375,0 4.0829,-0.38482 5.5887,-0.501 6.3125,-0.71875 0.7393,-0.22243 1.7956,-0.51449 2.4375,-1.03125 2.0574,0.40177 4.0029,0.50333 5.5313,0.34375 3.1465,-0.32852 5.177,-0.5227 7.1874,-0.71875 1.613,-0.15729 2.2657,-0.63148 3.0313,-0.875 0.2407,-0.088 0.4632,-0.12137 0.75,-0.15625 1.1483,-0.1397 2.3167,0.33991 6.375,-0.25 4.0583,-0.58992 5.5618,-0.77714 6.2813,-1.03125 0.7166,-0.25316 1.6746,-0.55807 2.3124,-1.09375 1.9197,0.21194 3.72,0.15141 5.1563,-0.0937 3.1191,-0.5324 5.1116,-0.92861 7.0937,-1.3125 1.9821,-0.38387 2.4743,-1.03965 3.5626,-1.28125 1.1287,-0.25066 2.2702,0.11629 6.25,-0.875 3.9795,-0.99127 5.4295,-1.4193 6.125,-1.78125 0.7222,-0.376 1.7617,-0.87058 2.375,-1.53125 1.9629,-0.012 3.7937,-0.29105 5.2187,-0.84375 2.9512,-1.1446 4.8732,-1.86942 6.6875,-2.75 1.4557,-0.70652 2.3191,-1.70203 2.5312,-2 0.2123,-0.29795 0.099,-0.72855 0.125,-0.75 0.043,-0.0352 0.3404,-0.094 0.5,-0.4375 0.859,-1.84707 2.3232,-5.62764 
2.4376,-6.3125 0.1137,-0.68214 0.168,-1.35277 0.2187,-1.75 0.029,-0.2295 -0.1471,-0.8789 -0.125,-0.9375 0.031,-0.082 0.2883,-0.25057 0.3437,-0.5 0.2663,-1.19831 0.089,-2.20736 -0.125,-3.625 -0.2139,-1.41763 -0.9716,-4.61463 -1.625,-5.46875 -0.6589,-0.86172 -1.2248,-1.01051 -1.75,-1 z"
- transform="translate(0.08004571,-0.03125)" />
- </clipPath>
- <filter
- inkscape:collect="always"
- id="filter7001-5">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="1.1675612"
- id="feGaussianBlur7003-7" />
- </filter>
- <filter
- inkscape:collect="always"
- id="filter6949-4"
- x="-0.10294895"
- width="1.2058979"
- y="-0.34224695"
- height="1.6844939">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="1.1675612"
- id="feGaussianBlur6951-1" />
- </filter>
- <filter
- inkscape:collect="always"
- id="filter6961-8"
- x="-0.09919104"
- width="1.1983821"
- y="-0.22643611"
- height="1.4528722">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="1.1675612"
- id="feGaussianBlur6963-5" />
- </filter>
- <filter
- inkscape:collect="always"
- id="filter6957-9"
- x="-0.098213427"
- width="1.1964267"
- y="-0.19838208"
- height="1.3967642">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="1.1675612"
- id="feGaussianBlur6959-7" />
- </filter>
- <filter
- inkscape:collect="always"
- id="filter6997-5">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="1.1675612"
- id="feGaussianBlur6999-3" />
- </filter>
- <filter
- inkscape:collect="always"
- id="filter6953-8"
- x="-0.098320946"
- width="1.1966419"
- y="-0.19750816"
- height="1.3950163">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="1.1675612"
- id="feGaussianBlur6955-8" />
- </filter>
- <filter
- inkscape:collect="always"
- id="filter6993-3"
- x="-0.098213255"
- width="1.1964265"
- y="-0.19838208"
- height="1.3967642">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="1.1675612"
- id="feGaussianBlur6995-1" />
- </filter>
- <filter
- inkscape:collect="always"
- id="filter6989-8"
- x="-0.098428868"
- width="1.1968577"
- y="-0.20287035"
- height="1.4057407">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="1.1675612"
- id="feGaussianBlur6991-9" />
- </filter>
- <filter
- inkscape:collect="always"
- id="filter6985-6"
- x="-0.098428868"
- width="1.1968577"
- y="-0.20853186"
- height="1.4170637">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="1.1675612"
- id="feGaussianBlur6987-4" />
- </filter>
- <filter
- inkscape:collect="always"
- id="filter6965-3"
- x="-0.099081434"
- width="1.1981629"
- y="-0.22529824"
- height="1.4505965">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="1.1675612"
- id="feGaussianBlur6967-3" />
- </filter>
- <filter
- inkscape:collect="always"
- id="filter6981-3"
- x="-0.10052545"
- width="1.2010509"
- y="-0.2742162"
- height="1.5484324">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="1.1675612"
- id="feGaussianBlur6983-8" />
- </filter>
- <filter
- inkscape:collect="always"
- id="filter6977-6"
- x="-0.10224481"
- width="1.2044896"
- y="-0.32371372"
- height="1.6474274">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="1.1675612"
- id="feGaussianBlur6979-0" />
- </filter>
- <filter
- inkscape:collect="always"
- id="filter6973-4"
- x="-0.10330495"
- width="1.2066098"
- y="-0.36439717"
- height="1.7287945">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="1.1675612"
- id="feGaussianBlur6975-8" />
- </filter>
- <filter
- inkscape:collect="always"
- id="filter6969-8"
- x="-0.10450897"
- width="1.2090179"
- y="-0.40468886"
- height="1.8093777">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="1.1675612"
- id="feGaussianBlur6971-8" />
- </filter>
- <filter
- inkscape:collect="always"
- id="filter7345-9">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="1.7233839"
- id="feGaussianBlur7347-7" />
- </filter>
- <filter
- inkscape:collect="always"
- id="filter7333-7">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="0.35026836"
- id="feGaussianBlur7335-6" />
- </filter>
- <filter
- inkscape:collect="always"
- id="filter7285-4"
- x="-0.030884685"
- width="1.0617694"
- y="-0.10267408"
- height="1.2053483">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="0.35026836"
- id="feGaussianBlur7287-3" />
- </filter>
- <filter
- inkscape:collect="always"
- id="filter7289-0">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="0.35026836"
- id="feGaussianBlur7291-3" />
- </filter>
- <filter
- inkscape:collect="always"
- id="filter7293-0">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="0.35026836"
- id="feGaussianBlur7295-9" />
- </filter>
- <filter
- inkscape:collect="always"
- id="filter7337-2">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="0.35026836"
- id="feGaussianBlur7339-5" />
- </filter>
- <filter
- inkscape:collect="always"
- id="filter7297-4">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="0.35026836"
- id="feGaussianBlur7299-0" />
- </filter>
- <filter
- inkscape:collect="always"
- id="filter7301-5">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="0.35026836"
- id="feGaussianBlur7303-9" />
- </filter>
- <filter
- inkscape:collect="always"
- id="filter7305-4">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="0.35026836"
- id="feGaussianBlur7307-6" />
- </filter>
- <filter
- inkscape:collect="always"
- id="filter7309-9">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="0.35026836"
- id="feGaussianBlur7311-2" />
- </filter>
- <filter
- inkscape:collect="always"
- id="filter7313-2">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="0.35026836"
- id="feGaussianBlur7315-4" />
- </filter>
- <filter
- inkscape:collect="always"
- id="filter7317-7">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="0.35026836"
- id="feGaussianBlur7319-7" />
- </filter>
- <filter
- inkscape:collect="always"
- id="filter7321-5">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="0.35026836"
- id="feGaussianBlur7323-4" />
- </filter>
- <filter
- inkscape:collect="always"
- id="filter7329-8"
- x="-0.030991485"
- width="1.061983"
- y="-0.10931916"
- height="1.2186383">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="0.35026836"
- id="feGaussianBlur7331-1" />
- </filter>
- <filter
- inkscape:collect="always"
- id="filter7325-2"
- x="-0.031352691"
- width="1.0627054"
- y="-0.12140666"
- height="1.2428133">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="0.35026836"
- id="feGaussianBlur7327-8" />
- </filter>
- <filter
- inkscape:collect="always"
- id="filter9048-9"
- x="-0.40879121"
- width="1.8175824"
- y="-0.71538466"
- height="2.4307692">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="21.92031"
- id="feGaussianBlur9050-3" />
- </filter>
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath3631-6">
- <path
- style="display:inline;opacity:1;fill:#ada469;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 760.16396,935.83377 c 6.7941,18.90279 10.49369,33.29969 11.8903,51.21189 1.39662,17.91234 -3.78268,51.80084 -2.90046,70.65614 0.88175,18.8452 8.13369,40.099 27.34463,48.9689 19.41887,8.9658 49.31924,10.2113 74.11984,-3.1456 24.8006,-13.357 57.40102,-70.3255 70.97426,-97.3087 13.62385,-27.08394 38.76107,-114.49737 44.6608,-149.76859 5.89973,-35.27121 2.55054,-41.30077 -4.61748,-49.05549 2.6403,-27.84015 -1.49972,-54.93543 13.10969,-87.18618 -30.24901,11.8257 -37.38229,40.1607 -48.31889,65.50508 -8.00091,-50.93293 0.20916,-71.27319 3.31889,-101.21936 -29.06476,14.77791 -42.86151,47.11402 -45,92.85714 -10.92395,-1.3042 -21.39144,-4.43423 -33.57143,-0.71429 -0.26404,-46.02334 -1.46356,-76.88941 8.91063,-114.20649 -53.25547,21.02686 -62.94728,106.5941 -56.05349,112.77792 -10.88282,0.535 -21.37108,-1.2973 -32.85714,2.85715 0.63892,-42.57135 -0.26046,-84.90861 -30,-122.85715 0,0 -30.95806,80.92234 -31.42857,103.57143 -0.47051,22.64909 9.45159,40.16588 9.45159,40.16588 0,0 -8.56807,36.74051 -6.29859,58.23223 2.29585,21.74146 20.4429,59.67617 27.26542,78.65809 z"
- id="path3633-8"
- sodipodi:nodetypes="czzzzzzcccccccccczczz"
- inkscape:connector-curvature="0" />
- </clipPath>
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath3677-0">
- <path
- style="display:inline;opacity:1;fill:#ada469;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 586.13271,997.98981 c 6.7941,18.90279 10.49369,33.29969 11.8903,51.21189 1.39662,17.9123 -3.78268,51.8008 -2.90046,70.6561 0.88175,18.8452 8.13369,40.099 27.34463,48.9689 19.41887,8.9658 49.31924,10.2113 74.11984,-3.1456 24.8006,-13.357 57.40102,-70.3255 70.97426,-97.3087 13.62385,-27.0839 38.76107,-114.49733 44.6608,-149.76855 5.89973,-35.27121 2.55054,-41.30077 -4.61748,-49.05549 2.6403,-27.84015 -1.49972,-54.93543 13.10969,-87.18618 -30.24901,11.8257 -37.38229,40.1607 -48.31889,65.50508 -8.00091,-50.93293 0.20916,-71.27319 3.31889,-101.21936 -29.06476,14.77791 -42.86151,47.11402 -45,92.85714 -10.92395,-1.3042 -21.39144,-4.43423 -33.57143,-0.71429 -0.26404,-46.02334 -1.46356,-76.88941 8.91063,-114.20649 -53.25547,21.02686 -62.94728,106.5941 -56.05349,112.77792 -10.88282,0.535 -21.37108,-1.2973 -32.85714,2.85715 0.63892,-42.57135 -0.26046,-84.90861 -30,-122.85715 0,0 -30.95806,80.92234 -31.42857,103.57143 -0.47051,22.64909 9.45159,40.16588 9.45159,40.16588 0,0 -8.56807,36.74051 -6.29859,58.23223 2.29585,21.74146 20.4429,59.67617 27.26542,78.65809 z"
- id="path3679-2"
- sodipodi:nodetypes="czzzzzzcccccccccczczz"
- inkscape:connector-curvature="0" />
- </clipPath>
- <filter
- inkscape:collect="always"
- id="filter3898-1">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="10.892985"
- id="feGaussianBlur3900-0" />
- </filter>
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath3622-5">
- <path
- style="display:inline;opacity:1;fill:#ada469;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 266.27183,924.57186 c -1.40727,18.80121 -1.1449,32.75103 2.08174,49.30328 3.22665,16.55234 16.40608,45.90736 20.3344,63.18376 3.92622,17.2671 2.69413,38.3096 -12.45944,51.1482 -15.31761,12.9774 -42.05127,21.5989 -67.8323,15.7338 -25.78106,-5.8653 -69.54907,-49.2234 -88.59019,-70.2283 C 100.6939,1012.6293 56.045183,939.86194 41.867508,909.43681 27.689836,879.01169 29.207903,872.71824 33.747793,863.90708 24.381071,839.38658 21.334081,813.84027 0.03533552,788.33044 30.360815,791.44488 43.915625,815.28677 60.161025,835.47019 54.631129,787.39416 42.10631,771.05369 31.787073,744.74589 c 29.994295,6.08166 50.57936,31.8724 63.979783,72.7125 9.554154,-3.91791 18.237764,-9.37294 30.187414,-9.0612 -11.2975,-41.6958 -17.94946,-69.91584 -36.687255,-101.06994 53.441965,5.67033 83.657025,80.63932 78.971425,87.9608 9.97797,-2.24399 19.00565,-6.53038 30.43653,-5.65167 -11.24896,-38.34702 -21.04781,-76.8679 -3.65971,-118.64818 0,0 48.28678,65.43687 54.38966,85.80577 6.10287,20.36891 1.51881,38.70052 1.51881,38.70052 0,0 16.95957,31.08529 20.29392,51.09413 3.3731,20.24135 -3.53269,59.10332 -4.94582,77.98324 z"
- id="path3624-1"
- sodipodi:nodetypes="czzzzzzcccccccccczczz"
- inkscape:connector-curvature="0" />
- </clipPath>
- <filter
- inkscape:collect="always"
- id="filter9024-1"
- x="-0.55453134"
- width="2.1090627"
- y="-0.51434779"
- height="2.0286956">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="20.912684"
- id="feGaussianBlur9026-0" />
- </filter>
- <filter
- inkscape:collect="always"
- id="filter9020-8"
- x="-0.32861114"
- width="1.6572223"
- y="-0.182"
- height="1.364">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="20.912684"
- id="feGaussianBlur9022-5" />
- </filter>
- <filter
- inkscape:collect="always"
- id="filter9044-0"
- x="-0.32631579"
- width="1.6526316"
- y="-0.84545463"
- height="2.6909094">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="21.92031"
- id="feGaussianBlur9046-6" />
- </filter>
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath4177-4">
- <path
- sodipodi:nodetypes="czzzzzzcccccccccczczz"
- id="path4179-6"
- d="m 586.13271,997.98981 c 6.7941,18.90279 10.49369,33.29969 11.8903,51.21189 1.39662,17.9123 -3.78268,51.8008 -2.90046,70.6561 0.88175,18.8452 8.13369,40.099 27.34463,48.9689 19.41887,8.9658 49.31924,10.2113 74.11984,-3.1456 24.8006,-13.357 57.40102,-70.3255 70.97426,-97.3087 13.62385,-27.0839 38.76107,-114.49733 44.6608,-149.76855 5.89973,-35.27121 2.55054,-41.30077 -4.61748,-49.05549 2.6403,-27.84015 -1.49972,-54.93543 13.10969,-87.18618 -30.24901,11.8257 -37.38229,40.1607 -48.31889,65.50508 -8.00091,-50.93293 0.20916,-71.27319 3.31889,-101.21936 -29.06476,14.77791 -42.86151,47.11402 -45,92.85714 -10.92395,-1.3042 -21.39144,-4.43423 -33.57143,-0.71429 -0.26404,-46.02334 -1.46356,-76.88941 8.91063,-114.20649 -53.25547,21.02686 -62.94728,106.5941 -56.05349,112.77792 -10.88282,0.535 -21.37108,-1.2973 -32.85714,2.85715 0.63892,-42.57135 -0.26046,-84.90861 -30,-122.85715 0,0 -30.95806,80.92234 -31.42857,103.57143 -0.47051,22.64909 9.45159,40.16588 9.45159,40.16588 0,0 -8.56807,36.74051 -6.29859,58.23223 2.29585,21.74146 20.4429,59.67617 27.26542,78.65809 z"
- style="display:inline;opacity:1;fill:#ada469;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- inkscape:connector-curvature="0" />
- </clipPath>
- <filter
- inkscape:collect="always"
- id="filter4105-2">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="3.8640966"
- id="feGaussianBlur4107-5" />
- </filter>
- <filter
- inkscape:collect="always"
- id="filter4130-8"
- x="-0.49509686"
- width="1.9901937"
- y="-0.26708817"
- height="1.5341763">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="10.730622"
- id="feGaussianBlur4132-6" />
- </filter>
- <filter
- inkscape:collect="always"
- id="filter4141-2"
- x="-0.40611032"
- width="1.8122206"
- y="-0.30260596"
- height="1.6052119">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="9.8586086"
- id="feGaussianBlur4143-8" />
- </filter>
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath8338-4">
- <path
- sodipodi:nodetypes="czzzzzzcccccccccczczz"
- id="path8340-7"
- d="m 266.27183,924.57185 c -1.40727,18.80122 -1.1449,32.75104 2.08174,49.30328 3.22666,16.55238 16.40609,45.90737 20.33441,63.18377 3.92621,17.2671 2.69413,38.3097 -12.45944,51.1482 -15.31761,12.9775 -42.05127,21.599 -67.8323,15.7338 -25.78106,-5.8653 -69.54908,-49.2234 -88.59019,-70.2283 C 100.6939,1012.6293 56.045182,939.86193 41.867507,909.4368 27.689835,879.01168 29.207902,872.71823 33.747792,863.90708 24.38107,839.38658 21.33408,813.84026 0.03533448,788.33044 30.360814,791.44487 43.915624,815.28676 60.161024,835.47019 54.631128,787.39416 42.106309,771.05368 31.787072,744.74589 c 29.994295,6.08165 50.57936,31.87239 63.979783,72.7125 9.554155,-3.91792 18.237765,-9.37294 30.187415,-9.0612 -11.2975,-41.6958 -17.94947,-69.91585 -36.687256,-101.06994 53.441966,5.67032 83.657026,80.63932 78.971426,87.9608 9.97797,-2.24399 19.00565,-6.53038 30.43653,-5.65167 -11.24897,-38.34703 -21.04782,-76.8679 -3.65971,-118.64819 0,0 48.28678,65.43688 54.38965,85.80578 6.10288,20.3689 1.51882,38.70051 1.51882,38.70051 0,0 16.95957,31.0853 20.29392,51.09414 3.3731,20.24134 -3.53269,59.10331 -4.94582,77.98323 z"
- style="display:inline;opacity:1;fill:#ada469;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- inkscape:connector-curvature="0" />
- </clipPath>
- <filter
- inkscape:collect="always"
- id="filter8333-2">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="7.18"
- id="feGaussianBlur8335-4" />
- </filter>
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath8359-0">
- <path
- sodipodi:nodetypes="czzzzzzcccccccccczczz"
- id="path8361-6"
- d="m 266.27183,924.57185 c -1.40727,18.80122 -1.1449,32.75104 2.08174,49.30328 3.22666,16.55238 16.40609,45.90737 20.33441,63.18377 3.92621,17.2671 2.69413,38.3097 -12.45944,51.1482 -15.31761,12.9775 -42.05127,21.599 -67.8323,15.7338 -25.78106,-5.8653 -69.54908,-49.2234 -88.59019,-70.2283 C 100.6939,1012.6293 56.045182,939.86193 41.867507,909.4368 27.689835,879.01168 29.207902,872.71823 33.747792,863.90708 24.38107,839.38658 21.33408,813.84026 0.03533448,788.33044 30.360814,791.44487 43.915624,815.28676 60.161024,835.47019 54.631128,787.39416 42.106309,771.05368 31.787072,744.74589 c 29.994295,6.08165 50.57936,31.87239 63.979783,72.7125 9.554155,-3.91792 18.237765,-9.37294 30.187415,-9.0612 -11.2975,-41.6958 -17.94947,-69.91585 -36.687256,-101.06994 53.441966,5.67032 83.657026,80.63932 78.971426,87.9608 9.97797,-2.24399 19.00565,-6.53038 30.43653,-5.65167 -11.24897,-38.34703 -21.04782,-76.8679 -3.65971,-118.64819 0,0 48.28678,65.43688 54.38965,85.80578 6.10288,20.3689 1.51882,38.70051 1.51882,38.70051 0,0 16.95957,31.0853 20.29392,51.09414 3.3731,20.24134 -3.53269,59.10331 -4.94582,77.98323 z"
- style="display:inline;opacity:1;fill:#ada469;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- inkscape:connector-curvature="0" />
- </clipPath>
- <filter
- inkscape:collect="always"
- id="filter8354-2">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="6.82"
- id="feGaussianBlur8356-9" />
- </filter>
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath3636-90">
- <path
- style="display:inline;opacity:1;fill:#ada469;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 760.16396,935.83377 c 6.7941,18.90279 10.49369,33.29969 11.8903,51.21189 1.39662,17.91234 -3.78268,51.80084 -2.90046,70.65614 0.88175,18.8452 8.13369,40.099 27.34463,48.9689 19.41887,8.9658 49.31924,10.2113 74.11984,-3.1456 24.8006,-13.357 57.40102,-70.3255 70.97426,-97.3087 13.62385,-27.08394 38.76107,-114.49737 44.6608,-149.76859 5.89973,-35.27121 2.55054,-41.30077 -4.61748,-49.05549 2.6403,-27.84015 -1.49972,-54.93543 13.10969,-87.18618 -30.24901,11.8257 -37.38229,40.1607 -48.31889,65.50508 -8.00091,-50.93293 0.20916,-71.27319 3.31889,-101.21936 -29.06476,14.77791 -42.86151,47.11402 -45,92.85714 -10.92395,-1.3042 -21.39144,-4.43423 -33.57143,-0.71429 -0.26404,-46.02334 -1.46356,-76.88941 8.91063,-114.20649 -53.25547,21.02686 -62.94728,106.5941 -56.05349,112.77792 -10.88282,0.535 -21.37108,-1.2973 -32.85714,2.85715 0.63892,-42.57135 -0.26046,-84.90861 -30,-122.85715 0,0 -30.95806,80.92234 -31.42857,103.57143 -0.47051,22.64909 9.45159,40.16588 9.45159,40.16588 0,0 -8.56807,36.74051 -6.29859,58.23223 2.29585,21.74146 20.4429,59.67617 27.26542,78.65809 z"
- id="path3638-8"
- sodipodi:nodetypes="czzzzzzcccccccccczczz"
- inkscape:connector-curvature="0" />
- </clipPath>
- <filter
- inkscape:collect="always"
- id="filter4185-1">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="3.6164709"
- id="feGaussianBlur4187-3" />
- </filter>
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath8392-1">
- <path
- sodipodi:nodetypes="czzzzzzcccccccccczczz"
- id="path8394-1"
- d="m 760.16396,935.83377 c 6.7941,18.90279 10.49369,33.29969 11.8903,51.21189 1.39662,17.91234 -3.78268,51.80084 -2.90046,70.65614 0.88175,18.8452 8.13369,40.099 27.34463,48.9689 19.41887,8.9658 49.31924,10.2113 74.11984,-3.1456 24.8006,-13.357 57.40102,-70.3255 70.97426,-97.3087 13.62385,-27.08394 38.76107,-114.49737 44.6608,-149.76859 5.89973,-35.27121 2.55054,-41.30077 -4.61748,-49.05549 2.6403,-27.84015 -1.49972,-54.93543 13.10969,-87.18618 -30.24901,11.8257 -37.38229,40.1607 -48.31889,65.50508 -8.00091,-50.93293 0.20916,-71.27319 3.31889,-101.21936 -29.06476,14.77791 -42.86151,47.11402 -45,92.85714 -10.92395,-1.3042 -21.39144,-4.43423 -33.57143,-0.71429 -0.26404,-46.02334 -1.46356,-76.88941 8.91063,-114.20649 -53.25547,21.02686 -62.94728,106.5941 -56.05349,112.77792 -10.88282,0.535 -21.37108,-1.2973 -32.85714,2.85715 0.63892,-42.57135 -0.26046,-84.90861 -30,-122.85715 0,0 -30.95806,80.92234 -31.42857,103.57143 -0.47051,22.64909 9.45159,40.16588 9.45159,40.16588 0,0 -8.56807,36.74051 -6.29859,58.23223 2.29585,21.74146 20.4429,59.67617 27.26542,78.65809 z"
- style="display:inline;opacity:1;fill:#ada469;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- inkscape:connector-curvature="0" />
- </clipPath>
- <filter
- inkscape:collect="always"
- id="filter8379-0"
- x="-0.14413793"
- width="1.288276"
- y="-0.10278689"
- height="1.2055738">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="7.389266"
- id="feGaussianBlur8381-3" />
- </filter>
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath8417-4">
- <path
- sodipodi:nodetypes="czzzzzzcccccccccczczz"
- id="path8419-03"
- d="m 760.16396,935.83377 c 6.7941,18.90279 10.49369,33.29969 11.8903,51.21189 1.39662,17.91234 -3.78268,51.80084 -2.90046,70.65614 0.88175,18.8452 8.13369,40.099 27.34463,48.9689 19.41887,8.9658 49.31924,10.2113 74.11984,-3.1456 24.8006,-13.357 57.40102,-70.3255 70.97426,-97.3087 13.62385,-27.08394 38.76107,-114.49737 44.6608,-149.76859 5.89973,-35.27121 2.55054,-41.30077 -4.61748,-49.05549 2.6403,-27.84015 -1.49972,-54.93543 13.10969,-87.18618 -30.24901,11.8257 -37.38229,40.1607 -48.31889,65.50508 -8.00091,-50.93293 0.20916,-71.27319 3.31889,-101.21936 -29.06476,14.77791 -42.86151,47.11402 -45,92.85714 -10.92395,-1.3042 -21.39144,-4.43423 -33.57143,-0.71429 -0.26404,-46.02334 -1.46356,-76.88941 8.91063,-114.20649 -53.25547,21.02686 -62.94728,106.5941 -56.05349,112.77792 -10.88282,0.535 -21.37108,-1.2973 -32.85714,2.85715 0.63892,-42.57135 -0.26046,-84.90861 -30,-122.85715 0,0 -30.95806,80.92234 -31.42857,103.57143 -0.47051,22.64909 9.45159,40.16588 9.45159,40.16588 0,0 -8.56807,36.74051 -6.29859,58.23223 2.29585,21.74146 20.4429,59.67617 27.26542,78.65809 z"
- style="display:inline;opacity:1;fill:#ada469;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- inkscape:connector-curvature="0" />
- </clipPath>
- <filter
- inkscape:collect="always"
- id="filter8404-9"
- x="-0.090268657"
- width="1.1805373"
- y="-0.10250848"
- height="1.205017">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="5.3457272"
- id="feGaussianBlur8406-1" />
- </filter>
- <linearGradient
- inkscape:collect="always"
- xlink:href="#linearGradient2843"
- id="linearGradient6951"
- gradientUnits="userSpaceOnUse"
- x1="347.89655"
- y1="1070.2124"
- x2="275.58191"
- y2="867.97992" />
- <linearGradient
- inkscape:collect="always"
- xlink:href="#linearGradient8964"
- id="linearGradient6953"
- gradientUnits="userSpaceOnUse"
- gradientTransform="translate(450.03125,73.843964)"
- x1="603.84064"
- y1="627.85303"
- x2="616.24396"
- y2="585.42664" />
- <linearGradient
- inkscape:collect="always"
- xlink:href="#linearGradient8952"
- id="linearGradient6955"
- gradientUnits="userSpaceOnUse"
- gradientTransform="translate(450.03125,73.843964)"
- x1="609.31244"
- y1="239.46866"
- x2="560.83142"
- y2="262.86206" />
- <linearGradient
- inkscape:collect="always"
- xlink:href="#linearGradient3961"
- id="linearGradient6957"
- gradientUnits="userSpaceOnUse"
- gradientTransform="translate(450.03125,73.843964)"
- x1="398.21429"
- y1="343.52289"
- x2="379.28571"
- y2="265.30862" />
- <linearGradient
- inkscape:collect="always"
- xlink:href="#linearGradient4084"
- id="linearGradient6959"
- gradientUnits="userSpaceOnUse"
- x1="182.35046"
- y1="256.11136"
- x2="145.53348"
- y2="542.20502" />
- <linearGradient
- inkscape:collect="always"
- xlink:href="#linearGradient4084"
- id="linearGradient6961"
- gradientUnits="userSpaceOnUse"
- x1="182.35046"
- y1="256.11136"
- x2="145.53348"
- y2="542.20502" />
- <linearGradient
- inkscape:collect="always"
- xlink:href="#linearGradient4478"
- id="linearGradient6963"
- gradientUnits="userSpaceOnUse"
- x1="412.08926"
- y1="404.91574"
- x2="417.375"
- y2="401.82648" />
- <linearGradient
- inkscape:collect="always"
- xlink:href="#linearGradient4478"
- id="linearGradient6965"
- gradientUnits="userSpaceOnUse"
- x1="411.91071"
- y1="404.91577"
- x2="417.375"
- y2="401.82648" />
- <linearGradient
- inkscape:collect="always"
- xlink:href="#linearGradient4478"
- id="linearGradient6967"
- gradientUnits="userSpaceOnUse"
- x1="411.91071"
- y1="405.54077"
- x2="417.375"
- y2="401.82648" />
- <linearGradient
- inkscape:collect="always"
- xlink:href="#linearGradient4478"
- id="linearGradient6969"
- gradientUnits="userSpaceOnUse"
- x1="412.08926"
- y1="405.54077"
- x2="417.375"
- y2="401.82648" />
- <linearGradient
- inkscape:collect="always"
- xlink:href="#linearGradient4478"
- id="linearGradient6971"
- gradientUnits="userSpaceOnUse"
- x1="411.73212"
- y1="405.54077"
- x2="417.375"
- y2="401.82648" />
- <linearGradient
- inkscape:collect="always"
- xlink:href="#linearGradient3660"
- id="linearGradient6973"
- gradientUnits="userSpaceOnUse"
- x1="1255.7386"
- y1="667.09216"
- x2="893.69995"
- y2="858.01099" />
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath6975">
- <rect
- id="rect6977"
- width="440"
- height="376"
- x="547.99994"
- y="205.32277"
- style="display:inline;opacity:1;fill:none;fill-opacity:1;stroke:#f8d615;stroke-width:18;stroke-linejoin:miter;stroke-miterlimit:1;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;enable-background:new" />
- </clipPath>
- <marker
- inkscape:stockid="Arrow1Send"
- orient="auto"
- refY="0"
- refX="0"
- id="Arrow1Send-4"
- style="overflow:visible"
- inkscape:isstock="true">
- <path
- inkscape:connector-curvature="0"
- id="path7188-9"
- d="M 0,0 5,-5 -12.5,0 5,5 0,0 Z"
- style="fill:#f8d615;fill-opacity:1;fill-rule:evenodd;stroke:#f8d615;stroke-width:1pt;stroke-opacity:1"
- transform="matrix(-0.2,0,0,-0.2,-1.2,0)" />
- </marker>
- </defs>
- <metadata
- id="metadata7">
- <rdf:RDF>
- <cc:Work
- rdf:about="">
- <dc:format>image/svg+xml</dc:format>
- <dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
- <dc:title></dc:title>
- </cc:Work>
- </rdf:RDF>
- </metadata>
- <g
- inkscape:groupmode="layer"
- id="layer1"
- inkscape:label="Shadow"
- transform="translate(48.571445,195.53053)" />
- <g
- inkscape:groupmode="layer"
- id="layer20"
- inkscape:label="New Ear"
- transform="translate(48.571445,195.53053)" />
- <g
- inkscape:groupmode="layer"
- id="layer21"
- inkscape:label="Rendered2"
- style="display:inline"
- transform="translate(48.571445,195.53053)" />
- <g
- inkscape:groupmode="layer"
- id="layer15"
- inkscape:label="Feet"
- style="display:inline"
- transform="translate(48.571445,195.53053)" />
- <g
- inkscape:groupmode="layer"
- id="layer16"
- inkscape:label="Left Foot"
- style="display:inline"
- transform="translate(48.571445,195.53053)">
- <rect
- style="display:inline;opacity:1;fill:#a8a8a8;fill-opacity:1;stroke:#000000;stroke-width:20.89992332;stroke-linejoin:miter;stroke-miterlimit:1;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;enable-background:new"
- id="rect6676-3-7-5"
- width="1876.7191"
- height="1562.9667"
- x="-38.121483"
- y="-86.153076" />
- <rect
- style="display:inline;opacity:1;fill:none;fill-opacity:1;stroke:#000000;stroke-width:20.92477036;stroke-linejoin:miter;stroke-miterlimit:1;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;enable-background:new"
- id="rect6676-3-7"
- width="1878.7875"
- height="1564.9603"
- x="2288.5129"
- y="-84.10511" />
- <rect
- style="display:inline;opacity:1;fill:#a8a8a8;fill-opacity:1;stroke:#f83615;stroke-width:20.39127541;stroke-linejoin:miter;stroke-miterlimit:1;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;enable-background:new"
- id="rect6676-3"
- width="1833.4282"
- height="1522.9458"
- x="2309.7717"
- y="-62.567806" />
- <g
- id="g4303">
- <path
- inkscape:export-ydpi="142.10527"
- inkscape:export-xdpi="142.10527"
- inkscape:export-filename="/home/cheeseness/Documents/LCA09/mascot/tuz_new.png"
- transform="matrix(10.726753,0,0,10.726753,-2882.1235,-4565.4583)"
- sodipodi:nodetypes="cccccccccsccccccccccc"
- id="path10326"
- d="m 304.64285,526.6479 c -10,0.35715 -18.21428,2.85714 -18.21428,2.85714 l 7.5,6.07143 10.35714,3.57143 16.07143,0.35714 22.5,-5.35714 7.85714,1.07143 20.35715,-2.14286 -10.35715,6.78572 c 5.45923,-1.02361 17.39329,3.56911 9.64286,5.35714 -1.74,0.40142 13.92857,-4.64285 13.92857,-4.64285 l 2.5,-4.64287 3.57143,-9.28571 11.42857,0 18.21428,-4.64286 3.57144,-4.99999 -16.07144,1.07142 -12.14285,2.14286 -14.64286,-5 -70.6921,16.70774 -5.37933,-5.27917 z"
- style="display:inline;opacity:0.5;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter11361);enable-background:new"
- inkscape:connector-curvature="0" />
- <g
- transform="matrix(0.71084,-0.1937433,0.262963,0.9648058,503.68027,136.48399)"
- id="g7882"
- style="display:inline;opacity:1;enable-background:new">
- <path
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#radialGradient7904);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- d="m 245.12255,100.05344 c 0,0 -47.12811,-31.646921 -67.21465,-35.800939 -20.03792,-4.143963 -38.4729,-3.317578 -51.93364,13.607323 -13.46074,16.924901 -12.07739,61.265196 -13.53554,86.969546 -1.45815,25.70435 2.54945,70.17701 17.6046,88.66552 15.05516,18.4885 45.88634,13.58502 49.92695,21.4137 2.21283,4.28736 65.15228,-174.85515 65.15228,-174.85515 z"
- id="path7876"
- sodipodi:nodetypes="czzzzcc"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#radialGradient7906);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- d="m 135.37935,82.017807 c 0,0 26.34355,1.938783 37.63307,13.903188 11.41494,12.097335 13.73457,21.331515 15.29586,37.734585 1.56337,16.42499 -0.84957,28.41812 -7.81382,36.03734 -6.96425,7.61922 -1.00429,19.58332 -25.91605,12.07107 -24.91176,-7.51225 -27.03224,-27.78298 -26.51523,-46.30475 0.51721,-18.52898 7.31617,-53.441433 7.31617,-53.441433 z"
- id="path7878"
- sodipodi:nodetypes="czzzzzc"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;opacity:1;fill:url(#radialGradient7908);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 135.648,81.927211 c 0,0 -4.64465,16.365075 0.58825,28.563099 5.48794,12.79254 27.22425,44.26007 27.22425,54.65565 l 22.65625,-5 c 2.54218,-6.96644 3.21052,-15.75206 2.1875,-26.5 -1.56129,-16.40307 -3.8663,-25.62141 -15.28125,-37.718749 -9.65488,-10.232047 -31.59311,-13.374857 -37.375,-14 z"
- id="path7880"
- sodipodi:nodetypes="czccssc"
- inkscape:connector-curvature="0" />
- </g>
- <path
- sodipodi:nodetypes="czzzcczzcc"
- id="path7917"
- d="m 845.03125,1154.7776 c -4.28571,0.7143 -27.62815,3.6181 -57.85714,10 -30.22899,6.3819 -57.31395,4.9661 -135.78608,17.3296 -79.85178,12.5808 -94.06436,42.5423 -108.12225,47.0643 -14.70014,4.7286 -145.37739,-65.8225 -145.37739,-65.8225 l 4.28572,-94.2857 c 0,0 85.88551,-16.2009 112.14285,-33.5714 26.25735,-17.3705 45.58238,-49.66602 59.28572,-71.42861 13.70334,-21.76259 32.85714,-71.42858 32.85714,-71.42858 l 238.57143,262.14289 z"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- inkscape:connector-curvature="0" />
- <path
- transform="translate(276,136)"
- sodipodi:nodetypes="ccccc"
- clip-path="url(#clipPath8658)"
- id="path7919"
- d="m 332.34019,898.38549 -32.73181,-61.29956 -37.61734,45.10646 c 2.17675,1.31711 5.77425,-20.85603 45.6004,-64.41708 l 24.74875,80.61018 z"
- style="display:inline;overflow:visible;visibility:visible;opacity:0.5;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter8888);enable-background:accumulate"
- inkscape:connector-curvature="0" />
- <path
- transform="translate(276,136)"
- sodipodi:nodetypes="cccccc"
- clip-path="url(#clipPath2833)"
- id="path7923"
- d="m 200.81833,863.03015 146.3711,-51.61879 243.95184,226.27414 -241.83052,140.0072 -181.01934,-87.6813 32.52692,-226.98125 z"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#linearGradient2841);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter8892);enable-background:accumulate"
- inkscape:connector-curvature="0" />
- <path
- sodipodi:nodetypes="czzcczcc"
- id="path7921"
- d="m 642.88839,640.13471 c 0,0 -29.55406,40.57305 -47.85714,74.28571 -18.30309,33.71267 -58.62109,126.35694 -70.35714,171.07143 -11.7594,44.80344 -62.5,123.57145 -62.5,123.57145 l 76.07143,18.2143 c 0,0 11.80712,-12.8234 31.07142,-46.07146 19.2643,-33.24808 60.35715,-138.57143 60.35715,-138.57143 l 13.21428,-202.5 z"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#0f0f0f;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- inkscape:connector-curvature="0" />
- <path
- transform="translate(276,136)"
- clip-path="url(#clipPath3665)"
- sodipodi:nodetypes="ccccccc"
- id="path7925"
- d="m 430.28131,381.94122 c -7.07106,2.82843 -236.18124,32.15181 -236.18124,32.15181 l -39.63961,359.83304 90.19849,92.63961 52.3259,-114.5513 100.46804,-186.39192 32.82842,-183.68124 z"
- style="display:inline;overflow:visible;visibility:visible;opacity:0.4;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter8856);enable-background:accumulate"
- inkscape:connector-curvature="0" />
- <path
- sodipodi:nodetypes="czzzzcc"
- id="path7927"
- d="m 969.67051,1164.0346 c 0,0 23.25628,11.3937 36.06779,20.4761 12.6974,9.0015 29.4724,24.6491 41.6924,37.3605 12.3055,12.8002 20.1127,22.5987 41.5327,24.1608 21.4322,1.5629 53.2824,-8.7876 73.296,-24.6642 20.0135,-15.8766 45.6469,-69.2328 45.6469,-69.2328 l -127.1608,-143.0717"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- inkscape:connector-curvature="0" />
- <path
- transform="translate(276,136)"
- sodipodi:nodetypes="ccccc"
- clip-path="url(#clipPath8642)"
- id="path7929"
- d="M 331.34019,641.50471 216.17367,835.36467 260.2153,925.96265 357.79603,732.21539 331.34019,641.50471 Z"
- style="display:inline;overflow:visible;visibility:visible;opacity:0.75;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter8860);enable-background:accumulate"
- inkscape:connector-curvature="0" />
- <g
- inkscape:transform-center-y="-28.255779"
- inkscape:transform-center-x="-347.89063"
- transform="matrix(0.9934486,0.1142802,-0.1142802,0.9934486,-9.24324,588.09054)"
- id="g7931"
- style="display:inline;opacity:1;enable-background:new">
- <path
- id="path7933"
- d="m 1049.205,-282.26672 -0.09,0.008 c -1.3874,0.88445 -6.6033,1.6072 -6.629,9.52344 -0.024,7.42525 15.0129,17.09146 17.1563,18.09375 1.7302,0.80909 3.5916,1.40876 5.4063,1.71875 l 1.4374,0.21875 c 1.9197,0.21194 3.72,0.15141 5.1563,-0.0937 3.1191,-0.5324 5.1116,-0.92861 7.0937,-1.3125 1.9821,-0.38387 2.4743,-1.03965 3.5626,-1.28125 1.1287,-0.25066 2.2702,0.11629 6.25,-0.875 3.9795,-0.99128 5.4294,-1.4193 6.125,-1.78125 0.7222,-0.37601 1.7617,-0.87058 2.375,-1.53125 1.9629,-0.012 3.7937,-0.29105 5.2187,-0.84375 2.9512,-1.14461 4.8732,-1.86942 6.6875,-2.75 1.4557,-0.70653 2.3191,-1.70203 2.5312,-2 0.2123,-0.29796 0.099,-0.72855 0.125,-0.75 0.043,-0.0352 0.3405,-0.094 0.5,-0.4375 0.859,-1.84708 2.3232,-5.62764 2.4376,-6.3125 0.1137,-0.68215 0.168,-1.35277 0.2187,-1.75 0.029,-0.22951 -0.1471,-0.8789 -0.125,-0.9375 0.031,-0.082 0.2883,-0.25057 0.3437,-0.5 0.2663,-1.19832 0.089,-2.20736 -0.125,-3.625 -0.2139,-1.41764 -0.9716,-4.61463 -1.625,-5.46875 -0.4194,-0.54857 -0.7993,-0.7925 -1.1562,-0.90625 -0.067,-0.0173 -0.1239,-0.0467 -0.1875,-0.0625 -0.021,-0.004 -0.042,0.003 -0.062,0 -0.3116,-0.0755 -0.6085,-0.15867 -1.1562,-0.21875 -0.9855,-0.10812 -2.4247,-0.2594 -3.9688,-0.25 -0.5147,0.003 -1.0371,0.0476 -1.5625,0.0937 -3.5589,0.31228 -9.0098,0.99108 -10.2187,1.625 -1.6331,-0.33402 -3.9482,-0.61223 -5.9376,-0.46875 -3.064,0.22097 -4.9677,0.34219 -6.9062,0.46875 -1.9384,0.12655 -1.6861,0.38864 -2.9062,0.46875 -1.3191,0.0866 -1.7869,-0.22325 -5.5626,0.0937 -3.5457,0.29772 -8.9806,0.99317 -10.2187,1.625 -1.6334,-0.33451 -3.9459,-0.61239 -5.9375,-0.46875 -3.0642,0.22098 -4.9678,0.37344 -6.9062,0.5 -0.6592,0.043 -1.0424,0.12393 -1.3438,0.1875 z"
- style="display:inline;opacity:1;fill:#bcb786;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- inkscape:connector-curvature="0" />
- <g
- transform="matrix(0.9975712,-0.06965428,0.06965428,0.9975712,872.72062,140.02502)"
- id="g7935"
- style="display:inline;filter:url(#filter7610);enable-background:new"
- clip-path="url(#clipPath7616)">
- <path
- style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 229.94262,-409.12268 c -3.55781,0.05 -9.0242,0.36009 -10.30334,0.90414 -1.60609,-0.44747 -3.90316,-0.88131 -5.89995,-0.87674 -3.07199,0.007 -4.96469,0.009 -6.90727,0 -0.66047,-0.003 -1.04759,0.0672 -1.35267,0.10959 0,0 0,1.09593 0,1.09593 0.11972,-0.17947 0.39252,-0.69046 0.94975,-0.76715 0.74758,-0.10289 5.16928,-0.15123 7.31019,-0.1096 1.7746,0.0345 4.45523,0.27427 6.38921,0.95895 0.3214,0.11378 0.61925,0.27378 0.89219,0.41097 1.96342,0.98693 7.94336,4.30154 7.94336,4.30154 0,0 -6.63275,-3.94768 -7.48287,-4.43853 -0.20331,-0.11739 -0.57464,-0.25769 -1.03609,-0.41098 1.22063,-0.44779 5.07597,-0.61971 7.82823,-0.71235 3.0245,-0.10182 3.34776,-0.0896 5.41069,0.19179 2.12931,0.29043 3.33851,0.60276 3.33851,0.60276 -1e-5,0 -0.0784,-0.64118 1.03609,-0.79455 0.74757,-0.10289 5.16929,-0.15123 7.31019,-0.1096 2.0695,0.0403 5.36605,0.40716 7.2814,1.36992 1.00332,0.50433 3.03564,1.56863 4.79535,2.53571 l 0.0956,-0.0194 c 0,0 -3.58034,-2.16242 -4.43047,-2.65327 -0.20331,-0.11739 -0.57463,-0.25769 -1.03609,-0.41098 1.22062,-0.44779 5.04719,-0.61971 7.79945,-0.71235 3.0245,-0.10182 3.34775,-0.0896 5.41069,0.19179 1.95316,0.2664 3.01292,0.53006 3.19461,0.57536 0,0 -0.0271,-0.31146 -0.0271,-0.31146 -0.40903,-0.13645 -0.71424,-0.23335 -1.40038,-0.35748 -1.30081,-0.23533 -3.39912,-0.60156 -5.50857,-0.56398 -3.57195,0.0636 -9.05328,0.35596 -10.30334,0.90414 -1.60583,-0.44695 -3.87662,-0.8813 -5.87117,-0.87674 -3.07199,0.007 -4.99348,0.009 -6.93605,0 -1.94256,-0.009 -1.71268,0.27907 -2.93558,0.27398 -1.32191,-0.005 -1.76612,-0.35463 -5.55459,-0.30138 0,0 0,0 0,0"
- id="path7937"
- sodipodi:nodetypes="ccssscsssscscsscsssccscssccsscssscc"
- inkscape:connector-curvature="0" />
- <path
- sodipodi:nodetypes="cssccsscc"
- style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 206.1989,-407.47878 c 1.92021,0.81706 4.57715,2.19283 6.15897,3.39739 1.58184,1.20456 2.90757,1.77368 5.55459,3.91795 0.88557,0.71738 1.74865,1.34985 2.59193,1.92174 l 0.54057,-0.19011 c -0.71323,-0.48339 -1.46776,-1.02031 -2.26909,-1.62203 -2.82223,-2.11921 -3.62655,-2.80973 -6.01507,-4.27414 -2.38854,-1.4644 -4.09948,-2.36576 -6.5619,-3.1508 0,0 0,0 0,0"
- id="path7939"
- inkscape:connector-curvature="0" />
- <path
- sodipodi:nodetypes="csccscc"
- style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 237.79963,-407.47878 c 1.92021,0.81706 4.60594,2.19283 6.18775,3.39739 0.81307,0.61916 1.55849,1.07042 2.45046,1.65401 l 0.649,-0.11666 c -0.79831,-0.57637 -1.57177,-1.09435 -2.69653,-1.78394 -2.38854,-1.4644 -4.12826,-2.36576 -6.59068,-3.1508 0,0 0,0 0,0"
- id="path7941"
- inkscape:connector-curvature="0" />
- </g>
- <g
- id="g7943"
- clip-path="url(#clipPath7606)">
- <path
- sodipodi:nodetypes="czzzzzzzzzzzzzz"
- id="path7945"
- d="m 1056.25,-278.80481 c 4.1446,-1.47877 10,3.125 10,3.125 0.899,0.28092 2.7251,-0.89447 2.6243,-1.68614 0,0 -1.5503,-1.86062 -0.3743,-2.93886 1.176,-1.07824 5.296,1.50738 7.5,1.625 2.204,0.11762 5.5621,-0.22941 7,-0.75 1.4379,-0.52059 1.1129,-1.42459 2.625,-1.75 1.5121,-0.32541 5.1189,1.03754 7.0605,1.16883 1.9416,0.13129 4.6481,0.33427 5.8145,-0.16883 1.1664,-0.5031 0.1782,-1.15921 1.875,-1.875 1.6968,-0.71579 7.7602,-0.95662 9.625,-0.125 1.8648,0.83162 1.8099,0.5192 2.625,3 0.8151,2.4808 7.4398,5.16285 -1.125,13.375 -8.5648,8.21215 -59.3779,13.78594 -65.625,2.75 -6.2471,-11.03594 6.2304,-14.27123 10.375,-15.75 z"
- style="display:inline;opacity:0.75;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7578);enable-background:new"
- inkscape:connector-curvature="0" />
- <path
- sodipodi:nodetypes="czzzzzzzzzzzzzz"
- id="path7947"
- d="m 1058.5,-275.42981 c 4.1446,-1.47877 10,3.125 10,3.125 0.899,0.28092 2.7251,-0.89447 2.6243,-1.68614 0,0 -1.5503,-1.86062 -0.3743,-2.93886 1.176,-1.07824 5.296,1.50738 7.5,1.625 2.204,0.11762 5.5621,-0.22941 7,-0.75 1.4379,-0.52059 1.1129,-1.42459 2.625,-1.75 1.5121,-0.32541 5.1189,1.03754 7.0605,1.16883 1.9416,0.13129 4.6481,0.33427 5.8145,-0.16883 1.1664,-0.5031 0.1782,-1.15921 1.875,-1.875 1.6968,-0.71579 7.7602,-0.95662 9.625,-0.125 1.8648,0.83162 1.8099,0.5192 2.625,3 0.8151,2.4808 7.4398,5.16285 -1.125,13.375 -8.5648,8.21215 -59.3779,13.78594 -65.625,2.75 -6.2471,-11.03594 6.2304,-14.27123 10.375,-15.75 z"
- style="display:inline;opacity:0.75;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7594);enable-background:new"
- inkscape:connector-curvature="0" />
- </g>
- </g>
- <path
- sodipodi:nodetypes="cscccccccccccc"
- id="path7949"
- d="m 628.24553,347.99185 c -3.80443,-25.26423 -16.80972,-50.63802 -17.1568,-75.52523 -0.18626,-13.35552 3.27285,-26.57091 13.75553,-39.55405 36.34702,-65.29583 116.94091,-84.69468 185.93466,-91.46542 86.92239,-11.0168 184.91267,17.94007 233.37138,95.40128 54.124,75.7333 56.6747,172.53912 80.612,259.52795 29.4378,127.1276 54.7791,256.21414 60.3922,386.85035 -3.0634,78.18185 -8.4263,165.18417 -60.5032,228.13417 -48.0265,50.3574 -122.7864,50.053 -187.06985,59.0023 -90.55539,4.655 -184.35153,-16.1458 -261.7839,-64.1982 -64.77564,-37.94 -95.73019,-113.47867 -97.2794,-186.01962 -8.38917,-79.87516 26.39152,-153.80851 51.6204,-227.15961 7.47061,-82.76107 9.41286,-166.24775 9.65334,-249.38484 -0.83682,-32.19544 -7.08953,-63.81733 -11.54636,-95.60908 z"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#101414;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- inkscape:connector-curvature="0" />
- <path
- transform="translate(276,136)"
- clip-path="url(#clipPath8616)"
- sodipodi:nodetypes="ccccczzzcc"
- id="path7951"
- d="m 311.83409,415.43155 9.8995,121.62237 -60.10408,136.47161 15.55635,174.65537 c 15.61326,61.8792 32.18545,98.66905 74.37615,117.05383 4.31911,-36.23998 -38.61152,-142.95988 -39.24264,-189.11984 -0.63145,-46.18445 10.83034,-108.60786 30.67767,-158.29647 20.04835,-50.19188 36.89674,-44.84642 42.12489,-92.59293 5.22815,-47.74651 -17.4264,-149.39192 -17.4264,-149.39192 l -55.86144,39.59798 z"
- style="display:inline;overflow:visible;visibility:visible;opacity:0.25;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter8940);enable-background:accumulate"
- inkscape:connector-curvature="0" />
- <path
- sodipodi:nodetypes="czcc"
- id="path7953"
- d="m 1010.0312,655.49186 c 0,0 16.7552,37.01806 28.7015,53.95395 11.9462,16.93589 52.7271,56.04605 52.7271,56.04605 l 52.5972,-127.58975"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#linearGradient8970);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- inkscape:connector-curvature="0" />
- <path
- transform="translate(276,136)"
- clip-path="url(#clipPath8209)"
- sodipodi:nodetypes="cccc"
- id="path7955"
- d="m 730.31998,536.56864 c 0,8.48528 42.54774,58.46803 42.54774,58.46803 l 12.60659,-28.76954 -55.15433,-29.69849 z"
- style="display:inline;overflow:visible;visibility:visible;opacity:0.07999998;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter8822);enable-background:accumulate"
- inkscape:connector-curvature="0" />
- <g
- clip-path="url(#clipPath3998)"
- id="g7957"
- style="display:inline;opacity:1;enable-background:new"
- transform="translate(450.03125,73.843964)">
- <g
- id="g7959"
- style="filter:url(#filter3677)"
- transform="translate(-174.03125,62.156036)">
- <g
- style="filter:url(#filter3785)"
- id="g7961">
- <path
- style="display:inline;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 425.88244,476.99186 c 10.80543,-1.47866 24.74401,3.35451 44.64286,3.21428 19.89885,-0.14023 57.45322,-16.91122 82.14285,-17.14286 24.68963,-0.23164 62.7517,12.28406 79.28572,15 16.53402,2.71594 22.84832,-0.15852 27.49999,7.85715 4.65167,8.01567 1.92671,10.74724 -10.35714,20.71429 -12.28385,9.96705 -40.78968,12.63632 -66.07143,12.85714 -25.28234,0.22082 -70.38129,7.07852 -95.35714,3.92856 -24.97585,-3.14996 -56.93756,-7.82267 -68.92857,-17.85714 -11.99101,-10.03447 -19.85084,-16.73182 -17.5,-23.92857 2.35084,-7.19675 13.83743,-3.16419 24.64286,-4.64285 z"
- id="path7963"
- sodipodi:nodetypes="czzzzzzzzzz"
- inkscape:connector-curvature="0" />
- <rect
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- id="rect7965"
- width="381.83765"
- height="181.01935"
- x="343.6539"
- y="412.60312" />
- </g>
- <g
- style="filter:url(#filter3785)"
- id="g7967">
- <path
- transform="translate(174.03125,-62.156036)"
- style="display:inline;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 687.14286,452.36218 c -10.46169,9.71443 -86.9796,19.00514 -100.71429,29.28572 -13.73469,10.28058 -14.75252,12.88826 -12.14286,20 2.60966,7.11174 6.54527,9.40572 25.71429,8.57142 19.16902,-0.8343 98.57143,-27.62172 98.57143,-21.42857 l -11.42857,-36.42857 z"
- id="path7969"
- sodipodi:nodetypes="czzzcc"
- inkscape:connector-curvature="0" />
- <rect
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- id="rect7971"
- width="207.8894"
- height="162.63455"
- x="702.86414"
- y="344.82138" />
- </g>
- </g>
- <g
- id="g7973"
- style="display:inline;opacity:0.18000004;enable-background:new"
- transform="translate(-174.03125,62.156036)">
- <g
- style="filter:url(#filter3785)"
- id="g7975">
- <path
- style="display:inline;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 425.88244,476.99186 c 10.80543,-1.47866 24.74401,3.35451 44.64286,3.21428 19.89885,-0.14023 57.45322,-16.91122 82.14285,-17.14286 24.68963,-0.23164 62.7517,12.28406 79.28572,15 16.53402,2.71594 22.84832,-0.15852 27.49999,7.85715 4.65167,8.01567 1.92671,10.74724 -10.35714,20.71429 -12.28385,9.96705 -40.78968,12.63632 -66.07143,12.85714 -25.28234,0.22082 -70.38129,7.07852 -95.35714,3.92856 -24.97585,-3.14996 -56.93756,-7.82267 -68.92857,-17.85714 -11.99101,-10.03447 -19.85084,-16.73182 -17.5,-23.92857 2.35084,-7.19675 13.83743,-3.16419 24.64286,-4.64285 z"
- id="path7977"
- sodipodi:nodetypes="czzzzzzzzzz"
- inkscape:connector-curvature="0" />
- <rect
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- id="rect7979"
- width="381.83765"
- height="181.01935"
- x="343.6539"
- y="412.60312" />
- </g>
- <g
- style="filter:url(#filter3785)"
- id="g7981">
- <path
- transform="translate(174.03125,-62.156036)"
- style="display:inline;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 687.14286,452.36218 c -10.46169,9.71443 -86.9796,19.00514 -100.71429,29.28572 -13.73469,10.28058 -14.75252,12.88826 -12.14286,20 2.60966,7.11174 6.54527,9.40572 25.71429,8.57142 19.16902,-0.8343 98.57143,-27.62172 98.57143,-21.42857 l -11.42857,-36.42857 z"
- id="path7983"
- sodipodi:nodetypes="czzzcc"
- inkscape:connector-curvature="0" />
- <rect
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- id="rect7985"
- width="207.8894"
- height="162.63455"
- x="702.86414"
- y="344.82138" />
- </g>
- </g>
- </g>
- <path
- transform="translate(276,136)"
- sodipodi:nodetypes="cccccscc"
- clip-path="url(#clipPath8604)"
- id="path7987"
- d="M 582.65599,-7.4183011 695.79307,78.848726 804.68752,337.64981 842.87128,545.5392 963.07944,637.46308 c 0,0 -12.72793,-287.08535 -19.799,-313.95541 C 936.20938,296.63761 793.37381,-69.643698 793.37381,-69.643698 L 582.65599,-7.4183011 Z"
- style="display:inline;overflow:visible;visibility:visible;opacity:0.75;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter8802);enable-background:accumulate"
- inkscape:connector-curvature="0" />
- <path
- sodipodi:nodetypes="czzzzzzc"
- id="path7989"
- d="m 964.13839,239.599 c 0,0 8.67732,10.89662 24.10715,11.96428 15.42986,1.06766 49.72166,-39.95267 70.17856,-52.14285 20.4793,-12.20353 47.0464,-26.60225 63.9286,-20.35714 16.8821,6.2451 22.1578,26.43609 27.8571,48.03571 5.6994,21.59961 6.7186,61.81389 -2.6785,92.85715 -9.3972,31.04325 -50.5033,73.10375 -65.3572,103.39285 -14.8539,30.2891 -11.6071,39.82143 -11.6071,39.82143"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#linearGradient8958);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- inkscape:connector-curvature="0" />
- <path
- sodipodi:nodetypes="czzzzc"
- id="path7991"
- d="m 1124.4955,207.63471 c -15.8928,-0.89286 -49.7188,12.10583 -66.0714,24.28572 -16.4386,12.2439 -29.2209,24.1144 -29.2857,52.14285 -0.065,28.20604 13.1191,39.07641 29.1071,46.96429 15.988,7.88789 33.6862,7.11928 51.9643,-11.78571 18.2782,-18.905 14.2857,-111.60715 14.2857,-111.60715 z"
- style="display:inline;opacity:1;fill:url(#radialGradient3315);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- inkscape:connector-curvature="0" />
- <ellipse
- clip-path="url(#clipPath4100)"
- transform="matrix(0.9434749,-0.1239943,0.1440089,1.0957669,451.94827,134.5988)"
- id="path7993"
- style="display:inline;overflow:visible;visibility:visible;opacity:0.75;fill:url(#radialGradient3543);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter4120);enable-background:accumulate"
- cx="385"
- cy="237.00504"
- rx="86.428574"
- ry="73.928574" />
- <path
- mask="url(#mask3684)"
- sodipodi:nodetypes="csczzc"
- id="path7995"
- d="m 527.60588,407.44884 c 0,0 -122.04144,38.40348 -187.51434,9.63181 -65.47289,-28.77166 -74.37725,-124.71847 -74.37725,-124.71847 0,0 73.38158,-80.50393 129.92078,-83.61476 55.82705,-3.07164 90.57386,20.14332 114.87001,65.85171 24.352,45.81348 17.1008,132.84971 17.1008,132.84971 z"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#radialGradient3915);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- transform="translate(450.03125,73.843964)"
- inkscape:connector-curvature="0" />
- <path
- sodipodi:nodetypes="czcc"
- id="path7997"
- d="m 772.17411,393.349 c 0,0 36.21754,-27.38247 51.60714,-35.89286 15.17734,-8.39301 25.71428,-11.60714 35.89285,-11.60714 l -15.53571,66.96428"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#linearGradient3959);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- inkscape:connector-curvature="0" />
- <circle
- transform="translate(449.49554,74.915393)"
- id="path7999"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#radialGradient3933);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- cx="409.28571"
- cy="306.64789"
- r="36.25" />
- <path
- transform="translate(276,136)"
- clip-path="url(#clipPath8616)"
- sodipodi:nodetypes="cccccccccc"
- id="path8001"
- d="m 311.83409,415.43155 9.8995,121.62237 -60.10408,136.47161 15.55635,174.65537 c 15.61326,61.8792 32.18545,98.66905 74.37615,117.05383 4.31911,-36.23998 8.68161,-72.36764 -31.24264,-223.11984 l 17.67767,-69.29647 72.12489,-138.59293 -42.4264,-158.39192 -55.86144,39.59798 z"
- style="display:inline;overflow:visible;visibility:visible;opacity:0.3;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter8806);enable-background:accumulate"
- inkscape:connector-curvature="0" />
- <path
- sodipodi:nodetypes="czzcc"
- id="path8003"
- d="m 635.21025,581.13004 c -14.14214,12.72792 39.23347,34.58015 76.36753,24.04163 37.13406,-10.53852 104.64487,-35.56437 103.23759,-79.19596 -1.40728,-43.63158 -76.36753,-128.69343 -76.36753,-128.69343 L 635.21025,581.13004 Z"
- style="display:inline;overflow:visible;visibility:visible;opacity:0.5;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter8826);enable-background:accumulate"
- inkscape:connector-curvature="0" />
- <circle
- transform="translate(449.67411,74.915393)"
- id="path8005"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#radialGradient3991);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- cx="410"
- cy="306.64789"
- r="23.214285" />
- <circle
- transform="translate(451.99554,73.486821)"
- id="path8007"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter3981);enable-background:accumulate"
- cx="414.28571"
- cy="303.07648"
- r="7.5" />
- <path
- sodipodi:nodetypes="czzzczc"
- id="path8009"
- d="m 789.31696,478.349 c 0,0 7.02281,19.56859 -1.07143,35 -8.09424,15.43141 -42.32317,38.98822 -67.49999,50 -25.30972,11.06991 -85.473,32.96393 -101.78572,41.96428 -16.46148,9.08243 -18.21428,12.67857 -18.21428,12.67857 0,0 -7.14693,-19.06441 28.74999,-51.7857 36.17211,-32.97214 142.02712,-48.0495 159.82143,-87.85715 z"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#radialGradient4112);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- inkscape:connector-curvature="0" />
- <g
- transform="translate(780.74553,74.55825)"
- id="g8011"
- style="display:inline;opacity:1;enable-background:new">
- <path
- style="display:inline;opacity:1;fill:url(#radialGradient3585);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:20.79999924;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
- d="m 179.64286,267.36218 c -22.41044,39.70292 -60.6161,115.78029 -69.28571,149.64286 -8.64721,33.7751 -8.77199,66.41654 -0.35715,86.42858 8.3602,19.88213 26.16398,35.6328 40.71428,41.42856 -0.59638,-14.37587 14.37295,-43.28583 72.85715,-72.5 58.62627,-29.28514 78.38163,-27.13086 103.57142,-47.14286 25.63006,-20.36176 8.20587,-79.64664 3.21429,-93.92857 -4.99158,-14.28193 -1.23663,-3.37974 -1.94602,-5.09301 -10.68928,-25.81592 -34.21432,-54.4303 -64.48255,-64.54984 -30.26823,-10.11954 -65.01776,-4.84837 -84.28571,5.71428 z"
- id="path8013"
- sodipodi:nodetypes="czzczzzszc"
- clip-path="url(#clipPath3999)"
- transform="translate(-329.81481,0)"
- inkscape:connector-curvature="0" />
- <ellipse
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#radialGradient4060);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:20.79999924;stroke-linecap:butt;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- id="path8015"
- transform="matrix(0.8823874,0.4705236,-0.4705236,0.8823874,-166.62245,2.387362)"
- cx="183.57143"
- cy="338.07648"
- rx="64.715881"
- ry="134.00607" />
- <ellipse
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#radialGradient4062);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:20.79999924;stroke-linecap:butt;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- id="path8017"
- transform="matrix(0.8823874,0.4705236,-0.4705236,0.8823874,-162.19388,-18.755495)"
- cx="183.57143"
- cy="338.07648"
- rx="64.715881"
- ry="134.00607" />
- <path
- style="display:inline;opacity:1;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:url(#linearGradient3587);stroke-width:20.79999924;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;filter:url(#filter4079);enable-background:new"
- d="m 179.64286,267.36218 c -22.41044,39.70292 -60.6161,115.78029 -69.28571,149.64286 -8.64721,33.7751 -8.77199,66.41654 -0.35715,86.42858 8.3602,19.88213 26.16398,35.6328 40.71428,41.42856 -0.59638,-14.37587 14.37295,-43.28583 72.85715,-72.5 58.62627,-29.28514 78.38163,-27.13086 103.57142,-47.14286 25.63006,-20.36176 8.20587,-79.64664 3.21429,-93.92857 -4.99158,-14.28193 -1.23663,-3.37974 -1.94602,-5.09301 -10.68928,-25.81592 -34.21432,-54.4303 -64.48255,-64.54984 -30.26823,-10.11954 -65.01776,-4.84837 -84.28571,5.71428 z"
- id="path8019"
- sodipodi:nodetypes="czzczzzszc"
- clip-path="url(#clipPath3999)"
- transform="translate(-329.81481,3e-7)"
- inkscape:connector-curvature="0" />
- </g>
- <circle
- transform="translate(452.55663,72.581273)"
- id="path8021"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- cx="310.71429"
- cy="398.07648"
- r="19.704132" />
- <circle
- transform="translate(450.55663,72.581273)"
- id="path8023"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#radialGradient4056);fill-opacity:1;fill-rule:evenodd;stroke:url(#linearGradient4082);stroke-width:20.79999924;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter4083);enable-background:accumulate"
- cx="310.71429"
- cy="398.07648"
- r="19.704132" />
- <circle
- transform="translate(450.55663,72.581273)"
- id="path8025"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#radialGradient4119);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- cx="310.71429"
- cy="398.07648"
- r="19.704132" />
- <ellipse
- inkscape:transform-center-y="-3.6935079"
- inkscape:transform-center-x="-47.231976"
- transform="matrix(0.9969564,-0.07796167,0.07796167,0.9969564,436.61877,125.29509)"
- id="path8027"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#radialGradient4868);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:0.99999994px;stroke-linecap:butt;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter4002);enable-background:accumulate"
- cx="429.56738"
- cy="377.42877"
- rx="72.079735"
- ry="44.547726" />
- <ellipse
- inkscape:transform-center-y="-13.056625"
- inkscape:transform-center-x="-20.955902"
- transform="matrix(1.4357951,-0.06999104,0.06999104,1.4357951,235.18065,-63.86546)"
- id="path8029"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#radialGradient4876);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:0.99999994px;stroke-linecap:butt;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter4010);enable-background:accumulate"
- cx="437.6991"
- cy="391.21735"
- rx="36.611931"
- ry="22.627417" />
- <g
- style="display:inline;opacity:1;filter:url(#filter4053);enable-background:new"
- id="g8031"
- transform="translate(450.03125,73.843964)">
- <circle
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:url(#linearGradient4484);stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- id="path8033"
- cx="413.66071"
- cy="401.82648"
- r="3.2142856" />
- <circle
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:url(#linearGradient4486);stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- id="path8035"
- transform="translate(13.125009,8.1249913)"
- cx="413.66071"
- cy="401.82648"
- r="3.2142856" />
- <circle
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:url(#linearGradient4488);stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- id="path8037"
- transform="translate(32.946437,7.4999913)"
- cx="413.66071"
- cy="401.82648"
- r="3.2142856" />
- <circle
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:url(#linearGradient4490);stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- id="path8039"
- transform="translate(24.910723,-10.267866)"
- cx="413.66071"
- cy="401.82648"
- r="3.2142856" />
- <circle
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:url(#linearGradient4492);stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- id="path8041"
- transform="translate(47.589294,-0.6250087)"
- cx="413.66071"
- cy="401.82648"
- r="3.2142856" />
- </g>
- <path
- sodipodi:nodetypes="ccccccccc"
- id="path8043"
- d="m 896.20301,482.92837 c 0.98509,4.35008 4.53707,6.17948 7.38673,7.89182 4.46068,2.51292 6.52016,1.52211 9.15451,-0.75761 1.60195,-1.92117 10.68311,-4.69865 15.59423,-7.07107 4.32961,-1.45891 8.9033,-5.35873 13.38452,-8.33376 3.39514,-1.62724 5.34664,0.35464 7.82868,1.01015 2.94412,0.71661 4.41117,2.17175 6.06092,3.53554 2.39616,1.17519 -0.9279,3.14313 3.283,4.29314 1.19091,0.21794 2.41695,0.57645 3.28299,-0.50507"
- style="display:inline;opacity:1;fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- inkscape:connector-curvature="0" />
- <path
- sodipodi:nodetypes="cccccccccccc"
- id="path8045"
- d="m 910.85021,475.35223 c 2.31494,-0.032 3.17778,0.64253 5.49271,-0.82075 3.45564,-3.08113 5.40254,-3.14477 7.95495,-4.41942 3.02657,-1.31523 6.5357,8.15169 10.10153,9.84899 2.39509,-0.82142 1.28914,1.79379 1.45209,2.65165 0.0571,2.64684 2.80694,3.67806 4.35628,5.42957 3.31604,2.25549 7.37523,6.29546 11.11168,5.3033 6.44525,-2.93107 10.27922,-1.28146 16.28871,-7.38674 0.70405,-1.18134 -0.58425,-6.8946 3.09359,-7.19734 2.52399,0.25338 4.16667,0.0502 6.06092,0.56822 5.441,2.11719 7.73778,6.45 14.71034,7.95495 6.1829,0.96639 7.61264,3.79426 13.88959,5.05076"
- style="display:inline;opacity:1;fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- inkscape:connector-curvature="0" />
- <path
- sodipodi:nodetypes="ccccccccccccc"
- id="path8047"
- d="m 876.98133,483.52197 c 2.39858,-0.7938 6.10613,4.1921 8.17313,7.04568 0.59281,2.67952 1.15377,5.48645 0.75761,12.12183 0.78513,2.41754 2.68049,3.03095 4.79823,3.283 3.11745,-0.53678 5.87669,-1.3243 7.3236,-3.03046 1.8716,-1.94167 5.31253,2.39394 8.08122,4.04061 3.61009,1.91209 7.77378,1.97886 11.8693,2.27284 1.70358,-0.23064 2.3704,4.51515 3.28299,8.08123 0.38414,4.37806 -0.88544,6.89569 -1.76776,9.84898 -0.2943,2.49655 2.9885,3.52974 6.31345,4.54569 3.18244,0.74124 6.54424,1.66184 9.09137,1.76777 5.14186,0.87491 8.08874,2.69052 12.12183,4.04061 2.23914,0.81655 3.26019,2.24216 4.54569,3.53553"
- style="display:inline;opacity:1;fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- inkscape:connector-curvature="0" />
- <path
- transform="translate(276,136)"
- clip-path="url(#clipPath8514)"
- id="path8049"
- d="m 332,187.69519 c 0,0 57.5,-25.5 57.5,-28 0,-2.5 5.5,-52 5.5,-52 0,0 91,-48.500001 91.5,-50.500001 0.5,-2 86,-62.0000004 86,-62.0000004 L 386.5,17.195189 311,123.19519 l 21,64.5 z"
- style="display:inline;opacity:0.25;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter8814);enable-background:new"
- inkscape:connector-curvature="0" />
- <path
- id="path8051"
- d="m 1697.2846,722.5514 c 0,0 -115.9655,73.5391 -123.0365,77.78174 -7.0711,4.24264 -230.5169,137.17872 -230.5169,137.17872 l 4.2427,39.59798 216.3747,-100.40917 117.3797,-101.82337 15.5563,-52.3259 z"
- style="display:inline;overflow:visible;visibility:visible;opacity:0.25;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- inkscape:connector-curvature="0" />
- <path
- transform="translate(276,136)"
- sodipodi:nodetypes="cccccscccc"
- clip-path="url(#clipPath8610)"
- id="path8053"
- d="m 528.91587,556.85291 c -5.65685,-1.41421 -181.01933,74.95332 -181.01933,74.95332 l -33.94113,181.01934 51.09546,193.94823 257.2031,67.6813 c 0,0 206.47518,152.735 212.13203,148.4924 5.65686,-4.2426 168.2914,-193.7473 168.2914,-193.7473 L 842.87128,845.35248 796.20224,667.16157 528.91587,556.85291 Z"
- style="display:inline;overflow:visible;visibility:visible;opacity:0.5;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter8810);enable-background:accumulate"
- inkscape:connector-curvature="0" />
- <path
- sodipodi:nodetypes="czzzzzzczczczczzzc"
- id="path8055"
- d="m 1097.6433,613.88997 c 0,0 22.6195,-6.50681 35.7427,-5.87273 13.1233,0.63409 30.6416,1.93862 43.7089,12.18619 13.0673,10.24756 25.0677,27.14007 34.1124,58.36965 9.0446,31.22958 1.6983,99.25201 -6.1761,143.34735 -7.8743,44.09534 -28.2651,106.11298 -45,140 -16.7348,33.88702 -49.7977,77.49517 -60.5694,89.87617 -11.3642,13.062 -56.2059,36.4262 -79.4306,42.2667 5.3034,-10.6066 48.8998,-50.5889 35,-60.7143 -14.0189,-10.2123 -45.76,45.9824 -84.2931,29.0332 21.38231,-13.1321 41.7794,-51.1861 34.0406,-66.59448 -7.84024,-15.61039 -30.70492,48.75758 -93.53553,37.01288 30.05204,-27.5267 55.40706,-70.90401 41.2627,-82.9797 -14.41516,-12.30687 -60.46175,54.2932 -60.46175,54.2932 0,0 -2.8219,-41.70123 13.7732,-68.60737 16.63935,-26.97787 79.65297,-81.61527 99.55308,-111.70342 19.9002,-30.08814 33.6126,-66.00902 42.1355,-92.51794 8.5228,-26.50892 15.8009,-77.09954 15.8009,-77.09954"
- style="display:inline;opacity:1;fill:#0c0c0c;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- inkscape:connector-curvature="0" />
- <path
- transform="translate(276,136)"
- sodipodi:nodetypes="cccccccccc"
- clip-path="url(#clipPath8622)"
- id="path8057"
- d="m 770.74639,609.17881 -50.91169,97.58074 -79.90307,111.01576 34.64824,71.41778 42.42641,79.19597 72.12489,-45.25484 14.14214,-192.33305 21.2132,-138.59292 -14.14214,-90.15612 -39.59798,107.12668 z"
- style="display:inline;overflow:visible;visibility:visible;opacity:0.25;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter8818);enable-background:accumulate"
- inkscape:connector-curvature="0" />
- <path
- transform="translate(276,136)"
- sodipodi:nodetypes="cczcccccc"
- clip-path="url(#clipPath8906)"
- id="path8059"
- d="m 295,846.19519 6.64488,-68.92285 c 0,0 90.31951,89.00457 162.35512,122.92285 72.03561,33.91828 308,62 308,62 l 154,-26 -36,162.00001 -286,26 -298,-89 -11,-189.00001 z"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter8810);enable-background:accumulate"
- inkscape:connector-curvature="0" />
- <path
- clip-path="url(#clipPath3602)"
- sodipodi:nodetypes="cccccccccccc"
- id="path8061"
- d="m 405.79629,845.99023 74.95332,65.05383 2.49963,16.8804 19.40336,10.15891 6.49204,23.05109 31.70905,-8.3711 14.84924,48.08324 c 12.25652,12.7279 89.79344,-113.1097 55.86143,38.1838 l -60.81118,16.2635 -89.20292,-94.69286 -62.82503,-53.79963 7.07106,-60.81118 z"
- style="display:inline;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter3587);enable-background:new"
- transform="translate(450.03125,73.843964)"
- inkscape:connector-curvature="0" />
- <path
- sodipodi:nodetypes="czzzzzzzzcc"
- id="path8063"
- d="m 1159.317,918.349 c 54.2857,-1.42857 126.035,-15.05199 170,-26.78572 44.0527,-11.75714 125.8863,-36.34724 175.357,-57.85714 49.3393,-21.45272 113.6038,-59.2816 154.2859,-92.14285 40.5081,-32.72069 52.3899,-55.81981 60.7142,-33.57143 8.3691,22.36779 -16.4069,56.32562 -37.8571,81.07143 -21.6042,24.9234 -52.7314,52.70533 -98.9287,89.28571 -46.1973,36.58038 -156.0825,101.58463 -212.8571,128.5714 -57.066,27.1254 -128.2033,58.2385 -172.1428,72.5001 -43.9395,14.2616 -131.4286,31.0714 -131.4286,31.0714 L 1159.317,918.349 Z"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#121212;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- inkscape:connector-curvature="0" />
- <path
- clip-path="url(#clipPath3992)"
- sodipodi:nodetypes="czczzcc"
- id="path8065"
- d="m 1241.5965,652.95007 c 0,0 -64.7215,54.33706 -145.6639,98.99494 -82.0244,45.25484 -284.25704,93.3381 -284.25704,93.3381 0,0 -15.10137,21.05196 45.25489,28.28428 60.35626,7.23232 224.08195,-53.30069 278.60015,-96.16654 54.5182,-42.86585 120.2081,-111.72286 120.2081,-111.72286 l -14.1422,-12.72792 z"
- style="display:inline;overflow:visible;visibility:visible;opacity:0.5;fill:url(#linearGradient3666);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter3779);enable-background:accumulate"
- transform="translate(450.03125,73.843964)"
- inkscape:connector-curvature="0" />
- <g
- clip-path="url(#clipPath3986)"
- id="g8067"
- style="display:inline;opacity:1;enable-background:new"
- transform="translate(450.03125,73.843964)">
- <g
- id="g8069"
- style="filter:url(#filter3677)"
- transform="translate(-174.03125,62.156036)">
- <g
- id="g8071"
- style="filter:url(#filter3785)">
- <path
- transform="translate(174.03125,-62.156036)"
- style="display:inline;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 1094.2857,725.93361 c 0,0 -0.2961,26.16091 4.6428,37.85715 4.9389,11.69624 20.0381,26.48665 28.5715,31.42857 8.5334,4.94192 18.9286,8.57142 18.9286,8.57142 l 117.8571,-115 17.8572,-75.71428 -96.4286,38.57143 -91.4286,74.28571 z"
- id="path8073"
- sodipodi:nodetypes="czzccccc"
- inkscape:connector-curvature="0" />
- <rect
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- id="rect8075"
- width="333.75412"
- height="309.71277"
- x="1197.8389"
- y="486.14224" />
- </g>
- </g>
- <g
- id="g8077"
- style="display:inline;opacity:0.18000004;enable-background:new"
- transform="translate(-174.03125,62.156036)">
- <g
- id="g8079"
- style="filter:url(#filter3785)">
- <path
- transform="translate(174.03125,-62.156036)"
- style="display:inline;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 1094.2857,725.93361 c 0,0 -0.2961,26.16091 4.6428,37.85715 4.9389,11.69624 20.0381,26.48665 28.5715,31.42857 8.5334,4.94192 18.9286,8.57142 18.9286,8.57142 l 117.8571,-115 17.8572,-75.71428 -96.4286,38.57143 -91.4286,74.28571 z"
- id="path8081"
- sodipodi:nodetypes="czzccccc"
- inkscape:connector-curvature="0" />
- <rect
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- id="rect8083"
- width="333.75412"
- height="309.71277"
- x="1197.8389"
- y="486.14224" />
- </g>
- </g>
- </g>
- <path
- sodipodi:nodetypes="cssssccccccssssssssccssssssccssssc"
- clip-path="url(#clipPath3722)"
- id="path8085"
- d="m 1264.1875,605 c -4.4911,0.73268 -8.157,3.45509 -11.9375,6.40625 -10.0813,7.86976 -28.1695,34.42524 -48.0312,50.46875 -39.8674,32.20316 -103.996,69.97701 -152.5626,91.09375 -48.614,21.13738 -130.54122,45.81801 -174.31245,57.5 -43.39821,11.58246 -115.04403,25.13107 -168.25,26.53125 l -4.5625,0.125 -2,4.125 -92.84375,192.125 -6.5,13.4688 14.65625,-2.8438 c 0,0 87.26968,-16.6514 132.34375,-31.2812 44.7252,-14.51667 115.79086,-45.66683 173.03125,-72.87505 C 980.82199,912.46306 1090.1551,847.86412 1137.5,810.375 c 46.3608,-36.70982 77.8049,-64.71682 99.9375,-90.25 10.9011,-12.576 22.7448,-27.53144 31.0313,-42.75 8.2864,-15.21856 19.1597,-44.21808 13.6874,-58.84375 -1.2177,-3.25474 -2.5514,-6.0613 -4.5937,-8.5 -2.0423,-2.4387 -8.4747,-1.57199 -8.5625,-5.03125 -0.2098,-8.26482 -3.3155,-0.24423 -4.8125,0 z m 2.1563,15.21875 c 0.4148,0.58574 1.0311,1.55766 1.7812,3.5625 2.8968,7.74213 -1.4407,31.89875 -8.8125,45.4375 -7.3718,13.53875 -22.6384,28.92394 -33.1875,41.09375 -21.0754,24.31356 -51.9037,51.86156 -97.9375,88.3125 -45.0496,35.67159 -155.46033,101.09459 -211.40625,127.6875 -56.89173,27.04249 -128.09616,58.1184 -171.25,72.125 -36.36491,11.8031 -95.84471,23.8338 -115.71875,27.7813 L 714.09375,851.75 c 54.70691,-2.0493 123.79259,-15.21635 167.125,-26.78125 44.33422,-11.83225 126.07865,-36.33633 176.40625,-58.21875 50.112,-21.78871 112.5344,-61.16816 154.0312,-94.6875 20.6464,-16.67721 41.7449,-42.54588 49.8126,-48.84375 2.437,-1.90242 4.0806,-2.6358 4.875,-3 z"
- style="display:inline;overflow:visible;visibility:visible;opacity:0.83300003;fill:#050505;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:15;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;filter:url(#filter8225);enable-background:accumulate"
- transform="translate(450.03125,73.843964)"
- inkscape:connector-curvature="0" />
- <g
- inkscape:transform-center-y="-12.859654"
- inkscape:transform-center-x="-185.09603"
- transform="matrix(0.9934486,0.1142802,-0.1142802,0.9934486,-9.24324,588.09054)"
- mask="url(#mask7704)"
- id="g8087"
- style="display:inline;opacity:1;enable-background:new">
- <path
- sodipodi:nodetypes="ccssscsssscssssscsssscssssscsssscssssscsssscssssscsssscssssscsssscssscccccssscssssscsssssscssssscsssssscssssscsssssscssssscsssssscssssscsssssscssssscsssssssssssc"
- id="path8089"
- d="m 1111.4062,-285.9375 -3.9374,1.875 c -0.041,0.0102 -0.1,0.0205 -0.125,0.0312 -0.4188,0.21285 -0.1647,0.10058 -0.6563,0.3125 -0.4861,0.20956 -1.7376,0.58419 -4.0937,1.46875 -3.3312,1.25058 -5.8043,2.14984 -7,3.0625 -1.5362,0.0213 -3.7205,0.23331 -5.6563,0.71875 -2.9815,0.74767 -4.8552,1.17401 -6.75,1.59375 -1.8948,0.41973 -1.6755,0.64219 -2.875,0.875 -1.2966,0.25167 -1.7214,-0.009 -5.4375,0.78125 -3.4899,0.74215 -8.8948,1.93107 -10.1562,2.6875 -1.584,-0.18078 -3.8675,-0.32178 -5.8438,-0.0312 -3.0404,0.44696 -4.9162,0.67276 -6.8438,0.90625 -0.6554,0.0794 -1.041,0.20078 -1.3437,0.28125 -0.4262,0.13166 -0.6858,0.26002 -1.375,0.34375 -1.3116,0.15937 -1.7622,-0.15683 -5.5313,0.28125 -3.5539,0.41309 -9.0054,1.27282 -10.25,1.9375 -1.599,-0.29729 -3.8577,-0.53419 -5.8437,-0.34375 -3.0588,0.29332 -4.972,0.48399 -6.9063,0.65625 -1.9342,0.17227 -1.6886,0.42237 -2.9062,0.53125 -1.3162,0.1177 -1.7598,-0.16363 -5.5312,0.25 -3.5421,0.38845 -9.0079,1.20927 -10.2813,1.875 -1.5989,-0.29469 -3.8872,-0.50701 -5.875,-0.3125 -3.05829,0.29925 -4.9412,0.48024 -6.875,0.65625 -0.65749,0.0598 -1.04005,0.17856 -1.34375,0.25 -0.4277,0.11896 -0.6835,0.21807 -1.375,0.28125 -1.316,0.12026 -1.75975,-0.19488 -5.53125,0.21875 -3.55619,0.39002 -9.0056,1.23916 -10.25,1.90625 -1.59869,-0.29418 -3.85985,-0.52372 -5.84375,-0.3125 -3.0557,0.32533 -4.97405,0.52624 -6.90625,0.71875 -1.93219,0.19251 -1.68975,0.44088 -2.90625,0.5625 -1.31488,0.13147 -1.76305,-0.16454 -5.53125,0.28125 -3.53889,0.41866 -8.9777,1.29217 -10.25,1.96875 -1.59759,-0.28104 -3.85995,-0.42043 -5.84375,-0.1875 -3.05198,0.35837 -4.945,0.56786 -6.875,0.78125 -0.65618,0.0726 -1.04065,0.17269 -1.34375,0.25 -0.42679,0.12723 -0.6849,0.2672 -1.375,0.34375 -1.31339,0.14569 -1.76735,-0.17402 -5.53125,0.3125 -3.54888,0.45876 -8.97865,1.41902 -10.21875,2.125 -1.59309,-0.24424 -3.8338,-0.38135 -5.8125,-0.125 -3.04759,0.39482 -4.9507,0.64845 -6.875,0.90625 -1.92429,0.25779 -1.7261,0.49353 -2.9375,0.65625 -1.30949,0.1759 -1.7472,-0.10438 -5.5,0.46875 -3.52429,0.53824 -8.9232,1.69917 -10.1875,2.4375 -1.58749,-0.20354 -3.8455,-0.25473 -5.8125,0.0937 -3.02619,0.53612 -4.8989,0.86169 -6.8125,1.1875 -0.65059,0.11077 -1.0137,0.27094 -1.3125,0.375 -0.42069,0.16488 -0.66345,0.3313 -1.34375,0.46875 -1.2947,0.26159 -1.7271,-0.006 -5.4375,0.8125 -3.49848,0.77195 -8.8459,2.38293 -10.0625,3.21875 -1.5629,-0.0774 -3.7575,0.0853 -5.6875,0.59375 -2.97238,0.78313 -4.8177,1.23209 -6.6875,1.75 -1.87,0.5179 -1.66665,0.76728 -2.84375,1.09375 -1.27249,0.3529 -1.69705,0.10709 -5.34375,1.1875 -3.42468,1.01463 -8.6494,2.93317 -9.875,3.84375 -1.53878,0.0127 -3.7198,0.27222 -5.625,0.875 -2.93098,0.92734 -4.75035,1.45842 -6.59375,2.0625 -0.62679,0.20538 -0.99165,0.39258 -1.28125,0.53125 -0.40758,0.21361 -0.6533,0.40875 -1.3125,0.625 -1.2545,0.41154 -1.68615,0.18904 -5.28125,1.4375 -3.38989,1.17717 -8.59495,3.2137 -9.78125,4.15625 -1.52388,0.0597 -3.65005,0.39487 -5.53125,1.0625 -2.89739,1.02829 -4.69905,1.67548 -6.53125,2.3125 -1.8322,0.63701 -1.62785,0.84854 -2.78125,1.25 -1.24679,0.43396 -1.66355,0.19972 -5.21875,1.5625 -3.3387,1.2798 -8.4871,3.48255 -9.6875,4.46875 -1.50718,0.10769 -3.6357,0.4988 -5.5,1.21875 -2.86818,1.1076 -4.6239,1.78156 -6.4375,2.46875 -0.6167,0.23363 -0.99645,0.44203 -1.28125,0.59375 10e-6,0 0,0.0295 0,0.0312 l -8,3.1875 -12.4759,3.49189 7.92966,19.27772 c -0.59163,1.97357 12.54624,-4.73836 12.54624,-4.73836 0.22641,-0.14468 0.44895,-0.27261 0.71875,-0.375 1.08052,-0.40998 2.1716,-0.21577 6,-1.6875 3.82852,-1.47174 5.22405,-2.00498 5.90625,-2.40625 0.67961,-0.39978 
1.61175,-0.87937 2.21875,-1.53125 1.82692,-0.13775 3.5708,-0.49323 4.9375,-1 2.968,-1.10052 4.87535,-1.80619 6.78125,-2.46875 1.90581,-0.66254 2.35415,-1.41487 3.40625,-1.78125 1.09162,-0.38011 2.1951,-0.16538 6.0625,-1.53125 3.8674,-1.36586 5.28315,-1.82708 5.96875,-2.21875 0.70111,-0.40052 1.7008,-0.93298 2.3125,-1.59375 1.97081,-0.0547 3.81695,-0.38463 5.28125,-0.875 3.00152,-1.00508 4.92615,-1.62171 6.84375,-2.25 1.53861,-0.5041 2.17415,-1.04677 2.90625,-1.4375 0.23022,-0.13431 0.4759,-0.25373 0.75,-0.34375 1.09832,-0.36048 2.18145,-0.0814 6.09375,-1.3125 3.91231,-1.23113 5.366,-1.67295 6.0625,-2.03125 0.69391,-0.35697 1.6301,-0.79261 2.25,-1.40625 1.86521,-0.0227 3.63585,-0.26683 5.03125,-0.6875 3.0304,-0.91354 4.9924,-1.4301 6.9375,-1.96875 1.94512,-0.53864 2.4262,-1.26452 3.5,-1.5625 1.11402,-0.30915 2.22,0.007 6.1875,-1.03125 3.9675,-1.03863 5.4175,-1.43273 6.125,-1.75 0.7348,-0.32959 1.8139,-0.75372 2.4375,-1.375 1.99782,0.116 3.85745,-0.0201 5.34375,-0.375 3.07811,-0.735 5.0834,-1.10094 7.0625,-1.5 1.58791,-0.32018 2.2443,-0.79055 3,-1.09375 0.23751,-0.1068 0.4669,-0.19276 0.75,-0.25 1.13341,-0.22919 2.30465,0.20893 6.34375,-0.5 4.03942,-0.70893 5.50025,-0.92709 6.21875,-1.1875 0.71581,-0.25944 1.70435,-0.56724 2.34375,-1.09375 1.9242,0.23949 3.7479,0.22453 5.1875,0 3.12642,-0.48762 5.15455,-0.70067 7.15625,-0.96875 2.00171,-0.26807 2.48875,-0.94514 3.59375,-1.09375 1.14642,-0.15418 2.27585,0.30157 6.34375,-0.21875 4.06781,-0.52032 5.56025,-0.69573 6.28125,-0.9375 0.73712,-0.24714 1.7981,-0.58623 2.4375,-1.125 2.05,0.33553 3.9737,0.39796 5.5,0.21875 3.1422,-0.36896 5.18,-0.55936 7.1875,-0.78125 1.61082,-0.17802 2.26465,-0.6082 3.03125,-0.84375 0.24091,-0.0855 0.49405,-0.1556 0.78125,-0.1875 1.1497,-0.12772 2.3013,0.34665 6.375,-0.125 4.0737,-0.47165 5.55905,-0.6106 6.28125,-0.84375 0.71941,-0.23227 1.70025,-0.47346 2.34375,-0.96875 1.9363,0.33346 3.77005,0.40424 5.21875,0.25 3.14601,-0.33495 5.1775,-0.51859 7.1875,-0.71875 2.00991,-0.20014 2.48415,-0.82639 3.59375,-0.9375 1.1511,-0.11528 2.2965,0.36506 6.375,-0.0625 4.0785,-0.42756 5.5889,-0.56209 6.3125,-0.78125 0.73922,-0.22386 1.7956,-0.51325 2.4375,-1.03125 2.057,0.39867 4.00185,0.4934 5.53125,0.34375 3.14871,-0.3081 5.1758,-0.47325 7.1875,-0.65625 1.61401,-0.14682 2.26305,-0.56055 3.03125,-0.78125 0.2413,-0.0809 0.49355,-0.12991 0.78125,-0.15625 1.15211,-0.10545 2.2929,0.39275 6.375,0 4.08211,-0.39275 5.5889,-0.53084 6.3125,-0.75 0.7209,-0.21833 1.6996,-0.4477 2.3437,-0.9375 1.9381,0.34999 3.7689,0.45438 5.2188,0.3125 3.1487,-0.3081 5.1758,-0.47325 7.1875,-0.65625 2.0116,-0.18299 2.5142,-0.83802 3.625,-0.9375 1.1523,-0.10323 2.2922,0.38483 6.375,0 4.0829,-0.38482 5.5887,-0.501 6.3125,-0.71875 0.7393,-0.22243 1.7956,-0.51449 2.4375,-1.03125 2.0574,0.40177 4.0029,0.50333 5.5313,0.34375 3.1465,-0.32852 5.177,-0.5227 7.1874,-0.71875 1.613,-0.15729 2.2657,-0.63148 3.0313,-0.875 0.2407,-0.088 0.4632,-0.12137 0.75,-0.15625 1.1483,-0.1397 2.3167,0.33991 6.375,-0.25 4.0583,-0.58992 5.5618,-0.77714 6.2813,-1.03125 0.7166,-0.25316 1.6746,-0.55807 2.3124,-1.09375 1.9197,0.21194 3.72,0.15141 5.1563,-0.0937 3.1191,-0.5324 5.1116,-0.92861 7.0937,-1.3125 1.9821,-0.38387 2.4743,-1.03965 3.5626,-1.28125 1.1287,-0.25066 2.2702,0.11629 6.25,-0.875 3.9795,-0.99127 5.4295,-1.4193 6.125,-1.78125 0.7222,-0.376 1.7617,-0.87058 2.375,-1.53125 1.9629,-0.012 3.7937,-0.29105 5.2187,-0.84375 2.9512,-1.1446 4.8732,-1.86942 6.6875,-2.75 1.4557,-0.70652 2.3191,-1.70203 2.5312,-2 0.2123,-0.29795 0.099,-0.72855 0.125,-0.75 0.043,-0.0352 0.3404,-0.094 
0.5,-0.4375 0.859,-1.84707 2.3232,-5.62764 2.4376,-6.3125 0.1137,-0.68214 0.168,-1.35277 0.2187,-1.75 0.029,-0.2295 -0.1471,-0.8789 -0.125,-0.9375 0.031,-0.082 0.2883,-0.25057 0.3437,-0.5 0.2663,-1.19831 0.089,-2.20736 -0.125,-3.625 -0.2139,-1.41763 -0.9716,-4.61463 -1.625,-5.46875 -0.6589,-0.86172 -1.2248,-1.01051 -1.75,-1 z"
- style="display:inline;fill:#bcb786;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- transform="translate(0.08004571,-0.03125)"
- inkscape:connector-curvature="0" />
- <g
- id="g8091"
- clip-path="url(#clipPath7421)">
- <path
- sodipodi:nodetypes="czscsssscssssscsssscssssscsssscssssscsssscssssscsssscssssscsssscssccsssscscsscssscscsscsssscscsscssscscsscsssscscsscssscscsscsssscscsscssscscsscsssscscsscssscscsscsssscscsscssscscsscc"
- style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7001);enable-background:new"
- d="m 1107.409,-284.04961 c -0.4187,0.21283 -0.1556,0.0939 -0.6472,0.30581 -0.4861,0.20954 -1.7234,0.57439 -4.0796,1.45895 -3.3311,1.25057 -5.8302,2.15344 -7.0259,3.0661 -1.5361,0.0213 -3.7205,0.23331 -5.6563,0.71875 -2.9815,0.74766 -4.8552,1.17401 -6.75,1.59375 -1.8948,0.41972 -1.6755,0.64219 -2.875,0.875 -1.2966,0.25166 -1.7214,-0.009 -5.4375,0.78125 -3.4899,0.74214 -8.8948,1.93107 -10.1562,2.6875 -1.5839,-0.18079 -3.8675,-0.32178 -5.8438,-0.0312 -3.0404,0.44695 -4.9162,0.67276 -6.8437,0.90625 -0.6554,0.0794 -1.0411,0.20078 -1.3438,0.28125 -0.4262,0.13165 -0.6858,0.26002 -1.375,0.34375 -1.3116,0.15936 -1.7622,-0.15683 -5.5312,0.28125 -3.5539,0.41308 -9.0054,1.27282 -10.25,1.9375 -1.599,-0.2973 -3.8578,-0.53419 -5.8438,-0.34375 -3.0588,0.29331 -4.972,0.48399 -6.9062,0.65625 -1.9343,0.17226 -1.6887,0.42237 -2.9063,0.53125 -1.3162,0.11769 -1.7598,-0.16363 -5.5312,0.25 -3.5419,0.38844 -9.0079,1.20927 -10.2813,1.875 -1.5989,-0.2947 -3.88717,-0.50701 -5.875,-0.3125 -3.05824,0.29924 -4.94113,0.48024 -6.875,0.65625 -0.65749,0.0598 -1.04004,0.17856 -1.34375,0.25 -0.42765,0.11895 -0.68351,0.21807 -1.375,0.28125 -1.31596,0.12025 -1.75976,-0.19488 -5.53125,0.21875 -3.55614,0.39001 -9.00554,1.23916 -10.25,1.90625 -1.59863,-0.29419 -3.85984,-0.52372 -5.84375,-0.3125 -3.0556,0.32532 -4.97404,0.52624 -6.90625,0.71875 -1.93221,0.1925 -1.68987,0.44088 -2.90625,0.5625 -1.31488,0.13146 -1.76298,-0.16454 -5.53125,0.28125 -3.53887,0.41865 -8.97768,1.29217 -10.25,1.96875 -1.59755,-0.28105 -3.85996,-0.42043 -5.84375,-0.1875 -3.05198,0.35836 -4.94508,0.56786 -6.875,0.78125 -0.6562,0.0725 -1.04066,0.17269 -1.34375,0.25 -0.42677,0.12722 -0.68491,0.2672 -1.375,0.34375 -1.31333,0.14568 -1.76746,-0.17402 -5.53125,0.3125 -3.54889,0.45875 -8.97863,1.41902 -10.21875,2.125 -1.59305,-0.24424 -3.83381,-0.38135 -5.8125,-0.125 -3.04759,0.39481 -4.95071,0.64845 -6.875,0.90625 -1.92428,0.25779 -1.72611,0.49353 -2.9375,0.65625 -1.30946,0.1759 -1.74719,-0.10438 -5.5,0.46875 -3.52429,0.53824 -8.92315,1.69917 -10.1875,2.4375 -1.5875,-0.20354 -3.8455,-0.25473 -5.8125,0.0937 -3.02617,0.53612 -4.89889,0.86169 -6.8125,1.1875 -0.65061,0.11077 -1.01371,0.27094 -1.3125,0.375 -0.42067,0.16488 -0.66345,0.3313 -1.34375,0.46875 -1.29465,0.26159 -1.72712,-0.006 -5.4375,0.8125 -3.49853,0.77195 -8.84595,2.38293 -10.0625,3.21875 -1.56278,-0.0774 -3.75758,0.0853 -5.6875,0.59375 -2.97244,0.78313 -4.81761,1.23209 -6.6875,1.75 -1.86988,0.5179 -1.6666,0.76728 -2.84375,1.09375 -1.27246,0.3529 -1.69703,0.10709 -5.34375,1.1875 -3.4247,1.01463 -8.64944,2.93317 -9.875,3.84375 -1.53883,0.0127 -3.71983,0.27222 -5.625,0.875 -2.93106,0.92734 -4.75031,1.45842 -6.59375,2.0625 -0.62676,0.20538 -0.99173,0.39258 -1.28125,0.53125 -0.40763,0.21361 -0.65334,0.40875 -1.3125,0.625 -1.25446,0.41154 -1.68611,0.18904 -5.28125,1.4375 -3.38985,1.17717 -8.59498,3.2137 -9.78125,4.15625 -1.52389,0.0597 -3.65005,0.39487 -5.53125,1.0625 -2.89739,1.02829 -4.69908,1.67548 -6.53125,2.3125 -1.83217,0.63701 -1.62785,0.84854 -2.78125,1.25 -1.24678,0.43396 -1.66361,0.19972 -5.21875,1.5625 -3.33867,1.2798 -8.48715,3.48255 -9.6875,4.46875 -1.50718,0.10769 -3.63569,0.4988 -5.5,1.21875 -2.86818,1.1076 -4.6238,1.78156 -6.4375,2.46875 -0.61666,0.23363 -0.99641,0.44203 -1.28125,0.59375 0,0 0,1.09375 0,1.09375 0.11178,-0.22236 0.38599,-0.81743 0.90625,-1.09375 0.69797,-0.37072 4.81363,-1.99337 6.8125,-2.71875 1.65686,-0.60125 4.15389,-1.32868 5.96875,-1.3125 0.30162,0.003 0.58762,0.0509 0.84375,0.0937 1.84249,0.30825 7.46875,1.5625 7.46875,1.5625 -10e-6,0 -6.23349,-1.64675 -7.03125,-1.84375 
-0.19079,-0.0471 -0.53572,-0.0687 -0.96875,-0.0625 1.14546,-0.86971 4.761,-2.39351 7.34375,-3.4375 2.83822,-1.14727 3.11681,-1.25182 5.0625,-1.65625 2.0083,-0.41744 3.15625,-0.5 3.15625,-0.5 0,10e-6 -0.0824,-0.60114 0.96875,-1.125 0.7051,-0.35141 4.88702,-1.8924 6.90625,-2.5625 1.9519,-0.64773 5.0574,-1.3585 6.875,-1 1.86323,0.3675 7.53125,1.8125 7.53125,1.8125 10e-6,0 -6.287,-1.87111 -7.09375,-2.09375 -0.19292,-0.0532 -0.53084,-0.086 -0.96875,-0.0937 1.15834,-0.83288 4.79444,-2.19532 7.40625,-3.15625 2.87016,-1.05601 3.16734,-1.1618 5.125,-1.53125 1.85349,-0.34979 2.85884,-0.42548 3.03125,-0.4375 0.1136,-0.21724 0.37745,-0.81002 0.90625,-1.0625 0.70944,-0.33874 4.92607,-1.71275 6.96875,-2.3125 1.69317,-0.49711 4.24077,-1.03677 6.09375,-0.90625 0.30795,0.0217 0.61349,0.0973 0.875,0.15625 1.88118,0.42432 7.59375,2.03125 7.59375,2.03125 10e-6,0 -6.34174,-2.06525 -7.15625,-2.3125 -0.19479,-0.0591 -0.55788,-0.10394 -1,-0.125 1.16949,-0.79755 4.86302,-2.05622 7.5,-2.9375 2.89781,-0.96847 3.23301,-1.00332 5.21875,-1.28125 2.04965,-0.28689 3.1875,-0.3125 3.1875,-0.3125 -2e-5,0 -0.0728,-0.60697 1,-1.0625 0.7196,-0.30557 4.99098,-1.50075 7.0625,-2 2.00244,-0.48258 5.19849,-0.92829 7.0625,-0.40625 1.91078,0.53515 7.71875,2.5 7.71875,2.5 0,0 -6.42266,-2.42351 -7.25,-2.71875 -0.19784,-0.0706 -0.58216,-0.14039 -1.03125,-0.1875 1.1879,-0.72865 4.91527,-1.77408 7.59375,-2.5 2.94342,-0.79775 3.29208,-0.77083 5.3125,-0.90625 1.91289,-0.12823 2.94705,-0.0711 3.125,-0.0625 0.11728,-0.20366 0.39176,-0.77948 0.9375,-0.96875 0.73219,-0.25394 5.07852,-1.04789 7.1875,-1.375 1.74813,-0.27111 4.40088,-0.4847 6.3125,-0.0937 0.31766,0.065 0.60522,0.18551 0.875,0.28125 1.94074,0.68873 7.84375,3.09375 7.84375,3.09375 10e-6,0 -6.53471,-2.95077 -7.375,-3.3125 -0.20097,-0.0865 -0.57513,-0.16679 -1.03125,-0.25 1.2065,-0.63318 5.02956,-1.3956 7.75,-1.90625 2.98953,-0.56119 3.30023,-0.52954 5.34375,-0.53125 2.10926,-0.002 3.3125,0.125 3.3125,0.125 0,1e-5 -0.0727,-0.63119 1.03125,-0.9375 0.74052,-0.20547 5.12612,-0.83387 7.25,-1.0625 2.05302,-0.22099 5.31863,-0.25222 7.21875,0.46875 1.94779,0.73907 7.84375,3.375 7.84375,3.375 2e-5,0 -6.56288,-3.17897 -7.40625,-3.5625 -0.20168,-0.0917 -0.54221,-0.18621 -1,-0.28125 1.21092,-0.60188 4.98442,-1.24884 7.71875,-1.65625 3.0048,-0.44772 3.32551,-0.4517 5.375,-0.40625 1.94045,0.043 3.00699,0.19423 3.1875,0.21875 0.11892,-0.19316 0.3839,-0.76583 0.9375,-0.90625 0.74271,-0.18838 5.15429,-0.73428 7.28125,-0.9375 1.76303,-0.16842 4.42009,-0.23429 6.34375,0.25 0.31968,0.0805 0.60351,0.20359 0.875,0.3125 1.95293,0.78349 7.90625,3.46875 7.90625,3.46875 -2e-5,0 -6.59191,-3.25348 -7.4375,-3.65625 -0.20222,-0.0963 -0.57226,-0.20703 -1.03125,-0.3125 1.21414,-0.57427 5.04366,-1.12219 7.78125,-1.5 3.00838,-0.4152 3.32307,-0.44263 5.375,-0.375 2.11798,0.0698 3.3125,0.25 3.3125,0.25 -2e-5,0 -0.0773,-0.63741 1.03125,-0.90625 0.74362,-0.18035 5.15176,-0.66355 7.28125,-0.84375 2.05847,-0.17417 5.34324,-0.12432 7.25,0.65625 1.95459,0.80016 7.875,3.53125 7.875,3.53125 -2e-5,0 -6.55993,-3.30876 -7.40625,-3.71875 -0.20237,-0.0981 -0.57186,-0.2031 -1.03125,-0.3125 1.21517,-0.5639 5.01008,-1.1143 7.75,-1.46875 3.01091,-0.38952 3.32131,-0.39765 5.375,-0.3125 1.94439,0.0806 3.00663,0.25324 3.1875,0.28125 0.11916,-0.19086 0.38277,-0.74531 0.9375,-0.875 0.74426,-0.174 5.14993,-0.65047 7.28125,-0.8125 1.76662,-0.13427 4.4497,-0.12571 6.375,0.375 0.32,0.0832 0.6033,0.20127 0.875,0.3125 1.9546,0.80016 7.9063,3.5625 7.9063,3.5625 -10e-5,0 -6.5912,-3.34001 -7.4375,-3.75 -0.2024,-0.0981 -0.5719,-0.20311 
-1.0313,-0.3125 1.2151,-0.5639 5.0413,-1.08306 7.7813,-1.4375 3.0109,-0.38953 3.3525,-0.4289 5.4062,-0.34375 2.1197,0.0879 3.3125,0.3125 3.3125,0.3125 0,0 -0.078,-0.64902 1.0313,-0.90625 0.7443,-0.17256 5.1495,-0.62336 7.2812,-0.78125 2.0606,-0.1526 5.3429,-0.0968 7.25,0.6875 1.955,0.80395 7.875,3.5 7.875,3.5 0,0 -6.5598,-3.27587 -7.4062,-3.6875 -0.2025,-0.0984 -0.5718,-0.20222 -1.0313,-0.3125 1.2154,-0.56154 5.0119,-1.12778 7.75,-1.5 3.009,-0.40905 3.3227,-0.41558 5.375,-0.34375 1.9431,0.068 3.0072,0.16485 3.1875,0.1875 0.1188,-0.1944 0.3846,-0.72881 0.9375,-0.875 0.7418,-0.19612 5.1311,-0.82878 7.25,-1.09375 1.7564,-0.21961 4.4053,-0.33231 6.3125,0.0312 0.3169,0.0604 0.6058,0.18938 0.875,0.28125 1.9362,0.66092 7.8438,2.9375 7.8438,2.9375 -1e-4,0 -6.5367,-2.80655 -7.375,-3.15625 -0.2005,-0.0836 -0.5762,-0.17333 -1.0313,-0.25 1.2037,-0.65046 5.0191,-1.37195 7.7188,-2 2.9667,-0.6902 3.2889,-0.75507 5.3125,-0.875 2.0886,-0.1238 3.2812,-0.0312 3.2812,-0.0312 0,1e-5 -0.087,-0.63205 1,-1.03125 0.7292,-0.2678 5.0472,-1.33797 7.125,-1.8125 2.0085,-0.45869 5.1679,-1.0293 7,-0.625 1.8781,0.41446 13.5782,3.01563 13.5782,3.01563 0,0 -12.3275,-3.02266 -13.1407,-3.26563 -0.1945,-0.0581 -0.5586,-0.10626 -1,-0.125 1.1676,-0.80369 3.5142,-1.6873 6.1094,-2.70312 1.6814,-0.65818 0.9237,-0.37659 2.7759,-1.0036 1.7536,-0.59366 2.4854,-1.01071 2.6304,-1.11299 0.3461,-0.20651 -0.356,-0.12188 -0.5442,-0.0424 z"
- id="path8093"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6949);enable-background:new"
- d="m 1082.625,-275.125 c 1.873,0.39348 4.4961,1.14555 6.0313,1.96875 1.5352,0.82319 2.8222,1.056 5.375,2.5 2.5266,1.42926 4.7958,2.00696 6.9687,2.53125 2.3476,0.56642 5.4354,0.71523 8.8438,1.1875 -1.0889,-0.83975 -6.6074,-1.17245 -8.4063,-1.5625 -1.7989,-0.39006 -3.8941,-1.01616 -6.5937,-2.3125 -2.6997,-1.29634 -3.4944,-1.79896 -5.8125,-2.6875 -2.3182,-0.88854 -4.0044,-1.38314 -6.4063,-1.625 z"
- id="path8095"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6961);enable-background:new"
- d="m 1051.4688,-270 c 1.9053,0.57759 4.5281,1.61572 6.0937,2.59375 1.5656,0.97802 2.8802,1.35981 5.5,3.125 2.593,1.74716 4.9859,2.70927 7.25,3.59375 2.4461,0.95557 5.6826,1.65713 9.4063,3.0625 -1.1896,-1.13784 -7.0631,-2.68675 -8.9375,-3.375 -1.8745,-0.68825 -4.0818,-1.5662 -6.875,-3.28125 -2.7933,-1.71504 -3.5736,-2.2839 -5.9375,-3.40625 -2.3641,-1.12234 -4.0567,-1.83455 -6.5,-2.3125 z"
- id="path8097"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6957);enable-background:new"
- d="m 1020.2188,-266.84375 c 1.9119,0.63811 4.5812,1.75536 6.1562,2.8125 1.5751,1.05715 2.8956,1.50867 5.5313,3.40625 2.6086,1.87821 5.0284,3.03003 7.3125,4.0625 2.4677,1.11545 5.7645,2.1733 9.5312,3.84375 -1.2033,-1.22253 -7.2028,-3.31423 -9.0937,-4.125 -1.891,-0.81077 -4.0649,-1.89379 -6.875,-3.75 -2.8102,-1.8562 -3.6218,-2.47693 -6,-3.71875 -2.3783,-1.2418 -4.1107,-1.97569 -6.5625,-2.53125 z"
- id="path8099"
- inkscape:connector-curvature="0" />
- <path
- sodipodi:nodetypes="cssscscsscsssccscssssssscscsscsssscscssssssscscsscsssscscssssssscscsscsssscscssssssscscsscsssscscssssssscscsscsssccscsscscssscssssscsssssscssssscsssssscssssscsssssscssssscsssssscssssscsssssscssssscsszsszssszzcczzzczzzc"
- style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6997);enable-background:new"
- d="m 1110.1719,-266.89063 c 0.1508,0.0486 0.688,0.631 0.1094,1.48438 -0.8101,1.19459 -5.7049,3.32429 -8.5625,4.125 -2.8449,0.79712 -6.2901,0.97774 -10.5625,-0.375 -4.3016,-1.36195 -5.4697,-2.46872 -10.6563,-4.3125 4.664,2.11517 6.1953,3.95233 10.125,5.34375 1.6207,0.57387 3.3671,0.9396 5.0625,1.03125 -0.4451,0.32563 -1.5303,0.9833 -3.5625,1.59375 -2.7955,0.83969 -6.6491,1.53378 -8.25,1.625 -1.5146,0.0863 -3.142,-0.51249 -3.4375,-0.625 0.1667,0.10308 0.3732,0.37734 -0.25,1.03125 -0.8993,0.94363 -6.1474,1.923 -9.125,2.25 -2.9643,0.32555 -6.5216,-0.016 -10.9062,-1.90625 -3.978,-1.71497 -5.339,-2.91536 -9.4063,-4.75 0,0 0,0.15625 0,0.15625 3.6431,2.09529 5.284,3.88327 8.875,5.5625 1.7302,0.80909 3.5917,1.40876 5.4063,1.71875 -0.5349,0.28676 -1.5578,0.71151 -3.4375,1.03125 -2.869,0.48796 -6.809,0.81614 -8.4375,0.75 -0.8507,-0.0345 -1.7286,-0.18437 -2.4063,-0.40625 -0.6848,-0.21488 -1.1897,-0.44467 -1.3125,-0.5 0.1694,0.10721 0.4311,0.40288 -0.2187,1.03125 -0.9097,0.87962 -6.2461,1.33638 -9.25,1.46875 -2.9905,0.13179 -6.5889,-0.45063 -11,-2.5625 -4.4412,-2.12626 -5.6415,-3.4016 -10.9063,-5.78125 4.7343,2.59704 6.2865,4.6291 10.3438,6.71875 1.6733,0.86185 3.4852,1.49425 5.25,1.9375 -0.4633,0.23332 -1.5894,0.68814 -3.6875,0.9375 -2.8863,0.34298 -6.8346,0.49288 -8.4688,0.375 -1.5462,-0.1115 -3.2312,-0.85696 -3.5312,-1 0.1691,0.12029 0.4138,0.41048 -0.2188,1 -0.9128,0.85073 -6.2441,1.26212 -9.25,1.375 -2.9925,0.11237 -6.5897,-0.49043 -11,-2.59375 -4.00125,-1.90823 -5.38803,-3.13783 -9.46875,-5.09375 -3e-5,0 0,0.15625 0,0.15625 3.65506,2.20392 5.29421,4.05255 8.90625,5.90625 1.74029,0.89315 3.637,1.52827 5.4688,1.96875 -0.54,0.2483 -1.5781,0.61533 -3.4688,0.84375 -2.88568,0.34858 -6.86605,0.52095 -8.5,0.40625 -0.85345,-0.0599 -1.72631,-0.25791 -2.40625,-0.5 -0.6871,-0.2353 -1.18935,-0.47226 -1.3125,-0.53125 0.16998,0.11227 0.46448,0.42225 -0.1875,1.03125 -0.91265,0.8525 -6.27533,1.29337 -9.28125,1.40625 -2.99246,0.11237 -6.59346,-0.52805 -11,-2.59375 -4.43653,-2.07978 -5.64688,-3.33171 -10.90625,-5.65625 4.72938,2.54749 6.29074,4.5778 10.34375,6.625 1.67155,0.84433 3.48554,1.46643 5.25,1.90625 -0.46323,0.23422 -1.5897,0.68407 -3.6875,0.9375 -2.88569,0.34858 -6.8362,0.56952 -8.46875,0.46875 -1.54456,-0.0953 -3.20031,-0.82885 -3.5,-0.96875 0.16899,0.11853 0.38192,0.40385 -0.25,1 -0.91186,0.86028 -6.24665,1.33025 -9.25,1.46875 -2.98995,0.1379 -6.56745,-0.45068 -10.96875,-2.46875 -3.99308,-1.83089 -5.36511,-3.0292 -9.4375,-4.90625 -2e-5,0 0,0.15625 0,0.15625 3.64761,2.13327 5.27033,3.93487 8.875,5.71875 1.73675,0.85951 3.60727,1.45014 5.4375,1.875 -0.53947,0.2529 -1.55063,0.64129 -3.4375,0.90625 -2.87978,0.40436 -6.83813,0.64562 -8.46875,0.5625 -0.85172,-0.0434 -1.7277,-0.20855 -2.40625,-0.4375 -0.68569,-0.22201 -1.1896,-0.44339 -1.3125,-0.5 0.16959,0.10899 0.4319,0.40965 -0.21875,1.03125 -0.91079,0.87014 -6.25021,1.39152 -9.25,1.5625 -2.98633,0.17021 -6.57381,-0.31577 -10.96875,-2.28125 -4.42489,-1.97888 -5.60596,-3.22819 -10.84375,-5.375 4.70997,2.38767 6.27017,4.38873 10.3125,6.34375 1.66715,0.80631 3.46043,1.39658 5.21875,1.78125 -0.46163,0.2487 -1.597,0.71225 -3.6875,1.03125 -2.8756,0.43876 -6.7804,0.7331 -8.40625,0.6875 -1.53823,-0.0431 -3.2328,-0.74522 -3.53125,-0.875 0.16833,0.11282 0.41057,0.41375 -0.21875,1.03125 -0.90812,0.8911 -6.20295,1.52825 -9.1875,1.8125 -2.97118,0.28298 -6.57342,-0.1758 -10.9375,-1.9375 -3.95934,-1.59831 -5.32915,-2.79487 -9.34375,-4.3125 3e-5,0 0,0.15625 0,0.15625 3.5959,1.81135 5.23831,3.58233 8.8125,5.15625 1.72207,0.75835 3.58748,1.28895 5.40625,1.625 
-0.53609,0.27908 -1.56658,0.68763 -3.4375,1.0625 -2.85539,0.5721 -6.78942,1.01939 -8.40625,1.03125 -0.84451,0.006 -1.70608,-0.0809 -2.375,-0.25 -0.67591,-0.16151 -1.16009,-0.32923 -1.28125,-0.375 0.16722,0.094 0.42267,0.38348 -0.21875,1.0625 -0.89787,0.95052 -6.18648,1.91708 -9.125,2.4375 -2.92534,0.51809 -6.43215,0.37424 -10.71875,-1.03125 -4.3158,-1.41507 -5.47277,-2.52994 -10.5625,-3.96875 4.57685,1.75101 6.08855,3.56006 10.03125,5 1.62608,0.59389 3.36885,0.95565 5.09375,1.15625 -0.45285,0.29702 -1.55478,0.88339 -3.59375,1.46875 -2.80472,0.80517 -6.63886,1.57583 -8.21875,1.75 -1.49475,0.1648 -3.11623,-0.31681 -3.40625,-0.40625 0.16356,0.0901 0.39278,0.35993 -0.21875,1.0625 -0.88247,1.01385 -6.04452,2.37165 -8.9375,3.0625 -2.88002,0.68778 -6.3356,0.76002 -10.5625,-0.4375 -3.83485,-1.08645 -5.17258,-2.07237 -9.0625,-3.125 -10e-6,0 0,0.15625 0,0.15625 3.48418,1.39485 5.06941,2.9194 8.53125,4.03125 1.66793,0.53572 3.45578,0.78674 5.21875,0.875 -0.51964,0.35212 -1.50039,0.91452 -3.3125,1.53125 -2.76566,0.94125 -6.59024,1.93537 -8.15625,2.15625 -0.81794,0.11539 -1.6331,0.12283 -2.28125,0.0312 -0.65496,-0.0832 -1.1326,-0.21827 -1.25,-0.25 0.16204,0.0746 0.43399,0.34044 -0.1875,1.09375 -0.87,1.05453 -6.00963,2.65925 -8.875,3.4375 -2.85253,0.77476 -6.25912,0.9582 -10.4375,-0.0937 -4.20683,-1.05913 -5.35669,-2.04166 -10.34375,-3.15625 4.48454,1.45946 5.96935,3.13523 9.8125,4.25 1.58504,0.45977 3.28679,0.63825 4.96875,0.6875 -0.44157,0.33676 -1.51251,1.02773 -3.5,1.78125 -2.73393,1.03649 -6.45198,2.16269 -8,2.4375 -1.46462,0.26002 -3.05958,-0.11654 -3.34375,-0.1875 0.16025,0.0796 0.38044,0.32098 -0.21875,1.0625 -0.86466,1.07006 -5.91652,2.81815 -8.75,3.6875 -2.8208,0.86547 -6.2075,1.15631 -10.34375,0.21875 -3.75259,-0.85061 -5.04785,-1.71647 -8.875,-2.59375 0,0 0,0.15625 0,0.15625 3.42796,1.23779 4.98741,2.6323 8.375,3.53125 1.63216,0.43314 3.36704,0.58301 5.09375,0.5625 -0.50893,0.38417 -1.47675,1.02182 -3.25,1.75 -2.70634,1.11134 -6.43633,2.30781 -7.96875,2.625 -0.8004,0.16569 -1.61231,0.21862 -2.25,0.15625 0,0 0,0.51552 0,0.92229 0,0.26507 0,0.48396 0,0.48396 0.22645,-0.14468 0.44891,-0.27261 0.71875,-0.375 1.08052,-0.40998 2.17161,-0.21577 6,-1.6875 3.82843,-1.47174 5.22412,-2.00498 5.90625,-2.40625 0.6796,-0.39978 1.61165,-0.87937 2.21875,-1.53125 1.82685,-0.13775 3.57075,-0.49323 4.9375,-1 2.96812,-1.10052 4.87537,-1.80619 6.78125,-2.46875 1.90586,-0.66254 2.35409,-1.41487 3.40625,-1.78125 1.09155,-0.38011 2.19511,-0.16538 6.0625,-1.53125 3.86745,-1.36586 5.28316,-1.82708 5.96875,-2.21875 0.70109,-0.40052 1.70081,-0.93298 2.3125,-1.59375 1.9708,-0.0547 3.81685,-0.38463 5.28125,-0.875 3.00148,-1.00508 4.92615,-1.62171 6.84375,-2.25 1.5386,-0.5041 2.17402,-1.04677 2.90625,-1.4375 0.23016,-0.13431 0.47574,-0.25373 0.75,-0.34375 1.09823,-0.36048 2.18145,-0.0814 6.09375,-1.3125 3.91233,-1.23113 5.36605,-1.67295 6.0625,-2.03125 0.69388,-0.35697 1.63015,-0.79261 2.25,-1.40625 1.86521,-0.0227 3.63581,-0.26683 5.03125,-0.6875 3.03043,-0.91354 4.99238,-1.4301 6.9375,-1.96875 1.94511,-0.53864 2.42618,-1.26452 3.5,-1.5625 1.11401,-0.30915 2.21994,0.007 6.1875,-1.03125 3.96761,-1.03863 5.41758,-1.43273 6.125,-1.75 0.73487,-0.32959 1.81383,-0.75372 2.4375,-1.375 1.99774,0.116 3.85743,-0.0201 5.34375,-0.375 3.07811,-0.735 5.08344,-1.10094 7.0625,-1.5 1.58792,-0.32018 2.24429,-0.79055 3,-1.09375 0.23757,-0.1068 0.46695,-0.19276 0.75,-0.25 1.13347,-0.22919 2.30448,0.20893 6.34375,-0.5 4.03933,-0.70893 5.50025,-0.92709 6.21875,-1.1875 0.71586,-0.25944 1.70428,-0.56724 2.34375,-1.09375 1.92427,0.23949 
3.74788,0.22453 5.1875,0 3.12633,-0.48762 5.15455,-0.70067 7.15625,-0.96875 2.00171,-0.26807 2.48869,-0.94514 3.59375,-1.09375 1.14639,-0.15418 2.27592,0.30157 6.34375,-0.21875 4.06784,-0.52032 5.56013,-0.69573 6.28125,-0.9375 0.7371,-0.24714 1.79809,-0.58623 2.4375,-1.125 2.05007,0.33553 3.97378,0.39796 5.5,0.21875 3.14231,-0.36896 5.17994,-0.55936 7.1875,-0.78125 1.61076,-0.17802 2.26467,-0.6082 3.03125,-0.84375 0.24094,-0.0855 0.49412,-0.1556 0.78125,-0.1875 1.14978,-0.12772 2.30129,0.34665 6.375,-0.125 4.07374,-0.47165 5.55909,-0.6106 6.28125,-0.84375 0.71946,-0.23227 1.70024,-0.47346 2.34375,-0.96875 1.93637,0.33346 3.77006,0.40424 5.21875,0.25 3.14602,-0.33495 5.17756,-0.51859 7.1875,-0.71875 2.00996,-0.20014 2.48414,-0.82639 3.59375,-0.9375 1.15114,-0.11528 2.29643,0.36506 6.375,-0.0625 4.07861,-0.42756 5.58886,-0.56209 6.3125,-0.78125 0.73915,-0.22386 1.79572,-0.51325 2.4375,-1.03125 2.0571,0.39867 4.00187,0.4934 5.53125,0.34375 3.14873,-0.3081 5.17584,-0.47325 7.1875,-0.65625 1.61407,-0.14682 2.2631,-0.56055 3.03125,-0.78125 0.24142,-0.0809 0.49353,-0.12991 0.78125,-0.15625 1.15211,-0.10545 2.29296,0.39275 6.375,0 4.08208,-0.39275 5.5889,-0.53084 6.3125,-0.75 0.7209,-0.21833 1.6997,-0.4477 2.3438,-0.9375 1.938,0.34999 3.7688,0.45438 5.2187,0.3125 3.1487,-0.3081 5.1758,-0.47325 7.1875,-0.65625 2.0116,-0.18299 2.5142,-0.83802 3.625,-0.9375 1.1523,-0.10323 2.2922,0.38483 6.375,0 4.0829,-0.38482 5.5887,-0.501 6.3125,-0.71875 0.7393,-0.22243 1.7956,-0.51449 2.4375,-1.03125 2.0574,0.40177 4.0029,0.50333 5.5313,0.34375 3.1466,-0.32852 5.1771,-0.5227 7.1875,-0.71875 1.613,-0.15729 2.2656,-0.63148 3.0312,-0.875 0.2407,-0.088 0.4632,-0.12137 0.75,-0.15625 1.1483,-0.1397 2.3167,0.33991 6.375,-0.25 4.0583,-0.58992 5.5618,-0.77714 6.2813,-1.03125 0.7167,-0.25316 1.6745,-0.55807 2.3125,-1.09375 1.9197,0.21194 3.7199,0.15141 5.1562,-0.0937 3.1191,-0.5324 5.1116,-0.92861 7.0938,-1.3125 1.9821,-0.38387 2.4743,-1.03965 3.5625,-1.28125 1.1288,-0.25066 2.2703,0.11629 6.25,-0.875 3.9796,-0.99128 5.4296,-1.4193 6.125,-1.78125 0.7223,-0.37601 1.7619,-0.87058 2.375,-1.53125 1.963,-0.012 3.7937,-0.29105 5.2187,-0.84375 2.9512,-1.14461 4.8732,-1.86942 6.6875,-2.75 1.4557,-0.70653 2.3348,-1.68641 2.5469,-1.98438 0.2122,-0.29796 0.1118,-0.7453 0.1379,-0.76675 0.043,-0.0352 0.3193,-0.085 0.479,-0.42844 0.8589,-1.84708 2.321,-5.64459 2.4352,-6.32945 0.1137,-0.68216 0.1638,-1.34774 0.2145,-1.74497 0.029,-0.22952 -0.1467,-0.86544 -0.1246,-0.92404 0.031,-0.0821 0.3045,-0.26528 0.3599,-0.51471 0.2663,-1.19833 0.089,-2.19129 -0.1251,-3.60893 -0.214,-1.41764 -0.9837,-4.62214 -1.6369,-5.47626 -0.6589,-0.86172 -1.2229,-1.01117 -1.7479,-1.00066 -0.2086,0.26976 0.1368,0.26309 0.1626,0.31261 0.6806,0.0508 0.934,0.36864 1.4192,0.89662 0.4852,0.52798 1.4428,3.93956 1.5794,5.38995 0.1366,1.45039 0.19,2.8602 -0.088,3.46864 -0.2781,0.60845 -0.9442,0.42864 -1.2366,0.49452 0.531,0.18589 0.8908,0.21322 0.9524,1.05768 0.059,0.81338 -0.1332,1.63969 -0.5198,2.80562 -0.3912,1.18001 -1.8452,4.34998 -2.2857,4.59877 -0.4523,0.25551 -0.9524,0.18199 -1.288,0.0511 z"
- id="path8101"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6953);enable-background:new"
- d="m 988.75,-263.84375 c 1.91161,0.6344 4.55027,1.75841 6.125,2.8125 1.57477,1.05409 2.8961,1.48252 5.5313,3.375 2.6082,1.87314 5.0269,3.01522 7.3125,4.0625 2.4693,1.13147 5.7521,2.15474 9.5312,3.9375 -1.2072,-1.2584 -7.139,-3.36445 -9.0312,-4.1875 -1.8922,-0.82304 -4.128,-1.93049 -6.9375,-3.78125 -2.80961,-1.85075 -3.62224,-2.48154 -6.00005,-3.71875 -2.37782,-1.23719 -4.07988,-1.9492 -6.53125,-2.5 z"
- id="path8103"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6993);enable-background:new"
- d="m 957.5,-260.78125 c 1.91,0.6181 4.58288,1.70934 6.15625,2.75 1.57339,1.04066 2.89608,1.48252 5.53125,3.375 2.60823,1.87315 5.02692,3.01521 7.3125,4.0625 2.46931,1.13147 5.75213,2.15475 9.53125,3.9375 -1.20728,-1.2584 -7.20154,-3.3957 -9.09375,-4.21875 -1.89217,-0.82304 -4.09666,-1.9305 -6.90625,-3.78125 -2.80958,-1.85075 -3.59295,-2.43932 -5.96875,-3.65625 -2.37578,-1.21691 -4.11321,-1.93885 -6.5625,-2.46875 z"
- id="path8105"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6989);enable-background:new"
- d="m 926.09375,-257.375 c 1.90772,0.59745 4.55348,1.66384 6.125,2.6875 1.5715,1.02365 2.87022,1.43971 5.5,3.28125 2.60291,1.82273 5.02887,2.9722 7.3125,4 2.4672,1.11041 5.75535,2.09323 9.53125,3.84375 -1.20623,-1.2481 -7.1719,-3.31809 -9.0625,-4.125 -1.89058,-0.8069 -4.10242,-1.89104 -6.90625,-3.6875 -2.80385,-1.79644 -3.62704,-2.40251 -6,-3.59375 -2.37297,-1.19124 -4.05362,-1.90283 -6.5,-2.40625 z"
- id="path8107"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6985);enable-background:new"
- d="m 894.90625,-253.5625 c 1.90213,0.55355 4.58701,1.58887 6.15625,2.59375 1.56923,1.00487 2.87401,1.40864 5.5,3.21875 2.59912,1.79164 5.00034,2.87189 7.28125,3.875 2.46428,1.08374 5.75984,2.04029 9.53125,3.75 -1.2048,-1.23507 -7.17416,-3.24478 -9.0625,-4.03125 -1.88832,-0.78647 -4.0752,-1.8308 -6.875,-3.59375 -2.79977,-1.76294 -3.59919,-2.36836 -5.96875,-3.53125 -2.36957,-1.16288 -4.12325,-1.83412 -6.5625,-2.28125 z"
- id="path8109"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6965);enable-background:new"
- d="m 863.71875,-248.65625 c 1.88062,0.42909 4.50427,1.38038 6.0625,2.3125 1.55823,0.93211 2.85233,1.25776 5.46875,3 2.58971,1.72444 4.98067,2.70802 7.25,3.625 2.45176,0.99069 5.73959,1.87707 9.5,3.5 -1.20131,-1.20734 -7.15249,-3.06609 -9.03125,-3.78125 -1.87875,-0.71517 -4.0854,-1.68442 -6.875,-3.375 -2.78963,-1.69057 -3.58461,-2.22822 -5.9375,-3.28125 -2.35292,-1.05301 -4.02584,-1.71248 -6.4375,-2 z"
- id="path8111"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6981);enable-background:new"
- d="m 833.15625,-241.375 c 1.84836,0.29644 4.46945,0.97632 6,1.78125 1.53058,0.80493 2.81374,1.05573 5.375,2.53125 2.53504,1.46046 4.89068,2.32509 7.125,3.0625 2.41399,0.79668 5.65711,1.46689 9.375,2.84375 -1.18771,-1.12873 -7.08772,-2.58975 -8.9375,-3.15625 -1.84977,-0.5665 -4.00342,-1.37392 -6.75,-2.84375 -2.74657,-1.46983 -3.50136,-1.92028 -5.8125,-2.78125 -2.31115,-0.86095 -4.00471,-1.32009 -6.375,-1.4375 z"
- id="path8113"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6977);enable-background:new"
- d="m 802.90625,-232.3125 c 1.8222,0.21127 4.36576,0.80057 5.875,1.53125 1.50925,0.73066 2.75568,0.92998 5.28125,2.28125 2.49976,1.33746 4.83154,2.04843 7.03125,2.65625 2.37653,0.65667 5.56464,1.07288 9.21875,2.1875 -1.16735,-1.04496 -6.92888,-2.10329 -8.75,-2.5625 -1.82111,-0.45921 -3.95225,-1.12696 -6.65625,-2.4375 -2.70403,-1.31052 -3.47106,-1.7199 -5.75,-2.46875 -2.27895,-0.74883 -3.91325,-1.17931 -6.25,-1.1875 z"
- id="path8115"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6973);enable-background:new"
- d="m 773.1875,-222.1875 c 1.81109,0.1787 4.32059,0.66506 5.8125,1.34375 1.49194,0.67869 2.7534,0.79822 5.25,2.0625 2.47107,1.25138 4.79005,1.89614 6.96875,2.4375 2.35387,0.58488 5.49134,0.89752 9.09375,1.84375 -1.15084,-0.99116 -6.85251,-1.7833 -8.65625,-2.1875 -1.80372,-0.4042 -3.91553,-1.02116 -6.59375,-2.25 -2.67818,-1.22884 -3.40345,-1.61089 -5.65625,-2.28125 -2.25279,-0.67034 -3.89627,-1.00232 -6.21875,-0.96875 z"
- id="path8117"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6969);enable-background:new"
- d="m 743.5625,-211.1875 c 1.79281,0.12911 4.27313,0.54965 5.75,1.1875 1.4769,0.63785 2.7161,0.74156 5.1875,1.9375 2.44618,1.18372 4.72054,1.74666 6.875,2.21875 2.32767,0.51003 5.4196,0.68064 9,1.5625 -1.14379,-0.9706 -6.74759,-1.59065 -8.53125,-1.9375 -1.78367,-0.34684 -3.88285,-0.88756 -6.53125,-2.03125 -2.64841,-1.14368 -3.39495,-1.51631 -5.625,-2.125 -2.23008,-0.60868 -3.82594,-0.90966 -6.125,-0.8125 z"
- id="path8119"
- inkscape:connector-curvature="0" />
- <g
- style="fill:#ffffff;fill-opacity:1;filter:url(#filter7345)"
- id="g8121">
- <path
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
- d="m 744.9375,-212.11731 c 0,0 7.22229,-3.22318 9.0625,-3.5 1.84021,-0.27682 3.35225,-0.003 6,0.5625 2.64775,0.56573 8.7357,2.21518 11.1875,3.375 2.4518,1.15982 5.3125,3.5625 5.3125,3.5625 0,0 -7.14644,-2.78019 -10.1875,-3.5625 -3.04106,-0.78231 -7.64461,-2.08374 -10.375,-2.3125 -2.73039,-0.22876 -11,1.875 -11,1.875 z"
- id="path8123"
- sodipodi:nodetypes="czzzczzc"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 735.46875,-206.95416 c 0,0 3.65979,-2.22318 5.5,-2.5 1.84021,-0.27682 3.66475,0.24677 6.3125,0.8125 2.64775,0.56573 8.7357,2.21518 11.1875,3.375 2.4518,1.15982 6.5625,2.125 6.5625,2.125 0,0 -8.39644,-1.34269 -11.4375,-2.125 -3.04106,-0.78231 -7.95711,-2.33374 -10.6875,-2.5625 -2.73039,-0.22876 -7.4375,0.875 -7.4375,0.875 z"
- id="path8125"
- sodipodi:nodetypes="czzzczzc"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 759.85042,-217.61116 c 0,0 8.5437,-3.29857 10.39778,-3.45786 1.85409,-0.1593 3.64166,0.4792 6.2481,1.21208 2.60644,0.73288 8.57724,2.76594 10.95036,4.07925 2.37312,1.31331 6.41417,2.53782 6.41417,2.53782 0,0 -8.29413,-1.87365 -11.27931,-2.84767 -2.98519,-0.97402 -7.79269,-2.83478 -10.50302,-3.23662 -2.71033,-0.40184 -12.22808,1.713 -12.22808,1.713 z"
- id="path8127"
- sodipodi:nodetypes="czzzczzc"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 775.19813,-223.2266 c 0,0 7.77133,-2.78244 9.62831,-2.90349 1.85697,-0.12104 3.631,0.55417 6.22178,1.34062 2.59077,0.78645 8.5184,2.94217 10.86394,4.30412 2.34555,1.36195 6.36049,2.6695 6.36049,2.6695 0,0 -8.25373,-2.04423 -11.21821,-3.07958 -2.96447,-1.03535 -7.73259,-2.99481 -10.43406,-3.45243 -2.70147,-0.45763 -11.42225,1.12126 -11.42225,1.12126 z"
- id="path8129"
- sodipodi:nodetypes="czzzczzc"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 789.64298,-227.95417 c 0,0 8.68256,-3.52031 10.54154,-3.60535 1.85897,-0.085 3.61958,0.62442 6.19463,1.46093 2.57505,0.83649 8.45979,3.10666 10.77851,4.5138 2.31872,1.40715 6.30757,2.79224 6.30757,2.79224 0,0 -8.21257,-2.20377 -11.15643,-3.29636 -2.94386,-1.09259 -7.67312,-3.14408 -10.36522,-3.65397 -2.69209,-0.50988 -12.3006,1.78871 -12.3006,1.78871 z"
- id="path8131"
- sodipodi:nodetypes="czzzczzc"
- inkscape:transform-center-x="13.852145"
- inkscape:transform-center-y="-4.3190906"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 804.49513,-233.32948 c 0,0 7.80756,-2.58281 9.66654,-2.66785 1.85897,-0.085 3.61958,0.62442 6.19463,1.46093 2.57505,0.83649 8.45979,3.10666 10.77851,4.5138 2.31872,1.40715 6.30757,2.79224 6.30757,2.79224 0,0 -8.21257,-2.20377 -11.15643,-3.29636 -2.94386,-1.09259 -7.67312,-3.14408 -10.36522,-3.65397 -2.69209,-0.50988 -11.4256,0.85121 -11.4256,0.85121 z"
- id="path8133"
- sodipodi:nodetypes="czzzczzc"
- inkscape:transform-center-x="13.852145"
- inkscape:transform-center-y="-4.3190906"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 819.55763,-237.57948 c 0,0 8.55756,-2.58281 10.41654,-2.66785 1.85897,-0.085 3.61958,0.62442 6.19463,1.46093 2.57505,0.83649 8.45979,3.10666 10.77851,4.5138 2.31872,1.40715 6.30757,2.79224 6.30757,2.79224 0,0 -8.21257,-2.20377 -11.15643,-3.29636 -2.94386,-1.09259 -7.67312,-3.14408 -10.36522,-3.65397 -2.69209,-0.50988 -12.1756,0.85121 -12.1756,0.85121 z"
- id="path8135"
- sodipodi:nodetypes="czzzczzc"
- inkscape:transform-center-x="13.852145"
- inkscape:transform-center-y="-4.3190906"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 836.23395,-242.60125 c 0,0 6.96702,-1.98723 8.82784,-1.96757 1.86081,0.0197 3.57873,0.82702 6.10265,1.80705 2.52393,0.98 8.27166,3.57758 10.50756,5.11291 2.2359,1.53535 6.14053,3.14261 6.14053,3.14261 0,0 -8.07561,-2.66222 -10.95336,-3.91866 -2.87774,-1.25645 -7.48412,-3.5707 -10.14328,-4.23121 -2.65915,-0.66049 -10.48194,0.0549 -10.48194,0.0549 z"
- id="path8137"
- sodipodi:nodetypes="czzzczzc"
- inkscape:transform-center-x="13.64141"
- inkscape:transform-center-y="-4.9269042"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 850.73028,-246.00461 c 0,0 7.68784,-2.02768 9.54782,-1.96854 1.85997,0.0592 3.56038,0.90279 6.06293,1.93616 2.50255,1.03334 8.19387,3.75232 10.39668,5.33475 2.20282,1.58245 6.07245,3.2722 6.07245,3.2722 0,0 -8.01729,-2.83298 -10.86772,-4.15022 -2.85043,-1.31723 -7.40666,-3.72872 -10.0512,-4.4455 -2.64454,-0.71678 -11.16096,0.0211 -11.16096,0.0211 z"
- id="path8139"
- sodipodi:nodetypes="czzzczzc"
- inkscape:transform-center-x="13.55068"
- inkscape:transform-center-y="-5.1542119"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 864.82496,-249.21081 c 0,0 8.16952,-1.96906 10.02688,-1.85396 1.85735,0.11512 3.53158,1.00956 6.0019,2.11779 2.47031,1.10821 8.0772,3.99727 10.23138,5.64531 2.15418,1.64804 5.9712,3.45352 5.9712,3.45352 0,0 -7.92839,-3.07306 -10.73787,-4.4755 -2.80949,-1.40244 -7.29106,-3.94999 -9.91283,-4.74606 -2.62176,-0.79606 -11.58066,-0.1411 -11.58066,-0.1411 z"
- id="path8141"
- sodipodi:nodetypes="czzzczzc"
- inkscape:transform-center-x="13.41151"
- inkscape:transform-center-y="-5.4740887"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 881.38485,-251.60282 c 0,0 8.08536,-1.90809 9.93837,-1.73664 1.853,0.17147 3.4993,1.11633 5.93482,2.29908 2.43553,1.18271 7.95209,4.2407 10.05523,5.95339 2.10314,1.7127 5.86357,3.63326 5.86357,3.63326 0,0 -7.8314,-3.3124 -10.597,-4.7995 -2.76561,-1.48712 -7.16775,-4.16959 -9.76414,-5.04491 -2.59637,-0.87531 -11.43085,-0.30468 -11.43085,-0.30468 z"
- id="path8143"
- sodipodi:nodetypes="czzzczzc"
- inkscape:transform-center-x="13.258805"
- inkscape:transform-center-y="-5.79376"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 896.58415,-254.34724 c 0,0 7.64166,-1.4277 9.49547,-1.26515 1.8538,0.16256 3.50462,1.0995 5.94579,2.27053 2.44118,1.171 7.97238,4.20246 10.08372,5.90502 2.11134,1.70258 5.88096,3.60505 5.88096,3.60505 0,0 -7.84723,-3.27474 -10.61995,-4.74855 -2.77271,-1.4738 -7.18769,-4.13509 -9.78825,-4.99793 -2.60055,-0.86282 -10.99774,-0.76897 -10.99774,-0.76897 z"
- id="path8145"
- sodipodi:nodetypes="czzzczzc"
- inkscape:transform-center-x="13.28378"
- inkscape:transform-center-y="-5.7433893"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 911.45328,-255.98544 c 0,0 8.64166,-1.5527 10.49547,-1.39015 1.8538,0.16256 3.50462,1.0995 5.94579,2.27053 2.44118,1.171 7.97238,4.20246 10.08372,5.90502 2.11134,1.70258 5.88096,3.60505 5.88096,3.60505 0,0 -7.84723,-3.27474 -10.61995,-4.74855 -2.77271,-1.4738 -7.18769,-4.13509 -9.78825,-4.99793 -2.60055,-0.86282 -11.99774,-0.64397 -11.99774,-0.64397 z"
- id="path8147"
- sodipodi:nodetypes="czzzczzc"
- inkscape:transform-center-x="13.28378"
- inkscape:transform-center-y="-5.7433893"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 927.70328,-258.29794 c 0,0 7.64166,-0.8652 9.49547,-0.70265 1.8538,0.16256 3.50462,1.0995 5.94579,2.27053 2.44118,1.171 7.97238,4.20246 10.08372,5.90502 2.11134,1.70258 5.88096,3.60505 5.88096,3.60505 0,0 -7.84723,-3.27474 -10.61995,-4.74855 -2.77271,-1.4738 -7.18769,-4.13509 -9.78825,-4.99793 -2.60055,-0.86282 -10.99774,-1.33147 -10.99774,-1.33147 z"
- id="path8149"
- sodipodi:nodetypes="czzzczzc"
- inkscape:transform-center-x="13.28378"
- inkscape:transform-center-y="-5.7433893"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 942.82828,-259.48544 c 0,0 8.57916,-1.4902 10.43297,-1.32765 1.8538,0.16256 3.50462,1.0995 5.94579,2.27053 2.44118,1.171 7.97238,4.20246 10.08372,5.90502 2.11134,1.70258 5.88096,3.60505 5.88096,3.60505 0,0 -7.84723,-3.27474 -10.61995,-4.74855 -2.77271,-1.4738 -7.18769,-4.13509 -9.78825,-4.99793 -2.60055,-0.86282 -11.93524,-0.70647 -11.93524,-0.70647 z"
- id="path8151"
- sodipodi:nodetypes="czzzczzc"
- inkscape:transform-center-x="13.28378"
- inkscape:transform-center-y="-5.7433893"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 959.07828,-261.54794 c 0,0 7.82916,-0.8027 9.68297,-0.64015 1.8538,0.16256 3.50462,1.0995 5.94579,2.27053 2.44118,1.171 7.97238,4.20246 10.08372,5.90502 2.11134,1.70258 5.88096,3.60505 5.88096,3.60505 0,0 -7.84723,-3.27474 -10.61995,-4.74855 -2.77271,-1.4738 -7.18769,-4.13509 -9.78825,-4.99793 -2.60055,-0.86282 -11.18524,-1.39397 -11.18524,-1.39397 z"
- id="path8153"
- sodipodi:nodetypes="czzzczzc"
- inkscape:transform-center-x="13.28378"
- inkscape:transform-center-y="-5.7433893"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 974.45328,-262.79794 c 0,0 8.39166,-1.1777 10.24547,-1.01515 1.8538,0.16256 3.50462,1.0995 5.94579,2.27053 2.44118,1.171 7.97238,4.20246 10.08376,5.90502 2.1113,1.70258 5.8809,3.60505 5.8809,3.60505 0,0 -7.84721,-3.27474 -10.61993,-4.74855 -2.77271,-1.4738 -7.18769,-4.13509 -9.78825,-4.99793 -2.60055,-0.86282 -11.74774,-1.01897 -11.74774,-1.01897 z"
- id="path8155"
- sodipodi:nodetypes="czzzczzc"
- inkscape:transform-center-x="13.28378"
- inkscape:transform-center-y="-5.7433893"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 990.64078,-264.86044 c 0,0 6.89166,-0.9902 8.74547,-0.82765 1.85385,0.16256 3.50465,1.0995 5.94575,2.27053 2.4412,1.171 7.9724,4.20246 10.0838,5.90502 2.1113,1.70258 5.8809,3.60505 5.8809,3.60505 0,0 -7.8472,-3.27474 -10.6199,-4.74855 -2.7727,-1.4738 -7.1877,-4.13509 -9.7883,-4.99793 -2.60053,-0.86282 -10.24772,-1.20647 -10.24772,-1.20647 z"
- id="path8157"
- sodipodi:nodetypes="czzzczzc"
- inkscape:transform-center-x="13.28378"
- inkscape:transform-center-y="-5.7433893"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 1007.7658,-265.79794 c 0,0 6.8291,-1.1777 8.683,-1.01515 1.8538,0.16256 3.5046,1.0995 5.9457,2.27053 2.4412,1.171 7.9724,4.20246 10.0838,5.90502 2.1113,1.70258 5.8809,3.60505 5.8809,3.60505 0,0 -7.8472,-3.27474 -10.6199,-4.74855 -2.7727,-1.4738 -7.1877,-4.13509 -9.7883,-4.99793 -2.6005,-0.86282 -10.1852,-1.01897 -10.1852,-1.01897 z"
- id="path8159"
- sodipodi:nodetypes="czzzczzc"
- inkscape:transform-center-x="13.28378"
- inkscape:transform-center-y="-5.7433893"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 1023.8908,-267.79794 c 0,0 6.0791,-0.4277 7.933,-0.26515 1.8538,0.16256 3.5046,1.0995 5.9457,2.27053 2.4412,1.171 7.9724,4.20246 10.0838,5.90502 2.1113,1.70258 5.8809,3.60505 5.8809,3.60505 0,0 -7.8472,-3.27474 -10.6199,-4.74855 -2.7727,-1.4738 -7.1877,-4.13509 -9.7883,-4.99793 -2.6005,-0.86282 -9.4352,-1.76897 -9.4352,-1.76897 z"
- id="path8161"
- sodipodi:nodetypes="czzzczzc"
- inkscape:transform-center-x="13.28378"
- inkscape:transform-center-y="-5.7433893"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 1039.7033,-269.17294 c 0,0 6.4541,-0.6777 8.308,-0.51515 1.8538,0.16256 3.5046,1.0995 5.9457,2.27053 2.4412,1.171 7.9724,4.20246 10.0838,5.90502 2.1113,1.70258 5.8809,3.60505 5.8809,3.60505 0,0 -7.8472,-3.27474 -10.6199,-4.74855 -2.7727,-1.4738 -7.1877,-4.13509 -9.7883,-4.99793 -2.6005,-0.86282 -9.8102,-1.51897 -9.8102,-1.51897 z"
- id="path8163"
- sodipodi:nodetypes="czzzczzc"
- inkscape:transform-center-x="13.28378"
- inkscape:transform-center-y="-5.7433893"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 1055.2718,-271.03319 c 0,0 5.4976,-0.90945 7.3578,-0.85348 1.8601,0.056 3.5619,0.89674 6.0661,1.92586 2.5044,1.0291 8.2003,3.7384 10.4058,5.31709 2.2055,1.57871 6.078,3.2619 6.078,3.2619 0,0 -8.022,-2.81939 -10.8748,-4.13178 -2.8526,-1.31238 -7.4129,-3.71613 -10.0587,-4.42843 -2.6457,-0.71228 -8.9742,-1.09116 -8.9742,-1.09116 z"
- id="path8165"
- sodipodi:nodetypes="czzzczzc"
- inkscape:transform-center-x="13.55813"
- inkscape:transform-center-y="-5.1360724"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 1072.7007,-273.48537 c 0,0 4.5472,-1.15581 6.408,-1.18621 1.8607,-0.0304 3.5996,0.73049 6.1489,1.64231 2.5494,0.91177 8.3649,3.35386 10.6414,4.8285 2.2763,1.47468 6.2227,2.97636 6.2227,2.97636 0,0 -8.1442,-2.44411 -11.0547,-3.62272 -2.9105,-1.1786 -7.5774,-3.36815 -10.2534,-3.95691 -2.6759,-0.58875 -8.1129,-0.68133 -8.1129,-0.68133 z"
- id="path8167"
- sodipodi:nodetypes="czzzczzc"
- inkscape:transform-center-x="13.74758"
- inkscape:transform-center-y="-4.6370147"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 1087.1585,-276.5244 c 0,0 5.96,-1.77355 7.8202,-1.83024 1.86,-0.0567 3.6096,0.67955 6.1715,1.55525 2.562,0.87566 2.5226,0.85713 5.3335,1.49015 2.7969,0.62986 7.0767,1.51313 7.0767,1.51313 0,0 -3.6155,-0.0163 -6.7923,-0.46614 -3.1155,-0.44119 -7.3743,-1.69825 -10.0584,-2.24913 -2.6839,-0.55088 -9.5512,-0.013 -9.5512,-0.013 z"
- id="path8169"
- sodipodi:nodetypes="czzzczzc"
- inkscape:transform-center-x="13.79933"
- inkscape:transform-center-y="-4.4842392"
- inkscape:connector-curvature="0" />
- <path
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
- d="m 1099.25,-279.92981 c 0.1612,0.26862 11.2081,-4.60046 12.1875,-4.6875 0.9794,-0.087 2,3.125 2,3.125 0,0 -0.7751,-1.50434 -2.875,-1.0625 -2.0999,0.44184 -11.3009,2.67141 -11.3125,2.625 z"
- id="path8171"
- sodipodi:nodetypes="czczc"
- inkscape:connector-curvature="0" />
- </g>
- <path
- sodipodi:nodetypes="czscsssscssssscsssscssssscsssscssssscsssscssssscsssscssssscsssscssccsssscscsscssscscsscsssscscsscssscscsscsssscscsscssscscsscsssscscsscssscscsscsssscscsscssscscsscsssscscsscssscscsscc"
- style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7333);enable-background:new"
- d="m 1107.4532,-284.0938 c -0.4187,0.21283 -0.1556,0.0939 -0.6472,0.30581 -0.4861,0.20954 -1.7234,0.57439 -4.0796,1.45895 -3.3311,1.25057 -5.8302,2.15344 -7.0259,3.0661 -1.5361,0.0213 -3.7205,0.23331 -5.6563,0.71875 -2.9815,0.74766 -4.8552,1.17401 -6.75,1.59375 -1.8948,0.41972 -1.6755,0.64219 -2.875,0.875 -1.2966,0.25166 -1.7214,-0.009 -5.4375,0.78125 -3.4899,0.74214 -8.8948,1.93107 -10.1562,2.6875 -1.5839,-0.18079 -3.8675,-0.32178 -5.8438,-0.0312 -3.0404,0.44695 -4.9162,0.67276 -6.8437,0.90625 -0.6554,0.0794 -1.0411,0.20078 -1.3438,0.28125 -0.4262,0.13165 -0.6858,0.26002 -1.375,0.34375 -1.3116,0.15936 -1.7622,-0.15683 -5.5312,0.28125 -3.5539,0.41308 -9.0054,1.27282 -10.25,1.9375 -1.599,-0.2973 -3.8578,-0.53419 -5.8438,-0.34375 -3.0588,0.29331 -4.972,0.48399 -6.9062,0.65625 -1.9343,0.17226 -1.6887,0.42237 -2.9063,0.53125 -1.3162,0.11769 -1.7598,-0.16363 -5.5312,0.25 -3.5419,0.38844 -9.0079,1.20927 -10.2813,1.875 -1.5989,-0.2947 -3.88718,-0.50701 -5.87501,-0.3125 -3.05824,0.29924 -4.94113,0.48024 -6.875,0.65625 -0.65749,0.0598 -1.04004,0.17856 -1.34375,0.25 -0.42765,0.11895 -0.68351,0.21807 -1.375,0.28125 -1.31596,0.12025 -1.75976,-0.19488 -5.53125,0.21875 -3.55614,0.39001 -9.00554,1.23916 -10.25,1.90625 -1.59863,-0.29419 -3.85984,-0.52372 -5.84375,-0.3125 -3.0556,0.32532 -4.97404,0.52624 -6.90625,0.71875 -1.93221,0.1925 -1.68987,0.44088 -2.90625,0.5625 -1.31488,0.13146 -1.76298,-0.16454 -5.53125,0.28125 -3.53887,0.41865 -8.97768,1.29217 -10.25,1.96875 -1.59755,-0.28105 -3.85996,-0.42043 -5.84375,-0.1875 -3.05198,0.35836 -4.94508,0.56786 -6.875,0.78125 -0.6562,0.0726 -1.04066,0.17269 -1.34375,0.25 -0.42677,0.12722 -0.68491,0.2672 -1.375,0.34375 -1.31333,0.14568 -1.76746,-0.17402 -5.53125,0.3125 -3.54889,0.45875 -8.97863,1.41902 -10.21875,2.125 -1.59305,-0.24424 -3.83381,-0.38135 -5.8125,-0.125 -3.04759,0.39481 -4.95071,0.64845 -6.875,0.90625 -1.92428,0.25779 -1.72611,0.49353 -2.9375,0.65625 -1.30946,0.1759 -1.74719,-0.10438 -5.5,0.46875 -3.52429,0.53824 -8.92315,1.69917 -10.1875,2.4375 -1.5875,-0.20354 -3.8455,-0.25473 -5.8125,0.0937 -3.02617,0.53612 -4.89889,0.86169 -6.8125,1.1875 -0.65061,0.11077 -1.01371,0.27094 -1.3125,0.375 -0.42067,0.16488 -0.66345,0.3313 -1.34375,0.46875 -1.29465,0.26159 -1.72712,-0.006 -5.4375,0.8125 -3.49853,0.77195 -8.84595,2.38293 -10.0625,3.21875 -1.56278,-0.0775 -3.75758,0.0853 -5.6875,0.59375 -2.97244,0.78313 -4.81761,1.23209 -6.6875,1.75 -1.86988,0.5179 -1.6666,0.76728 -2.84375,1.09375 -1.27246,0.3529 -1.69703,0.10709 -5.34375,1.1875 -3.4247,1.01463 -8.64944,2.93317 -9.875,3.84375 -1.53883,0.0127 -3.71983,0.27222 -5.625,0.875 -2.93106,0.92734 -4.75031,1.45842 -6.59375,2.0625 -0.62676,0.20538 -0.99173,0.39258 -1.28125,0.53125 -0.40763,0.21361 -0.65334,0.40875 -1.3125,0.625 -1.25446,0.41154 -1.68611,0.18904 -5.28125,1.4375 -3.38985,1.17717 -8.59498,3.2137 -9.78125,4.15625 -1.52389,0.0597 -3.65005,0.39487 -5.53125,1.0625 -2.89739,1.02829 -4.69908,1.67548 -6.53125,2.3125 -1.83217,0.63701 -1.62785,0.84854 -2.78125,1.25 -1.24678,0.43396 -1.66361,0.19972 -5.21875,1.5625 -3.33867,1.2798 -8.48715,3.48255 -9.6875,4.46875 -1.50718,0.10769 -3.63569,0.4988 -5.5,1.21875 -2.86818,1.1076 -4.6238,1.78156 -6.4375,2.46875 -0.61666,0.23363 -0.99641,0.44203 -1.28125,0.59375 0,0 0,1.09375 0,1.09375 0.11178,-0.22236 0.38599,-0.81743 0.90625,-1.09375 0.69797,-0.37072 4.81363,-1.99337 6.8125,-2.71875 1.65686,-0.60125 4.15389,-1.32868 5.96875,-1.3125 0.30162,0.003 0.58762,0.0509 0.84375,0.0937 1.84249,0.30825 7.46875,1.5625 7.46875,1.5625 -10e-6,0 -6.23349,-1.64675 -7.03125,-1.84375 
-0.19079,-0.0471 -0.53572,-0.0687 -0.96875,-0.0625 1.14546,-0.86971 4.761,-2.39351 7.34375,-3.4375 2.83822,-1.14727 3.11681,-1.25182 5.0625,-1.65625 2.0083,-0.41744 3.15625,-0.5 3.15625,-0.5 0,1e-5 -0.0824,-0.60114 0.96875,-1.125 0.7051,-0.35141 4.88702,-1.8924 6.90625,-2.5625 1.9519,-0.64773 5.0574,-1.3585 6.875,-1 1.86323,0.3675 7.53125,1.8125 7.53125,1.8125 1e-5,0 -6.287,-1.87111 -7.09375,-2.09375 -0.19292,-0.0533 -0.53084,-0.086 -0.96875,-0.0937 1.15834,-0.83288 4.79444,-2.19532 7.40625,-3.15625 2.87016,-1.05601 3.16734,-1.1618 5.125,-1.53125 1.85349,-0.34979 2.85884,-0.42548 3.03125,-0.4375 0.1136,-0.21724 0.37745,-0.81002 0.90625,-1.0625 0.70944,-0.33874 4.92607,-1.71275 6.96875,-2.3125 1.69317,-0.49711 4.24077,-1.03677 6.09375,-0.90625 0.30795,0.0217 0.61349,0.0973 0.875,0.15625 1.88118,0.42432 7.59375,2.03125 7.59375,2.03125 1e-5,0 -6.34174,-2.06525 -7.15625,-2.3125 -0.19479,-0.0591 -0.55788,-0.10394 -1,-0.125 1.16949,-0.79755 4.86302,-2.05622 7.5,-2.9375 2.89781,-0.96847 3.23301,-1.00332 5.21875,-1.28125 2.04965,-0.28689 3.1875,-0.3125 3.1875,-0.3125 -2e-5,0 -0.0727,-0.60697 1,-1.0625 0.7196,-0.30557 4.99098,-1.50075 7.0625,-2 2.00244,-0.48258 5.19849,-0.92829 7.0625,-0.40625 1.91078,0.53515 7.71875,2.5 7.71875,2.5 0,0 -6.42266,-2.42351 -7.25,-2.71875 -0.19784,-0.0706 -0.58216,-0.14039 -1.03125,-0.1875 1.1879,-0.72865 4.91527,-1.77408 7.59375,-2.5 2.94342,-0.79775 3.29208,-0.77083 5.3125,-0.90625 1.91289,-0.12823 2.94705,-0.0711 3.125,-0.0625 0.11728,-0.20366 0.39176,-0.77948 0.9375,-0.96875 0.73219,-0.25394 5.07852,-1.04789 7.1875,-1.375 1.74813,-0.27111 4.40088,-0.4847 6.3125,-0.0937 0.31766,0.065 0.60522,0.18551 0.875,0.28125 1.94074,0.68873 7.84375,3.09375 7.84375,3.09375 1e-5,0 -6.53471,-2.95077 -7.375,-3.3125 -0.20097,-0.0865 -0.57513,-0.16679 -1.03125,-0.25 1.2065,-0.63318 5.02956,-1.3956 7.75,-1.90625 2.98953,-0.56119 3.30023,-0.52954 5.34375,-0.53125 2.10926,-0.002 3.3125,0.125 3.3125,0.125 0,1e-5 -0.0727,-0.63119 1.03125,-0.9375 0.74052,-0.20547 5.12612,-0.83387 7.25,-1.0625 2.05302,-0.22099 5.31863,-0.25222 7.21875,0.46875 1.94779,0.73907 7.84375,3.375 7.84375,3.375 2e-5,0 -6.56288,-3.17897 -7.40625,-3.5625 -0.20168,-0.0917 -0.54221,-0.18621 -1,-0.28125 1.21092,-0.60188 4.98442,-1.24884 7.71875,-1.65625 3.0048,-0.44772 3.32551,-0.4517 5.375,-0.40625 1.94045,0.043 3.00699,0.19423 3.1875,0.21875 0.11892,-0.19316 0.3839,-0.76583 0.9375,-0.90625 0.74271,-0.18838 5.15429,-0.73428 7.28125,-0.9375 1.76303,-0.16842 4.42009,-0.23429 6.34375,0.25 0.31968,0.0805 0.60351,0.20359 0.875,0.3125 1.95293,0.78349 7.90625,3.46875 7.90625,3.46875 -2e-5,0 -6.59191,-3.25348 -7.4375,-3.65625 -0.20222,-0.0963 -0.57226,-0.20703 -1.03125,-0.3125 1.21414,-0.57427 5.04366,-1.12219 7.78125,-1.5 3.00838,-0.4152 3.32307,-0.44263 5.375,-0.375 2.11798,0.0698 3.3125,0.25 3.3125,0.25 -2e-5,0 -0.0772,-0.63741 1.03125,-0.90625 0.74362,-0.18035 5.15176,-0.66355 7.28125,-0.84375 2.05847,-0.17417 5.34324,-0.12432 7.25,0.65625 1.95459,0.80016 7.875,3.53125 7.875,3.53125 -2e-5,0 -6.55993,-3.30876 -7.40625,-3.71875 -0.20237,-0.0981 -0.57186,-0.2031 -1.03125,-0.3125 1.21517,-0.5639 5.01008,-1.1143 7.75,-1.46875 3.01091,-0.38952 3.32131,-0.39765 5.375,-0.3125 1.94439,0.0806 3.00663,0.25324 3.1875,0.28125 0.11916,-0.19086 0.38277,-0.74531 0.9375,-0.875 0.74426,-0.174 5.14993,-0.65047 7.28125,-0.8125 1.76662,-0.13427 4.44971,-0.12571 6.37501,0.375 0.32,0.0832 0.6033,0.20127 0.875,0.3125 1.9546,0.80016 7.9063,3.5625 7.9063,3.5625 -1e-4,0 -6.5912,-3.34001 -7.4375,-3.75 -0.2024,-0.0981 -0.5719,-0.20311 -1.0313,-0.3125 
1.2151,-0.5639 5.0413,-1.08306 7.7813,-1.4375 3.0109,-0.38953 3.3525,-0.4289 5.4062,-0.34375 2.1197,0.0879 3.3125,0.3125 3.3125,0.3125 0,0 -0.078,-0.64902 1.0313,-0.90625 0.7443,-0.17256 5.1495,-0.62336 7.2812,-0.78125 2.0606,-0.1526 5.3429,-0.0968 7.25,0.6875 1.955,0.80395 7.875,3.5 7.875,3.5 0,0 -6.5598,-3.27587 -7.4062,-3.6875 -0.2025,-0.0984 -0.5718,-0.20222 -1.0313,-0.3125 1.2154,-0.56154 5.0119,-1.12778 7.75,-1.5 3.009,-0.40905 3.3227,-0.41558 5.375,-0.34375 1.9431,0.068 3.0072,0.16485 3.1875,0.1875 0.1188,-0.1944 0.3846,-0.72881 0.9375,-0.875 0.7418,-0.19612 5.1311,-0.82878 7.25,-1.09375 1.7564,-0.21961 4.4053,-0.33231 6.3125,0.0312 0.3169,0.0604 0.6058,0.18938 0.875,0.28125 1.9362,0.66092 7.8438,2.9375 7.8438,2.9375 -10e-5,0 -6.5367,-2.80655 -7.375,-3.15625 -0.2005,-0.0836 -0.5762,-0.17333 -1.0313,-0.25 1.2037,-0.65046 5.0191,-1.37195 7.7188,-2 2.9667,-0.6902 3.2889,-0.75507 5.3125,-0.875 2.0886,-0.1238 3.2812,-0.0312 3.2812,-0.0312 0,1e-5 -0.087,-0.63205 1,-1.03125 0.7292,-0.2678 5.0472,-1.33797 7.125,-1.8125 2.0085,-0.45869 5.1679,-1.0293 7,-0.625 1.8781,0.41446 13.5782,3.01563 13.5782,3.01563 0,0 -12.3275,-3.02266 -13.1407,-3.26563 -0.1945,-0.0581 -0.5586,-0.10626 -1,-0.125 1.1676,-0.80369 3.5142,-1.6873 6.1094,-2.70312 1.6814,-0.65818 0.9237,-0.37659 2.7759,-1.0036 1.7536,-0.59366 2.4854,-1.01071 2.6304,-1.11299 0.3461,-0.20651 -0.356,-0.12188 -0.5442,-0.0424 z"
- id="path8173"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7285);enable-background:new"
- d="m 1082.625,-275.125 c 1.873,0.39348 4.4961,1.14555 6.0313,1.96875 1.5352,0.82319 2.8222,1.056 5.375,2.5 2.5266,1.42926 4.7958,2.00696 6.9687,2.53125 2.3476,0.56642 5.4354,0.71523 8.8438,1.1875 -1.0889,-0.83975 -6.6074,-1.17245 -8.4063,-1.5625 -1.7989,-0.39006 -3.8941,-1.01616 -6.5937,-2.3125 -2.6997,-1.29634 -3.4944,-1.79896 -5.8125,-2.6875 -2.3182,-0.88854 -4.0044,-1.38314 -6.4063,-1.625 z"
- id="path8175"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7289);enable-background:new"
- d="m 1051.4688,-270 c 1.9053,0.57759 4.5281,1.61572 6.0937,2.59375 1.5656,0.97802 2.8802,1.35981 5.5,3.125 2.593,1.74716 4.9859,2.70927 7.25,3.59375 2.4461,0.95557 5.6826,1.65713 9.4063,3.0625 -1.1896,-1.13784 -7.0631,-2.68675 -8.9375,-3.375 -1.8745,-0.68825 -4.0818,-1.5662 -6.875,-3.28125 -2.7933,-1.71504 -3.5736,-2.2839 -5.9375,-3.40625 -2.3641,-1.12234 -4.0567,-1.83455 -6.5,-2.3125 z"
- id="path8177"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7293);enable-background:new"
- d="m 1020.2188,-266.84375 c 1.9119,0.63811 4.5812,1.75536 6.1562,2.8125 1.5751,1.05715 2.8956,1.50867 5.5313,3.40625 2.6086,1.87821 5.0284,3.03003 7.3125,4.0625 2.4677,1.11545 5.7645,2.1733 9.5312,3.84375 -1.2033,-1.22253 -7.2028,-3.31423 -9.0937,-4.125 -1.891,-0.81077 -4.0649,-1.89379 -6.875,-3.75 -2.8102,-1.8562 -3.6218,-2.47693 -6,-3.71875 -2.3783,-1.2418 -4.1107,-1.97569 -6.5625,-2.53125 z"
- id="path8179"
- inkscape:connector-curvature="0" />
- <path
- sodipodi:nodetypes="cssscscsscsssccscssssssscscsscsssscscssssssscscsscsssscscssssssscscsscsssscscssssssscscsscsssscscssssssscscsscsssccscsscscssscssssscsssssscssssscsssssscssssscsssssscssssscsssssscssssscsssssscssssscsszsszssszzcczzzczzzc"
- style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7337);enable-background:new"
- d="m 1110.1719,-266.89063 c 0.1508,0.0486 0.688,0.631 0.1094,1.48438 -0.8101,1.19459 -5.7049,3.32429 -8.5625,4.125 -2.8449,0.79712 -6.2901,0.97774 -10.5625,-0.375 -4.3016,-1.36195 -5.4697,-2.46872 -10.6563,-4.3125 4.664,2.11517 6.1953,3.95233 10.125,5.34375 1.6207,0.57387 3.3671,0.9396 5.0625,1.03125 -0.4451,0.32563 -1.5303,0.9833 -3.5625,1.59375 -2.7955,0.83969 -6.6491,1.53378 -8.25,1.625 -1.5146,0.0863 -3.142,-0.51249 -3.4375,-0.625 0.1667,0.10308 0.3732,0.37734 -0.25,1.03125 -0.8993,0.94363 -6.1474,1.923 -9.125,2.25 -2.9643,0.32555 -6.5216,-0.016 -10.9062,-1.90625 -3.978,-1.71497 -5.339,-2.91536 -9.4063,-4.75 0,0 0,0.15625 0,0.15625 3.6431,2.09529 5.284,3.88327 8.875,5.5625 1.7302,0.80909 3.5917,1.40876 5.4063,1.71875 -0.5349,0.28676 -1.5578,0.71151 -3.4375,1.03125 -2.869,0.48796 -6.809,0.81614 -8.4375,0.75 -0.8507,-0.0345 -1.7286,-0.18437 -2.4063,-0.40625 -0.6848,-0.21488 -1.1897,-0.44467 -1.3125,-0.5 0.1694,0.10721 0.4311,0.40288 -0.2187,1.03125 -0.9097,0.87962 -6.2461,1.33638 -9.25,1.46875 -2.9905,0.13179 -6.5889,-0.45063 -11,-2.5625 -4.4412,-2.12626 -5.6415,-3.4016 -10.9063,-5.78125 4.7343,2.59704 6.2865,4.6291 10.3438,6.71875 1.6733,0.86185 3.4852,1.49425 5.25,1.9375 -0.4633,0.23332 -1.5894,0.68814 -3.6875,0.9375 -2.8863,0.34298 -6.8346,0.49288 -8.4688,0.375 -1.5462,-0.1115 -3.2312,-0.85696 -3.5312,-1 0.1691,0.12029 0.4138,0.41048 -0.2188,1 -0.9128,0.85073 -6.2441,1.26212 -9.25,1.375 -2.9925,0.11237 -6.5897,-0.49043 -11,-2.59375 -4.00125,-1.90823 -5.38803,-3.13783 -9.46875,-5.09375 -3e-5,0 0,0.15625 0,0.15625 3.65506,2.20392 5.29421,4.05255 8.90625,5.90625 1.74029,0.89315 3.637,1.52827 5.4688,1.96875 -0.54,0.2483 -1.5781,0.61533 -3.4688,0.84375 -2.88568,0.34858 -6.86605,0.52095 -8.5,0.40625 -0.85345,-0.0599 -1.72631,-0.25791 -2.40625,-0.5 -0.6871,-0.2353 -1.18935,-0.47226 -1.3125,-0.53125 0.16998,0.11227 0.46448,0.42225 -0.1875,1.03125 -0.91265,0.8525 -6.27533,1.29337 -9.28125,1.40625 -2.99246,0.11237 -6.59346,-0.52805 -11,-2.59375 -4.43653,-2.07978 -5.64688,-3.33171 -10.90625,-5.65625 4.72938,2.54749 6.29074,4.5778 10.34375,6.625 1.67155,0.84433 3.48554,1.46643 5.25,1.90625 -0.46323,0.23422 -1.5897,0.68407 -3.6875,0.9375 -2.88569,0.34858 -6.8362,0.56952 -8.46875,0.46875 -1.54456,-0.0953 -3.20031,-0.82885 -3.5,-0.96875 0.16899,0.11853 0.38192,0.40385 -0.25,1 -0.91186,0.86028 -6.24665,1.33025 -9.25,1.46875 -2.98995,0.1379 -6.56745,-0.45068 -10.96875,-2.46875 -3.99308,-1.83089 -5.36511,-3.0292 -9.4375,-4.90625 -2e-5,0 0,0.15625 0,0.15625 3.64761,2.13327 5.27033,3.93487 8.875,5.71875 1.73675,0.85951 3.60727,1.45014 5.4375,1.875 -0.53947,0.2529 -1.55063,0.64129 -3.4375,0.90625 -2.87978,0.40436 -6.83813,0.64562 -8.46875,0.5625 -0.85172,-0.0434 -1.7277,-0.20855 -2.40625,-0.4375 -0.68569,-0.22201 -1.1896,-0.44339 -1.3125,-0.5 0.16959,0.10899 0.4319,0.40965 -0.21875,1.03125 -0.91079,0.87014 -6.25021,1.39152 -9.25,1.5625 -2.98633,0.17021 -6.57381,-0.31577 -10.96875,-2.28125 -4.42489,-1.97888 -5.60596,-3.22819 -10.84375,-5.375 4.70997,2.38767 6.27017,4.38873 10.3125,6.34375 1.66715,0.80631 3.46043,1.39658 5.21875,1.78125 -0.46163,0.2487 -1.597,0.71225 -3.6875,1.03125 -2.8756,0.43876 -6.7804,0.7331 -8.40625,0.6875 -1.53823,-0.0431 -3.2328,-0.74522 -3.53125,-0.875 0.16833,0.11282 0.41057,0.41375 -0.21875,1.03125 -0.90812,0.8911 -6.20295,1.52825 -9.1875,1.8125 -2.97118,0.28298 -6.57342,-0.1758 -10.9375,-1.9375 -3.95934,-1.59831 -5.32915,-2.79487 -9.34375,-4.3125 3e-5,0 0,0.15625 0,0.15625 3.5959,1.81135 5.23831,3.58233 8.8125,5.15625 1.72207,0.75835 3.58748,1.28895 5.40625,1.625 
-0.53609,0.27908 -1.56658,0.68763 -3.4375,1.0625 -2.85539,0.5721 -6.78942,1.01939 -8.40625,1.03125 -0.84451,0.006 -1.70608,-0.0809 -2.375,-0.25 -0.67591,-0.16151 -1.16009,-0.32923 -1.28125,-0.375 0.16722,0.094 0.42267,0.38348 -0.21875,1.0625 -0.89787,0.95052 -6.18648,1.91708 -9.125,2.4375 -2.92534,0.51809 -6.43215,0.37424 -10.71875,-1.03125 -4.3158,-1.41507 -5.47277,-2.52994 -10.5625,-3.96875 4.57685,1.75101 6.08855,3.56006 10.03125,5 1.62608,0.59389 3.36885,0.95565 5.09375,1.15625 -0.45285,0.29702 -1.55478,0.88339 -3.59375,1.46875 -2.80472,0.80517 -6.63886,1.57583 -8.21875,1.75 -1.49475,0.1648 -3.11623,-0.31681 -3.40625,-0.40625 0.16356,0.0901 0.39278,0.35993 -0.21875,1.0625 -0.88247,1.01385 -6.04452,2.37165 -8.9375,3.0625 -2.88002,0.68778 -6.3356,0.76002 -10.5625,-0.4375 -3.83485,-1.08645 -5.17258,-2.07237 -9.0625,-3.125 -10e-6,0 0,0.15625 0,0.15625 3.48418,1.39485 5.06941,2.9194 8.53125,4.03125 1.66793,0.53572 3.45578,0.78674 5.21875,0.875 -0.51964,0.35212 -1.50039,0.91452 -3.3125,1.53125 -2.76566,0.94125 -6.59024,1.93537 -8.15625,2.15625 -0.81794,0.11539 -1.6331,0.12283 -2.28125,0.0312 -0.65496,-0.0832 -1.1326,-0.21827 -1.25,-0.25 0.16204,0.0746 0.43399,0.34044 -0.1875,1.09375 -0.87,1.05453 -6.00963,2.65925 -8.875,3.4375 -2.85253,0.77476 -6.25912,0.9582 -10.4375,-0.0937 -4.20683,-1.05913 -5.35669,-2.04166 -10.34375,-3.15625 4.48454,1.45946 5.96935,3.13523 9.8125,4.25 1.58504,0.45977 3.28679,0.63825 4.96875,0.6875 -0.44157,0.33676 -1.51251,1.02773 -3.5,1.78125 -2.73393,1.03649 -6.45198,2.16269 -8,2.4375 -1.46462,0.26002 -3.05958,-0.11654 -3.34375,-0.1875 0.16025,0.0796 0.38044,0.32098 -0.21875,1.0625 -0.86466,1.07006 -5.91652,2.81815 -8.75,3.6875 -2.8208,0.86547 -6.2075,1.15631 -10.34375,0.21875 -3.75259,-0.85061 -5.04785,-1.71647 -8.875,-2.59375 0,0 0,0.15625 0,0.15625 3.42796,1.23779 4.98741,2.6323 8.375,3.53125 1.63216,0.43314 3.36704,0.58301 5.09375,0.5625 -0.50893,0.38417 -1.47675,1.02182 -3.25,1.75 -2.70634,1.11134 -6.43633,2.30781 -7.96875,2.625 -0.8004,0.16569 -1.61231,0.21862 -2.25,0.15625 0,0 0,0.51552 0,0.92229 0,0.26507 0,0.48396 0,0.48396 0.22645,-0.14468 0.44891,-0.27261 0.71875,-0.375 1.08052,-0.40998 2.17161,-0.21577 6,-1.6875 3.82843,-1.47174 5.22412,-2.00498 5.90625,-2.40625 0.6796,-0.39978 1.61165,-0.87937 2.21875,-1.53125 1.82685,-0.13775 3.57075,-0.49323 4.9375,-1 2.96812,-1.10052 4.87537,-1.80619 6.78125,-2.46875 1.90586,-0.66254 2.35409,-1.41487 3.40625,-1.78125 1.09155,-0.38011 2.19511,-0.16538 6.0625,-1.53125 3.86745,-1.36586 5.28316,-1.82708 5.96875,-2.21875 0.70109,-0.40052 1.70081,-0.93298 2.3125,-1.59375 1.9708,-0.0547 3.81685,-0.38463 5.28125,-0.875 3.00148,-1.00508 4.92615,-1.62171 6.84375,-2.25 1.5386,-0.5041 2.17402,-1.04677 2.90625,-1.4375 0.23016,-0.13431 0.47574,-0.25373 0.75,-0.34375 1.09823,-0.36048 2.18145,-0.0814 6.09375,-1.3125 3.91233,-1.23113 5.36605,-1.67295 6.0625,-2.03125 0.69388,-0.35697 1.63015,-0.79261 2.25,-1.40625 1.86521,-0.0227 3.63581,-0.26683 5.03125,-0.6875 3.03043,-0.91354 4.99238,-1.4301 6.9375,-1.96875 1.94511,-0.53864 2.42618,-1.26452 3.5,-1.5625 1.11401,-0.30915 2.21994,0.007 6.1875,-1.03125 3.96761,-1.03863 5.41758,-1.43273 6.125,-1.75 0.73487,-0.32959 1.81383,-0.75372 2.4375,-1.375 1.99774,0.116 3.85743,-0.0201 5.34375,-0.375 3.07811,-0.735 5.08344,-1.10094 7.0625,-1.5 1.58792,-0.32018 2.24429,-0.79055 3,-1.09375 0.23757,-0.1068 0.46695,-0.19276 0.75,-0.25 1.13347,-0.22919 2.30448,0.20893 6.34375,-0.5 4.03933,-0.70893 5.50025,-0.92709 6.21875,-1.1875 0.71586,-0.25944 1.70428,-0.56724 2.34375,-1.09375 1.92427,0.23949 
3.74788,0.22453 5.1875,0 3.12633,-0.48762 5.15455,-0.70067 7.15625,-0.96875 2.00171,-0.26807 2.48869,-0.94514 3.59375,-1.09375 1.14639,-0.15418 2.27592,0.30157 6.34375,-0.21875 4.06784,-0.52032 5.56013,-0.69573 6.28125,-0.9375 0.7371,-0.24714 1.79809,-0.58623 2.4375,-1.125 2.05007,0.33553 3.97378,0.39796 5.5,0.21875 3.14231,-0.36896 5.17994,-0.55936 7.1875,-0.78125 1.61076,-0.17802 2.26467,-0.6082 3.03125,-0.84375 0.24094,-0.0855 0.49412,-0.1556 0.78125,-0.1875 1.14978,-0.12772 2.30129,0.34665 6.375,-0.125 4.07374,-0.47165 5.55909,-0.6106 6.28125,-0.84375 0.71946,-0.23227 1.70024,-0.47346 2.34375,-0.96875 1.93637,0.33346 3.77006,0.40424 5.21875,0.25 3.14602,-0.33495 5.17756,-0.51859 7.1875,-0.71875 2.00996,-0.20014 2.48414,-0.82639 3.59375,-0.9375 1.15114,-0.11528 2.29643,0.36506 6.375,-0.0625 4.07861,-0.42756 5.58886,-0.56209 6.3125,-0.78125 0.73915,-0.22386 1.79572,-0.51325 2.4375,-1.03125 2.0571,0.39867 4.00187,0.4934 5.53125,0.34375 3.14873,-0.3081 5.17584,-0.47325 7.1875,-0.65625 1.61407,-0.14682 2.2631,-0.56055 3.03125,-0.78125 0.24142,-0.0809 0.49353,-0.12991 0.78125,-0.15625 1.15211,-0.10545 2.29296,0.39275 6.375,0 4.08208,-0.39275 5.5889,-0.53084 6.3125,-0.75 0.7209,-0.21833 1.6997,-0.4477 2.3438,-0.9375 1.938,0.34999 3.7688,0.45438 5.2187,0.3125 3.1487,-0.3081 5.1758,-0.47325 7.1875,-0.65625 2.0116,-0.18299 2.5142,-0.83802 3.625,-0.9375 1.1523,-0.10323 2.2922,0.38483 6.375,0 4.0829,-0.38482 5.5887,-0.501 6.3125,-0.71875 0.7393,-0.22243 1.7956,-0.51449 2.4375,-1.03125 2.0574,0.40177 4.0029,0.50333 5.5313,0.34375 3.1466,-0.32852 5.1771,-0.5227 7.1875,-0.71875 1.613,-0.15729 2.2656,-0.63148 3.0312,-0.875 0.2407,-0.088 0.4632,-0.12137 0.75,-0.15625 1.1483,-0.1397 2.3167,0.33991 6.375,-0.25 4.0583,-0.58992 5.5618,-0.77714 6.2813,-1.03125 0.7167,-0.25316 1.6745,-0.55807 2.3125,-1.09375 1.9197,0.21194 3.7199,0.15141 5.1562,-0.0937 3.1191,-0.5324 5.1116,-0.92861 7.0938,-1.3125 1.9821,-0.38387 2.4743,-1.03965 3.5625,-1.28125 1.1288,-0.25066 2.2703,0.11629 6.25,-0.875 3.9796,-0.99128 5.4296,-1.4193 6.125,-1.78125 0.7223,-0.37601 1.7619,-0.87058 2.375,-1.53125 1.963,-0.012 3.7937,-0.29105 5.2187,-0.84375 2.9512,-1.14461 4.8732,-1.86942 6.6875,-2.75 1.4557,-0.70653 2.3348,-1.68641 2.5469,-1.98438 0.2122,-0.29796 0.1118,-0.7453 0.1379,-0.76675 0.043,-0.0352 0.3193,-0.085 0.479,-0.42844 0.8589,-1.84708 2.321,-5.64459 2.4352,-6.32945 0.1137,-0.68216 0.1638,-1.34774 0.2145,-1.74497 0.029,-0.22952 -0.1467,-0.86544 -0.1246,-0.92404 0.031,-0.0821 0.3045,-0.26528 0.3599,-0.51471 0.2663,-1.19833 0.089,-2.19129 -0.1251,-3.60893 -0.214,-1.41764 -0.9837,-4.62214 -1.6369,-5.47626 -0.6589,-0.86172 -1.2229,-1.01117 -1.7479,-1.00066 -0.2086,0.26976 0.1368,0.26309 0.1626,0.31261 0.6806,0.0508 0.934,0.36864 1.4192,0.89662 0.4852,0.52798 1.2218,3.85117 1.3584,5.30156 0.1366,1.45039 0.19,2.8602 -0.088,3.46864 -0.2781,0.60845 -0.7232,0.51703 -1.0156,0.58291 0.531,0.18589 0.6698,0.12483 0.7314,0.96929 0.059,0.81338 -0.1332,1.63969 -0.5198,2.80562 -0.3912,1.18001 -1.8452,4.34998 -2.2857,4.59877 -0.4523,0.25551 -0.7314,0.27038 -1.067,0.13944 z"
- id="path8181"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7297);enable-background:new"
- d="m 988.75,-263.84375 c 1.91161,0.6344 4.55027,1.75841 6.125,2.8125 1.57477,1.05409 2.8961,1.48252 5.5313,3.375 2.6082,1.87314 5.0269,3.01522 7.3125,4.0625 2.4693,1.13147 5.7521,2.15474 9.5312,3.9375 -1.2072,-1.2584 -7.139,-3.36445 -9.0312,-4.1875 -1.8922,-0.82304 -4.128,-1.93049 -6.9375,-3.78125 -2.80961,-1.85075 -3.62224,-2.48154 -6.00005,-3.71875 -2.37782,-1.23719 -4.07988,-1.9492 -6.53125,-2.5 z"
- id="path8183"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7301);enable-background:new"
- d="m 957.5,-260.78125 c 1.91,0.6181 4.58288,1.70934 6.15625,2.75 1.57339,1.04066 2.89608,1.48252 5.53125,3.375 2.60823,1.87315 5.02692,3.01521 7.3125,4.0625 2.46931,1.13147 5.75213,2.15475 9.53125,3.9375 -1.20728,-1.2584 -7.20154,-3.3957 -9.09375,-4.21875 -1.89217,-0.82304 -4.09666,-1.9305 -6.90625,-3.78125 -2.80958,-1.85075 -3.59295,-2.43932 -5.96875,-3.65625 -2.37578,-1.21691 -4.11321,-1.93885 -6.5625,-2.46875 z"
- id="path8185"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7305);enable-background:new"
- d="m 926.09375,-257.375 c 1.90772,0.59745 4.55348,1.66384 6.125,2.6875 1.5715,1.02365 2.87022,1.43971 5.5,3.28125 2.60291,1.82273 5.02887,2.9722 7.3125,4 2.4672,1.11041 5.75535,2.09323 9.53125,3.84375 -1.20623,-1.2481 -7.1719,-3.31809 -9.0625,-4.125 -1.89058,-0.8069 -4.10242,-1.89104 -6.90625,-3.6875 -2.80385,-1.79644 -3.62704,-2.40251 -6,-3.59375 -2.37297,-1.19124 -4.05362,-1.90283 -6.5,-2.40625 z"
- id="path8187"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7309);enable-background:new"
- d="m 894.90625,-253.5625 c 1.90213,0.55355 4.58701,1.58887 6.15625,2.59375 1.56923,1.00487 2.87401,1.40864 5.5,3.21875 2.59912,1.79164 5.00034,2.87189 7.28125,3.875 2.46428,1.08374 5.75984,2.04029 9.53125,3.75 -1.2048,-1.23507 -7.17416,-3.24478 -9.0625,-4.03125 -1.88832,-0.78647 -4.0752,-1.8308 -6.875,-3.59375 -2.79977,-1.76294 -3.59919,-2.36836 -5.96875,-3.53125 -2.36957,-1.16288 -4.12325,-1.83412 -6.5625,-2.28125 z"
- id="path8189"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7313);enable-background:new"
- d="m 863.71875,-248.65625 c 1.88062,0.42909 4.50427,1.38038 6.0625,2.3125 1.55823,0.93211 2.85233,1.25776 5.46875,3 2.58971,1.72444 4.98067,2.70802 7.25,3.625 2.45176,0.99069 5.73959,1.87707 9.5,3.5 -1.20131,-1.20734 -7.15249,-3.06609 -9.03125,-3.78125 -1.87875,-0.71517 -4.0854,-1.68442 -6.875,-3.375 -2.78963,-1.69057 -3.58461,-2.22822 -5.9375,-3.28125 -2.35292,-1.05301 -4.02584,-1.71248 -6.4375,-2 z"
- id="path8191"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7317);enable-background:new"
- d="m 833.15625,-241.375 c 1.84836,0.29644 4.46945,0.97632 6,1.78125 1.53058,0.80493 2.81374,1.05573 5.375,2.53125 2.53504,1.46046 4.89068,2.32509 7.125,3.0625 2.41399,0.79668 5.65711,1.46689 9.375,2.84375 -1.18771,-1.12873 -7.08772,-2.58975 -8.9375,-3.15625 -1.84977,-0.5665 -4.00342,-1.37392 -6.75,-2.84375 -2.74657,-1.46983 -3.50136,-1.92028 -5.8125,-2.78125 -2.31115,-0.86095 -4.00471,-1.32009 -6.375,-1.4375 z"
- id="path8193"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7321);enable-background:new"
- d="m 802.90625,-232.3125 c 1.8222,0.21127 4.36576,0.80057 5.875,1.53125 1.50925,0.73066 2.75568,0.92998 5.28125,2.28125 2.49976,1.33746 4.83154,2.04843 7.03125,2.65625 2.37653,0.65667 5.56464,1.07288 9.21875,2.1875 -1.16735,-1.04496 -6.92888,-2.10329 -8.75,-2.5625 -1.82111,-0.45921 -3.95225,-1.12696 -6.65625,-2.4375 -2.70403,-1.31052 -3.47106,-1.7199 -5.75,-2.46875 -2.27895,-0.74883 -3.91325,-1.17931 -6.25,-1.1875 z"
- id="path8195"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7329);enable-background:new"
- d="m 773.1875,-222.1875 c 1.81109,0.1787 4.32059,0.66506 5.8125,1.34375 1.49194,0.67869 2.7534,0.79822 5.25,2.0625 2.47107,1.25138 4.79005,1.89614 6.96875,2.4375 2.35387,0.58488 5.49134,0.89752 9.09375,1.84375 -1.15084,-0.99116 -6.85251,-1.7833 -8.65625,-2.1875 -1.80372,-0.4042 -3.91553,-1.02116 -6.59375,-2.25 -2.67818,-1.22884 -3.40345,-1.61089 -5.65625,-2.28125 -2.25279,-0.67034 -3.89627,-1.00232 -6.21875,-0.96875 z"
- id="path8197"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7325);enable-background:new"
- d="m 743.5625,-211.1875 c 1.79281,0.12911 4.27313,0.54965 5.75,1.1875 1.4769,0.63785 2.7161,0.74156 5.1875,1.9375 2.44618,1.18372 4.72054,1.74666 6.875,2.21875 2.32767,0.51003 5.4196,0.68064 9,1.5625 -1.14379,-0.9706 -6.74759,-1.59065 -8.53125,-1.9375 -1.78367,-0.34684 -3.88285,-0.88756 -6.53125,-2.03125 -2.64841,-1.14368 -3.39495,-1.51631 -5.625,-2.125 -2.23008,-0.60868 -3.82594,-0.90966 -6.125,-0.8125 z"
- id="path8199"
- inkscape:connector-curvature="0" />
- </g>
- </g>
- <path
- sodipodi:nodetypes="ccccccccc"
- id="path8201"
- d="m 863.87812,475.6679 c 1.64212,-3.218 3.51781,-5.73529 4.86136,-9.84898 0.79872,-3.65789 3.31204,-2.03073 7.26047,-8.3969 1.40193,-2.2395 5.47653,0.39136 8.9651,-2.39911 1.27072,-0.80319 2.88488,-0.40431 4.48256,-0.0631 3.76539,1.31896 5.82576,3.70355 8.33376,5.80837 6.13906,5.97023 20.53414,7.94327 23.48604,6.31346 1.43405,-2.90474 7.88128,-5.40888 12.37437,-11.11168 0.74811,-1.12267 11.72936,-8.74446 14.64721,-6.56599"
- style="display:inline;opacity:1;fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- inkscape:connector-curvature="0" />
- <path
- sodipodi:nodetypes="ccccccccccc"
- id="path8203"
- d="m 888.50059,465.25071 c 7.36341,-3.23297 13.8109,-8.9084 20.70813,-13.38452 3.31057,-1.96954 6.86983,3.21601 10.796,3.59866 2.29773,-0.21813 3.7129,1.20259 5.68211,1.6415 5.15636,1.31779 2.39793,3.86488 9.97526,6.43972 6.15561,1.7204 8.9074,-6.79847 14.89975,-7.3236 4.87739,-0.50299 8.09892,-0.31603 11.61675,-0.25254 3.92696,0.13889 4.07855,-3.4976 6.06092,-5.3033 2.98056,-2.80522 7.15561,-1.84972 10.14485,-4.7409 1.01754,-1.38468 1.95458,-3.01085 2.73459,-5.10809 0.88201,-2.00034 3.04006,0.30598 4.79823,1.26269"
- style="display:inline;opacity:1;fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- inkscape:connector-curvature="0" />
- <path
- id="path8994"
- d="m 403.27922,1056.3058 56.56854,-42.4264 72.12489,14.1421 -46.66904,52.3259 -53.74012,7.0711 -28.28427,-31.1127 z"
- style="display:inline;overflow:visible;visibility:visible;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter9048);enable-background:accumulate"
- inkscape:connector-curvature="0" />
- <path
- sodipodi:nodetypes="czzzzzzcccccccccczczz"
- id="path4189"
- d="m 542.27183,1060.5719 c -1.40727,18.8012 -1.1449,32.751 2.08174,49.3033 3.22666,16.5523 16.40609,45.9073 20.33441,63.1837 3.92621,17.2671 2.69413,38.3097 -12.45944,51.1483 -15.31761,12.9774 -42.05128,21.5989 -67.83231,15.7337 -25.78105,-5.8652 -69.54907,-49.2234 -88.59019,-70.2283 -19.11214,-21.0833 -63.76086,-93.8506 -77.93853,-124.2758 -14.17767,-30.4251 -12.65961,-36.7186 -8.11972,-45.52972 -9.36672,-24.5205 -12.41371,-50.06681 -33.71245,-75.57664 30.32547,3.11444 43.88028,26.95633 60.12568,47.13975 -5.52989,-48.07603 -18.05471,-64.4165 -28.37395,-90.7243 29.9943,6.08165 50.57936,31.87239 63.97979,72.7125 9.55415,-3.91791 18.23776,-9.37294 30.18741,-9.0612 -11.2975,-41.6958 -17.94946,-69.91584 -36.68725,-101.06994 53.44196,5.67033 83.65702,80.63932 78.97142,87.9608 9.97797,-2.24399 19.00565,-6.53038 30.43653,-5.65167 -11.24897,-38.34702 -21.04781,-76.8679 -3.65971,-118.64819 0,0 48.28678,65.43688 54.38966,85.80578 6.10287,20.3689 1.51881,38.70051 1.51881,38.70051 0,0 16.95957,31.0853 20.29392,51.09414 3.3731,20.24138 -3.53269,59.10328 -4.94582,77.98328 z"
- style="display:inline;opacity:1;fill:#ada469;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- inkscape:connector-curvature="0" />
- <path
- transform="matrix(-0.9045327,0.2506626,0.2506626,0.9045327,995.28646,23.53493)"
- clip-path="url(#clipPath3631)"
- sodipodi:nodetypes="cccccccccccccccccccccc"
- id="path4191"
- d="m 719.5,738.69519 18.31177,15.43196 44.41103,-15.38821 23.2772,-25.54375 11.46397,19.22065 30.67161,12.78354 25.09737,5.72837 L 892,723.19519 908.02309,747.02126 947,752.19519 l 10.24541,-6.19852 6.75471,8.6982 25.49988,11.00032 2,-40.5 L 955.94866,710.6576 923.45591,689.1305 883.0038,677.66492 861.69668,662.13148 840,685.19519 755.02878,638.61208 722,676.69519 l -2.5,62 z"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter3587);enable-background:accumulate"
- inkscape:connector-curvature="0" />
- <path
- transform="matrix(-0.9045327,0.2506626,0.2506626,0.9045327,822.28931,10.93589)"
- sodipodi:nodetypes="ccssscsssssssssssssccccscccccccccsscccccccccccssscccccccccccccccsccccssssssssssssscccsssc"
- clip-path="url(#clipPath3677)"
- id="path4193"
- d="m 584,696.5 -6.5625,17.15625 c 0,0 -7.81152,20.36488 -15.6875,43.65625 -3.93799,11.64568 -7.88302,24.04145 -10.9375,35.125 -3.05448,11.08355 -5.33586,20.37986 -5.5,28.28125 -0.39807,19.16196 5.74653,34.8883 8.9375,41.75 -0.77153,3.55523 -1.99137,9.45432 -3.34375,18.09375 -1.92042,12.26821 -3.71827,27.15441 -2.375,39.875 1.38209,13.08835 6.81222,28.18765 12.59375,43.03125 5.78153,14.8436 12.05435,29.22711 15.21875,38.03125 6.63206,18.4519 9.99296,31.5763 11.3125,48.5 0.58135,7.4561 -0.24227,20.336 -1.25,33.375 -1.00773,13.039 -2.18661,26.3014 -1.6875,36.9688 0.98911,21.1398 9.32798,46.8347 33.375,57.9374 22.77483,10.5154 55.32682,11.7022 83.4375,-3.4374 16.15992,-8.7034 30.07634,-27.0976 43.375,-46.9063 13.29866,-19.8087 24.96917,-41.0534 31.9375,-54.9063 15.35292,-30.5212 39.39353,-115.46418 45.625,-152.7187 3.01859,-18.04653 3.92166,-29.06555 2.625,-38.03125 -0.97853,-6.76604 -3.82819,-12.1474 -6.875,-16.21875 2.04274,-27.50791 -0.73207,-51.36878 11.96875,-79.40625 L 840.75,763.375 l -23.8125,9.3125 c -17.48975,6.83753 -28.90164,19.04536 -36.59375,32.0625 -0.32251,0.54577 -0.56314,1.10776 -0.875,1.65625 0.22203,-22.51521 4.40784,-37.63759 6.59375,-58.6875 l 1.96875,-19 L 771,737.375 c -30.59449,15.55571 -45.69489,48.19321 -49.71875,90.21875 -4.24532,-0.62547 -8.8314,-1.01965 -13.8125,-0.84375 -0.29149,-39.18036 -0.39629,-67.03685 8.59375,-99.375 l 5.59375,-20.125 -19.4375,7.65625 c -30.90937,12.20394 -47.85954,41.93073 -56.625,68.375 -4.38273,13.22214 -6.74582,25.80121 -7.59375,35.9375 -0.23203,2.77373 -0.31106,5.31132 -0.3125,7.71875 -3.24187,-0.0364 -6.42052,0.13589 -10.0625,0.5 0.0416,-39.00473 -3.48424,-79.75415 -32.28125,-116.5 L 584,696.5 Z m 5.8125,43.8125 c 16.80691,30.64383 17.47451,63.96728 16.9375,99.75 l -0.21875,15.0625 12.03493,-6.53921 c 8.66205,-3.13302 19.56058,-0.22752 31.93382,-0.83579 l 14.67465,9.3566 -6.3309,-25.7941 c -0.0897,-0.22997 -0.22046,-0.41669 -0.25,-0.71875 -0.19951,-2.03986 -0.22232,-5.47307 0.125,-9.625 0.69464,-8.30386 2.78957,-19.58524 6.625,-31.15625 5.15532,-15.55294 13.48801,-31.19248 25.125,-42.53125 -4.68381,28.63798 -3.21559,60.25934 -3.01164,95.80514 l -2.76593,13.26164 15.49632,-7.59803 c 9.0294,-2.75771 17.18897,-0.34996 29.28125,1.09375 l 13.24632,9.44423 L 741.09375,840 c 1.44793,-30.97177 8.22149,-53.67808 20.71875,-68.875 -2.98688,19.77884 -5.43043,41.7848 0.3125,78.34375 l 1.06552,6.37318 -2.93815,11.51685 10.61711,-8.16818 9.18973,10.22198 -1.54828,-10.4636 L 781.9375,852 c 5.70102,-13.21149 10.17282,-26.21337 16.34375,-36.65625 0.95986,-1.62434 2.03153,-3.06436 3.0625,-4.5625 -3.68066,21.15535 -2.42716,40.20815 -4.09375,57.78125 l -4.68014,7.80698 7.39889,0.22427 c 3.22005,3.48361 3.8675,3.85068 4.5625,8.65625 0.695,4.80557 0.31862,14.40035 -2.5625,31.625 -5.56799,33.28792 -31.84562,77.83981 -43.7404,101.4864 -6.60491,13.1304 -18.52833,57.4859 -31.12335,76.2465 -12.59502,18.7605 -28.53137,39.7673 -37.17204,44.4209 -21.49052,11.5742 -44.55594,25.5059 -60.61889,18.0895 -14.37486,-6.637 -23.03969,-21.1927 -23.81407,-37.7433 -0.38311,-8.188 0.61279,-21.3092 1.625,-34.4062 1.01221,-13.0971 11.28891,-22.5708 15.42339,-36.5626 5.37229,-18.1808 -1.44687,-36.5944 -12.5,-53.93745 -6.48655,-10.17778 -23.9768,-24.2579 -29.54839,-38.5625 -5.57159,-14.3046 -10.36751,-29.00315 -11.28125,-37.65625 -0.92621,-8.77113 0.4225,-23.02502 2.21875,-34.5 1.79625,-11.47497 3.84375,-20.28125 3.84375,-20.28125 l 9.42278,-3.6152 -10.48528,-3.8848 c 0,0 -8.49889,-15.3101 -8.09375,-34.8125 0.0711,-3.42316 1.83626,-12.72805 4.71875,-23.1875 2.88249,-10.45945 
6.76466,-22.55271 10.625,-33.96875 3.04439,-9.00308 5.78063,-16.60345 8.34375,-23.6875 z"
- style="display:inline;opacity:0.58775509;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:20.79999924;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;filter:url(#filter3898);enable-background:new"
- inkscape:connector-curvature="0" />
- <g
- transform="translate(276,136)"
- clip-path="url(#clipPath3622)"
- id="g3617"
- style="display:inline;enable-background:new">
- <path
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter9024);enable-background:accumulate"
- d="m -15.66751,843.48852 -49.49748,-15.55635 -26.87005,52.3259 41.01219,45.25484 49.49747,-38.18377 -14.14213,-43.84062 z"
- id="path4195"
- transform="matrix(-0.9045327,0.2506626,0.2506626,0.9045327,-52.200498,74.09707)"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter9020);enable-background:accumulate"
- d="m 118.70648,859.93048 -55.154328,-46.66904 -43.84062,36.76955 33.94113,53.74011 -13.596814,85.46203 -39.44536579,28.29217 -41.01220021,11.3137 -2.82842,46.669 56.56854,25.4559 18.943987,-69.65 23.45655,-58.85663 46.347541,-72.61491 16.62,-39.91188 z"
- id="path4197"
- transform="matrix(-0.9045327,0.2506626,0.2506626,0.9045327,-46.92842,75.511284)"
- sodipodi:nodetypes="ccccccccccccc"
- inkscape:connector-curvature="0" />
- </g>
- <path
- transform="matrix(-0.9045327,0.2506626,0.2506626,0.9045327,229.07158,211.51128)"
- id="path4199"
- d="m -70.82184,932.58397 60.81118,-26.87005 100.40916,31.1127 -63.63961,31.11269 -82.02438,-16.97056 -15.55635,-18.38478 z"
- style="display:inline;overflow:visible;visibility:visible;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter9044);enable-background:accumulate"
- inkscape:connector-curvature="0" />
- <path
- transform="matrix(-0.9045327,0.2506626,0.2506626,0.9045327,822.28931,10.93589)"
- clip-path="url(#clipPath4177)"
- sodipodi:nodetypes="ccccccccccccccccccccccccccccccccccccccccccccccccccczzzcccccc"
- id="path4201"
- d="m 583.0625,715.75 c -12.10609,34.44974 -26.7145,68.53333 -31.75,104.84375 -0.83208,14.92867 4.58915,29.15943 8.84375,43.0625 -5.91624,27.20126 -10.13681,56.89995 1.15625,83.125 13.51717,38.16085 35.00147,75.68215 32.42279,117.46825 -0.9483,29.2942 -9.01444,60.9941 5.38971,88.2817 10.19864,19.3348 33.13956,27.3117 53.96785,27.6676 27.86219,1.1741 56.46261,-11.6216 72.0009,-35.2613 22.59549,-29.3717 41.80051,-61.4973 55.23865,-96.0598 16.89053,-45.506 29.6718,-92.56072 37.93402,-140.3989 1.8244,-12.94106 3.10108,-27.46985 -4.57892,-38.82255 -3.43115,-7.33632 0.0421,-15.56014 -0.68457,-23.30977 0.674,-24.99466 4.01232,-50.66376 16.65332,-72.59648 -17.73313,6.4446 -35.07268,16.55971 -44.00307,33.86425 -3.93508,6.70955 -7.60482,13.57413 -11.37193,20.38575 -3.54999,-30.01408 3.71963,-59.64828 6.78125,-89.28125 -20.16604,9.05463 -36.87672,25.65522 -44.17495,46.682 -6.30463,15.58003 -8.80222,32.31718 -10.26255,49.03675 -8.25334,-1.51925 -16.68447,-2.10155 -25.0625,-1.5 -0.96308,-38.69787 -0.46696,-79.40715 10.96875,-115.90625 -18.68113,6.21776 -35.16621,18.73551 -45.62803,35.38723 -13.85254,20.87979 -21.2614,45.75395 -23.05947,70.61277 0.58534,4.32454 -0.0613,11.84009 -6.34375,9.875 -5.33118,0.0176 -10.62908,0.67883 -15.9375,1.09375 1.14784,-39.38148 -3.34144,-81.6282 -27.0625,-114.21875 -3.06071,-3.63717 -5.63685,-7.68438 -8.625,-11.34375 -0.9375,2.4375 -1.875,4.875 -2.8125,7.3125 z m 7.75,13.84375 c 18.56527,29.29629 22.4825,64.82012 22.125,98.875 0.20409,5.17526 -0.51656,11.8292 0.125,16.0625 12.31856,-6.10275 26.73912,-2.4399 39.78125,-2.1875 2.31712,1.22325 3.1921,1.65243 1.90625,-1.40625 -4.16455,-13.95285 -1.84828,-28.613 1.80504,-42.40764 6.36687,-26.29064 20.62828,-51.08798 42.81996,-67.02986 -8.61709,37.23706 -5.71658,76.56161 -6.09375,113.96875 12.25344,-6.9099 27.27879,-3.44613 40.03125,-0.25 3.39222,3.5348 2.28935,-0.72948 2.1875,-3.8125 -0.48309,-21.37058 4.13133,-43.06963 13.6875,-62.15625 5.96266,-10.68727 14.24338,-19.80379 22.4375,-28.875 -7.87156,33.8381 -9.2029,69.33593 -2.71875,103.5 1.72485,-1.41118 4.60681,-0.45414 5.65625,-0.375 9.68369,-21.23682 16.35112,-45.38062 34.89016,-60.74185 1.87329,-0.37122 -1.44818,8.52495 -1.48391,11.8981 -3.53488,21.84581 -7.17516,44.14234 -8.78421,66.21911 -8.78379,2.34171 2.84835,2.32354 3.46875,4.0625 7.92311,10.5658 4.66299,24.40472 3.63165,36.35334 -7.06405,45.03355 -22.14231,87.36194 -35.95355,130.6798 -12.07476,32.9493 -27.3742,58.8525 -47.88808,87.2015 -10.95257,13.5514 -23.24472,27.8513 -40.84375,32.5 -20.15601,6.2413 -44.20676,10.8769 -62.59956,0.046 -17.28966,-12.3414 -21.02393,-35.7089 -19.26226,-55.6864 0.0488,-15.8262 4.93886,-28.5121 4.4106,-43.4918 -0.53824,-15.2629 -2.29135,-30.5647 -6.54261,-46.8663 -4.25126,-16.30162 -9.04325,-24.91794 -16.11906,-41.57338 -7.24111,-17.04456 -15.07015,-36.74863 -18.20542,-56.28842 -1.74948,-18.62714 2.89171,-37.12262 5.78125,-55.25 3.29623,-2.83696 -1.59799,-5.19659 -2.3125,-8.1875 -7.60113,-17.01508 -8.40747,-36.7749 -2.74234,-54.55998 7.1302,-25.0723 15.76087,-49.63241 24.67984,-74.12752 0.70833,1.30208 1.41667,2.60417 2.125,3.90625 z"
- style="display:inline;opacity:0.58775509;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:20.79999924;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;filter:url(#filter4105);enable-background:new"
- inkscape:connector-curvature="0" />
- <path
- transform="matrix(-0.9045327,0.2506626,0.2506626,0.9045327,995.28646,23.53493)"
- clip-path="url(#clipPath3631)"
- sodipodi:nodetypes="cccccccc"
- id="path4203"
- d="m 735.05635,733.03834 2.75542,21.08881 44.41103,-15.38821 4.85063,-22.38975 -3.93617,-22.05222 -22.45163,-36.59301 -8.28004,30.30494 -17.34924,45.02944 z"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter4130);enable-background:accumulate"
- inkscape:connector-curvature="0" />
- <path
- transform="matrix(-0.9045327,0.2506626,0.2506626,0.9045327,995.28646,23.53493)"
- clip-path="url(#clipPath3631)"
- sodipodi:nodetypes="cccccccc"
- id="path4205"
- d="m 831.81321,730.29452 15.82237,14.90486 20.85473,2.89994 -1.59029,-39.92598 8.32561,-30.50842 -7.16499,-6.34106 -21.69669,20.9424 -14.55074,38.02826 z"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter4141);enable-background:accumulate"
- inkscape:connector-curvature="0" />
- <g
- transform="translate(276,136)"
- clip-path="url(#clipPath8338)"
- style="display:inline;filter:url(#filter8333);enable-background:new"
- id="g8317">
- <path
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- d="m 964.00012,754.69487 18.42881,7.46479 9.07107,-36.96447 -14.87031,4.83886 -12.62957,24.66082 z"
- id="path4209"
- sodipodi:nodetypes="ccccc"
- clip-path="none"
- transform="matrix(-0.9045327,0.2506626,0.2506626,0.9045327,719.28646,-112.46507)"
- inkscape:connector-curvature="0" />
- <rect
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:none;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:25;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- id="rect8315"
- width="182"
- height="177"
- x="-55"
- y="757.19519" />
- </g>
- <g
- transform="translate(276,136)"
- clip-path="url(#clipPath8359)"
- style="display:inline;filter:url(#filter8354);enable-background:new"
- id="g8346">
- <path
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- d="m 910.14441,746.31415 32.61295,5.17393 -0.36119,-23.87619 7.18853,-29.68221 -8.45112,-5.26365 -21.82194,26.51077 -9.16723,27.13735 z"
- id="path4207"
- sodipodi:nodetypes="ccccccc"
- clip-path="none"
- transform="matrix(-0.9045327,0.2506626,0.2506626,0.9045327,719.28646,-112.46507)"
- inkscape:connector-curvature="0" />
- <rect
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:none;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:25;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- id="rect8344"
- width="165"
- height="176"
- x="-22"
- y="696.19519" />
- </g>
- <path
- sodipodi:nodetypes="czzzzzzcccccccccczczz"
- id="path8848"
- d="m 1036.164,1071.8338 c 6.7941,18.9028 10.4937,33.2997 11.8903,51.2119 1.3966,17.9123 -3.7827,51.8008 -2.9005,70.6561 0.8818,18.8452 8.1337,40.099 27.3446,48.9689 19.4189,8.9658 49.3193,10.2113 74.1199,-3.1456 24.8006,-13.357 57.401,-70.3255 70.9742,-97.3087 13.6239,-27.0839 38.7611,-114.4974 44.6608,-149.76859 5.8998,-35.27121 2.5506,-41.30077 -4.6174,-49.05549 2.6403,-27.84015 -1.4998,-54.93543 13.1096,-87.18618 -30.249,11.8257 -37.3823,40.1607 -48.3189,65.50508 -8.0009,-50.93293 0.2092,-71.27319 3.3189,-101.21936 -29.0647,14.77791 -42.8615,47.11402 -45,92.85714 -10.9239,-1.3042 -21.3914,-4.43423 -33.5714,-0.71429 -0.264,-46.02334 -1.4635,-76.88941 8.9106,-114.20649 -53.2554,21.02686 -62.9472,106.5941 -56.0535,112.77792 -10.8828,0.535 -21.371,-1.2973 -32.8571,2.85715 0.6389,-42.57135 -0.2605,-84.90861 -30,-122.85715 0,0 -30.958,80.92234 -31.4286,103.57143 -0.4705,22.64909 9.4516,40.16588 9.4516,40.16588 0,0 -8.568,36.74051 -6.2986,58.23223 2.2959,21.74142 20.4429,59.67622 27.2655,78.65812 z"
- style="display:inline;opacity:1;fill:#ada469;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- inkscape:connector-curvature="0" />
- <path
- transform="translate(276,136)"
- clip-path="url(#clipPath3631)"
- sodipodi:nodetypes="cccccccccccccccccccccc"
- id="path3635"
- d="m 719.5,738.69519 18.31177,15.43196 44.41103,-15.38821 23.2772,-25.54375 11.46397,19.22065 30.67161,12.78354 25.09737,5.72837 L 892,723.19519 908.02309,747.02126 947,752.19519 l 10.24541,-6.19852 6.75471,8.6982 25.49988,11.00032 2,-40.5 L 955.94866,710.6576 923.45591,689.1305 883.0038,677.66492 861.69668,662.13148 840,685.19519 755.02878,638.61208 722,676.69519 l -2.5,62 z"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter3587);enable-background:accumulate"
- inkscape:connector-curvature="0" />
- <path
- sodipodi:nodetypes="ccssscsssssssssssssccccscccccccccsscccccccccccssscccccccccccccccsccccssssssssssssscccsssc"
- clip-path="url(#clipPath3677)"
- id="path3669"
- d="m 584,696.5 -6.5625,17.15625 c 0,0 -7.81152,20.36488 -15.6875,43.65625 -3.93799,11.64568 -7.88302,24.04145 -10.9375,35.125 -3.05448,11.08355 -5.33586,20.37986 -5.5,28.28125 -0.39807,19.16196 5.74653,34.8883 8.9375,41.75 -0.77153,3.55523 -1.99137,9.45432 -3.34375,18.09375 -1.92042,12.26821 -3.71827,27.15441 -2.375,39.875 1.38209,13.08835 6.81222,28.18765 12.59375,43.03125 5.78153,14.8436 12.05435,29.22711 15.21875,38.03125 6.63206,18.4519 9.99296,31.5763 11.3125,48.5 0.58135,7.4561 -0.24227,20.336 -1.25,33.375 -1.00773,13.039 -2.18661,26.3014 -1.6875,36.9688 0.98911,21.1398 9.32798,46.8347 33.375,57.9374 22.77483,10.5154 55.32682,11.7022 83.4375,-3.4374 16.15992,-8.7034 30.07634,-27.0976 43.375,-46.9063 13.29866,-19.8087 24.96917,-41.0534 31.9375,-54.9063 15.35292,-30.5212 39.39353,-115.46418 45.625,-152.7187 3.01859,-18.04653 3.92166,-29.06555 2.625,-38.03125 -0.97853,-6.76604 -3.82819,-12.1474 -6.875,-16.21875 2.04274,-27.50791 -0.73207,-51.36878 11.96875,-79.40625 L 840.75,763.375 l -23.8125,9.3125 c -17.48975,6.83753 -28.90164,19.04536 -36.59375,32.0625 -0.32251,0.54577 -0.56314,1.10776 -0.875,1.65625 0.22203,-22.51521 4.40784,-37.63759 6.59375,-58.6875 l 1.96875,-19 L 771,737.375 c -30.59449,15.55571 -45.69489,48.19321 -49.71875,90.21875 -4.24532,-0.62547 -8.8314,-1.01965 -13.8125,-0.84375 -0.29149,-39.18036 -0.39629,-67.03685 8.59375,-99.375 l 5.59375,-20.125 -19.4375,7.65625 c -30.90937,12.20394 -47.85954,41.93073 -56.625,68.375 -4.38273,13.22214 -6.74582,25.80121 -7.59375,35.9375 -0.23203,2.77373 -0.31106,5.31132 -0.3125,7.71875 -3.24187,-0.0364 -6.42052,0.13589 -10.0625,0.5 0.0416,-39.00473 -3.48424,-79.75415 -32.28125,-116.5 L 584,696.5 Z m 5.8125,43.8125 c 16.80691,30.64383 17.47451,63.96728 16.9375,99.75 l -0.21875,15.0625 12.03493,-6.53921 c 8.66205,-3.13302 19.56058,-0.22752 31.93382,-0.83579 l 14.67465,9.3566 -6.3309,-25.7941 c -0.0897,-0.22997 -0.22046,-0.41669 -0.25,-0.71875 -0.19951,-2.03986 -0.22232,-5.47307 0.125,-9.625 0.69464,-8.30386 2.78957,-19.58524 6.625,-31.15625 5.15532,-15.55294 13.48801,-31.19248 25.125,-42.53125 -4.68381,28.63798 -3.21559,60.25934 -3.01164,95.80514 l -2.76593,13.26164 15.49632,-7.59803 c 9.0294,-2.75771 17.18897,-0.34996 29.28125,1.09375 l 13.24632,9.44423 L 741.09375,840 c 1.44793,-30.97177 8.22149,-53.67808 20.71875,-68.875 -2.98688,19.77884 -5.43043,41.7848 0.3125,78.34375 l 1.06552,6.37318 -2.93815,11.51685 10.61711,-8.16818 9.18973,10.22198 -1.54828,-10.4636 L 781.9375,852 c 5.70102,-13.21149 10.17282,-26.21337 16.34375,-36.65625 0.95986,-1.62434 2.03153,-3.06436 3.0625,-4.5625 -3.68066,21.15535 -2.42716,40.20815 -4.09375,57.78125 l -4.68014,7.80698 7.39889,0.22427 c 3.22005,3.48361 3.8675,3.85068 4.5625,8.65625 0.695,4.80557 0.31862,14.40035 -2.5625,31.625 -5.56799,33.28792 -31.79272,123.1659 -43.6875,146.8125 -6.60491,13.1304 -18.02998,33.8957 -30.625,52.6563 -12.59502,18.7605 -27.35933,35.5338 -36,40.1874 -21.49052,11.5742 -48.7808,10.2602 -64.84375,2.8438 -14.37486,-6.637 -20.53812,-23.4494 -21.3125,-40 -0.38311,-8.188 0.61279,-21.3092 1.625,-34.4062 1.01221,-13.0971 11.28891,-22.5708 15.42339,-36.5626 5.37229,-18.1808 -1.44687,-36.5944 -12.5,-53.93745 -6.48655,-10.17778 -23.9768,-24.2579 -29.54839,-38.5625 -5.57159,-14.3046 -10.36751,-29.00315 -11.28125,-37.65625 -0.92621,-8.77113 0.4225,-23.02502 2.21875,-34.5 1.79625,-11.47497 3.84375,-20.28125 3.84375,-20.28125 l 9.42278,-3.6152 -10.48528,-3.8848 c 0,0 -8.49889,-15.3101 -8.09375,-34.8125 0.0711,-3.42316 1.83626,-12.72805 4.71875,-23.1875 2.88249,-10.45945 6.76466,-22.55271 
10.625,-33.96875 3.04439,-9.00308 5.78063,-16.60345 8.34375,-23.6875 z"
- style="display:inline;opacity:0.58775509;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:20.79999924;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;filter:url(#filter3898);enable-background:new"
- transform="translate(450.03125,73.843964)"
- inkscape:connector-curvature="0" />
- <g
- transform="translate(276,136)"
- clip-path="url(#clipPath3636)"
- id="g3628">
- <path
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter9024);enable-background:accumulate"
- d="m 824.48651,818.48242 -49.49748,-15.55635 -26.87005,52.3259 41.01219,45.25484 49.49747,-38.18377 -14.14213,-43.84062 z"
- id="path8988"
- inkscape:connector-curvature="0" />
- <path
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter9020);enable-background:accumulate"
- d="m 964.49365,855.25197 -55.15433,-46.66904 -43.84062,36.76955 33.94113,53.74011 7.07106,66.46804 -50.91168,35.35537 -41.0122,11.3137 -2.82842,46.669 56.56854,25.4559 63.63961,-76.3676 24.04163,-94.75227 8.48528,-57.98276 z"
- id="path8990"
- inkscape:connector-curvature="0" />
- </g>
- <path
- id="path8992"
- d="m 1045.3322,1043.5779 60.8112,-26.8701 100.4091,31.1127 -63.6396,31.1127 -82.0244,-16.9706 -15.5563,-18.3847 z"
- style="display:inline;overflow:visible;visibility:visible;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter9044);enable-background:accumulate"
- inkscape:connector-curvature="0" />
- <path
- clip-path="url(#clipPath4177)"
- sodipodi:nodetypes="ccccccccccccccccccccccccccccccccccccccccccccccccccczzzcccccc"
- id="path4149"
- d="m 583.0625,715.75 c -12.10609,34.44974 -26.7145,68.53333 -31.75,104.84375 -0.83208,14.92867 4.58915,29.15943 8.84375,43.0625 -5.91624,27.20126 -10.13681,56.89995 1.15625,83.125 13.51717,38.16085 35.00147,75.68215 32.42279,117.46825 -0.9483,29.2942 -9.01444,60.9941 5.38971,88.2817 10.19864,19.3348 33.13956,27.3117 53.96785,27.6676 27.86219,1.1741 56.46261,-11.6216 72.0009,-35.2613 22.59549,-29.3717 41.80051,-61.4973 55.23865,-96.0598 16.89053,-45.506 29.6718,-92.56072 37.93402,-140.3989 1.8244,-12.94106 3.10108,-27.46985 -4.57892,-38.82255 -3.43115,-7.33632 0.0421,-15.56014 -0.68457,-23.30977 0.674,-24.99466 4.01232,-50.66376 16.65332,-72.59648 -17.73313,6.4446 -35.07268,16.55971 -44.00307,33.86425 -3.93508,6.70955 -7.60482,13.57413 -11.37193,20.38575 -3.54999,-30.01408 3.71963,-59.64828 6.78125,-89.28125 -20.16604,9.05463 -36.87672,25.65522 -44.17495,46.682 -6.30463,15.58003 -8.80222,32.31718 -10.26255,49.03675 -8.25334,-1.51925 -16.68447,-2.10155 -25.0625,-1.5 -0.96308,-38.69787 -0.46696,-79.40715 10.96875,-115.90625 -18.68113,6.21776 -35.16621,18.73551 -45.62803,35.38723 -13.85254,20.87979 -21.2614,45.75395 -23.05947,70.61277 0.58534,4.32454 -0.0613,11.84009 -6.34375,9.875 -5.33118,0.0176 -10.62908,0.67883 -15.9375,1.09375 1.14784,-39.38148 -3.34144,-81.6282 -27.0625,-114.21875 -3.06071,-3.63717 -5.63685,-7.68438 -8.625,-11.34375 -0.9375,2.4375 -1.875,4.875 -2.8125,7.3125 z m 7.75,13.84375 c 18.56527,29.29629 22.4825,64.82012 22.125,98.875 0.20409,5.17526 -0.51656,11.8292 0.125,16.0625 12.31856,-6.10275 26.73912,-2.4399 39.78125,-2.1875 2.31712,1.22325 3.1921,1.65243 1.90625,-1.40625 -4.16455,-13.95285 -1.84828,-28.613 1.80504,-42.40764 6.36687,-26.29064 20.62828,-51.08798 42.81996,-67.02986 -8.61709,37.23706 -5.71658,76.56161 -6.09375,113.96875 12.25344,-6.9099 27.27879,-3.44613 40.03125,-0.25 3.39222,3.5348 2.28935,-0.72948 2.1875,-3.8125 -0.48309,-21.37058 4.13133,-43.06963 13.6875,-62.15625 5.96266,-10.68727 14.24338,-19.80379 22.4375,-28.875 -7.87156,33.8381 -9.2029,69.33593 -2.71875,103.5 1.72485,-1.41118 4.60681,-0.45414 5.65625,-0.375 9.68369,-21.23682 16.35112,-45.38062 34.89016,-60.74185 1.87329,-0.37122 -1.44818,8.52495 -1.48391,11.8981 -3.53488,21.84581 -3.2972,44.17323 -4.90625,66.25 -1.31238,1.37679 2.84835,2.32354 3.46875,4.0625 7.92311,10.5658 3.12294,24.83149 2.0916,36.78011 -7.06405,45.03355 -21.76553,88.37934 -35.57677,131.69714 -12.07476,32.9493 -30.7197,63.08 -51.23358,91.429 -10.95257,13.5514 -23.24472,27.8513 -40.84375,32.5 -20.15601,6.2413 -43.57595,5.1744 -61.96875,-5.6562 -17.28966,-12.3414 -21.02393,-35.7089 -19.26226,-55.6864 0.0488,-15.8262 2.37211,-27.8008 7.91747,-42.8053 5.54535,-15.0045 2.47105,-31.3317 -1.78021,-47.6333 -4.25126,-16.3016 -12.17903,-26.26002 -21.82158,-42.20417 -9.64255,-15.94415 -17.6369,-36.03734 -20.77217,-55.57713 -1.74948,-18.62714 2.89171,-37.12262 5.78125,-55.25 3.29623,-2.83696 -1.59799,-5.19659 -2.3125,-8.1875 -7.60113,-17.01508 -8.40747,-36.7749 -2.74234,-54.55998 7.1302,-25.0723 15.76087,-49.63241 24.67984,-74.12752 0.70833,1.30208 1.41667,2.60417 2.125,3.90625 z"
- style="display:inline;opacity:0.58775509;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:20.79999924;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;filter:url(#filter4185);enable-background:new"
- transform="translate(450.03125,73.843964)"
- inkscape:connector-curvature="0" />
- <path
- transform="translate(276,136)"
- clip-path="url(#clipPath3631)"
- sodipodi:nodetypes="cccccccc"
- id="path3902"
- d="m 735.05635,733.03834 2.75542,21.08881 44.41103,-15.38821 4.85063,-22.38975 -3.93617,-22.05222 -22.45163,-36.59301 -8.28004,30.30494 -17.34924,45.02944 z"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter4130);enable-background:accumulate"
- inkscape:connector-curvature="0" />
- <path
- transform="translate(276,136)"
- clip-path="url(#clipPath3631)"
- sodipodi:nodetypes="cccccccc"
- id="path4135"
- d="m 831.81321,730.29452 15.82237,14.90486 20.85473,2.89994 -1.59029,-39.92598 8.32561,-30.50842 -7.16499,-6.34106 -21.69669,20.9424 -14.55074,38.02826 z"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter4141);enable-background:accumulate"
- inkscape:connector-curvature="0" />
- <g
- transform="translate(276,136)"
- clip-path="url(#clipPath8392)"
- style="filter:url(#filter8379)"
- id="g8367">
- <path
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- d="m 910.14441,746.31415 32.61295,5.17393 -0.36119,-23.87619 7.18853,-29.68221 -8.45112,-5.26365 -21.82194,26.51077 -9.16723,27.13735 z"
- id="path4145"
- sodipodi:nodetypes="ccccccc"
- clip-path="none"
- inkscape:connector-curvature="0" />
- <rect
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:none;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:25;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- id="rect8365"
- width="123.03658"
- height="172.53406"
- x="877.51953"
- y="650.19098" />
- </g>
- <g
- transform="translate(276,136)"
- clip-path="url(#clipPath8417)"
- style="filter:url(#filter8404)"
- id="g8400">
- <path
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- d="m 964.00012,754.69487 18.42881,7.46479 9.07107,-36.96447 -14.87031,4.83886 -12.62957,24.66082 z"
- id="path4147"
- sodipodi:nodetypes="ccccc"
- clip-path="none"
- inkscape:connector-curvature="0" />
- <rect
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:none;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:25;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- id="rect8398"
- width="142.12846"
- height="125.1579"
- x="924.89569"
- y="677.06104" />
- </g>
+<?xml version="1.0" encoding="UTF-8"?>
+<svg enable-background="new" version="1" viewBox="0 0 4226.3 1686.8" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
+ <defs>
+ <pattern id="ig" xlink:href="#ka" patternTransform="matrix(5.4432 0 0 10.1 1722.4 161.06)"/>
+ <marker id="er" overflow="visible" orient="auto">
+ <path d="m-1.2 0l-1 1 3.5-1-3.5-1 1 1z" fill="#f8d615" fill-rule="evenodd" stroke="#f8d615" stroke-width=".2pt"/>
+ </marker>
+ <pattern id="ka" width="2" height="1" patternTransform="scale(10)" patternUnits="userSpaceOnUse">
+ <path d="M0-.5h1v2H0z" fill="#f815bb"/>
+ </pattern>
+ <filter id="ep" x="-.085" y="-.366" width="1.169" height="1.732">
+ <feGaussianBlur stdDeviation="4.574"/>
+ </filter>
+ <linearGradient id="n">
+ <stop stop-color="#fff" offset="0"/>
+ <stop stop-color="#fff" stop-opacity="0" offset="1"/>
+ </linearGradient>
+ <linearGradient id="j">
+ <stop stop-color="#f9eed3" offset="0"/>
+ <stop stop-opacity="0" offset="1"/>
+ </linearGradient>
+ <linearGradient id="s">
+ <stop stop-color="#283131" stop-opacity="0" offset="0"/>
+ <stop stop-color="#1e2424" offset=".5"/>
+ <stop offset="1"/>
+ </linearGradient>
+ <linearGradient id="u">
+ <stop stop-color="#cfc690" offset="0"/>
+ <stop stop-color="#afa775" offset=".212"/>
+ <stop stop-color="#615c3a" offset=".534"/>
+ <stop offset=".765"/>
+ <stop stop-color="#403518" offset="1"/>
+ </linearGradient>
+ <radialGradient id="jd" cx="418.3" cy="342.48" r="131.45" gradientTransform="matrix(1.3957 .62111 -.42441 .95372 -15.062 -227.97)" gradientUnits="userSpaceOnUse">
+ <stop stop-color="#283131" offset="0"/>
+ <stop stop-color="#1e2424" offset=".5"/>
+ <stop offset="1"/>
+ </radialGradient>
+ <filter id="iz" x="-.3" y="-.3" width="1.6" height="1.6">
+ <feGaussianBlur stdDeviation="2"/>
+ </filter>
+ <clipPath id="ea">
+ <path d="M179.64 267.36c-22.41 39.703-60.616 115.78-69.286 149.64-8.647 33.775-8.772 66.417-.357 86.429 8.36 19.882 26.164 35.633 40.714 41.429-.597-14.376 14.373-43.286 72.857-72.5 58.626-29.285 78.382-27.131 103.57-47.143 25.63-20.362 12.61-67.045 3.214-93.929-9.434-26.993-34.967-59.124-66.429-69.643-31.033-10.375-65.018-4.848-84.286 5.714z" fill="#f5ff04" fill-rule="evenodd"/>
+ </clipPath>
+ <radialGradient id="iy" cx="275.44" cy="335.35" r="36.75" gradientTransform="matrix(.05911 2.687 -.72343 .01591 408.73 -424.56)" gradientUnits="userSpaceOnUse">
+ <stop stop-color="#fff" offset="0"/>
+ <stop stop-color="#fff" stop-opacity="0" offset="1"/>
+ </radialGradient>
+ <clipPath id="kb">
+ <path d="m265.94 126.68l-18.767 168.86 174.11-73.121 61.954 88.659 57.884-31.99-37.534-180.06-237.64 27.649z" fill-rule="evenodd" stroke="#000" stroke-width=".9"/>
+ </clipPath>
+ <clipPath id="jz">
+ <path d="M352.25 211.99c-3.804-25.264-16.81-50.638-17.157-75.525-.186-13.356 3.273-26.571 13.756-39.554 36.347-65.296 116.94-84.695 185.93-91.465 86.922-11.017 184.91 17.94 233.37 95.401 54.124 75.733 56.675 172.54 80.612 259.53 29.438 127.13 54.779 256.21 60.392 386.85-3.063 78.182-8.426 165.18-60.503 228.13-48.027 50.357-122.79 50.053-187.07 59.002-90.555 4.655-184.35-16.146-261.78-64.198-64.776-37.94-95.73-113.48-97.279-186.02-8.39-79.875 26.392-153.81 51.62-227.16 7.47-82.761 9.413-166.25 9.653-249.38-.837-32.195-7.09-63.817-11.546-95.609z" enable-background="accumulate" fill="#262f2f" fill-rule="evenodd" stroke="#000"/>
+ </clipPath>
+ <clipPath id="kd">
+ <path d="M352.25 211.99c-3.804-25.264-16.81-50.638-17.157-75.525-.186-13.356 3.273-26.571 13.756-39.554 36.347-65.296 116.94-84.695 185.93-91.465 86.922-11.017 184.91 17.94 233.37 95.401 54.124 75.733 56.675 172.54 80.612 259.53 29.438 127.13 54.779 256.21 60.392 386.85-3.063 78.182-8.426 165.18-60.503 228.13-48.027 50.357-122.79 50.053-187.07 59.002-90.555 4.655-184.35-16.146-261.78-64.198-64.776-37.94-95.73-113.48-97.279-186.02-8.39-79.875 26.392-153.81 51.62-227.16 7.47-82.761 9.413-166.25 9.653-249.38-.837-32.195-7.09-63.817-11.546-95.609z" enable-background="accumulate" fill="#262f2f" fill-rule="evenodd" stroke="#000"/>
+ </clipPath>
+ <clipPath id="jx">
+ <path d="M352.25 211.99c-3.804-25.264-16.81-50.638-17.157-75.525-.186-13.356 3.273-26.571 13.756-39.554 36.347-65.296 116.94-84.695 185.93-91.465 86.922-11.017 184.91 17.94 233.37 95.401 54.124 75.733 56.675 172.54 80.612 259.53 29.438 127.13 54.779 256.21 60.392 386.85-3.063 78.182-8.426 165.18-60.503 228.13-48.027 50.357-122.79 50.053-187.07 59.002-90.555 4.655-184.35-16.146-261.78-64.198-64.776-37.94-95.73-113.48-97.279-186.02-8.39-79.875 26.392-153.81 51.62-227.16 7.47-82.761 9.413-166.25 9.653-249.38-.837-32.195-7.09-63.817-11.546-95.609z" enable-background="accumulate" fill="#262f2f" fill-rule="evenodd" stroke="#000"/>
+ </clipPath>
+ <clipPath id="en">
+ <path d="M352.25 211.99c-3.804-25.264-16.81-50.638-17.157-75.525-.186-13.356 3.273-26.571 13.756-39.554 36.347-65.296 116.94-84.695 185.93-91.465 86.922-11.017 184.91 17.94 233.37 95.401 54.124 75.733 56.675 172.54 80.612 259.53 29.438 127.13 54.779 256.21 60.392 386.85-3.063 78.182-8.426 165.18-60.503 228.13-48.027 50.357-122.79 50.053-187.07 59.002-90.555 4.655-184.35-16.146-261.78-64.198-64.776-37.94-95.73-113.48-97.279-186.02-8.39-79.875 26.392-153.81 51.62-227.16 7.47-82.761 9.413-166.25 9.653-249.38-.837-32.195-7.09-63.817-11.546-95.609z" enable-background="accumulate" fill="#262f2f" fill-rule="evenodd" stroke="#000"/>
+ </clipPath>
+ <clipPath id="jw">
+ <path d="M821.64 477.89s22.619-6.507 35.743-5.873c13.123.634 30.642 1.939 43.709 12.186 13.067 10.248 25.068 27.14 34.112 58.37s1.698 99.252-6.176 143.35-28.265 106.11-45 140-49.798 77.495-60.569 89.876c-11.364 13.062-56.206 36.426-79.431 42.267 5.303-10.607 48.9-50.589 35-60.714-14.019-10.212-45.76 45.982-84.293 29.033 21.382-13.132 41.779-51.186 34.041-66.594-7.84-15.61-30.705 48.758-93.536 37.013 30.052-27.527 55.407-70.904 41.263-82.98-14.415-12.307-60.462 54.293-60.462 54.293s-2.822-41.7 13.773-68.607c16.639-26.978 79.653-81.615 99.553-111.7 19.9-30.088 33.613-66.009 42.135-92.518s15.801-77.1 15.801-77.1" enable-background="new" fill="#202020" fill-rule="evenodd" stroke="#000"/>
+ </clipPath>
+ <clipPath id="kp">
+ <path d="m366.89 504.13s-29.554 40.573-47.857 74.286-58.621 126.36-70.357 171.07c-11.759 44.803-62.5 123.57-62.5 123.57l76.071 18.214s11.807-12.823 31.071-46.071 60.357-138.57 60.357-138.57l13.214-202.5z" enable-background="accumulate" fill="#0f0f0f" fill-rule="evenodd" stroke="#000"/>
+ </clipPath>
+ <clipPath id="eo">
+ <path d="M569.03 1018.8c-4.286.714-27.628 3.618-57.857 10s-99.775 25.962-142.86 35.714-117.26 34.816-156.91 27.265c-39.648-7.55-89.516-64.408-89.516-64.408l4.286-94.286s85.886-16.201 112.14-33.571c26.257-17.37 45.582-49.666 59.286-71.429s32.857-71.429 32.857-71.429l238.57 262.14z" enable-background="accumulate" fill="#0b0b0b" fill-rule="evenodd" stroke="#000"/>
+ </clipPath>
+ <filter id="kc" x="-.353" y="-.182" width="1.706" height="1.363">
+ <feGaussianBlur stdDeviation="48.038"/>
+ </filter>
+ <filter id="jb" x="-.611" y="-.149" width="2.223" height="1.299">
+ <feGaussianBlur stdDeviation="37.83"/>
+ </filter>
+ <filter id="eg" x="-.235" y="-.245" width="1.47" height="1.49">
+ <feGaussianBlur stdDeviation="58.328"/>
+ </filter>
+ <filter id="jy" x="-.205" y="-.29" width="1.409" height="1.58">
+ <feGaussianBlur stdDeviation="22.3"/>
+ </filter>
+ <filter id="jv" x="-.344" y="-.184" width="1.688" height="1.369">
+ <feGaussianBlur stdDeviation="34.542"/>
+ </filter>
+ <filter id="kf" x="-.274" y="-.213" width="1.549" height="1.427">
+ <feGaussianBlur stdDeviation="11.314"/>
+ </filter>
+ <filter id="ja" x="-.259" y="-.224" width="1.518" height="1.447">
+ <feGaussianBlur stdDeviation="19.632"/>
+ </filter>
+ <filter id="kq" x="-.325" y="-.19" width="1.651" height="1.38">
+ <feGaussianBlur stdDeviation="28.713"/>
+ </filter>
+ <filter id="ko" x="-.381" y="-.175" width="1.762" height="1.35">
+ <feGaussianBlur stdDeviation="19.304"/>
+ </filter>
+ <filter id="kv" x="-.211" y="-.168" width="1.422" height="1.336">
+ <feGaussianBlur stdDeviation="8.369"/>
+ </filter>
+ <filter id="ks" x="-.187" y="-.236" width="1.374" height="1.473">
+ <feGaussianBlur stdDeviation="31.212"/>
+ </filter>
+ <clipPath id="ju">
+ <path d="M352.25 211.99c-3.804-25.264-16.81-50.638-17.157-75.525-.186-13.356 3.273-26.571 13.756-39.554 36.347-65.296 116.94-84.695 185.93-91.465 86.922-11.017 184.91 17.94 233.37 95.401 54.124 75.733 56.675 172.54 80.612 259.53 29.438 127.13 54.779 256.21 60.392 386.85-3.063 78.182-8.426 165.18-60.503 228.13-48.027 50.357-122.79 50.053-187.07 59.002-90.555 4.655-184.35-16.146-261.78-64.198-64.776-37.94-95.73-113.48-97.279-186.02-8.39-79.875 26.392-153.81 51.62-227.16 7.47-82.761 9.413-166.25 9.653-249.38-.837-32.195-7.09-63.817-11.546-95.609z" enable-background="accumulate" fill="#262f2f" fill-rule="evenodd" stroke="#000"/>
+ </clipPath>
+ <filter id="ki" x="-.252" y="-.053" width="1.503" height="1.106">
+ <feGaussianBlur stdDeviation="13.025"/>
+ </filter>
+ <linearGradient id="t" x1="603.84" x2="616.24" y1="627.85" y2="585.43" gradientTransform="translate(450.03 73.844)" gradientUnits="userSpaceOnUse">
+ <stop stop-color="#1a1a1a" offset="0"/>
+ <stop stop-color="#1a1a1a" stop-opacity="0" offset="1"/>
+ </linearGradient>
+ <filter id="dq" x="-.329" y="-.182" width="1.657" height="1.364">
+ <feGaussianBlur stdDeviation="20.913"/>
+ </filter>
+ <filter id="dr" x="-.555" y="-.514" width="2.109" height="2.029">
+ <feGaussianBlur stdDeviation="20.913"/>
+ </filter>
+ <filter id="cy" x="-.326" y="-.845" width="1.653" height="2.691">
+ <feGaussianBlur stdDeviation="21.92"/>
+ </filter>
+ <filter id="he" x="-.409" y="-.715" width="1.818" height="2.431">
+ <feGaussianBlur stdDeviation="21.92"/>
+ </filter>
+ <filter id="o">
+ <feGaussianBlur stdDeviation="8.881"/>
+ </filter>
+ <clipPath id="jt">
+ <path d="M647.61 540.05s22.619-6.507 35.743-5.873c13.123.634 30.642 1.939 43.709 12.186 13.067 10.248 25.068 27.14 34.112 58.37s1.698 99.252-6.176 143.35-28.265 106.11-45 140-49.798 77.495-60.569 89.876c-11.364 13.062-56.206 36.426-79.431 42.267 5.303-10.607 48.9-50.589 35-60.714-14.019-10.212-45.76 45.982-84.293 29.033 21.382-13.132 41.779-51.186 34.041-66.594-7.84-15.61-30.705 48.758-93.536 37.013 30.052-27.527 55.407-70.904 41.263-82.98-14.415-12.307-60.462 54.293-60.462 54.293s-2.822-41.7 13.773-68.607c16.639-26.978 79.653-81.615 99.553-111.7 19.9-30.088 33.613-66.009 42.135-92.518s15.801-77.1 15.801-77.1" enable-background="new" fill="#202020" fill-rule="evenodd"/>
+ </clipPath>
+ <filter id="je" x="-.277" y="-.325" width="1.554" height="1.65">
+ <feGaussianBlur stdDeviation="19.956"/>
+ </filter>
+ <clipPath id="e">
+ <path d="M760.16 935.83c6.794 18.903 10.494 33.3 11.89 51.212 1.397 17.912-3.783 51.801-2.9 70.656.881 18.845 8.133 40.099 27.345 48.969 19.419 8.966 49.319 10.211 74.12-3.146 24.8-13.357 57.4-70.326 70.974-97.309 13.624-27.084 38.76-114.5 44.66-149.77 5.9-35.27 2.551-41.3-4.617-49.055 2.64-27.84-1.5-54.935 13.11-87.186-30.249 11.826-37.382 40.161-48.319 65.505-8-50.933.21-71.273 3.319-101.22-29.065 14.778-42.862 47.114-45 92.857-10.924-1.304-21.391-4.434-33.571-.714-.264-46.023-1.464-76.889 8.91-114.21-53.254 21.027-62.946 106.59-56.052 112.78-10.883.535-21.371-1.297-32.857 2.857.638-42.57-.261-84.909-30-122.86 0 0-30.958 80.922-31.43 103.57s9.452 40.166 9.452 40.166-8.568 36.741-6.298 58.232c2.295 21.741 20.443 59.676 27.265 78.658z" enable-background="new" fill="#ada469" fill-rule="evenodd"/>
+ </clipPath>
+ <clipPath id="kr">
+ <path d="m366.89 504.13s-29.554 40.573-47.857 74.286-58.621 126.36-70.357 171.07c-11.759 44.803-62.5 123.57-62.5 123.57l76.071 18.214s11.807-12.823 31.071-46.071 60.357-138.57 60.357-138.57l13.214-202.5z" enable-background="accumulate" fill="#0f0f0f" fill-rule="evenodd"/>
+ </clipPath>
+ <clipPath id="am">
+ <path d="M586.13 997.99c6.794 18.903 10.494 33.3 11.89 51.212 1.397 17.912-3.783 51.801-2.9 70.656.881 18.845 8.133 40.099 27.345 48.969 19.419 8.966 49.319 10.211 74.12-3.146 24.8-13.357 57.4-70.326 70.974-97.309 13.624-27.084 38.76-114.5 44.66-149.77 5.9-35.27 2.551-41.3-4.617-49.055 2.64-27.84-1.5-54.935 13.11-87.186-30.249 11.826-37.382 40.161-48.319 65.505-8-50.933.21-71.273 3.319-101.22-29.065 14.778-42.862 47.114-45 92.857-10.924-1.304-21.391-4.434-33.571-.714-.264-46.023-1.464-76.889 8.91-114.21-53.254 21.027-62.946 106.59-56.052 112.78-10.883.535-21.371-1.297-32.857 2.857.638-42.57-.261-84.909-30-122.86 0 0-30.958 80.922-31.43 103.57s9.452 40.166 9.452 40.166-8.568 36.741-6.298 58.232c2.295 21.741 20.443 59.676 27.265 78.658z" enable-background="new" fill="#ada469" fill-rule="evenodd"/>
+ </clipPath>
+ <filter id="ds">
+ <feGaussianBlur stdDeviation="10.893"/>
+ </filter>
+ <filter id="bz" x="-.495" y="-.267" width="1.99" height="1.534">
+ <feGaussianBlur stdDeviation="10.731"/>
+ </filter>
+ <filter id="by" x="-.406" y="-.303" width="1.812" height="1.605">
+ <feGaussianBlur stdDeviation="9.859"/>
+ </filter>
+ <clipPath id="cj">
+ <path d="M586.13 997.99c6.794 18.903 10.494 33.3 11.89 51.212 1.397 17.912-3.783 51.801-2.9 70.656.881 18.845 8.133 40.099 27.345 48.969 19.419 8.966 49.319 10.211 74.12-3.146 24.8-13.357 57.4-70.326 70.974-97.309 13.624-27.084 38.76-114.5 44.66-149.77 5.9-35.27 2.551-41.3-4.617-49.055 2.64-27.84-1.5-54.935 13.11-87.186-30.249 11.826-37.382 40.161-48.319 65.505-8-50.933.21-71.273 3.319-101.22-29.065 14.778-42.862 47.114-45 92.857-10.924-1.304-21.391-4.434-33.571-.714-.264-46.023-1.464-76.889 8.91-114.21-53.254 21.027-62.946 106.59-56.052 112.78-10.883.535-21.371-1.297-32.857 2.857.638-42.57-.261-84.909-30-122.86 0 0-30.958 80.922-31.43 103.57s9.452 40.166 9.452 40.166-8.568 36.741-6.298 58.232c2.295 21.741 20.443 59.676 27.265 78.658z" enable-background="new" fill="#ada469" fill-rule="evenodd"/>
+ </clipPath>
+ <filter id="il">
+ <feGaussianBlur stdDeviation="3.616"/>
+ </filter>
+ <filter id="ir">
+ <feGaussianBlur stdDeviation="3.864"/>
+ </filter>
+ <clipPath id="ku">
+ <path d="M569.03 1018.8c-4.286.714-27.628 3.618-57.857 10s-57.314 4.966-135.79 17.33c-79.852 12.581-94.064 42.542-108.12 47.064-14.7 4.729-145.38-65.822-145.38-65.822l4.286-94.286s85.886-16.201 112.14-33.571c26.257-17.37 45.582-49.666 59.286-71.429s32.857-71.429 32.857-71.429l238.57 262.14z" enable-background="accumulate" fill="#292929" fill-rule="evenodd" stroke="#000"/>
+ </clipPath>
+ <linearGradient id="dt" x1="699.33" x2="698.98" y1="269.77" y2="346.14" gradientUnits="userSpaceOnUse">
+ <stop stop-color="#fff" offset="0"/>
+ <stop offset="1"/>
+ </linearGradient>
+ <mask id="jc" maskUnits="userSpaceOnUse">
+ <ellipse transform="translate(-174.03 62.156)" cx="579.47" cy="260.58" rx="192.69" ry="164.05" enable-background="accumulate" fill="url(#dt)"/>
+ </mask>
+ <clipPath id="is">
+ <path d="m266.27 924.57c-1.407 18.801-1.145 32.751 2.082 49.303s16.406 45.907 20.334 63.184c3.926 17.267 2.694 38.31-12.46 51.148-15.317 12.977-42.05 21.599-67.831 15.734s-69.55-49.223-88.59-70.228c-19.112-21.083-63.761-93.851-77.94-124.28-14.177-30.425-12.66-36.719-8.119-45.53-9.367-24.52-12.414-50.067-33.712-75.577 30.325 3.114 43.88 26.956 60.126 47.14-5.53-48.076-18.055-64.416-28.374-90.724 29.994 6.082 50.579 31.872 63.98 72.712 9.554-3.918 18.238-9.373 30.187-9.061-11.298-41.696-17.949-69.916-36.687-101.07 53.442 5.67 83.657 80.639 78.971 87.96 9.978-2.243 19.006-6.53 30.437-5.65-11.249-38.348-21.048-76.869-3.66-118.65 0 0 48.287 65.436 54.39 85.805 6.103 20.37 1.52 38.701 1.52 38.701s16.96 31.085 20.293 51.094c3.373 20.241-3.533 59.103-4.946 77.983z" enable-background="new" fill="#ada469" fill-rule="evenodd"/>
+ </clipPath>
+ <clipPath id="im">
+ <path d="M760.16 935.83c6.794 18.903 10.494 33.3 11.89 51.212 1.397 17.912-3.783 51.801-2.9 70.656.881 18.845 8.133 40.099 27.345 48.969 19.419 8.966 49.319 10.211 74.12-3.146 24.8-13.357 57.4-70.326 70.974-97.309 13.624-27.084 38.76-114.5 44.66-149.77 5.9-35.27 2.551-41.3-4.617-49.055 2.64-27.84-1.5-54.935 13.11-87.186-30.249 11.826-37.382 40.161-48.319 65.505-8-50.933.21-71.273 3.319-101.22-29.065 14.778-42.862 47.114-45 92.857-10.924-1.304-21.391-4.434-33.571-.714-.264-46.023-1.464-76.889 8.91-114.21-53.254 21.027-62.946 106.59-56.052 112.78-10.883.535-21.371-1.297-32.857 2.857.638-42.57-.261-84.909-30-122.86 0 0-30.958 80.922-31.43 103.57s9.452 40.166 9.452 40.166-8.568 36.741-6.298 58.232c2.295 21.741 20.443 59.676 27.265 78.658z" enable-background="new" fill="#ada469" fill-rule="evenodd" stroke="#000"/>
+ </clipPath>
+ <filter id="jq" x="-.088" y="-.177" width="1.176" height="1.355">
+ <feGaussianBlur stdDeviation="16.34"/>
+ </filter>
+ <filter id="i">
+ <feTurbulence baseFrequency=".24" numOctaves="10" result="result0" seed="655" type="fractalNoise"/>
+ <feDisplacementMap in="SourceGraphic" in2="result0" scale="62" xChannelSelector="B" yChannelSelector="G"/>
+ </filter>
+ <filter id="em">
+ <feGaussianBlur stdDeviation="2.04"/>
+ </filter>
+ <clipPath id="jo">
+ <path d="m709.29 844.51c54.286-1.429 126.04-15.052 170-26.786 44.053-11.757 125.89-36.347 175.36-57.857 49.339-21.453 113.6-59.282 154.29-92.143 40.508-32.721 52.39-55.82 60.714-33.571 8.37 22.368-16.407 56.326-37.857 81.071-21.604 24.923-52.731 52.705-98.929 89.286s-156.08 101.58-212.86 128.57c-57.066 27.125-128.2 58.238-172.14 72.5s-131.43 31.071-131.43 31.071l92.857-192.14z" enable-background="accumulate" fill="#121212" fill-rule="evenodd"/>
+ </clipPath>
+ <clipPath id="jp">
+ <path d="m709.29 844.51c54.286-1.429 126.04-15.052 170-26.786 44.053-11.757 125.89-36.347 175.36-57.857 49.339-21.453 113.6-59.282 154.29-92.143 40.508-32.721 52.39-55.82 60.714-33.571 8.37 22.368-16.407 56.326-37.857 81.071-21.604 24.923-52.731 52.705-98.929 89.286s-156.08 101.58-212.86 128.57c-57.066 27.125-128.2 58.238-172.14 72.5s-131.43 31.071-131.43 31.071l92.857-192.14z" enable-background="accumulate" fill="#121212" fill-rule="evenodd"/>
+ </clipPath>
+ <clipPath id="js">
+ <path d="m709.29 844.51c54.286-1.429 126.04-15.052 170-26.786 44.053-11.757 125.89-36.347 175.36-57.857 49.339-21.453 113.6-59.282 154.29-92.143 40.508-32.721 52.39-55.82 60.714-33.571 8.37 22.368-16.407 56.326-37.857 81.071-21.604 24.923-52.731 52.705-98.929 89.286s-156.08 101.58-212.86 128.57c-57.066 27.125-128.2 58.238-172.14 72.5s-131.43 31.071-131.43 31.071l92.857-192.14z" enable-background="accumulate" fill="#121212" fill-rule="evenodd"/>
+ </clipPath>
+ <clipPath id="ke">
+ <path d="M178.21 274.15c-3.804-25.264-16.81-50.638-17.157-75.525-.186-13.356 3.273-26.571 13.756-39.554 36.347-65.296 116.94-84.695 185.93-91.465 86.922-11.017 184.91 17.94 233.37 95.401 54.124 75.733 56.675 172.54 80.612 259.53 29.438 127.13 54.779 256.21 60.392 386.85-3.063 78.182-8.426 165.18-60.503 228.13-48.027 50.357-122.79 50.053-187.07 59.002-90.555 4.655-184.35-16.146-261.78-64.198-64.776-37.94-95.73-113.48-97.279-186.02-8.39-79.875 26.392-153.81 51.62-227.16 7.47-82.761 9.413-166.25 9.653-249.38-.837-32.195-7.09-63.817-11.546-95.609z" enable-background="accumulate" fill="#262f2f" fill-rule="evenodd"/>
+ </clipPath>
+ <filter id="iv" x="-.243" y="-.391" width="1.487" height="1.782">
+ <feGaussianBlur stdDeviation="14.59"/>
+ </filter>
+ <filter id="iu" x="-.146" y="-.235" width="1.292" height="1.47">
+ <feGaussianBlur stdDeviation="4.444"/>
+ </filter>
+ <filter id="it">
+ <feGaussianBlur stdDeviation=".606"/>
+ </filter>
+ <filter id="ix">
+ <feGaussianBlur stdDeviation="6.589"/>
+ </filter>
+ <filter id="iw">
+ <feGaussianBlur stdDeviation="1.505"/>
+ </filter>
+ <filter id="jj" x="-.103" y="-.342" width="1.206" height="1.685">
+ <feGaussianBlur stdDeviation="1.168"/>
+ </filter>
+ <filter id="jf" x="-.098" y="-.198" width="1.197" height="1.395">
+ <feGaussianBlur stdDeviation="1.168"/>
+ </filter>
+ <filter id="jh" x="-.098" y="-.198" width="1.196" height="1.397">
+ <feGaussianBlur stdDeviation="1.168"/>
+ </filter>
+ <filter id="ji" x="-.099" y="-.226" width="1.198" height="1.453">
+ <feGaussianBlur stdDeviation="1.168"/>
+ </filter>
+ <filter id="hy" x="-.099" y="-.225" width="1.198" height="1.451">
+ <feGaussianBlur stdDeviation="1.168"/>
+ </filter>
+ <filter id="hu" x="-.105" y="-.405" width="1.209" height="1.809">
+ <feGaussianBlur stdDeviation="1.168"/>
+ </filter>
+ <filter id="hv" x="-.103" y="-.364" width="1.207" height="1.729">
+ <feGaussianBlur stdDeviation="1.168"/>
+ </filter>
+ <filter id="hw" x="-.102" y="-.324" width="1.204" height="1.647">
+ <feGaussianBlur stdDeviation="1.168"/>
+ </filter>
+ <filter id="hx" x="-.101" y="-.274" width="1.201" height="1.548">
+ <feGaussianBlur stdDeviation="1.168"/>
+ </filter>
+ <filter id="hz" x="-.098" y="-.209" width="1.197" height="1.417">
+ <feGaussianBlur stdDeviation="1.168"/>
+ </filter>
+ <filter id="ia" x="-.098" y="-.203" width="1.197" height="1.406">
+ <feGaussianBlur stdDeviation="1.168"/>
+ </filter>
+ <filter id="ib" x="-.098" y="-.198" width="1.196" height="1.397">
+ <feGaussianBlur stdDeviation="1.168"/>
+ </filter>
+ <filter id="jg">
+ <feGaussianBlur stdDeviation="1.168"/>
+ </filter>
+ <filter id="jk">
+ <feGaussianBlur stdDeviation="1.168"/>
+ </filter>
+ <filter id="hr" x="-.031" y="-.103" width="1.062" height="1.205">
+ <feGaussianBlur stdDeviation=".35"/>
+ </filter>
+ <filter id="hq">
+ <feGaussianBlur stdDeviation=".35"/>
+ </filter>
+ <filter id="hp">
+ <feGaussianBlur stdDeviation=".35"/>
+ </filter>
+ <filter id="hn">
+ <feGaussianBlur stdDeviation=".35"/>
+ </filter>
+ <filter id="hm">
+ <feGaussianBlur stdDeviation=".35"/>
+ </filter>
+ <filter id="hl">
+ <feGaussianBlur stdDeviation=".35"/>
+ </filter>
+ <filter id="hk">
+ <feGaussianBlur stdDeviation=".35"/>
+ </filter>
+ <filter id="hj">
+ <feGaussianBlur stdDeviation=".35"/>
+ </filter>
+ <filter id="hi">
+ <feGaussianBlur stdDeviation=".35"/>
+ </filter>
+ <filter id="hh">
+ <feGaussianBlur stdDeviation=".35"/>
+ </filter>
+ <filter id="hf" x="-.031" y="-.121" width="1.063" height="1.243">
+ <feGaussianBlur stdDeviation=".35"/>
+ </filter>
+ <filter id="hg" x="-.031" y="-.109" width="1.062" height="1.219">
+ <feGaussianBlur stdDeviation=".35"/>
+ </filter>
+ <filter id="hs">
+ <feGaussianBlur stdDeviation=".35"/>
+ </filter>
+ <filter id="ho">
+ <feGaussianBlur stdDeviation=".35"/>
+ </filter>
+ <filter id="ht">
+ <feGaussianBlur stdDeviation="1.723"/>
+ </filter>
+ <clipPath id="jl">
+ <path transform="translate(.08 -.031)" d="M1111.4-285.94l-3.937 1.875c-.041.01-.1.02-.125.031-.42.213-.165.1-.657.313-.486.21-1.737.584-4.093 1.469-3.332 1.25-5.805 2.15-7 3.062-1.537.021-3.72.233-5.657.719a227.677 227.677 0 0 1-6.75 1.594c-1.895.42-1.675.642-2.875.875-1.296.251-1.721-.01-5.437.78-3.49.743-8.895 1.932-10.156 2.688-1.584-.18-3.868-.321-5.844-.03-3.04.446-4.916.672-6.844.905-.655.08-1.04.201-1.344.282-.426.131-.685.26-1.375.343-1.311.16-1.762-.156-5.53.282-3.555.413-9.006 1.272-10.25 1.937-1.6-.297-3.859-.534-5.845-.344-3.058.294-4.972.484-6.906.657-1.934.172-1.688.422-2.906.53-1.316.119-1.76-.163-5.531.25-3.542.39-9.008 1.21-10.281 1.876-1.6-.295-3.887-.507-5.875-.313-3.059.3-4.941.48-6.875.657-.658.06-1.04.178-1.344.25-.428.119-.683.218-1.375.28-1.316.121-1.76-.194-5.531.22-3.556.39-9.006 1.239-10.25
+1.906-1.599-.294-3.86-.524-5.844-.313-3.056.326-4.974.527-6.906.719s-1.69.44-2.906.563c-1.315.131-1.763-.165-5.532.28-3.539.42-8.977 1.293-10.25 1.97-1.597-.281-3.86-.42-5.843-.188-3.052.358-4.945.568-6.875.781-.657.073-1.041.173-1.344.25-.427.128-.685.268-1.375.344-1.314.146-1.768-.174-5.531.313-3.55.458-8.979 1.419-10.22 2.125-1.593-.245-3.833-.382-5.812-.125-3.048.394-4.95.648-6.875.906-1.924.258-1.726.493-2.937.656-1.31.176-1.748-.104-5.5.469-3.525.538-8.924 1.699-10.188 2.437-1.588-.203-3.846-.254-5.813.094-3.026.536-4.899.862-6.812 1.188-.651.11-1.014.27-1.313.375-.42.164-.663.33-1.344.468-1.294.262-1.727-.006-5.437.813-3.499.772-8.846 2.383-10.062 3.219-1.563-.078-3.758.085-5.688.593-2.972.783-4.817 1.232-6.687 1.75s-1.667.768-2.844 1.094c-1.273.353-1.697.107-5.344 1.188-3.425 1.014-8.65 2.933-9.875 3.843-1.539.013-3.72.273-5.625.875-2.93.928-4.75 1.459-6.594
+2.063-.626.205-.991.393-1.28.531-.408.214-.654.409-1.313.625-1.255.412-1.687.19-5.282 1.438-3.39 1.177-8.595 3.213-9.78 4.156-1.525.06-3.65.395-5.532 1.062-2.897 1.029-4.699 1.676-6.531 2.313-1.832.637-1.628.848-2.781 1.25-1.247.434-1.664.2-5.22 1.562-3.338 1.28-8.486 3.483-9.687 4.47-1.507.107-3.635.498-5.5 1.218a1047.26 1047.26 0 0 1-6.437 2.469c-.617.233-.997.442-1.281.594v.03l-8 3.188 1.812 14.72c-.258-.062 6.188 3.312 6.188 3.312.226-.145.449-.273.718-.375 1.08-.41 2.172-.216 6-1.688 3.829-1.471 5.224-2.005 5.907-2.406.68-.4 1.611-.88 2.218-1.531 1.827-.138 3.571-.493 4.938-1 2.968-1.1 4.875-1.806 6.781-2.469 1.906-.662 2.354-1.415 3.406-1.781 1.092-.38 2.195-.166 6.063-1.531 3.867-1.366 5.283-1.827 5.969-2.22.7-.4 1.7-.932 2.312-1.593 1.97-.055 3.817-.385 5.281-.875 3.002-1.005 4.927-1.622 6.844-2.25 1.539-.504 2.174-1.047 2.906-1.437.23-.135.476-.254.75-.344 1.099-.36 2.182-.082
+6.094-1.313 3.912-1.23 5.366-1.673 6.063-2.03.694-.358 1.63-.794 2.25-1.407 1.865-.023 3.635-.267 5.03-.688 3.031-.913 4.993-1.43 6.938-1.968 1.945-.539 2.427-1.265 3.5-1.563 1.114-.31 2.22.007 6.188-1.031 3.967-1.039 5.417-1.433 6.125-1.75.735-.33 1.814-.754 2.437-1.375 1.998.116 3.858-.02 5.344-.375 3.078-.735 5.084-1.101 7.063-1.5 1.588-.32 2.244-.79 3-1.094a3.4 3.4 0 0 1 .75-.25c1.133-.23 2.304.209 6.343-.5 4.04-.709 5.5-.927 6.22-1.187.715-.26 1.704-.568 2.343-1.094 1.924.24 3.748.224 5.188 0 3.126-.488 5.154-.7 7.156-.969 2.001-.268 2.489-.945 3.594-1.094 1.146-.154 2.275.302 6.343-.219 4.068-.52 5.56-.695 6.282-.937.737-.247 1.798-.586 2.437-1.125 2.05.336 3.974.398 5.5.219 3.142-.37 5.18-.56 7.188-.782 1.61-.178 2.264-.608 3.03-.843.242-.086.495-.156.782-.188 1.15-.127 2.301.347 6.375-.125s5.559-.61 6.281-.844c.72-.232 1.7-.473 2.344-.968 1.936.333 3.77.404 5.219.25 3.146-.335
+5.177-.519 7.187-.719 2.01-.2 2.484-.826 3.594-.938 1.151-.115 2.297.366 6.375-.062s5.589-.562 6.313-.781c.739-.224 1.795-.514 2.437-1.031 2.057.398 4.002.493 5.531.343 3.149-.308 5.176-.473 7.188-.656 1.614-.147 2.263-.56 3.031-.781.241-.081.494-.13.781-.156 1.152-.106 2.293.392 6.375 0s5.59-.531 6.313-.75c.72-.219 1.7-.448 2.343-.938 1.939.35 3.77.454 5.22.313 3.148-.309 5.175-.474 7.187-.657 2.011-.183 2.514-.838 3.625-.937 1.152-.103 2.292.385 6.375 0s5.588-.501 6.312-.719c.74-.222 1.796-.514 2.438-1.031 2.057.402 4.003.503 5.531.344 3.147-.329 5.177-.523 7.187-.72 1.613-.156 2.266-.63 3.032-.874.24-.088.463-.122.75-.156 1.148-.14 2.316.34 6.375-.25 4.058-.59 5.562-.778 6.281-1.032.717-.253 1.675-.558 2.312-1.093 1.92.212 3.72.151 5.157-.094 3.119-.533 5.111-.929 7.093-1.313 1.983-.383 2.475-1.04 3.563-1.28 1.129-.251 2.27.115 6.25-.876s5.43-1.42 6.125-1.781c.722-.376 1.762-.87
+2.375-1.531 1.963-.012 3.794-.291 5.219-.844 2.95-1.145 4.873-1.87 6.687-2.75 1.456-.706 2.32-1.702 2.531-2 .213-.298.1-.729.125-.75.043-.035.34-.094.5-.437.86-1.848 2.324-5.628 2.438-6.313.114-.682.168-1.353.219-1.75.029-.23-.147-.879-.125-.937.03-.082.288-.251.343-.5.267-1.199.09-2.208-.125-3.625-.213-1.418-.971-4.615-1.625-5.47-.658-.861-1.224-1.01-1.75-1z" enable-background="new" fill="#bcb786" fill-rule="evenodd"/>
+ </clipPath>
+ <filter id="kk" x="-.082" y="-.227" width="1.163" height="1.453">
+ <feGaussianBlur stdDeviation="2.437"/>
+ </filter>
+ <filter id="kj" x="-.041" y="-.113" width="1.082" height="1.227">
+ <feGaussianBlur stdDeviation="1.219"/>
+ </filter>
+ <clipPath id="kl">
+ <path d="M1049.2-282.27l-.09.008c-1.387.884-6.603 1.607-6.629 9.523-.024 7.426 15.013 17.091 17.156 18.094 1.73.81 3.592 1.41 5.406 1.72l1.438.218c1.92.212 3.72.151 5.156-.094 3.12-.532 5.112-.928 7.094-1.312 1.982-.384 2.474-1.04 3.562-1.281 1.129-.251 2.27.116 6.25-.875 3.98-.992 5.43-1.42 6.125-1.782.723-.376 1.762-.87 2.375-1.53 1.963-.013 3.794-.292 5.219-.845 2.951-1.144 4.873-1.869 6.688-2.75 1.455-.706 2.319-1.702 2.53-2 .213-.298.1-.728.126-.75.043-.035.34-.094.5-.437.859-1.847 2.323-5.628 2.437-6.313.114-.682.168-1.352.219-1.75.029-.23-.147-.879-.125-.937.031-.082.288-.25.344-.5.266-1.198.089-2.208-.125-3.625-.214-1.418-.972-4.615-1.625-5.469-.42-.548-.8-.792-1.157-.906-.067-.017-.123-.047-.187-.063-.021-.004-.042.003-.062 0-.312-.075-.609-.158-1.156-.218-.986-.109-2.425-.26-3.969-.25-.515.003-1.037.047-1.563.093-3.558.313-9.01.991-10.218
+1.625-1.634-.334-3.949-.612-5.938-.468-3.064.22-4.968.342-6.906.468-1.939.127-1.686.389-2.906.469-1.32.087-1.787-.223-5.563.094-3.546.297-8.98.993-10.22 1.625-1.632-.335-3.945-.613-5.937-.469-3.064.221-4.967.373-6.906.5-.659.043-1.042.124-1.344.187z" enable-background="new" fill="#bcb786" fill-rule="evenodd" opacity=".824"/>
+ </clipPath>
+ <filter id="km" x="-.022" width="1.044">
+ <feGaussianBlur stdDeviation=".575"/>
+ </filter>
+ <clipPath id="kn">
+ <path d="M205.47-408.97l-.09.002c-1.446.786-6.7 1.143-7.276 9.039-.542 7.405 13.786 18.096 15.854 19.245 1.67.927 3.484 1.655 5.273 2.09l1.419.32c1.9.344 3.7.41 5.15.265 3.149-.314 5.164-.57 7.168-.815 2.004-.245 2.54-.865 3.643-1.03 1.144-.172 2.257.274 6.296-.438s5.515-1.038 6.234-1.35c.747-.325 1.818-.746 2.476-1.362 1.96.124 3.805-.026 5.265-.479 3.024-.936 4.991-1.525 6.863-2.277 1.501-.603 2.432-1.536 2.664-1.819.233-.282.15-.72.177-.74.045-.032.346-.07.53-.4.985-1.784 2.709-5.453 2.87-6.128.162-.673.263-1.338.34-1.73.046-.228-.085-.888-.059-.945.037-.08.305-.23.378-.475.35-1.177.243-2.195.128-3.625-.115-1.429-.648-4.67-1.24-5.568-.38-.577-.742-.846-1.09-.985-.066-.022-.12-.055-.183-.075-.02-.005-.042 0-.063-.004-.305-.097-.596-.2-1.138-.299-.975-.176-2.4-.428-3.942-.526a19.346 19.346 0 0
+0-1.565-.015c-3.572.064-9.057.361-10.307.91-1.606-.448-3.896-.886-5.89-.882-3.072.007-4.98-.005-6.922-.013-1.943-.01-1.71.27-2.932.265-1.322-.005-1.767-.347-5.556-.294-3.558.05-9.028.365-10.307.91-1.606-.448-3.893-.887-5.89-.882-3.072.007-4.982.027-6.924.018-.661-.003-1.049.05-1.354.093z" enable-background="new" fill="#bcb786" fill-rule="evenodd" opacity=".824"/>
+ </clipPath>
+ <linearGradient id="w" x1="774.98" x2="755.12" y1="-211.87" y2="-202.68" gradientTransform="translate(-19.092 4.243)" gradientUnits="userSpaceOnUse" xlink:href="#n"/>
+ <mask id="jm" maskUnits="userSpaceOnUse">
+ <path d="m718.41-224.31l33.25 56 276-24 159.5-48.25-66.5-82.75-402.25 99z" fill="url(#w)" fill-rule="evenodd"/>
+ </mask>
+ <clipPath id="kg">
+ <path d="M734.03 519.49s16.755 37.018 28.701 53.954 52.727 56.046 52.727 56.046l.597-138.59" enable-background="accumulate" fill="#1a1a1a" fill-rule="evenodd" stroke="#000"/>
+ </clipPath>
+ <filter id="jn">
+ <feGaussianBlur stdDeviation="10.662"/>
+ </filter>
+ <filter id="ip">
+ <feGaussianBlur stdDeviation="7.18"/>
+ </filter>
+ <clipPath id="iq">
+ <path d="m266.27 924.57c-1.407 18.801-1.145 32.751 2.082 49.303 3.226 16.552 16.406 45.907 20.334 63.184 3.926 17.267 2.694 38.31-12.46 51.148-15.317 12.978-42.05 21.599-67.831 15.734s-69.55-49.223-88.59-70.228c-19.112-21.083-63.761-93.851-77.94-124.28-14.177-30.425-12.66-36.719-8.119-45.53-9.367-24.52-12.414-50.067-33.712-75.577 30.325 3.114 43.88 26.956 60.126 47.14-5.53-48.076-18.055-64.417-28.374-90.724 29.994 6.082 50.579 31.872 63.98 72.712 9.554-3.918 18.238-9.373 30.187-9.061-11.298-41.696-17.949-69.916-36.687-101.07 53.442 5.67 83.657 80.639 78.971 87.96 9.978-2.243 19.006-6.53 30.437-5.65-11.249-38.348-21.048-76.869-3.66-118.65 0 0 48.287 65.436 54.39 85.805 6.103 20.37 1.52 38.701 1.52 38.701s16.96 31.085 20.293 51.094c3.373 20.241-3.533 59.103-4.946 77.983z" enable-background="new" fill="#ada469" fill-rule="evenodd"/>
+ </clipPath>
+ <filter id="in">
+ <feGaussianBlur stdDeviation="6.82"/>
+ </filter>
+ <clipPath id="io">
+ <path d="m266.27 924.57c-1.407 18.801-1.145 32.751 2.082 49.303 3.226 16.552 16.406 45.907 20.334 63.184 3.926 17.267 2.694 38.31-12.46 51.148-15.317 12.978-42.05 21.599-67.831 15.734s-69.55-49.223-88.59-70.228c-19.112-21.083-63.761-93.851-77.94-124.28-14.177-30.425-12.66-36.719-8.119-45.53-9.367-24.52-12.414-50.067-33.712-75.577 30.325 3.114 43.88 26.956 60.126 47.14-5.53-48.076-18.055-64.417-28.374-90.724 29.994 6.082 50.579 31.872 63.98 72.712 9.554-3.918 18.238-9.373 30.187-9.061-11.298-41.696-17.949-69.916-36.687-101.07 53.442 5.67 83.657 80.639 78.971 87.96 9.978-2.243 19.006-6.53 30.437-5.65-11.249-38.348-21.048-76.869-3.66-118.65 0 0 48.287 65.436 54.39 85.805 6.103 20.37 1.52 38.701 1.52 38.701s16.96 31.085 20.293 51.094c3.373 20.241-3.533 59.103-4.946 77.983z" enable-background="new" fill="#ada469" fill-rule="evenodd"/>
+ </clipPath>
+ <filter id="ij" x="-.144" y="-.103" width="1.288" height="1.206">
+ <feGaussianBlur stdDeviation="7.389"/>
+ </filter>
+ <clipPath id="ik">
+ <path d="M760.16 935.83c6.794 18.903 10.494 33.3 11.89 51.212 1.397 17.912-3.783 51.801-2.9 70.656.881 18.845 8.133 40.099 27.345 48.969 19.419 8.966 49.319 10.211 74.12-3.146 24.8-13.357 57.4-70.326 70.974-97.309 13.624-27.084 38.76-114.5 44.66-149.77 5.9-35.27 2.551-41.3-4.617-49.055 2.64-27.84-1.5-54.935 13.11-87.186-30.249 11.826-37.382 40.161-48.319 65.505-8-50.933.21-71.273 3.319-101.22-29.065 14.778-42.862 47.114-45 92.857-10.924-1.304-21.391-4.434-33.571-.714-.264-46.023-1.464-76.889 8.91-114.21-53.254 21.027-62.946 106.59-56.052 112.78-10.883.535-21.371-1.297-32.857 2.857.638-42.57-.261-84.909-30-122.86 0 0-30.958 80.922-31.43 103.57s9.452 40.166 9.452 40.166-8.568 36.741-6.298 58.232c2.295 21.741 20.443 59.676 27.265 78.658z" enable-background="new" fill="#ada469" fill-rule="evenodd"/>
+ </clipPath>
+ <filter id="ih" x="-.09" y="-.103" width="1.181" height="1.205">
+ <feGaussianBlur stdDeviation="5.346"/>
+ </filter>
+ <clipPath id="ii">
+ <path d="M760.16 935.83c6.794 18.903 10.494 33.3 11.89 51.212 1.397 17.912-3.783 51.801-2.9 70.656.881 18.845 8.133 40.099 27.345 48.969 19.419 8.966 49.319 10.211 74.12-3.146 24.8-13.357 57.4-70.326 70.974-97.309 13.624-27.084 38.76-114.5 44.66-149.77 5.9-35.27 2.551-41.3-4.617-49.055 2.64-27.84-1.5-54.935 13.11-87.186-30.249 11.826-37.382 40.161-48.319 65.505-8-50.933.21-71.273 3.319-101.22-29.065 14.778-42.862 47.114-45 92.857-10.924-1.304-21.391-4.434-33.571-.714-.264-46.023-1.464-76.889 8.91-114.21-53.254 21.027-62.946 106.59-56.052 112.78-10.883.535-21.371-1.297-32.857 2.857.638-42.57-.261-84.909-30-122.86 0 0-30.958 80.922-31.43 103.57s9.452 40.166 9.452 40.166-8.568 36.741-6.298 58.232c2.295 21.741 20.443 59.676 27.265 78.658z" enable-background="new" fill="#ada469" fill-rule="evenodd"/>
+ </clipPath>
+ <clipPath id="gc">
+ <path d="M569.03 1018.8c-4.286.714-27.628 3.618-57.857 10s-99.775 25.962-142.86 35.714-117.26 34.816-156.91 27.265c-39.648-7.55-89.516-64.408-89.516-64.408l4.286-94.286s85.886-16.201 112.14-33.571c26.257-17.37 45.582-49.666 59.286-71.429s32.857-71.429 32.857-71.429l238.57 262.14z" enable-background="accumulate" fill="#0b0b0b" fill-rule="evenodd" stroke="#000"/>
+ </clipPath>
+ <filter id="gb" x="-.211" y="-.168" width="1.422" height="1.336">
+ <feGaussianBlur stdDeviation="8.369"/>
+ </filter>
+ <clipPath id="fz">
+ <path d="M205.47-408.97l-.09.002c-1.446.786-6.7 1.143-7.276 9.039-.542 7.405 13.786 18.096 15.854 19.245 1.67.927 3.484 1.655 5.273 2.09l1.419.32c1.9.344 3.7.41 5.15.265 3.149-.314 5.164-.57 7.168-.815 2.004-.245 2.54-.865 3.643-1.03 1.144-.172 2.257.274 6.296-.438s5.515-1.038 6.234-1.35c.747-.325 1.818-.746 2.476-1.362 1.96.124 3.805-.026 5.265-.479 3.024-.936 4.991-1.525 6.863-2.277 1.501-.603 2.432-1.536 2.664-1.819.233-.282.15-.72.177-.74.045-.032.346-.07.53-.4.985-1.784 2.709-5.453 2.87-6.128.162-.673.263-1.338.34-1.73.046-.228-.085-.888-.059-.945.037-.08.305-.23.378-.475.35-1.177.243-2.195.128-3.625-.115-1.429-.648-4.67-1.24-5.568-.38-.577-.742-.846-1.09-.985-.066-.022-.12-.055-.183-.075-.02-.005-.042 0-.063-.004-.305-.097-.596-.2-1.138-.299-.975-.176-2.4-.428-3.942-.526a19.346 19.346 0 0
+0-1.565-.015c-3.572.064-9.057.361-10.307.91-1.606-.448-3.896-.886-5.89-.882-3.072.007-4.98-.005-6.922-.013-1.943-.01-1.71.27-2.932.265-1.322-.005-1.767-.347-5.556-.294-3.558.05-9.028.365-10.307.91-1.606-.448-3.893-.887-5.89-.882-3.072.007-4.982.027-6.924.018-.661-.003-1.049.05-1.354.093z" enable-background="new" fill="#bcb786" fill-rule="evenodd" opacity=".824"/>
+ </clipPath>
+ <filter id="fy" x="-.022" width="1.044">
+ <feGaussianBlur stdDeviation=".575"/>
+ </filter>
+ <clipPath id="fx">
+ <path d="M1049.2-282.27l-.09.008c-1.387.884-6.603 1.607-6.629 9.523-.024 7.426 15.013 17.091 17.156 18.094 1.73.81 3.592 1.41 5.406 1.72l1.438.218c1.92.212 3.72.151 5.156-.094 3.12-.532 5.112-.928 7.094-1.312 1.982-.384 2.474-1.04 3.562-1.281 1.129-.251 2.27.116 6.25-.875 3.98-.992 5.43-1.42 6.125-1.782.723-.376 1.762-.87 2.375-1.53 1.963-.013 3.794-.292 5.219-.845 2.951-1.144 4.873-1.869 6.688-2.75 1.455-.706 2.319-1.702 2.53-2 .213-.298.1-.728.126-.75.043-.035.34-.094.5-.437.859-1.847 2.323-5.628 2.437-6.313.114-.682.168-1.352.219-1.75.029-.23-.147-.879-.125-.937.031-.082.288-.25.344-.5.266-1.198.089-2.208-.125-3.625-.214-1.418-.972-4.615-1.625-5.469-.42-.548-.8-.792-1.157-.906-.067-.017-.123-.047-.187-.063-.021-.004-.042.003-.062 0-.312-.075-.609-.158-1.156-.218-.986-.109-2.425-.26-3.969-.25-.515.003-1.037.047-1.563.093-3.558.313-9.01.991-10.218
+1.625-1.634-.334-3.949-.612-5.938-.468-3.064.22-4.968.342-6.906.468-1.939.127-1.686.389-2.906.469-1.32.087-1.787-.223-5.563.094-3.546.297-8.98.993-10.22 1.625-1.632-.335-3.945-.613-5.937-.469-3.064.221-4.967.373-6.906.5-.659.043-1.042.124-1.344.187z" enable-background="new" fill="#bcb786" fill-rule="evenodd" opacity=".824"/>
+ </clipPath>
+ <filter id="fw" x="-.082" y="-.227" width="1.163" height="1.453">
+ <feGaussianBlur stdDeviation="2.437"/>
+ </filter>
+ <filter id="fv" x="-.041" y="-.113" width="1.082" height="1.227">
+ <feGaussianBlur stdDeviation="1.219"/>
+ </filter>
+ <clipPath id="y">
+ <path d="M352.25 211.99c-3.804-25.264-16.81-50.638-17.157-75.525-.186-13.356 3.273-26.571 13.756-39.554 36.347-65.296 116.94-84.695 185.93-91.465 86.922-11.017 184.91 17.94 233.37 95.401 54.124 75.733 56.675 172.54 80.612 259.53 29.438 127.13 54.779 256.21 60.392 386.85-3.063 78.182-8.426 165.18-60.503 228.13-48.027 50.357-122.79 50.053-187.07 59.002-90.555 4.655-184.35-16.146-261.78-64.198-64.776-37.94-95.73-113.48-97.279-186.02-8.39-79.875 26.392-153.81 51.62-227.16 7.47-82.761 9.413-166.25 9.653-249.38-.837-32.195-7.09-63.817-11.546-95.609z" enable-background="accumulate" fill="#262f2f" fill-rule="evenodd" stroke="#000"/>
+ </clipPath>
+ <filter id="fu" x="-.252" y="-.053" width="1.503" height="1.106">
+ <feGaussianBlur stdDeviation="13.025"/>
+ </filter>
+ <clipPath id="fs">
+ <path d="M734.03 519.49s16.755 37.018 28.701 53.954 52.727 56.046 52.727 56.046l.597-138.59" enable-background="accumulate" fill="#1a1a1a" fill-rule="evenodd" stroke="#000"/>
+ </clipPath>
+ <filter id="fr" x="-.274" y="-.213" width="1.549" height="1.427">
+ <feGaussianBlur stdDeviation="11.314"/>
+ </filter>
+ <clipPath id="fq">
+ <path d="M178.21 274.15c-3.804-25.264-16.81-50.638-17.157-75.525-.186-13.356 3.273-26.571 13.756-39.554 36.347-65.296 116.94-84.695 185.93-91.465 86.922-11.017 184.91 17.94 233.37 95.401 54.124 75.733 56.675 172.54 80.612 259.53 29.438 127.13 54.779 256.21 60.392 386.85-3.063 78.182-8.426 165.18-60.503 228.13-48.027 50.357-122.79 50.053-187.07 59.002-90.555 4.655-184.35-16.146-261.78-64.198-64.776-37.94-95.73-113.48-97.279-186.02-8.39-79.875 26.392-153.81 51.62-227.16 7.47-82.761 9.413-166.25 9.653-249.38-.837-32.195-7.09-63.817-11.546-95.609z" enable-background="accumulate" fill="#262f2f" fill-rule="evenodd"/>
+ </clipPath>
+ <filter id="aq">
+ <feGaussianBlur stdDeviation="2.04"/>
+ </filter>
+ <filter id="g">
+ <feTurbulence baseFrequency=".24" numOctaves="10" result="result0" seed="655" type="fractalNoise"/>
+ <feDisplacementMap in="SourceGraphic" in2="result0" scale="62" xChannelSelector="B" yChannelSelector="G"/>
+ </filter>
+ <clipPath id="fp">
+ <path d="M352.25 211.99c-3.804-25.264-16.81-50.638-17.157-75.525-.186-13.356 3.273-26.571 13.756-39.554 36.347-65.296 116.94-84.695 185.93-91.465 86.922-11.017 184.91 17.94 233.37 95.401 54.124 75.733 56.675 172.54 80.612 259.53 29.438 127.13 54.779 256.21 60.392 386.85-3.063 78.182-8.426 165.18-60.503 228.13-48.027 50.357-122.79 50.053-187.07 59.002-90.555 4.655-184.35-16.146-261.78-64.198-64.776-37.94-95.73-113.48-97.279-186.02-8.39-79.875 26.392-153.81 51.62-227.16 7.47-82.761 9.413-166.25 9.653-249.38-.837-32.195-7.09-63.817-11.546-95.609z" enable-background="accumulate" fill="#262f2f" fill-rule="evenodd" stroke="#000"/>
+ </clipPath>
+ <filter id="fo" x="-.353" y="-.182" width="1.706" height="1.363">
+ <feGaussianBlur stdDeviation="48.038"/>
+ </filter>
+ <clipPath id="fn">
+ <path d="m265.94 126.68l-18.767 168.86 174.11-73.121 61.954 88.659 57.884-31.99-37.534-180.06-237.64 27.649z" fill-rule="evenodd" stroke="#000" stroke-width=".9"/>
+ </clipPath>
+ <filter id="fm" x="-.277" y="-.325" width="1.554" height="1.65">
+ <feGaussianBlur stdDeviation="19.956"/>
+ </filter>
+ <mask id="fk" maskUnits="userSpaceOnUse">
+ <ellipse transform="translate(-174.03 62.156)" cx="579.47" cy="260.58" rx="192.69" ry="164.05" enable-background="accumulate" fill="url(#dt)"/>
+ </mask>
+ <filter id="fj" x="-.611" y="-.149" width="2.223" height="1.299">
+ <feGaussianBlur stdDeviation="37.83"/>
+ </filter>
+ <filter id="fi" x="-.259" y="-.224" width="1.518" height="1.447">
+ <feGaussianBlur stdDeviation="19.632"/>
+ </filter>
+ <filter id="fh" x="-.3" y="-.3" width="1.6" height="1.6">
+ <feGaussianBlur stdDeviation="2"/>
+ </filter>
+ <clipPath id="x">
+ <path d="M179.64 267.36c-22.41 39.703-60.616 115.78-69.286 149.64-8.647 33.775-8.772 66.417-.357 86.429 8.36 19.882 26.164 35.633 40.714 41.429-.597-14.376 14.373-43.286 72.857-72.5 58.626-29.285 78.382-27.131 103.57-47.143 25.63-20.362 12.61-67.045 3.214-93.929-9.434-26.993-34.967-59.124-66.429-69.643-31.033-10.375-65.018-4.848-84.286 5.714z" fill="#f5ff04" fill-rule="evenodd"/>
+ </clipPath>
+ <filter id="hd">
+ <feGaussianBlur stdDeviation="6.589"/>
+ </filter>
+ <filter id="hc">
+ <feGaussianBlur stdDeviation="1.505"/>
+ </filter>
+ <filter id="hb" x="-.243" y="-.391" width="1.487" height="1.782">
+ <feGaussianBlur stdDeviation="14.59"/>
+ </filter>
+ <filter id="ha" x="-.146" y="-.235" width="1.292" height="1.47">
+ <feGaussianBlur stdDeviation="4.444"/>
+ </filter>
+ <filter id="gz">
+ <feGaussianBlur stdDeviation=".606"/>
+ </filter>
+ <clipPath id="if">
+ <path d="m709.29 844.51c54.286-1.429 126.04-15.052 170-26.786 44.053-11.757 125.89-36.347 175.36-57.857 49.339-21.453 113.6-59.282 154.29-92.143 40.508-32.721 52.39-55.82 60.714-33.571 8.37 22.368-16.407 56.326-37.857 81.071-21.604 24.923-52.731 52.705-98.929 89.286s-156.08 101.58-212.86 128.57c-57.066 27.125-128.2 58.238-172.14 72.5s-131.43 31.071-131.43 31.071l92.857-192.14z" enable-background="accumulate" fill="#121212" fill-rule="evenodd"/>
+ </clipPath>
+ <mask id="gy" maskUnits="userSpaceOnUse">
+ <path d="m718.41-224.31l33.25 56 276-24 159.5-48.25-66.5-82.75-402.25 99z" fill="url(#w)" fill-rule="evenodd"/>
+ </mask>
+ <clipPath id="gx">
+ <path transform="translate(.08 -.031)" d="M1111.4-285.94l-3.937 1.875c-.041.01-.1.02-.125.031-.42.213-.165.1-.657.313-.486.21-1.737.584-4.093 1.469-3.332 1.25-5.805 2.15-7 3.062-1.537.021-3.72.233-5.657.719a227.677 227.677 0 0 1-6.75 1.594c-1.895.42-1.675.642-2.875.875-1.296.251-1.721-.01-5.437.78-3.49.743-8.895 1.932-10.156 2.688-1.584-.18-3.868-.321-5.844-.03-3.04.446-4.916.672-6.844.905-.655.08-1.04.201-1.344.282-.426.131-.685.26-1.375.343-1.311.16-1.762-.156-5.53.282-3.555.413-9.006 1.272-10.25 1.937-1.6-.297-3.859-.534-5.845-.344-3.058.294-4.972.484-6.906.657-1.934.172-1.688.422-2.906.53-1.316.119-1.76-.163-5.531.25-3.542.39-9.008 1.21-10.281 1.876-1.6-.295-3.887-.507-5.875-.313-3.059.3-4.941.48-6.875.657-.658.06-1.04.178-1.344.25-.428.119-.683.218-1.375.28-1.316.121-1.76-.194-5.531.22-3.556.39-9.006 1.239-10.25
+1.906-1.599-.294-3.86-.524-5.844-.313-3.056.326-4.974.527-6.906.719s-1.69.44-2.906.563c-1.315.131-1.763-.165-5.532.28-3.539.42-8.977 1.293-10.25 1.97-1.597-.281-3.86-.42-5.843-.188-3.052.358-4.945.568-6.875.781-.657.073-1.041.173-1.344.25-.427.128-.685.268-1.375.344-1.314.146-1.768-.174-5.531.313-3.55.458-8.979 1.419-10.22 2.125-1.593-.245-3.833-.382-5.812-.125-3.048.394-4.95.648-6.875.906-1.924.258-1.726.493-2.937.656-1.31.176-1.748-.104-5.5.469-3.525.538-8.924 1.699-10.188 2.437-1.588-.203-3.846-.254-5.813.094-3.026.536-4.899.862-6.812 1.188-.651.11-1.014.27-1.313.375-.42.164-.663.33-1.344.468-1.294.262-1.727-.006-5.437.813-3.499.772-8.846 2.383-10.062 3.219-1.563-.078-3.758.085-5.688.593-2.972.783-4.817 1.232-6.687 1.75s-1.667.768-2.844 1.094c-1.273.353-1.697.107-5.344 1.188-3.425 1.014-8.65 2.933-9.875 3.843-1.539.013-3.72.273-5.625.875-2.93.928-4.75 1.459-6.594
+2.063-.626.205-.991.393-1.28.531-.408.214-.654.409-1.313.625-1.255.412-1.687.19-5.282 1.438-3.39 1.177-8.595 3.213-9.78 4.156-1.525.06-3.65.395-5.532 1.062-2.897 1.029-4.699 1.676-6.531 2.313-1.832.637-1.628.848-2.781 1.25-1.247.434-1.664.2-5.22 1.562-3.338 1.28-8.486 3.483-9.687 4.47-1.507.107-3.635.498-5.5 1.218a1047.26 1047.26 0 0 1-6.437 2.469c-.617.233-.997.442-1.281.594v.03l-8 3.188 1.812 14.72c-.258-.062 6.188 3.312 6.188 3.312.226-.145.449-.273.718-.375 1.08-.41 2.172-.216 6-1.688 3.829-1.471 5.224-2.005 5.907-2.406.68-.4 1.611-.88 2.218-1.531 1.827-.138 3.571-.493 4.938-1 2.968-1.1 4.875-1.806 6.781-2.469 1.906-.662 2.354-1.415 3.406-1.781 1.092-.38 2.195-.166 6.063-1.531 3.867-1.366 5.283-1.827 5.969-2.22.7-.4 1.7-.932 2.312-1.593 1.97-.055 3.817-.385 5.281-.875 3.002-1.005 4.927-1.622 6.844-2.25 1.539-.504 2.174-1.047 2.906-1.437.23-.135.476-.254.75-.344 1.099-.36 2.182-.082
+6.094-1.313 3.912-1.23 5.366-1.673 6.063-2.03.694-.358 1.63-.794 2.25-1.407 1.865-.023 3.635-.267 5.03-.688 3.031-.913 4.993-1.43 6.938-1.968 1.945-.539 2.427-1.265 3.5-1.563 1.114-.31 2.22.007 6.188-1.031 3.967-1.039 5.417-1.433 6.125-1.75.735-.33 1.814-.754 2.437-1.375 1.998.116 3.858-.02 5.344-.375 3.078-.735 5.084-1.101 7.063-1.5 1.588-.32 2.244-.79 3-1.094a3.4 3.4 0 0 1 .75-.25c1.133-.23 2.304.209 6.343-.5 4.04-.709 5.5-.927 6.22-1.187.715-.26 1.704-.568 2.343-1.094 1.924.24 3.748.224 5.188 0 3.126-.488 5.154-.7 7.156-.969 2.001-.268 2.489-.945 3.594-1.094 1.146-.154 2.275.302 6.343-.219 4.068-.52 5.56-.695 6.282-.937.737-.247 1.798-.586 2.437-1.125 2.05.336 3.974.398 5.5.219 3.142-.37 5.18-.56 7.188-.782 1.61-.178 2.264-.608 3.03-.843.242-.086.495-.156.782-.188 1.15-.127 2.301.347 6.375-.125s5.559-.61 6.281-.844c.72-.232 1.7-.473 2.344-.968 1.936.333 3.77.404 5.219.25 3.146-.335
+5.177-.519 7.187-.719 2.01-.2 2.484-.826 3.594-.938 1.151-.115 2.297.366 6.375-.062s5.589-.562 6.313-.781c.739-.224 1.795-.514 2.437-1.031 2.057.398 4.002.493 5.531.343 3.149-.308 5.176-.473 7.188-.656 1.614-.147 2.263-.56 3.031-.781.241-.081.494-.13.781-.156 1.152-.106 2.293.392 6.375 0s5.59-.531 6.313-.75c.72-.219 1.7-.448 2.343-.938 1.939.35 3.77.454 5.22.313 3.148-.309 5.175-.474 7.187-.657 2.011-.183 2.514-.838 3.625-.937 1.152-.103 2.292.385 6.375 0s5.588-.501 6.312-.719c.74-.222 1.796-.514 2.438-1.031 2.057.402 4.003.503 5.531.344 3.147-.329 5.177-.523 7.187-.72 1.613-.156 2.266-.63 3.032-.874.24-.088.463-.122.75-.156 1.148-.14 2.316.34 6.375-.25 4.058-.59 5.562-.778 6.281-1.032.717-.253 1.675-.558 2.312-1.093 1.92.212 3.72.151 5.157-.094 3.119-.533 5.111-.929 7.093-1.313 1.983-.383 2.475-1.04 3.563-1.28 1.129-.251 2.27.115 6.25-.876s5.43-1.42 6.125-1.781c.722-.376 1.762-.87
+2.375-1.531 1.963-.012 3.794-.291 5.219-.844 2.95-1.145 4.873-1.87 6.687-2.75 1.456-.706 2.32-1.702 2.531-2 .213-.298.1-.729.125-.75.043-.035.34-.094.5-.437.86-1.848 2.324-5.628 2.438-6.313.114-.682.168-1.353.219-1.75.029-.23-.147-.879-.125-.937.03-.082.288-.251.343-.5.267-1.199.09-2.208-.125-3.625-.213-1.418-.971-4.615-1.625-5.47-.658-.861-1.224-1.01-1.75-1z" enable-background="new" fill="#bcb786" fill-rule="evenodd"/>
+ </clipPath>
+ <filter id="gw">
+ <feGaussianBlur stdDeviation="1.168"/>
+ </filter>
+ <filter id="gv" x="-.103" y="-.342" width="1.206" height="1.685">
+ <feGaussianBlur stdDeviation="1.168"/>
+ </filter>
+ <filter id="gu" x="-.099" y="-.226" width="1.198" height="1.453">
+ <feGaussianBlur stdDeviation="1.168"/>
+ </filter>
+ <filter id="gt" x="-.098" y="-.198" width="1.196" height="1.397">
+ <feGaussianBlur stdDeviation="1.168"/>
+ </filter>
+ <filter id="gs">
+ <feGaussianBlur stdDeviation="1.168"/>
+ </filter>
+ <filter id="gr" x="-.098" y="-.198" width="1.197" height="1.395">
+ <feGaussianBlur stdDeviation="1.168"/>
+ </filter>
+ <filter id="gq" x="-.098" y="-.198" width="1.196" height="1.397">
+ <feGaussianBlur stdDeviation="1.168"/>
+ </filter>
+ <filter id="gp" x="-.098" y="-.203" width="1.197" height="1.406">
+ <feGaussianBlur stdDeviation="1.168"/>
+ </filter>
+ <filter id="go" x="-.098" y="-.209" width="1.197" height="1.417">
+ <feGaussianBlur stdDeviation="1.168"/>
+ </filter>
+ <filter id="gn" x="-.099" y="-.225" width="1.198" height="1.451">
+ <feGaussianBlur stdDeviation="1.168"/>
+ </filter>
+ <filter id="gm" x="-.101" y="-.274" width="1.201" height="1.548">
+ <feGaussianBlur stdDeviation="1.168"/>
+ </filter>
+ <filter id="gl" x="-.102" y="-.324" width="1.204" height="1.647">
+ <feGaussianBlur stdDeviation="1.168"/>
+ </filter>
+ <filter id="gk" x="-.103" y="-.364" width="1.207" height="1.729">
+ <feGaussianBlur stdDeviation="1.168"/>
+ </filter>
+ <filter id="gj" x="-.105" y="-.405" width="1.209" height="1.809">
+ <feGaussianBlur stdDeviation="1.168"/>
+ </filter>
+ <filter id="gi">
+ <feGaussianBlur stdDeviation="1.723"/>
+ </filter>
+ <filter id="gh">
+ <feGaussianBlur stdDeviation=".35"/>
+ </filter>
+ <filter id="gg" x="-.031" y="-.103" width="1.062" height="1.205">
+ <feGaussianBlur stdDeviation=".35"/>
+ </filter>
+ <filter id="gf">
+ <feGaussianBlur stdDeviation=".35"/>
+ </filter>
+ <filter id="ge">
+ <feGaussianBlur stdDeviation=".35"/>
+ </filter>
+ <filter id="ff">
+ <feGaussianBlur stdDeviation=".35"/>
+ </filter>
+ <filter id="fe">
+ <feGaussianBlur stdDeviation=".35"/>
+ </filter>
+ <filter id="fd">
+ <feGaussianBlur stdDeviation=".35"/>
+ </filter>
+ <filter id="fc">
+ <feGaussianBlur stdDeviation=".35"/>
+ </filter>
+ <filter id="fb">
+ <feGaussianBlur stdDeviation=".35"/>
+ </filter>
+ <filter id="fa">
+ <feGaussianBlur stdDeviation=".35"/>
+ </filter>
+ <filter id="ez">
+ <feGaussianBlur stdDeviation=".35"/>
+ </filter>
+ <filter id="ey">
+ <feGaussianBlur stdDeviation=".35"/>
+ </filter>
+ <filter id="ex" x="-.031" y="-.109" width="1.062" height="1.219">
+ <feGaussianBlur stdDeviation=".35"/>
+ </filter>
+ <filter id="ew" x="-.031" y="-.121" width="1.063" height="1.243">
+ <feGaussianBlur stdDeviation=".35"/>
+ </filter>
+ <clipPath id="ie">
+ <path d="m266.27 924.57c-1.407 18.801-1.145 32.751 2.082 49.303 3.226 16.552 16.406 45.907 20.334 63.184 3.926 17.267 2.694 38.31-12.46 51.148-15.317 12.978-42.05 21.599-67.831 15.734s-69.55-49.223-88.59-70.228c-19.112-21.083-63.761-93.851-77.94-124.28-14.177-30.425-12.66-36.719-8.119-45.53-9.367-24.52-12.414-50.067-33.712-75.577 30.325 3.114 43.88 26.956 60.126 47.14-5.53-48.076-18.055-64.417-28.374-90.724 29.994 6.082 50.579 31.872 63.98 72.712 9.554-3.918 18.238-9.373 30.187-9.061-11.298-41.696-17.949-69.916-36.687-101.07 53.442 5.67 83.657 80.639 78.971 87.96 9.978-2.243 19.006-6.53 30.437-5.65-11.249-38.348-21.048-76.869-3.66-118.65 0 0 48.287 65.436 54.39 85.805 6.103 20.37 1.52 38.701 1.52 38.701s16.96 31.085 20.293 51.094c3.373 20.241-3.533 59.103-4.946 77.983z" enable-background="new" fill="#ada469" fill-rule="evenodd"/>
+ </clipPath>
+ <filter id="id">
+ <feGaussianBlur stdDeviation="7.18"/>
+ </filter>
+ <clipPath id="ic">
+ <path d="m266.27 924.57c-1.407 18.801-1.145 32.751 2.082 49.303 3.226 16.552 16.406 45.907 20.334 63.184 3.926 17.267 2.694 38.31-12.46 51.148-15.317 12.978-42.05 21.599-67.831 15.734s-69.55-49.223-88.59-70.228c-19.112-21.083-63.761-93.851-77.94-124.28-14.177-30.425-12.66-36.719-8.119-45.53-9.367-24.52-12.414-50.067-33.712-75.577 30.325 3.114 43.88 26.956 60.126 47.14-5.53-48.076-18.055-64.417-28.374-90.724 29.994 6.082 50.579 31.872 63.98 72.712 9.554-3.918 18.238-9.373 30.187-9.061-11.298-41.696-17.949-69.916-36.687-101.07 53.442 5.67 83.657 80.639 78.971 87.96 9.978-2.243 19.006-6.53 30.437-5.65-11.249-38.348-21.048-76.869-3.66-118.65 0 0 48.287 65.436 54.39 85.805 6.103 20.37 1.52 38.701 1.52 38.701s16.96 31.085 20.293 51.094c3.373 20.241-3.533 59.103-4.946 77.983z" enable-background="new" fill="#ada469" fill-rule="evenodd"/>
+ </clipPath>
+ <filter id="gd">
+ <feGaussianBlur stdDeviation="6.82"/>
+ </filter>
+ <clipPath id="ev">
+ <path d="M760.16 935.83c6.794 18.903 10.494 33.3 11.89 51.212 1.397 17.912-3.783 51.801-2.9 70.656.881 18.845 8.133 40.099 27.345 48.969 19.419 8.966 49.319 10.211 74.12-3.146 24.8-13.357 57.4-70.326 70.974-97.309 13.624-27.084 38.76-114.5 44.66-149.77 5.9-35.27 2.551-41.3-4.617-49.055 2.64-27.84-1.5-54.935 13.11-87.186-30.249 11.826-37.382 40.161-48.319 65.505-8-50.933.21-71.273 3.319-101.22-29.065 14.778-42.862 47.114-45 92.857-10.924-1.304-21.391-4.434-33.571-.714-.264-46.023-1.464-76.889 8.91-114.21-53.254 21.027-62.946 106.59-56.052 112.78-10.883.535-21.371-1.297-32.857 2.857.638-42.57-.261-84.909-30-122.86 0 0-30.958 80.922-31.43 103.57s9.452 40.166 9.452 40.166-8.568 36.741-6.298 58.232c2.295 21.741 20.443 59.676 27.265 78.658z" enable-background="new" fill="#ada469" fill-rule="evenodd"/>
+ </clipPath>
+ <filter id="eu" x="-.144" y="-.103" width="1.288" height="1.206">
+ <feGaussianBlur stdDeviation="7.389"/>
+ </filter>
+ <clipPath id="et">
+ <path d="M760.16 935.83c6.794 18.903 10.494 33.3 11.89 51.212 1.397 17.912-3.783 51.801-2.9 70.656.881 18.845 8.133 40.099 27.345 48.969 19.419 8.966 49.319 10.211 74.12-3.146 24.8-13.357 57.4-70.326 70.974-97.309 13.624-27.084 38.76-114.5 44.66-149.77 5.9-35.27 2.551-41.3-4.617-49.055 2.64-27.84-1.5-54.935 13.11-87.186-30.249 11.826-37.382 40.161-48.319 65.505-8-50.933.21-71.273 3.319-101.22-29.065 14.778-42.862 47.114-45 92.857-10.924-1.304-21.391-4.434-33.571-.714-.264-46.023-1.464-76.889 8.91-114.21-53.254 21.027-62.946 106.59-56.052 112.78-10.883.535-21.371-1.297-32.857 2.857.638-42.57-.261-84.909-30-122.86 0 0-30.958 80.922-31.43 103.57s9.452 40.166 9.452 40.166-8.568 36.741-6.298 58.232c2.295 21.741 20.443 59.676 27.265 78.658z" enable-background="new" fill="#ada469" fill-rule="evenodd"/>
+ </clipPath>
+ <filter id="es" x="-.09" y="-.103" width="1.181" height="1.205">
+ <feGaussianBlur stdDeviation="5.346"/>
+ </filter>
+ <marker id="eq" overflow="visible" orient="auto">
+ <path d="m-1.2 0l-1 1 3.5-1-3.5-1 1 1z" fill="#f8d615" fill-rule="evenodd" stroke="#f8d615" stroke-width=".2pt"/>
+ </marker>
+ <radialGradient id="r" cx="142.96" cy="107.09" r="66.982" gradientTransform="matrix(-.33248 .90223 -.95824 -.35312 305.29 19.909)" gradientUnits="userSpaceOnUse">
+ <stop stop-color="#1e2323" offset="0"/>
+ <stop stop-color="#181d1d" offset=".56"/>
+ <stop offset="1"/>
+ </radialGradient>
+ <radialGradient id="q" cx="317.79" cy="129.65" r="47.863" gradientTransform="matrix(1.0036 0 0 1.6613 -160.53 -96.205)" gradientUnits="userSpaceOnUse" xlink:href="#u"/>
+ <radialGradient id="p" cx="325.31" cy="80.91" r="26.938" gradientTransform="matrix(2.0748 -.1578 .23824 3.1325 -550.77 -65.729)" gradientUnits="userSpaceOnUse">
+ <stop stop-color="#283131" stop-opacity="0" offset="0"/>
+ <stop stop-color="#1e2424" stop-opacity="0" offset=".513"/>
+ <stop offset="1"/>
+ </radialGradient>
+ <linearGradient id="kt" x1="347.9" x2="275.58" y1="1070.2" y2="867.98" gradientUnits="userSpaceOnUse">
+ <stop offset="0"/>
+ <stop offset=".022"/>
+ <stop offset=".759"/>
+ <stop stop-color="#232323" offset=".885"/>
+ <stop stop-color="#595959" offset="1"/>
+ </linearGradient>
+ <linearGradient id="kh" x1="603.84" x2="616.24" y1="627.85" y2="585.43" gradientTransform="translate(450.03 73.844)" gradientUnits="userSpaceOnUse" xlink:href="#t"/>
+ <linearGradient id="el" x1="609.31" x2="560.83" y1="239.47" y2="262.86" gradientTransform="translate(450.03 73.844)" gradientUnits="userSpaceOnUse">
+ <stop stop-color="#0a0c0c" offset="0"/>
+ <stop stop-color="#1f2727" stop-opacity="0" offset="1"/>
+ </linearGradient>
+ <radialGradient id="ek" cx="543.67" cy="147.31" r="47.863" gradientTransform="matrix(2.1382 0 0 2.3383 -77.038 -101.69)" gradientUnits="userSpaceOnUse" xlink:href="#u"/>
+ <radialGradient id="ef" cx="385" cy="237.01" r="86.929" gradientTransform="matrix(1 0 0 .8562 0 34.08)" gradientUnits="userSpaceOnUse">
+ <stop stop-color="#ada469" offset="0"/>
+ <stop stop-color="#ada469" offset=".811"/>
+ <stop stop-color="#fff" offset="1"/>
+ </radialGradient>
+ <linearGradient id="ee" x1="398.21" x2="379.29" y1="343.52" y2="265.31" gradientTransform="translate(450.03 73.844)" gradientUnits="userSpaceOnUse" xlink:href="#s"/>
+ <radialGradient id="ed" cx="397.16" cy="336.95" r="36.75" gradientTransform="translate(-375.32 -318.42) scale(1.945)" gradientUnits="userSpaceOnUse">
+ <stop stop-color="#344040" offset="0"/>
+ <stop stop-color="#222929" offset=".5"/>
+ <stop offset="1"/>
+ </radialGradient>
+ <radialGradient id="ec" cx="402.49" cy="317.24" r="23.714" gradientTransform="translate(-1358.3 -1070.7) scale(4.3777)" gradientUnits="userSpaceOnUse">
+ <stop offset="0"/>
+ <stop stop-color="#1d1d1d" offset="1"/>
+ </radialGradient>
+ <radialGradient id="eb" cx="250.23" cy="475.1" r="95.989" gradientTransform="matrix(1.2259 -.70777 .1414 .24491 322.22 608.92)" gradientUnits="userSpaceOnUse">
+ <stop stop-color="#d9e002" offset="0"/>
+ <stop stop-color="#a9ae01" offset=".5"/>
+ <stop stop-color="#717501" offset="1"/>
+ </radialGradient>
+ <radialGradient id="m" cx="228.81" cy="440.27" r="119.18" gradientTransform="matrix(1.1323 .76595 -1.455 2.151 588.75 -711.8)" gradientUnits="userSpaceOnUse">
+ <stop stop-color="#ff0" offset="0"/>
+ <stop stop-color="#b2b200" offset="1"/>
+ </radialGradient>
+ <radialGradient id="dz" cx="275.44" cy="335.35" r="36.75" gradientTransform="matrix(.05911 2.687 -.72343 .01591 408.73 -424.56)" gradientUnits="userSpaceOnUse">
+ <stop stop-color="#ff0" offset="0"/>
+ <stop stop-color="#ff0" stop-opacity="0" offset="1"/>
+ </radialGradient>
+ <linearGradient id="l" x1="182.35" x2="145.53" y1="256.11" y2="542.21" gradientUnits="userSpaceOnUse">
+ <stop stop-color="#7d7d00" offset="0"/>
+ <stop stop-color="#c6c700" offset=".364"/>
+ <stop stop-color="#f6f800" offset="1"/>
+ </linearGradient>
+ <radialGradient id="dy" cx="296.34" cy="427.18" r="19.704" gradientTransform="translate(-599.29 -827.09) scale(2.9797)" gradientUnits="userSpaceOnUse">
+ <stop stop-opacity="0" offset="0"/>
+ <stop offset="1"/>
+ </radialGradient>
+ <radialGradient id="dx" cx="429.57" cy="377.43" r="72.08" gradientTransform="matrix(1 0 0 .61803 0 144.16)" gradientUnits="userSpaceOnUse">
+ <stop stop-color="#e2e2e2" offset="0"/>
+ <stop stop-color="#e2e2e2" stop-opacity="0" offset="1"/>
+ </radialGradient>
+ <radialGradient id="dw" cx="437.7" cy="391.22" r="36.612" gradientTransform="matrix(1 0 0 .61803 0 149.43)" gradientUnits="userSpaceOnUse">
+ <stop stop-color="#c7bd80" offset="0"/>
+ <stop stop-color="#c7bd80" stop-opacity="0" offset="1"/>
+ </radialGradient>
+ <linearGradient id="dv" x1="412.09" x2="417.38" y1="404.92" y2="401.83" gradientUnits="userSpaceOnUse" xlink:href="#j"/>
+ <linearGradient id="du" x1="411.91" x2="417.38" y1="404.92" y2="401.83" gradientUnits="userSpaceOnUse" xlink:href="#j"/>
+ <linearGradient id="ej" x1="411.91" x2="417.38" y1="405.54" y2="401.83" gradientUnits="userSpaceOnUse" xlink:href="#j"/>
+ <linearGradient id="ei" x1="412.09" x2="417.38" y1="405.54" y2="401.83" gradientUnits="userSpaceOnUse" xlink:href="#j"/>
+ <linearGradient id="eh" x1="411.73" x2="417.38" y1="405.54" y2="401.83" gradientUnits="userSpaceOnUse" xlink:href="#j"/>
+ <linearGradient id="jr" x1="1255.7" x2="893.7" y1="667.09" y2="858.01" gradientUnits="userSpaceOnUse" xlink:href="#n"/>
+ <linearGradient id="ft" x1="603.84" x2="616.24" y1="627.85" y2="585.43" gradientTransform="matrix(1.0057 0 0 2.3995 3424.4 -24.137)" gradientUnits="userSpaceOnUse" xlink:href="#t"/>
+ <radialGradient id="fl" cx="418.3" cy="342.48" r="131.45" gradientTransform="matrix(1.3957 .62111 -.42441 .95372 -15.062 -227.97)" gradientUnits="userSpaceOnUse" xlink:href="#s"/>
+ <radialGradient id="fg" cx="275.44" cy="335.35" r="36.75" gradientTransform="matrix(.05911 2.687 -.72343 .01591 408.73 -424.56)" gradientUnits="userSpaceOnUse" xlink:href="#n"/>
+ <clipPath id="ga">
+ <path d="M3492.3 296.46H3930v902.66h-437.7z" fill="#fff"/>
+ </clipPath>
+ </defs>
+ <g stroke-miterlimit="1">
+ <path d="M10.45 109.38h1876.7v1563H10.45z" enable-background="new" fill="#a8a8a8" stroke="#000" stroke-width="20.9"/>
+ <path d="M2337.1 111.42h1878.8v1565H2337.1z" enable-background="new" fill="none" stroke="#000" stroke-width="20.925"/>
+ <path d="M2358.4 132.96h1833.4v1522.9H2358.4z" enable-background="new" fill="#a8a8a8" stroke="#f83615" stroke-width="20.391"/>
+ </g>
+ <g fill-rule="evenodd">
+ <path transform="translate(-2833.5 -4370) scale(10.727)" d="M304.64 526.65c-10 .357-18.214 2.857-18.214 2.857l7.5 6.071 10.357 3.572 16.071.357 22.5-5.357 7.857 1.071 20.357-2.143-10.357 6.786c5.46-1.023 17.393 3.57 9.643 5.357-1.74.402 13.929-4.643 13.929-4.643l2.5-4.642 3.571-9.286h11.43l18.213-4.643 3.572-5-16.071 1.071-12.143 2.143-14.643-5-70.692 16.708-5.38-5.279z" enable-background="new" filter="url(#ep)" opacity=".5"/>
+ <g enable-background="new">
+ <path transform="matrix(.71084 -.19374 .26296 .96481 552.25 332.01)" d="m245.12 100.05s-47.128-31.647-67.215-35.801c-20.038-4.144-38.473-3.318-51.934 13.607s-12.077 61.265-13.536 86.97 2.55 70.177 17.605 88.666c15.055 18.488 45.886 13.585 49.927 21.414 2.213 4.287 65.152-174.86 65.152-174.86z" enable-background="accumulate" fill="url(#r)"/>
+ <path transform="matrix(.71084 -.19374 .26296 .96481 552.25 332.01)" d="M135.38 82.018s26.344 1.939 37.633 13.903c11.415 12.097 13.735 21.332 15.296 37.735 1.563 16.425-.85 28.418-7.814 36.037s-1.004 19.583-25.916 12.071-27.032-27.783-26.515-46.305c.517-18.529 7.316-53.441 7.316-53.441z" enable-background="accumulate" fill="url(#q)"/>
+ <path transform="matrix(.71084 -.19374 .26296 .96481 552.25 332.01)" d="M135.65 81.927s-4.645 16.365.588 28.563c5.488 12.793 27.224 44.26 27.224 54.656l22.656-5c2.542-6.966 3.21-15.752 2.188-26.5-1.562-16.403-3.867-25.621-15.281-37.719-9.655-10.232-31.593-13.375-37.375-14z" enable-background="new" fill="url(#p)"/>
+ </g>
+ <path d="m893.6 1350.3c-4.286 0.714-27.628 3.618-57.857 10s-57.314 4.966-135.79 17.33c-79.852 12.581-94.064 42.542-108.12 47.064-14.7 4.729-145.38-65.822-145.38-65.822l4.286-94.286s85.886-16.201 112.14-33.571c26.257-17.37 45.582-49.666 59.286-71.429s32.857-71.429 32.857-71.429l238.57 262.14z" enable-background="accumulate" stroke="#000"/>
+ <path transform="translate(324.57 331.53)" d="m332.34 898.39l-32.732-61.3-37.617 45.106c2.177 1.317 5.774-20.856 45.6-64.417l24.749 80.61z" clip-path="url(#eo)" enable-background="accumulate" fill="#fff" filter="url(#kv)" opacity=".5"/>
+ <path transform="translate(324.57 331.53)" d="m200.82 863.03l146.37-51.619 243.95 226.27-241.83 140.01-181.02-87.681 32.527-226.98z" clip-path="url(#ku)" enable-background="accumulate" fill="url(#kt)" filter="url(#ks)"/>
+ <path d="m691.46 835.66s-29.554 40.573-47.857 74.286-58.621 126.36-70.357 171.07c-11.759 44.803-62.5 123.57-62.5 123.57l76.071 18.214s11.807-12.823 31.071-46.071 60.357-138.57 60.357-138.57l13.214-202.5z" enable-background="accumulate" fill="#0f0f0f"/>
+ <path transform="translate(324.57 331.53)" d="m430.28 381.94c-7.071 2.828-236.18 32.152-236.18 32.152l-39.64 359.83 90.198 92.64 52.326-114.55 100.47-186.39 32.828-183.68z" clip-path="url(#kr)" enable-background="accumulate" fill="#fff" filter="url(#kq)" opacity=".4"/>
+ <path d="m1018.2 1359.5s23.256 11.394 36.068 20.476c12.697 9.001 29.472 24.649 41.692 37.36 12.306 12.8 20.113 22.599 41.533 24.161 21.432 1.563 53.282-8.788 73.296-24.664 20.014-15.877 45.647-69.233 45.647-69.233l-127.16-143.07" enable-background="accumulate" stroke="#000"/>
+ <path transform="translate(324.57 331.53)" d="M331.34 641.5L216.17 835.36l44.042 90.598 97.581-193.75-26.456-90.711z" clip-path="url(#kp)" enable-background="accumulate" filter="url(#ko)" opacity=".75"/>
+ <g enable-background="new">
+ <path d="M1113.913 623.101l-.09-.002c-1.48.72-6.744.842-7.674 8.704-.872 7.373 12.962 18.694 14.976 19.936 1.626 1.001 3.407 1.81 5.174 2.325l1.403.382c1.883.43 3.679.575 5.134.496 3.16-.173 5.184-.339 7.197-.493 2.013-.155 2.577-.75 3.686-.866 1.15-.12 2.242.375 6.309-.155 4.066-.53 5.556-.79 6.288-1.07.76-.29 1.85-.663 2.534-1.25 1.952.213 3.803.145 5.281-.241 3.063-.8 5.055-1.3 6.958-1.968 1.527-.536 2.499-1.426 2.744-1.698.245-.271.181-.712.21-.73.046-.03.348-.055.546-.378 1.065-1.737 2.951-5.325 3.143-5.993.191-.664.322-1.324.417-1.713.056-.225-.045-.89-.017-.946.04-.078.315-.216.399-.457.402-1.16.34-2.183.29-3.616-.05-1.433-.438-4.695-.99-5.618-.353-.593-.703-.88-1.044-1.033-.065-.025-.118-.06-.18-.083-.02-.007-.042-.002-.061-.007-.301-.111-.586-.228-1.124-.35-.966-.22-2.379-.535-3.914-.702a19.278 19.278 0 0
+0-1.563-.085c-3.571-.097-9.064-.045-10.338.446-1.584-.518-3.852-1.06-5.845-1.144-3.069-.13-4.974-.228-6.914-.324-1.94-.095-1.72.194-2.941.134-1.32-.065-1.75-.426-5.537-.543-3.556-.11-9.035-.04-10.338.447-1.584-.52-3.85-1.06-5.845-1.144-3.07-.131-4.978-.197-6.918-.293-.66-.032-1.05.004-1.356.033z" enable-background="new" fill="#bcb786"/>
+ <g transform="rotate(2.568 -22364 20373)" clip-path="url(#kn)" enable-background="new" filter="url(#km)">
+ <path d="M229.94-409.12c-3.558.05-9.024.36-10.303.904-1.606-.447-3.903-.881-5.9-.877a979.03 979.03 0 0 1-6.907 0c-.66-.003-1.048.068-1.353.11v1.096c.12-.18.393-.69.95-.767.747-.103 5.17-.151 7.31-.11 1.775.035 4.455.274 6.39.96.32.113.618.273.891.41 1.964.987 7.944 4.302 7.944 4.302s-6.633-3.948-7.483-4.439c-.203-.117-.575-.258-1.036-.41 1.22-.449 5.076-.62 7.828-.713 3.024-.102 3.348-.09 5.41.192 2.13.29 3.34.602 3.34.602s-.08-.64 1.035-.794c.748-.103 5.17-.152 7.31-.11 2.07.04 5.366.407 7.282 1.37 1.003.504 3.035 1.569 4.795 2.536l.096-.02s-3.58-2.162-4.43-2.653c-.204-.117-.575-.258-1.037-.411 1.22-.448 5.047-.62 7.8-.712 3.024-.102 3.347-.09 5.41.191 1.954.267 3.013.53 3.195.576l-.027-.312a8.503 8.503 0 0 0-1.4-.357c-1.301-.235-3.4-.602-5.51-.564-3.571.064-9.052.356-10.302.904-1.606-.447-3.877-.881-5.871-.877-3.072.007-4.994.01-6.936
+0-1.943-.009-1.713.28-2.936.274-1.322-.005-1.766-.354-5.555-.301M206.2-407.48c1.92.817 4.577 2.193 6.159 3.397s2.908 1.774 5.555 3.918c.885.718 1.748 1.35 2.591 1.922l.541-.19a57.511 57.511 0 0 1-2.269-1.622c-2.822-2.12-3.627-2.81-6.015-4.274s-4.1-2.366-6.562-3.15" enable-background="new"/>
+ <path d="M237.8-407.48c1.92.817 4.606 2.193 6.188 3.397.813.62 1.558 1.07 2.45 1.654l.65-.116a40.414 40.414 0 0 0-2.697-1.784c-2.389-1.465-4.128-2.366-6.59-3.151" enable-background="new"/>
+ </g>
+ <g transform="rotate(6.561 -6814.9 734.73)" clip-path="url(#kl)">
+ <path d="M1056.2-278.8c4.145-1.479 10 3.125 10 3.125.899.28 2.725-.894 2.624-1.686 0 0-1.55-1.86-.374-2.939s5.296 1.507 7.5 1.625 5.562-.23 7-.75 1.113-1.425 2.625-1.75 5.119 1.038 7.06 1.169 4.649.334 5.815-.169.178-1.16 1.875-1.875 7.76-.957 9.625-.125 1.81.52 2.625 3 7.44 5.163-1.125 13.375-59.378 13.786-65.625 2.75 6.23-14.271 10.375-15.75z" enable-background="new" filter="url(#kk)" opacity=".75"/>
+ <path d="M1058.5-275.43c4.145-1.479 10 3.125 10 3.125.899.28 2.725-.894 2.624-1.686 0 0-1.55-1.86-.374-2.939s5.296 1.507 7.5 1.625 5.562-.23 7-.75 1.113-1.425 2.625-1.75 5.119 1.038 7.06 1.169 4.649.334 5.815-.169.178-1.16 1.875-1.875 7.76-.957 9.625-.125 1.81.52 2.625 3 7.44 5.163-1.125 13.375-59.378 13.786-65.625 2.75 6.23-14.271 10.375-15.75z" enable-background="new" filter="url(#kj)" opacity=".75"/>
+ </g>
+ </g>
+ <path d="M676.821 543.52c-3.804-25.264-16.81-50.638-17.157-75.525-.186-13.356 3.273-26.571 13.756-39.554 36.347-65.296 116.94-84.695 185.93-91.465 86.922-11.017 184.91 17.94 233.37 95.401 54.124 75.733 56.675 172.54 80.612 259.53 29.438 127.13 54.779 256.21 60.392 386.85-3.063 78.182-8.426 165.18-60.503 228.13-48.026 50.357-122.79 50.053-187.07 59.002-90.555 4.655-184.35-16.146-261.78-64.198-64.776-37.94-95.73-113.48-97.279-186.02-8.39-79.875 26.392-153.81 51.62-227.16 7.47-82.761 9.413-166.25 9.653-249.38-.837-32.195-7.09-63.817-11.546-95.609z" enable-background="accumulate" fill="#101414"/>
+ <path transform="translate(324.57 331.53)" d="M311.83 415.43l9.9 121.62-60.105 136.47 15.556 174.66c15.613 61.879 32.185 98.669 74.376 117.05 4.32-36.24-38.612-142.96-39.243-189.12-.631-46.184 10.83-108.61 30.678-158.3 20.048-50.192 36.897-44.846 42.125-92.593s-17.426-149.39-17.426-149.39l-55.86 39.598z" clip-path="url(#en)" enable-background="accumulate" fill="#fff" filter="url(#ki)" opacity=".25"/>
+ <path transform="translate(48.571 195.53)" d="m1010 655.49s16.755 37.018 28.702 53.954c11.946 16.936 52.727 56.046 52.727 56.046l52.597-127.59" enable-background="accumulate" fill="url(#kh)"/>
+ <path transform="translate(324.57 331.53)" d="m730.32 536.57c0 8.485 42.548 58.468 42.548 58.468l12.607-28.77-55.154-29.698z" clip-path="url(#kg)" enable-background="accumulate" fill="#fff" filter="url(#kf)" opacity=".08"/>
+ </g>
+ <g transform="translate(498.6 269.37)" clip-path="url(#ke)" enable-background="new">
+ <g transform="translate(-174.03 62.156)" filter="url(#em)">
+ <g filter="url(#i)">
+ <path d="M425.88 476.99c10.805-1.479 24.744 3.354 44.643 3.214s57.453-16.91 82.143-17.143 62.752 12.284 79.286 15 22.848-.158 27.5 7.857 1.927 10.747-10.357 20.714-40.79 12.636-66.071 12.857c-25.282.221-70.381 7.079-95.357 3.93s-56.938-7.824-68.929-17.858-19.851-16.732-17.5-23.929 13.837-3.164 24.643-4.643z" enable-background="new" fill="#fff" fill-rule="evenodd"/>
+ <path d="M343.65 412.6h381.84v181.02H343.65z" enable-background="accumulate" fill="none"/>
+ </g>
+ <g filter="url(#i)">
+ <path d="m861.17 390.2c-10.462 9.714-86.98 19.005-100.71 29.286s-14.753 12.888-12.143 20 6.545 9.406 25.714 8.571 98.571-27.622 98.571-21.429l-11.429-36.429z" enable-background="new" fill="#fff" fill-rule="evenodd"/>
+ <path d="M702.86 344.82h207.89v162.63H702.86z" enable-background="accumulate" fill="none"/>
+ </g>
+ </g>
+ <g enable-background="new" opacity=".18">
+ <g transform="translate(-174.03 62.156)" filter="url(#i)">
+ <path d="M425.88 476.99c10.805-1.479 24.744 3.354 44.643 3.214s57.453-16.91 82.143-17.143 62.752 12.284 79.286 15 22.848-.158 27.5 7.857 1.927 10.747-10.357 20.714-40.79 12.636-66.071 12.857c-25.282.221-70.381 7.079-95.357 3.93s-56.938-7.824-68.929-17.858-19.851-16.732-17.5-23.929 13.837-3.164 24.643-4.643z" enable-background="new" fill="#fff" fill-rule="evenodd"/>
+ <path d="M343.65 412.6h381.84v181.02H343.65z" enable-background="accumulate" fill="none"/>
+ </g>
+ <g transform="translate(-174.03 62.156)" filter="url(#i)">
+ <path d="m861.17 390.2c-10.462 9.714-86.98 19.005-100.71 29.286s-14.753 12.888-12.143 20 6.545 9.406 25.714 8.571 98.571-27.622 98.571-21.429l-11.429-36.429z" enable-background="new" fill="#fff" fill-rule="evenodd"/>
+ <path d="M702.86 344.82h207.89v162.63H702.86z" enable-background="accumulate" fill="none"/>
+ </g>
+ </g>
+ </g>
+ <g transform="translate(48.571 195.53)" fill-rule="evenodd">
+ <path transform="translate(276 136)" d="m582.66-7.418l113.14 86.267 108.89 258.8 38.184 207.89 120.21 91.924s-12.728-287.09-19.799-313.96-149.91-393.15-149.91-393.15l-210.72 62.225z" clip-path="url(#kd)" enable-background="accumulate" filter="url(#kc)" opacity=".75"/>
+ <path d="m964.14 239.6s8.677 10.897 24.107 11.964c15.43 1.068 49.722-39.953 70.179-52.143 20.479-12.204 47.046-26.602 63.929-20.357 16.882 6.245 22.158 26.436 27.857 48.036 5.7 21.6 6.719 61.814-2.679 92.857-9.397 31.043-50.502 73.104-65.356 103.39s-11.607 39.821-11.607 39.821" enable-background="accumulate" fill="url(#el)"/>
+ <path d="m1124.5 207.63c-15.893-0.893-49.719 12.106-66.071 24.286-16.439 12.244-29.221 24.114-29.286 52.143-0.065 28.206 13.119 39.076 29.107 46.964s33.686 7.12 51.964-11.786c18.278-18.905 14.286-111.61 14.286-111.61z" enable-background="new" fill="url(#ek)"/>
+ <ellipse transform="matrix(.94347 -.12399 .14401 1.0958 451.95 134.6)" cx="385" cy="237.01" rx="86.429" ry="73.929" clip-path="url(#kb)" enable-background="accumulate" fill="url(#ef)" filter="url(#je)" opacity=".75"/>
+ <path transform="translate(450.03 73.844)" d="m527.61 407.45s-122.04 38.403-187.51 9.632c-65.473-28.772-74.377-124.72-74.377-124.72s73.382-80.504 129.92-83.615c55.827-3.072 90.574 20.143 114.87 65.852 24.352 45.813 17.101 132.85 17.101 132.85z" enable-background="accumulate" fill="url(#jd)" mask="url(#jc)"/>
+ <path d="m772.17 393.35s36.218-27.382 51.607-35.893c15.177-8.393 25.714-11.607 35.893-11.607l-15.536 66.964" enable-background="accumulate" fill="url(#ee)"/>
+ <circle transform="translate(449.5 74.915)" cx="409.29" cy="306.65" r="36.25" enable-background="accumulate" fill="url(#ed)"/>
+ <path transform="translate(276 136)" d="m311.83 415.43l9.9 121.62-60.105 136.47 15.556 174.66c15.613 61.879 32.185 98.669 74.376 117.05 4.32-36.24 8.682-72.368-31.243-223.12l17.678-69.296 72.125-138.59-42.426-158.39-55.86 39.598z" clip-path="url(#en)" enable-background="accumulate" fill="#fff" filter="url(#jb)" opacity=".3"/>
+ <path d="m635.21 581.13c-14.142 12.728 39.233 34.58 76.368 24.042s104.64-35.564 103.24-79.196c-1.407-43.632-76.368-128.69-76.368-128.69l-103.24 183.85z" enable-background="accumulate" filter="url(#ja)" opacity=".5"/>
+ <circle transform="translate(449.67 74.915)" cx="410" cy="306.65" r="23.214" enable-background="accumulate" fill="url(#ec)"/>
+ <circle transform="translate(452 73.487)" cx="414.29" cy="303.08" r="7.5" enable-background="accumulate" fill="#fff" filter="url(#iz)" stroke="#000" stroke-linejoin="bevel"/>
+ <path d="m789.32 478.35s7.023 19.569-1.071 35-42.323 38.988-67.5 50c-25.31 11.07-85.473 32.964-101.79 41.964-16.461 9.082-18.214 12.679-18.214 12.679s-7.147-19.064 28.75-51.786c36.172-32.972 142.03-48.05 159.82-87.857z" enable-background="accumulate" fill="url(#eb)"/>
+ </g>
+ <g enable-background="new">
+ <g transform="translate(829.32 270.09)" fill-rule="evenodd">
+ <path transform="translate(-329.81)" d="M179.64 267.36c-22.41 39.703-60.616 115.78-69.286 149.64-8.647 33.775-8.772 66.417-.357 86.429 8.36 19.882 26.164 35.633 40.714 41.429-.597-14.376 14.373-43.286 72.857-72.5 58.626-29.285 78.382-27.131 103.57-47.143 25.63-20.362 8.206-79.647 3.214-93.929s-1.236-3.38-1.946-5.093c-10.689-25.816-34.214-54.43-64.483-64.55s-65.018-4.848-84.286 5.714z" clip-path="url(#ea)" fill="url(#m)"/>
+ <ellipse transform="rotate(28.068 -88.085 -332.1)" cx="183.57" cy="338.08" rx="64.716" ry="134.01" enable-background="accumulate" fill="url(#dz)"/>
+ <ellipse transform="rotate(28.068 -43.578 -333.81)" cx="183.57" cy="338.08" rx="64.716" ry="134.01" enable-background="accumulate" fill="url(#iy)"/>
+ </g>
+ <path transform="translate(499.51 270.09)" d="M179.64 267.36c-22.41 39.703-60.616 115.78-69.286 149.64-8.647 33.775-8.772 66.417-.357 86.429 8.36 19.882 26.164 35.633 40.714 41.429-.597-14.376 14.373-43.286 72.857-72.5 58.626-29.285 78.382-27.131 103.57-47.143 25.63-20.362 8.206-79.647 3.214-93.929s-1.236-3.38-1.946-5.093c-10.689-25.816-34.214-54.43-64.483-64.55s-65.018-4.848-84.286 5.714z" clip-path="url(#ea)" enable-background="new" fill="none" filter="url(#ix)" stroke="url(#l)" stroke-width="20.8"/>
+ </g>
+ <g transform="translate(48.571 195.53)" fill-rule="evenodd">
+ <circle transform="translate(452.56 72.581)" cx="310.71" cy="398.08" r="19.704" enable-background="accumulate" stroke="#000" stroke-linejoin="bevel"/>
+ <circle transform="translate(450.56 72.581)" cx="310.71" cy="398.08" r="19.704" enable-background="accumulate" fill="url(#m)" filter="url(#iw)" stroke="url(#l)" stroke-width="20.8"/>
+ <circle transform="translate(450.56 72.581)" cx="310.71" cy="398.08" r="19.704" enable-background="accumulate" fill="url(#dy)"/>
+ <ellipse transform="rotate(-4.471 1823.1 -5529.2)" cx="429.57" cy="377.43" rx="72.08" ry="44.548" enable-background="accumulate" fill="url(#dx)" filter="url(#iv)"/>
+ <ellipse transform="matrix(1.4358 -.07 .07 1.4358 235.18 -63.865)" cx="437.7" cy="391.22" rx="36.612" ry="22.627" enable-background="accumulate" fill="url(#dw)" filter="url(#iu)"/>
+ <g transform="translate(450.03 73.844)" enable-background="new" filter="url(#it)">
+ <circle cx="413.66" cy="401.83" r="3.214" enable-background="accumulate" stroke="url(#dv)"/>
+ <circle transform="translate(13.125 8.125)" cx="413.66" cy="401.83" r="3.214" enable-background="accumulate" stroke="url(#du)"/>
+ <circle transform="translate(32.946 7.5)" cx="413.66" cy="401.83" r="3.214" enable-background="accumulate" stroke="url(#ej)"/>
+ <circle transform="translate(24.911 -10.268)" cx="413.66" cy="401.83" r="3.214" enable-background="accumulate" stroke="url(#ei)"/>
+ <circle transform="translate(47.589 -.625)" cx="413.66" cy="401.83" r="3.214" enable-background="accumulate" stroke="url(#eh)"/>
+ </g>
+ </g>
+ <g fill="none" stroke="#000">
+ <path d="M944.771 678.46c.985 4.35 4.537 6.18 7.387 7.892 4.46 2.513 6.52 1.522 9.154-.758 1.602-1.921 10.683-4.698 15.594-7.07 4.33-1.46 8.904-5.36 13.385-8.335 3.395-1.627 5.347.355 7.829 1.01 2.944.717 4.411 2.172 6.06 3.536 2.397 1.175-.927 3.143 3.284 4.293 1.19.218 2.417.577 3.283-.505" enable-background="new"/>
+ <path d="M959.421 670.88c2.315-.032 3.178.643 5.493-.82 3.455-3.082 5.402-3.146 7.955-4.42 3.026-1.315 6.535 8.152 10.102 9.849 2.395-.822 1.289 1.794 1.452 2.651.057 2.647 2.807 3.679 4.356 5.43 3.316 2.256 7.375 6.296 11.112 5.303 6.445-2.93 10.28-1.281 16.29-7.386.703-1.182-.585-6.895 3.093-7.198 2.524.254 4.166.05 6.06.569 5.442 2.117 7.738 6.45 14.71 7.955 6.184.966 7.613 3.794 13.89 5.05M925.551 679.05c2.399-.794 6.106 4.192 8.173 7.046.593 2.68 1.154 5.486.758 12.122.785 2.417 2.68 3.03 4.798 3.283 3.117-.537 5.877-1.325 7.324-3.03 1.871-1.943 5.312 2.393 8.08 4.04 3.61 1.912 7.775 1.979 11.87 2.273 1.703-.231 2.37 4.515 3.283 8.08.384 4.379-.886 6.896-1.768 9.85-.294 2.496 2.988 3.53 6.313 4.545 3.183.742 6.545 1.662 9.092 1.768 5.142.875 8.088 2.69 12.122 4.04 2.239.817 3.26 2.243 4.545 3.536" enable-background="new"/>
+ </g>
+ <g fill-rule="evenodd">
+ <path transform="translate(324.57 331.53)" d="m332 187.7s57.5-25.5 57.5-28 5.5-52 5.5-52 91-48.5 91.5-50.5 86-62 86-62l-186 22-75.5 106z" clip-path="url(#jz)" enable-background="new" fill="#fff" filter="url(#jy)" opacity=".25"/>
+ <path d="m1745.9 918.08s-115.97 73.539-123.04 77.782c-7.071 4.243-230.52 137.18-230.52 137.18l4.243 39.598 216.37-100.41 117.38-101.82 15.556-52.326z" enable-background="accumulate" fill="#fff" opacity=".25"/>
+ <path transform="translate(324.57 331.53)" d="m528.92 556.85c-5.657-1.414-181.02 74.953-181.02 74.953l-33.941 181.02 51.095 193.95 257.2 67.681s206.48 152.74 212.13 148.49c5.657-4.243 168.29-193.75 168.29-193.75l-159.81-183.85-46.669-178.19-267.29-110.31z" clip-path="url(#jx)" enable-background="accumulate" filter="url(#eg)" opacity=".5"/>
+ <path d="m1146.2 809.42s22.62-6.507 35.743-5.873 30.642 1.939 43.709 12.186c13.067 10.248 25.068 27.14 34.112 58.37 9.045 31.23 1.698 99.252-6.176 143.35-7.874 44.095-28.265 106.11-45 140-16.735 33.887-49.798 77.495-60.57 89.876-11.363 13.062-56.205 36.426-79.43 42.267 5.303-10.607 48.9-50.589 35-60.714-14.02-10.212-45.76 45.982-84.293 29.033 21.382-13.132 41.779-51.186 34.04-66.594-7.84-15.61-30.704 48.758-93.535 37.013 30.052-27.527 55.407-70.904 41.263-82.98-14.415-12.307-60.462 54.293-60.462 54.293s-2.822-41.7 13.773-68.607c16.639-26.978 79.653-81.615 99.553-111.7 19.9-30.088 33.613-66.009 42.136-92.518s15.8-77.1 15.8-77.1" enable-background="new" fill="#0c0c0c"/>
+ <path transform="translate(324.57 331.53)" d="m770.75 609.18l-50.912 97.581-79.903 111.02 34.648 71.418 42.426 79.196 72.125-45.255 14.142-192.33 21.213-138.59-14.142-90.156-39.598 107.13z" clip-path="url(#jw)" enable-background="accumulate" fill="#fff" filter="url(#jv)" opacity=".25"/>
+ <path transform="translate(324.57 331.53)" d="m295 846.2l6.645-68.923s90.32 89.005 162.36 122.92 308 62 308 62l154-26-36 162-286 26-298-89-11-189z" clip-path="url(#ju)" enable-background="accumulate" filter="url(#eg)"/>
+ <path transform="translate(498.6 269.37)" d="m405.8 845.99l74.953 65.054 2.5 16.88 19.403 10.159 6.492 23.051 31.709-8.371 14.849 48.083c12.257 12.728 89.793-113.11 55.86 38.184l-60.81 16.264-89.203-94.693-62.825-53.8 7.07-60.811z" clip-path="url(#jt)" enable-background="new" fill="#fff" filter="url(#o)"/>
+ <path d="m1207.9 1113.9c54.286-1.429 126.04-15.052 170-26.786 44.053-11.757 125.89-36.347 175.36-57.857 49.339-21.453 113.6-59.282 154.29-92.143 40.508-32.721 52.39-55.82 60.714-33.571 8.37 22.368-16.407 56.326-37.857 81.071-21.604 24.923-52.731 52.705-98.929 89.286s-156.08 101.58-212.86 128.57c-57.066 27.125-128.2 58.238-172.14 72.5s-131.43 31.071-131.43 31.071l92.857-192.14z" enable-background="accumulate" fill="#121212"/>
+ <path transform="translate(498.6 269.37)" d="m1241.6 652.95s-64.722 54.337-145.66 98.995c-82.024 45.255-284.26 93.338-284.26 93.338s-15.101 21.052 45.255 28.284 224.08-53.301 278.6-96.167 120.21-111.72 120.21-111.72l-14.142-12.728z" clip-path="url(#js)" enable-background="accumulate" fill="url(#jr)" filter="url(#jq)" opacity=".5"/>
+ </g>
+ <g transform="translate(498.6 269.37)" clip-path="url(#jp)" enable-background="new">
+ <g filter="url(#em)">
+ <g transform="translate(-174.03 62.156)" filter="url(#i)">
+ <path d="m1268.3 663.77s-0.296 26.161 4.643 37.857 20.038 26.487 28.572 31.429 18.929 8.571 18.929 8.571l117.86-115 17.857-75.714-96.43 38.571-91.428 74.286z" enable-background="new" fill="#fff" fill-rule="evenodd"/>
+ <path d="M1197.8 486.14h333.75v309.71H1197.8z" enable-background="accumulate" fill="none"/>
+ </g>
+ </g>
+ <g transform="translate(-174.03 62.156)" enable-background="new" filter="url(#i)" opacity=".18">
+ <path d="m1268.3 663.77s-0.296 26.161 4.643 37.857 20.038 26.487 28.572 31.429 18.929 8.571 18.929 8.571l117.86-115 17.857-75.714-96.43 38.571-91.428 74.286z" enable-background="new" fill="#fff" fill-rule="evenodd"/>
+ <path d="M1197.8 486.14h333.75v309.71H1197.8z" enable-background="accumulate" fill="none"/>
+ </g>
+ </g>
+ <path transform="translate(498.6 269.37)" d="M1264.2 605c-4.491.733-8.157 3.455-11.938 6.406-10.081 7.87-28.17 34.425-48.031 50.47-39.867 32.202-104 69.976-152.56 91.093-48.614 21.137-130.54 45.818-174.31 57.5-43.398 11.582-115.04 25.131-168.25 26.531l-4.562.125-2 4.125-92.844 192.12-6.5 13.47 14.656-2.845s87.27-16.65 132.34-31.28c44.725-14.518 115.79-45.668 173.03-72.876 57.603-27.38 166.94-91.98 214.28-129.47 46.36-36.71 77.805-64.717 99.938-90.25 10.9-12.576 22.745-27.53 31.03-42.75 8.287-15.219 19.16-44.218 13.688-58.844-1.218-3.254-2.552-6.06-4.594-8.5s-8.475-1.572-8.563-5.03c-.21-8.266-3.315-.245-4.812 0zm2.156 15.219c.415.586 1.031 1.558 1.782 3.563 2.896 7.742-1.441 31.899-8.813 45.438s-22.638 28.924-33.188 41.094c-21.075 24.314-51.904 51.862-97.938 88.312-45.05 35.672-155.46 101.09-211.41 127.69-56.892 27.042-128.1 58.118-171.25 72.125-36.365 11.803-95.845 23.834-115.72
+27.78l84.281-174.47c54.707-2.049 123.79-15.215 167.12-26.78 44.334-11.832 126.08-36.336 176.41-58.22 50.112-21.788 112.53-61.167 154.03-94.687 20.646-16.677 41.745-42.546 49.813-48.844 2.437-1.903 4.08-2.636 4.875-3z" clip-path="url(#jo)" enable-background="accumulate" fill="#050505" fill-rule="evenodd" filter="url(#jn)" opacity=".833"/>
+ <g transform="rotate(6.561 -6814.9 734.74)" enable-background="new" fill-rule="evenodd" mask="url(#jm)">
+ <path d="M1111.48-285.971l-3.937 1.875c-.041.01-.1.02-.125.031-.42.213-.165.1-.657.312-.486.21-1.737.585-4.093 1.47-3.332 1.25-5.805 2.15-7 3.062-1.537.021-3.72.233-5.657.719a227.677 227.677 0 0 1-6.75 1.593c-1.894.42-1.675.642-2.875.875-1.296.252-1.721-.009-5.437.782-3.49.742-8.895 1.93-10.156 2.687-1.584-.18-3.868-.322-5.844-.031-3.04.447-4.916.673-6.844.906-.655.08-1.04.2-1.343.281-.427.132-.686.26-1.375.344-1.312.16-1.763-.157-5.532.281-3.554.413-9.005 1.273-10.25 1.938-1.599-.297-3.857-.534-5.843-.344-3.06.293-4.972.484-6.907.656-1.934.173-1.688.423-2.906.532-1.316.117-1.76-.164-5.531.25-3.542.388-9.008 1.209-10.281 1.875-1.6-.295-3.887-.507-5.875-.313-3.058.3-4.941.48-6.875.656-.658.06-1.04.179-1.344.25-.428.12-.683.218-1.375.282-1.316.12-1.76-.195-5.531.218-3.556.39-9.006 1.24-10.25
+1.907-1.599-.295-3.86-.524-5.844-.313-3.056.325-4.974.526-6.906.719s-1.69.44-2.906.562c-1.315.132-1.763-.164-5.532.282-3.538.418-8.977 1.292-10.25 1.968-1.597-.28-3.86-.42-5.843-.187-3.052.358-4.945.568-6.875.781-.657.073-1.041.173-1.344.25-.427.127-.685.267-1.375.344-1.314.146-1.768-.174-5.531.312-3.55.46-8.979 1.42-10.22 2.125-1.592-.244-3.833-.381-5.812-.125-3.047.395-4.95.649-6.875.907-1.924.257-1.726.493-2.937.656-1.31.176-1.748-.105-5.5.469-3.525.538-8.924 1.699-10.188 2.437-1.588-.203-3.846-.255-5.813.094-3.026.536-4.899.861-6.812 1.187-.65.111-1.014.271-1.313.375-.42.165-.663.332-1.344.469-1.294.262-1.727-.006-5.437.813-3.499.771-8.846 2.382-10.062 3.218-1.563-.077-3.758.086-5.688.594-2.972.783-4.817 1.232-6.687 1.75s-1.667.767-2.844 1.094c-1.272.353-1.697.107-5.344 1.187-3.424 1.015-8.65 2.934-9.875 3.844-1.539.013-3.72.272-5.625.875-2.93.928-4.75 1.459-6.594
+2.063-.626.205-.991.392-1.28.53-.408.215-.654.41-1.313.626-1.255.411-1.686.189-5.281 1.437-3.39 1.178-8.595 3.214-9.782 4.157-1.524.06-3.65.395-5.53 1.062-2.898 1.028-4.7 1.676-6.532 2.313-1.832.637-1.628.848-2.781 1.25-1.247.434-1.664.2-5.22 1.562-3.338 1.28-8.486 3.483-9.687 4.469-1.507.108-3.635.499-5.5 1.219a1047.26 1047.26 0 0 1-6.437 2.469c-.617.233-.997.442-1.281.593v.031l-8 3.188-12.476 3.492 7.93 19.278c-.592 1.973 12.545-4.739 12.545-4.739.227-.144.45-.272.72-.375 1.08-.41 2.17-.215 6-1.687 3.828-1.472 5.223-2.005 5.905-2.406.68-.4 1.612-.88 2.22-1.531 1.826-.138 3.57-.494 4.937-1 2.968-1.1 4.875-1.807 6.78-2.47 1.907-.662 2.355-1.414 3.407-1.78 1.092-.38 2.195-.166 6.063-1.532 3.867-1.366 5.283-1.827 5.968-2.218.702-.4 1.701-.933 2.313-1.594 1.97-.055 3.817-.385 5.281-.875 3.002-1.005 4.926-1.622 6.844-2.25 1.538-.504 2.174-1.047 2.906-1.438.23-.134.476-.253.75-.343 1.098-.36
+2.181-.082 6.094-1.313 3.912-1.231 5.366-1.673 6.062-2.031.694-.357 1.63-.793 2.25-1.406 1.866-.023 3.636-.267 5.032-.688 3.03-.913 4.992-1.43 6.937-1.969 1.945-.538 2.426-1.264 3.5-1.562 1.114-.31 2.22.007 6.188-1.031 3.967-1.039 5.417-1.433 6.125-1.75.734-.33 1.813-.754 2.437-1.375 1.998.116 3.857-.02 5.344-.375 3.078-.735 5.083-1.101 7.062-1.5 1.588-.32 2.245-.79 3-1.094a3.4 3.4 0 0 1 .75-.25c1.134-.23 2.305.209 6.344-.5 4.04-.71 5.5-.927 6.219-1.188.716-.26 1.704-.567 2.344-1.093 1.924.239 3.748.224 5.187 0 3.127-.488 5.155-.701 7.156-.97 2.002-.267 2.49-.944 3.594-1.093 1.147-.154 2.276.302 6.344-.219 4.068-.52 5.56-.695 6.281-.937.737-.247 1.798-.586 2.438-1.125 2.05.335 3.973.398 5.5.218 3.142-.368 5.18-.559 7.187-.78 1.611-.179 2.265-.609 3.031-.845.241-.085.495-.155.782-.187 1.15-.128 2.301.347 6.375-.125s5.559-.61 6.28-.844c.72-.232 1.701-.473 2.345-.969 1.936.334 3.77.405
+5.219.25 3.146-.334 5.177-.518 7.187-.718 2.01-.2 2.484-.827 3.594-.938 1.15-.115 2.296.365 6.375-.062s5.589-.562 6.312-.782c.74-.223 1.796-.513 2.438-1.03 2.057.398 4.002.493 5.531.343 3.149-.308 5.176-.473 7.188-.656 1.614-.147 2.263-.56 3.03-.781.242-.081.494-.13.782-.157 1.152-.105 2.293.393 6.375 0s5.589-.53 6.312-.75c.721-.218 1.7-.447 2.344-.937 1.938.35 3.769.454 5.219.312 3.149-.308 5.176-.473 7.187-.656 2.012-.183 2.515-.838 3.625-.937 1.153-.104 2.293.384 6.375 0 4.083-.385 5.59-.501 6.313-.72.74-.222 1.796-.514 2.437-1.03 2.058.401 4.003.503 5.532.343 3.146-.328 5.177-.522 7.187-.718 1.613-.158 2.266-.632 3.031-.875.241-.088.464-.122.75-.157 1.149-.14 2.317.34 6.375-.25 4.059-.59 5.562-.777 6.282-1.03.716-.254 1.674-.559 2.312-1.095 1.92.212 3.72.152 5.156-.093 3.12-.533 5.112-.929 7.094-1.313 1.982-.384 2.474-1.04 3.563-1.281 1.128-.25 2.27.116 6.25-.875s5.43-1.42
+6.125-1.781c.722-.376 1.761-.87 2.375-1.531 1.963-.012 3.793-.292 5.218-.844 2.952-1.145 4.874-1.87 6.688-2.75 1.456-.707 2.32-1.702 2.531-2 .212-.298.1-.729.125-.75.043-.035.34-.094.5-.438.86-1.847 2.323-5.627 2.438-6.312.113-.682.168-1.353.218-1.75.03-.23-.147-.88-.125-.938.031-.082.289-.25.344-.5.266-1.198.09-2.207-.125-3.625-.214-1.417-.972-4.614-1.625-5.469-.659-.861-1.225-1.01-1.75-1z" enable-background="new" fill="#bcb786"/>
+ <g clip-path="url(#jl)">
+ <path d="M1107.4-284.05c-.419.213-.156.094-.647.306-.486.21-1.724.574-4.08 1.459-3.33 1.25-5.83 2.153-7.026 3.066-1.536.021-3.72.233-5.656.719a227.709 227.709 0 0 1-6.75 1.593c-1.895.42-1.676.643-2.875.875-1.297.252-1.721-.009-5.438.782-3.49.742-8.894 1.93-10.156 2.687-1.583-.18-3.867-.322-5.843-.031-3.04.447-4.917.673-6.844.906-.655.08-1.041.201-1.344.282-.426.131-.686.26-1.375.343-1.311.16-1.762-.157-5.531.282-3.554.413-9.005 1.272-10.25 1.937-1.599-.297-3.858-.534-5.844-.344-3.059.294-4.972.484-6.906.657-1.934.172-1.689.422-2.906.53-1.317.118-1.76-.163-5.532.25-3.541.39-9.007 1.21-10.28 1.876-1.6-.295-3.888-.507-5.876-.313-3.058.3-4.94.48-6.875.657-.657.06-1.04.178-1.343.25-.428.118-.684.218-1.375.28-1.316.121-1.76-.194-5.532.22-3.556.39-9.005 1.239-10.25
+1.906-1.598-.294-3.86-.524-5.843-.313-3.056.326-4.974.526-6.907.719-1.932.192-1.69.44-2.906.562-1.315.132-1.763-.164-5.53.282-3.54.418-8.979 1.292-10.25 1.969-1.599-.282-3.86-.42-5.845-.188-3.052.358-4.945.568-6.875.781-.656.073-1.04.173-1.344.25-.426.127-.684.267-1.375.344-1.313.146-1.767-.174-5.53.313-3.55.458-8.98 1.419-10.22 2.125-1.593-.245-3.834-.382-5.812-.125-3.048.394-4.95.648-6.875.906-1.925.258-1.726.493-2.938.656-1.31.176-1.747-.104-5.5.469-3.524.538-8.923 1.699-10.188 2.437-1.587-.203-3.845-.254-5.812.094-3.026.536-4.9.862-6.813 1.187-.65.111-1.013.271-1.312.375-.42.165-.664.332-1.344.47-1.295.26-1.727-.007-5.438.812-3.498.772-8.846 2.383-10.062 3.219-1.562-.078-3.757.085-5.687.593-2.972.783-4.818 1.232-6.688 1.75s-1.666.768-2.843 1.094c-1.273.353-1.697.107-5.344 1.188-3.425 1.014-8.65 2.933-9.875 3.843-1.539.013-3.72.273-5.625.875-2.931.928-4.75 1.459-6.594
+2.063-.627.205-.992.392-1.281.531-.408.214-.653.409-1.313.625-1.254.412-1.686.19-5.28 1.438-3.39 1.177-8.596 3.213-9.782 4.156-1.524.06-3.65.395-5.531 1.062-2.898 1.029-4.7 1.676-6.531 2.313-1.833.637-1.628.848-2.782 1.25-1.246.434-1.663.2-5.218 1.562-3.34 1.28-8.488 3.483-9.688 4.47-1.507.107-3.636.498-5.5 1.218a1044.752 1044.752 0 0 1-6.437 2.469c-.617.233-.997.442-1.282.593v1.094c.112-.222.386-.817.907-1.094.698-.37 4.813-1.993 6.812-2.718 1.657-.602 4.154-1.329 5.969-1.313.302.003.588.051.844.094 1.842.308 7.468 1.562 7.468 1.562s-6.233-1.646-7.03-1.843c-.191-.048-.536-.07-.97-.063 1.146-.87 4.762-2.393 7.344-3.437 2.839-1.148 3.117-1.252 5.063-1.657 2.008-.417 3.156-.5 3.156-.5s-.082-.6.969-1.125c.705-.351 4.887-1.892 6.906-2.562 1.952-.648 5.057-1.359 6.875-1 1.863.367 7.531 1.812 7.531 1.812s-6.287-1.87-7.094-2.093c-.193-.054-.53-.086-.968-.094 1.158-.833 4.794-2.195 7.406-3.156
+2.87-1.056 3.167-1.162 5.125-1.532 1.853-.35 2.859-.425 3.031-.437.114-.217.377-.81.906-1.063.71-.338 4.926-1.712 6.97-2.312 1.692-.497 4.24-1.037 6.093-.906.308.021.613.097.875.156 1.881.424 7.594 2.031 7.594 2.031s-6.342-2.065-7.157-2.312c-.194-.06-.557-.104-1-.125 1.17-.798 4.863-2.057 7.5-2.938 2.898-.968 3.233-1.003 5.22-1.281 2.049-.287 3.187-.313 3.187-.313s-.073-.607 1-1.062c.72-.306 4.99-1.5 7.062-2 2.003-.483 5.199-.928 7.063-.406 1.91.535 7.719 2.5 7.719 2.5s-6.423-2.424-7.25-2.72c-.198-.07-.583-.14-1.032-.187 1.188-.728 4.916-1.774 7.594-2.5 2.944-.797 3.292-.77 5.313-.906 1.913-.128 2.947-.07 3.125-.062.117-.204.391-.78.937-.97.732-.253 5.079-1.047 7.188-1.374 1.748-.271 4.4-.485 6.312-.094.318.065.605.186.875.281 1.94.69 7.844 3.094 7.844 3.094s-6.535-2.95-7.375-3.312c-.201-.087-.575-.167-1.031-.25 1.206-.633 5.03-1.396 7.75-1.906 2.99-.562 3.3-.53 5.344-.532 2.109-.002
+3.312.125 3.312.125s-.073-.63 1.031-.937c.74-.206 5.126-.834 7.25-1.063 2.053-.22 5.319-.252 7.22.47 1.947.738 7.843 3.374 7.843 3.374s-6.563-3.179-7.406-3.562c-.202-.092-.543-.187-1-.282 1.21-.602 4.984-1.248 7.718-1.656 3.005-.448 3.326-.452 5.375-.406 1.94.043 3.007.194 3.188.219.119-.194.384-.766.937-.907.743-.188 5.155-.734 7.282-.937 1.763-.169 4.42-.234 6.343.25.32.08.604.203.875.312 1.953.784 7.907 3.47 7.907 3.47s-6.592-3.254-7.438-3.657c-.202-.096-.572-.207-1.031-.313 1.214-.574 5.044-1.122 7.781-1.5 3.009-.415 3.323-.442 5.375-.375 2.118.07 3.313.25 3.313.25s-.078-.637 1.03-.906c.745-.18 5.153-.663 7.282-.844 2.059-.174 5.343-.124 7.25.657 1.955.8 7.875 3.53 7.875 3.53s-6.56-3.308-7.406-3.718c-.202-.098-.572-.203-1.031-.312 1.215-.564 5.01-1.115 7.75-1.47 3.01-.389 3.321-.397 5.375-.312 1.944.08 3.006.254 3.187.282.12-.191.383-.746.938-.875.744-.174 5.15-.65 7.28-.813
+1.767-.134 4.45-.126 6.376.375.32.083.603.201.875.313 1.954.8 7.906 3.562 7.906 3.562s-6.591-3.34-7.437-3.75c-.203-.098-.572-.203-1.032-.312 1.215-.564 5.042-1.084 7.782-1.438 3.01-.39 3.352-.429 5.406-.344 2.12.088 3.312.313 3.312.313s-.078-.65 1.032-.906c.744-.173 5.15-.624 7.28-.782 2.061-.152 5.344-.096 7.25.688 1.956.804 7.876 3.5 7.876 3.5s-6.56-3.276-7.406-3.688c-.203-.098-.572-.202-1.032-.312 1.216-.562 5.012-1.128 7.75-1.5 3.01-.41 3.323-.416 5.375-.344 1.943.068 3.008.165 3.188.188.119-.195.384-.73.937-.875.742-.197 5.131-.83 7.25-1.094 1.757-.22 4.406-.333 6.313.031.317.06.606.19.875.281 1.936.661 7.844 2.938 7.844 2.938s-6.537-2.807-7.375-3.156c-.2-.084-.577-.174-1.032-.25 1.204-.651 5.02-1.372 7.72-2 2.966-.69 3.288-.756 5.312-.875 2.088-.124 3.28-.032 3.28-.032s-.086-.632 1-1.03c.73-.269 5.048-1.339 7.126-1.813 2.008-.46 5.168-1.03 7-.625 1.878.414 13.578 3.015 13.578
+3.015s-12.328-3.022-13.141-3.265c-.195-.058-.559-.107-1-.125 1.167-.804 3.514-1.688 6.11-2.703 1.68-.659.923-.377 2.775-1.004 1.754-.594 2.486-1.01 2.63-1.113.347-.207-.355-.122-.544-.042z" enable-background="new" filter="url(#jk)"/>
+ <path d="m1082.6-275.12c1.873 0.393 4.496 1.146 6.031 1.969s2.822 1.056 5.375 2.5c2.527 1.43 4.796 2.007 6.969 2.531 2.348 0.566 5.435 0.715 8.844 1.188-1.09-0.84-6.608-1.173-8.406-1.563-1.8-0.39-3.895-1.016-6.594-2.313-2.7-1.296-3.495-1.799-5.813-2.687-2.318-0.889-4.004-1.383-6.406-1.625z" enable-background="new" filter="url(#jj)"/>
+ <path d="M1051.5-270c1.905.578 4.528 1.616 6.094 2.594 1.565.978 2.88 1.36 5.5 3.125 2.593 1.747 4.986 2.71 7.25 3.594 2.446.955 5.682 1.657 9.406 3.062-1.19-1.138-7.063-2.687-8.938-3.375-1.874-.688-4.081-1.566-6.874-3.281-2.794-1.715-3.574-2.284-5.938-3.406-2.364-1.123-4.057-1.835-6.5-2.313z" enable-background="new" filter="url(#ji)"/>
+ <path d="m1020.2-266.84c1.912 0.638 4.581 1.755 6.156 2.813 1.575 1.057 2.896 1.508 5.531 3.406 2.61 1.878 5.029 3.03 7.313 4.062 2.468 1.116 5.764 2.174 9.531 3.844-1.203-1.222-7.203-3.314-9.094-4.125-1.89-0.81-4.064-1.894-6.874-3.75s-3.622-2.477-6-3.719c-2.379-1.242-4.111-1.975-6.563-2.531z" enable-background="new" filter="url(#jh)"/>
+ <path d="M1110.2-266.89c.15.049.688.631.11 1.484-.81 1.195-5.705 3.325-8.563 4.125-2.845.798-6.29.978-10.562-.375-4.302-1.362-5.47-2.468-10.656-4.312 4.664 2.115 6.195 3.952 10.125 5.344 1.62.574 3.367.94 5.062 1.03-.445.327-1.53.984-3.562 1.595-2.796.84-6.65 1.534-8.25 1.625-1.515.086-3.142-.513-3.438-.625.167.103.374.377-.25 1.03-.899.945-6.147 1.924-9.125 2.25-2.964.326-6.521-.015-10.906-1.905-3.978-1.715-5.339-2.916-9.406-4.75v.156c3.643 2.095 5.284 3.883 8.875 5.562 1.73.81 3.592 1.41 5.406 1.72-.534.286-1.557.71-3.437 1.03-2.87.488-6.81.817-8.438.75-.85-.034-1.728-.184-2.406-.406-.685-.215-1.19-.444-1.312-.5.169.107.43.403-.22 1.031-.909.88-6.245 1.337-9.25 1.47-2.99.131-6.588-.451-11-2.563-4.44-2.127-5.64-3.402-10.905-5.782 4.734 2.597 6.286 4.63 10.344 6.72 1.673.861 3.485 1.493 5.25 1.937-.463.233-1.59.688-3.688.937-2.886.343-6.834.493-8.468.375-1.547-.111-3.232-.857-3.532-1
+.17.12.414.41-.218 1-.913.851-6.244 1.262-9.25 1.375-2.993.113-6.59-.49-11-2.594-4.002-1.908-5.388-3.137-9.47-5.093v.156c3.656 2.204 5.295 4.053 8.907 5.906 1.74.893 3.637 1.528 5.469 1.969-.54.248-1.578.615-3.469.844-2.886.348-6.866.52-8.5.406a9.446 9.446 0 0 1-2.406-.5 12.532 12.532 0 0 1-1.313-.531c.17.112.465.422-.187 1.03-.913.853-6.275 1.294-9.281 1.407-2.993.112-6.594-.528-11-2.594-4.437-2.08-5.647-3.331-10.906-5.656 4.729 2.548 6.29 4.578 10.344 6.625 1.671.844 3.485 1.467 5.25 1.906-.464.235-1.59.684-3.688.938-2.886.348-6.836.57-8.469.469-1.544-.096-3.2-.83-3.5-.97.17.12.382.405-.25 1-.912.861-6.246 1.331-9.25 1.47-2.99.138-6.567-.451-10.969-2.47-3.993-1.83-5.365-3.028-9.437-4.905v.156c3.647 2.133 5.27 3.935 8.875 5.719 1.737.86 3.607 1.45 5.437 1.875-.54.253-1.55.64-3.437.906-2.88.404-6.838.646-8.469.562a9.36 9.36 0 0 1-2.406-.437 12.971 12.971 0 0
+1-1.313-.5c.17.109.432.41-.218 1.031-.911.87-6.25 1.392-9.25 1.563-2.987.17-6.574-.316-10.97-2.282-4.424-1.978-5.605-3.228-10.843-5.375 4.71 2.388 6.27 4.39 10.312 6.344a23.73 23.73 0 0 0 5.218 1.781c-.461.25-1.597.713-3.687 1.032-2.876.438-6.78.733-8.406.687-1.539-.043-3.233-.745-3.532-.875.169.113.411.414-.218 1.031-.908.891-6.203 1.529-9.188 1.813-2.971.283-6.573-.176-10.938-1.938-3.96-1.598-5.329-2.795-9.344-4.312v.156c3.596 1.811 5.239 3.582 8.813 5.156 1.722.759 3.587 1.29 5.406 1.625-.536.28-1.566.688-3.437 1.063-2.856.572-6.79 1.02-8.407 1.031-.844.006-1.706-.08-2.375-.25-.676-.162-1.16-.33-1.28-.375.166.094.422.383-.22 1.062-.897.951-6.186 1.918-9.125 2.438-2.925.518-6.432.374-10.719-1.031-4.315-1.415-5.472-2.53-10.562-3.969 4.577 1.751 6.09 3.56 10.031 5 1.627.594 3.37.956 5.094 1.156-.453.297-1.555.884-3.594 1.469-2.804.805-6.638 1.576-8.218
+1.75-1.495.165-3.117-.317-3.407-.406.164.09.393.36-.218 1.062-.883 1.014-6.045 2.372-8.938 3.063-2.88.687-6.335.76-10.562-.438-3.835-1.086-5.172-2.072-9.062-3.125v.156c3.484 1.395 5.07 2.92 8.53 4.032 1.669.535 3.457.786 5.22.875-.52.352-1.5.914-3.313 1.53-2.765.942-6.59 1.936-8.156 2.157-.818.115-1.633.123-2.281.031-.655-.083-1.133-.218-1.25-.25.162.075.434.34-.188 1.094-.87 1.055-6.01 2.66-8.875 3.438-2.852.774-6.259.958-10.438-.094-4.206-1.06-5.356-2.042-10.344-3.156 4.485 1.46 5.97 3.135 9.813 4.25 1.585.46 3.287.638 4.969.687-.442.337-1.513 1.028-3.5 1.781-2.734 1.037-6.452 2.163-8 2.438-1.465.26-3.06-.117-3.344-.188.16.08.38.321-.219 1.063-.865 1.07-5.916 2.818-8.75 3.687-2.82.866-6.207 1.157-10.344.22-3.753-.852-5.048-1.717-8.875-2.595v.157c3.428 1.237 4.987 2.632 8.375 3.53 1.632.434 3.367.584 5.094.563-.51.384-1.477 1.022-3.25 1.75-2.706 1.112-6.436 2.308-7.969
+2.625-.8.166-1.612.219-2.25.157v1.406c.227-.145.449-.273.719-.375 1.08-.41 2.171-.216 6-1.688 3.828-1.471 5.224-2.005 5.906-2.406.68-.4 1.612-.88 2.219-1.531 1.827-.138 3.57-.493 4.937-1 2.968-1.1 4.876-1.806 6.782-2.469 1.905-.663 2.354-1.415 3.406-1.781 1.091-.38 2.195-.166 6.062-1.531 3.868-1.366 5.283-1.827 5.969-2.22.701-.4 1.7-.932 2.313-1.593 1.97-.055 3.816-.385 5.28-.875 3.002-1.005 4.927-1.622 6.845-2.25 1.538-.504 2.174-1.047 2.906-1.437.23-.135.475-.254.75-.344 1.098-.36 2.181-.082 6.094-1.313 3.912-1.23 5.366-1.673 6.062-2.03.694-.358 1.63-.794 2.25-1.407 1.865-.023 3.636-.267 5.031-.688 3.03-.913 4.993-1.43 6.938-1.968 1.945-.54 2.426-1.265 3.5-1.563 1.114-.31 2.22.007 6.187-1.031 3.968-1.039 5.418-1.433 6.125-1.75.735-.33 1.814-.754 2.438-1.375 1.997.116 3.857-.02 5.344-.375 3.078-.735 5.083-1.101 7.062-1.5 1.588-.32 2.244-.79 3-1.094.238-.107.467-.193.75-.25 1.134-.23
+2.305.209 6.344-.5s5.5-.927 6.219-1.187c.715-.26 1.704-.568 2.343-1.094 1.925.24 3.748.224 5.188 0 3.126-.488 5.155-.7 7.156-.969 2.002-.268 2.489-.945 3.594-1.094 1.146-.154 2.276.302 6.344-.219 4.068-.52 5.56-.695 6.28-.937.738-.247 1.799-.586 2.438-1.125 2.05.335 3.974.398 5.5.219 3.143-.37 5.18-.56 7.188-.782 1.61-.178 2.265-.608 3.031-.843a3.43 3.43 0 0 1 .781-.188c1.15-.128 2.302.347 6.375-.125s5.56-.61 6.282-.844c.719-.232 1.7-.473 2.343-.968 1.937.333 3.77.404 5.22.25 3.145-.335 5.177-.519 7.187-.719 2.01-.2 2.484-.826 3.593-.938 1.152-.115 2.297.366 6.375-.062s5.59-.562 6.313-.781c.74-.224 1.796-.514 2.437-1.031 2.058.398 4.002.493 5.532.343 3.148-.308 5.175-.473 7.187-.656 1.614-.147 2.263-.56 3.031-.781.242-.081.494-.13.782-.156 1.152-.106 2.293.392 6.375 0 4.082-.393 5.589-.531 6.312-.75.721-.219 1.7-.448 2.344-.938 1.938.35 3.769.454 5.219.313 3.148-.309 5.175-.474
+7.187-.657 2.012-.183 2.514-.838 3.625-.937 1.152-.103 2.292.385 6.375 0s5.589-.501 6.313-.719c.739-.222 1.795-.514 2.437-1.031 2.057.402 4.003.503 5.531.344 3.147-.329 5.178-.523 7.188-.72 1.613-.156 2.266-.63 3.031-.874.24-.088.463-.122.75-.156 1.148-.14 2.317.34 6.375-.25 4.058-.59 5.562-.778 6.281-1.032.717-.253 1.675-.558 2.313-1.093 1.92.211 3.72.151 5.156-.094 3.12-.533 5.112-.929 7.094-1.313 1.982-.384 2.474-1.04 3.562-1.28 1.13-.252 2.27.115 6.25-.876s5.43-1.42 6.125-1.781c.723-.376 1.762-.87 2.375-1.531 1.963-.012 3.794-.291 5.22-.844 2.95-1.145 4.872-1.87 6.687-2.75 1.455-.707 2.334-1.686 2.547-1.984.212-.298.111-.746.137-.767.043-.035.32-.085.48-.429.858-1.847 2.32-5.644
+2.435-6.329.113-.682.163-1.348.214-1.745.03-.23-.147-.865-.125-.924.031-.082.305-.265.36-.515.267-1.198.09-2.191-.125-3.609-.214-1.417-.983-4.622-1.637-5.476-.659-.862-1.223-1.011-1.748-1-.208.27.137.262.163.312.68.05.934.369 1.42.897s1.442 3.94 1.579 5.39.19 2.86-.088 3.468c-.278.609-.944.429-1.237.495.531.186.89.213.953 1.057.058.814-.134 1.64-.52 2.806-.391 1.18-1.845 4.35-2.286 4.599-.452.255-.952.182-1.288.05z" enable-background="new" filter="url(#jg)"/>
+ <path d="m988.75-263.84c1.912 0.634 4.55 1.758 6.125 2.813 1.575 1.054 2.896 1.482 5.531 3.375 2.609 1.873 5.027 3.015 7.313 4.062 2.47 1.132 5.752 2.155 9.531 3.938-1.207-1.259-7.139-3.365-9.031-4.188s-4.128-1.93-6.938-3.781-3.622-2.482-6-3.719c-2.377-1.237-4.08-1.95-6.53-2.5z" enable-background="new" filter="url(#jf)"/>
+ <path d="M957.5-260.78c1.91.618 4.583 1.71 6.156 2.75 1.574 1.04 2.896 1.482 5.531 3.375 2.609 1.873 5.027 3.015 7.313 4.063 2.47 1.131 5.752 2.154 9.531 3.937-1.207-1.258-7.201-3.396-9.094-4.219-1.892-.823-4.096-1.93-6.906-3.781-2.81-1.85-3.593-2.44-5.969-3.656s-4.113-1.939-6.562-2.469z" enable-background="new" filter="url(#ib)"/>
+ <path d="M926.09-257.38c1.908.597 4.553 1.664 6.125 2.688 1.571 1.023 2.87 1.44 5.5 3.28 2.603 1.823 5.029 2.973 7.313 4 2.467 1.111 5.755 2.094 9.53 3.845-1.205-1.249-7.171-3.319-9.062-4.125s-4.102-1.891-6.906-3.688c-2.804-1.796-3.627-2.402-6-3.594-2.373-1.191-4.054-1.903-6.5-2.406z" enable-background="new" filter="url(#ia)"/>
+ <path d="M894.91-253.56c1.902.554 4.587 1.589 6.156 2.594s2.874 1.408 5.5 3.219c2.6 1.791 5 2.871 7.281 3.875 2.465 1.083 5.76 2.04 9.532 3.75-1.205-1.236-7.175-3.245-9.063-4.032-1.888-.786-4.075-1.83-6.875-3.593s-3.6-2.369-5.969-3.532c-2.37-1.163-4.123-1.834-6.562-2.28z" enable-background="new" filter="url(#hz)"/>
+ <path d="M863.72-248.66c1.88.43 4.504 1.38 6.063 2.313 1.558.932 2.852 1.257 5.468 3 2.59 1.724 4.981 2.708 7.25 3.625 2.452.99 5.74 1.877 9.5 3.5-1.201-1.208-7.152-3.067-9.03-3.782-1.88-.715-4.086-1.684-6.876-3.375s-3.585-2.228-5.937-3.28-4.026-1.713-6.438-2z" enable-background="new" filter="url(#hy)"/>
+ <path d="m833.16-241.38c1.848 0.296 4.47 0.976 6 1.781s2.814 1.056 5.375 2.531c2.535 1.46 4.89 2.326 7.125 3.063 2.414 0.797 5.657 1.467 9.375 2.844-1.188-1.129-7.088-2.59-8.938-3.156-1.85-0.567-4.003-1.374-6.75-2.844-2.746-1.47-3.5-1.92-5.812-2.781-2.311-0.861-4.005-1.32-6.375-1.438z" enable-background="new" filter="url(#hx)"/>
+ <path d="m802.91-232.31c1.822 0.211 4.366 0.8 5.875 1.531 1.51 0.73 2.756 0.93 5.281 2.281 2.5 1.338 4.832 2.049 7.031 2.657 2.377 0.656 5.565 1.073 9.22 2.187-1.168-1.045-6.93-2.103-8.75-2.562-1.822-0.46-3.953-1.127-6.657-2.438s-3.471-1.72-5.75-2.469-3.913-1.179-6.25-1.187z" enable-background="new" filter="url(#hw)"/>
+ <path d="M773.19-222.19c1.811.179 4.32.665 5.813 1.344 1.491.678 2.753.798 5.25 2.062 2.47 1.252 4.79 1.896 6.968 2.438 2.354.585 5.492.897 9.094 1.844-1.15-.992-6.852-1.784-8.656-2.188s-3.916-1.021-6.594-2.25c-2.678-1.229-3.403-1.61-5.656-2.281-2.253-.67-3.896-1.002-6.219-.969z" enable-background="new" filter="url(#hv)"/>
+ <path d="M743.56-211.19c1.793.13 4.273.55 5.75 1.188s2.716.741 5.188 1.937c2.446 1.184 4.72 1.747 6.874 2.219 2.328.51 5.42.68 9 1.562-1.143-.97-6.747-1.59-8.53-1.937-1.784-.347-3.884-.888-6.532-2.031-2.648-1.144-3.395-1.517-5.625-2.125-2.23-.61-3.826-.91-6.125-.813z" enable-background="new" filter="url(#hu)"/>
+ <g fill="#fff" filter="url(#ht)">
+ <path d="M744.94-212.12s7.222-3.223 9.063-3.5 3.352-.003 6 .563c2.647.565 8.735 2.215 11.188 3.374s5.312 3.563 5.312 3.563-7.146-2.78-10.188-3.563-7.645-2.083-10.375-2.312-11 1.875-11 1.875z"/>
+ <path d="m735.47-206.95s3.66-2.223 5.5-2.5 3.665 0.247 6.313 0.813 8.735 2.215 11.188 3.375 6.562 2.125 6.562 2.125-8.396-1.343-11.438-2.125-7.957-2.334-10.688-2.563-7.438 0.875-7.438 0.875zm24.38-10.66s8.544-3.299 10.398-3.458c1.854-0.16 3.642 0.48 6.248 1.212s8.577 2.766 10.95 4.08 6.414 2.537 6.414 2.537-8.294-1.873-11.279-2.848c-2.985-0.974-7.792-2.834-10.503-3.236s-12.228 1.713-12.228 1.713zm15.35-5.62s7.771-2.782 9.628-2.904c1.857-0.12 3.631 0.555 6.222 1.341 2.59 0.787 8.519 2.942 10.864 4.304 2.346 1.362 6.36 2.67 6.36 2.67s-8.253-2.045-11.217-3.08c-2.965-1.035-7.733-2.995-10.434-3.452-2.702-0.458-11.422 1.121-11.422 1.121zm14.44-4.72s8.683-3.52 10.542-3.605 3.62 0.624 6.195 1.46c2.575 0.837 8.46 3.107 10.779 4.514 2.318 1.408 6.307 2.793 6.307 2.793s-8.212-2.204-11.156-3.297-7.673-3.144-10.365-3.654-12.3 1.789-12.3 1.789zm14.86-5.38s7.808-2.583 9.666-2.668c1.86-0.085 3.62
+0.625 6.195 1.461 2.575 0.837 8.46 3.107 10.78 4.514 2.318 1.407 6.307 2.792 6.307 2.792s-8.213-2.204-11.156-3.296-7.673-3.144-10.365-3.654-11.426 0.85-11.426 0.85zm15.06-4.25s8.558-2.583 10.417-2.668 3.62 0.625 6.195 1.461c2.575 0.837 8.46 3.107 10.779 4.514 2.318 1.407 6.307 2.792 6.307 2.792s-8.212-2.204-11.156-3.296-7.673-3.144-10.365-3.654-12.176 0.85-12.176 0.85zm16.67-5.02s6.967-1.987 8.828-1.968c1.86 0.02 3.579 0.827 6.102 1.807 2.524 0.98 8.272 3.578 10.508 5.113 2.236 1.536 6.14 3.143 6.14 3.143s-8.075-2.662-10.952-3.919c-2.878-1.256-7.484-3.57-10.143-4.231-2.66-0.66-10.482 0.055-10.482 0.055zm14.5-3.4s7.688-2.028 9.548-1.968 3.56 0.902 6.063 1.936c2.502 1.033 8.194 3.752 10.397 5.335 2.203 1.582 6.072 3.272 6.072 3.272s-8.017-2.833-10.868-4.15c-2.85-1.318-7.407-3.73-10.05-4.446s-11.162 0.021-11.162 0.021zm14.09-3.21s8.17-1.97 10.027-1.854c1.857 0.115 3.532 1.01 6.002
+2.118s8.077 3.997 10.23 5.645 5.972 3.454 5.972 3.454-7.928-3.074-10.738-4.476-7.291-3.95-9.913-4.746c-2.621-0.796-11.58-0.141-11.58-0.141zm16.56-2.39s8.085-1.908 9.938-1.737c1.853 0.172 3.5 1.117 5.935 2.3 2.436 1.182 7.952 4.24 10.055 5.953s5.864 3.633 5.864 3.633-7.832-3.312-10.597-4.8-7.168-4.169-9.764-5.044c-2.597-0.876-11.431-0.305-11.431-0.305zm15.2-2.75s7.642-1.428 9.495-1.265c1.854 0.162 3.505 1.1 5.946 2.27s7.973 4.203 10.084 5.905c2.112 1.703 5.881 3.605 5.881 3.605s-7.847-3.274-10.62-4.748c-2.772-1.474-7.187-4.135-9.788-4.998-2.6-0.863-10.998-0.77-10.998-0.77zm14.87-1.64s8.642-1.553 10.495-1.39c1.854 0.162 3.505 1.1 5.946 2.27s7.972 4.203 10.084 5.905c2.111 1.703 5.88 3.605 5.88 3.605s-7.846-3.274-10.62-4.748c-2.772-1.474-7.187-4.135-9.787-4.998-2.601-0.863-11.998-0.644-11.998-0.644zm16.25-2.31s7.642-0.865 9.495-0.703c1.854 0.163 3.505 1.1 5.946 2.27s7.973 4.203 10.084
+5.906c2.112 1.702 5.881 3.605 5.881 3.605s-7.847-3.275-10.62-4.749c-2.772-1.474-7.187-4.135-9.788-4.998-2.6-0.862-10.998-1.331-10.998-1.331zm15.13-1.19s8.58-1.49 10.433-1.328c1.854 0.163 3.505 1.1 5.946 2.27s7.972 4.203 10.084 5.906c2.111 1.702 5.88 3.605 5.88 3.605s-7.846-3.275-10.62-4.749c-2.772-1.474-7.187-4.135-9.787-4.998-2.601-0.862-11.935-0.706-11.935-0.706zm16.25-2.06s7.83-0.803 9.683-0.64c1.854 0.162 3.505 1.1 5.946 2.27s7.972 4.203 10.084 5.905c2.111 1.703 5.88 3.605 5.88 3.605s-7.846-3.274-10.62-4.748c-2.772-1.474-7.187-4.135-9.787-4.998-2.601-0.863-11.185-1.394-11.185-1.394zm15.37-1.25s8.392-1.178 10.245-1.015c1.854 0.162 3.505 1.1 5.946 2.27s7.972 4.203 10.084 5.905c2.111 1.703 5.88 3.605 5.88 3.605s-7.847-3.274-10.62-4.748c-2.772-1.474-7.187-4.135-9.788-4.998-2.6-0.863-11.748-1.02-11.748-1.02zm16.19-2.06s6.892-0.99 8.745-0.828c1.854 0.163 3.505 1.1 5.946 2.27s7.973 4.203
+10.084 5.906c2.112 1.702 5.881 3.605 5.881 3.605s-7.847-3.275-10.62-4.749-7.188-4.135-9.788-4.998c-2.6-0.862-10.248-1.206-10.248-1.206zm17.16-0.94s6.83-1.178 8.683-1.015c1.854 0.162 3.505 1.1 5.946 2.27 2.44 1.171 7.972 4.203 10.084 5.905 2.111 1.703 5.88 3.605 5.88 3.605s-7.847-3.274-10.62-4.748c-2.772-1.474-7.187-4.135-9.788-4.998-2.6-0.863-10.185-1.02-10.185-1.02zm16.1-2s6.08-0.428 7.933-0.265c1.854 0.162 3.505 1.1 5.946 2.27 2.44 1.171 7.972 4.203 10.084 5.905 2.111 1.703 5.88 3.605 5.88 3.605s-7.847-3.274-10.62-4.748c-2.772-1.474-7.187-4.135-9.788-4.998-2.6-0.863-9.435-1.77-9.435-1.77zm15.8-1.37s6.454-0.678 8.308-0.515c1.854 0.162 3.505 1.1 5.946 2.27 2.44 1.171 7.972 4.203 10.084 5.905 2.111 1.703 5.88 3.605 5.88 3.605s-7.847-3.274-10.62-4.748c-2.772-1.474-7.187-4.135-9.788-4.998-2.6-0.863-9.81-1.52-9.81-1.52zm15.6-1.86s5.498-0.91 7.358-0.853c1.86 0.056 3.562 0.896 6.066 1.925
+2.504 1.03 8.2 3.739 10.406 5.318 2.205 1.578 6.078 3.261 6.078 3.261s-8.022-2.819-10.875-4.131c-2.853-1.313-7.413-3.716-10.06-4.429-2.645-0.712-8.973-1.091-8.973-1.091zm17.4-2.46s4.547-1.156 6.408-1.186c1.86-0.03 3.6 0.73 6.149 1.642 2.55 0.912 8.365 3.354 10.64 4.829 2.277 1.474 6.224 2.976 6.224 2.976s-8.145-2.444-11.055-3.623c-2.91-1.178-7.578-3.368-10.253-3.957-2.676-0.588-8.113-0.68-8.113-0.68zm14.5-3.03s5.96-1.774 7.82-1.83c1.86-0.057 3.61 0.68 6.172 1.555 2.562 0.876 2.522 0.857 5.333 1.49 2.797 0.63 7.077 1.513 7.077 1.513s-3.616-0.016-6.792-0.466c-3.116-0.441-7.375-1.698-10.058-2.249-2.684-0.55-9.552-0.013-9.552-0.013z" enable-background="new"/>
+ <path d="M1099.2-279.93c.161.269 11.208-4.6 12.188-4.688.98-.087 2 3.125 2 3.125s-.775-1.504-2.875-1.062-11.301 2.671-11.312 2.625z"/>
+ </g>
+ <path d="M1107.5-284.09c-.419.213-.156.094-.647.306-.486.21-1.724.574-4.08 1.459-3.33 1.25-5.83 2.153-7.026 3.066-1.536.021-3.72.233-5.656.719a227.709 227.709 0 0 1-6.75 1.593c-1.895.42-1.676.643-2.875.875-1.297.252-1.721-.009-5.438.782-3.49.742-8.894 1.93-10.156 2.687-1.583-.18-3.867-.322-5.843-.031-3.04.447-4.917.673-6.844.906-.655.08-1.041.201-1.344.282-.426.131-.686.26-1.375.343-1.311.16-1.762-.157-5.531.282-3.554.413-9.005 1.272-10.25 1.937-1.599-.297-3.858-.534-5.844-.344-3.059.294-4.972.484-6.906.657-1.934.172-1.689.422-2.906.53-1.317.118-1.76-.163-5.532.25-3.541.39-9.007 1.21-10.28 1.876-1.6-.295-3.888-.507-5.876-.313-3.058.3-4.94.48-6.875.657-.657.06-1.04.178-1.343.25-.428.118-.684.218-1.375.28-1.316.121-1.76-.194-5.532.22-3.556.39-9.005 1.239-10.25
+1.906-1.598-.294-3.86-.524-5.843-.313-3.056.326-4.974.526-6.907.719-1.932.192-1.69.44-2.906.562-1.315.132-1.763-.164-5.53.282-3.54.418-8.979 1.292-10.25 1.969-1.599-.282-3.86-.42-5.845-.188-3.052.358-4.945.568-6.875.781-.656.073-1.04.173-1.344.25-.426.127-.684.267-1.375.344-1.313.146-1.767-.174-5.53.313-3.55.458-8.98 1.419-10.22 2.125-1.593-.245-3.834-.382-5.812-.125-3.048.394-4.95.648-6.875.906-1.925.258-1.726.493-2.938.656-1.31.176-1.747-.104-5.5.469-3.524.538-8.923 1.699-10.188 2.437-1.587-.203-3.845-.254-5.812.094-3.026.536-4.9.862-6.813 1.187-.65.111-1.013.271-1.312.375-.42.165-.664.332-1.344.47-1.295.26-1.727-.007-5.438.812-3.498.772-8.846 2.383-10.062 3.219-1.562-.078-3.757.085-5.687.593-2.972.783-4.818 1.232-6.688 1.75s-1.666.768-2.843 1.094c-1.273.353-1.697.107-5.344 1.188-3.425 1.014-8.65 2.933-9.875 3.843-1.539.013-3.72.273-5.625.875-2.931.928-4.75 1.459-6.594
+2.063-.627.205-.992.392-1.281.531-.408.214-.653.409-1.313.625-1.254.412-1.686.19-5.28 1.438-3.39 1.177-8.596 3.213-9.782 4.156-1.524.06-3.65.395-5.531 1.062-2.898 1.029-4.7 1.676-6.531 2.313-1.833.637-1.628.848-2.782 1.25-1.246.434-1.663.2-5.218 1.562-3.34 1.28-8.488 3.483-9.688 4.47-1.507.107-3.636.498-5.5 1.218a1044.752 1044.752 0 0 1-6.437 2.469c-.617.233-.997.442-1.282.593v1.094c.112-.222.386-.817.907-1.094.698-.37 4.813-1.993 6.812-2.718 1.657-.602 4.154-1.329 5.969-1.313.302.003.588.051.844.094 1.842.308 7.468 1.562 7.468 1.562s-6.233-1.646-7.03-1.843c-.191-.048-.536-.07-.97-.063 1.146-.87 4.762-2.393 7.344-3.437 2.839-1.148 3.117-1.252 5.063-1.657 2.008-.417 3.156-.5 3.156-.5s-.082-.6.969-1.125c.705-.351 4.887-1.892 6.906-2.562 1.952-.648 5.057-1.359 6.875-1 1.863.367 7.531 1.812 7.531 1.812s-6.287-1.87-7.094-2.093c-.193-.054-.53-.086-.968-.094 1.158-.833 4.794-2.195 7.406-3.156
+2.87-1.056 3.167-1.162 5.125-1.532 1.853-.35 2.859-.425 3.031-.437.114-.217.377-.81.906-1.063.71-.338 4.926-1.712 6.97-2.312 1.692-.497 4.24-1.037 6.093-.906.308.021.613.097.875.156 1.881.424 7.594 2.031 7.594 2.031s-6.342-2.065-7.157-2.312c-.194-.06-.557-.104-1-.125 1.17-.798 4.863-2.057 7.5-2.938 2.898-.968 3.233-1.003 5.22-1.281 2.049-.287 3.187-.313 3.187-.313s-.073-.607 1-1.062c.72-.306 4.99-1.5 7.062-2 2.003-.483 5.199-.928 7.063-.406 1.91.535 7.719 2.5 7.719 2.5s-6.423-2.424-7.25-2.72c-.198-.07-.583-.14-1.032-.187 1.188-.728 4.916-1.774 7.594-2.5 2.944-.797 3.292-.77 5.313-.906 1.913-.128 2.947-.07 3.125-.062.117-.204.391-.78.937-.97.732-.253 5.079-1.047 7.188-1.374 1.748-.271 4.4-.485 6.312-.094.318.065.605.186.875.281 1.94.69 7.844 3.094 7.844 3.094s-6.535-2.95-7.375-3.312c-.201-.087-.575-.167-1.031-.25 1.206-.633 5.03-1.396 7.75-1.906 2.99-.562 3.3-.53 5.344-.532 2.109-.002
+3.312.125 3.312.125s-.073-.63 1.031-.937c.74-.206 5.126-.834 7.25-1.063 2.053-.22 5.319-.252 7.22.47 1.947.738 7.843 3.374 7.843 3.374s-6.563-3.179-7.406-3.562c-.202-.092-.543-.187-1-.282 1.21-.602 4.984-1.248 7.718-1.656 3.005-.448 3.326-.452 5.375-.406 1.94.043 3.007.194 3.188.219.119-.194.384-.766.937-.907.743-.188 5.155-.734 7.282-.937 1.763-.169 4.42-.234 6.343.25.32.08.604.203.875.312 1.953.784 7.907 3.47 7.907 3.47s-6.592-3.254-7.438-3.657c-.202-.096-.572-.207-1.031-.313 1.214-.574 5.044-1.122 7.781-1.5 3.009-.415 3.323-.442 5.375-.375 2.118.07 3.313.25 3.313.25s-.078-.637 1.03-.906c.745-.18 5.153-.663 7.282-.844 2.059-.174 5.343-.124 7.25.657 1.955.8 7.875 3.53 7.875 3.53s-6.56-3.308-7.406-3.718c-.202-.098-.572-.203-1.031-.312 1.215-.564 5.01-1.115 7.75-1.47 3.01-.389 3.321-.397 5.375-.312 1.944.08 3.006.254 3.187.282.12-.191.383-.746.938-.875.744-.174 5.15-.65 7.28-.813
+1.767-.134 4.45-.126 6.376.375.32.083.603.201.875.313 1.954.8 7.906 3.562 7.906 3.562s-6.591-3.34-7.437-3.75c-.203-.098-.572-.203-1.032-.312 1.215-.564 5.042-1.084 7.782-1.438 3.01-.39 3.352-.429 5.406-.344 2.12.088 3.312.313 3.312.313s-.078-.65 1.032-.906c.744-.173 5.15-.624 7.28-.782 2.061-.152 5.344-.096 7.25.688 1.956.804 7.876 3.5 7.876 3.5s-6.56-3.276-7.406-3.688c-.203-.098-.572-.202-1.032-.312 1.216-.562 5.012-1.128 7.75-1.5 3.01-.41 3.323-.416 5.375-.344 1.943.068 3.008.165 3.188.188.119-.195.384-.73.937-.875.742-.197 5.131-.83 7.25-1.094 1.757-.22 4.406-.333 6.313.031.317.06.606.19.875.281 1.936.661 7.844 2.938 7.844 2.938s-6.537-2.807-7.375-3.156c-.2-.084-.577-.174-1.032-.25 1.204-.651 5.02-1.372 7.72-2 2.966-.69 3.288-.756 5.312-.875 2.088-.124 3.28-.032 3.28-.032s-.086-.632 1-1.03c.73-.269 5.048-1.339 7.126-1.813 2.008-.46 5.168-1.03 7-.625 1.878.414 13.578 3.015 13.578
+3.015s-12.328-3.022-13.141-3.265c-.195-.058-.559-.107-1-.125 1.167-.804 3.514-1.688 6.11-2.703 1.68-.659.923-.377 2.775-1.004 1.754-.594 2.486-1.01 2.63-1.113.347-.207-.355-.122-.544-.042z" enable-background="new" filter="url(#hs)" opacity=".25"/>
+ <path d="m1082.6-275.12c1.873 0.393 4.496 1.146 6.031 1.969s2.822 1.056 5.375 2.5c2.527 1.43 4.796 2.007 6.969 2.531 2.348 0.566 5.435 0.715 8.844 1.188-1.09-0.84-6.608-1.173-8.406-1.563-1.8-0.39-3.895-1.016-6.594-2.313-2.7-1.296-3.495-1.799-5.813-2.687-2.318-0.889-4.004-1.383-6.406-1.625z" enable-background="new" filter="url(#hr)" opacity=".25"/>
+ <path d="M1051.5-270c1.905.578 4.528 1.616 6.094 2.594 1.565.978 2.88 1.36 5.5 3.125 2.593 1.747 4.986 2.71 7.25 3.594 2.446.955 5.682 1.657 9.406 3.062-1.19-1.138-7.063-2.687-8.938-3.375-1.874-.688-4.081-1.566-6.874-3.281-2.794-1.715-3.574-2.284-5.938-3.406-2.364-1.123-4.057-1.835-6.5-2.313z" enable-background="new" filter="url(#hq)" opacity=".25"/>
+ <path d="m1020.2-266.84c1.912 0.638 4.581 1.755 6.156 2.813 1.575 1.057 2.896 1.508 5.531 3.406 2.61 1.878 5.029 3.03 7.313 4.062 2.468 1.116 5.764 2.174 9.531 3.844-1.203-1.222-7.203-3.314-9.094-4.125-1.89-0.81-4.064-1.894-6.874-3.75s-3.622-2.477-6-3.719c-2.379-1.242-4.111-1.975-6.563-2.531z" enable-background="new" filter="url(#hp)" opacity=".25"/>
+ <path d="M1110.2-266.89c.15.049.688.631.11 1.484-.81 1.195-5.705 3.325-8.563 4.125-2.845.798-6.29.978-10.562-.375-4.302-1.362-5.47-2.468-10.656-4.312 4.664 2.115 6.195 3.952 10.125 5.344 1.62.574 3.367.94 5.062 1.03-.445.327-1.53.984-3.562 1.595-2.796.84-6.65 1.534-8.25 1.625-1.515.086-3.142-.513-3.438-.625.167.103.374.377-.25 1.03-.899.945-6.147 1.924-9.125 2.25-2.964.326-6.521-.015-10.906-1.905-3.978-1.715-5.339-2.916-9.406-4.75v.156c3.643 2.095 5.284 3.883 8.875 5.562 1.73.81 3.592 1.41 5.406 1.72-.534.286-1.557.71-3.437 1.03-2.87.488-6.81.817-8.438.75-.85-.034-1.728-.184-2.406-.406-.685-.215-1.19-.444-1.312-.5.169.107.43.403-.22 1.031-.909.88-6.245 1.337-9.25 1.47-2.99.131-6.588-.451-11-2.563-4.44-2.127-5.64-3.402-10.905-5.782 4.734 2.597 6.286 4.63 10.344 6.72 1.673.861 3.485 1.493 5.25 1.937-.463.233-1.59.688-3.688.937-2.886.343-6.834.493-8.468.375-1.547-.111-3.232-.857-3.532-1
+.17.12.414.41-.218 1-.913.851-6.244 1.262-9.25 1.375-2.993.113-6.59-.49-11-2.594-4.002-1.908-5.388-3.137-9.47-5.093v.156c3.656 2.204 5.295 4.053 8.907 5.906 1.74.893 3.637 1.528 5.469 1.969-.54.248-1.578.615-3.469.844-2.886.348-6.866.52-8.5.406a9.446 9.446 0 0 1-2.406-.5 12.532 12.532 0 0 1-1.313-.531c.17.112.465.422-.187 1.03-.913.853-6.275 1.294-9.281 1.407-2.993.112-6.594-.528-11-2.594-4.437-2.08-5.647-3.331-10.906-5.656 4.729 2.548 6.29 4.578 10.344 6.625 1.671.844 3.485 1.467 5.25 1.906-.464.235-1.59.684-3.688.938-2.886.348-6.836.57-8.469.469-1.544-.096-3.2-.83-3.5-.97.17.12.382.405-.25 1-.912.861-6.246 1.331-9.25 1.47-2.99.138-6.567-.451-10.969-2.47-3.993-1.83-5.365-3.028-9.437-4.905v.156c3.647 2.133 5.27 3.935 8.875 5.719 1.737.86 3.607 1.45 5.437 1.875-.54.253-1.55.64-3.437.906-2.88.404-6.838.646-8.469.562a9.36 9.36 0 0 1-2.406-.437 12.971 12.971 0 0
+1-1.313-.5c.17.109.432.41-.218 1.031-.911.87-6.25 1.392-9.25 1.563-2.987.17-6.574-.316-10.97-2.282-4.424-1.978-5.605-3.228-10.843-5.375 4.71 2.388 6.27 4.39 10.312 6.344a23.73 23.73 0 0 0 5.218 1.781c-.461.25-1.597.713-3.687 1.032-2.876.438-6.78.733-8.406.687-1.539-.043-3.233-.745-3.532-.875.169.113.411.414-.218 1.031-.908.891-6.203 1.529-9.188 1.813-2.971.283-6.573-.176-10.938-1.938-3.96-1.598-5.329-2.795-9.344-4.312v.156c3.596 1.811 5.239 3.582 8.813 5.156 1.722.759 3.587 1.29 5.406 1.625-.536.28-1.566.688-3.437 1.063-2.856.572-6.79 1.02-8.407 1.031-.844.006-1.706-.08-2.375-.25-.676-.162-1.16-.33-1.28-.375.166.094.422.383-.22 1.062-.897.951-6.186 1.918-9.125 2.438-2.925.518-6.432.374-10.719-1.031-4.315-1.415-5.472-2.53-10.562-3.969 4.577 1.751 6.09 3.56 10.031 5 1.627.594 3.37.956 5.094 1.156-.453.297-1.555.884-3.594 1.469-2.804.805-6.638 1.576-8.218
+1.75-1.495.165-3.117-.317-3.407-.406.164.09.393.36-.218 1.062-.883 1.014-6.045 2.372-8.938 3.063-2.88.687-6.335.76-10.562-.438-3.835-1.086-5.172-2.072-9.062-3.125v.156c3.484 1.395 5.07 2.92 8.53 4.032 1.669.535 3.457.786 5.22.875-.52.352-1.5.914-3.313 1.53-2.765.942-6.59 1.936-8.156 2.157-.818.115-1.633.123-2.281.031-.655-.083-1.133-.218-1.25-.25.162.075.434.34-.188 1.094-.87 1.055-6.01 2.66-8.875 3.438-2.852.774-6.259.958-10.438-.094-4.206-1.06-5.356-2.042-10.344-3.156 4.485 1.46 5.97 3.135 9.813 4.25 1.585.46 3.287.638 4.969.687-.442.337-1.513 1.028-3.5 1.781-2.734 1.037-6.452 2.163-8 2.438-1.465.26-3.06-.117-3.344-.188.16.08.38.321-.219 1.063-.865 1.07-5.916 2.818-8.75 3.687-2.82.866-6.207 1.157-10.344.22-3.753-.852-5.048-1.717-8.875-2.595v.157c3.428 1.237 4.987 2.632 8.375 3.53 1.632.434 3.367.584 5.094.563-.51.384-1.477 1.022-3.25 1.75-2.706 1.112-6.436 2.308-7.969
+2.625-.8.166-1.612.219-2.25.157v1.406c.227-.145.449-.273.719-.375 1.08-.41 2.171-.216 6-1.688 3.828-1.471 5.224-2.005 5.906-2.406.68-.4 1.612-.88 2.219-1.531 1.827-.138 3.57-.493 4.937-1 2.968-1.1 4.876-1.806 6.782-2.469 1.905-.663 2.354-1.415 3.406-1.781 1.091-.38 2.195-.166 6.062-1.531 3.868-1.366 5.283-1.827 5.969-2.22.701-.4 1.7-.932 2.313-1.593 1.97-.055 3.816-.385 5.28-.875 3.002-1.005 4.927-1.622 6.845-2.25 1.538-.504 2.174-1.047 2.906-1.437.23-.135.475-.254.75-.344 1.098-.36 2.181-.082 6.094-1.313 3.912-1.23 5.366-1.673 6.062-2.03.694-.358 1.63-.794 2.25-1.407 1.865-.023 3.636-.267 5.031-.688 3.03-.913 4.993-1.43 6.938-1.968 1.945-.54 2.426-1.265 3.5-1.563 1.114-.31 2.22.007 6.187-1.031 3.968-1.039 5.418-1.433 6.125-1.75.735-.33 1.814-.754 2.438-1.375 1.997.116 3.857-.02 5.344-.375 3.078-.735 5.083-1.101 7.062-1.5 1.588-.32 2.244-.79 3-1.094.238-.107.467-.193.75-.25 1.134-.23
+2.305.209 6.344-.5s5.5-.927 6.219-1.187c.715-.26 1.704-.568 2.343-1.094 1.925.24 3.748.224 5.188 0 3.126-.488 5.155-.7 7.156-.969 2.002-.268 2.489-.945 3.594-1.094 1.146-.154 2.276.302 6.344-.219 4.068-.52 5.56-.695 6.28-.937.738-.247 1.799-.586 2.438-1.125 2.05.335 3.974.398 5.5.219 3.143-.37 5.18-.56 7.188-.782 1.61-.178 2.265-.608 3.031-.843a3.43 3.43 0 0 1 .781-.188c1.15-.128 2.302.347 6.375-.125s5.56-.61 6.282-.844c.719-.232 1.7-.473 2.343-.968 1.937.333 3.77.404 5.22.25 3.145-.335 5.177-.519 7.187-.719 2.01-.2 2.484-.826 3.593-.938 1.152-.115 2.297.366 6.375-.062s5.59-.562 6.313-.781c.74-.224 1.796-.514 2.437-1.031 2.058.398 4.002.493 5.532.343 3.148-.308 5.175-.473 7.187-.656 1.614-.147 2.263-.56 3.031-.781.242-.081.494-.13.782-.156 1.152-.106 2.293.392 6.375 0 4.082-.393 5.589-.531 6.312-.75.721-.219 1.7-.448 2.344-.938 1.938.35 3.769.454 5.219.313 3.148-.309 5.175-.474
+7.187-.657 2.012-.183 2.514-.838 3.625-.937 1.152-.103 2.292.385 6.375 0s5.589-.501 6.313-.719c.739-.222 1.795-.514 2.437-1.031 2.057.402 4.003.503 5.531.344 3.147-.329 5.178-.523 7.188-.72 1.613-.156 2.266-.63 3.031-.874.24-.088.463-.122.75-.156 1.148-.14 2.317.34 6.375-.25 4.058-.59 5.562-.778 6.281-1.032.717-.253 1.675-.558 2.313-1.093 1.92.211 3.72.151 5.156-.094 3.12-.533 5.112-.929 7.094-1.313 1.982-.384 2.474-1.04 3.562-1.28 1.13-.252 2.27.115 6.25-.876s5.43-1.42 6.125-1.781c.723-.376 1.762-.87 2.375-1.531 1.963-.012 3.794-.291 5.22-.844 2.95-1.145 4.872-1.87 6.687-2.75 1.455-.707 2.334-1.686 2.547-1.984.212-.298.111-.746.137-.767.043-.035.32-.085.48-.429.858-1.847 2.32-5.644
+2.435-6.329.113-.682.163-1.348.214-1.745.03-.23-.147-.865-.125-.924.031-.082.305-.265.36-.515.267-1.198.09-2.191-.125-3.609-.214-1.417-.983-4.622-1.637-5.476-.659-.862-1.223-1.011-1.748-1-.208.27.137.262.163.312.68.05.934.369 1.42.897s1.221 3.85 1.358 5.301.19 2.86-.088 3.469c-.278.608-.723.517-1.016.583.531.186.67.125.732.969.058.813-.134 1.64-.52 2.806-.392 1.18-1.846 4.35-2.286 4.598-.452.256-.731.27-1.067.14z" enable-background="new" filter="url(#ho)" opacity=".25"/>
+ <path d="m988.75-263.84c1.912 0.634 4.55 1.758 6.125 2.813 1.575 1.054 2.896 1.482 5.531 3.375 2.609 1.873 5.027 3.015 7.313 4.062 2.47 1.132 5.752 2.155 9.531 3.938-1.207-1.259-7.139-3.365-9.031-4.188s-4.128-1.93-6.938-3.781-3.622-2.482-6-3.719c-2.377-1.237-4.08-1.95-6.53-2.5z" enable-background="new" filter="url(#hn)" opacity=".25"/>
+ <path d="M957.5-260.78c1.91.618 4.583 1.71 6.156 2.75 1.574 1.04 2.896 1.482 5.531 3.375 2.609 1.873 5.027 3.015 7.313 4.063 2.47 1.131 5.752 2.154 9.531 3.937-1.207-1.258-7.201-3.396-9.094-4.219-1.892-.823-4.096-1.93-6.906-3.781-2.81-1.85-3.593-2.44-5.969-3.656s-4.113-1.939-6.562-2.469z" enable-background="new" filter="url(#hm)" opacity=".25"/>
+ <path d="M926.09-257.38c1.908.597 4.553 1.664 6.125 2.688 1.571 1.023 2.87 1.44 5.5 3.28 2.603 1.823 5.029 2.973 7.313 4 2.467 1.111 5.755 2.094 9.53 3.845-1.205-1.249-7.171-3.319-9.062-4.125s-4.102-1.891-6.906-3.688c-2.804-1.796-3.627-2.402-6-3.594-2.373-1.191-4.054-1.903-6.5-2.406z" enable-background="new" filter="url(#hl)" opacity=".25"/>
+ <path d="M894.91-253.56c1.902.554 4.587 1.589 6.156 2.594s2.874 1.408 5.5 3.219c2.6 1.791 5 2.871 7.281 3.875 2.465 1.083 5.76 2.04 9.532 3.75-1.205-1.236-7.175-3.245-9.063-4.032-1.888-.786-4.075-1.83-6.875-3.593s-3.6-2.369-5.969-3.532c-2.37-1.163-4.123-1.834-6.562-2.28z" enable-background="new" filter="url(#hk)" opacity=".25"/>
+ <path d="M863.72-248.66c1.88.43 4.504 1.38 6.063 2.313 1.558.932 2.852 1.257 5.468 3 2.59 1.724 4.981 2.708 7.25 3.625 2.452.99 5.74 1.877 9.5 3.5-1.201-1.208-7.152-3.067-9.03-3.782-1.88-.715-4.086-1.684-6.876-3.375s-3.585-2.228-5.937-3.28-4.026-1.713-6.438-2z" enable-background="new" filter="url(#hj)" opacity=".25"/>
+ <path d="m833.16-241.38c1.848 0.296 4.47 0.976 6 1.781s2.814 1.056 5.375 2.531c2.535 1.46 4.89 2.326 7.125 3.063 2.414 0.797 5.657 1.467 9.375 2.844-1.188-1.129-7.088-2.59-8.938-3.156-1.85-0.567-4.003-1.374-6.75-2.844-2.746-1.47-3.5-1.92-5.812-2.781-2.311-0.861-4.005-1.32-6.375-1.438z" enable-background="new" filter="url(#hi)" opacity=".25"/>
+ <path d="m802.91-232.31c1.822 0.211 4.366 0.8 5.875 1.531 1.51 0.73 2.756 0.93 5.281 2.281 2.5 1.338 4.832 2.049 7.031 2.657 2.377 0.656 5.565 1.073 9.22 2.187-1.168-1.045-6.93-2.103-8.75-2.562-1.822-0.46-3.953-1.127-6.657-2.438s-3.471-1.72-5.75-2.469-3.913-1.179-6.25-1.187z" enable-background="new" filter="url(#hh)" opacity=".25"/>
+ <path d="M773.19-222.19c1.811.179 4.32.665 5.813 1.344 1.491.678 2.753.798 5.25 2.062 2.47 1.252 4.79 1.896 6.968 2.438 2.354.585 5.492.897 9.094 1.844-1.15-.992-6.852-1.784-8.656-2.188s-3.916-1.021-6.594-2.25c-2.678-1.229-3.403-1.61-5.656-2.281-2.253-.67-3.896-1.002-6.219-.969z" enable-background="new" filter="url(#hg)" opacity=".25"/>
+ <path d="M743.56-211.19c1.793.13 4.273.55 5.75 1.188s2.716.741 5.188 1.937c2.446 1.184 4.72 1.747 6.874 2.219 2.328.51 5.42.68 9 1.562-1.143-.97-6.747-1.59-8.53-1.937-1.784-.347-3.884-.888-6.532-2.031-2.648-1.144-3.395-1.517-5.625-2.125-2.23-.61-3.826-.91-6.125-.813z" enable-background="new" filter="url(#hf)" opacity=".25"/>
+ </g>
+ </g>
+ <path d="M912.45 671.2c1.642-3.218 3.518-5.735 4.861-9.849.8-3.658 3.312-2.03 7.26-8.397 1.403-2.24 5.477.391 8.966-2.399 1.27-.803 2.885-.404 4.483-.063 3.765 1.319 5.825 3.704 8.333 5.808 6.14 5.97 20.534 7.944 23.486 6.314 1.434-2.905 7.882-5.41 12.374-11.112.749-1.123 11.73-8.745 14.647-6.566" enable-background="new" fill="none" stroke="#000"/>
+ <path d="M937.07 660.78c7.363-3.233 13.811-8.908 20.708-13.385 3.31-1.97 6.87 3.216 10.796 3.599 2.298-.218 3.713 1.202 5.682 1.641 5.157 1.318 2.398 3.865 9.975 6.44 6.156 1.72 8.908-6.799 14.9-7.324 4.878-.503 8.1-.316 11.617-.252 3.927.139 4.079-3.498 6.061-5.304 2.98-2.805 7.156-1.85 10.145-4.74 1.018-1.385 1.955-3.011 2.735-5.109.882-2 3.04.306 4.798 1.263" enable-background="new" fill="none" stroke="#000"/>
+ <g fill-rule="evenodd">
+ <path transform="translate(48.571 195.53)" d="m403.28 1056.3l56.569-42.426 72.125 14.142-46.669 52.326-53.74 7.071-28.284-31.113z" enable-background="accumulate" filter="url(#he)" opacity=".25"/>
+ <path d="m590.84 1256.1c-1.407 18.801-1.145 32.751 2.082 49.303 3.226 16.552 16.406 45.907 20.334 63.184 3.926 17.267 2.694 38.31-12.46 51.148-15.317 12.977-42.05 21.599-67.831 15.734s-69.55-49.223-88.59-70.228c-19.112-21.083-63.761-93.851-77.94-124.28-14.177-30.425-12.66-36.719-8.119-45.53-9.367-24.52-12.414-50.067-33.712-75.577 30.325 3.114 43.88 26.956 60.126 47.14-5.53-48.076-18.055-64.416-28.374-90.724 29.994 6.082 50.579 31.872 63.98 72.712 9.554-3.918 18.238-9.373 30.187-9.061-11.298-41.696-17.949-69.916-36.687-101.07 53.442 5.67 83.657 80.639 78.971 87.96 9.978-2.243 19.006-6.53 30.437-5.65-11.249-38.348-21.048-76.869-3.66-118.65 0 0 48.287 65.436 54.39 85.805 6.103 20.37 1.52 38.701 1.52 38.701s16.96 31.085 20.293 51.094c3.373 20.241-3.533 59.103-4.946 77.983z" enable-background="new" fill="#ada469"/>
+ <path transform="matrix(-.90453 .25066 .25066 .90453 1043.9 219.06)" d="M719.5 738.7l18.312 15.432 44.411-15.388L805.5 713.2l11.464 19.221 30.672 12.784 25.097 5.728L892 723.2l16.023 23.826L947 752.2l10.245-6.198L964 754.7l25.5 11 2-40.5-35.551-14.538-32.493-21.527-40.452-11.466-21.307-15.533L840 685.2l-84.971-46.583-33.03 38.083-2.5 62z" clip-path="url(#e)" enable-background="accumulate" fill="#fff" filter="url(#o)"/>
+ <path transform="matrix(-.90453 .25066 .25066 .90453 870.86 206.47)" d="M584 696.5l-6.563 17.156s-7.811 20.365-15.688 43.656c-3.938 11.646-7.883 24.041-10.938 35.125s-5.335 20.38-5.5 28.281c-.398 19.162 5.747 34.888 8.938 41.75-.772 3.555-1.991 9.454-3.344 18.094-1.92 12.268-3.718 27.154-2.375 39.875 1.382 13.088 6.812 28.188 12.594 43.031s12.054 29.227 15.22 38.031c6.631 18.452 9.992 31.576 11.311 48.5.582 7.456-.242 20.336-1.25 33.375s-2.186 26.301-1.687 36.969c.989 21.14 9.328 46.835 33.375 57.937 22.775 10.515 55.327 11.702 83.438-3.437 16.16-8.704 30.076-27.098 43.375-46.906s24.969-41.053 31.938-54.906c15.353-30.521 39.394-115.46 45.625-152.72 3.018-18.047 3.921-29.066 2.625-38.031-.979-6.766-3.828-12.147-6.875-16.22 2.042-27.507-.732-51.368 11.969-79.405l10.562-23.281-23.812 9.312c-17.49 6.838-28.902 19.045-36.594 32.062-.323.546-.563 1.108-.875 1.656.222-22.515 4.408-37.638
+6.594-58.688l1.968-19-17.03 8.656c-30.595 15.556-45.696 48.193-49.72 90.22-4.245-.626-8.831-1.02-13.812-.844-.291-39.18-.396-67.037 8.594-99.375l5.594-20.125-19.438 7.656c-30.91 12.204-47.86 41.931-56.625 68.375-4.383 13.222-6.746 25.801-7.594 35.938a92.19 92.19 0 0 0-.312 7.719c-3.242-.037-6.42.136-10.062.5.041-39.005-3.485-79.754-32.281-116.5L584 696.498zm5.813 43.812c16.807 30.644 17.475 63.967 16.938 99.75l-.22 15.062 12.036-6.54c8.662-3.132 19.56-.227 31.934-.835l14.675 9.357-6.331-25.794c-.09-.23-.22-.417-.25-.72-.2-2.039-.222-5.472.125-9.624.694-8.304 2.79-19.585 6.625-31.156 5.155-15.553 13.488-31.192 25.125-42.531-4.684 28.638-3.216 60.259-3.012 95.805l-2.766 13.262 15.496-7.598c9.03-2.758 17.19-.35 29.281 1.094l13.246 9.444L741.094 840c1.448-30.972 8.222-53.678 20.719-68.875-2.987 19.779-5.43 41.785.313 78.344l1.065 6.373-2.938 11.517 10.617-8.168 9.19 10.222-1.549-10.464
+3.427-6.95c5.7-13.21 10.173-26.212 16.344-36.655.96-1.624 2.031-3.065 3.062-4.563-3.68 21.155-2.427 40.208-4.093 57.781l-4.68 7.807 7.398.225c3.22 3.483 3.868 3.85 4.563 8.656s.318 14.4-2.563 31.625c-5.568 33.288-31.846 77.84-43.74 101.49-6.605 13.13-18.528 57.486-31.123 76.246s-28.53 39.767-37.172 44.42c-21.49 11.575-44.556 25.507-60.619 18.09-14.375-6.636-23.04-21.192-23.814-37.742-.383-8.188.613-21.31 1.625-34.406 1.013-13.097 11.29-22.571 15.423-36.563 5.373-18.181-1.447-36.594-12.5-53.937-6.486-10.178-23.977-24.258-29.548-38.562s-10.368-29.003-11.28-37.656c-.927-8.771.422-23.025 2.218-34.5 1.796-11.475 3.844-20.281 3.844-20.281l9.423-3.615-10.485-3.885s-8.5-15.31-8.094-34.812c.071-3.423 1.836-12.728 4.719-23.188s6.764-22.553 10.625-33.97c3.044-9.002 5.78-16.602 8.344-23.687z" clip-path="url(#am)" enable-background="new" filter="url(#ds)" opacity=".588"/>
+ <g transform="translate(324.57 331.53)" clip-path="url(#is)" enable-background="new" fill="#fff">
+ <path transform="matrix(-.90453 .25066 .25066 .90453 -52.2 74.097)" d="m-15.668 843.49l-49.497-15.556-26.87 52.326 41.012 45.255 49.497-38.184z" enable-background="accumulate" filter="url(#dr)"/>
+ <path transform="matrix(-.90453 .25066 .25066 .90453 -46.928 75.511)" d="m118.71 859.93l-55.154-46.669-43.841 36.77 33.941 53.74-13.597 85.462-39.445 28.292-41.012 11.314-2.828 46.669 56.569 25.456 18.944-69.65 23.457-58.857 46.348-72.615 16.62-39.912z" enable-background="accumulate" filter="url(#dq)"/>
+ </g>
+ <path transform="matrix(-.90453 .25066 .25066 .90453 277.64 407.04)" d="m-70.822 932.58l60.811-26.87 100.41 31.113-63.64 31.113-82.024-16.971-15.556-18.385z" enable-background="accumulate" filter="url(#cy)" opacity=".25"/>
+ <path transform="matrix(-.90453 .25066 .25066 .90453 870.86 206.47)" d="M583.06 715.75c-12.106 34.45-26.714 68.533-31.75 104.84-.832 14.929 4.59 29.159 8.844 43.062-5.916 27.201-10.137 56.9 1.156 83.125 13.517 38.161 35.001 75.682 32.423 117.47-.948 29.294-9.014 60.994 5.39 88.282 10.199 19.335 33.14 27.312 53.968 27.668 27.862 1.174 56.463-11.622 72-35.261 22.596-29.372 41.802-61.497 55.24-96.06 16.89-45.506 29.672-92.561 37.934-140.4 1.824-12.941 3.1-27.47-4.58-38.823-3.43-7.336.043-15.56-.684-23.31.674-24.995 4.013-50.664 16.653-72.596-17.733 6.445-35.073 16.56-44.003 33.864-3.935 6.71-7.605 13.574-11.372 20.386-3.55-30.014 3.72-59.648 6.781-89.281-20.166 9.055-36.877 25.655-44.175 46.682-6.304 15.58-8.802 32.317-10.263 49.037-8.253-1.52-16.684-2.102-25.062-1.5-.963-38.698-.467-79.407 10.97-115.91-18.682 6.218-35.167 18.736-45.629 35.387-13.853 20.88-21.26 45.754-23.059 70.613.586
+4.325-.06 11.84-6.343 9.875-5.332.018-10.63.679-15.938 1.094 1.147-39.381-3.342-81.628-27.062-114.22-3.061-3.637-5.637-7.685-8.625-11.344l-2.813 7.312zm7.75 13.844c18.565 29.296 22.482 64.82 22.125 98.875.204 5.175-.517 11.829.125 16.062 12.319-6.103 26.739-2.44 39.781-2.187 2.317 1.223 3.192 1.652 1.906-1.407-4.164-13.953-1.848-28.613 1.805-42.408 6.367-26.29 20.628-51.088 42.82-67.03-8.617 37.237-5.716 76.562-6.094 113.97 12.253-6.91 27.28-3.446 40.031-.25 3.393 3.535 2.29-.73 2.188-3.812-.483-21.371 4.131-43.07 13.688-62.156 5.963-10.687 14.243-19.804 22.438-28.875-7.872 33.838-9.203 69.336-2.719 103.5 1.725-1.411 4.607-.454 5.656-.375 9.684-21.237 16.351-45.381 34.89-60.742 1.874-.371-1.448 8.525-1.484 11.898-3.535 21.846-7.175 44.142-8.784 66.219-8.784 2.342 2.849 2.323 3.469 4.062 7.923 10.566 4.663 24.405 3.632 36.353-7.064 45.034-22.142 87.362-35.954 130.68-12.075 32.95-27.374
+58.852-47.888 87.202-10.953 13.551-23.245 27.851-40.844 32.5-20.156 6.242-44.207 10.877-62.6.046-17.29-12.34-21.024-35.709-19.262-55.686.048-15.826 4.938-28.512 4.41-43.492-.538-15.263-2.291-30.565-6.542-46.866-4.252-16.302-9.044-24.918-16.12-41.573-7.24-17.045-15.07-36.749-18.204-56.288-1.75-18.627 2.891-37.123 5.78-55.25 3.297-2.837-1.597-5.196-2.312-8.187-7.6-17.015-8.407-36.775-2.742-54.56 7.13-25.072 15.761-49.632 24.68-74.128l2.125 3.906z" clip-path="url(#cj)" enable-background="new" filter="url(#ir)" opacity=".588"/>
+ <path transform="matrix(-.90453 .25066 .25066 .90453 1043.9 219.06)" d="m735.06 733.04l2.755 21.089 44.411-15.388 4.851-22.39-3.936-22.052-22.452-36.593-8.28 30.305-17.35 45.029z" clip-path="url(#e)" enable-background="accumulate" fill="#fff" filter="url(#bz)"/>
+ <path transform="matrix(-.90453 .25066 .25066 .90453 1043.9 219.06)" d="m831.81 730.29l15.822 14.905 20.855 2.9-1.59-39.926 8.325-30.508-7.165-6.341-21.697 20.942-14.55 38.028z" clip-path="url(#e)" enable-background="accumulate" fill="#fff" filter="url(#by)"/>
+ </g>
+ <g transform="translate(324.57 331.53)" clip-path="url(#iq)" enable-background="new" filter="url(#ip)">
+ <path d="M36.494 811.806l-14.799 11.372-17.47-31.162 14.663.65 17.606 19.14z" enable-background="accumulate" fill="#fff" fill-rule="evenodd"/>
+ <path d="M-55 757.2h182v177H-55z" enable-background="accumulate" fill="none"/>
+ </g>
+ <g transform="translate(324.57 331.53)" clip-path="url(#io)" enable-background="new" filter="url(#in)">
+ <path d="m83.111 790.72l-28.201 12.855-5.658-21.687-13.943-25.046 6.325-6.88 26.384 18.51 15.094 22.249z" enable-background="accumulate" fill="#fff" fill-rule="evenodd"/>
+ <path d="M-22 696.2h165v176H-22z" enable-background="accumulate" fill="none"/>
+ </g>
+ <g fill-rule="evenodd">
+ <path d="m1084.8 1267.3c6.794 18.903 10.494 33.3 11.89 51.212 1.397 17.912-3.783 51.801-2.9 70.656 0.881 18.845 8.133 40.099 27.345 48.969 19.419 8.966 49.319 10.211 74.12-3.146 24.8-13.357 57.4-70.326 70.973-97.309 13.624-27.084 38.761-114.5 44.661-149.77s2.551-41.3-4.617-49.055c2.64-27.84-1.5-54.935 13.11-87.186-30.249 11.826-37.382 40.161-48.319 65.505-8-50.933 0.21-71.273 3.319-101.22-29.065 14.778-42.862 47.114-45 92.857-10.924-1.304-21.391-4.434-33.571-0.714-0.264-46.023-1.464-76.889 8.91-114.21-53.254 21.027-62.946 106.59-56.053 112.78-10.883 0.535-21.371-1.297-32.857 2.857 0.638-42.57-0.26-84.909-30-122.86 0 0-30.958 80.922-31.43 103.57-0.47 22.65 9.452 40.166 9.452 40.166s-8.568 36.741-6.298 58.232c2.295 21.741 20.443 59.676 27.266 78.658z" enable-background="new" fill="#ada469"/>
+ <path transform="translate(324.57 331.53)" d="M719.5 738.7l18.312 15.432 44.411-15.388L805.5 713.2l11.464 19.221 30.672 12.784 25.097 5.728L892 723.2l16.023 23.826L947 752.2l10.245-6.198L964 754.7l25.5 11 2-40.5-35.551-14.538-32.493-21.527-40.452-11.466-21.307-15.533L840 685.2l-84.971-46.583-33.03 38.083-2.5 62z" clip-path="url(#e)" enable-background="accumulate" fill="#fff" filter="url(#o)"/>
+ <path transform="translate(498.6 269.37)" d="M584 696.5l-6.563 17.156s-7.811 20.365-15.688 43.656c-3.938 11.646-7.883 24.041-10.938 35.125s-5.335 20.38-5.5 28.281c-.398 19.162 5.747 34.888 8.938 41.75-.772 3.555-1.991 9.454-3.344 18.094-1.92 12.268-3.718 27.154-2.375 39.875 1.382 13.088 6.812 28.188 12.594 43.031s12.054 29.227 15.22 38.031c6.631 18.452 9.992 31.576 11.311 48.5.582 7.456-.242 20.336-1.25 33.375s-2.186 26.301-1.687 36.969c.989 21.14 9.328 46.835 33.375 57.937 22.775 10.515 55.327 11.702 83.438-3.437 16.16-8.704 30.076-27.098 43.375-46.906s24.969-41.053 31.938-54.906c15.353-30.521 39.394-115.46 45.625-152.72 3.018-18.047 3.921-29.066 2.625-38.031-.979-6.766-3.828-12.147-6.875-16.22 2.042-27.507-.732-51.368 11.969-79.405l10.562-23.281-23.812 9.312c-17.49 6.838-28.902 19.045-36.594 32.062-.323.546-.563 1.108-.875 1.656.222-22.515 4.408-37.638 6.594-58.688l1.968-19-17.03
+8.656c-30.595 15.556-45.696 48.193-49.72 90.22-4.245-.626-8.831-1.02-13.812-.844-.291-39.18-.396-67.037 8.594-99.375l5.594-20.125-19.438 7.656c-30.91 12.204-47.86 41.931-56.625 68.375-4.383 13.222-6.746 25.801-7.594 35.938a92.19 92.19 0 0 0-.312 7.719c-3.242-.037-6.42.136-10.062.5.041-39.005-3.485-79.754-32.281-116.5L584 696.498zm5.813 43.812c16.807 30.644 17.475 63.967 16.938 99.75l-.22 15.062 12.036-6.54c8.662-3.132 19.56-.227 31.934-.835l14.675 9.357-6.331-25.794c-.09-.23-.22-.417-.25-.72-.2-2.039-.222-5.472.125-9.624.694-8.304 2.79-19.585 6.625-31.156 5.155-15.553 13.488-31.192 25.125-42.531-4.684 28.638-3.216 60.259-3.012 95.805l-2.766 13.262 15.496-7.598c9.03-2.758 17.19-.35 29.281 1.094l13.246 9.444L741.094 840c1.448-30.972 8.222-53.678 20.719-68.875-2.987 19.779-5.43 41.785.313 78.344l1.065 6.373-2.938 11.517 10.617-8.168 9.19 10.222-1.549-10.464 3.427-6.95c5.7-13.21
+10.173-26.212 16.344-36.655.96-1.624 2.031-3.065 3.062-4.563-3.68 21.155-2.427 40.208-4.093 57.781l-4.68 7.807 7.398.225c3.22 3.483 3.868 3.85 4.563 8.656s.318 14.4-2.563 31.625c-5.568 33.288-31.793 123.17-43.688 146.81-6.605 13.13-18.03 33.896-30.625 52.656s-27.359 35.534-36 40.187c-21.49 11.574-48.78 10.26-64.844 2.844-14.375-6.637-20.538-23.45-21.312-40-.383-8.188.613-21.31 1.625-34.406 1.013-13.097 11.29-22.571 15.423-36.563 5.373-18.181-1.447-36.594-12.5-53.937-6.486-10.178-23.977-24.258-29.548-38.562s-10.368-29.003-11.28-37.656c-.927-8.771.422-23.025 2.218-34.5 1.796-11.475 3.844-20.281 3.844-20.281l9.423-3.616-10.485-3.884s-8.5-15.31-8.094-34.812c.071-3.424 1.836-12.728 4.719-23.188s6.764-22.553 10.625-33.97c3.044-9.002 5.78-16.602 8.344-23.687z" clip-path="url(#am)" enable-background="new" filter="url(#ds)" opacity=".588"/>
+ <g transform="translate(324.57 331.53)" clip-path="url(#im)" fill="#fff">
+ <path d="m824.49 818.48l-49.497-15.556-26.87 52.326 41.012 45.255 49.497-38.184z" enable-background="accumulate" filter="url(#dr)"/>
+ <path d="m964.49 855.25l-55.154-46.669-43.841 36.77 33.941 53.74 7.071 66.468-50.912 35.355-41.012 11.314-2.828 46.669 56.569 25.456 63.64-76.368 24.042-94.752 8.485-57.983z" enable-background="accumulate" filter="url(#dq)"/>
+ </g>
+ <path transform="translate(48.571 195.53)" d="m1045.3 1043.6l60.811-26.87 100.41 31.113-63.64 31.113-82.024-16.971-15.556-18.385z" enable-background="accumulate" filter="url(#cy)" opacity=".25"/>
+ <path transform="translate(498.6 269.37)" d="M583.06 715.75c-12.106 34.45-26.714 68.533-31.75 104.84-.832 14.929 4.59 29.159 8.844 43.062-5.916 27.201-10.137 56.9 1.156 83.125 13.517 38.161 35.001 75.682 32.423 117.47-.948 29.294-9.014 60.994 5.39 88.282 10.199 19.335 33.14 27.312 53.968 27.668 27.862 1.174 56.463-11.622 72-35.261 22.596-29.372 41.802-61.497 55.24-96.06 16.89-45.506 29.672-92.561 37.934-140.4 1.824-12.941 3.1-27.47-4.58-38.823-3.43-7.336.043-15.56-.684-23.31.674-24.995 4.013-50.664 16.653-72.596-17.733 6.445-35.073 16.56-44.003 33.864-3.935 6.71-7.605 13.574-11.372 20.386-3.55-30.014 3.72-59.648 6.781-89.281-20.166 9.055-36.877 25.655-44.175 46.682-6.304 15.58-8.802 32.317-10.263 49.037-8.253-1.52-16.684-2.102-25.062-1.5-.963-38.698-.467-79.407 10.97-115.91-18.682 6.218-35.167 18.736-45.629 35.387-13.853 20.88-21.26 45.754-23.059 70.613.586 4.325-.06 11.84-6.343
+9.875-5.332.018-10.63.679-15.938 1.094 1.147-39.381-3.342-81.628-27.062-114.22-3.061-3.637-5.637-7.685-8.625-11.344l-2.813 7.312zm7.75 13.844c18.565 29.296 22.482 64.82 22.125 98.875.204 5.175-.517 11.829.125 16.062 12.319-6.103 26.739-2.44 39.781-2.187 2.317 1.223 3.192 1.652 1.906-1.407-4.164-13.953-1.848-28.613 1.805-42.408 6.367-26.29 20.628-51.088 42.82-67.03-8.617 37.237-5.716 76.562-6.094 113.97 12.253-6.91 27.28-3.446 40.031-.25 3.393 3.535 2.29-.73 2.188-3.812-.483-21.371 4.131-43.07 13.688-62.156 5.963-10.687 14.243-19.804 22.438-28.875-7.872 33.838-9.203 69.336-2.719 103.5 1.725-1.411 4.607-.454 5.656-.375 9.684-21.237 16.351-45.381 34.89-60.742 1.874-.371-1.448 8.525-1.484 11.898-3.535 21.846-3.297 44.173-4.906 66.25-1.312 1.377 2.849 2.323 3.469 4.062 7.923 10.566 3.123 24.831 2.092 36.78-7.064 45.034-21.766 88.38-35.577 131.7-12.075 32.95-30.72 63.08-51.234 91.43-10.953
+13.55-23.245 27.85-40.844 32.5-20.156 6.24-43.576 5.174-61.97-5.657-17.29-12.34-21.023-35.709-19.261-55.686.048-15.826 2.372-27.8 7.917-42.805s2.471-31.332-1.78-47.633-12.18-26.26-21.822-42.204-17.637-36.037-20.772-55.577c-1.75-18.627 2.892-37.123 5.781-55.25 3.296-2.837-1.598-5.196-2.312-8.187-7.602-17.015-8.408-36.775-2.743-54.56 7.13-25.072 15.761-49.632 24.68-74.128l2.125 3.906z" clip-path="url(#cj)" enable-background="new" filter="url(#il)" opacity=".588"/>
+ <path transform="translate(324.57 331.53)" d="m735.06 733.04l2.755 21.089 44.411-15.388 4.851-22.39-3.936-22.052-22.452-36.593-8.28 30.305-17.35 45.029z" clip-path="url(#e)" enable-background="accumulate" fill="#fff" filter="url(#bz)"/>
+ <path transform="translate(324.57 331.53)" d="m831.81 730.29l15.822 14.905 20.855 2.9-1.59-39.926 8.325-30.508-7.165-6.341-21.697 20.942-14.55 38.028z" clip-path="url(#e)" enable-background="accumulate" fill="#fff" filter="url(#by)"/>
+ </g>
+ <g transform="translate(324.57 331.53)" clip-path="url(#ik)" filter="url(#ij)">
+ <path d="M910.14 746.31l32.613 5.174-.361-23.876 7.188-29.682-8.45-5.264-21.823 26.511-9.167 27.137z" enable-background="accumulate" fill="#fff" fill-rule="evenodd"/>
+ <path d="M877.52 650.19h123.04v172.53H877.52z" enable-background="accumulate" fill="none"/>
+ </g>
+ <g transform="translate(324.57 331.53)" clip-path="url(#ii)" filter="url(#ih)">
+ <path d="M964 754.69l18.429 7.465 9.071-36.964-14.87 4.839L964 754.69z" enable-background="accumulate" fill="#fff" fill-rule="evenodd"/>
+ <path d="M924.9 677.06h142.13v125.16H924.9z" enable-background="accumulate" fill="none"/>
+ </g>
+ <g stroke-miterlimit="1">
+ <path d="m596.57 400.85h440v376h-440z" fill="none" stroke="#f8d615" stroke-width="18"/>
+ <path d="m104.57 248.85h1684v1292h-1684z" fill="none" stroke="#f83615" stroke-width="18"/>
+ <path d="M3407.4 296.46h522.57v1182.5H3407.4z" fill="url(#ig)" stroke="#f815bb" stroke-width="13.347"/>
+ </g>
+ <g transform="matrix(1.0057 0 0 2.3995 3452.3 -18.515)" clip-path="url(#if)" enable-background="new">
+ <g filter="url(#aq)">
+ <g transform="translate(-174.03 62.156)" filter="url(#g)">
+ <path d="m1268.3 663.77s-0.296 26.161 4.643 37.857 20.038 26.487 28.572 31.429 18.929 8.571 18.929 8.571l117.86-115 17.857-75.714-96.43 38.571-91.428 74.286z" enable-background="new" fill="#fff" fill-rule="evenodd"/>
+ <path d="M1197.8 486.14h333.75v309.71H1197.8z" enable-background="accumulate" fill="none"/>
+ </g>
+ </g>
+ <g transform="translate(-174.03 62.156)" enable-background="new" filter="url(#g)" opacity=".18">
+ <path d="m1268.3 663.77s-0.296 26.161 4.643 37.857 20.038 26.487 28.572 31.429 18.929 8.571 18.929 8.571l117.86-115 17.857-75.714-96.43 38.571-91.428 74.286z" enable-background="new" fill="#fff" fill-rule="evenodd"/>
+ <path d="M1197.8 486.14h333.75v309.71H1197.8z" enable-background="accumulate" fill="none"/>
+ </g>
+ </g>
+ <path d="M3492.3 296.46H3930v902.66h-437.7z" fill="#fff" stroke="#f8d615" stroke-miterlimit="1" stroke-width="18"/>
+ <g transform="matrix(1.0057 0 0 2.3995 3249.4 125.01)" clip-path="url(#ie)" enable-background="new" filter="url(#id)">
+ <path d="M36.494 811.806l-14.799 11.372-17.47-31.162 14.663.65 17.606 19.14z" enable-background="accumulate" fill="#fff" fill-rule="evenodd"/>
+ <path d="M-55 757.2h182v177H-55z" enable-background="accumulate" fill="none"/>
+ </g>
+ <g transform="matrix(1.0057 0 0 2.3995 3249.4 125.01)" clip-path="url(#ic)" enable-background="new" filter="url(#gd)">
+ <path d="m83.111 790.72l-28.201 12.855-5.658-21.687-13.943-25.046 6.325-6.88 26.384 18.51 15.094 22.249z" enable-background="accumulate" fill="#fff" fill-rule="evenodd"/>
+ <path d="M-22 696.2h165v176H-22z" enable-background="accumulate" fill="none"/>
+ </g>
+ <path transform="matrix(1.0057 0 0 2.3995 3249.4 125.01)" d="m332.34 898.39l-32.732-61.3-37.617 45.106c2.177 1.317 5.774-20.856 45.6-64.417l24.749 80.61z" clip-path="url(#gc)" enable-background="accumulate" fill="#fff" fill-rule="evenodd" filter="url(#gb)" opacity=".5"/>
+ <g clip-path="url(#ga)">
+ <g fill-rule="evenodd">
+ <g enable-background="new">
+ <path transform="matrix(.71488 -.46488 .26446 2.3151 3478.4 126.16)" d="m245.12 100.05s-47.128-31.647-67.215-35.801c-20.038-4.144-38.473-3.318-51.934 13.607s-12.077 61.265-13.536 86.97 2.55 70.177 17.605 88.666c15.055 18.488 45.886 13.585 49.927 21.414 2.213 4.287 65.152-174.86 65.152-174.86z" enable-background="accumulate" fill="url(#r)"/>
+ <path transform="matrix(.71488 -.46488 .26446 2.3151 3478.4 126.16)" d="M135.38 82.018s26.344 1.939 37.633 13.903c11.415 12.097 13.735 21.332 15.296 37.735 1.563 16.425-.85 28.418-7.814 36.037s-1.004 19.583-25.916 12.071-27.032-27.783-26.515-46.305c.517-18.529 7.316-53.441 7.316-53.441z" enable-background="accumulate" fill="url(#q)"/>
+ <path transform="matrix(.71488 -.46488 .26446 2.3151 3478.4 126.16)" d="M135.65 81.927s-4.645 16.365.588 28.563c5.488 12.793 27.224 44.26 27.224 54.656l22.656-5c2.542-6.966 3.21-15.752 2.188-26.5-1.562-16.403-3.867-25.621-15.281-37.719-9.655-10.232-31.593-13.375-37.375-14z" enable-background="new" fill="url(#p)"/>
+ </g>
+ <g enable-background="new">
+ <path d="M4043.297 824.626l-.09-.006c-1.489 1.728-6.783 2.02-7.718 20.884-.878 17.694 13.035 44.859 15.06 47.837 1.636 2.403 3.427 4.343 5.205 5.58l1.41.916c1.894 1.031 3.7 1.38 5.163 1.19 3.177-.414 5.214-.812 7.238-1.183 2.025-.372 2.592-1.8 3.707-2.078 1.156-.288 2.255.9 6.345-.372 4.09-1.271 5.587-1.894 6.324-2.566.765-.698 1.86-1.592 2.549-2.999 1.962.51 3.823.346 5.31-.58 3.08-1.92 5.084-3.12 6.998-4.722 1.536-1.285 2.513-3.421 2.759-4.073.246-.653.183-1.71.211-1.754.047-.072.351-.13.55-.906 1.07-4.167 2.968-12.778
+3.16-14.38.193-1.594.324-3.178.42-4.11.056-.54-.046-2.136-.017-2.27.04-.187.317-.518.401-1.098.404-2.783.343-5.237.292-8.675-.05-3.438-.44-11.267-.995-13.482-.356-1.423-.708-2.109-1.051-2.478-.065-.06-.119-.145-.18-.2-.02-.015-.043-.004-.062-.017-.303-.265-.59-.545-1.13-.839-.972-.527-2.393-1.283-3.937-1.684-.514-.134-1.041-.17-1.572-.205-3.591-.231-9.115-.108-10.396 1.072-1.593-1.244-3.874-2.542-5.879-2.746-3.086-.313-5.002-.546-6.953-.776-1.952-.23-1.73.464-2.958.32-1.328-.155-1.76-1.022-5.568-1.302-3.577-.262-9.087-.095-10.397 1.072-1.593-1.245-3.872-2.542-5.878-2.746-3.087-.313-5.006-.472-6.958-.702-.663-.078-1.055.01-1.364.079z" enable-background="new" fill="#bcb786"/>
+ <g transform="matrix(1.0047 .1075 -.04506 2.3971 3818.4 1782.9)" clip-path="url(#fz)" enable-background="new" filter="url(#fy)">
+ <path d="M229.94-409.12c-3.558.05-9.024.36-10.303.904-1.606-.447-3.903-.881-5.9-.877a979.03 979.03 0 0 1-6.907 0c-.66-.003-1.048.068-1.353.11v1.096c.12-.18.393-.69.95-.767.747-.103 5.17-.151 7.31-.11 1.775.035 4.455.274 6.39.96.32.113.618.273.891.41 1.964.987 7.944 4.302 7.944 4.302s-6.633-3.948-7.483-4.439c-.203-.117-.575-.258-1.036-.41 1.22-.449 5.076-.62 7.828-.713 3.024-.102 3.348-.09 5.41.192 2.13.29 3.34.602 3.34.602s-.08-.64 1.035-.794c.748-.103 5.17-.152 7.31-.11 2.07.04 5.366.407 7.282 1.37 1.003.504 3.035 1.569 4.795 2.536l.096-.02s-3.58-2.162-4.43-2.653c-.204-.117-.575-.258-1.037-.411 1.22-.448 5.047-.62 7.8-.712 3.024-.102 3.347-.09 5.41.191 1.954.267 3.013.53 3.195.576l-.027-.312a8.503 8.503 0 0 0-1.4-.357c-1.301-.235-3.4-.602-5.51-.564-3.571.064-9.052.356-10.302.904-1.606-.447-3.877-.881-5.871-.877-3.072.007-4.994.01-6.936
+0-1.943-.009-1.713.28-2.936.274-1.322-.005-1.766-.354-5.555-.301M206.2-407.48c1.92.817 4.577 2.193 6.159 3.397s2.908 1.774 5.555 3.918c.885.718 1.748 1.35 2.591 1.922l.541-.19a57.511 57.511 0 0 1-2.269-1.622c-2.822-2.12-3.627-2.81-6.015-4.274s-4.1-2.366-6.562-3.15" enable-background="new"/>
+ <path d="M237.8-407.48c1.92.817 4.606 2.193 6.188 3.397.813.62 1.558 1.07 2.45 1.654l.65-.116a40.414 40.414 0 0 0-2.697-1.784c-2.389-1.465-4.128-2.366-6.59-3.151" enable-background="new"/>
</g>
- <rect
- id="rect6247"
- width="440"
- height="376"
- x="548"
- y="205.32275"
- style="opacity:1;fill:none;fill-opacity:1;stroke:#f8d615;stroke-width:18;stroke-linejoin:miter;stroke-miterlimit:1;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
- <rect
- style="opacity:1;fill:none;fill-opacity:1;stroke:#f83615;stroke-width:18;stroke-linejoin:miter;stroke-miterlimit:1;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
- id="rect6676"
- width="1684"
- height="1292"
- x="56"
- y="53.322754" />
- <rect
- style="opacity:1;fill:url(#pattern5557);fill-opacity:1;stroke:#f815bb;stroke-width:13.34657478;stroke-linejoin:miter;stroke-miterlimit:1;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
- id="rect6731"
- width="522.56604"
- height="1182.4679"
- x="3493.3721"
- y="87.178711" />
- <g
- id="g7477"
- transform="matrix(0.53474256,0,0,1,1882.7509,3.0962157)">
- <rect
- y="99.705269"
- x="3039.4895"
- height="902.66437"
- width="818.51605"
- id="rect6979"
- style="opacity:1;fill:#ffffff;fill-opacity:1;stroke:#f8d615;stroke-width:18;stroke-linejoin:miter;stroke-miterlimit:1;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
- <g
- clip-path="url(#clipPath6975)"
- id="g4303-9"
- style="display:inline;enable-background:new"
- transform="matrix(1.8806916,0,0,2.3994874,1997.8763,-394.32602)">
- <path
- inkscape:connector-curvature="0"
- style="display:inline;opacity:0.5;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter11361-3);enable-background:new"
- d="m 304.64285,526.6479 c -10,0.35715 -18.21428,2.85714 -18.21428,2.85714 l 7.5,6.07143 10.35714,3.57143 16.07143,0.35714 22.5,-5.35714 7.85714,1.07143 20.35715,-2.14286 -10.35715,6.78572 c 5.45923,-1.02361 17.39329,3.56911 9.64286,5.35714 -1.74,0.40142 13.92857,-4.64285 13.92857,-4.64285 l 2.5,-4.64287 3.57143,-9.28571 11.42857,0 18.21428,-4.64286 3.57144,-4.99999 -16.07144,1.07142 -12.14285,2.14286 -14.64286,-5 -70.6921,16.70774 -5.37933,-5.27917 z"
- id="path10326-6"
- sodipodi:nodetypes="cccccccccsccccccccccc"
- transform="matrix(10.726753,0,0,10.726753,-2882.1235,-4565.4583)"
- inkscape:export-filename="/home/cheeseness/Documents/LCA09/mascot/tuz_new.png"
- inkscape:export-xdpi="142.10527"
- inkscape:export-ydpi="142.10527" />
- <g
- style="display:inline;opacity:1;enable-background:new"
- id="g7882-9"
- transform="matrix(0.71084,-0.1937433,0.262963,0.9648058,503.68027,136.48399)">
- <path
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="czzzzcc"
- id="path7876-3"
- d="m 245.12255,100.05344 c 0,0 -47.12811,-31.646921 -67.21465,-35.800939 -20.03792,-4.143963 -38.4729,-3.317578 -51.93364,13.607323 -13.46074,16.924901 -12.07739,61.265196 -13.53554,86.969546 -1.45815,25.70435 2.54945,70.17701 17.6046,88.66552 15.05516,18.4885 45.88634,13.58502 49.92695,21.4137 2.21283,4.28736 65.15228,-174.85515 65.15228,-174.85515 z"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#radialGradient7904-7);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate" />
- <path
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="czzzzzc"
- id="path7878-3"
- d="m 135.37935,82.017807 c 0,0 26.34355,1.938783 37.63307,13.903188 11.41494,12.097335 13.73457,21.331515 15.29586,37.734585 1.56337,16.42499 -0.84957,28.41812 -7.81382,36.03734 -6.96425,7.61922 -1.00429,19.58332 -25.91605,12.07107 -24.91176,-7.51225 -27.03224,-27.78298 -26.51523,-46.30475 0.51721,-18.52898 7.31617,-53.441433 7.31617,-53.441433 z"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#radialGradient7906-6);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate" />
- <path
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="czccssc"
- id="path7880-8"
- d="m 135.648,81.927211 c 0,0 -4.64465,16.365075 0.58825,28.563099 5.48794,12.79254 27.22425,44.26007 27.22425,54.65565 l 22.65625,-5 c 2.54218,-6.96644 3.21052,-15.75206 2.1875,-26.5 -1.56129,-16.40307 -3.8663,-25.62141 -15.28125,-37.718749 -9.65488,-10.232047 -31.59311,-13.374857 -37.375,-14 z"
- style="display:inline;opacity:1;fill:url(#radialGradient7908-0);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
- </g>
- <path
- inkscape:connector-curvature="0"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- d="m 845.03125,1154.7776 c -4.28571,0.7143 -27.62815,3.6181 -57.85714,10 -30.22899,6.3819 -57.31395,4.9661 -135.78608,17.3296 -79.85178,12.5808 -94.06436,42.5423 -108.12225,47.0643 -14.70014,4.7286 -145.37739,-65.8225 -145.37739,-65.8225 l 4.28572,-94.2857 c 0,0 85.88551,-16.2009 112.14285,-33.5714 26.25735,-17.3705 45.58238,-49.66602 59.28572,-71.42861 13.70334,-21.76259 32.85714,-71.42858 32.85714,-71.42858 l 238.57143,262.14289 z"
- id="path7917-0"
- sodipodi:nodetypes="czzzcczzcc" />
- <path
- inkscape:connector-curvature="0"
- style="display:inline;overflow:visible;visibility:visible;opacity:0.5;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter8888-6);enable-background:accumulate"
- d="m 332.34019,898.38549 -32.73181,-61.29956 -37.61734,45.10646 c 2.17675,1.31711 5.77425,-20.85603 45.6004,-64.41708 l 24.74875,80.61018 z"
- id="path7919-5"
- clip-path="url(#clipPath8658-06)"
- sodipodi:nodetypes="ccccc"
- transform="translate(276,136)" />
- <path
- inkscape:connector-curvature="0"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#linearGradient6951);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter8892-7);enable-background:accumulate"
- d="m 200.81833,863.03015 146.3711,-51.61879 243.95184,226.27414 -241.83052,140.0072 -181.01934,-87.6813 32.52692,-226.98125 z"
- id="path7923-6"
- clip-path="url(#clipPath2833-2)"
- sodipodi:nodetypes="cccccc"
- transform="translate(276,136)" />
- <path
- inkscape:connector-curvature="0"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#0f0f0f;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- d="m 642.88839,640.13471 c 0,0 -29.55406,40.57305 -47.85714,74.28571 -18.30309,33.71267 -58.62109,126.35694 -70.35714,171.07143 -11.7594,44.80344 -62.5,123.57145 -62.5,123.57145 l 76.07143,18.2143 c 0,0 11.80712,-12.8234 31.07142,-46.07146 19.2643,-33.24808 60.35715,-138.57143 60.35715,-138.57143 l 13.21428,-202.5 z"
- id="path7921-6"
- sodipodi:nodetypes="czzcczcc" />
- <path
- inkscape:connector-curvature="0"
- style="display:inline;overflow:visible;visibility:visible;opacity:0.4;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter8856-2);enable-background:accumulate"
- d="m 430.28131,381.94122 c -7.07106,2.82843 -236.18124,32.15181 -236.18124,32.15181 l -39.63961,359.83304 90.19849,92.63961 52.3259,-114.5513 100.46804,-186.39192 32.82842,-183.68124 z"
- id="path7925-4"
- sodipodi:nodetypes="ccccccc"
- clip-path="url(#clipPath3665-9)"
- transform="translate(276,136)" />
- <path
- inkscape:connector-curvature="0"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- d="m 969.67051,1164.0346 c 0,0 23.25628,11.3937 36.06779,20.4761 12.6974,9.0015 29.4724,24.6491 41.6924,37.3605 12.3055,12.8002 20.1127,22.5987 41.5327,24.1608 21.4322,1.5629 53.2824,-8.7876 73.296,-24.6642 20.0135,-15.8766 45.6469,-69.2328 45.6469,-69.2328 l -127.1608,-143.0717"
- id="path7927-0"
- sodipodi:nodetypes="czzzzcc" />
- <path
- inkscape:connector-curvature="0"
- style="display:inline;overflow:visible;visibility:visible;opacity:0.75;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter8860-3);enable-background:accumulate"
- d="M 331.34019,641.50471 216.17367,835.36467 260.2153,925.96265 357.79603,732.21539 331.34019,641.50471 Z"
- id="path7929-0"
- clip-path="url(#clipPath8642-9)"
- sodipodi:nodetypes="ccccc"
- transform="translate(276,136)" />
- <g
- style="display:inline;opacity:1;enable-background:new"
- id="g7931-4"
- transform="matrix(0.9934486,0.1142802,-0.1142802,0.9934486,-9.24324,588.09054)"
- inkscape:transform-center-x="-347.89063"
- inkscape:transform-center-y="-28.255779">
- <path
- inkscape:connector-curvature="0"
- style="display:inline;opacity:1;fill:#bcb786;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 1049.205,-282.26672 -0.09,0.008 c -1.3874,0.88445 -6.6033,1.6072 -6.629,9.52344 -0.024,7.42525 15.0129,17.09146 17.1563,18.09375 1.7302,0.80909 3.5916,1.40876 5.4063,1.71875 l 1.4374,0.21875 c 1.9197,0.21194 3.72,0.15141 5.1563,-0.0937 3.1191,-0.5324 5.1116,-0.92861 7.0937,-1.3125 1.9821,-0.38387 2.4743,-1.03965 3.5626,-1.28125 1.1287,-0.25066 2.2702,0.11629 6.25,-0.875 3.9795,-0.99128 5.4294,-1.4193 6.125,-1.78125 0.7222,-0.37601 1.7617,-0.87058 2.375,-1.53125 1.9629,-0.012 3.7937,-0.29105 5.2187,-0.84375 2.9512,-1.14461 4.8732,-1.86942 6.6875,-2.75 1.4557,-0.70653 2.3191,-1.70203 2.5312,-2 0.2123,-0.29796 0.099,-0.72855 0.125,-0.75 0.043,-0.0352 0.3405,-0.094 0.5,-0.4375 0.859,-1.84708 2.3232,-5.62764 2.4376,-6.3125 0.1137,-0.68215 0.168,-1.35277 0.2187,-1.75 0.029,-0.22951 -0.1471,-0.8789 -0.125,-0.9375 0.031,-0.082 0.2883,-0.25057 0.3437,-0.5 0.2663,-1.19832 0.089,-2.20736 -0.125,-3.625 -0.2139,-1.41764 -0.9716,-4.61463 -1.625,-5.46875 -0.4194,-0.54857 -0.7993,-0.7925 -1.1562,-0.90625 -0.067,-0.0173 -0.1239,-0.0467 -0.1875,-0.0625 -0.021,-0.004 -0.042,0.003 -0.062,0 -0.3116,-0.0755 -0.6085,-0.15867 -1.1562,-0.21875 -0.9855,-0.10812 -2.4247,-0.2594 -3.9688,-0.25 -0.5147,0.003 -1.0371,0.0476 -1.5625,0.0937 -3.5589,0.31228 -9.0098,0.99108 -10.2187,1.625 -1.6331,-0.33402 -3.9482,-0.61223 -5.9376,-0.46875 -3.064,0.22097 -4.9677,0.34219 -6.9062,0.46875 -1.9384,0.12655 -1.6861,0.38864 -2.9062,0.46875 -1.3191,0.0866 -1.7869,-0.22325 -5.5626,0.0937 -3.5457,0.29772 -8.9806,0.99317 -10.2187,1.625 -1.6334,-0.33451 -3.9459,-0.61239 -5.9375,-0.46875 -3.0642,0.22098 -4.9678,0.37344 -6.9062,0.5 -0.6592,0.043 -1.0424,0.12393 -1.3438,0.1875 z"
- id="path7933-6" />
- <g
- clip-path="url(#clipPath7616-1)"
- style="display:inline;filter:url(#filter7610-9);enable-background:new"
- id="g7935-2"
- transform="matrix(0.9975712,-0.06965428,0.06965428,0.9975712,872.72062,140.02502)">
- <path
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="ccssscsssscscsscsssccscssccsscssscc"
- id="path7937-6"
- d="m 229.94262,-409.12268 c -3.55781,0.05 -9.0242,0.36009 -10.30334,0.90414 -1.60609,-0.44747 -3.90316,-0.88131 -5.89995,-0.87674 -3.07199,0.007 -4.96469,0.009 -6.90727,0 -0.66047,-0.003 -1.04759,0.0672 -1.35267,0.10959 0,0 0,1.09593 0,1.09593 0.11972,-0.17947 0.39252,-0.69046 0.94975,-0.76715 0.74758,-0.10289 5.16928,-0.15123 7.31019,-0.1096 1.7746,0.0345 4.45523,0.27427 6.38921,0.95895 0.3214,0.11378 0.61925,0.27378 0.89219,0.41097 1.96342,0.98693 7.94336,4.30154 7.94336,4.30154 0,0 -6.63275,-3.94768 -7.48287,-4.43853 -0.20331,-0.11739 -0.57464,-0.25769 -1.03609,-0.41098 1.22063,-0.44779 5.07597,-0.61971 7.82823,-0.71235 3.0245,-0.10182 3.34776,-0.0896 5.41069,0.19179 2.12931,0.29043 3.33851,0.60276 3.33851,0.60276 -1e-5,0 -0.0784,-0.64118 1.03609,-0.79455 0.74757,-0.10289 5.16929,-0.15123 7.31019,-0.1096 2.0695,0.0403 5.36605,0.40716 7.2814,1.36992 1.00332,0.50433 3.03564,1.56863 4.79535,2.53571 l 0.0956,-0.0194 c 0,0 -3.58034,-2.16242 -4.43047,-2.65327 -0.20331,-0.11739 -0.57463,-0.25769 -1.03609,-0.41098 1.22062,-0.44779 5.04719,-0.61971 7.79945,-0.71235 3.0245,-0.10182 3.34775,-0.0896 5.41069,0.19179 1.95316,0.2664 3.01292,0.53006 3.19461,0.57536 0,0 -0.0271,-0.31146 -0.0271,-0.31146 -0.40903,-0.13645 -0.71424,-0.23335 -1.40038,-0.35748 -1.30081,-0.23533 -3.39912,-0.60156 -5.50857,-0.56398 -3.57195,0.0636 -9.05328,0.35596 -10.30334,0.90414 -1.60583,-0.44695 -3.87662,-0.8813 -5.87117,-0.87674 -3.07199,0.007 -4.99348,0.009 -6.93605,0 -1.94256,-0.009 -1.71268,0.27907 -2.93558,0.27398 -1.32191,-0.005 -1.76612,-0.35463 -5.55459,-0.30138 0,0 0,0 0,0"
- style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
- <path
- inkscape:connector-curvature="0"
- id="path7939-7"
- d="m 206.1989,-407.47878 c 1.92021,0.81706 4.57715,2.19283 6.15897,3.39739 1.58184,1.20456 2.90757,1.77368 5.55459,3.91795 0.88557,0.71738 1.74865,1.34985 2.59193,1.92174 l 0.54057,-0.19011 c -0.71323,-0.48339 -1.46776,-1.02031 -2.26909,-1.62203 -2.82223,-2.11921 -3.62655,-2.80973 -6.01507,-4.27414 -2.38854,-1.4644 -4.09948,-2.36576 -6.5619,-3.1508 0,0 0,0 0,0"
- style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- sodipodi:nodetypes="cssccsscc" />
- <path
- inkscape:connector-curvature="0"
- id="path7941-5"
- d="m 237.79963,-407.47878 c 1.92021,0.81706 4.60594,2.19283 6.18775,3.39739 0.81307,0.61916 1.55849,1.07042 2.45046,1.65401 l 0.649,-0.11666 c -0.79831,-0.57637 -1.57177,-1.09435 -2.69653,-1.78394 -2.38854,-1.4644 -4.12826,-2.36576 -6.59068,-3.1508 0,0 0,0 0,0"
- style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- sodipodi:nodetypes="csccscc" />
- </g>
- <g
- clip-path="url(#clipPath7606-1)"
- id="g7943-6">
- <path
- inkscape:connector-curvature="0"
- style="display:inline;opacity:0.75;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7578-4);enable-background:new"
- d="m 1056.25,-278.80481 c 4.1446,-1.47877 10,3.125 10,3.125 0.899,0.28092 2.7251,-0.89447 2.6243,-1.68614 0,0 -1.5503,-1.86062 -0.3743,-2.93886 1.176,-1.07824 5.296,1.50738 7.5,1.625 2.204,0.11762 5.5621,-0.22941 7,-0.75 1.4379,-0.52059 1.1129,-1.42459 2.625,-1.75 1.5121,-0.32541 5.1189,1.03754 7.0605,1.16883 1.9416,0.13129 4.6481,0.33427 5.8145,-0.16883 1.1664,-0.5031 0.1782,-1.15921 1.875,-1.875 1.6968,-0.71579 7.7602,-0.95662 9.625,-0.125 1.8648,0.83162 1.8099,0.5192 2.625,3 0.8151,2.4808 7.4398,5.16285 -1.125,13.375 -8.5648,8.21215 -59.3779,13.78594 -65.625,2.75 -6.2471,-11.03594 6.2304,-14.27123 10.375,-15.75 z"
- id="path7945-9"
- sodipodi:nodetypes="czzzzzzzzzzzzzz" />
- <path
- inkscape:connector-curvature="0"
- style="display:inline;opacity:0.75;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7594-8);enable-background:new"
- d="m 1058.5,-275.42981 c 4.1446,-1.47877 10,3.125 10,3.125 0.899,0.28092 2.7251,-0.89447 2.6243,-1.68614 0,0 -1.5503,-1.86062 -0.3743,-2.93886 1.176,-1.07824 5.296,1.50738 7.5,1.625 2.204,0.11762 5.5621,-0.22941 7,-0.75 1.4379,-0.52059 1.1129,-1.42459 2.625,-1.75 1.5121,-0.32541 5.1189,1.03754 7.0605,1.16883 1.9416,0.13129 4.6481,0.33427 5.8145,-0.16883 1.1664,-0.5031 0.1782,-1.15921 1.875,-1.875 1.6968,-0.71579 7.7602,-0.95662 9.625,-0.125 1.8648,0.83162 1.8099,0.5192 2.625,3 0.8151,2.4808 7.4398,5.16285 -1.125,13.375 -8.5648,8.21215 -59.3779,13.78594 -65.625,2.75 -6.2471,-11.03594 6.2304,-14.27123 10.375,-15.75 z"
- id="path7947-8"
- sodipodi:nodetypes="czzzzzzzzzzzzzz" />
- </g>
- </g>
- <path
- inkscape:connector-curvature="0"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#101414;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- d="m 628.24553,347.99185 c -3.80443,-25.26423 -16.80972,-50.63802 -17.1568,-75.52523 -0.18626,-13.35552 3.27285,-26.57091 13.75553,-39.55405 36.34702,-65.29583 116.94091,-84.69468 185.93466,-91.46542 86.92239,-11.0168 184.91267,17.94007 233.37138,95.40128 54.124,75.7333 56.6747,172.53912 80.612,259.52795 29.4378,127.1276 54.7791,256.21414 60.3922,386.85035 -3.0634,78.18185 -8.4263,165.18417 -60.5032,228.13417 -48.0265,50.3574 -122.7864,50.053 -187.06985,59.0023 -90.55539,4.655 -184.35153,-16.1458 -261.7839,-64.1982 -64.77564,-37.94 -95.73019,-113.47867 -97.2794,-186.01962 -8.38917,-79.87516 26.39152,-153.80851 51.6204,-227.15961 7.47061,-82.76107 9.41286,-166.24775 9.65334,-249.38484 -0.83682,-32.19544 -7.08953,-63.81733 -11.54636,-95.60908 z"
- id="path7949-7"
- sodipodi:nodetypes="cscccccccccccc" />
- <path
- inkscape:connector-curvature="0"
- style="display:inline;overflow:visible;visibility:visible;opacity:0.25;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter8940-3);enable-background:accumulate"
- d="m 311.83409,415.43155 9.8995,121.62237 -60.10408,136.47161 15.55635,174.65537 c 15.61326,61.8792 32.18545,98.66905 74.37615,117.05383 4.31911,-36.23998 -38.61152,-142.95988 -39.24264,-189.11984 -0.63145,-46.18445 10.83034,-108.60786 30.67767,-158.29647 20.04835,-50.19188 36.89674,-44.84642 42.12489,-92.59293 5.22815,-47.74651 -17.4264,-149.39192 -17.4264,-149.39192 l -55.86144,39.59798 z"
- id="path7951-2"
- sodipodi:nodetypes="ccccczzzcc"
- clip-path="url(#clipPath8616-5)"
- transform="translate(276,136)" />
- <path
- inkscape:connector-curvature="0"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#linearGradient6953);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- d="m 1010.0312,655.49186 c 0,0 16.7552,37.01806 28.7015,53.95395 11.9462,16.93589 52.7271,56.04605 52.7271,56.04605 l 52.5972,-127.58975"
- id="path7953-8"
- sodipodi:nodetypes="czcc" />
- <path
- inkscape:connector-curvature="0"
- style="display:inline;overflow:visible;visibility:visible;opacity:0.07999998;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter8822-2);enable-background:accumulate"
- d="m 730.31998,536.56864 c 0,8.48528 42.54774,58.46803 42.54774,58.46803 l 12.60659,-28.76954 -55.15433,-29.69849 z"
- id="path7955-2"
- sodipodi:nodetypes="cccc"
- clip-path="url(#clipPath8209-6)"
- transform="translate(276,136)" />
- <g
- transform="translate(450.03125,73.843964)"
- style="display:inline;opacity:1;enable-background:new"
- id="g7957-9"
- clip-path="url(#clipPath3998-6)">
- <g
- transform="translate(-174.03125,62.156036)"
- style="filter:url(#filter3677-5)"
- id="g7959-9">
- <g
- id="g7961-6"
- style="filter:url(#filter3785-4)">
- <path
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="czzzzzzzzzz"
- id="path7963-0"
- d="m 425.88244,476.99186 c 10.80543,-1.47866 24.74401,3.35451 44.64286,3.21428 19.89885,-0.14023 57.45322,-16.91122 82.14285,-17.14286 24.68963,-0.23164 62.7517,12.28406 79.28572,15 16.53402,2.71594 22.84832,-0.15852 27.49999,7.85715 4.65167,8.01567 1.92671,10.74724 -10.35714,20.71429 -12.28385,9.96705 -40.78968,12.63632 -66.07143,12.85714 -25.28234,0.22082 -70.38129,7.07852 -95.35714,3.92856 -24.97585,-3.14996 -56.93756,-7.82267 -68.92857,-17.85714 -11.99101,-10.03447 -19.85084,-16.73182 -17.5,-23.92857 2.35084,-7.19675 13.83743,-3.16419 24.64286,-4.64285 z"
- style="display:inline;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
- <rect
- y="412.60312"
- x="343.6539"
- height="181.01935"
- width="381.83765"
- id="rect7965-2"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate" />
- </g>
- <g
- id="g7967-7"
- style="filter:url(#filter3785-4)">
- <path
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="czzzcc"
- id="path7969-6"
- d="m 687.14286,452.36218 c -10.46169,9.71443 -86.9796,19.00514 -100.71429,29.28572 -13.73469,10.28058 -14.75252,12.88826 -12.14286,20 2.60966,7.11174 6.54527,9.40572 25.71429,8.57142 19.16902,-0.8343 98.57143,-27.62172 98.57143,-21.42857 l -11.42857,-36.42857 z"
- style="display:inline;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- transform="translate(174.03125,-62.156036)" />
- <rect
- y="344.82138"
- x="702.86414"
- height="162.63455"
- width="207.8894"
- id="rect7971-1"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate" />
- </g>
- </g>
- <g
- transform="translate(-174.03125,62.156036)"
- style="display:inline;opacity:0.18000004;enable-background:new"
- id="g7973-3">
- <g
- id="g7975-2"
- style="filter:url(#filter3785-4)">
- <path
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="czzzzzzzzzz"
- id="path7977-1"
- d="m 425.88244,476.99186 c 10.80543,-1.47866 24.74401,3.35451 44.64286,3.21428 19.89885,-0.14023 57.45322,-16.91122 82.14285,-17.14286 24.68963,-0.23164 62.7517,12.28406 79.28572,15 16.53402,2.71594 22.84832,-0.15852 27.49999,7.85715 4.65167,8.01567 1.92671,10.74724 -10.35714,20.71429 -12.28385,9.96705 -40.78968,12.63632 -66.07143,12.85714 -25.28234,0.22082 -70.38129,7.07852 -95.35714,3.92856 -24.97585,-3.14996 -56.93756,-7.82267 -68.92857,-17.85714 -11.99101,-10.03447 -19.85084,-16.73182 -17.5,-23.92857 2.35084,-7.19675 13.83743,-3.16419 24.64286,-4.64285 z"
- style="display:inline;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
- <rect
- y="412.60312"
- x="343.6539"
- height="181.01935"
- width="381.83765"
- id="rect7979-5"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate" />
- </g>
- <g
- id="g7981-9"
- style="filter:url(#filter3785-4)">
- <path
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="czzzcc"
- id="path7983-9"
- d="m 687.14286,452.36218 c -10.46169,9.71443 -86.9796,19.00514 -100.71429,29.28572 -13.73469,10.28058 -14.75252,12.88826 -12.14286,20 2.60966,7.11174 6.54527,9.40572 25.71429,8.57142 19.16902,-0.8343 98.57143,-27.62172 98.57143,-21.42857 l -11.42857,-36.42857 z"
- style="display:inline;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- transform="translate(174.03125,-62.156036)" />
- <rect
- y="344.82138"
- x="702.86414"
- height="162.63455"
- width="207.8894"
- id="rect7985-1"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate" />
- </g>
- </g>
- </g>
- <path
- inkscape:connector-curvature="0"
- style="display:inline;overflow:visible;visibility:visible;opacity:0.75;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter8802-7);enable-background:accumulate"
- d="M 582.65599,-7.4183011 695.79307,78.848726 804.68752,337.64981 842.87128,545.5392 963.07944,637.46308 c 0,0 -12.72793,-287.08535 -19.799,-313.95541 C 936.20938,296.63761 793.37381,-69.643698 793.37381,-69.643698 L 582.65599,-7.4183011 Z"
- id="path7987-4"
- clip-path="url(#clipPath8604-69)"
- sodipodi:nodetypes="cccccscc"
- transform="translate(276,136)" />
- <path
- inkscape:connector-curvature="0"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#linearGradient6955);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- d="m 964.13839,239.599 c 0,0 8.67732,10.89662 24.10715,11.96428 15.42986,1.06766 49.72166,-39.95267 70.17856,-52.14285 20.4793,-12.20353 47.0464,-26.60225 63.9286,-20.35714 16.8821,6.2451 22.1578,26.43609 27.8571,48.03571 5.6994,21.59961 6.7186,61.81389 -2.6785,92.85715 -9.3972,31.04325 -50.5033,73.10375 -65.3572,103.39285 -14.8539,30.2891 -11.6071,39.82143 -11.6071,39.82143"
- id="path7989-9"
- sodipodi:nodetypes="czzzzzzc" />
- <path
- inkscape:connector-curvature="0"
- style="display:inline;opacity:1;fill:url(#radialGradient3315-5);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 1124.4955,207.63471 c -15.8928,-0.89286 -49.7188,12.10583 -66.0714,24.28572 -16.4386,12.2439 -29.2209,24.1144 -29.2857,52.14285 -0.065,28.20604 13.1191,39.07641 29.1071,46.96429 15.988,7.88789 33.6862,7.11928 51.9643,-11.78571 18.2782,-18.905 14.2857,-111.60715 14.2857,-111.60715 z"
- id="path7991-1"
- sodipodi:nodetypes="czzzzc" />
- <ellipse
- ry="73.928574"
- rx="86.428574"
- cy="237.00504"
- cx="385"
- style="display:inline;overflow:visible;visibility:visible;opacity:0.75;fill:url(#radialGradient3543-4);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter4120-7);enable-background:accumulate"
- id="path7993-0"
- transform="matrix(0.9434749,-0.1239943,0.1440089,1.0957669,451.94827,134.5988)"
- clip-path="url(#clipPath4100-3)" />
- <path
- inkscape:connector-curvature="0"
- transform="translate(450.03125,73.843964)"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#radialGradient3915-6);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- d="m 527.60588,407.44884 c 0,0 -122.04144,38.40348 -187.51434,9.63181 -65.47289,-28.77166 -74.37725,-124.71847 -74.37725,-124.71847 0,0 73.38158,-80.50393 129.92078,-83.61476 55.82705,-3.07164 90.57386,20.14332 114.87001,65.85171 24.352,45.81348 17.1008,132.84971 17.1008,132.84971 z"
- id="path7995-7"
- sodipodi:nodetypes="csczzc"
- mask="url(#mask3684-3)" />
- <path
- inkscape:connector-curvature="0"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#linearGradient6957);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- d="m 772.17411,393.349 c 0,0 36.21754,-27.38247 51.60714,-35.89286 15.17734,-8.39301 25.71428,-11.60714 35.89285,-11.60714 l -15.53571,66.96428"
- id="path7997-5"
- sodipodi:nodetypes="czcc" />
- <circle
- r="36.25"
- cy="306.64789"
- cx="409.28571"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#radialGradient3933-8);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- id="path7999-8"
- transform="translate(449.49554,74.915393)" />
- <path
- inkscape:connector-curvature="0"
- style="display:inline;overflow:visible;visibility:visible;opacity:0.3;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter8806-6);enable-background:accumulate"
- d="m 311.83409,415.43155 9.8995,121.62237 -60.10408,136.47161 15.55635,174.65537 c 15.61326,61.8792 32.18545,98.66905 74.37615,117.05383 4.31911,-36.23998 8.68161,-72.36764 -31.24264,-223.11984 l 17.67767,-69.29647 72.12489,-138.59293 -42.4264,-158.39192 -55.86144,39.59798 z"
- id="path8001-7"
- sodipodi:nodetypes="cccccccccc"
- clip-path="url(#clipPath8616-5)"
- transform="translate(276,136)" />
- <path
- inkscape:connector-curvature="0"
- style="display:inline;overflow:visible;visibility:visible;opacity:0.5;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter8826-9);enable-background:accumulate"
- d="m 635.21025,581.13004 c -14.14214,12.72792 39.23347,34.58015 76.36753,24.04163 37.13406,-10.53852 104.64487,-35.56437 103.23759,-79.19596 -1.40728,-43.63158 -76.36753,-128.69343 -76.36753,-128.69343 L 635.21025,581.13004 Z"
- id="path8003-0"
- sodipodi:nodetypes="czzcc" />
- <circle
- r="23.214285"
- cy="306.64789"
- cx="410"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#radialGradient3991-0);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- id="path8005-4"
- transform="translate(449.67411,74.915393)" />
- <circle
- r="7.5"
- cy="303.07648"
- cx="414.28571"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter3981-7);enable-background:accumulate"
- id="path8007-8"
- transform="translate(451.99554,73.486821)" />
- <path
- inkscape:connector-curvature="0"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#radialGradient4112-7);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- d="m 789.31696,478.349 c 0,0 7.02281,19.56859 -1.07143,35 -8.09424,15.43141 -42.32317,38.98822 -67.49999,50 -25.30972,11.06991 -85.473,32.96393 -101.78572,41.96428 -16.46148,9.08243 -18.21428,12.67857 -18.21428,12.67857 0,0 -7.14693,-19.06441 28.74999,-51.7857 36.17211,-32.97214 142.02712,-48.0495 159.82143,-87.85715 z"
- id="path8009-0"
- sodipodi:nodetypes="czzzczc" />
- <g
- style="display:inline;opacity:1;enable-background:new"
- id="g8011-4"
- transform="translate(780.74553,74.55825)">
- <path
- inkscape:connector-curvature="0"
- transform="translate(-329.81481,0)"
- clip-path="url(#clipPath3999-0)"
- sodipodi:nodetypes="czzczzzszc"
- id="path8013-2"
- d="m 179.64286,267.36218 c -22.41044,39.70292 -60.6161,115.78029 -69.28571,149.64286 -8.64721,33.7751 -8.77199,66.41654 -0.35715,86.42858 8.3602,19.88213 26.16398,35.6328 40.71428,41.42856 -0.59638,-14.37587 14.37295,-43.28583 72.85715,-72.5 58.62627,-29.28514 78.38163,-27.13086 103.57142,-47.14286 25.63006,-20.36176 8.20587,-79.64664 3.21429,-93.92857 -4.99158,-14.28193 -1.23663,-3.37974 -1.94602,-5.09301 -10.68928,-25.81592 -34.21432,-54.4303 -64.48255,-64.54984 -30.26823,-10.11954 -65.01776,-4.84837 -84.28571,5.71428 z"
- style="display:inline;opacity:1;fill:url(#radialGradient3585-2);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:20.79999924;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
- <ellipse
- ry="134.00607"
- rx="64.715881"
- cy="338.07648"
- cx="183.57143"
- transform="matrix(0.8823874,0.4705236,-0.4705236,0.8823874,-166.62245,2.387362)"
- id="path8015-9"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#radialGradient4060-5);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:20.79999924;stroke-linecap:butt;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate" />
- <ellipse
- ry="134.00607"
- rx="64.715881"
- cy="338.07648"
- cx="183.57143"
- transform="matrix(0.8823874,0.4705236,-0.4705236,0.8823874,-162.19388,-18.755495)"
- id="path8017-6"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#radialGradient4062-9);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:20.79999924;stroke-linecap:butt;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate" />
- <path
- inkscape:connector-curvature="0"
- transform="translate(-329.81481,3e-7)"
- clip-path="url(#clipPath3999-0)"
- sodipodi:nodetypes="czzczzzszc"
- id="path8019-1"
- d="m 179.64286,267.36218 c -22.41044,39.70292 -60.6161,115.78029 -69.28571,149.64286 -8.64721,33.7751 -8.77199,66.41654 -0.35715,86.42858 8.3602,19.88213 26.16398,35.6328 40.71428,41.42856 -0.59638,-14.37587 14.37295,-43.28583 72.85715,-72.5 58.62627,-29.28514 78.38163,-27.13086 103.57142,-47.14286 25.63006,-20.36176 8.20587,-79.64664 3.21429,-93.92857 -4.99158,-14.28193 -1.23663,-3.37974 -1.94602,-5.09301 -10.68928,-25.81592 -34.21432,-54.4303 -64.48255,-64.54984 -30.26823,-10.11954 -65.01776,-4.84837 -84.28571,5.71428 z"
- style="display:inline;opacity:1;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:url(#linearGradient6959);stroke-width:20.79999924;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;filter:url(#filter4079-1);enable-background:new" />
- </g>
- <circle
- r="19.704132"
- cy="398.07648"
- cx="310.71429"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- id="path8021-0"
- transform="translate(452.55663,72.581273)" />
- <circle
- r="19.704132"
- cy="398.07648"
- cx="310.71429"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#radialGradient4056-5);fill-opacity:1;fill-rule:evenodd;stroke:url(#linearGradient6961);stroke-width:20.79999924;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter4083-9);enable-background:accumulate"
- id="path8023-4"
- transform="translate(450.55663,72.581273)" />
- <circle
- r="19.704132"
- cy="398.07648"
- cx="310.71429"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#radialGradient4119-7);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- id="path8025-2"
- transform="translate(450.55663,72.581273)" />
- <ellipse
- ry="44.547726"
- rx="72.079735"
- cy="377.42877"
- cx="429.56738"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#radialGradient4868-3);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:0.99999994px;stroke-linecap:butt;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter4002-6);enable-background:accumulate"
- id="path8027-2"
- transform="matrix(0.9969564,-0.07796167,0.07796167,0.9969564,436.61877,125.29509)"
- inkscape:transform-center-x="-47.231976"
- inkscape:transform-center-y="-3.6935079" />
- <ellipse
- ry="22.627417"
- rx="36.611931"
- cy="391.21735"
- cx="437.6991"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:url(#radialGradient4876-9);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:0.99999994px;stroke-linecap:butt;stroke-linejoin:bevel;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter4010-1);enable-background:accumulate"
- id="path8029-2"
- transform="matrix(1.4357951,-0.06999104,0.06999104,1.4357951,235.18065,-63.86546)"
- inkscape:transform-center-x="-20.955902"
- inkscape:transform-center-y="-13.056625" />
- <g
- transform="translate(450.03125,73.843964)"
- id="g8031-0"
- style="display:inline;opacity:1;filter:url(#filter4053-9);enable-background:new">
- <circle
- r="3.2142856"
- cy="401.82648"
- cx="413.66071"
- id="path8033-5"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:url(#linearGradient6963);stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate" />
- <circle
- r="3.2142856"
- cy="401.82648"
- cx="413.66071"
- transform="translate(13.125009,8.1249913)"
- id="path8035-5"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:url(#linearGradient6965);stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate" />
- <circle
- r="3.2142856"
- cy="401.82648"
- cx="413.66071"
- transform="translate(32.946437,7.4999913)"
- id="path8037-2"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:url(#linearGradient6967);stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate" />
- <circle
- r="3.2142856"
- cy="401.82648"
- cx="413.66071"
- transform="translate(24.910723,-10.267866)"
- id="path8039-9"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:url(#linearGradient6969);stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate" />
- <circle
- r="3.2142856"
- cy="401.82648"
- cx="413.66071"
- transform="translate(47.589294,-0.6250087)"
- id="path8041-0"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:url(#linearGradient6971);stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate" />
- </g>
- <path
- inkscape:connector-curvature="0"
- style="display:inline;opacity:1;fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 896.20301,482.92837 c 0.98509,4.35008 4.53707,6.17948 7.38673,7.89182 4.46068,2.51292 6.52016,1.52211 9.15451,-0.75761 1.60195,-1.92117 10.68311,-4.69865 15.59423,-7.07107 4.32961,-1.45891 8.9033,-5.35873 13.38452,-8.33376 3.39514,-1.62724 5.34664,0.35464 7.82868,1.01015 2.94412,0.71661 4.41117,2.17175 6.06092,3.53554 2.39616,1.17519 -0.9279,3.14313 3.283,4.29314 1.19091,0.21794 2.41695,0.57645 3.28299,-0.50507"
- id="path8043-2"
- sodipodi:nodetypes="ccccccccc" />
- <path
- inkscape:connector-curvature="0"
- style="display:inline;opacity:1;fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 910.85021,475.35223 c 2.31494,-0.032 3.17778,0.64253 5.49271,-0.82075 3.45564,-3.08113 5.40254,-3.14477 7.95495,-4.41942 3.02657,-1.31523 6.5357,8.15169 10.10153,9.84899 2.39509,-0.82142 1.28914,1.79379 1.45209,2.65165 0.0571,2.64684 2.80694,3.67806 4.35628,5.42957 3.31604,2.25549 7.37523,6.29546 11.11168,5.3033 6.44525,-2.93107 10.27922,-1.28146 16.28871,-7.38674 0.70405,-1.18134 -0.58425,-6.8946 3.09359,-7.19734 2.52399,0.25338 4.16667,0.0502 6.06092,0.56822 5.441,2.11719 7.73778,6.45 14.71034,7.95495 6.1829,0.96639 7.61264,3.79426 13.88959,5.05076"
- id="path8045-8"
- sodipodi:nodetypes="cccccccccccc" />
- <path
- inkscape:connector-curvature="0"
- style="display:inline;opacity:1;fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 876.98133,483.52197 c 2.39858,-0.7938 6.10613,4.1921 8.17313,7.04568 0.59281,2.67952 1.15377,5.48645 0.75761,12.12183 0.78513,2.41754 2.68049,3.03095 4.79823,3.283 3.11745,-0.53678 5.87669,-1.3243 7.3236,-3.03046 1.8716,-1.94167 5.31253,2.39394 8.08122,4.04061 3.61009,1.91209 7.77378,1.97886 11.8693,2.27284 1.70358,-0.23064 2.3704,4.51515 3.28299,8.08123 0.38414,4.37806 -0.88544,6.89569 -1.76776,9.84898 -0.2943,2.49655 2.9885,3.52974 6.31345,4.54569 3.18244,0.74124 6.54424,1.66184 9.09137,1.76777 5.14186,0.87491 8.08874,2.69052 12.12183,4.04061 2.23914,0.81655 3.26019,2.24216 4.54569,3.53553"
- id="path8047-3"
- sodipodi:nodetypes="ccccccccccccc" />
- <path
- inkscape:connector-curvature="0"
- style="display:inline;opacity:0.25;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter8814-5);enable-background:new"
- d="m 332,187.69519 c 0,0 57.5,-25.5 57.5,-28 0,-2.5 5.5,-52 5.5,-52 0,0 91,-48.500001 91.5,-50.500001 0.5,-2 86,-62.0000004 86,-62.0000004 L 386.5,17.195189 311,123.19519 l 21,64.5 z"
- id="path8049-8"
- clip-path="url(#clipPath8514-8)"
- transform="translate(276,136)" />
- <path
- inkscape:connector-curvature="0"
- style="display:inline;overflow:visible;visibility:visible;opacity:0.25;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- d="m 1697.2846,722.5514 c 0,0 -115.9655,73.5391 -123.0365,77.78174 -7.0711,4.24264 -230.5169,137.17872 -230.5169,137.17872 l 4.2427,39.59798 216.3747,-100.40917 117.3797,-101.82337 15.5563,-52.3259 z"
- id="path8051-0" />
- <path
- inkscape:connector-curvature="0"
- style="display:inline;overflow:visible;visibility:visible;opacity:0.5;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter8810-3);enable-background:accumulate"
- d="m 528.91587,556.85291 c -5.65685,-1.41421 -181.01933,74.95332 -181.01933,74.95332 l -33.94113,181.01934 51.09546,193.94823 257.2031,67.6813 c 0,0 206.47518,152.735 212.13203,148.4924 5.65686,-4.2426 168.2914,-193.7473 168.2914,-193.7473 L 842.87128,845.35248 796.20224,667.16157 528.91587,556.85291 Z"
- id="path8053-4"
- clip-path="url(#clipPath8610-9)"
- sodipodi:nodetypes="cccccscccc"
- transform="translate(276,136)" />
- <path
- inkscape:connector-curvature="0"
- style="display:inline;opacity:1;fill:#0c0c0c;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 1097.6433,613.88997 c 0,0 22.6195,-6.50681 35.7427,-5.87273 13.1233,0.63409 30.6416,1.93862 43.7089,12.18619 13.0673,10.24756 25.0677,27.14007 34.1124,58.36965 9.0446,31.22958 1.6983,99.25201 -6.1761,143.34735 -7.8743,44.09534 -28.2651,106.11298 -45,140 -16.7348,33.88702 -49.7977,77.49517 -60.5694,89.87617 -11.3642,13.062 -56.2059,36.4262 -79.4306,42.2667 5.3034,-10.6066 48.8998,-50.5889 35,-60.7143 -14.0189,-10.2123 -45.76,45.9824 -84.2931,29.0332 21.38231,-13.1321 41.7794,-51.1861 34.0406,-66.59448 -7.84024,-15.61039 -30.70492,48.75758 -93.53553,37.01288 30.05204,-27.5267 55.40706,-70.90401 41.2627,-82.9797 -14.41516,-12.30687 -60.46175,54.2932 -60.46175,54.2932 0,0 -2.8219,-41.70123 13.7732,-68.60737 16.63935,-26.97787 79.65297,-81.61527 99.55308,-111.70342 19.9002,-30.08814 33.6126,-66.00902 42.1355,-92.51794 8.5228,-26.50892 15.8009,-77.09954 15.8009,-77.09954"
- id="path8055-0"
- sodipodi:nodetypes="czzzzzzczczczczzzc" />
- <path
- inkscape:connector-curvature="0"
- style="display:inline;overflow:visible;visibility:visible;opacity:0.25;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter8818-1);enable-background:accumulate"
- d="m 770.74639,609.17881 -50.91169,97.58074 -79.90307,111.01576 34.64824,71.41778 42.42641,79.19597 72.12489,-45.25484 14.14214,-192.33305 21.2132,-138.59292 -14.14214,-90.15612 -39.59798,107.12668 z"
- id="path8057-9"
- clip-path="url(#clipPath8622-5)"
- sodipodi:nodetypes="cccccccccc"
- transform="translate(276,136)" />
- <path
- inkscape:connector-curvature="0"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter8810-3);enable-background:accumulate"
- d="m 295,846.19519 6.64488,-68.92285 c 0,0 90.31951,89.00457 162.35512,122.92285 72.03561,33.91828 308,62 308,62 l 154,-26 -36,162.00001 -286,26 -298,-89 -11,-189.00001 z"
- id="path8059-1"
- clip-path="url(#clipPath8906-9)"
- sodipodi:nodetypes="cczcccccc"
- transform="translate(276,136)" />
- <path
- inkscape:connector-curvature="0"
- transform="translate(450.03125,73.843964)"
- style="display:inline;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter3587-1);enable-background:new"
- d="m 405.79629,845.99023 74.95332,65.05383 2.49963,16.8804 19.40336,10.15891 6.49204,23.05109 31.70905,-8.3711 14.84924,48.08324 c 12.25652,12.7279 89.79344,-113.1097 55.86143,38.1838 l -60.81118,16.2635 -89.20292,-94.69286 -62.82503,-53.79963 7.07106,-60.81118 z"
- id="path8061-9"
- sodipodi:nodetypes="cccccccccccc"
- clip-path="url(#clipPath3602-4)" />
- <path
- inkscape:connector-curvature="0"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#121212;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate"
- d="m 1159.317,918.349 c 54.2857,-1.42857 126.035,-15.05199 170,-26.78572 44.0527,-11.75714 125.8863,-36.34724 175.357,-57.85714 49.3393,-21.45272 113.6038,-59.2816 154.2859,-92.14285 40.5081,-32.72069 52.3899,-55.81981 60.7142,-33.57143 8.3691,22.36779 -16.4069,56.32562 -37.8571,81.07143 -21.6042,24.9234 -52.7314,52.70533 -98.9287,89.28571 -46.1973,36.58038 -156.0825,101.58463 -212.8571,128.5714 -57.066,27.1254 -128.2033,58.2385 -172.1428,72.5001 -43.9395,14.2616 -131.4286,31.0714 -131.4286,31.0714 L 1159.317,918.349 Z"
- id="path8063-6"
- sodipodi:nodetypes="czzzzzzzzcc" />
- <path
- inkscape:connector-curvature="0"
- transform="translate(450.03125,73.843964)"
- style="display:inline;overflow:visible;visibility:visible;opacity:0.5;fill:url(#linearGradient6973);fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter3779-4);enable-background:accumulate"
- d="m 1241.5965,652.95007 c 0,0 -64.7215,54.33706 -145.6639,98.99494 -82.0244,45.25484 -284.25704,93.3381 -284.25704,93.3381 0,0 -15.10137,21.05196 45.25489,28.28428 60.35626,7.23232 224.08195,-53.30069 278.60015,-96.16654 54.5182,-42.86585 120.2081,-111.72286 120.2081,-111.72286 l -14.1422,-12.72792 z"
- id="path8065-2"
- sodipodi:nodetypes="czczzcc"
- clip-path="url(#clipPath3992-4)" />
- <g
- transform="translate(450.03125,73.843964)"
- style="display:inline;opacity:1;enable-background:new"
- id="g8067-5"
- clip-path="url(#clipPath3986-7)">
- <g
- transform="translate(-174.03125,62.156036)"
- style="filter:url(#filter3677-5)"
- id="g8069-4">
- <g
- style="filter:url(#filter3785-4)"
- id="g8071-4">
- <path
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="czzccccc"
- id="path8073-9"
- d="m 1094.2857,725.93361 c 0,0 -0.2961,26.16091 4.6428,37.85715 4.9389,11.69624 20.0381,26.48665 28.5715,31.42857 8.5334,4.94192 18.9286,8.57142 18.9286,8.57142 l 117.8571,-115 17.8572,-75.71428 -96.4286,38.57143 -91.4286,74.28571 z"
- style="display:inline;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- transform="translate(174.03125,-62.156036)" />
- <rect
- y="486.14224"
- x="1197.8389"
- height="309.71277"
- width="333.75412"
- id="rect8075-9"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate" />
- </g>
- </g>
- <g
- transform="translate(-174.03125,62.156036)"
- style="display:inline;opacity:0.18000004;enable-background:new"
- id="g8077-3">
- <g
- style="filter:url(#filter3785-4)"
- id="g8079-6">
- <path
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="czzccccc"
- id="path8081-0"
- d="m 1094.2857,725.93361 c 0,0 -0.2961,26.16091 4.6428,37.85715 4.9389,11.69624 20.0381,26.48665 28.5715,31.42857 8.5334,4.94192 18.9286,8.57142 18.9286,8.57142 l 117.8571,-115 17.8572,-75.71428 -96.4286,38.57143 -91.4286,74.28571 z"
- style="display:inline;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- transform="translate(174.03125,-62.156036)" />
- <rect
- y="486.14224"
- x="1197.8389"
- height="309.71277"
- width="333.75412"
- id="rect8083-5"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:none;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate" />
- </g>
- </g>
- </g>
- <path
- inkscape:connector-curvature="0"
- transform="translate(450.03125,73.843964)"
- style="display:inline;overflow:visible;visibility:visible;opacity:0.83300003;fill:#050505;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:15;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;filter:url(#filter8225-7);enable-background:accumulate"
- d="m 1264.1875,605 c -4.4911,0.73268 -8.157,3.45509 -11.9375,6.40625 -10.0813,7.86976 -28.1695,34.42524 -48.0312,50.46875 -39.8674,32.20316 -103.996,69.97701 -152.5626,91.09375 -48.614,21.13738 -130.54122,45.81801 -174.31245,57.5 -43.39821,11.58246 -115.04403,25.13107 -168.25,26.53125 l -4.5625,0.125 -2,4.125 -92.84375,192.125 -6.5,13.4688 14.65625,-2.8438 c 0,0 87.26968,-16.6514 132.34375,-31.2812 44.7252,-14.51667 115.79086,-45.66683 173.03125,-72.87505 C 980.82199,912.46306 1090.1551,847.86412 1137.5,810.375 c 46.3608,-36.70982 77.8049,-64.71682 99.9375,-90.25 10.9011,-12.576 22.7448,-27.53144 31.0313,-42.75 8.2864,-15.21856 19.1597,-44.21808 13.6874,-58.84375 -1.2177,-3.25474 -2.5514,-6.0613 -4.5937,-8.5 -2.0423,-2.4387 -8.4747,-1.57199 -8.5625,-5.03125 -0.2098,-8.26482 -3.3155,-0.24423 -4.8125,0 z m 2.1563,15.21875 c 0.4148,0.58574 1.0311,1.55766 1.7812,3.5625 2.8968,7.74213 -1.4407,31.89875 -8.8125,45.4375 -7.3718,13.53875 -22.6384,28.92394 -33.1875,41.09375 -21.0754,24.31356 -51.9037,51.86156 -97.9375,88.3125 -45.0496,35.67159 -155.46033,101.09459 -211.40625,127.6875 -56.89173,27.04249 -128.09616,58.1184 -171.25,72.125 -36.36491,11.8031 -95.84471,23.8338 -115.71875,27.7813 L 714.09375,851.75 c 54.70691,-2.0493 123.79259,-15.21635 167.125,-26.78125 44.33422,-11.83225 126.07865,-36.33633 176.40625,-58.21875 50.112,-21.78871 112.5344,-61.16816 154.0312,-94.6875 20.6464,-16.67721 41.7449,-42.54588 49.8126,-48.84375 2.437,-1.90242 4.0806,-2.6358 4.875,-3 z"
- id="path8085-0"
- clip-path="url(#clipPath3722-3)"
- sodipodi:nodetypes="cssssccccccssssssssccssssssccssssc" />
- <g
- style="display:inline;opacity:1;enable-background:new"
- id="g8087-2"
- mask="url(#mask7704-9)"
- transform="matrix(0.9934486,0.1142802,-0.1142802,0.9934486,-9.24324,588.09054)"
- inkscape:transform-center-x="-185.09603"
- inkscape:transform-center-y="-12.859654">
- <path
- inkscape:connector-curvature="0"
- transform="translate(0.08004571,-0.03125)"
- style="display:inline;fill:#bcb786;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 1111.4062,-285.9375 -3.9374,1.875 c -0.041,0.0102 -0.1,0.0205 -0.125,0.0312 -0.4188,0.21285 -0.1647,0.10058 -0.6563,0.3125 -0.4861,0.20956 -1.7376,0.58419 -4.0937,1.46875 -3.3312,1.25058 -5.8043,2.14984 -7,3.0625 -1.5362,0.0213 -3.7205,0.23331 -5.6563,0.71875 -2.9815,0.74767 -4.8552,1.17401 -6.75,1.59375 -1.8948,0.41973 -1.6755,0.64219 -2.875,0.875 -1.2966,0.25167 -1.7214,-0.009 -5.4375,0.78125 -3.4899,0.74215 -8.8948,1.93107 -10.1562,2.6875 -1.584,-0.18078 -3.8675,-0.32178 -5.8438,-0.0312 -3.0404,0.44696 -4.9162,0.67276 -6.8438,0.90625 -0.6554,0.0794 -1.041,0.20078 -1.3437,0.28125 -0.4262,0.13166 -0.6858,0.26002 -1.375,0.34375 -1.3116,0.15937 -1.7622,-0.15683 -5.5313,0.28125 -3.5539,0.41309 -9.0054,1.27282 -10.25,1.9375 -1.599,-0.29729 -3.8577,-0.53419 -5.8437,-0.34375 -3.0588,0.29332 -4.972,0.48399 -6.9063,0.65625 -1.9342,0.17227 -1.6886,0.42237 -2.9062,0.53125 -1.3162,0.1177 -1.7598,-0.16363 -5.5312,0.25 -3.5421,0.38845 -9.0079,1.20927 -10.2813,1.875 -1.5989,-0.29469 -3.8872,-0.50701 -5.875,-0.3125 -3.05829,0.29925 -4.9412,0.48024 -6.875,0.65625 -0.65749,0.0598 -1.04005,0.17856 -1.34375,0.25 -0.4277,0.11896 -0.6835,0.21807 -1.375,0.28125 -1.316,0.12026 -1.75975,-0.19488 -5.53125,0.21875 -3.55619,0.39002 -9.0056,1.23916 -10.25,1.90625 -1.59869,-0.29418 -3.85985,-0.52372 -5.84375,-0.3125 -3.0557,0.32533 -4.97405,0.52624 -6.90625,0.71875 -1.93219,0.19251 -1.68975,0.44088 -2.90625,0.5625 -1.31488,0.13147 -1.76305,-0.16454 -5.53125,0.28125 -3.53889,0.41866 -8.9777,1.29217 -10.25,1.96875 -1.59759,-0.28104 -3.85995,-0.42043 -5.84375,-0.1875 -3.05198,0.35837 -4.945,0.56786 -6.875,0.78125 -0.65618,0.0726 -1.04065,0.17269 -1.34375,0.25 -0.42679,0.12723 -0.6849,0.2672 -1.375,0.34375 -1.31339,0.14569 -1.76735,-0.17402 -5.53125,0.3125 -3.54888,0.45876 -8.97865,1.41902 -10.21875,2.125 -1.59309,-0.24424 -3.8338,-0.38135 -5.8125,-0.125 -3.04759,0.39482 -4.9507,0.64845 -6.875,0.90625 -1.92429,0.25779 -1.7261,0.49353 -2.9375,0.65625 -1.30949,0.1759 -1.7472,-0.10438 -5.5,0.46875 -3.52429,0.53824 -8.9232,1.69917 -10.1875,2.4375 -1.58749,-0.20354 -3.8455,-0.25473 -5.8125,0.0937 -3.02619,0.53612 -4.8989,0.86169 -6.8125,1.1875 -0.65059,0.11077 -1.0137,0.27094 -1.3125,0.375 -0.42069,0.16488 -0.66345,0.3313 -1.34375,0.46875 -1.2947,0.26159 -1.7271,-0.006 -5.4375,0.8125 -3.49848,0.77195 -8.8459,2.38293 -10.0625,3.21875 -1.5629,-0.0774 -3.7575,0.0853 -5.6875,0.59375 -2.97238,0.78313 -4.8177,1.23209 -6.6875,1.75 -1.87,0.5179 -1.66665,0.76728 -2.84375,1.09375 -1.27249,0.3529 -1.69705,0.10709 -5.34375,1.1875 -3.42468,1.01463 -8.6494,2.93317 -9.875,3.84375 -1.53878,0.0127 -3.7198,0.27222 -5.625,0.875 -2.93098,0.92734 -4.75035,1.45842 -6.59375,2.0625 -0.62679,0.20538 -0.99165,0.39258 -1.28125,0.53125 -0.40758,0.21361 -0.6533,0.40875 -1.3125,0.625 -1.2545,0.41154 -1.68615,0.18904 -5.28125,1.4375 -3.38989,1.17717 -8.59495,3.2137 -9.78125,4.15625 -1.52388,0.0597 -3.65005,0.39487 -5.53125,1.0625 -2.89739,1.02829 -4.69905,1.67548 -6.53125,2.3125 -1.8322,0.63701 -1.62785,0.84854 -2.78125,1.25 -1.24679,0.43396 -1.66355,0.19972 -5.21875,1.5625 -3.3387,1.2798 -8.4871,3.48255 -9.6875,4.46875 -1.50718,0.10769 -3.6357,0.4988 -5.5,1.21875 -2.86818,1.1076 -4.6239,1.78156 -6.4375,2.46875 -0.6167,0.23363 -0.99645,0.44203 -1.28125,0.59375 10e-6,0 0,0.0295 0,0.0312 l -8,3.1875 -12.4759,3.49189 7.92966,19.27772 c -0.59163,1.97357 12.54624,-4.73836 12.54624,-4.73836 0.22641,-0.14468 0.44895,-0.27261 0.71875,-0.375 1.08052,-0.40998 2.1716,-0.21577 6,-1.6875 3.82852,-1.47174 5.22405,-2.00498 5.90625,-2.40625 0.67961,-0.39978 
1.61175,-0.87937 2.21875,-1.53125 1.82692,-0.13775 3.5708,-0.49323 4.9375,-1 2.968,-1.10052 4.87535,-1.80619 6.78125,-2.46875 1.90581,-0.66254 2.35415,-1.41487 3.40625,-1.78125 1.09162,-0.38011 2.1951,-0.16538 6.0625,-1.53125 3.8674,-1.36586 5.28315,-1.82708 5.96875,-2.21875 0.70111,-0.40052 1.7008,-0.93298 2.3125,-1.59375 1.97081,-0.0547 3.81695,-0.38463 5.28125,-0.875 3.00152,-1.00508 4.92615,-1.62171 6.84375,-2.25 1.53861,-0.5041 2.17415,-1.04677 2.90625,-1.4375 0.23022,-0.13431 0.4759,-0.25373 0.75,-0.34375 1.09832,-0.36048 2.18145,-0.0814 6.09375,-1.3125 3.91231,-1.23113 5.366,-1.67295 6.0625,-2.03125 0.69391,-0.35697 1.6301,-0.79261 2.25,-1.40625 1.86521,-0.0227 3.63585,-0.26683 5.03125,-0.6875 3.0304,-0.91354 4.9924,-1.4301 6.9375,-1.96875 1.94512,-0.53864 2.4262,-1.26452 3.5,-1.5625 1.11402,-0.30915 2.22,0.007 6.1875,-1.03125 3.9675,-1.03863 5.4175,-1.43273 6.125,-1.75 0.7348,-0.32959 1.8139,-0.75372 2.4375,-1.375 1.99782,0.116 3.85745,-0.0201 5.34375,-0.375 3.07811,-0.735 5.0834,-1.10094 7.0625,-1.5 1.58791,-0.32018 2.2443,-0.79055 3,-1.09375 0.23751,-0.1068 0.4669,-0.19276 0.75,-0.25 1.13341,-0.22919 2.30465,0.20893 6.34375,-0.5 4.03942,-0.70893 5.50025,-0.92709 6.21875,-1.1875 0.71581,-0.25944 1.70435,-0.56724 2.34375,-1.09375 1.9242,0.23949 3.7479,0.22453 5.1875,0 3.12642,-0.48762 5.15455,-0.70067 7.15625,-0.96875 2.00171,-0.26807 2.48875,-0.94514 3.59375,-1.09375 1.14642,-0.15418 2.27585,0.30157 6.34375,-0.21875 4.06781,-0.52032 5.56025,-0.69573 6.28125,-0.9375 0.73712,-0.24714 1.7981,-0.58623 2.4375,-1.125 2.05,0.33553 3.9737,0.39796 5.5,0.21875 3.1422,-0.36896 5.18,-0.55936 7.1875,-0.78125 1.61082,-0.17802 2.26465,-0.6082 3.03125,-0.84375 0.24091,-0.0855 0.49405,-0.1556 0.78125,-0.1875 1.1497,-0.12772 2.3013,0.34665 6.375,-0.125 4.0737,-0.47165 5.55905,-0.6106 6.28125,-0.84375 0.71941,-0.23227 1.70025,-0.47346 2.34375,-0.96875 1.9363,0.33346 3.77005,0.40424 5.21875,0.25 3.14601,-0.33495 5.1775,-0.51859 7.1875,-0.71875 2.00991,-0.20014 2.48415,-0.82639 3.59375,-0.9375 1.1511,-0.11528 2.2965,0.36506 6.375,-0.0625 4.0785,-0.42756 5.5889,-0.56209 6.3125,-0.78125 0.73922,-0.22386 1.7956,-0.51325 2.4375,-1.03125 2.057,0.39867 4.00185,0.4934 5.53125,0.34375 3.14871,-0.3081 5.1758,-0.47325 7.1875,-0.65625 1.61401,-0.14682 2.26305,-0.56055 3.03125,-0.78125 0.2413,-0.0809 0.49355,-0.12991 0.78125,-0.15625 1.15211,-0.10545 2.2929,0.39275 6.375,0 4.08211,-0.39275 5.5889,-0.53084 6.3125,-0.75 0.7209,-0.21833 1.6996,-0.4477 2.3437,-0.9375 1.9381,0.34999 3.7689,0.45438 5.2188,0.3125 3.1487,-0.3081 5.1758,-0.47325 7.1875,-0.65625 2.0116,-0.18299 2.5142,-0.83802 3.625,-0.9375 1.1523,-0.10323 2.2922,0.38483 6.375,0 4.0829,-0.38482 5.5887,-0.501 6.3125,-0.71875 0.7393,-0.22243 1.7956,-0.51449 2.4375,-1.03125 2.0574,0.40177 4.0029,0.50333 5.5313,0.34375 3.1465,-0.32852 5.177,-0.5227 7.1874,-0.71875 1.613,-0.15729 2.2657,-0.63148 3.0313,-0.875 0.2407,-0.088 0.4632,-0.12137 0.75,-0.15625 1.1483,-0.1397 2.3167,0.33991 6.375,-0.25 4.0583,-0.58992 5.5618,-0.77714 6.2813,-1.03125 0.7166,-0.25316 1.6746,-0.55807 2.3124,-1.09375 1.9197,0.21194 3.72,0.15141 5.1563,-0.0937 3.1191,-0.5324 5.1116,-0.92861 7.0937,-1.3125 1.9821,-0.38387 2.4743,-1.03965 3.5626,-1.28125 1.1287,-0.25066 2.2702,0.11629 6.25,-0.875 3.9795,-0.99127 5.4295,-1.4193 6.125,-1.78125 0.7222,-0.376 1.7617,-0.87058 2.375,-1.53125 1.9629,-0.012 3.7937,-0.29105 5.2187,-0.84375 2.9512,-1.1446 4.8732,-1.86942 6.6875,-2.75 1.4557,-0.70652 2.3191,-1.70203 2.5312,-2 0.2123,-0.29795 0.099,-0.72855 0.125,-0.75 0.043,-0.0352 0.3404,-0.094 
0.5,-0.4375 0.859,-1.84707 2.3232,-5.62764 2.4376,-6.3125 0.1137,-0.68214 0.168,-1.35277 0.2187,-1.75 0.029,-0.2295 -0.1471,-0.8789 -0.125,-0.9375 0.031,-0.082 0.2883,-0.25057 0.3437,-0.5 0.2663,-1.19831 0.089,-2.20736 -0.125,-3.625 -0.2139,-1.41763 -0.9716,-4.61463 -1.625,-5.46875 -0.6589,-0.86172 -1.2248,-1.01051 -1.75,-1 z"
- id="path8089-9"
- sodipodi:nodetypes="ccssscsssscssssscsssscssssscsssscssssscsssscssssscsssscssssscsssscssscccccssscssssscsssssscssssscsssssscssssscsssssscssssscsssssscssssscsssssscssssscsssssssssssc" />
- <g
- clip-path="url(#clipPath7421-7)"
- id="g8091-4">
- <path
- inkscape:connector-curvature="0"
- id="path8093-3"
- d="m 1107.409,-284.04961 c -0.4187,0.21283 -0.1556,0.0939 -0.6472,0.30581 -0.4861,0.20954 -1.7234,0.57439 -4.0796,1.45895 -3.3311,1.25057 -5.8302,2.15344 -7.0259,3.0661 -1.5361,0.0213 -3.7205,0.23331 -5.6563,0.71875 -2.9815,0.74766 -4.8552,1.17401 -6.75,1.59375 -1.8948,0.41972 -1.6755,0.64219 -2.875,0.875 -1.2966,0.25166 -1.7214,-0.009 -5.4375,0.78125 -3.4899,0.74214 -8.8948,1.93107 -10.1562,2.6875 -1.5839,-0.18079 -3.8675,-0.32178 -5.8438,-0.0312 -3.0404,0.44695 -4.9162,0.67276 -6.8437,0.90625 -0.6554,0.0794 -1.0411,0.20078 -1.3438,0.28125 -0.4262,0.13165 -0.6858,0.26002 -1.375,0.34375 -1.3116,0.15936 -1.7622,-0.15683 -5.5312,0.28125 -3.5539,0.41308 -9.0054,1.27282 -10.25,1.9375 -1.599,-0.2973 -3.8578,-0.53419 -5.8438,-0.34375 -3.0588,0.29331 -4.972,0.48399 -6.9062,0.65625 -1.9343,0.17226 -1.6887,0.42237 -2.9063,0.53125 -1.3162,0.11769 -1.7598,-0.16363 -5.5312,0.25 -3.5419,0.38844 -9.0079,1.20927 -10.2813,1.875 -1.5989,-0.2947 -3.88717,-0.50701 -5.875,-0.3125 -3.05824,0.29924 -4.94113,0.48024 -6.875,0.65625 -0.65749,0.0598 -1.04004,0.17856 -1.34375,0.25 -0.42765,0.11895 -0.68351,0.21807 -1.375,0.28125 -1.31596,0.12025 -1.75976,-0.19488 -5.53125,0.21875 -3.55614,0.39001 -9.00554,1.23916 -10.25,1.90625 -1.59863,-0.29419 -3.85984,-0.52372 -5.84375,-0.3125 -3.0556,0.32532 -4.97404,0.52624 -6.90625,0.71875 -1.93221,0.1925 -1.68987,0.44088 -2.90625,0.5625 -1.31488,0.13146 -1.76298,-0.16454 -5.53125,0.28125 -3.53887,0.41865 -8.97768,1.29217 -10.25,1.96875 -1.59755,-0.28105 -3.85996,-0.42043 -5.84375,-0.1875 -3.05198,0.35836 -4.94508,0.56786 -6.875,0.78125 -0.6562,0.0725 -1.04066,0.17269 -1.34375,0.25 -0.42677,0.12722 -0.68491,0.2672 -1.375,0.34375 -1.31333,0.14568 -1.76746,-0.17402 -5.53125,0.3125 -3.54889,0.45875 -8.97863,1.41902 -10.21875,2.125 -1.59305,-0.24424 -3.83381,-0.38135 -5.8125,-0.125 -3.04759,0.39481 -4.95071,0.64845 -6.875,0.90625 -1.92428,0.25779 -1.72611,0.49353 -2.9375,0.65625 -1.30946,0.1759 -1.74719,-0.10438 -5.5,0.46875 -3.52429,0.53824 -8.92315,1.69917 -10.1875,2.4375 -1.5875,-0.20354 -3.8455,-0.25473 -5.8125,0.0937 -3.02617,0.53612 -4.89889,0.86169 -6.8125,1.1875 -0.65061,0.11077 -1.01371,0.27094 -1.3125,0.375 -0.42067,0.16488 -0.66345,0.3313 -1.34375,0.46875 -1.29465,0.26159 -1.72712,-0.006 -5.4375,0.8125 -3.49853,0.77195 -8.84595,2.38293 -10.0625,3.21875 -1.56278,-0.0774 -3.75758,0.0853 -5.6875,0.59375 -2.97244,0.78313 -4.81761,1.23209 -6.6875,1.75 -1.86988,0.5179 -1.6666,0.76728 -2.84375,1.09375 -1.27246,0.3529 -1.69703,0.10709 -5.34375,1.1875 -3.4247,1.01463 -8.64944,2.93317 -9.875,3.84375 -1.53883,0.0127 -3.71983,0.27222 -5.625,0.875 -2.93106,0.92734 -4.75031,1.45842 -6.59375,2.0625 -0.62676,0.20538 -0.99173,0.39258 -1.28125,0.53125 -0.40763,0.21361 -0.65334,0.40875 -1.3125,0.625 -1.25446,0.41154 -1.68611,0.18904 -5.28125,1.4375 -3.38985,1.17717 -8.59498,3.2137 -9.78125,4.15625 -1.52389,0.0597 -3.65005,0.39487 -5.53125,1.0625 -2.89739,1.02829 -4.69908,1.67548 -6.53125,2.3125 -1.83217,0.63701 -1.62785,0.84854 -2.78125,1.25 -1.24678,0.43396 -1.66361,0.19972 -5.21875,1.5625 -3.33867,1.2798 -8.48715,3.48255 -9.6875,4.46875 -1.50718,0.10769 -3.63569,0.4988 -5.5,1.21875 -2.86818,1.1076 -4.6238,1.78156 -6.4375,2.46875 -0.61666,0.23363 -0.99641,0.44203 -1.28125,0.59375 0,0 0,1.09375 0,1.09375 0.11178,-0.22236 0.38599,-0.81743 0.90625,-1.09375 0.69797,-0.37072 4.81363,-1.99337 6.8125,-2.71875 1.65686,-0.60125 4.15389,-1.32868 5.96875,-1.3125 0.30162,0.003 0.58762,0.0509 0.84375,0.0937 1.84249,0.30825 7.46875,1.5625 7.46875,1.5625 -10e-6,0 -6.23349,-1.64675 -7.03125,-1.84375 
-0.19079,-0.0471 -0.53572,-0.0687 -0.96875,-0.0625 1.14546,-0.86971 4.761,-2.39351 7.34375,-3.4375 2.83822,-1.14727 3.11681,-1.25182 5.0625,-1.65625 2.0083,-0.41744 3.15625,-0.5 3.15625,-0.5 0,10e-6 -0.0824,-0.60114 0.96875,-1.125 0.7051,-0.35141 4.88702,-1.8924 6.90625,-2.5625 1.9519,-0.64773 5.0574,-1.3585 6.875,-1 1.86323,0.3675 7.53125,1.8125 7.53125,1.8125 10e-6,0 -6.287,-1.87111 -7.09375,-2.09375 -0.19292,-0.0532 -0.53084,-0.086 -0.96875,-0.0937 1.15834,-0.83288 4.79444,-2.19532 7.40625,-3.15625 2.87016,-1.05601 3.16734,-1.1618 5.125,-1.53125 1.85349,-0.34979 2.85884,-0.42548 3.03125,-0.4375 0.1136,-0.21724 0.37745,-0.81002 0.90625,-1.0625 0.70944,-0.33874 4.92607,-1.71275 6.96875,-2.3125 1.69317,-0.49711 4.24077,-1.03677 6.09375,-0.90625 0.30795,0.0217 0.61349,0.0973 0.875,0.15625 1.88118,0.42432 7.59375,2.03125 7.59375,2.03125 10e-6,0 -6.34174,-2.06525 -7.15625,-2.3125 -0.19479,-0.0591 -0.55788,-0.10394 -1,-0.125 1.16949,-0.79755 4.86302,-2.05622 7.5,-2.9375 2.89781,-0.96847 3.23301,-1.00332 5.21875,-1.28125 2.04965,-0.28689 3.1875,-0.3125 3.1875,-0.3125 -2e-5,0 -0.0728,-0.60697 1,-1.0625 0.7196,-0.30557 4.99098,-1.50075 7.0625,-2 2.00244,-0.48258 5.19849,-0.92829 7.0625,-0.40625 1.91078,0.53515 7.71875,2.5 7.71875,2.5 0,0 -6.42266,-2.42351 -7.25,-2.71875 -0.19784,-0.0706 -0.58216,-0.14039 -1.03125,-0.1875 1.1879,-0.72865 4.91527,-1.77408 7.59375,-2.5 2.94342,-0.79775 3.29208,-0.77083 5.3125,-0.90625 1.91289,-0.12823 2.94705,-0.0711 3.125,-0.0625 0.11728,-0.20366 0.39176,-0.77948 0.9375,-0.96875 0.73219,-0.25394 5.07852,-1.04789 7.1875,-1.375 1.74813,-0.27111 4.40088,-0.4847 6.3125,-0.0937 0.31766,0.065 0.60522,0.18551 0.875,0.28125 1.94074,0.68873 7.84375,3.09375 7.84375,3.09375 10e-6,0 -6.53471,-2.95077 -7.375,-3.3125 -0.20097,-0.0865 -0.57513,-0.16679 -1.03125,-0.25 1.2065,-0.63318 5.02956,-1.3956 7.75,-1.90625 2.98953,-0.56119 3.30023,-0.52954 5.34375,-0.53125 2.10926,-0.002 3.3125,0.125 3.3125,0.125 0,1e-5 -0.0727,-0.63119 1.03125,-0.9375 0.74052,-0.20547 5.12612,-0.83387 7.25,-1.0625 2.05302,-0.22099 5.31863,-0.25222 7.21875,0.46875 1.94779,0.73907 7.84375,3.375 7.84375,3.375 2e-5,0 -6.56288,-3.17897 -7.40625,-3.5625 -0.20168,-0.0917 -0.54221,-0.18621 -1,-0.28125 1.21092,-0.60188 4.98442,-1.24884 7.71875,-1.65625 3.0048,-0.44772 3.32551,-0.4517 5.375,-0.40625 1.94045,0.043 3.00699,0.19423 3.1875,0.21875 0.11892,-0.19316 0.3839,-0.76583 0.9375,-0.90625 0.74271,-0.18838 5.15429,-0.73428 7.28125,-0.9375 1.76303,-0.16842 4.42009,-0.23429 6.34375,0.25 0.31968,0.0805 0.60351,0.20359 0.875,0.3125 1.95293,0.78349 7.90625,3.46875 7.90625,3.46875 -2e-5,0 -6.59191,-3.25348 -7.4375,-3.65625 -0.20222,-0.0963 -0.57226,-0.20703 -1.03125,-0.3125 1.21414,-0.57427 5.04366,-1.12219 7.78125,-1.5 3.00838,-0.4152 3.32307,-0.44263 5.375,-0.375 2.11798,0.0698 3.3125,0.25 3.3125,0.25 -2e-5,0 -0.0773,-0.63741 1.03125,-0.90625 0.74362,-0.18035 5.15176,-0.66355 7.28125,-0.84375 2.05847,-0.17417 5.34324,-0.12432 7.25,0.65625 1.95459,0.80016 7.875,3.53125 7.875,3.53125 -2e-5,0 -6.55993,-3.30876 -7.40625,-3.71875 -0.20237,-0.0981 -0.57186,-0.2031 -1.03125,-0.3125 1.21517,-0.5639 5.01008,-1.1143 7.75,-1.46875 3.01091,-0.38952 3.32131,-0.39765 5.375,-0.3125 1.94439,0.0806 3.00663,0.25324 3.1875,0.28125 0.11916,-0.19086 0.38277,-0.74531 0.9375,-0.875 0.74426,-0.174 5.14993,-0.65047 7.28125,-0.8125 1.76662,-0.13427 4.4497,-0.12571 6.375,0.375 0.32,0.0832 0.6033,0.20127 0.875,0.3125 1.9546,0.80016 7.9063,3.5625 7.9063,3.5625 -10e-5,0 -6.5912,-3.34001 -7.4375,-3.75 -0.2024,-0.0981 -0.5719,-0.20311 
-1.0313,-0.3125 1.2151,-0.5639 5.0413,-1.08306 7.7813,-1.4375 3.0109,-0.38953 3.3525,-0.4289 5.4062,-0.34375 2.1197,0.0879 3.3125,0.3125 3.3125,0.3125 0,0 -0.078,-0.64902 1.0313,-0.90625 0.7443,-0.17256 5.1495,-0.62336 7.2812,-0.78125 2.0606,-0.1526 5.3429,-0.0968 7.25,0.6875 1.955,0.80395 7.875,3.5 7.875,3.5 0,0 -6.5598,-3.27587 -7.4062,-3.6875 -0.2025,-0.0984 -0.5718,-0.20222 -1.0313,-0.3125 1.2154,-0.56154 5.0119,-1.12778 7.75,-1.5 3.009,-0.40905 3.3227,-0.41558 5.375,-0.34375 1.9431,0.068 3.0072,0.16485 3.1875,0.1875 0.1188,-0.1944 0.3846,-0.72881 0.9375,-0.875 0.7418,-0.19612 5.1311,-0.82878 7.25,-1.09375 1.7564,-0.21961 4.4053,-0.33231 6.3125,0.0312 0.3169,0.0604 0.6058,0.18938 0.875,0.28125 1.9362,0.66092 7.8438,2.9375 7.8438,2.9375 -1e-4,0 -6.5367,-2.80655 -7.375,-3.15625 -0.2005,-0.0836 -0.5762,-0.17333 -1.0313,-0.25 1.2037,-0.65046 5.0191,-1.37195 7.7188,-2 2.9667,-0.6902 3.2889,-0.75507 5.3125,-0.875 2.0886,-0.1238 3.2812,-0.0312 3.2812,-0.0312 0,1e-5 -0.087,-0.63205 1,-1.03125 0.7292,-0.2678 5.0472,-1.33797 7.125,-1.8125 2.0085,-0.45869 5.1679,-1.0293 7,-0.625 1.8781,0.41446 13.5782,3.01563 13.5782,3.01563 0,0 -12.3275,-3.02266 -13.1407,-3.26563 -0.1945,-0.0581 -0.5586,-0.10626 -1,-0.125 1.1676,-0.80369 3.5142,-1.6873 6.1094,-2.70312 1.6814,-0.65818 0.9237,-0.37659 2.7759,-1.0036 1.7536,-0.59366 2.4854,-1.01071 2.6304,-1.11299 0.3461,-0.20651 -0.356,-0.12188 -0.5442,-0.0424 z"
- style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7001-5);enable-background:new"
- sodipodi:nodetypes="czscsssscssssscsssscssssscsssscssssscsssscssssscsssscssssscsssscssccsssscscsscssscscsscsssscscsscssscscsscsssscscsscssscscsscsssscscsscssscscsscsssscscsscssscscsscsssscscsscssscscsscc" />
- <path
- inkscape:connector-curvature="0"
- id="path8095-5"
- d="m 1082.625,-275.125 c 1.873,0.39348 4.4961,1.14555 6.0313,1.96875 1.5352,0.82319 2.8222,1.056 5.375,2.5 2.5266,1.42926 4.7958,2.00696 6.9687,2.53125 2.3476,0.56642 5.4354,0.71523 8.8438,1.1875 -1.0889,-0.83975 -6.6074,-1.17245 -8.4063,-1.5625 -1.7989,-0.39006 -3.8941,-1.01616 -6.5937,-2.3125 -2.6997,-1.29634 -3.4944,-1.79896 -5.8125,-2.6875 -2.3182,-0.88854 -4.0044,-1.38314 -6.4063,-1.625 z"
- style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6949-4);enable-background:new" />
- <path
- inkscape:connector-curvature="0"
- id="path8097-1"
- d="m 1051.4688,-270 c 1.9053,0.57759 4.5281,1.61572 6.0937,2.59375 1.5656,0.97802 2.8802,1.35981 5.5,3.125 2.593,1.74716 4.9859,2.70927 7.25,3.59375 2.4461,0.95557 5.6826,1.65713 9.4063,3.0625 -1.1896,-1.13784 -7.0631,-2.68675 -8.9375,-3.375 -1.8745,-0.68825 -4.0818,-1.5662 -6.875,-3.28125 -2.7933,-1.71504 -3.5736,-2.2839 -5.9375,-3.40625 -2.3641,-1.12234 -4.0567,-1.83455 -6.5,-2.3125 z"
- style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6961-8);enable-background:new" />
- <path
- inkscape:connector-curvature="0"
- id="path8099-7"
- d="m 1020.2188,-266.84375 c 1.9119,0.63811 4.5812,1.75536 6.1562,2.8125 1.5751,1.05715 2.8956,1.50867 5.5313,3.40625 2.6086,1.87821 5.0284,3.03003 7.3125,4.0625 2.4677,1.11545 5.7645,2.1733 9.5312,3.84375 -1.2033,-1.22253 -7.2028,-3.31423 -9.0937,-4.125 -1.891,-0.81077 -4.0649,-1.89379 -6.875,-3.75 -2.8102,-1.8562 -3.6218,-2.47693 -6,-3.71875 -2.3783,-1.2418 -4.1107,-1.97569 -6.5625,-2.53125 z"
- style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6957-9);enable-background:new" />
- <path
- inkscape:connector-curvature="0"
- id="path8101-4"
- d="m 1110.1719,-266.89063 c 0.1508,0.0486 0.688,0.631 0.1094,1.48438 -0.8101,1.19459 -5.7049,3.32429 -8.5625,4.125 -2.8449,0.79712 -6.2901,0.97774 -10.5625,-0.375 -4.3016,-1.36195 -5.4697,-2.46872 -10.6563,-4.3125 4.664,2.11517 6.1953,3.95233 10.125,5.34375 1.6207,0.57387 3.3671,0.9396 5.0625,1.03125 -0.4451,0.32563 -1.5303,0.9833 -3.5625,1.59375 -2.7955,0.83969 -6.6491,1.53378 -8.25,1.625 -1.5146,0.0863 -3.142,-0.51249 -3.4375,-0.625 0.1667,0.10308 0.3732,0.37734 -0.25,1.03125 -0.8993,0.94363 -6.1474,1.923 -9.125,2.25 -2.9643,0.32555 -6.5216,-0.016 -10.9062,-1.90625 -3.978,-1.71497 -5.339,-2.91536 -9.4063,-4.75 0,0 0,0.15625 0,0.15625 3.6431,2.09529 5.284,3.88327 8.875,5.5625 1.7302,0.80909 3.5917,1.40876 5.4063,1.71875 -0.5349,0.28676 -1.5578,0.71151 -3.4375,1.03125 -2.869,0.48796 -6.809,0.81614 -8.4375,0.75 -0.8507,-0.0345 -1.7286,-0.18437 -2.4063,-0.40625 -0.6848,-0.21488 -1.1897,-0.44467 -1.3125,-0.5 0.1694,0.10721 0.4311,0.40288 -0.2187,1.03125 -0.9097,0.87962 -6.2461,1.33638 -9.25,1.46875 -2.9905,0.13179 -6.5889,-0.45063 -11,-2.5625 -4.4412,-2.12626 -5.6415,-3.4016 -10.9063,-5.78125 4.7343,2.59704 6.2865,4.6291 10.3438,6.71875 1.6733,0.86185 3.4852,1.49425 5.25,1.9375 -0.4633,0.23332 -1.5894,0.68814 -3.6875,0.9375 -2.8863,0.34298 -6.8346,0.49288 -8.4688,0.375 -1.5462,-0.1115 -3.2312,-0.85696 -3.5312,-1 0.1691,0.12029 0.4138,0.41048 -0.2188,1 -0.9128,0.85073 -6.2441,1.26212 -9.25,1.375 -2.9925,0.11237 -6.5897,-0.49043 -11,-2.59375 -4.00125,-1.90823 -5.38803,-3.13783 -9.46875,-5.09375 -3e-5,0 0,0.15625 0,0.15625 3.65506,2.20392 5.29421,4.05255 8.90625,5.90625 1.74029,0.89315 3.637,1.52827 5.4688,1.96875 -0.54,0.2483 -1.5781,0.61533 -3.4688,0.84375 -2.88568,0.34858 -6.86605,0.52095 -8.5,0.40625 -0.85345,-0.0599 -1.72631,-0.25791 -2.40625,-0.5 -0.6871,-0.2353 -1.18935,-0.47226 -1.3125,-0.53125 0.16998,0.11227 0.46448,0.42225 -0.1875,1.03125 -0.91265,0.8525 -6.27533,1.29337 -9.28125,1.40625 -2.99246,0.11237 -6.59346,-0.52805 -11,-2.59375 -4.43653,-2.07978 -5.64688,-3.33171 -10.90625,-5.65625 4.72938,2.54749 6.29074,4.5778 10.34375,6.625 1.67155,0.84433 3.48554,1.46643 5.25,1.90625 -0.46323,0.23422 -1.5897,0.68407 -3.6875,0.9375 -2.88569,0.34858 -6.8362,0.56952 -8.46875,0.46875 -1.54456,-0.0953 -3.20031,-0.82885 -3.5,-0.96875 0.16899,0.11853 0.38192,0.40385 -0.25,1 -0.91186,0.86028 -6.24665,1.33025 -9.25,1.46875 -2.98995,0.1379 -6.56745,-0.45068 -10.96875,-2.46875 -3.99308,-1.83089 -5.36511,-3.0292 -9.4375,-4.90625 -2e-5,0 0,0.15625 0,0.15625 3.64761,2.13327 5.27033,3.93487 8.875,5.71875 1.73675,0.85951 3.60727,1.45014 5.4375,1.875 -0.53947,0.2529 -1.55063,0.64129 -3.4375,0.90625 -2.87978,0.40436 -6.83813,0.64562 -8.46875,0.5625 -0.85172,-0.0434 -1.7277,-0.20855 -2.40625,-0.4375 -0.68569,-0.22201 -1.1896,-0.44339 -1.3125,-0.5 0.16959,0.10899 0.4319,0.40965 -0.21875,1.03125 -0.91079,0.87014 -6.25021,1.39152 -9.25,1.5625 -2.98633,0.17021 -6.57381,-0.31577 -10.96875,-2.28125 -4.42489,-1.97888 -5.60596,-3.22819 -10.84375,-5.375 4.70997,2.38767 6.27017,4.38873 10.3125,6.34375 1.66715,0.80631 3.46043,1.39658 5.21875,1.78125 -0.46163,0.2487 -1.597,0.71225 -3.6875,1.03125 -2.8756,0.43876 -6.7804,0.7331 -8.40625,0.6875 -1.53823,-0.0431 -3.2328,-0.74522 -3.53125,-0.875 0.16833,0.11282 0.41057,0.41375 -0.21875,1.03125 -0.90812,0.8911 -6.20295,1.52825 -9.1875,1.8125 -2.97118,0.28298 -6.57342,-0.1758 -10.9375,-1.9375 -3.95934,-1.59831 -5.32915,-2.79487 -9.34375,-4.3125 3e-5,0 0,0.15625 0,0.15625 3.5959,1.81135 5.23831,3.58233 8.8125,5.15625 1.72207,0.75835 3.58748,1.28895 5.40625,1.625 
-0.53609,0.27908 -1.56658,0.68763 -3.4375,1.0625 -2.85539,0.5721 -6.78942,1.01939 -8.40625,1.03125 -0.84451,0.006 -1.70608,-0.0809 -2.375,-0.25 -0.67591,-0.16151 -1.16009,-0.32923 -1.28125,-0.375 0.16722,0.094 0.42267,0.38348 -0.21875,1.0625 -0.89787,0.95052 -6.18648,1.91708 -9.125,2.4375 -2.92534,0.51809 -6.43215,0.37424 -10.71875,-1.03125 -4.3158,-1.41507 -5.47277,-2.52994 -10.5625,-3.96875 4.57685,1.75101 6.08855,3.56006 10.03125,5 1.62608,0.59389 3.36885,0.95565 5.09375,1.15625 -0.45285,0.29702 -1.55478,0.88339 -3.59375,1.46875 -2.80472,0.80517 -6.63886,1.57583 -8.21875,1.75 -1.49475,0.1648 -3.11623,-0.31681 -3.40625,-0.40625 0.16356,0.0901 0.39278,0.35993 -0.21875,1.0625 -0.88247,1.01385 -6.04452,2.37165 -8.9375,3.0625 -2.88002,0.68778 -6.3356,0.76002 -10.5625,-0.4375 -3.83485,-1.08645 -5.17258,-2.07237 -9.0625,-3.125 -10e-6,0 0,0.15625 0,0.15625 3.48418,1.39485 5.06941,2.9194 8.53125,4.03125 1.66793,0.53572 3.45578,0.78674 5.21875,0.875 -0.51964,0.35212 -1.50039,0.91452 -3.3125,1.53125 -2.76566,0.94125 -6.59024,1.93537 -8.15625,2.15625 -0.81794,0.11539 -1.6331,0.12283 -2.28125,0.0312 -0.65496,-0.0832 -1.1326,-0.21827 -1.25,-0.25 0.16204,0.0746 0.43399,0.34044 -0.1875,1.09375 -0.87,1.05453 -6.00963,2.65925 -8.875,3.4375 -2.85253,0.77476 -6.25912,0.9582 -10.4375,-0.0937 -4.20683,-1.05913 -5.35669,-2.04166 -10.34375,-3.15625 4.48454,1.45946 5.96935,3.13523 9.8125,4.25 1.58504,0.45977 3.28679,0.63825 4.96875,0.6875 -0.44157,0.33676 -1.51251,1.02773 -3.5,1.78125 -2.73393,1.03649 -6.45198,2.16269 -8,2.4375 -1.46462,0.26002 -3.05958,-0.11654 -3.34375,-0.1875 0.16025,0.0796 0.38044,0.32098 -0.21875,1.0625 -0.86466,1.07006 -5.91652,2.81815 -8.75,3.6875 -2.8208,0.86547 -6.2075,1.15631 -10.34375,0.21875 -3.75259,-0.85061 -5.04785,-1.71647 -8.875,-2.59375 0,0 0,0.15625 0,0.15625 3.42796,1.23779 4.98741,2.6323 8.375,3.53125 1.63216,0.43314 3.36704,0.58301 5.09375,0.5625 -0.50893,0.38417 -1.47675,1.02182 -3.25,1.75 -2.70634,1.11134 -6.43633,2.30781 -7.96875,2.625 -0.8004,0.16569 -1.61231,0.21862 -2.25,0.15625 0,0 0,0.51552 0,0.92229 0,0.26507 0,0.48396 0,0.48396 0.22645,-0.14468 0.44891,-0.27261 0.71875,-0.375 1.08052,-0.40998 2.17161,-0.21577 6,-1.6875 3.82843,-1.47174 5.22412,-2.00498 5.90625,-2.40625 0.6796,-0.39978 1.61165,-0.87937 2.21875,-1.53125 1.82685,-0.13775 3.57075,-0.49323 4.9375,-1 2.96812,-1.10052 4.87537,-1.80619 6.78125,-2.46875 1.90586,-0.66254 2.35409,-1.41487 3.40625,-1.78125 1.09155,-0.38011 2.19511,-0.16538 6.0625,-1.53125 3.86745,-1.36586 5.28316,-1.82708 5.96875,-2.21875 0.70109,-0.40052 1.70081,-0.93298 2.3125,-1.59375 1.9708,-0.0547 3.81685,-0.38463 5.28125,-0.875 3.00148,-1.00508 4.92615,-1.62171 6.84375,-2.25 1.5386,-0.5041 2.17402,-1.04677 2.90625,-1.4375 0.23016,-0.13431 0.47574,-0.25373 0.75,-0.34375 1.09823,-0.36048 2.18145,-0.0814 6.09375,-1.3125 3.91233,-1.23113 5.36605,-1.67295 6.0625,-2.03125 0.69388,-0.35697 1.63015,-0.79261 2.25,-1.40625 1.86521,-0.0227 3.63581,-0.26683 5.03125,-0.6875 3.03043,-0.91354 4.99238,-1.4301 6.9375,-1.96875 1.94511,-0.53864 2.42618,-1.26452 3.5,-1.5625 1.11401,-0.30915 2.21994,0.007 6.1875,-1.03125 3.96761,-1.03863 5.41758,-1.43273 6.125,-1.75 0.73487,-0.32959 1.81383,-0.75372 2.4375,-1.375 1.99774,0.116 3.85743,-0.0201 5.34375,-0.375 3.07811,-0.735 5.08344,-1.10094 7.0625,-1.5 1.58792,-0.32018 2.24429,-0.79055 3,-1.09375 0.23757,-0.1068 0.46695,-0.19276 0.75,-0.25 1.13347,-0.22919 2.30448,0.20893 6.34375,-0.5 4.03933,-0.70893 5.50025,-0.92709 6.21875,-1.1875 0.71586,-0.25944 1.70428,-0.56724 2.34375,-1.09375 1.92427,0.23949 
3.74788,0.22453 5.1875,0 3.12633,-0.48762 5.15455,-0.70067 7.15625,-0.96875 2.00171,-0.26807 2.48869,-0.94514 3.59375,-1.09375 1.14639,-0.15418 2.27592,0.30157 6.34375,-0.21875 4.06784,-0.52032 5.56013,-0.69573 6.28125,-0.9375 0.7371,-0.24714 1.79809,-0.58623 2.4375,-1.125 2.05007,0.33553 3.97378,0.39796 5.5,0.21875 3.14231,-0.36896 5.17994,-0.55936 7.1875,-0.78125 1.61076,-0.17802 2.26467,-0.6082 3.03125,-0.84375 0.24094,-0.0855 0.49412,-0.1556 0.78125,-0.1875 1.14978,-0.12772 2.30129,0.34665 6.375,-0.125 4.07374,-0.47165 5.55909,-0.6106 6.28125,-0.84375 0.71946,-0.23227 1.70024,-0.47346 2.34375,-0.96875 1.93637,0.33346 3.77006,0.40424 5.21875,0.25 3.14602,-0.33495 5.17756,-0.51859 7.1875,-0.71875 2.00996,-0.20014 2.48414,-0.82639 3.59375,-0.9375 1.15114,-0.11528 2.29643,0.36506 6.375,-0.0625 4.07861,-0.42756 5.58886,-0.56209 6.3125,-0.78125 0.73915,-0.22386 1.79572,-0.51325 2.4375,-1.03125 2.0571,0.39867 4.00187,0.4934 5.53125,0.34375 3.14873,-0.3081 5.17584,-0.47325 7.1875,-0.65625 1.61407,-0.14682 2.2631,-0.56055 3.03125,-0.78125 0.24142,-0.0809 0.49353,-0.12991 0.78125,-0.15625 1.15211,-0.10545 2.29296,0.39275 6.375,0 4.08208,-0.39275 5.5889,-0.53084 6.3125,-0.75 0.7209,-0.21833 1.6997,-0.4477 2.3438,-0.9375 1.938,0.34999 3.7688,0.45438 5.2187,0.3125 3.1487,-0.3081 5.1758,-0.47325 7.1875,-0.65625 2.0116,-0.18299 2.5142,-0.83802 3.625,-0.9375 1.1523,-0.10323 2.2922,0.38483 6.375,0 4.0829,-0.38482 5.5887,-0.501 6.3125,-0.71875 0.7393,-0.22243 1.7956,-0.51449 2.4375,-1.03125 2.0574,0.40177 4.0029,0.50333 5.5313,0.34375 3.1466,-0.32852 5.1771,-0.5227 7.1875,-0.71875 1.613,-0.15729 2.2656,-0.63148 3.0312,-0.875 0.2407,-0.088 0.4632,-0.12137 0.75,-0.15625 1.1483,-0.1397 2.3167,0.33991 6.375,-0.25 4.0583,-0.58992 5.5618,-0.77714 6.2813,-1.03125 0.7167,-0.25316 1.6745,-0.55807 2.3125,-1.09375 1.9197,0.21194 3.7199,0.15141 5.1562,-0.0937 3.1191,-0.5324 5.1116,-0.92861 7.0938,-1.3125 1.9821,-0.38387 2.4743,-1.03965 3.5625,-1.28125 1.1288,-0.25066 2.2703,0.11629 6.25,-0.875 3.9796,-0.99128 5.4296,-1.4193 6.125,-1.78125 0.7223,-0.37601 1.7619,-0.87058 2.375,-1.53125 1.963,-0.012 3.7937,-0.29105 5.2187,-0.84375 2.9512,-1.14461 4.8732,-1.86942 6.6875,-2.75 1.4557,-0.70653 2.3348,-1.68641 2.5469,-1.98438 0.2122,-0.29796 0.1118,-0.7453 0.1379,-0.76675 0.043,-0.0352 0.3193,-0.085 0.479,-0.42844 0.8589,-1.84708 2.321,-5.64459 2.4352,-6.32945 0.1137,-0.68216 0.1638,-1.34774 0.2145,-1.74497 0.029,-0.22952 -0.1467,-0.86544 -0.1246,-0.92404 0.031,-0.0821 0.3045,-0.26528 0.3599,-0.51471 0.2663,-1.19833 0.089,-2.19129 -0.1251,-3.60893 -0.214,-1.41764 -0.9837,-4.62214 -1.6369,-5.47626 -0.6589,-0.86172 -1.2229,-1.01117 -1.7479,-1.00066 -0.2086,0.26976 0.1368,0.26309 0.1626,0.31261 0.6806,0.0508 0.934,0.36864 1.4192,0.89662 0.4852,0.52798 1.4428,3.93956 1.5794,5.38995 0.1366,1.45039 0.19,2.8602 -0.088,3.46864 -0.2781,0.60845 -0.9442,0.42864 -1.2366,0.49452 0.531,0.18589 0.8908,0.21322 0.9524,1.05768 0.059,0.81338 -0.1332,1.63969 -0.5198,2.80562 -0.3912,1.18001 -1.8452,4.34998 -2.2857,4.59877 -0.4523,0.25551 -0.9524,0.18199 -1.288,0.0511 z"
- style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6997-5);enable-background:new"
- sodipodi:nodetypes="cssscscsscsssccscssssssscscsscsssscscssssssscscsscsssscscssssssscscsscsssscscssssssscscsscsssscscssssssscscsscsssccscsscscssscssssscsssssscssssscsssssscssssscsssssscssssscsssssscssssscsssssscssssscsszsszssszzcczzzczzzc" />
- <path
- inkscape:connector-curvature="0"
- id="path8103-3"
- d="m 988.75,-263.84375 c 1.91161,0.6344 4.55027,1.75841 6.125,2.8125 1.57477,1.05409 2.8961,1.48252 5.5313,3.375 2.6082,1.87314 5.0269,3.01522 7.3125,4.0625 2.4693,1.13147 5.7521,2.15474 9.5312,3.9375 -1.2072,-1.2584 -7.139,-3.36445 -9.0312,-4.1875 -1.8922,-0.82304 -4.128,-1.93049 -6.9375,-3.78125 -2.80961,-1.85075 -3.62224,-2.48154 -6.00005,-3.71875 -2.37782,-1.23719 -4.07988,-1.9492 -6.53125,-2.5 z"
- style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6953-8);enable-background:new" />
- <path
- inkscape:connector-curvature="0"
- id="path8105-1"
- d="m 957.5,-260.78125 c 1.91,0.6181 4.58288,1.70934 6.15625,2.75 1.57339,1.04066 2.89608,1.48252 5.53125,3.375 2.60823,1.87315 5.02692,3.01521 7.3125,4.0625 2.46931,1.13147 5.75213,2.15475 9.53125,3.9375 -1.20728,-1.2584 -7.20154,-3.3957 -9.09375,-4.21875 -1.89217,-0.82304 -4.09666,-1.9305 -6.90625,-3.78125 -2.80958,-1.85075 -3.59295,-2.43932 -5.96875,-3.65625 -2.37578,-1.21691 -4.11321,-1.93885 -6.5625,-2.46875 z"
- style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6993-3);enable-background:new" />
- <path
- inkscape:connector-curvature="0"
- id="path8107-4"
- d="m 926.09375,-257.375 c 1.90772,0.59745 4.55348,1.66384 6.125,2.6875 1.5715,1.02365 2.87022,1.43971 5.5,3.28125 2.60291,1.82273 5.02887,2.9722 7.3125,4 2.4672,1.11041 5.75535,2.09323 9.53125,3.84375 -1.20623,-1.2481 -7.1719,-3.31809 -9.0625,-4.125 -1.89058,-0.8069 -4.10242,-1.89104 -6.90625,-3.6875 -2.80385,-1.79644 -3.62704,-2.40251 -6,-3.59375 -2.37297,-1.19124 -4.05362,-1.90283 -6.5,-2.40625 z"
- style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6989-8);enable-background:new" />
- <path
- inkscape:connector-curvature="0"
- id="path8109-6"
- d="m 894.90625,-253.5625 c 1.90213,0.55355 4.58701,1.58887 6.15625,2.59375 1.56923,1.00487 2.87401,1.40864 5.5,3.21875 2.59912,1.79164 5.00034,2.87189 7.28125,3.875 2.46428,1.08374 5.75984,2.04029 9.53125,3.75 -1.2048,-1.23507 -7.17416,-3.24478 -9.0625,-4.03125 -1.88832,-0.78647 -4.0752,-1.8308 -6.875,-3.59375 -2.79977,-1.76294 -3.59919,-2.36836 -5.96875,-3.53125 -2.36957,-1.16288 -4.12325,-1.83412 -6.5625,-2.28125 z"
- style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6985-6);enable-background:new" />
- <path
- inkscape:connector-curvature="0"
- id="path8111-9"
- d="m 863.71875,-248.65625 c 1.88062,0.42909 4.50427,1.38038 6.0625,2.3125 1.55823,0.93211 2.85233,1.25776 5.46875,3 2.58971,1.72444 4.98067,2.70802 7.25,3.625 2.45176,0.99069 5.73959,1.87707 9.5,3.5 -1.20131,-1.20734 -7.15249,-3.06609 -9.03125,-3.78125 -1.87875,-0.71517 -4.0854,-1.68442 -6.875,-3.375 -2.78963,-1.69057 -3.58461,-2.22822 -5.9375,-3.28125 -2.35292,-1.05301 -4.02584,-1.71248 -6.4375,-2 z"
- style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6965-3);enable-background:new" />
- <path
- inkscape:connector-curvature="0"
- id="path8113-4"
- d="m 833.15625,-241.375 c 1.84836,0.29644 4.46945,0.97632 6,1.78125 1.53058,0.80493 2.81374,1.05573 5.375,2.53125 2.53504,1.46046 4.89068,2.32509 7.125,3.0625 2.41399,0.79668 5.65711,1.46689 9.375,2.84375 -1.18771,-1.12873 -7.08772,-2.58975 -8.9375,-3.15625 -1.84977,-0.5665 -4.00342,-1.37392 -6.75,-2.84375 -2.74657,-1.46983 -3.50136,-1.92028 -5.8125,-2.78125 -2.31115,-0.86095 -4.00471,-1.32009 -6.375,-1.4375 z"
- style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6981-3);enable-background:new" />
- <path
- inkscape:connector-curvature="0"
- id="path8115-2"
- d="m 802.90625,-232.3125 c 1.8222,0.21127 4.36576,0.80057 5.875,1.53125 1.50925,0.73066 2.75568,0.92998 5.28125,2.28125 2.49976,1.33746 4.83154,2.04843 7.03125,2.65625 2.37653,0.65667 5.56464,1.07288 9.21875,2.1875 -1.16735,-1.04496 -6.92888,-2.10329 -8.75,-2.5625 -1.82111,-0.45921 -3.95225,-1.12696 -6.65625,-2.4375 -2.70403,-1.31052 -3.47106,-1.7199 -5.75,-2.46875 -2.27895,-0.74883 -3.91325,-1.17931 -6.25,-1.1875 z"
- style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6977-6);enable-background:new" />
- <path
- inkscape:connector-curvature="0"
- id="path8117-2"
- d="m 773.1875,-222.1875 c 1.81109,0.1787 4.32059,0.66506 5.8125,1.34375 1.49194,0.67869 2.7534,0.79822 5.25,2.0625 2.47107,1.25138 4.79005,1.89614 6.96875,2.4375 2.35387,0.58488 5.49134,0.89752 9.09375,1.84375 -1.15084,-0.99116 -6.85251,-1.7833 -8.65625,-2.1875 -1.80372,-0.4042 -3.91553,-1.02116 -6.59375,-2.25 -2.67818,-1.22884 -3.40345,-1.61089 -5.65625,-2.28125 -2.25279,-0.67034 -3.89627,-1.00232 -6.21875,-0.96875 z"
- style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6973-4);enable-background:new" />
- <path
- inkscape:connector-curvature="0"
- id="path8119-6"
- d="m 743.5625,-211.1875 c 1.79281,0.12911 4.27313,0.54965 5.75,1.1875 1.4769,0.63785 2.7161,0.74156 5.1875,1.9375 2.44618,1.18372 4.72054,1.74666 6.875,2.21875 2.32767,0.51003 5.4196,0.68064 9,1.5625 -1.14379,-0.9706 -6.74759,-1.59065 -8.53125,-1.9375 -1.78367,-0.34684 -3.88285,-0.88756 -6.53125,-2.03125 -2.64841,-1.14368 -3.39495,-1.51631 -5.625,-2.125 -2.23008,-0.60868 -3.82594,-0.90966 -6.125,-0.8125 z"
- style="display:inline;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter6969-8);enable-background:new" />
- <g
- id="g8121-4"
- style="fill:#ffffff;fill-opacity:1;filter:url(#filter7345-9)">
- <path
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="czzzczzc"
- id="path8123-1"
- d="m 744.9375,-212.11731 c 0,0 7.22229,-3.22318 9.0625,-3.5 1.84021,-0.27682 3.35225,-0.003 6,0.5625 2.64775,0.56573 8.7357,2.21518 11.1875,3.375 2.4518,1.15982 5.3125,3.5625 5.3125,3.5625 0,0 -7.14644,-2.78019 -10.1875,-3.5625 -3.04106,-0.78231 -7.64461,-2.08374 -10.375,-2.3125 -2.73039,-0.22876 -11,1.875 -11,1.875 z"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
- <path
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="czzzczzc"
- id="path8125-2"
- d="m 735.46875,-206.95416 c 0,0 3.65979,-2.22318 5.5,-2.5 1.84021,-0.27682 3.66475,0.24677 6.3125,0.8125 2.64775,0.56573 8.7357,2.21518 11.1875,3.375 2.4518,1.15982 6.5625,2.125 6.5625,2.125 0,0 -8.39644,-1.34269 -11.4375,-2.125 -3.04106,-0.78231 -7.95711,-2.33374 -10.6875,-2.5625 -2.73039,-0.22876 -7.4375,0.875 -7.4375,0.875 z"
- style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
- <path
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="czzzczzc"
- id="path8127-8"
- d="m 759.85042,-217.61116 c 0,0 8.5437,-3.29857 10.39778,-3.45786 1.85409,-0.1593 3.64166,0.4792 6.2481,1.21208 2.60644,0.73288 8.57724,2.76594 10.95036,4.07925 2.37312,1.31331 6.41417,2.53782 6.41417,2.53782 0,0 -8.29413,-1.87365 -11.27931,-2.84767 -2.98519,-0.97402 -7.79269,-2.83478 -10.50302,-3.23662 -2.71033,-0.40184 -12.22808,1.713 -12.22808,1.713 z"
- style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
- <path
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="czzzczzc"
- id="path8129-8"
- d="m 775.19813,-223.2266 c 0,0 7.77133,-2.78244 9.62831,-2.90349 1.85697,-0.12104 3.631,0.55417 6.22178,1.34062 2.59077,0.78645 8.5184,2.94217 10.86394,4.30412 2.34555,1.36195 6.36049,2.6695 6.36049,2.6695 0,0 -8.25373,-2.04423 -11.21821,-3.07958 -2.96447,-1.03535 -7.73259,-2.99481 -10.43406,-3.45243 -2.70147,-0.45763 -11.42225,1.12126 -11.42225,1.12126 z"
- style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
- <path
- inkscape:connector-curvature="0"
- inkscape:transform-center-y="-4.3190906"
- inkscape:transform-center-x="13.852145"
- sodipodi:nodetypes="czzzczzc"
- id="path8131-9"
- d="m 789.64298,-227.95417 c 0,0 8.68256,-3.52031 10.54154,-3.60535 1.85897,-0.085 3.61958,0.62442 6.19463,1.46093 2.57505,0.83649 8.45979,3.10666 10.77851,4.5138 2.31872,1.40715 6.30757,2.79224 6.30757,2.79224 0,0 -8.21257,-2.20377 -11.15643,-3.29636 -2.94386,-1.09259 -7.67312,-3.14408 -10.36522,-3.65397 -2.69209,-0.50988 -12.3006,1.78871 -12.3006,1.78871 z"
- style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
- <path
- inkscape:connector-curvature="0"
- inkscape:transform-center-y="-4.3190906"
- inkscape:transform-center-x="13.852145"
- sodipodi:nodetypes="czzzczzc"
- id="path8133-2"
- d="m 804.49513,-233.32948 c 0,0 7.80756,-2.58281 9.66654,-2.66785 1.85897,-0.085 3.61958,0.62442 6.19463,1.46093 2.57505,0.83649 8.45979,3.10666 10.77851,4.5138 2.31872,1.40715 6.30757,2.79224 6.30757,2.79224 0,0 -8.21257,-2.20377 -11.15643,-3.29636 -2.94386,-1.09259 -7.67312,-3.14408 -10.36522,-3.65397 -2.69209,-0.50988 -11.4256,0.85121 -11.4256,0.85121 z"
- style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
- <path
- inkscape:connector-curvature="0"
- inkscape:transform-center-y="-4.3190906"
- inkscape:transform-center-x="13.852145"
- sodipodi:nodetypes="czzzczzc"
- id="path8135-8"
- d="m 819.55763,-237.57948 c 0,0 8.55756,-2.58281 10.41654,-2.66785 1.85897,-0.085 3.61958,0.62442 6.19463,1.46093 2.57505,0.83649 8.45979,3.10666 10.77851,4.5138 2.31872,1.40715 6.30757,2.79224 6.30757,2.79224 0,0 -8.21257,-2.20377 -11.15643,-3.29636 -2.94386,-1.09259 -7.67312,-3.14408 -10.36522,-3.65397 -2.69209,-0.50988 -12.1756,0.85121 -12.1756,0.85121 z"
- style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
- <path
- inkscape:connector-curvature="0"
- inkscape:transform-center-y="-4.9269042"
- inkscape:transform-center-x="13.64141"
- sodipodi:nodetypes="czzzczzc"
- id="path8137-8"
- d="m 836.23395,-242.60125 c 0,0 6.96702,-1.98723 8.82784,-1.96757 1.86081,0.0197 3.57873,0.82702 6.10265,1.80705 2.52393,0.98 8.27166,3.57758 10.50756,5.11291 2.2359,1.53535 6.14053,3.14261 6.14053,3.14261 0,0 -8.07561,-2.66222 -10.95336,-3.91866 -2.87774,-1.25645 -7.48412,-3.5707 -10.14328,-4.23121 -2.65915,-0.66049 -10.48194,0.0549 -10.48194,0.0549 z"
- style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
- <path
- inkscape:connector-curvature="0"
- inkscape:transform-center-y="-5.1542119"
- inkscape:transform-center-x="13.55068"
- sodipodi:nodetypes="czzzczzc"
- id="path8139-8"
- d="m 850.73028,-246.00461 c 0,0 7.68784,-2.02768 9.54782,-1.96854 1.85997,0.0592 3.56038,0.90279 6.06293,1.93616 2.50255,1.03334 8.19387,3.75232 10.39668,5.33475 2.20282,1.58245 6.07245,3.2722 6.07245,3.2722 0,0 -8.01729,-2.83298 -10.86772,-4.15022 -2.85043,-1.31723 -7.40666,-3.72872 -10.0512,-4.4455 -2.64454,-0.71678 -11.16096,0.0211 -11.16096,0.0211 z"
- style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
- <path
- inkscape:connector-curvature="0"
- inkscape:transform-center-y="-5.4740887"
- inkscape:transform-center-x="13.41151"
- sodipodi:nodetypes="czzzczzc"
- id="path8141-6"
- d="m 864.82496,-249.21081 c 0,0 8.16952,-1.96906 10.02688,-1.85396 1.85735,0.11512 3.53158,1.00956 6.0019,2.11779 2.47031,1.10821 8.0772,3.99727 10.23138,5.64531 2.15418,1.64804 5.9712,3.45352 5.9712,3.45352 0,0 -7.92839,-3.07306 -10.73787,-4.4755 -2.80949,-1.40244 -7.29106,-3.94999 -9.91283,-4.74606 -2.62176,-0.79606 -11.58066,-0.1411 -11.58066,-0.1411 z"
- style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
- <path
- inkscape:connector-curvature="0"
- inkscape:transform-center-y="-5.79376"
- inkscape:transform-center-x="13.258805"
- sodipodi:nodetypes="czzzczzc"
- id="path8143-8"
- d="m 881.38485,-251.60282 c 0,0 8.08536,-1.90809 9.93837,-1.73664 1.853,0.17147 3.4993,1.11633 5.93482,2.29908 2.43553,1.18271 7.95209,4.2407 10.05523,5.95339 2.10314,1.7127 5.86357,3.63326 5.86357,3.63326 0,0 -7.8314,-3.3124 -10.597,-4.7995 -2.76561,-1.48712 -7.16775,-4.16959 -9.76414,-5.04491 -2.59637,-0.87531 -11.43085,-0.30468 -11.43085,-0.30468 z"
- style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
- <path
- inkscape:connector-curvature="0"
- inkscape:transform-center-y="-5.7433893"
- inkscape:transform-center-x="13.28378"
- sodipodi:nodetypes="czzzczzc"
- id="path8145-3"
- d="m 896.58415,-254.34724 c 0,0 7.64166,-1.4277 9.49547,-1.26515 1.8538,0.16256 3.50462,1.0995 5.94579,2.27053 2.44118,1.171 7.97238,4.20246 10.08372,5.90502 2.11134,1.70258 5.88096,3.60505 5.88096,3.60505 0,0 -7.84723,-3.27474 -10.61995,-4.74855 -2.77271,-1.4738 -7.18769,-4.13509 -9.78825,-4.99793 -2.60055,-0.86282 -10.99774,-0.76897 -10.99774,-0.76897 z"
- style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
- <path
- inkscape:connector-curvature="0"
- inkscape:transform-center-y="-5.7433893"
- inkscape:transform-center-x="13.28378"
- sodipodi:nodetypes="czzzczzc"
- id="path8147-8"
- d="m 911.45328,-255.98544 c 0,0 8.64166,-1.5527 10.49547,-1.39015 1.8538,0.16256 3.50462,1.0995 5.94579,2.27053 2.44118,1.171 7.97238,4.20246 10.08372,5.90502 2.11134,1.70258 5.88096,3.60505 5.88096,3.60505 0,0 -7.84723,-3.27474 -10.61995,-4.74855 -2.77271,-1.4738 -7.18769,-4.13509 -9.78825,-4.99793 -2.60055,-0.86282 -11.99774,-0.64397 -11.99774,-0.64397 z"
- style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
- <path
- inkscape:connector-curvature="0"
- inkscape:transform-center-y="-5.7433893"
- inkscape:transform-center-x="13.28378"
- sodipodi:nodetypes="czzzczzc"
- id="path8149-3"
- d="m 927.70328,-258.29794 c 0,0 7.64166,-0.8652 9.49547,-0.70265 1.8538,0.16256 3.50462,1.0995 5.94579,2.27053 2.44118,1.171 7.97238,4.20246 10.08372,5.90502 2.11134,1.70258 5.88096,3.60505 5.88096,3.60505 0,0 -7.84723,-3.27474 -10.61995,-4.74855 -2.77271,-1.4738 -7.18769,-4.13509 -9.78825,-4.99793 -2.60055,-0.86282 -10.99774,-1.33147 -10.99774,-1.33147 z"
- style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
- <path
- inkscape:connector-curvature="0"
- inkscape:transform-center-y="-5.7433893"
- inkscape:transform-center-x="13.28378"
- sodipodi:nodetypes="czzzczzc"
- id="path8151-3"
- d="m 942.82828,-259.48544 c 0,0 8.57916,-1.4902 10.43297,-1.32765 1.8538,0.16256 3.50462,1.0995 5.94579,2.27053 2.44118,1.171 7.97238,4.20246 10.08372,5.90502 2.11134,1.70258 5.88096,3.60505 5.88096,3.60505 0,0 -7.84723,-3.27474 -10.61995,-4.74855 -2.77271,-1.4738 -7.18769,-4.13509 -9.78825,-4.99793 -2.60055,-0.86282 -11.93524,-0.70647 -11.93524,-0.70647 z"
- style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
- <path
- inkscape:connector-curvature="0"
- inkscape:transform-center-y="-5.7433893"
- inkscape:transform-center-x="13.28378"
- sodipodi:nodetypes="czzzczzc"
- id="path8153-3"
- d="m 959.07828,-261.54794 c 0,0 7.82916,-0.8027 9.68297,-0.64015 1.8538,0.16256 3.50462,1.0995 5.94579,2.27053 2.44118,1.171 7.97238,4.20246 10.08372,5.90502 2.11134,1.70258 5.88096,3.60505 5.88096,3.60505 0,0 -7.84723,-3.27474 -10.61995,-4.74855 -2.77271,-1.4738 -7.18769,-4.13509 -9.78825,-4.99793 -2.60055,-0.86282 -11.18524,-1.39397 -11.18524,-1.39397 z"
- style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
- <path
- inkscape:connector-curvature="0"
- inkscape:transform-center-y="-5.7433893"
- inkscape:transform-center-x="13.28378"
- sodipodi:nodetypes="czzzczzc"
- id="path8155-8"
- d="m 974.45328,-262.79794 c 0,0 8.39166,-1.1777 10.24547,-1.01515 1.8538,0.16256 3.50462,1.0995 5.94579,2.27053 2.44118,1.171 7.97238,4.20246 10.08376,5.90502 2.1113,1.70258 5.8809,3.60505 5.8809,3.60505 0,0 -7.84721,-3.27474 -10.61993,-4.74855 -2.77271,-1.4738 -7.18769,-4.13509 -9.78825,-4.99793 -2.60055,-0.86282 -11.74774,-1.01897 -11.74774,-1.01897 z"
- style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
- <path
- inkscape:connector-curvature="0"
- inkscape:transform-center-y="-5.7433893"
- inkscape:transform-center-x="13.28378"
- sodipodi:nodetypes="czzzczzc"
- id="path8157-0"
- d="m 990.64078,-264.86044 c 0,0 6.89166,-0.9902 8.74547,-0.82765 1.85385,0.16256 3.50465,1.0995 5.94575,2.27053 2.4412,1.171 7.9724,4.20246 10.0838,5.90502 2.1113,1.70258 5.8809,3.60505 5.8809,3.60505 0,0 -7.8472,-3.27474 -10.6199,-4.74855 -2.7727,-1.4738 -7.1877,-4.13509 -9.7883,-4.99793 -2.60053,-0.86282 -10.24772,-1.20647 -10.24772,-1.20647 z"
- style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
- <path
- inkscape:connector-curvature="0"
- inkscape:transform-center-y="-5.7433893"
- inkscape:transform-center-x="13.28378"
- sodipodi:nodetypes="czzzczzc"
- id="path8159-4"
- d="m 1007.7658,-265.79794 c 0,0 6.8291,-1.1777 8.683,-1.01515 1.8538,0.16256 3.5046,1.0995 5.9457,2.27053 2.4412,1.171 7.9724,4.20246 10.0838,5.90502 2.1113,1.70258 5.8809,3.60505 5.8809,3.60505 0,0 -7.8472,-3.27474 -10.6199,-4.74855 -2.7727,-1.4738 -7.1877,-4.13509 -9.7883,-4.99793 -2.6005,-0.86282 -10.1852,-1.01897 -10.1852,-1.01897 z"
- style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
- <path
- inkscape:connector-curvature="0"
- inkscape:transform-center-y="-5.7433893"
- inkscape:transform-center-x="13.28378"
- sodipodi:nodetypes="czzzczzc"
- id="path8161-7"
- d="m 1023.8908,-267.79794 c 0,0 6.0791,-0.4277 7.933,-0.26515 1.8538,0.16256 3.5046,1.0995 5.9457,2.27053 2.4412,1.171 7.9724,4.20246 10.0838,5.90502 2.1113,1.70258 5.8809,3.60505 5.8809,3.60505 0,0 -7.8472,-3.27474 -10.6199,-4.74855 -2.7727,-1.4738 -7.1877,-4.13509 -9.7883,-4.99793 -2.6005,-0.86282 -9.4352,-1.76897 -9.4352,-1.76897 z"
- style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
- <path
- inkscape:connector-curvature="0"
- inkscape:transform-center-y="-5.7433893"
- inkscape:transform-center-x="13.28378"
- sodipodi:nodetypes="czzzczzc"
- id="path8163-6"
- d="m 1039.7033,-269.17294 c 0,0 6.4541,-0.6777 8.308,-0.51515 1.8538,0.16256 3.5046,1.0995 5.9457,2.27053 2.4412,1.171 7.9724,4.20246 10.0838,5.90502 2.1113,1.70258 5.8809,3.60505 5.8809,3.60505 0,0 -7.8472,-3.27474 -10.6199,-4.74855 -2.7727,-1.4738 -7.1877,-4.13509 -9.7883,-4.99793 -2.6005,-0.86282 -9.8102,-1.51897 -9.8102,-1.51897 z"
- style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
- <path
- inkscape:connector-curvature="0"
- inkscape:transform-center-y="-5.1360724"
- inkscape:transform-center-x="13.55813"
- sodipodi:nodetypes="czzzczzc"
- id="path8165-8"
- d="m 1055.2718,-271.03319 c 0,0 5.4976,-0.90945 7.3578,-0.85348 1.8601,0.056 3.5619,0.89674 6.0661,1.92586 2.5044,1.0291 8.2003,3.7384 10.4058,5.31709 2.2055,1.57871 6.078,3.2619 6.078,3.2619 0,0 -8.022,-2.81939 -10.8748,-4.13178 -2.8526,-1.31238 -7.4129,-3.71613 -10.0587,-4.42843 -2.6457,-0.71228 -8.9742,-1.09116 -8.9742,-1.09116 z"
- style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
- <path
- inkscape:connector-curvature="0"
- inkscape:transform-center-y="-4.6370147"
- inkscape:transform-center-x="13.74758"
- sodipodi:nodetypes="czzzczzc"
- id="path8167-9"
- d="m 1072.7007,-273.48537 c 0,0 4.5472,-1.15581 6.408,-1.18621 1.8607,-0.0304 3.5996,0.73049 6.1489,1.64231 2.5494,0.91177 8.3649,3.35386 10.6414,4.8285 2.2763,1.47468 6.2227,2.97636 6.2227,2.97636 0,0 -8.1442,-2.44411 -11.0547,-3.62272 -2.9105,-1.1786 -7.5774,-3.36815 -10.2534,-3.95691 -2.6759,-0.58875 -8.1129,-0.68133 -8.1129,-0.68133 z"
- style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
- <path
- inkscape:connector-curvature="0"
- inkscape:transform-center-y="-4.4842392"
- inkscape:transform-center-x="13.79933"
- sodipodi:nodetypes="czzzczzc"
- id="path8169-0"
- d="m 1087.1585,-276.5244 c 0,0 5.96,-1.77355 7.8202,-1.83024 1.86,-0.0567 3.6096,0.67955 6.1715,1.55525 2.562,0.87566 2.5226,0.85713 5.3335,1.49015 2.7969,0.62986 7.0767,1.51313 7.0767,1.51313 0,0 -3.6155,-0.0163 -6.7923,-0.46614 -3.1155,-0.44119 -7.3743,-1.69825 -10.0584,-2.24913 -2.6839,-0.55088 -9.5512,-0.013 -9.5512,-0.013 z"
- style="display:inline;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new" />
- <path
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="czczc"
- id="path8171-6"
- d="m 1099.25,-279.92981 c 0.1612,0.26862 11.2081,-4.60046 12.1875,-4.6875 0.9794,-0.087 2,3.125 2,3.125 0,0 -0.7751,-1.50434 -2.875,-1.0625 -2.0999,0.44184 -11.3009,2.67141 -11.3125,2.625 z"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
- </g>
- <path
- inkscape:connector-curvature="0"
- id="path8173-8"
- d="m 1107.4532,-284.0938 c -0.4187,0.21283 -0.1556,0.0939 -0.6472,0.30581 -0.4861,0.20954 -1.7234,0.57439 -4.0796,1.45895 -3.3311,1.25057 -5.8302,2.15344 -7.0259,3.0661 -1.5361,0.0213 -3.7205,0.23331 -5.6563,0.71875 -2.9815,0.74766 -4.8552,1.17401 -6.75,1.59375 -1.8948,0.41972 -1.6755,0.64219 -2.875,0.875 -1.2966,0.25166 -1.7214,-0.009 -5.4375,0.78125 -3.4899,0.74214 -8.8948,1.93107 -10.1562,2.6875 -1.5839,-0.18079 -3.8675,-0.32178 -5.8438,-0.0312 -3.0404,0.44695 -4.9162,0.67276 -6.8437,0.90625 -0.6554,0.0794 -1.0411,0.20078 -1.3438,0.28125 -0.4262,0.13165 -0.6858,0.26002 -1.375,0.34375 -1.3116,0.15936 -1.7622,-0.15683 -5.5312,0.28125 -3.5539,0.41308 -9.0054,1.27282 -10.25,1.9375 -1.599,-0.2973 -3.8578,-0.53419 -5.8438,-0.34375 -3.0588,0.29331 -4.972,0.48399 -6.9062,0.65625 -1.9343,0.17226 -1.6887,0.42237 -2.9063,0.53125 -1.3162,0.11769 -1.7598,-0.16363 -5.5312,0.25 -3.5419,0.38844 -9.0079,1.20927 -10.2813,1.875 -1.5989,-0.2947 -3.88718,-0.50701 -5.87501,-0.3125 -3.05824,0.29924 -4.94113,0.48024 -6.875,0.65625 -0.65749,0.0598 -1.04004,0.17856 -1.34375,0.25 -0.42765,0.11895 -0.68351,0.21807 -1.375,0.28125 -1.31596,0.12025 -1.75976,-0.19488 -5.53125,0.21875 -3.55614,0.39001 -9.00554,1.23916 -10.25,1.90625 -1.59863,-0.29419 -3.85984,-0.52372 -5.84375,-0.3125 -3.0556,0.32532 -4.97404,0.52624 -6.90625,0.71875 -1.93221,0.1925 -1.68987,0.44088 -2.90625,0.5625 -1.31488,0.13146 -1.76298,-0.16454 -5.53125,0.28125 -3.53887,0.41865 -8.97768,1.29217 -10.25,1.96875 -1.59755,-0.28105 -3.85996,-0.42043 -5.84375,-0.1875 -3.05198,0.35836 -4.94508,0.56786 -6.875,0.78125 -0.6562,0.0726 -1.04066,0.17269 -1.34375,0.25 -0.42677,0.12722 -0.68491,0.2672 -1.375,0.34375 -1.31333,0.14568 -1.76746,-0.17402 -5.53125,0.3125 -3.54889,0.45875 -8.97863,1.41902 -10.21875,2.125 -1.59305,-0.24424 -3.83381,-0.38135 -5.8125,-0.125 -3.04759,0.39481 -4.95071,0.64845 -6.875,0.90625 -1.92428,0.25779 -1.72611,0.49353 -2.9375,0.65625 -1.30946,0.1759 -1.74719,-0.10438 -5.5,0.46875 -3.52429,0.53824 -8.92315,1.69917 -10.1875,2.4375 -1.5875,-0.20354 -3.8455,-0.25473 -5.8125,0.0937 -3.02617,0.53612 -4.89889,0.86169 -6.8125,1.1875 -0.65061,0.11077 -1.01371,0.27094 -1.3125,0.375 -0.42067,0.16488 -0.66345,0.3313 -1.34375,0.46875 -1.29465,0.26159 -1.72712,-0.006 -5.4375,0.8125 -3.49853,0.77195 -8.84595,2.38293 -10.0625,3.21875 -1.56278,-0.0775 -3.75758,0.0853 -5.6875,0.59375 -2.97244,0.78313 -4.81761,1.23209 -6.6875,1.75 -1.86988,0.5179 -1.6666,0.76728 -2.84375,1.09375 -1.27246,0.3529 -1.69703,0.10709 -5.34375,1.1875 -3.4247,1.01463 -8.64944,2.93317 -9.875,3.84375 -1.53883,0.0127 -3.71983,0.27222 -5.625,0.875 -2.93106,0.92734 -4.75031,1.45842 -6.59375,2.0625 -0.62676,0.20538 -0.99173,0.39258 -1.28125,0.53125 -0.40763,0.21361 -0.65334,0.40875 -1.3125,0.625 -1.25446,0.41154 -1.68611,0.18904 -5.28125,1.4375 -3.38985,1.17717 -8.59498,3.2137 -9.78125,4.15625 -1.52389,0.0597 -3.65005,0.39487 -5.53125,1.0625 -2.89739,1.02829 -4.69908,1.67548 -6.53125,2.3125 -1.83217,0.63701 -1.62785,0.84854 -2.78125,1.25 -1.24678,0.43396 -1.66361,0.19972 -5.21875,1.5625 -3.33867,1.2798 -8.48715,3.48255 -9.6875,4.46875 -1.50718,0.10769 -3.63569,0.4988 -5.5,1.21875 -2.86818,1.1076 -4.6238,1.78156 -6.4375,2.46875 -0.61666,0.23363 -0.99641,0.44203 -1.28125,0.59375 0,0 0,1.09375 0,1.09375 0.11178,-0.22236 0.38599,-0.81743 0.90625,-1.09375 0.69797,-0.37072 4.81363,-1.99337 6.8125,-2.71875 1.65686,-0.60125 4.15389,-1.32868 5.96875,-1.3125 0.30162,0.003 0.58762,0.0509 0.84375,0.0937 1.84249,0.30825 7.46875,1.5625 7.46875,1.5625 -10e-6,0 -6.23349,-1.64675 -7.03125,-1.84375 
-0.19079,-0.0471 -0.53572,-0.0687 -0.96875,-0.0625 1.14546,-0.86971 4.761,-2.39351 7.34375,-3.4375 2.83822,-1.14727 3.11681,-1.25182 5.0625,-1.65625 2.0083,-0.41744 3.15625,-0.5 3.15625,-0.5 0,1e-5 -0.0824,-0.60114 0.96875,-1.125 0.7051,-0.35141 4.88702,-1.8924 6.90625,-2.5625 1.9519,-0.64773 5.0574,-1.3585 6.875,-1 1.86323,0.3675 7.53125,1.8125 7.53125,1.8125 1e-5,0 -6.287,-1.87111 -7.09375,-2.09375 -0.19292,-0.0533 -0.53084,-0.086 -0.96875,-0.0937 1.15834,-0.83288 4.79444,-2.19532 7.40625,-3.15625 2.87016,-1.05601 3.16734,-1.1618 5.125,-1.53125 1.85349,-0.34979 2.85884,-0.42548 3.03125,-0.4375 0.1136,-0.21724 0.37745,-0.81002 0.90625,-1.0625 0.70944,-0.33874 4.92607,-1.71275 6.96875,-2.3125 1.69317,-0.49711 4.24077,-1.03677 6.09375,-0.90625 0.30795,0.0217 0.61349,0.0973 0.875,0.15625 1.88118,0.42432 7.59375,2.03125 7.59375,2.03125 1e-5,0 -6.34174,-2.06525 -7.15625,-2.3125 -0.19479,-0.0591 -0.55788,-0.10394 -1,-0.125 1.16949,-0.79755 4.86302,-2.05622 7.5,-2.9375 2.89781,-0.96847 3.23301,-1.00332 5.21875,-1.28125 2.04965,-0.28689 3.1875,-0.3125 3.1875,-0.3125 -2e-5,0 -0.0727,-0.60697 1,-1.0625 0.7196,-0.30557 4.99098,-1.50075 7.0625,-2 2.00244,-0.48258 5.19849,-0.92829 7.0625,-0.40625 1.91078,0.53515 7.71875,2.5 7.71875,2.5 0,0 -6.42266,-2.42351 -7.25,-2.71875 -0.19784,-0.0706 -0.58216,-0.14039 -1.03125,-0.1875 1.1879,-0.72865 4.91527,-1.77408 7.59375,-2.5 2.94342,-0.79775 3.29208,-0.77083 5.3125,-0.90625 1.91289,-0.12823 2.94705,-0.0711 3.125,-0.0625 0.11728,-0.20366 0.39176,-0.77948 0.9375,-0.96875 0.73219,-0.25394 5.07852,-1.04789 7.1875,-1.375 1.74813,-0.27111 4.40088,-0.4847 6.3125,-0.0937 0.31766,0.065 0.60522,0.18551 0.875,0.28125 1.94074,0.68873 7.84375,3.09375 7.84375,3.09375 1e-5,0 -6.53471,-2.95077 -7.375,-3.3125 -0.20097,-0.0865 -0.57513,-0.16679 -1.03125,-0.25 1.2065,-0.63318 5.02956,-1.3956 7.75,-1.90625 2.98953,-0.56119 3.30023,-0.52954 5.34375,-0.53125 2.10926,-0.002 3.3125,0.125 3.3125,0.125 0,1e-5 -0.0727,-0.63119 1.03125,-0.9375 0.74052,-0.20547 5.12612,-0.83387 7.25,-1.0625 2.05302,-0.22099 5.31863,-0.25222 7.21875,0.46875 1.94779,0.73907 7.84375,3.375 7.84375,3.375 2e-5,0 -6.56288,-3.17897 -7.40625,-3.5625 -0.20168,-0.0917 -0.54221,-0.18621 -1,-0.28125 1.21092,-0.60188 4.98442,-1.24884 7.71875,-1.65625 3.0048,-0.44772 3.32551,-0.4517 5.375,-0.40625 1.94045,0.043 3.00699,0.19423 3.1875,0.21875 0.11892,-0.19316 0.3839,-0.76583 0.9375,-0.90625 0.74271,-0.18838 5.15429,-0.73428 7.28125,-0.9375 1.76303,-0.16842 4.42009,-0.23429 6.34375,0.25 0.31968,0.0805 0.60351,0.20359 0.875,0.3125 1.95293,0.78349 7.90625,3.46875 7.90625,3.46875 -2e-5,0 -6.59191,-3.25348 -7.4375,-3.65625 -0.20222,-0.0963 -0.57226,-0.20703 -1.03125,-0.3125 1.21414,-0.57427 5.04366,-1.12219 7.78125,-1.5 3.00838,-0.4152 3.32307,-0.44263 5.375,-0.375 2.11798,0.0698 3.3125,0.25 3.3125,0.25 -2e-5,0 -0.0772,-0.63741 1.03125,-0.90625 0.74362,-0.18035 5.15176,-0.66355 7.28125,-0.84375 2.05847,-0.17417 5.34324,-0.12432 7.25,0.65625 1.95459,0.80016 7.875,3.53125 7.875,3.53125 -2e-5,0 -6.55993,-3.30876 -7.40625,-3.71875 -0.20237,-0.0981 -0.57186,-0.2031 -1.03125,-0.3125 1.21517,-0.5639 5.01008,-1.1143 7.75,-1.46875 3.01091,-0.38952 3.32131,-0.39765 5.375,-0.3125 1.94439,0.0806 3.00663,0.25324 3.1875,0.28125 0.11916,-0.19086 0.38277,-0.74531 0.9375,-0.875 0.74426,-0.174 5.14993,-0.65047 7.28125,-0.8125 1.76662,-0.13427 4.44971,-0.12571 6.37501,0.375 0.32,0.0832 0.6033,0.20127 0.875,0.3125 1.9546,0.80016 7.9063,3.5625 7.9063,3.5625 -1e-4,0 -6.5912,-3.34001 -7.4375,-3.75 -0.2024,-0.0981 -0.5719,-0.20311 -1.0313,-0.3125 
1.2151,-0.5639 5.0413,-1.08306 7.7813,-1.4375 3.0109,-0.38953 3.3525,-0.4289 5.4062,-0.34375 2.1197,0.0879 3.3125,0.3125 3.3125,0.3125 0,0 -0.078,-0.64902 1.0313,-0.90625 0.7443,-0.17256 5.1495,-0.62336 7.2812,-0.78125 2.0606,-0.1526 5.3429,-0.0968 7.25,0.6875 1.955,0.80395 7.875,3.5 7.875,3.5 0,0 -6.5598,-3.27587 -7.4062,-3.6875 -0.2025,-0.0984 -0.5718,-0.20222 -1.0313,-0.3125 1.2154,-0.56154 5.0119,-1.12778 7.75,-1.5 3.009,-0.40905 3.3227,-0.41558 5.375,-0.34375 1.9431,0.068 3.0072,0.16485 3.1875,0.1875 0.1188,-0.1944 0.3846,-0.72881 0.9375,-0.875 0.7418,-0.19612 5.1311,-0.82878 7.25,-1.09375 1.7564,-0.21961 4.4053,-0.33231 6.3125,0.0312 0.3169,0.0604 0.6058,0.18938 0.875,0.28125 1.9362,0.66092 7.8438,2.9375 7.8438,2.9375 -10e-5,0 -6.5367,-2.80655 -7.375,-3.15625 -0.2005,-0.0836 -0.5762,-0.17333 -1.0313,-0.25 1.2037,-0.65046 5.0191,-1.37195 7.7188,-2 2.9667,-0.6902 3.2889,-0.75507 5.3125,-0.875 2.0886,-0.1238 3.2812,-0.0312 3.2812,-0.0312 0,1e-5 -0.087,-0.63205 1,-1.03125 0.7292,-0.2678 5.0472,-1.33797 7.125,-1.8125 2.0085,-0.45869 5.1679,-1.0293 7,-0.625 1.8781,0.41446 13.5782,3.01563 13.5782,3.01563 0,0 -12.3275,-3.02266 -13.1407,-3.26563 -0.1945,-0.0581 -0.5586,-0.10626 -1,-0.125 1.1676,-0.80369 3.5142,-1.6873 6.1094,-2.70312 1.6814,-0.65818 0.9237,-0.37659 2.7759,-1.0036 1.7536,-0.59366 2.4854,-1.01071 2.6304,-1.11299 0.3461,-0.20651 -0.356,-0.12188 -0.5442,-0.0424 z"
- style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7333-7);enable-background:new"
- sodipodi:nodetypes="czscsssscssssscsssscssssscsssscssssscsssscssssscsssscssssscsssscssccsssscscsscssscscsscsssscscsscssscscsscsssscscsscssscscsscsssscscsscssscscsscsssscscsscssscscsscsssscscsscssscscsscc" />
- <path
- inkscape:connector-curvature="0"
- id="path8175-7"
- d="m 1082.625,-275.125 c 1.873,0.39348 4.4961,1.14555 6.0313,1.96875 1.5352,0.82319 2.8222,1.056 5.375,2.5 2.5266,1.42926 4.7958,2.00696 6.9687,2.53125 2.3476,0.56642 5.4354,0.71523 8.8438,1.1875 -1.0889,-0.83975 -6.6074,-1.17245 -8.4063,-1.5625 -1.7989,-0.39006 -3.8941,-1.01616 -6.5937,-2.3125 -2.6997,-1.29634 -3.4944,-1.79896 -5.8125,-2.6875 -2.3182,-0.88854 -4.0044,-1.38314 -6.4063,-1.625 z"
- style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7285-4);enable-background:new" />
- <path
- inkscape:connector-curvature="0"
- id="path8177-9"
- d="m 1051.4688,-270 c 1.9053,0.57759 4.5281,1.61572 6.0937,2.59375 1.5656,0.97802 2.8802,1.35981 5.5,3.125 2.593,1.74716 4.9859,2.70927 7.25,3.59375 2.4461,0.95557 5.6826,1.65713 9.4063,3.0625 -1.1896,-1.13784 -7.0631,-2.68675 -8.9375,-3.375 -1.8745,-0.68825 -4.0818,-1.5662 -6.875,-3.28125 -2.7933,-1.71504 -3.5736,-2.2839 -5.9375,-3.40625 -2.3641,-1.12234 -4.0567,-1.83455 -6.5,-2.3125 z"
- style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7289-0);enable-background:new" />
- <path
- inkscape:connector-curvature="0"
- id="path8179-0"
- d="m 1020.2188,-266.84375 c 1.9119,0.63811 4.5812,1.75536 6.1562,2.8125 1.5751,1.05715 2.8956,1.50867 5.5313,3.40625 2.6086,1.87821 5.0284,3.03003 7.3125,4.0625 2.4677,1.11545 5.7645,2.1733 9.5312,3.84375 -1.2033,-1.22253 -7.2028,-3.31423 -9.0937,-4.125 -1.891,-0.81077 -4.0649,-1.89379 -6.875,-3.75 -2.8102,-1.8562 -3.6218,-2.47693 -6,-3.71875 -2.3783,-1.2418 -4.1107,-1.97569 -6.5625,-2.53125 z"
- style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7293-0);enable-background:new" />
- <path
- inkscape:connector-curvature="0"
- id="path8181-3"
- d="m 1110.1719,-266.89063 c 0.1508,0.0486 0.688,0.631 0.1094,1.48438 -0.8101,1.19459 -5.7049,3.32429 -8.5625,4.125 -2.8449,0.79712 -6.2901,0.97774 -10.5625,-0.375 -4.3016,-1.36195 -5.4697,-2.46872 -10.6563,-4.3125 4.664,2.11517 6.1953,3.95233 10.125,5.34375 1.6207,0.57387 3.3671,0.9396 5.0625,1.03125 -0.4451,0.32563 -1.5303,0.9833 -3.5625,1.59375 -2.7955,0.83969 -6.6491,1.53378 -8.25,1.625 -1.5146,0.0863 -3.142,-0.51249 -3.4375,-0.625 0.1667,0.10308 0.3732,0.37734 -0.25,1.03125 -0.8993,0.94363 -6.1474,1.923 -9.125,2.25 -2.9643,0.32555 -6.5216,-0.016 -10.9062,-1.90625 -3.978,-1.71497 -5.339,-2.91536 -9.4063,-4.75 0,0 0,0.15625 0,0.15625 3.6431,2.09529 5.284,3.88327 8.875,5.5625 1.7302,0.80909 3.5917,1.40876 5.4063,1.71875 -0.5349,0.28676 -1.5578,0.71151 -3.4375,1.03125 -2.869,0.48796 -6.809,0.81614 -8.4375,0.75 -0.8507,-0.0345 -1.7286,-0.18437 -2.4063,-0.40625 -0.6848,-0.21488 -1.1897,-0.44467 -1.3125,-0.5 0.1694,0.10721 0.4311,0.40288 -0.2187,1.03125 -0.9097,0.87962 -6.2461,1.33638 -9.25,1.46875 -2.9905,0.13179 -6.5889,-0.45063 -11,-2.5625 -4.4412,-2.12626 -5.6415,-3.4016 -10.9063,-5.78125 4.7343,2.59704 6.2865,4.6291 10.3438,6.71875 1.6733,0.86185 3.4852,1.49425 5.25,1.9375 -0.4633,0.23332 -1.5894,0.68814 -3.6875,0.9375 -2.8863,0.34298 -6.8346,0.49288 -8.4688,0.375 -1.5462,-0.1115 -3.2312,-0.85696 -3.5312,-1 0.1691,0.12029 0.4138,0.41048 -0.2188,1 -0.9128,0.85073 -6.2441,1.26212 -9.25,1.375 -2.9925,0.11237 -6.5897,-0.49043 -11,-2.59375 -4.00125,-1.90823 -5.38803,-3.13783 -9.46875,-5.09375 -3e-5,0 0,0.15625 0,0.15625 3.65506,2.20392 5.29421,4.05255 8.90625,5.90625 1.74029,0.89315 3.637,1.52827 5.4688,1.96875 -0.54,0.2483 -1.5781,0.61533 -3.4688,0.84375 -2.88568,0.34858 -6.86605,0.52095 -8.5,0.40625 -0.85345,-0.0599 -1.72631,-0.25791 -2.40625,-0.5 -0.6871,-0.2353 -1.18935,-0.47226 -1.3125,-0.53125 0.16998,0.11227 0.46448,0.42225 -0.1875,1.03125 -0.91265,0.8525 -6.27533,1.29337 -9.28125,1.40625 -2.99246,0.11237 -6.59346,-0.52805 -11,-2.59375 -4.43653,-2.07978 -5.64688,-3.33171 -10.90625,-5.65625 4.72938,2.54749 6.29074,4.5778 10.34375,6.625 1.67155,0.84433 3.48554,1.46643 5.25,1.90625 -0.46323,0.23422 -1.5897,0.68407 -3.6875,0.9375 -2.88569,0.34858 -6.8362,0.56952 -8.46875,0.46875 -1.54456,-0.0953 -3.20031,-0.82885 -3.5,-0.96875 0.16899,0.11853 0.38192,0.40385 -0.25,1 -0.91186,0.86028 -6.24665,1.33025 -9.25,1.46875 -2.98995,0.1379 -6.56745,-0.45068 -10.96875,-2.46875 -3.99308,-1.83089 -5.36511,-3.0292 -9.4375,-4.90625 -2e-5,0 0,0.15625 0,0.15625 3.64761,2.13327 5.27033,3.93487 8.875,5.71875 1.73675,0.85951 3.60727,1.45014 5.4375,1.875 -0.53947,0.2529 -1.55063,0.64129 -3.4375,0.90625 -2.87978,0.40436 -6.83813,0.64562 -8.46875,0.5625 -0.85172,-0.0434 -1.7277,-0.20855 -2.40625,-0.4375 -0.68569,-0.22201 -1.1896,-0.44339 -1.3125,-0.5 0.16959,0.10899 0.4319,0.40965 -0.21875,1.03125 -0.91079,0.87014 -6.25021,1.39152 -9.25,1.5625 -2.98633,0.17021 -6.57381,-0.31577 -10.96875,-2.28125 -4.42489,-1.97888 -5.60596,-3.22819 -10.84375,-5.375 4.70997,2.38767 6.27017,4.38873 10.3125,6.34375 1.66715,0.80631 3.46043,1.39658 5.21875,1.78125 -0.46163,0.2487 -1.597,0.71225 -3.6875,1.03125 -2.8756,0.43876 -6.7804,0.7331 -8.40625,0.6875 -1.53823,-0.0431 -3.2328,-0.74522 -3.53125,-0.875 0.16833,0.11282 0.41057,0.41375 -0.21875,1.03125 -0.90812,0.8911 -6.20295,1.52825 -9.1875,1.8125 -2.97118,0.28298 -6.57342,-0.1758 -10.9375,-1.9375 -3.95934,-1.59831 -5.32915,-2.79487 -9.34375,-4.3125 3e-5,0 0,0.15625 0,0.15625 3.5959,1.81135 5.23831,3.58233 8.8125,5.15625 1.72207,0.75835 3.58748,1.28895 5.40625,1.625 
-0.53609,0.27908 -1.56658,0.68763 -3.4375,1.0625 -2.85539,0.5721 -6.78942,1.01939 -8.40625,1.03125 -0.84451,0.006 -1.70608,-0.0809 -2.375,-0.25 -0.67591,-0.16151 -1.16009,-0.32923 -1.28125,-0.375 0.16722,0.094 0.42267,0.38348 -0.21875,1.0625 -0.89787,0.95052 -6.18648,1.91708 -9.125,2.4375 -2.92534,0.51809 -6.43215,0.37424 -10.71875,-1.03125 -4.3158,-1.41507 -5.47277,-2.52994 -10.5625,-3.96875 4.57685,1.75101 6.08855,3.56006 10.03125,5 1.62608,0.59389 3.36885,0.95565 5.09375,1.15625 -0.45285,0.29702 -1.55478,0.88339 -3.59375,1.46875 -2.80472,0.80517 -6.63886,1.57583 -8.21875,1.75 -1.49475,0.1648 -3.11623,-0.31681 -3.40625,-0.40625 0.16356,0.0901 0.39278,0.35993 -0.21875,1.0625 -0.88247,1.01385 -6.04452,2.37165 -8.9375,3.0625 -2.88002,0.68778 -6.3356,0.76002 -10.5625,-0.4375 -3.83485,-1.08645 -5.17258,-2.07237 -9.0625,-3.125 -10e-6,0 0,0.15625 0,0.15625 3.48418,1.39485 5.06941,2.9194 8.53125,4.03125 1.66793,0.53572 3.45578,0.78674 5.21875,0.875 -0.51964,0.35212 -1.50039,0.91452 -3.3125,1.53125 -2.76566,0.94125 -6.59024,1.93537 -8.15625,2.15625 -0.81794,0.11539 -1.6331,0.12283 -2.28125,0.0312 -0.65496,-0.0832 -1.1326,-0.21827 -1.25,-0.25 0.16204,0.0746 0.43399,0.34044 -0.1875,1.09375 -0.87,1.05453 -6.00963,2.65925 -8.875,3.4375 -2.85253,0.77476 -6.25912,0.9582 -10.4375,-0.0937 -4.20683,-1.05913 -5.35669,-2.04166 -10.34375,-3.15625 4.48454,1.45946 5.96935,3.13523 9.8125,4.25 1.58504,0.45977 3.28679,0.63825 4.96875,0.6875 -0.44157,0.33676 -1.51251,1.02773 -3.5,1.78125 -2.73393,1.03649 -6.45198,2.16269 -8,2.4375 -1.46462,0.26002 -3.05958,-0.11654 -3.34375,-0.1875 0.16025,0.0796 0.38044,0.32098 -0.21875,1.0625 -0.86466,1.07006 -5.91652,2.81815 -8.75,3.6875 -2.8208,0.86547 -6.2075,1.15631 -10.34375,0.21875 -3.75259,-0.85061 -5.04785,-1.71647 -8.875,-2.59375 0,0 0,0.15625 0,0.15625 3.42796,1.23779 4.98741,2.6323 8.375,3.53125 1.63216,0.43314 3.36704,0.58301 5.09375,0.5625 -0.50893,0.38417 -1.47675,1.02182 -3.25,1.75 -2.70634,1.11134 -6.43633,2.30781 -7.96875,2.625 -0.8004,0.16569 -1.61231,0.21862 -2.25,0.15625 0,0 0,0.51552 0,0.92229 0,0.26507 0,0.48396 0,0.48396 0.22645,-0.14468 0.44891,-0.27261 0.71875,-0.375 1.08052,-0.40998 2.17161,-0.21577 6,-1.6875 3.82843,-1.47174 5.22412,-2.00498 5.90625,-2.40625 0.6796,-0.39978 1.61165,-0.87937 2.21875,-1.53125 1.82685,-0.13775 3.57075,-0.49323 4.9375,-1 2.96812,-1.10052 4.87537,-1.80619 6.78125,-2.46875 1.90586,-0.66254 2.35409,-1.41487 3.40625,-1.78125 1.09155,-0.38011 2.19511,-0.16538 6.0625,-1.53125 3.86745,-1.36586 5.28316,-1.82708 5.96875,-2.21875 0.70109,-0.40052 1.70081,-0.93298 2.3125,-1.59375 1.9708,-0.0547 3.81685,-0.38463 5.28125,-0.875 3.00148,-1.00508 4.92615,-1.62171 6.84375,-2.25 1.5386,-0.5041 2.17402,-1.04677 2.90625,-1.4375 0.23016,-0.13431 0.47574,-0.25373 0.75,-0.34375 1.09823,-0.36048 2.18145,-0.0814 6.09375,-1.3125 3.91233,-1.23113 5.36605,-1.67295 6.0625,-2.03125 0.69388,-0.35697 1.63015,-0.79261 2.25,-1.40625 1.86521,-0.0227 3.63581,-0.26683 5.03125,-0.6875 3.03043,-0.91354 4.99238,-1.4301 6.9375,-1.96875 1.94511,-0.53864 2.42618,-1.26452 3.5,-1.5625 1.11401,-0.30915 2.21994,0.007 6.1875,-1.03125 3.96761,-1.03863 5.41758,-1.43273 6.125,-1.75 0.73487,-0.32959 1.81383,-0.75372 2.4375,-1.375 1.99774,0.116 3.85743,-0.0201 5.34375,-0.375 3.07811,-0.735 5.08344,-1.10094 7.0625,-1.5 1.58792,-0.32018 2.24429,-0.79055 3,-1.09375 0.23757,-0.1068 0.46695,-0.19276 0.75,-0.25 1.13347,-0.22919 2.30448,0.20893 6.34375,-0.5 4.03933,-0.70893 5.50025,-0.92709 6.21875,-1.1875 0.71586,-0.25944 1.70428,-0.56724 2.34375,-1.09375 1.92427,0.23949 
3.74788,0.22453 5.1875,0 3.12633,-0.48762 5.15455,-0.70067 7.15625,-0.96875 2.00171,-0.26807 2.48869,-0.94514 3.59375,-1.09375 1.14639,-0.15418 2.27592,0.30157 6.34375,-0.21875 4.06784,-0.52032 5.56013,-0.69573 6.28125,-0.9375 0.7371,-0.24714 1.79809,-0.58623 2.4375,-1.125 2.05007,0.33553 3.97378,0.39796 5.5,0.21875 3.14231,-0.36896 5.17994,-0.55936 7.1875,-0.78125 1.61076,-0.17802 2.26467,-0.6082 3.03125,-0.84375 0.24094,-0.0855 0.49412,-0.1556 0.78125,-0.1875 1.14978,-0.12772 2.30129,0.34665 6.375,-0.125 4.07374,-0.47165 5.55909,-0.6106 6.28125,-0.84375 0.71946,-0.23227 1.70024,-0.47346 2.34375,-0.96875 1.93637,0.33346 3.77006,0.40424 5.21875,0.25 3.14602,-0.33495 5.17756,-0.51859 7.1875,-0.71875 2.00996,-0.20014 2.48414,-0.82639 3.59375,-0.9375 1.15114,-0.11528 2.29643,0.36506 6.375,-0.0625 4.07861,-0.42756 5.58886,-0.56209 6.3125,-0.78125 0.73915,-0.22386 1.79572,-0.51325 2.4375,-1.03125 2.0571,0.39867 4.00187,0.4934 5.53125,0.34375 3.14873,-0.3081 5.17584,-0.47325 7.1875,-0.65625 1.61407,-0.14682 2.2631,-0.56055 3.03125,-0.78125 0.24142,-0.0809 0.49353,-0.12991 0.78125,-0.15625 1.15211,-0.10545 2.29296,0.39275 6.375,0 4.08208,-0.39275 5.5889,-0.53084 6.3125,-0.75 0.7209,-0.21833 1.6997,-0.4477 2.3438,-0.9375 1.938,0.34999 3.7688,0.45438 5.2187,0.3125 3.1487,-0.3081 5.1758,-0.47325 7.1875,-0.65625 2.0116,-0.18299 2.5142,-0.83802 3.625,-0.9375 1.1523,-0.10323 2.2922,0.38483 6.375,0 4.0829,-0.38482 5.5887,-0.501 6.3125,-0.71875 0.7393,-0.22243 1.7956,-0.51449 2.4375,-1.03125 2.0574,0.40177 4.0029,0.50333 5.5313,0.34375 3.1466,-0.32852 5.1771,-0.5227 7.1875,-0.71875 1.613,-0.15729 2.2656,-0.63148 3.0312,-0.875 0.2407,-0.088 0.4632,-0.12137 0.75,-0.15625 1.1483,-0.1397 2.3167,0.33991 6.375,-0.25 4.0583,-0.58992 5.5618,-0.77714 6.2813,-1.03125 0.7167,-0.25316 1.6745,-0.55807 2.3125,-1.09375 1.9197,0.21194 3.7199,0.15141 5.1562,-0.0937 3.1191,-0.5324 5.1116,-0.92861 7.0938,-1.3125 1.9821,-0.38387 2.4743,-1.03965 3.5625,-1.28125 1.1288,-0.25066 2.2703,0.11629 6.25,-0.875 3.9796,-0.99128 5.4296,-1.4193 6.125,-1.78125 0.7223,-0.37601 1.7619,-0.87058 2.375,-1.53125 1.963,-0.012 3.7937,-0.29105 5.2187,-0.84375 2.9512,-1.14461 4.8732,-1.86942 6.6875,-2.75 1.4557,-0.70653 2.3348,-1.68641 2.5469,-1.98438 0.2122,-0.29796 0.1118,-0.7453 0.1379,-0.76675 0.043,-0.0352 0.3193,-0.085 0.479,-0.42844 0.8589,-1.84708 2.321,-5.64459 2.4352,-6.32945 0.1137,-0.68216 0.1638,-1.34774 0.2145,-1.74497 0.029,-0.22952 -0.1467,-0.86544 -0.1246,-0.92404 0.031,-0.0821 0.3045,-0.26528 0.3599,-0.51471 0.2663,-1.19833 0.089,-2.19129 -0.1251,-3.60893 -0.214,-1.41764 -0.9837,-4.62214 -1.6369,-5.47626 -0.6589,-0.86172 -1.2229,-1.01117 -1.7479,-1.00066 -0.2086,0.26976 0.1368,0.26309 0.1626,0.31261 0.6806,0.0508 0.934,0.36864 1.4192,0.89662 0.4852,0.52798 1.2218,3.85117 1.3584,5.30156 0.1366,1.45039 0.19,2.8602 -0.088,3.46864 -0.2781,0.60845 -0.7232,0.51703 -1.0156,0.58291 0.531,0.18589 0.6698,0.12483 0.7314,0.96929 0.059,0.81338 -0.1332,1.63969 -0.5198,2.80562 -0.3912,1.18001 -1.8452,4.34998 -2.2857,4.59877 -0.4523,0.25551 -0.7314,0.27038 -1.067,0.13944 z"
- style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7337-2);enable-background:new"
- sodipodi:nodetypes="cssscscsscsssccscssssssscscsscsssscscssssssscscsscsssscscssssssscscsscsssscscssssssscscsscsssscscssssssscscsscsssccscsscscssscssssscsssssscssssscsssssscssssscsssssscssssscsssssscssssscsssssscssssscsszsszssszzcczzzczzzc" />
- <path
- inkscape:connector-curvature="0"
- id="path8183-3"
- d="m 988.75,-263.84375 c 1.91161,0.6344 4.55027,1.75841 6.125,2.8125 1.57477,1.05409 2.8961,1.48252 5.5313,3.375 2.6082,1.87314 5.0269,3.01522 7.3125,4.0625 2.4693,1.13147 5.7521,2.15474 9.5312,3.9375 -1.2072,-1.2584 -7.139,-3.36445 -9.0312,-4.1875 -1.8922,-0.82304 -4.128,-1.93049 -6.9375,-3.78125 -2.80961,-1.85075 -3.62224,-2.48154 -6.00005,-3.71875 -2.37782,-1.23719 -4.07988,-1.9492 -6.53125,-2.5 z"
- style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7297-4);enable-background:new" />
- <path
- inkscape:connector-curvature="0"
- id="path8185-3"
- d="m 957.5,-260.78125 c 1.91,0.6181 4.58288,1.70934 6.15625,2.75 1.57339,1.04066 2.89608,1.48252 5.53125,3.375 2.60823,1.87315 5.02692,3.01521 7.3125,4.0625 2.46931,1.13147 5.75213,2.15475 9.53125,3.9375 -1.20728,-1.2584 -7.20154,-3.3957 -9.09375,-4.21875 -1.89217,-0.82304 -4.09666,-1.9305 -6.90625,-3.78125 -2.80958,-1.85075 -3.59295,-2.43932 -5.96875,-3.65625 -2.37578,-1.21691 -4.11321,-1.93885 -6.5625,-2.46875 z"
- style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7301-5);enable-background:new" />
- <path
- inkscape:connector-curvature="0"
- id="path8187-7"
- d="m 926.09375,-257.375 c 1.90772,0.59745 4.55348,1.66384 6.125,2.6875 1.5715,1.02365 2.87022,1.43971 5.5,3.28125 2.60291,1.82273 5.02887,2.9722 7.3125,4 2.4672,1.11041 5.75535,2.09323 9.53125,3.84375 -1.20623,-1.2481 -7.1719,-3.31809 -9.0625,-4.125 -1.89058,-0.8069 -4.10242,-1.89104 -6.90625,-3.6875 -2.80385,-1.79644 -3.62704,-2.40251 -6,-3.59375 -2.37297,-1.19124 -4.05362,-1.90283 -6.5,-2.40625 z"
- style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7305-4);enable-background:new" />
- <path
- inkscape:connector-curvature="0"
- id="path8189-3"
- d="m 894.90625,-253.5625 c 1.90213,0.55355 4.58701,1.58887 6.15625,2.59375 1.56923,1.00487 2.87401,1.40864 5.5,3.21875 2.59912,1.79164 5.00034,2.87189 7.28125,3.875 2.46428,1.08374 5.75984,2.04029 9.53125,3.75 -1.2048,-1.23507 -7.17416,-3.24478 -9.0625,-4.03125 -1.88832,-0.78647 -4.0752,-1.8308 -6.875,-3.59375 -2.79977,-1.76294 -3.59919,-2.36836 -5.96875,-3.53125 -2.36957,-1.16288 -4.12325,-1.83412 -6.5625,-2.28125 z"
- style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7309-9);enable-background:new" />
- <path
- inkscape:connector-curvature="0"
- id="path8191-2"
- d="m 863.71875,-248.65625 c 1.88062,0.42909 4.50427,1.38038 6.0625,2.3125 1.55823,0.93211 2.85233,1.25776 5.46875,3 2.58971,1.72444 4.98067,2.70802 7.25,3.625 2.45176,0.99069 5.73959,1.87707 9.5,3.5 -1.20131,-1.20734 -7.15249,-3.06609 -9.03125,-3.78125 -1.87875,-0.71517 -4.0854,-1.68442 -6.875,-3.375 -2.78963,-1.69057 -3.58461,-2.22822 -5.9375,-3.28125 -2.35292,-1.05301 -4.02584,-1.71248 -6.4375,-2 z"
- style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7313-2);enable-background:new" />
- <path
- inkscape:connector-curvature="0"
- id="path8193-6"
- d="m 833.15625,-241.375 c 1.84836,0.29644 4.46945,0.97632 6,1.78125 1.53058,0.80493 2.81374,1.05573 5.375,2.53125 2.53504,1.46046 4.89068,2.32509 7.125,3.0625 2.41399,0.79668 5.65711,1.46689 9.375,2.84375 -1.18771,-1.12873 -7.08772,-2.58975 -8.9375,-3.15625 -1.84977,-0.5665 -4.00342,-1.37392 -6.75,-2.84375 -2.74657,-1.46983 -3.50136,-1.92028 -5.8125,-2.78125 -2.31115,-0.86095 -4.00471,-1.32009 -6.375,-1.4375 z"
- style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7317-7);enable-background:new" />
- <path
- inkscape:connector-curvature="0"
- id="path8195-5"
- d="m 802.90625,-232.3125 c 1.8222,0.21127 4.36576,0.80057 5.875,1.53125 1.50925,0.73066 2.75568,0.92998 5.28125,2.28125 2.49976,1.33746 4.83154,2.04843 7.03125,2.65625 2.37653,0.65667 5.56464,1.07288 9.21875,2.1875 -1.16735,-1.04496 -6.92888,-2.10329 -8.75,-2.5625 -1.82111,-0.45921 -3.95225,-1.12696 -6.65625,-2.4375 -2.70403,-1.31052 -3.47106,-1.7199 -5.75,-2.46875 -2.27895,-0.74883 -3.91325,-1.17931 -6.25,-1.1875 z"
- style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7321-5);enable-background:new" />
- <path
- inkscape:connector-curvature="0"
- id="path8197-2"
- d="m 773.1875,-222.1875 c 1.81109,0.1787 4.32059,0.66506 5.8125,1.34375 1.49194,0.67869 2.7534,0.79822 5.25,2.0625 2.47107,1.25138 4.79005,1.89614 6.96875,2.4375 2.35387,0.58488 5.49134,0.89752 9.09375,1.84375 -1.15084,-0.99116 -6.85251,-1.7833 -8.65625,-2.1875 -1.80372,-0.4042 -3.91553,-1.02116 -6.59375,-2.25 -2.67818,-1.22884 -3.40345,-1.61089 -5.65625,-2.28125 -2.25279,-0.67034 -3.89627,-1.00232 -6.21875,-0.96875 z"
- style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7329-8);enable-background:new" />
- <path
- inkscape:connector-curvature="0"
- id="path8199-6"
- d="m 743.5625,-211.1875 c 1.79281,0.12911 4.27313,0.54965 5.75,1.1875 1.4769,0.63785 2.7161,0.74156 5.1875,1.9375 2.44618,1.18372 4.72054,1.74666 6.875,2.21875 2.32767,0.51003 5.4196,0.68064 9,1.5625 -1.14379,-0.9706 -6.74759,-1.59065 -8.53125,-1.9375 -1.78367,-0.34684 -3.88285,-0.88756 -6.53125,-2.03125 -2.64841,-1.14368 -3.39495,-1.51631 -5.625,-2.125 -2.23008,-0.60868 -3.82594,-0.90966 -6.125,-0.8125 z"
- style="display:inline;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;filter:url(#filter7325-2);enable-background:new" />
- </g>
- </g>
- <path
- inkscape:connector-curvature="0"
- style="display:inline;opacity:1;fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 863.87812,475.6679 c 1.64212,-3.218 3.51781,-5.73529 4.86136,-9.84898 0.79872,-3.65789 3.31204,-2.03073 7.26047,-8.3969 1.40193,-2.2395 5.47653,0.39136 8.9651,-2.39911 1.27072,-0.80319 2.88488,-0.40431 4.48256,-0.0631 3.76539,1.31896 5.82576,3.70355 8.33376,5.80837 6.13906,5.97023 20.53414,7.94327 23.48604,6.31346 1.43405,-2.90474 7.88128,-5.40888 12.37437,-11.11168 0.74811,-1.12267 11.72936,-8.74446 14.64721,-6.56599"
- id="path8201-5"
- sodipodi:nodetypes="ccccccccc" />
- <path
- inkscape:connector-curvature="0"
- style="display:inline;opacity:1;fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 888.50059,465.25071 c 7.36341,-3.23297 13.8109,-8.9084 20.70813,-13.38452 3.31057,-1.96954 6.86983,3.21601 10.796,3.59866 2.29773,-0.21813 3.7129,1.20259 5.68211,1.6415 5.15636,1.31779 2.39793,3.86488 9.97526,6.43972 6.15561,1.7204 8.9074,-6.79847 14.89975,-7.3236 4.87739,-0.50299 8.09892,-0.31603 11.61675,-0.25254 3.92696,0.13889 4.07855,-3.4976 6.06092,-5.3033 2.98056,-2.80522 7.15561,-1.84972 10.14485,-4.7409 1.01754,-1.38468 1.95458,-3.01085 2.73459,-5.10809 0.88201,-2.00034 3.04006,0.30598 4.79823,1.26269"
- id="path8203-8"
- sodipodi:nodetypes="ccccccccccc" />
- <path
- inkscape:connector-curvature="0"
- style="display:inline;overflow:visible;visibility:visible;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter9048-9);enable-background:accumulate"
- d="m 403.27922,1056.3058 56.56854,-42.4264 72.12489,14.1421 -46.66904,52.3259 -53.74012,7.0711 -28.28427,-31.1127 z"
- id="path8994-7" />
- <path
- inkscape:connector-curvature="0"
- style="display:inline;opacity:1;fill:#ada469;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 542.27183,1060.5719 c -1.40727,18.8012 -1.1449,32.751 2.08174,49.3033 3.22666,16.5523 16.40609,45.9073 20.33441,63.1837 3.92621,17.2671 2.69413,38.3097 -12.45944,51.1483 -15.31761,12.9774 -42.05128,21.5989 -67.83231,15.7337 -25.78105,-5.8652 -69.54907,-49.2234 -88.59019,-70.2283 -19.11214,-21.0833 -63.76086,-93.8506 -77.93853,-124.2758 -14.17767,-30.4251 -12.65961,-36.7186 -8.11972,-45.52972 -9.36672,-24.5205 -12.41371,-50.06681 -33.71245,-75.57664 30.32547,3.11444 43.88028,26.95633 60.12568,47.13975 -5.52989,-48.07603 -18.05471,-64.4165 -28.37395,-90.7243 29.9943,6.08165 50.57936,31.87239 63.97979,72.7125 9.55415,-3.91791 18.23776,-9.37294 30.18741,-9.0612 -11.2975,-41.6958 -17.94946,-69.91584 -36.68725,-101.06994 53.44196,5.67033 83.65702,80.63932 78.97142,87.9608 9.97797,-2.24399 19.00565,-6.53038 30.43653,-5.65167 -11.24897,-38.34702 -21.04781,-76.8679 -3.65971,-118.64819 0,0 48.28678,65.43688 54.38966,85.80578 6.10287,20.3689 1.51881,38.70051 1.51881,38.70051 0,0 16.95957,31.0853 20.29392,51.09414 3.3731,20.24138 -3.53269,59.10328 -4.94582,77.98328 z"
- id="path4189-9"
- sodipodi:nodetypes="czzzzzzcccccccccczczz" />
- <path
- inkscape:connector-curvature="0"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter3587-1);enable-background:accumulate"
- d="m 719.5,738.69519 18.31177,15.43196 44.41103,-15.38821 23.2772,-25.54375 11.46397,19.22065 30.67161,12.78354 25.09737,5.72837 L 892,723.19519 908.02309,747.02126 947,752.19519 l 10.24541,-6.19852 6.75471,8.6982 25.49988,11.00032 2,-40.5 L 955.94866,710.6576 923.45591,689.1305 883.0038,677.66492 861.69668,662.13148 840,685.19519 755.02878,638.61208 722,676.69519 l -2.5,62 z"
- id="path4191-6"
- sodipodi:nodetypes="cccccccccccccccccccccc"
- clip-path="url(#clipPath3631-6)"
- transform="matrix(-0.9045327,0.2506626,0.2506626,0.9045327,995.28646,23.53493)" />
- <path
- inkscape:connector-curvature="0"
- style="display:inline;opacity:0.58775509;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:20.79999924;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;filter:url(#filter3898-1);enable-background:new"
- d="m 584,696.5 -6.5625,17.15625 c 0,0 -7.81152,20.36488 -15.6875,43.65625 -3.93799,11.64568 -7.88302,24.04145 -10.9375,35.125 -3.05448,11.08355 -5.33586,20.37986 -5.5,28.28125 -0.39807,19.16196 5.74653,34.8883 8.9375,41.75 -0.77153,3.55523 -1.99137,9.45432 -3.34375,18.09375 -1.92042,12.26821 -3.71827,27.15441 -2.375,39.875 1.38209,13.08835 6.81222,28.18765 12.59375,43.03125 5.78153,14.8436 12.05435,29.22711 15.21875,38.03125 6.63206,18.4519 9.99296,31.5763 11.3125,48.5 0.58135,7.4561 -0.24227,20.336 -1.25,33.375 -1.00773,13.039 -2.18661,26.3014 -1.6875,36.9688 0.98911,21.1398 9.32798,46.8347 33.375,57.9374 22.77483,10.5154 55.32682,11.7022 83.4375,-3.4374 16.15992,-8.7034 30.07634,-27.0976 43.375,-46.9063 13.29866,-19.8087 24.96917,-41.0534 31.9375,-54.9063 15.35292,-30.5212 39.39353,-115.46418 45.625,-152.7187 3.01859,-18.04653 3.92166,-29.06555 2.625,-38.03125 -0.97853,-6.76604 -3.82819,-12.1474 -6.875,-16.21875 2.04274,-27.50791 -0.73207,-51.36878 11.96875,-79.40625 L 840.75,763.375 l -23.8125,9.3125 c -17.48975,6.83753 -28.90164,19.04536 -36.59375,32.0625 -0.32251,0.54577 -0.56314,1.10776 -0.875,1.65625 0.22203,-22.51521 4.40784,-37.63759 6.59375,-58.6875 l 1.96875,-19 L 771,737.375 c -30.59449,15.55571 -45.69489,48.19321 -49.71875,90.21875 -4.24532,-0.62547 -8.8314,-1.01965 -13.8125,-0.84375 -0.29149,-39.18036 -0.39629,-67.03685 8.59375,-99.375 l 5.59375,-20.125 -19.4375,7.65625 c -30.90937,12.20394 -47.85954,41.93073 -56.625,68.375 -4.38273,13.22214 -6.74582,25.80121 -7.59375,35.9375 -0.23203,2.77373 -0.31106,5.31132 -0.3125,7.71875 -3.24187,-0.0364 -6.42052,0.13589 -10.0625,0.5 0.0416,-39.00473 -3.48424,-79.75415 -32.28125,-116.5 L 584,696.5 Z m 5.8125,43.8125 c 16.80691,30.64383 17.47451,63.96728 16.9375,99.75 l -0.21875,15.0625 12.03493,-6.53921 c 8.66205,-3.13302 19.56058,-0.22752 31.93382,-0.83579 l 14.67465,9.3566 -6.3309,-25.7941 c -0.0897,-0.22997 -0.22046,-0.41669 -0.25,-0.71875 -0.19951,-2.03986 -0.22232,-5.47307 0.125,-9.625 0.69464,-8.30386 2.78957,-19.58524 6.625,-31.15625 5.15532,-15.55294 13.48801,-31.19248 25.125,-42.53125 -4.68381,28.63798 -3.21559,60.25934 -3.01164,95.80514 l -2.76593,13.26164 15.49632,-7.59803 c 9.0294,-2.75771 17.18897,-0.34996 29.28125,1.09375 l 13.24632,9.44423 L 741.09375,840 c 1.44793,-30.97177 8.22149,-53.67808 20.71875,-68.875 -2.98688,19.77884 -5.43043,41.7848 0.3125,78.34375 l 1.06552,6.37318 -2.93815,11.51685 10.61711,-8.16818 9.18973,10.22198 -1.54828,-10.4636 L 781.9375,852 c 5.70102,-13.21149 10.17282,-26.21337 16.34375,-36.65625 0.95986,-1.62434 2.03153,-3.06436 3.0625,-4.5625 -3.68066,21.15535 -2.42716,40.20815 -4.09375,57.78125 l -4.68014,7.80698 7.39889,0.22427 c 3.22005,3.48361 3.8675,3.85068 4.5625,8.65625 0.695,4.80557 0.31862,14.40035 -2.5625,31.625 -5.56799,33.28792 -31.84562,77.83981 -43.7404,101.4864 -6.60491,13.1304 -18.52833,57.4859 -31.12335,76.2465 -12.59502,18.7605 -28.53137,39.7673 -37.17204,44.4209 -21.49052,11.5742 -44.55594,25.5059 -60.61889,18.0895 -14.37486,-6.637 -23.03969,-21.1927 -23.81407,-37.7433 -0.38311,-8.188 0.61279,-21.3092 1.625,-34.4062 1.01221,-13.0971 11.28891,-22.5708 15.42339,-36.5626 5.37229,-18.1808 -1.44687,-36.5944 -12.5,-53.93745 -6.48655,-10.17778 -23.9768,-24.2579 -29.54839,-38.5625 -5.57159,-14.3046 -10.36751,-29.00315 -11.28125,-37.65625 -0.92621,-8.77113 0.4225,-23.02502 2.21875,-34.5 1.79625,-11.47497 3.84375,-20.28125 3.84375,-20.28125 l 9.42278,-3.6152 -10.48528,-3.8848 c 0,0 -8.49889,-15.3101 -8.09375,-34.8125 0.0711,-3.42316 1.83626,-12.72805 4.71875,-23.1875 2.88249,-10.45945 
6.76466,-22.55271 10.625,-33.96875 3.04439,-9.00308 5.78063,-16.60345 8.34375,-23.6875 z"
- id="path4193-0"
- clip-path="url(#clipPath3677-0)"
- sodipodi:nodetypes="ccssscsssssssssssssccccscccccccccsscccccccccccssscccccccccccccccsccccssssssssssssscccsssc"
- transform="matrix(-0.9045327,0.2506626,0.2506626,0.9045327,822.28931,10.93589)" />
- <g
- style="display:inline;enable-background:new"
- id="g3617-4"
- clip-path="url(#clipPath3622-5)"
- transform="translate(276,136)">
- <path
- inkscape:connector-curvature="0"
- transform="matrix(-0.9045327,0.2506626,0.2506626,0.9045327,-52.200498,74.09707)"
- id="path4195-1"
- d="m -15.66751,843.48852 -49.49748,-15.55635 -26.87005,52.3259 41.01219,45.25484 49.49747,-38.18377 -14.14213,-43.84062 z"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter9024-1);enable-background:accumulate" />
- <path
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="ccccccccccccc"
- transform="matrix(-0.9045327,0.2506626,0.2506626,0.9045327,-46.92842,75.511284)"
- id="path4197-0"
- d="m 118.70648,859.93048 -55.154328,-46.66904 -43.84062,36.76955 33.94113,53.74011 -13.596814,85.46203 -39.44536579,28.29217 -41.01220021,11.3137 -2.82842,46.669 56.56854,25.4559 18.943987,-69.65 23.45655,-58.85663 46.347541,-72.61491 16.62,-39.91188 z"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter9020-8);enable-background:accumulate" />
- </g>
- <path
- inkscape:connector-curvature="0"
- style="display:inline;overflow:visible;visibility:visible;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter9044-0);enable-background:accumulate"
- d="m -70.82184,932.58397 60.81118,-26.87005 100.40916,31.1127 -63.63961,31.11269 -82.02438,-16.97056 -15.55635,-18.38478 z"
- id="path4199-4"
- transform="matrix(-0.9045327,0.2506626,0.2506626,0.9045327,229.07158,211.51128)" />
- <path
- inkscape:connector-curvature="0"
- style="display:inline;opacity:0.58775509;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:20.79999924;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;filter:url(#filter4105-2);enable-background:new"
- d="m 583.0625,715.75 c -12.10609,34.44974 -26.7145,68.53333 -31.75,104.84375 -0.83208,14.92867 4.58915,29.15943 8.84375,43.0625 -5.91624,27.20126 -10.13681,56.89995 1.15625,83.125 13.51717,38.16085 35.00147,75.68215 32.42279,117.46825 -0.9483,29.2942 -9.01444,60.9941 5.38971,88.2817 10.19864,19.3348 33.13956,27.3117 53.96785,27.6676 27.86219,1.1741 56.46261,-11.6216 72.0009,-35.2613 22.59549,-29.3717 41.80051,-61.4973 55.23865,-96.0598 16.89053,-45.506 29.6718,-92.56072 37.93402,-140.3989 1.8244,-12.94106 3.10108,-27.46985 -4.57892,-38.82255 -3.43115,-7.33632 0.0421,-15.56014 -0.68457,-23.30977 0.674,-24.99466 4.01232,-50.66376 16.65332,-72.59648 -17.73313,6.4446 -35.07268,16.55971 -44.00307,33.86425 -3.93508,6.70955 -7.60482,13.57413 -11.37193,20.38575 -3.54999,-30.01408 3.71963,-59.64828 6.78125,-89.28125 -20.16604,9.05463 -36.87672,25.65522 -44.17495,46.682 -6.30463,15.58003 -8.80222,32.31718 -10.26255,49.03675 -8.25334,-1.51925 -16.68447,-2.10155 -25.0625,-1.5 -0.96308,-38.69787 -0.46696,-79.40715 10.96875,-115.90625 -18.68113,6.21776 -35.16621,18.73551 -45.62803,35.38723 -13.85254,20.87979 -21.2614,45.75395 -23.05947,70.61277 0.58534,4.32454 -0.0613,11.84009 -6.34375,9.875 -5.33118,0.0176 -10.62908,0.67883 -15.9375,1.09375 1.14784,-39.38148 -3.34144,-81.6282 -27.0625,-114.21875 -3.06071,-3.63717 -5.63685,-7.68438 -8.625,-11.34375 -0.9375,2.4375 -1.875,4.875 -2.8125,7.3125 z m 7.75,13.84375 c 18.56527,29.29629 22.4825,64.82012 22.125,98.875 0.20409,5.17526 -0.51656,11.8292 0.125,16.0625 12.31856,-6.10275 26.73912,-2.4399 39.78125,-2.1875 2.31712,1.22325 3.1921,1.65243 1.90625,-1.40625 -4.16455,-13.95285 -1.84828,-28.613 1.80504,-42.40764 6.36687,-26.29064 20.62828,-51.08798 42.81996,-67.02986 -8.61709,37.23706 -5.71658,76.56161 -6.09375,113.96875 12.25344,-6.9099 27.27879,-3.44613 40.03125,-0.25 3.39222,3.5348 2.28935,-0.72948 2.1875,-3.8125 -0.48309,-21.37058 4.13133,-43.06963 13.6875,-62.15625 5.96266,-10.68727 14.24338,-19.80379 22.4375,-28.875 -7.87156,33.8381 -9.2029,69.33593 -2.71875,103.5 1.72485,-1.41118 4.60681,-0.45414 5.65625,-0.375 9.68369,-21.23682 16.35112,-45.38062 34.89016,-60.74185 1.87329,-0.37122 -1.44818,8.52495 -1.48391,11.8981 -3.53488,21.84581 -7.17516,44.14234 -8.78421,66.21911 -8.78379,2.34171 2.84835,2.32354 3.46875,4.0625 7.92311,10.5658 4.66299,24.40472 3.63165,36.35334 -7.06405,45.03355 -22.14231,87.36194 -35.95355,130.6798 -12.07476,32.9493 -27.3742,58.8525 -47.88808,87.2015 -10.95257,13.5514 -23.24472,27.8513 -40.84375,32.5 -20.15601,6.2413 -44.20676,10.8769 -62.59956,0.046 -17.28966,-12.3414 -21.02393,-35.7089 -19.26226,-55.6864 0.0488,-15.8262 4.93886,-28.5121 4.4106,-43.4918 -0.53824,-15.2629 -2.29135,-30.5647 -6.54261,-46.8663 -4.25126,-16.30162 -9.04325,-24.91794 -16.11906,-41.57338 -7.24111,-17.04456 -15.07015,-36.74863 -18.20542,-56.28842 -1.74948,-18.62714 2.89171,-37.12262 5.78125,-55.25 3.29623,-2.83696 -1.59799,-5.19659 -2.3125,-8.1875 -7.60113,-17.01508 -8.40747,-36.7749 -2.74234,-54.55998 7.1302,-25.0723 15.76087,-49.63241 24.67984,-74.12752 0.70833,1.30208 1.41667,2.60417 2.125,3.90625 z"
- id="path4201-8"
- sodipodi:nodetypes="ccccccccccccccccccccccccccccccccccccccccccccccccccczzzcccccc"
- clip-path="url(#clipPath4177-4)"
- transform="matrix(-0.9045327,0.2506626,0.2506626,0.9045327,822.28931,10.93589)" />
- <path
- inkscape:connector-curvature="0"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter4130-8);enable-background:accumulate"
- d="m 735.05635,733.03834 2.75542,21.08881 44.41103,-15.38821 4.85063,-22.38975 -3.93617,-22.05222 -22.45163,-36.59301 -8.28004,30.30494 -17.34924,45.02944 z"
- id="path4203-7"
- sodipodi:nodetypes="cccccccc"
- clip-path="url(#clipPath3631-6)"
- transform="matrix(-0.9045327,0.2506626,0.2506626,0.9045327,995.28646,23.53493)" />
- <path
- inkscape:connector-curvature="0"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter4141-2);enable-background:accumulate"
- d="m 831.81321,730.29452 15.82237,14.90486 20.85473,2.89994 -1.59029,-39.92598 8.32561,-30.50842 -7.16499,-6.34106 -21.69669,20.9424 -14.55074,38.02826 z"
- id="path4205-0"
- sodipodi:nodetypes="cccccccc"
- clip-path="url(#clipPath3631-6)"
- transform="matrix(-0.9045327,0.2506626,0.2506626,0.9045327,995.28646,23.53493)" />
- <g
- id="g8317-8"
- style="display:inline;filter:url(#filter8333-2);enable-background:new"
- clip-path="url(#clipPath8338-4)"
- transform="translate(276,136)">
- <path
- inkscape:connector-curvature="0"
- transform="matrix(-0.9045327,0.2506626,0.2506626,0.9045327,719.28646,-112.46507)"
- clip-path="none"
- sodipodi:nodetypes="ccccc"
- id="path4209-6"
- d="m 964.00012,754.69487 18.42881,7.46479 9.07107,-36.96447 -14.87031,4.83886 -12.62957,24.66082 z"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate" />
- <rect
- y="757.19519"
- x="-55"
- height="177"
- width="182"
- id="rect8315-2"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:none;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:25;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate" />
- </g>
- <g
- id="g8346-4"
- style="display:inline;filter:url(#filter8354-2);enable-background:new"
- clip-path="url(#clipPath8359-0)"
- transform="translate(276,136)">
- <path
- inkscape:connector-curvature="0"
- transform="matrix(-0.9045327,0.2506626,0.2506626,0.9045327,719.28646,-112.46507)"
- clip-path="none"
- sodipodi:nodetypes="ccccccc"
- id="path4207-7"
- d="m 910.14441,746.31415 32.61295,5.17393 -0.36119,-23.87619 7.18853,-29.68221 -8.45112,-5.26365 -21.82194,26.51077 -9.16723,27.13735 z"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate" />
- <rect
- y="696.19519"
- x="-22"
- height="176"
- width="165"
- id="rect8344-9"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:none;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:25;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate" />
- </g>
- <path
- inkscape:connector-curvature="0"
- style="display:inline;opacity:1;fill:#ada469;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- d="m 1036.164,1071.8338 c 6.7941,18.9028 10.4937,33.2997 11.8903,51.2119 1.3966,17.9123 -3.7827,51.8008 -2.9005,70.6561 0.8818,18.8452 8.1337,40.099 27.3446,48.9689 19.4189,8.9658 49.3193,10.2113 74.1199,-3.1456 24.8006,-13.357 57.401,-70.3255 70.9742,-97.3087 13.6239,-27.0839 38.7611,-114.4974 44.6608,-149.76859 5.8998,-35.27121 2.5506,-41.30077 -4.6174,-49.05549 2.6403,-27.84015 -1.4998,-54.93543 13.1096,-87.18618 -30.249,11.8257 -37.3823,40.1607 -48.3189,65.50508 -8.0009,-50.93293 0.2092,-71.27319 3.3189,-101.21936 -29.0647,14.77791 -42.8615,47.11402 -45,92.85714 -10.9239,-1.3042 -21.3914,-4.43423 -33.5714,-0.71429 -0.264,-46.02334 -1.4635,-76.88941 8.9106,-114.20649 -53.2554,21.02686 -62.9472,106.5941 -56.0535,112.77792 -10.8828,0.535 -21.371,-1.2973 -32.8571,2.85715 0.6389,-42.57135 -0.2605,-84.90861 -30,-122.85715 0,0 -30.958,80.92234 -31.4286,103.57143 -0.4705,22.64909 9.4516,40.16588 9.4516,40.16588 0,0 -8.568,36.74051 -6.2986,58.23223 2.2959,21.74142 20.4429,59.67622 27.2655,78.65812 z"
- id="path8848-3"
- sodipodi:nodetypes="czzzzzzcccccccccczczz" />
- <path
- inkscape:connector-curvature="0"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter3587-1);enable-background:accumulate"
- d="m 719.5,738.69519 18.31177,15.43196 44.41103,-15.38821 23.2772,-25.54375 11.46397,19.22065 30.67161,12.78354 25.09737,5.72837 L 892,723.19519 908.02309,747.02126 947,752.19519 l 10.24541,-6.19852 6.75471,8.6982 25.49988,11.00032 2,-40.5 L 955.94866,710.6576 923.45591,689.1305 883.0038,677.66492 861.69668,662.13148 840,685.19519 755.02878,638.61208 722,676.69519 l -2.5,62 z"
- id="path3635-9"
- sodipodi:nodetypes="cccccccccccccccccccccc"
- clip-path="url(#clipPath3631-6)"
- transform="translate(276,136)" />
- <path
- inkscape:connector-curvature="0"
- transform="translate(450.03125,73.843964)"
- style="display:inline;opacity:0.58775509;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:20.79999924;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;filter:url(#filter3898-1);enable-background:new"
- d="m 584,696.5 -6.5625,17.15625 c 0,0 -7.81152,20.36488 -15.6875,43.65625 -3.93799,11.64568 -7.88302,24.04145 -10.9375,35.125 -3.05448,11.08355 -5.33586,20.37986 -5.5,28.28125 -0.39807,19.16196 5.74653,34.8883 8.9375,41.75 -0.77153,3.55523 -1.99137,9.45432 -3.34375,18.09375 -1.92042,12.26821 -3.71827,27.15441 -2.375,39.875 1.38209,13.08835 6.81222,28.18765 12.59375,43.03125 5.78153,14.8436 12.05435,29.22711 15.21875,38.03125 6.63206,18.4519 9.99296,31.5763 11.3125,48.5 0.58135,7.4561 -0.24227,20.336 -1.25,33.375 -1.00773,13.039 -2.18661,26.3014 -1.6875,36.9688 0.98911,21.1398 9.32798,46.8347 33.375,57.9374 22.77483,10.5154 55.32682,11.7022 83.4375,-3.4374 16.15992,-8.7034 30.07634,-27.0976 43.375,-46.9063 13.29866,-19.8087 24.96917,-41.0534 31.9375,-54.9063 15.35292,-30.5212 39.39353,-115.46418 45.625,-152.7187 3.01859,-18.04653 3.92166,-29.06555 2.625,-38.03125 -0.97853,-6.76604 -3.82819,-12.1474 -6.875,-16.21875 2.04274,-27.50791 -0.73207,-51.36878 11.96875,-79.40625 L 840.75,763.375 l -23.8125,9.3125 c -17.48975,6.83753 -28.90164,19.04536 -36.59375,32.0625 -0.32251,0.54577 -0.56314,1.10776 -0.875,1.65625 0.22203,-22.51521 4.40784,-37.63759 6.59375,-58.6875 l 1.96875,-19 L 771,737.375 c -30.59449,15.55571 -45.69489,48.19321 -49.71875,90.21875 -4.24532,-0.62547 -8.8314,-1.01965 -13.8125,-0.84375 -0.29149,-39.18036 -0.39629,-67.03685 8.59375,-99.375 l 5.59375,-20.125 -19.4375,7.65625 c -30.90937,12.20394 -47.85954,41.93073 -56.625,68.375 -4.38273,13.22214 -6.74582,25.80121 -7.59375,35.9375 -0.23203,2.77373 -0.31106,5.31132 -0.3125,7.71875 -3.24187,-0.0364 -6.42052,0.13589 -10.0625,0.5 0.0416,-39.00473 -3.48424,-79.75415 -32.28125,-116.5 L 584,696.5 Z m 5.8125,43.8125 c 16.80691,30.64383 17.47451,63.96728 16.9375,99.75 l -0.21875,15.0625 12.03493,-6.53921 c 8.66205,-3.13302 19.56058,-0.22752 31.93382,-0.83579 l 14.67465,9.3566 -6.3309,-25.7941 c -0.0897,-0.22997 -0.22046,-0.41669 -0.25,-0.71875 -0.19951,-2.03986 -0.22232,-5.47307 0.125,-9.625 0.69464,-8.30386 2.78957,-19.58524 6.625,-31.15625 5.15532,-15.55294 13.48801,-31.19248 25.125,-42.53125 -4.68381,28.63798 -3.21559,60.25934 -3.01164,95.80514 l -2.76593,13.26164 15.49632,-7.59803 c 9.0294,-2.75771 17.18897,-0.34996 29.28125,1.09375 l 13.24632,9.44423 L 741.09375,840 c 1.44793,-30.97177 8.22149,-53.67808 20.71875,-68.875 -2.98688,19.77884 -5.43043,41.7848 0.3125,78.34375 l 1.06552,6.37318 -2.93815,11.51685 10.61711,-8.16818 9.18973,10.22198 -1.54828,-10.4636 L 781.9375,852 c 5.70102,-13.21149 10.17282,-26.21337 16.34375,-36.65625 0.95986,-1.62434 2.03153,-3.06436 3.0625,-4.5625 -3.68066,21.15535 -2.42716,40.20815 -4.09375,57.78125 l -4.68014,7.80698 7.39889,0.22427 c 3.22005,3.48361 3.8675,3.85068 4.5625,8.65625 0.695,4.80557 0.31862,14.40035 -2.5625,31.625 -5.56799,33.28792 -31.79272,123.1659 -43.6875,146.8125 -6.60491,13.1304 -18.02998,33.8957 -30.625,52.6563 -12.59502,18.7605 -27.35933,35.5338 -36,40.1874 -21.49052,11.5742 -48.7808,10.2602 -64.84375,2.8438 -14.37486,-6.637 -20.53812,-23.4494 -21.3125,-40 -0.38311,-8.188 0.61279,-21.3092 1.625,-34.4062 1.01221,-13.0971 11.28891,-22.5708 15.42339,-36.5626 5.37229,-18.1808 -1.44687,-36.5944 -12.5,-53.93745 -6.48655,-10.17778 -23.9768,-24.2579 -29.54839,-38.5625 -5.57159,-14.3046 -10.36751,-29.00315 -11.28125,-37.65625 -0.92621,-8.77113 0.4225,-23.02502 2.21875,-34.5 1.79625,-11.47497 3.84375,-20.28125 3.84375,-20.28125 l 9.42278,-3.6152 -10.48528,-3.8848 c 0,0 -8.49889,-15.3101 -8.09375,-34.8125 0.0711,-3.42316 1.83626,-12.72805 4.71875,-23.1875 2.88249,-10.45945 6.76466,-22.55271 
10.625,-33.96875 3.04439,-9.00308 5.78063,-16.60345 8.34375,-23.6875 z"
- id="path3669-2"
- clip-path="url(#clipPath3677-0)"
- sodipodi:nodetypes="ccssscsssssssssssssccccscccccccccsscccccccccccssscccccccccccccccsccccssssssssssssscccsssc" />
- <g
- id="g3628-8"
- clip-path="url(#clipPath3636-90)"
- transform="translate(276,136)">
- <path
- inkscape:connector-curvature="0"
- id="path8988-3"
- d="m 824.48651,818.48242 -49.49748,-15.55635 -26.87005,52.3259 41.01219,45.25484 49.49747,-38.18377 -14.14213,-43.84062 z"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter9024-1);enable-background:accumulate" />
- <path
- inkscape:connector-curvature="0"
- id="path8990-0"
- d="m 964.49365,855.25197 -55.15433,-46.66904 -43.84062,36.76955 33.94113,53.74011 7.07106,66.46804 -50.91168,35.35537 -41.0122,11.3137 -2.82842,46.669 56.56854,25.4559 63.63961,-76.3676 24.04163,-94.75227 8.48528,-57.98276 z"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter9020-8);enable-background:accumulate" />
- </g>
- <path
- inkscape:connector-curvature="0"
- style="display:inline;overflow:visible;visibility:visible;opacity:0.25;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter9044-0);enable-background:accumulate"
- d="m 1045.3322,1043.5779 60.8112,-26.8701 100.4091,31.1127 -63.6396,31.1127 -82.0244,-16.9706 -15.5563,-18.3847 z"
- id="path8992-1" />
- <path
- inkscape:connector-curvature="0"
- transform="translate(450.03125,73.843964)"
- style="display:inline;opacity:0.58775509;fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:20.79999924;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;filter:url(#filter4185-1);enable-background:new"
- d="m 583.0625,715.75 c -12.10609,34.44974 -26.7145,68.53333 -31.75,104.84375 -0.83208,14.92867 4.58915,29.15943 8.84375,43.0625 -5.91624,27.20126 -10.13681,56.89995 1.15625,83.125 13.51717,38.16085 35.00147,75.68215 32.42279,117.46825 -0.9483,29.2942 -9.01444,60.9941 5.38971,88.2817 10.19864,19.3348 33.13956,27.3117 53.96785,27.6676 27.86219,1.1741 56.46261,-11.6216 72.0009,-35.2613 22.59549,-29.3717 41.80051,-61.4973 55.23865,-96.0598 16.89053,-45.506 29.6718,-92.56072 37.93402,-140.3989 1.8244,-12.94106 3.10108,-27.46985 -4.57892,-38.82255 -3.43115,-7.33632 0.0421,-15.56014 -0.68457,-23.30977 0.674,-24.99466 4.01232,-50.66376 16.65332,-72.59648 -17.73313,6.4446 -35.07268,16.55971 -44.00307,33.86425 -3.93508,6.70955 -7.60482,13.57413 -11.37193,20.38575 -3.54999,-30.01408 3.71963,-59.64828 6.78125,-89.28125 -20.16604,9.05463 -36.87672,25.65522 -44.17495,46.682 -6.30463,15.58003 -8.80222,32.31718 -10.26255,49.03675 -8.25334,-1.51925 -16.68447,-2.10155 -25.0625,-1.5 -0.96308,-38.69787 -0.46696,-79.40715 10.96875,-115.90625 -18.68113,6.21776 -35.16621,18.73551 -45.62803,35.38723 -13.85254,20.87979 -21.2614,45.75395 -23.05947,70.61277 0.58534,4.32454 -0.0613,11.84009 -6.34375,9.875 -5.33118,0.0176 -10.62908,0.67883 -15.9375,1.09375 1.14784,-39.38148 -3.34144,-81.6282 -27.0625,-114.21875 -3.06071,-3.63717 -5.63685,-7.68438 -8.625,-11.34375 -0.9375,2.4375 -1.875,4.875 -2.8125,7.3125 z m 7.75,13.84375 c 18.56527,29.29629 22.4825,64.82012 22.125,98.875 0.20409,5.17526 -0.51656,11.8292 0.125,16.0625 12.31856,-6.10275 26.73912,-2.4399 39.78125,-2.1875 2.31712,1.22325 3.1921,1.65243 1.90625,-1.40625 -4.16455,-13.95285 -1.84828,-28.613 1.80504,-42.40764 6.36687,-26.29064 20.62828,-51.08798 42.81996,-67.02986 -8.61709,37.23706 -5.71658,76.56161 -6.09375,113.96875 12.25344,-6.9099 27.27879,-3.44613 40.03125,-0.25 3.39222,3.5348 2.28935,-0.72948 2.1875,-3.8125 -0.48309,-21.37058 4.13133,-43.06963 13.6875,-62.15625 5.96266,-10.68727 14.24338,-19.80379 22.4375,-28.875 -7.87156,33.8381 -9.2029,69.33593 -2.71875,103.5 1.72485,-1.41118 4.60681,-0.45414 5.65625,-0.375 9.68369,-21.23682 16.35112,-45.38062 34.89016,-60.74185 1.87329,-0.37122 -1.44818,8.52495 -1.48391,11.8981 -3.53488,21.84581 -3.2972,44.17323 -4.90625,66.25 -1.31238,1.37679 2.84835,2.32354 3.46875,4.0625 7.92311,10.5658 3.12294,24.83149 2.0916,36.78011 -7.06405,45.03355 -21.76553,88.37934 -35.57677,131.69714 -12.07476,32.9493 -30.7197,63.08 -51.23358,91.429 -10.95257,13.5514 -23.24472,27.8513 -40.84375,32.5 -20.15601,6.2413 -43.57595,5.1744 -61.96875,-5.6562 -17.28966,-12.3414 -21.02393,-35.7089 -19.26226,-55.6864 0.0488,-15.8262 2.37211,-27.8008 7.91747,-42.8053 5.54535,-15.0045 2.47105,-31.3317 -1.78021,-47.6333 -4.25126,-16.3016 -12.17903,-26.26002 -21.82158,-42.20417 -9.64255,-15.94415 -17.6369,-36.03734 -20.77217,-55.57713 -1.74948,-18.62714 2.89171,-37.12262 5.78125,-55.25 3.29623,-2.83696 -1.59799,-5.19659 -2.3125,-8.1875 -7.60113,-17.01508 -8.40747,-36.7749 -2.74234,-54.55998 7.1302,-25.0723 15.76087,-49.63241 24.67984,-74.12752 0.70833,1.30208 1.41667,2.60417 2.125,3.90625 z"
- id="path4149-7"
- sodipodi:nodetypes="ccccccccccccccccccccccccccccccccccccccccccccccccccczzzcccccc"
- clip-path="url(#clipPath4177-4)" />
- <path
- inkscape:connector-curvature="0"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter4130-8);enable-background:accumulate"
- d="m 735.05635,733.03834 2.75542,21.08881 44.41103,-15.38821 4.85063,-22.38975 -3.93617,-22.05222 -22.45163,-36.59301 -8.28004,30.30494 -17.34924,45.02944 z"
- id="path3902-8"
- sodipodi:nodetypes="cccccccc"
- clip-path="url(#clipPath3631-6)"
- transform="translate(276,136)" />
- <path
- inkscape:connector-curvature="0"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;filter:url(#filter4141-2);enable-background:accumulate"
- d="m 831.81321,730.29452 15.82237,14.90486 20.85473,2.89994 -1.59029,-39.92598 8.32561,-30.50842 -7.16499,-6.34106 -21.69669,20.9424 -14.55074,38.02826 z"
- id="path4135-9"
- sodipodi:nodetypes="cccccccc"
- clip-path="url(#clipPath3631-6)"
- transform="translate(276,136)" />
- <g
- id="g8367-1"
- style="filter:url(#filter8379-0)"
- clip-path="url(#clipPath8392-1)"
- transform="translate(276,136)">
- <path
- inkscape:connector-curvature="0"
- clip-path="none"
- sodipodi:nodetypes="ccccccc"
- id="path4145-5"
- d="m 910.14441,746.31415 32.61295,5.17393 -0.36119,-23.87619 7.18853,-29.68221 -8.45112,-5.26365 -21.82194,26.51077 -9.16723,27.13735 z"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate" />
- <rect
- y="650.19098"
- x="877.51953"
- height="172.53406"
- width="123.03658"
- id="rect8365-4"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:none;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:25;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate" />
- </g>
- <g
- id="g8400-9"
- style="filter:url(#filter8404-9)"
- clip-path="url(#clipPath8417-4)"
- transform="translate(276,136)">
- <path
- inkscape:connector-curvature="0"
- clip-path="none"
- sodipodi:nodetypes="ccccc"
- id="path4147-2"
- d="m 964.00012,754.69487 18.42881,7.46479 9.07107,-36.96447 -14.87031,4.83886 -12.62957,24.66082 z"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate" />
- <rect
- y="677.06104"
- x="924.89569"
- height="125.1579"
- width="142.12846"
- id="rect8398-5"
- style="display:inline;overflow:visible;visibility:visible;opacity:1;fill:none;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:25;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1;marker:none;marker-start:none;marker-mid:none;marker-end:none;enable-background:accumulate" />
- </g>
- </g>
+ <g transform="matrix(.9991 .27421 -.11493 2.3838 2962.6 1209.8)" clip-path="url(#fx)">
+ <path d="M1056.2-278.8c4.145-1.479 10 3.125 10 3.125.899.28 2.725-.894 2.624-1.686 0 0-1.55-1.86-.374-2.939s5.296 1.507 7.5 1.625 5.562-.23 7-.75 1.113-1.425 2.625-1.75 5.119 1.038 7.06 1.169 4.649.334 5.815-.169.178-1.16 1.875-1.875 7.76-.957 9.625-.125 1.81.52 2.625 3 7.44 5.163-1.125 13.375-59.378 13.786-65.625 2.75 6.23-14.271 10.375-15.75z" enable-background="new" filter="url(#fw)" opacity=".75"/>
+ <path d="M1058.5-275.43c4.145-1.479 10 3.125 10 3.125.899.28 2.725-.894 2.624-1.686 0 0-1.55-1.86-.374-2.939s5.296 1.507 7.5 1.625 5.562-.23 7-.75 1.113-1.425 2.625-1.75 5.119 1.038 7.06 1.169 4.649.334 5.815-.169.178-1.16 1.875-1.875 7.76-.957 9.625-.125 1.81.52 2.625 3 7.44 5.163-1.125 13.375-59.378 13.786-65.625 2.75 6.23-14.271 10.375-15.75z" enable-background="new" filter="url(#fv)" opacity=".75"/>
</g>
- <path
- style="fill:#f8d615;fill-opacity:1;fill-rule:evenodd;stroke:#f8d615;stroke-width:17.84425545;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#Arrow1Send)"
- d="M 544.23337,203.09259 3443.746,100.92806"
- id="path7167"
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="cc" />
- <path
- style="display:inline;fill:#f8d615;fill-opacity:1;fill-rule:evenodd;stroke:#f8d615;stroke-width:18;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#Arrow1Send-4);enable-background:new"
- d="M 527.91203,584.39421 3442.4188,1000.8355"
- id="path7167-9"
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="cc" />
- <text
- xml:space="preserve"
- style="font-style:normal;font-weight:normal;font-size:40px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#f83615;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
- x="80.219048"
- y="107.38741"
- id="text8200"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- id="tspan8202"
- x="80.219048"
- y="107.38741"
- style="font-size:50px;fill:#f83615;fill-opacity:1">CROP_DEFAULT</tspan></text>
- <text
- xml:space="preserve"
- style="font-style:normal;font-weight:normal;font-size:45.31394196px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#f80000;fill-opacity:0;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- x="3861.3669"
- y="1281.7198"
- id="text8200-4"
- sodipodi:linespacing="125%"
- transform="scale(0.96105877,1.0405191)"><tspan
- sodipodi:role="line"
- id="tspan8202-5"
- x="3861.3669"
- y="1281.7198"
- style="font-size:56.64243317px;fill:#f80000;fill-opacity:0">COMPOSE_PADDED</tspan></text>
- <text
- xml:space="preserve"
- style="font-style:normal;font-weight:normal;font-size:45.31394196px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#f8d615;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- x="3615.1545"
- y="49.156631"
- id="text8200-4-9"
- sodipodi:linespacing="125%"
- transform="scale(0.96105877,1.0405191)"><tspan
- sodipodi:role="line"
- id="tspan8202-5-3"
- x="3615.1545"
- y="49.156631"
- style="font-size:50px;fill:#f8d615;fill-opacity:1">COMPOSE_ACTIVE</tspan></text>
- <text
- xml:space="preserve"
- style="font-style:normal;font-weight:normal;font-size:45.31394196px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#f83615;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- x="2429.1526"
- y="-3.1657715"
- id="text8200-4-5"
- sodipodi:linespacing="125%"
- transform="scale(0.96105878,1.0405191)"><tspan
- sodipodi:role="line"
- id="tspan8202-5-7"
- x="2429.1526"
- y="-3.1657715"
- style="font-size:49.99999958px;fill:#f83615;fill-opacity:1">COMPOSE_DEFAULT</tspan></text>
- <text
- xml:space="preserve"
- style="font-style:normal;font-weight:normal;font-size:45.31394196px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#f815bb;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- x="3681.5449"
- y="1289.9539"
- id="text8200-4-9-3"
- sodipodi:linespacing="125%"
- transform="scale(0.96105877,1.0405191)"><tspan
- sodipodi:role="line"
- id="tspan8202-5-3-6"
- x="3681.5449"
- y="1289.9539"
- style="font-size:50px;fill:#f815bb;fill-opacity:1">COMPOSE_PADDED</tspan></text>
- <text
- xml:space="preserve"
- style="font-style:normal;font-weight:normal;font-size:50px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new;"
- x="2438.0618"
- y="1368.4291"
- id="text8200-4-9-3-5"
- sodipodi:linespacing="125%"
- transform="scale(0.96105877,1.0405191)"><tspan
- sodipodi:role="line"
- id="tspan8202-5-3-6-3"
- x="2438.0618"
- y="1368.4291"
- style="font-size:50px;fill:#000000;fill-opacity:1;">COMPOSE_BONDS</tspan></text>
- <text
- xml:space="preserve"
- style="font-style:normal;font-weight:normal;font-size:40px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- x="8.0815096"
- y="1438.8961"
- id="text8200-4-9-3-5-6"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- id="tspan8202-5-3-6-3-2"
- x="8.0815096"
- y="1438.8961"
- style="font-size:50px;fill:#000000;fill-opacity:1">CROP_BONDS</tspan></text>
- <text
- xml:space="preserve"
- style="font-style:normal;font-weight:normal;font-size:40px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- x="1455.4426"
- y="-26.808125"
- id="text8200-4-9-3-5-6-9"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- id="tspan8202-5-3-6-3-2-1"
- x="1455.4426"
- y="-26.808125"
- style="font-size:50px;fill:#000000;fill-opacity:1">overscan area</tspan></text>
- <text
- xml:space="preserve"
- style="font-style:normal;font-weight:normal;font-size:40px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#f8d615;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- x="179.63055"
- y="385.38785"
- id="text8200-4-9-2"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- id="tspan8202-5-3-7"
- x="179.63055"
- y="385.38785"
- style="font-size:50px;fill:#f8d615;fill-opacity:1">CROP_ACTIVE</tspan></text>
- <text
- xml:space="preserve"
- style="font-style:normal;font-weight:normal;font-size:40px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- x="636.67419"
- y="-138.84549"
- id="text8200-4-9-3-5-6-0"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- id="tspan8202-5-3-6-3-2-9"
- x="636.67419"
- y="-138.84549"
- style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:70px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';fill:#000000;fill-opacity:1">DATA SOURCE</tspan></text>
- <text
- xml:space="preserve"
- style="font-style:normal;font-weight:normal;font-size:45.31394196px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;enable-background:new"
- x="3178.7151"
- y="-129.06131"
- id="text8200-4-9-3-5-6-0-3"
- sodipodi:linespacing="125%"
- transform="scale(0.96105877,1.0405191)"><tspan
- sodipodi:role="line"
- id="tspan8202-5-3-6-3-2-9-6"
- x="3178.7151"
- y="-129.06131"
- style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:69.99999978px;font-family:sans-serif;-inkscape-font-specification:'sans-serif Bold';fill:#000000;fill-opacity:1">DATA SINK</tspan></text>
- <flowRoot
- xml:space="preserve"
- id="flowRoot7469"
- style="fill:black;stroke:none;stroke-opacity:1;stroke-width:1px;stroke-linejoin:miter;stroke-linecap:butt;fill-opacity:1;font-family:sans-serif;font-style:normal;font-weight:normal;font-size:57.5px;line-height:125%;letter-spacing:0px;word-spacing:0px;"><flowRegion
- id="flowRegion7471"><rect
- id="rect7473"
- width="4297.5474"
- height="1851.537"
- x="-52.635666"
- y="70.623535"
- style="font-size:57.5px;" /></flowRegion><flowPara
- id="flowPara7475"></flowPara></flowRoot> </g>
+ </g>
+ <path d="M3603.7 633.68c-3.826-60.621-16.906-121.51-17.254-181.22-.187-32.048 3.291-63.757 13.834-94.91 36.554-156.68 117.6-203.23 186.99-219.47 87.416-26.435 185.96 43.047 234.7 228.91 54.432 181.72 56.997 414.01 81.07 622.74 29.605 305.05 55.09 614.78 60.735 928.25-3.08 187.6-8.474 396.35-60.847 547.4-48.299 120.83-123.49 120.1-188.13 141.58-91.07 11.17-185.4-38.742-263.27-154.04-65.144-91.037-96.274-272.3-97.832-446.36-8.437-191.66 26.542-369.07 51.914-545.07 7.513-198.58 9.466-398.92 9.708-598.39-.841-77.252-7.13-153.13-11.612-229.41z" enable-background="accumulate" fill="#101414"/>
+ <path transform="matrix(1.0057 0 0 2.3995 3249.4 125.01)" d="M311.83 415.43l9.9 121.62-60.105 136.47 15.556 174.66c15.613 61.879 32.185 98.669 74.376 117.05 4.32-36.24-38.612-142.96-39.243-189.12-.631-46.184 10.83-108.61 30.678-158.3 20.048-50.192 36.897-44.846 42.125-92.593s-17.426-149.39-17.426-149.39l-55.86 39.598z" clip-path="url(#y)" enable-background="accumulate" fill="#fff" filter="url(#fu)" opacity=".25"/>
+ <path d="m3987.6 1371.5s16.85 88.825 28.865 129.46c12.014 40.638 53.027 134.48 53.027 134.48l52.896-306.15" enable-background="accumulate" fill="url(#ft)"/>
+ <path transform="matrix(1.0057 0 0 2.3995 3249.4 125.01)" d="m730.32 536.57c0 8.485 42.548 58.468 42.548 58.468l12.607-28.77-55.154-29.698z" clip-path="url(#fs)" enable-background="accumulate" fill="#fff" filter="url(#fr)" opacity=".08"/>
+ </g>
+ <g transform="matrix(1.0057 0 0 2.3995 3424.4 -24.137)" clip-path="url(#fq)" enable-background="new">
+ <g transform="translate(-174.03 62.156)" filter="url(#aq)">
+ <g filter="url(#g)">
+ <path d="M425.88 476.99c10.805-1.479 24.744 3.354 44.643 3.214s57.453-16.91 82.143-17.143 62.752 12.284 79.286 15 22.848-.158 27.5 7.857 1.927 10.747-10.357 20.714-40.79 12.636-66.071 12.857c-25.282.221-70.381 7.079-95.357 3.93s-56.938-7.824-68.929-17.858-19.851-16.732-17.5-23.929 13.837-3.164 24.643-4.643z" enable-background="new" fill="#fff" fill-rule="evenodd"/>
+ <path d="M343.65 412.6h381.84v181.02H343.65z" enable-background="accumulate" fill="none"/>
+ </g>
+ <g filter="url(#g)">
+ <path d="m861.17 390.2c-10.462 9.714-86.98 19.005-100.71 29.286s-14.753 12.888-12.143 20 6.545 9.406 25.714 8.571 98.571-27.622 98.571-21.429l-11.429-36.429z" enable-background="new" fill="#fff" fill-rule="evenodd"/>
+ <path d="M702.86 344.82h207.89v162.63H702.86z" enable-background="accumulate" fill="none"/>
+ </g>
+ </g>
+ <g enable-background="new" opacity=".18">
+ <g transform="translate(-174.03 62.156)" filter="url(#g)">
+ <path d="M425.88 476.99c10.805-1.479 24.744 3.354 44.643 3.214s57.453-16.91 82.143-17.143 62.752 12.284 79.286 15 22.848-.158 27.5 7.857 1.927 10.747-10.357 20.714-40.79 12.636-66.071 12.857c-25.282.221-70.381 7.079-95.357 3.93s-56.938-7.824-68.929-17.858-19.851-16.732-17.5-23.929 13.837-3.164 24.643-4.643z" enable-background="new" fill="#fff" fill-rule="evenodd"/>
+ <path d="M343.65 412.6h381.84v181.02H343.65z" enable-background="accumulate" fill="none"/>
+ </g>
+ <g transform="translate(-174.03 62.156)" filter="url(#g)">
+ <path d="m861.17 390.2c-10.462 9.714-86.98 19.005-100.71 29.286s-14.753 12.888-12.143 20 6.545 9.406 25.714 8.571 98.571-27.622 98.571-21.429l-11.429-36.429z" enable-background="new" fill="#fff" fill-rule="evenodd"/>
+ <path d="M702.86 344.82h207.89v162.63H702.86z" enable-background="accumulate" fill="none"/>
+ </g>
+ </g>
+ </g>
+ <g transform="matrix(1.0057 0 0 2.3995 2971.9 -201.33)" fill-rule="evenodd">
+ <path transform="translate(276 136)" d="m582.66-7.418l113.14 86.267 108.89 258.8 38.184 207.89 120.21 91.924s-12.728-287.09-19.799-313.96-149.91-393.15-149.91-393.15l-210.72 62.225z" clip-path="url(#fp)" enable-background="accumulate" filter="url(#fo)" opacity=".75"/>
+ <path d="m964.14 239.6s8.677 10.897 24.107 11.964c15.43 1.068 49.722-39.953 70.179-52.143 20.479-12.204 47.046-26.602 63.929-20.357 16.882 6.245 22.158 26.436 27.857 48.036 5.7 21.6 6.719 61.814-2.679 92.857-9.397 31.043-50.502 73.104-65.356 103.39s-11.607 39.821-11.607 39.821" enable-background="accumulate" fill="url(#el)"/>
+ <path d="m1124.5 207.63c-15.893-0.893-49.719 12.106-66.071 24.286-16.439 12.244-29.221 24.114-29.286 52.143-0.065 28.206 13.119 39.076 29.107 46.964s33.686 7.12 51.964-11.786c18.278-18.905 14.286-111.61 14.286-111.61z" enable-background="new" fill="url(#ek)"/>
+ <ellipse transform="matrix(.94347 -.12399 .14401 1.0958 451.95 134.6)" cx="385" cy="237.01" rx="86.429" ry="73.929" clip-path="url(#fn)" enable-background="accumulate" fill="url(#ef)" filter="url(#fm)" opacity=".75"/>
+ <path transform="translate(450.03 73.844)" d="m527.61 407.45s-122.04 38.403-187.51 9.632c-65.473-28.772-74.377-124.72-74.377-124.72s73.382-80.504 129.92-83.615c55.827-3.072 90.574 20.143 114.87 65.852 24.352 45.813 17.101 132.85 17.101 132.85z" enable-background="accumulate" fill="url(#fl)" mask="url(#fk)"/>
+ <path d="m772.17 393.35s36.218-27.382 51.607-35.893c15.177-8.393 25.714-11.607 35.893-11.607l-15.536 66.964" enable-background="accumulate" fill="url(#ee)"/>
+ <circle transform="translate(449.5 74.915)" cx="409.29" cy="306.65" r="36.25" enable-background="accumulate" fill="url(#ed)"/>
+ <path transform="translate(276 136)" d="m311.83 415.43l9.9 121.62-60.105 136.47 15.556 174.66c15.613 61.879 32.185 98.669 74.376 117.05 4.32-36.24 8.682-72.368-31.243-223.12l17.678-69.296 72.125-138.59-42.426-158.39-55.86 39.598z" clip-path="url(#y)" enable-background="accumulate" fill="#fff" filter="url(#fj)" opacity=".3"/>
+ <path d="m635.21 581.13c-14.142 12.728 39.233 34.58 76.368 24.042s104.64-35.564 103.24-79.196c-1.407-43.632-76.368-128.69-76.368-128.69l-103.24 183.85z" enable-background="accumulate" filter="url(#fi)" opacity=".5"/>
+ <circle transform="translate(449.67 74.915)" cx="410" cy="306.65" r="23.214" enable-background="accumulate" fill="url(#ec)"/>
+ <circle transform="translate(452 73.487)" cx="414.29" cy="303.08" r="7.5" enable-background="accumulate" fill="#fff" filter="url(#fh)" stroke="#000" stroke-linejoin="bevel"/>
+ <path d="m789.32 478.35s7.023 19.569-1.071 35-42.323 38.988-67.5 50c-25.31 11.07-85.473 32.964-101.79 41.964-16.461 9.082-18.214 12.679-18.214 12.679s-7.147-19.064 28.75-51.786c36.172-32.972 142.03-48.05 159.82-87.857z" enable-background="accumulate" fill="url(#eb)"/>
+ </g>
+ <g enable-background="new">
+ <g transform="matrix(1.0057 0 0 2.3995 3757 -22.424)" fill-rule="evenodd">
+ <path transform="translate(-329.81)" d="M179.64 267.36c-22.41 39.703-60.616 115.78-69.286 149.64-8.647 33.775-8.772 66.417-.357 86.429 8.36 19.882 26.164 35.633 40.714 41.429-.597-14.376 14.373-43.286 72.857-72.5 58.626-29.285 78.382-27.131 103.57-47.143 25.63-20.362 8.206-79.647 3.214-93.929s-1.236-3.38-1.946-5.093c-10.689-25.816-34.214-54.43-64.483-64.55s-65.018-4.848-84.286 5.714z" clip-path="url(#x)" fill="url(#m)"/>
+ <ellipse transform="rotate(28.068 -88.085 -332.1)" cx="183.57" cy="338.08" rx="64.716" ry="134.01" enable-background="accumulate" fill="url(#dz)"/>
+ <ellipse transform="rotate(28.068 -43.578 -333.81)" cx="183.57" cy="338.08" rx="64.716" ry="134.01" enable-background="accumulate" fill="url(#fg)"/>
+ </g>
+ <path transform="matrix(1.0057 0 0 2.3995 3425.3 -22.424)" d="M179.64 267.36c-22.41 39.703-60.616 115.78-69.286 149.64-8.647 33.775-8.772 66.417-.357 86.429 8.36 19.882 26.164 35.633 40.714 41.429-.597-14.376 14.373-43.286 72.857-72.5 58.626-29.285 78.382-27.131 103.57-47.143 25.63-20.362 8.206-79.647 3.214-93.929s-1.236-3.38-1.946-5.093c-10.689-25.816-34.214-54.43-64.483-64.55s-65.018-4.848-84.286 5.714z" clip-path="url(#x)" enable-background="new" fill="none" filter="url(#hd)" stroke="url(#l)" stroke-width="20.8"/>
+ </g>
+ <g transform="matrix(1.0057 0 0 2.3995 2971.9 -201.33)" fill-rule="evenodd">
+ <circle transform="translate(452.56 72.581)" cx="310.71" cy="398.08" r="19.704" enable-background="accumulate" stroke="#000" stroke-linejoin="bevel"/>
+ <circle transform="translate(450.56 72.581)" cx="310.71" cy="398.08" r="19.704" enable-background="accumulate" fill="url(#m)" filter="url(#hc)" stroke="url(#l)" stroke-width="20.8"/>
+ <circle transform="translate(450.56 72.581)" cx="310.71" cy="398.08" r="19.704" enable-background="accumulate" fill="url(#dy)"/>
+ <ellipse transform="rotate(-4.471 1823.1 -5529.2)" cx="429.57" cy="377.43" rx="72.08" ry="44.548" enable-background="accumulate" fill="url(#dx)" filter="url(#hb)"/>
+ <ellipse transform="matrix(1.4358 -.07 .07 1.4358 235.18 -63.865)" cx="437.7" cy="391.22" rx="36.612" ry="22.627" enable-background="accumulate" fill="url(#dw)" filter="url(#ha)"/>
+ <g transform="translate(450.03 73.844)" enable-background="new" filter="url(#gz)">
+ <circle cx="413.66" cy="401.83" r="3.214" enable-background="accumulate" stroke="url(#dv)"/>
+ <circle transform="translate(13.125 8.125)" cx="413.66" cy="401.83" r="3.214" enable-background="accumulate" stroke="url(#du)"/>
+ <circle transform="translate(32.946 7.5)" cx="413.66" cy="401.83" r="3.214" enable-background="accumulate" stroke="url(#ej)"/>
+ <circle transform="translate(24.911 -10.268)" cx="413.66" cy="401.83" r="3.214" enable-background="accumulate" stroke="url(#ei)"/>
+ <circle transform="translate(47.589 -.625)" cx="413.66" cy="401.83" r="3.214" enable-background="accumulate" stroke="url(#eh)"/>
+ </g>
+ </g>
+ <g fill="none" stroke="#000">
+ <path transform="matrix(1.0057 0 0 2.3995 2971.9 -201.33)" d="M896.2 482.93c.985 4.35 4.537 6.18 7.387 7.892 4.46 2.513 6.52 1.522 9.154-.758 1.602-1.921 10.683-4.698 15.594-7.07 4.33-1.46 8.904-5.36 13.385-8.335 3.395-1.627 5.347.355 7.829 1.01 2.944.717 4.411 2.172 6.06 3.536 2.397 1.175-.927 3.143 3.284 4.293 1.19.218 2.417.577 3.283-.505" enable-background="new"/>
+ <path transform="matrix(1.0057 0 0 2.3995 2971.9 -201.33)" d="M910.85 475.35c2.315-.032 3.178.643 5.493-.82 3.455-3.082 5.402-3.146 7.955-4.42 3.026-1.315 6.535 8.152 10.102 9.849 2.395-.822 1.289 1.794 1.452 2.651.057 2.647 2.807 3.679 4.356 5.43 3.316 2.256 7.375 6.296 11.112 5.303 6.445-2.93 10.28-1.281 16.29-7.386.703-1.182-.585-6.895 3.093-7.198 2.524.254 4.166.05 6.06.569 5.442 2.117 7.738 6.45 14.71 7.955 6.184.966 7.613 3.794 13.89 5.05M876.98 483.52c2.399-.794 6.106 4.192 8.173 7.046.593 2.68 1.154 5.486.758 12.122.785 2.417 2.68 3.03 4.798 3.283 3.117-.537 5.877-1.325 7.324-3.03 1.871-1.942 5.312 2.393 8.08 4.04 3.61 1.912 7.775 1.979 11.87 2.273 1.703-.231 2.37 4.515 3.283 8.08.384 4.379-.886 6.897-1.768 9.85-.294 2.496 2.988 3.53 6.313 4.546 3.183.74 6.545 1.661 9.092 1.767 5.142.875 8.088 2.69 12.122 4.04 2.239.817 3.26 2.243 4.545 3.536" enable-background="new"/>
+ </g>
+ <g transform="matrix(.9991 .27421 -.11493 2.3838 2962.6 1209.8)" enable-background="new" fill-rule="evenodd" mask="url(#gy)">
+ <path d="M1111.48-285.971l-3.937 1.875c-.041.01-.1.02-.125.031-.42.213-.165.1-.657.312-.486.21-1.737.585-4.093 1.47-3.332 1.25-5.805 2.15-7 3.062-1.537.021-3.72.233-5.657.719a227.677 227.677 0 0 1-6.75 1.593c-1.894.42-1.675.642-2.875.875-1.296.252-1.721-.009-5.437.782-3.49.742-8.895 1.93-10.156 2.687-1.584-.18-3.868-.322-5.844-.031-3.04.447-4.916.673-6.844.906-.655.08-1.04.2-1.343.281-.427.132-.686.26-1.375.344-1.312.16-1.763-.157-5.532.281-3.554.413-9.005 1.273-10.25 1.938-1.599-.297-3.857-.534-5.843-.344-3.06.293-4.972.484-6.907.656-1.934.173-1.688.423-2.906.532-1.316.117-1.76-.164-5.531.25-3.542.388-9.008 1.209-10.281 1.875-1.6-.295-3.887-.507-5.875-.313-3.058.3-4.941.48-6.875.656-.658.06-1.04.179-1.344.25-.428.12-.683.218-1.375.282-1.316.12-1.76-.195-5.531.218-3.556.39-9.006 1.24-10.25
+1.907-1.599-.295-3.86-.524-5.844-.313-3.056.325-4.974.526-6.906.719s-1.69.44-2.906.562c-1.315.132-1.763-.164-5.532.282-3.538.418-8.977 1.292-10.25 1.968-1.597-.28-3.86-.42-5.843-.187-3.052.358-4.945.568-6.875.781-.657.073-1.041.173-1.344.25-.427.127-.685.267-1.375.344-1.314.146-1.768-.174-5.531.312-3.55.46-8.979 1.42-10.22 2.125-1.592-.244-3.833-.381-5.812-.125-3.047.395-4.95.649-6.875.907-1.924.257-1.726.493-2.937.656-1.31.176-1.748-.105-5.5.469-3.525.538-8.924 1.699-10.188 2.437-1.588-.203-3.846-.255-5.813.094-3.026.536-4.899.861-6.812 1.187-.65.111-1.014.271-1.313.375-.42.165-.663.332-1.344.469-1.294.262-1.727-.006-5.437.813-3.499.771-8.846 2.382-10.062 3.218-1.563-.077-3.758.086-5.688.594-2.972.783-4.817 1.232-6.687 1.75s-1.667.767-2.844 1.094c-1.272.353-1.697.107-5.344 1.187-3.424 1.015-8.65 2.934-9.875 3.844-1.539.013-3.72.272-5.625.875-2.93.928-4.75 1.459-6.594
+2.063-.626.205-.991.392-1.28.53-.408.215-.654.41-1.313.626-1.255.411-1.686.189-5.281 1.437-3.39 1.178-8.595 3.214-9.782 4.157-1.524.06-3.65.395-5.53 1.062-2.898 1.028-4.7 1.676-6.532 2.313-1.832.637-1.628.848-2.781 1.25-1.247.434-1.664.2-5.22 1.562-3.338 1.28-8.486 3.483-9.687 4.469-1.507.108-3.635.499-5.5 1.219a1047.26 1047.26 0 0 1-6.437 2.469c-.617.233-.997.442-1.281.593v.031l-8 3.188-12.476 3.492 7.93 19.278c-.592 1.973 12.545-4.739 12.545-4.739.227-.144.45-.272.72-.375 1.08-.41 2.17-.215 6-1.687 3.828-1.472 5.223-2.005 5.905-2.406.68-.4 1.612-.88 2.22-1.531 1.826-.138 3.57-.494 4.937-1 2.968-1.1 4.875-1.807 6.78-2.47 1.907-.662 2.355-1.414 3.407-1.78 1.092-.38 2.195-.166 6.063-1.532 3.867-1.366 5.283-1.827 5.968-2.218.702-.4 1.701-.933 2.313-1.594 1.97-.055 3.817-.385 5.281-.875 3.002-1.005 4.926-1.622 6.844-2.25 1.538-.504 2.174-1.047 2.906-1.438.23-.134.476-.253.75-.343 1.098-.36
+2.181-.082 6.094-1.313 3.912-1.231 5.366-1.673 6.062-2.031.694-.357 1.63-.793 2.25-1.406 1.866-.023 3.636-.267 5.032-.688 3.03-.913 4.992-1.43 6.937-1.969 1.945-.538 2.426-1.264 3.5-1.562 1.114-.31 2.22.007 6.188-1.031 3.967-1.039 5.417-1.433 6.125-1.75.734-.33 1.813-.754 2.437-1.375 1.998.116 3.857-.02 5.344-.375 3.078-.735 5.083-1.101 7.062-1.5 1.588-.32 2.245-.79 3-1.094a3.4 3.4 0 0 1 .75-.25c1.134-.23 2.305.209 6.344-.5 4.04-.71 5.5-.927 6.219-1.188.716-.26 1.704-.567 2.344-1.093 1.924.239 3.748.224 5.187 0 3.127-.488 5.155-.701 7.156-.97 2.002-.267 2.49-.944 3.594-1.093 1.147-.154 2.276.302 6.344-.219 4.068-.52 5.56-.695 6.281-.937.737-.247 1.798-.586 2.438-1.125 2.05.335 3.973.398 5.5.218 3.142-.368 5.18-.559 7.187-.78 1.611-.179 2.265-.609 3.031-.845.241-.085.495-.155.782-.187 1.15-.128 2.301.347 6.375-.125s5.559-.61 6.28-.844c.72-.232 1.701-.473 2.345-.969 1.936.334 3.77.405
+5.219.25 3.146-.334 5.177-.518 7.187-.718 2.01-.2 2.484-.827 3.594-.938 1.15-.115 2.296.365 6.375-.062s5.589-.562 6.312-.782c.74-.223 1.796-.513 2.438-1.03 2.057.398 4.002.493 5.531.343 3.149-.308 5.176-.473 7.188-.656 1.614-.147 2.263-.56 3.03-.781.242-.081.494-.13.782-.157 1.152-.105 2.293.393 6.375 0s5.589-.53 6.312-.75c.721-.218 1.7-.447 2.344-.937 1.938.35 3.769.454 5.219.312 3.149-.308 5.176-.473 7.187-.656 2.012-.183 2.515-.838 3.625-.937 1.153-.104 2.293.384 6.375 0 4.083-.385 5.59-.501 6.313-.72.74-.222 1.796-.514 2.437-1.03 2.058.401 4.003.503 5.532.343 3.146-.328 5.177-.522 7.187-.718 1.613-.158 2.266-.632 3.031-.875.241-.088.464-.122.75-.157 1.149-.14 2.317.34 6.375-.25 4.059-.59 5.562-.777 6.282-1.03.716-.254 1.674-.559 2.312-1.095 1.92.212 3.72.152 5.156-.093 3.12-.533 5.112-.929 7.094-1.313 1.982-.384 2.474-1.04 3.563-1.281 1.128-.25 2.27.116 6.25-.875s5.43-1.42
+6.125-1.781c.722-.376 1.761-.87 2.375-1.531 1.963-.012 3.793-.292 5.218-.844 2.952-1.145 4.874-1.87 6.688-2.75 1.456-.707 2.32-1.702 2.531-2 .212-.298.1-.729.125-.75.043-.035.34-.094.5-.438.86-1.847 2.323-5.627 2.438-6.312.113-.682.168-1.353.218-1.75.03-.23-.147-.88-.125-.938.031-.082.289-.25.344-.5.266-1.198.09-2.207-.125-3.625-.214-1.417-.972-4.614-1.625-5.469-.659-.861-1.225-1.01-1.75-1z" enable-background="new" fill="#bcb786"/>
+ <g clip-path="url(#gx)">
+ <path d="M1107.4-284.05c-.419.213-.156.094-.647.306-.486.21-1.724.574-4.08 1.459-3.33 1.25-5.83 2.153-7.026 3.066-1.536.021-3.72.233-5.656.719a227.709 227.709 0 0 1-6.75 1.593c-1.895.42-1.676.643-2.875.875-1.297.252-1.721-.009-5.438.782-3.49.742-8.894 1.93-10.156 2.687-1.583-.18-3.867-.322-5.843-.031-3.04.447-4.917.673-6.844.906-.655.08-1.041.201-1.344.282-.426.131-.686.26-1.375.343-1.311.16-1.762-.157-5.531.282-3.554.413-9.005 1.272-10.25 1.937-1.599-.297-3.858-.534-5.844-.344-3.059.294-4.972.484-6.906.657-1.934.172-1.689.422-2.906.53-1.317.118-1.76-.163-5.532.25-3.541.39-9.007 1.21-10.28 1.876-1.6-.295-3.888-.507-5.876-.313-3.058.3-4.94.48-6.875.657-.657.06-1.04.178-1.343.25-.428.118-.684.218-1.375.28-1.316.121-1.76-.194-5.532.22-3.556.39-9.005 1.239-10.25
+1.906-1.598-.294-3.86-.524-5.843-.313-3.056.326-4.974.526-6.907.719-1.932.192-1.69.44-2.906.562-1.315.132-1.763-.164-5.53.282-3.54.418-8.979 1.292-10.25 1.969-1.599-.282-3.86-.42-5.845-.188-3.052.358-4.945.568-6.875.781-.656.073-1.04.173-1.344.25-.426.127-.684.267-1.375.344-1.313.146-1.767-.174-5.53.313-3.55.458-8.98 1.419-10.22 2.125-1.593-.245-3.834-.382-5.812-.125-3.048.394-4.95.648-6.875.906-1.925.258-1.726.493-2.938.656-1.31.176-1.747-.104-5.5.469-3.524.538-8.923 1.699-10.188 2.437-1.587-.203-3.845-.254-5.812.094-3.026.536-4.9.862-6.813 1.187-.65.111-1.013.271-1.312.375-.42.165-.664.332-1.344.47-1.295.26-1.727-.007-5.438.812-3.498.772-8.846 2.383-10.062 3.219-1.562-.078-3.757.085-5.687.593-2.972.783-4.818 1.232-6.688 1.75s-1.666.768-2.843 1.094c-1.273.353-1.697.107-5.344 1.188-3.425 1.014-8.65 2.933-9.875 3.843-1.539.013-3.72.273-5.625.875-2.931.928-4.75 1.459-6.594
+2.063-.627.205-.992.392-1.281.531-.408.214-.653.409-1.313.625-1.254.412-1.686.19-5.28 1.438-3.39 1.177-8.596 3.213-9.782 4.156-1.524.06-3.65.395-5.531 1.062-2.898 1.029-4.7 1.676-6.531 2.313-1.833.637-1.628.848-2.782 1.25-1.246.434-1.663.2-5.218 1.562-3.34 1.28-8.488 3.483-9.688 4.47-1.507.107-3.636.498-5.5 1.218a1044.752 1044.752 0 0 1-6.437 2.469c-.617.233-.997.442-1.282.593v1.094c.112-.222.386-.817.907-1.094.698-.37 4.813-1.993 6.812-2.718 1.657-.602 4.154-1.329 5.969-1.313.302.003.588.051.844.094 1.842.308 7.468 1.562 7.468 1.562s-6.233-1.646-7.03-1.843c-.191-.048-.536-.07-.97-.063 1.146-.87 4.762-2.393 7.344-3.437 2.839-1.148 3.117-1.252 5.063-1.657 2.008-.417 3.156-.5 3.156-.5s-.082-.6.969-1.125c.705-.351 4.887-1.892 6.906-2.562 1.952-.648 5.057-1.359 6.875-1 1.863.367 7.531 1.812 7.531 1.812s-6.287-1.87-7.094-2.093c-.193-.054-.53-.086-.968-.094 1.158-.833 4.794-2.195 7.406-3.156
+2.87-1.056 3.167-1.162 5.125-1.532 1.853-.35 2.859-.425 3.031-.437.114-.217.377-.81.906-1.063.71-.338 4.926-1.712 6.97-2.312 1.692-.497 4.24-1.037 6.093-.906.308.021.613.097.875.156 1.881.424 7.594 2.031 7.594 2.031s-6.342-2.065-7.157-2.312c-.194-.06-.557-.104-1-.125 1.17-.798 4.863-2.057 7.5-2.938 2.898-.968 3.233-1.003 5.22-1.281 2.049-.287 3.187-.313 3.187-.313s-.073-.607 1-1.062c.72-.306 4.99-1.5 7.062-2 2.003-.483 5.199-.928 7.063-.406 1.91.535 7.719 2.5 7.719 2.5s-6.423-2.424-7.25-2.72c-.198-.07-.583-.14-1.032-.187 1.188-.728 4.916-1.774 7.594-2.5 2.944-.797 3.292-.77 5.313-.906 1.913-.128 2.947-.07 3.125-.062.117-.204.391-.78.937-.97.732-.253 5.079-1.047 7.188-1.374 1.748-.271 4.4-.485 6.312-.094.318.065.605.186.875.281 1.94.69 7.844 3.094 7.844 3.094s-6.535-2.95-7.375-3.312c-.201-.087-.575-.167-1.031-.25 1.206-.633 5.03-1.396 7.75-1.906 2.99-.562 3.3-.53 5.344-.532 2.109-.002
+3.312.125 3.312.125s-.073-.63 1.031-.937c.74-.206 5.126-.834 7.25-1.063 2.053-.22 5.319-.252 7.22.47 1.947.738 7.843 3.374 7.843 3.374s-6.563-3.179-7.406-3.562c-.202-.092-.543-.187-1-.282 1.21-.602 4.984-1.248 7.718-1.656 3.005-.448 3.326-.452 5.375-.406 1.94.043 3.007.194 3.188.219.119-.194.384-.766.937-.907.743-.188 5.155-.734 7.282-.937 1.763-.169 4.42-.234 6.343.25.32.08.604.203.875.312 1.953.784 7.907 3.47 7.907 3.47s-6.592-3.254-7.438-3.657c-.202-.096-.572-.207-1.031-.313 1.214-.574 5.044-1.122 7.781-1.5 3.009-.415 3.323-.442 5.375-.375 2.118.07 3.313.25 3.313.25s-.078-.637 1.03-.906c.745-.18 5.153-.663 7.282-.844 2.059-.174 5.343-.124 7.25.657 1.955.8 7.875 3.53 7.875 3.53s-6.56-3.308-7.406-3.718c-.202-.098-.572-.203-1.031-.312 1.215-.564 5.01-1.115 7.75-1.47 3.01-.389 3.321-.397 5.375-.312 1.944.08 3.006.254 3.187.282.12-.191.383-.746.938-.875.744-.174 5.15-.65 7.28-.813
+1.767-.134 4.45-.126 6.376.375.32.083.603.201.875.313 1.954.8 7.906 3.562 7.906 3.562s-6.591-3.34-7.437-3.75c-.203-.098-.572-.203-1.032-.312 1.215-.564 5.042-1.084 7.782-1.438 3.01-.39 3.352-.429 5.406-.344 2.12.088 3.312.313 3.312.313s-.078-.65 1.032-.906c.744-.173 5.15-.624 7.28-.782 2.061-.152 5.344-.096 7.25.688 1.956.804 7.876 3.5 7.876 3.5s-6.56-3.276-7.406-3.688c-.203-.098-.572-.202-1.032-.312 1.216-.562 5.012-1.128 7.75-1.5 3.01-.41 3.323-.416 5.375-.344 1.943.068 3.008.165 3.188.188.119-.195.384-.73.937-.875.742-.197 5.131-.83 7.25-1.094 1.757-.22 4.406-.333 6.313.031.317.06.606.19.875.281 1.936.661 7.844 2.938 7.844 2.938s-6.537-2.807-7.375-3.156c-.2-.084-.577-.174-1.032-.25 1.204-.651 5.02-1.372 7.72-2 2.966-.69 3.288-.756 5.312-.875 2.088-.124 3.28-.032 3.28-.032s-.086-.632 1-1.03c.73-.269 5.048-1.339 7.126-1.813 2.008-.46 5.168-1.03 7-.625 1.878.414 13.578 3.015 13.578
+3.015s-12.328-3.022-13.141-3.265c-.195-.058-.559-.107-1-.125 1.167-.804 3.514-1.688 6.11-2.703 1.68-.659.923-.377 2.775-1.004 1.754-.594 2.486-1.01 2.63-1.113.347-.207-.355-.122-.544-.042z" enable-background="new" filter="url(#gw)"/>
+ <path d="m1082.6-275.12c1.873 0.393 4.496 1.146 6.031 1.969s2.822 1.056 5.375 2.5c2.527 1.43 4.796 2.007 6.969 2.531 2.348 0.566 5.435 0.715 8.844 1.188-1.09-0.84-6.608-1.173-8.406-1.563-1.8-0.39-3.895-1.016-6.594-2.313-2.7-1.296-3.495-1.799-5.813-2.687-2.318-0.889-4.004-1.383-6.406-1.625z" enable-background="new" filter="url(#gv)"/>
+ <path d="M1051.5-270c1.905.578 4.528 1.616 6.094 2.594 1.565.978 2.88 1.36 5.5 3.125 2.593 1.747 4.986 2.71 7.25 3.594 2.446.955 5.682 1.657 9.406 3.062-1.19-1.138-7.063-2.687-8.938-3.375-1.874-.688-4.081-1.566-6.874-3.281-2.794-1.715-3.574-2.284-5.938-3.406-2.364-1.123-4.057-1.835-6.5-2.313z" enable-background="new" filter="url(#gu)"/>
+ <path d="m1020.2-266.84c1.912 0.638 4.581 1.755 6.156 2.813 1.575 1.057 2.896 1.508 5.531 3.406 2.61 1.878 5.029 3.03 7.313 4.062 2.468 1.116 5.764 2.174 9.531 3.844-1.203-1.222-7.203-3.314-9.094-4.125-1.89-0.81-4.064-1.894-6.874-3.75s-3.622-2.477-6-3.719c-2.379-1.242-4.111-1.975-6.563-2.531z" enable-background="new" filter="url(#gt)"/>
+ <path d="M1110.2-266.89c.15.049.688.631.11 1.484-.81 1.195-5.705 3.325-8.563 4.125-2.845.798-6.29.978-10.562-.375-4.302-1.362-5.47-2.468-10.656-4.312 4.664 2.115 6.195 3.952 10.125 5.344 1.62.574 3.367.94 5.062 1.03-.445.327-1.53.984-3.562 1.595-2.796.84-6.65 1.534-8.25 1.625-1.515.086-3.142-.513-3.438-.625.167.103.374.377-.25 1.03-.899.945-6.147 1.924-9.125 2.25-2.964.326-6.521-.015-10.906-1.905-3.978-1.715-5.339-2.916-9.406-4.75v.156c3.643 2.095 5.284 3.883 8.875 5.562 1.73.81 3.592 1.41 5.406 1.72-.534.286-1.557.71-3.437 1.03-2.87.488-6.81.817-8.438.75-.85-.034-1.728-.184-2.406-.406-.685-.215-1.19-.444-1.312-.5.169.107.43.403-.22 1.031-.909.88-6.245 1.337-9.25 1.47-2.99.131-6.588-.451-11-2.563-4.44-2.127-5.64-3.402-10.905-5.782 4.734 2.597 6.286 4.63 10.344 6.72 1.673.861 3.485 1.493 5.25
+1.937-.463.233-1.59.688-3.688.937-2.886.343-6.834.493-8.468.375-1.547-.111-3.232-.857-3.532-1 .17.12.414.41-.218 1-.913.851-6.244 1.262-9.25 1.375-2.993.113-6.59-.49-11-2.594-4.002-1.908-5.388-3.137-9.47-5.093v.156c3.656 2.204 5.295 4.053 8.907 5.906 1.74.893 3.637 1.528 5.469 1.969-.54.248-1.578.615-3.469.844-2.886.348-6.866.52-8.5.406a9.446 9.446 0 0 1-2.406-.5 12.532 12.532 0 0 1-1.313-.531c.17.112.465.422-.187 1.03-.913.853-6.275 1.294-9.281 1.407-2.993.112-6.594-.528-11-2.594-4.437-2.08-5.647-3.331-10.906-5.656 4.729 2.548 6.29 4.578 10.344 6.625 1.671.844 3.485 1.467 5.25 1.906-.464.235-1.59.684-3.688.938-2.886.348-6.836.57-8.469.469-1.544-.096-3.2-.83-3.5-.97.17.12.382.405-.25 1-.912.861-6.246 1.331-9.25 1.47-2.99.138-6.567-.451-10.969-2.47-3.993-1.83-5.365-3.028-9.437-4.905v.156c3.647 2.133 5.27 3.935 8.875 5.719 1.737.86 3.607 1.45 5.437
+1.875-.54.253-1.55.64-3.437.906-2.88.404-6.838.646-8.469.562a9.36 9.36 0 0 1-2.406-.437 12.971 12.971 0 0 1-1.313-.5c.17.109.432.41-.218 1.031-.911.87-6.25 1.392-9.25 1.563-2.987.17-6.574-.316-10.97-2.282-4.424-1.978-5.605-3.228-10.843-5.375 4.71 2.388 6.27 4.39 10.312 6.344a23.73 23.73 0 0 0 5.218 1.781c-.461.25-1.597.713-3.687 1.032-2.876.438-6.78.733-8.406.687-1.539-.043-3.233-.745-3.532-.875.169.113.411.414-.218 1.031-.908.891-6.203 1.529-9.188 1.813-2.971.283-6.573-.176-10.938-1.938-3.96-1.598-5.329-2.795-9.344-4.312v.156c3.596 1.811 5.239 3.582 8.813 5.156 1.722.759 3.587 1.29 5.406 1.625-.536.28-1.566.688-3.437 1.063-2.856.572-6.79 1.02-8.407 1.031-.844.006-1.706-.08-2.375-.25-.676-.162-1.16-.33-1.28-.375.166.094.422.383-.22 1.062-.897.951-6.186 1.918-9.125 2.438-2.925.518-6.432.374-10.719-1.031-4.315-1.415-5.472-2.53-10.562-3.969 4.577 1.751 6.09 3.56 10.031 5 1.627.594 3.37.956
+5.094 1.156-.453.297-1.555.884-3.594 1.469-2.804.805-6.638 1.576-8.218 1.75-1.495.165-3.117-.317-3.407-.406.164.09.393.36-.218 1.062-.883 1.014-6.045 2.372-8.938 3.063-2.88.687-6.335.76-10.562-.438-3.835-1.086-5.172-2.072-9.062-3.125v.156c3.484 1.395 5.07 2.92 8.53 4.032 1.669.535 3.457.786 5.22.875-.52.352-1.5.914-3.313 1.53-2.765.942-6.59 1.936-8.156 2.157-.818.115-1.633.123-2.281.031-.655-.083-1.133-.218-1.25-.25.162.075.434.34-.188 1.094-.87 1.055-6.01 2.66-8.875 3.438-2.852.774-6.259.958-10.438-.094-4.206-1.06-5.356-2.042-10.344-3.156 4.485 1.46 5.97 3.135 9.813 4.25 1.585.46 3.287.638 4.969.687-.442.337-1.513 1.028-3.5 1.781-2.734 1.037-6.452 2.163-8 2.438-1.465.26-3.06-.117-3.344-.188.16.08.38.321-.219 1.063-.865 1.07-5.916 2.818-8.75 3.687-2.82.866-6.207 1.157-10.344.22-3.753-.852-5.048-1.717-8.875-2.595v.157c3.428 1.237 4.987 2.632 8.375 3.53 1.632.434 3.367.584
+5.094.563-.51.384-1.477 1.022-3.25 1.75-2.706 1.112-6.436 2.308-7.969 2.625-.8.166-1.612.219-2.25.157v1.406c.227-.145.449-.273.719-.375 1.08-.41 2.171-.216 6-1.688 3.828-1.471 5.224-2.005 5.906-2.406.68-.4 1.612-.88 2.219-1.531 1.827-.138 3.57-.493 4.937-1 2.968-1.1 4.876-1.806 6.782-2.469 1.905-.663 2.354-1.415 3.406-1.781 1.091-.38 2.195-.166 6.062-1.531 3.868-1.366 5.283-1.827 5.969-2.22.701-.4 1.7-.932 2.313-1.593 1.97-.055 3.816-.385 5.28-.875 3.002-1.005 4.927-1.622 6.845-2.25 1.538-.504 2.174-1.047 2.906-1.437.23-.135.475-.254.75-.344 1.098-.36 2.181-.082 6.094-1.313 3.912-1.23 5.366-1.673 6.062-2.03.694-.358 1.63-.794 2.25-1.407 1.865-.023 3.636-.267 5.031-.688 3.03-.913 4.993-1.43 6.938-1.968 1.945-.54 2.426-1.265 3.5-1.563 1.114-.31 2.22.007 6.187-1.031 3.968-1.039 5.418-1.433 6.125-1.75.735-.33 1.814-.754 2.438-1.375 1.997.116 3.857-.02 5.344-.375 3.078-.735 5.083-1.101
+7.062-1.5 1.588-.32 2.244-.79 3-1.094.238-.107.467-.193.75-.25 1.134-.23 2.305.209 6.344-.5s5.5-.927 6.219-1.187c.715-.26 1.704-.568 2.343-1.094 1.925.24 3.748.224 5.188 0 3.126-.488 5.155-.7 7.156-.969 2.002-.268 2.489-.945 3.594-1.094 1.146-.154 2.276.302 6.344-.219 4.068-.52 5.56-.695 6.28-.937.738-.247 1.799-.586 2.438-1.125 2.05.335 3.974.398 5.5.219 3.143-.37 5.18-.56 7.188-.782 1.61-.178 2.265-.608 3.031-.843a3.43 3.43 0 0 1 .781-.188c1.15-.128 2.302.347 6.375-.125s5.56-.61 6.282-.844c.719-.232 1.7-.473 2.343-.968 1.937.333 3.77.404 5.22.25 3.145-.335 5.177-.519 7.187-.719 2.01-.2 2.484-.826 3.593-.938 1.152-.115 2.297.366 6.375-.062s5.59-.562 6.313-.781c.74-.224 1.796-.514 2.437-1.031 2.058.398 4.002.493 5.532.343 3.148-.308 5.175-.473 7.187-.656 1.614-.147 2.263-.56 3.031-.781.242-.081.494-.13.782-.156 1.152-.106 2.293.392 6.375 0 4.082-.393 5.589-.531 6.312-.75.721-.219
+1.7-.448 2.344-.938 1.938.35 3.769.454 5.219.313 3.148-.309 5.175-.474 7.187-.657 2.012-.183 2.514-.838 3.625-.937 1.152-.103 2.292.385 6.375 0s5.589-.501 6.313-.719c.739-.222 1.795-.514 2.437-1.031 2.057.402 4.003.503 5.531.344 3.147-.329 5.178-.523 7.188-.72 1.613-.156 2.266-.63 3.031-.874.24-.088.463-.122.75-.156 1.148-.14 2.317.34 6.375-.25 4.058-.59 5.562-.778 6.281-1.032.717-.253 1.675-.558 2.313-1.093 1.92.211 3.72.151 5.156-.094 3.12-.533 5.112-.929 7.094-1.313 1.982-.384 2.474-1.04 3.562-1.28 1.13-.252 2.27.115 6.25-.876s5.43-1.42 6.125-1.781c.723-.376 1.762-.87 2.375-1.531 1.963-.012 3.794-.291 5.22-.844 2.95-1.145 4.872-1.87 6.687-2.75 1.455-.707 2.334-1.686 2.547-1.984.212-.298.111-.746.137-.767.043-.035.32-.085.48-.429.858-1.847 2.32-5.644
+2.435-6.329.113-.682.163-1.348.214-1.745.03-.23-.147-.865-.125-.924.031-.082.305-.265.36-.515.267-1.198.09-2.191-.125-3.609-.214-1.417-.983-4.622-1.637-5.476-.659-.862-1.223-1.011-1.748-1-.208.27.137.262.163.312.68.05.934.369 1.42.897s1.442 3.94 1.579 5.39.19 2.86-.088 3.468c-.278.609-.944.429-1.237.495.531.186.89.213.953 1.057.058.814-.134 1.64-.52 2.806-.391 1.18-1.845 4.35-2.286 4.599-.452.255-.952.182-1.288.05z" enable-background="new" filter="url(#gs)"/>
+ <path d="m988.75-263.84c1.912 0.634 4.55 1.758 6.125 2.813 1.575 1.054 2.896 1.482 5.531 3.375 2.609 1.873 5.027 3.015 7.313 4.062 2.47 1.132 5.752 2.155 9.531 3.938-1.207-1.259-7.139-3.365-9.031-4.188s-4.128-1.93-6.938-3.781-3.622-2.482-6-3.719c-2.377-1.237-4.08-1.95-6.53-2.5z" enable-background="new" filter="url(#gr)"/>
+ <path d="M957.5-260.78c1.91.618 4.583 1.71 6.156 2.75 1.574 1.04 2.896 1.482 5.531 3.375 2.609 1.873 5.027 3.015 7.313 4.063 2.47 1.131 5.752 2.154 9.531 3.937-1.207-1.258-7.201-3.396-9.094-4.219-1.892-.823-4.096-1.93-6.906-3.781-2.81-1.85-3.593-2.44-5.969-3.656s-4.113-1.939-6.562-2.469z" enable-background="new" filter="url(#gq)"/>
+ <path d="M926.09-257.38c1.908.597 4.553 1.664 6.125 2.688 1.571 1.023 2.87 1.44 5.5 3.28 2.603 1.823 5.029 2.973 7.313 4 2.467 1.111 5.755 2.094 9.53 3.845-1.205-1.249-7.171-3.319-9.062-4.125s-4.102-1.891-6.906-3.688c-2.804-1.796-3.627-2.402-6-3.594-2.373-1.191-4.054-1.903-6.5-2.406z" enable-background="new" filter="url(#gp)"/>
+ <path d="M894.91-253.56c1.902.554 4.587 1.589 6.156 2.594s2.874 1.408 5.5 3.219c2.6 1.791 5 2.871 7.281 3.875 2.465 1.083 5.76 2.04 9.532 3.75-1.205-1.236-7.175-3.245-9.063-4.032-1.888-.786-4.075-1.83-6.875-3.593s-3.6-2.369-5.969-3.532c-2.37-1.163-4.123-1.834-6.562-2.28z" enable-background="new" filter="url(#go)"/>
+ <path d="M863.72-248.66c1.88.43 4.504 1.38 6.063 2.313 1.558.932 2.852 1.257 5.468 3 2.59 1.724 4.981 2.708 7.25 3.625 2.452.99 5.74 1.877 9.5 3.5-1.201-1.208-7.152-3.067-9.03-3.782-1.88-.715-4.086-1.684-6.876-3.375s-3.585-2.228-5.937-3.28-4.026-1.713-6.438-2z" enable-background="new" filter="url(#gn)"/>
+ <path d="m833.16-241.38c1.848 0.296 4.47 0.976 6 1.781s2.814 1.056 5.375 2.531c2.535 1.46 4.89 2.326 7.125 3.063 2.414 0.797 5.657 1.467 9.375 2.844-1.188-1.129-7.088-2.59-8.938-3.156-1.85-0.567-4.003-1.374-6.75-2.844-2.746-1.47-3.5-1.92-5.812-2.781-2.311-0.861-4.005-1.32-6.375-1.438z" enable-background="new" filter="url(#gm)"/>
+ <path d="m802.91-232.31c1.822 0.211 4.366 0.8 5.875 1.531 1.51 0.73 2.756 0.93 5.281 2.281 2.5 1.338 4.832 2.049 7.031 2.657 2.377 0.656 5.565 1.073 9.22 2.187-1.168-1.045-6.93-2.103-8.75-2.562-1.822-0.46-3.953-1.127-6.657-2.438s-3.471-1.72-5.75-2.469-3.913-1.179-6.25-1.187z" enable-background="new" filter="url(#gl)"/>
+ <path d="M773.19-222.19c1.811.179 4.32.665 5.813 1.344 1.491.678 2.753.798 5.25 2.062 2.47 1.252 4.79 1.896 6.968 2.438 2.354.585 5.492.897 9.094 1.844-1.15-.992-6.852-1.784-8.656-2.188s-3.916-1.021-6.594-2.25c-2.678-1.229-3.403-1.61-5.656-2.281-2.253-.67-3.896-1.002-6.219-.969z" enable-background="new" filter="url(#gk)"/>
+ <path d="M743.56-211.19c1.793.13 4.273.55 5.75 1.188s2.716.741 5.188 1.937c2.446 1.184 4.72 1.747 6.874 2.219 2.328.51 5.42.68 9 1.562-1.143-.97-6.747-1.59-8.53-1.937-1.784-.347-3.884-.888-6.532-2.031-2.648-1.144-3.395-1.517-5.625-2.125-2.23-.61-3.826-.91-6.125-.813z" enable-background="new" filter="url(#gj)"/>
+ <g fill="#fff" filter="url(#gi)">
+ <path d="M744.94-212.12s7.222-3.223 9.063-3.5 3.352-.003 6 .563c2.647.565 8.735 2.215 11.188 3.374s5.312 3.563 5.312 3.563-7.146-2.78-10.188-3.563-7.645-2.083-10.375-2.312-11 1.875-11 1.875z"/>
+ <path d="m735.47-206.95s3.66-2.223 5.5-2.5 3.665 0.247 6.313 0.813 8.735 2.215 11.188 3.375 6.562 2.125 6.562 2.125-8.396-1.343-11.438-2.125-7.957-2.334-10.688-2.563-7.438 0.875-7.438 0.875zm24.38-10.66s8.544-3.299 10.398-3.458c1.854-0.16 3.642 0.48 6.248 1.212s8.577 2.766 10.95 4.08 6.414 2.537 6.414 2.537-8.294-1.873-11.279-2.848c-2.985-0.974-7.792-2.834-10.503-3.236s-12.228 1.713-12.228 1.713zm15.35-5.62s7.771-2.782 9.628-2.904c1.857-0.12 3.631 0.555 6.222 1.341 2.59 0.787 8.519 2.942 10.864 4.304 2.346 1.362 6.36 2.67 6.36 2.67s-8.253-2.045-11.217-3.08c-2.965-1.035-7.733-2.995-10.434-3.452-2.702-0.458-11.422 1.121-11.422 1.121zm14.44-4.72s8.683-3.52 10.542-3.605 3.62 0.624 6.195 1.46c2.575 0.837 8.46 3.107 10.779 4.514 2.318 1.408 6.307 2.793 6.307 2.793s-8.212-2.204-11.156-3.297-7.673-3.144-10.365-3.654-12.3 1.789-12.3 1.789zm14.86-5.38s7.808-2.583 9.666-2.668c1.86-0.085 3.62
+0.625 6.195 1.461 2.575 0.837 8.46 3.107 10.78 4.514 2.318 1.407 6.307 2.792 6.307 2.792s-8.213-2.204-11.156-3.296-7.673-3.144-10.365-3.654-11.426 0.85-11.426 0.85zm15.06-4.25s8.558-2.583 10.417-2.668 3.62 0.625 6.195 1.461c2.575 0.837 8.46 3.107 10.779 4.514 2.318 1.407 6.307 2.792 6.307 2.792s-8.212-2.204-11.156-3.296-7.673-3.144-10.365-3.654-12.176 0.85-12.176 0.85zm16.67-5.02s6.967-1.987 8.828-1.968c1.86 0.02 3.579 0.827 6.102 1.807 2.524 0.98 8.272 3.578 10.508 5.113 2.236 1.536 6.14 3.143 6.14 3.143s-8.075-2.662-10.952-3.919c-2.878-1.256-7.484-3.57-10.143-4.231-2.66-0.66-10.482 0.055-10.482 0.055zm14.5-3.4s7.688-2.028 9.548-1.968 3.56 0.902 6.063 1.936c2.502 1.033 8.194 3.752 10.397 5.335 2.203 1.582 6.072 3.272 6.072 3.272s-8.017-2.833-10.868-4.15c-2.85-1.318-7.407-3.73-10.05-4.446s-11.162 0.021-11.162 0.021zm14.09-3.21s8.17-1.97 10.027-1.854c1.857 0.115 3.532 1.01 6.002
+2.118s8.077 3.997 10.23 5.645 5.972 3.454 5.972 3.454-7.928-3.074-10.738-4.476-7.291-3.95-9.913-4.746c-2.621-0.796-11.58-0.141-11.58-0.141zm16.56-2.39s8.085-1.908 9.938-1.737c1.853 0.172 3.5 1.117 5.935 2.3 2.436 1.182 7.952 4.24 10.055 5.953s5.864 3.633 5.864 3.633-7.832-3.312-10.597-4.8-7.168-4.169-9.764-5.044c-2.597-0.876-11.431-0.305-11.431-0.305zm15.2-2.75s7.642-1.428 9.495-1.265c1.854 0.162 3.505 1.1 5.946 2.27s7.973 4.203 10.084 5.905c2.112 1.703 5.881 3.605 5.881 3.605s-7.847-3.274-10.62-4.748c-2.772-1.474-7.187-4.135-9.788-4.998-2.6-0.863-10.998-0.77-10.998-0.77zm14.87-1.64s8.642-1.553 10.495-1.39c1.854 0.162 3.505 1.1 5.946 2.27s7.972 4.203 10.084 5.905c2.111 1.703 5.88 3.605 5.88 3.605s-7.846-3.274-10.62-4.748c-2.772-1.474-7.187-4.135-9.787-4.998-2.601-0.863-11.998-0.644-11.998-0.644zm16.25-2.31s7.642-0.865 9.495-0.703c1.854 0.163 3.505 1.1 5.946 2.27s7.973 4.203 10.084
+5.906c2.112 1.702 5.881 3.605 5.881 3.605s-7.847-3.275-10.62-4.749c-2.772-1.474-7.187-4.135-9.788-4.998-2.6-0.862-10.998-1.331-10.998-1.331zm15.13-1.19s8.58-1.49 10.433-1.328c1.854 0.163 3.505 1.1 5.946 2.27s7.972 4.203 10.084 5.906c2.111 1.702 5.88 3.605 5.88 3.605s-7.846-3.275-10.62-4.749c-2.772-1.474-7.187-4.135-9.787-4.998-2.601-0.862-11.935-0.706-11.935-0.706zm16.25-2.06s7.83-0.803 9.683-0.64c1.854 0.162 3.505 1.1 5.946 2.27s7.972 4.203 10.084 5.905c2.111 1.703 5.88 3.605 5.88 3.605s-7.846-3.274-10.62-4.748c-2.772-1.474-7.187-4.135-9.787-4.998-2.601-0.863-11.185-1.394-11.185-1.394zm15.37-1.25s8.392-1.178 10.245-1.015c1.854 0.162 3.505 1.1 5.946 2.27s7.972 4.203 10.084 5.905c2.111 1.703 5.88 3.605 5.88 3.605s-7.847-3.274-10.62-4.748c-2.772-1.474-7.187-4.135-9.788-4.998-2.6-0.863-11.748-1.02-11.748-1.02zm16.19-2.06s6.892-0.99 8.745-0.828c1.854 0.163 3.505 1.1 5.946 2.27s7.973 4.203
+10.084 5.906c2.112 1.702 5.881 3.605 5.881 3.605s-7.847-3.275-10.62-4.749-7.188-4.135-9.788-4.998c-2.6-0.862-10.248-1.206-10.248-1.206zm17.16-0.94s6.83-1.178 8.683-1.015c1.854 0.162 3.505 1.1 5.946 2.27 2.44 1.171 7.972 4.203 10.084 5.905 2.111 1.703 5.88 3.605 5.88 3.605s-7.847-3.274-10.62-4.748c-2.772-1.474-7.187-4.135-9.788-4.998-2.6-0.863-10.185-1.02-10.185-1.02zm16.1-2s6.08-0.428 7.933-0.265c1.854 0.162 3.505 1.1 5.946 2.27 2.44 1.171 7.972 4.203 10.084 5.905 2.111 1.703 5.88 3.605 5.88 3.605s-7.847-3.274-10.62-4.748c-2.772-1.474-7.187-4.135-9.788-4.998-2.6-0.863-9.435-1.77-9.435-1.77zm15.8-1.37s6.454-0.678 8.308-0.515c1.854 0.162 3.505 1.1 5.946 2.27 2.44 1.171 7.972 4.203 10.084 5.905 2.111 1.703 5.88 3.605 5.88 3.605s-7.847-3.274-10.62-4.748c-2.772-1.474-7.187-4.135-9.788-4.998-2.6-0.863-9.81-1.52-9.81-1.52zm15.6-1.86s5.498-0.91 7.358-0.853c1.86 0.056 3.562 0.896 6.066 1.925
+2.504 1.03 8.2 3.739 10.406 5.318 2.205 1.578 6.078 3.261 6.078 3.261s-8.022-2.819-10.875-4.131c-2.853-1.313-7.413-3.716-10.06-4.429-2.645-0.712-8.973-1.091-8.973-1.091zm17.4-2.46s4.547-1.156 6.408-1.186c1.86-0.03 3.6 0.73 6.149 1.642 2.55 0.912 8.365 3.354 10.64 4.829 2.277 1.474 6.224 2.976 6.224 2.976s-8.145-2.444-11.055-3.623c-2.91-1.178-7.578-3.368-10.253-3.957-2.676-0.588-8.113-0.68-8.113-0.68zm14.5-3.03s5.96-1.774 7.82-1.83c1.86-0.057 3.61 0.68 6.172 1.555 2.562 0.876 2.522 0.857 5.333 1.49 2.797 0.63 7.077 1.513 7.077 1.513s-3.616-0.016-6.792-0.466c-3.116-0.441-7.375-1.698-10.058-2.249-2.684-0.55-9.552-0.013-9.552-0.013z" enable-background="new"/>
+ <path d="M1099.2-279.93c.161.269 11.208-4.6 12.188-4.688.98-.087 2 3.125 2 3.125s-.775-1.504-2.875-1.062-11.301 2.671-11.312 2.625z"/>
+ </g>
+ <path d="M1107.5-284.09c-.419.213-.156.094-.647.306-.486.21-1.724.574-4.08 1.459-3.33 1.25-5.83 2.153-7.026 3.066-1.536.021-3.72.233-5.656.719a227.709 227.709 0 0 1-6.75 1.593c-1.895.42-1.676.643-2.875.875-1.297.252-1.721-.009-5.438.782-3.49.742-8.894 1.93-10.156 2.687-1.583-.18-3.867-.322-5.843-.031-3.04.447-4.917.673-6.844.906-.655.08-1.041.201-1.344.282-.426.131-.686.26-1.375.343-1.311.16-1.762-.157-5.531.282-3.554.413-9.005 1.272-10.25 1.937-1.599-.297-3.858-.534-5.844-.344-3.059.294-4.972.484-6.906.657-1.934.172-1.689.422-2.906.53-1.317.118-1.76-.163-5.532.25-3.541.39-9.007 1.21-10.28 1.876-1.6-.295-3.888-.507-5.876-.313-3.058.3-4.94.48-6.875.657-.657.06-1.04.178-1.343.25-.428.118-.684.218-1.375.28-1.316.121-1.76-.194-5.532.22-3.556.39-9.005 1.239-10.25
+1.906-1.598-.294-3.86-.524-5.843-.313-3.056.326-4.974.526-6.907.719-1.932.192-1.69.44-2.906.562-1.315.132-1.763-.164-5.53.282-3.54.418-8.979 1.292-10.25 1.969-1.599-.282-3.86-.42-5.845-.188-3.052.358-4.945.568-6.875.781-.656.073-1.04.173-1.344.25-.426.127-.684.267-1.375.344-1.313.146-1.767-.174-5.53.313-3.55.458-8.98 1.419-10.22 2.125-1.593-.245-3.834-.382-5.812-.125-3.048.394-4.95.648-6.875.906-1.925.258-1.726.493-2.938.656-1.31.176-1.747-.104-5.5.469-3.524.538-8.923 1.699-10.188 2.437-1.587-.203-3.845-.254-5.812.094-3.026.536-4.9.862-6.813 1.187-.65.111-1.013.271-1.312.375-.42.165-.664.332-1.344.47-1.295.26-1.727-.007-5.438.812-3.498.772-8.846 2.383-10.062 3.219-1.562-.078-3.757.085-5.687.593-2.972.783-4.818 1.232-6.688 1.75s-1.666.768-2.843 1.094c-1.273.353-1.697.107-5.344 1.188-3.425 1.014-8.65 2.933-9.875 3.843-1.539.013-3.72.273-5.625.875-2.931.928-4.75 1.459-6.594
+2.063-.627.205-.992.392-1.281.531-.408.214-.653.409-1.313.625-1.254.412-1.686.19-5.28 1.438-3.39 1.177-8.596 3.213-9.782 4.156-1.524.06-3.65.395-5.531 1.062-2.898 1.029-4.7 1.676-6.531 2.313-1.833.637-1.628.848-2.782 1.25-1.246.434-1.663.2-5.218 1.562-3.34 1.28-8.488 3.483-9.688 4.47-1.507.107-3.636.498-5.5 1.218a1044.752 1044.752 0 0 1-6.437 2.469c-.617.233-.997.442-1.282.593v1.094c.112-.222.386-.817.907-1.094.698-.37 4.813-1.993 6.812-2.718 1.657-.602 4.154-1.329 5.969-1.313.302.003.588.051.844.094 1.842.308 7.468 1.562 7.468 1.562s-6.233-1.646-7.03-1.843c-.191-.048-.536-.07-.97-.063 1.146-.87 4.762-2.393 7.344-3.437 2.839-1.148 3.117-1.252 5.063-1.657 2.008-.417 3.156-.5 3.156-.5s-.082-.6.969-1.125c.705-.351 4.887-1.892 6.906-2.562 1.952-.648 5.057-1.359 6.875-1 1.863.367 7.531 1.812 7.531 1.812s-6.287-1.87-7.094-2.093c-.193-.054-.53-.086-.968-.094 1.158-.833 4.794-2.195 7.406-3.156
+2.87-1.056 3.167-1.162 5.125-1.532 1.853-.35 2.859-.425 3.031-.437.114-.217.377-.81.906-1.063.71-.338 4.926-1.712 6.97-2.312 1.692-.497 4.24-1.037 6.093-.906.308.021.613.097.875.156 1.881.424 7.594 2.031 7.594 2.031s-6.342-2.065-7.157-2.312c-.194-.06-.557-.104-1-.125 1.17-.798 4.863-2.057 7.5-2.938 2.898-.968 3.233-1.003 5.22-1.281 2.049-.287 3.187-.313 3.187-.313s-.073-.607 1-1.062c.72-.306 4.99-1.5 7.062-2 2.003-.483 5.199-.928 7.063-.406 1.91.535 7.719 2.5 7.719 2.5s-6.423-2.424-7.25-2.72c-.198-.07-.583-.14-1.032-.187 1.188-.728 4.916-1.774 7.594-2.5 2.944-.797 3.292-.77 5.313-.906 1.913-.128 2.947-.07 3.125-.062.117-.204.391-.78.937-.97.732-.253 5.079-1.047 7.188-1.374 1.748-.271 4.4-.485 6.312-.094.318.065.605.186.875.281 1.94.69 7.844 3.094 7.844 3.094s-6.535-2.95-7.375-3.312c-.201-.087-.575-.167-1.031-.25 1.206-.633 5.03-1.396 7.75-1.906 2.99-.562 3.3-.53 5.344-.532 2.109-.002
+3.312.125 3.312.125s-.073-.63 1.031-.937c.74-.206 5.126-.834 7.25-1.063 2.053-.22 5.319-.252 7.22.47 1.947.738 7.843 3.374 7.843 3.374s-6.563-3.179-7.406-3.562c-.202-.092-.543-.187-1-.282 1.21-.602 4.984-1.248 7.718-1.656 3.005-.448 3.326-.452 5.375-.406 1.94.043 3.007.194 3.188.219.119-.194.384-.766.937-.907.743-.188 5.155-.734 7.282-.937 1.763-.169 4.42-.234 6.343.25.32.08.604.203.875.312 1.953.784 7.907 3.47 7.907 3.47s-6.592-3.254-7.438-3.657c-.202-.096-.572-.207-1.031-.313 1.214-.574 5.044-1.122 7.781-1.5 3.009-.415 3.323-.442 5.375-.375 2.118.07 3.313.25 3.313.25s-.078-.637 1.03-.906c.745-.18 5.153-.663 7.282-.844 2.059-.174 5.343-.124 7.25.657 1.955.8 7.875 3.53 7.875 3.53s-6.56-3.308-7.406-3.718c-.202-.098-.572-.203-1.031-.312 1.215-.564 5.01-1.115 7.75-1.47 3.01-.389 3.321-.397 5.375-.312 1.944.08 3.006.254 3.187.282.12-.191.383-.746.938-.875.744-.174 5.15-.65 7.28-.813
+1.767-.134 4.45-.126 6.376.375.32.083.603.201.875.313 1.954.8 7.906 3.562 7.906 3.562s-6.591-3.34-7.437-3.75c-.203-.098-.572-.203-1.032-.312 1.215-.564 5.042-1.084 7.782-1.438 3.01-.39 3.352-.429 5.406-.344 2.12.088 3.312.313 3.312.313s-.078-.65 1.032-.906c.744-.173 5.15-.624 7.28-.782 2.061-.152 5.344-.096 7.25.688 1.956.804 7.876 3.5 7.876 3.5s-6.56-3.276-7.406-3.688c-.203-.098-.572-.202-1.032-.312 1.216-.562 5.012-1.128 7.75-1.5 3.01-.41 3.323-.416 5.375-.344 1.943.068 3.008.165 3.188.188.119-.195.384-.73.937-.875.742-.197 5.131-.83 7.25-1.094 1.757-.22 4.406-.333 6.313.031.317.06.606.19.875.281 1.936.661 7.844 2.938 7.844 2.938s-6.537-2.807-7.375-3.156c-.2-.084-.577-.174-1.032-.25 1.204-.651 5.02-1.372 7.72-2 2.966-.69 3.288-.756 5.312-.875 2.088-.124 3.28-.032 3.28-.032s-.086-.632 1-1.03c.73-.269 5.048-1.339 7.126-1.813 2.008-.46 5.168-1.03 7-.625 1.878.414 13.578 3.015 13.578
+3.015s-12.328-3.022-13.141-3.265c-.195-.058-.559-.107-1-.125 1.167-.804 3.514-1.688 6.11-2.703 1.68-.659.923-.377 2.775-1.004 1.754-.594 2.486-1.01 2.63-1.113.347-.207-.355-.122-.544-.042z" enable-background="new" filter="url(#gh)" opacity=".25"/>
+ <path d="m1082.6-275.12c1.873 0.393 4.496 1.146 6.031 1.969s2.822 1.056 5.375 2.5c2.527 1.43 4.796 2.007 6.969 2.531 2.348 0.566 5.435 0.715 8.844 1.188-1.09-0.84-6.608-1.173-8.406-1.563-1.8-0.39-3.895-1.016-6.594-2.313-2.7-1.296-3.495-1.799-5.813-2.687-2.318-0.889-4.004-1.383-6.406-1.625z" enable-background="new" filter="url(#gg)" opacity=".25"/>
+ <path d="M1051.5-270c1.905.578 4.528 1.616 6.094 2.594 1.565.978 2.88 1.36 5.5 3.125 2.593 1.747 4.986 2.71 7.25 3.594 2.446.955 5.682 1.657 9.406 3.062-1.19-1.138-7.063-2.687-8.938-3.375-1.874-.688-4.081-1.566-6.874-3.281-2.794-1.715-3.574-2.284-5.938-3.406-2.364-1.123-4.057-1.835-6.5-2.313z" enable-background="new" filter="url(#gf)" opacity=".25"/>
+ <path d="m1020.2-266.84c1.912 0.638 4.581 1.755 6.156 2.813 1.575 1.057 2.896 1.508 5.531 3.406 2.61 1.878 5.029 3.03 7.313 4.062 2.468 1.116 5.764 2.174 9.531 3.844-1.203-1.222-7.203-3.314-9.094-4.125-1.89-0.81-4.064-1.894-6.874-3.75s-3.622-2.477-6-3.719c-2.379-1.242-4.111-1.975-6.563-2.531z" enable-background="new" filter="url(#ge)" opacity=".25"/>
+ <path d="M1110.2-266.89c.15.049.688.631.11 1.484-.81 1.195-5.705 3.325-8.563 4.125-2.845.798-6.29.978-10.562-.375-4.302-1.362-5.47-2.468-10.656-4.312 4.664 2.115 6.195 3.952 10.125 5.344 1.62.574 3.367.94 5.062 1.03-.445.327-1.53.984-3.562 1.595-2.796.84-6.65 1.534-8.25 1.625-1.515.086-3.142-.513-3.438-.625.167.103.374.377-.25 1.03-.899.945-6.147 1.924-9.125 2.25-2.964.326-6.521-.015-10.906-1.905-3.978-1.715-5.339-2.916-9.406-4.75v.156c3.643 2.095 5.284 3.883 8.875 5.562 1.73.81 3.592 1.41 5.406 1.72-.534.286-1.557.71-3.437 1.03-2.87.488-6.81.817-8.438.75-.85-.034-1.728-.184-2.406-.406-.685-.215-1.19-.444-1.312-.5.169.107.43.403-.22 1.031-.909.88-6.245 1.337-9.25 1.47-2.99.131-6.588-.451-11-2.563-4.44-2.127-5.64-3.402-10.905-5.782 4.734 2.597 6.286 4.63 10.344 6.72 1.673.861 3.485 1.493 5.25
+1.937-.463.233-1.59.688-3.688.937-2.886.343-6.834.493-8.468.375-1.547-.111-3.232-.857-3.532-1 .17.12.414.41-.218 1-.913.851-6.244 1.262-9.25 1.375-2.993.113-6.59-.49-11-2.594-4.002-1.908-5.388-3.137-9.47-5.093v.156c3.656 2.204 5.295 4.053 8.907 5.906 1.74.893 3.637 1.528 5.469 1.969-.54.248-1.578.615-3.469.844-2.886.348-6.866.52-8.5.406a9.446 9.446 0 0 1-2.406-.5 12.532 12.532 0 0 1-1.313-.531c.17.112.465.422-.187 1.03-.913.853-6.275 1.294-9.281 1.407-2.993.112-6.594-.528-11-2.594-4.437-2.08-5.647-3.331-10.906-5.656 4.729 2.548 6.29 4.578 10.344 6.625 1.671.844 3.485 1.467 5.25 1.906-.464.235-1.59.684-3.688.938-2.886.348-6.836.57-8.469.469-1.544-.096-3.2-.83-3.5-.97.17.12.382.405-.25 1-.912.861-6.246 1.331-9.25 1.47-2.99.138-6.567-.451-10.969-2.47-3.993-1.83-5.365-3.028-9.437-4.905v.156c3.647 2.133 5.27 3.935 8.875 5.719 1.737.86 3.607 1.45 5.437
+1.875-.54.253-1.55.64-3.437.906-2.88.404-6.838.646-8.469.562a9.36 9.36 0 0 1-2.406-.437 12.971 12.971 0 0 1-1.313-.5c.17.109.432.41-.218 1.031-.911.87-6.25 1.392-9.25 1.563-2.987.17-6.574-.316-10.97-2.282-4.424-1.978-5.605-3.228-10.843-5.375 4.71 2.388 6.27 4.39 10.312 6.344a23.73 23.73 0 0 0 5.218 1.781c-.461.25-1.597.713-3.687 1.032-2.876.438-6.78.733-8.406.687-1.539-.043-3.233-.745-3.532-.875.169.113.411.414-.218 1.031-.908.891-6.203 1.529-9.188 1.813-2.971.283-6.573-.176-10.938-1.938-3.96-1.598-5.329-2.795-9.344-4.312v.156c3.596 1.811 5.239 3.582 8.813 5.156 1.722.759 3.587 1.29 5.406 1.625-.536.28-1.566.688-3.437 1.063-2.856.572-6.79 1.02-8.407 1.031-.844.006-1.706-.08-2.375-.25-.676-.162-1.16-.33-1.28-.375.166.094.422.383-.22 1.062-.897.951-6.186 1.918-9.125 2.438-2.925.518-6.432.374-10.719-1.031-4.315-1.415-5.472-2.53-10.562-3.969 4.577 1.751 6.09 3.56 10.031 5 1.627.594 3.37.956
+5.094 1.156-.453.297-1.555.884-3.594 1.469-2.804.805-6.638 1.576-8.218 1.75-1.495.165-3.117-.317-3.407-.406.164.09.393.36-.218 1.062-.883 1.014-6.045 2.372-8.938 3.063-2.88.687-6.335.76-10.562-.438-3.835-1.086-5.172-2.072-9.062-3.125v.156c3.484 1.395 5.07 2.92 8.53 4.032 1.669.535 3.457.786 5.22.875-.52.352-1.5.914-3.313 1.53-2.765.942-6.59 1.936-8.156 2.157-.818.115-1.633.123-2.281.031-.655-.083-1.133-.218-1.25-.25.162.075.434.34-.188 1.094-.87 1.055-6.01 2.66-8.875 3.438-2.852.774-6.259.958-10.438-.094-4.206-1.06-5.356-2.042-10.344-3.156 4.485 1.46 5.97 3.135 9.813 4.25 1.585.46 3.287.638 4.969.687-.442.337-1.513 1.028-3.5 1.781-2.734 1.037-6.452 2.163-8 2.438-1.465.26-3.06-.117-3.344-.188.16.08.38.321-.219 1.063-.865 1.07-5.916 2.818-8.75 3.687-2.82.866-6.207 1.157-10.344.22-3.753-.852-5.048-1.717-8.875-2.595v.157c3.428 1.237 4.987 2.632 8.375 3.53 1.632.434 3.367.584
+5.094.563-.51.384-1.477 1.022-3.25 1.75-2.706 1.112-6.436 2.308-7.969 2.625-.8.166-1.612.219-2.25.157v1.406c.227-.145.449-.273.719-.375 1.08-.41 2.171-.216 6-1.688 3.828-1.471 5.224-2.005 5.906-2.406.68-.4 1.612-.88 2.219-1.531 1.827-.138 3.57-.493 4.937-1 2.968-1.1 4.876-1.806 6.782-2.469 1.905-.663 2.354-1.415 3.406-1.781 1.091-.38 2.195-.166 6.062-1.531 3.868-1.366 5.283-1.827 5.969-2.22.701-.4 1.7-.932 2.313-1.593 1.97-.055 3.816-.385 5.28-.875 3.002-1.005 4.927-1.622 6.845-2.25 1.538-.504 2.174-1.047 2.906-1.437.23-.135.475-.254.75-.344 1.098-.36 2.181-.082 6.094-1.313 3.912-1.23 5.366-1.673 6.062-2.03.694-.358 1.63-.794 2.25-1.407 1.865-.023 3.636-.267 5.031-.688 3.03-.913 4.993-1.43 6.938-1.968 1.945-.54 2.426-1.265 3.5-1.563 1.114-.31 2.22.007 6.187-1.031 3.968-1.039 5.418-1.433 6.125-1.75.735-.33 1.814-.754 2.438-1.375 1.997.116 3.857-.02 5.344-.375 3.078-.735 5.083-1.101
+7.062-1.5 1.588-.32 2.244-.79 3-1.094.238-.107.467-.193.75-.25 1.134-.23 2.305.209 6.344-.5s5.5-.927 6.219-1.187c.715-.26 1.704-.568 2.343-1.094 1.925.24 3.748.224 5.188 0 3.126-.488 5.155-.7 7.156-.969 2.002-.268 2.489-.945 3.594-1.094 1.146-.154 2.276.302 6.344-.219 4.068-.52 5.56-.695 6.28-.937.738-.247 1.799-.586 2.438-1.125 2.05.335 3.974.398 5.5.219 3.143-.37 5.18-.56 7.188-.782 1.61-.178 2.265-.608 3.031-.843a3.43 3.43 0 0 1 .781-.188c1.15-.128 2.302.347 6.375-.125s5.56-.61 6.282-.844c.719-.232 1.7-.473 2.343-.968 1.937.333 3.77.404 5.22.25 3.145-.335 5.177-.519 7.187-.719 2.01-.2 2.484-.826 3.593-.938 1.152-.115 2.297.366 6.375-.062s5.59-.562 6.313-.781c.74-.224 1.796-.514 2.437-1.031 2.058.398 4.002.493 5.532.343 3.148-.308 5.175-.473 7.187-.656 1.614-.147 2.263-.56 3.031-.781.242-.081.494-.13.782-.156 1.152-.106 2.293.392 6.375 0 4.082-.393 5.589-.531 6.312-.75.721-.219
+1.7-.448 2.344-.938 1.938.35 3.769.454 5.219.313 3.148-.309 5.175-.474 7.187-.657 2.012-.183 2.514-.838 3.625-.937 1.152-.103 2.292.385 6.375 0s5.589-.501 6.313-.719c.739-.222 1.795-.514 2.437-1.031 2.057.402 4.003.503 5.531.344 3.147-.329 5.178-.523 7.188-.72 1.613-.156 2.266-.63 3.031-.874.24-.088.463-.122.75-.156 1.148-.14 2.317.34 6.375-.25 4.058-.59 5.562-.778 6.281-1.032.717-.253 1.675-.558 2.313-1.093 1.92.211 3.72.151 5.156-.094 3.12-.533 5.112-.929 7.094-1.313 1.982-.384 2.474-1.04 3.562-1.28 1.13-.252 2.27.115 6.25-.876s5.43-1.42 6.125-1.781c.723-.376 1.762-.87 2.375-1.531 1.963-.012 3.794-.291 5.22-.844 2.95-1.145 4.872-1.87 6.687-2.75 1.455-.707 2.334-1.686 2.547-1.984.212-.298.111-.746.137-.767.043-.035.32-.085.48-.429.858-1.847 2.32-5.644
+2.435-6.329.113-.682.163-1.348.214-1.745.03-.23-.147-.865-.125-.924.031-.082.305-.265.36-.515.267-1.198.09-2.191-.125-3.609-.214-1.417-.983-4.622-1.637-5.476-.659-.862-1.223-1.011-1.748-1-.208.27.137.262.163.312.68.05.934.369 1.42.897s1.221 3.85 1.358 5.301.19 2.86-.088 3.469c-.278.608-.723.517-1.016.583.531.186.67.125.732.969.058.813-.134 1.64-.52 2.806-.392 1.18-1.846 4.35-2.286 4.598-.452.256-.731.27-1.067.14z" enable-background="new" filter="url(#ff)" opacity=".25"/>
+ <path d="m988.75-263.84c1.912 0.634 4.55 1.758 6.125 2.813 1.575 1.054 2.896 1.482 5.531 3.375 2.609 1.873 5.027 3.015 7.313 4.062 2.47 1.132 5.752 2.155 9.531 3.938-1.207-1.259-7.139-3.365-9.031-4.188s-4.128-1.93-6.938-3.781-3.622-2.482-6-3.719c-2.377-1.237-4.08-1.95-6.53-2.5z" enable-background="new" filter="url(#fe)" opacity=".25"/>
+ <path d="M957.5-260.78c1.91.618 4.583 1.71 6.156 2.75 1.574 1.04 2.896 1.482 5.531 3.375 2.609 1.873 5.027 3.015 7.313 4.063 2.47 1.131 5.752 2.154 9.531 3.937-1.207-1.258-7.201-3.396-9.094-4.219-1.892-.823-4.096-1.93-6.906-3.781-2.81-1.85-3.593-2.44-5.969-3.656s-4.113-1.939-6.562-2.469z" enable-background="new" filter="url(#fd)" opacity=".25"/>
+ <path d="M926.09-257.38c1.908.597 4.553 1.664 6.125 2.688 1.571 1.023 2.87 1.44 5.5 3.28 2.603 1.823 5.029 2.973 7.313 4 2.467 1.111 5.755 2.094 9.53 3.845-1.205-1.249-7.171-3.319-9.062-4.125s-4.102-1.891-6.906-3.688c-2.804-1.796-3.627-2.402-6-3.594-2.373-1.191-4.054-1.903-6.5-2.406z" enable-background="new" filter="url(#fc)" opacity=".25"/>
+ <path d="M894.91-253.56c1.902.554 4.587 1.589 6.156 2.594s2.874 1.408 5.5 3.219c2.6 1.791 5 2.871 7.281 3.875 2.465 1.083 5.76 2.04 9.532 3.75-1.205-1.236-7.175-3.245-9.063-4.032-1.888-.786-4.075-1.83-6.875-3.593s-3.6-2.369-5.969-3.532c-2.37-1.163-4.123-1.834-6.562-2.28z" enable-background="new" filter="url(#fb)" opacity=".25"/>
+ <path d="M863.72-248.66c1.88.43 4.504 1.38 6.063 2.313 1.558.932 2.852 1.257 5.468 3 2.59 1.724 4.981 2.708 7.25 3.625 2.452.99 5.74 1.877 9.5 3.5-1.201-1.208-7.152-3.067-9.03-3.782-1.88-.715-4.086-1.684-6.876-3.375s-3.585-2.228-5.937-3.28-4.026-1.713-6.438-2z" enable-background="new" filter="url(#fa)" opacity=".25"/>
+ <path d="m833.16-241.38c1.848 0.296 4.47 0.976 6 1.781s2.814 1.056 5.375 2.531c2.535 1.46 4.89 2.326 7.125 3.063 2.414 0.797 5.657 1.467 9.375 2.844-1.188-1.129-7.088-2.59-8.938-3.156-1.85-0.567-4.003-1.374-6.75-2.844-2.746-1.47-3.5-1.92-5.812-2.781-2.311-0.861-4.005-1.32-6.375-1.438z" enable-background="new" filter="url(#ez)" opacity=".25"/>
+ <path d="m802.91-232.31c1.822 0.211 4.366 0.8 5.875 1.531 1.51 0.73 2.756 0.93 5.281 2.281 2.5 1.338 4.832 2.049 7.031 2.657 2.377 0.656 5.565 1.073 9.22 2.187-1.168-1.045-6.93-2.103-8.75-2.562-1.822-0.46-3.953-1.127-6.657-2.438s-3.471-1.72-5.75-2.469-3.913-1.179-6.25-1.187z" enable-background="new" filter="url(#ey)" opacity=".25"/>
+ <path d="M773.19-222.19c1.811.179 4.32.665 5.813 1.344 1.491.678 2.753.798 5.25 2.062 2.47 1.252 4.79 1.896 6.968 2.438 2.354.585 5.492.897 9.094 1.844-1.15-.992-6.852-1.784-8.656-2.188s-3.916-1.021-6.594-2.25c-2.678-1.229-3.403-1.61-5.656-2.281-2.253-.67-3.896-1.002-6.219-.969z" enable-background="new" filter="url(#ex)" opacity=".25"/>
+ <path d="M743.56-211.19c1.793.13 4.273.55 5.75 1.188s2.716.741 5.188 1.937c2.446 1.184 4.72 1.747 6.874 2.219 2.328.51 5.42.68 9 1.562-1.143-.97-6.747-1.59-8.53-1.937-1.784-.347-3.884-.888-6.532-2.031-2.648-1.144-3.395-1.517-5.625-2.125-2.23-.61-3.826-.91-6.125-.813z" enable-background="new" filter="url(#ew)" opacity=".25"/>
+ </g>
+ </g>
+ <path d="M3840.7 940.04c1.651-7.722 3.538-13.762 4.889-23.633.803-8.777 3.33-4.873 7.302-20.148 1.41-5.374 5.507.94 9.016-5.757 1.278-1.927 2.901-.97 4.508-.151 3.787 3.165 5.859 8.887 8.381 13.937 6.174 14.326 20.651 19.06 23.62 15.149 1.442-6.97 7.926-12.979 12.444-26.663.752-2.694 11.796-20.982 14.73-15.755" enable-background="new" fill="none" stroke="#000"/>
+ <path d="M3865.4 915.04c7.405-7.758 13.89-21.376 20.826-32.117 3.33-4.726 6.909 7.717 10.857 8.635 2.31-.523 3.734 2.886 5.714 3.939 5.186 3.162 2.412 9.274 10.032 15.452 6.191 4.128 8.958-16.313 14.985-17.573 4.906-1.207 8.145-.758 11.683-.606 3.95.333 4.102-8.393 6.096-12.725 2.997-6.731 7.196-4.438 10.203-11.376 1.023-3.323 1.965-7.224 2.75-12.257.887-4.8 3.057.734 4.825 3.03" enable-background="new" fill="none" stroke="#000"/>
+ <g transform="matrix(1.0057 0 0 2.3995 3249.4 125.01)" clip-path="url(#ev)" filter="url(#eu)">
+ <path d="M910.14 746.31l32.613 5.174-.361-23.876 7.188-29.682-8.45-5.264-21.823 26.511-9.167 27.137z" enable-background="accumulate" fill="#fff" fill-rule="evenodd"/>
+ <path d="M877.52 650.19h123.04v172.53H877.52z" enable-background="accumulate" fill="none"/>
+ </g>
+ <g transform="matrix(1.0057 0 0 2.3995 3249.4 125.01)" clip-path="url(#et)" filter="url(#es)">
+ <path d="M964 754.69l18.429 7.465 9.071-36.964-14.87 4.839L964 754.69z" enable-background="accumulate" fill="#fff" fill-rule="evenodd"/>
+ <path d="M924.9 677.06h142.13v125.16H924.9z" enable-background="accumulate" fill="none"/>
+ </g>
+ </g>
+ <path d="m592.8 398.62l2899.5-102.16" fill="#f8d615" fill-rule="evenodd" marker-end="url(#er)" stroke="#f8d615" stroke-width="17.844"/>
+ <path d="m576.48 779.92l2914.5 416.44" enable-background="new" fill="#f8d615" fill-rule="evenodd" marker-end="url(#eq)" stroke="#f8d615" stroke-width="18"/>
+ <g font-family="sans-serif" letter-spacing="0" word-spacing="0">
+ <text transform="translate(48.571 195.53)" x="80.219" y="107.387" fill="#f83615" font-size="40" style="line-height:125%">
+ <tspan x="80.219" y="107.387" font-size="50">CROP_DEFAULT</tspan>
+ </text>
+ <g font-size="45.314">
+ <text transform="matrix(.96106 0 0 1.0405 48.571 195.53)" x="3861.367" y="1281.72" enable-background="new" fill="#f80000" fill-opacity="0" style="line-height:125%">
+ <tspan x="3861.367" y="1281.72" font-size="56.642">COMPOSE_PADDED</tspan>
+ </text>
+ <text transform="matrix(.96106 0 0 1.0405 48.571 195.53)" x="3615.155" y="49.157" enable-background="new" fill="#f8d615" style="line-height:125%">
+ <tspan x="3615.155" y="49.157" font-size="50">COMPOSE_ACTIVE</tspan>
+ </text>
+ <text transform="matrix(.96106 0 0 1.0405 48.571 195.53)" x="2429.153" y="-3.166" enable-background="new" fill="#f83615" style="line-height:125%">
+ <tspan x="2429.153" y="-3.166" font-size="50">COMPOSE_DEFAULT</tspan>
+ </text>
+ <text transform="matrix(.96106 0 0 1.0405 48.571 195.53)" x="3681.545" y="1289.954" enable-background="new" fill="#f815bb" style="line-height:125%">
+ <tspan x="3681.545" y="1289.954" font-size="50">COMPOSE_PADDED</tspan>
+ </text>
+ </g>
+ <text transform="matrix(.96106 0 0 1.0405 48.571 195.53)" x="2438.062" y="1368.429" enable-background="new" font-size="50" style="line-height:125%">
+ <tspan x="2438.062" y="1368.429">COMPOSE_BONDS</tspan>
+ </text>
+ <g font-size="40">
+ <text transform="translate(48.571 195.53)" x="8.082" y="1438.896" enable-background="new" style="line-height:125%">
+ <tspan x="8.082" y="1438.896" font-size="50">CROP_BONDS</tspan>
+ </text>
+ <text transform="translate(48.571 195.53)" x="1455.443" y="-26.808" enable-background="new" style="line-height:125%">
+ <tspan x="1455.443" y="-26.808" font-size="50">overscan area</tspan>
+ </text>
+ <text transform="translate(48.571 195.53)" x="179.631" y="385.388" enable-background="new" fill="#f8d615" style="line-height:125%">
+ <tspan x="179.631" y="385.388" font-size="50">CROP_ACTIVE</tspan>
+ </text>
+ <text transform="translate(48.571 195.53)" x="636.674" y="-138.845" enable-background="new" style="line-height:125%">
+ <tspan x="636.674" y="-138.845" font-size="70" font-weight="bold">DATA SOURCE</tspan>
+ </text>
+ </g>
+ <text transform="matrix(.96106 0 0 1.0405 48.571 195.53)" x="3178.715" y="-129.061" enable-background="new" font-size="45.314" style="line-height:125%">
+ <tspan x="3178.715" y="-129.061" font-size="70" font-weight="bold">DATA SINK</tspan>
+ </text>
+ </g>
</svg>
diff --git a/Documentation/media/uapi/v4l/subdev-formats.rst b/Documentation/media/uapi/v4l/subdev-formats.rst
index 8e73bb00c0d5..b1eea44550e1 100644
--- a/Documentation/media/uapi/v4l/subdev-formats.rst
+++ b/Documentation/media/uapi/v4l/subdev-formats.rst
@@ -1586,7 +1586,7 @@ JEIDA defined bit mapping will be named
.. raw:: latex
- \begin{adjustbox}{width=\columnwidth}
+ \tiny
.. _v4l2-mbus-pixelcode-rgb-lvds:
@@ -1784,7 +1784,7 @@ JEIDA defined bit mapping will be named
.. raw:: latex
- \end{adjustbox}\newline\newline
+ \normalsize
Bayer Formats
@@ -7321,11 +7321,14 @@ following information.
The following table lists existing HSV/HSL formats.
+
.. raw:: latex
- \newline\newline\begin{adjustbox}{width=\columnwidth}
+ \begingroup
+ \tiny
+ \setlength{\tabcolsep}{2pt}
-.. tabularcolumns:: |p{6.2cm}|p{1.6cm}|p{0.7cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|p{0.5cm}|
+.. tabularcolumns:: |p{3.0cm}|p{0.60cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|
.. _v4l2-mbus-pixelcode-hsv:
@@ -7413,7 +7416,7 @@ The following table lists existing HSV/HSL formats.
.. raw:: latex
- \end{adjustbox}\newline\newline
+ \normalsize
JPEG Compressed Formats
@@ -7435,7 +7438,7 @@ The following table lists existing JPEG compressed formats.
.. _v4l2-mbus-pixelcode-jpeg:
-.. tabularcolumns:: |p{5.6cm}|p{1.2cm}|p{10.7cm}|
+.. tabularcolumns:: |p{5.4cm}|p{1.4cm}|p{10.7cm}|
.. flat-table:: JPEG Formats
:header-rows: 1
@@ -7468,7 +7471,7 @@ formats.
.. _v4l2-mbus-pixelcode-vendor-specific:
-.. tabularcolumns:: |p{6.6cm}|p{1.2cm}|p{9.7cm}|
+.. tabularcolumns:: |p{6.8cm}|p{1.4cm}|p{9.3cm}|
.. flat-table:: Vendor and device specific formats
:header-rows: 1
diff --git a/Documentation/media/uapi/v4l/subdev-image-processing-crop.svg b/Documentation/media/uapi/v4l/subdev-image-processing-crop.svg
index 1903dd3846c2..ee1df49f83e8 100644
--- a/Documentation/media/uapi/v4l/subdev-image-processing-crop.svg
+++ b/Documentation/media/uapi/v4l/subdev-image-processing-crop.svg
@@ -18,11 +18,11 @@
id="metadata100">
<rdf:RDF>
<cc:Work
- rdf:about="">
- <dc:format>image/svg+xml</dc:format>
- <dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
- <dc:title />
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title />
</cc:Work>
</rdf:RDF>
</metadata>
diff --git a/Documentation/media/uapi/v4l/subdev-image-processing-full.svg b/Documentation/media/uapi/v4l/subdev-image-processing-full.svg
index 91cf51832c12..c10d222b9ea9 100644
--- a/Documentation/media/uapi/v4l/subdev-image-processing-full.svg
+++ b/Documentation/media/uapi/v4l/subdev-image-processing-full.svg
@@ -18,11 +18,11 @@
id="metadata260">
<rdf:RDF>
<cc:Work
- rdf:about="">
- <dc:format>image/svg+xml</dc:format>
- <dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
- <dc:title />
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title />
</cc:Work>
</rdf:RDF>
</metadata>
diff --git a/Documentation/media/uapi/v4l/subdev-image-processing-scaling-multi-source.svg b/Documentation/media/uapi/v4l/subdev-image-processing-scaling-multi-source.svg
index cedcbf598923..3cb68bf9fc04 100644
--- a/Documentation/media/uapi/v4l/subdev-image-processing-scaling-multi-source.svg
+++ b/Documentation/media/uapi/v4l/subdev-image-processing-scaling-multi-source.svg
@@ -18,11 +18,11 @@
id="metadata186">
<rdf:RDF>
<cc:Work
- rdf:about="">
- <dc:format>image/svg+xml</dc:format>
- <dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
- <dc:title />
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title />
</cc:Work>
</rdf:RDF>
</metadata>
diff --git a/Documentation/media/uapi/v4l/v4l2-selection-targets.rst b/Documentation/media/uapi/v4l/v4l2-selection-targets.rst
index cab07de6f4da..87433ec76c6b 100644
--- a/Documentation/media/uapi/v4l/v4l2-selection-targets.rst
+++ b/Documentation/media/uapi/v4l/v4l2-selection-targets.rst
@@ -12,7 +12,7 @@ of the two interfaces they are used.
.. _v4l2-selection-targets-table:
-.. tabularcolumns:: |p{5.8cm}|p{1.4cm}|p{6.5cm}|p{1.2cm}|p{1.6cm}|
+.. tabularcolumns:: |p{6.0cm}|p{1.4cm}|p{7.4cm}|p{1.2cm}|p{1.4cm}|
.. flat-table:: Selection target definitions
:header-rows: 1
diff --git a/Documentation/media/uapi/v4l/v4l2.rst b/Documentation/media/uapi/v4l/v4l2.rst
index f52a11c949d3..2128717299b3 100644
--- a/Documentation/media/uapi/v4l/v4l2.rst
+++ b/Documentation/media/uapi/v4l/v4l2.rst
@@ -11,7 +11,9 @@ This part describes the Video for Linux API version 2 (V4L2 API) specification.
**Revision 4.5**
-.. class:: toc-title
+.. only:: html
+
+ .. class:: toc-title
Table of Contents
@@ -23,7 +25,6 @@ This part describes the Video for Linux API version 2 (V4L2 API) specification.
pixfmt
io
devices
- driver
libv4l
compat
user-func
diff --git a/Documentation/media/uapi/v4l/vbi_525.svg b/Documentation/media/uapi/v4l/vbi_525.svg
index b05f7777ccf8..643aec8d0ba2 100644
--- a/Documentation/media/uapi/v4l/vbi_525.svg
+++ b/Documentation/media/uapi/v4l/vbi_525.svg
@@ -42,19 +42,21 @@
inkscape:current-layer="g10"
units="mm" /><metadata
id="metadata8"><rdf:RDF><cc:Work
- rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title /></cc:Work></rdf:RDF></metadata><defs
+ rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title /></cc:Work></rdf:RDF></metadata><defs
id="defs6"><clipPath
id="clipPath20"
clipPathUnits="userSpaceOnUse"><path
- inkscape:connector-curvature="0"
- id="path22"
- d="m 0,0 5950,0 0,3922 L 0,3922 0,0 Z m 0,3922 5950,0 0,1 -5950,0 0,-1 z m 0,1 1359,0 0,1 -1359,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1363,0 0,1 -1363,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1367,0 0,1 -1367,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1371,0 0,1 -1371,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1375,0 0,1 -1375,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1379,0 0,1 -1379,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1383,0 0,1 -1383,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1387,0 0,1 -1387,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1391,0 0,1 -1391,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1390,0 0,1 -1390,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1386,0 0,1 -1386,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1382,0 0,1 -1382,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1378,0 0,1 -1378,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1374,0 0,1 -1374,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1370,0 0,1 -1370,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1366,0 0,1 -1366,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1362,0 0,1 -1362,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1358,0 0,1 -1358,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 5950,0 0,1 -5950,0 0,-1 z m 0,1 5950,0 0,4478 -5950,0 0,-4478 z" /></clipPath><clipPath
+ inkscape:connector-curvature="0"
+ id="path22"
+ d="m 0,0 5950,0 0,3922 L 0,3922 0,0 Z m 0,3922 5950,0 0,1 -5950,0 0,-1 z m 0,1 1359,0 0,1 -1359,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1363,0 0,1 -1363,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1367,0 0,1 -1367,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1371,0 0,1 -1371,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1375,0 0,1 -1375,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1379,0 0,1 -1379,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1383,0 0,1 -1383,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1387,0 0,1 -1387,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1391,0 0,1 -1391,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1390,0 0,1 -1390,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1386,0 0,1 -1386,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1382,0 0,1 -1382,0 0,-1 z m
+1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1378,0 0,1 -1378,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1374,0 0,1 -1374,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1370,0 0,1 -1370,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1366,0 0,1 -1366,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1362,0 0,1 -1362,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1358,0 0,1 -1358,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 5950,0 0,1 -5950,0 0,-1 z m 0,1 5950,0 0,4478 -5950,0 0,-4478 z" /></clipPath><clipPath
id="clipPath98"
clipPathUnits="userSpaceOnUse"><path
- inkscape:connector-curvature="0"
- id="path100"
- d="m 0,0 5950,0 0,4546 L 0,4546 0,0 Z m 0,4546 5950,0 0,1 -5950,0 0,-1 z m 0,1 1360,0 0,1 -1360,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1364,0 0,1 -1364,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1368,0 0,1 -1368,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1372,0 0,1 -1372,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1376,0 0,1 -1376,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1380,0 0,1 -1380,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1384,0 0,1 -1384,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1388,0 0,1 -1388,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1391,0 0,1 -1391,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1389,0 0,1 -1389,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1385,0 0,1 -1385,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1381,0 0,1 -1381,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1377,0 0,1 -1377,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1373,0 0,1 -1373,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1369,0 0,1 -1369,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1365,0 0,1 -1365,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1361,0 0,1 -1361,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1357,0 0,1 -1357,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 5950,0 0,1 -5950,0 0,-1 z m 0,1 5950,0 0,3854 -5950,0 0,-3854 z" /></clipPath></defs><g
+ inkscape:connector-curvature="0"
+ id="path100"
+ d="m 0,0 5950,0 0,4546 L 0,4546 0,0 Z m 0,4546 5950,0 0,1 -5950,0 0,-1 z m 0,1 1360,0 0,1 -1360,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1364,0 0,1 -1364,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1368,0 0,1 -1368,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1372,0 0,1 -1372,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1376,0 0,1 -1376,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1380,0 0,1 -1380,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1384,0 0,1 -1384,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1388,0 0,1 -1388,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1391,0 0,1 -1391,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1389,0 0,1 -1389,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1385,0 0,1 -1385,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1381,0 0,1 -1381,0 0,-1 z m
+1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1377,0 0,1 -1377,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1373,0 0,1 -1373,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1369,0 0,1 -1369,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1365,0 0,1 -1365,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1361,0 0,1 -1361,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 1357,0 0,1 -1357,0 0,-1 z m 1399,0 4551,0 0,1 -4551,0 0,-1 z m -1399,1 5950,0 0,1 -5950,0 0,-1 z m 0,1 5950,0 0,3854 -5950,0 0,-3854 z" /></clipPath></defs><g
transform="matrix(0.125,0,0,-0.125,-87.571875,638.05691)"
inkscape:label="vbi_525"
inkscape:groupmode="layer"
@@ -62,165 +64,165 @@
transform="matrix(1.3000026,0,0,1.3000026,-210.17435,-1094.2823)"
id="g12"
style=""><path
- inkscape:connector-curvature="0"
- id="path14"
- style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 1281.75,3974.45 0,-85.05" /></g><g
+ inkscape:connector-curvature="0"
+ id="path14"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1281.75,3974.45 0,-85.05" /></g><g
transform="matrix(1.3000026,0,0,1.3000026,-210.17435,-1094.2823)"
id="g16"
style=""><g
- clip-path="url(#clipPath20)"
- id="g18"
- style=""><path
- inkscape:connector-curvature="0"
- id="path24"
- style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 1281.75,3931.93 113.4,0" /></g></g><g
+ clip-path="url(#clipPath20)"
+ id="g18"
+ style=""><path
+ inkscape:connector-curvature="0"
+ id="path24"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1281.75,3931.93 113.4,0" /></g></g><g
transform="matrix(1.3000026,0,0,1.3000026,-210.17435,-1094.2823)"
id="g26"
style=""><path
- inkscape:connector-curvature="0"
- id="path28"
- style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
- d="m 1352.31,3922.48 37.8,9.45 -37.8,9.45 0,-18.9" /><path
- inkscape:connector-curvature="0"
- id="path30"
- style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 1352.31,3922.48 37.8,9.45 -37.8,9.45 0,-18.9 z" /><path
- inkscape:connector-curvature="0"
- id="path32"
- style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 4683.75,4059.5 0,-56.7" /><path
- inkscape:connector-curvature="0"
- id="path34"
- style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 4400.25,4059.5 0,-56.7" /><path
- inkscape:connector-curvature="0"
- id="path36"
- style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 4116.75,4059.5 0,-56.7" /><path
- inkscape:connector-curvature="0"
- id="path38"
- style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 3833.25,4059.5 0,-56.7" /><path
- inkscape:connector-curvature="0"
- id="path40"
- style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 3549.75,4059.5 0,-56.7" /><path
- inkscape:connector-curvature="0"
- id="path42"
- style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 3266.25,4059.5 0,-56.7" /><path
- inkscape:connector-curvature="0"
- id="path44"
- style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 2982.75,4059.5 0,-56.7" /><path
- inkscape:connector-curvature="0"
- id="path46"
- style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 2699.25,4059.5 0,-56.7" /><path
- inkscape:connector-curvature="0"
- id="path48"
- style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 2415.75,4059.5 0,-56.7" /><path
- inkscape:connector-curvature="0"
- id="path50"
- style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 2132.25,4059.5 0,-56.7" /><path
- inkscape:connector-curvature="0"
- id="path52"
- style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 1848.75,4059.5 0,-56.7" /><path
- inkscape:connector-curvature="0"
- id="path54"
- style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 1565.25,4059.5 0,-56.7" /><path
- inkscape:connector-curvature="0"
- id="path56"
- style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 1281.75,4059.5 0,-56.7" /><path
- inkscape:connector-curvature="0"
- id="path58"
- style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 998.25,4059.5 0,-56.7" /><path
- inkscape:connector-curvature="0"
- id="path60"
- style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 714.75,4059.5 0,-56.7" /><path
- inkscape:connector-curvature="0"
- id="path62"
- style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 4683.75,4144.55 0,-56.7" /><path
- inkscape:connector-curvature="0"
- id="path64"
- style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 4400.25,4144.55 0,-56.7" /><path
- inkscape:connector-curvature="0"
- id="path66"
- style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 4116.75,4144.55 0,-56.7" /><path
- inkscape:connector-curvature="0"
- id="path68"
- style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 3833.25,4144.55 0,-56.7" /><path
- inkscape:connector-curvature="0"
- id="path70"
- style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 3549.75,4144.55 0,-56.7" /><path
- inkscape:connector-curvature="0"
- id="path72"
- style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 3266.25,4144.55 0,-56.7" /><path
- inkscape:connector-curvature="0"
- id="path74"
- style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 2982.75,4144.55 0,-56.7" /><path
- inkscape:connector-curvature="0"
- id="path76"
- style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 2699.25,4144.55 0,-56.7" /><path
- inkscape:connector-curvature="0"
- id="path78"
- style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 2415.75,4144.55 0,-56.7" /><path
- inkscape:connector-curvature="0"
- id="path80"
- style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 2132.25,4144.55 0,-56.7" /><path
- inkscape:connector-curvature="0"
- id="path82"
- style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 1848.75,4144.55 0,-56.7" /><path
- inkscape:connector-curvature="0"
- id="path84"
- style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 1565.25,4144.55 0,-56.7" /><path
- inkscape:connector-curvature="0"
- id="path86"
- style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 1281.75,4144.55 0,-56.7" /><path
- inkscape:connector-curvature="0"
- id="path88"
- style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 998.25,4144.55 0,-56.7" /><path
- inkscape:connector-curvature="0"
- id="path90"
- style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 714.75,4144.55 0,-56.7" /><path
- inkscape:connector-curvature="0"
- id="path92"
- style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 1281.75,4598.15 0,-85.05" /></g><g
+ inkscape:connector-curvature="0"
+ id="path28"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ d="m 1352.31,3922.48 37.8,9.45 -37.8,9.45 0,-18.9" /><path
+ inkscape:connector-curvature="0"
+ id="path30"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1352.31,3922.48 37.8,9.45 -37.8,9.45 0,-18.9 z" /><path
+ inkscape:connector-curvature="0"
+ id="path32"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 4683.75,4059.5 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path34"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 4400.25,4059.5 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path36"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 4116.75,4059.5 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path38"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 3833.25,4059.5 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path40"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 3549.75,4059.5 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path42"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 3266.25,4059.5 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path44"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 2982.75,4059.5 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path46"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 2699.25,4059.5 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path48"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 2415.75,4059.5 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path50"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 2132.25,4059.5 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path52"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1848.75,4059.5 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path54"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1565.25,4059.5 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path56"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1281.75,4059.5 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path58"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 998.25,4059.5 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path60"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 714.75,4059.5 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path62"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 4683.75,4144.55 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path64"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 4400.25,4144.55 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path66"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 4116.75,4144.55 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path68"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 3833.25,4144.55 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path70"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 3549.75,4144.55 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path72"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 3266.25,4144.55 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path74"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 2982.75,4144.55 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path76"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 2699.25,4144.55 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path78"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 2415.75,4144.55 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path80"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 2132.25,4144.55 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path82"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1848.75,4144.55 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path84"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1565.25,4144.55 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path86"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1281.75,4144.55 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path88"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 998.25,4144.55 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path90"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 714.75,4144.55 0,-56.7" /><path
+ inkscape:connector-curvature="0"
+ id="path92"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1281.75,4598.15 0,-85.05" /></g><g
transform="matrix(1.3000026,0,0,1.3000026,-210.17435,-1094.2823)"
id="g94"
style=""><g
- clip-path="url(#clipPath98)"
- id="g96"
- style=""><path
- inkscape:connector-curvature="0"
- id="path102"
- style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 1281.75,4555.63 113.4,0" /></g></g><path
+ clip-path="url(#clipPath98)"
+ id="g96"
+ style=""><path
+ inkscape:connector-curvature="0"
+ id="path102"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1281.75,4555.63 113.4,0" /></g></g><path
d="m 1547.8322,4815.7637 49.1401,12.2851 -49.1401,12.285 0,-24.5701"
style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
id="path106"
@@ -546,266 +548,266 @@
id="text268"
style="font-variant:normal;font-weight:normal;font-size:61.42512512px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"><tspan
- id="tspan270"
- sodipodi:role="line"
- y="-4035.6582"
- x="1621.9453 1642.3999 1676.5522">(1)</tspan></text>
+ id="tspan270"
+ sodipodi:role="line"
+ y="-4035.6582"
+ x="1621.9453 1642.3999 1676.5522">(1)</tspan></text>
<text
y="-4127.7959"
x="4199.7334"
id="text272"
style="font-variant:normal;font-weight:normal;font-size:73.71015167px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"><tspan
- id="tspan274"
- sodipodi:role="line"
- y="-4127.7959"
- x="4199.7334 3831.1829 2725.5305 3112.509 3462.6321 4568.2842 4916.3442 4957.3271 5653.4458 5694.4287 5284.895 5325.8779 2356.9773 1988.4264 1210.3424 1251.3252 1292.3081 1619.8759 841.79163 882.77454 923.75732">874569101211322631262</tspan><tspan
- id="tspan276"
- sodipodi:role="line"
- y="-4238.3613"
- x="4158.748 4199.7314 4240.7144 3790.1975 3831.1807 3872.1633 2684.5457 2725.5283 2766.5112 3071.5237 3112.5063 3153.4895 3421.647 3462.6299 3503.6125 4527.2988 4568.2822 4609.2646 4895.8496 4936.833 4977.8154 5632.9517 5673.9341 5714.917 5264.4009 5305.3833 5346.3662 2315.9946 2356.9775 2397.9604 1947.444 1988.4269 2029.4097 1210.3424 1251.3252 1292.3081 1578.8931 1619.876 1660.8589 841.79163 882.77454 923.75732">271270267268269272273275274266265263264262</tspan><tspan
- id="tspan278"
- sodipodi:role="line"
- y="-5049.1729"
- x="2725.5347 4568.2881 1988.4331 2356.9839 1619.8822 3094.0852 3462.636 4916.3506 4957.334 5284.9019 5325.8843 5653.4526 5694.4351 3812.7656 4181.3164">492315610111278</tspan><tspan
- id="tspan280"
- sodipodi:role="line"
- y="-4938.6074"
- x="2725.5474 4568.3013 1988.446 2356.9966 1619.8953 3094.0981 3462.6489 4916.3638 4957.3472 5284.9146 5325.8975 5653.4653 5694.4482 3812.7788 4181.3296">492315610111278</tspan><tspan
- id="tspan282"
- sodipodi:role="line"
- y="-5049.1729"
- x="841.81781 882.8006 923.78326">524</tspan><tspan
- id="tspan284"
- sodipodi:role="line"
- y="-4938.6074"
- x="841.81781 882.8006 923.78326">261</tspan><tspan
- id="tspan286"
- sodipodi:role="line"
- y="-5049.1729"
- x="1210.3684 1251.3512 1292.3342">525</tspan><tspan
- id="tspan288"
- sodipodi:role="line"
- y="-4938.6074"
- x="1210.3684 1251.3512 1292.3342">262</tspan><tspan
- id="tspan290"
- sodipodi:role="line"
- y="-5049.1729"
- x="6022.0161 6062.999">22</tspan><tspan
- id="tspan292"
- sodipodi:role="line"
- y="-4938.6074"
- x="6022.0161 6062.999">22</tspan><tspan
- id="tspan294"
- sodipodi:role="line"
- y="-5049.1729"
- x="6390.5669 6431.5498">23</tspan><tspan
- id="tspan296"
- sodipodi:role="line"
- y="-4938.6074"
- x="6390.5669 6431.5498">23</tspan><tspan
- id="tspan298"
- sodipodi:role="line"
- y="-4238.3623"
- x="6001.5244 6042.5068 6083.4902">285</tspan><tspan
- id="tspan300"
- sodipodi:role="line"
- y="-4127.7964"
- x="6022.0156 6062.9985">22</tspan><tspan
- id="tspan302"
- sodipodi:role="line"
- y="-4238.3623"
- x="6370.0747 6411.0571 6452.04">286</tspan><tspan
- id="tspan304"
- sodipodi:role="line"
- y="-4127.7964"
- x="6390.5664 6431.5493">23</tspan><tspan
- id="tspan306"
- sodipodi:role="line"
- y="-4459.4922"
- x="3540.4146 3581.3972 3618.2522 3638.7437 3659.2354 3679.7266 3696.0901 3737.073 3753.4365">1st field</tspan><tspan
- id="tspan308"
- sodipodi:role="line"
- y="-3648.6809"
- x="3528.1047 3569.0876 3610.0703 3651.0532 3671.5447 3692.0361 3708.3999 3749.3826 3765.7463">2nd field</tspan></text>
+ id="tspan274"
+ sodipodi:role="line"
+ y="-4127.7959"
+ x="4199.7334 3831.1829 2725.5305 3112.509 3462.6321 4568.2842 4916.3442 4957.3271 5653.4458 5694.4287 5284.895 5325.8779 2356.9773 1988.4264 1210.3424 1251.3252 1292.3081 1619.8759 841.79163 882.77454 923.75732">874569101211322631262</tspan><tspan
+ id="tspan276"
+ sodipodi:role="line"
+ y="-4238.3613"
+ x="4158.748 4199.7314 4240.7144 3790.1975 3831.1807 3872.1633 2684.5457 2725.5283 2766.5112 3071.5237 3112.5063 3153.4895 3421.647 3462.6299 3503.6125 4527.2988 4568.2822 4609.2646 4895.8496 4936.833 4977.8154 5632.9517 5673.9341 5714.917 5264.4009 5305.3833 5346.3662 2315.9946 2356.9775 2397.9604 1947.444 1988.4269 2029.4097 1210.3424 1251.3252 1292.3081 1578.8931 1619.876 1660.8589 841.79163 882.77454 923.75732">271270267268269272273275274266265263264262</tspan><tspan
+ id="tspan278"
+ sodipodi:role="line"
+ y="-5049.1729"
+ x="2725.5347 4568.2881 1988.4331 2356.9839 1619.8822 3094.0852 3462.636 4916.3506 4957.334 5284.9019 5325.8843 5653.4526 5694.4351 3812.7656 4181.3164">492315610111278</tspan><tspan
+ id="tspan280"
+ sodipodi:role="line"
+ y="-4938.6074"
+ x="2725.5474 4568.3013 1988.446 2356.9966 1619.8953 3094.0981 3462.6489 4916.3638 4957.3472 5284.9146 5325.8975 5653.4653 5694.4482 3812.7788 4181.3296">492315610111278</tspan><tspan
+ id="tspan282"
+ sodipodi:role="line"
+ y="-5049.1729"
+ x="841.81781 882.8006 923.78326">524</tspan><tspan
+ id="tspan284"
+ sodipodi:role="line"
+ y="-4938.6074"
+ x="841.81781 882.8006 923.78326">261</tspan><tspan
+ id="tspan286"
+ sodipodi:role="line"
+ y="-5049.1729"
+ x="1210.3684 1251.3512 1292.3342">525</tspan><tspan
+ id="tspan288"
+ sodipodi:role="line"
+ y="-4938.6074"
+ x="1210.3684 1251.3512 1292.3342">262</tspan><tspan
+ id="tspan290"
+ sodipodi:role="line"
+ y="-5049.1729"
+ x="6022.0161 6062.999">22</tspan><tspan
+ id="tspan292"
+ sodipodi:role="line"
+ y="-4938.6074"
+ x="6022.0161 6062.999">22</tspan><tspan
+ id="tspan294"
+ sodipodi:role="line"
+ y="-5049.1729"
+ x="6390.5669 6431.5498">23</tspan><tspan
+ id="tspan296"
+ sodipodi:role="line"
+ y="-4938.6074"
+ x="6390.5669 6431.5498">23</tspan><tspan
+ id="tspan298"
+ sodipodi:role="line"
+ y="-4238.3623"
+ x="6001.5244 6042.5068 6083.4902">285</tspan><tspan
+ id="tspan300"
+ sodipodi:role="line"
+ y="-4127.7964"
+ x="6022.0156 6062.9985">22</tspan><tspan
+ id="tspan302"
+ sodipodi:role="line"
+ y="-4238.3623"
+ x="6370.0747 6411.0571 6452.04">286</tspan><tspan
+ id="tspan304"
+ sodipodi:role="line"
+ y="-4127.7964"
+ x="6390.5664 6431.5493">23</tspan><tspan
+ id="tspan306"
+ sodipodi:role="line"
+ y="-4459.4922"
+ x="3540.4146 3581.3972 3618.2522 3638.7437 3659.2354 3679.7266 3696.0901 3737.073 3753.4365">1st field</tspan><tspan
+ id="tspan308"
+ sodipodi:role="line"
+ y="-3648.6809"
+ x="3528.1047 3569.0876 3610.0703 3651.0532 3671.5447 3692.0361 3708.3999 3749.3826 3765.7463">2nd field</tspan></text>
<text
y="-4127.7959"
x="4199.7334 3831.1829 2725.5305 3112.509 3462.6321 4568.2842 4916.3442 4957.3271 5653.4458 5694.4287 5284.895 5325.8779 2356.9773 1988.4264 1210.3424 1251.3252 1292.3081 1619.8759 841.79163 882.77454 923.75732"
id="text3632"
style="font-variant:normal;font-weight:normal;font-size:73.71015167px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"><tspan
- id="tspan3634"
- sodipodi:role="line"
- y="-4127.7959"
- x="4199.7334 3831.1829 2725.5305 3112.509 3462.6321 4568.2842 4916.3442 4957.3271 5653.4458 5694.4287 5284.895 5325.8779 2356.9773 1988.4264 1210.3424 1251.3252 1292.3081 1619.8759 841.79163 882.77454 923.75732">874569101211322631262</tspan></text>
+ id="tspan3634"
+ sodipodi:role="line"
+ y="-4127.7959"
+ x="4199.7334 3831.1829 2725.5305 3112.509 3462.6321 4568.2842 4916.3442 4957.3271 5653.4458 5694.4287 5284.895 5325.8779 2356.9773 1988.4264 1210.3424 1251.3252 1292.3081 1619.8759 841.79163 882.77454 923.75732">874569101211322631262</tspan></text>
<text
y="-4238.3613"
x="4158.748 4199.7314 4240.7144 3790.1975 3831.1807 3872.1633 2684.5457 2725.5283 2766.5112 3071.5237 3112.5063 3153.4895 3421.647 3462.6299 3503.6125 4527.2988 4568.2822 4609.2646 4895.8496 4936.833 4977.8154 5632.9517 5673.9341 5714.917 5264.4009 5305.3833 5346.3662 2315.9946 2356.9775 2397.9604 1947.444 1988.4269 2029.4097 1210.3424 1251.3252 1292.3081 1578.8931 1619.876 1660.8589 841.79163 882.77454 923.75732"
id="text3636"
style="font-variant:normal;font-weight:normal;font-size:73.71015167px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"><tspan
- id="tspan3638"
- sodipodi:role="line"
- y="-4238.3613"
- x="4158.748 4199.7314 4240.7144 3790.1975 3831.1807 3872.1633 2684.5457 2725.5283 2766.5112 3071.5237 3112.5063 3153.4895 3421.647 3462.6299 3503.6125 4527.2988 4568.2822 4609.2646 4895.8496 4936.833 4977.8154 5632.9517 5673.9341 5714.917 5264.4009 5305.3833 5346.3662 2315.9946 2356.9775 2397.9604 1947.444 1988.4269 2029.4097 1210.3424 1251.3252 1292.3081 1578.8931 1619.876 1660.8589 841.79163 882.77454 923.75732">271270267268269272273275274266265263264262</tspan></text>
+ id="tspan3638"
+ sodipodi:role="line"
+ y="-4238.3613"
+ x="4158.748 4199.7314 4240.7144 3790.1975 3831.1807 3872.1633 2684.5457 2725.5283 2766.5112 3071.5237 3112.5063 3153.4895 3421.647 3462.6299 3503.6125 4527.2988 4568.2822 4609.2646 4895.8496 4936.833 4977.8154 5632.9517 5673.9341 5714.917 5264.4009 5305.3833 5346.3662 2315.9946 2356.9775 2397.9604 1947.444 1988.4269 2029.4097 1210.3424 1251.3252 1292.3081 1578.8931 1619.876 1660.8589 841.79163 882.77454 923.75732">271270267268269272273275274266265263264262</tspan></text>
<text
y="-5049.1729"
x="2725.5347 4568.2881 1988.4331 2356.9839 1619.8822 3094.0852 3462.636 4916.3506 4957.334 5284.9019 5325.8843 5653.4526 5694.4351 3812.7656 4181.3164"
id="text3640"
style="font-variant:normal;font-weight:normal;font-size:73.71015167px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"><tspan
- id="tspan3642"
- sodipodi:role="line"
- y="-5049.1729"
- x="2725.5347 4568.2881 1988.4331 2356.9839 1619.8822 3094.0852 3462.636 4916.3506 4957.334 5284.9019 5325.8843 5653.4526 5694.4351 3812.7656 4181.3164">492315610111278</tspan></text>
+ id="tspan3642"
+ sodipodi:role="line"
+ y="-5049.1729"
+ x="2725.5347 4568.2881 1988.4331 2356.9839 1619.8822 3094.0852 3462.636 4916.3506 4957.334 5284.9019 5325.8843 5653.4526 5694.4351 3812.7656 4181.3164">492315610111278</tspan></text>
<text
y="-4938.6074"
x="2725.5474 4568.3013 1988.446 2356.9966 1619.8953 3094.0981 3462.6489 4916.3638 4957.3472 5284.9146 5325.8975 5653.4653 5694.4482 3812.7788 4181.3296"
id="text3644"
style="font-variant:normal;font-weight:normal;font-size:73.71015167px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"><tspan
- id="tspan3646"
- sodipodi:role="line"
- y="-4938.6074"
- x="2725.5474 4568.3013 1988.446 2356.9966 1619.8953 3094.0981 3462.6489 4916.3638 4957.3472 5284.9146 5325.8975 5653.4653 5694.4482 3812.7788 4181.3296">492315610111278</tspan></text>
+ id="tspan3646"
+ sodipodi:role="line"
+ y="-4938.6074"
+ x="2725.5474 4568.3013 1988.446 2356.9966 1619.8953 3094.0981 3462.6489 4916.3638 4957.3472 5284.9146 5325.8975 5653.4653 5694.4482 3812.7788 4181.3296">492315610111278</tspan></text>
<text
y="-5049.1729"
x="841.81781 882.8006 923.78326"
id="text3648"
style="font-variant:normal;font-weight:normal;font-size:73.71015167px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"><tspan
- id="tspan3650"
- sodipodi:role="line"
- y="-5049.1729"
- x="841.81781 882.8006 923.78326">524</tspan></text>
+ id="tspan3650"
+ sodipodi:role="line"
+ y="-5049.1729"
+ x="841.81781 882.8006 923.78326">524</tspan></text>
<text
y="-4938.6074"
x="841.81781 882.8006 923.78326"
id="text3652"
style="font-variant:normal;font-weight:normal;font-size:73.71015167px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"><tspan
- id="tspan3654"
- sodipodi:role="line"
- y="-4938.6074"
- x="841.81781 882.8006 923.78326">261</tspan></text>
+ id="tspan3654"
+ sodipodi:role="line"
+ y="-4938.6074"
+ x="841.81781 882.8006 923.78326">261</tspan></text>
<text
y="-5049.1729"
x="1210.3684 1251.3512 1292.3342"
id="text3656"
style="font-variant:normal;font-weight:normal;font-size:73.71015167px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"><tspan
- id="tspan3658"
- sodipodi:role="line"
- y="-5049.1729"
- x="1210.3684 1251.3512 1292.3342">525</tspan></text>
+ id="tspan3658"
+ sodipodi:role="line"
+ y="-5049.1729"
+ x="1210.3684 1251.3512 1292.3342">525</tspan></text>
<text
y="-4938.6074"
x="1210.3684 1251.3512 1292.3342"
id="text3660"
style="font-variant:normal;font-weight:normal;font-size:73.71015167px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"><tspan
- id="tspan3662"
- sodipodi:role="line"
- y="-4938.6074"
- x="1210.3684 1251.3512 1292.3342">262</tspan></text>
+ id="tspan3662"
+ sodipodi:role="line"
+ y="-4938.6074"
+ x="1210.3684 1251.3512 1292.3342">262</tspan></text>
<text
y="-5049.1729"
x="6022.0161 6062.999"
id="text3664"
style="font-variant:normal;font-weight:normal;font-size:73.71015167px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"><tspan
- id="tspan3666"
- sodipodi:role="line"
- y="-5049.1729"
- x="6022.0161 6062.999">22</tspan></text>
+ id="tspan3666"
+ sodipodi:role="line"
+ y="-5049.1729"
+ x="6022.0161 6062.999">22</tspan></text>
<text
y="-4938.6074"
x="6022.0161 6062.999"
id="text3668"
style="font-variant:normal;font-weight:normal;font-size:73.71015167px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"><tspan
- id="tspan3670"
- sodipodi:role="line"
- y="-4938.6074"
- x="6022.0161 6062.999">22</tspan></text>
+ id="tspan3670"
+ sodipodi:role="line"
+ y="-4938.6074"
+ x="6022.0161 6062.999">22</tspan></text>
<text
y="-5049.1729"
x="6390.5669 6431.5498"
id="text3672"
style="font-variant:normal;font-weight:normal;font-size:73.71015167px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"><tspan
- id="tspan3674"
- sodipodi:role="line"
- y="-5049.1729"
- x="6390.5669 6431.5498">23</tspan></text>
+ id="tspan3674"
+ sodipodi:role="line"
+ y="-5049.1729"
+ x="6390.5669 6431.5498">23</tspan></text>
<text
y="-4938.6074"
x="6390.5669 6431.5498"
id="text3676"
style="font-variant:normal;font-weight:normal;font-size:73.71015167px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"><tspan
- id="tspan3678"
- sodipodi:role="line"
- y="-4938.6074"
- x="6390.5669 6431.5498">23</tspan></text>
+ id="tspan3678"
+ sodipodi:role="line"
+ y="-4938.6074"
+ x="6390.5669 6431.5498">23</tspan></text>
<text
y="-4238.3623"
x="6001.5244 6042.5068 6083.4902"
id="text3680"
style="font-variant:normal;font-weight:normal;font-size:73.71015167px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"><tspan
- id="tspan3682"
- sodipodi:role="line"
- y="-4238.3623"
- x="6001.5244 6042.5068 6083.4902">285</tspan></text>
+ id="tspan3682"
+ sodipodi:role="line"
+ y="-4238.3623"
+ x="6001.5244 6042.5068 6083.4902">285</tspan></text>
<text
y="-4127.7964"
x="6022.0156 6062.9985"
id="text3684"
style="font-variant:normal;font-weight:normal;font-size:73.71015167px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"><tspan
- id="tspan3686"
- sodipodi:role="line"
- y="-4127.7964"
- x="6022.0156 6062.9985">22</tspan></text>
+ id="tspan3686"
+ sodipodi:role="line"
+ y="-4127.7964"
+ x="6022.0156 6062.9985">22</tspan></text>
<text
y="-4238.3623"
x="6370.0747 6411.0571 6452.04"
id="text3688"
style="font-variant:normal;font-weight:normal;font-size:73.71015167px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"><tspan
- id="tspan3690"
- sodipodi:role="line"
- y="-4238.3623"
- x="6370.0747 6411.0571 6452.04">286</tspan></text>
+ id="tspan3690"
+ sodipodi:role="line"
+ y="-4238.3623"
+ x="6370.0747 6411.0571 6452.04">286</tspan></text>
<text
y="-4127.7964"
x="6390.5664 6431.5493"
id="text3692"
style="font-variant:normal;font-weight:normal;font-size:73.71015167px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"><tspan
- id="tspan3694"
- sodipodi:role="line"
- y="-4127.7964"
- x="6390.5664 6431.5493">23</tspan></text>
+ id="tspan3694"
+ sodipodi:role="line"
+ y="-4127.7964"
+ x="6390.5664 6431.5493">23</tspan></text>
<text
y="-4459.4922"
x="3540.4146 3581.3972 3618.2522 3638.7437 3659.2354 3679.7266 3696.0901 3737.073 3753.4365"
id="text3696"
style="font-variant:normal;font-weight:normal;font-size:73.71015167px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"><tspan
- id="tspan3698"
- sodipodi:role="line"
- y="-4459.4922"
- x="3540.4146 3581.3972 3618.2522 3638.7437 3659.2354 3679.7266 3696.0901 3737.073 3753.4365">1st field</tspan></text>
+ id="tspan3698"
+ sodipodi:role="line"
+ y="-4459.4922"
+ x="3540.4146 3581.3972 3618.2522 3638.7437 3659.2354 3679.7266 3696.0901 3737.073 3753.4365">1st field</tspan></text>
<text
y="-3648.6809"
x="3528.1047 3569.0876 3610.0703 3651.0532 3671.5447 3692.0361 3708.3999 3749.3826 3765.7463"
id="text3700"
style="font-variant:normal;font-weight:normal;font-size:73.71015167px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"><tspan
- id="tspan3702"
- sodipodi:role="line"
- y="-3648.6809"
- x="3528.1047 3569.0876 3610.0703 3651.0532 3671.5447 3692.0361 3708.3999 3749.3826 3765.7463">2nd field</tspan></text>
+ id="tspan3702"
+ sodipodi:role="line"
+ y="-3648.6809"
+ x="3528.1047 3569.0876 3610.0703 3651.0532 3671.5447 3692.0361 3708.3999 3749.3826 3765.7463">2nd field</tspan></text>
</g></svg>
\ No newline at end of file
diff --git a/Documentation/media/uapi/v4l/vbi_625.svg b/Documentation/media/uapi/v4l/vbi_625.svg
index c117ddb7bf7e..9b18243c0a06 100644
--- a/Documentation/media/uapi/v4l/vbi_625.svg
+++ b/Documentation/media/uapi/v4l/vbi_625.svg
@@ -42,19 +42,21 @@
inkscape:current-layer="g10"
units="mm" /><metadata
id="metadata8"><rdf:RDF><cc:Work
- rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title /></cc:Work></rdf:RDF></metadata><defs
+ rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title /></cc:Work></rdf:RDF></metadata><defs
id="defs6"><clipPath
clipPathUnits="userSpaceOnUse"
id="clipPath20"><path
- d="m 0,0 5950,0 0,4546 L 0,4546 0,0 Z m 0,4546 5950,0 0,1 -5950,0 0,-1 z m 0,1 2211,0 0,1 -2211,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2215,0 0,1 -2215,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2219,0 0,1 -2219,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2223,0 0,1 -2223,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2227,0 0,1 -2227,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2231,0 0,1 -2231,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2235,0 0,1 -2235,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2239,0 0,1 -2239,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2241,0 0,1 -2241,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2240,0 0,1 -2240,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2236,0 0,1 -2236,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2232,0 0,1 -2232,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2228,0 0,1 -2228,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2224,0 0,1 -2224,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2220,0 0,1 -2220,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2216,0 0,1 -2216,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2212,0 0,1 -2212,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2208,0 0,1 -2208,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 5950,0 0,1 -5950,0 0,-1 z m 0,1 5950,0 0,3854 -5950,0 0,-3854 z"
- id="path22"
- inkscape:connector-curvature="0" /></clipPath><clipPath
+ d="m 0,0 5950,0 0,4546 L 0,4546 0,0 Z m 0,4546 5950,0 0,1 -5950,0 0,-1 z m 0,1 2211,0 0,1 -2211,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2215,0 0,1 -2215,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2219,0 0,1 -2219,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2223,0 0,1 -2223,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2227,0 0,1 -2227,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2231,0 0,1 -2231,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2235,0 0,1 -2235,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2239,0 0,1 -2239,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2241,0 0,1 -2241,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2240,0 0,1 -2240,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2236,0 0,1 -2236,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2232,0 0,1 -2232,0 0,-1 z m
+2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2228,0 0,1 -2228,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2224,0 0,1 -2224,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2220,0 0,1 -2220,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2216,0 0,1 -2216,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2212,0 0,1 -2212,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2208,0 0,1 -2208,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 5950,0 0,1 -5950,0 0,-1 z m 0,1 5950,0 0,3854 -5950,0 0,-3854 z"
+ id="path22"
+ inkscape:connector-curvature="0" /></clipPath><clipPath
clipPathUnits="userSpaceOnUse"
id="clipPath98"><path
- d="m 0,0 5950,0 0,3922 L 0,3922 0,0 Z m 0,3922 5950,0 0,1 -5950,0 0,-1 z m 0,1 2209,0 0,1 -2209,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2213,0 0,1 -2213,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2217,0 0,1 -2217,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2221,0 0,1 -2221,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2225,0 0,1 -2225,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2229,0 0,1 -2229,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2233,0 0,1 -2233,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2237,0 0,1 -2237,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2241,0 0,1 -2241,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2241,0 0,1 -2241,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2237,0 0,1 -2237,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2233,0 0,1 -2233,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2229,0 0,1 -2229,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2225,0 0,1 -2225,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2221,0 0,1 -2221,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2217,0 0,1 -2217,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2213,0 0,1 -2213,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2209,0 0,1 -2209,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 5950,0 0,1 -5950,0 0,-1 z m 0,1 5950,0 0,4478 -5950,0 0,-4478 z"
- id="path100"
- inkscape:connector-curvature="0" /></clipPath></defs><g
+ d="m 0,0 5950,0 0,3922 L 0,3922 0,0 Z m 0,3922 5950,0 0,1 -5950,0 0,-1 z m 0,1 2209,0 0,1 -2209,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2213,0 0,1 -2213,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2217,0 0,1 -2217,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2221,0 0,1 -2221,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2225,0 0,1 -2225,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2229,0 0,1 -2229,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2233,0 0,1 -2233,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2237,0 0,1 -2237,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2241,0 0,1 -2241,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2241,0 0,1 -2241,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2237,0 0,1 -2237,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2233,0 0,1 -2233,0 0,-1 z m
+2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2229,0 0,1 -2229,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2225,0 0,1 -2225,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2221,0 0,1 -2221,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2217,0 0,1 -2217,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2213,0 0,1 -2213,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 2209,0 0,1 -2209,0 0,-1 z m 2250,0 3700,0 0,1 -3700,0 0,-1 z m -2250,1 5950,0 0,1 -5950,0 0,-1 z m 0,1 5950,0 0,4478 -5950,0 0,-4478 z"
+ id="path100"
+ inkscape:connector-curvature="0" /></clipPath></defs><g
id="g10"
inkscape:groupmode="layer"
inkscape:label="vbi_625"
@@ -62,20 +64,20 @@
id="g12"
transform="matrix(1.3045828,0,0,1.3045828,-213.38312,-1110.9872)"
style=""><path
- d="m 2132.25,4598.15 0,-85.05"
- style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- id="path14"
- inkscape:connector-curvature="0" /></g><g
+ d="m 2132.25,4598.15 0,-85.05"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path14"
+ inkscape:connector-curvature="0" /></g><g
id="g16"
transform="matrix(1.3045828,0,0,1.3045828,-213.38312,-1110.9872)"
style=""><g
- id="g18"
- clip-path="url(#clipPath20)"
- style=""><path
- d="m 2132.25,4555.63 113.4,0"
- style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- id="path24"
- inkscape:connector-curvature="0" /></g></g><path
+ id="g18"
+ clip-path="url(#clipPath20)"
+ style=""><path
+ d="m 2132.25,4555.63 113.4,0"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path24"
+ inkscape:connector-curvature="0" /></g></g><path
inkscape:connector-curvature="0"
id="path28"
style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
@@ -211,13 +213,13 @@
id="g94"
transform="matrix(1.3045828,0,0,1.3045828,-213.38312,-1110.9872)"
style=""><g
- id="g96"
- clip-path="url(#clipPath98)"
- style=""><path
- d="m 2132.25,3931.93 113.4,0"
- style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- id="path102"
- inkscape:connector-curvature="0" /></g></g><path
+ id="g96"
+ clip-path="url(#clipPath98)"
+ style=""><path
+ d="m 2132.25,3931.93 113.4,0"
+ style="fill:none;stroke:#000000;stroke-width:2.36249995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ id="path102"
+ inkscape:connector-curvature="0" /></g></g><path
inkscape:connector-curvature="0"
id="path106"
style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
@@ -385,7 +387,8 @@
inkscape:connector-curvature="0"
id="path188"
style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 1162.8865,4739.7407 18.486,0 0,92.4688 18.499,-18.499 18.4859,36.9849 18.499,-36.9849 18.4859,36.9849 18.499,-55.4708 18.4859,55.4708 18.499,-18.4859 18.486,55.4708 18.499,-55.4708 18.4859,18.4859 18.499,36.9849 18.4859,-73.9698 18.499,36.9849 18.4859,-55.4708 0,-55.4839 18.499,0 0,-110.9548 36.985,0 0,110.9548 18.4859,0 0,55.4839 18.499,36.9849 18.4859,-36.9849 18.499,55.4708 18.4859,-18.4859 18.499,55.4708 18.486,-18.4859 0,-129.4537 18.4989,0 0,-110.9548 18.486,0 0,110.9548 166.4387,0 0,-110.9548 18.4859,0 0,110.9548 166.4387,0 0,-110.9548 18.4859,0 0,110.9548 166.4387,0 0,-110.9548 18.4859,0 0,110.9548 166.4387,0 0,-110.9548 18.4859,0 0,110.9548 166.4387,0 0,-110.9548 147.9397,0 0,110.9548 36.9849,0 0,-110.9548 147.9397,0 0,110.9548 36.985,0 0,-110.9548 147.9397,0 0,110.9548 36.9849,0 0,-110.9548 147.9397,0 0,110.9548 36.9849,0 0,-110.9548 147.9397,0 0,110.9548 36.9849,0 0,-110.9548 18.4859,0 0,110.9548 166.4387,0 0,-110.9548 18.486,0 0,110.9548 166.4386,0 0,-110.9548 18.486,0 0,110.9548 166.4387,0 0,-110.9548 18.4859,0 0,110.9548 166.4387,0 0,-110.9548 18.4859,0 0,110.9548 166.4387,0 0,-110.9548 36.9849,0 0,110.9548 332.8643,0 0,-110.9548 36.9849,0 0,110.9548 18.486,0" /><path
+ d="m 1162.8865,4739.7407 18.486,0 0,92.4688 18.499,-18.499 18.4859,36.9849 18.499,-36.9849 18.4859,36.9849 18.499,-55.4708 18.4859,55.4708 18.499,-18.4859 18.486,55.4708 18.499,-55.4708 18.4859,18.4859 18.499,36.9849 18.4859,-73.9698 18.499,36.9849 18.4859,-55.4708 0,-55.4839 18.499,0 0,-110.9548 36.985,0 0,110.9548 18.4859,0 0,55.4839 18.499,36.9849 18.4859,-36.9849 18.499,55.4708 18.4859,-18.4859 18.499,55.4708 18.486,-18.4859 0,-129.4537 18.4989,0 0,-110.9548 18.486,0 0,110.9548 166.4387,0 0,-110.9548 18.4859,0 0,110.9548 166.4387,0 0,-110.9548 18.4859,0 0,110.9548 166.4387,0 0,-110.9548 18.4859,0 0,110.9548 166.4387,0 0,-110.9548 18.4859,0 0,110.9548 166.4387,0 0,-110.9548 147.9397,0 0,110.9548 36.9849,0 0,-110.9548 147.9397,0 0,110.9548 36.985,0 0,-110.9548 147.9397,0 0,110.9548 36.9849,0 0,-110.9548 147.9397,0 0,110.9548 36.9849,0 0,-110.9548 147.9397,0 0,110.9548 36.9849,0
+0,-110.9548 18.4859,0 0,110.9548 166.4387,0 0,-110.9548 18.486,0 0,110.9548 166.4386,0 0,-110.9548 18.486,0 0,110.9548 166.4387,0 0,-110.9548 18.4859,0 0,110.9548 166.4387,0 0,-110.9548 18.4859,0 0,110.9548 166.4387,0 0,-110.9548 36.9849,0 0,110.9548 332.8643,0 0,-110.9548 36.9849,0 0,110.9548 18.486,0" /><path
inkscape:connector-curvature="0"
id="path190"
style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
@@ -409,7 +412,8 @@
inkscape:connector-curvature="0"
id="path200"
style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 1162.8865,3926.0723 18.486,0 0,55.4839 18.499,92.4558 18.4859,-55.4708 36.9849,36.9849 18.499,-55.4839 18.4859,18.499 18.499,-36.985 18.486,55.4709 18.499,-18.4859 18.4859,-36.985 18.499,18.486 18.4859,36.9849 18.499,-36.9849 18.4859,36.9849 0,-110.9548 18.499,0 0,-110.9547 18.486,0 0,110.9547 166.4386,0 0,-110.9547 18.486,0 0,110.9547 166.4387,0 0,-110.9547 18.4859,0 0,110.9547 166.4387,0 0,-110.9547 18.4859,0 0,110.9547 166.4387,0 0,-110.9547 18.4859,0 0,110.9547 166.4387,0 0,-110.9547 147.9397,0 0,110.9547 36.9849,0 0,-110.9547 147.9397,0 0,110.9547 36.9849,0 0,-110.9547 147.9397,0 0,110.9547 36.985,0 0,-110.9547 147.9397,0 0,110.9547 36.9849,0 0,-110.9547 147.9397,0 0,110.9547 36.9849,0 0,-110.9547 18.4859,0 0,110.9547 166.4387,0 0,-110.9547 18.4859,0 0,110.9547 166.4387,0 0,-110.9547 18.486,0 0,110.9547 166.4386,0 0,-110.9547 18.486,0 0,110.9547 166.4387,0 0,-110.9547 18.4859,0 0,110.9547 351.3633,0 0,-110.9547 36.9849,0 0,110.9547 18.486,0" /><path
+ d="m 1162.8865,3926.0723 18.486,0 0,55.4839 18.499,92.4558 18.4859,-55.4708 36.9849,36.9849 18.499,-55.4839 18.4859,18.499 18.499,-36.985 18.486,55.4709 18.499,-18.4859 18.4859,-36.985 18.499,18.486 18.4859,36.9849 18.499,-36.9849 18.4859,36.9849 0,-110.9548 18.499,0 0,-110.9547 18.486,0 0,110.9547 166.4386,0 0,-110.9547 18.486,0 0,110.9547 166.4387,0 0,-110.9547 18.4859,0 0,110.9547 166.4387,0 0,-110.9547 18.4859,0 0,110.9547 166.4387,0 0,-110.9547 18.4859,0 0,110.9547 166.4387,0 0,-110.9547 147.9397,0 0,110.9547 36.9849,0 0,-110.9547 147.9397,0 0,110.9547 36.9849,0 0,-110.9547 147.9397,0 0,110.9547 36.985,0 0,-110.9547 147.9397,0 0,110.9547 36.9849,0 0,-110.9547 147.9397,0 0,110.9547 36.9849,0 0,-110.9547 18.4859,0 0,110.9547 166.4387,0 0,-110.9547 18.4859,0 0,110.9547 166.4387,0 0,-110.9547 18.486,0 0,110.9547 166.4386,0 0,-110.9547 18.486,0 0,110.9547 166.4387,0 0,-110.9547
+18.4859,0 0,110.9547 351.3633,0 0,-110.9547 36.9849,0 0,110.9547 18.486,0" /><path
inkscape:connector-curvature="0"
id="path202"
style="fill:none;stroke:#000000;stroke-width:3.08207679;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
@@ -559,300 +563,300 @@
id="text276"
x="3550.4165"
y="-4462.3472"><tspan
- x="3550.4165 3591.5437 3628.5286 3649.0923 3669.656 3690.2195 3706.6409 3747.7681 3764.1895"
- y="-4462.3472"
- sodipodi:role="line"
- id="tspan278">1st field</tspan><tspan
- x="2732.6792 3823.7344 4193.5835 4581.9253 4951.7744 5321.6235 3472.3777 3102.5283 2321.7029 2362.8303 2403.9575 1951.8538 1992.981 2034.1083 1582.0045 1623.132 1664.259 5670.9062 5712.0337"
- y="-4943.1509"
- sodipodi:role="line"
- id="tspan280">1456783231231131022</tspan><tspan
- x="2732.6726 3823.7278 4193.5771 4581.9189 4951.7681 5321.6172 3472.3711 3102.522 2321.6965 2362.8237 2403.9509 1951.8473 1992.9745 2034.1018 1581.998 1623.1254 1664.2524 5670.8999 5712.0269"
- y="-5054.1064"
- sodipodi:role="line"
- id="tspan282">1456783262562462322</tspan><tspan
- x="842.29962 883.42694 924.55408"
- y="-4943.1509"
- sodipodi:role="line"
- id="tspan284">308</tspan><tspan
- x="842.29962 883.42694 924.55408"
- y="-5054.1064"
- sodipodi:role="line"
- id="tspan286">621</tspan><tspan
- x="1212.1489 1253.276 1294.4033"
- y="-4943.1509"
- sodipodi:role="line"
- id="tspan288">309</tspan><tspan
- x="1212.1489 1253.276 1294.4033"
- y="-5054.1064"
- sodipodi:role="line"
- id="tspan290">622</tspan><tspan
- x="3538.0635 3579.1907 3620.3179 3661.4451 3682.0088 3702.5723 3718.9937 3760.1208 3776.5422"
- y="-3648.6792"
- sodipodi:role="line"
- id="tspan292">2nd field</tspan></text>
+ x="3550.4165 3591.5437 3628.5286 3649.0923 3669.656 3690.2195 3706.6409 3747.7681 3764.1895"
+ y="-4462.3472"
+ sodipodi:role="line"
+ id="tspan278">1st field</tspan><tspan
+ x="2732.6792 3823.7344 4193.5835 4581.9253 4951.7744 5321.6235 3472.3777 3102.5283 2321.7029 2362.8303 2403.9575 1951.8538 1992.981 2034.1083 1582.0045 1623.132 1664.259 5670.9062 5712.0337"
+ y="-4943.1509"
+ sodipodi:role="line"
+ id="tspan280">1456783231231131022</tspan><tspan
+ x="2732.6726 3823.7278 4193.5771 4581.9189 4951.7681 5321.6172 3472.3711 3102.522 2321.6965 2362.8237 2403.9509 1951.8473 1992.9745 2034.1018 1581.998 1623.1254 1664.2524 5670.8999 5712.0269"
+ y="-5054.1064"
+ sodipodi:role="line"
+ id="tspan282">1456783262562462322</tspan><tspan
+ x="842.29962 883.42694 924.55408"
+ y="-4943.1509"
+ sodipodi:role="line"
+ id="tspan284">308</tspan><tspan
+ x="842.29962 883.42694 924.55408"
+ y="-5054.1064"
+ sodipodi:role="line"
+ id="tspan286">621</tspan><tspan
+ x="1212.1489 1253.276 1294.4033"
+ y="-4943.1509"
+ sodipodi:role="line"
+ id="tspan288">309</tspan><tspan
+ x="1212.1489 1253.276 1294.4033"
+ y="-5054.1064"
+ sodipodi:role="line"
+ id="tspan290">622</tspan><tspan
+ x="3538.0635 3579.1907 3620.3179 3661.4451 3682.0088 3702.5723 3718.9937 3760.1208 3776.5422"
+ y="-3648.6792"
+ sodipodi:role="line"
+ id="tspan292">2nd field</tspan></text>
<text
transform="scale(1,-1)"
style="font-variant:normal;font-weight:normal;font-size:61.64153671px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
id="text294"
x="2734.751"
y="-4037.021"><tspan
- x="2734.751 2755.2776 2789.5503"
- y="-4037.021"
- sodipodi:role="line"
- id="tspan296">(1)</tspan></text>
+ x="2734.751 2755.2776 2789.5503"
+ y="-4037.021"
+ sodipodi:role="line"
+ id="tspan296">(1)</tspan></text>
<text
transform="scale(1,-1)"
style="font-variant:normal;font-weight:normal;font-size:73.96984863px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
id="text298"
x="4951.772"
y="-4129.4834"><tspan
- x="4951.772 4581.9229 4212.0737 3842.2244 3490.8677 3102.5259 2321.7004 2362.8276 2403.9551 1951.8512 1992.9785 2034.1057 1582.0022 1623.1293 1664.2563"
- y="-4129.4834"
- sodipodi:role="line"
- id="tspan300">765432313312311</tspan><tspan
- x="6020.1929 6061.3198 6102.4473 5650.3433 5691.4707 5732.5981 5280.4941 5321.6216 5362.7485 4910.645 4951.7725 4992.8994 4540.7959 4581.9229 4623.0503 4170.9468 4212.0737 4253.2012 3801.0974 3842.2246 3883.3518 3449.7405 3490.8677 3531.9951 3061.3989 3102.5261 3143.6533 2691.5496 2732.677 2773.8042 2321.7004 2362.8276 2403.9551 1951.8512 1992.9785 2034.1057 1582.0022 1623.1293 1664.2563"
- y="-4240.4385"
- sodipodi:role="line"
- id="tspan302">336335321320319318317316315314313312311</tspan><tspan
- x="2732.6765 5321.6211 5670.9062 5712.0337 6040.7554 6081.8828 842.30634 883.43323 924.56055"
- y="-4129.4834"
- sodipodi:role="line"
- id="tspan304">182223309</tspan><tspan
- x="842.30634 883.43323 924.56055"
- y="-4240.4385"
- sodipodi:role="line"
- id="tspan306">309</tspan><tspan
- x="1212.1553 1253.2826 1294.4099"
- y="-4129.4834"
- sodipodi:role="line"
- id="tspan308">310</tspan><tspan
- x="1212.1553 1253.2826 1294.4099"
- y="-4240.4385"
- sodipodi:role="line"
- id="tspan310">310</tspan><tspan
- x="6410.605 6451.7319"
- y="-4129.4834"
- sodipodi:role="line"
- id="tspan312">24</tspan><tspan
- x="6390.041 6431.1685 6472.2954"
- y="-4240.4385"
- sodipodi:role="line"
- id="tspan314">337</tspan><tspan
- x="6040.7559 6081.8833"
- y="-4943.1504"
- sodipodi:role="line"
- id="tspan316">23</tspan><tspan
- x="6040.7559 6081.8833"
- y="-5054.106"
- sodipodi:role="line"
- id="tspan318">23</tspan><tspan
- x="6410.605 6451.7324"
- y="-4943.1504"
- sodipodi:role="line"
- id="tspan320">24</tspan><tspan
- x="6410.605 6451.7324"
- y="-5054.106"
- sodipodi:role="line"
- id="tspan322">24</tspan></text>
+ x="4951.772 4581.9229 4212.0737 3842.2244 3490.8677 3102.5259 2321.7004 2362.8276 2403.9551 1951.8512 1992.9785 2034.1057 1582.0022 1623.1293 1664.2563"
+ y="-4129.4834"
+ sodipodi:role="line"
+ id="tspan300">765432313312311</tspan><tspan
+ x="6020.1929 6061.3198 6102.4473 5650.3433 5691.4707 5732.5981 5280.4941 5321.6216 5362.7485 4910.645 4951.7725 4992.8994 4540.7959 4581.9229 4623.0503 4170.9468 4212.0737 4253.2012 3801.0974 3842.2246 3883.3518 3449.7405 3490.8677 3531.9951 3061.3989 3102.5261 3143.6533 2691.5496 2732.677 2773.8042 2321.7004 2362.8276 2403.9551 1951.8512 1992.9785 2034.1057 1582.0022 1623.1293 1664.2563"
+ y="-4240.4385"
+ sodipodi:role="line"
+ id="tspan302">336335321320319318317316315314313312311</tspan><tspan
+ x="2732.6765 5321.6211 5670.9062 5712.0337 6040.7554 6081.8828 842.30634 883.43323 924.56055"
+ y="-4129.4834"
+ sodipodi:role="line"
+ id="tspan304">182223309</tspan><tspan
+ x="842.30634 883.43323 924.56055"
+ y="-4240.4385"
+ sodipodi:role="line"
+ id="tspan306">309</tspan><tspan
+ x="1212.1553 1253.2826 1294.4099"
+ y="-4129.4834"
+ sodipodi:role="line"
+ id="tspan308">310</tspan><tspan
+ x="1212.1553 1253.2826 1294.4099"
+ y="-4240.4385"
+ sodipodi:role="line"
+ id="tspan310">310</tspan><tspan
+ x="6410.605 6451.7319"
+ y="-4129.4834"
+ sodipodi:role="line"
+ id="tspan312">24</tspan><tspan
+ x="6390.041 6431.1685 6472.2954"
+ y="-4240.4385"
+ sodipodi:role="line"
+ id="tspan314">337</tspan><tspan
+ x="6040.7559 6081.8833"
+ y="-4943.1504"
+ sodipodi:role="line"
+ id="tspan316">23</tspan><tspan
+ x="6040.7559 6081.8833"
+ y="-5054.106"
+ sodipodi:role="line"
+ id="tspan318">23</tspan><tspan
+ x="6410.605 6451.7324"
+ y="-4943.1504"
+ sodipodi:role="line"
+ id="tspan320">24</tspan><tspan
+ x="6410.605 6451.7324"
+ y="-5054.106"
+ sodipodi:role="line"
+ id="tspan322">24</tspan></text>
<text
transform="scale(1,-1)"
style="font-variant:normal;font-weight:normal;font-size:73.96984863px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
id="text3671"
x="3550.4165 3591.5437 3628.5286 3649.0923 3669.656 3690.2195 3706.6409 3747.7681 3764.1895"
y="-4462.3472"><tspan
- x="3550.4165 3591.5437 3628.5286 3649.0923 3669.656 3690.2195 3706.6409 3747.7681 3764.1895"
- y="-4462.3472"
- sodipodi:role="line"
- id="tspan3673">1st field</tspan></text>
+ x="3550.4165 3591.5437 3628.5286 3649.0923 3669.656 3690.2195 3706.6409 3747.7681 3764.1895"
+ y="-4462.3472"
+ sodipodi:role="line"
+ id="tspan3673">1st field</tspan></text>
<text
transform="scale(1,-1)"
style="font-variant:normal;font-weight:normal;font-size:73.96984863px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
id="text3675"
x="2732.6792 3823.7344 4193.5835 4581.9253 4951.7744 5321.6235 3472.3777 3102.5283 2321.7029 2362.8303 2403.9575 1951.8538 1992.981 2034.1083 1582.0045 1623.132 1664.259 5670.9062 5712.0337"
y="-4943.1509"><tspan
- x="2732.6792 3823.7344 4193.5835 4581.9253 4951.7744 5321.6235 3472.3777 3102.5283 2321.7029 2362.8303 2403.9575 1951.8538 1992.981 2034.1083 1582.0045 1623.132 1664.259 5670.9062 5712.0337"
- y="-4943.1509"
- sodipodi:role="line"
- id="tspan3677">1456783231231131022</tspan></text>
+ x="2732.6792 3823.7344 4193.5835 4581.9253 4951.7744 5321.6235 3472.3777 3102.5283 2321.7029 2362.8303 2403.9575 1951.8538 1992.981 2034.1083 1582.0045 1623.132 1664.259 5670.9062 5712.0337"
+ y="-4943.1509"
+ sodipodi:role="line"
+ id="tspan3677">1456783231231131022</tspan></text>
<text
transform="scale(1,-1)"
style="font-variant:normal;font-weight:normal;font-size:73.96984863px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
id="text3679"
x="2732.6726 3823.7278 4193.5771 4581.9189 4951.7681 5321.6172 3472.3711 3102.522 2321.6965 2362.8237 2403.9509 1951.8473 1992.9745 2034.1018 1581.998 1623.1254 1664.2524 5670.8999 5712.0269"
y="-5054.1064"><tspan
- x="2732.6726 3823.7278 4193.5771 4581.9189 4951.7681 5321.6172 3472.3711 3102.522 2321.6965 2362.8237 2403.9509 1951.8473 1992.9745 2034.1018 1581.998 1623.1254 1664.2524 5670.8999 5712.0269"
- y="-5054.1064"
- sodipodi:role="line"
- id="tspan3681">1456783262562462322</tspan></text>
+ x="2732.6726 3823.7278 4193.5771 4581.9189 4951.7681 5321.6172 3472.3711 3102.522 2321.6965 2362.8237 2403.9509 1951.8473 1992.9745 2034.1018 1581.998 1623.1254 1664.2524 5670.8999 5712.0269"
+ y="-5054.1064"
+ sodipodi:role="line"
+ id="tspan3681">1456783262562462322</tspan></text>
<text
transform="scale(1,-1)"
style="font-variant:normal;font-weight:normal;font-size:73.96984863px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
id="text3683"
x="842.29962 883.42694 924.55408"
y="-4943.1509"><tspan
- x="842.29962 883.42694 924.55408"
- y="-4943.1509"
- sodipodi:role="line"
- id="tspan3685">308</tspan></text>
+ x="842.29962 883.42694 924.55408"
+ y="-4943.1509"
+ sodipodi:role="line"
+ id="tspan3685">308</tspan></text>
<text
transform="scale(1,-1)"
style="font-variant:normal;font-weight:normal;font-size:73.96984863px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
id="text3687"
x="842.29962 883.42694 924.55408"
y="-5054.1064"><tspan
- x="842.29962 883.42694 924.55408"
- y="-5054.1064"
- sodipodi:role="line"
- id="tspan3689">621</tspan></text>
+ x="842.29962 883.42694 924.55408"
+ y="-5054.1064"
+ sodipodi:role="line"
+ id="tspan3689">621</tspan></text>
<text
transform="scale(1,-1)"
style="font-variant:normal;font-weight:normal;font-size:73.96984863px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
id="text3691"
x="1212.1489 1253.276 1294.4033"
y="-4943.1509"><tspan
- x="1212.1489 1253.276 1294.4033"
- y="-4943.1509"
- sodipodi:role="line"
- id="tspan3693">309</tspan></text>
+ x="1212.1489 1253.276 1294.4033"
+ y="-4943.1509"
+ sodipodi:role="line"
+ id="tspan3693">309</tspan></text>
<text
transform="scale(1,-1)"
style="font-variant:normal;font-weight:normal;font-size:73.96984863px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
id="text3695"
x="1212.1489 1253.276 1294.4033"
y="-5054.1064"><tspan
- x="1212.1489 1253.276 1294.4033"
- y="-5054.1064"
- sodipodi:role="line"
- id="tspan3697">622</tspan></text>
+ x="1212.1489 1253.276 1294.4033"
+ y="-5054.1064"
+ sodipodi:role="line"
+ id="tspan3697">622</tspan></text>
<text
transform="scale(1,-1)"
style="font-variant:normal;font-weight:normal;font-size:73.96984863px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
id="text3699"
x="3538.0635 3579.1907 3620.3179 3661.4451 3682.0088 3702.5723 3718.9937 3760.1208 3776.5422"
y="-3648.6792"><tspan
- x="3538.0635 3579.1907 3620.3179 3661.4451 3682.0088 3702.5723 3718.9937 3760.1208 3776.5422"
- y="-3648.6792"
- sodipodi:role="line"
- id="tspan3701">2nd field</tspan></text>
+ x="3538.0635 3579.1907 3620.3179 3661.4451 3682.0088 3702.5723 3718.9937 3760.1208 3776.5422"
+ y="-3648.6792"
+ sodipodi:role="line"
+ id="tspan3701">2nd field</tspan></text>
<text
transform="scale(1,-1)"
style="font-variant:normal;font-weight:normal;font-size:73.96984863px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
id="text4083"
x="4951.772 4581.9229 4212.0737 3842.2244 3490.8677 3102.5259 2321.7004 2362.8276 2403.9551 1951.8512 1992.9785 2034.1057 1582.0022 1623.1293 1664.2563"
y="-4129.4834"><tspan
- x="4951.772 4581.9229 4212.0737 3842.2244 3490.8677 3102.5259 2321.7004 2362.8276 2403.9551 1951.8512 1992.9785 2034.1057 1582.0022 1623.1293 1664.2563"
- y="-4129.4834"
- sodipodi:role="line"
- id="tspan4085">765432313312311</tspan></text>
+ x="4951.772 4581.9229 4212.0737 3842.2244 3490.8677 3102.5259 2321.7004 2362.8276 2403.9551 1951.8512 1992.9785 2034.1057 1582.0022 1623.1293 1664.2563"
+ y="-4129.4834"
+ sodipodi:role="line"
+ id="tspan4085">765432313312311</tspan></text>
<text
transform="scale(1,-1)"
style="font-variant:normal;font-weight:normal;font-size:73.96984863px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
id="text4087"
x="6020.1929 6061.3198 6102.4473 5650.3433 5691.4707 5732.5981 5280.4941 5321.6216 5362.7485 4910.645 4951.7725 4992.8994 4540.7959 4581.9229 4623.0503 4170.9468 4212.0737 4253.2012 3801.0974 3842.2246 3883.3518 3449.7405 3490.8677 3531.9951 3061.3989 3102.5261 3143.6533 2691.5496 2732.677 2773.8042 2321.7004 2362.8276 2403.9551 1951.8512 1992.9785 2034.1057 1582.0022 1623.1293 1664.2563"
y="-4240.4385"><tspan
- x="6020.1929 6061.3198 6102.4473 5650.3433 5691.4707 5732.5981 5280.4941 5321.6216 5362.7485 4910.645 4951.7725 4992.8994 4540.7959 4581.9229 4623.0503 4170.9468 4212.0737 4253.2012 3801.0974 3842.2246 3883.3518 3449.7405 3490.8677 3531.9951 3061.3989 3102.5261 3143.6533 2691.5496 2732.677 2773.8042 2321.7004 2362.8276 2403.9551 1951.8512 1992.9785 2034.1057 1582.0022 1623.1293 1664.2563"
- y="-4240.4385"
- sodipodi:role="line"
- id="tspan4089">336335321320319318317316315314313312311</tspan></text>
+ x="6020.1929 6061.3198 6102.4473 5650.3433 5691.4707 5732.5981 5280.4941 5321.6216 5362.7485 4910.645 4951.7725 4992.8994 4540.7959 4581.9229 4623.0503 4170.9468 4212.0737 4253.2012 3801.0974 3842.2246 3883.3518 3449.7405 3490.8677 3531.9951 3061.3989 3102.5261 3143.6533 2691.5496 2732.677 2773.8042 2321.7004 2362.8276 2403.9551 1951.8512 1992.9785 2034.1057 1582.0022 1623.1293 1664.2563"
+ y="-4240.4385"
+ sodipodi:role="line"
+ id="tspan4089">336335321320319318317316315314313312311</tspan></text>
<text
transform="scale(1,-1)"
style="font-variant:normal;font-weight:normal;font-size:73.96984863px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
id="text4091"
x="2732.6765 5321.6211 5670.9062 5712.0337 6040.7554 6081.8828 842.30634 883.43323 924.56055"
y="-4129.4834"><tspan
- x="2732.6765 5321.6211 5670.9062 5712.0337 6040.7554 6081.8828 842.30634 883.43323 924.56055"
- y="-4129.4834"
- sodipodi:role="line"
- id="tspan4093">182223309</tspan></text>
+ x="2732.6765 5321.6211 5670.9062 5712.0337 6040.7554 6081.8828 842.30634 883.43323 924.56055"
+ y="-4129.4834"
+ sodipodi:role="line"
+ id="tspan4093">182223309</tspan></text>
<text
transform="scale(1,-1)"
style="font-variant:normal;font-weight:normal;font-size:73.96984863px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
id="text4095"
x="842.30634 883.43323 924.56055"
y="-4240.4385"><tspan
- x="842.30634 883.43323 924.56055"
- y="-4240.4385"
- sodipodi:role="line"
- id="tspan4097">309</tspan></text>
+ x="842.30634 883.43323 924.56055"
+ y="-4240.4385"
+ sodipodi:role="line"
+ id="tspan4097">309</tspan></text>
<text
transform="scale(1,-1)"
style="font-variant:normal;font-weight:normal;font-size:73.96984863px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
id="text4099"
x="1212.1553 1253.2826 1294.4099"
y="-4129.4834"><tspan
- x="1212.1553 1253.2826 1294.4099"
- y="-4129.4834"
- sodipodi:role="line"
- id="tspan4101">310</tspan></text>
+ x="1212.1553 1253.2826 1294.4099"
+ y="-4129.4834"
+ sodipodi:role="line"
+ id="tspan4101">310</tspan></text>
<text
transform="scale(1,-1)"
style="font-variant:normal;font-weight:normal;font-size:73.96984863px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
id="text4103"
x="1212.1553 1253.2826 1294.4099"
y="-4240.4385"><tspan
- x="1212.1553 1253.2826 1294.4099"
- y="-4240.4385"
- sodipodi:role="line"
- id="tspan4105">310</tspan></text>
+ x="1212.1553 1253.2826 1294.4099"
+ y="-4240.4385"
+ sodipodi:role="line"
+ id="tspan4105">310</tspan></text>
<text
transform="scale(1,-1)"
style="font-variant:normal;font-weight:normal;font-size:73.96984863px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
id="text4107"
x="6410.605 6451.7319"
y="-4129.4834"><tspan
- x="6410.605 6451.7319"
- y="-4129.4834"
- sodipodi:role="line"
- id="tspan4109">24</tspan></text>
+ x="6410.605 6451.7319"
+ y="-4129.4834"
+ sodipodi:role="line"
+ id="tspan4109">24</tspan></text>
<text
transform="scale(1,-1)"
style="font-variant:normal;font-weight:normal;font-size:73.96984863px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
id="text4111"
x="6390.041 6431.1685 6472.2954"
y="-4240.4385"><tspan
- x="6390.041 6431.1685 6472.2954"
- y="-4240.4385"
- sodipodi:role="line"
- id="tspan4113">337</tspan></text>
+ x="6390.041 6431.1685 6472.2954"
+ y="-4240.4385"
+ sodipodi:role="line"
+ id="tspan4113">337</tspan></text>
<text
transform="scale(1,-1)"
style="font-variant:normal;font-weight:normal;font-size:73.96984863px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
id="text4115"
x="6040.7559 6081.8833"
y="-4943.1504"><tspan
- x="6040.7559 6081.8833"
- y="-4943.1504"
- sodipodi:role="line"
- id="tspan4117">23</tspan></text>
+ x="6040.7559 6081.8833"
+ y="-4943.1504"
+ sodipodi:role="line"
+ id="tspan4117">23</tspan></text>
<text
transform="scale(1,-1)"
style="font-variant:normal;font-weight:normal;font-size:73.96984863px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
id="text4119"
x="6040.7559 6081.8833"
y="-5054.106"><tspan
- x="6040.7559 6081.8833"
- y="-5054.106"
- sodipodi:role="line"
- id="tspan4121">23</tspan></text>
+ x="6040.7559 6081.8833"
+ y="-5054.106"
+ sodipodi:role="line"
+ id="tspan4121">23</tspan></text>
<text
transform="scale(1,-1)"
style="font-variant:normal;font-weight:normal;font-size:73.96984863px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
id="text4123"
x="6410.605 6451.7324"
y="-4943.1504"><tspan
- x="6410.605 6451.7324"
- y="-4943.1504"
- sodipodi:role="line"
- id="tspan4125">24</tspan></text>
+ x="6410.605 6451.7324"
+ y="-4943.1504"
+ sodipodi:role="line"
+ id="tspan4125">24</tspan></text>
<text
transform="scale(1,-1)"
style="font-variant:normal;font-weight:normal;font-size:73.96984863px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
id="text4127"
x="6410.605 6451.7324"
y="-5054.106"><tspan
- x="6410.605 6451.7324"
- y="-5054.106"
- sodipodi:role="line"
- id="tspan4129">24</tspan></text>
+ x="6410.605 6451.7324"
+ y="-5054.106"
+ sodipodi:role="line"
+ id="tspan4129">24</tspan></text>
</g></svg> \ No newline at end of file
diff --git a/Documentation/media/uapi/v4l/vbi_hsync.svg b/Documentation/media/uapi/v4l/vbi_hsync.svg
index 4d5c0b4f146e..e17ff8314e7b 100644
--- a/Documentation/media/uapi/v4l/vbi_hsync.svg
+++ b/Documentation/media/uapi/v4l/vbi_hsync.svg
@@ -42,27 +42,27 @@
fit-margin-right="0"
fit-margin-bottom="0" /><metadata
id="metadata8"><rdf:RDF><cc:Work
- rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title /></cc:Work></rdf:RDF></metadata><defs
+ rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title /></cc:Work></rdf:RDF></metadata><defs
id="defs6"><clipPath
clipPathUnits="userSpaceOnUse"
id="clipPath30"><path
- d="m 0,0 0,1163 1544,0 L 1544,0 0,0 Z m 187.184,836.05 0,-19.278 48.517,0 -38.556,9.639 38.556,9.639 -48.517,0 z m 689.189,-19.278 0,19.278 -48.516,0 38.556,-9.639 -38.556,-9.639 48.516,0 z"
- id="path32"
- inkscape:connector-curvature="0"
- style="clip-rule:evenodd" /></clipPath><clipPath
+ d="m 0,0 0,1163 1544,0 L 1544,0 0,0 Z m 187.184,836.05 0,-19.278 48.517,0 -38.556,9.639 38.556,9.639 -48.517,0 z m 689.189,-19.278 0,19.278 -48.516,0 38.556,-9.639 -38.556,-9.639 48.516,0 z"
+ id="path32"
+ inkscape:connector-curvature="0"
+ style="clip-rule:evenodd" /></clipPath><clipPath
clipPathUnits="userSpaceOnUse"
id="clipPath52"><path
- d="m 0,0 0,1163 1544,0 L 1544,0 0,0 Z m 804.08,79.3887 0,19.2778 -48.516,0 38.556,-9.6389 -38.556,-9.6389 48.516,0 z m -703.647,19.2778 0,-19.2778 48.517,0 -38.556,9.6389 38.556,9.6389 -48.517,0 z"
- id="path54"
- inkscape:connector-curvature="0"
- style="clip-rule:evenodd" /></clipPath><clipPath
+ d="m 0,0 0,1163 1544,0 L 1544,0 0,0 Z m 804.08,79.3887 0,19.2778 -48.516,0 38.556,-9.6389 -38.556,-9.6389 48.516,0 z m -703.647,19.2778 0,-19.2778 48.517,0 -38.556,9.6389 38.556,9.6389 -48.517,0 z"
+ id="path54"
+ inkscape:connector-curvature="0"
+ style="clip-rule:evenodd" /></clipPath><clipPath
clipPathUnits="userSpaceOnUse"
id="clipPath94"><path
- d="m 0,0 0,1163 1544,0 L 1544,0 0,0 Z m 471.535,195.057 0,19.278 -48.516,0 38.555,-9.639 -38.555,-9.639 48.516,0 z m -284.351,19.278 0,-19.278 48.517,0 -38.556,9.639 38.556,9.639 -48.517,0 z"
- id="path96"
- inkscape:connector-curvature="0"
- style="clip-rule:evenodd" /></clipPath></defs><g
+ d="m 0,0 0,1163 1544,0 L 1544,0 0,0 Z m 471.535,195.057 0,19.278 -48.516,0 38.555,-9.639 -38.555,-9.639 48.516,0 z m -284.351,19.278 0,-19.278 48.517,0 -38.556,9.639 38.556,9.639 -48.517,0 z"
+ id="path96"
+ inkscape:connector-curvature="0"
+ style="clip-rule:evenodd" /></clipPath></defs><g
id="g10"
inkscape:groupmode="layer"
inkscape:label="vbi_hsync"
@@ -70,69 +70,69 @@
id="g14"
transform="matrix(0.36030235,0,0,0.36030235,-0.75498483,-1.0743684)"
style=""><path
- inkscape:connector-curvature="0"
- id="path16"
- style="fill:none;stroke:#000000;stroke-width:2.40974998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="M 32.9604,580.617 4.04346,493.866" /><path
- inkscape:connector-curvature="0"
- id="path18"
- style="fill:none;stroke:#000000;stroke-width:2.40974998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 192.004,855.328 0,-665.091" /><path
- inkscape:connector-curvature="0"
- id="path20"
- style="fill:none;stroke:#000000;stroke-width:2.40974998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 466.715,392.656 0,-202.419" /><path
- inkscape:connector-curvature="0"
- id="path22"
- style="fill:none;stroke:#000000;stroke-width:2.40974998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 799.261,508.324 0,-433.7549" /><path
- inkscape:connector-curvature="0"
- id="path24"
- style="fill:none;stroke:#000000;stroke-width:4.81949997;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 857.095,537.241 231.335,0" /></g><g
+ inkscape:connector-curvature="0"
+ id="path16"
+ style="fill:none;stroke:#000000;stroke-width:2.40974998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="M 32.9604,580.617 4.04346,493.866" /><path
+ inkscape:connector-curvature="0"
+ id="path18"
+ style="fill:none;stroke:#000000;stroke-width:2.40974998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 192.004,855.328 0,-665.091" /><path
+ inkscape:connector-curvature="0"
+ id="path20"
+ style="fill:none;stroke:#000000;stroke-width:2.40974998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 466.715,392.656 0,-202.419" /><path
+ inkscape:connector-curvature="0"
+ id="path22"
+ style="fill:none;stroke:#000000;stroke-width:2.40974998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 799.261,508.324 0,-433.7549" /><path
+ inkscape:connector-curvature="0"
+ id="path24"
+ style="fill:none;stroke:#000000;stroke-width:4.81949997;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 857.095,537.241 231.335,0" /></g><g
id="g26"
transform="matrix(0.36030235,0,0,0.36030235,-0.75498483,-1.0743684)"
style=""><g
- clip-path="url(#clipPath30)"
- id="g28"
- style=""><path
- inkscape:connector-curvature="0"
- id="path34"
- style="fill:none;stroke:#000000;stroke-width:2.40974998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 871.553,826.411 -679.549,0" /></g></g><g
+ clip-path="url(#clipPath30)"
+ id="g28"
+ style=""><path
+ inkscape:connector-curvature="0"
+ id="path34"
+ style="fill:none;stroke:#000000;stroke-width:2.40974998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 871.553,826.411 -679.549,0" /></g></g><g
id="g36"
transform="matrix(0.36030235,0,0,0.36030235,-0.75498483,-1.0743684)"
style=""><path
- inkscape:connector-curvature="0"
- id="path38"
- style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
- d="m 827.857,816.772 38.556,9.639 -38.556,9.639 0,-19.278" /><path
- inkscape:connector-curvature="0"
- id="path40"
- style="fill:none;stroke:#000000;stroke-width:2.40974998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 827.857,816.772 38.556,9.639 -38.556,9.639 0,-19.278 z" /><path
- inkscape:connector-curvature="0"
- id="path42"
- style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
- d="m 235.701,836.05 -38.556,-9.639 38.556,-9.639 0,19.278" /><path
- inkscape:connector-curvature="0"
- id="path44"
- style="fill:none;stroke:#000000;stroke-width:2.40974998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 235.701,836.05 -38.556,-9.639 38.556,-9.639 0,19.278 z" /><path
- inkscape:connector-curvature="0"
- id="path46"
- style="fill:none;stroke:#000000;stroke-width:2.40974998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 1073.97,493.866 28.92,86.751" /></g><g
+ inkscape:connector-curvature="0"
+ id="path38"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ d="m 827.857,816.772 38.556,9.639 -38.556,9.639 0,-19.278" /><path
+ inkscape:connector-curvature="0"
+ id="path40"
+ style="fill:none;stroke:#000000;stroke-width:2.40974998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 827.857,816.772 38.556,9.639 -38.556,9.639 0,-19.278 z" /><path
+ inkscape:connector-curvature="0"
+ id="path42"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
+ d="m 235.701,836.05 -38.556,-9.639 38.556,-9.639 0,19.278" /><path
+ inkscape:connector-curvature="0"
+ id="path44"
+ style="fill:none;stroke:#000000;stroke-width:2.40974998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 235.701,836.05 -38.556,-9.639 38.556,-9.639 0,19.278 z" /><path
+ inkscape:connector-curvature="0"
+ id="path46"
+ style="fill:none;stroke:#000000;stroke-width:2.40974998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 1073.97,493.866 28.92,86.751" /></g><g
id="g48"
transform="matrix(0.36030235,0,0,0.36030235,-0.75498483,-1.0743684)"
style=""><g
- clip-path="url(#clipPath52)"
- id="g50"
- style=""><path
- inkscape:connector-curvature="0"
- id="path56"
- style="fill:none;stroke:#000000;stroke-width:2.40974998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 105.253,89.0276 694.008,0" /></g></g><path
+ clip-path="url(#clipPath52)"
+ id="g50"
+ style=""><path
+ inkscape:connector-curvature="0"
+ id="path56"
+ style="fill:none;stroke:#000000;stroke-width:2.40974998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 105.253,89.0276 694.008,0" /></g></g><path
d="m 52.91205,34.475403 -13.891817,-3.472918 13.891817,-3.472918 0,6.945836"
style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
id="path60"
@@ -196,13 +196,13 @@
id="g90"
transform="matrix(0.36030235,0,0,0.36030235,-0.75498483,-1.0743684)"
style=""><g
- clip-path="url(#clipPath94)"
- id="g92"
- style=""><path
- inkscape:connector-curvature="0"
- id="path98"
- style="fill:none;stroke:#000000;stroke-width:2.40974998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
- d="m 192.004,204.696 274.711,0" /></g></g><path
+ clip-path="url(#clipPath94)"
+ id="g92"
+ style=""><path
+ inkscape:connector-curvature="0"
+ id="path98"
+ style="fill:none;stroke:#000000;stroke-width:2.40974998;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1"
+ d="m 192.004,204.696 274.711,0" /></g></g><path
d="m 84.168639,76.151036 -13.891817,-3.472955 13.891817,-3.472954 0,6.945909"
style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:none"
id="path102"
@@ -224,90 +224,90 @@
transform="scale(1,-1)"
x="438.29504"
y="-187.28558"><tspan
- id="tspan114"
- sodipodi:role="line"
- y="-187.28558"
- x="438.29504 452.19382 456.81979 468.40555 478.82443 489.24329 495.03619 506.62195 518.2077 528.62659 540.21234">Black Level</tspan><tspan
- id="tspan116"
- sodipodi:role="line"
- y="-83.096947"
- x="438.29504 452.19382 462.61267 474.19846 484.61731 490.41019 501.99597 513.58173 524.00061 535.58636">Sync Level</tspan><tspan
- id="tspan118"
- sodipodi:role="line"
- y="-395.66284"
- x="438.29504 457.96585 469.55164 474.17761 479.97049 491.55627 497.34915 508.93494 520.52069 530.93958 542.52533">White Level</tspan></text>
+ id="tspan114"
+ sodipodi:role="line"
+ y="-187.28558"
+ x="438.29504 452.19382 456.81979 468.40555 478.82443 489.24329 495.03619 506.62195 518.2077 528.62659 540.21234">Black Level</tspan><tspan
+ id="tspan116"
+ sodipodi:role="line"
+ y="-83.096947"
+ x="438.29504 452.19382 462.61267 474.19846 484.61731 490.41019 501.99597 513.58173 524.00061 535.58636">Sync Level</tspan><tspan
+ id="tspan118"
+ sodipodi:role="line"
+ y="-395.66284"
+ x="438.29504 457.96585 469.55164 474.17761 479.97049 491.55627 497.34915 508.93494 520.52069 530.93958 542.52533">White Level</tspan></text>
<text
id="text120"
style="font-variant:normal;font-weight:normal;font-size:20.83772659px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"
x="159.88258"
y="-270.63647"><tspan
- id="tspan122"
- sodipodi:role="line"
- y="-270.63647"
- x="159.88258 172.61443 179.55339 186.49236 198.07812 209.66391">offset</tspan></text>
+ id="tspan122"
+ sodipodi:role="line"
+ y="-270.63647"
+ x="159.88258 172.61443 179.55339 186.49236 198.07812 209.66391">offset</tspan></text>
<text
id="text124"
style="font-variant:normal;font-weight:normal;font-size:20.83772659px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"
x="46.973549"
y="-46.630745"><tspan
- id="tspan126"
- sodipodi:role="line"
- y="-46.630745"
- x="46.973549 58.559322 63.185299 74.771072 86.35685 92.149734 102.5686 112.98746 124.57324 134.9921 146.57788 153.51685 159.30972 165.10262 176.68839 188.27417 192.90015 203.319">Line synchr. pulse</tspan><tspan
- id="tspan128"
- sodipodi:role="line"
- y="-4.9552913"
- x="100.80776 112.39354 117.01952 128.60529 140.19107 145.98395 157.56973 162.19569 173.78148 185.36726 195.78612 200.41209 211.99788">Line blanking</tspan></text>
+ id="tspan126"
+ sodipodi:role="line"
+ y="-46.630745"
+ x="46.973549 58.559322 63.185299 74.771072 86.35685 92.149734 102.5686 112.98746 124.57324 134.9921 146.57788 153.51685 159.30972 165.10262 176.68839 188.27417 192.90015 203.319">Line synchr. pulse</tspan><tspan
+ id="tspan128"
+ sodipodi:role="line"
+ y="-4.9552913"
+ x="100.80776 112.39354 117.01952 128.60529 140.19107 145.98395 157.56973 162.19569 173.78148 185.36726 195.78612 200.41209 211.99788">Line blanking</tspan></text>
<text
id="text3473"
style="font-variant:normal;font-weight:normal;font-size:20.83772659px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"
x="46.973549 58.559322 63.185299 74.771072 86.35685 92.149734 102.5686 112.98746 124.57324 134.9921 146.57788 153.51685 159.30972 165.10262 176.68839 188.27417 192.90015 203.319"
y="-46.630745"><tspan
- id="tspan3475"
- sodipodi:role="line"
- y="-46.630745"
- x="46.973549 58.559322 63.185299 74.771072 86.35685 92.149734 102.5686 112.98746 124.57324 134.9921 146.57788 153.51685 159.30972 165.10262 176.68839 188.27417 192.90015 203.319">Line synchr. pulse</tspan></text>
+ id="tspan3475"
+ sodipodi:role="line"
+ y="-46.630745"
+ x="46.973549 58.559322 63.185299 74.771072 86.35685 92.149734 102.5686 112.98746 124.57324 134.9921 146.57788 153.51685 159.30972 165.10262 176.68839 188.27417 192.90015 203.319">Line synchr. pulse</tspan></text>
<text
id="text3477"
style="font-variant:normal;font-weight:normal;font-size:20.83772659px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"
x="100.80776 112.39354 117.01952 128.60529 140.19107 145.98395 157.56973 162.19569 173.78148 185.36726 195.78612 200.41209 211.99788"
y="-4.9552913"><tspan
- id="tspan3479"
- sodipodi:role="line"
- y="-4.9552913"
- x="100.80776 112.39354 117.01952 128.60529 140.19107 145.98395 157.56973 162.19569 173.78148 185.36726 195.78612 200.41209 211.99788">Line blanking</tspan></text>
+ id="tspan3479"
+ sodipodi:role="line"
+ y="-4.9552913"
+ x="100.80776 112.39354 117.01952 128.60529 140.19107 145.98395 157.56973 162.19569 173.78148 185.36726 195.78612 200.41209 211.99788">Line blanking</tspan></text>
<text
id="text3607"
style="font-variant:normal;font-weight:normal;font-size:20.83772659px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"
x="438.29504 452.19382 456.81979 468.40555 478.82443 489.24329 495.03619 506.62195 518.2077 528.62659 540.21234"
y="-187.28558"><tspan
- id="tspan3609"
- sodipodi:role="line"
- y="-187.28558"
- x="438.29504 452.19382 456.81979 468.40555 478.82443 489.24329 495.03619 506.62195 518.2077 528.62659 540.21234">Black Level</tspan></text>
+ id="tspan3609"
+ sodipodi:role="line"
+ y="-187.28558"
+ x="438.29504 452.19382 456.81979 468.40555 478.82443 489.24329 495.03619 506.62195 518.2077 528.62659 540.21234">Black Level</tspan></text>
<text
id="text3611"
style="font-variant:normal;font-weight:normal;font-size:20.83772659px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"
x="438.29504 452.19382 462.61267 474.19846 484.61731 490.41019 501.99597 513.58173 524.00061 535.58636"
y="-83.096947"><tspan
- id="tspan3613"
- sodipodi:role="line"
- y="-83.096947"
- x="438.29504 452.19382 462.61267 474.19846 484.61731 490.41019 501.99597 513.58173 524.00061 535.58636">Sync Level</tspan></text>
+ id="tspan3613"
+ sodipodi:role="line"
+ y="-83.096947"
+ x="438.29504 452.19382 462.61267 474.19846 484.61731 490.41019 501.99597 513.58173 524.00061 535.58636">Sync Level</tspan></text>
<text
id="text3615"
style="font-variant:normal;font-weight:normal;font-size:20.83772659px;font-family:sans-serif;-inkscape-font-specification:sans-serif;writing-mode:lr-tb;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;font-style:normal;font-stretch:normal;"
transform="scale(1,-1)"
x="438.29504 457.96585 469.55164 474.17761 479.97049 491.55627 497.34915 508.93494 520.52069 530.93958 542.52533"
y="-395.66284"><tspan
- id="tspan3617"
- sodipodi:role="line"
- y="-395.66284"
- x="438.29504 457.96585 469.55164 474.17761 479.97049 491.55627 497.34915 508.93494 520.52069 530.93958 542.52533">White Level</tspan></text>
+ id="tspan3617"
+ sodipodi:role="line"
+ y="-395.66284"
+ x="438.29504 457.96585 469.55164 474.17761 479.97049 491.55627 497.34915 508.93494 520.52069 530.93958 542.52533">White Level</tspan></text>
</g></svg> \ No newline at end of file
diff --git a/Documentation/media/uapi/v4l/vidioc-create-bufs.rst b/Documentation/media/uapi/v4l/vidioc-create-bufs.rst
index aaca12fca06e..a39e18d69511 100644
--- a/Documentation/media/uapi/v4l/vidioc-create-bufs.rst
+++ b/Documentation/media/uapi/v4l/vidioc-create-bufs.rst
@@ -26,6 +26,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to struct :c:type:`v4l2_create_buffers`.
Description
diff --git a/Documentation/media/uapi/v4l/vidioc-cropcap.rst b/Documentation/media/uapi/v4l/vidioc-cropcap.rst
index 0f80d5ca2643..a65dbec6b20b 100644
--- a/Documentation/media/uapi/v4l/vidioc-cropcap.rst
+++ b/Documentation/media/uapi/v4l/vidioc-cropcap.rst
@@ -26,6 +26,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to struct :c:type:`v4l2_cropcap`.
Description
diff --git a/Documentation/media/uapi/v4l/vidioc-dbg-g-chip-info.rst b/Documentation/media/uapi/v4l/vidioc-dbg-g-chip-info.rst
index e1e5507e79ff..7709852282c2 100644
--- a/Documentation/media/uapi/v4l/vidioc-dbg-g-chip-info.rst
+++ b/Documentation/media/uapi/v4l/vidioc-dbg-g-chip-info.rst
@@ -26,6 +26,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to struct :c:type:`v4l2_dbg_chip_info`.
Description
diff --git a/Documentation/media/uapi/v4l/vidioc-dbg-g-register.rst b/Documentation/media/uapi/v4l/vidioc-dbg-g-register.rst
index 5960a6547f41..f4e8dd5f7889 100644
--- a/Documentation/media/uapi/v4l/vidioc-dbg-g-register.rst
+++ b/Documentation/media/uapi/v4l/vidioc-dbg-g-register.rst
@@ -29,6 +29,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to struct :c:type:`v4l2_dbg_register`.
Description
diff --git a/Documentation/media/uapi/v4l/vidioc-dqevent.rst b/Documentation/media/uapi/v4l/vidioc-dqevent.rst
index 8d663a73818e..cb3565f36793 100644
--- a/Documentation/media/uapi/v4l/vidioc-dqevent.rst
+++ b/Documentation/media/uapi/v4l/vidioc-dqevent.rst
@@ -26,6 +26,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to struct :c:type:`v4l2_event`.
Description
@@ -38,7 +39,7 @@ exceptions which the application may get by e.g. using the select system
call.
-.. tabularcolumns:: |p{3.0cm}|p{4.3cm}|p{2.5cm}|p{7.7cm}|
+.. tabularcolumns:: |p{3.0cm}|p{4.4cm}|p{2.4cm}|p{7.7cm}|
.. c:type:: v4l2_event
diff --git a/Documentation/media/uapi/v4l/vidioc-dv-timings-cap.rst b/Documentation/media/uapi/v4l/vidioc-dv-timings-cap.rst
index 424f3a1c7f56..63ead6b7a115 100644
--- a/Documentation/media/uapi/v4l/vidioc-dv-timings-cap.rst
+++ b/Documentation/media/uapi/v4l/vidioc-dv-timings-cap.rst
@@ -29,6 +29,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to struct :c:type:`v4l2_dv_timings_cap`.
Description
@@ -97,7 +98,7 @@ that doesn't support them will return an ``EINVAL`` error code.
-.. tabularcolumns:: |p{1.0cm}|p{3.5cm}|p{3.5cm}|p{9.5cm}|
+.. tabularcolumns:: |p{1.0cm}|p{4.0cm}|p{3.5cm}|p{9.2cm}|
.. c:type:: v4l2_dv_timings_cap
diff --git a/Documentation/media/uapi/v4l/vidioc-encoder-cmd.rst b/Documentation/media/uapi/v4l/vidioc-encoder-cmd.rst
index ae20ee573757..5ae8c933b1b9 100644
--- a/Documentation/media/uapi/v4l/vidioc-encoder-cmd.rst
+++ b/Documentation/media/uapi/v4l/vidioc-encoder-cmd.rst
@@ -29,7 +29,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
-
+ Pointer to struct :c:type:`v4l2_encoder_cmd`.
Description
===========
diff --git a/Documentation/media/uapi/v4l/vidioc-enum-dv-timings.rst b/Documentation/media/uapi/v4l/vidioc-enum-dv-timings.rst
index 3e9d0f69cc73..63dca65f49e4 100644
--- a/Documentation/media/uapi/v4l/vidioc-enum-dv-timings.rst
+++ b/Documentation/media/uapi/v4l/vidioc-enum-dv-timings.rst
@@ -29,6 +29,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to struct :c:type:`v4l2_enum_dv_timings`.
Description
diff --git a/Documentation/media/uapi/v4l/vidioc-enum-fmt.rst b/Documentation/media/uapi/v4l/vidioc-enum-fmt.rst
index a2adaa4bd4dd..019c513df217 100644
--- a/Documentation/media/uapi/v4l/vidioc-enum-fmt.rst
+++ b/Documentation/media/uapi/v4l/vidioc-enum-fmt.rst
@@ -26,6 +26,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to struct :c:type:`v4l2_fmtdesc`.
Description
diff --git a/Documentation/media/uapi/v4l/vidioc-enum-frameintervals.rst b/Documentation/media/uapi/v4l/vidioc-enum-frameintervals.rst
index 39492453f02d..fea7dc3c879d 100644
--- a/Documentation/media/uapi/v4l/vidioc-enum-frameintervals.rst
+++ b/Documentation/media/uapi/v4l/vidioc-enum-frameintervals.rst
@@ -26,9 +26,8 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
- Pointer to a struct :c:type:`v4l2_frmivalenum`
- structure that contains a pixel format and size and receives a frame
- interval.
+ Pointer to struct :c:type:`v4l2_frmivalenum`
+ that contains a pixel format and size and receives a frame interval.
Description
@@ -124,6 +123,8 @@ application should zero out all members except for the *IN* fields.
.. c:type:: v4l2_frmivalenum
+.. tabularcolumns:: |p{1.8cm}|p{4.4cm}|p{2.4cm}|p{8.9cm}|
+
.. flat-table:: struct v4l2_frmivalenum
:header-rows: 0
:stub-columns: 0
diff --git a/Documentation/media/uapi/v4l/vidioc-enum-framesizes.rst b/Documentation/media/uapi/v4l/vidioc-enum-framesizes.rst
index 628f1aa66338..6de117f163e0 100644
--- a/Documentation/media/uapi/v4l/vidioc-enum-framesizes.rst
+++ b/Documentation/media/uapi/v4l/vidioc-enum-framesizes.rst
@@ -26,7 +26,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
- Pointer to a struct :c:type:`v4l2_frmsizeenum`
+ Pointer to struct :c:type:`v4l2_frmsizeenum`
that contains an index and pixel format and receives a frame width
and height.
@@ -140,6 +140,8 @@ application should zero out all members except for the *IN* fields.
.. c:type:: v4l2_frmsizeenum
+.. tabularcolumns:: |p{1.4cm}|p{5.9cm}|p{2.3cm}|p{8.0cm}|
+
.. flat-table:: struct v4l2_frmsizeenum
:header-rows: 0
:stub-columns: 0
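
(Not part of the patch above: a minimal userspace sketch of the frame-size enumeration that the vidioc-enum-framesizes.rst hunk documents. It assumes a V4L2 capture device already opened as fd; the helper name and the YUYV pixel format are illustrative choices, not taken from the patch.)

#include <string.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Enumerate discrete frame sizes for one pixel format until EINVAL ends the loop. */
static void enum_frame_sizes(int fd)
{
	struct v4l2_frmsizeenum fsize;

	memset(&fsize, 0, sizeof(fsize));
	fsize.pixel_format = V4L2_PIX_FMT_YUYV;	/* assumption: format under test */

	for (fsize.index = 0; ioctl(fd, VIDIOC_ENUM_FRAMESIZES, &fsize) == 0; fsize.index++)
		if (fsize.type == V4L2_FRMSIZE_TYPE_DISCRETE)
			printf("%ux%u\n", fsize.discrete.width, fsize.discrete.height);
}
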
diff --git a/Documentation/media/uapi/v4l/vidioc-enum-freq-bands.rst b/Documentation/media/uapi/v4l/vidioc-enum-freq-bands.rst
index 4e5f5e5bf632..195cf45f3c32 100644
--- a/Documentation/media/uapi/v4l/vidioc-enum-freq-bands.rst
+++ b/Documentation/media/uapi/v4l/vidioc-enum-freq-bands.rst
@@ -26,6 +26,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to struct :c:type:`v4l2_frequency_band`.
Description
diff --git a/Documentation/media/uapi/v4l/vidioc-enumaudio.rst b/Documentation/media/uapi/v4l/vidioc-enumaudio.rst
index 74bc3ed0bdd8..8e5193e8696f 100644
--- a/Documentation/media/uapi/v4l/vidioc-enumaudio.rst
+++ b/Documentation/media/uapi/v4l/vidioc-enumaudio.rst
@@ -26,6 +26,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to struct :c:type:`v4l2_audio`.
Description
diff --git a/Documentation/media/uapi/v4l/vidioc-enumaudioout.rst b/Documentation/media/uapi/v4l/vidioc-enumaudioout.rst
index 4470a1ece5cf..6d2b4f6e78b0 100644
--- a/Documentation/media/uapi/v4l/vidioc-enumaudioout.rst
+++ b/Documentation/media/uapi/v4l/vidioc-enumaudioout.rst
@@ -26,6 +26,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to struct :c:type:`v4l2_audioout`.
Description
diff --git a/Documentation/media/uapi/v4l/vidioc-enuminput.rst b/Documentation/media/uapi/v4l/vidioc-enuminput.rst
index 266e48ab237f..0350069a56c5 100644
--- a/Documentation/media/uapi/v4l/vidioc-enuminput.rst
+++ b/Documentation/media/uapi/v4l/vidioc-enuminput.rst
@@ -26,6 +26,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to struct :c:type:`v4l2_input`.
Description
diff --git a/Documentation/media/uapi/v4l/vidioc-enumoutput.rst b/Documentation/media/uapi/v4l/vidioc-enumoutput.rst
index 93a2cf3b310c..697dcd186ae3 100644
--- a/Documentation/media/uapi/v4l/vidioc-enumoutput.rst
+++ b/Documentation/media/uapi/v4l/vidioc-enumoutput.rst
@@ -26,6 +26,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to struct :c:type:`v4l2_output`.
Description
diff --git a/Documentation/media/uapi/v4l/vidioc-enumstd.rst b/Documentation/media/uapi/v4l/vidioc-enumstd.rst
index f2bdd45cfa0d..b7fda29f46a1 100644
--- a/Documentation/media/uapi/v4l/vidioc-enumstd.rst
+++ b/Documentation/media/uapi/v4l/vidioc-enumstd.rst
@@ -26,6 +26,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to struct :c:type:`v4l2_standard`.
Description
@@ -224,12 +225,15 @@ support digital TV. See also the Linux DVB API at
#define V4L2_STD_ALL (V4L2_STD_525_60 |
V4L2_STD_625_50)
+
.. raw:: latex
- \begin{adjustbox}{width=\columnwidth}
+ \begingroup
+ \tiny
+ \setlength{\tabcolsep}{2pt}
.. NTSC/M PAL/M /N /B /D /H /I SECAM/B /D /K1 /L
-.. tabularcolumns:: |p{2.7cm}|p{2.6cm}|p{3.0cm}|p{3.2cm}|p{3.2cm}|p{2.2cm}|p{1.2cm}|p{3.2cm}|p{3.0cm}|p{2.0cm}|p{2.0cm}|p{2.0cm}|
+.. tabularcolumns:: |p{1.43cm}|p{1.38cm}|p{1.59cm}|p{1.7cm}|p{1.7cm}|p{1.17cm}|p{0.64cm}|p{1.71cm}|p{1.6cm}|p{1.07cm}|p{1.07cm}|p{1.07cm}|
.. _video-standards:
@@ -293,7 +297,7 @@ support digital TV. See also the Linux DVB API at
.. raw:: latex
- \end{adjustbox}\newline\newline
+ \endgroup
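
(Not part of the patch: a hedged sketch of the VIDIOC_ENUMSTD loop that the vidioc-enumstd.rst page describes, assuming an already-opened device fd; the helper name is illustrative.)

#include <string.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* List the analog TV standards supported by the current input. */
static void enum_standards(int fd)
{
	struct v4l2_standard std;

	memset(&std, 0, sizeof(std));
	for (std.index = 0; ioctl(fd, VIDIOC_ENUMSTD, &std) == 0; std.index++)
		printf("standard %u: %s\n", std.index, std.name);
}
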
diff --git a/Documentation/media/uapi/v4l/vidioc-expbuf.rst b/Documentation/media/uapi/v4l/vidioc-expbuf.rst
index 246e48028d40..226e83eb28a9 100644
--- a/Documentation/media/uapi/v4l/vidioc-expbuf.rst
+++ b/Documentation/media/uapi/v4l/vidioc-expbuf.rst
@@ -26,6 +26,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to struct :c:type:`v4l2_exportbuffer`.
Description
diff --git a/Documentation/media/uapi/v4l/vidioc-g-audio.rst b/Documentation/media/uapi/v4l/vidioc-g-audio.rst
index 5b67e81a0db6..290851f99386 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-audio.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-audio.rst
@@ -29,6 +29,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to struct :c:type:`v4l2_audio`.
Description
diff --git a/Documentation/media/uapi/v4l/vidioc-g-audioout.rst b/Documentation/media/uapi/v4l/vidioc-g-audioout.rst
index d16ecbaddc59..1c98af33ee70 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-audioout.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-audioout.rst
@@ -29,6 +29,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to struct :c:type:`v4l2_audioout`.
Description
diff --git a/Documentation/media/uapi/v4l/vidioc-g-crop.rst b/Documentation/media/uapi/v4l/vidioc-g-crop.rst
index 13771ee3e94a..a6ed43ba9ca3 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-crop.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-crop.rst
@@ -29,6 +29,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to struct :c:type:`v4l2_crop`.
Description
diff --git a/Documentation/media/uapi/v4l/vidioc-g-ctrl.rst b/Documentation/media/uapi/v4l/vidioc-g-ctrl.rst
index d8a379182a34..299b9aabbac2 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-ctrl.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-ctrl.rst
@@ -29,6 +29,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to struct :c:type:`v4l2_control`.
Description
diff --git a/Documentation/media/uapi/v4l/vidioc-g-dv-timings.rst b/Documentation/media/uapi/v4l/vidioc-g-dv-timings.rst
index e573c74138de..2696380626d4 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-dv-timings.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-dv-timings.rst
@@ -35,6 +35,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to struct :c:type:`v4l2_dv_timings`.
Description
@@ -208,7 +209,7 @@ EBUSY
- 0
- BT.656/1120 timings
-
+.. tabularcolumns:: |p{4.5cm}|p{12.8cm}|
.. _dv-bt-standards:
@@ -231,7 +232,7 @@ EBUSY
There are no horizontal syncs/porches at all in this format.
Total blanking timings must be set in hsync or vsync fields only.
-.. tabularcolumns:: |p{6.0cm}|p{11.5cm}|
+.. tabularcolumns:: |p{7.0cm}|p{10.5cm}|
.. _dv-bt-flags:
diff --git a/Documentation/media/uapi/v4l/vidioc-g-edid.rst b/Documentation/media/uapi/v4l/vidioc-g-edid.rst
index a16a193a1cbf..acab90f06e5a 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-edid.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-edid.rst
@@ -36,6 +36,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to struct :c:type:`v4l2_edid`.
Description
diff --git a/Documentation/media/uapi/v4l/vidioc-g-enc-index.rst b/Documentation/media/uapi/v4l/vidioc-g-enc-index.rst
index 418e886fd44b..9dfe64fc21a4 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-enc-index.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-enc-index.rst
@@ -26,6 +26,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to struct :c:type:`v4l2_enc_idx`.
Description
@@ -55,7 +56,7 @@ Currently this ioctl is only defined for MPEG-2 program streams and
video elementary streams.
-.. tabularcolumns:: |p{3.5cm}|p{5.6cm}|p{8.4cm}|
+.. tabularcolumns:: |p{3.8cm}|p{5.6cm}|p{8.1cm}|
.. c:type:: v4l2_enc_idx
diff --git a/Documentation/media/uapi/v4l/vidioc-g-ext-ctrls.rst b/Documentation/media/uapi/v4l/vidioc-g-ext-ctrls.rst
index 5ab8d2ac27b9..2011c2b2ee67 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-ext-ctrls.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-ext-ctrls.rst
@@ -34,6 +34,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to struct :c:type:`v4l2_ext_controls`.
Description
@@ -180,7 +181,7 @@ still cause this situation.
``V4L2_CTRL_FLAG_HAS_PAYLOAD`` is set for this control.
-.. tabularcolumns:: |p{4.0cm}|p{2.0cm}|p{2.0cm}|p{8.5cm}|
+.. tabularcolumns:: |p{4.0cm}|p{2.2cm}|p{2.1cm}|p{8.2cm}|
.. c:type:: v4l2_ext_controls
diff --git a/Documentation/media/uapi/v4l/vidioc-g-fbuf.rst b/Documentation/media/uapi/v4l/vidioc-g-fbuf.rst
index 4a6a03d158ca..fc73bf0f6052 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-fbuf.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-fbuf.rst
@@ -29,6 +29,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to struct :c:type:`v4l2_framebuffer`.
Description
diff --git a/Documentation/media/uapi/v4l/vidioc-g-fmt.rst b/Documentation/media/uapi/v4l/vidioc-g-fmt.rst
index b853e48312e2..3ead350e099f 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-fmt.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-fmt.rst
@@ -31,6 +31,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to struct :c:type:`v4l2_format`.
Description
@@ -87,7 +88,7 @@ The format as returned by :ref:`VIDIOC_TRY_FMT <VIDIOC_G_FMT>` must be identical
.. c:type:: v4l2_format
-.. tabularcolumns:: |p{1.2cm}|p{4.3cm}|p{3.0cm}|p{9.0cm}|
+.. tabularcolumns:: |p{1.2cm}|p{4.6cm}|p{3.0cm}|p{8.6cm}|
.. flat-table:: struct v4l2_format
:header-rows: 0
@@ -147,3 +148,9 @@ appropriately. The generic error codes are described at the
EINVAL
The struct :c:type:`v4l2_format` ``type`` field is
invalid or the requested buffer type not supported.
+
+EBUSY
+ The device is busy and cannot change the format. This could be
+ because the device is streaming, or because buffers are allocated or
+ queued to the driver. Relevant for :ref:`VIDIOC_S_FMT
+ <VIDIOC_G_FMT>` only.
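
(Not part of the patch: a minimal sketch of the EBUSY case the vidioc-g-fmt.rst hunk adds, assuming an opened capture device fd; the helper name and requested size are illustrative.)

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Read the current capture format, request a new size, and report EBUSY. */
static int set_capture_size(int fd, unsigned int width, unsigned int height)
{
	struct v4l2_format fmt;

	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	if (ioctl(fd, VIDIOC_G_FMT, &fmt) < 0)
		return -errno;

	fmt.fmt.pix.width = width;
	fmt.fmt.pix.height = height;
	if (ioctl(fd, VIDIOC_S_FMT, &fmt) < 0) {
		if (errno == EBUSY)
			fprintf(stderr, "format locked: streaming or buffers allocated\n");
		return -errno;
	}
	return 0;
}
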
diff --git a/Documentation/media/uapi/v4l/vidioc-g-frequency.rst b/Documentation/media/uapi/v4l/vidioc-g-frequency.rst
index 46ab276f412b..c1cccb144660 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-frequency.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-frequency.rst
@@ -29,6 +29,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to struct :c:type:`v4l2_frequency`.
Description
diff --git a/Documentation/media/uapi/v4l/vidioc-g-input.rst b/Documentation/media/uapi/v4l/vidioc-g-input.rst
index 1364a918fbce..1dcef44eef02 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-input.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-input.rst
@@ -29,6 +29,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to an integer with input index.
Description
diff --git a/Documentation/media/uapi/v4l/vidioc-g-jpegcomp.rst b/Documentation/media/uapi/v4l/vidioc-g-jpegcomp.rst
index 8ba353067b33..a1773ea9543e 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-jpegcomp.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-jpegcomp.rst
@@ -29,6 +29,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to struct :c:type:`v4l2_jpegcompression`.
Description
diff --git a/Documentation/media/uapi/v4l/vidioc-g-modulator.rst b/Documentation/media/uapi/v4l/vidioc-g-modulator.rst
index 77d017eb3fcc..a47b6a15cfbe 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-modulator.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-modulator.rst
@@ -29,6 +29,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to struct :c:type:`v4l2_modulator`.
Description
diff --git a/Documentation/media/uapi/v4l/vidioc-g-output.rst b/Documentation/media/uapi/v4l/vidioc-g-output.rst
index 7750948fc61b..3e0093f66834 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-output.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-output.rst
@@ -29,6 +29,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to an integer with output index.
Description
diff --git a/Documentation/media/uapi/v4l/vidioc-g-parm.rst b/Documentation/media/uapi/v4l/vidioc-g-parm.rst
index 3b2e6e59a334..616a5ea3f8fa 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-parm.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-parm.rst
@@ -29,6 +29,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to struct :c:type:`v4l2_streamparm`.
Description
diff --git a/Documentation/media/uapi/v4l/vidioc-g-priority.rst b/Documentation/media/uapi/v4l/vidioc-g-priority.rst
index a763988f64e4..c28996b4a45c 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-priority.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-priority.rst
@@ -29,7 +29,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
- Pointer to an enum v4l2_priority type.
+ Pointer to an enum :c:type:`v4l2_priority` type.
Description
diff --git a/Documentation/media/uapi/v4l/vidioc-g-selection.rst b/Documentation/media/uapi/v4l/vidioc-g-selection.rst
index c1ee86472918..f1d9df029e0d 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-selection.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-selection.rst
@@ -29,11 +29,8 @@ Arguments
``fd``
File descriptor returned by :ref:`open() <func-open>`.
-``request``
- VIDIOC_G_SELECTION, VIDIOC_S_SELECTION
-
``argp``
-
+ Pointer to struct :c:type:`v4l2_selection`.
Description
===========
diff --git a/Documentation/media/uapi/v4l/vidioc-g-sliced-vbi-cap.rst b/Documentation/media/uapi/v4l/vidioc-g-sliced-vbi-cap.rst
index d7e2b2fa8b88..a9633cae76c5 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-sliced-vbi-cap.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-sliced-vbi-cap.rst
@@ -26,6 +26,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to struct :c:type:`v4l2_sliced_vbi_cap`.
Description
@@ -122,9 +123,9 @@ the sliced VBI API is unsupported or ``type`` is invalid.
.. raw:: latex
- \begin{adjustbox}{width=\columnwidth}
+ \scriptsize
-.. tabularcolumns:: |p{5.0cm}|p{1.4cm}|p{3.0cm}|p{2.5cm}|p{9.0cm}|
+.. tabularcolumns:: |p{3.5cm}|p{1.0cm}|p{2.0cm}|p{2.0cm}|p{8.0cm}|
.. _vbi-services:
@@ -180,7 +181,7 @@ the sliced VBI API is unsupported or ``type`` is invalid.
.. raw:: latex
- \end{adjustbox}\newline\newline
+ \normalsize
Return Value
diff --git a/Documentation/media/uapi/v4l/vidioc-g-std.rst b/Documentation/media/uapi/v4l/vidioc-g-std.rst
index cd856ad21a28..90791ab51a53 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-std.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-std.rst
@@ -29,6 +29,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to :c:type:`v4l2_std_id`.
Description
diff --git a/Documentation/media/uapi/v4l/vidioc-g-tuner.rst b/Documentation/media/uapi/v4l/vidioc-g-tuner.rst
index 57c79fa43866..acdd15901a51 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-tuner.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-tuner.rst
@@ -29,6 +29,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to struct :c:type:`v4l2_tuner`.
Description
@@ -392,22 +393,25 @@ To change the radio frequency the
.. raw:: latex
- \begin{adjustbox}{width=\columnwidth}
+ \scriptsize
+
+.. tabularcolumns:: |p{1.5cm}|p{1.5cm}|p{2.9cm}|p{2.9cm}|p{2.9cm}|p{2.9cm}|
.. _tuner-matrix:
.. flat-table:: Tuner Audio Matrix
:header-rows: 2
:stub-columns: 0
+ :widths: 7 7 14 14 14 14
* -
- - :cspan:`5` Selected ``V4L2_TUNER_MODE_``
+ - :cspan:`4` Selected ``V4L2_TUNER_MODE_``
* - Received ``V4L2_TUNER_SUB_``
- ``MONO``
- ``STEREO``
- ``LANG1``
- ``LANG2 = SAP``
- - ``LANG1_LANG2``\ [#f1]_
+ - ``LANG1_LANG2``\ [#f1]_
* - ``MONO``
- Mono
- Mono/Mono
@@ -434,14 +438,14 @@ To change the radio frequency the
- L+R/SAP (preferred) or L/R or L+R/L+R
* - ``LANG1 | LANG2``
- Language 1
- - Lang1/Lang2 (deprecated [#f2]_) or Lang1/Lang1
+ - Lang1/Lang2 (deprecated\ [#f2]_) or Lang1/Lang1
- Language 1
- Language 2
- Lang1/Lang2 (preferred) or Lang1/Lang1
.. raw:: latex
- \end{adjustbox}\newline\newline
+ \normalsize
Return Value
============
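
Since the hunks above document ``argp`` as a pointer to struct v4l2_tuner and rework the tuner audio matrix, a minimal sketch of checking the received sub-channels against the selected audio mode may help (a /dev/video0 node with a tuner is an assumption, error handling trimmed):

    /* Sketch: inspect rxsubchans and audmode, the two axes of the matrix above. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    int main(void)
    {
        int fd = open("/dev/video0", O_RDWR);   /* assumed device with a tuner */
        struct v4l2_tuner tuner;

        if (fd < 0)
            return 1;
        memset(&tuner, 0, sizeof(tuner));
        tuner.index = 0;                        /* first tuner */
        if (ioctl(fd, VIDIOC_G_TUNER, &tuner) != 0)
            return 1;
        printf("tuner: %s\n", (char *)tuner.name);
        if (tuner.rxsubchans & V4L2_TUNER_SUB_STEREO)
            printf("stereo is being received\n");
        if (tuner.audmode == V4L2_TUNER_MODE_LANG1_LANG2)
            printf("both languages are selected\n");
        return 0;
    }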
diff --git a/Documentation/media/uapi/v4l/vidioc-overlay.rst b/Documentation/media/uapi/v4l/vidioc-overlay.rst
index cd7b62ebc53b..1383e3db25fc 100644
--- a/Documentation/media/uapi/v4l/vidioc-overlay.rst
+++ b/Documentation/media/uapi/v4l/vidioc-overlay.rst
@@ -26,6 +26,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to an integer.
Description
diff --git a/Documentation/media/uapi/v4l/vidioc-prepare-buf.rst b/Documentation/media/uapi/v4l/vidioc-prepare-buf.rst
index bdcfd9fe550d..70687a86ae38 100644
--- a/Documentation/media/uapi/v4l/vidioc-prepare-buf.rst
+++ b/Documentation/media/uapi/v4l/vidioc-prepare-buf.rst
@@ -26,6 +26,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to struct :c:type:`v4l2_buffer`.
Description
diff --git a/Documentation/media/uapi/v4l/vidioc-qbuf.rst b/Documentation/media/uapi/v4l/vidioc-qbuf.rst
index 1f3612637200..9e448a4aa3aa 100644
--- a/Documentation/media/uapi/v4l/vidioc-qbuf.rst
+++ b/Documentation/media/uapi/v4l/vidioc-qbuf.rst
@@ -29,6 +29,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to struct :c:type:`v4l2_buffer`.
Description
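
The ``argp`` addition makes explicit that VIDIOC_QBUF/VIDIOC_DQBUF take a pointer to struct v4l2_buffer. A minimal sketch of one dequeue/requeue cycle follows; it assumes buffers were already set up with VIDIOC_REQBUFS/mmap() and streaming was started with VIDIOC_STREAMON, and process_image() is a hypothetical callback:

    /* Sketch: dequeue one filled buffer, use it, hand it back to the driver. */
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    extern void process_image(unsigned int index, unsigned int bytes); /* hypothetical */

    int capture_one_frame(int fd)
    {
        struct v4l2_buffer buf;

        memset(&buf, 0, sizeof(buf));
        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_MMAP;

        if (ioctl(fd, VIDIOC_DQBUF, &buf) != 0) /* waits for a filled buffer */
            return -1;
        process_image(buf.index, buf.bytesused);
        return ioctl(fd, VIDIOC_QBUF, &buf);    /* requeue it for the driver */
    }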
diff --git a/Documentation/media/uapi/v4l/vidioc-query-dv-timings.rst b/Documentation/media/uapi/v4l/vidioc-query-dv-timings.rst
index 0d16853b1b51..6c82eafd28bb 100644
--- a/Documentation/media/uapi/v4l/vidioc-query-dv-timings.rst
+++ b/Documentation/media/uapi/v4l/vidioc-query-dv-timings.rst
@@ -29,6 +29,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to struct :c:type:`v4l2_dv_timings`.
Description
diff --git a/Documentation/media/uapi/v4l/vidioc-querybuf.rst b/Documentation/media/uapi/v4l/vidioc-querybuf.rst
index 0bdc8e0abddc..dd54747fabc9 100644
--- a/Documentation/media/uapi/v4l/vidioc-querybuf.rst
+++ b/Documentation/media/uapi/v4l/vidioc-querybuf.rst
@@ -26,6 +26,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to struct :c:type:`v4l2_buffer`.
Description
diff --git a/Documentation/media/uapi/v4l/vidioc-querycap.rst b/Documentation/media/uapi/v4l/vidioc-querycap.rst
index 12e0d9a63cd8..66fb1b3d6e6e 100644
--- a/Documentation/media/uapi/v4l/vidioc-querycap.rst
+++ b/Documentation/media/uapi/v4l/vidioc-querycap.rst
@@ -26,6 +26,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to struct :c:type:`v4l2_capability`.
Description
@@ -91,12 +92,13 @@ specification the ioctl returns an ``EINVAL`` error code.
stack from a newer kernel.
The version number is formatted using the ``KERNEL_VERSION()``
- macro:
+ macro. For example, if the media stack corresponds to the V4L2
+ version shipped with Kernel 4.14, it would be equivalent to:
* - :cspan:`2`
``#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))``
- ``__u32 version = KERNEL_VERSION(0, 8, 1);``
+ ``__u32 version = KERNEL_VERSION(4, 14, 0);``
``printf ("Version: %u.%u.%u\\n",``
@@ -131,7 +133,7 @@ specification the ioctl returns an ``EINVAL`` error code.
-.. tabularcolumns:: |p{6cm}|p{2.2cm}|p{8.8cm}|
+.. tabularcolumns:: |p{6.1cm}|p{2.2cm}|p{8.7cm}|
.. _device-capabilities:
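
The hunks above document ``argp`` as a pointer to struct v4l2_capability and explain the KERNEL_VERSION() packing of the ``version`` field. A minimal sketch that queries the device and decodes that field (device path assumed, error handling trimmed):

    /* Sketch: query capabilities and unpack the (a << 16) + (b << 8) + c version. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    int main(void)
    {
        int fd = open("/dev/video0", O_RDWR);   /* assumed device node */
        struct v4l2_capability cap;

        if (fd < 0)
            return 1;
        memset(&cap, 0, sizeof(cap));
        if (ioctl(fd, VIDIOC_QUERYCAP, &cap) != 0)
            return 1;
        printf("driver: %s, card: %s\n", (char *)cap.driver, (char *)cap.card);
        /* reverse the KERNEL_VERSION() packing described above */
        printf("media stack version: %u.%u.%u\n",
               cap.version >> 16, (cap.version >> 8) & 0xff, cap.version & 0xff);
        if (cap.capabilities & V4L2_CAP_VIDEO_CAPTURE)
            printf("this node can capture video\n");
        return 0;
    }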
diff --git a/Documentation/media/uapi/v4l/vidioc-queryctrl.rst b/Documentation/media/uapi/v4l/vidioc-queryctrl.rst
index 41c5744a1239..5bd26e8c9a1a 100644
--- a/Documentation/media/uapi/v4l/vidioc-queryctrl.rst
+++ b/Documentation/media/uapi/v4l/vidioc-queryctrl.rst
@@ -32,6 +32,8 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to struct :c:type:`v4l2_queryctrl`, :c:type:`v4l2_query_ext_ctrl`
+ or :c:type:`v4l2_querymenu` (depending on the ioctl).
Description
@@ -274,7 +276,7 @@ See also the examples in :ref:`control`.
-.. tabularcolumns:: |p{1.2cm}|p{0.6cm}|p{1.6cm}|p{13.5cm}|
+.. tabularcolumns:: |p{1.2cm}|p{1.0cm}|p{1.7cm}|p{13.0cm}|
.. _v4l2-querymenu:
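
Given that ``argp`` is now documented as a pointer to struct v4l2_queryctrl (or its extended/menu variants), a minimal sketch of the usual control enumeration with V4L2_CTRL_FLAG_NEXT_CTRL (device path assumed):

    /* Sketch: walk all controls the device exposes and print their ranges. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    int main(void)
    {
        int fd = open("/dev/video0", O_RDWR);   /* assumed device node */
        struct v4l2_queryctrl qc;

        if (fd < 0)
            return 1;
        memset(&qc, 0, sizeof(qc));
        qc.id = V4L2_CTRL_FLAG_NEXT_CTRL;       /* start from the first control */
        while (ioctl(fd, VIDIOC_QUERYCTRL, &qc) == 0) {
            if (!(qc.flags & V4L2_CTRL_FLAG_DISABLED))
                printf("%-32s [%d..%d] default %d\n", (char *)qc.name,
                       qc.minimum, qc.maximum, qc.default_value);
            qc.id |= V4L2_CTRL_FLAG_NEXT_CTRL;  /* ask for the next control */
        }
        return 0;
    }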
diff --git a/Documentation/media/uapi/v4l/vidioc-querystd.rst b/Documentation/media/uapi/v4l/vidioc-querystd.rst
index 3ef9ab37f582..cf40bca19b9f 100644
--- a/Documentation/media/uapi/v4l/vidioc-querystd.rst
+++ b/Documentation/media/uapi/v4l/vidioc-querystd.rst
@@ -26,6 +26,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to :c:type:`v4l2_std_id`.
Description
diff --git a/Documentation/media/uapi/v4l/vidioc-reqbufs.rst b/Documentation/media/uapi/v4l/vidioc-reqbufs.rst
index a4180d576ee5..316f52c8a310 100644
--- a/Documentation/media/uapi/v4l/vidioc-reqbufs.rst
+++ b/Documentation/media/uapi/v4l/vidioc-reqbufs.rst
@@ -26,7 +26,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
-
+ Pointer to struct :c:type:`v4l2_requestbuffers`.
Description
===========
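
The new ``argp`` line names struct v4l2_requestbuffers. A minimal sketch of the first step of memory-mapped streaming I/O, requesting and mapping capture buffers (device path and buffer count are assumptions, error handling trimmed):

    /* Sketch: request four mmap buffers, query their offsets and map them. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <linux/videodev2.h>

    int main(void)
    {
        int fd = open("/dev/video0", O_RDWR);   /* assumed device node */
        struct v4l2_requestbuffers req;
        unsigned int i;

        if (fd < 0)
            return 1;
        memset(&req, 0, sizeof(req));
        req.count = 4;                          /* assumed buffer count */
        req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        req.memory = V4L2_MEMORY_MMAP;
        if (ioctl(fd, VIDIOC_REQBUFS, &req) != 0)
            return 1;
        printf("driver granted %u buffers\n", req.count);

        for (i = 0; i < req.count; i++) {
            struct v4l2_buffer buf;
            void *start;

            memset(&buf, 0, sizeof(buf));
            buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
            buf.memory = V4L2_MEMORY_MMAP;
            buf.index = i;
            if (ioctl(fd, VIDIOC_QUERYBUF, &buf) != 0)
                return 1;
            start = mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
                         MAP_SHARED, fd, buf.m.offset);
            if (start == MAP_FAILED)
                return 1;
            printf("buffer %u: %u bytes mapped at %p\n", i, buf.length, start);
        }
        return 0;
    }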
diff --git a/Documentation/media/uapi/v4l/vidioc-s-hw-freq-seek.rst b/Documentation/media/uapi/v4l/vidioc-s-hw-freq-seek.rst
index 5672ca48d2bd..b318cb8e1df3 100644
--- a/Documentation/media/uapi/v4l/vidioc-s-hw-freq-seek.rst
+++ b/Documentation/media/uapi/v4l/vidioc-s-hw-freq-seek.rst
@@ -26,6 +26,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to struct :c:type:`v4l2_hw_freq_seek`.
Description
diff --git a/Documentation/media/uapi/v4l/vidioc-streamon.rst b/Documentation/media/uapi/v4l/vidioc-streamon.rst
index 972d5b3c74aa..e851a6961b78 100644
--- a/Documentation/media/uapi/v4l/vidioc-streamon.rst
+++ b/Documentation/media/uapi/v4l/vidioc-streamon.rst
@@ -29,7 +29,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
-
+ Pointer to an integer.
Description
===========
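
As the added line states, ``argp`` for VIDIOC_STREAMON/VIDIOC_STREAMOFF is just a pointer to an integer holding the buffer type. A minimal sketch (device path assumed; a real application would queue buffers before starting):

    /* Sketch: start streaming on a capture queue, later stop it again. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    int main(void)
    {
        int fd = open("/dev/video0", O_RDWR);   /* assumed device node */
        int type = V4L2_BUF_TYPE_VIDEO_CAPTURE; /* the integer argp points to */

        if (fd < 0)
            return 1;
        if (ioctl(fd, VIDIOC_STREAMON, &type) != 0) {
            perror("VIDIOC_STREAMON");
            return 1;
        }
        /* ... queue and dequeue buffers here ... */
        ioctl(fd, VIDIOC_STREAMOFF, &type);
        return 0;
    }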
diff --git a/Documentation/media/uapi/v4l/vidioc-subdev-enum-frame-interval.rst b/Documentation/media/uapi/v4l/vidioc-subdev-enum-frame-interval.rst
index 1a02c935c8b5..1bfe3865dcc2 100644
--- a/Documentation/media/uapi/v4l/vidioc-subdev-enum-frame-interval.rst
+++ b/Documentation/media/uapi/v4l/vidioc-subdev-enum-frame-interval.rst
@@ -26,6 +26,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to struct :c:type:`v4l2_subdev_frame_interval_enum`.
Description
diff --git a/Documentation/media/uapi/v4l/vidioc-subdev-enum-frame-size.rst b/Documentation/media/uapi/v4l/vidioc-subdev-enum-frame-size.rst
index 746c24ed97a0..33fdc3ac9316 100644
--- a/Documentation/media/uapi/v4l/vidioc-subdev-enum-frame-size.rst
+++ b/Documentation/media/uapi/v4l/vidioc-subdev-enum-frame-size.rst
@@ -26,6 +26,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to struct :c:type:`v4l2_subdev_frame_size_enum`.
Description
diff --git a/Documentation/media/uapi/v4l/vidioc-subdev-enum-mbus-code.rst b/Documentation/media/uapi/v4l/vidioc-subdev-enum-mbus-code.rst
index 0dfee3829ee2..4e4291798e4b 100644
--- a/Documentation/media/uapi/v4l/vidioc-subdev-enum-mbus-code.rst
+++ b/Documentation/media/uapi/v4l/vidioc-subdev-enum-mbus-code.rst
@@ -26,6 +26,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to struct :c:type:`v4l2_subdev_mbus_code_enum`.
Description
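
With ``argp`` documented as a pointer to struct v4l2_subdev_mbus_code_enum, a minimal sketch of enumerating the media bus codes exposed on pad 0 of a sub-device (the /dev/v4l-subdev0 node and the pad number are assumptions):

    /* Sketch: list the active mbus codes of one sub-device pad. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/v4l2-subdev.h>

    int main(void)
    {
        int fd = open("/dev/v4l-subdev0", O_RDWR);  /* assumed sub-device node */
        struct v4l2_subdev_mbus_code_enum code;
        unsigned int i;

        if (fd < 0)
            return 1;
        for (i = 0; ; i++) {
            memset(&code, 0, sizeof(code));
            code.pad = 0;                       /* assumed pad */
            code.index = i;
            code.which = V4L2_SUBDEV_FORMAT_ACTIVE;
            if (ioctl(fd, VIDIOC_SUBDEV_ENUM_MBUS_CODE, &code) != 0)
                break;                          /* EINVAL ends the enumeration */
            printf("pad 0 supports mbus code 0x%04x\n", code.code);
        }
        return 0;
    }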
diff --git a/Documentation/media/uapi/v4l/vidioc-subdev-g-crop.rst b/Documentation/media/uapi/v4l/vidioc-subdev-g-crop.rst
index 000e8fcd3f25..69b2ae8e7c15 100644
--- a/Documentation/media/uapi/v4l/vidioc-subdev-g-crop.rst
+++ b/Documentation/media/uapi/v4l/vidioc-subdev-g-crop.rst
@@ -29,6 +29,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to struct :c:type:`v4l2_subdev_crop`.
Description
diff --git a/Documentation/media/uapi/v4l/vidioc-subdev-g-fmt.rst b/Documentation/media/uapi/v4l/vidioc-subdev-g-fmt.rst
index b352456dfe2c..81c5d331af9a 100644
--- a/Documentation/media/uapi/v4l/vidioc-subdev-g-fmt.rst
+++ b/Documentation/media/uapi/v4l/vidioc-subdev-g-fmt.rst
@@ -29,6 +29,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to struct :c:type:`v4l2_subdev_format`.
Description
diff --git a/Documentation/media/uapi/v4l/vidioc-subdev-g-frame-interval.rst b/Documentation/media/uapi/v4l/vidioc-subdev-g-frame-interval.rst
index 46159dcfce30..5af0a7179941 100644
--- a/Documentation/media/uapi/v4l/vidioc-subdev-g-frame-interval.rst
+++ b/Documentation/media/uapi/v4l/vidioc-subdev-g-frame-interval.rst
@@ -29,6 +29,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to struct :c:type:`v4l2_subdev_frame_interval`.
Description
diff --git a/Documentation/media/uapi/v4l/vidioc-subdev-g-selection.rst b/Documentation/media/uapi/v4l/vidioc-subdev-g-selection.rst
index 071d9c033db6..b1d3dbbef42a 100644
--- a/Documentation/media/uapi/v4l/vidioc-subdev-g-selection.rst
+++ b/Documentation/media/uapi/v4l/vidioc-subdev-g-selection.rst
@@ -29,6 +29,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to struct :c:type:`v4l2_subdev_selection`.
Description
diff --git a/Documentation/media/uapi/v4l/vidioc-subscribe-event.rst b/Documentation/media/uapi/v4l/vidioc-subscribe-event.rst
index e4a51431032c..b521efa53ceb 100644
--- a/Documentation/media/uapi/v4l/vidioc-subscribe-event.rst
+++ b/Documentation/media/uapi/v4l/vidioc-subscribe-event.rst
@@ -30,6 +30,7 @@ Arguments
File descriptor returned by :ref:`open() <func-open>`.
``argp``
+ Pointer to struct :c:type:`v4l2_event_subscription`.
Description
@@ -39,7 +40,7 @@ Subscribe or unsubscribe V4L2 event. Subscribed events are dequeued by
using the :ref:`VIDIOC_DQEVENT` ioctl.
-.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
+.. tabularcolumns:: |p{4.6cm}|p{4.4cm}|p{8.7cm}|
.. c:type:: v4l2_event_subscription
@@ -72,7 +73,7 @@ using the :ref:`VIDIOC_DQEVENT` ioctl.
-.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
+.. tabularcolumns:: |p{6.8cm}|p{2.2cm}|p{8.5cm}|
.. _event-flags:
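
The hunks above name struct v4l2_event_subscription for ``argp``. A minimal sketch of subscribing to control events and dequeuing one (the device path and the V4L2_CID_BRIGHTNESS choice are assumptions; a real application would poll() before VIDIOC_DQEVENT):

    /* Sketch: watch one control for change events. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    int main(void)
    {
        int fd = open("/dev/video0", O_RDWR);   /* assumed device node */
        struct v4l2_event_subscription sub;
        struct v4l2_event ev;

        if (fd < 0)
            return 1;
        memset(&sub, 0, sizeof(sub));
        sub.type = V4L2_EVENT_CTRL;
        sub.id = V4L2_CID_BRIGHTNESS;           /* illustrative control choice */
        if (ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub) != 0)
            return 1;
        /* blocks until an event is pending on a blocking fd */
        if (ioctl(fd, VIDIOC_DQEVENT, &ev) == 0)
            printf("event type 0x%x on id %u\n", ev.type, ev.id);
        return 0;
    }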
diff --git a/Documentation/media/v4l-drivers/au0828-cardlist.rst b/Documentation/media/v4l-drivers/au0828-cardlist.rst
index 82d2567bc7c1..bb87b7b36a83 100644
--- a/Documentation/media/v4l-drivers/au0828-cardlist.rst
+++ b/Documentation/media/v4l-drivers/au0828-cardlist.rst
@@ -1,13 +1,37 @@
AU0828 cards list
=================
-=========== ========================== =======================================================================================================================
-Card number Card name USB IDs
-=========== ========================== =======================================================================================================================
-0 Unknown board
-1 Hauppauge HVR950Q 2040:7200, 2040:7210, 2040:7217, 2040:721b, 2040:721e, 2040:721f, 2040:7280, 0fd9:0008, 2040:7260, 2040:7213, 2040:7270
-2 Hauppauge HVR850 2040:7240
-3 DViCO FusionHDTV USB 0fe9:d620
-4 Hauppauge HVR950Q rev xxF8 2040:7201, 2040:7211, 2040:7281
-5 Hauppauge Woodbury 05e1:0480, 2040:8200
-=========== ========================== =======================================================================================================================
+.. tabularcolumns:: |p{1.4cm}|p{6.5cm}|p{10.0cm}|
+
+.. flat-table::
+ :header-rows: 1
+ :widths: 2 19 18
+ :stub-columns: 0
+
+ * - Card number
+ - Card name
+ - USB IDs
+
+ * - 0
+ - Unknown board
+ -
+
+ * - 1
+ - Hauppauge HVR950Q
+ - 2040:7200, 2040:7210, 2040:7217, 2040:721b, 2040:721e, 2040:721f, 2040:7280, 0fd9:0008, 2040:7260, 2040:7213, 2040:7270
+
+ * - 2
+ - Hauppauge HVR850
+ - 2040:7240
+
+ * - 3
+ - DViCO FusionHDTV USB
+ - 0fe9:d620
+
+ * - 4
+ - Hauppauge HVR950Q rev xxF8
+ - 2040:7201, 2040:7211, 2040:7281
+
+ * - 5
+ - Hauppauge Woodbury
+ - 05e1:0480, 2040:8200
diff --git a/Documentation/media/v4l-drivers/bttv-cardlist.rst b/Documentation/media/v4l-drivers/bttv-cardlist.rst
index 28a01cd6cf2e..8da27b924e01 100644
--- a/Documentation/media/v4l-drivers/bttv-cardlist.rst
+++ b/Documentation/media/v4l-drivers/bttv-cardlist.rst
@@ -1,174 +1,681 @@
BTTV cards list
===============
-=========== ================================================================================= ==============================================================================================================================================================================
-Card number Card name PCI IDs
-=========== ================================================================================= ==============================================================================================================================================================================
-0 *** UNKNOWN/GENERIC ***
-1 MIRO PCTV
-2 Hauppauge (bt848)
-3 STB, Gateway P/N 6000699 (bt848)
-4 Intel Create and Share PCI/ Smart Video Recorder III
-5 Diamond DTV2000
-6 AVerMedia TVPhone
-7 MATRIX-Vision MV-Delta
-8 Lifeview FlyVideo II (Bt848) LR26 / MAXI TV Video PCI2 LR26
-9 IMS/IXmicro TurboTV
-10 Hauppauge (bt878) 0070:13eb, 0070:3900, 2636:10b4
-11 MIRO PCTV pro
-12 ADS Technologies Channel Surfer TV (bt848)
-13 AVerMedia TVCapture 98 1461:0002, 1461:0004, 1461:0300
-14 Aimslab Video Highway Xtreme (VHX)
-15 Zoltrix TV-Max a1a0:a0fc
-16 Prolink Pixelview PlayTV (bt878)
-17 Leadtek WinView 601
-18 AVEC Intercapture
-19 Lifeview FlyVideo II EZ /FlyKit LR38 Bt848 (capture only)
-20 CEI Raffles Card
-21 Lifeview FlyVideo 98/ Lucky Star Image World ConferenceTV LR50
-22 Askey CPH050/ Phoebe Tv Master + FM 14ff:3002
-23 Modular Technology MM201/MM202/MM205/MM210/MM215 PCTV, bt878 14c7:0101
-24 Askey CPH05X/06X (bt878) [many vendors] 144f:3002, 144f:3005, 144f:5000, 14ff:3000
-25 Terratec TerraTV+ Version 1.0 (Bt848)/ Terra TValue Version 1.0/ Vobis TV-Boostar
-26 Hauppauge WinCam newer (bt878)
-27 Lifeview FlyVideo 98/ MAXI TV Video PCI2 LR50
-28 Terratec TerraTV+ Version 1.1 (bt878) 153b:1127, 1852:1852
-29 Imagenation PXC200 1295:200a
-30 Lifeview FlyVideo 98 LR50 1f7f:1850
-31 Formac iProTV, Formac ProTV I (bt848)
-32 Intel Create and Share PCI/ Smart Video Recorder III
-33 Terratec TerraTValue Version Bt878 153b:1117, 153b:1118, 153b:1119, 153b:111a, 153b:1134, 153b:5018
-34 Leadtek WinFast 2000/ WinFast 2000 XP 107d:6606, 107d:6609, 6606:217d, f6ff:fff6
-35 Lifeview FlyVideo 98 LR50 / Chronos Video Shuttle II 1851:1850, 1851:a050
-36 Lifeview FlyVideo 98FM LR50 / Typhoon TView TV/FM Tuner 1852:1852
-37 Prolink PixelView PlayTV pro
-38 Askey CPH06X TView99 144f:3000, 144f:a005, a04f:a0fc
-39 Pinnacle PCTV Studio/Rave 11bd:0012, bd11:1200, bd11:ff00, 11bd:ff12
-40 STB TV PCI FM, Gateway P/N 6000704 (bt878), 3Dfx VoodooTV 100 10b4:2636, 10b4:2645, 121a:3060
-41 AVerMedia TVPhone 98 1461:0001, 1461:0003
-42 ProVideo PV951 aa0c:146c
-43 Little OnAir TV
-44 Sigma TVII-FM
-45 MATRIX-Vision MV-Delta 2
-46 Zoltrix Genie TV/FM 15b0:4000, 15b0:400a, 15b0:400d, 15b0:4010, 15b0:4016
-47 Terratec TV/Radio+ 153b:1123
-48 Askey CPH03x/ Dynalink Magic TView
-49 IODATA GV-BCTV3/PCI 10fc:4020
-50 Prolink PV-BT878P+4E / PixelView PlayTV PAK / Lenco MXTV-9578 CP
-51 Eagle Wireless Capricorn2 (bt878A)
-52 Pinnacle PCTV Studio Pro
-53 Typhoon TView RDS + FM Stereo / KNC1 TV Station RDS
-54 Lifeview FlyVideo 2000 /FlyVideo A2/ Lifetec LT 9415 TV [LR90]
-55 Askey CPH031/ BESTBUY Easy TV
-56 Lifeview FlyVideo 98FM LR50 a051:41a0
-57 GrandTec 'Grand Video Capture' (Bt848) 4344:4142
-58 Askey CPH060/ Phoebe TV Master Only (No FM)
-59 Askey CPH03x TV Capturer
-60 Modular Technology MM100PCTV
-61 AG Electronics GMV1 15cb:0101
-62 Askey CPH061/ BESTBUY Easy TV (bt878)
-63 ATI TV-Wonder 1002:0001
-64 ATI TV-Wonder VE 1002:0003
-65 Lifeview FlyVideo 2000S LR90
-66 Terratec TValueRadio 153b:1135, 153b:ff3b
-67 IODATA GV-BCTV4/PCI 10fc:4050
-68 3Dfx VoodooTV FM (Euro) 10b4:2637
-69 Active Imaging AIMMS
-70 Prolink Pixelview PV-BT878P+ (Rev.4C,8E)
-71 Lifeview FlyVideo 98EZ (capture only) LR51 1851:1851
-72 Prolink Pixelview PV-BT878P+9B (PlayTV Pro rev.9B FM+NICAM) 1554:4011
-73 Sensoray 311/611 6000:0311, 6000:0611
-74 RemoteVision MX (RV605)
-75 Powercolor MTV878/ MTV878R/ MTV878F
-76 Canopus WinDVR PCI (COMPAQ Presario 3524JP, 5112JP) 0e11:0079
-77 GrandTec Multi Capture Card (Bt878)
-78 Jetway TV/Capture JW-TV878-FBK, Kworld KW-TV878RF 0a01:17de
-79 DSP Design TCVIDEO
-80 Hauppauge WinTV PVR 0070:4500
-81 IODATA GV-BCTV5/PCI 10fc:4070, 10fc:d018
-82 Osprey 100/150 (878) 0070:ff00
-83 Osprey 100/150 (848)
-84 Osprey 101 (848)
-85 Osprey 101/151
-86 Osprey 101/151 w/ svid
-87 Osprey 200/201/250/251
-88 Osprey 200/250 0070:ff01
-89 Osprey 210/220/230
-90 Osprey 500 0070:ff02
-91 Osprey 540 0070:ff04
-92 Osprey 2000 0070:ff03
-93 IDS Eagle
-94 Pinnacle PCTV Sat 11bd:001c
-95 Formac ProTV II (bt878)
-96 MachTV
-97 Euresys Picolo
-98 ProVideo PV150 aa00:1460, aa01:1461, aa02:1462, aa03:1463, aa04:1464, aa05:1465, aa06:1466, aa07:1467
-99 AD-TVK503
-100 Hercules Smart TV Stereo
-101 Pace TV & Radio Card
-102 IVC-200 0000:a155, 0001:a155, 0002:a155, 0003:a155, 0100:a155, 0101:a155, 0102:a155, 0103:a155, 0800:a155, 0801:a155, 0802:a155, 0803:a155
-103 Grand X-Guard / Trust 814PCI 0304:0102
-104 Nebula Electronics DigiTV 0071:0101
-105 ProVideo PV143 aa00:1430, aa00:1431, aa00:1432, aa00:1433, aa03:1433
-106 PHYTEC VD-009-X1 VD-011 MiniDIN (bt878)
-107 PHYTEC VD-009-X1 VD-011 Combi (bt878)
-108 PHYTEC VD-009 MiniDIN (bt878)
-109 PHYTEC VD-009 Combi (bt878)
-110 IVC-100 ff00:a132
-111 IVC-120G ff00:a182, ff01:a182, ff02:a182, ff03:a182, ff04:a182, ff05:a182, ff06:a182, ff07:a182, ff08:a182, ff09:a182, ff0a:a182, ff0b:a182, ff0c:a182, ff0d:a182, ff0e:a182, ff0f:a182
-112 pcHDTV HD-2000 TV 7063:2000
-113 Twinhan DST + clones 11bd:0026, 1822:0001, 270f:fc00, 1822:0026
-114 Winfast VC100 107d:6607
-115 Teppro TEV-560/InterVision IV-560
-116 SIMUS GVC1100 aa6a:82b2
-117 NGS NGSTV+
-118 LMLBT4
-119 Tekram M205 PRO
-120 Conceptronic CONTVFMi
-121 Euresys Picolo Tetra 1805:0105, 1805:0106, 1805:0107, 1805:0108
-122 Spirit TV Tuner
-123 AVerMedia AVerTV DVB-T 771 1461:0771
-124 AverMedia AverTV DVB-T 761 1461:0761
-125 MATRIX Vision Sigma-SQ
-126 MATRIX Vision Sigma-SLC
-127 APAC Viewcomp 878(AMAX)
-128 DViCO FusionHDTV DVB-T Lite 18ac:db10, 18ac:db11
-129 V-Gear MyVCD
-130 Super TV Tuner
-131 Tibet Systems 'Progress DVR' CS16
-132 Kodicom 4400R (master)
-133 Kodicom 4400R (slave)
-134 Adlink RTV24
-135 DViCO FusionHDTV 5 Lite 18ac:d500
-136 Acorp Y878F 9511:1540
-137 Conceptronic CTVFMi v2 036e:109e
-138 Prolink Pixelview PV-BT878P+ (Rev.2E)
-139 Prolink PixelView PlayTV MPEG2 PV-M4900
-140 Osprey 440 0070:ff07
-141 Asound Skyeye PCTV
-142 Sabrent TV-FM (bttv version)
-143 Hauppauge ImpactVCB (bt878) 0070:13eb
-144 MagicTV
-145 SSAI Security Video Interface 4149:5353
-146 SSAI Ultrasound Video Interface 414a:5353
-147 VoodooTV 200 (USA) 121a:3000
-148 DViCO FusionHDTV 2 dbc0:d200
-149 Typhoon TV-Tuner PCI (50684)
-150 Geovision GV-600 008a:763c
-151 Kozumi KTV-01C
-152 Encore ENL TV-FM-2 1000:1801
-153 PHYTEC VD-012 (bt878)
-154 PHYTEC VD-012-X1 (bt878)
-155 PHYTEC VD-012-X2 (bt878)
-156 IVCE-8784 0000:f050, 0001:f050, 0002:f050, 0003:f050
-157 Geovision GV-800(S) (master) 800a:763d
-158 Geovision GV-800(S) (slave) 800b:763d, 800c:763d, 800d:763d
-159 ProVideo PV183 1830:1540, 1831:1540, 1832:1540, 1833:1540, 1834:1540, 1835:1540, 1836:1540, 1837:1540
-160 Tongwei Video Technology TD-3116 f200:3116
-161 Aposonic W-DVR 0279:0228
-162 Adlink MPG24
-163 Bt848 Capture 14MHz
-164 CyberVision CV06 (SV)
-165 Kworld V-Stream Xpert TV PVR878
-166 PCI-8604PW
-=========== ================================================================================= ==============================================================================================================================================================================
+.. tabularcolumns:: |p{1.4cm}|p{11.1cm}|p{4.2cm}|
+
+.. flat-table::
+ :header-rows: 1
+ :widths: 2 19 18
+ :stub-columns: 0
+
+ * - Card number
+ - Card name
+ - PCI IDs
+
+ * - 0
+ - *** UNKNOWN/GENERIC ***
+ -
+
+ * - 1
+ - MIRO PCTV
+ -
+
+ * - 2
+ - Hauppauge (bt848)
+ -
+
+ * - 3
+ - STB, Gateway P/N 6000699 (bt848)
+ -
+
+ * - 4
+ - Intel Create and Share PCI/ Smart Video Recorder III
+ -
+
+ * - 5
+ - Diamond DTV2000
+ -
+
+ * - 6
+ - AVerMedia TVPhone
+ -
+
+ * - 7
+ - MATRIX-Vision MV-Delta
+ -
+
+ * - 8
+ - Lifeview FlyVideo II (Bt848) LR26 / MAXI TV Video PCI2 LR26
+ -
+
+ * - 9
+ - IMS/IXmicro TurboTV
+ -
+
+ * - 10
+ - Hauppauge (bt878)
+ - 0070:13eb, 0070:3900, 2636:10b4
+
+ * - 11
+ - MIRO PCTV pro
+ -
+
+ * - 12
+ - ADS Technologies Channel Surfer TV (bt848)
+ -
+
+ * - 13
+ - AVerMedia TVCapture 98
+ - 1461:0002, 1461:0004, 1461:0300
+
+ * - 14
+ - Aimslab Video Highway Xtreme (VHX)
+ -
+
+ * - 15
+ - Zoltrix TV-Max
+ - a1a0:a0fc
+
+ * - 16
+ - Prolink Pixelview PlayTV (bt878)
+ -
+
+ * - 17
+ - Leadtek WinView 601
+ -
+
+ * - 18
+ - AVEC Intercapture
+ -
+
+ * - 19
+ - Lifeview FlyVideo II EZ /FlyKit LR38 Bt848 (capture only)
+ -
+
+ * - 20
+ - CEI Raffles Card
+ -
+
+ * - 21
+ - Lifeview FlyVideo 98/ Lucky Star Image World ConferenceTV LR50
+ -
+
+ * - 22
+ - Askey CPH050/ Phoebe Tv Master + FM
+ - 14ff:3002
+
+ * - 23
+ - Modular Technology MM201/MM202/MM205/MM210/MM215 PCTV, bt878
+ - 14c7:0101
+
+ * - 24
+ - Askey CPH05X/06X (bt878) [many vendors]
+ - 144f:3002, 144f:3005, 144f:5000, 14ff:3000
+
+ * - 25
+ - Terratec TerraTV+ Version 1.0 (Bt848)/ Terra TValue Version 1.0/ Vobis TV-Boostar
+ -
+
+ * - 26
+ - Hauppauge WinCam newer (bt878)
+ -
+
+ * - 27
+ - Lifeview FlyVideo 98/ MAXI TV Video PCI2 LR50
+ -
+
+ * - 28
+ - Terratec TerraTV+ Version 1.1 (bt878)
+ - 153b:1127, 1852:1852
+
+ * - 29
+ - Imagenation PXC200
+ - 1295:200a
+
+ * - 30
+ - Lifeview FlyVideo 98 LR50
+ - 1f7f:1850
+
+ * - 31
+ - Formac iProTV, Formac ProTV I (bt848)
+ -
+
+ * - 32
+ - Intel Create and Share PCI/ Smart Video Recorder III
+ -
+
+ * - 33
+ - Terratec TerraTValue Version Bt878
+ - 153b:1117, 153b:1118, 153b:1119, 153b:111a, 153b:1134, 153b:5018
+
+ * - 34
+ - Leadtek WinFast 2000/ WinFast 2000 XP
+ - 107d:6606, 107d:6609, 6606:217d, f6ff:fff6
+
+ * - 35
+ - Lifeview FlyVideo 98 LR50 / Chronos Video Shuttle II
+ - 1851:1850, 1851:a050
+
+ * - 36
+ - Lifeview FlyVideo 98FM LR50 / Typhoon TView TV/FM Tuner
+ - 1852:1852
+
+ * - 37
+ - Prolink PixelView PlayTV pro
+ -
+
+ * - 38
+ - Askey CPH06X TView99
+ - 144f:3000, 144f:a005, a04f:a0fc
+
+ * - 39
+ - Pinnacle PCTV Studio/Rave
+ - 11bd:0012, bd11:1200, bd11:ff00, 11bd:ff12
+
+ * - 40
+ - STB TV PCI FM, Gateway P/N 6000704 (bt878), 3Dfx VoodooTV 100
+ - 10b4:2636, 10b4:2645, 121a:3060
+
+ * - 41
+ - AVerMedia TVPhone 98
+ - 1461:0001, 1461:0003
+
+ * - 42
+ - ProVideo PV951
+ - aa0c:146c
+
+ * - 43
+ - Little OnAir TV
+ -
+
+ * - 44
+ - Sigma TVII-FM
+ -
+
+ * - 45
+ - MATRIX-Vision MV-Delta 2
+ -
+
+ * - 46
+ - Zoltrix Genie TV/FM
+ - 15b0:4000, 15b0:400a, 15b0:400d, 15b0:4010, 15b0:4016
+
+ * - 47
+ - Terratec TV/Radio+
+ - 153b:1123
+
+ * - 48
+ - Askey CPH03x/ Dynalink Magic TView
+ -
+
+ * - 49
+ - IODATA GV-BCTV3/PCI
+ - 10fc:4020
+
+ * - 50
+ - Prolink PV-BT878P+4E / PixelView PlayTV PAK / Lenco MXTV-9578 CP
+ -
+
+ * - 51
+ - Eagle Wireless Capricorn2 (bt878A)
+ -
+
+ * - 52
+ - Pinnacle PCTV Studio Pro
+ -
+
+ * - 53
+ - Typhoon TView RDS + FM Stereo / KNC1 TV Station RDS
+ -
+
+ * - 54
+ - Lifeview FlyVideo 2000 /FlyVideo A2/ Lifetec LT 9415 TV [LR90]
+ -
+
+ * - 55
+ - Askey CPH031/ BESTBUY Easy TV
+ -
+
+ * - 56
+ - Lifeview FlyVideo 98FM LR50
+ - a051:41a0
+
+ * - 57
+ - GrandTec 'Grand Video Capture' (Bt848)
+ - 4344:4142
+
+ * - 58
+ - Askey CPH060/ Phoebe TV Master Only (No FM)
+ -
+
+ * - 59
+ - Askey CPH03x TV Capturer
+ -
+
+ * - 60
+ - Modular Technology MM100PCTV
+ -
+
+ * - 61
+ - AG Electronics GMV1
+ - 15cb:0101
+
+ * - 62
+ - Askey CPH061/ BESTBUY Easy TV (bt878)
+ -
+
+ * - 63
+ - ATI TV-Wonder
+ - 1002:0001
+
+ * - 64
+ - ATI TV-Wonder VE
+ - 1002:0003
+
+ * - 65
+ - Lifeview FlyVideo 2000S LR90
+ -
+
+ * - 66
+ - Terratec TValueRadio
+ - 153b:1135, 153b:ff3b
+
+ * - 67
+ - IODATA GV-BCTV4/PCI
+ - 10fc:4050
+
+ * - 68
+ - 3Dfx VoodooTV FM (Euro)
+ - 10b4:2637
+
+ * - 69
+ - Active Imaging AIMMS
+ -
+
+ * - 70
+ - Prolink Pixelview PV-BT878P+ (Rev.4C,8E)
+ -
+
+ * - 71
+ - Lifeview FlyVideo 98EZ (capture only) LR51
+ - 1851:1851
+
+ * - 72
+ - Prolink Pixelview PV-BT878P+9B (PlayTV Pro rev.9B FM+NICAM)
+ - 1554:4011
+
+ * - 73
+ - Sensoray 311/611
+ - 6000:0311, 6000:0611
+
+ * - 74
+ - RemoteVision MX (RV605)
+ -
+
+ * - 75
+ - Powercolor MTV878/ MTV878R/ MTV878F
+ -
+
+ * - 76
+ - Canopus WinDVR PCI (COMPAQ Presario 3524JP, 5112JP)
+ - 0e11:0079
+
+ * - 77
+ - GrandTec Multi Capture Card (Bt878)
+ -
+
+ * - 78
+ - Jetway TV/Capture JW-TV878-FBK, Kworld KW-TV878RF
+ - 0a01:17de
+
+ * - 79
+ - DSP Design TCVIDEO
+ -
+
+ * - 80
+ - Hauppauge WinTV PVR
+ - 0070:4500
+
+ * - 81
+ - IODATA GV-BCTV5/PCI
+ - 10fc:4070, 10fc:d018
+
+ * - 82
+ - Osprey 100/150 (878)
+ - 0070:ff00
+
+ * - 83
+ - Osprey 100/150 (848)
+ -
+
+ * - 84
+ - Osprey 101 (848)
+ -
+
+ * - 85
+ - Osprey 101/151
+ -
+
+ * - 86
+ - Osprey 101/151 w/ svid
+ -
+
+ * - 87
+ - Osprey 200/201/250/251
+ -
+
+ * - 88
+ - Osprey 200/250
+ - 0070:ff01
+
+ * - 89
+ - Osprey 210/220/230
+ -
+
+ * - 90
+ - Osprey 500
+ - 0070:ff02
+
+ * - 91
+ - Osprey 540
+ - 0070:ff04
+
+ * - 92
+ - Osprey 2000
+ - 0070:ff03
+
+ * - 93
+ - IDS Eagle
+ -
+
+ * - 94
+ - Pinnacle PCTV Sat
+ - 11bd:001c
+
+ * - 95
+ - Formac ProTV II (bt878)
+ -
+
+ * - 96
+ - MachTV
+ -
+
+ * - 97
+ - Euresys Picolo
+ -
+
+ * - 98
+ - ProVideo PV150
+ - aa00:1460, aa01:1461, aa02:1462, aa03:1463, aa04:1464, aa05:1465, aa06:1466, aa07:1467
+
+ * - 99
+ - AD-TVK503
+ -
+
+ * - 100
+ - Hercules Smart TV Stereo
+ -
+
+ * - 101
+ - Pace TV & Radio Card
+ -
+
+ * - 102
+ - IVC-200
+ - 0000:a155, 0001:a155, 0002:a155, 0003:a155, 0100:a155, 0101:a155, 0102:a155, 0103:a155, 0800:a155, 0801:a155, 0802:a155, 0803:a155
+
+ * - 103
+ - Grand X-Guard / Trust 814PCI
+ - 0304:0102
+
+ * - 104
+ - Nebula Electronics DigiTV
+ - 0071:0101
+
+ * - 105
+ - ProVideo PV143
+ - aa00:1430, aa00:1431, aa00:1432, aa00:1433, aa03:1433
+
+ * - 106
+ - PHYTEC VD-009-X1 VD-011 MiniDIN (bt878)
+ -
+
+ * - 107
+ - PHYTEC VD-009-X1 VD-011 Combi (bt878)
+ -
+
+ * - 108
+ - PHYTEC VD-009 MiniDIN (bt878)
+ -
+
+ * - 109
+ - PHYTEC VD-009 Combi (bt878)
+ -
+
+ * - 110
+ - IVC-100
+ - ff00:a132
+
+ * - 111
+ - IVC-120G
+ - ff00:a182, ff01:a182, ff02:a182, ff03:a182, ff04:a182, ff05:a182, ff06:a182, ff07:a182, ff08:a182, ff09:a182, ff0a:a182, ff0b:a182, ff0c:a182, ff0d:a182, ff0e:a182, ff0f:a182
+
+ * - 112
+ - pcHDTV HD-2000 TV
+ - 7063:2000
+
+ * - 113
+ - Twinhan DST + clones
+ - 11bd:0026, 1822:0001, 270f:fc00, 1822:0026
+
+ * - 114
+ - Winfast VC100
+ - 107d:6607
+
+ * - 115
+ - Teppro TEV-560/InterVision IV-560
+ -
+
+ * - 116
+ - SIMUS GVC1100
+ - aa6a:82b2
+
+ * - 117
+ - NGS NGSTV+
+ -
+
+ * - 118
+ - LMLBT4
+ -
+
+ * - 119
+ - Tekram M205 PRO
+ -
+
+ * - 120
+ - Conceptronic CONTVFMi
+ -
+
+ * - 121
+ - Euresys Picolo Tetra
+ - 1805:0105, 1805:0106, 1805:0107, 1805:0108
+
+ * - 122
+ - Spirit TV Tuner
+ -
+
+ * - 123
+ - AVerMedia AVerTV DVB-T 771
+ - 1461:0771
+
+ * - 124
+ - AverMedia AverTV DVB-T 761
+ - 1461:0761
+
+ * - 125
+ - MATRIX Vision Sigma-SQ
+ -
+
+ * - 126
+ - MATRIX Vision Sigma-SLC
+ -
+
+ * - 127
+ - APAC Viewcomp 878(AMAX)
+ -
+
+ * - 128
+ - DViCO FusionHDTV DVB-T Lite
+ - 18ac:db10, 18ac:db11
+
+ * - 129
+ - V-Gear MyVCD
+ -
+
+ * - 130
+ - Super TV Tuner
+ -
+
+ * - 131
+ - Tibet Systems 'Progress DVR' CS16
+ -
+
+ * - 132
+ - Kodicom 4400R (master)
+ -
+
+ * - 133
+ - Kodicom 4400R (slave)
+ -
+
+ * - 134
+ - Adlink RTV24
+ -
+
+ * - 135
+ - DViCO FusionHDTV 5 Lite
+ - 18ac:d500
+
+ * - 136
+ - Acorp Y878F
+ - 9511:1540
+
+ * - 137
+ - Conceptronic CTVFMi v2
+ - 036e:109e
+
+ * - 138
+ - Prolink Pixelview PV-BT878P+ (Rev.2E)
+ -
+
+ * - 139
+ - Prolink PixelView PlayTV MPEG2 PV-M4900
+ -
+
+ * - 140
+ - Osprey 440
+ - 0070:ff07
+
+ * - 141
+ - Asound Skyeye PCTV
+ -
+
+ * - 142
+ - Sabrent TV-FM (bttv version)
+ -
+
+ * - 143
+ - Hauppauge ImpactVCB (bt878)
+ - 0070:13eb
+
+ * - 144
+ - MagicTV
+ -
+
+ * - 145
+ - SSAI Security Video Interface
+ - 4149:5353
+
+ * - 146
+ - SSAI Ultrasound Video Interface
+ - 414a:5353
+
+ * - 147
+ - VoodooTV 200 (USA)
+ - 121a:3000
+
+ * - 148
+ - DViCO FusionHDTV 2
+ - dbc0:d200
+
+ * - 149
+ - Typhoon TV-Tuner PCI (50684)
+ -
+
+ * - 150
+ - Geovision GV-600
+ - 008a:763c
+
+ * - 151
+ - Kozumi KTV-01C
+ -
+
+ * - 152
+ - Encore ENL TV-FM-2
+ - 1000:1801
+
+ * - 153
+ - PHYTEC VD-012 (bt878)
+ -
+
+ * - 154
+ - PHYTEC VD-012-X1 (bt878)
+ -
+
+ * - 155
+ - PHYTEC VD-012-X2 (bt878)
+ -
+
+ * - 156
+ - IVCE-8784
+ - 0000:f050, 0001:f050, 0002:f050, 0003:f050
+
+ * - 157
+ - Geovision GV-800(S) (master)
+ - 800a:763d
+
+ * - 158
+ - Geovision GV-800(S) (slave)
+ - 800b:763d, 800c:763d, 800d:763d
+
+ * - 159
+ - ProVideo PV183
+ - 1830:1540, 1831:1540, 1832:1540, 1833:1540, 1834:1540, 1835:1540, 1836:1540, 1837:1540
+
+ * - 160
+ - Tongwei Video Technology TD-3116
+ - f200:3116
+
+ * - 161
+ - Aposonic W-DVR
+ - 0279:0228
+
+ * - 162
+ - Adlink MPG24
+ -
+
+ * - 163
+ - Bt848 Capture 14MHz
+ -
+
+ * - 164
+ - CyberVision CV06 (SV)
+ -
+
+ * - 165
+ - Kworld V-Stream Xpert TV PVR878
+ -
+
+ * - 166
+ - PCI-8604PW
+ -
diff --git a/Documentation/media/v4l-drivers/cx23885-cardlist.rst b/Documentation/media/v4l-drivers/cx23885-cardlist.rst
index fd20b50d2c1d..3129ef04ddd3 100644
--- a/Documentation/media/v4l-drivers/cx23885-cardlist.rst
+++ b/Documentation/media/v4l-drivers/cx23885-cardlist.rst
@@ -1,65 +1,245 @@
cx23885 cards list
==================
-=========== ==================================== ======================================================================================
-Card number Card name PCI IDs
-=========== ==================================== ======================================================================================
-0 UNKNOWN/GENERIC 0070:3400
-1 Hauppauge WinTV-HVR1800lp 0070:7600
-2 Hauppauge WinTV-HVR1800 0070:7800, 0070:7801, 0070:7809
-3 Hauppauge WinTV-HVR1250 0070:7911
-4 DViCO FusionHDTV5 Express 18ac:d500
-5 Hauppauge WinTV-HVR1500Q 0070:7790, 0070:7797
-6 Hauppauge WinTV-HVR1500 0070:7710, 0070:7717
-7 Hauppauge WinTV-HVR1200 0070:71d1, 0070:71d3
-8 Hauppauge WinTV-HVR1700 0070:8101
-9 Hauppauge WinTV-HVR1400 0070:8010
-10 DViCO FusionHDTV7 Dual Express 18ac:d618
-11 DViCO FusionHDTV DVB-T Dual Express 18ac:db78
-12 Leadtek Winfast PxDVR3200 H 107d:6681
-13 Compro VideoMate E650F 185b:e800
-14 TurboSight TBS 6920 6920:8888
-15 TeVii S470 d470:9022
-16 DVBWorld DVB-S2 2005 0001:2005
-17 NetUP Dual DVB-S2 CI 1b55:2a2c
-18 Hauppauge WinTV-HVR1270 0070:2211
-19 Hauppauge WinTV-HVR1275 0070:2215, 0070:221d, 0070:22f2
-20 Hauppauge WinTV-HVR1255 0070:2251, 0070:22f1
-21 Hauppauge WinTV-HVR1210 0070:2291, 0070:2295, 0070:2299, 0070:229d, 0070:22f0, 0070:22f3, 0070:22f4, 0070:22f5
-22 Mygica X8506 DMB-TH 14f1:8651
-23 Magic-Pro ProHDTV Extreme 2 14f1:8657
-24 Hauppauge WinTV-HVR1850 0070:8541
-25 Compro VideoMate E800 1858:e800
-26 Hauppauge WinTV-HVR1290 0070:8551
-27 Mygica X8558 PRO DMB-TH 14f1:8578
-28 LEADTEK WinFast PxTV1200 107d:6f22
-29 GoTView X5 3D Hybrid 5654:2390
-30 NetUP Dual DVB-T/C-CI RF 1b55:e2e4
-31 Leadtek Winfast PxDVR3200 H XC4000 107d:6f39
-32 MPX-885
-33 Mygica X8502/X8507 ISDB-T 14f1:8502
-34 TerraTec Cinergy T PCIe Dual 153b:117e
-35 TeVii S471 d471:9022
-36 Hauppauge WinTV-HVR1255 0070:2259
-37 Prof Revolution DVB-S2 8000 8000:3034
-38 Hauppauge WinTV-HVR4400/HVR5500 0070:c108, 0070:c138, 0070:c1f8
-39 AVerTV Hybrid Express Slim HC81R 1461:d939
-40 TurboSight TBS 6981 6981:8888
-41 TurboSight TBS 6980 6980:8888
-42 Leadtek Winfast PxPVR2200 107d:6f21
-43 Hauppauge ImpactVCB-e 0070:7133
-44 DViCO FusionHDTV DVB-T Dual Express2 18ac:db98
-45 DVBSky T9580 4254:9580
-46 DVBSky T980C 4254:980c
-47 DVBSky S950C 4254:950c
-48 Technotrend TT-budget CT2-4500 CI 13c2:3013
-49 DVBSky S950 4254:0950
-50 DVBSky S952 4254:0952
-51 DVBSky T982 4254:0982
-52 Hauppauge WinTV-HVR5525 0070:f038
-53 Hauppauge WinTV Starburst 0070:c12a
-54 ViewCast 260e 1576:0260
-55 ViewCast 460e 1576:0460
-56 Hauppauge WinTV-QuadHD-DVB 0070:6a28, 0070:6b28
-57 Hauppauge WinTV-QuadHD-ATSC 0070:6a18, 0070:6b18
-=========== ==================================== ======================================================================================
+.. tabularcolumns:: |p{1.4cm}|p{11.1cm}|p{4.2cm}|
+
+.. flat-table::
+ :header-rows: 1
+ :widths: 2 19 18
+ :stub-columns: 0
+
+ * - Card number
+ - Card name
+ - PCI IDs
+
+ * - 0
+ - UNKNOWN/GENERIC
+ - 0070:3400
+
+ * - 1
+ - Hauppauge WinTV-HVR1800lp
+ - 0070:7600
+
+ * - 2
+ - Hauppauge WinTV-HVR1800
+ - 0070:7800, 0070:7801, 0070:7809
+
+ * - 3
+ - Hauppauge WinTV-HVR1250
+ - 0070:7911
+
+ * - 4
+ - DViCO FusionHDTV5 Express
+ - 18ac:d500
+
+ * - 5
+ - Hauppauge WinTV-HVR1500Q
+ - 0070:7790, 0070:7797
+
+ * - 6
+ - Hauppauge WinTV-HVR1500
+ - 0070:7710, 0070:7717
+
+ * - 7
+ - Hauppauge WinTV-HVR1200
+ - 0070:71d1, 0070:71d3
+
+ * - 8
+ - Hauppauge WinTV-HVR1700
+ - 0070:8101
+
+ * - 9
+ - Hauppauge WinTV-HVR1400
+ - 0070:8010
+
+ * - 10
+ - DViCO FusionHDTV7 Dual Express
+ - 18ac:d618
+
+ * - 11
+ - DViCO FusionHDTV DVB-T Dual Express
+ - 18ac:db78
+
+ * - 12
+ - Leadtek Winfast PxDVR3200 H
+ - 107d:6681
+
+ * - 13
+ - Compro VideoMate E650F
+ - 185b:e800
+
+ * - 14
+ - TurboSight TBS 6920
+ - 6920:8888
+
+ * - 15
+ - TeVii S470
+ - d470:9022
+
+ * - 16
+ - DVBWorld DVB-S2 2005
+ - 0001:2005
+
+ * - 17
+ - NetUP Dual DVB-S2 CI
+ - 1b55:2a2c
+
+ * - 18
+ - Hauppauge WinTV-HVR1270
+ - 0070:2211
+
+ * - 19
+ - Hauppauge WinTV-HVR1275
+ - 0070:2215, 0070:221d, 0070:22f2
+
+ * - 20
+ - Hauppauge WinTV-HVR1255
+ - 0070:2251, 0070:22f1
+
+ * - 21
+ - Hauppauge WinTV-HVR1210
+ - 0070:2291, 0070:2295, 0070:2299, 0070:229d, 0070:22f0, 0070:22f3, 0070:22f4, 0070:22f5
+
+ * - 22
+ - Mygica X8506 DMB-TH
+ - 14f1:8651
+
+ * - 23
+ - Magic-Pro ProHDTV Extreme 2
+ - 14f1:8657
+
+ * - 24
+ - Hauppauge WinTV-HVR1850
+ - 0070:8541
+
+ * - 25
+ - Compro VideoMate E800
+ - 1858:e800
+
+ * - 26
+ - Hauppauge WinTV-HVR1290
+ - 0070:8551
+
+ * - 27
+ - Mygica X8558 PRO DMB-TH
+ - 14f1:8578
+
+ * - 28
+ - LEADTEK WinFast PxTV1200
+ - 107d:6f22
+
+ * - 29
+ - GoTView X5 3D Hybrid
+ - 5654:2390
+
+ * - 30
+ - NetUP Dual DVB-T/C-CI RF
+ - 1b55:e2e4
+
+ * - 31
+ - Leadtek Winfast PxDVR3200 H XC4000
+ - 107d:6f39
+
+ * - 32
+ - MPX-885
+ -
+
+ * - 33
+ - Mygica X8502/X8507 ISDB-T
+ - 14f1:8502
+
+ * - 34
+ - TerraTec Cinergy T PCIe Dual
+ - 153b:117e
+
+ * - 35
+ - TeVii S471
+ - d471:9022
+
+ * - 36
+ - Hauppauge WinTV-HVR1255
+ - 0070:2259
+
+ * - 37
+ - Prof Revolution DVB-S2 8000
+ - 8000:3034
+
+ * - 38
+ - Hauppauge WinTV-HVR4400/HVR5500
+ - 0070:c108, 0070:c138, 0070:c1f8
+
+ * - 39
+ - AVerTV Hybrid Express Slim HC81R
+ - 1461:d939
+
+ * - 40
+ - TurboSight TBS 6981
+ - 6981:8888
+
+ * - 41
+ - TurboSight TBS 6980
+ - 6980:8888
+
+ * - 42
+ - Leadtek Winfast PxPVR2200
+ - 107d:6f21
+
+ * - 43
+ - Hauppauge ImpactVCB-e
+ - 0070:7133
+
+ * - 44
+ - DViCO FusionHDTV DVB-T Dual Express2
+ - 18ac:db98
+
+ * - 45
+ - DVBSky T9580
+ - 4254:9580
+
+ * - 46
+ - DVBSky T980C
+ - 4254:980c
+
+ * - 47
+ - DVBSky S950C
+ - 4254:950c
+
+ * - 48
+ - Technotrend TT-budget CT2-4500 CI
+ - 13c2:3013
+
+ * - 49
+ - DVBSky S950
+ - 4254:0950
+
+ * - 50
+ - DVBSky S952
+ - 4254:0952
+
+ * - 51
+ - DVBSky T982
+ - 4254:0982
+
+ * - 52
+ - Hauppauge WinTV-HVR5525
+ - 0070:f038
+
+ * - 53
+ - Hauppauge WinTV Starburst
+ - 0070:c12a
+
+ * - 54
+ - ViewCast 260e
+ - 1576:0260
+
+ * - 55
+ - ViewCast 460e
+ - 1576:0460
+
+ * - 56
+ - Hauppauge WinTV-QuadHD-DVB
+ - 0070:6a28, 0070:6b28
+
+ * - 57
+ - Hauppauge WinTV-QuadHD-ATSC
+ - 0070:6a18, 0070:6b18
diff --git a/Documentation/media/v4l-drivers/cx88-cardlist.rst b/Documentation/media/v4l-drivers/cx88-cardlist.rst
index 8cc1cea17035..21648b8c2e83 100644
--- a/Documentation/media/v4l-drivers/cx88-cardlist.rst
+++ b/Documentation/media/v4l-drivers/cx88-cardlist.rst
@@ -1,98 +1,377 @@
CX88 cards list
===============
-=========== =================================================== ======================================================================================
-Card number Card name PCI IDs
-=========== =================================================== ======================================================================================
-0 UNKNOWN/GENERIC
-1 Hauppauge WinTV 34xxx models 0070:3400, 0070:3401
-2 GDI Black Gold 14c7:0106, 14c7:0107
-3 PixelView 1554:4811
-4 ATI TV Wonder Pro 1002:00f8, 1002:00f9
-5 Leadtek Winfast 2000XP Expert 107d:6611, 107d:6613
-6 AverTV Studio 303 (M126) 1461:000b
-7 MSI TV-@nywhere Master 1462:8606
-8 Leadtek Winfast DV2000 107d:6620, 107d:6621
-9 Leadtek PVR 2000 107d:663b, 107d:663c, 107d:6632, 107d:6630, 107d:6638, 107d:6631, 107d:6637, 107d:663d
-10 IODATA GV-VCP3/PCI 10fc:d003
-11 Prolink PlayTV PVR
-12 ASUS PVR-416 1043:4823, 1461:c111
-13 MSI TV-@nywhere
-14 KWorld/VStream XPert DVB-T 17de:08a6
-15 DViCO FusionHDTV DVB-T1 18ac:db00
-16 KWorld LTV883RF
-17 DViCO FusionHDTV 3 Gold-Q 18ac:d810, 18ac:d800
-18 Hauppauge Nova-T DVB-T 0070:9002, 0070:9001, 0070:9000
-19 Conexant DVB-T reference design 14f1:0187
-20 Provideo PV259 1540:2580
-21 DViCO FusionHDTV DVB-T Plus 18ac:db10, 18ac:db11
-22 pcHDTV HD3000 HDTV 7063:3000
-23 digitalnow DNTV Live! DVB-T 17de:a8a6
-24 Hauppauge WinTV 28xxx (Roslyn) models 0070:2801
-25 Digital-Logic MICROSPACE Entertainment Center (MEC) 14f1:0342
-26 IODATA GV/BCTV7E 10fc:d035
-27 PixelView PlayTV Ultra Pro (Stereo)
-28 DViCO FusionHDTV 3 Gold-T 18ac:d820
-29 ADS Tech Instant TV DVB-T PCI 1421:0334
-30 TerraTec Cinergy 1400 DVB-T 153b:1166
-31 DViCO FusionHDTV 5 Gold 18ac:d500
-32 AverMedia UltraTV Media Center PCI 550 1461:8011
-33 Kworld V-Stream Xpert DVD
-34 ATI HDTV Wonder 1002:a101
-35 WinFast DTV1000-T 107d:665f
-36 AVerTV 303 (M126) 1461:000a
-37 Hauppauge Nova-S-Plus DVB-S 0070:9201, 0070:9202
-38 Hauppauge Nova-SE2 DVB-S 0070:9200
-39 KWorld DVB-S 100 17de:08b2, 1421:0341
-40 Hauppauge WinTV-HVR1100 DVB-T/Hybrid 0070:9400, 0070:9402
-41 Hauppauge WinTV-HVR1100 DVB-T/Hybrid (Low Profile) 0070:9800, 0070:9802
-42 digitalnow DNTV Live! DVB-T Pro 1822:0025, 1822:0019
-43 KWorld/VStream XPert DVB-T with cx22702 17de:08a1, 12ab:2300
-44 DViCO FusionHDTV DVB-T Dual Digital 18ac:db50, 18ac:db54
-45 KWorld HardwareMpegTV XPert 17de:0840, 1421:0305
-46 DViCO FusionHDTV DVB-T Hybrid 18ac:db40, 18ac:db44
-47 pcHDTV HD5500 HDTV 7063:5500
-48 Kworld MCE 200 Deluxe 17de:0841
-49 PixelView PlayTV P7000 1554:4813
-50 NPG Tech Real TV FM Top 10 14f1:0842
-51 WinFast DTV2000 H 107d:665e
-52 Geniatech DVB-S 14f1:0084
-53 Hauppauge WinTV-HVR3000 TriMode Analog/DVB-S/DVB-T 0070:1404, 0070:1400, 0070:1401, 0070:1402
-54 Norwood Micro TV Tuner
-55 Shenzhen Tungsten Ages Tech TE-DTV-250 / Swann OEM c180:c980
-56 Hauppauge WinTV-HVR1300 DVB-T/Hybrid MPEG Encoder 0070:9600, 0070:9601, 0070:9602
-57 ADS Tech Instant Video PCI 1421:0390
-58 Pinnacle PCTV HD 800i 11bd:0051
-59 DViCO FusionHDTV 5 PCI nano 18ac:d530
-60 Pinnacle Hybrid PCTV 12ab:1788
-61 Leadtek TV2000 XP Global 107d:6f18, 107d:6618, 107d:6619
-62 PowerColor RA330 14f1:ea3d
-63 Geniatech X8000-MT DVBT 14f1:8852
-64 DViCO FusionHDTV DVB-T PRO 18ac:db30
-65 DViCO FusionHDTV 7 Gold 18ac:d610
-66 Prolink Pixelview MPEG 8000GT 1554:4935
-67 Kworld PlusTV HD PCI 120 (ATSC 120) 17de:08c1
-68 Hauppauge WinTV-HVR4000 DVB-S/S2/T/Hybrid 0070:6900, 0070:6904, 0070:6902
-69 Hauppauge WinTV-HVR4000(Lite) DVB-S/S2 0070:6905, 0070:6906
-70 TeVii S460 DVB-S/S2 d460:9022
-71 Omicom SS4 DVB-S/S2 PCI A044:2011
-72 TBS 8920 DVB-S/S2 8920:8888
-73 TeVii S420 DVB-S d420:9022
-74 Prolink Pixelview Global Extreme 1554:4976
-75 PROF 7300 DVB-S/S2 B033:3033
-76 SATTRADE ST4200 DVB-S/S2 b200:4200
-77 TBS 8910 DVB-S 8910:8888
-78 Prof 6200 DVB-S b022:3022
-79 Terratec Cinergy HT PCI MKII 153b:1177
-80 Hauppauge WinTV-IR Only 0070:9290
-81 Leadtek WinFast DTV1800 Hybrid 107d:6654
-82 WinFast DTV2000 H rev. J 107d:6f2b
-83 Prof 7301 DVB-S/S2 b034:3034
-84 Samsung SMT 7020 DVB-S 18ac:dc00, 18ac:dccd
-85 Twinhan VP-1027 DVB-S 1822:0023
-86 TeVii S464 DVB-S/S2 d464:9022
-87 Leadtek WinFast DTV2000 H PLUS 107d:6f42
-88 Leadtek WinFast DTV1800 H (XC4000) 107d:6f38
-89 Leadtek TV2000 XP Global (SC4100) 107d:6f36
-90 Leadtek TV2000 XP Global (XC4100) 107d:6f43
-=========== =================================================== ======================================================================================
+.. tabularcolumns:: |p{1.4cm}|p{11.1cm}|p{4.2cm}|
+
+.. flat-table::
+ :header-rows: 1
+ :widths: 2 19 18
+ :stub-columns: 0
+
+ * - Card number
+ - Card name
+ - PCI IDs
+
+ * - 0
+ - UNKNOWN/GENERIC
+ -
+
+ * - 1
+ - Hauppauge WinTV 34xxx models
+ - 0070:3400, 0070:3401
+
+ * - 2
+ - GDI Black Gold
+ - 14c7:0106, 14c7:0107
+
+ * - 3
+ - PixelView
+ - 1554:4811
+
+ * - 4
+ - ATI TV Wonder Pro
+ - 1002:00f8, 1002:00f9
+
+ * - 5
+ - Leadtek Winfast 2000XP Expert
+ - 107d:6611, 107d:6613
+
+ * - 6
+ - AverTV Studio 303 (M126)
+ - 1461:000b
+
+ * - 7
+ - MSI TV-@nywhere Master
+ - 1462:8606
+
+ * - 8
+ - Leadtek Winfast DV2000
+ - 107d:6620, 107d:6621
+
+ * - 9
+ - Leadtek PVR 2000
+ - 107d:663b, 107d:663c, 107d:6632, 107d:6630, 107d:6638, 107d:6631, 107d:6637, 107d:663d
+
+ * - 10
+ - IODATA GV-VCP3/PCI
+ - 10fc:d003
+
+ * - 11
+ - Prolink PlayTV PVR
+ -
+
+ * - 12
+ - ASUS PVR-416
+ - 1043:4823, 1461:c111
+
+ * - 13
+ - MSI TV-@nywhere
+ -
+
+ * - 14
+ - KWorld/VStream XPert DVB-T
+ - 17de:08a6
+
+ * - 15
+ - DViCO FusionHDTV DVB-T1
+ - 18ac:db00
+
+ * - 16
+ - KWorld LTV883RF
+ -
+
+ * - 17
+ - DViCO FusionHDTV 3 Gold-Q
+ - 18ac:d810, 18ac:d800
+
+ * - 18
+ - Hauppauge Nova-T DVB-T
+ - 0070:9002, 0070:9001, 0070:9000
+
+ * - 19
+ - Conexant DVB-T reference design
+ - 14f1:0187
+
+ * - 20
+ - Provideo PV259
+ - 1540:2580
+
+ * - 21
+ - DViCO FusionHDTV DVB-T Plus
+ - 18ac:db10, 18ac:db11
+
+ * - 22
+ - pcHDTV HD3000 HDTV
+ - 7063:3000
+
+ * - 23
+ - digitalnow DNTV Live! DVB-T
+ - 17de:a8a6
+
+ * - 24
+ - Hauppauge WinTV 28xxx (Roslyn) models
+ - 0070:2801
+
+ * - 25
+ - Digital-Logic MICROSPACE Entertainment Center (MEC)
+ - 14f1:0342
+
+ * - 26
+ - IODATA GV/BCTV7E
+ - 10fc:d035
+
+ * - 27
+ - PixelView PlayTV Ultra Pro (Stereo)
+ -
+
+ * - 28
+ - DViCO FusionHDTV 3 Gold-T
+ - 18ac:d820
+
+ * - 29
+ - ADS Tech Instant TV DVB-T PCI
+ - 1421:0334
+
+ * - 30
+ - TerraTec Cinergy 1400 DVB-T
+ - 153b:1166
+
+ * - 31
+ - DViCO FusionHDTV 5 Gold
+ - 18ac:d500
+
+ * - 32
+ - AverMedia UltraTV Media Center PCI 550
+ - 1461:8011
+
+ * - 33
+ - Kworld V-Stream Xpert DVD
+ -
+
+ * - 34
+ - ATI HDTV Wonder
+ - 1002:a101
+
+ * - 35
+ - WinFast DTV1000-T
+ - 107d:665f
+
+ * - 36
+ - AVerTV 303 (M126)
+ - 1461:000a
+
+ * - 37
+ - Hauppauge Nova-S-Plus DVB-S
+ - 0070:9201, 0070:9202
+
+ * - 38
+ - Hauppauge Nova-SE2 DVB-S
+ - 0070:9200
+
+ * - 39
+ - KWorld DVB-S 100
+ - 17de:08b2, 1421:0341
+
+ * - 40
+ - Hauppauge WinTV-HVR1100 DVB-T/Hybrid
+ - 0070:9400, 0070:9402
+
+ * - 41
+ - Hauppauge WinTV-HVR1100 DVB-T/Hybrid (Low Profile)
+ - 0070:9800, 0070:9802
+
+ * - 42
+ - digitalnow DNTV Live! DVB-T Pro
+ - 1822:0025, 1822:0019
+
+ * - 43
+ - KWorld/VStream XPert DVB-T with cx22702
+ - 17de:08a1, 12ab:2300
+
+ * - 44
+ - DViCO FusionHDTV DVB-T Dual Digital
+ - 18ac:db50, 18ac:db54
+
+ * - 45
+ - KWorld HardwareMpegTV XPert
+ - 17de:0840, 1421:0305
+
+ * - 46
+ - DViCO FusionHDTV DVB-T Hybrid
+ - 18ac:db40, 18ac:db44
+
+ * - 47
+ - pcHDTV HD5500 HDTV
+ - 7063:5500
+
+ * - 48
+ - Kworld MCE 200 Deluxe
+ - 17de:0841
+
+ * - 49
+ - PixelView PlayTV P7000
+ - 1554:4813
+
+ * - 50
+ - NPG Tech Real TV FM Top 10
+ - 14f1:0842
+
+ * - 51
+ - WinFast DTV2000 H
+ - 107d:665e
+
+ * - 52
+ - Geniatech DVB-S
+ - 14f1:0084
+
+ * - 53
+ - Hauppauge WinTV-HVR3000 TriMode Analog/DVB-S/DVB-T
+ - 0070:1404, 0070:1400, 0070:1401, 0070:1402
+
+ * - 54
+ - Norwood Micro TV Tuner
+ -
+
+ * - 55
+ - Shenzhen Tungsten Ages Tech TE-DTV-250 / Swann OEM
+ - c180:c980
+
+ * - 56
+ - Hauppauge WinTV-HVR1300 DVB-T/Hybrid MPEG Encoder
+ - 0070:9600, 0070:9601, 0070:9602
+
+ * - 57
+ - ADS Tech Instant Video PCI
+ - 1421:0390
+
+ * - 58
+ - Pinnacle PCTV HD 800i
+ - 11bd:0051
+
+ * - 59
+ - DViCO FusionHDTV 5 PCI nano
+ - 18ac:d530
+
+ * - 60
+ - Pinnacle Hybrid PCTV
+ - 12ab:1788
+
+ * - 61
+ - Leadtek TV2000 XP Global
+ - 107d:6f18, 107d:6618, 107d:6619
+
+ * - 62
+ - PowerColor RA330
+ - 14f1:ea3d
+
+ * - 63
+ - Geniatech X8000-MT DVBT
+ - 14f1:8852
+
+ * - 64
+ - DViCO FusionHDTV DVB-T PRO
+ - 18ac:db30
+
+ * - 65
+ - DViCO FusionHDTV 7 Gold
+ - 18ac:d610
+
+ * - 66
+ - Prolink Pixelview MPEG 8000GT
+ - 1554:4935
+
+ * - 67
+ - Kworld PlusTV HD PCI 120 (ATSC 120)
+ - 17de:08c1
+
+ * - 68
+ - Hauppauge WinTV-HVR4000 DVB-S/S2/T/Hybrid
+ - 0070:6900, 0070:6904, 0070:6902
+
+ * - 69
+ - Hauppauge WinTV-HVR4000(Lite) DVB-S/S2
+ - 0070:6905, 0070:6906
+
+ * - 70
+ - TeVii S460 DVB-S/S2
+ - d460:9022
+
+ * - 71
+ - Omicom SS4 DVB-S/S2 PCI
+ - A044:2011
+
+ * - 72
+ - TBS 8920 DVB-S/S2
+ - 8920:8888
+
+ * - 73
+ - TeVii S420 DVB-S
+ - d420:9022
+
+ * - 74
+ - Prolink Pixelview Global Extreme
+ - 1554:4976
+
+ * - 75
+ - PROF 7300 DVB-S/S2
+ - B033:3033
+
+ * - 76
+ - SATTRADE ST4200 DVB-S/S2
+ - b200:4200
+
+ * - 77
+ - TBS 8910 DVB-S
+ - 8910:8888
+
+ * - 78
+ - Prof 6200 DVB-S
+ - b022:3022
+
+ * - 79
+ - Terratec Cinergy HT PCI MKII
+ - 153b:1177
+
+ * - 80
+ - Hauppauge WinTV-IR Only
+ - 0070:9290
+
+ * - 81
+ - Leadtek WinFast DTV1800 Hybrid
+ - 107d:6654
+
+ * - 82
+ - WinFast DTV2000 H rev. J
+ - 107d:6f2b
+
+ * - 83
+ - Prof 7301 DVB-S/S2
+ - b034:3034
+
+ * - 84
+ - Samsung SMT 7020 DVB-S
+ - 18ac:dc00, 18ac:dccd
+
+ * - 85
+ - Twinhan VP-1027 DVB-S
+ - 1822:0023
+
+ * - 86
+ - TeVii S464 DVB-S/S2
+ - d464:9022
+
+ * - 87
+ - Leadtek WinFast DTV2000 H PLUS
+ - 107d:6f42
+
+ * - 88
+ - Leadtek WinFast DTV1800 H (XC4000)
+ - 107d:6f38
+
+ * - 89
+ - Leadtek TV2000 XP Global (SC4100)
+ - 107d:6f36
+
+ * - 90
+ - Leadtek TV2000 XP Global (XC4100)
+ - 107d:6f43
diff --git a/Documentation/media/v4l-drivers/em28xx-cardlist.rst b/Documentation/media/v4l-drivers/em28xx-cardlist.rst
index 76b1d301754c..ec938c08f43d 100644
--- a/Documentation/media/v4l-drivers/em28xx-cardlist.rst
+++ b/Documentation/media/v4l-drivers/em28xx-cardlist.rst
@@ -1,107 +1,422 @@
EM28xx cards list
=================
-=========== ==================================================================== ================ ==================================================================================================================================
-Card number Card name Empia Chip USB IDs
-=========== ==================================================================== ================ ==================================================================================================================================
-0 Unknown EM2800 video grabber em2800 eb1a:2800
-1 Unknown EM2750/28xx video grabber em2820 or em2840 eb1a:2710, eb1a:2820, eb1a:2821, eb1a:2860, eb1a:2861, eb1a:2862, eb1a:2863, eb1a:2870, eb1a:2881, eb1a:2883, eb1a:2868, eb1a:2875
-2 Terratec Cinergy 250 USB em2820 or em2840 0ccd:0036
-3 Pinnacle PCTV USB 2 em2820 or em2840 2304:0208
-4 Hauppauge WinTV USB 2 em2820 or em2840 2040:4200, 2040:4201
-5 MSI VOX USB 2.0 em2820 or em2840
-6 Terratec Cinergy 200 USB em2800
-7 Leadtek Winfast USB II em2800 0413:6023
-8 Kworld USB2800 em2800
-9 Pinnacle Dazzle DVC 90/100/101/107 / Kaiser Baas Video to DVD maker em2820 or em2840 1b80:e302, 1b80:e304, 2304:0207, 2304:021a, 093b:a003
-10 Hauppauge WinTV HVR 900 em2880 2040:6500
-11 Terratec Hybrid XS em2880
-12 Kworld PVR TV 2800 RF em2820 or em2840
-13 Terratec Prodigy XS em2880
-14 SIIG AVTuner-PVR / Pixelview Prolink PlayTV USB 2.0 em2820 or em2840
-15 V-Gear PocketTV em2800
-16 Hauppauge WinTV HVR 950 em2883 2040:6513, 2040:6517, 2040:651b
-17 Pinnacle PCTV HD Pro Stick em2880 2304:0227
-18 Hauppauge WinTV HVR 900 (R2) em2880 2040:6502
-19 EM2860/SAA711X Reference Design em2860
-20 AMD ATI TV Wonder HD 600 em2880 0438:b002
-21 eMPIA Technology, Inc. GrabBeeX+ Video Encoder em2800 eb1a:2801
-22 EM2710/EM2750/EM2751 webcam grabber em2750 eb1a:2750, eb1a:2751
-23 Huaqi DLCW-130 em2750
-24 D-Link DUB-T210 TV Tuner em2820 or em2840 2001:f112
-25 Gadmei UTV310 em2820 or em2840
-26 Hercules Smart TV USB 2.0 em2820 or em2840
-27 Pinnacle PCTV USB 2 (Philips FM1216ME) em2820 or em2840
-28 Leadtek Winfast USB II Deluxe em2820 or em2840
-29 EM2860/TVP5150 Reference Design em2860
-30 Videology 20K14XUSB USB2.0 em2820 or em2840
-31 Usbgear VD204v9 em2821
-32 Supercomp USB 2.0 TV em2821
-33 Elgato Video Capture em2860 0fd9:0033
-34 Terratec Cinergy A Hybrid XS em2860 0ccd:004f
-35 Typhoon DVD Maker em2860
-36 NetGMBH Cam em2860
-37 Gadmei UTV330 em2860 eb1a:50a6
-38 Yakumo MovieMixer em2861
-39 KWorld PVRTV 300U em2861 eb1a:e300
-40 Plextor ConvertX PX-TV100U em2861 093b:a005
-41 Kworld 350 U DVB-T em2870 eb1a:e350
-42 Kworld 355 U DVB-T em2870 eb1a:e355, eb1a:e357, eb1a:e359
-43 Terratec Cinergy T XS em2870
-44 Terratec Cinergy T XS (MT2060) em2870 0ccd:0043
-45 Pinnacle PCTV DVB-T em2870
-46 Compro, VideoMate U3 em2870 185b:2870
-47 KWorld DVB-T 305U em2880 eb1a:e305
-48 KWorld DVB-T 310U em2880
-49 MSI DigiVox A/D em2880 eb1a:e310
-50 MSI DigiVox A/D II em2880 eb1a:e320
-51 Terratec Hybrid XS Secam em2880 0ccd:004c
-52 DNT DA2 Hybrid em2881
-53 Pinnacle Hybrid Pro em2881
-54 Kworld VS-DVB-T 323UR em2882 eb1a:e323
-55 Terratec Cinnergy Hybrid T USB XS (em2882) em2882 0ccd:005e, 0ccd:0042
-56 Pinnacle Hybrid Pro (330e) em2882 2304:0226
-57 Kworld PlusTV HD Hybrid 330 em2883 eb1a:a316
-58 Compro VideoMate ForYou/Stereo em2820 or em2840 185b:2041
-59 Pinnacle PCTV HD Mini em2874 2304:023f
-60 Hauppauge WinTV HVR 850 em2883 2040:651f
-61 Pixelview PlayTV Box 4 USB 2.0 em2820 or em2840
-62 Gadmei TVR200 em2820 or em2840
-63 Kaiomy TVnPC U2 em2860 eb1a:e303
-64 Easy Cap Capture DC-60 em2860 1b80:e309
-65 IO-DATA GV-MVP/SZ em2820 or em2840 04bb:0515
-66 Empire dual TV em2880
-67 Terratec Grabby em2860 0ccd:0096, 0ccd:10AF
-68 Terratec AV350 em2860 0ccd:0084
-69 KWorld ATSC 315U HDTV TV Box em2882 eb1a:a313
-70 Evga inDtube em2882
-71 Silvercrest Webcam 1.3mpix em2820 or em2840
-72 Gadmei UTV330+ em2861
-73 Reddo DVB-C USB TV Box em2870
-74 Actionmaster/LinXcel/Digitus VC211A em2800
-75 Dikom DK300 em2882
-76 KWorld PlusTV 340U or UB435-Q (ATSC) em2870 1b80:a340
-77 EM2874 Leadership ISDBT em2874
-78 PCTV nanoStick T2 290e em28174 2013:024f
-79 Terratec Cinergy H5 em2884 eb1a:2885, 0ccd:10a2, 0ccd:10ad, 0ccd:10b6
-80 PCTV DVB-S2 Stick (460e) em28174 2013:024c
-81 Hauppauge WinTV HVR 930C em2884 2040:1605
-82 Terratec Cinergy HTC Stick em2884 0ccd:00b2
-83 Honestech Vidbox NW03 em2860 eb1a:5006
-84 MaxMedia UB425-TC em2874 1b80:e425
-85 PCTV QuatroStick (510e) em2884 2304:0242
-86 PCTV QuatroStick nano (520e) em2884 2013:0251
-87 Terratec Cinergy HTC USB XS em2884 0ccd:008e, 0ccd:00ac
-88 C3 Tech Digital Duo HDTV/SDTV USB em2884 1b80:e755
-89 Delock 61959 em2874 1b80:e1cc
-90 KWorld USB ATSC TV Stick UB435-Q V2 em2874 1b80:e346
-91 SpeedLink Vicious And Devine Laplace webcam em2765 1ae7:9003, 1ae7:9004
-92 PCTV DVB-S2 Stick (461e) em28178 2013:0258
-93 KWorld USB ATSC TV Stick UB435-Q V3 em2874 1b80:e34c
-94 PCTV tripleStick (292e) em28178 2013:025f, 2040:0264
-95 Leadtek VC100 em2861 0413:6f07
-96 Terratec Cinergy T2 Stick HD em28178 eb1a:8179
-97 Elgato EyeTV Hybrid 2008 INT em2884 0fd9:0018
-98 PLEX PX-BCUD em28178 3275:0085
-99 Hauppauge WinTV-dualHD DVB em28174 2040:0265
-=========== ==================================================================== ================ ==================================================================================================================================
+.. tabularcolumns:: |p{1.4cm}|p{10.0cm}|p{1.9cm}|p{4.2cm}|
+
+.. flat-table::
+ :header-rows: 1
+ :widths: 2 12 3 16
+ :stub-columns: 0
+
+ * - Card number
+ - Card name
+ - Empia Chip
+ - USB IDs
+ * - 0
+ - Unknown EM2800 video grabber
+ - em2800
+ - eb1a:2800
+ * - 1
+ - Unknown EM2750/28xx video grabber
+ - em2820 or em2840
+ - eb1a:2710, eb1a:2820, eb1a:2821, eb1a:2860, eb1a:2861, eb1a:2862, eb1a:2863, eb1a:2870, eb1a:2881, eb1a:2883, eb1a:2868, eb1a:2875
+ * - 2
+ - Terratec Cinergy 250 USB
+ - em2820 or em2840
+ - 0ccd:0036
+ * - 3
+ - Pinnacle PCTV USB 2
+ - em2820 or em2840
+ - 2304:0208
+ * - 4
+ - Hauppauge WinTV USB 2
+ - em2820 or em2840
+ - 2040:4200, 2040:4201
+ * - 5
+ - MSI VOX USB 2.0
+ - em2820 or em2840
+ -
+ * - 6
+ - Terratec Cinergy 200 USB
+ - em2800
+ -
+ * - 7
+ - Leadtek Winfast USB II
+ - em2800
+ - 0413:6023
+ * - 8
+ - Kworld USB2800
+ - em2800
+ -
+ * - 9
+ - Pinnacle Dazzle DVC 90/100/101/107 / Kaiser Baas Video to DVD maker / Kworld DVD Maker 2 / Plextor ConvertX PX-AV100U
+ - em2820 or em2840
+ - 1b80:e302, 1b80:e304, 2304:0207, 2304:021a, 093b:a003
+ * - 10
+ - Hauppauge WinTV HVR 900
+ - em2880
+ - 2040:6500
+ * - 11
+ - Terratec Hybrid XS
+ - em2880
+ -
+ * - 12
+ - Kworld PVR TV 2800 RF
+ - em2820 or em2840
+ -
+ * - 13
+ - Terratec Prodigy XS
+ - em2880
+ -
+ * - 14
+ - SIIG AVTuner-PVR / Pixelview Prolink PlayTV USB 2.0
+ - em2820 or em2840
+ -
+ * - 15
+ - V-Gear PocketTV
+ - em2800
+ -
+ * - 16
+ - Hauppauge WinTV HVR 950
+ - em2883
+ - 2040:6513, 2040:6517, 2040:651b
+ * - 17
+ - Pinnacle PCTV HD Pro Stick
+ - em2880
+ - 2304:0227
+ * - 18
+ - Hauppauge WinTV HVR 900 (R2)
+ - em2880
+ - 2040:6502
+ * - 19
+ - EM2860/SAA711X Reference Design
+ - em2860
+ -
+ * - 20
+ - AMD ATI TV Wonder HD 600
+ - em2880
+ - 0438:b002
+ * - 21
+ - eMPIA Technology, Inc. GrabBeeX+ Video Encoder
+ - em2800
+ - eb1a:2801
+ * - 22
+ - EM2710/EM2750/EM2751 webcam grabber
+ - em2750
+ - eb1a:2750, eb1a:2751
+ * - 23
+ - Huaqi DLCW-130
+ - em2750
+ -
+ * - 24
+ - D-Link DUB-T210 TV Tuner
+ - em2820 or em2840
+ - 2001:f112
+ * - 25
+ - Gadmei UTV310
+ - em2820 or em2840
+ -
+ * - 26
+ - Hercules Smart TV USB 2.0
+ - em2820 or em2840
+ -
+ * - 27
+ - Pinnacle PCTV USB 2 (Philips FM1216ME)
+ - em2820 or em2840
+ -
+ * - 28
+ - Leadtek Winfast USB II Deluxe
+ - em2820 or em2840
+ -
+ * - 29
+ - EM2860/TVP5150 Reference Design
+ - em2860
+ - eb1a:5051
+ * - 30
+ - Videology 20K14XUSB USB2.0
+ - em2820 or em2840
+ -
+ * - 31
+ - Usbgear VD204v9
+ - em2821
+ -
+ * - 32
+ - Supercomp USB 2.0 TV
+ - em2821
+ -
+ * - 33
+ - Elgato Video Capture
+ - em2860
+ - 0fd9:0033
+ * - 34
+ - Terratec Cinergy A Hybrid XS
+ - em2860
+ - 0ccd:004f
+ * - 35
+ - Typhoon DVD Maker
+ - em2860
+ -
+ * - 36
+ - NetGMBH Cam
+ - em2860
+ -
+ * - 37
+ - Gadmei UTV330
+ - em2860
+ - eb1a:50a6
+ * - 38
+ - Yakumo MovieMixer
+ - em2861
+ -
+ * - 39
+ - KWorld PVRTV 300U
+ - em2861
+ - eb1a:e300
+ * - 40
+ - Plextor ConvertX PX-TV100U
+ - em2861
+ - 093b:a005
+ * - 41
+ - Kworld 350 U DVB-T
+ - em2870
+ - eb1a:e350
+ * - 42
+ - Kworld 355 U DVB-T
+ - em2870
+ - eb1a:e355, eb1a:e357, eb1a:e359
+ * - 43
+ - Terratec Cinergy T XS
+ - em2870
+ -
+ * - 44
+ - Terratec Cinergy T XS (MT2060)
+ - em2870
+ - 0ccd:0043
+ * - 45
+ - Pinnacle PCTV DVB-T
+ - em2870
+ -
+ * - 46
+ - Compro, VideoMate U3
+ - em2870
+ - 185b:2870
+ * - 47
+ - KWorld DVB-T 305U
+ - em2880
+ - eb1a:e305
+ * - 48
+ - KWorld DVB-T 310U
+ - em2880
+ -
+ * - 49
+ - MSI DigiVox A/D
+ - em2880
+ - eb1a:e310
+ * - 50
+ - MSI DigiVox A/D II
+ - em2880
+ - eb1a:e320
+ * - 51
+ - Terratec Hybrid XS Secam
+ - em2880
+ - 0ccd:004c
+ * - 52
+ - DNT DA2 Hybrid
+ - em2881
+ -
+ * - 53
+ - Pinnacle Hybrid Pro
+ - em2881
+ -
+ * - 54
+ - Kworld VS-DVB-T 323UR
+ - em2882
+ - eb1a:e323
+ * - 55
+ - Terratec Cinnergy Hybrid T USB XS (em2882)
+ - em2882
+ - 0ccd:005e, 0ccd:0042
+ * - 56
+ - Pinnacle Hybrid Pro (330e)
+ - em2882
+ - 2304:0226
+ * - 57
+ - Kworld PlusTV HD Hybrid 330
+ - em2883
+ - eb1a:a316
+ * - 58
+ - Compro VideoMate ForYou/Stereo
+ - em2820 or em2840
+ - 185b:2041
+ * - 59
+ - Pinnacle PCTV HD Mini
+ - em2874
+ - 2304:023f
+ * - 60
+ - Hauppauge WinTV HVR 850
+ - em2883
+ - 2040:651f
+ * - 61
+ - Pixelview PlayTV Box 4 USB 2.0
+ - em2820 or em2840
+ -
+ * - 62
+ - Gadmei TVR200
+ - em2820 or em2840
+ -
+ * - 63
+ - Kaiomy TVnPC U2
+ - em2860
+ - eb1a:e303
+ * - 64
+ - Easy Cap Capture DC-60
+ - em2860
+ - 1b80:e309
+ * - 65
+ - IO-DATA GV-MVP/SZ
+ - em2820 or em2840
+ - 04bb:0515
+ * - 66
+ - Empire dual TV
+ - em2880
+ -
+ * - 67
+ - Terratec Grabby
+ - em2860
+ - 0ccd:0096, 0ccd:10AF
+ * - 68
+ - Terratec AV350
+ - em2860
+ - 0ccd:0084
+ * - 69
+ - KWorld ATSC 315U HDTV TV Box
+ - em2882
+ - eb1a:a313
+ * - 70
+ - Evga inDtube
+ - em2882
+ -
+ * - 71
+ - Silvercrest Webcam 1.3mpix
+ - em2820 or em2840
+ -
+ * - 72
+ - Gadmei UTV330+
+ - em2861
+ -
+ * - 73
+ - Reddo DVB-C USB TV Box
+ - em2870
+ -
+ * - 74
+ - Actionmaster/LinXcel/Digitus VC211A
+ - em2800
+ -
+ * - 75
+ - Dikom DK300
+ - em2882
+ -
+ * - 76
+ - KWorld PlusTV 340U or UB435-Q (ATSC)
+ - em2870
+ - 1b80:a340
+ * - 77
+ - EM2874 Leadership ISDBT
+ - em2874
+ -
+ * - 78
+ - PCTV nanoStick T2 290e
+ - em28174
+ - 2013:024f
+ * - 79
+ - Terratec Cinergy H5
+ - em2884
+ - eb1a:2885, 0ccd:10a2, 0ccd:10ad, 0ccd:10b6
+ * - 80
+ - PCTV DVB-S2 Stick (460e)
+ - em28174
+ - 2013:024c
+ * - 81
+ - Hauppauge WinTV HVR 930C
+ - em2884
+ - 2040:1605
+ * - 82
+ - Terratec Cinergy HTC Stick
+ - em2884
+ - 0ccd:00b2
+ * - 83
+ - Honestech Vidbox NW03
+ - em2860
+ - eb1a:5006
+ * - 84
+ - MaxMedia UB425-TC
+ - em2874
+ - 1b80:e425
+ * - 85
+ - PCTV QuatroStick (510e)
+ - em2884
+ - 2304:0242
+ * - 86
+ - PCTV QuatroStick nano (520e)
+ - em2884
+ - 2013:0251
+ * - 87
+ - Terratec Cinergy HTC USB XS
+ - em2884
+ - 0ccd:008e, 0ccd:00ac
+ * - 88
+ - C3 Tech Digital Duo HDTV/SDTV USB
+ - em2884
+ - 1b80:e755
+ * - 89
+ - Delock 61959
+ - em2874
+ - 1b80:e1cc
+ * - 90
+ - KWorld USB ATSC TV Stick UB435-Q V2
+ - em2874
+ - 1b80:e346
+ * - 91
+ - SpeedLink Vicious And Devine Laplace webcam
+ - em2765
+ - 1ae7:9003, 1ae7:9004
+ * - 92
+ - PCTV DVB-S2 Stick (461e)
+ - em28178
+ - 2013:0258
+ * - 93
+ - KWorld USB ATSC TV Stick UB435-Q V3
+ - em2874
+ - 1b80:e34c
+ * - 94
+ - PCTV tripleStick (292e)
+ - em28178
+ - 2013:025f, 2040:0264
+ * - 95
+ - Leadtek VC100
+ - em2861
+ - 0413:6f07
+ * - 96
+ - Terratec Cinergy T2 Stick HD
+ - em28178
+ - eb1a:8179
+ * - 97
+ - Elgato EyeTV Hybrid 2008 INT
+ - em2884
+ - 0fd9:0018
+ * - 98
+ - PLEX PX-BCUD
+ - em28178
+ - 3275:0085
+ * - 99
+ - Hauppauge WinTV-dualHD DVB
+ - em28174
+ - 2040:0265
+ * - 100
+ - Hauppauge WinTV-dualHD 01595 ATSC/QAM
+ - em28174
+ - 2040:026d
+ * - 101
+ - Terratec Cinergy H6 rev. 2
+ - em2884
+ - 0ccd:10b2
diff --git a/Documentation/media/v4l-drivers/imx.rst b/Documentation/media/v4l-drivers/imx.rst
index e0ee0f1aeb05..3c4f58bda178 100644
--- a/Documentation/media/v4l-drivers/imx.rst
+++ b/Documentation/media/v4l-drivers/imx.rst
@@ -607,8 +607,9 @@ References
Authors
-------
-Steve Longerbeam <steve_longerbeam@mentor.com>
-Philipp Zabel <kernel@pengutronix.de>
-Russell King <linux@armlinux.org.uk>
+
+- Steve Longerbeam <steve_longerbeam@mentor.com>
+- Philipp Zabel <kernel@pengutronix.de>
+- Russell King <linux@armlinux.org.uk>
Copyright (C) 2012-2017 Mentor Graphics Inc.
diff --git a/Documentation/media/v4l-drivers/index.rst b/Documentation/media/v4l-drivers/index.rst
index 2e24d6806052..679238e786a7 100644
--- a/Documentation/media/v4l-drivers/index.rst
+++ b/Documentation/media/v4l-drivers/index.rst
@@ -21,7 +21,9 @@ more details.
For more details see the file COPYING in the source distribution of Linux.
-.. class:: toc-title
+.. only:: html
+
+ .. class:: toc-title
Table of Contents
@@ -41,6 +43,7 @@ For more details see the file COPYING in the source distribution of Linux.
cx88
davinci-vpbe
fimc
+ imx
ivtv
max2175
meye
@@ -49,6 +52,7 @@ For more details see the file COPYING in the source distribution of Linux.
philips
pvrusb2
pxa_camera
+ qcom_camss
radiotrack
rcar-fdp1
saa7134
diff --git a/Documentation/media/v4l-drivers/ivtv-cardlist.rst b/Documentation/media/v4l-drivers/ivtv-cardlist.rst
index 754ffa820b4c..022dca80c2c8 100644
--- a/Documentation/media/v4l-drivers/ivtv-cardlist.rst
+++ b/Documentation/media/v4l-drivers/ivtv-cardlist.rst
@@ -1,38 +1,137 @@
IVTV cards list
===============
-=========== ============================================================= ====================================================
-Card number Card name PCI IDs
-=========== ============================================================= ====================================================
-0 Hauppauge WinTV PVR-250 IVTV16 104d:813d
-1 Hauppauge WinTV PVR-350 IVTV16 104d:813d
-2 Hauppauge WinTV PVR-150 IVTV16 104d:813d
-3 AVerMedia M179 IVTV15 1461:a3cf, IVTV15 1461:a3ce
-4 Yuan MPG600, Kuroutoshikou ITVC16-STVLP IVTV16 12ab:fff3, IVTV16 12ab:ffff
-5 YUAN MPG160, Kuroutoshikou ITVC15-STVLP, I/O Data GV-M2TV/PCI IVTV15 10fc:40a0
-6 Yuan PG600, Diamond PVR-550 IVTV16 ff92:0070, IVTV16 ffab:0600
-7 Adaptec VideOh! AVC-2410 IVTV16 9005:0093
-8 Adaptec VideOh! AVC-2010 IVTV16 9005:0092
-9 Nagase Transgear 5000TV IVTV16 1461:bfff
-10 AOpen VA2000MAX-SNT6 IVTV16 0000:ff5f
-11 Yuan MPG600GR, Kuroutoshikou CX23416GYC-STVLP IVTV16 12ab:0600, IVTV16 fbab:0600, IVTV16 1154:0523
-12 I/O Data GV-MVP/RX, GV-MVP/RX2W (dual tuner) IVTV16 10fc:d01e, IVTV16 10fc:d038, IVTV16 10fc:d039
-13 I/O Data GV-MVP/RX2E IVTV16 10fc:d025
-14 GotView PCI DVD IVTV16 12ab:0600
-15 GotView PCI DVD2 Deluxe IVTV16 ffac:0600
-16 Yuan MPC622 IVTV16 ff01:d998
-17 Digital Cowboy DCT-MTVP1 IVTV16 1461:bfff
-18 Yuan PG600-2, GotView PCI DVD Lite IVTV16 ffab:0600, IVTV16 ffad:0600
-19 Club3D ZAP-TV1x01 IVTV16 ffab:0600
-20 AVerTV MCE 116 Plus IVTV16 1461:c439
-21 ASUS Falcon2 IVTV16 1043:4b66, IVTV16 1043:462e, IVTV16 1043:4b2e
-22 AVerMedia PVR-150 Plus / AVerTV M113 Partsnic (Daewoo) Tuner IVTV16 1461:c034, IVTV16 1461:c035
-23 AVerMedia EZMaker PCI Deluxe IVTV16 1461:c03f
-24 AVerMedia M104 IVTV16 1461:c136
-25 Buffalo PC-MV5L/PCI IVTV16 1154:052b
-26 AVerMedia UltraTV 1500 MCE / AVerTV M113 Philips Tuner IVTV16 1461:c019, IVTV16 1461:c01b
-27 Sony VAIO Giga Pocket (ENX Kikyou) IVTV16 104d:813d
-28 Hauppauge WinTV PVR-350 (V1) IVTV16 104d:813d
-29 Yuan MPG600GR, Kuroutoshikou CX23416GYC-STVLP (no GR) IVTV16 104d:813d
-30 Yuan MPG600GR, Kuroutoshikou CX23416GYC-STVLP (no GR/YCS) IVTV16 104d:813d
-=========== ============================================================= ====================================================
+.. tabularcolumns:: |p{1.4cm}|p{12.7cm}|p{3.4cm}|
+
+.. flat-table::
+ :header-rows: 1
+ :widths: 2 19 18
+ :stub-columns: 0
+
+ * - Card number
+ - Card name
+ - PCI IDs
+
+ * - 0
+ - Hauppauge WinTV PVR-250
+ - IVTV16 104d:813d
+
+ * - 1
+ - Hauppauge WinTV PVR-350
+ - IVTV16 104d:813d
+
+ * - 2
+ - Hauppauge WinTV PVR-150
+ - IVTV16 104d:813d
+
+ * - 3
+ - AVerMedia M179
+ - IVTV15 1461:a3cf, IVTV15 1461:a3ce
+
+ * - 4
+ - Yuan MPG600, Kuroutoshikou ITVC16-STVLP
+ - IVTV16 12ab:fff3, IVTV16 12ab:ffff
+
+ * - 5
+ - YUAN MPG160, Kuroutoshikou ITVC15-STVLP, I/O Data GV-M2TV/PCI
+ - IVTV15 10fc:40a0
+
+ * - 6
+ - Yuan PG600, Diamond PVR-550
+ - IVTV16 ff92:0070, IVTV16 ffab:0600
+
+ * - 7
+ - Adaptec VideOh! AVC-2410
+ - IVTV16 9005:0093
+
+ * - 8
+ - Adaptec VideOh! AVC-2010
+ - IVTV16 9005:0092
+
+ * - 9
+ - Nagase Transgear 5000TV
+ - IVTV16 1461:bfff
+
+ * - 10
+ - AOpen VA2000MAX-SNT6
+ - IVTV16 0000:ff5f
+
+ * - 11
+ - Yuan MPG600GR, Kuroutoshikou CX23416GYC-STVLP
+ - IVTV16 12ab:0600, IVTV16 fbab:0600, IVTV16 1154:0523
+
+ * - 12
+ - I/O Data GV-MVP/RX, GV-MVP/RX2W (dual tuner)
+ - IVTV16 10fc:d01e, IVTV16 10fc:d038, IVTV16 10fc:d039
+
+ * - 13
+ - I/O Data GV-MVP/RX2E
+ - IVTV16 10fc:d025
+
+ * - 14
+ - GotView PCI DVD
+ - IVTV16 12ab:0600
+
+ * - 15
+ - GotView PCI DVD2 Deluxe
+ - IVTV16 ffac:0600
+
+ * - 16
+ - Yuan MPC622
+ - IVTV16 ff01:d998
+
+ * - 17
+ - Digital Cowboy DCT-MTVP1
+ - IVTV16 1461:bfff
+
+ * - 18
+ - Yuan PG600-2, GotView PCI DVD Lite
+ - IVTV16 ffab:0600, IVTV16 ffad:0600
+
+ * - 19
+ - Club3D ZAP-TV1x01
+ - IVTV16 ffab:0600
+
+ * - 20
+ - AVerTV MCE 116 Plus
+ - IVTV16 1461:c439
+
+ * - 21
+ - ASUS Falcon2
+ - IVTV16 1043:4b66, IVTV16 1043:462e, IVTV16 1043:4b2e
+
+ * - 22
+ - AVerMedia PVR-150 Plus / AVerTV M113 Partsnic (Daewoo) Tuner
+ - IVTV16 1461:c034, IVTV16 1461:c035
+
+ * - 23
+ - AVerMedia EZMaker PCI Deluxe
+ - IVTV16 1461:c03f
+
+ * - 24
+ - AVerMedia M104
+ - IVTV16 1461:c136
+
+ * - 25
+ - Buffalo PC-MV5L/PCI
+ - IVTV16 1154:052b
+
+ * - 26
+ - AVerMedia UltraTV 1500 MCE / AVerTV M113 Philips Tuner
+ - IVTV16 1461:c019, IVTV16 1461:c01b
+
+ * - 27
+ - Sony VAIO Giga Pocket (ENX Kikyou)
+ - IVTV16 104d:813d
+
+ * - 28
+ - Hauppauge WinTV PVR-350 (V1)
+ - IVTV16 104d:813d
+
+ * - 29
+ - Yuan MPG600GR, Kuroutoshikou CX23416GYC-STVLP (no GR)
+ - IVTV16 104d:813d
+
+ * - 30
+ - Yuan MPG600GR, Kuroutoshikou CX23416GYC-STVLP (no GR/YCS)
+ - IVTV16 104d:813d
diff --git a/Documentation/media/v4l-drivers/qcom_camss.rst b/Documentation/media/v4l-drivers/qcom_camss.rst
new file mode 100644
index 000000000000..9e66b7b5770f
--- /dev/null
+++ b/Documentation/media/v4l-drivers/qcom_camss.rst
@@ -0,0 +1,156 @@
+.. include:: <isonum.txt>
+
+Qualcomm Camera Subsystem driver
+================================
+
+Introduction
+------------
+
+This file documents the Qualcomm Camera Subsystem driver located under
+drivers/media/platform/qcom/camss-8x16.
+
+The current version of the driver supports the Camera Subsystem found on
+Qualcomm MSM8916 and APQ8016 processors.
+
+The driver implements V4L2, Media controller and V4L2 subdev interfaces.
+Camera sensors using the V4L2 subdev interface in the kernel are supported.
+
+The driver is implemented using the Qualcomm Camera Subsystem driver for
+Android, as found in Code Aurora [#f1]_, as a reference.
+
+
+Qualcomm Camera Subsystem hardware
+----------------------------------
+
+The Camera Subsystem hardware found on 8x16 processors and supported by the
+driver consists of:
+
+- 2 CSIPHY modules. They handle the Physical layer of the CSI2 receivers.
+ A separate camera sensor can be connected to each of the CSIPHY modules;
+- 2 CSID (CSI Decoder) modules. They handle the Protocol and Application layer
+ of the CSI2 receivers. A CSID can decode a data stream from any of the CSIPHYs.
+ Each CSID also contains a TG (Test Generator) block which can generate
+ artificial input data for test purposes;
+- ISPIF (ISP Interface) module. Handles the routing of the data streams from
+ the CSIDs to the inputs of the VFE;
+- VFE (Video Front End) module. Contains a pipeline of image processing hardware
+ blocks. The VFE has different input interfaces. The PIX (Pixel) input
+ interface feeds the input data to the image processing pipeline. The image
+ processing pipeline also contains a scale and crop module at the end. Three
+ RDI (Raw Dump Interface) input interfaces bypass the image processing
+ pipeline. The VFE also contains the AXI bus interface which writes the output
+ data to memory.
+
+
+Supported functionality
+-----------------------
+
+The current version of the driver supports:
+
+- Input from camera sensor via CSIPHY;
+- Generation of test input data by the TG in CSID;
+- RDI interface of VFE - raw dump of the input data to memory.
+
+ Supported formats:
+
+ - YUYV/UYVY/YVYU/VYUY (packed YUV 4:2:2 - V4L2_PIX_FMT_YUYV /
+ V4L2_PIX_FMT_UYVY / V4L2_PIX_FMT_YVYU / V4L2_PIX_FMT_VYUY);
+ - MIPI RAW8 (8bit Bayer RAW - V4L2_PIX_FMT_SRGGB8 /
+ V4L2_PIX_FMT_SGRBG8 / V4L2_PIX_FMT_SGBRG8 / V4L2_PIX_FMT_SBGGR8);
+ - MIPI RAW10 (10bit packed Bayer RAW - V4L2_PIX_FMT_SBGGR10P /
+ V4L2_PIX_FMT_SGBRG10P / V4L2_PIX_FMT_SGRBG10P / V4L2_PIX_FMT_SRGGB10P);
+ - MIPI RAW12 (12bit packed Bayer RAW - V4L2_PIX_FMT_SRGGB12P /
+ V4L2_PIX_FMT_SGBRG12P / V4L2_PIX_FMT_SGRBG12P / V4L2_PIX_FMT_SBGGR12P).
+
+- PIX interface of VFE
+
+ - Format conversion of the input data.
+
+ Supported input formats:
+
+ - YUYV/UYVY/YVYU/VYUY (packed YUV 4:2:2 - V4L2_PIX_FMT_YUYV /
+ V4L2_PIX_FMT_UYVY / V4L2_PIX_FMT_YVYU / V4L2_PIX_FMT_VYUY).
+
+ Supported output formats:
+
+ - NV12/NV21 (two plane YUV 4:2:0 - V4L2_PIX_FMT_NV12 / V4L2_PIX_FMT_NV21);
+ - NV16/NV61 (two plane YUV 4:2:2 - V4L2_PIX_FMT_NV16 / V4L2_PIX_FMT_NV61).
+
+ - Scaling support. Configuration of the VFE Encoder Scale module
+ for downscaling with a ratio of up to 16x.
+
+ - Cropping support. Configuration of the VFE Encoder Crop module.
+
+- Concurrent and independent usage of two data inputs, which could be camera
+ sensors and/or the TG.
+
+
+Driver Architecture and Design
+------------------------------
+
+The driver implements the V4L2 subdev interface. With the goal of modeling the
+hardware links between the modules and to expose a clean, logical and usable
+interface, the driver is split into V4L2 sub-devices as follows:
+
+- 2 CSIPHY sub-devices - each CSIPHY is represented by a single sub-device;
+- 2 CSID sub-devices - each CSID is represented by a single sub-device;
+- 2 ISPIF sub-devices - ISPIF is represented by a number of sub-devices equal
+ to the number of CSID sub-devices;
+- 4 VFE sub-devices - VFE is represented by a number of sub-devices equal to
+ the number of the input interfaces (3 RDI and 1 PIX).
+
+The considerations for splitting the driver in this particular way are as follows:
+
+- representing the CSIPHY and CSID modules by a separate sub-device for each
+ module makes it possible to model the hardware links between these modules;
+- representing the VFE by a separate sub-device for each input interface makes
+ it possible to use the input interfaces concurrently and independently, as
+ this is supported by the hardware;
+- representing the ISPIF by a number of sub-devices equal to the number of CSID
+ sub-devices makes it possible to create linear media controller pipelines when
+ using two cameras simultaneously. This avoids branches in the pipelines which
+ would otherwise require a) userspace and b) the media framework (e.g. power
+ on/off operations) to make assumptions about the data flow from a sink pad to
+ a source pad on a single media entity.
+
+Each VFE sub-device is linked to a separate video device node.
+
+The media controller pipeline graph is as follows (with two OV5645 camera
+sensors connected):
+
+.. _qcom_camss_graph:
+
+.. kernel-figure:: qcom_camss_graph.dot
+ :alt: qcom_camss_graph.dot
+ :align: center
+
+ Media pipeline graph
+
+
+Implementation
+--------------
+
+Runtime configuration of the hardware (updating settings while streaming) is
+not required to implement the currently supported functionality. The complete
+configuration of each hardware module is applied on the STREAMON ioctl, based
+on the currently active media links and the configured formats and controls.
+
+The output size of the scaler module in the VFE is configured with the actual
+compose selection rectangle on the sink pad of the 'msm_vfe0_pix' entity.
+
+The crop output area of the crop module in the VFE is configured with the actual
+crop selection rectangle on the source pad of the 'msm_vfe0_pix' entity.
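
As an illustration only (the pad number and the helper name are assumptions,
not part of the driver), the compose rectangle can be set from userspace with
the standard VIDIOC_SUBDEV_S_SELECTION ioctl on the msm_vfe0_pix sub-device
node:

    /* Sketch: drive the VFE scaler via the compose selection rectangle. */
    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/v4l2-subdev.h>

    static int set_scaler_output(int subdev_fd, unsigned int w, unsigned int h)
    {
            struct v4l2_subdev_selection sel;

            memset(&sel, 0, sizeof(sel));
            sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
            sel.pad = 0;                    /* sink pad of 'msm_vfe0_pix' */
            sel.target = V4L2_SEL_TGT_COMPOSE;
            sel.r.width = w;
            sel.r.height = h;

            return ioctl(subdev_fd, VIDIOC_SUBDEV_S_SELECTION, &sel);
    }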
+
+
+Documentation
+-------------
+
+APQ8016 Specification:
+https://developer.qualcomm.com/download/sd410/snapdragon-410-processor-device-specification.pdf
+Referenced 2016-11-24.
+
+
+References
+----------
+
+.. [#f1] https://source.codeaurora.org/quic/la/kernel/msm-3.10/
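
A minimal userspace sketch (the device path and single-planar buffer type are
assumptions, and buffer allocation/queueing is omitted) of starting the stream
on one of the VFE video nodes, which is the point at which the driver applies
the complete pipeline configuration:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    int main(void)
    {
            int fd = open("/dev/video0", O_RDWR);   /* msm_vfe0_video0 above */
            enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

            if (fd < 0)
                    return 1;
            /* VIDIOC_REQBUFS/VIDIOC_QBUF would normally precede this call. */
            if (ioctl(fd, VIDIOC_STREAMON, &type) < 0) {
                    perror("VIDIOC_STREAMON");
                    return 1;
            }
            ioctl(fd, VIDIOC_STREAMOFF, &type);
            return 0;
    }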
diff --git a/Documentation/media/v4l-drivers/qcom_camss_graph.dot b/Documentation/media/v4l-drivers/qcom_camss_graph.dot
new file mode 100644
index 000000000000..827fc7112c1e
--- /dev/null
+++ b/Documentation/media/v4l-drivers/qcom_camss_graph.dot
@@ -0,0 +1,41 @@
+digraph board {
+ rankdir=TB
+ n00000001 [label="{{<port0> 0} | msm_csiphy0\n/dev/v4l-subdev0 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n00000001:port1 -> n00000007:port0 [style=dashed]
+ n00000001:port1 -> n0000000a:port0 [style=dashed]
+ n00000004 [label="{{<port0> 0} | msm_csiphy1\n/dev/v4l-subdev1 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n00000004:port1 -> n00000007:port0 [style=dashed]
+ n00000004:port1 -> n0000000a:port0 [style=dashed]
+ n00000007 [label="{{<port0> 0} | msm_csid0\n/dev/v4l-subdev2 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n00000007:port1 -> n0000000d:port0 [style=dashed]
+ n00000007:port1 -> n00000010:port0 [style=dashed]
+ n0000000a [label="{{<port0> 0} | msm_csid1\n/dev/v4l-subdev3 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n0000000a:port1 -> n0000000d:port0 [style=dashed]
+ n0000000a:port1 -> n00000010:port0 [style=dashed]
+ n0000000d [label="{{<port0> 0} | msm_ispif0\n/dev/v4l-subdev4 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n0000000d:port1 -> n00000013:port0 [style=dashed]
+ n0000000d:port1 -> n0000001c:port0 [style=dashed]
+ n0000000d:port1 -> n00000025:port0 [style=dashed]
+ n0000000d:port1 -> n0000002e:port0 [style=dashed]
+ n00000010 [label="{{<port0> 0} | msm_ispif1\n/dev/v4l-subdev5 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n00000010:port1 -> n00000013:port0 [style=dashed]
+ n00000010:port1 -> n0000001c:port0 [style=dashed]
+ n00000010:port1 -> n00000025:port0 [style=dashed]
+ n00000010:port1 -> n0000002e:port0 [style=dashed]
+ n00000013 [label="{{<port0> 0} | msm_vfe0_rdi0\n/dev/v4l-subdev6 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n00000013:port1 -> n00000016 [style=bold]
+ n00000016 [label="msm_vfe0_video0\n/dev/video0", shape=box, style=filled, fillcolor=yellow]
+ n0000001c [label="{{<port0> 0} | msm_vfe0_rdi1\n/dev/v4l-subdev7 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n0000001c:port1 -> n0000001f [style=bold]
+ n0000001f [label="msm_vfe0_video1\n/dev/video1", shape=box, style=filled, fillcolor=yellow]
+ n00000025 [label="{{<port0> 0} | msm_vfe0_rdi2\n/dev/v4l-subdev8 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n00000025:port1 -> n00000028 [style=bold]
+ n00000028 [label="msm_vfe0_video2\n/dev/video2", shape=box, style=filled, fillcolor=yellow]
+ n0000002e [label="{{<port0> 0} | msm_vfe0_pix\n/dev/v4l-subdev9 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
+ n0000002e:port1 -> n00000031 [style=bold]
+ n00000031 [label="msm_vfe0_video3\n/dev/video3", shape=box, style=filled, fillcolor=yellow]
+ n00000057 [label="{{} | ov5645 1-0076\n/dev/v4l-subdev10 | {<port0> 0}}", shape=Mrecord, style=filled, fillcolor=green]
+ n00000057:port0 -> n00000001:port0 [style=bold]
+ n00000059 [label="{{} | ov5645 1-0074\n/dev/v4l-subdev11 | {<port0> 0}}", shape=Mrecord, style=filled, fillcolor=green]
+ n00000059:port0 -> n00000004:port0 [style=bold]
+}
diff --git a/Documentation/media/v4l-drivers/saa7134-cardlist.rst b/Documentation/media/v4l-drivers/saa7134-cardlist.rst
index a5efa8f4b8e4..6e4c35cbaabf 100644
--- a/Documentation/media/v4l-drivers/saa7134-cardlist.rst
+++ b/Documentation/media/v4l-drivers/saa7134-cardlist.rst
@@ -1,204 +1,801 @@
SAA7134 cards list
==================
-=========== ======================================================= ================================================================
-Card number Card name PCI IDs
-=========== ======================================================= ================================================================
-0 UNKNOWN/GENERIC
-1 Proteus Pro [philips reference design] 1131:2001, 1131:2001
-2 LifeView FlyVIDEO3000 5168:0138, 4e42:0138
-3 LifeView/Typhoon FlyVIDEO2000 5168:0138, 4e42:0138
-4 EMPRESS 1131:6752
-5 SKNet Monster TV 1131:4e85
-6 Tevion MD 9717
-7 KNC One TV-Station RDS / Typhoon TV Tuner RDS 1131:fe01, 1894:fe01
-8 Terratec Cinergy 400 TV 153b:1142
-9 Medion 5044
-10 Kworld/KuroutoShikou SAA7130-TVPCI
-11 Terratec Cinergy 600 TV 153b:1143
-12 Medion 7134 16be:0003, 16be:5000
-13 Typhoon TV+Radio 90031
-14 ELSA EX-VISION 300TV 1048:226b
-15 ELSA EX-VISION 500TV 1048:226a
-16 ASUS TV-FM 7134 1043:4842, 1043:4830, 1043:4840
-17 AOPEN VA1000 POWER 1131:7133
-18 BMK MPEX No Tuner
-19 Compro VideoMate TV 185b:c100
-20 Matrox CronosPlus 102B:48d0
-21 10MOONS PCI TV CAPTURE CARD 1131:2001
-22 AverMedia M156 / Medion 2819 1461:a70b
-23 BMK MPEX Tuner
-24 KNC One TV-Station DVR 1894:a006
-25 ASUS TV-FM 7133 1043:4843
-26 Pinnacle PCTV Stereo (saa7134) 11bd:002b
-27 Manli MuchTV M-TV002
-28 Manli MuchTV M-TV001
-29 Nagase Sangyo TransGear 3000TV 1461:050c
-30 Elitegroup ECS TVP3XP FM1216 Tuner Card(PAL-BG,FM) 1019:4cb4
-31 Elitegroup ECS TVP3XP FM1236 Tuner Card (NTSC,FM) 1019:4cb5
-32 AVACS SmartTV
-33 AVerMedia DVD EZMaker 1461:10ff
-34 Noval Prime TV 7133
-35 AverMedia AverTV Studio 305 1461:2115
-36 UPMOST PURPLE TV 12ab:0800
-37 Items MuchTV Plus / IT-005
-38 Terratec Cinergy 200 TV 153b:1152
-39 LifeView FlyTV Platinum Mini 5168:0212, 4e42:0212, 5169:1502
-40 Compro VideoMate TV PVR/FM 185b:c100
-41 Compro VideoMate TV Gold+ 185b:c100
-42 Sabrent SBT-TVFM (saa7130)
-43 :Zolid Xpert TV7134
-44 Empire PCI TV-Radio LE
-45 Avermedia AVerTV Studio 307 1461:9715
-46 AVerMedia Cardbus TV/Radio (E500) 1461:d6ee
-47 Terratec Cinergy 400 mobile 153b:1162
-48 Terratec Cinergy 600 TV MK3 153b:1158
-49 Compro VideoMate Gold+ Pal 185b:c200
-50 Pinnacle PCTV 300i DVB-T + PAL 11bd:002d
-51 ProVideo PV952 1540:9524
-52 AverMedia AverTV/305 1461:2108
-53 ASUS TV-FM 7135 1043:4845
-54 LifeView FlyTV Platinum FM / Gold 5168:0214, 5168:5214, 1489:0214, 5168:0304
-55 LifeView FlyDVB-T DUO / MSI TV@nywhere Duo 5168:0306, 4E42:0306
-56 Avermedia AVerTV 307 1461:a70a
-57 Avermedia AVerTV GO 007 FM 1461:f31f
-58 ADS Tech Instant TV (saa7135) 1421:0350, 1421:0351, 1421:0370, 1421:1370
-59 Kworld/Tevion V-Stream Xpert TV PVR7134
-60 LifeView/Typhoon/Genius FlyDVB-T Duo Cardbus 5168:0502, 4e42:0502, 1489:0502
-61 Philips TOUGH DVB-T reference design 1131:2004
-62 Compro VideoMate TV Gold+II
-63 Kworld Xpert TV PVR7134
-64 FlyTV mini Asus Digimatrix 1043:0210
-65 V-Stream Studio TV Terminator
-66 Yuan TUN-900 (saa7135)
-67 Beholder BeholdTV 409 FM 0000:4091
-68 GoTView 7135 PCI 5456:7135
-69 Philips EUROPA V3 reference design 1131:2004
-70 Compro Videomate DVB-T300 185b:c900
-71 Compro Videomate DVB-T200 185b:c901
-72 RTD Embedded Technologies VFG7350 1435:7350
-73 RTD Embedded Technologies VFG7330 1435:7330
-74 LifeView FlyTV Platinum Mini2 14c0:1212
-75 AVerMedia AVerTVHD MCE A180 1461:1044
-76 SKNet MonsterTV Mobile 1131:4ee9
-77 Pinnacle PCTV 40i/50i/110i (saa7133) 11bd:002e
-78 ASUSTeK P7131 Dual 1043:4862
-79 Sedna/MuchTV PC TV Cardbus TV/Radio (ITO25 Rev:2B)
-80 ASUS Digimatrix TV 1043:0210
-81 Philips Tiger reference design 1131:2018
-82 MSI TV@Anywhere plus 1462:6231, 1462:8624
-83 Terratec Cinergy 250 PCI TV 153b:1160
-84 LifeView FlyDVB Trio 5168:0319
-85 AverTV DVB-T 777 1461:2c05, 1461:2c05
-86 LifeView FlyDVB-T / Genius VideoWonder DVB-T 5168:0301, 1489:0301
-87 ADS Instant TV Duo Cardbus PTV331 0331:1421
-88 Tevion/KWorld DVB-T 220RF 17de:7201
-89 ELSA EX-VISION 700TV 1048:226c
-90 Kworld ATSC110/115 17de:7350, 17de:7352
-91 AVerMedia A169 B 1461:7360
-92 AVerMedia A169 B1 1461:6360
-93 Medion 7134 Bridge #2 16be:0005
-94 LifeView FlyDVB-T Hybrid Cardbus/MSI TV @nywhere A/D NB 5168:3306, 5168:3502, 5168:3307, 4e42:3502
-95 LifeView FlyVIDEO3000 (NTSC) 5169:0138
-96 Medion Md8800 Quadro 16be:0007, 16be:0008, 16be:000d
-97 LifeView FlyDVB-S /Acorp TV134DS 5168:0300, 4e42:0300
-98 Proteus Pro 2309 0919:2003
-99 AVerMedia TV Hybrid A16AR 1461:2c00
-100 Asus Europa2 OEM 1043:4860
-101 Pinnacle PCTV 310i 11bd:002f
-102 Avermedia AVerTV Studio 507 1461:9715
-103 Compro Videomate DVB-T200A
-104 Hauppauge WinTV-HVR1110 DVB-T/Hybrid 0070:6700, 0070:6701, 0070:6702, 0070:6703, 0070:6704, 0070:6705
-105 Terratec Cinergy HT PCMCIA 153b:1172
-106 Encore ENLTV 1131:2342, 1131:2341, 3016:2344
-107 Encore ENLTV-FM 1131:230f
-108 Terratec Cinergy HT PCI 153b:1175
-109 Philips Tiger - S Reference design
-110 Avermedia M102 1461:f31e
-111 ASUS P7131 4871 1043:4871
-112 ASUSTeK P7131 Hybrid 1043:4876
-113 Elitegroup ECS TVP3XP FM1246 Tuner Card (PAL,FM) 1019:4cb6
-114 KWorld DVB-T 210 17de:7250
-115 Sabrent PCMCIA TV-PCB05 0919:2003
-116 10MOONS TM300 TV Card 1131:2304
-117 Avermedia Super 007 1461:f01d
-118 Beholder BeholdTV 401 0000:4016
-119 Beholder BeholdTV 403 0000:4036
-120 Beholder BeholdTV 403 FM 0000:4037
-121 Beholder BeholdTV 405 0000:4050
-122 Beholder BeholdTV 405 FM 0000:4051
-123 Beholder BeholdTV 407 0000:4070
-124 Beholder BeholdTV 407 FM 0000:4071
-125 Beholder BeholdTV 409 0000:4090
-126 Beholder BeholdTV 505 FM 5ace:5050
-127 Beholder BeholdTV 507 FM / BeholdTV 509 FM 5ace:5070, 5ace:5090
-128 Beholder BeholdTV Columbus TV/FM 0000:5201
-129 Beholder BeholdTV 607 FM 5ace:6070
-130 Beholder BeholdTV M6 5ace:6190
-131 Twinhan Hybrid DTV-DVB 3056 PCI 1822:0022
-132 Genius TVGO AM11MCE
-133 NXP Snake DVB-S reference design
-134 Medion/Creatix CTX953 Hybrid 16be:0010
-135 MSI TV@nywhere A/D v1.1 1462:8625
-136 AVerMedia Cardbus TV/Radio (E506R) 1461:f436
-137 AVerMedia Hybrid TV/Radio (A16D) 1461:f936
-138 Avermedia M115 1461:a836
-139 Compro VideoMate T750 185b:c900
-140 Avermedia DVB-S Pro A700 1461:a7a1
-141 Avermedia DVB-S Hybrid+FM A700 1461:a7a2
-142 Beholder BeholdTV H6 5ace:6290
-143 Beholder BeholdTV M63 5ace:6191
-144 Beholder BeholdTV M6 Extra 5ace:6193
-145 AVerMedia MiniPCI DVB-T Hybrid M103 1461:f636, 1461:f736
-146 ASUSTeK P7131 Analog
-147 Asus Tiger 3in1 1043:4878
-148 Encore ENLTV-FM v5.3 1a7f:2008
-149 Avermedia PCI pure analog (M135A) 1461:f11d
-150 Zogis Real Angel 220
-151 ADS Tech Instant HDTV 1421:0380
-152 Asus Tiger Rev:1.00 1043:4857
-153 Kworld Plus TV Analog Lite PCI 17de:7128
-154 Avermedia AVerTV GO 007 FM Plus 1461:f31d
-155 Hauppauge WinTV-HVR1150 ATSC/QAM-Hybrid 0070:6706, 0070:6708
-156 Hauppauge WinTV-HVR1120 DVB-T/Hybrid 0070:6707, 0070:6709, 0070:670a
-157 Avermedia AVerTV Studio 507UA 1461:a11b
-158 AVerMedia Cardbus TV/Radio (E501R) 1461:b7e9
-159 Beholder BeholdTV 505 RDS 0000:505B
-160 Beholder BeholdTV 507 RDS 0000:5071
-161 Beholder BeholdTV 507 RDS 0000:507B
-162 Beholder BeholdTV 607 FM 5ace:6071
-163 Beholder BeholdTV 609 FM 5ace:6090
-164 Beholder BeholdTV 609 FM 5ace:6091
-165 Beholder BeholdTV 607 RDS 5ace:6072
-166 Beholder BeholdTV 607 RDS 5ace:6073
-167 Beholder BeholdTV 609 RDS 5ace:6092
-168 Beholder BeholdTV 609 RDS 5ace:6093
-169 Compro VideoMate S350/S300 185b:c900
-170 AverMedia AverTV Studio 505 1461:a115
-171 Beholder BeholdTV X7 5ace:7595
-172 RoverMedia TV Link Pro FM 19d1:0138
-173 Zolid Hybrid TV Tuner PCI 1131:2004
-174 Asus Europa Hybrid OEM 1043:4847
-175 Leadtek Winfast DTV1000S 107d:6655
-176 Beholder BeholdTV 505 RDS 0000:5051
-177 Hawell HW-404M7
-178 Beholder BeholdTV H7 5ace:7190
-179 Beholder BeholdTV A7 5ace:7090
-180 Avermedia PCI M733A 1461:4155, 1461:4255
-181 TechoTrend TT-budget T-3000 13c2:2804
-182 Kworld PCI SBTVD/ISDB-T Full-Seg Hybrid 17de:b136
-183 Compro VideoMate Vista M1F 185b:c900
-184 Encore ENLTV-FM 3 1a7f:2108
-185 MagicPro ProHDTV Pro2 DMB-TH/Hybrid 17de:d136
-186 Beholder BeholdTV 501 5ace:5010
-187 Beholder BeholdTV 503 FM 5ace:5030
-188 Sensoray 811/911 6000:0811, 6000:0911
-189 Kworld PC150-U 17de:a134
-190 Asus My Cinema PS3-100 1043:48cd
-191 Hawell HW-9004V1
-192 AverMedia AverTV Satellite Hybrid+FM A706 1461:2055
-193 WIS Voyager or compatible 1905:7007
-194 AverMedia AverTV/505 1461:a10a
-195 Leadtek Winfast TV2100 FM 107d:6f3a
-196 SnaZio* TVPVR PRO 1779:13cf
-=========== ======================================================= ================================================================
+.. tabularcolumns:: |p{1.4cm}|p{11.1cm}|p{4.2cm}|
+
+.. flat-table::
+ :header-rows: 1
+ :widths: 2 19 18
+ :stub-columns: 0
+
+ * - Card number
+ - Card name
+ - PCI IDs
+
+ * - 0
+ - UNKNOWN/GENERIC
+ -
+
+ * - 1
+ - Proteus Pro [philips reference design]
+ - 1131:2001, 1131:2001
+
+ * - 2
+ - LifeView FlyVIDEO3000
+ - 5168:0138, 4e42:0138
+
+ * - 3
+ - LifeView/Typhoon FlyVIDEO2000
+ - 5168:0138, 4e42:0138
+
+ * - 4
+ - EMPRESS
+ - 1131:6752
+
+ * - 5
+ - SKNet Monster TV
+ - 1131:4e85
+
+ * - 6
+ - Tevion MD 9717
+ -
+
+ * - 7
+ - KNC One TV-Station RDS / Typhoon TV Tuner RDS
+ - 1131:fe01, 1894:fe01
+
+ * - 8
+ - Terratec Cinergy 400 TV
+ - 153b:1142
+
+ * - 9
+ - Medion 5044
+ -
+
+ * - 10
+ - Kworld/KuroutoShikou SAA7130-TVPCI
+ -
+
+ * - 11
+ - Terratec Cinergy 600 TV
+ - 153b:1143
+
+ * - 12
+ - Medion 7134
+ - 16be:0003, 16be:5000
+
+ * - 13
+ - Typhoon TV+Radio 90031
+ -
+
+ * - 14
+ - ELSA EX-VISION 300TV
+ - 1048:226b
+
+ * - 15
+ - ELSA EX-VISION 500TV
+ - 1048:226a
+
+ * - 16
+ - ASUS TV-FM 7134
+ - 1043:4842, 1043:4830, 1043:4840
+
+ * - 17
+ - AOPEN VA1000 POWER
+ - 1131:7133
+
+ * - 18
+ - BMK MPEX No Tuner
+ -
+
+ * - 19
+ - Compro VideoMate TV
+ - 185b:c100
+
+ * - 20
+ - Matrox CronosPlus
+ - 102B:48d0
+
+ * - 21
+ - 10MOONS PCI TV CAPTURE CARD
+ - 1131:2001
+
+ * - 22
+ - AverMedia M156 / Medion 2819
+ - 1461:a70b
+
+ * - 23
+ - BMK MPEX Tuner
+ -
+
+ * - 24
+ - KNC One TV-Station DVR
+ - 1894:a006
+
+ * - 25
+ - ASUS TV-FM 7133
+ - 1043:4843
+
+ * - 26
+ - Pinnacle PCTV Stereo (saa7134)
+ - 11bd:002b
+
+ * - 27
+ - Manli MuchTV M-TV002
+ -
+
+ * - 28
+ - Manli MuchTV M-TV001
+ -
+
+ * - 29
+ - Nagase Sangyo TransGear 3000TV
+ - 1461:050c
+
+ * - 30
+ - Elitegroup ECS TVP3XP FM1216 Tuner Card(PAL-BG,FM)
+ - 1019:4cb4
+
+ * - 31
+ - Elitegroup ECS TVP3XP FM1236 Tuner Card (NTSC,FM)
+ - 1019:4cb5
+
+ * - 32
+ - AVACS SmartTV
+ -
+
+ * - 33
+ - AVerMedia DVD EZMaker
+ - 1461:10ff
+
+ * - 34
+ - Noval Prime TV 7133
+ -
+
+ * - 35
+ - AverMedia AverTV Studio 305
+ - 1461:2115
+
+ * - 36
+ - UPMOST PURPLE TV
+ - 12ab:0800
+
+ * - 37
+ - Items MuchTV Plus / IT-005
+ -
+
+ * - 38
+ - Terratec Cinergy 200 TV
+ - 153b:1152
+
+ * - 39
+ - LifeView FlyTV Platinum Mini
+ - 5168:0212, 4e42:0212, 5169:1502
+
+ * - 40
+ - Compro VideoMate TV PVR/FM
+ - 185b:c100
+
+ * - 41
+ - Compro VideoMate TV Gold+
+ - 185b:c100
+
+ * - 42
+ - Sabrent SBT-TVFM (saa7130)
+ -
+
+ * - 43
+ - :Zolid Xpert TV7134
+ -
+
+ * - 44
+ - Empire PCI TV-Radio LE
+ -
+
+ * - 45
+ - Avermedia AVerTV Studio 307
+ - 1461:9715
+
+ * - 46
+ - AVerMedia Cardbus TV/Radio (E500)
+ - 1461:d6ee
+
+ * - 47
+ - Terratec Cinergy 400 mobile
+ - 153b:1162
+
+ * - 48
+ - Terratec Cinergy 600 TV MK3
+ - 153b:1158
+
+ * - 49
+ - Compro VideoMate Gold+ Pal
+ - 185b:c200
+
+ * - 50
+ - Pinnacle PCTV 300i DVB-T + PAL
+ - 11bd:002d
+
+ * - 51
+ - ProVideo PV952
+ - 1540:9524
+
+ * - 52
+ - AverMedia AverTV/305
+ - 1461:2108
+
+ * - 53
+ - ASUS TV-FM 7135
+ - 1043:4845
+
+ * - 54
+ - LifeView FlyTV Platinum FM / Gold
+ - 5168:0214, 5168:5214, 1489:0214, 5168:0304
+
+ * - 55
+ - LifeView FlyDVB-T DUO / MSI TV@nywhere Duo
+ - 5168:0306, 4E42:0306
+
+ * - 56
+ - Avermedia AVerTV 307
+ - 1461:a70a
+
+ * - 57
+ - Avermedia AVerTV GO 007 FM
+ - 1461:f31f
+
+ * - 58
+ - ADS Tech Instant TV (saa7135)
+ - 1421:0350, 1421:0351, 1421:0370, 1421:1370
+
+ * - 59
+ - Kworld/Tevion V-Stream Xpert TV PVR7134
+ -
+
+ * - 60
+ - LifeView/Typhoon/Genius FlyDVB-T Duo Cardbus
+ - 5168:0502, 4e42:0502, 1489:0502
+
+ * - 61
+ - Philips TOUGH DVB-T reference design
+ - 1131:2004
+
+ * - 62
+ - Compro VideoMate TV Gold+II
+ -
+
+ * - 63
+ - Kworld Xpert TV PVR7134
+ -
+
+ * - 64
+ - FlyTV mini Asus Digimatrix
+ - 1043:0210
+
+ * - 65
+ - V-Stream Studio TV Terminator
+ -
+
+ * - 66
+ - Yuan TUN-900 (saa7135)
+ -
+
+ * - 67
+ - Beholder BeholdTV 409 FM
+ - 0000:4091
+
+ * - 68
+ - GoTView 7135 PCI
+ - 5456:7135
+
+ * - 69
+ - Philips EUROPA V3 reference design
+ - 1131:2004
+
+ * - 70
+ - Compro Videomate DVB-T300
+ - 185b:c900
+
+ * - 71
+ - Compro Videomate DVB-T200
+ - 185b:c901
+
+ * - 72
+ - RTD Embedded Technologies VFG7350
+ - 1435:7350
+
+ * - 73
+ - RTD Embedded Technologies VFG7330
+ - 1435:7330
+
+ * - 74
+ - LifeView FlyTV Platinum Mini2
+ - 14c0:1212
+
+ * - 75
+ - AVerMedia AVerTVHD MCE A180
+ - 1461:1044
+
+ * - 76
+ - SKNet MonsterTV Mobile
+ - 1131:4ee9
+
+ * - 77
+ - Pinnacle PCTV 40i/50i/110i (saa7133)
+ - 11bd:002e
+
+ * - 78
+ - ASUSTeK P7131 Dual
+ - 1043:4862
+
+ * - 79
+ - Sedna/MuchTV PC TV Cardbus TV/Radio (ITO25 Rev:2B)
+ -
+
+ * - 80
+ - ASUS Digimatrix TV
+ - 1043:0210
+
+ * - 81
+ - Philips Tiger reference design
+ - 1131:2018
+
+ * - 82
+ - MSI TV@Anywhere plus
+ - 1462:6231, 1462:8624
+
+ * - 83
+ - Terratec Cinergy 250 PCI TV
+ - 153b:1160
+
+ * - 84
+ - LifeView FlyDVB Trio
+ - 5168:0319
+
+ * - 85
+ - AverTV DVB-T 777
+ - 1461:2c05, 1461:2c05
+
+ * - 86
+ - LifeView FlyDVB-T / Genius VideoWonder DVB-T
+ - 5168:0301, 1489:0301
+
+ * - 87
+ - ADS Instant TV Duo Cardbus PTV331
+ - 0331:1421
+
+ * - 88
+ - Tevion/KWorld DVB-T 220RF
+ - 17de:7201
+
+ * - 89
+ - ELSA EX-VISION 700TV
+ - 1048:226c
+
+ * - 90
+ - Kworld ATSC110/115
+ - 17de:7350, 17de:7352
+
+ * - 91
+ - AVerMedia A169 B
+ - 1461:7360
+
+ * - 92
+ - AVerMedia A169 B1
+ - 1461:6360
+
+ * - 93
+ - Medion 7134 Bridge #2
+ - 16be:0005
+
+ * - 94
+ - LifeView FlyDVB-T Hybrid Cardbus/MSI TV @nywhere A/D NB
+ - 5168:3306, 5168:3502, 5168:3307, 4e42:3502
+
+ * - 95
+ - LifeView FlyVIDEO3000 (NTSC)
+ - 5169:0138
+
+ * - 96
+ - Medion Md8800 Quadro
+ - 16be:0007, 16be:0008, 16be:000d
+
+ * - 97
+ - LifeView FlyDVB-S /Acorp TV134DS
+ - 5168:0300, 4e42:0300
+
+ * - 98
+ - Proteus Pro 2309
+ - 0919:2003
+
+ * - 99
+ - AVerMedia TV Hybrid A16AR
+ - 1461:2c00
+
+ * - 100
+ - Asus Europa2 OEM
+ - 1043:4860
+
+ * - 101
+ - Pinnacle PCTV 310i
+ - 11bd:002f
+
+ * - 102
+ - Avermedia AVerTV Studio 507
+ - 1461:9715
+
+ * - 103
+ - Compro Videomate DVB-T200A
+ -
+
+ * - 104
+ - Hauppauge WinTV-HVR1110 DVB-T/Hybrid
+ - 0070:6700, 0070:6701, 0070:6702, 0070:6703, 0070:6704, 0070:6705
+
+ * - 105
+ - Terratec Cinergy HT PCMCIA
+ - 153b:1172
+
+ * - 106
+ - Encore ENLTV
+ - 1131:2342, 1131:2341, 3016:2344
+
+ * - 107
+ - Encore ENLTV-FM
+ - 1131:230f
+
+ * - 108
+ - Terratec Cinergy HT PCI
+ - 153b:1175
+
+ * - 109
+ - Philips Tiger - S Reference design
+ -
+
+ * - 110
+ - Avermedia M102
+ - 1461:f31e
+
+ * - 111
+ - ASUS P7131 4871
+ - 1043:4871
+
+ * - 112
+ - ASUSTeK P7131 Hybrid
+ - 1043:4876
+
+ * - 113
+ - Elitegroup ECS TVP3XP FM1246 Tuner Card (PAL,FM)
+ - 1019:4cb6
+
+ * - 114
+ - KWorld DVB-T 210
+ - 17de:7250
+
+ * - 115
+ - Sabrent PCMCIA TV-PCB05
+ - 0919:2003
+
+ * - 116
+ - 10MOONS TM300 TV Card
+ - 1131:2304
+
+ * - 117
+ - Avermedia Super 007
+ - 1461:f01d
+
+ * - 118
+ - Beholder BeholdTV 401
+ - 0000:4016
+
+ * - 119
+ - Beholder BeholdTV 403
+ - 0000:4036
+
+ * - 120
+ - Beholder BeholdTV 403 FM
+ - 0000:4037
+
+ * - 121
+ - Beholder BeholdTV 405
+ - 0000:4050
+
+ * - 122
+ - Beholder BeholdTV 405 FM
+ - 0000:4051
+
+ * - 123
+ - Beholder BeholdTV 407
+ - 0000:4070
+
+ * - 124
+ - Beholder BeholdTV 407 FM
+ - 0000:4071
+
+ * - 125
+ - Beholder BeholdTV 409
+ - 0000:4090
+
+ * - 126
+ - Beholder BeholdTV 505 FM
+ - 5ace:5050
+
+ * - 127
+ - Beholder BeholdTV 507 FM / BeholdTV 509 FM
+ - 5ace:5070, 5ace:5090
+
+ * - 128
+ - Beholder BeholdTV Columbus TV/FM
+ - 0000:5201
+
+ * - 129
+ - Beholder BeholdTV 607 FM
+ - 5ace:6070
+
+ * - 130
+ - Beholder BeholdTV M6
+ - 5ace:6190
+
+ * - 131
+ - Twinhan Hybrid DTV-DVB 3056 PCI
+ - 1822:0022
+
+ * - 132
+ - Genius TVGO AM11MCE
+ -
+
+ * - 133
+ - NXP Snake DVB-S reference design
+ -
+
+ * - 134
+ - Medion/Creatix CTX953 Hybrid
+ - 16be:0010
+
+ * - 135
+ - MSI TV@nywhere A/D v1.1
+ - 1462:8625
+
+ * - 136
+ - AVerMedia Cardbus TV/Radio (E506R)
+ - 1461:f436
+
+ * - 137
+ - AVerMedia Hybrid TV/Radio (A16D)
+ - 1461:f936
+
+ * - 138
+ - Avermedia M115
+ - 1461:a836
+
+ * - 139
+ - Compro VideoMate T750
+ - 185b:c900
+
+ * - 140
+ - Avermedia DVB-S Pro A700
+ - 1461:a7a1
+
+ * - 141
+ - Avermedia DVB-S Hybrid+FM A700
+ - 1461:a7a2
+
+ * - 142
+ - Beholder BeholdTV H6
+ - 5ace:6290
+
+ * - 143
+ - Beholder BeholdTV M63
+ - 5ace:6191
+
+ * - 144
+ - Beholder BeholdTV M6 Extra
+ - 5ace:6193
+
+ * - 145
+ - AVerMedia MiniPCI DVB-T Hybrid M103
+ - 1461:f636, 1461:f736
+
+ * - 146
+ - ASUSTeK P7131 Analog
+ -
+
+ * - 147
+ - Asus Tiger 3in1
+ - 1043:4878
+
+ * - 148
+ - Encore ENLTV-FM v5.3
+ - 1a7f:2008
+
+ * - 149
+ - Avermedia PCI pure analog (M135A)
+ - 1461:f11d
+
+ * - 150
+ - Zogis Real Angel 220
+ -
+
+ * - 151
+ - ADS Tech Instant HDTV
+ - 1421:0380
+
+ * - 152
+ - Asus Tiger Rev:1.00
+ - 1043:4857
+
+ * - 153
+ - Kworld Plus TV Analog Lite PCI
+ - 17de:7128
+
+ * - 154
+ - Avermedia AVerTV GO 007 FM Plus
+ - 1461:f31d
+
+ * - 155
+ - Hauppauge WinTV-HVR1150 ATSC/QAM-Hybrid
+ - 0070:6706, 0070:6708
+
+ * - 156
+ - Hauppauge WinTV-HVR1120 DVB-T/Hybrid
+ - 0070:6707, 0070:6709, 0070:670a
+
+ * - 157
+ - Avermedia AVerTV Studio 507UA
+ - 1461:a11b
+
+ * - 158
+ - AVerMedia Cardbus TV/Radio (E501R)
+ - 1461:b7e9
+
+ * - 159
+ - Beholder BeholdTV 505 RDS
+ - 0000:505B
+
+ * - 160
+ - Beholder BeholdTV 507 RDS
+ - 0000:5071
+
+ * - 161
+ - Beholder BeholdTV 507 RDS
+ - 0000:507B
+
+ * - 162
+ - Beholder BeholdTV 607 FM
+ - 5ace:6071
+
+ * - 163
+ - Beholder BeholdTV 609 FM
+ - 5ace:6090
+
+ * - 164
+ - Beholder BeholdTV 609 FM
+ - 5ace:6091
+
+ * - 165
+ - Beholder BeholdTV 607 RDS
+ - 5ace:6072
+
+ * - 166
+ - Beholder BeholdTV 607 RDS
+ - 5ace:6073
+
+ * - 167
+ - Beholder BeholdTV 609 RDS
+ - 5ace:6092
+
+ * - 168
+ - Beholder BeholdTV 609 RDS
+ - 5ace:6093
+
+ * - 169
+ - Compro VideoMate S350/S300
+ - 185b:c900
+
+ * - 170
+ - AverMedia AverTV Studio 505
+ - 1461:a115
+
+ * - 171
+ - Beholder BeholdTV X7
+ - 5ace:7595
+
+ * - 172
+ - RoverMedia TV Link Pro FM
+ - 19d1:0138
+
+ * - 173
+ - Zolid Hybrid TV Tuner PCI
+ - 1131:2004
+
+ * - 174
+ - Asus Europa Hybrid OEM
+ - 1043:4847
+
+ * - 175
+ - Leadtek Winfast DTV1000S
+ - 107d:6655
+
+ * - 176
+ - Beholder BeholdTV 505 RDS
+ - 0000:5051
+
+ * - 177
+ - Hawell HW-404M7
+ -
+
+ * - 178
+ - Beholder BeholdTV H7
+ - 5ace:7190
+
+ * - 179
+ - Beholder BeholdTV A7
+ - 5ace:7090
+
+ * - 180
+ - Avermedia PCI M733A
+ - 1461:4155, 1461:4255
+
+ * - 181
+ - TechoTrend TT-budget T-3000
+ - 13c2:2804
+
+ * - 182
+ - Kworld PCI SBTVD/ISDB-T Full-Seg Hybrid
+ - 17de:b136
+
+ * - 183
+ - Compro VideoMate Vista M1F
+ - 185b:c900
+
+ * - 184
+ - Encore ENLTV-FM 3
+ - 1a7f:2108
+
+ * - 185
+ - MagicPro ProHDTV Pro2 DMB-TH/Hybrid
+ - 17de:d136
+
+ * - 186
+ - Beholder BeholdTV 501
+ - 5ace:5010
+
+ * - 187
+ - Beholder BeholdTV 503 FM
+ - 5ace:5030
+
+ * - 188
+ - Sensoray 811/911
+ - 6000:0811, 6000:0911
+
+ * - 189
+ - Kworld PC150-U
+ - 17de:a134
+
+ * - 190
+ - Asus My Cinema PS3-100
+ - 1043:48cd
+
+ * - 191
+ - Hawell HW-9004V1
+ -
+
+ * - 192
+ - AverMedia AverTV Satellite Hybrid+FM A706
+ - 1461:2055
+
+ * - 193
+ - WIS Voyager or compatible
+ - 1905:7007
+
+ * - 194
+ - AverMedia AverTV/505
+ - 1461:a10a
+
+ * - 195
+ - Leadtek Winfast TV2100 FM
+ - 107d:6f3a
+
+ * - 196
+ - SnaZio* TVPVR PRO
+ - 1779:13cf
diff --git a/Documentation/media/v4l-drivers/saa7164-cardlist.rst b/Documentation/media/v4l-drivers/saa7164-cardlist.rst
index 7d17d38df3bc..e28382ba82e6 100644
--- a/Documentation/media/v4l-drivers/saa7164-cardlist.rst
+++ b/Documentation/media/v4l-drivers/saa7164-cardlist.rst
@@ -1,21 +1,69 @@
SAA7164 cards list
==================
-=========== ==================================== ====================
-Card number Card name PCI IDs
-=========== ==================================== ====================
-0 Unknown
-1 Generic Rev2
-2 Generic Rev3
-3 Hauppauge WinTV-HVR2250 0070:8880, 0070:8810
-4 Hauppauge WinTV-HVR2200 0070:8980
-5 Hauppauge WinTV-HVR2200 0070:8900
-6 Hauppauge WinTV-HVR2200 0070:8901
-7 Hauppauge WinTV-HVR2250 0070:8891, 0070:8851
-8 Hauppauge WinTV-HVR2250 0070:88A1
-9 Hauppauge WinTV-HVR2200 0070:8940
-10 Hauppauge WinTV-HVR2200 0070:8953
-11 Hauppauge WinTV-HVR2255(proto) 0070:f111
-12 Hauppauge WinTV-HVR2255 0070:f111
-13 Hauppauge WinTV-HVR2205 0070:f123, 0070:f120
-=========== ==================================== ====================
+.. tabularcolumns:: |p{1.4cm}|p{11.1cm}|p{4.2cm}|
+
+.. flat-table::
+ :header-rows: 1
+ :widths: 2 19 18
+ :stub-columns: 0
+
+ * - Card number
+ - Card name
+ - PCI IDs
+
+ * - 0
+ - Unknown
+ -
+
+ * - 1
+ - Generic Rev2
+ -
+
+ * - 2
+ - Generic Rev3
+ -
+
+ * - 3
+ - Hauppauge WinTV-HVR2250
+ - 0070:8880, 0070:8810
+
+ * - 4
+ - Hauppauge WinTV-HVR2200
+ - 0070:8980
+
+ * - 5
+ - Hauppauge WinTV-HVR2200
+ - 0070:8900
+
+ * - 6
+ - Hauppauge WinTV-HVR2200
+ - 0070:8901
+
+ * - 7
+ - Hauppauge WinTV-HVR2250
+ - 0070:8891, 0070:8851
+
+ * - 8
+ - Hauppauge WinTV-HVR2250
+ - 0070:88A1
+
+ * - 9
+ - Hauppauge WinTV-HVR2200
+ - 0070:8940
+
+ * - 10
+ - Hauppauge WinTV-HVR2200
+ - 0070:8953
+
+ * - 11
+ - Hauppauge WinTV-HVR2255(proto)
+ - 0070:f111
+
+ * - 12
+ - Hauppauge WinTV-HVR2255
+ - 0070:f111
+
+ * - 13
+ - Hauppauge WinTV-HVR2205
+ - 0070:f123, 0070:f120
diff --git a/Documentation/media/v4l-drivers/tm6000-cardlist.rst b/Documentation/media/v4l-drivers/tm6000-cardlist.rst
index ae2952683ccf..6bd083544457 100644
--- a/Documentation/media/v4l-drivers/tm6000-cardlist.rst
+++ b/Documentation/media/v4l-drivers/tm6000-cardlist.rst
@@ -1,24 +1,81 @@
TM6000 cards list
=================
-=========== ================================================= ==========================================
-Card number Card name USB IDs
-=========== ================================================= ==========================================
-0 Unknown tm6000 video grabber
-1 Generic tm5600 board 6000:0001
-2 Generic tm6000 board
-3 Generic tm6010 board 6000:0002
-4 10Moons UT 821
-5 10Moons UT 330
-6 ADSTECH Dual TV USB 06e1:f332
-7 Freecom Hybrid Stick / Moka DVB-T Receiver Dual 14aa:0620
-8 ADSTECH Mini Dual TV USB 06e1:b339
-9 Hauppauge WinTV HVR-900H / WinTV USB2-Stick 2040:6600, 2040:6601, 2040:6610, 2040:6611
-10 Beholder Wander DVB-T/TV/FM USB2.0 6000:dec0
-11 Beholder Voyager TV/FM USB2.0 6000:dec1
-12 Terratec Cinergy Hybrid XE / Cinergy Hybrid-Stick 0ccd:0086, 0ccd:00A5
-13 Twinhan TU501(704D1) 13d3:3240, 13d3:3241, 13d3:3243, 13d3:3264
-14 Beholder Wander Lite DVB-T/TV/FM USB2.0 6000:dec2
-15 Beholder Voyager Lite TV/FM USB2.0 6000:dec3
-16 Terratec Grabster AV 150/250 MX 0ccd:0079
-=========== ================================================= ==========================================
+.. tabularcolumns:: |p{1.4cm}|p{11.1cm}|p{4.2cm}|
+
+.. flat-table::
+ :header-rows: 1
+ :widths: 2 19 18
+ :stub-columns: 0
+
+ * - Card number
+ - Card name
+ - USB IDs
+
+ * - 0
+ - Unknown tm6000 video grabber
+ -
+
+ * - 1
+ - Generic tm5600 board
+ - 6000:0001
+
+ * - 2
+ - Generic tm6000 board
+ -
+
+ * - 3
+ - Generic tm6010 board
+ - 6000:0002
+
+ * - 4
+ - 10Moons UT 821
+ -
+
+ * - 5
+ - 10Moons UT 330
+ -
+
+ * - 6
+ - ADSTECH Dual TV USB
+ - 06e1:f332
+
+ * - 7
+ - Freecom Hybrid Stick / Moka DVB-T Receiver Dual
+ - 14aa:0620
+
+ * - 8
+ - ADSTECH Mini Dual TV USB
+ - 06e1:b339
+
+ * - 9
+ - Hauppauge WinTV HVR-900H / WinTV USB2-Stick
+ - 2040:6600, 2040:6601, 2040:6610, 2040:6611
+
+ * - 10
+ - Beholder Wander DVB-T/TV/FM USB2.0
+ - 6000:dec0
+
+ * - 11
+ - Beholder Voyager TV/FM USB2.0
+ - 6000:dec1
+
+ * - 12
+ - Terratec Cinergy Hybrid XE / Cinergy Hybrid-Stick
+ - 0ccd:0086, 0ccd:00A5
+
+ * - 13
+ - Twinhan TU501(704D1)
+ - 13d3:3240, 13d3:3241, 13d3:3243, 13d3:3264
+
+ * - 14
+ - Beholder Wander Lite DVB-T/TV/FM USB2.0
+ - 6000:dec2
+
+ * - 15
+ - Beholder Voyager Lite TV/FM USB2.0
+ - 6000:dec3
+
+ * - 16
+ - Terratec Grabster AV 150/250 MX
+ - 0ccd:0079
diff --git a/Documentation/media/v4l-drivers/usbvision-cardlist.rst b/Documentation/media/v4l-drivers/usbvision-cardlist.rst
index 44d53dff0984..5a8ffbfc204e 100644
--- a/Documentation/media/v4l-drivers/usbvision-cardlist.rst
+++ b/Documentation/media/v4l-drivers/usbvision-cardlist.rst
@@ -1,74 +1,281 @@
USBvision cards list
====================
-=========== ======================================================== =========
-Card number Card name USB IDs
-=========== ======================================================== =========
-0 Xanboo 0a6f:0400
-1 Belkin USB VideoBus II Adapter 050d:0106
-2 Belkin Components USB VideoBus 050d:0207
-3 Belkin USB VideoBus II 050d:0208
-4 echoFX InterView Lite 0571:0002
-5 USBGear USBG-V1 resp. HAMA USB 0573:0003
-6 D-Link V100 0573:0400
-7 X10 USB Camera 0573:2000
-8 Hauppauge WinTV USB Live (PAL B/G) 0573:2d00
-9 Hauppauge WinTV USB Live Pro (NTSC M/N) 0573:2d01
-10 Zoran Co. PMD (Nogatech) AV-grabber Manhattan 0573:2101
-11 Nogatech USB-TV (NTSC) FM 0573:4100
-12 PNY USB-TV (NTSC) FM 0573:4110
-13 PixelView PlayTv-USB PRO (PAL) FM 0573:4450
-14 ZTV ZT-721 2.4GHz USB A/V Receiver 0573:4550
-15 Hauppauge WinTV USB (NTSC M/N) 0573:4d00
-16 Hauppauge WinTV USB (PAL B/G) 0573:4d01
-17 Hauppauge WinTV USB (PAL I) 0573:4d02
-18 Hauppauge WinTV USB (PAL/SECAM L) 0573:4d03
-19 Hauppauge WinTV USB (PAL D/K) 0573:4d04
-20 Hauppauge WinTV USB (NTSC FM) 0573:4d10
-21 Hauppauge WinTV USB (PAL B/G FM) 0573:4d11
-22 Hauppauge WinTV USB (PAL I FM) 0573:4d12
-23 Hauppauge WinTV USB (PAL D/K FM) 0573:4d14
-24 Hauppauge WinTV USB Pro (NTSC M/N) 0573:4d2a
-25 Hauppauge WinTV USB Pro (NTSC M/N) V2 0573:4d2b
-26 Hauppauge WinTV USB Pro (PAL/SECAM B/G/I/D/K/L) 0573:4d2c
-27 Hauppauge WinTV USB Pro (NTSC M/N) V3 0573:4d20
-28 Hauppauge WinTV USB Pro (PAL B/G) 0573:4d21
-29 Hauppauge WinTV USB Pro (PAL I) 0573:4d22
-30 Hauppauge WinTV USB Pro (PAL/SECAM L) 0573:4d23
-31 Hauppauge WinTV USB Pro (PAL D/K) 0573:4d24
-32 Hauppauge WinTV USB Pro (PAL/SECAM BGDK/I/L) 0573:4d25
-33 Hauppauge WinTV USB Pro (PAL/SECAM BGDK/I/L) V2 0573:4d26
-34 Hauppauge WinTV USB Pro (PAL B/G) V2 0573:4d27
-35 Hauppauge WinTV USB Pro (PAL B/G,D/K) 0573:4d28
-36 Hauppauge WinTV USB Pro (PAL I,D/K) 0573:4d29
-37 Hauppauge WinTV USB Pro (NTSC M/N FM) 0573:4d30
-38 Hauppauge WinTV USB Pro (PAL B/G FM) 0573:4d31
-39 Hauppauge WinTV USB Pro (PAL I FM) 0573:4d32
-40 Hauppauge WinTV USB Pro (PAL D/K FM) 0573:4d34
-41 Hauppauge WinTV USB Pro (Temic PAL/SECAM B/G/I/D/K/L FM) 0573:4d35
-42 Hauppauge WinTV USB Pro (Temic PAL B/G FM) 0573:4d36
-43 Hauppauge WinTV USB Pro (PAL/SECAM B/G/I/D/K/L FM) 0573:4d37
-44 Hauppauge WinTV USB Pro (NTSC M/N FM) V2 0573:4d38
-45 Camtel Technology USB TV Genie Pro FM Model TVB330 0768:0006
-46 Digital Video Creator I 07d0:0001
-47 Global Village GV-007 (NTSC) 07d0:0002
-48 Dazzle Fusion Model DVC-50 Rev 1 (NTSC) 07d0:0003
-49 Dazzle Fusion Model DVC-80 Rev 1 (PAL) 07d0:0004
-50 Dazzle Fusion Model DVC-90 Rev 1 (SECAM) 07d0:0005
-51 Eskape Labs MyTV2Go 07f8:9104
-52 Pinnacle Studio PCTV USB (PAL) 2304:010d
-53 Pinnacle Studio PCTV USB (SECAM) 2304:0109
-54 Pinnacle Studio PCTV USB (PAL) FM 2304:0110
-55 Miro PCTV USB 2304:0111
-56 Pinnacle Studio PCTV USB (NTSC) FM 2304:0112
-57 Pinnacle Studio PCTV USB (PAL) FM V2 2304:0210
-58 Pinnacle Studio PCTV USB (NTSC) FM V2 2304:0212
-59 Pinnacle Studio PCTV USB (PAL) FM V3 2304:0214
-60 Pinnacle Studio Linx Video input cable (NTSC) 2304:0300
-61 Pinnacle Studio Linx Video input cable (PAL) 2304:0301
-62 Pinnacle PCTV Bungee USB (PAL) FM 2304:0419
-63 Hauppauge WinTv-USB 2400:4200
-64 Pinnacle Studio PCTV USB (NTSC) FM V3 2304:0113
-65 Nogatech USB MicroCam NTSC (NV3000N) 0573:3000
-66 Nogatech USB MicroCam PAL (NV3001P) 0573:3001
-=========== ======================================================== =========
+.. tabularcolumns:: |p{1.4cm}|p{11.1cm}|p{4.2cm}|
+
+.. flat-table::
+ :header-rows: 1
+ :widths: 2 19 18
+ :stub-columns: 0
+
+ * - Card number
+ - Card name
+ - USB IDs
+
+ * - 0
+ - Xanboo
+ - 0a6f:0400
+
+ * - 1
+ - Belkin USB VideoBus II Adapter
+ - 050d:0106
+
+ * - 2
+ - Belkin Components USB VideoBus
+ - 050d:0207
+
+ * - 3
+ - Belkin USB VideoBus II
+ - 050d:0208
+
+ * - 4
+ - echoFX InterView Lite
+ - 0571:0002
+
+ * - 5
+ - USBGear USBG-V1 resp. HAMA USB
+ - 0573:0003
+
+ * - 6
+ - D-Link V100
+ - 0573:0400
+
+ * - 7
+ - X10 USB Camera
+ - 0573:2000
+
+ * - 8
+ - Hauppauge WinTV USB Live (PAL B/G)
+ - 0573:2d00
+
+ * - 9
+ - Hauppauge WinTV USB Live Pro (NTSC M/N)
+ - 0573:2d01
+
+ * - 10
+ - Zoran Co. PMD (Nogatech) AV-grabber Manhattan
+ - 0573:2101
+
+ * - 11
+ - Nogatech USB-TV (NTSC) FM
+ - 0573:4100
+
+ * - 12
+ - PNY USB-TV (NTSC) FM
+ - 0573:4110
+
+ * - 13
+ - PixelView PlayTv-USB PRO (PAL) FM
+ - 0573:4450
+
+ * - 14
+ - ZTV ZT-721 2.4GHz USB A/V Receiver
+ - 0573:4550
+
+ * - 15
+ - Hauppauge WinTV USB (NTSC M/N)
+ - 0573:4d00
+
+ * - 16
+ - Hauppauge WinTV USB (PAL B/G)
+ - 0573:4d01
+
+ * - 17
+ - Hauppauge WinTV USB (PAL I)
+ - 0573:4d02
+
+ * - 18
+ - Hauppauge WinTV USB (PAL/SECAM L)
+ - 0573:4d03
+
+ * - 19
+ - Hauppauge WinTV USB (PAL D/K)
+ - 0573:4d04
+
+ * - 20
+ - Hauppauge WinTV USB (NTSC FM)
+ - 0573:4d10
+
+ * - 21
+ - Hauppauge WinTV USB (PAL B/G FM)
+ - 0573:4d11
+
+ * - 22
+ - Hauppauge WinTV USB (PAL I FM)
+ - 0573:4d12
+
+ * - 23
+ - Hauppauge WinTV USB (PAL D/K FM)
+ - 0573:4d14
+
+ * - 24
+ - Hauppauge WinTV USB Pro (NTSC M/N)
+ - 0573:4d2a
+
+ * - 25
+ - Hauppauge WinTV USB Pro (NTSC M/N) V2
+ - 0573:4d2b
+
+ * - 26
+ - Hauppauge WinTV USB Pro (PAL/SECAM B/G/I/D/K/L)
+ - 0573:4d2c
+
+ * - 27
+ - Hauppauge WinTV USB Pro (NTSC M/N) V3
+ - 0573:4d20
+
+ * - 28
+ - Hauppauge WinTV USB Pro (PAL B/G)
+ - 0573:4d21
+
+ * - 29
+ - Hauppauge WinTV USB Pro (PAL I)
+ - 0573:4d22
+
+ * - 30
+ - Hauppauge WinTV USB Pro (PAL/SECAM L)
+ - 0573:4d23
+
+ * - 31
+ - Hauppauge WinTV USB Pro (PAL D/K)
+ - 0573:4d24
+
+ * - 32
+ - Hauppauge WinTV USB Pro (PAL/SECAM BGDK/I/L)
+ - 0573:4d25
+
+ * - 33
+ - Hauppauge WinTV USB Pro (PAL/SECAM BGDK/I/L) V2
+ - 0573:4d26
+
+ * - 34
+ - Hauppauge WinTV USB Pro (PAL B/G) V2
+ - 0573:4d27
+
+ * - 35
+ - Hauppauge WinTV USB Pro (PAL B/G,D/K)
+ - 0573:4d28
+
+ * - 36
+ - Hauppauge WinTV USB Pro (PAL I,D/K)
+ - 0573:4d29
+
+ * - 37
+ - Hauppauge WinTV USB Pro (NTSC M/N FM)
+ - 0573:4d30
+
+ * - 38
+ - Hauppauge WinTV USB Pro (PAL B/G FM)
+ - 0573:4d31
+
+ * - 39
+ - Hauppauge WinTV USB Pro (PAL I FM)
+ - 0573:4d32
+
+ * - 40
+ - Hauppauge WinTV USB Pro (PAL D/K FM)
+ - 0573:4d34
+
+ * - 41
+ - Hauppauge WinTV USB Pro (Temic PAL/SECAM B/G/I/D/K/L FM)
+ - 0573:4d35
+
+ * - 42
+ - Hauppauge WinTV USB Pro (Temic PAL B/G FM)
+ - 0573:4d36
+
+ * - 43
+ - Hauppauge WinTV USB Pro (PAL/SECAM B/G/I/D/K/L FM)
+ - 0573:4d37
+
+ * - 44
+ - Hauppauge WinTV USB Pro (NTSC M/N FM) V2
+ - 0573:4d38
+
+ * - 45
+ - Camtel Technology USB TV Genie Pro FM Model TVB330
+ - 0768:0006
+
+ * - 46
+ - Digital Video Creator I
+ - 07d0:0001
+
+ * - 47
+ - Global Village GV-007 (NTSC)
+ - 07d0:0002
+
+ * - 48
+ - Dazzle Fusion Model DVC-50 Rev 1 (NTSC)
+ - 07d0:0003
+
+ * - 49
+ - Dazzle Fusion Model DVC-80 Rev 1 (PAL)
+ - 07d0:0004
+
+ * - 50
+ - Dazzle Fusion Model DVC-90 Rev 1 (SECAM)
+ - 07d0:0005
+
+ * - 51
+ - Eskape Labs MyTV2Go
+ - 07f8:9104
+
+ * - 52
+ - Pinnacle Studio PCTV USB (PAL)
+ - 2304:010d
+
+ * - 53
+ - Pinnacle Studio PCTV USB (SECAM)
+ - 2304:0109
+
+ * - 54
+ - Pinnacle Studio PCTV USB (PAL) FM
+ - 2304:0110
+
+ * - 55
+ - Miro PCTV USB
+ - 2304:0111
+
+ * - 56
+ - Pinnacle Studio PCTV USB (NTSC) FM
+ - 2304:0112
+
+ * - 57
+ - Pinnacle Studio PCTV USB (PAL) FM V2
+ - 2304:0210
+
+ * - 58
+ - Pinnacle Studio PCTV USB (NTSC) FM V2
+ - 2304:0212
+
+ * - 59
+ - Pinnacle Studio PCTV USB (PAL) FM V3
+ - 2304:0214
+
+ * - 60
+ - Pinnacle Studio Linx Video input cable (NTSC)
+ - 2304:0300
+
+ * - 61
+ - Pinnacle Studio Linx Video input cable (PAL)
+ - 2304:0301
+
+ * - 62
+ - Pinnacle PCTV Bungee USB (PAL) FM
+ - 2304:0419
+
+ * - 63
+ - Hauppauge WinTv-USB
+ - 2400:4200
+
+ * - 64
+ - Pinnacle Studio PCTV USB (NTSC) FM V3
+ - 2304:0113
+
+ * - 65
+ - Nogatech USB MicroCam NTSC (NV3000N)
+ - 0573:3000
+
+ * - 66
+ - Nogatech USB MicroCam PAL (NV3001P)
+ - 0573:3001
diff --git a/Documentation/media/v4l-drivers/vivid.rst b/Documentation/media/v4l-drivers/vivid.rst
index 3e44b2217f2d..089595ce11c5 100644
--- a/Documentation/media/v4l-drivers/vivid.rst
+++ b/Documentation/media/v4l-drivers/vivid.rst
@@ -829,6 +829,7 @@ The following two controls are only valid for video and vbi capture.
The following two controls are only valid for video capture.
- DV Timings Signal Mode:
+
selects the behavior of VIDIOC_QUERY_DV_TIMINGS: what
should it return?
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index c4ddfcd5ee32..b759a60624fd 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -498,11 +498,11 @@ And a couple of implicit varieties:
This means that ACQUIRE acts as a minimal "acquire" operation and
RELEASE acts as a minimal "release" operation.
-A subset of the atomic operations described in core-api/atomic_ops.rst have
-ACQUIRE and RELEASE variants in addition to fully-ordered and relaxed (no
-barrier semantics) definitions. For compound atomics performing both a load
-and a store, ACQUIRE semantics apply only to the load and RELEASE semantics
-apply only to the store portion of the operation.
+A subset of the atomic operations described in atomic_t.txt have ACQUIRE and
+RELEASE variants in addition to fully-ordered and relaxed (no barrier
+semantics) definitions. For compound atomics performing both a load and a
+store, ACQUIRE semantics apply only to the load and RELEASE semantics apply
+only to the store portion of the operation.
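
For example, the kernel's fetch-add family comes in all four variants (a brief
illustrative listing; see atomic_t.txt for the authoritative description):

    atomic_t v;
    int old;

    old = atomic_fetch_add(1, &v);          /* fully ordered */
    old = atomic_fetch_add_acquire(1, &v);  /* ACQUIRE applies to the load only */
    old = atomic_fetch_add_release(1, &v);  /* RELEASE applies to the store only */
    old = atomic_fetch_add_relaxed(1, &v);  /* no barrier semantics */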
Memory barriers are only required where there's a possibility of interaction
between two CPUs or between a CPU and a device. If it can be guaranteed that
@@ -594,7 +594,24 @@ between the address load and the data load:
This enforces the occurrence of one of the two implications, and prevents the
third possibility from arising.
-A data-dependency barrier must also order against dependent writes:
+
+[!] Note that this extremely counterintuitive situation arises most easily on
+machines with split caches, so that, for example, one cache bank processes
+even-numbered cache lines and the other bank processes odd-numbered cache
+lines. The pointer P might be stored in an odd-numbered cache line, and the
+variable B might be stored in an even-numbered cache line. Then, if the
+even-numbered bank of the reading CPU's cache is extremely busy while the
+odd-numbered bank is idle, one can see the new value of the pointer P (&B),
+but the old value of the variable B (2).
+
+
+A data-dependency barrier is not required to order dependent writes
+because the CPUs that the Linux kernel supports don't do writes
+until they are certain (1) that the write will actually happen, (2)
+of the location of the write, and (3) of the value to be written.
+But please carefully read the "CONTROL DEPENDENCIES" section and the
+Documentation/RCU/rcu_dereference.txt file: The compiler can and does
+break dependencies in a great many highly creative ways.
CPU 1 CPU 2
=============== ===============
@@ -603,29 +620,19 @@ A data-dependency barrier must also order against dependent writes:
<write barrier>
WRITE_ONCE(P, &B);
Q = READ_ONCE(P);
- <data dependency barrier>
- *Q = 5;
+ WRITE_ONCE(*Q, 5);
-The data-dependency barrier must order the read into Q with the store
-into *Q. This prohibits this outcome:
+Therefore, no data-dependency barrier is required to order the read into
+Q with the store into *Q. In other words, this outcome is prohibited,
+even without a data-dependency barrier:
(Q == &B) && (B == 4)
Please note that this pattern should be rare. After all, the whole point
of dependency ordering is to -prevent- writes to the data structure, along
with the expensive cache misses associated with those writes. This pattern
-can be used to record rare error conditions and the like, and the ordering
-prevents such records from being lost.
-
-
-[!] Note that this extremely counterintuitive situation arises most easily on
-machines with split caches, so that, for example, one cache bank processes
-even-numbered cache lines and the other bank processes odd-numbered cache
-lines. The pointer P might be stored in an odd-numbered cache line, and the
-variable B might be stored in an even-numbered cache line. Then, if the
-even-numbered bank of the reading CPU's cache is extremely busy while the
-odd-numbered bank is idle, one can see the new value of the pointer P (&B),
-but the old value of the variable B (2).
+can be used to record rare error conditions and the like, and the CPUs'
+naturally occurring ordering prevents such records from being lost.
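
A brief sketch of that error-recording use (the names here are illustrative
only, not from the kernel):

    struct err_rec {
            int code;
    };

    struct err_rec *err_slot;   /* producer publishes with smp_wmb() + WRITE_ONCE() */

    static void note_error(int code)
    {
            struct err_rec *r = READ_ONCE(err_slot);

            if (r)
                    WRITE_ONCE(r->code, code);  /* ordered after the load of err_slot */
    }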
The data dependency barrier is very important to the RCU system,
@@ -1876,8 +1883,7 @@ There are some more advanced barrier functions:
This makes sure that the death mark on the object is perceived to be set
*before* the reference counter is decremented.
- See Documentation/core-api/atomic_ops.rst for more information. See the
- "Atomic operations" subsection for information on where to use these.
+ See Documentation/atomic_{t,bitops}.txt for more information.
(*) lockless_dereference();
@@ -1982,10 +1988,7 @@ for each construct. These operations all imply certain barriers:
ACQUIRE operation has completed.
Memory operations issued before the ACQUIRE may be completed after
- the ACQUIRE operation has completed. An smp_mb__before_spinlock(),
- combined with a following ACQUIRE, orders prior stores against
- subsequent loads and stores. Note that this is weaker than smp_mb()!
- The smp_mb__before_spinlock() primitive is free on many architectures.
+ the ACQUIRE operation has completed.
(2) RELEASE operation implication:
@@ -2503,88 +2506,7 @@ operations are noted specially as some of them imply full memory barriers and
some don't, but they're very heavily relied on as a group throughout the
kernel.
-Any atomic operation that modifies some state in memory and returns information
-about the state (old or new) implies an SMP-conditional general memory barrier
-(smp_mb()) on each side of the actual operation (with the exception of
-explicit lock operations, described later). These include:
-
- xchg();
- atomic_xchg(); atomic_long_xchg();
- atomic_inc_return(); atomic_long_inc_return();
- atomic_dec_return(); atomic_long_dec_return();
- atomic_add_return(); atomic_long_add_return();
- atomic_sub_return(); atomic_long_sub_return();
- atomic_inc_and_test(); atomic_long_inc_and_test();
- atomic_dec_and_test(); atomic_long_dec_and_test();
- atomic_sub_and_test(); atomic_long_sub_and_test();
- atomic_add_negative(); atomic_long_add_negative();
- test_and_set_bit();
- test_and_clear_bit();
- test_and_change_bit();
-
- /* when succeeds */
- cmpxchg();
- atomic_cmpxchg(); atomic_long_cmpxchg();
- atomic_add_unless(); atomic_long_add_unless();
-
-These are used for such things as implementing ACQUIRE-class and RELEASE-class
-operations and adjusting reference counters towards object destruction, and as
-such the implicit memory barrier effects are necessary.
-
-
-The following operations are potential problems as they do _not_ imply memory
-barriers, but might be used for implementing such things as RELEASE-class
-operations:
-
- atomic_set();
- set_bit();
- clear_bit();
- change_bit();
-
-With these the appropriate explicit memory barrier should be used if necessary
-(smp_mb__before_atomic() for instance).
-
-
-The following also do _not_ imply memory barriers, and so may require explicit
-memory barriers under some circumstances (smp_mb__before_atomic() for
-instance):
-
- atomic_add();
- atomic_sub();
- atomic_inc();
- atomic_dec();
-
-If they're used for statistics generation, then they probably don't need memory
-barriers, unless there's a coupling between statistical data.
-
-If they're used for reference counting on an object to control its lifetime,
-they probably don't need memory barriers because either the reference count
-will be adjusted inside a locked section, or the caller will already hold
-sufficient references to make the lock, and thus a memory barrier unnecessary.
-
-If they're used for constructing a lock of some description, then they probably
-do need memory barriers as a lock primitive generally has to do things in a
-specific order.
-
-Basically, each usage case has to be carefully considered as to whether memory
-barriers are needed or not.
-
-The following operations are special locking primitives:
-
- test_and_set_bit_lock();
- clear_bit_unlock();
- __clear_bit_unlock();
-
-These implement ACQUIRE-class and RELEASE-class operations. These should be
-used in preference to other operations when implementing locking primitives,
-because their implementations can be optimised on many architectures.
-
-[!] Note that special memory barrier primitives are available for these
-situations because on some CPUs the atomic instructions used imply full memory
-barriers, and so barrier instructions are superfluous in conjunction with them,
-and in such cases the special barrier primitives will be no-ops.
-
-See Documentation/core-api/atomic_ops.rst for more information.
+See Documentation/atomic_t.txt for more information.
ACCESSING DEVICES
diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX
index c6beb5f1637f..7a79b3587dd3 100644
--- a/Documentation/networking/00-INDEX
+++ b/Documentation/networking/00-INDEX
@@ -30,8 +30,6 @@ atm.txt
- info on where to get ATM programs and support for Linux.
ax25.txt
- info on using AX.25 and NET/ROM code for Linux
-batman-adv.txt
- - B.A.T.M.A.N routing protocol on top of layer 2 Ethernet Frames.
baycom.txt
- info on the driver for Baycom style amateur radio modems
bonding.txt
diff --git a/Documentation/networking/batman-adv.rst b/Documentation/networking/batman-adv.rst
new file mode 100644
index 000000000000..a342b2cc3dc6
--- /dev/null
+++ b/Documentation/networking/batman-adv.rst
@@ -0,0 +1,220 @@
+==========
+batman-adv
+==========
+
+Batman advanced is a new approach to wireless networking which no longer
+operates on an IP basis. Unlike the batman daemon, which exchanges information
+using UDP packets and sets routing tables, batman-advanced operates on ISO/OSI
+Layer 2 only and uses and routes (or better: bridges) Ethernet frames. It
+emulates a virtual network switch comprising all participating nodes. Therefore
+all nodes appear to be link-local and higher-layer protocols are unaffected by
+changes within the network. You can run almost any protocol above batman
+advanced; prominent examples are IPv4, IPv6, DHCP and IPX.
+
+Batman advanced was implemented as a Linux kernel driver to reduce the overhead
+to a minimum. It does not depend on any (other) network driver, and can be used
+on wifi as well as ethernet lan, vpn, etc ... (anything with ethernet-style
+layer 2).
+
+
+Configuration
+=============
+
+Load the batman-adv module into your kernel::
+
+ $ insmod batman-adv.ko
+
+The module is now waiting for activation. You must add some interfaces on which
+batman can operate. After loading the module, batman advanced will scan your
+system's interfaces to search for compatible ones. Once found, it will
+create subfolders in the ``/sys`` directories of each supported interface,
+e.g.::
+
+ $ ls /sys/class/net/eth0/batman_adv/
+ elp_interval iface_status mesh_iface throughput_override
+
+If an interface does not have the ``batman_adv`` subfolder, it is probably not
+supported. Unsupported interfaces include loopback, non-Ethernet and batman's
+own interfaces.
+
+Note: After the module has been loaded it will continuously watch for new
+interfaces and verify their compatibility. There is no need to reload the module
+if you plug your USB wifi adapter into your machine after batman advanced was
+initially loaded.
+
+The batman-adv soft-interface can be created using the iproute2 tool ``ip``::
+
+ $ ip link add name bat0 type batadv
+
+To activate a given interface simply attach it to the ``bat0`` interface::
+
+ $ ip link set dev eth0 master bat0
+
+Repeat this step for all interfaces you wish to add. Now batman starts
+using/broadcasting on this/these interface(s).
+
+By reading the "iface_status" file you can check its status::
+
+ $ cat /sys/class/net/eth0/batman_adv/iface_status
+ active
+
+To deactivate an interface you have to detach it from the "bat0" interface::
+
+ $ ip link set dev eth0 nomaster
+
+
+All mesh wide settings can be found in batman's own interface folder::
+
+ $ ls /sys/class/net/bat0/mesh/
+ aggregated_ogms fragmentation isolation_mark routing_algo
+ ap_isolation gw_bandwidth log_level vlan0
+ bonding gw_mode multicast_mode
+ bridge_loop_avoidance gw_sel_class network_coding
+ distributed_arp_table hop_penalty orig_interval
+
+There is a special folder for debugging information::
+
+ $ ls /sys/kernel/debug/batman_adv/bat0/
+ bla_backbone_table log neighbors transtable_local
+ bla_claim_table mcast_flags originators
+ dat_cache nc socket
+ gateways nc_nodes transtable_global
+
+Some of the files contain all sorts of status information regarding the mesh
+network. For example, you can view the table of originators (mesh
+participants) with::
+
+ $ cat /sys/kernel/debug/batman_adv/bat0/originators
+
+Other files allow you to change batman's behaviour to better fit your requirements.
+For instance, you can check the current originator interval (value in
+milliseconds which determines how often batman sends its broadcast packets)::
+
+ $ cat /sys/class/net/bat0/mesh/orig_interval
+ 1000
+
+and also change its value::
+
+ $ echo 3000 > /sys/class/net/bat0/mesh/orig_interval
+
+In very mobile scenarios, you might want to adjust the originator interval to a
+lower value. This will make the mesh more responsive to topology changes, but
+will also increase the overhead.
+
+
+Usage
+=====
+
+To make use of your newly created mesh, batman advanced provides a new
+interface "bat0" which you should use from this point on. All interfaces added
+to batman advanced are no longer relevant because batman handles them for
+you. Basically, one "hands over" the data by using the batman interface and
+batman will make sure it reaches its destination.
+
+The "bat0" interface can be used like any other regular interface. It needs an
+IP address which can be configured either statically or dynamically (by using
+DHCP or similar services)::
+
+ NodeA: ip link set up dev bat0
+ NodeA: ip addr add 192.168.0.1/24 dev bat0
+
+ NodeB: ip link set up dev bat0
+ NodeB: ip addr add 192.168.0.2/24 dev bat0
+ NodeB: ping 192.168.0.1
+
+Note: In order to avoid problems remove all IP addresses previously assigned to
+interfaces now used by batman advanced, e.g.::
+
+ $ ip addr flush dev eth0
+
+
+Logging/Debugging
+=================
+
+All error messages, warnings and information messages are sent to the kernel
+log. Depending on your operating system distribution this can be read in one of
+a number of ways. Try using the commands: ``dmesg``, ``logread``, or looking in
+the files ``/var/log/kern.log`` or ``/var/log/syslog``. All batman-adv messages
+are prefixed with "batman-adv:", so to see just these messages try::
+
+ $ dmesg | grep batman-adv
+
+When investigating problems with your mesh network, it is sometimes necessary to
+see more detailed debug messages. This must be enabled when compiling the
+batman-adv module. When building batman-adv as part of the kernel, use "make
+menuconfig" and enable the option ``B.A.T.M.A.N. debugging``
+(``CONFIG_BATMAN_ADV_DEBUG=y``).
+
+Those additional debug messages can be accessed using a special file in
+debugfs::
+
+ $ cat /sys/kernel/debug/batman_adv/bat0/log
+
+The additional debug output is disabled by default. It can be enabled at
+run time. The following log levels are defined:
+
+.. flat-table::
+
+ * - 0
+ - All debug output disabled
+ * - 1
+ - Enable messages related to routing / flooding / broadcasting
+ * - 2
+ - Enable messages related to route added / changed / deleted
+ * - 4
+ - Enable messages related to translation table operations
+ * - 8
+ - Enable messages related to bridge loop avoidance
+ * - 16
+ - Enable messages related to DAT, ARP snooping and parsing
+ * - 32
+ - Enable messages related to network coding
+ * - 64
+ - Enable messages related to multicast
+ * - 128
+ - Enable messages related to throughput meter
+ * - 255
+ - Enable all messages
+
+The debug output can be changed at runtime using the file
+``/sys/class/net/bat0/mesh/log_level``. e.g.::
+
+ $ echo 6 > /sys/class/net/bat0/mesh/log_level
+
+will enable debug messages for when routes change.
+
+Counters for different types of packets entering and leaving the batman-adv
+module are available through ethtool::
+
+ $ ethtool --statistics bat0
+
+
+batctl
+======
+
+As batman advanced operates on layer 2, all hosts participating in the virtual
+switch are completely transparent for all protocols above layer 2. Therefore
+the common diagnosis tools do not work as expected. To overcome these problems,
+batctl was created. At the moment batctl contains ping, traceroute, tcpdump
+and interfaces to the kernel module settings.
+
+For more information, please see the manpage (``man batctl``).
+
+batctl is available on https://www.open-mesh.org/
+
+
+Contact
+=======
+
+Please send us comments, experiences, questions, anything :)
+
+IRC:
+ #batman on irc.freenode.org
+Mailing-list:
+ b.a.t.m.a.n@open-mesh.org (optional subscription at
+ https://lists.open-mesh.org/mm/listinfo/b.a.t.m.a.n)
+
+You can also contact the Authors:
+
+* Marek Lindner <mareklindner@neomailbox.ch>
+* Simon Wunderlich <sw@simonwunderlich.de>
diff --git a/Documentation/networking/batman-adv.txt b/Documentation/networking/batman-adv.txt
deleted file mode 100644
index ccf94677b240..000000000000
--- a/Documentation/networking/batman-adv.txt
+++ /dev/null
@@ -1,215 +0,0 @@
-BATMAN-ADV
-----------
-
-Batman advanced is a new approach to wireless networking which
-does no longer operate on the IP basis. Unlike the batman daemon,
-which exchanges information using UDP packets and sets routing
-tables, batman-advanced operates on ISO/OSI Layer 2 only and uses
-and routes (or better: bridges) Ethernet Frames. It emulates a
-virtual network switch of all nodes participating. Therefore all
-nodes appear to be link local, thus all higher operating proto-
-cols won't be affected by any changes within the network. You can
-run almost any protocol above batman advanced, prominent examples
-are: IPv4, IPv6, DHCP, IPX.
-
-Batman advanced was implemented as a Linux kernel driver to re-
-duce the overhead to a minimum. It does not depend on any (other)
-network driver, and can be used on wifi as well as ethernet lan,
-vpn, etc ... (anything with ethernet-style layer 2).
-
-
-CONFIGURATION
--------------
-
-Load the batman-adv module into your kernel:
-
-# insmod batman-adv.ko
-
-The module is now waiting for activation. You must add some in-
-terfaces on which batman can operate. After loading the module
-batman advanced will scan your systems interfaces to search for
-compatible interfaces. Once found, it will create subfolders in
-the /sys directories of each supported interface, e.g.
-
-# ls /sys/class/net/eth0/batman_adv/
-# elp_interval iface_status mesh_iface throughput_override
-
-If an interface does not have the "batman_adv" subfolder it prob-
-ably is not supported. Not supported interfaces are: loopback,
-non-ethernet and batman's own interfaces.
-
-Note: After the module was loaded it will continuously watch for
-new interfaces to verify the compatibility. There is no need to
-reload the module if you plug your USB wifi adapter into your ma-
-chine after batman advanced was initially loaded.
-
-The batman-adv soft-interface can be created using the iproute2
-tool "ip"
-
-# ip link add name bat0 type batadv
-
-To activate a given interface simply attach it to the "bat0"
-interface
-
-# ip link set dev eth0 master bat0
-
-Repeat this step for all interfaces you wish to add. Now batman
-starts using/broadcasting on this/these interface(s).
-
-By reading the "iface_status" file you can check its status:
-
-# cat /sys/class/net/eth0/batman_adv/iface_status
-# active
-
-To deactivate an interface you have to detach it from the
-"bat0" interface:
-
-# ip link set dev eth0 nomaster
-
-
-All mesh wide settings can be found in batman's own interface
-folder:
-
-# ls /sys/class/net/bat0/mesh/
-# aggregated_ogms fragmentation isolation_mark routing_algo
-# ap_isolation gw_bandwidth log_level vlan0
-# bonding gw_mode multicast_mode
-# bridge_loop_avoidance gw_sel_class network_coding
-# distributed_arp_table hop_penalty orig_interval
-
-There is a special folder for debugging information:
-
-# ls /sys/kernel/debug/batman_adv/bat0/
-# bla_backbone_table log neighbors transtable_local
-# bla_claim_table mcast_flags originators
-# dat_cache nc socket
-# gateways nc_nodes transtable_global
-
-Some of the files contain all sort of status information regard-
-ing the mesh network. For example, you can view the table of
-originators (mesh participants) with:
-
-# cat /sys/kernel/debug/batman_adv/bat0/originators
-
-Other files allow to change batman's behaviour to better fit your
-requirements. For instance, you can check the current originator
-interval (value in milliseconds which determines how often batman
-sends its broadcast packets):
-
-# cat /sys/class/net/bat0/mesh/orig_interval
-# 1000
-
-and also change its value:
-
-# echo 3000 > /sys/class/net/bat0/mesh/orig_interval
-
-In very mobile scenarios, you might want to adjust the originator
-interval to a lower value. This will make the mesh more respon-
-sive to topology changes, but will also increase the overhead.
-
-
-USAGE
------
-
-To make use of your newly created mesh, batman advanced provides
-a new interface "bat0" which you should use from this point on.
-All interfaces added to batman advanced are not relevant any
-longer because batman handles them for you. Basically, one "hands
-over" the data by using the batman interface and batman will make
-sure it reaches its destination.
-
-The "bat0" interface can be used like any other regular inter-
-face. It needs an IP address which can be either statically con-
-figured or dynamically (by using DHCP or similar services):
-
-# NodeA: ip link set up dev bat0
-# NodeA: ip addr add 192.168.0.1/24 dev bat0
-
-# NodeB: ip link set up dev bat0
-# NodeB: ip addr add 192.168.0.2/24 dev bat0
-# NodeB: ping 192.168.0.1
-
-Note: In order to avoid problems remove all IP addresses previ-
-ously assigned to interfaces now used by batman advanced, e.g.
-
-# ip addr flush dev eth0
-
-
-LOGGING/DEBUGGING
------------------
-
-All error messages, warnings and information messages are sent to
-the kernel log. Depending on your operating system distribution
-this can be read in one of a number of ways. Try using the com-
-mands: dmesg, logread, or looking in the files /var/log/kern.log
-or /var/log/syslog. All batman-adv messages are prefixed with
-"batman-adv:" So to see just these messages try
-
-# dmesg | grep batman-adv
-
-When investigating problems with your mesh network it is some-
-times necessary to see more detail debug messages. This must be
-enabled when compiling the batman-adv module. When building bat-
-man-adv as part of kernel, use "make menuconfig" and enable the
-option "B.A.T.M.A.N. debugging".
-
-Those additional debug messages can be accessed using a special
-file in debugfs
-
-# cat /sys/kernel/debug/batman_adv/bat0/log
-
-The additional debug output is by default disabled. It can be en-
-abled during run time. Following log_levels are defined:
-
- 0 - All debug output disabled
- 1 - Enable messages related to routing / flooding / broadcasting
- 2 - Enable messages related to route added / changed / deleted
- 4 - Enable messages related to translation table operations
- 8 - Enable messages related to bridge loop avoidance
- 16 - Enable messages related to DAT, ARP snooping and parsing
- 32 - Enable messages related to network coding
- 64 - Enable messages related to multicast
-128 - Enable messages related to throughput meter
-255 - Enable all messages
-
-The debug output can be changed at runtime using the file
-/sys/class/net/bat0/mesh/log_level. e.g.
-
-# echo 6 > /sys/class/net/bat0/mesh/log_level
-
-will enable debug messages for when routes change.
-
-Counters for different types of packets entering and leaving the
-batman-adv module are available through ethtool:
-
-# ethtool --statistics bat0
-
-
-BATCTL
-------
-
-As batman advanced operates on layer 2 all hosts participating in
-the virtual switch are completely transparent for all protocols
-above layer 2. Therefore the common diagnosis tools do not work
-as expected. To overcome these problems batctl was created. At
-the moment the batctl contains ping, traceroute, tcpdump and
-interfaces to the kernel module settings.
-
-For more information, please see the manpage (man batctl).
-
-batctl is available on https://www.open-mesh.org/
-
-
-CONTACT
--------
-
-Please send us comments, experiences, questions, anything :)
-
-IRC: #batman on irc.freenode.org
-Mailing-list: b.a.t.m.a.n@open-mesh.org (optional subscription
- at https://lists.open-mesh.org/mm/listinfo/b.a.t.m.a.n)
-
-You can also contact the Authors:
-
-Marek Lindner <mareklindner@neomailbox.ch>
-Simon Wunderlich <sw@simonwunderlich.de>
diff --git a/Documentation/networking/dpaa.txt b/Documentation/networking/dpaa.txt
index 76e016d4d344..f88194f71c54 100644
--- a/Documentation/networking/dpaa.txt
+++ b/Documentation/networking/dpaa.txt
@@ -13,6 +13,7 @@ Contents
- Configuring DPAA Ethernet in your kernel
- DPAA Ethernet Frame Processing
- DPAA Ethernet Features
+ - DPAA IRQ Affinity and Receive Side Scaling
- Debugging
DPAA Ethernet Overview
@@ -147,7 +148,10 @@ gradually.
The driver has Rx and Tx checksum offloading for UDP and TCP. Currently the Rx
checksum offload feature is enabled by default and cannot be controlled through
-ethtool.
+ethtool. Also, rx-flow-hash and rx-hashing were added. The addition of RSS
+provides a big performance boost in forwarding scenarios, allowing
+different traffic flows received by one interface to be processed by different
+CPUs in parallel.
The driver has support for multiple prioritized Tx traffic classes. Priorities
range from 0 (lowest) to 3 (highest). These are mapped to HW workqueues with
@@ -166,6 +170,68 @@ classes as follows:
tc qdisc add dev <int> root handle 1: \
mqprio num_tc 4 map 0 0 0 0 1 1 1 1 2 2 2 2 3 3 3 3 hw 1
+DPAA IRQ Affinity and Receive Side Scaling
+==========================================
+
+Traffic coming on the DPAA Rx queues or on the DPAA Tx confirmation
+queues is seen by the CPU as ingress traffic on a certain portal.
+The DPAA QMan portal interrupts are affined each to a certain CPU.
+The same portal interrupt services all the QMan portal consumers.
+
+By default the DPAA Ethernet driver enables RSS, making use of the
+DPAA FMan Parser and Keygen blocks to distribute traffic on 128
+hardware frame queues using a hash on IP v4/v6 source and destination
+and L4 source and destination ports, if present in the received frame.
+When RSS is disabled, all traffic received by a certain interface is
+received on the default Rx frame queue. The default DPAA Rx frame
+queues are configured to put the received traffic into a pool channel
+that allows any available CPU portal to dequeue the ingress traffic.
+The default frame queues have the HOLDACTIVE option set, ensuring that
+traffic bursts from a certain queue are serviced by the same CPU.
+This ensures a very low rate of frame reordering. A drawback of this
+is that only one CPU at a time can service the traffic received by a
+certain interface when RSS is not enabled.
+
+To implement RSS, the DPAA Ethernet driver allocates an extra set of
+128 Rx frame queues that are configured to dedicated channels, in a
+round-robin manner. The mapping of the frame queues to CPUs is now
+hardcoded; there is no indirection table to move traffic for a certain
+FQ (hash result) to another CPU. The ingress traffic arriving on one
+of these frame queues will arrive at the same portal and will always
+be processed by the same CPU. This ensures intra-flow order preservation
+and workload distribution for multiple traffic flows.
+
+RSS can be turned off for a certain interface using ethtool, i.e.
+
+ # ethtool -N fm1-mac9 rx-flow-hash tcp4 ""
+
+To turn it back on, one needs to set rx-flow-hash for tcp4/6 or udp4/6:
+
+ # ethtool -N fm1-mac9 rx-flow-hash udp4 sfdn
+
+There is no independent control for individual protocols; any command
+run for one of tcp4|udp4|ah4|esp4|sctp4|tcp6|udp6|ah6|esp6|sctp6 is
+going to control the rx-flow-hashing for all protocols on that interface.
+
+Besides using the FMan Keygen computed hash for spreading traffic on the
+128 Rx FQs, the DPAA Ethernet driver also sets the skb hash value when
+the NETIF_F_RXHASH feature is on (active by default). This can be turned
+on or off through ethtool, i.e.:
+
+ # ethtool -K fm1-mac9 rx-hashing off
+ # ethtool -k fm1-mac9 | grep hash
+ receive-hashing: off
+ # ethtool -K fm1-mac9 rx-hashing on
+ Actual changes:
+ receive-hashing: on
+ # ethtool -k fm1-mac9 | grep hash
+ receive-hashing: on
+
+Please note that Rx hashing depends upon the rx-flow-hashing being on
+for that interface - turning off rx-flow-hashing will also disable the
+rx-hashing (without ethtool reporting it as off as that depends on the
+NETIF_F_RXHASH feature flag).
+
Debugging
=========
diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt
index b69b205501de..e5e33bac2068 100644
--- a/Documentation/networking/filter.txt
+++ b/Documentation/networking/filter.txt
@@ -596,8 +596,8 @@ skb pointer). All constraints and restrictions from bpf_check_classic() apply
before a conversion to the new layout is being done behind the scenes!
Currently, the classic BPF format is being used for JITing on most 32-bit
-architectures, whereas x86-64, aarch64, s390x, powerpc64, sparc64 perform JIT
-compilation from eBPF instruction set.
+architectures, whereas x86-64, aarch64, s390x, powerpc64, sparc64, arm32 perform
+JIT compilation from eBPF instruction set.
Some core changes of the new internal format:
@@ -793,7 +793,7 @@ Some core changes of the new internal format:
bpf_exit
After the call the registers R1-R5 contain junk values and cannot be read.
- In the future an eBPF verifier can be used to validate internal BPF programs.
+ An in-kernel eBPF verifier is used to validate internal BPF programs.
Also in the new design, eBPF is limited to 4096 insns, which means that any
program will terminate quickly and will only call a fixed number of kernel
@@ -906,6 +906,10 @@ If BPF_CLASS(code) == BPF_JMP, BPF_OP(code) is one of:
BPF_JSGE 0x70 /* eBPF only: signed '>=' */
BPF_CALL 0x80 /* eBPF only: function call */
BPF_EXIT 0x90 /* eBPF only: function return */
+ BPF_JLT 0xa0 /* eBPF only: unsigned '<' */
+ BPF_JLE 0xb0 /* eBPF only: unsigned '<=' */
+ BPF_JSLT 0xc0 /* eBPF only: signed '<' */
+ BPF_JSLE 0xd0 /* eBPF only: signed '<=' */
So BPF_ADD | BPF_X | BPF_ALU means 32-bit addition in both classic BPF
and eBPF. There are only two registers in classic BPF, so it means A += X.
@@ -1017,7 +1021,7 @@ At the start of the program the register R1 contains a pointer to context
and has type PTR_TO_CTX.
If verifier sees an insn that does R2=R1, then R2 has now type
PTR_TO_CTX as well and can be used on the right hand side of expression.
-If R1=PTR_TO_CTX and insn is R2=R1+R1, then R2=UNKNOWN_VALUE,
+If R1=PTR_TO_CTX and insn is R2=R1+R1, then R2=SCALAR_VALUE,
since addition of two valid pointers makes invalid pointer.
(In 'secure' mode verifier will reject any type of pointer arithmetic to make
sure that kernel addresses don't leak to unprivileged users)
@@ -1039,7 +1043,7 @@ is a correct program. If there was R1 instead of R6, it would have
been rejected.
load/store instructions are allowed only with registers of valid types, which
-are PTR_TO_CTX, PTR_TO_MAP, FRAME_PTR. They are bounds and alignment checked.
+are PTR_TO_CTX, PTR_TO_MAP, PTR_TO_STACK. They are bounds and alignment checked.
For example:
bpf_mov R1 = 1
bpf_mov R2 = 2
@@ -1058,7 +1062,7 @@ intends to load a word from address R6 + 8 and store it into R0
If R6=PTR_TO_CTX, via is_valid_access() callback the verifier will know
that offset 8 of size 4 bytes can be accessed for reading, otherwise
the verifier will reject the program.
-If R6=FRAME_PTR, then access should be aligned and be within
+If R6=PTR_TO_STACK, then access should be aligned and be within
stack bounds, which are [-MAX_BPF_STACK, 0). In this example offset is 8,
so it will fail verification, since it's out of bounds.
@@ -1069,7 +1073,7 @@ For example:
bpf_ld R0 = *(u32 *)(R10 - 4)
bpf_exit
is invalid program.
-Though R10 is correct read-only register and has type FRAME_PTR
+Though R10 is correct read-only register and has type PTR_TO_STACK
and R10 - 4 is within stack bounds, there were no stores into that location.
Pointer register spill/fill is tracked as well, since four (R6-R9)
@@ -1094,6 +1098,71 @@ all use cases.
See details of eBPF verifier in kernel/bpf/verifier.c
+Register value tracking
+-----------------------
+In order to determine the safety of an eBPF program, the verifier must track
+the range of possible values in each register and also in each stack slot.
+This is done with 'struct bpf_reg_state', defined in include/linux/
+bpf_verifier.h, which unifies tracking of scalar and pointer values. Each
+register state has a type, which is either NOT_INIT (the register has not been
+written to), SCALAR_VALUE (some value which is not usable as a pointer), or a
+pointer type. The types of pointers describe their base, as follows:
+ PTR_TO_CTX Pointer to bpf_context.
+ CONST_PTR_TO_MAP Pointer to struct bpf_map. "Const" because arithmetic
+ on these pointers is forbidden.
+ PTR_TO_MAP_VALUE Pointer to the value stored in a map element.
+ PTR_TO_MAP_VALUE_OR_NULL
+ Either a pointer to a map value, or NULL; map accesses
+ (see section 'eBPF maps', below) return this type,
+ which becomes a PTR_TO_MAP_VALUE when checked != NULL.
+ Arithmetic on these pointers is forbidden.
+ PTR_TO_STACK Frame pointer.
+ PTR_TO_PACKET skb->data.
+ PTR_TO_PACKET_END skb->data + headlen; arithmetic forbidden.
+However, a pointer may be offset from this base (as a result of pointer
+arithmetic), and this is tracked in two parts: the 'fixed offset' and 'variable
+offset'. The former is used when an exactly-known value (e.g. an immediate
+operand) is added to a pointer, while the latter is used for values which are
+not exactly known. The variable offset is also used in SCALAR_VALUEs, to track
+the range of possible values in the register.
+The verifier's knowledge about the variable offset consists of:
+* minimum and maximum values as unsigned
+* minimum and maximum values as signed
+* knowledge of the values of individual bits, in the form of a 'tnum': a u64
+'mask' and a u64 'value'. 1s in the mask represent bits whose value is unknown;
+1s in the value represent bits known to be 1. Bits known to be 0 have 0 in both
+mask and value; no bit should ever be 1 in both. For example, if a byte is read
+into a register from memory, the register's top 56 bits are known zero, while
+the low 8 are unknown - which is represented as the tnum (0x0; 0xff). If we
+then OR this with 0x40, we get (0x40; 0xbf), then if we add 1 we get (0x0;
+0x1ff), because of potential carries.
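+
+As a simplified sketch, modelled on the helpers in kernel/bpf/tnum.c (the
+in-tree code is the authority), the OR and ADD rules used in the example
+above look roughly like:
+
+  struct tnum { u64 value; u64 mask; };
+
+  static struct tnum tnum_or(struct tnum a, struct tnum b)
+  {
+          u64 v = a.value | b.value;      /* bits known to be 1 in either side */
+          u64 mu = a.mask | b.mask;       /* bits unknown in either side */
+
+          return (struct tnum){ .value = v, .mask = mu & ~v };
+  }
+
+  static struct tnum tnum_add(struct tnum a, struct tnum b)
+  {
+          u64 sm = a.mask + b.mask;
+          u64 sv = a.value + b.value;
+          u64 sigma = sm + sv;
+          u64 chi = sigma ^ sv;           /* bit positions a carry may reach */
+          u64 mu = chi | a.mask | b.mask;
+
+          return (struct tnum){ .value = sv & ~mu, .mask = mu };
+  }
+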
+Besides arithmetic, the register state can also be updated by conditional
+branches. For instance, if a SCALAR_VALUE is compared > 8, in the 'true' branch
+it will have a umin_value (unsigned minimum value) of 9, whereas in the 'false'
+branch it will have a umax_value of 8. A signed compare (with BPF_JSGT or
+BPF_JSGE) would instead update the signed minimum/maximum values. Information
+from the signed and unsigned bounds can be combined; for instance if a value is
+first tested < 8 and then tested s> 4, the verifier will conclude that the value
+is also > 4 and s< 8, since the bounds prevent crossing the sign boundary.
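+
+As a rough sketch of such an update (loosely based on the verifier's
+reg_set_min_max(); the real code handles many more cases), for an unsigned
+"if (reg > val) goto ..." test against a known constant val:
+
+  /* fragment from the verifier's branch handling, reg compared against val */
+  case BPF_JGT:
+          /* taken branch: reg is known to be at least val + 1 */
+          true_reg->umin_value = max(true_reg->umin_value, val + 1);
+          /* fall-through branch: reg is known to be at most val */
+          false_reg->umax_value = min(false_reg->umax_value, val);
+          break;
+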
+PTR_TO_PACKETs with a variable offset part have an 'id', which is common to all
+pointers sharing that same variable offset. This is important for packet range
+checks: after adding some variable to a packet pointer, if you then copy it to
+another register and (say) add a constant 4, both registers will share the same
+'id' but one will have a fixed offset of +4. Then if it is bounds-checked and
+found to be less than a PTR_TO_PACKET_END, the other register is now known to
+have a safe range of at least 4 bytes. See 'Direct packet access', below, for
+more on PTR_TO_PACKET ranges.
+The 'id' field is also used on PTR_TO_MAP_VALUE_OR_NULL, common to all copies of
+the pointer returned from a map lookup. This means that when one copy is
+checked and found to be non-NULL, all copies can become PTR_TO_MAP_VALUEs.
+As well as range-checking, the tracked information is also used for enforcing
+alignment of pointer accesses. For instance, on most systems the packet pointer
+is 2 bytes after a 4-byte alignment. If a program adds 14 bytes to that to jump
+over the Ethernet header, then reads IHL and adds (IHL * 4), the resulting
+pointer will have a variable offset known to be 4n+2 for some n, so adding the 2
+bytes (NET_IP_ALIGN) gives a 4-byte alignment and so word-sized accesses through
+that pointer are safe.
+
Direct packet access
--------------------
In cls_bpf and act_bpf programs the verifier allows direct access to the packet
@@ -1121,7 +1190,7 @@ it now points to 'skb->data + 14' and accessible range is [R5, R5 + 14 - 14)
which is zero bytes.
More complex packet access may look like:
- R0=imm1 R1=ctx R3=pkt(id=0,off=0,r=14) R4=pkt_end R5=pkt(id=0,off=14,r=14) R10=fp
+ R0=inv1 R1=ctx R3=pkt(id=0,off=0,r=14) R4=pkt_end R5=pkt(id=0,off=14,r=14) R10=fp
6: r0 = *(u8 *)(r3 +7) /* load 7th byte from the packet */
7: r4 = *(u8 *)(r3 +12)
8: r4 *= 14
@@ -1135,26 +1204,31 @@ More complex packet access may look like:
16: r2 += 8
17: r1 = *(u32 *)(r1 +80) /* load skb->data_end */
18: if r2 > r1 goto pc+2
- R0=inv56 R1=pkt_end R2=pkt(id=2,off=8,r=8) R3=pkt(id=2,off=0,r=8) R4=inv52 R5=pkt(id=0,off=14,r=14) R10=fp
+ R0=inv(id=0,umax_value=255,var_off=(0x0; 0xff)) R1=pkt_end R2=pkt(id=2,off=8,r=8) R3=pkt(id=2,off=0,r=8) R4=inv(id=0,umax_value=3570,var_off=(0x0; 0xfffe)) R5=pkt(id=0,off=14,r=14) R10=fp
19: r1 = *(u8 *)(r3 +4)
The state of the register R3 is R3=pkt(id=2,off=0,r=8)
id=2 means that two 'r3 += rX' instructions were seen, so r3 points to some
offset within a packet and since the program author did
'if (r3 + 8 > r1) goto err' at insn #18, the safe range is [R3, R3 + 8).
-The verifier only allows 'add' operation on packet registers. Any other
-operation will set the register state to 'unknown_value' and it won't be
+The verifier only allows 'add'/'sub' operations on packet registers. Any other
+operation will set the register state to 'SCALAR_VALUE' and it won't be
available for direct packet access.
Operation 'r3 += rX' may overflow and become less than original skb->data,
-therefore the verifier has to prevent that. So it tracks the number of
-upper zero bits in all 'uknown_value' registers, so when it sees
-'r3 += rX' instruction and rX is more than 16-bit value, it will error as:
-"cannot add integer value with N upper zero bits to ptr_to_packet"
+therefore the verifier has to prevent that. So when it sees 'r3 += rX'
+instruction and rX is more than 16-bit value, any subsequent bounds-check of r3
+against skb->data_end will not give us 'range' information, so attempts to read
+through the pointer will give "invalid access to packet" error.
Ex. after insn 'r4 = *(u8 *)(r3 +12)' (insn #7 above) the state of r4 is
-R4=inv56 which means that upper 56 bits on the register are guaranteed
-to be zero. After insn 'r4 *= 14' the state becomes R4=inv52, since
-multiplying 8-bit value by constant 14 will keep upper 52 bits as zero.
-Similarly 'r2 >>= 48' will make R2=inv48, since the shift is not sign
-extending. This logic is implemented in evaluate_reg_alu() function.
+R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff)) which means that upper 56 bits
+of the register are guaranteed to be zero, and nothing is known about the lower
+8 bits. After insn 'r4 *= 14' the state becomes
+R4=inv(id=0,umax_value=3570,var_off=(0x0; 0xfffe)), since multiplying an 8-bit
+value by constant 14 will keep upper 52 bits as zero, also the least significant
+bit will be zero as 14 is even. Similarly 'r2 >>= 48' will make
+R2=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff)), since the shift is not sign
+extending. This logic is implemented in adjust_reg_min_max_vals() function,
+which calls adjust_ptr_min_max_vals() for adding pointer to scalar (or vice
+versa) and adjust_scalar_min_max_vals() for operations on two scalars.
The end result is that bpf program author can access packet directly
using normal C code as:
@@ -1214,6 +1288,22 @@ The map is defined by:
. key size in bytes
. value size in bytes
+Pruning
+-------
+The verifier does not actually walk all possible paths through the program. For
+each new branch to analyse, the verifier looks at all the states it's previously
+been in when at this instruction. If any of them contain the current state as a
+subset, the branch is 'pruned' - that is, the fact that the previous state was
+accepted implies the current state would be as well. For instance, if in the
+previous state, r1 held a packet-pointer, and in the current state, r1 holds a
+packet-pointer with a range as long or longer and at least as strict an
+alignment, then r1 is safe. Similarly, if r2 was NOT_INIT before then it can't
+have been used by any path from that point, so any value in r2 (including
+another NOT_INIT) is safe. The implementation is in the function regsafe().
+Pruning considers not only the registers but also the stack (and any spilled
+registers it may hold). They must all be safe for the branch to be pruned.
+This is implemented in states_equal().
+
Understanding eBPF verifier messages
------------------------------------
diff --git a/Documentation/networking/hinic.txt b/Documentation/networking/hinic.txt
new file mode 100644
index 000000000000..989366a4039c
--- /dev/null
+++ b/Documentation/networking/hinic.txt
@@ -0,0 +1,125 @@
+Linux Kernel Driver for Huawei Intelligent NIC(HiNIC) family
+============================================================
+
+Overview:
+=========
+HiNIC is a network interface card for the Data Center Area.
+
+The driver supports a range of link-speed devices (10GbE, 25GbE, 40GbE, etc.).
+The driver also supports a negotiated and extendable feature set.
+
+Some HiNIC devices support SR-IOV. This driver is used for Physical Function
+(PF).
+
+HiNIC devices support MSI-X interrupt vector for each Tx/Rx queue and
+adaptive interrupt moderation.
+
+HiNIC devices also support various offload features such as checksum offload,
+TCP Transmit Segmentation Offload (TSO), Receive-Side Scaling (RSS) and
+Large Receive Offload (LRO).
+
+
+Supported PCI vendor ID/device IDs:
+===================================
+
+19e5:1822 - HiNIC PF
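+
+As a sketch, the matching entry in the driver's PCI ID table (kept in
+hinic_pci_tbl.h; the exact identifier names used by the driver may differ)
+would look roughly like:
+
+  static const struct pci_device_id hinic_pci_table[] = {
+          { PCI_VDEVICE(HUAWEI, 0x1822) },        /* 19e5:1822 - HiNIC PF */
+          { 0, }
+  };
+  MODULE_DEVICE_TABLE(pci, hinic_pci_table);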
+
+
+Driver Architecture and Source Code:
+====================================
+
+hinic_dev - Implements a logical network device that is independent of
+specific HW details such as HW data structure formats.
+
+hinic_hwdev - Implements the HW details of the device and includes the
+components for accessing the PCI NIC.
+
+hinic_hwdev contains the following components:
+===============================================
+
+HW Interface:
+=============
+
+The interface for accessing the pci device (DMA memory and PCI BARs).
+(hinic_hw_if.c, hinic_hw_if.h)
+
+Configuration Status Registers Area that describes the HW Registers on the
+configuration and status BAR0. (hinic_hw_csr.h)
+
+MGMT components:
+================
+
+Asynchronous Event Queues(AEQs) - The event queues for receiving messages from
+the MGMT modules on the cards. (hinic_hw_eqs.c, hinic_hw_eqs.h)
+
+Application Programmable Interface commands(API CMD) - Interface for sending
+MGMT commands to the card. (hinic_hw_api_cmd.c, hinic_hw_api_cmd.h)
+
+Management (MGMT) - the PF to MGMT channel that uses API CMD for sending MGMT
+commands to the card and receives notifications from the MGMT modules on the
+card by AEQs. Also set the addresses of the IO CMDQs in HW.
+(hinic_hw_mgmt.c, hinic_hw_mgmt.h)
+
+IO components:
+==============
+
+Completion Event Queues(CEQs) - The completion Event Queues that describe IO
+tasks that are finished. (hinic_hw_eqs.c, hinic_hw_eqs.h)
+
+Work Queues(WQ) - Contain the memory and operations for use by CMD queues and
+the Queue Pairs. The WQ is a Memory Block in a Page. The Block contains
+pointers to Memory Areas that are the Memory for the Work Queue Elements(WQEs).
+(hinic_hw_wq.c, hinic_hw_wq.h)
+
+Command Queues(CMDQ) - The queues for sending commands for IO management and is
+used to set the QPs addresses in HW. The commands completion events are
+accumulated on the CEQ that is configured to receive the CMDQ completion events.
+(hinic_hw_cmdq.c, hinic_hw_cmdq.h)
+
+Queue Pairs(QPs) - The HW Receive and Send queues for Receiving and Transmitting
+Data. (hinic_hw_qp.c, hinic_hw_qp.h, hinic_hw_qp_ctxt.h)
+
+IO - de/constructs all the IO components. (hinic_hw_io.c, hinic_hw_io.h)
+
+HW device:
+==========
+
+HW device - de/constructs the HW Interface, the MGMT components on the
+initialization of the driver and the IO components on the case of Interface
+UP/DOWN Events. (hinic_hw_dev.c, hinic_hw_dev.h)
+
+
+hinic_dev contains the following components:
+===============================================
+
+PCI ID table - Contains the supported PCI Vendor/Device IDs.
+(hinic_pci_tbl.h)
+
+Port Commands - Send commands to the HW device for port management
+(MAC, Vlan, MTU, ...). (hinic_port.c, hinic_port.h)
+
+Tx Queues - Logical Tx Queues that use the HW Send Queues for transmit.
+The Logical Tx queue is not dependent on the format of the HW Send Queue.
+(hinic_tx.c, hinic_tx.h)
+
+Rx Queues - Logical Rx Queues that use the HW Receive Queues for receive.
+The Logical Rx queue is not dependent on the format of the HW Receive Queue.
+(hinic_rx.c, hinic_rx.h)
+
+hinic_dev - de/constructs the Logical Tx and Rx Queues.
+(hinic_main.c, hinic_dev.h)
+
+
+Miscellaneous:
+=============
+
+Common functions that are used by HW and Logical Device.
+(hinic_common.c, hinic_common.h)
+
+
+Support
+=======
+
+If an issue is identified with the released source code on the supported kernel
+with a supported adapter, email the specific information related to the issue to
+aviad.krawczyk@huawei.com.
diff --git a/Documentation/networking/ieee802154.txt b/Documentation/networking/ieee802154.txt
index c4114346f054..057e9fdbfac9 100644
--- a/Documentation/networking/ieee802154.txt
+++ b/Documentation/networking/ieee802154.txt
@@ -84,17 +84,17 @@ Device drivers API
==================
The include/net/mac802154.h defines following functions:
- - struct ieee802154_dev *ieee802154_alloc_device
- (size_t priv_size, struct ieee802154_ops *ops):
- allocation of IEEE 802.15.4 compatible device
+ - struct ieee802154_hw *
+ ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops):
+ allocation of IEEE 802.15.4 compatible hardware device
- - void ieee802154_free_device(struct ieee802154_dev *dev):
- freeing allocated device
+ - void ieee802154_free_hw(struct ieee802154_hw *hw):
+ freeing allocated hardware device
- - int ieee802154_register_device(struct ieee802154_dev *dev):
- register PHY in the system
+ - int ieee802154_register_hw(struct ieee802154_hw *hw):
+ register the PHY, i.e. the allocated hardware device, in the system
- - void ieee802154_unregister_device(struct ieee802154_dev *dev):
+ - void ieee802154_unregister_hw(struct ieee802154_hw *hw):
freeing registered PHY
Moreover IEEE 802.15.4 device operations structure should be filled.
diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst
index b5bd87e01f52..66e620866245 100644
--- a/Documentation/networking/index.rst
+++ b/Documentation/networking/index.rst
@@ -6,6 +6,7 @@ Contents:
.. toctree::
:maxdepth: 2
+ batman-adv
kapi
z8530book
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 974ab47ae53a..b3345d0fe0a6 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -109,7 +109,10 @@ neigh/default/unres_qlen_bytes - INTEGER
queued for each unresolved address by other network layers.
(added in linux 3.3)
Setting negative value is meaningless and will return error.
- Default: 65536 Bytes(64KB)
+ Default: SK_WMEM_MAX (same as net.core.wmem_default).
+ Exact value depends on architecture and kernel options,
+ but should be enough to allow queuing 256 packets
+ of medium size.
neigh/default/unres_qlen - INTEGER
The maximum number of packets which may be queued for each
@@ -119,7 +122,7 @@ neigh/default/unres_qlen - INTEGER
unexpected packet loss. The current default value is calculated
according to default value of unres_qlen_bytes and true size of
packet.
- Default: 31
+ Default: 101
mtu_expires - INTEGER
Time, in seconds, that cached PMTU information is kept.
@@ -353,12 +356,7 @@ tcp_l3mdev_accept - BOOLEAN
compiled with CONFIG_NET_L3_MASTER_DEV.
tcp_low_latency - BOOLEAN
- If set, the TCP stack makes decisions that prefer lower
- latency as opposed to higher throughput. By default, this
- option is not set meaning that higher throughput is preferred.
- An example of an application where this default should be
- changed would be a Beowulf compute cluster.
- Default: 0
+ This is a legacy option; it has no effect anymore.
tcp_max_orphans - INTEGER
Maximal number of TCP sockets not attached to any user file handle,
@@ -1291,8 +1289,7 @@ tag - INTEGER
xfrm4_gc_thresh - INTEGER
The threshold at which we will start garbage collecting for IPv4
destination cache entries. At twice this value the system will
- refuse new allocations. The value must be set below the flowcache
- limit (4096 * number of online cpus) to take effect.
+ refuse new allocations.
igmp_link_local_mcast_reports - BOOLEAN
Enable IGMP reports for link local multicast groups in the
@@ -1356,6 +1353,15 @@ flowlabel_state_ranges - BOOLEAN
FALSE: disabled
Default: true
+flowlabel_reflect - BOOLEAN
+ Automatically reflect the flow label. Needed for Path MTU
+ Discovery to work with Equal Cost Multipath Routing in anycast
+ environments. See RFC 7690 and:
+ https://tools.ietf.org/html/draft-wang-6man-flow-label-reflection-01
+ TRUE: enabled
+ FALSE: disabled
+ Default: FALSE
+
anycast_src_echo_reply - BOOLEAN
Controls the use of anycast addresses as source addresses for ICMPv6
echo reply
@@ -1778,8 +1784,7 @@ ratelimit - INTEGER
xfrm6_gc_thresh - INTEGER
The threshold at which we will start garbage collecting for IPv6
destination cache entries. At twice this value the system will
- refuse new allocations. The value must be set below the flowcache
- limit (4096 * number of online cpus) to take effect.
+ refuse new allocations.
IPv6 Update by:
diff --git a/Documentation/networking/msg_zerocopy.rst b/Documentation/networking/msg_zerocopy.rst
new file mode 100644
index 000000000000..77f6d7e25cfd
--- /dev/null
+++ b/Documentation/networking/msg_zerocopy.rst
@@ -0,0 +1,257 @@
+
+============
+MSG_ZEROCOPY
+============
+
+Intro
+=====
+
+The MSG_ZEROCOPY flag enables copy avoidance for socket send calls.
+The feature is currently implemented for TCP sockets.
+
+
+Opportunity and Caveats
+-----------------------
+
+Copying large buffers between user process and kernel can be
+expensive. Linux supports various interfaces that eschew copying,
+such as sendpage and splice. The MSG_ZEROCOPY flag extends the
+underlying copy avoidance mechanism to common socket send calls.
+
+Copy avoidance is not a free lunch. As implemented, with page pinning,
+it replaces per byte copy cost with page accounting and completion
+notification overhead. As a result, MSG_ZEROCOPY is generally only
+effective at writes over around 10 KB.
+
+Page pinning also changes system call semantics. It temporarily shares
+the buffer between process and network stack. Unlike with copying, the
+process cannot immediately overwrite the buffer after system call
+return without possibly modifying the data in flight. Kernel integrity
+is not affected, but a buggy program can possibly corrupt its own data
+stream.
+
+The kernel returns a notification when it is safe to modify data.
+Converting an existing application to MSG_ZEROCOPY is not always as
+trivial as just passing the flag, then.
+
+
+More Info
+---------
+
+Much of this document was derived from a longer paper presented at
+netdev 2.1. For more in-depth information see that paper and talk,
+the excellent reporting over at LWN.net or read the original code.
+
+ paper, slides, video
+ https://netdevconf.org/2.1/session.html?debruijn
+
+ LWN article
+ https://lwn.net/Articles/726917/
+
+ patchset
+ [PATCH net-next v4 0/9] socket sendmsg MSG_ZEROCOPY
+ http://lkml.kernel.org/r/20170803202945.70750-1-willemdebruijn.kernel@gmail.com
+
+
+Interface
+=========
+
+Passing the MSG_ZEROCOPY flag is the most obvious step to enable copy
+avoidance, but not the only one.
+
+Socket Setup
+------------
+
+The kernel is permissive when applications pass undefined flags to the
+send system call. By default it simply ignores these. To avoid enabling
+copy avoidance mode for legacy processes that accidentally already pass
+this flag, a process must first signal intent by setting a socket option:
+
+::
+
+ if (setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one)))
+ error(1, errno, "setsockopt zerocopy");
+
+
+Transmission
+------------
+
+The change to send (or sendto, sendmsg, sendmmsg) itself is trivial.
+Pass the new flag.
+
+::
+
+ ret = send(fd, buf, sizeof(buf), MSG_ZEROCOPY);
+
+A zerocopy failure will return -1 with errno ENOBUFS. This happens if
+the socket option was not set, the socket exceeds its optmem limit or
+the user exceeds its ulimit on locked pages.
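+
+A minimal way to handle that failure, as a sketch only, is to retry the same
+write as a regular copying send:
+
+::
+
+ ret = send(fd, buf, sizeof(buf), MSG_ZEROCOPY);
+ if (ret == -1 && errno == ENOBUFS)
+         ret = send(fd, buf, sizeof(buf), 0);    /* fall back to copying */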
+
+
+Mixing copy avoidance and copying
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Many workloads have a mixture of large and small buffers. Because copy
+avoidance is more expensive than copying for small packets, the
+feature is implemented as a flag. It is safe to mix calls with the flag
+with those without.
+
+
+Notifications
+-------------
+
+The kernel has to notify the process when it is safe to reuse a
+previously passed buffer. It queues completion notifications on the
+socket error queue, akin to the transmit timestamping interface.
+
+The notification itself is a simple scalar value. Each socket
+maintains an internal unsigned 32-bit counter. Each send call with
+MSG_ZEROCOPY that successfully sends data increments the counter. The
+counter is not incremented on failure or if called with length zero.
+The counter counts system call invocations, not bytes. It wraps after
+UINT_MAX calls.
+
+
+Notification Reception
+~~~~~~~~~~~~~~~~~~~~~~
+
+The below snippet demonstrates the API. In the simplest case, each
+send syscall is followed by a poll and recvmsg on the error queue.
+
+Reading from the error queue is always a non-blocking operation. The
+poll call is there to block until an error is outstanding. It will set
+POLLERR in its output flags. That flag does not have to be set in the
+events field. Errors are signaled unconditionally.
+
+::
+
+ pfd.fd = fd;
+ pfd.events = 0;
+ if (poll(&pfd, 1, -1) != 1 || !(pfd.revents & POLLERR))
+ error(1, errno, "poll");
+
+ ret = recvmsg(fd, &msg, MSG_ERRQUEUE);
+ if (ret == -1)
+ error(1, errno, "recvmsg");
+
+ read_notification(msg);
+
+The example is for demonstration purposes only. In practice, it is more
+efficient to not wait for notifications, but read without blocking
+every couple of send calls.
+
+Notifications can be processed out of order with other operations on
+the socket. A socket that has an error queued would normally block
+other operations until the error is read. Zerocopy notifications have
+a zero error code, however, to not block send and recv calls.
+
+
+Notification Batching
+~~~~~~~~~~~~~~~~~~~~~
+
+Multiple outstanding packets can be read at once using the recvmmsg
+call. This is often not needed. In each message the kernel returns not
+a single value, but a range. It coalesces consecutive notifications
+while one is outstanding for reception on the error queue.
+
+When a new notification is about to be queued, it checks whether the
+new value extends the range of the notification at the tail of the
+queue. If so, it drops the new notification packet and instead increases
+the range upper value of the outstanding notification.
+
+For protocols that acknowledge data in-order, like TCP, each
+notification can be squashed into the previous one, so that no more
+than one notification is outstanding at any one point.
+
+Ordered delivery is the common case, but not guaranteed. Notifications
+may arrive out of order on retransmission and socket teardown.
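+
+A sketch of consuming such a coalesced notification, using the ``serr``
+pointer obtained as in the parsing snippet below (``release_buffer()`` is a
+hypothetical application helper, not part of the kernel API):
+
+::
+
+ uint32_t lo = serr->ee_info, hi = serr->ee_data;
+ uint32_t range = hi - lo + 1;   /* number of send calls completed */
+ uint32_t i;
+
+ for (i = 0; i < range; i++)
+         release_buffer(lo + i); /* buffer passed in send call number (lo + i) */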
+
+
+Notification Parsing
+~~~~~~~~~~~~~~~~~~~~
+
+The below snippet demonstrates how to parse the control message: the
+read_notification() call in the previous snippet. A notification
+is encoded in the standard error format, sock_extended_err.
+
+The level and type fields in the control data are protocol family
+specific, IP_RECVERR or IPV6_RECVERR.
+
+Error origin is the new type SO_EE_ORIGIN_ZEROCOPY. ee_errno is zero,
+as explained before, to avoid blocking read and write system calls on
+the socket.
+
+The 32-bit notification range is encoded as [ee_info, ee_data]. This
+range is inclusive. Other fields in the struct must be treated as
+undefined, bar for ee_code, as discussed below.
+
+::
+
+ struct sock_extended_err *serr;
+ struct cmsghdr *cm;
+
+ cm = CMSG_FIRSTHDR(msg);
+ if (cm->cmsg_level != SOL_IP ||
+ cm->cmsg_type != IP_RECVERR)
+ error(1, 0, "cmsg");
+
+ serr = (void *) CMSG_DATA(cm);
+ if (serr->ee_errno != 0 ||
+ serr->ee_origin != SO_EE_ORIGIN_ZEROCOPY)
+ error(1, 0, "serr");
+
+ printf("completed: %u..%u\n", serr->ee_info, serr->ee_data);
+
+
+Deferred copies
+~~~~~~~~~~~~~~~
+
+Passing flag MSG_ZEROCOPY is a hint to the kernel to apply copy
+avoidance, and a contract that the kernel will queue a completion
+notification. It is not a guarantee that the copy is elided.
+
+Copy avoidance is not always feasible. Devices that do not support
+scatter-gather I/O cannot send packets made up of kernel generated
+protocol headers plus zerocopy user data. A packet may need to be
+converted to a private copy of data deep in the stack, say to compute
+a checksum.
+
+In all these cases, the kernel returns a completion notification when
+it releases its hold on the shared pages. That notification may arrive
+before the (copied) data is fully transmitted. A zerocopy completion
+notification is not a transmit completion notification, therefore.
+
+Deferred copies can be more expensive than a copy immediately in the
+system call, if the data is no longer warm in the cache. The process
+also incurs notification processing cost for no benefit. For this
+reason, the kernel signals if data was completed with a copy, by
+setting flag SO_EE_CODE_ZEROCOPY_COPIED in field ee_code on return.
+A process may use this signal to stop passing flag MSG_ZEROCOPY on
+subsequent requests on the same socket.
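+
+Building on the parsing snippet above, a sketch of acting on that signal
+(``zerocopy`` being an application-level flag, not part of the kernel API):
+
+::
+
+ if (serr->ee_code & SO_EE_CODE_ZEROCOPY_COPIED)
+         zerocopy = false;       /* data was copied: skip MSG_ZEROCOPY next time */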
+
+
+Implementation
+==============
+
+Loopback
+--------
+
+Data sent to local sockets can be queued indefinitely if the receive
+process does not read its socket. Unbound notification latency is not
+acceptable. For this reason all packets generated with MSG_ZEROCOPY
+that are looped to a local socket will incur a deferred copy. This
+includes looping onto packet sockets (e.g., tcpdump) and tun devices.
+
+
+Testing
+=======
+
+More realistic example code can be found in the kernel source under
+tools/testing/selftests/net/msg_zerocopy.c.
+
+Be cognizant of the loopback constraint. The test can be run between
+a pair of hosts. But if run between a local pair of processes, for
+instance when run with msg_zerocopy.sh between a veth pair across
+namespaces, the test will not show any improvement. For testing, the
+loopback restriction can be temporarily relaxed by making
+skb_orphan_frags_rx identical to skb_orphan_frags.
diff --git a/Documentation/networking/netdev-FAQ.txt b/Documentation/networking/netdev-FAQ.txt
index 247a30ba8e17..cfc66ea72329 100644
--- a/Documentation/networking/netdev-FAQ.txt
+++ b/Documentation/networking/netdev-FAQ.txt
@@ -111,6 +111,14 @@ A: Generally speaking, the patches get triaged quickly (in less than 48h).
patch is a good way to ensure your patch is ignored or pushed to
the bottom of the priority list.
+Q: I submitted multiple versions of the patch series. Should I directly update
+ patchwork for the previous versions of these patch series?
+
+A: No, please don't interfere with the patch status on patchwork; leave it to
+ the maintainer to figure out which is the most recent and current version that
+ should be applied. If there is any doubt, the maintainer will reply and ask
+ what should be done.
+
Q: How can I tell what patches are queued up for backporting to the
various stable releases?
diff --git a/Documentation/networking/netvsc.txt b/Documentation/networking/netvsc.txt
new file mode 100644
index 000000000000..93560fb1170a
--- /dev/null
+++ b/Documentation/networking/netvsc.txt
@@ -0,0 +1,75 @@
+Hyper-V network driver
+======================
+
+Compatibility
+=============
+
+This driver is compatible with Windows Server 2012 R2, 2016 and
+Windows 10.
+
+Features
+========
+
+ Checksum offload
+ ----------------
+ The netvsc driver supports checksum offload as long as the
+ Hyper-V host version does. Windows Server 2016 and Azure
+ support checksum offload for TCP and UDP for both IPv4 and
+ IPv6. Windows Server 2012 only supports checksum offload for TCP.
+
+ Receive Side Scaling
+ --------------------
+ Hyper-V supports receive side scaling. For TCP, packets are
+ distributed among available queues based on IP address and port
+ number.
+
+ For UDP, we can switch UDP hash level between L3 and L4 by ethtool
+ command. UDP over IPv4 and v6 can be set differently. The default
+ hash level is L4. We currently only allow switching TX hash level
+ from within the guests.
+
+ On Azure, fragmented UDP packets have high loss rate with L4
+ hashing. Using L3 hashing is recommended in this case.
+
+ For example, for UDP over IPv4 on eth0:
+ To include UDP port numbers in hashing:
+ ethtool -N eth0 rx-flow-hash udp4 sdfn
+ To exclude UDP port numbers in hashing:
+ ethtool -N eth0 rx-flow-hash udp4 sd
+ To show UDP hash level:
+ ethtool -n eth0 rx-flow-hash udp4
+
+ Generic Receive Offload, aka GRO
+ --------------------------------
+ The driver supports GRO and it is enabled by default. GRO coalesces
+ like packets and significantly reduces CPU usage under heavy Rx
+ load.
+
+ SR-IOV support
+ --------------
+ Hyper-V supports SR-IOV as a hardware acceleration option. If SR-IOV
+ is enabled in both the vSwitch and the guest configuration, then the
+ Virtual Function (VF) device is passed to the guest as a PCI
+ device. In this case, both a synthetic (netvsc) and VF device are
+ visible in the guest OS and both NICs have the same MAC address.
+
+ The VF is enslaved by the netvsc device. The netvsc driver will transparently
+ switch the data path to the VF when it is available and up.
+ Network state (addresses, firewall, etc) should be applied only to the
+ netvsc device; the slave device should not be accessed directly in
+ most cases. The exceptions are if some special queue discipline or
+ flow direction is desired, these should be applied directly to the
+ VF slave device.
+
+ Receive Buffer
+ --------------
+ Packets are received into a receive area which is created when the
+ device is probed. The receive area is broken into MTU-sized chunks,
+ each of which may contain one or more packets. The number of receive
+ sections may be changed via the ethtool Rx ring parameters.
+
+ There is a similar send buffer which is used to aggregate packets
+ for sending. The send area is broken into chunks of 6144 bytes, each
+ of which may contain one or more packets. The send buffer is an
+ optimization; the driver will fall back to a slower method to handle
+ very large packets or when the send buffer area is exhausted.
diff --git a/Documentation/networking/nf_conntrack-sysctl.txt b/Documentation/networking/nf_conntrack-sysctl.txt
index 497d668288f9..433b6724797a 100644
--- a/Documentation/networking/nf_conntrack-sysctl.txt
+++ b/Documentation/networking/nf_conntrack-sysctl.txt
@@ -96,17 +96,6 @@ nf_conntrack_max - INTEGER
Size of connection tracking table. Default value is
nf_conntrack_buckets value * 4.
-nf_conntrack_default_on - BOOLEAN
- 0 - don't register conntrack in new net namespaces
- 1 - register conntrack in new net namespaces (default)
-
- This controls wheter newly created network namespaces have connection
- tracking enabled by default. It will be enabled automatically
- regardless of this setting if the new net namespace requires
- connection tracking, e.g. when NAT rules are created.
- This setting is only visible in initial user namespace, it has no
- effect on existing namespaces.
-
nf_conntrack_tcp_be_liberal - BOOLEAN
0 - disabled (default)
not 0 - enabled
diff --git a/Documentation/networking/rmnet.txt b/Documentation/networking/rmnet.txt
new file mode 100644
index 000000000000..6b341eaf2062
--- /dev/null
+++ b/Documentation/networking/rmnet.txt
@@ -0,0 +1,82 @@
+1. Introduction
+
+The rmnet driver is used to support the Multiplexing and Aggregation
+Protocol (MAP). This protocol is used by all recent chipsets using Qualcomm
+Technologies, Inc. modems.
+
+This driver can be used to register onto any physical network device in
+IP mode. Physical transports include USB, HSIC, PCIe and IP accelerator.
+
+Multiplexing allows for creation of logical netdevices (rmnet devices) to
+handle multiple private data networks (PDN) like a default internet, tethering,
+multimedia messaging service (MMS) or IP media subsystem (IMS). Hardware sends
+packets with MAP headers to rmnet. Based on the multiplexer id, rmnet
+routes to the appropriate PDN after removing the MAP header.
+
+Aggregation is required to achieve high data rates. This involves the hardware
+sending an aggregated bundle of MAP frames. The rmnet driver will de-aggregate
+these MAP frames and send them to the appropriate PDNs.
+
+2. Packet format
+
+a. MAP packet (data / control)
+
+The MAP header has the same endianness as the IP packet.
+
+Packet format -
+
+Bit       0                1          2-7     8 - 15           16 - 31
+Function  Command / Data   Reserved   Pad     Multiplexer ID   Payload length
+Bit       32 - x
+Function  Raw Bytes
+
+The Command (1) / Data (0) bit indicates whether the packet is a MAP command
+or a data packet. Command packets are used for transport level flow control.
+Data packets are standard IP packets.
+
+Reserved bits are usually zeroed out and are to be ignored by the receiver.
+
+Padding is the number of bytes added for 4 byte alignment, if required by the
+hardware.
+
+The Multiplexer ID indicates the PDN on which the data has to be sent.
+
+The payload length includes the padding length but does not include the MAP
+header length.
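+
+As an illustration only, the first 32 bits of the MAP header could be
+described by a C struct along the following lines (the struct and field
+names are hypothetical, the bitfield ordering assumes a little-endian host,
+and the driver's actual definitions may differ):
+
+	struct map_header {
+		u8     pad_len:6;	/* Pad */
+		u8     reserved_bit:1;	/* Reserved */
+		u8     cd_bit:1;	/* Command (1) / Data (0) */
+		u8     mux_id;		/* Multiplexer ID */
+		__be16 pkt_len;		/* Payload length */
+	};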
+
+b. MAP packet (command specific)
+
+Bit       0              1          2-7            8 - 15           16 - 31
+Function  Command        Reserved   Pad            Multiplexer ID   Payload length
+Bit       32 - 39        40 - 45    46 - 47        48 - 63
+Function  Command name   Reserved   Command Type   Reserved
+Bit       64 - 95
+Function  Transaction ID
+Bit       96 - 127
+Function  Command data
+
+Command 1 indicates disabling flow, while 2 indicates enabling flow.
+
+Command types -
+0 is for a MAP command request
+1 is to acknowledge the receipt of a command
+2 is for unsupported commands
+3 is for errors during processing of commands
+
+c. Aggregation
+
+Aggregation is multiple MAP packets (data or command) delivered to
+rmnet in a single linear skb. rmnet will process the individual
+packets and either ACK the MAP command or deliver the IP packet to the
+network stack, as needed.
+
+MAP header|IP Packet|Optional padding|MAP header|IP Packet|Optional padding....
+MAP header|IP Packet|Optional padding|MAP header|Command Packet|Optional pad...
+
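+As a rough illustration, a de-aggregation loop over such a linear buffer
+could look like the sketch below (it reuses the hypothetical map_header
+struct shown earlier, ignores command handling and is not the driver's
+actual code):
+
+	void deaggregate(const u8 *buf, size_t len)
+	{
+		while (len >= sizeof(struct map_header)) {
+			const struct map_header *hdr = (const void *)buf;
+			size_t frame = sizeof(*hdr) + ntohs(hdr->pkt_len);
+
+			if (frame > len)
+				break;	/* truncated frame */
+			/* route the MAP frame (IP packet or command) to the
+			 * PDN selected by hdr->mux_id, minus trailing pad
+			 */
+			buf += frame;
+			len -= frame;
+		}
+	}
+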
+3. Userspace configuration
+
+rmnet userspace configuration is done through the netlink library librmnetctl
+and the command line utility rmnetcli. The utility is hosted in the codeaurora
+forum git. The driver uses rtnl_link_ops for communication.
+
+https://source.codeaurora.org/quic/la/platform/vendor/qcom-opensource/dataservices/tree/rmnetctl
diff --git a/Documentation/networking/rxrpc.txt b/Documentation/networking/rxrpc.txt
index 8c70ba5dee4d..810620153a44 100644
--- a/Documentation/networking/rxrpc.txt
+++ b/Documentation/networking/rxrpc.txt
@@ -818,10 +818,15 @@ The kernel interface functions are as follows:
(*) Send data through a call.
+ typedef void (*rxrpc_notify_end_tx_t)(struct sock *sk,
+ unsigned long user_call_ID,
+ struct sk_buff *skb);
+
int rxrpc_kernel_send_data(struct socket *sock,
struct rxrpc_call *call,
struct msghdr *msg,
- size_t len);
+ size_t len,
+ rxrpc_notify_end_tx_t notify_end_rx);
This is used to supply either the request part of a client call or the
reply part of a server call. msg.msg_iovlen and msg.msg_iov specify the
@@ -832,6 +837,11 @@ The kernel interface functions are as follows:
The msg must not specify a destination address, control data or any flags
other than MSG_MORE. len is the total amount of data to transmit.
+ notify_end_rx can be NULL or it can be used to specify a function to be
+ called when the call changes state to end the Tx phase. This function is
+ called with the call-state spinlock held to prevent any reply or final ACK
+ from being delivered first.
+
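+ As an illustrative sketch only (the callback and variable names here are
+ hypothetical, not part of the API), a kernel service might do:
+
+	static void my_notify_end_tx(struct sock *sk,
+				     unsigned long user_call_ID,
+				     struct sk_buff *skb)
+	{
+		/* Runs with the call-state lock held as the Tx phase of the
+		 * call ends.
+		 */
+	}
+
+	ret = rxrpc_kernel_send_data(sock, call, &msg, len, my_notify_end_tx);
+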
(*) Receive data from a call.
int rxrpc_kernel_recv_data(struct socket *sock,
@@ -965,6 +975,51 @@ The kernel interface functions are as follows:
size should be set when the call is begun. tx_total_len may not be less
than zero.
+ (*) Check to see the completion state of a call so that the caller can assess
+ whether it needs to be retried.
+
+ enum rxrpc_call_completion {
+ RXRPC_CALL_SUCCEEDED,
+ RXRPC_CALL_REMOTELY_ABORTED,
+ RXRPC_CALL_LOCALLY_ABORTED,
+ RXRPC_CALL_LOCAL_ERROR,
+ RXRPC_CALL_NETWORK_ERROR,
+ };
+
+ int rxrpc_kernel_check_call(struct socket *sock, struct rxrpc_call *call,
+ enum rxrpc_call_completion *_compl,
+ u32 *_abort_code);
+
+ On return, -EINPROGRESS will be returned if the call is still ongoing; if
+ it is finished, *_compl will be set to indicate the manner of completion
+ and *_abort_code will be set to any abort code that occurred. 0 will be
+ returned on a successful completion, -ECONNABORTED will be returned if the
+ client failed due to a remote abort, and anything else will return an
+ appropriate error code.
+
+ The caller should look at this information to decide if it's worth
+ retrying the call.
+
+ (*) Retry a client call.
+
+ int rxrpc_kernel_retry_call(struct socket *sock,
+ struct rxrpc_call *call,
+ struct sockaddr_rxrpc *srx,
+ struct key *key);
+
+ This attempts to partially reinitialise a call and submit it again whilst
+ reusing the original call's Tx queue to avoid the need to repackage and
+ re-encrypt the data to be sent. call indicates the call to retry, srx the
+ new address to send it to and key the encryption key to use for signing or
+ encrypting the packets.
+
+ For this to work, the first Tx data packet must still be in the transmit
+ queue, and currently this is only permitted for local and network errors
+ and the call must not have been aborted. Any partially constructed Tx
+ packet is left as is and can continue being filled afterwards.
+
+ It returns 0 if the call was requeued and an error otherwise.
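+
+ Putting the last two functions together, a caller might use a sketch like
+ the following (illustrative only; where the srx address and key come from
+ is up to the caller):
+
+	static int my_maybe_retry(struct socket *sock, struct rxrpc_call *call,
+				  struct sockaddr_rxrpc *srx, struct key *key)
+	{
+		enum rxrpc_call_completion compl;
+		u32 abort_code;
+		int ret;
+
+		ret = rxrpc_kernel_check_call(sock, call, &compl, &abort_code);
+		if (ret == -EINPROGRESS)
+			return ret;	/* still in progress, nothing to do */
+
+		if (compl == RXRPC_CALL_LOCAL_ERROR ||
+		    compl == RXRPC_CALL_NETWORK_ERROR)
+			return rxrpc_kernel_retry_call(sock, call, srx, key);
+
+		return ret;
+	}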
+
=======================
CONFIGURABLE PARAMETERS
diff --git a/Documentation/networking/strparser.txt b/Documentation/networking/strparser.txt
index a0bf573dfa61..13081b3decef 100644
--- a/Documentation/networking/strparser.txt
+++ b/Documentation/networking/strparser.txt
@@ -1,45 +1,107 @@
-Stream Parser
--------------
+Stream Parser (strparser)
+
+Introduction
+============
The stream parser (strparser) is a utility that parses messages of an
-application layer protocol running over a TCP connection. The stream
+application layer protocol running over a data stream. The stream
parser works in conjunction with an upper layer in the kernel to provide
kernel support for application layer messages. For instance, Kernel
Connection Multiplexor (KCM) uses the Stream Parser to parse messages
using a BPF program.
+The strparser works in one of two modes: receive callback or general
+mode.
+
+In receive callback mode, the strparser is called from the data_ready
+callback of a TCP socket. Messages are parsed and delivered as they are
+received on the socket.
+
+In general mode, a sequence of skbs is fed to strparser from an
+outside source. Messages are parsed and delivered as the sequence is
+processed. This mode allows strparser to be applied to arbitrary
+streams of data.
+
Interface
----------
+=========
The API includes a context structure, a set of callbacks, utility
-functions, and a data_ready function. The callbacks include
-a parse_msg function that is called to perform parsing (e.g.
-BPF parsing in case of KCM), and a rcv_msg function that is called
-when a full message has been completed.
+functions, and a data_ready function for receive callback mode. The
+callbacks include a parse_msg function that is called to perform
+parsing (e.g. BPF parsing in case of KCM), and a rcv_msg function
+that is called when a full message has been completed.
+
+Functions
+=========
+
+strp_init(struct strparser *strp, struct sock *sk,
+ const struct strp_callbacks *cb)
+
+ Called to initialize a stream parser. strp is a struct of type
+ strparser that is allocated by the upper layer. sk is the TCP
+ socket associated with the stream parser for use with receive
+ callback mode; in general mode this is set to NULL. Callbacks
+ are called by the stream parser (the callbacks are listed below).
+
+void strp_pause(struct strparser *strp)
+
+ Temporarily pause a stream parser. Message parsing is suspended
+ and no new messages are delivered to the upper layer.
+
+void strp_unpause(struct strparser *strp)
+
+ Unpause a paused stream parser.
+
+void strp_stop(struct strparser *strp);
+
+ strp_stop is called to completely stop stream parser operations.
+ This is called internally when the stream parser encounters an
+ error, and it is called from the upper layer to stop parsing
+ operations.
+
+void strp_done(struct strparser *strp);
+
+ strp_done is called to release any resources held by the stream
+ parser instance. This must be called after the stream processor
+ has been stopped.
-A stream parser can be instantiated for a TCP connection. This is done
-by:
+int strp_process(struct strparser *strp, struct sk_buff *orig_skb,
+ unsigned int orig_offset, size_t orig_len,
+ size_t max_msg_size, long timeo)
-strp_init(struct strparser *strp, struct sock *csk,
- struct strp_callbacks *cb)
+ strp_process is called in general mode for a stream parser to
+ parse an sk_buff. The number of bytes processed or a negative
+ error number is returned. Note that strp_process does not
+ consume the sk_buff. max_msg_size is the maximum message size
+ the stream parser will parse. timeo is the timeout for
+ completing a message.
-strp is a struct of type strparser that is allocated by the upper layer.
-csk is the TCP socket associated with the stream parser. Callbacks are
-called by the stream parser.
+void strp_data_ready(struct strparser *strp);
+
+ The upper layer calls strp_data_ready when data is ready on
+ the lower socket for strparser to process. This should be called
+ from a data_ready callback that is set on the socket. Note that
+ the maximum message size is limited by the receive socket buffer
+ size and the message timeout is the receive timeout for the socket.
+
+void strp_check_rcv(struct strparser *strp);
+
+ strp_check_rcv is called to check for new messages on the socket.
+ This is normally called at initialization of a stream parser
+ instance or after strp_unpause.
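+
+As a sketch, attaching a parser in receive callback mode for a hypothetical
+protocol that prefixes every message with a __be32 length might look like
+the following (illustrative only, not taken from an in-tree user):
+
+	#include <linux/skbuff.h>
+	#include <net/strparser.h>
+
+	/* Hypothetical wire format: a __be32 length followed by that many
+	 * bytes of payload.
+	 */
+	static int my_parse_msg(struct strparser *strp, struct sk_buff *skb)
+	{
+		struct strp_msg *msg = (struct strp_msg *)skb->cb;
+		__be32 hdr;
+
+		if (skb_copy_bits(skb, msg->offset, &hdr, sizeof(hdr)))
+			return 0;			/* need more data */
+		return sizeof(hdr) + ntohl(hdr);	/* full message length */
+	}
+
+	static void my_rcv_msg(struct strparser *strp, struct sk_buff *skb)
+	{
+		/* A real upper layer would queue or process the message;
+		 * the callee must consume the skb.
+		 */
+		kfree_skb(skb);
+	}
+
+	static const struct strp_callbacks my_cb = {
+		.parse_msg = my_parse_msg,
+		.rcv_msg   = my_rcv_msg,
+		/* Leaving the other callbacks NULL selects the receive
+		 * callback mode defaults.
+		 */
+	};
+
+	/* Attach the parser to a TCP socket; the socket's data_ready
+	 * callback should then call strp_data_ready(strp).
+	 */
+	int my_attach(struct strparser *strp, struct sock *sk)
+	{
+		return strp_init(strp, sk, &my_cb);
+	}
+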
Callbacks
----------
+=========
-There are four callbacks:
+There are six callbacks:
int (*parse_msg)(struct strparser *strp, struct sk_buff *skb);
parse_msg is called to determine the length of the next message
in the stream. The upper layer must implement this function. It
should parse the sk_buff as containing the headers for the
- next application layer messages in the stream.
+ next application layer message in the stream.
- The skb->cb in the input skb is a struct strp_rx_msg. Only
+ The skb->cb in the input skb is a struct strp_msg. Only
the offset field is relevant in parse_msg and gives the offset
where the message starts in the skb.
@@ -50,26 +112,41 @@ int (*parse_msg)(struct strparser *strp, struct sk_buff *skb);
-ESTRPIPE : current message should not be processed by the
kernel, return control of the socket to userspace which
can proceed to read the messages itself
- other < 0 : Error is parsing, give control back to userspace
+ other < 0 : Error in parsing, give control back to userspace
assuming that synchronization is lost and the stream
is unrecoverable (application expected to close TCP socket)
In the case that an error is returned (return value is less than
- zero) the stream parser will set the error on TCP socket and wake
- it up. If parse_msg returned -ESTRPIPE and the stream parser had
- previously read some bytes for the current message, then the error
- set on the attached socket is ENODATA since the stream is
- unrecoverable in that case.
+ zero) and the parser is in receive callback mode, then it will set
+ the error on TCP socket and wake it up. If parse_msg returned
+ -ESTRPIPE and the stream parser had previously read some bytes for
+ the current message, then the error set on the attached socket is
+ ENODATA since the stream is unrecoverable in that case.
+
+void (*lock)(struct strparser *strp)
+
+ The lock callback is called to lock the strp structure when
+ the strparser is performing an asynchronous operation (such as
+ processing a timeout). In receive callback mode the default
+ is to call lock_sock for the associated socket. In general
+ mode the callback must be set appropriately.
+
+void (*unlock)(struct strparser *strp)
+
+ The unlock callback is called to release the lock obtained
+ by the lock callback. In receive callback mode the default
+ function is release_sock for the associated socket. In general
+ mode the callback must be set appropriately.
void (*rcv_msg)(struct strparser *strp, struct sk_buff *skb);
rcv_msg is called when a full message has been received and
is queued. The callee must consume the sk_buff; it can
call strp_pause to prevent any further messages from being
- received in rcv_msg (see strp_pause below). This callback
+ received in rcv_msg (see strp_pause above). This callback
must be set.
- The skb->cb in the input skb is a struct strp_rx_msg. This
+ The skb->cb in the input skb is a struct strp_msg. This
struct contains two fields: offset and full_len. Offset is
where the message starts in the skb, and full_len is the
length of the message. skb->len - offset may be greater
@@ -78,59 +155,53 @@ void (*rcv_msg)(struct strparser *strp, struct sk_buff *skb);
int (*read_sock_done)(struct strparser *strp, int err);
read_sock_done is called when the stream parser is done reading
- the TCP socket. The stream parser may read multiple messages
- in a loop and this function allows cleanup to occur when existing
- the loop. If the callback is not set (NULL in strp_init) a
- default function is used.
+ the TCP socket in receive callback mode. The stream parser may
+ read multiple messages in a loop and this function allows cleanup
+ to occur when exiting the loop. If the callback is not set (NULL
+ in strp_init) a default function is used.
void (*abort_parser)(struct strparser *strp, int err);
This function is called when stream parser encounters an error
- in parsing. The default function stops the stream parser for the
- TCP socket and sets the error in the socket. The default function
- can be changed by setting the callback to non-NULL in strp_init.
+ in parsing. The default function stops the stream parser and
+ sets the error in the socket if the parser is in receive callback
+ mode. The default function can be changed by setting the callback
+ to non-NULL in strp_init.
-Functions
----------
+Statistics
+==========
-The upper layer calls strp_tcp_data_ready when data is ready on the lower
-socket for strparser to process. This should be called from a data_ready
-callback that is set on the socket.
+Various counters are kept for each stream parser instance. These are in
+the strp_stats structure. strp_aggr_stats is a convenience structure for
+accumulating statistics for multiple stream parser instances.
+save_strp_stats and aggregate_strp_stats are helper functions to save
+and aggregate statistics.
-strp_stop is called to completely stop stream parser operations. This
-is called internally when the stream parser encounters an error, and
-it is called from the upper layer when unattaching a TCP socket.
+Message assembly limits
+=======================
-strp_done is called to unattach the stream parser from the TCP socket.
-This must be called after the stream processor has be stopped.
+The stream parser provides mechanisms to limit the resources consumed by
+message assembly.
-strp_check_rcv is called to check for new messages on the socket. This
-is normally called at initialization of the a stream parser instance
-of after strp_unpause.
+A timer is set when assembly starts for a new message. In receive
+callback mode the message timeout is taken from rcvtime for the
+associated TCP socket. In general mode, the timeout is passed as an
+argument in strp_process. If the timer fires before assembly completes
+the stream parser is aborted and the ETIMEDOUT error is set on the TCP
+socket if in receive callback mode.
-Statistics
-----------
+In receive callback mode, message length is limited to the receive
+buffer size of the associated TCP socket. If the length returned by
+parse_msg is greater than the socket buffer size then the stream parser
+is aborted with EMSGSIZE error set on the TCP socket. Note that this
+makes the maximum size of receive skbuffs for a socket with a stream
+parser to be 2*sk_rcvbuf of the TCP socket.
-Various counters are kept for each stream parser for a TCP socket.
-These are in the strp_stats structure. strp_aggr_stats is a convenience
-structure for accumulating statistics for multiple stream parser
-instances. save_strp_stats and aggregate_strp_stats are helper functions
-to save and aggregate statistics.
+In general mode the message length limit is passed in as an argument
+to strp_process.
-Message assembly limits
------------------------
+Author
+======
-The stream parser provide mechanisms to limit the resources consumed by
-message assembly.
+Tom Herbert (tom@quantonium.net)
-A timer is set when assembly starts for a new message. The message
-timeout is taken from rcvtime for the associated TCP socket. If the
-timer fires before assembly completes the stream parser is aborted
-and the ETIMEDOUT error is set on the TCP socket.
-
-Message length is limited to the receive buffer size of the associated
-TCP socket. If the length returned by parse_msg is greater than
-the socket buffer size then the stream parser is aborted with
-EMSGSIZE error set on the TCP socket. Note that this makes the
-maximum size of receive skbuffs for a socket with a stream parser
-to be 2*sk_rcvbuf of the TCP socket.
diff --git a/Documentation/networking/switchdev.txt b/Documentation/networking/switchdev.txt
index 3e7b946dea27..5e40e1f68873 100644
--- a/Documentation/networking/switchdev.txt
+++ b/Documentation/networking/switchdev.txt
@@ -228,7 +228,7 @@ Learning on the device port should be enabled, as well as learning_sync:
bridge link set dev DEV learning on self
bridge link set dev DEV learning_sync on self
-Learning_sync attribute enables syncing of the learned/forgotton FDB entry to
+Learning_sync attribute enables syncing of the learned/forgotten FDB entry to
the bridge's FDB. It's possible, but not optimal, to enable learning on the
device port and on the bridge port, and disable learning_sync.
@@ -245,7 +245,7 @@ the responsibility of the port driver/device to age out these entries. If the
port device supports ageing, when the FDB entry expires, it will notify the
driver which in turn will notify the bridge with SWITCHDEV_FDB_DEL. If the
device does not support ageing, the driver can simulate ageing using a
-garbage collection timer to monitor FBD entries. Expired entries will be
+garbage collection timer to monitor FDB entries. Expired entries will be
notified to the bridge using SWITCHDEV_FDB_DEL. See rocker driver for
example of driver running ageing timer.
diff --git a/Documentation/nvmem/nvmem.txt b/Documentation/nvmem/nvmem.txt
index dbd40d879239..8d8d8f58f96f 100644
--- a/Documentation/nvmem/nvmem.txt
+++ b/Documentation/nvmem/nvmem.txt
@@ -112,7 +112,7 @@ take nvmem_device as parameter.
5. Releasing a reference to the NVMEM
=====================================
-When a consumers no longer needs the NVMEM, it has to release the reference
+When a consumer no longer needs the NVMEM, it has to release the reference
to the NVMEM it has obtained using the APIs mentioned in the above section.
The NVMEM framework provides 2 APIs to release a reference to the NVMEM.
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index 0fde3dcf077a..625549d4c74a 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -435,7 +435,8 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
PM status to 'suspended' and update its parent's counter of 'active'
children as appropriate (it is only valid to use this function if
'power.runtime_error' is set or 'power.disable_depth' is greater than
- zero)
+ zero); it will fail and return an error code if the device has a child
+ which is active and the 'power.ignore_children' flag is unset
bool pm_runtime_active(struct device *dev);
- return true if the device's runtime PM status is 'active' or its
diff --git a/Documentation/power/states.txt b/Documentation/power/states.txt
deleted file mode 100644
index bc4548245a24..000000000000
--- a/Documentation/power/states.txt
+++ /dev/null
@@ -1,125 +0,0 @@
-System Power Management Sleep States
-
-(C) 2014 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com>
-
-The kernel supports up to four system sleep states generically, although three
-of them depend on the platform support code to implement the low-level details
-for each state.
-
-The states are represented by strings that can be read or written to the
-/sys/power/state file. Those strings may be "mem", "standby", "freeze" and
-"disk", where the last three always represent Power-On Suspend (if supported),
-Suspend-To-Idle and hibernation (Suspend-To-Disk), respectively.
-
-The meaning of the "mem" string is controlled by the /sys/power/mem_sleep file.
-It contains strings representing the available modes of system suspend that may
-be triggered by writing "mem" to /sys/power/state. These modes are "s2idle"
-(Suspend-To-Idle), "shallow" (Power-On Suspend) and "deep" (Suspend-To-RAM).
-The "s2idle" mode is always available, while the other ones are only available
-if supported by the platform (if not supported, the strings representing them
-are not present in /sys/power/mem_sleep). The string representing the suspend
-mode to be used subsequently is enclosed in square brackets. Writing one of
-the other strings present in /sys/power/mem_sleep to it causes the suspend mode
-to be used subsequently to change to the one represented by that string.
-
-Consequently, there are two ways to cause the system to go into the
-Suspend-To-Idle sleep state. The first one is to write "freeze" directly to
-/sys/power/state. The second one is to write "s2idle" to /sys/power/mem_sleep
-and then to write "mem" to /sys/power/state. Similarly, there are two ways
-to cause the system to go into the Power-On Suspend sleep state (the strings to
-write to the control files in that case are "standby" or "shallow" and "mem",
-respectively) if that state is supported by the platform. In turn, there is
-only one way to cause the system to go into the Suspend-To-RAM state (write
-"deep" into /sys/power/mem_sleep and "mem" into /sys/power/state).
-
-The default suspend mode (ie. the one to be used without writing anything into
-/sys/power/mem_sleep) is either "deep" (if Suspend-To-RAM is supported) or
-"s2idle", but it can be overridden by the value of the "mem_sleep_default"
-parameter in the kernel command line.
-
-The properties of all of the sleep states are described below.
-
-
-State: Suspend-To-Idle
-ACPI state: S0
-Label: "s2idle" ("freeze")
-
-This state is a generic, pure software, light-weight, system sleep state.
-It allows more energy to be saved relative to runtime idle by freezing user
-space and putting all I/O devices into low-power states (possibly
-lower-power than available at run time), such that the processors can
-spend more time in their idle states.
-
-This state can be used for platforms without Power-On Suspend/Suspend-to-RAM
-support, or it can be used in addition to Suspend-to-RAM to provide reduced
-resume latency. It is always supported.
-
-
-State: Standby / Power-On Suspend
-ACPI State: S1
-Label: "shallow" ("standby")
-
-This state, if supported, offers moderate, though real, power savings, while
-providing a relatively low-latency transition back to a working system. No
-operating state is lost (the CPU retains power), so the system easily starts up
-again where it left off.
-
-In addition to freezing user space and putting all I/O devices into low-power
-states, which is done for Suspend-To-Idle too, nonboot CPUs are taken offline
-and all low-level system functions are suspended during transitions into this
-state. For this reason, it should allow more energy to be saved relative to
-Suspend-To-Idle, but the resume latency will generally be greater than for that
-state.
-
-
-State: Suspend-to-RAM
-ACPI State: S3
-Label: "deep"
-
-This state, if supported, offers significant power savings as everything in the
-system is put into a low-power state, except for memory, which should be placed
-into the self-refresh mode to retain its contents. All of the steps carried out
-when entering Power-On Suspend are also carried out during transitions to STR.
-Additional operations may take place depending on the platform capabilities. In
-particular, on ACPI systems the kernel passes control to the BIOS (platform
-firmware) as the last step during STR transitions and that usually results in
-powering down some more low-level components that aren't directly controlled by
-the kernel.
-
-System and device state is saved and kept in memory. All devices are suspended
-and put into low-power states. In many cases, all peripheral buses lose power
-when entering STR, so devices must be able to handle the transition back to the
-"on" state.
-
-For at least ACPI, STR requires some minimal boot-strapping code to resume the
-system from it. This may be the case on other platforms too.
-
-
-State: Suspend-to-disk
-ACPI State: S4
-Label: "disk"
-
-This state offers the greatest power savings, and can be used even in
-the absence of low-level platform support for power management. This
-state operates similarly to Suspend-to-RAM, but includes a final step
-of writing memory contents to disk. On resume, this is read and memory
-is restored to its pre-suspend state.
-
-STD can be handled by the firmware or the kernel. If it is handled by
-the firmware, it usually requires a dedicated partition that must be
-setup via another operating system for it to use. Despite the
-inconvenience, this method requires minimal work by the kernel, since
-the firmware will also handle restoring memory contents on resume.
-
-For suspend-to-disk, a mechanism called 'swsusp' (Swap Suspend) is used
-to write memory contents to free swap space. swsusp has some restrictive
-requirements, but should work in most cases. Some, albeit outdated,
-documentation can be found in Documentation/power/swsusp.txt.
-Alternatively, userspace can do most of the actual suspend to disk work,
-see userland-swsusp.txt.
-
-Once memory state is written to disk, the system may either enter a
-low-power state (like ACPI S4), or it may simply power down. Powering
-down offers greater savings, and allows this mechanism to work on any
-system. However, entering a real low-power state allows the user to
-trigger wake up events (e.g. pressing a key or opening a laptop lid).
diff --git a/Documentation/printk-formats.txt b/Documentation/printk-formats.txt
index 65ea5915178b..361789df51ec 100644
--- a/Documentation/printk-formats.txt
+++ b/Documentation/printk-formats.txt
@@ -58,20 +58,33 @@ Symbols/Function Pointers
%ps versatile_init
%pB prev_fn_of_versatile_init+0x88/0x88
-For printing symbols and function pointers. The ``S`` and ``s`` specifiers
-result in the symbol name with (``S``) or without (``s``) offsets. Where
-this is used on a kernel without KALLSYMS - the symbol address is
-printed instead.
+The ``F`` and ``f`` specifiers are for printing function pointers,
+for example, f->func, &gettimeofday. They have the same result as
+``S`` and ``s`` specifiers. But they do an extra conversion on
+ia64, ppc64 and parisc64 architectures where the function pointers
+are actually function descriptors.
+
+The ``S`` and ``s`` specifiers can be used for printing symbols
+from direct addresses, for example, __builtin_return_address(0),
+(void *)regs->ip. They result in the symbol name with (``S``) or
+without (``s``) offsets. If KALLSYMS is disabled then the symbol
+address is printed instead.
The ``B`` specifier results in the symbol name with offsets and should be
used when printing stack backtraces. The specifier takes into
consideration the effect of compiler optimisations which may occur
when tail-call``s are used and marked with the noreturn GCC attribute.
-On ia64, ppc64 and parisc64 architectures function pointers are
-actually function descriptors which must first be resolved. The ``F`` and
-``f`` specifiers perform this resolution and then provide the same
-functionality as the ``S`` and ``s`` specifiers.
+Examples::
+
+ printk("Going to call: %pF\n", gettimeofday);
+ printk("Going to call: %pF\n", p->func);
+ printk("%s: called from %pS\n", __func__, (void *)_RET_IP_);
+ printk("%s: called from %pS\n", __func__,
+ (void *)__builtin_return_address(0));
+ printk("Faulted at %pS\n", (void *)regs->ip);
+ printk(" %s%pB\n", (reliable ? "" : "? "), (void *)*stack);
+
Kernel Pointers
===============
diff --git a/Documentation/process/applying-patches.rst b/Documentation/process/applying-patches.rst
index a0d058cc6d25..dc2ddc345044 100644
--- a/Documentation/process/applying-patches.rst
+++ b/Documentation/process/applying-patches.rst
@@ -6,9 +6,6 @@ Applying Patches To The Linux Kernel
Original by:
Jesper Juhl, August 2005
-Last update:
- 2016-09-14
-
.. note::
This document is obsolete. In most cases, rather than using ``patch``
@@ -344,7 +341,7 @@ possible.
This is a good branch to run for people who want to help out testing
development kernels but do not want to run some of the really experimental
-stuff (such people should see the sections about -git and -mm kernels below).
+stuff (such people should see the sections about -next and -mm kernels below).
The -rc patches are not incremental, they apply to a base 4.x kernel, just
like the 4.x.y patches described above. The kernel version before the -rcN
@@ -380,44 +377,6 @@ Here are 3 examples of how to apply these patches::
$ mv linux-4.7.3 linux-4.8-rc5 # rename the kernel source dir
-The -git kernels
-================
-
-These are daily snapshots of Linus' kernel tree (managed in a git
-repository, hence the name).
-
-These patches are usually released daily and represent the current state of
-Linus's tree. They are more experimental than -rc kernels since they are
-generated automatically without even a cursory glance to see if they are
-sane.
-
--git patches are not incremental and apply either to a base 4.x kernel or
-a base 4.x-rc kernel -- you can see which from their name.
-A patch named 4.7-git1 applies to the 4.7 kernel source and a patch
-named 4.8-rc3-git2 applies to the source of the 4.8-rc3 kernel.
-
-Here are some examples of how to apply these patches::
-
- # moving from 4.7 to 4.7-git1
-
- $ cd ~/linux-4.7 # change to the kernel source dir
- $ patch -p1 < ../patch-4.7-git1 # apply the 4.7-git1 patch
- $ cd ..
- $ mv linux-4.7 linux-4.7-git1 # rename the kernel source dir
-
- # moving from 4.7-git1 to 4.8-rc2-git3
-
- $ cd ~/linux-4.7-git1 # change to the kernel source dir
- $ patch -p1 -R < ../patch-4.7-git1 # revert the 4.7-git1 patch
- # we now have a 4.7 kernel
- $ patch -p1 < ../patch-4.8-rc2 # apply the 4.8-rc2 patch
- # the kernel is now 4.8-rc2
- $ patch -p1 < ../patch-4.8-rc2-git3 # apply the 4.8-rc2-git3 patch
- # the kernel is now 4.8-rc2-git3
- $ cd ..
- $ mv linux-4.7-git1 linux-4.8-rc2-git3 # rename source dir
-
-
The -mm patches and the linux-next tree
=======================================
diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst
index adbb50ae5246..560beaef5a7c 100644
--- a/Documentation/process/changes.rst
+++ b/Documentation/process/changes.rst
@@ -53,7 +53,7 @@ mcelog 0.6 mcelog --version
iptables 1.4.2 iptables -V
openssl & libcrypto 1.0.0 openssl version
bc 1.06.95 bc --version
-Sphinx\ [#f1]_ 1.2 sphinx-build --version
+Sphinx\ [#f1]_ 1.3 sphinx-build --version
====================== =============== ========================================
.. [#f1] Sphinx is needed only to build the Kernel documentation
@@ -309,18 +309,8 @@ Kernel documentation
Sphinx
------
-The ReST markups currently used by the Documentation/ files are meant to be
-built with ``Sphinx`` version 1.2 or upper. If you're desiring to build
-PDF outputs, it is recommended to use version 1.4.6.
-
-.. note::
-
- Please notice that, for PDF and LaTeX output, you'll also need ``XeLaTeX``
- version 3.14159265. Depending on the distribution, you may also need to
- install a series of ``texlive`` packages that provide the minimal set of
- functionalities required for ``XeLaTex`` to work. For PDF output you'll also
- need ``convert(1)`` from ImageMagick (https://www.imagemagick.org).
-
+Please see :ref:`sphinx_install` in ``Documentation/doc-guide/sphinx.rst``
+for details about Sphinx requirements.
Getting updated software
========================
diff --git a/Documentation/process/stable-kernel-rules.rst b/Documentation/process/stable-kernel-rules.rst
index 61e9c78bd6d1..36a2dded525b 100644
--- a/Documentation/process/stable-kernel-rules.rst
+++ b/Documentation/process/stable-kernel-rules.rst
@@ -166,12 +166,12 @@ Trees
- The queues of patches, for both completed versions and in progress
versions can be found at:
- http://git.kernel.org/?p=linux/kernel/git/stable/stable-queue.git
+ https://git.kernel.org/pub/scm/linux/kernel/git/stable/stable-queue.git
- The finalized and tagged releases of all stable kernels can be found
in separate branches per version at:
- http://git.kernel.org/?p=linux/kernel/git/stable/linux-stable.git
+ https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git
Review committee
diff --git a/Documentation/process/submitting-patches.rst b/Documentation/process/submitting-patches.rst
index 3e10719fee35..733478ade91b 100644
--- a/Documentation/process/submitting-patches.rst
+++ b/Documentation/process/submitting-patches.rst
@@ -413,7 +413,7 @@ e-mail discussions.
-11) Sign your work — the Developer's Certificate of Origin
+11) Sign your work - the Developer's Certificate of Origin
----------------------------------------------------------
To improve tracking of who did what, especially with patches that can
diff --git a/Documentation/security/keys/core.rst b/Documentation/security/keys/core.rst
index 1648fa80b3bf..1266eeae45f6 100644
--- a/Documentation/security/keys/core.rst
+++ b/Documentation/security/keys/core.rst
@@ -16,17 +16,7 @@ The key service can be configured on by enabling:
This document has the following sections:
- - Key overview
- - Key service overview
- - Key access permissions
- - SELinux support
- - New procfs files
- - Userspace system call interface
- - Kernel services
- - Notes on accessing payload contents
- - Defining a key type
- - Request-key callback service
- - Garbage collection
+.. contents:: :local:
Key Overview
@@ -443,7 +433,7 @@ The main syscalls are:
/sbin/request-key will be invoked in an attempt to obtain a key. The
callout_info string will be passed as an argument to the program.
- See also Documentation/security/keys-request-key.txt.
+ See also Documentation/security/keys/request-key.rst.
The keyctl syscall functions are:
@@ -973,7 +963,7 @@ payload contents" for more information.
If successful, the key will have been attached to the default keyring for
implicitly obtained request-key keys, as set by KEYCTL_SET_REQKEY_KEYRING.
- See also Documentation/security/keys-request-key.txt.
+ See also Documentation/security/keys/request-key.rst.
* To search for a key, passing auxiliary data to the upcaller, call::
diff --git a/Documentation/security/keys/request-key.rst b/Documentation/security/keys/request-key.rst
index aba32784174c..b2d16abaa9e9 100644
--- a/Documentation/security/keys/request-key.rst
+++ b/Documentation/security/keys/request-key.rst
@@ -3,7 +3,7 @@ Key Request Service
===================
The key request service is part of the key retention service (refer to
-Documentation/security/keys.txt). This document explains more fully how
+Documentation/security/core.rst). This document explains more fully how
the requesting algorithm works.
The process starts by either the kernel requesting a service by calling
diff --git a/Documentation/security/keys/trusted-encrypted.rst b/Documentation/security/keys/trusted-encrypted.rst
index 7b503831bdea..3bb24e09a332 100644
--- a/Documentation/security/keys/trusted-encrypted.rst
+++ b/Documentation/security/keys/trusted-encrypted.rst
@@ -172,4 +172,4 @@ Other uses for trusted and encrypted keys, such as for disk and file encryption
are anticipated. In particular the new format 'ecryptfs' has been defined in
in order to use encrypted keys to mount an eCryptfs filesystem. More details
about the usage can be found in the file
-``Documentation/security/keys-ecryptfs.txt``.
+``Documentation/security/keys/ecryptfs.rst``.
diff --git a/Documentation/sphinx-static/theme_overrides.css b/Documentation/sphinx-static/theme_overrides.css
index d5764a4de5a2..522b6d4c49d4 100644
--- a/Documentation/sphinx-static/theme_overrides.css
+++ b/Documentation/sphinx-static/theme_overrides.css
@@ -4,6 +4,17 @@
*
*/
+/* Interim: Code-blocks with line nos - lines and line numbers don't line up.
+ * see: https://github.com/rtfd/sphinx_rtd_theme/issues/419
+ */
+
+div[class^="highlight"] pre {
+ line-height: normal;
+}
+.rst-content .highlight > pre {
+ line-height: normal;
+}
+
@media screen {
/* content column
@@ -56,6 +67,12 @@
font-family: "Courier New", Courier, monospace
}
+ /* fix bottom margin of lists items */
+
+ .rst-content .section ul li:last-child, .rst-content .section ul li p:last-child {
+ margin-bottom: 12px;
+ }
+
/* inline literal: drop the borderbox, padding and red color */
code, .rst-content tt, .rst-content code {
diff --git a/Documentation/sphinx/kerneldoc.py b/Documentation/sphinx/kerneldoc.py
index d15e07f36881..39aa9e8697cc 100644
--- a/Documentation/sphinx/kerneldoc.py
+++ b/Documentation/sphinx/kerneldoc.py
@@ -27,6 +27,7 @@
# Please make sure this works on both python2 and python3.
#
+import codecs
import os
import subprocess
import sys
@@ -88,13 +89,10 @@ class KernelDocDirective(Directive):
try:
env.app.verbose('calling kernel-doc \'%s\'' % (" ".join(cmd)))
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
- # python2 needs conversion to unicode.
- # python3 with universal_newlines=True returns strings.
- if sys.version_info.major < 3:
- out, err = unicode(out, 'utf-8'), unicode(err, 'utf-8')
+ out, err = codecs.decode(out, 'utf-8'), codecs.decode(err, 'utf-8')
if p.returncode != 0:
sys.stderr.write(err)
diff --git a/Documentation/sphinx/requirements.txt b/Documentation/sphinx/requirements.txt
new file mode 100644
index 000000000000..742be3e12619
--- /dev/null
+++ b/Documentation/sphinx/requirements.txt
@@ -0,0 +1,3 @@
+docutils==0.12
+Sphinx==1.4.9
+sphinx_rtd_theme
diff --git a/Documentation/static-keys.txt b/Documentation/static-keys.txt
index b83dfa1c0602..ab16efe0c79d 100644
--- a/Documentation/static-keys.txt
+++ b/Documentation/static-keys.txt
@@ -149,6 +149,26 @@ static_branch_inc(), will change the branch back to true. Likewise, if the
key is initialized false, a 'static_branch_inc()', will change the branch to
true. And then a 'static_branch_dec()', will again make the branch false.
+The state and the reference count can be retrieved with 'static_key_enabled()'
+and 'static_key_count()'. In general, if you use these functions, they
+should be protected with the same mutex used around the enable/disable
+or increment/decrement function.
+
+Note that switching branches results in some locks being taken,
+particularly the CPU hotplug lock (in order to avoid races against
+CPUs being brought online whilst the kernel is getting
+patched). Calling the static key API from within a hotplug notifier is
+thus a sure deadlock recipe. In order to still allow use of the
+functionality, the following functions are provided:
+
+ static_key_enable_cpuslocked()
+ static_key_disable_cpuslocked()
+ static_branch_enable_cpuslocked()
+ static_branch_disable_cpuslocked()
+
+These functions are *not* general purpose, and must only be used when
+you really know that you're in the above context, and no other.
+
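+For example (the key and callback names below are hypothetical), code that
+already runs under the CPU hotplug lock, such as a CPU hotplug state
+callback, would use the _cpuslocked variants:
+
+	DEFINE_STATIC_KEY_FALSE(my_feature_key);
+
+	static int my_cpu_online(unsigned int cpu)
+	{
+		/* The hotplug lock is already held by the hotplug machinery
+		 * here, so the plain static_branch_enable() must not be used.
+		 */
+		static_branch_enable_cpuslocked(&my_feature_key);
+		return 0;
+	}
+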
Where an array of keys is required, it can be defined as::
DEFINE_STATIC_KEY_ARRAY_TRUE(keys, count);
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index bac23c198360..ce61d1fe08ca 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -61,6 +61,7 @@ show up in /proc/sys/kernel:
- perf_cpu_time_max_percent
- perf_event_paranoid
- perf_event_max_stack
+- perf_event_mlock_kb
- perf_event_max_contexts_per_stack
- pid_max
- powersave-nap [ PPC only ]
@@ -654,7 +655,9 @@ Controls use of the performance events system by unprivileged
users (without CAP_SYS_ADMIN). The default value is 2.
-1: Allow use of (almost) all events by all users
->=0: Disallow raw tracepoint access by users without CAP_IOC_LOCK
+ Ignore mlock limit after perf_event_mlock_kb without CAP_IPC_LOCK
+>=0: Disallow ftrace function tracepoint access by users without CAP_SYS_ADMIN
+ Disallow raw tracepoint access by users without CAP_SYS_ADMIN
>=1: Disallow CPU event access by users without CAP_SYS_ADMIN
>=2: Disallow kernel profiling by users without CAP_SYS_ADMIN
@@ -673,6 +676,14 @@ The default value is 127.
==============================================================
+perf_event_mlock_kb:
+
+Controls the size of the per-cpu ring buffer that is not counted against
+the mlock limit.
+
+The default value is 512 kB + 1 page.
+
+==============================================================
+
perf_event_max_contexts_per_stack:
Controls maximum number of stack frame context entries for
diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt
index 14db18c970b1..b67044a2575f 100644
--- a/Documentation/sysctl/net.txt
+++ b/Documentation/sysctl/net.txt
@@ -35,9 +35,34 @@ Table : Subdirectories in /proc/sys/net
bpf_jit_enable
--------------
-This enables Berkeley Packet Filter Just in Time compiler.
-Currently supported on x86_64 architecture, bpf_jit provides a framework
-to speed packet filtering, the one used by tcpdump/libpcap for example.
+This enables the BPF Just in Time (JIT) compiler. BPF is a flexible
+and efficient infrastructure that allows the execution of bytecode at
+various hook points. It is used in a number of Linux kernel subsystems
+such as networking (e.g. XDP, tc), tracing (e.g. kprobes, uprobes,
+tracepoints) and security (e.g. seccomp). LLVM has a BPF back end that
+can compile restricted C into a sequence of BPF instructions. After a
+program is loaded through bpf(2) and passes the in-kernel verifier, a
+JIT will then translate these BPF proglets into native CPU
+instructions. There are two flavors of JITs, the newer eBPF JIT
+currently supported on:
+ - x86_64
+ - arm64
+ - arm32
+ - ppc64
+ - sparc64
+ - mips64
+ - s390x
+
+And the older cBPF JIT supported on the following archs:
+ - mips
+ - ppc
+ - sparc
+
+eBPF JITs are a superset of cBPF JITs, meaning the kernel will
+migrate cBPF instructions into eBPF instructions and then JIT
+compile them transparently. Older cBPF JITs can only translate
+tcpdump filters, seccomp rules, etc., but not the aforementioned
+eBPF programs loaded through bpf(2).
+
Values :
0 - disable the JIT (default value)
1 - enable the JIT
@@ -46,9 +71,9 @@ Values :
bpf_jit_harden
--------------
-This enables hardening for the Berkeley Packet Filter Just in Time compiler.
-Supported are eBPF JIT backends. Enabling hardening trades off performance,
-but can mitigate JIT spraying.
+This enables hardening for the BPF JIT compiler. Hardening is supported
+for eBPF JIT backends. Enabling hardening trades off performance, but
+can mitigate JIT spraying.
Values :
0 - disable JIT hardening (default value)
1 - enable JIT hardening for unprivileged users only
@@ -57,11 +82,11 @@ Values :
bpf_jit_kallsyms
----------------
-When Berkeley Packet Filter Just in Time compiler is enabled, then compiled
-images are unknown addresses to the kernel, meaning they neither show up in
-traces nor in /proc/kallsyms. This enables export of these addresses, which
-can be used for debugging/tracing. If bpf_jit_harden is enabled, this feature
-is disabled.
+When the BPF JIT compiler is enabled, the compiled images are unknown
+addresses to the kernel, meaning they neither show up in traces nor
+in /proc/kallsyms. This enables export of these addresses, which can
+be used for debugging/tracing. If bpf_jit_harden is enabled, this
+feature is disabled.
Values :
0 - disable JIT kallsyms export (default value)
1 - enable JIT kallsyms export for privileged users only
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index 48244c42ff52..9baf66a9ef4e 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -572,7 +572,9 @@ See Documentation/nommu-mmap.txt for more information.
numa_zonelist_order
-This sysctl is only for NUMA.
+This sysctl is only for NUMA and it is deprecated. Anything but
+Node order will fail!
+
'where the memory is allocated from' is controlled by zonelists.
(This documentation ignores ZONE_HIGHMEM/ZONE_DMA32 for simple explanation.
you may be able to read ZONE_DMA as ZONE_DMA32...)
diff --git a/Documentation/trace/stm.txt b/Documentation/trace/stm.txt
index 11cff47eecce..03765750104b 100644
--- a/Documentation/trace/stm.txt
+++ b/Documentation/trace/stm.txt
@@ -83,7 +83,7 @@ by writing the name of the desired stm device there, for example:
$ echo dummy_stm.0 > /sys/class/stm_source/console/stm_source_link
For examples on how to use stm_source interface in the kernel, refer
-to stm_console or stm_heartbeat drivers.
+to stm_console, stm_heartbeat or stm_ftrace drivers.
Each stm_source device will need to assume a master and a range of
channels, depending on how many channels it requires. These are
@@ -107,5 +107,16 @@ console in the STP stream, create a "console" policy entry (see the
beginning of this text on how to do that). When initialized, it will
consume one channel.
+stm_ftrace
+==========
+
+This is another "stm_source" device, once the stm_ftrace has been
+linked with an stm device, and if "function" tracer is enabled,
+function address and parent function address which Ftrace subsystem
+would store into ring buffer will be exported via the stm device at
+the same time.
+
+Currently only Ftrace "function" tracer is supported.
+
[1] https://software.intel.com/sites/default/files/managed/d3/3c/intel-th-developer-manual.pdf
[2] http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0444b/index.html
diff --git a/Documentation/translations/ko_KR/memory-barriers.txt b/Documentation/translations/ko_KR/memory-barriers.txt
index 38310dcd6620..bc80fc0e210f 100644
--- a/Documentation/translations/ko_KR/memory-barriers.txt
+++ b/Documentation/translations/ko_KR/memory-barriers.txt
@@ -1956,10 +1956,7 @@ MMIO 쓰기 배리어
뒤에 완료됩니다.
ACQUIRE 앞에서 요청된 메모리 오퍼레이션은 ACQUIRE 오퍼레이션이 완료된 후에
- 완료될 수 있습니다. smp_mb__before_spinlock() 뒤에 ACQUIRE 가 실행되는
- 코드 블록은 블록 앞의 스토어를 블록 뒤의 로드와 스토어에 대해 순서
- 맞춥니다. 이건 smp_mb() 보다 완화된 것임을 기억하세요! 많은 아키텍쳐에서
- smp_mb__before_spinlock() 은 사실 아무일도 하지 않습니다.
+ 완료될 수 있습니다.
(2) RELEASE 오퍼레이션의 영향:
diff --git a/Documentation/translations/zh_CN/HOWTO b/Documentation/translations/zh_CN/HOWTO
index 11be075ba5fa..5f6d09edc9ac 100644
--- a/Documentation/translations/zh_CN/HOWTO
+++ b/Documentation/translations/zh_CN/HOWTO
@@ -149,9 +149,7 @@ Linux内核代码中包含有大量的文档。这些文档对于学习如何与
核源码的主目录中使用以下不同命令将会分别生成PDF、Postscript、HTML和手册
页等不同格式的文档:
make pdfdocs
- make psdocs
make htmldocs
- make mandocs
如何成为内核开发者
diff --git a/Documentation/vm/numa b/Documentation/vm/numa
index a08f71647714..a31b85b9bb88 100644
--- a/Documentation/vm/numa
+++ b/Documentation/vm/numa
@@ -79,11 +79,8 @@ memory, Linux must decide whether to order the zonelists such that allocations
fall back to the same zone type on a different node, or to a different zone
type on the same node. This is an important consideration because some zones,
such as DMA or DMA32, represent relatively scarce resources. Linux chooses
-a default zonelist order based on the sizes of the various zone types relative
-to the total memory of the node and the total memory of the system. The
-default zonelist order may be overridden using the numa_zonelist_order kernel
-boot parameter or sysctl. [see Documentation/admin-guide/kernel-parameters.rst and
-Documentation/sysctl/vm.txt]
+a default Node ordered zonelist. This means it tries to fall back to other zones
+from the same node before using remote nodes, which are ordered by NUMA distance.
By default, Linux will attempt to satisfy memory allocation requests from the
node to which the CPU that executes the request is assigned. Specifically,
diff --git a/Documentation/vm/swap_numa.txt b/Documentation/vm/swap_numa.txt
new file mode 100644
index 000000000000..d5960c9124f5
--- /dev/null
+++ b/Documentation/vm/swap_numa.txt
@@ -0,0 +1,69 @@
+Automatically bind swap device to numa node
+-------------------------------------------
+
+If the system has more than one swap device and the swap devices have node
+information, we can make use of this information in get_swap_pages() to
+decide which swap device to use, and thereby get better performance.
+
+
+How to use this feature
+-----------------------
+
+A swap device has a priority, which decides the order in which it is used. To
+make use of automatic binding, there is no need to manipulate priority settings
+for swap devices. e.g. on a 2 node machine, assume 2 swap devices swapA and
+swapB, with swapA attached to node 0 and swapB attached to node 1, are going
+to be swapped on. Simply swap them on by doing:
+# swapon /dev/swapA
+# swapon /dev/swapB
+
+Then node 0 will use the two swap devices in the order of swapA then swapB and
+node 1 will use the two swap devices in the order of swapB then swapA. Note
+that the order of them being swapped on doesn't matter.
+
+A more complex example on a 4 node machine. Assume 6 swap devices are going to
+be swapped on: swapA and swapB are attached to node 0, swapC is attached to
+node 1, swapD and swapE are attached to node 2 and swapF is attached to node3.
+The way to swap them on is the same as above:
+# swapon /dev/swapA
+# swapon /dev/swapB
+# swapon /dev/swapC
+# swapon /dev/swapD
+# swapon /dev/swapE
+# swapon /dev/swapF
+
+Then node 0 will use them in the order of:
+swapA/swapB -> swapC -> swapD -> swapE -> swapF
+swapA and swapB will be used in a round robin mode before any other swap device.
+
+node 1 will use them in the order of:
+swapC -> swapA -> swapB -> swapD -> swapE -> swapF
+
+node 2 will use them in the order of:
+swapD/swapE -> swapA -> swapB -> swapC -> swapF
+Similarly, swapD and swapE will be used in a round robin mode before any
+other swap devices.
+
+node 3 will use them in the order of:
+swapF -> swapA -> swapB -> swapC -> swapD -> swapE
+
+
+Implementation details
+----------------------
+
+The current code uses a priority based list, swap_avail_list, to decide
+which swap device to use; if multiple swap devices share the same
+priority, they are used in round robin order. This change replaces the
+single global swap_avail_list with a per-numa-node list, i.e. each numa
+node sees its own priority based list of available swap devices. A swap
+device's priority can be promoted on its matching node's swap_avail_list.
+
+A swap device's priority is set as follows: the user can set a >=0 value,
+or the system will pick one starting from -1 and going downwards. The
+priority value in the swap_avail_list is the negated value of the swap
+device's priority, because the plist is sorted from low to high. The new
+policy doesn't change the semantics for the priority >=0 cases; what
+previously started from -1 and went downwards now starts from -2 and goes
+downwards, and -1 is reserved as the promoted value. So if multiple swap
+devices are attached to the same node, they will all be promoted to
+priority -1 on that node's plist and will be used round robin before any
+other swap devices.
diff --git a/Documentation/x86/amd-memory-encryption.txt b/Documentation/x86/amd-memory-encryption.txt
new file mode 100644
index 000000000000..f512ab718541
--- /dev/null
+++ b/Documentation/x86/amd-memory-encryption.txt
@@ -0,0 +1,68 @@
+Secure Memory Encryption (SME) is a feature found on AMD processors.
+
+SME provides the ability to mark individual pages of memory as encrypted using
+the standard x86 page tables. A page that is marked encrypted will be
+automatically decrypted when read from DRAM and encrypted when written to
+DRAM. SME can therefore be used to protect the contents of DRAM from physical
+attacks on the system.
+
+A page is encrypted when a page table entry has the encryption bit set (see
+below on how to determine its position). The encryption bit can also be
+specified in the cr3 register, allowing the PGD table to be encrypted. Each
+successive level of page tables can also be encrypted by setting the encryption
+bit in the page table entry that points to the next table. This allows the full
+page table hierarchy to be encrypted. Note that having the encryption bit set
+in cr3 does not imply that the full hierarchy is encrypted; each page table
+entry in the hierarchy needs to have the encryption bit set to achieve that.
+So, theoretically, you could have the encryption bit set in cr3 so that the
+PGD is encrypted, but not set the encryption bit in the PGD entry for a PUD,
+which results in the PUD pointed to by that entry not being encrypted.
+
+Support for SME can be determined through the CPUID instruction. The CPUID
+function 0x8000001f reports information related to SME:
+
+ 0x8000001f[eax]:
+ Bit[0] indicates support for SME
+ 0x8000001f[ebx]:
+ Bits[5:0] pagetable bit number used to activate memory
+ encryption
+ Bits[11:6] reduction in physical address space, in bits, when
+ memory encryption is enabled (this only affects
+ system physical addresses, not guest physical
+ addresses)
+
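+For illustration, a user-space probe of this CPUID leaf might look like the
+following (this relies on the compiler-provided <cpuid.h> helper and is not
+kernel code):
+
+	#include <cpuid.h>
+	#include <stdio.h>
+
+	int main(void)
+	{
+		unsigned int eax, ebx, ecx, edx;
+
+		if (!__get_cpuid(0x8000001f, &eax, &ebx, &ecx, &edx) ||
+		    !(eax & 1)) {
+			puts("SME not supported");
+			return 1;
+		}
+		printf("SME supported: encryption bit %u, PA reduction %u bits\n",
+		       ebx & 0x3f, (ebx >> 6) & 0x3f);
+		return 0;
+	}
+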
+If support for SME is present, MSR 0xc0010010 (MSR_K8_SYSCFG) can be used to
+determine if SME is enabled and/or to enable memory encryption:
+
+ 0xc0010010:
+ Bit[23] 0 = memory encryption features are disabled
+ 1 = memory encryption features are enabled
+
+Linux relies on BIOS to set this bit if BIOS has determined that the reduction
+in the physical address space as a result of enabling memory encryption (see
+CPUID information above) will not conflict with the address space resource
+requirements for the system. If this bit is not set upon Linux startup then
+Linux itself will not set it and memory encryption will not be possible.
+
+The state of SME in the Linux kernel can be described as follows:
+ - Supported:
+ The CPU supports SME (determined through CPUID instruction).
+
+ - Enabled:
+ Supported and bit 23 of MSR_K8_SYSCFG is set.
+
+ - Active:
+ Supported, Enabled and the Linux kernel is actively applying
+ the encryption bit to page table entries (the SME mask in the
+ kernel is non-zero).
+
+SME can also be enabled and activated in the BIOS. If SME is enabled and
+activated in the BIOS, then all memory accesses will be encrypted and it will
+not be necessary to activate the Linux memory encryption support. If the BIOS
+merely enables SME (sets bit 23 of the MSR_K8_SYSCFG), then Linux can activate
+memory encryption by default (CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT=y) or
+by supplying mem_encrypt=on on the kernel command line. However, if BIOS does
+not enable SME, then Linux will not be able to activate memory encryption, even
+if configured to do so by default or the mem_encrypt=on command line parameter
+is specified.
diff --git a/Documentation/x86/early-microcode.txt b/Documentation/x86/early-microcode.txt
deleted file mode 100644
index 07749e7f3d50..000000000000
--- a/Documentation/x86/early-microcode.txt
+++ /dev/null
@@ -1,70 +0,0 @@
-Early load microcode
-====================
-By Fenghua Yu <fenghua.yu@intel.com>
-
-Kernel can update microcode in early phase of boot time. Loading microcode early
-can fix CPU issues before they are observed during kernel boot time.
-
-Microcode is stored in an initrd file. The microcode is read from the initrd
-file and loaded to CPUs during boot time.
-
-The format of the combined initrd image is microcode in cpio format followed by
-the initrd image (maybe compressed). Kernel parses the combined initrd image
-during boot time. The microcode file in cpio name space is:
-on Intel: kernel/x86/microcode/GenuineIntel.bin
-on AMD : kernel/x86/microcode/AuthenticAMD.bin
-
-During BSP boot (before SMP starts), if the kernel finds the microcode file in
-the initrd file, it parses the microcode and saves matching microcode in memory.
-If matching microcode is found, it will be uploaded in BSP and later on in all
-APs.
-
-The cached microcode patch is applied when CPUs resume from a sleep state.
-
-There are two legacy user space interfaces to load microcode, either through
-/dev/cpu/microcode or through /sys/devices/system/cpu/microcode/reload file
-in sysfs.
-
-In addition to these two legacy methods, the early loading method described
-here is the third method with which microcode can be uploaded to a system's
-CPUs.
-
-The following example script shows how to generate a new combined initrd file in
-/boot/initrd-3.5.0.ucode.img with original microcode microcode.bin and
-original initrd image /boot/initrd-3.5.0.img.
-
-mkdir initrd
-cd initrd
-mkdir -p kernel/x86/microcode
-cp ../microcode.bin kernel/x86/microcode/GenuineIntel.bin (or AuthenticAMD.bin)
-find . | cpio -o -H newc >../ucode.cpio
-cd ..
-cat ucode.cpio /boot/initrd-3.5.0.img >/boot/initrd-3.5.0.ucode.img
-
-Builtin microcode
-=================
-
-We can also load builtin microcode supplied through the regular firmware
-builtin method CONFIG_FIRMWARE_IN_KERNEL. Only 64-bit is currently
-supported.
-
-Here's an example:
-
-CONFIG_FIRMWARE_IN_KERNEL=y
-CONFIG_EXTRA_FIRMWARE="intel-ucode/06-3a-09 amd-ucode/microcode_amd_fam15h.bin"
-CONFIG_EXTRA_FIRMWARE_DIR="/lib/firmware"
-
-This basically means, you have the following tree structure locally:
-
-/lib/firmware/
-|-- amd-ucode
-...
-| |-- microcode_amd_fam15h.bin
-...
-|-- intel-ucode
-...
-| |-- 06-3a-09
-...
-
-so that the build system can find those files and integrate them into
-the final kernel image. The early loader finds them and applies them.
diff --git a/Documentation/x86/intel_rdt_ui.txt b/Documentation/x86/intel_rdt_ui.txt
index c491a1b82de2..4d8848e4e224 100644
--- a/Documentation/x86/intel_rdt_ui.txt
+++ b/Documentation/x86/intel_rdt_ui.txt
@@ -6,8 +6,8 @@ Fenghua Yu <fenghua.yu@intel.com>
Tony Luck <tony.luck@intel.com>
Vikas Shivappa <vikas.shivappa@intel.com>
-This feature is enabled by the CONFIG_INTEL_RDT_A Kconfig and the
-X86 /proc/cpuinfo flag bits "rdt", "cat_l3" and "cdp_l3".
+This feature is enabled by the CONFIG_INTEL_RDT Kconfig and the
+X86 /proc/cpuinfo flag bits "rdt", "cqm", "cat_l3" and "cdp_l3".
To use the feature mount the file system:
@@ -17,6 +17,13 @@ mount options are:
"cdp": Enable code/data prioritization in L3 cache allocations.
+RDT features are orthogonal. A particular system may support only
+monitoring, only control, or both monitoring and control.
+
+The mount succeeds if either of allocation or monitoring is present, but
+only those files and directories supported by the system will be created.
+For more details on the behavior of the interface during monitoring
+and allocation, see the "Resource alloc and monitor groups" section.
Info directory
--------------
@@ -24,7 +31,12 @@ Info directory
The 'info' directory contains information about the enabled
resources. Each resource has its own subdirectory. The subdirectory
names reflect the resource names.
-Cache resource(L3/L2) subdirectory contains the following files:
+
+Each subdirectory contains the following files with respect to
+allocation:
+
+Cache resource(L3/L2) subdirectory contains the following files
+related to allocation:
"num_closids": The number of CLOSIDs which are valid for this
resource. The kernel uses the smallest number of
@@ -36,7 +48,15 @@ Cache resource(L3/L2) subdirectory contains the following files:
"min_cbm_bits": The minimum number of consecutive bits which
must be set when writing a mask.
-Memory bandwitdh(MB) subdirectory contains the following files:
+"shareable_bits": Bitmask of shareable resource with other executing
+ entities (e.g. I/O). User can use this when
+ setting up exclusive cache partitions. Note that
+ some platforms support devices that have their
+ own settings for cache use which can over-ride
+ these bits.
+
+Memory bandwidth(MB) subdirectory contains the following files
+with respect to allocation:
"min_bandwidth": The minimum memory bandwidth percentage which
user can request.
@@ -52,48 +72,152 @@ Memory bandwitdh(MB) subdirectory contains the following files:
non-linear. This field is purely informational
only.
-Resource groups
----------------
+If RDT monitoring is available there will be an "L3_MON" directory
+with the following files:
+
+"num_rmids": The number of RMIDs available. This is the
+ upper bound for how many "CTRL_MON" + "MON"
+ groups can be created.
+
+"mon_features": Lists the monitoring events if
+ monitoring is enabled for the resource.
+
+"max_threshold_occupancy":
+ Read/write file provides the largest value (in
+ bytes) at which a previously used LLC_occupancy
+ counter can be considered for re-use.
+
+
+Resource alloc and monitor groups
+---------------------------------
+
Resource groups are represented as directories in the resctrl file
-system. The default group is the root directory. Other groups may be
-created as desired by the system administrator using the "mkdir(1)"
-command, and removed using "rmdir(1)".
+system. The default group is the root directory which, immediately
+after mounting, owns all the tasks and cpus in the system and can make
+full use of all resources.
+
+On a system with RDT control features additional directories can be
+created in the root directory that specify different amounts of each
+resource (see "schemata" below). The root and these additional top level
+directories are referred to as "CTRL_MON" groups below.
+
+On a system with RDT monitoring the root directory and other top level
+directories contain a directory named "mon_groups" in which additional
+directories can be created to monitor subsets of tasks in the CTRL_MON
+group that is their ancestor. These are called "MON" groups in the rest
+of this document.
+
+Removing a directory will move all tasks and cpus owned by the group it
+represents to the parent. Removing one of the created CTRL_MON groups
+will automatically remove all MON groups below it.
+
+All groups contain the following files:
+
+"tasks":
+ Reading this file shows the list of all tasks that belong to
+ this group. Writing a task id to the file will add a task to the
+ group. If the group is a CTRL_MON group the task is removed from
+ whichever previous CTRL_MON group owned the task and also from
+ any MON group that owned the task. If the group is a MON group,
+ then the task must already belong to the CTRL_MON parent of this
+ group. The task is removed from any previous MON group.
+
+
+"cpus":
+ Reading this file shows a bitmask of the logical CPUs owned by
+ this group. Writing a mask to this file will add and remove
+ CPUs to/from this group. As with the tasks file a hierarchy is
+ maintained where MON groups may only include CPUs owned by the
+ parent CTRL_MON group.
+
-There are three files associated with each group:
+"cpus_list":
+ Just like "cpus", only using ranges of CPUs instead of bitmasks.
-"tasks": A list of tasks that belongs to this group. Tasks can be
- added to a group by writing the task ID to the "tasks" file
- (which will automatically remove them from the previous
- group to which they belonged). New tasks created by fork(2)
- and clone(2) are added to the same group as their parent.
- If a pid is not in any sub partition, it is in root partition
- (i.e. default partition).
-"cpus": A bitmask of logical CPUs assigned to this group. Writing
- a new mask can add/remove CPUs from this group. Added CPUs
- are removed from their previous group. Removed ones are
- given to the default (root) group. You cannot remove CPUs
- from the default group.
+When control is enabled all CTRL_MON groups will also contain:
-"cpus_list": One or more CPU ranges of logical CPUs assigned to this
- group. Same rules apply like for the "cpus" file.
+"schemata":
+ A list of all the resources available to this group.
+ Each resource has its own line and format - see below for details.
-"schemata": A list of all the resources available to this group.
- Each resource has its own line and format - see below for
- details.
+When monitoring is enabled all MON groups will also contain:
-When a task is running the following rules define which resources
-are available to it:
+"mon_data":
+ This contains a set of files organized by L3 domain and by
+ RDT event. E.g. on a system with two L3 domains there will
+ be subdirectories "mon_L3_00" and "mon_L3_01". Each of these
+	directories has one file per event (e.g. "llc_occupancy",
+ "mbm_total_bytes", and "mbm_local_bytes"). In a MON group these
+ files provide a read out of the current value of the event for
+ all tasks in the group. In CTRL_MON groups these files provide
+ the sum for all tasks in the CTRL_MON group and all tasks in
+ MON groups. Please see example section for more details on usage.
+
+Resource allocation rules
+-------------------------
+When a task is running the following rules define which resources are
+available to it:
1) If the task is a member of a non-default group, then the schemata
-for that group is used.
+ for that group is used.
2) Else if the task belongs to the default group, but is running on a
-CPU that is assigned to some specific group, then the schemata for
-the CPU's group is used.
+ CPU that is assigned to some specific group, then the schemata for the
+ CPU's group is used.
3) Otherwise the schemata for the default group is used.
+Resource monitoring rules
+-------------------------
+1) If a task is a member of a MON group, or non-default CTRL_MON group
+ then RDT events for the task will be reported in that group.
+
+2) If a task is a member of the default CTRL_MON group, but is running
+ on a CPU that is assigned to some specific group, then the RDT events
+ for the task will be reported in that group.
+
+3) Otherwise RDT events for the task will be reported in the root level
+ "mon_data" group.
+
+
+Notes on cache occupancy monitoring and control
+-----------------------------------------------
+When moving a task from one group to another you should remember that
+this only affects *new* cache allocations by the task. E.g. you may have
+a task in a monitor group showing 3 MB of cache occupancy. If you move it
+to a new group and immediately check the occupancy of the old and new
+groups you will likely see that the old group is still showing 3 MB and
+the new group zero. When the task accesses locations still in cache from
+before the move, the h/w does not update any counters. On a busy system
+you will likely see the occupancy in the old group go down as cache lines
+are evicted and re-used while the occupancy in the new group rises as
+the task accesses memory and loads into the cache are counted based on
+membership in the new group.
+
+The same applies to cache allocation control. Moving a task to a group
+with a smaller cache partition will not evict any cache lines. The
+process may continue to use them from the old partition.
+
+Hardware uses a CLOSid (Class of Service ID) and an RMID (Resource
+Monitoring ID) to identify a control group and a monitoring group,
+respectively. Each resource group is mapped to these IDs based on the
+kind of group. The number of CLOSids and RMIDs is limited by the hardware,
+hence the creation of a "CTRL_MON" directory may fail if we run out of
+either CLOSids or RMIDs, and creation of a "MON" group may fail if we run
+out of RMIDs.
+
+max_threshold_occupancy - generic concepts
+------------------------------------------
+
+Note that an RMID, once freed, may not be immediately available for reuse
+because cache lines allocated by its previous user are still tagged with
+that RMID. Such RMIDs are therefore placed on a limbo list and checked
+periodically to see whether their cache occupancy has gone down. If the
+system has many limbo RMIDs that are not yet ready to be reused, the user
+may see an -EBUSY error during mkdir.
+
+max_threshold_occupancy is a user-configurable value that determines the
+occupancy at which an RMID can be freed.
Schemata files - general concepts
---------------------------------
@@ -143,22 +267,22 @@ SKUs. Using a high bandwidth and a low bandwidth setting on two threads
sharing a core will result in both threads being throttled to use the
low bandwidth.
-L3 details (code and data prioritization disabled)
---------------------------------------------------
+L3 schemata file details (code and data prioritization disabled)
+----------------------------------------------------------------
With CDP disabled the L3 schemata format is:
L3:<cache_id0>=<cbm>;<cache_id1>=<cbm>;...
-L3 details (CDP enabled via mount option to resctrl)
-----------------------------------------------------
+L3 schemata file details (CDP enabled via mount option to resctrl)
+------------------------------------------------------------------
When CDP is enabled L3 control is split into two separate resources
so you can specify independent masks for code and data like this:
L3data:<cache_id0>=<cbm>;<cache_id1>=<cbm>;...
L3code:<cache_id0>=<cbm>;<cache_id1>=<cbm>;...
-L2 details
-----------
+L2 schemata file details
+------------------------
L2 cache does not support code and data prioritization, so the
schemata format is always:
@@ -185,6 +309,8 @@ L3CODE:0=fffff;1=fffff;2=fffff;3=fffff
L3DATA:0=fffff;1=fffff;2=3c0;3=fffff
L3CODE:0=fffff;1=fffff;2=fffff;3=fffff
+Examples for RDT allocation usage:
+
Example 1
---------
On a two socket machine (one L3 cache per socket) with just four bits
@@ -410,3 +536,124 @@ void main(void)
/* code to read and write directory contents */
resctrl_release_lock(fd);
}
+
+Examples for RDT Monitoring along with allocation usage:
+
+Reading monitored data
+----------------------
+Reading an event file (e.g. mon_data/mon_L3_00/llc_occupancy) shows the
+current snapshot of the LLC occupancy of the corresponding MON group or
+CTRL_MON group.
+
+
+Example 1 (Monitor CTRL_MON group and subset of tasks in CTRL_MON group)
+---------
+On a two socket machine (one L3 cache per socket) with just four bits
+for cache bit masks
+
+# mount -t resctrl resctrl /sys/fs/resctrl
+# cd /sys/fs/resctrl
+# mkdir p0 p1
+# echo "L3:0=3;1=c" > /sys/fs/resctrl/p0/schemata
+# echo "L3:0=3;1=3" > /sys/fs/resctrl/p1/schemata
+# echo 5678 > p1/tasks
+# echo 5679 > p1/tasks
+
+The default resource group is unmodified, so we have access to all parts
+of all caches (its schemata file reads "L3:0=f;1=f").
+
+Tasks that are under the control of group "p0" may only allocate from the
+"lower" 50% on cache ID 0, and the "upper" 50% of cache ID 1.
+Tasks in group "p1" use the "lower" 50% of cache on both sockets.
+
+Create monitor groups and assign a subset of tasks to each monitor group.
+
+# cd /sys/fs/resctrl/p1/mon_groups
+# mkdir m11 m12
+# echo 5678 > m11/tasks
+# echo 5679 > m12/tasks
+
+fetch data (data shown in bytes)
+
+# cat m11/mon_data/mon_L3_00/llc_occupancy
+16234000
+# cat m11/mon_data/mon_L3_01/llc_occupancy
+14789000
+# cat m12/mon_data/mon_L3_00/llc_occupancy
+16789000
+
+The parent ctrl_mon group shows the aggregated data.
+
+# cat /sys/fs/resctrl/p1/mon_data/mon_l3_00/llc_occupancy
+31234000
+
+Example 2 (Monitor a task from its creation)
+---------
+On a two socket machine (one L3 cache per socket)
+
+# mount -t resctrl resctrl /sys/fs/resctrl
+# cd /sys/fs/resctrl
+# mkdir p0 p1
+
+An RMID is allocated to the group once it is created and hence the <cmd>
+below is monitored from its creation.
+
+# echo $$ > /sys/fs/resctrl/p1/tasks
+# <cmd>
+
+Fetch the data
+
+# cat /sys/fs/resctrl/p1/mon_data/mon_l3_00/llc_occupancy
+31789000
+
+Example 3 (Monitor without CAT support or before creating CAT groups)
+---------
+
+Assume a system like Haswell (HSW) which has only CQM and no CAT support.
+In this case resctrl will still mount, but CTRL_MON directories cannot be
+created. The user can, however, create different MON groups within the
+root group and thereby monitor all tasks, including kernel threads.
+
+This can also be used to profile a job's cache footprint before assigning
+it to an allocation group.
+
+# mount -t resctrl resctrl /sys/fs/resctrl
+# cd /sys/fs/resctrl
+# mkdir mon_groups/m01
+# mkdir mon_groups/m02
+
+# echo 3478 > /sys/fs/resctrl/mon_groups/m01/tasks
+# echo 2467 > /sys/fs/resctrl/mon_groups/m02/tasks
+
+Monitor the groups separately and also get per-domain data. From the
+output below it is apparent that the tasks are mostly doing work on
+domain (socket) 0.
+
+# cat /sys/fs/resctrl/mon_groups/m01/mon_L3_00/llc_occupancy
+31234000
+# cat /sys/fs/resctrl/mon_groups/m01/mon_L3_01/llc_occupancy
+34555
+# cat /sys/fs/resctrl/mon_groups/m02/mon_L3_00/llc_occupancy
+31234000
+# cat /sys/fs/resctrl/mon_groups/m02/mon_L3_01/llc_occupancy
+32789
+
+
+Example 4 (Monitor real time tasks)
+-----------------------------------
+
+Consider a single-socket system with real-time tasks running on cores 4-7
+and non-real-time tasks on the other cpus. We want to monitor the cache
+occupancy of the real-time threads on these cores.
+
+# mount -t resctrl resctrl /sys/fs/resctrl
+# cd /sys/fs/resctrl
+# mkdir p1
+
+Move the cpus 4-7 over to p1
+# echo f0 > p1/cpus
+
+View the llc occupancy snapshot
+
+# cat /sys/fs/resctrl/p1/mon_data/mon_L3_00/llc_occupancy
+11234000
diff --git a/Documentation/x86/microcode.txt b/Documentation/x86/microcode.txt
new file mode 100644
index 000000000000..f57e1b45e628
--- /dev/null
+++ b/Documentation/x86/microcode.txt
@@ -0,0 +1,137 @@
+ The Linux Microcode Loader
+
+Authors: Fenghua Yu <fenghua.yu@intel.com>
+ Borislav Petkov <bp@suse.de>
+
+The kernel has an x86 microcode loading facility which is supposed to
+provide microcode loading methods in the OS. Potential use cases are
+updating the microcode on platforms beyond the OEM End-Of-Life support,
+and updating the microcode on long-running systems without rebooting.
+
+The loader supports three loading methods:
+
+1. Early load microcode
+=======================
+
+The kernel can update microcode very early during boot. Loading
+microcode early can fix CPU issues before they are observed during
+kernel boot time.
+
+The microcode is stored in an initrd file. During boot, it is read from
+it and loaded into the CPU cores.
+
+The format of the combined initrd image is microcode in (uncompressed)
+cpio format followed by the (possibly compressed) initrd image. The
+loader parses the combined initrd image during boot.
+
+The microcode files in cpio name space are:
+
+on Intel: kernel/x86/microcode/GenuineIntel.bin
+on AMD : kernel/x86/microcode/AuthenticAMD.bin
+
+During BSP (BootStrapping Processor) boot (pre-SMP), the kernel
+scans the microcode file in the initrd. If microcode matching the
+CPU is found, it will be applied in the BSP and later on in all APs
+(Application Processors).
+
+The loader also saves the matching microcode for the CPU in memory.
+Thus, the cached microcode patch is applied when CPUs resume from a
+sleep state.
+
+Here's a crude example of how to prepare an initrd with microcode (this is
+normally done automatically by the distribution when it recreates the
+initrd, so you don't really have to do it yourself; it is documented here
+for future reference only).
+
+---
+ #!/bin/bash
+
+ if [ -z "$1" ]; then
+ echo "You need to supply an initrd file"
+ exit 1
+ fi
+
+ INITRD="$1"
+
+ DSTDIR=kernel/x86/microcode
+ TMPDIR=/tmp/initrd
+
+ rm -rf $TMPDIR
+
+ mkdir $TMPDIR
+ cd $TMPDIR
+ mkdir -p $DSTDIR
+
+ if [ -d /lib/firmware/amd-ucode ]; then
+ cat /lib/firmware/amd-ucode/microcode_amd*.bin > $DSTDIR/AuthenticAMD.bin
+ fi
+
+ if [ -d /lib/firmware/intel-ucode ]; then
+ cat /lib/firmware/intel-ucode/* > $DSTDIR/GenuineIntel.bin
+ fi
+
+ find . | cpio -o -H newc >../ucode.cpio
+ cd ..
+ mv $INITRD $INITRD.orig
+ cat ucode.cpio $INITRD.orig > $INITRD
+
+ rm -rf $TMPDIR
+---
+
+The system needs to have the microcode packages installed into
+/lib/firmware, or you need to fix up the paths above if yours are
+somewhere else and/or you've downloaded them directly from the processor
+vendor's site.
+
+2. Late loading
+===============
+
+There are two legacy user space interfaces to load microcode, either through
+/dev/cpu/microcode or through /sys/devices/system/cpu/microcode/reload file
+in sysfs.
+
+The /dev/cpu/microcode method is deprecated because it requires a special
+userspace tool.
+
+The easier method is simply installing the microcode packages your distro
+supplies and running:
+
+# echo 1 > /sys/devices/system/cpu/microcode/reload
+
+as root.
+
+The loading mechanism looks for microcode blobs in
+/lib/firmware/{intel-ucode,amd-ucode}. The default distro installation
+packages already put them there.
+
+3. Builtin microcode
+====================
+
+The loader also supports loading builtin microcode supplied through the
+regular firmware builtin method, CONFIG_FIRMWARE_IN_KERNEL. Only 64-bit is
+currently supported.
+
+Here's an example:
+
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE="intel-ucode/06-3a-09 amd-ucode/microcode_amd_fam15h.bin"
+CONFIG_EXTRA_FIRMWARE_DIR="/lib/firmware"
+
+This basically means you have the following tree structure locally:
+
+/lib/firmware/
+|-- amd-ucode
+...
+| |-- microcode_amd_fam15h.bin
+...
+|-- intel-ucode
+...
+| |-- 06-3a-09
+...
+
+so that the build system can find those files and integrate them into
+the final kernel image. The early loader finds them and applies them.
+
+Needless to say, this method is not the most flexible one because it
+requires rebuilding the kernel each time updated microcode from the CPU
+vendor is available.
diff --git a/Documentation/x86/orc-unwinder.txt b/Documentation/x86/orc-unwinder.txt
new file mode 100644
index 000000000000..af0c9a4c65a6
--- /dev/null
+++ b/Documentation/x86/orc-unwinder.txt
@@ -0,0 +1,179 @@
+ORC unwinder
+============
+
+Overview
+--------
+
+The kernel CONFIG_ORC_UNWINDER option enables the ORC unwinder, which is
+similar in concept to a DWARF unwinder. The difference is that the
+format of the ORC data is much simpler than DWARF, which in turn allows
+the ORC unwinder to be much simpler and faster.
+
+The ORC data consists of unwind tables which are generated by objtool.
+They contain out-of-band data which is used by the in-kernel ORC
+unwinder. Objtool generates the ORC data by first doing compile-time
+stack metadata validation (CONFIG_STACK_VALIDATION). After analyzing
+all the code paths of a .o file, it determines information about the
+stack state at each instruction address in the file and outputs that
+information to the .orc_unwind and .orc_unwind_ip sections.
+
+The per-object ORC sections are combined at link time and are sorted and
+post-processed at boot time. The unwinder uses the resulting data to
+correlate instruction addresses with their stack states at run time.
+
+
+ORC vs frame pointers
+---------------------
+
+With frame pointers enabled, GCC adds instrumentation code to every
+function in the kernel. The kernel's .text size increases by about
+3.2%, resulting in a broad kernel-wide slowdown. Measurements by Mel
+Gorman [1] have shown a slowdown of 5-10% for some workloads.
+
+In contrast, the ORC unwinder has no effect on text size or runtime
+performance, because the debuginfo is out of band. So if you disable
+frame pointers and enable the ORC unwinder, you get a nice performance
+improvement across the board, and still have reliable stack traces.
+
+Ingo Molnar says:
+
+ "Note that it's not just a performance improvement, but also an
+ instruction cache locality improvement: 3.2% .text savings almost
+ directly transform into a similarly sized reduction in cache
+ footprint. That can transform to even higher speedups for workloads
+ whose cache locality is borderline."
+
+Another benefit of ORC compared to frame pointers is that it can
+reliably unwind across interrupts and exceptions. Frame pointer based
+unwinds can sometimes skip the caller of the interrupted function, if it
+was a leaf function or if the interrupt hit before the frame pointer was
+saved.
+
+The main disadvantage of the ORC unwinder compared to frame pointers is
+that it needs more memory to store the ORC unwind tables: roughly 2-4MB
+depending on the kernel config.
+
+
+ORC vs DWARF
+------------
+
+ORC debuginfo's advantage over DWARF itself is that it's much simpler.
+It gets rid of the complex DWARF CFI state machine and also gets rid of
+the tracking of unnecessary registers. This allows the unwinder to be
+much simpler, meaning fewer bugs, which is especially important for
+mission critical oops code.
+
+The simpler debuginfo format also enables the unwinder to be much faster
+than DWARF, which is important for perf and lockdep. In a basic
+performance test by Jiri Slaby [2], the ORC unwinder was about 20x
+faster than an out-of-tree DWARF unwinder. (Note: That measurement was
+taken before some performance tweaks were added, which doubled
+performance, so the speedup over DWARF may be closer to 40x.)
+
+The ORC data format does have a few downsides compared to DWARF. ORC
+unwind tables take up ~50% more RAM (+1.3MB on an x86 defconfig kernel)
+than DWARF-based eh_frame tables.
+
+Another potential downside is that, as GCC evolves, it's conceivable
+that the ORC data may end up being *too* simple to describe the state of
+the stack for certain optimizations. But IMO this is unlikely because
+GCC saves the frame pointer for any unusual stack adjustments it does,
+so I suspect we'll really only ever need to keep track of the stack
+pointer and the frame pointer between call frames. But even if we do
+end up having to track all the registers DWARF tracks, at least we will
+still be able to control the format, e.g. no complex state machines.
+
+
+ORC unwind table generation
+---------------------------
+
+The ORC data is generated by objtool. With the existing compile-time
+stack metadata validation feature, objtool already follows all code
+paths, and so it already has all the information it needs to be able to
+generate ORC data from scratch. So it's an easy step to go from stack
+validation to ORC data generation.
+
+It should be possible to instead generate the ORC data with a simple
+tool which converts DWARF to ORC data. However, such a solution would
+be incomplete due to the kernel's extensive use of asm, inline asm, and
+special sections like exception tables.
+
+That could be rectified by manually annotating those special code paths
+using GNU assembler .cfi annotations in .S files, and homegrown
+annotations for inline asm in .c files. But asm annotations were tried
+in the past and were found to be unmaintainable. They were often
+incorrect/incomplete and made the code harder to read and keep updated.
+And based on looking at glibc code, annotating inline asm in .c files
+might be even worse.
+
+Objtool still needs a few annotations, but only in code which does
+unusual things to the stack like entry code. And even then, far fewer
+annotations are needed than what DWARF would need, so they're much more
+maintainable than DWARF CFI annotations.
+
+So the advantages of using objtool to generate ORC data are that it
+gives more accurate debuginfo, with very few annotations. It also
+insulates the kernel from toolchain bugs which can be very painful to
+deal with in the kernel since we often have to work around issues in
+older versions of the toolchain for years.
+
+The downside is that the unwinder now becomes dependent on objtool's
+ability to reverse engineer GCC code flow. If GCC optimizations become
+too complicated for objtool to follow, the ORC data generation might
+stop working or become incomplete. (It's worth noting that livepatch
+already has such a dependency on objtool's ability to follow GCC code
+flow.)
+
+If newer versions of GCC come up with some optimizations which break
+objtool, we may need to revisit the current implementation. Some
+possible solutions would be asking GCC to make the optimizations more
+palatable, or having objtool use DWARF as an additional input, or
+creating a GCC plugin to assist objtool with its analysis. But for now,
+objtool follows GCC code quite well.
+
+
+Unwinder implementation details
+-------------------------------
+
+Objtool generates the ORC data by integrating with the compile-time
+stack metadata validation feature, which is described in detail in
+tools/objtool/Documentation/stack-validation.txt. After analyzing all
+the code paths of a .o file, it creates an array of orc_entry structs,
+and a parallel array of instruction addresses associated with those
+structs, and writes them to the .orc_unwind and .orc_unwind_ip sections
+respectively.
+
+The ORC data is split into the two arrays for performance reasons, to
+make the searchable part of the data (.orc_unwind_ip) more compact. The
+arrays are sorted in parallel at boot time.
+
+Performance is further improved by the use of a fast lookup table which
+is created at runtime. The fast lookup table associates a given address
+with a range of indices for the .orc_unwind table, so that only a small
+subset of the table needs to be searched.
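+
+A greatly simplified sketch of that lookup (the names, types and block
+granularity below are illustrative, not the kernel's actual definitions,
+which store relative offsets and have more bookkeeping):
+
+	struct orc_entry;			/* stack state for one IP range */
+
+	static unsigned long *orc_ip;		/* .orc_unwind_ip, sorted */
+	static struct orc_entry *orc_table;	/* .orc_unwind, parallel array */
+	static unsigned int *fast_lookup;	/* address block -> table index */
+	#define BLOCK_SIZE 256			/* illustrative granularity */
+
+	static struct orc_entry *orc_find(unsigned long ip, unsigned long start)
+	{
+		/* Narrow the search to the block containing ip... */
+		unsigned int lo = fast_lookup[(ip - start) / BLOCK_SIZE];
+		unsigned int hi = fast_lookup[(ip - start) / BLOCK_SIZE + 1];
+
+		/* ...then binary search for the last entry at or below ip. */
+		while (lo < hi) {
+			unsigned int mid = lo + (hi - lo) / 2;
+
+			if (orc_ip[mid] <= ip)
+				lo = mid + 1;
+			else
+				hi = mid;
+		}
+		return lo ? &orc_table[lo - 1] : NULL;
+	}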
+
+
+Etymology
+---------
+
+Orcs, fearsome creatures of medieval folklore, are the Dwarves' natural
+enemies. Similarly, the ORC unwinder was created in opposition to the
+complexity and slowness of DWARF.
+
+"Although Orcs rarely consider multiple solutions to a problem, they do
+excel at getting things done because they are creatures of action, not
+thought." [3] Similarly, unlike the esoteric DWARF unwinder, the
+veracious ORC unwinder wastes no time or siloconic effort decoding
+variable-length zero-extended unsigned-integer byte-coded
+state-machine-based debug information entries.
+
+Similar to how Orcs frequently unravel the well-intentioned plans of
+their adversaries, the ORC unwinder frequently unravels stacks with
+brutal, unyielding efficiency.
+
+ORC stands for Oops Rewind Capability.
+
+
+[1] https://lkml.kernel.org/r/20170602104048.jkkzssljsompjdwy@suse.de
+[2] https://lkml.kernel.org/r/d2ca5435-6386-29b8-db87-7f227c2b713a@suse.cz
+[3] http://dustin.wikidot.com/half-orcs-and-orcs
diff --git a/Documentation/x86/protection-keys.txt b/Documentation/x86/protection-keys.txt
index b64304540821..fa46dcb347bc 100644
--- a/Documentation/x86/protection-keys.txt
+++ b/Documentation/x86/protection-keys.txt
@@ -34,7 +34,7 @@ with a key. In this example WRPKRU is wrapped by a C function
called pkey_set().
int real_prot = PROT_READ|PROT_WRITE;
- pkey = pkey_alloc(0, PKEY_DENY_WRITE);
+ pkey = pkey_alloc(0, PKEY_DISABLE_WRITE);
ptr = mmap(NULL, PAGE_SIZE, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
ret = pkey_mprotect(ptr, PAGE_SIZE, real_prot, pkey);
... application runs here
@@ -42,9 +42,9 @@ called pkey_set().
Now, if the application needs to update the data at 'ptr', it can
gain access, do the update, then remove its write access:
- pkey_set(pkey, 0); // clear PKEY_DENY_WRITE
+ pkey_set(pkey, 0); // clear PKEY_DISABLE_WRITE
*ptr = foo; // assign something
- pkey_set(pkey, PKEY_DENY_WRITE); // set PKEY_DENY_WRITE again
+ pkey_set(pkey, PKEY_DISABLE_WRITE); // set PKEY_DISABLE_WRITE again
Now when it frees the memory, it will also free the pkey since it
is no longer in use:
diff --git a/Documentation/x86/x86_64/5level-paging.txt b/Documentation/x86/x86_64/5level-paging.txt
new file mode 100644
index 000000000000..087251a0d99c
--- /dev/null
+++ b/Documentation/x86/x86_64/5level-paging.txt
@@ -0,0 +1,64 @@
+== Overview ==
+
+The original x86-64 architecture was limited by 4-level paging to 256 TiB
+of virtual address space and 64 TiB of physical address space. We are
+already bumping into this limit: some vendors offer servers with 64 TiB of
+memory today.
+
+To overcome the limitation, upcoming hardware will introduce support for
+5-level paging. It is a straightforward extension of the current page
+table structure, adding one more layer of translation.
+
+It bumps the limits to 128 PiB of virtual address space and 4 PiB of
+physical address space. This "ought to be enough for anybody" ©.
+
+QEMU 2.9 and later support 5-level paging.
+
+Virtual memory layout for 5-level paging is described in
+Documentation/x86/x86_64/mm.txt
+
+== Enabling 5-level paging ==
+
+CONFIG_X86_5LEVEL=y enables the feature.
+
+So far, a kernel compiled with the option enabled will be able to boot
+only on machines that support the feature -- see the 'la57' flag in
+/proc/cpuinfo.
+
+The plan is to implement boot-time switching between 4- and 5-level paging
+in the future.
+
+== User-space and large virtual address space ==
+
+On x86, 5-level paging enables 56-bit userspace virtual address space.
+Not all user space is ready to handle wide addresses. It's known that
+at least some JIT compilers use the higher bits in pointers to encode
+their own information. With 5-level paging this collides with valid
+pointers and leads to crashes.
+
+To mitigate this, we are not going to allocate virtual address space
+above 47-bit by default.
+
+But userspace can ask for allocation from the full address space by
+specifying a hint address (with or without MAP_FIXED) above 47 bits.
+
+If a hint address is set above 47 bits but MAP_FIXED is not specified, we
+try to find an unmapped area at the specified address. If it is already
+occupied, we look for an unmapped area in the *full* address space rather
+than in the 47-bit window.
+
+A high hint address would only affect the allocation in question, but not
+any future mmap()s.
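+
+A minimal sketch of this opt-in (illustrative only):
+
+	#include <stdio.h>
+	#include <sys/mman.h>
+
+	int main(void)
+	{
+		/* A hint above the 47-bit boundary opts this mapping in to
+		 * the full address space; without 5-level paging support
+		 * the hint is simply ignored. */
+		void *hint = (void *)(1UL << 48);
+		void *p = mmap(hint, 4096, PROT_READ | PROT_WRITE,
+			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+
+		if (p == MAP_FAILED) {
+			perror("mmap");
+			return 1;
+		}
+		printf("mapped at %p\n", p);
+		munmap(p, 4096);
+		return 0;
+	}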
+
+Specifying a high hint address on an older kernel or on a machine without
+5-level paging support is safe. The hint will be ignored and the kernel
+will fall back to allocating from the 47-bit address space.
+
+This approach makes it easy to teach an application's memory allocator
+about the large address space without manually tracking allocated virtual
+address space.
+
+One important case we need to handle here is interaction with MPX.
+MPX (without the MAWA extension) cannot handle addresses above 47 bits, so
+we need to make sure that MPX cannot be enabled if we already have a VMA
+above the boundary, and forbid creating such VMAs once MPX is enabled.
+
diff --git a/MAINTAINERS b/MAINTAINERS
index 205d3977ac46..ac2dfb26d899 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -205,7 +205,6 @@ F: include/net/9p/
F: include/uapi/linux/virtio_9p.h
F: include/trace/events/9p.h
-
A8293 MEDIA DRIVER
M: Antti Palosaari <crope@iki.fi>
L: linux-media@vger.kernel.org
@@ -302,6 +301,7 @@ S: Supported
F: drivers/acpi/
F: drivers/pnp/pnpacpi/
F: include/linux/acpi.h
+F: include/linux/fwnode.h
F: include/acpi/
F: Documentation/acpi/
F: Documentation/ABI/testing/sysfs-bus-acpi
@@ -311,6 +311,14 @@ F: drivers/pci/*/*acpi*
F: drivers/pci/*/*/*acpi*
F: tools/power/acpi/
+ACPI APEI
+M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
+M: Len Brown <lenb@kernel.org>
+L: linux-acpi@vger.kernel.org
+R: Tony Luck <tony.luck@intel.com>
+R: Borislav Petkov <bp@alien8.de>
+F: drivers/acpi/apei/
+
ACPI COMPONENT ARCHITECTURE (ACPICA)
M: Robert Moore <robert.moore@intel.com>
M: Lv Zheng <lv.zheng@intel.com>
@@ -492,13 +500,6 @@ S: Maintained
F: Documentation/hwmon/adt7475
F: drivers/hwmon/adt7475.c
-ADXL34X THREE-AXIS DIGITAL ACCELEROMETER DRIVER (ADXL345/ADXL346)
-M: Michael Hennerich <michael.hennerich@analog.com>
-W: http://wiki.analog.com/ADXL345
-W: http://ez.analog.com/community/linux-device-drivers
-S: Supported
-F: drivers/input/misc/adxl34x.c
-
ADVANSYS SCSI DRIVER
M: Matthew Wilcox <matthew@wil.cx>
M: Hannes Reinecke <hare@suse.com>
@@ -507,6 +508,13 @@ S: Maintained
F: Documentation/scsi/advansys.txt
F: drivers/scsi/advansys.c
+ADXL34X THREE-AXIS DIGITAL ACCELEROMETER DRIVER (ADXL345/ADXL346)
+M: Michael Hennerich <michael.hennerich@analog.com>
+W: http://wiki.analog.com/ADXL345
+W: http://ez.analog.com/community/linux-device-drivers
+S: Supported
+F: drivers/input/misc/adxl34x.c
+
AEDSP16 DRIVER
M: Riccardo Facchetti <fizban@tin.it>
S: Maintained
@@ -770,6 +778,12 @@ W: http://ez.analog.com/community/linux-device-drivers
S: Supported
F: drivers/media/i2c/adv7180.c
+ANALOG DEVICES INC ADV748X DRIVER
+M: Kieran Bingham <kieran.bingham@ideasonboard.com>
+L: linux-media@vger.kernel.org
+S: Maintained
+F: drivers/media/i2c/adv748x/*
+
ANALOG DEVICES INC ADV7511 DRIVER
M: Hans Verkuil <hans.verkuil@cisco.com>
L: linux-media@vger.kernel.org
@@ -808,6 +822,12 @@ W: http://blackfin.uclinux.org/
S: Supported
F: sound/soc/blackfin/*
+ANALOG DEVICES INC DMA DRIVERS
+M: Lars-Peter Clausen <lars@metafoo.de>
+W: http://ez.analog.com/community/linux-device-drivers
+S: Supported
+F: drivers/dma/dma-axi-dmac.c
+
ANALOG DEVICES INC IIO DRIVERS
M: Lars-Peter Clausen <lars@metafoo.de>
M: Michael Hennerich <Michael.Hennerich@analog.com>
@@ -820,12 +840,6 @@ X: drivers/iio/*/adjd*
F: drivers/staging/iio/*/ad*
F: drivers/staging/iio/trigger/iio-trig-bfin-timer.c
-ANALOG DEVICES INC DMA DRIVERS
-M: Lars-Peter Clausen <lars@metafoo.de>
-W: http://ez.analog.com/community/linux-device-drivers
-S: Supported
-F: drivers/dma/dma-axi-dmac.c
-
ANDROID CONFIG FRAGMENTS
M: Rob Herring <robh@kernel.org>
S: Supported
@@ -872,6 +886,15 @@ F: include/linux/apm_bios.h
F: include/uapi/linux/apm_bios.h
F: drivers/char/apm-emulation.c
+APPARMOR SECURITY MODULE
+M: John Johansen <john.johansen@canonical.com>
+L: apparmor@lists.ubuntu.com (subscribers-only, general discussion)
+W: apparmor.wiki.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/jj/apparmor-dev.git
+S: Supported
+F: security/apparmor/
+F: Documentation/admin-guide/LSM/apparmor.rst
+
APPLE BCM5974 MULTITOUCH DRIVER
M: Henrik Rydberg <rydberg@bitmath.org>
L: linux-input@vger.kernel.org
@@ -895,6 +918,18 @@ M: Duc Dang <dhdang@apm.com>
S: Supported
F: arch/arm64/boot/dts/apm/
+APPLIED MICRO (APM) X-GENE SOC EDAC
+M: Loc Ho <lho@apm.com>
+S: Supported
+F: drivers/edac/xgene_edac.c
+F: Documentation/devicetree/bindings/edac/apm-xgene-edac.txt
+
+APPLIED MICRO (APM) X-GENE SOC ETHERNET (V2) DRIVER
+M: Iyappan Subramanian <isubramanian@apm.com>
+M: Keyur Chudgar <kchudgar@apm.com>
+S: Supported
+F: drivers/net/ethernet/apm/xgene-v2/
+
APPLIED MICRO (APM) X-GENE SOC ETHERNET DRIVER
M: Iyappan Subramanian <isubramanian@apm.com>
M: Keyur Chudgar <kchudgar@apm.com>
@@ -905,12 +940,6 @@ F: drivers/net/phy/mdio-xgene.c
F: Documentation/devicetree/bindings/net/apm-xgene-enet.txt
F: Documentation/devicetree/bindings/net/apm-xgene-mdio.txt
-APPLIED MICRO (APM) X-GENE SOC ETHERNET (V2) DRIVER
-M: Iyappan Subramanian <isubramanian@apm.com>
-M: Keyur Chudgar <kchudgar@apm.com>
-S: Supported
-F: drivers/net/ethernet/apm/xgene-v2/
-
APPLIED MICRO (APM) X-GENE SOC PMU
M: Tai Nguyen <ttnguyen@apm.com>
S: Supported
@@ -930,6 +959,12 @@ S: Maintained
F: drivers/video/fbdev/arcfb.c
F: drivers/video/fbdev/core/fb_defio.c
+ARC PGU DRM DRIVER
+M: Alexey Brodkin <abrodkin@synopsys.com>
+S: Supported
+F: drivers/gpu/drm/arc/
+F: Documentation/devicetree/bindings/display/snps,arcpgu.txt
+
ARCNET NETWORK LAYER
M: Michael Grzeschik <m.grzeschik@pengutronix.de>
L: netdev@vger.kernel.org
@@ -937,12 +972,6 @@ S: Maintained
F: drivers/net/arcnet/
F: include/uapi/linux/if_arcnet.h
-ARC PGU DRM DRIVER
-M: Alexey Brodkin <abrodkin@synopsys.com>
-S: Supported
-F: drivers/gpu/drm/arc/
-F: Documentation/devicetree/bindings/display/snps,arcpgu.txt
-
ARM ARCHITECTED TIMER DRIVER
M: Mark Rutland <mark.rutland@arm.com>
M: Marc Zyngier <marc.zyngier@arm.com>
@@ -995,18 +1024,17 @@ S: Maintained
T: git git://git.armlinux.org.uk/~rmk/linux-arm.git
F: arch/arm/
-ARM SUB-ARCHITECTURES
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Maintained
-F: arch/arm/mach-*/
-F: arch/arm/plat-*/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc.git
-
ARM PRIMECELL AACI PL041 DRIVER
M: Russell King <linux@armlinux.org.uk>
S: Maintained
F: sound/arm/aaci.*
+ARM PRIMECELL BUS SUPPORT
+M: Russell King <linux@armlinux.org.uk>
+S: Maintained
+F: drivers/amba/
+F: include/linux/amba/bus.h
+
ARM PRIMECELL CLCD PL110 DRIVER
M: Russell King <linux@armlinux.org.uk>
S: Maintained
@@ -1030,11 +1058,22 @@ S: Maintained
F: drivers/tty/serial/amba-pl01*.c
F: include/linux/amba/serial.h
-ARM PRIMECELL BUS SUPPORT
-M: Russell King <linux@armlinux.org.uk>
+ARM SMMU DRIVERS
+M: Will Deacon <will.deacon@arm.com>
+R: Robin Murphy <robin.murphy@arm.com>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
-F: drivers/amba/
-F: include/linux/amba/bus.h
+F: drivers/iommu/arm-smmu.c
+F: drivers/iommu/arm-smmu-v3.c
+F: drivers/iommu/io-pgtable-arm.c
+F: drivers/iommu/io-pgtable-arm-v7s.c
+
+ARM SUB-ARCHITECTURES
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: arch/arm/mach-*/
+F: arch/arm/plat-*/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc.git
ARM/ACTIONS SEMI ARCHITECTURE
M: Andreas Färber <afaerber@suse.de>
@@ -1067,6 +1106,11 @@ M: Lennert Buytenhek <kernel@wantstofly.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
+ARM/Allwinner SoC Clock Support
+M: Emilio López <emilio@elopez.com.ar>
+S: Maintained
+F: drivers/clk/sunxi/
+
ARM/Allwinner sunXi SoC support
M: Maxime Ripard <maxime.ripard@free-electrons.com>
M: Chen-Yu Tsai <wens@csie.org>
@@ -1081,10 +1125,15 @@ F: drivers/pinctrl/sunxi/
F: drivers/soc/sunxi/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/sunxi/linux.git
-ARM/Allwinner SoC Clock Support
-M: Emilio López <emilio@elopez.com.ar>
+ARM/Amlogic Meson SoC CLOCK FRAMEWORK
+M: Neil Armstrong <narmstrong@baylibre.com>
+M: Jerome Brunet <jbrunet@baylibre.com>
+L: linux-amlogic@lists.infradead.org
S: Maintained
-F: drivers/clk/sunxi/
+F: drivers/clk/meson/
+F: include/dt-bindings/clock/meson*
+F: include/dt-bindings/clock/gxbb*
+F: Documentation/devicetree/bindings/clock/amlogic*
ARM/Amlogic Meson SoC support
M: Carlo Caione <carlo@caione.org>
@@ -1096,20 +1145,10 @@ S: Maintained
F: arch/arm/mach-meson/
F: arch/arm/boot/dts/meson*
F: arch/arm64/boot/dts/amlogic/
-F: drivers/pinctrl/meson/
+F: drivers/pinctrl/meson/
F: drivers/mmc/host/meson*
N: meson
-ARM/Amlogic Meson SoC CLOCK FRAMEWORK
-M: Neil Armstrong <narmstrong@baylibre.com>
-M: Jerome Brunet <jbrunet@baylibre.com>
-L: linux-amlogic@lists.infradead.org
-S: Maintained
-F: drivers/clk/meson/
-F: include/dt-bindings/clock/meson*
-F: include/dt-bindings/clock/gxbb*
-F: Documentation/devicetree/bindings/clock/amlogic*
-
ARM/Annapurna Labs ALPINE ARCHITECTURE
M: Tsahee Zidenberg <tsahee@annapurnalabs.com>
M: Antoine Tenart <antoine.tenart@free-electrons.com>
@@ -1129,28 +1168,34 @@ L: linux-arm-kernel@axis.com
F: arch/arm/mach-artpec
F: arch/arm/boot/dts/artpec6*
F: drivers/clk/axis
+F: drivers/crypto/axis
F: drivers/pinctrl/pinctrl-artpec*
F: Documentation/devicetree/bindings/pinctrl/axis,artpec6-pinctrl.txt
-ARM/ASPEED MACHINE SUPPORT
-M: Joel Stanley <joel@jms.id.au>
-S: Maintained
-F: arch/arm/mach-aspeed/
-F: arch/arm/boot/dts/aspeed-*
-F: drivers/*/*aspeed*
-
ARM/ASPEED I2C DRIVER
M: Brendan Higgins <brendanhiggins@google.com>
R: Benjamin Herrenschmidt <benh@kernel.crashing.org>
R: Joel Stanley <joel@jms.id.au>
L: linux-i2c@vger.kernel.org
-L: openbmc@lists.ozlabs.org
+L: openbmc@lists.ozlabs.org (moderated for non-subscribers)
S: Maintained
F: drivers/irqchip/irq-aspeed-i2c-ic.c
F: drivers/i2c/busses/i2c-aspeed.c
F: Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2400-i2c-ic.txt
F: Documentation/devicetree/bindings/i2c/i2c-aspeed.txt
+ARM/ASPEED MACHINE SUPPORT
+M: Joel Stanley <joel@jms.id.au>
+S: Maintained
+F: arch/arm/mach-aspeed/
+F: arch/arm/boot/dts/aspeed-*
+F: drivers/*/*aspeed*
+
+ARM/ATMEL AT91 Clock Support
+M: Boris Brezillon <boris.brezillon@free-electrons.com>
+S: Maintained
+F: drivers/clk/at91
+
ARM/ATMEL AT91RM9200, AT91SAM9 AND SAMA5 SOC SUPPORT
M: Nicolas Ferre <nicolas.ferre@microchip.com>
M: Alexandre Belloni <alexandre.belloni@free-electrons.com>
@@ -1167,11 +1212,6 @@ F: arch/arm/boot/dts/sama*.dtsi
F: arch/arm/include/debug/at91.S
F: drivers/memory/atmel*
-ARM/ATMEL AT91 Clock Support
-M: Boris Brezillon <boris.brezillon@free-electrons.com>
-S: Maintained
-F: drivers/clk/at91
-
ARM/CALXEDA HIGHBANK ARCHITECTURE
M: Rob Herring <robh@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1198,6 +1238,11 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Odd Fixes
N: clps711x
+ARM/CIRRUS LOGIC EDB9315A MACHINE SUPPORT
+M: Lennert Buytenhek <kernel@wantstofly.org>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+
ARM/CIRRUS LOGIC EP93XX ARM ARCHITECTURE
M: Hartley Sweeten <hsweeten@visionengravers.com>
M: Alexander Sverdlin <alexander.sverdlin@gmail.com>
@@ -1206,11 +1251,6 @@ S: Maintained
F: arch/arm/mach-ep93xx/
F: arch/arm/mach-ep93xx/include/mach/
-ARM/CIRRUS LOGIC EDB9315A MACHINE SUPPORT
-M: Lennert Buytenhek <kernel@wantstofly.org>
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Maintained
-
ARM/CLKDEV SUPPORT
M: Russell King <linux@armlinux.org.uk>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1224,6 +1264,13 @@ M: Mike Rapoport <mike@compulab.co.il>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
+ARM/CONEXANT DIGICOLOR MACHINE SUPPORT
+M: Baruch Siach <baruch@tkos.co.il>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: arch/arm/boot/dts/cx92755*
+N: digicolor
+
ARM/CONTEC MICRO9 MACHINE SUPPORT
M: Hubert Feurstein <hubert.feurstein@contec.at>
S: Maintained
@@ -1251,10 +1298,15 @@ S: Maintained
ARM/CORTINA SYSTEMS GEMINI ARM ARCHITECTURE
M: Hans Ulli Kroll <ulli.kroll@googlemail.com>
+M: Linus Walleij <linus.walleij@linaro.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
T: git git://github.com/ulli-kroll/linux.git
S: Maintained
+F: Documentation/devicetree/bindings/arm/gemini.txt
+F: Documentation/devicetree/bindings/pinctrl/cortina,gemini-pinctrl.txt
+F: Documentation/devicetree/bindings/rtc/faraday,ftrtc010.txt
F: arch/arm/mach-gemini/
+F: drivers/pinctrl/pinctrl-gemini.c
F: drivers/rtc/rtc-ftrtc010.c
ARM/CSR SIRFPRIMA2 MACHINE SUPPORT
@@ -1269,13 +1321,6 @@ F: drivers/clocksource/timer-prima2.c
F: drivers/clocksource/timer-atlas7.c
N: [^a-z]sirf
-ARM/CONEXANT DIGICOLOR MACHINE SUPPORT
-M: Baruch Siach <baruch@tkos.co.il>
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Maintained
-F: arch/arm/boot/dts/cx92755*
-N: digicolor
-
ARM/EBSA110 MACHINE SUPPORT
M: Russell King <linux@armlinux.org.uk>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1389,6 +1434,11 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-pxa/colibri-pxa270-income.c
+ARM/INTEL IOP13XX ARM ARCHITECTURE
+M: Lennert Buytenhek <kernel@wantstofly.org>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+
ARM/INTEL IOP32X ARM ARCHITECTURE
M: Lennert Buytenhek <kernel@wantstofly.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1398,11 +1448,6 @@ ARM/INTEL IOP33X ARM ARCHITECTURE
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Orphan
-ARM/INTEL IOP13XX ARM ARCHITECTURE
-M: Lennert Buytenhek <kernel@wantstofly.org>
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Maintained
-
ARM/INTEL IQ81342EX MACHINE SUPPORT
M: Lennert Buytenhek <kernel@wantstofly.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1437,39 +1482,6 @@ M: Lennert Buytenhek <kernel@wantstofly.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
-ARM/TEXAS INSTRUMENT KEYSTONE ARCHITECTURE
-M: Santosh Shilimkar <ssantosh@kernel.org>
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Maintained
-F: arch/arm/mach-keystone/
-F: arch/arm/boot/dts/keystone-*
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git
-
-ARM/TEXAS INSTRUMENT KEYSTONE CLOCK FRAMEWORK
-M: Santosh Shilimkar <ssantosh@kernel.org>
-L: linux-kernel@vger.kernel.org
-S: Maintained
-F: drivers/clk/keystone/
-
-ARM/TEXAS INSTRUMENT KEYSTONE ClOCKSOURCE
-M: Santosh Shilimkar <ssantosh@kernel.org>
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-L: linux-kernel@vger.kernel.org
-S: Maintained
-F: drivers/clocksource/timer-keystone.c
-
-ARM/TEXAS INSTRUMENT KEYSTONE RESET DRIVER
-M: Santosh Shilimkar <ssantosh@kernel.org>
-L: linux-kernel@vger.kernel.org
-S: Maintained
-F: drivers/power/reset/keystone-reset.c
-
-ARM/TEXAS INSTRUMENT AEMIF/EMIF DRIVERS
-M: Santosh Shilimkar <ssantosh@kernel.org>
-L: linux-kernel@vger.kernel.org
-S: Maintained
-F: drivers/memory/*emif*
-
ARM/LG1K ARCHITECTURE
M: Chanho Min <chanho.min@lge.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1512,24 +1524,6 @@ ARM/MAGICIAN MACHINE SUPPORT
M: Philipp Zabel <philipp.zabel@gmail.com>
S: Maintained
-ARM/Marvell Kirkwood and Armada 370, 375, 38x, 39x, XP, 3700, 7K/8K SOC support
-M: Jason Cooper <jason@lakedaemon.net>
-M: Andrew Lunn <andrew@lunn.ch>
-M: Gregory Clement <gregory.clement@free-electrons.com>
-M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Maintained
-F: arch/arm/boot/dts/armada*
-F: arch/arm/boot/dts/kirkwood*
-F: arch/arm/configs/mvebu_*_defconfig
-F: arch/arm/mach-mvebu/
-F: arch/arm64/boot/dts/marvell/armada*
-F: drivers/cpufreq/mvebu-cpufreq.c
-F: drivers/irqchip/irq-armada-370-xp.c
-F: drivers/irqchip/irq-mvebu-*
-F: drivers/pinctrl/mvebu/
-F: drivers/rtc/rtc-armada38x.c
-
ARM/Marvell Berlin SoC support
M: Jisheng Zhang <jszhang@marvell.com>
M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
@@ -1539,7 +1533,6 @@ F: arch/arm/mach-berlin/
F: arch/arm/boot/dts/berlin*
F: arch/arm64/boot/dts/marvell/berlin*
-
ARM/Marvell Dove/MV78xx0/Orion SOC support
M: Jason Cooper <jason@lakedaemon.net>
M: Andrew Lunn <andrew@lunn.ch>
@@ -1555,24 +1548,23 @@ F: arch/arm/plat-orion/
F: arch/arm/boot/dts/dove*
F: arch/arm/boot/dts/orion5x*
-
-ARM/Orion SoC/Technologic Systems TS-78xx platform support
-M: Alexander Clouter <alex@digriz.org.uk>
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W: http://www.digriz.org.uk/ts78xx/kernel
-S: Maintained
-F: arch/arm/mach-orion5x/ts78xx-*
-
-ARM/OXNAS platform support
-M: Neil Armstrong <narmstrong@baylibre.com>
+ARM/Marvell Kirkwood and Armada 370, 375, 38x, 39x, XP, 3700, 7K/8K SOC support
+M: Jason Cooper <jason@lakedaemon.net>
+M: Andrew Lunn <andrew@lunn.ch>
+M: Gregory Clement <gregory.clement@free-electrons.com>
+M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-L: linux-oxnas@lists.tuxfamily.org (moderated for non-subscribers)
S: Maintained
-F: arch/arm/mach-oxnas/
-F: arch/arm/boot/dts/ox8*.dtsi
-F: arch/arm/boot/dts/wd-mbwe.dts
-F: arch/arm/boot/dts/cloudengines-pogoplug-series-3.dts
-N: oxnas
+F: arch/arm/boot/dts/armada*
+F: arch/arm/boot/dts/kirkwood*
+F: arch/arm/configs/mvebu_*_defconfig
+F: arch/arm/mach-mvebu/
+F: arch/arm64/boot/dts/marvell/armada*
+F: drivers/cpufreq/mvebu-cpufreq.c
+F: drivers/irqchip/irq-armada-370-xp.c
+F: drivers/irqchip/irq-mvebu-*
+F: drivers/pinctrl/mvebu/
+F: drivers/rtc/rtc-armada38x.c
ARM/Mediatek RTC DRIVER
M: Eddie Huang <eddie.huang@mediatek.com>
@@ -1599,7 +1591,7 @@ M: Chunfeng Yun <chunfeng.yun@mediatek.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-mediatek@lists.infradead.org (moderated for non-subscribers)
S: Maintained
-F: drivers/phy/phy-mt65xx-usb3.c
+F: drivers/phy/mediatek/phy-mtk-tphy.c
ARM/MICREL KS8695 ARCHITECTURE
M: Greg Ungerer <gerg@uclinux.org>
@@ -1627,16 +1619,53 @@ F: drivers/pinctrl/nomadik/
F: drivers/i2c/busses/i2c-nomadik.c
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-nomadik.git
+ARM/NUVOTON W90X900 ARM ARCHITECTURE
+M: Wan ZongShun <mcuos.com@gmail.com>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+W: http://www.mcuos.com
+S: Maintained
+F: arch/arm/mach-w90x900/
+F: drivers/input/keyboard/w90p910_keypad.c
+F: drivers/input/touchscreen/w90p910_ts.c
+F: drivers/watchdog/nuc900_wdt.c
+F: drivers/net/ethernet/nuvoton/w90p910_ether.c
+F: drivers/mtd/nand/nuc900_nand.c
+F: drivers/rtc/rtc-nuc900.c
+F: drivers/spi/spi-nuc900.c
+F: drivers/usb/host/ehci-w90x900.c
+F: drivers/video/fbdev/nuc900fb.c
+
ARM/OPENMOKO NEO FREERUNNER (GTA02) MACHINE SUPPORT
M: Nelson Castillo <arhuaco@freaks-unidos.net>
L: openmoko-kernel@lists.openmoko.org (subscribers-only)
W: http://wiki.openmoko.org/wiki/Neo_FreeRunner
S: Supported
-ARM/TOSA MACHINE SUPPORT
-M: Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
-M: Dirk Opfer <dirk@opfer-online.de>
+ARM/Orion SoC/Technologic Systems TS-78xx platform support
+M: Alexander Clouter <alex@digriz.org.uk>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+W: http://www.digriz.org.uk/ts78xx/kernel
+S: Maintained
+F: arch/arm/mach-orion5x/ts78xx-*
+
+ARM/OXNAS platform support
+M: Neil Armstrong <narmstrong@baylibre.com>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L: linux-oxnas@lists.tuxfamily.org (moderated for non-subscribers)
+S: Maintained
+F: arch/arm/mach-oxnas/
+F: arch/arm/boot/dts/ox8*.dtsi
+F: arch/arm/boot/dts/wd-mbwe.dts
+F: arch/arm/boot/dts/cloudengines-pogoplug-series-3.dts
+N: oxnas
+
+ARM/PALM TREO SUPPORT
+M: Tomas Cech <sleep_walker@suse.com>
+L: linux-arm-kernel@lists.infradead.org
+W: http://hackndev.com
S: Maintained
+F: arch/arm/mach-pxa/include/mach/palmtreo.h
+F: arch/arm/mach-pxa/palmtreo.c
ARM/PALMTX,PALMT5,PALMLD,PALMTE2,PALMTC SUPPORT
M: Marek Vasut <marek.vasut@gmail.com>
@@ -1654,14 +1683,6 @@ F: arch/arm/mach-pxa/palmte2.c
F: arch/arm/mach-pxa/include/mach/palmtc.h
F: arch/arm/mach-pxa/palmtc.c
-ARM/PALM TREO SUPPORT
-M: Tomas Cech <sleep_walker@suse.com>
-L: linux-arm-kernel@lists.infradead.org
-W: http://hackndev.com
-S: Maintained
-F: arch/arm/mach-pxa/include/mach/palmtreo.h
-F: arch/arm/mach-pxa/palmtreo.c
-
ARM/PALMZ72 SUPPORT
M: Sergey Lapin <slapin@ossfans.org>
L: linux-arm-kernel@lists.infradead.org
@@ -1802,17 +1823,6 @@ L: linux-media@vger.kernel.org
S: Maintained
F: drivers/media/platform/s5p-g2d/
-ARM/SAMSUNG S5P SERIES Multi Format Codec (MFC) SUPPORT
-M: Kyungmin Park <kyungmin.park@samsung.com>
-M: Kamil Debski <kamil@wypas.org>
-M: Jeongtae Park <jtp.park@samsung.com>
-M: Andrzej Hajda <a.hajda@samsung.com>
-L: linux-arm-kernel@lists.infradead.org
-L: linux-media@vger.kernel.org
-S: Maintained
-F: arch/arm/plat-samsung/s5p-dev-mfc.c
-F: drivers/media/platform/s5p-mfc/
-
ARM/SAMSUNG S5P SERIES HDMI CEC SUBSYSTEM SUPPORT
M: Marek Szyprowski <m.szyprowski@samsung.com>
L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
@@ -1829,6 +1839,17 @@ L: linux-media@vger.kernel.org
S: Maintained
F: drivers/media/platform/s5p-jpeg/
+ARM/SAMSUNG S5P SERIES Multi Format Codec (MFC) SUPPORT
+M: Kyungmin Park <kyungmin.park@samsung.com>
+M: Kamil Debski <kamil@wypas.org>
+M: Jeongtae Park <jtp.park@samsung.com>
+M: Andrzej Hajda <a.hajda@samsung.com>
+L: linux-arm-kernel@lists.infradead.org
+L: linux-media@vger.kernel.org
+S: Maintained
+F: arch/arm/plat-samsung/s5p-dev-mfc.c
+F: drivers/media/platform/s5p-mfc/
+
ARM/SHMOBILE ARM ARCHITECTURE
M: Simon Horman <horms@verge.net.au>
M: Magnus Damm <magnus.damm@gmail.com>
@@ -1922,26 +1943,48 @@ M: "Mark F. Brown" <mark.brown314@gmail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
+ARM/TEXAS INSTRUMENTS AEMIF/EMIF DRIVERS
+M: Santosh Shilimkar <ssantosh@kernel.org>
+L: linux-kernel@vger.kernel.org
+S: Maintained
+F: drivers/memory/*emif*
+
+ARM/TEXAS INSTRUMENTS KEYSTONE ARCHITECTURE
+M: Santosh Shilimkar <ssantosh@kernel.org>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: arch/arm/mach-keystone/
+F: arch/arm/boot/dts/keystone-*
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git
+
+ARM/TEXAS INSTRUMENTS KEYSTONE CLOCK FRAMEWORK
+M: Santosh Shilimkar <ssantosh@kernel.org>
+L: linux-kernel@vger.kernel.org
+S: Maintained
+F: drivers/clk/keystone/
+
+ARM/TEXAS INSTRUMENTS KEYSTONE CLOCKSOURCE
+M: Santosh Shilimkar <ssantosh@kernel.org>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L: linux-kernel@vger.kernel.org
+S: Maintained
+F: drivers/clocksource/timer-keystone.c
+
+ARM/TEXAS INSTRUMENTS KEYSTONE RESET DRIVER
+M: Santosh Shilimkar <ssantosh@kernel.org>
+L: linux-kernel@vger.kernel.org
+S: Maintained
+F: drivers/power/reset/keystone-reset.c
+
ARM/THECUS N2100 MACHINE SUPPORT
M: Lennert Buytenhek <kernel@wantstofly.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
-ARM/NUVOTON W90X900 ARM ARCHITECTURE
-M: Wan ZongShun <mcuos.com@gmail.com>
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W: http://www.mcuos.com
+ARM/TOSA MACHINE SUPPORT
+M: Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+M: Dirk Opfer <dirk@opfer-online.de>
S: Maintained
-F: arch/arm/mach-w90x900/
-F: drivers/input/keyboard/w90p910_keypad.c
-F: drivers/input/touchscreen/w90p910_ts.c
-F: drivers/watchdog/nuc900_wdt.c
-F: drivers/net/ethernet/nuvoton/w90p910_ether.c
-F: drivers/mtd/nand/nuc900_nand.c
-F: drivers/rtc/rtc-nuc900.c
-F: drivers/spi/spi-nuc900.c
-F: drivers/usb/host/ehci-w90x900.c
-F: drivers/video/fbdev/nuc900fb.c
ARM/U300 MACHINE SUPPORT
M: Linus Walleij <linus.walleij@linaro.org>
@@ -1971,6 +2014,7 @@ F: arch/arm64/boot/dts/socionext/
F: drivers/bus/uniphier-system-bus.c
F: drivers/clk/uniphier/
F: drivers/i2c/busses/i2c-uniphier*
+F: drivers/irqchip/irq-uniphier-aidet.c
F: drivers/pinctrl/uniphier/
F: drivers/reset/reset-uniphier.c
F: drivers/tty/serial/8250/8250_uniphier.c
@@ -2086,16 +2130,6 @@ F: drivers/i2c/busses/i2c-cadence.c
F: drivers/mmc/host/sdhci-of-arasan.c
F: drivers/edac/synopsys_edac.c
-ARM SMMU DRIVERS
-M: Will Deacon <will.deacon@arm.com>
-R: Robin Murphy <robin.murphy@arm.com>
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Maintained
-F: drivers/iommu/arm-smmu.c
-F: drivers/iommu/arm-smmu-v3.c
-F: drivers/iommu/io-pgtable-arm.c
-F: drivers/iommu/io-pgtable-arm-v7s.c
-
ARM64 PORT (AARCH64 ARCHITECTURE)
M: Catalin Marinas <catalin.marinas@arm.com>
M: Will Deacon <will.deacon@arm.com>
@@ -2106,6 +2140,12 @@ F: arch/arm64/
F: Documentation/arm64/
AS3645A LED FLASH CONTROLLER DRIVER
+M: Sakari Ailus <sakari.ailus@iki.fi>
+L: linux-leds@vger.kernel.org
+S: Maintained
+F: drivers/leds/leds-as3645a.c
+
+AS3645A LED FLASH CONTROLLER DRIVER
M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
L: linux-media@vger.kernel.org
T: git git://linuxtv.org/media_tree.git
@@ -2207,21 +2247,10 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
S: Supported
F: drivers/net/wireless/ath/ath6kl/
-WILOCITY WIL6210 WIRELESS DRIVER
-M: Maya Erez <qca_merez@qca.qualcomm.com>
-L: linux-wireless@vger.kernel.org
-L: wil6210@qca.qualcomm.com
-S: Supported
-W: http://wireless.kernel.org/en/users/Drivers/wil6210
-F: drivers/net/wireless/ath/wil6210/
-F: include/uapi/linux/wil6210_uapi.h
-
-CARL9170 LINUX COMMUNITY WIRELESS DRIVER
-M: Christian Lamparter <chunkeey@googlemail.com>
-L: linux-wireless@vger.kernel.org
-W: http://wireless.kernel.org/en/users/Drivers/carl9170
+ATI_REMOTE2 DRIVER
+M: Ville Syrjala <syrjala@sci.fi>
S: Maintained
-F: drivers/net/wireless/ath/carl9170/
+F: drivers/input/misc/ati_remote2.c
ATK0110 HWMON DRIVER
M: Luca Tettamanti <kronos.it@gmail.com>
@@ -2229,11 +2258,6 @@ L: linux-hwmon@vger.kernel.org
S: Maintained
F: drivers/hwmon/asus_atk0110.c
-ATI_REMOTE2 DRIVER
-M: Ville Syrjala <syrjala@sci.fi>
-S: Maintained
-F: drivers/input/misc/ati_remote2.c
-
ATLX ETHERNET DRIVERS
M: Jay Cliburn <jcliburn@gmail.com>
M: Chris Snook <chris.snook@gmail.com>
@@ -2263,25 +2287,12 @@ M: Nicolas Ferre <nicolas.ferre@microchip.com>
S: Supported
F: drivers/power/reset/at91-sama5d2_shdwc.c
-ATMEL SAMA5D2 ADC DRIVER
-M: Ludovic Desroches <ludovic.desroches@microchip.com>
-L: linux-iio@vger.kernel.org
-S: Supported
-F: drivers/iio/adc/at91-sama5d2_adc.c
-
ATMEL Audio ALSA driver
M: Nicolas Ferre <nicolas.ferre@microchip.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Supported
F: sound/soc/atmel
-ATMEL XDMA DRIVER
-M: Ludovic Desroches <ludovic.desroches@microchip.com>
-L: linux-arm-kernel@lists.infradead.org
-L: dmaengine@vger.kernel.org
-S: Supported
-F: drivers/dma/at_xdmac.c
-
ATMEL I2C DRIVER
M: Ludovic Desroches <ludovic.desroches@microchip.com>
L: linux-i2c@vger.kernel.org
@@ -2307,6 +2318,14 @@ M: Nicolas Ferre <nicolas.ferre@microchip.com>
S: Supported
F: drivers/net/ethernet/cadence/
+ATMEL MAXTOUCH DRIVER
+M: Nick Dyer <nick@shmanahar.org>
+T: git git://github.com/ndyer/linux.git
+S: Maintained
+F: Documentation/devicetree/bindings/input/atmel,maxtouch.txt
+F: drivers/input/touchscreen/atmel_mxt_ts.c
+F: include/linux/platform_data/atmel_mxt_ts.h
+
ATMEL NAND DRIVER
M: Wenyou Yang <wenyou.yang@atmel.com>
M: Josh Wu <rainyfeeling@outlook.com>
@@ -2314,6 +2333,12 @@ L: linux-mtd@lists.infradead.org
S: Supported
F: drivers/mtd/nand/atmel/*
+ATMEL SAMA5D2 ADC DRIVER
+M: Ludovic Desroches <ludovic.desroches@microchip.com>
+L: linux-iio@vger.kernel.org
+S: Supported
+F: drivers/iio/adc/at91-sama5d2_adc.c
+
ATMEL SDMMC DRIVER
M: Ludovic Desroches <ludovic.desroches@microchip.com>
L: linux-mmc@vger.kernel.org
@@ -2353,13 +2378,12 @@ W: http://atmelwlandriver.sourceforge.net/
S: Maintained
F: drivers/net/wireless/atmel/atmel*
-ATMEL MAXTOUCH DRIVER
-M: Nick Dyer <nick@shmanahar.org>
-T: git git://github.com/ndyer/linux.git
-S: Maintained
-F: Documentation/devicetree/bindings/input/atmel,maxtouch.txt
-F: drivers/input/touchscreen/atmel_mxt_ts.c
-F: include/linux/platform_data/atmel_mxt_ts.h
+ATMEL XDMA DRIVER
+M: Ludovic Desroches <ludovic.desroches@microchip.com>
+L: linux-arm-kernel@lists.infradead.org
+L: dmaengine@vger.kernel.org
+S: Supported
+F: drivers/dma/at_xdmac.c
ATOMIC INFRASTRUCTURE
M: Will Deacon <will.deacon@arm.com>
@@ -2413,13 +2437,6 @@ F: include/uapi/linux/ax25.h
F: include/net/ax25.h
F: net/ax25/
-AXENTIA ASOC DRIVERS
-M: Peter Rosin <peda@axentia.se>
-L: alsa-devel@alsa-project.org (moderated for non-subscribers)
-S: Maintained
-F: Documentation/devicetree/bindings/sound/axentia,*
-F: sound/soc/atmel/tse850-pcm5142.c
-
AXENTIA ARM DEVICES
M: Peter Rosin <peda@axentia.se>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -2428,6 +2445,13 @@ F: Documentation/devicetree/bindings/arm/axentia.txt
F: arch/arm/boot/dts/at91-linea.dtsi
F: arch/arm/boot/dts/at91-tse850-3.dts
+AXENTIA ASOC DRIVERS
+M: Peter Rosin <peda@axentia.se>
+L: alsa-devel@alsa-project.org (moderated for non-subscribers)
+S: Maintained
+F: Documentation/devicetree/bindings/sound/axentia,*
+F: sound/soc/atmel/tse850-pcm5142.c
+
AZ6007 DVB DRIVER
M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
M: Mauro Carvalho Chehab <mchehab@kernel.org>
@@ -2481,7 +2505,7 @@ Q: https://patchwork.open-mesh.org/project/batman/list/
S: Maintained
F: Documentation/ABI/testing/sysfs-class-net-batman-adv
F: Documentation/ABI/testing/sysfs-class-net-mesh
-F: Documentation/networking/batman-adv.txt
+F: Documentation/networking/batman-adv.rst
F: include/uapi/linux/batman_adv.h
F: net/batman-adv/
@@ -2507,13 +2531,11 @@ W: https://linuxtv.org
S: Supported
F: drivers/media/platform/sti/bdisp
-DELTA ST MEDIA DRIVER
-M: Hugues Fruchet <hugues.fruchet@st.com>
-L: linux-media@vger.kernel.org
-T: git git://linuxtv.org/media_tree.git
-W: https://linuxtv.org
-S: Supported
-F: drivers/media/platform/sti/delta
+BECKHOFF CX5020 ETHERCAT MASTER DRIVER
+M: Dariusz Marcinkiewicz <reksio@newterm.pl>
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/net/ethernet/ec_bhf.c
BEFS FILE SYSTEM
M: Luis de Bethencourt <luisbg@kernel.org>
@@ -2523,11 +2545,13 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/luisbg/linux-befs.git
F: Documentation/filesystems/befs.txt
F: fs/befs/
-BECKHOFF CX5020 ETHERCAT MASTER DRIVER
-M: Dariusz Marcinkiewicz <reksio@newterm.pl>
-L: netdev@vger.kernel.org
+BFQ I/O SCHEDULER
+M: Paolo Valente <paolo.valente@linaro.org>
+M: Jens Axboe <axboe@kernel.dk>
+L: linux-block@vger.kernel.org
S: Maintained
-F: drivers/net/ethernet/ec_bhf.c
+F: block/bfq-*
+F: Documentation/block/bfq-iosched.txt
BFS FILE SYSTEM
M: "Tigran A. Aivazian" <aivazian.tigran@gmail.com>
@@ -2550,6 +2574,22 @@ W: http://blackfin.uclinux.org
S: Supported
F: drivers/net/ethernet/adi/
+BLACKFIN I2C TWI DRIVER
+M: Sonic Zhang <sonic.zhang@analog.com>
+L: adi-buildroot-devel@lists.sourceforge.net (moderated for non-subscribers)
+W: http://blackfin.uclinux.org/
+S: Supported
+F: drivers/i2c/busses/i2c-bfin-twi.c
+
+BLACKFIN MEDIA DRIVER
+M: Scott Jiang <scott.jiang.linux@gmail.com>
+L: adi-buildroot-devel@lists.sourceforge.net (moderated for non-subscribers)
+W: http://blackfin.uclinux.org/
+S: Supported
+F: drivers/media/platform/blackfin/
+F: drivers/media/i2c/adv7183*
+F: drivers/media/i2c/vs6624*
+
BLACKFIN RTC DRIVER
L: adi-buildroot-devel@lists.sourceforge.net (moderated for non-subscribers)
W: http://blackfin.uclinux.org
@@ -2576,22 +2616,6 @@ W: http://blackfin.uclinux.org
S: Supported
F: drivers/watchdog/bfin_wdt.c
-BLACKFIN I2C TWI DRIVER
-M: Sonic Zhang <sonic.zhang@analog.com>
-L: adi-buildroot-devel@lists.sourceforge.net (moderated for non-subscribers)
-W: http://blackfin.uclinux.org/
-S: Supported
-F: drivers/i2c/busses/i2c-bfin-twi.c
-
-BLACKFIN MEDIA DRIVER
-M: Scott Jiang <scott.jiang.linux@gmail.com>
-L: adi-buildroot-devel@lists.sourceforge.net (moderated for non-subscribers)
-W: http://blackfin.uclinux.org/
-S: Supported
-F: drivers/media/platform/blackfin/
-F: drivers/media/i2c/adv7183*
-F: drivers/media/i2c/vs6624*
-
BLINKM RGB LED DRIVER
M: Jan-Simon Moeller <jansimon.moeller@gmx.de>
S: Maintained
@@ -2606,14 +2630,6 @@ F: block/
F: kernel/trace/blktrace.c
F: lib/sbitmap.c
-BFQ I/O SCHEDULER
-M: Paolo Valente <paolo.valente@linaro.org>
-M: Jens Axboe <axboe@kernel.dk>
-L: linux-block@vger.kernel.org
-S: Maintained
-F: block/bfq-*
-F: Documentation/block/bfq-iosched.txt
-
BLOCK2MTD DRIVER
M: Joern Engel <joern@lazybastard.org>
L: linux-mtd@lists.infradead.org
@@ -2643,21 +2659,6 @@ S: Maintained
F: net/bluetooth/
F: include/net/bluetooth/
-DMA MAPPING HELPERS
-M: Christoph Hellwig <hch@lst.de>
-M: Marek Szyprowski <m.szyprowski@samsung.com>
-R: Robin Murphy <robin.murphy@arm.com>
-L: linux-kernel@vger.kernel.org
-T: git git://git.infradead.org/users/hch/dma-mapping.git
-W: http://git.infradead.org/users/hch/dma-mapping.git
-S: Supported
-F: lib/dma-debug.c
-F: lib/dma-noop.c
-F: lib/dma-virt.c
-F: drivers/base/dma-mapping.c
-F: drivers/base/dma-coherent.c
-F: include/linux/dma-mapping.h
-
BONDING DRIVER
M: Jay Vosburgh <j.vosburgh@gmail.com>
M: Veaceslav Falico <vfalico@gmail.com>
@@ -2705,35 +2706,6 @@ S: Supported
F: drivers/net/dsa/b53/*
F: include/linux/platform_data/b53.h
-BROADCOM GENET ETHERNET DRIVER
-M: Florian Fainelli <f.fainelli@gmail.com>
-L: netdev@vger.kernel.org
-S: Supported
-F: drivers/net/ethernet/broadcom/genet/
-
-BROADCOM BNX2 GIGABIT ETHERNET DRIVER
-M: Rasesh Mody <rasesh.mody@cavium.com>
-M: Harish Patil <harish.patil@cavium.com>
-M: Dept-GELinuxNICDev@cavium.com
-L: netdev@vger.kernel.org
-S: Supported
-F: drivers/net/ethernet/broadcom/bnx2.*
-F: drivers/net/ethernet/broadcom/bnx2_*
-
-BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
-M: Yuval Mintz <Yuval.Mintz@cavium.com>
-M: Ariel Elior <ariel.elior@cavium.com>
-M: everest-linux-l2@cavium.com
-L: netdev@vger.kernel.org
-S: Supported
-F: drivers/net/ethernet/broadcom/bnx2x/
-
-BROADCOM BNXT_EN 50 GIGABIT ETHERNET DRIVER
-M: Michael Chan <michael.chan@broadcom.com>
-L: netdev@vger.kernel.org
-S: Supported
-F: drivers/net/ethernet/broadcom/bnxt/
-
BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE
M: Florian Fainelli <f.fainelli@gmail.com>
M: Ray Jui <rjui@broadcom.com>
@@ -2812,6 +2784,13 @@ F: arch/arm/boot/dts/bcm7*.dts*
F: drivers/bus/brcmstb_gisb.c
N: brcmstb
+BROADCOM BMIPS CPUFREQ DRIVER
+M: Markus Mayer <mmayer@broadcom.com>
+M: bcm-kernel-feedback-list@broadcom.com
+L: linux-pm@vger.kernel.org
+S: Maintained
+F: drivers/cpufreq/bmips-cpufreq.c
+
BROADCOM BMIPS MIPS ARCHITECTURE
M: Kevin Cernekee <cernekee@gmail.com>
M: Florian Fainelli <f.fainelli@gmail.com>
@@ -2828,20 +2807,40 @@ F: drivers/irqchip/irq-brcmstb*
F: include/linux/bcm963xx_nvram.h
F: include/linux/bcm963xx_tag.h
-BROADCOM BMIPS CPUFREQ DRIVER
-M: Markus Mayer <mmayer@broadcom.com>
-M: bcm-kernel-feedback-list@broadcom.com
-L: linux-pm@vger.kernel.org
-S: Maintained
-F: drivers/cpufreq/bmips-cpufreq.c
+BROADCOM BNX2 GIGABIT ETHERNET DRIVER
+M: Rasesh Mody <rasesh.mody@cavium.com>
+M: Harish Patil <harish.patil@cavium.com>
+M: Dept-GELinuxNICDev@cavium.com
+L: netdev@vger.kernel.org
+S: Supported
+F: drivers/net/ethernet/broadcom/bnx2.*
+F: drivers/net/ethernet/broadcom/bnx2_*
-BROADCOM TG3 GIGABIT ETHERNET DRIVER
-M: Siva Reddy Kallam <siva.kallam@broadcom.com>
-M: Prashant Sreedharan <prashant@broadcom.com>
-M: Michael Chan <mchan@broadcom.com>
+BROADCOM BNX2FC 10 GIGABIT FCOE DRIVER
+M: QLogic-Storage-Upstream@qlogic.com
+L: linux-scsi@vger.kernel.org
+S: Supported
+F: drivers/scsi/bnx2fc/
+
+BROADCOM BNX2I 1/10 GIGABIT iSCSI DRIVER
+M: QLogic-Storage-Upstream@qlogic.com
+L: linux-scsi@vger.kernel.org
+S: Supported
+F: drivers/scsi/bnx2i/
+
+BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
+M: Yuval Mintz <Yuval.Mintz@cavium.com>
+M: Ariel Elior <ariel.elior@cavium.com>
+M: everest-linux-l2@cavium.com
L: netdev@vger.kernel.org
S: Supported
-F: drivers/net/ethernet/broadcom/tg3.*
+F: drivers/net/ethernet/broadcom/bnx2x/
+
+BROADCOM BNXT_EN 50 GIGABIT ETHERNET DRIVER
+M: Michael Chan <michael.chan@broadcom.com>
+L: netdev@vger.kernel.org
+S: Supported
+F: drivers/net/ethernet/broadcom/bnxt/
BROADCOM BRCM80211 IEEE802.11n WIRELESS DRIVER
M: Arend van Spriel <arend.vanspriel@broadcom.com>
@@ -2855,17 +2854,18 @@ L: brcm80211-dev-list@cypress.com
S: Supported
F: drivers/net/wireless/broadcom/brcm80211/
-BROADCOM BNX2FC 10 GIGABIT FCOE DRIVER
-M: QLogic-Storage-Upstream@qlogic.com
-L: linux-scsi@vger.kernel.org
+BROADCOM BRCMSTB GPIO DRIVER
+M: Gregory Fong <gregory.0xf0@gmail.com>
+L: bcm-kernel-feedback-list@broadcom.com
S: Supported
-F: drivers/scsi/bnx2fc/
+F: drivers/gpio/gpio-brcmstb.c
+F: Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.txt
-BROADCOM BNX2I 1/10 GIGABIT iSCSI DRIVER
-M: QLogic-Storage-Upstream@qlogic.com
-L: linux-scsi@vger.kernel.org
+BROADCOM GENET ETHERNET DRIVER
+M: Florian Fainelli <f.fainelli@gmail.com>
+L: netdev@vger.kernel.org
S: Supported
-F: drivers/scsi/bnx2i/
+F: drivers/net/ethernet/broadcom/genet/
BROADCOM IPROC ARM ARCHITECTURE
M: Ray Jui <rjui@broadcom.com>
@@ -2892,13 +2892,6 @@ F: arch/arm64/boot/dts/broadcom/ns2*
F: drivers/clk/bcm/clk-ns*
F: drivers/pinctrl/bcm/pinctrl-ns*
-BROADCOM BRCMSTB GPIO DRIVER
-M: Gregory Fong <gregory.0xf0@gmail.com>
-L: bcm-kernel-feedback-list@broadcom.com
-S: Supported
-F: drivers/gpio/gpio-brcmstb.c
-F: Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.txt
-
BROADCOM KONA GPIO DRIVER
M: Ray Jui <rjui@broadcom.com>
L: bcm-kernel-feedback-list@broadcom.com
@@ -2906,19 +2899,29 @@ S: Supported
F: drivers/gpio/gpio-bcm-kona.c
F: Documentation/devicetree/bindings/gpio/brcm,kona-gpio.txt
+BROADCOM NETXTREME-E ROCE DRIVER
+M: Selvin Xavier <selvin.xavier@broadcom.com>
+M: Devesh Sharma <devesh.sharma@broadcom.com>
+M: Somnath Kotur <somnath.kotur@broadcom.com>
+M: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
+L: linux-rdma@vger.kernel.org
+W: http://www.broadcom.com
+S: Supported
+F: drivers/infiniband/hw/bnxt_re/
+F: include/uapi/rdma/bnxt_re-abi.h
+
BROADCOM NVRAM DRIVER
M: Rafał Miłecki <zajec5@gmail.com>
L: linux-mips@linux-mips.org
S: Maintained
F: drivers/firmware/broadcom/*
-BROADCOM STB NAND FLASH DRIVER
-M: Brian Norris <computersforpeace@gmail.com>
-M: Kamal Dasu <kdasu.kdev@gmail.com>
-L: linux-mtd@lists.infradead.org
-L: bcm-kernel-feedback-list@broadcom.com
+BROADCOM SPECIFIC AMBA DRIVER (BCMA)
+M: Rafał Miłecki <zajec5@gmail.com>
+L: linux-wireless@vger.kernel.org
S: Maintained
-F: drivers/mtd/nand/brcmnand/
+F: drivers/bcma/
+F: include/linux/bcma/
BROADCOM STB AVS CPUFREQ DRIVER
M: Markus Mayer <mmayer@broadcom.com>
@@ -2928,12 +2931,13 @@ S: Maintained
F: Documentation/devicetree/bindings/cpufreq/brcm,stb-avs-cpu-freq.txt
F: drivers/cpufreq/brcmstb*
-BROADCOM SPECIFIC AMBA DRIVER (BCMA)
-M: Rafał Miłecki <zajec5@gmail.com>
-L: linux-wireless@vger.kernel.org
+BROADCOM STB NAND FLASH DRIVER
+M: Brian Norris <computersforpeace@gmail.com>
+M: Kamal Dasu <kdasu.kdev@gmail.com>
+L: linux-mtd@lists.infradead.org
+L: bcm-kernel-feedback-list@broadcom.com
S: Maintained
-F: drivers/bcma/
-F: include/linux/bcma/
+F: drivers/mtd/nand/brcmnand/
BROADCOM SYSTEMPORT ETHERNET DRIVER
M: Florian Fainelli <f.fainelli@gmail.com>
@@ -2941,16 +2945,13 @@ L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/broadcom/bcmsysport.*
-BROADCOM NETXTREME-E ROCE DRIVER
-M: Selvin Xavier <selvin.xavier@broadcom.com>
-M: Devesh Sharma <devesh.sharma@broadcom.com>
-M: Somnath Kotur <somnath.kotur@broadcom.com>
-M: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
-L: linux-rdma@vger.kernel.org
-W: http://www.broadcom.com
+BROADCOM TG3 GIGABIT ETHERNET DRIVER
+M: Siva Reddy Kallam <siva.kallam@broadcom.com>
+M: Prashant Sreedharan <prashant@broadcom.com>
+M: Michael Chan <mchan@broadcom.com>
+L: netdev@vger.kernel.org
S: Supported
-F: drivers/infiniband/hw/bnxt_re/
-F: include/uapi/rdma/bnxt_re-abi.h
+F: drivers/net/ethernet/broadcom/tg3.*
BROCADE BFA FC SCSI DRIVER
M: Anil Gurumurthy <anil.gurumurthy@qlogic.com>
@@ -3013,6 +3014,15 @@ S: Odd fixes
F: Documentation/media/v4l-drivers/bttv*
F: drivers/media/pci/bt8xx/bttv*
+BUS FREQUENCY DRIVER FOR SAMSUNG EXYNOS
+M: Chanwoo Choi <cw00.choi@samsung.com>
+L: linux-pm@vger.kernel.org
+L: linux-samsung-soc@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/mzx/devfreq.git
+S: Maintained
+F: drivers/devfreq/exynos-bus.c
+F: Documentation/devicetree/bindings/devfreq/exynos-bus.txt
+
BUSLOGIC SCSI DRIVER
M: Khalid Aziz <khalid@gonehiking.org>
L: linux-scsi@vger.kernel.org
@@ -3087,6 +3097,21 @@ F: arch/x86/kernel/tce_64.c
F: arch/x86/include/asm/calgary.h
F: arch/x86/include/asm/tce.h
+CAN NETWORK DRIVERS
+M: Wolfgang Grandegger <wg@grandegger.com>
+M: Marc Kleine-Budde <mkl@pengutronix.de>
+L: linux-can@vger.kernel.org
+W: https://github.com/linux-can
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next.git
+S: Maintained
+F: Documentation/devicetree/bindings/net/can/
+F: drivers/net/can/
+F: include/linux/can/dev.h
+F: include/linux/can/platform/
+F: include/uapi/linux/can/error.h
+F: include/uapi/linux/can/netlink.h
+
CAN NETWORK LAYER
M: Oliver Hartkopp <socketcan@hartkopp.net>
M: Marc Kleine-Budde <mkl@pengutronix.de>
@@ -3103,21 +3128,6 @@ F: include/uapi/linux/can/bcm.h
F: include/uapi/linux/can/raw.h
F: include/uapi/linux/can/gw.h
-CAN NETWORK DRIVERS
-M: Wolfgang Grandegger <wg@grandegger.com>
-M: Marc Kleine-Budde <mkl@pengutronix.de>
-L: linux-can@vger.kernel.org
-W: https://github.com/linux-can
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can.git
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next.git
-S: Maintained
-F: Documentation/devicetree/bindings/net/can/
-F: drivers/net/can/
-F: include/linux/can/dev.h
-F: include/linux/can/platform/
-F: include/uapi/linux/can/error.h
-F: include/uapi/linux/can/netlink.h
-
CAPABILITIES
M: Serge Hallyn <serge@hallyn.com>
L: linux-security-module@vger.kernel.org
@@ -3132,12 +3142,12 @@ M: Kevin Tsai <ktsai@capellamicro.com>
S: Maintained
F: drivers/iio/light/cm*
-CAVIUM THUNDERX2 ARM64 SOC
-M: Jayachandran C <jnair@caviumnetworks.com>
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+CARL9170 LINUX COMMUNITY WIRELESS DRIVER
+M: Christian Lamparter <chunkeey@googlemail.com>
+L: linux-wireless@vger.kernel.org
+W: http://wireless.kernel.org/en/users/Drivers/carl9170
S: Maintained
-F: arch/arm64/boot/dts/cavium/thunder2-99xx*
-F: Documentation/devicetree/bindings/arm/cavium-thunder2.txt
+F: drivers/net/wireless/ath/carl9170/
CAVIUM I2C DRIVER
M: Jan Glauber <jglauber@cavium.com>
@@ -3147,6 +3157,16 @@ S: Supported
F: drivers/i2c/busses/i2c-octeon*
F: drivers/i2c/busses/i2c-thunderx*
+CAVIUM LIQUIDIO NETWORK DRIVER
+M: Derek Chickles <derek.chickles@caviumnetworks.com>
+M: Satanand Burla <satananda.burla@caviumnetworks.com>
+M: Felix Manlunas <felix.manlunas@caviumnetworks.com>
+M: Raghu Vatsavayi <raghu.vatsavayi@caviumnetworks.com>
+L: netdev@vger.kernel.org
+W: http://www.cavium.com
+S: Supported
+F: drivers/net/ethernet/cavium/liquidio/
+
CAVIUM MMC DRIVER
M: Jan Glauber <jglauber@cavium.com>
M: David Daney <david.daney@cavium.com>
@@ -3155,16 +3175,6 @@ W: http://www.cavium.com
S: Supported
F: drivers/mmc/host/cavium*
-CAVIUM LIQUIDIO NETWORK DRIVER
-M: Derek Chickles <derek.chickles@caviumnetworks.com>
-M: Satanand Burla <satananda.burla@caviumnetworks.com>
-M: Felix Manlunas <felix.manlunas@caviumnetworks.com>
-M: Raghu Vatsavayi <raghu.vatsavayi@caviumnetworks.com>
-L: netdev@vger.kernel.org
-W: http://www.cavium.com
-S: Supported
-F: drivers/net/ethernet/cavium/liquidio/
-
CAVIUM OCTEON-TX CRYPTO DRIVER
M: George Cherian <george.cherian@cavium.com>
L: linux-crypto@vger.kernel.org
@@ -3172,6 +3182,13 @@ W: http://www.cavium.com
S: Supported
F: drivers/crypto/cavium/cpt/
+CAVIUM THUNDERX2 ARM64 SOC
+M: Jayachandran C <jnair@caviumnetworks.com>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: arch/arm64/boot/dts/cavium/thunder2-99xx*
+F: Documentation/devicetree/bindings/arm/cavium-thunder2.txt
+
CC2520 IEEE-802.15.4 RADIO DRIVER
M: Varka Bhadram <varkabhadram@gmail.com>
L: linux-wpan@vger.kernel.org
@@ -3260,12 +3277,6 @@ F: drivers/usb/host/whci/
F: drivers/usb/wusbcore/
F: include/linux/usb/wusb*
-HT16K33 LED CONTROLLER DRIVER
-M: Robin van der Gracht <robin@protonic.nl>
-S: Maintained
-F: drivers/auxdisplay/ht16k33.c
-F: Documentation/devicetree/bindings/display/ht16k33.txt
-
CFAG12864B LCD DRIVER
M: Miguel Ojeda Sandonis <miguel.ojeda.sandonis@gmail.com>
W: http://miguelojeda.es/auxdisplay.htm
@@ -3337,6 +3348,34 @@ S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/bleung/chrome-platform.git
F: drivers/platform/chrome/
+CIRRUS LOGIC AUDIO CODEC DRIVERS
+M: Brian Austin <brian.austin@cirrus.com>
+M: Paul Handrigan <Paul.Handrigan@cirrus.com>
+L: alsa-devel@alsa-project.org (moderated for non-subscribers)
+S: Maintained
+F: sound/soc/codecs/cs*
+
+CIRRUS LOGIC EP93XX ETHERNET DRIVER
+M: Hartley Sweeten <hsweeten@visionengravers.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/net/ethernet/cirrus/ep93xx_eth.c
+
+CISCO FCOE HBA DRIVER
+M: Satish Kharat <satishkh@cisco.com>
+M: Sesidhar Baddela <sebaddel@cisco.com>
+M: Karan Tilak Kumar <kartilak@cisco.com>
+L: linux-scsi@vger.kernel.org
+S: Supported
+F: drivers/scsi/fnic/
+
+CISCO SCSI HBA DRIVER
+M: Karan Tilak Kumar <kartilak@cisco.com>
+M: Sesidhar Baddela <sebaddel@cisco.com>
+L: linux-scsi@vger.kernel.org
+S: Supported
+F: drivers/scsi/snic/
+
CISCO VIC ETHERNET NIC DRIVER
M: Christian Benvenuti <benve@cisco.com>
M: Govindarajulu Varadarajan <_govind@gmx.com>
@@ -3350,19 +3389,6 @@ M: Dave Goodell <dgoodell@cisco.com>
S: Supported
F: drivers/infiniband/hw/usnic/
-CIRRUS LOGIC EP93XX ETHERNET DRIVER
-M: Hartley Sweeten <hsweeten@visionengravers.com>
-L: netdev@vger.kernel.org
-S: Maintained
-F: drivers/net/ethernet/cirrus/ep93xx_eth.c
-
-CIRRUS LOGIC AUDIO CODEC DRIVERS
-M: Brian Austin <brian.austin@cirrus.com>
-M: Paul Handrigan <Paul.Handrigan@cirrus.com>
-L: alsa-devel@alsa-project.org (moderated for non-subscribers)
-S: Maintained
-F: sound/soc/codecs/cs*
-
CLEANCACHE API
M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
L: linux-kernel@vger.kernel.org
@@ -3384,21 +3410,6 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
S: Supported
F: drivers/clocksource
-CISCO FCOE HBA DRIVER
-M: Satish Kharat <satishkh@cisco.com>
-M: Sesidhar Baddela <sebaddel@cisco.com>
-M: Karan Tilak Kumar <kartilak@cisco.com>
-L: linux-scsi@vger.kernel.org
-S: Supported
-F: drivers/scsi/fnic/
-
-CISCO SCSI HBA DRIVER
-M: Karan Tilak Kumar <kartilak@cisco.com>
-M: Sesidhar Baddela <sebaddel@cisco.com>
-L: linux-scsi@vger.kernel.org
-S: Supported
-F: drivers/scsi/snic/
-
CMPC ACPI DRIVER
M: Thadeu Lima de Souza Cascardo <cascardo@holoscopio.com>
M: Daniel Oliveira Nascimento <don@syst.com.br>
@@ -3474,17 +3485,17 @@ L: linux-pci@vger.kernel.org
S: Maintained
F: drivers/pci/hotplug/cpci_hotplug*
-COMPACTPCI HOTPLUG ZIATECH ZT5550 DRIVER
+COMPACTPCI HOTPLUG GENERIC DRIVER
M: Scott Murray <scott@spiteful.org>
L: linux-pci@vger.kernel.org
S: Maintained
-F: drivers/pci/hotplug/cpcihp_zt5550.*
+F: drivers/pci/hotplug/cpcihp_generic.c
-COMPACTPCI HOTPLUG GENERIC DRIVER
+COMPACTPCI HOTPLUG ZIATECH ZT5550 DRIVER
M: Scott Murray <scott@spiteful.org>
L: linux-pci@vger.kernel.org
S: Maintained
-F: drivers/pci/hotplug/cpcihp_generic.c
+F: drivers/pci/hotplug/cpcihp_zt5550.*
COMPAL LAPTOP SUPPORT
M: Cezary Jackiewicz <cezary.jackiewicz@gmail.com>
@@ -3587,6 +3598,18 @@ F: drivers/cpufreq/arm_big_little.h
F: drivers/cpufreq/arm_big_little.c
F: drivers/cpufreq/arm_big_little_dt.c
+CPU POWER MONITORING SUBSYSTEM
+M: Thomas Renninger <trenn@suse.com>
+L: linux-pm@vger.kernel.org
+S: Maintained
+F: tools/power/cpupower/
+
+CPUID/MSR DRIVER
+M: "H. Peter Anvin" <hpa@zytor.com>
+S: Maintained
+F: arch/x86/kernel/cpuid.c
+F: arch/x86/kernel/msr.c
+
CPUIDLE DRIVER - ARM BIG LITTLE
M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
M: Daniel Lezcano <daniel.lezcano@linaro.org>
@@ -3616,18 +3639,6 @@ B: https://bugzilla.kernel.org
F: drivers/cpuidle/*
F: include/linux/cpuidle.h
-CPUID/MSR DRIVER
-M: "H. Peter Anvin" <hpa@zytor.com>
-S: Maintained
-F: arch/x86/kernel/cpuid.c
-F: arch/x86/kernel/msr.c
-
-CPU POWER MONITORING SUBSYSTEM
-M: Thomas Renninger <trenn@suse.com>
-L: linux-pm@vger.kernel.org
-S: Maintained
-F: tools/power/cpupower/
-
CRAMFS FILESYSTEM
W: http://sourceforge.net/projects/cramfs/
S: Orphan / Obsolete
@@ -3757,6 +3768,13 @@ S: Supported
F: drivers/infiniband/hw/cxgb3/
F: include/uapi/rdma/cxgb3-abi.h
+CXGB4 CRYPTO DRIVER (chcr)
+M: Harsh Jain <harsh@chelsio.com>
+L: linux-crypto@vger.kernel.org
+W: http://www.chelsio.com
+S: Supported
+F: drivers/crypto/chelsio
+
CXGB4 ETHERNET DRIVER (CXGB4)
M: Ganesh Goudar <ganeshgr@chelsio.com>
L: netdev@vger.kernel.org
@@ -3779,13 +3797,6 @@ S: Supported
F: drivers/infiniband/hw/cxgb4/
F: include/uapi/rdma/cxgb4-abi.h
-CXGB4 CRYPTO DRIVER (chcr)
-M: Harsh Jain <harsh@chelsio.com>
-L: linux-crypto@vger.kernel.org
-W: http://www.chelsio.com
-S: Supported
-F: drivers/crypto/chelsio
-
CXGB4VF ETHERNET DRIVER (CXGB4VF)
M: Casey Leedom <leedom@chelsio.com>
L: netdev@vger.kernel.org
@@ -3815,14 +3826,6 @@ F: drivers/scsi/cxlflash/
F: include/uapi/scsi/cxlflash_ioctls.h
F: Documentation/powerpc/cxlflash.txt
-STMMAC ETHERNET DRIVER
-M: Giuseppe Cavallaro <peppe.cavallaro@st.com>
-M: Alexandre Torgue <alexandre.torgue@st.com>
-L: netdev@vger.kernel.org
-W: http://www.stlinux.com
-S: Supported
-F: drivers/net/ethernet/stmicro/stmmac/
-
CYBERPRO FB DRIVER
M: Russell King <linux@armlinux.org.uk>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -3946,15 +3949,15 @@ L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/platform/x86/dell-laptop.c
-DELL LAPTOP RBTN DRIVER
+DELL LAPTOP FREEFALL DRIVER
M: Pali Rohár <pali.rohar@gmail.com>
S: Maintained
-F: drivers/platform/x86/dell-rbtn.*
+F: drivers/platform/x86/dell-smo8800.c
-DELL LAPTOP FREEFALL DRIVER
+DELL LAPTOP RBTN DRIVER
M: Pali Rohár <pali.rohar@gmail.com>
S: Maintained
-F: drivers/platform/x86/dell-smo8800.c
+F: drivers/platform/x86/dell-rbtn.*
DELL LAPTOP SMM DRIVER
M: Pali Rohár <pali.rohar@gmail.com>
@@ -3974,6 +3977,14 @@ M: Pali Rohár <pali.rohar@gmail.com>
S: Maintained
F: drivers/platform/x86/dell-wmi.c
+DELTA ST MEDIA DRIVER
+M: Hugues Fruchet <hugues.fruchet@st.com>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+W: https://linuxtv.org
+S: Supported
+F: drivers/media/platform/sti/delta
+
DENALI NAND DRIVER
M: Masahiro Yamada <yamada.masahiro@socionext.com>
L: linux-mtd@lists.infradead.org
@@ -4028,15 +4039,6 @@ F: drivers/devfreq/devfreq-event.c
F: include/linux/devfreq-event.h
F: Documentation/devicetree/bindings/devfreq/event/
-BUS FREQUENCY DRIVER FOR SAMSUNG EXYNOS
-M: Chanwoo Choi <cw00.choi@samsung.com>
-L: linux-pm@vger.kernel.org
-L: linux-samsung-soc@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mzx/devfreq.git
-S: Maintained
-F: drivers/devfreq/exynos-bus.c
-F: Documentation/devicetree/bindings/devfreq/exynos-bus.txt
-
DEVICE NUMBER REGISTRY
M: Torben Mathiasen <device@lanana.org>
W: http://lanana.org/docs/device-list/index.html
@@ -4186,20 +4188,6 @@ F: include/linux/*fence.h
F: Documentation/driver-api/dma-buf.rst
T: git git://anongit.freedesktop.org/drm/drm-misc
-SYNC FILE FRAMEWORK
-M: Sumit Semwal <sumit.semwal@linaro.org>
-R: Gustavo Padovan <gustavo@padovan.org>
-S: Maintained
-L: linux-media@vger.kernel.org
-L: dri-devel@lists.freedesktop.org
-F: drivers/dma-buf/sync_*
-F: drivers/dma-buf/dma-fence*
-F: drivers/dma-buf/sw_sync.c
-F: include/linux/sync_file.h
-F: include/uapi/linux/sync_file.h
-F: Documentation/sync_file.txt
-T: git git://anongit.freedesktop.org/drm/drm-misc
-
DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
M: Vinod Koul <vinod.koul@intel.com>
L: dmaengine@vger.kernel.org
@@ -4211,6 +4199,21 @@ F: Documentation/devicetree/bindings/dma/
F: Documentation/dmaengine/
T: git git://git.infradead.org/users/vkoul/slave-dma.git
+DMA MAPPING HELPERS
+M: Christoph Hellwig <hch@lst.de>
+M: Marek Szyprowski <m.szyprowski@samsung.com>
+R: Robin Murphy <robin.murphy@arm.com>
+L: linux-kernel@vger.kernel.org
+T: git git://git.infradead.org/users/hch/dma-mapping.git
+W: http://git.infradead.org/users/hch/dma-mapping.git
+S: Supported
+F: lib/dma-debug.c
+F: lib/dma-noop.c
+F: lib/dma-virt.c
+F: drivers/base/dma-mapping.c
+F: drivers/base/dma-coherent.c
+F: include/linux/dma-mapping.h
+
DME1737 HARDWARE MONITOR DRIVER
M: Juerg Haefliger <juergh@gmail.com>
L: linux-hwmon@vger.kernel.org
@@ -4241,6 +4244,13 @@ X: Documentation/spi
X: Documentation/media
T: git git://git.lwn.net/linux.git docs-next
+DONGWOON DW9714 LENS VOICE COIL DRIVER
+M: Sakari Ailus <sakari.ailus@linux.intel.com>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/i2c/dw9714.c
+
DOUBLETALK DRIVER
M: "James R. Van Zandt" <jrv@vanzandt.mv.com>
L: blinux-list@redhat.com
@@ -4292,36 +4302,13 @@ F: include/linux/debugfs.h
F: include/linux/kobj*
F: lib/kobj*
-DRM DRIVERS
-M: David Airlie <airlied@linux.ie>
-L: dri-devel@lists.freedesktop.org
-T: git git://people.freedesktop.org/~airlied/linux
-B: https://bugs.freedesktop.org/
-C: irc://chat.freenode.net/dri-devel
-S: Maintained
-F: drivers/gpu/drm/
-F: drivers/gpu/vga/
-F: Documentation/devicetree/bindings/display/
-F: Documentation/devicetree/bindings/gpu/
-F: Documentation/devicetree/bindings/video/
-F: Documentation/gpu/
-F: include/drm/
-F: include/uapi/drm/
-F: include/linux/vga*
-
-DRM DRIVERS AND MISC GPU PATCHES
-M: Daniel Vetter <daniel.vetter@intel.com>
-M: Jani Nikula <jani.nikula@linux.intel.com>
-M: Sean Paul <seanpaul@chromium.org>
-W: https://01.org/linuxgraphics/gfx-docs/maintainer-tools/drm-misc.html
+DRIVERS FOR ADAPTIVE VOLTAGE SCALING (AVS)
+M: Kevin Hilman <khilman@kernel.org>
+M: Nishanth Menon <nm@ti.com>
S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
-F: Documentation/gpu/
-F: drivers/gpu/vga/
-F: drivers/gpu/drm/*
-F: include/drm/drm*
-F: include/uapi/drm/drm*
-F: include/linux/vga*
+F: drivers/power/avs/
+F: include/linux/power/smartreflex.h
+L: linux-pm@vger.kernel.org
DRM DRIVER FOR ARM PL111 CLCD
M: Eric Anholt <eric@anholt.net>
@@ -4334,14 +4321,6 @@ M: Dave Airlie <airlied@redhat.com>
S: Odd Fixes
F: drivers/gpu/drm/ast/
-DRM DRIVERS FOR BRIDGE CHIPS
-M: Archit Taneja <architt@codeaurora.org>
-M: Andrzej Hajda <a.hajda@samsung.com>
-R: Laurent Pinchart <Laurent.pinchart@ideasonboard.com>
-S: Maintained
-T: git git://anongit.freedesktop.org/drm/drm-misc
-F: drivers/gpu/drm/bridge/
-
DRM DRIVER FOR BOCHS VIRTUAL GPU
M: Gerd Hoffmann <kraxel@redhat.com>
L: virtualization@lists.linux-foundation.org
@@ -4349,6 +4328,47 @@ T: git git://anongit.freedesktop.org/drm/drm-misc
S: Maintained
F: drivers/gpu/drm/bochs/
+DRM DRIVER FOR INTEL I810 VIDEO CARDS
+S: Orphan / Obsolete
+F: drivers/gpu/drm/i810/
+F: include/uapi/drm/i810_drm.h
+
+DRM DRIVER FOR MATROX G200/G400 GRAPHICS CARDS
+S: Orphan / Obsolete
+F: drivers/gpu/drm/mga/
+F: include/uapi/drm/mga_drm.h
+
+DRM DRIVER FOR MGA G200 SERVER GRAPHICS CHIPS
+M: Dave Airlie <airlied@redhat.com>
+S: Odd Fixes
+F: drivers/gpu/drm/mgag200/
+
+DRM DRIVER FOR MI0283QT
+M: Noralf Trønnes <noralf@tronnes.org>
+S: Maintained
+F: drivers/gpu/drm/tinydrm/mi0283qt.c
+F: Documentation/devicetree/bindings/display/multi-inno,mi0283qt.txt
+
+DRM DRIVER FOR MSM ADRENO GPU
+M: Rob Clark <robdclark@gmail.com>
+L: linux-arm-msm@vger.kernel.org
+L: dri-devel@lists.freedesktop.org
+L: freedreno@lists.freedesktop.org
+T: git git://people.freedesktop.org/~robclark/linux
+S: Maintained
+F: drivers/gpu/drm/msm/
+F: include/uapi/drm/msm_drm.h
+F: Documentation/devicetree/bindings/display/msm/
+
+DRM DRIVER FOR NVIDIA GEFORCE/QUADRO GPUS
+M: Ben Skeggs <bskeggs@redhat.com>
+L: dri-devel@lists.freedesktop.org
+L: nouveau@lists.freedesktop.org
+T: git git://github.com/skeggsb/linux
+S: Supported
+F: drivers/gpu/drm/nouveau/
+F: include/uapi/drm/nouveau_drm.h
+
DRM DRIVER FOR QEMU'S CIRRUS DEVICE
M: Dave Airlie <airlied@redhat.com>
M: Gerd Hoffmann <kraxel@redhat.com>
@@ -4358,59 +4378,92 @@ S: Obsolete
W: https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/
F: drivers/gpu/drm/cirrus/
-RADEON and AMDGPU DRM DRIVERS
-M: Alex Deucher <alexander.deucher@amd.com>
-M: Christian König <christian.koenig@amd.com>
-L: amd-gfx@lists.freedesktop.org
-T: git git://people.freedesktop.org/~agd5f/linux
-S: Supported
-F: drivers/gpu/drm/radeon/
-F: include/uapi/drm/radeon_drm.h
-F: drivers/gpu/drm/amd/
-F: include/uapi/drm/amdgpu_drm.h
+DRM DRIVER FOR QXL VIRTUAL GPU
+M: Dave Airlie <airlied@redhat.com>
+M: Gerd Hoffmann <kraxel@redhat.com>
+L: virtualization@lists.linux-foundation.org
+T: git git://anongit.freedesktop.org/drm/drm-misc
+S: Maintained
+F: drivers/gpu/drm/qxl/
+F: include/uapi/drm/qxl_drm.h
-DRM PANEL DRIVERS
-M: Thierry Reding <thierry.reding@gmail.com>
-L: dri-devel@lists.freedesktop.org
-T: git git://anongit.freedesktop.org/tegra/linux.git
+DRM DRIVER FOR PERVASIVE DISPLAYS REPAPER PANELS
+M: Noralf Trønnes <noralf@tronnes.org>
S: Maintained
-F: drivers/gpu/drm/drm_panel.c
-F: drivers/gpu/drm/panel/
-F: include/drm/drm_panel.h
-F: Documentation/devicetree/bindings/display/panel/
+F: drivers/gpu/drm/tinydrm/repaper.c
+F: Documentation/devicetree/bindings/display/repaper.txt
-INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
-M: Daniel Vetter <daniel.vetter@intel.com>
-M: Jani Nikula <jani.nikula@linux.intel.com>
-L: intel-gfx@lists.freedesktop.org
-W: https://01.org/linuxgraphics/
-B: https://01.org/linuxgraphics/documentation/how-report-bugs
-C: irc://chat.freenode.net/intel-gfx
-Q: http://patchwork.freedesktop.org/project/intel-gfx/
-T: git git://anongit.freedesktop.org/drm-intel
-S: Supported
-F: drivers/gpu/drm/i915/
-F: include/drm/i915*
-F: include/uapi/drm/i915_drm.h
-F: Documentation/gpu/i915.rst
+DRM DRIVER FOR RAGE 128 VIDEO CARDS
+S: Orphan / Obsolete
+F: drivers/gpu/drm/r128/
+F: include/uapi/drm/r128_drm.h
-INTEL GVT-g DRIVERS (Intel GPU Virtualization)
-M: Zhenyu Wang <zhenyuw@linux.intel.com>
-M: Zhi Wang <zhi.a.wang@intel.com>
-L: intel-gvt-dev@lists.freedesktop.org
-L: intel-gfx@lists.freedesktop.org
-W: https://01.org/igvt-g
-T: git https://github.com/01org/gvt-linux.git
-S: Supported
-F: drivers/gpu/drm/i915/gvt/
+DRM DRIVER FOR SAVAGE VIDEO CARDS
+S: Orphan / Obsolete
+F: drivers/gpu/drm/savage/
+F: include/uapi/drm/savage_drm.h
-DRM DRIVERS FOR ATMEL HLCDC
-M: Boris Brezillon <boris.brezillon@free-electrons.com>
+DRM DRIVER FOR SIS VIDEO CARDS
+S: Orphan / Obsolete
+F: drivers/gpu/drm/sis/
+F: include/uapi/drm/sis_drm.h
+
+DRM DRIVER FOR SITRONIX ST7586 PANELS
+M: David Lechner <david@lechnology.com>
+S: Maintained
+F: drivers/gpu/drm/tinydrm/st7586.c
+F: Documentation/devicetree/bindings/display/st7586.txt
+
+DRM DRIVER FOR TDFX VIDEO CARDS
+S: Orphan / Obsolete
+F: drivers/gpu/drm/tdfx/
+
+DRM DRIVER FOR USB DISPLAYLINK VIDEO ADAPTERS
+M: Dave Airlie <airlied@redhat.com>
+S: Odd Fixes
+F: drivers/gpu/drm/udl/
+
+DRM DRIVER FOR VMWARE VIRTUAL GPU
+M: "VMware Graphics" <linux-graphics-maintainer@vmware.com>
+M: Sinclair Yeh <syeh@vmware.com>
+M: Thomas Hellstrom <thellstrom@vmware.com>
L: dri-devel@lists.freedesktop.org
+T: git git://people.freedesktop.org/~syeh/repos_linux
+T: git git://people.freedesktop.org/~thomash/linux
S: Supported
-F: drivers/gpu/drm/atmel-hlcdc/
-F: Documentation/devicetree/bindings/drm/atmel/
+F: drivers/gpu/drm/vmwgfx/
+F: include/uapi/drm/vmwgfx_drm.h
+
+DRM DRIVERS
+M: David Airlie <airlied@linux.ie>
+L: dri-devel@lists.freedesktop.org
+T: git git://people.freedesktop.org/~airlied/linux
+B: https://bugs.freedesktop.org/
+C: irc://chat.freenode.net/dri-devel
+S: Maintained
+F: drivers/gpu/drm/
+F: drivers/gpu/vga/
+F: Documentation/devicetree/bindings/display/
+F: Documentation/devicetree/bindings/gpu/
+F: Documentation/devicetree/bindings/video/
+F: Documentation/gpu/
+F: include/drm/
+F: include/uapi/drm/
+F: include/linux/vga*
+
+DRM DRIVERS AND MISC GPU PATCHES
+M: Daniel Vetter <daniel.vetter@intel.com>
+M: Jani Nikula <jani.nikula@linux.intel.com>
+M: Sean Paul <seanpaul@chromium.org>
+W: https://01.org/linuxgraphics/gfx-docs/maintainer-tools/drm-misc.html
+S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
+F: Documentation/gpu/
+F: drivers/gpu/vga/
+F: drivers/gpu/drm/*
+F: include/drm/drm*
+F: include/uapi/drm/drm*
+F: include/linux/vga*
DRM DRIVERS FOR ALLWINNER A10
M: Maxime Ripard <maxime.ripard@free-electrons.com>
@@ -4432,6 +4485,22 @@ F: Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.txt
F: Documentation/gpu/meson.rst
T: git git://anongit.freedesktop.org/drm/drm-misc
+DRM DRIVERS FOR ATMEL HLCDC
+M: Boris Brezillon <boris.brezillon@free-electrons.com>
+L: dri-devel@lists.freedesktop.org
+S: Supported
+F: drivers/gpu/drm/atmel-hlcdc/
+F: Documentation/devicetree/bindings/drm/atmel/
+T: git git://anongit.freedesktop.org/drm/drm-misc
+
+DRM DRIVERS FOR BRIDGE CHIPS
+M: Archit Taneja <architt@codeaurora.org>
+M: Andrzej Hajda <a.hajda@samsung.com>
+R: Laurent Pinchart <Laurent.pinchart@ideasonboard.com>
+S: Maintained
+T: git git://anongit.freedesktop.org/drm/drm-misc
+F: drivers/gpu/drm/bridge/
+
DRM DRIVERS FOR EXYNOS
M: Inki Dae <inki.dae@samsung.com>
M: Joonyoung Shim <jy0922.shim@samsung.com>
@@ -4480,11 +4549,6 @@ S: Maintained
F: drivers/gpu/drm/hisilicon/
F: Documentation/devicetree/bindings/display/hisilicon/
-DRM DRIVER FOR INTEL I810 VIDEO CARDS
-S: Orphan / Obsolete
-F: drivers/gpu/drm/i810/
-F: include/uapi/drm/i810_drm.h
-
DRM DRIVERS FOR MEDIATEK
M: CK Hu <ck.hu@mediatek.com>
M: Philipp Zabel <p.zabel@pengutronix.de>
@@ -4493,32 +4557,6 @@ S: Supported
F: drivers/gpu/drm/mediatek/
F: Documentation/devicetree/bindings/display/mediatek/
-DRM DRIVER FOR MI0283QT
-M: Noralf Trønnes <noralf@tronnes.org>
-S: Maintained
-F: drivers/gpu/drm/tinydrm/mi0283qt.c
-F: Documentation/devicetree/bindings/display/multi-inno,mi0283qt.txt
-
-DRM DRIVER FOR MSM ADRENO GPU
-M: Rob Clark <robdclark@gmail.com>
-L: linux-arm-msm@vger.kernel.org
-L: dri-devel@lists.freedesktop.org
-L: freedreno@lists.freedesktop.org
-T: git git://people.freedesktop.org/~robclark/linux
-S: Maintained
-F: drivers/gpu/drm/msm/
-F: include/uapi/drm/msm_drm.h
-F: Documentation/devicetree/bindings/display/msm/
-
-DRM DRIVER FOR NVIDIA GEFORCE/QUADRO GPUS
-M: Ben Skeggs <bskeggs@redhat.com>
-L: dri-devel@lists.freedesktop.org
-L: nouveau@lists.freedesktop.org
-T: git git://github.com/skeggsb/linux
-S: Supported
-F: drivers/gpu/drm/nouveau/
-F: include/uapi/drm/nouveau_drm.h
-
DRM DRIVERS FOR NVIDIA TEGRA
M: Thierry Reding <thierry.reding@gmail.com>
L: dri-devel@lists.freedesktop.org
@@ -4531,21 +4569,6 @@ F: include/linux/host1x.h
F: include/uapi/drm/tegra_drm.h
F: Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt
-DRM DRIVER FOR MATROX G200/G400 GRAPHICS CARDS
-S: Orphan / Obsolete
-F: drivers/gpu/drm/mga/
-F: include/uapi/drm/mga_drm.h
-
-DRM DRIVER FOR MGA G200 SERVER GRAPHICS CHIPS
-M: Dave Airlie <airlied@redhat.com>
-S: Odd Fixes
-F: drivers/gpu/drm/mgag200/
-
-DRM DRIVER FOR RAGE 128 VIDEO CARDS
-S: Orphan / Obsolete
-F: drivers/gpu/drm/r128/
-F: include/uapi/drm/r128_drm.h
-
DRM DRIVERS FOR RENESAS
M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
L: dri-devel@lists.freedesktop.org
@@ -4558,15 +4581,6 @@ F: include/linux/platform_data/shmob_drm.h
F: Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.txt
F: Documentation/devicetree/bindings/display/renesas,du.txt
-DRM DRIVER FOR QXL VIRTUAL GPU
-M: Dave Airlie <airlied@redhat.com>
-M: Gerd Hoffmann <kraxel@redhat.com>
-L: virtualization@lists.linux-foundation.org
-T: git git://anongit.freedesktop.org/drm/drm-misc
-S: Maintained
-F: drivers/gpu/drm/qxl/
-F: include/uapi/drm/qxl_drm.h
-
DRM DRIVERS FOR ROCKCHIP
M: Mark Yao <mark.yao@rock-chips.com>
L: dri-devel@lists.freedesktop.org
@@ -4575,16 +4589,6 @@ F: drivers/gpu/drm/rockchip/
F: Documentation/devicetree/bindings/display/rockchip/
T: git git://anongit.freedesktop.org/drm/drm-misc
-DRM DRIVER FOR SAVAGE VIDEO CARDS
-S: Orphan / Obsolete
-F: drivers/gpu/drm/savage/
-F: include/uapi/drm/savage_drm.h
-
-DRM DRIVER FOR SIS VIDEO CARDS
-S: Orphan / Obsolete
-F: drivers/gpu/drm/sis/
-F: include/uapi/drm/sis_drm.h
-
DRM DRIVERS FOR STI
M: Benjamin Gaignard <benjamin.gaignard@linaro.org>
M: Vincent Abriou <vincent.abriou@st.com>
@@ -4605,36 +4609,20 @@ S: Maintained
F: drivers/gpu/drm/stm
F: Documentation/devicetree/bindings/display/st,stm32-ltdc.txt
-DRM DRIVER FOR TDFX VIDEO CARDS
-S: Orphan / Obsolete
-F: drivers/gpu/drm/tdfx/
-
-DRM DRIVER FOR USB DISPLAYLINK VIDEO ADAPTERS
-M: Dave Airlie <airlied@redhat.com>
-S: Odd Fixes
-F: drivers/gpu/drm/udl/
-
-DRM DRIVERS FOR VIVANTE GPU IP
-M: Lucas Stach <l.stach@pengutronix.de>
-R: Russell King <linux+etnaviv@armlinux.org.uk>
-R: Christian Gmeiner <christian.gmeiner@gmail.com>
-L: etnaviv@lists.freedesktop.org
+DRM DRIVERS FOR TI LCDC
+M: Jyri Sarha <jsarha@ti.com>
+R: Tomi Valkeinen <tomi.valkeinen@ti.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
-F: drivers/gpu/drm/etnaviv/
-F: include/uapi/drm/etnaviv_drm.h
-F: Documentation/devicetree/bindings/display/etnaviv/
+F: drivers/gpu/drm/tilcdc/
+F: Documentation/devicetree/bindings/display/tilcdc/
-DRM DRIVER FOR VMWARE VIRTUAL GPU
-M: "VMware Graphics" <linux-graphics-maintainer@vmware.com>
-M: Sinclair Yeh <syeh@vmware.com>
-M: Thomas Hellstrom <thellstrom@vmware.com>
+DRM DRIVERS FOR TI OMAP
+M: Tomi Valkeinen <tomi.valkeinen@ti.com>
L: dri-devel@lists.freedesktop.org
-T: git git://people.freedesktop.org/~syeh/repos_linux
-T: git git://people.freedesktop.org/~thomash/linux
-S: Supported
-F: drivers/gpu/drm/vmwgfx/
-F: include/uapi/drm/vmwgfx_drm.h
+S: Maintained
+F: drivers/gpu/drm/omapdrm/
+F: Documentation/devicetree/bindings/display/ti/
DRM DRIVERS FOR VC4
M: Eric Anholt <eric@anholt.net>
@@ -4645,20 +4633,16 @@ F: include/uapi/drm/vc4_drm.h
F: Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt
T: git git://anongit.freedesktop.org/drm/drm-misc
-DRM DRIVERS FOR TI OMAP
-M: Tomi Valkeinen <tomi.valkeinen@ti.com>
-L: dri-devel@lists.freedesktop.org
-S: Maintained
-F: drivers/gpu/drm/omapdrm/
-F: Documentation/devicetree/bindings/display/ti/
-
-DRM DRIVERS FOR TI LCDC
-M: Jyri Sarha <jsarha@ti.com>
-R: Tomi Valkeinen <tomi.valkeinen@ti.com>
+DRM DRIVERS FOR VIVANTE GPU IP
+M: Lucas Stach <l.stach@pengutronix.de>
+R: Russell King <linux+etnaviv@armlinux.org.uk>
+R: Christian Gmeiner <christian.gmeiner@gmail.com>
+L: etnaviv@lists.freedesktop.org
L: dri-devel@lists.freedesktop.org
S: Maintained
-F: drivers/gpu/drm/tilcdc/
-F: Documentation/devicetree/bindings/display/tilcdc/
+F: drivers/gpu/drm/etnaviv/
+F: include/uapi/drm/etnaviv_drm.h
+F: Documentation/devicetree/bindings/display/etnaviv/
DRM DRIVERS FOR ZTE ZX
M: Shawn Guo <shawnguo@kernel.org>
@@ -4668,6 +4652,24 @@ F: drivers/gpu/drm/zte/
F: Documentation/devicetree/bindings/display/zte,vou.txt
T: git git://anongit.freedesktop.org/drm/drm-misc
+DRM PANEL DRIVERS
+M: Thierry Reding <thierry.reding@gmail.com>
+L: dri-devel@lists.freedesktop.org
+T: git git://anongit.freedesktop.org/tegra/linux.git
+S: Maintained
+F: drivers/gpu/drm/drm_panel.c
+F: drivers/gpu/drm/panel/
+F: include/drm/drm_panel.h
+F: Documentation/devicetree/bindings/display/panel/
+
+DRM TINYDRM DRIVERS
+M: Noralf Trønnes <noralf@tronnes.org>
+W: https://github.com/notro/tinydrm/wiki/Development
+T: git git://anongit.freedesktop.org/drm/drm-misc
+S: Maintained
+F: drivers/gpu/drm/tinydrm/
+F: include/drm/tinydrm/
+
DSBR100 USB FM RADIO DRIVER
M: Alexey Klimov <klimov.linux@gmail.com>
L: linux-media@vger.kernel.org
@@ -4799,13 +4801,6 @@ S: Maintained
F: drivers/media/usb/dvb-usb-v2/dvb_usb*
F: drivers/media/usb/dvb-usb-v2/usb_urb.c
-DONGWOON DW9714 LENS VOICE COIL DRIVER
-M: Sakari Ailus <sakari.ailus@linux.intel.com>
-L: linux-media@vger.kernel.org
-T: git git://linuxtv.org/media_tree.git
-S: Maintained
-F: drivers/media/i2c/dw9714.c
-
DYNAMIC DEBUG
M: Jason Baron <jbaron@akamai.com>
S: Maintained
@@ -4861,19 +4856,6 @@ S: Supported
F: Documentation/filesystems/ecryptfs.txt
F: fs/ecryptfs/
-EDAC-CORE
-M: Borislav Petkov <bp@alien8.de>
-M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
-M: Mauro Carvalho Chehab <mchehab@kernel.org>
-L: linux-edac@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp.git for-next
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-edac.git linux_next
-S: Supported
-F: Documentation/admin-guide/ras.rst
-F: Documentation/driver-api/edac.rst
-F: drivers/edac/
-F: include/linux/edac.h
-
EDAC-AMD64
M: Borislav Petkov <bp@alien8.de>
L: linux-edac@vger.kernel.org
@@ -4895,6 +4877,19 @@ S: Supported
F: drivers/edac/octeon_edac*
F: drivers/edac/thunderx_edac*
+EDAC-CORE
+M: Borislav Petkov <bp@alien8.de>
+M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M: Mauro Carvalho Chehab <mchehab@kernel.org>
+L: linux-edac@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp.git for-next
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-edac.git linux_next
+S: Supported
+F: Documentation/admin-guide/ras.rst
+F: Documentation/driver-api/edac.rst
+F: drivers/edac/
+F: include/linux/edac.h
+
EDAC-E752X
M: Mark Gross <mark.gross@intel.com>
L: linux-edac@vger.kernel.org
@@ -4919,12 +4914,6 @@ L: linux-edac@vger.kernel.org
S: Maintained
F: drivers/edac/ghes_edac.c
-EDAC-I82443BXGX
-M: Tim Small <tim@buttersideup.com>
-L: linux-edac@vger.kernel.org
-S: Maintained
-F: drivers/edac/i82443bxgx_edac.c
-
EDAC-I3000
L: linux-edac@vger.kernel.org
S: Orphan
@@ -4956,6 +4945,12 @@ L: linux-edac@vger.kernel.org
S: Maintained
F: drivers/edac/i7core_edac.c
+EDAC-I82443BXGX
+M: Tim Small <tim@buttersideup.com>
+L: linux-edac@vger.kernel.org
+S: Maintained
+F: drivers/edac/i82443bxgx_edac.c
+
EDAC-I82975X
M: Ranganathan Desikan <ravi@jetztechnologies.com>
M: "Arvind R." <arvino55@gmail.com>
@@ -4975,18 +4970,18 @@ L: linux-edac@vger.kernel.org
S: Maintained
F: drivers/edac/mpc85xx_edac.[ch]
-EDAC-PND2
-M: Tony Luck <tony.luck@intel.com>
-L: linux-edac@vger.kernel.org
-S: Maintained
-F: drivers/edac/pnd2_edac.[ch]
-
EDAC-PASEMI
M: Egor Martovetsky <egor@pasemi.com>
L: linux-edac@vger.kernel.org
S: Maintained
F: drivers/edac/pasemi_edac.c
+EDAC-PND2
+M: Tony Luck <tony.luck@intel.com>
+L: linux-edac@vger.kernel.org
+S: Maintained
+F: drivers/edac/pnd2_edac.[ch]
+
EDAC-R82600
M: Tim Small <tim@buttersideup.com>
L: linux-edac@vger.kernel.org
@@ -5006,13 +5001,6 @@ L: linux-edac@vger.kernel.org
S: Maintained
F: drivers/edac/skx_edac.c
-EDAC-XGENE
-APPLIED MICRO (APM) X-GENE SOC EDAC
-M: Loc Ho <lho@apm.com>
-S: Supported
-F: drivers/edac/xgene_edac.c
-F: Documentation/devicetree/bindings/edac/apm-xgene-edac.txt
-
EDIROL UA-101/UA-1000 DRIVER
M: Clemens Ladisch <clemens@ladisch.de>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
@@ -5020,21 +5008,12 @@ T: git git://git.alsa-project.org/alsa-kernel.git
S: Maintained
F: sound/usb/misc/ua101.c
-EXTENSIBLE FIRMWARE INTERFACE (EFI)
-M: Matt Fleming <matt@codeblueprint.co.uk>
-M: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+EFI TEST DRIVER
L: linux-efi@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/efi/efi.git
+M: Ivan Hu <ivan.hu@canonical.com>
+M: Matt Fleming <matt@codeblueprint.co.uk>
S: Maintained
-F: Documentation/efi-stub.txt
-F: arch/*/kernel/efi.c
-F: arch/x86/boot/compressed/eboot.[ch]
-F: arch/*/include/asm/efi.h
-F: arch/x86/platform/efi/
-F: drivers/firmware/efi/
-F: include/linux/efi*.h
-F: arch/arm/boot/compressed/efi-header.S
-F: arch/arm64/kernel/efi-entry.S
+F: drivers/firmware/efi/test/
EFI VARIABLE FILESYSTEM
M: Matthew Garrett <matthew.garrett@nebula.com>
@@ -5051,13 +5030,6 @@ M: Peter Jones <pjones@redhat.com>
S: Maintained
F: drivers/video/fbdev/efifb.c
-EFI TEST DRIVER
-L: linux-efi@vger.kernel.org
-M: Ivan Hu <ivan.hu@canonical.com>
-M: Matt Fleming <matt@codeblueprint.co.uk>
-S: Maintained
-F: drivers/firmware/efi/test/
-
EFS FILESYSTEM
W: http://aeschi.ch.eu.org/efs/
S: Orphan
@@ -5086,6 +5058,34 @@ M: David Woodhouse <dwmw2@infradead.org>
L: linux-embedded@vger.kernel.org
S: Maintained
+Emulex 10Gbps iSCSI - OneConnect DRIVER
+M: Subbu Seetharaman <subbu.seetharaman@broadcom.com>
+M: Ketan Mukadam <ketan.mukadam@broadcom.com>
+M: Jitendra Bhivare <jitendra.bhivare@broadcom.com>
+L: linux-scsi@vger.kernel.org
+W: http://www.broadcom.com
+S: Supported
+F: drivers/scsi/be2iscsi/
+
+Emulex 10Gbps NIC BE2, BE3-R, Lancer, Skyhawk-R DRIVER (be2net)
+M: Sathya Perla <sathya.perla@broadcom.com>
+M: Ajit Khaparde <ajit.khaparde@broadcom.com>
+M: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
+M: Somnath Kotur <somnath.kotur@broadcom.com>
+L: netdev@vger.kernel.org
+W: http://www.emulex.com
+S: Supported
+F: drivers/net/ethernet/emulex/benet/
+
+EMULEX ONECONNECT ROCE DRIVER
+M: Selvin Xavier <selvin.xavier@broadcom.com>
+M: Devesh Sharma <devesh.sharma@broadcom.com>
+L: linux-rdma@vger.kernel.org
+W: http://www.broadcom.com
+S: Odd Fixes
+F: drivers/infiniband/hw/ocrdma/
+F: include/uapi/rdma/ocrdma-abi.h
+
EMULEX/BROADCOM LPFC FC/FCOE SCSI DRIVER
M: James Smart <james.smart@broadcom.com>
M: Dick Kennedy <dick.kennedy@broadcom.com>
@@ -5138,12 +5138,21 @@ M: Andrew Lunn <andrew@lunn.ch>
M: Florian Fainelli <f.fainelli@gmail.com>
L: netdev@vger.kernel.org
S: Maintained
-F: include/linux/phy.h
-F: include/linux/phy_fixed.h
-F: drivers/net/phy/
+F: Documentation/ABI/testing/sysfs-bus-mdio
+F: Documentation/devicetree/bindings/net/mdio*
F: Documentation/networking/phy.txt
+F: drivers/net/phy/
F: drivers/of/of_mdio.c
F: drivers/of/of_net.c
+F: include/linux/*mdio*.h
+F: include/linux/of_net.h
+F: include/linux/phy.h
+F: include/linux/phy_fixed.h
+F: include/linux/platform_data/mdio-gpio.h
+F: include/linux/platform_data/mdio-bcm-unimac.h
+F: include/trace/events/mdio.h
+F: include/uapi/linux/mdio.h
+F: include/uapi/linux/mii.h
EXT2 FILE SYSTEM
M: Jan Kara <jack@suse.com>
@@ -5171,6 +5180,22 @@ L: linux-security-module@vger.kernel.org
S: Supported
F: security/integrity/evm/
+EXTENSIBLE FIRMWARE INTERFACE (EFI)
+M: Matt Fleming <matt@codeblueprint.co.uk>
+M: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+L: linux-efi@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/efi/efi.git
+S: Maintained
+F: Documentation/efi-stub.txt
+F: arch/*/kernel/efi.c
+F: arch/x86/boot/compressed/eboot.[ch]
+F: arch/*/include/asm/efi.h
+F: arch/x86/platform/efi/
+F: drivers/firmware/efi/
+F: include/linux/efi*.h
+F: arch/arm/boot/compressed/efi-header.S
+F: arch/arm64/kernel/efi-entry.S
+
EXTERNAL CONNECTOR SUBSYSTEM (EXTCON)
M: MyungJoo Ham <myungjoo.ham@samsung.com>
M: Chanwoo Choi <cw00.choi@samsung.com>
@@ -5201,6 +5226,19 @@ S: Supported
F: arch/arc/plat-eznps
F: arch/arc/boot/dts/eznps.dts
+F2FS FILE SYSTEM
+M: Jaegeuk Kim <jaegeuk@kernel.org>
+M: Chao Yu <yuchao0@huawei.com>
+L: linux-f2fs-devel@lists.sourceforge.net
+W: https://f2fs.wiki.kernel.org/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs.git
+S: Maintained
+F: Documentation/filesystems/f2fs.txt
+F: Documentation/ABI/testing/sysfs-fs-f2fs
+F: fs/f2fs/
+F: include/linux/f2fs_fs.h
+F: include/trace/events/f2fs.h
+
F71805F HARDWARE MONITORING DRIVER
M: Jean Delvare <jdelvare@suse.com>
L: linux-hwmon@vger.kernel.org
@@ -5208,23 +5246,6 @@ S: Maintained
F: Documentation/hwmon/f71805f
F: drivers/hwmon/f71805f.c
-FC0011 TUNER DRIVER
-M: Michael Buesch <m@bues.ch>
-L: linux-media@vger.kernel.org
-S: Maintained
-F: drivers/media/tuners/fc0011.h
-F: drivers/media/tuners/fc0011.c
-
-FC2580 MEDIA DRIVER
-M: Antti Palosaari <crope@iki.fi>
-L: linux-media@vger.kernel.org
-W: https://linuxtv.org
-W: http://palosaari.fi/linux/
-Q: http://patchwork.linuxtv.org/project/linux-media/list/
-T: git git://linuxtv.org/anttip/media_tree.git
-S: Maintained
-F: drivers/media/tuners/fc2580*
-
FANOTIFY
M: Eric Paris <eparis@redhat.com>
S: Maintained
@@ -5249,6 +5270,23 @@ M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
S: Maintained
F: drivers/staging/fbtft/
+FC0011 TUNER DRIVER
+M: Michael Buesch <m@bues.ch>
+L: linux-media@vger.kernel.org
+S: Maintained
+F: drivers/media/tuners/fc0011.h
+F: drivers/media/tuners/fc0011.c
+
+FC2580 MEDIA DRIVER
+M: Antti Palosaari <crope@iki.fi>
+L: linux-media@vger.kernel.org
+W: https://linuxtv.org
+W: http://palosaari.fi/linux/
+Q: http://patchwork.linuxtv.org/project/linux-media/list/
+T: git git://linuxtv.org/anttip/media_tree.git
+S: Maintained
+F: drivers/media/tuners/fc2580*
+
FCOE SUBSYSTEM (libfc, libfcoe, fcoe)
M: Johannes Thumshirn <jth@kernel.org>
L: fcoe-devel@open-fcoe.org
@@ -5352,10 +5390,11 @@ K: fmc_d.*register
FPGA MANAGER FRAMEWORK
M: Alan Tull <atull@kernel.org>
-R: Moritz Fischer <moritz.fischer@ettus.com>
+R: Moritz Fischer <mdf@kernel.org>
L: linux-fpga@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/atull/linux-fpga.git
+Q: http://patchwork.kernel.org/project/linux-fpga/list/
F: Documentation/fpga/
F: Documentation/devicetree/bindings/fpga/
F: drivers/fpga/
@@ -5408,6 +5447,14 @@ L: linuxppc-dev@lists.ozlabs.org
S: Maintained
F: drivers/dma/fsldma.*
+FREESCALE eTSEC ETHERNET DRIVER (GIANFAR)
+M: Claudiu Manoil <claudiu.manoil@freescale.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/net/ethernet/freescale/gianfar*
+X: drivers/net/ethernet/freescale/gianfar_ptp.c
+F: Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
+
FREESCALE GPMI NAND DRIVER
M: Han Xu <han.xu@nxp.com>
L: linux-mtd@lists.infradead.org
@@ -5421,6 +5468,15 @@ L: linux-i2c@vger.kernel.org
S: Maintained
F: drivers/i2c/busses/i2c-cpm.c
+FREESCALE IMX / MXC FEC DRIVER
+M: Fugang Duan <fugang.duan@nxp.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/net/ethernet/freescale/fec_main.c
+F: drivers/net/ethernet/freescale/fec_ptp.c
+F: drivers/net/ethernet/freescale/fec.h
+F: Documentation/devicetree/bindings/net/fsl-fec.txt
+
FREESCALE IMX / MXC FRAMEBUFFER DRIVER
M: Sascha Hauer <kernel@pengutronix.de>
L: linux-fbdev@vger.kernel.org
@@ -5429,29 +5485,11 @@ S: Maintained
F: include/linux/platform_data/video-imxfb.h
F: drivers/video/fbdev/imxfb.c
-FREESCALE QUAD SPI DRIVER
-M: Han Xu <han.xu@nxp.com>
-L: linux-mtd@lists.infradead.org
-S: Maintained
-F: drivers/mtd/spi-nor/fsl-quadspi.c
-
-FREESCALE SOC FS_ENET DRIVER
-M: Pantelis Antoniou <pantelis.antoniou@gmail.com>
-M: Vitaly Bordug <vbordug@ru.mvista.com>
-L: linuxppc-dev@lists.ozlabs.org
-L: netdev@vger.kernel.org
-S: Maintained
-F: drivers/net/ethernet/freescale/fs_enet/
-F: include/linux/fs_enet_pd.h
-
-FREESCALE IMX / MXC FEC DRIVER
-M: Fugang Duan <fugang.duan@nxp.com>
+FREESCALE QORIQ DPAA ETHERNET DRIVER
+M: Madalin Bucur <madalin.bucur@nxp.com>
L: netdev@vger.kernel.org
S: Maintained
-F: drivers/net/ethernet/freescale/fec_main.c
-F: drivers/net/ethernet/freescale/fec_ptp.c
-F: drivers/net/ethernet/freescale/fec.h
-F: Documentation/devicetree/bindings/net/fsl-fec.txt
+F: drivers/net/ethernet/freescale/dpaa
FREESCALE QORIQ DPAA FMAN DRIVER
M: Madalin Bucur <madalin.bucur@nxp.com>
@@ -5460,20 +5498,11 @@ S: Maintained
F: drivers/net/ethernet/freescale/fman
F: Documentation/devicetree/bindings/powerpc/fsl/fman.txt
-FREESCALE QORIQ DPAA ETHERNET DRIVER
-M: Madalin Bucur <madalin.bucur@nxp.com>
-L: netdev@vger.kernel.org
-S: Maintained
-F: drivers/net/ethernet/freescale/dpaa
-
-FREESCALE SOC DRIVERS
-M: Li Yang <leoyang.li@nxp.com>
-L: linuxppc-dev@lists.ozlabs.org
-L: linux-arm-kernel@lists.infradead.org
+FREESCALE QUAD SPI DRIVER
+M: Han Xu <han.xu@nxp.com>
+L: linux-mtd@lists.infradead.org
S: Maintained
-F: Documentation/devicetree/bindings/soc/fsl/
-F: drivers/soc/fsl/
-F: include/linux/fsl/
+F: drivers/mtd/spi-nor/fsl-quadspi.c
FREESCALE QUICC ENGINE LIBRARY
M: Qiang Zhao <qiang.zhao@nxp.com>
@@ -5483,13 +5512,6 @@ F: drivers/soc/fsl/qe/
F: include/soc/fsl/*qe*.h
F: include/soc/fsl/*ucc*.h
-FREESCALE USB PERIPHERAL DRIVERS
-M: Li Yang <leoyang.li@nxp.com>
-L: linux-usb@vger.kernel.org
-L: linuxppc-dev@lists.ozlabs.org
-S: Maintained
-F: drivers/usb/gadget/udc/fsl*
-
FREESCALE QUICC ENGINE UCC ETHERNET DRIVER
M: Li Yang <leoyang.li@nxp.com>
L: netdev@vger.kernel.org
@@ -5497,14 +5519,6 @@ L: linuxppc-dev@lists.ozlabs.org
S: Maintained
F: drivers/net/ethernet/freescale/ucc_geth*
-FREESCALE eTSEC ETHERNET DRIVER (GIANFAR)
-M: Claudiu Manoil <claudiu.manoil@freescale.com>
-L: netdev@vger.kernel.org
-S: Maintained
-F: drivers/net/ethernet/freescale/gianfar*
-X: drivers/net/ethernet/freescale/gianfar_ptp.c
-F: Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
-
FREESCALE QUICC ENGINE UCC HDLC DRIVER
M: Zhao Qiang <qiang.zhao@nxp.com>
L: netdev@vger.kernel.org
@@ -5518,6 +5532,24 @@ L: linuxppc-dev@lists.ozlabs.org
S: Maintained
F: drivers/tty/serial/ucc_uart.c
+FREESCALE SOC DRIVERS
+M: Li Yang <leoyang.li@nxp.com>
+L: linuxppc-dev@lists.ozlabs.org
+L: linux-arm-kernel@lists.infradead.org
+S: Maintained
+F: Documentation/devicetree/bindings/soc/fsl/
+F: drivers/soc/fsl/
+F: include/linux/fsl/
+
+FREESCALE SOC FS_ENET DRIVER
+M: Pantelis Antoniou <pantelis.antoniou@gmail.com>
+M: Vitaly Bordug <vbordug@ru.mvista.com>
+L: linuxppc-dev@lists.ozlabs.org
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/net/ethernet/freescale/fs_enet/
+F: include/linux/fs_enet_pd.h
+
FREESCALE SOC SOUND DRIVERS
M: Timur Tabi <timur@tabi.org>
M: Nicolin Chen <nicoleotsuka@gmail.com>
@@ -5530,6 +5562,13 @@ F: sound/soc/fsl/fsl*
F: sound/soc/fsl/imx*
F: sound/soc/fsl/mpc8610_hpcd.c
+FREESCALE USB PERIPHERAL DRIVERS
+M: Li Yang <leoyang.li@nxp.com>
+L: linux-usb@vger.kernel.org
+L: linuxppc-dev@lists.ozlabs.org
+S: Maintained
+F: drivers/usb/gadget/udc/fsl*
+
FREEVXFS FILESYSTEM
M: Christoph Hellwig <hch@infradead.org>
W: ftp://ftp.openlinux.org/pub/people/hch/vxfs
@@ -5570,19 +5609,6 @@ S: Supported
F: fs/crypto/
F: include/linux/fscrypt*.h
-F2FS FILE SYSTEM
-M: Jaegeuk Kim <jaegeuk@kernel.org>
-M: Chao Yu <yuchao0@huawei.com>
-L: linux-f2fs-devel@lists.sourceforge.net
-W: https://f2fs.wiki.kernel.org/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs.git
-S: Maintained
-F: Documentation/filesystems/f2fs.txt
-F: Documentation/ABI/testing/sysfs-fs-f2fs
-F: fs/f2fs/
-F: include/linux/f2fs_fs.h
-F: include/trace/events/f2fs.h
-
FUJITSU FR-V (FRV) PORT
S: Orphan
F: arch/frv/
@@ -5656,6 +5682,12 @@ S: Maintained
F: kernel/gcov/
F: Documentation/dev-tools/gcov.rst
+GDB KERNEL DEBUGGING HELPER SCRIPTS
+M: Jan Kiszka <jan.kiszka@siemens.com>
+M: Kieran Bingham <kieran@bingham.xyz>
+S: Supported
+F: scripts/gdb/
+
GDT SCSI DISK ARRAY CONTROLLER DRIVER
M: Achim Leubner <achim_leubner@adaptec.com>
L: linux-scsi@vger.kernel.org
@@ -5663,12 +5695,6 @@ W: http://www.icp-vortex.com/
S: Supported
F: drivers/scsi/gdt*
-GDB KERNEL DEBUGGING HELPER SCRIPTS
-M: Jan Kiszka <jan.kiszka@siemens.com>
-M: Kieran Bingham <kieran@bingham.xyz>
-S: Supported
-F: scripts/gdb/
-
GEMTEK FM RADIO RECEIVER DRIVER
M: Hans Verkuil <hverkuil@xs4all.nl>
L: linux-media@vger.kernel.org
@@ -5735,17 +5761,17 @@ L: kvm@vger.kernel.org
S: Supported
F: drivers/uio/uio_pci_generic.c
-GET_MAINTAINER SCRIPT
-M: Joe Perches <joe@perches.com>
-S: Maintained
-F: scripts/get_maintainer.pl
-
GENWQE (IBM Generic Workqueue Card)
M: Frank Haverkamp <haver@linux.vnet.ibm.com>
M: Guilherme G. Piccoli <gpiccoli@linux.vnet.ibm.com>
S: Supported
F: drivers/misc/genwqe/
+GET_MAINTAINER SCRIPT
+M: Joe Perches <joe@perches.com>
+S: Maintained
+F: scripts/get_maintainer.pl
+
GFS2 FILE SYSTEM
M: Steven Whitehouse <swhiteho@redhat.com>
M: Bob Peterson <rpeterso@redhat.com>
@@ -5778,6 +5804,21 @@ L: linux-input@vger.kernel.org
S: Maintained
F: drivers/input/touchscreen/goodix.c
+GPIO ACPI SUPPORT
+M: Mika Westerberg <mika.westerberg@linux.intel.com>
+M: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+L: linux-gpio@vger.kernel.org
+L: linux-acpi@vger.kernel.org
+S: Maintained
+F: Documentation/acpi/gpio-properties.txt
+F: drivers/gpio/gpiolib-acpi.c
+
+GPIO IR Transmitter
+M: Sean Young <sean@mess.org>
+L: linux-media@vger.kernel.org
+S: Maintained
+F: drivers/media/rc/gpio-ir-tx.c
+
GPIO MOCKUP DRIVER
M: Bamvor Jian Zhang <bamvor.zhangjian@linaro.org>
L: linux-gpio@vger.kernel.org
@@ -5801,15 +5842,6 @@ F: include/asm-generic/gpio.h
F: include/uapi/linux/gpio.h
F: tools/gpio/
-GPIO ACPI SUPPORT
-M: Mika Westerberg <mika.westerberg@linux.intel.com>
-M: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
-L: linux-gpio@vger.kernel.org
-L: linux-acpi@vger.kernel.org
-S: Maintained
-F: Documentation/acpi/gpio-properties.txt
-F: drivers/gpio/gpiolib-acpi.c
-
GRE DEMULTIPLEXER DRIVER
M: Dmitry Kozlov <xeb@mail.ru>
L: netdev@vger.kernel.org
@@ -5824,14 +5856,6 @@ L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/aeroflex/
-GREYBUS SUBSYSTEM
-M: Johan Hovold <johan@kernel.org>
-M: Alex Elder <elder@kernel.org>
-M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-S: Maintained
-F: drivers/staging/greybus/
-L: greybus-dev@lists.linaro.org (moderated for non-subscribers)
-
GREYBUS AUDIO PROTOCOLS DRIVERS
M: Vaibhav Agarwal <vaibhav.sr@gmail.com>
M: Mark Greer <mgreer@animalcreek.com>
@@ -5849,24 +5873,7 @@ F: drivers/staging/greybus/audio_manager_sysfs.c
F: drivers/staging/greybus/audio_module.c
F: drivers/staging/greybus/audio_topology.c
-GREYBUS PROTOCOLS DRIVERS
-M: Rui Miguel Silva <rmfrfs@gmail.com>
-S: Maintained
-F: drivers/staging/greybus/sdio.c
-F: drivers/staging/greybus/light.c
-F: drivers/staging/greybus/gpio.c
-F: drivers/staging/greybus/power_supply.c
-F: drivers/staging/greybus/spi.c
-F: drivers/staging/greybus/spilib.c
-
-GREYBUS PROTOCOLS DRIVERS
-M: Bryan O'Donoghue <pure.logic@nexus-software.ie>
-S: Maintained
-F: drivers/staging/greybus/loopback.c
-F: drivers/staging/greybus/timesync.c
-F: drivers/staging/greybus/timesync_platform.c
-
-GREYBUS PROTOCOLS DRIVERS
+GREYBUS FW/HID/SPI PROTOCOLS DRIVERS
M: Viresh Kumar <vireshk@kernel.org>
S: Maintained
F: drivers/staging/greybus/authentication.c
@@ -5883,11 +5890,12 @@ F: drivers/staging/greybus/spi.c
F: drivers/staging/greybus/spilib.c
F: drivers/staging/greybus/spilib.h
-GREYBUS PROTOCOLS DRIVERS
-M: David Lin <dtwlin@gmail.com>
+GREYBUS LOOPBACK/TIME PROTOCOLS DRIVERS
+M: Bryan O'Donoghue <pure.logic@nexus-software.ie>
S: Maintained
-F: drivers/staging/greybus/uart.c
-F: drivers/staging/greybus/log.c
+F: drivers/staging/greybus/loopback.c
+F: drivers/staging/greybus/timesync.c
+F: drivers/staging/greybus/timesync_platform.c
GREYBUS PLATFORM DRIVERS
M: Vaibhav Hiremath <hvaibhav.linux@gmail.com>
@@ -5896,6 +5904,30 @@ F: drivers/staging/greybus/arche-platform.c
F: drivers/staging/greybus/arche-apb-ctrl.c
F: drivers/staging/greybus/arche_platform.h
+GREYBUS SDIO/GPIO/SPI PROTOCOLS DRIVERS
+M: Rui Miguel Silva <rmfrfs@gmail.com>
+S: Maintained
+F: drivers/staging/greybus/sdio.c
+F: drivers/staging/greybus/light.c
+F: drivers/staging/greybus/gpio.c
+F: drivers/staging/greybus/power_supply.c
+F: drivers/staging/greybus/spi.c
+F: drivers/staging/greybus/spilib.c
+
+GREYBUS SUBSYSTEM
+M: Johan Hovold <johan@kernel.org>
+M: Alex Elder <elder@kernel.org>
+M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+S: Maintained
+F: drivers/staging/greybus/
+L: greybus-dev@lists.linaro.org (moderated for non-subscribers)
+
+GREYBUS UART PROTOCOLS DRIVERS
+M: David Lin <dtwlin@gmail.com>
+S: Maintained
+F: drivers/staging/greybus/uart.c
+F: drivers/staging/greybus/log.c
+
GS1662 VIDEO SERIALIZER
M: Charles-Antoine Couret <charles-antoine.couret@nexvision.fr>
L: linux-media@vger.kernel.org
@@ -5966,13 +5998,6 @@ L: linux-efi@vger.kernel.org
S: Maintained
F: block/partitions/efi.*
-STK1160 USB VIDEO CAPTURE DRIVER
-M: Ezequiel Garcia <ezequiel@vanguardiasur.com.ar>
-L: linux-media@vger.kernel.org
-T: git git://linuxtv.org/media_tree.git
-S: Maintained
-F: drivers/media/usb/stk1160/
-
H8/300 ARCHITECTURE
M: Yoshinori Sato <ysato@users.sourceforge.jp>
L: uclinux-h8-devel@lists.sourceforge.jp (moderated for non-subscribers)
@@ -5984,33 +6009,6 @@ F: drivers/clocksource/h8300_*.c
F: drivers/clk/h8300/
F: drivers/irqchip/irq-renesas-h8*.c
-HARD DRIVE ACTIVE PROTECTION SYSTEM (HDAPS) DRIVER
-M: Frank Seidel <frank@f-seidel.de>
-L: platform-driver-x86@vger.kernel.org
-W: http://www.kernel.org/pub/linux/kernel/people/fseidel/hdaps/
-S: Maintained
-F: drivers/platform/x86/hdaps.c
-
-HDPVR USB VIDEO ENCODER DRIVER
-M: Hans Verkuil <hverkuil@xs4all.nl>
-L: linux-media@vger.kernel.org
-T: git git://linuxtv.org/media_tree.git
-W: https://linuxtv.org
-S: Odd Fixes
-F: drivers/media/usb/hdpvr/
-
-HWPOISON MEMORY FAILURE HANDLING
-M: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
-L: linux-mm@kvack.org
-S: Maintained
-F: mm/memory-failure.c
-F: mm/hwpoison-inject.c
-
-HYPERVISOR VIRTUAL CONSOLE DRIVER
-L: linuxppc-dev@lists.ozlabs.org
-S: Odd Fixes
-F: drivers/tty/hvc/
-
HACKRF MEDIA DRIVER
M: Antti Palosaari <crope@iki.fi>
L: linux-media@vger.kernel.org
@@ -6021,6 +6019,13 @@ T: git git://linuxtv.org/anttip/media_tree.git
S: Maintained
F: drivers/media/usb/hackrf/
+HARD DRIVE ACTIVE PROTECTION SYSTEM (HDAPS) DRIVER
+M: Frank Seidel <frank@f-seidel.de>
+L: platform-driver-x86@vger.kernel.org
+W: http://www.kernel.org/pub/linux/kernel/people/fseidel/hdaps/
+S: Maintained
+F: drivers/platform/x86/hdaps.c
+
HARDWARE MONITORING
M: Jean Delvare <jdelvare@suse.com>
M: Guenter Roeck <linux@roeck-us.net>
@@ -6059,6 +6064,14 @@ L: linux-parisc@vger.kernel.org
S: Maintained
F: sound/parisc/harmony.*
+HDPVR USB VIDEO ENCODER DRIVER
+M: Hans Verkuil <hverkuil@xs4all.nl>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+W: https://linuxtv.org
+S: Odd Fixes
+F: drivers/media/usb/hdpvr/
+
HEWLETT PACKARD ENTERPRISE ILO NMI WATCHDOG DRIVER
M: Jimmy Vance <jimmy.vance@hpe.com>
S: Supported
@@ -6085,13 +6098,6 @@ F: drivers/block/cciss*
F: include/linux/cciss_ioctl.h
F: include/uapi/linux/cciss_ioctl.h
-OPA-VNIC DRIVER
-M: Dennis Dalessandro <dennis.dalessandro@intel.com>
-M: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
-L: linux-rdma@vger.kernel.org
-S: Supported
-F: drivers/infiniband/ulp/opa_vnic
-
HFI1 DRIVER
M: Mike Marciniszyn <mike.marciniszyn@intel.com>
M: Dennis Dalessandro <dennis.dalessandro@intel.com>
@@ -6197,6 +6203,14 @@ S: Maintained
F: drivers/net/ethernet/hisilicon/
F: Documentation/devicetree/bindings/net/hisilicon*.txt
+HISILICON NETWORK SUBSYSTEM 3 DRIVER (HNS3)
+M: Yisen Zhuang <yisen.zhuang@huawei.com>
+M: Salil Mehta <salil.mehta@huawei.com>
+L: netdev@vger.kernel.org
+W: http://www.hisilicon.com
+S: Maintained
+F: drivers/net/ethernet/hisilicon/hns3/
+
HISILICON ROCE DRIVER
M: Lijun Ou <oulijun@huawei.com>
M: Wei Hu(Xavier) <xavier.huwei@huawei.com>
@@ -6269,12 +6283,25 @@ L: netdev@vger.kernel.org
S: Maintained
F: net/hsr/
+HT16K33 LED CONTROLLER DRIVER
+M: Robin van der Gracht <robin@protonic.nl>
+S: Maintained
+F: drivers/auxdisplay/ht16k33.c
+F: Documentation/devicetree/bindings/display/ht16k33.txt
+
HTCPEN TOUCHSCREEN DRIVER
M: Pau Oliva Fora <pof@eslack.org>
L: linux-input@vger.kernel.org
S: Maintained
F: drivers/input/touchscreen/htcpen.c
+HUAWEI ETHERNET DRIVER
+M: Aviad Krawczyk <aviad.krawczyk@huawei.com>
+L: netdev@vger.kernel.org
+S: Supported
+F: Documentation/networking/hinic.txt
+F: drivers/net/ethernet/huawei/hinic/
+
HUGETLB FILESYSTEM
M: Nadia Yvette Chambers <nyc@holomorphy.com>
S: Maintained
@@ -6288,13 +6315,22 @@ W: https://linuxtv.org
S: Supported
F: drivers/media/platform/sti/hva
+HWPOISON MEMORY FAILURE HANDLING
+M: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
+L: linux-mm@kvack.org
+S: Maintained
+F: mm/memory-failure.c
+F: mm/hwpoison-inject.c
+
Hyper-V CORE AND DRIVERS
M: "K. Y. Srinivasan" <kys@microsoft.com>
M: Haiyang Zhang <haiyangz@microsoft.com>
M: Stephen Hemminger <sthemmin@microsoft.com>
L: devel@linuxdriverproject.org
S: Maintained
+F: Documentation/networking/netvsc.txt
F: arch/x86/include/asm/mshyperv.h
+F: arch/x86/include/asm/trace/hyperv.h
F: arch/x86/include/uapi/asm/hyperv.h
F: arch/x86/kernel/cpu/mshyperv.c
F: arch/x86/hyperv
@@ -6306,10 +6342,24 @@ F: drivers/net/hyperv/
F: drivers/scsi/storvsc_drv.c
F: drivers/uio/uio_hv_generic.c
F: drivers/video/fbdev/hyperv_fb.c
+F: net/vmw_vsock/hyperv_transport.c
F: include/linux/hyperv.h
+F: include/uapi/linux/hyperv.h
F: tools/hv/
F: Documentation/ABI/stable/sysfs-bus-vmbus
+HYPERVISOR VIRTUAL CONSOLE DRIVER
+L: linuxppc-dev@lists.ozlabs.org
+S: Odd Fixes
+F: drivers/tty/hvc/
+
+I2C ACPI SUPPORT
+M: Mika Westerberg <mika.westerberg@linux.intel.com>
+L: linux-i2c@vger.kernel.org
+L: linux-acpi@vger.kernel.org
+S: Maintained
+F: drivers/i2c/i2c-core-acpi.c
+
I2C MUXES
M: Peter Rosin <peda@axentia.se>
L: linux-i2c@vger.kernel.org
@@ -6332,6 +6382,36 @@ F: Documentation/i2c/busses/i2c-parport-light
F: drivers/i2c/busses/i2c-parport.c
F: drivers/i2c/busses/i2c-parport-light.c
+I2C SUBSYSTEM
+M: Wolfram Sang <wsa@the-dreams.de>
+L: linux-i2c@vger.kernel.org
+W: https://i2c.wiki.kernel.org/
+Q: https://patchwork.ozlabs.org/project/linux-i2c/list/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/wsa/linux.git
+S: Maintained
+F: Documentation/devicetree/bindings/i2c/
+F: Documentation/i2c/
+F: drivers/i2c/
+F: drivers/i2c/*/
+F: include/linux/i2c.h
+F: include/linux/i2c-*.h
+F: include/uapi/linux/i2c.h
+F: include/uapi/linux/i2c-*.h
+
+I2C-TAOS-EVM DRIVER
+M: Jean Delvare <jdelvare@suse.com>
+L: linux-i2c@vger.kernel.org
+S: Maintained
+F: Documentation/i2c/busses/i2c-taos-evm
+F: drivers/i2c/busses/i2c-taos-evm.c
+
+I2C-TINY-USB DRIVER
+M: Till Harbaum <till@harbaum.org>
+L: linux-i2c@vger.kernel.org
+W: http://www.harbaum.org/till/i2c_tiny_usb
+S: Maintained
+F: drivers/i2c/busses/i2c-tiny-usb.c
+
I2C/SMBUS CONTROLLER DRIVERS FOR PC
M: Jean Delvare <jdelvare@suse.com>
L: linux-i2c@vger.kernel.org
@@ -6379,43 +6459,6 @@ L: linux-i2c@vger.kernel.org
S: Maintained
F: drivers/i2c/i2c-stub.c
-I2C SUBSYSTEM
-M: Wolfram Sang <wsa@the-dreams.de>
-L: linux-i2c@vger.kernel.org
-W: https://i2c.wiki.kernel.org/
-Q: https://patchwork.ozlabs.org/project/linux-i2c/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/wsa/linux.git
-S: Maintained
-F: Documentation/devicetree/bindings/i2c/
-F: Documentation/i2c/
-F: drivers/i2c/
-F: drivers/i2c/*/
-F: include/linux/i2c.h
-F: include/linux/i2c-*.h
-F: include/uapi/linux/i2c.h
-F: include/uapi/linux/i2c-*.h
-
-I2C ACPI SUPPORT
-M: Mika Westerberg <mika.westerberg@linux.intel.com>
-L: linux-i2c@vger.kernel.org
-L: linux-acpi@vger.kernel.org
-S: Maintained
-F: drivers/i2c/i2c-core-acpi.c
-
-I2C-TAOS-EVM DRIVER
-M: Jean Delvare <jdelvare@suse.com>
-L: linux-i2c@vger.kernel.org
-S: Maintained
-F: Documentation/i2c/busses/i2c-taos-evm
-F: drivers/i2c/busses/i2c-taos-evm.c
-
-I2C-TINY-USB DRIVER
-M: Till Harbaum <till@harbaum.org>
-L: linux-i2c@vger.kernel.org
-W: http://www.harbaum.org/till/i2c_tiny_usb
-S: Maintained
-F: drivers/i2c/busses/i2c-tiny-usb.c
-
i386 BOOT CODE
M: "H. Peter Anvin" <hpa@zytor.com>
S: Maintained
@@ -6434,17 +6477,15 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux.git
S: Maintained
F: arch/ia64/
-IBM Power VMX Cryptographic instructions
-M: Leonidas S. Barbosa <leosilva@linux.vnet.ibm.com>
-M: Paulo Flabiano Smorigo <pfsmorigo@linux.vnet.ibm.com>
-L: linux-crypto@vger.kernel.org
+IBM Power 842 compression accelerator
+M: Haren Myneni <haren@us.ibm.com>
S: Supported
-F: drivers/crypto/vmx/Makefile
-F: drivers/crypto/vmx/Kconfig
-F: drivers/crypto/vmx/vmx.c
-F: drivers/crypto/vmx/aes*
-F: drivers/crypto/vmx/ghash*
-F: drivers/crypto/vmx/ppc-xlate.pl
+F: drivers/crypto/nx/Makefile
+F: drivers/crypto/nx/Kconfig
+F: drivers/crypto/nx/nx-842*
+F: include/linux/sw842.h
+F: crypto/842.c
+F: lib/842/
IBM Power in-Nest Crypto Acceleration
M: Leonidas S. Barbosa <leosilva@linux.vnet.ibm.com>
@@ -6459,33 +6500,38 @@ F: drivers/crypto/nx/nx.*
F: drivers/crypto/nx/nx_csbcpb.h
F: drivers/crypto/nx/nx_debugfs.h
-IBM Power 842 compression accelerator
-M: Haren Myneni <haren@us.ibm.com>
-S: Supported
-F: drivers/crypto/nx/Makefile
-F: drivers/crypto/nx/Kconfig
-F: drivers/crypto/nx/nx-842*
-F: include/linux/sw842.h
-F: crypto/842.c
-F: lib/842/
-
IBM Power Linux RAID adapter
M: Brian King <brking@us.ibm.com>
S: Supported
F: drivers/scsi/ipr.*
-IBM Power Virtual Ethernet Device Driver
+IBM Power SRIOV Virtual NIC Device Driver
M: Thomas Falcon <tlfalcon@linux.vnet.ibm.com>
+M: John Allen <jallen@linux.vnet.ibm.com>
L: netdev@vger.kernel.org
S: Supported
-F: drivers/net/ethernet/ibm/ibmveth.*
+F: drivers/net/ethernet/ibm/ibmvnic.*
-IBM Power SRIOV Virtual NIC Device Driver
+IBM Power Virtual Accelerator Switchboard
+M: Sukadev Bhattiprolu
+L: linuxppc-dev@lists.ozlabs.org
+S: Supported
+F: arch/powerpc/platforms/powernv/vas*
+F: arch/powerpc/platforms/powernv/copy-paste.h
+F: arch/powerpc/include/asm/vas.h
+F: arch/powerpc/include/uapi/asm/vas.h
+
+IBM Power Virtual Ethernet Device Driver
M: Thomas Falcon <tlfalcon@linux.vnet.ibm.com>
-M: John Allen <jallen@linux.vnet.ibm.com>
L: netdev@vger.kernel.org
S: Supported
-F: drivers/net/ethernet/ibm/ibmvnic.*
+F: drivers/net/ethernet/ibm/ibmveth.*
+
+IBM Power Virtual FC Device Drivers
+M: Tyrel Datwyler <tyreld@linux.vnet.ibm.com>
+L: linux-scsi@vger.kernel.org
+S: Supported
+F: drivers/scsi/ibmvscsi/ibmvfc*
IBM Power Virtual SCSI Device Drivers
M: Tyrel Datwyler <tyreld@linux.vnet.ibm.com>
@@ -6502,11 +6548,17 @@ L: target-devel@vger.kernel.org
S: Supported
F: drivers/scsi/ibmvscsi_tgt/
-IBM Power Virtual FC Device Drivers
-M: Tyrel Datwyler <tyreld@linux.vnet.ibm.com>
-L: linux-scsi@vger.kernel.org
+IBM Power VMX Cryptographic instructions
+M: Leonidas S. Barbosa <leosilva@linux.vnet.ibm.com>
+M: Paulo Flabiano Smorigo <pfsmorigo@linux.vnet.ibm.com>
+L: linux-crypto@vger.kernel.org
S: Supported
-F: drivers/scsi/ibmvscsi/ibmvfc*
+F: drivers/crypto/vmx/Makefile
+F: drivers/crypto/vmx/Kconfig
+F: drivers/crypto/vmx/vmx.c
+F: drivers/crypto/vmx/aes*
+F: drivers/crypto/vmx/ghash*
+F: drivers/crypto/vmx/ppc-xlate.pl
IBM ServeRAID RAID DRIVER
S: Orphan
@@ -6518,11 +6570,6 @@ S: Maintained
F: drivers/mfd/lpc_ich.c
F: drivers/gpio/gpio-ich.c
-IDT VersaClock 5 CLOCK DRIVER
-M: Marek Vasut <marek.vasut@gmail.com>
-S: Maintained
-F: drivers/clk/clk-versaclock5.c
-
IDE SUBSYSTEM
M: "David S. Miller" <davem@davemloft.net>
L: linux-ide@vger.kernel.org
@@ -6533,6 +6580,13 @@ F: Documentation/ide/
F: drivers/ide/
F: include/linux/ide.h
+IDE/ATAPI DRIVERS
+M: Borislav Petkov <bp@alien8.de>
+L: linux-ide@vger.kernel.org
+S: Maintained
+F: Documentation/cdrom/ide-cd
+F: drivers/ide/ide-cd*
+
IDEAPAD LAPTOP EXTRAS DRIVER
M: Ike Panhc <ike.pan@canonical.com>
L: platform-driver-x86@vger.kernel.org
@@ -6547,12 +6601,10 @@ W: https://github.com/o2genum/ideapad-slidebar
S: Maintained
F: drivers/input/misc/ideapad_slidebar.c
-IDE/ATAPI DRIVERS
-M: Borislav Petkov <bp@alien8.de>
-L: linux-ide@vger.kernel.org
+IDT VersaClock 5 CLOCK DRIVER
+M: Marek Vasut <marek.vasut@gmail.com>
S: Maintained
-F: Documentation/cdrom/ide-cd
-F: drivers/ide/ide-cd*
+F: drivers/clk/clk-versaclock5.c
IEEE 802.15.4 SUBSYSTEM
M: Alexander Aring <alex.aring@gmail.com>
@@ -6642,6 +6694,16 @@ S: Maintained
F: Documentation/devicetree/bindings/auxdisplay/img-ascii-lcd.txt
F: drivers/auxdisplay/img-ascii-lcd.c
+IMGTEC IR DECODER DRIVER
+M: James Hogan <james.hogan@imgtec.com>
+S: Maintained
+F: drivers/media/rc/img-ir/
+
+IMS TWINTURBO FRAMEBUFFER DRIVER
+L: linux-fbdev@vger.kernel.org
+S: Orphan
+F: drivers/video/fbdev/imsttfb.c
+
INA209 HARDWARE MONITOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
L: linux-hwmon@vger.kernel.org
@@ -6667,37 +6729,6 @@ W: http://industrypack.sourceforge.net
S: Maintained
F: drivers/ipack/
-INGENIC JZ4780 DMA Driver
-M: Zubair Lutfullah Kakakhel <Zubair.Kakakhel@imgtec.com>
-S: Maintained
-F: drivers/dma/dma-jz4780.c
-
-INGENIC JZ4780 NAND DRIVER
-M: Harvey Hunt <harveyhuntnexus@gmail.com>
-L: linux-mtd@lists.infradead.org
-S: Maintained
-F: drivers/mtd/nand/jz4780_*
-
-INTEGRITY MEASUREMENT ARCHITECTURE (IMA)
-M: Mimi Zohar <zohar@linux.vnet.ibm.com>
-M: Dmitry Kasatkin <dmitry.kasatkin@gmail.com>
-L: linux-ima-devel@lists.sourceforge.net
-L: linux-ima-user@lists.sourceforge.net
-L: linux-security-module@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/zohar/linux-integrity.git
-S: Supported
-F: security/integrity/ima/
-
-IMGTEC IR DECODER DRIVER
-M: James Hogan <james.hogan@imgtec.com>
-S: Maintained
-F: drivers/media/rc/img-ir/
-
-IMS TWINTURBO FRAMEBUFFER DRIVER
-L: linux-fbdev@vger.kernel.org
-S: Orphan
-F: drivers/video/fbdev/imsttfb.c
-
INFINIBAND SUBSYSTEM
M: Doug Ledford <dledford@redhat.com>
M: Sean Hefty <sean.hefty@intel.com>
@@ -6714,6 +6745,17 @@ F: include/uapi/linux/if_infiniband.h
F: include/uapi/rdma/
F: include/rdma/
+INGENIC JZ4780 DMA Driver
+M: Zubair Lutfullah Kakakhel <Zubair.Kakakhel@imgtec.com>
+S: Maintained
+F: drivers/dma/dma-jz4780.c
+
+INGENIC JZ4780 NAND DRIVER
+M: Harvey Hunt <harveyhuntnexus@gmail.com>
+L: linux-mtd@lists.infradead.org
+S: Maintained
+F: drivers/mtd/nand/jz4780_*
+
INOTIFY
M: John McCutchan <john@johnmccutchan.com>
M: Robert Love <rlove@rlove.org>
@@ -6752,6 +6794,22 @@ F: drivers/crypto/inside-secure/
S: Maintained
L: linux-crypto@vger.kernel.org
+INTEGRITY MEASUREMENT ARCHITECTURE (IMA)
+M: Mimi Zohar <zohar@linux.vnet.ibm.com>
+M: Dmitry Kasatkin <dmitry.kasatkin@gmail.com>
+L: linux-ima-devel@lists.sourceforge.net
+L: linux-ima-user@lists.sourceforge.net
+L: linux-security-module@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/zohar/linux-integrity.git
+S: Supported
+F: security/integrity/ima/
+
+INTEL 810/815 FRAMEBUFFER DRIVER
+M: Antonino Daplas <adaplas@gmail.com>
+L: linux-fbdev@vger.kernel.org
+S: Maintained
+F: drivers/video/fbdev/i810/
+
INTEL ASoC BDW/HSW DRIVERS
M: Jie Yang <yang.jie@linux.intel.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
@@ -6769,17 +6827,75 @@ T: git git://git.code.sf.net/p/intel-sas/isci
S: Supported
F: drivers/scsi/isci/
+INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
+M: Jani Nikula <jani.nikula@linux.intel.com>
+M: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+M: Rodrigo Vivi <rodrigo.vivi@intel.com>
+L: intel-gfx@lists.freedesktop.org
+W: https://01.org/linuxgraphics/
+B: https://01.org/linuxgraphics/documentation/how-report-bugs
+C: irc://chat.freenode.net/intel-gfx
+Q: http://patchwork.freedesktop.org/project/intel-gfx/
+T: git git://anongit.freedesktop.org/drm-intel
+S: Supported
+F: drivers/gpu/drm/i915/
+F: include/drm/i915*
+F: include/uapi/drm/i915_drm.h
+F: Documentation/gpu/i915.rst
+
+INTEL ETHERNET DRIVERS
+M: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+L: intel-wired-lan@lists.osuosl.org (moderated for non-subscribers)
+W: http://www.intel.com/support/feedback.htm
+W: http://e1000.sourceforge.net/
+Q: http://patchwork.ozlabs.org/project/intel-wired-lan/list/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-queue.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue.git
+S: Supported
+F: Documentation/networking/e100.txt
+F: Documentation/networking/e1000.txt
+F: Documentation/networking/e1000e.txt
+F: Documentation/networking/igb.txt
+F: Documentation/networking/igbvf.txt
+F: Documentation/networking/ixgb.txt
+F: Documentation/networking/ixgbe.txt
+F: Documentation/networking/ixgbevf.txt
+F: Documentation/networking/i40e.txt
+F: Documentation/networking/i40evf.txt
+F: drivers/net/ethernet/intel/
+F: drivers/net/ethernet/intel/*/
+F: include/linux/avf/virtchnl.h
+
+INTEL FRAMEBUFFER DRIVER (excluding 810 and 815)
+M: Maik Broemme <mbroemme@libmpq.org>
+L: linux-fbdev@vger.kernel.org
+S: Maintained
+F: Documentation/fb/intelfb.txt
+F: drivers/video/fbdev/intelfb/
+
+INTEL GVT-g DRIVERS (Intel GPU Virtualization)
+M: Zhenyu Wang <zhenyuw@linux.intel.com>
+M: Zhi Wang <zhi.a.wang@intel.com>
+L: intel-gvt-dev@lists.freedesktop.org
+L: intel-gfx@lists.freedesktop.org
+W: https://01.org/igvt-g
+T: git https://github.com/01org/gvt-linux.git
+S: Supported
+F: drivers/gpu/drm/i915/gvt/
+
INTEL HID EVENT DRIVER
M: Alex Hung <alex.hung@canonical.com>
L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/platform/x86/intel-hid.c
-INTEL VIRTUAL BUTTON DRIVER
-M: AceLan Kao <acelan.kao@canonical.com>
-L: platform-driver-x86@vger.kernel.org
-S: Maintained
-F: drivers/platform/x86/intel-vbtn.c
+INTEL I/OAT DMA DRIVER
+M: Dave Jiang <dave.jiang@intel.com>
+R: Dan Williams <dan.j.williams@intel.com>
+L: dmaengine@vger.kernel.org
+Q: https://patchwork.kernel.org/project/linux-dmaengine/list/
+S: Supported
+F: drivers/dma/ioat*
INTEL IDLE DRIVER
M: Jacob Pan <jacob.jun.pan@linux.intel.com>
@@ -6797,41 +6913,6 @@ L: linux-input@vger.kernel.org
S: Maintained
F: drivers/hid/intel-ish-hid/
-INTEL PSTATE DRIVER
-M: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
-M: Len Brown <lenb@kernel.org>
-L: linux-pm@vger.kernel.org
-S: Supported
-F: drivers/cpufreq/intel_pstate.c
-
-INTEL FRAMEBUFFER DRIVER (excluding 810 and 815)
-M: Maik Broemme <mbroemme@libmpq.org>
-L: linux-fbdev@vger.kernel.org
-S: Maintained
-F: Documentation/fb/intelfb.txt
-F: drivers/video/fbdev/intelfb/
-
-INTEL 810/815 FRAMEBUFFER DRIVER
-M: Antonino Daplas <adaplas@gmail.com>
-L: linux-fbdev@vger.kernel.org
-S: Maintained
-F: drivers/video/fbdev/i810/
-
-INTEL MENLOW THERMAL DRIVER
-M: Sujith Thomas <sujith.thomas@intel.com>
-L: platform-driver-x86@vger.kernel.org
-W: https://01.org/linux-acpi
-S: Supported
-F: drivers/platform/x86/intel_menlow.c
-
-INTEL I/OAT DMA DRIVER
-M: Dave Jiang <dave.jiang@intel.com>
-R: Dan Williams <dan.j.williams@intel.com>
-L: dmaengine@vger.kernel.org
-Q: https://patchwork.kernel.org/project/linux-dmaengine/list/
-S: Supported
-F: drivers/dma/ioat*
-
INTEL IOMMU (VT-d)
M: David Woodhouse <dwmw2@infradead.org>
L: iommu@lists.linux-foundation.org
@@ -6860,35 +6941,23 @@ M: Deepak Saxena <dsaxena@plexity.net>
S: Maintained
F: drivers/char/hw_random/ixp4xx-rng.c
-INTEL ETHERNET DRIVERS
-M: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
-L: intel-wired-lan@lists.osuosl.org (moderated for non-subscribers)
-W: http://www.intel.com/support/feedback.htm
-W: http://e1000.sourceforge.net/
-Q: http://patchwork.ozlabs.org/project/intel-wired-lan/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-queue.git
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue.git
+INTEL MANAGEMENT ENGINE (mei)
+M: Tomas Winkler <tomas.winkler@intel.com>
+L: linux-kernel@vger.kernel.org
S: Supported
-F: Documentation/networking/e100.txt
-F: Documentation/networking/e1000.txt
-F: Documentation/networking/e1000e.txt
-F: Documentation/networking/igb.txt
-F: Documentation/networking/igbvf.txt
-F: Documentation/networking/ixgb.txt
-F: Documentation/networking/ixgbe.txt
-F: Documentation/networking/ixgbevf.txt
-F: Documentation/networking/i40e.txt
-F: Documentation/networking/i40evf.txt
-F: drivers/net/ethernet/intel/
-F: drivers/net/ethernet/intel/*/
-F: include/linux/avf/virtchnl.h
+F: include/uapi/linux/mei.h
+F: include/linux/mei_cl_bus.h
+F: drivers/misc/mei/*
+F: drivers/watchdog/mei_wdt.c
+F: Documentation/misc-devices/mei/*
+F: samples/mei/*
-INTEL RDMA RNIC DRIVER
-M: Faisal Latif <faisal.latif@intel.com>
-M: Shiraz Saleem <shiraz.saleem@intel.com>
-L: linux-rdma@vger.kernel.org
-S: Supported
-F: drivers/infiniband/hw/i40iw/
+INTEL MENLOW THERMAL DRIVER
+M: Sujith Thomas <sujith.thomas@intel.com>
+L: platform-driver-x86@vger.kernel.org
+W: https://01.org/linux-acpi
+S: Supported
+F: drivers/platform/x86/intel_menlow.c
INTEL MERRIFIELD GPIO DRIVER
M: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
@@ -6896,11 +6965,38 @@ L: linux-gpio@vger.kernel.org
S: Maintained
F: drivers/gpio/gpio-merrifield.c
-INTEL-MID GPIO DRIVER
-M: David Cohen <david.a.cohen@linux.intel.com>
-L: linux-gpio@vger.kernel.org
+INTEL MIC DRIVERS (mic)
+M: Sudeep Dutt <sudeep.dutt@intel.com>
+M: Ashutosh Dixit <ashutosh.dixit@intel.com>
+S: Supported
+W: https://github.com/sudeepdutt/mic
+W: http://software.intel.com/en-us/mic-developer
+F: include/linux/mic_bus.h
+F: include/linux/scif.h
+F: include/uapi/linux/mic_common.h
+F: include/uapi/linux/mic_ioctl.h
+F: include/uapi/linux/scif_ioctl.h
+F: drivers/misc/mic/
+F: drivers/dma/mic_x100_dma.c
+F: drivers/dma/mic_x100_dma.h
+F: Documentation/mic/
+
+INTEL PMC CORE DRIVER
+M: Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com>
+M: Vishwanath Somayaji <vishwanath.somayaji@intel.com>
+L: platform-driver-x86@vger.kernel.org
S: Maintained
-F: drivers/gpio/gpio-intel-mid.c
+F: arch/x86/include/asm/pmc_core.h
+F: drivers/platform/x86/intel_pmc_core*
+
+INTEL PMC/P-Unit IPC DRIVER
+M:	Zha Qipeng <qipeng.zha@intel.com>
+L: platform-driver-x86@vger.kernel.org
+S: Maintained
+F: drivers/platform/x86/intel_pmc_ipc.c
+F: drivers/platform/x86/intel_punit_ipc.c
+F: arch/x86/include/asm/intel_pmc_ipc.h
+F: arch/x86/include/asm/intel_punit_ipc.h
INTEL PRO/WIRELESS 2100, 2200BG, 2915ABG NETWORK CONNECTION SUPPORT
M: Stanislav Yakovlev <stas.yakovlev@gmail.com>
@@ -6910,31 +7006,32 @@ F: Documentation/networking/README.ipw2100
F: Documentation/networking/README.ipw2200
F: drivers/net/wireless/intel/ipw2x00/
-INTEL(R) TRACE HUB
-M: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+INTEL PSTATE DRIVER
+M: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+M: Len Brown <lenb@kernel.org>
+L: linux-pm@vger.kernel.org
S: Supported
-F: Documentation/trace/intel_th.txt
-F: drivers/hwtracing/intel_th/
+F: drivers/cpufreq/intel_pstate.c
-INTEL(R) TRUSTED EXECUTION TECHNOLOGY (TXT)
-M: Ning Sun <ning.sun@intel.com>
-L: tboot-devel@lists.sourceforge.net
-W: http://tboot.sourceforge.net
-T: hg http://tboot.hg.sourceforge.net:8000/hgroot/tboot/tboot
+INTEL RDMA RNIC DRIVER
+M: Faisal Latif <faisal.latif@intel.com>
+M: Shiraz Saleem <shiraz.saleem@intel.com>
+L: linux-rdma@vger.kernel.org
S: Supported
-F: Documentation/intel_txt.txt
-F: include/linux/tboot.h
-F: arch/x86/kernel/tboot.c
+F: drivers/infiniband/hw/i40iw/
-INTEL WIRELESS WIMAX CONNECTION 2400
-M: Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
-M: linux-wimax@intel.com
-L: wimax@linuxwimax.org (subscribers-only)
-S: Supported
-W: http://linuxwimax.org
-F: Documentation/wimax/README.i2400m
-F: drivers/net/wimax/i2400m/
-F: include/uapi/linux/wimax/i2400m.h
+INTEL TELEMETRY DRIVER
+M: Souvik Kumar Chakravarty <souvik.k.chakravarty@intel.com>
+L: platform-driver-x86@vger.kernel.org
+S: Maintained
+F: arch/x86/include/asm/intel_telemetry.h
+F: drivers/platform/x86/intel_telemetry*
+
+INTEL VIRTUAL BUTTON DRIVER
+M: AceLan Kao <acelan.kao@canonical.com>
+L: platform-driver-x86@vger.kernel.org
+S: Maintained
+F: drivers/platform/x86/intel-vbtn.c
INTEL WIRELESS 3945ABG/BG, 4965AGN (iwlegacy)
M: Stanislaw Gruszka <sgruszka@redhat.com>
@@ -6953,56 +7050,37 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi.git
S: Supported
F: drivers/net/wireless/intel/iwlwifi/
-INTEL MANAGEMENT ENGINE (mei)
-M: Tomas Winkler <tomas.winkler@intel.com>
-L: linux-kernel@vger.kernel.org
+INTEL WIRELESS WIMAX CONNECTION 2400
+M: Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+M: linux-wimax@intel.com
+L: wimax@linuxwimax.org (subscribers-only)
S: Supported
-F: include/uapi/linux/mei.h
-F: include/linux/mei_cl_bus.h
-F: drivers/misc/mei/*
-F: drivers/watchdog/mei_wdt.c
-F: Documentation/misc-devices/mei/*
-F: samples/mei/*
+W: http://linuxwimax.org
+F: Documentation/wimax/README.i2400m
+F: drivers/net/wimax/i2400m/
+F: include/uapi/linux/wimax/i2400m.h
-INTEL MIC DRIVERS (mic)
-M: Sudeep Dutt <sudeep.dutt@intel.com>
-M: Ashutosh Dixit <ashutosh.dixit@intel.com>
+INTEL(R) TRACE HUB
+M: Alexander Shishkin <alexander.shishkin@linux.intel.com>
S: Supported
-W: https://github.com/sudeepdutt/mic
-W: http://software.intel.com/en-us/mic-developer
-F: include/linux/mic_bus.h
-F: include/linux/scif.h
-F: include/uapi/linux/mic_common.h
-F: include/uapi/linux/mic_ioctl.h
-F: include/uapi/linux/scif_ioctl.h
-F: drivers/misc/mic/
-F: drivers/dma/mic_x100_dma.c
-F: drivers/dma/mic_x100_dma.h
-F: Documentation/mic/
-
-INTEL PMC/P-Unit IPC DRIVER
-M: Zha Qipeng<qipeng.zha@intel.com>
-L: platform-driver-x86@vger.kernel.org
-S: Maintained
-F: drivers/platform/x86/intel_pmc_ipc.c
-F: drivers/platform/x86/intel_punit_ipc.c
-F: arch/x86/include/asm/intel_pmc_ipc.h
-F: arch/x86/include/asm/intel_punit_ipc.h
+F: Documentation/trace/intel_th.txt
+F: drivers/hwtracing/intel_th/
-INTEL TELEMETRY DRIVER
-M: Souvik Kumar Chakravarty <souvik.k.chakravarty@intel.com>
-L: platform-driver-x86@vger.kernel.org
-S: Maintained
-F: arch/x86/include/asm/intel_telemetry.h
-F: drivers/platform/x86/intel_telemetry*
+INTEL(R) TRUSTED EXECUTION TECHNOLOGY (TXT)
+M: Ning Sun <ning.sun@intel.com>
+L: tboot-devel@lists.sourceforge.net
+W: http://tboot.sourceforge.net
+T: hg http://tboot.hg.sourceforge.net:8000/hgroot/tboot/tboot
+S: Supported
+F: Documentation/intel_txt.txt
+F: include/linux/tboot.h
+F: arch/x86/kernel/tboot.c
-INTEL PMC CORE DRIVER
-M: Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com>
-M: Vishwanath Somayaji <vishwanath.somayaji@intel.com>
-L: platform-driver-x86@vger.kernel.org
+INTEL-MID GPIO DRIVER
+M: David Cohen <david.a.cohen@linux.intel.com>
+L: linux-gpio@vger.kernel.org
S: Maintained
-F: arch/x86/include/asm/pmc_core.h
-F: drivers/platform/x86/intel_pmc_core*
+F: drivers/gpio/gpio-intel-mid.c
INVENSENSE MPU-3050 GYROSCOPE DRIVER
M: Linus Walleij <linus.walleij@linaro.org>
@@ -7048,13 +7126,6 @@ F: drivers/char/ipmi/
F: include/linux/ipmi*
F: include/uapi/linux/ipmi*
-QCOM AUDIO (ASoC) DRIVERS
-M: Patrick Lai <plai@codeaurora.org>
-M: Banajit Goswami <bgoswami@codeaurora.org>
-L: alsa-devel@alsa-project.org (moderated for non-subscribers)
-S: Supported
-F: sound/soc/qcom/
-
IPS SCSI RAID DRIVER
M: Adaptec OEM Raid Solutions <aacraid@adaptec.com>
L: linux-scsi@vger.kernel.org
@@ -7097,9 +7168,16 @@ W: http://irda.sourceforge.net/
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/sameo/irda-2.6.git
F: Documentation/networking/irda.txt
-F: drivers/net/irda/
-F: include/net/irda/
-F: net/irda/
+F: drivers/staging/irda/
+
+IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY)
+M: Marc Zyngier <marc.zyngier@arm.com>
+S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
+F: Documentation/IRQ-domain.txt
+F: include/linux/irqdomain.h
+F: kernel/irq/irqdomain.c
+F: kernel/irq/msi.c
IRQ SUBSYSTEM
M: Thomas Gleixner <tglx@linutronix.de>
@@ -7115,19 +7193,9 @@ M: Marc Zyngier <marc.zyngier@arm.com>
L: linux-kernel@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
-T: git git://git.infradead.org/users/jcooper/linux.git irqchip/core
F: Documentation/devicetree/bindings/interrupt-controller/
F: drivers/irqchip/
-IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY)
-M: Marc Zyngier <marc.zyngier@arm.com>
-S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
-F: Documentation/IRQ-domain.txt
-F: include/linux/irqdomain.h
-F: kernel/irq/irqdomain.c
-F: kernel/irq/msi.c
-
ISA
M: William Breathitt Gray <vilhelm.gray@gmail.com>
S: Maintained
@@ -7135,13 +7203,6 @@ F: Documentation/isa.txt
F: drivers/base/isa.c
F: include/linux/isa.h
-ISAPNP
-M: Jaroslav Kysela <perex@perex.cz>
-S: Maintained
-F: Documentation/isapnp.txt
-F: drivers/pnp/isapnp/
-F: include/linux/isapnp.h
-
ISA RADIO MODULE
M: Hans Verkuil <hverkuil@xs4all.nl>
L: linux-media@vger.kernel.org
@@ -7150,11 +7211,12 @@ W: https://linuxtv.org
S: Maintained
F: drivers/media/radio/radio-isa*
-iSCSI BOOT FIRMWARE TABLE (iBFT) DRIVER
-M: Peter Jones <pjones@redhat.com>
-M: Konrad Rzeszutek Wilk <konrad@kernel.org>
+ISAPNP
+M: Jaroslav Kysela <perex@perex.cz>
S: Maintained
-F: drivers/firmware/iscsi_ibft*
+F: Documentation/isapnp.txt
+F: drivers/pnp/isapnp/
+F: include/linux/isapnp.h
ISCSI
M: Lee Duncan <lduncan@suse.com>
@@ -7165,6 +7227,12 @@ S: Maintained
F: drivers/scsi/*iscsi*
F: include/scsi/*iscsi*
+iSCSI BOOT FIRMWARE TABLE (iBFT) DRIVER
+M: Peter Jones <pjones@redhat.com>
+M: Konrad Rzeszutek Wilk <konrad@kernel.org>
+S: Maintained
+F: drivers/firmware/iscsi_ibft*
+
ISCSI EXTENSIONS FOR RDMA (ISER) INITIATOR
M: Or Gerlitz <ogerlitz@mellanox.com>
M: Sagi Grimberg <sagi@grimberg.me>
@@ -7420,27 +7488,6 @@ S: Maintained
F: arch/x86/include/asm/svm.h
F: arch/x86/kvm/svm.c
-KERNEL VIRTUAL MACHINE (KVM) FOR POWERPC
-M: Alexander Graf <agraf@suse.com>
-L: kvm-ppc@vger.kernel.org
-W: http://www.linux-kvm.org/
-T: git git://github.com/agraf/linux-2.6.git
-S: Supported
-F: arch/powerpc/include/asm/kvm*
-F: arch/powerpc/kvm/
-
-KERNEL VIRTUAL MACHINE for s390 (KVM/s390)
-M: Christian Borntraeger <borntraeger@de.ibm.com>
-M: Cornelia Huck <cohuck@redhat.com>
-L: linux-s390@vger.kernel.org
-W: http://www.ibm.com/developerworks/linux/linux390/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux.git
-S: Supported
-F: Documentation/s390/kvm.txt
-F: arch/s390/include/asm/kvm*
-F: arch/s390/kvm/
-F: arch/s390/mm/gmap.c
-
KERNEL VIRTUAL MACHINE (KVM) FOR ARM
M: Christoffer Dall <christoffer.dall@linaro.org>
M: Marc Zyngier <marc.zyngier@arm.com>
@@ -7455,6 +7502,15 @@ F: arch/arm/kvm/
F: virt/kvm/arm/
F: include/kvm/arm_*
+KERNEL VIRTUAL MACHINE (KVM) FOR POWERPC
+M: Alexander Graf <agraf@suse.com>
+L: kvm-ppc@vger.kernel.org
+W: http://www.linux-kvm.org/
+T: git git://github.com/agraf/linux-2.6.git
+S: Supported
+F: arch/powerpc/include/asm/kvm*
+F: arch/powerpc/kvm/
+
KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64)
M: Christoffer Dall <christoffer.dall@linaro.org>
M: Marc Zyngier <marc.zyngier@arm.com>
@@ -7473,6 +7529,18 @@ F: arch/mips/include/uapi/asm/kvm*
F: arch/mips/include/asm/kvm*
F: arch/mips/kvm/
+KERNEL VIRTUAL MACHINE for s390 (KVM/s390)
+M: Christian Borntraeger <borntraeger@de.ibm.com>
+M: Cornelia Huck <cohuck@redhat.com>
+L: linux-s390@vger.kernel.org
+W: http://www.ibm.com/developerworks/linux/linux390/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux.git
+S: Supported
+F: Documentation/s390/kvm.txt
+F: arch/s390/include/asm/kvm*
+F: arch/s390/kvm/
+F: arch/s390/mm/gmap.c
+
KERNFS
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
M: Tejun Heo <tj@kernel.org>
@@ -7490,17 +7558,15 @@ F: include/linux/kexec.h
F: include/uapi/linux/kexec.h
F: kernel/kexec*
-KEYS/KEYRINGS:
-M: David Howells <dhowells@redhat.com>
+KEYS-ENCRYPTED
+M: Mimi Zohar <zohar@linux.vnet.ibm.com>
+M: David Safford <safford@us.ibm.com>
+L: linux-security-module@vger.kernel.org
L: keyrings@vger.kernel.org
-S: Maintained
-F: Documentation/security/keys/core.rst
-F: include/linux/key.h
-F: include/linux/key-type.h
-F: include/linux/keyctl.h
-F: include/uapi/linux/keyctl.h
-F: include/keys/
-F: security/keys/
+S: Supported
+F: Documentation/security/keys/trusted-encrypted.rst
+F: include/keys/encrypted-type.h
+F: security/keys/encrypted-keys/
KEYS-TRUSTED
M: David Safford <safford@us.ibm.com>
@@ -7513,15 +7579,17 @@ F: include/keys/trusted-type.h
F: security/keys/trusted.c
F: security/keys/trusted.h
-KEYS-ENCRYPTED
-M: Mimi Zohar <zohar@linux.vnet.ibm.com>
-M: David Safford <safford@us.ibm.com>
-L: linux-security-module@vger.kernel.org
+KEYS/KEYRINGS:
+M: David Howells <dhowells@redhat.com>
L: keyrings@vger.kernel.org
-S: Supported
-F: Documentation/security/keys/trusted-encrypted.rst
-F: include/keys/encrypted-type.h
-F: security/keys/encrypted-keys/
+S: Maintained
+F: Documentation/security/keys/core.rst
+F: include/linux/key.h
+F: include/linux/key-type.h
+F: include/linux/keyctl.h
+F: include/uapi/linux/keyctl.h
+F: include/keys/
+F: security/keys/
KGDB / KDB /debug_core
M: Jason Wessel <jason.wessel@windriver.com>
@@ -7654,27 +7722,6 @@ T: git git://linuxtv.org/mkrufky/tuners.git
S: Maintained
F: drivers/media/dvb-frontends/lgdt3305.*
-LGUEST
-M: Rusty Russell <rusty@rustcorp.com.au>
-L: lguest@lists.ozlabs.org
-W: http://lguest.ozlabs.org/
-S: Odd Fixes
-F: arch/x86/include/asm/lguest*.h
-F: arch/x86/lguest/
-F: drivers/lguest/
-F: include/linux/lguest*.h
-F: tools/lguest/
-
-LIBATA SUBSYSTEM (Serial and Parallel ATA drivers)
-M: Tejun Heo <tj@kernel.org>
-L: linux-ide@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
-S: Maintained
-F: drivers/ata/
-F: include/linux/ata.h
-F: include/linux/libata.h
-F: Documentation/devicetree/bindings/ata/
-
LIBATA PATA ARASAN COMPACT FLASH CONTROLLER
M: Viresh Kumar <vireshk@kernel.org>
L: linux-ide@vger.kernel.org
@@ -7718,22 +7765,21 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
S: Maintained
F: drivers/ata/sata_promise.*
+LIBATA SUBSYSTEM (Serial and Parallel ATA drivers)
+M: Tejun Heo <tj@kernel.org>
+L: linux-ide@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+S: Maintained
+F: drivers/ata/
+F: include/linux/ata.h
+F: include/linux/libata.h
+F: Documentation/devicetree/bindings/ata/
+
LIBLOCKDEP
M: Sasha Levin <alexander.levin@verizon.com>
S: Maintained
F: tools/lib/lockdep/
-LIBNVDIMM: NON-VOLATILE MEMORY DEVICE SUBSYSTEM
-M: Dan Williams <dan.j.williams@intel.com>
-L: linux-nvdimm@lists.01.org
-Q: https://patchwork.kernel.org/project/linux-nvdimm/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm.git
-S: Supported
-F: drivers/nvdimm/*
-F: include/linux/nd.h
-F: include/linux/libnvdimm.h
-F: include/uapi/linux/ndctl.h
-
LIBNVDIMM BLK: MMIO-APERTURE DRIVER
M: Ross Zwisler <ross.zwisler@linux.intel.com>
L: linux-nvdimm@lists.01.org
@@ -7741,7 +7787,6 @@ Q: https://patchwork.kernel.org/project/linux-nvdimm/list/
S: Supported
F: drivers/nvdimm/blk.c
F: drivers/nvdimm/region_devs.c
-F: drivers/acpi/nfit*
LIBNVDIMM BTT: BLOCK TRANSLATION TABLE
M: Vishal Verma <vishal.l.verma@intel.com>
@@ -7757,6 +7802,18 @@ Q: https://patchwork.kernel.org/project/linux-nvdimm/list/
S: Supported
F: drivers/nvdimm/pmem*
+LIBNVDIMM: NON-VOLATILE MEMORY DEVICE SUBSYSTEM
+M: Dan Williams <dan.j.williams@intel.com>
+L: linux-nvdimm@lists.01.org
+Q: https://patchwork.kernel.org/project/linux-nvdimm/list/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm.git
+S: Supported
+F: drivers/nvdimm/*
+F: drivers/acpi/nfit/*
+F: include/linux/nd.h
+F: include/linux/libnvdimm.h
+F: include/uapi/linux/ndctl.h
+
LIGHTNVM PLATFORM SUPPORT
M: Matias Bjorling <mb@lightnvm.io>
W:	http://github.com/OpenChannelSSD
@@ -7766,6 +7823,14 @@ F: drivers/lightnvm/
F: include/linux/lightnvm.h
F: include/uapi/linux/lightnvm.h
+LINUX FOR POWER MACINTOSH
+M: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+W: http://www.penguinppc.org/
+L: linuxppc-dev@lists.ozlabs.org
+S: Maintained
+F: arch/powerpc/platforms/powermac/
+F: drivers/macintosh/
+
LINUX FOR POWERPC (32-BIT AND 64-BIT)
M: Benjamin Herrenschmidt <benh@kernel.crashing.org>
M: Paul Mackerras <paulus@samba.org>
@@ -7792,6 +7857,7 @@ F: drivers/pci/hotplug/rpa*
F: drivers/rtc/rtc-opal.c
F: drivers/scsi/ibmvscsi/
F: drivers/tty/hvc/hvc_opal.c
+F: drivers/watchdog/wdrtas.c
F: tools/testing/selftests/powerpc
N: /pmac
N: powermac
@@ -7799,14 +7865,6 @@ N: powernv
N: [^a-z0-9]ps3
N: pseries
-LINUX FOR POWER MACINTOSH
-M: Benjamin Herrenschmidt <benh@kernel.crashing.org>
-W: http://www.penguinppc.org/
-L: linuxppc-dev@lists.ozlabs.org
-S: Maintained
-F: arch/powerpc/platforms/powermac/
-F: drivers/macintosh/
-
LINUX FOR POWERPC EMBEDDED MPC5XXX
M: Anatolij Gustschin <agust@denx.de>
L: linuxppc-dev@lists.ozlabs.org
@@ -7824,19 +7882,6 @@ S: Maintained
F: arch/powerpc/platforms/40x/
F: arch/powerpc/platforms/44x/
-LINUX FOR POWERPC EMBEDDED XILINX VIRTEX
-L: linuxppc-dev@lists.ozlabs.org
-S: Orphan
-F: arch/powerpc/*/*virtex*
-F: arch/powerpc/*/*/*virtex*
-
-LINUX FOR POWERPC EMBEDDED PPC8XX
-M: Vitaly Bordug <vitb@kernel.crashing.org>
-W: http://www.penguinppc.org/
-L: linuxppc-dev@lists.ozlabs.org
-S: Maintained
-F: arch/powerpc/platforms/8xx/
-
LINUX FOR POWERPC EMBEDDED PPC83XX AND PPC85XX
M: Scott Wood <oss@buserror.net>
M: Kumar Gala <galak@kernel.crashing.org>
@@ -7848,6 +7893,19 @@ F: arch/powerpc/platforms/83xx/
F: arch/powerpc/platforms/85xx/
F: Documentation/devicetree/bindings/powerpc/fsl/
+LINUX FOR POWERPC EMBEDDED PPC8XX
+M: Vitaly Bordug <vitb@kernel.crashing.org>
+W: http://www.penguinppc.org/
+L: linuxppc-dev@lists.ozlabs.org
+S: Maintained
+F: arch/powerpc/platforms/8xx/
+
+LINUX FOR POWERPC EMBEDDED XILINX VIRTEX
+L: linuxppc-dev@lists.ozlabs.org
+S: Orphan
+F: arch/powerpc/*/*virtex*
+F: arch/powerpc/*/*/*virtex*
+
LINUX FOR POWERPC PA SEMI PWRFICIENT
L: linuxppc-dev@lists.ozlabs.org
S: Orphan
@@ -7855,6 +7913,11 @@ F: arch/powerpc/platforms/pasemi/
F: drivers/*/*pasemi*
F: drivers/*/*/*pasemi*
+LINUX KERNEL DUMP TEST MODULE (LKDTM)
+M: Kees Cook <keescook@chromium.org>
+S: Maintained
+F: drivers/misc/lkdtm*
+
LINUX SECURITY MODULE (LSM) FRAMEWORK
M: Chris Wright <chrisw@sous-sol.org>
L: linux-security-module@vger.kernel.org
@@ -7884,11 +7947,6 @@ F: samples/livepatch/
L: live-patching@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/livepatching.git
-LINUX KERNEL DUMP TEST MODULE (LKDTM)
-M: Kees Cook <keescook@chromium.org>
-S: Maintained
-F: drivers/misc/lkdtm*
-
LLC (802.2)
L: netdev@vger.kernel.org
S: Odd fixes
@@ -7941,6 +7999,13 @@ Q: http://patchwork.linuxtv.org/project/linux-media/list/
S: Maintained
F: drivers/media/usb/dvb-usb-v2/lmedm04*
+LOADPIN SECURITY MODULE
+M: Kees Cook <keescook@chromium.org>
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git lsm/loadpin
+S: Supported
+F: security/loadpin/
+F: Documentation/admin-guide/LSM/LoadPin.rst
+
LOCKING PRIMITIVES
M: Peter Zijlstra <peterz@infradead.org>
M: Ingo Molnar <mingo@redhat.com>
@@ -8231,14 +8296,6 @@ S: Maintained
F: Documentation/devicetree/bindings/sound/max9860.txt
F: sound/soc/codecs/max9860.*
-MAXIM MUIC CHARGER DRIVERS FOR EXYNOS BASED BOARDS
-M: Krzysztof Kozlowski <krzk@kernel.org>
-M: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
-L: linux-pm@vger.kernel.org
-S: Supported
-F: drivers/power/supply/max14577_charger.c
-F: drivers/power/supply/max77693_charger.c
-
MAXIM MAX77802 PMIC REGULATOR DEVICE DRIVER
M: Javier Martinez Canillas <javier@dowhile0.org>
L: linux-kernel@vger.kernel.org
@@ -8247,6 +8304,14 @@ F: drivers/regulator/max77802-regulator.c
F: Documentation/devicetree/bindings/*/*max77802.txt
F: include/dt-bindings/*/*max77802.h
+MAXIM MUIC CHARGER DRIVERS FOR EXYNOS BASED BOARDS
+M: Krzysztof Kozlowski <krzk@kernel.org>
+M: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
+L: linux-pm@vger.kernel.org
+S: Supported
+F: drivers/power/supply/max14577_charger.c
+F: drivers/power/supply/max77693_charger.c
+
MAXIM PMIC AND MUIC DRIVERS FOR EXYNOS BASED BOARDS
M: Chanwoo Choi <cw00.choi@samsung.com>
M: Krzysztof Kozlowski <krzk@kernel.org>
@@ -8289,14 +8354,25 @@ L: linux-iio@vger.kernel.org
S: Maintained
F: drivers/iio/dac/cio-dac.c
-MEDIA DRIVERS FOR RENESAS - DRIF
-M: Ramesh Shanmugasundaram <ramesh.shanmugasundaram@bp.renesas.com>
+MEDIA DRIVERS FOR ASCOT2E
+M: Sergey Kozlov <serjk@netup.ru>
+M: Abylay Ospan <aospan@netup.ru>
L: linux-media@vger.kernel.org
-L: linux-renesas-soc@vger.kernel.org
+W: https://linuxtv.org
+W: http://netup.tv/
T: git git://linuxtv.org/media_tree.git
S: Supported
-F: Documentation/devicetree/bindings/media/renesas,drif.txt
-F: drivers/media/platform/rcar_drif.c
+F: drivers/media/dvb-frontends/ascot2e*
+
+MEDIA DRIVERS FOR CXD2841ER
+M: Sergey Kozlov <serjk@netup.ru>
+M: Abylay Ospan <aospan@netup.ru>
+L: linux-media@vger.kernel.org
+W: https://linuxtv.org
+W: http://netup.tv/
+T: git git://linuxtv.org/media_tree.git
+S: Supported
+F: drivers/media/dvb-frontends/cxd2841er*
MEDIA DRIVERS FOR FREESCALE IMX
M: Steve Longerbeam <slongerbeam@gmail.com>
@@ -8310,6 +8386,62 @@ F: drivers/staging/media/imx/
F: include/linux/imx-media.h
F: include/media/imx.h
+MEDIA DRIVERS FOR HELENE
+M: Abylay Ospan <aospan@netup.ru>
+L: linux-media@vger.kernel.org
+W: https://linuxtv.org
+W: http://netup.tv/
+T: git git://linuxtv.org/media_tree.git
+S: Supported
+F: drivers/media/dvb-frontends/helene*
+
+MEDIA DRIVERS FOR HORUS3A
+M: Sergey Kozlov <serjk@netup.ru>
+M: Abylay Ospan <aospan@netup.ru>
+L: linux-media@vger.kernel.org
+W: https://linuxtv.org
+W: http://netup.tv/
+T: git git://linuxtv.org/media_tree.git
+S: Supported
+F: drivers/media/dvb-frontends/horus3a*
+
+MEDIA DRIVERS FOR LNBH25
+M: Sergey Kozlov <serjk@netup.ru>
+M: Abylay Ospan <aospan@netup.ru>
+L: linux-media@vger.kernel.org
+W: https://linuxtv.org
+W: http://netup.tv/
+T: git git://linuxtv.org/media_tree.git
+S: Supported
+F: drivers/media/dvb-frontends/lnbh25*
+
+MEDIA DRIVERS FOR MXL5XX TUNER DEMODULATORS
+M: Daniel Scheller <d.scheller.oss@gmail.com>
+L: linux-media@vger.kernel.org
+W: https://linuxtv.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/dvb-frontends/mxl5xx*
+
+MEDIA DRIVERS FOR NETUP PCI UNIVERSAL DVB devices
+M: Sergey Kozlov <serjk@netup.ru>
+M: Abylay Ospan <aospan@netup.ru>
+L: linux-media@vger.kernel.org
+W: https://linuxtv.org
+W: http://netup.tv/
+T: git git://linuxtv.org/media_tree.git
+S: Supported
+F: drivers/media/pci/netup_unidvb/*
+
+MEDIA DRIVERS FOR RENESAS - DRIF
+M: Ramesh Shanmugasundaram <ramesh.shanmugasundaram@bp.renesas.com>
+L: linux-media@vger.kernel.org
+L: linux-renesas-soc@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Supported
+F: Documentation/devicetree/bindings/media/renesas,drif.txt
+F: drivers/media/platform/rcar_drif.c
+
MEDIA DRIVERS FOR RENESAS - FCP
M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
L: linux-media@vger.kernel.org
@@ -8347,64 +8479,29 @@ S: Supported
F: Documentation/devicetree/bindings/media/renesas,vsp1.txt
F: drivers/media/platform/vsp1/
-MEDIA DRIVERS FOR HELENE
-M: Abylay Ospan <aospan@netup.ru>
+MEDIA DRIVERS FOR ST STV0910 DEMODULATOR ICs
+M: Daniel Scheller <d.scheller.oss@gmail.com>
L: linux-media@vger.kernel.org
W: https://linuxtv.org
-W: http://netup.tv/
T: git git://linuxtv.org/media_tree.git
-S: Supported
-F: drivers/media/dvb-frontends/helene*
-
-MEDIA DRIVERS FOR ASCOT2E
-M: Sergey Kozlov <serjk@netup.ru>
-M: Abylay Ospan <aospan@netup.ru>
-L: linux-media@vger.kernel.org
-W: https://linuxtv.org
-W: http://netup.tv/
-T: git git://linuxtv.org/media_tree.git
-S: Supported
-F: drivers/media/dvb-frontends/ascot2e*
-
-MEDIA DRIVERS FOR CXD2841ER
-M: Sergey Kozlov <serjk@netup.ru>
-M: Abylay Ospan <aospan@netup.ru>
-L: linux-media@vger.kernel.org
-W: https://linuxtv.org
-W: http://netup.tv/
-T: git git://linuxtv.org/media_tree.git
-S: Supported
-F: drivers/media/dvb-frontends/cxd2841er*
-
-MEDIA DRIVERS FOR HORUS3A
-M: Sergey Kozlov <serjk@netup.ru>
-M: Abylay Ospan <aospan@netup.ru>
-L: linux-media@vger.kernel.org
-W: https://linuxtv.org
-W: http://netup.tv/
-T: git git://linuxtv.org/media_tree.git
-S: Supported
-F: drivers/media/dvb-frontends/horus3a*
+S: Maintained
+F: drivers/media/dvb-frontends/stv0910*
-MEDIA DRIVERS FOR LNBH25
-M: Sergey Kozlov <serjk@netup.ru>
-M: Abylay Ospan <aospan@netup.ru>
+MEDIA DRIVERS FOR ST STV6111 TUNER ICs
+M: Daniel Scheller <d.scheller.oss@gmail.com>
L: linux-media@vger.kernel.org
W: https://linuxtv.org
-W: http://netup.tv/
T: git git://linuxtv.org/media_tree.git
-S: Supported
-F: drivers/media/dvb-frontends/lnbh25*
+S: Maintained
+F: drivers/media/dvb-frontends/stv6111*
-MEDIA DRIVERS FOR NETUP PCI UNIVERSAL DVB devices
-M: Sergey Kozlov <serjk@netup.ru>
-M: Abylay Ospan <aospan@netup.ru>
+MEDIA DRIVERS FOR DIGITAL DEVICES PCIE DEVICES
+M: Daniel Scheller <d.scheller.oss@gmail.com>
L: linux-media@vger.kernel.org
W: https://linuxtv.org
-W: http://netup.tv/
T: git git://linuxtv.org/media_tree.git
-S: Supported
-F: drivers/media/pci/netup_unidvb/*
+S: Maintained
+F: drivers/media/pci/ddbridge/*
MEDIA INPUT INFRASTRUCTURE (V4L/DVB)
M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
@@ -8431,7 +8528,9 @@ F: include/uapi/linux/uvcvideo.h
MEDIATEK ETHERNET DRIVER
M: Felix Fietkau <nbd@openwrt.org>
-M: John Crispin <blogic@openwrt.org>
+M: John Crispin <john@phrozen.org>
+M: Sean Wang <sean.wang@mediatek.com>
+M: Nelson Chang <nelson.chang@mediatek.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/mediatek/
@@ -8443,15 +8542,6 @@ S: Supported
F: drivers/media/platform/mtk-jpeg/
F: Documentation/devicetree/bindings/media/mediatek-jpeg-decoder.txt
-MEDIATEK MEDIA DRIVER
-M: Tiffany Lin <tiffany.lin@mediatek.com>
-M: Andrew-CT Chen <andrew-ct.chen@mediatek.com>
-S: Supported
-F: drivers/media/platform/mtk-vcodec/
-F: drivers/media/platform/mtk-vpu/
-F: Documentation/devicetree/bindings/media/mediatek-vcodec.txt
-F: Documentation/devicetree/bindings/media/mediatek-vpu.txt
-
MEDIATEK MDP DRIVER
M: Minghsiu Tsai <minghsiu.tsai@mediatek.com>
M: Houlong Wei <houlong.wei@mediatek.com>
@@ -8461,16 +8551,38 @@ F: drivers/media/platform/mtk-mdp/
F: drivers/media/platform/mtk-vpu/
F: Documentation/devicetree/bindings/media/mediatek-mdp.txt
+MEDIATEK MEDIA DRIVER
+M: Tiffany Lin <tiffany.lin@mediatek.com>
+M: Andrew-CT Chen <andrew-ct.chen@mediatek.com>
+S: Supported
+F: drivers/media/platform/mtk-vcodec/
+F: drivers/media/platform/mtk-vpu/
+F: Documentation/devicetree/bindings/media/mediatek-vcodec.txt
+F: Documentation/devicetree/bindings/media/mediatek-vpu.txt
+
MEDIATEK MT7601U WIRELESS LAN DRIVER
M: Jakub Kicinski <kubakici@wp.pl>
L: linux-wireless@vger.kernel.org
S: Maintained
F: drivers/net/wireless/mediatek/mt7601u/
+MEDIATEK CIR DRIVER
+M: Sean Wang <sean.wang@mediatek.com>
+S: Maintained
+F: drivers/media/rc/mtk-cir.c
+
MEDIATEK RANDOM NUMBER GENERATOR SUPPORT
-M: Sean Wang <sean.wang@mediatek.com>
-S: Maintained
-F: drivers/char/hw_random/mtk-rng.c
+M: Sean Wang <sean.wang@mediatek.com>
+S: Maintained
+F: drivers/char/hw_random/mtk-rng.c
+
+MEDIATEK USB3 DRD IP DRIVER
+M: Chunfeng Yun <chunfeng.yun@mediatek.com>
+L: linux-usb@vger.kernel.org (moderated for non-subscribers)
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L: linux-mediatek@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: drivers/usb/mtu3/
MEGACHIPS STDPXXXX-GE-B850V3-FW LVDS/DP++ BRIDGES
M: Peter Senna Tschudin <peter.senna@collabora.com>
@@ -8492,6 +8604,13 @@ F: Documentation/scsi/megaraid.txt
F: drivers/scsi/megaraid.*
F: drivers/scsi/megaraid/
+MELEXIS MLX90614 DRIVER
+M: Crt Mori <cmo@melexis.com>
+L: linux-iio@vger.kernel.org
+W: http://www.melexis.com
+S: Supported
+F: drivers/iio/temperature/mlx90614.c
+
MELFAS MIP4 TOUCHSCREEN DRIVER
M: Sangwon Jee <jeesw@melfas.com>
W: http://www.melfas.com
@@ -8552,6 +8671,56 @@ W: http://www.mellanox.com
Q: http://patchwork.ozlabs.org/project/netdev/list/
F: drivers/net/ethernet/mellanox/mlxfw/
+MELLANOX MLX CPLD HOTPLUG DRIVER
+M: Vadim Pasternak <vadimp@mellanox.com>
+L: platform-driver-x86@vger.kernel.org
+S: Supported
+F: drivers/platform/x86/mlxcpld-hotplug.c
+F: include/linux/platform_data/mlxcpld-hotplug.h
+
+MELLANOX MLX4 core VPI driver
+M: Tariq Toukan <tariqt@mellanox.com>
+L: netdev@vger.kernel.org
+L: linux-rdma@vger.kernel.org
+W: http://www.mellanox.com
+Q: http://patchwork.ozlabs.org/project/netdev/list/
+S: Supported
+F: drivers/net/ethernet/mellanox/mlx4/
+F: include/linux/mlx4/
+
+MELLANOX MLX4 IB driver
+M: Yishai Hadas <yishaih@mellanox.com>
+L: linux-rdma@vger.kernel.org
+W: http://www.mellanox.com
+Q: http://patchwork.kernel.org/project/linux-rdma/list/
+S: Supported
+F: drivers/infiniband/hw/mlx4/
+F: include/linux/mlx4/
+F: include/uapi/rdma/mlx4-abi.h
+
+MELLANOX MLX5 core VPI driver
+M: Saeed Mahameed <saeedm@mellanox.com>
+M: Matan Barak <matanb@mellanox.com>
+M: Leon Romanovsky <leonro@mellanox.com>
+L: netdev@vger.kernel.org
+L: linux-rdma@vger.kernel.org
+W: http://www.mellanox.com
+Q: http://patchwork.ozlabs.org/project/netdev/list/
+S: Supported
+F: drivers/net/ethernet/mellanox/mlx5/core/
+F: include/linux/mlx5/
+
+MELLANOX MLX5 IB driver
+M: Matan Barak <matanb@mellanox.com>
+M: Leon Romanovsky <leonro@mellanox.com>
+L: linux-rdma@vger.kernel.org
+W: http://www.mellanox.com
+Q: http://patchwork.kernel.org/project/linux-rdma/list/
+S: Supported
+F: drivers/infiniband/hw/mlx5/
+F: include/linux/mlx5/
+F: include/uapi/rdma/mlx5-abi.h
+
MELLANOX MLXCPLD I2C AND MUX DRIVER
M: Vadim Pasternak <vadimp@mellanox.com>
M: Michael Shych <michaelsh@mellanox.com>
@@ -8569,33 +8738,17 @@ F: drivers/leds/leds-mlxcpld.c
F: Documentation/leds/leds-mlxcpld.txt

MELLANOX PLATFORM DRIVER
-M: Vadim Pasternak <vadimp@mellanox.com>
-L: platform-driver-x86@vger.kernel.org
-S: Supported
-F: drivers/platform/x86/mlx-platform.c
-
-MELLANOX MLX CPLD HOTPLUG DRIVER
M: Vadim Pasternak <vadimp@mellanox.com>
L: platform-driver-x86@vger.kernel.org
S: Supported
-F: drivers/platform/x86/mlxcpld-hotplug.c
-F: include/linux/platform_data/mlxcpld-hotplug.h
-
-SOFT-ROCE DRIVER (rxe)
-M: Moni Shoua <monis@mellanox.com>
-L: linux-rdma@vger.kernel.org
-S: Supported
-W: https://github.com/SoftRoCE/rxe-dev/wiki/rxe-dev:-Home
-Q: http://patchwork.kernel.org/project/linux-rdma/list/
-F: drivers/infiniband/sw/rxe/
-F: include/uapi/rdma/rdma_user_rxe.h
+F: drivers/platform/x86/mlx-platform.c
MEMBARRIER SUPPORT
M: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
L: linux-kernel@vger.kernel.org
S: Supported
-F: kernel/membarrier.c
+F: kernel/sched/membarrier.c
F: include/uapi/linux/membarrier.h

MEMORY MANAGEMENT
@@ -8649,6 +8802,16 @@ F: drivers/leds/leds-menf21bmc.c
F: drivers/hwmon/menf21bmc_hwmon.c
F: Documentation/hwmon/menf21bmc
+MESON AO CEC DRIVER FOR AMLOGIC SOCS
+M: Neil Armstrong <narmstrong@baylibre.com>
+L: linux-media@lists.freedesktop.org
+L: linux-amlogic@lists.infradead.org
+W: http://linux-meson.com/
+S: Supported
+F: drivers/media/platform/meson/ao-cec.c
+F: Documentation/devicetree/bindings/media/meson-ao-cec.txt
+T: git git://linuxtv.org/media_tree.git
+
METAG ARCHITECTURE
M: James Hogan <james.hogan@imgtec.com>
L: linux-metag@vger.kernel.org
@@ -8685,6 +8848,12 @@ F: drivers/dma/at_hdmac.c
F: drivers/dma/at_hdmac_regs.h
F: include/linux/platform_data/dma-atmel.h
+MICROCHIP / ATMEL ECC DRIVER
+M: Tudor Ambarus <tudor.ambarus@microchip.com>
+L: linux-crypto@vger.kernel.org
+S: Maintained
+F: drivers/crypto/atmel-ecc.*
+
MICROCHIP / ATMEL ISC DRIVER
M: Songjun Wu <songjun.wu@microchip.com>
L: linux-media@vger.kernel.org
@@ -8710,6 +8879,18 @@ S: Maintained
F: drivers/usb/misc/usb251xb.c
F: Documentation/devicetree/bindings/usb/usb251xb.txt
+MICROSEMI SMART ARRAY SMARTPQI DRIVER (smartpqi)
+M: Don Brace <don.brace@microsemi.com>
+L: esc.storagedev@microsemi.com
+L: linux-scsi@vger.kernel.org
+S: Supported
+F: drivers/scsi/smartpqi/smartpqi*.[ch]
+F: drivers/scsi/smartpqi/Kconfig
+F: drivers/scsi/smartpqi/Makefile
+F: include/linux/cciss*.h
+F: include/uapi/linux/cciss*.h
+F: Documentation/scsi/smartpqi.txt
+
MICROSOFT SURFACE PRO 3 BUTTON DRIVER
M: Chen Yu <yu.c.chen@intel.com>
L: platform-driver-x86@vger.kernel.org
@@ -8732,6 +8913,16 @@ F: Documentation/devicetree/bindings/mips/
F: Documentation/mips/
F: arch/mips/
+MIPS BOSTON DEVELOPMENT BOARD
+M: Paul Burton <paul.burton@imgtec.com>
+L: linux-mips@linux-mips.org
+S: Maintained
+F: Documentation/devicetree/bindings/clock/img,boston-clock.txt
+F: arch/mips/boot/dts/img/boston.dts
+F: arch/mips/configs/generic/board-boston.config
+F: drivers/clk/imgtec/clk-boston.c
+F: include/dt-bindings/clock/boston-clock.h
+
MIPS GENERIC PLATFORM
M: Paul Burton <paul.burton@imgtec.com>
L: linux-mips@linux-mips.org
@@ -8747,16 +8938,6 @@ F: arch/mips/include/asm/mach-loongson32/
F: drivers/*/*loongson1*
F: drivers/*/*/*loongson1*
-MIPS BOSTON DEVELOPMENT BOARD
-M: Paul Burton <paul.burton@imgtec.com>
-L: linux-mips@linux-mips.org
-S: Maintained
-F: Documentation/devicetree/bindings/clock/img,boston-clock.txt
-F: arch/mips/boot/dts/img/boston.dts
-F: arch/mips/configs/generic/board-boston.config
-F: drivers/clk/imgtec/clk-boston.c
-F: include/dt-bindings/clock/boston-clock.h
-
MIROSOUND PCM20 FM RADIO RECEIVER DRIVER
M: Hans Verkuil <hverkuil@xs4all.nl>
L: linux-media@vger.kernel.org
@@ -8765,67 +8946,15 @@ W: https://linuxtv.org
S: Odd Fixes
F: drivers/media/radio/radio-miropcm20*
-MELLANOX MLX4 core VPI driver
-M: Tariq Toukan <tariqt@mellanox.com>
-L: netdev@vger.kernel.org
-L: linux-rdma@vger.kernel.org
-W: http://www.mellanox.com
-Q: http://patchwork.ozlabs.org/project/netdev/list/
-S: Supported
-F: drivers/net/ethernet/mellanox/mlx4/
-F: include/linux/mlx4/
-
-MELLANOX MLX4 IB driver
-M: Yishai Hadas <yishaih@mellanox.com>
-L: linux-rdma@vger.kernel.org
-W: http://www.mellanox.com
-Q: http://patchwork.kernel.org/project/linux-rdma/list/
-S: Supported
-F: drivers/infiniband/hw/mlx4/
-F: include/linux/mlx4/
-F: include/uapi/rdma/mlx4-abi.h
-
-MELLANOX MLX5 core VPI driver
-M: Saeed Mahameed <saeedm@mellanox.com>
-M: Matan Barak <matanb@mellanox.com>
-M: Leon Romanovsky <leonro@mellanox.com>
-L: netdev@vger.kernel.org
-L: linux-rdma@vger.kernel.org
-W: http://www.mellanox.com
-Q: http://patchwork.ozlabs.org/project/netdev/list/
-S: Supported
-F: drivers/net/ethernet/mellanox/mlx5/core/
-F: include/linux/mlx5/
-
-MELLANOX MLX5 IB driver
-M: Matan Barak <matanb@mellanox.com>
-M: Leon Romanovsky <leonro@mellanox.com>
-L: linux-rdma@vger.kernel.org
-W: http://www.mellanox.com
-Q: http://patchwork.kernel.org/project/linux-rdma/list/
-S: Supported
-F: drivers/infiniband/hw/mlx5/
-F: include/linux/mlx5/
-F: include/uapi/rdma/mlx5-abi.h
-
-MELEXIS MLX90614 DRIVER
-M: Crt Mori <cmo@melexis.com>
-L: linux-iio@vger.kernel.org
-W: http://www.melexis.com
-S: Supported
-F: drivers/iio/temperature/mlx90614.c
-
-MICROSEMI SMART ARRAY SMARTPQI DRIVER (smartpqi)
-M: Don Brace <don.brace@microsemi.com>
-L: esc.storagedev@microsemi.com
-L: linux-scsi@vger.kernel.org
-S: Supported
-F: drivers/scsi/smartpqi/smartpqi*.[ch]
-F: drivers/scsi/smartpqi/Kconfig
-F: drivers/scsi/smartpqi/Makefile
-F: include/linux/cciss*.h
-F: include/uapi/linux/cciss*.h
-F: Documentation/scsi/smartpqi.txt
+MMP SUPPORT
+M: Eric Miao <eric.y.miao@gmail.com>
+M: Haojian Zhuang <haojian.zhuang@gmail.com>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+T: git git://github.com/hzhuang1/linux.git
+T: git git://git.linaro.org/people/ycmiao/pxa-linux.git
+S: Maintained
+F: arch/arm/boot/dts/mmp*
+F: arch/arm/mach-mmp/
MN88472 MEDIA DRIVER
M: Antti Palosaari <crope@iki.fi>
@@ -8959,6 +9088,11 @@ F: drivers/mfd/
F: include/linux/mfd/
F: include/dt-bindings/mfd/
+MULTIMEDIA CARD (MMC) ETC. OVER SPI
+S: Orphan
+F: drivers/mmc/host/mmc_spi.c
+F: include/linux/spi/mmc_spi.h
+
MULTIMEDIA CARD (MMC), SECURE DIGITAL (SD) AND SDIO SUBSYSTEM
M: Ulf Hansson <ulf.hansson@linaro.org>
L: linux-mmc@vger.kernel.org
@@ -8969,11 +9103,6 @@ F: drivers/mmc/
F: include/linux/mmc/
F: include/uapi/linux/mmc/
-MULTIMEDIA CARD (MMC) ETC. OVER SPI
-S: Orphan
-F: drivers/mmc/host/mmc_spi.c
-F: include/linux/spi/mmc_spi.h
-
MULTIPLEXER SUBSYSTEM
M: Peter Rosin <peda@axentia.se>
S: Maintained
@@ -9036,10 +9165,6 @@ S: Maintained
F: drivers/mtd/nand/
F: include/linux/mtd/nand*.h
-NATSEMI ETHERNET DRIVER (DP8381x)
-S: Orphan
-F: drivers/net/ethernet/natsemi/natsemi.c
-
NATIVE INSTRUMENTS USB SOUND INTERFACE DRIVER
M: Daniel Mack <zonque@gmail.com>
S: Maintained
@@ -9047,6 +9172,10 @@ L: alsa-devel@alsa-project.org (moderated for non-subscribers)
W: http://www.native-instruments.com
F: sound/usb/caiaq/
+NATSEMI ETHERNET DRIVER (DP8381x)
+S: Orphan
+F: drivers/net/ethernet/natsemi/natsemi.c
+
NCP FILESYSTEM
M: Petr Vandrovec <petr@vandrovec.name>
S: Odd Fixes
@@ -9166,6 +9295,35 @@ S: Maintained
W: https://fedorahosted.org/dropwatch/
F: net/core/drop_monitor.c
+NETWORKING DRIVERS
+L: netdev@vger.kernel.org
+W: http://www.linuxfoundation.org/en/Net
+Q: http://patchwork.ozlabs.org/project/netdev/list/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
+S: Odd Fixes
+F: Documentation/devicetree/bindings/net/
+F: drivers/net/
+F: include/linux/if_*
+F: include/linux/netdevice.h
+F: include/linux/etherdevice.h
+F: include/linux/fcdevice.h
+F: include/linux/fddidevice.h
+F: include/linux/hippidevice.h
+F: include/linux/inetdevice.h
+F: include/uapi/linux/if_*
+F: include/uapi/linux/netdevice.h
+
+NETWORKING DRIVERS (WIRELESS)
+M: Kalle Valo <kvalo@codeaurora.org>
+L: linux-wireless@vger.kernel.org
+Q: http://patchwork.kernel.org/project/linux-wireless/list/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next.git
+S: Maintained
+F: Documentation/devicetree/bindings/net/wireless/
+F: drivers/net/wireless/
+
NETWORKING [DSA]
M: Andrew Lunn <andrew@lunn.ch>
M: Vivien Didelot <vivien.didelot@savoirfairelinux.com>
@@ -9197,28 +9355,6 @@ F: tools/net/
F: tools/testing/selftests/net/
F: lib/random32.c
-NETWORKING [IPv4/IPv6]
-M: "David S. Miller" <davem@davemloft.net>
-M: Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
-M: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
-L: netdev@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
-S: Maintained
-F: net/ipv4/
-F: net/ipv6/
-F: include/net/ip*
-F: arch/x86/net/*
-
-NETWORKING [TLS]
-M: Ilya Lesokhin <ilyal@mellanox.com>
-M: Aviad Yehezkel <aviadye@mellanox.com>
-M: Dave Watson <davejwatson@fb.com>
-L: netdev@vger.kernel.org
-S: Maintained
-F: net/tls/*
-F: include/uapi/linux/tls.h
-F: include/net/tls.h
-
NETWORKING [IPSEC]
M: Steffen Klassert <steffen.klassert@secunet.com>
M: Herbert Xu <herbert@gondor.apana.org.au>
@@ -9243,43 +9379,36 @@ F: net/ipv6/ip6_vti.c
F: include/uapi/linux/xfrm.h
F: include/net/xfrm.h
+NETWORKING [IPv4/IPv6]
+M: "David S. Miller" <davem@davemloft.net>
+M: Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
+M: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
+L: netdev@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
+S: Maintained
+F: net/ipv4/
+F: net/ipv6/
+F: include/net/ip*
+F: arch/x86/net/*
+
NETWORKING [LABELED] (NetLabel, CIPSO, Labeled IPsec, SECMARK)
M: Paul Moore <paul@paul-moore.com>
L: netdev@vger.kernel.org
S: Maintained
-NETWORKING [WIRELESS]
-L: linux-wireless@vger.kernel.org
-Q: http://patchwork.kernel.org/project/linux-wireless/list/
-
-NETWORKING DRIVERS
+NETWORKING [TLS]
+M: Ilya Lesokhin <ilyal@mellanox.com>
+M: Aviad Yehezkel <aviadye@mellanox.com>
+M: Dave Watson <davejwatson@fb.com>
L: netdev@vger.kernel.org
-W: http://www.linuxfoundation.org/en/Net
-Q: http://patchwork.ozlabs.org/project/netdev/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
-S: Odd Fixes
-F: Documentation/devicetree/bindings/net/
-F: drivers/net/
-F: include/linux/if_*
-F: include/linux/netdevice.h
-F: include/linux/etherdevice.h
-F: include/linux/fcdevice.h
-F: include/linux/fddidevice.h
-F: include/linux/hippidevice.h
-F: include/linux/inetdevice.h
-F: include/uapi/linux/if_*
-F: include/uapi/linux/netdevice.h
+S: Maintained
+F: net/tls/*
+F: include/uapi/linux/tls.h
+F: include/net/tls.h
-NETWORKING DRIVERS (WIRELESS)
-M: Kalle Valo <kvalo@codeaurora.org>
+NETWORKING [WIRELESS]
L: linux-wireless@vger.kernel.org
Q: http://patchwork.kernel.org/project/linux-wireless/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers.git
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next.git
-S: Maintained
-F: Documentation/devicetree/bindings/net/wireless/
-F: drivers/net/wireless/
NETXEN (1/10) GbE SUPPORT
M: Manish Chopra <manish.chopra@cavium.com>
@@ -9373,14 +9502,6 @@ S: Maintained
F: drivers/media/i2c/et8ek8
F: drivers/media/i2c/ad5820.c
-NOKIA N900 CAMERA SUPPORT (ET8EK8 SENSOR, AD5820 FOCUS)
-M: Pavel Machek <pavel@ucw.cz>
-M: Sakari Ailus <sakari.ailus@iki.fi>
-L: linux-media@vger.kernel.org
-S: Maintained
-F: drivers/media/i2c/et8ek8
-F: drivers/media/i2c/ad5820.c
-
NOKIA N900 POWER SUPPLY DRIVERS
R: Pali Rohár <pali.rohar@gmail.com>
F: include/linux/power/bq2415x_charger.h
@@ -9392,6 +9513,12 @@ F: drivers/power/supply/bq27xxx_battery_i2c.c
F: drivers/power/supply/isp1704_charger.c
F: drivers/power/supply/rx51_battery.c
+NTB AMD DRIVER
+M: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+L: linux-ntb@googlegroups.com
+S: Supported
+F: drivers/ntb/hw/amd/
+
NTB DRIVER CORE
M: Jon Mason <jdmason@kudzu.us>
M: Dave Jiang <dave.jiang@intel.com>
@@ -9421,12 +9548,6 @@ W: https://github.com/jonmason/ntb/wiki
T: git git://github.com/jonmason/ntb.git
F: drivers/ntb/hw/intel/
-NTB AMD DRIVER
-M: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
-L: linux-ntb@googlegroups.com
-S: Supported
-F: drivers/ntb/hw/amd/
-
NTFS FILESYSTEM
M: Anton Altaparmakov <anton@tuxera.com>
L: linux-ntfs-dev@lists.sourceforge.net
@@ -9456,15 +9577,6 @@ F: drivers/nvme/host/
F: include/linux/nvme.h
F: include/uapi/linux/nvme_ioctl.h
-NVM EXPRESS TARGET DRIVER
-M: Christoph Hellwig <hch@lst.de>
-M: Sagi Grimberg <sagi@grimberg.me>
-L: linux-nvme@lists.infradead.org
-T: git://git.infradead.org/nvme.git
-W: http://git.infradead.org/nvme.git
-S: Supported
-F: drivers/nvme/target/
-
NVM EXPRESS FC TRANSPORT DRIVERS
M: James Smart <james.smart@broadcom.com>
L: linux-nvme@lists.infradead.org
@@ -9475,21 +9587,24 @@ F: drivers/nvme/host/fc.c
F: drivers/nvme/target/fc.c
F: drivers/nvme/target/fcloop.c
+NVM EXPRESS TARGET DRIVER
+M: Christoph Hellwig <hch@lst.de>
+M: Sagi Grimberg <sagi@grimberg.me>
+L: linux-nvme@lists.infradead.org
+T: git://git.infradead.org/nvme.git
+W: http://git.infradead.org/nvme.git
+S: Supported
+F: drivers/nvme/target/
+
NVMEM FRAMEWORK
M: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
S: Maintained
F: drivers/nvmem/
F: Documentation/devicetree/bindings/nvmem/
+F: Documentation/ABI/stable/sysfs-bus-nvmem
F: include/linux/nvmem-consumer.h
F: include/linux/nvmem-provider.h
-NXP-NCI NFC DRIVER
-M: Clément Perrochaud <clement.perrochaud@effinnov.com>
-R: Charles Gorand <charles.gorand@effinnov.com>
-L: linux-nfc@lists.01.org (moderated for non-subscribers)
-S: Supported
-F: drivers/nfc/nxp-nci
-
NXP TDA998X DRM DRIVER
M: Russell King <linux@armlinux.org.uk>
S: Supported
@@ -9504,55 +9619,31 @@ L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Maintained
F: sound/soc/codecs/tfa9879*
+NXP-NCI NFC DRIVER
+M: Clément Perrochaud <clement.perrochaud@effinnov.com>
+R: Charles Gorand <charles.gorand@effinnov.com>
+L: linux-nfc@lists.01.org (moderated for non-subscribers)
+S: Supported
+F: drivers/nfc/nxp-nci
+
OBJTOOL
M: Josh Poimboeuf <jpoimboe@redhat.com>
S: Supported
F: tools/objtool/
-OMAP1 SUPPORT
-M: Aaro Koskinen <aaro.koskinen@iki.fi>
-M: Tony Lindgren <tony@atomide.com>
+OMAP AUDIO SUPPORT
+M: Peter Ujfalusi <peter.ujfalusi@ti.com>
+M: Jarkko Nikula <jarkko.nikula@bitmer.com>
+L: alsa-devel@alsa-project.org (moderated for non-subscribers)
L: linux-omap@vger.kernel.org
-Q: http://patchwork.kernel.org/project/linux-omap/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap.git
S: Maintained
-F: arch/arm/mach-omap1/
-F: arch/arm/plat-omap/
-F: arch/arm/configs/omap1_defconfig
-F: drivers/i2c/busses/i2c-omap.c
-F: include/linux/i2c-omap.h
+F: sound/soc/omap/
-OMAP2+ SUPPORT
-M: Tony Lindgren <tony@atomide.com>
+OMAP CLOCK FRAMEWORK SUPPORT
+M: Paul Walmsley <paul@pwsan.com>
L: linux-omap@vger.kernel.org
-W: http://www.muru.com/linux/omap/
-W: http://linux.omap.com/
-Q: http://patchwork.kernel.org/project/linux-omap/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap.git
S: Maintained
-F: arch/arm/mach-omap2/
-F: arch/arm/plat-omap/
-F: arch/arm/configs/omap2plus_defconfig
-F: drivers/i2c/busses/i2c-omap.c
-F: drivers/irqchip/irq-omap-intc.c
-F: drivers/mfd/*omap*.c
-F: drivers/mfd/menelaus.c
-F: drivers/mfd/palmas.c
-F: drivers/mfd/tps65217.c
-F: drivers/mfd/tps65218.c
-F: drivers/mfd/tps65910.c
-F: drivers/mfd/twl-core.[ch]
-F: drivers/mfd/twl4030*.c
-F: drivers/mfd/twl6030*.c
-F: drivers/mfd/twl6040*.c
-F: drivers/regulator/palmas-regulator*.c
-F: drivers/regulator/pbias-regulator.c
-F: drivers/regulator/tps65217-regulator.c
-F: drivers/regulator/tps65218-regulator.c
-F: drivers/regulator/tps65910-regulator.c
-F: drivers/regulator/twl-regulator.c
-F: drivers/regulator/twl6030-regulator.c
-F: include/linux/i2c-omap.h
+F: arch/arm/*omap*/*clock*
OMAP DEVICE TREE SUPPORT
M: Benoît Cousson <bcousson@baylibre.com>
@@ -9566,33 +9657,20 @@ F: arch/arm/boot/dts/*am4*
F: arch/arm/boot/dts/*am5*
F: arch/arm/boot/dts/*dra7*
-OMAP CLOCK FRAMEWORK SUPPORT
-M: Paul Walmsley <paul@pwsan.com>
-L: linux-omap@vger.kernel.org
-S: Maintained
-F: arch/arm/*omap*/*clock*
-
-OMAP POWER MANAGEMENT SUPPORT
-M: Kevin Hilman <khilman@kernel.org>
-L: linux-omap@vger.kernel.org
-S: Maintained
-F: arch/arm/*omap*/*pm*
-F: drivers/cpufreq/omap-cpufreq.c
-
-OMAP POWERDOMAIN SOC ADAPTATION LAYER SUPPORT
-M: Rajendra Nayak <rnayak@codeaurora.org>
-M: Paul Walmsley <paul@pwsan.com>
+OMAP DISPLAY SUBSYSTEM and FRAMEBUFFER SUPPORT (DSS2)
+M: Tomi Valkeinen <tomi.valkeinen@ti.com>
L: linux-omap@vger.kernel.org
+L: linux-fbdev@vger.kernel.org
S: Maintained
-F: arch/arm/mach-omap2/prm*
+F: drivers/video/fbdev/omap2/
+F: Documentation/arm/OMAP/DSS
-OMAP AUDIO SUPPORT
-M: Peter Ujfalusi <peter.ujfalusi@ti.com>
-M: Jarkko Nikula <jarkko.nikula@bitmer.com>
-L: alsa-devel@alsa-project.org (moderated for non-subscribers)
+OMAP FRAMEBUFFER SUPPORT
+M: Tomi Valkeinen <tomi.valkeinen@ti.com>
+L: linux-fbdev@vger.kernel.org
L: linux-omap@vger.kernel.org
S: Maintained
-F: sound/soc/omap/
+F: drivers/video/fbdev/omap/
OMAP GENERAL PURPOSE MEMORY CONTROLLER SUPPORT
M: Roger Quadros <rogerq@ti.com>
@@ -9602,20 +9680,14 @@ S: Maintained
F: drivers/memory/omap-gpmc.c
F: arch/arm/mach-omap2/*gpmc*
-OMAP FRAMEBUFFER SUPPORT
-M: Tomi Valkeinen <tomi.valkeinen@ti.com>
-L: linux-fbdev@vger.kernel.org
-L: linux-omap@vger.kernel.org
-S: Maintained
-F: drivers/video/fbdev/omap/
-
-OMAP DISPLAY SUBSYSTEM and FRAMEBUFFER SUPPORT (DSS2)
-M: Tomi Valkeinen <tomi.valkeinen@ti.com>
+OMAP GPIO DRIVER
+M: Grygorii Strashko <grygorii.strashko@ti.com>
+M: Santosh Shilimkar <ssantosh@kernel.org>
+M: Kevin Hilman <khilman@kernel.org>
L: linux-omap@vger.kernel.org
-L: linux-fbdev@vger.kernel.org
S: Maintained
-F: drivers/video/fbdev/omap2/
-F: Documentation/arm/OMAP/DSS
+F: Documentation/devicetree/bindings/gpio/gpio-omap.txt
+F: drivers/gpio/gpio-omap.c
OMAP HARDWARE SPINLOCK SUPPORT
M: Ohad Ben-Cohen <ohad@wizery.com>
@@ -9623,30 +9695,12 @@ L: linux-omap@vger.kernel.org
S: Maintained
F: drivers/hwspinlock/omap_hwspinlock.c
-OMAP MMC SUPPORT
-M: Jarkko Lavinen <jarkko.lavinen@nokia.com>
-L: linux-omap@vger.kernel.org
-S: Maintained
-F: drivers/mmc/host/omap.c
-
OMAP HS MMC SUPPORT
L: linux-mmc@vger.kernel.org
L: linux-omap@vger.kernel.org
S: Orphan
F: drivers/mmc/host/omap_hsmmc.c
-OMAP RANDOM NUMBER GENERATOR SUPPORT
-M: Deepak Saxena <dsaxena@plexity.net>
-S: Maintained
-F: drivers/char/hw_random/omap-rng.c
-
-OMAP HWMOD SUPPORT
-M: Benoît Cousson <bcousson@baylibre.com>
-M: Paul Walmsley <paul@pwsan.com>
-L: linux-omap@vger.kernel.org
-S: Maintained
-F: arch/arm/mach-omap2/omap_hwmod.*
-
OMAP HWMOD DATA
M: Paul Walmsley <paul@pwsan.com>
L: linux-omap@vger.kernel.org
@@ -9659,6 +9713,13 @@ L: linux-omap@vger.kernel.org
S: Maintained
F: arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+OMAP HWMOD SUPPORT
+M: Benoît Cousson <bcousson@baylibre.com>
+M: Paul Walmsley <paul@pwsan.com>
+L: linux-omap@vger.kernel.org
+S: Maintained
+F: arch/arm/mach-omap2/omap_hwmod.*
+
OMAP IMAGING SUBSYSTEM (OMAP3 ISP and OMAP4 ISS)
M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
L: linux-media@vger.kernel.org
@@ -9667,6 +9728,31 @@ F: Documentation/devicetree/bindings/media/ti,omap3isp.txt
F: drivers/media/platform/omap3isp/
F: drivers/staging/media/omap4iss/
+OMAP MMC SUPPORT
+M: Jarkko Lavinen <jarkko.lavinen@nokia.com>
+L: linux-omap@vger.kernel.org
+S: Maintained
+F: drivers/mmc/host/omap.c
+
+OMAP POWER MANAGEMENT SUPPORT
+M: Kevin Hilman <khilman@kernel.org>
+L: linux-omap@vger.kernel.org
+S: Maintained
+F: arch/arm/*omap*/*pm*
+F: drivers/cpufreq/omap-cpufreq.c
+
+OMAP POWERDOMAIN SOC ADAPTATION LAYER SUPPORT
+M: Rajendra Nayak <rnayak@codeaurora.org>
+M: Paul Walmsley <paul@pwsan.com>
+L: linux-omap@vger.kernel.org
+S: Maintained
+F: arch/arm/mach-omap2/prm*
+
+OMAP RANDOM NUMBER GENERATOR SUPPORT
+M: Deepak Saxena <dsaxena@plexity.net>
+S: Maintained
+F: drivers/char/hw_random/omap-rng.c
+
OMAP USB SUPPORT
L: linux-usb@vger.kernel.org
L: linux-omap@vger.kernel.org
@@ -9674,21 +9760,57 @@ S: Orphan
F: drivers/usb/*/*omap*
F: arch/arm/*omap*/usb*
-OMAP GPIO DRIVER
-M: Grygorii Strashko <grygorii.strashko@ti.com>
-M: Santosh Shilimkar <ssantosh@kernel.org>
-M: Kevin Hilman <khilman@kernel.org>
-L: linux-omap@vger.kernel.org
-S: Maintained
-F: Documentation/devicetree/bindings/gpio/gpio-omap.txt
-F: drivers/gpio/gpio-omap.c
-
OMAP/NEWFLOW NANOBONE MACHINE SUPPORT
M: Mark Jackson <mpfj@newflow.co.uk>
L: linux-omap@vger.kernel.org
S: Maintained
F: arch/arm/boot/dts/am335x-nano.dts
+OMAP1 SUPPORT
+M: Aaro Koskinen <aaro.koskinen@iki.fi>
+M: Tony Lindgren <tony@atomide.com>
+L: linux-omap@vger.kernel.org
+Q: http://patchwork.kernel.org/project/linux-omap/list/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap.git
+S: Maintained
+F: arch/arm/mach-omap1/
+F: arch/arm/plat-omap/
+F: arch/arm/configs/omap1_defconfig
+F: drivers/i2c/busses/i2c-omap.c
+F: include/linux/i2c-omap.h
+
+OMAP2+ SUPPORT
+M: Tony Lindgren <tony@atomide.com>
+L: linux-omap@vger.kernel.org
+W: http://www.muru.com/linux/omap/
+W: http://linux.omap.com/
+Q: http://patchwork.kernel.org/project/linux-omap/list/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap.git
+S: Maintained
+F: arch/arm/mach-omap2/
+F: arch/arm/plat-omap/
+F: arch/arm/configs/omap2plus_defconfig
+F: drivers/i2c/busses/i2c-omap.c
+F: drivers/irqchip/irq-omap-intc.c
+F: drivers/mfd/*omap*.c
+F: drivers/mfd/menelaus.c
+F: drivers/mfd/palmas.c
+F: drivers/mfd/tps65217.c
+F: drivers/mfd/tps65218.c
+F: drivers/mfd/tps65910.c
+F: drivers/mfd/twl-core.[ch]
+F: drivers/mfd/twl4030*.c
+F: drivers/mfd/twl6030*.c
+F: drivers/mfd/twl6040*.c
+F: drivers/regulator/palmas-regulator*.c
+F: drivers/regulator/pbias-regulator.c
+F: drivers/regulator/tps65217-regulator.c
+F: drivers/regulator/tps65218-regulator.c
+F: drivers/regulator/tps65910-regulator.c
+F: drivers/regulator/twl-regulator.c
+F: drivers/regulator/twl6030-regulator.c
+F: include/linux/i2c-omap.h
+
OMFS FILESYSTEM
M: Bob Copeland <me@bobcopeland.com>
L: linux-karma-devel@lists.sourceforge.net
@@ -9708,6 +9830,13 @@ M: Harald Welte <laforge@gnumonks.org>
S: Maintained
F: drivers/char/pcmcia/cm4040_cs.*
+OMNIVISION OV13858 SENSOR DRIVER
+M: Sakari Ailus <sakari.ailus@linux.intel.com>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/i2c/ov13858.c
+
OMNIVISION OV5640 SENSOR DRIVER
M: Steve Longerbeam <slongerbeam@gmail.com>
L: linux-media@vger.kernel.org
@@ -9716,7 +9845,7 @@ S: Maintained
F: drivers/media/i2c/ov5640.c

OMNIVISION OV5647 SENSOR DRIVER
-M: Ramiro Oliveira <roliveir@synopsys.com>
+M: Luis Oliveira <lolivei@synopsys.com>
L: linux-media@vger.kernel.org
T: git git://linuxtv.org/media_tree.git
S: Maintained
@@ -9730,13 +9859,6 @@ S: Maintained
F: drivers/media/i2c/ov7670.c
F: Documentation/devicetree/bindings/media/i2c/ov7670.txt
-OMNIVISION OV13858 SENSOR DRIVER
-M: Sakari Ailus <sakari.ailus@linux.intel.com>
-L: linux-media@vger.kernel.org
-T: git git://linuxtv.org/media_tree.git
-S: Maintained
-F: drivers/media/i2c/ov13858.c
-
ONENAND FLASH DRIVER
M: Kyungmin Park <kyungmin.park@samsung.com>
L: linux-mtd@lists.infradead.org
@@ -9754,12 +9876,26 @@ F: drivers/scsi/osst.*
F: drivers/scsi/osst_*.h
F: drivers/scsi/st.h
-OPENCORES I2C BUS DRIVER
-M: Peter Korsgaard <jacmet@sunsite.dk>
-L: linux-i2c@vger.kernel.org
+OP-TEE DRIVER
+M: Jens Wiklander <jens.wiklander@linaro.org>
S: Maintained
-F: Documentation/i2c/busses/i2c-ocores
-F: drivers/i2c/busses/i2c-ocores.c
+F: drivers/tee/optee/
+
+OPA-VNIC DRIVER
+M: Dennis Dalessandro <dennis.dalessandro@intel.com>
+M: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
+L: linux-rdma@vger.kernel.org
+S: Supported
+F: drivers/infiniband/ulp/opa_vnic
+
+OPEN FIRMWARE AND DEVICE TREE OVERLAYS
+M: Pantelis Antoniou <pantelis.antoniou@konsulko.com>
+L: devicetree@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/dynamic-resolution-notes.txt
+F: Documentation/devicetree/overlay-notes.txt
+F: drivers/of/overlay.c
+F: drivers/of/resolver.c
OPEN FIRMWARE AND FLATTENED DEVICE TREE
M: Rob Herring <robh+dt@kernel.org>
@@ -9784,14 +9920,12 @@ F: Documentation/devicetree/
F: arch/*/boot/dts/
F: include/dt-bindings/
-OPEN FIRMWARE AND DEVICE TREE OVERLAYS
-M: Pantelis Antoniou <pantelis.antoniou@konsulko.com>
-L: devicetree@vger.kernel.org
+OPENCORES I2C BUS DRIVER
+M: Peter Korsgaard <jacmet@sunsite.dk>
+L: linux-i2c@vger.kernel.org
S: Maintained
-F: Documentation/devicetree/dynamic-resolution-notes.txt
-F: Documentation/devicetree/overlay-notes.txt
-F: drivers/of/overlay.c
-F: drivers/of/resolver.c
+F: Documentation/i2c/busses/i2c-ocores
+F: drivers/i2c/busses/i2c-ocores.c
OPENRISC ARCHITECTURE
M: Jonas Bonn <jonas@southpole.se>
@@ -9840,11 +9974,6 @@ F: arch/*/oprofile/
F: drivers/oprofile/
F: include/linux/oprofile.h
-OP-TEE DRIVER
-M: Jens Wiklander <jens.wiklander@linaro.org>
-S: Maintained
-F: drivers/tee/optee/
-
ORACLE CLUSTER FILESYSTEM 2 (OCFS2)
M: Mark Fasheh <mfasheh@versity.com>
M: Joel Becker <jlbec@evilplan.org>
@@ -9855,6 +9984,14 @@ F: Documentation/filesystems/ocfs2.txt
F: Documentation/filesystems/dlmfs.txt
F: fs/ocfs2/
+ORANGEFS FILESYSTEM
+M: Mike Marshall <hubcap@omnibond.com>
+L: pvfs2-developers@beowulf-underground.org (subscribers-only)
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/hubcap/linux.git
+S: Supported
+F: fs/orangefs/
+F: Documentation/filesystems/orangefs.txt
+
ORINOCO DRIVER
L: linux-wireless@vger.kernel.org
W: http://wireless.kernel.org/en/users/Drivers/orinoco
@@ -9869,6 +10006,16 @@ F: drivers/scsi/osd/
F: include/scsi/osd_*
F: fs/exofs/
+OV2659 OMNIVISION SENSOR DRIVER
+M: "Lad, Prabhakar" <prabhakar.csengg@gmail.com>
+L: linux-media@vger.kernel.org
+W: https://linuxtv.org
+Q: http://patchwork.linuxtv.org/project/linux-media/list/
+T: git git://linuxtv.org/mhadli/v4l-dvb-davinci_devices.git
+S: Maintained
+F: drivers/media/i2c/ov2659.c
+F: include/media/i2c/ov2659.h
+
OVERLAY FILESYSTEM
M: Miklos Szeredi <miklos@szeredi.hu>
L: linux-unionfs@vger.kernel.org
@@ -9877,14 +10024,6 @@ S: Supported
F: fs/overlayfs/
F: Documentation/filesystems/overlayfs.txt
-ORANGEFS FILESYSTEM
-M: Mike Marshall <hubcap@omnibond.com>
-L: pvfs2-developers@beowulf-underground.org (subscribers-only)
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/hubcap/linux.git
-S: Supported
-F: fs/orangefs/
-F: Documentation/filesystems/orangefs.txt
-
P54 WIRELESS DRIVER
M: Christian Lamparter <chunkeey@googlemail.com>
L: linux-wireless@vger.kernel.org
@@ -9925,11 +10064,11 @@ F: Documentation/mn10300/
F: arch/mn10300/

PARALLEL LCD/KEYPAD PANEL DRIVER
-M: Willy Tarreau <willy@haproxy.com>
-M: Ksenija Stanojevic <ksenija.stanojevic@gmail.com>
-S: Odd Fixes
-F: Documentation/misc-devices/lcd-panel-cgram.txt
-F: drivers/misc/panel.c
+M: Willy Tarreau <willy@haproxy.com>
+M: Ksenija Stanojevic <ksenija.stanojevic@gmail.com>
+S: Odd Fixes
+F: Documentation/misc-devices/lcd-panel-cgram.txt
+F: drivers/misc/panel.c
PARALLEL PORT SUBSYSTEM
M: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
@@ -10025,42 +10164,13 @@ M: Khalid Aziz <khalid@gonehiking.org>
S: Maintained
F: drivers/firmware/pcdp.*
-PCI ERROR RECOVERY
-M: Linas Vepstas <linasvepstas@gmail.com>
-L: linux-pci@vger.kernel.org
-S: Supported
-F: Documentation/PCI/pci-error-recovery.txt
-
-PCI ENHANCED ERROR HANDLING (EEH) FOR POWERPC
-M: Russell Currey <ruscur@russell.cc>
-L: linuxppc-dev@lists.ozlabs.org
-S: Supported
-F: Documentation/powerpc/eeh-pci-error-recovery.txt
-F: arch/powerpc/kernel/eeh*.c
-F: arch/powerpc/platforms/*/eeh*.c
-F: arch/powerpc/include/*/eeh*.h
-
-PCI SUBSYSTEM
-M: Bjorn Helgaas <bhelgaas@google.com>
-L: linux-pci@vger.kernel.org
-Q: http://patchwork.ozlabs.org/project/linux-pci/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci.git
-S: Supported
-F: Documentation/devicetree/bindings/pci/
-F: Documentation/PCI/
-F: drivers/pci/
-F: include/linux/pci*
-F: arch/x86/pci/
-F: arch/x86/kernel/quirks.c
-
-PCI ENDPOINT SUBSYSTEM
-M: Kishon Vijay Abraham I <kishon@ti.com>
+PCI DRIVER FOR AARDVARK (Marvell Armada 3700)
+M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
L: linux-pci@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/kishon/pci-endpoint.git
-S: Supported
-F: drivers/pci/endpoint/
-F: drivers/misc/pci_endpoint_test.c
-F: tools/pci/
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: Documentation/devicetree/bindings/pci/aardvark-pci.txt
+F: drivers/pci/host/pci-aardvark.c
PCI DRIVER FOR ALTERA PCIE IP
M: Ley Foon Tan <lftan@altera.com>
@@ -10070,6 +10180,14 @@ S: Supported
F: Documentation/devicetree/bindings/pci/altera-pcie.txt
F: drivers/pci/host/pcie-altera.c
+PCI DRIVER FOR APPLIEDMICRO XGENE
+M: Tanmay Inamdar <tinamdar@apm.com>
+L: linux-pci@vger.kernel.org
+L: linux-arm-kernel@lists.infradead.org
+S: Maintained
+F: Documentation/devicetree/bindings/pci/xgene-pci.txt
+F: drivers/pci/host/pci-xgene.c
+
PCI DRIVER FOR ARM VERSATILE PLATFORM
M: Rob Herring <robh@kernel.org>
L: linux-pci@vger.kernel.org
@@ -10086,14 +10204,6 @@ S: Maintained
F: Documentation/devicetree/bindings/pci/pci-armada8k.txt
F: drivers/pci/dwc/pcie-armada8k.c
-PCI DRIVER FOR APPLIEDMICRO XGENE
-M: Tanmay Inamdar <tinamdar@apm.com>
-L: linux-pci@vger.kernel.org
-L: linux-arm-kernel@lists.infradead.org
-S: Maintained
-F: Documentation/devicetree/bindings/pci/xgene-pci.txt
-F: drivers/pci/host/pci-xgene.c
-
PCI DRIVER FOR FREESCALE LAYERSCAPE
M: Minghuan Lian <minghuan.Lian@freescale.com>
M: Mingkai Hu <mingkai.hu@freescale.com>
@@ -10104,6 +10214,15 @@ L: linux-arm-kernel@lists.infradead.org
S: Maintained
F: drivers/pci/dwc/*layerscape*
+PCI DRIVER FOR GENERIC OF HOSTS
+M: Will Deacon <will.deacon@arm.com>
+L: linux-pci@vger.kernel.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: Documentation/devicetree/bindings/pci/host-generic-pci.txt
+F: drivers/pci/host/pci-host-common.c
+F: drivers/pci/host/pci-host-generic.c
+
PCI DRIVER FOR IMX6
M: Richard Zhu <hongxing.zhu@nxp.com>
M: Lucas Stach <l.stach@pengutronix.de>
@@ -10113,28 +10232,11 @@ S: Maintained
F: Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
F: drivers/pci/dwc/*imx6*
-PCI DRIVER FOR TI KEYSTONE
-M: Murali Karicheri <m-karicheri2@ti.com>
-L: linux-pci@vger.kernel.org
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Maintained
-F: drivers/pci/dwc/*keystone*
-
-PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support)
-M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
-M: Jason Cooper <jason@lakedaemon.net>
-L: linux-pci@vger.kernel.org
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Maintained
-F: drivers/pci/host/*mvebu*
-
-PCI DRIVER FOR AARDVARK (Marvell Armada 3700)
-M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+PCI DRIVER FOR INTEL VOLUME MANAGEMENT DEVICE (VMD)
+M: Keith Busch <keith.busch@intel.com>
L: linux-pci@vger.kernel.org
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Maintained
-F: Documentation/devicetree/bindings/pci/aardvark-pci.txt
-F: drivers/pci/host/pci-aardvark.c
+S: Supported
+F: drivers/pci/host/vmd.c
PCI DRIVER FOR MICROSEMI SWITCHTEC
M: Kurt Schwemmer <kurt.schwemmer@microsemi.com>
@@ -10147,6 +10249,14 @@ F: Documentation/ABI/testing/sysfs-class-switchtec
F: drivers/pci/switch/switchtec*
F: include/uapi/linux/switchtec_ioctl.h
+PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support)
+M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+M: Jason Cooper <jason@lakedaemon.net>
+L: linux-pci@vger.kernel.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: drivers/pci/host/*mvebu*
+
PCI DRIVER FOR NVIDIA TEGRA
M: Thierry Reding <thierry.reding@gmail.com>
L: linux-tegra@vger.kernel.org
@@ -10155,14 +10265,6 @@ S: Supported
F: Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt
F: drivers/pci/host/pci-tegra.c
-PCI DRIVER FOR TI DRA7XX
-M: Kishon Vijay Abraham I <kishon@ti.com>
-L: linux-omap@vger.kernel.org
-L: linux-pci@vger.kernel.org
-S: Supported
-F: Documentation/devicetree/bindings/pci/ti-pci.txt
-F: drivers/pci/dwc/pci-dra7xx.c
-
PCI DRIVER FOR RENESAS R-CAR
M: Simon Horman <horms@verge.net.au>
L: linux-pci@vger.kernel.org
@@ -10186,26 +10288,44 @@ S: Maintained
F: Documentation/devicetree/bindings/pci/designware-pcie.txt
F: drivers/pci/dwc/*designware*
-PCI DRIVER FOR GENERIC OF HOSTS
-M: Will Deacon <will.deacon@arm.com>
+PCI DRIVER FOR TI DRA7XX
+M: Kishon Vijay Abraham I <kishon@ti.com>
+L: linux-omap@vger.kernel.org
+L: linux-pci@vger.kernel.org
+S: Supported
+F: Documentation/devicetree/bindings/pci/ti-pci.txt
+F: drivers/pci/dwc/pci-dra7xx.c
+
+PCI DRIVER FOR TI KEYSTONE
+M: Murali Karicheri <m-karicheri2@ti.com>
L: linux-pci@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
-F: Documentation/devicetree/bindings/pci/host-generic-pci.txt
-F: drivers/pci/host/pci-host-common.c
-F: drivers/pci/host/pci-host-generic.c
+F: drivers/pci/dwc/*keystone*
-PCI DRIVER FOR INTEL VOLUME MANAGEMENT DEVICE (VMD)
-M: Keith Busch <keith.busch@intel.com>
+PCI ENDPOINT SUBSYSTEM
+M: Kishon Vijay Abraham I <kishon@ti.com>
L: linux-pci@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/kishon/pci-endpoint.git
S: Supported
-F: drivers/pci/host/vmd.c
+F: drivers/pci/endpoint/
+F: drivers/misc/pci_endpoint_test.c
+F: tools/pci/
-PCIE DRIVER FOR ST SPEAR13XX
-M: Pratyush Anand <pratyush.anand@gmail.com>
+PCI ENHANCED ERROR HANDLING (EEH) FOR POWERPC
+M: Russell Currey <ruscur@russell.cc>
+L: linuxppc-dev@lists.ozlabs.org
+S: Supported
+F: Documentation/powerpc/eeh-pci-error-recovery.txt
+F: arch/powerpc/kernel/eeh*.c
+F: arch/powerpc/platforms/*/eeh*.c
+F: arch/powerpc/include/*/eeh*.h
+
+PCI ERROR RECOVERY
+M: Linas Vepstas <linasvepstas@gmail.com>
L: linux-pci@vger.kernel.org
-S: Maintained
-F: drivers/pci/dwc/*spear*
+S: Supported
+F: Documentation/PCI/pci-error-recovery.txt
PCI MSI DRIVER FOR ALTERA MSI IP
M: Ley Foon Tan <lftan@altera.com>
@@ -10223,6 +10343,19 @@ S: Maintained
F: Documentation/devicetree/bindings/pci/xgene-pci-msi.txt
F: drivers/pci/host/pci-xgene-msi.c
+PCI SUBSYSTEM
+M: Bjorn Helgaas <bhelgaas@google.com>
+L: linux-pci@vger.kernel.org
+Q: http://patchwork.ozlabs.org/project/linux-pci/list/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci.git
+S: Supported
+F: Documentation/devicetree/bindings/pci/
+F: Documentation/PCI/
+F: drivers/pci/
+F: include/linux/pci*
+F: arch/x86/pci/
+F: arch/x86/kernel/quirks.c
+
PCIE DRIVER FOR AXIS ARTPEC
M: Niklas Cassel <niklas.cassel@axis.com>
M: Jesper Nilsson <jesper.nilsson@axis.com>
@@ -10232,6 +10365,14 @@ S: Maintained
F: Documentation/devicetree/bindings/pci/axis,artpec*
F: drivers/pci/dwc/*artpec*
+PCIE DRIVER FOR CAVIUM THUNDERX
+M: David Daney <david.daney@cavium.com>
+L: linux-pci@vger.kernel.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Supported
+F: Documentation/devicetree/bindings/pci/pci-thunder-*
+F: drivers/pci/host/pci-thunder-*
+
PCIE DRIVER FOR HISILICON
M: Zhou Wang <wangzhou1@hisilicon.com>
M: Gabriele Paoloni <gabriele.paoloni@huawei.com>
@@ -10248,6 +10389,21 @@ S: Maintained
F: Documentation/devicetree/bindings/pci/pcie-kirin.txt
F: drivers/pci/dwc/pcie-kirin.c
+PCIE DRIVER FOR MEDIATEK
+M: Ryder Lee <ryder.lee@mediatek.com>
+L: linux-pci@vger.kernel.org
+L: linux-mediatek@lists.infradead.org
+S: Supported
+F: Documentation/devicetree/bindings/pci/mediatek*
+F: drivers/pci/host/*mediatek*
+
+PCIE DRIVER FOR QUALCOMM MSM
+M: Stanimir Varbanov <svarbanov@mm-sol.com>
+L: linux-pci@vger.kernel.org
+L: linux-arm-msm@vger.kernel.org
+S: Maintained
+F: drivers/pci/dwc/*qcom*
+
PCIE DRIVER FOR ROCKCHIP
M: Shawn Lin <shawn.lin@rock-chips.com>
L: linux-pci@vger.kernel.org
@@ -10256,28 +10412,11 @@ S: Maintained
F: Documentation/devicetree/bindings/pci/rockchip-pcie.txt
F: drivers/pci/host/pcie-rockchip.c
-PCIE DRIVER FOR QUALCOMM MSM
-M: Stanimir Varbanov <svarbanov@mm-sol.com>
-L: linux-pci@vger.kernel.org
-L: linux-arm-msm@vger.kernel.org
-S: Maintained
-F: drivers/pci/dwc/*qcom*
-
-PCIE DRIVER FOR CAVIUM THUNDERX
-M: David Daney <david.daney@cavium.com>
+PCIE DRIVER FOR ST SPEAR13XX
+M: Pratyush Anand <pratyush.anand@gmail.com>
L: linux-pci@vger.kernel.org
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Supported
-F: Documentation/devicetree/bindings/pci/pci-thunder-*
-F: drivers/pci/host/pci-thunder-*
-
-PCIE DRIVER FOR MEDIATEK
-M: Ryder Lee <ryder.lee@mediatek.com>
-L: linux-pci@vger.kernel.org
-L: linux-mediatek@lists.infradead.org
-S: Supported
-F: Documentation/devicetree/bindings/pci/mediatek*
-F: drivers/pci/host/*mediatek*
+S: Maintained
+F: drivers/pci/dwc/*spear*
PCMCIA SUBSYSTEM
P: Linux PCMCIA Team
@@ -10380,7 +10519,7 @@ L: linux-gpio@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl.git
S: Maintained
F: Documentation/devicetree/bindings/pinctrl/
-F: Documentation/pinctrl.txt
+F: Documentation/driver-api/pinctl.rst
F: drivers/pinctrl/
F: include/linux/pinctrl/
@@ -10446,14 +10585,14 @@ S: Maintained
F: drivers/pinctrl/spear/

PISTACHIO SOC SUPPORT
-M: James Hartley <james.hartley@imgtec.com>
-M: Ionela Voinescu <ionela.voinescu@imgtec.com>
-L: linux-mips@linux-mips.org
-S: Maintained
-F: arch/mips/pistachio/
-F: arch/mips/include/asm/mach-pistachio/
-F: arch/mips/boot/dts/img/pistachio*
-F: arch/mips/configs/pistachio*_defconfig
+M: James Hartley <james.hartley@imgtec.com>
+M: Ionela Voinescu <ionela.voinescu@imgtec.com>
+L: linux-mips@linux-mips.org
+S: Maintained
+F: arch/mips/pistachio/
+F: arch/mips/include/asm/mach-pistachio/
+F: arch/mips/boot/dts/img/pistachio*
+F: arch/mips/configs/pistachio*_defconfig
PKTCDVD DRIVER
S: Orphan
@@ -10496,6 +10635,11 @@ L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/pm8001/
+PNP SUPPORT
+M: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+S: Maintained
+F: drivers/pnp/
+
POSIX CLOCKS and TIMERS
M: Thomas Gleixner <tglx@linutronix.de>
L: linux-kernel@vger.kernel.org
@@ -10517,15 +10661,6 @@ F: include/linux/pm_*
F: include/linux/powercap.h
F: drivers/powercap/
-POWER SUPPLY CLASS/SUBSYSTEM and DRIVERS
-M: Sebastian Reichel <sre@kernel.org>
-L: linux-pm@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/sre/linux-power-supply.git
-S: Maintained
-F: Documentation/devicetree/bindings/power/supply/
-F: include/linux/power_supply.h
-F: drivers/power/supply/
-
POWER STATE COORDINATION INTERFACE (PSCI)
M: Mark Rutland <mark.rutland@arm.com>
M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
@@ -10535,23 +10670,21 @@ F: drivers/firmware/psci*.c
F: include/linux/psci.h
F: include/uapi/linux/psci.h
+POWER SUPPLY CLASS/SUBSYSTEM and DRIVERS
+M: Sebastian Reichel <sre@kernel.org>
+L: linux-pm@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/sre/linux-power-supply.git
+S: Maintained
+F: Documentation/devicetree/bindings/power/supply/
+F: include/linux/power_supply.h
+F: drivers/power/supply/
+
POWERNV OPERATOR PANEL LCD DISPLAY DRIVER
M: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
L: linuxppc-dev@lists.ozlabs.org
S: Maintained
F: drivers/char/powernv-op-panel.c
-PNP SUPPORT
-M: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
-S: Maintained
-F: drivers/pnp/
-
-PPP PROTOCOL DRIVERS AND COMPRESSORS
-M: Paul Mackerras <paulus@samba.org>
-L: linux-ppp@vger.kernel.org
-S: Maintained
-F: drivers/net/ppp/ppp_*
-
PPP OVER ATM (RFC 2364)
M: Mitchell Blank Jr <mitch@sfgoth.com>
S: Maintained
@@ -10571,6 +10704,12 @@ F: net/l2tp/l2tp_ppp.c
F: include/linux/if_pppol2tp.h
F: include/uapi/linux/if_pppol2tp.h
+PPP PROTOCOL DRIVERS AND COMPRESSORS
+M: Paul Mackerras <paulus@samba.org>
+L: linux-ppp@vger.kernel.org
+S: Maintained
+F: drivers/net/ppp/ppp_*
+
PPS SUPPORT
M: Rodolfo Giometti <giometti@enneenne.com>
W: http://wiki.enneenne.com/index.php/LinuxPPS_support
@@ -10684,7 +10823,6 @@ F: drivers/ptp/*
F: include/linux/ptp_cl*

PTRACE SUPPORT
-M: Roland McGrath <roland@hack.frob.com>
M: Oleg Nesterov <oleg@redhat.com>
S: Maintained
F: include/asm-generic/syscall.h
@@ -10692,7 +10830,12 @@ F: include/linux/ptrace.h
F: include/linux/regset.h
F: include/linux/tracehook.h
F: include/uapi/linux/ptrace.h
+F: include/uapi/linux/ptrace.h
+F: include/asm-generic/ptrace.h
F: kernel/ptrace.c
+F: arch/*/ptrace*.c
+F: arch/*/*/ptrace*.c
+F: arch/*/include/asm/ptrace*.h
PULSE8-CEC DRIVER
M: Hans Verkuil <hverkuil@xs4all.nl>
@@ -10700,6 +10843,7 @@ L: linux-media@vger.kernel.org
T: git git://linuxtv.org/media_tree.git
S: Maintained
F: drivers/media/usb/pulse8-cec/*
+F: Documentation/media/cec-drivers/pulse8-cec.rst
PVRUSB2 VIDEO4LINUX DRIVER
M: Mike Isely <isely@pobox.com>
@@ -10727,6 +10871,12 @@ F: Documentation/devicetree/bindings/hwmon/pwm-fan.txt
F: Documentation/hwmon/pwm-fan
F: drivers/hwmon/pwm-fan.c
+PWM IR Transmitter
+M: Sean Young <sean@mess.org>
+L: linux-media@vger.kernel.org
+S: Maintained
+F: drivers/media/rc/pwm-ir-tx.c
+
PWM SUBSYSTEM
M: Thierry Reding <thierry.reding@gmail.com>
L: linux-pwm@vger.kernel.org
@@ -10741,6 +10891,20 @@ F: include/linux/pwm_backlight.h
F: drivers/gpio/gpio-mvebu.c
F: Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
+PXA GPIO DRIVER
+M: Robert Jarzmik <robert.jarzmik@free.fr>
+L: linux-gpio@vger.kernel.org
+S: Maintained
+F: drivers/gpio/gpio-pxa.c
+
+PXA MMCI DRIVER
+S: Orphan
+
+PXA RTC DRIVER
+M: Robert Jarzmik <robert.jarzmik@free.fr>
+L: linux-rtc@vger.kernel.org
+S: Maintained
+
PXA2xx/PXA3xx SUPPORT
M: Daniel Mack <daniel@zonque.org>
M: Haojian Zhuang <haojian.zhuang@gmail.com>
@@ -10760,36 +10924,12 @@ F: include/sound/pxa2xx-lib.h
F: sound/arm/pxa*
F: sound/soc/pxa/
-PXA GPIO DRIVER
-M: Robert Jarzmik <robert.jarzmik@free.fr>
-L: linux-gpio@vger.kernel.org
-S: Maintained
-F: drivers/gpio/gpio-pxa.c
-
PXA3xx NAND FLASH DRIVER
M: Ezequiel Garcia <ezequiel.garcia@free-electrons.com>
L: linux-mtd@lists.infradead.org
S: Maintained
F: drivers/mtd/nand/pxa3xx_nand.c
-MMP SUPPORT
-M: Eric Miao <eric.y.miao@gmail.com>
-M: Haojian Zhuang <haojian.zhuang@gmail.com>
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-T: git git://github.com/hzhuang1/linux.git
-T: git git://git.linaro.org/people/ycmiao/pxa-linux.git
-S: Maintained
-F: arch/arm/boot/dts/mmp*
-F: arch/arm/mach-mmp/
-
-PXA MMCI DRIVER
-S: Orphan
-
-PXA RTC DRIVER
-M: Robert Jarzmik <robert.jarzmik@free.fr>
-L: linux-rtc@vger.kernel.org
-S: Maintained
-
QAT DRIVER
M: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
M: Salvatore Benedetto <salvatore.benedetto@intel.com>
@@ -10797,12 +10937,56 @@ L: qat-linux@intel.com
S: Supported
F: drivers/crypto/qat/
+QCOM AUDIO (ASoC) DRIVERS
+M: Patrick Lai <plai@codeaurora.org>
+M: Banajit Goswami <bgoswami@codeaurora.org>
+L: alsa-devel@alsa-project.org (moderated for non-subscribers)
+S: Supported
+F: sound/soc/qcom/
+
+QEMU MACHINE EMULATOR AND VIRTUALIZER SUPPORT
+M: Gabriel Somlo <somlo@cmu.edu>
+M: "Michael S. Tsirkin" <mst@redhat.com>
+L: qemu-devel@nongnu.org
+S: Maintained
+F: drivers/firmware/qemu_fw_cfg.c
+
QIB DRIVER
M: Mike Marciniszyn <infinipath@intel.com>
L: linux-rdma@vger.kernel.org
S: Supported
F: drivers/infiniband/hw/qib/
+QLOGIC QL41xxx FCOE DRIVER
+M: QLogic-Storage-Upstream@cavium.com
+L: linux-scsi@vger.kernel.org
+S: Supported
+F: drivers/scsi/qedf/
+
+QLOGIC QL41xxx ISCSI DRIVER
+M: QLogic-Storage-Upstream@cavium.com
+L: linux-scsi@vger.kernel.org
+S: Supported
+F: drivers/scsi/qedi/
+
+QLOGIC QL4xxx ETHERNET DRIVER
+M: Yuval Mintz <Yuval.Mintz@cavium.com>
+M: Ariel Elior <Ariel.Elior@cavium.com>
+M: everest-linux-l2@cavium.com
+L: netdev@vger.kernel.org
+S: Supported
+F: drivers/net/ethernet/qlogic/qed/
+F: include/linux/qed/
+F: drivers/net/ethernet/qlogic/qede/
+
+QLOGIC QL4xxx RDMA DRIVER
+M: Ram Amrani <Ram.Amrani@cavium.com>
+M: Ariel Elior <Ariel.Elior@cavium.com>
+L: linux-rdma@vger.kernel.org
+S: Supported
+F: drivers/infiniband/hw/qedr/
+F: include/uapi/rdma/qedr-abi.h
+
QLOGIC QLA1280 SCSI DRIVER
M: Michael Reed <mdr@sgi.com>
L: linux-scsi@vger.kernel.org
@@ -10816,13 +11000,6 @@ S: Supported
F: Documentation/scsi/LICENSE.qla2xxx
F: drivers/scsi/qla2xxx/
-QLOGIC QLA4XXX iSCSI DRIVER
-M: QLogic-Storage-Upstream@qlogic.com
-L: linux-scsi@vger.kernel.org
-S: Supported
-F: Documentation/scsi/LICENSE.qla4xxx
-F: drivers/scsi/qla4xxx/
-
QLOGIC QLA3XXX NETWORK DRIVER
M: Dept-GELinuxNICDev@cavium.com
L: netdev@vger.kernel.org
@@ -10830,6 +11007,13 @@ S: Supported
F: Documentation/networking/LICENSE.qla3xxx
F: drivers/net/ethernet/qlogic/qla3xxx.*
+QLOGIC QLA4XXX iSCSI DRIVER
+M: QLogic-Storage-Upstream@qlogic.com
+L: linux-scsi@vger.kernel.org
+S: Supported
+F: Documentation/scsi/LICENSE.qla4xxx
+F: drivers/scsi/qla4xxx/
+
QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
M: Harish Patil <harish.patil@cavium.com>
M: Manish Chopra <manish.chopra@cavium.com>
@@ -10846,28 +11030,6 @@ L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/qlogic/qlge/
-QLOGIC QL4xxx ETHERNET DRIVER
-M: Yuval Mintz <Yuval.Mintz@cavium.com>
-M: Ariel Elior <Ariel.Elior@cavium.com>
-M: everest-linux-l2@cavium.com
-L: netdev@vger.kernel.org
-S: Supported
-F: drivers/net/ethernet/qlogic/qed/
-F: include/linux/qed/
-F: drivers/net/ethernet/qlogic/qede/
-
-QLOGIC QL41xxx ISCSI DRIVER
-M: QLogic-Storage-Upstream@cavium.com
-L: linux-scsi@vger.kernel.org
-S: Supported
-F: drivers/scsi/qedi/
-
-QLOGIC QL41xxx FCOE DRIVER
-M: QLogic-Storage-Upstream@cavium.com
-L: linux-scsi@vger.kernel.org
-S: Supported
-F: drivers/scsi/qedf/
-
QNX4 FILESYSTEM
M: Anders Larsen <al@alarsen.net>
W: http://www.alarsen.net/linux/qnx4fs/
@@ -10894,13 +11056,6 @@ T: git git://linuxtv.org/anttip/media_tree.git
S: Maintained
F: drivers/media/tuners/qt1010*
-QUALCOMM ATHEROS ATH9K WIRELESS DRIVER
-M: QCA ath9k Development <ath9k-devel@qca.qualcomm.com>
-L: linux-wireless@vger.kernel.org
-W: http://wireless.kernel.org/en/users/Drivers/ath9k
-S: Supported
-F: drivers/net/wireless/ath/ath9k/
-
QUALCOMM ATHEROS ATH10K WIRELESS DRIVER
M: Kalle Valo <kvalo@qca.qualcomm.com>
L: ath10k@lists.infradead.org
@@ -10909,6 +11064,21 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
S: Supported
F: drivers/net/wireless/ath/ath10k/
+QUALCOMM ATHEROS ATH9K WIRELESS DRIVER
+M: QCA ath9k Development <ath9k-devel@qca.qualcomm.com>
+L: linux-wireless@vger.kernel.org
+W: http://wireless.kernel.org/en/users/Drivers/ath9k
+S: Supported
+F: drivers/net/wireless/ath/ath9k/
+
+QUALCOMM CAMERA SUBSYSTEM DRIVER
+M: Todor Tomov <todor.tomov@linaro.org>
+L: linux-media@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/media/qcom,camss.txt
+F: Documentation/media/v4l-drivers/qcom_camss.rst
+F: drivers/media/platform/qcom/camss-8x16/
+
QUALCOMM EMAC GIGABIT ETHERNET DRIVER
M: Timur Tabi <timur@codeaurora.org>
L: netdev@vger.kernel.org
@@ -10938,33 +11108,24 @@ T: git git://github.com/KrasnikovEugene/wcn36xx.git
S: Supported
F: drivers/net/wireless/ath/wcn36xx/
-QEMU MACHINE EMULATOR AND VIRTUALIZER SUPPORT
-M: Gabriel Somlo <somlo@cmu.edu>
-M: "Michael S. Tsirkin" <mst@redhat.com>
-L: qemu-devel@nongnu.org
-S: Maintained
-F: drivers/firmware/qemu_fw_cfg.c
-
QUANTENNA QTNFMAC WIRELESS DRIVER
-M: Igor Mitsyanko <imitsyanko@quantenna.com>
-M: Avinash Patil <avinashp@quantenna.com>
-M: Sergey Matyukevich <smatyukevich@quantenna.com>
-L: linux-wireless@vger.kernel.org
-S: Maintained
-F: drivers/net/wireless/quantenna
+M: Igor Mitsyanko <imitsyanko@quantenna.com>
+M: Avinash Patil <avinashp@quantenna.com>
+M: Sergey Matyukevich <smatyukevich@quantenna.com>
+L: linux-wireless@vger.kernel.org
+S: Maintained
+F: drivers/net/wireless/quantenna
-RADOS BLOCK DEVICE (RBD)
-M: Ilya Dryomov <idryomov@gmail.com>
-M: Sage Weil <sage@redhat.com>
-M: Alex Elder <elder@kernel.org>
-L: ceph-devel@vger.kernel.org
-W: http://ceph.com/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git
-T: git git://github.com/ceph/ceph-client.git
+RADEON and AMDGPU DRM DRIVERS
+M: Alex Deucher <alexander.deucher@amd.com>
+M: Christian König <christian.koenig@amd.com>
+L: amd-gfx@lists.freedesktop.org
+T: git git://people.freedesktop.org/~agd5f/linux
S: Supported
-F: Documentation/ABI/testing/sysfs-bus-rbd
-F: drivers/block/rbd.c
-F: drivers/block/rbd_types.h
+F: drivers/gpu/drm/radeon/
+F: include/uapi/drm/radeon_drm.h
+F: drivers/gpu/drm/amd/
+F: include/uapi/drm/amdgpu_drm.h
RADEON FRAMEBUFFER DISPLAY DRIVER
M: Benjamin Herrenschmidt <benh@kernel.crashing.org>
@@ -10988,6 +11149,19 @@ S: Maintained
F: drivers/media/radio/radio-shark2.c
F: drivers/media/radio/radio-tea5777.c
+RADOS BLOCK DEVICE (RBD)
+M: Ilya Dryomov <idryomov@gmail.com>
+M: Sage Weil <sage@redhat.com>
+M: Alex Elder <elder@kernel.org>
+L: ceph-devel@vger.kernel.org
+W: http://ceph.com/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git
+T: git git://github.com/ceph/ceph-client.git
+S: Supported
+F: Documentation/ABI/testing/sysfs-bus-rbd
+F: drivers/block/rbd.c
+F: drivers/block/rbd_types.h
+
RAGE128 FRAMEBUFFER DISPLAY DRIVER
M: Paul Mackerras <paulus@samba.org>
L: linux-fbdev@vger.kernel.org
@@ -11067,6 +11241,12 @@ L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/rdc/r6040.c
+RDMAVT - RDMA verbs software
+M: Dennis Dalessandro <dennis.dalessandro@intel.com>
+L: linux-rdma@vger.kernel.org
+S: Supported
+F: drivers/infiniband/sw/rdmavt
+
RDS - RELIABLE DATAGRAM SOCKETS
M: Santosh Shilimkar <santosh.shilimkar@oracle.com>
L: netdev@vger.kernel.org
@@ -11077,18 +11257,12 @@ S: Supported
F: net/rds/
F: Documentation/networking/rds.txt
-RDMAVT - RDMA verbs software
-M: Dennis Dalessandro <dennis.dalessandro@intel.com>
-L: linux-rdma@vger.kernel.org
-S: Supported
-F: drivers/infiniband/sw/rdmavt
-
RDT - RESOURCE ALLOCATION
M: Fenghua Yu <fenghua.yu@intel.com>
L: linux-kernel@vger.kernel.org
S: Supported
F: arch/x86/kernel/cpu/intel_rdt*
-F: arch/x86/include/asm/intel_rdt*
+F: arch/x86/include/asm/intel_rdt_sched.h
F: Documentation/x86/intel_rdt*

READ-COPY UPDATE (RCU)
@@ -11131,11 +11305,6 @@ S: Maintained
F: sound/soc/codecs/rt*
F: include/sound/rt*.h
-REISERFS FILE SYSTEM
-L: reiserfs-devel@vger.kernel.org
-S: Supported
-F: fs/reiserfs/
-
REGISTER MAP ABSTRACTION
M: Mark Brown <broonie@kernel.org>
L: linux-kernel@vger.kernel.org
@@ -11145,6 +11314,11 @@ F: Documentation/devicetree/bindings/regmap/
F: drivers/base/regmap/
F: include/linux/regmap.h
+REISERFS FILE SYSTEM
+L: reiserfs-devel@vger.kernel.org
+S: Supported
+F: fs/reiserfs/
+
REMOTE PROCESSOR (REMOTEPROC) SUBSYSTEM
M: Ohad Ben-Cohen <ohad@wizery.com>
M: Bjorn Andersson <bjorn.andersson@linaro.org>
@@ -11220,16 +11394,16 @@ S: Maintained
F: lib/rhashtable.c
F: include/linux/rhashtable.h
-RICOH SMARTMEDIA/XD DRIVER
+RICOH R5C592 MEMORYSTICK DRIVER
M: Maxim Levitsky <maximlevitsky@gmail.com>
S: Maintained
-F: drivers/mtd/nand/r852.c
-F: drivers/mtd/nand/r852.h
+F: drivers/memstick/host/r592.*
-RICOH R5C592 MEMORYSTICK DRIVER
+RICOH SMARTMEDIA/XD DRIVER
M: Maxim Levitsky <maximlevitsky@gmail.com>
S: Maintained
-F: drivers/memstick/host/r592.*
+F: drivers/mtd/nand/r852.c
+F: drivers/mtd/nand/r852.h
ROCCAT DRIVERS
M: Stefan Achatz <erazor_de@users.sourceforge.net>
@@ -11258,6 +11432,17 @@ L: linux-serial@vger.kernel.org
S: Odd Fixes
F: drivers/tty/serial/rp2.*
+ROHM MULTIFUNCTION BD9571MWV-M PMIC DEVICE DRIVERS
+M: Marek Vasut <marek.vasut+renesas@gmail.com>
+L: linux-kernel@vger.kernel.org
+L: linux-renesas-soc@vger.kernel.org
+S: Supported
+F: drivers/mfd/bd9571mwv.c
+F: drivers/regulator/bd9571mwv-regulator.c
+F: drivers/gpio/gpio-bd9571mwv.c
+F: include/linux/mfd/bd9571mwv.h
+F: Documentation/devicetree/bindings/mfd/bd9571mwv.txt
+
ROSE NETWORK LAYER
M: Ralf Baechle <ralf@linux-mips.org>
L: linux-hams@vger.kernel.org
@@ -11366,6 +11551,23 @@ S: Supported
F: drivers/s390/block/dasd*
F: block/partitions/ibm.c
+S390 IOMMU (PCI)
+M: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+L: linux-s390@vger.kernel.org
+W: http://www.ibm.com/developerworks/linux/linux390/
+S: Supported
+F: drivers/iommu/s390-iommu.c
+
+S390 IUCV NETWORK LAYER
+M: Julian Wiedmann <jwi@linux.vnet.ibm.com>
+M: Ursula Braun <ubraun@linux.vnet.ibm.com>
+L: linux-s390@vger.kernel.org
+W: http://www.ibm.com/developerworks/linux/linux390/
+S: Supported
+F: drivers/s390/net/*iucv*
+F: include/net/iucv/
+F: net/iucv/
+
S390 NETWORK DRIVERS
M: Julian Wiedmann <jwi@linux.vnet.ibm.com>
M: Ursula Braun <ubraun@linux.vnet.ibm.com>
@@ -11383,6 +11585,16 @@ S: Supported
F: arch/s390/pci/
F: drivers/pci/hotplug/s390_pci_hpc.c
+S390 VFIO-CCW DRIVER
+M: Cornelia Huck <cohuck@redhat.com>
+M: Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
+L: linux-s390@vger.kernel.org
+L: kvm@vger.kernel.org
+S: Supported
+F: drivers/s390/cio/vfio_ccw*
+F: Documentation/s390/vfio-ccw.txt
+F: include/uapi/linux/vfio_ccw.h
+
S390 ZCRYPT DRIVER
M: Harald Freudenberger <freude@de.ibm.com>
L: linux-s390@vger.kernel.org
@@ -11397,33 +11609,6 @@ W: http://www.ibm.com/developerworks/linux/linux390/
S: Supported
F: drivers/s390/scsi/zfcp_*
-S390 IUCV NETWORK LAYER
-M: Julian Wiedmann <jwi@linux.vnet.ibm.com>
-M: Ursula Braun <ubraun@linux.vnet.ibm.com>
-L: linux-s390@vger.kernel.org
-W: http://www.ibm.com/developerworks/linux/linux390/
-S: Supported
-F: drivers/s390/net/*iucv*
-F: include/net/iucv/
-F: net/iucv/
-
-S390 IOMMU (PCI)
-M: Gerald Schaefer <gerald.schaefer@de.ibm.com>
-L: linux-s390@vger.kernel.org
-W: http://www.ibm.com/developerworks/linux/linux390/
-S: Supported
-F: drivers/iommu/s390-iommu.c
-
-S390 VFIO-CCW DRIVER
-M: Cornelia Huck <cohuck@redhat.com>
-M: Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
-L: linux-s390@vger.kernel.org
-L: kvm@vger.kernel.org
-S: Supported
-F: drivers/s390/cio/vfio_ccw*
-F: Documentation/s390/vfio-ccw.txt
-F: include/uapi/linux/vfio_ccw.h
-
S3C24XX SD/MMC Driver
M: Ben Dooks <ben-linux@fluff.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -11457,12 +11642,6 @@ F: drivers/media/common/saa7146/
F: drivers/media/pci/saa7146/
F: include/media/saa7146*
-SAMSUNG LAPTOP DRIVER
-M: Corentin Chary <corentin.chary@gmail.com>
-L: platform-driver-x86@vger.kernel.org
-S: Maintained
-F: drivers/platform/x86/samsung-laptop.c
-
SAMSUNG AUDIO (ASoC) DRIVERS
M: Krzysztof Kozlowski <krzk@kernel.org>
M: Sangbeom Kim <sbkim73@samsung.com>
@@ -11485,6 +11664,12 @@ L: linux-fbdev@vger.kernel.org
S: Maintained
F: drivers/video/fbdev/s3c-fb.c
+SAMSUNG LAPTOP DRIVER
+M: Corentin Chary <corentin.chary@gmail.com>
+L: platform-driver-x86@vger.kernel.org
+S: Maintained
+F: drivers/platform/x86/samsung-laptop.c
+
SAMSUNG MULTIFUNCTION PMIC DEVICE DRIVERS
M: Sangbeom Kim <sbkim73@samsung.com>
M: Krzysztof Kozlowski <krzk@kernel.org>
@@ -11503,22 +11688,6 @@ F: Documentation/devicetree/bindings/regulator/samsung,s2m*.txt
F: Documentation/devicetree/bindings/regulator/samsung,s5m*.txt
F: Documentation/devicetree/bindings/clock/samsung,s2mps11.txt
-SAMSUNG S5P Security SubSystem (SSS) DRIVER
-M: Krzysztof Kozlowski <krzk@kernel.org>
-M: Vladimir Zapolskiy <vz@mleia.com>
-L: linux-crypto@vger.kernel.org
-L: linux-samsung-soc@vger.kernel.org
-S: Maintained
-F: drivers/crypto/s5p-sss.c
-
-SAMSUNG S5P/EXYNOS4 SOC SERIES CAMERA SUBSYSTEM DRIVERS
-M: Kyungmin Park <kyungmin.park@samsung.com>
-M: Sylwester Nawrocki <s.nawrocki@samsung.com>
-L: linux-media@vger.kernel.org
-Q: https://patchwork.linuxtv.org/project/linux-media/list/
-S: Supported
-F: drivers/media/platform/exynos4-is/
-
SAMSUNG S3C24XX/S3C64XX SOC SERIES CAMIF DRIVER
M: Sylwester Nawrocki <sylvester.nawrocki@gmail.com>
L: linux-media@vger.kernel.org
@@ -11527,6 +11696,13 @@ S: Maintained
F: drivers/media/platform/s3c-camif/
F: include/media/drv-intf/s3c_camif.h
+SAMSUNG S3FWRN5 NFC DRIVER
+M: Robert Baldyga <r.baldyga@samsung.com>
+M: Krzysztof Opasiak <k.opasiak@samsung.com>
+L: linux-nfc@lists.01.org (moderated for non-subscribers)
+S: Supported
+F: drivers/nfc/s3fwrn5
+
SAMSUNG S5C73M3 CAMERA DRIVER
M: Kyungmin Park <kyungmin.park@samsung.com>
M: Andrzej Hajda <a.hajda@samsung.com>
@@ -11541,12 +11717,21 @@ L: linux-media@vger.kernel.org
S: Supported
F: drivers/media/i2c/s5k5baf.c
-SAMSUNG S3FWRN5 NFC DRIVER
-M: Robert Baldyga <r.baldyga@samsung.com>
-M: Krzysztof Opasiak <k.opasiak@samsung.com>
-L: linux-nfc@lists.01.org (moderated for non-subscribers)
+SAMSUNG S5P Security SubSystem (SSS) DRIVER
+M: Krzysztof Kozlowski <krzk@kernel.org>
+M: Vladimir Zapolskiy <vz@mleia.com>
+L: linux-crypto@vger.kernel.org
+L: linux-samsung-soc@vger.kernel.org
+S: Maintained
+F: drivers/crypto/s5p-sss.c
+
+SAMSUNG S5P/EXYNOS4 SOC SERIES CAMERA SUBSYSTEM DRIVERS
+M: Kyungmin Park <kyungmin.park@samsung.com>
+M: Sylwester Nawrocki <s.nawrocki@samsung.com>
+L: linux-media@vger.kernel.org
+Q: https://patchwork.linuxtv.org/project/linux-media/list/
S: Supported
-F: drivers/nfc/s3fwrn5
+F: drivers/media/platform/exynos4-is/
SAMSUNG SOC CLOCK DRIVERS
M: Sylwester Nawrocki <s.nawrocki@samsung.com>
@@ -11599,126 +11784,6 @@ F: drivers/phy/samsung/phy-s5pv210-usb2.c
F: drivers/phy/samsung/phy-samsung-usb2.c
F: drivers/phy/samsung/phy-samsung-usb2.h
-SERIAL DRIVERS
-M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-L: linux-serial@vger.kernel.org
-S: Maintained
-F: Documentation/devicetree/bindings/serial/
-F: drivers/tty/serial/
-
-SERIAL DEVICE BUS
-M: Rob Herring <robh@kernel.org>
-L: linux-serial@vger.kernel.org
-S: Maintained
-F: Documentation/devicetree/bindings/serial/slave-device.txt
-F: drivers/tty/serdev/
-F: include/linux/serdev.h
-
-SERIAL IR RECEIVER
-M: Sean Young <sean@mess.org>
-L: linux-media@vger.kernel.org
-S: Maintained
-F: drivers/media/rc/serial_ir.c
-
-STI CEC DRIVER
-M: Benjamin Gaignard <benjamin.gaignard@linaro.org>
-S: Maintained
-F: drivers/staging/media/st-cec/
-F: Documentation/devicetree/bindings/media/stih-cec.txt
-
-SHARED MEMORY COMMUNICATIONS (SMC) SOCKETS
-M: Ursula Braun <ubraun@linux.vnet.ibm.com>
-L: linux-s390@vger.kernel.org
-W: http://www.ibm.com/developerworks/linux/linux390/
-S: Supported
-F: net/smc/
-
-SYNOPSYS DESIGNWARE DMAC DRIVER
-M: Viresh Kumar <vireshk@kernel.org>
-M: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
-S: Maintained
-F: include/linux/dma/dw.h
-F: include/linux/platform_data/dma-dw.h
-F: drivers/dma/dw/
-
-SYNOPSYS DESIGNWARE ENTERPRISE ETHERNET DRIVER
-M: Jie Deng <jiedeng@synopsys.com>
-L: netdev@vger.kernel.org
-S: Supported
-F: drivers/net/ethernet/synopsys/
-
-SYNOPSYS DESIGNWARE I2C DRIVER
-M: Jarkko Nikula <jarkko.nikula@linux.intel.com>
-R: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
-R: Mika Westerberg <mika.westerberg@linux.intel.com>
-L: linux-i2c@vger.kernel.org
-S: Maintained
-F: drivers/i2c/busses/i2c-designware-*
-F: include/linux/platform_data/i2c-designware.h
-
-SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER
-M: Jaehoon Chung <jh80.chung@samsung.com>
-L: linux-mmc@vger.kernel.org
-S: Maintained
-F: drivers/mmc/host/dw_mmc*
-
-SYSTEM TRACE MODULE CLASS
-M: Alexander Shishkin <alexander.shishkin@linux.intel.com>
-S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/ash/stm.git
-F: Documentation/trace/stm.txt
-F: drivers/hwtracing/stm/
-F: include/linux/stm.h
-F: include/uapi/linux/stm.h
-
-TEE SUBSYSTEM
-M: Jens Wiklander <jens.wiklander@linaro.org>
-S: Maintained
-F: include/linux/tee_drv.h
-F: include/uapi/linux/tee.h
-F: drivers/tee/
-F: Documentation/tee.txt
-
-THUNDERBOLT DRIVER
-M: Andreas Noever <andreas.noever@gmail.com>
-M: Michael Jamet <michael.jamet@intel.com>
-M: Mika Westerberg <mika.westerberg@linux.intel.com>
-M: Yehezkel Bernat <yehezkel.bernat@intel.com>
-S: Maintained
-F: drivers/thunderbolt/
-
-TI BQ27XXX POWER SUPPLY DRIVER
-R: Andrew F. Davis <afd@ti.com>
-F: include/linux/power/bq27xxx_battery.h
-F: drivers/power/supply/bq27xxx_battery.c
-F: drivers/power/supply/bq27xxx_battery_i2c.c
-
-TIMEKEEPING, CLOCKSOURCE CORE, NTP, ALARMTIMER
-M: John Stultz <john.stultz@linaro.org>
-M: Thomas Gleixner <tglx@linutronix.de>
-R: Stephen Boyd <sboyd@codeaurora.org>
-L: linux-kernel@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
-S: Supported
-F: include/linux/clocksource.h
-F: include/linux/time.h
-F: include/linux/timex.h
-F: include/uapi/linux/time.h
-F: include/uapi/linux/timex.h
-F: kernel/time/clocksource.c
-F: kernel/time/time*.c
-F: kernel/time/alarmtimer.c
-F: kernel/time/ntp.c
-F: tools/testing/selftests/timers/
-
-TI TRF7970A NFC DRIVER
-M: Mark Greer <mgreer@animalcreek.com>
-L: linux-wireless@vger.kernel.org
-L: linux-nfc@lists.01.org (moderated for non-subscribers)
-S: Supported
-F: drivers/nfc/trf7970a.c
-F: Documentation/devicetree/bindings/net/nfc/trf7970a.txt
-
SC1200 WDT DRIVER
M: Zwane Mwaikambo <zwanem@gmail.com>
S: Maintained
@@ -11747,16 +11812,6 @@ M: Lubomir Rintel <lkundrak@v3.sk>
S: Supported
F: drivers/char/pcmcia/scr24x_cs.c
-SYSTEM CONTROL & POWER INTERFACE (SCPI) Message Protocol drivers
-M: Sudeep Holla <sudeep.holla@arm.com>
-L: linux-arm-kernel@lists.infradead.org
-S: Maintained
-F: Documentation/devicetree/bindings/arm/arm,scpi.txt
-F: drivers/clk/clk-scpi.c
-F: drivers/cpufreq/scpi-cpufreq.c
-F: drivers/firmware/arm_scpi.c
-F: include/linux/scpi_protocol.h
-
SCSI CDROM DRIVER
M: Jens Axboe <axboe@kernel.dk>
L: linux-scsi@vger.kernel.org
@@ -11841,14 +11896,6 @@ L: sdricohcs-devel@lists.sourceforge.net (subscribers-only)
S: Maintained
F: drivers/mmc/host/sdricoh_cs.c
-SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) DRIVER
-M: Adrian Hunter <adrian.hunter@intel.com>
-L: linux-mmc@vger.kernel.org
-T: git git://git.infradead.org/users/ahunter/linux-sdhci.git
-S: Maintained
-F: drivers/mmc/host/sdhci*
-F: include/linux/mmc/sdhci*
-
SECURE COMPUTING
M: Kees Cook <keescook@chromium.org>
R: Andy Lutomirski <luto@amacapital.net>
@@ -11871,6 +11918,14 @@ L: bcm-kernel-feedback-list@broadcom.com
S: Maintained
F: drivers/mmc/host/sdhci-brcmstb*
+SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) DRIVER
+M: Adrian Hunter <adrian.hunter@intel.com>
+L: linux-mmc@vger.kernel.org
+T: git git://git.infradead.org/users/ahunter/linux-sdhci.git
+S: Maintained
+F: drivers/mmc/host/sdhci*
+F: include/linux/mmc/sdhci*
+
SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) SAMSUNG DRIVER
M: Ben Dooks <ben-linux@fluff.org>
M: Jaehoon Chung <jh80.chung@samsung.com>
@@ -11895,6 +11950,10 @@ F: block/opal_proto.h
F: include/linux/sed*
F: include/uapi/linux/sed*
+SECURITY CONTACT
+M: Security Officers <security@kernel.org>
+S: Supported
+
SECURITY SUBSYSTEM
M: James Morris <james.l.morris@oracle.com>
M: "Serge E. Hallyn" <serge@hallyn.com>
@@ -11904,10 +11963,6 @@ W: http://kernsec.org/
S: Supported
F: security/
-SECURITY CONTACT
-M: Security Officers <security@kernel.org>
-S: Supported
-
SELINUX SECURITY MODULE
M: Paul Moore <paul@paul-moore.com>
M: Stephen Smalley <sds@tycho.nsa.gov>
@@ -11921,62 +11976,32 @@ F: security/selinux/
F: scripts/selinux/
F: Documentation/admin-guide/LSM/SELinux.rst
-APPARMOR SECURITY MODULE
-M: John Johansen <john.johansen@canonical.com>
-L: apparmor@lists.ubuntu.com (subscribers-only, general discussion)
-W: apparmor.wiki.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/jj/apparmor-dev.git
-S: Supported
-F: security/apparmor/
-F: Documentation/admin-guide/LSM/apparmor.rst
-
-LOADPIN SECURITY MODULE
-M: Kees Cook <keescook@chromium.org>
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git lsm/loadpin
-S: Supported
-F: security/loadpin/
-F: Documentation/admin-guide/LSM/LoadPin.rst
-
-YAMA SECURITY MODULE
-M: Kees Cook <keescook@chromium.org>
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git yama/tip
-S: Supported
-F: security/yama/
-F: Documentation/admin-guide/LSM/Yama.rst
-
SENSABLE PHANTOM
M: Jiri Slaby <jirislaby@gmail.com>
S: Maintained
F: drivers/misc/phantom.c
F: include/uapi/linux/phantom.h
-Emulex 10Gbps iSCSI - OneConnect DRIVER
-M: Subbu Seetharaman <subbu.seetharaman@broadcom.com>
-M: Ketan Mukadam <ketan.mukadam@broadcom.com>
-M: Jitendra Bhivare <jitendra.bhivare@broadcom.com>
-L: linux-scsi@vger.kernel.org
-W: http://www.broadcom.com
-S: Supported
-F: drivers/scsi/be2iscsi/
+SERIAL DEVICE BUS
+M: Rob Herring <robh@kernel.org>
+L: linux-serial@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/serial/slave-device.txt
+F: drivers/tty/serdev/
+F: include/linux/serdev.h
-Emulex 10Gbps NIC BE2, BE3-R, Lancer, Skyhawk-R DRIVER (be2net)
-M: Sathya Perla <sathya.perla@broadcom.com>
-M: Ajit Khaparde <ajit.khaparde@broadcom.com>
-M: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
-M: Somnath Kotur <somnath.kotur@broadcom.com>
-L: netdev@vger.kernel.org
-W: http://www.emulex.com
-S: Supported
-F: drivers/net/ethernet/emulex/benet/
+SERIAL DRIVERS
+M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+L: linux-serial@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/serial/
+F: drivers/tty/serial/
-EMULEX ONECONNECT ROCE DRIVER
-M: Selvin Xavier <selvin.xavier@broadcom.com>
-M: Devesh Sharma <devesh.sharma@broadcom.com>
-L: linux-rdma@vger.kernel.org
-W: http://www.broadcom.com
-S: Odd Fixes
-F: drivers/infiniband/hw/ocrdma/
-F: include/uapi/rdma/ocrdma-abi.h
+SERIAL IR RECEIVER
+M: Sean Young <sean@mess.org>
+L: linux-media@vger.kernel.org
+S: Maintained
+F: drivers/media/rc/serial_ir.c
SFC NETWORK DRIVER
M: Solarflare linux maintainers <linux-net-drivers@solarflare.com>
@@ -12005,6 +12030,24 @@ M: Robin Holt <robinmholt@gmail.com>
S: Maintained
F: drivers/misc/sgi-xp/
+SHARED MEMORY COMMUNICATIONS (SMC) SOCKETS
+M: Ursula Braun <ubraun@linux.vnet.ibm.com>
+L: linux-s390@vger.kernel.org
+W: http://www.ibm.com/developerworks/linux/linux390/
+S: Supported
+F: net/smc/
+
+SH_VEU V4L2 MEM2MEM DRIVER
+L: linux-media@vger.kernel.org
+S: Orphan
+F: drivers/media/platform/sh_veu.c
+
+SH_VOU V4L2 OUTPUT DRIVER
+L: linux-media@vger.kernel.org
+S: Orphan
+F: drivers/media/platform/sh_vou.c
+F: include/media/drv-intf/sh_vou.h
+
SI2157 MEDIA DRIVER
M: Antti Palosaari <crope@iki.fi>
L: linux-media@vger.kernel.org
@@ -12087,24 +12130,14 @@ S: Maintained
F: drivers/input/touchscreen/silead.c
F: drivers/platform/x86/silead_dmi.c
-SIMPLEFB FB DRIVER
-M: Hans de Goede <hdegoede@redhat.com>
+SILICON MOTION SM712 FRAME BUFFER DRIVER
+M: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
+M: Teddy Wang <teddy.wang@siliconmotion.com>
+M: Sudip Mukherjee <sudip.mukherjee@codethink.co.uk>
L: linux-fbdev@vger.kernel.org
S: Maintained
-F: Documentation/devicetree/bindings/display/simple-framebuffer.txt
-F: drivers/video/fbdev/simplefb.c
-F: include/linux/platform_data/simplefb.h
-
-SH_VEU V4L2 MEM2MEM DRIVER
-L: linux-media@vger.kernel.org
-S: Orphan
-F: drivers/media/platform/sh_veu.c
-
-SH_VOU V4L2 OUTPUT DRIVER
-L: linux-media@vger.kernel.org
-S: Orphan
-F: drivers/media/platform/sh_vou.c
-F: include/media/drv-intf/sh_vou.h
+F: drivers/video/fbdev/sm712*
+F: Documentation/fb/sm712fb.txt
SIMPLE FIRMWARE INTERFACE (SFI)
M: Len Brown <lenb@kernel.org>
@@ -12116,6 +12149,14 @@ F: arch/x86/platform/sfi/
F: drivers/sfi/
F: include/linux/sfi*.h
+SIMPLEFB FB DRIVER
+M: Hans de Goede <hdegoede@redhat.com>
+L: linux-fbdev@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/display/simple-framebuffer.txt
+F: drivers/video/fbdev/simplefb.c
+F: include/linux/platform_data/simplefb.h
+
SIMTEC EB110ATX (Chalice CATS)
P: Ben Dooks
P: Vincent Sanders <vince@simtec.co.uk>
@@ -12140,61 +12181,6 @@ F: lib/siphash.c
F: lib/test_siphash.c
F: include/linux/siphash.h
-TI DAVINCI MACHINE SUPPORT
-M: Sekhar Nori <nsekhar@ti.com>
-M: Kevin Hilman <khilman@kernel.org>
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/nsekhar/linux-davinci.git
-S: Supported
-F: arch/arm/mach-davinci/
-F: drivers/i2c/busses/i2c-davinci.c
-F: arch/arm/boot/dts/da850*
-
-TI DAVINCI SERIES MEDIA DRIVER
-M: "Lad, Prabhakar" <prabhakar.csengg@gmail.com>
-L: linux-media@vger.kernel.org
-W: https://linuxtv.org
-Q: http://patchwork.linuxtv.org/project/linux-media/list/
-T: git git://linuxtv.org/mhadli/v4l-dvb-davinci_devices.git
-S: Maintained
-F: drivers/media/platform/davinci/
-F: include/media/davinci/
-
-TI DAVINCI SERIES GPIO DRIVER
-M: Keerthy <j-keerthy@ti.com>
-L: linux-gpio@vger.kernel.org
-S: Maintained
-F: Documentation/devicetree/bindings/gpio/gpio-davinci.txt
-F: drivers/gpio/gpio-davinci.c
-
-TI AM437X VPFE DRIVER
-M: "Lad, Prabhakar" <prabhakar.csengg@gmail.com>
-L: linux-media@vger.kernel.org
-W: https://linuxtv.org
-Q: http://patchwork.linuxtv.org/project/linux-media/list/
-T: git git://linuxtv.org/mhadli/v4l-dvb-davinci_devices.git
-S: Maintained
-F: drivers/media/platform/am437x/
-
-OV2659 OMNIVISION SENSOR DRIVER
-M: "Lad, Prabhakar" <prabhakar.csengg@gmail.com>
-L: linux-media@vger.kernel.org
-W: https://linuxtv.org
-Q: http://patchwork.linuxtv.org/project/linux-media/list/
-T: git git://linuxtv.org/mhadli/v4l-dvb-davinci_devices.git
-S: Maintained
-F: drivers/media/i2c/ov2659.c
-F: include/media/i2c/ov2659.h
-
-SILICON MOTION SM712 FRAME BUFFER DRIVER
-M: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
-M: Teddy Wang <teddy.wang@siliconmotion.com>
-M: Sudip Mukherjee <sudip.mukherjee@codethink.co.uk>
-L: linux-fbdev@vger.kernel.org
-S: Maintained
-F: drivers/video/fbdev/sm712*
-F: Documentation/fb/sm712fb.txt
-
SIS 190 ETHERNET DRIVER
M: Francois Romieu <romieu@fr.zoreil.com>
L: netdev@vger.kernel.org
@@ -12255,14 +12241,6 @@ S: Maintained
F: Documentation/admin-guide/LSM/Smack.rst
F: security/smack/
-DRIVERS FOR ADAPTIVE VOLTAGE SCALING (AVS)
-M: Kevin Hilman <khilman@kernel.org>
-M: Nishanth Menon <nm@ti.com>
-S: Maintained
-F: drivers/power/avs/
-F: include/linux/power/smartreflex.h
-L: linux-pm@vger.kernel.org
-
SMC91x ETHERNET DRIVER
M: Nicolas Pitre <nico@fluxnic.net>
S: Odd Fixes
@@ -12300,6 +12278,12 @@ S: Supported
F: Documentation/hwmon/sch5627
F: drivers/hwmon/sch5627.c
+SMSC UFX6000 and UFX7000 USB to VGA DRIVER
+M: Steve Glendinning <steve.glendinning@shawell.net>
+L: linux-fbdev@vger.kernel.org
+S: Maintained
+F: drivers/video/fbdev/smscufx.c
+
SMSC47B397 HARDWARE MONITOR DRIVER
M: Jean Delvare <jdelvare@suse.com>
L: linux-hwmon@vger.kernel.org
@@ -12320,12 +12304,6 @@ L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/smsc/smsc9420.*
-SMSC UFX6000 and UFX7000 USB to VGA DRIVER
-M: Steve Glendinning <steve.glendinning@shawell.net>
-L: linux-fbdev@vger.kernel.org
-S: Maintained
-F: drivers/video/fbdev/smscufx.c
-
SOC-CAMERA V4L2 SUBSYSTEM
M: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
L: linux-media@vger.kernel.org
@@ -12340,6 +12318,15 @@ M: Chris Boot <bootc@bootc.net>
S: Maintained
F: drivers/leds/leds-net48xx.c
+SOFT-ROCE DRIVER (rxe)
+M: Moni Shoua <monis@mellanox.com>
+L: linux-rdma@vger.kernel.org
+S: Supported
+W: https://github.com/SoftRoCE/rxe-dev/wiki/rxe-dev:-Home
+Q: http://patchwork.kernel.org/project/linux-rdma/list/
+F: drivers/infiniband/sw/rxe/
+F: include/uapi/rdma/rdma_user_rxe.h
+
SOFTLOGIC 6x10 MPEG CODEC
M: Bluecherry Maintainers <maintainers@bluecherrydvr.com>
M: Anton Sviridenko <anton@corp.bluecherry.net>
@@ -12372,16 +12359,6 @@ S: Maintained
F: drivers/ssb/
F: include/linux/ssb/
-SONY VAIO CONTROL DEVICE DRIVER
-M: Mattia Dongili <malattia@linux.it>
-L: platform-driver-x86@vger.kernel.org
-W: http://www.linux.it/~malattia/wiki/index.php/Sony_drivers
-S: Maintained
-F: Documentation/laptops/sony-laptop.txt
-F: drivers/char/sonypi.c
-F: drivers/platform/x86/sony-laptop.c
-F: include/linux/sony-laptop.h
-
SONY MEMORYSTICK CARD SUPPORT
M: Alex Dubov <oakad@yahoo.com>
W: http://tifmxx.berlios.de/
@@ -12393,6 +12370,16 @@ M: Maxim Levitsky <maximlevitsky@gmail.com>
S: Maintained
F: drivers/memstick/core/ms_block.*
+SONY VAIO CONTROL DEVICE DRIVER
+M: Mattia Dongili <malattia@linux.it>
+L: platform-driver-x86@vger.kernel.org
+W: http://www.linux.it/~malattia/wiki/index.php/Sony_drivers
+S: Maintained
+F: Documentation/laptops/sony-laptop.txt
+F: drivers/char/sonypi.c
+F: drivers/platform/x86/sony-laptop.c
+F: include/linux/sony-laptop.h
+
SOUND
M: Jaroslav Kysela <perex@perex.cz>
M: Takashi Iwai <tiwai@suse.com>
@@ -12418,6 +12405,13 @@ F: include/uapi/sound/compress_*
F: sound/core/compress_offload.c
F: sound/soc/soc-compress.c
+SOUND - DMAENGINE HELPERS
+M: Lars-Peter Clausen <lars@metafoo.de>
+S: Supported
+F: include/sound/dmaengine_pcm.h
+F: sound/core/pcm_dmaengine.c
+F: sound/soc/soc-generic-dmaengine-pcm.c
+
SOUND - SOC LAYER / DYNAMIC AUDIO POWER MANAGEMENT (ASoC)
M: Liam Girdwood <lgirdwood@gmail.com>
M: Mark Brown <broonie@kernel.org>
@@ -12430,13 +12424,6 @@ F: Documentation/sound/alsa/soc/
F: sound/soc/
F: include/sound/soc*
-SOUND - DMAENGINE HELPERS
-M: Lars-Peter Clausen <lars@metafoo.de>
-S: Supported
-F: include/sound/dmaengine_pcm.h
-F: sound/core/pcm_dmaengine.c
-F: sound/soc/soc-generic-dmaengine-pcm.c
-
SP2 MEDIA DRIVER
M: Olli Salonen <olli.salonen@iki.fi>
L: linux-media@vger.kernel.org
@@ -12479,21 +12466,21 @@ T: git git://git.kernel.org/pub/scm/devel/sparse/chrisl/sparse.git
S: Maintained
F: include/linux/compiler.h
-SPEAR PLATFORM SUPPORT
+SPEAR CLOCK FRAMEWORK SUPPORT
M: Viresh Kumar <vireshk@kernel.org>
-M: Shiraz Hashim <shiraz.linux.kernel@gmail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.st.com/spear
S: Maintained
-F: arch/arm/boot/dts/spear*
-F: arch/arm/mach-spear/
+F: drivers/clk/spear/
-SPEAR CLOCK FRAMEWORK SUPPORT
+SPEAR PLATFORM SUPPORT
M: Viresh Kumar <vireshk@kernel.org>
+M: Shiraz Hashim <shiraz.linux.kernel@gmail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.st.com/spear
S: Maintained
-F: drivers/clk/spear/
+F: arch/arm/boot/dts/spear*
+F: arch/arm/mach-spear/
SPI NOR SUBSYSTEM
M: Cyrille Pitchen <cyrille.pitchen@wedev4u.fr>
@@ -12527,6 +12514,15 @@ S: Supported
F: Documentation/networking/spider_net.txt
F: drivers/net/ethernet/toshiba/spider_net*
+SPMI SUBSYSTEM
+R: Stephen Boyd <sboyd@codeaurora.org>
+L: linux-arm-msm@vger.kernel.org
+F: Documentation/devicetree/bindings/spmi/
+F: drivers/spmi/
+F: include/dt-bindings/spmi/spmi.h
+F: include/linux/spmi.h
+F: include/trace/events/spmi.h
+
SPU FILE SYSTEM
M: Jeremy Kerr <jk@ozlabs.org>
L: linuxppc-dev@lists.ozlabs.org
@@ -12555,13 +12551,6 @@ L: stable@vger.kernel.org
S: Supported
F: Documentation/process/stable-kernel-rules.rst
-STAGING SUBSYSTEM
-M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
-L: devel@driverdev.osuosl.org
-S: Supported
-F: drivers/staging/
-
STAGING - COMEDI
M: Ian Abbott <abbotti@mev.co.uk>
M: H Hartley Sweeten <hsweeten@visionengravers.com>
@@ -12651,11 +12640,45 @@ M: Arnaud Patard <arnaud.patard@rtp-net.org>
S: Odd Fixes
F: drivers/staging/xgifb/
+STAGING SUBSYSTEM
+M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
+L: devel@driverdev.osuosl.org
+S: Supported
+F: drivers/staging/
+
STARFIRE/DURALAN NETWORK DRIVER
M: Ion Badulescu <ionut@badula.org>
S: Odd Fixes
F: drivers/net/ethernet/adaptec/starfire*
+STEC S1220 SKD DRIVER
+M: Bart Van Assche <bart.vanassche@wdc.com>
+L: linux-block@vger.kernel.org
+S: Maintained
+F: drivers/block/skd*[ch]
+
+STI CEC DRIVER
+M: Benjamin Gaignard <benjamin.gaignard@linaro.org>
+S: Maintained
+F: drivers/staging/media/st-cec/
+F: Documentation/devicetree/bindings/media/stih-cec.txt
+
+STK1160 USB VIDEO CAPTURE DRIVER
+M: Ezequiel Garcia <ezequiel@vanguardiasur.com.ar>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/usb/stk1160/
+
+STMMAC ETHERNET DRIVER
+M: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+M: Alexandre Torgue <alexandre.torgue@st.com>
+L: netdev@vger.kernel.org
+W: http://www.stlinux.com
+S: Supported
+F: drivers/net/ethernet/stmicro/stmmac/
+
SUN3/3X
M: Sam Creasey <sammy@sammy.net>
W: http://sammy.net/sun3/
@@ -12727,6 +12750,20 @@ S: Supported
F: net/switchdev/
F: include/net/switchdev.h
+SYNC FILE FRAMEWORK
+M: Sumit Semwal <sumit.semwal@linaro.org>
+R: Gustavo Padovan <gustavo@padovan.org>
+S: Maintained
+L: linux-media@vger.kernel.org
+L: dri-devel@lists.freedesktop.org
+F: drivers/dma-buf/sync_*
+F: drivers/dma-buf/dma-fence*
+F: drivers/dma-buf/sw_sync.c
+F: include/linux/sync_file.h
+F: include/uapi/linux/sync_file.h
+F: Documentation/sync_file.txt
+T: git git://anongit.freedesktop.org/drm/drm-misc
+
SYNOPSYS ARC ARCHITECTURE
M: Vineet Gupta <vgupta@synopsys.com>
L: linux-snps-arc@lists.infradead.org
@@ -12745,6 +12782,35 @@ F: arch/arc/plat-axs10x
F: arch/arc/boot/dts/ax*
F: Documentation/devicetree/bindings/arc/axs10*
+SYNOPSYS DESIGNWARE DMAC DRIVER
+M: Viresh Kumar <vireshk@kernel.org>
+M: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+S: Maintained
+F: include/linux/dma/dw.h
+F: include/linux/platform_data/dma-dw.h
+F: drivers/dma/dw/
+
+SYNOPSYS DESIGNWARE ENTERPRISE ETHERNET DRIVER
+M: Jie Deng <jiedeng@synopsys.com>
+L: netdev@vger.kernel.org
+S: Supported
+F: drivers/net/ethernet/synopsys/
+
+SYNOPSYS DESIGNWARE I2C DRIVER
+M: Jarkko Nikula <jarkko.nikula@linux.intel.com>
+R: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+R: Mika Westerberg <mika.westerberg@linux.intel.com>
+L: linux-i2c@vger.kernel.org
+S: Maintained
+F: drivers/i2c/busses/i2c-designware-*
+F: include/linux/platform_data/i2c-designware.h
+
+SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER
+M: Jaehoon Chung <jh80.chung@samsung.com>
+L: linux-mmc@vger.kernel.org
+S: Maintained
+F: drivers/mmc/host/dw_mmc*
+
SYSTEM CONFIGURATION (SYSCON)
M: Lee Jones <lee.jones@linaro.org>
M: Arnd Bergmann <arnd@arndb.de>
@@ -12752,6 +12818,16 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/lee/mfd.git
S: Supported
F: drivers/mfd/syscon.c
+SYSTEM CONTROL & POWER INTERFACE (SCPI) Message Protocol drivers
+M: Sudeep Holla <sudeep.holla@arm.com>
+L: linux-arm-kernel@lists.infradead.org
+S: Maintained
+F: Documentation/devicetree/bindings/arm/arm,scpi.txt
+F: drivers/clk/clk-scpi.c
+F: drivers/cpufreq/scpi-cpufreq.c
+F: drivers/firmware/arm_scpi.c
+F: include/linux/scpi_protocol.h
+
SYSTEM RESET/SHUTDOWN DRIVERS
M: Sebastian Reichel <sre@kernel.org>
L: linux-pm@vger.kernel.org
@@ -12760,6 +12836,15 @@ S: Maintained
F: Documentation/devicetree/bindings/power/reset/
F: drivers/power/reset/
+SYSTEM TRACE MODULE CLASS
+M: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/ash/stm.git
+F: Documentation/trace/stm.txt
+F: drivers/hwtracing/stm/
+F: include/linux/stm.h
+F: include/uapi/linux/stm.h
+
SYSV FILESYSTEM
M: Christoph Hellwig <hch@infradead.org>
S: Maintained
@@ -12929,6 +13014,14 @@ L: linux-media@vger.kernel.org
S: Maintained
F: drivers/media/rc/ttusbir.c
+TEE SUBSYSTEM
+M: Jens Wiklander <jens.wiklander@linaro.org>
+S: Maintained
+F: include/linux/tee_drv.h
+F: include/uapi/linux/tee.h
+F: drivers/tee/
+F: Documentation/tee.txt
+
TEGRA ARCHITECTURE SUPPORT
M: Thierry Reding <thierry.reding@gmail.com>
M: Jonathan Hunter <jonathanh@nvidia.com>
@@ -13060,6 +13153,28 @@ T: git git://repo.or.cz/linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git
S: Maintained
F: drivers/platform/x86/thinkpad_acpi.c
+THUNDERBOLT DRIVER
+M: Andreas Noever <andreas.noever@gmail.com>
+M: Michael Jamet <michael.jamet@intel.com>
+M: Mika Westerberg <mika.westerberg@linux.intel.com>
+M: Yehezkel Bernat <yehezkel.bernat@intel.com>
+S: Maintained
+F: drivers/thunderbolt/
+
+THUNDERX GPIO DRIVER
+M: David Daney <david.daney@cavium.com>
+S: Maintained
+F: drivers/gpio/gpio-thunderx.c
+
+TI AM437X VPFE DRIVER
+M: "Lad, Prabhakar" <prabhakar.csengg@gmail.com>
+L: linux-media@vger.kernel.org
+W: https://linuxtv.org
+Q: http://patchwork.linuxtv.org/project/linux-media/list/
+T: git git://linuxtv.org/mhadli/v4l-dvb-davinci_devices.git
+S: Maintained
+F: drivers/media/platform/am437x/
+
TI BANDGAP AND THERMAL DRIVER
M: Eduardo Valentin <edubezval@gmail.com>
M: Keerthy <j-keerthy@ti.com>
@@ -13068,13 +13183,11 @@ L: linux-omap@vger.kernel.org
S: Maintained
F: drivers/thermal/ti-soc-thermal/
-TI VPE/CAL DRIVERS
-M: Benoit Parrot <bparrot@ti.com>
-L: linux-media@vger.kernel.org
-W: http://linuxtv.org/
-Q: http://patchwork.linuxtv.org/project/linux-media/list/
-S: Maintained
-F: drivers/media/platform/ti-vpe/
+TI BQ27XXX POWER SUPPLY DRIVER
+R: Andrew F. Davis <afd@ti.com>
+F: include/linux/power/bq27xxx_battery.h
+F: drivers/power/supply/bq27xxx_battery.c
+F: drivers/power/supply/bq27xxx_battery_i2c.c
TI CDCE706 CLOCK DRIVER
M: Max Filippov <jcmvbkbc@gmail.com>
@@ -13088,6 +13201,33 @@ S: Maintained
F: drivers/clk/ti/
F: include/linux/clk/ti.h
+TI DAVINCI MACHINE SUPPORT
+M: Sekhar Nori <nsekhar@ti.com>
+M: Kevin Hilman <khilman@kernel.org>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/nsekhar/linux-davinci.git
+S: Supported
+F: arch/arm/mach-davinci/
+F: drivers/i2c/busses/i2c-davinci.c
+F: arch/arm/boot/dts/da850*
+
+TI DAVINCI SERIES GPIO DRIVER
+M: Keerthy <j-keerthy@ti.com>
+L: linux-gpio@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/gpio/gpio-davinci.txt
+F: drivers/gpio/gpio-davinci.c
+
+TI DAVINCI SERIES MEDIA DRIVER
+M: "Lad, Prabhakar" <prabhakar.csengg@gmail.com>
+L: linux-media@vger.kernel.org
+W: https://linuxtv.org
+Q: http://patchwork.linuxtv.org/project/linux-media/list/
+T: git git://linuxtv.org/mhadli/v4l-dvb-davinci_devices.git
+S: Maintained
+F: drivers/media/platform/davinci/
+F: include/media/davinci/
+
TI ETHERNET SWITCH DRIVER (CPSW)
R: Grygorii Strashko <grygorii.strashko@ti.com>
L: linux-omap@vger.kernel.org
@@ -13111,7 +13251,6 @@ S: Maintained
F: drivers/soc/ti/*
T: git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git
-
TI LM49xxx FAMILY ASoC CODEC DRIVERS
M: M R Swami Reddy <mr.swami.reddy@ti.com>
M: Vishwas A Deshpande <vishwas.a.deshpande@ti.com>
@@ -13156,12 +13295,28 @@ L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Odd Fixes
F: sound/soc/codecs/tas571x*
+TI TRF7970A NFC DRIVER
+M: Mark Greer <mgreer@animalcreek.com>
+L: linux-wireless@vger.kernel.org
+L: linux-nfc@lists.01.org (moderated for non-subscribers)
+S: Supported
+F: drivers/nfc/trf7970a.c
+F: Documentation/devicetree/bindings/net/nfc/trf7970a.txt
+
TI TWL4030 SERIES SOC CODEC DRIVER
M: Peter Ujfalusi <peter.ujfalusi@ti.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Maintained
F: sound/soc/codecs/twl4030*
+TI VPE/CAL DRIVERS
+M: Benoit Parrot <bparrot@ti.com>
+L: linux-media@vger.kernel.org
+W: http://linuxtv.org/
+Q: http://patchwork.linuxtv.org/project/linux-media/list/
+S: Maintained
+F: drivers/media/platform/ti-vpe/
+
TI WILINK WIRELESS DRIVERS
L: linux-wireless@vger.kernel.org
W: http://wireless.kernel.org/en/users/Drivers/wl12xx
@@ -13171,16 +13326,6 @@ S: Orphan
F: drivers/net/wireless/ti/
F: include/linux/wl12xx.h
-TIPC NETWORK LAYER
-M: Jon Maloy <jon.maloy@ericsson.com>
-M: Ying Xue <ying.xue@windriver.com>
-L: netdev@vger.kernel.org (core kernel code)
-L: tipc-discussion@lists.sourceforge.net (user apps, general discussion)
-W: http://tipc.sourceforge.net/
-S: Maintained
-F: include/uapi/linux/tipc*.h
-F: net/tipc/
-
TILE ARCHITECTURE
M: Chris Metcalf <cmetcalf@mellanox.com>
W: http://www.mellanox.com/repository/solutions/tile-scm/
@@ -13196,6 +13341,34 @@ F: drivers/tty/serial/tilegx.c
F: drivers/usb/host/*-tilegx.c
F: include/linux/usb/tilegx.h
+TIMEKEEPING, CLOCKSOURCE CORE, NTP, ALARMTIMER
+M: John Stultz <john.stultz@linaro.org>
+M: Thomas Gleixner <tglx@linutronix.de>
+R: Stephen Boyd <sboyd@codeaurora.org>
+L: linux-kernel@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
+S: Supported
+F: include/linux/clocksource.h
+F: include/linux/time.h
+F: include/linux/timex.h
+F: include/uapi/linux/time.h
+F: include/uapi/linux/timex.h
+F: kernel/time/clocksource.c
+F: kernel/time/time*.c
+F: kernel/time/alarmtimer.c
+F: kernel/time/ntp.c
+F: tools/testing/selftests/timers/
+
+TIPC NETWORK LAYER
+M: Jon Maloy <jon.maloy@ericsson.com>
+M: Ying Xue <ying.xue@windriver.com>
+L: netdev@vger.kernel.org (core kernel code)
+L: tipc-discussion@lists.sourceforge.net (user apps, general discussion)
+W: http://tipc.sourceforge.net/
+S: Maintained
+F: include/uapi/linux/tipc*.h
+F: net/tipc/
+
TLAN NETWORK DRIVER
M: Samuel Chessman <chessman@tux.org>
L: tlan-devel@lists.sourceforge.net (subscribers-only)
@@ -13204,6 +13377,38 @@ S: Maintained
F: Documentation/networking/tlan.txt
F: drivers/net/ethernet/ti/tlan.*
+TM6000 VIDEO4LINUX DRIVER
+M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M: Mauro Carvalho Chehab <mchehab@kernel.org>
+L: linux-media@vger.kernel.org
+W: https://linuxtv.org
+T: git git://linuxtv.org/media_tree.git
+S: Odd fixes
+F: drivers/media/usb/tm6000/
+F: Documentation/media/v4l-drivers/tm6000*
+
+TMIO/SDHI MMC DRIVER
+M: Wolfram Sang <wsa+renesas@sang-engineering.com>
+L: linux-mmc@vger.kernel.org
+S: Supported
+F: drivers/mmc/host/tmio_mmc*
+F: drivers/mmc/host/renesas_sdhi*
+F: include/linux/mfd/tmio.h
+
+TMP401 HARDWARE MONITOR DRIVER
+M: Guenter Roeck <linux@roeck-us.net>
+L: linux-hwmon@vger.kernel.org
+S: Maintained
+F: Documentation/hwmon/tmp401
+F: drivers/hwmon/tmp401.c
+
+TMPFS (SHMEM FILESYSTEM)
+M: Hugh Dickins <hughd@google.com>
+L: linux-mm@kvack.org
+S: Maintained
+F: include/linux/shmem_fs.h
+F: mm/shmem.c
+
TOMOYO SECURITY MODULE
M: Kentaro Takeda <takedakn@nttdata.co.jp>
M: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
@@ -13240,12 +13445,6 @@ L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/platform/x86/toshiba_haps.c
-TOSHIBA WMI HOTKEYS DRIVER
-M: Azael Avalos <coproscefalo@gmail.com>
-L: platform-driver-x86@vger.kernel.org
-S: Maintained
-F: drivers/platform/x86/toshiba-wmi.c
-
TOSHIBA SMM DRIVER
M: Jonathan Buzzard <jonathan@buzzard.org.uk>
W: http://www.buzzard.org.uk/toshiba/
@@ -13261,62 +13460,11 @@ S: Maintained
F: drivers/media/i2c/tc358743*
F: include/media/i2c/tc358743.h
-TMIO/SDHI MMC DRIVER
-M: Wolfram Sang <wsa+renesas@sang-engineering.com>
-L: linux-mmc@vger.kernel.org
-S: Supported
-F: drivers/mmc/host/tmio_mmc*
-F: drivers/mmc/host/renesas_sdhi*
-F: include/linux/mfd/tmio.h
-
-TMP401 HARDWARE MONITOR DRIVER
-M: Guenter Roeck <linux@roeck-us.net>
-L: linux-hwmon@vger.kernel.org
-S: Maintained
-F: Documentation/hwmon/tmp401
-F: drivers/hwmon/tmp401.c
-
-TMPFS (SHMEM FILESYSTEM)
-M: Hugh Dickins <hughd@google.com>
-L: linux-mm@kvack.org
-S: Maintained
-F: include/linux/shmem_fs.h
-F: mm/shmem.c
-
-TM6000 VIDEO4LINUX DRIVER
-M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
-M: Mauro Carvalho Chehab <mchehab@kernel.org>
-L: linux-media@vger.kernel.org
-W: https://linuxtv.org
-T: git git://linuxtv.org/media_tree.git
-S: Odd fixes
-F: drivers/media/usb/tm6000/
-F: Documentation/media/v4l-drivers/tm6000*
-
-TW5864 VIDEO4LINUX DRIVER
-M: Bluecherry Maintainers <maintainers@bluecherrydvr.com>
-M: Anton Sviridenko <anton@corp.bluecherry.net>
-M: Andrey Utkin <andrey.utkin@corp.bluecherry.net>
-M: Andrey Utkin <andrey_utkin@fastmail.com>
-L: linux-media@vger.kernel.org
-S: Supported
-F: drivers/media/pci/tw5864/
-
-TW68 VIDEO4LINUX DRIVER
-M: Hans Verkuil <hverkuil@xs4all.nl>
-L: linux-media@vger.kernel.org
-T: git git://linuxtv.org/media_tree.git
-W: https://linuxtv.org
-S: Odd Fixes
-F: drivers/media/pci/tw68/
-
-TW686X VIDEO4LINUX DRIVER
-M: Ezequiel Garcia <ezequiel@vanguardiasur.com.ar>
-L: linux-media@vger.kernel.org
-T: git git://linuxtv.org/media_tree.git
-W: http://linuxtv.org
+TOSHIBA WMI HOTKEYS DRIVER
+M: Azael Avalos <coproscefalo@gmail.com>
+L: platform-driver-x86@vger.kernel.org
S: Maintained
-F: drivers/media/pci/tw686x/
+F: drivers/platform/x86/toshiba-wmi.c
TPM DEVICE DRIVER
M: Peter Huewe <peterhuewe@gmx.de>
@@ -13418,6 +13566,31 @@ S: Maintained
F: drivers/tc/
F: include/linux/tc.h
+TW5864 VIDEO4LINUX DRIVER
+M: Bluecherry Maintainers <maintainers@bluecherrydvr.com>
+M: Anton Sviridenko <anton@corp.bluecherry.net>
+M: Andrey Utkin <andrey.utkin@corp.bluecherry.net>
+M: Andrey Utkin <andrey_utkin@fastmail.com>
+L: linux-media@vger.kernel.org
+S: Supported
+F: drivers/media/pci/tw5864/
+
+TW68 VIDEO4LINUX DRIVER
+M: Hans Verkuil <hverkuil@xs4all.nl>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+W: https://linuxtv.org
+S: Odd Fixes
+F: drivers/media/pci/tw68/
+
+TW686X VIDEO4LINUX DRIVER
+M: Ezequiel Garcia <ezequiel@vanguardiasur.com.ar>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+W: http://linuxtv.org
+S: Maintained
+F: drivers/media/pci/tw686x/
+
UBI FILE SYSTEM (UBIFS)
M: Richard Weinberger <richard@nod.at>
M: Artem Bityutskiy <dedekind1@gmail.com>
@@ -13467,6 +13640,13 @@ S: Maintained
F: drivers/hid/uhid.c
F: include/uapi/linux/uhid.h
+ULPI BUS
+M: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+L: linux-usb@vger.kernel.org
+S: Maintained
+F: drivers/usb/common/ulpi.c
+F: include/linux/ulpi/
+
ULTRA-WIDEBAND (UWB) SUBSYSTEM:
L: linux-usb@vger.kernel.org
S: Orphan
@@ -13527,6 +13707,14 @@ F: drivers/mtd/ubi/
F: include/linux/mtd/ubi.h
F: include/uapi/mtd/ubi-user.h
+USB "USBNET" DRIVER FRAMEWORK
+M: Oliver Neukum <oneukum@suse.com>
+L: netdev@vger.kernel.org
+W: http://www.linux-usb.org/usbnet
+S: Maintained
+F: drivers/net/usb/usbnet.c
+F: include/linux/usb/usbnet.h
+
USB ACM DRIVER
M: Oliver Neukum <oneukum@suse.com>
L: linux-usb@vger.kernel.org
@@ -13750,14 +13938,6 @@ L: linux-usb@vger.kernel.org
S: Maintained
F: drivers/usb/host/uhci*
-USB "USBNET" DRIVER FRAMEWORK
-M: Oliver Neukum <oneukum@suse.com>
-L: netdev@vger.kernel.org
-W: http://www.linux-usb.org/usbnet
-S: Maintained
-F: drivers/net/usb/usbnet.c
-F: include/linux/usb/usbnet.h
-
USB VIDEO CLASS
M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
L: linux-uvc-devel@lists.sourceforge.net (subscribers-only)
@@ -13812,13 +13992,6 @@ S: Maintained
F: Documentation/media/v4l-drivers/zr364xx*
F: drivers/media/usb/zr364xx/
-ULPI BUS
-M: Heikki Krogerus <heikki.krogerus@linux.intel.com>
-L: linux-usb@vger.kernel.org
-S: Maintained
-F: drivers/usb/common/ulpi.c
-F: include/linux/ulpi/
-
USER-MODE LINUX (UML)
M: Jeff Dike <jdike@addtoit.com>
M: Richard Weinberger <richard@nod.at>
@@ -13912,6 +14085,37 @@ F: drivers/gpu/vga/vga_switcheroo.c
F: include/linux/vga_switcheroo.h
T: git git://anongit.freedesktop.org/drm/drm-misc
+VIA RHINE NETWORK DRIVER
+S: Orphan
+F: drivers/net/ethernet/via/via-rhine.c
+
+VIA SD/MMC CARD CONTROLLER DRIVER
+M: Bruce Chang <brucechang@via.com.tw>
+M: Harald Welte <HaraldWelte@viatech.com>
+S: Maintained
+F: drivers/mmc/host/via-sdmmc.c
+
+VIA UNICHROME(PRO)/CHROME9 FRAMEBUFFER DRIVER
+M: Florian Tobias Schandinat <FlorianSchandinat@gmx.de>
+L: linux-fbdev@vger.kernel.org
+S: Maintained
+F: include/linux/via-core.h
+F: include/linux/via-gpio.h
+F: include/linux/via_i2c.h
+F: drivers/video/fbdev/via/
+
+VIA VELOCITY NETWORK DRIVER
+M: Francois Romieu <romieu@fr.zoreil.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/net/ethernet/via/via-velocity.*
+
+VIDEO MULTIPLEXER DRIVER
+M: Philipp Zabel <p.zabel@pengutronix.de>
+L: linux-media@vger.kernel.org
+S: Maintained
+F: drivers/media/platform/video-mux.c
+
VIDEOBUF2 FRAMEWORK
M: Pawel Osciak <pawel@osciak.com>
M: Marek Szyprowski <m.szyprowski@samsung.com>
@@ -13921,11 +14125,20 @@ S: Maintained
F: drivers/media/v4l2-core/videobuf2-*
F: include/media/videobuf2-*
-VIDEO MULTIPLEXER DRIVER
-M: Philipp Zabel <p.zabel@pengutronix.de>
+VIMC VIRTUAL MEDIA CONTROLLER DRIVER
+M: Helen Koike <helen.koike@collabora.com>
L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+W: https://linuxtv.org
S: Maintained
-F: drivers/media/platform/video-mux.c
+F: drivers/media/platform/vimc/*
+
+VIRT LIB
+M: Alex Williamson <alex.williamson@redhat.com>
+M: Paolo Bonzini <pbonzini@redhat.com>
+L: kvm@vger.kernel.org
+S: Supported
+F: virt/lib/
VIRTIO AND VHOST VSOCK DRIVER
M: Stefan Hajnoczi <stefanha@redhat.com>
@@ -13943,12 +14156,6 @@ F: drivers/net/vsockmon.c
F: drivers/vhost/vsock.c
F: drivers/vhost/vsock.h
-VIRTUAL SERIO DEVICE DRIVER
-M: Stephen Chandler Paul <thatslyude@gmail.com>
-S: Maintained
-F: drivers/input/serio/userio.c
-F: include/uapi/linux/userio.h
-
VIRTIO CONSOLE DRIVER
M: Amit Shah <amit@kernel.org>
L: virtualization@lists.linux-foundation.org
@@ -13970,6 +14177,15 @@ F: drivers/block/virtio_blk.c
F: include/linux/virtio*.h
F: include/uapi/linux/virtio_*.h
F: drivers/crypto/virtio/
+F: mm/balloon_compaction.c
+
+VIRTIO CRYPTO DRIVER
+M: Gonglei <arei.gonglei@huawei.com>
+L: virtualization@lists.linux-foundation.org
+L: linux-crypto@vger.kernel.org
+S: Maintained
+F: drivers/crypto/virtio/
+F: include/uapi/linux/virtio_crypto.h
VIRTIO DRIVERS FOR S390
M: Cornelia Huck <cohuck@redhat.com>
@@ -14007,45 +14223,11 @@ S: Maintained
F: drivers/virtio/virtio_input.c
F: include/uapi/linux/virtio_input.h
-VIRTIO CRYPTO DRIVER
-M: Gonglei <arei.gonglei@huawei.com>
-L: virtualization@lists.linux-foundation.org
-L: linux-crypto@vger.kernel.org
-S: Maintained
-F: drivers/crypto/virtio/
-F: include/uapi/linux/virtio_crypto.h
-
-VIA RHINE NETWORK DRIVER
-S: Orphan
-F: drivers/net/ethernet/via/via-rhine.c
-
-VIA SD/MMC CARD CONTROLLER DRIVER
-M: Bruce Chang <brucechang@via.com.tw>
-M: Harald Welte <HaraldWelte@viatech.com>
-S: Maintained
-F: drivers/mmc/host/via-sdmmc.c
-
-VIA UNICHROME(PRO)/CHROME9 FRAMEBUFFER DRIVER
-M: Florian Tobias Schandinat <FlorianSchandinat@gmx.de>
-L: linux-fbdev@vger.kernel.org
-S: Maintained
-F: include/linux/via-core.h
-F: include/linux/via-gpio.h
-F: include/linux/via_i2c.h
-F: drivers/video/fbdev/via/
-
-VIA VELOCITY NETWORK DRIVER
-M: Francois Romieu <romieu@fr.zoreil.com>
-L: netdev@vger.kernel.org
+VIRTUAL SERIO DEVICE DRIVER
+M: Stephen Chandler Paul <thatslyude@gmail.com>
S: Maintained
-F: drivers/net/ethernet/via/via-velocity.*
-
-VIRT LIB
-M: Alex Williamson <alex.williamson@redhat.com>
-M: Paolo Bonzini <pbonzini@redhat.com>
-L: kvm@vger.kernel.org
-S: Supported
-F: virt/lib/
+F: drivers/input/serio/userio.c
+F: include/uapi/linux/userio.h
VIVID VIRTUAL VIDEO DRIVER
M: Hans Verkuil <hverkuil@xs4all.nl>
@@ -14055,14 +14237,6 @@ W: https://linuxtv.org
S: Maintained
F: drivers/media/platform/vivid/*
-VIMC VIRTUAL MEDIA CONTROLLER DRIVER
-M: Helen Koike <helen.koike@collabora.com>
-L: linux-media@vger.kernel.org
-T: git git://linuxtv.org/media_tree.git
-W: https://linuxtv.org
-S: Maintained
-F: drivers/media/platform/vimc/*
-
VLYNQ BUS
M: Florian Fainelli <f.fainelli@gmail.com>
L: openwrt-devel@lists.openwrt.org (subscribers-only)
@@ -14082,12 +14256,6 @@ F: drivers/staging/vme/
F: drivers/vme/
F: include/linux/vme*
-VMWARE HYPERVISOR INTERFACE
-M: Alok Kataria <akataria@vmware.com>
-L: virtualization@lists.linux-foundation.org
-S: Supported
-F: arch/x86/kernel/cpu/vmware.c
-
VMWARE BALLOON DRIVER
M: Xavier Deguillard <xdeguillard@vmware.com>
M: Philip Moltmann <moltmann@vmware.com>
@@ -14096,6 +14264,27 @@ L: linux-kernel@vger.kernel.org
S: Maintained
F: drivers/misc/vmw_balloon.c
+VMWARE HYPERVISOR INTERFACE
+M: Alok Kataria <akataria@vmware.com>
+L: virtualization@lists.linux-foundation.org
+S: Supported
+F: arch/x86/kernel/cpu/vmware.c
+
+VMWARE PVRDMA DRIVER
+M: Adit Ranadive <aditr@vmware.com>
+M: VMware PV-Drivers <pv-drivers@vmware.com>
+L: linux-rdma@vger.kernel.org
+S: Maintained
+F: drivers/infiniband/hw/vmw_pvrdma/
+
+VMware PVSCSI driver
+M: Jim Gill <jgill@vmware.com>
+M: VMware PV-Drivers <pv-drivers@vmware.com>
+L: linux-scsi@vger.kernel.org
+S: Maintained
+F: drivers/scsi/vmw_pvscsi.c
+F: drivers/scsi/vmw_pvscsi.h
+
VMWARE VMMOUSE SUBDRIVER
M: "VMware Graphics" <linux-graphics-maintainer@vmware.com>
M: "VMware, Inc." <pv-drivers@vmware.com>
@@ -14111,21 +14300,6 @@ L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/vmxnet3/
-VMware PVSCSI driver
-M: Jim Gill <jgill@vmware.com>
-M: VMware PV-Drivers <pv-drivers@vmware.com>
-L: linux-scsi@vger.kernel.org
-S: Maintained
-F: drivers/scsi/vmw_pvscsi.c
-F: drivers/scsi/vmw_pvscsi.h
-
-VMWARE PVRDMA DRIVER
-M: Adit Ranadive <aditr@vmware.com>
-M: VMware PV-Drivers <pv-drivers@vmware.com>
-L: linux-rdma@vger.kernel.org
-S: Maintained
-F: drivers/infiniband/hw/vmw_pvrdma/
-
VOLTAGE AND CURRENT REGULATOR FRAMEWORK
M: Liam Girdwood <lgirdwood@gmail.com>
M: Mark Brown <broonie@kernel.org>
@@ -14218,12 +14392,39 @@ F: drivers/watchdog/
F: include/linux/watchdog.h
F: include/uapi/linux/watchdog.h
+WHISKEYCOVE PMIC GPIO DRIVER
+M: Kuppuswamy Sathyanarayanan <sathyanarayanan.kuppuswamy@linux.intel.com>
+L: linux-gpio@vger.kernel.org
+S: Maintained
+F: drivers/gpio/gpio-wcove.c
+
WIIMOTE HID DRIVER
M: David Herrmann <dh.herrmann@googlemail.com>
L: linux-input@vger.kernel.org
S: Maintained
F: drivers/hid/hid-wiimote*
+WILOCITY WIL6210 WIRELESS DRIVER
+M: Maya Erez <qca_merez@qca.qualcomm.com>
+L: linux-wireless@vger.kernel.org
+L: wil6210@qca.qualcomm.com
+S: Supported
+W: http://wireless.kernel.org/en/users/Drivers/wil6210
+F: drivers/net/wireless/ath/wil6210/
+F: include/uapi/linux/wil6210_uapi.h
+
+WIMAX STACK
+M: Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+M: linux-wimax@intel.com
+L: wimax@linuxwimax.org (subscribers-only)
+S: Supported
+W: http://linuxwimax.org
+F: Documentation/wimax/README.wimax
+F: include/linux/wimax/debug.h
+F: include/net/wimax.h
+F: include/uapi/linux/wimax.h
+F: net/wimax/
+
WINBOND CIR DRIVER
M: David Härdeman <david@hardeman.nu>
S: Maintained
@@ -14241,18 +14442,6 @@ L: linux-gpio@vger.kernel.org
S: Maintained
F: drivers/gpio/gpio-ws16c48.c
-WIMAX STACK
-M: Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
-M: linux-wimax@intel.com
-L: wimax@linuxwimax.org (subscribers-only)
-S: Supported
-W: http://linuxwimax.org
-F: Documentation/wimax/README.wimax
-F: include/linux/wimax/debug.h
-F: include/net/wimax.h
-F: include/uapi/linux/wimax.h
-F: net/wimax/
-
WISTRON LAPTOP BUTTON DRIVER
M: Miloslav Trmac <mitr@volny.cz>
S: Maintained
@@ -14337,15 +14526,6 @@ S: Maintained
F: Documentation/x86/
F: arch/x86/
-X86 PLATFORM DRIVERS
-M: Darren Hart <dvhart@infradead.org>
-M: Andy Shevchenko <andy@infradead.org>
-L: platform-driver-x86@vger.kernel.org
-T: git git://git.infradead.org/users/dvhart/linux-platform-drivers-x86.git
-S: Maintained
-F: drivers/platform/x86/
-F: drivers/platform/olpc/
-
X86 MCE INFRASTRUCTURE
M: Tony Luck <tony.luck@intel.com>
M: Borislav Petkov <bp@alien8.de>
@@ -14358,6 +14538,15 @@ M: Borislav Petkov <bp@alien8.de>
S: Maintained
F: arch/x86/kernel/cpu/microcode/*
+X86 PLATFORM DRIVERS
+M: Darren Hart <dvhart@infradead.org>
+M: Andy Shevchenko <andy@infradead.org>
+L: platform-driver-x86@vger.kernel.org
+T: git git://git.infradead.org/users/dvhart/linux-platform-drivers-x86.git
+S: Maintained
+F: drivers/platform/x86/
+F: drivers/platform/olpc/
+
X86 VDSO
M: Andy Lutomirski <luto@amacapital.net>
L: linux-kernel@vger.kernel.org
@@ -14374,20 +14563,13 @@ T: git git://linuxtv.org/media_tree.git
S: Maintained
F: drivers/media/tuners/tuner-xc2028.*
-XEN HYPERVISOR INTERFACE
-M: Boris Ostrovsky <boris.ostrovsky@oracle.com>
-M: Juergen Gross <jgross@suse.com>
+XEN BLOCK SUBSYSTEM
+M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+M: Roger Pau Monné <roger.pau@citrix.com>
L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip.git
S: Supported
-F: arch/x86/xen/
-F: drivers/*/xen-*front.c
-F: drivers/xen/
-F: arch/x86/include/asm/xen/
-F: include/xen/
-F: include/uapi/xen/
-F: Documentation/ABI/stable/sysfs-hypervisor-xen
-F: Documentation/ABI/testing/sysfs-hypervisor-xen
+F: drivers/block/xen-blkback/*
+F: drivers/block/xen*
XEN HYPERVISOR ARM
M: Stefano Stabellini <sstabellini@kernel.org>
@@ -14403,6 +14585,21 @@ S: Maintained
F: arch/arm64/xen/
F: arch/arm64/include/asm/xen/
+XEN HYPERVISOR INTERFACE
+M: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+M: Juergen Gross <jgross@suse.com>
+L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip.git
+S: Supported
+F: arch/x86/xen/
+F: drivers/*/xen-*front.c
+F: drivers/xen/
+F: arch/x86/include/asm/xen/
+F: include/xen/
+F: include/uapi/xen/
+F: Documentation/ABI/stable/sysfs-hypervisor-xen
+F: Documentation/ABI/testing/sysfs-hypervisor-xen
+
XEN NETWORK BACKEND DRIVER
M: Wei Liu <wei.liu2@citrix.com>
M: Paul Durrant <paul.durrant@citrix.com>
@@ -14418,14 +14615,6 @@ S: Supported
F: arch/x86/pci/*xen*
F: drivers/pci/*xen*
-XEN BLOCK SUBSYSTEM
-M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-M: Roger Pau Monné <roger.pau@citrix.com>
-L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
-S: Supported
-F: drivers/block/xen-blkback/*
-F: drivers/block/xen*
-
XEN PVSCSI DRIVERS
M: Juergen Gross <jgross@suse.com>
L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
@@ -14502,6 +14691,13 @@ S: Maintained
F: drivers/net/hamradio/yam*
F: include/linux/yam.h
+YAMA SECURITY MODULE
+M: Kees Cook <keescook@chromium.org>
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git yama/tip
+S: Supported
+F: security/yama/
+F: Documentation/admin-guide/LSM/Yama.rst
+
YEALINK PHONE DRIVER
M: Henk Vergonet <Henk.Vergonet@gmail.com>
L: usbb2k-api-dev@nongnu.org
@@ -14536,23 +14732,23 @@ L: zd1211-devs@lists.sourceforge.net (subscribers-only)
S: Maintained
F: drivers/net/wireless/zydas/zd1211rw/
-ZD1301_DEMOD MEDIA DRIVER
+ZD1301 MEDIA DRIVER
M: Antti Palosaari <crope@iki.fi>
L: linux-media@vger.kernel.org
W: https://linuxtv.org/
W: http://palosaari.fi/linux/
Q: https://patchwork.linuxtv.org/project/linux-media/list/
S: Maintained
-F: drivers/media/dvb-frontends/zd1301_demod*
+F: drivers/media/usb/dvb-usb-v2/zd1301*
-ZD1301 MEDIA DRIVER
+ZD1301_DEMOD MEDIA DRIVER
M: Antti Palosaari <crope@iki.fi>
L: linux-media@vger.kernel.org
W: https://linuxtv.org/
W: http://palosaari.fi/linux/
Q: https://patchwork.linuxtv.org/project/linux-media/list/
S: Maintained
-F: drivers/media/usb/dvb-usb-v2/zd1301*
+F: drivers/media/dvb-frontends/zd1301_demod*
ZPOOL COMPRESSED PAGE STORAGE API
M: Dan Streetman <ddstreet@ieee.org>
diff --git a/Makefile b/Makefile
index b4fb9a1d1594..ab067d51ddf1 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 13
SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION =
NAME = Fearless Coyote
# *DOCUMENTATION*
@@ -396,7 +396,7 @@ LINUXINCLUDE := \
KBUILD_CPPFLAGS := -D__KERNEL__
KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
- -fno-strict-aliasing -fno-common \
+ -fno-strict-aliasing -fno-common -fshort-wchar \
-Werror-implicit-function-declaration \
-Wno-format-security \
-std=gnu89 $(call cc-option,-fno-PIE)
@@ -442,7 +442,7 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
# ===========================================================================
# Rules shared between *config targets and build targets
-# Basic helpers built in scripts/
+# Basic helpers built in scripts/basic/
PHONY += scripts_basic
scripts_basic:
$(Q)$(MAKE) $(build)=scripts/basic
@@ -505,7 +505,7 @@ ifeq ($(KBUILD_EXTMOD),)
endif
endif
endif
-# install and module_install need also be processed one by one
+# install and modules_install need also be processed one by one
ifneq ($(filter install,$(MAKECMDGOALS)),)
ifneq ($(filter modules_install,$(MAKECMDGOALS)),)
mixed-targets := 1
@@ -964,7 +964,7 @@ export KBUILD_VMLINUX_MAIN := $(core-y) $(libs-y2) $(drivers-y) $(net-y) $(virt-
export KBUILD_VMLINUX_LIBS := $(libs-y1)
export KBUILD_LDS := arch/$(SRCARCH)/kernel/vmlinux.lds
export LDFLAGS_vmlinux
-# used by scripts/pacmage/Makefile
+# used by scripts/package/Makefile
export KBUILD_ALLDIRS := $(sort $(filter-out arch/%,$(vmlinux-alldirs)) arch Documentation include samples scripts tools)
vmlinux-deps := $(KBUILD_LDS) $(KBUILD_VMLINUX_INIT) $(KBUILD_VMLINUX_MAIN) $(KBUILD_VMLINUX_LIBS)
@@ -992,8 +992,8 @@ include/generated/autoksyms.h: FORCE
ARCH_POSTLINK := $(wildcard $(srctree)/arch/$(SRCARCH)/Makefile.postlink)
# Final link of vmlinux with optional arch pass after final link
- cmd_link-vmlinux = \
- $(CONFIG_SHELL) $< $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux) ; \
+cmd_link-vmlinux = \
+ $(CONFIG_SHELL) $< $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux) ; \
$(if $(ARCH_POSTLINK), $(MAKE) -f $(ARCH_POSTLINK) $@, true)
vmlinux: scripts/link-vmlinux.sh vmlinux_prereq $(vmlinux-deps) FORCE
@@ -1184,6 +1184,7 @@ PHONY += kselftest
kselftest:
$(Q)$(MAKE) -C tools/testing/selftests run_tests
+PHONY += kselftest-clean
kselftest-clean:
$(Q)$(MAKE) -C tools/testing/selftests clean
@@ -1467,7 +1468,7 @@ $(help-board-dirs): help-%:
# Documentation targets
# ---------------------------------------------------------------------------
-DOC_TARGETS := xmldocs sgmldocs psdocs latexdocs pdfdocs htmldocs mandocs installmandocs epubdocs cleandocs linkcheckdocs
+DOC_TARGETS := xmldocs latexdocs pdfdocs htmldocs epubdocs cleandocs linkcheckdocs
PHONY += $(DOC_TARGETS)
$(DOC_TARGETS): scripts_basic FORCE
$(Q)$(MAKE) $(build)=Documentation $@
diff --git a/arch/Kconfig b/arch/Kconfig
index 21d0089117fe..2520ca5b42eb 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -931,6 +931,18 @@ config STRICT_MODULE_RWX
config ARCH_WANT_RELAX_ORDER
bool
+config ARCH_HAS_REFCOUNT
+ bool
+ help
+ An architecture selects this when it has implemented refcount_t
+ using open coded assembly primitives that provide an optimized
+ refcount_t implementation, possibly at the expense of some full
+ refcount state checks of CONFIG_REFCOUNT_FULL=y.
+
+ The refcount overflow check behavior, however, must be retained.
+ Catching overflows is the primary security concern for protecting
+ against bugs in reference counts.
+
config REFCOUNT_FULL
bool "Perform full reference count validation at the expense of speed"
help
diff --git a/arch/alpha/defconfig b/arch/alpha/defconfig
index 539e8b5a6cbd..f4ec420d7f2d 100644
--- a/arch/alpha/defconfig
+++ b/arch/alpha/defconfig
@@ -19,7 +19,6 @@ CONFIG_INET_AH=m
CONFIG_INET_ESP=m
# CONFIG_IPV6 is not set
CONFIG_NETFILTER=y
-CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_FILTER=m
CONFIG_VLAN_8021Q=m
@@ -57,7 +56,6 @@ CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_RTC=y
CONFIG_EXT2_FS=y
CONFIG_REISERFS_FS=m
-CONFIG_AUTOFS_FS=m
CONFIG_ISO9660_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
diff --git a/arch/alpha/include/asm/Kbuild b/arch/alpha/include/asm/Kbuild
index d103db5af5ff..5b974ab8425c 100644
--- a/arch/alpha/include/asm/Kbuild
+++ b/arch/alpha/include/asm/Kbuild
@@ -3,6 +3,7 @@
generic-y += clkdev.h
generic-y += exec.h
generic-y += export.h
+generic-y += fb.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
diff --git a/arch/alpha/include/asm/asm-prototypes.h b/arch/alpha/include/asm/asm-prototypes.h
new file mode 100644
index 000000000000..d12c68ea340b
--- /dev/null
+++ b/arch/alpha/include/asm/asm-prototypes.h
@@ -0,0 +1,18 @@
+#include <linux/spinlock.h>
+
+#include <asm/checksum.h>
+#include <asm/console.h>
+#include <asm/page.h>
+#include <asm/string.h>
+#include <asm/uaccess.h>
+
+#include <asm-generic/asm-prototypes.h>
+
+extern void __divl(void);
+extern void __reml(void);
+extern void __divq(void);
+extern void __remq(void);
+extern void __divlu(void);
+extern void __remlu(void);
+extern void __divqu(void);
+extern void __remqu(void);
diff --git a/arch/alpha/include/asm/core_marvel.h b/arch/alpha/include/asm/core_marvel.h
index dad300fa14ce..8dcf9dbda618 100644
--- a/arch/alpha/include/asm/core_marvel.h
+++ b/arch/alpha/include/asm/core_marvel.h
@@ -312,7 +312,7 @@ struct io7 {
io7_port7_csrs *csrs;
struct io7_port ports[IO7_NUM_PORTS];
- spinlock_t irq_lock;
+ raw_spinlock_t irq_lock;
};
#ifndef __EXTERN_INLINE
diff --git a/arch/alpha/include/asm/fb.h b/arch/alpha/include/asm/fb.h
deleted file mode 100644
index fa9bbb96b2b3..000000000000
--- a/arch/alpha/include/asm/fb.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef _ASM_FB_H_
-#define _ASM_FB_H_
-#include <linux/device.h>
-
-/* Caching is off in the I/O space quadrant by design. */
-#define fb_pgprotect(...) do {} while (0)
-
-static inline int fb_is_primary_device(struct fb_info *info)
-{
- return 0;
-}
-
-#endif /* _ASM_FB_H_ */
diff --git a/arch/alpha/include/asm/futex.h b/arch/alpha/include/asm/futex.h
index fb01dfb760c2..05a70edd57b6 100644
--- a/arch/alpha/include/asm/futex.h
+++ b/arch/alpha/include/asm/futex.h
@@ -25,18 +25,10 @@
: "r" (uaddr), "r"(oparg) \
: "memory")
-static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+ u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
pagefault_disable();
@@ -62,17 +54,9 @@ static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h
index ff4049155c84..4d61d2a50c52 100644
--- a/arch/alpha/include/asm/io.h
+++ b/arch/alpha/include/asm/io.h
@@ -299,6 +299,7 @@ static inline void __iomem * ioremap_nocache(unsigned long offset,
return ioremap(offset, size);
}
+#define ioremap_wc ioremap_nocache
#define ioremap_uc ioremap_nocache
static inline void iounmap(volatile void __iomem *addr)
diff --git a/arch/alpha/include/asm/spinlock.h b/arch/alpha/include/asm/spinlock.h
index a40b9fc0c6c3..718ac0b64adf 100644
--- a/arch/alpha/include/asm/spinlock.h
+++ b/arch/alpha/include/asm/spinlock.h
@@ -16,11 +16,6 @@
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_spin_is_locked(x) ((x)->lock != 0)
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
- smp_cond_load_acquire(&lock->lock, !VAL);
-}
-
static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
return lock.lock == 0;
diff --git a/arch/alpha/include/asm/types.h b/arch/alpha/include/asm/types.h
index 4cb4b6d3452c..0bc66e1d3a7e 100644
--- a/arch/alpha/include/asm/types.h
+++ b/arch/alpha/include/asm/types.h
@@ -1,6 +1,6 @@
#ifndef _ALPHA_TYPES_H
#define _ALPHA_TYPES_H
-#include <asm-generic/int-ll64.h>
+#include <uapi/asm/types.h>
#endif /* _ALPHA_TYPES_H */
diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h
index b37153ecf2ac..db7fc0f511e2 100644
--- a/arch/alpha/include/asm/unistd.h
+++ b/arch/alpha/include/asm/unistd.h
@@ -3,7 +3,7 @@
#include <uapi/asm/unistd.h>
-#define NR_SYSCALLS 514
+#define NR_SYSCALLS 523
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_STAT64
diff --git a/arch/alpha/include/uapi/asm/ioctls.h b/arch/alpha/include/uapi/asm/ioctls.h
index ff67b8373bf7..1cd7dc7d4870 100644
--- a/arch/alpha/include/uapi/asm/ioctls.h
+++ b/arch/alpha/include/uapi/asm/ioctls.h
@@ -100,7 +100,7 @@
#define TIOCGPKT _IOR('T', 0x38, int) /* Get packet mode state */
#define TIOCGPTLCK _IOR('T', 0x39, int) /* Get Pty lock state */
#define TIOCGEXCL _IOR('T', 0x40, int) /* Get exclusive mode state */
-#define TIOCGPTPEER _IOR('T', 0x41, int) /* Safely open the slave */
+#define TIOCGPTPEER _IO('T', 0x41) /* Safely open the slave */
#define TIOCSERCONFIG 0x5453
#define TIOCSERGWILD 0x5454
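
The TIOCGPTPEER change above is more than cosmetic: the ioctl's third argument is the set of open(2) flags for the slave, passed by value, so _IO() (no data transfer through a pointer) describes it correctly, whereas _IOR() wrongly advertised a read of an int through a pointer. A minimal, hypothetical userspace use (function name illustrative; the constant comes from the installed uapi headers via <sys/ioctl.h>):

#include <fcntl.h>
#include <sys/ioctl.h>

/* Open the pty slave through the master fd, without going through the
 * /dev/pts path; the open flags are passed by value as the ioctl argument. */
static int open_pty_peer(int master_fd)
{
	return ioctl(master_fd, TIOCGPTPEER, O_RDWR | O_NOCTTY);
}
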
diff --git a/arch/alpha/include/uapi/asm/mman.h b/arch/alpha/include/uapi/asm/mman.h
index 02760f6e6ca4..3b26cc62dadb 100644
--- a/arch/alpha/include/uapi/asm/mman.h
+++ b/arch/alpha/include/uapi/asm/mman.h
@@ -64,20 +64,12 @@
overrides the coredump filter bits */
#define MADV_DODUMP 17 /* Clear the MADV_NODUMP flag */
+#define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */
+#define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */
+
/* compatibility flags */
#define MAP_FILE 0
-/*
- * When MAP_HUGETLB is set bits [26:31] encode the log2 of the huge page size.
- * This gives us 6 bits, which is enough until someone invents 128 bit address
- * spaces.
- *
- * Assume these are all power of twos.
- * When 0 use the default page size.
- */
-#define MAP_HUGE_SHIFT 26
-#define MAP_HUGE_MASK 0x3f
-
#define PKEY_DISABLE_ACCESS 0x1
#define PKEY_DISABLE_WRITE 0x2
#define PKEY_ACCESS_MASK (PKEY_DISABLE_ACCESS |\
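
The MAP_HUGE_SHIFT/MAP_HUGE_MASK removal above drops alpha's private copy of these definitions; they are presumably consolidated into a shared uapi header elsewhere in this series (that hunk is not shown here). The encoding the removed comment describes is easy to illustrate: bits [26:31] of the mmap flags carry log2 of the requested huge page size, so a 2 MiB request encodes 21 in that field. A hypothetical userspace sketch (the EXAMPLE_* macros simply restate that encoding; real code should use the MAP_HUGE_* macros from its own headers):

#define _GNU_SOURCE
#include <stddef.h>
#include <sys/mman.h>

#define EXAMPLE_MAP_HUGE_SHIFT	26
#define EXAMPLE_MAP_HUGE_2MB	(21 << EXAMPLE_MAP_HUGE_SHIFT)	/* log2(2 MiB) == 21 */

/* Map len bytes backed by 2 MiB huge pages, if the system provides them. */
static void *map_2mb_hugepages(size_t len)
{
	return mmap(NULL, len, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | EXAMPLE_MAP_HUGE_2MB,
		    -1, 0);
}
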
diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h
index 7b285dd4fe05..c6133a045352 100644
--- a/arch/alpha/include/uapi/asm/socket.h
+++ b/arch/alpha/include/uapi/asm/socket.h
@@ -109,4 +109,6 @@
#define SO_PEERGROUPS 59
+#define SO_ZEROCOPY 60
+
#endif /* _UAPI_ASM_SOCKET_H */
diff --git a/arch/alpha/include/uapi/asm/types.h b/arch/alpha/include/uapi/asm/types.h
index 9fd3cd459777..8d1024d7be05 100644
--- a/arch/alpha/include/uapi/asm/types.h
+++ b/arch/alpha/include/uapi/asm/types.h
@@ -9,8 +9,18 @@
 * need to be careful to avoid name clashes.
*/
-#ifndef __KERNEL__
+/*
+ * This is here because we used to use l64 for alpha
+ * and we don't want to impact user mode with our change to ll64
+ * in the kernel.
+ *
+ * However, some user programs are fine with this. They can
+ * flag __SANE_USERSPACE_TYPES__ to get int-ll64.h here.
+ */
+#if !defined(__SANE_USERSPACE_TYPES__) && !defined(__KERNEL__)
#include <asm-generic/int-l64.h>
+#else
+#include <asm-generic/int-ll64.h>
#endif
#endif /* _UAPI_ALPHA_TYPES_H */
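
The net effect of the types.h change: alpha userspace keeps the historical l64 types (__u64 as unsigned long) by default, and a program that wants the ll64 layout opts in before including any uapi header. A small, hypothetical example of the opt-in:

/* Request the int-ll64 layout so __u64 is unsigned long long. */
#define __SANE_USERSPACE_TYPES__

#include <stdio.h>
#include <asm/types.h>

int main(void)
{
	__u64 val = 1ULL << 40;

	/* The cast keeps the format string correct with either layout. */
	printf("val = %llu\n", (unsigned long long)val);
	return 0;
}
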
diff --git a/arch/alpha/include/uapi/asm/unistd.h b/arch/alpha/include/uapi/asm/unistd.h
index aa33bf5aacb6..53de540e39a7 100644
--- a/arch/alpha/include/uapi/asm/unistd.h
+++ b/arch/alpha/include/uapi/asm/unistd.h
@@ -366,11 +366,6 @@
#define __NR_epoll_create 407
#define __NR_epoll_ctl 408
#define __NR_epoll_wait 409
-/* Feb 2007: These three sys_epoll defines shouldn't be here but culling
- * them would break userspace apps ... we'll kill them off in 2010 :) */
-#define __NR_sys_epoll_create __NR_epoll_create
-#define __NR_sys_epoll_ctl __NR_epoll_ctl
-#define __NR_sys_epoll_wait __NR_epoll_wait
#define __NR_remap_file_pages 410
#define __NR_set_tid_address 411
#define __NR_restart_syscall 412
@@ -475,5 +470,19 @@
#define __NR_getrandom 511
#define __NR_memfd_create 512
#define __NR_execveat 513
+#define __NR_seccomp 514
+#define __NR_bpf 515
+#define __NR_userfaultfd 516
+#define __NR_membarrier 517
+#define __NR_mlock2 518
+#define __NR_copy_file_range 519
+#define __NR_preadv2 520
+#define __NR_pwritev2 521
+#define __NR_statx 522
+
+/* Alpha doesn't have protection keys. */
+#define __IGNORE_pkey_mprotect
+#define __IGNORE_pkey_alloc
+#define __IGNORE_pkey_free
#endif /* _UAPI_ALPHA_UNISTD_H */
diff --git a/arch/alpha/kernel/core_marvel.c b/arch/alpha/kernel/core_marvel.c
index d5f0580746a5..b10c316475dd 100644
--- a/arch/alpha/kernel/core_marvel.c
+++ b/arch/alpha/kernel/core_marvel.c
@@ -118,7 +118,7 @@ alloc_io7(unsigned int pe)
io7 = alloc_bootmem(sizeof(*io7));
io7->pe = pe;
- spin_lock_init(&io7->irq_lock);
+ raw_spin_lock_init(&io7->irq_lock);
for (h = 0; h < 4; h++) {
io7->ports[h].io7 = io7;
@@ -351,7 +351,7 @@ marvel_init_io7(struct io7 *io7)
}
}
-void
+void __init
marvel_io7_present(gct6_node *node)
{
int pe;
@@ -369,6 +369,7 @@ marvel_io7_present(gct6_node *node)
static void __init
marvel_find_console_vga_hose(void)
{
+#ifdef CONFIG_VGA_HOSE
u64 *pu64 = (u64 *)((u64)hwrpb + hwrpb->ctbt_offset);
if (pu64[7] == 3) { /* TERM_TYPE == graphics */
@@ -402,9 +403,10 @@ marvel_find_console_vga_hose(void)
pci_vga_hose = hose;
}
}
+#endif
}
-gct6_search_struct gct_wanted_node_list[] = {
+gct6_search_struct gct_wanted_node_list[] __initdata = {
{ GCT_TYPE_HOSE, GCT_SUBTYPE_IO_PORT_MODULE, marvel_io7_present },
{ 0, 0, NULL }
};
diff --git a/arch/alpha/kernel/core_titan.c b/arch/alpha/kernel/core_titan.c
index 219bf271c0ba..b532d925443d 100644
--- a/arch/alpha/kernel/core_titan.c
+++ b/arch/alpha/kernel/core_titan.c
@@ -461,6 +461,7 @@ titan_ioremap(unsigned long addr, unsigned long size)
unsigned long *ptes;
unsigned long pfn;
+#ifdef CONFIG_VGA_HOSE
/*
* Adjust the address and hose, if necessary.
*/
@@ -468,6 +469,7 @@ titan_ioremap(unsigned long addr, unsigned long size)
h = pci_vga_hose->index;
addr += pci_vga_hose->mem_space->start;
}
+#endif
/*
* Find the hose.
diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
index 936bc8f89a67..47632fa8c24e 100644
--- a/arch/alpha/kernel/module.c
+++ b/arch/alpha/kernel/module.c
@@ -181,6 +181,9 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
switch (r_type) {
case R_ALPHA_NONE:
break;
+ case R_ALPHA_REFLONG:
+ *(u32 *)location = value;
+ break;
case R_ALPHA_REFQUAD:
/* BUG() can produce misaligned relocations. */
((u32 *)location)[0] = value;
diff --git a/arch/alpha/kernel/pci-noop.c b/arch/alpha/kernel/pci-noop.c
index ffbdb3fb672f..676bab6e3123 100644
--- a/arch/alpha/kernel/pci-noop.c
+++ b/arch/alpha/kernel/pci-noop.c
@@ -42,11 +42,7 @@ alloc_pci_controller(void)
struct resource * __init
alloc_resource(void)
{
- struct resource *res;
-
- res = alloc_bootmem(sizeof(*res));
-
- return res;
+ return alloc_bootmem(sizeof(struct resource));
}
asmlinkage long
diff --git a/arch/alpha/kernel/pci-sysfs.c b/arch/alpha/kernel/pci-sysfs.c
index 92c0d460815b..cbecd527c696 100644
--- a/arch/alpha/kernel/pci-sysfs.c
+++ b/arch/alpha/kernel/pci-sysfs.c
@@ -38,7 +38,7 @@ static int __pci_mmap_fits(struct pci_dev *pdev, int num,
unsigned long nr, start, size;
int shift = sparse ? 5 : 0;
- nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ nr = vma_pages(vma);
start = vma->vm_pgoff;
size = ((pci_resource_len(pdev, num) - 1) >> (PAGE_SHIFT - shift)) + 1;
@@ -64,8 +64,7 @@ static int pci_mmap_resource(struct kobject *kobj,
struct bin_attribute *attr,
struct vm_area_struct *vma, int sparse)
{
- struct pci_dev *pdev = to_pci_dev(container_of(kobj,
- struct device, kobj));
+ struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
struct resource *res = attr->private;
enum pci_mmap_state mmap_type;
struct pci_bus_region bar;
@@ -255,7 +254,7 @@ static int __legacy_mmap_fits(struct pci_controller *hose,
{
unsigned long nr, start, size;
- nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ nr = vma_pages(vma);
start = vma->vm_pgoff;
size = ((res_size - 1) >> PAGE_SHIFT) + 1;
diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c
index 5f387ee5b5c5..8322df174bbf 100644
--- a/arch/alpha/kernel/pci.c
+++ b/arch/alpha/kernel/pci.c
@@ -379,11 +379,7 @@ alloc_pci_controller(void)
struct resource * __init
alloc_resource(void)
{
- struct resource *res;
-
- res = alloc_bootmem(sizeof(*res));
-
- return res;
+ return alloc_bootmem(sizeof(struct resource));
}
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index 491e6a604e82..249229ab4942 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -1094,8 +1094,9 @@ get_sysnames(unsigned long type, unsigned long variation, unsigned long cpu,
default: /* default to variation "0" for now */
break;
case ST_DEC_EB164:
- if (member < ARRAY_SIZE(eb164_indices))
- *variation_name = eb164_names[eb164_indices[member]];
+ if (member >= ARRAY_SIZE(eb164_indices))
+ break;
+ *variation_name = eb164_names[eb164_indices[member]];
/* PC164 may show as EB164 variation, but with EV56 CPU,
so, since no true EB164 had anything but EV5... */
if (eb164_indices[member] == 0 && cpu == EV56_CPU)
diff --git a/arch/alpha/kernel/smc37c669.c b/arch/alpha/kernel/smc37c669.c
index c803fc76ae4f..4dbd4e415041 100644
--- a/arch/alpha/kernel/smc37c669.c
+++ b/arch/alpha/kernel/smc37c669.c
@@ -2007,11 +2007,8 @@ static void __init SMC37c669_config_mode(
static unsigned char __init SMC37c669_read_config(
unsigned char index )
{
- unsigned char data;
-
- wb( &SMC37c669->index_port, index );
- data = rb( &SMC37c669->data_port );
- return data;
+ wb(&SMC37c669->index_port, index);
+ return rb(&SMC37c669->data_port);
}
/*
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 9fc560459ebd..f6726a746427 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -115,7 +115,7 @@ wait_boot_cpu_to_stop(int cpuid)
/*
* Where secondaries begin a life of C.
*/
-void
+void __init
smp_callin(void)
{
int cpuid = hard_smp_processor_id();
diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c
index 24e41bd7d3c9..3e533920371f 100644
--- a/arch/alpha/kernel/sys_marvel.c
+++ b/arch/alpha/kernel/sys_marvel.c
@@ -115,11 +115,11 @@ io7_enable_irq(struct irq_data *d)
return;
}
- spin_lock(&io7->irq_lock);
+ raw_spin_lock(&io7->irq_lock);
*ctl |= 1UL << 24;
mb();
*ctl;
- spin_unlock(&io7->irq_lock);
+ raw_spin_unlock(&io7->irq_lock);
}
static void
@@ -136,11 +136,11 @@ io7_disable_irq(struct irq_data *d)
return;
}
- spin_lock(&io7->irq_lock);
+ raw_spin_lock(&io7->irq_lock);
*ctl &= ~(1UL << 24);
mb();
*ctl;
- spin_unlock(&io7->irq_lock);
+ raw_spin_unlock(&io7->irq_lock);
}
static void
@@ -263,7 +263,7 @@ init_io7_irqs(struct io7 *io7,
*/
printk(" Interrupts reported to CPU at PE %u\n", boot_cpuid);
- spin_lock(&io7->irq_lock);
+ raw_spin_lock(&io7->irq_lock);
/* set up the error irqs */
io7_redirect_irq(io7, &io7->csrs->HLT_CTL.csr, boot_cpuid);
@@ -295,7 +295,7 @@ init_io7_irqs(struct io7 *io7,
for (i = 0; i < 16; ++i)
init_one_io7_msi(io7, i, boot_cpuid);
- spin_unlock(&io7->irq_lock);
+ raw_spin_unlock(&io7->irq_lock);
}
static void __init
diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S
index 9b62e3fd4f03..5b4514abb234 100644
--- a/arch/alpha/kernel/systbls.S
+++ b/arch/alpha/kernel/systbls.S
@@ -532,6 +532,15 @@ sys_call_table:
.quad sys_getrandom
.quad sys_memfd_create
.quad sys_execveat
+ .quad sys_seccomp
+ .quad sys_bpf /* 515 */
+ .quad sys_userfaultfd
+ .quad sys_membarrier
+ .quad sys_mlock2
+ .quad sys_copy_file_range
+ .quad sys_preadv2 /* 520 */
+ .quad sys_pwritev2
+ .quad sys_statx
.size sys_call_table, . - sys_call_table
.type sys_call_table, @object
diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
index 65bb102d985b..ddb89a18cf26 100644
--- a/arch/alpha/kernel/traps.c
+++ b/arch/alpha/kernel/traps.c
@@ -193,8 +193,10 @@ die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15)
static long dummy_emul(void) { return 0; }
long (*alpha_fp_emul_imprecise)(struct pt_regs *regs, unsigned long writemask)
= (void *)dummy_emul;
+EXPORT_SYMBOL_GPL(alpha_fp_emul_imprecise);
long (*alpha_fp_emul) (unsigned long pc)
= (void *)dummy_emul;
+EXPORT_SYMBOL_GPL(alpha_fp_emul);
#else
long alpha_fp_emul_imprecise(struct pt_regs *regs, unsigned long writemask);
long alpha_fp_emul (unsigned long pc);
diff --git a/arch/alpha/lib/Makefile b/arch/alpha/lib/Makefile
index 7083434dd241..a80815960364 100644
--- a/arch/alpha/lib/Makefile
+++ b/arch/alpha/lib/Makefile
@@ -20,12 +20,8 @@ lib-y = __divqu.o __remqu.o __divlu.o __remlu.o \
checksum.o \
csum_partial_copy.o \
$(ev67-y)strlen.o \
- $(ev67-y)strcat.o \
- strcpy.o \
- $(ev67-y)strncat.o \
- strncpy.o \
- $(ev6-y)stxcpy.o \
- $(ev6-y)stxncpy.o \
+ stycpy.o \
+ styncpy.o \
$(ev67-y)strchr.o \
$(ev67-y)strrchr.o \
$(ev6-y)memchr.o \
@@ -49,3 +45,17 @@ AFLAGS___remlu.o = -DREM -DINTSIZE
$(addprefix $(obj)/,__divqu.o __remqu.o __divlu.o __remlu.o): \
$(src)/$(ev6-y)divide.S FORCE
$(call if_changed_rule,as_o_S)
+
+# There are direct branches between {str*cpy,str*cat} and stx*cpy.
+# Ensure the branches are within range by merging these objects.
+
+LDFLAGS_stycpy.o := -r
+LDFLAGS_styncpy.o := -r
+
+$(obj)/stycpy.o: $(obj)/strcpy.o $(obj)/$(ev67-y)strcat.o \
+ $(obj)/$(ev6-y)stxcpy.o FORCE
+ $(call if_changed,ld)
+
+$(obj)/styncpy.o: $(obj)/strncpy.o $(obj)/$(ev67-y)strncat.o \
+ $(obj)/$(ev6-y)stxncpy.o FORCE
+ $(call if_changed,ld)
diff --git a/arch/alpha/lib/copy_user.S b/arch/alpha/lib/copy_user.S
index 159f1b7e6e49..c277a1a4383e 100644
--- a/arch/alpha/lib/copy_user.S
+++ b/arch/alpha/lib/copy_user.S
@@ -34,7 +34,7 @@
.ent __copy_user
__copy_user:
.prologue 0
- and $18,$18,$0
+ mov $18,$0
and $16,7,$3
beq $0,$35
beq $3,$36
diff --git a/arch/alpha/lib/ev6-copy_user.S b/arch/alpha/lib/ev6-copy_user.S
index 35e6710d0700..954ca03ebebe 100644
--- a/arch/alpha/lib/ev6-copy_user.S
+++ b/arch/alpha/lib/ev6-copy_user.S
@@ -45,9 +45,10 @@
# Pipeline info: Slotting & Comments
__copy_user:
.prologue 0
- andq $18, $18, $0
- subq $18, 32, $1 # .. E .. .. : Is this going to be a small copy?
- beq $0, $zerolength # U .. .. .. : U L U L
+ mov $18, $0 # .. .. .. E
+ subq $18, 32, $1 # .. .. E. .. : Is this going to be a small copy?
+ nop # .. E .. ..
+ beq $18, $zerolength # U .. .. .. : U L U L
and $16,7,$3 # .. .. .. E : is leading dest misalignment
ble $1, $onebyteloop # .. .. U .. : 1st branch : small amount of data
diff --git a/arch/alpha/math-emu/math.c b/arch/alpha/math-emu/math.c
index d17d705f6545..1c2d456da7f2 100644
--- a/arch/alpha/math-emu/math.c
+++ b/arch/alpha/math-emu/math.c
@@ -53,6 +53,7 @@ extern void alpha_write_fp_reg_s (unsigned long reg, unsigned long val);
#ifdef MODULE
MODULE_DESCRIPTION("FP Software completion module");
+MODULE_LICENSE("GPL v2");
extern long (*alpha_fp_emul_imprecise)(struct pt_regs *, unsigned long);
extern long (*alpha_fp_emul) (unsigned long pc);
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index a5459698f0ee..7db85ab00c52 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -96,7 +96,6 @@ menu "ARC Architecture Configuration"
menu "ARC Platform/SoC/Board"
-source "arch/arc/plat-sim/Kconfig"
source "arch/arc/plat-tb10x/Kconfig"
source "arch/arc/plat-axs10x/Kconfig"
#New platform adds here
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 44ef35d33956..3a61cfcc38c0 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -107,7 +107,7 @@ core-y += arch/arc/
# w/o this dtb won't embed into kernel binary
core-y += arch/arc/boot/dts/
-core-$(CONFIG_ARC_PLAT_SIM) += arch/arc/plat-sim/
+core-y += arch/arc/plat-sim/
core-$(CONFIG_ARC_PLAT_TB10X) += arch/arc/plat-tb10x/
core-$(CONFIG_ARC_PLAT_AXS10X) += arch/arc/plat-axs10x/
core-$(CONFIG_ARC_PLAT_EZNPS) += arch/arc/plat-eznps/
diff --git a/arch/arc/boot/dts/axc001.dtsi b/arch/arc/boot/dts/axc001.dtsi
index 53ce226f77a5..a380ffa1a458 100644
--- a/arch/arc/boot/dts/axc001.dtsi
+++ b/arch/arc/boot/dts/axc001.dtsi
@@ -15,15 +15,15 @@
/ {
compatible = "snps,arc";
- #address-cells = <1>;
- #size-cells = <1>;
+ #address-cells = <2>;
+ #size-cells = <2>;
cpu_card {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
- ranges = <0x00000000 0xf0000000 0x10000000>;
+ ranges = <0x00000000 0x0 0xf0000000 0x10000000>;
core_clk: core_clk {
#clock-cells = <0>;
@@ -91,23 +91,21 @@
mb_intc: dw-apb-ictl@0xe0012000 {
#interrupt-cells = <1>;
compatible = "snps,dw-apb-ictl";
- reg = < 0xe0012000 0x200 >;
+ reg = < 0x0 0xe0012000 0x0 0x200 >;
interrupt-controller;
interrupt-parent = <&core_intc>;
interrupts = < 7 >;
};
memory {
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x00000000 0x80000000 0x20000000>;
device_type = "memory";
- reg = <0x80000000 0x1b000000>; /* (512 - 32) MiB */
+ /* CONFIG_KERNEL_RAM_BASE_ADDRESS needs to match low mem start */
+ reg = <0x0 0x80000000 0x0 0x1b000000>; /* (512 - 32) MiB */
};
reserved-memory {
- #address-cells = <1>;
- #size-cells = <1>;
+ #address-cells = <2>;
+ #size-cells = <2>;
ranges;
/*
* We just move frame buffer area to the very end of
@@ -118,7 +116,7 @@
*/
frame_buffer: frame_buffer@9e000000 {
compatible = "shared-dma-pool";
- reg = <0x9e000000 0x2000000>;
+ reg = <0x0 0x9e000000 0x0 0x2000000>;
no-map;
};
};
diff --git a/arch/arc/boot/dts/axc003.dtsi b/arch/arc/boot/dts/axc003.dtsi
index 14df46f141bf..cc9239ef8d08 100644
--- a/arch/arc/boot/dts/axc003.dtsi
+++ b/arch/arc/boot/dts/axc003.dtsi
@@ -14,15 +14,15 @@
/ {
compatible = "snps,arc";
- #address-cells = <1>;
- #size-cells = <1>;
+ #address-cells = <2>;
+ #size-cells = <2>;
cpu_card {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
- ranges = <0x00000000 0xf0000000 0x10000000>;
+ ranges = <0x00000000 0x0 0xf0000000 0x10000000>;
core_clk: core_clk {
#clock-cells = <0>;
@@ -94,30 +94,29 @@
mb_intc: dw-apb-ictl@0xe0012000 {
#interrupt-cells = <1>;
compatible = "snps,dw-apb-ictl";
- reg = < 0xe0012000 0x200 >;
+ reg = < 0x0 0xe0012000 0x0 0x200 >;
interrupt-controller;
interrupt-parent = <&core_intc>;
interrupts = < 24 >;
};
memory {
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x00000000 0x80000000 0x40000000>;
device_type = "memory";
- reg = <0x80000000 0x20000000>; /* 512MiB */
+ /* CONFIG_KERNEL_RAM_BASE_ADDRESS needs to match low mem start */
+ reg = <0x0 0x80000000 0x0 0x20000000 /* 512 MiB low mem */
+ 0x1 0xc0000000 0x0 0x40000000>; /* 1 GiB highmem */
};
reserved-memory {
- #address-cells = <1>;
- #size-cells = <1>;
+ #address-cells = <2>;
+ #size-cells = <2>;
ranges;
/*
* Move frame buffer out of IOC aperture (0x8z-0xAz).
*/
frame_buffer: frame_buffer@be000000 {
compatible = "shared-dma-pool";
- reg = <0xbe000000 0x2000000>;
+ reg = <0x0 0xbe000000 0x0 0x2000000>;
no-map;
};
};
diff --git a/arch/arc/boot/dts/axc003_idu.dtsi b/arch/arc/boot/dts/axc003_idu.dtsi
index 695f9fa1996b..4ebb2170abec 100644
--- a/arch/arc/boot/dts/axc003_idu.dtsi
+++ b/arch/arc/boot/dts/axc003_idu.dtsi
@@ -14,15 +14,15 @@
/ {
compatible = "snps,arc";
- #address-cells = <1>;
- #size-cells = <1>;
+ #address-cells = <2>;
+ #size-cells = <2>;
cpu_card {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
- ranges = <0x00000000 0xf0000000 0x10000000>;
+ ranges = <0x00000000 0x0 0xf0000000 0x10000000>;
core_clk: core_clk {
#clock-cells = <0>;
@@ -100,30 +100,29 @@
mb_intc: dw-apb-ictl@0xe0012000 {
#interrupt-cells = <1>;
compatible = "snps,dw-apb-ictl";
- reg = < 0xe0012000 0x200 >;
+ reg = < 0x0 0xe0012000 0x0 0x200 >;
interrupt-controller;
interrupt-parent = <&idu_intc>;
interrupts = <0>;
};
memory {
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x00000000 0x80000000 0x40000000>;
device_type = "memory";
- reg = <0x80000000 0x20000000>; /* 512MiB */
+ /* CONFIG_KERNEL_RAM_BASE_ADDRESS needs to match low mem start */
+ reg = <0x0 0x80000000 0x0 0x20000000 /* 512 MiB low mem */
+ 0x1 0xc0000000 0x0 0x40000000>; /* 1 GiB highmem */
};
reserved-memory {
- #address-cells = <1>;
- #size-cells = <1>;
+ #address-cells = <2>;
+ #size-cells = <2>;
ranges;
/*
* Move frame buffer out of IOC aperture (0x8z-0xAz).
*/
frame_buffer: frame_buffer@be000000 {
compatible = "shared-dma-pool";
- reg = <0xbe000000 0x2000000>;
+ reg = <0x0 0xbe000000 0x0 0x2000000>;
no-map;
};
};
diff --git a/arch/arc/boot/dts/axs10x_mb.dtsi b/arch/arc/boot/dts/axs10x_mb.dtsi
index 41cfb29b62c1..2367a67c5f10 100644
--- a/arch/arc/boot/dts/axs10x_mb.dtsi
+++ b/arch/arc/boot/dts/axs10x_mb.dtsi
@@ -13,7 +13,7 @@
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
- ranges = <0x00000000 0xe0000000 0x10000000>;
+ ranges = <0x00000000 0x0 0xe0000000 0x10000000>;
interrupt-parent = <&mb_intc>;
i2sclk: i2sclk@100a0 {
@@ -101,7 +101,6 @@
mmc@0x15000 {
compatible = "altr,socfpga-dw-mshc";
reg = < 0x15000 0x400 >;
- num-slots = < 1 >;
fifo-depth = < 16 >;
card-detect-delay = < 200 >;
clocks = <&apbclk>, <&mmcclk>;
diff --git a/arch/arc/boot/dts/vdk_axs10x_mb.dtsi b/arch/arc/boot/dts/vdk_axs10x_mb.dtsi
index 459fc656b759..48bb4b4cd234 100644
--- a/arch/arc/boot/dts/vdk_axs10x_mb.dtsi
+++ b/arch/arc/boot/dts/vdk_axs10x_mb.dtsi
@@ -104,7 +104,6 @@
mmc@0x15000 {
compatible = "snps,dw-mshc";
reg = <0x15000 0x400>;
- num-slots = <1>;
fifo-depth = <1024>;
card-detect-delay = <200>;
clocks = <&apbclk>, <&mmcclk>;
diff --git a/arch/arc/configs/haps_hs_defconfig b/arch/arc/configs/haps_hs_defconfig
index 57b3e599322f..db04ea4dd2d9 100644
--- a/arch/arc/configs/haps_hs_defconfig
+++ b/arch/arc/configs/haps_hs_defconfig
@@ -21,7 +21,6 @@ CONFIG_MODULES=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARC_PLAT_SIM=y
CONFIG_ISA_ARCV2=y
CONFIG_ARC_BUILTIN_DTB_NAME="haps_hs"
CONFIG_PREEMPT=y
diff --git a/arch/arc/configs/haps_hs_smp_defconfig b/arch/arc/configs/haps_hs_smp_defconfig
index f85985adebb2..821a2e562f3f 100644
--- a/arch/arc/configs/haps_hs_smp_defconfig
+++ b/arch/arc/configs/haps_hs_smp_defconfig
@@ -23,7 +23,6 @@ CONFIG_MODULES=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARC_PLAT_SIM=y
CONFIG_ISA_ARCV2=y
CONFIG_SMP=y
CONFIG_ARC_BUILTIN_DTB_NAME="haps_hs_idu"
diff --git a/arch/arc/configs/nps_defconfig b/arch/arc/configs/nps_defconfig
index ede625c76216..7c9c706ae7f6 100644
--- a/arch/arc/configs/nps_defconfig
+++ b/arch/arc/configs/nps_defconfig
@@ -39,7 +39,6 @@ CONFIG_IP_PNP=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
# CONFIG_WIRELESS is not set
diff --git a/arch/arc/configs/nsim_700_defconfig b/arch/arc/configs/nsim_700_defconfig
index b0066a749d4c..6dff83a238b8 100644
--- a/arch/arc/configs/nsim_700_defconfig
+++ b/arch/arc/configs/nsim_700_defconfig
@@ -23,7 +23,6 @@ CONFIG_MODULES=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARC_PLAT_SIM=y
CONFIG_ARC_BUILTIN_DTB_NAME="nsim_700"
CONFIG_PREEMPT=y
# CONFIG_COMPACTION is not set
diff --git a/arch/arc/configs/nsim_hs_defconfig b/arch/arc/configs/nsim_hs_defconfig
index ebe9ebb92933..31ee51b987e7 100644
--- a/arch/arc/configs/nsim_hs_defconfig
+++ b/arch/arc/configs/nsim_hs_defconfig
@@ -26,7 +26,6 @@ CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARC_PLAT_SIM=y
CONFIG_ISA_ARCV2=y
CONFIG_ARC_BUILTIN_DTB_NAME="nsim_hs"
CONFIG_PREEMPT=y
diff --git a/arch/arc/configs/nsim_hs_smp_defconfig b/arch/arc/configs/nsim_hs_smp_defconfig
index 4bde43278be6..8d3b1f67cae4 100644
--- a/arch/arc/configs/nsim_hs_smp_defconfig
+++ b/arch/arc/configs/nsim_hs_smp_defconfig
@@ -24,7 +24,6 @@ CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARC_PLAT_SIM=y
CONFIG_ISA_ARCV2=y
CONFIG_SMP=y
CONFIG_ARC_BUILTIN_DTB_NAME="nsim_hs_idu"
diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig
index f6fb3d26557e..6168ce2ac2ef 100644
--- a/arch/arc/configs/nsimosci_defconfig
+++ b/arch/arc/configs/nsimosci_defconfig
@@ -23,7 +23,6 @@ CONFIG_MODULES=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARC_PLAT_SIM=y
CONFIG_ARC_BUILTIN_DTB_NAME="nsimosci"
# CONFIG_COMPACTION is not set
CONFIG_NET=y
diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig
index b9f0fe00044b..a70bdeb2b3fd 100644
--- a/arch/arc/configs/nsimosci_hs_defconfig
+++ b/arch/arc/configs/nsimosci_hs_defconfig
@@ -23,7 +23,6 @@ CONFIG_MODULES=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARC_PLAT_SIM=y
CONFIG_ISA_ARCV2=y
CONFIG_ARC_BUILTIN_DTB_NAME="nsimosci_hs"
# CONFIG_COMPACTION is not set
diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig
index 155add7761ed..ef96406c446e 100644
--- a/arch/arc/configs/nsimosci_hs_smp_defconfig
+++ b/arch/arc/configs/nsimosci_hs_smp_defconfig
@@ -18,7 +18,6 @@ CONFIG_MODULES=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARC_PLAT_SIM=y
CONFIG_ISA_ARCV2=y
CONFIG_SMP=y
# CONFIG_ARC_TIMERS_64BIT is not set
diff --git a/arch/arc/configs/tb10x_defconfig b/arch/arc/configs/tb10x_defconfig
index 4c5118384eb5..f30182549395 100644
--- a/arch/arc/configs/tb10x_defconfig
+++ b/arch/arc/configs/tb10x_defconfig
@@ -38,7 +38,6 @@ CONFIG_IP_MULTICAST=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
# CONFIG_WIRELESS is not set
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 54b54da6384c..11859287c52a 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -123,6 +123,8 @@ static inline void atomic_set(atomic_t *v, int i)
atomic_ops_unlock(flags);
}
+#define atomic_set_release(v, i) atomic_set((v), (i))
+
#endif
/*
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index 19ebddffb279..02fd1cece6ef 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -96,7 +96,9 @@ extern unsigned long perip_base, perip_end;
#define ARC_REG_SLC_FLUSH 0x904
#define ARC_REG_SLC_INVALIDATE 0x905
#define ARC_REG_SLC_RGN_START 0x914
+#define ARC_REG_SLC_RGN_START1 0x915
#define ARC_REG_SLC_RGN_END 0x916
+#define ARC_REG_SLC_RGN_END1 0x917
/* Bit val in SLC_CONTROL */
#define SLC_CTRL_DIS 0x001
diff --git a/arch/arc/include/asm/futex.h b/arch/arc/include/asm/futex.h
index 11e1b1f3acda..eb887dd13e74 100644
--- a/arch/arc/include/asm/futex.h
+++ b/arch/arc/include/asm/futex.h
@@ -73,20 +73,11 @@
#endif
-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+ u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
- return -EFAULT;
-
#ifndef CONFIG_ARC_HAS_LLSC
preempt_disable(); /* to guarantee atomic r-m-w of futex op */
#endif
@@ -118,30 +109,9 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
preempt_enable();
#endif
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ:
- ret = (oldval == cmparg);
- break;
- case FUTEX_OP_CMP_NE:
- ret = (oldval != cmparg);
- break;
- case FUTEX_OP_CMP_LT:
- ret = (oldval < cmparg);
- break;
- case FUTEX_OP_CMP_GE:
- ret = (oldval >= cmparg);
- break;
- case FUTEX_OP_CMP_LE:
- ret = (oldval <= cmparg);
- break;
- case FUTEX_OP_CMP_GT:
- ret = (oldval > cmparg);
- break;
- default:
- ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
diff --git a/arch/arc/include/asm/mmu.h b/arch/arc/include/asm/mmu.h
index db7319e9b506..efb79fafff1d 100644
--- a/arch/arc/include/asm/mmu.h
+++ b/arch/arc/include/asm/mmu.h
@@ -94,6 +94,8 @@ static inline int is_pae40_enabled(void)
return IS_ENABLED(CONFIG_ARC_HAS_PAE40);
}
+extern int pae40_exist_but_not_enab(void);
+
#endif /* !__ASSEMBLY__ */
#endif
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
index 233d5ffe6ec7..a325e6a36523 100644
--- a/arch/arc/include/asm/spinlock.h
+++ b/arch/arc/include/asm/spinlock.h
@@ -16,11 +16,6 @@
#define arch_spin_is_locked(x) ((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
- smp_cond_load_acquire(&lock->slock, !VAL);
-}
-
#ifdef CONFIG_ARC_HAS_LLSC
static inline void arch_spin_lock(arch_spinlock_t *lock)
diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c
index f928795fd07a..067ea362fb3e 100644
--- a/arch/arc/kernel/intc-arcv2.c
+++ b/arch/arc/kernel/intc-arcv2.c
@@ -75,10 +75,20 @@ void arc_init_IRQ(void)
* Set a default priority for all available interrupts to prevent
* switching of register banks if Fast IRQ and multiple register banks
* are supported by CPU.
+ * Also disable private-per-core IRQ lines so faulty external HW won't
+ * trigger an interrupt that the kernel is not ready to handle.
*/
for (i = NR_EXCEPTIONS; i < irq_bcr.irqs + NR_EXCEPTIONS; i++) {
write_aux_reg(AUX_IRQ_SELECT, i);
write_aux_reg(AUX_IRQ_PRIORITY, ARCV2_IRQ_DEF_PRIO);
+
+ /*
+ * Only mask cpu private IRQs here.
+ * "common" interrupts are masked at IDU, otherwise it would
+ * need to be unmasked at each cpu, with IPIs
+ */
+ if (i < FIRST_EXT_IRQ)
+ write_aux_reg(AUX_IRQ_ENABLE, 0);
}
/* setup status32, don't enable intr yet as kernel doesn't want */
diff --git a/arch/arc/kernel/intc-compact.c b/arch/arc/kernel/intc-compact.c
index 7e608c6b0a01..47b421fa0147 100644
--- a/arch/arc/kernel/intc-compact.c
+++ b/arch/arc/kernel/intc-compact.c
@@ -27,7 +27,7 @@
*/
void arc_init_IRQ(void)
{
- int level_mask = 0;
+ unsigned int level_mask = 0, i;
/* Is timer high priority Interrupt (Level2 in ARCompact jargon) */
level_mask |= IS_ENABLED(CONFIG_ARC_COMPACT_IRQ_LEVELS) << TIMER0_IRQ;
@@ -40,6 +40,18 @@ void arc_init_IRQ(void)
if (level_mask)
pr_info("Level-2 interrupts bitset %x\n", level_mask);
+
+ /*
+ * Disable all IRQ lines so faulty external hardware won't
+ * trigger an interrupt that the kernel is not ready to handle.
+ */
+ for (i = TIMER0_IRQ; i < NR_CPU_IRQS; i++) {
+ unsigned int ienb;
+
+ ienb = read_aux_reg(AUX_IENABLE);
+ ienb &= ~(1 << i);
+ write_aux_reg(AUX_IENABLE, ienb);
+ }
}
/*
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index a867575a758b..7db283b46ebd 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -665,6 +665,7 @@ noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
static DEFINE_SPINLOCK(lock);
unsigned long flags;
unsigned int ctrl;
+ phys_addr_t end;
spin_lock_irqsave(&lock, flags);
@@ -694,8 +695,19 @@ noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
* END needs to be setup before START (latter triggers the operation)
* END can't be same as START, so add (l2_line_sz - 1) to sz
*/
- write_aux_reg(ARC_REG_SLC_RGN_END, (paddr + sz + l2_line_sz - 1));
- write_aux_reg(ARC_REG_SLC_RGN_START, paddr);
+ end = paddr + sz + l2_line_sz - 1;
+ if (is_pae40_enabled())
+ write_aux_reg(ARC_REG_SLC_RGN_END1, upper_32_bits(end));
+
+ write_aux_reg(ARC_REG_SLC_RGN_END, lower_32_bits(end));
+
+ if (is_pae40_enabled())
+ write_aux_reg(ARC_REG_SLC_RGN_START1, upper_32_bits(paddr));
+
+ write_aux_reg(ARC_REG_SLC_RGN_START, lower_32_bits(paddr));
+
+ /* Make sure "busy" bit reports correct stataus, see STAR 9001165532 */
+ read_aux_reg(ARC_REG_SLC_CTRL);
while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);
@@ -1111,6 +1123,13 @@ noinline void __init arc_ioc_setup(void)
__dc_enable();
}
+/*
+ * Cache related boot time checks/setups only needed on master CPU:
+ * - Geometry checks (kernel build and hardware agree: e.g. L1_CACHE_BYTES)
+ * Assume SMP only, so all cores will have same cache config. A check on
+ * one core suffices for all
+ * - IOC setup / dma callbacks only need to be done once
+ */
void __init arc_cache_init_master(void)
{
unsigned int __maybe_unused cpu = smp_processor_id();
@@ -1190,12 +1209,27 @@ void __ref arc_cache_init(void)
printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
- /*
- * Only master CPU needs to execute rest of function:
- * - Assume SMP so all cores will have same cache config so
- * any geomtry checks will be same for all
- * - IOC setup / dma callbacks only need to be setup once
- */
if (!cpu)
arc_cache_init_master();
+
+ /*
+ * In the PAE regime, TLB and cache maintenance ops take wider addresses.
+ * Even if PAE is not enabled in the kernel, the upper 32 bits still need
+ * to be zeroed to keep the ops sane.
+ * As an optimization for the more common !PAE case, zero them out once
+ * at init rather than checking/setting them to 0 for every runtime op.
+ */
+ if (is_isa_arcv2() && pae40_exist_but_not_enab()) {
+
+ if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE))
+ write_aux_reg(ARC_REG_IC_PTAG_HI, 0);
+
+ if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE))
+ write_aux_reg(ARC_REG_DC_PTAG_HI, 0);
+
+ if (l2_line_sz) {
+ write_aux_reg(ARC_REG_SLC_RGN_END1, 0);
+ write_aux_reg(ARC_REG_SLC_RGN_START1, 0);
+ }
+ }
}
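
The PAE40 handling added to slc_op() and arc_cache_init() above programs the new *_HI / *1 aux registers with the top half of a 40-bit physical address and the existing registers with the bottom half. The helpers it relies on are the generic ones; for reference, they are defined in include/linux/kernel.h roughly as follows (the split shift in upper_32_bits avoids shifting a 32-bit value by 32 when PAE is compiled out):

#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
#define lower_32_bits(n) ((u32)(n))
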
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 2a07e6ecafbd..e9d93604ad0f 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -117,7 +117,7 @@ static int arc_dma_mmap(struct device *dev, struct vm_area_struct *vma,
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+ if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
return ret;
if (off < count && user_count <= (count - off)) {
@@ -153,6 +153,19 @@ static void _dma_cache_sync(phys_addr_t paddr, size_t size,
}
}
+/*
+ * arc_dma_map_page - map a portion of a page for streaming DMA
+ *
+ * Ensure that any data held in the cache is appropriately discarded
+ * or written back.
+ *
+ * The device owns this memory once this call has completed. The CPU
+ * can regain ownership by calling dma_unmap_page().
+ *
+ * Note: while it takes struct page as arg, caller can "abuse" it to pass
+ * a region larger than PAGE_SIZE, provided it is physically contiguous
+ * and this still works correctly
+ */
static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
unsigned long attrs)
@@ -165,6 +178,24 @@ static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
return plat_phys_to_dma(dev, paddr);
}
+/*
+ * arc_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
+ *
+ * After this call, reads by the CPU to the buffer are guaranteed to see
+ * whatever the device wrote there.
+ *
+ * Note: historically this routine was not implemented for ARC
+ */
+static void arc_dma_unmap_page(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ phys_addr_t paddr = plat_dma_to_phys(dev, handle);
+
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ _dma_cache_sync(paddr, size, dir);
+}
+
static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir, unsigned long attrs)
{
@@ -178,6 +209,18 @@ static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg,
return nents;
}
+static void arc_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nents, i)
+ arc_dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir,
+ attrs);
+}
+
static void arc_dma_sync_single_for_cpu(struct device *dev,
dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
@@ -223,7 +266,9 @@ const struct dma_map_ops arc_dma_ops = {
.free = arc_dma_free,
.mmap = arc_dma_mmap,
.map_page = arc_dma_map_page,
+ .unmap_page = arc_dma_unmap_page,
.map_sg = arc_dma_map_sg,
+ .unmap_sg = arc_dma_unmap_sg,
.sync_single_for_device = arc_dma_sync_single_for_device,
.sync_single_for_cpu = arc_dma_sync_single_for_cpu,
.sync_sg_for_cpu = arc_dma_sync_sg_for_cpu,
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index d0126fdfe2d8..b181f3ee38aa 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -104,6 +104,8 @@
/* A copy of the ASID from the PID reg is kept in asid_cache */
DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;
+static int __read_mostly pae_exists;
+
/*
* Utility Routine to erase a J-TLB entry
* Caller needs to setup Index Reg (manually or via getIndex)
@@ -784,7 +786,7 @@ void read_decode_mmu_bcr(void)
mmu->u_dtlb = mmu4->u_dtlb * 4;
mmu->u_itlb = mmu4->u_itlb * 4;
mmu->sasid = mmu4->sasid;
- mmu->pae = mmu4->pae;
+ pae_exists = mmu->pae = mmu4->pae;
}
}
@@ -809,6 +811,11 @@ char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
return buf;
}
+int pae40_exist_but_not_enab(void)
+{
+ return pae_exists && !is_pae40_enabled();
+}
+
void arc_mmu_init(void)
{
char str[256];
@@ -859,6 +866,9 @@ void arc_mmu_init(void)
/* swapper_pg_dir is the pgd for the kernel, used by vmalloc */
write_aux_reg(ARC_REG_SCRATCH_DATA0, swapper_pg_dir);
#endif
+
+ if (pae40_exist_but_not_enab())
+ write_aux_reg(ARC_REG_TLBPD1HI, 0);
}
/*
diff --git a/arch/arc/plat-sim/Kconfig b/arch/arc/plat-sim/Kconfig
deleted file mode 100644
index ac6af96a82f3..000000000000
--- a/arch/arc/plat-sim/Kconfig
+++ /dev/null
@@ -1,13 +0,0 @@
-#
-# Copyright (C) 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-
-menuconfig ARC_PLAT_SIM
- bool "ARC nSIM based simulation virtual platforms"
- help
- Support for nSIM based ARC simulation platforms
- This includes the standalone nSIM (uart only) vs. System C OSCI VP
diff --git a/arch/arc/plat-sim/platform.c b/arch/arc/plat-sim/platform.c
index aea87389e44b..5cda56b1a2ea 100644
--- a/arch/arc/plat-sim/platform.c
+++ b/arch/arc/plat-sim/platform.c
@@ -20,11 +20,14 @@
*/
static const char *simulation_compat[] __initconst = {
+#ifdef CONFIG_ISA_ARCOMPACT
"snps,nsim",
- "snps,nsim_hs",
"snps,nsimosci",
+#else
+ "snps,nsim_hs",
"snps,nsimosci_hs",
"snps,zebu_hs",
+#endif
NULL,
};
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index a208bfe367b5..f1b3f1d575d4 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -50,7 +50,7 @@ config ARM
select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
select HAVE_ARCH_TRACEHOOK
select HAVE_ARM_SMCCC if CPU_V7
- select HAVE_CBPF_JIT
+ select HAVE_EBPF_JIT if !CPU_ENDIAN_BE32
select HAVE_CC_STACKPROTECTOR
select HAVE_CONTEXT_TRACKING
select HAVE_C_RECORDMCOUNT
@@ -380,7 +380,7 @@ config ARCH_EP93XX
bool "EP93xx-based"
select ARCH_HAS_HOLES_MEMORYMODEL
select ARM_AMBA
- select ARM_PATCH_PHYS_VIRT
+ imply ARM_PATCH_PHYS_VIRT
select ARM_VIC
select AUTO_ZRELADDR
select CLKDEV_LOOKUP
diff --git a/arch/arm/boot/compressed/efi-header.S b/arch/arm/boot/compressed/efi-header.S
index a17ca8d78656..c94a88ae834d 100644
--- a/arch/arm/boot/compressed/efi-header.S
+++ b/arch/arm/boot/compressed/efi-header.S
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013-2015 Linaro Ltd
+ * Copyright (C) 2013-2017 Linaro Ltd
* Authors: Roy Franz <roy.franz@linaro.org>
* Ard Biesheuvel <ard.biesheuvel@linaro.org>
*
@@ -8,6 +8,9 @@
* published by the Free Software Foundation.
*/
+#include <linux/pe.h>
+#include <linux/sizes.h>
+
.macro __nop
#ifdef CONFIG_EFI_STUB
@ This is almost but not quite a NOP, since it does clobber the
@@ -15,7 +18,7 @@
@ PE/COFF expects the magic string "MZ" at offset 0, while the
@ ARM/Linux boot protocol expects an executable instruction
@ there.
- .inst 'M' | ('Z' << 8) | (0x1310 << 16) @ tstne r0, #0x4d000
+ .inst MZ_MAGIC | (0x1310 << 16) @ tstne r0, #0x4d000
#else
AR_CLASS( mov r0, r0 )
M_CLASS( nop.w )
@@ -34,96 +37,97 @@
@ The only 2 fields of the MSDOS header that are used are this
@ PE/COFF offset, and the "MZ" bytes at offset 0x0.
@
- .long pe_header - start @ Offset to the PE header.
+ .long pe_header - start @ Offset to the PE header.
pe_header:
- .ascii "PE\0\0"
+ .long PE_MAGIC
coff_header:
- .short 0x01c2 @ ARM or Thumb
- .short 2 @ nr_sections
- .long 0 @ TimeDateStamp
- .long 0 @ PointerToSymbolTable
- .long 1 @ NumberOfSymbols
- .short section_table - optional_header
- @ SizeOfOptionalHeader
- .short 0x306 @ Characteristics.
- @ IMAGE_FILE_32BIT_MACHINE |
- @ IMAGE_FILE_DEBUG_STRIPPED |
- @ IMAGE_FILE_EXECUTABLE_IMAGE |
- @ IMAGE_FILE_LINE_NUMS_STRIPPED
+ .short IMAGE_FILE_MACHINE_THUMB @ Machine
+ .short section_count @ NumberOfSections
+ .long 0 @ TimeDateStamp
+ .long 0 @ PointerToSymbolTable
+ .long 0 @ NumberOfSymbols
+ .short section_table - optional_header @ SizeOfOptionalHeader
+ .short IMAGE_FILE_32BIT_MACHINE | \
+ IMAGE_FILE_DEBUG_STRIPPED | \
+ IMAGE_FILE_EXECUTABLE_IMAGE | \
+ IMAGE_FILE_LINE_NUMS_STRIPPED @ Characteristics
+
+#define __pecoff_code_size (__pecoff_data_start - __efi_start)
optional_header:
- .short 0x10b @ PE32 format
- .byte 0x02 @ MajorLinkerVersion
- .byte 0x14 @ MinorLinkerVersion
- .long _end - __efi_start @ SizeOfCode
- .long 0 @ SizeOfInitializedData
- .long 0 @ SizeOfUninitializedData
- .long efi_stub_entry - start @ AddressOfEntryPoint
- .long start_offset @ BaseOfCode
- .long 0 @ data
+ .short PE_OPT_MAGIC_PE32 @ PE32 format
+ .byte 0x02 @ MajorLinkerVersion
+ .byte 0x14 @ MinorLinkerVersion
+ .long __pecoff_code_size @ SizeOfCode
+ .long __pecoff_data_size @ SizeOfInitializedData
+ .long 0 @ SizeOfUninitializedData
+ .long efi_stub_entry - start @ AddressOfEntryPoint
+ .long start_offset @ BaseOfCode
+ .long __pecoff_data_start - start @ BaseOfData
extra_header_fields:
- .long 0 @ ImageBase
- .long 0x200 @ SectionAlignment
- .long 0x200 @ FileAlignment
- .short 0 @ MajorOperatingSystemVersion
- .short 0 @ MinorOperatingSystemVersion
- .short 0 @ MajorImageVersion
- .short 0 @ MinorImageVersion
- .short 0 @ MajorSubsystemVersion
- .short 0 @ MinorSubsystemVersion
- .long 0 @ Win32VersionValue
+ .long 0 @ ImageBase
+ .long SZ_4K @ SectionAlignment
+ .long SZ_512 @ FileAlignment
+ .short 0 @ MajorOsVersion
+ .short 0 @ MinorOsVersion
+ .short 0 @ MajorImageVersion
+ .short 0 @ MinorImageVersion
+ .short 0 @ MajorSubsystemVersion
+ .short 0 @ MinorSubsystemVersion
+ .long 0 @ Win32VersionValue
- .long _end - start @ SizeOfImage
- .long start_offset @ SizeOfHeaders
- .long 0 @ CheckSum
- .short 0xa @ Subsystem (EFI application)
- .short 0 @ DllCharacteristics
- .long 0 @ SizeOfStackReserve
- .long 0 @ SizeOfStackCommit
- .long 0 @ SizeOfHeapReserve
- .long 0 @ SizeOfHeapCommit
- .long 0 @ LoaderFlags
- .long 0x6 @ NumberOfRvaAndSizes
+ .long __pecoff_end - start @ SizeOfImage
+ .long start_offset @ SizeOfHeaders
+ .long 0 @ CheckSum
+ .short IMAGE_SUBSYSTEM_EFI_APPLICATION @ Subsystem
+ .short 0 @ DllCharacteristics
+ .long 0 @ SizeOfStackReserve
+ .long 0 @ SizeOfStackCommit
+ .long 0 @ SizeOfHeapReserve
+ .long 0 @ SizeOfHeapCommit
+ .long 0 @ LoaderFlags
+ .long (section_table - .) / 8 @ NumberOfRvaAndSizes
- .quad 0 @ ExportTable
- .quad 0 @ ImportTable
- .quad 0 @ ResourceTable
- .quad 0 @ ExceptionTable
- .quad 0 @ CertificationTable
- .quad 0 @ BaseRelocationTable
+ .quad 0 @ ExportTable
+ .quad 0 @ ImportTable
+ .quad 0 @ ResourceTable
+ .quad 0 @ ExceptionTable
+ .quad 0 @ CertificationTable
+ .quad 0 @ BaseRelocationTable
section_table:
- @
- @ The EFI application loader requires a relocation section
- @ because EFI applications must be relocatable. This is a
- @ dummy section as far as we are concerned.
- @
- .ascii ".reloc\0\0"
- .long 0 @ VirtualSize
- .long 0 @ VirtualAddress
- .long 0 @ SizeOfRawData
- .long 0 @ PointerToRawData
- .long 0 @ PointerToRelocations
- .long 0 @ PointerToLineNumbers
- .short 0 @ NumberOfRelocations
- .short 0 @ NumberOfLineNumbers
- .long 0x42100040 @ Characteristics
-
.ascii ".text\0\0\0"
- .long _end - __efi_start @ VirtualSize
- .long __efi_start @ VirtualAddress
- .long _edata - __efi_start @ SizeOfRawData
- .long __efi_start @ PointerToRawData
- .long 0 @ PointerToRelocations
- .long 0 @ PointerToLineNumbers
- .short 0 @ NumberOfRelocations
- .short 0 @ NumberOfLineNumbers
- .long 0xe0500020 @ Characteristics
+ .long __pecoff_code_size @ VirtualSize
+ .long __efi_start @ VirtualAddress
+ .long __pecoff_code_size @ SizeOfRawData
+ .long __efi_start @ PointerToRawData
+ .long 0 @ PointerToRelocations
+ .long 0 @ PointerToLineNumbers
+ .short 0 @ NumberOfRelocations
+ .short 0 @ NumberOfLineNumbers
+ .long IMAGE_SCN_CNT_CODE | \
+ IMAGE_SCN_MEM_READ | \
+ IMAGE_SCN_MEM_EXECUTE @ Characteristics
+
+ .ascii ".data\0\0\0"
+ .long __pecoff_data_size @ VirtualSize
+ .long __pecoff_data_start - start @ VirtualAddress
+ .long __pecoff_data_rawsize @ SizeOfRawData
+ .long __pecoff_data_start - start @ PointerToRawData
+ .long 0 @ PointerToRelocations
+ .long 0 @ PointerToLineNumbers
+ .short 0 @ NumberOfRelocations
+ .short 0 @ NumberOfLineNumbers
+ .long IMAGE_SCN_CNT_INITIALIZED_DATA | \
+ IMAGE_SCN_MEM_READ | \
+ IMAGE_SCN_MEM_WRITE @ Characteristics
+
+ .set section_count, (. - section_table) / 40
- .align 9
+ .align 12
__efi_start:
#endif
.endm
diff --git a/arch/arm/boot/compressed/vmlinux.lds.S b/arch/arm/boot/compressed/vmlinux.lds.S
index 81c493156ce8..7a4c59154361 100644
--- a/arch/arm/boot/compressed/vmlinux.lds.S
+++ b/arch/arm/boot/compressed/vmlinux.lds.S
@@ -48,13 +48,6 @@ SECTIONS
*(.rodata)
*(.rodata.*)
}
- .data : {
- /*
- * The EFI stub always executes from RAM, and runs strictly before the
- * decompressor, so we can make an exception for its r/w data, and keep it
- */
- *(.data.efistub)
- }
.piggydata : {
*(.piggydata)
}
@@ -70,6 +63,26 @@ SECTIONS
/* ensure the zImage file size is always a multiple of 64 bits */
/* (without a dummy byte, ld just ignores the empty section) */
.pad : { BYTE(0); . = ALIGN(8); }
+
+#ifdef CONFIG_EFI_STUB
+ .data : ALIGN(4096) {
+ __pecoff_data_start = .;
+ /*
+ * The EFI stub always executes from RAM, and runs strictly before the
+ * decompressor, so we can make an exception for its r/w data, and keep it
+ */
+ *(.data.efistub)
+ __pecoff_data_end = .;
+
+ /*
+ * PE/COFF mandates a file size which is a multiple of 512 bytes if the
+ * section size equals or exceeds 4 KB
+ */
+ . = ALIGN(512);
+ }
+ __pecoff_data_rawsize = . - ADDR(.data);
+#endif
+
_edata = .;
_magic_sig = ZIMAGE_MAGIC(0x016f2818);
@@ -84,6 +97,9 @@ SECTIONS
. = ALIGN(8); /* the stack must be 64-bit aligned */
.stack : { *(.stack) }
+ PROVIDE(__pecoff_data_size = ALIGN(512) - ADDR(.data));
+ PROVIDE(__pecoff_end = ALIGN(512));
+
.stab 0 : { *(.stab) }
.stabstr 0 : { *(.stabstr) }
.stab.excl 0 : { *(.stab.excl) }
diff --git a/arch/arm/boot/dts/armada-388-gp.dts b/arch/arm/boot/dts/armada-388-gp.dts
index 895fa6cfa15a..563901e0ec07 100644
--- a/arch/arm/boot/dts/armada-388-gp.dts
+++ b/arch/arm/boot/dts/armada-388-gp.dts
@@ -75,7 +75,7 @@
pinctrl-names = "default";
pinctrl-0 = <&pca0_pins>;
interrupt-parent = <&gpio0>;
- interrupts = <18 IRQ_TYPE_EDGE_FALLING>;
+ interrupts = <18 IRQ_TYPE_LEVEL_LOW>;
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
@@ -87,7 +87,7 @@
compatible = "nxp,pca9555";
pinctrl-names = "default";
interrupt-parent = <&gpio0>;
- interrupts = <18 IRQ_TYPE_EDGE_FALLING>;
+ interrupts = <18 IRQ_TYPE_LEVEL_LOW>;
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
diff --git a/arch/arm/boot/dts/da850-evm.dts b/arch/arm/boot/dts/da850-evm.dts
index a423e8ebfb37..67e72bc72e80 100644
--- a/arch/arm/boot/dts/da850-evm.dts
+++ b/arch/arm/boot/dts/da850-evm.dts
@@ -301,25 +301,4 @@
pinctrl-names = "default";
pinctrl-0 = <&vpif_capture_pins>, <&vpif_display_pins>;
status = "okay";
-
- /* VPIF capture port */
- port@0 {
- vpif_input_ch0: endpoint@0 {
- reg = <0>;
- bus-width = <8>;
- };
-
- vpif_input_ch1: endpoint@1 {
- reg = <1>;
- bus-width = <8>;
- data-shift = <8>;
- };
- };
-
- /* VPIF display port */
- port@1 {
- vpif_output_ch0: endpoint {
- bus-width = <8>;
- };
- };
};
diff --git a/arch/arm/boot/dts/da850-lcdk.dts b/arch/arm/boot/dts/da850-lcdk.dts
index b837fec70eec..a0f0916156e6 100644
--- a/arch/arm/boot/dts/da850-lcdk.dts
+++ b/arch/arm/boot/dts/da850-lcdk.dts
@@ -318,11 +318,4 @@
pinctrl-names = "default";
pinctrl-0 = <&vpif_capture_pins>;
status = "okay";
-
- /* VPIF capture port */
- port {
- vpif_ch0: endpoint {
- bus-width = <8>;
- };
- };
};
diff --git a/arch/arm/boot/dts/dm8168-evm.dts b/arch/arm/boot/dts/dm8168-evm.dts
index 1865976db5f9..c72a2132aa82 100644
--- a/arch/arm/boot/dts/dm8168-evm.dts
+++ b/arch/arm/boot/dts/dm8168-evm.dts
@@ -68,6 +68,34 @@
DM816X_IOPAD(0x0d08, MUX_MODE0) /* USB1_DRVVBUS */
>;
};
+
+ nandflash_pins: nandflash_pins {
+ pinctrl-single,pins = <
+ DM816X_IOPAD(0x0b38, PULL_UP | MUX_MODE0) /* PINCTRL207 GPMC_CS0*/
+ DM816X_IOPAD(0x0b60, PULL_ENA | MUX_MODE0) /* PINCTRL217 GPMC_ADV_ALE */
+ DM816X_IOPAD(0x0b54, PULL_UP | PULL_ENA | MUX_MODE0) /* PINCTRL214 GPMC_OE_RE */
+ DM816X_IOPAD(0x0b58, PULL_ENA | MUX_MODE0) /* PINCTRL215 GPMC_BE0_CLE */
+ DM816X_IOPAD(0x0b50, PULL_UP | MUX_MODE0) /* PINCTRL213 GPMC_WE */
+ DM816X_IOPAD(0x0b6c, MUX_MODE0) /* PINCTRL220 GPMC_WAIT */
+ DM816X_IOPAD(0x0be4, PULL_ENA | MUX_MODE0) /* PINCTRL250 GPMC_CLK */
+ DM816X_IOPAD(0x0ba4, MUX_MODE0) /* PINCTRL234 GPMC_D0 */
+ DM816X_IOPAD(0x0ba8, MUX_MODE0) /* PINCTRL234 GPMC_D1 */
+ DM816X_IOPAD(0x0bac, MUX_MODE0) /* PINCTRL234 GPMC_D2 */
+ DM816X_IOPAD(0x0bb0, MUX_MODE0) /* PINCTRL234 GPMC_D3 */
+ DM816X_IOPAD(0x0bb4, MUX_MODE0) /* PINCTRL234 GPMC_D4 */
+ DM816X_IOPAD(0x0bb8, MUX_MODE0) /* PINCTRL234 GPMC_D5 */
+ DM816X_IOPAD(0x0bbc, MUX_MODE0) /* PINCTRL234 GPMC_D6 */
+ DM816X_IOPAD(0x0bc0, MUX_MODE0) /* PINCTRL234 GPMC_D7 */
+ DM816X_IOPAD(0x0bc4, MUX_MODE0) /* PINCTRL234 GPMC_D8 */
+ DM816X_IOPAD(0x0bc8, MUX_MODE0) /* PINCTRL234 GPMC_D9 */
+ DM816X_IOPAD(0x0bcc, MUX_MODE0) /* PINCTRL234 GPMC_D10 */
+ DM816X_IOPAD(0x0bd0, MUX_MODE0) /* PINCTRL234 GPMC_D11 */
+ DM816X_IOPAD(0x0bd4, MUX_MODE0) /* PINCTRL234 GPMC_D12 */
+ DM816X_IOPAD(0x0bd8, MUX_MODE0) /* PINCTRL234 GPMC_D13 */
+ DM816X_IOPAD(0x0bdc, MUX_MODE0) /* PINCTRL234 GPMC_D14 */
+ DM816X_IOPAD(0x0be0, MUX_MODE0) /* PINCTRL234 GPMC_D15 */
+ >;
+ };
};
&i2c1 {
@@ -90,6 +118,8 @@
&gpmc {
ranges = <0 0 0x04000000 0x01000000>; /* CS0: 16MB for NAND */
+ pinctrl-names = "default";
+ pinctrl-0 = <&nandflash_pins>;
nand@0,0 {
compatible = "ti,omap2-nand";
@@ -98,9 +128,11 @@
interrupt-parent = <&gpmc>;
interrupts = <0 IRQ_TYPE_NONE>, /* fifoevent */
<1 IRQ_TYPE_NONE>; /* termcount */
+ rb-gpios = <&gpmc 0 GPIO_ACTIVE_HIGH>; /* gpmc_wait0 */
#address-cells = <1>;
#size-cells = <1>;
ti,nand-ecc-opt = "bch8";
+ ti,elm-id = <&elm>;
nand-bus-width = <16>;
gpmc,device-width = <2>;
gpmc,sync-clk-ps = <0>;
@@ -164,7 +196,7 @@
vmmc-supply = <&vmmcsd_fixed>;
bus-width = <4>;
cd-gpios = <&gpio2 7 GPIO_ACTIVE_LOW>;
- wp-gpios = <&gpio2 8 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio2 8 GPIO_ACTIVE_HIGH>;
};
/* At least dm8168-evm rev c won't support multipoint, later may */
diff --git a/arch/arm/boot/dts/dm816x.dtsi b/arch/arm/boot/dts/dm816x.dtsi
index 59cbf958fcc3..566b2a8c8b96 100644
--- a/arch/arm/boot/dts/dm816x.dtsi
+++ b/arch/arm/boot/dts/dm816x.dtsi
@@ -145,7 +145,7 @@
};
elm: elm@48080000 {
- compatible = "ti,816-elm";
+ compatible = "ti,am3352-elm";
ti,hwmods = "elm";
reg = <0x48080000 0x2000>;
interrupts = <4>;
diff --git a/arch/arm/boot/dts/dra71-evm.dts b/arch/arm/boot/dts/dra71-evm.dts
index 4d57a55473af..a6298eb56978 100644
--- a/arch/arm/boot/dts/dra71-evm.dts
+++ b/arch/arm/boot/dts/dra71-evm.dts
@@ -190,7 +190,7 @@
ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_25_NS>;
ti,tx-internal-delay = <DP83867_RGMIIDCTL_250_PS>;
ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_8_B_NIB>;
- ti,impedance-control = <0x1f>;
+ ti,min-output-impedance;
};
dp83867_1: ethernet-phy@3 {
@@ -198,7 +198,7 @@
ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_25_NS>;
ti,tx-internal-delay = <DP83867_RGMIIDCTL_250_PS>;
ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_8_B_NIB>;
- ti,impedance-control = <0x1f>;
+ ti,min-output-impedance;
};
};
diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi
index 497a9470c888..5739389f5bb8 100644
--- a/arch/arm/boot/dts/exynos4.dtsi
+++ b/arch/arm/boot/dts/exynos4.dtsi
@@ -59,6 +59,9 @@
compatible = "samsung,exynos4210-audss-clock";
reg = <0x03810000 0x0C>;
#clock-cells = <1>;
+ clocks = <&clock CLK_FIN_PLL>, <&clock CLK_FOUT_EPLL>,
+ <&clock CLK_SCLK_AUDIO0>, <&clock CLK_SCLK_AUDIO0>;
+ clock-names = "pll_ref", "pll_in", "sclk_audio", "sclk_pcm_in";
};
i2s0: i2s@03830000 {
diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
index f92f95741207..a183b56283f8 100644
--- a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
+++ b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
@@ -266,6 +266,7 @@
&hdmicec {
status = "okay";
+ needs-hpd;
};
&hsi2c_4 {
diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi
index dfcc8e00cf1c..0ade3619f3c3 100644
--- a/arch/arm/boot/dts/imx25.dtsi
+++ b/arch/arm/boot/dts/imx25.dtsi
@@ -297,6 +297,7 @@
#address-cells = <1>;
#size-cells = <1>;
status = "disabled";
+ ranges;
adc: adc@50030800 {
compatible = "fsl,imx25-gcq";
diff --git a/arch/arm/boot/dts/imx6q-evi.dts b/arch/arm/boot/dts/imx6q-evi.dts
index 1f0f950dc11e..e0aea782c666 100644
--- a/arch/arm/boot/dts/imx6q-evi.dts
+++ b/arch/arm/boot/dts/imx6q-evi.dts
@@ -94,6 +94,15 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_ecspi1 &pinctrl_ecspi1cs>;
status = "okay";
+
+ fpga: fpga@0 {
+ compatible = "altr,fpga-passive-serial";
+ spi-max-frequency = <20000000>;
+ reg = <0>;
+ pinctrl-0 = <&pinctrl_fpgaspi>;
+ nconfig-gpios = <&gpio4 9 GPIO_ACTIVE_LOW>;
+ nstat-gpios = <&gpio4 11 GPIO_ACTIVE_LOW>;
+ };
};
&ecspi3 {
@@ -319,6 +328,13 @@
>;
};
+ pinctrl_fpgaspi: fpgaspigrp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_ROW1__GPIO4_IO09 0x1b0b0
+ MX6QDL_PAD_KEY_ROW2__GPIO4_IO11 0x1b0b0
+ >;
+ };
+
pinctrl_gpminand: gpminandgrp {
fsl,pins = <
MX6QDL_PAD_NANDF_CLE__NAND_CLE 0xb0b1
diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi
index aeaa5a6e4fcf..a24e4f1911ab 100644
--- a/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi
@@ -507,7 +507,7 @@
pinctrl_pcie: pciegrp {
fsl,pins = <
/* PCIe reset */
- MX6QDL_PAD_EIM_BCLK__GPIO6_IO31 0x030b0
+ MX6QDL_PAD_EIM_DA0__GPIO3_IO00 0x030b0
MX6QDL_PAD_EIM_DA4__GPIO3_IO04 0x030b0
>;
};
@@ -668,7 +668,7 @@
&pcie {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pcie>;
- reset-gpio = <&gpio6 31 GPIO_ACTIVE_LOW>;
+ reset-gpio = <&gpio3 0 GPIO_ACTIVE_LOW>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx7d-sdb.dts b/arch/arm/boot/dts/imx7d-sdb.dts
index 54c45402286b..0a24d1bf3c39 100644
--- a/arch/arm/boot/dts/imx7d-sdb.dts
+++ b/arch/arm/boot/dts/imx7d-sdb.dts
@@ -557,6 +557,14 @@
>;
};
+ pinctrl_spi4: spi4grp {
+ fsl,pins = <
+ MX7D_PAD_GPIO1_IO09__GPIO1_IO9 0x59
+ MX7D_PAD_GPIO1_IO12__GPIO1_IO12 0x59
+ MX7D_PAD_GPIO1_IO13__GPIO1_IO13 0x59
+ >;
+ };
+
pinctrl_tsc2046_pendown: tsc2046_pendown {
fsl,pins = <
MX7D_PAD_EPDC_BDR1__GPIO2_IO29 0x59
@@ -697,13 +705,5 @@
fsl,pins = <
MX7D_PAD_LPSR_GPIO1_IO01__PWM1_OUT 0x110b0
>;
-
- pinctrl_spi4: spi4grp {
- fsl,pins = <
- MX7D_PAD_GPIO1_IO09__GPIO1_IO9 0x59
- MX7D_PAD_GPIO1_IO12__GPIO1_IO12 0x59
- MX7D_PAD_GPIO1_IO13__GPIO1_IO13 0x59
- >;
- };
};
};
diff --git a/arch/arm/boot/dts/imx7ulp-pinfunc.h b/arch/arm/boot/dts/imx7ulp-pinfunc.h
new file mode 100644
index 000000000000..fe511775b518
--- /dev/null
+++ b/arch/arm/boot/dts/imx7ulp-pinfunc.h
@@ -0,0 +1,468 @@
+/*
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __DTS_IMX7ULP_PINFUNC_H
+#define __DTS_IMX7ULP_PINFUNC_H
+
+/*
+ * The pin function ID is a tuple of
+ * <mux_conf_reg input_reg mux_mode input_val>
+ */
+
+#define IMX7ULP_PAD_PTC0__PTC0 0x0000 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTC0__TRACE_D15 0x0000 0x0000 0xa 0x0
+#define IMX7ULP_PAD_PTC0__LPUART4_CTS_B 0x0000 0x0244 0x4 0x1
+#define IMX7ULP_PAD_PTC0__LPI2C4_SCL 0x0000 0x0278 0x5 0x1
+#define IMX7ULP_PAD_PTC0__TPM4_CLKIN 0x0000 0x0298 0x6 0x1
+#define IMX7ULP_PAD_PTC0__FB_AD0 0x0000 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTC1__PTC1 0x0004 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTC1__TRACE_D14 0x0004 0x0000 0xa 0x0
+#define IMX7ULP_PAD_PTC1__LPUART4_RTS_B 0x0004 0x0000 0x4 0x0
+#define IMX7ULP_PAD_PTC1__LPI2C4_SDA 0x0004 0x027c 0x5 0x1
+#define IMX7ULP_PAD_PTC1__TPM4_CH0 0x0004 0x0280 0x6 0x1
+#define IMX7ULP_PAD_PTC1__FB_AD1 0x0004 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTC2__PTC2 0x0008 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTC2__TRACE_D13 0x0008 0x0000 0xa 0x0
+#define IMX7ULP_PAD_PTC2__LPUART4_TX 0x0008 0x024c 0x4 0x1
+#define IMX7ULP_PAD_PTC2__LPI2C4_HREQ 0x0008 0x0274 0x5 0x1
+#define IMX7ULP_PAD_PTC2__TPM4_CH1 0x0008 0x0284 0x6 0x1
+#define IMX7ULP_PAD_PTC2__FB_AD2 0x0008 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTC3__PTC3 0x000c 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTC3__TRACE_D12 0x000c 0x0000 0xa 0x0
+#define IMX7ULP_PAD_PTC3__LPUART4_RX 0x000c 0x0248 0x4 0x1
+#define IMX7ULP_PAD_PTC3__TPM4_CH2 0x000c 0x0288 0x6 0x1
+#define IMX7ULP_PAD_PTC3__FB_AD3 0x000c 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTC4__PTC4 0x0010 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTC4__TRACE_D11 0x0010 0x0000 0xa 0x0
+#define IMX7ULP_PAD_PTC4__FXIO1_D0 0x0010 0x0204 0x2 0x1
+#define IMX7ULP_PAD_PTC4__LPSPI2_PCS1 0x0010 0x02a0 0x3 0x1
+#define IMX7ULP_PAD_PTC4__LPUART5_CTS_B 0x0010 0x0250 0x4 0x1
+#define IMX7ULP_PAD_PTC4__LPI2C5_SCL 0x0010 0x02bc 0x5 0x1
+#define IMX7ULP_PAD_PTC4__TPM4_CH3 0x0010 0x028c 0x6 0x1
+#define IMX7ULP_PAD_PTC4__FB_AD4 0x0010 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTC5__PTC5 0x0014 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTC5__TRACE_D10 0x0014 0x0000 0xa 0x0
+#define IMX7ULP_PAD_PTC5__FXIO1_D1 0x0014 0x0208 0x2 0x1
+#define IMX7ULP_PAD_PTC5__LPSPI2_PCS2 0x0014 0x02a4 0x3 0x1
+#define IMX7ULP_PAD_PTC5__LPUART5_RTS_B 0x0014 0x0000 0x4 0x0
+#define IMX7ULP_PAD_PTC5__LPI2C5_SDA 0x0014 0x02c0 0x5 0x1
+#define IMX7ULP_PAD_PTC5__TPM4_CH4 0x0014 0x0290 0x6 0x1
+#define IMX7ULP_PAD_PTC5__FB_AD5 0x0014 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTC6__PTC6 0x0018 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTC6__TRACE_D9 0x0018 0x0000 0xa 0x0
+#define IMX7ULP_PAD_PTC6__FXIO1_D2 0x0018 0x020c 0x2 0x1
+#define IMX7ULP_PAD_PTC6__LPSPI2_PCS3 0x0018 0x02a8 0x3 0x1
+#define IMX7ULP_PAD_PTC6__LPUART5_TX 0x0018 0x0258 0x4 0x1
+#define IMX7ULP_PAD_PTC6__LPI2C5_HREQ 0x0018 0x02b8 0x5 0x1
+#define IMX7ULP_PAD_PTC6__TPM4_CH5 0x0018 0x0294 0x6 0x1
+#define IMX7ULP_PAD_PTC6__FB_AD6 0x0018 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTC7__PTC7 0x001c 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTC7__TRACE_D8 0x001c 0x0000 0xa 0x0
+#define IMX7ULP_PAD_PTC7__FXIO1_D3 0x001c 0x0210 0x2 0x1
+#define IMX7ULP_PAD_PTC7__LPUART5_RX 0x001c 0x0254 0x4 0x1
+#define IMX7ULP_PAD_PTC7__TPM5_CH1 0x001c 0x02c8 0x6 0x1
+#define IMX7ULP_PAD_PTC7__FB_AD7 0x001c 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTC8__PTC8 0x0020 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTC8__TRACE_D7 0x0020 0x0000 0xa 0x0
+#define IMX7ULP_PAD_PTC8__FXIO1_D4 0x0020 0x0214 0x2 0x1
+#define IMX7ULP_PAD_PTC8__LPSPI2_SIN 0x0020 0x02b0 0x3 0x1
+#define IMX7ULP_PAD_PTC8__LPUART6_CTS_B 0x0020 0x025c 0x4 0x1
+#define IMX7ULP_PAD_PTC8__LPI2C6_SCL 0x0020 0x02fc 0x5 0x1
+#define IMX7ULP_PAD_PTC8__TPM5_CLKIN 0x0020 0x02cc 0x6 0x1
+#define IMX7ULP_PAD_PTC8__FB_AD8 0x0020 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTC9__PTC9 0x0024 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTC9__TRACE_D6 0x0024 0x0000 0xa 0x0
+#define IMX7ULP_PAD_PTC9__FXIO1_D5 0x0024 0x0218 0x2 0x1
+#define IMX7ULP_PAD_PTC9__LPSPI2_SOUT 0x0024 0x02b4 0x3 0x1
+#define IMX7ULP_PAD_PTC9__LPUART6_RTS_B 0x0024 0x0000 0x4 0x0
+#define IMX7ULP_PAD_PTC9__LPI2C6_SDA 0x0024 0x0300 0x5 0x1
+#define IMX7ULP_PAD_PTC9__TPM5_CH0 0x0024 0x02c4 0x6 0x1
+#define IMX7ULP_PAD_PTC9__FB_AD9 0x0024 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTC10__PTC10 0x0028 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTC10__TRACE_D5 0x0028 0x0000 0xa 0x0
+#define IMX7ULP_PAD_PTC10__FXIO1_D6 0x0028 0x021c 0x2 0x1
+#define IMX7ULP_PAD_PTC10__LPSPI2_SCK 0x0028 0x02ac 0x3 0x1
+#define IMX7ULP_PAD_PTC10__LPUART6_TX 0x0028 0x0264 0x4 0x1
+#define IMX7ULP_PAD_PTC10__LPI2C6_HREQ 0x0028 0x02f8 0x5 0x1
+#define IMX7ULP_PAD_PTC10__TPM7_CH3 0x0028 0x02e8 0x6 0x1
+#define IMX7ULP_PAD_PTC10__FB_AD10 0x0028 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTC11__PTC11 0x002c 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTC11__TRACE_D4 0x002c 0x0000 0xa 0x0
+#define IMX7ULP_PAD_PTC11__FXIO1_D7 0x002c 0x0220 0x2 0x1
+#define IMX7ULP_PAD_PTC11__LPSPI2_PCS0 0x002c 0x029c 0x3 0x1
+#define IMX7ULP_PAD_PTC11__LPUART6_RX 0x002c 0x0260 0x4 0x1
+#define IMX7ULP_PAD_PTC11__TPM7_CH4 0x002c 0x02ec 0x6 0x1
+#define IMX7ULP_PAD_PTC11__FB_AD11 0x002c 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTC12__PTC12 0x0030 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTC12__TRACE_D3 0x0030 0x0000 0xa 0x0
+#define IMX7ULP_PAD_PTC12__FXIO1_D8 0x0030 0x0224 0x2 0x1
+#define IMX7ULP_PAD_PTC12__LPSPI3_PCS1 0x0030 0x0314 0x3 0x1
+#define IMX7ULP_PAD_PTC12__LPUART7_CTS_B 0x0030 0x0268 0x4 0x1
+#define IMX7ULP_PAD_PTC12__LPI2C7_SCL 0x0030 0x0308 0x5 0x1
+#define IMX7ULP_PAD_PTC12__TPM7_CH5 0x0030 0x02f0 0x6 0x1
+#define IMX7ULP_PAD_PTC12__FB_AD12 0x0030 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTC13__PTC13 0x0034 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTC13__TRACE_D2 0x0034 0x0000 0xa 0x0
+#define IMX7ULP_PAD_PTC13__FXIO1_D9 0x0034 0x0228 0x2 0x1
+#define IMX7ULP_PAD_PTC13__LPSPI3_PCS2 0x0034 0x0318 0x3 0x1
+#define IMX7ULP_PAD_PTC13__LPUART7_RTS_B 0x0034 0x0000 0x4 0x0
+#define IMX7ULP_PAD_PTC13__LPI2C7_SDA 0x0034 0x030c 0x5 0x1
+#define IMX7ULP_PAD_PTC13__TPM7_CLKIN 0x0034 0x02f4 0x6 0x1
+#define IMX7ULP_PAD_PTC13__FB_AD13 0x0034 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTC14__PTC14 0x0038 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTC14__TRACE_D1 0x0038 0x0000 0xa 0x0
+#define IMX7ULP_PAD_PTC14__FXIO1_D10 0x0038 0x022c 0x2 0x1
+#define IMX7ULP_PAD_PTC14__LPSPI3_PCS3 0x0038 0x031c 0x3 0x1
+#define IMX7ULP_PAD_PTC14__LPUART7_TX 0x0038 0x0270 0x4 0x1
+#define IMX7ULP_PAD_PTC14__LPI2C7_HREQ 0x0038 0x0304 0x5 0x1
+#define IMX7ULP_PAD_PTC14__TPM7_CH0 0x0038 0x02dc 0x6 0x1
+#define IMX7ULP_PAD_PTC14__FB_AD14 0x0038 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTC15__PTC15 0x003c 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTC15__TRACE_D0 0x003c 0x0000 0xa 0x0
+#define IMX7ULP_PAD_PTC15__FXIO1_D11 0x003c 0x0230 0x2 0x1
+#define IMX7ULP_PAD_PTC15__LPUART7_RX 0x003c 0x026c 0x4 0x1
+#define IMX7ULP_PAD_PTC15__TPM7_CH1 0x003c 0x02e0 0x6 0x1
+#define IMX7ULP_PAD_PTC15__FB_AD15 0x003c 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTC16__PTC16 0x0040 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTC16__TRACE_CLKOUT 0x0040 0x0000 0xa 0x0
+#define IMX7ULP_PAD_PTC16__FXIO1_D12 0x0040 0x0234 0x2 0x1
+#define IMX7ULP_PAD_PTC16__LPSPI3_SIN 0x0040 0x0324 0x3 0x1
+#define IMX7ULP_PAD_PTC16__TPM7_CH2 0x0040 0x02e4 0x6 0x1
+#define IMX7ULP_PAD_PTC16__FB_ALE_FB_CS1_B_FB_TS_B 0x0040 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTC17__PTC17 0x0044 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTC17__FXIO1_D13 0x0044 0x0238 0x2 0x1
+#define IMX7ULP_PAD_PTC17__LPSPI3_SOUT 0x0044 0x0328 0x3 0x1
+#define IMX7ULP_PAD_PTC17__TPM6_CLKIN 0x0044 0x02d8 0x6 0x1
+#define IMX7ULP_PAD_PTC17__FB_CS0_B 0x0044 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTC18__PTC18 0x0048 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTC18__FXIO1_D14 0x0048 0x023c 0x2 0x1
+#define IMX7ULP_PAD_PTC18__LPSPI3_SCK 0x0048 0x0320 0x3 0x1
+#define IMX7ULP_PAD_PTC18__TPM6_CH0 0x0048 0x02d0 0x6 0x1
+#define IMX7ULP_PAD_PTC18__FB_OE_B 0x0048 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTC19__PTC19 0x004c 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTC19__FXIO1_D15 0x004c 0x0240 0x2 0x1
+#define IMX7ULP_PAD_PTC19__LPSPI3_PCS0 0x004c 0x0310 0x3 0x1
+#define IMX7ULP_PAD_PTC19__TPM6_CH1 0x004c 0x02d4 0x6 0x1
+#define IMX7ULP_PAD_PTC19__FB_A16 0x004c 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTD0__PTD0 0x0080 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTD0__SDHC0_RESET_B 0x0080 0x0000 0x8 0x0
+#define IMX7ULP_PAD_PTD1__PTD1 0x0084 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTD1__SDHC0_CMD 0x0084 0x0000 0x8 0x0
+#define IMX7ULP_PAD_PTD2__PTD2 0x0088 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTD2__SDHC0_CLK 0x0088 0x0000 0x8 0x0
+#define IMX7ULP_PAD_PTD3__PTD3 0x008c 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTD3__SDHC0_D7 0x008c 0x0000 0x8 0x0
+#define IMX7ULP_PAD_PTD4__PTD4 0x0090 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTD4__SDHC0_D6 0x0090 0x0000 0x8 0x0
+#define IMX7ULP_PAD_PTD5__PTD5 0x0094 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTD5__SDHC0_D5 0x0094 0x0000 0x8 0x0
+#define IMX7ULP_PAD_PTD6__PTD6 0x0098 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTD6__SDHC0_D4 0x0098 0x0000 0x8 0x0
+#define IMX7ULP_PAD_PTD7__PTD7 0x009c 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTD7__SDHC0_D3 0x009c 0x0000 0x8 0x0
+#define IMX7ULP_PAD_PTD8__PTD8 0x00a0 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTD8__TPM4_CLKIN 0x00a0 0x0298 0x6 0x2
+#define IMX7ULP_PAD_PTD8__SDHC0_D2 0x00a0 0x0000 0x8 0x0
+#define IMX7ULP_PAD_PTD9__PTD9 0x00a4 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTD9__TPM4_CH0 0x00a4 0x0280 0x6 0x2
+#define IMX7ULP_PAD_PTD9__SDHC0_D1 0x00a4 0x0000 0x8 0x0
+#define IMX7ULP_PAD_PTD10__PTD10 0x00a8 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTD10__TPM4_CH1 0x00a8 0x0284 0x6 0x2
+#define IMX7ULP_PAD_PTD10__SDHC0_D0 0x00a8 0x0000 0x8 0x0
+#define IMX7ULP_PAD_PTD11__PTD11 0x00ac 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTD11__TPM4_CH2 0x00ac 0x0288 0x6 0x2
+#define IMX7ULP_PAD_PTD11__SDHC0_DQS 0x00ac 0x0000 0x8 0x0
+#define IMX7ULP_PAD_PTE0__PTE0 0x0100 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTE0__FXIO1_D31 0x0100 0x0000 0x2 0x0
+#define IMX7ULP_PAD_PTE0__LPSPI2_PCS1 0x0100 0x02a0 0x3 0x2
+#define IMX7ULP_PAD_PTE0__LPUART4_CTS_B 0x0100 0x0244 0x4 0x2
+#define IMX7ULP_PAD_PTE0__LPI2C4_SCL 0x0100 0x0278 0x5 0x2
+#define IMX7ULP_PAD_PTE0__SDHC1_D1 0x0100 0x0000 0x8 0x0
+#define IMX7ULP_PAD_PTE0__FB_A25 0x0100 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTE1__PTE1 0x0104 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTE1__FXIO1_D30 0x0104 0x0000 0x2 0x0
+#define IMX7ULP_PAD_PTE1__LPSPI2_PCS2 0x0104 0x02a4 0x3 0x2
+#define IMX7ULP_PAD_PTE1__LPUART4_RTS_B 0x0104 0x0000 0x4 0x0
+#define IMX7ULP_PAD_PTE1__LPI2C4_SDA 0x0104 0x027c 0x5 0x2
+#define IMX7ULP_PAD_PTE1__SDHC1_D0 0x0104 0x0000 0x8 0x0
+#define IMX7ULP_PAD_PTE1__FB_A26 0x0104 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTE2__PTE2 0x0108 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTE2__FXIO1_D29 0x0108 0x0000 0x2 0x0
+#define IMX7ULP_PAD_PTE2__LPSPI2_PCS3 0x0108 0x02a8 0x3 0x2
+#define IMX7ULP_PAD_PTE2__LPUART4_TX 0x0108 0x024c 0x4 0x2
+#define IMX7ULP_PAD_PTE2__LPI2C4_HREQ 0x0108 0x0274 0x5 0x2
+#define IMX7ULP_PAD_PTE2__SDHC1_CLK 0x0108 0x0000 0x8 0x0
+#define IMX7ULP_PAD_PTE3__PTE3 0x010c 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTE3__FXIO1_D28 0x010c 0x0000 0x2 0x0
+#define IMX7ULP_PAD_PTE3__LPUART4_RX 0x010c 0x0248 0x4 0x2
+#define IMX7ULP_PAD_PTE3__TPM5_CH1 0x010c 0x02c8 0x6 0x2
+#define IMX7ULP_PAD_PTE3__SDHC1_CMD 0x010c 0x0000 0x8 0x0
+#define IMX7ULP_PAD_PTE4__PTE4 0x0110 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTE4__FXIO1_D27 0x0110 0x0000 0x2 0x0
+#define IMX7ULP_PAD_PTE4__LPSPI2_SIN 0x0110 0x02b0 0x3 0x2
+#define IMX7ULP_PAD_PTE4__LPUART5_CTS_B 0x0110 0x0250 0x4 0x2
+#define IMX7ULP_PAD_PTE4__LPI2C5_SCL 0x0110 0x02bc 0x5 0x2
+#define IMX7ULP_PAD_PTE4__TPM5_CLKIN 0x0110 0x02cc 0x6 0x2
+#define IMX7ULP_PAD_PTE4__SDHC1_D3 0x0110 0x0000 0x8 0x0
+#define IMX7ULP_PAD_PTE5__PTE5 0x0114 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTE5__FXIO1_D26 0x0114 0x0000 0x2 0x0
+#define IMX7ULP_PAD_PTE5__LPSPI2_SOUT 0x0114 0x02b4 0x3 0x2
+#define IMX7ULP_PAD_PTE5__LPUART5_RTS_B 0x0114 0x0000 0x4 0x0
+#define IMX7ULP_PAD_PTE5__LPI2C5_SDA 0x0114 0x02c0 0x5 0x2
+#define IMX7ULP_PAD_PTE5__TPM5_CH0 0x0114 0x02c4 0x6 0x2
+#define IMX7ULP_PAD_PTE5__SDHC1_D2 0x0114 0x0000 0x8 0x0
+#define IMX7ULP_PAD_PTE6__PTE6 0x0118 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTE6__FXIO1_D25 0x0118 0x0000 0x2 0x0
+#define IMX7ULP_PAD_PTE6__LPSPI2_SCK 0x0118 0x02ac 0x3 0x2
+#define IMX7ULP_PAD_PTE6__LPUART5_TX 0x0118 0x0258 0x4 0x2
+#define IMX7ULP_PAD_PTE6__LPI2C5_HREQ 0x0118 0x02b8 0x5 0x2
+#define IMX7ULP_PAD_PTE6__TPM7_CH3 0x0118 0x02e8 0x6 0x2
+#define IMX7ULP_PAD_PTE6__SDHC1_D4 0x0118 0x0000 0x8 0x0
+#define IMX7ULP_PAD_PTE6__FB_A17 0x0118 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTE7__PTE7 0x011c 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTE7__TRACE_D7 0x011c 0x0000 0xa 0x0
+#define IMX7ULP_PAD_PTE7__VIU_FID 0x011c 0x0000 0xc 0x0
+#define IMX7ULP_PAD_PTE7__FXIO1_D24 0x011c 0x0000 0x2 0x0
+#define IMX7ULP_PAD_PTE7__LPSPI2_PCS0 0x011c 0x029c 0x3 0x2
+#define IMX7ULP_PAD_PTE7__LPUART5_RX 0x011c 0x0254 0x4 0x2
+#define IMX7ULP_PAD_PTE7__TPM7_CH4 0x011c 0x02ec 0x6 0x2
+#define IMX7ULP_PAD_PTE7__SDHC1_D5 0x011c 0x0000 0x8 0x0
+#define IMX7ULP_PAD_PTE7__FB_A18 0x011c 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTE8__PTE8 0x0120 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTE8__TRACE_D6 0x0120 0x0000 0xa 0x0
+#define IMX7ULP_PAD_PTE8__VIU_D16 0x0120 0x0000 0xc 0x0
+#define IMX7ULP_PAD_PTE8__FXIO1_D23 0x0120 0x0000 0x2 0x0
+#define IMX7ULP_PAD_PTE8__LPSPI3_PCS1 0x0120 0x0314 0x3 0x2
+#define IMX7ULP_PAD_PTE8__LPUART6_CTS_B 0x0120 0x025c 0x4 0x2
+#define IMX7ULP_PAD_PTE8__LPI2C6_SCL 0x0120 0x02fc 0x5 0x2
+#define IMX7ULP_PAD_PTE8__TPM7_CH5 0x0120 0x02f0 0x6 0x2
+#define IMX7ULP_PAD_PTE8__SDHC1_WP 0x0120 0x0200 0x7 0x1
+#define IMX7ULP_PAD_PTE8__SDHC1_D6 0x0120 0x0000 0x8 0x0
+#define IMX7ULP_PAD_PTE8__FB_CS3_B_FB_BE7_0_BLS31_24_B 0x0120 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTE9__PTE9 0x0124 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTE9__TRACE_D5 0x0124 0x0000 0xa 0x0
+#define IMX7ULP_PAD_PTE9__VIU_D17 0x0124 0x0000 0xc 0x0
+#define IMX7ULP_PAD_PTE9__FXIO1_D22 0x0124 0x0000 0x2 0x0
+#define IMX7ULP_PAD_PTE9__LPSPI3_PCS2 0x0124 0x0318 0x3 0x2
+#define IMX7ULP_PAD_PTE9__LPUART6_RTS_B 0x0124 0x0000 0x4 0x0
+#define IMX7ULP_PAD_PTE9__LPI2C6_SDA 0x0124 0x0300 0x5 0x2
+#define IMX7ULP_PAD_PTE9__TPM7_CLKIN 0x0124 0x02f4 0x6 0x2
+#define IMX7ULP_PAD_PTE9__SDHC1_CD 0x0124 0x032c 0x7 0x1
+#define IMX7ULP_PAD_PTE9__SDHC1_D7 0x0124 0x0000 0x8 0x0
+#define IMX7ULP_PAD_PTE9__FB_TBST_B_FB_CS2_B_FB_BE15_8_BLS23_16_B 0x0124 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTE10__PTE10 0x0128 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTE10__TRACE_D4 0x0128 0x0000 0xa 0x0
+#define IMX7ULP_PAD_PTE10__VIU_D18 0x0128 0x0000 0xc 0x0
+#define IMX7ULP_PAD_PTE10__FXIO1_D21 0x0128 0x0000 0x2 0x0
+#define IMX7ULP_PAD_PTE10__LPSPI3_PCS3 0x0128 0x031c 0x3 0x2
+#define IMX7ULP_PAD_PTE10__LPUART6_TX 0x0128 0x0264 0x4 0x2
+#define IMX7ULP_PAD_PTE10__LPI2C6_HREQ 0x0128 0x02f8 0x5 0x2
+#define IMX7ULP_PAD_PTE10__TPM7_CH0 0x0128 0x02dc 0x6 0x2
+#define IMX7ULP_PAD_PTE10__SDHC1_VS 0x0128 0x0000 0x7 0x0
+#define IMX7ULP_PAD_PTE10__SDHC1_DQS 0x0128 0x0000 0x8 0x0
+#define IMX7ULP_PAD_PTE10__FB_A19 0x0128 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTE11__PTE11 0x012c 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTE11__TRACE_D3 0x012c 0x0000 0xa 0x0
+#define IMX7ULP_PAD_PTE11__VIU_D19 0x012c 0x0000 0xc 0x0
+#define IMX7ULP_PAD_PTE11__FXIO1_D20 0x012c 0x0000 0x2 0x0
+#define IMX7ULP_PAD_PTE11__LPUART6_RX 0x012c 0x0260 0x4 0x2
+#define IMX7ULP_PAD_PTE11__TPM7_CH1 0x012c 0x02e0 0x6 0x2
+#define IMX7ULP_PAD_PTE11__SDHC1_RESET_B 0x012c 0x0000 0x8 0x0
+#define IMX7ULP_PAD_PTE11__FB_A20 0x012c 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTE12__PTE12 0x0130 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTE12__TRACE_D2 0x0130 0x0000 0xa 0x0
+#define IMX7ULP_PAD_PTE12__VIU_D20 0x0130 0x0000 0xc 0x0
+#define IMX7ULP_PAD_PTE12__FXIO1_D19 0x0130 0x0000 0x2 0x0
+#define IMX7ULP_PAD_PTE12__LPSPI3_SIN 0x0130 0x0324 0x3 0x2
+#define IMX7ULP_PAD_PTE12__LPUART7_CTS_B 0x0130 0x0268 0x4 0x2
+#define IMX7ULP_PAD_PTE12__LPI2C7_SCL 0x0130 0x0308 0x5 0x2
+#define IMX7ULP_PAD_PTE12__TPM7_CH2 0x0130 0x02e4 0x6 0x2
+#define IMX7ULP_PAD_PTE12__SDHC1_WP 0x0130 0x0200 0x8 0x2
+#define IMX7ULP_PAD_PTE12__FB_A21 0x0130 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTE13__PTE13 0x0134 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTE13__TRACE_D1 0x0134 0x0000 0xa 0x0
+#define IMX7ULP_PAD_PTE13__VIU_D21 0x0134 0x0000 0xc 0x0
+#define IMX7ULP_PAD_PTE13__FXIO1_D18 0x0134 0x0000 0x2 0x0
+#define IMX7ULP_PAD_PTE13__LPSPI3_SOUT 0x0134 0x0328 0x3 0x2
+#define IMX7ULP_PAD_PTE13__LPUART7_RTS_B 0x0134 0x0000 0x4 0x0
+#define IMX7ULP_PAD_PTE13__LPI2C7_SDA 0x0134 0x030c 0x5 0x2
+#define IMX7ULP_PAD_PTE13__TPM6_CLKIN 0x0134 0x02d8 0x6 0x2
+#define IMX7ULP_PAD_PTE13__SDHC1_CD 0x0134 0x032c 0x8 0x2
+#define IMX7ULP_PAD_PTE13__FB_A22 0x0134 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTE14__PTE14 0x0138 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTE14__TRACE_D0 0x0138 0x0000 0xa 0x0
+#define IMX7ULP_PAD_PTE14__VIU_D22 0x0138 0x0000 0xc 0x0
+#define IMX7ULP_PAD_PTE14__FXIO1_D17 0x0138 0x0000 0x2 0x0
+#define IMX7ULP_PAD_PTE14__LPSPI3_SCK 0x0138 0x0320 0x3 0x2
+#define IMX7ULP_PAD_PTE14__LPUART7_TX 0x0138 0x0270 0x4 0x2
+#define IMX7ULP_PAD_PTE14__LPI2C7_HREQ 0x0138 0x0304 0x5 0x2
+#define IMX7ULP_PAD_PTE14__TPM6_CH0 0x0138 0x02d0 0x6 0x2
+#define IMX7ULP_PAD_PTE14__SDHC1_VS 0x0138 0x0000 0x8 0x0
+#define IMX7ULP_PAD_PTE14__FB_A23 0x0138 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTE15__PTE15 0x013c 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTE15__TRACE_CLKOUT 0x013c 0x0000 0xa 0x0
+#define IMX7ULP_PAD_PTE15__VIU_D23 0x013c 0x0000 0xc 0x0
+#define IMX7ULP_PAD_PTE15__FXIO1_D16 0x013c 0x0000 0x2 0x0
+#define IMX7ULP_PAD_PTE15__LPSPI3_PCS0 0x013c 0x0310 0x3 0x2
+#define IMX7ULP_PAD_PTE15__LPUART7_RX 0x013c 0x026c 0x4 0x2
+#define IMX7ULP_PAD_PTE15__TPM6_CH1 0x013c 0x02d4 0x6 0x2
+#define IMX7ULP_PAD_PTE15__FB_A24 0x013c 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTF0__PTF0 0x0180 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTF0__VIU_DE 0x0180 0x0000 0xc 0x0
+#define IMX7ULP_PAD_PTF0__LPUART4_CTS_B 0x0180 0x0244 0x4 0x3
+#define IMX7ULP_PAD_PTF0__LPI2C4_SCL 0x0180 0x0278 0x5 0x3
+#define IMX7ULP_PAD_PTF0__TPM4_CLKIN 0x0180 0x0298 0x6 0x3
+#define IMX7ULP_PAD_PTF0__FB_RW_B 0x0180 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTF1__PTF1 0x0184 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTF1__VIU_HSYNC 0x0184 0x0000 0xc 0x0
+#define IMX7ULP_PAD_PTF1__LPUART4_RTS_B 0x0184 0x0000 0x4 0x0
+#define IMX7ULP_PAD_PTF1__LPI2C4_SDA 0x0184 0x027c 0x5 0x3
+#define IMX7ULP_PAD_PTF1__TPM4_CH0 0x0184 0x0280 0x6 0x3
+#define IMX7ULP_PAD_PTF1__CLKOUT 0x0184 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTF2__PTF2 0x0188 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTF2__VIU_VSYNC 0x0188 0x0000 0xc 0x0
+#define IMX7ULP_PAD_PTF2__LPUART4_TX 0x0188 0x024c 0x4 0x3
+#define IMX7ULP_PAD_PTF2__LPI2C4_HREQ 0x0188 0x0274 0x5 0x3
+#define IMX7ULP_PAD_PTF2__TPM4_CH1 0x0188 0x0284 0x6 0x3
+#define IMX7ULP_PAD_PTF2__FB_TSIZ1_FB_CS5_B_FB_BE23_16_BLS15_8_B 0x0188 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTF3__PTF3 0x018c 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTF3__VIU_PCLK 0x018c 0x0000 0xc 0x0
+#define IMX7ULP_PAD_PTF3__LPUART4_RX 0x018c 0x0248 0x4 0x3
+#define IMX7ULP_PAD_PTF3__TPM4_CH2 0x018c 0x0288 0x6 0x3
+#define IMX7ULP_PAD_PTF3__FB_AD16 0x018c 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTF4__PTF4 0x0190 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTF4__VIU_D0 0x0190 0x0000 0xc 0x0
+#define IMX7ULP_PAD_PTF4__FXIO1_D0 0x0190 0x0204 0x2 0x2
+#define IMX7ULP_PAD_PTF4__LPSPI2_PCS1 0x0190 0x02a0 0x3 0x3
+#define IMX7ULP_PAD_PTF4__LPUART5_CTS_B 0x0190 0x0250 0x4 0x3
+#define IMX7ULP_PAD_PTF4__LPI2C5_SCL 0x0190 0x02bc 0x5 0x3
+#define IMX7ULP_PAD_PTF4__TPM4_CH3 0x0190 0x028c 0x6 0x2
+#define IMX7ULP_PAD_PTF4__FB_AD17 0x0190 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTF5__PTF5 0x0194 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTF5__VIU_D1 0x0194 0x0000 0xc 0x0
+#define IMX7ULP_PAD_PTF5__FXIO1_D1 0x0194 0x0208 0x2 0x2
+#define IMX7ULP_PAD_PTF5__LPSPI2_PCS2 0x0194 0x02a4 0x3 0x3
+#define IMX7ULP_PAD_PTF5__LPUART5_RTS_B 0x0194 0x0000 0x4 0x0
+#define IMX7ULP_PAD_PTF5__LPI2C5_SDA 0x0194 0x02c0 0x5 0x3
+#define IMX7ULP_PAD_PTF5__TPM4_CH4 0x0194 0x0290 0x6 0x2
+#define IMX7ULP_PAD_PTF5__FB_AD18 0x0194 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTF6__PTF6 0x0198 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTF6__VIU_D2 0x0198 0x0000 0xc 0x0
+#define IMX7ULP_PAD_PTF6__FXIO1_D2 0x0198 0x020c 0x2 0x2
+#define IMX7ULP_PAD_PTF6__LPSPI2_PCS3 0x0198 0x02a8 0x3 0x3
+#define IMX7ULP_PAD_PTF6__LPUART5_TX 0x0198 0x0258 0x4 0x3
+#define IMX7ULP_PAD_PTF6__LPI2C5_HREQ 0x0198 0x02b8 0x5 0x3
+#define IMX7ULP_PAD_PTF6__TPM4_CH5 0x0198 0x0294 0x6 0x2
+#define IMX7ULP_PAD_PTF6__FB_AD19 0x0198 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTF7__PTF7 0x019c 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTF7__VIU_D3 0x019c 0x0000 0xc 0x0
+#define IMX7ULP_PAD_PTF7__FXIO1_D3 0x019c 0x0210 0x2 0x2
+#define IMX7ULP_PAD_PTF7__LPUART5_RX 0x019c 0x0254 0x4 0x3
+#define IMX7ULP_PAD_PTF7__TPM5_CH1 0x019c 0x02c8 0x6 0x3
+#define IMX7ULP_PAD_PTF7__FB_AD20 0x019c 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTF8__PTF8 0x01a0 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTF8__USB1_ULPI_CLK 0x01a0 0x0000 0xb 0x0
+#define IMX7ULP_PAD_PTF8__VIU_D4 0x01a0 0x0000 0xc 0x0
+#define IMX7ULP_PAD_PTF8__FXIO1_D4 0x01a0 0x0214 0x2 0x2
+#define IMX7ULP_PAD_PTF8__LPSPI2_SIN 0x01a0 0x02b0 0x3 0x3
+#define IMX7ULP_PAD_PTF8__LPUART6_CTS_B 0x01a0 0x025c 0x4 0x3
+#define IMX7ULP_PAD_PTF8__LPI2C6_SCL 0x01a0 0x02fc 0x5 0x3
+#define IMX7ULP_PAD_PTF8__TPM5_CLKIN 0x01a0 0x02cc 0x6 0x3
+#define IMX7ULP_PAD_PTF8__FB_AD21 0x01a0 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTF9__PTF9 0x01a4 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTF9__USB1_ULPI_NXT 0x01a4 0x0000 0xb 0x0
+#define IMX7ULP_PAD_PTF9__VIU_D5 0x01a4 0x0000 0xc 0x0
+#define IMX7ULP_PAD_PTF9__FXIO1_D5 0x01a4 0x0218 0x2 0x2
+#define IMX7ULP_PAD_PTF9__LPSPI2_SOUT 0x01a4 0x02b4 0x3 0x3
+#define IMX7ULP_PAD_PTF9__LPUART6_RTS_B 0x01a4 0x0000 0x4 0x0
+#define IMX7ULP_PAD_PTF9__LPI2C6_SDA 0x01a4 0x0300 0x5 0x3
+#define IMX7ULP_PAD_PTF9__TPM5_CH0 0x01a4 0x02c4 0x6 0x3
+#define IMX7ULP_PAD_PTF9__FB_AD22 0x01a4 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTF10__PTF10 0x01a8 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTF10__USB1_ULPI_STP 0x01a8 0x0000 0xb 0x0
+#define IMX7ULP_PAD_PTF10__VIU_D6 0x01a8 0x0000 0xc 0x0
+#define IMX7ULP_PAD_PTF10__FXIO1_D6 0x01a8 0x021c 0x2 0x2
+#define IMX7ULP_PAD_PTF10__LPSPI2_SCK 0x01a8 0x02ac 0x3 0x3
+#define IMX7ULP_PAD_PTF10__LPUART6_TX 0x01a8 0x0264 0x4 0x3
+#define IMX7ULP_PAD_PTF10__LPI2C6_HREQ 0x01a8 0x02f8 0x5 0x3
+#define IMX7ULP_PAD_PTF10__TPM7_CH3 0x01a8 0x02e8 0x6 0x3
+#define IMX7ULP_PAD_PTF10__FB_AD23 0x01a8 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTF11__PTF11 0x01ac 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTF11__USB1_ULPI_DIR 0x01ac 0x0000 0xb 0x0
+#define IMX7ULP_PAD_PTF11__VIU_D7 0x01ac 0x0000 0xc 0x0
+#define IMX7ULP_PAD_PTF11__FXIO1_D7 0x01ac 0x0220 0x2 0x2
+#define IMX7ULP_PAD_PTF11__LPSPI2_PCS0 0x01ac 0x029c 0x3 0x3
+#define IMX7ULP_PAD_PTF11__LPUART6_RX 0x01ac 0x0260 0x4 0x3
+#define IMX7ULP_PAD_PTF11__TPM7_CH4 0x01ac 0x02ec 0x6 0x3
+#define IMX7ULP_PAD_PTF11__FB_CS4_B_FB_TSIZ0_FB_BE31_24_BLS7_0_B 0x01ac 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTF12__PTF12 0x01b0 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTF12__USB1_ULPI_DATA0 0x01b0 0x0000 0xb 0x0
+#define IMX7ULP_PAD_PTF12__VIU_D8 0x01b0 0x0000 0xc 0x0
+#define IMX7ULP_PAD_PTF12__FXIO1_D8 0x01b0 0x0224 0x2 0x2
+#define IMX7ULP_PAD_PTF12__LPSPI3_PCS1 0x01b0 0x0314 0x3 0x3
+#define IMX7ULP_PAD_PTF12__LPUART7_CTS_B 0x01b0 0x0268 0x4 0x3
+#define IMX7ULP_PAD_PTF12__LPI2C7_SCL 0x01b0 0x0308 0x5 0x3
+#define IMX7ULP_PAD_PTF12__TPM7_CH5 0x01b0 0x02f0 0x6 0x3
+#define IMX7ULP_PAD_PTF12__FB_AD24 0x01b0 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTF13__PTF13 0x01b4 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTF13__USB1_ULPI_DATA1 0x01b4 0x0000 0xb 0x0
+#define IMX7ULP_PAD_PTF13__VIU_D9 0x01b4 0x0000 0xc 0x0
+#define IMX7ULP_PAD_PTF13__FXIO1_D9 0x01b4 0x0228 0x2 0x2
+#define IMX7ULP_PAD_PTF13__LPSPI3_PCS2 0x01b4 0x0318 0x3 0x3
+#define IMX7ULP_PAD_PTF13__LPUART7_RTS_B 0x01b4 0x0000 0x4 0x0
+#define IMX7ULP_PAD_PTF13__LPI2C7_SDA 0x01b4 0x030c 0x5 0x3
+#define IMX7ULP_PAD_PTF13__TPM7_CLKIN 0x01b4 0x02f4 0x6 0x3
+#define IMX7ULP_PAD_PTF13__FB_AD25 0x01b4 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTF14__PTF14 0x01b8 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTF14__USB1_ULPI_DATA2 0x01b8 0x0000 0xb 0x0
+#define IMX7ULP_PAD_PTF14__VIU_D10 0x01b8 0x0000 0xc 0x0
+#define IMX7ULP_PAD_PTF14__FXIO1_D10 0x01b8 0x022c 0x2 0x2
+#define IMX7ULP_PAD_PTF14__LPSPI3_PCS3 0x01b8 0x031c 0x3 0x3
+#define IMX7ULP_PAD_PTF14__LPUART7_TX 0x01b8 0x0270 0x4 0x3
+#define IMX7ULP_PAD_PTF14__LPI2C7_HREQ 0x01b8 0x0304 0x5 0x3
+#define IMX7ULP_PAD_PTF14__TPM7_CH0 0x01b8 0x02dc 0x6 0x3
+#define IMX7ULP_PAD_PTF14__FB_AD26 0x01b8 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTF15__PTF15 0x01bc 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTF15__USB1_ULPI_DATA3 0x01bc 0x0000 0xb 0x0
+#define IMX7ULP_PAD_PTF15__VIU_D11 0x01bc 0x0000 0xc 0x0
+#define IMX7ULP_PAD_PTF15__FXIO1_D11 0x01bc 0x0230 0x2 0x2
+#define IMX7ULP_PAD_PTF15__LPUART7_RX 0x01bc 0x026c 0x4 0x3
+#define IMX7ULP_PAD_PTF15__TPM7_CH1 0x01bc 0x02e0 0x6 0x3
+#define IMX7ULP_PAD_PTF15__FB_AD27 0x01bc 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTF16__PTF16 0x01c0 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTF16__USB1_ULPI_DATA4 0x01c0 0x0000 0xb 0x0
+#define IMX7ULP_PAD_PTF16__VIU_D12 0x01c0 0x0000 0xc 0x0
+#define IMX7ULP_PAD_PTF16__FXIO1_D12 0x01c0 0x0234 0x2 0x2
+#define IMX7ULP_PAD_PTF16__LPSPI3_SIN 0x01c0 0x0324 0x3 0x3
+#define IMX7ULP_PAD_PTF16__TPM7_CH2 0x01c0 0x02e4 0x6 0x3
+#define IMX7ULP_PAD_PTF16__FB_AD28 0x01c0 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTF17__PTF17 0x01c4 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTF17__USB1_ULPI_DATA5 0x01c4 0x0000 0xb 0x0
+#define IMX7ULP_PAD_PTF17__VIU_D13 0x01c4 0x0000 0xc 0x0
+#define IMX7ULP_PAD_PTF17__FXIO1_D13 0x01c4 0x0238 0x2 0x2
+#define IMX7ULP_PAD_PTF17__LPSPI3_SOUT 0x01c4 0x0328 0x3 0x3
+#define IMX7ULP_PAD_PTF17__TPM6_CLKIN 0x01c4 0x02d8 0x6 0x3
+#define IMX7ULP_PAD_PTF17__FB_AD29 0x01c4 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTF18__PTF18 0x01c8 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTF18__USB1_ULPI_DATA6 0x01c8 0x0000 0xb 0x0
+#define IMX7ULP_PAD_PTF18__VIU_D14 0x01c8 0x0000 0xc 0x0
+#define IMX7ULP_PAD_PTF18__FXIO1_D14 0x01c8 0x023c 0x2 0x2
+#define IMX7ULP_PAD_PTF18__LPSPI3_SCK 0x01c8 0x0320 0x3 0x3
+#define IMX7ULP_PAD_PTF18__TPM6_CH0 0x01c8 0x02d0 0x6 0x3
+#define IMX7ULP_PAD_PTF18__FB_AD30 0x01c8 0x0000 0x9 0x0
+#define IMX7ULP_PAD_PTF19__PTF19 0x01cc 0x0000 0x1 0x0
+#define IMX7ULP_PAD_PTF19__USB1_ULPI_DATA7 0x01cc 0x0000 0xb 0x0
+#define IMX7ULP_PAD_PTF19__VIU_D15 0x01cc 0x0000 0xc 0x0
+#define IMX7ULP_PAD_PTF19__FXIO1_D15 0x01cc 0x0240 0x2 0x2
+#define IMX7ULP_PAD_PTF19__LPSPI3_PCS0 0x01cc 0x0310 0x3 0x3
+#define IMX7ULP_PAD_PTF19__TPM6_CH1 0x01cc 0x02d4 0x6 0x3
+#define IMX7ULP_PAD_PTF19__FB_AD31 0x01cc 0x0000 0x9 0x0
+
+#endif /* __DTS_IMX7ULP_PINFUNC_H */
diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
index 7bb9df2c1460..9319e1f0f1d8 100644
--- a/arch/arm/boot/dts/ls1021a.dtsi
+++ b/arch/arm/boot/dts/ls1021a.dtsi
@@ -129,14 +129,14 @@
};
msi1: msi-controller@1570e00 {
- compatible = "fsl,1s1021a-msi";
+ compatible = "fsl,ls1021a-msi";
reg = <0x0 0x1570e00 0x0 0x8>;
msi-controller;
interrupts = <GIC_SPI 179 IRQ_TYPE_LEVEL_HIGH>;
};
msi2: msi-controller@1570e08 {
- compatible = "fsl,1s1021a-msi";
+ compatible = "fsl,ls1021a-msi";
reg = <0x0 0x1570e08 0x0 0x8>;
msi-controller;
interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>;
@@ -699,7 +699,7 @@
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */
0x82000000 0x0 0x40000000 0x40 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
- msi-parent = <&msi1>;
+ msi-parent = <&msi1>, <&msi2>;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0000 0 0 1 &gic GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>,
@@ -722,7 +722,7 @@
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000 /* downstream I/O */
0x82000000 0x0 0x40000000 0x48 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
- msi-parent = <&msi2>;
+ msi-parent = <&msi1>, <&msi2>;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0000 0 0 1 &gic GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm/boot/dts/omap2420-n8x0-common.dtsi b/arch/arm/boot/dts/omap2420-n8x0-common.dtsi
index 7e5ffc583c90..91886231e5a8 100644
--- a/arch/arm/boot/dts/omap2420-n8x0-common.dtsi
+++ b/arch/arm/boot/dts/omap2420-n8x0-common.dtsi
@@ -15,8 +15,8 @@
>;
#address-cells = <1>;
#size-cells = <0>;
- retu_mfd: retu@1 {
- compatible = "retu-mfd";
+ retu: retu@1 {
+ compatible = "nokia,retu";
interrupt-parent = <&gpio4>;
interrupts = <12 IRQ_TYPE_EDGE_RISING>;
reg = <0x1>;
diff --git a/arch/arm/boot/dts/omap3-n950-n9.dtsi b/arch/arm/boot/dts/omap3-n950-n9.dtsi
index df3366fa5409..cb47ae79a5f9 100644
--- a/arch/arm/boot/dts/omap3-n950-n9.dtsi
+++ b/arch/arm/boot/dts/omap3-n950-n9.dtsi
@@ -265,6 +265,20 @@
&i2c2 {
clock-frequency = <400000>;
+
+ as3645a@30 {
+ reg = <0x30>;
+ compatible = "ams,as3645a";
+ flash {
+ flash-timeout-us = <150000>;
+ flash-max-microamp = <320000>;
+ led-max-microamp = <60000>;
+ peak-current-limit = <1750000>;
+ };
+ indicator {
+ led-max-microamp = <10000>;
+ };
+ };
};
&i2c3 {
diff --git a/arch/arm/boot/dts/rk3228-evb.dts b/arch/arm/boot/dts/rk3228-evb.dts
index 58834330a5ba..1be9daacc4f9 100644
--- a/arch/arm/boot/dts/rk3228-evb.dts
+++ b/arch/arm/boot/dts/rk3228-evb.dts
@@ -50,6 +50,16 @@
device_type = "memory";
reg = <0x60000000 0x40000000>;
};
+
+ vcc_phy: vcc-phy-regulator {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ regulator-name = "vcc_phy";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
};
&emmc {
@@ -60,6 +70,30 @@
status = "okay";
};
+&gmac {
+ assigned-clocks = <&cru SCLK_MAC_SRC>;
+ assigned-clock-rates = <50000000>;
+ clock_in_out = "output";
+ phy-supply = <&vcc_phy>;
+ phy-mode = "rmii";
+ phy-handle = <&phy>;
+ status = "okay";
+
+ mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy: phy@0 {
+ compatible = "ethernet-phy-id1234.d400", "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ clocks = <&cru SCLK_MAC_PHY>;
+ resets = <&cru SRST_MACPHY>;
+ phy-is-integrated;
+ };
+ };
+};
+
&tsadc {
status = "okay";
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index 2484f11761ea..858e1fed762a 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -1126,8 +1126,8 @@
};
};
- gpu: mali@ffa30000 {
- compatible = "rockchip,rk3288-mali", "arm,mali-t760", "arm,mali-midgard";
+ gpu: gpu@ffa30000 {
+ compatible = "rockchip,rk3288-mali", "arm,mali-t760";
reg = <0xffa30000 0x10000>;
interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi
index cc06da394366..60e69aeacbdb 100644
--- a/arch/arm/boot/dts/sama5d2.dtsi
+++ b/arch/arm/boot/dts/sama5d2.dtsi
@@ -303,7 +303,7 @@
#size-cells = <1>;
atmel,smc = <&hsmc>;
reg = <0x10000000 0x10000000
- 0x40000000 0x30000000>;
+ 0x60000000 0x30000000>;
ranges = <0x0 0x0 0x10000000 0x10000000
0x1 0x0 0x60000000 0x10000000
0x2 0x0 0x70000000 0x10000000
@@ -1048,18 +1048,18 @@
};
hsmc: hsmc@f8014000 {
- compatible = "atmel,sama5d3-smc", "syscon", "simple-mfd";
+ compatible = "atmel,sama5d2-smc", "syscon", "simple-mfd";
reg = <0xf8014000 0x1000>;
- interrupts = <5 IRQ_TYPE_LEVEL_HIGH 6>;
+ interrupts = <17 IRQ_TYPE_LEVEL_HIGH 6>;
clocks = <&hsmc_clk>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
- pmecc: ecc-engine@ffffc070 {
+ pmecc: ecc-engine@f8014070 {
compatible = "atmel,sama5d2-pmecc";
- reg = <0xffffc070 0x490>,
- <0xffffc500 0x100>;
+ reg = <0xf8014070 0x490>,
+ <0xf8014500 0x100>;
};
};
diff --git a/arch/arm/boot/dts/ste-hrefprev60.dtsi b/arch/arm/boot/dts/ste-hrefprev60.dtsi
index 5882a2606ac3..3f14b4df69b4 100644
--- a/arch/arm/boot/dts/ste-hrefprev60.dtsi
+++ b/arch/arm/boot/dts/ste-hrefprev60.dtsi
@@ -30,7 +30,7 @@
i2c@80004000 {
tps61052@33 {
- compatible = "tps61052";
+ compatible = "ti,tps61052";
reg = <0x33>;
};
diff --git a/arch/arm/boot/dts/sun8i-a83t.dtsi b/arch/arm/boot/dts/sun8i-a83t.dtsi
index 8923ba625b76..19a8f4fcfab5 100644
--- a/arch/arm/boot/dts/sun8i-a83t.dtsi
+++ b/arch/arm/boot/dts/sun8i-a83t.dtsi
@@ -44,7 +44,9 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/clock/sun8i-a83t-ccu.h>
#include <dt-bindings/clock/sun8i-r-ccu.h>
+#include <dt-bindings/reset/sun8i-a83t-ccu.h>
/ {
interrupt-parent = <&gic>;
@@ -175,8 +177,8 @@
compatible = "allwinner,sun8i-a83t-dma";
reg = <0x01c02000 0x1000>;
interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ccu 21>;
- resets = <&ccu 7>;
+ clocks = <&ccu CLK_BUS_DMA>;
+ resets = <&ccu RST_BUS_DMA>;
#dma-cells = <1>;
};
@@ -195,7 +197,7 @@
<GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
reg = <0x01c20800 0x400>;
- clocks = <&ccu 45>, <&osc24M>, <&osc16Md512>;
+ clocks = <&ccu CLK_BUS_PIO>, <&osc24M>, <&osc16Md512>;
clock-names = "apb", "hosc", "losc";
gpio-controller;
interrupt-controller;
@@ -247,8 +249,8 @@
"allwinner,sun8i-h3-spdif";
reg = <0x01c21000 0x400>;
interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ccu 44>, <&ccu 76>;
- resets = <&ccu 32>;
+ clocks = <&ccu CLK_BUS_SPDIF>, <&ccu CLK_SPDIF>;
+ resets = <&ccu RST_BUS_SPDIF>;
clock-names = "apb", "spdif";
dmas = <&dma 2>;
dma-names = "tx";
@@ -263,8 +265,8 @@
interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
reg-shift = <2>;
reg-io-width = <4>;
- clocks = <&ccu 53>;
- resets = <&ccu 40>;
+ clocks = <&ccu CLK_BUS_UART0>;
+ resets = <&ccu RST_BUS_UART0>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/sun8i-h2-plus-orangepi-zero.dts b/arch/arm/boot/dts/sun8i-h2-plus-orangepi-zero.dts
index 6713d0f2b3f4..b1502df7b509 100644
--- a/arch/arm/boot/dts/sun8i-h2-plus-orangepi-zero.dts
+++ b/arch/arm/boot/dts/sun8i-h2-plus-orangepi-zero.dts
@@ -56,8 +56,6 @@
aliases {
serial0 = &uart0;
- /* ethernet0 is the H3 emac, defined in sun8i-h3.dtsi */
- ethernet0 = &emac;
ethernet1 = &xr819;
};
@@ -104,13 +102,6 @@
status = "okay";
};
-&emac {
- phy-handle = <&int_mii_phy>;
- phy-mode = "mii";
- allwinner,leds-active-low;
- status = "okay";
-};
-
&mmc0 {
pinctrl-names = "default";
pinctrl-0 = <&mmc0_pins_a>;
diff --git a/arch/arm/boot/dts/sun8i-h3-bananapi-m2-plus.dts b/arch/arm/boot/dts/sun8i-h3-bananapi-m2-plus.dts
index d756ff825116..a337af1de322 100644
--- a/arch/arm/boot/dts/sun8i-h3-bananapi-m2-plus.dts
+++ b/arch/arm/boot/dts/sun8i-h3-bananapi-m2-plus.dts
@@ -52,7 +52,6 @@
compatible = "sinovoip,bpi-m2-plus", "allwinner,sun8i-h3";
aliases {
- ethernet0 = &emac;
serial0 = &uart0;
serial1 = &uart1;
};
@@ -115,30 +114,12 @@
status = "okay";
};
-&emac {
- pinctrl-names = "default";
- pinctrl-0 = <&emac_rgmii_pins>;
- phy-supply = <&reg_gmac_3v3>;
- phy-handle = <&ext_rgmii_phy>;
- phy-mode = "rgmii";
-
- allwinner,leds-active-low;
- status = "okay";
-};
-
&ir {
pinctrl-names = "default";
pinctrl-0 = <&ir_pins_a>;
status = "okay";
};
-&mdio {
- ext_rgmii_phy: ethernet-phy@1 {
- compatible = "ethernet-phy-ieee802.3-c22";
- reg = <0>;
- };
-};
-
&mmc0 {
pinctrl-names = "default";
pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin>;
diff --git a/arch/arm/boot/dts/sun8i-h3-nanopi-neo.dts b/arch/arm/boot/dts/sun8i-h3-nanopi-neo.dts
index 78f6c24952dd..8d2cc6e9a03f 100644
--- a/arch/arm/boot/dts/sun8i-h3-nanopi-neo.dts
+++ b/arch/arm/boot/dts/sun8i-h3-nanopi-neo.dts
@@ -46,10 +46,3 @@
model = "FriendlyARM NanoPi NEO";
compatible = "friendlyarm,nanopi-neo", "allwinner,sun8i-h3";
};
-
-&emac {
- phy-handle = <&int_mii_phy>;
- phy-mode = "mii";
- allwinner,leds-active-low;
- status = "okay";
-};
diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-2.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-2.dts
index 17cdeae19c6f..8ff71b1bb45b 100644
--- a/arch/arm/boot/dts/sun8i-h3-orangepi-2.dts
+++ b/arch/arm/boot/dts/sun8i-h3-orangepi-2.dts
@@ -54,7 +54,6 @@
aliases {
serial0 = &uart0;
/* ethernet0 is the H3 emac, defined in sun8i-h3.dtsi */
- ethernet0 = &emac;
ethernet1 = &rtl8189;
};
@@ -118,13 +117,6 @@
status = "okay";
};
-&emac {
- phy-handle = <&int_mii_phy>;
- phy-mode = "mii";
- allwinner,leds-active-low;
- status = "okay";
-};
-
&ir {
pinctrl-names = "default";
pinctrl-0 = <&ir_pins_a>;
diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-one.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-one.dts
index 6880268e8b87..5fea430e0eb1 100644
--- a/arch/arm/boot/dts/sun8i-h3-orangepi-one.dts
+++ b/arch/arm/boot/dts/sun8i-h3-orangepi-one.dts
@@ -52,7 +52,6 @@
compatible = "xunlong,orangepi-one", "allwinner,sun8i-h3";
aliases {
- ethernet0 = &emac;
serial0 = &uart0;
};
@@ -98,13 +97,6 @@
status = "okay";
};
-&emac {
- phy-handle = <&int_mii_phy>;
- phy-mode = "mii";
- allwinner,leds-active-low;
- status = "okay";
-};
-
&mmc0 {
pinctrl-names = "default";
pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin>;
diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-pc-plus.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-pc-plus.dts
index a10281b455f5..8b93f5c781a7 100644
--- a/arch/arm/boot/dts/sun8i-h3-orangepi-pc-plus.dts
+++ b/arch/arm/boot/dts/sun8i-h3-orangepi-pc-plus.dts
@@ -53,11 +53,6 @@
};
};
-&emac {
- /* LEDs changed to active high on the plus */
- /delete-property/ allwinner,leds-active-low;
-};
-
&mmc1 {
pinctrl-names = "default";
pinctrl-0 = <&mmc1_pins_a>;
diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-pc.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-pc.dts
index 998b60f8d295..1a044b17d6c6 100644
--- a/arch/arm/boot/dts/sun8i-h3-orangepi-pc.dts
+++ b/arch/arm/boot/dts/sun8i-h3-orangepi-pc.dts
@@ -52,7 +52,6 @@
compatible = "xunlong,orangepi-pc", "allwinner,sun8i-h3";
aliases {
- ethernet0 = &emac;
serial0 = &uart0;
};
@@ -114,13 +113,6 @@
status = "okay";
};
-&emac {
- phy-handle = <&int_mii_phy>;
- phy-mode = "mii";
- allwinner,leds-active-low;
- status = "okay";
-};
-
&ir {
pinctrl-names = "default";
pinctrl-0 = <&ir_pins_a>;
diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-plus.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-plus.dts
index 331ed683ac62..828ae7a526d9 100644
--- a/arch/arm/boot/dts/sun8i-h3-orangepi-plus.dts
+++ b/arch/arm/boot/dts/sun8i-h3-orangepi-plus.dts
@@ -47,10 +47,6 @@
model = "Xunlong Orange Pi Plus / Plus 2";
compatible = "xunlong,orangepi-plus", "allwinner,sun8i-h3";
- aliases {
- ethernet0 = &emac;
- };
-
reg_gmac_3v3: gmac-3v3 {
compatible = "regulator-fixed";
regulator-name = "gmac-3v3";
@@ -78,24 +74,6 @@
status = "okay";
};
-&emac {
- pinctrl-names = "default";
- pinctrl-0 = <&emac_rgmii_pins>;
- phy-supply = <&reg_gmac_3v3>;
- phy-handle = <&ext_rgmii_phy>;
- phy-mode = "rgmii";
-
- allwinner,leds-active-low;
- status = "okay";
-};
-
-&mdio {
- ext_rgmii_phy: ethernet-phy@1 {
- compatible = "ethernet-phy-ieee802.3-c22";
- reg = <0>;
- };
-};
-
&mmc2 {
pinctrl-names = "default";
pinctrl-0 = <&mmc2_8bit_pins>;
diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-plus2e.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-plus2e.dts
index 80026f3caafc..97920b12a944 100644
--- a/arch/arm/boot/dts/sun8i-h3-orangepi-plus2e.dts
+++ b/arch/arm/boot/dts/sun8i-h3-orangepi-plus2e.dts
@@ -61,19 +61,3 @@
gpio = <&pio 3 6 GPIO_ACTIVE_HIGH>; /* PD6 */
};
};
-
-&emac {
- pinctrl-names = "default";
- pinctrl-0 = <&emac_rgmii_pins>;
- phy-supply = <&reg_gmac_3v3>;
- phy-handle = <&ext_rgmii_phy>;
- phy-mode = "rgmii";
- status = "okay";
-};
-
-&mdio {
- ext_rgmii_phy: ethernet-phy@1 {
- compatible = "ethernet-phy-ieee802.3-c22";
- reg = <1>;
- };
-};
diff --git a/arch/arm/boot/dts/sunxi-h3-h5.dtsi b/arch/arm/boot/dts/sunxi-h3-h5.dtsi
index 6f2162608006..11240a8313c2 100644
--- a/arch/arm/boot/dts/sunxi-h3-h5.dtsi
+++ b/arch/arm/boot/dts/sunxi-h3-h5.dtsi
@@ -391,32 +391,6 @@
clocks = <&osc24M>;
};
- emac: ethernet@1c30000 {
- compatible = "allwinner,sun8i-h3-emac";
- syscon = <&syscon>;
- reg = <0x01c30000 0x104>;
- interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "macirq";
- resets = <&ccu RST_BUS_EMAC>;
- reset-names = "stmmaceth";
- clocks = <&ccu CLK_BUS_EMAC>;
- clock-names = "stmmaceth";
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
-
- mdio: mdio {
- #address-cells = <1>;
- #size-cells = <0>;
- int_mii_phy: ethernet-phy@1 {
- compatible = "ethernet-phy-ieee802.3-c22";
- reg = <1>;
- clocks = <&ccu CLK_BUS_EPHY>;
- resets = <&ccu RST_BUS_EPHY>;
- };
- };
- };
-
spi0: spi@01c68000 {
compatible = "allwinner,sun8i-h3-spi";
reg = <0x01c68000 0x1000>;
diff --git a/arch/arm/boot/dts/tango4-smp8758.dtsi b/arch/arm/boot/dts/tango4-smp8758.dtsi
index d2e65c46bcc7..eca33d568690 100644
--- a/arch/arm/boot/dts/tango4-smp8758.dtsi
+++ b/arch/arm/boot/dts/tango4-smp8758.dtsi
@@ -13,7 +13,6 @@
reg = <0>;
clocks = <&clkgen CPU_CLK>;
clock-latency = <1>;
- operating-points = <1215000 0 607500 0 405000 0 243000 0 135000 0>;
};
cpu1: cpu@1 {
diff --git a/arch/arm/boot/dts/tango4-vantage-1172.dts b/arch/arm/boot/dts/tango4-vantage-1172.dts
index 86d8df98802f..13bcc460bcb2 100644
--- a/arch/arm/boot/dts/tango4-vantage-1172.dts
+++ b/arch/arm/boot/dts/tango4-vantage-1172.dts
@@ -22,7 +22,7 @@
};
&eth0 {
- phy-connection-type = "rgmii";
+ phy-connection-type = "rgmii-id";
phy-handle = <&eth0_phy>;
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index e74de69caeab..1736813bdea7 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -226,7 +226,7 @@ CONFIG_REGULATOR_MC13892=y
CONFIG_REGULATOR_PFUZE100=y
CONFIG_MEDIA_SUPPORT=y
CONFIG_MEDIA_CAMERA_SUPPORT=y
-CONFIG_MEDIA_RC_SUPPORT=y
+CONFIG_RC_CORE=y
CONFIG_RC_DEVICES=y
CONFIG_IR_GPIO_CIR=y
CONFIG_MEDIA_USB_SUPPORT=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 4d19c1b4b8e7..94d7e71c69c4 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -270,6 +270,7 @@ CONFIG_ICPLUS_PHY=y
CONFIG_REALTEK_PHY=y
CONFIG_MICREL_PHY=y
CONFIG_FIXED_PHY=y
+CONFIG_ROCKCHIP_PHY=y
CONFIG_USB_PEGASUS=y
CONFIG_USB_RTL8152=m
CONFIG_USB_USBNET=y
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index a120ae816260..0414acf731ce 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -304,7 +304,7 @@ CONFIG_REGULATOR_TPS65910=y
CONFIG_REGULATOR_TWL4030=y
CONFIG_MEDIA_SUPPORT=m
CONFIG_MEDIA_CAMERA_SUPPORT=y
-CONFIG_MEDIA_RC_SUPPORT=y
+CONFIG_RC_CORE=m
CONFIG_MEDIA_CONTROLLER=y
CONFIG_VIDEO_V4L2_SUBDEV_API=y
CONFIG_LIRC=m
diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig
index 0ec1d1ec130f..22cd559531a9 100644
--- a/arch/arm/configs/sunxi_defconfig
+++ b/arch/arm/configs/sunxi_defconfig
@@ -95,7 +95,7 @@ CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_AXP20X=y
CONFIG_REGULATOR_GPIO=y
CONFIG_MEDIA_SUPPORT=y
-CONFIG_MEDIA_RC_SUPPORT=y
+CONFIG_RC_CORE=y
CONFIG_RC_DEVICES=y
CONFIG_IR_SUNXI=y
CONFIG_DRM=y
diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig
index b9adedcc5b2e..ec72752d5668 100644
--- a/arch/arm/crypto/Kconfig
+++ b/arch/arm/crypto/Kconfig
@@ -94,14 +94,15 @@ config CRYPTO_AES_ARM_CE
ARMv8 Crypto Extensions
config CRYPTO_GHASH_ARM_CE
- tristate "PMULL-accelerated GHASH using ARMv8 Crypto Extensions"
+ tristate "PMULL-accelerated GHASH using NEON/ARMv8 Crypto Extensions"
depends on KERNEL_MODE_NEON
select CRYPTO_HASH
select CRYPTO_CRYPTD
help
Use an implementation of GHASH (used by the GCM AEAD chaining mode)
that uses the 64x64 to 128 bit polynomial multiplication (vmull.p64)
- that is part of the ARMv8 Crypto Extensions
+ that is part of the ARMv8 Crypto Extensions, or a slower variant that
+ uses the vmull.p8 instruction that is part of the basic NEON ISA.
config CRYPTO_CRCT10DIF_ARM_CE
tristate "CRCT10DIF digest algorithm using PMULL instructions"
diff --git a/arch/arm/crypto/aes-ce-glue.c b/arch/arm/crypto/aes-ce-glue.c
index 0f966a8ca1ce..d0a9cec73707 100644
--- a/arch/arm/crypto/aes-ce-glue.c
+++ b/arch/arm/crypto/aes-ce-glue.c
@@ -285,9 +285,7 @@ static int ctr_encrypt(struct skcipher_request *req)
ce_aes_ctr_encrypt(tail, NULL, (u8 *)ctx->key_enc,
num_rounds(ctx), blocks, walk.iv);
- if (tdst != tsrc)
- memcpy(tdst, tsrc, nbytes);
- crypto_xor(tdst, tail, nbytes);
+ crypto_xor_cpy(tdst, tsrc, tail, nbytes);
err = skcipher_walk_done(&walk, 0);
}
kernel_neon_end();
diff --git a/arch/arm/crypto/aes-cipher-core.S b/arch/arm/crypto/aes-cipher-core.S
index c817a86c4ca8..54b384084637 100644
--- a/arch/arm/crypto/aes-cipher-core.S
+++ b/arch/arm/crypto/aes-cipher-core.S
@@ -10,6 +10,7 @@
*/
#include <linux/linkage.h>
+#include <asm/cache.h>
.text
.align 5
@@ -32,19 +33,19 @@
.endif
.endm
- .macro __load, out, in, idx
+ .macro __load, out, in, idx, sz, op
.if __LINUX_ARM_ARCH__ < 7 && \idx > 0
- ldr \out, [ttab, \in, lsr #(8 * \idx) - 2]
+ ldr\op \out, [ttab, \in, lsr #(8 * \idx) - \sz]
.else
- ldr \out, [ttab, \in, lsl #2]
+ ldr\op \out, [ttab, \in, lsl #\sz]
.endif
.endm
- .macro __hround, out0, out1, in0, in1, in2, in3, t3, t4, enc
+ .macro __hround, out0, out1, in0, in1, in2, in3, t3, t4, enc, sz, op
__select \out0, \in0, 0
__select t0, \in1, 1
- __load \out0, \out0, 0
- __load t0, t0, 1
+ __load \out0, \out0, 0, \sz, \op
+ __load t0, t0, 1, \sz, \op
.if \enc
__select \out1, \in1, 0
@@ -53,10 +54,10 @@
__select \out1, \in3, 0
__select t1, \in0, 1
.endif
- __load \out1, \out1, 0
+ __load \out1, \out1, 0, \sz, \op
__select t2, \in2, 2
- __load t1, t1, 1
- __load t2, t2, 2
+ __load t1, t1, 1, \sz, \op
+ __load t2, t2, 2, \sz, \op
eor \out0, \out0, t0, ror #24
@@ -68,9 +69,9 @@
__select \t3, \in1, 2
__select \t4, \in2, 3
.endif
- __load \t3, \t3, 2
- __load t0, t0, 3
- __load \t4, \t4, 3
+ __load \t3, \t3, 2, \sz, \op
+ __load t0, t0, 3, \sz, \op
+ __load \t4, \t4, 3, \sz, \op
eor \out1, \out1, t1, ror #24
eor \out0, \out0, t2, ror #16
@@ -82,14 +83,14 @@
eor \out1, \out1, t2
.endm
- .macro fround, out0, out1, out2, out3, in0, in1, in2, in3
- __hround \out0, \out1, \in0, \in1, \in2, \in3, \out2, \out3, 1
- __hround \out2, \out3, \in2, \in3, \in0, \in1, \in1, \in2, 1
+ .macro fround, out0, out1, out2, out3, in0, in1, in2, in3, sz=2, op
+ __hround \out0, \out1, \in0, \in1, \in2, \in3, \out2, \out3, 1, \sz, \op
+ __hround \out2, \out3, \in2, \in3, \in0, \in1, \in1, \in2, 1, \sz, \op
.endm
- .macro iround, out0, out1, out2, out3, in0, in1, in2, in3
- __hround \out0, \out1, \in0, \in3, \in2, \in1, \out2, \out3, 0
- __hround \out2, \out3, \in2, \in1, \in0, \in3, \in1, \in0, 0
+ .macro iround, out0, out1, out2, out3, in0, in1, in2, in3, sz=2, op
+ __hround \out0, \out1, \in0, \in3, \in2, \in1, \out2, \out3, 0, \sz, \op
+ __hround \out2, \out3, \in2, \in1, \in0, \in3, \in1, \in0, 0, \sz, \op
.endm
.macro __rev, out, in
@@ -114,7 +115,7 @@
.endif
.endm
- .macro do_crypt, round, ttab, ltab
+ .macro do_crypt, round, ttab, ltab, bsz
push {r3-r11, lr}
ldr r4, [in]
@@ -146,9 +147,12 @@
1: subs rounds, rounds, #4
\round r8, r9, r10, r11, r4, r5, r6, r7
- __adrl ttab, \ltab, ls
+ bls 2f
\round r4, r5, r6, r7, r8, r9, r10, r11
- bhi 0b
+ b 0b
+
+2: __adrl ttab, \ltab
+ \round r4, r5, r6, r7, r8, r9, r10, r11, \bsz, b
#ifdef CONFIG_CPU_BIG_ENDIAN
__rev r4, r4
@@ -170,10 +174,48 @@
.ltorg
.endm
+ .align L1_CACHE_SHIFT
+ .type __aes_arm_inverse_sbox, %object
+__aes_arm_inverse_sbox:
+ .byte 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
+ .byte 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
+ .byte 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
+ .byte 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
+ .byte 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
+ .byte 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
+ .byte 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
+ .byte 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
+ .byte 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
+ .byte 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
+ .byte 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
+ .byte 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
+ .byte 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
+ .byte 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
+ .byte 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
+ .byte 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
+ .byte 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
+ .byte 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
+ .byte 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
+ .byte 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
+ .byte 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
+ .byte 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
+ .byte 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
+ .byte 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
+ .byte 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
+ .byte 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
+ .byte 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
+ .byte 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
+ .byte 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
+ .byte 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
+ .byte 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
+ .byte 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
+ .size __aes_arm_inverse_sbox, . - __aes_arm_inverse_sbox
+
ENTRY(__aes_arm_encrypt)
- do_crypt fround, crypto_ft_tab, crypto_fl_tab
+ do_crypt fround, crypto_ft_tab, crypto_ft_tab + 1, 2
ENDPROC(__aes_arm_encrypt)
+ .align 5
ENTRY(__aes_arm_decrypt)
- do_crypt iround, crypto_it_tab, crypto_il_tab
+ do_crypt iround, crypto_it_tab, __aes_arm_inverse_sbox, 0
ENDPROC(__aes_arm_decrypt)
diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c
index c76377961444..18768f330449 100644
--- a/arch/arm/crypto/aes-neonbs-glue.c
+++ b/arch/arm/crypto/aes-neonbs-glue.c
@@ -221,9 +221,8 @@ static int ctr_encrypt(struct skcipher_request *req)
u8 *dst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
u8 *src = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
- if (dst != src)
- memcpy(dst, src, walk.total % AES_BLOCK_SIZE);
- crypto_xor(dst, final, walk.total % AES_BLOCK_SIZE);
+ crypto_xor_cpy(dst, src, final,
+ walk.total % AES_BLOCK_SIZE);
err = skcipher_walk_done(&walk, 0);
break;
diff --git a/arch/arm/crypto/ghash-ce-core.S b/arch/arm/crypto/ghash-ce-core.S
index f6ab8bcc9efe..2f78c10b1881 100644
--- a/arch/arm/crypto/ghash-ce-core.S
+++ b/arch/arm/crypto/ghash-ce-core.S
@@ -1,7 +1,7 @@
/*
- * Accelerated GHASH implementation with ARMv8 vmull.p64 instructions.
+ * Accelerated GHASH implementation with NEON/ARMv8 vmull.p8/64 instructions.
*
- * Copyright (C) 2015 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2015 - 2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -12,40 +12,162 @@
#include <asm/assembler.h>
SHASH .req q0
- SHASH2 .req q1
- T1 .req q2
- T2 .req q3
- MASK .req q4
- XL .req q5
- XM .req q6
- XH .req q7
- IN1 .req q7
+ T1 .req q1
+ XL .req q2
+ XM .req q3
+ XH .req q4
+ IN1 .req q4
SHASH_L .req d0
SHASH_H .req d1
- SHASH2_L .req d2
- T1_L .req d4
- MASK_L .req d8
- XL_L .req d10
- XL_H .req d11
- XM_L .req d12
- XM_H .req d13
- XH_L .req d14
+ T1_L .req d2
+ T1_H .req d3
+ XL_L .req d4
+ XL_H .req d5
+ XM_L .req d6
+ XM_H .req d7
+ XH_L .req d8
+
+ t0l .req d10
+ t0h .req d11
+ t1l .req d12
+ t1h .req d13
+ t2l .req d14
+ t2h .req d15
+ t3l .req d16
+ t3h .req d17
+ t4l .req d18
+ t4h .req d19
+
+ t0q .req q5
+ t1q .req q6
+ t2q .req q7
+ t3q .req q8
+ t4q .req q9
+ T2 .req q9
+
+ s1l .req d20
+ s1h .req d21
+ s2l .req d22
+ s2h .req d23
+ s3l .req d24
+ s3h .req d25
+ s4l .req d26
+ s4h .req d27
+
+ MASK .req d28
+ SHASH2_p8 .req d28
+
+ k16 .req d29
+ k32 .req d30
+ k48 .req d31
+ SHASH2_p64 .req d31
.text
.fpu crypto-neon-fp-armv8
+ .macro __pmull_p64, rd, rn, rm, b1, b2, b3, b4
+ vmull.p64 \rd, \rn, \rm
+ .endm
+
/*
- * void pmull_ghash_update(int blocks, u64 dg[], const char *src,
- * struct ghash_key const *k, const char *head)
+ * This implementation of 64x64 -> 128 bit polynomial multiplication
+ * using vmull.p8 instructions (8x8 -> 16) is taken from the paper
+ * "Fast Software Polynomial Multiplication on ARM Processors Using
+ * the NEON Engine" by Danilo Camara, Conrado Gouvea, Julio Lopez and
+ * Ricardo Dahab (https://hal.inria.fr/hal-01506572)
+ *
+ * It has been slightly tweaked for in-order performance, and to allow
+ * 'rq' to overlap with 'ad' or 'bd'.
*/
-ENTRY(pmull_ghash_update)
- vld1.64 {SHASH}, [r3]
+ .macro __pmull_p8, rq, ad, bd, b1=t4l, b2=t3l, b3=t4l, b4=t3l
+ vext.8 t0l, \ad, \ad, #1 @ A1
+ .ifc \b1, t4l
+ vext.8 t4l, \bd, \bd, #1 @ B1
+ .endif
+ vmull.p8 t0q, t0l, \bd @ F = A1*B
+ vext.8 t1l, \ad, \ad, #2 @ A2
+ vmull.p8 t4q, \ad, \b1 @ E = A*B1
+ .ifc \b2, t3l
+ vext.8 t3l, \bd, \bd, #2 @ B2
+ .endif
+ vmull.p8 t1q, t1l, \bd @ H = A2*B
+ vext.8 t2l, \ad, \ad, #3 @ A3
+ vmull.p8 t3q, \ad, \b2 @ G = A*B2
+ veor t0q, t0q, t4q @ L = E + F
+ .ifc \b3, t4l
+ vext.8 t4l, \bd, \bd, #3 @ B3
+ .endif
+ vmull.p8 t2q, t2l, \bd @ J = A3*B
+ veor t0l, t0l, t0h @ t0 = (L) (P0 + P1) << 8
+ veor t1q, t1q, t3q @ M = G + H
+ .ifc \b4, t3l
+ vext.8 t3l, \bd, \bd, #4 @ B4
+ .endif
+ vmull.p8 t4q, \ad, \b3 @ I = A*B3
+ veor t1l, t1l, t1h @ t1 = (M) (P2 + P3) << 16
+ vmull.p8 t3q, \ad, \b4 @ K = A*B4
+ vand t0h, t0h, k48
+ vand t1h, t1h, k32
+ veor t2q, t2q, t4q @ N = I + J
+ veor t0l, t0l, t0h
+ veor t1l, t1l, t1h
+ veor t2l, t2l, t2h @ t2 = (N) (P4 + P5) << 24
+ vand t2h, t2h, k16
+ veor t3l, t3l, t3h @ t3 = (K) (P6 + P7) << 32
+ vmov.i64 t3h, #0
+ vext.8 t0q, t0q, t0q, #15
+ veor t2l, t2l, t2h
+ vext.8 t1q, t1q, t1q, #14
+ vmull.p8 \rq, \ad, \bd @ D = A*B
+ vext.8 t2q, t2q, t2q, #13
+ vext.8 t3q, t3q, t3q, #12
+ veor t0q, t0q, t1q
+ veor t2q, t2q, t3q
+ veor \rq, \rq, t0q
+ veor \rq, \rq, t2q
+ .endm
+
+ //
+ // PMULL (64x64->128) based reduction for CPUs that can do
+ // it in a single instruction.
+ //
+ .macro __pmull_reduce_p64
+ vmull.p64 T1, XL_L, MASK
+
+ veor XH_L, XH_L, XM_H
+ vext.8 T1, T1, T1, #8
+ veor XL_H, XL_H, XM_L
+ veor T1, T1, XL
+
+ vmull.p64 XL, T1_H, MASK
+ .endm
+
+ //
+ // Alternative reduction for CPUs that lack support for the
+ // 64x64->128 PMULL instruction
+ //
+ .macro __pmull_reduce_p8
+ veor XL_H, XL_H, XM_L
+ veor XH_L, XH_L, XM_H
+
+ vshl.i64 T1, XL, #57
+ vshl.i64 T2, XL, #62
+ veor T1, T1, T2
+ vshl.i64 T2, XL, #63
+ veor T1, T1, T2
+ veor XL_H, XL_H, T1_L
+ veor XH_L, XH_L, T1_H
+
+ vshr.u64 T1, XL, #1
+ veor XH, XH, XL
+ veor XL, XL, T1
+ vshr.u64 T1, T1, #6
+ vshr.u64 XL, XL, #1
+ .endm
+
+ .macro ghash_update, pn
vld1.64 {XL}, [r1]
- vmov.i8 MASK, #0xe1
- vext.8 SHASH2, SHASH, SHASH, #8
- vshl.u64 MASK, MASK, #57
- veor SHASH2, SHASH2, SHASH
/* do the head block first, if supplied */
ldr ip, [sp]
@@ -62,33 +184,59 @@ ENTRY(pmull_ghash_update)
#ifndef CONFIG_CPU_BIG_ENDIAN
vrev64.8 T1, T1
#endif
- vext.8 T2, XL, XL, #8
vext.8 IN1, T1, T1, #8
- veor T1, T1, T2
+ veor T1_L, T1_L, XL_H
veor XL, XL, IN1
- vmull.p64 XH, SHASH_H, XL_H @ a1 * b1
+ __pmull_\pn XH, XL_H, SHASH_H, s1h, s2h, s3h, s4h @ a1 * b1
veor T1, T1, XL
- vmull.p64 XL, SHASH_L, XL_L @ a0 * b0
- vmull.p64 XM, SHASH2_L, T1_L @ (a1 + a0)(b1 + b0)
+ __pmull_\pn XL, XL_L, SHASH_L, s1l, s2l, s3l, s4l @ a0 * b0
+ __pmull_\pn XM, T1_L, SHASH2_\pn @ (a1+a0)(b1+b0)
- vext.8 T1, XL, XH, #8
- veor T2, XL, XH
+ veor T1, XL, XH
veor XM, XM, T1
- veor XM, XM, T2
- vmull.p64 T2, XL_L, MASK_L
- vmov XH_L, XM_H
- vmov XM_H, XL_L
+ __pmull_reduce_\pn
- veor XL, XM, T2
- vext.8 T2, XL, XL, #8
- vmull.p64 XL, XL_L, MASK_L
- veor T2, T2, XH
- veor XL, XL, T2
+ veor T1, T1, XH
+ veor XL, XL, T1
bne 0b
vst1.64 {XL}, [r1]
bx lr
-ENDPROC(pmull_ghash_update)
+ .endm
+
+ /*
+ * void pmull_ghash_update(int blocks, u64 dg[], const char *src,
+ * struct ghash_key const *k, const char *head)
+ */
+ENTRY(pmull_ghash_update_p64)
+ vld1.64 {SHASH}, [r3]
+ veor SHASH2_p64, SHASH_L, SHASH_H
+
+ vmov.i8 MASK, #0xe1
+ vshl.u64 MASK, MASK, #57
+
+ ghash_update p64
+ENDPROC(pmull_ghash_update_p64)
+
+ENTRY(pmull_ghash_update_p8)
+ vld1.64 {SHASH}, [r3]
+ veor SHASH2_p8, SHASH_L, SHASH_H
+
+ vext.8 s1l, SHASH_L, SHASH_L, #1
+ vext.8 s2l, SHASH_L, SHASH_L, #2
+ vext.8 s3l, SHASH_L, SHASH_L, #3
+ vext.8 s4l, SHASH_L, SHASH_L, #4
+ vext.8 s1h, SHASH_H, SHASH_H, #1
+ vext.8 s2h, SHASH_H, SHASH_H, #2
+ vext.8 s3h, SHASH_H, SHASH_H, #3
+ vext.8 s4h, SHASH_H, SHASH_H, #4
+
+ vmov.i64 k16, #0xffff
+ vmov.i64 k32, #0xffffffff
+ vmov.i64 k48, #0xffffffffffff
+
+ ghash_update p8
+ENDPROC(pmull_ghash_update_p8)
diff --git a/arch/arm/crypto/ghash-ce-glue.c b/arch/arm/crypto/ghash-ce-glue.c
index 6bac8bea9f1e..d9bb52cae2ac 100644
--- a/arch/arm/crypto/ghash-ce-glue.c
+++ b/arch/arm/crypto/ghash-ce-glue.c
@@ -22,6 +22,7 @@
MODULE_DESCRIPTION("GHASH secure hash using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_CRYPTO("ghash");
#define GHASH_BLOCK_SIZE 16
#define GHASH_DIGEST_SIZE 16
@@ -41,8 +42,17 @@ struct ghash_async_ctx {
struct cryptd_ahash *cryptd_tfm;
};
-asmlinkage void pmull_ghash_update(int blocks, u64 dg[], const char *src,
- struct ghash_key const *k, const char *head);
+asmlinkage void pmull_ghash_update_p64(int blocks, u64 dg[], const char *src,
+ struct ghash_key const *k,
+ const char *head);
+
+asmlinkage void pmull_ghash_update_p8(int blocks, u64 dg[], const char *src,
+ struct ghash_key const *k,
+ const char *head);
+
+static void (*pmull_ghash_update)(int blocks, u64 dg[], const char *src,
+ struct ghash_key const *k,
+ const char *head);
static int ghash_init(struct shash_desc *desc)
{
@@ -312,6 +322,14 @@ static int __init ghash_ce_mod_init(void)
{
int err;
+ if (!(elf_hwcap & HWCAP_NEON))
+ return -ENODEV;
+
+ if (elf_hwcap2 & HWCAP2_PMULL)
+ pmull_ghash_update = pmull_ghash_update_p64;
+ else
+ pmull_ghash_update = pmull_ghash_update_p8;
+
err = crypto_register_shash(&ghash_alg);
if (err)
return err;
@@ -332,5 +350,5 @@ static void __exit ghash_ce_mod_exit(void)
crypto_unregister_shash(&ghash_alg);
}
-module_cpu_feature_match(PMULL, ghash_ce_mod_init);
+module_init(ghash_ce_mod_init);
module_exit(ghash_ce_mod_exit);
diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h
index 27475904e096..eee269321923 100644
--- a/arch/arm/include/asm/arch_gicv3.h
+++ b/arch/arm/include/asm/arch_gicv3.h
@@ -276,6 +276,12 @@ static inline u64 __gic_readq_nonatomic(const volatile void __iomem *addr)
#define gicr_write_pendbaser(v, c) __gic_writeq_nonatomic(v, c)
/*
+ * GICR_xLPIR - only the lower bits are significant
+ */
+#define gic_read_lpir(c) readl_relaxed(c)
+#define gic_write_lpir(v, c) writel_relaxed(lower_32_bits(v), c)
+
+/*
* GITS_TYPER is an ID register and doesn't need atomicity.
*/
#define gits_read_typer(c) __gic_readq_nonatomic(c)
@@ -291,5 +297,33 @@ static inline u64 __gic_readq_nonatomic(const volatile void __iomem *addr)
*/
#define gits_write_cwriter(v, c) __gic_writeq_nonatomic(v, c)
+/*
+ * GITS_VPROPBASER - hi and lo bits may be accessed independently.
+ */
+#define gits_write_vpropbaser(v, c) __gic_writeq_nonatomic(v, c)
+
+/*
+ * GITS_VPENDBASER - the Valid bit must be cleared before changing
+ * anything else.
+ */
+static inline void gits_write_vpendbaser(u64 val, void * __iomem addr)
+{
+ u32 tmp;
+
+ tmp = readl_relaxed(addr + 4);
+ if (tmp & (GICR_VPENDBASER_Valid >> 32)) {
+ tmp &= ~(GICR_VPENDBASER_Valid >> 32);
+ writel_relaxed(tmp, addr + 4);
+ }
+
+ /*
+ * Use the fact that __gic_writeq_nonatomic writes the second
+ * half of the 64bit quantity after the first.
+ */
+ __gic_writeq_nonatomic(val, addr);
+}
+
+#define gits_read_vpendbaser(c) __gic_readq_nonatomic(c)
+
#endif /* !__ASSEMBLY__ */
#endif /* !__ASM_ARCH_GICV3_H */
diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h
index 4e6e88a6b2f4..2244a94ed9c9 100644
--- a/arch/arm/include/asm/bug.h
+++ b/arch/arm/include/asm/bug.h
@@ -37,7 +37,7 @@ do { \
".pushsection .rodata.str, \"aMS\", %progbits, 1\n" \
"2:\t.asciz " #__file "\n" \
".popsection\n" \
- ".pushsection __bug_table,\"a\"\n" \
+ ".pushsection __bug_table,\"aw\"\n" \
".align 2\n" \
"3:\t.word 1b, 2b\n" \
"\t.hword " #__line ", 0\n" \
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index d69bebf697e7..74504b154256 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -116,7 +116,7 @@ struct cpu_cache_fns {
void (*dma_unmap_area)(const void *, size_t, int);
void (*dma_flush_range)(const void *, const void *);
-};
+} __no_randomize_layout;
/*
* Select the calling method
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 6795368ad023..cc414382dab4 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -128,20 +128,10 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
#endif /* !SMP */
static inline int
-futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret, tmp;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
-
#ifndef CONFIG_SMP
preempt_disable();
#endif
@@ -172,17 +162,9 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
preempt_enable();
#endif
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
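With the operand decoding and the final comparison removed from the arch helper, both now live with the caller in common futex code. Roughly, the calling side looks like the sketch below (illustrative; the exact decoding and pagefault bracketing stay in the generic code):

/* Sketch of the generic caller of arch_futex_atomic_op_inuser(). */
static int futex_op_inuser_sketch(unsigned int encoded_op, u32 __user *uaddr)
{
	int op     = (encoded_op >> 28) & 7;
	int cmp    = (encoded_op >> 24) & 15;
	int oparg  = sign_extend32((encoded_op >> 12) & 0xfff, 11);
	int cmparg = sign_extend32(encoded_op & 0xfff, 11);
	int oldval, ret;

	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
		oparg = 1 << oparg;

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
		return -EFAULT;

	ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
	if (ret)
		return ret;

	switch (cmp) {
	case FUTEX_OP_CMP_EQ: return oldval == cmparg;
	case FUTEX_OP_CMP_NE: return oldval != cmparg;
	case FUTEX_OP_CMP_LT: return oldval <  cmparg;
	case FUTEX_OP_CMP_GE: return oldval >= cmparg;
	case FUTEX_OP_CMP_LE: return oldval <= cmparg;
	case FUTEX_OP_CMP_GT: return oldval >  cmparg;
	default:              return -ENOSYS;
	}
}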
diff --git a/arch/arm/include/asm/kexec.h b/arch/arm/include/asm/kexec.h
index 1869af6bac5c..25021b798a1e 100644
--- a/arch/arm/include/asm/kexec.h
+++ b/arch/arm/include/asm/kexec.h
@@ -19,6 +19,11 @@
#ifndef __ASSEMBLY__
+#define ARCH_HAS_KIMAGE_ARCH
+struct kimage_arch {
+ u32 kernel_r2;
+};
+
/**
* crash_setup_regs() - save registers for the panic kernel
* @newregs: registers are saved here
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 127e2dd2e21c..4a879f6ff13b 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -225,12 +225,6 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
-/* We do not have shadow page tables, hence the empty hooks */
-static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
- unsigned long address)
-{
-}
-
struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
void kvm_arm_halt_guest(struct kvm *kvm);
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 4bec45442072..c030143c18c6 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -52,22 +52,6 @@ static inline void dsb_sev(void)
* memory.
*/
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
- u16 owner = READ_ONCE(lock->tickets.owner);
-
- for (;;) {
- arch_spinlock_t tmp = READ_ONCE(*lock);
-
- if (tmp.tickets.owner == tmp.tickets.next ||
- tmp.tickets.owner != owner)
- break;
-
- wfe();
- }
- smp_acquire__after_ctrl_dep();
-}
-
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
static inline void arch_spin_lock(arch_spinlock_t *lock)
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 776757d1604a..1d468b527b7b 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -139,10 +139,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
#define TIF_NEED_RESCHED 1 /* rescheduling necessary */
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
#define TIF_UPROBE 3 /* breakpointed or singlestepping */
-#define TIF_SYSCALL_TRACE 4 /* syscall trace active */
-#define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */
-#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
-#define TIF_SECCOMP 7 /* seccomp syscall filtering active */
+#define TIF_FSCHECK 4 /* Check FS is USER_DS on return */
+#define TIF_SYSCALL_TRACE 5 /* syscall trace active */
+#define TIF_SYSCALL_AUDIT 6 /* syscall auditing active */
+#define TIF_SYSCALL_TRACEPOINT 7 /* syscall tracepoint instrumentation */
+#define TIF_SECCOMP 8 /* seccomp syscall filtering active */
#define TIF_NOHZ 12 /* in adaptive nohz mode */
#define TIF_USING_IWMMXT 17
@@ -153,6 +154,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_UPROBE (1 << TIF_UPROBE)
+#define _TIF_FSCHECK (1 << TIF_FSCHECK)
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
@@ -166,8 +168,9 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
/*
* Change these and you break ASM code in entry-common.S
*/
-#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
- _TIF_NOTIFY_RESUME | _TIF_UPROBE)
+#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
+ _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
+ _TIF_FSCHECK)
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_THREAD_INFO_H */
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 3f2eb76243e3..d5562f9ce600 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -148,7 +148,8 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
}
static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+ unsigned long start, unsigned long end)
{
tlb->mm = mm;
tlb->fullmm = !(start | (end+1));
@@ -166,8 +167,14 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start
}
static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+ unsigned long start, unsigned long end, bool force)
{
+ if (force) {
+ tlb->range_start = start;
+ tlb->range_end = end;
+ }
+
tlb_flush_mmu(tlb);
/* keep the page table cache within bounds */
diff --git a/arch/arm/include/asm/traps.h b/arch/arm/include/asm/traps.h
index f555bb3664dc..683d9230984a 100644
--- a/arch/arm/include/asm/traps.h
+++ b/arch/arm/include/asm/traps.h
@@ -18,7 +18,6 @@ struct undef_hook {
void register_undef_hook(struct undef_hook *hook);
void unregister_undef_hook(struct undef_hook *hook);
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static inline int __in_irqentry_text(unsigned long ptr)
{
extern char __irqentry_text_start[];
@@ -27,12 +26,6 @@ static inline int __in_irqentry_text(unsigned long ptr)
return ptr >= (unsigned long)&__irqentry_text_start &&
ptr < (unsigned long)&__irqentry_text_end;
}
-#else
-static inline int __in_irqentry_text(unsigned long ptr)
-{
- return 0;
-}
-#endif
static inline int in_exception_text(unsigned long ptr)
{
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 0bf2347495f1..87936dd5d151 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -70,6 +70,8 @@ static inline void set_fs(mm_segment_t fs)
{
current_thread_info()->addr_limit = fs;
modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
+ /* On user-mode return, check fs is correct */
+ set_thread_flag(TIF_FSCHECK);
}
#define segment_eq(a, b) ((a) == (b))
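Setting TIF_FSCHECK here pairs with the addr_limit_user_check() call added to do_work_pending() further down; in sketch form, that check does something like the following (a sketch of the common helper, not its verbatim source):

/* Sketch: validate addr_limit before returning to user space. */
static inline void addr_limit_user_check_sketch(void)
{
	if (!test_thread_flag(TIF_FSCHECK))
		return;

	/* Returning to user mode with a kernel address limit is fatal. */
	if (WARN(!segment_eq(get_fs(), USER_DS),
		 "Invalid address limit on user-mode return"))
		force_sig(SIGKILL, current);

	clear_thread_flag(TIF_FSCHECK);
}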
diff --git a/arch/arm/include/asm/ucontext.h b/arch/arm/include/asm/ucontext.h
index 14749aec94bf..921d8274855c 100644
--- a/arch/arm/include/asm/ucontext.h
+++ b/arch/arm/include/asm/ucontext.h
@@ -35,6 +35,12 @@ struct ucontext {
* bytes, to prevent unpredictable padding in the signal frame.
*/
+/*
+ * Dummy padding block: if this magic is encountered, the block should
+ * be skipped using the corresponding size field.
+ */
+#define DUMMY_MAGIC 0xb0d9ed01
+
#ifdef CONFIG_CRUNCH
#define CRUNCH_MAGIC 0x5065cf03
#define CRUNCH_STORAGE_SIZE (CRUNCH_SIZE + 8)
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index eb5cd77bf1d8..e33c32d56193 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -41,7 +41,9 @@ ret_fast_syscall:
UNWIND(.cantunwind )
disable_irq_notrace @ disable interrupts
ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
- tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
+ tst r1, #_TIF_SYSCALL_WORK
+ bne fast_work_pending
+ tst r1, #_TIF_WORK_MASK
bne fast_work_pending
/* perform architecture specific actions before user return */
@@ -67,12 +69,15 @@ ret_fast_syscall:
str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
disable_irq_notrace @ disable interrupts
ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
- tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
+ tst r1, #_TIF_SYSCALL_WORK
+ bne fast_work_pending
+ tst r1, #_TIF_WORK_MASK
beq no_work_pending
UNWIND(.fnend )
ENDPROC(ret_fast_syscall)
/* Slower path - fall through to work_pending */
+fast_work_pending:
#endif
tst r1, #_TIF_SYSCALL_WORK
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 15495887ca14..fe1419eeb932 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -30,7 +30,6 @@ extern unsigned long kexec_boot_atags;
static atomic_t waiting_for_crash_ipi;
-static unsigned long dt_mem;
/*
* Provide a dummy crash_notes definition while crash dump arrives to arm.
* This prevents breakage of crash_notes attribute in kernel/ksysfs.c.
@@ -42,6 +41,9 @@ int machine_kexec_prepare(struct kimage *image)
__be32 header;
int i, err;
+ image->arch.kernel_r2 = image->start - KEXEC_ARM_ZIMAGE_OFFSET
+ + KEXEC_ARM_ATAGS_OFFSET;
+
/*
* Validate that if the current HW supports SMP, then the SW supports
* and implements CPU hotplug for the current HW. If not, we won't be
@@ -66,8 +68,8 @@ int machine_kexec_prepare(struct kimage *image)
if (err)
return err;
- if (be32_to_cpu(header) == OF_DT_HEADER)
- dt_mem = current_segment->mem;
+ if (header == cpu_to_be32(OF_DT_HEADER))
+ image->arch.kernel_r2 = current_segment->mem;
}
return 0;
}
@@ -165,8 +167,7 @@ void machine_kexec(struct kimage *image)
kexec_start_address = image->start;
kexec_indirection_page = page_list;
kexec_mach_type = machine_arch_type;
- kexec_boot_atags = dt_mem ?: image->start - KEXEC_ARM_ZIMAGE_OFFSET
- + KEXEC_ARM_ATAGS_OFFSET;
+ kexec_boot_atags = image->arch.kernel_r2;
/* copy our kernel relocation code to the control code page */
reboot_entry = fncpy(reboot_code_buffer,
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 4e80bf7420d4..8e9a3e40d949 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -987,6 +987,9 @@ static void __init reserve_crashkernel(void)
if (crash_base <= 0) {
unsigned long long crash_max = idmap_to_phys((u32)~0);
+ unsigned long long lowmem_max = __pa(high_memory - 1) + 1;
+ if (crash_max > lowmem_max)
+ crash_max = lowmem_max;
crash_base = memblock_find_in_range(CRASH_ALIGN, crash_max,
crash_size, CRASH_ALIGN);
if (!crash_base) {
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 7b8f2141427b..e2de50bf8742 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -14,6 +14,7 @@
#include <linux/uaccess.h>
#include <linux/tracehook.h>
#include <linux/uprobes.h>
+#include <linux/syscalls.h>
#include <asm/elf.h>
#include <asm/cacheflush.h>
@@ -40,8 +41,10 @@ static int preserve_crunch_context(struct crunch_sigframe __user *frame)
return __copy_to_user(frame, kframe, sizeof(*frame));
}
-static int restore_crunch_context(struct crunch_sigframe __user *frame)
+static int restore_crunch_context(char __user **auxp)
{
+ struct crunch_sigframe __user *frame =
+ (struct crunch_sigframe __user *)*auxp;
char kbuf[sizeof(*frame) + 8];
struct crunch_sigframe *kframe;
@@ -52,6 +55,7 @@ static int restore_crunch_context(struct crunch_sigframe __user *frame)
if (kframe->magic != CRUNCH_MAGIC ||
kframe->size != CRUNCH_STORAGE_SIZE)
return -1;
+ *auxp += CRUNCH_STORAGE_SIZE;
crunch_task_restore(current_thread_info(), &kframe->storage);
return 0;
}
@@ -59,21 +63,39 @@ static int restore_crunch_context(struct crunch_sigframe __user *frame)
#ifdef CONFIG_IWMMXT
-static int preserve_iwmmxt_context(struct iwmmxt_sigframe *frame)
+static int preserve_iwmmxt_context(struct iwmmxt_sigframe __user *frame)
{
char kbuf[sizeof(*frame) + 8];
struct iwmmxt_sigframe *kframe;
+ int err = 0;
/* the iWMMXt context must be 64 bit aligned */
kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
- kframe->magic = IWMMXT_MAGIC;
- kframe->size = IWMMXT_STORAGE_SIZE;
- iwmmxt_task_copy(current_thread_info(), &kframe->storage);
- return __copy_to_user(frame, kframe, sizeof(*frame));
+
+ if (test_thread_flag(TIF_USING_IWMMXT)) {
+ kframe->magic = IWMMXT_MAGIC;
+ kframe->size = IWMMXT_STORAGE_SIZE;
+ iwmmxt_task_copy(current_thread_info(), &kframe->storage);
+
+ err = __copy_to_user(frame, kframe, sizeof(*frame));
+ } else {
+ /*
+ * For bug-compatibility with older kernels, some space
+ * has to be reserved for iWMMXt even if it's not used.
+ * Set the magic and size appropriately so that properly
+ * written userspace can skip it reliably:
+ */
+ __put_user_error(DUMMY_MAGIC, &frame->magic, err);
+ __put_user_error(IWMMXT_STORAGE_SIZE, &frame->size, err);
+ }
+
+ return err;
}
-static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
+static int restore_iwmmxt_context(char __user **auxp)
{
+ struct iwmmxt_sigframe __user *frame =
+ (struct iwmmxt_sigframe __user *)*auxp;
char kbuf[sizeof(*frame) + 8];
struct iwmmxt_sigframe *kframe;
@@ -81,10 +103,28 @@ static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
if (__copy_from_user(kframe, frame, sizeof(*frame)))
return -1;
- if (kframe->magic != IWMMXT_MAGIC ||
- kframe->size != IWMMXT_STORAGE_SIZE)
+
+ /*
+ * For non-iWMMXt threads: a single iwmmxt_sigframe-sized dummy
+ * block is discarded for compatibility with setup_sigframe() if
+ * present, but we don't mandate its presence. If some other
+ * magic is here, it's not for us:
+ */
+ if (!test_thread_flag(TIF_USING_IWMMXT) &&
+ kframe->magic != DUMMY_MAGIC)
+ return 0;
+
+ if (kframe->size != IWMMXT_STORAGE_SIZE)
return -1;
- iwmmxt_task_restore(current_thread_info(), &kframe->storage);
+
+ if (test_thread_flag(TIF_USING_IWMMXT)) {
+ if (kframe->magic != IWMMXT_MAGIC)
+ return -1;
+
+ iwmmxt_task_restore(current_thread_info(), &kframe->storage);
+ }
+
+ *auxp += IWMMXT_STORAGE_SIZE;
return 0;
}
@@ -107,8 +147,10 @@ static int preserve_vfp_context(struct vfp_sigframe __user *frame)
return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
}
-static int restore_vfp_context(struct vfp_sigframe __user *frame)
+static int restore_vfp_context(char __user **auxp)
{
+ struct vfp_sigframe __user *frame =
+ (struct vfp_sigframe __user *)*auxp;
unsigned long magic;
unsigned long size;
int err = 0;
@@ -121,6 +163,7 @@ static int restore_vfp_context(struct vfp_sigframe __user *frame)
if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
return -EINVAL;
+ *auxp += size;
return vfp_restore_user_hwstate(&frame->ufp, &frame->ufp_exc);
}
@@ -141,7 +184,7 @@ struct rt_sigframe {
static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
{
- struct aux_sigframe __user *aux;
+ char __user *aux;
sigset_t set;
int err;
@@ -169,18 +212,18 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
err |= !valid_user_regs(regs);
- aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
+ aux = (char __user *) sf->uc.uc_regspace;
#ifdef CONFIG_CRUNCH
if (err == 0)
- err |= restore_crunch_context(&aux->crunch);
+ err |= restore_crunch_context(&aux);
#endif
#ifdef CONFIG_IWMMXT
- if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
- err |= restore_iwmmxt_context(&aux->iwmmxt);
+ if (err == 0)
+ err |= restore_iwmmxt_context(&aux);
#endif
#ifdef CONFIG_VFP
if (err == 0)
- err |= restore_vfp_context(&aux->vfp);
+ err |= restore_vfp_context(&aux);
#endif
return err;
@@ -286,7 +329,7 @@ setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
err |= preserve_crunch_context(&aux->crunch);
#endif
#ifdef CONFIG_IWMMXT
- if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
+ if (err == 0)
err |= preserve_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
@@ -571,6 +614,10 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
* Update the trace code with the current status.
*/
trace_hardirqs_off();
+
+ /* Check valid user FS if needed */
+ addr_limit_user_check();
+
do {
if (likely(thread_flags & _TIF_NEED_RESCHED)) {
schedule();
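The restore path now advances a single cursor over uc_regspace, treating it as a sequence of { magic, size } blocks in which DUMMY_MAGIC marks padding that may simply be skipped. A minimal walker over such a layout could look like this (struct and function names are illustrative, not kernel API):

#include <stdint.h>
#include <stddef.h>

struct block_hdr {
	uint32_t magic;
	uint32_t size;
};

/* Find a block by magic, skipping DUMMY_MAGIC and other blocks by size. */
static const struct block_hdr *find_block(const char *p, const char *end,
					  uint32_t wanted)
{
	while (p + sizeof(struct block_hdr) <= end) {
		const struct block_hdr *h = (const struct block_hdr *)p;

		if (h->magic == 0 || h->size < sizeof(*h))
			break;		/* terminator or malformed block */
		if (h->magic == wanted)
			return h;
		p += h->size;
	}
	return NULL;
}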
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index d735e5fc4772..195da38cb9a2 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -1,7 +1,7 @@
menuconfig ARCH_AT91
bool "Atmel SoCs"
depends on ARCH_MULTI_V4T || ARCH_MULTI_V5 || ARCH_MULTI_V7 || ARM_SINGLE_ARMV7M
- select ARM_CPU_SUSPEND if PM
+ select ARM_CPU_SUSPEND if PM && ARCH_MULTI_V7
select COMMON_CLK_AT91
select GPIOLIB
select PINCTRL
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index 667fddac3856..5036f996e694 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -608,6 +608,9 @@ static void __init at91_pm_init(void (*pm_idle)(void))
void __init at91rm9200_pm_init(void)
{
+ if (!IS_ENABLED(CONFIG_SOC_AT91RM9200))
+ return;
+
at91_dt_ramc();
/*
@@ -620,18 +623,27 @@ void __init at91rm9200_pm_init(void)
void __init at91sam9_pm_init(void)
{
+ if (!IS_ENABLED(CONFIG_SOC_AT91SAM9))
+ return;
+
at91_dt_ramc();
at91_pm_init(at91sam9_idle);
}
void __init sama5_pm_init(void)
{
+ if (!IS_ENABLED(CONFIG_SOC_SAMA5))
+ return;
+
at91_dt_ramc();
at91_pm_init(NULL);
}
void __init sama5d2_pm_init(void)
{
+ if (!IS_ENABLED(CONFIG_SOC_SAMA5D2))
+ return;
+
at91_pm_backup_init();
sama5_pm_init();
}
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index b5625d009288..e568c8c6f69c 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -1166,7 +1166,7 @@ static struct tvp514x_platform_data tvp5146_pdata = {
#define TVP514X_STD_ALL (V4L2_STD_NTSC | V4L2_STD_PAL)
-static const struct vpif_input da850_ch0_inputs[] = {
+static struct vpif_input da850_ch0_inputs[] = {
{
.input = {
.index = 0,
@@ -1181,7 +1181,7 @@ static const struct vpif_input da850_ch0_inputs[] = {
},
};
-static const struct vpif_input da850_ch1_inputs[] = {
+static struct vpif_input da850_ch1_inputs[] = {
{
.input = {
.index = 0,
diff --git a/arch/arm/mach-davinci/clock.c b/arch/arm/mach-davinci/clock.c
index f5dce9b4e617..f77a4f766050 100644
--- a/arch/arm/mach-davinci/clock.c
+++ b/arch/arm/mach-davinci/clock.c
@@ -218,6 +218,15 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
}
EXPORT_SYMBOL(clk_set_parent);
+struct clk *clk_get_parent(struct clk *clk)
+{
+ if (!clk)
+ return NULL;
+
+ return clk->parent;
+}
+EXPORT_SYMBOL(clk_get_parent);
+
int clk_register(struct clk *clk)
{
if (clk == NULL || IS_ERR(clk))
diff --git a/arch/arm/mach-ep93xx/clock.c b/arch/arm/mach-ep93xx/clock.c
index 39ef3b613912..beec5f16443a 100644
--- a/arch/arm/mach-ep93xx/clock.c
+++ b/arch/arm/mach-ep93xx/clock.c
@@ -475,6 +475,26 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
}
EXPORT_SYMBOL(clk_set_rate);
+long clk_round_rate(struct clk *clk, unsigned long rate)
+{
+ WARN_ON(clk);
+ return 0;
+}
+EXPORT_SYMBOL(clk_round_rate);
+
+int clk_set_parent(struct clk *clk, struct clk *parent)
+{
+ WARN_ON(clk);
+ return 0;
+}
+EXPORT_SYMBOL(clk_set_parent);
+
+struct clk *clk_get_parent(struct clk *clk)
+{
+ return clk->parent;
+}
+EXPORT_SYMBOL(clk_get_parent);
+
static char fclk_divisors[] = { 1, 2, 4, 8, 16, 1, 1, 1 };
static char hclk_divisors[] = { 1, 2, 4, 5, 6, 8, 16, 32 };
diff --git a/arch/arm/mach-hisi/Kconfig b/arch/arm/mach-hisi/Kconfig
index a3b091a4d344..65a048fa08ec 100644
--- a/arch/arm/mach-hisi/Kconfig
+++ b/arch/arm/mach-hisi/Kconfig
@@ -39,6 +39,7 @@ config ARCH_HIP04
select HAVE_ARM_ARCH_TIMER
select MCPM if SMP
select MCPM_QUAD_CLUSTER if SMP
+ select GENERIC_IRQ_EFFECTIVE_AFF_MASK
help
Support for Hisilicon HiP04 SoC family
diff --git a/arch/arm/mach-ixp4xx/include/mach/io.h b/arch/arm/mach-ixp4xx/include/mach/io.h
index 7a0c13bf4269..844e8ac593e2 100644
--- a/arch/arm/mach-ixp4xx/include/mach/io.h
+++ b/arch/arm/mach-ixp4xx/include/mach/io.h
@@ -95,8 +95,10 @@ static inline void __indirect_writeb(u8 value, volatile void __iomem *p)
}
static inline void __indirect_writesb(volatile void __iomem *bus_addr,
- const u8 *vaddr, int count)
+ const void *p, int count)
{
+ const u8 *vaddr = p;
+
while (count--)
writeb(*vaddr++, bus_addr);
}
@@ -118,8 +120,10 @@ static inline void __indirect_writew(u16 value, volatile void __iomem *p)
}
static inline void __indirect_writesw(volatile void __iomem *bus_addr,
- const u16 *vaddr, int count)
+ const void *p, int count)
{
+ const u16 *vaddr = p;
+
while (count--)
writew(*vaddr++, bus_addr);
}
@@ -137,8 +141,9 @@ static inline void __indirect_writel(u32 value, volatile void __iomem *p)
}
static inline void __indirect_writesl(volatile void __iomem *bus_addr,
- const u32 *vaddr, int count)
+ const void *p, int count)
{
+ const u32 *vaddr = p;
while (count--)
writel(*vaddr++, bus_addr);
}
@@ -160,8 +165,10 @@ static inline u8 __indirect_readb(const volatile void __iomem *p)
}
static inline void __indirect_readsb(const volatile void __iomem *bus_addr,
- u8 *vaddr, u32 count)
+ void *p, u32 count)
{
+ u8 *vaddr = p;
+
while (count--)
*vaddr++ = readb(bus_addr);
}
@@ -183,8 +190,10 @@ static inline u16 __indirect_readw(const volatile void __iomem *p)
}
static inline void __indirect_readsw(const volatile void __iomem *bus_addr,
- u16 *vaddr, u32 count)
+ void *p, u32 count)
{
+ u16 *vaddr = p;
+
while (count--)
*vaddr++ = readw(bus_addr);
}
@@ -204,8 +213,10 @@ static inline u32 __indirect_readl(const volatile void __iomem *p)
}
static inline void __indirect_readsl(const volatile void __iomem *bus_addr,
- u32 *vaddr, u32 count)
+ void *p, u32 count)
{
+ u32 *vaddr = p;
+
while (count--)
*vaddr++ = readl(bus_addr);
}
@@ -523,8 +534,15 @@ static inline void iowrite32_rep(void __iomem *addr, const void *vaddr,
#endif
}
-#define ioport_map(port, nr) ((void __iomem*)(port + PIO_OFFSET))
-#define ioport_unmap(addr)
+#define ioport_map(port, nr) ioport_map(port, nr)
+static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
+{
+ return ((void __iomem*)((port) + PIO_OFFSET));
+}
+#define ioport_unmap(addr) ioport_unmap(addr)
+static inline void ioport_unmap(void __iomem *addr)
+{
+}
#endif /* CONFIG_PCI */
#endif /* __ASM_ARM_ARCH_IO_H */
diff --git a/arch/arm/mach-mmp/devices.c b/arch/arm/mach-mmp/devices.c
index 3330ac7cfbef..671c7a09ab3d 100644
--- a/arch/arm/mach-mmp/devices.c
+++ b/arch/arm/mach-mmp/devices.c
@@ -238,7 +238,7 @@ void pxa_usb_phy_deinit(void __iomem *phy_reg)
#endif
#if IS_ENABLED(CONFIG_USB_SUPPORT)
-static u64 usb_dma_mask = ~(u32)0;
+static u64 __maybe_unused usb_dma_mask = ~(u32)0;
#if IS_ENABLED(CONFIG_USB_MV_UDC)
struct resource pxa168_u2o_resources[] = {
diff --git a/arch/arm/mach-mvebu/platsmp.c b/arch/arm/mach-mvebu/platsmp.c
index e62273aacb43..4ffbbd217e82 100644
--- a/arch/arm/mach-mvebu/platsmp.c
+++ b/arch/arm/mach-mvebu/platsmp.c
@@ -211,7 +211,7 @@ static int mv98dx3236_resume_set_cpu_boot_addr(int hw_cpu, void *boot_addr)
return PTR_ERR(base);
writel(0, base + MV98DX3236_CPU_RESUME_CTRL_REG);
- writel(virt_to_phys(boot_addr), base + MV98DX3236_CPU_RESUME_ADDR_REG);
+ writel(__pa_symbol(boot_addr), base + MV98DX3236_CPU_RESUME_ADDR_REG);
iounmap(base);
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
index 6613a6ff5dbc..6cbc69c92913 100644
--- a/arch/arm/mach-omap1/board-ams-delta.c
+++ b/arch/arm/mach-omap1/board-ams-delta.c
@@ -510,6 +510,7 @@ static void __init ams_delta_init(void)
static void modem_pm(struct uart_port *port, unsigned int state, unsigned old)
{
struct modem_private_data *priv = port->private_data;
+ int ret;
if (IS_ERR(priv->regulator))
return;
@@ -518,9 +519,16 @@ static void modem_pm(struct uart_port *port, unsigned int state, unsigned old)
return;
if (state == 0)
- regulator_enable(priv->regulator);
+ ret = regulator_enable(priv->regulator);
else if (old == 0)
- regulator_disable(priv->regulator);
+ ret = regulator_disable(priv->regulator);
+ else
+ ret = 0;
+
+ if (ret)
+ dev_warn(port->dev,
+ "ams_delta modem_pm: failed to %sable regulator: %d\n",
+ state ? "dis" : "en", ret);
}
static struct plat_serial8250_port ams_delta_modem_ports[] = {
diff --git a/arch/arm/mach-omap1/board-h2-mmc.c b/arch/arm/mach-omap1/board-h2-mmc.c
index 357be2debc9d..91bda9c802ff 100644
--- a/arch/arm/mach-omap1/board-h2-mmc.c
+++ b/arch/arm/mach-omap1/board-h2-mmc.c
@@ -14,7 +14,7 @@
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/platform_data/gpio-omap.h>
-#include <linux/i2c/tps65010.h>
+#include <linux/mfd/tps65010.h>
#include "board-h2.h"
#include "mmc.h"
diff --git a/arch/arm/mach-omap1/board-h2.c b/arch/arm/mach-omap1/board-h2.c
index 675254ee4b1e..dece47d76282 100644
--- a/arch/arm/mach-omap1/board-h2.c
+++ b/arch/arm/mach-omap1/board-h2.c
@@ -28,7 +28,7 @@
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/input.h>
-#include <linux/i2c/tps65010.h>
+#include <linux/mfd/tps65010.h>
#include <linux/smc91x.h>
#include <linux/omapfb.h>
#include <linux/platform_data/gpio-omap.h>
diff --git a/arch/arm/mach-omap1/board-h3-mmc.c b/arch/arm/mach-omap1/board-h3-mmc.c
index 4f58bfa5e754..692c267a9a90 100644
--- a/arch/arm/mach-omap1/board-h3-mmc.c
+++ b/arch/arm/mach-omap1/board-h3-mmc.c
@@ -14,7 +14,7 @@
#include <linux/gpio.h>
#include <linux/platform_device.h>
-#include <linux/i2c/tps65010.h>
+#include <linux/mfd/tps65010.h>
#include "common.h"
#include "board-h3.h"
diff --git a/arch/arm/mach-omap1/board-h3.c b/arch/arm/mach-omap1/board-h3.c
index e62f9d454f10..6d32beeb2d88 100644
--- a/arch/arm/mach-omap1/board-h3.c
+++ b/arch/arm/mach-omap1/board-h3.c
@@ -28,7 +28,7 @@
#include <linux/mtd/physmap.h>
#include <linux/input.h>
#include <linux/spi/spi.h>
-#include <linux/i2c/tps65010.h>
+#include <linux/mfd/tps65010.h>
#include <linux/smc91x.h>
#include <linux/omapfb.h>
#include <linux/platform_data/gpio-omap.h>
diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c
index ee8d9f553db4..06243c0b12d2 100644
--- a/arch/arm/mach-omap1/board-nokia770.c
+++ b/arch/arm/mach-omap1/board-nokia770.c
@@ -233,10 +233,10 @@ static struct platform_device nokia770_cbus_device = {
static struct i2c_board_info nokia770_i2c_board_info_2[] __initdata = {
{
- I2C_BOARD_INFO("retu-mfd", 0x01),
+ I2C_BOARD_INFO("retu", 0x01),
},
{
- I2C_BOARD_INFO("tahvo-mfd", 0x02),
+ I2C_BOARD_INFO("tahvo", 0x02),
},
};
diff --git a/arch/arm/mach-omap1/board-osk.c b/arch/arm/mach-omap1/board-osk.c
index 4dfb99504810..d579f4e04137 100644
--- a/arch/arm/mach-omap1/board-osk.c
+++ b/arch/arm/mach-omap1/board-osk.c
@@ -38,7 +38,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
-#include <linux/i2c/tps65010.h>
+#include <linux/mfd/tps65010.h>
#include <linux/platform_data/gpio-omap.h>
#include <linux/platform_data/omap1_bl.h>
@@ -441,13 +441,11 @@ static struct spi_board_info __initdata mistral_boardinfo[] = { {
.chip_select = 0,
} };
-#ifdef CONFIG_PM
static irqreturn_t
osk_mistral_wake_interrupt(int irq, void *ignored)
{
return IRQ_HANDLED;
}
-#endif
static void __init osk_mistral_init(void)
{
@@ -515,7 +513,6 @@ static void __init osk_mistral_init(void)
gpio_direction_input(OMAP_MPUIO(2));
irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
-#ifdef CONFIG_PM
/* share the IRQ in case someone wants to use the
* button for more than wakeup from system sleep.
*/
@@ -529,7 +526,6 @@ static void __init osk_mistral_init(void)
ret);
} else
enable_irq_wake(irq);
-#endif
} else
printk(KERN_ERR "OSK+Mistral: wakeup button is awol\n");
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 779fb1f680b3..b3b3b3a19183 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -8,7 +8,7 @@ ccflags-y := -I$(srctree)/$(src)/include \
# Common support
obj-y := id.o io.o control.o devices.o fb.o timer.o pm.o \
common.o dma.o wd_timer.o display.o i2c.o hdq1w.o omap_hwmod.o \
- omap_device.o omap-headsmp.o sram.o drm.o
+ omap_device.o omap-headsmp.o sram.o
hwmod-common = omap_hwmod.o omap_hwmod_reset.o \
omap_hwmod_common_data.o
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index dc9e34e670a2..583fc39d84cd 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -28,11 +28,12 @@ static const struct of_device_id omap_dt_match_table[] __initconst = {
{ }
};
-static void __init omap_generic_init(void)
+static void __init __maybe_unused omap_generic_init(void)
{
pdata_quirks_init(omap_dt_match_table);
omapdss_init_of();
+ omap_soc_device_init();
}
#ifdef CONFIG_SOC_OMAP2420
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index 8cc6338fcb12..b5ad7fcb80ed 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -29,7 +29,7 @@
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/i2c.h>
-#include <linux/i2c/twl.h>
+#include <linux/mfd/twl.h>
#include <linux/i2c-omap.h>
#include <linux/reboot.h>
#include <linux/irqchip/irq-omap-intc.h>
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index 8fa01c0ecdb2..b3f6eb5d04a2 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -66,6 +66,7 @@
*/
#define FRAMEDONE_IRQ_TIMEOUT 100
+#if defined(CONFIG_FB_OMAP2)
static struct platform_device omap_display_device = {
.name = "omapdss",
.id = -1,
@@ -163,6 +164,65 @@ static enum omapdss_version __init omap_display_get_version(void)
return OMAPDSS_VER_UNKNOWN;
}
+static int __init omapdss_init_fbdev(void)
+{
+ static struct omap_dss_board_info board_data = {
+ .dsi_enable_pads = omap_dsi_enable_pads,
+ .dsi_disable_pads = omap_dsi_disable_pads,
+ .set_min_bus_tput = omap_dss_set_min_bus_tput,
+ };
+ struct device_node *node;
+ int r;
+
+ board_data.version = omap_display_get_version();
+ if (board_data.version == OMAPDSS_VER_UNKNOWN) {
+ pr_err("DSS not supported on this SoC\n");
+ return -ENODEV;
+ }
+
+ omap_display_device.dev.platform_data = &board_data;
+
+ r = platform_device_register(&omap_display_device);
+ if (r < 0) {
+ pr_err("Unable to register omapdss device\n");
+ return r;
+ }
+
+ /* create vrfb device */
+ r = omap_init_vrfb();
+ if (r < 0) {
+ pr_err("Unable to register omapvrfb device\n");
+ return r;
+ }
+
+ /* create FB device */
+ r = omap_init_fb();
+ if (r < 0) {
+ pr_err("Unable to register omapfb device\n");
+ return r;
+ }
+
+ /* create V4L2 display device */
+ r = omap_init_vout();
+ if (r < 0) {
+ pr_err("Unable to register omap_vout device\n");
+ return r;
+ }
+
+ /* add DSI info for omap4 */
+ node = of_find_node_by_name(NULL, "omap4_padconf_global");
+ if (node)
+ omap4_dsi_mux_syscon = syscon_node_to_regmap(node);
+
+ return 0;
+}
+#else
+static inline int omapdss_init_fbdev(void)
+{
+ return 0;
+}
+#endif /* CONFIG_FB_OMAP2 */
+
static void dispc_disable_outputs(void)
{
u32 v, irq_mask = 0;
@@ -335,16 +395,9 @@ static struct device_node * __init omapdss_find_dss_of_node(void)
int __init omapdss_init_of(void)
{
int r;
- enum omapdss_version ver;
struct device_node *node;
struct platform_device *pdev;
- static struct omap_dss_board_info board_data = {
- .dsi_enable_pads = omap_dsi_enable_pads,
- .dsi_disable_pads = omap_dsi_disable_pads,
- .set_min_bus_tput = omap_dss_set_min_bus_tput,
- };
-
/* only create dss helper devices if dss is enabled in the .dts */
node = omapdss_find_dss_of_node();
@@ -354,13 +407,6 @@ int __init omapdss_init_of(void)
if (!of_device_is_available(node))
return 0;
- ver = omap_display_get_version();
-
- if (ver == OMAPDSS_VER_UNKNOWN) {
- pr_err("DSS not supported on this SoC\n");
- return -ENODEV;
- }
-
pdev = of_find_device_by_node(node);
if (!pdev) {
@@ -374,48 +420,5 @@ int __init omapdss_init_of(void)
return r;
}
- board_data.version = ver;
-
- omap_display_device.dev.platform_data = &board_data;
-
- r = platform_device_register(&omap_display_device);
- if (r < 0) {
- pr_err("Unable to register omapdss device\n");
- return r;
- }
-
- /* create DRM device */
- r = omap_init_drm();
- if (r < 0) {
- pr_err("Unable to register omapdrm device\n");
- return r;
- }
-
- /* create vrfb device */
- r = omap_init_vrfb();
- if (r < 0) {
- pr_err("Unable to register omapvrfb device\n");
- return r;
- }
-
- /* create FB device */
- r = omap_init_fb();
- if (r < 0) {
- pr_err("Unable to register omapfb device\n");
- return r;
- }
-
- /* create V4L2 display device */
- r = omap_init_vout();
- if (r < 0) {
- pr_err("Unable to register omap_vout device\n");
- return r;
- }
-
- /* add DSI info for omap4 */
- node = of_find_node_by_name(NULL, "omap4_padconf_global");
- if (node)
- omap4_dsi_mux_syscon = syscon_node_to_regmap(node);
-
- return 0;
+ return omapdss_init_fbdev();
}
diff --git a/arch/arm/mach-omap2/display.h b/arch/arm/mach-omap2/display.h
index 9a39646d4316..42ec2e99a2f4 100644
--- a/arch/arm/mach-omap2/display.h
+++ b/arch/arm/mach-omap2/display.h
@@ -26,7 +26,6 @@ struct omap_dss_dispc_dev_attr {
bool has_framedonetv_irq;
};
-int omap_init_drm(void);
int omap_init_vrfb(void);
int omap_init_fb(void);
int omap_init_vout(void);
diff --git a/arch/arm/mach-omap2/drm.c b/arch/arm/mach-omap2/drm.c
deleted file mode 100644
index 44fef961bb70..000000000000
--- a/arch/arm/mach-omap2/drm.c
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * DRM/KMS device registration for TI OMAP platforms
- *
- * Copyright (C) 2012 Texas Instruments
- * Author: Rob Clark <rob.clark@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/dma-mapping.h>
-#include <linux/platform_data/omap_drm.h>
-
-#include "soc.h"
-#include "display.h"
-
-#if IS_ENABLED(CONFIG_DRM_OMAP)
-
-static struct omap_drm_platform_data platform_data;
-
-static struct platform_device omap_drm_device = {
- .dev = {
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .platform_data = &platform_data,
- },
- .name = "omapdrm",
- .id = 0,
-};
-
-int __init omap_init_drm(void)
-{
- platform_data.omaprev = GET_OMAP_TYPE;
-
- return platform_device_register(&omap_drm_device);
-
-}
-#else
-int __init omap_init_drm(void) { return 0; }
-#endif
diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c
index be517b048762..5b614388d72f 100644
--- a/arch/arm/mach-omap2/hsmmc.c
+++ b/arch/arm/mach-omap2/hsmmc.c
@@ -32,120 +32,6 @@ static u16 control_devconf1_offset;
#define HSMMC_NAME_LEN 9
-static void omap_hsmmc1_before_set_reg(struct device *dev,
- int power_on, int vdd)
-{
- u32 reg, prog_io;
- struct omap_hsmmc_platform_data *mmc = dev->platform_data;
-
- if (mmc->remux)
- mmc->remux(dev, power_on);
-
- /*
- * Assume we power both OMAP VMMC1 (for CMD, CLK, DAT0..3) and the
- * card with Vcc regulator (from twl4030 or whatever). OMAP has both
- * 1.8V and 3.0V modes, controlled by the PBIAS register.
- *
- * In 8-bit modes, OMAP VMMC1A (for DAT4..7) needs a supply, which
- * is most naturally TWL VSIM; those pins also use PBIAS.
- *
- * FIXME handle VMMC1A as needed ...
- */
- if (power_on) {
- if (cpu_is_omap2430()) {
- reg = omap_ctrl_readl(OMAP243X_CONTROL_DEVCONF1);
- if ((1 << vdd) >= MMC_VDD_30_31)
- reg |= OMAP243X_MMC1_ACTIVE_OVERWRITE;
- else
- reg &= ~OMAP243X_MMC1_ACTIVE_OVERWRITE;
- omap_ctrl_writel(reg, OMAP243X_CONTROL_DEVCONF1);
- }
-
- if (mmc->internal_clock) {
- reg = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0);
- reg |= OMAP2_MMCSDIO1ADPCLKISEL;
- omap_ctrl_writel(reg, OMAP2_CONTROL_DEVCONF0);
- }
-
- reg = omap_ctrl_readl(control_pbias_offset);
- if (cpu_is_omap3630()) {
- /* Set MMC I/O to 52MHz */
- prog_io = omap_ctrl_readl(OMAP343X_CONTROL_PROG_IO1);
- prog_io |= OMAP3630_PRG_SDMMC1_SPEEDCTRL;
- omap_ctrl_writel(prog_io, OMAP343X_CONTROL_PROG_IO1);
- } else {
- reg |= OMAP2_PBIASSPEEDCTRL0;
- }
- reg &= ~OMAP2_PBIASLITEPWRDNZ0;
- omap_ctrl_writel(reg, control_pbias_offset);
- } else {
- reg = omap_ctrl_readl(control_pbias_offset);
- reg &= ~OMAP2_PBIASLITEPWRDNZ0;
- omap_ctrl_writel(reg, control_pbias_offset);
- }
-}
-
-static void omap_hsmmc1_after_set_reg(struct device *dev, int power_on, int vdd)
-{
- u32 reg;
-
- /* 100ms delay required for PBIAS configuration */
- msleep(100);
-
- if (power_on) {
- reg = omap_ctrl_readl(control_pbias_offset);
- reg |= (OMAP2_PBIASLITEPWRDNZ0 | OMAP2_PBIASSPEEDCTRL0);
- if ((1 << vdd) <= MMC_VDD_165_195)
- reg &= ~OMAP2_PBIASLITEVMODE0;
- else
- reg |= OMAP2_PBIASLITEVMODE0;
- omap_ctrl_writel(reg, control_pbias_offset);
- } else {
- reg = omap_ctrl_readl(control_pbias_offset);
- reg |= (OMAP2_PBIASSPEEDCTRL0 | OMAP2_PBIASLITEPWRDNZ0 |
- OMAP2_PBIASLITEVMODE0);
- omap_ctrl_writel(reg, control_pbias_offset);
- }
-}
-
-static void hsmmc2_select_input_clk_src(struct omap_hsmmc_platform_data *mmc)
-{
- u32 reg;
-
- reg = omap_ctrl_readl(control_devconf1_offset);
- if (mmc->internal_clock)
- reg |= OMAP2_MMCSDIO2ADPCLKISEL;
- else
- reg &= ~OMAP2_MMCSDIO2ADPCLKISEL;
- omap_ctrl_writel(reg, control_devconf1_offset);
-}
-
-static void hsmmc2_before_set_reg(struct device *dev, int power_on, int vdd)
-{
- struct omap_hsmmc_platform_data *mmc = dev->platform_data;
-
- if (mmc->remux)
- mmc->remux(dev, power_on);
-
- if (power_on)
- hsmmc2_select_input_clk_src(mmc);
-}
-
-static int am35x_hsmmc2_set_power(struct device *dev, int power_on, int vdd)
-{
- struct omap_hsmmc_platform_data *mmc = dev->platform_data;
-
- if (power_on)
- hsmmc2_select_input_clk_src(mmc);
-
- return 0;
-}
-
-static int nop_mmc_set_power(struct device *dev, int power_on, int vdd)
-{
- return 0;
-}
-
static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
struct omap_hsmmc_platform_data *mmc)
{
@@ -157,101 +43,11 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
return -ENOMEM;
}
- if (c->name)
- strncpy(hc_name, c->name, HSMMC_NAME_LEN);
- else
- snprintf(hc_name, (HSMMC_NAME_LEN + 1), "mmc%islot%i",
- c->mmc, 1);
+ snprintf(hc_name, (HSMMC_NAME_LEN + 1), "mmc%islot%i", c->mmc, 1);
mmc->name = hc_name;
mmc->caps = c->caps;
- mmc->internal_clock = !c->ext_clock;
mmc->reg_offset = 0;
- if (c->cover_only) {
- /* detect if mobile phone cover removed */
- mmc->gpio_cd = -EINVAL;
- mmc->gpio_cod = c->gpio_cd;
- } else {
- /* card detect pin on the mmc socket itself */
- mmc->gpio_cd = c->gpio_cd;
- mmc->gpio_cod = -EINVAL;
- }
- mmc->gpio_wp = c->gpio_wp;
-
- mmc->remux = c->remux;
- mmc->init_card = c->init_card;
-
- if (c->nonremovable)
- mmc->nonremovable = 1;
-
- /*
- * NOTE: MMC slots should have a Vcc regulator set up.
- * This may be from a TWL4030-family chip, another
- * controllable regulator, or a fixed supply.
- *
- * temporary HACK: ocr_mask instead of fixed supply
- */
- if (soc_is_am35xx())
- mmc->ocr_mask = MMC_VDD_165_195 |
- MMC_VDD_26_27 |
- MMC_VDD_27_28 |
- MMC_VDD_29_30 |
- MMC_VDD_30_31 |
- MMC_VDD_31_32;
- else
- mmc->ocr_mask = c->ocr_mask;
-
- if (!soc_is_am35xx())
- mmc->features |= HSMMC_HAS_PBIAS;
-
- switch (c->mmc) {
- case 1:
- if (mmc->features & HSMMC_HAS_PBIAS) {
- /* on-chip level shifting via PBIAS0/PBIAS1 */
- mmc->before_set_reg =
- omap_hsmmc1_before_set_reg;
- mmc->after_set_reg =
- omap_hsmmc1_after_set_reg;
- }
-
- if (soc_is_am35xx())
- mmc->set_power = nop_mmc_set_power;
-
- /* OMAP3630 HSMMC1 supports only 4-bit */
- if (cpu_is_omap3630() &&
- (c->caps & MMC_CAP_8_BIT_DATA)) {
- c->caps &= ~MMC_CAP_8_BIT_DATA;
- c->caps |= MMC_CAP_4_BIT_DATA;
- mmc->caps = c->caps;
- }
- break;
- case 2:
- if (soc_is_am35xx())
- mmc->set_power = am35x_hsmmc2_set_power;
-
- if (c->ext_clock)
- c->transceiver = 1;
- if (c->transceiver && (c->caps & MMC_CAP_8_BIT_DATA)) {
- c->caps &= ~MMC_CAP_8_BIT_DATA;
- c->caps |= MMC_CAP_4_BIT_DATA;
- }
- if (mmc->features & HSMMC_HAS_PBIAS) {
- /* off-chip level shifting, or none */
- mmc->before_set_reg = hsmmc2_before_set_reg;
- mmc->after_set_reg = NULL;
- }
- break;
- case 3:
- case 4:
- case 5:
- mmc->before_set_reg = NULL;
- mmc->after_set_reg = NULL;
- break;
- default:
- pr_err("MMC%d configuration not supported!\n", c->mmc);
- kfree(hc_name);
- return -ENODEV;
- }
return 0;
}
@@ -260,7 +56,6 @@ static int omap_hsmmc_done;
void omap_hsmmc_late_init(struct omap2_hsmmc_info *c)
{
struct platform_device *pdev;
- struct omap_hsmmc_platform_data *mmc_pdata;
int res;
if (omap_hsmmc_done != 1)
@@ -269,32 +64,12 @@ void omap_hsmmc_late_init(struct omap2_hsmmc_info *c)
omap_hsmmc_done++;
for (; c->mmc; c++) {
- if (!c->deferred)
- continue;
-
pdev = c->pdev;
if (!pdev)
continue;
-
- mmc_pdata = pdev->dev.platform_data;
- if (!mmc_pdata)
- continue;
-
- if (c->cover_only) {
- /* detect if mobile phone cover removed */
- mmc_pdata->gpio_cd = -EINVAL;
- mmc_pdata->gpio_cod = c->gpio_cd;
- } else {
- /* card detect pin on the mmc socket itself */
- mmc_pdata->gpio_cd = c->gpio_cd;
- mmc_pdata->gpio_cod = -EINVAL;
- }
- mmc_pdata->gpio_wp = c->gpio_wp;
-
res = omap_device_register(pdev);
if (res)
- pr_err("Could not late init MMC %s\n",
- c->name);
+ pr_err("Could not late init MMC\n");
}
}
@@ -336,13 +111,6 @@ static void __init omap_hsmmc_init_one(struct omap2_hsmmc_info *hsmmcinfo,
if (oh->dev_attr != NULL) {
mmc_dev_attr = oh->dev_attr;
mmc_data->controller_flags = mmc_dev_attr->flags;
- /*
- * erratum 2.1.1.128 doesn't apply if board has
- * a transceiver is attached
- */
- if (hsmmcinfo->transceiver)
- mmc_data->controller_flags &=
- ~OMAP_HSMMC_BROKEN_MULTIBLOCK_READ;
}
pdev = platform_device_alloc(name, ctrl_nr - 1);
@@ -367,9 +135,6 @@ static void __init omap_hsmmc_init_one(struct omap2_hsmmc_info *hsmmcinfo,
hsmmcinfo->pdev = pdev;
- if (hsmmcinfo->deferred)
- goto free_mmc;
-
res = omap_device_register(pdev);
if (res) {
pr_err("Could not register od for %s\n", name);
diff --git a/arch/arm/mach-omap2/hsmmc.h b/arch/arm/mach-omap2/hsmmc.h
index 69b619ddc765..af9af5094ec3 100644
--- a/arch/arm/mach-omap2/hsmmc.h
+++ b/arch/arm/mach-omap2/hsmmc.h
@@ -12,18 +12,9 @@ struct omap2_hsmmc_info {
u8 mmc; /* controller 1/2/3 */
u32 caps; /* 4/8 wires and any additional host
* capabilities OR'd (ref. linux/mmc/host.h) */
- bool transceiver; /* MMC-2 option */
- bool ext_clock; /* use external pin for input clock */
- bool cover_only; /* No card detect - just cover switch */
- bool nonremovable; /* Nonremovable e.g. eMMC */
- bool deferred; /* mmc needs a deferred probe */
int gpio_cd; /* or -EINVAL */
int gpio_wp; /* or -EINVAL */
- char *name; /* or NULL for default */
struct platform_device *pdev; /* mmc controller instance */
- int ocr_mask; /* temporary HACK */
- /* Remux (pad configuration) when powering on/off */
- void (*remux)(struct device *dev, int power_on);
/* init some special card */
void (*init_card)(struct mmc_card *card);
};
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 1d739d1a0a65..cb5d7314cf99 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -410,7 +410,7 @@ static int _set_hwmod_postsetup_state(struct omap_hwmod *oh, void *data)
return omap_hwmod_set_postsetup_state(oh, *(u8 *)data);
}
-static void __init omap_hwmod_init_postsetup(void)
+static void __init __maybe_unused omap_hwmod_init_postsetup(void)
{
u8 postsetup_state;
@@ -428,7 +428,6 @@ static void __init omap_hwmod_init_postsetup(void)
static void __init __maybe_unused omap_common_late_init(void)
{
omap2_common_pm_late_init();
- omap_soc_device_init();
}
#ifdef CONFIG_SOC_OMAP2420
diff --git a/arch/arm/mach-omap2/omap_twl.c b/arch/arm/mach-omap2/omap_twl.c
index 1346b3ab34a5..295124b248ae 100644
--- a/arch/arm/mach-omap2/omap_twl.c
+++ b/arch/arm/mach-omap2/omap_twl.c
@@ -16,7 +16,7 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/i2c/twl.h>
+#include <linux/mfd/twl.h>
#include "soc.h"
#include "voltage.h"
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index d44e0e2f1106..841ba19d64a6 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -486,7 +486,6 @@ int __init omap3_pm_init(void)
ret = request_irq(omap_prcm_event_to_irq("io"),
_prcm_int_handle_io, IRQF_SHARED | IRQF_NO_SUSPEND, "pm_io",
omap3_pm_init);
- enable_irq(omap_prcm_event_to_irq("io"));
if (ret) {
pr_err("pm: Failed to request pm_io irq\n");
diff --git a/arch/arm/mach-omap2/prm3xxx.c b/arch/arm/mach-omap2/prm3xxx.c
index 382e236fbfd9..64f6451499a7 100644
--- a/arch/arm/mach-omap2/prm3xxx.c
+++ b/arch/arm/mach-omap2/prm3xxx.c
@@ -692,7 +692,6 @@ static int omap3xxx_prm_late_init(void)
{
struct device_node *np;
int irq_num;
- int ret;
if (!(prm_features & PRM_HAS_IO_WAKEUP))
return 0;
@@ -712,12 +711,8 @@ static int omap3xxx_prm_late_init(void)
}
omap3xxx_prm_enable_io_wakeup();
- ret = omap_prcm_register_chain_handler(&omap3_prcm_irq_setup);
- if (!ret)
- irq_set_status_flags(omap_prcm_event_to_irq("io"),
- IRQ_NOAUTOEN);
- return ret;
+ return omap_prcm_register_chain_handler(&omap3_prcm_irq_setup);
}
static void __exit omap3xxx_prm_exit(void)
diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c
index 87e86a4a9ead..3ab5df1ce900 100644
--- a/arch/arm/mach-omap2/prm44xx.c
+++ b/arch/arm/mach-omap2/prm44xx.c
@@ -337,6 +337,27 @@ static void omap44xx_prm_reconfigure_io_chain(void)
}
/**
+ * omap44xx_prm_enable_io_wakeup - enable wakeup events from I/O wakeup latches
+ *
+ * Activates the I/O wakeup event latches and allows events logged by
+ * those latches to signal a wakeup event to the PRCM. For I/O wakeups
+ * to occur, WAKEUPENABLE bits must be set in the pad mux registers, and
+ * omap44xx_prm_reconfigure_io_chain() must be called. No return value.
+ */
+static void __init omap44xx_prm_enable_io_wakeup(void)
+{
+ s32 inst = omap4_prmst_get_prm_dev_inst();
+
+ if (inst == PRM_INSTANCE_UNKNOWN)
+ return;
+
+ omap4_prm_rmw_inst_reg_bits(OMAP4430_GLOBAL_WUEN_MASK,
+ OMAP4430_GLOBAL_WUEN_MASK,
+ inst,
+ omap4_prcm_irq_setup.pm_ctrl);
+}
+
+/**
* omap44xx_prm_read_reset_sources - return the last SoC reset source
*
* Return a u32 representing the last reset sources of the SoC. The
@@ -668,6 +689,8 @@ struct pwrdm_ops omap4_pwrdm_operations = {
.pwrdm_has_voltdm = omap4_check_vcvp,
};
+static int omap44xx_prm_late_init(void);
+
/*
* XXX document
*/
@@ -675,6 +698,7 @@ static struct prm_ll_data omap44xx_prm_ll_data = {
.read_reset_sources = &omap44xx_prm_read_reset_sources,
.was_any_context_lost_old = &omap44xx_prm_was_any_context_lost_old,
.clear_context_loss_flags_old = &omap44xx_prm_clear_context_loss_flags_old,
+ .late_init = &omap44xx_prm_late_init,
.assert_hardreset = omap4_prminst_assert_hardreset,
.deassert_hardreset = omap4_prminst_deassert_hardreset,
.is_hardreset_asserted = omap4_prminst_is_hardreset_asserted,
@@ -711,6 +735,37 @@ int __init omap44xx_prm_init(const struct omap_prcm_init_data *data)
return prm_register(&omap44xx_prm_ll_data);
}
+static int omap44xx_prm_late_init(void)
+{
+ int irq_num;
+
+ if (!(prm_features & PRM_HAS_IO_WAKEUP))
+ return 0;
+
+ irq_num = of_irq_get(prm_init_data->np, 0);
+ /*
+ * Already have OMAP4 IRQ num. For all other platforms, we need
+ * IRQ numbers from DT
+ */
+ if (irq_num < 0 && !(prm_init_data->flags & PRM_IRQ_DEFAULT)) {
+ if (irq_num == -EPROBE_DEFER)
+ return irq_num;
+
+ /* Have nothing to do */
+ return 0;
+ }
+
+ /* Once OMAP4 DT is filled as well */
+ if (irq_num >= 0) {
+ omap4_prcm_irq_setup.irq = irq_num;
+ omap4_prcm_irq_setup.xlate_irq = NULL;
+ }
+
+ omap44xx_prm_enable_io_wakeup();
+
+ return omap_prcm_register_chain_handler(&omap4_prcm_irq_setup);
+}
+
static void __exit omap44xx_prm_exit(void)
{
prm_unregister(&omap44xx_prm_ll_data);
diff --git a/arch/arm/mach-prima2/common.c b/arch/arm/mach-prima2/common.c
index 8cadb302a7d2..ffe05c27087e 100644
--- a/arch/arm/mach-prima2/common.c
+++ b/arch/arm/mach-prima2/common.c
@@ -15,7 +15,7 @@
#include <linux/of_platform.h>
#include "common.h"
-static void __init sirfsoc_init_late(void)
+static void __init __maybe_unused sirfsoc_init_late(void)
{
sirfsoc_pm_init();
}
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig
index 76fbc115ec33..ce7d97babb0f 100644
--- a/arch/arm/mach-pxa/Kconfig
+++ b/arch/arm/mach-pxa/Kconfig
@@ -566,6 +566,7 @@ config MACH_ICONTROL
config ARCH_PXA_ESERIES
bool "PXA based Toshiba e-series PDAs"
select FB_W100
+ select FB
select PXA25x
config MACH_E330
diff --git a/arch/arm/mach-pxa/include/mach/mtd-xip.h b/arch/arm/mach-pxa/include/mach/mtd-xip.h
index 990d2bf2fb45..9bf4ea6a6f74 100644
--- a/arch/arm/mach-pxa/include/mach/mtd-xip.h
+++ b/arch/arm/mach-pxa/include/mach/mtd-xip.h
@@ -17,11 +17,15 @@
#include <mach/regs-ost.h>
-#define xip_irqpending() (ICIP & ICMR)
+/* restored July 2017, this did not build since 2011! */
+
+#define ICIP io_p2v(0x40d00000)
+#define ICMR io_p2v(0x40d00004)
+#define xip_irqpending() (readl(ICIP) & readl(ICMR))
/* we sample OSCR and convert desired delta to usec (1/4 ~= 1000000/3686400) */
-#define xip_currtime() (OSCR)
-#define xip_elapsed_since(x) (signed)((OSCR - (x)) / 4)
+#define xip_currtime() readl(OSCR)
+#define xip_elapsed_since(x) (signed)((readl(OSCR) - (x)) / 4)
/*
* xip_cpu_idle() is used when waiting for a delay equal or larger than
diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c
index e2c97728b3c6..9d662fed03ec 100644
--- a/arch/arm/mach-pxa/raumfeld.c
+++ b/arch/arm/mach-pxa/raumfeld.c
@@ -377,7 +377,7 @@ static struct gpiod_lookup_table raumfeld_rotary_gpios_table = {
},
};
-static struct property_entry raumfeld_rotary_properties[] = {
+static const struct property_entry raumfeld_rotary_properties[] __initconst = {
PROPERTY_ENTRY_INTEGER("rotary-encoder,steps-per-period", u32, 24),
PROPERTY_ENTRY_INTEGER("linux,axis", u32, REL_X),
PROPERTY_ENTRY_INTEGER("rotary-encoder,relative_axis", u32, 1),
diff --git a/arch/arm/mach-rpc/include/mach/hardware.h b/arch/arm/mach-rpc/include/mach/hardware.h
index aa79fa47373a..622d4e5df029 100644
--- a/arch/arm/mach-rpc/include/mach/hardware.h
+++ b/arch/arm/mach-rpc/include/mach/hardware.h
@@ -25,8 +25,8 @@
* *_SIZE is the size of the region
* *_BASE is the virtual address
*/
-#define RAM_SIZE 0x10000000
-#define RAM_START 0x10000000
+#define RPC_RAM_SIZE 0x10000000
+#define RPC_RAM_START 0x10000000
#define EASI_SIZE 0x08000000 /* EASI I/O */
#define EASI_START 0x08000000
diff --git a/arch/arm/mach-s3c24xx/mach-osiris-dvs.c b/arch/arm/mach-s3c24xx/mach-osiris-dvs.c
index 262ab0744748..6cac7da15e2b 100644
--- a/arch/arm/mach-s3c24xx/mach-osiris-dvs.c
+++ b/arch/arm/mach-s3c24xx/mach-osiris-dvs.c
@@ -17,7 +17,7 @@
#include <linux/cpufreq.h>
#include <linux/gpio.h>
-#include <linux/i2c/tps65010.h>
+#include <linux/mfd/tps65010.h>
#include <plat/cpu-freq.h>
#include <mach/gpio-samsung.h>
diff --git a/arch/arm/mach-s3c24xx/mach-osiris.c b/arch/arm/mach-s3c24xx/mach-osiris.c
index 70b0eb7d3134..64b1a0b7b803 100644
--- a/arch/arm/mach-s3c24xx/mach-osiris.c
+++ b/arch/arm/mach-s3c24xx/mach-osiris.c
@@ -24,7 +24,7 @@
#include <linux/io.h>
#include <linux/platform_device.h>
-#include <linux/i2c/tps65010.h>
+#include <linux/mfd/tps65010.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
diff --git a/arch/arm/mach-sa1100/clock.c b/arch/arm/mach-sa1100/clock.c
index 0db46895c82a..7d52cd97d96e 100644
--- a/arch/arm/mach-sa1100/clock.c
+++ b/arch/arm/mach-sa1100/clock.c
@@ -35,6 +35,31 @@ struct clk clk_##_name = { \
static DEFINE_SPINLOCK(clocks_lock);
+/* Dummy clk routine to build generic kernel parts that may be using them */
+long clk_round_rate(struct clk *clk, unsigned long rate)
+{
+ return clk_get_rate(clk);
+}
+EXPORT_SYMBOL(clk_round_rate);
+
+int clk_set_rate(struct clk *clk, unsigned long rate)
+{
+ return 0;
+}
+EXPORT_SYMBOL(clk_set_rate);
+
+int clk_set_parent(struct clk *clk, struct clk *parent)
+{
+ return 0;
+}
+EXPORT_SYMBOL(clk_set_parent);
+
+struct clk *clk_get_parent(struct clk *clk)
+{
+ return NULL;
+}
+EXPORT_SYMBOL(clk_get_parent);
+
static void clk_gpio27_enable(struct clk *clk)
{
/*
diff --git a/arch/arm/mach-sa1100/include/mach/mtd-xip.h b/arch/arm/mach-sa1100/include/mach/mtd-xip.h
index b3d684098fbf..cb76096a2e36 100644
--- a/arch/arm/mach-sa1100/include/mach/mtd-xip.h
+++ b/arch/arm/mach-sa1100/include/mach/mtd-xip.h
@@ -20,7 +20,7 @@
#define xip_irqpending() (ICIP & ICMR)
/* we sample OSCR and convert desired delta to usec (1/4 ~= 1000000/3686400) */
-#define xip_currtime() (OSCR)
-#define xip_elapsed_since(x) (signed)((OSCR - (x)) / 4)
+#define xip_currtime() readl_relaxed(OSCR)
+#define xip_elapsed_since(x) (signed)((readl_relaxed(OSCR) - (x)) / 4)
#endif /* __ARCH_SA1100_MTD_XIP_H__ */
diff --git a/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c b/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c
index 73e3adbc1330..44438f344dc8 100644
--- a/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c
+++ b/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c
@@ -67,8 +67,12 @@ static int regulator_quirk_notify(struct notifier_block *nb,
{
struct device *dev = data;
struct i2c_client *client;
+ static bool done;
u32 mon;
+ if (done)
+ return 0;
+
mon = ioread32(irqc + IRQC_MONITOR);
dev_dbg(dev, "%s: %ld, IRQC_MONITOR = 0x%x\n", __func__, action, mon);
if (mon & REGULATOR_IRQ_MASK)
@@ -99,7 +103,7 @@ static int regulator_quirk_notify(struct notifier_block *nb,
remove:
dev_info(dev, "IRQ2 is not asserted, removing quirk\n");
- bus_unregister_notifier(&i2c_bus_type, nb);
+ done = true;
iounmap(irqc);
return 0;
}
diff --git a/arch/arm/mach-tegra/cpuidle-tegra114.c b/arch/arm/mach-tegra/cpuidle-tegra114.c
index d3aa9be16621..e3fbcfedf845 100644
--- a/arch/arm/mach-tegra/cpuidle-tegra114.c
+++ b/arch/arm/mach-tegra/cpuidle-tegra114.c
@@ -60,7 +60,7 @@ static int tegra114_idle_power_down(struct cpuidle_device *dev,
return index;
}
-static void tegra114_idle_enter_freeze(struct cpuidle_device *dev,
+static void tegra114_idle_enter_s2idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
{
@@ -77,7 +77,7 @@ static struct cpuidle_driver tegra_idle_driver = {
#ifdef CONFIG_PM_SLEEP
[1] = {
.enter = tegra114_idle_power_down,
- .enter_freeze = tegra114_idle_enter_freeze,
+ .enter_s2idle = tegra114_idle_enter_s2idle,
.exit_latency = 500,
.target_residency = 1000,
.flags = CPUIDLE_FLAG_TIMER_STOP,
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index 28083ef72819..71a34e8c345a 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -133,6 +133,7 @@ static irqreturn_t db8500_pmu_handler(int irq, void *dev, irq_handler_t handler)
static struct arm_pmu_platdata db8500_pmu_platdata = {
.handle_irq = db8500_pmu_handler,
+ .irq_flags = IRQF_NOBALANCING | IRQF_NO_THREAD,
};
static struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = {
diff --git a/arch/arm/mach-w90x900/clock.c b/arch/arm/mach-w90x900/clock.c
index ac6fd1a2cb59..3f93fac98d97 100644
--- a/arch/arm/mach-w90x900/clock.c
+++ b/arch/arm/mach-w90x900/clock.c
@@ -93,3 +93,32 @@ void nuc900_subclk_enable(struct clk *clk, int enable)
__raw_writel(clken, W90X900_VA_CLKPWR + SUBCLK);
}
+
+/* dummy functions, should not be called */
+long clk_round_rate(struct clk *clk, unsigned long rate)
+{
+ WARN_ON(clk);
+ return 0;
+}
+EXPORT_SYMBOL(clk_round_rate);
+
+int clk_set_rate(struct clk *clk, unsigned long rate)
+{
+ WARN_ON(clk);
+ return 0;
+}
+EXPORT_SYMBOL(clk_set_rate);
+
+int clk_set_parent(struct clk *clk, struct clk *parent)
+{
+ WARN_ON(clk);
+ return 0;
+}
+EXPORT_SYMBOL(clk_set_parent);
+
+struct clk *clk_get_parent(struct clk *clk)
+{
+ WARN_ON(clk);
+ return NULL;
+}
+EXPORT_SYMBOL(clk_get_parent);
diff --git a/arch/arm/mm/dma-mapping-nommu.c b/arch/arm/mm/dma-mapping-nommu.c
index 90ee354d803e..6db5fc26d154 100644
--- a/arch/arm/mm/dma-mapping-nommu.c
+++ b/arch/arm/mm/dma-mapping-nommu.c
@@ -40,9 +40,21 @@ static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
{
const struct dma_map_ops *ops = &dma_noop_ops;
+ void *ret;
/*
- * We are here because:
+ * Try the generic allocator first if the caller has advertised that
+ * consistency is not required.
+ */
+
+ if (attrs & DMA_ATTR_NON_CONSISTENT)
+ return ops->alloc(dev, size, dma_handle, gfp, attrs);
+
+ ret = dma_alloc_from_global_coherent(size, dma_handle);
+
+ /*
+ * dma_alloc_from_global_coherent() may fail because:
+ *
* - no consistent DMA region has been defined, so we can't
* continue.
* - there is no space left in consistent DMA region, so we
@@ -50,11 +62,8 @@ static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
* advertised that consistency is not required.
*/
- if (attrs & DMA_ATTR_NON_CONSISTENT)
- return ops->alloc(dev, size, dma_handle, gfp, attrs);
-
- WARN_ON_ONCE(1);
- return NULL;
+ WARN_ON_ONCE(ret == NULL);
+ return ret;
}
static void arm_nommu_dma_free(struct device *dev, size_t size,
@@ -63,14 +72,31 @@ static void arm_nommu_dma_free(struct device *dev, size_t size,
{
const struct dma_map_ops *ops = &dma_noop_ops;
- if (attrs & DMA_ATTR_NON_CONSISTENT)
+ if (attrs & DMA_ATTR_NON_CONSISTENT) {
ops->free(dev, size, cpu_addr, dma_addr, attrs);
- else
- WARN_ON_ONCE(1);
+ } else {
+ int ret = dma_release_from_global_coherent(get_order(size),
+ cpu_addr);
+
+ WARN_ON_ONCE(ret == 0);
+ }
return;
}
+static int arm_nommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs)
+{
+ int ret;
+
+ if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
+ return ret;
+
+ return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+
+
static void __dma_page_cpu_to_dev(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
@@ -173,6 +199,7 @@ static void arm_nommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist
const struct dma_map_ops arm_nommu_dma_ops = {
.alloc = arm_nommu_dma_alloc,
.free = arm_nommu_dma_free,
+ .mmap = arm_nommu_dma_mmap,
.map_page = arm_nommu_dma_map_page,
.unmap_page = arm_nommu_dma_unmap_page,
.map_sg = arm_nommu_dma_map_sg,
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index e7380bafbfa6..fcf1473d6fed 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -851,7 +851,7 @@ static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
unsigned long pfn = dma_to_pfn(dev, dma_addr);
unsigned long off = vma->vm_pgoff;
- if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+ if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
return ret;
if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index d5b9fa19b684..c199990e12b6 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -1,6 +1,7 @@
/*
- * Just-In-Time compiler for BPF filters on 32bit ARM
+ * Just-In-Time compiler for eBPF filters on 32bit ARM
*
+ * Copyright (c) 2017 Shubham Bansal <illusionist.neo@gmail.com>
* Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -8,6 +9,7 @@
* Free Software Foundation; version 2 of the License.
*/
+#include <linux/bpf.h>
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/errno.h>
@@ -18,54 +20,101 @@
#include <linux/if_vlan.h>
#include <asm/cacheflush.h>
-#include <asm/set_memory.h>
#include <asm/hwcap.h>
#include <asm/opcodes.h>
#include "bpf_jit_32.h"
+int bpf_jit_enable __read_mostly;
+
+#define STACK_OFFSET(k) (k)
+#define TMP_REG_1 (MAX_BPF_JIT_REG + 0) /* TEMP Register 1 */
+#define TMP_REG_2 (MAX_BPF_JIT_REG + 1) /* TEMP Register 2 */
+#define TCALL_CNT (MAX_BPF_JIT_REG + 2) /* Tail Call Count */
+
+/* Flags used for JIT optimization */
+#define SEEN_CALL (1 << 0)
+
+#define FLAG_IMM_OVERFLOW (1 << 0)
+
/*
- * ABI:
+ * Map eBPF registers to ARM 32bit registers or stack scratch space.
+ *
+ * 1. The first argument is passed in ARM 32-bit registers; the remaining
+ * arguments are passed on stack scratch space.
+ * 2. The first callee-saved argument is mapped to ARM 32-bit registers; the
+ * remaining ones are mapped to scratch space on the stack.
+ * 3. We need two 64 bit temp registers to do complex operations on eBPF
+ * registers.
+ *
+ * As eBPF registers are all 64 bits wide and ARM has only 32-bit registers,
+ * each eBPF register is mapped to a pair of ARM 32-bit registers or to
+ * scratch memory on the stack, and the 64-bit value is rebuilt from them.
*
- * r0 scratch register
- * r4 BPF register A
- * r5 BPF register X
- * r6 pointer to the skb
- * r7 skb->data
- * r8 skb_headlen(skb)
*/
+static const u8 bpf2a32[][2] = {
+ /* return value from in-kernel function, and exit value from eBPF */
+ [BPF_REG_0] = {ARM_R1, ARM_R0},
+ /* arguments from eBPF program to in-kernel function */
+ [BPF_REG_1] = {ARM_R3, ARM_R2},
+ /* Stored on stack scratch space */
+ [BPF_REG_2] = {STACK_OFFSET(0), STACK_OFFSET(4)},
+ [BPF_REG_3] = {STACK_OFFSET(8), STACK_OFFSET(12)},
+ [BPF_REG_4] = {STACK_OFFSET(16), STACK_OFFSET(20)},
+ [BPF_REG_5] = {STACK_OFFSET(24), STACK_OFFSET(28)},
+ /* callee saved registers that in-kernel function will preserve */
+ [BPF_REG_6] = {ARM_R5, ARM_R4},
+ /* Stored on stack scratch space */
+ [BPF_REG_7] = {STACK_OFFSET(32), STACK_OFFSET(36)},
+ [BPF_REG_8] = {STACK_OFFSET(40), STACK_OFFSET(44)},
+ [BPF_REG_9] = {STACK_OFFSET(48), STACK_OFFSET(52)},
+ /* Read only Frame Pointer to access Stack */
+ [BPF_REG_FP] = {STACK_OFFSET(56), STACK_OFFSET(60)},
+ /* Temporary registers used internally by the JIT, e.g. for
+ * constant blinding.
+ */
+ [TMP_REG_1] = {ARM_R7, ARM_R6},
+ [TMP_REG_2] = {ARM_R10, ARM_R8},
+ /* Tail call count. Stored on stack scratch space. */
+ [TCALL_CNT] = {STACK_OFFSET(64), STACK_OFFSET(68)},
+ /* temporary register for blinding constants.
+ * Stored on stack scratch space.
+ */
+ [BPF_REG_AX] = {STACK_OFFSET(72), STACK_OFFSET(76)},
+};
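
As a rough illustration of the mapping above: every 64-bit eBPF register is handled as a {high, low} pair of 32-bit words, either in two ARM core registers or in two stack slots. A standalone userspace sketch of splitting and recombining such a pair (split64/join64 are hypothetical helper names used only for illustration, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Split a 64-bit eBPF value into the {hi, lo} pair the JIT works with. */
static void split64(uint64_t val, uint32_t *hi, uint32_t *lo)
{
	*hi = (uint32_t)(val >> 32);
	*lo = (uint32_t)val;
}

/* Rebuild the 64-bit value from the pair, as the JITed code conceptually does. */
static uint64_t join64(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	uint32_t hi, lo;

	split64(0x1122334455667788ULL, &hi, &lo);
	printf("hi=%#x lo=%#x back=%#llx\n", (unsigned int)hi, (unsigned int)lo,
	       (unsigned long long)join64(hi, lo));
	return 0;
}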
-#define r_scratch ARM_R0
-/* r1-r3 are (also) used for the unaligned loads on the non-ARMv7 slowpath */
-#define r_off ARM_R1
-#define r_A ARM_R4
-#define r_X ARM_R5
-#define r_skb ARM_R6
-#define r_skb_data ARM_R7
-#define r_skb_hl ARM_R8
-
-#define SCRATCH_SP_OFFSET 0
-#define SCRATCH_OFF(k) (SCRATCH_SP_OFFSET + 4 * (k))
-
-#define SEEN_MEM ((1 << BPF_MEMWORDS) - 1)
-#define SEEN_MEM_WORD(k) (1 << (k))
-#define SEEN_X (1 << BPF_MEMWORDS)
-#define SEEN_CALL (1 << (BPF_MEMWORDS + 1))
-#define SEEN_SKB (1 << (BPF_MEMWORDS + 2))
-#define SEEN_DATA (1 << (BPF_MEMWORDS + 3))
+#define dst_lo dst[1]
+#define dst_hi dst[0]
+#define src_lo src[1]
+#define src_hi src[0]
-#define FLAG_NEED_X_RESET (1 << 0)
-#define FLAG_IMM_OVERFLOW (1 << 1)
+/*
+ * JIT Context:
+ *
+ * prog : bpf_prog
+ * idx : index of the last JITed instruction.
+ * prologue_bytes : bytes used in the prologue.
+ * epilogue_offset : offset at which the epilogue starts.
+ * seen : bit mask used for JIT optimization.
+ * offsets : array of eBPF instruction offsets in
+ * JITed code.
+ * target : final JITed code.
+ * epilogue_bytes : number of bytes used in the epilogue.
+ * imm_count : number of literal-pool immediates used
+ * (pre-ARMv7 only).
+ * imms : array of literal-pool immediate values.
+ */
struct jit_ctx {
- const struct bpf_prog *skf;
- unsigned idx;
- unsigned prologue_bytes;
- int ret0_fp_idx;
+ const struct bpf_prog *prog;
+ unsigned int idx;
+ unsigned int prologue_bytes;
+ unsigned int epilogue_offset;
u32 seen;
u32 flags;
u32 *offsets;
u32 *target;
+ u32 stack_size;
#if __LINUX_ARM_ARCH__ < 7
u16 epilogue_bytes;
u16 imm_count;
@@ -73,68 +122,16 @@ struct jit_ctx {
#endif
};
-int bpf_jit_enable __read_mostly;
-
-static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret,
- unsigned int size)
-{
- void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size);
-
- if (!ptr)
- return -EFAULT;
- memcpy(ret, ptr, size);
- return 0;
-}
-
-static u64 jit_get_skb_b(struct sk_buff *skb, int offset)
-{
- u8 ret;
- int err;
-
- if (offset < 0)
- err = call_neg_helper(skb, offset, &ret, 1);
- else
- err = skb_copy_bits(skb, offset, &ret, 1);
-
- return (u64)err << 32 | ret;
-}
-
-static u64 jit_get_skb_h(struct sk_buff *skb, int offset)
-{
- u16 ret;
- int err;
-
- if (offset < 0)
- err = call_neg_helper(skb, offset, &ret, 2);
- else
- err = skb_copy_bits(skb, offset, &ret, 2);
-
- return (u64)err << 32 | ntohs(ret);
-}
-
-static u64 jit_get_skb_w(struct sk_buff *skb, int offset)
-{
- u32 ret;
- int err;
-
- if (offset < 0)
- err = call_neg_helper(skb, offset, &ret, 4);
- else
- err = skb_copy_bits(skb, offset, &ret, 4);
-
- return (u64)err << 32 | ntohl(ret);
-}
-
/*
* Wrappers which handle both OABI and EABI and assures Thumb2 interworking
* (where the assembly routines like __aeabi_uidiv could cause problems).
*/
-static u32 jit_udiv(u32 dividend, u32 divisor)
+static u32 jit_udiv32(u32 dividend, u32 divisor)
{
return dividend / divisor;
}
-static u32 jit_mod(u32 dividend, u32 divisor)
+static u32 jit_mod32(u32 dividend, u32 divisor)
{
return dividend % divisor;
}
@@ -158,36 +155,22 @@ static inline void emit(u32 inst, struct jit_ctx *ctx)
_emit(ARM_COND_AL, inst, ctx);
}
-static u16 saved_regs(struct jit_ctx *ctx)
+/*
+ * Checks whether an immediate value can be encoded as an ARM modified
+ * immediate (imm12: an 8-bit value rotated right by an even amount).
+ */
+static int16_t imm8m(u32 x)
{
- u16 ret = 0;
-
- if ((ctx->skf->len > 1) ||
- (ctx->skf->insns[0].code == (BPF_RET | BPF_A)))
- ret |= 1 << r_A;
-
-#ifdef CONFIG_FRAME_POINTER
- ret |= (1 << ARM_FP) | (1 << ARM_IP) | (1 << ARM_LR) | (1 << ARM_PC);
-#else
- if (ctx->seen & SEEN_CALL)
- ret |= 1 << ARM_LR;
-#endif
- if (ctx->seen & (SEEN_DATA | SEEN_SKB))
- ret |= 1 << r_skb;
- if (ctx->seen & SEEN_DATA)
- ret |= (1 << r_skb_data) | (1 << r_skb_hl);
- if (ctx->seen & SEEN_X)
- ret |= 1 << r_X;
-
- return ret;
-}
+ u32 rot;
-static inline int mem_words_used(struct jit_ctx *ctx)
-{
- /* yes, we do waste some stack space IF there are "holes" in the set" */
- return fls(ctx->seen & SEEN_MEM);
+ for (rot = 0; rot < 16; rot++)
+ if ((x & ~ror32(0xff, 2 * rot)) == 0)
+ return rol32(x, 2 * rot) | (rot << 8);
+ return -1;
}
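
imm8m() above implements the standard ARM modified-immediate check: the value must be an 8-bit constant rotated right by an even amount. A self-contained userspace sketch of the same check, with local rotate helpers standing in for the kernel's ror32/rol32:

#include <stdint.h>
#include <stdio.h>

static uint32_t ror32(uint32_t x, unsigned int n)
{
	n &= 31;
	return n ? (x >> n) | (x << (32 - n)) : x;
}

static uint32_t rol32(uint32_t x, unsigned int n)
{
	n &= 31;
	return n ? (x << n) | (x >> (32 - n)) : x;
}

/* Return the imm12 encoding (rotate in bits 11:8, value in bits 7:0), or -1. */
static int imm8m(uint32_t x)
{
	uint32_t rot;

	for (rot = 0; rot < 16; rot++)
		if ((x & ~ror32(0xff, 2 * rot)) == 0)
			return (int)(rol32(x, 2 * rot) | (rot << 8));
	return -1;
}

int main(void)
{
	printf("0xff000000 -> %#x\n", (unsigned int)imm8m(0xff000000u)); /* encodable */
	printf("0x00ff0000 -> %#x\n", (unsigned int)imm8m(0x00ff0000u)); /* encodable */
	printf("0x00101001 -> %d\n", imm8m(0x00101001u));                /* not encodable */
	return 0;
}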
+/*
+ * Initializes the JIT space with undefined instructions.
+ */
static void jit_fill_hole(void *area, unsigned int size)
{
u32 *ptr;
@@ -196,88 +179,34 @@ static void jit_fill_hole(void *area, unsigned int size)
*ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
}
-static void build_prologue(struct jit_ctx *ctx)
-{
- u16 reg_set = saved_regs(ctx);
- u16 off;
-
-#ifdef CONFIG_FRAME_POINTER
- emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx);
- emit(ARM_PUSH(reg_set), ctx);
- emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx);
-#else
- if (reg_set)
- emit(ARM_PUSH(reg_set), ctx);
-#endif
-
- if (ctx->seen & (SEEN_DATA | SEEN_SKB))
- emit(ARM_MOV_R(r_skb, ARM_R0), ctx);
-
- if (ctx->seen & SEEN_DATA) {
- off = offsetof(struct sk_buff, data);
- emit(ARM_LDR_I(r_skb_data, r_skb, off), ctx);
- /* headlen = len - data_len */
- off = offsetof(struct sk_buff, len);
- emit(ARM_LDR_I(r_skb_hl, r_skb, off), ctx);
- off = offsetof(struct sk_buff, data_len);
- emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);
- emit(ARM_SUB_R(r_skb_hl, r_skb_hl, r_scratch), ctx);
- }
-
- if (ctx->flags & FLAG_NEED_X_RESET)
- emit(ARM_MOV_I(r_X, 0), ctx);
-
- /* do not leak kernel data to userspace */
- if (bpf_needs_clear_a(&ctx->skf->insns[0]))
- emit(ARM_MOV_I(r_A, 0), ctx);
-
- /* stack space for the BPF_MEM words */
- if (ctx->seen & SEEN_MEM)
- emit(ARM_SUB_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);
-}
-
-static void build_epilogue(struct jit_ctx *ctx)
-{
- u16 reg_set = saved_regs(ctx);
-
- if (ctx->seen & SEEN_MEM)
- emit(ARM_ADD_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);
-
- reg_set &= ~(1 << ARM_LR);
+/* Round the stack size up to a multiple of 4 bytes (word aligned) */
+#define STACK_ALIGN(sz) (((sz) + 3) & ~3)
-#ifdef CONFIG_FRAME_POINTER
- /* the first instruction of the prologue was: mov ip, sp */
- reg_set &= ~(1 << ARM_IP);
- reg_set |= (1 << ARM_SP);
- emit(ARM_LDM(ARM_SP, reg_set), ctx);
-#else
- if (reg_set) {
- if (ctx->seen & SEEN_CALL)
- reg_set |= 1 << ARM_PC;
- emit(ARM_POP(reg_set), ctx);
- }
+/* Stack space for BPF_REG_2, BPF_REG_3, BPF_REG_4,
+ * BPF_REG_5, BPF_REG_7, BPF_REG_8, BPF_REG_9,
+ * BPF_REG_FP and Tail call counts.
+ */
+#define SCRATCH_SIZE 80
- if (!(ctx->seen & SEEN_CALL))
- emit(ARM_BX(ARM_LR), ctx);
-#endif
-}
+/* total stack size used in JITed code */
+#define _STACK_SIZE \
+ (ctx->prog->aux->stack_depth + \
+ SCRATCH_SIZE + \
+ 4 /* extra for skb_copy_bits buffer */)
-static int16_t imm8m(u32 x)
-{
- u32 rot;
+#define STACK_SIZE STACK_ALIGN(_STACK_SIZE)
- for (rot = 0; rot < 16; rot++)
- if ((x & ~ror32(0xff, 2 * rot)) == 0)
- return rol32(x, 2 * rot) | (rot << 8);
+/* Get the offset of an eBPF register stored on the scratch stack space. */
+#define STACK_VAR(off) (STACK_SIZE - (off) - 4)
- return -1;
-}
+/* Offset of skb_copy_bits buffer */
+#define SKB_BUFFER STACK_VAR(SCRATCH_SIZE)
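
To make the stack accounting above concrete: assuming a prog->aux->stack_depth of 0, _STACK_SIZE comes out as SCRATCH_SIZE + 4 = 84 bytes, and STACK_VAR(off) turns a scratch offset into an offset from ARM_SP. A small standalone sketch of the same arithmetic (constants copied from above; the stack_depth value is an assumption for the example):

#include <stdio.h>

#define SCRATCH_SIZE	80
#define STACK_ALIGN(sz)	(((sz) + 3) & ~3)

int main(void)
{
	unsigned int stack_depth = 0;	/* assumed for illustration */
	unsigned int stack_size = STACK_ALIGN(stack_depth + SCRATCH_SIZE + 4);

	/* STACK_VAR(off) = STACK_SIZE - off - 4 */
	printf("STACK_SIZE    = %u\n", stack_size);
	printf("STACK_VAR(0)  = %u\n", stack_size - 0 - 4);	/* first scratch slot */
	printf("STACK_VAR(76) = %u\n", stack_size - 76 - 4);	/* last scratch slot */
	printf("SKB_BUFFER    = %u\n", stack_size - SCRATCH_SIZE - 4);
	return 0;
}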
#if __LINUX_ARM_ARCH__ < 7
static u16 imm_offset(u32 k, struct jit_ctx *ctx)
{
- unsigned i = 0, offset;
+ unsigned int i = 0, offset;
u16 imm;
/* on the "fake" run we just count them (duplicates included) */
@@ -296,7 +225,7 @@ static u16 imm_offset(u32 k, struct jit_ctx *ctx)
ctx->imms[i] = k;
/* constants go just after the epilogue */
- offset = ctx->offsets[ctx->skf->len];
+ offset = ctx->offsets[ctx->prog->len - 1] * 4;
offset += ctx->prologue_bytes;
offset += ctx->epilogue_bytes;
offset += i * 4;
@@ -320,10 +249,22 @@ static u16 imm_offset(u32 k, struct jit_ctx *ctx)
#endif /* __LINUX_ARM_ARCH__ */
+static inline int bpf2a32_offset(int bpf_to, int bpf_from,
+ const struct jit_ctx *ctx) {
+ int to, from;
+
+ if (ctx->target == NULL)
+ return 0;
+ to = ctx->offsets[bpf_to];
+ from = ctx->offsets[bpf_from];
+
+ return to - from - 1;
+}
+
/*
* Move an immediate that's not an imm8m to a core register.
*/
-static inline void emit_mov_i_no8m(int rd, u32 val, struct jit_ctx *ctx)
+static inline void emit_mov_i_no8m(const u8 rd, u32 val, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 7
emit(ARM_LDR_I(rd, ARM_PC, imm_offset(val, ctx)), ctx);
@@ -334,7 +275,7 @@ static inline void emit_mov_i_no8m(int rd, u32 val, struct jit_ctx *ctx)
#endif
}
-static inline void emit_mov_i(int rd, u32 val, struct jit_ctx *ctx)
+static inline void emit_mov_i(const u8 rd, u32 val, struct jit_ctx *ctx)
{
int imm12 = imm8m(val);
@@ -344,676 +285,1594 @@ static inline void emit_mov_i(int rd, u32 val, struct jit_ctx *ctx)
emit_mov_i_no8m(rd, val, ctx);
}
-#if __LINUX_ARM_ARCH__ < 6
-
-static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
+static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx)
{
- _emit(cond, ARM_LDRB_I(ARM_R3, r_addr, 1), ctx);
- _emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx);
- _emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 3), ctx);
- _emit(cond, ARM_LSL_I(ARM_R3, ARM_R3, 16), ctx);
- _emit(cond, ARM_LDRB_I(ARM_R0, r_addr, 2), ctx);
- _emit(cond, ARM_ORR_S(ARM_R3, ARM_R3, ARM_R1, SRTYPE_LSL, 24), ctx);
- _emit(cond, ARM_ORR_R(ARM_R3, ARM_R3, ARM_R2), ctx);
- _emit(cond, ARM_ORR_S(r_res, ARM_R3, ARM_R0, SRTYPE_LSL, 8), ctx);
+ ctx->seen |= SEEN_CALL;
+#if __LINUX_ARM_ARCH__ < 5
+ emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);
+
+ if (elf_hwcap & HWCAP_THUMB)
+ emit(ARM_BX(tgt_reg), ctx);
+ else
+ emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx);
+#else
+ emit(ARM_BLX_R(tgt_reg), ctx);
+#endif
}
-static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
+static inline int epilogue_offset(const struct jit_ctx *ctx)
{
- _emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx);
- _emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 1), ctx);
- _emit(cond, ARM_ORR_S(r_res, ARM_R2, ARM_R1, SRTYPE_LSL, 8), ctx);
+ int to, from;
+ /* No need for 1st dummy run */
+ if (ctx->target == NULL)
+ return 0;
+ to = ctx->epilogue_offset;
+ from = ctx->idx;
+
+ return to - from - 2;
}
-static inline void emit_swap16(u8 r_dst, u8 r_src, struct jit_ctx *ctx)
+static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op)
{
- /* r_dst = (r_src << 8) | (r_src >> 8) */
- emit(ARM_LSL_I(ARM_R1, r_src, 8), ctx);
- emit(ARM_ORR_S(r_dst, ARM_R1, r_src, SRTYPE_LSR, 8), ctx);
+ const u8 *tmp = bpf2a32[TMP_REG_1];
+ s32 jmp_offset;
+
+ /* If the divisor is zero, set the return value to 0 and
+ * branch straight to the epilogue.
+ */
+ emit(ARM_CMP_I(rn, 0), ctx);
+ _emit(ARM_COND_EQ, ARM_MOV_I(ARM_R0, 0), ctx);
+ jmp_offset = epilogue_offset(ctx);
+ _emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx);
+#if __LINUX_ARM_ARCH__ == 7
+ if (elf_hwcap & HWCAP_IDIVA) {
+ if (op == BPF_DIV)
+ emit(ARM_UDIV(rd, rm, rn), ctx);
+ else {
+ emit(ARM_UDIV(ARM_IP, rm, rn), ctx);
+ emit(ARM_MLS(rd, rn, ARM_IP, rm), ctx);
+ }
+ return;
+ }
+#endif
/*
- * we need to mask out the bits set in r_dst[23:16] due to
- * the first shift instruction.
- *
- * note that 0x8ff is the encoded immediate 0x00ff0000.
+ * For BPF_ALU | BPF_DIV | BPF_K instructions:
+ * ARM_R0 and ARM_R1 hold the first argument of the BPF
+ * program, so save them on the caller side before the
+ * helper call to keep the callee from clobbering them.
+ * After the callee returns, restore ARM_R0 and ARM_R1.
*/
- emit(ARM_BIC_I(r_dst, r_dst, 0x8ff), ctx);
-}
+ if (rn != ARM_R1) {
+ emit(ARM_MOV_R(tmp[0], ARM_R1), ctx);
+ emit(ARM_MOV_R(ARM_R1, rn), ctx);
+ }
+ if (rm != ARM_R0) {
+ emit(ARM_MOV_R(tmp[1], ARM_R0), ctx);
+ emit(ARM_MOV_R(ARM_R0, rm), ctx);
+ }
-#else /* ARMv6+ */
+ /* Call appropriate function */
+ ctx->seen |= SEEN_CALL;
+ emit_mov_i(ARM_IP, op == BPF_DIV ?
+ (u32)jit_udiv32 : (u32)jit_mod32, ctx);
+ emit_blx_r(ARM_IP, ctx);
-static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
-{
- _emit(cond, ARM_LDR_I(r_res, r_addr, 0), ctx);
-#ifdef __LITTLE_ENDIAN
- _emit(cond, ARM_REV(r_res, r_res), ctx);
-#endif
+ /* Save return value */
+ if (rd != ARM_R0)
+ emit(ARM_MOV_R(rd, ARM_R0), ctx);
+
+ /* Restore ARM_R0 and ARM_R1 */
+ if (rn != ARM_R1)
+ emit(ARM_MOV_R(ARM_R1, tmp[0]), ctx);
+ if (rm != ARM_R0)
+ emit(ARM_MOV_R(ARM_R0, tmp[1]), ctx);
}
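
The emitted sequence above gives eBPF's divide semantics on top of the two helpers: a zero divisor makes the program return 0, otherwise an unsigned 32-bit quotient or remainder is produced. A standalone C rendering of that contract (the function name here is illustrative, not the kernel's):

#include <stdint.h>
#include <stdio.h>

/* Returns 1 on success, 0 when the program should bail out with return value 0. */
static int ebpf_udivmod32(uint32_t dividend, uint32_t divisor, int is_mod,
			  uint32_t *result)
{
	if (divisor == 0)
		return 0;	/* the JIT branches to the epilogue with R0 = 0 */
	*result = is_mod ? dividend % divisor : dividend / divisor;
	return 1;
}

int main(void)
{
	uint32_t res;

	if (ebpf_udivmod32(100, 7, 0, &res))
		printf("100 / 7 = %u\n", (unsigned int)res);
	if (!ebpf_udivmod32(100, 0, 0, &res))
		printf("division by zero -> program returns 0\n");
	return 0;
}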
-static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
+/* Checks whether a BPF register is stored on the scratch stack space. */
+static inline bool is_on_stack(u8 bpf_reg)
{
- _emit(cond, ARM_LDRH_I(r_res, r_addr, 0), ctx);
-#ifdef __LITTLE_ENDIAN
- _emit(cond, ARM_REV16(r_res, r_res), ctx);
-#endif
+ static u8 stack_regs[] = {BPF_REG_AX, BPF_REG_3, BPF_REG_4, BPF_REG_5,
+ BPF_REG_7, BPF_REG_8, BPF_REG_9, TCALL_CNT,
+ BPF_REG_2, BPF_REG_FP};
+ int i, reg_len = sizeof(stack_regs);
+
+ for (i = 0 ; i < reg_len ; i++) {
+ if (bpf_reg == stack_regs[i])
+ return true;
+ }
+ return false;
}
-static inline void emit_swap16(u8 r_dst __maybe_unused,
- u8 r_src __maybe_unused,
- struct jit_ctx *ctx __maybe_unused)
+static inline void emit_a32_mov_i(const u8 dst, const u32 val,
+ bool dstk, struct jit_ctx *ctx)
{
-#ifdef __LITTLE_ENDIAN
- emit(ARM_REV16(r_dst, r_src), ctx);
-#endif
+ const u8 *tmp = bpf2a32[TMP_REG_1];
+
+ if (dstk) {
+ emit_mov_i(tmp[1], val, ctx);
+ emit(ARM_STR_I(tmp[1], ARM_SP, STACK_VAR(dst)), ctx);
+ } else {
+ emit_mov_i(dst, val, ctx);
+ }
}
-#endif /* __LINUX_ARM_ARCH__ < 6 */
+/* Sign extended move */
+static inline void emit_a32_mov_i64(const bool is64, const u8 dst[],
+ const u32 val, bool dstk,
+ struct jit_ctx *ctx) {
+ u32 hi = 0;
+ if (is64 && (val & (1<<31)))
+ hi = (u32)~0;
+ emit_a32_mov_i(dst_lo, val, dstk, ctx);
+ emit_a32_mov_i(dst_hi, hi, dstk, ctx);
+}
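
The sign extension above just replicates bit 31 of the immediate into the whole high word for ALU64 moves; 32-bit moves zero the high word instead. In plain C, roughly (a sketch only, with an illustrative helper name):

#include <stdint.h>
#include <stdio.h>

/* High word chosen the way emit_a32_mov_i64() chooses it. */
static uint32_t mov_imm_hi(int32_t imm, int is64)
{
	return (is64 && imm < 0) ? 0xffffffffu : 0;
}

int main(void)
{
	printf("ALU64 mov -1 -> hi %#x\n", (unsigned int)mov_imm_hi(-1, 1));
	printf("ALU32 mov -1 -> hi %#x\n", (unsigned int)mov_imm_hi(-1, 0));
	return 0;
}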
-/* Compute the immediate value for a PC-relative branch. */
-static inline u32 b_imm(unsigned tgt, struct jit_ctx *ctx)
-{
- u32 imm;
+static inline void emit_a32_add_r(const u8 dst, const u8 src,
+ const bool is64, const bool hi,
+ struct jit_ctx *ctx) {
+ /* 64 bit :
+ * adds dst_lo, dst_lo, src_lo
+ * adc dst_hi, dst_hi, src_hi
+ * 32 bit :
+ * add dst_lo, dst_lo, src_lo
+ */
+ if (!hi && is64)
+ emit(ARM_ADDS_R(dst, dst, src), ctx);
+ else if (hi && is64)
+ emit(ARM_ADC_R(dst, dst, src), ctx);
+ else
+ emit(ARM_ADD_R(dst, dst, src), ctx);
+}
- if (ctx->target == NULL)
- return 0;
- /*
- * BPF allows only forward jumps and the offset of the target is
- * still the one computed during the first pass.
+static inline void emit_a32_sub_r(const u8 dst, const u8 src,
+ const bool is64, const bool hi,
+ struct jit_ctx *ctx) {
+ /* 64 bit :
+ * subs dst_lo, dst_lo, src_lo
+ * sbc dst_hi, dst_hi, src_hi
+ * 32 bit :
+ * sub dst_lo, dst_lo, src_lo
*/
- imm = ctx->offsets[tgt] + ctx->prologue_bytes - (ctx->idx * 4 + 8);
+ if (!hi && is64)
+ emit(ARM_SUBS_R(dst, dst, src), ctx);
+ else if (hi && is64)
+ emit(ARM_SBC_R(dst, dst, src), ctx);
+ else
+ emit(ARM_SUB_R(dst, dst, src), ctx);
+}
- return imm >> 2;
+static inline void emit_alu_r(const u8 dst, const u8 src, const bool is64,
+ const bool hi, const u8 op, struct jit_ctx *ctx){
+ switch (BPF_OP(op)) {
+ /* dst = dst + src */
+ case BPF_ADD:
+ emit_a32_add_r(dst, src, is64, hi, ctx);
+ break;
+ /* dst = dst - src */
+ case BPF_SUB:
+ emit_a32_sub_r(dst, src, is64, hi, ctx);
+ break;
+ /* dst = dst | src */
+ case BPF_OR:
+ emit(ARM_ORR_R(dst, dst, src), ctx);
+ break;
+ /* dst = dst & src */
+ case BPF_AND:
+ emit(ARM_AND_R(dst, dst, src), ctx);
+ break;
+ /* dst = dst ^ src */
+ case BPF_XOR:
+ emit(ARM_EOR_R(dst, dst, src), ctx);
+ break;
+ /* dst = dst * src */
+ case BPF_MUL:
+ emit(ARM_MUL(dst, dst, src), ctx);
+ break;
+ /* dst = dst << src */
+ case BPF_LSH:
+ emit(ARM_LSL_R(dst, dst, src), ctx);
+ break;
+ /* dst = dst >> src */
+ case BPF_RSH:
+ emit(ARM_LSR_R(dst, dst, src), ctx);
+ break;
+ /* dst = dst >> src (signed)*/
+ case BPF_ARSH:
+ emit(ARM_MOV_SR(dst, dst, SRTYPE_ASR, src), ctx);
+ break;
+ }
}
-#define OP_IMM3(op, r1, r2, imm_val, ctx) \
- do { \
- imm12 = imm8m(imm_val); \
- if (imm12 < 0) { \
- emit_mov_i_no8m(r_scratch, imm_val, ctx); \
- emit(op ## _R((r1), (r2), r_scratch), ctx); \
- } else { \
- emit(op ## _I((r1), (r2), imm12), ctx); \
- } \
- } while (0)
-
-static inline void emit_err_ret(u8 cond, struct jit_ctx *ctx)
-{
- if (ctx->ret0_fp_idx >= 0) {
- _emit(cond, ARM_B(b_imm(ctx->ret0_fp_idx, ctx)), ctx);
- /* NOP to keep the size constant between passes */
- emit(ARM_MOV_R(ARM_R0, ARM_R0), ctx);
+/* ALU operation (32 bit)
+ * dst = dst (op) src
+ */
+static inline void emit_a32_alu_r(const u8 dst, const u8 src,
+ bool dstk, bool sstk,
+ struct jit_ctx *ctx, const bool is64,
+ const bool hi, const u8 op) {
+ const u8 *tmp = bpf2a32[TMP_REG_1];
+ u8 rn = sstk ? tmp[1] : src;
+
+ if (sstk)
+ emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src)), ctx);
+
+ /* ALU operation */
+ if (dstk) {
+ emit(ARM_LDR_I(tmp[0], ARM_SP, STACK_VAR(dst)), ctx);
+ emit_alu_r(tmp[0], rn, is64, hi, op, ctx);
+ emit(ARM_STR_I(tmp[0], ARM_SP, STACK_VAR(dst)), ctx);
} else {
- _emit(cond, ARM_MOV_I(ARM_R0, 0), ctx);
- _emit(cond, ARM_B(b_imm(ctx->skf->len, ctx)), ctx);
+ emit_alu_r(dst, rn, is64, hi, op, ctx);
}
}
-static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx)
-{
-#if __LINUX_ARM_ARCH__ < 5
- emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);
+/* ALU operation (64 bit) */
+static inline void emit_a32_alu_r64(const bool is64, const u8 dst[],
+ const u8 src[], bool dstk,
+ bool sstk, struct jit_ctx *ctx,
+ const u8 op) {
+ emit_a32_alu_r(dst_lo, src_lo, dstk, sstk, ctx, is64, false, op);
+ if (is64)
+ emit_a32_alu_r(dst_hi, src_hi, dstk, sstk, ctx, is64, true, op);
+ else
+ emit_a32_mov_i(dst_hi, 0, dstk, ctx);
+}
- if (elf_hwcap & HWCAP_THUMB)
- emit(ARM_BX(tgt_reg), ctx);
+/* dst = src (4 bytes) */
+static inline void emit_a32_mov_r(const u8 dst, const u8 src,
+ bool dstk, bool sstk,
+ struct jit_ctx *ctx) {
+ const u8 *tmp = bpf2a32[TMP_REG_1];
+ u8 rt = sstk ? tmp[0] : src;
+
+ if (sstk)
+ emit(ARM_LDR_I(tmp[0], ARM_SP, STACK_VAR(src)), ctx);
+ if (dstk)
+ emit(ARM_STR_I(rt, ARM_SP, STACK_VAR(dst)), ctx);
else
- emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx);
-#else
- emit(ARM_BLX_R(tgt_reg), ctx);
-#endif
+ emit(ARM_MOV_R(dst, rt), ctx);
}
-static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx,
- int bpf_op)
-{
-#if __LINUX_ARM_ARCH__ == 7
- if (elf_hwcap & HWCAP_IDIVA) {
- if (bpf_op == BPF_DIV)
- emit(ARM_UDIV(rd, rm, rn), ctx);
- else {
- emit(ARM_UDIV(ARM_R3, rm, rn), ctx);
- emit(ARM_MLS(rd, rn, ARM_R3, rm), ctx);
- }
- return;
+/* dst = src */
+static inline void emit_a32_mov_r64(const bool is64, const u8 dst[],
+ const u8 src[], bool dstk,
+ bool sstk, struct jit_ctx *ctx) {
+ emit_a32_mov_r(dst_lo, src_lo, dstk, sstk, ctx);
+ if (is64) {
+ /* complete 8 byte move */
+ emit_a32_mov_r(dst_hi, src_hi, dstk, sstk, ctx);
+ } else {
+ /* Zero out high 4 bytes */
+ emit_a32_mov_i(dst_hi, 0, dstk, ctx);
}
-#endif
+}
- /*
- * For BPF_ALU | BPF_DIV | BPF_K instructions, rm is ARM_R4
- * (r_A) and rn is ARM_R0 (r_scratch) so load rn first into
- * ARM_R1 to avoid accidentally overwriting ARM_R0 with rm
- * before using it as a source for ARM_R1.
- *
- * For BPF_ALU | BPF_DIV | BPF_X rm is ARM_R4 (r_A) and rn is
- * ARM_R5 (r_X) so there is no particular register overlap
- * issues.
- */
- if (rn != ARM_R1)
- emit(ARM_MOV_R(ARM_R1, rn), ctx);
- if (rm != ARM_R0)
- emit(ARM_MOV_R(ARM_R0, rm), ctx);
+/* Shift/negate operations with an immediate operand (32 bit) */
+static inline void emit_a32_alu_i(const u8 dst, const u32 val, bool dstk,
+ struct jit_ctx *ctx, const u8 op) {
+ const u8 *tmp = bpf2a32[TMP_REG_1];
+ u8 rd = dstk ? tmp[0] : dst;
+
+ if (dstk)
+ emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst)), ctx);
+
+ /* Do shift operation */
+ switch (op) {
+ case BPF_LSH:
+ emit(ARM_LSL_I(rd, rd, val), ctx);
+ break;
+ case BPF_RSH:
+ emit(ARM_LSR_I(rd, rd, val), ctx);
+ break;
+ case BPF_NEG:
+ emit(ARM_RSB_I(rd, rd, val), ctx);
+ break;
+ }
+ if (dstk)
+ emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst)), ctx);
+}
+
+/* dst = -dst (64 bit) */
+static inline void emit_a32_neg64(const u8 dst[], bool dstk,
+ struct jit_ctx *ctx){
+ const u8 *tmp = bpf2a32[TMP_REG_1];
+ u8 rd = dstk ? tmp[1] : dst[1];
+ u8 rm = dstk ? tmp[0] : dst[0];
+
+ /* Setup Operand */
+ if (dstk) {
+ emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
+ emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
+ }
+
+ /* Do Negate Operation */
+ emit(ARM_RSBS_I(rd, rd, 0), ctx);
+ emit(ARM_RSC_I(rm, rm, 0), ctx);
+
+ if (dstk) {
+ emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
+ emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
+ }
+}
+
+/* dst = dst << src */
+static inline void emit_a32_lsh_r64(const u8 dst[], const u8 src[], bool dstk,
+ bool sstk, struct jit_ctx *ctx) {
+ const u8 *tmp = bpf2a32[TMP_REG_1];
+ const u8 *tmp2 = bpf2a32[TMP_REG_2];
+
+ /* Setup Operands */
+ u8 rt = sstk ? tmp2[1] : src_lo;
+ u8 rd = dstk ? tmp[1] : dst_lo;
+ u8 rm = dstk ? tmp[0] : dst_hi;
+
+ if (sstk)
+ emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx);
+ if (dstk) {
+ emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
+ emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
+ }
+
+ /* Do LSH operation */
+ emit(ARM_SUB_I(ARM_IP, rt, 32), ctx);
+ emit(ARM_RSB_I(tmp2[0], rt, 32), ctx);
+ /* As we are using ARM_LR */
ctx->seen |= SEEN_CALL;
- emit_mov_i(ARM_R3, bpf_op == BPF_DIV ? (u32)jit_udiv : (u32)jit_mod,
- ctx);
- emit_blx_r(ARM_R3, ctx);
+ emit(ARM_MOV_SR(ARM_LR, rm, SRTYPE_ASL, rt), ctx);
+ emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd, SRTYPE_ASL, ARM_IP), ctx);
+ emit(ARM_ORR_SR(ARM_IP, ARM_LR, rd, SRTYPE_LSR, tmp2[0]), ctx);
+ emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_ASL, rt), ctx);
+
+ if (dstk) {
+ emit(ARM_STR_I(ARM_LR, ARM_SP, STACK_VAR(dst_lo)), ctx);
+ emit(ARM_STR_I(ARM_IP, ARM_SP, STACK_VAR(dst_hi)), ctx);
+ } else {
+ emit(ARM_MOV_R(rd, ARM_LR), ctx);
+ emit(ARM_MOV_R(rm, ARM_IP), ctx);
+ }
+}
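
The instruction sequence above composes a 64-bit left shift out of 32-bit shifts without any branches: ARM register-specified shifts use only the bottom byte of the shift amount, and amounts of 32 or more yield 0, so the "n - 32" and "32 - n" terms vanish on their own when they do not apply. C needs explicit cases (shifting a 32-bit value by 32 or more is undefined), so an equivalent sketch looks like this:

#include <stdint.h>
#include <stdio.h>

/* 64-bit value kept as a {hi, lo} pair of 32-bit words, as in the JIT. */
static void lsh64(uint32_t *hi, uint32_t *lo, unsigned int n)
{
	if (n == 0)
		return;
	if (n < 32) {
		*hi = (*hi << n) | (*lo >> (32 - n));
		*lo <<= n;
	} else {			/* 32 <= n < 64 */
		*hi = *lo << (n - 32);
		*lo = 0;
	}
}

int main(void)
{
	uint32_t hi = 0x1, lo = 0x80000000u;

	lsh64(&hi, &lo, 4);		/* 0x180000000 << 4 = 0x1800000000 */
	printf("hi=%#x lo=%#x\n", (unsigned int)hi, (unsigned int)lo);
	return 0;
}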
- if (rd != ARM_R0)
- emit(ARM_MOV_R(rd, ARM_R0), ctx);
+/* dst = dst >> src (signed)*/
+static inline void emit_a32_arsh_r64(const u8 dst[], const u8 src[], bool dstk,
+ bool sstk, struct jit_ctx *ctx) {
+ const u8 *tmp = bpf2a32[TMP_REG_1];
+ const u8 *tmp2 = bpf2a32[TMP_REG_2];
+ /* Setup Operands */
+ u8 rt = sstk ? tmp2[1] : src_lo;
+ u8 rd = dstk ? tmp[1] : dst_lo;
+ u8 rm = dstk ? tmp[0] : dst_hi;
+
+ if (sstk)
+ emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx);
+ if (dstk) {
+ emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
+ emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
+ }
+
+ /* Do the ARSH operation */
+ emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
+ emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
+ /* As we are using ARM_LR */
+ ctx->seen |= SEEN_CALL;
+ emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx);
+ emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASL, ARM_IP), ctx);
+ _emit(ARM_COND_MI, ARM_B(0), ctx);
+ emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASR, tmp2[0]), ctx);
+ emit(ARM_MOV_SR(ARM_IP, rm, SRTYPE_ASR, rt), ctx);
+ if (dstk) {
+ emit(ARM_STR_I(ARM_LR, ARM_SP, STACK_VAR(dst_lo)), ctx);
+ emit(ARM_STR_I(ARM_IP, ARM_SP, STACK_VAR(dst_hi)), ctx);
+ } else {
+ emit(ARM_MOV_R(rd, ARM_LR), ctx);
+ emit(ARM_MOV_R(rm, ARM_IP), ctx);
+ }
+}
+
+/* dst = dst >> src */
+static inline void emit_a32_lsr_r64(const u8 dst[], const u8 src[], bool dstk,
+ bool sstk, struct jit_ctx *ctx) {
+ const u8 *tmp = bpf2a32[TMP_REG_1];
+ const u8 *tmp2 = bpf2a32[TMP_REG_2];
+ /* Setup Operands */
+ u8 rt = sstk ? tmp2[1] : src_lo;
+ u8 rd = dstk ? tmp[1] : dst_lo;
+ u8 rm = dstk ? tmp[0] : dst_hi;
+
+ if (sstk)
+ emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx);
+ if (dstk) {
+ emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
+ emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
+ }
+
+ /* Do LSR operation */
+ emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
+ emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
+ /* As we are using ARM_LR */
+ ctx->seen |= SEEN_CALL;
+ emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx);
+ emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASL, ARM_IP), ctx);
+ emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_LSR, tmp2[0]), ctx);
+ emit(ARM_MOV_SR(ARM_IP, rm, SRTYPE_LSR, rt), ctx);
+ if (dstk) {
+ emit(ARM_STR_I(ARM_LR, ARM_SP, STACK_VAR(dst_lo)), ctx);
+ emit(ARM_STR_I(ARM_IP, ARM_SP, STACK_VAR(dst_hi)), ctx);
+ } else {
+ emit(ARM_MOV_R(rd, ARM_LR), ctx);
+ emit(ARM_MOV_R(rm, ARM_IP), ctx);
+ }
}
-static inline void update_on_xread(struct jit_ctx *ctx)
+/* dst = dst << val */
+static inline void emit_a32_lsh_i64(const u8 dst[], bool dstk,
+ const u32 val, struct jit_ctx *ctx){
+ const u8 *tmp = bpf2a32[TMP_REG_1];
+ const u8 *tmp2 = bpf2a32[TMP_REG_2];
+ /* Setup operands */
+ u8 rd = dstk ? tmp[1] : dst_lo;
+ u8 rm = dstk ? tmp[0] : dst_hi;
+
+ if (dstk) {
+ emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
+ emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
+ }
+
+ /* Do LSH operation */
+ if (val < 32) {
+ emit(ARM_MOV_SI(tmp2[0], rm, SRTYPE_ASL, val), ctx);
+ emit(ARM_ORR_SI(rm, tmp2[0], rd, SRTYPE_LSR, 32 - val), ctx);
+ emit(ARM_MOV_SI(rd, rd, SRTYPE_ASL, val), ctx);
+ } else {
+ if (val == 32)
+ emit(ARM_MOV_R(rm, rd), ctx);
+ else
+ emit(ARM_MOV_SI(rm, rd, SRTYPE_ASL, val - 32), ctx);
+ emit(ARM_EOR_R(rd, rd, rd), ctx);
+ }
+
+ if (dstk) {
+ emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
+ emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
+ }
+}
+
+/* dst = dst >> val */
+static inline void emit_a32_lsr_i64(const u8 dst[], bool dstk,
+ const u32 val, struct jit_ctx *ctx) {
+ const u8 *tmp = bpf2a32[TMP_REG_1];
+ const u8 *tmp2 = bpf2a32[TMP_REG_2];
+ /* Setup operands */
+ u8 rd = dstk ? tmp[1] : dst_lo;
+ u8 rm = dstk ? tmp[0] : dst_hi;
+
+ if (dstk) {
+ emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
+ emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
+ }
+
+ /* Do LSR operation */
+ if (val < 32) {
+ emit(ARM_MOV_SI(tmp2[1], rd, SRTYPE_LSR, val), ctx);
+ emit(ARM_ORR_SI(rd, tmp2[1], rm, SRTYPE_ASL, 32 - val), ctx);
+ emit(ARM_MOV_SI(rm, rm, SRTYPE_LSR, val), ctx);
+ } else if (val == 32) {
+ emit(ARM_MOV_R(rd, rm), ctx);
+ emit(ARM_MOV_I(rm, 0), ctx);
+ } else {
+ emit(ARM_MOV_SI(rd, rm, SRTYPE_LSR, val - 32), ctx);
+ emit(ARM_MOV_I(rm, 0), ctx);
+ }
+
+ if (dstk) {
+ emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
+ emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
+ }
+}
+
+/* dst = dst >> val (signed) */
+static inline void emit_a32_arsh_i64(const u8 dst[], bool dstk,
+ const u32 val, struct jit_ctx *ctx){
+ const u8 *tmp = bpf2a32[TMP_REG_1];
+ const u8 *tmp2 = bpf2a32[TMP_REG_2];
+ /* Setup operands */
+ u8 rd = dstk ? tmp[1] : dst_lo;
+ u8 rm = dstk ? tmp[0] : dst_hi;
+
+ if (dstk) {
+ emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
+ emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
+ }
+
+ /* Do ARSH operation */
+ if (val < 32) {
+ emit(ARM_MOV_SI(tmp2[1], rd, SRTYPE_LSR, val), ctx);
+ emit(ARM_ORR_SI(rd, tmp2[1], rm, SRTYPE_ASL, 32 - val), ctx);
+ emit(ARM_MOV_SI(rm, rm, SRTYPE_ASR, val), ctx);
+ } else if (val == 32) {
+ emit(ARM_MOV_R(rd, rm), ctx);
+ emit(ARM_MOV_SI(rm, rm, SRTYPE_ASR, 31), ctx);
+ } else {
+ emit(ARM_MOV_SI(rd, rm, SRTYPE_ASR, val - 32), ctx);
+ emit(ARM_MOV_SI(rm, rm, SRTYPE_ASR, 31), ctx);
+ }
+
+ if (dstk) {
+ emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
+ emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
+ }
+}
+
+static inline void emit_a32_mul_r64(const u8 dst[], const u8 src[], bool dstk,
+ bool sstk, struct jit_ctx *ctx) {
+ const u8 *tmp = bpf2a32[TMP_REG_1];
+ const u8 *tmp2 = bpf2a32[TMP_REG_2];
+ /* Setup operands for multiplication */
+ u8 rd = dstk ? tmp[1] : dst_lo;
+ u8 rm = dstk ? tmp[0] : dst_hi;
+ u8 rt = sstk ? tmp2[1] : src_lo;
+ u8 rn = sstk ? tmp2[0] : src_hi;
+
+ if (dstk) {
+ emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
+ emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
+ }
+ if (sstk) {
+ emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx);
+ emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_hi)), ctx);
+ }
+
+ /* Do Multiplication */
+ emit(ARM_MUL(ARM_IP, rd, rn), ctx);
+ emit(ARM_MUL(ARM_LR, rm, rt), ctx);
+ /* As we are using ARM_LR */
+ ctx->seen |= SEEN_CALL;
+ emit(ARM_ADD_R(ARM_LR, ARM_IP, ARM_LR), ctx);
+
+ emit(ARM_UMULL(ARM_IP, rm, rd, rt), ctx);
+ emit(ARM_ADD_R(rm, ARM_LR, rm), ctx);
+ if (dstk) {
+ emit(ARM_STR_I(ARM_IP, ARM_SP, STACK_VAR(dst_lo)), ctx);
+ emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx);
+ } else {
+ emit(ARM_MOV_R(rd, ARM_IP), ctx);
+ }
+}
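
The multiply above uses the usual schoolbook trick for a 64x64 -> 64-bit product on a 32-bit machine: one full 32x32 -> 64 multiply (the UMULL) for the low words, plus the two truncated cross products added into the high word. The same computation in portable C (a sketch):

#include <stdint.h>
#include <stdio.h>

/* 64x64 -> 64 multiply built from 32-bit halves, mirroring emit_a32_mul_r64(). */
static uint64_t mul64(uint32_t dst_hi, uint32_t dst_lo,
		      uint32_t src_hi, uint32_t src_lo)
{
	uint32_t cross = dst_lo * src_hi + dst_hi * src_lo;	/* MUL + MUL + ADD */
	uint64_t low   = (uint64_t)dst_lo * src_lo;		/* UMULL */

	return low + ((uint64_t)cross << 32);			/* add into the high word */
}

int main(void)
{
	/* 0x100000002 * 3 = 0x300000006 */
	printf("%#llx\n", (unsigned long long)mul64(0x1, 0x2, 0x0, 0x3));
	return 0;
}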
+
+/* *(size *)(dst + off) = src */
+static inline void emit_str_r(const u8 dst, const u8 src, bool dstk,
+ const s32 off, struct jit_ctx *ctx, const u8 sz){
+ const u8 *tmp = bpf2a32[TMP_REG_1];
+ u8 rd = dstk ? tmp[1] : dst;
+
+ if (dstk)
+ emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst)), ctx);
+ if (off) {
+ emit_a32_mov_i(tmp[0], off, false, ctx);
+ emit(ARM_ADD_R(tmp[0], rd, tmp[0]), ctx);
+ rd = tmp[0];
+ }
+ switch (sz) {
+ case BPF_W:
+ /* Store a Word */
+ emit(ARM_STR_I(src, rd, 0), ctx);
+ break;
+ case BPF_H:
+ /* Store a HalfWord */
+ emit(ARM_STRH_I(src, rd, 0), ctx);
+ break;
+ case BPF_B:
+ /* Store a Byte */
+ emit(ARM_STRB_I(src, rd, 0), ctx);
+ break;
+ }
+}
+
+/* dst = *(size*)(src + off) */
+static inline void emit_ldx_r(const u8 dst, const u8 src, bool dstk,
+ const s32 off, struct jit_ctx *ctx, const u8 sz){
+ const u8 *tmp = bpf2a32[TMP_REG_1];
+ u8 rd = dstk ? tmp[1] : dst;
+ u8 rm = src;
+
+ if (off) {
+ emit_a32_mov_i(tmp[0], off, false, ctx);
+ emit(ARM_ADD_R(tmp[0], tmp[0], src), ctx);
+ rm = tmp[0];
+ }
+ switch (sz) {
+ case BPF_W:
+ /* Load a Word */
+ emit(ARM_LDR_I(rd, rm, 0), ctx);
+ break;
+ case BPF_H:
+ /* Load a HalfWord */
+ emit(ARM_LDRH_I(rd, rm, 0), ctx);
+ break;
+ case BPF_B:
+ /* Load a Byte */
+ emit(ARM_LDRB_I(rd, rm, 0), ctx);
+ break;
+ }
+ if (dstk)
+ emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst)), ctx);
+}
+
+/* Arithmetic operation: emit the compare(s) for a conditional jump */
+static inline void emit_ar_r(const u8 rd, const u8 rt, const u8 rm,
+ const u8 rn, struct jit_ctx *ctx, u8 op) {
+ switch (op) {
+ case BPF_JSET:
+ ctx->seen |= SEEN_CALL;
+ emit(ARM_AND_R(ARM_IP, rt, rn), ctx);
+ emit(ARM_AND_R(ARM_LR, rd, rm), ctx);
+ emit(ARM_ORRS_R(ARM_IP, ARM_LR, ARM_IP), ctx);
+ break;
+ case BPF_JEQ:
+ case BPF_JNE:
+ case BPF_JGT:
+ case BPF_JGE:
+ case BPF_JLE:
+ case BPF_JLT:
+ emit(ARM_CMP_R(rd, rm), ctx);
+ _emit(ARM_COND_EQ, ARM_CMP_R(rt, rn), ctx);
+ break;
+ case BPF_JSLE:
+ case BPF_JSGT:
+ emit(ARM_CMP_R(rn, rt), ctx);
+ emit(ARM_SBCS_R(ARM_IP, rm, rd), ctx);
+ break;
+ case BPF_JSLT:
+ case BPF_JSGE:
+ emit(ARM_CMP_R(rt, rn), ctx);
+ emit(ARM_SBCS_R(ARM_IP, rd, rm), ctx);
+ break;
+ }
+}
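
For the unsigned conditions the code above compares the high words first and only compares the low words when the high words are equal; the signed conditions instead perform a 64-bit subtract (CMP on the low words followed by SBCS on the high words) and let the condition flags decide. The unsigned variant written out in C (a sketch):

#include <stdint.h>
#include <stdio.h>

/* <0, 0, >0 like memcmp; unsigned 64-bit compare from 32-bit halves. */
static int ucmp64(uint32_t a_hi, uint32_t a_lo, uint32_t b_hi, uint32_t b_lo)
{
	if (a_hi != b_hi)
		return a_hi < b_hi ? -1 : 1;	/* high words decide */
	if (a_lo != b_lo)
		return a_lo < b_lo ? -1 : 1;	/* tie broken by low words */
	return 0;
}

int main(void)
{
	printf("%d\n", ucmp64(0x1, 0x0, 0x0, 0xffffffffu));	/* 1: 2^32 > 2^32 - 1 */
	return 0;
}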
+
+static int out_offset = -1; /* initialized on the first pass of build_body() */
+static int emit_bpf_tail_call(struct jit_ctx *ctx)
{
- if (!(ctx->seen & SEEN_X))
- ctx->flags |= FLAG_NEED_X_RESET;
- ctx->seen |= SEEN_X;
+ /* bpf_tail_call(void *prog_ctx, struct bpf_array *array, u64 index) */
+ const u8 *r2 = bpf2a32[BPF_REG_2];
+ const u8 *r3 = bpf2a32[BPF_REG_3];
+ const u8 *tmp = bpf2a32[TMP_REG_1];
+ const u8 *tmp2 = bpf2a32[TMP_REG_2];
+ const u8 *tcc = bpf2a32[TCALL_CNT];
+ const int idx0 = ctx->idx;
+#define cur_offset (ctx->idx - idx0)
+#define jmp_offset (out_offset - (cur_offset))
+ u32 off, lo, hi;
+
+ /* if (index >= array->map.max_entries)
+ * goto out;
+ */
+ off = offsetof(struct bpf_array, map.max_entries);
+ /* array->map.max_entries */
+ emit_a32_mov_i(tmp[1], off, false, ctx);
+ emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r2[1])), ctx);
+ emit(ARM_LDR_R(tmp[1], tmp2[1], tmp[1]), ctx);
+ /* index (64 bit) */
+ emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r3[1])), ctx);
+ /* index >= array->map.max_entries */
+ emit(ARM_CMP_R(tmp2[1], tmp[1]), ctx);
+ _emit(ARM_COND_CS, ARM_B(jmp_offset), ctx);
+
+ /* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
+ * goto out;
+ * tail_call_cnt++;
+ */
+ lo = (u32)MAX_TAIL_CALL_CNT;
+ hi = (u32)((u64)MAX_TAIL_CALL_CNT >> 32);
+ emit(ARM_LDR_I(tmp[1], ARM_SP, STACK_VAR(tcc[1])), ctx);
+ emit(ARM_LDR_I(tmp[0], ARM_SP, STACK_VAR(tcc[0])), ctx);
+ emit(ARM_CMP_I(tmp[0], hi), ctx);
+ _emit(ARM_COND_EQ, ARM_CMP_I(tmp[1], lo), ctx);
+ _emit(ARM_COND_HI, ARM_B(jmp_offset), ctx);
+ emit(ARM_ADDS_I(tmp[1], tmp[1], 1), ctx);
+ emit(ARM_ADC_I(tmp[0], tmp[0], 0), ctx);
+ emit(ARM_STR_I(tmp[1], ARM_SP, STACK_VAR(tcc[1])), ctx);
+ emit(ARM_STR_I(tmp[0], ARM_SP, STACK_VAR(tcc[0])), ctx);
+
+ /* prog = array->ptrs[index]
+ * if (prog == NULL)
+ * goto out;
+ */
+ off = offsetof(struct bpf_array, ptrs);
+ emit_a32_mov_i(tmp[1], off, false, ctx);
+ emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r2[1])), ctx);
+ emit(ARM_ADD_R(tmp[1], tmp2[1], tmp[1]), ctx);
+ emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r3[1])), ctx);
+ emit(ARM_MOV_SI(tmp[0], tmp2[1], SRTYPE_ASL, 2), ctx);
+ emit(ARM_LDR_R(tmp[1], tmp[1], tmp[0]), ctx);
+ emit(ARM_CMP_I(tmp[1], 0), ctx);
+ _emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx);
+
+ /* goto *(prog->bpf_func + prologue_size); */
+ off = offsetof(struct bpf_prog, bpf_func);
+ emit_a32_mov_i(tmp2[1], off, false, ctx);
+ emit(ARM_LDR_R(tmp[1], tmp[1], tmp2[1]), ctx);
+ emit(ARM_ADD_I(tmp[1], tmp[1], ctx->prologue_bytes), ctx);
+ emit(ARM_BX(tmp[1]), ctx);
+
+ /* out: */
+ if (out_offset == -1)
+ out_offset = cur_offset;
+ if (cur_offset != out_offset) {
+ pr_err_once("tail_call out_offset = %d, expected %d!\n",
+ cur_offset, out_offset);
+ return -1;
+ }
+ return 0;
+#undef cur_offset
+#undef jmp_offset
}
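
The generated tail-call sequence is easier to read as pseudo-C: bounds-check the index, bound the call depth, fetch the target program and jump past its prologue, falling through to the next instruction whenever any check fails. A standalone sketch with stand-in types (fake_array/fake_prog and the MAX_TAIL_CALL_CNT value are assumptions for illustration, not kernel definitions):

#include <stdint.h>
#include <stdio.h>

#define MAX_TAIL_CALL_CNT	32	/* assumed limit */

struct fake_prog  { void *bpf_func; };
struct fake_array { uint32_t max_entries; struct fake_prog *ptrs[8]; };

/* Returns the program to jump into, or NULL for "goto out" (fall through). */
static struct fake_prog *tail_call_target(struct fake_array *array,
					  uint64_t index, uint64_t *tail_call_cnt)
{
	if (index >= array->max_entries)
		return NULL;
	if (*tail_call_cnt > MAX_TAIL_CALL_CNT)
		return NULL;
	(*tail_call_cnt)++;
	return array->ptrs[index];	/* may be NULL: also falls through */
}

int main(void)
{
	static struct fake_prog p1;
	static struct fake_array arr = { .max_entries = 2, .ptrs = { &p1, NULL } };
	uint64_t tcc = 0;

	printf("idx 0 -> %s\n", tail_call_target(&arr, 0, &tcc) ? "jump" : "fall through");
	printf("idx 5 -> %s\n", tail_call_target(&arr, 5, &tcc) ? "jump" : "fall through");
	return 0;
}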
-static int build_body(struct jit_ctx *ctx)
+/* 0xabcd => 0xcdab */
+static inline void emit_rev16(const u8 rd, const u8 rn, struct jit_ctx *ctx)
{
- void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w};
- const struct bpf_prog *prog = ctx->skf;
- const struct sock_filter *inst;
- unsigned i, load_order, off, condt;
- int imm12;
- u32 k;
+#if __LINUX_ARM_ARCH__ < 6
+ const u8 *tmp2 = bpf2a32[TMP_REG_2];
+
+ emit(ARM_AND_I(tmp2[1], rn, 0xff), ctx);
+ emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 8), ctx);
+ emit(ARM_AND_I(tmp2[0], tmp2[0], 0xff), ctx);
+ emit(ARM_ORR_SI(rd, tmp2[0], tmp2[1], SRTYPE_LSL, 8), ctx);
+#else /* ARMv6+ */
+ emit(ARM_REV16(rd, rn), ctx);
+#endif
+}
- for (i = 0; i < prog->len; i++) {
- u16 code;
+/* 0xabcdefgh => 0xghefcdab */
+static inline void emit_rev32(const u8 rd, const u8 rn, struct jit_ctx *ctx)
+{
+#if __LINUX_ARM_ARCH__ < 6
+ const u8 *tmp2 = bpf2a32[TMP_REG_2];
+
+ emit(ARM_AND_I(tmp2[1], rn, 0xff), ctx);
+ emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 24), ctx);
+ emit(ARM_ORR_SI(ARM_IP, tmp2[0], tmp2[1], SRTYPE_LSL, 24), ctx);
+
+ emit(ARM_MOV_SI(tmp2[1], rn, SRTYPE_LSR, 8), ctx);
+ emit(ARM_AND_I(tmp2[1], tmp2[1], 0xff), ctx);
+ emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 16), ctx);
+ emit(ARM_AND_I(tmp2[0], tmp2[0], 0xff), ctx);
+ emit(ARM_MOV_SI(tmp2[0], tmp2[0], SRTYPE_LSL, 8), ctx);
+ emit(ARM_ORR_SI(tmp2[0], tmp2[0], tmp2[1], SRTYPE_LSL, 16), ctx);
+ emit(ARM_ORR_R(rd, ARM_IP, tmp2[0]), ctx);
+
+#else /* ARMv6+ */
+ emit(ARM_REV(rd, rn), ctx);
+#endif
+}
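
On ARMv6+ these byte swaps are single REV/REV16 instructions; the pre-v6 fallback above builds them from shifts, masks and ORs. The equivalent portable C, for reference (a sketch):

#include <stdint.h>
#include <stdio.h>

static uint16_t rev16(uint16_t x)
{
	return (uint16_t)((x >> 8) | (x << 8));
}

static uint32_t rev32(uint32_t x)
{
	return ((x & 0x000000ffu) << 24) |
	       ((x & 0x0000ff00u) <<  8) |
	       ((x & 0x00ff0000u) >>  8) |
	       ((x & 0xff000000u) >> 24);
}

int main(void)
{
	printf("%#x %#x\n", (unsigned int)rev16(0xabcd), (unsigned int)rev32(0x11223344u));
	return 0;
}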
- inst = &(prog->insns[i]);
- /* K as an immediate value operand */
- k = inst->k;
- code = bpf_anc_helper(inst);
+/* Push a 64-bit BPF register pair (loaded from scratch stack space) onto the stack */
+static inline void emit_push_r64(const u8 src[], const u8 shift,
+ struct jit_ctx *ctx)
+{
+ const u8 *tmp2 = bpf2a32[TMP_REG_2];
+ u16 reg_set = 0;
- /* compute offsets only in the fake pass */
- if (ctx->target == NULL)
- ctx->offsets[i] = ctx->idx * 4;
+ emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(src[1]+shift)), ctx);
+ emit(ARM_LDR_I(tmp2[0], ARM_SP, STACK_VAR(src[0]+shift)), ctx);
+
+ reg_set = (1 << tmp2[1]) | (1 << tmp2[0]);
+ emit(ARM_PUSH(reg_set), ctx);
+}
+
+static void build_prologue(struct jit_ctx *ctx)
+{
+ const u8 r0 = bpf2a32[BPF_REG_0][1];
+ const u8 r2 = bpf2a32[BPF_REG_1][1];
+ const u8 r3 = bpf2a32[BPF_REG_1][0];
+ const u8 r4 = bpf2a32[BPF_REG_6][1];
+ const u8 r5 = bpf2a32[BPF_REG_6][0];
+ const u8 r6 = bpf2a32[TMP_REG_1][1];
+ const u8 r7 = bpf2a32[TMP_REG_1][0];
+ const u8 r8 = bpf2a32[TMP_REG_2][1];
+ const u8 r10 = bpf2a32[TMP_REG_2][0];
+ const u8 fplo = bpf2a32[BPF_REG_FP][1];
+ const u8 fphi = bpf2a32[BPF_REG_FP][0];
+ const u8 sp = ARM_SP;
+ const u8 *tcc = bpf2a32[TCALL_CNT];
+
+ u16 reg_set = 0;
+
+ /*
+ * eBPF prog stack layout
+ *
+ * high
+ * original ARM_SP => +-----+ eBPF prologue
+ * |FP/LR|
+ * current ARM_FP => +-----+
+ * | ... | callee saved registers
+ * eBPF fp register => +-----+ <= (BPF_FP)
+ * | ... | eBPF JIT scratch space
+ * | | eBPF prog stack
+ * +-----+
+ * |RSVD | JIT scratchpad
+ * current ARM_SP => +-----+ <= (BPF_FP - STACK_SIZE)
+ * | |
+ * | ... | Function call stack
+ * | |
+ * +-----+
+ * low
+ */
+
+ /* Save callee saved registers. */
+ reg_set |= (1<<r4) | (1<<r5) | (1<<r6) | (1<<r7) | (1<<r8) | (1<<r10);
+#ifdef CONFIG_FRAME_POINTER
+ reg_set |= (1<<ARM_FP) | (1<<ARM_IP) | (1<<ARM_LR) | (1<<ARM_PC);
+ emit(ARM_MOV_R(ARM_IP, sp), ctx);
+ emit(ARM_PUSH(reg_set), ctx);
+ emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx);
+#else
+ /* Check if call instruction exists in BPF body */
+ if (ctx->seen & SEEN_CALL)
+ reg_set |= (1<<ARM_LR);
+ emit(ARM_PUSH(reg_set), ctx);
+#endif
+ /* Save frame pointer for later */
+ emit(ARM_SUB_I(ARM_IP, sp, SCRATCH_SIZE), ctx);
+
+ ctx->stack_size = imm8m(STACK_SIZE);
+
+ /* Set up function call stack */
+ emit(ARM_SUB_I(ARM_SP, ARM_SP, ctx->stack_size), ctx);
- switch (code) {
- case BPF_LD | BPF_IMM:
- emit_mov_i(r_A, k, ctx);
+ /* Set up BPF prog stack base register */
+ emit_a32_mov_r(fplo, ARM_IP, true, false, ctx);
+ emit_a32_mov_i(fphi, 0, true, ctx);
+
+ /* mov r4, 0 */
+ emit(ARM_MOV_I(r4, 0), ctx);
+
+ /* Move BPF_CTX to BPF_R1 */
+ emit(ARM_MOV_R(r3, r4), ctx);
+ emit(ARM_MOV_R(r2, r0), ctx);
+ /* Initialize tail call count */
+ emit(ARM_STR_I(r4, ARM_SP, STACK_VAR(tcc[0])), ctx);
+ emit(ARM_STR_I(r4, ARM_SP, STACK_VAR(tcc[1])), ctx);
+ /* end of prologue */
+}
+
+static void build_epilogue(struct jit_ctx *ctx)
+{
+ const u8 r4 = bpf2a32[BPF_REG_6][1];
+ const u8 r5 = bpf2a32[BPF_REG_6][0];
+ const u8 r6 = bpf2a32[TMP_REG_1][1];
+ const u8 r7 = bpf2a32[TMP_REG_1][0];
+ const u8 r8 = bpf2a32[TMP_REG_2][1];
+ const u8 r10 = bpf2a32[TMP_REG_2][0];
+ u16 reg_set = 0;
+
+ /* unwind function call stack */
+ emit(ARM_ADD_I(ARM_SP, ARM_SP, ctx->stack_size), ctx);
+
+ /* restore callee saved registers. */
+ reg_set |= (1<<r4) | (1<<r5) | (1<<r6) | (1<<r7) | (1<<r8) | (1<<r10);
+#ifdef CONFIG_FRAME_POINTER
+ /* the first instruction of the prologue was: mov ip, sp */
+ reg_set |= (1<<ARM_FP) | (1<<ARM_SP) | (1<<ARM_PC);
+ emit(ARM_LDM(ARM_SP, reg_set), ctx);
+#else
+ if (ctx->seen & SEEN_CALL)
+ reg_set |= (1<<ARM_PC);
+ /* Restore callee saved registers. */
+ emit(ARM_POP(reg_set), ctx);
+ /* Return back to the callee function */
+ if (!(ctx->seen & SEEN_CALL))
+ emit(ARM_BX(ARM_LR), ctx);
+#endif
+}
+
+/*
+ * Convert an eBPF instruction to a native (ARM) instruction sequence,
+ * i.e. JIT one eBPF instruction.
+ * Returns :
+ * 0 - Successfully JITed an 8-byte eBPF instruction
+ * >0 - Successfully JITed a 16-byte eBPF instruction
+ * <0 - Failed to JIT.
+ */
+static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
+{
+ const u8 code = insn->code;
+ const u8 *dst = bpf2a32[insn->dst_reg];
+ const u8 *src = bpf2a32[insn->src_reg];
+ const u8 *tmp = bpf2a32[TMP_REG_1];
+ const u8 *tmp2 = bpf2a32[TMP_REG_2];
+ const s16 off = insn->off;
+ const s32 imm = insn->imm;
+ const int i = insn - ctx->prog->insnsi;
+ const bool is64 = BPF_CLASS(code) == BPF_ALU64;
+ const bool dstk = is_on_stack(insn->dst_reg);
+ const bool sstk = is_on_stack(insn->src_reg);
+ u8 rd, rt, rm, rn;
+ s32 jmp_offset;
+
+#define check_imm(bits, imm) do { \
+ if ((((imm) > 0) && ((imm) >> (bits))) || \
+ (((imm) < 0) && (~(imm) >> (bits)))) { \
+ pr_info("[%2d] imm=%d(0x%x) out of range\n", \
+ i, imm, imm); \
+ return -EINVAL; \
+ } \
+} while (0)
+#define check_imm24(imm) check_imm(24, imm)
+
+ switch (code) {
+ /* ALU operations */
+
+ /* dst = src */
+ case BPF_ALU | BPF_MOV | BPF_K:
+ case BPF_ALU | BPF_MOV | BPF_X:
+ case BPF_ALU64 | BPF_MOV | BPF_K:
+ case BPF_ALU64 | BPF_MOV | BPF_X:
+ switch (BPF_SRC(code)) {
+ case BPF_X:
+ emit_a32_mov_r64(is64, dst, src, dstk, sstk, ctx);
break;
- case BPF_LD | BPF_W | BPF_LEN:
- ctx->seen |= SEEN_SKB;
- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
- emit(ARM_LDR_I(r_A, r_skb,
- offsetof(struct sk_buff, len)), ctx);
+ case BPF_K:
+ /* Sign-extend immediate value to destination reg */
+ emit_a32_mov_i64(is64, dst, imm, dstk, ctx);
break;
- case BPF_LD | BPF_MEM:
- /* A = scratch[k] */
- ctx->seen |= SEEN_MEM_WORD(k);
- emit(ARM_LDR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
+ }
+ break;
+ /* dst = dst + src/imm */
+ /* dst = dst - src/imm */
+ /* dst = dst | src/imm */
+ /* dst = dst & src/imm */
+ /* dst = dst ^ src/imm */
+ /* dst = dst * src/imm */
+ /* dst = dst << src */
+ /* dst = dst >> src */
+ case BPF_ALU | BPF_ADD | BPF_K:
+ case BPF_ALU | BPF_ADD | BPF_X:
+ case BPF_ALU | BPF_SUB | BPF_K:
+ case BPF_ALU | BPF_SUB | BPF_X:
+ case BPF_ALU | BPF_OR | BPF_K:
+ case BPF_ALU | BPF_OR | BPF_X:
+ case BPF_ALU | BPF_AND | BPF_K:
+ case BPF_ALU | BPF_AND | BPF_X:
+ case BPF_ALU | BPF_XOR | BPF_K:
+ case BPF_ALU | BPF_XOR | BPF_X:
+ case BPF_ALU | BPF_MUL | BPF_K:
+ case BPF_ALU | BPF_MUL | BPF_X:
+ case BPF_ALU | BPF_LSH | BPF_X:
+ case BPF_ALU | BPF_RSH | BPF_X:
+ case BPF_ALU | BPF_ARSH | BPF_K:
+ case BPF_ALU | BPF_ARSH | BPF_X:
+ case BPF_ALU64 | BPF_ADD | BPF_K:
+ case BPF_ALU64 | BPF_ADD | BPF_X:
+ case BPF_ALU64 | BPF_SUB | BPF_K:
+ case BPF_ALU64 | BPF_SUB | BPF_X:
+ case BPF_ALU64 | BPF_OR | BPF_K:
+ case BPF_ALU64 | BPF_OR | BPF_X:
+ case BPF_ALU64 | BPF_AND | BPF_K:
+ case BPF_ALU64 | BPF_AND | BPF_X:
+ case BPF_ALU64 | BPF_XOR | BPF_K:
+ case BPF_ALU64 | BPF_XOR | BPF_X:
+ switch (BPF_SRC(code)) {
+ case BPF_X:
+ emit_a32_alu_r64(is64, dst, src, dstk, sstk,
+ ctx, BPF_OP(code));
break;
- case BPF_LD | BPF_W | BPF_ABS:
- load_order = 2;
- goto load;
- case BPF_LD | BPF_H | BPF_ABS:
- load_order = 1;
- goto load;
- case BPF_LD | BPF_B | BPF_ABS:
- load_order = 0;
-load:
- emit_mov_i(r_off, k, ctx);
-load_common:
- ctx->seen |= SEEN_DATA | SEEN_CALL;
-
- if (load_order > 0) {
- emit(ARM_SUB_I(r_scratch, r_skb_hl,
- 1 << load_order), ctx);
- emit(ARM_CMP_R(r_scratch, r_off), ctx);
- condt = ARM_COND_GE;
- } else {
- emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
- condt = ARM_COND_HI;
- }
-
- /*
- * test for negative offset, only if we are
- * currently scheduled to take the fast
- * path. this will update the flags so that
- * the slowpath instruction are ignored if the
- * offset is negative.
- *
- * for loard_order == 0 the HI condition will
- * make loads at offset 0 take the slow path too.
+ case BPF_K:
+ /* Move the immediate value into the temporary register
+ * pair; this sign-extends it to 64 bits, after which the
+ * ALU operation can safely be performed on the register
+ * pair.
*/
- _emit(condt, ARM_CMP_I(r_off, 0), ctx);
-
- _emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
- ctx);
-
- if (load_order == 0)
- _emit(condt, ARM_LDRB_I(r_A, r_scratch, 0),
- ctx);
- else if (load_order == 1)
- emit_load_be16(condt, r_A, r_scratch, ctx);
- else if (load_order == 2)
- emit_load_be32(condt, r_A, r_scratch, ctx);
-
- _emit(condt, ARM_B(b_imm(i + 1, ctx)), ctx);
-
- /* the slowpath */
- emit_mov_i(ARM_R3, (u32)load_func[load_order], ctx);
- emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
- /* the offset is already in R1 */
- emit_blx_r(ARM_R3, ctx);
- /* check the result of skb_copy_bits */
- emit(ARM_CMP_I(ARM_R1, 0), ctx);
- emit_err_ret(ARM_COND_NE, ctx);
- emit(ARM_MOV_R(r_A, ARM_R0), ctx);
+ emit_a32_mov_i64(is64, tmp2, imm, false, ctx);
+ emit_a32_alu_r64(is64, dst, tmp2, dstk, false,
+ ctx, BPF_OP(code));
break;
- case BPF_LD | BPF_W | BPF_IND:
- load_order = 2;
- goto load_ind;
- case BPF_LD | BPF_H | BPF_IND:
- load_order = 1;
- goto load_ind;
- case BPF_LD | BPF_B | BPF_IND:
- load_order = 0;
-load_ind:
- update_on_xread(ctx);
- OP_IMM3(ARM_ADD, r_off, r_X, k, ctx);
- goto load_common;
- case BPF_LDX | BPF_IMM:
- ctx->seen |= SEEN_X;
- emit_mov_i(r_X, k, ctx);
+ }
+ break;
+ /* dst = dst / src(imm) */
+ /* dst = dst % src(imm) */
+ case BPF_ALU | BPF_DIV | BPF_K:
+ case BPF_ALU | BPF_DIV | BPF_X:
+ case BPF_ALU | BPF_MOD | BPF_K:
+ case BPF_ALU | BPF_MOD | BPF_X:
+ rt = src_lo;
+ rd = dstk ? tmp2[1] : dst_lo;
+ if (dstk)
+ emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
+ switch (BPF_SRC(code)) {
+ case BPF_X:
+ rt = sstk ? tmp2[0] : rt;
+ if (sstk)
+ emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)),
+ ctx);
break;
- case BPF_LDX | BPF_W | BPF_LEN:
- ctx->seen |= SEEN_X | SEEN_SKB;
- emit(ARM_LDR_I(r_X, r_skb,
- offsetof(struct sk_buff, len)), ctx);
+ case BPF_K:
+ rt = tmp2[0];
+ emit_a32_mov_i(rt, imm, false, ctx);
break;
- case BPF_LDX | BPF_MEM:
- ctx->seen |= SEEN_X | SEEN_MEM_WORD(k);
- emit(ARM_LDR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
+ }
+ emit_udivmod(rd, rd, rt, ctx, BPF_OP(code));
+ if (dstk)
+ emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx);
+ emit_a32_mov_i(dst_hi, 0, dstk, ctx);
+ break;
+ case BPF_ALU64 | BPF_DIV | BPF_K:
+ case BPF_ALU64 | BPF_DIV | BPF_X:
+ case BPF_ALU64 | BPF_MOD | BPF_K:
+ case BPF_ALU64 | BPF_MOD | BPF_X:
+ goto notyet;
+ /* dst = dst >> imm */
+ /* dst = dst << imm */
+ case BPF_ALU | BPF_RSH | BPF_K:
+ case BPF_ALU | BPF_LSH | BPF_K:
+ if (unlikely(imm > 31))
+ return -EINVAL;
+ if (imm)
+ emit_a32_alu_i(dst_lo, imm, dstk, ctx, BPF_OP(code));
+ emit_a32_mov_i(dst_hi, 0, dstk, ctx);
+ break;
+ /* dst = dst << imm */
+ case BPF_ALU64 | BPF_LSH | BPF_K:
+ if (unlikely(imm > 63))
+ return -EINVAL;
+ emit_a32_lsh_i64(dst, dstk, imm, ctx);
+ break;
+ /* dst = dst >> imm */
+ case BPF_ALU64 | BPF_RSH | BPF_K:
+ if (unlikely(imm > 63))
+ return -EINVAL;
+ emit_a32_lsr_i64(dst, dstk, imm, ctx);
+ break;
+ /* dst = dst << src */
+ case BPF_ALU64 | BPF_LSH | BPF_X:
+ emit_a32_lsh_r64(dst, src, dstk, sstk, ctx);
+ break;
+ /* dst = dst >> src */
+ case BPF_ALU64 | BPF_RSH | BPF_X:
+ emit_a32_lsr_r64(dst, src, dstk, sstk, ctx);
+ break;
+ /* dst = dst >> src (signed) */
+ case BPF_ALU64 | BPF_ARSH | BPF_X:
+ emit_a32_arsh_r64(dst, src, dstk, sstk, ctx);
+ break;
+ /* dst = dst >> imm (signed) */
+ case BPF_ALU64 | BPF_ARSH | BPF_K:
+ if (unlikely(imm > 63))
+ return -EINVAL;
+ emit_a32_arsh_i64(dst, dstk, imm, ctx);
+ break;
+ /* dst = -dst */
+ case BPF_ALU | BPF_NEG:
+ emit_a32_alu_i(dst_lo, 0, dstk, ctx, BPF_OP(code));
+ emit_a32_mov_i(dst_hi, 0, dstk, ctx);
+ break;
+ /* dst = -dst (64 bit) */
+ case BPF_ALU64 | BPF_NEG:
+ emit_a32_neg64(dst, dstk, ctx);
+ break;
+ /* dst = dst * src/imm */
+ case BPF_ALU64 | BPF_MUL | BPF_X:
+ case BPF_ALU64 | BPF_MUL | BPF_K:
+ switch (BPF_SRC(code)) {
+ case BPF_X:
+ emit_a32_mul_r64(dst, src, dstk, sstk, ctx);
break;
- case BPF_LDX | BPF_B | BPF_MSH:
- /* x = ((*(frame + k)) & 0xf) << 2; */
- ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL;
- /* the interpreter should deal with the negative K */
- if ((int)k < 0)
- return -1;
- /* offset in r1: we might have to take the slow path */
- emit_mov_i(r_off, k, ctx);
- emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
-
- /* load in r0: common with the slowpath */
- _emit(ARM_COND_HI, ARM_LDRB_R(ARM_R0, r_skb_data,
- ARM_R1), ctx);
- /*
- * emit_mov_i() might generate one or two instructions,
- * the same holds for emit_blx_r()
+ case BPF_K:
+ /* Move the immediate value into the temporary register
+ * pair; this sign-extends it to 64 bits, after which the
+ * multiplication can safely be performed on the register
+ * pair.
*/
- _emit(ARM_COND_HI, ARM_B(b_imm(i + 1, ctx) - 2), ctx);
-
- emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
- /* r_off is r1 */
- emit_mov_i(ARM_R3, (u32)jit_get_skb_b, ctx);
- emit_blx_r(ARM_R3, ctx);
- /* check the return value of skb_copy_bits */
- emit(ARM_CMP_I(ARM_R1, 0), ctx);
- emit_err_ret(ARM_COND_NE, ctx);
-
- emit(ARM_AND_I(r_X, ARM_R0, 0x00f), ctx);
- emit(ARM_LSL_I(r_X, r_X, 2), ctx);
- break;
- case BPF_ST:
- ctx->seen |= SEEN_MEM_WORD(k);
- emit(ARM_STR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
- break;
- case BPF_STX:
- update_on_xread(ctx);
- ctx->seen |= SEEN_MEM_WORD(k);
- emit(ARM_STR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
- break;
- case BPF_ALU | BPF_ADD | BPF_K:
- /* A += K */
- OP_IMM3(ARM_ADD, r_A, r_A, k, ctx);
- break;
- case BPF_ALU | BPF_ADD | BPF_X:
- update_on_xread(ctx);
- emit(ARM_ADD_R(r_A, r_A, r_X), ctx);
- break;
- case BPF_ALU | BPF_SUB | BPF_K:
- /* A -= K */
- OP_IMM3(ARM_SUB, r_A, r_A, k, ctx);
- break;
- case BPF_ALU | BPF_SUB | BPF_X:
- update_on_xread(ctx);
- emit(ARM_SUB_R(r_A, r_A, r_X), ctx);
- break;
- case BPF_ALU | BPF_MUL | BPF_K:
- /* A *= K */
- emit_mov_i(r_scratch, k, ctx);
- emit(ARM_MUL(r_A, r_A, r_scratch), ctx);
- break;
- case BPF_ALU | BPF_MUL | BPF_X:
- update_on_xread(ctx);
- emit(ARM_MUL(r_A, r_A, r_X), ctx);
+ emit_a32_mov_i64(is64, tmp2, imm, false, ctx);
+ emit_a32_mul_r64(dst, tmp2, dstk, false, ctx);
break;
- case BPF_ALU | BPF_DIV | BPF_K:
- if (k == 1)
- break;
- emit_mov_i(r_scratch, k, ctx);
- emit_udivmod(r_A, r_A, r_scratch, ctx, BPF_DIV);
- break;
- case BPF_ALU | BPF_DIV | BPF_X:
- update_on_xread(ctx);
- emit(ARM_CMP_I(r_X, 0), ctx);
- emit_err_ret(ARM_COND_EQ, ctx);
- emit_udivmod(r_A, r_A, r_X, ctx, BPF_DIV);
+ }
+ break;
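
The comment in the BPF_K multiply path above is the key point: the 32-bit immediate has to be sign-extended into a 64-bit {hi, lo} pair before the 64x64 multiply, otherwise a negative immediate would be treated as a large unsigned value. A minimal userspace sketch of the difference (illustrative only, not taken from the patch):

#include <stdint.h>
#include <stdio.h>

/* Why a negative 32-bit immediate must be widened to 64 bits before the
 * 64x64 low-half multiply: zero-extension gives a wildly different product.
 */
int main(void)
{
	uint64_t dst = 10;
	int32_t imm = -3;

	uint64_t zero_ext = dst * (uint64_t)(uint32_t)imm; /* 0x9fffffffe2 */
	uint64_t sign_ext = dst * (uint64_t)(int64_t)imm;  /* -30, i.e. 0xffffffffffffffe2 */

	printf("zero-extended: %#llx\n", (unsigned long long)zero_ext);
	printf("sign-extended: %#llx\n", (unsigned long long)sign_ext);
	return 0;
}
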
+ /* dst = htole(dst) */
+ /* dst = htobe(dst) */
+ case BPF_ALU | BPF_END | BPF_FROM_LE:
+ case BPF_ALU | BPF_END | BPF_FROM_BE:
+ rd = dstk ? tmp[0] : dst_hi;
+ rt = dstk ? tmp[1] : dst_lo;
+ if (dstk) {
+ emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(dst_lo)), ctx);
+ emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_hi)), ctx);
+ }
+ if (BPF_SRC(code) == BPF_FROM_LE)
+ goto emit_bswap_uxt;
+ switch (imm) {
+ case 16:
+ emit_rev16(rt, rt, ctx);
+ goto emit_bswap_uxt;
+ case 32:
+ emit_rev32(rt, rt, ctx);
+ goto emit_bswap_uxt;
+ case 64:
+ /* Because of the usage of ARM_LR */
+ ctx->seen |= SEEN_CALL;
+ emit_rev32(ARM_LR, rt, ctx);
+ emit_rev32(rt, rd, ctx);
+ emit(ARM_MOV_R(rd, ARM_LR), ctx);
break;
- case BPF_ALU | BPF_MOD | BPF_K:
- if (k == 1) {
- emit_mov_i(r_A, 0, ctx);
- break;
- }
- emit_mov_i(r_scratch, k, ctx);
- emit_udivmod(r_A, r_A, r_scratch, ctx, BPF_MOD);
+ }
+ goto exit;
+emit_bswap_uxt:
+ switch (imm) {
+ case 16:
+ /* zero-extend 16 bits into 64 bits */
+#if __LINUX_ARM_ARCH__ < 6
+ emit_a32_mov_i(tmp2[1], 0xffff, false, ctx);
+ emit(ARM_AND_R(rt, rt, tmp2[1]), ctx);
+#else /* ARMv6+ */
+ emit(ARM_UXTH(rt, rt), ctx);
+#endif
+ emit(ARM_EOR_R(rd, rd, rd), ctx);
break;
- case BPF_ALU | BPF_MOD | BPF_X:
- update_on_xread(ctx);
- emit(ARM_CMP_I(r_X, 0), ctx);
- emit_err_ret(ARM_COND_EQ, ctx);
- emit_udivmod(r_A, r_A, r_X, ctx, BPF_MOD);
+ case 32:
+ /* zero-extend 32 bits into 64 bits */
+ emit(ARM_EOR_R(rd, rd, rd), ctx);
break;
- case BPF_ALU | BPF_OR | BPF_K:
- /* A |= K */
- OP_IMM3(ARM_ORR, r_A, r_A, k, ctx);
+ case 64:
+ /* nop */
break;
- case BPF_ALU | BPF_OR | BPF_X:
- update_on_xread(ctx);
- emit(ARM_ORR_R(r_A, r_A, r_X), ctx);
+ }
+exit:
+ if (dstk) {
+ emit(ARM_STR_I(rt, ARM_SP, STACK_VAR(dst_lo)), ctx);
+ emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_hi)), ctx);
+ }
+ break;
+ /* dst = imm64 */
+ case BPF_LD | BPF_IMM | BPF_DW:
+ {
+ const struct bpf_insn insn1 = insn[1];
+ u32 hi, lo = imm;
+
+ hi = insn1.imm;
+ emit_a32_mov_i(dst_lo, lo, dstk, ctx);
+ emit_a32_mov_i(dst_hi, hi, dstk, ctx);
+
+ return 1;
+ }
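
A BPF_LD | BPF_IMM | BPF_DW instruction is the only eBPF opcode that spans two struct bpf_insn slots: the first slot's imm carries the low 32 bits and the second slot's imm the high 32 bits, which is why the case above reads insn[1] and returns 1 so build_body() skips the pseudo slot. A rough, self-contained sketch of such a pair (the layout mirrors struct bpf_insn, with the packed dst/src byte simplified here):

#include <stdint.h>

/* Simplified stand-in for struct bpf_insn; the kernel packs dst_reg and
 * src_reg into 4-bit fields of a single byte.
 */
struct insn {
	uint8_t code;
	uint8_t regs;   /* dst_reg (low nibble) / src_reg (high nibble) */
	int16_t off;
	int32_t imm;
};

/* dst = 0x1122334455667788ULL: low word in slot 0, high word in slot 1.
 * 0x18 is BPF_LD | BPF_IMM | BPF_DW; the second slot is the pseudo slot
 * that build_insn() above accounts for by returning 1.
 */
static const struct insn ld_imm64[2] = {
	{ .code = 0x18, .imm = 0x55667788 },
	{ .code = 0x00, .imm = 0x11223344 },
};
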
+ /* LDX: dst = *(size *)(src + off) */
+ case BPF_LDX | BPF_MEM | BPF_W:
+ case BPF_LDX | BPF_MEM | BPF_H:
+ case BPF_LDX | BPF_MEM | BPF_B:
+ case BPF_LDX | BPF_MEM | BPF_DW:
+ rn = sstk ? tmp2[1] : src_lo;
+ if (sstk)
+ emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx);
+ switch (BPF_SIZE(code)) {
+ case BPF_W:
+ /* Load a Word */
+ case BPF_H:
+ /* Load a Half-Word */
+ case BPF_B:
+ /* Load a Byte */
+ emit_ldx_r(dst_lo, rn, dstk, off, ctx, BPF_SIZE(code));
+ emit_a32_mov_i(dst_hi, 0, dstk, ctx);
break;
- case BPF_ALU | BPF_XOR | BPF_K:
- /* A ^= K; */
- OP_IMM3(ARM_EOR, r_A, r_A, k, ctx);
+ case BPF_DW:
+ /* Load a double word */
+ emit_ldx_r(dst_lo, rn, dstk, off, ctx, BPF_W);
+ emit_ldx_r(dst_hi, rn, dstk, off+4, ctx, BPF_W);
break;
- case BPF_ANC | SKF_AD_ALU_XOR_X:
- case BPF_ALU | BPF_XOR | BPF_X:
- /* A ^= X */
- update_on_xread(ctx);
- emit(ARM_EOR_R(r_A, r_A, r_X), ctx);
+ }
+ break;
+ /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
+ case BPF_LD | BPF_ABS | BPF_W:
+ case BPF_LD | BPF_ABS | BPF_H:
+ case BPF_LD | BPF_ABS | BPF_B:
+ /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + src + imm)) */
+ case BPF_LD | BPF_IND | BPF_W:
+ case BPF_LD | BPF_IND | BPF_H:
+ case BPF_LD | BPF_IND | BPF_B:
+ {
+ const u8 r4 = bpf2a32[BPF_REG_6][1]; /* r4 = ptr to sk_buff */
+ const u8 r0 = bpf2a32[BPF_REG_0][1]; /* r0: struct sk_buff *skb */
+ /* also the return value */
+ const u8 r1 = bpf2a32[BPF_REG_0][0]; /* r1: int k */
+ const u8 r2 = bpf2a32[BPF_REG_1][1]; /* r2: unsigned int size */
+ const u8 r3 = bpf2a32[BPF_REG_1][0]; /* r3: void *buffer */
+ const u8 r6 = bpf2a32[TMP_REG_1][1]; /* r6: void *(*func)(..) */
+ int size;
+
+ /* Setting up first argument */
+ emit(ARM_MOV_R(r0, r4), ctx);
+
+ /* Setting up second argument */
+ emit_a32_mov_i(r1, imm, false, ctx);
+ if (BPF_MODE(code) == BPF_IND)
+ emit_a32_alu_r(r1, src_lo, false, sstk, ctx,
+ false, false, BPF_ADD);
+
+ /* Setting up third argument */
+ switch (BPF_SIZE(code)) {
+ case BPF_W:
+ size = 4;
break;
- case BPF_ALU | BPF_AND | BPF_K:
- /* A &= K */
- OP_IMM3(ARM_AND, r_A, r_A, k, ctx);
+ case BPF_H:
+ size = 2;
break;
- case BPF_ALU | BPF_AND | BPF_X:
- update_on_xread(ctx);
- emit(ARM_AND_R(r_A, r_A, r_X), ctx);
+ case BPF_B:
+ size = 1;
break;
- case BPF_ALU | BPF_LSH | BPF_K:
- if (unlikely(k > 31))
- return -1;
- emit(ARM_LSL_I(r_A, r_A, k), ctx);
+ default:
+ return -EINVAL;
+ }
+ emit_a32_mov_i(r2, size, false, ctx);
+
+ /* Setting up fourth argument */
+ emit(ARM_ADD_I(r3, ARM_SP, imm8m(SKB_BUFFER)), ctx);
+
+ /* Setting up function pointer to call */
+ emit_a32_mov_i(r6, (unsigned int)bpf_load_pointer, false, ctx);
+ emit_blx_r(r6, ctx);
+
+ emit(ARM_EOR_R(r1, r1, r1), ctx);
+ /* Check whether the returned pointer is NULL:
+ * if it is, jump to the epilogue,
+ * otherwise load the value from that address.
+ */
+ emit(ARM_CMP_I(r0, 0), ctx);
+ jmp_offset = epilogue_offset(ctx);
+ check_imm24(jmp_offset);
+ _emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx);
+
+ /* Load value from the address */
+ switch (BPF_SIZE(code)) {
+ case BPF_W:
+ emit(ARM_LDR_I(r0, r0, 0), ctx);
+ emit_rev32(r0, r0, ctx);
break;
- case BPF_ALU | BPF_LSH | BPF_X:
- update_on_xread(ctx);
- emit(ARM_LSL_R(r_A, r_A, r_X), ctx);
+ case BPF_H:
+ emit(ARM_LDRH_I(r0, r0, 0), ctx);
+ emit_rev16(r0, r0, ctx);
break;
- case BPF_ALU | BPF_RSH | BPF_K:
- if (unlikely(k > 31))
- return -1;
- if (k)
- emit(ARM_LSR_I(r_A, r_A, k), ctx);
+ case BPF_B:
+ emit(ARM_LDRB_I(r0, r0, 0), ctx);
+ /* No need to reverse */
break;
- case BPF_ALU | BPF_RSH | BPF_X:
- update_on_xread(ctx);
- emit(ARM_LSR_R(r_A, r_A, r_X), ctx);
+ }
+ break;
+ }
+ /* ST: *(size *)(dst + off) = imm */
+ case BPF_ST | BPF_MEM | BPF_W:
+ case BPF_ST | BPF_MEM | BPF_H:
+ case BPF_ST | BPF_MEM | BPF_B:
+ case BPF_ST | BPF_MEM | BPF_DW:
+ switch (BPF_SIZE(code)) {
+ case BPF_DW:
+ /* Sign-extend immediate value into temp reg */
+ emit_a32_mov_i64(true, tmp2, imm, false, ctx);
+ emit_str_r(dst_lo, tmp2[1], dstk, off, ctx, BPF_W);
+ emit_str_r(dst_lo, tmp2[0], dstk, off+4, ctx, BPF_W);
break;
- case BPF_ALU | BPF_NEG:
- /* A = -A */
- emit(ARM_RSB_I(r_A, r_A, 0), ctx);
+ case BPF_W:
+ case BPF_H:
+ case BPF_B:
+ emit_a32_mov_i(tmp2[1], imm, false, ctx);
+ emit_str_r(dst_lo, tmp2[1], dstk, off, ctx,
+ BPF_SIZE(code));
break;
- case BPF_JMP | BPF_JA:
- /* pc += K */
- emit(ARM_B(b_imm(i + k + 1, ctx)), ctx);
+ }
+ break;
+ /* STX XADD: lock *(u32 *)(dst + off) += src */
+ case BPF_STX | BPF_XADD | BPF_W:
+ /* STX XADD: lock *(u64 *)(dst + off) += src */
+ case BPF_STX | BPF_XADD | BPF_DW:
+ goto notyet;
+ /* STX: *(size *)(dst + off) = src */
+ case BPF_STX | BPF_MEM | BPF_W:
+ case BPF_STX | BPF_MEM | BPF_H:
+ case BPF_STX | BPF_MEM | BPF_B:
+ case BPF_STX | BPF_MEM | BPF_DW:
+ {
+ u8 sz = BPF_SIZE(code);
+
+ rn = sstk ? tmp2[1] : src_lo;
+ rm = sstk ? tmp2[0] : src_hi;
+ if (sstk) {
+ emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx);
+ emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(src_hi)), ctx);
+ }
+
+ /* Store the value */
+ if (BPF_SIZE(code) == BPF_DW) {
+ emit_str_r(dst_lo, rn, dstk, off, ctx, BPF_W);
+ emit_str_r(dst_lo, rm, dstk, off+4, ctx, BPF_W);
+ } else {
+ emit_str_r(dst_lo, rn, dstk, off, ctx, sz);
+ }
+ break;
+ }
+ /* PC += off if dst == src */
+ /* PC += off if dst > src */
+ /* PC += off if dst >= src */
+ /* PC += off if dst < src */
+ /* PC += off if dst <= src */
+ /* PC += off if dst != src */
+ /* PC += off if dst > src (signed) */
+ /* PC += off if dst >= src (signed) */
+ /* PC += off if dst < src (signed) */
+ /* PC += off if dst <= src (signed) */
+ /* PC += off if dst & src */
+ case BPF_JMP | BPF_JEQ | BPF_X:
+ case BPF_JMP | BPF_JGT | BPF_X:
+ case BPF_JMP | BPF_JGE | BPF_X:
+ case BPF_JMP | BPF_JNE | BPF_X:
+ case BPF_JMP | BPF_JSGT | BPF_X:
+ case BPF_JMP | BPF_JSGE | BPF_X:
+ case BPF_JMP | BPF_JSET | BPF_X:
+ case BPF_JMP | BPF_JLE | BPF_X:
+ case BPF_JMP | BPF_JLT | BPF_X:
+ case BPF_JMP | BPF_JSLT | BPF_X:
+ case BPF_JMP | BPF_JSLE | BPF_X:
+ /* Setup source registers */
+ rm = sstk ? tmp2[0] : src_hi;
+ rn = sstk ? tmp2[1] : src_lo;
+ if (sstk) {
+ emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx);
+ emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(src_hi)), ctx);
+ }
+ goto go_jmp;
+ /* PC += off if dst == imm */
+ /* PC += off if dst > imm */
+ /* PC += off if dst >= imm */
+ /* PC += off if dst < imm */
+ /* PC += off if dst <= imm */
+ /* PC += off if dst != imm */
+ /* PC += off if dst > imm (signed) */
+ /* PC += off if dst >= imm (signed) */
+ /* PC += off if dst < imm (signed) */
+ /* PC += off if dst <= imm (signed) */
+ /* PC += off if dst & imm */
+ case BPF_JMP | BPF_JEQ | BPF_K:
+ case BPF_JMP | BPF_JGT | BPF_K:
+ case BPF_JMP | BPF_JGE | BPF_K:
+ case BPF_JMP | BPF_JNE | BPF_K:
+ case BPF_JMP | BPF_JSGT | BPF_K:
+ case BPF_JMP | BPF_JSGE | BPF_K:
+ case BPF_JMP | BPF_JSET | BPF_K:
+ case BPF_JMP | BPF_JLT | BPF_K:
+ case BPF_JMP | BPF_JLE | BPF_K:
+ case BPF_JMP | BPF_JSLT | BPF_K:
+ case BPF_JMP | BPF_JSLE | BPF_K:
+ if (off == 0)
break;
- case BPF_JMP | BPF_JEQ | BPF_K:
- /* pc += (A == K) ? pc->jt : pc->jf */
- condt = ARM_COND_EQ;
- goto cmp_imm;
- case BPF_JMP | BPF_JGT | BPF_K:
- /* pc += (A > K) ? pc->jt : pc->jf */
- condt = ARM_COND_HI;
- goto cmp_imm;
- case BPF_JMP | BPF_JGE | BPF_K:
- /* pc += (A >= K) ? pc->jt : pc->jf */
- condt = ARM_COND_HS;
-cmp_imm:
- imm12 = imm8m(k);
- if (imm12 < 0) {
- emit_mov_i_no8m(r_scratch, k, ctx);
- emit(ARM_CMP_R(r_A, r_scratch), ctx);
- } else {
- emit(ARM_CMP_I(r_A, imm12), ctx);
- }
-cond_jump:
- if (inst->jt)
- _emit(condt, ARM_B(b_imm(i + inst->jt + 1,
- ctx)), ctx);
- if (inst->jf)
- _emit(condt ^ 1, ARM_B(b_imm(i + inst->jf + 1,
- ctx)), ctx);
+ rm = tmp2[0];
+ rn = tmp2[1];
+ /* Sign-extend immediate value */
+ emit_a32_mov_i64(true, tmp2, imm, false, ctx);
+go_jmp:
+ /* Setup destination register */
+ rd = dstk ? tmp[0] : dst_hi;
+ rt = dstk ? tmp[1] : dst_lo;
+ if (dstk) {
+ emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(dst_lo)), ctx);
+ emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_hi)), ctx);
+ }
+
+ /* Check for the condition */
+ emit_ar_r(rd, rt, rm, rn, ctx, BPF_OP(code));
+
+ /* Setup JUMP instruction */
+ jmp_offset = bpf2a32_offset(i+off, i, ctx);
+ switch (BPF_OP(code)) {
+ case BPF_JNE:
+ case BPF_JSET:
+ _emit(ARM_COND_NE, ARM_B(jmp_offset), ctx);
break;
- case BPF_JMP | BPF_JEQ | BPF_X:
- /* pc += (A == X) ? pc->jt : pc->jf */
- condt = ARM_COND_EQ;
- goto cmp_x;
- case BPF_JMP | BPF_JGT | BPF_X:
- /* pc += (A > X) ? pc->jt : pc->jf */
- condt = ARM_COND_HI;
- goto cmp_x;
- case BPF_JMP | BPF_JGE | BPF_X:
- /* pc += (A >= X) ? pc->jt : pc->jf */
- condt = ARM_COND_CS;
-cmp_x:
- update_on_xread(ctx);
- emit(ARM_CMP_R(r_A, r_X), ctx);
- goto cond_jump;
- case BPF_JMP | BPF_JSET | BPF_K:
- /* pc += (A & K) ? pc->jt : pc->jf */
- condt = ARM_COND_NE;
- /* not set iff all zeroes iff Z==1 iff EQ */
-
- imm12 = imm8m(k);
- if (imm12 < 0) {
- emit_mov_i_no8m(r_scratch, k, ctx);
- emit(ARM_TST_R(r_A, r_scratch), ctx);
- } else {
- emit(ARM_TST_I(r_A, imm12), ctx);
- }
- goto cond_jump;
- case BPF_JMP | BPF_JSET | BPF_X:
- /* pc += (A & X) ? pc->jt : pc->jf */
- update_on_xread(ctx);
- condt = ARM_COND_NE;
- emit(ARM_TST_R(r_A, r_X), ctx);
- goto cond_jump;
- case BPF_RET | BPF_A:
- emit(ARM_MOV_R(ARM_R0, r_A), ctx);
- goto b_epilogue;
- case BPF_RET | BPF_K:
- if ((k == 0) && (ctx->ret0_fp_idx < 0))
- ctx->ret0_fp_idx = i;
- emit_mov_i(ARM_R0, k, ctx);
-b_epilogue:
- if (i != ctx->skf->len - 1)
- emit(ARM_B(b_imm(prog->len, ctx)), ctx);
+ case BPF_JEQ:
+ _emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx);
break;
- case BPF_MISC | BPF_TAX:
- /* X = A */
- ctx->seen |= SEEN_X;
- emit(ARM_MOV_R(r_X, r_A), ctx);
+ case BPF_JGT:
+ _emit(ARM_COND_HI, ARM_B(jmp_offset), ctx);
break;
- case BPF_MISC | BPF_TXA:
- /* A = X */
- update_on_xread(ctx);
- emit(ARM_MOV_R(r_A, r_X), ctx);
+ case BPF_JGE:
+ _emit(ARM_COND_CS, ARM_B(jmp_offset), ctx);
break;
- case BPF_ANC | SKF_AD_PROTOCOL:
- /* A = ntohs(skb->protocol) */
- ctx->seen |= SEEN_SKB;
- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
- protocol) != 2);
- off = offsetof(struct sk_buff, protocol);
- emit(ARM_LDRH_I(r_scratch, r_skb, off), ctx);
- emit_swap16(r_A, r_scratch, ctx);
+ case BPF_JSGT:
+ _emit(ARM_COND_LT, ARM_B(jmp_offset), ctx);
break;
- case BPF_ANC | SKF_AD_CPU:
- /* r_scratch = current_thread_info() */
- OP_IMM3(ARM_BIC, r_scratch, ARM_SP, THREAD_SIZE - 1, ctx);
- /* A = current_thread_info()->cpu */
- BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, cpu) != 4);
- off = offsetof(struct thread_info, cpu);
- emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
+ case BPF_JSGE:
+ _emit(ARM_COND_GE, ARM_B(jmp_offset), ctx);
break;
- case BPF_ANC | SKF_AD_IFINDEX:
- case BPF_ANC | SKF_AD_HATYPE:
- /* A = skb->dev->ifindex */
- /* A = skb->dev->type */
- ctx->seen |= SEEN_SKB;
- off = offsetof(struct sk_buff, dev);
- emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);
-
- emit(ARM_CMP_I(r_scratch, 0), ctx);
- emit_err_ret(ARM_COND_EQ, ctx);
-
- BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
- ifindex) != 4);
- BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
- type) != 2);
-
- if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
- off = offsetof(struct net_device, ifindex);
- emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
- } else {
- /*
- * offset of field "type" in "struct
- * net_device" is above what can be
- * used in the ldrh rd, [rn, #imm]
- * instruction, so load the offset in
- * a register and use ldrh rd, [rn, rm]
- */
- off = offsetof(struct net_device, type);
- emit_mov_i(ARM_R3, off, ctx);
- emit(ARM_LDRH_R(r_A, r_scratch, ARM_R3), ctx);
- }
+ case BPF_JLE:
+ _emit(ARM_COND_LS, ARM_B(jmp_offset), ctx);
break;
- case BPF_ANC | SKF_AD_MARK:
- ctx->seen |= SEEN_SKB;
- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
- off = offsetof(struct sk_buff, mark);
- emit(ARM_LDR_I(r_A, r_skb, off), ctx);
+ case BPF_JLT:
+ _emit(ARM_COND_CC, ARM_B(jmp_offset), ctx);
break;
- case BPF_ANC | SKF_AD_RXHASH:
- ctx->seen |= SEEN_SKB;
- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
- off = offsetof(struct sk_buff, hash);
- emit(ARM_LDR_I(r_A, r_skb, off), ctx);
+ case BPF_JSLT:
+ _emit(ARM_COND_LT, ARM_B(jmp_offset), ctx);
break;
- case BPF_ANC | SKF_AD_VLAN_TAG:
- case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
- ctx->seen |= SEEN_SKB;
- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
- off = offsetof(struct sk_buff, vlan_tci);
- emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
- if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
- OP_IMM3(ARM_AND, r_A, r_A, ~VLAN_TAG_PRESENT, ctx);
- else {
- OP_IMM3(ARM_LSR, r_A, r_A, 12, ctx);
- OP_IMM3(ARM_AND, r_A, r_A, 0x1, ctx);
- }
+ case BPF_JSLE:
+ _emit(ARM_COND_GE, ARM_B(jmp_offset), ctx);
break;
- case BPF_ANC | SKF_AD_PKTTYPE:
- ctx->seen |= SEEN_SKB;
- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
- __pkt_type_offset[0]) != 1);
- off = PKT_TYPE_OFFSET();
- emit(ARM_LDRB_I(r_A, r_skb, off), ctx);
- emit(ARM_AND_I(r_A, r_A, PKT_TYPE_MAX), ctx);
-#ifdef __BIG_ENDIAN_BITFIELD
- emit(ARM_LSR_I(r_A, r_A, 5), ctx);
-#endif
+ }
+ break;
+ /* JMP OFF */
+ case BPF_JMP | BPF_JA:
+ {
+ if (off == 0)
break;
- case BPF_ANC | SKF_AD_QUEUE:
- ctx->seen |= SEEN_SKB;
- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
- queue_mapping) != 2);
- BUILD_BUG_ON(offsetof(struct sk_buff,
- queue_mapping) > 0xff);
- off = offsetof(struct sk_buff, queue_mapping);
- emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
+ jmp_offset = bpf2a32_offset(i+off, i, ctx);
+ check_imm24(jmp_offset);
+ emit(ARM_B(jmp_offset), ctx);
+ break;
+ }
+ /* tail call */
+ case BPF_JMP | BPF_TAIL_CALL:
+ if (emit_bpf_tail_call(ctx))
+ return -EFAULT;
+ break;
+ /* function call */
+ case BPF_JMP | BPF_CALL:
+ {
+ const u8 *r0 = bpf2a32[BPF_REG_0];
+ const u8 *r1 = bpf2a32[BPF_REG_1];
+ const u8 *r2 = bpf2a32[BPF_REG_2];
+ const u8 *r3 = bpf2a32[BPF_REG_3];
+ const u8 *r4 = bpf2a32[BPF_REG_4];
+ const u8 *r5 = bpf2a32[BPF_REG_5];
+ const u32 func = (u32)__bpf_call_base + (u32)imm;
+
+ emit_a32_mov_r64(true, r0, r1, false, false, ctx);
+ emit_a32_mov_r64(true, r1, r2, false, true, ctx);
+ emit_push_r64(r5, 0, ctx);
+ emit_push_r64(r4, 8, ctx);
+ emit_push_r64(r3, 16, ctx);
+
+ emit_a32_mov_i(tmp[1], func, false, ctx);
+ emit_blx_r(tmp[1], ctx);
+
+ emit(ARM_ADD_I(ARM_SP, ARM_SP, imm8m(24)), ctx); /* clean up the stack args pushed above */
+ break;
+ }
+ /* function return */
+ case BPF_JMP | BPF_EXIT:
+ /* Optimization: when last instruction is EXIT
+ * simply fallthrough to epilogue.
+ */
+ if (i == ctx->prog->len - 1)
break;
- case BPF_ANC | SKF_AD_PAY_OFFSET:
- ctx->seen |= SEEN_SKB | SEEN_CALL;
+ jmp_offset = epilogue_offset(ctx);
+ check_imm24(jmp_offset);
+ emit(ARM_B(jmp_offset), ctx);
+ break;
+notyet:
+ pr_info_once("*** NOT YET: opcode %02x ***\n", code);
+ return -EFAULT;
+ default:
+ pr_err_once("unknown opcode %02x\n", code);
+ return -EINVAL;
+ }
- emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
- emit_mov_i(ARM_R3, (unsigned int)skb_get_poff, ctx);
- emit_blx_r(ARM_R3, ctx);
- emit(ARM_MOV_R(r_A, ARM_R0), ctx);
- break;
- case BPF_LDX | BPF_W | BPF_ABS:
- /*
- * load a 32bit word from struct seccomp_data.
- * seccomp_check_filter() will already have checked
- * that k is 32bit aligned and lies within the
- * struct seccomp_data.
- */
- ctx->seen |= SEEN_SKB;
- emit(ARM_LDR_I(r_A, r_skb, k), ctx);
- break;
- default:
- return -1;
+ if (ctx->flags & FLAG_IMM_OVERFLOW)
+ /*
+ * this instruction generated an overflow when
+ * trying to access the literal pool, so
+ * delegate this filter to the kernel interpreter.
+ */
+ return -1;
+ return 0;
+}
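
The FLAG_IMM_OVERFLOW check above is a consequence of how ARM builds constants: a data-processing instruction can only encode an 8-bit value rotated right by an even amount, so anything else has to come from movw/movt (ARMv7) or a literal pool, and if the pool overflows on older cores the program is handed back to the interpreter. A self-contained sketch of that encodability test — an assumption about the shape of the in-tree imm8m() helper, not a copy of it:

#include <stdint.h>

static uint32_t ror32(uint32_t x, unsigned int n)
{
	return (x >> n) | (x << ((32 - n) & 31));
}

/* Return the 12-bit "rotated immediate" encoding of x, or -1 if x cannot
 * be expressed as an 8-bit value rotated right by an even amount.
 */
static int rotated_imm(uint32_t x)
{
	unsigned int rot;

	for (rot = 0; rot < 16; rot++)
		if ((x & ~ror32(0xffu, 2 * rot)) == 0)
			return (int)((rot << 8) | ror32(x, (32 - 2 * rot) & 31));
	return -1;
}
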
+
+static int build_body(struct jit_ctx *ctx)
+{
+ const struct bpf_prog *prog = ctx->prog;
+ unsigned int i;
+
+ for (i = 0; i < prog->len; i++) {
+ const struct bpf_insn *insn = &(prog->insnsi[i]);
+ int ret;
+
+ ret = build_insn(insn, ctx);
+
+ /* A return value > 0 means the instruction also consumed
+ * the following slot (64-bit immediate load).
+ */
+ if (ret > 0) {
+ i++;
+ if (ctx->target == NULL)
+ ctx->offsets[i] = ctx->idx;
+ continue;
}
- if (ctx->flags & FLAG_IMM_OVERFLOW)
- /*
- * this instruction generated an overflow when
- * trying to access the literal pool, so
- * delegate this filter to the kernel interpreter.
- */
- return -1;
+ if (ctx->target == NULL)
+ ctx->offsets[i] = ctx->idx;
+
+ /* If unsuccessful, return the error code */
+ if (ret)
+ return ret;
}
+ return 0;
+}
- /* compute offsets only during the first pass */
- if (ctx->target == NULL)
- ctx->offsets[i] = ctx->idx * 4;
+static int validate_code(struct jit_ctx *ctx)
+{
+ int i;
+
+ for (i = 0; i < ctx->idx; i++) {
+ if (ctx->target[i] == __opcode_to_mem_arm(ARM_INST_UDF))
+ return -1;
+ }
return 0;
}
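
validate_code() only works because the image starts out poisoned: bpf_jit_binary_alloc() (used further down) is passed jit_fill_hole, which pre-fills the allocation with the undefined instruction, so any word the generator failed to overwrite still reads back as ARM_INST_UDF and the image is rejected instead of run. A plausible shape for that fill callback — a sketch of the idea, not the exact in-tree body:

/* Pre-fill the JIT image with the faulting/undefined instruction so that
 * validate_code() can spot any word the code generator never wrote.
 * ARM_INST_UDF and __opcode_to_mem_arm() are the names used elsewhere in
 * this file; the loop itself is an assumption about the helper's shape.
 */
static void fill_with_udf(void *area, unsigned int size)
{
	u32 *ptr = area;

	for (; size >= sizeof(u32); size -= sizeof(u32))
		*ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
}
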
+void bpf_jit_compile(struct bpf_prog *prog)
+{
+ /* Nothing to do here. We support Internal BPF. */
+}
-void bpf_jit_compile(struct bpf_prog *fp)
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
+ struct bpf_prog *tmp, *orig_prog = prog;
struct bpf_binary_header *header;
+ bool tmp_blinded = false;
struct jit_ctx ctx;
- unsigned tmp_idx;
- unsigned alloc_size;
- u8 *target_ptr;
+ unsigned int tmp_idx;
+ unsigned int image_size;
+ u8 *image_ptr;
+ /* If BPF JIT was not enabled then we must fall back to
+ * the interpreter.
+ */
if (!bpf_jit_enable)
- return;
+ return orig_prog;
- memset(&ctx, 0, sizeof(ctx));
- ctx.skf = fp;
- ctx.ret0_fp_idx = -1;
+ /* If constant blinding was enabled and we failed during blinding
+ * then we must fall back to the interpreter. Otherwise, we save
+ * the new JITed code.
+ */
+ tmp = bpf_jit_blind_constants(prog);
- ctx.offsets = kzalloc(4 * (ctx.skf->len + 1), GFP_KERNEL);
- if (ctx.offsets == NULL)
- return;
+ if (IS_ERR(tmp))
+ return orig_prog;
+ if (tmp != prog) {
+ tmp_blinded = true;
+ prog = tmp;
+ }
- /* fake pass to fill in the ctx->seen */
- if (unlikely(build_body(&ctx)))
+ memset(&ctx, 0, sizeof(ctx));
+ ctx.prog = prog;
+
+ /* If we are not able to allocate memory for offsets[],
+ * we must fall back to the interpreter.
+ */
+ ctx.offsets = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
+ if (ctx.offsets == NULL) {
+ prog = orig_prog;
goto out;
+ }
+
+ /* 1) fake pass to find the length of the JITed code,
+ * to compute ctx->offsets and the other context variables
+ * needed to generate the final JITed code.
+ * Also, calculate the random start of the JITed code within
+ * the image, which is prefixed by a random number of fault
+ * instructions.
+ *
+ * If the first pass fails then there is no chance of it
+ * being successful in the second pass, so just fall back
+ * to the interpreter.
+ */
+ if (build_body(&ctx)) {
+ prog = orig_prog;
+ goto out_off;
+ }
tmp_idx = ctx.idx;
build_prologue(&ctx);
ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;
+ ctx.epilogue_offset = ctx.idx;
+
#if __LINUX_ARM_ARCH__ < 7
tmp_idx = ctx.idx;
build_epilogue(&ctx);
@@ -1021,64 +1880,83 @@ void bpf_jit_compile(struct bpf_prog *fp)
ctx.idx += ctx.imm_count;
if (ctx.imm_count) {
- ctx.imms = kzalloc(4 * ctx.imm_count, GFP_KERNEL);
- if (ctx.imms == NULL)
- goto out;
+ ctx.imms = kcalloc(ctx.imm_count, sizeof(u32), GFP_KERNEL);
+ if (ctx.imms == NULL) {
+ prog = orig_prog;
+ goto out_off;
+ }
}
#else
- /* there's nothing after the epilogue on ARMv7 */
+ /* there's nothing after the epilogue on ARMv7 */
build_epilogue(&ctx);
#endif
- alloc_size = 4 * ctx.idx;
- header = bpf_jit_binary_alloc(alloc_size, &target_ptr,
- 4, jit_fill_hole);
- if (header == NULL)
- goto out;
+ /* Now we can get the actual image size of the JITed arm code.
+ * Currently, we are not considering the THUMB-2 instructions
+ * for the JIT, although they could decrease the size of the image.
+ *
+ * As each arm instruction is 32 bits long, we translate the number
+ * of JITed instructions into the size required to store the
+ * JITed code.
+ */
+ image_size = sizeof(u32) * ctx.idx;
- ctx.target = (u32 *) target_ptr;
+ /* Now we know the size of the image to allocate */
+ header = bpf_jit_binary_alloc(image_size, &image_ptr,
+ sizeof(u32), jit_fill_hole);
+ /* If we are not able to allocate memory for the image,
+ * we must fall back to the interpreter.
+ */
+ if (header == NULL) {
+ prog = orig_prog;
+ goto out_imms;
+ }
+
+ /* 2.) Actual pass to generate final JIT code */
+ ctx.target = (u32 *) image_ptr;
ctx.idx = 0;
build_prologue(&ctx);
+
+ /* If building the body of the JITed code fails somehow,
+ * we fall back to the interpreter.
+ */
if (build_body(&ctx) < 0) {
-#if __LINUX_ARM_ARCH__ < 7
- if (ctx.imm_count)
- kfree(ctx.imms);
-#endif
+ image_ptr = NULL;
bpf_jit_binary_free(header);
- goto out;
+ prog = orig_prog;
+ goto out_imms;
}
build_epilogue(&ctx);
+ /* 3.) Extra pass to validate JITed Code */
+ if (validate_code(&ctx)) {
+ image_ptr = NULL;
+ bpf_jit_binary_free(header);
+ prog = orig_prog;
+ goto out_imms;
+ }
flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx));
-#if __LINUX_ARM_ARCH__ < 7
- if (ctx.imm_count)
- kfree(ctx.imms);
-#endif
-
if (bpf_jit_enable > 1)
/* there are 2 passes here */
- bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);
+ bpf_jit_dump(prog->len, image_size, 2, ctx.target);
set_memory_ro((unsigned long)header, header->pages);
- fp->bpf_func = (void *)ctx.target;
- fp->jited = 1;
-out:
+ prog->bpf_func = (void *)ctx.target;
+ prog->jited = 1;
+ prog->jited_len = image_size;
+
+out_imms:
+#if __LINUX_ARM_ARCH__ < 7
+ if (ctx.imm_count)
+ kfree(ctx.imms);
+#endif
+out_off:
kfree(ctx.offsets);
- return;
+out:
+ if (tmp_blinded)
+ bpf_jit_prog_release_other(prog, prog == orig_prog ?
+ tmp : orig_prog);
+ return prog;
}
-void bpf_jit_free(struct bpf_prog *fp)
-{
- unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
- struct bpf_binary_header *header = (void *)addr;
-
- if (!fp->jited)
- goto free_filter;
-
- set_memory_rw(addr, header->pages);
- bpf_jit_binary_free(header);
-
-free_filter:
- bpf_prog_unlock_free(fp);
-}
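
Stepping back from the individual hunks: the converted JIT models every 64-bit eBPF register as a {hi, lo} pair of 32-bit halves. Pairs with a home in ARM core registers are used directly; the rest live in fixed stack slots and are shuttled through the tmp/tmp2 scratch pairs, which is exactly what the dstk/sstk flags select throughout build_insn(). A compact way to picture that convention (illustrative types and values, not the kernel's actual bpf2a32 table):

#include <stdbool.h>
#include <stdint.h>

/* One 64-bit eBPF register as the 32-bit ARM JIT sees it: either two ARM
 * core registers or two 4-byte stack slots reached via scratch registers.
 */
struct ebpf_reg_map {
	uint8_t hi;       /* where bits 63:32 live */
	uint8_t lo;       /* where bits 31:0 live */
	bool on_stack;    /* true: hi/lo are stack-slot indices (dstk/sstk case) */
};

/* e.g. a register kept in ARM r5:r4 versus one spilled to slots 7 and 6 */
static const struct ebpf_reg_map in_regs = { .hi = 5, .lo = 4, .on_stack = false };
static const struct ebpf_reg_map spilled = { .hi = 7, .lo = 6, .on_stack = true };
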
diff --git a/arch/arm/net/bpf_jit_32.h b/arch/arm/net/bpf_jit_32.h
index c46fca2972f7..d5cf5f6208aa 100644
--- a/arch/arm/net/bpf_jit_32.h
+++ b/arch/arm/net/bpf_jit_32.h
@@ -11,6 +11,7 @@
#ifndef PFILTER_OPCODES_ARM_H
#define PFILTER_OPCODES_ARM_H
+/* ARM 32bit Registers */
#define ARM_R0 0
#define ARM_R1 1
#define ARM_R2 2
@@ -22,38 +23,43 @@
#define ARM_R8 8
#define ARM_R9 9
#define ARM_R10 10
-#define ARM_FP 11
-#define ARM_IP 12
-#define ARM_SP 13
-#define ARM_LR 14
-#define ARM_PC 15
-
-#define ARM_COND_EQ 0x0
-#define ARM_COND_NE 0x1
-#define ARM_COND_CS 0x2
+#define ARM_FP 11 /* Frame Pointer */
+#define ARM_IP 12 /* Intra-procedure scratch register */
+#define ARM_SP 13 /* Stack pointer: as load/store base reg */
+#define ARM_LR 14 /* Link Register */
+#define ARM_PC 15 /* Program counter */
+
+#define ARM_COND_EQ 0x0 /* == */
+#define ARM_COND_NE 0x1 /* != */
+#define ARM_COND_CS 0x2 /* unsigned >= */
#define ARM_COND_HS ARM_COND_CS
-#define ARM_COND_CC 0x3
+#define ARM_COND_CC 0x3 /* unsigned < */
#define ARM_COND_LO ARM_COND_CC
-#define ARM_COND_MI 0x4
-#define ARM_COND_PL 0x5
-#define ARM_COND_VS 0x6
-#define ARM_COND_VC 0x7
-#define ARM_COND_HI 0x8
-#define ARM_COND_LS 0x9
-#define ARM_COND_GE 0xa
-#define ARM_COND_LT 0xb
-#define ARM_COND_GT 0xc
-#define ARM_COND_LE 0xd
-#define ARM_COND_AL 0xe
+#define ARM_COND_MI 0x4 /* < 0 */
+#define ARM_COND_PL 0x5 /* >= 0 */
+#define ARM_COND_VS 0x6 /* Signed Overflow */
+#define ARM_COND_VC 0x7 /* No Signed Overflow */
+#define ARM_COND_HI 0x8 /* unsigned > */
+#define ARM_COND_LS 0x9 /* unsigned <= */
+#define ARM_COND_GE 0xa /* Signed >= */
+#define ARM_COND_LT 0xb /* Signed < */
+#define ARM_COND_GT 0xc /* Signed > */
+#define ARM_COND_LE 0xd /* Signed <= */
+#define ARM_COND_AL 0xe /* None */
/* register shift types */
#define SRTYPE_LSL 0
#define SRTYPE_LSR 1
#define SRTYPE_ASR 2
#define SRTYPE_ROR 3
+#define SRTYPE_ASL (SRTYPE_LSL)
#define ARM_INST_ADD_R 0x00800000
+#define ARM_INST_ADDS_R 0x00900000
+#define ARM_INST_ADC_R 0x00a00000
+#define ARM_INST_ADC_I 0x02a00000
#define ARM_INST_ADD_I 0x02800000
+#define ARM_INST_ADDS_I 0x02900000
#define ARM_INST_AND_R 0x00000000
#define ARM_INST_AND_I 0x02000000
@@ -76,8 +82,10 @@
#define ARM_INST_LDRH_I 0x01d000b0
#define ARM_INST_LDRH_R 0x019000b0
#define ARM_INST_LDR_I 0x05900000
+#define ARM_INST_LDR_R 0x07900000
#define ARM_INST_LDM 0x08900000
+#define ARM_INST_LDM_IA 0x08b00000
#define ARM_INST_LSL_I 0x01a00000
#define ARM_INST_LSL_R 0x01a00010
@@ -86,6 +94,7 @@
#define ARM_INST_LSR_R 0x01a00030
#define ARM_INST_MOV_R 0x01a00000
+#define ARM_INST_MOVS_R 0x01b00000
#define ARM_INST_MOV_I 0x03a00000
#define ARM_INST_MOVW 0x03000000
#define ARM_INST_MOVT 0x03400000
@@ -96,17 +105,28 @@
#define ARM_INST_PUSH 0x092d0000
#define ARM_INST_ORR_R 0x01800000
+#define ARM_INST_ORRS_R 0x01900000
#define ARM_INST_ORR_I 0x03800000
#define ARM_INST_REV 0x06bf0f30
#define ARM_INST_REV16 0x06bf0fb0
#define ARM_INST_RSB_I 0x02600000
+#define ARM_INST_RSBS_I 0x02700000
+#define ARM_INST_RSC_I 0x02e00000
#define ARM_INST_SUB_R 0x00400000
+#define ARM_INST_SUBS_R 0x00500000
+#define ARM_INST_RSB_R 0x00600000
#define ARM_INST_SUB_I 0x02400000
+#define ARM_INST_SUBS_I 0x02500000
+#define ARM_INST_SBC_I 0x02c00000
+#define ARM_INST_SBC_R 0x00c00000
+#define ARM_INST_SBCS_R 0x00d00000
#define ARM_INST_STR_I 0x05800000
+#define ARM_INST_STRB_I 0x05c00000
+#define ARM_INST_STRH_I 0x01c000b0
#define ARM_INST_TST_R 0x01100000
#define ARM_INST_TST_I 0x03100000
@@ -117,6 +137,8 @@
#define ARM_INST_MLS 0x00600090
+#define ARM_INST_UXTH 0x06ff0070
+
/*
* Use a suitable undefined instruction to use for ARM/Thumb2 faulting.
* We need to be careful not to conflict with those used by other modules
@@ -135,9 +157,15 @@
#define _AL3_R(op, rd, rn, rm) ((op ## _R) | (rd) << 12 | (rn) << 16 | (rm))
/* immediate */
#define _AL3_I(op, rd, rn, imm) ((op ## _I) | (rd) << 12 | (rn) << 16 | (imm))
+/* register with register-shift */
+#define _AL3_SR(inst) (inst | (1 << 4))
#define ARM_ADD_R(rd, rn, rm) _AL3_R(ARM_INST_ADD, rd, rn, rm)
+#define ARM_ADDS_R(rd, rn, rm) _AL3_R(ARM_INST_ADDS, rd, rn, rm)
#define ARM_ADD_I(rd, rn, imm) _AL3_I(ARM_INST_ADD, rd, rn, imm)
+#define ARM_ADDS_I(rd, rn, imm) _AL3_I(ARM_INST_ADDS, rd, rn, imm)
+#define ARM_ADC_R(rd, rn, rm) _AL3_R(ARM_INST_ADC, rd, rn, rm)
+#define ARM_ADC_I(rd, rn, imm) _AL3_I(ARM_INST_ADC, rd, rn, imm)
#define ARM_AND_R(rd, rn, rm) _AL3_R(ARM_INST_AND, rd, rn, rm)
#define ARM_AND_I(rd, rn, imm) _AL3_I(ARM_INST_AND, rd, rn, imm)
@@ -156,7 +184,9 @@
#define ARM_EOR_I(rd, rn, imm) _AL3_I(ARM_INST_EOR, rd, rn, imm)
#define ARM_LDR_I(rt, rn, off) (ARM_INST_LDR_I | (rt) << 12 | (rn) << 16 \
- | (off))
+ | ((off) & 0xfff))
+#define ARM_LDR_R(rt, rn, rm) (ARM_INST_LDR_R | (rt) << 12 | (rn) << 16 \
+ | (rm))
#define ARM_LDRB_I(rt, rn, off) (ARM_INST_LDRB_I | (rt) << 12 | (rn) << 16 \
| (off))
#define ARM_LDRB_R(rt, rn, rm) (ARM_INST_LDRB_R | (rt) << 12 | (rn) << 16 \
@@ -167,15 +197,23 @@
| (rm))
#define ARM_LDM(rn, regs) (ARM_INST_LDM | (rn) << 16 | (regs))
+#define ARM_LDM_IA(rn, regs) (ARM_INST_LDM_IA | (rn) << 16 | (regs))
#define ARM_LSL_R(rd, rn, rm) (_AL3_R(ARM_INST_LSL, rd, 0, rn) | (rm) << 8)
#define ARM_LSL_I(rd, rn, imm) (_AL3_I(ARM_INST_LSL, rd, 0, rn) | (imm) << 7)
#define ARM_LSR_R(rd, rn, rm) (_AL3_R(ARM_INST_LSR, rd, 0, rn) | (rm) << 8)
#define ARM_LSR_I(rd, rn, imm) (_AL3_I(ARM_INST_LSR, rd, 0, rn) | (imm) << 7)
+#define ARM_ASR_R(rd, rn, rm) (_AL3_R(ARM_INST_ASR, rd, 0, rn) | (rm) << 8)
+#define ARM_ASR_I(rd, rn, imm) (_AL3_I(ARM_INST_ASR, rd, 0, rn) | (imm) << 7)
#define ARM_MOV_R(rd, rm) _AL3_R(ARM_INST_MOV, rd, 0, rm)
+#define ARM_MOVS_R(rd, rm) _AL3_R(ARM_INST_MOVS, rd, 0, rm)
#define ARM_MOV_I(rd, imm) _AL3_I(ARM_INST_MOV, rd, 0, imm)
+#define ARM_MOV_SR(rd, rm, type, rs) \
+ (_AL3_SR(ARM_MOV_R(rd, rm)) | (type) << 5 | (rs) << 8)
+#define ARM_MOV_SI(rd, rm, type, imm6) \
+ (ARM_MOV_R(rd, rm) | (type) << 5 | (imm6) << 7)
#define ARM_MOVW(rd, imm) \
(ARM_INST_MOVW | ((imm) >> 12) << 16 | (rd) << 12 | ((imm) & 0x0fff))
@@ -190,19 +228,38 @@
#define ARM_ORR_R(rd, rn, rm) _AL3_R(ARM_INST_ORR, rd, rn, rm)
#define ARM_ORR_I(rd, rn, imm) _AL3_I(ARM_INST_ORR, rd, rn, imm)
-#define ARM_ORR_S(rd, rn, rm, type, rs) \
- (ARM_ORR_R(rd, rn, rm) | (type) << 5 | (rs) << 7)
+#define ARM_ORR_SR(rd, rn, rm, type, rs) \
+ (_AL3_SR(ARM_ORR_R(rd, rn, rm)) | (type) << 5 | (rs) << 8)
+#define ARM_ORRS_R(rd, rn, rm) _AL3_R(ARM_INST_ORRS, rd, rn, rm)
+#define ARM_ORRS_SR(rd, rn, rm, type, rs) \
+ (_AL3_SR(ARM_ORRS_R(rd, rn, rm)) | (type) << 5 | (rs) << 8)
+#define ARM_ORR_SI(rd, rn, rm, type, imm6) \
+ (ARM_ORR_R(rd, rn, rm) | (type) << 5 | (imm6) << 7)
+#define ARM_ORRS_SI(rd, rn, rm, type, imm6) \
+ (ARM_ORRS_R(rd, rn, rm) | (type) << 5 | (imm6) << 7)
#define ARM_REV(rd, rm) (ARM_INST_REV | (rd) << 12 | (rm))
#define ARM_REV16(rd, rm) (ARM_INST_REV16 | (rd) << 12 | (rm))
#define ARM_RSB_I(rd, rn, imm) _AL3_I(ARM_INST_RSB, rd, rn, imm)
+#define ARM_RSBS_I(rd, rn, imm) _AL3_I(ARM_INST_RSBS, rd, rn, imm)
+#define ARM_RSC_I(rd, rn, imm) _AL3_I(ARM_INST_RSC, rd, rn, imm)
#define ARM_SUB_R(rd, rn, rm) _AL3_R(ARM_INST_SUB, rd, rn, rm)
+#define ARM_SUBS_R(rd, rn, rm) _AL3_R(ARM_INST_SUBS, rd, rn, rm)
+#define ARM_RSB_R(rd, rn, rm) _AL3_R(ARM_INST_RSB, rd, rn, rm)
+#define ARM_SBC_R(rd, rn, rm) _AL3_R(ARM_INST_SBC, rd, rn, rm)
+#define ARM_SBCS_R(rd, rn, rm) _AL3_R(ARM_INST_SBCS, rd, rn, rm)
#define ARM_SUB_I(rd, rn, imm) _AL3_I(ARM_INST_SUB, rd, rn, imm)
+#define ARM_SUBS_I(rd, rn, imm) _AL3_I(ARM_INST_SUBS, rd, rn, imm)
+#define ARM_SBC_I(rd, rn, imm) _AL3_I(ARM_INST_SBC, rd, rn, imm)
#define ARM_STR_I(rt, rn, off) (ARM_INST_STR_I | (rt) << 12 | (rn) << 16 \
- | (off))
+ | ((off) & 0xfff))
+#define ARM_STRH_I(rt, rn, off) (ARM_INST_STRH_I | (rt) << 12 | (rn) << 16 \
+ | (((off) & 0xf0) << 4) | ((off) & 0xf))
+#define ARM_STRB_I(rt, rn, off) (ARM_INST_STRB_I | (rt) << 12 | (rn) << 16 \
+ | (((off) & 0xf0) << 4) | ((off) & 0xf))
#define ARM_TST_R(rn, rm) _AL3_R(ARM_INST_TST, 0, rn, rm)
#define ARM_TST_I(rn, imm) _AL3_I(ARM_INST_TST, 0, rn, imm)
@@ -214,5 +271,6 @@
#define ARM_MLS(rd, rn, rm, ra) (ARM_INST_MLS | (rd) << 16 | (rn) | (rm) << 8 \
| (ra) << 12)
+#define ARM_UXTH(rd, rm) (ARM_INST_UXTH | (rd) << 12 | (rm))
#endif /* PFILTER_OPCODES_ARM_H */
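
The header's encoding macros are just bit-field composition over the base opcode constants above; the condition code (bits 31:28) is ORed in later by the emit helpers in bpf_jit_32.c (assumed to default to the always-execute condition, 0xe). A self-contained check of one encoding, using the same field positions as _AL3_I():

#include <stdint.h>
#include <stdio.h>

#define INST_ADD_I 0x02800000u                          /* matches ARM_INST_ADD_I */
#define AL3_I(op, rd, rn, imm) \
	((op) | (uint32_t)(rd) << 12 | (uint32_t)(rn) << 16 | (uint32_t)(imm))

int main(void)
{
	/* add r0, r1, #4 */
	uint32_t insn = AL3_I(INST_ADD_I, 0, 1, 4);

	printf("without cond: 0x%08x\n", insn);              /* 0x02810004 */
	printf("with AL cond: 0x%08x\n", insn | 0xeu << 28); /* 0xe2810004 */
	return 0;
}
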
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index dfd908630631..0df64a6a56d4 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -75,6 +75,7 @@ config ARM64
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+ select HAVE_ARCH_VMAP_STACK
select HAVE_ARM_SMCCC
select HAVE_EBPF_JIT
select HAVE_C_RECORDMCOUNT
@@ -960,6 +961,18 @@ config ARM64_UAO
regular load/store instructions if the cpu does not implement the
feature.
+config ARM64_PMEM
+ bool "Enable support for persistent memory"
+ select ARCH_HAS_PMEM_API
+ select ARCH_HAS_UACCESS_FLUSHCACHE
+ help
+ Say Y to enable support for the persistent memory API based on the
+ ARMv8.2 DCPoP feature.
+
+ The feature is detected at runtime, and the kernel will use DC CVAC
+ operations if DC CVAP is not supported (following the behaviour of
+ DC CVAP itself if the system does not define a point of persistence).
+
endmenu
config ARM64_MODULE_CMODEL_LARGE
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts
index 0d1f026d831a..6872135d7f84 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts
@@ -67,14 +67,6 @@
};
};
-&emac {
- pinctrl-names = "default";
- pinctrl-0 = <&rgmii_pins>;
- phy-mode = "rgmii";
- phy-handle = <&ext_rgmii_phy>;
- status = "okay";
-};
-
&i2c1 {
pinctrl-names = "default";
pinctrl-0 = <&i2c1_pins>;
@@ -85,13 +77,6 @@
bias-pull-up;
};
-&mdio {
- ext_rgmii_phy: ethernet-phy@1 {
- compatible = "ethernet-phy-ieee802.3-c22";
- reg = <1>;
- };
-};
-
&mmc0 {
pinctrl-names = "default";
pinctrl-0 = <&mmc0_pins>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts
index 24f1aac366d6..f82ccf332c0f 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts
@@ -48,18 +48,3 @@
/* TODO: Camera, touchscreen, etc. */
};
-
-&emac {
- pinctrl-names = "default";
- pinctrl-0 = <&rgmii_pins>;
- phy-mode = "rgmii";
- phy-handle = <&ext_rgmii_phy>;
- status = "okay";
-};
-
-&mdio {
- ext_rgmii_phy: ethernet-phy@1 {
- compatible = "ethernet-phy-ieee802.3-c22";
- reg = <1>;
- };
-};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
index 08cda24ea194..7c533b6d4ba9 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
@@ -78,15 +78,6 @@
status = "okay";
};
-&emac {
- pinctrl-names = "default";
- pinctrl-0 = <&rmii_pins>;
- phy-mode = "rmii";
- phy-handle = <&ext_rmii_phy1>;
- status = "okay";
-
-};
-
&i2c1 {
pinctrl-names = "default";
pinctrl-0 = <&i2c1_pins>;
@@ -97,13 +88,6 @@
bias-pull-up;
};
-&mdio {
- ext_rmii_phy1: ethernet-phy@1 {
- compatible = "ethernet-phy-ieee802.3-c22";
- reg = <1>;
- };
-};
-
&mmc0 {
pinctrl-names = "default";
pinctrl-0 = <&mmc0_pins>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts
index 17eb1cc5bf6b..d891a1a27f6c 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts
@@ -76,21 +76,6 @@
status = "okay";
};
-&emac {
- pinctrl-names = "default";
- pinctrl-0 = <&rgmii_pins>;
- phy-mode = "rgmii";
- phy-handle = <&ext_rgmii_phy>;
- status = "okay";
-};
-
-&mdio {
- ext_rgmii_phy: ethernet-phy@1 {
- compatible = "ethernet-phy-ieee802.3-c22";
- reg = <1>;
- };
-};
-
&mmc2 {
pinctrl-names = "default";
pinctrl-0 = <&mmc2_pins>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
index 9d00622ce845..68aadc9b96dc 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
@@ -449,26 +449,6 @@
#size-cells = <0>;
};
- emac: ethernet@1c30000 {
- compatible = "allwinner,sun50i-a64-emac";
- syscon = <&syscon>;
- reg = <0x01c30000 0x100>;
- interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "macirq";
- resets = <&ccu RST_BUS_EMAC>;
- reset-names = "stmmaceth";
- clocks = <&ccu CLK_BUS_EMAC>;
- clock-names = "stmmaceth";
- status = "disabled";
- #address-cells = <1>;
- #size-cells = <0>;
-
- mdio: mdio {
- #address-cells = <1>;
- #size-cells = <0>;
- };
- };
-
gic: interrupt-controller@1c81000 {
compatible = "arm,gic-400";
reg = <0x01c81000 0x1000>,
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo2.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo2.dts
index 968908761194..1c2387bd5df6 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo2.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo2.dts
@@ -50,7 +50,6 @@
compatible = "friendlyarm,nanopi-neo2", "allwinner,sun50i-h5";
aliases {
- ethernet0 = &emac;
serial0 = &uart0;
};
@@ -109,22 +108,6 @@
status = "okay";
};
-&emac {
- pinctrl-names = "default";
- pinctrl-0 = <&emac_rgmii_pins>;
- phy-supply = <&reg_gmac_3v3>;
- phy-handle = <&ext_rgmii_phy>;
- phy-mode = "rgmii";
- status = "okay";
-};
-
-&mdio {
- ext_rgmii_phy: ethernet-phy@7 {
- compatible = "ethernet-phy-ieee802.3-c22";
- reg = <7>;
- };
-};
-
&mmc0 {
pinctrl-names = "default";
pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts
index a8296feee884..4f77c8470f6c 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts
@@ -59,7 +59,6 @@
};
aliases {
- ethernet0 = &emac;
serial0 = &uart0;
};
@@ -137,28 +136,12 @@
status = "okay";
};
-&emac {
- pinctrl-names = "default";
- pinctrl-0 = <&emac_rgmii_pins>;
- phy-supply = <&reg_gmac_3v3>;
- phy-handle = <&ext_rgmii_phy>;
- phy-mode = "rgmii";
- status = "okay";
-};
-
&ir {
pinctrl-names = "default";
pinctrl-0 = <&ir_pins_a>;
status = "okay";
};
-&mdio {
- ext_rgmii_phy: ethernet-phy@1 {
- compatible = "ethernet-phy-ieee802.3-c22";
- reg = <1>;
- };
-};
-
&mmc0 {
pinctrl-names = "default";
pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-prime.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-prime.dts
index d906b302cbcd..6be06873e5af 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-prime.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-prime.dts
@@ -54,7 +54,6 @@
compatible = "xunlong,orangepi-prime", "allwinner,sun50i-h5";
aliases {
- ethernet0 = &emac;
serial0 = &uart0;
};
@@ -144,28 +143,12 @@
status = "okay";
};
-&emac {
- pinctrl-names = "default";
- pinctrl-0 = <&emac_rgmii_pins>;
- phy-supply = <&reg_gmac_3v3>;
- phy-handle = <&ext_rgmii_phy>;
- phy-mode = "rgmii";
- status = "okay";
-};
-
&ir {
pinctrl-names = "default";
pinctrl-0 = <&ir_pins_a>;
status = "okay";
};
-&mdio {
- ext_rgmii_phy: ethernet-phy@1 {
- compatible = "ethernet-phy-ieee802.3-c22";
- reg = <1>;
- };
-};
-
&mmc0 {
pinctrl-names = "default";
pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
index 732e2e06f503..d9a720bff05d 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
@@ -120,5 +120,8 @@
};
&pio {
+ interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
compatible = "allwinner,sun50i-h5-pinctrl";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
index 35b8c88c3220..738ed689ff69 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
@@ -400,7 +400,7 @@
};
pwm_AO_ab: pwm@550 {
- compatible = "amlogic,meson-gx-pwm", "amlogic,meson-gxbb-pwm";
+ compatible = "amlogic,meson-gx-ao-pwm", "amlogic,meson-gxbb-ao-pwm";
reg = <0x0 0x00550 0x0 0x10>;
#pwm-cells = <3>;
status = "disabled";
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts
index 72c5a9f64ca8..94567eb17875 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts
@@ -109,8 +109,8 @@
status = "okay";
pinctrl-0 = <&pwm_ao_a_3_pins>, <&pwm_ao_b_pins>;
pinctrl-names = "default";
- clocks = <&clkc CLKID_FCLK_DIV4>;
- clock-names = "clkin0";
+ clocks = <&xtal>, <&xtal>;
+ clock-names = "clkin0", "clkin1";
};
&pwm_ef {
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
index 890821d6e52b..266fbcf3e47f 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
@@ -10,12 +10,20 @@
#include <dt-bindings/input/input.h>
-#include "meson-gxl-s905x-p212.dtsi"
+#include "meson-gxl-s905x.dtsi"
/ {
compatible = "libretech,cc", "amlogic,s905x", "amlogic,meson-gxl";
model = "Libre Technology CC";
+ aliases {
+ serial0 = &uart_AO;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
cvbs-connector {
compatible = "composite-video-connector";
@@ -26,6 +34,11 @@
};
};
+ emmc_pwrseq: emmc-pwrseq {
+ compatible = "mmc-pwrseq-emmc";
+ reset-gpios = <&gpio BOOT_9 GPIO_ACTIVE_LOW>;
+ };
+
hdmi-connector {
compatible = "hdmi-connector";
type = "a";
@@ -53,6 +66,39 @@
linux,default-trigger = "heartbeat";
};
};
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x0 0x0 0x80000000>;
+ };
+
+ vcc_3v3: regulator-vcc_3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "VCC_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ vcc_card: regulator-vcc-card {
+ compatible = "regulator-gpio";
+
+ regulator-name = "VCC_CARD";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpios = <&gpio_ao GPIOAO_3 GPIO_ACTIVE_HIGH>;
+ gpios-states = <0>;
+
+ states = <3300000 0>,
+ <1800000 1>;
+ };
+
+ vddio_boot: regulator-vddio_boot {
+ compatible = "regulator-fixed";
+ regulator-name = "VDDIO_BOOT";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
};
&cvbs_vdac_port {
@@ -61,6 +107,16 @@
};
};
+&ethmac {
+ status = "okay";
+};
+
+&ir {
+ status = "okay";
+ pinctrl-0 = <&remote_input_ao_pins>;
+ pinctrl-names = "default";
+};
+
&hdmi_tx {
status = "okay";
pinctrl-0 = <&hdmi_hpd_pins>, <&hdmi_i2c_pins>;
@@ -73,20 +129,43 @@
};
};
-/*
- * The following devices exists but are exposed on the general
- * purpose GPIO header. End user may well decide to use those pins
- * for another purpose
- */
+/* SD card */
+&sd_emmc_b {
+ status = "okay";
+ pinctrl-0 = <&sdcard_pins>;
+ pinctrl-names = "default";
+
+ bus-width = <4>;
+ cap-sd-highspeed;
+ max-frequency = <100000000>;
+ disable-wp;
+
+ cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
+ cd-inverted;
-&sd_emmc_a {
- status = "disabled";
+ vmmc-supply = <&vcc_3v3>;
+ vqmmc-supply = <&vcc_card>;
};
-&uart_A {
- status = "disabled";
+/* eMMC */
+&sd_emmc_c {
+ status = "okay";
+ pinctrl-0 = <&emmc_pins>;
+ pinctrl-names = "default";
+
+ bus-width = <8>;
+ cap-mmc-highspeed;
+ max-frequency = <50000000>;
+ non-removable;
+ disable-wp;
+
+ mmc-pwrseq = <&emmc_pwrseq>;
+ vmmc-supply = <&vcc_3v3>;
+ vqmmc-supply = <&vddio_boot>;
};
-&wifi32k {
- status = "disabled";
+&uart_AO {
+ status = "okay";
+ pinctrl-0 = <&uart_ao_a_pins>;
+ pinctrl-names = "default";
};
diff --git a/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi b/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi
index e2b0da2c0bc7..105b2938082f 100644
--- a/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi
@@ -280,9 +280,6 @@
&decon {
status = "okay";
-
- i80-if-timings {
- };
};
&decon_tv {
@@ -1116,9 +1113,6 @@
&mic {
status = "okay";
-
- i80-if-timings {
- };
};
&pmu_system_controller {
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
index 31fd77f82ced..d16b9cc1e825 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
@@ -653,21 +653,21 @@
};
msi1: msi-controller1@1571000 {
- compatible = "fsl,1s1043a-msi";
+ compatible = "fsl,ls1043a-msi";
reg = <0x0 0x1571000 0x0 0x8>;
msi-controller;
interrupts = <0 116 0x4>;
};
msi2: msi-controller2@1572000 {
- compatible = "fsl,1s1043a-msi";
+ compatible = "fsl,ls1043a-msi";
reg = <0x0 0x1572000 0x0 0x8>;
msi-controller;
interrupts = <0 126 0x4>;
};
msi3: msi-controller3@1573000 {
- compatible = "fsl,1s1043a-msi";
+ compatible = "fsl,ls1043a-msi";
reg = <0x0 0x1573000 0x0 0x8>;
msi-controller;
interrupts = <0 160 0x4>;
@@ -689,7 +689,7 @@
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */
0x82000000 0x0 0x40000000 0x40 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
- msi-parent = <&msi1>;
+ msi-parent = <&msi1>, <&msi2>, <&msi3>;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0000 0 0 1 &gic 0 110 0x4>,
@@ -714,7 +714,7 @@
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000 /* downstream I/O */
0x82000000 0x0 0x40000000 0x48 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
- msi-parent = <&msi2>;
+ msi-parent = <&msi1>, <&msi2>, <&msi3>;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0000 0 0 1 &gic 0 120 0x4>,
@@ -739,7 +739,7 @@
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x00000000 0x50 0x00010000 0x0 0x00010000 /* downstream I/O */
0x82000000 0x0 0x40000000 0x50 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
- msi-parent = <&msi3>;
+ msi-parent = <&msi1>, <&msi2>, <&msi3>;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0000 0 0 1 &gic 0 154 0x4>,
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
index dc1640be0345..c8ff0baddf1d 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
@@ -630,6 +630,37 @@
interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clockgen 4 1>;
};
+
+ msi1: msi-controller@1580000 {
+ compatible = "fsl,ls1046a-msi";
+ msi-controller;
+ reg = <0x0 0x1580000 0x0 0x10000>;
+ interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ msi2: msi-controller@1590000 {
+ compatible = "fsl,ls1046a-msi";
+ msi-controller;
+ reg = <0x0 0x1590000 0x0 0x10000>;
+ interrupts = <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ msi3: msi-controller@15a0000 {
+ compatible = "fsl,ls1046a-msi";
+ msi-controller;
+ reg = <0x0 0x15a0000 0x0 0x10000>;
+ interrupts = <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
};
reserved-memory {
diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
index dbcc3d4e2ed5..51763d674050 100644
--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
@@ -219,7 +219,7 @@
reg = <0x18800 0x100>, <0x18C00 0x20>;
gpiosb: gpio {
#gpio-cells = <2>;
- gpio-ranges = <&pinctrl_sb 0 0 29>;
+ gpio-ranges = <&pinctrl_sb 0 0 30>;
gpio-controller;
interrupts =
<GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
index 1eb1f1e9aac4..4d360713ed12 100644
--- a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
@@ -268,10 +268,10 @@
ap_gpio: gpio {
compatible = "marvell,armada-8k-gpio";
offset = <0x1040>;
- ngpios = <19>;
+ ngpios = <20>;
gpio-controller;
#gpio-cells = <2>;
- gpio-ranges = <&ap_pinctrl 0 0 19>;
+ gpio-ranges = <&ap_pinctrl 0 0 20>;
};
};
};
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
index 726528ce54e9..4c68605675a8 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
@@ -270,6 +270,7 @@
interrupt-names = "mem", "ring0", "ring1",
"ring2", "ring3", "eip";
clocks = <&cpm_clk 1 26>;
+ dma-coherent;
};
};
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
index 95f8e5f607f6..923f354b02f0 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
@@ -64,7 +64,7 @@
compatible = "marvell,armada-8k-rtc";
reg = <0x284000 0x20>, <0x284080 0x24>;
reg-names = "rtc", "rtc-soc";
- interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <ICU_GRP_NSR 77 IRQ_TYPE_LEVEL_HIGH>;
};
cps_ethernet: ethernet@0 {
@@ -261,6 +261,7 @@
interrupt-names = "mem", "ring0", "ring1",
"ring2", "ring3", "eip";
clocks = <&cps_clk 1 26>;
+ dma-coherent;
/*
* The cryptographic engine found on the cp110
* master is enabled by default at the SoC
diff --git a/arch/arm64/boot/dts/renesas/salvator-common.dtsi b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
index aef35e0b685a..f903957da504 100644
--- a/arch/arm64/boot/dts/renesas/salvator-common.dtsi
+++ b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
@@ -45,7 +45,7 @@
stdout-path = "serial0:115200n8";
};
- audio_clkout: audio_clkout {
+ audio_clkout: audio-clkout {
/*
* This is same as <&rcar_sound 0>
* but needed to avoid cs2000/rcar_sound probe dead-lock
@@ -508,7 +508,7 @@
/* audio_clkout0/1/2/3 */
#clock-cells = <1>;
- clock-frequency = <11289600 12288000>;
+ clock-frequency = <12288000 11289600>;
status = "okay";
diff --git a/arch/arm64/boot/dts/renesas/ulcb.dtsi b/arch/arm64/boot/dts/renesas/ulcb.dtsi
index b5c6ee07d7f9..d1a3f3b7a0ab 100644
--- a/arch/arm64/boot/dts/renesas/ulcb.dtsi
+++ b/arch/arm64/boot/dts/renesas/ulcb.dtsi
@@ -281,7 +281,7 @@
/* audio_clkout0/1/2/3 */
#clock-cells = <1>;
- clock-frequency = <11289600 12288000>;
+ clock-frequency = <12288000 11289600>;
status = "okay";
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-evb.dts b/arch/arm64/boot/dts/rockchip/rk3328-evb.dts
index cf272392cebf..b9f36dad17e6 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-evb.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-evb.dts
@@ -50,6 +50,23 @@
chosen {
stdout-path = "serial2:1500000n8";
};
+
+ vcc_phy: vcc-phy-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_phy";
+ regulator-always-on;
+ regulator-boot-on;
+ };
+};
+
+&gmac2phy {
+ phy-supply = <&vcc_phy>;
+ clock_in_out = "output";
+ assigned-clocks = <&cru SCLK_MAC2PHY_SRC>;
+ assigned-clock-rate = <50000000>;
+ assigned-clocks = <&cru SCLK_MAC2PHY>;
+ assigned-clock-parents = <&cru SCLK_MAC2PHY_SRC>;
+ status = "okay";
};
&uart2 {
diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
index 0be96cee27bd..d48bf5d9f8bd 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
@@ -63,6 +63,8 @@
i2c1 = &i2c1;
i2c2 = &i2c2;
i2c3 = &i2c3;
+ ethernet0 = &gmac2io;
+ ethernet1 = &gmac2phy;
};
cpus {
@@ -424,6 +426,43 @@
status = "disabled";
};
+ gmac2phy: ethernet@ff550000 {
+ compatible = "rockchip,rk3328-gmac";
+ reg = <0x0 0xff550000 0x0 0x10000>;
+ rockchip,grf = <&grf>;
+ interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq";
+ clocks = <&cru SCLK_MAC2PHY_SRC>, <&cru SCLK_MAC2PHY_RXTX>,
+ <&cru SCLK_MAC2PHY_RXTX>, <&cru SCLK_MAC2PHY_REF>,
+ <&cru ACLK_MAC2PHY>, <&cru PCLK_MAC2PHY>,
+ <&cru SCLK_MAC2PHY_OUT>;
+ clock-names = "stmmaceth", "mac_clk_rx",
+ "mac_clk_tx", "clk_mac_ref",
+ "aclk_mac", "pclk_mac",
+ "clk_macphy";
+ resets = <&cru SRST_GMAC2PHY_A>, <&cru SRST_MACPHY>;
+ reset-names = "stmmaceth", "mac-phy";
+ phy-mode = "rmii";
+ phy-handle = <&phy>;
+ status = "disabled";
+
+ mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy: phy@0 {
+ compatible = "ethernet-phy-id1234.d400", "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ clocks = <&cru SCLK_MAC2PHY_OUT>;
+ resets = <&cru SRST_MACPHY>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&fephyled_rxm1 &fephyled_linkm1>;
+ phy-is-integrated;
+ };
+ };
+ };
+
gic: interrupt-controller@ff811000 {
compatible = "arm,gic-400";
#interrupt-cells = <3>;
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 6c7d147eed54..cdde4f56a281 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -203,6 +203,7 @@ CONFIG_MARVELL_PHY=m
CONFIG_MESON_GXL_PHY=m
CONFIG_MICREL_PHY=y
CONFIG_REALTEK_PHY=m
+CONFIG_ROCKCHIP_PHY=y
CONFIG_USB_PEGASUS=m
CONFIG_USB_RTL8150=m
CONFIG_USB_RTL8152=m
@@ -476,6 +477,7 @@ CONFIG_QCOM_CLK_SMD_RPM=y
CONFIG_MSM_GCC_8916=y
CONFIG_MSM_GCC_8994=y
CONFIG_MSM_MMCC_8996=y
+CONFIG_HWSPINLOCK=y
CONFIG_HWSPINLOCK_QCOM=y
CONFIG_ARM_MHU=y
CONFIG_PLATFORM_MHU=y
diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
index d92293747d63..7ca54a76f6b9 100644
--- a/arch/arm64/crypto/Kconfig
+++ b/arch/arm64/crypto/Kconfig
@@ -18,18 +18,23 @@ config CRYPTO_SHA512_ARM64
config CRYPTO_SHA1_ARM64_CE
tristate "SHA-1 digest algorithm (ARMv8 Crypto Extensions)"
- depends on ARM64 && KERNEL_MODE_NEON
+ depends on KERNEL_MODE_NEON
select CRYPTO_HASH
+ select CRYPTO_SHA1
config CRYPTO_SHA2_ARM64_CE
tristate "SHA-224/SHA-256 digest algorithm (ARMv8 Crypto Extensions)"
- depends on ARM64 && KERNEL_MODE_NEON
+ depends on KERNEL_MODE_NEON
select CRYPTO_HASH
+ select CRYPTO_SHA256_ARM64
config CRYPTO_GHASH_ARM64_CE
- tristate "GHASH (for GCM chaining mode) using ARMv8 Crypto Extensions"
- depends on ARM64 && KERNEL_MODE_NEON
+ tristate "GHASH/AES-GCM using ARMv8 Crypto Extensions"
+ depends on KERNEL_MODE_NEON
select CRYPTO_HASH
+ select CRYPTO_GF128MUL
+ select CRYPTO_AES
+ select CRYPTO_AES_ARM64
config CRYPTO_CRCT10DIF_ARM64_CE
tristate "CRCT10DIF digest algorithm using PMULL instructions"
@@ -49,25 +54,29 @@ config CRYPTO_AES_ARM64_CE
tristate "AES core cipher using ARMv8 Crypto Extensions"
depends on ARM64 && KERNEL_MODE_NEON
select CRYPTO_ALGAPI
+ select CRYPTO_AES_ARM64
config CRYPTO_AES_ARM64_CE_CCM
tristate "AES in CCM mode using ARMv8 Crypto Extensions"
depends on ARM64 && KERNEL_MODE_NEON
select CRYPTO_ALGAPI
select CRYPTO_AES_ARM64_CE
+ select CRYPTO_AES_ARM64
select CRYPTO_AEAD
config CRYPTO_AES_ARM64_CE_BLK
tristate "AES in ECB/CBC/CTR/XTS modes using ARMv8 Crypto Extensions"
- depends on ARM64 && KERNEL_MODE_NEON
+ depends on KERNEL_MODE_NEON
select CRYPTO_BLKCIPHER
select CRYPTO_AES_ARM64_CE
+ select CRYPTO_AES_ARM64
select CRYPTO_SIMD
config CRYPTO_AES_ARM64_NEON_BLK
tristate "AES in ECB/CBC/CTR/XTS modes using NEON instructions"
- depends on ARM64 && KERNEL_MODE_NEON
+ depends on KERNEL_MODE_NEON
select CRYPTO_BLKCIPHER
+ select CRYPTO_AES_ARM64
select CRYPTO_AES
select CRYPTO_SIMD
@@ -82,6 +91,7 @@ config CRYPTO_AES_ARM64_BS
depends on KERNEL_MODE_NEON
select CRYPTO_BLKCIPHER
select CRYPTO_AES_ARM64_NEON_BLK
+ select CRYPTO_AES_ARM64
select CRYPTO_SIMD
endif
diff --git a/arch/arm64/crypto/aes-ce-ccm-core.S b/arch/arm64/crypto/aes-ce-ccm-core.S
index 3363560c79b7..e3a375c4cb83 100644
--- a/arch/arm64/crypto/aes-ce-ccm-core.S
+++ b/arch/arm64/crypto/aes-ce-ccm-core.S
@@ -1,7 +1,7 @@
/*
* aesce-ccm-core.S - AES-CCM transform for ARMv8 with Crypto Extensions
*
- * Copyright (C) 2013 - 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2013 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -32,7 +32,7 @@ ENTRY(ce_aes_ccm_auth_data)
beq 8f /* out of input? */
cbnz w8, 0b
eor v0.16b, v0.16b, v1.16b
-1: ld1 {v3.16b}, [x4] /* load first round key */
+1: ld1 {v3.4s}, [x4] /* load first round key */
prfm pldl1strm, [x1]
cmp w5, #12 /* which key size? */
add x6, x4, #16
@@ -42,17 +42,17 @@ ENTRY(ce_aes_ccm_auth_data)
mov v5.16b, v3.16b
b 4f
2: mov v4.16b, v3.16b
- ld1 {v5.16b}, [x6], #16 /* load 2nd round key */
+ ld1 {v5.4s}, [x6], #16 /* load 2nd round key */
3: aese v0.16b, v4.16b
aesmc v0.16b, v0.16b
-4: ld1 {v3.16b}, [x6], #16 /* load next round key */
+4: ld1 {v3.4s}, [x6], #16 /* load next round key */
aese v0.16b, v5.16b
aesmc v0.16b, v0.16b
-5: ld1 {v4.16b}, [x6], #16 /* load next round key */
+5: ld1 {v4.4s}, [x6], #16 /* load next round key */
subs w7, w7, #3
aese v0.16b, v3.16b
aesmc v0.16b, v0.16b
- ld1 {v5.16b}, [x6], #16 /* load next round key */
+ ld1 {v5.4s}, [x6], #16 /* load next round key */
bpl 3b
aese v0.16b, v4.16b
subs w2, w2, #16 /* last data? */
@@ -90,7 +90,7 @@ ENDPROC(ce_aes_ccm_auth_data)
* u32 rounds);
*/
ENTRY(ce_aes_ccm_final)
- ld1 {v3.16b}, [x2], #16 /* load first round key */
+ ld1 {v3.4s}, [x2], #16 /* load first round key */
ld1 {v0.16b}, [x0] /* load mac */
cmp w3, #12 /* which key size? */
sub w3, w3, #2 /* modified # of rounds */
@@ -100,17 +100,17 @@ ENTRY(ce_aes_ccm_final)
mov v5.16b, v3.16b
b 2f
0: mov v4.16b, v3.16b
-1: ld1 {v5.16b}, [x2], #16 /* load next round key */
+1: ld1 {v5.4s}, [x2], #16 /* load next round key */
aese v0.16b, v4.16b
aesmc v0.16b, v0.16b
aese v1.16b, v4.16b
aesmc v1.16b, v1.16b
-2: ld1 {v3.16b}, [x2], #16 /* load next round key */
+2: ld1 {v3.4s}, [x2], #16 /* load next round key */
aese v0.16b, v5.16b
aesmc v0.16b, v0.16b
aese v1.16b, v5.16b
aesmc v1.16b, v1.16b
-3: ld1 {v4.16b}, [x2], #16 /* load next round key */
+3: ld1 {v4.4s}, [x2], #16 /* load next round key */
subs w3, w3, #3
aese v0.16b, v3.16b
aesmc v0.16b, v0.16b
@@ -137,31 +137,31 @@ CPU_LE( rev x8, x8 ) /* keep swabbed ctr in reg */
cmp w4, #12 /* which key size? */
sub w7, w4, #2 /* get modified # of rounds */
ins v1.d[1], x9 /* no carry in lower ctr */
- ld1 {v3.16b}, [x3] /* load first round key */
+ ld1 {v3.4s}, [x3] /* load first round key */
add x10, x3, #16
bmi 1f
bne 4f
mov v5.16b, v3.16b
b 3f
1: mov v4.16b, v3.16b
- ld1 {v5.16b}, [x10], #16 /* load 2nd round key */
+ ld1 {v5.4s}, [x10], #16 /* load 2nd round key */
2: /* inner loop: 3 rounds, 2x interleaved */
aese v0.16b, v4.16b
aesmc v0.16b, v0.16b
aese v1.16b, v4.16b
aesmc v1.16b, v1.16b
-3: ld1 {v3.16b}, [x10], #16 /* load next round key */
+3: ld1 {v3.4s}, [x10], #16 /* load next round key */
aese v0.16b, v5.16b
aesmc v0.16b, v0.16b
aese v1.16b, v5.16b
aesmc v1.16b, v1.16b
-4: ld1 {v4.16b}, [x10], #16 /* load next round key */
+4: ld1 {v4.4s}, [x10], #16 /* load next round key */
subs w7, w7, #3
aese v0.16b, v3.16b
aesmc v0.16b, v0.16b
aese v1.16b, v3.16b
aesmc v1.16b, v1.16b
- ld1 {v5.16b}, [x10], #16 /* load next round key */
+ ld1 {v5.4s}, [x10], #16 /* load next round key */
bpl 2b
aese v0.16b, v4.16b
aese v1.16b, v4.16b
diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c
index 6a7dbc7c83a6..a1254036f2b1 100644
--- a/arch/arm64/crypto/aes-ce-ccm-glue.c
+++ b/arch/arm64/crypto/aes-ce-ccm-glue.c
@@ -1,7 +1,7 @@
/*
* aes-ccm-glue.c - AES-CCM transform for ARMv8 with Crypto Extensions
*
- * Copyright (C) 2013 - 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2013 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -9,6 +9,7 @@
*/
#include <asm/neon.h>
+#include <asm/simd.h>
#include <asm/unaligned.h>
#include <crypto/aes.h>
#include <crypto/scatterwalk.h>
@@ -44,6 +45,8 @@ asmlinkage void ce_aes_ccm_decrypt(u8 out[], u8 const in[], u32 cbytes,
asmlinkage void ce_aes_ccm_final(u8 mac[], u8 const ctr[], u32 const rk[],
u32 rounds);
+asmlinkage void __aes_arm64_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
+
static int ccm_setkey(struct crypto_aead *tfm, const u8 *in_key,
unsigned int key_len)
{
@@ -103,7 +106,45 @@ static int ccm_init_mac(struct aead_request *req, u8 maciv[], u32 msglen)
return 0;
}
-static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[])
+static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[],
+ u32 abytes, u32 *macp, bool use_neon)
+{
+ if (likely(use_neon)) {
+ ce_aes_ccm_auth_data(mac, in, abytes, macp, key->key_enc,
+ num_rounds(key));
+ } else {
+ if (*macp > 0 && *macp < AES_BLOCK_SIZE) {
+ int added = min(abytes, AES_BLOCK_SIZE - *macp);
+
+ crypto_xor(&mac[*macp], in, added);
+
+ *macp += added;
+ in += added;
+ abytes -= added;
+ }
+
+ while (abytes > AES_BLOCK_SIZE) {
+ __aes_arm64_encrypt(key->key_enc, mac, mac,
+ num_rounds(key));
+ crypto_xor(mac, in, AES_BLOCK_SIZE);
+
+ in += AES_BLOCK_SIZE;
+ abytes -= AES_BLOCK_SIZE;
+ }
+
+ if (abytes > 0) {
+ __aes_arm64_encrypt(key->key_enc, mac, mac,
+ num_rounds(key));
+ crypto_xor(mac, in, abytes);
+ *macp = abytes;
+ } else {
+ *macp = 0;
+ }
+ }
+}
+
+static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[],
+ bool use_neon)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
@@ -122,8 +163,7 @@ static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[])
ltag.len = 6;
}
- ce_aes_ccm_auth_data(mac, (u8 *)&ltag, ltag.len, &macp, ctx->key_enc,
- num_rounds(ctx));
+ ccm_update_mac(ctx, mac, (u8 *)&ltag, ltag.len, &macp, use_neon);
scatterwalk_start(&walk, req->src);
do {
@@ -135,8 +175,7 @@ static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[])
n = scatterwalk_clamp(&walk, len);
}
p = scatterwalk_map(&walk);
- ce_aes_ccm_auth_data(mac, p, n, &macp, ctx->key_enc,
- num_rounds(ctx));
+ ccm_update_mac(ctx, mac, p, n, &macp, use_neon);
len -= n;
scatterwalk_unmap(p);
@@ -145,6 +184,56 @@ static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[])
} while (len);
}
+static int ccm_crypt_fallback(struct skcipher_walk *walk, u8 mac[], u8 iv0[],
+ struct crypto_aes_ctx *ctx, bool enc)
+{
+ u8 buf[AES_BLOCK_SIZE];
+ int err = 0;
+
+ while (walk->nbytes) {
+ int blocks = walk->nbytes / AES_BLOCK_SIZE;
+ u32 tail = walk->nbytes % AES_BLOCK_SIZE;
+ u8 *dst = walk->dst.virt.addr;
+ u8 *src = walk->src.virt.addr;
+ u32 nbytes = walk->nbytes;
+
+ if (nbytes == walk->total && tail > 0) {
+ blocks++;
+ tail = 0;
+ }
+
+ do {
+ u32 bsize = AES_BLOCK_SIZE;
+
+ if (nbytes < AES_BLOCK_SIZE)
+ bsize = nbytes;
+
+ crypto_inc(walk->iv, AES_BLOCK_SIZE);
+ __aes_arm64_encrypt(ctx->key_enc, buf, walk->iv,
+ num_rounds(ctx));
+ __aes_arm64_encrypt(ctx->key_enc, mac, mac,
+ num_rounds(ctx));
+ if (enc)
+ crypto_xor(mac, src, bsize);
+ crypto_xor_cpy(dst, src, buf, bsize);
+ if (!enc)
+ crypto_xor(mac, dst, bsize);
+ dst += bsize;
+ src += bsize;
+ nbytes -= bsize;
+ } while (--blocks);
+
+ err = skcipher_walk_done(walk, tail);
+ }
+
+ if (!err) {
+ __aes_arm64_encrypt(ctx->key_enc, buf, iv0, num_rounds(ctx));
+ __aes_arm64_encrypt(ctx->key_enc, mac, mac, num_rounds(ctx));
+ crypto_xor(mac, buf, AES_BLOCK_SIZE);
+ }
+ return err;
+}
+
static int ccm_encrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
@@ -153,39 +242,46 @@ static int ccm_encrypt(struct aead_request *req)
u8 __aligned(8) mac[AES_BLOCK_SIZE];
u8 buf[AES_BLOCK_SIZE];
u32 len = req->cryptlen;
+ bool use_neon = may_use_simd();
int err;
err = ccm_init_mac(req, mac, len);
if (err)
return err;
- kernel_neon_begin_partial(6);
+ if (likely(use_neon))
+ kernel_neon_begin();
if (req->assoclen)
- ccm_calculate_auth_mac(req, mac);
+ ccm_calculate_auth_mac(req, mac, use_neon);
/* preserve the original iv for the final round */
memcpy(buf, req->iv, AES_BLOCK_SIZE);
err = skcipher_walk_aead_encrypt(&walk, req, true);
- while (walk.nbytes) {
- u32 tail = walk.nbytes % AES_BLOCK_SIZE;
-
- if (walk.nbytes == walk.total)
- tail = 0;
+ if (likely(use_neon)) {
+ while (walk.nbytes) {
+ u32 tail = walk.nbytes % AES_BLOCK_SIZE;
- ce_aes_ccm_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
- walk.nbytes - tail, ctx->key_enc,
- num_rounds(ctx), mac, walk.iv);
+ if (walk.nbytes == walk.total)
+ tail = 0;
- err = skcipher_walk_done(&walk, tail);
- }
- if (!err)
- ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx));
+ ce_aes_ccm_encrypt(walk.dst.virt.addr,
+ walk.src.virt.addr,
+ walk.nbytes - tail, ctx->key_enc,
+ num_rounds(ctx), mac, walk.iv);
- kernel_neon_end();
+ err = skcipher_walk_done(&walk, tail);
+ }
+ if (!err)
+ ce_aes_ccm_final(mac, buf, ctx->key_enc,
+ num_rounds(ctx));
+ kernel_neon_end();
+ } else {
+ err = ccm_crypt_fallback(&walk, mac, buf, ctx, true);
+ }
if (err)
return err;
@@ -205,38 +301,46 @@ static int ccm_decrypt(struct aead_request *req)
u8 __aligned(8) mac[AES_BLOCK_SIZE];
u8 buf[AES_BLOCK_SIZE];
u32 len = req->cryptlen - authsize;
+ bool use_neon = may_use_simd();
int err;
err = ccm_init_mac(req, mac, len);
if (err)
return err;
- kernel_neon_begin_partial(6);
+ if (likely(use_neon))
+ kernel_neon_begin();
if (req->assoclen)
- ccm_calculate_auth_mac(req, mac);
+ ccm_calculate_auth_mac(req, mac, use_neon);
/* preserve the original iv for the final round */
memcpy(buf, req->iv, AES_BLOCK_SIZE);
err = skcipher_walk_aead_decrypt(&walk, req, true);
- while (walk.nbytes) {
- u32 tail = walk.nbytes % AES_BLOCK_SIZE;
+ if (likely(use_neon)) {
+ while (walk.nbytes) {
+ u32 tail = walk.nbytes % AES_BLOCK_SIZE;
- if (walk.nbytes == walk.total)
- tail = 0;
+ if (walk.nbytes == walk.total)
+ tail = 0;
- ce_aes_ccm_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
- walk.nbytes - tail, ctx->key_enc,
- num_rounds(ctx), mac, walk.iv);
+ ce_aes_ccm_decrypt(walk.dst.virt.addr,
+ walk.src.virt.addr,
+ walk.nbytes - tail, ctx->key_enc,
+ num_rounds(ctx), mac, walk.iv);
- err = skcipher_walk_done(&walk, tail);
- }
- if (!err)
- ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx));
+ err = skcipher_walk_done(&walk, tail);
+ }
+ if (!err)
+ ce_aes_ccm_final(mac, buf, ctx->key_enc,
+ num_rounds(ctx));
- kernel_neon_end();
+ kernel_neon_end();
+ } else {
+ err = ccm_crypt_fallback(&walk, mac, buf, ctx, false);
+ }
if (err)
return err;
diff --git a/arch/arm64/crypto/aes-ce-cipher.c b/arch/arm64/crypto/aes-ce-cipher.c
index 50d9fe11d0c8..6a75cd75ed11 100644
--- a/arch/arm64/crypto/aes-ce-cipher.c
+++ b/arch/arm64/crypto/aes-ce-cipher.c
@@ -1,7 +1,7 @@
/*
* aes-ce-cipher.c - core AES cipher using ARMv8 Crypto Extensions
*
- * Copyright (C) 2013 - 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2013 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -9,6 +9,8 @@
*/
#include <asm/neon.h>
+#include <asm/simd.h>
+#include <asm/unaligned.h>
#include <crypto/aes.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>
@@ -20,6 +22,9 @@ MODULE_DESCRIPTION("Synchronous AES cipher using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
+asmlinkage void __aes_arm64_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
+asmlinkage void __aes_arm64_decrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
+
struct aes_block {
u8 b[AES_BLOCK_SIZE];
};
@@ -44,27 +49,32 @@ static void aes_cipher_encrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
void *dummy0;
int dummy1;
- kernel_neon_begin_partial(4);
+ if (!may_use_simd()) {
+ __aes_arm64_encrypt(ctx->key_enc, dst, src, num_rounds(ctx));
+ return;
+ }
+
+ kernel_neon_begin();
__asm__(" ld1 {v0.16b}, %[in] ;"
- " ld1 {v1.16b}, [%[key]], #16 ;"
+ " ld1 {v1.4s}, [%[key]], #16 ;"
" cmp %w[rounds], #10 ;"
" bmi 0f ;"
" bne 3f ;"
" mov v3.16b, v1.16b ;"
" b 2f ;"
"0: mov v2.16b, v1.16b ;"
- " ld1 {v3.16b}, [%[key]], #16 ;"
+ " ld1 {v3.4s}, [%[key]], #16 ;"
"1: aese v0.16b, v2.16b ;"
" aesmc v0.16b, v0.16b ;"
- "2: ld1 {v1.16b}, [%[key]], #16 ;"
+ "2: ld1 {v1.4s}, [%[key]], #16 ;"
" aese v0.16b, v3.16b ;"
" aesmc v0.16b, v0.16b ;"
- "3: ld1 {v2.16b}, [%[key]], #16 ;"
+ "3: ld1 {v2.4s}, [%[key]], #16 ;"
" subs %w[rounds], %w[rounds], #3 ;"
" aese v0.16b, v1.16b ;"
" aesmc v0.16b, v0.16b ;"
- " ld1 {v3.16b}, [%[key]], #16 ;"
+ " ld1 {v3.4s}, [%[key]], #16 ;"
" bpl 1b ;"
" aese v0.16b, v2.16b ;"
" eor v0.16b, v0.16b, v3.16b ;"
@@ -89,27 +99,32 @@ static void aes_cipher_decrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
void *dummy0;
int dummy1;
- kernel_neon_begin_partial(4);
+ if (!may_use_simd()) {
+ __aes_arm64_decrypt(ctx->key_dec, dst, src, num_rounds(ctx));
+ return;
+ }
+
+ kernel_neon_begin();
__asm__(" ld1 {v0.16b}, %[in] ;"
- " ld1 {v1.16b}, [%[key]], #16 ;"
+ " ld1 {v1.4s}, [%[key]], #16 ;"
" cmp %w[rounds], #10 ;"
" bmi 0f ;"
" bne 3f ;"
" mov v3.16b, v1.16b ;"
" b 2f ;"
"0: mov v2.16b, v1.16b ;"
- " ld1 {v3.16b}, [%[key]], #16 ;"
+ " ld1 {v3.4s}, [%[key]], #16 ;"
"1: aesd v0.16b, v2.16b ;"
" aesimc v0.16b, v0.16b ;"
- "2: ld1 {v1.16b}, [%[key]], #16 ;"
+ "2: ld1 {v1.4s}, [%[key]], #16 ;"
" aesd v0.16b, v3.16b ;"
" aesimc v0.16b, v0.16b ;"
- "3: ld1 {v2.16b}, [%[key]], #16 ;"
+ "3: ld1 {v2.4s}, [%[key]], #16 ;"
" subs %w[rounds], %w[rounds], #3 ;"
" aesd v0.16b, v1.16b ;"
" aesimc v0.16b, v0.16b ;"
- " ld1 {v3.16b}, [%[key]], #16 ;"
+ " ld1 {v3.4s}, [%[key]], #16 ;"
" bpl 1b ;"
" aesd v0.16b, v2.16b ;"
" eor v0.16b, v0.16b, v3.16b ;"
@@ -165,20 +180,16 @@ int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
key_len != AES_KEYSIZE_256)
return -EINVAL;
- memcpy(ctx->key_enc, in_key, key_len);
ctx->key_length = key_len;
+ for (i = 0; i < kwords; i++)
+ ctx->key_enc[i] = get_unaligned_le32(in_key + i * sizeof(u32));
- kernel_neon_begin_partial(2);
+ kernel_neon_begin();
for (i = 0; i < sizeof(rcon); i++) {
u32 *rki = ctx->key_enc + (i * kwords);
u32 *rko = rki + kwords;
-#ifndef CONFIG_CPU_BIG_ENDIAN
rko[0] = ror32(aes_sub(rki[kwords - 1]), 8) ^ rcon[i] ^ rki[0];
-#else
- rko[0] = rol32(aes_sub(rki[kwords - 1]), 8) ^ (rcon[i] << 24) ^
- rki[0];
-#endif
rko[1] = rko[0] ^ rki[1];
rko[2] = rko[1] ^ rki[2];
rko[3] = rko[2] ^ rki[3];
@@ -210,9 +221,9 @@ int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
key_dec[0] = key_enc[j];
for (i = 1, j--; j > 0; i++, j--)
- __asm__("ld1 {v0.16b}, %[in] ;"
+ __asm__("ld1 {v0.4s}, %[in] ;"
"aesimc v1.16b, v0.16b ;"
- "st1 {v1.16b}, %[out] ;"
+ "st1 {v1.4s}, %[out] ;"
: [out] "=Q"(key_dec[i])
: [in] "Q"(key_enc[j])
diff --git a/arch/arm64/crypto/aes-ce.S b/arch/arm64/crypto/aes-ce.S
index b46093d567e5..50330f5c3adc 100644
--- a/arch/arm64/crypto/aes-ce.S
+++ b/arch/arm64/crypto/aes-ce.S
@@ -2,7 +2,7 @@
* linux/arch/arm64/crypto/aes-ce.S - AES cipher for ARMv8 with
* Crypto Extensions
*
- * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2013 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -22,11 +22,11 @@
cmp \rounds, #12
blo 2222f /* 128 bits */
beq 1111f /* 192 bits */
- ld1 {v17.16b-v18.16b}, [\rk], #32
-1111: ld1 {v19.16b-v20.16b}, [\rk], #32
-2222: ld1 {v21.16b-v24.16b}, [\rk], #64
- ld1 {v25.16b-v28.16b}, [\rk], #64
- ld1 {v29.16b-v31.16b}, [\rk]
+ ld1 {v17.4s-v18.4s}, [\rk], #32
+1111: ld1 {v19.4s-v20.4s}, [\rk], #32
+2222: ld1 {v21.4s-v24.4s}, [\rk], #64
+ ld1 {v25.4s-v28.4s}, [\rk], #64
+ ld1 {v29.4s-v31.4s}, [\rk]
.endm
/* prepare for encryption with key in rk[] */
diff --git a/arch/arm64/crypto/aes-cipher-core.S b/arch/arm64/crypto/aes-cipher-core.S
index f2f9cc519309..6d2445d603cc 100644
--- a/arch/arm64/crypto/aes-cipher-core.S
+++ b/arch/arm64/crypto/aes-cipher-core.S
@@ -10,6 +10,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/cache.h>
.text
@@ -17,94 +18,155 @@
out .req x1
in .req x2
rounds .req x3
- tt .req x4
- lt .req x2
+ tt .req x2
- .macro __pair, enc, reg0, reg1, in0, in1e, in1d, shift
+ .macro __pair1, sz, op, reg0, reg1, in0, in1e, in1d, shift
+ .ifc \op\shift, b0
+ ubfiz \reg0, \in0, #2, #8
+ ubfiz \reg1, \in1e, #2, #8
+ .else
ubfx \reg0, \in0, #\shift, #8
- .if \enc
ubfx \reg1, \in1e, #\shift, #8
- .else
- ubfx \reg1, \in1d, #\shift, #8
.endif
+
+ /*
+ * AArch64 cannot do byte size indexed loads from a table containing
+ * 32-bit quantities, i.e., 'ldrb w12, [tt, w12, uxtw #2]' is not a
+ * valid instruction. So perform the shift explicitly first for the
+ * high bytes (the low byte is shifted implicitly by using ubfiz rather
+ * than ubfx above)
+ */
+ .ifnc \op, b
ldr \reg0, [tt, \reg0, uxtw #2]
ldr \reg1, [tt, \reg1, uxtw #2]
+ .else
+ .if \shift > 0
+ lsl \reg0, \reg0, #2
+ lsl \reg1, \reg1, #2
+ .endif
+ ldrb \reg0, [tt, \reg0, uxtw]
+ ldrb \reg1, [tt, \reg1, uxtw]
+ .endif
.endm
- .macro __hround, out0, out1, in0, in1, in2, in3, t0, t1, enc
+ .macro __pair0, sz, op, reg0, reg1, in0, in1e, in1d, shift
+ ubfx \reg0, \in0, #\shift, #8
+ ubfx \reg1, \in1d, #\shift, #8
+ ldr\op \reg0, [tt, \reg0, uxtw #\sz]
+ ldr\op \reg1, [tt, \reg1, uxtw #\sz]
+ .endm
+
+ .macro __hround, out0, out1, in0, in1, in2, in3, t0, t1, enc, sz, op
ldp \out0, \out1, [rk], #8
- __pair \enc, w13, w14, \in0, \in1, \in3, 0
- __pair \enc, w15, w16, \in1, \in2, \in0, 8
- __pair \enc, w17, w18, \in2, \in3, \in1, 16
- __pair \enc, \t0, \t1, \in3, \in0, \in2, 24
-
- eor \out0, \out0, w13
- eor \out1, \out1, w14
- eor \out0, \out0, w15, ror #24
- eor \out1, \out1, w16, ror #24
- eor \out0, \out0, w17, ror #16
- eor \out1, \out1, w18, ror #16
+ __pair\enc \sz, \op, w12, w13, \in0, \in1, \in3, 0
+ __pair\enc \sz, \op, w14, w15, \in1, \in2, \in0, 8
+ __pair\enc \sz, \op, w16, w17, \in2, \in3, \in1, 16
+ __pair\enc \sz, \op, \t0, \t1, \in3, \in0, \in2, 24
+
+ eor \out0, \out0, w12
+ eor \out1, \out1, w13
+ eor \out0, \out0, w14, ror #24
+ eor \out1, \out1, w15, ror #24
+ eor \out0, \out0, w16, ror #16
+ eor \out1, \out1, w17, ror #16
eor \out0, \out0, \t0, ror #8
eor \out1, \out1, \t1, ror #8
.endm
- .macro fround, out0, out1, out2, out3, in0, in1, in2, in3
- __hround \out0, \out1, \in0, \in1, \in2, \in3, \out2, \out3, 1
- __hround \out2, \out3, \in2, \in3, \in0, \in1, \in1, \in2, 1
+ .macro fround, out0, out1, out2, out3, in0, in1, in2, in3, sz=2, op
+ __hround \out0, \out1, \in0, \in1, \in2, \in3, \out2, \out3, 1, \sz, \op
+ __hround \out2, \out3, \in2, \in3, \in0, \in1, \in1, \in2, 1, \sz, \op
.endm
- .macro iround, out0, out1, out2, out3, in0, in1, in2, in3
- __hround \out0, \out1, \in0, \in3, \in2, \in1, \out2, \out3, 0
- __hround \out2, \out3, \in2, \in1, \in0, \in3, \in1, \in0, 0
+ .macro iround, out0, out1, out2, out3, in0, in1, in2, in3, sz=2, op
+ __hround \out0, \out1, \in0, \in3, \in2, \in1, \out2, \out3, 0, \sz, \op
+ __hround \out2, \out3, \in2, \in1, \in0, \in3, \in1, \in0, 0, \sz, \op
.endm
- .macro do_crypt, round, ttab, ltab
- ldp w5, w6, [in]
- ldp w7, w8, [in, #8]
- ldp w9, w10, [rk], #16
- ldp w11, w12, [rk, #-8]
+ .macro do_crypt, round, ttab, ltab, bsz
+ ldp w4, w5, [in]
+ ldp w6, w7, [in, #8]
+ ldp w8, w9, [rk], #16
+ ldp w10, w11, [rk, #-8]
+CPU_BE( rev w4, w4 )
CPU_BE( rev w5, w5 )
CPU_BE( rev w6, w6 )
CPU_BE( rev w7, w7 )
-CPU_BE( rev w8, w8 )
+ eor w4, w4, w8
eor w5, w5, w9
eor w6, w6, w10
eor w7, w7, w11
- eor w8, w8, w12
adr_l tt, \ttab
- adr_l lt, \ltab
tbnz rounds, #1, 1f
-0: \round w9, w10, w11, w12, w5, w6, w7, w8
- \round w5, w6, w7, w8, w9, w10, w11, w12
+0: \round w8, w9, w10, w11, w4, w5, w6, w7
+ \round w4, w5, w6, w7, w8, w9, w10, w11
1: subs rounds, rounds, #4
- \round w9, w10, w11, w12, w5, w6, w7, w8
- csel tt, tt, lt, hi
- \round w5, w6, w7, w8, w9, w10, w11, w12
- b.hi 0b
-
+ \round w8, w9, w10, w11, w4, w5, w6, w7
+ b.ls 3f
+2: \round w4, w5, w6, w7, w8, w9, w10, w11
+ b 0b
+3: adr_l tt, \ltab
+ \round w4, w5, w6, w7, w8, w9, w10, w11, \bsz, b
+
+CPU_BE( rev w4, w4 )
CPU_BE( rev w5, w5 )
CPU_BE( rev w6, w6 )
CPU_BE( rev w7, w7 )
-CPU_BE( rev w8, w8 )
- stp w5, w6, [out]
- stp w7, w8, [out, #8]
+ stp w4, w5, [out]
+ stp w6, w7, [out, #8]
ret
.endm
- .align 5
+ .align L1_CACHE_SHIFT
+ .type __aes_arm64_inverse_sbox, %object
+__aes_arm64_inverse_sbox:
+ .byte 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
+ .byte 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
+ .byte 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
+ .byte 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
+ .byte 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
+ .byte 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
+ .byte 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
+ .byte 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
+ .byte 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
+ .byte 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
+ .byte 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
+ .byte 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
+ .byte 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
+ .byte 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
+ .byte 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
+ .byte 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
+ .byte 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
+ .byte 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
+ .byte 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
+ .byte 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
+ .byte 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
+ .byte 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
+ .byte 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
+ .byte 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
+ .byte 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
+ .byte 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
+ .byte 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
+ .byte 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
+ .byte 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
+ .byte 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
+ .byte 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
+ .byte 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
+ .size __aes_arm64_inverse_sbox, . - __aes_arm64_inverse_sbox
+
ENTRY(__aes_arm64_encrypt)
- do_crypt fround, crypto_ft_tab, crypto_fl_tab
+ do_crypt fround, crypto_ft_tab, crypto_ft_tab + 1, 2
ENDPROC(__aes_arm64_encrypt)
.align 5
ENTRY(__aes_arm64_decrypt)
- do_crypt iround, crypto_it_tab, crypto_il_tab
+ do_crypt iround, crypto_it_tab, __aes_arm64_inverse_sbox, 0
ENDPROC(__aes_arm64_decrypt)
diff --git a/arch/arm64/crypto/aes-ctr-fallback.h b/arch/arm64/crypto/aes-ctr-fallback.h
new file mode 100644
index 000000000000..c9285717b6b5
--- /dev/null
+++ b/arch/arm64/crypto/aes-ctr-fallback.h
@@ -0,0 +1,53 @@
+/*
+ * Fallback for sync aes(ctr) in contexts where kernel mode NEON
+ * is not allowed
+ *
+ * Copyright (C) 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <crypto/aes.h>
+#include <crypto/internal/skcipher.h>
+
+asmlinkage void __aes_arm64_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
+
+static inline int aes_ctr_encrypt_fallback(struct crypto_aes_ctx *ctx,
+ struct skcipher_request *req)
+{
+ struct skcipher_walk walk;
+ u8 buf[AES_BLOCK_SIZE];
+ int err;
+
+ err = skcipher_walk_virt(&walk, req, true);
+
+ while (walk.nbytes > 0) {
+ u8 *dst = walk.dst.virt.addr;
+ u8 *src = walk.src.virt.addr;
+ int nbytes = walk.nbytes;
+ int tail = 0;
+
+ if (nbytes < walk.total) {
+ nbytes = round_down(nbytes, AES_BLOCK_SIZE);
+ tail = walk.nbytes % AES_BLOCK_SIZE;
+ }
+
+ do {
+ int bsize = min(nbytes, AES_BLOCK_SIZE);
+
+ __aes_arm64_encrypt(ctx->key_enc, buf, walk.iv,
+ 6 + ctx->key_length / 4);
+ crypto_xor_cpy(dst, src, buf, bsize);
+ crypto_inc(walk.iv, AES_BLOCK_SIZE);
+
+ dst += AES_BLOCK_SIZE;
+ src += AES_BLOCK_SIZE;
+ nbytes -= AES_BLOCK_SIZE;
+ } while (nbytes > 0);
+
+ err = skcipher_walk_done(&walk, tail);
+ }
+ return err;
+}
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index bcf596b0197e..998ba519a026 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -10,6 +10,7 @@
#include <asm/neon.h>
#include <asm/hwcap.h>
+#include <asm/simd.h>
#include <crypto/aes.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
@@ -19,6 +20,7 @@
#include <crypto/xts.h>
#include "aes-ce-setkey.h"
+#include "aes-ctr-fallback.h"
#ifdef USE_V8_CRYPTO_EXTENSIONS
#define MODE "ce"
@@ -241,9 +243,7 @@ static int ctr_encrypt(struct skcipher_request *req)
aes_ctr_encrypt(tail, NULL, (u8 *)ctx->key_enc, rounds,
blocks, walk.iv, first);
- if (tdst != tsrc)
- memcpy(tdst, tsrc, nbytes);
- crypto_xor(tdst, tail, nbytes);
+ crypto_xor_cpy(tdst, tsrc, tail, nbytes);
err = skcipher_walk_done(&walk, 0);
}
kernel_neon_end();
@@ -251,6 +251,17 @@ static int ctr_encrypt(struct skcipher_request *req)
return err;
}
+static int ctr_encrypt_sync(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ if (!may_use_simd())
+ return aes_ctr_encrypt_fallback(ctx, req);
+
+ return ctr_encrypt(req);
+}
+
static int xts_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
@@ -357,8 +368,8 @@ static struct skcipher_alg aes_algs[] = { {
.ivsize = AES_BLOCK_SIZE,
.chunksize = AES_BLOCK_SIZE,
.setkey = skcipher_aes_setkey,
- .encrypt = ctr_encrypt,
- .decrypt = ctr_encrypt,
+ .encrypt = ctr_encrypt_sync,
+ .decrypt = ctr_encrypt_sync,
}, {
.base = {
.cra_name = "__xts(aes)",
@@ -460,11 +471,35 @@ static int mac_init(struct shash_desc *desc)
return 0;
}
+static void mac_do_update(struct crypto_aes_ctx *ctx, u8 const in[], int blocks,
+ u8 dg[], int enc_before, int enc_after)
+{
+ int rounds = 6 + ctx->key_length / 4;
+
+ if (may_use_simd()) {
+ kernel_neon_begin();
+ aes_mac_update(in, ctx->key_enc, rounds, blocks, dg, enc_before,
+ enc_after);
+ kernel_neon_end();
+ } else {
+ if (enc_before)
+ __aes_arm64_encrypt(ctx->key_enc, dg, dg, rounds);
+
+ while (blocks--) {
+ crypto_xor(dg, in, AES_BLOCK_SIZE);
+ in += AES_BLOCK_SIZE;
+
+ if (blocks || enc_after)
+ __aes_arm64_encrypt(ctx->key_enc, dg, dg,
+ rounds);
+ }
+ }
+}
+
static int mac_update(struct shash_desc *desc, const u8 *p, unsigned int len)
{
struct mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
struct mac_desc_ctx *ctx = shash_desc_ctx(desc);
- int rounds = 6 + tctx->key.key_length / 4;
while (len > 0) {
unsigned int l;
@@ -476,10 +511,8 @@ static int mac_update(struct shash_desc *desc, const u8 *p, unsigned int len)
len %= AES_BLOCK_SIZE;
- kernel_neon_begin();
- aes_mac_update(p, tctx->key.key_enc, rounds, blocks,
- ctx->dg, (ctx->len != 0), (len != 0));
- kernel_neon_end();
+ mac_do_update(&tctx->key, p, blocks, ctx->dg,
+ (ctx->len != 0), (len != 0));
p += blocks * AES_BLOCK_SIZE;
@@ -507,11 +540,8 @@ static int cbcmac_final(struct shash_desc *desc, u8 *out)
{
struct mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
struct mac_desc_ctx *ctx = shash_desc_ctx(desc);
- int rounds = 6 + tctx->key.key_length / 4;
- kernel_neon_begin();
- aes_mac_update(NULL, tctx->key.key_enc, rounds, 0, ctx->dg, 1, 0);
- kernel_neon_end();
+ mac_do_update(&tctx->key, NULL, 0, ctx->dg, 1, 0);
memcpy(out, ctx->dg, AES_BLOCK_SIZE);
@@ -522,7 +552,6 @@ static int cmac_final(struct shash_desc *desc, u8 *out)
{
struct mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
struct mac_desc_ctx *ctx = shash_desc_ctx(desc);
- int rounds = 6 + tctx->key.key_length / 4;
u8 *consts = tctx->consts;
if (ctx->len != AES_BLOCK_SIZE) {
@@ -530,9 +559,7 @@ static int cmac_final(struct shash_desc *desc, u8 *out)
consts += AES_BLOCK_SIZE;
}
- kernel_neon_begin();
- aes_mac_update(consts, tctx->key.key_enc, rounds, 1, ctx->dg, 0, 1);
- kernel_neon_end();
+ mac_do_update(&tctx->key, consts, 1, ctx->dg, 0, 1);
memcpy(out, ctx->dg, AES_BLOCK_SIZE);
diff --git a/arch/arm64/crypto/aes-neonbs-glue.c b/arch/arm64/crypto/aes-neonbs-glue.c
index db2501d93550..c55d68ccb89f 100644
--- a/arch/arm64/crypto/aes-neonbs-glue.c
+++ b/arch/arm64/crypto/aes-neonbs-glue.c
@@ -1,7 +1,7 @@
/*
* Bit sliced AES using NEON instructions
*
- * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2016 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -9,12 +9,15 @@
*/
#include <asm/neon.h>
+#include <asm/simd.h>
#include <crypto/aes.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/xts.h>
#include <linux/module.h>
+#include "aes-ctr-fallback.h"
+
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
@@ -58,6 +61,11 @@ struct aesbs_cbc_ctx {
u32 enc[AES_MAX_KEYLENGTH_U32];
};
+struct aesbs_ctr_ctx {
+ struct aesbs_ctx key; /* must be first member */
+ struct crypto_aes_ctx fallback;
+};
+
struct aesbs_xts_ctx {
struct aesbs_ctx key;
u32 twkey[AES_MAX_KEYLENGTH_U32];
@@ -196,6 +204,25 @@ static int cbc_decrypt(struct skcipher_request *req)
return err;
}
+static int aesbs_ctr_setkey_sync(struct crypto_skcipher *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int err;
+
+ err = crypto_aes_expand_key(&ctx->fallback, in_key, key_len);
+ if (err)
+ return err;
+
+ ctx->key.rounds = 6 + key_len / 4;
+
+ kernel_neon_begin();
+ aesbs_convert_key(ctx->key.rk, ctx->fallback.key_enc, ctx->key.rounds);
+ kernel_neon_end();
+
+ return 0;
+}
+
static int ctr_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
@@ -224,9 +251,8 @@ static int ctr_encrypt(struct skcipher_request *req)
u8 *dst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
u8 *src = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
- if (dst != src)
- memcpy(dst, src, walk.total % AES_BLOCK_SIZE);
- crypto_xor(dst, final, walk.total % AES_BLOCK_SIZE);
+ crypto_xor_cpy(dst, src, final,
+ walk.total % AES_BLOCK_SIZE);
err = skcipher_walk_done(&walk, 0);
break;
@@ -260,6 +286,17 @@ static int aesbs_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
return aesbs_setkey(tfm, in_key, key_len);
}
+static int ctr_encrypt_sync(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ if (!may_use_simd())
+ return aes_ctr_encrypt_fallback(&ctx->fallback, req);
+
+ return ctr_encrypt(req);
+}
+
static int __xts_crypt(struct skcipher_request *req,
void (*fn)(u8 out[], u8 const in[], u8 const rk[],
int rounds, int blocks, u8 iv[]))
@@ -356,7 +393,7 @@ static struct skcipher_alg aes_algs[] = { {
.base.cra_driver_name = "ctr-aes-neonbs",
.base.cra_priority = 250 - 1,
.base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct aesbs_ctx),
+ .base.cra_ctxsize = sizeof(struct aesbs_ctr_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = AES_MIN_KEY_SIZE,
@@ -364,9 +401,9 @@ static struct skcipher_alg aes_algs[] = { {
.chunksize = AES_BLOCK_SIZE,
.walksize = 8 * AES_BLOCK_SIZE,
.ivsize = AES_BLOCK_SIZE,
- .setkey = aesbs_setkey,
- .encrypt = ctr_encrypt,
- .decrypt = ctr_encrypt,
+ .setkey = aesbs_ctr_setkey_sync,
+ .encrypt = ctr_encrypt_sync,
+ .decrypt = ctr_encrypt_sync,
}, {
.base.cra_name = "__xts(aes)",
.base.cra_driver_name = "__xts-aes-neonbs",
diff --git a/arch/arm64/crypto/chacha20-neon-glue.c b/arch/arm64/crypto/chacha20-neon-glue.c
index a7cd575ea223..cbdb75d15cd0 100644
--- a/arch/arm64/crypto/chacha20-neon-glue.c
+++ b/arch/arm64/crypto/chacha20-neon-glue.c
@@ -1,7 +1,7 @@
/*
* ChaCha20 256-bit cipher algorithm, RFC7539, arm64 NEON functions
*
- * Copyright (C) 2016 Linaro, Ltd. <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2016 - 2017 Linaro, Ltd. <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -26,6 +26,7 @@
#include <asm/hwcap.h>
#include <asm/neon.h>
+#include <asm/simd.h>
asmlinkage void chacha20_block_xor_neon(u32 *state, u8 *dst, const u8 *src);
asmlinkage void chacha20_4block_xor_neon(u32 *state, u8 *dst, const u8 *src);
@@ -64,7 +65,7 @@ static int chacha20_neon(struct skcipher_request *req)
u32 state[16];
int err;
- if (req->cryptlen <= CHACHA20_BLOCK_SIZE)
+ if (!may_use_simd() || req->cryptlen <= CHACHA20_BLOCK_SIZE)
return crypto_chacha20_crypt(req);
err = skcipher_walk_virt(&walk, req, true);
diff --git a/arch/arm64/crypto/crc32-ce-glue.c b/arch/arm64/crypto/crc32-ce-glue.c
index eccb1ae90064..624f4137918c 100644
--- a/arch/arm64/crypto/crc32-ce-glue.c
+++ b/arch/arm64/crypto/crc32-ce-glue.c
@@ -1,7 +1,7 @@
/*
* Accelerated CRC32(C) using arm64 NEON and Crypto Extensions instructions
*
- * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2016 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -19,6 +19,7 @@
#include <asm/hwcap.h>
#include <asm/neon.h>
+#include <asm/simd.h>
#include <asm/unaligned.h>
#define PMULL_MIN_LEN 64L /* minimum size of buffer
@@ -105,10 +106,10 @@ static int crc32_pmull_update(struct shash_desc *desc, const u8 *data,
length -= l;
}
- if (length >= PMULL_MIN_LEN) {
+ if (length >= PMULL_MIN_LEN && may_use_simd()) {
l = round_down(length, SCALE_F);
- kernel_neon_begin_partial(10);
+ kernel_neon_begin();
*crc = crc32_pmull_le(data, l, *crc);
kernel_neon_end();
@@ -137,10 +138,10 @@ static int crc32c_pmull_update(struct shash_desc *desc, const u8 *data,
length -= l;
}
- if (length >= PMULL_MIN_LEN) {
+ if (length >= PMULL_MIN_LEN && may_use_simd()) {
l = round_down(length, SCALE_F);
- kernel_neon_begin_partial(10);
+ kernel_neon_begin();
*crc = crc32c_pmull_le(data, l, *crc);
kernel_neon_end();
diff --git a/arch/arm64/crypto/crct10dif-ce-glue.c b/arch/arm64/crypto/crct10dif-ce-glue.c
index 60cb590c2590..96f0cae4a022 100644
--- a/arch/arm64/crypto/crct10dif-ce-glue.c
+++ b/arch/arm64/crypto/crct10dif-ce-glue.c
@@ -1,7 +1,7 @@
/*
* Accelerated CRC-T10DIF using arm64 NEON and Crypto Extensions instructions
*
- * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2016 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -18,6 +18,7 @@
#include <crypto/internal/hash.h>
#include <asm/neon.h>
+#include <asm/simd.h>
#define CRC_T10DIF_PMULL_CHUNK_SIZE 16U
@@ -48,9 +49,13 @@ static int crct10dif_update(struct shash_desc *desc, const u8 *data,
}
if (length > 0) {
- kernel_neon_begin_partial(14);
- *crc = crc_t10dif_pmull(*crc, data, length);
- kernel_neon_end();
+ if (may_use_simd()) {
+ kernel_neon_begin();
+ *crc = crc_t10dif_pmull(*crc, data, length);
+ kernel_neon_end();
+ } else {
+ *crc = crc_t10dif_generic(*crc, data, length);
+ }
}
return 0;
diff --git a/arch/arm64/crypto/ghash-ce-core.S b/arch/arm64/crypto/ghash-ce-core.S
index f0bb9f0b524f..11ebf1ae248a 100644
--- a/arch/arm64/crypto/ghash-ce-core.S
+++ b/arch/arm64/crypto/ghash-ce-core.S
@@ -1,7 +1,7 @@
/*
* Accelerated GHASH implementation with ARMv8 PMULL instructions.
*
- * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2014 - 2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -11,31 +11,215 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
- SHASH .req v0
- SHASH2 .req v1
- T1 .req v2
- T2 .req v3
- MASK .req v4
- XL .req v5
- XM .req v6
- XH .req v7
- IN1 .req v7
+ SHASH .req v0
+ SHASH2 .req v1
+ T1 .req v2
+ T2 .req v3
+ MASK .req v4
+ XL .req v5
+ XM .req v6
+ XH .req v7
+ IN1 .req v7
+
+ k00_16 .req v8
+ k32_48 .req v9
+
+ t3 .req v10
+ t4 .req v11
+ t5 .req v12
+ t6 .req v13
+ t7 .req v14
+ t8 .req v15
+ t9 .req v16
+
+ perm1 .req v17
+ perm2 .req v18
+ perm3 .req v19
+
+ sh1 .req v20
+ sh2 .req v21
+ sh3 .req v22
+ sh4 .req v23
+
+ ss1 .req v24
+ ss2 .req v25
+ ss3 .req v26
+ ss4 .req v27
.text
.arch armv8-a+crypto
- /*
- * void pmull_ghash_update(int blocks, u64 dg[], const char *src,
- * struct ghash_key const *k, const char *head)
- */
-ENTRY(pmull_ghash_update)
+ .macro __pmull_p64, rd, rn, rm
+ pmull \rd\().1q, \rn\().1d, \rm\().1d
+ .endm
+
+ .macro __pmull2_p64, rd, rn, rm
+ pmull2 \rd\().1q, \rn\().2d, \rm\().2d
+ .endm
+
+ .macro __pmull_p8, rq, ad, bd
+ ext t3.8b, \ad\().8b, \ad\().8b, #1 // A1
+ ext t5.8b, \ad\().8b, \ad\().8b, #2 // A2
+ ext t7.8b, \ad\().8b, \ad\().8b, #3 // A3
+
+ __pmull_p8_\bd \rq, \ad
+ .endm
+
+ .macro __pmull2_p8, rq, ad, bd
+ tbl t3.16b, {\ad\().16b}, perm1.16b // A1
+ tbl t5.16b, {\ad\().16b}, perm2.16b // A2
+ tbl t7.16b, {\ad\().16b}, perm3.16b // A3
+
+ __pmull2_p8_\bd \rq, \ad
+ .endm
+
+ .macro __pmull_p8_SHASH, rq, ad
+ __pmull_p8_tail \rq, \ad\().8b, SHASH.8b, 8b,, sh1, sh2, sh3, sh4
+ .endm
+
+ .macro __pmull_p8_SHASH2, rq, ad
+ __pmull_p8_tail \rq, \ad\().8b, SHASH2.8b, 8b,, ss1, ss2, ss3, ss4
+ .endm
+
+ .macro __pmull2_p8_SHASH, rq, ad
+ __pmull_p8_tail \rq, \ad\().16b, SHASH.16b, 16b, 2, sh1, sh2, sh3, sh4
+ .endm
+
+ .macro __pmull_p8_tail, rq, ad, bd, nb, t, b1, b2, b3, b4
+ pmull\t t3.8h, t3.\nb, \bd // F = A1*B
+ pmull\t t4.8h, \ad, \b1\().\nb // E = A*B1
+ pmull\t t5.8h, t5.\nb, \bd // H = A2*B
+ pmull\t t6.8h, \ad, \b2\().\nb // G = A*B2
+ pmull\t t7.8h, t7.\nb, \bd // J = A3*B
+ pmull\t t8.8h, \ad, \b3\().\nb // I = A*B3
+ pmull\t t9.8h, \ad, \b4\().\nb // K = A*B4
+ pmull\t \rq\().8h, \ad, \bd // D = A*B
+
+ eor t3.16b, t3.16b, t4.16b // L = E + F
+ eor t5.16b, t5.16b, t6.16b // M = G + H
+ eor t7.16b, t7.16b, t8.16b // N = I + J
+
+ uzp1 t4.2d, t3.2d, t5.2d
+ uzp2 t3.2d, t3.2d, t5.2d
+ uzp1 t6.2d, t7.2d, t9.2d
+ uzp2 t7.2d, t7.2d, t9.2d
+
+ // t3 = (L) (P0 + P1) << 8
+ // t5 = (M) (P2 + P3) << 16
+ eor t4.16b, t4.16b, t3.16b
+ and t3.16b, t3.16b, k32_48.16b
+
+ // t7 = (N) (P4 + P5) << 24
+ // t9 = (K) (P6 + P7) << 32
+ eor t6.16b, t6.16b, t7.16b
+ and t7.16b, t7.16b, k00_16.16b
+
+ eor t4.16b, t4.16b, t3.16b
+ eor t6.16b, t6.16b, t7.16b
+
+ zip2 t5.2d, t4.2d, t3.2d
+ zip1 t3.2d, t4.2d, t3.2d
+ zip2 t9.2d, t6.2d, t7.2d
+ zip1 t7.2d, t6.2d, t7.2d
+
+ ext t3.16b, t3.16b, t3.16b, #15
+ ext t5.16b, t5.16b, t5.16b, #14
+ ext t7.16b, t7.16b, t7.16b, #13
+ ext t9.16b, t9.16b, t9.16b, #12
+
+ eor t3.16b, t3.16b, t5.16b
+ eor t7.16b, t7.16b, t9.16b
+ eor \rq\().16b, \rq\().16b, t3.16b
+ eor \rq\().16b, \rq\().16b, t7.16b
+ .endm
+
+ .macro __pmull_pre_p64
+ movi MASK.16b, #0xe1
+ shl MASK.2d, MASK.2d, #57
+ .endm
+
+ .macro __pmull_pre_p8
+ // k00_16 := 0x0000000000000000_000000000000ffff
+ // k32_48 := 0x00000000ffffffff_0000ffffffffffff
+ movi k32_48.2d, #0xffffffff
+ mov k32_48.h[2], k32_48.h[0]
+ ushr k00_16.2d, k32_48.2d, #32
+
+ // prepare the permutation vectors
+ mov_q x5, 0x080f0e0d0c0b0a09
+ movi T1.8b, #8
+ dup perm1.2d, x5
+ eor perm1.16b, perm1.16b, T1.16b
+ ushr perm2.2d, perm1.2d, #8
+ ushr perm3.2d, perm1.2d, #16
+ ushr T1.2d, perm1.2d, #24
+ sli perm2.2d, perm1.2d, #56
+ sli perm3.2d, perm1.2d, #48
+ sli T1.2d, perm1.2d, #40
+
+ // precompute loop invariants
+ tbl sh1.16b, {SHASH.16b}, perm1.16b
+ tbl sh2.16b, {SHASH.16b}, perm2.16b
+ tbl sh3.16b, {SHASH.16b}, perm3.16b
+ tbl sh4.16b, {SHASH.16b}, T1.16b
+ ext ss1.8b, SHASH2.8b, SHASH2.8b, #1
+ ext ss2.8b, SHASH2.8b, SHASH2.8b, #2
+ ext ss3.8b, SHASH2.8b, SHASH2.8b, #3
+ ext ss4.8b, SHASH2.8b, SHASH2.8b, #4
+ .endm
+
+ //
+ // PMULL (64x64->128) based reduction for CPUs that can do
+ // it in a single instruction.
+ //
+ .macro __pmull_reduce_p64
+ pmull T2.1q, XL.1d, MASK.1d
+ eor XM.16b, XM.16b, T1.16b
+
+ mov XH.d[0], XM.d[1]
+ mov XM.d[1], XL.d[0]
+
+ eor XL.16b, XM.16b, T2.16b
+ ext T2.16b, XL.16b, XL.16b, #8
+ pmull XL.1q, XL.1d, MASK.1d
+ .endm
+
+ //
+ // Alternative reduction for CPUs that lack support for the
+ // 64x64->128 PMULL instruction
+ //
+ .macro __pmull_reduce_p8
+ eor XM.16b, XM.16b, T1.16b
+
+ mov XL.d[1], XM.d[0]
+ mov XH.d[0], XM.d[1]
+
+ shl T1.2d, XL.2d, #57
+ shl T2.2d, XL.2d, #62
+ eor T2.16b, T2.16b, T1.16b
+ shl T1.2d, XL.2d, #63
+ eor T2.16b, T2.16b, T1.16b
+ ext T1.16b, XL.16b, XH.16b, #8
+ eor T2.16b, T2.16b, T1.16b
+
+ mov XL.d[1], T2.d[0]
+ mov XH.d[0], T2.d[1]
+
+ ushr T2.2d, XL.2d, #1
+ eor XH.16b, XH.16b, XL.16b
+ eor XL.16b, XL.16b, T2.16b
+ ushr T2.2d, T2.2d, #6
+ ushr XL.2d, XL.2d, #1
+ .endm
+
+ .macro __pmull_ghash, pn
ld1 {SHASH.2d}, [x3]
ld1 {XL.2d}, [x1]
- movi MASK.16b, #0xe1
ext SHASH2.16b, SHASH.16b, SHASH.16b, #8
- shl MASK.2d, MASK.2d, #57
eor SHASH2.16b, SHASH2.16b, SHASH.16b
+ __pmull_pre_\pn
+
/* do the head block first, if supplied */
cbz x4, 0f
ld1 {T1.2d}, [x4]
@@ -52,28 +236,209 @@ CPU_LE( rev64 T1.16b, T1.16b )
eor T1.16b, T1.16b, T2.16b
eor XL.16b, XL.16b, IN1.16b
+ __pmull2_\pn XH, XL, SHASH // a1 * b1
+ eor T1.16b, T1.16b, XL.16b
+ __pmull_\pn XL, XL, SHASH // a0 * b0
+ __pmull_\pn XM, T1, SHASH2 // (a1 + a0)(b1 + b0)
+
+ eor T2.16b, XL.16b, XH.16b
+ ext T1.16b, XL.16b, XH.16b, #8
+ eor XM.16b, XM.16b, T2.16b
+
+ __pmull_reduce_\pn
+
+ eor T2.16b, T2.16b, XH.16b
+ eor XL.16b, XL.16b, T2.16b
+
+ cbnz w0, 0b
+
+ st1 {XL.2d}, [x1]
+ ret
+ .endm
+
+ /*
+ * void pmull_ghash_update(int blocks, u64 dg[], const char *src,
+ * struct ghash_key const *k, const char *head)
+ */
+ENTRY(pmull_ghash_update_p64)
+ __pmull_ghash p64
+ENDPROC(pmull_ghash_update_p64)
+
+ENTRY(pmull_ghash_update_p8)
+ __pmull_ghash p8
+ENDPROC(pmull_ghash_update_p8)
+
+ KS .req v8
+ CTR .req v9
+ INP .req v10
+
+ .macro load_round_keys, rounds, rk
+ cmp \rounds, #12
+ blo 2222f /* 128 bits */
+ beq 1111f /* 192 bits */
+ ld1 {v17.4s-v18.4s}, [\rk], #32
+1111: ld1 {v19.4s-v20.4s}, [\rk], #32
+2222: ld1 {v21.4s-v24.4s}, [\rk], #64
+ ld1 {v25.4s-v28.4s}, [\rk], #64
+ ld1 {v29.4s-v31.4s}, [\rk]
+ .endm
+
+ .macro enc_round, state, key
+ aese \state\().16b, \key\().16b
+ aesmc \state\().16b, \state\().16b
+ .endm
+
+ .macro enc_block, state, rounds
+ cmp \rounds, #12
+ b.lo 2222f /* 128 bits */
+ b.eq 1111f /* 192 bits */
+ enc_round \state, v17
+ enc_round \state, v18
+1111: enc_round \state, v19
+ enc_round \state, v20
+2222: .irp key, v21, v22, v23, v24, v25, v26, v27, v28, v29
+ enc_round \state, \key
+ .endr
+ aese \state\().16b, v30.16b
+ eor \state\().16b, \state\().16b, v31.16b
+ .endm
+
+ .macro pmull_gcm_do_crypt, enc
+ ld1 {SHASH.2d}, [x4]
+ ld1 {XL.2d}, [x1]
+ ldr x8, [x5, #8] // load lower counter
+
+ movi MASK.16b, #0xe1
+ ext SHASH2.16b, SHASH.16b, SHASH.16b, #8
+CPU_LE( rev x8, x8 )
+ shl MASK.2d, MASK.2d, #57
+ eor SHASH2.16b, SHASH2.16b, SHASH.16b
+
+ .if \enc == 1
+ ld1 {KS.16b}, [x7]
+ .endif
+
+0: ld1 {CTR.8b}, [x5] // load upper counter
+ ld1 {INP.16b}, [x3], #16
+ rev x9, x8
+ add x8, x8, #1
+ sub w0, w0, #1
+ ins CTR.d[1], x9 // set lower counter
+
+ .if \enc == 1
+ eor INP.16b, INP.16b, KS.16b // encrypt input
+ st1 {INP.16b}, [x2], #16
+ .endif
+
+ rev64 T1.16b, INP.16b
+
+ cmp w6, #12
+ b.ge 2f // AES-192/256?
+
+1: enc_round CTR, v21
+
+ ext T2.16b, XL.16b, XL.16b, #8
+ ext IN1.16b, T1.16b, T1.16b, #8
+
+ enc_round CTR, v22
+
+ eor T1.16b, T1.16b, T2.16b
+ eor XL.16b, XL.16b, IN1.16b
+
+ enc_round CTR, v23
+
pmull2 XH.1q, SHASH.2d, XL.2d // a1 * b1
eor T1.16b, T1.16b, XL.16b
+
+ enc_round CTR, v24
+
pmull XL.1q, SHASH.1d, XL.1d // a0 * b0
pmull XM.1q, SHASH2.1d, T1.1d // (a1 + a0)(b1 + b0)
+ enc_round CTR, v25
+
ext T1.16b, XL.16b, XH.16b, #8
eor T2.16b, XL.16b, XH.16b
eor XM.16b, XM.16b, T1.16b
+
+ enc_round CTR, v26
+
eor XM.16b, XM.16b, T2.16b
pmull T2.1q, XL.1d, MASK.1d
+ enc_round CTR, v27
+
mov XH.d[0], XM.d[1]
mov XM.d[1], XL.d[0]
+ enc_round CTR, v28
+
eor XL.16b, XM.16b, T2.16b
+
+ enc_round CTR, v29
+
ext T2.16b, XL.16b, XL.16b, #8
+
+ aese CTR.16b, v30.16b
+
pmull XL.1q, XL.1d, MASK.1d
eor T2.16b, T2.16b, XH.16b
+
+ eor KS.16b, CTR.16b, v31.16b
+
eor XL.16b, XL.16b, T2.16b
+ .if \enc == 0
+ eor INP.16b, INP.16b, KS.16b
+ st1 {INP.16b}, [x2], #16
+ .endif
+
cbnz w0, 0b
+CPU_LE( rev x8, x8 )
st1 {XL.2d}, [x1]
+ str x8, [x5, #8] // store lower counter
+
+ .if \enc == 1
+ st1 {KS.16b}, [x7]
+ .endif
+
+ ret
+
+2: b.eq 3f // AES-192?
+ enc_round CTR, v17
+ enc_round CTR, v18
+3: enc_round CTR, v19
+ enc_round CTR, v20
+ b 1b
+ .endm
+
+ /*
+ * void pmull_gcm_encrypt(int blocks, u64 dg[], u8 dst[], const u8 src[],
+ * struct ghash_key const *k, u8 ctr[],
+ * int rounds, u8 ks[])
+ */
+ENTRY(pmull_gcm_encrypt)
+ pmull_gcm_do_crypt 1
+ENDPROC(pmull_gcm_encrypt)
+
+ /*
+ * void pmull_gcm_decrypt(int blocks, u64 dg[], u8 dst[], const u8 src[],
+ * struct ghash_key const *k, u8 ctr[],
+ * int rounds)
+ */
+ENTRY(pmull_gcm_decrypt)
+ pmull_gcm_do_crypt 0
+ENDPROC(pmull_gcm_decrypt)
+
+ /*
+ * void pmull_gcm_encrypt_block(u8 dst[], u8 src[], u8 rk[], int rounds)
+ */
+ENTRY(pmull_gcm_encrypt_block)
+ cbz x2, 0f
+ load_round_keys w3, x2
+0: ld1 {v0.16b}, [x1]
+ enc_block v0, w3
+ st1 {v0.16b}, [x0]
ret
-ENDPROC(pmull_ghash_update)
+ENDPROC(pmull_gcm_encrypt_block)
diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c
index 833ec1e3f3e9..cfc9c92814fd 100644
--- a/arch/arm64/crypto/ghash-ce-glue.c
+++ b/arch/arm64/crypto/ghash-ce-glue.c
@@ -1,7 +1,7 @@
/*
* Accelerated GHASH implementation with ARMv8 PMULL instructions.
*
- * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2014 - 2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -9,22 +9,33 @@
*/
#include <asm/neon.h>
+#include <asm/simd.h>
#include <asm/unaligned.h>
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/b128ops.h>
+#include <crypto/gf128mul.h>
+#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>
#include <linux/module.h>
-MODULE_DESCRIPTION("GHASH secure hash using ARMv8 Crypto Extensions");
+MODULE_DESCRIPTION("GHASH and AES-GCM using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_CRYPTO("ghash");
#define GHASH_BLOCK_SIZE 16
#define GHASH_DIGEST_SIZE 16
+#define GCM_IV_SIZE 12
struct ghash_key {
u64 a;
u64 b;
+ be128 k;
};
struct ghash_desc_ctx {
@@ -33,8 +44,35 @@ struct ghash_desc_ctx {
u32 count;
};
-asmlinkage void pmull_ghash_update(int blocks, u64 dg[], const char *src,
- struct ghash_key const *k, const char *head);
+struct gcm_aes_ctx {
+ struct crypto_aes_ctx aes_key;
+ struct ghash_key ghash_key;
+};
+
+asmlinkage void pmull_ghash_update_p64(int blocks, u64 dg[], const char *src,
+ struct ghash_key const *k,
+ const char *head);
+
+asmlinkage void pmull_ghash_update_p8(int blocks, u64 dg[], const char *src,
+ struct ghash_key const *k,
+ const char *head);
+
+static void (*pmull_ghash_update)(int blocks, u64 dg[], const char *src,
+ struct ghash_key const *k,
+ const char *head);
+
+asmlinkage void pmull_gcm_encrypt(int blocks, u64 dg[], u8 dst[],
+ const u8 src[], struct ghash_key const *k,
+ u8 ctr[], int rounds, u8 ks[]);
+
+asmlinkage void pmull_gcm_decrypt(int blocks, u64 dg[], u8 dst[],
+ const u8 src[], struct ghash_key const *k,
+ u8 ctr[], int rounds);
+
+asmlinkage void pmull_gcm_encrypt_block(u8 dst[], u8 const src[],
+ u32 const rk[], int rounds);
+
+asmlinkage void __aes_arm64_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
static int ghash_init(struct shash_desc *desc)
{
@@ -44,6 +82,36 @@ static int ghash_init(struct shash_desc *desc)
return 0;
}
+static void ghash_do_update(int blocks, u64 dg[], const char *src,
+ struct ghash_key *key, const char *head)
+{
+ if (likely(may_use_simd())) {
+ kernel_neon_begin();
+ pmull_ghash_update(blocks, dg, src, key, head);
+ kernel_neon_end();
+ } else {
+ be128 dst = { cpu_to_be64(dg[1]), cpu_to_be64(dg[0]) };
+
+ do {
+ const u8 *in = src;
+
+ if (head) {
+ in = head;
+ blocks++;
+ head = NULL;
+ } else {
+ src += GHASH_BLOCK_SIZE;
+ }
+
+ crypto_xor((u8 *)&dst, in, GHASH_BLOCK_SIZE);
+ gf128mul_lle(&dst, &key->k);
+ } while (--blocks);
+
+ dg[0] = be64_to_cpu(dst.b);
+ dg[1] = be64_to_cpu(dst.a);
+ }
+}
+
static int ghash_update(struct shash_desc *desc, const u8 *src,
unsigned int len)
{
@@ -67,10 +135,9 @@ static int ghash_update(struct shash_desc *desc, const u8 *src,
blocks = len / GHASH_BLOCK_SIZE;
len %= GHASH_BLOCK_SIZE;
- kernel_neon_begin_partial(8);
- pmull_ghash_update(blocks, ctx->digest, src, key,
- partial ? ctx->buf : NULL);
- kernel_neon_end();
+ ghash_do_update(blocks, ctx->digest, src, key,
+ partial ? ctx->buf : NULL);
+
src += blocks * GHASH_BLOCK_SIZE;
partial = 0;
}
@@ -89,9 +156,7 @@ static int ghash_final(struct shash_desc *desc, u8 *dst)
memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial);
- kernel_neon_begin_partial(8);
- pmull_ghash_update(1, ctx->digest, ctx->buf, key, NULL);
- kernel_neon_end();
+ ghash_do_update(1, ctx->digest, ctx->buf, key, NULL);
}
put_unaligned_be64(ctx->digest[1], dst);
put_unaligned_be64(ctx->digest[0], dst + 8);
@@ -100,16 +165,13 @@ static int ghash_final(struct shash_desc *desc, u8 *dst)
return 0;
}
-static int ghash_setkey(struct crypto_shash *tfm,
- const u8 *inkey, unsigned int keylen)
+static int __ghash_setkey(struct ghash_key *key,
+ const u8 *inkey, unsigned int keylen)
{
- struct ghash_key *key = crypto_shash_ctx(tfm);
u64 a, b;
- if (keylen != GHASH_BLOCK_SIZE) {
- crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
+ /* needed for the fallback */
+ memcpy(&key->k, inkey, GHASH_BLOCK_SIZE);
/* perform multiplication by 'x' in GF(2^128) */
b = get_unaligned_be64(inkey);
@@ -124,33 +186,418 @@ static int ghash_setkey(struct crypto_shash *tfm,
return 0;
}
+static int ghash_setkey(struct crypto_shash *tfm,
+ const u8 *inkey, unsigned int keylen)
+{
+ struct ghash_key *key = crypto_shash_ctx(tfm);
+
+ if (keylen != GHASH_BLOCK_SIZE) {
+ crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ return __ghash_setkey(key, inkey, keylen);
+}
+
static struct shash_alg ghash_alg = {
- .digestsize = GHASH_DIGEST_SIZE,
- .init = ghash_init,
- .update = ghash_update,
- .final = ghash_final,
- .setkey = ghash_setkey,
- .descsize = sizeof(struct ghash_desc_ctx),
- .base = {
- .cra_name = "ghash",
- .cra_driver_name = "ghash-ce",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
- .cra_blocksize = GHASH_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct ghash_key),
- .cra_module = THIS_MODULE,
- },
+ .base.cra_name = "ghash",
+ .base.cra_driver_name = "ghash-ce",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .base.cra_blocksize = GHASH_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct ghash_key),
+ .base.cra_module = THIS_MODULE,
+
+ .digestsize = GHASH_DIGEST_SIZE,
+ .init = ghash_init,
+ .update = ghash_update,
+ .final = ghash_final,
+ .setkey = ghash_setkey,
+ .descsize = sizeof(struct ghash_desc_ctx),
+};
+
+static int num_rounds(struct crypto_aes_ctx *ctx)
+{
+ /*
+ * # of rounds specified by AES:
+ * 128 bit key 10 rounds
+ * 192 bit key 12 rounds
+ * 256 bit key 14 rounds
+ * => n byte key => 6 + (n/4) rounds
+ */
+ return 6 + ctx->key_length / 4;
+}
+
+static int gcm_setkey(struct crypto_aead *tfm, const u8 *inkey,
+ unsigned int keylen)
+{
+ struct gcm_aes_ctx *ctx = crypto_aead_ctx(tfm);
+ u8 key[GHASH_BLOCK_SIZE];
+ int ret;
+
+ ret = crypto_aes_expand_key(&ctx->aes_key, inkey, keylen);
+ if (ret) {
+ tfm->base.crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
+ }
+
+ __aes_arm64_encrypt(ctx->aes_key.key_enc, key, (u8[AES_BLOCK_SIZE]){},
+ num_rounds(&ctx->aes_key));
+
+ return __ghash_setkey(&ctx->ghash_key, key, sizeof(key));
+}
+
+static int gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+{
+ switch (authsize) {
+ case 4:
+ case 8:
+ case 12 ... 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void gcm_update_mac(u64 dg[], const u8 *src, int count, u8 buf[],
+ int *buf_count, struct gcm_aes_ctx *ctx)
+{
+ if (*buf_count > 0) {
+ int buf_added = min(count, GHASH_BLOCK_SIZE - *buf_count);
+
+ memcpy(&buf[*buf_count], src, buf_added);
+
+ *buf_count += buf_added;
+ src += buf_added;
+ count -= buf_added;
+ }
+
+ if (count >= GHASH_BLOCK_SIZE || *buf_count == GHASH_BLOCK_SIZE) {
+ int blocks = count / GHASH_BLOCK_SIZE;
+
+ ghash_do_update(blocks, dg, src, &ctx->ghash_key,
+ *buf_count ? buf : NULL);
+
+ src += blocks * GHASH_BLOCK_SIZE;
+ count %= GHASH_BLOCK_SIZE;
+ *buf_count = 0;
+ }
+
+ if (count > 0) {
+ memcpy(buf, src, count);
+ *buf_count = count;
+ }
+}
+
+static void gcm_calculate_auth_mac(struct aead_request *req, u64 dg[])
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct gcm_aes_ctx *ctx = crypto_aead_ctx(aead);
+ u8 buf[GHASH_BLOCK_SIZE];
+ struct scatter_walk walk;
+ u32 len = req->assoclen;
+ int buf_count = 0;
+
+ scatterwalk_start(&walk, req->src);
+
+ do {
+ u32 n = scatterwalk_clamp(&walk, len);
+ u8 *p;
+
+ if (!n) {
+ scatterwalk_start(&walk, sg_next(walk.sg));
+ n = scatterwalk_clamp(&walk, len);
+ }
+ p = scatterwalk_map(&walk);
+
+ gcm_update_mac(dg, p, n, buf, &buf_count, ctx);
+ len -= n;
+
+ scatterwalk_unmap(p);
+ scatterwalk_advance(&walk, n);
+ scatterwalk_done(&walk, 0, len);
+ } while (len);
+
+ if (buf_count) {
+ memset(&buf[buf_count], 0, GHASH_BLOCK_SIZE - buf_count);
+ ghash_do_update(1, dg, buf, &ctx->ghash_key, NULL);
+ }
+}
+
+static void gcm_final(struct aead_request *req, struct gcm_aes_ctx *ctx,
+ u64 dg[], u8 tag[], int cryptlen)
+{
+ u8 mac[AES_BLOCK_SIZE];
+ u128 lengths;
+
+ lengths.a = cpu_to_be64(req->assoclen * 8);
+ lengths.b = cpu_to_be64(cryptlen * 8);
+
+ ghash_do_update(1, dg, (void *)&lengths, &ctx->ghash_key, NULL);
+
+ put_unaligned_be64(dg[1], mac);
+ put_unaligned_be64(dg[0], mac + 8);
+
+ crypto_xor(tag, mac, AES_BLOCK_SIZE);
+}
+
+static int gcm_encrypt(struct aead_request *req)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct gcm_aes_ctx *ctx = crypto_aead_ctx(aead);
+ struct skcipher_walk walk;
+ u8 iv[AES_BLOCK_SIZE];
+ u8 ks[AES_BLOCK_SIZE];
+ u8 tag[AES_BLOCK_SIZE];
+ u64 dg[2] = {};
+ int err;
+
+ if (req->assoclen)
+ gcm_calculate_auth_mac(req, dg);
+
+ memcpy(iv, req->iv, GCM_IV_SIZE);
+ put_unaligned_be32(1, iv + GCM_IV_SIZE);
+
+ if (likely(may_use_simd())) {
+ kernel_neon_begin();
+
+ pmull_gcm_encrypt_block(tag, iv, ctx->aes_key.key_enc,
+ num_rounds(&ctx->aes_key));
+ put_unaligned_be32(2, iv + GCM_IV_SIZE);
+ pmull_gcm_encrypt_block(ks, iv, NULL,
+ num_rounds(&ctx->aes_key));
+ put_unaligned_be32(3, iv + GCM_IV_SIZE);
+
+ err = skcipher_walk_aead_encrypt(&walk, req, true);
+
+ while (walk.nbytes >= AES_BLOCK_SIZE) {
+ int blocks = walk.nbytes / AES_BLOCK_SIZE;
+
+ pmull_gcm_encrypt(blocks, dg, walk.dst.virt.addr,
+ walk.src.virt.addr, &ctx->ghash_key,
+ iv, num_rounds(&ctx->aes_key), ks);
+
+ err = skcipher_walk_done(&walk,
+ walk.nbytes % AES_BLOCK_SIZE);
+ }
+ kernel_neon_end();
+ } else {
+ __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv,
+ num_rounds(&ctx->aes_key));
+ put_unaligned_be32(2, iv + GCM_IV_SIZE);
+
+ err = skcipher_walk_aead_encrypt(&walk, req, true);
+
+ while (walk.nbytes >= AES_BLOCK_SIZE) {
+ int blocks = walk.nbytes / AES_BLOCK_SIZE;
+ u8 *dst = walk.dst.virt.addr;
+ u8 *src = walk.src.virt.addr;
+
+ do {
+ __aes_arm64_encrypt(ctx->aes_key.key_enc,
+ ks, iv,
+ num_rounds(&ctx->aes_key));
+ crypto_xor_cpy(dst, src, ks, AES_BLOCK_SIZE);
+ crypto_inc(iv, AES_BLOCK_SIZE);
+
+ dst += AES_BLOCK_SIZE;
+ src += AES_BLOCK_SIZE;
+ } while (--blocks > 0);
+
+ ghash_do_update(walk.nbytes / AES_BLOCK_SIZE, dg,
+ walk.dst.virt.addr, &ctx->ghash_key,
+ NULL);
+
+ err = skcipher_walk_done(&walk,
+ walk.nbytes % AES_BLOCK_SIZE);
+ }
+ if (walk.nbytes)
+ __aes_arm64_encrypt(ctx->aes_key.key_enc, ks, iv,
+ num_rounds(&ctx->aes_key));
+ }
+
+ /* handle the tail */
+ if (walk.nbytes) {
+ u8 buf[GHASH_BLOCK_SIZE];
+
+ crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr, ks,
+ walk.nbytes);
+
+ memcpy(buf, walk.dst.virt.addr, walk.nbytes);
+ memset(buf + walk.nbytes, 0, GHASH_BLOCK_SIZE - walk.nbytes);
+ ghash_do_update(1, dg, buf, &ctx->ghash_key, NULL);
+
+ err = skcipher_walk_done(&walk, 0);
+ }
+
+ if (err)
+ return err;
+
+ gcm_final(req, ctx, dg, tag, req->cryptlen);
+
+ /* copy authtag to end of dst */
+ scatterwalk_map_and_copy(tag, req->dst, req->assoclen + req->cryptlen,
+ crypto_aead_authsize(aead), 1);
+
+ return 0;
+}
+
+static int gcm_decrypt(struct aead_request *req)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct gcm_aes_ctx *ctx = crypto_aead_ctx(aead);
+ unsigned int authsize = crypto_aead_authsize(aead);
+ struct skcipher_walk walk;
+ u8 iv[AES_BLOCK_SIZE];
+ u8 tag[AES_BLOCK_SIZE];
+ u8 buf[GHASH_BLOCK_SIZE];
+ u64 dg[2] = {};
+ int err;
+
+ if (req->assoclen)
+ gcm_calculate_auth_mac(req, dg);
+
+ memcpy(iv, req->iv, GCM_IV_SIZE);
+ put_unaligned_be32(1, iv + GCM_IV_SIZE);
+
+ if (likely(may_use_simd())) {
+ kernel_neon_begin();
+
+ pmull_gcm_encrypt_block(tag, iv, ctx->aes_key.key_enc,
+ num_rounds(&ctx->aes_key));
+ put_unaligned_be32(2, iv + GCM_IV_SIZE);
+
+ err = skcipher_walk_aead_decrypt(&walk, req, true);
+
+ while (walk.nbytes >= AES_BLOCK_SIZE) {
+ int blocks = walk.nbytes / AES_BLOCK_SIZE;
+
+ pmull_gcm_decrypt(blocks, dg, walk.dst.virt.addr,
+ walk.src.virt.addr, &ctx->ghash_key,
+ iv, num_rounds(&ctx->aes_key));
+
+ err = skcipher_walk_done(&walk,
+ walk.nbytes % AES_BLOCK_SIZE);
+ }
+ if (walk.nbytes)
+ pmull_gcm_encrypt_block(iv, iv, NULL,
+ num_rounds(&ctx->aes_key));
+
+ kernel_neon_end();
+ } else {
+ __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv,
+ num_rounds(&ctx->aes_key));
+ put_unaligned_be32(2, iv + GCM_IV_SIZE);
+
+ err = skcipher_walk_aead_decrypt(&walk, req, true);
+
+ while (walk.nbytes >= AES_BLOCK_SIZE) {
+ int blocks = walk.nbytes / AES_BLOCK_SIZE;
+ u8 *dst = walk.dst.virt.addr;
+ u8 *src = walk.src.virt.addr;
+
+ ghash_do_update(blocks, dg, walk.src.virt.addr,
+ &ctx->ghash_key, NULL);
+
+ do {
+ __aes_arm64_encrypt(ctx->aes_key.key_enc,
+ buf, iv,
+ num_rounds(&ctx->aes_key));
+ crypto_xor_cpy(dst, src, buf, AES_BLOCK_SIZE);
+ crypto_inc(iv, AES_BLOCK_SIZE);
+
+ dst += AES_BLOCK_SIZE;
+ src += AES_BLOCK_SIZE;
+ } while (--blocks > 0);
+
+ err = skcipher_walk_done(&walk,
+ walk.nbytes % AES_BLOCK_SIZE);
+ }
+ if (walk.nbytes)
+ __aes_arm64_encrypt(ctx->aes_key.key_enc, iv, iv,
+ num_rounds(&ctx->aes_key));
+ }
+
+ /* handle the tail */
+ if (walk.nbytes) {
+ memcpy(buf, walk.src.virt.addr, walk.nbytes);
+ memset(buf + walk.nbytes, 0, GHASH_BLOCK_SIZE - walk.nbytes);
+ ghash_do_update(1, dg, buf, &ctx->ghash_key, NULL);
+
+ crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr, iv,
+ walk.nbytes);
+
+ err = skcipher_walk_done(&walk, 0);
+ }
+
+ if (err)
+ return err;
+
+ gcm_final(req, ctx, dg, tag, req->cryptlen - authsize);
+
+ /* compare calculated auth tag with the stored one */
+ scatterwalk_map_and_copy(buf, req->src,
+ req->assoclen + req->cryptlen - authsize,
+ authsize, 0);
+
+ if (crypto_memneq(tag, buf, authsize))
+ return -EBADMSG;
+ return 0;
+}
+
+static struct aead_alg gcm_aes_alg = {
+ .ivsize = GCM_IV_SIZE,
+ .chunksize = AES_BLOCK_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+ .setkey = gcm_setkey,
+ .setauthsize = gcm_setauthsize,
+ .encrypt = gcm_encrypt,
+ .decrypt = gcm_decrypt,
+
+ .base.cra_name = "gcm(aes)",
+ .base.cra_driver_name = "gcm-aes-ce",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct gcm_aes_ctx),
+ .base.cra_module = THIS_MODULE,
};
static int __init ghash_ce_mod_init(void)
{
- return crypto_register_shash(&ghash_alg);
+ int ret;
+
+ if (!(elf_hwcap & HWCAP_ASIMD))
+ return -ENODEV;
+
+ if (elf_hwcap & HWCAP_PMULL)
+ pmull_ghash_update = pmull_ghash_update_p64;
+
+ else
+ pmull_ghash_update = pmull_ghash_update_p8;
+
+ ret = crypto_register_shash(&ghash_alg);
+ if (ret)
+ return ret;
+
+ if (elf_hwcap & HWCAP_PMULL) {
+ ret = crypto_register_aead(&gcm_aes_alg);
+ if (ret)
+ crypto_unregister_shash(&ghash_alg);
+ }
+ return ret;
}
static void __exit ghash_ce_mod_exit(void)
{
crypto_unregister_shash(&ghash_alg);
+ crypto_unregister_aead(&gcm_aes_alg);
}
-module_cpu_feature_match(PMULL, ghash_ce_mod_init);
+static const struct cpu_feature ghash_cpu_feature[] = {
+ { cpu_feature(PMULL) }, { }
+};
+MODULE_DEVICE_TABLE(cpu, ghash_cpu_feature);
+
+module_init(ghash_ce_mod_init);
module_exit(ghash_ce_mod_exit);
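
A minimal, hedged usage sketch (not part of the patch): how a kernel caller could exercise the "gcm(aes)" AEAD registered above, using only the standard crypto AEAD API. The key, IV, buffer sizes and function name are invented for illustration; a real caller would also handle -EINPROGRESS from asynchronous implementations and check every return value.

#include <crypto/aead.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int gcm_aes_example(void)
{
	struct crypto_aead *tfm;
	struct aead_request *req = NULL;
	struct scatterlist sg;
	u8 key[16] = { 0 };	/* example 128-bit key */
	u8 iv[12] = { 0 };	/* GCM nonce: GCM_IV_SIZE bytes */
	u8 *buf;		/* 16 bytes AAD + 16 bytes plaintext + 16 bytes tag */
	int err = -ENOMEM;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	buf = kzalloc(48, GFP_KERNEL);
	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!buf || !req)
		goto out;

	err = crypto_aead_setkey(tfm, key, sizeof(key));
	if (err)
		goto out;

	sg_init_one(&sg, buf, 48);
	aead_request_set_ad(req, 16);			/* first 16 bytes are AAD */
	aead_request_set_crypt(req, &sg, &sg, 16, iv);	/* encrypt 16 bytes in place */
	err = crypto_aead_encrypt(req);			/* tag lands after the ciphertext */
out:
	aead_request_free(req);
	kfree(buf);
	crypto_free_aead(tfm);
	return err;
}
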
diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
index ea319c055f5d..efbeb3e0dcfb 100644
--- a/arch/arm64/crypto/sha1-ce-glue.c
+++ b/arch/arm64/crypto/sha1-ce-glue.c
@@ -1,7 +1,7 @@
/*
* sha1-ce-glue.c - SHA-1 secure hash using ARMv8 Crypto Extensions
*
- * Copyright (C) 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2014 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -9,6 +9,7 @@
*/
#include <asm/neon.h>
+#include <asm/simd.h>
#include <asm/unaligned.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
@@ -37,8 +38,11 @@ static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
{
struct sha1_ce_state *sctx = shash_desc_ctx(desc);
+ if (!may_use_simd())
+ return crypto_sha1_update(desc, data, len);
+
sctx->finalize = 0;
- kernel_neon_begin_partial(16);
+ kernel_neon_begin();
sha1_base_do_update(desc, data, len,
(sha1_block_fn *)sha1_ce_transform);
kernel_neon_end();
@@ -52,13 +56,16 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
struct sha1_ce_state *sctx = shash_desc_ctx(desc);
bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE);
+ if (!may_use_simd())
+ return crypto_sha1_finup(desc, data, len, out);
+
/*
* Allow the asm code to perform the finalization if there is no
* partial data and the input is a round multiple of the block size.
*/
sctx->finalize = finalize;
- kernel_neon_begin_partial(16);
+ kernel_neon_begin();
sha1_base_do_update(desc, data, len,
(sha1_block_fn *)sha1_ce_transform);
if (!finalize)
@@ -71,8 +78,11 @@ static int sha1_ce_final(struct shash_desc *desc, u8 *out)
{
struct sha1_ce_state *sctx = shash_desc_ctx(desc);
+ if (!may_use_simd())
+ return crypto_sha1_finup(desc, NULL, 0, out);
+
sctx->finalize = 0;
- kernel_neon_begin_partial(16);
+ kernel_neon_begin();
sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
kernel_neon_end();
return sha1_base_finish(desc, out);
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
index 0ed9486f75dd..fd1ff2b13dfa 100644
--- a/arch/arm64/crypto/sha2-ce-glue.c
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -1,7 +1,7 @@
/*
* sha2-ce-glue.c - SHA-224/SHA-256 using ARMv8 Crypto Extensions
*
- * Copyright (C) 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2014 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -9,6 +9,7 @@
*/
#include <asm/neon.h>
+#include <asm/simd.h>
#include <asm/unaligned.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
@@ -34,13 +35,19 @@ const u32 sha256_ce_offsetof_count = offsetof(struct sha256_ce_state,
const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state,
finalize);
+asmlinkage void sha256_block_data_order(u32 *digest, u8 const *src, int blocks);
+
static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct sha256_ce_state *sctx = shash_desc_ctx(desc);
+ if (!may_use_simd())
+ return sha256_base_do_update(desc, data, len,
+ (sha256_block_fn *)sha256_block_data_order);
+
sctx->finalize = 0;
- kernel_neon_begin_partial(28);
+ kernel_neon_begin();
sha256_base_do_update(desc, data, len,
(sha256_block_fn *)sha2_ce_transform);
kernel_neon_end();
@@ -54,13 +61,22 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
struct sha256_ce_state *sctx = shash_desc_ctx(desc);
bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE);
+ if (!may_use_simd()) {
+ if (len)
+ sha256_base_do_update(desc, data, len,
+ (sha256_block_fn *)sha256_block_data_order);
+ sha256_base_do_finalize(desc,
+ (sha256_block_fn *)sha256_block_data_order);
+ return sha256_base_finish(desc, out);
+ }
+
/*
* Allow the asm code to perform the finalization if there is no
* partial data and the input is a round multiple of the block size.
*/
sctx->finalize = finalize;
- kernel_neon_begin_partial(28);
+ kernel_neon_begin();
sha256_base_do_update(desc, data, len,
(sha256_block_fn *)sha2_ce_transform);
if (!finalize)
@@ -74,8 +90,14 @@ static int sha256_ce_final(struct shash_desc *desc, u8 *out)
{
struct sha256_ce_state *sctx = shash_desc_ctx(desc);
+ if (!may_use_simd()) {
+ sha256_base_do_finalize(desc,
+ (sha256_block_fn *)sha256_block_data_order);
+ return sha256_base_finish(desc, out);
+ }
+
sctx->finalize = 0;
- kernel_neon_begin_partial(28);
+ kernel_neon_begin();
sha256_base_do_finalize(desc, (sha256_block_fn *)sha2_ce_transform);
kernel_neon_end();
return sha256_base_finish(desc, out);
diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c
index a2226f841960..b064d925fe2a 100644
--- a/arch/arm64/crypto/sha256-glue.c
+++ b/arch/arm64/crypto/sha256-glue.c
@@ -29,6 +29,7 @@ MODULE_ALIAS_CRYPTO("sha256");
asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
unsigned int num_blks);
+EXPORT_SYMBOL(sha256_block_data_order);
asmlinkage void sha256_block_neon(u32 *digest, const void *data,
unsigned int num_blks);
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index f81c7b685fc6..2326e39d5892 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -20,7 +20,6 @@ generic-y += rwsem.h
generic-y += segment.h
generic-y += serial.h
generic-y += set_memory.h
-generic-y += simd.h
generic-y += sizes.h
generic-y += switch_to.h
generic-y += trace_clock.h
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index 8cef47fa2218..b7e3f74822da 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -116,6 +116,8 @@ static inline void gic_write_bpr1(u32 val)
#define gic_read_typer(c) readq_relaxed(c)
#define gic_write_irouter(v, c) writeq_relaxed(v, c)
+#define gic_read_lpir(c) readq_relaxed(c)
+#define gic_write_lpir(v, c) writeq_relaxed(v, c)
#define gic_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l))
@@ -133,5 +135,10 @@ static inline void gic_write_bpr1(u32 val)
#define gicr_write_pendbaser(v, c) writeq_relaxed(v, c)
#define gicr_read_pendbaser(c) readq_relaxed(c)
+#define gits_write_vpropbaser(v, c) writeq_relaxed(v, c)
+
+#define gits_write_vpendbaser(v, c) writeq_relaxed(v, c)
+#define gits_read_vpendbaser(c) readq_relaxed(c)
+
#endif /* __ASSEMBLY__ */
#endif /* __ASM_ARCH_GICV3_H */
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index 74d08e44a651..a652ce0a5cb2 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -65,13 +65,13 @@ DECLARE_PER_CPU(const struct arch_timer_erratum_workaround *,
u64 _val; \
if (needs_unstable_timer_counter_workaround()) { \
const struct arch_timer_erratum_workaround *wa; \
- preempt_disable(); \
+ preempt_disable_notrace(); \
wa = __this_cpu_read(timer_unstable_counter_workaround); \
if (wa && wa->read_##reg) \
_val = wa->read_##reg(); \
else \
_val = read_sysreg(reg); \
- preempt_enable(); \
+ preempt_enable_notrace(); \
} else { \
_val = read_sysreg(reg); \
} \
diff --git a/arch/arm64/include/asm/asm-bug.h b/arch/arm64/include/asm/asm-bug.h
new file mode 100644
index 000000000000..636e755bcdca
--- /dev/null
+++ b/arch/arm64/include/asm/asm-bug.h
@@ -0,0 +1,54 @@
+#ifndef __ASM_ASM_BUG_H
+/*
+ * Copyright (C) 2017 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#define __ASM_ASM_BUG_H
+
+#include <asm/brk-imm.h>
+
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+#define _BUGVERBOSE_LOCATION(file, line) __BUGVERBOSE_LOCATION(file, line)
+#define __BUGVERBOSE_LOCATION(file, line) \
+ .pushsection .rodata.str,"aMS",@progbits,1; \
+ 2: .string file; \
+ .popsection; \
+ \
+ .long 2b - 0b; \
+ .short line;
+#else
+#define _BUGVERBOSE_LOCATION(file, line)
+#endif
+
+#ifdef CONFIG_GENERIC_BUG
+
+#define __BUG_ENTRY(flags) \
+ .pushsection __bug_table,"aw"; \
+ .align 2; \
+ 0: .long 1f - 0b; \
+_BUGVERBOSE_LOCATION(__FILE__, __LINE__) \
+ .short flags; \
+ .popsection; \
+ 1:
+#else
+#define __BUG_ENTRY(flags)
+#endif
+
+#define ASM_BUG_FLAGS(flags) \
+ __BUG_ENTRY(flags) \
+ brk BUG_BRK_IMM
+
+#define ASM_BUG() ASM_BUG_FLAGS(0)
+
+#endif /* __ASM_ASM_BUG_H */
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 1b67c3782d00..d58a6253c6ab 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -230,12 +230,18 @@ lr .req x30 // link register
.endm
/*
- * @dst: Result of per_cpu(sym, smp_processor_id())
+ * @dst: Result of per_cpu(sym, smp_processor_id()), can be SP for
+ * non-module code
* @sym: The name of the per-cpu variable
* @tmp: scratch register
*/
.macro adr_this_cpu, dst, sym, tmp
+#ifndef MODULE
+ adrp \tmp, \sym
+ add \dst, \tmp, #:lo12:\sym
+#else
adr_l \dst, \sym
+#endif
mrs \tmp, tpidr_el1
add \dst, \dst, \tmp
.endm
@@ -353,6 +359,12 @@ alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
alternative_else
dc civac, \kaddr
alternative_endif
+ .elseif (\op == cvap)
+alternative_if ARM64_HAS_DCPOP
+ sys 3, c7, c12, 1, \kaddr // dc cvap
+alternative_else
+ dc cvac, \kaddr
+alternative_endif
.else
dc \op, \kaddr
.endif
@@ -403,6 +415,17 @@ alternative_endif
.size __pi_##x, . - x; \
ENDPROC(x)
+/*
+ * Annotate a function as being unsuitable for kprobes.
+ */
+#ifdef CONFIG_KPROBES
+#define NOKPROBE(x) \
+ .pushsection "_kprobe_blacklist", "aw"; \
+ .quad x; \
+ .popsection;
+#else
+#define NOKPROBE(x)
+#endif
/*
* Emit a 64-bit absolute little endian symbol reference in a way that
* ensures that it will be resolved at build time, even when building a
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index 99fa69c9c3cf..9ef0797380cb 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -435,7 +435,7 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
" sub x30, x30, %[ret]\n"
" cbnz x30, 1b\n"
"2:")
- : [ret] "+&r" (x0), [v] "+Q" (v->counter)
+ : [ret] "+r" (x0), [v] "+Q" (v->counter)
:
: __LL_SC_CLOBBERS, "cc", "memory");
diff --git a/arch/arm64/include/asm/bug.h b/arch/arm64/include/asm/bug.h
index 366448eb0fb7..d7dc43752705 100644
--- a/arch/arm64/include/asm/bug.h
+++ b/arch/arm64/include/asm/bug.h
@@ -18,41 +18,12 @@
#ifndef _ARCH_ARM64_ASM_BUG_H
#define _ARCH_ARM64_ASM_BUG_H
-#include <asm/brk-imm.h>
+#include <linux/stringify.h>
-#ifdef CONFIG_DEBUG_BUGVERBOSE
-#define _BUGVERBOSE_LOCATION(file, line) __BUGVERBOSE_LOCATION(file, line)
-#define __BUGVERBOSE_LOCATION(file, line) \
- ".pushsection .rodata.str,\"aMS\",@progbits,1\n" \
- "2: .string \"" file "\"\n\t" \
- ".popsection\n\t" \
- \
- ".long 2b - 0b\n\t" \
- ".short " #line "\n\t"
-#else
-#define _BUGVERBOSE_LOCATION(file, line)
-#endif
-
-#ifdef CONFIG_GENERIC_BUG
-
-#define __BUG_ENTRY(flags) \
- ".pushsection __bug_table,\"a\"\n\t" \
- ".align 2\n\t" \
- "0: .long 1f - 0b\n\t" \
-_BUGVERBOSE_LOCATION(__FILE__, __LINE__) \
- ".short " #flags "\n\t" \
- ".popsection\n" \
- "1: "
-#else
-#define __BUG_ENTRY(flags) ""
-#endif
+#include <asm/asm-bug.h>
#define __BUG_FLAGS(flags) \
- asm volatile ( \
- __BUG_ENTRY(flags) \
- "brk %[imm]" :: [imm] "i" (BUG_BRK_IMM) \
- );
-
+ asm volatile (__stringify(ASM_BUG_FLAGS(flags)));
#define BUG() do { \
__BUG_FLAGS(0); \
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index d74a284abdc2..76d1cc85d5b1 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -67,7 +67,9 @@
*/
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void __flush_dcache_area(void *addr, size_t len);
+extern void __inval_dcache_area(void *addr, size_t len);
extern void __clean_dcache_area_poc(void *addr, size_t len);
+extern void __clean_dcache_area_pop(void *addr, size_t len);
extern void __clean_dcache_area_pou(void *addr, size_t len);
extern long __flush_cache_user_range(unsigned long start, unsigned long end);
extern void sync_icache_aliases(void *kaddr, unsigned long len);
@@ -150,6 +152,6 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}
-int set_memory_valid(unsigned long addr, unsigned long size, int enable);
+int set_memory_valid(unsigned long addr, int numpages, int enable);
#endif
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index 8d2272c6822c..8da621627d7c 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -39,7 +39,8 @@
#define ARM64_WORKAROUND_QCOM_FALKOR_E1003 18
#define ARM64_WORKAROUND_858921 19
#define ARM64_WORKAROUND_CAVIUM_30115 20
+#define ARM64_HAS_DCPOP 21
-#define ARM64_NCAPS 21
+#define ARM64_NCAPS 22
#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 8f3043aba873..b93904b16fc2 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -3,7 +3,9 @@
#include <asm/boot.h>
#include <asm/cpufeature.h>
+#include <asm/fpsimd.h>
#include <asm/io.h>
+#include <asm/memory.h>
#include <asm/mmu_context.h>
#include <asm/neon.h>
#include <asm/ptrace.h>
@@ -20,8 +22,8 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
#define arch_efi_call_virt_setup() \
({ \
- kernel_neon_begin(); \
efi_virtmap_load(); \
+ __efi_fpsimd_begin(); \
})
#define arch_efi_call_virt(p, f, args...) \
@@ -33,8 +35,8 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
#define arch_efi_call_virt_teardown() \
({ \
+ __efi_fpsimd_end(); \
efi_virtmap_unload(); \
- kernel_neon_end(); \
})
#define ARCH_EFI_IRQ_FLAGS_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
@@ -48,6 +50,13 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
*/
#define EFI_FDT_ALIGN SZ_2M /* used by allocate_new_fdt_and_exit_boot() */
+/*
+ * In some configurations (e.g. VMAP_STACK && 64K pages), stacks built into the
+ * kernel need greater alignment than we require the segments to be padded to.
+ */
+#define EFI_KIMG_ALIGN \
+ (SEGMENT_ALIGN > THREAD_ALIGN ? SEGMENT_ALIGN : THREAD_ALIGN)
+
/* on arm64, the FDT may be located anywhere in system RAM */
static inline unsigned long efi_get_max_fdt_addr(unsigned long dram_base)
{
@@ -81,6 +90,9 @@ static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base,
#define alloc_screen_info(x...) &screen_info
#define free_screen_info(x...)
+/* redeclare as 'hidden' so the compiler will generate relative references */
+extern struct screen_info screen_info __attribute__((__visibility__("hidden")));
+
static inline void efifb_setup_from_dmi(struct screen_info *si, const char *opt)
{
}
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index acae781f7359..33be513ef24c 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -114,10 +114,10 @@
/*
* This is the base location for PIE (ET_DYN with INTERP) loads. On
- * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * 64-bit, this is above 4GB to leave the entire 32-bit address
* space open for things that want to use the area for 32-bit pointers.
*/
-#define ELF_ET_DYN_BASE 0x100000000UL
+#define ELF_ET_DYN_BASE (2 * TASK_SIZE_64 / 3)
#ifndef __ASSEMBLY__
@@ -139,7 +139,6 @@ typedef struct user_fpsimd_state elf_fpregset_t;
#define SET_PERSONALITY(ex) \
({ \
- clear_bit(TIF_32BIT, &current->mm->context.flags); \
clear_thread_flag(TIF_32BIT); \
current->personality &= ~READ_IMPLIES_EXEC; \
})
@@ -195,7 +194,6 @@ typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG];
*/
#define COMPAT_SET_PERSONALITY(ex) \
({ \
- set_bit(TIF_32BIT, &current->mm->context.flags); \
set_thread_flag(TIF_32BIT); \
})
#define COMPAT_ARCH_DLINFO
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 8cabd57b6348..66ed8b6b9976 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -77,16 +77,23 @@
#define ESR_ELx_EC_MASK (UL(0x3F) << ESR_ELx_EC_SHIFT)
#define ESR_ELx_EC(esr) (((esr) & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT)
-#define ESR_ELx_IL (UL(1) << 25)
+#define ESR_ELx_IL_SHIFT (25)
+#define ESR_ELx_IL (UL(1) << ESR_ELx_IL_SHIFT)
#define ESR_ELx_ISS_MASK (ESR_ELx_IL - 1)
/* ISS field definitions shared by different classes */
-#define ESR_ELx_WNR (UL(1) << 6)
+#define ESR_ELx_WNR_SHIFT (6)
+#define ESR_ELx_WNR (UL(1) << ESR_ELx_WNR_SHIFT)
/* Shared ISS field definitions for Data/Instruction aborts */
-#define ESR_ELx_FnV (UL(1) << 10)
-#define ESR_ELx_EA (UL(1) << 9)
-#define ESR_ELx_S1PTW (UL(1) << 7)
+#define ESR_ELx_SET_SHIFT (11)
+#define ESR_ELx_SET_MASK (UL(3) << ESR_ELx_SET_SHIFT)
+#define ESR_ELx_FnV_SHIFT (10)
+#define ESR_ELx_FnV (UL(1) << ESR_ELx_FnV_SHIFT)
+#define ESR_ELx_EA_SHIFT (9)
+#define ESR_ELx_EA (UL(1) << ESR_ELx_EA_SHIFT)
+#define ESR_ELx_S1PTW_SHIFT (7)
+#define ESR_ELx_S1PTW (UL(1) << ESR_ELx_S1PTW_SHIFT)
/* Shared ISS fault status code(IFSC/DFSC) for Data/Instruction aborts */
#define ESR_ELx_FSC (0x3F)
@@ -97,15 +104,20 @@
#define ESR_ELx_FSC_PERM (0x0C)
/* ISS field definitions for Data Aborts */
-#define ESR_ELx_ISV (UL(1) << 24)
+#define ESR_ELx_ISV_SHIFT (24)
+#define ESR_ELx_ISV (UL(1) << ESR_ELx_ISV_SHIFT)
#define ESR_ELx_SAS_SHIFT (22)
#define ESR_ELx_SAS (UL(3) << ESR_ELx_SAS_SHIFT)
-#define ESR_ELx_SSE (UL(1) << 21)
+#define ESR_ELx_SSE_SHIFT (21)
+#define ESR_ELx_SSE (UL(1) << ESR_ELx_SSE_SHIFT)
#define ESR_ELx_SRT_SHIFT (16)
#define ESR_ELx_SRT_MASK (UL(0x1F) << ESR_ELx_SRT_SHIFT)
-#define ESR_ELx_SF (UL(1) << 15)
-#define ESR_ELx_AR (UL(1) << 14)
-#define ESR_ELx_CM (UL(1) << 8)
+#define ESR_ELx_SF_SHIFT (15)
+#define ESR_ELx_SF (UL(1) << ESR_ELx_SF_SHIFT)
+#define ESR_ELx_AR_SHIFT (14)
+#define ESR_ELx_AR (UL(1) << ESR_ELx_AR_SHIFT)
+#define ESR_ELx_CM_SHIFT (8)
+#define ESR_ELx_CM (UL(1) << ESR_ELx_CM_SHIFT)
/* ISS field definitions for exceptions taken in to Hyp */
#define ESR_ELx_CV (UL(1) << 24)
@@ -157,9 +169,10 @@
/*
* User space cache operations have the following sysreg encoding
* in System instructions.
- * op0=1, op1=3, op2=1, crn=7, crm={ 5, 10, 11, 14 }, WRITE (L=0)
+ * op0=1, op1=3, op2=1, crn=7, crm={ 5, 10, 11, 12, 14 }, WRITE (L=0)
*/
#define ESR_ELx_SYS64_ISS_CRM_DC_CIVAC 14
+#define ESR_ELx_SYS64_ISS_CRM_DC_CVAP 12
#define ESR_ELx_SYS64_ISS_CRM_DC_CVAU 11
#define ESR_ELx_SYS64_ISS_CRM_DC_CVAC 10
#define ESR_ELx_SYS64_ISS_CRM_IC_IVAU 5
@@ -209,6 +222,13 @@
#ifndef __ASSEMBLY__
#include <asm/types.h>
+static inline bool esr_is_data_abort(u32 esr)
+{
+ const u32 ec = ESR_ELx_EC(esr);
+
+ return ec == ESR_ELx_EC_DABT_LOW || ec == ESR_ELx_EC_DABT_CUR;
+}
+
const char *esr_get_class_string(u32 esr);
#endif /* __ASSEMBLY */
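
A hedged illustration (not from the patch) of how fault-handling code could combine the new esr_is_data_abort() helper with the existing ISS bits; the function name is invented.

static bool fault_is_write(u32 esr)
{
	if (!esr_is_data_abort(esr))
		return false;		/* WnR is only meaningful for data aborts */
	if (esr & ESR_ELx_CM)
		return false;		/* cache maintenance ops also report WnR == 1 */
	return !!(esr & ESR_ELx_WNR);
}
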
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index 50f559f574fe..410c48163c6a 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -41,16 +41,6 @@ struct fpsimd_state {
unsigned int cpu;
};
-/*
- * Struct for stacking the bottom 'n' FP/SIMD registers.
- */
-struct fpsimd_partial_state {
- u32 fpsr;
- u32 fpcr;
- u32 num_regs;
- __uint128_t vregs[32];
-};
-
#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
/* Masks for extracting the FPSR and FPCR from the FPSCR */
@@ -77,9 +67,9 @@ extern void fpsimd_update_current_state(struct fpsimd_state *state);
extern void fpsimd_flush_task_state(struct task_struct *target);
-extern void fpsimd_save_partial_state(struct fpsimd_partial_state *state,
- u32 num_regs);
-extern void fpsimd_load_partial_state(struct fpsimd_partial_state *state);
+/* For use by EFI runtime services calls only */
+extern void __efi_fpsimd_begin(void);
+extern void __efi_fpsimd_end(void);
#endif
diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h
index a2daf1293028..0f5fdd388b0d 100644
--- a/arch/arm64/include/asm/fpsimdmacros.h
+++ b/arch/arm64/include/asm/fpsimdmacros.h
@@ -75,59 +75,3 @@
ldr w\tmpnr, [\state, #16 * 2 + 4]
fpsimd_restore_fpcr x\tmpnr, \state
.endm
-
-.macro fpsimd_save_partial state, numnr, tmpnr1, tmpnr2
- mrs x\tmpnr1, fpsr
- str w\numnr, [\state, #8]
- mrs x\tmpnr2, fpcr
- stp w\tmpnr1, w\tmpnr2, [\state]
- adr x\tmpnr1, 0f
- add \state, \state, x\numnr, lsl #4
- sub x\tmpnr1, x\tmpnr1, x\numnr, lsl #1
- br x\tmpnr1
- stp q30, q31, [\state, #-16 * 30 - 16]
- stp q28, q29, [\state, #-16 * 28 - 16]
- stp q26, q27, [\state, #-16 * 26 - 16]
- stp q24, q25, [\state, #-16 * 24 - 16]
- stp q22, q23, [\state, #-16 * 22 - 16]
- stp q20, q21, [\state, #-16 * 20 - 16]
- stp q18, q19, [\state, #-16 * 18 - 16]
- stp q16, q17, [\state, #-16 * 16 - 16]
- stp q14, q15, [\state, #-16 * 14 - 16]
- stp q12, q13, [\state, #-16 * 12 - 16]
- stp q10, q11, [\state, #-16 * 10 - 16]
- stp q8, q9, [\state, #-16 * 8 - 16]
- stp q6, q7, [\state, #-16 * 6 - 16]
- stp q4, q5, [\state, #-16 * 4 - 16]
- stp q2, q3, [\state, #-16 * 2 - 16]
- stp q0, q1, [\state, #-16 * 0 - 16]
-0:
-.endm
-
-.macro fpsimd_restore_partial state, tmpnr1, tmpnr2
- ldp w\tmpnr1, w\tmpnr2, [\state]
- msr fpsr, x\tmpnr1
- fpsimd_restore_fpcr x\tmpnr2, x\tmpnr1
- adr x\tmpnr1, 0f
- ldr w\tmpnr2, [\state, #8]
- add \state, \state, x\tmpnr2, lsl #4
- sub x\tmpnr1, x\tmpnr1, x\tmpnr2, lsl #1
- br x\tmpnr1
- ldp q30, q31, [\state, #-16 * 30 - 16]
- ldp q28, q29, [\state, #-16 * 28 - 16]
- ldp q26, q27, [\state, #-16 * 26 - 16]
- ldp q24, q25, [\state, #-16 * 24 - 16]
- ldp q22, q23, [\state, #-16 * 22 - 16]
- ldp q20, q21, [\state, #-16 * 20 - 16]
- ldp q18, q19, [\state, #-16 * 18 - 16]
- ldp q16, q17, [\state, #-16 * 16 - 16]
- ldp q14, q15, [\state, #-16 * 14 - 16]
- ldp q12, q13, [\state, #-16 * 12 - 16]
- ldp q10, q11, [\state, #-16 * 10 - 16]
- ldp q8, q9, [\state, #-16 * 8 - 16]
- ldp q6, q7, [\state, #-16 * 6 - 16]
- ldp q4, q5, [\state, #-16 * 4 - 16]
- ldp q2, q3, [\state, #-16 * 2 - 16]
- ldp q0, q1, [\state, #-16 * 0 - 16]
-0:
-.endm
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index f32b42e8725d..5bb2fd4674e7 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -48,20 +48,10 @@ do { \
} while (0)
static inline int
-futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (int)(encoded_op << 8) >> 20;
- int cmparg = (int)(encoded_op << 20) >> 20;
int oldval = 0, ret, tmp;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1U << (oparg & 0x1f);
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
-
pagefault_disable();
switch (op) {
@@ -91,17 +81,9 @@ futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
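
For context, the decode and comparison deleted above move into a single generic wrapper shared by all architectures. The sketch below mirrors the logic that used to live in the arch helper (using sign_extend32() from <linux/bitops.h>); the exact generic implementation may differ in detail.

static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
{
	unsigned int op = (encoded_op & 0x70000000) >> 28;
	unsigned int cmp = (encoded_op & 0x0f000000) >> 24;
	int oparg = sign_extend32((encoded_op & 0x00fff000) >> 12, 11);
	int cmparg = sign_extend32(encoded_op & 0x00000fff, 11);
	int oldval, ret;

	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
		oparg = 1 << (oparg & 0x1f);

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
		return -EFAULT;

	ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
	if (ret)
		return ret;

	switch (cmp) {
	case FUTEX_OP_CMP_EQ: return oldval == cmparg;
	case FUTEX_OP_CMP_NE: return oldval != cmparg;
	case FUTEX_OP_CMP_LT: return oldval < cmparg;
	case FUTEX_OP_CMP_GE: return oldval >= cmparg;
	case FUTEX_OP_CMP_LE: return oldval <= cmparg;
	case FUTEX_OP_CMP_GT: return oldval > cmparg;
	default: return -ENOSYS;
	}
}
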
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
index 793bd73b0d07..1dca41bea16a 100644
--- a/arch/arm64/include/asm/hugetlb.h
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -18,7 +18,6 @@
#ifndef __ASM_HUGETLB_H
#define __ASM_HUGETLB_H
-#include <asm-generic/hugetlb.h>
#include <asm/page.h>
static inline pte_t huge_ptep_get(pte_t *ptep)
@@ -82,6 +81,14 @@ extern void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep);
extern void huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep);
+extern void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, unsigned long sz);
+#define huge_pte_clear huge_pte_clear
+extern void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte, unsigned long sz);
+#define set_huge_swap_pte_at set_huge_swap_pte_at
+
+#include <asm-generic/hugetlb.h>
#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
static inline bool gigantic_page_supported(void) { return true; }
diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h
index b77197d941fc..5e6f77239064 100644
--- a/arch/arm64/include/asm/irq.h
+++ b/arch/arm64/include/asm/irq.h
@@ -1,45 +1,12 @@
#ifndef __ASM_IRQ_H
#define __ASM_IRQ_H
-#define IRQ_STACK_SIZE THREAD_SIZE
-#define IRQ_STACK_START_SP THREAD_START_SP
-
#ifndef __ASSEMBLER__
-#include <linux/percpu.h>
-
#include <asm-generic/irq.h>
-#include <asm/thread_info.h>
struct pt_regs;
-DECLARE_PER_CPU(unsigned long [IRQ_STACK_SIZE/sizeof(long)], irq_stack);
-
-/*
- * The highest address on the stack, and the first to be used. Used to
- * find the dummy-stack frame put down by el?_irq() in entry.S, which
- * is structured as follows:
- *
- * ------------
- * | | <- irq_stack_ptr
- * top ------------
- * | x19 | <- irq_stack_ptr - 0x08
- * ------------
- * | x29 | <- irq_stack_ptr - 0x10
- * ------------
- *
- * where x19 holds a copy of the task stack pointer where the struct pt_regs
- * from kernel_entry can be found.
- *
- */
-#define IRQ_STACK_PTR(cpu) ((unsigned long)per_cpu(irq_stack, cpu) + IRQ_STACK_START_SP)
-
-/*
- * The offset from irq_stack_ptr where entry.S will store the original
- * stack pointer. Used by unwind_frame() and dump_backtrace().
- */
-#define IRQ_STACK_TO_TASK_STACK(ptr) (*((unsigned long *)((ptr) - 0x08)))
-
extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
static inline int nr_legacy_irqs(void)
@@ -47,14 +14,5 @@ static inline int nr_legacy_irqs(void)
return 0;
}
-static inline bool on_irq_stack(unsigned long sp, int cpu)
-{
- /* variable names the same as kernel/stacktrace.c */
- unsigned long low = (unsigned long)per_cpu(irq_stack, cpu);
- unsigned long high = low + IRQ_STACK_START_SP;
-
- return (low <= sp && sp <= high);
-}
-
#endif /* !__ASSEMBLER__ */
#endif
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index d68630007b14..e923b58606e2 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -326,12 +326,6 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
-/* We do not have shadow page tables, hence the empty hooks */
-static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
- unsigned long address)
-{
-}
-
struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
void kvm_arm_halt_guest(struct kvm *kvm);
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index a89cc22abadc..672c8684d5c2 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -175,18 +175,15 @@ static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd)
static inline void kvm_set_s2pte_readonly(pte_t *pte)
{
- pteval_t pteval;
- unsigned long tmp;
-
- asm volatile("// kvm_set_s2pte_readonly\n"
- " prfm pstl1strm, %2\n"
- "1: ldxr %0, %2\n"
- " and %0, %0, %3 // clear PTE_S2_RDWR\n"
- " orr %0, %0, %4 // set PTE_S2_RDONLY\n"
- " stxr %w1, %0, %2\n"
- " cbnz %w1, 1b\n"
- : "=&r" (pteval), "=&r" (tmp), "+Q" (pte_val(*pte))
- : "L" (~PTE_S2_RDWR), "L" (PTE_S2_RDONLY));
+ pteval_t old_pteval, pteval;
+
+ pteval = READ_ONCE(pte_val(*pte));
+ do {
+ old_pteval = pteval;
+ pteval &= ~PTE_S2_RDWR;
+ pteval |= PTE_S2_RDONLY;
+ pteval = cmpxchg_relaxed(&pte_val(*pte), old_pteval, pteval);
+ } while (pteval != old_pteval);
}
static inline bool kvm_s2pte_readonly(pte_t *pte)
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 32f82723338a..3585a5e26151 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -25,6 +25,7 @@
#include <linux/const.h>
#include <linux/types.h>
#include <asm/bug.h>
+#include <asm/page-def.h>
#include <asm/sizes.h>
/*
@@ -64,8 +65,10 @@
* TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
*/
#define VA_BITS (CONFIG_ARM64_VA_BITS)
-#define VA_START (UL(0xffffffffffffffff) << VA_BITS)
-#define PAGE_OFFSET (UL(0xffffffffffffffff) << (VA_BITS - 1))
+#define VA_START (UL(0xffffffffffffffff) - \
+ (UL(1) << VA_BITS) + 1)
+#define PAGE_OFFSET (UL(0xffffffffffffffff) - \
+ (UL(1) << (VA_BITS - 1)) + 1)
#define KIMAGE_VADDR (MODULES_END)
#define MODULES_END (MODULES_VADDR + MODULES_VSIZE)
#define MODULES_VADDR (VA_START + KASAN_SHADOW_SIZE)
@@ -101,6 +104,58 @@
#define KASAN_SHADOW_SIZE (0)
#endif
+#define MIN_THREAD_SHIFT 14
+
+/*
+ * VMAP'd stacks are allocated at page granularity, so we must ensure that such
+ * stacks are a multiple of page size.
+ */
+#if defined(CONFIG_VMAP_STACK) && (MIN_THREAD_SHIFT < PAGE_SHIFT)
+#define THREAD_SHIFT PAGE_SHIFT
+#else
+#define THREAD_SHIFT MIN_THREAD_SHIFT
+#endif
+
+#if THREAD_SHIFT >= PAGE_SHIFT
+#define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT)
+#endif
+
+#define THREAD_SIZE (UL(1) << THREAD_SHIFT)
+
+/*
+ * By aligning VMAP'd stacks to 2 * THREAD_SIZE, we can detect overflow by
+ * checking sp & (1 << THREAD_SHIFT), which we can do cheaply in the entry
+ * assembly.
+ */
+#ifdef CONFIG_VMAP_STACK
+#define THREAD_ALIGN (2 * THREAD_SIZE)
+#else
+#define THREAD_ALIGN THREAD_SIZE
+#endif
+
+#define IRQ_STACK_SIZE THREAD_SIZE
+
+#define OVERFLOW_STACK_SIZE SZ_4K
+
+/*
+ * Alignment of kernel segments (e.g. .text, .data).
+ */
+#if defined(CONFIG_DEBUG_ALIGN_RODATA)
+/*
+ * 4 KB granule: 1 level 2 entry
+ * 16 KB granule: 128 level 3 entries, with contiguous bit
+ * 64 KB granule: 32 level 3 entries, with contiguous bit
+ */
+#define SEGMENT_ALIGN SZ_2M
+#else
+/*
+ * 4 KB granule: 16 level 3 entries, with contiguous bit
+ * 16 KB granule: 4 level 3 entries, without contiguous bit
+ * 64 KB granule: 1 level 3 entry
+ */
+#define SEGMENT_ALIGN SZ_64K
+#endif
+
/*
* Memory types available.
*/
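
Illustration only (not from the patch) of the single-bit overflow test that THREAD_ALIGN == 2 * THREAD_SIZE makes possible: a vmapped task stack occupies [base, base + THREAD_SIZE) with base aligned to 2 * THREAD_SIZE, so SP values inside the stack have bit THREAD_SHIFT clear, while an SP that has run off the bottom (by less than THREAD_SIZE) lands in the guard region where that bit is set.

static inline bool sp_has_overflowed(unsigned long sp)
{
	return sp & (1UL << THREAD_SHIFT);
}
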
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 5468c834b072..0d34bf0a89c7 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -16,6 +16,8 @@
#ifndef __ASM_MMU_H
#define __ASM_MMU_H
+#define MMCF_AARCH32 0x1 /* mm context flag for AArch32 executables */
+
typedef struct {
atomic64_t id;
void *vdso;
diff --git a/arch/arm64/include/asm/neon.h b/arch/arm64/include/asm/neon.h
index ad4cdc966c0f..f922eaf780f9 100644
--- a/arch/arm64/include/asm/neon.h
+++ b/arch/arm64/include/asm/neon.h
@@ -8,12 +8,22 @@
* published by the Free Software Foundation.
*/
+#ifndef __ASM_NEON_H
+#define __ASM_NEON_H
+
#include <linux/types.h>
#include <asm/fpsimd.h>
#define cpu_has_neon() system_supports_fpsimd()
-#define kernel_neon_begin() kernel_neon_begin_partial(32)
-
-void kernel_neon_begin_partial(u32 num_regs);
+void kernel_neon_begin(void);
void kernel_neon_end(void);
+
+/*
+ * Temporary macro to allow the crypto code to compile. Note that the
+ * semantics of kernel_neon_begin_partial() are now different from the
+ * original as it does not allow being called in an interrupt context.
+ */
+#define kernel_neon_begin_partial(num_regs) kernel_neon_begin()
+
+#endif /* ! __ASM_NEON_H */
diff --git a/arch/arm64/include/asm/numa.h b/arch/arm64/include/asm/numa.h
index bf466d1876e3..ef7b23863a7c 100644
--- a/arch/arm64/include/asm/numa.h
+++ b/arch/arm64/include/asm/numa.h
@@ -7,9 +7,6 @@
#define NR_NODE_MEMBLKS (MAX_NUMNODES * 2)
-/* currently, arm64 implements flat NUMA topology */
-#define parent_node(node) (node)
-
int __node_distance(int from, int to);
#define node_distance(a, b) __node_distance(a, b)
diff --git a/arch/arm64/include/asm/page-def.h b/arch/arm64/include/asm/page-def.h
new file mode 100644
index 000000000000..01591a29dc2e
--- /dev/null
+++ b/arch/arm64/include/asm/page-def.h
@@ -0,0 +1,34 @@
+/*
+ * Based on arch/arm/include/asm/page.h
+ *
+ * Copyright (C) 1995-2003 Russell King
+ * Copyright (C) 2017 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_PAGE_DEF_H
+#define __ASM_PAGE_DEF_H
+
+#include <linux/const.h>
+
+/* PAGE_SHIFT determines the page size */
+/* CONT_SHIFT determines the number of pages which can be tracked together */
+#define PAGE_SHIFT CONFIG_ARM64_PAGE_SHIFT
+#define CONT_SHIFT CONFIG_ARM64_CONT_SHIFT
+#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE-1))
+
+#define CONT_SIZE (_AC(1, UL) << (CONT_SHIFT + PAGE_SHIFT))
+#define CONT_MASK (~(CONT_SIZE-1))
+
+#endif /* __ASM_PAGE_DEF_H */
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index 8472c6def5ef..60d02c81a3a2 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -19,17 +19,7 @@
#ifndef __ASM_PAGE_H
#define __ASM_PAGE_H
-#include <linux/const.h>
-
-/* PAGE_SHIFT determines the page size */
-/* CONT_SHIFT determines the number of pages which can be tracked together */
-#define PAGE_SHIFT CONFIG_ARM64_PAGE_SHIFT
-#define CONT_SHIFT CONFIG_ARM64_CONT_SHIFT
-#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
-#define PAGE_MASK (~(PAGE_SIZE-1))
-
-#define CONT_SIZE (_AC(1, UL) << (CONT_SHIFT + PAGE_SHIFT))
-#define CONT_MASK (~(CONT_SIZE-1))
+#include <asm/page-def.h>
#ifndef __ASSEMBLY__
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index 2142c7726e76..0a5635fb0ef9 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -63,23 +63,21 @@
#define PAGE_S2 __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
#define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
-#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
+#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_PXN | PTE_UXN)
#define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
#define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
-#define PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
-#define PAGE_COPY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
-#define PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
-#define PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
-#define PAGE_EXECONLY __pgprot(_PAGE_DEFAULT | PTE_NG | PTE_PXN)
+#define PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
+#define PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN)
+#define PAGE_EXECONLY __pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN)
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
+#define __P010 PAGE_READONLY
+#define __P011 PAGE_READONLY
#define __P100 PAGE_EXECONLY
#define __P101 PAGE_READONLY_EXEC
-#define __P110 PAGE_COPY_EXEC
-#define __P111 PAGE_COPY_EXEC
+#define __P110 PAGE_READONLY_EXEC
+#define __P111 PAGE_READONLY_EXEC
#define __S000 PAGE_NONE
#define __S001 PAGE_READONLY
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 6eae342ced6b..bc4e92337d16 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -39,6 +39,7 @@
#ifndef __ASSEMBLY__
+#include <asm/cmpxchg.h>
#include <asm/fixmap.h>
#include <linux/mmdebug.h>
@@ -84,11 +85,7 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
(__boundary - 1 < (end) - 1) ? __boundary : (end); \
})
-#ifdef CONFIG_ARM64_HW_AFDBM
#define pte_hw_dirty(pte) (pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
-#else
-#define pte_hw_dirty(pte) (0)
-#endif
#define pte_sw_dirty(pte) (!!(pte_val(pte) & PTE_DIRTY))
#define pte_dirty(pte) (pte_sw_dirty(pte) || pte_hw_dirty(pte))
@@ -124,12 +121,16 @@ static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
static inline pte_t pte_wrprotect(pte_t pte)
{
- return clear_pte_bit(pte, __pgprot(PTE_WRITE));
+ pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
+ pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
+ return pte;
}
static inline pte_t pte_mkwrite(pte_t pte)
{
- return set_pte_bit(pte, __pgprot(PTE_WRITE));
+ pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
+ pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
+ return pte;
}
static inline pte_t pte_mkclean(pte_t pte)
@@ -168,11 +169,6 @@ static inline pte_t pte_mknoncont(pte_t pte)
return clear_pte_bit(pte, __pgprot(PTE_CONT));
}
-static inline pte_t pte_clear_rdonly(pte_t pte)
-{
- return clear_pte_bit(pte, __pgprot(PTE_RDONLY));
-}
-
static inline pte_t pte_mkpresent(pte_t pte)
{
return set_pte_bit(pte, __pgprot(PTE_VALID));
@@ -220,22 +216,15 @@ extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
- if (pte_present(pte)) {
- if (pte_sw_dirty(pte) && pte_write(pte))
- pte_val(pte) &= ~PTE_RDONLY;
- else
- pte_val(pte) |= PTE_RDONLY;
- if (pte_user_exec(pte) && !pte_special(pte))
- __sync_icache_dcache(pte, addr);
- }
+ if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
+ __sync_icache_dcache(pte, addr);
/*
* If the existing pte is valid, check for potential race with
* hardware updates of the pte (ptep_set_access_flags safely changes
* valid ptes without going through an invalid entry).
*/
- if (IS_ENABLED(CONFIG_ARM64_HW_AFDBM) &&
- pte_valid(*ptep) && pte_valid(pte)) {
+ if (pte_valid(*ptep) && pte_valid(pte)) {
VM_WARN_ONCE(!pte_young(pte),
"%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
__func__, pte_val(*ptep), pte_val(pte));
@@ -571,7 +560,6 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}
-#ifdef CONFIG_ARM64_HW_AFDBM
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep,
@@ -593,20 +581,17 @@ static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(pte_t *ptep)
{
- pteval_t pteval;
- unsigned int tmp, res;
+ pte_t old_pte, pte;
- asm volatile("// __ptep_test_and_clear_young\n"
- " prfm pstl1strm, %2\n"
- "1: ldxr %0, %2\n"
- " ubfx %w3, %w0, %5, #1 // extract PTE_AF (young)\n"
- " and %0, %0, %4 // clear PTE_AF\n"
- " stxr %w1, %0, %2\n"
- " cbnz %w1, 1b\n"
- : "=&r" (pteval), "=&r" (tmp), "+Q" (pte_val(*ptep)), "=&r" (res)
- : "L" (~PTE_AF), "I" (ilog2(PTE_AF)));
+ pte = READ_ONCE(*ptep);
+ do {
+ old_pte = pte;
+ pte = pte_mkold(pte);
+ pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
+ pte_val(old_pte), pte_val(pte));
+ } while (pte_val(pte) != pte_val(old_pte));
- return res;
+ return pte_young(pte);
}
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
@@ -630,17 +615,7 @@ static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
unsigned long address, pte_t *ptep)
{
- pteval_t old_pteval;
- unsigned int tmp;
-
- asm volatile("// ptep_get_and_clear\n"
- " prfm pstl1strm, %2\n"
- "1: ldxr %0, %2\n"
- " stxr %w1, xzr, %2\n"
- " cbnz %w1, 1b\n"
- : "=&r" (old_pteval), "=&r" (tmp), "+Q" (pte_val(*ptep)));
-
- return __pte(old_pteval);
+ return __pte(xchg_relaxed(&pte_val(*ptep), 0));
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -653,27 +628,32 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
- * ptep_set_wrprotect - mark read-only while trasferring potential hardware
- * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
+ * ptep_set_wrprotect - mark read-only while preserving the hardware update of
+ * the Access Flag.
*/
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
- pteval_t pteval;
- unsigned long tmp;
+ pte_t old_pte, pte;
- asm volatile("// ptep_set_wrprotect\n"
- " prfm pstl1strm, %2\n"
- "1: ldxr %0, %2\n"
- " tst %0, %4 // check for hw dirty (!PTE_RDONLY)\n"
- " csel %1, %3, xzr, eq // set PTE_DIRTY|PTE_RDONLY if dirty\n"
- " orr %0, %0, %1 // if !dirty, PTE_RDONLY is already set\n"
- " and %0, %0, %5 // clear PTE_WRITE/PTE_DBM\n"
- " stxr %w1, %0, %2\n"
- " cbnz %w1, 1b\n"
- : "=&r" (pteval), "=&r" (tmp), "+Q" (pte_val(*ptep))
- : "r" (PTE_DIRTY|PTE_RDONLY), "L" (PTE_RDONLY), "L" (~PTE_WRITE)
- : "cc");
+ /*
+ * ptep_set_wrprotect() is only called on CoW mappings which are
+ * private (!VM_SHARED) with the pte either read-only (!PTE_WRITE &&
+ * PTE_RDONLY) or writable and software-dirty (PTE_WRITE &&
+ * !PTE_RDONLY && PTE_DIRTY); see is_cow_mapping() and
+ * protection_map[]. There is no race with the hardware update of the
+ * dirty state: clearing of PTE_RDONLY when PTE_WRITE (a.k.a. PTE_DBM)
+ * is set.
+ */
+ VM_WARN_ONCE(pte_write(*ptep) && !pte_dirty(*ptep),
+ "%s: potential race with hardware DBM", __func__);
+ pte = READ_ONCE(*ptep);
+ do {
+ old_pte = pte;
+ pte = pte_wrprotect(pte);
+ pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
+ pte_val(old_pte), pte_val(pte));
+ } while (pte_val(pte) != pte_val(old_pte));
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -684,7 +664,6 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}
#endif
-#endif /* CONFIG_ARM64_HW_AFDBM */
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
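
The ldxr/stxr sequences removed above (here and in kvm_mmu.h) are all replaced by the same READ_ONCE + cmpxchg_relaxed() retry shape. A condensed, hedged sketch of that shape, with an invented helper name and modifier callback, purely for illustration:

static inline void pte_modify_atomic(pte_t *ptep, pte_t (*modify)(pte_t))
{
	pte_t old_pte, pte;

	pte = READ_ONCE(*ptep);
	do {
		old_pte = pte;
		pte = modify(pte);	/* e.g. pte_mkold() or pte_wrprotect() */
		/* cmpxchg_relaxed() returns the value it found; retry on mismatch */
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));
}
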
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 64c9e78f9882..29adab8138c3 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -112,7 +112,7 @@ void tls_preserve_current_state(void);
static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
{
memset(regs, 0, sizeof(*regs));
- regs->syscallno = ~0UL;
+ forget_syscall(regs);
regs->pc = pc;
}
@@ -159,7 +159,7 @@ extern struct task_struct *cpu_switch_to(struct task_struct *prev,
struct task_struct *next);
#define task_pt_regs(p) \
- ((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
+ ((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)
#define KSTK_EIP(tsk) ((unsigned long)task_pt_regs(tsk)->pc)
#define KSTK_ESP(tsk) user_stack_pointer(task_pt_regs(tsk))
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 11403fdd0a50..6069d66e0bc2 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -72,8 +72,19 @@
#define COMPAT_PT_TEXT_ADDR 0x10000
#define COMPAT_PT_DATA_ADDR 0x10004
#define COMPAT_PT_TEXT_END_ADDR 0x10008
+
+/*
+ * If pt_regs.syscallno == NO_SYSCALL, then the thread is not executing
+ * a syscall -- i.e., its most recent entry into the kernel from
+ * userspace was not via SVC, or otherwise a tracer cancelled the syscall.
+ *
+ * This must have the value -1, for ABI compatibility with ptrace etc.
+ */
+#define NO_SYSCALL (-1)
+
#ifndef __ASSEMBLY__
#include <linux/bug.h>
+#include <linux/types.h>
/* sizeof(struct user) for AArch32 */
#define COMPAT_USER_SZ 296
@@ -116,11 +127,29 @@ struct pt_regs {
};
};
u64 orig_x0;
- u64 syscallno;
+#ifdef __AARCH64EB__
+ u32 unused2;
+ s32 syscallno;
+#else
+ s32 syscallno;
+ u32 unused2;
+#endif
+
u64 orig_addr_limit;
u64 unused; // maintain 16 byte alignment
+ u64 stackframe[2];
};
+static inline bool in_syscall(struct pt_regs const *regs)
+{
+ return regs->syscallno != NO_SYSCALL;
+}
+
+static inline void forget_syscall(struct pt_regs *regs)
+{
+ regs->syscallno = NO_SYSCALL;
+}
+
#define MAX_REG_OFFSET offsetof(struct pt_regs, pstate)
#define arch_has_single_step() (1)
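
A hedged, simplified sketch (not from the patch) of how callers are expected to use the new helpers instead of poking pt_regs::syscallno directly, for example when deciding whether to restart or cancel a syscall; the function name and surrounding logic are invented.

static void restart_or_cancel(struct pt_regs *regs, bool restart)
{
	if (!in_syscall(regs))
		return;				/* nothing to do: no syscall in progress */

	if (restart) {
		regs->regs[0] = regs->orig_x0;	/* restore the clobbered first argument */
		regs->pc -= 4;			/* re-execute the SVC instruction */
	} else {
		forget_syscall(regs);		/* tracer or signal cancelled the syscall */
	}
}
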
diff --git a/arch/arm64/include/asm/signal32.h b/arch/arm64/include/asm/signal32.h
index eeaa97559bab..81abea0b7650 100644
--- a/arch/arm64/include/asm/signal32.h
+++ b/arch/arm64/include/asm/signal32.h
@@ -22,8 +22,6 @@
#define AARCH32_KERN_SIGRET_CODE_OFFSET 0x500
-extern const compat_ulong_t aarch32_sigret_code[6];
-
int compat_setup_frame(int usig, struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs);
int compat_setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
diff --git a/arch/arm64/include/asm/simd.h b/arch/arm64/include/asm/simd.h
new file mode 100644
index 000000000000..fa8b3fe932e6
--- /dev/null
+++ b/arch/arm64/include/asm/simd.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#ifndef __ASM_SIMD_H
+#define __ASM_SIMD_H
+
+#include <linux/compiler.h>
+#include <linux/irqflags.h>
+#include <linux/percpu.h>
+#include <linux/preempt.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_KERNEL_MODE_NEON
+
+DECLARE_PER_CPU(bool, kernel_neon_busy);
+
+/*
+ * may_use_simd - whether it is allowable at this time to issue SIMD
+ * instructions or access the SIMD register file
+ *
+ * Callers must not assume that the result remains true beyond the next
+ * preempt_enable() or return from softirq context.
+ */
+static __must_check inline bool may_use_simd(void)
+{
+ /*
+ * The raw_cpu_read() is racy if called with preemption enabled.
+ * This is not a bug: kernel_neon_busy is only set when
+ * preemption is disabled, so we cannot migrate to another CPU
+ * while it is set, nor can we migrate to a CPU where it is set.
+ * So, if we find it clear on some CPU then we're guaranteed to
+ * find it clear on any CPU we could migrate to.
+ *
+ * If we are in between kernel_neon_begin()...kernel_neon_end(),
+ * the flag will be set, but preemption is also disabled, so we
+ * can't migrate to another CPU and spuriously see it become
+ * false.
+ */
+ return !in_irq() && !irqs_disabled() && !in_nmi() &&
+ !raw_cpu_read(kernel_neon_busy);
+}
+
+#else /* ! CONFIG_KERNEL_MODE_NEON */
+
+static __must_check inline bool may_use_simd(void) {
+ return false;
+}
+
+#endif /* ! CONFIG_KERNEL_MODE_NEON */
+
+#endif
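
This is the pattern the crypto glue changes above follow: only enter a kernel_neon_begin()/kernel_neon_end() section when may_use_simd() says it is safe, and fall back to a scalar implementation otherwise. A hedged sketch, with the two transform functions invented for illustration:

#include <linux/types.h>
#include <asm/neon.h>
#include <asm/simd.h>

void transform_neon(u8 *dst, const u8 *src, int blocks);	/* hypothetical NEON path   */
void transform_scalar(u8 *dst, const u8 *src, int blocks);	/* hypothetical scalar path */

void do_transform(u8 *dst, const u8 *src, int blocks)
{
	if (may_use_simd()) {
		kernel_neon_begin();	/* safe: may_use_simd() was checked above */
		transform_neon(dst, src, blocks);
		kernel_neon_end();
	} else {
		transform_scalar(dst, src, blocks);
	}
}
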
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index 55f08c5acfad..f82b447bd34f 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -148,7 +148,7 @@ static inline void cpu_panic_kernel(void)
*/
bool cpus_are_stuck_in_kernel(void);
-extern void smp_send_crash_stop(void);
+extern void crash_smp_send_stop(void);
extern bool smp_crash_stop_failed(void);
#endif /* ifndef __ASSEMBLY__ */
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index cae331d553f8..95ad7102b63c 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -26,58 +26,6 @@
* The memory barriers are implicit with the load-acquire and store-release
* instructions.
*/
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
- unsigned int tmp;
- arch_spinlock_t lockval;
- u32 owner;
-
- /*
- * Ensure prior spin_lock operations to other locks have completed
- * on this CPU before we test whether "lock" is locked.
- */
- smp_mb();
- owner = READ_ONCE(lock->owner) << 16;
-
- asm volatile(
-" sevl\n"
-"1: wfe\n"
-"2: ldaxr %w0, %2\n"
- /* Is the lock free? */
-" eor %w1, %w0, %w0, ror #16\n"
-" cbz %w1, 3f\n"
- /* Lock taken -- has there been a subsequent unlock->lock transition? */
-" eor %w1, %w3, %w0, lsl #16\n"
-" cbz %w1, 1b\n"
- /*
- * The owner has been updated, so there was an unlock->lock
- * transition that we missed. That means we can rely on the
- * store-release of the unlock operation paired with the
- * load-acquire of the lock operation to publish any of our
- * previous stores to the new lock owner and therefore don't
- * need to bother with the writeback below.
- */
-" b 4f\n"
-"3:\n"
- /*
- * Serialise against any concurrent lockers by writing back the
- * unlocked lock value
- */
- ARM64_LSE_ATOMIC_INSN(
- /* LL/SC */
-" stxr %w1, %w0, %2\n"
- __nops(2),
- /* LSE atomics */
-" mov %w1, %w0\n"
-" cas %w0, %w0, %2\n"
-" eor %w1, %w1, %w0\n")
- /* Somebody else wrote to the lock, GOTO 10 and reload the value */
-" cbnz %w1, 2b\n"
-"4:"
- : "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
- : "r" (owner)
- : "memory");
-}
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
@@ -176,7 +124,11 @@ static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
- smp_mb(); /* See arch_spin_unlock_wait */
+ /*
+ * Ensure prior spin_lock operations to other locks have completed
+ * on this CPU before we test whether "lock" is locked.
+ */
+ smp_mb(); /* ^^^ */
return !arch_spin_value_unlocked(READ_ONCE(*lock));
}
@@ -358,14 +310,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
#define arch_read_relax(lock) cpu_relax()
#define arch_write_relax(lock) cpu_relax()
-/*
- * Accesses appearing in program order before a spin_lock() operation
- * can be reordered with accesses inside the critical section, by virtue
- * of arch_spin_lock being constructed using acquire semantics.
- *
- * In cases where this is problematic (e.g. try_to_wake_up), an
- * smp_mb__before_spinlock() can restore the required ordering.
- */
-#define smp_mb__before_spinlock() smp_mb()
+/* See include/linux/spinlock.h */
+#define smp_mb__after_spinlock() smp_mb()
#endif /* __ASM_SPINLOCK_H */
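
A hedged illustration of where the new barrier is meant to be used, mirroring the kind of ordering try_to_wake_up() relies on rather than anything in this patch: spin_lock() only has acquire semantics, so a store issued before the lock may be reordered into the critical section; smp_mb__after_spinlock() upgrades the acquisition to a full barrier where that matters. The structure and field names below are invented.

#include <linux/spinlock.h>

struct wake_example {
	spinlock_t lock;
	int pending;
	int state;
};

static void example_wakeup(struct wake_example *w)
{
	WRITE_ONCE(w->pending, 1);	/* must be ordered before the load of ->state */

	spin_lock(&w->lock);
	smp_mb__after_spinlock();	/* full barrier: store above vs. load below */
	if (READ_ONCE(w->state))
		WRITE_ONCE(w->state, 0);
	spin_unlock(&w->lock);
}
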
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index 5b6eafccc5d8..6ad30776e984 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -16,11 +16,15 @@
#ifndef __ASM_STACKTRACE_H
#define __ASM_STACKTRACE_H
-struct task_struct;
+#include <linux/percpu.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+
+#include <asm/memory.h>
+#include <asm/ptrace.h>
struct stackframe {
unsigned long fp;
- unsigned long sp;
unsigned long pc;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
unsigned int graph;
@@ -32,4 +36,57 @@ extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
int (*fn)(struct stackframe *, void *), void *data);
extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk);
+DECLARE_PER_CPU(unsigned long *, irq_stack_ptr);
+
+static inline bool on_irq_stack(unsigned long sp)
+{
+ unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr);
+ unsigned long high = low + IRQ_STACK_SIZE;
+
+ if (!low)
+ return false;
+
+ return (low <= sp && sp < high);
+}
+
+static inline bool on_task_stack(struct task_struct *tsk, unsigned long sp)
+{
+ unsigned long low = (unsigned long)task_stack_page(tsk);
+ unsigned long high = low + THREAD_SIZE;
+
+ return (low <= sp && sp < high);
+}
+
+#ifdef CONFIG_VMAP_STACK
+DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
+
+static inline bool on_overflow_stack(unsigned long sp)
+{
+ unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack);
+ unsigned long high = low + OVERFLOW_STACK_SIZE;
+
+ return (low <= sp && sp < high);
+}
+#else
+static inline bool on_overflow_stack(unsigned long sp) { return false; }
+#endif
+
+/*
+ * We can only safely access per-cpu stacks from current in a non-preemptible
+ * context.
+ */
+static inline bool on_accessible_stack(struct task_struct *tsk, unsigned long sp)
+{
+ if (on_task_stack(tsk, sp))
+ return true;
+ if (tsk != current || preemptible())
+ return false;
+ if (on_irq_stack(sp))
+ return true;
+ if (on_overflow_stack(sp))
+ return true;
+
+ return false;
+}
+
#endif /* __ASM_STACKTRACE_H */
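
The reworked stacktrace.h above drops the sp member from struct stackframe and adds the on_*_stack() helpers that unwind_frame() uses to validate a frame pointer before dereferencing it (see the stacktrace.c hunk at the end of this section). A hedged sketch of how a caller seeds a walk with these declarations; the callback and depth counter are hypothetical, and the FUNCTION_GRAPH_TRACER .graph initialisation is elided:

	#include <asm/stacktrace.h>

	/* Hypothetical callback: count frames; returning non-zero stops the walk. */
	static int count_frame(struct stackframe *frame, void *data)
	{
		(*(int *)data)++;
		return 0;
	}

	static int count_stack_depth(void)
	{
		struct stackframe frame = {
			.fp = (unsigned long)__builtin_frame_address(0),
			.pc = (unsigned long)count_stack_depth, /* dummy, as in return_address() */
		};
		int depth = 0;

		walk_stackframe(current, &frame, count_frame, &depth);
		return depth;
	}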
diff --git a/arch/arm64/include/asm/string.h b/arch/arm64/include/asm/string.h
index d0aa42907569..dd95d33a5bd5 100644
--- a/arch/arm64/include/asm/string.h
+++ b/arch/arm64/include/asm/string.h
@@ -52,6 +52,10 @@ extern void *__memset(void *, int, __kernel_size_t);
#define __HAVE_ARCH_MEMCMP
extern int memcmp(const void *, const void *, size_t);
+#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
+#define __HAVE_ARCH_MEMCPY_FLUSHCACHE
+void memcpy_flushcache(void *dst, const void *src, size_t cnt);
+#endif
#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 16e44fa9b3b6..f707fed5886f 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -329,6 +329,7 @@
#define ID_AA64ISAR1_LRCPC_SHIFT 20
#define ID_AA64ISAR1_FCMA_SHIFT 16
#define ID_AA64ISAR1_JSCVT_SHIFT 12
+#define ID_AA64ISAR1_DPB_SHIFT 0
/* id_aa64pfr0 */
#define ID_AA64PFR0_GIC_SHIFT 24
@@ -492,7 +493,7 @@ asm(
* the "%x0" template means XZR.
*/
#define write_sysreg(v, r) do { \
- u64 __val = (u64)v; \
+ u64 __val = (u64)(v); \
asm volatile("msr " __stringify(r) ", %x0" \
: : "rZ" (__val)); \
} while (0)
@@ -508,7 +509,7 @@ asm(
})
#define write_sysreg_s(v, r) do { \
- u64 __val = (u64)v; \
+ u64 __val = (u64)(v); \
asm volatile("msr_s " __stringify(r) ", %x0" : : "rZ" (__val)); \
} while (0)
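
The write_sysreg()/write_sysreg_s() change is pure macro hygiene: the argument is parenthesised before the cast. Without the parentheses the cast binds more tightly than most binary operators, so it applies only to the first sub-expression the caller passed. A small hedged illustration with arbitrary values (the variables and the target register are only for demonstration):

	u32 lo = 0xffffffffU, inc = 1U;

	/* Old expansion, (u64)v:   (u64)lo + inc   == 0x100000000 -- cast widens lo only.
	 * New expansion, (u64)(v): (u64)(lo + inc) == 0x0         -- matches the expression
	 * the caller actually wrote, which wraps in 32 bits before being widened.
	 */
	write_sysreg(lo + inc, tpidr_el1);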
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 46c3b93cf865..ddded6497a8a 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -23,19 +23,11 @@
#include <linux/compiler.h>
-#ifdef CONFIG_ARM64_4K_PAGES
-#define THREAD_SIZE_ORDER 2
-#elif defined(CONFIG_ARM64_16K_PAGES)
-#define THREAD_SIZE_ORDER 0
-#endif
-
-#define THREAD_SIZE 16384
-#define THREAD_START_SP (THREAD_SIZE - 16)
-
#ifndef __ASSEMBLY__
struct task_struct;
+#include <asm/memory.h>
#include <asm/stack_pointer.h>
#include <asm/types.h>
@@ -68,6 +60,9 @@ struct thread_info {
#define thread_saved_fp(tsk) \
((unsigned long)(tsk->thread.cpu_context.fp))
+void arch_setup_new_exec(void);
+#define arch_setup_new_exec arch_setup_new_exec
+
#endif
/*
@@ -86,6 +81,7 @@ struct thread_info {
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
#define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */
#define TIF_UPROBE 4 /* uprobe breakpoint or singlestep */
+#define TIF_FSCHECK 5 /* Check FS is USER_DS on return */
#define TIF_NOHZ 7
#define TIF_SYSCALL_TRACE 8
#define TIF_SYSCALL_AUDIT 9
@@ -107,11 +103,12 @@ struct thread_info {
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
#define _TIF_UPROBE (1 << TIF_UPROBE)
+#define _TIF_FSCHECK (1 << TIF_FSCHECK)
#define _TIF_32BIT (1 << TIF_32BIT)
#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
_TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
- _TIF_UPROBE)
+ _TIF_UPROBE | _TIF_FSCHECK)
#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
_TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h
index 02e9035b0685..d131501c6222 100644
--- a/arch/arm64/include/asm/traps.h
+++ b/arch/arm64/include/asm/traps.h
@@ -37,18 +37,11 @@ void unregister_undef_hook(struct undef_hook *hook);
void arm64_notify_segfault(struct pt_regs *regs, unsigned long addr);
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static inline int __in_irqentry_text(unsigned long ptr)
{
return ptr >= (unsigned long)&__irqentry_text_start &&
ptr < (unsigned long)&__irqentry_text_end;
}
-#else
-static inline int __in_irqentry_text(unsigned long ptr)
-{
- return 0;
-}
-#endif
static inline int in_exception_text(unsigned long ptr)
{
@@ -60,4 +53,9 @@ static inline int in_exception_text(unsigned long ptr)
return in ? : __in_irqentry_text(ptr);
}
+static inline int in_entry_text(unsigned long ptr)
+{
+ return ptr >= (unsigned long)&__entry_text_start &&
+ ptr < (unsigned long)&__entry_text_end;
+}
#endif
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 8f0a1de11e4a..fc0f9eb66039 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -45,6 +45,9 @@ static inline void set_fs(mm_segment_t fs)
{
current_thread_info()->addr_limit = fs;
+ /* On user-mode return, check fs is correct */
+ set_thread_flag(TIF_FSCHECK);
+
/*
* Enable/disable UAO so that copy_to_user() etc can access
* kernel memory with the unprivileged instructions.
@@ -69,7 +72,7 @@ static inline void set_fs(mm_segment_t fs)
*/
#define __range_ok(addr, size) \
({ \
- unsigned long __addr = (unsigned long __force)(addr); \
+ unsigned long __addr = (unsigned long)(addr); \
unsigned long flag, roksum; \
__chk_user_ptr(addr); \
asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls" \
@@ -347,4 +350,16 @@ extern long strncpy_from_user(char *dest, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);
+#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
+struct page;
+void memcpy_page_flushcache(char *to, struct page *page, size_t offset, size_t len);
+extern unsigned long __must_check __copy_user_flushcache(void *to, const void __user *from, unsigned long n);
+
+static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
+{
+ kasan_check_write(dst, size);
+ return __copy_user_flushcache(dst, src, size);
+}
+#endif
+
#endif /* __ASM_UACCESS_H */
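
The string.h and uaccess.h hunks above add the CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE plumbing: memcpy_flushcache() for kernel-to-pmem copies and __copy_from_user_flushcache() for user-to-pmem copies, both cleaning the destination to the Point of Persistence as they go. A hedged sketch of a caller, with hypothetical parameters and no claim about the real pmem driver code paths:

	#include <linux/errno.h>
	#include <linux/uaccess.h>

	/* Hedged sketch: copy @len bytes of user data into a persistent-memory
	 * buffer so the data is durable once the call returns. @pmem_dst, @usrc
	 * and @len are hypothetical.
	 */
	static int store_to_pmem(void *pmem_dst, const void __user *usrc, unsigned len)
	{
		if (__copy_from_user_flushcache(pmem_dst, usrc, len))
			return -EFAULT;		/* partial copy: treat as failure here */
		return 0;
	}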
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index 4e187ce2a811..4b9344cba83a 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -35,5 +35,6 @@
#define HWCAP_JSCVT (1 << 13)
#define HWCAP_FCMA (1 << 14)
#define HWCAP_LRCPC (1 << 15)
+#define HWCAP_DCPOP (1 << 16)
#endif /* _UAPI__ASM_HWCAP_H */
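
HWCAP_DCPOP advertises the new DC CVAP (clean to Point of Persistence) instruction to userspace. A hedged userspace sketch of probing for it; getauxval() and AT_HWCAP come from the C library and auxiliary vector, and the bit value mirrors the definition above:

	#include <stdio.h>
	#include <sys/auxv.h>

	#define HWCAP_DCPOP	(1 << 16)

	int main(void)
	{
		unsigned long hwcaps = getauxval(AT_HWCAP);

		printf("DC CVAP %s\n", (hwcaps & HWCAP_DCPOP) ? "supported" : "not supported");
		return 0;
	}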
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
index e25c11e727fe..b3162715ed78 100644
--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -95,7 +95,7 @@ static int __init dt_scan_depth1_nodes(unsigned long node,
* __acpi_map_table() will be called before page_init(), so early_ioremap()
* or early_memremap() should be called here to for ACPI table mapping.
*/
-char *__init __acpi_map_table(unsigned long phys, unsigned long size)
+void __init __iomem *__acpi_map_table(unsigned long phys, unsigned long size)
{
if (!size)
return NULL;
@@ -103,7 +103,7 @@ char *__init __acpi_map_table(unsigned long phys, unsigned long size)
return early_memremap(phys, size);
}
-void __init __acpi_unmap_table(char *map, unsigned long size)
+void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
{
if (!map || !size)
return;
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index b3bb7ef97bc8..71bf088f1e4b 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -75,6 +75,7 @@ int main(void)
DEFINE(S_ORIG_X0, offsetof(struct pt_regs, orig_x0));
DEFINE(S_SYSCALLNO, offsetof(struct pt_regs, syscallno));
DEFINE(S_ORIG_ADDR_LIMIT, offsetof(struct pt_regs, orig_addr_limit));
+ DEFINE(S_STACKFRAME, offsetof(struct pt_regs, stackframe));
DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs));
BLANK();
DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id.counter));
diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c
index e137ceaf5016..d16978213c5b 100644
--- a/arch/arm64/kernel/cpu_ops.c
+++ b/arch/arm64/kernel/cpu_ops.c
@@ -82,8 +82,8 @@ static const char *__init cpu_read_enable_method(int cpu)
* Don't warn spuriously.
*/
if (cpu != 0)
- pr_err("%s: missing enable-method property\n",
- dn->full_name);
+ pr_err("%pOF: missing enable-method property\n",
+ dn);
}
} else {
enable_method = acpi_get_enable_method(cpu);
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 9f9e0064c8c1..cd52d365d1f0 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -120,6 +120,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
ARM64_FTR_END,
};
@@ -888,6 +889,17 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.min_field_value = 0,
.matches = has_no_fpsimd,
},
+#ifdef CONFIG_ARM64_PMEM
+ {
+ .desc = "Data cache clean to Point of Persistence",
+ .capability = ARM64_HAS_DCPOP,
+ .def_scope = SCOPE_SYSTEM,
+ .matches = has_cpuid_feature,
+ .sys_reg = SYS_ID_AA64ISAR1_EL1,
+ .field_pos = ID_AA64ISAR1_DPB_SHIFT,
+ .min_field_value = 1,
+ },
+#endif
{},
};
@@ -916,6 +928,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_FPHP),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_ASIMD),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_ASIMDHP),
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_DCPOP),
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_JSCVT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_JSCVT),
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FCMA),
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_LRCPC),
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index f495ee5049fd..311885962830 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -68,6 +68,7 @@ static const char *const hwcap_str[] = {
"jscvt",
"fcma",
"lrcpc",
+ "dcpop",
NULL
};
diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S
index c44a82f146b1..6a27cd6dbfa6 100644
--- a/arch/arm64/kernel/entry-fpsimd.S
+++ b/arch/arm64/kernel/entry-fpsimd.S
@@ -41,27 +41,3 @@ ENTRY(fpsimd_load_state)
fpsimd_restore x0, 8
ret
ENDPROC(fpsimd_load_state)
-
-#ifdef CONFIG_KERNEL_MODE_NEON
-
-/*
- * Save the bottom n FP registers.
- *
- * x0 - pointer to struct fpsimd_partial_state
- */
-ENTRY(fpsimd_save_partial_state)
- fpsimd_save_partial x0, 1, 8, 9
- ret
-ENDPROC(fpsimd_save_partial_state)
-
-/*
- * Load the bottom n FP registers.
- *
- * x0 - pointer to struct fpsimd_partial_state
- */
-ENTRY(fpsimd_load_partial_state)
- fpsimd_restore_partial x0, 8, 9
- ret
-ENDPROC(fpsimd_load_partial_state)
-
-#endif
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index b738880350f9..e1c59d4008a8 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -69,8 +69,55 @@
#define BAD_FIQ 2
#define BAD_ERROR 3
- .macro kernel_entry, el, regsize = 64
+ .macro kernel_ventry label
+ .align 7
sub sp, sp, #S_FRAME_SIZE
+#ifdef CONFIG_VMAP_STACK
+ /*
+ * Test whether the SP has overflowed, without corrupting a GPR.
+ * Task and IRQ stacks are aligned to (1 << THREAD_SHIFT).
+ */
+ add sp, sp, x0 // sp' = sp + x0
+ sub x0, sp, x0 // x0' = sp' - x0 = (sp + x0) - x0 = sp
+ tbnz x0, #THREAD_SHIFT, 0f
+ sub x0, sp, x0 // x0'' = sp' - x0' = (sp + x0) - sp = x0
+ sub sp, sp, x0 // sp'' = sp' - x0 = (sp + x0) - x0 = sp
+ b \label
+
+0:
+ /*
+ * Either we've just detected an overflow, or we've taken an exception
+ * while on the overflow stack. Either way, we won't return to
+ * userspace, and can clobber EL0 registers to free up GPRs.
+ */
+
+ /* Stash the original SP (minus S_FRAME_SIZE) in tpidr_el0. */
+ msr tpidr_el0, x0
+
+ /* Recover the original x0 value and stash it in tpidrro_el0 */
+ sub x0, sp, x0
+ msr tpidrro_el0, x0
+
+ /* Switch to the overflow stack */
+ adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0
+
+ /*
+ * Check whether we were already on the overflow stack. This may happen
+ * after panic() re-enables interrupts.
+ */
+ mrs x0, tpidr_el0 // sp of interrupted context
+ sub x0, sp, x0 // delta with top of overflow stack
+ tst x0, #~(OVERFLOW_STACK_SIZE - 1) // within range?
+ b.ne __bad_stack // no? -> bad stack pointer
+
+ /* We were already on the overflow stack. Restore sp/x0 and carry on. */
+ sub sp, sp, x0
+ mrs x0, tpidrro_el0
+#endif
+ b \label
+ .endm
+
+ .macro kernel_entry, el, regsize = 64
.if \regsize == 32
mov w0, w0 // zero upper 32 bits of x0
.endif
@@ -111,6 +158,18 @@
mrs x23, spsr_el1
stp lr, x21, [sp, #S_LR]
+ /*
+ * In order to be able to dump the contents of struct pt_regs at the
+ * time the exception was taken (in case we attempt to walk the call
+ * stack later), chain it together with the stack frames.
+ */
+ .if \el == 0
+ stp xzr, xzr, [sp, #S_STACKFRAME]
+ .else
+ stp x29, x22, [sp, #S_STACKFRAME]
+ .endif
+ add x29, sp, #S_STACKFRAME
+
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
/*
* Set the TTBR0 PAN bit in SPSR. When the exception is taken from
@@ -138,12 +197,10 @@ alternative_else_nop_endif
stp x22, x23, [sp, #S_PC]
- /*
- * Set syscallno to -1 by default (overridden later if real syscall).
- */
+ /* Not in a syscall by default (el0_svc overwrites for real syscall) */
.if \el == 0
- mvn x21, xzr
- str x21, [sp, #S_SYSCALLNO]
+ mov w21, #NO_SYSCALL
+ str w21, [sp, #S_SYSCALLNO]
.endif
/*
@@ -259,20 +316,12 @@ alternative_else_nop_endif
and x25, x25, #~(THREAD_SIZE - 1)
cbnz x25, 9998f
- adr_this_cpu x25, irq_stack, x26
- mov x26, #IRQ_STACK_START_SP
+ ldr_this_cpu x25, irq_stack_ptr, x26
+ mov x26, #IRQ_STACK_SIZE
add x26, x25, x26
/* switch to the irq stack */
mov sp, x26
-
- /*
- * Add a dummy stack frame, this non-standard format is fixed up
- * by unwind_frame()
- */
- stp x29, x19, [sp, #-16]!
- mov x29, sp
-
9998:
.endm
@@ -290,8 +339,9 @@ alternative_else_nop_endif
*
* x7 is reserved for the system call number in 32-bit mode.
*/
-sc_nr .req x25 // number of system calls
-scno .req x26 // syscall number
+wsc_nr .req w25 // number of system calls
+wscno .req w26 // syscall number
+xscno .req x26 // syscall number (zero-extended)
stbl .req x27 // syscall table pointer
tsk .req x28 // current thread_info
@@ -315,34 +365,62 @@ tsk .req x28 // current thread_info
.align 11
ENTRY(vectors)
- ventry el1_sync_invalid // Synchronous EL1t
- ventry el1_irq_invalid // IRQ EL1t
- ventry el1_fiq_invalid // FIQ EL1t
- ventry el1_error_invalid // Error EL1t
+ kernel_ventry el1_sync_invalid // Synchronous EL1t
+ kernel_ventry el1_irq_invalid // IRQ EL1t
+ kernel_ventry el1_fiq_invalid // FIQ EL1t
+ kernel_ventry el1_error_invalid // Error EL1t
- ventry el1_sync // Synchronous EL1h
- ventry el1_irq // IRQ EL1h
- ventry el1_fiq_invalid // FIQ EL1h
- ventry el1_error_invalid // Error EL1h
+ kernel_ventry el1_sync // Synchronous EL1h
+ kernel_ventry el1_irq // IRQ EL1h
+ kernel_ventry el1_fiq_invalid // FIQ EL1h
+ kernel_ventry el1_error_invalid // Error EL1h
- ventry el0_sync // Synchronous 64-bit EL0
- ventry el0_irq // IRQ 64-bit EL0
- ventry el0_fiq_invalid // FIQ 64-bit EL0
- ventry el0_error_invalid // Error 64-bit EL0
+ kernel_ventry el0_sync // Synchronous 64-bit EL0
+ kernel_ventry el0_irq // IRQ 64-bit EL0
+ kernel_ventry el0_fiq_invalid // FIQ 64-bit EL0
+ kernel_ventry el0_error_invalid // Error 64-bit EL0
#ifdef CONFIG_COMPAT
- ventry el0_sync_compat // Synchronous 32-bit EL0
- ventry el0_irq_compat // IRQ 32-bit EL0
- ventry el0_fiq_invalid_compat // FIQ 32-bit EL0
- ventry el0_error_invalid_compat // Error 32-bit EL0
+ kernel_ventry el0_sync_compat // Synchronous 32-bit EL0
+ kernel_ventry el0_irq_compat // IRQ 32-bit EL0
+ kernel_ventry el0_fiq_invalid_compat // FIQ 32-bit EL0
+ kernel_ventry el0_error_invalid_compat // Error 32-bit EL0
#else
- ventry el0_sync_invalid // Synchronous 32-bit EL0
- ventry el0_irq_invalid // IRQ 32-bit EL0
- ventry el0_fiq_invalid // FIQ 32-bit EL0
- ventry el0_error_invalid // Error 32-bit EL0
+ kernel_ventry el0_sync_invalid // Synchronous 32-bit EL0
+ kernel_ventry el0_irq_invalid // IRQ 32-bit EL0
+ kernel_ventry el0_fiq_invalid // FIQ 32-bit EL0
+ kernel_ventry el0_error_invalid // Error 32-bit EL0
#endif
END(vectors)
+#ifdef CONFIG_VMAP_STACK
+ /*
+ * We detected an overflow in kernel_ventry, which switched to the
+ * overflow stack. Stash the exception regs, and head to our overflow
+ * handler.
+ */
+__bad_stack:
+ /* Restore the original x0 value */
+ mrs x0, tpidrro_el0
+
+ /*
+ * Store the original GPRs to the new stack. The original SP (minus
+ * S_FRAME_SIZE) was stashed in tpidr_el0 by kernel_ventry.
+ */
+ sub sp, sp, #S_FRAME_SIZE
+ kernel_entry 1
+ mrs x0, tpidr_el0
+ add x0, x0, #S_FRAME_SIZE
+ str x0, [sp, #S_SP]
+
+ /* Stash the regs for handle_bad_stack */
+ mov x0, sp
+
+ /* Time to die */
+ bl handle_bad_stack
+ ASM_BUG()
+#endif /* CONFIG_VMAP_STACK */
+
/*
* Invalid mode handlers
*/
@@ -351,7 +429,8 @@ END(vectors)
mov x0, sp
mov x1, #\reason
mrs x2, esr_el1
- b bad_mode
+ bl bad_mode
+ ASM_BUG()
.endm
el0_sync_invalid:
@@ -448,14 +527,16 @@ el1_sp_pc:
mrs x0, far_el1
enable_dbg
mov x2, sp
- b do_sp_pc_abort
+ bl do_sp_pc_abort
+ ASM_BUG()
el1_undef:
/*
* Undefined instruction
*/
enable_dbg
mov x0, sp
- b do_undefinstr
+ bl do_undefinstr
+ ASM_BUG()
el1_dbg:
/*
* Debug exception handling
@@ -473,7 +554,8 @@ el1_inv:
mov x0, sp
mov x2, x1
mov x1, #BAD_SYNC
- b bad_mode
+ bl bad_mode
+ ASM_BUG()
ENDPROC(el1_sync)
.align 6
@@ -577,8 +659,8 @@ el0_svc_compat:
* AArch32 syscall handling
*/
adrp stbl, compat_sys_call_table // load compat syscall table pointer
- uxtw scno, w7 // syscall number in w7 (r7)
- mov sc_nr, #__NR_compat_syscalls
+ mov wscno, w7 // syscall number in w7 (r7)
+ mov wsc_nr, #__NR_compat_syscalls
b el0_svc_naked
.align 6
@@ -707,38 +789,6 @@ el0_irq_naked:
ENDPROC(el0_irq)
/*
- * Register switch for AArch64. The callee-saved registers need to be saved
- * and restored. On entry:
- * x0 = previous task_struct (must be preserved across the switch)
- * x1 = next task_struct
- * Previous and next are guaranteed not to be the same.
- *
- */
-ENTRY(cpu_switch_to)
- mov x10, #THREAD_CPU_CONTEXT
- add x8, x0, x10
- mov x9, sp
- stp x19, x20, [x8], #16 // store callee-saved registers
- stp x21, x22, [x8], #16
- stp x23, x24, [x8], #16
- stp x25, x26, [x8], #16
- stp x27, x28, [x8], #16
- stp x29, x9, [x8], #16
- str lr, [x8]
- add x8, x1, x10
- ldp x19, x20, [x8], #16 // restore callee-saved registers
- ldp x21, x22, [x8], #16
- ldp x23, x24, [x8], #16
- ldp x25, x26, [x8], #16
- ldp x27, x28, [x8], #16
- ldp x29, x9, [x8], #16
- ldr lr, [x8]
- mov sp, x9
- msr sp_el0, x1
- ret
-ENDPROC(cpu_switch_to)
-
-/*
* This is the fast syscall return path. We do as little as possible here,
* and this includes saving x0 back into the kernel stack.
*/
@@ -781,36 +831,24 @@ finish_ret_to_user:
ENDPROC(ret_to_user)
/*
- * This is how we return from a fork.
- */
-ENTRY(ret_from_fork)
- bl schedule_tail
- cbz x19, 1f // not a kernel thread
- mov x0, x20
- blr x19
-1: get_thread_info tsk
- b ret_to_user
-ENDPROC(ret_from_fork)
-
-/*
* SVC handler.
*/
.align 6
el0_svc:
adrp stbl, sys_call_table // load syscall table pointer
- uxtw scno, w8 // syscall number in w8
- mov sc_nr, #__NR_syscalls
+ mov wscno, w8 // syscall number in w8
+ mov wsc_nr, #__NR_syscalls
el0_svc_naked: // compat entry point
- stp x0, scno, [sp, #S_ORIG_X0] // save the original x0 and syscall number
+ stp x0, xscno, [sp, #S_ORIG_X0] // save the original x0 and syscall number
enable_dbg_and_irq
ct_user_exit 1
ldr x16, [tsk, #TSK_TI_FLAGS] // check for syscall hooks
tst x16, #_TIF_SYSCALL_WORK
b.ne __sys_trace
- cmp scno, sc_nr // check upper syscall limit
+ cmp wscno, wsc_nr // check upper syscall limit
b.hs ni_sys
- ldr x16, [stbl, scno, lsl #3] // address in the syscall table
+ ldr x16, [stbl, xscno, lsl #3] // address in the syscall table
blr x16 // call sys_* routine
b ret_fast_syscall
ni_sys:
@@ -824,24 +862,23 @@ ENDPROC(el0_svc)
* switches, and waiting for our parent to respond.
*/
__sys_trace:
- mov w0, #-1 // set default errno for
- cmp scno, x0 // user-issued syscall(-1)
+ cmp wscno, #NO_SYSCALL // user-issued syscall(-1)?
b.ne 1f
- mov x0, #-ENOSYS
+ mov x0, #-ENOSYS // set default errno if so
str x0, [sp, #S_X0]
1: mov x0, sp
bl syscall_trace_enter
- cmp w0, #-1 // skip the syscall?
+ cmp w0, #NO_SYSCALL // skip the syscall?
b.eq __sys_trace_return_skipped
- uxtw scno, w0 // syscall number (possibly new)
+ mov wscno, w0 // syscall number (possibly new)
mov x1, sp // pointer to regs
- cmp scno, sc_nr // check upper syscall limit
+ cmp wscno, wsc_nr // check upper syscall limit
b.hs __ni_sys_trace
ldp x0, x1, [sp] // restore the syscall args
ldp x2, x3, [sp, #S_X2]
ldp x4, x5, [sp, #S_X4]
ldp x6, x7, [sp, #S_X6]
- ldr x16, [stbl, scno, lsl #3] // address in the syscall table
+ ldr x16, [stbl, xscno, lsl #3] // address in the syscall table
blr x16 // call sys_* routine
__sys_trace_return:
@@ -865,3 +902,49 @@ ENTRY(sys_rt_sigreturn_wrapper)
mov x0, sp
b sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)
+
+/*
+ * Register switch for AArch64. The callee-saved registers need to be saved
+ * and restored. On entry:
+ * x0 = previous task_struct (must be preserved across the switch)
+ * x1 = next task_struct
+ * Previous and next are guaranteed not to be the same.
+ *
+ */
+ENTRY(cpu_switch_to)
+ mov x10, #THREAD_CPU_CONTEXT
+ add x8, x0, x10
+ mov x9, sp
+ stp x19, x20, [x8], #16 // store callee-saved registers
+ stp x21, x22, [x8], #16
+ stp x23, x24, [x8], #16
+ stp x25, x26, [x8], #16
+ stp x27, x28, [x8], #16
+ stp x29, x9, [x8], #16
+ str lr, [x8]
+ add x8, x1, x10
+ ldp x19, x20, [x8], #16 // restore callee-saved registers
+ ldp x21, x22, [x8], #16
+ ldp x23, x24, [x8], #16
+ ldp x25, x26, [x8], #16
+ ldp x27, x28, [x8], #16
+ ldp x29, x9, [x8], #16
+ ldr lr, [x8]
+ mov sp, x9
+ msr sp_el0, x1
+ ret
+ENDPROC(cpu_switch_to)
+NOKPROBE(cpu_switch_to)
+
+/*
+ * This is how we return from a fork.
+ */
+ENTRY(ret_from_fork)
+ bl schedule_tail
+ cbz x19, 1f // not a kernel thread
+ mov x0, x20
+ blr x19
+1: get_thread_info tsk
+ b ret_to_user
+ENDPROC(ret_from_fork)
+NOKPROBE(ret_from_fork)
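
The kernel_ventry macro added above has to test the freshly decremented SP without a scratch register, so it temporarily folds x0 into SP and then undoes the arithmetic. A hedged C model of the register dance, restating the primed comments from the macro (THREAD_SHIFT and the sample values are illustrative only):

	unsigned long sp = 0xffff00000800fe90UL;	/* sp after "sub sp, sp, #S_FRAME_SIZE" */
	unsigned long x0 = 0x1234UL;			/* caller's x0, must survive intact     */

	sp = sp + x0;					/* sp'  = sp + x0                       */
	x0 = sp - x0;					/* x0'  = sp' - x0 = original sp        */

	if (x0 & (1UL << THREAD_SHIFT)) {
		/* Overflow path: the macro stashes the original SP in tpidr_el0 and
		 * the original x0 in tpidrro_el0, then switches to the overflow stack.
		 */
	} else {
		x0 = sp - x0;				/* x0'' = sp' - sp = original x0        */
		sp = sp - x0;				/* sp'' = sp' - x0 = original sp        */
	}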
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 06da8ea16bbe..3a68cf38a6b3 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -17,16 +17,19 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/bottom_half.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/percpu.h>
+#include <linux/preempt.h>
#include <linux/sched/signal.h>
#include <linux/signal.h>
-#include <linux/hardirq.h>
#include <asm/fpsimd.h>
#include <asm/cputype.h>
+#include <asm/simd.h>
#define FPEXC_IOF (1 << 0)
#define FPEXC_DZF (1 << 1)
@@ -62,6 +65,13 @@
* CPU currently contain the most recent userland FPSIMD state of the current
* task.
*
+ * In order to allow softirq handlers to use FPSIMD, kernel_neon_begin() may
+ * save the task's FPSIMD context back to task_struct from softirq context.
+ * To prevent this from racing with the manipulation of the task's FPSIMD state
+ * from task context and thereby corrupting the state, it is necessary to
+ * protect any manipulation of a task's fpsimd_state or TIF_FOREIGN_FPSTATE
+ * flag with local_bh_disable() unless softirqs are already masked.
+ *
* For a certain task, the sequence may look something like this:
* - the task gets scheduled in; if both the task's fpsimd_state.cpu field
* contains the id of the current CPU, and the CPU's fpsimd_last_state per-cpu
@@ -161,9 +171,14 @@ void fpsimd_flush_thread(void)
{
if (!system_supports_fpsimd())
return;
+
+ local_bh_disable();
+
memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
fpsimd_flush_task_state(current);
set_thread_flag(TIF_FOREIGN_FPSTATE);
+
+ local_bh_enable();
}
/*
@@ -174,10 +189,13 @@ void fpsimd_preserve_current_state(void)
{
if (!system_supports_fpsimd())
return;
- preempt_disable();
+
+ local_bh_disable();
+
if (!test_thread_flag(TIF_FOREIGN_FPSTATE))
fpsimd_save_state(&current->thread.fpsimd_state);
- preempt_enable();
+
+ local_bh_enable();
}
/*
@@ -189,15 +207,18 @@ void fpsimd_restore_current_state(void)
{
if (!system_supports_fpsimd())
return;
- preempt_disable();
+
+ local_bh_disable();
+
if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
struct fpsimd_state *st = &current->thread.fpsimd_state;
fpsimd_load_state(st);
- this_cpu_write(fpsimd_last_state, st);
+ __this_cpu_write(fpsimd_last_state, st);
st->cpu = smp_processor_id();
}
- preempt_enable();
+
+ local_bh_enable();
}
/*
@@ -209,15 +230,18 @@ void fpsimd_update_current_state(struct fpsimd_state *state)
{
if (!system_supports_fpsimd())
return;
- preempt_disable();
+
+ local_bh_disable();
+
fpsimd_load_state(state);
if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
struct fpsimd_state *st = &current->thread.fpsimd_state;
- this_cpu_write(fpsimd_last_state, st);
+ __this_cpu_write(fpsimd_last_state, st);
st->cpu = smp_processor_id();
}
- preempt_enable();
+
+ local_bh_enable();
}
/*
@@ -230,52 +254,122 @@ void fpsimd_flush_task_state(struct task_struct *t)
#ifdef CONFIG_KERNEL_MODE_NEON
-static DEFINE_PER_CPU(struct fpsimd_partial_state, hardirq_fpsimdstate);
-static DEFINE_PER_CPU(struct fpsimd_partial_state, softirq_fpsimdstate);
+DEFINE_PER_CPU(bool, kernel_neon_busy);
+EXPORT_PER_CPU_SYMBOL(kernel_neon_busy);
/*
* Kernel-side NEON support functions
*/
-void kernel_neon_begin_partial(u32 num_regs)
+
+/*
+ * kernel_neon_begin(): obtain the CPU FPSIMD registers for use by the calling
+ * context
+ *
+ * Must not be called unless may_use_simd() returns true.
+ * Task context in the FPSIMD registers is saved back to memory as necessary.
+ *
+ * A matching call to kernel_neon_end() must be made before returning from the
+ * calling context.
+ *
+ * The caller may freely use the FPSIMD registers until kernel_neon_end() is
+ * called.
+ */
+void kernel_neon_begin(void)
{
if (WARN_ON(!system_supports_fpsimd()))
return;
- if (in_interrupt()) {
- struct fpsimd_partial_state *s = this_cpu_ptr(
- in_irq() ? &hardirq_fpsimdstate : &softirq_fpsimdstate);
- BUG_ON(num_regs > 32);
- fpsimd_save_partial_state(s, roundup(num_regs, 2));
- } else {
- /*
- * Save the userland FPSIMD state if we have one and if we
- * haven't done so already. Clear fpsimd_last_state to indicate
- * that there is no longer userland FPSIMD state in the
- * registers.
- */
- preempt_disable();
- if (current->mm &&
- !test_and_set_thread_flag(TIF_FOREIGN_FPSTATE))
- fpsimd_save_state(&current->thread.fpsimd_state);
- this_cpu_write(fpsimd_last_state, NULL);
- }
+ BUG_ON(!may_use_simd());
+
+ local_bh_disable();
+
+ __this_cpu_write(kernel_neon_busy, true);
+
+ /* Save unsaved task fpsimd state, if any: */
+ if (current->mm && !test_and_set_thread_flag(TIF_FOREIGN_FPSTATE))
+ fpsimd_save_state(&current->thread.fpsimd_state);
+
+ /* Invalidate any task state remaining in the fpsimd regs: */
+ __this_cpu_write(fpsimd_last_state, NULL);
+
+ preempt_disable();
+
+ local_bh_enable();
}
-EXPORT_SYMBOL(kernel_neon_begin_partial);
+EXPORT_SYMBOL(kernel_neon_begin);
+/*
+ * kernel_neon_end(): give the CPU FPSIMD registers back to the current task
+ *
+ * Must be called from a context in which kernel_neon_begin() was previously
+ * called, with no call to kernel_neon_end() in the meantime.
+ *
+ * The caller must not use the FPSIMD registers after this function is called,
+ * unless kernel_neon_begin() is called again in the meantime.
+ */
void kernel_neon_end(void)
{
+ bool busy;
+
if (!system_supports_fpsimd())
return;
- if (in_interrupt()) {
- struct fpsimd_partial_state *s = this_cpu_ptr(
- in_irq() ? &hardirq_fpsimdstate : &softirq_fpsimdstate);
- fpsimd_load_partial_state(s);
- } else {
- preempt_enable();
- }
+
+ busy = __this_cpu_xchg(kernel_neon_busy, false);
+ WARN_ON(!busy); /* No matching kernel_neon_begin()? */
+
+ preempt_enable();
}
EXPORT_SYMBOL(kernel_neon_end);
+static DEFINE_PER_CPU(struct fpsimd_state, efi_fpsimd_state);
+static DEFINE_PER_CPU(bool, efi_fpsimd_state_used);
+
+/*
+ * EFI runtime services support functions
+ *
+ * The ABI for EFI runtime services allows EFI to use FPSIMD during the call.
+ * This means that for EFI (and only for EFI), we have to assume that FPSIMD
+ * is always used rather than being an optional accelerator.
+ *
+ * These functions provide the necessary support for ensuring FPSIMD
+ * save/restore in the contexts from which EFI is used.
+ *
+ * Do not use them for any other purpose -- if tempted to do so, you are
+ * either doing something wrong or you need to propose some refactoring.
+ */
+
+/*
+ * __efi_fpsimd_begin(): prepare FPSIMD for making an EFI runtime services call
+ */
+void __efi_fpsimd_begin(void)
+{
+ if (!system_supports_fpsimd())
+ return;
+
+ WARN_ON(preemptible());
+
+ if (may_use_simd())
+ kernel_neon_begin();
+ else {
+ fpsimd_save_state(this_cpu_ptr(&efi_fpsimd_state));
+ __this_cpu_write(efi_fpsimd_state_used, true);
+ }
+}
+
+/*
+ * __efi_fpsimd_end(): clean up FPSIMD after an EFI runtime services call
+ */
+void __efi_fpsimd_end(void)
+{
+ if (!system_supports_fpsimd())
+ return;
+
+ if (__this_cpu_xchg(efi_fpsimd_state_used, false))
+ fpsimd_load_state(this_cpu_ptr(&efi_fpsimd_state));
+ else
+ kernel_neon_end();
+}
+
#endif /* CONFIG_KERNEL_MODE_NEON */
#ifdef CONFIG_CPU_PM
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 973df7de7bf8..7434ec0c7a27 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -143,8 +143,8 @@ preserve_boot_args:
dmb sy // needed before dc ivac with
// MMU off
- add x1, x0, #0x20 // 4 x 8 bytes
- b __inval_cache_range // tail call
+ mov x1, #0x20 // 4 x 8 bytes
+ b __inval_dcache_area // tail call
ENDPROC(preserve_boot_args)
/*
@@ -221,20 +221,20 @@ __create_page_tables:
* dirty cache lines being evicted.
*/
adrp x0, idmap_pg_dir
- adrp x1, swapper_pg_dir + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
- bl __inval_cache_range
+ ldr x1, =(IDMAP_DIR_SIZE + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
+ bl __inval_dcache_area
/*
* Clear the idmap and swapper page tables.
*/
adrp x0, idmap_pg_dir
- adrp x6, swapper_pg_dir + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
+ ldr x1, =(IDMAP_DIR_SIZE + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
1: stp xzr, xzr, [x0], #16
stp xzr, xzr, [x0], #16
stp xzr, xzr, [x0], #16
stp xzr, xzr, [x0], #16
- cmp x0, x6
- b.lo 1b
+ subs x1, x1, #64
+ b.ne 1b
mov x7, SWAPPER_MM_MMUFLAGS
@@ -307,9 +307,9 @@ __create_page_tables:
* tables again to remove any speculatively loaded cache lines.
*/
adrp x0, idmap_pg_dir
- adrp x1, swapper_pg_dir + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
+ ldr x1, =(IDMAP_DIR_SIZE + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
dmb sy
- bl __inval_cache_range
+ bl __inval_dcache_area
ret x28
ENDPROC(__create_page_tables)
@@ -354,7 +354,6 @@ __primary_switched:
tst x23, ~(MIN_KIMG_ALIGN - 1) // already running randomized?
b.ne 0f
mov x0, x21 // pass FDT address in x0
- mov x1, x23 // pass modulo offset in x1
bl kaslr_early_init // parse FDT for KASLR options
cbz x0, 0f // KASLR disabled? just proceed
orr x23, x23, x0 // record KASLR offset
@@ -362,6 +361,9 @@ __primary_switched:
ret // to __primary_switch()
0:
#endif
+ add sp, sp, #16
+ mov x29, #0
+ mov x30, #0
b start_kernel
ENDPROC(__primary_switched)
@@ -617,6 +619,7 @@ __secondary_switched:
ldr x2, [x0, #CPU_BOOT_TASK]
msr sp_el0, x2
mov x29, #0
+ mov x30, #0
b secondary_start_kernel
ENDPROC(__secondary_switched)
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index a44e13942d30..095d3c170f5d 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -330,7 +330,7 @@ static void _copy_pte(pte_t *dst_pte, pte_t *src_pte, unsigned long addr)
* read only (code, rodata). Clear the RDONLY bit from
* the temporary mappings we use during restore.
*/
- set_pte(dst_pte, pte_clear_rdonly(pte));
+ set_pte(dst_pte, pte_mkwrite(pte));
} else if (debug_pagealloc_enabled() && !pte_none(pte)) {
/*
* debug_pagealloc will removed the PTE_VALID bit if
@@ -343,7 +343,7 @@ static void _copy_pte(pte_t *dst_pte, pte_t *src_pte, unsigned long addr)
*/
BUG_ON(!pfn_valid(pte_pfn(pte)));
- set_pte(dst_pte, pte_mkpresent(pte_clear_rdonly(pte)));
+ set_pte(dst_pte, pte_mkpresent(pte_mkwrite(pte)));
}
}
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 2386b26c0712..713561e5bcab 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -23,15 +23,16 @@
#include <linux/kernel_stat.h>
#include <linux/irq.h>
+#include <linux/memory.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/irqchip.h>
#include <linux/seq_file.h>
+#include <linux/vmalloc.h>
unsigned long irq_err_count;
-/* irq stack only needs to be 16 byte aligned - not IRQ_STACK_SIZE aligned. */
-DEFINE_PER_CPU(unsigned long [IRQ_STACK_SIZE/sizeof(long)], irq_stack) __aligned(16);
+DEFINE_PER_CPU(unsigned long *, irq_stack_ptr);
int arch_show_interrupts(struct seq_file *p, int prec)
{
@@ -50,8 +51,43 @@ void __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
handle_arch_irq = handle_irq;
}
+#ifdef CONFIG_VMAP_STACK
+static void init_irq_stacks(void)
+{
+ int cpu;
+ unsigned long *p;
+
+ for_each_possible_cpu(cpu) {
+ /*
+ * To ensure that VMAP'd stack overflow detection works
+ * correctly, the IRQ stacks need to have the same
+ * alignment as other stacks.
+ */
+ p = __vmalloc_node_range(IRQ_STACK_SIZE, THREAD_ALIGN,
+ VMALLOC_START, VMALLOC_END,
+ THREADINFO_GFP, PAGE_KERNEL,
+ 0, cpu_to_node(cpu),
+ __builtin_return_address(0));
+
+ per_cpu(irq_stack_ptr, cpu) = p;
+ }
+}
+#else
+/* irq stack only needs to be 16 byte aligned - not IRQ_STACK_SIZE aligned. */
+DEFINE_PER_CPU_ALIGNED(unsigned long [IRQ_STACK_SIZE/sizeof(long)], irq_stack);
+
+static void init_irq_stacks(void)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ per_cpu(irq_stack_ptr, cpu) = per_cpu(irq_stack, cpu);
+}
+#endif
+
void __init init_IRQ(void)
{
+ init_irq_stacks();
irqchip_init();
if (!handle_arch_irq)
panic("No interrupt controller found.");
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index a9710efb8c01..47080c49cc7e 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -75,7 +75,7 @@ extern void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size,
* containing function pointers) to be reinitialized, and zero-initialized
* .bss variables will be reset to 0.
*/
-u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset)
+u64 __init kaslr_early_init(u64 dt_phys)
{
void *fdt;
u64 seed, offset, mask, module_range;
@@ -131,15 +131,17 @@ u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset)
/*
* The kernel Image should not extend across a 1GB/32MB/512MB alignment
* boundary (for 4KB/16KB/64KB granule kernels, respectively). If this
- * happens, increase the KASLR offset by the size of the kernel image
- * rounded up by SWAPPER_BLOCK_SIZE.
+ * happens, round down the KASLR offset by (1 << SWAPPER_TABLE_SHIFT).
+ *
+ * NOTE: The references to _text and _end below will already take the
+ * modulo offset (the physical displacement modulo 2 MB) into
+ * account, given that the physical placement is controlled by
+ * the loader, and will not change as a result of the virtual
+ * mapping we choose.
*/
- if ((((u64)_text + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT) !=
- (((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT)) {
- u64 kimg_sz = _end - _text;
- offset = (offset + round_up(kimg_sz, SWAPPER_BLOCK_SIZE))
- & mask;
- }
+ if ((((u64)_text + offset) >> SWAPPER_TABLE_SHIFT) !=
+ (((u64)_end + offset) >> SWAPPER_TABLE_SHIFT))
+ offset = round_down(offset, 1 << SWAPPER_TABLE_SHIFT);
if (IS_ENABLED(CONFIG_KASAN))
/*
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index 481f54a866c5..11121f608eb5 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -252,7 +252,7 @@ void machine_crash_shutdown(struct pt_regs *regs)
local_irq_disable();
/* shutdown non-crashing cpus */
- smp_send_crash_stop();
+ crash_smp_send_stop();
/* for crashing cpu */
crash_save_cpu(regs, smp_processor_id());
diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c
index 713ca824f266..bcafd7dcfe8b 100644
--- a/arch/arm64/kernel/perf_callchain.c
+++ b/arch/arm64/kernel/perf_callchain.c
@@ -162,7 +162,6 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
}
frame.fp = regs->regs[29];
- frame.sp = regs->sp;
frame.pc = regs->pc;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
frame.graph = current->curr_ret_stack;
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index b5798ba21189..9eaef51f83ff 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -202,55 +202,6 @@ static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV8_PMUV3_PERFCTR_STALL_BACKEND,
};
-/* ARM Cortex-A53 HW events mapping. */
-static const unsigned armv8_a53_perf_map[PERF_COUNT_HW_MAX] = {
- PERF_MAP_ALL_UNSUPPORTED,
- [PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CPU_CYCLES,
- [PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INST_RETIRED,
- [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1D_CACHE,
- [PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED,
- [PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
- [PERF_COUNT_HW_BUS_CYCLES] = ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
-};
-
-/* ARM Cortex-A57 and Cortex-A72 events mapping. */
-static const unsigned armv8_a57_perf_map[PERF_COUNT_HW_MAX] = {
- PERF_MAP_ALL_UNSUPPORTED,
- [PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CPU_CYCLES,
- [PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INST_RETIRED,
- [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1D_CACHE,
- [PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
- [PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
- [PERF_COUNT_HW_BUS_CYCLES] = ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
-};
-
-static const unsigned armv8_thunder_perf_map[PERF_COUNT_HW_MAX] = {
- PERF_MAP_ALL_UNSUPPORTED,
- [PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CPU_CYCLES,
- [PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INST_RETIRED,
- [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1D_CACHE,
- [PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED,
- [PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
- [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV8_PMUV3_PERFCTR_STALL_FRONTEND,
- [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV8_PMUV3_PERFCTR_STALL_BACKEND,
-};
-
-/* Broadcom Vulcan events mapping */
-static const unsigned armv8_vulcan_perf_map[PERF_COUNT_HW_MAX] = {
- PERF_MAP_ALL_UNSUPPORTED,
- [PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CPU_CYCLES,
- [PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INST_RETIRED,
- [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1D_CACHE,
- [PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_BR_RETIRED,
- [PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
- [PERF_COUNT_HW_BUS_CYCLES] = ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
- [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV8_PMUV3_PERFCTR_STALL_FRONTEND,
- [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV8_PMUV3_PERFCTR_STALL_BACKEND,
-};
-
static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
@@ -281,27 +232,10 @@ static const unsigned armv8_a53_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
PERF_CACHE_MAP_ALL_UNSUPPORTED,
- [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE,
- [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
- [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE,
- [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_A53_PERFCTR_PREF_LINEFILL,
- [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1I_CACHE,
- [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL,
-
- [C(LL)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L2D_CACHE,
- [C(LL)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL,
- [C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L2D_CACHE,
- [C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL,
-
- [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL,
- [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL,
-
- [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_BR_PRED,
- [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
- [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_BR_PRED,
- [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
+ [C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
+ [C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};
static const unsigned armv8_a57_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
@@ -314,18 +248,26 @@ static const unsigned armv8_a57_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR,
- [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1I_CACHE,
- [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL,
-
[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,
- [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL,
+ [C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
+ [C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
+};
- [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_BR_PRED,
- [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
- [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_BR_PRED,
- [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
+static const unsigned armv8_a73_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ PERF_CACHE_MAP_ALL_UNSUPPORTED,
+
+ [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
+ [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
+
+ [C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
+ [C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};
static const unsigned armv8_thunder_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
@@ -340,8 +282,6 @@ static const unsigned armv8_thunder_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_ACCESS,
[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_MISS,
- [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1I_CACHE,
- [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL,
[C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS,
[C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS,
@@ -349,13 +289,6 @@ static const unsigned armv8_thunder_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR,
[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,
-
- [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL,
-
- [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_BR_PRED,
- [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
- [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_BR_PRED,
- [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
};
static const unsigned armv8_vulcan_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
@@ -368,22 +301,11 @@ static const unsigned armv8_vulcan_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR,
- [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1I_CACHE,
- [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL,
-
- [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL,
- [C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1I_TLB,
-
[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD,
[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR,
[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,
- [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_BR_PRED,
- [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
- [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_BR_PRED,
- [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
-
[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};
@@ -846,17 +768,14 @@ static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
struct hw_perf_event *hwc = &event->hw;
unsigned long evtype = hwc->config_base & ARMV8_PMU_EVTYPE_EVENT;
- /* Always place a cycle counter into the cycle counter. */
+ /* Always prefer to place a cycle counter into the cycle counter. */
if (evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) {
- if (test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
- return -EAGAIN;
-
- return ARMV8_IDX_CYCLE_COUNTER;
+ if (!test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
+ return ARMV8_IDX_CYCLE_COUNTER;
}
/*
- * For anything other than a cycle counter, try and use
- * the events counters
+ * Otherwise use events counters
*/
for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
if (!test_and_set_bit(idx, cpuc->used_mask))
@@ -924,7 +843,13 @@ static void armv8pmu_reset(void *info)
ARMV8_PMU_PMCR_LC);
}
-static int armv8_pmuv3_map_event(struct perf_event *event)
+static int __armv8_pmuv3_map_event(struct perf_event *event,
+ const unsigned (*extra_event_map)
+ [PERF_COUNT_HW_MAX],
+ const unsigned (*extra_cache_map)
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX])
{
int hw_event_id;
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
@@ -932,44 +857,47 @@ static int armv8_pmuv3_map_event(struct perf_event *event)
hw_event_id = armpmu_map_event(event, &armv8_pmuv3_perf_map,
&armv8_pmuv3_perf_cache_map,
ARMV8_PMU_EVTYPE_EVENT);
- if (hw_event_id < 0)
- return hw_event_id;
- /* disable micro/arch events not supported by this PMU */
- if ((hw_event_id < ARMV8_PMUV3_MAX_COMMON_EVENTS) &&
- !test_bit(hw_event_id, armpmu->pmceid_bitmap)) {
- return -EOPNOTSUPP;
+ /* Only expose micro/arch events supported by this PMU */
+ if ((hw_event_id > 0) && (hw_event_id < ARMV8_PMUV3_MAX_COMMON_EVENTS)
+ && test_bit(hw_event_id, armpmu->pmceid_bitmap)) {
+ return hw_event_id;
}
- return hw_event_id;
+ return armpmu_map_event(event, extra_event_map, extra_cache_map,
+ ARMV8_PMU_EVTYPE_EVENT);
+}
+
+static int armv8_pmuv3_map_event(struct perf_event *event)
+{
+ return __armv8_pmuv3_map_event(event, NULL, NULL);
}
static int armv8_a53_map_event(struct perf_event *event)
{
- return armpmu_map_event(event, &armv8_a53_perf_map,
- &armv8_a53_perf_cache_map,
- ARMV8_PMU_EVTYPE_EVENT);
+ return __armv8_pmuv3_map_event(event, NULL, &armv8_a53_perf_cache_map);
}
static int armv8_a57_map_event(struct perf_event *event)
{
- return armpmu_map_event(event, &armv8_a57_perf_map,
- &armv8_a57_perf_cache_map,
- ARMV8_PMU_EVTYPE_EVENT);
+ return __armv8_pmuv3_map_event(event, NULL, &armv8_a57_perf_cache_map);
+}
+
+static int armv8_a73_map_event(struct perf_event *event)
+{
+ return __armv8_pmuv3_map_event(event, NULL, &armv8_a73_perf_cache_map);
}
static int armv8_thunder_map_event(struct perf_event *event)
{
- return armpmu_map_event(event, &armv8_thunder_perf_map,
- &armv8_thunder_perf_cache_map,
- ARMV8_PMU_EVTYPE_EVENT);
+ return __armv8_pmuv3_map_event(event, NULL,
+ &armv8_thunder_perf_cache_map);
}
static int armv8_vulcan_map_event(struct perf_event *event)
{
- return armpmu_map_event(event, &armv8_vulcan_perf_map,
- &armv8_vulcan_perf_cache_map,
- ARMV8_PMU_EVTYPE_EVENT);
+ return __armv8_pmuv3_map_event(event, NULL,
+ &armv8_vulcan_perf_cache_map);
}
struct armv8pmu_probe_info {
@@ -1062,6 +990,22 @@ static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu)
return 0;
}
+static int armv8_a35_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ int ret = armv8_pmu_init(cpu_pmu);
+ if (ret)
+ return ret;
+
+ cpu_pmu->name = "armv8_cortex_a35";
+ cpu_pmu->map_event = armv8_a53_map_event;
+ cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
+ &armv8_pmuv3_events_attr_group;
+ cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
+ &armv8_pmuv3_format_attr_group;
+
+ return 0;
+}
+
static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu)
{
int ret = armv8_pmu_init(cpu_pmu);
@@ -1110,6 +1054,22 @@ static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu)
return 0;
}
+static int armv8_a73_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ int ret = armv8_pmu_init(cpu_pmu);
+ if (ret)
+ return ret;
+
+ cpu_pmu->name = "armv8_cortex_a73";
+ cpu_pmu->map_event = armv8_a73_map_event;
+ cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
+ &armv8_pmuv3_events_attr_group;
+ cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
+ &armv8_pmuv3_format_attr_group;
+
+ return 0;
+}
+
static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu)
{
int ret = armv8_pmu_init(cpu_pmu);
@@ -1144,9 +1104,11 @@ static int armv8_vulcan_pmu_init(struct arm_pmu *cpu_pmu)
static const struct of_device_id armv8_pmu_of_device_ids[] = {
{.compatible = "arm,armv8-pmuv3", .data = armv8_pmuv3_init},
+ {.compatible = "arm,cortex-a35-pmu", .data = armv8_a35_pmu_init},
{.compatible = "arm,cortex-a53-pmu", .data = armv8_a53_pmu_init},
{.compatible = "arm,cortex-a57-pmu", .data = armv8_a57_pmu_init},
{.compatible = "arm,cortex-a72-pmu", .data = armv8_a72_pmu_init},
+ {.compatible = "arm,cortex-a73-pmu", .data = armv8_a73_pmu_init},
{.compatible = "cavium,thunder-pmu", .data = armv8_thunder_pmu_init},
{.compatible = "brcm,vulcan-pmu", .data = armv8_vulcan_pmu_init},
{},
diff --git a/arch/arm64/kernel/probes/uprobes.c b/arch/arm64/kernel/probes/uprobes.c
index 26c998534dca..636ca0119c0e 100644
--- a/arch/arm64/kernel/probes/uprobes.c
+++ b/arch/arm64/kernel/probes/uprobes.c
@@ -40,7 +40,7 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
probe_opcode_t insn;
/* TODO: Currently we do not support AARCH32 instruction probing */
- if (test_bit(TIF_32BIT, &mm->context.flags))
+ if (mm->context.flags & MMCF_AARCH32)
return -ENOTSUPP;
else if (!IS_ALIGNED(addr, AARCH64_INSN_SIZE))
return -EINVAL;
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 659ae8094ed5..2dc0f8482210 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -360,6 +360,8 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
/*
* Complete any pending TLB or cache maintenance on this CPU in case
* the thread migrates to a different CPU.
+ * This full barrier is also required by the membarrier system
+ * call.
*/
dsb(ish);
@@ -382,15 +384,12 @@ unsigned long get_wchan(struct task_struct *p)
return 0;
frame.fp = thread_saved_fp(p);
- frame.sp = thread_saved_sp(p);
frame.pc = thread_saved_pc(p);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
frame.graph = p->curr_ret_stack;
#endif
do {
- if (frame.sp < stack_page ||
- frame.sp >= stack_page + THREAD_SIZE ||
- unwind_frame(p, &frame))
+ if (unwind_frame(p, &frame))
goto out;
if (!in_sched_functions(frame.pc)) {
ret = frame.pc;
@@ -417,3 +416,11 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
else
return randomize_page(mm->brk, SZ_1G);
}
+
+/*
+ * Called from setup_new_exec() after (COMPAT_)SET_PERSONALITY.
+ */
+void arch_setup_new_exec(void)
+{
+ current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;
+}
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 1b38c0150aec..9cbb6123208f 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -42,6 +42,7 @@
#include <asm/compat.h>
#include <asm/debug-monitors.h>
#include <asm/pgtable.h>
+#include <asm/stacktrace.h>
#include <asm/syscall.h>
#include <asm/traps.h>
#include <asm/system_misc.h>
@@ -127,7 +128,7 @@ static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
return ((addr & ~(THREAD_SIZE - 1)) ==
(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
- on_irq_stack(addr, raw_smp_processor_id());
+ on_irq_stack(addr);
}
/**
@@ -1363,7 +1364,7 @@ static void tracehook_report_syscall(struct pt_regs *regs,
if (dir == PTRACE_SYSCALL_EXIT)
tracehook_report_syscall_exit(regs, 0);
else if (tracehook_report_syscall_entry(regs))
- regs->syscallno = ~0UL;
+ forget_syscall(regs);
regs->regs[regno] = saved_reg;
}
diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c
index 12a87f2600f2..933adbc0f654 100644
--- a/arch/arm64/kernel/return_address.c
+++ b/arch/arm64/kernel/return_address.c
@@ -42,7 +42,6 @@ void *return_address(unsigned int level)
data.addr = NULL;
frame.fp = (unsigned long)__builtin_frame_address(0);
- frame.sp = current_stack_pointer;
frame.pc = (unsigned long)return_address; /* dummy */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
frame.graph = current->curr_ret_stack;
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 089c3747995d..c45214f8fb54 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -29,6 +29,7 @@
#include <linux/string.h>
#include <linux/tracehook.h>
#include <linux/ratelimit.h>
+#include <linux/syscalls.h>
#include <asm/debug-monitors.h>
#include <asm/elf.h>
@@ -36,6 +37,7 @@
#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/fpsimd.h>
+#include <asm/ptrace.h>
#include <asm/signal32.h>
#include <asm/vdso.h>
@@ -387,7 +389,7 @@ static int restore_sigframe(struct pt_regs *regs,
/*
* Avoid sys_rt_sigreturn() restarting.
*/
- regs->syscallno = ~0UL;
+ forget_syscall(regs);
err |= !valid_user_regs(&regs->user_regs, current);
if (err == 0)
@@ -673,13 +675,12 @@ static void do_signal(struct pt_regs *regs)
{
unsigned long continue_addr = 0, restart_addr = 0;
int retval = 0;
- int syscall = (int)regs->syscallno;
struct ksignal ksig;
/*
* If we were from a system call, check for system call restarting...
*/
- if (syscall >= 0) {
+ if (in_syscall(regs)) {
continue_addr = regs->pc;
restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4);
retval = regs->regs[0];
@@ -687,7 +688,7 @@ static void do_signal(struct pt_regs *regs)
/*
* Avoid additional syscall restarting via ret_to_user.
*/
- regs->syscallno = ~0UL;
+ forget_syscall(regs);
/*
* Prepare for system call restart. We do this here so that a
@@ -731,7 +732,7 @@ static void do_signal(struct pt_regs *regs)
* Handle restarting a different system call. As above, if a debugger
* has chosen to restart at a different PC, ignore the restart.
*/
- if (syscall >= 0 && regs->pc == restart_addr) {
+ if (in_syscall(regs) && regs->pc == restart_addr) {
if (retval == -ERESTART_RESTARTBLOCK)
setup_restart_syscall(regs);
user_rewind_single_step(current);
@@ -749,6 +750,10 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
* Update the trace code with the current status.
*/
trace_hardirqs_off();
+
+ /* Check valid user FS if needed */
+ addr_limit_user_check();
+
do {
if (thread_flags & _TIF_NEED_RESCHED) {
schedule();
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index c747a0fc5d7d..4e5a664be04b 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -354,7 +354,7 @@ static int compat_restore_sigframe(struct pt_regs *regs,
/*
* Avoid compat_sys_sigreturn() restarting.
*/
- regs->syscallno = ~0UL;
+ forget_syscall(regs);
err |= !valid_user_regs(&regs->user_regs, current);
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 321119881abf..ffe089942ac4 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -154,7 +154,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
* page tables.
*/
secondary_data.task = idle;
- secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
+ secondary_data.stack = task_stack_page(idle) + THREAD_SIZE;
update_cpu_boot_status(CPU_MMU_OFF);
__flush_dcache_area(&secondary_data, sizeof(secondary_data));
@@ -469,7 +469,7 @@ static u64 __init of_get_cpu_mpidr(struct device_node *dn)
*/
cell = of_get_property(dn, "reg", NULL);
if (!cell) {
- pr_err("%s: missing reg property\n", dn->full_name);
+ pr_err("%pOF: missing reg property\n", dn);
return INVALID_HWID;
}
@@ -478,7 +478,7 @@ static u64 __init of_get_cpu_mpidr(struct device_node *dn)
* Non affinity bits must be set to 0 in the DT
*/
if (hwid & ~MPIDR_HWID_BITMASK) {
- pr_err("%s: invalid reg property\n", dn->full_name);
+ pr_err("%pOF: invalid reg property\n", dn);
return INVALID_HWID;
}
return hwid;
@@ -627,8 +627,8 @@ static void __init of_parse_and_init_cpus(void)
goto next;
if (is_mpidr_duplicate(cpu_count, hwid)) {
- pr_err("%s: duplicate cpu reg properties in the DT\n",
- dn->full_name);
+ pr_err("%pOF: duplicate cpu reg properties in the DT\n",
+ dn);
goto next;
}
@@ -640,8 +640,8 @@ static void __init of_parse_and_init_cpus(void)
*/
if (hwid == cpu_logical_map(0)) {
if (bootcpu_valid) {
- pr_err("%s: duplicate boot cpu reg property in DT\n",
- dn->full_name);
+ pr_err("%pOF: duplicate boot cpu reg property in DT\n",
+ dn);
goto next;
}
@@ -977,11 +977,21 @@ void smp_send_stop(void)
}
#ifdef CONFIG_KEXEC_CORE
-void smp_send_crash_stop(void)
+void crash_smp_send_stop(void)
{
+ static int cpus_stopped;
cpumask_t mask;
unsigned long timeout;
+ /*
+ * This function can be called twice in panic path, but obviously
+ * we execute this only once.
+ */
+ if (cpus_stopped)
+ return;
+
+ cpus_stopped = 1;
+
if (num_online_cpus() == 1)
return;
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 09d37d66b630..3144584617e7 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -42,33 +42,17 @@
*/
int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
{
- unsigned long high, low;
unsigned long fp = frame->fp;
- unsigned long irq_stack_ptr;
+
+ if (fp & 0xf)
+ return -EINVAL;
if (!tsk)
tsk = current;
- /*
- * Switching between stacks is valid when tracing current and in
- * non-preemptible context.
- */
- if (tsk == current && !preemptible())
- irq_stack_ptr = IRQ_STACK_PTR(smp_processor_id());
- else
- irq_stack_ptr = 0;
-
- low = frame->sp;
- /* irq stacks are not THREAD_SIZE aligned */
- if (on_irq_stack(frame->sp, raw_smp_processor_id()))
- high = irq_stack_ptr;
- else
- high = ALIGN(low, THREAD_SIZE) - 0x20;
-
- if (fp < low || fp > high || fp & 0xf)
+ if (!on_accessible_stack(tsk, fp))
return -EINVAL;
- frame->sp = fp + 0x10;
frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));
@@ -86,34 +70,13 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
/*
- * Check whether we are going to walk through from interrupt stack
- * to task stack.
- * If we reach the end of the stack - and its an interrupt stack,
- * unpack the dummy frame to find the original elr.
- *
- * Check the frame->fp we read from the bottom of the irq_stack,
- * and the original task stack pointer are both in current->stack.
+ * Frames created upon entry from EL0 have NULL FP and PC values, so
+ * don't bother reporting these. Frames created by __noreturn functions
+ * might have a valid FP even if PC is bogus, so only terminate where
+ * both are NULL.
*/
- if (frame->sp == irq_stack_ptr) {
- struct pt_regs *irq_args;
- unsigned long orig_sp = IRQ_STACK_TO_TASK_STACK(irq_stack_ptr);
-
- if (object_is_on_stack((void *)orig_sp) &&
- object_is_on_stack((void *)frame->fp)) {
- frame->sp = orig_sp;
-
- /* orig_sp is the saved pt_regs, find the elr */
- irq_args = (struct pt_regs *)orig_sp;
- frame->pc = irq_args->pc;
- } else {
- /*
- * This frame has a non-standard format, and we
- * didn't fix it, because the data looked wrong.
- * Refuse to output this frame.
- */
- return -EINVAL;
- }
- }
+ if (!frame->fp && !frame->pc)
+ return -EINVAL;
return 0;
}
@@ -167,7 +130,6 @@ void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
data.no_sched_functions = 0;
frame.fp = regs->regs[29];
- frame.sp = regs->sp;
frame.pc = regs->pc;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
frame.graph = current->curr_ret_stack;
@@ -192,12 +154,10 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
if (tsk != current) {
data.no_sched_functions = 1;
frame.fp = thread_saved_fp(tsk);
- frame.sp = thread_saved_sp(tsk);
frame.pc = thread_saved_pc(tsk);
} else {
data.no_sched_functions = 0;
frame.fp = (unsigned long)__builtin_frame_address(0);
- frame.sp = current_stack_pointer;
frame.pc = (unsigned long)save_stack_trace_tsk;
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
index da33c90248e9..a4391280fba9 100644
--- a/arch/arm64/kernel/time.c
+++ b/arch/arm64/kernel/time.c
@@ -50,7 +50,6 @@ unsigned long profile_pc(struct pt_regs *regs)
return regs->pc;
frame.fp = regs->regs[29];
- frame.sp = regs->sp;
frame.pc = regs->pc;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
frame.graph = -1; /* no task info */
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 79244c75eaec..8d48b233e6ce 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -45,7 +45,7 @@ static int __init get_cpu_for_node(struct device_node *node)
}
}
- pr_crit("Unable to find CPU node for %s\n", cpu_node->full_name);
+ pr_crit("Unable to find CPU node for %pOF\n", cpu_node);
of_node_put(cpu_node);
return -1;
@@ -71,8 +71,8 @@ static int __init parse_core(struct device_node *core, int cluster_id,
cpu_topology[cpu].core_id = core_id;
cpu_topology[cpu].thread_id = i;
} else {
- pr_err("%s: Can't get CPU for thread\n",
- t->full_name);
+ pr_err("%pOF: Can't get CPU for thread\n",
+ t);
of_node_put(t);
return -EINVAL;
}
@@ -84,15 +84,15 @@ static int __init parse_core(struct device_node *core, int cluster_id,
cpu = get_cpu_for_node(core);
if (cpu >= 0) {
if (!leaf) {
- pr_err("%s: Core has both threads and CPU\n",
- core->full_name);
+ pr_err("%pOF: Core has both threads and CPU\n",
+ core);
return -EINVAL;
}
cpu_topology[cpu].cluster_id = cluster_id;
cpu_topology[cpu].core_id = core_id;
} else if (leaf) {
- pr_err("%s: Can't get CPU for leaf core\n", core->full_name);
+ pr_err("%pOF: Can't get CPU for leaf core\n", core);
return -EINVAL;
}
@@ -137,8 +137,8 @@ static int __init parse_cluster(struct device_node *cluster, int depth)
has_cores = true;
if (depth == 0) {
- pr_err("%s: cpu-map children should be clusters\n",
- c->full_name);
+ pr_err("%pOF: cpu-map children should be clusters\n",
+ c);
of_node_put(c);
return -EINVAL;
}
@@ -146,8 +146,8 @@ static int __init parse_cluster(struct device_node *cluster, int depth)
if (leaf) {
ret = parse_core(c, cluster_id, core_id++);
} else {
- pr_err("%s: Non-leaf cluster with core %s\n",
- cluster->full_name, name);
+ pr_err("%pOF: Non-leaf cluster with core %s\n",
+ cluster, name);
ret = -EINVAL;
}
@@ -159,7 +159,7 @@ static int __init parse_cluster(struct device_node *cluster, int depth)
} while (c);
if (leaf && !has_cores)
- pr_warn("%s: empty cluster\n", cluster->full_name);
+ pr_warn("%pOF: empty cluster\n", cluster);
if (leaf)
cluster_id++;
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index c7c7088097be..5ea4b85aee0e 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -32,6 +32,7 @@
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
+#include <linux/sizes.h>
#include <linux/syscalls.h>
#include <linux/mm_types.h>
@@ -41,6 +42,7 @@
#include <asm/esr.h>
#include <asm/insn.h>
#include <asm/traps.h>
+#include <asm/smp.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
#include <asm/exception.h>
@@ -143,7 +145,6 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
struct stackframe frame;
- unsigned long irq_stack_ptr;
int skip;
pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
@@ -154,25 +155,14 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
if (!try_get_task_stack(tsk))
return;
- /*
- * Switching between stacks is valid when tracing current and in
- * non-preemptible context.
- */
- if (tsk == current && !preemptible())
- irq_stack_ptr = IRQ_STACK_PTR(smp_processor_id());
- else
- irq_stack_ptr = 0;
-
if (tsk == current) {
frame.fp = (unsigned long)__builtin_frame_address(0);
- frame.sp = current_stack_pointer;
frame.pc = (unsigned long)dump_backtrace;
} else {
/*
* task blocked in __switch_to
*/
frame.fp = thread_saved_fp(tsk);
- frame.sp = thread_saved_sp(tsk);
frame.pc = thread_saved_pc(tsk);
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -182,13 +172,12 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
skip = !!regs;
printk("Call trace:\n");
while (1) {
- unsigned long where = frame.pc;
unsigned long stack;
int ret;
/* skip until specified stack frame */
if (!skip) {
- dump_backtrace_entry(where);
+ dump_backtrace_entry(frame.pc);
} else if (frame.fp == regs->regs[29]) {
skip = 0;
/*
@@ -203,20 +192,12 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
ret = unwind_frame(tsk, &frame);
if (ret < 0)
break;
- stack = frame.sp;
- if (in_exception_text(where)) {
- /*
- * If we switched to the irq_stack before calling this
- * exception handler, then the pt_regs will be on the
- * task stack. The easiest way to tell is if the large
- * pt_regs would overlap with the end of the irq_stack.
- */
- if (stack < irq_stack_ptr &&
- (stack + sizeof(struct pt_regs)) > irq_stack_ptr)
- stack = IRQ_STACK_TO_TASK_STACK(irq_stack_ptr);
+ if (in_entry_text(frame.pc)) {
+ stack = frame.fp - offsetof(struct pt_regs, stackframe);
- dump_mem("", "Exception stack", stack,
- stack + sizeof(struct pt_regs));
+ if (on_accessible_stack(tsk, stack))
+ dump_mem("", "Exception stack", stack,
+ stack + sizeof(struct pt_regs));
}
}
@@ -257,8 +238,6 @@ static int __die(const char *str, int err, struct pt_regs *regs)
end_of_stack(tsk));
if (!user_mode(regs)) {
- dump_mem(KERN_EMERG, "Stack: ", regs->sp,
- THREAD_SIZE + (unsigned long)task_stack_page(tsk));
dump_backtrace(regs, tsk);
dump_instr(KERN_EMERG, regs);
}
@@ -274,10 +253,12 @@ static DEFINE_RAW_SPINLOCK(die_lock);
void die(const char *str, struct pt_regs *regs, int err)
{
int ret;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&die_lock, flags);
oops_enter();
- raw_spin_lock_irq(&die_lock);
console_verbose();
bust_spinlocks(1);
ret = __die(str, err, regs);
@@ -287,13 +268,15 @@ void die(const char *str, struct pt_regs *regs, int err)
bust_spinlocks(0);
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
- raw_spin_unlock_irq(&die_lock);
oops_exit();
if (in_interrupt())
panic("Fatal exception in interrupt");
if (panic_on_oops)
panic("Fatal exception");
+
+ raw_spin_unlock_irqrestore(&die_lock, flags);
+
if (ret != NOTIFY_STOP)
do_exit(SIGSEGV);
}
@@ -480,6 +463,9 @@ static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
case ESR_ELx_SYS64_ISS_CRM_DC_CVAC: /* DC CVAC, gets promoted */
__user_cache_maint("dc civac", address, ret);
break;
+ case ESR_ELx_SYS64_ISS_CRM_DC_CVAP: /* DC CVAP */
+ __user_cache_maint("sys 3, c7, c12, 1", address, ret);
+ break;
case ESR_ELx_SYS64_ISS_CRM_DC_CIVAC: /* DC CIVAC */
__user_cache_maint("dc civac", address, ret);
break;
@@ -519,7 +505,7 @@ static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
{
int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
- pt_regs_write_reg(regs, rt, read_sysreg(cntfrq_el0));
+ pt_regs_write_reg(regs, rt, arch_timer_get_rate());
regs->pc += 4;
}
@@ -589,7 +575,7 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs)
if (show_unhandled_signals_ratelimited()) {
pr_info("%s[%d]: syscall %d\n", current->comm,
- task_pid_nr(current), (int)regs->syscallno);
+ task_pid_nr(current), regs->syscallno);
dump_instr("", regs);
if (user_mode(regs))
__show_regs(regs);
@@ -685,6 +671,43 @@ asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
force_sig_info(info.si_signo, &info, current);
}
+#ifdef CONFIG_VMAP_STACK
+
+DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
+ __aligned(16);
+
+asmlinkage void handle_bad_stack(struct pt_regs *regs)
+{
+ unsigned long tsk_stk = (unsigned long)current->stack;
+ unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr);
+ unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);
+ unsigned int esr = read_sysreg(esr_el1);
+ unsigned long far = read_sysreg(far_el1);
+
+ console_verbose();
+ pr_emerg("Insufficient stack space to handle exception!");
+
+ pr_emerg("ESR: 0x%08x -- %s\n", esr, esr_get_class_string(esr));
+ pr_emerg("FAR: 0x%016lx\n", far);
+
+ pr_emerg("Task stack: [0x%016lx..0x%016lx]\n",
+ tsk_stk, tsk_stk + THREAD_SIZE);
+ pr_emerg("IRQ stack: [0x%016lx..0x%016lx]\n",
+ irq_stk, irq_stk + THREAD_SIZE);
+ pr_emerg("Overflow stack: [0x%016lx..0x%016lx]\n",
+ ovf_stk, ovf_stk + OVERFLOW_STACK_SIZE);
+
+ __show_regs(regs);
+
+ /*
+ * We use nmi_panic to limit the potential for recursive overflows, and
+ * to get a better stack trace.
+ */
+ nmi_panic(NULL, "kernel stack overflow");
+ cpu_park_loop();
+}
+#endif
+
void __pte_error(const char *file, int line, unsigned long val)
{
pr_err("%s:%d: bad pte %016lx.\n", file, line, val);
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index e8f759f764f2..2d419006ad43 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -110,12 +110,27 @@ int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp)
}
#endif /* CONFIG_COMPAT */
+static int vdso_mremap(const struct vm_special_mapping *sm,
+ struct vm_area_struct *new_vma)
+{
+ unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
+ unsigned long vdso_size = vdso_end - vdso_start;
+
+ if (vdso_size != new_size)
+ return -EINVAL;
+
+ current->mm->context.vdso = (void *)new_vma->vm_start;
+
+ return 0;
+}
+
static struct vm_special_mapping vdso_spec[2] __ro_after_init = {
{
.name = "[vvar]",
},
{
.name = "[vdso]",
+ .mremap = vdso_mremap,
},
};
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 987a00ee446c..fe56c268a7d9 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -72,22 +72,6 @@ PECOFF_FILE_ALIGNMENT = 0x200;
#define PECOFF_EDATA_PADDING
#endif
-#if defined(CONFIG_DEBUG_ALIGN_RODATA)
-/*
- * 4 KB granule: 1 level 2 entry
- * 16 KB granule: 128 level 3 entries, with contiguous bit
- * 64 KB granule: 32 level 3 entries, with contiguous bit
- */
-#define SEGMENT_ALIGN SZ_2M
-#else
-/*
- * 4 KB granule: 16 level 3 entries, with contiguous bit
- * 16 KB granule: 4 level 3 entries, without contiguous bit
- * 64 KB granule: 1 level 3 entry
- */
-#define SEGMENT_ALIGN SZ_64K
-#endif
-
SECTIONS
{
/*
@@ -192,7 +176,7 @@ SECTIONS
_data = .;
_sdata = .;
- RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
+ RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_ALIGN)
/*
* Data written with the MMU off but read with the MMU on requires
diff --git a/arch/arm64/kvm/hyp/s2-setup.c b/arch/arm64/kvm/hyp/s2-setup.c
index b81f4091c909..a81f5e10fc8c 100644
--- a/arch/arm64/kvm/hyp/s2-setup.c
+++ b/arch/arm64/kvm/hyp/s2-setup.c
@@ -70,7 +70,7 @@ u32 __hyp_text __init_stage2_translation(void)
* Management in ID_AA64MMFR1_EL1 and enable the feature in VTCR_EL2.
*/
tmp = (read_sysreg(id_aa64mmfr1_el1) >> ID_AA64MMFR1_HADBS_SHIFT) & 0xf;
- if (IS_ENABLED(CONFIG_ARM64_HW_AFDBM) && tmp)
+ if (tmp)
val |= VTCR_EL2_HA;
/*
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 77862881ae86..2e070d3baf9f 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -764,7 +764,7 @@ static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
if (p->is_write) {
if (r->CRm & 0x2)
/* accessing PMOVSSET_EL0 */
- kvm_pmu_overflow_set(vcpu, p->regval & mask);
+ vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
else
/* accessing PMOVSCLR_EL0 */
vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile
index c86b7909ef31..a0abc142c92b 100644
--- a/arch/arm64/lib/Makefile
+++ b/arch/arm64/lib/Makefile
@@ -17,3 +17,5 @@ CFLAGS_atomic_ll_sc.o := -fcall-used-x0 -ffixed-x1 -ffixed-x2 \
-fcall-saved-x10 -fcall-saved-x11 -fcall-saved-x12 \
-fcall-saved-x13 -fcall-saved-x14 -fcall-saved-x15 \
-fcall-saved-x18
+
+lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o
diff --git a/arch/arm64/lib/copy_page.S b/arch/arm64/lib/copy_page.S
index c3cd65e31814..076c43715e64 100644
--- a/arch/arm64/lib/copy_page.S
+++ b/arch/arm64/lib/copy_page.S
@@ -30,9 +30,10 @@
*/
ENTRY(copy_page)
alternative_if ARM64_HAS_NO_HW_PREFETCH
- # Prefetch two cache lines ahead.
- prfm pldl1strm, [x1, #128]
- prfm pldl1strm, [x1, #256]
+ // Prefetch three cache lines ahead.
+ prfm pldl1strm, [x1, #128]
+ prfm pldl1strm, [x1, #256]
+ prfm pldl1strm, [x1, #384]
alternative_else_nop_endif
ldp x2, x3, [x1]
@@ -50,7 +51,7 @@ alternative_else_nop_endif
subs x18, x18, #128
alternative_if ARM64_HAS_NO_HW_PREFETCH
- prfm pldl1strm, [x1, #384]
+ prfm pldl1strm, [x1, #384]
alternative_else_nop_endif
stnp x2, x3, [x0]
diff --git a/arch/arm64/lib/uaccess_flushcache.c b/arch/arm64/lib/uaccess_flushcache.c
new file mode 100644
index 000000000000..b6ceafdb8b72
--- /dev/null
+++ b/arch/arm64/lib/uaccess_flushcache.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2017 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/uaccess.h>
+#include <asm/barrier.h>
+#include <asm/cacheflush.h>
+
+void memcpy_flushcache(void *dst, const void *src, size_t cnt)
+{
+ /*
+ * We assume this should not be called with @dst pointing to
+ * non-cacheable memory, such that we don't need an explicit
+ * barrier to order the cache maintenance against the memcpy.
+ */
+ memcpy(dst, src, cnt);
+ __clean_dcache_area_pop(dst, cnt);
+}
+EXPORT_SYMBOL_GPL(memcpy_flushcache);
+
+void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
+ size_t len)
+{
+ memcpy_flushcache(to, page_address(page) + offset, len);
+}
+
+unsigned long __copy_user_flushcache(void *to, const void __user *from,
+ unsigned long n)
+{
+ unsigned long rc = __arch_copy_from_user(to, from, n);
+
+ /* See above */
+ __clean_dcache_area_pop(to, n - rc);
+ return rc;
+}
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 83c27b6e6dca..7f1dbe962cf5 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -109,20 +109,25 @@ ENTRY(__clean_dcache_area_pou)
ENDPROC(__clean_dcache_area_pou)
/*
- * __dma_inv_area(start, size)
- * - start - virtual start address of region
+ * __inval_dcache_area(kaddr, size)
+ *
+ * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ * are invalidated. Any partial lines at the ends of the interval are
+ * also cleaned to PoC to prevent data loss.
+ *
+ * - kaddr - kernel address
* - size - size in question
*/
-__dma_inv_area:
- add x1, x1, x0
+ENTRY(__inval_dcache_area)
/* FALLTHROUGH */
/*
- * __inval_cache_range(start, end)
- * - start - start address of region
- * - end - end address of region
+ * __dma_inv_area(start, size)
+ * - start - virtual start address of region
+ * - size - size in question
*/
-ENTRY(__inval_cache_range)
+__dma_inv_area:
+ add x1, x1, x0
dcache_line_size x2, x3
sub x3, x2, #1
tst x1, x3 // end cache line aligned?
@@ -140,7 +145,7 @@ ENTRY(__inval_cache_range)
b.lo 2b
dsb sy
ret
-ENDPIPROC(__inval_cache_range)
+ENDPIPROC(__inval_dcache_area)
ENDPROC(__dma_inv_area)
/*
@@ -167,6 +172,20 @@ ENDPIPROC(__clean_dcache_area_poc)
ENDPROC(__dma_clean_area)
/*
+ * __clean_dcache_area_pop(kaddr, size)
+ *
+ * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ * are cleaned to the PoP.
+ *
+ * - kaddr - kernel address
+ * - size - size in question
+ */
+ENTRY(__clean_dcache_area_pop)
+ dcache_by_line_op cvap, sy, x0, x1, x2, x3
+ ret
+ENDPIPROC(__clean_dcache_area_pop)
+
+/*
* __dma_flush_area(start, size)
*
* clean & invalidate D / U line
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index e90cd1db42a8..614af886b7ef 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -42,7 +42,7 @@ static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
return prot;
}
-static struct gen_pool *atomic_pool;
+static struct gen_pool *atomic_pool __ro_after_init;
#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;
@@ -329,7 +329,7 @@ static int __swiotlb_mmap(struct device *dev,
vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
is_device_dma_coherent(dev));
- if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+ if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
return ret;
return __swiotlb_mmap_pfn(vma, pfn, size);
@@ -425,7 +425,7 @@ static int __init atomic_pool_init(void)
gen_pool_set_algo(atomic_pool,
gen_pool_first_fit_order_align,
- (void *)PAGE_SHIFT);
+ NULL);
pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
atomic_pool_size / 1024);
@@ -706,7 +706,7 @@ static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
is_device_dma_coherent(dev));
- if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+ if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
return ret;
if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index c7861c9864e6..89993c4be1be 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -34,6 +34,7 @@
#include <linux/hugetlb.h>
#include <asm/bug.h>
+#include <asm/cmpxchg.h>
#include <asm/cpufeature.h>
#include <asm/exception.h>
#include <asm/debug-monitors.h>
@@ -82,6 +83,49 @@ static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr)
}
#endif
+static void data_abort_decode(unsigned int esr)
+{
+ pr_alert("Data abort info:\n");
+
+ if (esr & ESR_ELx_ISV) {
+ pr_alert(" Access size = %u byte(s)\n",
+ 1U << ((esr & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT));
+ pr_alert(" SSE = %lu, SRT = %lu\n",
+ (esr & ESR_ELx_SSE) >> ESR_ELx_SSE_SHIFT,
+ (esr & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT);
+ pr_alert(" SF = %lu, AR = %lu\n",
+ (esr & ESR_ELx_SF) >> ESR_ELx_SF_SHIFT,
+ (esr & ESR_ELx_AR) >> ESR_ELx_AR_SHIFT);
+ } else {
+ pr_alert(" ISV = 0, ISS = 0x%08lu\n", esr & ESR_ELx_ISS_MASK);
+ }
+
+ pr_alert(" CM = %lu, WnR = %lu\n",
+ (esr & ESR_ELx_CM) >> ESR_ELx_CM_SHIFT,
+ (esr & ESR_ELx_WNR) >> ESR_ELx_WNR_SHIFT);
+}
+
+/*
+ * Decode mem abort information
+ */
+static void mem_abort_decode(unsigned int esr)
+{
+ pr_alert("Mem abort info:\n");
+
+ pr_alert(" Exception class = %s, IL = %u bits\n",
+ esr_get_class_string(esr),
+ (esr & ESR_ELx_IL) ? 32 : 16);
+ pr_alert(" SET = %lu, FnV = %lu\n",
+ (esr & ESR_ELx_SET_MASK) >> ESR_ELx_SET_SHIFT,
+ (esr & ESR_ELx_FnV) >> ESR_ELx_FnV_SHIFT);
+ pr_alert(" EA = %lu, S1PTW = %lu\n",
+ (esr & ESR_ELx_EA) >> ESR_ELx_EA_SHIFT,
+ (esr & ESR_ELx_S1PTW) >> ESR_ELx_S1PTW_SHIFT);
+
+ if (esr_is_data_abort(esr))
+ data_abort_decode(esr);
+}
+
/*
* Dump out the page tables associated with 'addr' in the currently active mm.
*/
@@ -139,7 +183,6 @@ void show_pte(unsigned long addr)
pr_cont("\n");
}
-#ifdef CONFIG_ARM64_HW_AFDBM
/*
* This function sets the access flags (dirty, accessed), as well as write
* permission, and only to a more permissive setting.
@@ -154,40 +197,33 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep,
pte_t entry, int dirty)
{
- pteval_t old_pteval;
- unsigned int tmp;
+ pteval_t old_pteval, pteval;
if (pte_same(*ptep, entry))
return 0;
/* only preserve the access flags and write permission */
- pte_val(entry) &= PTE_AF | PTE_WRITE | PTE_DIRTY;
-
- /*
- * PTE_RDONLY is cleared by default in the asm below, so set it in
- * back if necessary (read-only or clean PTE).
- */
- if (!pte_write(entry) || !pte_sw_dirty(entry))
- pte_val(entry) |= PTE_RDONLY;
+ pte_val(entry) &= PTE_RDONLY | PTE_AF | PTE_WRITE | PTE_DIRTY;
/*
* Setting the flags must be done atomically to avoid racing with the
- * hardware update of the access/dirty state.
+ * hardware update of the access/dirty state. The PTE_RDONLY bit must
+ * be set to the most permissive (lowest value) of *ptep and entry
+ * (calculated as: a & b == ~(~a | ~b)).
*/
- asm volatile("// ptep_set_access_flags\n"
- " prfm pstl1strm, %2\n"
- "1: ldxr %0, %2\n"
- " and %0, %0, %3 // clear PTE_RDONLY\n"
- " orr %0, %0, %4 // set flags\n"
- " stxr %w1, %0, %2\n"
- " cbnz %w1, 1b\n"
- : "=&r" (old_pteval), "=&r" (tmp), "+Q" (pte_val(*ptep))
- : "L" (~PTE_RDONLY), "r" (pte_val(entry)));
+ pte_val(entry) ^= PTE_RDONLY;
+ pteval = READ_ONCE(pte_val(*ptep));
+ do {
+ old_pteval = pteval;
+ pteval ^= PTE_RDONLY;
+ pteval |= pte_val(entry);
+ pteval ^= PTE_RDONLY;
+ pteval = cmpxchg_relaxed(&pte_val(*ptep), old_pteval, pteval);
+ } while (pteval != old_pteval);
flush_tlb_fix_spurious_fault(vma, address);
return 1;
}
-#endif
static bool is_el1_instruction_abort(unsigned int esr)
{
@@ -247,6 +283,8 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr,
pr_alert("Unable to handle kernel %s at virtual address %08lx\n", msg,
addr);
+ mem_abort_decode(esr);
+
show_pte(addr);
die("Oops", regs, esr);
bust_spinlocks(0);
@@ -434,8 +472,11 @@ retry:
* the mmap_sem because it would already be released
* in __lock_page_or_retry in mm/filemap.c.
*/
- if (fatal_signal_pending(current))
+ if (fatal_signal_pending(current)) {
+ if (!user_mode(regs))
+ goto no_context;
return 0;
+ }
/*
* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of
@@ -701,6 +742,8 @@ asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
pr_alert("Unhandled fault: %s (0x%08x) at 0x%016lx\n",
inf->name, esr, addr);
+ mem_abort_decode(esr);
+
info.si_signo = inf->sig;
info.si_errno = 0;
info.si_code = inf->code;
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index 21a8d828cbf4..e36ed5087b5c 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -83,3 +83,19 @@ EXPORT_SYMBOL(flush_dcache_page);
* Additional functions defined in assembly.
*/
EXPORT_SYMBOL(flush_icache_range);
+
+#ifdef CONFIG_ARCH_HAS_PMEM_API
+void arch_wb_cache_pmem(void *addr, size_t size)
+{
+ /* Ensure order against any prior non-cacheable writes */
+ dmb(osh);
+ __clean_dcache_area_pop(addr, size);
+}
+EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
+
+void arch_invalidate_pmem(void *addr, size_t size)
+{
+ __inval_dcache_area(addr, size);
+}
+EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
+#endif
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 656e0ece2289..6cb0fa92a651 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -41,6 +41,16 @@ int pud_huge(pud_t pud)
#endif
}
+/*
+ * Select all bits except the pfn
+ */
+static inline pgprot_t pte_pgprot(pte_t pte)
+{
+ unsigned long pfn = pte_pfn(pte);
+
+ return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
+}
+
static int find_num_contig(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, size_t *pgsize)
{
@@ -58,15 +68,107 @@ static int find_num_contig(struct mm_struct *mm, unsigned long addr,
return CONT_PTES;
}
+static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
+{
+ int contig_ptes = 0;
+
+ *pgsize = size;
+
+ switch (size) {
+#ifdef CONFIG_ARM64_4K_PAGES
+ case PUD_SIZE:
+#endif
+ case PMD_SIZE:
+ contig_ptes = 1;
+ break;
+ case CONT_PMD_SIZE:
+ *pgsize = PMD_SIZE;
+ contig_ptes = CONT_PMDS;
+ break;
+ case CONT_PTE_SIZE:
+ *pgsize = PAGE_SIZE;
+ contig_ptes = CONT_PTES;
+ break;
+ }
+
+ return contig_ptes;
+}
+
+/*
+ * Changing some bits of contiguous entries requires us to follow a
+ * Break-Before-Make approach, breaking the whole contiguous set
+ * before we can change any entries. See ARM DDI 0487A.k_iss10775,
+ * "Misprogramming of the Contiguous bit", page D4-1762.
+ *
+ * This helper performs the break step.
+ */
+static pte_t get_clear_flush(struct mm_struct *mm,
+ unsigned long addr,
+ pte_t *ptep,
+ unsigned long pgsize,
+ unsigned long ncontig)
+{
+ struct vm_area_struct vma = { .vm_mm = mm };
+ pte_t orig_pte = huge_ptep_get(ptep);
+ bool valid = pte_valid(orig_pte);
+ unsigned long i, saddr = addr;
+
+ for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) {
+ pte_t pte = ptep_get_and_clear(mm, addr, ptep);
+
+ /*
+ * If HW_AFDBM is enabled, then the HW could turn on
+ * the dirty bit for any page in the set, so check
+ * them all. All hugetlb entries are already young.
+ */
+ if (pte_dirty(pte))
+ orig_pte = pte_mkdirty(orig_pte);
+ }
+
+ if (valid)
+ flush_tlb_range(&vma, saddr, addr);
+ return orig_pte;
+}
+
+/*
+ * Changing some bits of contiguous entries requires us to follow a
+ * Break-Before-Make approach, breaking the whole contiguous set
+ * before we can change any entries. See ARM DDI 0487A.k_iss10775,
+ * "Misprogramming of the Contiguous bit", page D4-1762.
+ *
+ * This helper performs the break step for use cases where the
+ * original pte is not needed.
+ */
+static void clear_flush(struct mm_struct *mm,
+ unsigned long addr,
+ pte_t *ptep,
+ unsigned long pgsize,
+ unsigned long ncontig)
+{
+ struct vm_area_struct vma = { .vm_mm = mm };
+ unsigned long i, saddr = addr;
+
+ for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
+ pte_clear(mm, addr, ptep);
+
+ flush_tlb_range(&vma, saddr, addr);
+}
+
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
size_t pgsize;
int i;
int ncontig;
- unsigned long pfn;
+ unsigned long pfn, dpfn;
pgprot_t hugeprot;
+ /*
+ * Code needs to be expanded to handle huge swap and migration
+ * entries. Needed for HUGETLB and MEMORY_FAILURE.
+ */
+ WARN_ON(!pte_present(pte));
+
if (!pte_cont(pte)) {
set_pte_at(mm, addr, ptep, pte);
return;
@@ -74,17 +176,30 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
ncontig = find_num_contig(mm, addr, ptep, &pgsize);
pfn = pte_pfn(pte);
- hugeprot = __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
- for (i = 0; i < ncontig; i++) {
+ dpfn = pgsize >> PAGE_SHIFT;
+ hugeprot = pte_pgprot(pte);
+
+ clear_flush(mm, addr, ptep, pgsize, ncontig);
+
+ for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn) {
pr_debug("%s: set pte %p to 0x%llx\n", __func__, ptep,
pte_val(pfn_pte(pfn, hugeprot)));
set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
- ptep++;
- pfn += pgsize >> PAGE_SHIFT;
- addr += pgsize;
}
}
+void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte, unsigned long sz)
+{
+ int i, ncontig;
+ size_t pgsize;
+
+ ncontig = num_contig_ptes(sz, &pgsize);
+
+ for (i = 0; i < ncontig; i++, ptep++)
+ set_pte(ptep, pte);
+}
+
pte_t *huge_pte_alloc(struct mm_struct *mm,
unsigned long addr, unsigned long sz)
{
@@ -144,19 +259,28 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
return NULL;
pud = pud_offset(pgd, addr);
- if (pud_none(*pud))
+ if (sz != PUD_SIZE && pud_none(*pud))
return NULL;
- /* swap or huge page */
- if (!pud_present(*pud) || pud_huge(*pud))
+ /* hugepage or swap? */
+ if (pud_huge(*pud) || !pud_present(*pud))
return (pte_t *)pud;
/* table; check the next level */
+ if (sz == CONT_PMD_SIZE)
+ addr &= CONT_PMD_MASK;
+
pmd = pmd_offset(pud, addr);
- if (pmd_none(*pmd))
+ if (!(sz == PMD_SIZE || sz == CONT_PMD_SIZE) &&
+ pmd_none(*pmd))
return NULL;
- if (!pmd_present(*pmd) || pmd_huge(*pmd))
+ if (pmd_huge(*pmd) || !pmd_present(*pmd))
return (pte_t *)pmd;
+ if (sz == CONT_PTE_SIZE) {
+ pte_t *pte = pte_offset_kernel(pmd, (addr & CONT_PTE_MASK));
+ return pte;
+ }
+
return NULL;
}
@@ -176,111 +300,133 @@ pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
return entry;
}
+void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, unsigned long sz)
+{
+ int i, ncontig;
+ size_t pgsize;
+
+ ncontig = num_contig_ptes(sz, &pgsize);
+
+ for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
+ pte_clear(mm, addr, ptep);
+}
+
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
- pte_t pte;
+ int ncontig;
+ size_t pgsize;
+ pte_t orig_pte = huge_ptep_get(ptep);
- if (pte_cont(*ptep)) {
- int ncontig, i;
- size_t pgsize;
- bool is_dirty = false;
-
- ncontig = find_num_contig(mm, addr, ptep, &pgsize);
- /* save the 1st pte to return */
- pte = ptep_get_and_clear(mm, addr, ptep);
- for (i = 1, addr += pgsize; i < ncontig; ++i, addr += pgsize) {
- /*
- * If HW_AFDBM is enabled, then the HW could
- * turn on the dirty bit for any of the page
- * in the set, so check them all.
- */
- ++ptep;
- if (pte_dirty(ptep_get_and_clear(mm, addr, ptep)))
- is_dirty = true;
- }
- if (is_dirty)
- return pte_mkdirty(pte);
- else
- return pte;
- } else {
+ if (!pte_cont(orig_pte))
return ptep_get_and_clear(mm, addr, ptep);
- }
+
+ ncontig = find_num_contig(mm, addr, ptep, &pgsize);
+
+ return get_clear_flush(mm, addr, ptep, pgsize, ncontig);
}
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t pte, int dirty)
{
- if (pte_cont(pte)) {
- int ncontig, i, changed = 0;
- size_t pgsize = 0;
- unsigned long pfn = pte_pfn(pte);
- /* Select all bits except the pfn */
- pgprot_t hugeprot =
- __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^
- pte_val(pte));
-
- pfn = pte_pfn(pte);
- ncontig = find_num_contig(vma->vm_mm, addr, ptep,
- &pgsize);
- for (i = 0; i < ncontig; ++i, ++ptep, addr += pgsize) {
- changed |= ptep_set_access_flags(vma, addr, ptep,
- pfn_pte(pfn,
- hugeprot),
- dirty);
- pfn += pgsize >> PAGE_SHIFT;
- }
- return changed;
- } else {
+ int ncontig, i, changed = 0;
+ size_t pgsize = 0;
+ unsigned long pfn = pte_pfn(pte), dpfn;
+ pgprot_t hugeprot;
+ pte_t orig_pte;
+
+ if (!pte_cont(pte))
return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
- }
+
+ ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize);
+ dpfn = pgsize >> PAGE_SHIFT;
+
+ orig_pte = get_clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);
+ if (!pte_same(orig_pte, pte))
+ changed = 1;
+
+ /* Make sure we don't lose the dirty state */
+ if (pte_dirty(orig_pte))
+ pte = pte_mkdirty(pte);
+
+ hugeprot = pte_pgprot(pte);
+ for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
+ set_pte_at(vma->vm_mm, addr, ptep, pfn_pte(pfn, hugeprot));
+
+ return changed;
}
void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
- if (pte_cont(*ptep)) {
- int ncontig, i;
- size_t pgsize = 0;
-
- ncontig = find_num_contig(mm, addr, ptep, &pgsize);
- for (i = 0; i < ncontig; ++i, ++ptep, addr += pgsize)
- ptep_set_wrprotect(mm, addr, ptep);
- } else {
+ unsigned long pfn, dpfn;
+ pgprot_t hugeprot;
+ int ncontig, i;
+ size_t pgsize;
+ pte_t pte;
+
+ if (!pte_cont(*ptep)) {
ptep_set_wrprotect(mm, addr, ptep);
+ return;
}
+
+ ncontig = find_num_contig(mm, addr, ptep, &pgsize);
+ dpfn = pgsize >> PAGE_SHIFT;
+
+ pte = get_clear_flush(mm, addr, ptep, pgsize, ncontig);
+ pte = pte_wrprotect(pte);
+
+ hugeprot = pte_pgprot(pte);
+ pfn = pte_pfn(pte);
+
+ for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
+ set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
}
void huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
- if (pte_cont(*ptep)) {
- int ncontig, i;
- size_t pgsize = 0;
-
- ncontig = find_num_contig(vma->vm_mm, addr, ptep,
- &pgsize);
- for (i = 0; i < ncontig; ++i, ++ptep, addr += pgsize)
- ptep_clear_flush(vma, addr, ptep);
- } else {
+ size_t pgsize;
+ int ncontig;
+
+ if (!pte_cont(*ptep)) {
ptep_clear_flush(vma, addr, ptep);
+ return;
}
+
+ ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize);
+ clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);
}
static __init int setup_hugepagesz(char *opt)
{
unsigned long ps = memparse(opt, &opt);
- if (ps == PMD_SIZE) {
- hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
- } else if (ps == PUD_SIZE) {
- hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
- } else {
- hugetlb_bad_size();
- pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10);
- return 0;
+ switch (ps) {
+#ifdef CONFIG_ARM64_4K_PAGES
+ case PUD_SIZE:
+#endif
+ case PMD_SIZE * CONT_PMDS:
+ case PMD_SIZE:
+ case PAGE_SIZE * CONT_PTES:
+ hugetlb_add_hstate(ilog2(ps) - PAGE_SHIFT);
+ return 1;
}
- return 1;
+
+ hugetlb_bad_size();
+ pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10);
+ return 0;
}
__setup("hugepagesz=", setup_hugepagesz);
+
+#ifdef CONFIG_ARM64_64K_PAGES
+static __init int add_default_hugepagesz(void)
+{
+ if (size_to_hstate(CONT_PTES * PAGE_SIZE) == NULL)
+ hugetlb_add_hstate(CONT_PTE_SHIFT);
+ return 0;
+}
+arch_initcall(add_default_hugepagesz);
+#endif
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 23c2d89a362e..f1eb15e0e864 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -496,7 +496,7 @@ void mark_rodata_ro(void)
static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
pgprot_t prot, struct vm_struct *vma,
- int flags)
+ int flags, unsigned long vm_flags)
{
phys_addr_t pa_start = __pa_symbol(va_start);
unsigned long size = va_end - va_start;
@@ -507,10 +507,13 @@ static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
__create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot,
early_pgtable_alloc, flags);
+ if (!(vm_flags & VM_NO_GUARD))
+ size += PAGE_SIZE;
+
vma->addr = va_start;
vma->phys_addr = pa_start;
vma->size = size;
- vma->flags = VM_MAP;
+ vma->flags = VM_MAP | vm_flags;
vma->caller = __builtin_return_address(0);
vm_area_add_early(vma);
@@ -541,14 +544,15 @@ static void __init map_kernel(pgd_t *pgd)
* Only rodata will be remapped with different permissions later on,
* all other segments are allowed to use contiguous mappings.
*/
- map_kernel_segment(pgd, _text, _etext, text_prot, &vmlinux_text, 0);
+ map_kernel_segment(pgd, _text, _etext, text_prot, &vmlinux_text, 0,
+ VM_NO_GUARD);
map_kernel_segment(pgd, __start_rodata, __inittext_begin, PAGE_KERNEL,
- &vmlinux_rodata, NO_CONT_MAPPINGS);
+ &vmlinux_rodata, NO_CONT_MAPPINGS, VM_NO_GUARD);
map_kernel_segment(pgd, __inittext_begin, __inittext_end, text_prot,
- &vmlinux_inittext, 0);
+ &vmlinux_inittext, 0, VM_NO_GUARD);
map_kernel_segment(pgd, __initdata_begin, __initdata_end, PAGE_KERNEL,
- &vmlinux_initdata, 0);
- map_kernel_segment(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data, 0);
+ &vmlinux_initdata, 0, VM_NO_GUARD);
+ map_kernel_segment(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0);
if (!pgd_val(*pgd_offset_raw(pgd, FIXADDR_START))) {
/*
diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c
index b388a99fea7b..dad128ba98bf 100644
--- a/arch/arm64/mm/numa.c
+++ b/arch/arm64/mm/numa.c
@@ -208,8 +208,6 @@ int __init numa_add_memblk(int nid, u64 start, u64 end)
}
node_set(nid, numa_nodes_parsed);
- pr_info("Adding memblock [0x%llx - 0x%llx] on node %d\n",
- start, (end - 1), nid);
return ret;
}
@@ -223,10 +221,7 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
void *nd;
int tnid;
- if (start_pfn < end_pfn)
- pr_info("Initmem setup node %d [mem %#010Lx-%#010Lx]\n", nid,
- start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1);
- else
+ if (start_pfn >= end_pfn)
pr_info("Initmem setup node %d [<memory-less node>]\n", nid);
nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h
index b02a9268dfbf..783de51a6c4e 100644
--- a/arch/arm64/net/bpf_jit.h
+++ b/arch/arm64/net/bpf_jit.h
@@ -44,8 +44,12 @@
#define A64_COND_NE AARCH64_INSN_COND_NE /* != */
#define A64_COND_CS AARCH64_INSN_COND_CS /* unsigned >= */
#define A64_COND_HI AARCH64_INSN_COND_HI /* unsigned > */
+#define A64_COND_LS AARCH64_INSN_COND_LS /* unsigned <= */
+#define A64_COND_CC AARCH64_INSN_COND_CC /* unsigned < */
#define A64_COND_GE AARCH64_INSN_COND_GE /* signed >= */
#define A64_COND_GT AARCH64_INSN_COND_GT /* signed > */
+#define A64_COND_LE AARCH64_INSN_COND_LE /* signed <= */
+#define A64_COND_LT AARCH64_INSN_COND_LT /* signed < */
#define A64_B_(cond, imm19) A64_COND_BRANCH(cond, (imm19) << 2)
/* Unconditional branch (immediate) */
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index f32144b2e07f..ba38d403abb2 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -527,10 +527,14 @@ emit_bswap_uxt:
/* IF (dst COND src) JUMP off */
case BPF_JMP | BPF_JEQ | BPF_X:
case BPF_JMP | BPF_JGT | BPF_X:
+ case BPF_JMP | BPF_JLT | BPF_X:
case BPF_JMP | BPF_JGE | BPF_X:
+ case BPF_JMP | BPF_JLE | BPF_X:
case BPF_JMP | BPF_JNE | BPF_X:
case BPF_JMP | BPF_JSGT | BPF_X:
+ case BPF_JMP | BPF_JSLT | BPF_X:
case BPF_JMP | BPF_JSGE | BPF_X:
+ case BPF_JMP | BPF_JSLE | BPF_X:
emit(A64_CMP(1, dst, src), ctx);
emit_cond_jmp:
jmp_offset = bpf2a64_offset(i + off, i, ctx);
@@ -542,9 +546,15 @@ emit_cond_jmp:
case BPF_JGT:
jmp_cond = A64_COND_HI;
break;
+ case BPF_JLT:
+ jmp_cond = A64_COND_CC;
+ break;
case BPF_JGE:
jmp_cond = A64_COND_CS;
break;
+ case BPF_JLE:
+ jmp_cond = A64_COND_LS;
+ break;
case BPF_JSET:
case BPF_JNE:
jmp_cond = A64_COND_NE;
@@ -552,9 +562,15 @@ emit_cond_jmp:
case BPF_JSGT:
jmp_cond = A64_COND_GT;
break;
+ case BPF_JSLT:
+ jmp_cond = A64_COND_LT;
+ break;
case BPF_JSGE:
jmp_cond = A64_COND_GE;
break;
+ case BPF_JSLE:
+ jmp_cond = A64_COND_LE;
+ break;
default:
return -EFAULT;
}
@@ -566,10 +582,14 @@ emit_cond_jmp:
/* IF (dst COND imm) JUMP off */
case BPF_JMP | BPF_JEQ | BPF_K:
case BPF_JMP | BPF_JGT | BPF_K:
+ case BPF_JMP | BPF_JLT | BPF_K:
case BPF_JMP | BPF_JGE | BPF_K:
+ case BPF_JMP | BPF_JLE | BPF_K:
case BPF_JMP | BPF_JNE | BPF_K:
case BPF_JMP | BPF_JSGT | BPF_K:
+ case BPF_JMP | BPF_JSLT | BPF_K:
case BPF_JMP | BPF_JSGE | BPF_K:
+ case BPF_JMP | BPF_JSLE | BPF_K:
emit_a64_mov_i(1, tmp, imm, ctx);
emit(A64_CMP(1, dst, tmp), ctx);
goto emit_cond_jmp;
diff --git a/arch/blackfin/include/asm/bug.h b/arch/blackfin/include/asm/bug.h
index 8d9b1eba89c4..76b2e82ee730 100644
--- a/arch/blackfin/include/asm/bug.h
+++ b/arch/blackfin/include/asm/bug.h
@@ -21,7 +21,7 @@
#define _BUG_OR_WARN(flags) \
asm volatile( \
"1: .hword %0\n" \
- " .section __bug_table,\"a\",@progbits\n" \
+ " .section __bug_table,\"aw\",@progbits\n" \
"2: .long 1b\n" \
" .long %1\n" \
" .short %2\n" \
@@ -38,7 +38,7 @@
#define _BUG_OR_WARN(flags) \
asm volatile( \
"1: .hword %0\n" \
- " .section __bug_table,\"a\",@progbits\n" \
+ " .section __bug_table,\"aw\",@progbits\n" \
"2: .long 1b\n" \
" .short %1\n" \
" .org 2b + %2\n" \
diff --git a/arch/blackfin/include/asm/flat.h b/arch/blackfin/include/asm/flat.h
index 296d7f56fbfd..f1d6ba7afbf2 100644
--- a/arch/blackfin/include/asm/flat.h
+++ b/arch/blackfin/include/asm/flat.h
@@ -44,8 +44,7 @@ flat_get_relocate_addr (unsigned long relval)
return relval & 0x03ffffff; /* Mask out top 6 bits */
}
-static inline int flat_set_persistent(unsigned long relval,
- unsigned long *persistent)
+static inline int flat_set_persistent(u32 relval, u32 *persistent)
{
int type = (relval >> 26) & 7;
if (type == 3) {
diff --git a/arch/blackfin/include/asm/spinlock.h b/arch/blackfin/include/asm/spinlock.h
index c58f4a83ed6f..f6431439d15d 100644
--- a/arch/blackfin/include/asm/spinlock.h
+++ b/arch/blackfin/include/asm/spinlock.h
@@ -48,11 +48,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
__raw_spin_unlock_asm(&lock->lock);
}
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
- smp_cond_load_acquire(&lock->lock, !VAL);
-}
-
static inline int arch_read_can_lock(arch_rwlock_t *rw)
{
return __raw_uncached_fetch_asm(&rw->lock) > 0;
diff --git a/arch/blackfin/kernel/flat.c b/arch/blackfin/kernel/flat.c
index d29ab6a2e909..8ebc54daaa8e 100644
--- a/arch/blackfin/kernel/flat.c
+++ b/arch/blackfin/kernel/flat.c
@@ -32,7 +32,7 @@ unsigned long bfin_get_addr_from_rp(u32 *ptr,
break;
case FLAT_BFIN_RELOC_TYPE_32_BIT:
- pr_debug("*ptr = %lx", get_unaligned(ptr));
+ pr_debug("*ptr = %x", get_unaligned(ptr));
val = get_unaligned(ptr);
break;
@@ -77,7 +77,7 @@ void bfin_put_addr_at_rp(u32 *ptr, u32 addr, u32 relval)
case FLAT_BFIN_RELOC_TYPE_32_BIT:
put_unaligned(addr, ptr);
- pr_debug("new ptr =%lx", get_unaligned(ptr));
+ pr_debug("new ptr =%x", get_unaligned(ptr));
break;
}
}
diff --git a/arch/blackfin/kernel/module.c b/arch/blackfin/kernel/module.c
index 0188c933b155..15af5768c403 100644
--- a/arch/blackfin/kernel/module.c
+++ b/arch/blackfin/kernel/module.c
@@ -4,8 +4,6 @@
* Licensed under the GPL-2 or later
*/
-#define pr_fmt(fmt) "module %s: " fmt, mod->name
-
#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
@@ -16,6 +14,11 @@
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
+#define mod_err(mod, fmt, ...) \
+ pr_err("module %s: " fmt, (mod)->name, ##__VA_ARGS__)
+#define mod_debug(mod, fmt, ...) \
+ pr_debug("module %s: " fmt, (mod)->name, ##__VA_ARGS__)
+
/* Transfer the section to the L1 memory */
int
module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
@@ -44,7 +47,7 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
dest = l1_inst_sram_alloc(s->sh_size);
mod->arch.text_l1 = dest;
if (dest == NULL) {
- pr_err("L1 inst memory allocation failed\n");
+ mod_err(mod, "L1 inst memory allocation failed\n");
return -1;
}
dma_memcpy(dest, (void *)s->sh_addr, s->sh_size);
@@ -56,7 +59,7 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
dest = l1_data_sram_alloc(s->sh_size);
mod->arch.data_a_l1 = dest;
if (dest == NULL) {
- pr_err("L1 data memory allocation failed\n");
+ mod_err(mod, "L1 data memory allocation failed\n");
return -1;
}
memcpy(dest, (void *)s->sh_addr, s->sh_size);
@@ -68,7 +71,7 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
dest = l1_data_sram_zalloc(s->sh_size);
mod->arch.bss_a_l1 = dest;
if (dest == NULL) {
- pr_err("L1 data memory allocation failed\n");
+ mod_err(mod, "L1 data memory allocation failed\n");
return -1;
}
@@ -77,7 +80,7 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
dest = l1_data_B_sram_alloc(s->sh_size);
mod->arch.data_b_l1 = dest;
if (dest == NULL) {
- pr_err("L1 data memory allocation failed\n");
+ mod_err(mod, "L1 data memory allocation failed\n");
return -1;
}
memcpy(dest, (void *)s->sh_addr, s->sh_size);
@@ -87,7 +90,7 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
dest = l1_data_B_sram_alloc(s->sh_size);
mod->arch.bss_b_l1 = dest;
if (dest == NULL) {
- pr_err("L1 data memory allocation failed\n");
+ mod_err(mod, "L1 data memory allocation failed\n");
return -1;
}
memset(dest, 0, s->sh_size);
@@ -99,7 +102,7 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
dest = l2_sram_alloc(s->sh_size);
mod->arch.text_l2 = dest;
if (dest == NULL) {
- pr_err("L2 SRAM allocation failed\n");
+ mod_err(mod, "L2 SRAM allocation failed\n");
return -1;
}
memcpy(dest, (void *)s->sh_addr, s->sh_size);
@@ -111,7 +114,7 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
dest = l2_sram_alloc(s->sh_size);
mod->arch.data_l2 = dest;
if (dest == NULL) {
- pr_err("L2 SRAM allocation failed\n");
+ mod_err(mod, "L2 SRAM allocation failed\n");
return -1;
}
memcpy(dest, (void *)s->sh_addr, s->sh_size);
@@ -123,7 +126,7 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
dest = l2_sram_zalloc(s->sh_size);
mod->arch.bss_l2 = dest;
if (dest == NULL) {
- pr_err("L2 SRAM allocation failed\n");
+ mod_err(mod, "L2 SRAM allocation failed\n");
return -1;
}
@@ -157,8 +160,8 @@ apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
Elf32_Sym *sym;
unsigned long location, value, size;
- pr_debug("applying relocate section %u to %u\n",
- relsec, sechdrs[relsec].sh_info);
+ mod_debug(mod, "applying relocate section %u to %u\n",
+ relsec, sechdrs[relsec].sh_info);
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
/* This is where to make the change */
@@ -174,14 +177,14 @@ apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
#ifdef CONFIG_SMP
if (location >= COREB_L1_DATA_A_START) {
- pr_err("cannot relocate in L1: %u (SMP kernel)\n",
+ mod_err(mod, "cannot relocate in L1: %u (SMP kernel)\n",
ELF32_R_TYPE(rel[i].r_info));
return -ENOEXEC;
}
#endif
- pr_debug("location is %lx, value is %lx type is %d\n",
- location, value, ELF32_R_TYPE(rel[i].r_info));
+ mod_debug(mod, "location is %lx, value is %lx type is %d\n",
+ location, value, ELF32_R_TYPE(rel[i].r_info));
switch (ELF32_R_TYPE(rel[i].r_info)) {
@@ -200,12 +203,12 @@ apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
case R_BFIN_PCREL12_JUMP:
case R_BFIN_PCREL12_JUMP_S:
case R_BFIN_PCREL10:
- pr_err("unsupported relocation: %u (no -mlong-calls?)\n",
+ mod_err(mod, "unsupported relocation: %u (no -mlong-calls?)\n",
ELF32_R_TYPE(rel[i].r_info));
return -ENOEXEC;
default:
- pr_err("unknown relocation: %u\n",
+ mod_err(mod, "unknown relocation: %u\n",
ELF32_R_TYPE(rel[i].r_info));
return -ENOEXEC;
}
@@ -222,7 +225,7 @@ apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
isram_memcpy((void *)location, &value, size);
break;
default:
- pr_err("invalid relocation for %#lx\n", location);
+ mod_err(mod, "invalid relocation for %#lx\n", location);
return -ENOEXEC;
}
}
diff --git a/arch/c6x/configs/dsk6455_defconfig b/arch/c6x/configs/dsk6455_defconfig
index 4663487c67a1..d764ea4cce7f 100644
--- a/arch/c6x/configs/dsk6455_defconfig
+++ b/arch/c6x/configs/dsk6455_defconfig
@@ -1,5 +1,4 @@
CONFIG_SOC_TMS320C6455=y
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_SPARSE_IRQ=y
@@ -25,7 +24,6 @@ CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=2
CONFIG_BLK_DEV_RAM_SIZE=17000
-CONFIG_MISC_DEVICES=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
diff --git a/arch/c6x/configs/evmc6457_defconfig b/arch/c6x/configs/evmc6457_defconfig
index bba40e195ec4..05d0b4a25ab1 100644
--- a/arch/c6x/configs/evmc6457_defconfig
+++ b/arch/c6x/configs/evmc6457_defconfig
@@ -1,5 +1,4 @@
CONFIG_SOC_TMS320C6457=y
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_SPARSE_IRQ=y
@@ -26,7 +25,6 @@ CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=2
CONFIG_BLK_DEV_RAM_SIZE=17000
-CONFIG_MISC_DEVICES=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
diff --git a/arch/c6x/configs/evmc6472_defconfig b/arch/c6x/configs/evmc6472_defconfig
index 8c46155f6d31..8d81fcf86b0e 100644
--- a/arch/c6x/configs/evmc6472_defconfig
+++ b/arch/c6x/configs/evmc6472_defconfig
@@ -1,5 +1,4 @@
CONFIG_SOC_TMS320C6472=y
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_SPARSE_IRQ=y
@@ -27,7 +26,6 @@ CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=2
CONFIG_BLK_DEV_RAM_SIZE=17000
-CONFIG_MISC_DEVICES=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
diff --git a/arch/c6x/configs/evmc6474_defconfig b/arch/c6x/configs/evmc6474_defconfig
index 15533f632313..8156a98f3958 100644
--- a/arch/c6x/configs/evmc6474_defconfig
+++ b/arch/c6x/configs/evmc6474_defconfig
@@ -1,5 +1,4 @@
CONFIG_SOC_TMS320C6474=y
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_SPARSE_IRQ=y
@@ -27,7 +26,6 @@ CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=2
CONFIG_BLK_DEV_RAM_SIZE=17000
-CONFIG_MISC_DEVICES=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
diff --git a/arch/c6x/configs/evmc6678_defconfig b/arch/c6x/configs/evmc6678_defconfig
index 5f126d4905b1..c4f433c25b69 100644
--- a/arch/c6x/configs/evmc6678_defconfig
+++ b/arch/c6x/configs/evmc6678_defconfig
@@ -1,5 +1,4 @@
CONFIG_SOC_TMS320C6678=y
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_SPARSE_IRQ=y
@@ -27,7 +26,6 @@ CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=2
CONFIG_BLK_DEV_RAM_SIZE=17000
-CONFIG_MISC_DEVICES=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
diff --git a/arch/c6x/platforms/megamod-pic.c b/arch/c6x/platforms/megamod-pic.c
index 43afc03e4125..9519fa5f97d0 100644
--- a/arch/c6x/platforms/megamod-pic.c
+++ b/arch/c6x/platforms/megamod-pic.c
@@ -208,14 +208,14 @@ static struct megamod_pic * __init init_megamod_pic(struct device_node *np)
pic = kzalloc(sizeof(struct megamod_pic), GFP_KERNEL);
if (!pic) {
- pr_err("%s: Could not alloc PIC structure.\n", np->full_name);
+ pr_err("%pOF: Could not alloc PIC structure.\n", np);
return NULL;
}
pic->irqhost = irq_domain_add_linear(np, NR_COMBINERS * 32,
&megamod_domain_ops, pic);
if (!pic->irqhost) {
- pr_err("%s: Could not alloc host.\n", np->full_name);
+ pr_err("%pOF: Could not alloc host.\n", np);
goto error_free;
}
@@ -225,7 +225,7 @@ static struct megamod_pic * __init init_megamod_pic(struct device_node *np)
pic->regs = of_iomap(np, 0);
if (!pic->regs) {
- pr_err("%s: Could not map registers.\n", np->full_name);
+ pr_err("%pOF: Could not map registers.\n", np);
goto error_free;
}
@@ -253,8 +253,8 @@ static struct megamod_pic * __init init_megamod_pic(struct device_node *np)
irq_data = irq_get_irq_data(irq);
if (!irq_data) {
- pr_err("%s: combiner-%d no irq_data for virq %d!\n",
- np->full_name, i, irq);
+ pr_err("%pOF: combiner-%d no irq_data for virq %d!\n",
+ np, i, irq);
continue;
}
@@ -265,16 +265,16 @@ static struct megamod_pic * __init init_megamod_pic(struct device_node *np)
* of the core priority interrupts (4 - 15).
*/
if (hwirq < 4 || hwirq >= NR_PRIORITY_IRQS) {
- pr_err("%s: combiner-%d core irq %ld out of range!\n",
- np->full_name, i, hwirq);
+ pr_err("%pOF: combiner-%d core irq %ld out of range!\n",
+ np, i, hwirq);
continue;
}
/* record the mapping */
mapping[hwirq - 4] = i;
- pr_debug("%s: combiner-%d cascading to hwirq %ld\n",
- np->full_name, i, hwirq);
+ pr_debug("%pOF: combiner-%d cascading to hwirq %ld\n",
+ np, i, hwirq);
cascade_data[i].pic = pic;
cascade_data[i].index = i;
@@ -290,8 +290,8 @@ static struct megamod_pic * __init init_megamod_pic(struct device_node *np)
/* Finally, set up the MUX registers */
for (i = 0; i < NR_MUX_OUTPUTS; i++) {
if (mapping[i] != IRQ_UNMAPPED) {
- pr_debug("%s: setting mux %d to priority %d\n",
- np->full_name, mapping[i], i + 4);
+ pr_debug("%pOF: setting mux %d to priority %d\n",
+ np, mapping[i], i + 4);
set_megamod_mux(pic, mapping[i], i);
}
}
diff --git a/arch/c6x/platforms/plldata.c b/arch/c6x/platforms/plldata.c
index 755359eb6286..e8b6cc6a7b5a 100644
--- a/arch/c6x/platforms/plldata.c
+++ b/arch/c6x/platforms/plldata.c
@@ -436,8 +436,8 @@ void __init c64x_setup_clocks(void)
err = of_property_read_u32(node, "clock-frequency", &val);
if (err || val == 0) {
- pr_err("%s: no clock-frequency found! Using %dMHz\n",
- node->full_name, (int)val / 1000000);
+ pr_err("%pOF: no clock-frequency found! Using %dMHz\n",
+ node, (int)val / 1000000);
val = 25000000;
}
clkin1.rate = val;
diff --git a/arch/c6x/platforms/timer64.c b/arch/c6x/platforms/timer64.c
index 0bd0452ded80..241a9a607193 100644
--- a/arch/c6x/platforms/timer64.c
+++ b/arch/c6x/platforms/timer64.c
@@ -204,14 +204,14 @@ void __init timer64_init(void)
timer = of_iomap(np, 0);
if (!timer) {
- pr_debug("%s: Cannot map timer registers.\n", np->full_name);
+ pr_debug("%pOF: Cannot map timer registers.\n", np);
goto out;
}
- pr_debug("%s: Timer registers=%p.\n", np->full_name, timer);
+ pr_debug("%pOF: Timer registers=%p.\n", np, timer);
cd->irq = irq_of_parse_and_map(np, 0);
if (cd->irq == NO_IRQ) {
- pr_debug("%s: Cannot find interrupt.\n", np->full_name);
+ pr_debug("%pOF: Cannot find interrupt.\n", np);
iounmap(timer);
goto out;
}
@@ -229,7 +229,7 @@ void __init timer64_init(void)
dscr_set_devstate(timer64_devstate_id, DSCR_DEVSTATE_ENABLED);
}
- pr_debug("%s: Timer irq=%d.\n", np->full_name, cd->irq);
+ pr_debug("%pOF: Timer irq=%d.\n", np, cd->irq);
clockevents_calc_mult_shift(cd, c6x_core_freq / TIMER_DIVISOR, 5);
diff --git a/arch/cris/arch-v32/mach-a3/arbiter.c b/arch/cris/arch-v32/mach-a3/arbiter.c
index ab5c421a4de8..735a9b0abdb8 100644
--- a/arch/cris/arch-v32/mach-a3/arbiter.c
+++ b/arch/cris/arch-v32/mach-a3/arbiter.c
@@ -227,7 +227,7 @@ static void crisv32_arbiter_config(int arbiter, int region, int unused_slots)
}
}
-extern char _stext, _etext;
+extern char _stext[], _etext[];
static void crisv32_arbiter_init(void)
{
@@ -265,7 +265,7 @@ static void crisv32_arbiter_init(void)
#ifndef CONFIG_ETRAX_KGDB
/* Global watch for writes to kernel text segment. */
- crisv32_arbiter_watch(virt_to_phys(&_stext), &_etext - &_stext,
+ crisv32_arbiter_watch(virt_to_phys(_stext), _etext - _stext,
MARB_CLIENTS(arbiter_all_clients, arbiter_bar_all_clients),
arbiter_all_write, NULL);
#endif
diff --git a/arch/cris/arch-v32/mach-fs/arbiter.c b/arch/cris/arch-v32/mach-fs/arbiter.c
index c97f4d8120f9..047c70bdbb23 100644
--- a/arch/cris/arch-v32/mach-fs/arbiter.c
+++ b/arch/cris/arch-v32/mach-fs/arbiter.c
@@ -158,7 +158,7 @@ static void crisv32_arbiter_config(int region, int unused_slots)
}
}
-extern char _stext, _etext;
+extern char _stext[], _etext[];
static void crisv32_arbiter_init(void)
{
@@ -190,7 +190,7 @@ static void crisv32_arbiter_init(void)
#ifndef CONFIG_ETRAX_KGDB
/* Global watch for writes to kernel text segment. */
- crisv32_arbiter_watch(virt_to_phys(&_stext), &_etext - &_stext,
+ crisv32_arbiter_watch(virt_to_phys(_stext), _etext - _stext,
arbiter_all_clients, arbiter_all_write, NULL);
#endif
}
diff --git a/arch/cris/kernel/traps.c b/arch/cris/kernel/traps.c
index a01636a12a6e..d98131c45bb5 100644
--- a/arch/cris/kernel/traps.c
+++ b/arch/cris/kernel/traps.c
@@ -42,7 +42,7 @@ void (*nmi_handler)(struct pt_regs *);
void show_trace(unsigned long *stack)
{
unsigned long addr, module_start, module_end;
- extern char _stext, _etext;
+ extern char _stext[], _etext[];
int i;
pr_err("\nCall Trace: ");
@@ -69,8 +69,8 @@ void show_trace(unsigned long *stack)
* down the cause of the crash will be able to figure
* out the call path that was taken.
*/
- if (((addr >= (unsigned long)&_stext) &&
- (addr <= (unsigned long)&_etext)) ||
+ if (((addr >= (unsigned long)_stext) &&
+ (addr <= (unsigned long)_etext)) ||
((addr >= module_start) && (addr <= module_end))) {
#ifdef CONFIG_KALLSYMS
print_ip_sym(addr);
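
The cris changes above (and the h8300 one further down) switch linker-provided section symbols from "extern char _stext" to "extern char _stext[]". The array form avoids pretending there is a real one-byte C object at that address, sidesteps compiler warnings about comparing or subtracting such addresses, and drops the "&" noise at every use. A sketch of the idiom, assuming the symbols come from the linker script:

	extern char _stext[], _etext[];	/* defined by the linker script, not C objects */

	static inline bool in_kernel_text(unsigned long addr)
	{
		return addr >= (unsigned long)_stext && addr < (unsigned long)_etext;
	}

	static inline unsigned long kernel_text_size(void)
	{
		return _etext - _stext;		/* plain pointer arithmetic, no '&' needed */
	}
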
diff --git a/arch/frv/include/asm/futex.h b/arch/frv/include/asm/futex.h
index 2e1da71e27a4..ab346f5f8820 100644
--- a/arch/frv/include/asm/futex.h
+++ b/arch/frv/include/asm/futex.h
@@ -7,7 +7,8 @@
#include <asm/errno.h>
#include <linux/uaccess.h>
-extern int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr);
+extern int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+ u32 __user *uaddr);
static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
diff --git a/arch/frv/include/uapi/asm/socket.h b/arch/frv/include/uapi/asm/socket.h
index f1e3b20dce9f..9abf02d6855a 100644
--- a/arch/frv/include/uapi/asm/socket.h
+++ b/arch/frv/include/uapi/asm/socket.h
@@ -102,5 +102,7 @@
#define SO_PEERGROUPS 59
+#define SO_ZEROCOPY 60
+
#endif /* _ASM_SOCKET_H */
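
SO_ZEROCOPY (value 60) is added here, and in the other per-arch socket.h headers below, to match the generic header; it is the opt-in switch for MSG_ZEROCOPY transmission. A hedged userspace sketch, with error handling omitted and a connected TCP socket assumed:

	int one = 1;

	setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one));	/* opt in once per socket */
	send(fd, buf, len, MSG_ZEROCOPY);				/* request zero-copy per send */
	/* completion notifications arrive on the socket's error queue */
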
diff --git a/arch/frv/kernel/futex.c b/arch/frv/kernel/futex.c
index d155ca9e5098..37f7b2bf7f73 100644
--- a/arch/frv/kernel/futex.c
+++ b/arch/frv/kernel/futex.c
@@ -186,20 +186,10 @@ static inline int atomic_futex_op_xchg_xor(int oparg, u32 __user *uaddr, int *_o
/*
* do the futex operations
*/
-int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
-
pagefault_disable();
switch (op) {
@@ -225,18 +215,9 @@ int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS; break;
- }
- }
+ if (!ret)
+ *oval = oldval;
return ret;
-} /* end futex_atomic_op_inuser() */
+} /* end arch_futex_atomic_op_inuser() */
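
This frv hunk is one instance of a tree-wide refactoring (repeated below for hexagon, ia64, microblaze and mips): decoding encoded_op, the access_ok() check and the FUTEX_OP_CMP_* comparison all move into the generic futex code, and the per-arch hook is reduced to performing the atomic operation and returning the old value through *oval. Roughly, the generic caller is expected to look like this (simplified; futex_op_cmp() is a hypothetical stand-in for the common comparison switch):

	int op     = (encoded_op >> 28) & 7;
	int cmp    = (encoded_op >> 24) & 15;
	int oparg  = (encoded_op << 8) >> 20;
	int cmparg = (encoded_op << 20) >> 20;
	int oldval, ret;

	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
		oparg = 1 << oparg;

	ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
	if (!ret)
		ret = futex_op_cmp(cmp, oldval, cmparg);	/* EQ/NE/LT/GE/LE/GT done once, in common code */
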
diff --git a/arch/h8300/include/asm/flat.h b/arch/h8300/include/asm/flat.h
index 18d024251738..7e0bd6fa1532 100644
--- a/arch/h8300/include/asm/flat.h
+++ b/arch/h8300/include/asm/flat.h
@@ -24,7 +24,7 @@ static inline int flat_get_addr_from_rp(u32 __user *rp, u32 relval, u32 flags,
u32 *addr, u32 *persistent)
{
u32 val = get_unaligned((__force u32 *)rp);
- if (!(flags & FLAT_FLAG_GOTPIC)
+ if (!(flags & FLAT_FLAG_GOTPIC))
val &= 0x00ffffff;
*addr = val;
return 0;
diff --git a/arch/h8300/include/asm/traps.h b/arch/h8300/include/asm/traps.h
index 15e701130b27..1c5a30ec2df8 100644
--- a/arch/h8300/include/asm/traps.h
+++ b/arch/h8300/include/asm/traps.h
@@ -33,9 +33,9 @@ extern unsigned long *_interrupt_redirect_table;
#define TRAP2_VEC 10
#define TRAP3_VEC 11
-extern char _start, _etext;
+extern char _start[], _etext[];
#define check_kernel_text(addr) \
- ((addr >= (unsigned long)(&_start)) && \
- (addr < (unsigned long)(&_etext)) && !(addr & 1))
+ ((addr >= (unsigned long)(_start)) && \
+ (addr < (unsigned long)(_etext)) && !(addr & 1))
#endif /* _H8300_TRAPS_H */
diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h
index a62ba368b27d..fb3dfb2a667e 100644
--- a/arch/hexagon/include/asm/atomic.h
+++ b/arch/hexagon/include/asm/atomic.h
@@ -42,6 +42,8 @@ static inline void atomic_set(atomic_t *v, int new)
);
}
+#define atomic_set_release(v, i) atomic_set((v), (i))
+
/**
* atomic_read - reads a word, atomically
* @v: pointer to atomic value
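
atomic_set_release() is the release-ordered counterpart of atomic_set() used by generic locking code; on hexagon (and in the metag lock1 hunk further down) atomic_set() is already an ll/sc or lock-protected sequence, so simply aliasing the release form to it appears sufficient, which is all this hunk does. Usage is just:

	atomic_set_release(&some_atomic, 0);	/* store 0 with at least release ordering */
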
diff --git a/arch/hexagon/include/asm/futex.h b/arch/hexagon/include/asm/futex.h
index 7e597f8434da..c607b77c8215 100644
--- a/arch/hexagon/include/asm/futex.h
+++ b/arch/hexagon/include/asm/futex.h
@@ -31,18 +31,9 @@
static inline int
-futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
- return -EFAULT;
pagefault_disable();
@@ -72,30 +63,9 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ:
- ret = (oldval == cmparg);
- break;
- case FUTEX_OP_CMP_NE:
- ret = (oldval != cmparg);
- break;
- case FUTEX_OP_CMP_LT:
- ret = (oldval < cmparg);
- break;
- case FUTEX_OP_CMP_GE:
- ret = (oldval >= cmparg);
- break;
- case FUTEX_OP_CMP_LE:
- ret = (oldval <= cmparg);
- break;
- case FUTEX_OP_CMP_GT:
- ret = (oldval > cmparg);
- break;
- default:
- ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
diff --git a/arch/hexagon/include/asm/spinlock.h b/arch/hexagon/include/asm/spinlock.h
index a1c55788c5d6..53a8d5885887 100644
--- a/arch/hexagon/include/asm/spinlock.h
+++ b/arch/hexagon/include/asm/spinlock.h
@@ -179,11 +179,6 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
*/
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
- smp_cond_load_acquire(&lock->lock, !VAL);
-}
-
#define arch_spin_is_locked(x) ((x)->lock != 0)
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
index a3d0211970e9..c86a947f5368 100644
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -112,8 +112,6 @@ static inline void arch_acpi_set_pdc_bits(u32 *buf)
buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
}
-#define acpi_unlazy_tlb(x)
-
#ifdef CONFIG_ACPI_NUMA
extern cpumask_t early_cpu_possible_map;
#define for_each_possible_early_cpu(cpu) \
diff --git a/arch/ia64/include/asm/futex.h b/arch/ia64/include/asm/futex.h
index 76acbcd5c060..6d67dc1eaf2b 100644
--- a/arch/ia64/include/asm/futex.h
+++ b/arch/ia64/include/asm/futex.h
@@ -45,18 +45,9 @@ do { \
} while (0)
static inline int
-futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
pagefault_disable();
@@ -84,17 +75,9 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index ca9e76149a4a..df2c121164b8 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -76,22 +76,6 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
ACCESS_ONCE(*p) = (tmp + 2) & ~1;
}
-static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
-{
- int *p = (int *)&lock->lock, ticket;
-
- ia64_invala();
-
- for (;;) {
- asm volatile ("ld4.c.nc %0=[%1]" : "=r"(ticket) : "r"(p) : "memory");
- if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
- return;
- cpu_relax();
- }
-
- smp_acquire__after_ctrl_dep();
-}
-
static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
long tmp = ACCESS_ONCE(lock->lock);
@@ -143,11 +127,6 @@ static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
arch_spin_lock(lock);
}
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
- __ticket_spin_unlock_wait(lock);
-}
-
#define arch_read_can_lock(rw) (*(volatile int *)(rw) >= 0)
#define arch_write_can_lock(rw) (*(volatile int *)(rw) == 0)
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index fced197b9626..cbe5ac3699bf 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -168,7 +168,8 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+ unsigned long start, unsigned long end)
{
tlb->mm = mm;
tlb->max = ARRAY_SIZE(tlb->local);
@@ -185,8 +186,11 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start
* collected.
*/
static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+ unsigned long start, unsigned long end, bool force)
{
+ if (force)
+ tlb->need_flush = 1;
/*
* Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
* tlb->end_addr.
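
The ia64 tlb_gather_mmu/tlb_finish_mmu pair becomes arch_tlb_gather_mmu/arch_tlb_finish_mmu as part of the generic mmu_gather rework; the new force argument tells the architecture to flush even when this particular gather collected nothing, which ia64 implements by setting need_flush. A hedged sketch of the calling convention as seen from the generic wrappers:

	/* force: a TLB flush is still required even though this gather batched nothing */
	arch_tlb_gather_mmu(&tlb, mm, start, end);
	/* ... unmap pages, batching them into the gather structure ... */
	arch_tlb_finish_mmu(&tlb, start, end, force);
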
diff --git a/arch/ia64/include/uapi/asm/socket.h b/arch/ia64/include/uapi/asm/socket.h
index 5dd5c5d0d642..002eb85a6941 100644
--- a/arch/ia64/include/uapi/asm/socket.h
+++ b/arch/ia64/include/uapi/asm/socket.h
@@ -111,4 +111,6 @@
#define SO_PEERGROUPS 59
+#define SO_ZEROCOPY 60
+
#endif /* _ASM_IA64_SOCKET_H */
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 7508c306aa9e..1d29b2f8726b 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -159,12 +159,12 @@ int acpi_request_vector(u32 int_type)
return vector;
}
-char *__init __acpi_map_table(unsigned long phys_addr, unsigned long size)
+void __init __iomem *__acpi_map_table(unsigned long phys, unsigned long size)
{
- return __va(phys_addr);
+ return __va(phys);
}
-void __init __acpi_unmap_table(char *map, unsigned long size)
+void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
{
}
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 121295637d0d..81416000c5e0 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -757,14 +757,14 @@ efi_memmap_intersects (unsigned long phys_addr, unsigned long size)
return 0;
}
-u32
+int
efi_mem_type (unsigned long phys_addr)
{
efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);
if (md)
return md->type;
- return 0;
+ return -EINVAL;
}
u64
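
efi_mem_type() switching from u32 to int lets it return -EINVAL when no EFI memory descriptor covers the address; the old return of 0 was ambiguous, since 0 is also the valid EFI_RESERVED_TYPE. Callers are then expected to treat a negative value as "not mapped by EFI", roughly:

	int md_type = efi_mem_type(phys_addr);

	if (md_type < 0)
		return false;				/* no descriptor covers this address */
	if (md_type == EFI_CONVENTIONAL_MEMORY)
		/* treat the range as ordinary RAM */ ;
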
diff --git a/arch/m32r/include/asm/flat.h b/arch/m32r/include/asm/flat.h
index 455ce7ddbf14..dfcb0e4eb256 100644
--- a/arch/m32r/include/asm/flat.h
+++ b/arch/m32r/include/asm/flat.h
@@ -95,7 +95,7 @@ static inline unsigned long m32r_flat_get_addr_from_rp (u32 *rp,
return ~0; /* bogus value */
}
-static inline void flat_put_addr_at_rp(u32 *rp, u32 addr, u32 relval)
+static inline int flat_put_addr_at_rp(u32 *rp, u32 addr, u32 relval)
{
unsigned int reloc = flat_m32r_get_reloc_type (relval);
if (reloc & 0xf0) {
@@ -133,6 +133,7 @@ static inline void flat_put_addr_at_rp(u32 *rp, u32 addr, u32 relval)
break;
}
}
+ return 0;
}
// kludge - text_len is a local variable in the only user.
diff --git a/arch/m32r/include/asm/spinlock.h b/arch/m32r/include/asm/spinlock.h
index 323c7fc953cd..a56825592b90 100644
--- a/arch/m32r/include/asm/spinlock.h
+++ b/arch/m32r/include/asm/spinlock.h
@@ -30,11 +30,6 @@
#define arch_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0)
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
- smp_cond_load_acquire(&lock->slock, VAL > 0);
-}
-
/**
* arch_spin_trylock - Try spin lock and return a result
* @lock: Pointer to the lock variable
diff --git a/arch/m32r/include/uapi/asm/socket.h b/arch/m32r/include/uapi/asm/socket.h
index f8f7b47e247f..e268e51a38d1 100644
--- a/arch/m32r/include/uapi/asm/socket.h
+++ b/arch/m32r/include/uapi/asm/socket.h
@@ -102,4 +102,6 @@
#define SO_PEERGROUPS 59
+#define SO_ZEROCOPY 60
+
#endif /* _ASM_M32R_SOCKET_H */
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index ddff1164aff0..54191f6fc715 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -49,6 +49,7 @@ CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m
CONFIG_UNIX=y
CONFIG_UNIX_DIAG=m
+CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
CONFIG_INET=y
@@ -465,8 +466,10 @@ CONFIG_HID=m
CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
+# CONFIG_HID_ITE is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
+# CONFIG_RTC_NVMEM is not set
CONFIG_RTC_DRV_MSM6242=m
CONFIG_RTC_DRV_RP5C01=m
# CONFIG_IOMMU_SUPPORT is not set
@@ -477,7 +480,6 @@ CONFIG_SERIAL_CONSOLE=y
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
-CONFIG_XFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
CONFIG_FS_ENCRYPTION=m
@@ -587,12 +589,13 @@ CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
-CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_SYSCTL=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
+CONFIG_TEST_KMOD=m
CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index 17384dc959a5..fb4663904428 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -47,6 +47,7 @@ CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m
CONFIG_UNIX=y
CONFIG_UNIX_DIAG=m
+CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
CONFIG_INET=y
@@ -427,8 +428,10 @@ CONFIG_HID=m
CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
+# CONFIG_HID_ITE is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
+# CONFIG_RTC_NVMEM is not set
CONFIG_RTC_DRV_GENERIC=m
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_HEARTBEAT=y
@@ -436,7 +439,6 @@ CONFIG_PROC_HARDWARE=y
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
-CONFIG_XFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
CONFIG_FS_ENCRYPTION=m
@@ -546,12 +548,13 @@ CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
-CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_SYSCTL=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
+CONFIG_TEST_KMOD=m
CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index 53a641d62f85..4ab393e86e52 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -47,6 +47,7 @@ CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m
CONFIG_UNIX=y
CONFIG_UNIX_DIAG=m
+CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
CONFIG_INET=y
@@ -442,7 +443,10 @@ CONFIG_DMASOUND_ATARI=m
CONFIG_HID=m
CONFIG_HIDRAW=y
CONFIG_UHID=m
+# CONFIG_HID_GENERIC is not set
+# CONFIG_HID_ITE is not set
CONFIG_RTC_CLASS=y
+# CONFIG_RTC_NVMEM is not set
CONFIG_RTC_DRV_GENERIC=m
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_HEARTBEAT=y
@@ -457,7 +461,6 @@ CONFIG_ATARI_DSP56K=m
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
-CONFIG_XFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
CONFIG_FS_ENCRYPTION=m
@@ -567,12 +570,13 @@ CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
-CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_SYSCTL=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
+CONFIG_TEST_KMOD=m
CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index 3925ae3a5eb3..1dd8d697545b 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -45,6 +45,7 @@ CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m
CONFIG_UNIX=y
CONFIG_UNIX_DIAG=m
+CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
CONFIG_INET=y
@@ -420,15 +421,16 @@ CONFIG_HID=m
CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
+# CONFIG_HID_ITE is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
+# CONFIG_RTC_NVMEM is not set
CONFIG_RTC_DRV_GENERIC=m
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_PROC_HARDWARE=y
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
-CONFIG_XFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
CONFIG_FS_ENCRYPTION=m
@@ -538,12 +540,13 @@ CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
-CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_SYSCTL=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
+CONFIG_TEST_KMOD=m
CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index f4a134b390b4..02b39f50076e 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -47,6 +47,7 @@ CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m
CONFIG_UNIX=y
CONFIG_UNIX_DIAG=m
+CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
CONFIG_INET=y
@@ -430,15 +431,16 @@ CONFIG_HID=m
CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
+# CONFIG_HID_ITE is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
+# CONFIG_RTC_NVMEM is not set
CONFIG_RTC_DRV_GENERIC=m
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_PROC_HARDWARE=y
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
-CONFIG_XFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
CONFIG_FS_ENCRYPTION=m
@@ -548,12 +550,13 @@ CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
-CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_SYSCTL=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
+CONFIG_TEST_KMOD=m
CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 9ed0cef632b7..044dcb2bf8fb 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -46,6 +46,7 @@ CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m
CONFIG_UNIX=y
CONFIG_UNIX_DIAG=m
+CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
CONFIG_INET=y
@@ -452,15 +453,16 @@ CONFIG_HID=m
CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
+# CONFIG_HID_ITE is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
+# CONFIG_RTC_NVMEM is not set
CONFIG_RTC_DRV_GENERIC=m
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_PROC_HARDWARE=y
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
-CONFIG_XFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
CONFIG_FS_ENCRYPTION=m
@@ -570,12 +572,13 @@ CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
-CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_SYSCTL=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
+CONFIG_TEST_KMOD=m
CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index efed0d48fd53..3ad04682077a 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -56,6 +56,7 @@ CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m
CONFIG_UNIX=y
CONFIG_UNIX_DIAG=m
+CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
CONFIG_INET=y
@@ -520,8 +521,10 @@ CONFIG_HID=m
CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
+# CONFIG_HID_ITE is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
+# CONFIG_RTC_NVMEM is not set
CONFIG_RTC_DRV_MSM6242=m
CONFIG_RTC_DRV_RP5C01=m
CONFIG_RTC_DRV_GENERIC=m
@@ -540,7 +543,6 @@ CONFIG_SERIAL_CONSOLE=y
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
-CONFIG_XFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
CONFIG_FS_ENCRYPTION=m
@@ -650,12 +652,13 @@ CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
-CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_SYSCTL=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
+CONFIG_TEST_KMOD=m
CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index 9040457c7f9c..dc2dd61948cd 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -44,6 +44,7 @@ CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m
CONFIG_UNIX=y
CONFIG_UNIX_DIAG=m
+CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
CONFIG_INET=y
@@ -420,15 +421,16 @@ CONFIG_HID=m
CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
+# CONFIG_HID_ITE is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
+# CONFIG_RTC_NVMEM is not set
CONFIG_RTC_DRV_GENERIC=m
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_PROC_HARDWARE=y
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
-CONFIG_XFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
CONFIG_FS_ENCRYPTION=m
@@ -538,12 +540,13 @@ CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
-CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_SYSCTL=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
+CONFIG_TEST_KMOD=m
CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 8b17f00e0484..54e7b523fc3d 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -45,6 +45,7 @@ CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m
CONFIG_UNIX=y
CONFIG_UNIX_DIAG=m
+CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
CONFIG_INET=y
@@ -420,15 +421,16 @@ CONFIG_HID=m
CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
+# CONFIG_HID_ITE is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
+# CONFIG_RTC_NVMEM is not set
CONFIG_RTC_DRV_GENERIC=m
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_PROC_HARDWARE=y
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
-CONFIG_XFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
CONFIG_FS_ENCRYPTION=m
@@ -538,12 +540,13 @@ CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
-CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_SYSCTL=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
+CONFIG_TEST_KMOD=m
CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index 5f3718c62c85..d63d8a15f6db 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -45,6 +45,7 @@ CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m
CONFIG_UNIX=y
CONFIG_UNIX_DIAG=m
+CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
CONFIG_INET=y
@@ -442,8 +443,10 @@ CONFIG_HID=m
CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
+# CONFIG_HID_ITE is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
+# CONFIG_RTC_NVMEM is not set
CONFIG_RTC_DRV_GENERIC=m
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_HEARTBEAT=y
@@ -451,7 +454,6 @@ CONFIG_PROC_HARDWARE=y
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
-CONFIG_XFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
CONFIG_FS_ENCRYPTION=m
@@ -561,12 +563,13 @@ CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
-CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_SYSCTL=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
+CONFIG_TEST_KMOD=m
CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 8c979a68fca5..d0924c22f52a 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -42,6 +42,7 @@ CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m
CONFIG_UNIX=y
CONFIG_UNIX_DIAG=m
+CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
CONFIG_INET=y
@@ -422,15 +423,16 @@ CONFIG_HID=m
CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
+# CONFIG_HID_ITE is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
+# CONFIG_RTC_NVMEM is not set
CONFIG_RTC_DRV_GENERIC=m
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_PROC_HARDWARE=y
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
-CONFIG_XFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
CONFIG_FS_ENCRYPTION=m
@@ -540,12 +542,13 @@ CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
-CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_SYSCTL=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
+CONFIG_TEST_KMOD=m
CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_RSA=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index a1e79530e806..3001ee1e5dc5 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -42,6 +42,7 @@ CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m
CONFIG_UNIX=y
CONFIG_UNIX_DIAG=m
+CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
CONFIG_INET=y
@@ -422,15 +423,16 @@ CONFIG_HID=m
CONFIG_HIDRAW=y
CONFIG_UHID=m
# CONFIG_HID_GENERIC is not set
+# CONFIG_HID_ITE is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
+# CONFIG_RTC_NVMEM is not set
CONFIG_RTC_DRV_GENERIC=m
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_PROC_HARDWARE=y
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
-CONFIG_XFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
CONFIG_FS_ENCRYPTION=m
@@ -540,12 +542,13 @@ CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
-CONFIG_TEST_LKM=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_SYSCTL=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
+CONFIG_TEST_KMOD=m
CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
diff --git a/arch/m68k/include/asm/asm-prototypes.h b/arch/m68k/include/asm/asm-prototypes.h
new file mode 100644
index 000000000000..22ccb9c97576
--- /dev/null
+++ b/arch/m68k/include/asm/asm-prototypes.h
@@ -0,0 +1,5 @@
+extern int __divsi3(int, int);
+extern int __modsi3(int, int);
+extern int __mulsi3(int, int);
+extern unsigned int __udivsi3(unsigned int, unsigned int);
+extern unsigned int __umodsi3(unsigned int, unsigned int);
diff --git a/arch/m68k/include/asm/flat.h b/arch/m68k/include/asm/flat.h
index 48b62790fe70..b2a41f5b3890 100644
--- a/arch/m68k/include/asm/flat.h
+++ b/arch/m68k/include/asm/flat.h
@@ -30,8 +30,7 @@ static inline int flat_put_addr_at_rp(u32 __user *rp, u32 addr, u32 rel)
}
#define flat_get_relocate_addr(rel) (rel)
-static inline int flat_set_persistent(unsigned long relval,
- unsigned long *persistent)
+static inline int flat_set_persistent(u32 relval, u32 *persistent)
{
return 0;
}
diff --git a/arch/m68k/mac/misc.c b/arch/m68k/mac/misc.c
index 8aa8792e3174..d96348a52362 100644
--- a/arch/m68k/mac/misc.c
+++ b/arch/m68k/mac/misc.c
@@ -357,6 +357,17 @@ static void cuda_shutdown(void)
struct adb_request req;
if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_POWERDOWN) < 0)
return;
+
+ /* Avoid infinite polling loop when PSU is not under Cuda control */
+ switch (macintosh_config->ident) {
+ case MAC_MODEL_C660:
+ case MAC_MODEL_Q605:
+ case MAC_MODEL_Q605_ACC:
+ case MAC_MODEL_P475:
+ case MAC_MODEL_P475F:
+ return;
+ }
+
while (!req.complete)
cuda_poll();
}
@@ -463,8 +474,9 @@ void mac_poweroff(void)
pmu_shutdown();
#endif
}
- local_irq_enable();
+
pr_crit("It is now safe to turn off your Macintosh.\n");
+ local_irq_disable();
while(1);
}
@@ -554,8 +566,8 @@ void mac_reset(void)
}
/* should never get here */
- local_irq_enable();
pr_crit("Restart failed. Please restart manually.\n");
+ local_irq_disable();
while(1);
}
diff --git a/arch/metag/Kconfig b/arch/metag/Kconfig
index 5b7a45d99cfb..7d8b322e5101 100644
--- a/arch/metag/Kconfig
+++ b/arch/metag/Kconfig
@@ -26,6 +26,7 @@ config METAG
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_UNDERSCORE_SYMBOL_PREFIX
select IRQ_DOMAIN
+ select GENERIC_IRQ_EFFECTIVE_AFF_MASK
select MODULES_USE_ELF_RELA
select OF
select OF_EARLY_FLATTREE
diff --git a/arch/metag/include/asm/atomic_lock1.h b/arch/metag/include/asm/atomic_lock1.h
index 6c1380a8a0d4..eee779f26cc4 100644
--- a/arch/metag/include/asm/atomic_lock1.h
+++ b/arch/metag/include/asm/atomic_lock1.h
@@ -37,6 +37,8 @@ static inline int atomic_set(atomic_t *v, int i)
return i;
}
+#define atomic_set_release(v, i) atomic_set((v), (i))
+
#define ATOMIC_OP(op, c_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
diff --git a/arch/metag/include/asm/spinlock.h b/arch/metag/include/asm/spinlock.h
index c0c7a22be1ae..ddf7fe5708a6 100644
--- a/arch/metag/include/asm/spinlock.h
+++ b/arch/metag/include/asm/spinlock.h
@@ -15,11 +15,6 @@
* locked.
*/
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
- smp_cond_load_acquire(&lock->lock, !VAL);
-}
-
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
diff --git a/arch/metag/include/asm/topology.h b/arch/metag/include/asm/topology.h
index e95f874ded1b..707c7f7b6bea 100644
--- a/arch/metag/include/asm/topology.h
+++ b/arch/metag/include/asm/topology.h
@@ -4,7 +4,6 @@
#ifdef CONFIG_NUMA
#define cpu_to_node(cpu) ((void)(cpu), 0)
-#define parent_node(node) ((void)(node), 0)
#define cpumask_of_node(node) ((void)node, cpu_online_mask)
diff --git a/arch/microblaze/include/asm/flat.h b/arch/microblaze/include/asm/flat.h
index f23c3d266bae..3d2747d4c967 100644
--- a/arch/microblaze/include/asm/flat.h
+++ b/arch/microblaze/include/asm/flat.h
@@ -60,7 +60,7 @@ static inline int flat_get_addr_from_rp(u32 __user *rp, u32 relval, u32 flags,
* unaligned.
*/
-static inline void
+static inline int
flat_put_addr_at_rp(u32 __user *rp, u32 addr, u32 relval)
{
u32 *p = (__force u32 *)rp;
diff --git a/arch/microblaze/include/asm/futex.h b/arch/microblaze/include/asm/futex.h
index 01848f056f43..a9dad9e5e132 100644
--- a/arch/microblaze/include/asm/futex.h
+++ b/arch/microblaze/include/asm/futex.h
@@ -29,18 +29,9 @@
})
static inline int
-futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
pagefault_disable();
@@ -66,30 +57,9 @@ futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ:
- ret = (oldval == cmparg);
- break;
- case FUTEX_OP_CMP_NE:
- ret = (oldval != cmparg);
- break;
- case FUTEX_OP_CMP_LT:
- ret = (oldval < cmparg);
- break;
- case FUTEX_OP_CMP_GE:
- ret = (oldval >= cmparg);
- break;
- case FUTEX_OP_CMP_LE:
- ret = (oldval <= cmparg);
- break;
- case FUTEX_OP_CMP_GT:
- ret = (oldval > cmparg);
- break;
- default:
- ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 8dd20358464f..48d91d5be4e9 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2260,7 +2260,7 @@ config CPU_R4K_CACHE_TLB
config MIPS_MT_SMP
bool "MIPS MT SMP support (1 TC on each available VPE)"
- depends on SYS_SUPPORTS_MULTITHREADING && !CPU_MIPSR6
+ depends on SYS_SUPPORTS_MULTITHREADING && !CPU_MIPSR6 && !CPU_MICROMIPS
select CPU_MIPSR2_IRQ_VI
select CPU_MIPSR2_IRQ_EI
select SYNC_R4K
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 04343625b929..bc2708c9ada4 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -243,8 +243,21 @@ include arch/mips/Kbuild.platforms
ifdef CONFIG_PHYSICAL_START
load-y = $(CONFIG_PHYSICAL_START)
endif
-entry-y = 0x$(shell $(NM) vmlinux 2>/dev/null \
+
+entry-noisa-y = 0x$(shell $(NM) vmlinux 2>/dev/null \
| grep "\bkernel_entry\b" | cut -f1 -d \ )
+ifdef CONFIG_CPU_MICROMIPS
+ #
+ # Set the ISA bit, since the kernel_entry symbol in the ELF will have it
+ # clear which would lead to images containing addresses which bootloaders may
+ # jump to as MIPS32 code.
+ #
+ entry-y = $(patsubst %0,%1,$(patsubst %2,%3,$(patsubst %4,%5, \
+ $(patsubst %6,%7,$(patsubst %8,%9,$(patsubst %a,%b, \
+ $(patsubst %c,%d,$(patsubst %e,%f,$(entry-noisa-y)))))))))
+else
+ entry-y = $(entry-noisa-y)
+endif
cflags-y += -I$(srctree)/arch/mips/include/asm/mach-generic
drivers-$(CONFIG_PCI) += arch/mips/pci/
diff --git a/arch/mips/boot/compressed/.gitignore b/arch/mips/boot/compressed/.gitignore
new file mode 100644
index 000000000000..ebae133f1d00
--- /dev/null
+++ b/arch/mips/boot/compressed/.gitignore
@@ -0,0 +1,2 @@
+ashldi3.c
+bswapsi.c
diff --git a/arch/mips/cavium-octeon/octeon-usb.c b/arch/mips/cavium-octeon/octeon-usb.c
index 542be1cd0f32..bfdfaf32d2c4 100644
--- a/arch/mips/cavium-octeon/octeon-usb.c
+++ b/arch/mips/cavium-octeon/octeon-usb.c
@@ -13,9 +13,9 @@
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/of_platform.h>
+#include <linux/io.h>
#include <asm/octeon/octeon.h>
-#include <asm/octeon/cvmx-gpio-defs.h>
/* USB Control Register */
union cvm_usbdrd_uctl_ctl {
diff --git a/arch/mips/configs/pistachio_defconfig b/arch/mips/configs/pistachio_defconfig
index 7d32fbbca962..3598d58aac30 100644
--- a/arch/mips/configs/pistachio_defconfig
+++ b/arch/mips/configs/pistachio_defconfig
@@ -207,7 +207,7 @@ CONFIG_IMGPDC_WDT=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_GPIO=y
CONFIG_MEDIA_SUPPORT=y
-CONFIG_MEDIA_RC_SUPPORT=y
+CONFIG_RC_CORE=y
# CONFIG_RC_DECODERS is not set
CONFIG_RC_DEVICES=y
CONFIG_IR_IMG=y
diff --git a/arch/mips/dec/int-handler.S b/arch/mips/dec/int-handler.S
index 1910223a9c02..cea2bb1621e6 100644
--- a/arch/mips/dec/int-handler.S
+++ b/arch/mips/dec/int-handler.S
@@ -147,23 +147,12 @@
* Find irq with highest priority
*/
# open coded PTR_LA t1, cpu_mask_nr_tbl
-#if (_MIPS_SZPTR == 32)
+#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
# open coded la t1, cpu_mask_nr_tbl
lui t1, %hi(cpu_mask_nr_tbl)
addiu t1, %lo(cpu_mask_nr_tbl)
-
-#endif
-#if (_MIPS_SZPTR == 64)
- # open coded dla t1, cpu_mask_nr_tbl
- .set push
- .set noat
- lui t1, %highest(cpu_mask_nr_tbl)
- lui AT, %hi(cpu_mask_nr_tbl)
- daddiu t1, t1, %higher(cpu_mask_nr_tbl)
- daddiu AT, AT, %lo(cpu_mask_nr_tbl)
- dsll t1, 32
- daddu t1, t1, AT
- .set pop
+#else
+#error GCC `-msym32' option required for 64-bit DECstation builds
#endif
1: lw t2,(t1)
nop
@@ -214,23 +203,12 @@
* Find irq with highest priority
*/
# open coded PTR_LA t1,asic_mask_nr_tbl
-#if (_MIPS_SZPTR == 32)
+#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
# open coded la t1, asic_mask_nr_tbl
lui t1, %hi(asic_mask_nr_tbl)
addiu t1, %lo(asic_mask_nr_tbl)
-
-#endif
-#if (_MIPS_SZPTR == 64)
- # open coded dla t1, asic_mask_nr_tbl
- .set push
- .set noat
- lui t1, %highest(asic_mask_nr_tbl)
- lui AT, %hi(asic_mask_nr_tbl)
- daddiu t1, t1, %higher(asic_mask_nr_tbl)
- daddiu AT, AT, %lo(asic_mask_nr_tbl)
- dsll t1, 32
- daddu t1, t1, AT
- .set pop
+#else
+#error GCC `-msym32' option required for 64-bit DECstation builds
#endif
2: lw t2,(t1)
nop
diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
index fc67947ed658..8b14c2706aa5 100644
--- a/arch/mips/include/asm/cache.h
+++ b/arch/mips/include/asm/cache.h
@@ -9,6 +9,8 @@
#ifndef _ASM_CACHE_H
#define _ASM_CACHE_H
+#include <kmalloc.h>
+
#define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 8baa9033b181..721b698bfe3c 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -428,6 +428,9 @@
#ifndef cpu_scache_line_size
#define cpu_scache_line_size() cpu_data[0].scache.linesz
#endif
+#ifndef cpu_tcache_line_size
+#define cpu_tcache_line_size() cpu_data[0].tcache.linesz
+#endif
#ifndef cpu_hwrena_impl_bits
#define cpu_hwrena_impl_bits 0
diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h
index 1de190bdfb9c..a9e61ea54ca9 100644
--- a/arch/mips/include/asm/futex.h
+++ b/arch/mips/include/asm/futex.h
@@ -83,18 +83,9 @@
}
static inline int
-futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
pagefault_disable();
@@ -125,17 +116,9 @@ futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 2998479fd4e8..a9af1d2dcd69 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -938,11 +938,6 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
-static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
- unsigned long address)
-{
-}
-
/* Emulation */
int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause);
diff --git a/arch/mips/include/asm/mach-ralink/ralink_regs.h b/arch/mips/include/asm/mach-ralink/ralink_regs.h
index 9df1a53bcb36..b4e7dfa214eb 100644
--- a/arch/mips/include/asm/mach-ralink/ralink_regs.h
+++ b/arch/mips/include/asm/mach-ralink/ralink_regs.h
@@ -13,6 +13,8 @@
#ifndef _RALINK_REGS_H_
#define _RALINK_REGS_H_
+#include <linux/io.h>
+
enum ralink_soc_type {
RALINK_UNKNOWN = 0,
RT2880_SOC,
diff --git a/arch/mips/include/asm/octeon/cvmx-l2c-defs.h b/arch/mips/include/asm/octeon/cvmx-l2c-defs.h
index d045973ddb33..3ea84acf1814 100644
--- a/arch/mips/include/asm/octeon/cvmx-l2c-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-l2c-defs.h
@@ -33,6 +33,10 @@
#define CVMX_L2C_DBG (CVMX_ADD_IO_SEG(0x0001180080000030ull))
#define CVMX_L2C_CFG (CVMX_ADD_IO_SEG(0x0001180080000000ull))
#define CVMX_L2C_CTL (CVMX_ADD_IO_SEG(0x0001180080800000ull))
+#define CVMX_L2C_ERR_TDTX(block_id) \
+ (CVMX_ADD_IO_SEG(0x0001180080A007E0ull) + ((block_id) & 3) * 0x40000ull)
+#define CVMX_L2C_ERR_TTGX(block_id) \
+ (CVMX_ADD_IO_SEG(0x0001180080A007E8ull) + ((block_id) & 3) * 0x40000ull)
#define CVMX_L2C_LCKBASE (CVMX_ADD_IO_SEG(0x0001180080000058ull))
#define CVMX_L2C_LCKOFF (CVMX_ADD_IO_SEG(0x0001180080000060ull))
#define CVMX_L2C_PFCTL (CVMX_ADD_IO_SEG(0x0001180080000090ull))
@@ -66,9 +70,40 @@
((offset) & 1) * 8)
#define CVMX_L2C_WPAR_PPX(offset) (CVMX_ADD_IO_SEG(0x0001180080840000ull) + \
((offset) & 31) * 8)
-#define CVMX_L2D_FUS3 (CVMX_ADD_IO_SEG(0x00011800800007B8ull))
+union cvmx_l2c_err_tdtx {
+ uint64_t u64;
+ struct cvmx_l2c_err_tdtx_s {
+ __BITFIELD_FIELD(uint64_t dbe:1,
+ __BITFIELD_FIELD(uint64_t sbe:1,
+ __BITFIELD_FIELD(uint64_t vdbe:1,
+ __BITFIELD_FIELD(uint64_t vsbe:1,
+ __BITFIELD_FIELD(uint64_t syn:10,
+ __BITFIELD_FIELD(uint64_t reserved_22_49:28,
+ __BITFIELD_FIELD(uint64_t wayidx:18,
+ __BITFIELD_FIELD(uint64_t reserved_2_3:2,
+ __BITFIELD_FIELD(uint64_t type:2,
+ ;)))))))))
+ } s;
+};
+
+union cvmx_l2c_err_ttgx {
+ uint64_t u64;
+ struct cvmx_l2c_err_ttgx_s {
+ __BITFIELD_FIELD(uint64_t dbe:1,
+ __BITFIELD_FIELD(uint64_t sbe:1,
+ __BITFIELD_FIELD(uint64_t noway:1,
+ __BITFIELD_FIELD(uint64_t reserved_56_60:5,
+ __BITFIELD_FIELD(uint64_t syn:6,
+ __BITFIELD_FIELD(uint64_t reserved_22_49:28,
+ __BITFIELD_FIELD(uint64_t wayidx:15,
+ __BITFIELD_FIELD(uint64_t reserved_2_6:5,
+ __BITFIELD_FIELD(uint64_t type:2,
+ ;)))))))))
+ } s;
+};
+
union cvmx_l2c_cfg {
uint64_t u64;
struct cvmx_l2c_cfg_s {
diff --git a/arch/mips/include/asm/octeon/cvmx-l2d-defs.h b/arch/mips/include/asm/octeon/cvmx-l2d-defs.h
new file mode 100644
index 000000000000..a951ad5d65ad
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-l2d-defs.h
@@ -0,0 +1,60 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_L2D_DEFS_H__
+#define __CVMX_L2D_DEFS_H__
+
+#define CVMX_L2D_ERR (CVMX_ADD_IO_SEG(0x0001180080000010ull))
+#define CVMX_L2D_FUS3 (CVMX_ADD_IO_SEG(0x00011800800007B8ull))
+
+
+union cvmx_l2d_err {
+ uint64_t u64;
+ struct cvmx_l2d_err_s {
+ __BITFIELD_FIELD(uint64_t reserved_6_63:58,
+ __BITFIELD_FIELD(uint64_t bmhclsel:1,
+ __BITFIELD_FIELD(uint64_t ded_err:1,
+ __BITFIELD_FIELD(uint64_t sec_err:1,
+ __BITFIELD_FIELD(uint64_t ded_intena:1,
+ __BITFIELD_FIELD(uint64_t sec_intena:1,
+ __BITFIELD_FIELD(uint64_t ecc_ena:1,
+ ;)))))))
+ } s;
+};
+
+union cvmx_l2d_fus3 {
+ uint64_t u64;
+ struct cvmx_l2d_fus3_s {
+ __BITFIELD_FIELD(uint64_t reserved_40_63:24,
+ __BITFIELD_FIELD(uint64_t ema_ctl:3,
+ __BITFIELD_FIELD(uint64_t reserved_34_36:3,
+ __BITFIELD_FIELD(uint64_t q3fus:34,
+ ;))))
+ } s;
+};
+
+#endif
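
The __BITFIELD_FIELD wrappers in this new header emit the bit-fields in the right order for both big- and little-endian builds, so the unions can be overlaid directly on a raw 64-bit CSR value. A hedged usage sketch, assuming the usual Octeon cvmx_read_csr() accessor:

	union cvmx_l2d_err err;

	err.u64 = cvmx_read_csr(CVMX_L2D_ERR);
	if (err.s.sec_err || err.s.ded_err)
		pr_err("L2D ECC error: sec=%llu ded=%llu\n",
		       (unsigned long long)err.s.sec_err,
		       (unsigned long long)err.s.ded_err);
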
diff --git a/arch/mips/include/asm/octeon/cvmx.h b/arch/mips/include/asm/octeon/cvmx.h
index 9742202f2a32..e638735cc3ac 100644
--- a/arch/mips/include/asm/octeon/cvmx.h
+++ b/arch/mips/include/asm/octeon/cvmx.h
@@ -62,6 +62,7 @@ enum cvmx_mips_space {
#include <asm/octeon/cvmx-iob-defs.h>
#include <asm/octeon/cvmx-ipd-defs.h>
#include <asm/octeon/cvmx-l2c-defs.h>
+#include <asm/octeon/cvmx-l2d-defs.h>
#include <asm/octeon/cvmx-l2t-defs.h>
#include <asm/octeon/cvmx-led-defs.h>
#include <asm/octeon/cvmx-mio-defs.h>
diff --git a/arch/mips/include/uapi/asm/ioctls.h b/arch/mips/include/uapi/asm/ioctls.h
index 68e19b689a00..1609cb0907ac 100644
--- a/arch/mips/include/uapi/asm/ioctls.h
+++ b/arch/mips/include/uapi/asm/ioctls.h
@@ -91,7 +91,7 @@
#define TIOCGPKT _IOR('T', 0x38, int) /* Get packet mode state */
#define TIOCGPTLCK _IOR('T', 0x39, int) /* Get Pty lock state */
#define TIOCGEXCL _IOR('T', 0x40, int) /* Get exclusive mode state */
-#define TIOCGPTPEER _IOR('T', 0x41, int) /* Safely open the slave */
+#define TIOCGPTPEER _IO('T', 0x41) /* Safely open the slave */
/* I hope the range from 0x5480 on is free ... */
#define TIOCSCTTY 0x5480 /* become controlling tty */
diff --git a/arch/mips/include/uapi/asm/mman.h b/arch/mips/include/uapi/asm/mman.h
index 655e2fb5395b..da3216007fe0 100644
--- a/arch/mips/include/uapi/asm/mman.h
+++ b/arch/mips/include/uapi/asm/mman.h
@@ -91,20 +91,12 @@
overrides the coredump filter bits */
#define MADV_DODUMP 17 /* Clear the MADV_NODUMP flag */
+#define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */
+#define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */
+
/* compatibility flags */
#define MAP_FILE 0
-/*
- * When MAP_HUGETLB is set bits [26:31] encode the log2 of the huge page size.
- * This gives us 6 bits, which is enough until someone invents 128 bit address
- * spaces.
- *
- * Assume these are all power of twos.
- * When 0 use the default page size.
- */
-#define MAP_HUGE_SHIFT 26
-#define MAP_HUGE_MASK 0x3f
-
#define PKEY_DISABLE_ACCESS 0x1
#define PKEY_DISABLE_WRITE 0x2
#define PKEY_ACCESS_MASK (PKEY_DISABLE_ACCESS |\
diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h
index 882823bec153..6c755bc07975 100644
--- a/arch/mips/include/uapi/asm/socket.h
+++ b/arch/mips/include/uapi/asm/socket.h
@@ -120,4 +120,6 @@
#define SO_PEERGROUPS 59
+#define SO_ZEROCOPY 60
+
#endif /* _UAPI_ASM_SOCKET_H */
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 6dd13641a418..1395654cfc8d 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -872,15 +872,13 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
if (unlikely(test_thread_flag(TIF_SECCOMP))) {
int ret, i;
struct seccomp_data sd;
+ unsigned long args[6];
sd.nr = syscall;
sd.arch = syscall_get_arch();
- for (i = 0; i < 6; i++) {
- unsigned long v, r;
-
- r = mips_get_syscall_arg(&v, current, regs, i);
- sd.args[i] = r ? 0 : v;
- }
+ syscall_get_arguments(current, regs, 0, 6, args);
+ for (i = 0; i < 6; i++)
+ sd.args[i] = args[i];
sd.instruction_pointer = KSTK_EIP(current);
ret = __secure_computing(&sd);
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 27c2f90eeb21..a9a7d78803cd 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -190,12 +190,6 @@ illegal_syscall:
sll t1, t0, 2
beqz v0, einval
lw t2, sys_call_table(t1) # syscall routine
- sw a0, PT_R2(sp) # call routine directly on restart
-
- /* Some syscalls like execve get their arguments from struct pt_regs
- and claim zero arguments in the syscall table. Thus we have to
- assume the worst case and shuffle around all potential arguments.
- If you want performance, don't use indirect syscalls. */
move a0, a1 # shift argument registers
move a1, a2
@@ -207,11 +201,6 @@ illegal_syscall:
sw t4, 16(sp)
sw t5, 20(sp)
sw t6, 24(sp)
- sw a0, PT_R4(sp) # .. and push back a0 - a3, some
- sw a1, PT_R5(sp) # syscalls expect them there
- sw a2, PT_R6(sp)
- sw a3, PT_R7(sp)
- sw a3, PT_R26(sp) # update a3 for syscall restarting
jr t2
/* Unreached */
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index c30bc520885f..9ebe3e2403b1 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -198,7 +198,6 @@ LEAF(sys32_syscall)
dsll t1, t0, 3
beqz v0, einval
ld t2, sys32_call_table(t1) # syscall routine
- sd a0, PT_R2(sp) # call routine directly on restart
move a0, a1 # shift argument registers
move a1, a2
@@ -207,11 +206,6 @@ LEAF(sys32_syscall)
move a4, a5
move a5, a6
move a6, a7
- sd a0, PT_R4(sp) # ... and push back a0 - a3, some
- sd a1, PT_R5(sp) # syscalls expect them there
- sd a2, PT_R6(sp)
- sd a3, PT_R7(sp)
- sd a3, PT_R26(sp) # update a3 for syscall restarting
jr t2
/* Unreached */
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 770d4d1516cb..c7cbddfcdc3b 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -376,9 +376,6 @@ asmlinkage void start_secondary(void)
cpumask_set_cpu(cpu, &cpu_coherent_mask);
notify_cpu_starting(cpu);
- complete(&cpu_running);
- synchronise_count_slave(cpu);
-
set_cpu_online(cpu, true);
set_cpu_sibling_map(cpu);
@@ -386,6 +383,9 @@ asmlinkage void start_secondary(void)
calculate_cpu_foreign_map();
+ complete(&cpu_running);
+ synchronise_count_slave(cpu);
+
/*
* irq will be enabled in ->smp_finish(), enabling it too early
* is dangerous.
@@ -648,12 +648,12 @@ EXPORT_SYMBOL(flush_tlb_one);
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static DEFINE_PER_CPU(atomic_t, tick_broadcast_count);
-static DEFINE_PER_CPU(struct call_single_data, tick_broadcast_csd);
+static DEFINE_PER_CPU(call_single_data_t, tick_broadcast_csd);
void tick_broadcast(const struct cpumask *mask)
{
atomic_t *count;
- struct call_single_data *csd;
+ call_single_data_t *csd;
int cpu;
for_each_cpu(cpu, mask) {
@@ -674,7 +674,7 @@ static void tick_broadcast_callee(void *info)
static int __init tick_broadcast_init(void)
{
- struct call_single_data *csd;
+ call_single_data_t *csd;
int cpu;
for (cpu = 0; cpu < NR_CPUS; cpu++) {
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index e08598c70b3e..8e78251eccc2 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -232,7 +232,7 @@ static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma,
else
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+ if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
return ret;
if (off < count && user_count <= (count - off)) {
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
index 3f74f6c1f065..9fea6c6bbf49 100644
--- a/arch/mips/mm/uasm-mips.c
+++ b/arch/mips/mm/uasm-mips.c
@@ -48,7 +48,7 @@
#include "uasm.c"
-static const struct insn const insn_table[insn_invalid] = {
+static const struct insn insn_table[insn_invalid] = {
[insn_addiu] = {M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
[insn_addu] = {M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD},
[insn_and] = {M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD},
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
new file mode 100644
index 000000000000..7646891c4e9b
--- /dev/null
+++ b/arch/mips/net/ebpf_jit.c
@@ -0,0 +1,1995 @@
+/*
+ * Just-In-Time compiler for eBPF filters on MIPS
+ *
+ * Copyright (c) 2017 Cavium, Inc.
+ *
+ * Based on code from:
+ *
+ * Copyright (c) 2014 Imagination Technologies Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/filter.h>
+#include <linux/bpf.h>
+#include <linux/slab.h>
+#include <asm/bitops.h>
+#include <asm/byteorder.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu-features.h>
+#include <asm/uasm.h>
+
+/* Registers used by JIT */
+#define MIPS_R_ZERO 0
+#define MIPS_R_AT 1
+#define MIPS_R_V0 2 /* BPF_R0 */
+#define MIPS_R_V1 3
+#define MIPS_R_A0 4 /* BPF_R1 */
+#define MIPS_R_A1 5 /* BPF_R2 */
+#define MIPS_R_A2 6 /* BPF_R3 */
+#define MIPS_R_A3 7 /* BPF_R4 */
+#define MIPS_R_A4 8 /* BPF_R5 */
+#define MIPS_R_T4 12 /* BPF_AX */
+#define MIPS_R_T5 13
+#define MIPS_R_T6 14
+#define MIPS_R_T7 15
+#define MIPS_R_S0 16 /* BPF_R6 */
+#define MIPS_R_S1 17 /* BPF_R7 */
+#define MIPS_R_S2 18 /* BPF_R8 */
+#define MIPS_R_S3 19 /* BPF_R9 */
+#define MIPS_R_S4 20 /* BPF_TCC */
+#define MIPS_R_S5 21
+#define MIPS_R_S6 22
+#define MIPS_R_S7 23
+#define MIPS_R_T8 24
+#define MIPS_R_T9 25
+#define MIPS_R_SP 29
+#define MIPS_R_RA 31
+
+/* eBPF flags */
+#define EBPF_SAVE_S0 BIT(0)
+#define EBPF_SAVE_S1 BIT(1)
+#define EBPF_SAVE_S2 BIT(2)
+#define EBPF_SAVE_S3 BIT(3)
+#define EBPF_SAVE_S4 BIT(4)
+#define EBPF_SAVE_RA BIT(5)
+#define EBPF_SEEN_FP BIT(6)
+#define EBPF_SEEN_TC BIT(7)
+#define EBPF_TCC_IN_V1 BIT(8)
+
+/*
+ * For the mips64 ISA, we need to track the value range or type for
+ * each JIT register. The BPF machine requires zero extended 32-bit
+ * values, but the mips64 ISA requires sign extended 32-bit values.
+ * At each point in the BPF program we track the state of every
+ * register so that we can zero extend or sign extend as the BPF
+ * semantics require.
+ */
+enum reg_val_type {
+ /* uninitialized */
+ REG_UNKNOWN,
+ /* not known to be 32-bit compatible. */
+ REG_64BIT,
+ /* 32-bit compatible, no truncation needed for 64-bit ops. */
+ REG_64BIT_32BIT,
+ /* 32-bit compatible, need truncation for 64-bit ops. */
+ REG_32BIT,
+ /* 32-bit zero extended. */
+ REG_32BIT_ZERO_EX,
+ /* 32-bit no sign/zero extension needed. */
+ REG_32BIT_POS
+};
+
+/*
+ * high bit of offsets indicates if long branch conversion done at
+ * this insn.
+ */
+#define OFFSETS_B_CONV BIT(31)
+
+/**
+ * struct jit_ctx - JIT context
+ * @skf: The sk_filter
+ * @stack_size: eBPF stack size
+ * @tmp_offset: eBPF $sp offset to 8-byte temporary memory
+ * @idx: Instruction index
+ * @flags: JIT flags
+ * @offsets: Instruction offsets
+ * @target: Memory location for the compiled filter
+ * @reg_val_types Packed enum reg_val_type for each register.
+ */
+struct jit_ctx {
+ const struct bpf_prog *skf;
+ int stack_size;
+ int tmp_offset;
+ u32 idx;
+ u32 flags;
+ u32 *offsets;
+ u32 *target;
+ u64 *reg_val_types;
+ unsigned int long_b_conversion:1;
+ unsigned int gen_b_offsets:1;
+ unsigned int use_bbit_insns:1;
+};
+
+static void set_reg_val_type(u64 *rvt, int reg, enum reg_val_type type)
+{
+ *rvt &= ~(7ull << (reg * 3));
+ *rvt |= ((u64)type << (reg * 3));
+}
+
+static enum reg_val_type get_reg_val_type(const struct jit_ctx *ctx,
+ int index, int reg)
+{
+ return (ctx->reg_val_types[index] >> (reg * 3)) & 7;
+}
+
+/* Simply emit the instruction if the JIT memory space has been allocated */
+#define emit_instr(ctx, func, ...) \
+do { \
+ if ((ctx)->target != NULL) { \
+ u32 *p = &(ctx)->target[ctx->idx]; \
+ uasm_i_##func(&p, ##__VA_ARGS__); \
+ } \
+ (ctx)->idx++; \
+} while (0)
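
emit_instr() always advances ctx->idx but only writes an opcode when ctx->target is non-NULL, which is what lets the same code-generation body run first as a pure sizing pass and then again as an emission pass over allocated memory. A rough standalone illustration of that pattern, with made-up opcode words and helper names:

#include <stdint.h>
#include <stdlib.h>

/* Stands in for a sequence of emit_instr() calls. */
static unsigned int build(uint32_t *target)
{
	unsigned int idx = 0;

	if (target)
		target[idx] = 0x11111111;	/* placeholder opcode word */
	idx++;
	if (target)
		target[idx] = 0x22222222;	/* placeholder opcode word */
	idx++;
	return idx;			/* number of 32-bit slots used */
}

int main(void)
{
	unsigned int n = build(NULL);		/* first pass: just count */
	uint32_t *image = calloc(n, sizeof(*image));

	if (!image)
		return 1;
	build(image);				/* second pass: really emit */
	free(image);
	return 0;
}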
+
+static unsigned int j_target(struct jit_ctx *ctx, int target_idx)
+{
+ unsigned long target_va, base_va;
+ unsigned int r;
+
+ if (!ctx->target)
+ return 0;
+
+ base_va = (unsigned long)ctx->target;
+ target_va = base_va + (ctx->offsets[target_idx] & ~OFFSETS_B_CONV);
+
+ if ((base_va & ~0x0ffffffful) != (target_va & ~0x0ffffffful))
+ return (unsigned int)-1;
+ r = target_va & 0x0ffffffful;
+ return r;
+}
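
A MIPS J instruction can only reach targets whose upper address bits match those of the jump site (a 256 MB, word-aligned region), which is why j_target() fails when the base and target addresses differ outside the low 28 bits. A quick standalone check of that region test, using made-up addresses:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint64_t mask = ~(uint64_t)0x0fffffff;	/* 256 MB region bits */
	uint64_t base   = 0xffffffff80100000ull;	/* hypothetical JIT base */
	uint64_t target = 0xffffffff801ffff0ull;	/* same region: reachable */
	uint64_t far    = 0xffffffff90000000ull;	/* different region: not reachable */

	assert((base & mask) == (target & mask));
	assert((base & mask) != (far & mask));
	return 0;
}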
+
+/* Compute the immediate value for PC-relative branches. */
+static u32 b_imm(unsigned int tgt, struct jit_ctx *ctx)
+{
+ if (!ctx->gen_b_offsets)
+ return 0;
+
+ /*
+ * We want a pc-relative branch. tgt is the instruction offset
+ * we want to jump to.
+ *
+ * Branch on MIPS:
+ * I: target_offset <- sign_extend(offset)
+ * I+1: PC += target_offset (delay slot)
+ *
+ * ctx->idx currently points to the branch instruction
+ * but the offset is added to the delay slot so we need
+ * to subtract 4.
+ */
+ return (ctx->offsets[tgt] & ~OFFSETS_B_CONV) -
+ (ctx->idx * 4) - 4;
+}
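
The branch displacement is measured from the delay slot, so the byte offset is offsets[tgt] minus the branch's own byte position minus 4; is_bad_offset() later checks that the result fits the 18-bit signed byte range of a MIPS branch. A small worked check of the arithmetic, with hypothetical offsets:

#include <assert.h>

int main(void)
{
	unsigned int tgt_off = 64;	/* hypothetical ctx->offsets[tgt] */
	unsigned int branch_idx = 10;	/* hypothetical ctx->idx at the branch */
	int b_off = (int)tgt_off - (int)(branch_idx * 4) - 4;

	/* delay slot sits at byte (10 + 1) * 4 = 44, and 44 + 20 = 64 */
	assert(b_off == 20);
	return 0;
}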
+
+int bpf_jit_enable __read_mostly;
+
+enum which_ebpf_reg {
+ src_reg,
+ src_reg_no_fp,
+ dst_reg,
+ dst_reg_fp_ok
+};
+
+/*
+ * For eBPF, the register mapping naturally falls out of the
+ * requirements of eBPF and the MIPS n64 ABI. We don't maintain a
+ * separate frame pointer, so BPF_REG_10 relative accesses are
+ * adjusted to be $sp relative.
+ */
+int ebpf_to_mips_reg(struct jit_ctx *ctx, const struct bpf_insn *insn,
+ enum which_ebpf_reg w)
+{
+ int ebpf_reg = (w == src_reg || w == src_reg_no_fp) ?
+ insn->src_reg : insn->dst_reg;
+
+ switch (ebpf_reg) {
+ case BPF_REG_0:
+ return MIPS_R_V0;
+ case BPF_REG_1:
+ return MIPS_R_A0;
+ case BPF_REG_2:
+ return MIPS_R_A1;
+ case BPF_REG_3:
+ return MIPS_R_A2;
+ case BPF_REG_4:
+ return MIPS_R_A3;
+ case BPF_REG_5:
+ return MIPS_R_A4;
+ case BPF_REG_6:
+ ctx->flags |= EBPF_SAVE_S0;
+ return MIPS_R_S0;
+ case BPF_REG_7:
+ ctx->flags |= EBPF_SAVE_S1;
+ return MIPS_R_S1;
+ case BPF_REG_8:
+ ctx->flags |= EBPF_SAVE_S2;
+ return MIPS_R_S2;
+ case BPF_REG_9:
+ ctx->flags |= EBPF_SAVE_S3;
+ return MIPS_R_S3;
+ case BPF_REG_10:
+ if (w == dst_reg || w == src_reg_no_fp)
+ goto bad_reg;
+ ctx->flags |= EBPF_SEEN_FP;
+ /*
+ * Needs special handling, return something that
+ * cannot be clobbered just in case.
+ */
+ return MIPS_R_ZERO;
+ case BPF_REG_AX:
+ return MIPS_R_T4;
+ default:
+bad_reg:
+ WARN(1, "Illegal bpf reg: %d\n", ebpf_reg);
+ return -EINVAL;
+ }
+}
+/*
+ * eBPF stack frame will be something like:
+ *
+ * Entry $sp ------> +--------------------------------+
+ * | $ra (optional) |
+ * +--------------------------------+
+ * | $s0 (optional) |
+ * +--------------------------------+
+ * | $s1 (optional) |
+ * +--------------------------------+
+ * | $s2 (optional) |
+ * +--------------------------------+
+ * | $s3 (optional) |
+ * +--------------------------------+
+ * | $s4 (optional) |
+ * +--------------------------------+
+ * | tmp-storage (if $ra saved) |
+ * $sp + tmp_offset --> +--------------------------------+ <--BPF_REG_10
+ * | BPF_REG_10 relative storage |
+ * | MAX_BPF_STACK (optional) |
+ * | . |
+ * | . |
+ * | . |
+ * $sp --------> +--------------------------------+
+ *
+ * If BPF_REG_10 is never referenced, then the MAX_BPF_STACK sized
+ * area is not allocated.
+ */
+static int gen_int_prologue(struct jit_ctx *ctx)
+{
+ int stack_adjust = 0;
+ int store_offset;
+ int locals_size;
+
+ if (ctx->flags & EBPF_SAVE_RA)
+ /*
+ * If $ra is to be saved, we are doing a function call and
+ * may need an extra 8-byte tmp area.
+ */
+ stack_adjust += 16;
+ if (ctx->flags & EBPF_SAVE_S0)
+ stack_adjust += 8;
+ if (ctx->flags & EBPF_SAVE_S1)
+ stack_adjust += 8;
+ if (ctx->flags & EBPF_SAVE_S2)
+ stack_adjust += 8;
+ if (ctx->flags & EBPF_SAVE_S3)
+ stack_adjust += 8;
+ if (ctx->flags & EBPF_SAVE_S4)
+ stack_adjust += 8;
+
+ BUILD_BUG_ON(MAX_BPF_STACK & 7);
+ locals_size = (ctx->flags & EBPF_SEEN_FP) ? MAX_BPF_STACK : 0;
+
+ stack_adjust += locals_size;
+ ctx->tmp_offset = locals_size;
+
+ ctx->stack_size = stack_adjust;
+
+ /*
+ * First instruction initializes the tail call count (TCC).
+ * On tail call we skip this instruction, and the TCC is
+ * passed in $v1 from the caller.
+ */
+ emit_instr(ctx, daddiu, MIPS_R_V1, MIPS_R_ZERO, MAX_TAIL_CALL_CNT);
+ if (stack_adjust)
+ emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, -stack_adjust);
+ else
+ return 0;
+
+ store_offset = stack_adjust - 8;
+
+ if (ctx->flags & EBPF_SAVE_RA) {
+ emit_instr(ctx, sd, MIPS_R_RA, store_offset, MIPS_R_SP);
+ store_offset -= 8;
+ }
+ if (ctx->flags & EBPF_SAVE_S0) {
+ emit_instr(ctx, sd, MIPS_R_S0, store_offset, MIPS_R_SP);
+ store_offset -= 8;
+ }
+ if (ctx->flags & EBPF_SAVE_S1) {
+ emit_instr(ctx, sd, MIPS_R_S1, store_offset, MIPS_R_SP);
+ store_offset -= 8;
+ }
+ if (ctx->flags & EBPF_SAVE_S2) {
+ emit_instr(ctx, sd, MIPS_R_S2, store_offset, MIPS_R_SP);
+ store_offset -= 8;
+ }
+ if (ctx->flags & EBPF_SAVE_S3) {
+ emit_instr(ctx, sd, MIPS_R_S3, store_offset, MIPS_R_SP);
+ store_offset -= 8;
+ }
+ if (ctx->flags & EBPF_SAVE_S4) {
+ emit_instr(ctx, sd, MIPS_R_S4, store_offset, MIPS_R_SP);
+ store_offset -= 8;
+ }
+
+ if ((ctx->flags & EBPF_SEEN_TC) && !(ctx->flags & EBPF_TCC_IN_V1))
+ emit_instr(ctx, daddu, MIPS_R_S4, MIPS_R_V1, MIPS_R_ZERO);
+
+ return 0;
+}
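
As a concrete sizing example: a program that calls a helper (so $ra is saved), uses BPF_REG_6 and references BPF_REG_10 reserves 16 bytes for $ra plus the tmp area, 8 bytes for $s0, and MAX_BPF_STACK bytes of locals. A sketch of that accounting outside the kernel; MAX_BPF_STACK is assumed to be 512 here, per linux/filter.h:

#include <stdio.h>

#define MAX_BPF_STACK 512	/* assumed value from linux/filter.h */

int main(void)
{
	int stack_adjust = 0;
	int save_ra = 1, save_s0 = 1, seen_fp = 1;

	if (save_ra)
		stack_adjust += 16;	/* $ra slot plus 8-byte tmp area */
	if (save_s0)
		stack_adjust += 8;
	stack_adjust += seen_fp ? MAX_BPF_STACK : 0;

	printf("frame size: %d bytes\n", stack_adjust);	/* prints 536 */
	return 0;
}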
+
+static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg)
+{
+ const struct bpf_prog *prog = ctx->skf;
+ int stack_adjust = ctx->stack_size;
+ int store_offset = stack_adjust - 8;
+ int r0 = MIPS_R_V0;
+
+ if (dest_reg == MIPS_R_RA &&
+ get_reg_val_type(ctx, prog->len, BPF_REG_0) == REG_32BIT_ZERO_EX)
+ /* Don't let zero extended value escape. */
+ emit_instr(ctx, sll, r0, r0, 0);
+
+ if (ctx->flags & EBPF_SAVE_RA) {
+ emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP);
+ store_offset -= 8;
+ }
+ if (ctx->flags & EBPF_SAVE_S0) {
+ emit_instr(ctx, ld, MIPS_R_S0, store_offset, MIPS_R_SP);
+ store_offset -= 8;
+ }
+ if (ctx->flags & EBPF_SAVE_S1) {
+ emit_instr(ctx, ld, MIPS_R_S1, store_offset, MIPS_R_SP);
+ store_offset -= 8;
+ }
+ if (ctx->flags & EBPF_SAVE_S2) {
+ emit_instr(ctx, ld, MIPS_R_S2, store_offset, MIPS_R_SP);
+ store_offset -= 8;
+ }
+ if (ctx->flags & EBPF_SAVE_S3) {
+ emit_instr(ctx, ld, MIPS_R_S3, store_offset, MIPS_R_SP);
+ store_offset -= 8;
+ }
+ if (ctx->flags & EBPF_SAVE_S4) {
+ emit_instr(ctx, ld, MIPS_R_S4, store_offset, MIPS_R_SP);
+ store_offset -= 8;
+ }
+ emit_instr(ctx, jr, dest_reg);
+
+ if (stack_adjust)
+ emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, stack_adjust);
+ else
+ emit_instr(ctx, nop);
+
+ return 0;
+}
+
+static void gen_imm_to_reg(const struct bpf_insn *insn, int reg,
+ struct jit_ctx *ctx)
+{
+ if (insn->imm >= S16_MIN && insn->imm <= S16_MAX) {
+ emit_instr(ctx, addiu, reg, MIPS_R_ZERO, insn->imm);
+ } else {
+ int lower = (s16)(insn->imm & 0xffff);
+ int upper = insn->imm - lower;
+
+ emit_instr(ctx, lui, reg, upper >> 16);
+ emit_instr(ctx, addiu, reg, reg, lower);
+ }
+
+}
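
The two-instruction case above works because addiu sign-extends its 16-bit immediate: lower is the sign-extended low half, and upper = imm - lower is then an exact multiple of 0x10000 that lui can install. A standalone check of that decomposition, using an arbitrary immediate:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	int32_t imm = 0x12348765;			/* low half has bit 15 set */
	int32_t lower = (int16_t)(imm & 0xffff);	/* sign-extends to -30875 */
	int32_t upper = imm - lower;			/* 0x12350000 */

	/* lui loads upper >> 16 into the top half... */
	int32_t rebuilt = (upper >> 16) << 16;
	/* ...and addiu adds the sign-extended lower half back */
	rebuilt += lower;

	assert(rebuilt == imm);
	return 0;
}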
+
+static int gen_imm_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
+ int idx)
+{
+ int upper_bound, lower_bound;
+ int dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+
+ if (dst < 0)
+ return dst;
+
+ switch (BPF_OP(insn->code)) {
+ case BPF_MOV:
+ case BPF_ADD:
+ upper_bound = S16_MAX;
+ lower_bound = S16_MIN;
+ break;
+ case BPF_SUB:
+ upper_bound = -(int)S16_MIN;
+ lower_bound = -(int)S16_MAX;
+ break;
+ case BPF_AND:
+ case BPF_OR:
+ case BPF_XOR:
+ upper_bound = 0xffff;
+ lower_bound = 0;
+ break;
+ case BPF_RSH:
+ case BPF_LSH:
+ case BPF_ARSH:
+ /* Shift amounts are truncated, no need for bounds */
+ upper_bound = S32_MAX;
+ lower_bound = S32_MIN;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * Immediate move clobbers the register, so no sign/zero
+ * extension needed.
+ */
+ if (BPF_CLASS(insn->code) == BPF_ALU64 &&
+ BPF_OP(insn->code) != BPF_MOV &&
+ get_reg_val_type(ctx, idx, insn->dst_reg) == REG_32BIT)
+ emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
+ /* BPF_ALU | BPF_LSH doesn't need separate sign extension */
+ if (BPF_CLASS(insn->code) == BPF_ALU &&
+ BPF_OP(insn->code) != BPF_LSH &&
+ BPF_OP(insn->code) != BPF_MOV &&
+ get_reg_val_type(ctx, idx, insn->dst_reg) != REG_32BIT)
+ emit_instr(ctx, sll, dst, dst, 0);
+
+ if (insn->imm >= lower_bound && insn->imm <= upper_bound) {
+ /* single insn immediate case */
+ switch (BPF_OP(insn->code) | BPF_CLASS(insn->code)) {
+ case BPF_ALU64 | BPF_MOV:
+ emit_instr(ctx, daddiu, dst, MIPS_R_ZERO, insn->imm);
+ break;
+ case BPF_ALU64 | BPF_AND:
+ case BPF_ALU | BPF_AND:
+ emit_instr(ctx, andi, dst, dst, insn->imm);
+ break;
+ case BPF_ALU64 | BPF_OR:
+ case BPF_ALU | BPF_OR:
+ emit_instr(ctx, ori, dst, dst, insn->imm);
+ break;
+ case BPF_ALU64 | BPF_XOR:
+ case BPF_ALU | BPF_XOR:
+ emit_instr(ctx, xori, dst, dst, insn->imm);
+ break;
+ case BPF_ALU64 | BPF_ADD:
+ emit_instr(ctx, daddiu, dst, dst, insn->imm);
+ break;
+ case BPF_ALU64 | BPF_SUB:
+ emit_instr(ctx, daddiu, dst, dst, -insn->imm);
+ break;
+ case BPF_ALU64 | BPF_RSH:
+ emit_instr(ctx, dsrl_safe, dst, dst, insn->imm & 0x3f);
+ break;
+ case BPF_ALU | BPF_RSH:
+ emit_instr(ctx, srl, dst, dst, insn->imm & 0x1f);
+ break;
+ case BPF_ALU64 | BPF_LSH:
+ emit_instr(ctx, dsll_safe, dst, dst, insn->imm & 0x3f);
+ break;
+ case BPF_ALU | BPF_LSH:
+ emit_instr(ctx, sll, dst, dst, insn->imm & 0x1f);
+ break;
+ case BPF_ALU64 | BPF_ARSH:
+ emit_instr(ctx, dsra_safe, dst, dst, insn->imm & 0x3f);
+ break;
+ case BPF_ALU | BPF_ARSH:
+ emit_instr(ctx, sra, dst, dst, insn->imm & 0x1f);
+ break;
+ case BPF_ALU | BPF_MOV:
+ emit_instr(ctx, addiu, dst, MIPS_R_ZERO, insn->imm);
+ break;
+ case BPF_ALU | BPF_ADD:
+ emit_instr(ctx, addiu, dst, dst, insn->imm);
+ break;
+ case BPF_ALU | BPF_SUB:
+ emit_instr(ctx, addiu, dst, dst, -insn->imm);
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ /* multi insn immediate case */
+ if (BPF_OP(insn->code) == BPF_MOV) {
+ gen_imm_to_reg(insn, dst, ctx);
+ } else {
+ gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+ switch (BPF_OP(insn->code) | BPF_CLASS(insn->code)) {
+ case BPF_ALU64 | BPF_AND:
+ case BPF_ALU | BPF_AND:
+ emit_instr(ctx, and, dst, dst, MIPS_R_AT);
+ break;
+ case BPF_ALU64 | BPF_OR:
+ case BPF_ALU | BPF_OR:
+ emit_instr(ctx, or, dst, dst, MIPS_R_AT);
+ break;
+ case BPF_ALU64 | BPF_XOR:
+ case BPF_ALU | BPF_XOR:
+ emit_instr(ctx, xor, dst, dst, MIPS_R_AT);
+ break;
+ case BPF_ALU64 | BPF_ADD:
+ emit_instr(ctx, daddu, dst, dst, MIPS_R_AT);
+ break;
+ case BPF_ALU64 | BPF_SUB:
+ emit_instr(ctx, dsubu, dst, dst, MIPS_R_AT);
+ break;
+ case BPF_ALU | BPF_ADD:
+ emit_instr(ctx, addu, dst, dst, MIPS_R_AT);
+ break;
+ case BPF_ALU | BPF_SUB:
+ emit_instr(ctx, subu, dst, dst, MIPS_R_AT);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void * __must_check
+ool_skb_header_pointer(const struct sk_buff *skb, int offset,
+ int len, void *buffer)
+{
+ return skb_header_pointer(skb, offset, len, buffer);
+}
+
+static int size_to_len(const struct bpf_insn *insn)
+{
+ switch (BPF_SIZE(insn->code)) {
+ case BPF_B:
+ return 1;
+ case BPF_H:
+ return 2;
+ case BPF_W:
+ return 4;
+ case BPF_DW:
+ return 8;
+ }
+ return 0;
+}
+
+static void emit_const_to_reg(struct jit_ctx *ctx, int dst, u64 value)
+{
+ if (value >= 0xffffffffffff8000ull || value < 0x8000ull) {
+ emit_instr(ctx, daddiu, dst, MIPS_R_ZERO, (int)value);
+ } else if (value >= 0xffffffff80000000ull ||
+ (value < 0x80000000 && value > 0xffff)) {
+ emit_instr(ctx, lui, dst, (s32)(s16)(value >> 16));
+ emit_instr(ctx, ori, dst, dst, (unsigned int)(value & 0xffff));
+ } else {
+ int i;
+ bool seen_part = false;
+ int needed_shift = 0;
+
+ for (i = 0; i < 4; i++) {
+ u64 part = (value >> (16 * (3 - i))) & 0xffff;
+
+ if (seen_part && needed_shift > 0 && (part || i == 3)) {
+ emit_instr(ctx, dsll_safe, dst, dst, needed_shift);
+ needed_shift = 0;
+ }
+ if (part) {
+ if (i == 0 || (!seen_part && i < 3 && part < 0x8000)) {
+ emit_instr(ctx, lui, dst, (s32)(s16)part);
+ needed_shift = -16;
+ } else {
+ emit_instr(ctx, ori, dst,
+ seen_part ? dst : MIPS_R_ZERO,
+ (unsigned int)part);
+ }
+ seen_part = true;
+ }
+ if (seen_part)
+ needed_shift += 16;
+ }
+ }
+}
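
The three branches above pick progressively longer sequences: a single daddiu for constants that sign-extend from 16 bits, lui + ori for constants that sign-extend from 32 bits, and a chunked lui/ori/dsll sequence for everything else. A small sketch that mirrors only the classification, not the emission; the helper name is made up:

#include <stdint.h>
#include <stdio.h>

static const char *const_strategy(uint64_t v)
{
	if (v >= 0xffffffffffff8000ull || v < 0x8000ull)
		return "single daddiu";			/* sign-extends from 16 bits */
	if (v >= 0xffffffff80000000ull || (v < 0x80000000ull && v > 0xffffull))
		return "lui + ori";			/* sign-extends from 32 bits */
	return "lui/ori chunks with dsll shifts";	/* general 64-bit constant */
}

int main(void)
{
	printf("%s\n", const_strategy(42));
	printf("%s\n", const_strategy(0x12345678));
	printf("%s\n", const_strategy(0x123456789abcdef0ull));
	return 0;
}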
+
+static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx)
+{
+ int off, b_off;
+
+ ctx->flags |= EBPF_SEEN_TC;
+ /*
+ * if (index >= array->map.max_entries)
+ * goto out;
+ */
+ off = offsetof(struct bpf_array, map.max_entries);
+ emit_instr(ctx, lwu, MIPS_R_T5, off, MIPS_R_A1);
+ emit_instr(ctx, sltu, MIPS_R_AT, MIPS_R_T5, MIPS_R_A2);
+ b_off = b_imm(this_idx + 1, ctx);
+ emit_instr(ctx, bne, MIPS_R_AT, MIPS_R_ZERO, b_off);
+ /*
+ * if (--TCC < 0)
+ * goto out;
+ */
+ /* Delay slot */
+ emit_instr(ctx, daddiu, MIPS_R_T5,
+ (ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4, -1);
+ b_off = b_imm(this_idx + 1, ctx);
+ emit_instr(ctx, bltz, MIPS_R_T5, b_off);
+ /*
+ * prog = array->ptrs[index];
+ * if (prog == NULL)
+ * goto out;
+ */
+ /* Delay slot */
+ emit_instr(ctx, dsll, MIPS_R_T8, MIPS_R_A2, 3);
+ emit_instr(ctx, daddu, MIPS_R_T8, MIPS_R_T8, MIPS_R_A1);
+ off = offsetof(struct bpf_array, ptrs);
+ emit_instr(ctx, ld, MIPS_R_AT, off, MIPS_R_T8);
+ b_off = b_imm(this_idx + 1, ctx);
+ emit_instr(ctx, beq, MIPS_R_AT, MIPS_R_ZERO, b_off);
+ /* Delay slot */
+ emit_instr(ctx, nop);
+
+ /* goto *(prog->bpf_func + 4); */
+ off = offsetof(struct bpf_prog, bpf_func);
+ emit_instr(ctx, ld, MIPS_R_T9, off, MIPS_R_AT);
+ /* All systems are go... propagate TCC */
+ emit_instr(ctx, daddu, MIPS_R_V1, MIPS_R_T5, MIPS_R_ZERO);
+ /* Skip first instruction (TCC initialization) */
+ emit_instr(ctx, daddiu, MIPS_R_T9, MIPS_R_T9, 4);
+ return build_int_epilogue(ctx, MIPS_R_T9);
+}
+
+static bool is_bad_offset(int b_off)
+{
+ return b_off > 0x1ffff || b_off < -0x20000;
+}
+
+/* Returns the number of insn slots consumed. */
+static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
+ int this_idx, int exit_idx)
+{
+ int src, dst, r, td, ts, mem_off, b_off;
+ bool need_swap, did_move, cmp_eq;
+ unsigned int target;
+ u64 t64;
+ s64 t64s;
+ int bpf_op = BPF_OP(insn->code);
+
+ switch (insn->code) {
+ case BPF_ALU64 | BPF_ADD | BPF_K: /* ALU64_IMM */
+ case BPF_ALU64 | BPF_SUB | BPF_K: /* ALU64_IMM */
+ case BPF_ALU64 | BPF_OR | BPF_K: /* ALU64_IMM */
+ case BPF_ALU64 | BPF_AND | BPF_K: /* ALU64_IMM */
+ case BPF_ALU64 | BPF_LSH | BPF_K: /* ALU64_IMM */
+ case BPF_ALU64 | BPF_RSH | BPF_K: /* ALU64_IMM */
+ case BPF_ALU64 | BPF_XOR | BPF_K: /* ALU64_IMM */
+ case BPF_ALU64 | BPF_ARSH | BPF_K: /* ALU64_IMM */
+ case BPF_ALU64 | BPF_MOV | BPF_K: /* ALU64_IMM */
+ case BPF_ALU | BPF_MOV | BPF_K: /* ALU32_IMM */
+ case BPF_ALU | BPF_ADD | BPF_K: /* ALU32_IMM */
+ case BPF_ALU | BPF_SUB | BPF_K: /* ALU32_IMM */
+ case BPF_ALU | BPF_OR | BPF_K: /* ALU64_IMM */
+ case BPF_ALU | BPF_AND | BPF_K: /* ALU64_IMM */
+ case BPF_ALU | BPF_LSH | BPF_K: /* ALU64_IMM */
+ case BPF_ALU | BPF_RSH | BPF_K: /* ALU64_IMM */
+ case BPF_ALU | BPF_XOR | BPF_K: /* ALU64_IMM */
+ case BPF_ALU | BPF_ARSH | BPF_K: /* ALU64_IMM */
+ r = gen_imm_insn(insn, ctx, this_idx);
+ if (r < 0)
+ return r;
+ break;
+ case BPF_ALU64 | BPF_MUL | BPF_K: /* ALU64_IMM */
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (dst < 0)
+ return dst;
+ if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
+ emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
+ if (insn->imm == 1) /* Mult by 1 is a nop */
+ break;
+ gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+ emit_instr(ctx, dmultu, MIPS_R_AT, dst);
+ emit_instr(ctx, mflo, dst);
+ break;
+ case BPF_ALU64 | BPF_NEG | BPF_K: /* ALU64_IMM */
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (dst < 0)
+ return dst;
+ if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
+ emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
+ emit_instr(ctx, dsubu, dst, MIPS_R_ZERO, dst);
+ break;
+ case BPF_ALU | BPF_MUL | BPF_K: /* ALU_IMM */
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (dst < 0)
+ return dst;
+ td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
+ if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
+ /* sign extend */
+ emit_instr(ctx, sll, dst, dst, 0);
+ }
+ if (insn->imm == 1) /* Mult by 1 is a nop */
+ break;
+ gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+ emit_instr(ctx, multu, dst, MIPS_R_AT);
+ emit_instr(ctx, mflo, dst);
+ break;
+ case BPF_ALU | BPF_NEG | BPF_K: /* ALU_IMM */
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (dst < 0)
+ return dst;
+ td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
+ if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
+ /* sign extend */
+ emit_instr(ctx, sll, dst, dst, 0);
+ }
+ emit_instr(ctx, subu, dst, MIPS_R_ZERO, dst);
+ break;
+ case BPF_ALU | BPF_DIV | BPF_K: /* ALU_IMM */
+ case BPF_ALU | BPF_MOD | BPF_K: /* ALU_IMM */
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (dst < 0)
+ return dst;
+ if (insn->imm == 0) { /* Div by zero */
+ b_off = b_imm(exit_idx, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off);
+ emit_instr(ctx, addu, MIPS_R_V0, MIPS_R_ZERO, MIPS_R_ZERO);
+ }
+ td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
+ if (td == REG_64BIT || td == REG_32BIT_ZERO_EX)
+ /* sign extend */
+ emit_instr(ctx, sll, dst, dst, 0);
+ if (insn->imm == 1) {
+ /* div by 1 is a nop, mod by 1 is zero */
+ if (bpf_op == BPF_MOD)
+ emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO);
+ break;
+ }
+ gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+ emit_instr(ctx, divu, dst, MIPS_R_AT);
+ if (bpf_op == BPF_DIV)
+ emit_instr(ctx, mflo, dst);
+ else
+ emit_instr(ctx, mfhi, dst);
+ break;
+ case BPF_ALU64 | BPF_DIV | BPF_K: /* ALU_IMM */
+ case BPF_ALU64 | BPF_MOD | BPF_K: /* ALU_IMM */
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (dst < 0)
+ return dst;
+ if (insn->imm == 0) { /* Div by zero */
+ b_off = b_imm(exit_idx, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off);
+ emit_instr(ctx, addu, MIPS_R_V0, MIPS_R_ZERO, MIPS_R_ZERO);
+ }
+ if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
+ emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
+
+ if (insn->imm == 1) {
+ /* div by 1 is a nop, mod by 1 is zero */
+ if (bpf_op == BPF_MOD)
+ emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO);
+ break;
+ }
+ gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+ emit_instr(ctx, ddivu, dst, MIPS_R_AT);
+ if (bpf_op == BPF_DIV)
+ emit_instr(ctx, mflo, dst);
+ else
+ emit_instr(ctx, mfhi, dst);
+ break;
+ case BPF_ALU64 | BPF_MOV | BPF_X: /* ALU64_REG */
+ case BPF_ALU64 | BPF_ADD | BPF_X: /* ALU64_REG */
+ case BPF_ALU64 | BPF_SUB | BPF_X: /* ALU64_REG */
+ case BPF_ALU64 | BPF_XOR | BPF_X: /* ALU64_REG */
+ case BPF_ALU64 | BPF_OR | BPF_X: /* ALU64_REG */
+ case BPF_ALU64 | BPF_AND | BPF_X: /* ALU64_REG */
+ case BPF_ALU64 | BPF_MUL | BPF_X: /* ALU64_REG */
+ case BPF_ALU64 | BPF_DIV | BPF_X: /* ALU64_REG */
+ case BPF_ALU64 | BPF_MOD | BPF_X: /* ALU64_REG */
+ case BPF_ALU64 | BPF_LSH | BPF_X: /* ALU64_REG */
+ case BPF_ALU64 | BPF_RSH | BPF_X: /* ALU64_REG */
+ case BPF_ALU64 | BPF_ARSH | BPF_X: /* ALU64_REG */
+ src = ebpf_to_mips_reg(ctx, insn, src_reg);
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (src < 0 || dst < 0)
+ return -EINVAL;
+ if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
+ emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
+ did_move = false;
+ if (insn->src_reg == BPF_REG_10) {
+ if (bpf_op == BPF_MOV) {
+ emit_instr(ctx, daddiu, dst, MIPS_R_SP, MAX_BPF_STACK);
+ did_move = true;
+ } else {
+ emit_instr(ctx, daddiu, MIPS_R_AT, MIPS_R_SP, MAX_BPF_STACK);
+ src = MIPS_R_AT;
+ }
+ } else if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
+ int tmp_reg = MIPS_R_AT;
+
+ if (bpf_op == BPF_MOV) {
+ tmp_reg = dst;
+ did_move = true;
+ }
+ emit_instr(ctx, daddu, tmp_reg, src, MIPS_R_ZERO);
+ emit_instr(ctx, dinsu, tmp_reg, MIPS_R_ZERO, 32, 32);
+ src = MIPS_R_AT;
+ }
+ switch (bpf_op) {
+ case BPF_MOV:
+ if (!did_move)
+ emit_instr(ctx, daddu, dst, src, MIPS_R_ZERO);
+ break;
+ case BPF_ADD:
+ emit_instr(ctx, daddu, dst, dst, src);
+ break;
+ case BPF_SUB:
+ emit_instr(ctx, dsubu, dst, dst, src);
+ break;
+ case BPF_XOR:
+ emit_instr(ctx, xor, dst, dst, src);
+ break;
+ case BPF_OR:
+ emit_instr(ctx, or, dst, dst, src);
+ break;
+ case BPF_AND:
+ emit_instr(ctx, and, dst, dst, src);
+ break;
+ case BPF_MUL:
+ emit_instr(ctx, dmultu, dst, src);
+ emit_instr(ctx, mflo, dst);
+ break;
+ case BPF_DIV:
+ case BPF_MOD:
+ b_off = b_imm(exit_idx, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_instr(ctx, beq, src, MIPS_R_ZERO, b_off);
+ emit_instr(ctx, movz, MIPS_R_V0, MIPS_R_ZERO, src);
+ emit_instr(ctx, ddivu, dst, src);
+ if (bpf_op == BPF_DIV)
+ emit_instr(ctx, mflo, dst);
+ else
+ emit_instr(ctx, mfhi, dst);
+ break;
+ case BPF_LSH:
+ emit_instr(ctx, dsllv, dst, dst, src);
+ break;
+ case BPF_RSH:
+ emit_instr(ctx, dsrlv, dst, dst, src);
+ break;
+ case BPF_ARSH:
+ emit_instr(ctx, dsrav, dst, dst, src);
+ break;
+ default:
+ pr_err("ALU64_REG NOT HANDLED\n");
+ return -EINVAL;
+ }
+ break;
+ case BPF_ALU | BPF_MOV | BPF_X: /* ALU_REG */
+ case BPF_ALU | BPF_ADD | BPF_X: /* ALU_REG */
+ case BPF_ALU | BPF_SUB | BPF_X: /* ALU_REG */
+ case BPF_ALU | BPF_XOR | BPF_X: /* ALU_REG */
+ case BPF_ALU | BPF_OR | BPF_X: /* ALU_REG */
+ case BPF_ALU | BPF_AND | BPF_X: /* ALU_REG */
+ case BPF_ALU | BPF_MUL | BPF_X: /* ALU_REG */
+ case BPF_ALU | BPF_DIV | BPF_X: /* ALU_REG */
+ case BPF_ALU | BPF_MOD | BPF_X: /* ALU_REG */
+ case BPF_ALU | BPF_LSH | BPF_X: /* ALU_REG */
+ case BPF_ALU | BPF_RSH | BPF_X: /* ALU_REG */
+ src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (src < 0 || dst < 0)
+ return -EINVAL;
+ td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
+ if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
+ /* sign extend */
+ emit_instr(ctx, sll, dst, dst, 0);
+ }
+ did_move = false;
+ ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
+ if (ts == REG_64BIT || ts == REG_32BIT_ZERO_EX) {
+ int tmp_reg = MIPS_R_AT;
+
+ if (bpf_op == BPF_MOV) {
+ tmp_reg = dst;
+ did_move = true;
+ }
+ /* sign extend */
+ emit_instr(ctx, sll, tmp_reg, src, 0);
+ src = MIPS_R_AT;
+ }
+ switch (bpf_op) {
+ case BPF_MOV:
+ if (!did_move)
+ emit_instr(ctx, addu, dst, src, MIPS_R_ZERO);
+ break;
+ case BPF_ADD:
+ emit_instr(ctx, addu, dst, dst, src);
+ break;
+ case BPF_SUB:
+ emit_instr(ctx, subu, dst, dst, src);
+ break;
+ case BPF_XOR:
+ emit_instr(ctx, xor, dst, dst, src);
+ break;
+ case BPF_OR:
+ emit_instr(ctx, or, dst, dst, src);
+ break;
+ case BPF_AND:
+ emit_instr(ctx, and, dst, dst, src);
+ break;
+ case BPF_MUL:
+ emit_instr(ctx, mul, dst, dst, src);
+ break;
+ case BPF_DIV:
+ case BPF_MOD:
+ b_off = b_imm(exit_idx, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_instr(ctx, beq, src, MIPS_R_ZERO, b_off);
+ emit_instr(ctx, movz, MIPS_R_V0, MIPS_R_ZERO, src);
+ emit_instr(ctx, divu, dst, src);
+ if (bpf_op == BPF_DIV)
+ emit_instr(ctx, mflo, dst);
+ else
+ emit_instr(ctx, mfhi, dst);
+ break;
+ case BPF_LSH:
+ emit_instr(ctx, sllv, dst, dst, src);
+ break;
+ case BPF_RSH:
+ emit_instr(ctx, srlv, dst, dst, src);
+ break;
+ default:
+ pr_err("ALU_REG NOT HANDLED\n");
+ return -EINVAL;
+ }
+ break;
+ case BPF_JMP | BPF_EXIT:
+ if (this_idx + 1 < exit_idx) {
+ b_off = b_imm(exit_idx, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off);
+ emit_instr(ctx, nop);
+ }
+ break;
+ case BPF_JMP | BPF_JEQ | BPF_K: /* JMP_IMM */
+ case BPF_JMP | BPF_JNE | BPF_K: /* JMP_IMM */
+ cmp_eq = (bpf_op == BPF_JEQ);
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
+ if (dst < 0)
+ return dst;
+ if (insn->imm == 0) {
+ src = MIPS_R_ZERO;
+ } else {
+ gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+ src = MIPS_R_AT;
+ }
+ goto jeq_common;
+ case BPF_JMP | BPF_JEQ | BPF_X: /* JMP_REG */
+ case BPF_JMP | BPF_JNE | BPF_X:
+ case BPF_JMP | BPF_JSLT | BPF_X:
+ case BPF_JMP | BPF_JSLE | BPF_X:
+ case BPF_JMP | BPF_JSGT | BPF_X:
+ case BPF_JMP | BPF_JSGE | BPF_X:
+ case BPF_JMP | BPF_JLT | BPF_X:
+ case BPF_JMP | BPF_JLE | BPF_X:
+ case BPF_JMP | BPF_JGT | BPF_X:
+ case BPF_JMP | BPF_JGE | BPF_X:
+ case BPF_JMP | BPF_JSET | BPF_X:
+ src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (src < 0 || dst < 0)
+ return -EINVAL;
+ td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
+ ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
+ if (td == REG_32BIT && ts != REG_32BIT) {
+ emit_instr(ctx, sll, MIPS_R_AT, src, 0);
+ src = MIPS_R_AT;
+ } else if (ts == REG_32BIT && td != REG_32BIT) {
+ emit_instr(ctx, sll, MIPS_R_AT, dst, 0);
+ dst = MIPS_R_AT;
+ }
+ if (bpf_op == BPF_JSET) {
+ emit_instr(ctx, and, MIPS_R_AT, dst, src);
+ cmp_eq = false;
+ dst = MIPS_R_AT;
+ src = MIPS_R_ZERO;
+ } else if (bpf_op == BPF_JSGT || bpf_op == BPF_JSLE) {
+ emit_instr(ctx, dsubu, MIPS_R_AT, dst, src);
+ if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
+ b_off = b_imm(exit_idx, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ if (bpf_op == BPF_JSGT)
+ emit_instr(ctx, blez, MIPS_R_AT, b_off);
+ else
+ emit_instr(ctx, bgtz, MIPS_R_AT, b_off);
+ emit_instr(ctx, nop);
+ return 2; /* We consumed the exit. */
+ }
+ b_off = b_imm(this_idx + insn->off + 1, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ if (bpf_op == BPF_JSGT)
+ emit_instr(ctx, bgtz, MIPS_R_AT, b_off);
+ else
+ emit_instr(ctx, blez, MIPS_R_AT, b_off);
+ emit_instr(ctx, nop);
+ break;
+ } else if (bpf_op == BPF_JSGE || bpf_op == BPF_JSLT) {
+ emit_instr(ctx, slt, MIPS_R_AT, dst, src);
+ cmp_eq = bpf_op == BPF_JSGE;
+ dst = MIPS_R_AT;
+ src = MIPS_R_ZERO;
+ } else if (bpf_op == BPF_JGT || bpf_op == BPF_JLE) {
+ /* dst or src could be AT */
+ emit_instr(ctx, dsubu, MIPS_R_T8, dst, src);
+ emit_instr(ctx, sltu, MIPS_R_AT, dst, src);
+ /* SP known to be non-zero, movz becomes boolean not */
+ emit_instr(ctx, movz, MIPS_R_T9, MIPS_R_SP, MIPS_R_T8);
+ emit_instr(ctx, movn, MIPS_R_T9, MIPS_R_ZERO, MIPS_R_T8);
+ emit_instr(ctx, or, MIPS_R_AT, MIPS_R_T9, MIPS_R_AT);
+ cmp_eq = bpf_op == BPF_JGT;
+ dst = MIPS_R_AT;
+ src = MIPS_R_ZERO;
+ } else if (bpf_op == BPF_JGE || bpf_op == BPF_JLT) {
+ emit_instr(ctx, sltu, MIPS_R_AT, dst, src);
+ cmp_eq = bpf_op == BPF_JGE;
+ dst = MIPS_R_AT;
+ src = MIPS_R_ZERO;
+ } else { /* JNE/JEQ case */
+ cmp_eq = (bpf_op == BPF_JEQ);
+ }
+jeq_common:
+ /*
+ * If the next insn is EXIT and we are jumping around
+ * only it, invert the sense of the compare and
+ * conditionally jump to the exit. Poor man's branch
+ * chaining.
+ */
+ if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
+ b_off = b_imm(exit_idx, ctx);
+ if (is_bad_offset(b_off)) {
+ target = j_target(ctx, exit_idx);
+ if (target == (unsigned int)-1)
+ return -E2BIG;
+ cmp_eq = !cmp_eq;
+ b_off = 4 * 3;
+ if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) {
+ ctx->offsets[this_idx] |= OFFSETS_B_CONV;
+ ctx->long_b_conversion = 1;
+ }
+ }
+
+ if (cmp_eq)
+ emit_instr(ctx, bne, dst, src, b_off);
+ else
+ emit_instr(ctx, beq, dst, src, b_off);
+ emit_instr(ctx, nop);
+ if (ctx->offsets[this_idx] & OFFSETS_B_CONV) {
+ emit_instr(ctx, j, target);
+ emit_instr(ctx, nop);
+ }
+ return 2; /* We consumed the exit. */
+ }
+ b_off = b_imm(this_idx + insn->off + 1, ctx);
+ if (is_bad_offset(b_off)) {
+ target = j_target(ctx, this_idx + insn->off + 1);
+ if (target == (unsigned int)-1)
+ return -E2BIG;
+ cmp_eq = !cmp_eq;
+ b_off = 4 * 3;
+ if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) {
+ ctx->offsets[this_idx] |= OFFSETS_B_CONV;
+ ctx->long_b_conversion = 1;
+ }
+ }
+
+ if (cmp_eq)
+ emit_instr(ctx, beq, dst, src, b_off);
+ else
+ emit_instr(ctx, bne, dst, src, b_off);
+ emit_instr(ctx, nop);
+ if (ctx->offsets[this_idx] & OFFSETS_B_CONV) {
+ emit_instr(ctx, j, target);
+ emit_instr(ctx, nop);
+ }
+ break;
+ case BPF_JMP | BPF_JSGT | BPF_K: /* JMP_IMM */
+ case BPF_JMP | BPF_JSGE | BPF_K: /* JMP_IMM */
+ case BPF_JMP | BPF_JSLT | BPF_K: /* JMP_IMM */
+ case BPF_JMP | BPF_JSLE | BPF_K: /* JMP_IMM */
+ cmp_eq = (bpf_op == BPF_JSGE);
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
+ if (dst < 0)
+ return dst;
+
+ if (insn->imm == 0) {
+ if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
+ b_off = b_imm(exit_idx, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ switch (bpf_op) {
+ case BPF_JSGT:
+ emit_instr(ctx, blez, dst, b_off);
+ break;
+ case BPF_JSGE:
+ emit_instr(ctx, bltz, dst, b_off);
+ break;
+ case BPF_JSLT:
+ emit_instr(ctx, bgez, dst, b_off);
+ break;
+ case BPF_JSLE:
+ emit_instr(ctx, bgtz, dst, b_off);
+ break;
+ }
+ emit_instr(ctx, nop);
+ return 2; /* We consumed the exit. */
+ }
+ b_off = b_imm(this_idx + insn->off + 1, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ switch (bpf_op) {
+ case BPF_JSGT:
+ emit_instr(ctx, bgtz, dst, b_off);
+ break;
+ case BPF_JSGE:
+ emit_instr(ctx, bgez, dst, b_off);
+ break;
+ case BPF_JSLT:
+ emit_instr(ctx, bltz, dst, b_off);
+ break;
+ case BPF_JSLE:
+ emit_instr(ctx, blez, dst, b_off);
+ break;
+ }
+ emit_instr(ctx, nop);
+ break;
+ }
+ /*
+ * Only "slt"-style compares are available, so "GT" and "LE"
+ * are tested against imm + 1: x > imm is !(x < imm + 1) and
+ * x <= imm is x < imm + 1.
+ */
+ if (bpf_op == BPF_JSGT)
+ t64s = insn->imm + 1;
+ else if (bpf_op == BPF_JSLE)
+ t64s = insn->imm + 1;
+ else
+ t64s = insn->imm;
+
+ cmp_eq = bpf_op == BPF_JSGT || bpf_op == BPF_JSGE;
+ if (t64s >= S16_MIN && t64s <= S16_MAX) {
+ emit_instr(ctx, slti, MIPS_R_AT, dst, (int)t64s);
+ src = MIPS_R_AT;
+ dst = MIPS_R_ZERO;
+ goto jeq_common;
+ }
+ emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s);
+ emit_instr(ctx, slt, MIPS_R_AT, dst, MIPS_R_AT);
+ src = MIPS_R_AT;
+ dst = MIPS_R_ZERO;
+ goto jeq_common;
+
+ case BPF_JMP | BPF_JGT | BPF_K:
+ case BPF_JMP | BPF_JGE | BPF_K:
+ case BPF_JMP | BPF_JLT | BPF_K:
+ case BPF_JMP | BPF_JLE | BPF_K:
+ cmp_eq = (bpf_op == BPF_JGE);
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
+ if (dst < 0)
+ return dst;
+ /*
+ * Only "sltu"-style compares are available, so "GT" and "LE"
+ * are tested against imm + 1: x > imm is !(x < imm + 1) and
+ * x <= imm is x < imm + 1 (unsigned).
+ */
+ if (bpf_op == BPF_JGT)
+ t64s = (u64)(u32)(insn->imm) + 1;
+ else if (bpf_op == BPF_JLE)
+ t64s = (u64)(u32)(insn->imm) + 1;
+ else
+ t64s = (u64)(u32)(insn->imm);
+
+ cmp_eq = bpf_op == BPF_JGT || bpf_op == BPF_JGE;
+
+ emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s);
+ emit_instr(ctx, sltu, MIPS_R_AT, dst, MIPS_R_AT);
+ src = MIPS_R_AT;
+ dst = MIPS_R_ZERO;
+ goto jeq_common;
+
+ case BPF_JMP | BPF_JSET | BPF_K: /* JMP_IMM */
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
+ if (dst < 0)
+ return dst;
+
+ if (ctx->use_bbit_insns && hweight32((u32)insn->imm) == 1) {
+ if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
+ b_off = b_imm(exit_idx, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_instr(ctx, bbit0, dst, ffs((u32)insn->imm) - 1, b_off);
+ emit_instr(ctx, nop);
+ return 2; /* We consumed the exit. */
+ }
+ b_off = b_imm(this_idx + insn->off + 1, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_instr(ctx, bbit1, dst, ffs((u32)insn->imm) - 1, b_off);
+ emit_instr(ctx, nop);
+ break;
+ }
+ t64 = (u32)insn->imm;
+ emit_const_to_reg(ctx, MIPS_R_AT, t64);
+ emit_instr(ctx, and, MIPS_R_AT, dst, MIPS_R_AT);
+ src = MIPS_R_AT;
+ dst = MIPS_R_ZERO;
+ cmp_eq = false;
+ goto jeq_common;
+
+ case BPF_JMP | BPF_JA:
+ /*
+ * Prefer relative branch for easier debugging, but
+ * fall back if needed.
+ */
+ b_off = b_imm(this_idx + insn->off + 1, ctx);
+ if (is_bad_offset(b_off)) {
+ target = j_target(ctx, this_idx + insn->off + 1);
+ if (target == (unsigned int)-1)
+ return -E2BIG;
+ emit_instr(ctx, j, target);
+ } else {
+ emit_instr(ctx, b, b_off);
+ }
+ emit_instr(ctx, nop);
+ break;
+ case BPF_LD | BPF_DW | BPF_IMM:
+ if (insn->src_reg != 0)
+ return -EINVAL;
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (dst < 0)
+ return dst;
+ t64 = ((u64)(u32)insn->imm) | ((u64)(insn + 1)->imm << 32);
+ emit_const_to_reg(ctx, dst, t64);
+ return 2; /* Double slot insn */
+
+ case BPF_JMP | BPF_CALL:
+ ctx->flags |= EBPF_SAVE_RA;
+ t64s = (s64)insn->imm + (s64)__bpf_call_base;
+ emit_const_to_reg(ctx, MIPS_R_T9, (u64)t64s);
+ emit_instr(ctx, jalr, MIPS_R_RA, MIPS_R_T9);
+ /* delay slot */
+ emit_instr(ctx, nop);
+ break;
+
+ case BPF_JMP | BPF_TAIL_CALL:
+ if (emit_bpf_tail_call(ctx, this_idx))
+ return -EINVAL;
+ break;
+
+ case BPF_LD | BPF_B | BPF_ABS:
+ case BPF_LD | BPF_H | BPF_ABS:
+ case BPF_LD | BPF_W | BPF_ABS:
+ case BPF_LD | BPF_DW | BPF_ABS:
+ ctx->flags |= EBPF_SAVE_RA;
+
+ gen_imm_to_reg(insn, MIPS_R_A1, ctx);
+ emit_instr(ctx, addiu, MIPS_R_A2, MIPS_R_ZERO, size_to_len(insn));
+
+ if (insn->imm < 0) {
+ emit_const_to_reg(ctx, MIPS_R_T9, (u64)bpf_internal_load_pointer_neg_helper);
+ } else {
+ emit_const_to_reg(ctx, MIPS_R_T9, (u64)ool_skb_header_pointer);
+ emit_instr(ctx, daddiu, MIPS_R_A3, MIPS_R_SP, ctx->tmp_offset);
+ }
+ goto ld_skb_common;
+
+ case BPF_LD | BPF_B | BPF_IND:
+ case BPF_LD | BPF_H | BPF_IND:
+ case BPF_LD | BPF_W | BPF_IND:
+ case BPF_LD | BPF_DW | BPF_IND:
+ ctx->flags |= EBPF_SAVE_RA;
+ src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
+ if (src < 0)
+ return src;
+ ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
+ if (ts == REG_32BIT_ZERO_EX) {
+ /* sign extend */
+ emit_instr(ctx, sll, MIPS_R_A1, src, 0);
+ src = MIPS_R_A1;
+ }
+ if (insn->imm >= S16_MIN && insn->imm <= S16_MAX) {
+ emit_instr(ctx, daddiu, MIPS_R_A1, src, insn->imm);
+ } else {
+ gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+ emit_instr(ctx, daddu, MIPS_R_A1, MIPS_R_AT, src);
+ }
+ /* truncate to 32-bit int */
+ emit_instr(ctx, sll, MIPS_R_A1, MIPS_R_A1, 0);
+ emit_instr(ctx, daddiu, MIPS_R_A3, MIPS_R_SP, ctx->tmp_offset);
+ emit_instr(ctx, slt, MIPS_R_AT, MIPS_R_A1, MIPS_R_ZERO);
+
+ emit_const_to_reg(ctx, MIPS_R_T8, (u64)bpf_internal_load_pointer_neg_helper);
+ emit_const_to_reg(ctx, MIPS_R_T9, (u64)ool_skb_header_pointer);
+ emit_instr(ctx, addiu, MIPS_R_A2, MIPS_R_ZERO, size_to_len(insn));
+ emit_instr(ctx, movn, MIPS_R_T9, MIPS_R_T8, MIPS_R_AT);
+
+ld_skb_common:
+ emit_instr(ctx, jalr, MIPS_R_RA, MIPS_R_T9);
+ /* delay slot move */
+ emit_instr(ctx, daddu, MIPS_R_A0, MIPS_R_S0, MIPS_R_ZERO);
+
+ /* Check the error value */
+ b_off = b_imm(exit_idx, ctx);
+ if (is_bad_offset(b_off)) {
+ target = j_target(ctx, exit_idx);
+ if (target == (unsigned int)-1)
+ return -E2BIG;
+
+ if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) {
+ ctx->offsets[this_idx] |= OFFSETS_B_CONV;
+ ctx->long_b_conversion = 1;
+ }
+ emit_instr(ctx, bne, MIPS_R_V0, MIPS_R_ZERO, 4 * 3);
+ emit_instr(ctx, nop);
+ emit_instr(ctx, j, target);
+ emit_instr(ctx, nop);
+ } else {
+ emit_instr(ctx, beq, MIPS_R_V0, MIPS_R_ZERO, b_off);
+ emit_instr(ctx, nop);
+ }
+
+#ifdef __BIG_ENDIAN
+ need_swap = false;
+#else
+ need_swap = true;
+#endif
+ dst = MIPS_R_V0;
+ switch (BPF_SIZE(insn->code)) {
+ case BPF_B:
+ emit_instr(ctx, lbu, dst, 0, MIPS_R_V0);
+ break;
+ case BPF_H:
+ emit_instr(ctx, lhu, dst, 0, MIPS_R_V0);
+ if (need_swap)
+ emit_instr(ctx, wsbh, dst, dst);
+ break;
+ case BPF_W:
+ emit_instr(ctx, lw, dst, 0, MIPS_R_V0);
+ if (need_swap) {
+ emit_instr(ctx, wsbh, dst, dst);
+ emit_instr(ctx, rotr, dst, dst, 16);
+ }
+ break;
+ case BPF_DW:
+ emit_instr(ctx, ld, dst, 0, MIPS_R_V0);
+ if (need_swap) {
+ emit_instr(ctx, dsbh, dst, dst);
+ emit_instr(ctx, dshd, dst, dst);
+ }
+ break;
+ }
+
+ break;
+ case BPF_ALU | BPF_END | BPF_FROM_BE:
+ case BPF_ALU | BPF_END | BPF_FROM_LE:
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (dst < 0)
+ return dst;
+ td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
+ if (insn->imm == 64 && td == REG_32BIT)
+ emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
+
+ if (insn->imm != 64 &&
+ (td == REG_64BIT || td == REG_32BIT_ZERO_EX)) {
+ /* sign extend */
+ emit_instr(ctx, sll, dst, dst, 0);
+ }
+
+#ifdef __BIG_ENDIAN
+ need_swap = (BPF_SRC(insn->code) == BPF_FROM_LE);
+#else
+ need_swap = (BPF_SRC(insn->code) == BPF_FROM_BE);
+#endif
+ if (insn->imm == 16) {
+ if (need_swap)
+ emit_instr(ctx, wsbh, dst, dst);
+ emit_instr(ctx, andi, dst, dst, 0xffff);
+ } else if (insn->imm == 32) {
+ if (need_swap) {
+ emit_instr(ctx, wsbh, dst, dst);
+ emit_instr(ctx, rotr, dst, dst, 16);
+ }
+ } else { /* 64-bit */
+ if (need_swap) {
+ emit_instr(ctx, dsbh, dst, dst);
+ emit_instr(ctx, dshd, dst, dst);
+ }
+ }
+ break;
+
+ case BPF_ST | BPF_B | BPF_MEM:
+ case BPF_ST | BPF_H | BPF_MEM:
+ case BPF_ST | BPF_W | BPF_MEM:
+ case BPF_ST | BPF_DW | BPF_MEM:
+ if (insn->dst_reg == BPF_REG_10) {
+ ctx->flags |= EBPF_SEEN_FP;
+ dst = MIPS_R_SP;
+ mem_off = insn->off + MAX_BPF_STACK;
+ } else {
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (dst < 0)
+ return dst;
+ mem_off = insn->off;
+ }
+ gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+ switch (BPF_SIZE(insn->code)) {
+ case BPF_B:
+ emit_instr(ctx, sb, MIPS_R_AT, mem_off, dst);
+ break;
+ case BPF_H:
+ emit_instr(ctx, sh, MIPS_R_AT, mem_off, dst);
+ break;
+ case BPF_W:
+ emit_instr(ctx, sw, MIPS_R_AT, mem_off, dst);
+ break;
+ case BPF_DW:
+ emit_instr(ctx, sd, MIPS_R_AT, mem_off, dst);
+ break;
+ }
+ break;
+
+ case BPF_LDX | BPF_B | BPF_MEM:
+ case BPF_LDX | BPF_H | BPF_MEM:
+ case BPF_LDX | BPF_W | BPF_MEM:
+ case BPF_LDX | BPF_DW | BPF_MEM:
+ if (insn->src_reg == BPF_REG_10) {
+ ctx->flags |= EBPF_SEEN_FP;
+ src = MIPS_R_SP;
+ mem_off = insn->off + MAX_BPF_STACK;
+ } else {
+ src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
+ if (src < 0)
+ return src;
+ mem_off = insn->off;
+ }
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (dst < 0)
+ return dst;
+ switch (BPF_SIZE(insn->code)) {
+ case BPF_B:
+ emit_instr(ctx, lbu, dst, mem_off, src);
+ break;
+ case BPF_H:
+ emit_instr(ctx, lhu, dst, mem_off, src);
+ break;
+ case BPF_W:
+ emit_instr(ctx, lw, dst, mem_off, src);
+ break;
+ case BPF_DW:
+ emit_instr(ctx, ld, dst, mem_off, src);
+ break;
+ }
+ break;
+
+ case BPF_STX | BPF_B | BPF_MEM:
+ case BPF_STX | BPF_H | BPF_MEM:
+ case BPF_STX | BPF_W | BPF_MEM:
+ case BPF_STX | BPF_DW | BPF_MEM:
+ case BPF_STX | BPF_W | BPF_XADD:
+ case BPF_STX | BPF_DW | BPF_XADD:
+ if (insn->dst_reg == BPF_REG_10) {
+ ctx->flags |= EBPF_SEEN_FP;
+ dst = MIPS_R_SP;
+ mem_off = insn->off + MAX_BPF_STACK;
+ } else {
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (dst < 0)
+ return dst;
+ mem_off = insn->off;
+ }
+ src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
+ if (src < 0)
+ return src;
+ if (BPF_MODE(insn->code) == BPF_XADD) {
+ switch (BPF_SIZE(insn->code)) {
+ case BPF_W:
+ if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
+ emit_instr(ctx, sll, MIPS_R_AT, src, 0);
+ src = MIPS_R_AT;
+ }
+ emit_instr(ctx, ll, MIPS_R_T8, mem_off, dst);
+ emit_instr(ctx, addu, MIPS_R_T8, MIPS_R_T8, src);
+ emit_instr(ctx, sc, MIPS_R_T8, mem_off, dst);
+ /*
+ * On failure back up to the LL (-4 instructions
+ * of 4 bytes each).
+ */
+ emit_instr(ctx, beq, MIPS_R_T8, MIPS_R_ZERO, -4 * 4);
+ emit_instr(ctx, nop);
+ break;
+ case BPF_DW:
+ if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
+ emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO);
+ emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32);
+ src = MIPS_R_AT;
+ }
+ emit_instr(ctx, lld, MIPS_R_T8, mem_off, dst);
+ emit_instr(ctx, daddu, MIPS_R_T8, MIPS_R_T8, src);
+ emit_instr(ctx, scd, MIPS_R_T8, mem_off, dst);
+ emit_instr(ctx, beq, MIPS_R_T8, MIPS_R_ZERO, -4 * 4);
+ emit_instr(ctx, nop);
+ break;
+ }
+ } else { /* BPF_MEM */
+ switch (BPF_SIZE(insn->code)) {
+ case BPF_B:
+ emit_instr(ctx, sb, src, mem_off, dst);
+ break;
+ case BPF_H:
+ emit_instr(ctx, sh, src, mem_off, dst);
+ break;
+ case BPF_W:
+ emit_instr(ctx, sw, src, mem_off, dst);
+ break;
+ case BPF_DW:
+ if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
+ emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO);
+ emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32);
+ src = MIPS_R_AT;
+ }
+ emit_instr(ctx, sd, src, mem_off, dst);
+ break;
+ }
+ }
+ break;
+
+ default:
+ pr_err("NOT HANDLED %d - (%02x)\n",
+ this_idx, (unsigned int)insn->code);
+ return -EINVAL;
+ }
+ return 1;
+}
+
+#define RVT_VISITED_MASK 0xc000000000000000ull
+#define RVT_FALL_THROUGH 0x4000000000000000ull
+#define RVT_BRANCH_TAKEN 0x8000000000000000ull
+#define RVT_DONE (RVT_FALL_THROUGH | RVT_BRANCH_TAKEN)
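
The top two bits of each per-instruction word record which control-flow edges out of that instruction have already been walked; the remaining bits still carry the packed register types. A tiny standalone check of the flag layout, reusing the macro values above:

#include <assert.h>
#include <stdint.h>

#define RVT_VISITED_MASK  0xc000000000000000ull
#define RVT_FALL_THROUGH  0x4000000000000000ull
#define RVT_BRANCH_TAKEN  0x8000000000000000ull
#define RVT_DONE          (RVT_FALL_THROUGH | RVT_BRANCH_TAKEN)

int main(void)
{
	uint64_t rvt = 0;

	rvt |= RVT_FALL_THROUGH;	/* fall-through edge walked */
	assert((rvt & RVT_VISITED_MASK) != RVT_DONE);
	rvt |= RVT_BRANCH_TAKEN;	/* taken edge walked as well */
	assert((rvt & RVT_VISITED_MASK) == RVT_DONE);
	return 0;
}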
+
+static int build_int_body(struct jit_ctx *ctx)
+{
+ const struct bpf_prog *prog = ctx->skf;
+ const struct bpf_insn *insn;
+ int i, r;
+
+ for (i = 0; i < prog->len; ) {
+ insn = prog->insnsi + i;
+ if ((ctx->reg_val_types[i] & RVT_VISITED_MASK) == 0) {
+ /* dead instruction, don't emit it. */
+ i++;
+ continue;
+ }
+
+ if (ctx->target == NULL)
+ ctx->offsets[i] = (ctx->offsets[i] & OFFSETS_B_CONV) | (ctx->idx * 4);
+
+ r = build_one_insn(insn, ctx, i, prog->len);
+ if (r < 0)
+ return r;
+ i += r;
+ }
+ /* epilogue offset */
+ if (ctx->target == NULL)
+ ctx->offsets[i] = ctx->idx * 4;
+
+ /*
+ * All exits have an offset of the epilogue, some offsets may
+ * not have been set due to branch-around threading, so set
+ * them now.
+ */
+ if (ctx->target == NULL)
+ for (i = 0; i < prog->len; i++) {
+ insn = prog->insnsi + i;
+ if (insn->code == (BPF_JMP | BPF_EXIT))
+ ctx->offsets[i] = ctx->idx * 4;
+ }
+ return 0;
+}
+
+/* return the last idx processed, or negative for error */
+static int reg_val_propagate_range(struct jit_ctx *ctx, u64 initial_rvt,
+ int start_idx, bool follow_taken)
+{
+ const struct bpf_prog *prog = ctx->skf;
+ const struct bpf_insn *insn;
+ u64 exit_rvt = initial_rvt;
+ u64 *rvt = ctx->reg_val_types;
+ int idx;
+ int reg;
+
+ for (idx = start_idx; idx < prog->len; idx++) {
+ rvt[idx] = (rvt[idx] & RVT_VISITED_MASK) | exit_rvt;
+ insn = prog->insnsi + idx;
+ switch (BPF_CLASS(insn->code)) {
+ case BPF_ALU:
+ switch (BPF_OP(insn->code)) {
+ case BPF_ADD:
+ case BPF_SUB:
+ case BPF_MUL:
+ case BPF_DIV:
+ case BPF_OR:
+ case BPF_AND:
+ case BPF_LSH:
+ case BPF_RSH:
+ case BPF_NEG:
+ case BPF_MOD:
+ case BPF_XOR:
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
+ break;
+ case BPF_MOV:
+ if (BPF_SRC(insn->code)) {
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
+ } else {
+ /* IMM to REG move */
+ if (insn->imm >= 0)
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
+ else
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
+ }
+ break;
+ case BPF_END:
+ if (insn->imm == 64)
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
+ else if (insn->imm == 32)
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
+ else /* insn->imm == 16 */
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
+ break;
+ }
+ rvt[idx] |= RVT_DONE;
+ break;
+ case BPF_ALU64:
+ switch (BPF_OP(insn->code)) {
+ case BPF_MOV:
+ if (BPF_SRC(insn->code)) {
+ /* REG to REG move */
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
+ } else {
+ /* IMM to REG move */
+ if (insn->imm >= 0)
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
+ else
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT_32BIT);
+ }
+ break;
+ default:
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
+ }
+ rvt[idx] |= RVT_DONE;
+ break;
+ case BPF_LD:
+ switch (BPF_SIZE(insn->code)) {
+ case BPF_DW:
+ if (BPF_MODE(insn->code) == BPF_IMM) {
+ s64 val;
+
+ val = (s64)((u32)insn->imm | ((u64)(insn + 1)->imm << 32));
+ if (val > 0 && val <= S32_MAX)
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
+ else if (val >= S32_MIN && val <= S32_MAX)
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT_32BIT);
+ else
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
+ rvt[idx] |= RVT_DONE;
+ idx++;
+ } else {
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
+ }
+ break;
+ case BPF_B:
+ case BPF_H:
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
+ break;
+ case BPF_W:
+ if (BPF_MODE(insn->code) == BPF_IMM)
+ set_reg_val_type(&exit_rvt, insn->dst_reg,
+ insn->imm >= 0 ? REG_32BIT_POS : REG_32BIT);
+ else
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
+ break;
+ }
+ rvt[idx] |= RVT_DONE;
+ break;
+ case BPF_LDX:
+ switch (BPF_SIZE(insn->code)) {
+ case BPF_DW:
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
+ break;
+ case BPF_B:
+ case BPF_H:
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
+ break;
+ case BPF_W:
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
+ break;
+ }
+ rvt[idx] |= RVT_DONE;
+ break;
+ case BPF_JMP:
+ switch (BPF_OP(insn->code)) {
+ case BPF_EXIT:
+ rvt[idx] = RVT_DONE | exit_rvt;
+ rvt[prog->len] = exit_rvt;
+ return idx;
+ case BPF_JA:
+ rvt[idx] |= RVT_DONE;
+ idx += insn->off;
+ break;
+ case BPF_JEQ:
+ case BPF_JGT:
+ case BPF_JGE:
+ case BPF_JLT:
+ case BPF_JLE:
+ case BPF_JSET:
+ case BPF_JNE:
+ case BPF_JSGT:
+ case BPF_JSGE:
+ case BPF_JSLT:
+ case BPF_JSLE:
+ if (follow_taken) {
+ rvt[idx] |= RVT_BRANCH_TAKEN;
+ idx += insn->off;
+ follow_taken = false;
+ } else {
+ rvt[idx] |= RVT_FALL_THROUGH;
+ }
+ break;
+ case BPF_CALL:
+ set_reg_val_type(&exit_rvt, BPF_REG_0, REG_64BIT);
+ /* Upon call return, argument registers are clobbered. */
+ for (reg = BPF_REG_0; reg <= BPF_REG_5; reg++)
+ set_reg_val_type(&exit_rvt, reg, REG_64BIT);
+
+ rvt[idx] |= RVT_DONE;
+ break;
+ default:
+ WARN(1, "Unhandled BPF_JMP case.\n");
+ rvt[idx] |= RVT_DONE;
+ break;
+ }
+ break;
+ default:
+ rvt[idx] |= RVT_DONE;
+ break;
+ }
+ }
+ return idx;
+}
+
+/*
+ * Track the value range (i.e. 32-bit vs. 64-bit) of each register at
+ * each eBPF insn. This allows unneeded sign and zero extension
+ * operations to be omitted.
+ *
+ * Doesn't yet handle confluence of control paths with conflicting
+ * ranges, but it is good enough for most sane code.
+ */
+static int reg_val_propagate(struct jit_ctx *ctx)
+{
+ const struct bpf_prog *prog = ctx->skf;
+ u64 exit_rvt;
+ int reg;
+ int i;
+
+ /*
+ * 11 registers * 3 bits/reg leaves top bits free for other
+ * uses. Bits 62..63 are used to see if we have visited an insn.
+ */
+ exit_rvt = 0;
+
+ /* Upon entry, argument registers are 64-bit. */
+ for (reg = BPF_REG_1; reg <= BPF_REG_5; reg++)
+ set_reg_val_type(&exit_rvt, reg, REG_64BIT);
+
+ /*
+ * First follow all conditional branches on the fall-through
+ * edge of control flow.
+ */
+ reg_val_propagate_range(ctx, exit_rvt, 0, false);
+restart_search:
+ /*
+ * Then repeatedly find the first conditional branch where
+ * both edges of control flow have not been taken, and follow
+ * the branch taken edge. We will end up restarting the
+ * search once per conditional branch insn.
+ */
+ for (i = 0; i < prog->len; i++) {
+ u64 rvt = ctx->reg_val_types[i];
+
+ if ((rvt & RVT_VISITED_MASK) == RVT_DONE ||
+ (rvt & RVT_VISITED_MASK) == 0)
+ continue;
+ if ((rvt & RVT_VISITED_MASK) == RVT_FALL_THROUGH) {
+ reg_val_propagate_range(ctx, rvt & ~RVT_VISITED_MASK, i, true);
+ } else { /* RVT_BRANCH_TAKEN */
+ WARN(1, "Unexpected RVT_BRANCH_TAKEN case.\n");
+ reg_val_propagate_range(ctx, rvt & ~RVT_VISITED_MASK, i, false);
+ }
+ goto restart_search;
+ }
+ /*
+ * Eventually all conditional branches have been followed on
+ * both branches and we are done. Any insn that has not been
+ * visited at this point is dead.
+ */
+
+ return 0;
+}
+
+static void jit_fill_hole(void *area, unsigned int size)
+{
+ u32 *p;
+
+ /* We are guaranteed to have aligned memory. */
+ for (p = area; size >= sizeof(u32); size -= sizeof(u32))
+ uasm_i_break(&p, BRK_BUG); /* Increments p */
+}
+
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+{
+ struct bpf_prog *orig_prog = prog;
+ bool tmp_blinded = false;
+ struct bpf_prog *tmp;
+ struct bpf_binary_header *header = NULL;
+ struct jit_ctx ctx;
+ unsigned int image_size;
+ u8 *image_ptr;
+
+ if (!bpf_jit_enable || !cpu_has_mips64r2)
+ return prog;
+
+ tmp = bpf_jit_blind_constants(prog);
+ /* If blinding was requested and we failed during blinding,
+ * we must fall back to the interpreter.
+ */
+ if (IS_ERR(tmp))
+ return orig_prog;
+ if (tmp != prog) {
+ tmp_blinded = true;
+ prog = tmp;
+ }
+
+ memset(&ctx, 0, sizeof(ctx));
+
+ preempt_disable();
+ switch (current_cpu_type()) {
+ case CPU_CAVIUM_OCTEON:
+ case CPU_CAVIUM_OCTEON_PLUS:
+ case CPU_CAVIUM_OCTEON2:
+ case CPU_CAVIUM_OCTEON3:
+ ctx.use_bbit_insns = 1;
+ break;
+ default:
+ ctx.use_bbit_insns = 0;
+ }
+ preempt_enable();
+
+ ctx.offsets = kcalloc(prog->len + 1, sizeof(*ctx.offsets), GFP_KERNEL);
+ if (ctx.offsets == NULL)
+ goto out_err;
+
+ ctx.reg_val_types = kcalloc(prog->len + 1, sizeof(*ctx.reg_val_types), GFP_KERNEL);
+ if (ctx.reg_val_types == NULL)
+ goto out_err;
+
+ ctx.skf = prog;
+
+ if (reg_val_propagate(&ctx))
+ goto out_err;
+
+ /*
+ * First pass discovers used resources and instruction offsets
+ * assuming short branches are used.
+ */
+ if (build_int_body(&ctx))
+ goto out_err;
+
+ /*
+ * If no calls are made (EBPF_SAVE_RA not set), the tail call
+ * count can stay in $v1, else we must save it in $s4.
+ */
+ if (ctx.flags & EBPF_SEEN_TC) {
+ if (ctx.flags & EBPF_SAVE_RA)
+ ctx.flags |= EBPF_SAVE_S4;
+ else
+ ctx.flags |= EBPF_TCC_IN_V1;
+ }
+
+ /*
+ * Second pass generates offsets; if any branches are out of
+ * range, a jump-around long sequence is generated and we have
+ * to try again from the beginning to generate the new
+ * offsets. This is done until no additional conversions are
+ * necessary.
+ */
+ do {
+ ctx.idx = 0;
+ ctx.gen_b_offsets = 1;
+ ctx.long_b_conversion = 0;
+ if (gen_int_prologue(&ctx))
+ goto out_err;
+ if (build_int_body(&ctx))
+ goto out_err;
+ if (build_int_epilogue(&ctx, MIPS_R_RA))
+ goto out_err;
+ } while (ctx.long_b_conversion);
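
Each long-branch conversion grows the emitted code, which can in turn push other branches out of range, so the pass is repeated until a full iteration completes with long_b_conversion still clear. A bare-bones illustration of that fixed-point loop; the pass body here is a stand-in, not the real emitter:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	bool long_b_conversion;
	int passes = 0;

	do {
		long_b_conversion = false;
		passes++;
		/* a real pass would re-emit the body and set the flag
		 * whenever a short branch had to become a jump-around */
		if (passes < 3)
			long_b_conversion = true;
	} while (long_b_conversion);

	printf("converged after %d passes\n", passes);	/* prints 3 */
	return 0;
}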
+
+ image_size = 4 * ctx.idx;
+
+ header = bpf_jit_binary_alloc(image_size, &image_ptr,
+ sizeof(u32), jit_fill_hole);
+ if (header == NULL)
+ goto out_err;
+
+ ctx.target = (u32 *)image_ptr;
+
+ /* Third pass generates the code */
+ ctx.idx = 0;
+ if (gen_int_prologue(&ctx))
+ goto out_err;
+ if (build_int_body(&ctx))
+ goto out_err;
+ if (build_int_epilogue(&ctx, MIPS_R_RA))
+ goto out_err;
+
+ /* Update the icache */
+ flush_icache_range((unsigned long)ctx.target,
+ (unsigned long)(ctx.target + ctx.idx * sizeof(u32)));
+
+ if (bpf_jit_enable > 1)
+ /* Dump JIT code */
+ bpf_jit_dump(prog->len, image_size, 2, ctx.target);
+
+ bpf_jit_binary_lock_ro(header);
+ prog->bpf_func = (void *)ctx.target;
+ prog->jited = 1;
+ prog->jited_len = image_size;
+out_normal:
+ if (tmp_blinded)
+ bpf_jit_prog_release_other(prog, prog == orig_prog ?
+ tmp : orig_prog);
+ kfree(ctx.offsets);
+ kfree(ctx.reg_val_types);
+
+ return prog;
+
+out_err:
+ prog = orig_prog;
+ if (header)
+ bpf_jit_binary_free(header);
+ goto out_normal;
+}
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c
index bd67ac74fe2d..9632436d74d7 100644
--- a/arch/mips/pci/pci.c
+++ b/arch/mips/pci/pci.c
@@ -28,16 +28,15 @@ EXPORT_SYMBOL(PCIBIOS_MIN_MEM);
static int __init pcibios_set_cache_line_size(void)
{
- struct cpuinfo_mips *c = &current_cpu_data;
unsigned int lsize;
/*
* Set PCI cacheline size to that of the highest level in the
* cache hierarchy.
*/
- lsize = c->dcache.linesz;
- lsize = c->scache.linesz ? : lsize;
- lsize = c->tcache.linesz ? : lsize;
+ lsize = cpu_dcache_line_size();
+ lsize = cpu_scache_line_size() ? : lsize;
+ lsize = cpu_tcache_line_size() ? : lsize;
BUG_ON(!lsize);
diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c
index 094a0ee4af46..9be8b08ae46b 100644
--- a/arch/mips/ralink/mt7620.c
+++ b/arch/mips/ralink/mt7620.c
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/bug.h>
#include <asm/mipsregs.h>
#include <asm/mach-ralink/ralink_regs.h>
diff --git a/arch/mips/vdso/gettimeofday.c b/arch/mips/vdso/gettimeofday.c
index 974276e828b2..e2690d7ca4dd 100644
--- a/arch/mips/vdso/gettimeofday.c
+++ b/arch/mips/vdso/gettimeofday.c
@@ -35,7 +35,8 @@ static __always_inline long gettimeofday_fallback(struct timeval *_tv,
" syscall\n"
: "=r" (ret), "=r" (error)
: "r" (tv), "r" (tz), "r" (nr)
- : "memory");
+ : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
+ "$14", "$15", "$24", "$25", "hi", "lo", "memory");
return error ? -ret : ret;
}
@@ -55,7 +56,8 @@ static __always_inline long clock_gettime_fallback(clockid_t _clkid,
" syscall\n"
: "=r" (ret), "=r" (error)
: "r" (clkid), "r" (ts), "r" (nr)
- : "memory");
+ : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
+ "$14", "$15", "$24", "$25", "hi", "lo", "memory");
return error ? -ret : ret;
}
diff --git a/arch/mn10300/include/asm/bug.h b/arch/mn10300/include/asm/bug.h
index aa6a38886391..811414fb002d 100644
--- a/arch/mn10300/include/asm/bug.h
+++ b/arch/mn10300/include/asm/bug.h
@@ -21,7 +21,7 @@ do { \
asm volatile( \
" syscall 15 \n" \
"0: \n" \
- " .section __bug_table,\"a\" \n" \
+ " .section __bug_table,\"aw\" \n" \
" .long 0b,%0,%1 \n" \
" .previous \n" \
: \
diff --git a/arch/mn10300/include/asm/spinlock.h b/arch/mn10300/include/asm/spinlock.h
index 9c7b8f7942d8..fe413b41df6c 100644
--- a/arch/mn10300/include/asm/spinlock.h
+++ b/arch/mn10300/include/asm/spinlock.h
@@ -26,11 +26,6 @@
#define arch_spin_is_locked(x) (*(volatile signed char *)(&(x)->slock) != 0)
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
- smp_cond_load_acquire(&lock->slock, !VAL);
-}
-
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
asm volatile(
diff --git a/arch/mn10300/include/uapi/asm/socket.h b/arch/mn10300/include/uapi/asm/socket.h
index c710db354ff2..ac82a3f26dbf 100644
--- a/arch/mn10300/include/uapi/asm/socket.h
+++ b/arch/mn10300/include/uapi/asm/socket.h
@@ -102,4 +102,6 @@
#define SO_PEERGROUPS 59
+#define SO_ZEROCOPY 60
+
#endif /* _ASM_SOCKET_H */
diff --git a/arch/openrisc/include/asm/futex.h b/arch/openrisc/include/asm/futex.h
index 778087341977..8fed278a24b8 100644
--- a/arch/openrisc/include/asm/futex.h
+++ b/arch/openrisc/include/asm/futex.h
@@ -30,20 +30,10 @@
})
static inline int
-futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
-
pagefault_disable();
switch (op) {
@@ -68,30 +58,9 @@ futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ:
- ret = (oldval == cmparg);
- break;
- case FUTEX_OP_CMP_NE:
- ret = (oldval != cmparg);
- break;
- case FUTEX_OP_CMP_LT:
- ret = (oldval < cmparg);
- break;
- case FUTEX_OP_CMP_GE:
- ret = (oldval >= cmparg);
- break;
- case FUTEX_OP_CMP_LE:
- ret = (oldval <= cmparg);
- break;
- case FUTEX_OP_CMP_GT:
- ret = (oldval > cmparg);
- break;
- default:
- ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 531da9eb8f43..13648519bd41 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -9,6 +9,9 @@ config PARISC
select ARCH_WANT_FRAME_POINTERS
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_STRICT_KERNEL_RWX
+ select ARCH_HAS_UBSAN_SANITIZE_ALL
+ select ARCH_WANTS_UBSAN_NO_NULL
+ select ARCH_SUPPORTS_MEMORY_FAILURE
select RTC_CLASS
select RTC_DRV_GENERIC
select INIT_ALL_POSSIBLE
@@ -17,6 +20,12 @@ config PARISC
select BUG
select BUILDTIME_EXTABLE_SORT
select HAVE_PERF_EVENTS
+ select HAVE_KERNEL_BZIP2
+ select HAVE_KERNEL_GZIP
+ select HAVE_KERNEL_LZ4
+ select HAVE_KERNEL_LZMA
+ select HAVE_KERNEL_LZO
+ select HAVE_KERNEL_XZ
select GENERIC_ATOMIC64 if !64BIT
select GENERIC_IRQ_PROBE
select GENERIC_PCI_IOMAP
@@ -47,6 +56,9 @@ config PARISC
and later HP3000 series). The PA-RISC Linux project home page is
at <http://www.parisc-linux.org/>.
+config CPU_BIG_ENDIAN
+ def_bool y
+
config MMU
def_bool y
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index 75cb451b1f03..58fae5d2449d 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -24,15 +24,20 @@ KBUILD_DEFCONFIG := default_defconfig
NM = sh $(srctree)/arch/parisc/nm
CHECKFLAGS += -D__hppa__=1
LIBGCC = $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
+export LIBGCC
ifdef CONFIG_64BIT
UTS_MACHINE := parisc64
CHECKFLAGS += -D__LP64__=1 -m64
CC_ARCHES = hppa64
+LD_BFD := elf64-hppa-linux
else # 32-bit
CC_ARCHES = hppa hppa2.0 hppa1.1
+LD_BFD := elf32-hppa-linux
endif
+export LD_BFD
+
ifneq ($(SUBARCH),$(UTS_MACHINE))
ifeq ($(CROSS_COMPILE),)
CC_SUFFIXES = linux linux-gnu unknown-linux-gnu
@@ -88,6 +93,8 @@ libs-y += arch/parisc/lib/ $(LIBGCC)
drivers-$(CONFIG_OPROFILE) += arch/parisc/oprofile/
+boot := arch/parisc/boot
+
PALO := $(shell if (which palo 2>&1); then : ; \
elif [ -x /sbin/palo ]; then echo /sbin/palo; \
fi)
@@ -116,11 +123,14 @@ INSTALL_TARGETS = zinstall install
PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS)
-bzImage zImage: vmlinuz
+zImage: vmlinuz
Image: vmlinux
-vmlinuz: vmlinux
- @gzip -cf -9 $< > $@
+bzImage: vmlinux
+ $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+
+vmlinuz: bzImage
+ $(OBJCOPY) $(boot)/bzImage $@
install:
$(CONFIG_SHELL) $(src)/arch/parisc/install.sh \
diff --git a/arch/parisc/boot/.gitignore b/arch/parisc/boot/.gitignore
new file mode 100644
index 000000000000..017d5912ad2d
--- /dev/null
+++ b/arch/parisc/boot/.gitignore
@@ -0,0 +1,2 @@
+image
+bzImage
diff --git a/arch/parisc/boot/Makefile b/arch/parisc/boot/Makefile
new file mode 100644
index 000000000000..cad68a584884
--- /dev/null
+++ b/arch/parisc/boot/Makefile
@@ -0,0 +1,26 @@
+#
+# Makefile for the linux parisc-specific parts of the boot image creator.
+#
+
+COMPILE_VERSION := __linux_compile_version_id__`hostname | \
+ tr -c '[0-9A-Za-z]' '_'`__`date | \
+ tr -c '[0-9A-Za-z]' '_'`_t
+
+ccflags-y := -DCOMPILE_VERSION=$(COMPILE_VERSION) -gstabs -I.
+
+targets := image
+targets += bzImage
+subdir- := compressed
+
+$(obj)/image: vmlinux FORCE
+ $(call if_changed,objcopy)
+
+$(obj)/bzImage: $(obj)/compressed/vmlinux FORCE
+ $(call if_changed,objcopy)
+
+$(obj)/compressed/vmlinux: FORCE
+ $(Q)$(MAKE) $(build)=$(obj)/compressed $@
+
+install: $(CONFIGURE) $(obj)/bzImage
+ sh -x $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/bzImage \
+ System.map "$(INSTALL_PATH)"
diff --git a/arch/parisc/boot/compressed/.gitignore b/arch/parisc/boot/compressed/.gitignore
new file mode 100644
index 000000000000..ae06b9b4c02f
--- /dev/null
+++ b/arch/parisc/boot/compressed/.gitignore
@@ -0,0 +1,3 @@
+sizes.h
+vmlinux
+vmlinux.lds
diff --git a/arch/parisc/boot/compressed/Makefile b/arch/parisc/boot/compressed/Makefile
new file mode 100644
index 000000000000..5450a11c9d10
--- /dev/null
+++ b/arch/parisc/boot/compressed/Makefile
@@ -0,0 +1,86 @@
+#
+# linux/arch/parisc/boot/compressed/Makefile
+#
+# create a compressed self-extracting vmlinux image from the original vmlinux
+#
+
+KCOV_INSTRUMENT := n
+GCOV_PROFILE := n
+UBSAN_SANITIZE := n
+
+targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
+targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
+targets += misc.o piggy.o sizes.h head.o real2.o firmware.o
+
+KBUILD_CFLAGS := -D__KERNEL__ -O2 -DBOOTLOADER
+KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
+KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks
+KBUILD_CFLAGS += -fno-PIE -mno-space-regs -mdisable-fpregs
+ifndef CONFIG_64BIT
+KBUILD_CFLAGS += -mfast-indirect-calls
+endif
+
+OBJECTS += $(obj)/head.o $(obj)/real2.o $(obj)/firmware.o $(obj)/misc.o $(obj)/piggy.o
+
+# LDFLAGS_vmlinux := -X --whole-archive -e startup -T
+LDFLAGS_vmlinux := -X -e startup --as-needed -T
+$(obj)/vmlinux: $(obj)/vmlinux.lds $(OBJECTS) $(LIBGCC)
+ $(call if_changed,ld)
+
+sed-sizes := -e 's/^\([0-9a-fA-F]*\) . \(__bss_start\|_end\|parisc_kernel_start\)$$/\#define SZ\2 0x\1/p'
+
+quiet_cmd_sizes = GEN $@
+ cmd_sizes = $(NM) $< | sed -n $(sed-sizes) > $@
+
+$(obj)/sizes.h: vmlinux
+ $(call if_changed,sizes)
+
+AFLAGS_head.o += -I$(objtree)/$(obj) -DBOOTLOADER
+$(obj)/head.o: $(obj)/sizes.h
+
+CFLAGS_misc.o += -I$(objtree)/$(obj)
+$(obj)/misc.o: $(obj)/sizes.h
+
+$(obj)/firmware.o: $(obj)/firmware.c
+$(obj)/firmware.c: $(srctree)/arch/$(SRCARCH)/kernel/firmware.c
+ $(call cmd,shipped)
+
+AFLAGS_real2.o += -DBOOTLOADER
+$(obj)/real2.o: $(obj)/real2.S
+$(obj)/real2.S: $(srctree)/arch/$(SRCARCH)/kernel/real2.S
+ $(call cmd,shipped)
+
+$(obj)/misc.o: $(obj)/sizes.h
+
+CPPFLAGS_vmlinux.lds += -I$(objtree)/$(obj) -DBOOTLOADER
+$(obj)/vmlinux.lds: $(obj)/sizes.h
+
+OBJCOPYFLAGS_vmlinux.bin := -O binary -R .comment -S
+$(obj)/vmlinux.bin: vmlinux
+ $(call if_changed,objcopy)
+
+vmlinux.bin.all-y := $(obj)/vmlinux.bin
+
+suffix-$(CONFIG_KERNEL_GZIP) := gz
+suffix-$(CONFIG_KERNEL_BZIP2) := bz2
+suffix-$(CONFIG_KERNEL_LZ4) := lz4
+suffix-$(CONFIG_KERNEL_LZMA) := lzma
+suffix-$(CONFIG_KERNEL_LZO) := lzo
+suffix-$(CONFIG_KERNEL_XZ) := xz
+
+$(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y)
+ $(call if_changed,gzip)
+$(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y)
+ $(call if_changed,bzip2)
+$(obj)/vmlinux.bin.lz4: $(vmlinux.bin.all-y)
+ $(call if_changed,lz4)
+$(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y)
+ $(call if_changed,lzma)
+$(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y)
+ $(call if_changed,lzo)
+$(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y)
+ $(call if_changed,xzkern)
+
+LDFLAGS_piggy.o := -r --format binary --oformat $(LD_BFD) -T
+$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix-y)
+ $(call if_changed,ld)
diff --git a/arch/parisc/boot/compressed/head.S b/arch/parisc/boot/compressed/head.S
new file mode 100644
index 000000000000..5aba20fa48aa
--- /dev/null
+++ b/arch/parisc/boot/compressed/head.S
@@ -0,0 +1,85 @@
+/*
+ * Startup glue code to uncompress the kernel
+ *
+ * (C) 2017 Helge Deller <deller@gmx.de>
+ */
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/page.h>
+#include <asm/psw.h>
+#include <asm/pdc.h>
+#include <asm/assembly.h>
+#include "sizes.h"
+
+#define BOOTADDR(x) (x)
+
+#ifndef CONFIG_64BIT
+ .import $global$ /* forward declaration */
+#endif /*!CONFIG_64BIT*/
+
+ __HEAD
+
+ENTRY(startup)
+ .level LEVEL
+
+#define PSW_W_SM 0x200
+#define PSW_W_BIT 36
+
+ ;! nuke the W bit, saving original value
+ .level 2.0
+ rsm PSW_W_SM, %r1
+
+ .level 1.1
+ extrw,u %r1, PSW_W_BIT-32, 1, %r1
+ copy %r1, %arg0
+
+ /* Make sure sr4-sr7 are set to zero for the kernel address space */
+ mtsp %r0,%sr4
+ mtsp %r0,%sr5
+ mtsp %r0,%sr6
+ mtsp %r0,%sr7
+
+ /* Clear BSS */
+
+ .import _bss,data
+ .import _ebss,data
+
+ load32 BOOTADDR(_bss),%r3
+ load32 BOOTADDR(_ebss),%r4
+ ldo FRAME_SIZE(%r4),%sp /* stack at end of bss */
+$bss_loop:
+ cmpb,<<,n %r3,%r4,$bss_loop
+ stw,ma %r0,4(%r3)
+
+ /* Initialize the global data pointer */
+ loadgp
+
+ /* arg0..arg4 were set by palo. */
+ copy %arg1, %r6 /* command line */
+ copy %arg2, %r7 /* rd-start */
+ copy %arg3, %r8 /* rd-end */
+ load32 BOOTADDR(decompress_kernel),%r3
+
+#ifdef CONFIG_64BIT
+ .level LEVEL
+ ssm PSW_W_SM, %r0 /* set W-bit */
+ depdi 0, 31, 32, %r3
+#endif
+ load32 BOOTADDR(startup_continue), %r2
+ bv,n 0(%r3)
+
+startup_continue:
+#ifdef CONFIG_64BIT
+ .level LEVEL
+ rsm PSW_W_SM, %r0 /* clear W-bit */
+#endif
+
+ load32 KERNEL_BINARY_TEXT_START, %arg0 /* free mem */
+ copy %r6, %arg1 /* command line */
+ copy %r7, %arg2 /* rd-start */
+ copy %r8, %arg3 /* rd-end */
+
+ bv,n 0(%ret0)
+END(startup)
diff --git a/arch/parisc/boot/compressed/misc.c b/arch/parisc/boot/compressed/misc.c
new file mode 100644
index 000000000000..13a4bf9ac4da
--- /dev/null
+++ b/arch/parisc/boot/compressed/misc.c
@@ -0,0 +1,301 @@
+/*
+ * Definitions and wrapper functions for kernel decompressor
+ *
+ * (C) 2017 Helge Deller <deller@gmx.de>
+ */
+
+#include <linux/uaccess.h>
+#include <asm/unaligned.h>
+#include <asm/page.h>
+#include "sizes.h"
+
+/*
+ * gzip declarations
+ */
+#define STATIC static
+
+#undef memmove
+#define memmove memmove
+#define memzero(s, n) memset((s), 0, (n))
+
+#define malloc malloc_gzip
+#define free free_gzip
+
+/* Symbols defined by linker scripts */
+extern char input_data[];
+extern int input_len;
+extern __le32 output_len; /* at unaligned address, little-endian */
+extern char _text, _end;
+extern char _bss, _ebss;
+extern char _startcode_end;
+extern void startup_continue(void *entry, unsigned long cmdline,
+ unsigned long rd_start, unsigned long rd_end) __noreturn;
+
+void error(char *m) __noreturn;
+
+static unsigned long free_mem_ptr;
+static unsigned long free_mem_end_ptr;
+
+#ifdef CONFIG_KERNEL_GZIP
+#include "../../../../lib/decompress_inflate.c"
+#endif
+
+#ifdef CONFIG_KERNEL_BZIP2
+#include "../../../../lib/decompress_bunzip2.c"
+#endif
+
+#ifdef CONFIG_KERNEL_LZ4
+#include "../../../../lib/decompress_unlz4.c"
+#endif
+
+#ifdef CONFIG_KERNEL_LZMA
+#include "../../../../lib/decompress_unlzma.c"
+#endif
+
+#ifdef CONFIG_KERNEL_LZO
+#include "../../../../lib/decompress_unlzo.c"
+#endif
+
+#ifdef CONFIG_KERNEL_XZ
+#include "../../../../lib/decompress_unxz.c"
+#endif
+
+void *memmove(void *dest, const void *src, size_t n)
+{
+ const char *s = src;
+ char *d = dest;
+
+ if (d <= s) {
+ while (n--)
+ *d++ = *s++;
+ } else {
+ d += n;
+ s += n;
+ while (n--)
+ *--d = *--s;
+ }
+ return dest;
+}
+
+void *memset(void *s, int c, size_t count)
+{
+ char *xs = (char *)s;
+
+ while (count--)
+ *xs++ = c;
+ return s;
+}
+
+void *memcpy(void *d, const void *s, size_t len)
+{
+ char *dest = (char *)d;
+ const char *source = (const char *)s;
+
+ while (len--)
+ *dest++ = *source++;
+ return d;
+}
+
+size_t strlen(const char *s)
+{
+ const char *sc;
+
+ for (sc = s; *sc != '\0'; ++sc)
+ ;
+ return sc - s;
+}
+
+char *strchr(const char *s, int c)
+{
+ while (*s) {
+ if (*s == (char)c)
+ return (char *)s;
+ ++s;
+ }
+ return NULL;
+}
+
+int puts(const char *s)
+{
+ const char *nuline = s;
+
+ while ((nuline = strchr(s, '\n')) != NULL) {
+ if (nuline != s)
+ pdc_iodc_print(s, nuline - s);
+ pdc_iodc_print("\r\n", 2);
+ s = nuline + 1;
+ }
+ if (*s != '\0')
+ pdc_iodc_print(s, strlen(s));
+
+ return 0;
+}
+
+static int putchar(int c)
+{
+ char buf[2];
+
+ buf[0] = c;
+ buf[1] = '\0';
+ puts(buf);
+ return c;
+}
+
+void __noreturn error(char *x)
+{
+ puts("\n\n");
+ puts(x);
+ puts("\n\n -- System halted");
+ while (1) /* wait forever */
+ ;
+}
+
+static int print_hex(unsigned long num)
+{
+ const char hex[] = "0123456789abcdef";
+ char str[40];
+ int i = sizeof(str)-1;
+
+ str[i--] = '\0';
+ do {
+ str[i--] = hex[num & 0x0f];
+ num >>= 4;
+ } while (num);
+
+ str[i--] = 'x';
+ str[i] = '0';
+ puts(&str[i]);
+
+ return 0;
+}
+
+int printf(const char *fmt, ...)
+{
+ va_list args;
+ int i = 0;
+
+ va_start(args, fmt);
+
+ while (fmt[i]) {
+ if (fmt[i] != '%') {
+put:
+ putchar(fmt[i++]);
+ continue;
+ }
+
+ if (fmt[++i] == '%')
+ goto put;
+ ++i;
+ print_hex(va_arg(args, unsigned long));
+ }
+
+ va_end(args);
+ return 0;
+}
+
+/* helper functions for libgcc */
+void abort(void)
+{
+ error("aborted.");
+}
+
+#undef malloc
+void *malloc(size_t size)
+{
+ return malloc_gzip(size);
+}
+
+#undef free
+void free(void *ptr)
+{
+ return free_gzip(ptr);
+}
+
+
+static void flush_data_cache(char *start, unsigned long length)
+{
+ char *end = start + length;
+
+ do {
+ asm volatile("fdc 0(%0)" : : "r" (start));
+ asm volatile("fic 0(%%sr0,%0)" : : "r" (start));
+ start += 16;
+ } while (start < end);
+ asm volatile("fdc 0(%0)" : : "r" (end));
+
+ asm ("sync");
+}
+
+unsigned long decompress_kernel(unsigned int started_wide,
+ unsigned int command_line,
+ const unsigned int rd_start,
+ const unsigned int rd_end)
+{
+ char *output;
+ unsigned long len, len_all;
+
+#ifdef CONFIG_64BIT
+ parisc_narrow_firmware = 0;
+#endif
+
+ set_firmware_width_unlocked();
+
+ putchar('U'); /* if you get this U and no more, string storage */
+ /* in $GLOBAL$ is wrong or %dp is wrong */
+ puts("ncompressing ...\n");
+
+ output = (char *) KERNEL_BINARY_TEXT_START;
+ len_all = __pa(SZ_end) - __pa(SZparisc_kernel_start);
+
+ if ((unsigned long) &_startcode_end > (unsigned long) output)
+ error("Bootcode overlaps kernel code");
+
+ len = get_unaligned_le32(&output_len);
+ if (len > len_all)
+ error("Output len too big.");
+ else
+ memset(&output[len], 0, len_all - len);
+
+ /*
+ * Initialize free_mem_ptr and free_mem_end_ptr.
+ */
+ free_mem_ptr = (unsigned long) &_ebss;
+ free_mem_ptr += 2*1024*1024; /* leave 2 MB for stack */
+
+ /* Limit memory for bootloader to 1GB */
+ #define ARTIFICIAL_LIMIT (1*1024*1024*1024)
+ free_mem_end_ptr = PAGE0->imm_max_mem;
+ if (free_mem_end_ptr > ARTIFICIAL_LIMIT)
+ free_mem_end_ptr = ARTIFICIAL_LIMIT;
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ /* if we have a ramdisk, it is at the end of memory */
+ if (rd_start && rd_start < free_mem_end_ptr)
+ free_mem_end_ptr = rd_start;
+#endif
+
+#ifdef DEBUG
+ printf("startcode_end = %x\n", &_startcode_end);
+ printf("commandline = %x\n", command_line);
+ printf("rd_start = %x\n", rd_start);
+ printf("rd_end = %x\n", rd_end);
+
+ printf("free_ptr = %x\n", free_mem_ptr);
+ printf("free_ptr_end = %x\n", free_mem_end_ptr);
+
+ printf("input_data = %x\n", input_data);
+ printf("input_len = %x\n", input_len);
+ printf("output = %x\n", output);
+ printf("output_len = %x\n", len);
+ printf("output_max = %x\n", len_all);
+#endif
+
+ __decompress(input_data, input_len, NULL, NULL,
+ output, 0, NULL, error);
+
+ flush_data_cache(output, len);
+
+ printf("Booting kernel ...\n\n");
+
+ return (unsigned long) output;
+}
diff --git a/arch/parisc/boot/compressed/vmlinux.lds.S b/arch/parisc/boot/compressed/vmlinux.lds.S
new file mode 100644
index 000000000000..a4ce3314e78e
--- /dev/null
+++ b/arch/parisc/boot/compressed/vmlinux.lds.S
@@ -0,0 +1,101 @@
+#include <asm-generic/vmlinux.lds.h>
+#include <asm/page.h>
+#include "sizes.h"
+
+#ifndef CONFIG_64BIT
+OUTPUT_FORMAT("elf32-hppa-linux")
+OUTPUT_ARCH(hppa)
+#else
+OUTPUT_FORMAT("elf64-hppa-linux")
+OUTPUT_ARCH(hppa:hppa2.0w)
+#endif
+
+ENTRY(startup)
+
+SECTIONS
+{
+ /* palo loads at 0x60000 */
+ /* loaded kernel will move to 0x10000 */
+ . = 0xe0000; /* should not overwrite palo code */
+
+ .head.text : {
+ _head = . ;
+ HEAD_TEXT
+ _ehead = . ;
+ }
+
+ /* keep __gp below 0x1000000 */
+#ifdef CONFIG_64BIT
+ . = ALIGN(16);
+ /* Linkage tables */
+ .opd : {
+ *(.opd)
+ } PROVIDE (__gp = .);
+ .plt : {
+ *(.plt)
+ }
+ .dlt : {
+ *(.dlt)
+ }
+#endif
+ _startcode_end = .;
+
+ /* bootloader code and data start behind the area of the extracted kernel */
+ . = (SZ_end - SZparisc_kernel_start + KERNEL_BINARY_TEXT_START);
+
+ /* align on next page boundary */
+ . = ALIGN(4096);
+ .text : {
+ _text = .; /* Text */
+ *(.text)
+ *(.text.*)
+ _etext = . ;
+ }
+ . = ALIGN(8);
+ .data : {
+ _data = . ;
+ *(.data)
+ *(.data.*)
+ _edata = . ;
+ }
+ . = ALIGN(8);
+ .rodata : {
+ _rodata = . ;
+ *(.rodata) /* read-only data */
+ *(.rodata.*)
+ _erodata = . ;
+ }
+ . = ALIGN(8);
+ .rodata.compressed : {
+ *(.rodata.compressed)
+ }
+ . = ALIGN(8);
+ .bss : {
+ _bss = . ;
+ *(.bss)
+ *(.bss.*)
+ *(COMMON)
+ . = ALIGN(4096);
+ _ebss = .;
+ }
+
+ STABS_DEBUG
+ .note 0 : { *(.note) }
+
+ /* Sections to be discarded */
+ DISCARDS
+ /DISCARD/ : {
+#ifdef CONFIG_64BIT
+ /* temporary hack until binutils is fixed to not emit these
+ * for static binaries
+ */
+ *(.PARISC.unwind) /* no unwind data */
+ *(.interp)
+ *(.dynsym)
+ *(.dynstr)
+ *(.dynamic)
+ *(.hash)
+ *(.gnu.hash)
+#endif
+ }
+}
diff --git a/arch/parisc/boot/compressed/vmlinux.scr b/arch/parisc/boot/compressed/vmlinux.scr
new file mode 100644
index 000000000000..dac2d142bcfa
--- /dev/null
+++ b/arch/parisc/boot/compressed/vmlinux.scr
@@ -0,0 +1,10 @@
+SECTIONS
+{
+ .rodata.compressed : {
+ input_len = .;
+ LONG(input_data_end - input_data) input_data = .;
+ *(.data)
+ output_len = . - 4; /* can be at unaligned address */
+ input_data_end = .;
+ }
+}
diff --git a/arch/parisc/boot/install.sh b/arch/parisc/boot/install.sh
new file mode 100644
index 000000000000..8f7c365fad83
--- /dev/null
+++ b/arch/parisc/boot/install.sh
@@ -0,0 +1,65 @@
+#!/bin/sh
+#
+# arch/parisc/install.sh, derived from arch/i386/boot/install.sh
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1995 by Linus Torvalds
+#
+# Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin
+#
+# "make install" script for i386 architecture
+#
+# Arguments:
+# $1 - kernel version
+# $2 - kernel image file
+# $3 - kernel map file
+# $4 - default install path (blank if root directory)
+#
+
+verify () {
+ if [ ! -f "$1" ]; then
+ echo "" 1>&2
+ echo " *** Missing file: $1" 1>&2
+ echo ' *** You need to run "make" before "make install".' 1>&2
+ echo "" 1>&2
+ exit 1
+ fi
+}
+
+# Make sure the files actually exist
+
+verify "$2"
+verify "$3"
+
+# User may have a custom install script
+
+if [ -n "${INSTALLKERNEL}" ]; then
+ if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
+ if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
+fi
+
+# Default install
+
+if [ "$(basename $2)" = "zImage" ]; then
+# Compressed install
+ echo "Installing compressed kernel"
+ base=vmlinuz
+else
+# Normal install
+ echo "Installing normal kernel"
+ base=vmlinux
+fi
+
+if [ -f $4/$base-$1 ]; then
+ mv $4/$base-$1 $4/$base-$1.old
+fi
+cat $2 > $4/$base-$1
+
+# Install system map file
+if [ -f $4/System.map-$1 ]; then
+ mv $4/System.map-$1 $4/System.map-$1.old
+fi
+cp $3 $4/System.map-$1
diff --git a/arch/parisc/configs/712_defconfig b/arch/parisc/configs/712_defconfig
index 143d02652792..ccc109761f44 100644
--- a/arch/parisc/configs/712_defconfig
+++ b/arch/parisc/configs/712_defconfig
@@ -1,11 +1,9 @@
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=16
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_KALLSYMS_ALL=y
CONFIG_SLAB=y
@@ -14,7 +12,6 @@ CONFIG_OPROFILE=m
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
CONFIG_PA7100LC=y
CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_GSC_LASI=y
@@ -32,11 +29,9 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
-# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
# CONFIG_IPV6 is not set
CONFIG_NETFILTER=y
-CONFIG_IP_NF_QUEUE=m
CONFIG_LLC2=m
CONFIG_NET_PKTGEN=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
@@ -65,21 +60,20 @@ CONFIG_MD_LINEAR=m
CONFIG_MD_RAID0=m
CONFIG_MD_RAID1=m
CONFIG_NETDEVICES=y
-CONFIG_DUMMY=m
CONFIG_BONDING=m
+CONFIG_DUMMY=m
CONFIG_TUN=m
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=m
CONFIG_LASI_82596=y
CONFIG_PPP=m
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
CONFIG_PPP_MPPE=m
CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
# CONFIG_KEYBOARD_HIL_OLD is not set
CONFIG_MOUSE_SERIAL=m
+CONFIG_LEGACY_PTY_COUNT=64
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=17
@@ -88,22 +82,17 @@ CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
# CONFIG_SERIAL_MUX is not set
CONFIG_PDC_CONSOLE=y
-CONFIG_LEGACY_PTY_COUNT=64
CONFIG_PRINTER=m
CONFIG_PPDEV=m
# CONFIG_HW_RANDOM is not set
CONFIG_RAW_DRIVER=y
# CONFIG_HWMON is not set
-CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_FB=y
CONFIG_FB_MODE_HELPERS=y
CONFIG_FB_TILEBLITTING=y
CONFIG_DUMMY_CONSOLE_COLUMNS=128
CONFIG_DUMMY_CONSOLE_ROWS=48
CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FONTS=y
-CONFIG_FONT_8x8=y
-CONFIG_FONT_8x16=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
@@ -111,13 +100,9 @@ CONFIG_LOGO=y
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_SEQUENCER=y
-CONFIG_SND_MIXER_OSS=y
-CONFIG_SND_PCM_OSS=y
-CONFIG_SND_SEQUENCER_OSS=y
CONFIG_SND_HARMONY=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
-# CONFIG_EXT3_FS_XATTR is not set
CONFIG_JFS_FS=m
CONFIG_XFS_FS=m
CONFIG_AUTOFS4_FS=y
@@ -130,14 +115,10 @@ CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_UFS_FS=m
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
CONFIG_NFSD_V4=y
-CONFIG_RPCSEC_GSS_SPKM3=m
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
CONFIG_CIFS=m
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_CODEPAGE_737=m
@@ -177,21 +158,16 @@ CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
CONFIG_NLS_UTF8=m
-CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_MUTEXES=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_HMAC=y
-CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAST6=m
@@ -200,6 +176,7 @@ CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_DEFLATE=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
-CONFIG_LIBCRC32C=m
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
diff --git a/arch/parisc/configs/a500_defconfig b/arch/parisc/configs/a500_defconfig
index 1a4f776b49b8..5acb93dcaabf 100644
--- a/arch/parisc/configs/a500_defconfig
+++ b/arch/parisc/configs/a500_defconfig
@@ -1,13 +1,10 @@
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=16
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_SLAB=y
@@ -16,7 +13,6 @@ CONFIG_OPROFILE=m
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
CONFIG_PA8X00=y
CONFIG_64BIT=y
CONFIG_SMP=y
@@ -43,21 +39,17 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
-# CONFIG_INET_LRO is not set
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_TUNNEL=m
CONFIG_NETFILTER=y
# CONFIG_NETFILTER_XT_MATCH_DCCP is not set
-CONFIG_IP_NF_QUEUE=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_ECN=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
-CONFIG_IP_NF_TARGET_ULOG=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_RAW=m
@@ -70,7 +62,6 @@ CONFIG_IP6_NF_MATCH_OPTS=m
CONFIG_IP6_NF_MATCH_HL=m
CONFIG_IP6_NF_MATCH_IPV6HEADER=m
CONFIG_IP6_NF_MATCH_RT=m
-CONFIG_IP6_NF_TARGET_LOG=m
CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
@@ -94,7 +85,6 @@ CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_ISCSI_ATTRS=m
CONFIG_SCSI_SYM53C8XX_2=y
CONFIG_SCSI_QLOGIC_1280=m
@@ -106,43 +96,38 @@ CONFIG_MD_RAID0=y
CONFIG_MD_RAID1=y
CONFIG_FUSION=y
CONFIG_FUSION_SPI=m
-CONFIG_FUSION_FC=m
CONFIG_FUSION_CTL=m
CONFIG_NETDEVICES=y
-CONFIG_DUMMY=m
CONFIG_BONDING=m
+CONFIG_DUMMY=m
CONFIG_TUN=m
-CONFIG_NET_ETHERNET=y
-CONFIG_NET_VENDOR_3COM=y
+CONFIG_PCMCIA_3C574=m
+CONFIG_PCMCIA_3C589=m
CONFIG_VORTEX=m
CONFIG_TYPHOON=m
+CONFIG_ACENIC=m
+CONFIG_ACENIC_OMIT_TIGON_I=y
+CONFIG_PCNET32=m
+CONFIG_TIGON3=m
CONFIG_NET_TULIP=y
CONFIG_DE2104X=m
CONFIG_TULIP=y
CONFIG_TULIP_MMIO=y
CONFIG_PCMCIA_XIRCOM=m
CONFIG_HP100=m
-CONFIG_NET_PCI=y
-CONFIG_PCNET32=m
CONFIG_E100=m
-CONFIG_ACENIC=m
-CONFIG_ACENIC_OMIT_TIGON_I=y
CONFIG_E1000=m
-CONFIG_TIGON3=m
-CONFIG_NET_PCMCIA=y
-CONFIG_PCMCIA_3C589=m
-CONFIG_PCMCIA_3C574=m
CONFIG_PCMCIA_SMC91C92=m
CONFIG_PCMCIA_XIRC2PS=m
CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPP_BSDCOMP=m
-# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
+# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_CS=m
@@ -151,7 +136,6 @@ CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
CONFIG_PDC_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
# CONFIG_HW_RANDOM is not set
CONFIG_RAW_DRIVER=y
# CONFIG_HWMON is not set
@@ -160,7 +144,6 @@ CONFIG_AGP_PARISC=y
# CONFIG_STI_CONSOLE is not set
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
-# CONFIG_EXT3_FS_XATTR is not set
CONFIG_JFS_FS=m
CONFIG_XFS_FS=m
CONFIG_AUTOFS4_FS=y
@@ -173,13 +156,9 @@ CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_UFS_FS=m
CONFIG_NFS_FS=m
-CONFIG_NFS_V3=y
-CONFIG_NFS_V4=y
+CONFIG_NFS_V4=m
CONFIG_NFSD=m
CONFIG_NFSD_V4=y
-CONFIG_RPCSEC_GSS_SPKM3=m
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
CONFIG_CIFS=m
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_CODEPAGE_850=m
@@ -187,17 +166,12 @@ CONFIG_NLS_ASCII=m
CONFIG_NLS_ISO8859_1=m
CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_UTF8=m
-CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
CONFIG_HEADERS_CHECK=y
-CONFIG_DEBUG_KERNEL=y
+CONFIG_MAGIC_SYSRQ=y
# CONFIG_DEBUG_BUGVERBOSE is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_BLOWFISH=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
-CONFIG_LIBCRC32C=m
diff --git a/arch/parisc/configs/b180_defconfig b/arch/parisc/configs/b180_defconfig
index f1a0c25bef8d..83ffd161aec5 100644
--- a/arch/parisc/configs/b180_defconfig
+++ b/arch/parisc/configs/b180_defconfig
@@ -3,7 +3,6 @@ CONFIG_SYSVIPC=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=16
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
@@ -25,8 +24,6 @@ CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_LRO is not set
-CONFIG_IPV6=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
@@ -53,10 +50,9 @@ CONFIG_MD_LINEAR=y
CONFIG_MD_RAID0=y
CONFIG_MD_RAID1=y
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_LASI_82596=y
CONFIG_NET_TULIP=y
CONFIG_TULIP=y
+CONFIG_LASI_82596=y
CONFIG_PPP=y
CONFIG_INPUT_EVDEV=y
# CONFIG_KEYBOARD_HIL_OLD is not set
@@ -71,40 +67,31 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y
CONFIG_PRINTER=y
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
-CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_FB=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_SEQUENCER=y
-CONFIG_SND_MIXER_OSS=y
-CONFIG_SND_PCM_OSS=y
-CONFIG_SND_SEQUENCER_OSS=y
CONFIG_SND_HARMONY=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
-# CONFIG_EXT3_FS_XATTR is not set
CONFIG_AUTOFS4_FS=y
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=y
CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=y
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_CODEPAGE_850=m
CONFIG_NLS_ASCII=m
CONFIG_NLS_ISO8859_1=m
CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_UTF8=m
-CONFIG_MAGIC_SYSRQ=y
CONFIG_HEADERS_CHECK=y
+CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_SECURITY=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/parisc/configs/c3000_defconfig b/arch/parisc/configs/c3000_defconfig
index 8e8f0e34f817..8d41a73bd71b 100644
--- a/arch/parisc/configs/c3000_defconfig
+++ b/arch/parisc/configs/c3000_defconfig
@@ -1,12 +1,9 @@
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=16
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_SLAB=y
@@ -15,7 +12,6 @@ CONFIG_OPROFILE=m
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
CONFIG_PA8X00=y
CONFIG_PREEMPT_VOLUNTARY=y
# CONFIG_GSC is not set
@@ -31,13 +27,10 @@ CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_TUNNEL=m
CONFIG_NETFILTER=y
-CONFIG_NETFILTER_DEBUG=y
-CONFIG_IP_NF_QUEUE=m
CONFIG_NET_PKTGEN=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
@@ -50,13 +43,11 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_IDE=y
CONFIG_BLK_DEV_IDECD=y
CONFIG_BLK_DEV_NS87415=y
-CONFIG_PATA_SIL680=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_ISCSI_ATTRS=m
CONFIG_SCSI_SYM53C8XX_2=y
CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0
@@ -76,28 +67,23 @@ CONFIG_FUSION=y
CONFIG_FUSION_SPI=m
CONFIG_FUSION_CTL=m
CONFIG_NETDEVICES=y
-CONFIG_DUMMY=m
CONFIG_BONDING=m
+CONFIG_DUMMY=m
CONFIG_TUN=m
-CONFIG_NET_ETHERNET=y
+CONFIG_ACENIC=m
+CONFIG_TIGON3=m
CONFIG_NET_TULIP=y
CONFIG_DE2104X=m
CONFIG_TULIP=y
CONFIG_TULIP_MMIO=y
-CONFIG_NET_PCI=y
CONFIG_E100=m
-CONFIG_ACENIC=m
CONFIG_E1000=m
-CONFIG_TIGON3=m
CONFIG_PPP=m
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
CONFIG_PPPOE=m
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-CONFIG_INPUT_MOUSEDEV_SCREEN_X=1600
-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=1200
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
# CONFIG_KEYBOARD_ATKBD is not set
# CONFIG_MOUSE_PS2 is not set
CONFIG_SERIO=m
@@ -111,7 +97,6 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y
# CONFIG_HW_RANDOM is not set
CONFIG_RAW_DRIVER=y
# CONFIG_HWMON is not set
-CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_FB=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
@@ -121,9 +106,6 @@ CONFIG_LOGO=y
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_SEQUENCER=y
-CONFIG_SND_MIXER_OSS=y
-CONFIG_SND_PCM_OSS=y
-CONFIG_SND_SEQUENCER_OSS=y
CONFIG_SND_AD1889=y
CONFIG_USB_HIDDEV=y
CONFIG_USB=y
@@ -139,7 +121,6 @@ CONFIG_USB_MICROTEK=m
CONFIG_USB_LEGOTOWER=m
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
-# CONFIG_EXT3_FS_XATTR is not set
CONFIG_XFS_FS=m
CONFIG_AUTOFS4_FS=y
CONFIG_ISO9660_FS=y
@@ -149,7 +130,6 @@ CONFIG_VFAT_FS=m
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=y
CONFIG_NFSD_V3=y
@@ -159,18 +139,13 @@ CONFIG_NLS_ASCII=m
CONFIG_NLS_ISO8859_1=m
CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_UTF8=m
-CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
CONFIG_HEADERS_CHECK=y
-CONFIG_DEBUG_KERNEL=y
+CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_MUTEXES=y
# CONFIG_DEBUG_BUGVERBOSE is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_MD5=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_DES=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
-CONFIG_LIBCRC32C=m
diff --git a/arch/parisc/configs/c8000_defconfig b/arch/parisc/configs/c8000_defconfig
index f6a4c016304b..088ab948a5ca 100644
--- a/arch/parisc/configs/c8000_defconfig
+++ b/arch/parisc/configs/c8000_defconfig
@@ -1,16 +1,13 @@
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
-CONFIG_RD_BZIP2=y
-CONFIG_RD_LZMA=y
-CONFIG_RD_LZO=y
CONFIG_EXPERT=y
CONFIG_SYSCTL_SYSCALL=y
CONFIG_SLAB=y
@@ -23,7 +20,6 @@ CONFIG_PA8X00=y
CONFIG_64BIT=y
CONFIG_SMP=y
CONFIG_PREEMPT=y
-# CONFIG_CROSS_MEMORY_ATTACH is not set
CONFIG_IOMMU_CCIO=y
CONFIG_PCI=y
CONFIG_PCI_LBA=y
@@ -146,7 +142,6 @@ CONFIG_FB_FOREIGN_ENDIAN=y
CONFIG_FB_MODE_HELPERS=y
CONFIG_FB_TILEBLITTING=y
# CONFIG_FB_STI is not set
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
# CONFIG_LCD_CLASS_DEVICE is not set
# CONFIG_BACKLIGHT_GENERIC is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
@@ -157,12 +152,9 @@ CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_CLUT224 is not set
CONFIG_SOUND=m
CONFIG_SND=m
+CONFIG_SND_VERBOSE_PRINTK=y
CONFIG_SND_SEQUENCER=m
CONFIG_SND_SEQ_DUMMY=m
-CONFIG_SND_MIXER_OSS=m
-CONFIG_SND_PCM_OSS=m
-CONFIG_SND_SEQUENCER_OSS=y
-CONFIG_SND_VERBOSE_PRINTK=y
CONFIG_SND_AD1889=m
# CONFIG_SND_USB is not set
# CONFIG_SND_GSC is not set
@@ -174,8 +166,6 @@ CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_EXT4_FS=m
CONFIG_REISERFS_FS=m
CONFIG_REISERFS_PROC_INFO=y
CONFIG_XFS_FS=m
@@ -238,11 +228,8 @@ CONFIG_DEBUG_SLAB=y
CONFIG_DEBUG_SLAB_LEAK=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_LOCKUP_DETECTOR=y
-CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
CONFIG_PANIC_ON_OOPS=y
CONFIG_DEBUG_RT_MUTEXES=y
-CONFIG_PROVE_RCU_DELAY=y
CONFIG_DEBUG_BLOCK_EXT_DEVT=y
CONFIG_LATENCYTOP=y
CONFIG_KEYS=y
diff --git a/arch/parisc/configs/default_defconfig b/arch/parisc/configs/default_defconfig
index 310b6657e4ac..52c9050a7c5c 100644
--- a/arch/parisc/configs/default_defconfig
+++ b/arch/parisc/configs/default_defconfig
@@ -1,11 +1,9 @@
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=16
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_KALLSYMS_ALL=y
CONFIG_SLAB=y
@@ -41,9 +39,7 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
-# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
-CONFIG_IPV6=y
CONFIG_INET6_AH=y
CONFIG_INET6_ESP=y
CONFIG_INET6_IPCOMP=y
@@ -82,26 +78,23 @@ CONFIG_MD_RAID1=y
CONFIG_MD_RAID10=y
CONFIG_BLK_DEV_DM=y
CONFIG_NETDEVICES=y
-CONFIG_DUMMY=m
CONFIG_BONDING=m
+CONFIG_DUMMY=m
CONFIG_TUN=m
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=m
-CONFIG_LASI_82596=y
-CONFIG_NET_TULIP=y
-CONFIG_TULIP=y
-CONFIG_NET_PCI=y
CONFIG_ACENIC=y
CONFIG_TIGON3=y
-CONFIG_NET_PCMCIA=y
+CONFIG_NET_TULIP=y
+CONFIG_TULIP=y
+CONFIG_LASI_82596=y
CONFIG_PPP=m
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
# CONFIG_KEYBOARD_HIL_OLD is not set
CONFIG_MOUSE_SERIAL=y
+CONFIG_LEGACY_PTY_COUNT=64
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_CS=y
@@ -109,31 +102,24 @@ CONFIG_SERIAL_8250_NR_UARTS=17
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
-CONFIG_LEGACY_PTY_COUNT=64
CONFIG_PRINTER=m
CONFIG_PPDEV=m
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
-CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_FB=y
CONFIG_FB_MODE_HELPERS=y
CONFIG_FB_TILEBLITTING=y
CONFIG_DUMMY_CONSOLE_COLUMNS=128
CONFIG_DUMMY_CONSOLE_ROWS=48
CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FONTS=y
-CONFIG_FONT_8x16=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
# CONFIG_LOGO_LINUX_CLUT224 is not set
CONFIG_SOUND=y
CONFIG_SND=y
-CONFIG_SND_SEQUENCER=y
-CONFIG_SND_MIXER_OSS=y
-CONFIG_SND_PCM_OSS=y
-CONFIG_SND_SEQUENCER_OSS=y
CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_SEQUENCER=y
CONFIG_SND_AD1889=y
CONFIG_SND_HARMONY=y
CONFIG_HID_GYRATION=y
@@ -141,7 +127,6 @@ CONFIG_HID_NTRIG=y
CONFIG_HID_PANTHERLORD=y
CONFIG_HID_PETALYNX=y
CONFIG_HID_SAMSUNG=y
-CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
CONFIG_HID_TOPSEED=y
CONFIG_USB=y
@@ -150,21 +135,15 @@ CONFIG_USB_OHCI_HCD=y
CONFIG_USB_UHCI_HCD=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
-# CONFIG_EXT3_FS_XATTR is not set
-CONFIG_AUTOFS_FS=y
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=y
CONFIG_NFSD_V4=y
-CONFIG_RPCSEC_GSS_SPKM3=m
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
CONFIG_CIFS=m
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_737=m
@@ -204,30 +183,24 @@ CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
CONFIG_NLS_UTF8=y
-CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
CONFIG_HEADERS_CHECK=y
+CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_MUTEXES=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_KEYS=y
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES=m
CONFIG_CRYPTO_ANUBIS=m
-CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
CONFIG_LIBCRC32C=m
+CONFIG_FONTS=y
diff --git a/arch/parisc/configs/generic-32bit_defconfig b/arch/parisc/configs/generic-32bit_defconfig
index 8688ba7f5966..37ae4b57c001 100644
--- a/arch/parisc/configs/generic-32bit_defconfig
+++ b/arch/parisc/configs/generic-32bit_defconfig
@@ -2,15 +2,11 @@ CONFIG_LOCALVERSION="-32bit"
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=16
CONFIG_BLK_DEV_INITRD=y
-CONFIG_RD_BZIP2=y
-CONFIG_RD_LZMA=y
-CONFIG_RD_LZO=y
CONFIG_EXPERT=y
CONFIG_SYSCTL_SYSCALL=y
CONFIG_PERF_EVENTS=y
@@ -49,7 +45,6 @@ CONFIG_INET_ESP=m
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
CONFIG_INET_DIAG=m
CONFIG_LLC2=m
# CONFIG_WIRELESS is not set
@@ -149,10 +144,8 @@ CONFIG_PRINTER=m
CONFIG_PPDEV=m
# CONFIG_HW_RANDOM is not set
CONFIG_I2C=y
-CONFIG_POWER_SUPPLY=y
# CONFIG_HWMON is not set
CONFIG_AGP=y
-CONFIG_VIDEO_OUTPUT_CONTROL=y
CONFIG_FB=y
CONFIG_FB_FOREIGN_ENDIAN=y
CONFIG_FB_MODE_HELPERS=y
@@ -169,11 +162,8 @@ CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_CLUT224 is not set
CONFIG_SOUND=m
CONFIG_SND=m
-CONFIG_SND_SEQUENCER=m
-CONFIG_SND_MIXER_OSS=m
-CONFIG_SND_PCM_OSS=m
-CONFIG_SND_SEQUENCER_OSS=y
CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_SEQUENCER=m
CONFIG_SND_AD1889=m
CONFIG_SND_HARMONY=m
CONFIG_HIDRAW=y
@@ -223,12 +213,7 @@ CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_SECURITY=y
CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_EXT3_FS_SECURITY=y
-CONFIG_EXT4_FS=y
-CONFIG_XFS_FS=m
-CONFIG_XFS_QUOTA=y
-CONFIG_XFS_RT=y
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_QFMT_V2=y
@@ -293,15 +278,12 @@ CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_DEBUG_SHIRQ=y
CONFIG_DETECT_HUNG_TASK=y
-CONFIG_TIMER_STATS=y
CONFIG_DEBUG_RT_MUTEXES=y
CONFIG_DEBUG_SPINLOCK=y
CONFIG_DEBUG_MUTEXES=y
-CONFIG_RCU_CPU_STALL_INFO=y
CONFIG_LATENCYTOP=y
CONFIG_LKDTM=m
CONFIG_KEYS=y
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_MD5=y
@@ -320,7 +302,6 @@ CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_DEFLATE=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRC_CCITT=m
CONFIG_CRC_T10DIF=y
CONFIG_FONTS=y
diff --git a/arch/parisc/configs/generic-64bit_defconfig b/arch/parisc/configs/generic-64bit_defconfig
index c564e6e1fa23..d39e7f821aba 100644
--- a/arch/parisc/configs/generic-64bit_defconfig
+++ b/arch/parisc/configs/generic-64bit_defconfig
@@ -8,10 +8,11 @@ CONFIG_TASKSTATS=y
CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
-# CONFIG_UTS_NS is not set
-# CONFIG_IPC_NS is not set
-# CONFIG_PID_NS is not set
-# CONFIG_NET_NS is not set
+CONFIG_CGROUPS=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CPUSETS=y
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
@@ -52,7 +53,6 @@ CONFIG_INET_ESP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_INET_XFRM_MODE_BEET=m
-CONFIG_INET_LRO=m
CONFIG_INET_DIAG=m
CONFIG_NETFILTER=y
# CONFIG_NETFILTER_ADVANCED is not set
@@ -84,7 +84,6 @@ CONFIG_PATA_SIL680=y
CONFIG_ATA_GENERIC=y
CONFIG_MD=y
CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_RAID=m
CONFIG_DM_UEVENT=y
@@ -138,21 +137,21 @@ CONFIG_QLGE=m
# CONFIG_NET_VENDOR_TI is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_MDIO_BITBANG=m
CONFIG_PHYLIB=y
-CONFIG_MARVELL_PHY=m
-CONFIG_DAVICOM_PHY=m
-CONFIG_QSEMI_PHY=m
-CONFIG_LXT_PHY=m
-CONFIG_CICADA_PHY=m
-CONFIG_VITESSE_PHY=m
-CONFIG_SMSC_PHY=m
CONFIG_BROADCOM_PHY=m
+CONFIG_CICADA_PHY=m
+CONFIG_DAVICOM_PHY=m
CONFIG_ICPLUS_PHY=m
-CONFIG_REALTEK_PHY=m
+CONFIG_LSI_ET1011C_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_MARVELL_PHY=m
CONFIG_NATIONAL_PHY=m
+CONFIG_QSEMI_PHY=m
+CONFIG_REALTEK_PHY=m
+CONFIG_SMSC_PHY=m
CONFIG_STE10XP=m
-CONFIG_LSI_ET1011C_PHY=m
-CONFIG_MDIO_BITBANG=m
+CONFIG_VITESSE_PHY=m
CONFIG_SLIP=m
CONFIG_SLIP_COMPRESSED=y
CONFIG_SLIP_SMART=y
@@ -166,10 +165,8 @@ CONFIG_INPUT_MISC=y
CONFIG_SERIO_SERPORT=m
# CONFIG_HP_SDC is not set
CONFIG_SERIO_RAW=m
-CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
# CONFIG_LEGACY_PTYS is not set
CONFIG_NOZOMI=m
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_8250=y
# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
CONFIG_SERIAL_8250_CONSOLE=y
@@ -207,10 +204,8 @@ CONFIG_AGP=y
CONFIG_AGP_PARISC=y
CONFIG_DRM=y
CONFIG_DRM_RADEON=y
-CONFIG_DRM_RADEON_UMS=y
CONFIG_FIRMWARE_EDID=y
CONFIG_FB_MODE_HELPERS=y
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
# CONFIG_BACKLIGHT_GENERIC is not set
CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
CONFIG_LOGO=y
@@ -246,8 +241,6 @@ CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_SECURITY=y
CONFIG_EXT3_FS=y
CONFIG_EXT3_FS_SECURITY=y
-CONFIG_EXT4_FS=y
-CONFIG_EXT4_FS_SECURITY=y
CONFIG_XFS_FS=m
CONFIG_BTRFS_FS=m
CONFIG_QUOTA=y
@@ -286,27 +279,16 @@ CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_LOCKUP_DETECTOR=y
-CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
-CONFIG_BOOTPARAM_HUNG_TASK_PANIC=y
# CONFIG_SCHED_DEBUG is not set
-CONFIG_TIMER_STATS=y
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_DEFLATE=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC_CCITT=m
CONFIG_LIBCRC32C=y
-CONFIG_XZ_DEC_X86=y
-CONFIG_XZ_DEC_POWERPC=y
-CONFIG_XZ_DEC_IA64=y
-CONFIG_XZ_DEC_ARM=y
-CONFIG_XZ_DEC_ARMTHUMB=y
-CONFIG_XZ_DEC_SPARC=y
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 5394b9c5f914..17b98a87e5e2 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -65,6 +65,8 @@ static __inline__ void atomic_set(atomic_t *v, int i)
_atomic_spin_unlock_irqrestore(v, flags);
}
+#define atomic_set_release(v, i) atomic_set((v), (i))
+
static __inline__ int atomic_read(const atomic_t *v)
{
return READ_ONCE((v)->counter);
diff --git a/arch/parisc/include/asm/bug.h b/arch/parisc/include/asm/bug.h
index d2742273a685..07ea467f22fc 100644
--- a/arch/parisc/include/asm/bug.h
+++ b/arch/parisc/include/asm/bug.h
@@ -27,7 +27,7 @@
do { \
asm volatile("\n" \
"1:\t" PARISC_BUG_BREAK_ASM "\n" \
- "\t.pushsection __bug_table,\"a\"\n" \
+ "\t.pushsection __bug_table,\"aw\"\n" \
"2:\t" ASM_WORD_INSN "1b, %c0\n" \
"\t.short %c1, %c2\n" \
"\t.org 2b+%c3\n" \
@@ -50,7 +50,7 @@
do { \
asm volatile("\n" \
"1:\t" PARISC_BUG_BREAK_ASM "\n" \
- "\t.pushsection __bug_table,\"a\"\n" \
+ "\t.pushsection __bug_table,\"aw\"\n" \
"2:\t" ASM_WORD_INSN "1b, %c0\n" \
"\t.short %c1, %c2\n" \
"\t.org 2b+%c3\n" \
@@ -64,7 +64,7 @@
do { \
asm volatile("\n" \
"1:\t" PARISC_BUG_BREAK_ASM "\n" \
- "\t.pushsection __bug_table,\"a\"\n" \
+ "\t.pushsection __bug_table,\"aw\"\n" \
"2:\t" ASM_WORD_INSN "1b\n" \
"\t.short %c0\n" \
"\t.org 2b+%c1\n" \
diff --git a/arch/parisc/include/asm/futex.h b/arch/parisc/include/asm/futex.h
index 0ba14300cd8e..c601aab2fb36 100644
--- a/arch/parisc/include/asm/futex.h
+++ b/arch/parisc/include/asm/futex.h
@@ -32,22 +32,12 @@ _futex_spin_unlock_irqrestore(u32 __user *uaddr, unsigned long int *flags)
}
static inline int
-futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
{
unsigned long int flags;
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval, ret;
u32 tmp;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(*uaddr)))
- return -EFAULT;
-
_futex_spin_lock_irqsave(uaddr, &flags);
pagefault_disable();
@@ -85,17 +75,9 @@ out_pagefault_enable:
pagefault_enable();
_futex_spin_unlock_irqrestore(uaddr, &flags);
- if (ret == 0) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
diff --git a/arch/parisc/include/asm/mmu_context.h b/arch/parisc/include/asm/mmu_context.h
index a81226257878..e4a657094058 100644
--- a/arch/parisc/include/asm/mmu_context.h
+++ b/arch/parisc/include/asm/mmu_context.h
@@ -63,6 +63,9 @@ static inline void switch_mm(struct mm_struct *prev,
{
unsigned long flags;
+ if (prev == next)
+ return;
+
local_irq_save(flags);
switch_mm_irqs_off(prev, next, tsk);
local_irq_restore(flags);
diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h
index 80e742a1c162..bfed09d80bae 100644
--- a/arch/parisc/include/asm/page.h
+++ b/arch/parisc/include/asm/page.h
@@ -116,11 +116,15 @@ extern int npmem_ranges;
/* This governs the relationship between virtual and physical addresses.
* If you alter it, make sure to take care of our various fixed mapping
* segments in fixmap.h */
+#if defined(BOOTLOADER)
+#define __PAGE_OFFSET (0) /* bootloader uses physical addresses */
+#else
#ifdef CONFIG_64BIT
#define __PAGE_OFFSET (0x40000000) /* 1GB */
#else
#define __PAGE_OFFSET (0x10000000) /* 256MB */
#endif
+#endif /* BOOTLOADER */
#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
diff --git a/arch/parisc/include/asm/pdc.h b/arch/parisc/include/asm/pdc.h
index 7569627a032b..26b4455baa83 100644
--- a/arch/parisc/include/asm/pdc.h
+++ b/arch/parisc/include/asm/pdc.h
@@ -5,6 +5,8 @@
#if !defined(__ASSEMBLY__)
+extern int parisc_narrow_firmware;
+
extern int pdc_type;
extern unsigned long parisc_cell_num; /* cell number the CPU runs on (PAT) */
extern unsigned long parisc_cell_loc; /* cell location of CPU (PAT) */
diff --git a/arch/parisc/include/asm/pdcpat.h b/arch/parisc/include/asm/pdcpat.h
index 32e105fb8adb..a468a172ee33 100644
--- a/arch/parisc/include/asm/pdcpat.h
+++ b/arch/parisc/include/asm/pdcpat.h
@@ -150,7 +150,7 @@
#define PDC_PAT_MEM_SETGM 9L /* Set Good Memory value */
#define PDC_PAT_MEM_ADD_PAGE 10L /* ADDs a page to the cell */
#define PDC_PAT_MEM_ADDRESS 11L /* Get Physical Location From */
- /* Memory Address */
+ /* Memory Address */
#define PDC_PAT_MEM_GET_TXT_SIZE 12L /* Get Formatted Text Size */
#define PDC_PAT_MEM_GET_PD_TXT 13L /* Get PD Formatted Text */
#define PDC_PAT_MEM_GET_CELL_TXT 14L /* Get Cell Formatted Text */
@@ -223,11 +223,34 @@ struct pdc_pat_mem_retinfo { /* PDC_PAT_MEM/PDC_PAT_MEM_PD_INFO (return info) */
unsigned long clear_time; /* last PDT clear time (since Jan 1970) */
};
+struct pdc_pat_mem_cell_pdt_retinfo { /* PDC_PAT_MEM/PDC_PAT_MEM_CELL_INFO */
+ u64 reserved:32;
+ u64 cs:1; /* clear status: cleared since the last call? */
+ u64 current_pdt_entries:15;
+ u64 ic:1; /* interleaving had to be changed? */
+ u64 max_pdt_entries:15;
+ unsigned long good_mem;
+ unsigned long first_dbe_loc; /* first location of double bit error */
+ unsigned long clear_time; /* last PDT clear time (since Jan 1970) */
+};
+
+
struct pdc_pat_mem_read_pd_retinfo { /* PDC_PAT_MEM/PDC_PAT_MEM_PD_READ */
unsigned long actual_count_bytes;
unsigned long pdt_entries;
};
+struct pdc_pat_mem_phys_mem_location { /* PDC_PAT_MEM/PDC_PAT_MEM_ADDRESS */
+ u64 cabinet:8;
+ u64 ign1:8;
+ u64 ign2:8;
+ u64 cell_slot:8;
+ u64 ign3:8;
+ u64 dimm_slot:8; /* DIMM slot, e.g. 0x1A, 0x2B, show user hex value! */
+ u64 ign4:8;
+ u64 source:4; /* for mem: always 0x07 */
+ u64 source_detail:4; /* for mem: always 0x04 (SIMM or DIMM) */
+};
struct pdc_pat_pd_addr_map_entry {
unsigned char entry_type; /* 1 = Memory Descriptor Entry Type */
@@ -314,11 +337,16 @@ extern int pdc_pat_io_pci_cfg_read(unsigned long pci_addr, int pci_size, u32 *va
extern int pdc_pat_io_pci_cfg_write(unsigned long pci_addr, int pci_size, u32 val);
extern int pdc_pat_mem_pdt_info(struct pdc_pat_mem_retinfo *rinfo);
+extern int pdc_pat_mem_pdt_cell_info(struct pdc_pat_mem_cell_pdt_retinfo *rinfo,
+ unsigned long cell);
extern int pdc_pat_mem_read_cell_pdt(struct pdc_pat_mem_read_pd_retinfo *pret,
unsigned long *pdt_entries_ptr, unsigned long max_entries);
extern int pdc_pat_mem_read_pd_pdt(struct pdc_pat_mem_read_pd_retinfo *pret,
unsigned long *pdt_entries_ptr, unsigned long count,
unsigned long offset);
+extern int pdc_pat_mem_get_dimm_phys_location(
+ struct pdc_pat_mem_phys_mem_location *pret,
+ unsigned long phys_addr);
#endif /* __ASSEMBLY__ */
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index e32936cd7f10..55bfe4affca3 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -14,13 +14,6 @@ static inline int arch_spin_is_locked(arch_spinlock_t *x)
#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)
-static inline void arch_spin_unlock_wait(arch_spinlock_t *x)
-{
- volatile unsigned int *a = __ldcw_align(x);
-
- smp_cond_load_acquire(a, VAL);
-}
-
static inline void arch_spin_lock_flags(arch_spinlock_t *x,
unsigned long flags)
{
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h
index 88fe0aad4390..bc208136bbb2 100644
--- a/arch/parisc/include/asm/thread_info.h
+++ b/arch/parisc/include/asm/thread_info.h
@@ -34,7 +34,7 @@ struct thread_info {
/* thread information allocation */
-#define THREAD_SIZE_ORDER 2 /* PA-RISC requires at least 16k stack */
+#define THREAD_SIZE_ORDER 3 /* PA-RISC requires at least 32k stack */
/* Be sure to hunt all references to this down when you change the size of
* the kernel stack */
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
diff --git a/arch/parisc/include/uapi/asm/ioctls.h b/arch/parisc/include/uapi/asm/ioctls.h
index 674c68a5bbd0..d0e3321403be 100644
--- a/arch/parisc/include/uapi/asm/ioctls.h
+++ b/arch/parisc/include/uapi/asm/ioctls.h
@@ -60,7 +60,7 @@
#define TIOCGPKT _IOR('T', 0x38, int) /* Get packet mode state */
#define TIOCGPTLCK _IOR('T', 0x39, int) /* Get Pty lock state */
#define TIOCGEXCL _IOR('T', 0x40, int) /* Get exclusive mode state */
-#define TIOCGPTPEER _IOR('T', 0x41, int) /* Safely open the slave */
+#define TIOCGPTPEER _IO('T', 0x41) /* Safely open the slave */
#define FIONCLEX 0x5450 /* these numbers need to be adjusted. */
#define FIOCLEX 0x5451
diff --git a/arch/parisc/include/uapi/asm/mman.h b/arch/parisc/include/uapi/asm/mman.h
index 5979745815a5..775b5d5e41a1 100644
--- a/arch/parisc/include/uapi/asm/mman.h
+++ b/arch/parisc/include/uapi/asm/mman.h
@@ -40,9 +40,6 @@
#define MADV_SEQUENTIAL 2 /* expect sequential page references */
#define MADV_WILLNEED 3 /* will need these pages */
#define MADV_DONTNEED 4 /* don't need these pages */
-#define MADV_SPACEAVAIL 5 /* insure that resources are reserved */
-#define MADV_VPS_PURGE 6 /* Purge pages from VM page cache */
-#define MADV_VPS_INHERIT 7 /* Inherit parents page size */
/* common/generic parameters */
#define MADV_FREE 8 /* free pages only if memory pressure */
@@ -60,21 +57,16 @@
overrides the coredump filter bits */
#define MADV_DODUMP 70 /* Clear the MADV_NODUMP flag */
+#define MADV_WIPEONFORK 71 /* Zero memory on fork, child only */
+#define MADV_KEEPONFORK 72 /* Undo MADV_WIPEONFORK */
+
+#define MADV_HWPOISON 100 /* poison a page for testing */
+#define MADV_SOFT_OFFLINE 101 /* soft offline page for testing */
+
/* compatibility flags */
#define MAP_FILE 0
#define MAP_VARIABLE 0
-/*
- * When MAP_HUGETLB is set bits [26:31] encode the log2 of the huge page size.
- * This gives us 6 bits, which is enough until someone invents 128 bit address
- * spaces.
- *
- * Assume these are all power of twos.
- * When 0 use the default page size.
- */
-#define MAP_HUGE_SHIFT 26
-#define MAP_HUGE_MASK 0x3f
-
#define PKEY_DISABLE_ACCESS 0x1
#define PKEY_DISABLE_WRITE 0x2
#define PKEY_ACCESS_MASK (PKEY_DISABLE_ACCESS |\
diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h
index a0d4dc9f4eb2..3b2bf7ae703b 100644
--- a/arch/parisc/include/uapi/asm/socket.h
+++ b/arch/parisc/include/uapi/asm/socket.h
@@ -101,4 +101,6 @@
#define SO_PEERGROUPS 0x4034
+#define SO_ZEROCOPY 0x4035
+
#endif /* _UAPI_ASM_SOCKET_H */
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index c32a09095216..19c0c141bc3f 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -453,8 +453,8 @@ void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
before it can be accessed through the kernel mapping. */
preempt_disable();
flush_dcache_page_asm(__pa(vfrom), vaddr);
- preempt_enable();
copy_page_asm(vto, vfrom);
+ preempt_enable();
}
EXPORT_SYMBOL(copy_user_page);
@@ -539,6 +539,10 @@ void flush_cache_mm(struct mm_struct *mm)
struct vm_area_struct *vma;
pgd_t *pgd;
+ /* Flush the TLB to avoid speculation if coherency is required. */
+ if (parisc_requires_coherency())
+ flush_tlb_all();
+
/* Flushing the whole cache on each cpu takes forever on
rp3440, etc. So, avoid it if the mm isn't too big. */
if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
@@ -577,33 +581,21 @@ void flush_cache_mm(struct mm_struct *mm)
void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
- unsigned long addr;
- pgd_t *pgd;
-
BUG_ON(!vma->vm_mm->context);
- if ((end - start) >= parisc_cache_flush_threshold) {
- flush_cache_all();
- return;
- }
+ /* Flush the TLB to avoid speculation if coherency is required. */
+ if (parisc_requires_coherency())
+ flush_tlb_range(vma, start, end);
- if (vma->vm_mm->context == mfsp(3)) {
- flush_user_dcache_range_asm(start, end);
- if (vma->vm_flags & VM_EXEC)
- flush_user_icache_range_asm(start, end);
+ if ((end - start) >= parisc_cache_flush_threshold
+ || vma->vm_mm->context != mfsp(3)) {
+ flush_cache_all();
return;
}
- pgd = vma->vm_mm->pgd;
- for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
- unsigned long pfn;
- pte_t *ptep = get_ptep(pgd, addr);
- if (!ptep)
- continue;
- pfn = pte_pfn(*ptep);
- if (pfn_valid(pfn))
- __flush_cache_page(vma, addr, PFN_PHYS(pfn));
- }
+ flush_user_dcache_range_asm(start, end);
+ if (vma->vm_flags & VM_EXEC)
+ flush_user_icache_range_asm(start, end);
}
void
@@ -612,7 +604,8 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
BUG_ON(!vma->vm_mm->context);
if (pfn_valid(pfn)) {
- flush_tlb_page(vma, vmaddr);
+ if (parisc_requires_coherency())
+ flush_tlb_page(vma, vmaddr);
__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}
}
diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c
index 98190252c12f..ab80e5c6f651 100644
--- a/arch/parisc/kernel/firmware.c
+++ b/arch/parisc/kernel/firmware.c
@@ -69,7 +69,15 @@
#include <asm/pdcpat.h>
#include <asm/processor.h> /* for boot_cpu_data */
+#if defined(BOOTLOADER)
+# undef spin_lock_irqsave
+# define spin_lock_irqsave(a, b) { b = 1; }
+# undef spin_unlock_irqrestore
+# define spin_unlock_irqrestore(a, b)
+#else
static DEFINE_SPINLOCK(pdc_lock);
+#endif
+
extern unsigned long pdc_result[NUM_PDC_RESULT];
extern unsigned long pdc_result2[NUM_PDC_RESULT];
@@ -142,8 +150,8 @@ static void convert_to_wide(unsigned long *addr)
int i;
unsigned int *p = (unsigned int *)addr;
- if(unlikely(parisc_narrow_firmware)) {
- for(i = 31; i >= 0; --i)
+ if (unlikely(parisc_narrow_firmware)) {
+ for (i = (NUM_PDC_RESULT-1); i >= 0; --i)
addr[i] = p[i];
}
#endif
@@ -186,6 +194,8 @@ void set_firmware_width(void)
}
#endif /*CONFIG_64BIT*/
+
+#if !defined(BOOTLOADER)
/**
* pdc_emergency_unlock - Unlock the linux pdc lock
*
@@ -979,16 +989,22 @@ int pdc_mem_pdt_read_entries(struct pdc_mem_read_pdt *pret,
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_MEM, PDC_MEM_READ_PDT, __pa(pdc_result),
- __pa(pdc_result2));
+ __pa(pdt_entries_ptr));
if (retval == PDC_OK) {
convert_to_wide(pdc_result);
memcpy(pret, pdc_result, sizeof(*pret));
- convert_to_wide(pdc_result2);
- memcpy(pdt_entries_ptr, pdc_result2,
- pret->pdt_entries * sizeof(*pdt_entries_ptr));
}
spin_unlock_irqrestore(&pdc_lock, flags);
+#ifdef CONFIG_64BIT
+ /*
+ * 64-bit kernels should not call this PDT function in narrow mode.
+ * The pdt_entries_ptr array above will now contain 32-bit values
+ */
+ if (WARN_ON_ONCE((retval == PDC_OK) && parisc_narrow_firmware))
+ return PDC_ERROR;
+#endif
+
return retval;
}
@@ -1143,6 +1159,8 @@ void pdc_io_reset_devices(void)
spin_unlock_irqrestore(&pdc_lock, flags);
}
+#endif /* defined(BOOTLOADER) */
+
/* locked by pdc_console_lock */
static int __attribute__((aligned(8))) iodc_retbuf[32];
static char __attribute__((aligned(64))) iodc_dbuf[4096];
@@ -1187,6 +1205,7 @@ print:
return i;
}
+#if !defined(BOOTLOADER)
/**
* pdc_iodc_getc - Read a character (non-blocking) from the PDC console.
*
@@ -1440,6 +1459,29 @@ int pdc_pat_mem_pdt_info(struct pdc_pat_mem_retinfo *rinfo)
}
/**
+ * pdc_pat_mem_pdt_cell_info - Retrieve information about page deallocation
+ * table of a cell
+ * @rinfo: memory pdt information
+ * @cell: cell number
+ *
+ */
+int pdc_pat_mem_pdt_cell_info(struct pdc_pat_mem_cell_pdt_retinfo *rinfo,
+ unsigned long cell)
+{
+ int retval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_PAT_MEM, PDC_PAT_MEM_CELL_INFO,
+ __pa(&pdc_result), cell);
+ if (retval == PDC_OK)
+ memcpy(rinfo, &pdc_result, sizeof(*rinfo));
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return retval;
+}
+
+/**
* pdc_pat_mem_read_cell_pdt - Read PDT entries from (old) PAT firmware
* @pret: array of PDT entries
* @pdt_entries_ptr: ptr to hold number of PDT entries
@@ -1455,14 +1497,14 @@ int pdc_pat_mem_read_cell_pdt(struct pdc_pat_mem_read_pd_retinfo *pret,
spin_lock_irqsave(&pdc_lock, flags);
/* PDC_PAT_MEM_CELL_READ is available on early PAT machines only */
retval = mem_pdc_call(PDC_PAT_MEM, PDC_PAT_MEM_CELL_READ,
- __pa(&pdc_result), parisc_cell_num, __pa(&pdc_result2));
+ __pa(&pdc_result), parisc_cell_num,
+ __pa(pdt_entries_ptr));
if (retval == PDC_OK) {
/* build up return value as for PDC_PAT_MEM_PD_READ */
entries = min(pdc_result[0], max_entries);
pret->pdt_entries = entries;
pret->actual_count_bytes = entries * sizeof(unsigned long);
- memcpy(pdt_entries_ptr, &pdc_result2, pret->actual_count_bytes);
}
spin_unlock_irqrestore(&pdc_lock, flags);
@@ -1474,6 +1516,8 @@ int pdc_pat_mem_read_cell_pdt(struct pdc_pat_mem_read_pd_retinfo *pret,
* pdc_pat_mem_read_pd_pdt - Read PDT entries from (newer) PAT firmware
* @pret: array of PDT entries
* @pdt_entries_ptr: ptr to hold number of PDT entries
+ * @count: number of bytes to read
+ * @offset: offset to start (in bytes)
*
*/
int pdc_pat_mem_read_pd_pdt(struct pdc_pat_mem_read_pd_retinfo *pret,
@@ -1481,17 +1525,50 @@ int pdc_pat_mem_read_pd_pdt(struct pdc_pat_mem_read_pd_retinfo *pret,
unsigned long offset)
{
int retval;
- unsigned long flags;
+ unsigned long flags, entries;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_PAT_MEM, PDC_PAT_MEM_PD_READ,
- __pa(&pret), __pa(pdt_entries_ptr),
+ __pa(&pdc_result), __pa(pdt_entries_ptr),
count, offset);
+
+ if (retval == PDC_OK) {
+ entries = min(pdc_result[0], count);
+ pret->actual_count_bytes = entries;
+ pret->pdt_entries = entries / sizeof(unsigned long);
+ }
+
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ return retval;
+}
+
+/**
+ * pdc_pat_mem_get_dimm_phys_location - Get physical DIMM slot via PAT firmware
+ * @pret: ptr to hold returned information
+ * @phys_addr: physical address to examine
+ *
+ */
+int pdc_pat_mem_get_dimm_phys_location(
+ struct pdc_pat_mem_phys_mem_location *pret,
+ unsigned long phys_addr)
+{
+ int retval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_PAT_MEM, PDC_PAT_MEM_ADDRESS,
+ __pa(&pdc_result), phys_addr);
+
+ if (retval == PDC_OK)
+ memcpy(pret, &pdc_result, sizeof(*pret));
+
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
#endif /* CONFIG_64BIT */
+#endif /* defined(BOOTLOADER) */
/***************** 32-bit real-mode calls ***********/
@@ -1601,4 +1678,3 @@ long real64_call(unsigned long fn, ...)
}
#endif /* CONFIG_64BIT */
-
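The new PDC wrappers added above (pdc_pat_mem_pdt_cell_info(), pdc_pat_mem_get_dimm_phys_location(), the reworked PDT readers) all share the same lock/call/copy shape. A minimal sketch of that shape, assuming a hypothetical pdc_example_info() and PDC_EXAMPLE* call numbers, while reusing the real pdc_lock, pdc_result and mem_pdc_call() from this file:

/*
 * Hedged sketch only: pdc_example_info(), struct pdc_example_retinfo,
 * PDC_EXAMPLE and PDC_EXAMPLE_INFO are hypothetical; pdc_lock, pdc_result,
 * mem_pdc_call() and PDC_OK are the real symbols used by the wrappers in
 * arch/parisc/kernel/firmware.c.
 */
int pdc_example_info(struct pdc_example_retinfo *rinfo)
{
	int retval;
	unsigned long flags;

	spin_lock_irqsave(&pdc_lock, flags);
	retval = mem_pdc_call(PDC_EXAMPLE, PDC_EXAMPLE_INFO, __pa(pdc_result));
	if (retval == PDC_OK)
		memcpy(rinfo, pdc_result, sizeof(*rinfo));	/* copy out under the lock */
	spin_unlock_irqrestore(&pdc_lock, flags);

	return retval;
}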
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index ba5e1c7b1f17..0ca254085a66 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -380,7 +380,7 @@ static inline int eirr_to_irq(unsigned long eirr)
/*
* IRQ STACK - used for irq handler
*/
-#define IRQ_STACK_SIZE (4096 << 2) /* 16k irq stack size */
+#define IRQ_STACK_SIZE (4096 << 3) /* 32k irq stack size */
union irq_stack_union {
unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)];
@@ -413,6 +413,10 @@ static inline void stack_overflow_check(struct pt_regs *regs)
if (regs->sr[7])
return;
+ /* exit if already in panic */
+ if (sysctl_panic_on_stackoverflow < 0)
+ return;
+
/* calculate kernel stack usage */
stack_usage = sp - stack_start;
#ifdef CONFIG_IRQSTACKS
@@ -454,8 +458,10 @@ check_kernel_stack:
#ifdef CONFIG_IRQSTACKS
panic_check:
#endif
- if (sysctl_panic_on_stackoverflow)
+ if (sysctl_panic_on_stackoverflow) {
+ sysctl_panic_on_stackoverflow = -1; /* disable further checks */
panic("low stack detected by irq handler - check messages\n");
+ }
#endif
}
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
index 5f0067a62738..bd4c0a7471d3 100644
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -41,7 +41,7 @@ static unsigned long pcxl_used_bytes __read_mostly = 0;
static unsigned long pcxl_used_pages __read_mostly = 0;
extern unsigned long pcxl_dma_start; /* Start of pcxl dma mapping area */
-static spinlock_t pcxl_res_lock;
+static DEFINE_SPINLOCK(pcxl_res_lock);
static char *pcxl_res_map;
static int pcxl_res_hint;
static int pcxl_res_size;
@@ -390,7 +390,6 @@ pcxl_dma_init(void)
if (pcxl_dma_start == 0)
return 0;
- spin_lock_init(&pcxl_res_lock);
pcxl_res_size = PCXL_DMA_MAP_SIZE >> (PAGE_SHIFT + 3);
pcxl_res_hint = 0;
pcxl_res_map = (char *)__get_free_pages(GFP_KERNEL,
diff --git a/arch/parisc/kernel/pdt.c b/arch/parisc/kernel/pdt.c
index f3a797e670b0..05730a83895c 100644
--- a/arch/parisc/kernel/pdt.c
+++ b/arch/parisc/kernel/pdt.c
@@ -1,19 +1,20 @@
/*
* Page Deallocation Table (PDT) support
*
- * The Page Deallocation Table (PDT) holds a table with pointers to bad
- * memory (broken RAM modules) which is maintained by firmware.
+ * The Page Deallocation Table (PDT) is maintained by firmware and holds a
+ * list of memory addresses in which memory errors were detected.
+ * The list contains both single-bit (correctable) and double-bit
+ * (uncorrectable) errors.
*
* Copyright 2017 by Helge Deller <deller@gmx.de>
*
- * TODO:
- * - check regularily for new bad memory
- * - add userspace interface with procfs or sysfs
- * - increase number of PDT entries dynamically
+ * possible future enhancements:
+ * - add userspace interface via procfs or sysfs to clear PDT
*/
#include <linux/memblock.h>
#include <linux/seq_file.h>
+#include <linux/kthread.h>
#include <asm/pdc.h>
#include <asm/pdcpat.h>
@@ -24,11 +25,16 @@ enum pdt_access_type {
PDT_NONE,
PDT_PDC,
PDT_PAT_NEW,
- PDT_PAT_OLD
+ PDT_PAT_CELL
};
static enum pdt_access_type pdt_type;
+/* PDT poll interval: 1 minute if errors, 5 minutes if everything OK. */
+#define PDT_POLL_INTERVAL_DEFAULT (5*60*HZ)
+#define PDT_POLL_INTERVAL_SHORT (1*60*HZ)
+static unsigned long pdt_poll_interval = PDT_POLL_INTERVAL_DEFAULT;
+
/* global PDT status information */
static struct pdc_mem_retinfo pdt_status;
@@ -36,6 +42,21 @@ static struct pdc_mem_retinfo pdt_status;
#define MAX_PDT_ENTRIES (MAX_PDT_TABLE_SIZE / sizeof(unsigned long))
static unsigned long pdt_entry[MAX_PDT_ENTRIES] __page_aligned_bss;
+/*
+ * Constants for the pdt_entry format:
+ * A pdt_entry holds the physical address in bits 0-57, bits 58-61 are
+ * reserved, bit 62 is the perm bit and bit 63 is the error_type bit.
+ * The perm bit indicates whether the error have been verified as a permanent
+ * error (value of 1) or has not been verified, and may be transient (value
+ * of 0). The error_type bit indicates whether the error is a single bit error
+ * (value of 1) or a multiple bit error.
+ * On non-PAT machines phys_addr is encoded in bits 0-59 and error_type in bit
+ * 63. Those machines don't provide the perm bit.
+ */
+
+#define PDT_ADDR_PHYS_MASK (pdt_type != PDT_PDC ? ~0x3f : ~0x0f)
+#define PDT_ADDR_PERM_ERR (pdt_type != PDT_PDC ? 2UL : 0UL)
+#define PDT_ADDR_SINGLE_ERR 1UL
/* report PDT entries via /proc/meminfo */
void arch_report_meminfo(struct seq_file *m)
@@ -49,6 +70,68 @@ void arch_report_meminfo(struct seq_file *m)
pdt_status.pdt_entries);
}
+static int get_info_pat_new(void)
+{
+ struct pdc_pat_mem_retinfo pat_rinfo;
+ int ret;
+
+ /* newer PAT machines like C8000 report info for all cells */
+ if (is_pdc_pat())
+ ret = pdc_pat_mem_pdt_info(&pat_rinfo);
+ else
+ return PDC_BAD_PROC;
+
+ pdt_status.pdt_size = pat_rinfo.max_pdt_entries;
+ pdt_status.pdt_entries = pat_rinfo.current_pdt_entries;
+ pdt_status.pdt_status = 0;
+ pdt_status.first_dbe_loc = pat_rinfo.first_dbe_loc;
+ pdt_status.good_mem = pat_rinfo.good_mem;
+
+ return ret;
+}
+
+static int get_info_pat_cell(void)
+{
+ struct pdc_pat_mem_cell_pdt_retinfo cell_rinfo;
+ int ret;
+
+ /* older PAT machines like rp5470 report cell info only */
+ if (is_pdc_pat())
+ ret = pdc_pat_mem_pdt_cell_info(&cell_rinfo, parisc_cell_num);
+ else
+ return PDC_BAD_PROC;
+
+ pdt_status.pdt_size = cell_rinfo.max_pdt_entries;
+ pdt_status.pdt_entries = cell_rinfo.current_pdt_entries;
+ pdt_status.pdt_status = 0;
+ pdt_status.first_dbe_loc = cell_rinfo.first_dbe_loc;
+ pdt_status.good_mem = cell_rinfo.good_mem;
+
+ return ret;
+}
+
+static void report_mem_err(unsigned long pde)
+{
+ struct pdc_pat_mem_phys_mem_location loc;
+ unsigned long addr;
+ char dimm_txt[32];
+
+ addr = pde & PDT_ADDR_PHYS_MASK;
+
+ /* show DIMM slot description on PAT machines */
+ if (is_pdc_pat()) {
+ pdc_pat_mem_get_dimm_phys_location(&loc, addr);
+ sprintf(dimm_txt, "DIMM slot %02x, ", loc.dimm_slot);
+ } else
+ dimm_txt[0] = 0;
+
+ pr_warn("PDT: BAD MEMORY at 0x%08lx, %s%s%s-bit error.\n",
+ addr, dimm_txt,
+ pde & PDT_ADDR_PERM_ERR ? "permanent ":"",
+ pde & PDT_ADDR_SINGLE_ERR ? "single":"multi");
+}
+
+
/*
* pdc_pdt_init()
*
@@ -63,18 +146,17 @@ void __init pdc_pdt_init(void)
unsigned long entries;
struct pdc_mem_read_pdt pdt_read_ret;
- if (is_pdc_pat()) {
- struct pdc_pat_mem_retinfo pat_rinfo;
+ pdt_type = PDT_PAT_NEW;
+ ret = get_info_pat_new();
- pdt_type = PDT_PAT_NEW;
- ret = pdc_pat_mem_pdt_info(&pat_rinfo);
- pdt_status.pdt_size = pat_rinfo.max_pdt_entries;
- pdt_status.pdt_entries = pat_rinfo.current_pdt_entries;
- pdt_status.pdt_status = 0;
- pdt_status.first_dbe_loc = pat_rinfo.first_dbe_loc;
- pdt_status.good_mem = pat_rinfo.good_mem;
- } else {
+ if (ret != PDC_OK) {
+ pdt_type = PDT_PAT_CELL;
+ ret = get_info_pat_cell();
+ }
+
+ if (ret != PDC_OK) {
pdt_type = PDT_PDC;
+ /* non-PAT machines provide the standard PDC call */
ret = pdc_mem_pdt_info(&pdt_status);
}
@@ -86,13 +168,17 @@ void __init pdc_pdt_init(void)
}
entries = pdt_status.pdt_entries;
- WARN_ON(entries > MAX_PDT_ENTRIES);
+ if (WARN_ON(entries > MAX_PDT_ENTRIES))
+ entries = pdt_status.pdt_entries = MAX_PDT_ENTRIES;
- pr_info("PDT: size %lu, entries %lu, status %lu, dbe_loc 0x%lx,"
- " good_mem %lu\n",
+ pr_info("PDT: type %s, size %lu, entries %lu, status %lu, dbe_loc 0x%lx,"
+ " good_mem %lu MB\n",
+ pdt_type == PDT_PDC ? __stringify(PDT_PDC) :
+ pdt_type == PDT_PAT_CELL ? __stringify(PDT_PAT_CELL)
+ : __stringify(PDT_PAT_NEW),
pdt_status.pdt_size, pdt_status.pdt_entries,
pdt_status.pdt_status, pdt_status.first_dbe_loc,
- pdt_status.good_mem);
+ pdt_status.good_mem / 1024 / 1024);
if (entries == 0) {
pr_info("PDT: Firmware reports all memory OK.\n");
@@ -112,13 +198,12 @@ void __init pdc_pdt_init(void)
#ifdef CONFIG_64BIT
struct pdc_pat_mem_read_pd_retinfo pat_pret;
- ret = pdc_pat_mem_read_cell_pdt(&pat_pret, pdt_entry,
- MAX_PDT_ENTRIES);
- if (ret != PDC_OK) {
- pdt_type = PDT_PAT_OLD;
+ if (pdt_type == PDT_PAT_CELL)
+ ret = pdc_pat_mem_read_cell_pdt(&pat_pret, pdt_entry,
+ MAX_PDT_ENTRIES);
+ else
ret = pdc_pat_mem_read_pd_pdt(&pat_pret, pdt_entry,
MAX_PDT_TABLE_SIZE, 0);
- }
#else
ret = PDC_BAD_PROC;
#endif
@@ -126,18 +211,142 @@ void __init pdc_pdt_init(void)
if (ret != PDC_OK) {
pdt_type = PDT_NONE;
- pr_debug("PDT type %d, retval = %d\n", pdt_type, ret);
+ pr_warn("PDT: Get PDT entries failed with %d\n", ret);
return;
}
for (i = 0; i < pdt_status.pdt_entries; i++) {
- if (i < 20)
- pr_warn("PDT: BAD PAGE #%d at 0x%08lx (error_type = %lu)\n",
- i,
- pdt_entry[i] & PAGE_MASK,
- pdt_entry[i] & 1);
+ report_mem_err(pdt_entry[i]);
/* mark memory page bad */
memblock_reserve(pdt_entry[i] & PAGE_MASK, PAGE_SIZE);
}
}
+
+
+/*
+ * This is the PDT kernel thread main loop.
+ */
+
+static int pdt_mainloop(void *unused)
+{
+ struct pdc_mem_read_pdt pdt_read_ret;
+ struct pdc_pat_mem_read_pd_retinfo pat_pret __maybe_unused;
+ unsigned long old_num_entries;
+ unsigned long *bad_mem_ptr;
+ int num, ret;
+
+ for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ old_num_entries = pdt_status.pdt_entries;
+
+ schedule_timeout(pdt_poll_interval);
+ if (kthread_should_stop())
+ break;
+
+ /* Do we have new PDT entries? */
+ switch (pdt_type) {
+ case PDT_PAT_NEW:
+ ret = get_info_pat_new();
+ break;
+ case PDT_PAT_CELL:
+ ret = get_info_pat_cell();
+ break;
+ default:
+ ret = pdc_mem_pdt_info(&pdt_status);
+ break;
+ }
+
+ if (ret != PDC_OK) {
+ pr_warn("PDT: unexpected failure %d\n", ret);
+ return -EINVAL;
+ }
+
+ /* if no new PDT entries, just wait again */
+ num = pdt_status.pdt_entries - old_num_entries;
+ if (num <= 0)
+ continue;
+
+ /* decrease poll interval in case we found memory errors */
+ if (pdt_status.pdt_entries &&
+ pdt_poll_interval == PDT_POLL_INTERVAL_DEFAULT)
+ pdt_poll_interval = PDT_POLL_INTERVAL_SHORT;
+
+ /* limit entries to get */
+ if (num > MAX_PDT_ENTRIES) {
+ num = MAX_PDT_ENTRIES;
+ pdt_status.pdt_entries = old_num_entries + num;
+ }
+
+ /* get new entries */
+ switch (pdt_type) {
+#ifdef CONFIG_64BIT
+ case PDT_PAT_CELL:
+ if (pdt_status.pdt_entries > MAX_PDT_ENTRIES) {
+ pr_crit("PDT: too many entries.\n");
+ return -ENOMEM;
+ }
+ ret = pdc_pat_mem_read_cell_pdt(&pat_pret, pdt_entry,
+ MAX_PDT_ENTRIES);
+ bad_mem_ptr = &pdt_entry[old_num_entries];
+ break;
+ case PDT_PAT_NEW:
+ ret = pdc_pat_mem_read_pd_pdt(&pat_pret,
+ pdt_entry,
+ num * sizeof(unsigned long),
+ old_num_entries * sizeof(unsigned long));
+ bad_mem_ptr = &pdt_entry[0];
+ break;
+#endif
+ default:
+ ret = pdc_mem_pdt_read_entries(&pdt_read_ret,
+ pdt_entry);
+ bad_mem_ptr = &pdt_entry[old_num_entries];
+ break;
+ }
+
+ /* report and mark memory broken */
+ while (num--) {
+ unsigned long pde = *bad_mem_ptr++;
+
+ report_mem_err(pde);
+
+#ifdef CONFIG_MEMORY_FAILURE
+ if ((pde & PDT_ADDR_PERM_ERR) ||
+ ((pde & PDT_ADDR_SINGLE_ERR) == 0))
+ memory_failure(pde >> PAGE_SHIFT, 0, 0);
+ else
+ soft_offline_page(
+ pfn_to_page(pde >> PAGE_SHIFT), 0);
+#else
+ pr_crit("PDT: memory error at 0x%lx ignored.\n"
+ "Rebuild kernel with CONFIG_MEMORY_FAILURE=y "
+ "for real handling.\n",
+ pde & PDT_ADDR_PHYS_MASK);
+#endif
+
+ }
+ }
+
+ return 0;
+}
+
+
+static int __init pdt_initcall(void)
+{
+ struct task_struct *kpdtd_task;
+
+ if (pdt_type == PDT_NONE)
+ return -ENODEV;
+
+ kpdtd_task = kthread_create(pdt_mainloop, NULL, "kpdtd");
+ if (IS_ERR(kpdtd_task))
+ return PTR_ERR(kpdtd_task);
+
+ wake_up_process(kpdtd_task);
+
+ return 0;
+}
+
+late_initcall(pdt_initcall);
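The PDT entry layout documented in the comment block above can be unpacked with the three masks this file introduces. A minimal sketch (decode_pdt_entry() is hypothetical; PDT_ADDR_PHYS_MASK, PDT_ADDR_PERM_ERR and PDT_ADDR_SINGLE_ERR are the real macros from pdt.c):

/*
 * Hedged sketch: decode one PDT entry per the layout described above.
 * decode_pdt_entry() is hypothetical; the PDT_ADDR_* macros are real.
 * On non-PAT machines PDT_ADDR_PERM_ERR is 0, so "permanent" is never set.
 */
static void decode_pdt_entry(unsigned long pde)
{
	unsigned long addr = pde & PDT_ADDR_PHYS_MASK;	/* bad page address */
	bool permanent = pde & PDT_ADDR_PERM_ERR;	/* verified permanent error? */
	bool single_bit = pde & PDT_ADDR_SINGLE_ERR;	/* correctable (single-bit)? */

	pr_info("PDT: 0x%08lx %s %s-bit error\n", addr,
		permanent ? "permanent" : "unverified",
		single_bit ? "single" : "multi");
}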
diff --git a/arch/parisc/kernel/perf.c b/arch/parisc/kernel/perf.c
index 6017a5af2e6e..0813359049ae 100644
--- a/arch/parisc/kernel/perf.c
+++ b/arch/parisc/kernel/perf.c
@@ -69,7 +69,7 @@ struct rdr_tbl_ent {
static int perf_processor_interface __read_mostly = UNKNOWN_INTF;
static int perf_enabled __read_mostly;
-static spinlock_t perf_lock;
+static DEFINE_SPINLOCK(perf_lock);
struct parisc_device *cpu_device __read_mostly;
/* RDRs to write for PCX-W */
@@ -533,8 +533,6 @@ static int __init perf_init(void)
/* Patch the images to match the system */
perf_patch_images();
- spin_lock_init(&perf_lock);
-
/* TODO: this only lets us access the first cpu.. what to do for SMP? */
cpu_device = per_cpu(cpu_data, 0).dev;
printk("Performance monitoring counters enabled for %s\n",
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index b64d7d21646e..a45a67d526f8 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -53,6 +53,7 @@
#include <linux/uaccess.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
+#include <linux/nmi.h>
#include <asm/io.h>
#include <asm/asm-offsets.h>
@@ -145,6 +146,7 @@ void machine_power_off(void)
/* prevent soft lockup/stalled CPU messages for endless loop. */
rcu_sysrq_start();
+ lockup_detector_suspend();
for (;;);
}
diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
index 0ab32779dfa7..a778bd3c107c 100644
--- a/arch/parisc/kernel/processor.c
+++ b/arch/parisc/kernel/processor.c
@@ -30,6 +30,7 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
+#include <linux/random.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <asm/param.h>
@@ -89,7 +90,7 @@ init_percpu_prof(unsigned long cpunum)
* (return 1). If so, initialize the chip and tell other partners in crime
* they have work to do.
*/
-static int processor_probe(struct parisc_device *dev)
+static int __init processor_probe(struct parisc_device *dev)
{
unsigned long txn_addr;
unsigned long cpuid;
@@ -237,28 +238,45 @@ static int processor_probe(struct parisc_device *dev)
*/
void __init collect_boot_cpu_data(void)
{
+ unsigned long cr16_seed;
+
memset(&boot_cpu_data, 0, sizeof(boot_cpu_data));
+ cr16_seed = get_cycles();
+ add_device_randomness(&cr16_seed, sizeof(cr16_seed));
+
boot_cpu_data.cpu_hz = 100 * PAGE0->mem_10msec; /* Hz of this PARISC */
/* get CPU-Model Information... */
#define p ((unsigned long *)&boot_cpu_data.pdc.model)
- if (pdc_model_info(&boot_cpu_data.pdc.model) == PDC_OK)
+ if (pdc_model_info(&boot_cpu_data.pdc.model) == PDC_OK) {
printk(KERN_INFO
"model %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8]);
+
+ add_device_randomness(&boot_cpu_data.pdc.model,
+ sizeof(boot_cpu_data.pdc.model));
+ }
#undef p
- if (pdc_model_versions(&boot_cpu_data.pdc.versions, 0) == PDC_OK)
+ if (pdc_model_versions(&boot_cpu_data.pdc.versions, 0) == PDC_OK) {
printk(KERN_INFO "vers %08lx\n",
boot_cpu_data.pdc.versions);
- if (pdc_model_cpuid(&boot_cpu_data.pdc.cpuid) == PDC_OK)
+ add_device_randomness(&boot_cpu_data.pdc.versions,
+ sizeof(boot_cpu_data.pdc.versions));
+ }
+
+ if (pdc_model_cpuid(&boot_cpu_data.pdc.cpuid) == PDC_OK) {
printk(KERN_INFO "CPUID vers %ld rev %ld (0x%08lx)\n",
(boot_cpu_data.pdc.cpuid >> 5) & 127,
boot_cpu_data.pdc.cpuid & 31,
boot_cpu_data.pdc.cpuid);
+ add_device_randomness(&boot_cpu_data.pdc.cpuid,
+ sizeof(boot_cpu_data.pdc.cpuid));
+ }
+
if (pdc_model_capabilities(&boot_cpu_data.pdc.capabilities) == PDC_OK)
printk(KERN_INFO "capabilities 0x%lx\n",
boot_cpu_data.pdc.capabilities);
@@ -414,12 +432,12 @@ show_cpuinfo (struct seq_file *m, void *v)
return 0;
}
-static const struct parisc_device_id processor_tbl[] = {
+static const struct parisc_device_id processor_tbl[] __initconst = {
{ HPHW_NPROC, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, SVERSION_ANY_ID },
{ 0, }
};
-static struct parisc_driver cpu_driver = {
+static struct parisc_driver cpu_driver __refdata = {
.name = "CPU",
.id_table = processor_tbl,
.probe = processor_probe
diff --git a/arch/parisc/kernel/real2.S b/arch/parisc/kernel/real2.S
index 1db58e546230..cc9963421a19 100644
--- a/arch/parisc/kernel/real2.S
+++ b/arch/parisc/kernel/real2.S
@@ -162,6 +162,7 @@ ENDPROC_CFI(restore_control_regs)
.text
.align 128
ENTRY_CFI(rfi_virt2real)
+#if !defined(BOOTLOADER)
/* switch to real mode... */
rsm PSW_SM_I,%r0
load32 PA(rfi_v2r_1), %r1
@@ -191,6 +192,7 @@ ENTRY_CFI(rfi_virt2real)
nop
rfi_v2r_1:
tophys_r1 %r2
+#endif /* defined(BOOTLOADER) */
bv 0(%r2)
nop
ENDPROC_CFI(rfi_virt2real)
@@ -198,6 +200,7 @@ ENDPROC_CFI(rfi_virt2real)
.text
.align 128
ENTRY_CFI(rfi_real2virt)
+#if !defined(BOOTLOADER)
rsm PSW_SM_I,%r0
load32 (rfi_r2v_1), %r1
nop
@@ -226,6 +229,7 @@ ENTRY_CFI(rfi_real2virt)
nop
rfi_r2v_1:
tovirt_r1 %r2
+#endif /* defined(BOOTLOADER) */
bv 0(%r2)
nop
ENDPROC_CFI(rfi_real2virt)
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
index 1b73690477c5..48dc7d4d20bb 100644
--- a/arch/parisc/kernel/unwind.c
+++ b/arch/parisc/kernel/unwind.c
@@ -34,7 +34,7 @@
extern struct unwind_table_entry __start___unwind[];
extern struct unwind_table_entry __stop___unwind[];
-static spinlock_t unwind_lock;
+static DEFINE_SPINLOCK(unwind_lock);
/*
* the kernel unwind block is not dynamically allocated so that
* we can call unwind_init as early in the bootup process as
@@ -181,8 +181,6 @@ int __init unwind_init(void)
start = (long)&__start___unwind[0];
stop = (long)&__stop___unwind[0];
- spin_lock_init(&unwind_lock);
-
printk("unwind_init: start = 0x%lx, end = 0x%lx, entries = %lu\n",
start, stop,
(stop - start) / sizeof(struct unwind_table_entry));
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index 3d6ef1b29c6a..ffe2cbf52d1a 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -78,6 +78,8 @@ SECTIONS
*(.text.sys_exit)
*(.text.do_sigaltstack)
*(.text.do_fork)
+ *(.text.div)
+ *($$*) /* millicode routines */
*(.text.*)
*(.fixup)
*(.lock.text) /* out-of-line lock text */
diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c
index 99115cd9e790..865a7f796c7f 100644
--- a/arch/parisc/lib/memcpy.c
+++ b/arch/parisc/lib/memcpy.c
@@ -27,8 +27,6 @@
#include <linux/compiler.h>
#include <linux/uaccess.h>
-DECLARE_PER_CPU(struct exception_data, exception_data);
-
#define get_user_space() (uaccess_kernel() ? 0 : mfsp(3))
#define get_kernel_space() (0)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 36f858c37ca7..809c468edab1 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -85,6 +85,17 @@ config NMI_IPI
depends on SMP && (DEBUGGER || KEXEC_CORE || HARDLOCKUP_DETECTOR)
default y
+config PPC_WATCHDOG
+ bool
+ depends on HARDLOCKUP_DETECTOR
+ depends on HAVE_HARDLOCKUP_DETECTOR_ARCH
+ default y
+ help
+ This is a placeholder when the powerpc hardlockup detector
+ watchdog is selected (arch/powerpc/kernel/watchdog.c). It is
+ seleted via the generic lockup detector menu which is why we
+ have no standalone config option for it here.
+
config STACKTRACE_SUPPORT
bool
default y
@@ -165,7 +176,7 @@ config PPC
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
- select ARCH_HAS_STRICT_KERNEL_RWX if (PPC_BOOK3S_64 && !RELOCATABLE && !HIBERNATION)
+ select ARCH_HAS_STRICT_KERNEL_RWX if ((PPC_BOOK3S_64 || PPC32) && !RELOCATABLE && !HIBERNATION)
select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
select HAVE_CBPF_JIT if !PPC64
select HAVE_CONTEXT_TRACKING if PPC64
@@ -199,7 +210,7 @@ config PPC
select HAVE_OPTPROBES if PPC64
select HAVE_PERF_EVENTS
select HAVE_PERF_EVENTS_NMI if PPC64
- select HAVE_HARDLOCKUP_DETECTOR_PERF if HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
+ select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_RCU_TABLE_FREE if SMP
@@ -356,10 +367,6 @@ config PPC_ADV_DEBUG_DAC_RANGE
depends on PPC_ADV_DEBUG_REGS && 44x
default y
-config PPC_EMULATE_SSTEP
- bool
- default y if KPROBES || UPROBES || XMON || HAVE_HW_BREAKPOINT
-
config ZONE_DMA32
bool
default y if PPC64
@@ -394,7 +401,7 @@ config HUGETLB_PAGE_SIZE_VARIABLE
config MATH_EMULATION
bool "Math emulation"
- depends on 4xx || 8xx || PPC_MPC832x || BOOKE
+ depends on 4xx || PPC_8xx || PPC_MPC832x || BOOKE
---help---
Some PowerPC chips designed for embedded applications do not have
a floating-point unit and therefore do not implement the
@@ -956,9 +963,9 @@ config PPC_PCI_CHOICE
config PCI
bool "PCI support" if PPC_PCI_CHOICE
- default y if !40x && !CPM2 && !8xx && !PPC_83xx \
+ default y if !40x && !CPM2 && !PPC_8xx && !PPC_83xx \
&& !PPC_85xx && !PPC_86xx && !GAMECUBE_COMMON
- default PCI_QSPAN if !4xx && !CPM2 && 8xx
+ default PCI_QSPAN if PPC_8xx
select GENERIC_PCI_IOMAP
help
Find out whether your system includes a PCI bus. PCI is the name of
@@ -974,7 +981,7 @@ config PCI_SYSCALL
config PCI_QSPAN
bool "QSpan PCI"
- depends on !4xx && !CPM2 && 8xx
+ depends on PPC_8xx
select PPC_I8259
help
Say Y here if you have a system based on a Motorola 8xx-series
@@ -1165,12 +1172,23 @@ config CONSISTENT_SIZE
config PIN_TLB
bool "Pinned Kernel TLBs (860 ONLY)"
- depends on ADVANCED_OPTIONS && 8xx
+ depends on ADVANCED_OPTIONS && PPC_8xx && \
+ !DEBUG_PAGEALLOC && !STRICT_KERNEL_RWX
+
+config PIN_TLB_DATA
+ bool "Pinned TLB for DATA"
+ depends on PIN_TLB
+ default y
config PIN_TLB_IMMR
bool "Pinned TLB for IMMR"
depends on PIN_TLB
default y
+
+config PIN_TLB_TEXT
+ bool "Pinned TLB for TEXT"
+ depends on PIN_TLB
+ default y
endmenu
if PPC64
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 8d4ed73d5490..1381693a4a51 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -59,6 +59,19 @@ machine-$(CONFIG_PPC64) += 64
machine-$(CONFIG_CPU_LITTLE_ENDIAN) += le
UTS_MACHINE := $(subst $(space),,$(machine-y))
+# XXX This needs to be before we override LD below
+ifdef CONFIG_PPC32
+KBUILD_LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o
+else
+ifeq ($(call ld-ifversion, -ge, 225000000, y),y)
+# Have the linker provide sfpr if possible.
+# There is a corresponding test in arch/powerpc/lib/Makefile
+KBUILD_LDFLAGS_MODULE += --save-restore-funcs
+else
+KBUILD_LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o
+endif
+endif
+
ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
override LD += -EL
LDEMULATION := lppc
@@ -190,18 +203,6 @@ else
CHECKFLAGS += -D__LITTLE_ENDIAN__
endif
-ifdef CONFIG_PPC32
-KBUILD_LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o
-else
-ifeq ($(call ld-ifversion, -ge, 225000000, y),y)
-# Have the linker provide sfpr if possible.
-# There is a corresponding test in arch/powerpc/lib/Makefile
-KBUILD_LDFLAGS_MODULE += --save-restore-funcs
-else
-KBUILD_LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o
-endif
-endif
-
ifeq ($(CONFIG_476FPE_ERR46),y)
KBUILD_LDFLAGS_MODULE += --ppc476-workaround \
-T $(srctree)/arch/powerpc/platforms/44x/ppc476_modules.lds
@@ -249,7 +250,7 @@ KBUILD_AFLAGS += $(aflags-y)
KBUILD_CFLAGS += $(cflags-y)
head-y := arch/powerpc/kernel/head_$(BITS).o
-head-$(CONFIG_8xx) := arch/powerpc/kernel/head_8xx.o
+head-$(CONFIG_PPC_8xx) := arch/powerpc/kernel/head_8xx.o
head-$(CONFIG_40x) := arch/powerpc/kernel/head_40x.o
head-$(CONFIG_44x) := arch/powerpc/kernel/head_44x.o
head-$(CONFIG_FSL_BOOKE) := arch/powerpc/kernel/head_fsl_booke.o
@@ -316,6 +317,10 @@ PHONY += ppc64le_defconfig
ppc64le_defconfig:
$(call merge_into_defconfig,ppc64_defconfig,le)
+PHONY += powernv_be_defconfig
+powernv_be_defconfig:
+ $(call merge_into_defconfig,powernv_defconfig,be)
+
PHONY += mpc85xx_defconfig
mpc85xx_defconfig:
$(call merge_into_defconfig,mpc85xx_basic_defconfig,\
diff --git a/arch/powerpc/boot/4xx.c b/arch/powerpc/boot/4xx.c
index 9d3bd4c45a24..f7da65169124 100644
--- a/arch/powerpc/boot/4xx.c
+++ b/arch/powerpc/boot/4xx.c
@@ -564,7 +564,7 @@ void ibm405gp_fixup_clocks(unsigned int sys_clk, unsigned int ser_clk)
fbdv = 16;
cbdv = ((pllmr & 0x00060000) >> 17) + 1; /* CPU:PLB */
opdv = ((pllmr & 0x00018000) >> 15) + 1; /* PLB:OPB */
- ppdv = ((pllmr & 0x00001800) >> 13) + 1; /* PLB:PCI */
+ ppdv = ((pllmr & 0x00006000) >> 13) + 1; /* PLB:PCI */
epdv = ((pllmr & 0x00001800) >> 11) + 2; /* PLB:EBC */
udiv = ((cpc0_cr0 & 0x3e) >> 1) + 1;
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index a7814a7b1523..c4e6fe35c075 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -25,12 +25,20 @@ compress-$(CONFIG_KERNEL_XZ) := CONFIG_KERNEL_XZ
BOOTCFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
-fno-strict-aliasing -Os -msoft-float -pipe \
-fomit-frame-pointer -fno-builtin -fPIC -nostdinc \
- -isystem $(shell $(CROSS32CC) -print-file-name=include) \
-D$(compress-y)
+BOOTCC := $(CC)
ifdef CONFIG_PPC64_BOOT_WRAPPER
BOOTCFLAGS += -m64
+else
+BOOTCFLAGS += -m32
+ifdef CROSS32_COMPILE
+ BOOTCC := $(CROSS32_COMPILE)gcc
+endif
endif
+
+BOOTCFLAGS += -isystem $(shell $(BOOTCC) -print-file-name=include)
+
ifdef CONFIG_CPU_BIG_ENDIAN
BOOTCFLAGS += -mbig-endian
else
@@ -99,17 +107,18 @@ src-wlib-y := string.S crt0.S stdio.c decompress.c main.c \
$(libfdt) libfdt-wrapper.c \
ns16550.c serial.c simple_alloc.c div64.S util.S \
elf_util.c $(zlib-y) devtree.c stdlib.c \
- oflib.c ofconsole.c cuboot.c mpsc.c cpm-serial.c \
- uartlite.c mpc52xx-psc.c opal.c
+ oflib.c ofconsole.c cuboot.c cpm-serial.c \
+ uartlite.c opal.c
+src-wlib-$(CONFIG_PPC_MPC52XX) += mpc52xx-psc.c
src-wlib-$(CONFIG_PPC64_BOOT_WRAPPER) += opal-calls.S
ifndef CONFIG_PPC64_BOOT_WRAPPER
src-wlib-y += crtsavres.S
endif
src-wlib-$(CONFIG_40x) += 4xx.c planetcore.c
src-wlib-$(CONFIG_44x) += 4xx.c ebony.c bamboo.c
-src-wlib-$(CONFIG_8xx) += mpc8xx.c planetcore.c fsl-soc.c
+src-wlib-$(CONFIG_PPC_8xx) += mpc8xx.c planetcore.c fsl-soc.c
src-wlib-$(CONFIG_PPC_82xx) += pq2.c fsl-soc.c planetcore.c
-src-wlib-$(CONFIG_EMBEDDED6xx) += mv64x60.c mv64x60_i2c.c ugecon.c fsl-soc.c
+src-wlib-$(CONFIG_EMBEDDED6xx) += mpsc.c mv64x60.c mv64x60_i2c.c ugecon.c fsl-soc.c
src-plat-y := of.c epapr.c
src-plat-$(CONFIG_40x) += fixed-head.S ep405.c cuboot-hotfoot.c \
@@ -124,7 +133,7 @@ src-plat-$(CONFIG_44x) += treeboot-ebony.c cuboot-ebony.c treeboot-bamboo.c \
treeboot-iss4xx.c treeboot-currituck.c \
treeboot-akebono.c \
simpleboot.c fixed-head.S virtex.c
-src-plat-$(CONFIG_8xx) += cuboot-8xx.c fixed-head.S ep88xc.c redboot-8xx.c
+src-plat-$(CONFIG_PPC_8xx) += cuboot-8xx.c fixed-head.S ep88xc.c redboot-8xx.c
src-plat-$(CONFIG_PPC_MPC52xx) += cuboot-52xx.c
src-plat-$(CONFIG_PPC_82xx) += cuboot-pq2.c fixed-head.S ep8248e.c cuboot-824x.c
src-plat-$(CONFIG_PPC_83xx) += cuboot-83xx.c fixed-head.S redboot-83xx.c
@@ -183,10 +192,10 @@ clean-files := $(zlib-) $(zlibheader-) $(zliblinuxheader-) \
empty.c zImage.coff.lds zImage.ps3.lds zImage.lds
quiet_cmd_bootcc = BOOTCC $@
- cmd_bootcc = $(CROSS32CC) -Wp,-MD,$(depfile) $(BOOTCFLAGS) -c -o $@ $<
+ cmd_bootcc = $(BOOTCC) -Wp,-MD,$(depfile) $(BOOTCFLAGS) -c -o $@ $<
quiet_cmd_bootas = BOOTAS $@
- cmd_bootas = $(CROSS32CC) -Wp,-MD,$(depfile) $(BOOTAFLAGS) -c -o $@ $<
+ cmd_bootas = $(BOOTCC) -Wp,-MD,$(depfile) $(BOOTAFLAGS) -c -o $@ $<
quiet_cmd_bootar = BOOTAR $@
cmd_bootar = $(CROSS32AR) -cr$(KBUILD_ARFLAGS) $@.$$$$ $(filter-out FORCE,$^); mv $@.$$$$ $@
diff --git a/arch/powerpc/boot/crt0.S b/arch/powerpc/boot/crt0.S
index 12866ccb5694..dcf2f15e6797 100644
--- a/arch/powerpc/boot/crt0.S
+++ b/arch/powerpc/boot/crt0.S
@@ -26,17 +26,17 @@ _zimage_start_opd:
#ifdef __powerpc64__
.balign 8
-p_start: .llong _start
-p_etext: .llong _etext
-p_bss_start: .llong __bss_start
-p_end: .llong _end
-
-p_toc: .llong __toc_start + 0x8000 - p_base
-p_dyn: .llong __dynamic_start - p_base
-p_rela: .llong __rela_dyn_start - p_base
-p_prom: .llong 0
+p_start: .8byte _start
+p_etext: .8byte _etext
+p_bss_start: .8byte __bss_start
+p_end: .8byte _end
+
+p_toc: .8byte __toc_start + 0x8000 - p_base
+p_dyn: .8byte __dynamic_start - p_base
+p_rela: .8byte __rela_dyn_start - p_base
+p_prom: .8byte 0
.weak _platform_stack_top
-p_pstack: .llong _platform_stack_top
+p_pstack: .8byte _platform_stack_top
#else
p_start: .long _start
p_etext: .long _etext
diff --git a/arch/powerpc/boot/dts/fsp2.dts b/arch/powerpc/boot/dts/fsp2.dts
index 475953ada707..f10a64aeb83b 100644
--- a/arch/powerpc/boot/dts/fsp2.dts
+++ b/arch/powerpc/boot/dts/fsp2.dts
@@ -52,6 +52,7 @@
clocks {
mmc_clk: mmc_clk {
compatible = "fixed-clock";
+ #clock-cells = <0>;
clock-frequency = <50000000>;
clock-output-names = "mmc_clk";
};
@@ -359,20 +360,6 @@
interrupts = <31 0x4 15 0x84>;
};
- mmc0: sdhci@020c0000 {
- compatible = "st,sdhci-stih407", "st,sdhci";
- status = "disabled";
- reg = <0x020c0000 0x20000>;
- reg-names = "mmc";
- interrupt-parent = <&UIC1_3>;
- interrupts = <21 0x4 22 0x4>;
- interrupt-names = "mmcirq";
- pinctrl-names = "default";
- pinctrl-0 = <>;
- clock-names = "mmc";
- clocks = <&mmc_clk>;
- };
-
plb6 {
compatible = "ibm,plb6";
#address-cells = <2>;
@@ -501,6 +488,24 @@
/*RXDE*/ 4 &UIC1_2 13 0x4>;
};
+ mmc0: mmc@20c0000 {
+ compatible = "st,sdhci-stih407", "st,sdhci";
+ reg = <0x020c0000 0x20000>;
+ reg-names = "mmc";
+ interrupts = <21 0x4>;
+ interrupt-parent = <&UIC1_3>;
+ interrupt-names = "mmcirq";
+ pinctrl-names = "default";
+ pinctrl-0 = <>;
+ clock-names = "mmc";
+ clocks = <&mmc_clk>;
+ bus-width = <4>;
+ non-removable;
+ sd-uhs-sdr50;
+ sd-uhs-sdr104;
+ sd-uhs-ddr50;
+ };
+
opb {
compatible = "ibm,opb";
#address-cells = <1>;
diff --git a/arch/powerpc/boot/ppc_asm.h b/arch/powerpc/boot/ppc_asm.h
index 68e388ee94fe..c63299f9fdd9 100644
--- a/arch/powerpc/boot/ppc_asm.h
+++ b/arch/powerpc/boot/ppc_asm.h
@@ -80,4 +80,12 @@
.long 0xa6037b7d; /* mtsrr1 r11 */ \
.long 0x2400004c /* rfid */
+#ifdef CONFIG_PPC_8xx
+#define MFTBL(dest) mftb dest
+#define MFTBU(dest) mftbu dest
+#else
+#define MFTBL(dest) mfspr dest, SPRN_TBRL
+#define MFTBU(dest) mfspr dest, SPRN_TBRU
+#endif
+
#endif /* _PPC64_PPC_ASM_H */
diff --git a/arch/powerpc/boot/serial.c b/arch/powerpc/boot/serial.c
index e04c1e4063ae..7b5c02b1afd0 100644
--- a/arch/powerpc/boot/serial.c
+++ b/arch/powerpc/boot/serial.c
@@ -120,15 +120,19 @@ int serial_console_init(void)
if (dt_is_compatible(devp, "ns16550") ||
dt_is_compatible(devp, "pnpPNP,501"))
rc = ns16550_console_init(devp, &serial_cd);
+#ifdef CONFIG_EMBEDDED6xx
else if (dt_is_compatible(devp, "marvell,mv64360-mpsc"))
rc = mpsc_console_init(devp, &serial_cd);
+#endif
else if (dt_is_compatible(devp, "fsl,cpm1-scc-uart") ||
dt_is_compatible(devp, "fsl,cpm1-smc-uart") ||
dt_is_compatible(devp, "fsl,cpm2-scc-uart") ||
dt_is_compatible(devp, "fsl,cpm2-smc-uart"))
rc = cpm_console_init(devp, &serial_cd);
+#ifdef CONFIG_PPC_MPC52XX
else if (dt_is_compatible(devp, "fsl,mpc5200-psc-uart"))
rc = mpc5200_psc_console_init(devp, &serial_cd);
+#endif
else if (dt_is_compatible(devp, "xlnx,opb-uartlite-1.00.b") ||
dt_is_compatible(devp, "xlnx,xps-uartlite-1.00.a"))
rc = uartlite_console_init(devp, &serial_cd);
diff --git a/arch/powerpc/boot/util.S b/arch/powerpc/boot/util.S
index 243b8497d58b..ec069177d942 100644
--- a/arch/powerpc/boot/util.S
+++ b/arch/powerpc/boot/util.S
@@ -71,32 +71,18 @@ udelay:
add r4,r4,r5
addi r4,r4,-1
divw r4,r4,r5 /* BUS ticks */
-#ifdef CONFIG_8xx
-1: mftbu r5
- mftb r6
- mftbu r7
-#else
-1: mfspr r5, SPRN_TBRU
- mfspr r6, SPRN_TBRL
- mfspr r7, SPRN_TBRU
-#endif
+1: MFTBU(r5)
+ MFTBL(r6)
+ MFTBU(r7)
cmpw 0,r5,r7
bne 1b /* Get [synced] base time */
addc r9,r6,r4 /* Compute end time */
addze r8,r5
-#ifdef CONFIG_8xx
-2: mftbu r5
-#else
-2: mfspr r5, SPRN_TBRU
-#endif
+2: MFTBU(r5)
cmpw 0,r5,r8
blt 2b
bgt 3f
-#ifdef CONFIG_8xx
- mftb r6
-#else
- mfspr r6, SPRN_TBRL
-#endif
+ MFTBL(r6)
cmpw 0,r6,r9
blt 2b
3: blr
diff --git a/arch/powerpc/configs/40x/acadia_defconfig b/arch/powerpc/configs/40x/acadia_defconfig
index 3438ed99c088..e57344c3b0d7 100644
--- a/arch/powerpc/configs/40x/acadia_defconfig
+++ b/arch/powerpc/configs/40x/acadia_defconfig
@@ -64,4 +64,3 @@ CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_DES=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/40x/ep405_defconfig b/arch/powerpc/configs/40x/ep405_defconfig
index 36c44c0b560c..0f66f8a87be8 100644
--- a/arch/powerpc/configs/40x/ep405_defconfig
+++ b/arch/powerpc/configs/40x/ep405_defconfig
@@ -64,4 +64,3 @@ CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_DES=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/40x/kilauea_defconfig b/arch/powerpc/configs/40x/kilauea_defconfig
index ad2156c6e2fc..b5cc7426c21f 100644
--- a/arch/powerpc/configs/40x/kilauea_defconfig
+++ b/arch/powerpc/configs/40x/kilauea_defconfig
@@ -72,4 +72,3 @@ CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_DES=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/40x/klondike_defconfig b/arch/powerpc/configs/40x/klondike_defconfig
index 28adb782ec51..caab658d1da1 100644
--- a/arch/powerpc/configs/40x/klondike_defconfig
+++ b/arch/powerpc/configs/40x/klondike_defconfig
@@ -26,7 +26,6 @@ CONFIG_SCSI_SAS_ATTRS=y
# CONFIG_VT is not set
# CONFIG_UNIX98_PTYS is not set
# CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
# CONFIG_USB_SUPPORT is not set
diff --git a/arch/powerpc/configs/40x/makalu_defconfig b/arch/powerpc/configs/40x/makalu_defconfig
index a00f434c4d47..e0b1489b7c7b 100644
--- a/arch/powerpc/configs/40x/makalu_defconfig
+++ b/arch/powerpc/configs/40x/makalu_defconfig
@@ -62,4 +62,3 @@ CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_DES=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/40x/obs600_defconfig b/arch/powerpc/configs/40x/obs600_defconfig
index e500e6a12b3e..aac06d2ad01a 100644
--- a/arch/powerpc/configs/40x/obs600_defconfig
+++ b/arch/powerpc/configs/40x/obs600_defconfig
@@ -72,4 +72,3 @@ CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_DES=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/40x/virtex_defconfig b/arch/powerpc/configs/40x/virtex_defconfig
index 65dc084a154c..a2b2770eee8f 100644
--- a/arch/powerpc/configs/40x/virtex_defconfig
+++ b/arch/powerpc/configs/40x/virtex_defconfig
@@ -41,9 +41,9 @@ CONFIG_NETDEVICES=y
CONFIG_SERIO_XILINX_XPS_PS2=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_UARTLITE=y
CONFIG_SERIAL_UARTLITE_CONSOLE=y
-CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_XILINX_HWICAP=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
@@ -74,4 +74,3 @@ CONFIG_FONT_8x16=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_KERNEL=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/40x/walnut_defconfig b/arch/powerpc/configs/40x/walnut_defconfig
index 567f99bd64a3..6faa03cd661c 100644
--- a/arch/powerpc/configs/40x/walnut_defconfig
+++ b/arch/powerpc/configs/40x/walnut_defconfig
@@ -57,4 +57,3 @@ CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_DES=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/44x/akebono_defconfig b/arch/powerpc/configs/44x/akebono_defconfig
index 143b2fbddb46..9fcd361607e2 100644
--- a/arch/powerpc/configs/44x/akebono_defconfig
+++ b/arch/powerpc/configs/44x/akebono_defconfig
@@ -123,7 +123,6 @@ CONFIG_NLS_DEFAULT="n"
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_XMON=y
@@ -135,5 +134,4 @@ CONFIG_CRYPTO_PCBC=y
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_SHA1_PPC=y
CONFIG_CRYPTO_DES=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
diff --git a/arch/powerpc/configs/44x/bamboo_defconfig b/arch/powerpc/configs/44x/bamboo_defconfig
index 477d99fefd9a..6f3a6ecc81e7 100644
--- a/arch/powerpc/configs/44x/bamboo_defconfig
+++ b/arch/powerpc/configs/44x/bamboo_defconfig
@@ -55,4 +55,3 @@ CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_DES=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/44x/currituck_defconfig b/arch/powerpc/configs/44x/currituck_defconfig
index 3799a26de6f4..5f1df5fe4453 100644
--- a/arch/powerpc/configs/44x/currituck_defconfig
+++ b/arch/powerpc/configs/44x/currituck_defconfig
@@ -81,7 +81,6 @@ CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
CONFIG_NLS_DEFAULT="n"
CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_XMON=y
@@ -94,5 +93,4 @@ CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_DES=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
diff --git a/arch/powerpc/configs/44x/ebony_defconfig b/arch/powerpc/configs/44x/ebony_defconfig
index c265f54ab9e5..e2b6578993d5 100644
--- a/arch/powerpc/configs/44x/ebony_defconfig
+++ b/arch/powerpc/configs/44x/ebony_defconfig
@@ -59,5 +59,4 @@ CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_DES=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
diff --git a/arch/powerpc/configs/44x/eiger_defconfig b/arch/powerpc/configs/44x/eiger_defconfig
index bb6bd6d90821..f6dc23fef683 100644
--- a/arch/powerpc/configs/44x/eiger_defconfig
+++ b/arch/powerpc/configs/44x/eiger_defconfig
@@ -84,18 +84,14 @@ CONFIG_CRYPTO_CCM=y
CONFIG_CRYPTO_GCM=y
CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_CTS=y
-CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_LRW=y
CONFIG_CRYPTO_PCBC=y
CONFIG_CRYPTO_XTS=y
-CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_SHA1=y
-CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=y
CONFIG_CRYPTO_ARC4=y
CONFIG_CRYPTO_BLOWFISH=y
CONFIG_CRYPTO_DES=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/44x/fsp2_defconfig b/arch/powerpc/configs/44x/fsp2_defconfig
index e8e6a6999852..bae6b26bcfba 100644
--- a/arch/powerpc/configs/44x/fsp2_defconfig
+++ b/arch/powerpc/configs/44x/fsp2_defconfig
@@ -92,8 +92,10 @@ CONFIG_MMC_DEBUG=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_OF_ARASAN=y
+CONFIG_MMC_SDHCI_ST=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_M41T80=y
+CONFIG_RESET_CONTROLLER=y
CONFIG_EXT2_FS=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
@@ -115,7 +117,6 @@ CONFIG_PRINTK_TIME=y
CONFIG_MESSAGE_LOGLEVEL_DEFAULT=3
CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_CRYPTO_CBC=y
diff --git a/arch/powerpc/configs/44x/icon_defconfig b/arch/powerpc/configs/44x/icon_defconfig
index 060f2edddb71..4453a4590b1a 100644
--- a/arch/powerpc/configs/44x/icon_defconfig
+++ b/arch/powerpc/configs/44x/icon_defconfig
@@ -47,8 +47,6 @@ CONFIG_FUSION_LOGGING=y
CONFIG_NETDEVICES=y
CONFIG_IBM_EMAC=y
# CONFIG_WLAN is not set
-CONFIG_INPUT_MOUSEDEV_SCREEN_X=640
-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=480
# CONFIG_MOUSE_PS2_ALPS is not set
# CONFIG_MOUSE_PS2_LOGIPS2PP is not set
# CONFIG_MOUSE_PS2_SYNAPTICS is not set
@@ -94,4 +92,3 @@ CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_DES=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/44x/iss476-smp_defconfig b/arch/powerpc/configs/44x/iss476-smp_defconfig
index 115a6b2be18b..d24bfa6ecd62 100644
--- a/arch/powerpc/configs/44x/iss476-smp_defconfig
+++ b/arch/powerpc/configs/44x/iss476-smp_defconfig
@@ -63,7 +63,6 @@ CONFIG_TMPFS=y
CONFIG_CRAMFS=y
# CONFIG_NETWORK_FILESYSTEMS is not set
CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_PPC_EARLY_DEBUG=y
@@ -72,5 +71,4 @@ CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_DES=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
diff --git a/arch/powerpc/configs/44x/katmai_defconfig b/arch/powerpc/configs/44x/katmai_defconfig
index b999048c4ae6..5d3f685a7af8 100644
--- a/arch/powerpc/configs/44x/katmai_defconfig
+++ b/arch/powerpc/configs/44x/katmai_defconfig
@@ -60,4 +60,3 @@ CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_DES=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/44x/rainier_defconfig b/arch/powerpc/configs/44x/rainier_defconfig
index b8c9ee45d0a2..7b8355a5698d 100644
--- a/arch/powerpc/configs/44x/rainier_defconfig
+++ b/arch/powerpc/configs/44x/rainier_defconfig
@@ -66,4 +66,3 @@ CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_DES=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/44x/redwood_defconfig b/arch/powerpc/configs/44x/redwood_defconfig
index a4bb048448da..918cfb63f0c8 100644
--- a/arch/powerpc/configs/44x/redwood_defconfig
+++ b/arch/powerpc/configs/44x/redwood_defconfig
@@ -83,18 +83,14 @@ CONFIG_CRYPTO_CCM=y
CONFIG_CRYPTO_GCM=y
CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_CTS=y
-CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_LRW=y
CONFIG_CRYPTO_PCBC=y
CONFIG_CRYPTO_XTS=y
-CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_SHA1=y
-CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=y
CONFIG_CRYPTO_ARC4=y
CONFIG_CRYPTO_BLOWFISH=y
CONFIG_CRYPTO_DES=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/44x/sequoia_defconfig b/arch/powerpc/configs/44x/sequoia_defconfig
index b3792fd8111d..1e04122912f3 100644
--- a/arch/powerpc/configs/44x/sequoia_defconfig
+++ b/arch/powerpc/configs/44x/sequoia_defconfig
@@ -67,4 +67,3 @@ CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_DES=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/44x/taishan_defconfig b/arch/powerpc/configs/44x/taishan_defconfig
index ff6f86241418..42cc7b4ed95f 100644
--- a/arch/powerpc/configs/44x/taishan_defconfig
+++ b/arch/powerpc/configs/44x/taishan_defconfig
@@ -61,4 +61,3 @@ CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_DES=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/44x/virtex5_defconfig b/arch/powerpc/configs/44x/virtex5_defconfig
index ce052064bcbb..99cc3dc02df1 100644
--- a/arch/powerpc/configs/44x/virtex5_defconfig
+++ b/arch/powerpc/configs/44x/virtex5_defconfig
@@ -40,9 +40,9 @@ CONFIG_NETDEVICES=y
CONFIG_SERIO_XILINX_XPS_PS2=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_UARTLITE=y
CONFIG_SERIAL_UARTLITE_CONSOLE=y
-CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_XILINX_HWICAP=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
@@ -73,4 +73,3 @@ CONFIG_FONT_8x16=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_KERNEL=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/44x/warp_defconfig b/arch/powerpc/configs/44x/warp_defconfig
index ab932488e68b..b5c866073efd 100644
--- a/arch/powerpc/configs/44x/warp_defconfig
+++ b/arch/powerpc/configs/44x/warp_defconfig
@@ -97,4 +97,3 @@ CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_SCHED_DEBUG is not set
# CONFIG_DEBUG_BUGVERBOSE is not set
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/52xx/cm5200_defconfig b/arch/powerpc/configs/52xx/cm5200_defconfig
index c1faac800806..73948e88ac82 100644
--- a/arch/powerpc/configs/52xx/cm5200_defconfig
+++ b/arch/powerpc/configs/52xx/cm5200_defconfig
@@ -77,4 +77,3 @@ CONFIG_DETECT_HUNG_TASK=y
# CONFIG_DEBUG_BUGVERBOSE is not set
CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/52xx/lite5200b_defconfig b/arch/powerpc/configs/52xx/lite5200b_defconfig
index 9493b02ac660..6fc7f786c83c 100644
--- a/arch/powerpc/configs/52xx/lite5200b_defconfig
+++ b/arch/powerpc/configs/52xx/lite5200b_defconfig
@@ -14,6 +14,7 @@ CONFIG_PPC_MPC52xx=y
CONFIG_PPC_MPC5200_SIMPLE=y
CONFIG_PPC_LITE5200=y
# CONFIG_PPC_PMAC is not set
+CONFIG_GEN_RTC=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -44,7 +45,6 @@ CONFIG_SERIAL_MPC52xx=y
CONFIG_SERIAL_MPC52xx_CONSOLE=y
CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=115200
# CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MPC=y
@@ -62,4 +62,3 @@ CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_DEBUG_BUGVERBOSE is not set
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/52xx/motionpro_defconfig b/arch/powerpc/configs/52xx/motionpro_defconfig
index fe8126bc1655..ae2a1f74103b 100644
--- a/arch/powerpc/configs/52xx/motionpro_defconfig
+++ b/arch/powerpc/configs/52xx/motionpro_defconfig
@@ -41,16 +41,16 @@ CONFIG_ATA=y
CONFIG_PATA_MPC52xx=y
CONFIG_NETDEVICES=y
CONFIG_FEC_MPC52xx=y
-CONFIG_MARVELL_PHY=y
+CONFIG_MDIO_BITBANG=y
+CONFIG_BROADCOM_PHY=y
+CONFIG_CICADA_PHY=y
CONFIG_DAVICOM_PHY=y
-CONFIG_QSEMI_PHY=y
+CONFIG_ICPLUS_PHY=y
CONFIG_LXT_PHY=y
-CONFIG_CICADA_PHY=y
-CONFIG_VITESSE_PHY=y
+CONFIG_MARVELL_PHY=y
+CONFIG_QSEMI_PHY=y
CONFIG_SMSC_PHY=y
-CONFIG_BROADCOM_PHY=y
-CONFIG_ICPLUS_PHY=y
-CONFIG_MDIO_BITBANG=y
+CONFIG_VITESSE_PHY=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
@@ -90,4 +90,3 @@ CONFIG_DETECT_HUNG_TASK=y
# CONFIG_DEBUG_BUGVERBOSE is not set
CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/52xx/tqm5200_defconfig b/arch/powerpc/configs/52xx/tqm5200_defconfig
index b8b316b884aa..0777e6efd22d 100644
--- a/arch/powerpc/configs/52xx/tqm5200_defconfig
+++ b/arch/powerpc/configs/52xx/tqm5200_defconfig
@@ -48,7 +48,6 @@ CONFIG_PATA_PLATFORM=y
CONFIG_NETDEVICES=y
CONFIG_FEC_MPC52xx=y
CONFIG_LXT_PHY=y
-CONFIG_FIXED_PHY=y
CONFIG_SERIAL_MPC52xx=y
CONFIG_SERIAL_MPC52xx_CONSOLE=y
CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=115200
@@ -92,4 +91,3 @@ CONFIG_DETECT_HUNG_TASK=y
# CONFIG_DEBUG_BUGVERBOSE is not set
CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/83xx/asp8347_defconfig b/arch/powerpc/configs/83xx/asp8347_defconfig
index b60cac088a7b..dd884df32dfd 100644
--- a/arch/powerpc/configs/83xx/asp8347_defconfig
+++ b/arch/powerpc/configs/83xx/asp8347_defconfig
@@ -42,7 +42,6 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
CONFIG_NETDEVICES=y
CONFIG_GIANFAR=y
-# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
@@ -71,4 +70,3 @@ CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/83xx/kmeter1_defconfig b/arch/powerpc/configs/83xx/kmeter1_defconfig
index 9547dcdd6489..d21b5cb365f2 100644
--- a/arch/powerpc/configs/83xx/kmeter1_defconfig
+++ b/arch/powerpc/configs/83xx/kmeter1_defconfig
@@ -55,7 +55,6 @@ CONFIG_HDLC=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_HW_RANDOM=y
diff --git a/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig b/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig
index 80aa844c1428..1f69f4edf074 100644
--- a/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig
+++ b/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig
@@ -48,8 +48,6 @@ CONFIG_NETDEVICES=y
CONFIG_GIANFAR=y
CONFIG_E100=y
CONFIG_CICADA_PHY=y
-CONFIG_FIXED_PHY=y
-# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
@@ -87,4 +85,3 @@ CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_CRYPTO_PCBC=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig b/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig
index d89d13bc6901..797fc3ffddee 100644
--- a/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig
+++ b/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig
@@ -47,7 +47,6 @@ CONFIG_MD_RAID1=y
CONFIG_NETDEVICES=y
CONFIG_GIANFAR=y
CONFIG_E100=y
-# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
@@ -85,4 +84,3 @@ CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_CRYPTO_PCBC=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/83xx/mpc832x_mds_defconfig b/arch/powerpc/configs/83xx/mpc832x_mds_defconfig
index e789518a2881..4f914906ee4b 100644
--- a/arch/powerpc/configs/83xx/mpc832x_mds_defconfig
+++ b/arch/powerpc/configs/83xx/mpc832x_mds_defconfig
@@ -14,7 +14,6 @@ CONFIG_PARTITION_ADVANCED=y
# CONFIG_PPC_PMAC is not set
CONFIG_PPC_83xx=y
CONFIG_MPC832x_MDS=y
-CONFIG_QUICC_ENGINE=y
CONFIG_MATH_EMULATION=y
CONFIG_PCI=y
CONFIG_NET=y
@@ -36,7 +35,6 @@ CONFIG_SCSI=y
CONFIG_NETDEVICES=y
CONFIG_UCC_GETH=y
CONFIG_DAVICOM_PHY=y
-# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
@@ -50,6 +48,7 @@ CONFIG_I2C_MPC=y
CONFIG_WATCHDOG=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1374=y
+CONFIG_QUICC_ENGINE=y
CONFIG_EXT2_FS=y
CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
@@ -59,4 +58,3 @@ CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig b/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig
index 917a49ca2bd1..a484eb8401e8 100644
--- a/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig
+++ b/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig
@@ -14,7 +14,7 @@ CONFIG_LDM_PARTITION=y
# CONFIG_PPC_PMAC is not set
CONFIG_PPC_83xx=y
CONFIG_MPC832x_RDB=y
-CONFIG_QUICC_ENGINE=y
+CONFIG_GEN_RTC=y
CONFIG_MATH_EMULATION=y
CONFIG_PCI=y
CONFIG_NET=y
@@ -38,7 +38,6 @@ CONFIG_NETDEVICES=y
CONFIG_UCC_GETH=y
CONFIG_E1000=y
CONFIG_ICPLUS_PHY=y
-# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
@@ -46,7 +45,6 @@ CONFIG_ICPLUS_PHY=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_HW_RANDOM=y
-CONFIG_GEN_RTC=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MPC=y
@@ -62,6 +60,7 @@ CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
CONFIG_USB_STORAGE=y
CONFIG_MMC=y
CONFIG_MMC_SPI=y
+CONFIG_QUICC_ENGINE=y
CONFIG_EXT2_FS=y
CONFIG_EXT4_FS=y
CONFIG_MSDOS_FS=y
@@ -78,4 +77,3 @@ CONFIG_NLS_ISO8859_1=y
CONFIG_CRC_T10DIF=y
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/83xx/mpc834x_itx_defconfig b/arch/powerpc/configs/83xx/mpc834x_itx_defconfig
index 00f636e95cc8..37f4d93b3f81 100644
--- a/arch/powerpc/configs/83xx/mpc834x_itx_defconfig
+++ b/arch/powerpc/configs/83xx/mpc834x_itx_defconfig
@@ -49,7 +49,6 @@ CONFIG_MD_RAID1=y
CONFIG_NETDEVICES=y
CONFIG_GIANFAR=y
CONFIG_CICADA_PHY=y
-CONFIG_FIXED_PHY=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
@@ -84,4 +83,3 @@ CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_CRC_T10DIF=y
CONFIG_CRYPTO_PCBC=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig b/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig
index a539d44d1dba..7adb6708a761 100644
--- a/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig
+++ b/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig
@@ -75,4 +75,3 @@ CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_CRC_T10DIF=y
CONFIG_CRYPTO_PCBC=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/83xx/mpc834x_mds_defconfig b/arch/powerpc/configs/83xx/mpc834x_mds_defconfig
index 9f0ddc830c82..d7ce3551529d 100644
--- a/arch/powerpc/configs/83xx/mpc834x_mds_defconfig
+++ b/arch/powerpc/configs/83xx/mpc834x_mds_defconfig
@@ -35,7 +35,6 @@ CONFIG_NETDEVICES=y
CONFIG_GIANFAR=y
CONFIG_E100=y
CONFIG_MARVELL_PHY=y
-# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
@@ -58,4 +57,3 @@ CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/83xx/mpc836x_mds_defconfig b/arch/powerpc/configs/83xx/mpc836x_mds_defconfig
index ceed4c1f0ab5..92134cee3f37 100644
--- a/arch/powerpc/configs/83xx/mpc836x_mds_defconfig
+++ b/arch/powerpc/configs/83xx/mpc836x_mds_defconfig
@@ -14,7 +14,6 @@ CONFIG_PARTITION_ADVANCED=y
# CONFIG_PPC_PMAC is not set
CONFIG_PPC_83xx=y
CONFIG_MPC836x_MDS=y
-CONFIG_QUICC_ENGINE=y
CONFIG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -41,7 +40,6 @@ CONFIG_SCSI=y
CONFIG_NETDEVICES=y
CONFIG_UCC_GETH=y
CONFIG_MARVELL_PHY=y
-# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
@@ -55,6 +53,7 @@ CONFIG_I2C_MPC=y
CONFIG_WATCHDOG=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1374=y
+CONFIG_QUICC_ENGINE=y
CONFIG_EXT2_FS=y
CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
@@ -64,4 +63,3 @@ CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/83xx/mpc836x_rdk_defconfig b/arch/powerpc/configs/83xx/mpc836x_rdk_defconfig
index a6819bf3ef5e..97f7ea5f205f 100644
--- a/arch/powerpc/configs/83xx/mpc836x_rdk_defconfig
+++ b/arch/powerpc/configs/83xx/mpc836x_rdk_defconfig
@@ -12,7 +12,6 @@ CONFIG_PARTITION_ADVANCED=y
# CONFIG_PPC_PMAC is not set
CONFIG_PPC_83xx=y
CONFIG_MPC836x_RDK=y
-CONFIG_QUICC_ENGINE=y
CONFIG_QE_GPIO=y
CONFIG_PCI=y
CONFIG_NET=y
@@ -39,11 +38,9 @@ CONFIG_BLK_DEV_RAM_SIZE=32768
CONFIG_NETDEVICES=y
CONFIG_UCC_GETH=y
CONFIG_BROADCOM_PHY=y
-# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_QE=y
@@ -63,6 +60,7 @@ CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_USB_SUPPORT is not set
+CONFIG_QUICC_ENGINE=y
CONFIG_EXT2_FS=y
CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
@@ -72,4 +70,3 @@ CONFIG_NFS_FS=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_PPC_EARLY_DEBUG=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/83xx/mpc837x_mds_defconfig b/arch/powerpc/configs/83xx/mpc837x_mds_defconfig
index 4bd1992e4d98..ee7510a33d06 100644
--- a/arch/powerpc/configs/83xx/mpc837x_mds_defconfig
+++ b/arch/powerpc/configs/83xx/mpc837x_mds_defconfig
@@ -11,6 +11,7 @@ CONFIG_PARTITION_ADVANCED=y
# CONFIG_PPC_PMAC is not set
CONFIG_PPC_83xx=y
CONFIG_MPC837x_MDS=y
+CONFIG_GEN_RTC=y
CONFIG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -35,7 +36,6 @@ CONFIG_SATA_FSL=y
CONFIG_NETDEVICES=y
CONFIG_GIANFAR=y
CONFIG_MARVELL_PHY=y
-# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
@@ -43,7 +43,6 @@ CONFIG_MARVELL_PHY=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MPC=y
@@ -58,4 +57,3 @@ CONFIG_ROOT_NFS=y
CONFIG_CRC_T10DIF=y
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig b/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig
index 2d4bb63882b8..8966a9af4230 100644
--- a/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig
+++ b/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig
@@ -11,6 +11,7 @@ CONFIG_PARTITION_ADVANCED=y
# CONFIG_PPC_PMAC is not set
CONFIG_PPC_83xx=y
CONFIG_MPC837x_RDB=y
+CONFIG_GEN_RTC=y
CONFIG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -41,9 +42,7 @@ CONFIG_MD_RAID456=y
CONFIG_NETDEVICES=y
CONFIG_GIANFAR=y
CONFIG_MARVELL_PHY=y
-CONFIG_FIXED_PHY=y
CONFIG_INPUT_FF_MEMLESS=m
-# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
@@ -51,7 +50,6 @@ CONFIG_INPUT_FF_MEMLESS=m
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MPC=y
@@ -86,4 +84,3 @@ CONFIG_CRC_T10DIF=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/83xx/sbc834x_defconfig b/arch/powerpc/configs/83xx/sbc834x_defconfig
index b3380dbd1925..7d74699334da 100644
--- a/arch/powerpc/configs/83xx/sbc834x_defconfig
+++ b/arch/powerpc/configs/83xx/sbc834x_defconfig
@@ -11,6 +11,7 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_PPC_PMAC is not set
CONFIG_PPC_83xx=y
CONFIG_SBC834x=y
+CONFIG_GEN_RTC=y
CONFIG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -41,7 +42,6 @@ CONFIG_BLK_DEV_SD=y
CONFIG_NETDEVICES=y
CONFIG_GIANFAR=y
CONFIG_BROADCOM_PHY=y
-# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
@@ -52,7 +52,6 @@ CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=2
CONFIG_SERIAL_8250_RUNTIME_UARTS=2
# CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MPC=y
@@ -72,5 +71,4 @@ CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
diff --git a/arch/powerpc/configs/85xx/ge_imp3a_defconfig b/arch/powerpc/configs/85xx/ge_imp3a_defconfig
index a917f7afb4f9..dd98f43b2fb8 100644
--- a/arch/powerpc/configs/85xx/ge_imp3a_defconfig
+++ b/arch/powerpc/configs/85xx/ge_imp3a_defconfig
@@ -22,7 +22,6 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_GE_IMP3A=y
-CONFIG_QUICC_ENGINE=y
CONFIG_QE_GPIO=y
CONFIG_CPM2=y
CONFIG_HIGHMEM=y
@@ -161,6 +160,7 @@ CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_RX8581=y
CONFIG_DMADEVICES=y
CONFIG_FSL_DMA=y
+CONFIG_QUICC_ENGINE=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
@@ -233,5 +233,4 @@ CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_DES=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_DEV_TALITOS=y
diff --git a/arch/powerpc/configs/85xx/ksi8560_defconfig b/arch/powerpc/configs/85xx/ksi8560_defconfig
index bd814dfb0bbd..9ce6f48cfb61 100644
--- a/arch/powerpc/configs/85xx/ksi8560_defconfig
+++ b/arch/powerpc/configs/85xx/ksi8560_defconfig
@@ -8,6 +8,7 @@ CONFIG_PARTITION_ADVANCED=y
# CONFIG_MSDOS_PARTITION is not set
CONFIG_KSI8560=y
CONFIG_CPM2=y
+CONFIG_GEN_RTC=y
CONFIG_HIGHMEM=y
CONFIG_BINFMT_MISC=y
CONFIG_MATH_EMULATION=y
@@ -39,14 +40,12 @@ CONFIG_FS_ENET=y
CONFIG_FS_ENET_MDIO_FCC=y
CONFIG_GIANFAR=y
CONFIG_MARVELL_PHY=y
-# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
CONFIG_SERIAL_CPM=y
CONFIG_SERIAL_CPM_CONSOLE=y
-CONFIG_GEN_RTC=y
CONFIG_EXT2_FS=y
CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
@@ -57,4 +56,3 @@ CONFIG_DEBUG_FS=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_MUTEXES=y
# CONFIG_DEBUG_BUGVERBOSE is not set
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/85xx/mpc8540_ads_defconfig b/arch/powerpc/configs/85xx/mpc8540_ads_defconfig
index 32af10def641..5fbc3f904046 100644
--- a/arch/powerpc/configs/85xx/mpc8540_ads_defconfig
+++ b/arch/powerpc/configs/85xx/mpc8540_ads_defconfig
@@ -9,6 +9,7 @@ CONFIG_EXPERT=y
CONFIG_PARTITION_ADVANCED=y
# CONFIG_MSDOS_PARTITION is not set
CONFIG_MPC8540_ADS=y
+CONFIG_GEN_RTC=y
CONFIG_BINFMT_MISC=y
CONFIG_MATH_EMULATION=y
# CONFIG_SECCOMP is not set
@@ -30,7 +31,6 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
CONFIG_NETDEVICES=y
CONFIG_GIANFAR=y
-# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
@@ -38,7 +38,6 @@ CONFIG_GIANFAR=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=y
CONFIG_EXT2_FS=y
CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
@@ -47,4 +46,3 @@ CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_MUTEXES=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/85xx/mpc8560_ads_defconfig b/arch/powerpc/configs/85xx/mpc8560_ads_defconfig
index a52b2170ee33..ff981d7905c7 100644
--- a/arch/powerpc/configs/85xx/mpc8560_ads_defconfig
+++ b/arch/powerpc/configs/85xx/mpc8560_ads_defconfig
@@ -7,6 +7,7 @@ CONFIG_EXPERT=y
CONFIG_PARTITION_ADVANCED=y
# CONFIG_MSDOS_PARTITION is not set
CONFIG_MPC8560_ADS=y
+CONFIG_GEN_RTC=y
CONFIG_BINFMT_MISC=y
CONFIG_MATH_EMULATION=y
# CONFIG_SECCOMP is not set
@@ -32,16 +33,14 @@ CONFIG_FS_ENET=y
# CONFIG_FS_ENET_HAS_SCC is not set
CONFIG_GIANFAR=y
CONFIG_E1000=y
-CONFIG_MARVELL_PHY=y
CONFIG_DAVICOM_PHY=y
-# CONFIG_INPUT_MOUSEDEV is not set
+CONFIG_MARVELL_PHY=y
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
CONFIG_SERIAL_CPM=y
CONFIG_SERIAL_CPM_CONSOLE=y
-CONFIG_GEN_RTC=y
CONFIG_EXT2_FS=y
CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
@@ -50,4 +49,3 @@ CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_MUTEXES=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig b/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig
index 002bb48abaa3..974f0706d777 100644
--- a/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig
+++ b/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig
@@ -9,6 +9,7 @@ CONFIG_EXPERT=y
CONFIG_PARTITION_ADVANCED=y
# CONFIG_MSDOS_PARTITION is not set
CONFIG_MPC85xx_CDS=y
+CONFIG_GEN_RTC=y
CONFIG_BINFMT_MISC=y
CONFIG_MATH_EMULATION=y
# CONFIG_SECCOMP is not set
@@ -35,7 +36,6 @@ CONFIG_BLK_DEV_VIA82CXXX=y
CONFIG_NETDEVICES=y
CONFIG_GIANFAR=y
CONFIG_E1000=y
-# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
@@ -43,7 +43,6 @@ CONFIG_E1000=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=y
CONFIG_EXT2_FS=y
CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
@@ -52,4 +51,3 @@ CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_MUTEXES=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/85xx/sbc8548_defconfig b/arch/powerpc/configs/85xx/sbc8548_defconfig
index 97ae02377cf3..7e3e84a842e4 100644
--- a/arch/powerpc/configs/85xx/sbc8548_defconfig
+++ b/arch/powerpc/configs/85xx/sbc8548_defconfig
@@ -6,6 +6,7 @@ CONFIG_EXPERT=y
CONFIG_SLAB=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_SBC8548=y
+CONFIG_GEN_RTC=y
CONFIG_BINFMT_MISC=y
CONFIG_MATH_EMULATION=y
# CONFIG_SECCOMP is not set
@@ -36,7 +37,6 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_NETDEVICES=y
CONFIG_GIANFAR=y
CONFIG_BROADCOM_PHY=y
-# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
@@ -44,10 +44,8 @@ CONFIG_BROADCOM_PHY=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=y
# CONFIG_USB_SUPPORT is not set
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/85xx/socrates_defconfig b/arch/powerpc/configs/85xx/socrates_defconfig
index 13579cb30539..6106fadbbd8b 100644
--- a/arch/powerpc/configs/85xx/socrates_defconfig
+++ b/arch/powerpc/configs/85xx/socrates_defconfig
@@ -42,8 +42,6 @@ CONFIG_BLK_DEV_SD=y
CONFIG_NETDEVICES=y
CONFIG_GIANFAR=y
CONFIG_MARVELL_PHY=y
-CONFIG_INPUT_MOUSEDEV_SCREEN_X=800
-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=480
CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
@@ -86,4 +84,3 @@ CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
CONFIG_FONTS=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/85xx/stx_gp3_defconfig b/arch/powerpc/configs/85xx/stx_gp3_defconfig
index 384926f3ce1d..5b9cc01b9098 100644
--- a/arch/powerpc/configs/85xx/stx_gp3_defconfig
+++ b/arch/powerpc/configs/85xx/stx_gp3_defconfig
@@ -39,8 +39,6 @@ CONFIG_SCSI_CONSTANTS=y
CONFIG_NETDEVICES=y
CONFIG_GIANFAR=y
CONFIG_MARVELL_PHY=y
-CONFIG_INPUT_MOUSEDEV_SCREEN_X=1280
-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=1024
CONFIG_INPUT_JOYDEV=m
CONFIG_INPUT_EVDEV=m
# CONFIG_VT is not set
@@ -68,4 +66,3 @@ CONFIG_CRC_T10DIF=m
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_DEBUG_BUGVERBOSE is not set
CONFIG_BDI_SWITCH=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/85xx/tqm8540_defconfig b/arch/powerpc/configs/85xx/tqm8540_defconfig
index 908f3885f4a5..98982a0e82d8 100644
--- a/arch/powerpc/configs/85xx/tqm8540_defconfig
+++ b/arch/powerpc/configs/85xx/tqm8540_defconfig
@@ -9,6 +9,7 @@ CONFIG_EXPERT=y
CONFIG_PARTITION_ADVANCED=y
# CONFIG_MSDOS_PARTITION is not set
CONFIG_TQM8540=y
+CONFIG_GEN_RTC=y
CONFIG_MATH_EMULATION=y
CONFIG_PCI=y
CONFIG_NET=y
@@ -35,14 +36,12 @@ CONFIG_BLK_DEV_VIA82CXXX=y
CONFIG_NETDEVICES=y
CONFIG_GIANFAR=y
CONFIG_E100=y
-# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_GEN_RTC=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MPC=y
@@ -56,4 +55,3 @@ CONFIG_JFFS2_FS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/85xx/tqm8541_defconfig b/arch/powerpc/configs/85xx/tqm8541_defconfig
index f47e57610b7c..a6e21db1dafe 100644
--- a/arch/powerpc/configs/85xx/tqm8541_defconfig
+++ b/arch/powerpc/configs/85xx/tqm8541_defconfig
@@ -9,6 +9,7 @@ CONFIG_EXPERT=y
CONFIG_PARTITION_ADVANCED=y
# CONFIG_MSDOS_PARTITION is not set
CONFIG_TQM8541=y
+CONFIG_GEN_RTC=y
CONFIG_MATH_EMULATION=y
CONFIG_PCI=y
CONFIG_NET=y
@@ -35,7 +36,6 @@ CONFIG_BLK_DEV_VIA82CXXX=y
CONFIG_NETDEVICES=y
CONFIG_GIANFAR=y
CONFIG_E100=y
-# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
@@ -44,7 +44,6 @@ CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_CPM=y
CONFIG_SERIAL_CPM_CONSOLE=y
-CONFIG_GEN_RTC=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MPC=y
@@ -58,4 +57,3 @@ CONFIG_JFFS2_FS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/85xx/tqm8548_defconfig b/arch/powerpc/configs/85xx/tqm8548_defconfig
index 42f5d0a7698e..2697e4e8a761 100644
--- a/arch/powerpc/configs/85xx/tqm8548_defconfig
+++ b/arch/powerpc/configs/85xx/tqm8548_defconfig
@@ -43,7 +43,6 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
CONFIG_NETDEVICES=y
CONFIG_GIANFAR=y
-# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
@@ -66,4 +65,3 @@ CONFIG_ROOT_NFS=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_MUTEXES=y
# CONFIG_DEBUG_BUGVERBOSE is not set
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/85xx/tqm8555_defconfig b/arch/powerpc/configs/85xx/tqm8555_defconfig
index 71552b7929cd..ca1de3979474 100644
--- a/arch/powerpc/configs/85xx/tqm8555_defconfig
+++ b/arch/powerpc/configs/85xx/tqm8555_defconfig
@@ -9,6 +9,7 @@ CONFIG_EXPERT=y
CONFIG_PARTITION_ADVANCED=y
# CONFIG_MSDOS_PARTITION is not set
CONFIG_TQM8555=y
+CONFIG_GEN_RTC=y
CONFIG_MATH_EMULATION=y
CONFIG_PCI=y
CONFIG_NET=y
@@ -35,7 +36,6 @@ CONFIG_BLK_DEV_VIA82CXXX=y
CONFIG_NETDEVICES=y
CONFIG_GIANFAR=y
CONFIG_E100=y
-# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
@@ -44,7 +44,6 @@ CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_CPM=y
CONFIG_SERIAL_CPM_CONSOLE=y
-CONFIG_GEN_RTC=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MPC=y
@@ -58,4 +57,3 @@ CONFIG_JFFS2_FS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/85xx/tqm8560_defconfig b/arch/powerpc/configs/85xx/tqm8560_defconfig
index 25aac973d6d7..ca3b8c8ef30f 100644
--- a/arch/powerpc/configs/85xx/tqm8560_defconfig
+++ b/arch/powerpc/configs/85xx/tqm8560_defconfig
@@ -9,6 +9,7 @@ CONFIG_EXPERT=y
CONFIG_PARTITION_ADVANCED=y
# CONFIG_MSDOS_PARTITION is not set
CONFIG_TQM8560=y
+CONFIG_GEN_RTC=y
CONFIG_MATH_EMULATION=y
CONFIG_PCI=y
CONFIG_NET=y
@@ -35,7 +36,6 @@ CONFIG_BLK_DEV_VIA82CXXX=y
CONFIG_NETDEVICES=y
CONFIG_GIANFAR=y
CONFIG_E100=y
-# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
@@ -44,7 +44,6 @@ CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_CPM=y
CONFIG_SERIAL_CPM_CONSOLE=y
-CONFIG_GEN_RTC=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MPC=y
@@ -58,4 +57,3 @@ CONFIG_JFFS2_FS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig b/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig
index 72900b84d3e0..6531139a8a8d 100644
--- a/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig
+++ b/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig
@@ -54,7 +54,6 @@ CONFIG_IP_PIMSM_V2=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-CONFIG_IPV6=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
CONFIG_MTD_REDBOOT_PARTS=y
@@ -86,7 +85,6 @@ CONFIG_DUMMY=y
CONFIG_GIANFAR=y
CONFIG_E1000=y
CONFIG_BROADCOM_PHY=y
-# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
CONFIG_SERIO_LIBPS2=y
@@ -107,8 +105,8 @@ CONFIG_SENSORS_LM90=y
CONFIG_WATCHDOG=y
CONFIG_USB=y
CONFIG_USB_MON=y
-CONFIG_USB_ISP1760=y
CONFIG_USB_STORAGE=y
+CONFIG_USB_ISP1760=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_LEDS_PCA955X=y
@@ -143,4 +141,3 @@ CONFIG_DETECT_HUNG_TASK=y
# CONFIG_DEBUG_BUGVERBOSE is not set
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_MD5=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/adder875_defconfig b/arch/powerpc/configs/adder875_defconfig
index 6a3f825452e9..935ea3ade7de 100644
--- a/arch/powerpc/configs/adder875_defconfig
+++ b/arch/powerpc/configs/adder875_defconfig
@@ -12,6 +12,7 @@ CONFIG_PARTITION_ADVANCED=y
# CONFIG_IOSCHED_CFQ is not set
CONFIG_PPC_ADDER875=y
CONFIG_8xx_COPYBACK=y
+CONFIG_GEN_RTC=y
CONFIG_HZ_1000=y
# CONFIG_SECCOMP is not set
CONFIG_NET=y
@@ -41,7 +42,6 @@ CONFIG_DAVICOM_PHY=y
# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_CPM=y
CONFIG_SERIAL_CPM_CONSOLE=y
-CONFIG_GEN_RTC=y
# CONFIG_HWMON is not set
CONFIG_THERMAL=y
# CONFIG_USB_SUPPORT is not set
diff --git a/arch/powerpc/configs/amigaone_defconfig b/arch/powerpc/configs/amigaone_defconfig
index 8d3e3c41258d..12f397d403c6 100644
--- a/arch/powerpc/configs/amigaone_defconfig
+++ b/arch/powerpc/configs/amigaone_defconfig
@@ -45,7 +45,6 @@ CONFIG_PARPORT_PC_FIFO=y
CONFIG_BLK_DEV_FD=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
-CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
@@ -120,5 +119,4 @@ CONFIG_XMON=y
CONFIG_XMON_DEFAULT=y
CONFIG_CRYPTO_CBC=m
CONFIG_CRYPTO_PCBC=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
diff --git a/arch/powerpc/configs/be.config b/arch/powerpc/configs/be.config
new file mode 100644
index 000000000000..c5cdc99a6530
--- /dev/null
+++ b/arch/powerpc/configs/be.config
@@ -0,0 +1 @@
+CONFIG_CPU_BIG_ENDIAN=y
diff --git a/arch/powerpc/configs/c2k_defconfig b/arch/powerpc/configs/c2k_defconfig
index 7c9d95370150..f1552af9eecc 100644
--- a/arch/powerpc/configs/c2k_defconfig
+++ b/arch/powerpc/configs/c2k_defconfig
@@ -27,6 +27,7 @@ CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=m
CONFIG_CPU_FREQ_GOV_ONDEMAND=m
+CONFIG_GEN_RTC=y
CONFIG_HIGHMEM=y
CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_BINFMT_MISC=y
@@ -197,7 +198,6 @@ CONFIG_TUN=m
# CONFIG_ATM_DRIVERS is not set
CONFIG_MV643XX_ETH=y
CONFIG_VITESSE_PHY=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
@@ -209,7 +209,6 @@ CONFIG_SERIAL_NONSTANDARD=y
CONFIG_SERIAL_MPSC=y
CONFIG_SERIAL_MPSC_CONSOLE=y
CONFIG_NVRAM=m
-CONFIG_GEN_RTC=m
CONFIG_RAW_DRIVER=y
CONFIG_MAX_RAW_DEVS=8192
CONFIG_I2C=m
@@ -390,7 +389,6 @@ CONFIG_SECURITY_NETWORK=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_SELINUX_DISABLE=y
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_SHA1=y
@@ -402,4 +400,3 @@ CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/cell_defconfig b/arch/powerpc/configs/cell_defconfig
index aa564599e368..560a93a84efe 100644
--- a/arch/powerpc/configs/cell_defconfig
+++ b/arch/powerpc/configs/cell_defconfig
@@ -4,7 +4,6 @@ CONFIG_ALTIVEC=y
CONFIG_SMP=y
CONFIG_NR_CPUS=4
CONFIG_SYSVIPC=y
-CONFIG_FHANDLE=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
@@ -34,10 +33,10 @@ CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_GEN_RTC=y
CONFIG_BINFMT_MISC=m
CONFIG_IRQ_ALL_CPUS=y
CONFIG_NUMA=y
-CONFIG_MEMORY_HOTREMOVE=y
CONFIG_PPC_64K_PAGES=y
CONFIG_SCHED_SMT=y
CONFIG_PCIEPORTBUS=y
@@ -53,7 +52,6 @@ CONFIG_IP_PNP_RARP=y
CONFIG_NET_IPIP=y
CONFIG_SYN_COOKIES=y
# CONFIG_INET_XFRM_MODE_BEET is not set
-CONFIG_IPV6=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
@@ -141,7 +139,6 @@ CONFIG_SKY2=m
CONFIG_GELIC_NET=m
CONFIG_GELIC_WIRELESS=y
CONFIG_SPIDER_NET=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO_I8042 is not set
@@ -149,8 +146,6 @@ CONFIG_SPIDER_NET=y
CONFIG_SERIAL_NONSTANDARD=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_TXX9_NR_UARTS=2
-CONFIG_SERIAL_TXX9_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_HVC_RTAS=y
CONFIG_IPMI_HANDLER=m
@@ -159,7 +154,6 @@ CONFIG_IPMI_SI=m
CONFIG_IPMI_WATCHDOG=m
CONFIG_IPMI_POWEROFF=m
# CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=y
CONFIG_I2C=y
CONFIG_WATCHDOG=y
# CONFIG_VGA_CONSOLE is not set
@@ -207,7 +201,6 @@ CONFIG_NLS_ISO8859_13=m
CONFIG_NLS_ISO8859_14=m
CONFIG_NLS_ISO8859_15=m
# CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_MUTEXES=y
diff --git a/arch/powerpc/configs/chrp32_defconfig b/arch/powerpc/configs/chrp32_defconfig
index 1f6f90cd8aff..a203b1cf67d3 100644
--- a/arch/powerpc/configs/chrp32_defconfig
+++ b/arch/powerpc/configs/chrp32_defconfig
@@ -16,6 +16,7 @@ CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_MAC_PARTITION=y
# CONFIG_PPC_PMAC is not set
+CONFIG_GEN_RTC=y
CONFIG_HIGHMEM=y
CONFIG_BINFMT_MISC=y
CONFIG_IRQ_ALL_CPUS=y
@@ -79,7 +80,6 @@ CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_HW_RANDOM is not set
CONFIG_NVRAM=y
-CONFIG_GEN_RTC=y
# CONFIG_HWMON is not set
CONFIG_FB=y
CONFIG_FIRMWARE_EDID=y
@@ -124,5 +124,4 @@ CONFIG_XMON=y
CONFIG_XMON_DEFAULT=y
CONFIG_CRYPTO_CBC=m
CONFIG_CRYPTO_PCBC=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
diff --git a/arch/powerpc/configs/ep8248e_defconfig b/arch/powerpc/configs/ep8248e_defconfig
index 3403b85f9d81..2e6c8a45ae88 100644
--- a/arch/powerpc/configs/ep8248e_defconfig
+++ b/arch/powerpc/configs/ep8248e_defconfig
@@ -70,5 +70,4 @@ CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_DES=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
diff --git a/arch/powerpc/configs/ep88xc_defconfig b/arch/powerpc/configs/ep88xc_defconfig
index 95411aeeeb8d..7cb590e8f8fd 100644
--- a/arch/powerpc/configs/ep88xc_defconfig
+++ b/arch/powerpc/configs/ep88xc_defconfig
@@ -14,6 +14,7 @@ CONFIG_PARTITION_ADVANCED=y
# CONFIG_IOSCHED_CFQ is not set
CONFIG_PPC_EP88XC=y
CONFIG_8xx_COPYBACK=y
+CONFIG_GEN_RTC=y
CONFIG_HZ_100=y
# CONFIG_SECCOMP is not set
CONFIG_NET=y
@@ -45,7 +46,6 @@ CONFIG_LXT_PHY=y
# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_CPM=y
CONFIG_SERIAL_CPM_CONSOLE=y
-CONFIG_GEN_RTC=y
# CONFIG_HWMON is not set
# CONFIG_USB_SUPPORT is not set
# CONFIG_DNOTIFY is not set
diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig
index e18f2e06553f..e084fa548d73 100644
--- a/arch/powerpc/configs/g5_defconfig
+++ b/arch/powerpc/configs/g5_defconfig
@@ -4,7 +4,6 @@ CONFIG_SMP=y
CONFIG_NR_CPUS=4
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
@@ -25,6 +24,7 @@ CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_PMAC64=y
+CONFIG_GEN_RTC=y
CONFIG_KEXEC=y
CONFIG_IRQ_ALL_CPUS=y
CONFIG_PCI_MSI=y
@@ -115,7 +115,6 @@ CONFIG_USB_USBNET=m
# CONFIG_USB_NET_NET1080 is not set
# CONFIG_USB_NET_CDC_SUBSET is not set
# CONFIG_USB_NET_ZAURUS is not set
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_JOYDEV=m
CONFIG_INPUT_EVDEV=y
# CONFIG_KEYBOARD_ATKBD is not set
@@ -123,7 +122,6 @@ CONFIG_INPUT_EVDEV=y
# CONFIG_SERIO_I8042 is not set
# CONFIG_SERIO_SERPORT is not set
# CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=y
CONFIG_RAW_DRIVER=y
CONFIG_I2C_CHARDEV=y
CONFIG_AGP=m
@@ -213,20 +211,20 @@ CONFIG_USB_SERIAL_CYBERJACK=m
CONFIG_USB_SERIAL_XIRCOM=m
CONFIG_USB_SERIAL_OMNINET=m
CONFIG_USB_APPLEDISPLAY=m
-CONFIG_FS_DAX=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
-CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=y
CONFIG_REISERFS_FS_XATTR=y
CONFIG_REISERFS_FS_POSIX_ACL=y
CONFIG_REISERFS_FS_SECURITY=y
CONFIG_XFS_FS=m
CONFIG_XFS_POSIX_ACL=y
+CONFIG_FS_DAX=y
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
@@ -254,14 +252,12 @@ CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_ISO8859_15=y
CONFIG_NLS_UTF8=y
CONFIG_CRC_T10DIF=y
-CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_MUTEXES=y
CONFIG_LATENCYTOP=y
CONFIG_BOOTX_TEXT=y
CONFIG_PPC_EARLY_DEBUG=y
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
@@ -276,5 +272,4 @@ CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
diff --git a/arch/powerpc/configs/gamecube_defconfig b/arch/powerpc/configs/gamecube_defconfig
index c0eec4a5df4e..79bbc8238b32 100644
--- a/arch/powerpc/configs/gamecube_defconfig
+++ b/arch/powerpc/configs/gamecube_defconfig
@@ -45,7 +45,6 @@ CONFIG_BLK_DEV_RAM_COUNT=2
CONFIG_NETDEVICES=y
# CONFIG_WLAN is not set
CONFIG_INPUT_FF_MEMLESS=m
-# CONFIG_INPUT_MOUSEDEV is not set
CONFIG_INPUT_JOYDEV=y
CONFIG_INPUT_EVDEV=y
# CONFIG_KEYBOARD_ATKBD is not set
@@ -54,7 +53,6 @@ CONFIG_INPUT_JOYSTICK=y
# CONFIG_SERIO_I8042 is not set
# CONFIG_SERIO_SERPORT is not set
CONFIG_LEGACY_PTY_COUNT=64
-# CONFIG_DEVKMEM is not set
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
CONFIG_FB=y
diff --git a/arch/powerpc/configs/holly_defconfig b/arch/powerpc/configs/holly_defconfig
index e56e80090529..71d8d2430b6c 100644
--- a/arch/powerpc/configs/holly_defconfig
+++ b/arch/powerpc/configs/holly_defconfig
@@ -11,6 +11,7 @@ CONFIG_PARTITION_ADVANCED=y
# CONFIG_PPC_PMAC is not set
CONFIG_EMBEDDED6xx=y
CONFIG_PPC_HOLLY=y
+CONFIG_GEN_RTC=y
CONFIG_BINFMT_MISC=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="console=ttyS0,115200"
@@ -37,7 +38,6 @@ CONFIG_NETDEVICES=y
CONFIG_VORTEX=y
CONFIG_TSI108_ETH=y
CONFIG_PHYLIB=y
-# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
@@ -49,7 +49,6 @@ CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=y
CONFIG_EXT2_FS=y
CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
diff --git a/arch/powerpc/configs/linkstation_defconfig b/arch/powerpc/configs/linkstation_defconfig
index b413c19d7031..477794c41d50 100644
--- a/arch/powerpc/configs/linkstation_defconfig
+++ b/arch/powerpc/configs/linkstation_defconfig
@@ -26,7 +26,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_IPV6 is not set
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
-CONFIG_NF_CT_PROTO_SCTP=m
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
@@ -79,7 +78,6 @@ CONFIG_NET_TULIP=y
CONFIG_TULIP=y
CONFIG_TULIP_MMIO=y
CONFIG_R8169=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_EVDEV=m
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
@@ -142,4 +140,3 @@ CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_DEFLATE=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/maple_defconfig b/arch/powerpc/configs/maple_defconfig
index c4018179e219..078cdb427fc9 100644
--- a/arch/powerpc/configs/maple_defconfig
+++ b/arch/powerpc/configs/maple_defconfig
@@ -3,7 +3,6 @@ CONFIG_SMP=y
CONFIG_NR_CPUS=4
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
@@ -24,6 +23,7 @@ CONFIG_MAC_PARTITION=y
# CONFIG_PPC_PMAC is not set
CONFIG_PPC_MAPLE=y
CONFIG_UDBG_RTAS_CONSOLE=y
+CONFIG_GEN_RTC=y
CONFIG_KEXEC=y
CONFIG_IRQ_ALL_CPUS=y
CONFIG_PCI_MSI=y
@@ -53,9 +53,6 @@ CONFIG_AMD8111_ETH=y
CONFIG_TIGON3=y
CONFIG_E1000=y
CONFIG_USB_PEGASUS=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-CONFIG_INPUT_MOUSEDEV_SCREEN_X=1600
-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=1200
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
@@ -63,7 +60,6 @@ CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_HVC_RTAS=y
# CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_AMD8111=y
@@ -100,8 +96,8 @@ CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
CONFIG_USB_SERIAL_TI=m
CONFIG_EXT2_FS=y
-CONFIG_FS_DAX=y
CONFIG_EXT4_FS=y
+CONFIG_FS_DAX=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
@@ -127,5 +123,4 @@ CONFIG_BOOTX_TEXT=y
CONFIG_PPC_EARLY_DEBUG=y
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
diff --git a/arch/powerpc/configs/mgcoge_defconfig b/arch/powerpc/configs/mgcoge_defconfig
index 197acaa026eb..5d5f08e5b8d9 100644
--- a/arch/powerpc/configs/mgcoge_defconfig
+++ b/arch/powerpc/configs/mgcoge_defconfig
@@ -46,7 +46,6 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_NETDEVICES=y
CONFIG_FS_ENET=y
CONFIG_FS_ENET_MDIO_FCC=y
-CONFIG_FIXED_PHY=y
# CONFIG_WLAN is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
@@ -83,5 +82,4 @@ CONFIG_MAGIC_SYSRQ=y
CONFIG_BDI_SWITCH=y
CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
diff --git a/arch/powerpc/configs/mpc512x_defconfig b/arch/powerpc/configs/mpc512x_defconfig
index 0b4854cf26cb..10be5773ad5d 100644
--- a/arch/powerpc/configs/mpc512x_defconfig
+++ b/arch/powerpc/configs/mpc512x_defconfig
@@ -12,6 +12,7 @@ CONFIG_PARTITION_ADVANCED=y
# CONFIG_IOSCHED_CFQ is not set
# CONFIG_PPC_CHRP is not set
CONFIG_PPC_MPC512x=y
+CONFIG_MPC512x_LPBFIFO=y
CONFIG_MPC5121_ADS=y
CONFIG_MPC512x_GENERIC=y
CONFIG_PDM360NG=y
@@ -61,25 +62,22 @@ CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_SG=y
CONFIG_NETDEVICES=y
CONFIG_FS_ENET=y
-CONFIG_MARVELL_PHY=y
-CONFIG_DAVICOM_PHY=y
-CONFIG_QSEMI_PHY=y
-CONFIG_LXT_PHY=y
-CONFIG_CICADA_PHY=y
-CONFIG_VITESSE_PHY=y
-CONFIG_SMSC_PHY=y
+CONFIG_MDIO_BITBANG=y
CONFIG_BROADCOM_PHY=y
+CONFIG_CICADA_PHY=y
+CONFIG_DAVICOM_PHY=y
CONFIG_ICPLUS_PHY=y
-CONFIG_REALTEK_PHY=y
+CONFIG_LSI_ET1011C_PHY=y
+CONFIG_LXT_PHY=y
+CONFIG_MARVELL_PHY=y
CONFIG_NATIONAL_PHY=y
+CONFIG_QSEMI_PHY=y
+CONFIG_REALTEK_PHY=y
+CONFIG_SMSC_PHY=y
CONFIG_STE10XP=y
-CONFIG_LSI_ET1011C_PHY=y
-CONFIG_FIXED_PHY=y
-CONFIG_MDIO_BITBANG=y
+CONFIG_VITESSE_PHY=y
# CONFIG_WLAN is not set
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_EVDEV=y
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_MPC52xx=y
CONFIG_SERIAL_MPC52xx_CONSOLE=y
CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=115200
@@ -111,10 +109,9 @@ CONFIG_RTC_DRV_M41T80=y
CONFIG_RTC_DRV_MPC5121=y
CONFIG_DMADEVICES=y
CONFIG_MPC512X_DMA=y
-CONFIG_MPC512x_LPBFIFO=y
-CONFIG_FS_DAX=y
CONFIG_EXT2_FS=y
CONFIG_EXT4_FS=y
+CONFIG_FS_DAX=y
# CONFIG_DNOTIFY is not set
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
@@ -126,5 +123,4 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set
# CONFIG_ENABLE_MUST_CHECK is not set
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
diff --git a/arch/powerpc/configs/mpc5200_defconfig b/arch/powerpc/configs/mpc5200_defconfig
index 88336d0df0d6..7a2b2aa37def 100644
--- a/arch/powerpc/configs/mpc5200_defconfig
+++ b/arch/powerpc/configs/mpc5200_defconfig
@@ -50,7 +50,6 @@ CONFIG_NETDEVICES=y
CONFIG_FEC_MPC52xx=y
CONFIG_AMD_PHY=y
CONFIG_LXT_PHY=y
-CONFIG_FIXED_PHY=y
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
@@ -71,12 +70,10 @@ CONFIG_SENSORS_LM87=m
CONFIG_WATCHDOG=y
CONFIG_MFD_SM501=m
CONFIG_DRM=y
-CONFIG_FB=y
CONFIG_FB_FOREIGN_ENDIAN=y
CONFIG_FB_RADEON=y
CONFIG_FB_SM501=m
# CONFIG_VGA_CONSOLE is not set
-CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
CONFIG_SOUND=y
CONFIG_SND=y
@@ -130,4 +127,3 @@ CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/mpc7448_hpc2_defconfig b/arch/powerpc/configs/mpc7448_hpc2_defconfig
index d933326b4cf9..4b14c02b437c 100644
--- a/arch/powerpc/configs/mpc7448_hpc2_defconfig
+++ b/arch/powerpc/configs/mpc7448_hpc2_defconfig
@@ -11,6 +11,7 @@ CONFIG_PARTITION_ADVANCED=y
# CONFIG_PPC_PMAC is not set
CONFIG_EMBEDDED6xx=y
CONFIG_MPC7448HPC2=y
+CONFIG_GEN_RTC=y
CONFIG_BINFMT_MISC=y
# CONFIG_SECCOMP is not set
CONFIG_NET=y
@@ -38,7 +39,6 @@ CONFIG_8139TOO=y
# CONFIG_8139TOO_PIO is not set
CONFIG_TSI108_ETH=y
CONFIG_PHYLIB=y
-# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
@@ -46,7 +46,6 @@ CONFIG_PHYLIB=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=y
CONFIG_EXT2_FS=y
CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
@@ -54,4 +53,3 @@ CONFIG_TMPFS=y
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
CONFIG_CRC_T10DIF=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/mpc8272_ads_defconfig b/arch/powerpc/configs/mpc8272_ads_defconfig
index 4cb0f617c0d6..b1e88b64536b 100644
--- a/arch/powerpc/configs/mpc8272_ads_defconfig
+++ b/arch/powerpc/configs/mpc8272_ads_defconfig
@@ -77,5 +77,4 @@ CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_DES=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
diff --git a/arch/powerpc/configs/mpc83xx_defconfig b/arch/powerpc/configs/mpc83xx_defconfig
index 6574477fd726..d1b82035d35f 100644
--- a/arch/powerpc/configs/mpc83xx_defconfig
+++ b/arch/powerpc/configs/mpc83xx_defconfig
@@ -21,7 +21,6 @@ CONFIG_MPC837x_MDS=y
CONFIG_MPC837x_RDB=y
CONFIG_SBC834x=y
CONFIG_ASP834x=y
-CONFIG_QUICC_ENGINE=y
CONFIG_QE_GPIO=y
CONFIG_MATH_EMULATION=y
CONFIG_PCI=y
@@ -60,13 +59,11 @@ CONFIG_SATA_SIL=y
CONFIG_NETDEVICES=y
CONFIG_UCC_GETH=y
CONFIG_GIANFAR=y
-CONFIG_MARVELL_PHY=y
CONFIG_DAVICOM_PHY=y
-CONFIG_VITESSE_PHY=y
CONFIG_ICPLUS_PHY=y
-CONFIG_FIXED_PHY=y
+CONFIG_MARVELL_PHY=y
+CONFIG_VITESSE_PHY=y
CONFIG_INPUT_FF_MEMLESS=m
-# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
@@ -99,6 +96,7 @@ CONFIG_USB_EHCI_FSL=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1307=y
CONFIG_RTC_DRV_DS1374=y
+CONFIG_QUICC_ENGINE=y
CONFIG_EXT2_FS=y
CONFIG_EXT4_FS=y
CONFIG_PROC_KCORE=y
@@ -109,7 +107,5 @@ CONFIG_ROOT_NFS=y
CONFIG_CRC_T10DIF=y
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_DEV_TALITOS=y
diff --git a/arch/powerpc/configs/mpc866_ads_defconfig b/arch/powerpc/configs/mpc866_ads_defconfig
index 998454471a48..f1f176c29fa3 100644
--- a/arch/powerpc/configs/mpc866_ads_defconfig
+++ b/arch/powerpc/configs/mpc866_ads_defconfig
@@ -14,6 +14,7 @@ CONFIG_PARTITION_ADVANCED=y
CONFIG_MPC86XADS=y
CONFIG_8xx_COPYBACK=y
CONFIG_8xx_CPU6=y
+CONFIG_GEN_RTC=y
CONFIG_HZ_1000=y
CONFIG_MATH_EMULATION=y
# CONFIG_SECCOMP is not set
@@ -28,12 +29,10 @@ CONFIG_SYN_COOKIES=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_NETDEVICES=y
CONFIG_FS_ENET=y
-CONFIG_FIXED_PHY=y
# CONFIG_VT is not set
# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_CPM=y
CONFIG_SERIAL_CPM_CONSOLE=y
-CONFIG_GEN_RTC=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT4_FS=y
@@ -43,4 +42,3 @@ CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
CONFIG_CRC_CCITT=y
CONFIG_CRC32_SLICEBY4=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/mpc86xx_basic_defconfig b/arch/powerpc/configs/mpc86xx_basic_defconfig
index 3283f0586e11..67bd1fa036ee 100644
--- a/arch/powerpc/configs/mpc86xx_basic_defconfig
+++ b/arch/powerpc/configs/mpc86xx_basic_defconfig
@@ -1,11 +1,11 @@
-CONFIG_HIGHMEM=y
-CONFIG_KEXEC=y
CONFIG_PPC_86xx=y
-CONFIG_PROC_KCORE=y
+CONFIG_MPC8641_HPCN=y
+CONFIG_SBC8641D=y
+CONFIG_MPC8610_HPCD=y
CONFIG_GEF_PPC9A=y
CONFIG_GEF_SBC310=y
CONFIG_GEF_SBC610=y
-CONFIG_MPC8610_HPCD=y
-CONFIG_MPC8641_HPCN=y
-CONFIG_SBC8641D=y
CONFIG_MVME7100=y
+CONFIG_HIGHMEM=y
+CONFIG_KEXEC=y
+CONFIG_PROC_KCORE=y
diff --git a/arch/powerpc/configs/mpc885_ads_defconfig b/arch/powerpc/configs/mpc885_ads_defconfig
index 91f53f1bec5d..ec3fcc2bf737 100644
--- a/arch/powerpc/configs/mpc885_ads_defconfig
+++ b/arch/powerpc/configs/mpc885_ads_defconfig
@@ -13,6 +13,7 @@ CONFIG_EXPERT=y
CONFIG_PARTITION_ADVANCED=y
# CONFIG_IOSCHED_CFQ is not set
CONFIG_8xx_COPYBACK=y
+CONFIG_GEN_RTC=y
CONFIG_HZ_100=y
# CONFIG_SECCOMP is not set
CONFIG_NET=y
@@ -51,7 +52,6 @@ CONFIG_DAVICOM_PHY=y
# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_CPM=y
CONFIG_SERIAL_CPM_CONSOLE=y
-CONFIG_GEN_RTC=y
# CONFIG_HWMON is not set
# CONFIG_USB_SUPPORT is not set
# CONFIG_DNOTIFY is not set
diff --git a/arch/powerpc/configs/mvme5100_defconfig b/arch/powerpc/configs/mvme5100_defconfig
index 139add95a16a..63e38c7220f1 100644
--- a/arch/powerpc/configs/mvme5100_defconfig
+++ b/arch/powerpc/configs/mvme5100_defconfig
@@ -35,7 +35,6 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_IPV6 is not set
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
-CONFIG_NF_CT_PROTO_SCTP=m
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
@@ -70,7 +69,6 @@ CONFIG_TUN=m
# CONFIG_NET_VENDOR_3COM is not set
CONFIG_E100=y
# CONFIG_WLAN is not set
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
@@ -130,4 +128,3 @@ CONFIG_CRYPTO_DES=y
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_DEFLATE=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig
index fe43ff47bd2f..8cf4a46bef86 100644
--- a/arch/powerpc/configs/pasemi_defconfig
+++ b/arch/powerpc/configs/pasemi_defconfig
@@ -3,7 +3,6 @@ CONFIG_ALTIVEC=y
CONFIG_SMP=y
CONFIG_NR_CPUS=2
CONFIG_SYSVIPC=y
-CONFIG_FHANDLE=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BLK_DEV_INITRD=y
@@ -145,6 +144,7 @@ CONFIG_EDAC=y
CONFIG_EDAC_PASEMI=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1307=y
+CONFIG_RAS=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
@@ -167,7 +167,6 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_CRC_CCITT=y
CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
@@ -175,7 +174,5 @@ CONFIG_DETECT_HUNG_TASK=y
CONFIG_XMON=y
CONFIG_XMON_DEFAULT=y
CONFIG_CRYPTO_MD4=y
-CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=y
CONFIG_CRYPTO_BLOWFISH=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/pmac32_defconfig b/arch/powerpc/configs/pmac32_defconfig
index fc1e7a7388b8..8e798b1fbc99 100644
--- a/arch/powerpc/configs/pmac32_defconfig
+++ b/arch/powerpc/configs/pmac32_defconfig
@@ -21,6 +21,7 @@ CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_PMAC=y
CONFIG_PPC601_SYNC_FIX=y
+CONFIG_GEN_RTC=y
CONFIG_HIGHMEM=y
CONFIG_BINFMT_MISC=m
CONFIG_HIBERNATION=y
@@ -179,10 +180,10 @@ CONFIG_PPP_ASYNC=y
CONFIG_PPP_SYNC_TTY=m
CONFIG_USB_USBNET=m
# CONFIG_USB_NET_CDC_SUBSET is not set
-CONFIG_PRISM54=m
CONFIG_B43=m
CONFIG_B43LEGACY=m
CONFIG_P54_COMMON=m
+CONFIG_PRISM54=m
CONFIG_INPUT_EVDEV=y
# CONFIG_KEYBOARD_ATKBD is not set
# CONFIG_MOUSE_PS2 is not set
@@ -193,7 +194,6 @@ CONFIG_SERIAL_8250=m
CONFIG_SERIAL_PMACZILOG=m
CONFIG_SERIAL_PMACZILOG_TTYS=y
CONFIG_NVRAM=y
-CONFIG_GEN_RTC=y
CONFIG_I2C_CHARDEV=m
CONFIG_APM_POWER=y
CONFIG_BATTERY_PMU=y
@@ -201,8 +201,9 @@ CONFIG_HWMON=m
CONFIG_AGP=m
CONFIG_AGP_UNINORTH=m
CONFIG_DRM=m
-CONFIG_DRM_R128=m
CONFIG_DRM_RADEON=m
+CONFIG_DRM_LEGACY=y
+CONFIG_DRM_R128=m
CONFIG_FB=y
CONFIG_FB_OF=y
CONFIG_FB_CONTROL=y
@@ -300,8 +301,6 @@ CONFIG_NFSD_V4=y
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_ISO8859_1=m
CONFIG_CRC_T10DIF=y
-CONFIG_LIBCRC32C=m
-CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
@@ -310,7 +309,6 @@ CONFIG_XMON=y
CONFIG_XMON_DEFAULT=y
CONFIG_BOOTX_TEXT=y
CONFIG_PPC_EARLY_DEBUG=y
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_SHA512=m
@@ -325,4 +323,3 @@ CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_DEFLATE=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/powernv_defconfig b/arch/powerpc/configs/powernv_defconfig
index 0695ce047d56..caee834760d2 100644
--- a/arch/powerpc/configs/powernv_defconfig
+++ b/arch/powerpc/configs/powernv_defconfig
@@ -1,10 +1,8 @@
CONFIG_PPC64=y
-CONFIG_SMP=y
CONFIG_NR_CPUS=2048
CONFIG_CPU_LITTLE_ENDIAN=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
CONFIG_AUDIT=y
CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_NO_HZ=y
@@ -26,8 +24,8 @@ CONFIG_CGROUP_FREEZER=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_DEVICE=y
CONFIG_CGROUP_CPUACCT=y
-CONFIG_CGROUP_BPF=y
CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_BPF=y
CONFIG_USER_NS=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_BPF_SYSCALL=y
@@ -62,7 +60,6 @@ CONFIG_PPC_64K_PAGES=y
CONFIG_PPC_SUBPAGE_PROT=y
CONFIG_SCHED_SMT=y
CONFIG_PM=y
-CONFIG_PCI_MSI=y
CONFIG_HOTPLUG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -158,7 +155,6 @@ CONFIG_NETCONSOLE=y
CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_VIRTIO_NET=m
-CONFIG_VHOST_NET=m
CONFIG_VORTEX=m
CONFIG_ACENIC=m
CONFIG_ACENIC_OMIT_TIGON_I=y
@@ -184,16 +180,13 @@ CONFIG_PPP_DEFLATE=m
CONFIG_PPPOE=m
CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_EVDEV=m
CONFIG_INPUT_MISC=y
# CONFIG_SERIO_SERPORT is not set
-CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_JSM=m
CONFIG_VIRTIO_CONSOLE=m
-CONFIG_POWERNV_OP_PANEL=m
CONFIG_IPMI_HANDLER=y
CONFIG_IPMI_DEVICE_INTERFACE=y
CONFIG_IPMI_POWERNV=y
@@ -293,11 +286,15 @@ CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
+CONFIG_HARDLOCKUP_DETECTOR=y
CONFIG_LATENCYTOP=y
+CONFIG_FTRACE=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_FUNCTION_GRAPH_TRACER=y
CONFIG_SCHED_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_UPROBE_EVENT=y
CONFIG_CODE_PATCHING_SELFTEST=y
CONFIG_FTR_FIXUP_SELFTEST=y
CONFIG_MSI_BITMAP_SELFTEST=y
@@ -309,6 +306,7 @@ CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_CRC32C_VPMSUM=m
CONFIG_CRYPTO_MD5_PPC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA1_PPC=m
CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
@@ -317,14 +315,13 @@ CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SALSA20=m
-CONFIG_CRYPTO_SHA1_PPC=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_DEV_NX=y
CONFIG_CRYPTO_DEV_VMX=y
-CONFIG_CRYPTO_DEV_VMX_ENCRYPT=m
CONFIG_VIRTUALIZATION=y
CONFIG_KVM_BOOK3S_64=m
CONFIG_KVM_BOOK3S_64_HV=m
+CONFIG_VHOST_NET=m
diff --git a/arch/powerpc/configs/ppc40x_defconfig b/arch/powerpc/configs/ppc40x_defconfig
index 370c0bbcff71..10fb1df63b46 100644
--- a/arch/powerpc/configs/ppc40x_defconfig
+++ b/arch/powerpc/configs/ppc40x_defconfig
@@ -51,9 +51,9 @@ CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_UARTLITE=y
CONFIG_SERIAL_UARTLITE_CONSOLE=y
-CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HW_RANDOM is not set
CONFIG_XILINX_HWICAP=m
CONFIG_I2C=m
@@ -85,4 +85,3 @@ CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_DES=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/ppc44x_defconfig b/arch/powerpc/configs/ppc44x_defconfig
index 2766e8f590bc..66dd6bf45cde 100644
--- a/arch/powerpc/configs/ppc44x_defconfig
+++ b/arch/powerpc/configs/ppc44x_defconfig
@@ -68,9 +68,9 @@ CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_SERIAL_8250_PCI is not set
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_UARTLITE=y
CONFIG_SERIAL_UARTLITE_CONSOLE=y
-CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HW_RANDOM is not set
CONFIG_XILINX_HWICAP=m
CONFIG_I2C=m
@@ -94,7 +94,6 @@ CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_UBIFS_FS=m
-CONFIG_LOGFS=m
CONFIG_CRAMFS=y
CONFIG_SQUASHFS=m
CONFIG_SQUASHFS_XATTR=y
@@ -108,6 +107,5 @@ CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
CONFIG_VIRTUALIZATION=y
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 5175028c56ce..791db775a09c 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -1,8 +1,6 @@
CONFIG_PPC64=y
-CONFIG_SMP=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
@@ -28,9 +26,10 @@ CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_PPC_SPLPAR=y
+CONFIG_DTL=y
CONFIG_SCANLOG=m
CONFIG_PPC_SMLPAR=y
-CONFIG_DTL=y
+CONFIG_IBMEBUS=y
CONFIG_PPC_MAPLE=y
CONFIG_PPC_PASEMI=y
CONFIG_PPC_PASEMI_IOMMU=y
@@ -41,9 +40,8 @@ CONFIG_PS3_FLASH=m
CONFIG_PS3_LPM=m
CONFIG_PPC_IBM_CELL_BLADE=y
CONFIG_RTAS_FLASH=m
-CONFIG_IBMEBUS=y
-CONFIG_CPU_FREQ_PMAC64=y
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_PMAC64=y
CONFIG_HZ_100=y
CONFIG_BINFMT_MISC=m
CONFIG_PPC_TRANSACTIONAL_MEM=y
@@ -51,14 +49,15 @@ CONFIG_KEXEC=y
CONFIG_KEXEC_FILE=y
CONFIG_CRASH_DUMP=y
CONFIG_IRQ_ALL_CPUS=y
-CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_PPC_64K_PAGES=y
CONFIG_SCHED_SMT=y
-CONFIG_PCCARD=y
-CONFIG_ELECTRA_CF=y
CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_RPA=m
CONFIG_HOTPLUG_PCI_RPA_DLPAR=m
+CONFIG_PCCARD=y
+CONFIG_ELECTRA_CF=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -154,7 +153,6 @@ CONFIG_DUMMY=m
CONFIG_NETCONSOLE=y
CONFIG_TUN=m
CONFIG_VIRTIO_NET=m
-CONFIG_VHOST_NET=m
CONFIG_VORTEX=m
CONFIG_ACENIC=m
CONFIG_ACENIC_OMIT_TIGON_I=y
@@ -181,15 +179,14 @@ CONFIG_SUNGEM=y
CONFIG_GELIC_NET=m
CONFIG_GELIC_WIRELESS=y
CONFIG_SPIDER_NET=m
-CONFIG_MARVELL_PHY=y
CONFIG_BROADCOM_PHY=m
+CONFIG_MARVELL_PHY=y
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
CONFIG_PPPOE=m
CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_EVDEV=m
CONFIG_INPUT_MISC=y
CONFIG_INPUT_PCSPKR=m
@@ -197,7 +194,6 @@ CONFIG_INPUT_PCSPKR=m
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_ICOM=m
-CONFIG_SERIAL_TXX9_CONSOLE=y
CONFIG_SERIAL_JSM=m
CONFIG_HVC_CONSOLE=y
CONFIG_HVC_RTAS=y
@@ -250,6 +246,9 @@ CONFIG_USB_EHCI_HCD=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_STORAGE=m
CONFIG_USB_APPLEDISPLAY=m
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=m
+CONFIG_LEDS_POWERNV=m
CONFIG_INFINIBAND=m
CONFIG_INFINIBAND_USER_MAD=m
CONFIG_INFINIBAND_USER_ACCESS=m
@@ -267,7 +266,7 @@ CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1307=y
CONFIG_VIRTIO_PCI=m
CONFIG_VIRTIO_BALLOON=m
-CONFIG_FS_DAX=y
+CONFIG_RAS=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
@@ -287,6 +286,7 @@ CONFIG_XFS_POSIX_ACL=y
CONFIG_BTRFS_FS=m
CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_NILFS2_FS=m
+CONFIG_FS_DAX=y
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
CONFIG_OVERLAY_FS=m
@@ -324,12 +324,15 @@ CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
+CONFIG_HARDLOCKUP_DETECTOR=y
CONFIG_DEBUG_MUTEXES=y
CONFIG_LATENCYTOP=y
+CONFIG_FTRACE=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_FUNCTION_GRAPH_TRACER=y
CONFIG_SCHED_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_UPROBE_EVENT=y
CONFIG_CODE_PATCHING_SELFTEST=y
CONFIG_FTR_FIXUP_SELFTEST=y
CONFIG_MSI_BITMAP_SELFTEST=y
@@ -342,6 +345,7 @@ CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_CRC32C_VPMSUM=m
CONFIG_CRYPTO_MD5_PPC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA1_PPC=m
CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
@@ -350,19 +354,14 @@ CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SALSA20=m
-CONFIG_CRYPTO_SHA1_PPC=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_DEV_NX=y
CONFIG_CRYPTO_DEV_NX_ENCRYPT=m
CONFIG_CRYPTO_DEV_VMX=y
-CONFIG_CRYPTO_DEV_VMX_ENCRYPT=m
CONFIG_VIRTUALIZATION=y
CONFIG_KVM_BOOK3S_64=m
CONFIG_KVM_BOOK3S_64_HV=m
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=m
-CONFIG_LEDS_POWERNV=m
+CONFIG_VHOST_NET=m
diff --git a/arch/powerpc/configs/ppc64e_defconfig b/arch/powerpc/configs/ppc64e_defconfig
index 6340e6c53c54..d0fe0f8f77c2 100644
--- a/arch/powerpc/configs/ppc64e_defconfig
+++ b/arch/powerpc/configs/ppc64e_defconfig
@@ -3,7 +3,6 @@ CONFIG_PPC_BOOK3E_64=y
CONFIG_SMP=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_TASKSTATS=y
@@ -30,8 +29,8 @@ CONFIG_BINFMT_MISC=m
CONFIG_IRQ_ALL_CPUS=y
CONFIG_SPARSEMEM_MANUAL=y
CONFIG_PCI_MSI=y
-CONFIG_PCCARD=y
CONFIG_HOTPLUG_PCI=y
+CONFIG_PCCARD=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -108,15 +107,14 @@ CONFIG_E100=y
CONFIG_E1000=y
CONFIG_IXGB=m
CONFIG_SUNGEM=y
-CONFIG_MARVELL_PHY=y
CONFIG_BROADCOM_PHY=m
+CONFIG_MARVELL_PHY=y
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
CONFIG_PPPOE=m
CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_EVDEV=m
CONFIG_INPUT_MISC=y
# CONFIG_SERIO_SERPORT is not set
@@ -172,10 +170,8 @@ CONFIG_INFINIBAND=m
CONFIG_INFINIBAND_MTHCA=m
CONFIG_INFINIBAND_IPOIB=m
CONFIG_INFINIBAND_ISER=m
-CONFIG_EDAC=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1307=y
-CONFIG_FS_DAX=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
@@ -192,6 +188,7 @@ CONFIG_JFS_POSIX_ACL=y
CONFIG_JFS_SECURITY=y
CONFIG_XFS_FS=m
CONFIG_XFS_POSIX_ACL=y
+CONFIG_FS_DAX=y
CONFIG_AUTOFS4_FS=m
CONFIG_ISO9660_FS=y
CONFIG_UDF_FS=m
@@ -251,5 +248,4 @@ CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index 18d0d60dadbf..ae6eba482d75 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -11,10 +11,10 @@ CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_CGROUPS=y
-CONFIG_CGROUP_DEVICE=y
-CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_SCHED=y
CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
CONFIG_USER_NS=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_COMPAT_BRK is not set
@@ -61,7 +61,7 @@ CONFIG_SBC8641D=y
CONFIG_MPC8610_HPCD=y
CONFIG_GEF_SBC610=y
CONFIG_CPU_FREQ=y
-CONFIG_CPU_FREQ_STAT=m
+CONFIG_CPU_FREQ_STAT=y
CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=m
@@ -70,7 +70,6 @@ CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
CONFIG_CPU_FREQ_PMAC=y
CONFIG_TAU=y
CONFIG_TAU_AVERAGE=y
-CONFIG_QUICC_ENGINE=y
CONFIG_QE_GPIO=y
CONFIG_MCU_MPC8349EMITX=y
CONFIG_HIGHMEM=y
@@ -141,7 +140,6 @@ CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_UDPLITE=m
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
@@ -187,7 +185,6 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
CONFIG_NETFILTER_XT_MATCH_REALM=m
CONFIG_NETFILTER_XT_MATCH_RECENT=m
-CONFIG_NETFILTER_XT_MATCH_SOCKET=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -195,7 +192,6 @@ CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
CONFIG_NETFILTER_XT_MATCH_TIME=m
CONFIG_NETFILTER_XT_MATCH_U32=m
CONFIG_NF_CONNTRACK_IPV4=m
-# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -334,9 +330,7 @@ CONFIG_BT_BNEP_MC_FILTER=y
CONFIG_BT_BNEP_PROTO_FILTER=y
CONFIG_BT_HIDP=m
CONFIG_BT_HCIUART=m
-CONFIG_BT_HCIUART_H4=y
CONFIG_BT_HCIUART_BCSP=y
-CONFIG_BT_HCIUART_LL=y
CONFIG_BT_HCIBCM203X=m
CONFIG_BT_HCIBPA10X=m
CONFIG_BT_HCIBFUSB=m
@@ -370,7 +364,6 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=16384
CONFIG_CDROM_PKTCDVD=m
CONFIG_VIRTIO_BLK=m
-CONFIG_BLK_DEV_HD=y
CONFIG_ENCLOSURE_SERVICES=m
CONFIG_SENSORS_TSL2550=m
CONFIG_EEPROM_AT24=m
@@ -548,16 +541,16 @@ CONFIG_PCMCIA_XIRC2PS=m
CONFIG_FDDI=y
CONFIG_SKFP=m
CONFIG_NET_SB1000=m
-CONFIG_MARVELL_PHY=m
-CONFIG_DAVICOM_PHY=m
-CONFIG_QSEMI_PHY=m
-CONFIG_LXT_PHY=m
-CONFIG_CICADA_PHY=m
-CONFIG_VITESSE_PHY=m
-CONFIG_SMSC_PHY=m
CONFIG_BROADCOM_PHY=m
+CONFIG_CICADA_PHY=m
+CONFIG_DAVICOM_PHY=m
CONFIG_ICPLUS_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_MARVELL_PHY=m
+CONFIG_QSEMI_PHY=m
CONFIG_REALTEK_PHY=m
+CONFIG_SMSC_PHY=m
+CONFIG_VITESSE_PHY=m
CONFIG_PLIP=m
CONFIG_PPP_DEFLATE=m
CONFIG_PPP_FILTER=y
@@ -585,7 +578,6 @@ CONFIG_USB_ALI_M5632=y
CONFIG_USB_AN2720=y
CONFIG_USB_EPSON2888=y
CONFIG_USB_KC2190=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_JOYDEV=m
CONFIG_INPUT_EVDEV=y
CONFIG_MOUSE_SERIAL=m
@@ -647,7 +639,6 @@ CONFIG_SYNCLINKMP=m
CONFIG_SYNCLINK_GT=m
CONFIG_NOZOMI=m
CONFIG_N_HDLC=m
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_CS=m
@@ -657,13 +648,13 @@ CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
CONFIG_SERIAL_8250_DETECT_IRQ=y
CONFIG_SERIAL_8250_RSA=y
+CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_UARTLITE=m
CONFIG_SERIAL_PMACZILOG=m
CONFIG_SERIAL_MPC52xx=y
CONFIG_SERIAL_MPC52xx_CONSOLE=y
CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=115200
CONFIG_SERIAL_JSM=m
-CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_PRINTER=m
CONFIG_LP_CONSOLE=y
CONFIG_PPDEV=m
@@ -748,9 +739,10 @@ CONFIG_MFD_SM501_GPIO=y
CONFIG_AGP=y
CONFIG_AGP_UNINORTH=y
CONFIG_DRM=m
+CONFIG_DRM_RADEON=m
+CONFIG_DRM_LEGACY=y
CONFIG_DRM_TDFX=m
CONFIG_DRM_R128=m
-CONFIG_DRM_RADEON=m
CONFIG_DRM_MGA=m
CONFIG_DRM_SIS=m
CONFIG_DRM_VIA=m
@@ -899,7 +891,7 @@ CONFIG_USB=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=m
-CONFIG_USB_EHCI_FSL=y
+CONFIG_USB_EHCI_FSL=m
CONFIG_USB_OHCI_HCD=m
CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
CONFIG_USB_OHCI_HCD_PPC_OF_LE=y
@@ -967,7 +959,6 @@ CONFIG_USB_ADUTUX=m
CONFIG_USB_SEVSEG=m
CONFIG_USB_LEGOTOWER=m
CONFIG_USB_LCD=m
-CONFIG_USB_LED=m
CONFIG_USB_IDMOUSE=m
CONFIG_USB_FTDI_ELAN=m
CONFIG_USB_APPLEDISPLAY=m
@@ -1020,15 +1011,14 @@ CONFIG_UIO_CIF=m
CONFIG_UIO_PDRV_GENIRQ=m
CONFIG_VIRTIO_PCI=m
CONFIG_VIRTIO_BALLOON=m
-CONFIG_FS_DAX=y
+CONFIG_QUICC_ENGINE=y
CONFIG_EXT2_FS=m
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
-CONFIG_EXT4_FS=m
+CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
-CONFIG_EXT4_FS=y
CONFIG_JBD2_DEBUG=y
CONFIG_REISERFS_FS=m
CONFIG_REISERFS_PROC_INFO=y
@@ -1042,6 +1032,7 @@ CONFIG_XFS_FS=m
CONFIG_XFS_QUOTA=y
CONFIG_XFS_POSIX_ACL=y
CONFIG_GFS2_FS=m
+CONFIG_FS_DAX=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
@@ -1146,7 +1137,6 @@ CONFIG_DEBUG_VM=y
CONFIG_DEBUG_HIGHMEM=y
CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_DEBUG_SHIRQ=y
-CONFIG_TIMER_STATS=y
CONFIG_DEBUG_RT_MUTEXES=y
CONFIG_DEBUG_SPINLOCK=y
CONFIG_DEBUG_MUTEXES=y
@@ -1173,7 +1163,6 @@ CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_SELINUX_DISABLE=y
CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_GCM=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
@@ -1201,7 +1190,6 @@ CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_DEV_HIFN_795X=m
CONFIG_CRYPTO_DEV_HIFN_795X_RNG=y
CONFIG_CRYPTO_DEV_TALITOS=m
diff --git a/arch/powerpc/configs/pq2fads_defconfig b/arch/powerpc/configs/pq2fads_defconfig
index 50b2bad51d0a..0ededa8c837d 100644
--- a/arch/powerpc/configs/pq2fads_defconfig
+++ b/arch/powerpc/configs/pq2fads_defconfig
@@ -79,4 +79,3 @@ CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_DES=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
index ee0ec5a682fc..2efa025bf483 100644
--- a/arch/powerpc/configs/ps3_defconfig
+++ b/arch/powerpc/configs/ps3_defconfig
@@ -5,7 +5,6 @@ CONFIG_SMP=y
CONFIG_NR_CPUS=2
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
@@ -94,7 +93,6 @@ CONFIG_USB_USBNET=m
# CONFIG_USB_NET_CDC_SUBSET is not set
# CONFIG_USB_NET_ZAURUS is not set
CONFIG_INPUT_FF_MEMLESS=m
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_JOYDEV=m
CONFIG_INPUT_EVDEV=m
# CONFIG_INPUT_KEYBOARD is not set
@@ -161,7 +159,6 @@ CONFIG_NLS_ISO8859_1=y
CONFIG_CRC_CCITT=m
CONFIG_CRC_T10DIF=y
CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_DEBUG_STACKOVERFLOW=y
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 1a61aa20dfba..3d935969e5a2 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -1,11 +1,8 @@
CONFIG_PPC64=y
-CONFIG_SMP=y
CONFIG_NR_CPUS=2048
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
CONFIG_AUDIT=y
-CONFIG_AUDITSYSCALL=y
CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
@@ -18,17 +15,16 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=18
CONFIG_LOG_CPU_MAX_BUF_SHIFT=13
CONFIG_NUMA_BALANCING=y
-CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y
CONFIG_CGROUPS=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_CGROUP_SCHED=y
CONFIG_CGROUP_FREEZER=y
-CONFIG_CGROUP_DEVICE=y
CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
CONFIG_CGROUP_CPUACCT=y
-CONFIG_CGROUP_BPF=y
-CONFIG_MEMCG=y
-CONFIG_MEMCG_SWAP=y
CONFIG_CGROUP_PERF=y
-CONFIG_CGROUP_SCHED=y
+CONFIG_CGROUP_BPF=y
CONFIG_USER_NS=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_BPF_SYSCALL=y
@@ -43,12 +39,12 @@ CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_PPC_SPLPAR=y
+CONFIG_DTL=y
CONFIG_SCANLOG=m
CONFIG_PPC_SMLPAR=y
-CONFIG_DTL=y
+CONFIG_IBMEBUS=y
# CONFIG_PPC_PMAC is not set
CONFIG_RTAS_FLASH=m
-CONFIG_IBMEBUS=y
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
CONFIG_HZ_100=y
CONFIG_BINFMT_MISC=m
@@ -155,7 +151,6 @@ CONFIG_NETCONSOLE=y
CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_VIRTIO_NET=m
-CONFIG_VHOST_NET=m
CONFIG_VORTEX=m
CONFIG_ACENIC=m
CONFIG_ACENIC_OMIT_TIGON_I=y
@@ -183,12 +178,10 @@ CONFIG_PPP_DEFLATE=m
CONFIG_PPPOE=m
CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_EVDEV=m
CONFIG_INPUT_MISC=y
CONFIG_INPUT_PCSPKR=m
# CONFIG_SERIO_SERPORT is not set
-CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_ICOM=m
@@ -198,8 +191,6 @@ CONFIG_HVC_RTAS=y
CONFIG_HVCS=m
CONFIG_VIRTIO_CONSOLE=m
CONFIG_IBM_BSR=m
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_GENERIC=y
CONFIG_RAW_DRIVER=y
CONFIG_MAX_RAW_DEVS=1024
CONFIG_FB=y
@@ -227,6 +218,9 @@ CONFIG_USB_EHCI_HCD=y
# CONFIG_USB_EHCI_HCD_PPC_OF is not set
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_STORAGE=m
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=m
+CONFIG_LEDS_POWERNV=m
CONFIG_INFINIBAND=m
CONFIG_INFINIBAND_USER_MAD=m
CONFIG_INFINIBAND_USER_ACCESS=m
@@ -238,9 +232,10 @@ CONFIG_INFINIBAND_IPOIB=m
CONFIG_INFINIBAND_IPOIB_CM=y
CONFIG_INFINIBAND_SRP=m
CONFIG_INFINIBAND_ISER=m
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_GENERIC=y
CONFIG_VIRTIO_PCI=m
CONFIG_VIRTIO_BALLOON=m
-CONFIG_FS_DAX=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
@@ -256,6 +251,7 @@ CONFIG_XFS_POSIX_ACL=y
CONFIG_BTRFS_FS=m
CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_NILFS2_FS=m
+CONFIG_FS_DAX=y
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=m
CONFIG_OVERLAY_FS=m
@@ -291,11 +287,14 @@ CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
+CONFIG_HARDLOCKUP_DETECTOR=y
CONFIG_LATENCYTOP=y
+CONFIG_FTRACE=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_FUNCTION_GRAPH_TRACER=y
CONFIG_SCHED_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_UPROBE_EVENT=y
CONFIG_CODE_PATCHING_SELFTEST=y
CONFIG_FTR_FIXUP_SELFTEST=y
CONFIG_MSI_BITMAP_SELFTEST=y
@@ -306,6 +305,7 @@ CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_CRC32C_VPMSUM=m
CONFIG_CRYPTO_MD5_PPC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA1_PPC=m
CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
@@ -314,19 +314,14 @@ CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SALSA20=m
-CONFIG_CRYPTO_SHA1_PPC=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_DEV_NX=y
CONFIG_CRYPTO_DEV_NX_ENCRYPT=m
CONFIG_CRYPTO_DEV_VMX=y
-CONFIG_CRYPTO_DEV_VMX_ENCRYPT=m
CONFIG_VIRTUALIZATION=y
CONFIG_KVM_BOOK3S_64=m
CONFIG_KVM_BOOK3S_64_HV=m
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=m
-CONFIG_LEDS_POWERNV=m
+CONFIG_VHOST_NET=m
diff --git a/arch/powerpc/configs/tqm8xx_defconfig b/arch/powerpc/configs/tqm8xx_defconfig
index 78fddf24b5d3..cd72193fac0a 100644
--- a/arch/powerpc/configs/tqm8xx_defconfig
+++ b/arch/powerpc/configs/tqm8xx_defconfig
@@ -18,6 +18,7 @@ CONFIG_PARTITION_ADVANCED=y
CONFIG_TQM8XX=y
CONFIG_8xx_COPYBACK=y
# CONFIG_8xx_CPU15 is not set
+CONFIG_GEN_RTC=y
CONFIG_HZ_100=y
# CONFIG_SECCOMP is not set
CONFIG_NET=y
@@ -44,7 +45,6 @@ CONFIG_MTD_PHYSMAP_OF=y
CONFIG_NETDEVICES=y
CONFIG_FS_ENET=y
CONFIG_DAVICOM_PHY=y
-CONFIG_FIXED_PHY=y
# CONFIG_WLAN is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
@@ -53,7 +53,6 @@ CONFIG_FIXED_PHY=y
CONFIG_SERIAL_CPM=y
CONFIG_SERIAL_CPM_CONSOLE=y
CONFIG_HW_RANDOM=y
-CONFIG_GEN_RTC=y
# CONFIG_HWMON is not set
# CONFIG_USB_SUPPORT is not set
# CONFIG_DNOTIFY is not set
diff --git a/arch/powerpc/configs/wii_defconfig b/arch/powerpc/configs/wii_defconfig
index dcdd51b57783..aef41b17a8bc 100644
--- a/arch/powerpc/configs/wii_defconfig
+++ b/arch/powerpc/configs/wii_defconfig
@@ -55,9 +55,6 @@ CONFIG_B43_SDIO=y
# CONFIG_B43_PHY_LP is not set
CONFIG_B43_DEBUG=y
CONFIG_INPUT_FF_MEMLESS=m
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-CONFIG_INPUT_MOUSEDEV_SCREEN_X=640
-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=480
CONFIG_INPUT_JOYDEV=y
CONFIG_INPUT_EVDEV=y
# CONFIG_KEYBOARD_ATKBD is not set
@@ -68,7 +65,6 @@ CONFIG_INPUT_UINPUT=y
# CONFIG_SERIO_I8042 is not set
# CONFIG_SERIO_SERPORT is not set
CONFIG_LEGACY_PTY_COUNT=64
-# CONFIG_DEVKMEM is not set
# CONFIG_HW_RANDOM is not set
CONFIG_NVRAM=y
CONFIG_I2C=y
@@ -119,5 +115,4 @@ CONFIG_SCHED_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_DMA_API_DEBUG=y
CONFIG_PPC_EARLY_DEBUG=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index 5c4fbc80dc6c..2542ea15d338 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -8,3 +8,4 @@ generic-y += mcs_spinlock.h
generic-y += preempt.h
generic-y += rwsem.h
generic-y += vtime.h
+generic-y += msi.h
diff --git a/arch/powerpc/include/asm/asm-compat.h b/arch/powerpc/include/asm/asm-compat.h
index cee3aa087653..7f2a7702596c 100644
--- a/arch/powerpc/include/asm/asm-compat.h
+++ b/arch/powerpc/include/asm/asm-compat.h
@@ -25,7 +25,7 @@
#define PPC_LCMPI stringify_in_c(cmpdi)
#define PPC_LCMPLI stringify_in_c(cmpldi)
#define PPC_LCMP stringify_in_c(cmpd)
-#define PPC_LONG stringify_in_c(.llong)
+#define PPC_LONG stringify_in_c(.8byte)
#define PPC_LONG_ALIGN stringify_in_c(.balign 8)
#define PPC_TLNEI stringify_in_c(tdnei)
#define PPC_LLARX(t, a, b, eh) PPC_LDARX(t, a, b, eh)
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index 25d42bd3f114..9c601adfc500 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -74,13 +74,6 @@ do { \
___p1; \
})
-/*
- * This must resolve to hwsync on SMP for the context switch path.
- * See _switch, and core scheduler context switch memory ordering
- * comments.
- */
-#define smp_mb__before_spinlock() smp_mb()
-
#include <asm-generic/barrier.h>
#endif /* _ASM_POWERPC_BARRIER_H */
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 7fb755880409..4d453f979553 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -294,13 +294,11 @@ static inline void __ptep_set_access_flags(struct mm_struct *mm,
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 })
-extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
- pmd_t **pmdp);
-
int map_kernel_page(unsigned long va, phys_addr_t pa, int flags);
/* Generic accessors to PTE bits */
static inline int pte_write(pte_t pte) { return !!(pte_val(pte) & _PAGE_RW);}
+static inline int pte_read(pte_t pte) { return 1; }
static inline int pte_dirty(pte_t pte) { return !!(pte_val(pte) & _PAGE_DIRTY); }
static inline int pte_young(pte_t pte) { return !!(pte_val(pte) & _PAGE_ACCESSED); }
static inline int pte_special(pte_t pte) { return !!(pte_val(pte) & _PAGE_SPECIAL); }
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index 0ce513f2926f..f88452019114 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -40,7 +40,7 @@
* Define the address range of the kernel non-linear virtual area
*/
#define H_KERN_VIRT_START ASM_CONST(0xD000000000000000)
-#define H_KERN_VIRT_SIZE ASM_CONST(0x0000100000000000)
+#define H_KERN_VIRT_SIZE ASM_CONST(0x0000400000000000) /* 64T */
/*
* The vmalloc space starts at the beginning of that region, and
@@ -48,9 +48,11 @@
* (we keep a quarter for the virtual memmap)
*/
#define H_VMALLOC_START H_KERN_VIRT_START
-#define H_VMALLOC_SIZE (H_KERN_VIRT_SIZE >> 1)
+#define H_VMALLOC_SIZE ASM_CONST(0x380000000000) /* 56T */
#define H_VMALLOC_END (H_VMALLOC_START + H_VMALLOC_SIZE)
+#define H_KERN_IO_START H_VMALLOC_END
+
/*
* Region IDs
*/
@@ -91,6 +93,7 @@ static inline int hash__pgd_bad(pgd_t pgd)
}
#ifdef CONFIG_STRICT_KERNEL_RWX
extern void hash__mark_rodata_ro(void);
+extern void hash__mark_initmem_nx(void);
#endif
extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h
index 5c28bd6f2ae1..2d1ca488ca44 100644
--- a/arch/powerpc/include/asm/book3s/64/hugetlb.h
+++ b/arch/powerpc/include/asm/book3s/64/hugetlb.h
@@ -54,9 +54,7 @@ static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
static inline bool gigantic_page_supported(void)
{
- if (radix_enabled())
- return true;
- return false;
+ return true;
}
#endif
diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index 6981a52b3887..f28d21c69f79 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -468,7 +468,7 @@ extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
int psize, int ssize);
int htab_remove_mapping(unsigned long vstart, unsigned long vend,
int psize, int ssize);
-extern void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages);
+extern void pseries_add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages);
extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);
#ifdef CONFIG_PPC_PSERIES
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index 77529a3e3811..c3b00e8ff791 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -59,13 +59,14 @@ extern struct patb_entry *partition_tb;
#define PRTS_MASK 0x1f /* process table size field */
#define PRTB_MASK 0x0ffffffffffff000UL
-/*
- * Limit process table to PAGE_SIZE table. This
- * also limit the max pid we can support.
- * MAX_USER_CONTEXT * 16 bytes of space.
- */
-#define PRTB_SIZE_SHIFT (CONTEXT_BITS + 4)
-#define PRTB_ENTRIES (1ul << CONTEXT_BITS)
+/* Number of supported PID bits */
+extern unsigned int mmu_pid_bits;
+
+/* Base PID to allocate from */
+extern unsigned int mmu_base_pid;
+
+#define PRTB_SIZE_SHIFT (mmu_pid_bits + 4)
+#define PRTB_ENTRIES (1ul << mmu_pid_bits)
/*
* Power9 currently only support 64K partition table size.
@@ -82,6 +83,9 @@ typedef struct {
mm_context_id_t id;
u16 user_psize; /* page size index */
+ /* Number of bits in the mm_cpumask */
+ atomic_t active_cpus;
+
/* NPU NMMU context */
struct npu_context *npu_context;
@@ -96,11 +100,6 @@ typedef struct {
#ifdef CONFIG_PPC_SUBPAGE_PROT
struct subpage_prot_table spt;
#endif /* CONFIG_PPC_SUBPAGE_PROT */
-#ifdef CONFIG_PPC_ICSWX
- struct spinlock *cop_lockp; /* guard acop and cop_pid */
- unsigned long acop; /* mask of enabled coprocessor types */
- unsigned int cop_pid; /* pid value used with coprocessors */
-#endif /* CONFIG_PPC_ICSWX */
#ifdef CONFIG_PPC_64K_PAGES
/* for 4K PTE fragment support */
void *pte_frag;
diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
index e2329db9d6f4..1fcfa425cefa 100644
--- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
@@ -41,8 +41,6 @@ extern struct kmem_cache *pgtable_cache[];
pgtable_cache[(shift) - 1]; \
})
-#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO
-
extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int);
extern void pte_fragment_free(unsigned long *, int);
extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index c0737c86a362..b9aff515b4de 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -272,8 +272,10 @@ extern unsigned long __vmalloc_end;
extern unsigned long __kernel_virt_start;
extern unsigned long __kernel_virt_size;
+extern unsigned long __kernel_io_start;
#define KERN_VIRT_START __kernel_virt_start
#define KERN_VIRT_SIZE __kernel_virt_size
+#define KERN_IO_START __kernel_io_start
extern struct page *vmemmap;
extern unsigned long ioremap_bot;
extern unsigned long pci_io_base;
@@ -298,7 +300,6 @@ extern unsigned long pci_io_base;
* PHB_IO_BASE = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces
* IOREMAP_BASE = ISA_IO_BASE + 2G to VMALLOC_START + PGTABLE_RANGE
*/
-#define KERN_IO_START (KERN_VIRT_START + (KERN_VIRT_SIZE >> 1))
#define FULL_IO_SIZE 0x80000000ul
#define ISA_IO_BASE (KERN_IO_START)
#define ISA_IO_END (KERN_IO_START + 0x10000ul)
@@ -409,6 +410,11 @@ static inline int pte_write(pte_t pte)
return __pte_write(pte) || pte_savedwrite(pte);
}
+static inline int pte_read(pte_t pte)
+{
+ return !!(pte_raw(pte) & cpu_to_be64(_PAGE_READ));
+}
+
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
@@ -608,9 +614,17 @@ static inline pte_t pte_mkdevmap(pte_t pte)
return __pte(pte_val(pte) | _PAGE_SPECIAL|_PAGE_DEVMAP);
}
+/*
+ * This is potentially called with a pmd as the argument, in which case it's not
+ * safe to check _PAGE_DEVMAP unless we also confirm that _PAGE_PTE is set.
+ * That's because the bit we use for _PAGE_DEVMAP is not reserved for software
+ * use in page directory entries (ie. non-ptes).
+ */
static inline int pte_devmap(pte_t pte)
{
- return !!(pte_raw(pte) & cpu_to_be64(_PAGE_DEVMAP));
+ u64 mask = cpu_to_be64(_PAGE_DEVMAP | _PAGE_PTE);
+
+ return (pte_raw(pte) & mask) == mask;
}
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
@@ -1159,6 +1173,7 @@ static inline bool arch_needs_pgtable_deposit(void)
return false;
return true;
}
+extern void serialize_against_pte_lookup(struct mm_struct *mm);
static inline pmd_t pmd_mkdevmap(pmd_t pmd)
@@ -1192,5 +1207,6 @@ static inline const int pud_pfn(pud_t pud)
BUILD_BUG();
return 0;
}
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index 487709ff6875..1e5ba94e62ef 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -110,6 +110,8 @@
*/
#define RADIX_VMEMMAP_BASE (RADIX_VMALLOC_END)
+#define RADIX_KERN_IO_START (RADIX_KERN_VIRT_START + (RADIX_KERN_VIRT_SIZE >> 1))
+
#ifndef __ASSEMBLY__
#define RADIX_PTE_TABLE_SIZE (sizeof(pte_t) << RADIX_PTE_INDEX_SIZE)
#define RADIX_PMD_TABLE_SIZE (sizeof(pmd_t) << RADIX_PMD_INDEX_SIZE)
@@ -118,6 +120,7 @@
#ifdef CONFIG_STRICT_KERNEL_RWX
extern void radix__mark_rodata_ro(void);
+extern void radix__mark_initmem_nx(void);
#endif
static inline unsigned long __radix_pte_update(pte_t *ptep, unsigned long clr,
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
index cc7fbde4f53c..9b433a624bf3 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
@@ -22,22 +22,21 @@ extern void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end
extern void radix__local_flush_tlb_mm(struct mm_struct *mm);
extern void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
-extern void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
extern void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
int psize);
extern void radix__tlb_flush(struct mmu_gather *tlb);
#ifdef CONFIG_SMP
extern void radix__flush_tlb_mm(struct mm_struct *mm);
extern void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
-extern void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
extern void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
int psize);
#else
#define radix__flush_tlb_mm(mm) radix__local_flush_tlb_mm(mm)
#define radix__flush_tlb_page(vma,addr) radix__local_flush_tlb_page(vma,addr)
#define radix__flush_tlb_page_psize(mm,addr,p) radix__local_flush_tlb_page_psize(mm,addr,p)
-#define radix__flush_tlb_pwc(tlb, addr) radix__local_flush_tlb_pwc(tlb, addr)
#endif
+extern void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
+extern void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr);
extern void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
unsigned long page_size);
extern void radix__flush_tlb_lpid(unsigned long lpid);
diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h
index 0151af6c2a50..7ee763d3bea9 100644
--- a/arch/powerpc/include/asm/bug.h
+++ b/arch/powerpc/include/asm/bug.h
@@ -18,7 +18,7 @@
#include <asm/asm-offsets.h>
#ifdef CONFIG_DEBUG_BUGVERBOSE
.macro EMIT_BUG_ENTRY addr,file,line,flags
- .section __bug_table,"a"
+ .section __bug_table,"aw"
5001: PPC_LONG \addr, 5002f
.short \line, \flags
.org 5001b+BUG_ENTRY_SIZE
@@ -29,7 +29,7 @@
.endm
#else
.macro EMIT_BUG_ENTRY addr,file,line,flags
- .section __bug_table,"a"
+ .section __bug_table,"aw"
5001: PPC_LONG \addr
.short \flags
.org 5001b+BUG_ENTRY_SIZE
@@ -42,14 +42,14 @@
sizeof(struct bug_entry), respectively */
#ifdef CONFIG_DEBUG_BUGVERBOSE
#define _EMIT_BUG_ENTRY \
- ".section __bug_table,\"a\"\n" \
+ ".section __bug_table,\"aw\"\n" \
"2:\t" PPC_LONG "1b, %0\n" \
"\t.short %1, %2\n" \
".org 2b+%3\n" \
".previous\n"
#else
#define _EMIT_BUG_ENTRY \
- ".section __bug_table,\"a\"\n" \
+ ".section __bug_table,\"aw\"\n" \
"2:\t" PPC_LONG "1b\n" \
"\t.short %2\n" \
".org 2b+%3\n" \
@@ -133,6 +133,7 @@ extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long);
extern void bad_page_fault(struct pt_regs *, unsigned long, int);
extern void _exception(int, struct pt_regs *, int, unsigned long);
extern void die(const char *, struct pt_regs *, long);
+extern bool die_will_crash(void);
#endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
index 5a90292afbad..d122f7f957ce 100644
--- a/arch/powerpc/include/asm/cache.h
+++ b/arch/powerpc/include/asm/cache.h
@@ -5,7 +5,7 @@
/* bytes per L1 cache line */
-#if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
+#if defined(CONFIG_PPC_8xx) || defined(CONFIG_403GCX)
#define L1_CACHE_SHIFT 4
#define MAX_COPY_PREFETCH 1
#elif defined(CONFIG_PPC_E500MC)
diff --git a/arch/powerpc/include/asm/cpuidle.h b/arch/powerpc/include/asm/cpuidle.h
index 52586f9956bb..eb43b5c3a7b5 100644
--- a/arch/powerpc/include/asm/cpuidle.h
+++ b/arch/powerpc/include/asm/cpuidle.h
@@ -67,6 +67,17 @@
#define ERR_DEEP_STATE_ESL_MISMATCH -2
#ifndef __ASSEMBLY__
+/* Additional SPRs that need to be saved/restored during stop */
+struct stop_sprs {
+ u64 pid;
+ u64 ldbar;
+ u64 fscr;
+ u64 hfscr;
+ u64 mmcr1;
+ u64 mmcr2;
+ u64 mmcra;
+};
+
extern u32 pnv_fastsleep_workaround_at_entry[];
extern u32 pnv_fastsleep_workaround_at_exit[];
@@ -90,20 +101,4 @@ static inline void report_invalid_psscr_val(u64 psscr_val, int err)
#endif
-/* Idle state entry routines */
-#ifdef CONFIG_PPC_P7_NAP
-#define IDLE_STATE_ENTER_SEQ(IDLE_INST) \
- /* Magic NAP/SLEEP/WINKLE mode enter sequence */ \
- std r0,0(r1); \
- ptesync; \
- ld r0,0(r1); \
-236: cmpd cr0,r0,r0; \
- bne 236b; \
- IDLE_INST; \
-
-#define IDLE_STATE_ENTER_SEQ_NORET(IDLE_INST) \
- IDLE_STATE_ENTER_SEQ(IDLE_INST) \
- b .
-#endif /* CONFIG_PPC_P7_NAP */
-
#endif
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index d02ad93bf708..a9bf921f4efc 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -513,7 +513,7 @@ enum {
#else
CPU_FTRS_GENERIC_32 |
#endif
-#ifdef CONFIG_8xx
+#ifdef CONFIG_PPC_8xx
CPU_FTRS_8XX |
#endif
#ifdef CONFIG_40x
@@ -565,7 +565,7 @@ enum {
#else
CPU_FTRS_GENERIC_32 &
#endif
-#ifdef CONFIG_8xx
+#ifdef CONFIG_PPC_8xx
CPU_FTRS_8XX &
#endif
#ifdef CONFIG_40x
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index 8e37b71674f4..9847ae3a12d1 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -131,7 +131,6 @@ static inline bool eeh_pe_passed(struct eeh_pe *pe)
struct eeh_dev {
int mode; /* EEH mode */
int class_code; /* Class code of the device */
- int config_addr; /* Config address */
int pe_config_addr; /* PE config address */
u32 config_space[16]; /* Saved PCI config space */
int pcix_cap; /* Saved PCIx capability */
@@ -141,7 +140,6 @@ struct eeh_dev {
struct eeh_pe *pe; /* Associated PE */
struct list_head list; /* Form link list in the PE */
struct list_head rmv_list; /* Record the removed edevs */
- struct pci_controller *phb; /* Associated PHB */
struct pci_dn *pdn; /* Associated PCI device node */
struct pci_dev *pdev; /* Associated PCI device */
bool in_error; /* Error flag for edev */
@@ -262,7 +260,8 @@ typedef void *(*eeh_traverse_func)(void *data, void *flag);
void eeh_set_pe_aux_size(int size);
int eeh_phb_pe_create(struct pci_controller *phb);
struct eeh_pe *eeh_phb_pe_get(struct pci_controller *phb);
-struct eeh_pe *eeh_pe_get(struct eeh_dev *edev);
+struct eeh_pe *eeh_pe_get(struct pci_controller *phb,
+ int pe_no, int config_addr);
int eeh_add_to_parent_pe(struct eeh_dev *edev);
int eeh_rmv_from_parent_pe(struct eeh_dev *edev);
void eeh_pe_update_time_stamp(struct eeh_pe *pe);
diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h
index ce88bbe1d809..5a23010af600 100644
--- a/arch/powerpc/include/asm/fadump.h
+++ b/arch/powerpc/include/asm/fadump.h
@@ -209,11 +209,13 @@ extern int early_init_dt_scan_fw_dump(unsigned long node,
extern int fadump_reserve_mem(void);
extern int setup_fadump(void);
extern int is_fadump_active(void);
+extern int should_fadump_crash(void);
extern void crash_fadump(struct pt_regs *, const char *);
extern void fadump_cleanup(void);
#else /* CONFIG_FA_DUMP */
static inline int is_fadump_active(void) { return 0; }
+static inline int should_fadump_crash(void) { return 0; }
static inline void crash_fadump(struct pt_regs *regs, const char *str) { }
#endif
#endif
diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
index 2de2319b99e2..8f88f771cc55 100644
--- a/arch/powerpc/include/asm/feature-fixups.h
+++ b/arch/powerpc/include/asm/feature-fixups.h
@@ -19,11 +19,11 @@
*/
#if defined(CONFIG_PPC64) && !defined(__powerpc64__)
/* 64 bits kernel, 32 bits code (ie. vdso32) */
-#define FTR_ENTRY_LONG .llong
+#define FTR_ENTRY_LONG .8byte
#define FTR_ENTRY_OFFSET .long 0xffffffff; .long
#elif defined(CONFIG_PPC64)
-#define FTR_ENTRY_LONG .llong
-#define FTR_ENTRY_OFFSET .llong
+#define FTR_ENTRY_LONG .8byte
+#define FTR_ENTRY_OFFSET .8byte
#else
#define FTR_ENTRY_LONG .long
#define FTR_ENTRY_OFFSET .long
diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
index 4508b322f2cd..6c40dfda5912 100644
--- a/arch/powerpc/include/asm/fixmap.h
+++ b/arch/powerpc/include/asm/fixmap.h
@@ -17,6 +17,7 @@
#ifndef __ASSEMBLY__
#include <linux/kernel.h>
#include <asm/page.h>
+#include <asm/pgtable.h>
#ifdef CONFIG_HIGHMEM
#include <linux/threads.h>
#include <asm/kmap_types.h>
@@ -62,9 +63,6 @@ enum fixed_addresses {
__end_of_fixed_addresses
};
-extern void __set_fixmap (enum fixed_addresses idx,
- phys_addr_t phys, pgprot_t flags);
-
#define __FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE)
@@ -72,5 +70,11 @@ extern void __set_fixmap (enum fixed_addresses idx,
#include <asm-generic/fixmap.h>
+static inline void __set_fixmap(enum fixed_addresses idx,
+ phys_addr_t phys, pgprot_t flags)
+{
+ map_kernel_page(fix_to_virt(idx), phys, pgprot_val(flags));
+}
+
#endif /* !__ASSEMBLY__ */
#endif
diff --git a/arch/powerpc/include/asm/fs_pd.h b/arch/powerpc/include/asm/fs_pd.h
index f79d6c74eb2a..8def56ec05c6 100644
--- a/arch/powerpc/include/asm/fs_pd.h
+++ b/arch/powerpc/include/asm/fs_pd.h
@@ -26,7 +26,7 @@
#define cpm2_unmap(addr) do {} while(0)
#endif
-#ifdef CONFIG_8xx
+#ifdef CONFIG_PPC_8xx
#include <asm/8xx_immap.h>
extern immap_t __iomem *mpc8xx_immr;
diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
index eaada6c92344..719ed9b61ea7 100644
--- a/arch/powerpc/include/asm/futex.h
+++ b/arch/powerpc/include/asm/futex.h
@@ -29,18 +29,10 @@
: "b" (uaddr), "i" (-EFAULT), "r" (oparg) \
: "cr0", "memory")
-static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+ u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
pagefault_disable();
@@ -66,17 +58,9 @@ static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
diff --git a/arch/powerpc/include/asm/hardirq.h b/arch/powerpc/include/asm/hardirq.h
index 8add8b861e8d..c97603d617e3 100644
--- a/arch/powerpc/include/asm/hardirq.h
+++ b/arch/powerpc/include/asm/hardirq.h
@@ -12,6 +12,10 @@ typedef struct {
unsigned int mce_exceptions;
unsigned int spurious_irqs;
unsigned int hmi_exceptions;
+ unsigned int sreset_irqs;
+#ifdef CONFIG_PPC_WATCHDOG
+ unsigned int soft_nmi_irqs;
+#endif
#ifdef CONFIG_PPC_DOORBELL
unsigned int doorbell_irqs;
#endif
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 7f4025a6c69e..b8a0fb442c64 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -218,18 +218,4 @@ static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
}
#endif /* CONFIG_HUGETLB_PAGE */
-/*
- * FSL Book3E platforms require special gpage handling - the gpages
- * are reserved early in the boot process by memblock instead of via
- * the .dts as on IBM platforms.
- */
-#if defined(CONFIG_HUGETLB_PAGE) && (defined(CONFIG_PPC_FSL_BOOK3E) || \
- defined(CONFIG_PPC_8xx))
-extern void __init reserve_hugetlb_gpages(void);
-#else
-static inline void reserve_hugetlb_gpages(void)
-{
-}
-#endif
-
#endif /* _ASM_POWERPC_HUGETLB_H */
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 57d38b504ff7..3d34dc0869f6 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -280,7 +280,18 @@
#define H_RESIZE_HPT_COMMIT 0x370
#define H_REGISTER_PROC_TBL 0x37C
#define H_SIGNAL_SYS_RESET 0x380
-#define MAX_HCALL_OPCODE H_SIGNAL_SYS_RESET
+#define H_INT_GET_SOURCE_INFO 0x3A8
+#define H_INT_SET_SOURCE_CONFIG 0x3AC
+#define H_INT_GET_SOURCE_CONFIG 0x3B0
+#define H_INT_GET_QUEUE_INFO 0x3B4
+#define H_INT_SET_QUEUE_CONFIG 0x3B8
+#define H_INT_GET_QUEUE_CONFIG 0x3BC
+#define H_INT_SET_OS_REPORTING_LINE 0x3C0
+#define H_INT_GET_OS_REPORTING_LINE 0x3C4
+#define H_INT_ESB 0x3C8
+#define H_INT_SYNC 0x3CC
+#define H_INT_RESET 0x3D0
+#define MAX_HCALL_OPCODE H_INT_RESET
/* H_VIOCTL functions */
#define H_GET_VIOA_DUMP_SIZE 0x01
diff --git a/arch/powerpc/include/asm/icswx.h b/arch/powerpc/include/asm/icswx.h
index 27e588f6c72e..6a2c87577541 100644
--- a/arch/powerpc/include/asm/icswx.h
+++ b/arch/powerpc/include/asm/icswx.h
@@ -69,7 +69,10 @@ struct coprocessor_completion_block {
#define CSB_CC_WR_PROTECTION (16)
#define CSB_CC_UNKNOWN_CODE (17)
#define CSB_CC_ABORT (18)
+#define CSB_CC_EXCEED_BYTE_COUNT (19) /* P9 or later */
#define CSB_CC_TRANSPORT (20)
+#define CSB_CC_INVALID_CRB (21) /* P9 or later */
+#define CSB_CC_INVALID_DDE (30) /* P9 or later */
#define CSB_CC_SEGMENTED_DDL (31)
#define CSB_CC_PROGRESS_POINT (32)
#define CSB_CC_DDE_OVERFLOW (33)
diff --git a/arch/powerpc/include/asm/imc-pmu.h b/arch/powerpc/include/asm/imc-pmu.h
new file mode 100644
index 000000000000..7f74c282710f
--- /dev/null
+++ b/arch/powerpc/include/asm/imc-pmu.h
@@ -0,0 +1,128 @@
+#ifndef __ASM_POWERPC_IMC_PMU_H
+#define __ASM_POWERPC_IMC_PMU_H
+
+/*
+ * IMC Nest Performance Monitor counter support.
+ *
+ * Copyright (C) 2017 Madhavan Srinivasan, IBM Corporation.
+ * (C) 2017 Anju T Sudhakar, IBM Corporation.
+ * (C) 2017 Hemant K Shaw, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or later version.
+ */
+
+#include <linux/perf_event.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/io.h>
+#include <asm/opal.h>
+
+/*
+ * For static allocation of some of the structures.
+ */
+#define IMC_MAX_PMUS 32
+
+/*
+ * Compatibility macros for IMC devices
+ */
+#define IMC_DTB_COMPAT "ibm,opal-in-memory-counters"
+#define IMC_DTB_UNIT_COMPAT "ibm,imc-counters"
+
+
+/*
+ * LDBAR: Counter address and Enable/Disable macro.
+ * perf/imc-pmu.c has the LDBAR layout information.
+ */
+#define THREAD_IMC_LDBAR_MASK 0x0003ffffffffe000ULL
+#define THREAD_IMC_ENABLE 0x8000000000000000ULL
+
+/*
+ * Structure to hold memory address information for imc units.
+ */
+struct imc_mem_info {
+ u64 *vbase;
+ u32 id;
+};
+
+/*
+ * Place holder for nest pmu events and values.
+ */
+struct imc_events {
+ u32 value;
+ char *name;
+ char *unit;
+ char *scale;
+};
+
+/* Event attribute array index */
+#define IMC_FORMAT_ATTR 0
+#define IMC_EVENT_ATTR 1
+#define IMC_CPUMASK_ATTR 2
+#define IMC_NULL_ATTR 3
+
+/* PMU Format attribute macros */
+#define IMC_EVENT_OFFSET_MASK 0xffffffffULL
+
+/*
+ * Device tree parser code detects IMC pmu support and
+ * registers new IMC pmus. This structure will hold the
+ * pmu functions, events, counter memory information
+ * and attrs for each imc pmu and will be referenced at
+ * the time of pmu registration.
+ */
+struct imc_pmu {
+ struct pmu pmu;
+ struct imc_mem_info *mem_info;
+ struct imc_events **events;
+ /*
+ * Attribute groups for the PMU. Slot 0 used for
+ * format attribute, slot 1 used for cpumask attribute,
+ * slot 2 used for event attribute. Slot 3 is kept
+ * NULL.
+ */
+ const struct attribute_group *attr_groups[4];
+ u32 counter_mem_size;
+ int domain;
+ /*
+ * Flag to note whether the memory is mmapped
+ * or allocated by the kernel.
+ */
+ bool imc_counter_mmaped;
+};
+
+/*
+ * Structure to hold id, lock and reference count for the imc events which
+ * are initialized.
+ */
+struct imc_pmu_ref {
+ struct mutex lock;
+ unsigned int id;
+ int refc;
+};
+
+/*
+ * In-Memory Collection Counters type.
+ * Data comes from the device tree.
+ * Three device types are supported.
+ */
+
+enum {
+ IMC_TYPE_THREAD = 0x1,
+ IMC_TYPE_CORE = 0x4,
+ IMC_TYPE_CHIP = 0x10,
+};
+
+/*
+ * Domains for IMC PMUs
+ */
+#define IMC_DOMAIN_NEST 1
+#define IMC_DOMAIN_CORE 2
+#define IMC_DOMAIN_THREAD 3
+
+extern int init_imc_pmu(struct device_node *parent,
+ struct imc_pmu *pmu_ptr, int pmu_id);
+extern void thread_imc_disable(void);
+#endif /* __ASM_POWERPC_IMC_PMU_H */
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 7cea76f11c26..83596f32f50b 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -104,6 +104,10 @@ struct kvmppc_host_state {
u8 napping;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ /*
+ * hwthread_req/hwthread_state pair is used to pull sibling threads
+ * out of guest on pre-ISAv3.0B CPUs where threads share MMU.
+ */
u8 hwthread_req;
u8 hwthread_state;
u8 host_ipi;
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 8b3f1238d07f..e372ed871c51 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -67,11 +67,6 @@ extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
-static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
- unsigned long address)
-{
-}
-
#define HPTEG_CACHE_NUM (1 << 15)
#define HPTEG_HASH_BITS_PTE 13
#define HPTEG_HASH_BITS_PTE_LONG 12
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index cd2fc1cc1cc7..73b92017b6d7 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -76,7 +76,6 @@ struct machdep_calls {
void __noreturn (*restart)(char *cmd);
void __noreturn (*halt)(void);
- void (*panic)(char *str);
void (*cpu_die)(void);
long (*time_init)(void); /* Optional, may be NULL */
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index da7e9432fa8f..309592589e30 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -45,7 +45,7 @@ extern void set_context(unsigned long id, pgd_t *pgd);
#ifdef CONFIG_PPC_BOOK3S_64
extern void radix__switch_mmu_context(struct mm_struct *prev,
- struct mm_struct *next);
+ struct mm_struct *next);
static inline void switch_mmu_context(struct mm_struct *prev,
struct mm_struct *next,
struct task_struct *tsk)
@@ -67,54 +67,18 @@ extern void __destroy_context(unsigned long context_id);
extern void mmu_context_init(void);
#endif
+#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) && defined(CONFIG_PPC_RADIX_MMU)
+extern void radix_kvm_prefetch_workaround(struct mm_struct *mm);
+#else
+static inline void radix_kvm_prefetch_workaround(struct mm_struct *mm) { }
+#endif
+
extern void switch_cop(struct mm_struct *next);
extern int use_cop(unsigned long acop, struct mm_struct *mm);
extern void drop_cop(unsigned long acop, struct mm_struct *mm);
-/*
- * switch_mm is the entry point called from the architecture independent
- * code in kernel/sched/core.c
- */
-static inline void switch_mm_irqs_off(struct mm_struct *prev,
- struct mm_struct *next,
- struct task_struct *tsk)
-{
- /* Mark this context has been used on the new CPU */
- if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next)))
- cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
-
- /* 32-bit keeps track of the current PGDIR in the thread struct */
-#ifdef CONFIG_PPC32
- tsk->thread.pgdir = next->pgd;
-#endif /* CONFIG_PPC32 */
-
- /* 64-bit Book3E keeps track of current PGD in the PACA */
-#ifdef CONFIG_PPC_BOOK3E_64
- get_paca()->pgd = next->pgd;
-#endif
- /* Nothing else to do if we aren't actually switching */
- if (prev == next)
- return;
-
-#ifdef CONFIG_PPC_ICSWX
- /* Switch coprocessor context only if prev or next uses a coprocessor */
- if (prev->context.acop || next->context.acop)
- switch_cop(next);
-#endif /* CONFIG_PPC_ICSWX */
-
- /* We must stop all altivec streams before changing the HW
- * context
- */
-#ifdef CONFIG_ALTIVEC
- if (cpu_has_feature(CPU_FTR_ALTIVEC))
- asm volatile ("dssall");
-#endif /* CONFIG_ALTIVEC */
- /*
- * The actual HW switching method differs between the various
- * sub architectures. Out of line for now
- */
- switch_mmu_context(prev, next, tsk);
-}
+extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk);
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
@@ -136,11 +100,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
*/
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
- unsigned long flags;
-
- local_irq_save(flags);
switch_mm(prev, next, current);
- local_irq_restore(flags);
}
/* We don't currently use enter_lazy_tlb() for anything */
diff --git a/arch/powerpc/include/asm/nmi.h b/arch/powerpc/include/asm/nmi.h
index 6f8e79cd35d8..3760150a0ff0 100644
--- a/arch/powerpc/include/asm/nmi.h
+++ b/arch/powerpc/include/asm/nmi.h
@@ -1,9 +1,8 @@
#ifndef _ASM_NMI_H
#define _ASM_NMI_H
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
+#ifdef CONFIG_PPC_WATCHDOG
extern void arch_touch_nmi_watchdog(void);
-
extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask,
bool exclude_self);
#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index 91314268f04f..185c6a47f9ba 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -121,7 +121,7 @@ extern int icache_44x_need_flush;
#include <asm/nohash/pte-book3e.h>
#elif defined(CONFIG_FSL_BOOKE)
#include <asm/nohash/32/pte-fsl-booke.h>
-#elif defined(CONFIG_8xx)
+#elif defined(CONFIG_PPC_8xx)
#include <asm/nohash/32/pte-8xx.h>
#endif
@@ -337,9 +337,6 @@ static inline void __ptep_set_access_flags(struct mm_struct *mm,
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 })
-extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
- pmd_t **pmdp);
-
int map_kernel_page(unsigned long va, phys_addr_t pa, int flags);
#endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index e5805ad78e12..17989c3d9a24 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -14,6 +14,7 @@ static inline int pte_write(pte_t pte)
{
return (pte_val(pte) & (_PAGE_RW | _PAGE_RO)) != _PAGE_RO;
}
+static inline int pte_read(pte_t pte) { return 1; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h
index 3130a73652c7..450a60b81d2a 100644
--- a/arch/powerpc/include/asm/opal-api.h
+++ b/arch/powerpc/include/asm/opal-api.h
@@ -42,6 +42,7 @@
#define OPAL_I2C_STOP_ERR -24
#define OPAL_XIVE_PROVISIONING -31
#define OPAL_XIVE_FREE_ACTIVE -32
+#define OPAL_TIMEOUT -33
/* API Tokens (in r0) */
#define OPAL_INVALID_CALL -1
@@ -190,7 +191,16 @@
#define OPAL_NPU_INIT_CONTEXT 146
#define OPAL_NPU_DESTROY_CONTEXT 147
#define OPAL_NPU_MAP_LPAR 148
-#define OPAL_LAST 148
+#define OPAL_IMC_COUNTERS_INIT 149
+#define OPAL_IMC_COUNTERS_START 150
+#define OPAL_IMC_COUNTERS_STOP 151
+#define OPAL_GET_POWERCAP 152
+#define OPAL_SET_POWERCAP 153
+#define OPAL_GET_POWER_SHIFT_RATIO 154
+#define OPAL_SET_POWER_SHIFT_RATIO 155
+#define OPAL_SENSOR_GROUP_CLEAR 156
+#define OPAL_PCI_SET_P2P 157
+#define OPAL_LAST 157
/* Device tree flags */
@@ -1084,6 +1094,18 @@ enum {
XIVE_DUMP_EMU_STATE = 5,
};
+/* "type" argument options for OPAL_IMC_COUNTERS_* calls */
+enum {
+ OPAL_IMC_COUNTERS_NEST = 1,
+ OPAL_IMC_COUNTERS_CORE = 2,
+};
+
+
+/* PCI p2p descriptor */
+#define OPAL_PCI_P2P_ENABLE 0x1
+#define OPAL_PCI_P2P_LOAD 0x2
+#define OPAL_PCI_P2P_STORE 0x4
+
#endif /* __ASSEMBLY__ */
#endif /* __OPAL_API_H */
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 588fb1c23af9..726c23304a57 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -50,7 +50,7 @@ int64_t opal_tpo_write(uint64_t token, uint32_t year_mon_day,
uint32_t hour_min);
int64_t opal_cec_power_down(uint64_t request);
int64_t opal_cec_reboot(void);
-int64_t opal_cec_reboot2(uint32_t reboot_type, char *diag);
+int64_t opal_cec_reboot2(uint32_t reboot_type, const char *diag);
int64_t opal_read_nvram(uint64_t buffer, uint64_t size, uint64_t offset);
int64_t opal_write_nvram(uint64_t buffer, uint64_t size, uint64_t offset);
int64_t opal_handle_interrupt(uint64_t isn, __be64 *outstanding_event_mask);
@@ -267,6 +267,19 @@ int64_t opal_xive_allocate_irq(uint32_t chip_id);
int64_t opal_xive_free_irq(uint32_t girq);
int64_t opal_xive_sync(uint32_t type, uint32_t id);
int64_t opal_xive_dump(uint32_t type, uint32_t id);
+int64_t opal_pci_set_p2p(uint64_t phb_init, uint64_t phb_target,
+ uint64_t desc, uint16_t pe_number);
+
+int64_t opal_imc_counters_init(uint32_t type, uint64_t address,
+ uint64_t cpu_pir);
+int64_t opal_imc_counters_start(uint32_t type, uint64_t cpu_pir);
+int64_t opal_imc_counters_stop(uint32_t type, uint64_t cpu_pir);
+
+int opal_get_powercap(u32 handle, int token, u32 *pcap);
+int opal_set_powercap(u32 handle, int token, u32 pcap);
+int opal_get_power_shift_ratio(u32 handle, int token, u32 *psr);
+int opal_set_power_shift_ratio(u32 handle, int token, u32 psr);
+int opal_sensor_group_clear(u32 group_hndl, int token);
/* Internal functions */
extern int early_init_dt_scan_opal(unsigned long node, const char *uname,
@@ -345,6 +358,10 @@ static inline int opal_get_async_rc(struct opal_msg msg)
void opal_wake_poller(void);
+void opal_powercap_init(void);
+void opal_psr_init(void);
+void opal_sensor_groups_init(void);
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_OPAL_H */
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index dc88a31cc79a..04b60af027ae 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -31,6 +31,7 @@
#endif
#include <asm/accounting.h>
#include <asm/hmi.h>
+#include <asm/cpuidle.h>
register struct paca_struct *local_paca asm("r13");
@@ -183,6 +184,12 @@ struct paca_struct {
struct paca_struct **thread_sibling_pacas;
/* The PSSCR value that the kernel requested before going to stop */
u64 requested_psscr;
+
+ /*
+ * Save area for additional SPRs that need to be
+ * saved/restored during cpuidle stop.
+ */
+ struct stop_sprs stop_sprs;
#endif
#ifdef CONFIG_PPC_STD_MMU_64
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index 56c67d3f0108..0b8aa1fe2d5f 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -195,7 +195,6 @@ struct pci_dn {
struct pci_dn *parent;
struct pci_controller *phb; /* for pci devices */
struct iommu_table_group *table_group; /* for phb's or bridges */
- struct device_node *node; /* back-pointer to the device_node */
int pci_ext_config_space; /* for pci devices */
diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h
index d795c5d5789c..45ae1212ab8a 100644
--- a/arch/powerpc/include/asm/pgalloc.h
+++ b/arch/powerpc/include/asm/pgalloc.h
@@ -17,6 +17,8 @@ static inline gfp_t pgtable_gfp_flags(struct mm_struct *mm, gfp_t gfp)
}
#endif /* MODULE */
+#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
+
#ifdef CONFIG_PPC_BOOK3S
#include <asm/book3s/pgalloc.h>
#else
diff --git a/arch/powerpc/include/asm/pgtable-be-types.h b/arch/powerpc/include/asm/pgtable-be-types.h
index 9c0f5db5cf46..67e7e3d990f4 100644
--- a/arch/powerpc/include/asm/pgtable-be-types.h
+++ b/arch/powerpc/include/asm/pgtable-be-types.h
@@ -87,6 +87,7 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
unsigned long *p = (unsigned long *)ptep;
__be64 prev;
+ /* See comment in switch_mm_irqs_off() */
prev = (__force __be64)__cmpxchg_u64(p, (__force unsigned long)pte_raw(old),
(__force unsigned long)pte_raw(new));
diff --git a/arch/powerpc/include/asm/pgtable-types.h b/arch/powerpc/include/asm/pgtable-types.h
index 8bd3b13fe2fb..369a164b545c 100644
--- a/arch/powerpc/include/asm/pgtable-types.h
+++ b/arch/powerpc/include/asm/pgtable-types.h
@@ -62,6 +62,7 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
{
unsigned long *p = (unsigned long *)ptep;
+ /* See comment in switch_mm_irqs_off() */
return pte_val(old) == __cmpxchg_u64(p, pte_val(old), pte_val(new));
}
#endif
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index dd01212935ac..7d0d38f58243 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -66,20 +66,19 @@ extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_large(pmd) 0
#endif
-pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
- bool *is_thp, unsigned *shift);
-static inline pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
- bool *is_thp, unsigned *shift)
-{
- VM_WARN(!arch_irqs_disabled(),
- "%s called with irq enabled\n", __func__);
- return __find_linux_pte_or_hugepte(pgdir, ea, is_thp, shift);
-}
+/* Can we use this in KVM? */
unsigned long vmalloc_to_phys(void *vmalloc_addr);
void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
void pgtable_cache_init(void);
+
+#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_PPC32)
+void mark_initmem_nx(void);
+#else
+static inline void mark_initmem_nx(void) { }
+#endif
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_PGTABLE_H */
diff --git a/arch/powerpc/include/asm/pnv-pci.h b/arch/powerpc/include/asm/pnv-pci.h
index de9681034353..3e5cf251ad9a 100644
--- a/arch/powerpc/include/asm/pnv-pci.h
+++ b/arch/powerpc/include/asm/pnv-pci.h
@@ -26,6 +26,8 @@ extern int pnv_pci_get_presence_state(uint64_t id, uint8_t *state);
extern int pnv_pci_get_power_state(uint64_t id, uint8_t *state);
extern int pnv_pci_set_power_state(uint64_t id, uint8_t state,
struct opal_msg *msg);
+extern int pnv_pci_set_p2p(struct pci_dev *initiator, struct pci_dev *target,
+ u64 desc);
int pnv_phb_to_cxl_mode(struct pci_dev *dev, uint64_t mode);
int pnv_cxl_ioda_msi_setup(struct pci_dev *dev, unsigned int hwirq,
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index fa9ebaead91e..ce0930d68857 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -193,6 +193,7 @@
#define PPC_INST_CLRBHRB 0x7c00035c
#define PPC_INST_COPY 0x7c20060c
#define PPC_INST_CP_ABORT 0x7c00068c
+#define PPC_INST_DARN 0x7c0005e6
#define PPC_INST_DCBA 0x7c0005ec
#define PPC_INST_DCBA_MASK 0xfc0007fe
#define PPC_INST_DCBAL 0x7c2005ec
@@ -204,6 +205,8 @@
#define PPC_INST_ISEL_MASK 0xfc00003e
#define PPC_INST_LDARX 0x7c0000a8
#define PPC_INST_STDCX 0x7c0001ad
+#define PPC_INST_LQARX 0x7c000228
+#define PPC_INST_STQCX 0x7c00016d
#define PPC_INST_LSWI 0x7c0004aa
#define PPC_INST_LSWX 0x7c00042a
#define PPC_INST_LWARX 0x7c000028
@@ -261,7 +264,7 @@
#define PPC_INST_TLBSRX_DOT 0x7c0006a5
#define PPC_INST_VPMSUMW 0x10000488
#define PPC_INST_VPMSUMD 0x100004c8
-#define PPC_INST_XXLOR 0xf0000510
+#define PPC_INST_XXLOR 0xf0000490
#define PPC_INST_XXSWAPD 0xf0000250
#define PPC_INST_XVCPSGNDP 0xf0000780
#define PPC_INST_TRECHKPT 0x7c0007dd
@@ -395,16 +398,25 @@
#define PPC_CP_ABORT stringify_in_c(.long PPC_INST_CP_ABORT)
#define PPC_COPY(a, b) stringify_in_c(.long PPC_INST_COPY | \
___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_DARN(t, l) stringify_in_c(.long PPC_INST_DARN | \
+ ___PPC_RT(t) | \
+ (((l) & 0x3) << 16))
#define PPC_DCBAL(a, b) stringify_in_c(.long PPC_INST_DCBAL | \
__PPC_RA(a) | __PPC_RB(b))
#define PPC_DCBZL(a, b) stringify_in_c(.long PPC_INST_DCBZL | \
__PPC_RA(a) | __PPC_RB(b))
+#define PPC_LQARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LQARX | \
+ ___PPC_RT(t) | ___PPC_RA(a) | \
+ ___PPC_RB(b) | __PPC_EH(eh))
#define PPC_LDARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LDARX | \
___PPC_RT(t) | ___PPC_RA(a) | \
___PPC_RB(b) | __PPC_EH(eh))
#define PPC_LWARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LWARX | \
___PPC_RT(t) | ___PPC_RA(a) | \
___PPC_RB(b) | __PPC_EH(eh))
+#define PPC_STQCX(t, a, b) stringify_in_c(.long PPC_INST_STQCX | \
+ ___PPC_RT(t) | ___PPC_RA(a) | \
+ ___PPC_RB(b))
#define PPC_MSGSND(b) stringify_in_c(.long PPC_INST_MSGSND | \
___PPC_RB(b))
#define PPC_MSGSYNC stringify_in_c(.long PPC_INST_MSGSYNC)
@@ -414,6 +426,8 @@
___PPC_RB(b))
#define PPC_MSGCLRP(b) stringify_in_c(.long PPC_INST_MSGCLRP | \
___PPC_RB(b))
+#define PPC_PASTE(a, b) stringify_in_c(.long PPC_INST_PASTE | \
+ ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_POPCNTB(a, s) stringify_in_c(.long PPC_INST_POPCNTB | \
__PPC_RA(a) | __PPC_RS(s))
#define PPC_POPCNTD(a, s) stringify_in_c(.long PPC_INST_POPCNTD | \
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 6baeeb9acd0d..36f3e41c9fbe 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -378,10 +378,16 @@ BEGIN_FTR_SECTION_NESTED(96); \
cmpwi dest,0; \
beq- 90b; \
END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
-#elif defined(CONFIG_8xx)
-#define MFTB(dest) mftb dest
#else
-#define MFTB(dest) mfspr dest, SPRN_TBRL
+#define MFTB(dest) MFTBL(dest)
+#endif
+
+#ifdef CONFIG_PPC_8xx
+#define MFTBL(dest) mftb dest
+#define MFTBU(dest) mftbu dest
+#else
+#define MFTBL(dest) mfspr dest, SPRN_TBRL
+#define MFTBU(dest) mfspr dest, SPRN_TBRU
#endif
#ifndef CONFIG_SMP
@@ -411,7 +417,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
* and they must be used.
*/
-#if !defined(CONFIG_4xx) && !defined(CONFIG_8xx)
+#if !defined(CONFIG_4xx) && !defined(CONFIG_PPC_8xx)
#define tlbia \
li r4,1024; \
mtctr r4; \
@@ -439,7 +445,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
.machine push ; \
.machine "power4" ; \
lis scratch,0x60000000@h; \
- dcbt r0,scratch,0b01010; \
+ dcbt 0,scratch,0b01010; \
.machine pop
/*
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
index 35c00d7a0cf8..825bd5998701 100644
--- a/arch/powerpc/include/asm/prom.h
+++ b/arch/powerpc/include/asm/prom.h
@@ -159,7 +159,10 @@ struct of_drconf_cell {
#define OV5_PFO_HW_842 0x1140 /* PFO Compression Accelerator */
#define OV5_PFO_HW_ENCR 0x1120 /* PFO Encryption Accelerator */
#define OV5_SUB_PROCESSORS 0x1501 /* 1,2,or 4 Sub-Processors supported */
-#define OV5_XIVE_EXPLOIT 0x1701 /* XIVE exploitation supported */
+#define OV5_XIVE_SUPPORT 0x17C0 /* XIVE Exploitation Support Mask */
+#define OV5_XIVE_LEGACY 0x1700 /* XIVE legacy mode Only */
+#define OV5_XIVE_EXPLOIT 0x1740 /* XIVE exploitation mode Only */
+#define OV5_XIVE_EITHER 0x1780 /* XIVE legacy or exploitation mode */
/* MMU Base Architecture */
#define OV5_MMU_SUPPORT 0x18C0 /* MMU Mode Support Mask */
#define OV5_MMU_HASH 0x1800 /* Hash MMU Only */
diff --git a/arch/powerpc/include/asm/pte-walk.h b/arch/powerpc/include/asm/pte-walk.h
new file mode 100644
index 000000000000..2d633e9d686c
--- /dev/null
+++ b/arch/powerpc/include/asm/pte-walk.h
@@ -0,0 +1,35 @@
+#ifndef _ASM_POWERPC_PTE_WALK_H
+#define _ASM_POWERPC_PTE_WALK_H
+
+#include <linux/sched.h>
+
+/* Don't use this directly */
+extern pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
+ bool *is_thp, unsigned *hshift);
+
+static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea,
+ bool *is_thp, unsigned *hshift)
+{
+ VM_WARN(!arch_irqs_disabled(), "%s called with irq enabled\n", __func__);
+ return __find_linux_pte(pgdir, ea, is_thp, hshift);
+}
+
+static inline pte_t *find_init_mm_pte(unsigned long ea, unsigned *hshift)
+{
+ pgd_t *pgdir = init_mm.pgd;
+ return __find_linux_pte(pgdir, ea, NULL, hshift);
+}
+/*
+ * This is what we should always use. Any other lockless page table lookup needs
+ * careful audit against THP split.
+ */
+static inline pte_t *find_current_mm_pte(pgd_t *pgdir, unsigned long ea,
+ bool *is_thp, unsigned *hshift)
+{
+ VM_WARN(!arch_irqs_disabled(), "%s called with irq enabled\n", __func__);
+ VM_WARN(pgdir != current->mm->pgd,
+ "%s lock less page table lookup called on wrong mm\n", __func__);
+ return __find_linux_pte(pgdir, ea, is_thp, hshift);
+}
+
+#endif /* _ASM_POWERPC_PTE_WALK_H */
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index a3b6575c7842..f92eaf7a4c0d 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -22,9 +22,9 @@
#include <asm/reg_fsl_emb.h>
#endif
-#ifdef CONFIG_8xx
+#ifdef CONFIG_PPC_8xx
#include <asm/reg_8xx.h>
-#endif /* CONFIG_8xx */
+#endif /* CONFIG_PPC_8xx */
#define MSR_SF_LG 63 /* Enable 64 bit mode */
#define MSR_ISF_LG 61 /* Interrupt 64b mode valid on 630 */
@@ -135,7 +135,7 @@
#define MSR_KERNEL (MSR_ | MSR_64BIT)
#define MSR_USER32 (MSR_ | MSR_PR | MSR_EE)
#define MSR_USER64 (MSR_USER32 | MSR_64BIT)
-#elif defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_8xx)
+#elif defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_8xx)
/* Default MSR for kernel mode. */
#define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR)
#define MSR_USER (MSR_KERNEL|MSR_PR|MSR_EE)
@@ -272,16 +272,65 @@
#define SPRN_DAR 0x013 /* Data Address Register */
#define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
#define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
-#define DSISR_NOHPTE 0x40000000 /* no translation found */
-#define DSISR_PROTFAULT 0x08000000 /* protection fault */
-#define DSISR_BADACCESS 0x04000000 /* bad access to CI or G */
-#define DSISR_ISSTORE 0x02000000 /* access was a store */
-#define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
-#define DSISR_NOSEGMENT 0x00200000 /* SLB miss */
-#define DSISR_KEYFAULT 0x00200000 /* Key fault */
-#define DSISR_UNSUPP_MMU 0x00080000 /* Unsupported MMU config */
-#define DSISR_SET_RC 0x00040000 /* Failed setting of R/C bits */
-#define DSISR_PGDIRFAULT 0x00020000 /* Fault on page directory */
+#define DSISR_BAD_DIRECT_ST 0x80000000 /* Obsolete: Direct store error */
+#define DSISR_NOHPTE 0x40000000 /* no translation found */
+#define DSISR_ATTR_CONFLICT 0x20000000 /* P9: Process vs. Partition attr */
+#define DSISR_NOEXEC_OR_G 0x10000000 /* Alias of SRR1 bit, see below */
+#define DSISR_PROTFAULT 0x08000000 /* protection fault */
+#define DSISR_BADACCESS 0x04000000 /* bad access to CI or G */
+#define DSISR_ISSTORE 0x02000000 /* access was a store */
+#define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
+#define DSISR_NOSEGMENT 0x00200000 /* STAB miss (unsupported) */
+#define DSISR_KEYFAULT 0x00200000 /* Storage Key fault */
+#define DSISR_BAD_EXT_CTRL 0x00100000 /* Obsolete: External ctrl error */
+#define DSISR_UNSUPP_MMU 0x00080000 /* P9: Unsupported MMU config */
+#define DSISR_SET_RC 0x00040000 /* P9: Failed setting of R/C bits */
+#define DSISR_PRTABLE_FAULT 0x00020000 /* P9: Fault on process table */
+#define DSISR_ICSWX_NO_CT 0x00004000 /* P7: icswx unavailable cp type */
+#define DSISR_BAD_COPYPASTE 0x00000008 /* P9: Copy/Paste on wrong memtype */
+#define DSISR_BAD_AMO 0x00000004 /* P9: Incorrect AMO opcode */
+#define DSISR_BAD_CI_LDST 0x00000002 /* P8: Bad HV CI load/store */
+
+/*
+ * DSISR_NOEXEC_OR_G doesn't actually exist. This bit is always
+ * 0 on DSIs. However, on ISIs, the corresponding bit in SRR1
+ * indicates an attempt at executing from a no-execute PTE
+ * or segment or from a guarded page.
+ *
+ * We add a definition here for completeness as we alias
+ * DSISR and SRR1 in do_page_fault.
+ */
+
+/*
+ * DSISR bits that are treated as a fault. Any bit set
+ * here will skip hash_page, and cause do_page_fault to
+ * trigger a SIGBUS or SIGSEGV:
+ */
+#define DSISR_BAD_FAULT_32S (DSISR_BAD_DIRECT_ST | \
+ DSISR_BADACCESS | \
+ DSISR_BAD_EXT_CTRL)
+#define DSISR_BAD_FAULT_64S (DSISR_BAD_FAULT_32S | \
+ DSISR_ATTR_CONFLICT | \
+ DSISR_KEYFAULT | \
+ DSISR_UNSUPP_MMU | \
+ DSISR_PRTABLE_FAULT | \
+ DSISR_ICSWX_NO_CT | \
+ DSISR_BAD_COPYPASTE | \
+ DSISR_BAD_AMO | \
+ DSISR_BAD_CI_LDST)
+/*
+ * These bits are equivalent in SRR1 and DSISR for 0x400
+ * instruction access interrupts on Book3S
+ */
+#define DSISR_SRR1_MATCH_32S (DSISR_NOHPTE | \
+ DSISR_NOEXEC_OR_G | \
+ DSISR_PROTFAULT)
+#define DSISR_SRR1_MATCH_64S (DSISR_SRR1_MATCH_32S | \
+ DSISR_KEYFAULT | \
+ DSISR_UNSUPP_MMU | \
+ DSISR_SET_RC | \
+ DSISR_PRTABLE_FAULT)
+
#define SPRN_TBRL 0x10C /* Time Base Read Lower Register (user, R/O) */
#define SPRN_TBRU 0x10D /* Time Base Read Upper Register (user, R/O) */
#define SPRN_CIR 0x11B /* Chip Information Register (hyper, R/0) */
@@ -307,6 +356,7 @@
#define SPRN_PMSR 0x355 /* Power Management Status Reg */
#define SPRN_PMMAR 0x356 /* Power Management Memory Activity Register */
#define SPRN_PSSCR 0x357 /* Processor Stop Status and Control Register (ISA 3.0) */
+#define SPRN_PSSCR_PR 0x337 /* PSSCR ISA 3.0, privileged mode access */
#define SPRN_PMCR 0x374 /* Power Management Control Register */
/* HFSCR and FSCR bit numbers are the same */
@@ -675,6 +725,7 @@
* may not be recoverable */
#define SRR1_WS_DEEPER 0x00020000 /* Some resources not maintained */
#define SRR1_WS_DEEP 0x00010000 /* All resources maintained */
+#define SRR1_PROGTM 0x00200000 /* TM Bad Thing */
#define SRR1_PROGFPE 0x00100000 /* Floating Point Enabled */
#define SRR1_PROGILL 0x00080000 /* Illegal instruction */
#define SRR1_PROGPRIV 0x00040000 /* Privileged instruction */
@@ -1114,7 +1165,7 @@
#endif
#endif
-#ifdef CONFIG_8xx
+#ifdef CONFIG_PPC_8xx
#define SPRN_SPRG_SCRATCH0 SPRN_SPRG0
#define SPRN_SPRG_SCRATCH1 SPRN_SPRG1
#define SPRN_SPRG_SCRATCH2 SPRN_SPRG2
@@ -1197,10 +1248,8 @@
* differentiated by the version number in the Communication Processor
* Module (CPM).
*/
-#define PVR_821 0x00500000
-#define PVR_823 PVR_821
-#define PVR_850 PVR_821
-#define PVR_860 PVR_821
+#define PVR_8xx 0x00500000
+
#define PVR_8240 0x00810100
#define PVR_8245 0x80811014
#define PVR_8260 PVR_8240
@@ -1295,12 +1344,12 @@ static inline void msr_check_and_clear(unsigned long bits)
".section __ftr_fixup,\"a\"\n" \
".align 3\n" \
"98:\n" \
- " .llong %1\n" \
- " .llong %1\n" \
- " .llong 97b-98b\n" \
- " .llong 99b-98b\n" \
- " .llong 0\n" \
- " .llong 0\n" \
+ " .8byte %1\n" \
+ " .8byte %1\n" \
+ " .8byte 97b-98b\n" \
+ " .8byte 99b-98b\n" \
+ " .8byte 0\n" \
+ " .8byte 0\n" \
".previous" \
: "=r" (rval) \
: "i" (CPU_FTR_CELL_TB_BUG), "i" (SPRN_TBRL) : "cr0"); \
@@ -1313,7 +1362,7 @@ static inline void msr_check_and_clear(unsigned long bits)
#else /* __powerpc64__ */
-#if defined(CONFIG_8xx)
+#if defined(CONFIG_PPC_8xx)
#define mftbl() ({unsigned long rval; \
asm volatile("mftbl %0" : "=r" (rval)); rval;})
#define mftbu() ({unsigned long rval; \
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 737e012ef56e..eb2a33d5df26 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -221,10 +221,7 @@
#define SPRN_CSRR0 SPRN_SRR2 /* Critical Save and Restore Register 0 */
#define SPRN_CSRR1 SPRN_SRR3 /* Critical Save and Restore Register 1 */
#endif
-
-#ifdef CONFIG_PPC_ICSWX
#define SPRN_HACOP 0x15F /* Hypervisor Available Coprocessor Register */
-#endif
/* Bit definitions for CCR1. */
#define CCR1_DPC 0x00000100 /* Disable L1 I-Cache/D-Cache parity checking */
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index 654d64c9f3ac..3a3fb0ca68f5 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -23,7 +23,6 @@ extern void reloc_got2(unsigned long);
void check_for_initrd(void);
void initmem_init(void);
-void setup_panic(void);
#define ARCH_PANIC_TIMEOUT 180
#ifdef CONFIG_PPC_PSERIES
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 8ea98504f900..fac963e10d39 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -97,6 +97,7 @@ static inline void set_hard_smp_processor_id(int cpu, int phys)
#endif
DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
+DECLARE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
static inline struct cpumask *cpu_sibling_mask(int cpu)
@@ -109,6 +110,11 @@ static inline struct cpumask *cpu_core_mask(int cpu)
return per_cpu(cpu_core_map, cpu);
}
+static inline struct cpumask *cpu_l2_cache_mask(int cpu)
+{
+ return per_cpu(cpu_l2_cache_map, cpu);
+}
+
extern int cpu_to_core_id(int cpu);
/* Since OpenPIC has only 4 IPIs, we use slightly different message numbers.
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index 8c1b913de6d7..edbe571bcc54 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -170,39 +170,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
lock->slock = 0;
}
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
- arch_spinlock_t lock_val;
-
- smp_mb();
-
- /*
- * Atomically load and store back the lock value (unchanged). This
- * ensures that our observation of the lock value is ordered with
- * respect to other lock operations.
- */
- __asm__ __volatile__(
-"1: " PPC_LWARX(%0, 0, %2, 0) "\n"
-" stwcx. %0, 0, %2\n"
-" bne- 1b\n"
- : "=&r" (lock_val), "+m" (*lock)
- : "r" (lock)
- : "cr0", "xer");
-
- if (arch_spin_value_unlocked(lock_val))
- goto out;
-
- while (lock->slock) {
- HMT_low();
- if (SHARED_PROCESSOR)
- __spin_yield(lock);
- }
- HMT_medium();
-
-out:
- smp_mb();
-}
-
/*
* Read-write spinlocks, allowing multiple readers
* but only one writer.
@@ -342,5 +309,8 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
#define arch_read_relax(lock) __rw_yield(lock)
#define arch_write_relax(lock) __rw_yield(lock)
+/* See include/linux/spinlock.h */
+#define smp_mb__after_spinlock() smp_mb()
+
#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */
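/*
 * Illustrative sketch only (not part of this patch): smp_mb__after_spinlock()
 * is intended to be placed right after taking a lock when the caller needs
 * the acquire to act as a full barrier, roughly:
 *
 *	spin_lock(&q->lock);
 *	smp_mb__after_spinlock();   // pairs with a barrier on the other side
 *	if (READ_ONCE(q->flag))
 *		do_something();
 *	spin_unlock(&q->lock);
 *
 * On powerpc it simply expands to smp_mb(), as defined above.
 */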
diff --git a/arch/powerpc/include/asm/sstep.h b/arch/powerpc/include/asm/sstep.h
index d3a42cc45a82..ab9d849644d0 100644
--- a/arch/powerpc/include/asm/sstep.h
+++ b/arch/powerpc/include/asm/sstep.h
@@ -23,12 +23,9 @@ struct pt_regs;
#define IS_RFID(instr) (((instr) & 0xfc0007fe) == 0x4c000024)
#define IS_RFI(instr) (((instr) & 0xfc0007fe) == 0x4c000064)
-/* Emulate instructions that cause a transfer of control. */
-extern int emulate_step(struct pt_regs *regs, unsigned int instr);
-
enum instruction_type {
COMPUTE, /* arith/logical/CR op, etc. */
- LOAD,
+ LOAD, /* load and store types need to be contiguous */
LOAD_MULTI,
LOAD_FP,
LOAD_VMX,
@@ -55,10 +52,31 @@ enum instruction_type {
#define INSTR_TYPE_MASK 0x1f
+#define OP_IS_LOAD_STORE(type) (LOAD <= (type) && (type) <= STCX)
+
+/* Compute flags, ORed in with type */
+#define SETREG 0x20
+#define SETCC 0x40
+#define SETXER 0x80
+
+/* Branch flags, ORed in with type */
+#define SETLK 0x20
+#define BRTAKEN 0x40
+#define DECCTR 0x80
+
/* Load/store flags, ORed in with type */
#define SIGNEXT 0x20
#define UPDATE 0x40 /* matches bit in opcode 31 instructions */
#define BYTEREV 0x80
+#define FPCONV 0x100
+
+/* Barrier type field, ORed in with type */
+#define BARRIER_MASK 0xe0
+#define BARRIER_SYNC 0x00
+#define BARRIER_ISYNC 0x20
+#define BARRIER_EIEIO 0x40
+#define BARRIER_LWSYNC 0x60
+#define BARRIER_PTESYNC 0x80
/* Cacheop values, ORed in with type */
#define CACHEOP_MASK 0x700
@@ -67,10 +85,17 @@ enum instruction_type {
#define DCBTST 0x200
#define DCBT 0x300
#define ICBI 0x400
+#define DCBZ 0x500
+
+/* VSX flags values */
+#define VSX_FPCONV 1 /* do floating point SP/DP conversion */
+#define VSX_SPLAT 2 /* store loaded value into all elements */
+#define VSX_LDLEFT 4 /* load VSX register from left */
+#define VSX_CHECK_VEC 8 /* check MSR_VEC not MSR_VSX for reg >= 32 */
/* Size field in type word */
-#define SIZE(n) ((n) << 8)
-#define GETSIZE(w) ((w) >> 8)
+#define SIZE(n) ((n) << 12)
+#define GETSIZE(w) ((w) >> 12)
#define MKOP(t, f, s) ((t) | (f) | SIZE(s))
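/*
 * Illustrative sketch only (not part of this patch): how the packed type
 * word produced by MKOP() is taken apart again.  mem_op_size() is a
 * hypothetical helper, not a kernel function.
 */
static inline int mem_op_size(unsigned int word)
{
	unsigned int type = word & INSTR_TYPE_MASK;

	if (!OP_IS_LOAD_STORE(type))
		return 0;
	/* e.g. MKOP(LOAD, SIGNEXT, 4) yields type == LOAD and size == 4 */
	return GETSIZE(word);
}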
@@ -83,7 +108,63 @@ struct instruction_op {
int update_reg;
/* For MFSPR */
int spr;
+ u32 ccval;
+ u32 xerval;
+ u8 element_size; /* for VSX/VMX loads/stores */
+ u8 vsx_flags;
+};
+
+union vsx_reg {
+ u8 b[16];
+ u16 h[8];
+ u32 w[4];
+ unsigned long d[2];
+ float fp[4];
+ double dp[2];
+ __vector128 v;
};
-extern int analyse_instr(struct instruction_op *op, struct pt_regs *regs,
+/*
+ * Decode an instruction, and return information about it in *op
+ * without changing *regs.
+ *
+ * Return value is 1 if the instruction can be emulated just by
+ * updating *regs with the information in *op, -1 if we need the
+ * GPRs but *regs doesn't contain the full register set, or 0
+ * otherwise.
+ */
+extern int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
unsigned int instr);
+
+/*
+ * Emulate an instruction that can be executed just by updating
+ * fields in *regs.
+ */
+void emulate_update_regs(struct pt_regs *reg, struct instruction_op *op);
+
+/*
+ * Emulate instructions that cause a transfer of control,
+ * arithmetic/logical instructions, loads and stores,
+ * cache operations and barriers.
+ *
+ * Returns 1 if the instruction was emulated successfully,
+ * 0 if it could not be emulated, or -1 for an instruction that
+ * should not be emulated (rfid, mtmsrd clearing MSR_RI, etc.).
+ */
+extern int emulate_step(struct pt_regs *regs, unsigned int instr);
+
+/*
+ * Emulate a load or store instruction by reading/writing the
+ * memory of the current process. FP/VMX/VSX registers are assumed
+ * to hold live values if the appropriate enable bit in regs->msr is
+ * set; otherwise this will use the saved values in the thread struct
+ * for user-mode accesses.
+ */
+extern int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op);
+
+extern void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
+ const void *mem, bool cross_endian);
+extern void emulate_vsx_store(struct instruction_op *op,
+ const union vsx_reg *reg, void *mem,
+ bool cross_endian);
+extern int emulate_dcbz(unsigned long ea, struct pt_regs *regs);
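/*
 * Illustrative sketch only (not part of this patch): one way a caller can
 * drive the analyse_instr()/emulate_*() API declared above.  emulate_one()
 * is a hypothetical helper and error handling is reduced to a minimum; a
 * real caller would also inspect op.type before touching memory, as the
 * reworked fix_alignment() below does.
 */
static int emulate_one(struct pt_regs *regs, unsigned int instr)
{
	struct instruction_op op;
	int r;

	r = analyse_instr(&op, regs, instr);
	if (r < 0)
		return r;			/* incomplete register set */
	if (r > 0) {
		/* fully described by op: just update the registers */
		emulate_update_regs(regs, &op);
		return 0;
	}
	/* r == 0: typically a load/store that must touch memory */
	return emulate_loadstore(regs, &op);
}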
diff --git a/arch/powerpc/include/asm/string.h b/arch/powerpc/include/asm/string.h
index da3cdffca440..cc9addefb51c 100644
--- a/arch/powerpc/include/asm/string.h
+++ b/arch/powerpc/include/asm/string.h
@@ -10,6 +10,7 @@
#define __HAVE_ARCH_MEMMOVE
#define __HAVE_ARCH_MEMCMP
#define __HAVE_ARCH_MEMCHR
+#define __HAVE_ARCH_MEMSET16
extern char * strcpy(char *,const char *);
extern char * strncpy(char *,const char *, __kernel_size_t);
@@ -23,6 +24,31 @@ extern void * memmove(void *,const void *,__kernel_size_t);
extern int memcmp(const void *,const void *,__kernel_size_t);
extern void * memchr(const void *,int,__kernel_size_t);
+#ifdef CONFIG_PPC64
+#define __HAVE_ARCH_MEMSET32
+#define __HAVE_ARCH_MEMSET64
+
+extern void *__memset16(uint16_t *, uint16_t v, __kernel_size_t);
+extern void *__memset32(uint32_t *, uint32_t v, __kernel_size_t);
+extern void *__memset64(uint64_t *, uint64_t v, __kernel_size_t);
+
+static inline void *memset16(uint16_t *p, uint16_t v, __kernel_size_t n)
+{
+ return __memset16(p, v, n * 2);
+}
+
+static inline void *memset32(uint32_t *p, uint32_t v, __kernel_size_t n)
+{
+ return __memset32(p, v, n * 4);
+}
+
+static inline void *memset64(uint64_t *p, uint64_t v, __kernel_size_t n)
+{
+ return __memset64(p, v, n * 8);
+}
+#else
+extern void *memset16(uint16_t *, uint16_t, __kernel_size_t);
+#endif
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_STRING_H */
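/*
 * Illustrative sketch only (not part of this patch): the new helpers take
 * an element count rather than a byte count, so filling a hypothetical
 * 1024-entry scanline of 32-bit pixels looks like this:
 *
 *	u32 scanline[1024];
 *
 *	memset32(scanline, 0x00ff00ff, ARRAY_SIZE(scanline));
 *
 * On PPC64 the inline wrapper converts the count into 1024 * 4 bytes
 * before calling __memset64()'s sibling __memset32().
 */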
diff --git a/arch/powerpc/include/asm/timex.h b/arch/powerpc/include/asm/timex.h
index 2cf846edb3fc..cb61eae5b7ed 100644
--- a/arch/powerpc/include/asm/timex.h
+++ b/arch/powerpc/include/asm/timex.h
@@ -29,7 +29,7 @@ static inline cycles_t get_cycles(void)
ret = 0;
__asm__ __volatile__(
-#ifdef CONFIG_8xx
+#ifdef CONFIG_PPC_8xx
"97: mftb %0\n"
#else
"97: mfspr %0, %2\n"
@@ -45,11 +45,7 @@ static inline cycles_t get_cycles(void)
" .long 0\n"
" .long 0\n"
".previous"
-#ifdef CONFIG_8xx
- : "=r" (ret) : "i" (CPU_FTR_601));
-#else
: "=r" (ret) : "i" (CPU_FTR_601), "i" (SPRN_TBRL));
-#endif
return ret;
#endif
}
diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
index 609557569f65..a7eabff27a0f 100644
--- a/arch/powerpc/include/asm/tlb.h
+++ b/arch/powerpc/include/asm/tlb.h
@@ -69,13 +69,22 @@ static inline int mm_is_core_local(struct mm_struct *mm)
topology_sibling_cpumask(smp_processor_id()));
}
+#ifdef CONFIG_PPC_BOOK3S_64
+static inline int mm_is_thread_local(struct mm_struct *mm)
+{
+ if (atomic_read(&mm->context.active_cpus) > 1)
+ return false;
+ return cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm));
+}
+#else /* CONFIG_PPC_BOOK3S_64 */
static inline int mm_is_thread_local(struct mm_struct *mm)
{
return cpumask_equal(mm_cpumask(mm),
cpumask_of(smp_processor_id()));
}
+#endif /* !CONFIG_PPC_BOOK3S_64 */
-#else
+#else /* CONFIG_SMP */
static inline int mm_is_core_local(struct mm_struct *mm)
{
return 1;
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index dc4e15937ccf..2d84bca8d053 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -16,8 +16,6 @@ struct device_node;
#include <asm/mmzone.h>
-#define parent_node(node) (node)
-
#define cpumask_of_node(node) ((node) == -1 ? \
cpu_all_mask : \
node_to_cpumask_map[node])
diff --git a/arch/powerpc/include/asm/vas.h b/arch/powerpc/include/asm/vas.h
new file mode 100644
index 000000000000..fd5963acd658
--- /dev/null
+++ b/arch/powerpc/include/asm/vas.h
@@ -0,0 +1,159 @@
+/*
+ * Copyright 2016-17 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_POWERPC_VAS_H
+#define _ASM_POWERPC_VAS_H
+
+/*
+ * Min and max FIFO sizes are based on Version 1.05 Section 3.1.4.25
+ * (Local FIFO Size Register) of the VAS workbook.
+ */
+#define VAS_RX_FIFO_SIZE_MIN (1 << 10) /* 1KB */
+#define VAS_RX_FIFO_SIZE_MAX (8 << 20) /* 8MB */
+
+/*
+ * Threshold Control Mode: have the paste operation fail if the number of
+ * requests in the receive FIFO exceeds a threshold.
+ *
+ * NOTE: No special error code yet if paste is rejected because of these
+ * limits. So users can't distinguish between this and other errors.
+ */
+#define VAS_THRESH_DISABLED 0
+#define VAS_THRESH_FIFO_GT_HALF_FULL 1
+#define VAS_THRESH_FIFO_GT_QTR_FULL 2
+#define VAS_THRESH_FIFO_GT_EIGHTH_FULL 3
+
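/*
 * Illustrative sketch only (not part of this patch): the threshold mode is
 * presumably selected through the tc_mode field of the window attribute
 * structures below, e.g.
 *
 *	rxattr.tc_mode = VAS_THRESH_FIFO_GT_HALF_FULL;
 */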
+/*
+ * Get/Set bit fields
+ */
+#define GET_FIELD(m, v) (((v) & (m)) >> MASK_LSH(m))
+#define MASK_LSH(m) (__builtin_ffsl(m) - 1)
+#define SET_FIELD(m, v, val) \
+ (((v) & ~(m)) | ((((typeof(v))(val)) << MASK_LSH(m)) & (m)))
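/*
 * Illustrative worked example only (not part of this patch), for the field
 * helpers above.  With m = 0x0000ff00:
 *
 *	MASK_LSH(m)			== 8
 *	GET_FIELD(m, 0x1234)		== 0x12
 *	SET_FIELD(m, 0x1234, 0xab)	== 0xab34
 */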
+
+/*
+ * Co-processor Engine type.
+ */
+enum vas_cop_type {
+ VAS_COP_TYPE_FAULT,
+ VAS_COP_TYPE_842,
+ VAS_COP_TYPE_842_HIPRI,
+ VAS_COP_TYPE_GZIP,
+ VAS_COP_TYPE_GZIP_HIPRI,
+ VAS_COP_TYPE_FTW,
+ VAS_COP_TYPE_MAX,
+};
+
+/*
+ * Receive window attributes specified by the (in-kernel) owner of window.
+ */
+struct vas_rx_win_attr {
+ void *rx_fifo;
+ int rx_fifo_size;
+ int wcreds_max;
+
+ bool pin_win;
+ bool rej_no_credit;
+ bool tx_wcred_mode;
+ bool rx_wcred_mode;
+ bool tx_win_ord_mode;
+ bool rx_win_ord_mode;
+ bool data_stamp;
+ bool nx_win;
+ bool fault_win;
+ bool user_win;
+ bool notify_disable;
+ bool intr_disable;
+ bool notify_early;
+
+ int lnotify_lpid;
+ int lnotify_pid;
+ int lnotify_tid;
+ u32 pswid;
+
+ int tc_mode;
+};
+
+/*
+ * Window attributes specified by the in-kernel owner of a send window.
+ */
+struct vas_tx_win_attr {
+ enum vas_cop_type cop;
+ int wcreds_max;
+ int lpid;
+ int pidr; /* hardware PID (from SPRN_PID) */
+ int pid; /* linux process id */
+ int pswid;
+ int rsvd_txbuf_count;
+ int tc_mode;
+
+ bool user_win;
+ bool pin_win;
+ bool rej_no_credit;
+ bool rsvd_txbuf_enable;
+ bool tx_wcred_mode;
+ bool rx_wcred_mode;
+ bool tx_win_ord_mode;
+ bool rx_win_ord_mode;
+};
+
+/*
+ * Helper to initialize receive window attributes to defaults for an
+ * NX window.
+ */
+void vas_init_rx_win_attr(struct vas_rx_win_attr *rxattr, enum vas_cop_type cop);
+
+/*
+ * Open a VAS receive window for the instance of VAS identified by @vasid.
+ * Use @attr to initialize the attributes of the window.
+ *
+ * Return a handle to the window or ERR_PTR() on error.
+ */
+struct vas_window *vas_rx_win_open(int vasid, enum vas_cop_type cop,
+ struct vas_rx_win_attr *attr);
+
+/*
+ * Helper to initialize send window attributes to defaults for an NX window.
+ */
+extern void vas_init_tx_win_attr(struct vas_tx_win_attr *txattr,
+ enum vas_cop_type cop);
+
+/*
+ * Open a VAS send window for the instance of VAS identified by @vasid
+ * and the co-processor type @cop. Use @attr to initialize attributes
+ * of the window.
+ *
+ * Note: The instance of VAS must already have an open receive window for
+ * the coprocessor type @cop.
+ *
+ * Return a handle to the send window or ERR_PTR() on error.
+ */
+struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop,
+ struct vas_tx_win_attr *attr);
+
+/*
+ * Close the send or receive window identified by @win. For receive windows
+ * return -EAGAIN if there are active send windows attached to this receive
+ * window.
+ */
+int vas_win_close(struct vas_window *win);
+
+/*
+ * Copy the co-processor request block (CRB) @crb into the local L2 cache.
+ */
+int vas_copy_crb(void *crb, int offset);
+
+/*
+ * Paste a previously copied CRB (see vas_copy_crb()) from the L2 cache to
+ * the hardware address associated with the window @win. @re is expected/
+ * assumed to be true for NX windows.
+ */
+int vas_paste_crb(struct vas_window *win, int offset, bool re);
+
+#endif /* __ASM_POWERPC_VAS_H */
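/*
 * Illustrative sketch only (not part of this patch): one possible way a
 * driver could drive the VAS API declared above.  "vasid", "fifo" and
 * "crb" are made-up placeholders, vas_copy_crb()/vas_paste_crb() are
 * assumed to return 0 on success, and error handling beyond the IS_ERR()
 * checks is omitted.
 */
static int vas_demo(int vasid, void *fifo, int fifo_size, void *crb)
{
	struct vas_rx_win_attr rxattr;
	struct vas_tx_win_attr txattr;
	struct vas_window *rxwin, *txwin;
	int rc;

	vas_init_rx_win_attr(&rxattr, VAS_COP_TYPE_842);
	rxattr.rx_fifo = fifo;
	rxattr.rx_fifo_size = fifo_size;
	rxwin = vas_rx_win_open(vasid, VAS_COP_TYPE_842, &rxattr);
	if (IS_ERR(rxwin))
		return PTR_ERR(rxwin);

	vas_init_tx_win_attr(&txattr, VAS_COP_TYPE_842);
	txwin = vas_tx_win_open(vasid, VAS_COP_TYPE_842, &txattr);
	if (IS_ERR(txwin)) {
		vas_win_close(rxwin);
		return PTR_ERR(txwin);
	}

	/* Queue one request: cache the CRB, then paste it to the window. */
	rc = vas_copy_crb(crb, 0);
	if (!rc)
		rc = vas_paste_crb(txwin, 0, true);	/* re is true for NX */

	vas_win_close(txwin);
	vas_win_close(rxwin);
	return rc;
}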
diff --git a/arch/powerpc/include/asm/xive.h b/arch/powerpc/include/asm/xive.h
index c23ff4389ca2..371fbebf1ec9 100644
--- a/arch/powerpc/include/asm/xive.h
+++ b/arch/powerpc/include/asm/xive.h
@@ -45,6 +45,7 @@ struct xive_irq_data {
void __iomem *trig_mmio;
u32 esb_shift;
int src_chip;
+ u32 hw_irq;
/* Setup/used by frontend */
int target;
@@ -55,6 +56,7 @@ struct xive_irq_data {
#define XIVE_IRQ_FLAG_SHIFT_BUG 0x04
#define XIVE_IRQ_FLAG_MASK_FW 0x08
#define XIVE_IRQ_FLAG_EOI_FW 0x10
+#define XIVE_IRQ_FLAG_H_INT_ESB 0x20
#define XIVE_INVALID_CHIP_ID -1
@@ -110,11 +112,13 @@ extern bool __xive_enabled;
static inline bool xive_enabled(void) { return __xive_enabled; }
+extern bool xive_spapr_init(void);
extern bool xive_native_init(void);
extern void xive_smp_probe(void);
extern int xive_smp_prepare_cpu(unsigned int cpu);
extern void xive_smp_setup_cpu(void);
extern void xive_smp_disable_cpu(void);
+extern void xive_teardown_cpu(void);
extern void xive_kexec_teardown_cpu(int secondary);
extern void xive_shutdown(void);
extern void xive_flush_interrupt(void);
@@ -147,6 +151,7 @@ extern int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id)
static inline bool xive_enabled(void) { return false; }
+static inline bool xive_spapr_init(void) { return false; }
static inline bool xive_native_init(void) { return false; }
static inline void xive_smp_probe(void) { }
extern inline int xive_smp_prepare_cpu(unsigned int cpu) { return -EINVAL; }
diff --git a/arch/powerpc/include/uapi/asm/ioctls.h b/arch/powerpc/include/uapi/asm/ioctls.h
index bfd609a3e928..e3b10469f787 100644
--- a/arch/powerpc/include/uapi/asm/ioctls.h
+++ b/arch/powerpc/include/uapi/asm/ioctls.h
@@ -100,7 +100,7 @@
#define TIOCGPKT _IOR('T', 0x38, int) /* Get packet mode state */
#define TIOCGPTLCK _IOR('T', 0x39, int) /* Get Pty lock state */
#define TIOCGEXCL _IOR('T', 0x40, int) /* Get exclusive mode state */
-#define TIOCGPTPEER _IOR('T', 0x41, int) /* Safely open the slave */
+#define TIOCGPTPEER _IO('T', 0x41) /* Safely open the slave */
#define TIOCSERCONFIG 0x5453
#define TIOCSERGWILD 0x5454
diff --git a/arch/powerpc/include/uapi/asm/mman.h b/arch/powerpc/include/uapi/asm/mman.h
index ab45cc2f3101..03c06ba7464f 100644
--- a/arch/powerpc/include/uapi/asm/mman.h
+++ b/arch/powerpc/include/uapi/asm/mman.h
@@ -29,20 +29,4 @@
#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */
#define MAP_HUGETLB 0x40000 /* create a huge page mapping */
-/*
- * When MAP_HUGETLB is set, bits [26:31] of the flags argument to mmap(2),
- * encode the log2 of the huge page size. A value of zero indicates that the
- * default huge page size should be used. To use a non-default huge page size,
- * one of these defines can be used, or the size can be encoded by hand. Note
- * that on most systems only a subset, or possibly none, of these sizes will be
- * available.
- */
-#define MAP_HUGE_512KB (19 << MAP_HUGE_SHIFT) /* 512KB HugeTLB Page */
-#define MAP_HUGE_1MB (20 << MAP_HUGE_SHIFT) /* 1MB HugeTLB Page */
-#define MAP_HUGE_2MB (21 << MAP_HUGE_SHIFT) /* 2MB HugeTLB Page */
-#define MAP_HUGE_8MB (23 << MAP_HUGE_SHIFT) /* 8MB HugeTLB Page */
-#define MAP_HUGE_16MB (24 << MAP_HUGE_SHIFT) /* 16MB HugeTLB Page */
-#define MAP_HUGE_1GB (30 << MAP_HUGE_SHIFT) /* 1GB HugeTLB Page */
-#define MAP_HUGE_16GB (34 << MAP_HUGE_SHIFT) /* 16GB HugeTLB Page */
-
#endif /* _UAPI_ASM_POWERPC_MMAN_H */
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 4aa7c147e447..91960f83039c 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -38,7 +38,7 @@ obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \
signal_64.o ptrace32.o \
paca.o nvram_64.o firmware.o
obj-$(CONFIG_VDSO32) += vdso32/
-obj-$(CONFIG_HARDLOCKUP_DETECTOR) += watchdog.o
+obj-$(CONFIG_PPC_WATCHDOG) += watchdog.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o
obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o
@@ -83,7 +83,7 @@ extra-y := head_$(BITS).o
extra-$(CONFIG_40x) := head_40x.o
extra-$(CONFIG_44x) := head_44x.o
extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o
-extra-$(CONFIG_8xx) := head_8xx.o
+extra-$(CONFIG_PPC_8xx) := head_8xx.o
extra-y += vmlinux.lds
obj-$(CONFIG_RELOCATABLE) += reloc_$(BITS).o
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index ec7a8b099dd9..26b9994d27ee 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -27,6 +27,7 @@
#include <asm/switch_to.h>
#include <asm/disassemble.h>
#include <asm/cpu_has_feature.h>
+#include <asm/sstep.h>
struct aligninfo {
unsigned char len;
@@ -40,364 +41,9 @@ struct aligninfo {
#define LD 0 /* load */
#define ST 1 /* store */
#define SE 2 /* sign-extend value, or FP ld/st as word */
-#define F 4 /* to/from fp regs */
-#define U 8 /* update index register */
-#define M 0x10 /* multiple load/store */
#define SW 0x20 /* byte swap */
-#define S 0x40 /* single-precision fp or... */
-#define SX 0x40 /* ... byte count in XER */
-#define HARD 0x80 /* string, stwcx. */
#define E4 0x40 /* SPE endianness is word */
#define E8 0x80 /* SPE endianness is double word */
-#define SPLT 0x80 /* VSX SPLAT load */
-
-/* DSISR bits reported for a DCBZ instruction: */
-#define DCBZ 0x5f /* 8xx/82xx dcbz faults when cache not enabled */
-
-/*
- * The PowerPC stores certain bits of the instruction that caused the
- * alignment exception in the DSISR register. This array maps those
- * bits to information about the operand length and what the
- * instruction would do.
- */
-static struct aligninfo aligninfo[128] = {
- { 4, LD }, /* 00 0 0000: lwz / lwarx */
- INVALID, /* 00 0 0001 */
- { 4, ST }, /* 00 0 0010: stw */
- INVALID, /* 00 0 0011 */
- { 2, LD }, /* 00 0 0100: lhz */
- { 2, LD+SE }, /* 00 0 0101: lha */
- { 2, ST }, /* 00 0 0110: sth */
- { 4, LD+M }, /* 00 0 0111: lmw */
- { 4, LD+F+S }, /* 00 0 1000: lfs */
- { 8, LD+F }, /* 00 0 1001: lfd */
- { 4, ST+F+S }, /* 00 0 1010: stfs */
- { 8, ST+F }, /* 00 0 1011: stfd */
- { 16, LD }, /* 00 0 1100: lq */
- { 8, LD }, /* 00 0 1101: ld/ldu/lwa */
- INVALID, /* 00 0 1110 */
- { 8, ST }, /* 00 0 1111: std/stdu */
- { 4, LD+U }, /* 00 1 0000: lwzu */
- INVALID, /* 00 1 0001 */
- { 4, ST+U }, /* 00 1 0010: stwu */
- INVALID, /* 00 1 0011 */
- { 2, LD+U }, /* 00 1 0100: lhzu */
- { 2, LD+SE+U }, /* 00 1 0101: lhau */
- { 2, ST+U }, /* 00 1 0110: sthu */
- { 4, ST+M }, /* 00 1 0111: stmw */
- { 4, LD+F+S+U }, /* 00 1 1000: lfsu */
- { 8, LD+F+U }, /* 00 1 1001: lfdu */
- { 4, ST+F+S+U }, /* 00 1 1010: stfsu */
- { 8, ST+F+U }, /* 00 1 1011: stfdu */
- { 16, LD+F }, /* 00 1 1100: lfdp */
- INVALID, /* 00 1 1101 */
- { 16, ST+F }, /* 00 1 1110: stfdp */
- INVALID, /* 00 1 1111 */
- { 8, LD }, /* 01 0 0000: ldx */
- INVALID, /* 01 0 0001 */
- { 8, ST }, /* 01 0 0010: stdx */
- INVALID, /* 01 0 0011 */
- INVALID, /* 01 0 0100 */
- { 4, LD+SE }, /* 01 0 0101: lwax */
- INVALID, /* 01 0 0110 */
- INVALID, /* 01 0 0111 */
- { 4, LD+M+HARD+SX }, /* 01 0 1000: lswx */
- { 4, LD+M+HARD }, /* 01 0 1001: lswi */
- { 4, ST+M+HARD+SX }, /* 01 0 1010: stswx */
- { 4, ST+M+HARD }, /* 01 0 1011: stswi */
- INVALID, /* 01 0 1100 */
- { 8, LD+U }, /* 01 0 1101: ldu */
- INVALID, /* 01 0 1110 */
- { 8, ST+U }, /* 01 0 1111: stdu */
- { 8, LD+U }, /* 01 1 0000: ldux */
- INVALID, /* 01 1 0001 */
- { 8, ST+U }, /* 01 1 0010: stdux */
- INVALID, /* 01 1 0011 */
- INVALID, /* 01 1 0100 */
- { 4, LD+SE+U }, /* 01 1 0101: lwaux */
- INVALID, /* 01 1 0110 */
- INVALID, /* 01 1 0111 */
- INVALID, /* 01 1 1000 */
- INVALID, /* 01 1 1001 */
- INVALID, /* 01 1 1010 */
- INVALID, /* 01 1 1011 */
- INVALID, /* 01 1 1100 */
- INVALID, /* 01 1 1101 */
- INVALID, /* 01 1 1110 */
- INVALID, /* 01 1 1111 */
- INVALID, /* 10 0 0000 */
- INVALID, /* 10 0 0001 */
- INVALID, /* 10 0 0010: stwcx. */
- INVALID, /* 10 0 0011 */
- INVALID, /* 10 0 0100 */
- INVALID, /* 10 0 0101 */
- INVALID, /* 10 0 0110 */
- INVALID, /* 10 0 0111 */
- { 4, LD+SW }, /* 10 0 1000: lwbrx */
- INVALID, /* 10 0 1001 */
- { 4, ST+SW }, /* 10 0 1010: stwbrx */
- INVALID, /* 10 0 1011 */
- { 2, LD+SW }, /* 10 0 1100: lhbrx */
- { 4, LD+SE }, /* 10 0 1101 lwa */
- { 2, ST+SW }, /* 10 0 1110: sthbrx */
- { 16, ST }, /* 10 0 1111: stq */
- INVALID, /* 10 1 0000 */
- INVALID, /* 10 1 0001 */
- INVALID, /* 10 1 0010 */
- INVALID, /* 10 1 0011 */
- INVALID, /* 10 1 0100 */
- INVALID, /* 10 1 0101 */
- INVALID, /* 10 1 0110 */
- INVALID, /* 10 1 0111 */
- INVALID, /* 10 1 1000 */
- INVALID, /* 10 1 1001 */
- INVALID, /* 10 1 1010 */
- INVALID, /* 10 1 1011 */
- INVALID, /* 10 1 1100 */
- INVALID, /* 10 1 1101 */
- INVALID, /* 10 1 1110 */
- { 0, ST+HARD }, /* 10 1 1111: dcbz */
- { 4, LD }, /* 11 0 0000: lwzx */
- INVALID, /* 11 0 0001 */
- { 4, ST }, /* 11 0 0010: stwx */
- INVALID, /* 11 0 0011 */
- { 2, LD }, /* 11 0 0100: lhzx */
- { 2, LD+SE }, /* 11 0 0101: lhax */
- { 2, ST }, /* 11 0 0110: sthx */
- INVALID, /* 11 0 0111 */
- { 4, LD+F+S }, /* 11 0 1000: lfsx */
- { 8, LD+F }, /* 11 0 1001: lfdx */
- { 4, ST+F+S }, /* 11 0 1010: stfsx */
- { 8, ST+F }, /* 11 0 1011: stfdx */
- { 16, LD+F }, /* 11 0 1100: lfdpx */
- { 4, LD+F+SE }, /* 11 0 1101: lfiwax */
- { 16, ST+F }, /* 11 0 1110: stfdpx */
- { 4, ST+F }, /* 11 0 1111: stfiwx */
- { 4, LD+U }, /* 11 1 0000: lwzux */
- INVALID, /* 11 1 0001 */
- { 4, ST+U }, /* 11 1 0010: stwux */
- INVALID, /* 11 1 0011 */
- { 2, LD+U }, /* 11 1 0100: lhzux */
- { 2, LD+SE+U }, /* 11 1 0101: lhaux */
- { 2, ST+U }, /* 11 1 0110: sthux */
- INVALID, /* 11 1 0111 */
- { 4, LD+F+S+U }, /* 11 1 1000: lfsux */
- { 8, LD+F+U }, /* 11 1 1001: lfdux */
- { 4, ST+F+S+U }, /* 11 1 1010: stfsux */
- { 8, ST+F+U }, /* 11 1 1011: stfdux */
- INVALID, /* 11 1 1100 */
- { 4, LD+F }, /* 11 1 1101: lfiwzx */
- INVALID, /* 11 1 1110 */
- INVALID, /* 11 1 1111 */
-};
-
-/*
- * The dcbz (data cache block zero) instruction
- * gives an alignment fault if used on non-cacheable
- * memory. We handle the fault mainly for the
- * case when we are running with the cache disabled
- * for debugging.
- */
-static int emulate_dcbz(struct pt_regs *regs, unsigned char __user *addr)
-{
- long __user *p;
- int i, size;
-
-#ifdef __powerpc64__
- size = ppc64_caches.l1d.block_size;
-#else
- size = L1_CACHE_BYTES;
-#endif
- p = (long __user *) (regs->dar & -size);
- if (user_mode(regs) && !access_ok(VERIFY_WRITE, p, size))
- return -EFAULT;
- for (i = 0; i < size / sizeof(long); ++i)
- if (__put_user_inatomic(0, p+i))
- return -EFAULT;
- return 1;
-}
-
-/*
- * Emulate load & store multiple instructions
- * On 64-bit machines, these instructions only affect/use the
- * bottom 4 bytes of each register, and the loads clear the
- * top 4 bytes of the affected register.
- */
-#ifdef __BIG_ENDIAN__
-#ifdef CONFIG_PPC64
-#define REG_BYTE(rp, i) *((u8 *)((rp) + ((i) >> 2)) + ((i) & 3) + 4)
-#else
-#define REG_BYTE(rp, i) *((u8 *)(rp) + (i))
-#endif
-#else
-#define REG_BYTE(rp, i) (*(((u8 *)((rp) + ((i)>>2)) + ((i)&3))))
-#endif
-
-#define SWIZ_PTR(p) ((unsigned char __user *)((p) ^ swiz))
-
-static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
- unsigned int reg, unsigned int nb,
- unsigned int flags, unsigned int instr,
- unsigned long swiz)
-{
- unsigned long *rptr;
- unsigned int nb0, i, bswiz;
- unsigned long p;
-
- /*
- * We do not try to emulate 8 bytes multiple as they aren't really
- * available in our operating environments and we don't try to
- * emulate multiples operations in kernel land as they should never
- * be used/generated there at least not on unaligned boundaries
- */
- if (unlikely((nb > 4) || !user_mode(regs)))
- return 0;
-
- /* lmw, stmw, lswi/x, stswi/x */
- nb0 = 0;
- if (flags & HARD) {
- if (flags & SX) {
- nb = regs->xer & 127;
- if (nb == 0)
- return 1;
- } else {
- unsigned long pc = regs->nip ^ (swiz & 4);
-
- if (__get_user_inatomic(instr,
- (unsigned int __user *)pc))
- return -EFAULT;
- if (swiz == 0 && (flags & SW))
- instr = cpu_to_le32(instr);
- nb = (instr >> 11) & 0x1f;
- if (nb == 0)
- nb = 32;
- }
- if (nb + reg * 4 > 128) {
- nb0 = nb + reg * 4 - 128;
- nb = 128 - reg * 4;
- }
-#ifdef __LITTLE_ENDIAN__
- /*
- * String instructions are endian neutral but the code
- * below is not. Force byte swapping on so that the
- * effects of swizzling are undone in the load/store
- * loops below.
- */
- flags ^= SW;
-#endif
- } else {
- /* lwm, stmw */
- nb = (32 - reg) * 4;
- }
-
- if (!access_ok((flags & ST ? VERIFY_WRITE: VERIFY_READ), addr, nb+nb0))
- return -EFAULT; /* bad address */
-
- rptr = &regs->gpr[reg];
- p = (unsigned long) addr;
- bswiz = (flags & SW)? 3: 0;
-
- if (!(flags & ST)) {
- /*
- * This zeroes the top 4 bytes of the affected registers
- * in 64-bit mode, and also zeroes out any remaining
- * bytes of the last register for lsw*.
- */
- memset(rptr, 0, ((nb + 3) / 4) * sizeof(unsigned long));
- if (nb0 > 0)
- memset(&regs->gpr[0], 0,
- ((nb0 + 3) / 4) * sizeof(unsigned long));
-
- for (i = 0; i < nb; ++i, ++p)
- if (__get_user_inatomic(REG_BYTE(rptr, i ^ bswiz),
- SWIZ_PTR(p)))
- return -EFAULT;
- if (nb0 > 0) {
- rptr = &regs->gpr[0];
- addr += nb;
- for (i = 0; i < nb0; ++i, ++p)
- if (__get_user_inatomic(REG_BYTE(rptr,
- i ^ bswiz),
- SWIZ_PTR(p)))
- return -EFAULT;
- }
-
- } else {
- for (i = 0; i < nb; ++i, ++p)
- if (__put_user_inatomic(REG_BYTE(rptr, i ^ bswiz),
- SWIZ_PTR(p)))
- return -EFAULT;
- if (nb0 > 0) {
- rptr = &regs->gpr[0];
- addr += nb;
- for (i = 0; i < nb0; ++i, ++p)
- if (__put_user_inatomic(REG_BYTE(rptr,
- i ^ bswiz),
- SWIZ_PTR(p)))
- return -EFAULT;
- }
- }
- return 1;
-}
-
-/*
- * Emulate floating-point pair loads and stores.
- * Only POWER6 has these instructions, and it does true little-endian,
- * so we don't need the address swizzling.
- */
-static int emulate_fp_pair(unsigned char __user *addr, unsigned int reg,
- unsigned int flags)
-{
- char *ptr0 = (char *) &current->thread.TS_FPR(reg);
- char *ptr1 = (char *) &current->thread.TS_FPR(reg+1);
- int i, ret, sw = 0;
-
- if (reg & 1)
- return 0; /* invalid form: FRS/FRT must be even */
- if (flags & SW)
- sw = 7;
- ret = 0;
- for (i = 0; i < 8; ++i) {
- if (!(flags & ST)) {
- ret |= __get_user(ptr0[i^sw], addr + i);
- ret |= __get_user(ptr1[i^sw], addr + i + 8);
- } else {
- ret |= __put_user(ptr0[i^sw], addr + i);
- ret |= __put_user(ptr1[i^sw], addr + i + 8);
- }
- }
- if (ret)
- return -EFAULT;
- return 1; /* exception handled and fixed up */
-}
-
-#ifdef CONFIG_PPC64
-static int emulate_lq_stq(struct pt_regs *regs, unsigned char __user *addr,
- unsigned int reg, unsigned int flags)
-{
- char *ptr0 = (char *)&regs->gpr[reg];
- char *ptr1 = (char *)&regs->gpr[reg+1];
- int i, ret, sw = 0;
-
- if (reg & 1)
- return 0; /* invalid form: GPR must be even */
- if (flags & SW)
- sw = 7;
- ret = 0;
- for (i = 0; i < 8; ++i) {
- if (!(flags & ST)) {
- ret |= __get_user(ptr0[i^sw], addr + i);
- ret |= __get_user(ptr1[i^sw], addr + i + 8);
- } else {
- ret |= __put_user(ptr0[i^sw], addr + i);
- ret |= __put_user(ptr1[i^sw], addr + i + 8);
- }
- }
- if (ret)
- return -EFAULT;
- return 1; /* exception handled and fixed up */
-}
-#endif /* CONFIG_PPC64 */
#ifdef CONFIG_SPE
@@ -636,133 +282,21 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
}
#endif /* CONFIG_SPE */
-#ifdef CONFIG_VSX
-/*
- * Emulate VSX instructions...
- */
-static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
- unsigned int areg, struct pt_regs *regs,
- unsigned int flags, unsigned int length,
- unsigned int elsize)
-{
- char *ptr;
- unsigned long *lptr;
- int ret = 0;
- int sw = 0;
- int i, j;
-
- /* userland only */
- if (unlikely(!user_mode(regs)))
- return 0;
-
- flush_vsx_to_thread(current);
-
- if (reg < 32)
- ptr = (char *) &current->thread.fp_state.fpr[reg][0];
- else
- ptr = (char *) &current->thread.vr_state.vr[reg - 32];
-
- lptr = (unsigned long *) ptr;
-
-#ifdef __LITTLE_ENDIAN__
- if (flags & SW) {
- elsize = length;
- sw = length-1;
- } else {
- /*
- * The elements are BE ordered, even in LE mode, so process
- * them in reverse order.
- */
- addr += length - elsize;
-
- /* 8 byte memory accesses go in the top 8 bytes of the VR */
- if (length == 8)
- ptr += 8;
- }
-#else
- if (flags & SW)
- sw = elsize-1;
-#endif
-
- for (j = 0; j < length; j += elsize) {
- for (i = 0; i < elsize; ++i) {
- if (flags & ST)
- ret |= __put_user(ptr[i^sw], addr + i);
- else
- ret |= __get_user(ptr[i^sw], addr + i);
- }
- ptr += elsize;
-#ifdef __LITTLE_ENDIAN__
- addr -= elsize;
-#else
- addr += elsize;
-#endif
- }
-
-#ifdef __BIG_ENDIAN__
-#define VSX_HI 0
-#define VSX_LO 1
-#else
-#define VSX_HI 1
-#define VSX_LO 0
-#endif
-
- if (!ret) {
- if (flags & U)
- regs->gpr[areg] = regs->dar;
-
- /* Splat load copies the same data to top and bottom 8 bytes */
- if (flags & SPLT)
- lptr[VSX_LO] = lptr[VSX_HI];
- /* For 8 byte loads, zero the low 8 bytes */
- else if (!(flags & ST) && (8 == length))
- lptr[VSX_LO] = 0;
- } else
- return -EFAULT;
-
- return 1;
-}
-#endif
-
/*
* Called on alignment exception. Attempts to fixup
*
* Return 1 on success
* Return 0 if unable to handle the interrupt
* Return -EFAULT if data address is bad
+ * Other negative return values indicate that the instruction can't
+ * be emulated, and the process should be given a SIGBUS.
*/
int fix_alignment(struct pt_regs *regs)
{
- unsigned int instr, nb, flags, instruction = 0;
- unsigned int reg, areg;
- unsigned int dsisr;
- unsigned char __user *addr;
- unsigned long p, swiz;
- int ret, i;
- union data {
- u64 ll;
- double dd;
- unsigned char v[8];
- struct {
-#ifdef __LITTLE_ENDIAN__
- int low32;
- unsigned hi32;
-#else
- unsigned hi32;
- int low32;
-#endif
- } x32;
- struct {
-#ifdef __LITTLE_ENDIAN__
- short low16;
- unsigned char hi48[6];
-#else
- unsigned char hi48[6];
- short low16;
-#endif
- } x16;
- } data;
+ unsigned int instr;
+ struct instruction_op op;
+ int r, type;
/*
* We require a complete register set, if not, then our assembly
@@ -770,121 +304,23 @@ int fix_alignment(struct pt_regs *regs)
*/
CHECK_FULL_REGS(regs);
- dsisr = regs->dsisr;
-
- /* Some processors don't provide us with a DSISR we can use here,
- * let's make one up from the instruction
- */
- if (cpu_has_feature(CPU_FTR_NODSISRALIGN)) {
- unsigned long pc = regs->nip;
-
- if (cpu_has_feature(CPU_FTR_PPC_LE) && (regs->msr & MSR_LE))
- pc ^= 4;
- if (unlikely(__get_user_inatomic(instr,
- (unsigned int __user *)pc)))
- return -EFAULT;
- if (cpu_has_feature(CPU_FTR_REAL_LE) && (regs->msr & MSR_LE))
- instr = cpu_to_le32(instr);
- dsisr = make_dsisr(instr);
- instruction = instr;
+ if (unlikely(__get_user(instr, (unsigned int __user *)regs->nip)))
+ return -EFAULT;
+ if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE)) {
+ /* We don't handle PPC little-endian any more... */
+ if (cpu_has_feature(CPU_FTR_PPC_LE))
+ return -EIO;
+ instr = swab32(instr);
}
- /* extract the operation and registers from the dsisr */
- reg = (dsisr >> 5) & 0x1f; /* source/dest register */
- areg = dsisr & 0x1f; /* register to update */
-
#ifdef CONFIG_SPE
if ((instr >> 26) == 0x4) {
+ int reg = (instr >> 21) & 0x1f;
PPC_WARN_ALIGNMENT(spe, regs);
return emulate_spe(regs, reg, instr);
}
#endif
- instr = (dsisr >> 10) & 0x7f;
- instr |= (dsisr >> 13) & 0x60;
-
- /* Lookup the operation in our table */
- nb = aligninfo[instr].len;
- flags = aligninfo[instr].flags;
-
- /*
- * Handle some cases which give overlaps in the DSISR values.
- */
- if (IS_XFORM(instruction)) {
- switch (get_xop(instruction)) {
- case 532: /* ldbrx */
- nb = 8;
- flags = LD+SW;
- break;
- case 660: /* stdbrx */
- nb = 8;
- flags = ST+SW;
- break;
- case 20: /* lwarx */
- case 84: /* ldarx */
- case 116: /* lharx */
- case 276: /* lqarx */
- return 0; /* not emulated ever */
- }
- }
-
- /* Byteswap little endian loads and stores */
- swiz = 0;
- if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE)) {
- flags ^= SW;
-#ifdef __BIG_ENDIAN__
- /*
- * So-called "PowerPC little endian" mode works by
- * swizzling addresses rather than by actually doing
- * any byte-swapping. To emulate this, we XOR each
- * byte address with 7. We also byte-swap, because
- * the processor's address swizzling depends on the
- * operand size (it xors the address with 7 for bytes,
- * 6 for halfwords, 4 for words, 0 for doublewords) but
- * we will xor with 7 and load/store each byte separately.
- */
- if (cpu_has_feature(CPU_FTR_PPC_LE))
- swiz = 7;
-#endif
- }
-
- /* DAR has the operand effective address */
- addr = (unsigned char __user *)regs->dar;
-
-#ifdef CONFIG_VSX
- if ((instruction & 0xfc00003e) == 0x7c000018) {
- unsigned int elsize;
-
- /* Additional register addressing bit (64 VSX vs 32 FPR/GPR) */
- reg |= (instruction & 0x1) << 5;
- /* Simple inline decoder instead of a table */
- /* VSX has only 8 and 16 byte memory accesses */
- nb = 8;
- if (instruction & 0x200)
- nb = 16;
-
- /* Vector stores in little-endian mode swap individual
- elements, so process them separately */
- elsize = 4;
- if (instruction & 0x80)
- elsize = 8;
-
- flags = 0;
- if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE))
- flags |= SW;
- if (instruction & 0x100)
- flags |= ST;
- if (instruction & 0x040)
- flags |= U;
- /* splat load needs a special decoder */
- if ((instruction & 0x400) == 0){
- flags |= SPLT;
- nb = 8;
- }
- PPC_WARN_ALIGNMENT(vsx, regs);
- return emulate_vsx(addr, reg, areg, regs, flags, nb, elsize);
- }
-#endif
/*
* ISA 3.0 (such as P9) copy, copy_first, paste and paste_last alignment
@@ -896,173 +332,27 @@ int fix_alignment(struct pt_regs *regs)
* when pasting to a co-processor. Furthermore, paste_last is the
* synchronisation point for preceding copy/paste sequences.
*/
- if ((instruction & 0xfc0006fe) == PPC_INST_COPY)
+ if ((instr & 0xfc0006fe) == PPC_INST_COPY)
return -EIO;
- /* A size of 0 indicates an instruction we don't support, with
- * the exception of DCBZ which is handled as a special case here
- */
- if (instr == DCBZ) {
- PPC_WARN_ALIGNMENT(dcbz, regs);
- return emulate_dcbz(regs, addr);
- }
- if (unlikely(nb == 0))
- return 0;
-
- /* Load/Store Multiple instructions are handled in their own
- * function
- */
- if (flags & M) {
- PPC_WARN_ALIGNMENT(multiple, regs);
- return emulate_multiple(regs, addr, reg, nb,
- flags, instr, swiz);
- }
-
- /* Verify the address of the operand */
- if (unlikely(user_mode(regs) &&
- !access_ok((flags & ST ? VERIFY_WRITE : VERIFY_READ),
- addr, nb)))
- return -EFAULT;
-
- /* Force the fprs into the save area so we can reference them */
- if (flags & F) {
- /* userland only */
- if (unlikely(!user_mode(regs)))
- return 0;
- flush_fp_to_thread(current);
- }
+ r = analyse_instr(&op, regs, instr);
+ if (r < 0)
+ return -EINVAL;
- if (nb == 16) {
- if (flags & F) {
- /* Special case for 16-byte FP loads and stores */
- PPC_WARN_ALIGNMENT(fp_pair, regs);
- return emulate_fp_pair(addr, reg, flags);
- } else {
-#ifdef CONFIG_PPC64
- /* Special case for 16-byte loads and stores */
- PPC_WARN_ALIGNMENT(lq_stq, regs);
- return emulate_lq_stq(regs, addr, reg, flags);
-#else
- return 0;
-#endif
- }
- }
-
- PPC_WARN_ALIGNMENT(unaligned, regs);
-
- /* If we are loading, get the data from user space, else
- * get it from register values
- */
- if (!(flags & ST)) {
- unsigned int start = 0;
-
- switch (nb) {
- case 4:
- start = offsetof(union data, x32.low32);
- break;
- case 2:
- start = offsetof(union data, x16.low16);
- break;
- }
-
- data.ll = 0;
- ret = 0;
- p = (unsigned long)addr;
-
- for (i = 0; i < nb; i++)
- ret |= __get_user_inatomic(data.v[start + i],
- SWIZ_PTR(p++));
-
- if (unlikely(ret))
- return -EFAULT;
-
- } else if (flags & F) {
- data.ll = current->thread.TS_FPR(reg);
- if (flags & S) {
- /* Single-precision FP store requires conversion... */
-#ifdef CONFIG_PPC_FPU
- preempt_disable();
- enable_kernel_fp();
- cvt_df(&data.dd, (float *)&data.x32.low32);
- disable_kernel_fp();
- preempt_enable();
-#else
- return 0;
-#endif
- }
- } else
- data.ll = regs->gpr[reg];
-
- if (flags & SW) {
- switch (nb) {
- case 8:
- data.ll = swab64(data.ll);
- break;
- case 4:
- data.x32.low32 = swab32(data.x32.low32);
- break;
- case 2:
- data.x16.low16 = swab16(data.x16.low16);
- break;
- }
- }
-
- /* Perform other misc operations like sign extension
- * or floating point single precision conversion
- */
- switch (flags & ~(U|SW)) {
- case LD+SE: /* sign extending integer loads */
- case LD+F+SE: /* sign extend for lfiwax */
- if ( nb == 2 )
- data.ll = data.x16.low16;
- else /* nb must be 4 */
- data.ll = data.x32.low32;
- break;
-
- /* Single-precision FP load requires conversion... */
- case LD+F+S:
-#ifdef CONFIG_PPC_FPU
- preempt_disable();
- enable_kernel_fp();
- cvt_fd((float *)&data.x32.low32, &data.dd);
- disable_kernel_fp();
- preempt_enable();
-#else
- return 0;
-#endif
- break;
+ type = op.type & INSTR_TYPE_MASK;
+ if (!OP_IS_LOAD_STORE(type)) {
+ if (type != CACHEOP + DCBZ)
+ return -EINVAL;
+ PPC_WARN_ALIGNMENT(dcbz, regs);
+ r = emulate_dcbz(op.ea, regs);
+ } else {
+ if (type == LARX || type == STCX)
+ return -EIO;
+ PPC_WARN_ALIGNMENT(unaligned, regs);
+ r = emulate_loadstore(regs, &op);
}
- /* Store result to memory or update registers */
- if (flags & ST) {
- unsigned int start = 0;
-
- switch (nb) {
- case 4:
- start = offsetof(union data, x32.low32);
- break;
- case 2:
- start = offsetof(union data, x16.low16);
- break;
- }
-
- ret = 0;
- p = (unsigned long)addr;
-
- for (i = 0; i < nb; i++)
- ret |= __put_user_inatomic(data.v[start + i],
- SWIZ_PTR(p++));
-
- if (unlikely(ret))
- return -EFAULT;
- } else if (flags & F)
- current->thread.TS_FPR(reg) = data.ll;
- else
- regs->gpr[reg] = data.ll;
-
- /* Update RA as needed */
- if (flags & U)
- regs->gpr[areg] = regs->dar;
-
- return 1;
+ if (!r)
+ return 1;
+ return r;
}
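/*
 * Illustrative sketch only (not part of this patch): how the alignment
 * exception path could act on the return values documented above.  The
 * function name is hypothetical and the signal delivery is left as
 * placeholder comments.
 */
static void alignment_exception_sketch(struct pt_regs *regs)
{
	int fixed = fix_alignment(regs);

	if (fixed == 1) {
		regs->nip += 4;		/* emulated: skip the instruction */
		return;
	}
	if (fixed == -EFAULT) {
		/* bad data address: deliver SIGSEGV (placeholder) */
		return;
	}
	/* anything else: could not be emulated, deliver SIGBUS (placeholder) */
}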
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 6e95c2c19a7e..8cfb20e38cfe 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -746,6 +746,14 @@ int main(void)
OFFSET(PACA_SUBCORE_SIBLING_MASK, paca_struct, subcore_sibling_mask);
OFFSET(PACA_SIBLING_PACA_PTRS, paca_struct, thread_sibling_pacas);
OFFSET(PACA_REQ_PSSCR, paca_struct, requested_psscr);
+#define STOP_SPR(x, f) OFFSET(x, paca_struct, stop_sprs.f)
+ STOP_SPR(STOP_PID, pid);
+ STOP_SPR(STOP_LDBAR, ldbar);
+ STOP_SPR(STOP_FSCR, fscr);
+ STOP_SPR(STOP_HFSCR, hfscr);
+ STOP_SPR(STOP_MMCR1, mmcr1);
+ STOP_SPR(STOP_MMCR2, mmcr2);
+ STOP_SPR(STOP_MMCRA, mmcra);
#endif
DEFINE(PPC_DBELL_SERVER, PPC_DBELL_SERVER);
diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c
index 8275858a434d..3f46ca1c59f9 100644
--- a/arch/powerpc/kernel/btext.c
+++ b/arch/powerpc/kernel/btext.c
@@ -253,7 +253,7 @@ int __init btext_find_display(int allow_nonstdout)
for_each_node_by_type(np, "display") {
if (of_get_property(np, "linux,opened", NULL)) {
- printk("trying %s ...\n", np->full_name);
+ printk("trying %pOF ...\n", np);
rc = btext_initialize(np);
printk("result: %d\n", rc);
}
diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
index c641983bbdd6..a8f20e5928e1 100644
--- a/arch/powerpc/kernel/cacheinfo.c
+++ b/arch/powerpc/kernel/cacheinfo.c
@@ -167,10 +167,10 @@ static void release_cache_debugcheck(struct cache *cache)
list_for_each_entry(iter, &cache_list, list)
WARN_ONCE(iter->next_local == cache,
- "cache for %s(%s) refers to cache for %s(%s)\n",
- iter->ofnode->full_name,
+ "cache for %pOF(%s) refers to cache for %pOF(%s)\n",
+ iter->ofnode,
cache_type_string(iter),
- cache->ofnode->full_name,
+ cache->ofnode,
cache_type_string(cache));
}
@@ -179,8 +179,8 @@ static void release_cache(struct cache *cache)
if (!cache)
return;
- pr_debug("freeing L%d %s cache for %s\n", cache->level,
- cache_type_string(cache), cache->ofnode->full_name);
+ pr_debug("freeing L%d %s cache for %pOF\n", cache->level,
+ cache_type_string(cache), cache->ofnode);
release_cache_debugcheck(cache);
list_del(&cache->list);
@@ -194,8 +194,8 @@ static void cache_cpu_set(struct cache *cache, int cpu)
while (next) {
WARN_ONCE(cpumask_test_cpu(cpu, &next->shared_cpu_map),
- "CPU %i already accounted in %s(%s)\n",
- cpu, next->ofnode->full_name,
+ "CPU %i already accounted in %pOF(%s)\n",
+ cpu, next->ofnode,
cache_type_string(next));
cpumask_set_cpu(cpu, &next->shared_cpu_map);
next = next->next_local;
@@ -355,7 +355,7 @@ static int cache_is_unified_d(const struct device_node *np)
*/
static struct cache *cache_do_one_devnode_unified(struct device_node *node, int level)
{
- pr_debug("creating L%d ucache for %s\n", level, node->full_name);
+ pr_debug("creating L%d ucache for %pOF\n", level, node);
return new_cache(cache_is_unified_d(node), level, node);
}
@@ -365,8 +365,8 @@ static struct cache *cache_do_one_devnode_split(struct device_node *node,
{
struct cache *dcache, *icache;
- pr_debug("creating L%d dcache and icache for %s\n", level,
- node->full_name);
+ pr_debug("creating L%d dcache and icache for %pOF\n", level,
+ node);
dcache = new_cache(CACHE_TYPE_DATA, level, node);
icache = new_cache(CACHE_TYPE_INSTRUCTION, level, node);
@@ -679,7 +679,6 @@ static struct kobj_type cache_index_type = {
static void cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir)
{
- const char *cache_name;
const char *cache_type;
struct cache *cache;
char *buf;
@@ -690,7 +689,6 @@ static void cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir)
return;
cache = dir->cache;
- cache_name = cache->ofnode->full_name;
cache_type = cache_type_string(cache);
/* We don't want to create an attribute that can't provide a
@@ -707,14 +705,14 @@ static void cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir)
rc = attr->show(&dir->kobj, attr, buf);
if (rc <= 0) {
pr_debug("not creating %s attribute for "
- "%s(%s) (rc = %zd)\n",
- attr->attr.name, cache_name,
+ "%pOF(%s) (rc = %zd)\n",
+ attr->attr.name, cache->ofnode,
cache_type, rc);
continue;
}
if (sysfs_create_file(&dir->kobj, &attr->attr))
- pr_debug("could not create %s attribute for %s(%s)\n",
- attr->attr.name, cache_name, cache_type);
+ pr_debug("could not create %s attribute for %pOF(%s)\n",
+ attr->attr.name, cache->ofnode, cache_type);
}
kfree(buf);
@@ -831,8 +829,8 @@ static void cache_cpu_clear(struct cache *cache, int cpu)
struct cache *next = cache->next_local;
WARN_ONCE(!cpumask_test_cpu(cpu, &cache->shared_cpu_map),
- "CPU %i not accounted in %s(%s)\n",
- cpu, cache->ofnode->full_name,
+ "CPU %i not accounted in %pOF(%s)\n",
+ cpu, cache->ofnode,
cache_type_string(cache));
cpumask_clear_cpu(cpu, &cache->shared_cpu_map);
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 6f849832a669..760872916013 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -1259,10 +1259,10 @@ static struct cpu_spec __initdata cpu_specs[] = {
.platform = "ppc603",
},
#endif /* CONFIG_PPC_BOOK3S_32 */
-#ifdef CONFIG_8xx
+#ifdef CONFIG_PPC_8xx
{ /* 8xx */
.pvr_mask = 0xffff0000,
- .pvr_value = 0x00500000,
+ .pvr_value = PVR_8xx,
.cpu_name = "8xx",
/* CPU_FTR_MAYBE_CAN_DOZE is possible,
* if the 8xx code is there.... */
@@ -1274,7 +1274,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check = machine_check_8xx,
.platform = "ppc823",
},
-#endif /* CONFIG_8xx */
+#endif /* CONFIG_PPC_8xx */
#ifdef CONFIG_40x
{ /* 403GC */
.pvr_mask = 0xffffff00,
@@ -1936,6 +1936,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check = machine_check_440A,
.platform = "ppc440",
},
+#ifdef CONFIG_PPC_47x
{ /* 476 DD2 core */
.pvr_mask = 0xffffffff,
.pvr_value = 0x11a52080,
@@ -1992,6 +1993,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check = machine_check_47x,
.platform = "ppc470",
},
+#endif /* CONFIG_PPC_47x */
{ /* default match */
.pvr_mask = 0x00000000,
.pvr_value = 0x00000000,
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 63992b2d8e15..9e816787c0d4 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -44,6 +44,7 @@
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/rtas.h>
+#include <asm/pte-walk.h>
/** Overview:
@@ -169,10 +170,10 @@ static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len)
char buffer[128];
n += scnprintf(buf+n, len-n, "%04x:%02x:%02x.%01x\n",
- edev->phb->global_number, pdn->busno,
+ pdn->phb->global_number, pdn->busno,
PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));
pr_warn("EEH: of node=%04x:%02x:%02x.%01x\n",
- edev->phb->global_number, pdn->busno,
+ pdn->phb->global_number, pdn->busno,
PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));
eeh_ops->read_config(pdn, PCI_VENDOR_ID, 4, &cfg);
@@ -352,8 +353,7 @@ static inline unsigned long eeh_token_to_phys(unsigned long token)
* worried about _PAGE_SPLITTING/collapse. Also we will not hit
* page table free, because of init_mm.
*/
- ptep = __find_linux_pte_or_hugepte(init_mm.pgd, token,
- NULL, &hugepage_shift);
+ ptep = find_init_mm_pte(token, &hugepage_shift);
if (!ptep)
return token;
WARN_ON(hugepage_shift);
@@ -435,7 +435,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
int ret;
int active_flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
unsigned long flags;
- struct pci_dn *pdn;
+ struct device_node *dn;
struct pci_dev *dev;
struct eeh_pe *pe, *parent_pe, *phb_pe;
int rc = 0;
@@ -493,9 +493,10 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
if (pe->state & EEH_PE_ISOLATED) {
pe->check_count++;
if (pe->check_count % EEH_MAX_FAILS == 0) {
- pdn = eeh_dev_to_pdn(edev);
- if (pdn->node)
- location = of_get_property(pdn->node, "ibm,loc-code", NULL);
+ dn = pci_device_to_OF_node(dev);
+ if (dn)
+ location = of_get_property(dn, "ibm,loc-code",
+ NULL);
printk(KERN_ERR "EEH: %d reads ignored for recovering device at "
"location=%s driver=%s pci addr=%s\n",
pe->check_count,
@@ -1064,7 +1065,7 @@ core_initcall_sync(eeh_init);
*/
void eeh_add_device_early(struct pci_dn *pdn)
{
- struct pci_controller *phb;
+ struct pci_controller *phb = pdn ? pdn->phb : NULL;
struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
if (!edev)
@@ -1074,7 +1075,6 @@ void eeh_add_device_early(struct pci_dn *pdn)
return;
/* USB Bus children of PCI devices will not have BUID's */
- phb = edev->phb;
if (NULL == phb ||
(eeh_has_flag(EEH_PROBE_MODE_DEVTREE) && 0 == phb->buid))
return;
diff --git a/arch/powerpc/kernel/eeh_dev.c b/arch/powerpc/kernel/eeh_dev.c
index d6b2ca70d14d..ad04ecd63c20 100644
--- a/arch/powerpc/kernel/eeh_dev.c
+++ b/arch/powerpc/kernel/eeh_dev.c
@@ -50,21 +50,16 @@
*/
struct eeh_dev *eeh_dev_init(struct pci_dn *pdn)
{
- struct pci_controller *phb = pdn->phb;
struct eeh_dev *edev;
/* Allocate EEH device */
edev = kzalloc(sizeof(*edev), GFP_KERNEL);
- if (!edev) {
- pr_warn("%s: out of memory\n",
- __func__);
+ if (!edev)
return NULL;
- }
/* Associate EEH device with OF node */
pdn->edev = edev;
edev->pdn = pdn;
- edev->phb = phb;
INIT_LIST_HEAD(&edev->list);
INIT_LIST_HEAD(&edev->rmv_list);
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index c405c79e50cd..8b840191df59 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -428,7 +428,7 @@ static void *eeh_add_virt_device(void *data, void *userdata)
if (!(edev->physfn)) {
pr_warn("%s: EEH dev %04x:%02x:%02x.%01x not for VF\n",
- __func__, edev->phb->global_number, pdn->busno,
+ __func__, pdn->phb->global_number, pdn->busno,
PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));
return NULL;
}
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index cc4b206f77e4..2e8d1b2b5af4 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -230,10 +230,15 @@ void *eeh_pe_dev_traverse(struct eeh_pe *root,
* Bus/Device/Function number. The extra data referred by flag
* indicates which type of address should be used.
*/
+struct eeh_pe_get_flag {
+ int pe_no;
+ int config_addr;
+};
+
static void *__eeh_pe_get(void *data, void *flag)
{
struct eeh_pe *pe = (struct eeh_pe *)data;
- struct eeh_dev *edev = (struct eeh_dev *)flag;
+ struct eeh_pe_get_flag *tmp = (struct eeh_pe_get_flag *) flag;
/* Unexpected PHB PE */
if (pe->type & EEH_PE_PHB)
@@ -244,17 +249,17 @@ static void *__eeh_pe_get(void *data, void *flag)
* have non-zero PE address
*/
if (eeh_has_flag(EEH_VALID_PE_ZERO)) {
- if (edev->pe_config_addr == pe->addr)
+ if (tmp->pe_no == pe->addr)
return pe;
} else {
- if (edev->pe_config_addr &&
- (edev->pe_config_addr == pe->addr))
+ if (tmp->pe_no &&
+ (tmp->pe_no == pe->addr))
return pe;
}
/* Try BDF address */
- if (edev->config_addr &&
- (edev->config_addr == pe->config_addr))
+ if (tmp->config_addr &&
+ (tmp->config_addr == pe->config_addr))
return pe;
return NULL;
@@ -262,7 +267,9 @@ static void *__eeh_pe_get(void *data, void *flag)
/**
* eeh_pe_get - Search PE based on the given address
- * @edev: EEH device
+ * @phb: PCI controller
+ * @pe_no: PE number
+ * @config_addr: Config address
*
* Search the corresponding PE based on the specified address which
* is included in the eeh device. The function is used to check if
@@ -271,12 +278,14 @@ static void *__eeh_pe_get(void *data, void *flag)
* which is composed of PCI bus/device/function number, or unified
* PE address.
*/
-struct eeh_pe *eeh_pe_get(struct eeh_dev *edev)
+struct eeh_pe *eeh_pe_get(struct pci_controller *phb,
+ int pe_no, int config_addr)
{
- struct eeh_pe *root = eeh_phb_pe_get(edev->phb);
+ struct eeh_pe *root = eeh_phb_pe_get(phb);
+ struct eeh_pe_get_flag tmp = { pe_no, config_addr };
struct eeh_pe *pe;
- pe = eeh_pe_traverse(root, __eeh_pe_get, edev);
+ pe = eeh_pe_traverse(root, __eeh_pe_get, &tmp);
return pe;
}
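/*
 * Illustrative sketch only (not part of this patch): with the new
 * signature a caller builds the BDF-style config address itself,
 * mirroring eeh_add_to_parent_pe() below:
 *
 *	int config_addr = (pdn->busno << 8) | pdn->devfn;
 *
 *	pe = eeh_pe_get(pdn->phb, edev->pe_config_addr, config_addr);
 */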
@@ -330,11 +339,13 @@ static struct eeh_pe *eeh_pe_get_parent(struct eeh_dev *edev)
int eeh_add_to_parent_pe(struct eeh_dev *edev)
{
struct eeh_pe *pe, *parent;
+ struct pci_dn *pdn = eeh_dev_to_pdn(edev);
+ int config_addr = (pdn->busno << 8) | (pdn->devfn);
/* Check if the PE number is valid */
if (!eeh_has_flag(EEH_VALID_PE_ZERO) && !edev->pe_config_addr) {
pr_err("%s: Invalid PE#0 for edev 0x%x on PHB#%x\n",
- __func__, edev->config_addr, edev->phb->global_number);
+ __func__, config_addr, pdn->phb->global_number);
return -EINVAL;
}
@@ -344,7 +355,7 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
* PE should be composed of PCI bus and its subordinate
* components.
*/
- pe = eeh_pe_get(edev);
+ pe = eeh_pe_get(pdn->phb, edev->pe_config_addr, config_addr);
if (pe && !(pe->type & EEH_PE_INVALID)) {
/* Mark the PE as type of PCI bus */
pe->type = EEH_PE_BUS;
@@ -353,11 +364,11 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
/* Put the edev to PE */
list_add_tail(&edev->list, &pe->edevs);
pr_debug("EEH: Add %04x:%02x:%02x.%01x to Bus PE#%x\n",
- edev->phb->global_number,
- edev->config_addr >> 8,
- PCI_SLOT(edev->config_addr & 0xFF),
- PCI_FUNC(edev->config_addr & 0xFF),
- pe->addr);
+ pdn->phb->global_number,
+ pdn->busno,
+ PCI_SLOT(pdn->devfn),
+ PCI_FUNC(pdn->devfn),
+ pe->addr);
return 0;
} else if (pe && (pe->type & EEH_PE_INVALID)) {
list_add_tail(&edev->list, &pe->edevs);
@@ -376,25 +387,25 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
pr_debug("EEH: Add %04x:%02x:%02x.%01x to Device "
"PE#%x, Parent PE#%x\n",
- edev->phb->global_number,
- edev->config_addr >> 8,
- PCI_SLOT(edev->config_addr & 0xFF),
- PCI_FUNC(edev->config_addr & 0xFF),
- pe->addr, pe->parent->addr);
+ pdn->phb->global_number,
+ pdn->busno,
+ PCI_SLOT(pdn->devfn),
+ PCI_FUNC(pdn->devfn),
+ pe->addr, pe->parent->addr);
return 0;
}
/* Create a new EEH PE */
if (edev->physfn)
- pe = eeh_pe_alloc(edev->phb, EEH_PE_VF);
+ pe = eeh_pe_alloc(pdn->phb, EEH_PE_VF);
else
- pe = eeh_pe_alloc(edev->phb, EEH_PE_DEVICE);
+ pe = eeh_pe_alloc(pdn->phb, EEH_PE_DEVICE);
if (!pe) {
pr_err("%s: out of memory!\n", __func__);
return -ENOMEM;
}
pe->addr = edev->pe_config_addr;
- pe->config_addr = edev->config_addr;
+ pe->config_addr = config_addr;
/*
* Put the new EEH PE into hierarchy tree. If the parent
@@ -404,10 +415,10 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
*/
parent = eeh_pe_get_parent(edev);
if (!parent) {
- parent = eeh_phb_pe_get(edev->phb);
+ parent = eeh_phb_pe_get(pdn->phb);
if (!parent) {
pr_err("%s: No PHB PE is found (PHB Domain=%d)\n",
- __func__, edev->phb->global_number);
+ __func__, pdn->phb->global_number);
edev->pe = NULL;
kfree(pe);
return -EEXIST;
@@ -424,10 +435,10 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
edev->pe = pe;
pr_debug("EEH: Add %04x:%02x:%02x.%01x to "
"Device PE#%x, Parent PE#%x\n",
- edev->phb->global_number,
- edev->config_addr >> 8,
- PCI_SLOT(edev->config_addr & 0xFF),
- PCI_FUNC(edev->config_addr & 0xFF),
+ pdn->phb->global_number,
+ pdn->busno,
+ PCI_SLOT(pdn->devfn),
+ PCI_FUNC(pdn->devfn),
pe->addr, pe->parent->addr);
return 0;
@@ -446,13 +457,14 @@ int eeh_rmv_from_parent_pe(struct eeh_dev *edev)
{
struct eeh_pe *pe, *parent, *child;
int cnt;
+ struct pci_dn *pdn = eeh_dev_to_pdn(edev);
if (!edev->pe) {
pr_debug("%s: No PE found for device %04x:%02x:%02x.%01x\n",
- __func__, edev->phb->global_number,
- edev->config_addr >> 8,
- PCI_SLOT(edev->config_addr & 0xFF),
- PCI_FUNC(edev->config_addr & 0xFF));
+ __func__, pdn->phb->global_number,
+ pdn->busno,
+ PCI_SLOT(pdn->devfn),
+ PCI_FUNC(pdn->devfn));
return -EEXIST;
}
@@ -712,10 +724,10 @@ static void eeh_bridge_check_link(struct eeh_dev *edev)
return;
pr_debug("%s: Check PCIe link for %04x:%02x:%02x.%01x ...\n",
- __func__, edev->phb->global_number,
- edev->config_addr >> 8,
- PCI_SLOT(edev->config_addr & 0xFF),
- PCI_FUNC(edev->config_addr & 0xFF));
+ __func__, pdn->phb->global_number,
+ pdn->busno,
+ PCI_SLOT(pdn->devfn),
+ PCI_FUNC(pdn->devfn));
/* Check slot status */
cap = edev->pcie_cap;
diff --git a/arch/powerpc/kernel/eeh_sysfs.c b/arch/powerpc/kernel/eeh_sysfs.c
index 1ceecdda810b..797549289798 100644
--- a/arch/powerpc/kernel/eeh_sysfs.c
+++ b/arch/powerpc/kernel/eeh_sysfs.c
@@ -51,7 +51,6 @@ static ssize_t eeh_show_##_name(struct device *dev, \
static DEVICE_ATTR(_name, S_IRUGO, eeh_show_##_name, NULL);
EEH_SHOW_ATTR(eeh_mode, mode, "0x%x");
-EEH_SHOW_ATTR(eeh_config_addr, config_addr, "0x%x");
EEH_SHOW_ATTR(eeh_pe_config_addr, pe_config_addr, "0x%x");
static ssize_t eeh_pe_state_show(struct device *dev,
@@ -103,7 +102,6 @@ void eeh_sysfs_add_device(struct pci_dev *pdev)
return;
rc += device_create_file(&pdev->dev, &dev_attr_eeh_mode);
- rc += device_create_file(&pdev->dev, &dev_attr_eeh_config_addr);
rc += device_create_file(&pdev->dev, &dev_attr_eeh_pe_config_addr);
rc += device_create_file(&pdev->dev, &dev_attr_eeh_pe_state);
@@ -128,7 +126,6 @@ void eeh_sysfs_remove_device(struct pci_dev *pdev)
}
device_remove_file(&pdev->dev, &dev_attr_eeh_mode);
- device_remove_file(&pdev->dev, &dev_attr_eeh_config_addr);
device_remove_file(&pdev->dev, &dev_attr_eeh_pe_config_addr);
device_remove_file(&pdev->dev, &dev_attr_eeh_pe_state);
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 8587059ad848..e780e1fbf6c2 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -43,6 +43,13 @@
#define LOAD_MSR_KERNEL(r, x) li r,(x)
#endif
+/*
+ * Align to 4k in order to ensure that all functions modifying srr0/srr1
+ * fit into one page, so as not to encounter a TLB miss between the
+ * modification of srr0/srr1 and the associated rfi.
+ */
+ .align 12
+
#ifdef CONFIG_BOOKE
.globl mcheck_transfer_to_handler
mcheck_transfer_to_handler:
@@ -586,6 +593,10 @@ ppc_swapcontext:
handle_page_fault:
stw r4,_DAR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
+#ifdef CONFIG_6xx
+ andis. r0,r5,DSISR_DABRMATCH@h
+ bne- handle_dabr_fault
+#endif
bl do_page_fault
cmpwi r3,0
beq+ ret_from_except
@@ -599,6 +610,17 @@ handle_page_fault:
bl bad_page_fault
b ret_from_except_full
+#ifdef CONFIG_6xx
+ /* We have a data breakpoint exception - handle it */
+handle_dabr_fault:
+ SAVE_NVGPRS(r1)
+ lwz r0,_TRAP(r1)
+ clrrwi r0,r0,1
+ stw r0,_TRAP(r1)
+ bl do_break
+ b ret_from_except_full
+#endif
+
/*
* This routine switches between two different tasks. The process
* state of one is saved on its kernel stack. Then the state
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 49d8422767b4..4a0fd4f40245 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -223,17 +223,27 @@ system_call_exit:
andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
bne- .Lsyscall_exit_work
- /* If MSR_FP and MSR_VEC are set in user msr, then no need to restore */
- li r7,MSR_FP
+ andi. r0,r8,MSR_FP
+ beq 2f
#ifdef CONFIG_ALTIVEC
- oris r7,r7,MSR_VEC@h
+ andis. r0,r8,MSR_VEC@h
+ bne 3f
#endif
- and r0,r8,r7
- cmpd r0,r7
- bne .Lsyscall_restore_math
-.Lsyscall_restore_math_cont:
+2: addi r3,r1,STACK_FRAME_OVERHEAD
+#ifdef CONFIG_PPC_BOOK3S
+ li r10,MSR_RI
+ mtmsrd r10,1 /* Restore RI */
+#endif
+ bl restore_math
+#ifdef CONFIG_PPC_BOOK3S
+ li r11,0
+ mtmsrd r11,1
+#endif
+ ld r8,_MSR(r1)
+ ld r3,RESULT(r1)
+ li r11,-MAX_ERRNO
- cmpld r3,r11
+3: cmpld r3,r11
ld r5,_CCR(r1)
bge- .Lsyscall_error
.Lsyscall_error_cont:
@@ -267,40 +277,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
std r5,_CCR(r1)
b .Lsyscall_error_cont
-.Lsyscall_restore_math:
- /*
- * Some initial tests from restore_math to avoid the heavyweight
- * C code entry and MSR manipulations.
- */
- LOAD_REG_IMMEDIATE(r0, MSR_TS_MASK)
- and. r0,r0,r8
- bne 1f
-
- ld r7,PACACURRENT(r13)
- lbz r0,THREAD+THREAD_LOAD_FP(r7)
-#ifdef CONFIG_ALTIVEC
- lbz r6,THREAD+THREAD_LOAD_VEC(r7)
- add r0,r0,r6
-#endif
- cmpdi r0,0
- beq .Lsyscall_restore_math_cont
-
-1: addi r3,r1,STACK_FRAME_OVERHEAD
-#ifdef CONFIG_PPC_BOOK3S
- li r10,MSR_RI
- mtmsrd r10,1 /* Restore RI */
-#endif
- bl restore_math
-#ifdef CONFIG_PPC_BOOK3S
- li r11,0
- mtmsrd r11,1
-#endif
- /* Restore volatiles, reload MSR from updated one */
- ld r8,_MSR(r1)
- ld r3,RESULT(r1)
- li r11,-MAX_ERRNO
- b .Lsyscall_restore_math_cont
-
/* Traced system call support */
.Lsyscall_dotrace:
bl save_nvgprs
@@ -990,16 +966,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
#ifdef CONFIG_PPC_BOOK3E
cmpwi cr0,r3,0x280
#else
- BEGIN_FTR_SECTION
- cmpwi cr0,r3,0xe80
- FTR_SECTION_ELSE
- cmpwi cr0,r3,0xa00
- ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
+ cmpwi cr0,r3,0xa00
#endif /* CONFIG_PPC_BOOK3E */
bne 1f
addi r3,r1,STACK_FRAME_OVERHEAD;
bl doorbell_exception
- b ret_from_except
#endif /* CONFIG_PPC_DOORBELL */
1: b ret_from_except /* What else to do here ? */
@@ -1133,7 +1104,7 @@ _ASM_NOKPROBE_SYMBOL(__enter_rtas)
_ASM_NOKPROBE_SYMBOL(rtas_return_loc)
.align 3
-1: .llong rtas_restore_regs
+1: .8byte rtas_restore_regs
rtas_restore_regs:
/* relocation is on at this point */
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index e6d8354d79ef..48da0f5d2f7f 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -541,7 +541,7 @@ EXC_COMMON_BEGIN(instruction_access_common)
RECONCILE_IRQ_STATE(r10, r11)
ld r12,_MSR(r1)
ld r3,_NIP(r1)
- andis. r4,r12,0x5820
+ andis. r4,r12,DSISR_BAD_FAULT_64S@h
li r5,0x400
std r3,_DAR(r1)
std r4,_DSISR(r1)
@@ -824,7 +824,7 @@ EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
* r3 volatile parameter and return value for status
* r4-r10 volatile input and output value
* r11 volatile hypercall number and output value
- * r12 volatile
+ * r12 volatile input and output value
* r13-r31 nonvolatile
* LR nonvolatile
* CTR volatile
@@ -834,25 +834,26 @@ EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
* Other registers nonvolatile
*
* The intersection of volatile registers that don't contain possible
- * inputs is: r12, cr0, xer, ctr. We may use these as scratch regs
- * upon entry without saving.
+ * inputs is: cr0, xer, ctr. We may use these as scratch regs upon entry
+ * without saving, though xer is not a good idea to use, as hardware may
+ * interpret some bits so it may be costly to change them.
*/
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
/*
* There is a little bit of juggling to get syscall and hcall
- * working well. Save r10 in ctr to be restored in case it is a
- * hcall.
+ * working well. Save r13 in ctr to avoid using SPRG scratch
+ * register.
*
* Userspace syscalls have already saved the PPR, hcalls must save
* it before setting HMT_MEDIUM.
*/
#define SYSCALL_KVMTEST \
- mr r12,r13; \
+ mtctr r13; \
GET_PACA(r13); \
- mtctr r10; \
+ std r10,PACA_EXGEN+EX_R10(r13); \
KVMTEST_PR(0xc00); /* uses r10, branch to do_kvm_0xc00_system_call */ \
HMT_MEDIUM; \
- mr r9,r12; \
+ mfctr r9;
#else
#define SYSCALL_KVMTEST \
@@ -935,8 +936,8 @@ EXC_VIRT_END(system_call, 0x4c00, 0x100)
* This is a hcall, so register convention is as above, with these
* differences:
* r13 = PACA
- * r12 = orig r13
- * ctr = orig r10
+ * ctr = orig r13
+ * orig r10 saved in PACA
*/
TRAMP_KVM_BEGIN(do_kvm_0xc00)
/*
@@ -944,14 +945,13 @@ TRAMP_KVM_BEGIN(do_kvm_0xc00)
* HMT_MEDIUM. That allows the KVM code to save that value into the
* guest state (it is the guest's PPR value).
*/
- OPT_GET_SPR(r0, SPRN_PPR, CPU_FTR_HAS_PPR)
+ OPT_GET_SPR(r10, SPRN_PPR, CPU_FTR_HAS_PPR)
HMT_MEDIUM
- OPT_SAVE_REG_TO_PACA(PACA_EXGEN+EX_PPR, r0, CPU_FTR_HAS_PPR)
+ OPT_SAVE_REG_TO_PACA(PACA_EXGEN+EX_PPR, r10, CPU_FTR_HAS_PPR)
mfctr r10
- SET_SCRATCH0(r12)
+ SET_SCRATCH0(r10)
std r9,PACA_EXGEN+EX_R9(r13)
mfcr r9
- std r10,PACA_EXGEN+EX_R10(r13)
KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)
#endif
@@ -1314,7 +1314,7 @@ EXC_REAL_NONE(0x1800, 0x100)
EXC_VIRT_NONE(0x5800, 0x100)
#endif
-#if defined(CONFIG_HARDLOCKUP_DETECTOR) && defined(CONFIG_HAVE_HARDLOCKUP_DETECTOR_ARCH)
+#ifdef CONFIG_PPC_WATCHDOG
#define MASKED_DEC_HANDLER_LABEL 3f
@@ -1325,20 +1325,28 @@ EXC_VIRT_NONE(0x5800, 0x100)
std r10,PACA_EXGEN+EX_R13(r13); \
EXCEPTION_PROLOG_PSERIES_1(soft_nmi_common, _H)
+/*
+ * Branch to soft_nmi_interrupt using the emergency stack. The emergency
+ * stack is one that is usable by maskable interrupts so long as MSR_EE
+ * remains off. It is used for recovery when something has corrupted the
+ * normal kernel stack, for example. The "soft NMI" must not use the process
+ * stack because we want irq disabled sections to avoid touching the stack
+ * at all (other than PMU interrupts), so use the emergency stack for this,
+ * and run it entirely with interrupts hard disabled.
+ */
EXC_COMMON_BEGIN(soft_nmi_common)
mr r10,r1
- ld r1,PACA_NMI_EMERG_SP(r13)
+ ld r1,PACAEMERGSP(r13)
subi r1,r1,INT_FRAME_SIZE
EXCEPTION_COMMON_NORET_STACK(PACA_EXGEN, 0x900,
system_reset, soft_nmi_interrupt,
ADD_NVGPRS;ADD_RECONCILE)
b ret_from_except
-#else
+#else /* CONFIG_PPC_WATCHDOG */
#define MASKED_DEC_HANDLER_LABEL 2f /* normal return */
#define MASKED_DEC_HANDLER(_H)
-#endif
+#endif /* CONFIG_PPC_WATCHDOG */
/*
* An interrupt came in while soft-disabled. We set paca->irq_happened, then:
@@ -1362,19 +1370,16 @@ masked_##_H##interrupt: \
ori r10,r10,0xffff; \
mtspr SPRN_DEC,r10; \
b MASKED_DEC_HANDLER_LABEL; \
-1: cmpwi r10,PACA_IRQ_DBELL; \
- beq 2f; \
- cmpwi r10,PACA_IRQ_HMI; \
- beq 2f; \
+1: andi. r10,r10,(PACA_IRQ_DBELL|PACA_IRQ_HMI); \
+ bne 2f; \
mfspr r10,SPRN_##_H##SRR1; \
- rldicl r10,r10,48,1; /* clear MSR_EE */ \
- rotldi r10,r10,16; \
+ xori r10,r10,MSR_EE; /* clear MSR_EE */ \
mtspr SPRN_##_H##SRR1,r10; \
2: mtcrf 0x80,r9; \
ld r9,PACA_EXGEN+EX_R9(r13); \
ld r10,PACA_EXGEN+EX_R10(r13); \
ld r11,PACA_EXGEN+EX_R11(r13); \
- GET_SCRATCH0(r13); \
+ /* returns to kernel where r13 must be set up, so don't restore it */ \
##_H##rfid; \
b .; \
MASKED_DEC_HANDLER(_H)
@@ -1477,8 +1482,10 @@ USE_TEXT_SECTION()
*/
.balign IFETCH_ALIGN_BYTES
do_hash_page:
#ifdef CONFIG_PPC_STD_MMU_64
- andis. r0,r4,0xa450 /* weird error? */
+ lis r0,DSISR_BAD_FAULT_64S@h
+ ori r0,r0,DSISR_BAD_FAULT_64S@l
+ and. r0,r4,r0 /* weird error? */
bne- handle_page_fault /* if not, try to insert a HPTE */
CURRENT_THREAD_INFO(r11, r1)
lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */
@@ -1661,25 +1668,27 @@ _GLOBAL(__replay_interrupt)
* we don't give a damn about, so we don't bother storing them.
*/
mfmsr r12
- LOAD_REG_ADDR(r11, 1f)
+ LOAD_REG_ADDR(r11, replay_interrupt_return)
mfcr r9
ori r12,r12,MSR_EE
cmpwi r3,0x900
beq decrementer_common
cmpwi r3,0x500
+BEGIN_FTR_SECTION
+ beq h_virt_irq_common
+FTR_SECTION_ELSE
beq hardware_interrupt_common
+ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_300)
BEGIN_FTR_SECTION
- cmpwi r3,0xe80
+ cmpwi r3,0xa00
beq h_doorbell_common_msgclr
- cmpwi r3,0xea0
- beq h_virt_irq_common
cmpwi r3,0xe60
beq hmi_exception_common
FTR_SECTION_ELSE
cmpwi r3,0xa00
beq doorbell_super_common_msgclr
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
-1:
+replay_interrupt_return:
blr
_ASM_NOKPROBE_SYMBOL(__replay_interrupt)
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index dc0c49cfd90a..e1431800bfb9 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -125,6 +125,13 @@ int is_fadump_boot_memory_area(u64 addr, ulong size)
return (addr + size) > RMA_START && addr <= fw_dump.boot_memory_size;
}
+int should_fadump_crash(void)
+{
+ if (!fw_dump.dump_registered || !fw_dump.fadumphdr_addr)
+ return 0;
+ return 1;
+}
+
int is_fadump_active(void)
{
return fw_dump.dump_active;
@@ -518,7 +525,7 @@ void crash_fadump(struct pt_regs *regs, const char *str)
struct fadump_crash_info_header *fdh = NULL;
int old_cpu, this_cpu;
- if (!fw_dump.dump_registered || !fw_dump.fadumphdr_addr)
+ if (!should_fadump_crash())
return;
/*
@@ -1446,6 +1453,25 @@ static void fadump_init_files(void)
return;
}
+static int fadump_panic_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ /*
+ * If firmware-assisted dump has been registered then trigger
+ * firmware-assisted dump and let firmware handle everything
+ * else. If this returns, then fadump was not registered, so
+ * go through the rest of the panic path.
+ */
+ crash_fadump(NULL, ptr);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block fadump_panic_block = {
+ .notifier_call = fadump_panic_event,
+ .priority = INT_MIN /* may not return; must be done last */
+};
+
/*
* Prepare for firmware-assisted dump.
*/
@@ -1478,6 +1504,9 @@ int __init setup_fadump(void)
init_fadump_mem_struct(&fdm, fw_dump.reserve_dump_area_start);
fadump_init_files();
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &fadump_panic_block);
+
return 1;
}
subsys_initcall(setup_fadump);
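
The fadump hunk above moves crash handling onto the generic panic notifier chain, with INT_MIN priority so it runs after every other notifier (it may not return). As a rough sketch of the same registration pattern in a hypothetical driver (names are illustrative; in this kernel era panic_notifier_list is declared via linux/kernel.h):

    #include <linux/kernel.h>   /* panic_notifier_list, INT_MIN */
    #include <linux/notifier.h>
    #include <linux/init.h>

    static int example_panic_event(struct notifier_block *nb,
                                   unsigned long event, void *ptr)
    {
        /* ptr carries the panic message; do last-gasp work here */
        return NOTIFY_DONE;
    }

    static struct notifier_block example_panic_block = {
        .notifier_call = example_panic_event,
        .priority = INT_MIN, /* run last, like fadump above */
    };

    static int __init example_init(void)
    {
        atomic_notifier_chain_register(&panic_notifier_list,
                                       &example_panic_block);
        return 0;
    }
    subsys_initcall(example_init);
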
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index e22734278458..8c54166491e7 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -388,7 +388,7 @@ DataAccess:
EXCEPTION_PROLOG
mfspr r10,SPRN_DSISR
stw r10,_DSISR(r11)
- andis. r0,r10,0xa470 /* weird error? */
+ andis. r0,r10,DSISR_BAD_FAULT_32S@h
bne 1f /* if not, try to put a PTE */
mfspr r4,SPRN_DAR /* into the hash table */
rlwinm r3,r10,32-15,21,21 /* DSISR_STORE -> _PAGE_RW */
@@ -403,13 +403,13 @@ DataAccess:
DO_KVM 0x400
InstructionAccess:
EXCEPTION_PROLOG
- andis. r0,r9,0x4000 /* no pte found? */
+ andis. r0,r9,SRR1_ISI_NOPT@h /* no pte found? */
beq 1f /* if so, try to put a PTE */
li r3,0 /* into the hash table */
mr r4,r12 /* SRR0 is fault address */
bl hash_page
1: mr r4,r12
- mr r5,r9
+ andis. r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */
EXC_XFER_LITE(0x400, handle_page_fault)
/* External interrupt */
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 0ddc602b33a4..ff8511d6d8ea 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -92,13 +92,13 @@ END_FTR_SECTION(0, 1)
.balign 8
.globl __secondary_hold_spinloop
__secondary_hold_spinloop:
- .llong 0x0
+ .8byte 0x0
/* Secondary processors write this value with their cpu # */
/* after they enter the spin loop immediately below. */
.globl __secondary_hold_acknowledge
__secondary_hold_acknowledge:
- .llong 0x0
+ .8byte 0x0
#ifdef CONFIG_RELOCATABLE
/* This flag is set to 1 by a loader if the kernel should run
@@ -650,7 +650,7 @@ __after_prom_start:
bctr
.balign 8
-p_end: .llong _end - copy_to_here
+p_end: .8byte _end - copy_to_here
4:
/*
@@ -892,7 +892,7 @@ _GLOBAL(relative_toc)
blr
.balign 8
-p_toc: .llong __toc_start + 0x8000 - 0b
+p_toc: .8byte __toc_start + 0x8000 - 0b
/*
* This is where the main kernel code starts.
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index c032fe8c2d26..4fee00d414e8 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -50,18 +50,20 @@
mtspr spr, reg
#endif
-/* Macro to test if an address is a kernel address */
#if CONFIG_TASK_SIZE <= 0x80000000 && CONFIG_PAGE_OFFSET >= 0x80000000
-#define IS_KERNEL(tmp, addr) \
- andis. tmp, addr, 0x8000 /* Address >= 0x80000000 */
-#define BRANCH_UNLESS_KERNEL(label) beq label
-#else
-#define IS_KERNEL(tmp, addr) \
- rlwinm tmp, addr, 16, 16, 31; \
- cmpli cr0, tmp, PAGE_OFFSET >> 16
-#define BRANCH_UNLESS_KERNEL(label) blt label
+/* By simply checking Address >= 0x80000000, we know if it is a kernel address */
+#define SIMPLE_KERNEL_ADDRESS 1
#endif
+/*
+ * We need an ITLB miss handler for kernel addresses if:
+ * - Either we have modules
+ * - Or we have not pinned the first 8M
+ */
+#if defined(CONFIG_MODULES) || !defined(CONFIG_PIN_TLB_TEXT) || \
+ defined(CONFIG_DEBUG_PAGEALLOC)
+#define ITLB_MISS_KERNEL 1
+#endif
/*
* Value for the bits that have fixed value in RPN entries.
@@ -123,7 +125,6 @@ turn_on_mmu:
lis r0,start_here@h
ori r0,r0,start_here@l
mtspr SPRN_SRR0,r0
- SYNC
rfi /* enables MMU */
/*
@@ -170,7 +171,7 @@ turn_on_mmu:
stw r1,0(r11); \
tovirt(r1,r11); /* set new kernel sp */ \
li r10,MSR_KERNEL & ~(MSR_IR|MSR_DR); /* can take exceptions */ \
- MTMSRD(r10); /* (except for mach check in rtas) */ \
+ mtmsr r10; \
stw r0,GPR0(r11); \
SAVE_4GPRS(3, r11); \
SAVE_2GPRS(7, r11)
@@ -300,7 +301,7 @@ SystemCall:
/* On the MPC8xx, this is a software emulation interrupt. It occurs
* for all unimplemented and illegal instructions.
*/
- EXCEPTION(0x1000, SoftEmu, SoftwareEmulation, EXC_XFER_STD)
+ EXCEPTION(0x1000, SoftEmu, program_check_exception, EXC_XFER_STD)
. = 0x1100
/*
@@ -325,7 +326,7 @@ SystemCall:
#endif
InstructionTLBMiss:
-#if defined(CONFIG_8xx_CPU6) || defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC) || defined (CONFIG_HUGETLB_PAGE)
+#if defined(CONFIG_8xx_CPU6) || defined(ITLB_MISS_KERNEL) || defined(CONFIG_HUGETLB_PAGE)
mtspr SPRN_SPRG_SCRATCH2, r3
#endif
EXCEPTION_PROLOG_0
@@ -343,15 +344,32 @@ InstructionTLBMiss:
INVALIDATE_ADJACENT_PAGES_CPU15(r11, r10)
/* Only modules will cause ITLB Misses as we always
* pin the first 8MB of kernel memory */
-#if defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC) || defined (CONFIG_HUGETLB_PAGE)
+#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_HUGETLB_PAGE)
mfcr r3
#endif
-#if defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC)
- IS_KERNEL(r11, r10)
+#ifdef ITLB_MISS_KERNEL
+#if defined(SIMPLE_KERNEL_ADDRESS) && defined(CONFIG_PIN_TLB_TEXT)
+ andis. r11, r10, 0x8000 /* Address >= 0x80000000 */
+#else
+ rlwinm r11, r10, 16, 0xfff8
+ cmpli cr0, r11, PAGE_OFFSET@h
+#ifndef CONFIG_PIN_TLB_TEXT
+ /* It is assumed that kernel code fits into the first 8M page */
+_ENTRY(ITLBMiss_cmp)
+ cmpli cr7, r11, (PAGE_OFFSET + 0x0800000)@h
+#endif
+#endif
#endif
mfspr r11, SPRN_M_TW /* Get level 1 table */
-#if defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC)
- BRANCH_UNLESS_KERNEL(3f)
+#ifdef ITLB_MISS_KERNEL
+#if defined(SIMPLE_KERNEL_ADDRESS) && defined(CONFIG_PIN_TLB_TEXT)
+ beq+ 3f
+#else
+ blt+ 3f
+#endif
+#ifndef CONFIG_PIN_TLB_TEXT
+ blt cr7, ITLBMissLinear
+#endif
lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha
3:
#endif
@@ -369,7 +387,7 @@ InstructionTLBMiss:
rlwimi r10, r11, 0, 0, 32 - PAGE_SHIFT - 1 /* Add level 2 base */
lwz r10, 0(r10) /* Get the pte */
4:
-#if defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC) || defined (CONFIG_HUGETLB_PAGE)
+#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_HUGETLB_PAGE)
mtcr r3
#endif
/* Insert the APG into the TWC from the Linux PTE. */
@@ -400,7 +418,7 @@ InstructionTLBMiss:
MTSPR_CPU6(SPRN_MI_RPN, r10, r3) /* Update TLB entry */
/* Restore registers */
-#if defined(CONFIG_8xx_CPU6) || defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC) || defined (CONFIG_HUGETLB_PAGE)
+#if defined(CONFIG_8xx_CPU6) || defined(ITLB_MISS_KERNEL) || defined(CONFIG_HUGETLB_PAGE)
mfspr r3, SPRN_SPRG_SCRATCH2
#endif
EXCEPTION_EPILOG_0
@@ -447,23 +465,23 @@ DataStoreTLBMiss:
* kernel page tables.
*/
mfspr r10, SPRN_MD_EPN
- rlwinm r10, r10, 16, 0xfff8
- cmpli cr0, r10, PAGE_OFFSET@h
+ rlwinm r11, r10, 16, 0xfff8
+ cmpli cr0, r11, PAGE_OFFSET@h
mfspr r11, SPRN_M_TW /* Get level 1 table */
blt+ 3f
+ rlwinm r11, r10, 16, 0xfff8
#ifndef CONFIG_PIN_TLB_IMMR
- cmpli cr0, r10, VIRT_IMMR_BASE@h
+ cmpli cr0, r11, VIRT_IMMR_BASE@h
#endif
_ENTRY(DTLBMiss_cmp)
- cmpli cr7, r10, (PAGE_OFFSET + 0x1800000)@h
- lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha
+ cmpli cr7, r11, (PAGE_OFFSET + 0x1800000)@h
#ifndef CONFIG_PIN_TLB_IMMR
_ENTRY(DTLBMiss_jmp)
beq- DTLBMissIMMR
#endif
blt cr7, DTLBMissLinear
+ lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha
3:
- mfspr r10, SPRN_MD_EPN
/* Insert level 1 index */
rlwimi r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29
@@ -569,8 +587,8 @@ _ENTRY(DTLBMiss_jmp)
InstructionTLBError:
EXCEPTION_PROLOG
mr r4,r12
- mr r5,r9
- andis. r10,r5,0x4000
+ andis. r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */
+ andis. r10,r9,SRR1_ISI_NOPT@h
beq+ 1f
tlbie r4
itlbie:
@@ -595,7 +613,7 @@ DARFixed:/* Return from dcbx instruction bug workaround */
mfspr r5,SPRN_DSISR
stw r5,_DSISR(r11)
mfspr r4,SPRN_DAR
- andis. r10,r5,0x4000
+ andis. r10,r5,DSISR_NOHPTE@h
beq+ 1f
tlbie r4
dtlbie:
@@ -684,7 +702,7 @@ DTLBMissLinear:
/* Set 8M byte page and mark it valid */
li r11, MD_PS8MEG | MD_SVALID
MTSPR_CPU6(SPRN_MD_TWC, r11, r3)
- rlwinm r10, r10, 16, 0x0f800000 /* 8xx supports max 256Mb RAM */
+ rlwinm r10, r10, 0, 0x0f800000 /* 8xx supports max 256Mb RAM */
ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_SHARED | _PAGE_DIRTY | \
_PAGE_PRESENT
MTSPR_CPU6(SPRN_MD_RPN, r10, r11) /* Update TLB entry */
@@ -695,6 +713,22 @@ DTLBMissLinear:
EXCEPTION_EPILOG_0
rfi
+#ifndef CONFIG_PIN_TLB_TEXT
+ITLBMissLinear:
+ mtcr r3
+ /* Set 8M byte page and mark it valid */
+ li r11, MI_PS8MEG | MI_SVALID | _PAGE_EXEC
+ MTSPR_CPU6(SPRN_MI_TWC, r11, r3)
+ rlwinm r10, r10, 0, 0x0f800000 /* 8xx supports max 256Mb RAM */
+ ori r10, r10, 0xf0 | MI_SPS16K | _PAGE_SHARED | _PAGE_DIRTY | \
+ _PAGE_PRESENT
+ MTSPR_CPU6(SPRN_MI_RPN, r10, r11) /* Update TLB entry */
+
+ mfspr r3, SPRN_SPRG_SCRATCH2
+ EXCEPTION_EPILOG_0
+ rfi
+#endif
+
/* This is the procedure to calculate the data EA for buggy dcbx,dcbi instructions
* by decoding the registers used by the dcbx instruction and adding them.
* DAR is set to the calculated address.
@@ -705,9 +739,10 @@ FixupDAR:/* Entry point for dcbx workaround. */
mtspr SPRN_SPRG_SCRATCH2, r10
/* fetch instruction from memory. */
mfspr r10, SPRN_SRR0
- IS_KERNEL(r11, r10)
+ rlwinm r11, r10, 16, 0xfff8
+ cmpli cr0, r11, PAGE_OFFSET@h
mfspr r11, SPRN_M_TW /* Get level 1 table */
- BRANCH_UNLESS_KERNEL(3f)
+ blt+ 3f
rlwinm r11, r10, 16, 0xfff8
_ENTRY(FixupDAR_cmp)
cmpli cr7, r11, (PAGE_OFFSET + 0x1800000)@h
@@ -915,10 +950,8 @@ start_here:
rfi
/* Load up the kernel context */
2:
- SYNC /* Force all PTE updates to finish */
tlbia /* Clear all TLB entries */
sync /* wait for tlbia/tlbie to finish */
- TLBSYNC /* ... on all CPUs */
/* set up the PTE pointers for the Abatron bdiGDB.
*/
@@ -955,15 +988,14 @@ initial_mmu:
mtspr SPRN_MD_CTR, r10 /* remove PINNED DTLB entries */
tlbia /* Invalidate all TLB entries */
-/* Always pin the first 8 MB ITLB to prevent ITLB
- misses while mucking around with SRR0/SRR1 in asm
-*/
+#ifdef CONFIG_PIN_TLB_TEXT
lis r8, MI_RSV4I@h
ori r8, r8, 0x1c00
mtspr SPRN_MI_CTR, r8 /* Set instruction MMU control */
+#endif
-#ifdef CONFIG_PIN_TLB
+#ifdef CONFIG_PIN_TLB_DATA
oris r10, r10, MD_RSV4I@h
mtspr SPRN_MD_CTR, r10 /* Set data TLB control */
#endif
@@ -989,6 +1021,7 @@ initial_mmu:
* internal registers (among other things).
*/
#ifdef CONFIG_PIN_TLB_IMMR
+ oris r10, r10, MD_RSV4I@h
ori r10, r10, 0x1c00
mtspr SPRN_MD_CTR, r10
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index 5adb390e773b..1125c9be9e06 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -30,6 +30,7 @@
* Use unused space in the interrupt stack to save and restore
* registers for winkle support.
*/
+#define _MMCR0 GPR0
#define _SDR1 GPR3
#define _PTCR GPR3
#define _RPR GPR4
@@ -84,7 +85,61 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
std r3,_WORT(r1)
mfspr r3,SPRN_WORC
std r3,_WORC(r1)
+/*
+ * On POWER9, there are idle states such as stop4, invoked via cpuidle,
+ * that lose hypervisor resources. In such cases, we need to save
+ * additional SPRs before entering those idle states so that they can
+ * be restored to their older values on wakeup from the idle state.
+ *
+ * On POWER8, the only such deep idle state is winkle which is used
+ * only in the context of CPU-Hotplug, where these additional SPRs are
+ * reinitialized to a sane value. Hence there is no need to save/restore
+ * these SPRs.
+ */
+BEGIN_FTR_SECTION
+ blr
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
+
+power9_save_additional_sprs:
+ mfspr r3, SPRN_PID
+ mfspr r4, SPRN_LDBAR
+ std r3, STOP_PID(r13)
+ std r4, STOP_LDBAR(r13)
+ mfspr r3, SPRN_FSCR
+ mfspr r4, SPRN_HFSCR
+ std r3, STOP_FSCR(r13)
+ std r4, STOP_HFSCR(r13)
+
+ mfspr r3, SPRN_MMCRA
+ mfspr r4, SPRN_MMCR1
+ std r3, STOP_MMCRA(r13)
+ std r4, STOP_MMCR1(r13)
+
+ mfspr r3, SPRN_MMCR2
+ std r3, STOP_MMCR2(r13)
+ blr
+
+power9_restore_additional_sprs:
+ ld r3,_LPCR(r1)
+ ld r4, STOP_PID(r13)
+ mtspr SPRN_LPCR,r3
+ mtspr SPRN_PID, r4
+
+ ld r3, STOP_LDBAR(r13)
+ ld r4, STOP_FSCR(r13)
+ mtspr SPRN_LDBAR, r3
+ mtspr SPRN_FSCR, r4
+
+ ld r3, STOP_HFSCR(r13)
+ ld r4, STOP_MMCRA(r13)
+ mtspr SPRN_HFSCR, r3
+ mtspr SPRN_MMCRA, r4
+ /* We have already restored PACA_MMCR0 */
+ ld r3, STOP_MMCR1(r13)
+ ld r4, STOP_MMCR2(r13)
+ mtspr SPRN_MMCR1, r3
+ mtspr SPRN_MMCR2, r4
blr
/*
@@ -140,7 +195,16 @@ pnv_powersave_common:
std r5,_CCR(r1)
std r1,PACAR1(r13)
+BEGIN_FTR_SECTION
+ /*
+ * POWER9 does not require real mode to stop, and presently does not
+ * set hwthread_state for KVM (threads don't share MMU context), so
+ * we can remain in virtual mode for this.
+ */
+ bctr
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
/*
+ * POWER8
* Go to real mode to do the nap, as required by the architecture.
* Also, we need to be in real mode before setting hwthread_state,
* because as soon as we do that, another thread can switch
@@ -150,6 +214,20 @@ pnv_powersave_common:
mtmsrd r7,0
bctr
+/*
+ * This is the sequence required to execute idle instructions, as
+ * specified in ISA v2.07 (and earlier). MSR[IR] and MSR[DR] must be 0.
+ */
+#define IDLE_STATE_ENTER_SEQ_NORET(IDLE_INST) \
+ /* Magic NAP/SLEEP/WINKLE mode enter sequence */ \
+ std r0,0(r1); \
+ ptesync; \
+ ld r0,0(r1); \
+236: cmpd cr0,r0,r0; \
+ bne 236b; \
+ IDLE_INST;
+
+
.globl pnv_enter_arch207_idle_mode
pnv_enter_arch207_idle_mode:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
@@ -241,20 +319,27 @@ enter_winkle:
/*
* r3 - PSSCR value corresponding to the requested stop state.
*/
-power_enter_stop:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
- /* Tell KVM we're entering idle */
+power_enter_stop_kvm_rm:
+ /*
+ * This is currently unused because POWER9 KVM does not have to
+ * gather secondary threads into sibling mode, but the code is
+ * here in case that function is required.
+ *
+ * Tell KVM we're entering idle.
+ */
li r4,KVM_HWTHREAD_IN_IDLE
/* DO THIS IN REAL MODE! See comment above. */
stb r4,HSTATE_HWTHREAD_STATE(r13)
#endif
+power_enter_stop:
/*
* Check if we are executing the lite variant with ESL=EC=0
*/
andis. r4,r3,PSSCR_EC_ESL_MASK_SHIFTED
clrldi r3,r3,60 /* r3 = Bits[60:63] = Requested Level (RL) */
bne .Lhandle_esl_ec_set
- IDLE_STATE_ENTER_SEQ(PPC_STOP)
+ PPC_STOP
li r3,0 /* Since we didn't lose state, return 0 */
/*
@@ -272,6 +357,14 @@ power_enter_stop:
b pnv_wakeup_noloss
.Lhandle_esl_ec_set:
+ /*
+ * POWER9 DD2 can incorrectly set PMAO when waking up after a
+ * state-loss idle. Saving and restoring MMCR0 over idle is a
+ * workaround.
+ */
+ mfspr r4,SPRN_MMCR0
+ std r4,_MMCR0(r1)
+
/*
* Check if the requested state is a deep idle state.
*/
@@ -279,7 +372,8 @@ power_enter_stop:
ld r4,ADDROFF(pnv_first_deep_stop_state)(r5)
cmpd r3,r4
bge .Lhandle_deep_stop
- IDLE_STATE_ENTER_SEQ_NORET(PPC_STOP)
+ PPC_STOP /* Does not return (system reset interrupt) */
+
.Lhandle_deep_stop:
/*
* Entering deep idle state.
@@ -301,7 +395,7 @@ lwarx_loop_stop:
bl save_sprs_to_stack
- IDLE_STATE_ENTER_SEQ_NORET(PPC_STOP)
+ PPC_STOP /* Does not return (system reset interrupt) */
/*
* Entered with MSR[EE]=0 and no soft-masked interrupts pending.
@@ -402,6 +496,18 @@ pnv_powersave_wakeup_mce:
b pnv_powersave_wakeup
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+kvm_start_guest_check:
+ li r0,KVM_HWTHREAD_IN_KERNEL
+ stb r0,HSTATE_HWTHREAD_STATE(r13)
+ /* Order setting hwthread_state vs. testing hwthread_req */
+ sync
+ lbz r0,HSTATE_HWTHREAD_REQ(r13)
+ cmpwi r0,0
+ beqlr
+ b kvm_start_guest
+#endif
+
/*
* Called from reset vector for powersave wakeups.
* cr3 - set to gt if waking up with partial/complete hypervisor state loss
@@ -426,15 +532,9 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
mr r3,r12
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
- li r0,KVM_HWTHREAD_IN_KERNEL
- stb r0,HSTATE_HWTHREAD_STATE(r13)
- /* Order setting hwthread_state vs. testing hwthread_req */
- sync
- lbz r0,HSTATE_HWTHREAD_REQ(r13)
- cmpwi r0,0
- beq 1f
- b kvm_start_guest
-1:
+BEGIN_FTR_SECTION
+ bl kvm_start_guest_check
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
#endif
/* Return SRR1 from power7_nap() */
@@ -450,10 +550,20 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
pnv_restore_hyp_resource_arch300:
/*
* Workaround for POWER9, if we lost resources, the ERAT
- * might have been mixed up and needs flushing.
+ * might have been mixed up and needs flushing. We also need
+ * to reload MMCR0 (see comment above). We also need to set
+ * then clear bit 60 in MMCRA to ensure the PMU starts running.
*/
blt cr3,1f
PPC_INVALIDATE_ERAT
+ ld r1,PACAR1(r13)
+ mfspr r4,SPRN_MMCRA
+ ori r4,r4,(1 << (63-60))
+ mtspr SPRN_MMCRA,r4
+ xori r4,r4,(1 << (63-60))
+ mtspr SPRN_MMCRA,r4
+ ld r4,_MMCR0(r1)
+ mtspr SPRN_MMCR0,r4
1:
/*
* POWER ISA 3. Use PSSCR to determine if we
@@ -790,9 +900,16 @@ no_segments:
mtctr r12
bctrl
+/*
+ * On POWER9, we can come here on wakeup from a cpuidle stop state.
+ * Hence restore the additional SPRs to the saved value.
+ *
+ * On POWER8, we come here only on winkle. Since winkle is used
+ * only in the case of CPU-Hotplug, we don't need to restore
+ * the additional SPRs.
+ */
BEGIN_FTR_SECTION
- ld r4,_LPCR(r1)
- mtspr SPRN_LPCR,r4
+ bl power9_restore_additional_sprs
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
hypervisor_state_restored:
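
A note on the MMCRA workaround in the wakeup path above: Power architecture documentation numbers register bits from the most-significant end, so architected "bit 60" of a 64-bit SPR corresponds to the CPU value 1 << (63 - 60), which is why the assembly shifts by (63-60). The kernel has a PPC_BIT() helper for this conversion; a small host-side sketch of the same arithmetic:

    #include <stdio.h>

    /* IBM bit numbering: bit 0 is the most-significant bit of a 64-bit
     * register, so architected "bit n" corresponds to 1 << (63 - n). */
    #define PPC_BIT(n) (1ULL << (63 - (n)))

    int main(void)
    {
        /* Bit 60 of MMCRA, toggled in the idle wakeup path above */
        printf("PPC_BIT(60) = 0x%llx\n", PPC_BIT(60)); /* prints 0x8 */
        return 0;
    }
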
diff --git a/arch/powerpc/kernel/io-workarounds.c b/arch/powerpc/kernel/io-workarounds.c
index a582e0d42525..aa9f1b8261db 100644
--- a/arch/powerpc/kernel/io-workarounds.c
+++ b/arch/powerpc/kernel/io-workarounds.c
@@ -19,6 +19,8 @@
#include <asm/pgtable.h>
#include <asm/ppc-pci.h>
#include <asm/io-workarounds.h>
+#include <asm/pte-walk.h>
+
#define IOWA_MAX_BUS 8
@@ -75,8 +77,7 @@ struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
* We won't find huge pages here (iomem). Also can't hit
* a page table free due to init_mm
*/
- ptep = __find_linux_pte_or_hugepte(init_mm.pgd, vaddr,
- NULL, &hugepage_shift);
+ ptep = find_init_mm_pte(vaddr, &hugepage_shift);
if (ptep == NULL)
paddr = 0;
else {
@@ -192,7 +193,7 @@ void iowa_register_bus(struct pci_controller *phb, struct ppc_pci_io *ops,
if (iowa_bus_count >= IOWA_MAX_BUS) {
pr_err("IOWA:Too many pci bridges, "
- "workarounds disabled for %s\n", np->full_name);
+ "workarounds disabled for %pOF\n", np);
return;
}
@@ -207,6 +208,6 @@ void iowa_register_bus(struct pci_controller *phb, struct ppc_pci_io *ops,
iowa_bus_count++;
- pr_debug("IOWA:[%d]Add bus, %s.\n", iowa_bus_count-1, np->full_name);
+ pr_debug("IOWA:[%d]Add bus, %pOF.\n", iowa_bus_count-1, np);
}
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 233ca3fe4754..af7a20dc6e09 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -127,8 +127,7 @@ static ssize_t fail_iommu_store(struct device *dev,
return count;
}
-static DEVICE_ATTR(fail_iommu, S_IRUGO|S_IWUSR, fail_iommu_show,
- fail_iommu_store);
+static DEVICE_ATTR_RW(fail_iommu);
static int fail_iommu_bus_notify(struct notifier_block *nb,
unsigned long action, void *data)
@@ -190,7 +189,7 @@ static unsigned long iommu_range_alloc(struct device *dev,
unsigned int pool_nr;
struct iommu_pool *pool;
- align_mask = 0xffffffffffffffffl >> (64 - align_order);
+ align_mask = (1ull << align_order) - 1;
/* This allocator was derived from x86_64's bit string search */
@@ -208,7 +207,7 @@ static unsigned long iommu_range_alloc(struct device *dev,
* We don't need to disable preemption here because any CPU can
* safely use any IOMMU pool.
*/
- pool_nr = __this_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1);
+ pool_nr = raw_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1);
if (largealloc)
pool = &(tbl->large_pool);
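
The align_mask change in iommu_range_alloc() above is more than a cleanup. With the old expression, align_order == 0 shifts a 64-bit value right by 64 bits, which is undefined behaviour in C (and on some CPUs yields the unshifted value rather than the expected 0). The replacement is well defined for the orders used here. A small host-side illustration (the undefined variant is left commented out):

    #include <stdio.h>

    int main(void)
    {
        unsigned int align_order = 0;

        /* Old form: a shift by (64 - 0) = 64 bits, undefined in C:
         * unsigned long long bad = 0xffffffffffffffffULL >> (64 - align_order);
         */

        /* New form: well defined, yields 0 when align_order == 0 */
        unsigned long long mask = (1ULL << align_order) - 1;

        printf("mask = 0x%llx\n", mask);
        return 0;
    }
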
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 0bcec745a672..4e65bf82f5e0 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -24,7 +24,7 @@
* mask register (of which only 16 are defined), hence the weird shifting
* and complement of the cached_irq_mask. I want to be able to stuff
* this right into the SIU SMASK register.
- * Many of the prep/chrp functions are conditional compiled on CONFIG_8xx
+ * Many of the prep/chrp functions are conditionally compiled on CONFIG_PPC_8xx
* to reduce code space and undefined function references.
*/
@@ -143,8 +143,22 @@ notrace unsigned int __check_irq_replay(void)
*/
unsigned char happened = local_paca->irq_happened;
- /* Clear bit 0 which we wouldn't clear otherwise */
- local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
+ if (happened & PACA_IRQ_HARD_DIS) {
+ /* Clear bit 0 which we wouldn't clear otherwise */
+ local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
+
+ /*
+ * We may have missed a decrementer interrupt if hard disabled.
+ * Check the decrementer register in case we had a rollover
+ * while hard disabled.
+ */
+ if (!(happened & PACA_IRQ_DEC)) {
+ if (decrementer_check_overflow()) {
+ local_paca->irq_happened |= PACA_IRQ_DEC;
+ happened |= PACA_IRQ_DEC;
+ }
+ }
+ }
/*
* Force the delivery of pending soft-disabled interrupts on PS3.
@@ -160,41 +174,39 @@ notrace unsigned int __check_irq_replay(void)
* This is a higher priority interrupt than the others, so
* replay it first.
*/
- local_paca->irq_happened &= ~PACA_IRQ_HMI;
- if (happened & PACA_IRQ_HMI)
+ if (happened & PACA_IRQ_HMI) {
+ local_paca->irq_happened &= ~PACA_IRQ_HMI;
return 0xe60;
+ }
- /*
- * We may have missed a decrementer interrupt. We check the
- * decrementer itself rather than the paca irq_happened field
- * in case we also had a rollover while hard disabled
- */
- local_paca->irq_happened &= ~PACA_IRQ_DEC;
- if ((happened & PACA_IRQ_DEC) || decrementer_check_overflow())
+ if (happened & PACA_IRQ_DEC) {
+ local_paca->irq_happened &= ~PACA_IRQ_DEC;
return 0x900;
+ }
- /* Finally check if an external interrupt happened */
- local_paca->irq_happened &= ~PACA_IRQ_EE;
- if (happened & PACA_IRQ_EE)
+ if (happened & PACA_IRQ_EE) {
+ local_paca->irq_happened &= ~PACA_IRQ_EE;
return 0x500;
+ }
#ifdef CONFIG_PPC_BOOK3E
- /* Finally check if an EPR external interrupt happened
- * this bit is typically set if we need to handle another
- * "edge" interrupt from within the MPIC "EPR" handler
+ /*
+ * Check if an EPR external interrupt happened. This bit is typically
+ * set if we need to handle another "edge" interrupt from within the
+ * MPIC "EPR" handler.
*/
- local_paca->irq_happened &= ~PACA_IRQ_EE_EDGE;
- if (happened & PACA_IRQ_EE_EDGE)
+ if (happened & PACA_IRQ_EE_EDGE) {
+ local_paca->irq_happened &= ~PACA_IRQ_EE_EDGE;
return 0x500;
+ }
- local_paca->irq_happened &= ~PACA_IRQ_DBELL;
- if (happened & PACA_IRQ_DBELL)
+ if (happened & PACA_IRQ_DBELL) {
+ local_paca->irq_happened &= ~PACA_IRQ_DBELL;
return 0x280;
+ }
#else
- local_paca->irq_happened &= ~PACA_IRQ_DBELL;
if (happened & PACA_IRQ_DBELL) {
- if (cpu_has_feature(CPU_FTR_HVMODE))
- return 0xe80;
+ local_paca->irq_happened &= ~PACA_IRQ_DBELL;
return 0xa00;
}
#endif /* CONFIG_PPC_BOOK3E */
@@ -470,6 +482,18 @@ int arch_show_interrupts(struct seq_file *p, int prec)
seq_printf(p, " Hypervisor Maintenance Interrupts\n");
}
+ seq_printf(p, "%*s: ", prec, "NMI");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", per_cpu(irq_stat, j).sreset_irqs);
+ seq_printf(p, " System Reset interrupts\n");
+
+#ifdef CONFIG_PPC_WATCHDOG
+ seq_printf(p, "%*s: ", prec, "WDG");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", per_cpu(irq_stat, j).soft_nmi_irqs);
+ seq_printf(p, " Watchdog soft-NMI interrupts\n");
+#endif
+
#ifdef CONFIG_PPC_DOORBELL
if (cpu_has_feature(CPU_FTR_DBELL)) {
seq_printf(p, "%*s: ", prec, "DBL");
@@ -494,6 +518,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
sum += per_cpu(irq_stat, cpu).spurious_irqs;
sum += per_cpu(irq_stat, cpu).timer_irqs_others;
sum += per_cpu(irq_stat, cpu).hmi_exceptions;
+ sum += per_cpu(irq_stat, cpu).sreset_irqs;
+#ifdef CONFIG_PPC_WATCHDOG
+ sum += per_cpu(irq_stat, cpu).soft_nmi_irqs;
+#endif
#ifdef CONFIG_PPC_DOORBELL
sum += per_cpu(irq_stat, cpu).doorbell_irqs;
#endif
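
The __check_irq_replay() rework above applies one rule throughout: only clear a PACA_IRQ_* flag when that interrupt is actually being replayed, and fold the decrementer-overflow check under the hard-disable case so a rollover missed while hard-disabled is not lost. Stripped of the powerpc details, the new control flow is roughly the following sketch (illustrative only, not the kernel implementation; the flag names mirror the patch):

    unsigned int check_irq_replay_sketch(unsigned char *irq_happened)
    {
        unsigned char happened = *irq_happened;

        if (happened & PACA_IRQ_HARD_DIS) {
            *irq_happened &= ~PACA_IRQ_HARD_DIS;
            /* may have missed a decrementer rollover while hard disabled */
            if (!(happened & PACA_IRQ_DEC) && decrementer_check_overflow()) {
                *irq_happened |= PACA_IRQ_DEC;
                happened |= PACA_IRQ_DEC;
            }
        }
        if (happened & PACA_IRQ_HMI) {
            *irq_happened &= ~PACA_IRQ_HMI;
            return 0xe60;
        }
        if (happened & PACA_IRQ_DEC) {
            *irq_happened &= ~PACA_IRQ_DEC;
            return 0x900;
        }
        if (happened & PACA_IRQ_EE) {
            *irq_happened &= ~PACA_IRQ_EE;
            return 0x500;
        }
        return 0;
    }
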
diff --git a/arch/powerpc/kernel/isa-bridge.c b/arch/powerpc/kernel/isa-bridge.c
index bb6f8993412e..1df6c74aa731 100644
--- a/arch/powerpc/kernel/isa-bridge.c
+++ b/arch/powerpc/kernel/isa-bridge.c
@@ -164,7 +164,7 @@ void __init isa_bridge_find_early(struct pci_controller *hose)
/* Set the global ISA io base to indicate we have an ISA bridge */
isa_io_base = ISA_IO_BASE;
- pr_debug("ISA bridge (early) is %s\n", np->full_name);
+ pr_debug("ISA bridge (early) is %pOF\n", np);
}
/**
@@ -187,15 +187,15 @@ void __init isa_bridge_init_non_pci(struct device_node *np)
pna = of_n_addr_cells(np);
if (of_property_read_u32(np, "#address-cells", &na) ||
of_property_read_u32(np, "#size-cells", &ns)) {
- pr_warn("ISA: Non-PCI bridge %s is missing address format\n",
- np->full_name);
+ pr_warn("ISA: Non-PCI bridge %pOF is missing address format\n",
+ np);
return;
}
/* Check it's a supported address format */
if (na != 2 || ns != 1) {
- pr_warn("ISA: Non-PCI bridge %s has unsupported address format\n",
- np->full_name);
+ pr_warn("ISA: Non-PCI bridge %pOF has unsupported address format\n",
+ np);
return;
}
rs = na + ns + pna;
@@ -203,8 +203,8 @@ void __init isa_bridge_init_non_pci(struct device_node *np)
/* Grab the ranges property */
ranges = of_get_property(np, "ranges", &rlen);
if (ranges == NULL || rlen < rs) {
- pr_warn("ISA: Non-PCI bridge %s has absent or invalid ranges\n",
- np->full_name);
+ pr_warn("ISA: Non-PCI bridge %pOF has absent or invalid ranges\n",
+ np);
return;
}
@@ -220,8 +220,8 @@ void __init isa_bridge_init_non_pci(struct device_node *np)
/* Got something ? */
if (!size || !pbasep) {
- pr_warn("ISA: Non-PCI bridge %s has no usable IO range\n",
- np->full_name);
+ pr_warn("ISA: Non-PCI bridge %pOF has no usable IO range\n",
+ np);
return;
}
@@ -233,15 +233,15 @@ void __init isa_bridge_init_non_pci(struct device_node *np)
/* Map pbase */
pbase = of_translate_address(np, pbasep);
if (pbase == OF_BAD_ADDR) {
- pr_warn("ISA: Non-PCI bridge %s failed to translate IO base\n",
- np->full_name);
+ pr_warn("ISA: Non-PCI bridge %pOF failed to translate IO base\n",
+ np);
return;
}
/* We need page alignment */
if ((cbase & ~PAGE_MASK) || (pbase & ~PAGE_MASK)) {
- pr_warn("ISA: Non-PCI bridge %s has non aligned IO range\n",
- np->full_name);
+ pr_warn("ISA: Non-PCI bridge %pOF has non aligned IO range\n",
+ np);
return;
}
@@ -255,7 +255,7 @@ void __init isa_bridge_init_non_pci(struct device_node *np)
__ioremap_at(pbase, (void *)ISA_IO_BASE,
size, pgprot_val(pgprot_noncached(__pgprot(0))));
- pr_debug("ISA: Non-PCI bridge is %s\n", np->full_name);
+ pr_debug("ISA: Non-PCI bridge is %pOF\n", np);
}
/**
@@ -277,8 +277,8 @@ static void isa_bridge_find_late(struct pci_dev *pdev,
/* Set the global ISA io base to indicate we have an ISA bridge */
isa_io_base = ISA_IO_BASE;
- pr_debug("ISA bridge (late) is %s on %s\n",
- devnode->full_name, pci_name(pdev));
+ pr_debug("ISA bridge (late) is %pOF on %s\n",
+ devnode, pci_name(pdev));
}
/**
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index dbf098121ce6..35e240a0a408 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -67,9 +67,9 @@ static struct hard_trap_info
#endif
#else /* ! (defined(CONFIG_40x) || defined(CONFIG_BOOKE)) */
{ 0x0d00, 0x05 /* SIGTRAP */ }, /* single-step */
-#if defined(CONFIG_8xx)
+#if defined(CONFIG_PPC_8xx)
{ 0x1000, 0x04 /* SIGILL */ }, /* software emulation */
-#else /* ! CONFIG_8xx */
+#else /* ! CONFIG_PPC_8xx */
{ 0x0f00, 0x04 /* SIGILL */ }, /* performance monitor */
{ 0x0f20, 0x08 /* SIGFPE */ }, /* altivec unavailable */
{ 0x1300, 0x05 /* SIGTRAP */ }, /* instruction address break */
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index 1086ea37c832..9ad37f827a97 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -25,7 +25,6 @@
#include <linux/kvm_para.h>
#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/nmi.h> /* hardlockup_detector_disable() */
#include <asm/reg.h>
#include <asm/sections.h>
@@ -719,12 +718,6 @@ static __init void kvm_free_tmp(void)
static int __init kvm_guest_init(void)
{
- /*
- * The hardlockup detector is likely to get false positives in
- * KVM guests, so disable it by default.
- */
- hardlockup_detector_disable();
-
if (!kvm_para_available())
goto free_tmp;
diff --git a/arch/powerpc/kernel/l2cr_6xx.S b/arch/powerpc/kernel/l2cr_6xx.S
index 97ec8557f974..6408f09dbbd9 100644
--- a/arch/powerpc/kernel/l2cr_6xx.S
+++ b/arch/powerpc/kernel/l2cr_6xx.S
@@ -181,7 +181,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
mtctr r4
li r4,0
1:
- lwzx r0,r0,r4
+ lwzx r0,0,r4
addi r4,r4,32 /* Go to start of next cache line */
bdnz 1b
isync
@@ -328,7 +328,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_L3CR)
mtctr r4
li r4,0
1:
- lwzx r0,r0,r4
+ lwzx r0,0,r4
dcbf 0,r4
addi r4,r4,32 /* Go to start of next cache line */
bdnz 1b
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index 0694d20f85b6..5e5a64a8b4e4 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -147,8 +147,8 @@ static int __init add_legacy_port(struct device_node *np, int want_index,
legacy_serial_ports[index].serial_out = tsi_serial_out;
}
- printk(KERN_DEBUG "Found legacy serial port %d for %s\n",
- index, np->full_name);
+ printk(KERN_DEBUG "Found legacy serial port %d for %pOF\n",
+ index, np);
printk(KERN_DEBUG " %s=%llx, taddr=%llx, irq=%lx, clk=%d, speed=%d\n",
(iotype == UPIO_PORT) ? "port" : "mem",
(unsigned long long)base, (unsigned long long)taddr, irq,
@@ -207,7 +207,7 @@ static int __init add_legacy_isa_port(struct device_node *np,
int index = -1;
u64 taddr;
- DBG(" -> add_legacy_isa_port(%s)\n", np->full_name);
+ DBG(" -> add_legacy_isa_port(%pOF)\n", np);
/* Get the ISA port number */
reg = of_get_property(np, "reg", NULL);
@@ -256,7 +256,7 @@ static int __init add_legacy_pci_port(struct device_node *np,
unsigned int flags;
int iotype, index = -1, lindex = 0;
- DBG(" -> add_legacy_pci_port(%s)\n", np->full_name);
+ DBG(" -> add_legacy_pci_port(%pOF)\n", np);
/* We only support ports that have a clock frequency properly
* encoded in the device-tree (that is have an fcode). Anything
@@ -374,7 +374,7 @@ void __init find_legacy_serial_ports(void)
if (path != NULL) {
stdout = of_find_node_by_path(path);
if (stdout)
- DBG("stdout is %s\n", stdout->full_name);
+ DBG("stdout is %pOF\n", stdout);
} else {
DBG(" no linux,stdout-path !\n");
}
@@ -603,7 +603,7 @@ static int __init check_legacy_serial_console(void)
DBG(" can't find stdout package %s !\n", name);
return -ENODEV;
}
- DBG("stdout is %s\n", prom_stdout->full_name);
+ DBG("stdout is %pOF\n", prom_stdout);
name = of_get_property(prom_stdout, "name", NULL);
if (!name) {
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index e0e131e662ed..9b2ea7e71c06 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -22,11 +22,14 @@
#undef DEBUG
#define pr_fmt(fmt) "mce: " fmt
+#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/irq_work.h>
+
+#include <asm/machdep.h>
#include <asm/mce.h>
static DEFINE_PER_CPU(int, mce_nest_count);
@@ -446,3 +449,33 @@ uint64_t get_mce_fault_addr(struct machine_check_event *evt)
return 0;
}
EXPORT_SYMBOL(get_mce_fault_addr);
+
+/*
+ * This function is called in real mode. Strictly no printk's please.
+ *
+ * regs->nip and regs->msr contain srr0 and srr1.
+ */
+long machine_check_early(struct pt_regs *regs)
+{
+ long handled = 0;
+
+ __this_cpu_inc(irq_stat.mce_exceptions);
+
+ if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
+ handled = cur_cpu_spec->machine_check_early(regs);
+ return handled;
+}
+
+long hmi_exception_realmode(struct pt_regs *regs)
+{
+ __this_cpu_inc(irq_stat.hmi_exceptions);
+
+ wait_for_subcore_guest_exit();
+
+ if (ppc_md.hmi_exception_early)
+ ppc_md.hmi_exception_early(regs);
+
+ wait_for_tb_resync();
+
+ return 0;
+}
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
index 34aeac54f120..becaec990140 100644
--- a/arch/powerpc/kernel/of_platform.c
+++ b/arch/powerpc/kernel/of_platform.c
@@ -45,7 +45,7 @@ static int of_pci_phb_probe(struct platform_device *dev)
if (ppc_md.pci_setup_phb == NULL)
return -ENODEV;
- pr_info("Setting up PCI bus %s\n", dev->dev.of_node->full_name);
+ pr_info("Setting up PCI bus %pOF\n", dev->dev.of_node);
/* Alloc and setup PHB data structure */
phb = pcibios_alloc_controller(dev->dev.of_node);
diff --git a/arch/powerpc/kernel/optprobes_head.S b/arch/powerpc/kernel/optprobes_head.S
index 4937bef7652f..52fc864cdec4 100644
--- a/arch/powerpc/kernel/optprobes_head.S
+++ b/arch/powerpc/kernel/optprobes_head.S
@@ -60,10 +60,6 @@ optprobe_template_entry:
std r5,_CCR(r1)
lbz r5,PACASOFTIRQEN(r13)
std r5,SOFTE(r1)
- mfdar r5
- std r5,_DAR(r1)
- mfdsisr r5
- std r5,_DSISR(r1)
/*
* We may get here from a module, so load the kernel TOC in r2.
@@ -122,10 +118,6 @@ optprobe_template_call_emulate:
mtxer r5
ld r5,_CCR(r1)
mtcr r5
- ld r5,_DAR(r1)
- mtdar r5
- ld r5,_DSISR(r1)
- mtdsisr r5
REST_GPR(0,r1)
REST_10GPRS(2,r1)
REST_10GPRS(12,r1)
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 8d63627e067f..70f073d6c3b2 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -99,18 +99,27 @@ static inline void free_lppacas(void) { }
* If you make the number of persistent SLB entries dynamic, please also
* update PR KVM to flush and restore them accordingly.
*/
-static struct slb_shadow *slb_shadow;
+static struct slb_shadow * __initdata slb_shadow;
static void __init allocate_slb_shadows(int nr_cpus, int limit)
{
int size = PAGE_ALIGN(sizeof(struct slb_shadow) * nr_cpus);
+
+ if (early_radix_enabled())
+ return;
+
slb_shadow = __va(memblock_alloc_base(size, PAGE_SIZE, limit));
memset(slb_shadow, 0, size);
}
static struct slb_shadow * __init init_slb_shadow(int cpu)
{
- struct slb_shadow *s = &slb_shadow[cpu];
+ struct slb_shadow *s;
+
+ if (early_radix_enabled())
+ return NULL;
+
+ s = &slb_shadow[cpu];
/*
* When we come through here to initialise boot_paca, the slb_shadow
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 341a7469cab8..02831a396419 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -373,9 +373,8 @@ static int pci_read_irq_line(struct pci_dev *pci_dev)
if (virq)
irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
} else {
- pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
- oirq.args_count, oirq.args[0], oirq.args[1],
- of_node_full_name(oirq.np));
+ pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %pOF\n",
+ oirq.args_count, oirq.args[0], oirq.args[1], oirq.np);
virq = irq_create_of_mapping(&oirq);
}
@@ -741,8 +740,8 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
struct of_pci_range range;
struct of_pci_range_parser parser;
- printk(KERN_INFO "PCI host bridge %s %s ranges:\n",
- dev->full_name, primary ? "(primary)" : "");
+ printk(KERN_INFO "PCI host bridge %pOF %s ranges:\n",
+ dev, primary ? "(primary)" : "");
/* Check for ranges property */
if (of_pci_range_parser_init(&parser, dev))
@@ -1556,8 +1555,8 @@ static void pcibios_setup_phb_resources(struct pci_controller *hose,
if (!res->flags) {
pr_debug("PCI: I/O resource not set for host"
- " bridge %s (domain %d)\n",
- hose->dn->full_name, hose->global_number);
+ " bridge %pOF (domain %d)\n",
+ hose->dn, hose->global_number);
} else {
offset = pcibios_io_space_offset(hose);
@@ -1668,7 +1667,7 @@ void pcibios_scan_phb(struct pci_controller *hose)
struct device_node *node = hose->dn;
int mode;
- pr_debug("PCI: Scanning PHB %s\n", of_node_full_name(node));
+ pr_debug("PCI: Scanning PHB %pOF\n", node);
/* Get some IO space for the new PHB */
pcibios_setup_phb_io_space(hose);
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index 41c86c6b6e4d..1d817f4d97d9 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -79,8 +79,8 @@ make_one_node_map(struct device_node* node, u8 pci_bus)
return;
bus_range = of_get_property(node, "bus-range", &len);
if (bus_range == NULL || len < 2 * sizeof(int)) {
- printk(KERN_WARNING "Can't get bus-range for %s, "
- "assuming it starts at 0\n", node->full_name);
+ printk(KERN_WARNING "Can't get bus-range for %pOF, "
+ "assuming it starts at 0\n", node);
pci_to_OF_bus_map[pci_bus] = 0;
} else
pci_to_OF_bus_map[pci_bus] = bus_range[0];
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index ed5e9ff61a68..932b9741aa8f 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -111,7 +111,7 @@ int pcibios_unmap_io_space(struct pci_bus *bus)
if (hose->io_base_alloc == NULL)
return 0;
- pr_debug("IO unmapping for PHB %s\n", hose->dn->full_name);
+ pr_debug("IO unmapping for PHB %pOF\n", hose->dn);
pr_debug(" alloc=0x%p\n", hose->io_base_alloc);
/* This is a PHB, we fully unmap the IO area */
@@ -151,7 +151,7 @@ static int pcibios_map_phb_io_space(struct pci_controller *hose)
hose->io_base_virt = (void __iomem *)(area->addr +
hose->io_base_phys - phys_page);
- pr_debug("IO mapping for PHB %s\n", hose->dn->full_name);
+ pr_debug("IO mapping for PHB %pOF\n", hose->dn);
pr_debug(" phys=0x%016llx, virt=0x%p (alloc=0x%p)\n",
hose->io_base_phys, hose->io_base_virt, hose->io_base_alloc);
pr_debug(" size=0x%016llx (alloc=0x%016lx)\n",
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c
index 592693437070..0e395afbf0f4 100644
--- a/arch/powerpc/kernel/pci_dn.c
+++ b/arch/powerpc/kernel/pci_dn.c
@@ -139,7 +139,6 @@ struct pci_dn *pci_get_pdn(struct pci_dev *pdev)
#ifdef CONFIG_PCI_IOV
static struct pci_dn *add_one_dev_pci_data(struct pci_dn *parent,
- struct pci_dev *pdev,
int vf_index,
int busno, int devfn)
{
@@ -150,10 +149,8 @@ static struct pci_dn *add_one_dev_pci_data(struct pci_dn *parent,
return NULL;
pdn = kzalloc(sizeof(*pdn), GFP_KERNEL);
- if (!pdn) {
- dev_warn(&pdev->dev, "%s: Out of memory!\n", __func__);
+ if (!pdn)
return NULL;
- }
pdn->phb = parent->phb;
pdn->parent = parent;
@@ -167,13 +164,6 @@ static struct pci_dn *add_one_dev_pci_data(struct pci_dn *parent,
INIT_LIST_HEAD(&pdn->list);
list_add_tail(&pdn->list, &parent->child_list);
- /*
- * If we already have PCI device instance, lets
- * bind them.
- */
- if (pdev)
- pdev->dev.archdata.pci_data = pdn;
-
return pdn;
}
#endif
@@ -201,7 +191,7 @@ struct pci_dn *add_dev_pci_data(struct pci_dev *pdev)
for (i = 0; i < pci_sriov_get_totalvfs(pdev); i++) {
struct eeh_dev *edev __maybe_unused;
- pdn = add_one_dev_pci_data(parent, NULL, i,
+ pdn = add_one_dev_pci_data(parent, i,
pci_iov_virtfn_bus(pdev, i),
pci_iov_virtfn_devfn(pdev, i));
if (!pdn) {
@@ -303,7 +293,6 @@ struct pci_dn *pci_add_device_node_info(struct pci_controller *hose,
if (pdn == NULL)
return NULL;
dn->data = pdn;
- pdn->node = dn;
pdn->phb = hose;
#ifdef CONFIG_PPC_POWERNV
pdn->pe_number = IODA_INVALID_PE;
@@ -352,6 +341,7 @@ EXPORT_SYMBOL_GPL(pci_add_device_node_info);
void pci_remove_device_node_info(struct device_node *dn)
{
struct pci_dn *pdn = dn ? PCI_DN(dn) : NULL;
+ struct device_node *parent;
#ifdef CONFIG_EEH
struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
@@ -364,8 +354,10 @@ void pci_remove_device_node_info(struct device_node *dn)
WARN_ON(!list_empty(&pdn->child_list));
list_del(&pdn->list);
- if (pdn->parent)
- of_node_put(pdn->parent->node);
+
+ parent = of_get_parent(dn);
+ if (parent)
+ of_node_put(parent);
dn->data = NULL;
kfree(pdn);
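
The pci_remove_device_node_info() change above stops caching the parent node in struct pci_dn and instead looks it up at removal time. The relevant OF API contract: of_get_parent() returns the parent device_node with its reference count raised, and the caller must balance it with of_node_put(). A minimal sketch of that pairing (hypothetical helper, for illustration only):

    #include <linux/of.h>

    /* of_get_parent() takes a reference on the parent node;
     * drop it with of_node_put() once the caller is done with it. */
    static void example_drop_parent_ref(struct device_node *dn)
    {
        struct device_node *parent = of_get_parent(dn);

        if (parent)
            of_node_put(parent);
    }
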
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index ea3d98115b88..0d790f8432d2 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -211,19 +211,19 @@ void of_scan_pci_bridge(struct pci_dev *dev)
unsigned int flags;
u64 size;
- pr_debug("of_scan_pci_bridge(%s)\n", node->full_name);
+ pr_debug("of_scan_pci_bridge(%pOF)\n", node);
/* parse bus-range property */
busrange = of_get_property(node, "bus-range", &len);
if (busrange == NULL || len != 8) {
- printk(KERN_DEBUG "Can't get bus-range for PCI-PCI bridge %s\n",
- node->full_name);
+ printk(KERN_DEBUG "Can't get bus-range for PCI-PCI bridge %pOF\n",
+ node);
return;
}
ranges = of_get_property(node, "ranges", &len);
if (ranges == NULL) {
- printk(KERN_DEBUG "Can't get ranges for PCI-PCI bridge %s\n",
- node->full_name);
+ printk(KERN_DEBUG "Can't get ranges for PCI-PCI bridge %pOF\n",
+ node);
return;
}
@@ -233,8 +233,8 @@ void of_scan_pci_bridge(struct pci_dev *dev)
bus = pci_add_new_bus(dev->bus, dev,
of_read_number(busrange, 1));
if (!bus) {
- printk(KERN_ERR "Failed to create pci bus for %s\n",
- node->full_name);
+ printk(KERN_ERR "Failed to create pci bus for %pOF\n",
+ node);
return;
}
}
@@ -262,13 +262,13 @@ void of_scan_pci_bridge(struct pci_dev *dev)
res = bus->resource[0];
if (res->flags) {
printk(KERN_ERR "PCI: ignoring extra I/O range"
- " for bridge %s\n", node->full_name);
+ " for bridge %pOF\n", node);
continue;
}
} else {
if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) {
printk(KERN_ERR "PCI: too many memory ranges"
- " for bridge %s\n", node->full_name);
+ " for bridge %pOF\n", node);
continue;
}
res = bus->resource[i];
@@ -307,7 +307,7 @@ static struct pci_dev *of_scan_pci_dev(struct pci_bus *bus,
struct eeh_dev *edev = pdn_to_eeh_dev(PCI_DN(dn));
#endif
- pr_debug(" * %s\n", dn->full_name);
+ pr_debug(" * %pOF\n", dn);
if (!of_device_is_available(dn))
return NULL;
@@ -350,8 +350,8 @@ static void __of_scan_bus(struct device_node *node, struct pci_bus *bus,
struct device_node *child;
struct pci_dev *dev;
- pr_debug("of_scan_bus(%s) bus no %d...\n",
- node->full_name, bus->number);
+ pr_debug("of_scan_bus(%pOF) bus no %d...\n",
+ node, bus->number);
/* Scan direct children */
for_each_child_of_node(node, child) {
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 9f3e2c932dcc..a0c74bbf3454 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -230,7 +230,8 @@ void enable_kernel_fp(void)
}
EXPORT_SYMBOL(enable_kernel_fp);
-static int restore_fp(struct task_struct *tsk) {
+static int restore_fp(struct task_struct *tsk)
+{
if (tsk->thread.load_fp || msr_tm_active(tsk->thread.regs->msr)) {
load_fp_state(&current->thread.fp_state);
current->thread.load_fp++;
@@ -330,11 +331,19 @@ static inline int restore_altivec(struct task_struct *tsk) { return 0; }
#ifdef CONFIG_VSX
static void __giveup_vsx(struct task_struct *tsk)
{
- if (tsk->thread.regs->msr & MSR_FP)
+ unsigned long msr = tsk->thread.regs->msr;
+
+ /*
+ * We should never be setting MSR_VSX without also setting
+ * MSR_FP and MSR_VEC
+ */
+ WARN_ON((msr & MSR_VSX) && !((msr & MSR_FP) && (msr & MSR_VEC)));
+
+ /* __giveup_fpu will clear MSR_VSX */
+ if (msr & MSR_FP)
__giveup_fpu(tsk);
- if (tsk->thread.regs->msr & MSR_VEC)
+ if (msr & MSR_VEC)
__giveup_altivec(tsk);
- tsk->thread.regs->msr &= ~MSR_VSX;
}
static void giveup_vsx(struct task_struct *tsk)
@@ -346,14 +355,6 @@ static void giveup_vsx(struct task_struct *tsk)
msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
}
-static void save_vsx(struct task_struct *tsk)
-{
- if (tsk->thread.regs->msr & MSR_FP)
- save_fpu(tsk);
- if (tsk->thread.regs->msr & MSR_VEC)
- save_altivec(tsk);
-}
-
void enable_kernel_vsx(void)
{
unsigned long cpumsr;
@@ -362,7 +363,8 @@ void enable_kernel_vsx(void)
cpumsr = msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
- if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) {
+ if (current->thread.regs &&
+ (current->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP))) {
check_if_tm_restore_required(current);
/*
* If a thread has already been reclaimed then the
@@ -373,10 +375,6 @@ void enable_kernel_vsx(void)
*/
if(!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr))
return;
- if (current->thread.regs->msr & MSR_FP)
- __giveup_fpu(current);
- if (current->thread.regs->msr & MSR_VEC)
- __giveup_altivec(current);
__giveup_vsx(current);
}
}
@@ -386,7 +384,7 @@ void flush_vsx_to_thread(struct task_struct *tsk)
{
if (tsk->thread.regs) {
preempt_disable();
- if (tsk->thread.regs->msr & MSR_VSX) {
+ if (tsk->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)) {
BUG_ON(tsk != current);
giveup_vsx(tsk);
}
@@ -406,7 +404,6 @@ static int restore_vsx(struct task_struct *tsk)
}
#else
static inline int restore_vsx(struct task_struct *tsk) { return 0; }
-static inline void save_vsx(struct task_struct *tsk) { }
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
@@ -486,6 +483,8 @@ void giveup_all(struct task_struct *tsk)
msr_check_and_set(msr_all_available);
check_if_tm_restore_required(tsk);
+ WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
+
#ifdef CONFIG_PPC_FPU
if (usermsr & MSR_FP)
__giveup_fpu(tsk);
@@ -494,10 +493,6 @@ void giveup_all(struct task_struct *tsk)
if (usermsr & MSR_VEC)
__giveup_altivec(tsk);
#endif
-#ifdef CONFIG_VSX
- if (usermsr & MSR_VSX)
- __giveup_vsx(tsk);
-#endif
#ifdef CONFIG_SPE
if (usermsr & MSR_SPE)
__giveup_spe(tsk);
@@ -511,10 +506,6 @@ void restore_math(struct pt_regs *regs)
{
unsigned long msr;
- /*
- * Syscall exit makes a similar initial check before branching
- * to restore_math. Keep them in synch.
- */
if (!msr_tm_active(regs->msr) &&
!current->thread.load_fp && !loadvec(current->thread))
return;
@@ -556,19 +547,13 @@ void save_all(struct task_struct *tsk)
msr_check_and_set(msr_all_available);
- /*
- * Saving the way the register space is in hardware, save_vsx boils
- * down to a save_fpu() and save_altivec()
- */
- if (usermsr & MSR_VSX) {
- save_vsx(tsk);
- } else {
- if (usermsr & MSR_FP)
- save_fpu(tsk);
+ WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
- if (usermsr & MSR_VEC)
- save_altivec(tsk);
- }
+ if (usermsr & MSR_FP)
+ save_fpu(tsk);
+
+ if (usermsr & MSR_VEC)
+ save_altivec(tsk);
if (usermsr & MSR_SPE)
__giveup_spe(tsk);
@@ -1395,13 +1380,13 @@ void show_regs(struct pt_regs * regs)
show_regs_print_info(KERN_DEFAULT);
- printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
+ printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
regs->nip, regs->link, regs->ctr);
printk("REGS: %p TRAP: %04lx %s (%s)\n",
regs, regs->trap, print_tainted(), init_utsname()->release);
- printk("MSR: "REG" ", regs->msr);
+ printk("MSR: "REG" ", regs->msr);
print_msr_bits(regs->msr);
- printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
+ pr_cont(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
trap = TRAP(regs);
if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
pr_cont("CFAR: "REG" ", regs->orig_gpr3);
@@ -1994,11 +1979,25 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
void notrace __ppc64_runlatch_on(void)
{
struct thread_info *ti = current_thread_info();
- unsigned long ctrl;
- ctrl = mfspr(SPRN_CTRLF);
- ctrl |= CTRL_RUNLATCH;
- mtspr(SPRN_CTRLT, ctrl);
+ if (cpu_has_feature(CPU_FTR_ARCH_206)) {
+ /*
+ * Least significant bit (RUN) is the only writable bit of
+ * the CTRL register, so we can avoid mfspr. 2.06 is not the
+ * earliest ISA where this is the case, but it's convenient.
+ */
+ mtspr(SPRN_CTRLT, CTRL_RUNLATCH);
+ } else {
+ unsigned long ctrl;
+
+ /*
+ * Some architectures (e.g., Cell) have writable fields other
+ * than RUN, so do the read-modify-write.
+ */
+ ctrl = mfspr(SPRN_CTRLF);
+ ctrl |= CTRL_RUNLATCH;
+ mtspr(SPRN_CTRLT, ctrl);
+ }
ti->local_flags |= _TLF_RUNLATCH;
}
@@ -2007,13 +2006,18 @@ void notrace __ppc64_runlatch_on(void)
void notrace __ppc64_runlatch_off(void)
{
struct thread_info *ti = current_thread_info();
- unsigned long ctrl;
ti->local_flags &= ~_TLF_RUNLATCH;
- ctrl = mfspr(SPRN_CTRLF);
- ctrl &= ~CTRL_RUNLATCH;
- mtspr(SPRN_CTRLT, ctrl);
+ if (cpu_has_feature(CPU_FTR_ARCH_206)) {
+ mtspr(SPRN_CTRLT, 0);
+ } else {
+ unsigned long ctrl;
+
+ ctrl = mfspr(SPRN_CTRLF);
+ ctrl &= ~CTRL_RUNLATCH;
+ mtspr(SPRN_CTRLT, ctrl);
+ }
}
#endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 613f79f03877..02190e90c7ae 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -177,6 +177,7 @@ struct platform_support {
bool hash_mmu;
bool radix_mmu;
bool radix_gtse;
+ bool xive;
};
/* Platform codes are now obsolete in the kernel. Now only used within this
@@ -1041,6 +1042,27 @@ static void __init prom_parse_mmu_model(u8 val,
}
}
+static void __init prom_parse_xive_model(u8 val,
+ struct platform_support *support)
+{
+ switch (val) {
+ case OV5_FEAT(OV5_XIVE_EITHER): /* Either Available */
+ prom_debug("XIVE - either mode supported\n");
+ support->xive = true;
+ break;
+ case OV5_FEAT(OV5_XIVE_EXPLOIT): /* Only Exploitation mode */
+ prom_debug("XIVE - exploitation mode supported\n");
+ support->xive = true;
+ break;
+ case OV5_FEAT(OV5_XIVE_LEGACY): /* Only Legacy mode */
+ prom_debug("XIVE - legacy mode supported\n");
+ break;
+ default:
+ prom_debug("Unknown xive support option: 0x%x\n", val);
+ break;
+ }
+}
+
static void __init prom_parse_platform_support(u8 index, u8 val,
struct platform_support *support)
{
@@ -1054,6 +1076,10 @@ static void __init prom_parse_platform_support(u8 index, u8 val,
support->radix_gtse = true;
}
break;
+ case OV5_INDX(OV5_XIVE_SUPPORT): /* Interrupt mode */
+ prom_parse_xive_model(val & OV5_FEAT(OV5_XIVE_SUPPORT),
+ support);
+ break;
}
}
@@ -1062,7 +1088,8 @@ static void __init prom_check_platform_support(void)
struct platform_support supported = {
.hash_mmu = false,
.radix_mmu = false,
- .radix_gtse = false
+ .radix_gtse = false,
+ .xive = false
};
int prop_len = prom_getproplen(prom.chosen,
"ibm,arch-vec-5-platform-support");
@@ -1095,6 +1122,11 @@ static void __init prom_check_platform_support(void)
/* We're probably on a legacy hypervisor */
prom_debug("Assuming legacy hash support\n");
}
+
+ if (supported.xive) {
+ prom_debug("Asking for XIVE\n");
+ ibm_architecture_vec.vec5.intarch = OV5_FEAT(OV5_XIVE_EXPLOIT);
+ }
}
static void __init prom_send_capabilities(void)
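For reference, a minimal userspace sketch of the kind of option-vector byte test prom_parse_xive_model() performs: mask out the relevant bits and decide whether the firmware offers XIVE exploitation mode. The mask and mode values below are invented for illustration only; they are not the real OV5_XIVE_* definitions.

#include <stdbool.h>
#include <stdint.h>

/* Invented stand-ins for the XIVE support bits (illustration only). */
#define XIVE_SUPPORT_MASK	0xc0
#define XIVE_MODE_LEGACY	0x00
#define XIVE_MODE_EXPLOIT	0x40
#define XIVE_MODE_EITHER	0x80

/* Decide whether exploitation mode can be requested from firmware. */
static bool firmware_offers_xive(uint8_t val)
{
	switch (val & XIVE_SUPPORT_MASK) {
	case XIVE_MODE_EXPLOIT:		/* exploitation mode only */
	case XIVE_MODE_EITHER:		/* either mode available */
		return true;
	default:			/* legacy only, or unknown */
		return false;
	}
}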
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 925a4ef90559..07cd22e35405 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -127,12 +127,19 @@ static void flush_tmregs_to_thread(struct task_struct *tsk)
* If task is not current, it will have been flushed already to
* its thread_struct during __switch_to().
*
- * A reclaim flushes ALL the state.
+ * A reclaim flushes ALL the state; otherwise, if we are not in a
+ * transaction, just save the live TM SPRs into the thread struct.
*/
- if (tsk == current && MSR_TM_SUSPENDED(mfmsr()))
- tm_reclaim_current(TM_CAUSE_SIGNAL);
+ if (tsk != current)
+ return;
+ if (MSR_TM_SUSPENDED(mfmsr())) {
+ tm_reclaim_current(TM_CAUSE_SIGNAL);
+ } else {
+ tm_enable();
+ tm_save_sprs(&(tsk->thread));
+ }
}
#else
static inline void flush_tmregs_to_thread(struct task_struct *tsk) { }
@@ -1587,11 +1594,8 @@ static int ppr_get(struct task_struct *target,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
- int ret;
-
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.ppr, 0, sizeof(u64));
- return ret;
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.ppr, 0, sizeof(u64));
}
static int ppr_set(struct task_struct *target,
@@ -1599,11 +1603,8 @@ static int ppr_set(struct task_struct *target,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
- int ret;
-
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.ppr, 0, sizeof(u64));
- return ret;
+ return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.ppr, 0, sizeof(u64));
}
static int dscr_get(struct task_struct *target,
@@ -1611,22 +1612,16 @@ static int dscr_get(struct task_struct *target,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
- int ret;
-
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.dscr, 0, sizeof(u64));
- return ret;
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.dscr, 0, sizeof(u64));
}
static int dscr_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
- int ret;
-
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.dscr, 0, sizeof(u64));
- return ret;
+ return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.dscr, 0, sizeof(u64));
}
#endif
#ifdef CONFIG_PPC_BOOK3S_64
@@ -1635,22 +1630,16 @@ static int tar_get(struct task_struct *target,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
- int ret;
-
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.tar, 0, sizeof(u64));
- return ret;
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.tar, 0, sizeof(u64));
}
static int tar_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
- int ret;
-
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.tar, 0, sizeof(u64));
- return ret;
+ return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.tar, 0, sizeof(u64));
}
static int ebb_active(struct task_struct *target,
diff --git a/arch/powerpc/kernel/reloc_64.S b/arch/powerpc/kernel/reloc_64.S
index d88736fbece6..e8cfc69f59ae 100644
--- a/arch/powerpc/kernel/reloc_64.S
+++ b/arch/powerpc/kernel/reloc_64.S
@@ -82,7 +82,7 @@ _GLOBAL(relocate)
6: blr
.balign 8
-p_dyn: .llong __dynamic_start - 0b
-p_rela: .llong __rela_dyn_start - 0b
-p_st: .llong _stext - 0b
+p_dyn: .8byte __dynamic_start - 0b
+p_rela: .8byte __rela_dyn_start - 0b
+p_st: .8byte _stext - 0b
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index 73f1934582c2..c2b148b1634a 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -91,26 +91,14 @@ static int rtas_pci_read_config(struct pci_bus *bus,
unsigned int devfn,
int where, int size, u32 *val)
{
- struct device_node *busdn, *dn;
struct pci_dn *pdn;
- bool found = false;
int ret;
- /* Search only direct children of the bus */
*val = 0xFFFFFFFF;
- busdn = pci_bus_to_OF_node(bus);
- for (dn = busdn->child; dn; dn = dn->sibling) {
- pdn = PCI_DN(dn);
- if (pdn && pdn->devfn == devfn
- && of_device_is_available(dn)) {
- found = true;
- break;
- }
- }
- if (!found)
- return PCIBIOS_DEVICE_NOT_FOUND;
+ pdn = pci_get_pdn_by_devfn(bus, devfn);
+ /* Validity of pdn is checked in here */
ret = rtas_read_config(pdn, where, size, val);
if (*val == EEH_IO_ERROR_VALUE(size) &&
eeh_dev_check_failure(pdn_to_eeh_dev(pdn)))
@@ -153,24 +141,11 @@ static int rtas_pci_write_config(struct pci_bus *bus,
unsigned int devfn,
int where, int size, u32 val)
{
- struct device_node *busdn, *dn;
struct pci_dn *pdn;
- bool found = false;
-
- /* Search only direct children of the bus */
- busdn = pci_bus_to_OF_node(bus);
- for (dn = busdn->child; dn; dn = dn->sibling) {
- pdn = PCI_DN(dn);
- if (pdn && pdn->devfn == devfn
- && of_device_is_available(dn)) {
- found = true;
- break;
- }
- }
- if (!found)
- return PCIBIOS_DEVICE_NOT_FOUND;
+ pdn = pci_get_pdn_by_devfn(bus, devfn);
+ /* Validity of pdn is checked in here. */
return rtas_write_config(pdn, where, size, val);
}
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 94a948207cd2..7de73589d8e2 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -481,7 +481,7 @@ void __init smp_setup_cpu_maps(void)
__be32 cpu_be;
int j, len;
- DBG(" * %s...\n", dn->full_name);
+ DBG(" * %pOF...\n", dn);
intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s",
&len);
@@ -704,30 +704,6 @@ int check_legacy_ioport(unsigned long base_port)
}
EXPORT_SYMBOL(check_legacy_ioport);
-static int ppc_panic_event(struct notifier_block *this,
- unsigned long event, void *ptr)
-{
- /*
- * If firmware-assisted dump has been registered then trigger
- * firmware-assisted dump and let firmware handle everything else.
- */
- crash_fadump(NULL, ptr);
- ppc_md.panic(ptr); /* May not return */
- return NOTIFY_DONE;
-}
-
-static struct notifier_block ppc_panic_block = {
- .notifier_call = ppc_panic_event,
- .priority = INT_MIN /* may not return; must be done last */
-};
-
-void __init setup_panic(void)
-{
- if (!ppc_md.panic)
- return;
- atomic_notifier_chain_register(&panic_notifier_list, &ppc_panic_block);
-}
-
#ifdef CONFIG_CHECK_CACHE_COHERENCY
/*
* For platforms that have configurable cache-coherency. This function
@@ -872,9 +848,6 @@ void __init setup_arch(char **cmdline_p)
/* Probe the machine type, establish ppc_md. */
probe_machine();
- /* Setup panic notifier if requested by the platform. */
- setup_panic();
-
/*
* Configure ppc_md.power_save (ppc32 only, 64-bit machines do
* it from their respective probe() function.
@@ -916,13 +889,6 @@ void __init setup_arch(char **cmdline_p)
/* Reserve large chunks of memory for use by CMA for KVM. */
kvm_cma_reserve();
- /*
- * Reserve any gigantic pages requested on the command line.
- * memblock needs to have been initialized by the time this is
- * called since this will reserve memory.
- */
- reserve_hugetlb_gpages();
-
klp_init_thread_info(&init_thread_info);
init_mm.start_code = (unsigned long)_stext;
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 2f88f6cf1a42..51ebc01fff52 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -98,6 +98,9 @@ extern unsigned int memset_nocache_branch; /* Insn to be replaced by NOP */
notrace void __init machine_init(u64 dt_ptr)
{
+ unsigned int *addr = &memset_nocache_branch;
+ unsigned long insn;
+
/* Configure static keys first, now that we're relocated. */
setup_feature_keys();
@@ -105,7 +108,9 @@ notrace void __init machine_init(u64 dt_ptr)
udbg_early_init();
patch_instruction((unsigned int *)&memcpy, PPC_INST_NOP);
- patch_instruction(&memset_nocache_branch, PPC_INST_NOP);
+
+ insn = create_cond_branch(addr, branch_target(addr), 0x820000);
+ patch_instruction(addr, insn); /* replace b by bne cr0 */
/* Do some early initialization based on the flat device tree */
early_init_devtree(__va(dt_ptr));
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index af23d4b576ec..b89c6aac48c9 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -564,6 +564,9 @@ static __init u64 safe_stack_limit(void)
/* Other BookE, we assume the first GB is bolted */
return 1ul << 30;
#else
+ if (early_radix_enabled())
+ return ULONG_MAX;
+
/* BookS, the first segment is bolted */
if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
return 1UL << SID_SHIFT_1T;
@@ -578,7 +581,8 @@ void __init irqstack_early_init(void)
/*
* Interrupt stacks must be in the first segment since we
- * cannot afford to take SLB misses on them.
+ * cannot afford to take SLB misses on them. They are not
+ * accessed in realmode.
*/
for_each_possible_cpu(i) {
softirq_ctx[i] = (struct thread_info *)
@@ -649,8 +653,9 @@ void __init emergency_stack_init(void)
* aligned.
*
* Since we use these as temporary stacks during secondary CPU
- * bringup, we need to get at them in real mode. This means they
- * must also be within the RMO region.
+ * bringup, machine check, system reset, and HMI, we need to get
+ * at them in real mode. This means they must also be within the RMO
+ * region.
*
* The IRQ stacks allocated elsewhere in this file are zeroed and
* initialized in kernel/irq.c. These are initialized here in order
@@ -751,3 +756,31 @@ unsigned long memory_block_size_bytes(void)
struct ppc_pci_io ppc_pci_io;
EXPORT_SYMBOL(ppc_pci_io);
#endif
+
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
+u64 hw_nmi_get_sample_period(int watchdog_thresh)
+{
+ return ppc_proc_freq * watchdog_thresh;
+}
+#endif
+
+/*
+ * The perf based hardlockup detector breaks PMU event based branches, so
+ * disable it by default. Book3S has a soft-nmi hardlockup detector based
+ * on the decrementer interrupt, so it does not suffer from this problem.
+ *
+ * It is likely to get false positives in VM guests, so disable it there
+ * by default too.
+ */
+static int __init disable_hardlockup_detector(void)
+{
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
+ hardlockup_detector_disable();
+#else
+ if (firmware_has_feature(FW_FEATURE_LPAR))
+ hardlockup_detector_disable();
+#endif
+
+ return 0;
+}
+early_initcall(disable_hardlockup_detector);
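
As a sanity check on the helper above: the sample period is simply the processor frequency multiplied by the watchdog threshold in seconds, i.e. a raw cycle count. A standalone sketch with example numbers (the 3.5 GHz figure is an assumption, not a fixed platform value):

#include <stdio.h>
#include <stdint.h>

/* Mirrors ppc_proc_freq * watchdog_thresh: a count of CPU cycles. */
static uint64_t sample_period(uint64_t proc_freq_hz, int watchdog_thresh_s)
{
	return proc_freq_hz * watchdog_thresh_s;
}

int main(void)
{
	/* e.g. a 3.5 GHz core with the default 10 second threshold */
	printf("%llu cycles\n",
	       (unsigned long long)sample_period(3500000000ULL, 10));
	return 0;
}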
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 997c88d54acf..e0a4c1f82e25 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -75,9 +75,11 @@ static DEFINE_PER_CPU(int, cpu_state) = { 0 };
struct thread_info *secondary_ti;
DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
+DEFINE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
+EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);
/* SMP operations for this machine */
@@ -351,7 +353,7 @@ static void nmi_ipi_lock_start(unsigned long *flags)
hard_irq_disable();
while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
raw_local_irq_restore(*flags);
- cpu_relax();
+ spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
raw_local_irq_save(*flags);
hard_irq_disable();
}
@@ -360,7 +362,7 @@ static void nmi_ipi_lock_start(unsigned long *flags)
static void nmi_ipi_lock(void)
{
while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
- cpu_relax();
+ spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
}
static void nmi_ipi_unlock(void)
@@ -475,7 +477,7 @@ int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
nmi_ipi_lock_start(&flags);
while (nmi_ipi_busy_count) {
nmi_ipi_unlock_end(&flags);
- cpu_relax();
+ spin_until_cond(nmi_ipi_busy_count == 0);
nmi_ipi_lock_start(&flags);
}
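
The spin_until_cond() conversions above implement the classic test-and-test-and-set idea: spin on a plain read and only retry the atomic operation once the lock word looks free, which keeps the cache line shared while waiting. A minimal userspace analogue using C11 atomics (the kernel versions additionally juggle irq state, which is omitted here):

#include <stdatomic.h>

static atomic_int lock_word;

static void ttas_lock(void)
{
	while (atomic_exchange_explicit(&lock_word, 1, memory_order_acquire)) {
		/* Cheap read-only spin: no exclusive cache-line ownership needed. */
		while (atomic_load_explicit(&lock_word, memory_order_relaxed))
			;
	}
}

static void ttas_unlock(void)
{
	atomic_store_explicit(&lock_word, 0, memory_order_release);
}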
@@ -571,6 +573,26 @@ static void smp_store_cpu_info(int id)
#endif
}
+/*
+ * Relationships between CPUs are maintained in a set of per-cpu cpumasks, so
+ * rather than just passing around the cpumask we pass around a function that
+ * returns that cpumask for the given CPU.
+ */
+static void set_cpus_related(int i, int j, struct cpumask *(*get_cpumask)(int))
+{
+ cpumask_set_cpu(i, get_cpumask(j));
+ cpumask_set_cpu(j, get_cpumask(i));
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void set_cpus_unrelated(int i, int j,
+ struct cpumask *(*get_cpumask)(int))
+{
+ cpumask_clear_cpu(i, get_cpumask(j));
+ cpumask_clear_cpu(j, get_cpumask(i));
+}
+#endif
+
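
A standalone sketch of the accessor-function pattern used by set_cpus_related()/set_cpus_unrelated(), with plain 64-bit words standing in for real cpumasks (all names below are invented for illustration):

#include <stdint.h>

#define MAX_CPUS 64

static uint64_t sibling_mask[MAX_CPUS];
static uint64_t l2_cache_mask[MAX_CPUS];

static uint64_t *get_sibling_mask(int cpu) { return &sibling_mask[cpu]; }
static uint64_t *get_l2_cache_mask(int cpu) { return &l2_cache_mask[cpu]; }

/* Mark i and j as related in whichever per-CPU mask the accessor selects. */
static void set_related(int i, int j, uint64_t *(*get_mask)(int))
{
	*get_mask(j) |= 1ULL << i;
	*get_mask(i) |= 1ULL << j;
}

/* Usage: set_related(0, 1, get_sibling_mask);
 *        set_related(0, 4, get_l2_cache_mask); */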
void __init smp_prepare_cpus(unsigned int max_cpus)
{
unsigned int cpu;
@@ -590,6 +612,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
for_each_possible_cpu(cpu) {
zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
GFP_KERNEL, cpu_to_node(cpu));
+ zalloc_cpumask_var_node(&per_cpu(cpu_l2_cache_map, cpu),
+ GFP_KERNEL, cpu_to_node(cpu));
zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
GFP_KERNEL, cpu_to_node(cpu));
/*
@@ -602,7 +626,9 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
}
}
+ /* Init the cpumasks so the boot CPU is related to itself */
cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
+ cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
if (smp_ops && smp_ops->probe)
@@ -828,33 +854,6 @@ int cpu_first_thread_of_core(int core)
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
-static void traverse_siblings_chip_id(int cpu, bool add, int chipid)
-{
- const struct cpumask *mask;
- struct device_node *np;
- int i, plen;
- const __be32 *prop;
-
- mask = add ? cpu_online_mask : cpu_present_mask;
- for_each_cpu(i, mask) {
- np = of_get_cpu_node(i, NULL);
- if (!np)
- continue;
- prop = of_get_property(np, "ibm,chip-id", &plen);
- if (prop && plen == sizeof(int) &&
- of_read_number(prop, 1) == chipid) {
- if (add) {
- cpumask_set_cpu(cpu, cpu_core_mask(i));
- cpumask_set_cpu(i, cpu_core_mask(cpu));
- } else {
- cpumask_clear_cpu(cpu, cpu_core_mask(i));
- cpumask_clear_cpu(i, cpu_core_mask(cpu));
- }
- }
- of_node_put(np);
- }
-}
-
/* Must be called when no change can occur to cpu_present_mask,
* i.e. during cpu online or offline.
*/
@@ -877,52 +876,93 @@ static struct device_node *cpu_to_l2cache(int cpu)
return cache;
}
-static void traverse_core_siblings(int cpu, bool add)
+static bool update_mask_by_l2(int cpu, struct cpumask *(*mask_fn)(int))
{
struct device_node *l2_cache, *np;
- const struct cpumask *mask;
- int i, chip, plen;
- const __be32 *prop;
-
- /* First see if we have ibm,chip-id properties in cpu nodes */
- np = of_get_cpu_node(cpu, NULL);
- if (np) {
- chip = -1;
- prop = of_get_property(np, "ibm,chip-id", &plen);
- if (prop && plen == sizeof(int))
- chip = of_read_number(prop, 1);
- of_node_put(np);
- if (chip >= 0) {
- traverse_siblings_chip_id(cpu, add, chip);
- return;
- }
- }
+ int i;
l2_cache = cpu_to_l2cache(cpu);
- mask = add ? cpu_online_mask : cpu_present_mask;
- for_each_cpu(i, mask) {
+ if (!l2_cache)
+ return false;
+
+ for_each_cpu(i, cpu_online_mask) {
+ /*
+ * when updating the masks the current CPU has not yet been marked
+ * online, but we still need to update the cache masks
+ */
np = cpu_to_l2cache(i);
if (!np)
continue;
- if (np == l2_cache) {
- if (add) {
- cpumask_set_cpu(cpu, cpu_core_mask(i));
- cpumask_set_cpu(i, cpu_core_mask(cpu));
- } else {
- cpumask_clear_cpu(cpu, cpu_core_mask(i));
- cpumask_clear_cpu(i, cpu_core_mask(cpu));
- }
- }
+
+ if (np == l2_cache)
+ set_cpus_related(cpu, i, mask_fn);
+
of_node_put(np);
}
of_node_put(l2_cache);
+
+ return true;
}
+#ifdef CONFIG_HOTPLUG_CPU
+static void remove_cpu_from_masks(int cpu)
+{
+ int i;
+
+ /* NB: cpu_core_mask is a superset of the others */
+ for_each_cpu(i, cpu_core_mask(cpu)) {
+ set_cpus_unrelated(cpu, i, cpu_core_mask);
+ set_cpus_unrelated(cpu, i, cpu_l2_cache_mask);
+ set_cpus_unrelated(cpu, i, cpu_sibling_mask);
+ }
+}
+#endif
+
+static void add_cpu_to_masks(int cpu)
+{
+ int first_thread = cpu_first_thread_sibling(cpu);
+ int chipid = cpu_to_chip_id(cpu);
+ int i;
+
+ /*
+ * This CPU will not be in the online mask yet so we need to manually
+ * add it to its own thread sibling mask.
+ */
+ cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
+
+ for (i = first_thread; i < first_thread + threads_per_core; i++)
+ if (cpu_online(i))
+ set_cpus_related(i, cpu, cpu_sibling_mask);
+
+ /*
+ * Copy the thread sibling mask into the cache sibling mask
+ * and mark any CPUs that share an L2 with this CPU.
+ */
+ for_each_cpu(i, cpu_sibling_mask(cpu))
+ set_cpus_related(cpu, i, cpu_l2_cache_mask);
+ update_mask_by_l2(cpu, cpu_l2_cache_mask);
+
+ /*
+ * Copy the cache sibling mask into core sibling mask and mark
+ * any CPUs on the same chip as this CPU.
+ */
+ for_each_cpu(i, cpu_l2_cache_mask(cpu))
+ set_cpus_related(cpu, i, cpu_core_mask);
+
+ if (chipid == -1)
+ return;
+
+ for_each_cpu(i, cpu_online_mask)
+ if (cpu_to_chip_id(i) == chipid)
+ set_cpus_related(cpu, i, cpu_core_mask);
+}
+
+static bool shared_caches;
+
/* Activate a secondary processor. */
void start_secondary(void *unused)
{
unsigned int cpu = smp_processor_id();
- int i, base;
mmgrab(&init_mm);
current->active_mm = &init_mm;
@@ -945,22 +985,15 @@ void start_secondary(void *unused)
vdso_getcpu_init();
#endif
- /* Update sibling maps */
- base = cpu_first_thread_sibling(cpu);
- for (i = 0; i < threads_per_core; i++) {
- if (cpu_is_offline(base + i) && (cpu != base + i))
- continue;
- cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
- cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));
+ /* Update topology CPU masks */
+ add_cpu_to_masks(cpu);
- /* cpu_core_map should be a superset of
- * cpu_sibling_map even if we don't have cache
- * information, so update the former here, too.
- */
- cpumask_set_cpu(cpu, cpu_core_mask(base + i));
- cpumask_set_cpu(base + i, cpu_core_mask(cpu));
- }
- traverse_core_siblings(cpu, true);
+ /*
+ * Check for any shared caches. Note that this must be done on a
+ * per-core basis because one core in the pair might be disabled.
+ */
+ if (!cpumask_equal(cpu_l2_cache_mask(cpu), cpu_sibling_mask(cpu)))
+ shared_caches = true;
set_numa_node(numa_cpu_lookup_table[cpu]);
set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
@@ -1003,35 +1036,65 @@ static struct sched_domain_topology_level powerpc_topology[] = {
{ NULL, },
};
-static __init long smp_setup_cpu_workfn(void *data __always_unused)
+/*
+ * P9 has a slightly odd architecture where pairs of cores share an L2 cache.
+ * This topology makes it *much* cheaper to migrate tasks between adjacent cores
+ * since the migrated task remains cache hot. We want to take advantage of this
+ * at the scheduler level so an extra topology level is required.
+ */
+static int powerpc_shared_cache_flags(void)
{
- smp_ops->setup_cpu(boot_cpuid);
- return 0;
+ return SD_SHARE_PKG_RESOURCES;
}
+/*
+ * We can't just pass cpu_l2_cache_mask() directly because it
+ * returns a non-const pointer and the compiler barfs on that.
+ */
+static const struct cpumask *shared_cache_mask(int cpu)
+{
+ return cpu_l2_cache_mask(cpu);
+}
+
+static struct sched_domain_topology_level power9_topology[] = {
+#ifdef CONFIG_SCHED_SMT
+ { cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
+#endif
+ { shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) },
+ { cpu_cpu_mask, SD_INIT_NAME(DIE) },
+ { NULL, },
+};
+
void __init smp_cpus_done(unsigned int max_cpus)
{
/*
- * We want the setup_cpu() here to be called on the boot CPU, but
- * init might run on any CPU, so make sure it's invoked on the boot
- * CPU.
+ * We are running pinned to the boot CPU, see rest_init().
*/
if (smp_ops && smp_ops->setup_cpu)
- work_on_cpu_safe(boot_cpuid, smp_setup_cpu_workfn, NULL);
+ smp_ops->setup_cpu(boot_cpuid);
if (smp_ops && smp_ops->bringup_done)
smp_ops->bringup_done();
dump_numa_cpu_topology();
- set_sched_topology(powerpc_topology);
+ /*
+ * If any CPU detects that it's sharing a cache with another CPU then
+ * use the deeper topology that is aware of this sharing.
+ */
+ if (shared_caches) {
+ pr_info("Using shared cache scheduler topology\n");
+ set_sched_topology(power9_topology);
+ } else {
+ pr_info("Using standard scheduler topology\n");
+ set_sched_topology(powerpc_topology);
+ }
}
#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
int cpu = smp_processor_id();
- int base, i;
int err;
if (!smp_ops->cpu_disable)
@@ -1042,14 +1105,7 @@ int __cpu_disable(void)
return err;
/* Update sibling maps */
- base = cpu_first_thread_sibling(cpu);
- for (i = 0; i < threads_per_core && base + i < nr_cpu_ids; i++) {
- cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
- cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
- cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
- cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
- }
- traverse_core_siblings(cpu, false);
+ remove_cpu_from_masks(cpu);
return 0;
}
diff --git a/arch/powerpc/kernel/swsusp_asm64.S b/arch/powerpc/kernel/swsusp_asm64.S
index 988f38dced0f..82d8aae81c6a 100644
--- a/arch/powerpc/kernel/swsusp_asm64.S
+++ b/arch/powerpc/kernel/swsusp_asm64.S
@@ -179,7 +179,7 @@ nothing_to_copy:
sld r3, r3, r0
li r0, 0
1:
- dcbf r0,r3
+ dcbf 0,r3
addi r3,r3,0x20
bdnz 1b
diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S
index 4d6b1d3a747f..7ccb7f81f8db 100644
--- a/arch/powerpc/kernel/systbl.S
+++ b/arch/powerpc/kernel/systbl.S
@@ -17,13 +17,13 @@
#include <asm/ppc_asm.h>
#ifdef CONFIG_PPC64
-#define SYSCALL(func) .llong DOTSYM(sys_##func),DOTSYM(sys_##func)
-#define COMPAT_SYS(func) .llong DOTSYM(sys_##func),DOTSYM(compat_sys_##func)
-#define PPC_SYS(func) .llong DOTSYM(ppc_##func),DOTSYM(ppc_##func)
-#define OLDSYS(func) .llong DOTSYM(sys_ni_syscall),DOTSYM(sys_ni_syscall)
-#define SYS32ONLY(func) .llong DOTSYM(sys_ni_syscall),DOTSYM(compat_sys_##func)
-#define PPC64ONLY(func) .llong DOTSYM(ppc_##func),DOTSYM(sys_ni_syscall)
-#define SYSX(f, f3264, f32) .llong DOTSYM(f),DOTSYM(f3264)
+#define SYSCALL(func) .8byte DOTSYM(sys_##func),DOTSYM(sys_##func)
+#define COMPAT_SYS(func) .8byte DOTSYM(sys_##func),DOTSYM(compat_sys_##func)
+#define PPC_SYS(func) .8byte DOTSYM(ppc_##func),DOTSYM(ppc_##func)
+#define OLDSYS(func) .8byte DOTSYM(sys_ni_syscall),DOTSYM(sys_ni_syscall)
+#define SYS32ONLY(func) .8byte DOTSYM(sys_ni_syscall),DOTSYM(compat_sys_##func)
+#define PPC64ONLY(func) .8byte DOTSYM(ppc_##func),DOTSYM(sys_ni_syscall)
+#define SYSX(f, f3264, f32) .8byte DOTSYM(f),DOTSYM(f3264)
#else
#define SYSCALL(func) .long sys_##func
#define COMPAT_SYS(func) .long sys_##func
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index bfcfd9ef09f2..ec74e203ee04 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -114,6 +114,28 @@ static void pmac_backlight_unblank(void)
static inline void pmac_backlight_unblank(void) { }
#endif
+/*
+ * If oops/die is expected to crash the machine, return true here.
+ *
+ * This should not be expected to be 100% accurate; there may be
+ * notifiers registered or other unexpected conditions that may bring
+ * down the kernel. Or if the current process in the kernel is holding
+ * locks or has other critical state, the kernel may become effectively
+ * unusable anyway.
+ */
+bool die_will_crash(void)
+{
+ if (should_fadump_crash())
+ return true;
+ if (kexec_should_crash(current))
+ return true;
+ if (in_interrupt() || panic_on_oops ||
+ !current->pid || is_global_init(current))
+ return true;
+
+ return false;
+}
+
static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;
@@ -162,21 +184,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs,
crash_fadump(regs, "die oops");
- /*
- * A system reset (0x100) is a request to dump, so we always send
- * it through the crashdump code.
- */
- if (kexec_should_crash(current) || (TRAP(regs) == 0x100)) {
+ if (kexec_should_crash(current))
crash_kexec(regs);
- /*
- * We aren't the primary crash CPU. We need to send it
- * to a holding pattern to avoid it ending up in the panic
- * code.
- */
- crash_kexec_secondary(regs);
- }
-
if (!signr)
return;
@@ -202,18 +212,25 @@ NOKPROBE_SYMBOL(oops_end);
static int __die(const char *str, struct pt_regs *regs, long err)
{
printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
-#ifdef CONFIG_PREEMPT
- printk("PREEMPT ");
-#endif
-#ifdef CONFIG_SMP
- printk("SMP NR_CPUS=%d ", NR_CPUS);
-#endif
+
+ if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
+ printk("LE ");
+ else
+ printk("BE ");
+
+ if (IS_ENABLED(CONFIG_PREEMPT))
+ pr_cont("PREEMPT ");
+
+ if (IS_ENABLED(CONFIG_SMP))
+ pr_cont("SMP NR_CPUS=%d ", NR_CPUS);
+
if (debug_pagealloc_enabled())
- printk("DEBUG_PAGEALLOC ");
-#ifdef CONFIG_NUMA
- printk("NUMA ");
-#endif
- printk("%s\n", ppc_md.name ? ppc_md.name : "");
+ pr_cont("DEBUG_PAGEALLOC ");
+
+ if (IS_ENABLED(CONFIG_NUMA))
+ pr_cont("NUMA ");
+
+ pr_cont("%s\n", ppc_md.name ? ppc_md.name : "");
if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP)
return 1;
@@ -288,23 +305,52 @@ void system_reset_exception(struct pt_regs *regs)
if (!nested)
nmi_enter();
+ __this_cpu_inc(irq_stat.sreset_irqs);
+
/* See if any machine dependent calls */
if (ppc_md.system_reset_exception) {
if (ppc_md.system_reset_exception(regs))
goto out;
}
- die("System Reset", regs, SIGABRT);
+ if (debugger(regs))
+ goto out;
+
+ /*
+ * A system reset is a request to dump, so we always send
+ * it through the crashdump code (if fadump or kdump are
+ * registered).
+ */
+ crash_fadump(regs, "System Reset");
+
+ crash_kexec(regs);
+
+ /*
+ * We aren't the primary crash CPU. We need to send it
+ * to a holding pattern to avoid it ending up in the panic
+ * code.
+ */
+ crash_kexec_secondary(regs);
+
+ /*
+ * No debugger or crash dump registered, print logs then
+ * panic.
+ */
+ __die("System Reset", regs, SIGABRT);
+
+ mdelay(2*MSEC_PER_SEC); /* Wait a little while for others to print */
+ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
+ nmi_panic(regs, "System Reset");
out:
#ifdef CONFIG_PPC_BOOK3S_64
BUG_ON(get_paca()->in_nmi == 0);
if (get_paca()->in_nmi > 1)
- panic("Unrecoverable nested System Reset");
+ nmi_panic(regs, "Unrecoverable nested System Reset");
#endif
/* Must die if the interrupt is not recoverable */
if (!(regs->msr & MSR_RI))
- panic("Unrecoverable System Reset");
+ nmi_panic(regs, "Unrecoverable System Reset");
if (!nested)
nmi_exit();
@@ -312,39 +358,6 @@ out:
/* What should we do here? We could issue a shutdown or hard reset. */
}
-#ifdef CONFIG_PPC64
-/*
- * This function is called in real mode. Strictly no printk's please.
- *
- * regs->nip and regs->msr contains srr0 and ssr1.
- */
-long machine_check_early(struct pt_regs *regs)
-{
- long handled = 0;
-
- __this_cpu_inc(irq_stat.mce_exceptions);
-
- if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
- handled = cur_cpu_spec->machine_check_early(regs);
- return handled;
-}
-
-long hmi_exception_realmode(struct pt_regs *regs)
-{
- __this_cpu_inc(irq_stat.hmi_exceptions);
-
- wait_for_subcore_guest_exit();
-
- if (ppc_md.hmi_exception_early)
- ppc_md.hmi_exception_early(regs);
-
- wait_for_tb_resync();
-
- return 0;
-}
-
-#endif
-
/*
* I/O accesses can cause machine checks on powermacs.
* Check if the NIP corresponds to the address of a sync
@@ -397,11 +410,6 @@ static inline int check_io_access(struct pt_regs *regs)
/* On 4xx, the reason for the machine check or program exception
is in the ESR. */
#define get_reason(regs) ((regs)->dsisr)
-#ifndef CONFIG_FSL_BOOKE
-#define get_mc_reason(regs) ((regs)->dsisr)
-#else
-#define get_mc_reason(regs) (mfspr(SPRN_MCSR))
-#endif
#define REASON_FP ESR_FP
#define REASON_ILLEGAL (ESR_PIL | ESR_PUO)
#define REASON_PRIVILEGED ESR_PPR
@@ -415,108 +423,17 @@ static inline int check_io_access(struct pt_regs *regs)
/* On non-4xx, the reason for the machine check or program
exception is in the MSR. */
#define get_reason(regs) ((regs)->msr)
-#define get_mc_reason(regs) ((regs)->msr)
-#define REASON_TM 0x200000
-#define REASON_FP 0x100000
-#define REASON_ILLEGAL 0x80000
-#define REASON_PRIVILEGED 0x40000
-#define REASON_TRAP 0x20000
+#define REASON_TM SRR1_PROGTM
+#define REASON_FP SRR1_PROGFPE
+#define REASON_ILLEGAL SRR1_PROGILL
+#define REASON_PRIVILEGED SRR1_PROGPRIV
+#define REASON_TRAP SRR1_PROGTRAP
#define single_stepping(regs) ((regs)->msr & MSR_SE)
#define clear_single_step(regs) ((regs)->msr &= ~MSR_SE)
#endif
-#if defined(CONFIG_4xx)
-int machine_check_4xx(struct pt_regs *regs)
-{
- unsigned long reason = get_mc_reason(regs);
-
- if (reason & ESR_IMCP) {
- printk("Instruction");
- mtspr(SPRN_ESR, reason & ~ESR_IMCP);
- } else
- printk("Data");
- printk(" machine check in kernel mode.\n");
-
- return 0;
-}
-
-int machine_check_440A(struct pt_regs *regs)
-{
- unsigned long reason = get_mc_reason(regs);
-
- printk("Machine check in kernel mode.\n");
- if (reason & ESR_IMCP){
- printk("Instruction Synchronous Machine Check exception\n");
- mtspr(SPRN_ESR, reason & ~ESR_IMCP);
- }
- else {
- u32 mcsr = mfspr(SPRN_MCSR);
- if (mcsr & MCSR_IB)
- printk("Instruction Read PLB Error\n");
- if (mcsr & MCSR_DRB)
- printk("Data Read PLB Error\n");
- if (mcsr & MCSR_DWB)
- printk("Data Write PLB Error\n");
- if (mcsr & MCSR_TLBP)
- printk("TLB Parity Error\n");
- if (mcsr & MCSR_ICP){
- flush_instruction_cache();
- printk("I-Cache Parity Error\n");
- }
- if (mcsr & MCSR_DCSP)
- printk("D-Cache Search Parity Error\n");
- if (mcsr & MCSR_DCFP)
- printk("D-Cache Flush Parity Error\n");
- if (mcsr & MCSR_IMPE)
- printk("Machine Check exception is imprecise\n");
-
- /* Clear MCSR */
- mtspr(SPRN_MCSR, mcsr);
- }
- return 0;
-}
-
-int machine_check_47x(struct pt_regs *regs)
-{
- unsigned long reason = get_mc_reason(regs);
- u32 mcsr;
-
- printk(KERN_ERR "Machine check in kernel mode.\n");
- if (reason & ESR_IMCP) {
- printk(KERN_ERR
- "Instruction Synchronous Machine Check exception\n");
- mtspr(SPRN_ESR, reason & ~ESR_IMCP);
- return 0;
- }
- mcsr = mfspr(SPRN_MCSR);
- if (mcsr & MCSR_IB)
- printk(KERN_ERR "Instruction Read PLB Error\n");
- if (mcsr & MCSR_DRB)
- printk(KERN_ERR "Data Read PLB Error\n");
- if (mcsr & MCSR_DWB)
- printk(KERN_ERR "Data Write PLB Error\n");
- if (mcsr & MCSR_TLBP)
- printk(KERN_ERR "TLB Parity Error\n");
- if (mcsr & MCSR_ICP) {
- flush_instruction_cache();
- printk(KERN_ERR "I-Cache Parity Error\n");
- }
- if (mcsr & MCSR_DCSP)
- printk(KERN_ERR "D-Cache Search Parity Error\n");
- if (mcsr & PPC47x_MCSR_GPR)
- printk(KERN_ERR "GPR Parity Error\n");
- if (mcsr & PPC47x_MCSR_FPR)
- printk(KERN_ERR "FPR Parity Error\n");
- if (mcsr & PPC47x_MCSR_IPR)
- printk(KERN_ERR "Machine Check exception is imprecise\n");
-
- /* Clear MCSR */
- mtspr(SPRN_MCSR, mcsr);
-
- return 0;
-}
-#elif defined(CONFIG_E500)
+#if defined(CONFIG_E500)
int machine_check_e500mc(struct pt_regs *regs)
{
unsigned long mcsr = mfspr(SPRN_MCSR);
@@ -618,7 +535,7 @@ silent_out:
int machine_check_e500(struct pt_regs *regs)
{
- unsigned long reason = get_mc_reason(regs);
+ unsigned long reason = mfspr(SPRN_MCSR);
if (reason & MCSR_BUS_RBERR) {
if (fsl_rio_mcheck_exception(regs))
@@ -665,7 +582,7 @@ int machine_check_generic(struct pt_regs *regs)
#elif defined(CONFIG_E200)
int machine_check_e200(struct pt_regs *regs)
{
- unsigned long reason = get_mc_reason(regs);
+ unsigned long reason = mfspr(SPRN_MCSR);
printk("Machine check in kernel mode.\n");
printk("Caused by (from MCSR=%lx): ", reason);
@@ -687,35 +604,10 @@ int machine_check_e200(struct pt_regs *regs)
return 0;
}
-#elif defined(CONFIG_PPC_8xx)
-int machine_check_8xx(struct pt_regs *regs)
-{
- unsigned long reason = get_mc_reason(regs);
-
- pr_err("Machine check in kernel mode.\n");
- pr_err("Caused by (from SRR1=%lx): ", reason);
- if (reason & 0x40000000)
- pr_err("Fetch error at address %lx\n", regs->nip);
- else
- pr_err("Data access error at address %lx\n", regs->dar);
-
-#ifdef CONFIG_PCI
- /* the qspan pci read routines can cause machine checks -- Cort
- *
- * yuck !!! that totally needs to go away ! There are better ways
- * to deal with that than having a wart in the mcheck handler.
- * -- BenH
- */
- bad_page_fault(regs, regs->dar, SIGBUS);
- return 1;
-#else
- return 0;
-#endif
-}
-#else
+#elif defined(CONFIG_PPC32)
int machine_check_generic(struct pt_regs *regs)
{
- unsigned long reason = get_mc_reason(regs);
+ unsigned long reason = regs->msr;
printk("Machine check in kernel mode.\n");
printk("Caused by (from SRR1=%lx): ", reason);
@@ -752,10 +644,14 @@ int machine_check_generic(struct pt_regs *regs)
void machine_check_exception(struct pt_regs *regs)
{
- enum ctx_state prev_state = exception_enter();
int recover = 0;
+ bool nested = in_nmi();
+ if (!nested)
+ nmi_enter();
- __this_cpu_inc(irq_stat.mce_exceptions);
+ /* 64s accounts the mce in machine_check_early when in HVMODE */
+ if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) || !cpu_has_feature(CPU_FTR_HVMODE))
+ __this_cpu_inc(irq_stat.mce_exceptions);
add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
@@ -783,10 +679,11 @@ void machine_check_exception(struct pt_regs *regs)
/* Must die if the interrupt is not recoverable */
if (!(regs->msr & MSR_RI))
- panic("Unrecoverable Machine check");
+ nmi_panic(regs, "Unrecoverable Machine check");
bail:
- exception_exit(prev_state);
+ if (!nested)
+ nmi_exit();
}
void SMIException(struct pt_regs *regs)
@@ -1672,24 +1569,6 @@ void performance_monitor_exception(struct pt_regs *regs)
perf_irq(regs);
}
-#ifdef CONFIG_8xx
-void SoftwareEmulation(struct pt_regs *regs)
-{
- CHECK_FULL_REGS(regs);
-
- if (!user_mode(regs)) {
- debugger(regs);
- die("Kernel Mode Unimplemented Instruction or SW FPU Emulation",
- regs, SIGFPE);
- }
-
- if (!emulate_math(regs))
- return;
-
- _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
-}
-#endif /* CONFIG_8xx */
-
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
{
diff --git a/arch/powerpc/kernel/uprobes.c b/arch/powerpc/kernel/uprobes.c
index 003b20964ea0..5d105b8eeece 100644
--- a/arch/powerpc/kernel/uprobes.c
+++ b/arch/powerpc/kernel/uprobes.c
@@ -205,3 +205,12 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
return orig_ret_vaddr;
}
+
+bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
+ struct pt_regs *regs)
+{
+ if (ctx == RP_CHECK_CHAIN_CALL)
+ return regs->gpr[1] <= ret->stack;
+ else
+ return regs->gpr[1] < ret->stack;
+}
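
The check above compares the current stack pointer (r1, i.e. regs->gpr[1]) against the stack value recorded when the return probe was planted; since the stack grows downwards, a frame is dead once the SP has moved above it, and chained calls that reuse the exact same SP are treated as still alive. A hedged standalone restatement with ordinary variables:

#include <stdbool.h>

/* cur_sp stands in for regs->gpr[1], recorded_sp for ret->stack. */
static bool uretprobe_frame_alive(unsigned long cur_sp,
				  unsigned long recorded_sp,
				  bool chain_call)
{
	/* For chained calls the new frame may reuse the exact same SP,
	 * so equality still counts as alive. */
	return chain_call ? cur_sp <= recorded_sp : cur_sp < recorded_sp;
}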
diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S
index 6b2b69616e77..769c2624e0a6 100644
--- a/arch/powerpc/kernel/vdso32/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso32/gettimeofday.S
@@ -232,15 +232,9 @@ __do_get_tspec:
lwz r6,(CFG_TB_ORIG_STAMP+4)(r9)
/* Get a stable TB value */
-#ifdef CONFIG_8xx
-2: mftbu r3
- mftbl r4
- mftbu r0
-#else
-2: mfspr r3, SPRN_TBRU
- mfspr r4, SPRN_TBRL
- mfspr r0, SPRN_TBRU
-#endif
+2: MFTBU(r3)
+ MFTBL(r4)
+ MFTBU(r0)
cmplw cr0,r3,r0
bne- 2b
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index b1a250560198..882628fa6987 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -8,7 +8,7 @@
#include <asm/cache.h>
#include <asm/thread_info.h>
-#ifdef CONFIG_STRICT_KERNEL_RWX
+#if defined(CONFIG_STRICT_KERNEL_RWX) && !defined(CONFIG_PPC32)
#define STRICT_ALIGN_SIZE (1 << 24)
#else
#define STRICT_ALIGN_SIZE PAGE_SIZE
diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c
index b67f8b03a32d..2f6eadd9408d 100644
--- a/arch/powerpc/kernel/watchdog.c
+++ b/arch/powerpc/kernel/watchdog.c
@@ -71,15 +71,20 @@ static inline void wd_smp_lock(unsigned long *flags)
* This may be called from low level interrupt handlers at some
* point in future.
*/
- local_irq_save(*flags);
- while (unlikely(test_and_set_bit_lock(0, &__wd_smp_lock)))
- cpu_relax();
+ raw_local_irq_save(*flags);
+ hard_irq_disable(); /* Make it soft-NMI safe */
+ while (unlikely(test_and_set_bit_lock(0, &__wd_smp_lock))) {
+ raw_local_irq_restore(*flags);
+ spin_until_cond(!test_bit(0, &__wd_smp_lock));
+ raw_local_irq_save(*flags);
+ hard_irq_disable();
+ }
}
static inline void wd_smp_unlock(unsigned long *flags)
{
clear_bit_unlock(0, &__wd_smp_lock);
- local_irq_restore(*flags);
+ raw_local_irq_restore(*flags);
}
static void wd_lockup_ipi(struct pt_regs *regs)
@@ -96,10 +101,10 @@ static void wd_lockup_ipi(struct pt_regs *regs)
nmi_panic(regs, "Hard LOCKUP");
}
-static void set_cpu_stuck(int cpu, u64 tb)
+static void set_cpumask_stuck(const struct cpumask *cpumask, u64 tb)
{
- cpumask_set_cpu(cpu, &wd_smp_cpus_stuck);
- cpumask_clear_cpu(cpu, &wd_smp_cpus_pending);
+ cpumask_or(&wd_smp_cpus_stuck, &wd_smp_cpus_stuck, cpumask);
+ cpumask_andnot(&wd_smp_cpus_pending, &wd_smp_cpus_pending, cpumask);
if (cpumask_empty(&wd_smp_cpus_pending)) {
wd_smp_last_reset_tb = tb;
cpumask_andnot(&wd_smp_cpus_pending,
@@ -107,6 +112,10 @@ static void set_cpu_stuck(int cpu, u64 tb)
&wd_smp_cpus_stuck);
}
}
+static void set_cpu_stuck(int cpu, u64 tb)
+{
+ set_cpumask_stuck(cpumask_of(cpu), tb);
+}
static void watchdog_smp_panic(int cpu, u64 tb)
{
@@ -135,11 +144,9 @@ static void watchdog_smp_panic(int cpu, u64 tb)
}
smp_flush_nmi_ipi(1000000);
- /* Take the stuck CPU out of the watch group */
- for_each_cpu(c, &wd_smp_cpus_pending)
- set_cpu_stuck(c, tb);
+ /* Take the stuck CPUs out of the watch group */
+ set_cpumask_stuck(&wd_smp_cpus_pending, tb);
-out:
wd_smp_unlock(&flags);
printk_safe_flush();
@@ -152,6 +159,11 @@ out:
if (hardlockup_panic)
nmi_panic(NULL, "Hard LOCKUP");
+
+ return;
+
+out:
+ wd_smp_unlock(&flags);
}
static void wd_smp_clear_cpu_pending(int cpu, u64 tb)
@@ -204,6 +216,9 @@ void soft_nmi_interrupt(struct pt_regs *regs)
return;
nmi_enter();
+
+ __this_cpu_inc(irq_stat.soft_nmi_irqs);
+
tb = get_tb();
if (tb - per_cpu(wd_timer_tb, cpu) >= wd_panic_timeout_tb) {
per_cpu(wd_timer_tb, cpu) = tb;
@@ -258,9 +273,11 @@ static void wd_timer_fn(unsigned long data)
void arch_touch_nmi_watchdog(void)
{
+ unsigned long ticks = tb_ticks_per_usec * wd_timer_period_ms * 1000;
int cpu = smp_processor_id();
- watchdog_timer_interrupt(cpu);
+ if (get_tb() - per_cpu(wd_timer_tb, cpu) >= ticks)
+ watchdog_timer_interrupt(cpu);
}
EXPORT_SYMBOL(arch_touch_nmi_watchdog);
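
Illustrative arithmetic for the throttle above: the heartbeat is only refreshed once per watchdog period, expressed in timebase ticks. With an assumed 512 MHz timebase and a 100 ms period (example values only, not taken from the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t tb_ticks_per_usec = 512;	/* 512 MHz timebase (example) */
	uint64_t wd_timer_period_ms = 100;	/* example period */
	uint64_t ticks = tb_ticks_per_usec * wd_timer_period_ms * 1000;

	printf("skip updates arriving within %llu tb ticks (~%llu ms)\n",
	       (unsigned long long)ticks,
	       (unsigned long long)(ticks / (tb_ticks_per_usec * 1000)));
	return 0;
}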
@@ -283,6 +300,8 @@ static void stop_watchdog_timer_on(unsigned int cpu)
static int start_wd_on_cpu(unsigned int cpu)
{
+ unsigned long flags;
+
if (cpumask_test_cpu(cpu, &wd_cpus_enabled)) {
WARN_ON(1);
return 0;
@@ -297,12 +316,14 @@ static int start_wd_on_cpu(unsigned int cpu)
if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
return 0;
+ wd_smp_lock(&flags);
cpumask_set_cpu(cpu, &wd_cpus_enabled);
if (cpumask_weight(&wd_cpus_enabled) == 1) {
cpumask_set_cpu(cpu, &wd_smp_cpus_pending);
wd_smp_last_reset_tb = get_tb();
}
- smp_wmb();
+ wd_smp_unlock(&flags);
+
start_watchdog_timer_on(cpu);
return 0;
@@ -310,12 +331,17 @@ static int start_wd_on_cpu(unsigned int cpu)
static int stop_wd_on_cpu(unsigned int cpu)
{
+ unsigned long flags;
+
if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
return 0; /* Can happen in CPU unplug case */
stop_watchdog_timer_on(cpu);
+ wd_smp_lock(&flags);
cpumask_clear_cpu(cpu, &wd_cpus_enabled);
+ wd_smp_unlock(&flags);
+
wd_smp_clear_cpu_pending(cpu, get_tb());
return 0;
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 8cb0190e2a73..67075e065ef2 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -37,6 +37,7 @@
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/cputable.h>
+#include <asm/pte-walk.h>
#include "trace_hv.h"
@@ -164,8 +165,10 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order)
goto out;
}
- if (kvm->arch.hpt.virt)
+ if (kvm->arch.hpt.virt) {
kvmppc_free_hpt(&kvm->arch.hpt);
+ kvmppc_rmap_reset(kvm);
+ }
err = kvmppc_allocate_hpt(&info, order);
if (err < 0)
@@ -597,8 +600,8 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
* hugepage split and collapse.
*/
local_irq_save(flags);
- ptep = find_linux_pte_or_hugepte(current->mm->pgd,
- hva, NULL, NULL);
+ ptep = find_current_mm_pte(current->mm->pgd,
+ hva, NULL, NULL);
if (ptep) {
pte = kvmppc_read_update_linux_pte(ptep, 1);
if (__pte_write(pte))
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index f6b3e67c5762..c5d7435455f1 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -17,6 +17,7 @@
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
+#include <asm/pte-walk.h>
/*
* Supported radix tree geometry.
@@ -322,13 +323,13 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
gpa = vcpu->arch.fault_gpa & ~0xfffUL;
gpa &= ~0xF000000000000000ul;
gfn = gpa >> PAGE_SHIFT;
- if (!(dsisr & DSISR_PGDIRFAULT))
+ if (!(dsisr & DSISR_PRTABLE_FAULT))
gpa |= ea & 0xfff;
memslot = gfn_to_memslot(kvm, gfn);
/* No memslot means it's an emulated MMIO region */
if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
- if (dsisr & (DSISR_PGDIRFAULT | DSISR_BADACCESS |
+ if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS |
DSISR_SET_RC)) {
/*
* Bad address in guest page table tree, or other
@@ -359,8 +360,7 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
if (writing)
pgflags |= _PAGE_DIRTY;
local_irq_save(flags);
- ptep = __find_linux_pte_or_hugepte(current->mm->pgd, hva,
- NULL, NULL);
+ ptep = find_current_mm_pte(current->mm->pgd, hva, NULL, NULL);
if (ptep) {
pte = READ_ONCE(*ptep);
if (pte_present(pte) &&
@@ -374,8 +374,12 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
spin_unlock(&kvm->mmu_lock);
return RESUME_GUEST;
}
- ptep = __find_linux_pte_or_hugepte(kvm->arch.pgtable,
- gpa, NULL, &shift);
+ /*
+ * We are walking the secondary page table here. We can do this
+ * without disabling irq.
+ */
+ ptep = __find_linux_pte(kvm->arch.pgtable,
+ gpa, NULL, &shift);
if (ptep && pte_present(*ptep)) {
kvmppc_radix_update_pte(kvm, ptep, 0, pgflags,
gpa, shift);
@@ -427,8 +431,8 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
pgflags |= _PAGE_WRITE;
} else {
local_irq_save(flags);
- ptep = __find_linux_pte_or_hugepte(current->mm->pgd,
- hva, NULL, NULL);
+ ptep = find_current_mm_pte(current->mm->pgd,
+ hva, NULL, NULL);
if (ptep && pte_write(*ptep) && pte_dirty(*ptep))
pgflags |= _PAGE_WRITE;
local_irq_restore(flags);
@@ -499,8 +503,7 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned int shift;
unsigned long old;
- ptep = __find_linux_pte_or_hugepte(kvm->arch.pgtable, gpa,
- NULL, &shift);
+ ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
if (ptep && pte_present(*ptep)) {
old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_PRESENT, 0,
gpa, shift);
@@ -525,8 +528,7 @@ int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned int shift;
int ref = 0;
- ptep = __find_linux_pte_or_hugepte(kvm->arch.pgtable, gpa,
- NULL, &shift);
+ ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
if (ptep && pte_present(*ptep) && pte_young(*ptep)) {
kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0,
gpa, shift);
@@ -545,8 +547,7 @@ int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned int shift;
int ref = 0;
- ptep = __find_linux_pte_or_hugepte(kvm->arch.pgtable, gpa,
- NULL, &shift);
+ ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
if (ptep && pte_present(*ptep) && pte_young(*ptep))
ref = 1;
return ref;
@@ -562,8 +563,7 @@ static int kvm_radix_test_clear_dirty(struct kvm *kvm,
unsigned int shift;
int ret = 0;
- ptep = __find_linux_pte_or_hugepte(kvm->arch.pgtable, gpa,
- NULL, &shift);
+ ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
if (ptep && pte_present(*ptep) && pte_dirty(*ptep)) {
ret = 1;
if (shift)
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index a160c14304eb..53766e2bc029 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -294,32 +294,26 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
struct kvm_create_spapr_tce_64 *args)
{
struct kvmppc_spapr_tce_table *stt = NULL;
+ struct kvmppc_spapr_tce_table *siter;
unsigned long npages, size;
int ret = -ENOMEM;
int i;
+ int fd = -1;
if (!args->size)
return -EINVAL;
- /* Check this LIOBN hasn't been previously allocated */
- list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
- if (stt->liobn == args->liobn)
- return -EBUSY;
- }
-
size = _ALIGN_UP(args->size, PAGE_SIZE >> 3);
npages = kvmppc_tce_pages(size);
ret = kvmppc_account_memlimit(kvmppc_stt_pages(npages), true);
- if (ret) {
- stt = NULL;
- goto fail;
- }
+ if (ret)
+ return ret;
ret = -ENOMEM;
stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
GFP_KERNEL);
if (!stt)
- goto fail;
+ goto fail_acct;
stt->liobn = args->liobn;
stt->page_shift = args->page_shift;
@@ -334,24 +328,42 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
goto fail;
}
- kvm_get_kvm(kvm);
+ ret = fd = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
+ stt, O_RDWR | O_CLOEXEC);
+ if (ret < 0)
+ goto fail;
mutex_lock(&kvm->lock);
- list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
+
+ /* Check this LIOBN hasn't been previously allocated */
+ ret = 0;
+ list_for_each_entry(siter, &kvm->arch.spapr_tce_tables, list) {
+ if (siter->liobn == args->liobn) {
+ ret = -EBUSY;
+ break;
+ }
+ }
+
+ if (!ret) {
+ list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
+ kvm_get_kvm(kvm);
+ }
mutex_unlock(&kvm->lock);
- return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
- stt, O_RDWR | O_CLOEXEC);
+ if (!ret)
+ return fd;
-fail:
- if (stt) {
- for (i = 0; i < npages; i++)
- if (stt->pages[i])
- __free_page(stt->pages[i]);
+ put_unused_fd(fd);
- kfree(stt);
- }
+ fail:
+ for (i = 0; i < npages; i++)
+ if (stt->pages[i])
+ __free_page(stt->pages[i]);
+
+ kfree(stt);
+ fail_acct:
+ kvmppc_account_memlimit(kvmppc_stt_pages(npages), false);
return ret;
}
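
The rework above follows a common shape: build the object and its file descriptor first, then take kvm->lock only to check for a duplicate LIOBN and publish, rolling everything back if the check fails. A generic userspace sketch of that shape (types and names invented for illustration; this is not the KVM API):

#include <pthread.h>
#include <stdlib.h>

struct table { struct table *next; long key; };

static pthread_mutex_t tables_lock = PTHREAD_MUTEX_INITIALIZER;
static struct table *tables;

static int publish_table(long key)
{
	struct table *t = calloc(1, sizeof(*t)), *iter;
	int ret = 0;

	if (!t)
		return -1;
	t->key = key;

	/* Only the duplicate check and list insertion run under the lock. */
	pthread_mutex_lock(&tables_lock);
	for (iter = tables; iter; iter = iter->next) {
		if (iter->key == key) {
			ret = -1;	/* duplicate: refuse to publish */
			break;
		}
	}
	if (!ret) {
		t->next = tables;
		tables = t;
	}
	pthread_mutex_unlock(&tables_lock);

	if (ret)
		free(t);		/* roll back the unpublished object */
	return ret;
}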
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index 3adfd2f5301c..c32e9bfe75b1 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -39,6 +39,7 @@
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
+#include <asm/pte-walk.h>
#ifdef CONFIG_BUG
@@ -353,7 +354,16 @@ static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu,
pte_t *ptep, pte;
unsigned shift = 0;
- ptep = __find_linux_pte_or_hugepte(vcpu->arch.pgdir, ua, NULL, &shift);
+ /*
+ * Called in real mode with MSR_EE = 0. We are safe here.
+ * It is ok to do the lookup with arch.pgdir here, because
+ * we are doing this on secondary cpus and current task there
+ * is not the hypervisor. Also this is safe against THP in the
+ * host, because an IPI to primary thread will wait for the secondary
+ * to exit, which in turn lets the page table walk below
+ * finish.
+ */
+ ptep = __find_linux_pte(vcpu->arch.pgdir, ua, NULL, &shift);
if (!ptep || !pte_present(*ptep))
return -ENXIO;
pte = *ptep;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 0b436df746fc..ebcf97cb5c98 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2111,6 +2111,15 @@ static int kvmppc_grab_hwthread(int cpu)
struct paca_struct *tpaca;
long timeout = 10000;
+ /*
+ * ISA v3.0 idle routines do not set hwthread_state or test
+ * hwthread_req, so they can not grab idle threads.
+ */
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ WARN(1, "KVM: can not control sibling threads\n");
+ return -EBUSY;
+ }
+
tpaca = &paca[cpu];
/* Ensure the thread won't go into the kernel if it wakes */
@@ -2145,10 +2154,12 @@ static void kvmppc_release_hwthread(int cpu)
struct paca_struct *tpaca;
tpaca = &paca[cpu];
- tpaca->kvm_hstate.hwthread_req = 0;
tpaca->kvm_hstate.kvm_vcpu = NULL;
tpaca->kvm_hstate.kvm_vcore = NULL;
tpaca->kvm_hstate.kvm_split_mode = NULL;
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ tpaca->kvm_hstate.hwthread_req = 0;
+
}
static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
@@ -3211,6 +3222,8 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
run->fail_entry.hardware_entry_failure_reason = 0;
return -EINVAL;
}
+ /* Enable TM so we can read the TM SPRs */
+ mtmsr(mfmsr() | MSR_TM);
current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
current->thread.tm_texasr = mfspr(SPRN_TEXASR);
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 584c74c8119f..fedb0139524c 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -22,6 +22,7 @@
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
+#include <asm/pte-walk.h>
/* Translate address of a vmalloc'd thing to a linear map address */
static void *real_vmalloc_addr(void *x)
@@ -31,9 +32,9 @@ static void *real_vmalloc_addr(void *x)
/*
* assume we don't have huge pages in vmalloc space...
* So don't worry about THP collapse/split. Called
- * Only in realmode, hence won't need irq_save/restore.
+ * Only in realmode with MSR_EE = 0, hence won't need irq_save/restore.
*/
- p = __find_linux_pte_or_hugepte(swapper_pg_dir, addr, NULL, NULL);
+ p = find_init_mm_pte(addr, NULL);
if (!p || !pte_present(*p))
return NULL;
addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
@@ -230,14 +231,13 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
* If we had a page table table change after lookup, we would
* retry via mmu_notifier_retry.
*/
- if (realmode)
- ptep = __find_linux_pte_or_hugepte(pgdir, hva, NULL,
- &hpage_shift);
- else {
+ if (!realmode)
local_irq_save(irq_flags);
- ptep = find_linux_pte_or_hugepte(pgdir, hva, NULL,
- &hpage_shift);
- }
+ /*
+ * If called in real mode we have MSR_EE = 0. Otherwise
+ * we disable irq above.
+ */
+ ptep = __find_linux_pte(pgdir, hva, NULL, &hpage_shift);
if (ptep) {
pte_t pte;
unsigned int host_pte_size;
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index cb44065e2946..2259b6cde119 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -149,9 +149,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
subf r4, r4, r3
mtspr SPRN_DEC, r4
+BEGIN_FTR_SECTION
/* hwthread_req may have got set by cede or no vcpu, so clear it */
li r0, 0
stb r0, HSTATE_HWTHREAD_REQ(r13)
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
/*
* For external interrupts we need to call the Linux
@@ -314,6 +316,7 @@ kvm_novcpu_exit:
* Relocation is off and most register values are lost.
* r13 points to the PACA.
* r3 contains the SRR1 wakeup value, SRR1 is trashed.
+ * This is not used by ISAv3.0B processors.
*/
.globl kvm_start_guest
kvm_start_guest:
@@ -432,6 +435,9 @@ kvm_secondary_got_guest:
* While waiting we also need to check if we get given a vcpu to run.
*/
kvm_no_guest:
+BEGIN_FTR_SECTION
+ twi 31,0,0
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
lbz r3, HSTATE_HWTHREAD_REQ(r13)
cmpwi r3, 0
bne 53f
@@ -1291,6 +1297,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
/* Hypervisor doorbell - exit only if host IPI flag set */
cmpwi r12, BOOK3S_INTERRUPT_H_DOORBELL
bne 3f
+BEGIN_FTR_SECTION
+ PPC_MSGSYNC
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
lbz r0, HSTATE_HOST_IPI(r13)
cmpwi r0, 0
beq 4f
@@ -1443,12 +1452,14 @@ mc_cont:
ori r6,r6,1
mtspr SPRN_CTRLT,r6
4:
- /* Read the guest SLB and save it away */
+ /* Check if we are running hash or radix and store it in cr2 */
ld r5, VCPU_KVM(r9)
lbz r0, KVM_RADIX(r5)
- cmpwi r0, 0
+ cmpwi cr2,r0,0
+
+ /* Read the guest SLB and save it away */
li r5, 0
- bne 3f /* for radix, save 0 entries */
+ bne cr2, 3f /* for radix, save 0 entries */
lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */
mtctr r0
li r6,0
@@ -1712,11 +1723,6 @@ BEGIN_FTR_SECTION_NESTED(96)
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
22:
- /* Clear out SLB */
- li r5,0
- slbmte r5,r5
- slbia
- ptesync
/* Restore host values of some registers */
BEGIN_FTR_SECTION
@@ -1737,10 +1743,56 @@ BEGIN_FTR_SECTION
mtspr SPRN_PID, r7
mtspr SPRN_IAMR, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+
+#ifdef CONFIG_PPC_RADIX_MMU
+ /*
+ * Are we running hash or radix ?
+ */
+ beq cr2,3f
+
+ /* Radix: Handle the case where the guest used an illegal PID */
+ LOAD_REG_ADDR(r4, mmu_base_pid)
+ lwz r3, VCPU_GUEST_PID(r9)
+ lwz r5, 0(r4)
+ cmpw cr0,r3,r5
+ blt 2f
+
+ /*
+ * Illegal PID, the HW might have prefetched and cached in the TLB
+ * some translations for the LPID 0 / guest PID combination which
+ * Linux doesn't know about, so we need to flush that PID out of
+ * the TLB. First we need to set LPIDR to 0 so tlbiel applies to
+ * the right context.
+ */
+ li r0,0
+ mtspr SPRN_LPID,r0
+ isync
+
+ /* Then do a congruence class local flush */
+ ld r6,VCPU_KVM(r9)
+ lwz r0,KVM_TLB_SETS(r6)
+ mtctr r0
+ li r7,0x400 /* IS field = 0b01 */
+ ptesync
+ sldi r0,r3,32 /* RS has PID */
+1: PPC_TLBIEL(7,0,2,1,1) /* RIC=2, PRS=1, R=1 */
+ addi r7,r7,0x1000
+ bdnz 1b
+ ptesync
+
+2: /* Flush the ERAT on radix P9 DD1 guest exit */
BEGIN_FTR_SECTION
PPC_INVALIDATE_ERAT
END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
+ b 4f
+#endif /* CONFIG_PPC_RADIX_MMU */
+ /* Hash: clear out SLB */
+3: li r5,0
+ slbmte r5,r5
+ slbia
+ ptesync
+4:
/*
* POWER7/POWER8 guest -> host partition switch code.
* We don't have to lock against tlbies but we do
@@ -2466,8 +2518,10 @@ kvm_do_nap:
clrrdi r0, r0, 1
mtspr SPRN_CTRLT, r0
+BEGIN_FTR_SECTION
li r0,1
stb r0,HSTATE_HWTHREAD_REQ(r13)
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
mfspr r5,SPRN_LPCR
ori r5,r5,LPCR_PECE0 | LPCR_PECE1
BEGIN_FTR_SECTION
diff --git a/arch/powerpc/kvm/book3s_xive_template.c b/arch/powerpc/kvm/book3s_xive_template.c
index 4636ca6e7d38..d1ed2c41b5d2 100644
--- a/arch/powerpc/kvm/book3s_xive_template.c
+++ b/arch/powerpc/kvm/book3s_xive_template.c
@@ -16,7 +16,22 @@ static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc)
u8 cppr;
u16 ack;
- /* XXX DD1 bug workaround: Check PIPR vs. CPPR first ! */
+ /*
+ * Ensure any previous store to CPPR is ordered vs.
+ * the subsequent loads from PIPR or ACK.
+ */
+ eieio();
+
+ /*
+ * DD1 bug workaround: If PIPR is less favored than CPPR
+ * ignore the interrupt or we might incorrectly lose an IPB
+ * bit.
+ */
+ if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
+ u8 pipr = __x_readb(__x_tima + TM_QW1_OS + TM_PIPR);
+ if (pipr >= xc->hw_cppr)
+ return;
+ }
/* Perform the acknowledge OS to register cycle. */
ack = be16_to_cpu(__x_readw(__x_tima + TM_SPC_ACK_OS_REG));
@@ -235,6 +250,11 @@ skip_ipi:
/*
* If we found an interrupt, adjust what the guest CPPR should
* be as if we had just fetched that interrupt from HW.
+ *
+ * Note: This can only make xc->cppr smaller as the previous
+ * loop will only exit with hirq != 0 if prio is lower than
+ * the current xc->cppr. Thus we don't need to re-check xc->mfrr
+ * for pending IPIs.
*/
if (hirq)
xc->cppr = prio;
@@ -381,6 +401,12 @@ X_STATIC int GLUE(X_PFX,h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr)
xc->cppr = cppr;
/*
+ * Order the above update of xc->cppr with the subsequent
+ * read of xc->mfrr inside push_pending_to_hw()
+ */
+ smp_mb();
+
+ /*
* We are masking less, we need to look for pending things
* to deliver and set VP pending bits accordingly to trigger
* a new interrupt otherwise we might miss MFRR changes for
@@ -420,21 +446,37 @@ X_STATIC int GLUE(X_PFX,h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr)
* used to signal MFRR changes is EOId when fetched from
* the queue.
*/
- if (irq == XICS_IPI || irq == 0)
+ if (irq == XICS_IPI || irq == 0) {
+ /*
+ * This barrier orders the setting of xc->cppr vs.
+ * subsequent test of xc->mfrr done inside
+ * scan_interrupts and push_pending_to_hw
+ */
+ smp_mb();
goto bail;
+ }
/* Find interrupt source */
sb = kvmppc_xive_find_source(xive, irq, &src);
if (!sb) {
pr_devel(" source not found !\n");
rc = H_PARAMETER;
+ /* Same as above */
+ smp_mb();
goto bail;
}
state = &sb->irq_state[src];
kvmppc_xive_select_irq(state, &hw_num, &xd);
state->in_eoi = true;
- mb();
+
+ /*
+ * This barrier orders both the setting of in_eoi above vs.
+ * subsequent test of guest_priority, and the setting
+ * of xc->cppr vs. subsequent test of xc->mfrr done inside
+ * scan_interrupts and push_pending_to_hw
+ */
+ smp_mb();
again:
if (state->guest_priority == MASKED) {
@@ -461,6 +503,14 @@ again:
}
+ /*
+ * This barrier orders the above guest_priority check
+ * and spin_lock/unlock with clearing in_eoi below.
+ *
+ * It also has to be a full mb() as it must ensure
+ * the MMIOs done in source_eoi() are completed before
+ * state->in_eoi is visible.
+ */
mb();
state->in_eoi = false;
bail:
@@ -495,6 +545,18 @@ X_STATIC int GLUE(X_PFX,h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
/* Locklessly write over MFRR */
xc->mfrr = mfrr;
+ /*
+ * The load of xc->cppr below and the subsequent MMIO store
+ * to the IPI must happen after the above mfrr update is
+ * globally visible so that:
+ *
+ * - Synchronize with another CPU doing an H_EOI or a H_CPPR
+ * updating xc->cppr then reading xc->mfrr.
+ *
+ * - The target of the IPI sees the xc->mfrr update
+ */
+ mb();
+
/* Shoot the IPI if more favored than the target cppr */
if (mfrr < xc->cppr)
__x_writeq(0, __x_trig_page(&xc->vp_ipi_data));
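The barriers added in this file pair two sides of the same race: one path updates xc->cppr (or in_eoi) and then reads xc->mfrr, while H_IPI updates xc->mfrr and then reads xc->cppr; a full barrier on each side guarantees that at least one side observes the other's store. A minimal userspace C11 sketch of that pattern (names invented for illustration; this is not the KVM code):

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic unsigned char cppr;	/* stand-in for xc->cppr */
static _Atomic unsigned char mfrr;	/* stand-in for xc->mfrr */

/* H_CPPR side: publish the new CPPR, then look for a deliverable IPI. */
bool cppr_side(unsigned char new_cppr)
{
	atomic_store(&cppr, new_cppr);
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() analogue */
	return atomic_load(&mfrr) < new_cppr;
}

/* H_IPI side: publish the new MFRR, then decide whether to shoot the IPI. */
bool ipi_side(unsigned char new_mfrr)
{
	atomic_store(&mfrr, new_mfrr);
	atomic_thread_fence(memory_order_seq_cst);	/* mb() analogue */
	return new_mfrr < atomic_load(&cppr);
}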
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index 77fd043b3ecc..c6c734424c70 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -30,6 +30,7 @@
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <asm/kvm_ppc.h>
+#include <asm/pte-walk.h>
#include "e500.h"
#include "timing.h"
@@ -476,7 +477,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
* can't run hence pfn won't change.
*/
local_irq_save(flags);
- ptep = find_linux_pte_or_hugepte(pgdir, hva, NULL, NULL);
+ ptep = find_linux_pte(pgdir, hva, NULL, NULL);
if (ptep) {
pte_t pte = READ_ONCE(*ptep);
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 3c3146ba62da..50d5bf954cff 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -31,7 +31,8 @@ obj64-$(CONFIG_KPROBES_SANITY_TEST) += test_emulate_step.o
obj-y += checksum_$(BITS).o checksum_wrappers.o
-obj-$(CONFIG_PPC_EMULATE_SSTEP) += sstep.o ldstfp.o
+obj-y += sstep.o ldstfp.o
+obj64-y += quad.o
obj-$(CONFIG_PPC_LIB_RHEAP) += rheap.o
diff --git a/arch/powerpc/lib/copy_32.S b/arch/powerpc/lib/copy_32.S
index 8aedbb5f4b86..da425bb6b369 100644
--- a/arch/powerpc/lib/copy_32.S
+++ b/arch/powerpc/lib/copy_32.S
@@ -67,6 +67,20 @@ CACHELINE_BYTES = L1_CACHE_BYTES
LG_CACHELINE_BYTES = L1_CACHE_SHIFT
CACHELINE_MASK = (L1_CACHE_BYTES-1)
+_GLOBAL(memset16)
+ rlwinm. r0, r5, 31, 1, 31
+ addi r6, r3, -4
+ beq- 2f
+ rlwimi r4, r4, 16, 0, 15
+ mtctr r0
+1: stwu r4, 4(r6)
+ bdnz 1b
+2: andi. r0, r5, 1
+ beqlr
+ sth r4, 4(r6)
+ blr
+EXPORT_SYMBOL(memset16)
+
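The new 32-bit memset16() above stores a 16-bit value 'count' times: two copies per 32-bit store in the main loop, plus one trailing halfword when the count is odd. A plain C sketch of the intended semantics (illustration only, assuming the generic prototype that takes a count of 16-bit elements; this is not the kernel implementation):

#include <stddef.h>
#include <stdint.h>

/* Reference behaviour: write 'count' copies of the 16-bit value 'v' at 's'. */
static uint16_t *memset16_ref(uint16_t *s, uint16_t v, size_t count)
{
	uint16_t *p = s;

	while (count--)
		*p++ = v;
	return s;
}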
/*
* Use dcbz on the complete cache lines in the destination
* to set them to zero. This requires that the destination
@@ -77,22 +91,24 @@ CACHELINE_MASK = (L1_CACHE_BYTES-1)
* replaced by a nop once cache is active. This is done in machine_init()
*/
_GLOBAL(memset)
+ cmplwi 0,r5,4
+ blt 7f
+
rlwimi r4,r4,8,16,23
rlwimi r4,r4,16,0,15
- addi r6,r3,-4
- cmplwi 0,r5,4
- blt 7f
- stwu r4,4(r6)
+ stw r4,0(r3)
beqlr
- andi. r0,r6,3
+ andi. r0,r3,3
add r5,r0,r5
- subf r6,r0,r6
+ subf r6,r0,r3
cmplwi 0,r4,0
- bne 2f /* Use normal procedure if r4 is not zero */
-EXPORT_SYMBOL(memset)
+ /*
+ * Skip optimised block until cache is enabled. Will be replaced
+ * by 'bne' during boot to use the normal procedure if r4 is not zero
+ */
_GLOBAL(memset_nocache_branch)
- b 2f /* Skip optimised bloc until cache is enabled */
+ b 2f
clrlwi r7,r6,32-LG_CACHELINE_BYTES
add r8,r7,r5
@@ -119,7 +135,6 @@ _GLOBAL(memset_nocache_branch)
1: stwu r4,4(r6)
bdnz 1b
6: andi. r5,r5,3
-7: cmpwi 0,r5,0
beqlr
mtctr r5
addi r6,r6,3
@@ -127,6 +142,15 @@ _GLOBAL(memset_nocache_branch)
bdnz 8b
blr
+7: cmpwi 0,r5,0
+ beqlr
+ mtctr r5
+ addi r6,r3,-1
+9: stbu r4,1(r6)
+ bdnz 9b
+ blr
+EXPORT_SYMBOL(memset)
+
/*
* This version uses dcbz on the complete cache lines in the
* destination area to reduce memory traffic. This requires that
diff --git a/arch/powerpc/lib/copypage_power7.S b/arch/powerpc/lib/copypage_power7.S
index a84d333ecb09..ca5fc8fa7efc 100644
--- a/arch/powerpc/lib/copypage_power7.S
+++ b/arch/powerpc/lib/copypage_power7.S
@@ -45,13 +45,13 @@ _GLOBAL(copypage_power7)
.machine push
.machine "power4"
/* setup read stream 0 */
- dcbt r0,r4,0b01000 /* addr from */
- dcbt r0,r7,0b01010 /* length and depth from */
+ dcbt 0,r4,0b01000 /* addr from */
+ dcbt 0,r7,0b01010 /* length and depth from */
/* setup write stream 1 */
- dcbtst r0,r9,0b01000 /* addr to */
- dcbtst r0,r10,0b01010 /* length and depth to */
+ dcbtst 0,r9,0b01000 /* addr to */
+ dcbtst 0,r10,0b01010 /* length and depth to */
eieio
- dcbt r0,r8,0b01010 /* all streams GO */
+ dcbt 0,r8,0b01010 /* all streams GO */
.machine pop
#ifdef CONFIG_ALTIVEC
@@ -83,7 +83,7 @@ _GLOBAL(copypage_power7)
li r12,112
.align 5
-1: lvx v7,r0,r4
+1: lvx v7,0,r4
lvx v6,r4,r6
lvx v5,r4,r7
lvx v4,r4,r8
@@ -92,7 +92,7 @@ _GLOBAL(copypage_power7)
lvx v1,r4,r11
lvx v0,r4,r12
addi r4,r4,128
- stvx v7,r0,r3
+ stvx v7,0,r3
stvx v6,r3,r6
stvx v5,r3,r7
stvx v4,r3,r8
diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S
index 706b7cc19846..d416a4a66578 100644
--- a/arch/powerpc/lib/copyuser_power7.S
+++ b/arch/powerpc/lib/copyuser_power7.S
@@ -315,13 +315,13 @@ err1; stb r0,0(r3)
.machine push
.machine "power4"
/* setup read stream 0 */
- dcbt r0,r6,0b01000 /* addr from */
- dcbt r0,r7,0b01010 /* length and depth from */
+ dcbt 0,r6,0b01000 /* addr from */
+ dcbt 0,r7,0b01010 /* length and depth from */
/* setup write stream 1 */
- dcbtst r0,r9,0b01000 /* addr to */
- dcbtst r0,r10,0b01010 /* length and depth to */
+ dcbtst 0,r9,0b01000 /* addr to */
+ dcbtst 0,r10,0b01010 /* length and depth to */
eieio
- dcbt r0,r8,0b01010 /* all streams GO */
+ dcbt 0,r8,0b01010 /* all streams GO */
.machine pop
beq cr1,.Lunwind_stack_nonvmx_copy
@@ -376,26 +376,26 @@ err3; std r0,0(r3)
li r11,48
bf cr7*4+3,5f
-err3; lvx v1,r0,r4
+err3; lvx v1,0,r4
addi r4,r4,16
-err3; stvx v1,r0,r3
+err3; stvx v1,0,r3
addi r3,r3,16
5: bf cr7*4+2,6f
-err3; lvx v1,r0,r4
+err3; lvx v1,0,r4
err3; lvx v0,r4,r9
addi r4,r4,32
-err3; stvx v1,r0,r3
+err3; stvx v1,0,r3
err3; stvx v0,r3,r9
addi r3,r3,32
6: bf cr7*4+1,7f
-err3; lvx v3,r0,r4
+err3; lvx v3,0,r4
err3; lvx v2,r4,r9
err3; lvx v1,r4,r10
err3; lvx v0,r4,r11
addi r4,r4,64
-err3; stvx v3,r0,r3
+err3; stvx v3,0,r3
err3; stvx v2,r3,r9
err3; stvx v1,r3,r10
err3; stvx v0,r3,r11
@@ -421,7 +421,7 @@ err3; stvx v0,r3,r11
*/
.align 5
8:
-err4; lvx v7,r0,r4
+err4; lvx v7,0,r4
err4; lvx v6,r4,r9
err4; lvx v5,r4,r10
err4; lvx v4,r4,r11
@@ -430,7 +430,7 @@ err4; lvx v2,r4,r14
err4; lvx v1,r4,r15
err4; lvx v0,r4,r16
addi r4,r4,128
-err4; stvx v7,r0,r3
+err4; stvx v7,0,r3
err4; stvx v6,r3,r9
err4; stvx v5,r3,r10
err4; stvx v4,r3,r11
@@ -451,29 +451,29 @@ err4; stvx v0,r3,r16
mtocrf 0x01,r6
bf cr7*4+1,9f
-err3; lvx v3,r0,r4
+err3; lvx v3,0,r4
err3; lvx v2,r4,r9
err3; lvx v1,r4,r10
err3; lvx v0,r4,r11
addi r4,r4,64
-err3; stvx v3,r0,r3
+err3; stvx v3,0,r3
err3; stvx v2,r3,r9
err3; stvx v1,r3,r10
err3; stvx v0,r3,r11
addi r3,r3,64
9: bf cr7*4+2,10f
-err3; lvx v1,r0,r4
+err3; lvx v1,0,r4
err3; lvx v0,r4,r9
addi r4,r4,32
-err3; stvx v1,r0,r3
+err3; stvx v1,0,r3
err3; stvx v0,r3,r9
addi r3,r3,32
10: bf cr7*4+3,11f
-err3; lvx v1,r0,r4
+err3; lvx v1,0,r4
addi r4,r4,16
-err3; stvx v1,r0,r3
+err3; stvx v1,0,r3
addi r3,r3,16
/* Up to 15B to go */
@@ -553,25 +553,25 @@ err3; lvx v0,0,r4
addi r4,r4,16
bf cr7*4+3,5f
-err3; lvx v1,r0,r4
+err3; lvx v1,0,r4
VPERM(v8,v0,v1,v16)
addi r4,r4,16
-err3; stvx v8,r0,r3
+err3; stvx v8,0,r3
addi r3,r3,16
vor v0,v1,v1
5: bf cr7*4+2,6f
-err3; lvx v1,r0,r4
+err3; lvx v1,0,r4
VPERM(v8,v0,v1,v16)
err3; lvx v0,r4,r9
VPERM(v9,v1,v0,v16)
addi r4,r4,32
-err3; stvx v8,r0,r3
+err3; stvx v8,0,r3
err3; stvx v9,r3,r9
addi r3,r3,32
6: bf cr7*4+1,7f
-err3; lvx v3,r0,r4
+err3; lvx v3,0,r4
VPERM(v8,v0,v3,v16)
err3; lvx v2,r4,r9
VPERM(v9,v3,v2,v16)
@@ -580,7 +580,7 @@ err3; lvx v1,r4,r10
err3; lvx v0,r4,r11
VPERM(v11,v1,v0,v16)
addi r4,r4,64
-err3; stvx v8,r0,r3
+err3; stvx v8,0,r3
err3; stvx v9,r3,r9
err3; stvx v10,r3,r10
err3; stvx v11,r3,r11
@@ -606,7 +606,7 @@ err3; stvx v11,r3,r11
*/
.align 5
8:
-err4; lvx v7,r0,r4
+err4; lvx v7,0,r4
VPERM(v8,v0,v7,v16)
err4; lvx v6,r4,r9
VPERM(v9,v7,v6,v16)
@@ -623,7 +623,7 @@ err4; lvx v1,r4,r15
err4; lvx v0,r4,r16
VPERM(v15,v1,v0,v16)
addi r4,r4,128
-err4; stvx v8,r0,r3
+err4; stvx v8,0,r3
err4; stvx v9,r3,r9
err4; stvx v10,r3,r10
err4; stvx v11,r3,r11
@@ -644,7 +644,7 @@ err4; stvx v15,r3,r16
mtocrf 0x01,r6
bf cr7*4+1,9f
-err3; lvx v3,r0,r4
+err3; lvx v3,0,r4
VPERM(v8,v0,v3,v16)
err3; lvx v2,r4,r9
VPERM(v9,v3,v2,v16)
@@ -653,27 +653,27 @@ err3; lvx v1,r4,r10
err3; lvx v0,r4,r11
VPERM(v11,v1,v0,v16)
addi r4,r4,64
-err3; stvx v8,r0,r3
+err3; stvx v8,0,r3
err3; stvx v9,r3,r9
err3; stvx v10,r3,r10
err3; stvx v11,r3,r11
addi r3,r3,64
9: bf cr7*4+2,10f
-err3; lvx v1,r0,r4
+err3; lvx v1,0,r4
VPERM(v8,v0,v1,v16)
err3; lvx v0,r4,r9
VPERM(v9,v1,v0,v16)
addi r4,r4,32
-err3; stvx v8,r0,r3
+err3; stvx v8,0,r3
err3; stvx v9,r3,r9
addi r3,r3,32
10: bf cr7*4+3,11f
-err3; lvx v1,r0,r4
+err3; lvx v1,0,r4
VPERM(v8,v0,v1,v16)
addi r4,r4,16
-err3; stvx v8,r0,r3
+err3; stvx v8,0,r3
addi r3,r3,16
/* Up to 15B to go */
diff --git a/arch/powerpc/lib/ldstfp.S b/arch/powerpc/lib/ldstfp.S
index a58777c1b2cb..ae15eba49c1f 100644
--- a/arch/powerpc/lib/ldstfp.S
+++ b/arch/powerpc/lib/ldstfp.S
@@ -21,27 +21,19 @@
#define STKFRM (PPC_MIN_STKFRM + 16)
- .macro inst32 op
-reg = 0
- .rept 32
-20: \op reg,0,r4
- b 3f
- EX_TABLE(20b,99f)
-reg = reg + 1
- .endr
- .endm
-
-/* Get the contents of frN into fr0; N is in r3. */
+/* Get the contents of frN into *p; N is in r3 and p is in r4. */
_GLOBAL(get_fpr)
mflr r0
+ mfmsr r6
+ ori r7, r6, MSR_FP
+ MTMSRD(r7)
+ isync
rlwinm r3,r3,3,0xf8
bcl 20,31,1f
- blr /* fr0 is already in fr0 */
- nop
-reg = 1
- .rept 31
- fmr fr0,reg
- blr
+reg = 0
+ .rept 32
+ stfd reg, 0(r4)
+ b 2f
reg = reg + 1
.endr
1: mflr r5
@@ -49,18 +41,23 @@ reg = reg + 1
mtctr r5
mtlr r0
bctr
+2: MTMSRD(r6)
+ isync
+ blr
-/* Put the contents of fr0 into frN; N is in r3. */
+/* Put the contents of *p into frN; N is in r3 and p is in r4. */
_GLOBAL(put_fpr)
mflr r0
+ mfmsr r6
+ ori r7, r6, MSR_FP
+ MTMSRD(r7)
+ isync
rlwinm r3,r3,3,0xf8
bcl 20,31,1f
- blr /* fr0 is already in fr0 */
- nop
-reg = 1
- .rept 31
- fmr reg,fr0
- blr
+reg = 0
+ .rept 32
+ lfd reg, 0(r4)
+ b 2f
reg = reg + 1
.endr
1: mflr r5
@@ -68,127 +65,24 @@ reg = reg + 1
mtctr r5
mtlr r0
bctr
-
-/* Load FP reg N from float at *p. N is in r3, p in r4. */
-_GLOBAL(do_lfs)
- PPC_STLU r1,-STKFRM(r1)
- mflr r0
- PPC_STL r0,STKFRM+PPC_LR_STKOFF(r1)
- mfmsr r6
- ori r7,r6,MSR_FP
- cmpwi cr7,r3,0
- MTMSRD(r7)
- isync
- beq cr7,1f
- stfd fr0,STKFRM-16(r1)
-1: li r9,-EFAULT
-2: lfs fr0,0(r4)
- li r9,0
-3: bl put_fpr
- beq cr7,4f
- lfd fr0,STKFRM-16(r1)
-4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1)
- mtlr r0
- MTMSRD(r6)
- isync
- mr r3,r9
- addi r1,r1,STKFRM
- blr
- EX_TABLE(2b,3b)
-
-/* Load FP reg N from double at *p. N is in r3, p in r4. */
-_GLOBAL(do_lfd)
- PPC_STLU r1,-STKFRM(r1)
- mflr r0
- PPC_STL r0,STKFRM+PPC_LR_STKOFF(r1)
- mfmsr r6
- ori r7,r6,MSR_FP
- cmpwi cr7,r3,0
- MTMSRD(r7)
- isync
- beq cr7,1f
- stfd fr0,STKFRM-16(r1)
-1: li r9,-EFAULT
-2: lfd fr0,0(r4)
- li r9,0
-3: beq cr7,4f
- bl put_fpr
- lfd fr0,STKFRM-16(r1)
-4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1)
- mtlr r0
- MTMSRD(r6)
+2: MTMSRD(r6)
isync
- mr r3,r9
- addi r1,r1,STKFRM
blr
- EX_TABLE(2b,3b)
-/* Store FP reg N to float at *p. N is in r3, p in r4. */
-_GLOBAL(do_stfs)
- PPC_STLU r1,-STKFRM(r1)
- mflr r0
- PPC_STL r0,STKFRM+PPC_LR_STKOFF(r1)
- mfmsr r6
- ori r7,r6,MSR_FP
- cmpwi cr7,r3,0
- MTMSRD(r7)
- isync
- beq cr7,1f
- stfd fr0,STKFRM-16(r1)
- bl get_fpr
-1: li r9,-EFAULT
-2: stfs fr0,0(r4)
- li r9,0
-3: beq cr7,4f
- lfd fr0,STKFRM-16(r1)
-4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1)
- mtlr r0
- MTMSRD(r6)
- isync
- mr r3,r9
- addi r1,r1,STKFRM
- blr
- EX_TABLE(2b,3b)
-
-/* Store FP reg N to double at *p. N is in r3, p in r4. */
-_GLOBAL(do_stfd)
- PPC_STLU r1,-STKFRM(r1)
+#ifdef CONFIG_ALTIVEC
+/* Get the contents of vrN into *p; N is in r3 and p is in r4. */
+_GLOBAL(get_vr)
mflr r0
- PPC_STL r0,STKFRM+PPC_LR_STKOFF(r1)
mfmsr r6
- ori r7,r6,MSR_FP
- cmpwi cr7,r3,0
+ oris r7, r6, MSR_VEC@h
MTMSRD(r7)
isync
- beq cr7,1f
- stfd fr0,STKFRM-16(r1)
- bl get_fpr
-1: li r9,-EFAULT
-2: stfd fr0,0(r4)
- li r9,0
-3: beq cr7,4f
- lfd fr0,STKFRM-16(r1)
-4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1)
- mtlr r0
- MTMSRD(r6)
- isync
- mr r3,r9
- addi r1,r1,STKFRM
- blr
- EX_TABLE(2b,3b)
-
-#ifdef CONFIG_ALTIVEC
-/* Get the contents of vrN into v0; N is in r3. */
-_GLOBAL(get_vr)
- mflr r0
rlwinm r3,r3,3,0xf8
bcl 20,31,1f
- blr /* v0 is already in v0 */
- nop
-reg = 1
- .rept 31
- vor v0,reg,reg /* assembler doesn't know vmr? */
- blr
+reg = 0
+ .rept 32
+ stvx reg, 0, r4
+ b 2f
reg = reg + 1
.endr
1: mflr r5
@@ -196,18 +90,23 @@ reg = reg + 1
mtctr r5
mtlr r0
bctr
+2: MTMSRD(r6)
+ isync
+ blr
-/* Put the contents of v0 into vrN; N is in r3. */
+/* Put the contents of *p into vrN; N is in r3 and p is in r4. */
_GLOBAL(put_vr)
mflr r0
+ mfmsr r6
+ oris r7, r6, MSR_VEC@h
+ MTMSRD(r7)
+ isync
rlwinm r3,r3,3,0xf8
bcl 20,31,1f
- blr /* v0 is already in v0 */
- nop
-reg = 1
- .rept 31
- vor reg,v0,v0
- blr
+reg = 0
+ .rept 32
+ lvx reg, 0, r4
+ b 2f
reg = reg + 1
.endr
1: mflr r5
@@ -215,62 +114,9 @@ reg = reg + 1
mtctr r5
mtlr r0
bctr
-
-/* Load vector reg N from *p. N is in r3, p in r4. */
-_GLOBAL(do_lvx)
- PPC_STLU r1,-STKFRM(r1)
- mflr r0
- PPC_STL r0,STKFRM+PPC_LR_STKOFF(r1)
- mfmsr r6
- oris r7,r6,MSR_VEC@h
- cmpwi cr7,r3,0
- li r8,STKFRM-16
- MTMSRD(r7)
- isync
- beq cr7,1f
- stvx v0,r1,r8
-1: li r9,-EFAULT
-2: lvx v0,0,r4
- li r9,0
-3: beq cr7,4f
- bl put_vr
- lvx v0,r1,r8
-4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1)
- mtlr r0
- MTMSRD(r6)
+2: MTMSRD(r6)
isync
- mr r3,r9
- addi r1,r1,STKFRM
- blr
- EX_TABLE(2b,3b)
-
-/* Store vector reg N to *p. N is in r3, p in r4. */
-_GLOBAL(do_stvx)
- PPC_STLU r1,-STKFRM(r1)
- mflr r0
- PPC_STL r0,STKFRM+PPC_LR_STKOFF(r1)
- mfmsr r6
- oris r7,r6,MSR_VEC@h
- cmpwi cr7,r3,0
- li r8,STKFRM-16
- MTMSRD(r7)
- isync
- beq cr7,1f
- stvx v0,r1,r8
- bl get_vr
-1: li r9,-EFAULT
-2: stvx v0,0,r4
- li r9,0
-3: beq cr7,4f
- lvx v0,r1,r8
-4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1)
- mtlr r0
- MTMSRD(r6)
- isync
- mr r3,r9
- addi r1,r1,STKFRM
blr
- EX_TABLE(2b,3b)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
@@ -313,7 +159,7 @@ reg = reg + 1
bctr
/* Load VSX reg N from vector doubleword *p. N is in r3, p in r4. */
-_GLOBAL(do_lxvd2x)
+_GLOBAL(load_vsrn)
PPC_STLU r1,-STKFRM(r1)
mflr r0
PPC_STL r0,STKFRM+PPC_LR_STKOFF(r1)
@@ -325,49 +171,74 @@ _GLOBAL(do_lxvd2x)
isync
beq cr7,1f
STXVD2X(0,R1,R8)
-1: li r9,-EFAULT
-2: LXVD2X(0,R0,R4)
- li r9,0
-3: beq cr7,4f
+1: LXVD2X(0,R0,R4)
+#ifdef __LITTLE_ENDIAN__
+ XXSWAPD(0,0)
+#endif
+ beq cr7,4f
bl put_vsr
LXVD2X(0,R1,R8)
4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1)
mtlr r0
MTMSRD(r6)
isync
- mr r3,r9
addi r1,r1,STKFRM
blr
- EX_TABLE(2b,3b)
/* Store VSX reg N to vector doubleword *p. N is in r3, p in r4. */
-_GLOBAL(do_stxvd2x)
+_GLOBAL(store_vsrn)
PPC_STLU r1,-STKFRM(r1)
mflr r0
PPC_STL r0,STKFRM+PPC_LR_STKOFF(r1)
mfmsr r6
oris r7,r6,MSR_VSX@h
- cmpwi cr7,r3,0
li r8,STKFRM-16
MTMSRD(r7)
isync
- beq cr7,1f
STXVD2X(0,R1,R8)
bl get_vsr
-1: li r9,-EFAULT
-2: STXVD2X(0,R0,R4)
- li r9,0
-3: beq cr7,4f
+#ifdef __LITTLE_ENDIAN__
+ XXSWAPD(0,0)
+#endif
+ STXVD2X(0,R0,R4)
LXVD2X(0,R1,R8)
-4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1)
+ PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1)
mtlr r0
MTMSRD(r6)
isync
mr r3,r9
addi r1,r1,STKFRM
blr
- EX_TABLE(2b,3b)
-
#endif /* CONFIG_VSX */
+/* Convert single-precision to double, without disturbing FPRs. */
+/* conv_sp_to_dp(float *sp, double *dp) */
+_GLOBAL(conv_sp_to_dp)
+ mfmsr r6
+ ori r7, r6, MSR_FP
+ MTMSRD(r7)
+ isync
+ stfd fr0, -16(r1)
+ lfs fr0, 0(r3)
+ stfd fr0, 0(r4)
+ lfd fr0, -16(r1)
+ MTMSRD(r6)
+ isync
+ blr
+
+/* Convert double-precision to single, without disturbing FPRs. */
+/* conv_dp_to_sp(double *dp, float *sp) */
+_GLOBAL(conv_dp_to_sp)
+ mfmsr r6
+ ori r7, r6, MSR_FP
+ MTMSRD(r7)
+ isync
+ stfd fr0, -16(r1)
+ lfd fr0, 0(r3)
+ stfs fr0, 0(r4)
+ lfd fr0, -16(r1)
+ MTMSRD(r6)
+ isync
+ blr
+
#endif /* CONFIG_PPC_FPU */
diff --git a/arch/powerpc/lib/mem_64.S b/arch/powerpc/lib/mem_64.S
index 85fa9869aec5..ec531de99996 100644
--- a/arch/powerpc/lib/mem_64.S
+++ b/arch/powerpc/lib/mem_64.S
@@ -13,6 +13,23 @@
#include <asm/ppc_asm.h>
#include <asm/export.h>
+_GLOBAL(__memset16)
+ rlwimi r4,r4,16,0,15
+ /* fall through */
+
+_GLOBAL(__memset32)
+ rldimi r4,r4,32,0
+ /* fall through */
+
+_GLOBAL(__memset64)
+ neg r0,r3
+ andi. r0,r0,7
+ cmplw cr1,r5,r0
+ b .Lms
+EXPORT_SYMBOL(__memset16)
+EXPORT_SYMBOL(__memset32)
+EXPORT_SYMBOL(__memset64)
+
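For the 64-bit versions above, __memset16/__memset32/__memset64 only replicate the pattern up to a full doubleword (rlwimi/rldimi) and then branch to the shared fill loop at .Lms. A small C sketch of that replication step (illustration only):

#include <stdint.h>

/* Replicate a 16-bit or 32-bit pattern across a 64-bit doubleword, mirroring
 * the rlwimi/rldimi sequence the new entry points fall through above. */
static uint64_t replicate16(uint16_t v)
{
	uint64_t x = v;

	x |= x << 16;		/* rlwimi r4,r4,16,0,15 */
	x |= x << 32;		/* rldimi r4,r4,32,0    */
	return x;
}

static uint64_t replicate32(uint32_t v)
{
	uint64_t x = v;

	return x | (x << 32);	/* rldimi r4,r4,32,0 */
}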
_GLOBAL(memset)
neg r0,r3
rlwimi r4,r4,8,16,23
@@ -20,7 +37,7 @@ _GLOBAL(memset)
rlwimi r4,r4,16,0,15
cmplw cr1,r5,r0 /* do we get that far? */
rldimi r4,r4,32,0
- PPC_MTOCRF(1,r0)
+.Lms: PPC_MTOCRF(1,r0)
mr r6,r3
blt cr1,8f
beq+ 3f /* if already 8-byte aligned */
diff --git a/arch/powerpc/lib/memcpy_power7.S b/arch/powerpc/lib/memcpy_power7.S
index 786234fd4e91..193909abd18b 100644
--- a/arch/powerpc/lib/memcpy_power7.S
+++ b/arch/powerpc/lib/memcpy_power7.S
@@ -261,12 +261,12 @@ _GLOBAL(memcpy_power7)
.machine push
.machine "power4"
- dcbt r0,r6,0b01000
- dcbt r0,r7,0b01010
- dcbtst r0,r9,0b01000
- dcbtst r0,r10,0b01010
+ dcbt 0,r6,0b01000
+ dcbt 0,r7,0b01010
+ dcbtst 0,r9,0b01000
+ dcbtst 0,r10,0b01010
eieio
- dcbt r0,r8,0b01010 /* GO */
+ dcbt 0,r8,0b01010 /* GO */
.machine pop
beq cr1,.Lunwind_stack_nonvmx_copy
@@ -321,26 +321,26 @@ _GLOBAL(memcpy_power7)
li r11,48
bf cr7*4+3,5f
- lvx v1,r0,r4
+ lvx v1,0,r4
addi r4,r4,16
- stvx v1,r0,r3
+ stvx v1,0,r3
addi r3,r3,16
5: bf cr7*4+2,6f
- lvx v1,r0,r4
+ lvx v1,0,r4
lvx v0,r4,r9
addi r4,r4,32
- stvx v1,r0,r3
+ stvx v1,0,r3
stvx v0,r3,r9
addi r3,r3,32
6: bf cr7*4+1,7f
- lvx v3,r0,r4
+ lvx v3,0,r4
lvx v2,r4,r9
lvx v1,r4,r10
lvx v0,r4,r11
addi r4,r4,64
- stvx v3,r0,r3
+ stvx v3,0,r3
stvx v2,r3,r9
stvx v1,r3,r10
stvx v0,r3,r11
@@ -366,7 +366,7 @@ _GLOBAL(memcpy_power7)
*/
.align 5
8:
- lvx v7,r0,r4
+ lvx v7,0,r4
lvx v6,r4,r9
lvx v5,r4,r10
lvx v4,r4,r11
@@ -375,7 +375,7 @@ _GLOBAL(memcpy_power7)
lvx v1,r4,r15
lvx v0,r4,r16
addi r4,r4,128
- stvx v7,r0,r3
+ stvx v7,0,r3
stvx v6,r3,r9
stvx v5,r3,r10
stvx v4,r3,r11
@@ -396,29 +396,29 @@ _GLOBAL(memcpy_power7)
mtocrf 0x01,r6
bf cr7*4+1,9f
- lvx v3,r0,r4
+ lvx v3,0,r4
lvx v2,r4,r9
lvx v1,r4,r10
lvx v0,r4,r11
addi r4,r4,64
- stvx v3,r0,r3
+ stvx v3,0,r3
stvx v2,r3,r9
stvx v1,r3,r10
stvx v0,r3,r11
addi r3,r3,64
9: bf cr7*4+2,10f
- lvx v1,r0,r4
+ lvx v1,0,r4
lvx v0,r4,r9
addi r4,r4,32
- stvx v1,r0,r3
+ stvx v1,0,r3
stvx v0,r3,r9
addi r3,r3,32
10: bf cr7*4+3,11f
- lvx v1,r0,r4
+ lvx v1,0,r4
addi r4,r4,16
- stvx v1,r0,r3
+ stvx v1,0,r3
addi r3,r3,16
/* Up to 15B to go */
@@ -499,25 +499,25 @@ _GLOBAL(memcpy_power7)
addi r4,r4,16
bf cr7*4+3,5f
- lvx v1,r0,r4
+ lvx v1,0,r4
VPERM(v8,v0,v1,v16)
addi r4,r4,16
- stvx v8,r0,r3
+ stvx v8,0,r3
addi r3,r3,16
vor v0,v1,v1
5: bf cr7*4+2,6f
- lvx v1,r0,r4
+ lvx v1,0,r4
VPERM(v8,v0,v1,v16)
lvx v0,r4,r9
VPERM(v9,v1,v0,v16)
addi r4,r4,32
- stvx v8,r0,r3
+ stvx v8,0,r3
stvx v9,r3,r9
addi r3,r3,32
6: bf cr7*4+1,7f
- lvx v3,r0,r4
+ lvx v3,0,r4
VPERM(v8,v0,v3,v16)
lvx v2,r4,r9
VPERM(v9,v3,v2,v16)
@@ -526,7 +526,7 @@ _GLOBAL(memcpy_power7)
lvx v0,r4,r11
VPERM(v11,v1,v0,v16)
addi r4,r4,64
- stvx v8,r0,r3
+ stvx v8,0,r3
stvx v9,r3,r9
stvx v10,r3,r10
stvx v11,r3,r11
@@ -552,7 +552,7 @@ _GLOBAL(memcpy_power7)
*/
.align 5
8:
- lvx v7,r0,r4
+ lvx v7,0,r4
VPERM(v8,v0,v7,v16)
lvx v6,r4,r9
VPERM(v9,v7,v6,v16)
@@ -569,7 +569,7 @@ _GLOBAL(memcpy_power7)
lvx v0,r4,r16
VPERM(v15,v1,v0,v16)
addi r4,r4,128
- stvx v8,r0,r3
+ stvx v8,0,r3
stvx v9,r3,r9
stvx v10,r3,r10
stvx v11,r3,r11
@@ -590,7 +590,7 @@ _GLOBAL(memcpy_power7)
mtocrf 0x01,r6
bf cr7*4+1,9f
- lvx v3,r0,r4
+ lvx v3,0,r4
VPERM(v8,v0,v3,v16)
lvx v2,r4,r9
VPERM(v9,v3,v2,v16)
@@ -599,27 +599,27 @@ _GLOBAL(memcpy_power7)
lvx v0,r4,r11
VPERM(v11,v1,v0,v16)
addi r4,r4,64
- stvx v8,r0,r3
+ stvx v8,0,r3
stvx v9,r3,r9
stvx v10,r3,r10
stvx v11,r3,r11
addi r3,r3,64
9: bf cr7*4+2,10f
- lvx v1,r0,r4
+ lvx v1,0,r4
VPERM(v8,v0,v1,v16)
lvx v0,r4,r9
VPERM(v9,v1,v0,v16)
addi r4,r4,32
- stvx v8,r0,r3
+ stvx v8,0,r3
stvx v9,r3,r9
addi r3,r3,32
10: bf cr7*4+3,11f
- lvx v1,r0,r4
+ lvx v1,0,r4
VPERM(v8,v0,v1,v16)
addi r4,r4,16
- stvx v8,r0,r3
+ stvx v8,0,r3
addi r3,r3,16
/* Up to 15B to go */
diff --git a/arch/powerpc/lib/quad.S b/arch/powerpc/lib/quad.S
new file mode 100644
index 000000000000..c4d12fae8724
--- /dev/null
+++ b/arch/powerpc/lib/quad.S
@@ -0,0 +1,62 @@
+/*
+ * Quadword loads and stores
+ * for use in instruction emulation.
+ *
+ * Copyright 2017 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/processor.h>
+#include <asm/ppc_asm.h>
+#include <asm/ppc-opcode.h>
+#include <asm/reg.h>
+#include <asm/asm-offsets.h>
+#include <linux/errno.h>
+
+/* do_lq(unsigned long ea, unsigned long *regs) */
+_GLOBAL(do_lq)
+1: lq r6, 0(r3)
+ std r6, 0(r4)
+ std r7, 8(r4)
+ li r3, 0
+ blr
+2: li r3, -EFAULT
+ blr
+ EX_TABLE(1b, 2b)
+
+/* do_stq(unsigned long ea, unsigned long val0, unsigned long val1) */
+_GLOBAL(do_stq)
+1: stq r4, 0(r3)
+ li r3, 0
+ blr
+2: li r3, -EFAULT
+ blr
+ EX_TABLE(1b, 2b)
+
+/* do_lqarx(unsigned long ea, unsigned long *regs) */
+_GLOBAL(do_lqarx)
+1: PPC_LQARX(6, 0, 3, 0)
+ std r6, 0(r4)
+ std r7, 8(r4)
+ li r3, 0
+ blr
+2: li r3, -EFAULT
+ blr
+ EX_TABLE(1b, 2b)
+
+/* do_stqcx(unsigned long ea, unsigned long val0, unsigned long val1,
+ unsigned int *crp) */
+
+_GLOBAL(do_stqcx)
+1: PPC_STQCX(4, 0, 3)
+ mfcr r5
+ stw r5, 0(r6)
+ li r3, 0
+ blr
+2: li r3, -EFAULT
+ blr
+ EX_TABLE(1b, 2b)
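quad.S gives the emulation code C-callable wrappers around the quadword instructions: with a 16-byte aligned effective address a single lq/stq (or lqarx/stqcx. for the reservation forms) performs the whole access, and the EX_TABLE entries convert a faulting access into a -EFAULT return. A hypothetical call-site sketch, modelled on how emulate_lq() in sstep.c (further down in this patch) uses do_lq(); fetch_quadword() itself is invented for illustration:

extern int do_lq(unsigned long ea, unsigned long *regs);

/* Fetch the quadword at 'ea' into mem[0]/mem[1] via a single lq.
 * Returns 0 on success, -EFAULT if the load faulted, -1 if unaligned
 * (the real caller falls back to two 8-byte reads in that case). */
static int fetch_quadword(unsigned long ea, unsigned long mem[2])
{
	if (ea & 0xf)
		return -1;
	return do_lq(ea, mem);
}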
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index ee33327686ae..fb9f58b868e7 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -36,14 +36,33 @@ extern char system_call_common[];
/*
* Functions in ldstfp.S
*/
-extern int do_lfs(int rn, unsigned long ea);
-extern int do_lfd(int rn, unsigned long ea);
-extern int do_stfs(int rn, unsigned long ea);
-extern int do_stfd(int rn, unsigned long ea);
-extern int do_lvx(int rn, unsigned long ea);
-extern int do_stvx(int rn, unsigned long ea);
-extern int do_lxvd2x(int rn, unsigned long ea);
-extern int do_stxvd2x(int rn, unsigned long ea);
+extern void get_fpr(int rn, double *p);
+extern void put_fpr(int rn, const double *p);
+extern void get_vr(int rn, __vector128 *p);
+extern void put_vr(int rn, __vector128 *p);
+extern void load_vsrn(int vsr, const void *p);
+extern void store_vsrn(int vsr, void *p);
+extern void conv_sp_to_dp(const float *sp, double *dp);
+extern void conv_dp_to_sp(const double *dp, float *sp);
+#endif
+
+#ifdef __powerpc64__
+/*
+ * Functions in quad.S
+ */
+extern int do_lq(unsigned long ea, unsigned long *regs);
+extern int do_stq(unsigned long ea, unsigned long val0, unsigned long val1);
+extern int do_lqarx(unsigned long ea, unsigned long *regs);
+extern int do_stqcx(unsigned long ea, unsigned long val0, unsigned long val1,
+ unsigned int *crp);
+#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define IS_LE 1
+#define IS_BE 0
+#else
+#define IS_LE 0
+#define IS_BE 1
#endif
/*
@@ -62,15 +81,17 @@ static nokprobe_inline unsigned long truncate_if_32bit(unsigned long msr,
/*
* Determine whether a conditional branch instruction would branch.
*/
-static nokprobe_inline int branch_taken(unsigned int instr, struct pt_regs *regs)
+static nokprobe_inline int branch_taken(unsigned int instr,
+ const struct pt_regs *regs,
+ struct instruction_op *op)
{
unsigned int bo = (instr >> 21) & 0x1f;
unsigned int bi;
if ((bo & 4) == 0) {
/* decrement counter */
- --regs->ctr;
- if (((bo >> 1) & 1) ^ (regs->ctr == 0))
+ op->type |= DECCTR;
+ if (((bo >> 1) & 1) ^ (regs->ctr == 1))
return 0;
}
if ((bo & 0x10) == 0) {
@@ -82,17 +103,26 @@ static nokprobe_inline int branch_taken(unsigned int instr, struct pt_regs *regs
return 1;
}
-static nokprobe_inline long address_ok(struct pt_regs *regs, unsigned long ea, int nb)
+static nokprobe_inline long address_ok(struct pt_regs *regs,
+ unsigned long ea, int nb)
{
if (!user_mode(regs))
return 1;
- return __access_ok(ea, nb, USER_DS);
+ if (__access_ok(ea, nb, USER_DS))
+ return 1;
+ if (__access_ok(ea, 1, USER_DS))
+ /* Access overlaps the end of the user region */
+ regs->dar = USER_DS.seg;
+ else
+ regs->dar = ea;
+ return 0;
}
/*
* Calculate effective address for a D-form instruction
*/
-static nokprobe_inline unsigned long dform_ea(unsigned int instr, struct pt_regs *regs)
+static nokprobe_inline unsigned long dform_ea(unsigned int instr,
+ const struct pt_regs *regs)
{
int ra;
unsigned long ea;
@@ -102,14 +132,15 @@ static nokprobe_inline unsigned long dform_ea(unsigned int instr, struct pt_regs
if (ra)
ea += regs->gpr[ra];
- return truncate_if_32bit(regs->msr, ea);
+ return ea;
}
#ifdef __powerpc64__
/*
* Calculate effective address for a DS-form instruction
*/
-static nokprobe_inline unsigned long dsform_ea(unsigned int instr, struct pt_regs *regs)
+static nokprobe_inline unsigned long dsform_ea(unsigned int instr,
+ const struct pt_regs *regs)
{
int ra;
unsigned long ea;
@@ -119,7 +150,24 @@ static nokprobe_inline unsigned long dsform_ea(unsigned int instr, struct pt_reg
if (ra)
ea += regs->gpr[ra];
- return truncate_if_32bit(regs->msr, ea);
+ return ea;
+}
+
+/*
+ * Calculate effective address for a DQ-form instruction
+ */
+static nokprobe_inline unsigned long dqform_ea(unsigned int instr,
+ const struct pt_regs *regs)
+{
+ int ra;
+ unsigned long ea;
+
+ ra = (instr >> 16) & 0x1f;
+ ea = (signed short) (instr & ~0xf); /* sign-extend */
+ if (ra)
+ ea += regs->gpr[ra];
+
+ return ea;
}
#endif /* __powerpc64 */
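dqform_ea() keeps only the upper 12 bits of the 16-bit displacement field (the low four bits of a DQ-form instruction belong to the opcode extension), sign-extends it, and adds RA when non-zero, so the displacement is always a multiple of 16. A standalone illustration of that decoding (the instruction word below is a made-up encoding, used only to show the bit manipulation):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t instr = 0xe42187f1;	/* invented DQ-form encoding, RA = 1 */
	int ra = (instr >> 16) & 0x1f;
	long disp = (int16_t)(instr & 0xfff0);	/* clear low 4 bits, sign-extend */

	printf("ra=%d disp=%ld\n", ra, disp);	/* prints: ra=1 disp=-30736 */
	return 0;
}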
@@ -127,7 +175,7 @@ static nokprobe_inline unsigned long dsform_ea(unsigned int instr, struct pt_reg
* Calculate effective address for an X-form instruction
*/
static nokprobe_inline unsigned long xform_ea(unsigned int instr,
- struct pt_regs *regs)
+ const struct pt_regs *regs)
{
int ra, rb;
unsigned long ea;
@@ -138,7 +186,7 @@ static nokprobe_inline unsigned long xform_ea(unsigned int instr,
if (ra)
ea += regs->gpr[ra];
- return truncate_if_32bit(regs->msr, ea);
+ return ea;
}
/*
@@ -151,7 +199,6 @@ static nokprobe_inline unsigned long max_align(unsigned long x)
return x & -x; /* isolates rightmost bit */
}
-
static nokprobe_inline unsigned long byterev_2(unsigned long x)
{
return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
@@ -170,8 +217,36 @@ static nokprobe_inline unsigned long byterev_8(unsigned long x)
}
#endif
+static nokprobe_inline void do_byte_reverse(void *ptr, int nb)
+{
+ switch (nb) {
+ case 2:
+ *(u16 *)ptr = byterev_2(*(u16 *)ptr);
+ break;
+ case 4:
+ *(u32 *)ptr = byterev_4(*(u32 *)ptr);
+ break;
+#ifdef __powerpc64__
+ case 8:
+ *(unsigned long *)ptr = byterev_8(*(unsigned long *)ptr);
+ break;
+ case 16: {
+ unsigned long *up = (unsigned long *)ptr;
+ unsigned long tmp;
+ tmp = byterev_8(up[0]);
+ up[0] = byterev_8(up[1]);
+ up[1] = tmp;
+ break;
+ }
+#endif
+ default:
+ WARN_ON_ONCE(1);
+ }
+}
+
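do_byte_reverse() handles the 16-byte case as two 8-byte reversals whose results swap places, which is exactly a full 16-byte byte reversal. A self-contained check of that identity (sketch; byterev_8() here just stands in for the kernel helper of the same name):

#include <assert.h>
#include <stdint.h>
#include <string.h>

static uint64_t byterev_8(uint64_t x)
{
	return __builtin_bswap64(x);
}

int main(void)
{
	uint8_t buf[16], ref[16];
	uint64_t up[2], tmp;
	int i;

	for (i = 0; i < 16; i++) {
		buf[i] = i;
		ref[i] = 15 - i;	/* expected: fully reversed bytes */
	}

	memcpy(up, buf, 16);
	tmp = byterev_8(up[0]);		/* same steps as the case 16 above */
	up[0] = byterev_8(up[1]);
	up[1] = tmp;
	memcpy(buf, up, 16);

	assert(memcmp(buf, ref, 16) == 0);
	return 0;
}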
static nokprobe_inline int read_mem_aligned(unsigned long *dest,
- unsigned long ea, int nb)
+ unsigned long ea, int nb,
+ struct pt_regs *regs)
{
int err = 0;
unsigned long x = 0;
@@ -194,59 +269,77 @@ static nokprobe_inline int read_mem_aligned(unsigned long *dest,
}
if (!err)
*dest = x;
+ else
+ regs->dar = ea;
return err;
}
-static nokprobe_inline int read_mem_unaligned(unsigned long *dest,
- unsigned long ea, int nb, struct pt_regs *regs)
+/*
+ * Copy from userspace to a buffer, using the largest possible
+ * aligned accesses, up to sizeof(long).
+ */
+static int nokprobe_inline copy_mem_in(u8 *dest, unsigned long ea, int nb,
+ struct pt_regs *regs)
{
- int err;
- unsigned long x, b, c;
-#ifdef __LITTLE_ENDIAN__
- int len = nb; /* save a copy of the length for byte reversal */
-#endif
+ int err = 0;
+ int c;
- /* unaligned, do this in pieces */
- x = 0;
for (; nb > 0; nb -= c) {
-#ifdef __LITTLE_ENDIAN__
- c = 1;
-#endif
-#ifdef __BIG_ENDIAN__
c = max_align(ea);
-#endif
if (c > nb)
c = max_align(nb);
- err = read_mem_aligned(&b, ea, c);
- if (err)
- return err;
- x = (x << (8 * c)) + b;
- ea += c;
- }
-#ifdef __LITTLE_ENDIAN__
- switch (len) {
- case 2:
- *dest = byterev_2(x);
- break;
- case 4:
- *dest = byterev_4(x);
- break;
+ switch (c) {
+ case 1:
+ err = __get_user(*dest, (unsigned char __user *) ea);
+ break;
+ case 2:
+ err = __get_user(*(u16 *)dest,
+ (unsigned short __user *) ea);
+ break;
+ case 4:
+ err = __get_user(*(u32 *)dest,
+ (unsigned int __user *) ea);
+ break;
#ifdef __powerpc64__
- case 8:
- *dest = byterev_8(x);
- break;
+ case 8:
+ err = __get_user(*(unsigned long *)dest,
+ (unsigned long __user *) ea);
+ break;
#endif
+ }
+ if (err) {
+ regs->dar = ea;
+ return err;
+ }
+ dest += c;
+ ea += c;
}
-#endif
-#ifdef __BIG_ENDIAN__
- *dest = x;
-#endif
return 0;
}
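copy_mem_in() (and copy_mem_out() below) walk the buffer in the largest naturally aligned chunks, limited by both the current address alignment and the bytes still to copy. A standalone demonstration of how an unaligned 7-byte access decomposes; chunk() reproduces the effect of max_align() for this purpose (the cap at sizeof(long) is assumed here, only the rightmost-bit trick is visible in this patch):

#include <stdio.h>

static unsigned long chunk(unsigned long x)
{
	x |= sizeof(unsigned long);	/* cap at 8, and handle x == 0 */
	return x & -x;			/* isolate the rightmost set bit */
}

int main(void)
{
	unsigned long ea = 0x1003, nb = 7, c;

	for (; nb > 0; nb -= c, ea += c) {
		c = chunk(ea);
		if (c > nb)
			c = chunk(nb);
		printf("copy %lu byte(s) at 0x%lx\n", c, ea);
	}
	return 0;	/* copies chunks of 1, 4 and 2 bytes */
}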
+static nokprobe_inline int read_mem_unaligned(unsigned long *dest,
+ unsigned long ea, int nb,
+ struct pt_regs *regs)
+{
+ union {
+ unsigned long ul;
+ u8 b[sizeof(unsigned long)];
+ } u;
+ int i;
+ int err;
+
+ u.ul = 0;
+ i = IS_BE ? sizeof(unsigned long) - nb : 0;
+ err = copy_mem_in(&u.b[i], ea, nb, regs);
+ if (!err)
+ *dest = u.ul;
+ return err;
+}
+
/*
* Read memory at address ea for nb bytes, return 0 for success
- * or -EFAULT if an error occurred.
+ * or -EFAULT if an error occurred. N.B. nb must be 1, 2, 4 or 8.
+ * If nb < sizeof(long), the result is right-justified on BE systems.
*/
static int read_mem(unsigned long *dest, unsigned long ea, int nb,
struct pt_regs *regs)
@@ -254,13 +347,14 @@ static int read_mem(unsigned long *dest, unsigned long ea, int nb,
if (!address_ok(regs, ea, nb))
return -EFAULT;
if ((ea & (nb - 1)) == 0)
- return read_mem_aligned(dest, ea, nb);
+ return read_mem_aligned(dest, ea, nb, regs);
return read_mem_unaligned(dest, ea, nb, regs);
}
NOKPROBE_SYMBOL(read_mem);
static nokprobe_inline int write_mem_aligned(unsigned long val,
- unsigned long ea, int nb)
+ unsigned long ea, int nb,
+ struct pt_regs *regs)
{
int err = 0;
@@ -280,51 +374,72 @@ static nokprobe_inline int write_mem_aligned(unsigned long val,
break;
#endif
}
+ if (err)
+ regs->dar = ea;
return err;
}
-static nokprobe_inline int write_mem_unaligned(unsigned long val,
- unsigned long ea, int nb, struct pt_regs *regs)
+/*
+ * Copy from a buffer to userspace, using the largest possible
+ * aligned accesses, up to sizeof(long).
+ */
+static int nokprobe_inline copy_mem_out(u8 *dest, unsigned long ea, int nb,
+ struct pt_regs *regs)
{
- int err;
- unsigned long c;
+ int err = 0;
+ int c;
-#ifdef __LITTLE_ENDIAN__
- switch (nb) {
- case 2:
- val = byterev_2(val);
- break;
- case 4:
- val = byterev_4(val);
- break;
-#ifdef __powerpc64__
- case 8:
- val = byterev_8(val);
- break;
-#endif
- }
-#endif
- /* unaligned or little-endian, do this in pieces */
for (; nb > 0; nb -= c) {
-#ifdef __LITTLE_ENDIAN__
- c = 1;
-#endif
-#ifdef __BIG_ENDIAN__
c = max_align(ea);
-#endif
if (c > nb)
c = max_align(nb);
- err = write_mem_aligned(val >> (nb - c) * 8, ea, c);
- if (err)
+ switch (c) {
+ case 1:
+ err = __put_user(*dest, (unsigned char __user *) ea);
+ break;
+ case 2:
+ err = __put_user(*(u16 *)dest,
+ (unsigned short __user *) ea);
+ break;
+ case 4:
+ err = __put_user(*(u32 *)dest,
+ (unsigned int __user *) ea);
+ break;
+#ifdef __powerpc64__
+ case 8:
+ err = __put_user(*(unsigned long *)dest,
+ (unsigned long __user *) ea);
+ break;
+#endif
+ }
+ if (err) {
+ regs->dar = ea;
return err;
+ }
+ dest += c;
ea += c;
}
return 0;
}
+static nokprobe_inline int write_mem_unaligned(unsigned long val,
+ unsigned long ea, int nb,
+ struct pt_regs *regs)
+{
+ union {
+ unsigned long ul;
+ u8 b[sizeof(unsigned long)];
+ } u;
+ int i;
+
+ u.ul = val;
+ i = IS_BE ? sizeof(unsigned long) - nb : 0;
+ return copy_mem_out(&u.b[i], ea, nb, regs);
+}
+
/*
* Write memory at address ea for nb bytes, return 0 for success
- * or -EFAULT if an error occurred.
+ * or -EFAULT if an error occurred. N.B. nb must be 1, 2, 4 or 8.
*/
static int write_mem(unsigned long val, unsigned long ea, int nb,
struct pt_regs *regs)
@@ -332,163 +447,465 @@ static int write_mem(unsigned long val, unsigned long ea, int nb,
if (!address_ok(regs, ea, nb))
return -EFAULT;
if ((ea & (nb - 1)) == 0)
- return write_mem_aligned(val, ea, nb);
+ return write_mem_aligned(val, ea, nb, regs);
return write_mem_unaligned(val, ea, nb, regs);
}
NOKPROBE_SYMBOL(write_mem);
#ifdef CONFIG_PPC_FPU
/*
- * Check the address and alignment, and call func to do the actual
- * load or store.
+ * These access either the real FP register or the image in the
+ * thread_struct, depending on regs->msr & MSR_FP.
*/
-static int do_fp_load(int rn, int (*func)(int, unsigned long),
- unsigned long ea, int nb,
- struct pt_regs *regs)
+static int do_fp_load(struct instruction_op *op, unsigned long ea,
+ struct pt_regs *regs, bool cross_endian)
{
- int err;
+ int err, rn, nb;
union {
- double dbl;
- unsigned long ul[2];
- struct {
-#ifdef __BIG_ENDIAN__
- unsigned _pad_;
- unsigned word;
-#endif
-#ifdef __LITTLE_ENDIAN__
- unsigned word;
- unsigned _pad_;
-#endif
- } single;
- } data;
- unsigned long ptr;
-
+ int i;
+ unsigned int u;
+ float f;
+ double d[2];
+ unsigned long l[2];
+ u8 b[2 * sizeof(double)];
+ } u;
+
+ nb = GETSIZE(op->type);
if (!address_ok(regs, ea, nb))
return -EFAULT;
- if ((ea & 3) == 0)
- return (*func)(rn, ea);
- ptr = (unsigned long) &data.ul;
- if (sizeof(unsigned long) == 8 || nb == 4) {
- err = read_mem_unaligned(&data.ul[0], ea, nb, regs);
- if (nb == 4)
- ptr = (unsigned long)&(data.single.word);
- } else {
- /* reading a double on 32-bit */
- err = read_mem_unaligned(&data.ul[0], ea, 4, regs);
- if (!err)
- err = read_mem_unaligned(&data.ul[1], ea + 4, 4, regs);
- }
+ rn = op->reg;
+ err = copy_mem_in(u.b, ea, nb, regs);
if (err)
return err;
- return (*func)(rn, ptr);
+ if (unlikely(cross_endian)) {
+ do_byte_reverse(u.b, min(nb, 8));
+ if (nb == 16)
+ do_byte_reverse(&u.b[8], 8);
+ }
+ preempt_disable();
+ if (nb == 4) {
+ if (op->type & FPCONV)
+ conv_sp_to_dp(&u.f, &u.d[0]);
+ else if (op->type & SIGNEXT)
+ u.l[0] = u.i;
+ else
+ u.l[0] = u.u;
+ }
+ if (regs->msr & MSR_FP)
+ put_fpr(rn, &u.d[0]);
+ else
+ current->thread.TS_FPR(rn) = u.l[0];
+ if (nb == 16) {
+ /* lfdp */
+ rn |= 1;
+ if (regs->msr & MSR_FP)
+ put_fpr(rn, &u.d[1]);
+ else
+ current->thread.TS_FPR(rn) = u.l[1];
+ }
+ preempt_enable();
+ return 0;
}
NOKPROBE_SYMBOL(do_fp_load);
-static int do_fp_store(int rn, int (*func)(int, unsigned long),
- unsigned long ea, int nb,
- struct pt_regs *regs)
+static int do_fp_store(struct instruction_op *op, unsigned long ea,
+ struct pt_regs *regs, bool cross_endian)
{
- int err;
+ int rn, nb;
union {
- double dbl;
- unsigned long ul[2];
- struct {
-#ifdef __BIG_ENDIAN__
- unsigned _pad_;
- unsigned word;
-#endif
-#ifdef __LITTLE_ENDIAN__
- unsigned word;
- unsigned _pad_;
-#endif
- } single;
- } data;
- unsigned long ptr;
-
+ unsigned int u;
+ float f;
+ double d[2];
+ unsigned long l[2];
+ u8 b[2 * sizeof(double)];
+ } u;
+
+ nb = GETSIZE(op->type);
if (!address_ok(regs, ea, nb))
return -EFAULT;
- if ((ea & 3) == 0)
- return (*func)(rn, ea);
- ptr = (unsigned long) &data.ul[0];
- if (sizeof(unsigned long) == 8 || nb == 4) {
- if (nb == 4)
- ptr = (unsigned long)&(data.single.word);
- err = (*func)(rn, ptr);
- if (err)
- return err;
- err = write_mem_unaligned(data.ul[0], ea, nb, regs);
- } else {
- /* writing a double on 32-bit */
- err = (*func)(rn, ptr);
- if (err)
- return err;
- err = write_mem_unaligned(data.ul[0], ea, 4, regs);
- if (!err)
- err = write_mem_unaligned(data.ul[1], ea + 4, 4, regs);
+ rn = op->reg;
+ preempt_disable();
+ if (regs->msr & MSR_FP)
+ get_fpr(rn, &u.d[0]);
+ else
+ u.l[0] = current->thread.TS_FPR(rn);
+ if (nb == 4) {
+ if (op->type & FPCONV)
+ conv_dp_to_sp(&u.d[0], &u.f);
+ else
+ u.u = u.l[0];
}
- return err;
+ if (nb == 16) {
+ rn |= 1;
+ if (regs->msr & MSR_FP)
+ get_fpr(rn, &u.d[1]);
+ else
+ u.l[1] = current->thread.TS_FPR(rn);
+ }
+ preempt_enable();
+ if (unlikely(cross_endian)) {
+ do_byte_reverse(u.b, min(nb, 8));
+ if (nb == 16)
+ do_byte_reverse(&u.b[8], 8);
+ }
+ return copy_mem_out(u.b, ea, nb, regs);
}
NOKPROBE_SYMBOL(do_fp_store);
#endif
#ifdef CONFIG_ALTIVEC
/* For Altivec/VMX, no need to worry about alignment */
-static nokprobe_inline int do_vec_load(int rn, int (*func)(int, unsigned long),
- unsigned long ea, struct pt_regs *regs)
+static nokprobe_inline int do_vec_load(int rn, unsigned long ea,
+ int size, struct pt_regs *regs,
+ bool cross_endian)
{
+ int err;
+ union {
+ __vector128 v;
+ u8 b[sizeof(__vector128)];
+ } u = {};
+
if (!address_ok(regs, ea & ~0xfUL, 16))
return -EFAULT;
- return (*func)(rn, ea);
+ /* align to multiple of size */
+ ea &= ~(size - 1);
+ err = copy_mem_in(&u.b[ea & 0xf], ea, size, regs);
+ if (err)
+ return err;
+ if (unlikely(cross_endian))
+ do_byte_reverse(&u.b[ea & 0xf], size);
+ preempt_disable();
+ if (regs->msr & MSR_VEC)
+ put_vr(rn, &u.v);
+ else
+ current->thread.vr_state.vr[rn] = u.v;
+ preempt_enable();
+ return 0;
}
-static nokprobe_inline int do_vec_store(int rn, int (*func)(int, unsigned long),
- unsigned long ea, struct pt_regs *regs)
+static nokprobe_inline int do_vec_store(int rn, unsigned long ea,
+ int size, struct pt_regs *regs,
+ bool cross_endian)
{
+ union {
+ __vector128 v;
+ u8 b[sizeof(__vector128)];
+ } u;
+
if (!address_ok(regs, ea & ~0xfUL, 16))
return -EFAULT;
- return (*func)(rn, ea);
+ /* align to multiple of size */
+ ea &= ~(size - 1);
+
+ preempt_disable();
+ if (regs->msr & MSR_VEC)
+ get_vr(rn, &u.v);
+ else
+ u.v = current->thread.vr_state.vr[rn];
+ preempt_enable();
+ if (unlikely(cross_endian))
+ do_byte_reverse(&u.b[ea & 0xf], size);
+ return copy_mem_out(&u.b[ea & 0xf], ea, size, regs);
}
#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_VSX
-static nokprobe_inline int do_vsx_load(int rn, int (*func)(int, unsigned long),
- unsigned long ea, struct pt_regs *regs)
+#ifdef __powerpc64__
+static nokprobe_inline int emulate_lq(struct pt_regs *regs, unsigned long ea,
+ int reg, bool cross_endian)
{
int err;
- unsigned long val[2];
if (!address_ok(regs, ea, 16))
return -EFAULT;
- if ((ea & 3) == 0)
- return (*func)(rn, ea);
- err = read_mem_unaligned(&val[0], ea, 8, regs);
- if (!err)
- err = read_mem_unaligned(&val[1], ea + 8, 8, regs);
- if (!err)
- err = (*func)(rn, (unsigned long) &val[0]);
+ /* if aligned, should be atomic */
+ if ((ea & 0xf) == 0) {
+ err = do_lq(ea, &regs->gpr[reg]);
+ } else {
+ err = read_mem(&regs->gpr[reg + IS_LE], ea, 8, regs);
+ if (!err)
+ err = read_mem(&regs->gpr[reg + IS_BE], ea + 8, 8, regs);
+ }
+ if (!err && unlikely(cross_endian))
+ do_byte_reverse(&regs->gpr[reg], 16);
return err;
}
-static nokprobe_inline int do_vsx_store(int rn, int (*func)(int, unsigned long),
- unsigned long ea, struct pt_regs *regs)
+static nokprobe_inline int emulate_stq(struct pt_regs *regs, unsigned long ea,
+ int reg, bool cross_endian)
{
int err;
- unsigned long val[2];
+ unsigned long vals[2];
if (!address_ok(regs, ea, 16))
return -EFAULT;
- if ((ea & 3) == 0)
- return (*func)(rn, ea);
- err = (*func)(rn, (unsigned long) &val[0]);
- if (err)
- return err;
- err = write_mem_unaligned(val[0], ea, 8, regs);
+ vals[0] = regs->gpr[reg];
+ vals[1] = regs->gpr[reg + 1];
+ if (unlikely(cross_endian))
+ do_byte_reverse(vals, 16);
+
+ /* if aligned, should be atomic */
+ if ((ea & 0xf) == 0)
+ return do_stq(ea, vals[0], vals[1]);
+
+ err = write_mem(vals[IS_LE], ea, 8, regs);
if (!err)
- err = write_mem_unaligned(val[1], ea + 8, 8, regs);
+ err = write_mem(vals[IS_BE], ea + 8, 8, regs);
return err;
}
+#endif /* __powerpc64 */
+
+#ifdef CONFIG_VSX
+void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
+ const void *mem, bool rev)
+{
+ int size, read_size;
+ int i, j;
+ const unsigned int *wp;
+ const unsigned short *hp;
+ const unsigned char *bp;
+
+ size = GETSIZE(op->type);
+ reg->d[0] = reg->d[1] = 0;
+
+ switch (op->element_size) {
+ case 16:
+ /* whole vector; lxv[x] or lxvl[l] */
+ if (size == 0)
+ break;
+ memcpy(reg, mem, size);
+ if (IS_LE && (op->vsx_flags & VSX_LDLEFT))
+ rev = !rev;
+ if (rev)
+ do_byte_reverse(reg, 16);
+ break;
+ case 8:
+ /* scalar loads, lxvd2x, lxvdsx */
+ read_size = (size >= 8) ? 8 : size;
+ i = IS_LE ? 8 : 8 - read_size;
+ memcpy(&reg->b[i], mem, read_size);
+ if (rev)
+ do_byte_reverse(&reg->b[i], 8);
+ if (size < 8) {
+ if (op->type & SIGNEXT) {
+ /* size == 4 is the only case here */
+ reg->d[IS_LE] = (signed int) reg->d[IS_LE];
+ } else if (op->vsx_flags & VSX_FPCONV) {
+ preempt_disable();
+ conv_sp_to_dp(&reg->fp[1 + IS_LE],
+ &reg->dp[IS_LE]);
+ preempt_enable();
+ }
+ } else {
+ if (size == 16) {
+ unsigned long v = *(unsigned long *)(mem + 8);
+ reg->d[IS_BE] = !rev ? v : byterev_8(v);
+ } else if (op->vsx_flags & VSX_SPLAT)
+ reg->d[IS_BE] = reg->d[IS_LE];
+ }
+ break;
+ case 4:
+ /* lxvw4x, lxvwsx */
+ wp = mem;
+ for (j = 0; j < size / 4; ++j) {
+ i = IS_LE ? 3 - j : j;
+ reg->w[i] = !rev ? *wp++ : byterev_4(*wp++);
+ }
+ if (op->vsx_flags & VSX_SPLAT) {
+ u32 val = reg->w[IS_LE ? 3 : 0];
+ for (; j < 4; ++j) {
+ i = IS_LE ? 3 - j : j;
+ reg->w[i] = val;
+ }
+ }
+ break;
+ case 2:
+ /* lxvh8x */
+ hp = mem;
+ for (j = 0; j < size / 2; ++j) {
+ i = IS_LE ? 7 - j : j;
+ reg->h[i] = !rev ? *hp++ : byterev_2(*hp++);
+ }
+ break;
+ case 1:
+ /* lxvb16x */
+ bp = mem;
+ for (j = 0; j < size; ++j) {
+ i = IS_LE ? 15 - j : j;
+ reg->b[i] = *bp++;
+ }
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(emulate_vsx_load);
+NOKPROBE_SYMBOL(emulate_vsx_load);
+
+void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
+ void *mem, bool rev)
+{
+ int size, write_size;
+ int i, j;
+ union vsx_reg buf;
+ unsigned int *wp;
+ unsigned short *hp;
+ unsigned char *bp;
+
+ size = GETSIZE(op->type);
+
+ switch (op->element_size) {
+ case 16:
+ /* stxv, stxvx, stxvl, stxvll */
+ if (size == 0)
+ break;
+ if (IS_LE && (op->vsx_flags & VSX_LDLEFT))
+ rev = !rev;
+ if (rev) {
+ /* reverse 16 bytes */
+ buf.d[0] = byterev_8(reg->d[1]);
+ buf.d[1] = byterev_8(reg->d[0]);
+ reg = &buf;
+ }
+ memcpy(mem, reg, size);
+ break;
+ case 8:
+ /* scalar stores, stxvd2x */
+ write_size = (size >= 8) ? 8 : size;
+ i = IS_LE ? 8 : 8 - write_size;
+ if (size < 8 && op->vsx_flags & VSX_FPCONV) {
+ buf.d[0] = buf.d[1] = 0;
+ preempt_disable();
+ conv_dp_to_sp(&reg->dp[IS_LE], &buf.fp[1 + IS_LE]);
+ preempt_enable();
+ reg = &buf;
+ }
+ memcpy(mem, &reg->b[i], write_size);
+ if (size == 16)
+ memcpy(mem + 8, &reg->d[IS_BE], 8);
+ if (unlikely(rev)) {
+ do_byte_reverse(mem, write_size);
+ if (size == 16)
+ do_byte_reverse(mem + 8, 8);
+ }
+ break;
+ case 4:
+ /* stxvw4x */
+ wp = mem;
+ for (j = 0; j < size / 4; ++j) {
+ i = IS_LE ? 3 - j : j;
+ *wp++ = !rev ? reg->w[i] : byterev_4(reg->w[i]);
+ }
+ break;
+ case 2:
+ /* stxvh8x */
+ hp = mem;
+ for (j = 0; j < size / 2; ++j) {
+ i = IS_LE ? 7 - j : j;
+ *hp++ = !rev ? reg->h[i] : byterev_2(reg->h[i]);
+ }
+ break;
+ case 1:
+ /* stxvb16x */
+ bp = mem;
+ for (j = 0; j < size; ++j) {
+ i = IS_LE ? 15 - j : j;
+ *bp++ = reg->b[i];
+ }
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(emulate_vsx_store);
+NOKPROBE_SYMBOL(emulate_vsx_store);
+
+static nokprobe_inline int do_vsx_load(struct instruction_op *op,
+ unsigned long ea, struct pt_regs *regs,
+ bool cross_endian)
+{
+ int reg = op->reg;
+ u8 mem[16];
+ union vsx_reg buf;
+ int size = GETSIZE(op->type);
+
+ if (!address_ok(regs, ea, size) || copy_mem_in(mem, ea, size, regs))
+ return -EFAULT;
+
+ emulate_vsx_load(op, &buf, mem, cross_endian);
+ preempt_disable();
+ if (reg < 32) {
+ /* FP regs + extensions */
+ if (regs->msr & MSR_FP) {
+ load_vsrn(reg, &buf);
+ } else {
+ current->thread.fp_state.fpr[reg][0] = buf.d[0];
+ current->thread.fp_state.fpr[reg][1] = buf.d[1];
+ }
+ } else {
+ if (regs->msr & MSR_VEC)
+ load_vsrn(reg, &buf);
+ else
+ current->thread.vr_state.vr[reg - 32] = buf.v;
+ }
+ preempt_enable();
+ return 0;
+}
+
+static nokprobe_inline int do_vsx_store(struct instruction_op *op,
+ unsigned long ea, struct pt_regs *regs,
+ bool cross_endian)
+{
+ int reg = op->reg;
+ u8 mem[16];
+ union vsx_reg buf;
+ int size = GETSIZE(op->type);
+
+ if (!address_ok(regs, ea, size))
+ return -EFAULT;
+
+ preempt_disable();
+ if (reg < 32) {
+ /* FP regs + extensions */
+ if (regs->msr & MSR_FP) {
+ store_vsrn(reg, &buf);
+ } else {
+ buf.d[0] = current->thread.fp_state.fpr[reg][0];
+ buf.d[1] = current->thread.fp_state.fpr[reg][1];
+ }
+ } else {
+ if (regs->msr & MSR_VEC)
+ store_vsrn(reg, &buf);
+ else
+ buf.v = current->thread.vr_state.vr[reg - 32];
+ }
+ preempt_enable();
+ emulate_vsx_store(op, &buf, mem, cross_endian);
+ return copy_mem_out(mem, ea, size, regs);
+}
#endif /* CONFIG_VSX */
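In emulate_vsx_load()/emulate_vsx_store() above, the per-element loops place memory element j into lane (IS_LE ? n-1-j : j) of the register image, compensating for the host byte order of the union so that the first element in storage ends up in the lane the instruction defines. A standalone sketch of the word case, i.e. the lxvw4x-style loop (illustration only, assuming a little-endian host):

#include <stdint.h>
#include <stdio.h>

#define IS_LE 1			/* assumed little-endian host for the example */

int main(void)
{
	uint32_t mem[4] = { 0x11111111, 0x22222222, 0x33333333, 0x44444444 };
	uint32_t vsr_w[4];	/* word view of the 128-bit register image */
	int i, j;

	for (j = 0; j < 4; j++) {
		i = IS_LE ? 3 - j : j;	/* same indexing as the case 4 loop */
		vsr_w[i] = mem[j];
	}
	for (i = 0; i < 4; i++)
		printf("w[%d] = 0x%08x\n", i, (unsigned)vsr_w[i]);
	return 0;	/* prints w[0]=0x44444444 ... w[3]=0x11111111 */
}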
+int emulate_dcbz(unsigned long ea, struct pt_regs *regs)
+{
+ int err;
+ unsigned long i, size;
+
+#ifdef __powerpc64__
+ size = ppc64_caches.l1d.block_size;
+ if (!(regs->msr & MSR_64BIT))
+ ea &= 0xffffffffUL;
+#else
+ size = L1_CACHE_BYTES;
+#endif
+ ea &= ~(size - 1);
+ if (!address_ok(regs, ea, size))
+ return -EFAULT;
+ for (i = 0; i < size; i += sizeof(long)) {
+ err = __put_user(0, (unsigned long __user *) (ea + i));
+ if (err) {
+ regs->dar = ea;
+ return err;
+ }
+ }
+ return 0;
+}
+NOKPROBE_SYMBOL(emulate_dcbz);
+
#define __put_user_asmx(x, addr, err, op, cr) \
__asm__ __volatile__( \
"1: " op " %2,0,%3\n" \
@@ -526,24 +943,27 @@ static nokprobe_inline int do_vsx_store(int rn, int (*func)(int, unsigned long),
: "=r" (err) \
: "r" (addr), "i" (-EFAULT), "0" (err))
-static nokprobe_inline void set_cr0(struct pt_regs *regs, int rd)
+static nokprobe_inline void set_cr0(const struct pt_regs *regs,
+ struct instruction_op *op, int rd)
{
long val = regs->gpr[rd];
- regs->ccr = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
+ op->type |= SETCC;
+ op->ccval = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
#ifdef __powerpc64__
if (!(regs->msr & MSR_64BIT))
val = (int) val;
#endif
if (val < 0)
- regs->ccr |= 0x80000000;
+ op->ccval |= 0x80000000;
else if (val > 0)
- regs->ccr |= 0x40000000;
+ op->ccval |= 0x40000000;
else
- regs->ccr |= 0x20000000;
+ op->ccval |= 0x20000000;
}
-static nokprobe_inline void add_with_carry(struct pt_regs *regs, int rd,
+static nokprobe_inline void add_with_carry(const struct pt_regs *regs,
+ struct instruction_op *op, int rd,
unsigned long val1, unsigned long val2,
unsigned long carry_in)
{
@@ -551,24 +971,29 @@ static nokprobe_inline void add_with_carry(struct pt_regs *regs, int rd,
if (carry_in)
++val;
- regs->gpr[rd] = val;
+ op->type = COMPUTE + SETREG + SETXER;
+ op->reg = rd;
+ op->val = val;
#ifdef __powerpc64__
if (!(regs->msr & MSR_64BIT)) {
val = (unsigned int) val;
val1 = (unsigned int) val1;
}
#endif
+ op->xerval = regs->xer;
if (val < val1 || (carry_in && val == val1))
- regs->xer |= XER_CA;
+ op->xerval |= XER_CA;
else
- regs->xer &= ~XER_CA;
+ op->xerval &= ~XER_CA;
}
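The carry test in add_with_carry() above relies on unsigned wrap-around: the truncated sum is below the first operand exactly when the addition carried out of the register width, with the carry-in covered by the equality case. A self-contained check of that rule (sketch):

#include <assert.h>

/* Carry-out of val1 + val2 + carry_in, using the same test as above. */
static int carry_out(unsigned long val1, unsigned long val2,
		     unsigned long carry_in)
{
	unsigned long val = val1 + val2 + carry_in;

	return val < val1 || (carry_in && val == val1);
}

int main(void)
{
	assert(carry_out(~0UL, 1, 0) == 1);	/* all-ones + 1 carries */
	assert(carry_out(~0UL, 0, 1) == 1);	/* carry-in alone can carry */
	assert(carry_out(1, 2, 0) == 0);	/* small sum, no carry */
	return 0;
}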
-static nokprobe_inline void do_cmp_signed(struct pt_regs *regs, long v1, long v2,
- int crfld)
+static nokprobe_inline void do_cmp_signed(const struct pt_regs *regs,
+ struct instruction_op *op,
+ long v1, long v2, int crfld)
{
unsigned int crval, shift;
+ op->type = COMPUTE + SETCC;
crval = (regs->xer >> 31) & 1; /* get SO bit */
if (v1 < v2)
crval |= 8;
@@ -577,14 +1002,17 @@ static nokprobe_inline void do_cmp_signed(struct pt_regs *regs, long v1, long v2
else
crval |= 2;
shift = (7 - crfld) * 4;
- regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
+ op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
}
-static nokprobe_inline void do_cmp_unsigned(struct pt_regs *regs, unsigned long v1,
- unsigned long v2, int crfld)
+static nokprobe_inline void do_cmp_unsigned(const struct pt_regs *regs,
+ struct instruction_op *op,
+ unsigned long v1,
+ unsigned long v2, int crfld)
{
unsigned int crval, shift;
+ op->type = COMPUTE + SETCC;
crval = (regs->xer >> 31) & 1; /* get SO bit */
if (v1 < v2)
crval |= 8;
@@ -593,7 +1021,90 @@ static nokprobe_inline void do_cmp_unsigned(struct pt_regs *regs, unsigned long
else
crval |= 2;
shift = (7 - crfld) * 4;
- regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
+ op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
+}
+
+static nokprobe_inline void do_cmpb(const struct pt_regs *regs,
+ struct instruction_op *op,
+ unsigned long v1, unsigned long v2)
+{
+ unsigned long long out_val, mask;
+ int i;
+
+ out_val = 0;
+ for (i = 0; i < 8; i++) {
+ mask = 0xffUL << (i * 8);
+ if ((v1 & mask) == (v2 & mask))
+ out_val |= mask;
+ }
+ op->val = out_val;
+}
+
+/*
+ * The size parameter is used to adjust the equivalent popcnt instruction.
+ * popcntb = 8, popcntw = 32, popcntd = 64
+ */
+static nokprobe_inline void do_popcnt(const struct pt_regs *regs,
+ struct instruction_op *op,
+ unsigned long v1, int size)
+{
+ unsigned long long out = v1;
+
+ out -= (out >> 1) & 0x5555555555555555;
+ out = (0x3333333333333333 & out) + (0x3333333333333333 & (out >> 2));
+ out = (out + (out >> 4)) & 0x0f0f0f0f0f0f0f0f;
+
+ if (size == 8) { /* popcntb */
+ op->val = out;
+ return;
+ }
+ out += out >> 8;
+ out += out >> 16;
+ if (size == 32) { /* popcntw */
+ op->val = out & 0x0000003f0000003f;
+ return;
+ }
+
+ out = (out + (out >> 32)) & 0x7f;
+ op->val = out; /* popcntd */
+}
+
+#ifdef CONFIG_PPC64
+static nokprobe_inline void do_bpermd(const struct pt_regs *regs,
+ struct instruction_op *op,
+ unsigned long v1, unsigned long v2)
+{
+ unsigned char perm, idx;
+ unsigned int i;
+
+ perm = 0;
+ for (i = 0; i < 8; i++) {
+ idx = (v1 >> (i * 8)) & 0xff;
+ if (idx < 64)
+ if (v2 & PPC_BIT(idx))
+ perm |= 1 << i;
+ }
+ op->val = perm;
+}
+#endif /* CONFIG_PPC64 */
+/*
+ * The size parameter adjusts the equivalent prty instruction.
+ * prtyw = 32, prtyd = 64
+ */
+static nokprobe_inline void do_prty(const struct pt_regs *regs,
+ struct instruction_op *op,
+ unsigned long v, int size)
+{
+ unsigned long long res = v ^ (v >> 8);
+
+ res ^= res >> 16;
+ if (size == 32) { /* prtyw */
+ op->val = res & 0x0000000100000001;
+ return;
+ }
+
+ res ^= res >> 32;
+ op->val = res & 1; /* prtyd */
}
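do_popcnt() is the classic SWAR reduction: subtract the shifted value to form 2-bit counts, fold to 4-bit and then per-byte counts (the popcntb result), and finally sum the bytes, masking per word for popcntw or down to 7 bits for popcntd. A standalone version of the same steps, checked against the compiler builtin (sketch):

#include <assert.h>
#include <stdint.h>

static uint64_t popcnt64(uint64_t v)
{
	v -= (v >> 1) & 0x5555555555555555ULL;	/* 2-bit partial counts */
	v = (v & 0x3333333333333333ULL) + ((v >> 2) & 0x3333333333333333ULL);
	v = (v + (v >> 4)) & 0x0f0f0f0f0f0f0f0fULL;	/* per-byte counts */
	v += v >> 8;
	v += v >> 16;
	v += v >> 32;			/* accumulate all eight byte counts */
	return v & 0x7f;		/* popcntd result */
}

int main(void)
{
	assert(popcnt64(0) == 0);
	assert(popcnt64(~0ULL) == 64);
	assert(popcnt64(0x8000000000000001ULL) ==
	       (uint64_t)__builtin_popcountll(0x8000000000000001ULL));
	return 0;
}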
static nokprobe_inline int trap_compare(long v1, long v2)
@@ -629,14 +1140,18 @@ static nokprobe_inline int trap_compare(long v1, long v2)
#define ROTATE(x, n) ((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))
/*
- * Decode an instruction, and execute it if that can be done just by
- * modifying *regs (i.e. integer arithmetic and logical instructions,
- * branches, and barrier instructions).
- * Returns 1 if the instruction has been executed, or 0 if not.
- * Sets *op to indicate what the instruction does.
+ * Decode an instruction, and return information about it in *op
+ * without changing *regs.
+ * Integer arithmetic and logical instructions, branches, and barrier
+ * instructions can be emulated just using the information in *op.
+ *
+ * Return value is 1 if the instruction can be emulated just by
+ * updating *regs with the information in *op, -1 if we need the
+ * GPRs but *regs doesn't contain the full register set, or 0
+ * otherwise.
*/
-int analyse_instr(struct instruction_op *op, struct pt_regs *regs,
- unsigned int instr)
+int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
+ unsigned int instr)
{
unsigned int opcode, ra, rb, rd, spr, u;
unsigned long int imm;
@@ -653,12 +1168,11 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs,
imm = (signed short)(instr & 0xfffc);
if ((instr & 2) == 0)
imm += regs->nip;
- regs->nip += 4;
- regs->nip = truncate_if_32bit(regs->msr, regs->nip);
+ op->val = truncate_if_32bit(regs->msr, imm);
if (instr & 1)
- regs->link = regs->nip;
- if (branch_taken(instr, regs))
- regs->nip = truncate_if_32bit(regs->msr, imm);
+ op->type |= SETLK;
+ if (branch_taken(instr, regs, op))
+ op->type |= BRTAKEN;
return 1;
#ifdef CONFIG_PPC64
case 17: /* sc */
@@ -669,38 +1183,37 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs,
return 0;
#endif
case 18: /* b */
- op->type = BRANCH;
+ op->type = BRANCH | BRTAKEN;
imm = instr & 0x03fffffc;
if (imm & 0x02000000)
imm -= 0x04000000;
if ((instr & 2) == 0)
imm += regs->nip;
+ op->val = truncate_if_32bit(regs->msr, imm);
if (instr & 1)
- regs->link = truncate_if_32bit(regs->msr, regs->nip + 4);
- imm = truncate_if_32bit(regs->msr, imm);
- regs->nip = imm;
+ op->type |= SETLK;
return 1;
case 19:
switch ((instr >> 1) & 0x3ff) {
case 0: /* mcrf */
+ op->type = COMPUTE + SETCC;
rd = 7 - ((instr >> 23) & 0x7);
ra = 7 - ((instr >> 18) & 0x7);
rd *= 4;
ra *= 4;
val = (regs->ccr >> ra) & 0xf;
- regs->ccr = (regs->ccr & ~(0xfUL << rd)) | (val << rd);
- goto instr_done;
+ op->ccval = (regs->ccr & ~(0xfUL << rd)) | (val << rd);
+ return 1;
case 16: /* bclr */
case 528: /* bcctr */
op->type = BRANCH;
imm = (instr & 0x400)? regs->ctr: regs->link;
- regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
- imm = truncate_if_32bit(regs->msr, imm);
+ op->val = truncate_if_32bit(regs->msr, imm);
if (instr & 1)
- regs->link = regs->nip;
- if (branch_taken(instr, regs))
- regs->nip = imm;
+ op->type |= SETLK;
+ if (branch_taken(instr, regs, op))
+ op->type |= BRTAKEN;
return 1;
case 18: /* rfid, scary */
@@ -710,9 +1223,8 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs,
return 0;
case 150: /* isync */
- op->type = BARRIER;
- isync();
- goto instr_done;
+ op->type = BARRIER | BARRIER_ISYNC;
+ return 1;
case 33: /* crnor */
case 129: /* crandc */
@@ -722,45 +1234,44 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs,
case 289: /* creqv */
case 417: /* crorc */
case 449: /* cror */
+ op->type = COMPUTE + SETCC;
ra = (instr >> 16) & 0x1f;
rb = (instr >> 11) & 0x1f;
rd = (instr >> 21) & 0x1f;
ra = (regs->ccr >> (31 - ra)) & 1;
rb = (regs->ccr >> (31 - rb)) & 1;
val = (instr >> (6 + ra * 2 + rb)) & 1;
- regs->ccr = (regs->ccr & ~(1UL << (31 - rd))) |
+ op->ccval = (regs->ccr & ~(1UL << (31 - rd))) |
(val << (31 - rd));
- goto instr_done;
+ return 1;
}
break;
case 31:
switch ((instr >> 1) & 0x3ff) {
case 598: /* sync */
- op->type = BARRIER;
+ op->type = BARRIER + BARRIER_SYNC;
#ifdef __powerpc64__
switch ((instr >> 21) & 3) {
case 1: /* lwsync */
- asm volatile("lwsync" : : : "memory");
- goto instr_done;
+ op->type = BARRIER + BARRIER_LWSYNC;
+ break;
case 2: /* ptesync */
- asm volatile("ptesync" : : : "memory");
- goto instr_done;
+ op->type = BARRIER + BARRIER_PTESYNC;
+ break;
}
#endif
- mb();
- goto instr_done;
+ return 1;
case 854: /* eieio */
- op->type = BARRIER;
- eieio();
- goto instr_done;
+ op->type = BARRIER + BARRIER_EIEIO;
+ return 1;
}
break;
}
/* Following cases refer to regs->gpr[], so we need all regs */
if (!FULL_REGS(regs))
- return 0;
+ return -1;
rd = (instr >> 21) & 0x1f;
ra = (instr >> 16) & 0x1f;
@@ -771,21 +1282,21 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs,
case 2: /* tdi */
if (rd & trap_compare(regs->gpr[ra], (short) instr))
goto trap;
- goto instr_done;
+ return 1;
#endif
case 3: /* twi */
if (rd & trap_compare((int)regs->gpr[ra], (short) instr))
goto trap;
- goto instr_done;
+ return 1;
case 7: /* mulli */
- regs->gpr[rd] = regs->gpr[ra] * (short) instr;
- goto instr_done;
+ op->val = regs->gpr[ra] * (short) instr;
+ goto compute_done;
case 8: /* subfic */
imm = (short) instr;
- add_with_carry(regs, rd, ~regs->gpr[ra], imm, 1);
- goto instr_done;
+ add_with_carry(regs, op, rd, ~regs->gpr[ra], imm, 1);
+ return 1;
case 10: /* cmpli */
imm = (unsigned short) instr;
@@ -794,8 +1305,8 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs,
if ((rd & 1) == 0)
val = (unsigned int) val;
#endif
- do_cmp_unsigned(regs, val, imm, rd >> 2);
- goto instr_done;
+ do_cmp_unsigned(regs, op, val, imm, rd >> 2);
+ return 1;
case 11: /* cmpi */
imm = (short) instr;
@@ -804,47 +1315,58 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs,
if ((rd & 1) == 0)
val = (int) val;
#endif
- do_cmp_signed(regs, val, imm, rd >> 2);
- goto instr_done;
+ do_cmp_signed(regs, op, val, imm, rd >> 2);
+ return 1;
case 12: /* addic */
imm = (short) instr;
- add_with_carry(regs, rd, regs->gpr[ra], imm, 0);
- goto instr_done;
+ add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
+ return 1;
case 13: /* addic. */
imm = (short) instr;
- add_with_carry(regs, rd, regs->gpr[ra], imm, 0);
- set_cr0(regs, rd);
- goto instr_done;
+ add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
+ set_cr0(regs, op, rd);
+ return 1;
case 14: /* addi */
imm = (short) instr;
if (ra)
imm += regs->gpr[ra];
- regs->gpr[rd] = imm;
- goto instr_done;
+ op->val = imm;
+ goto compute_done;
case 15: /* addis */
imm = ((short) instr) << 16;
if (ra)
imm += regs->gpr[ra];
- regs->gpr[rd] = imm;
- goto instr_done;
+ op->val = imm;
+ goto compute_done;
+
+ case 19:
+ if (((instr >> 1) & 0x1f) == 2) {
+ /* addpcis */
+ imm = (short) (instr & 0xffc1); /* d0 + d2 fields */
+ imm |= (instr >> 15) & 0x3e; /* d1 field */
+ op->val = regs->nip + (imm << 16) + 4;
+ goto compute_done;
+ }
+ op->type = UNKNOWN;
+ return 0;
case 20: /* rlwimi */
mb = (instr >> 6) & 0x1f;
me = (instr >> 1) & 0x1f;
val = DATA32(regs->gpr[rd]);
imm = MASK32(mb, me);
- regs->gpr[ra] = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
+ op->val = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
goto logical_done;
case 21: /* rlwinm */
mb = (instr >> 6) & 0x1f;
me = (instr >> 1) & 0x1f;
val = DATA32(regs->gpr[rd]);
- regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me);
+ op->val = ROTATE(val, rb) & MASK32(mb, me);
goto logical_done;
case 23: /* rlwnm */
@@ -852,40 +1374,37 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs,
me = (instr >> 1) & 0x1f;
rb = regs->gpr[rb] & 0x1f;
val = DATA32(regs->gpr[rd]);
- regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me);
+ op->val = ROTATE(val, rb) & MASK32(mb, me);
goto logical_done;
case 24: /* ori */
- imm = (unsigned short) instr;
- regs->gpr[ra] = regs->gpr[rd] | imm;
- goto instr_done;
+ op->val = regs->gpr[rd] | (unsigned short) instr;
+ goto logical_done_nocc;
case 25: /* oris */
imm = (unsigned short) instr;
- regs->gpr[ra] = regs->gpr[rd] | (imm << 16);
- goto instr_done;
+ op->val = regs->gpr[rd] | (imm << 16);
+ goto logical_done_nocc;
case 26: /* xori */
- imm = (unsigned short) instr;
- regs->gpr[ra] = regs->gpr[rd] ^ imm;
- goto instr_done;
+ op->val = regs->gpr[rd] ^ (unsigned short) instr;
+ goto logical_done_nocc;
case 27: /* xoris */
imm = (unsigned short) instr;
- regs->gpr[ra] = regs->gpr[rd] ^ (imm << 16);
- goto instr_done;
+ op->val = regs->gpr[rd] ^ (imm << 16);
+ goto logical_done_nocc;
case 28: /* andi. */
- imm = (unsigned short) instr;
- regs->gpr[ra] = regs->gpr[rd] & imm;
- set_cr0(regs, ra);
- goto instr_done;
+ op->val = regs->gpr[rd] & (unsigned short) instr;
+ set_cr0(regs, op, ra);
+ goto logical_done_nocc;
case 29: /* andis. */
imm = (unsigned short) instr;
- regs->gpr[ra] = regs->gpr[rd] & (imm << 16);
- set_cr0(regs, ra);
- goto instr_done;
+ op->val = regs->gpr[rd] & (imm << 16);
+ set_cr0(regs, op, ra);
+ goto logical_done_nocc;
#ifdef __powerpc64__
case 30: /* rld* */
@@ -896,48 +1415,60 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs,
val = ROTATE(val, sh);
switch ((instr >> 2) & 3) {
case 0: /* rldicl */
- regs->gpr[ra] = val & MASK64_L(mb);
- goto logical_done;
+ val &= MASK64_L(mb);
+ break;
case 1: /* rldicr */
- regs->gpr[ra] = val & MASK64_R(mb);
- goto logical_done;
+ val &= MASK64_R(mb);
+ break;
case 2: /* rldic */
- regs->gpr[ra] = val & MASK64(mb, 63 - sh);
- goto logical_done;
+ val &= MASK64(mb, 63 - sh);
+ break;
case 3: /* rldimi */
imm = MASK64(mb, 63 - sh);
- regs->gpr[ra] = (regs->gpr[ra] & ~imm) |
+ val = (regs->gpr[ra] & ~imm) |
(val & imm);
- goto logical_done;
}
+ op->val = val;
+ goto logical_done;
} else {
sh = regs->gpr[rb] & 0x3f;
val = ROTATE(val, sh);
switch ((instr >> 1) & 7) {
case 0: /* rldcl */
- regs->gpr[ra] = val & MASK64_L(mb);
+ op->val = val & MASK64_L(mb);
goto logical_done;
case 1: /* rldcr */
- regs->gpr[ra] = val & MASK64_R(mb);
+ op->val = val & MASK64_R(mb);
goto logical_done;
}
}
#endif
- break; /* illegal instruction */
+ op->type = UNKNOWN; /* illegal instruction */
+ return 0;
case 31:
+ /* isel occupies 32 minor opcodes */
+ if (((instr >> 1) & 0x1f) == 15) {
+ mb = (instr >> 6) & 0x1f; /* bc field */
+ val = (regs->ccr >> (31 - mb)) & 1;
+ val2 = (ra) ? regs->gpr[ra] : 0;
+
+ op->val = (val) ? val2 : regs->gpr[rb];
+ goto compute_done;
+ }
+
switch ((instr >> 1) & 0x3ff) {
case 4: /* tw */
if (rd == 0x1f ||
(rd & trap_compare((int)regs->gpr[ra],
(int)regs->gpr[rb])))
goto trap;
- goto instr_done;
+ return 1;
#ifdef __powerpc64__
case 68: /* td */
if (rd & trap_compare(regs->gpr[ra], regs->gpr[rb]))
goto trap;
- goto instr_done;
+ return 1;
#endif
case 83: /* mfmsr */
if (regs->msr & MSR_PR)
@@ -966,74 +1497,50 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs,
#endif
case 19: /* mfcr */
+ imm = 0xffffffffUL;
if ((instr >> 20) & 1) {
imm = 0xf0000000UL;
for (sh = 0; sh < 8; ++sh) {
- if (instr & (0x80000 >> sh)) {
- regs->gpr[rd] = regs->ccr & imm;
+ if (instr & (0x80000 >> sh))
break;
- }
imm >>= 4;
}
-
- goto instr_done;
}
-
- regs->gpr[rd] = regs->ccr;
- regs->gpr[rd] &= 0xffffffffUL;
- goto instr_done;
+ op->val = regs->ccr & imm;
+ goto compute_done;
case 144: /* mtcrf */
+ op->type = COMPUTE + SETCC;
imm = 0xf0000000UL;
val = regs->gpr[rd];
+ op->val = regs->ccr;
for (sh = 0; sh < 8; ++sh) {
if (instr & (0x80000 >> sh))
- regs->ccr = (regs->ccr & ~imm) |
+ op->val = (op->val & ~imm) |
(val & imm);
imm >>= 4;
}
- goto instr_done;
+ return 1;
case 339: /* mfspr */
spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0);
- switch (spr) {
- case SPRN_XER: /* mfxer */
- regs->gpr[rd] = regs->xer;
- regs->gpr[rd] &= 0xffffffffUL;
- goto instr_done;
- case SPRN_LR: /* mflr */
- regs->gpr[rd] = regs->link;
- goto instr_done;
- case SPRN_CTR: /* mfctr */
- regs->gpr[rd] = regs->ctr;
- goto instr_done;
- default:
- op->type = MFSPR;
- op->reg = rd;
- op->spr = spr;
- return 0;
- }
- break;
+ op->type = MFSPR;
+ op->reg = rd;
+ op->spr = spr;
+ if (spr == SPRN_XER || spr == SPRN_LR ||
+ spr == SPRN_CTR)
+ return 1;
+ return 0;
case 467: /* mtspr */
spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0);
- switch (spr) {
- case SPRN_XER: /* mtxer */
- regs->xer = (regs->gpr[rd] & 0xffffffffUL);
- goto instr_done;
- case SPRN_LR: /* mtlr */
- regs->link = regs->gpr[rd];
- goto instr_done;
- case SPRN_CTR: /* mtctr */
- regs->ctr = regs->gpr[rd];
- goto instr_done;
- default:
- op->type = MTSPR;
- op->val = regs->gpr[rd];
- op->spr = spr;
- return 0;
- }
- break;
+ op->type = MTSPR;
+ op->val = regs->gpr[rd];
+ op->spr = spr;
+ if (spr == SPRN_XER || spr == SPRN_LR ||
+ spr == SPRN_CTR)
+ return 1;
+ return 0;
/*
* Compare instructions
@@ -1048,8 +1555,8 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs,
val2 = (int) val2;
}
#endif
- do_cmp_signed(regs, val, val2, rd >> 2);
- goto instr_done;
+ do_cmp_signed(regs, op, val, val2, rd >> 2);
+ return 1;
case 32: /* cmpl */
val = regs->gpr[ra];
@@ -1061,109 +1568,113 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs,
val2 = (unsigned int) val2;
}
#endif
- do_cmp_unsigned(regs, val, val2, rd >> 2);
- goto instr_done;
+ do_cmp_unsigned(regs, op, val, val2, rd >> 2);
+ return 1;
+
+ case 508: /* cmpb */
+ do_cmpb(regs, op, regs->gpr[rd], regs->gpr[rb]);
+ goto logical_done_nocc;
/*
* Arithmetic instructions
*/
case 8: /* subfc */
- add_with_carry(regs, rd, ~regs->gpr[ra],
+ add_with_carry(regs, op, rd, ~regs->gpr[ra],
regs->gpr[rb], 1);
goto arith_done;
#ifdef __powerpc64__
case 9: /* mulhdu */
- asm("mulhdu %0,%1,%2" : "=r" (regs->gpr[rd]) :
+ asm("mulhdu %0,%1,%2" : "=r" (op->val) :
"r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
goto arith_done;
#endif
case 10: /* addc */
- add_with_carry(regs, rd, regs->gpr[ra],
+ add_with_carry(regs, op, rd, regs->gpr[ra],
regs->gpr[rb], 0);
goto arith_done;
case 11: /* mulhwu */
- asm("mulhwu %0,%1,%2" : "=r" (regs->gpr[rd]) :
+ asm("mulhwu %0,%1,%2" : "=r" (op->val) :
"r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
goto arith_done;
case 40: /* subf */
- regs->gpr[rd] = regs->gpr[rb] - regs->gpr[ra];
+ op->val = regs->gpr[rb] - regs->gpr[ra];
goto arith_done;
#ifdef __powerpc64__
case 73: /* mulhd */
- asm("mulhd %0,%1,%2" : "=r" (regs->gpr[rd]) :
+ asm("mulhd %0,%1,%2" : "=r" (op->val) :
"r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
goto arith_done;
#endif
case 75: /* mulhw */
- asm("mulhw %0,%1,%2" : "=r" (regs->gpr[rd]) :
+ asm("mulhw %0,%1,%2" : "=r" (op->val) :
"r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
goto arith_done;
case 104: /* neg */
- regs->gpr[rd] = -regs->gpr[ra];
+ op->val = -regs->gpr[ra];
goto arith_done;
case 136: /* subfe */
- add_with_carry(regs, rd, ~regs->gpr[ra], regs->gpr[rb],
- regs->xer & XER_CA);
+ add_with_carry(regs, op, rd, ~regs->gpr[ra],
+ regs->gpr[rb], regs->xer & XER_CA);
goto arith_done;
case 138: /* adde */
- add_with_carry(regs, rd, regs->gpr[ra], regs->gpr[rb],
- regs->xer & XER_CA);
+ add_with_carry(regs, op, rd, regs->gpr[ra],
+ regs->gpr[rb], regs->xer & XER_CA);
goto arith_done;
case 200: /* subfze */
- add_with_carry(regs, rd, ~regs->gpr[ra], 0L,
+ add_with_carry(regs, op, rd, ~regs->gpr[ra], 0L,
regs->xer & XER_CA);
goto arith_done;
case 202: /* addze */
- add_with_carry(regs, rd, regs->gpr[ra], 0L,
+ add_with_carry(regs, op, rd, regs->gpr[ra], 0L,
regs->xer & XER_CA);
goto arith_done;
case 232: /* subfme */
- add_with_carry(regs, rd, ~regs->gpr[ra], -1L,
+ add_with_carry(regs, op, rd, ~regs->gpr[ra], -1L,
regs->xer & XER_CA);
goto arith_done;
#ifdef __powerpc64__
case 233: /* mulld */
- regs->gpr[rd] = regs->gpr[ra] * regs->gpr[rb];
+ op->val = regs->gpr[ra] * regs->gpr[rb];
goto arith_done;
#endif
case 234: /* addme */
- add_with_carry(regs, rd, regs->gpr[ra], -1L,
+ add_with_carry(regs, op, rd, regs->gpr[ra], -1L,
regs->xer & XER_CA);
goto arith_done;
case 235: /* mullw */
- regs->gpr[rd] = (unsigned int) regs->gpr[ra] *
+ op->val = (unsigned int) regs->gpr[ra] *
(unsigned int) regs->gpr[rb];
goto arith_done;
case 266: /* add */
- regs->gpr[rd] = regs->gpr[ra] + regs->gpr[rb];
+ op->val = regs->gpr[ra] + regs->gpr[rb];
goto arith_done;
#ifdef __powerpc64__
case 457: /* divdu */
- regs->gpr[rd] = regs->gpr[ra] / regs->gpr[rb];
+ op->val = regs->gpr[ra] / regs->gpr[rb];
goto arith_done;
#endif
case 459: /* divwu */
- regs->gpr[rd] = (unsigned int) regs->gpr[ra] /
+ op->val = (unsigned int) regs->gpr[ra] /
(unsigned int) regs->gpr[rb];
goto arith_done;
#ifdef __powerpc64__
case 489: /* divd */
- regs->gpr[rd] = (long int) regs->gpr[ra] /
+ op->val = (long int) regs->gpr[ra] /
(long int) regs->gpr[rb];
goto arith_done;
#endif
case 491: /* divw */
- regs->gpr[rd] = (int) regs->gpr[ra] /
+ op->val = (int) regs->gpr[ra] /
(int) regs->gpr[rb];
goto arith_done;
@@ -1172,57 +1683,79 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs,
* Logical instructions
*/
case 26: /* cntlzw */
- asm("cntlzw %0,%1" : "=r" (regs->gpr[ra]) :
- "r" (regs->gpr[rd]));
+ val = (unsigned int) regs->gpr[rd];
+ op->val = (val ? __builtin_clz((unsigned int) val) : 32);
goto logical_done;
#ifdef __powerpc64__
case 58: /* cntlzd */
- asm("cntlzd %0,%1" : "=r" (regs->gpr[ra]) :
- "r" (regs->gpr[rd]));
+ op->val = (regs->gpr[rd] ? __builtin_clzl(regs->gpr[rd]) : 64);
goto logical_done;
#endif
case 28: /* and */
- regs->gpr[ra] = regs->gpr[rd] & regs->gpr[rb];
+ op->val = regs->gpr[rd] & regs->gpr[rb];
goto logical_done;
case 60: /* andc */
- regs->gpr[ra] = regs->gpr[rd] & ~regs->gpr[rb];
+ op->val = regs->gpr[rd] & ~regs->gpr[rb];
goto logical_done;
+ case 122: /* popcntb */
+ do_popcnt(regs, op, regs->gpr[rd], 8);
+ goto logical_done_nocc;
+
case 124: /* nor */
- regs->gpr[ra] = ~(regs->gpr[rd] | regs->gpr[rb]);
+ op->val = ~(regs->gpr[rd] | regs->gpr[rb]);
goto logical_done;
+ case 154: /* prtyw */
+ do_prty(regs, op, regs->gpr[rd], 32);
+ goto logical_done_nocc;
+
+ case 186: /* prtyd */
+ do_prty(regs, op, regs->gpr[rd], 64);
+ goto logical_done_nocc;
+#ifdef CONFIG_PPC64
+ case 252: /* bpermd */
+ do_bpermd(regs, op, regs->gpr[rd], regs->gpr[rb]);
+ goto logical_done_nocc;
+#endif
case 284: /* eqv */
- regs->gpr[ra] = ~(regs->gpr[rd] ^ regs->gpr[rb]);
+ op->val = ~(regs->gpr[rd] ^ regs->gpr[rb]);
goto logical_done;
case 316: /* xor */
- regs->gpr[ra] = regs->gpr[rd] ^ regs->gpr[rb];
+ op->val = regs->gpr[rd] ^ regs->gpr[rb];
goto logical_done;
+ case 378: /* popcntw */
+ do_popcnt(regs, op, regs->gpr[rd], 32);
+ goto logical_done_nocc;
+
case 412: /* orc */
- regs->gpr[ra] = regs->gpr[rd] | ~regs->gpr[rb];
+ op->val = regs->gpr[rd] | ~regs->gpr[rb];
goto logical_done;
case 444: /* or */
- regs->gpr[ra] = regs->gpr[rd] | regs->gpr[rb];
+ op->val = regs->gpr[rd] | regs->gpr[rb];
goto logical_done;
case 476: /* nand */
- regs->gpr[ra] = ~(regs->gpr[rd] & regs->gpr[rb]);
+ op->val = ~(regs->gpr[rd] & regs->gpr[rb]);
goto logical_done;
-
+#ifdef CONFIG_PPC64
+ case 506: /* popcntd */
+ do_popcnt(regs, op, regs->gpr[rd], 64);
+ goto logical_done_nocc;
+#endif
case 922: /* extsh */
- regs->gpr[ra] = (signed short) regs->gpr[rd];
+ op->val = (signed short) regs->gpr[rd];
goto logical_done;
case 954: /* extsb */
- regs->gpr[ra] = (signed char) regs->gpr[rd];
+ op->val = (signed char) regs->gpr[rd];
goto logical_done;
#ifdef __powerpc64__
case 986: /* extsw */
- regs->gpr[ra] = (signed int) regs->gpr[rd];
+ op->val = (signed int) regs->gpr[rd];
goto logical_done;
#endif
@@ -1232,75 +1765,83 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs,
case 24: /* slw */
sh = regs->gpr[rb] & 0x3f;
if (sh < 32)
- regs->gpr[ra] = (regs->gpr[rd] << sh) & 0xffffffffUL;
+ op->val = (regs->gpr[rd] << sh) & 0xffffffffUL;
else
- regs->gpr[ra] = 0;
+ op->val = 0;
goto logical_done;
case 536: /* srw */
sh = regs->gpr[rb] & 0x3f;
if (sh < 32)
- regs->gpr[ra] = (regs->gpr[rd] & 0xffffffffUL) >> sh;
+ op->val = (regs->gpr[rd] & 0xffffffffUL) >> sh;
else
- regs->gpr[ra] = 0;
+ op->val = 0;
goto logical_done;
case 792: /* sraw */
+ op->type = COMPUTE + SETREG + SETXER;
sh = regs->gpr[rb] & 0x3f;
ival = (signed int) regs->gpr[rd];
- regs->gpr[ra] = ival >> (sh < 32 ? sh : 31);
+ op->val = ival >> (sh < 32 ? sh : 31);
+ op->xerval = regs->xer;
if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
- regs->xer |= XER_CA;
+ op->xerval |= XER_CA;
else
- regs->xer &= ~XER_CA;
+ op->xerval &= ~XER_CA;
goto logical_done;
case 824: /* srawi */
+ op->type = COMPUTE + SETREG + SETXER;
sh = rb;
ival = (signed int) regs->gpr[rd];
- regs->gpr[ra] = ival >> sh;
+ op->val = ival >> sh;
+ op->xerval = regs->xer;
if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
- regs->xer |= XER_CA;
+ op->xerval |= XER_CA;
else
- regs->xer &= ~XER_CA;
+ op->xerval &= ~XER_CA;
goto logical_done;
#ifdef __powerpc64__
case 27: /* sld */
sh = regs->gpr[rb] & 0x7f;
if (sh < 64)
- regs->gpr[ra] = regs->gpr[rd] << sh;
+ op->val = regs->gpr[rd] << sh;
else
- regs->gpr[ra] = 0;
+ op->val = 0;
goto logical_done;
case 539: /* srd */
sh = regs->gpr[rb] & 0x7f;
if (sh < 64)
- regs->gpr[ra] = regs->gpr[rd] >> sh;
+ op->val = regs->gpr[rd] >> sh;
else
- regs->gpr[ra] = 0;
+ op->val = 0;
goto logical_done;
case 794: /* srad */
+ op->type = COMPUTE + SETREG + SETXER;
sh = regs->gpr[rb] & 0x7f;
ival = (signed long int) regs->gpr[rd];
- regs->gpr[ra] = ival >> (sh < 64 ? sh : 63);
+ op->val = ival >> (sh < 64 ? sh : 63);
+ op->xerval = regs->xer;
if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
- regs->xer |= XER_CA;
+ op->xerval |= XER_CA;
else
- regs->xer &= ~XER_CA;
+ op->xerval &= ~XER_CA;
goto logical_done;
case 826: /* sradi with sh_5 = 0 */
case 827: /* sradi with sh_5 = 1 */
+ op->type = COMPUTE + SETREG + SETXER;
sh = rb | ((instr & 2) << 4);
ival = (signed long int) regs->gpr[rd];
- regs->gpr[ra] = ival >> sh;
+ op->val = ival >> sh;
+ op->xerval = regs->xer;
if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
- regs->xer |= XER_CA;
+ op->xerval |= XER_CA;
else
- regs->xer &= ~XER_CA;
+ op->xerval &= ~XER_CA;
goto logical_done;
#endif /* __powerpc64__ */
@@ -1333,18 +1874,24 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs,
op->type = MKOP(CACHEOP, ICBI, 0);
op->ea = xform_ea(instr, regs);
return 0;
+
+ case 1014: /* dcbz */
+ op->type = MKOP(CACHEOP, DCBZ, 0);
+ op->ea = xform_ea(instr, regs);
+ return 0;
}
break;
}
- /*
- * Loads and stores.
- */
+/*
+ * Loads and stores.
+ */
op->type = UNKNOWN;
op->update_reg = ra;
op->reg = rd;
op->val = regs->gpr[rd];
u = (instr >> 20) & UPDATE;
+ op->vsx_flags = 0;
switch (opcode) {
case 31:
@@ -1368,9 +1915,30 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs,
op->type = MKOP(STCX, 0, 8);
break;
- case 21: /* ldx */
- case 53: /* ldux */
- op->type = MKOP(LOAD, u, 8);
+ case 52: /* lbarx */
+ op->type = MKOP(LARX, 0, 1);
+ break;
+
+ case 694: /* stbcx. */
+ op->type = MKOP(STCX, 0, 1);
+ break;
+
+ case 116: /* lharx */
+ op->type = MKOP(LARX, 0, 2);
+ break;
+
+ case 726: /* sthcx. */
+ op->type = MKOP(STCX, 0, 2);
+ break;
+
+ case 276: /* lqarx */
+ if (!((rd & 1) || rd == ra || rd == rb))
+ op->type = MKOP(LARX, 0, 16);
+ break;
+
+ case 182: /* stqcx. */
+ if (!(rd & 1))
+ op->type = MKOP(STCX, 0, 16);
break;
#endif
@@ -1385,22 +1953,58 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs,
break;
#ifdef CONFIG_ALTIVEC
+ /*
+ * Note: for the load/store vector element instructions,
+ * bits of the EA say which field of the VMX register to use.
+ */
+ case 7: /* lvebx */
+ op->type = MKOP(LOAD_VMX, 0, 1);
+ op->element_size = 1;
+ break;
+
+ case 39: /* lvehx */
+ op->type = MKOP(LOAD_VMX, 0, 2);
+ op->element_size = 2;
+ break;
+
+ case 71: /* lvewx */
+ op->type = MKOP(LOAD_VMX, 0, 4);
+ op->element_size = 4;
+ break;
+
case 103: /* lvx */
case 359: /* lvxl */
- if (!(regs->msr & MSR_VEC))
- goto vecunavail;
op->type = MKOP(LOAD_VMX, 0, 16);
+ op->element_size = 16;
+ break;
+
+ case 135: /* stvebx */
+ op->type = MKOP(STORE_VMX, 0, 1);
+ op->element_size = 1;
+ break;
+
+ case 167: /* stvehx */
+ op->type = MKOP(STORE_VMX, 0, 2);
+ op->element_size = 2;
+ break;
+
+ case 199: /* stvewx */
+ op->type = MKOP(STORE_VMX, 0, 4);
+ op->element_size = 4;
break;
case 231: /* stvx */
case 487: /* stvxl */
- if (!(regs->msr & MSR_VEC))
- goto vecunavail;
op->type = MKOP(STORE_VMX, 0, 16);
break;
#endif /* CONFIG_ALTIVEC */
#ifdef __powerpc64__
+ case 21: /* ldx */
+ case 53: /* ldux */
+ op->type = MKOP(LOAD, u, 8);
+ break;
+
case 149: /* stdx */
case 181: /* stdux */
op->type = MKOP(STORE, u, 8);
@@ -1457,41 +2061,52 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs,
if (rb == 0)
rb = 32; /* # bytes to load */
op->type = MKOP(LOAD_MULTI, 0, rb);
- op->ea = 0;
- if (ra)
- op->ea = truncate_if_32bit(regs->msr,
- regs->gpr[ra]);
+ op->ea = ra ? regs->gpr[ra] : 0;
break;
#ifdef CONFIG_PPC_FPU
case 535: /* lfsx */
case 567: /* lfsux */
- if (!(regs->msr & MSR_FP))
- goto fpunavail;
- op->type = MKOP(LOAD_FP, u, 4);
+ op->type = MKOP(LOAD_FP, u | FPCONV, 4);
break;
case 599: /* lfdx */
case 631: /* lfdux */
- if (!(regs->msr & MSR_FP))
- goto fpunavail;
op->type = MKOP(LOAD_FP, u, 8);
break;
case 663: /* stfsx */
case 695: /* stfsux */
- if (!(regs->msr & MSR_FP))
- goto fpunavail;
- op->type = MKOP(STORE_FP, u, 4);
+ op->type = MKOP(STORE_FP, u | FPCONV, 4);
break;
case 727: /* stfdx */
case 759: /* stfdux */
- if (!(regs->msr & MSR_FP))
- goto fpunavail;
op->type = MKOP(STORE_FP, u, 8);
break;
-#endif
+
+#ifdef __powerpc64__
+ case 791: /* lfdpx */
+ op->type = MKOP(LOAD_FP, 0, 16);
+ break;
+
+ case 855: /* lfiwax */
+ op->type = MKOP(LOAD_FP, SIGNEXT, 4);
+ break;
+
+ case 887: /* lfiwzx */
+ op->type = MKOP(LOAD_FP, 0, 4);
+ break;
+
+ case 919: /* stfdpx */
+ op->type = MKOP(STORE_FP, 0, 16);
+ break;
+
+ case 983: /* stfiwx */
+ op->type = MKOP(STORE_FP, 0, 4);
+ break;
+#endif /* __powerpc64__ */
+#endif /* CONFIG_PPC_FPU */
#ifdef __powerpc64__
case 660: /* stdbrx */
@@ -1509,14 +2124,11 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs,
op->val = byterev_4(regs->gpr[rd]);
break;
- case 725:
+ case 725: /* stswi */
if (rb == 0)
rb = 32; /* # bytes to store */
op->type = MKOP(STORE_MULTI, 0, rb);
- op->ea = 0;
- if (ra)
- op->ea = truncate_if_32bit(regs->msr,
- regs->gpr[ra]);
+ op->ea = ra ? regs->gpr[ra] : 0;
break;
case 790: /* lhbrx */
@@ -1529,20 +2141,184 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs,
break;
#ifdef CONFIG_VSX
+ case 12: /* lxsiwzx */
+ op->reg = rd | ((instr & 1) << 5);
+ op->type = MKOP(LOAD_VSX, 0, 4);
+ op->element_size = 8;
+ break;
+
+ case 76: /* lxsiwax */
+ op->reg = rd | ((instr & 1) << 5);
+ op->type = MKOP(LOAD_VSX, SIGNEXT, 4);
+ op->element_size = 8;
+ break;
+
+ case 140: /* stxsiwx */
+ op->reg = rd | ((instr & 1) << 5);
+ op->type = MKOP(STORE_VSX, 0, 4);
+ op->element_size = 8;
+ break;
+
+ case 268: /* lxvx */
+ op->reg = rd | ((instr & 1) << 5);
+ op->type = MKOP(LOAD_VSX, 0, 16);
+ op->element_size = 16;
+ op->vsx_flags = VSX_CHECK_VEC;
+ break;
+
+ case 269: /* lxvl */
+ case 301: { /* lxvll */
+ int nb;
+ op->reg = rd | ((instr & 1) << 5);
+ op->ea = ra ? regs->gpr[ra] : 0;
+ nb = regs->gpr[rb] & 0xff;
+ if (nb > 16)
+ nb = 16;
+ op->type = MKOP(LOAD_VSX, 0, nb);
+ op->element_size = 16;
+ op->vsx_flags = ((instr & 0x40) ? VSX_LDLEFT : 0) |
+ VSX_CHECK_VEC;
+ break;
+ }
+ case 332: /* lxvdsx */
+ op->reg = rd | ((instr & 1) << 5);
+ op->type = MKOP(LOAD_VSX, 0, 8);
+ op->element_size = 8;
+ op->vsx_flags = VSX_SPLAT;
+ break;
+
+ case 364: /* lxvwsx */
+ op->reg = rd | ((instr & 1) << 5);
+ op->type = MKOP(LOAD_VSX, 0, 4);
+ op->element_size = 4;
+ op->vsx_flags = VSX_SPLAT | VSX_CHECK_VEC;
+ break;
+
+ case 396: /* stxvx */
+ op->reg = rd | ((instr & 1) << 5);
+ op->type = MKOP(STORE_VSX, 0, 16);
+ op->element_size = 16;
+ op->vsx_flags = VSX_CHECK_VEC;
+ break;
+
+ case 397: /* stxvl */
+ case 429: { /* stxvll */
+ int nb;
+ op->reg = rd | ((instr & 1) << 5);
+ op->ea = ra ? regs->gpr[ra] : 0;
+ nb = regs->gpr[rb] & 0xff;
+ if (nb > 16)
+ nb = 16;
+ op->type = MKOP(STORE_VSX, 0, nb);
+ op->element_size = 16;
+ op->vsx_flags = ((instr & 0x40) ? VSX_LDLEFT : 0) |
+ VSX_CHECK_VEC;
+ break;
+ }
+ case 524: /* lxsspx */
+ op->reg = rd | ((instr & 1) << 5);
+ op->type = MKOP(LOAD_VSX, 0, 4);
+ op->element_size = 8;
+ op->vsx_flags = VSX_FPCONV;
+ break;
+
+ case 588: /* lxsdx */
+ op->reg = rd | ((instr & 1) << 5);
+ op->type = MKOP(LOAD_VSX, 0, 8);
+ op->element_size = 8;
+ break;
+
+ case 652: /* stxsspx */
+ op->reg = rd | ((instr & 1) << 5);
+ op->type = MKOP(STORE_VSX, 0, 4);
+ op->element_size = 8;
+ op->vsx_flags = VSX_FPCONV;
+ break;
+
+ case 716: /* stxsdx */
+ op->reg = rd | ((instr & 1) << 5);
+ op->type = MKOP(STORE_VSX, 0, 8);
+ op->element_size = 8;
+ break;
+
+ case 780: /* lxvw4x */
+ op->reg = rd | ((instr & 1) << 5);
+ op->type = MKOP(LOAD_VSX, 0, 16);
+ op->element_size = 4;
+ break;
+
+ case 781: /* lxsibzx */
+ op->reg = rd | ((instr & 1) << 5);
+ op->type = MKOP(LOAD_VSX, 0, 1);
+ op->element_size = 8;
+ op->vsx_flags = VSX_CHECK_VEC;
+ break;
+
+ case 812: /* lxvh8x */
+ op->reg = rd | ((instr & 1) << 5);
+ op->type = MKOP(LOAD_VSX, 0, 16);
+ op->element_size = 2;
+ op->vsx_flags = VSX_CHECK_VEC;
+ break;
+
+ case 813: /* lxsihzx */
+ op->reg = rd | ((instr & 1) << 5);
+ op->type = MKOP(LOAD_VSX, 0, 2);
+ op->element_size = 8;
+ op->vsx_flags = VSX_CHECK_VEC;
+ break;
+
case 844: /* lxvd2x */
- case 876: /* lxvd2ux */
- if (!(regs->msr & MSR_VSX))
- goto vsxunavail;
op->reg = rd | ((instr & 1) << 5);
- op->type = MKOP(LOAD_VSX, u, 16);
+ op->type = MKOP(LOAD_VSX, 0, 16);
+ op->element_size = 8;
+ break;
+
+ case 876: /* lxvb16x */
+ op->reg = rd | ((instr & 1) << 5);
+ op->type = MKOP(LOAD_VSX, 0, 16);
+ op->element_size = 1;
+ op->vsx_flags = VSX_CHECK_VEC;
+ break;
+
+ case 908: /* stxvw4x */
+ op->reg = rd | ((instr & 1) << 5);
+ op->type = MKOP(STORE_VSX, 0, 16);
+ op->element_size = 4;
+ break;
+
+ case 909: /* stxsibx */
+ op->reg = rd | ((instr & 1) << 5);
+ op->type = MKOP(STORE_VSX, 0, 1);
+ op->element_size = 8;
+ op->vsx_flags = VSX_CHECK_VEC;
+ break;
+
+ case 940: /* stxvh8x */
+ op->reg = rd | ((instr & 1) << 5);
+ op->type = MKOP(STORE_VSX, 0, 16);
+ op->element_size = 2;
+ op->vsx_flags = VSX_CHECK_VEC;
+ break;
+
+ case 941: /* stxsihx */
+ op->reg = rd | ((instr & 1) << 5);
+ op->type = MKOP(STORE_VSX, 0, 2);
+ op->element_size = 8;
+ op->vsx_flags = VSX_CHECK_VEC;
break;
case 972: /* stxvd2x */
- case 1004: /* stxvd2ux */
- if (!(regs->msr & MSR_VSX))
- goto vsxunavail;
op->reg = rd | ((instr & 1) << 5);
- op->type = MKOP(STORE_VSX, u, 16);
+ op->type = MKOP(STORE_VSX, 0, 16);
+ op->element_size = 8;
+ break;
+
+ case 1004: /* stxvb16x */
+ op->reg = rd | ((instr & 1) << 5);
+ op->type = MKOP(STORE_VSX, 0, 16);
+ op->element_size = 1;
+ op->vsx_flags = VSX_CHECK_VEC;
break;
#endif /* CONFIG_VSX */
@@ -1606,38 +2382,63 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs,
#ifdef CONFIG_PPC_FPU
case 48: /* lfs */
case 49: /* lfsu */
- if (!(regs->msr & MSR_FP))
- goto fpunavail;
- op->type = MKOP(LOAD_FP, u, 4);
+ op->type = MKOP(LOAD_FP, u | FPCONV, 4);
op->ea = dform_ea(instr, regs);
break;
case 50: /* lfd */
case 51: /* lfdu */
- if (!(regs->msr & MSR_FP))
- goto fpunavail;
op->type = MKOP(LOAD_FP, u, 8);
op->ea = dform_ea(instr, regs);
break;
case 52: /* stfs */
case 53: /* stfsu */
- if (!(regs->msr & MSR_FP))
- goto fpunavail;
- op->type = MKOP(STORE_FP, u, 4);
+ op->type = MKOP(STORE_FP, u | FPCONV, 4);
op->ea = dform_ea(instr, regs);
break;
case 54: /* stfd */
case 55: /* stfdu */
- if (!(regs->msr & MSR_FP))
- goto fpunavail;
op->type = MKOP(STORE_FP, u, 8);
op->ea = dform_ea(instr, regs);
break;
#endif
#ifdef __powerpc64__
+ case 56: /* lq */
+ if (!((rd & 1) || (rd == ra)))
+ op->type = MKOP(LOAD, 0, 16);
+ op->ea = dqform_ea(instr, regs);
+ break;
+#endif
+
+#ifdef CONFIG_VSX
+ case 57: /* lfdp, lxsd, lxssp */
+ op->ea = dsform_ea(instr, regs);
+ switch (instr & 3) {
+ case 0: /* lfdp */
+ if (rd & 1)
+ break; /* reg must be even */
+ op->type = MKOP(LOAD_FP, 0, 16);
+ break;
+ case 2: /* lxsd */
+ op->reg = rd + 32;
+ op->type = MKOP(LOAD_VSX, 0, 8);
+ op->element_size = 8;
+ op->vsx_flags = VSX_CHECK_VEC;
+ break;
+ case 3: /* lxssp */
+ op->reg = rd + 32;
+ op->type = MKOP(LOAD_VSX, 0, 4);
+ op->element_size = 8;
+ op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
+ break;
+ }
+ break;
+#endif /* CONFIG_VSX */
+
+#ifdef __powerpc64__
case 58: /* ld[u], lwa */
op->ea = dsform_ea(instr, regs);
switch (instr & 3) {
@@ -1652,7 +2453,57 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs,
break;
}
break;
+#endif
+#ifdef CONFIG_VSX
+ case 61: /* stfdp, lxv, stxsd, stxssp, stxv */
+ switch (instr & 7) {
+ case 0: /* stfdp with LSB of DS field = 0 */
+ case 4: /* stfdp with LSB of DS field = 1 */
+ op->ea = dsform_ea(instr, regs);
+ op->type = MKOP(STORE_FP, 0, 16);
+ break;
+
+ case 1: /* lxv */
+ op->ea = dqform_ea(instr, regs);
+ if (instr & 8)
+ op->reg = rd + 32;
+ op->type = MKOP(LOAD_VSX, 0, 16);
+ op->element_size = 16;
+ op->vsx_flags = VSX_CHECK_VEC;
+ break;
+
+ case 2: /* stxsd with LSB of DS field = 0 */
+ case 6: /* stxsd with LSB of DS field = 1 */
+ op->ea = dsform_ea(instr, regs);
+ op->reg = rd + 32;
+ op->type = MKOP(STORE_VSX, 0, 8);
+ op->element_size = 8;
+ op->vsx_flags = VSX_CHECK_VEC;
+ break;
+
+ case 3: /* stxssp with LSB of DS field = 0 */
+ case 7: /* stxssp with LSB of DS field = 1 */
+ op->ea = dsform_ea(instr, regs);
+ op->reg = rd + 32;
+ op->type = MKOP(STORE_VSX, 0, 4);
+ op->element_size = 8;
+ op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
+ break;
+
+ case 5: /* stxv */
+ op->ea = dqform_ea(instr, regs);
+ if (instr & 8)
+ op->reg = rd + 32;
+ op->type = MKOP(STORE_VSX, 0, 16);
+ op->element_size = 16;
+ op->vsx_flags = VSX_CHECK_VEC;
+ break;
+ }
+ break;
+#endif /* CONFIG_VSX */
+
+#ifdef __powerpc64__
case 62: /* std[u] */
op->ea = dsform_ea(instr, regs);
switch (instr & 3) {
@@ -1662,6 +2513,10 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs,
case 1: /* stdu */
op->type = MKOP(STORE, UPDATE, 8);
break;
+ case 2: /* stq */
+ if (!(rd & 1))
+ op->type = MKOP(STORE, 0, 16);
+ break;
}
break;
#endif /* __powerpc64__ */
@@ -1671,15 +2526,18 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs,
logical_done:
if (instr & 1)
- set_cr0(regs, ra);
- goto instr_done;
+ set_cr0(regs, op, ra);
+ logical_done_nocc:
+ op->reg = ra;
+ op->type |= SETREG;
+ return 1;
arith_done:
if (instr & 1)
- set_cr0(regs, rd);
-
- instr_done:
- regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
+ set_cr0(regs, op, rd);
+ compute_done:
+ op->reg = rd;
+ op->type |= SETREG;
return 1;
priv:
@@ -1691,24 +2549,6 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs,
op->type = INTERRUPT | 0x700;
op->val = SRR1_PROGTRAP;
return 0;
-
-#ifdef CONFIG_PPC_FPU
- fpunavail:
- op->type = INTERRUPT | 0x800;
- return 0;
-#endif
-
-#ifdef CONFIG_ALTIVEC
- vecunavail:
- op->type = INTERRUPT | 0xf20;
- return 0;
-#endif
-
-#ifdef CONFIG_VSX
- vsxunavail:
- op->type = INTERRUPT | 0xf40;
- return 0;
-#endif
}
EXPORT_SYMBOL_GPL(analyse_instr);
NOKPROBE_SYMBOL(analyse_instr);
@@ -1771,190 +2611,412 @@ static nokprobe_inline void do_byterev(unsigned long *valp, int size)
}
/*
- * Emulate instructions that cause a transfer of control,
- * loads and stores, and a few other instructions.
- * Returns 1 if the step was emulated, 0 if not,
- * or -1 if the instruction is one that should not be stepped,
- * such as an rfid, or a mtmsrd that would clear MSR_RI.
+ * Emulate an instruction that can be executed just by updating
+ * fields in *regs.
*/
-int emulate_step(struct pt_regs *regs, unsigned int instr)
+void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op)
{
- struct instruction_op op;
- int r, err, size;
- unsigned long val;
- unsigned int cr;
- int i, rd, nb;
+ unsigned long next_pc;
+
+ next_pc = truncate_if_32bit(regs->msr, regs->nip + 4);
+ switch (op->type & INSTR_TYPE_MASK) {
+ case COMPUTE:
+ if (op->type & SETREG)
+ regs->gpr[op->reg] = op->val;
+ if (op->type & SETCC)
+ regs->ccr = op->ccval;
+ if (op->type & SETXER)
+ regs->xer = op->xerval;
+ break;
- r = analyse_instr(&op, regs, instr);
- if (r != 0)
- return r;
+ case BRANCH:
+ if (op->type & SETLK)
+ regs->link = next_pc;
+ if (op->type & BRTAKEN)
+ next_pc = op->val;
+ if (op->type & DECCTR)
+ --regs->ctr;
+ break;
- err = 0;
- size = GETSIZE(op.type);
- switch (op.type & INSTR_TYPE_MASK) {
- case CACHEOP:
- if (!address_ok(regs, op.ea, 8))
- return 0;
- switch (op.type & CACHEOP_MASK) {
- case DCBST:
- __cacheop_user_asmx(op.ea, err, "dcbst");
+ case BARRIER:
+ switch (op->type & BARRIER_MASK) {
+ case BARRIER_SYNC:
+ mb();
break;
- case DCBF:
- __cacheop_user_asmx(op.ea, err, "dcbf");
+ case BARRIER_ISYNC:
+ isync();
break;
- case DCBTST:
- if (op.reg == 0)
- prefetchw((void *) op.ea);
+ case BARRIER_EIEIO:
+ eieio();
break;
- case DCBT:
- if (op.reg == 0)
- prefetch((void *) op.ea);
+ case BARRIER_LWSYNC:
+ asm volatile("lwsync" : : : "memory");
break;
- case ICBI:
- __cacheop_user_asmx(op.ea, err, "icbi");
+ case BARRIER_PTESYNC:
+ asm volatile("ptesync" : : : "memory");
break;
}
- if (err)
- return 0;
- goto instr_done;
+ break;
+
+ case MFSPR:
+ switch (op->spr) {
+ case SPRN_XER:
+ regs->gpr[op->reg] = regs->xer & 0xffffffffUL;
+ break;
+ case SPRN_LR:
+ regs->gpr[op->reg] = regs->link;
+ break;
+ case SPRN_CTR:
+ regs->gpr[op->reg] = regs->ctr;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+ break;
+
+ case MTSPR:
+ switch (op->spr) {
+ case SPRN_XER:
+ regs->xer = op->val & 0xffffffffUL;
+ break;
+ case SPRN_LR:
+ regs->link = op->val;
+ break;
+ case SPRN_CTR:
+ regs->ctr = op->val;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+ break;
+
+ default:
+ WARN_ON_ONCE(1);
+ }
+ regs->nip = next_pc;
+}
+
+/*
+ * Emulate a previously-analysed load or store instruction.
+ * Return values are:
+ * 0 = instruction emulated successfully
+ * -EFAULT = address out of range or access faulted (regs->dar
+ * contains the faulting address)
+ * -EACCES = misaligned access, instruction requires alignment
+ * -EINVAL = unknown operation in *op
+ */
+int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op)
+{
+ int err, size, type;
+ int i, rd, nb;
+ unsigned int cr;
+ unsigned long val;
+ unsigned long ea;
+ bool cross_endian;
+
+ err = 0;
+ size = GETSIZE(op->type);
+ type = op->type & INSTR_TYPE_MASK;
+ cross_endian = (regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
+ ea = truncate_if_32bit(regs->msr, op->ea);
+ switch (type) {
case LARX:
- if (op.ea & (size - 1))
- break; /* can't handle misaligned */
- if (!address_ok(regs, op.ea, size))
- return 0;
+ if (ea & (size - 1))
+ return -EACCES; /* can't handle misaligned */
+ if (!address_ok(regs, ea, size))
+ return -EFAULT;
err = 0;
+ val = 0;
switch (size) {
+#ifdef __powerpc64__
+ case 1:
+ __get_user_asmx(val, ea, err, "lbarx");
+ break;
+ case 2:
+ __get_user_asmx(val, ea, err, "lharx");
+ break;
+#endif
case 4:
- __get_user_asmx(val, op.ea, err, "lwarx");
+ __get_user_asmx(val, ea, err, "lwarx");
break;
#ifdef __powerpc64__
case 8:
- __get_user_asmx(val, op.ea, err, "ldarx");
+ __get_user_asmx(val, ea, err, "ldarx");
+ break;
+ case 16:
+ err = do_lqarx(ea, &regs->gpr[op->reg]);
break;
#endif
default:
- return 0;
+ return -EINVAL;
}
- if (!err)
- regs->gpr[op.reg] = val;
- goto ldst_done;
+ if (err) {
+ regs->dar = ea;
+ break;
+ }
+ if (size < 16)
+ regs->gpr[op->reg] = val;
+ break;
case STCX:
- if (op.ea & (size - 1))
- break; /* can't handle misaligned */
- if (!address_ok(regs, op.ea, size))
- return 0;
+ if (ea & (size - 1))
+ return -EACCES; /* can't handle misaligned */
+ if (!address_ok(regs, ea, size))
+ return -EFAULT;
err = 0;
switch (size) {
+#ifdef __powerpc64__
+ case 1:
+ __put_user_asmx(op->val, ea, err, "stbcx.", cr);
+ break;
+ case 2:
+ __put_user_asmx(op->val, ea, err, "stbcx.", cr);
+ break;
+#endif
case 4:
- __put_user_asmx(op.val, op.ea, err, "stwcx.", cr);
+ __put_user_asmx(op->val, ea, err, "stwcx.", cr);
break;
#ifdef __powerpc64__
case 8:
- __put_user_asmx(op.val, op.ea, err, "stdcx.", cr);
+ __put_user_asmx(op->val, ea, err, "stdcx.", cr);
+ break;
+ case 16:
+ err = do_stqcx(ea, regs->gpr[op->reg],
+ regs->gpr[op->reg + 1], &cr);
break;
#endif
default:
- return 0;
+ return -EINVAL;
}
if (!err)
regs->ccr = (regs->ccr & 0x0fffffff) |
(cr & 0xe0000000) |
((regs->xer >> 3) & 0x10000000);
- goto ldst_done;
+ else
+ regs->dar = ea;
+ break;
case LOAD:
- err = read_mem(&regs->gpr[op.reg], op.ea, size, regs);
+#ifdef __powerpc64__
+ if (size == 16) {
+ err = emulate_lq(regs, ea, op->reg, cross_endian);
+ break;
+ }
+#endif
+ err = read_mem(&regs->gpr[op->reg], ea, size, regs);
if (!err) {
- if (op.type & SIGNEXT)
- do_signext(&regs->gpr[op.reg], size);
- if (op.type & BYTEREV)
- do_byterev(&regs->gpr[op.reg], size);
+ if (op->type & SIGNEXT)
+ do_signext(&regs->gpr[op->reg], size);
+ if ((op->type & BYTEREV) == (cross_endian ? 0 : BYTEREV))
+ do_byterev(&regs->gpr[op->reg], size);
}
- goto ldst_done;
+ break;
#ifdef CONFIG_PPC_FPU
case LOAD_FP:
- if (size == 4)
- err = do_fp_load(op.reg, do_lfs, op.ea, size, regs);
- else
- err = do_fp_load(op.reg, do_lfd, op.ea, size, regs);
- goto ldst_done;
+ /*
+ * If the instruction is in userspace, we can emulate it even
+ * if the FP/VMX/VSX state is not live, because we have the state
+ * stored in the thread_struct. If the instruction is in
+ * the kernel, we must not touch the state in the thread_struct.
+ */
+ if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
+ return 0;
+ err = do_fp_load(op, ea, regs, cross_endian);
+ break;
#endif
#ifdef CONFIG_ALTIVEC
case LOAD_VMX:
- err = do_vec_load(op.reg, do_lvx, op.ea & ~0xfUL, regs);
- goto ldst_done;
+ if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
+ return 0;
+ err = do_vec_load(op->reg, ea, size, regs, cross_endian);
+ break;
#endif
#ifdef CONFIG_VSX
- case LOAD_VSX:
- err = do_vsx_load(op.reg, do_lxvd2x, op.ea, regs);
- goto ldst_done;
+ case LOAD_VSX: {
+ unsigned long msrbit = MSR_VSX;
+
+ /*
+ * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
+ * when the target of the instruction is a vector register.
+ */
+ if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
+ msrbit = MSR_VEC;
+ if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
+ return 0;
+ err = do_vsx_load(op, ea, regs, cross_endian);
+ break;
+ }
#endif
case LOAD_MULTI:
- if (regs->msr & MSR_LE)
- return 0;
- rd = op.reg;
+ if (!address_ok(regs, ea, size))
+ return -EFAULT;
+ rd = op->reg;
for (i = 0; i < size; i += 4) {
+ unsigned int v32 = 0;
+
nb = size - i;
if (nb > 4)
nb = 4;
- err = read_mem(&regs->gpr[rd], op.ea, nb, regs);
+ err = copy_mem_in((u8 *) &v32, ea, nb, regs);
if (err)
- return 0;
- if (nb < 4) /* left-justify last bytes */
- regs->gpr[rd] <<= 32 - 8 * nb;
- op.ea += 4;
- ++rd;
+ break;
+ if (unlikely(cross_endian))
+ v32 = byterev_4(v32);
+ regs->gpr[rd] = v32;
+ ea += 4;
+ /* reg number wraps from 31 to 0 for lsw[ix] */
+ rd = (rd + 1) & 0x1f;
}
- goto instr_done;
+ break;
case STORE:
- if ((op.type & UPDATE) && size == sizeof(long) &&
- op.reg == 1 && op.update_reg == 1 &&
+#ifdef __powerpc64__
+ if (size == 16) {
+ err = emulate_stq(regs, ea, op->reg, cross_endian);
+ break;
+ }
+#endif
+ if ((op->type & UPDATE) && size == sizeof(long) &&
+ op->reg == 1 && op->update_reg == 1 &&
!(regs->msr & MSR_PR) &&
- op.ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) {
- err = handle_stack_update(op.ea, regs);
- goto ldst_done;
+ ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) {
+ err = handle_stack_update(ea, regs);
+ break;
}
- err = write_mem(op.val, op.ea, size, regs);
- goto ldst_done;
+ if (unlikely(cross_endian))
+ do_byterev(&op->val, size);
+ err = write_mem(op->val, ea, size, regs);
+ break;
#ifdef CONFIG_PPC_FPU
case STORE_FP:
- if (size == 4)
- err = do_fp_store(op.reg, do_stfs, op.ea, size, regs);
- else
- err = do_fp_store(op.reg, do_stfd, op.ea, size, regs);
- goto ldst_done;
+ if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
+ return 0;
+ err = do_fp_store(op, ea, regs, cross_endian);
+ break;
#endif
#ifdef CONFIG_ALTIVEC
case STORE_VMX:
- err = do_vec_store(op.reg, do_stvx, op.ea & ~0xfUL, regs);
- goto ldst_done;
+ if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
+ return 0;
+ err = do_vec_store(op->reg, ea, size, regs, cross_endian);
+ break;
#endif
#ifdef CONFIG_VSX
- case STORE_VSX:
- err = do_vsx_store(op.reg, do_stxvd2x, op.ea, regs);
- goto ldst_done;
+ case STORE_VSX: {
+ unsigned long msrbit = MSR_VSX;
+
+ /*
+ * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
+ * when the target of the instruction is a vector register.
+ */
+ if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
+ msrbit = MSR_VEC;
+ if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
+ return 0;
+ err = do_vsx_store(op, ea, regs, cross_endian);
+ break;
+ }
#endif
case STORE_MULTI:
- if (regs->msr & MSR_LE)
- return 0;
- rd = op.reg;
+ if (!address_ok(regs, ea, size))
+ return -EFAULT;
+ rd = op->reg;
for (i = 0; i < size; i += 4) {
- val = regs->gpr[rd];
+ unsigned int v32 = regs->gpr[rd];
+
nb = size - i;
if (nb > 4)
nb = 4;
- else
- val >>= 32 - 8 * nb;
- err = write_mem(val, op.ea, nb, regs);
+ if (unlikely(cross_endian))
+ v32 = byterev_4(v32);
+ err = copy_mem_out((u8 *) &v32, ea, nb, regs);
if (err)
- return 0;
- op.ea += 4;
- ++rd;
+ break;
+ ea += 4;
+ /* reg number wraps from 31 to 0 for stsw[ix] */
+ rd = (rd + 1) & 0x1f;
+ }
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (err)
+ return err;
+
+ if (op->type & UPDATE)
+ regs->gpr[op->update_reg] = op->ea;
+
+ return 0;
+}
+NOKPROBE_SYMBOL(emulate_loadstore);
+
+/*
+ * Emulate instructions that cause a transfer of control,
+ * loads and stores, and a few other instructions.
+ * Returns 1 if the step was emulated, 0 if not,
+ * or -1 if the instruction is one that should not be stepped,
+ * such as an rfid, or a mtmsrd that would clear MSR_RI.
+ */
+int emulate_step(struct pt_regs *regs, unsigned int instr)
+{
+ struct instruction_op op;
+ int r, err, type;
+ unsigned long val;
+ unsigned long ea;
+
+ r = analyse_instr(&op, regs, instr);
+ if (r < 0)
+ return r;
+ if (r > 0) {
+ emulate_update_regs(regs, &op);
+ return 1;
+ }
+
+ err = 0;
+ type = op.type & INSTR_TYPE_MASK;
+
+ if (OP_IS_LOAD_STORE(type)) {
+ err = emulate_loadstore(regs, &op);
+ if (err)
+ return 0;
+ goto instr_done;
+ }
+
+ switch (type) {
+ case CACHEOP:
+ ea = truncate_if_32bit(regs->msr, op.ea);
+ if (!address_ok(regs, ea, 8))
+ return 0;
+ switch (op.type & CACHEOP_MASK) {
+ case DCBST:
+ __cacheop_user_asmx(ea, err, "dcbst");
+ break;
+ case DCBF:
+ __cacheop_user_asmx(ea, err, "dcbf");
+ break;
+ case DCBTST:
+ if (op.reg == 0)
+ prefetchw((void *) ea);
+ break;
+ case DCBT:
+ if (op.reg == 0)
+ prefetch((void *) ea);
+ break;
+ case ICBI:
+ __cacheop_user_asmx(ea, err, "icbi");
+ break;
+ case DCBZ:
+ err = emulate_dcbz(ea, regs);
+ break;
+ }
+ if (err) {
+ regs->dar = ea;
+ return 0;
}
goto instr_done;
@@ -1998,12 +3060,6 @@ int emulate_step(struct pt_regs *regs, unsigned int instr)
}
return 0;
- ldst_done:
- if (err)
- return 0;
- if (op.type & UPDATE)
- regs->gpr[op.update_reg] = op.ea;
-
instr_done:
regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
return 1;
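
Taken together, the rewritten emulate_step() above is the reference consumer of the split API: analyse_instr() decodes into *op, emulate_update_regs() applies register-only updates, and emulate_loadstore() performs the memory access. The following is only a trimmed sketch of that calling convention, assuming kernel context and the declarations from asm/sstep.h; the function name is illustrative and the cache-op and MSR-changing cases handled by emulate_step() are elided:

#include <asm/sstep.h>          /* analyse_instr(), emulate_update_regs(), emulate_loadstore() */
#include <asm/ptrace.h>

/* Hedged sketch of the calling convention documented above; not a drop-in helper. */
static int try_emulate(struct pt_regs *regs, unsigned int instr)
{
        struct instruction_op op;
        int r = analyse_instr(&op, regs, instr);

        if (r < 0)                      /* *regs lacks the full GPR set */
                return r;
        if (r > 0) {                    /* register-only update (arithmetic, branch, ...) */
                emulate_update_regs(regs, &op);
                return 1;
        }
        if (OP_IS_LOAD_STORE(op.type & INSTR_TYPE_MASK)) {
                if (emulate_loadstore(regs, &op))
                        return 0;       /* -EFAULT/-EACCES/-EINVAL: let hardware take the fault */
                regs->nip += 4;         /* 32-bit nip truncation omitted in this sketch */
                return 1;
        }
        return 0;                       /* cache ops, rfid, mtmsr[d], ... not handled here */
}
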
diff --git a/arch/powerpc/lib/string_64.S b/arch/powerpc/lib/string_64.S
index d5b4d9498c54..56aac4c22025 100644
--- a/arch/powerpc/lib/string_64.S
+++ b/arch/powerpc/lib/string_64.S
@@ -184,7 +184,7 @@ err1; std r0,8(r3)
mtctr r6
mr r8,r3
14:
-err1; dcbz r0,r3
+err1; dcbz 0,r3
add r3,r3,r9
bdnz 14b
diff --git a/arch/powerpc/mm/8xx_mmu.c b/arch/powerpc/mm/8xx_mmu.c
index f4c6472f2fc4..f29212e40f40 100644
--- a/arch/powerpc/mm/8xx_mmu.c
+++ b/arch/powerpc/mm/8xx_mmu.c
@@ -22,8 +22,11 @@
extern int __map_without_ltlbs;
+static unsigned long block_mapped_ram;
+
/*
- * Return PA for this VA if it is in IMMR area, or 0
+ * Return PA for this VA if it is in an area mapped with LTLBs.
+ * Otherwise, returns 0
*/
phys_addr_t v_block_mapped(unsigned long va)
{
@@ -33,11 +36,13 @@ phys_addr_t v_block_mapped(unsigned long va)
return 0;
if (va >= VIRT_IMMR_BASE && va < VIRT_IMMR_BASE + IMMR_SIZE)
return p + va - VIRT_IMMR_BASE;
+ if (va >= PAGE_OFFSET && va < PAGE_OFFSET + block_mapped_ram)
+ return __pa(va);
return 0;
}
/*
- * Return VA for a given PA or 0 if not mapped
+ * Return VA for a given PA mapped with LTLBs or 0 if not mapped
*/
unsigned long p_block_mapped(phys_addr_t pa)
{
@@ -47,6 +52,8 @@ unsigned long p_block_mapped(phys_addr_t pa)
return 0;
if (pa >= p && pa < p + IMMR_SIZE)
return VIRT_IMMR_BASE + pa - p;
+ if (pa < block_mapped_ram)
+ return (unsigned long)__va(pa);
return 0;
}
@@ -58,7 +65,7 @@ unsigned long p_block_mapped(phys_addr_t pa)
void __init MMU_init_hw(void)
{
/* PIN up to the 3 first 8Mb after IMMR in DTLB table */
-#ifdef CONFIG_PIN_TLB
+#ifdef CONFIG_PIN_TLB_DATA
unsigned long ctr = mfspr(SPRN_MD_CTR) & 0xfe000000;
unsigned long flags = 0xf0 | MD_SPS16K | _PAGE_SHARED | _PAGE_DIRTY;
#ifdef CONFIG_PIN_TLB_IMMR
@@ -80,7 +87,7 @@ void __init MMU_init_hw(void)
#endif
}
-static void mmu_mapin_immr(void)
+static void __init mmu_mapin_immr(void)
{
unsigned long p = PHYS_IMMR_BASE;
unsigned long v = VIRT_IMMR_BASE;
@@ -96,8 +103,11 @@ static void mmu_mapin_immr(void)
extern unsigned int DTLBMiss_jmp;
#endif
extern unsigned int DTLBMiss_cmp, FixupDAR_cmp;
+#ifndef CONFIG_PIN_TLB_TEXT
+extern unsigned int ITLBMiss_cmp;
+#endif
-void mmu_patch_cmp_limit(unsigned int *addr, unsigned long mapped)
+static void __init mmu_patch_cmp_limit(unsigned int *addr, unsigned long mapped)
{
unsigned int instr = *addr;
@@ -116,6 +126,9 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
#ifndef CONFIG_PIN_TLB_IMMR
patch_instruction(&DTLBMiss_jmp, PPC_INST_NOP);
#endif
+#ifndef CONFIG_PIN_TLB_TEXT
+ mmu_patch_cmp_limit(&ITLBMiss_cmp, 0);
+#endif
} else {
mapped = top & ~(LARGE_PAGE_SIZE_8M - 1);
}
@@ -133,11 +146,13 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
if (mapped)
memblock_set_current_limit(mapped);
+ block_mapped_ram = mapped;
+
return mapped;
}
-void setup_initial_memory_limit(phys_addr_t first_memblock_base,
- phys_addr_t first_memblock_size)
+void __init setup_initial_memory_limit(phys_addr_t first_memblock_base,
+ phys_addr_t first_memblock_size)
{
/* We don't currently support the first MEMBLOCK not mapping 0
* physical on those processors
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 7414034df1c3..fb844d2f266e 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -8,7 +8,7 @@ ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
obj-y := fault.o mem.o pgtable.o mmap.o \
init_$(BITS).o pgtable_$(BITS).o \
- init-common.o
+ init-common.o mmu_context.o
obj-$(CONFIG_PPC_MMU_NOHASH) += mmu_context_nohash.o tlb_nohash.o \
tlb_nohash_low.o
obj-$(CONFIG_PPC_BOOK3E) += tlb_low_$(BITS)e.o
@@ -22,8 +22,6 @@ ifeq ($(CONFIG_PPC_STD_MMU_64),y)
obj-$(CONFIG_PPC_4K_PAGES) += hash64_4k.o
obj-$(CONFIG_PPC_64K_PAGES) += hash64_64k.o
endif
-obj-$(CONFIG_PPC_ICSWX) += icswx.o
-obj-$(CONFIG_PPC_ICSWX_PID) += icswx_pid.o
obj-$(CONFIG_40x) += 40x_mmu.o
obj-$(CONFIG_44x) += 44x_mmu.o
obj-$(CONFIG_PPC_8xx) += 8xx_mmu.o
diff --git a/arch/powerpc/mm/dump_hashpagetable.c b/arch/powerpc/mm/dump_hashpagetable.c
index b1c144b03fcf..5c4c93dcff19 100644
--- a/arch/powerpc/mm/dump_hashpagetable.c
+++ b/arch/powerpc/mm/dump_hashpagetable.c
@@ -205,7 +205,7 @@ static void dump_hpte_info(struct pg_state *st, unsigned long ea, u64 v, u64 r,
aps_index = calculate_pagesize(st, aps, "actual");
if (aps_index != 2)
seq_printf(st->seq, "LP enc: %lx", lp);
- seq_puts(st->seq, "\n");
+ seq_putc(st->seq, '\n');
}
diff --git a/arch/powerpc/mm/dump_linuxpagetables.c b/arch/powerpc/mm/dump_linuxpagetables.c
index 44fe4833910f..c9282d27b203 100644
--- a/arch/powerpc/mm/dump_linuxpagetables.c
+++ b/arch/powerpc/mm/dump_linuxpagetables.c
@@ -350,7 +350,7 @@ static void note_page(struct pg_state *st, unsigned long addr,
st->current_flags,
pg_level[st->level].num);
- seq_puts(st->seq, "\n");
+ seq_putc(st->seq, '\n');
}
/*
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 4c422632047b..4797d08581ce 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -45,43 +45,39 @@
#include <asm/siginfo.h>
#include <asm/debug.h>
-#include "icswx.h"
-
-#ifdef CONFIG_KPROBES
-static inline int notify_page_fault(struct pt_regs *regs)
+static inline bool notify_page_fault(struct pt_regs *regs)
{
- int ret = 0;
+ bool ret = false;
+#ifdef CONFIG_KPROBES
/* kprobe_running() needs smp_processor_id() */
if (!user_mode(regs)) {
preempt_disable();
if (kprobe_running() && kprobe_fault_handler(regs, 11))
- ret = 1;
+ ret = true;
preempt_enable();
}
+#endif /* CONFIG_KPROBES */
+
+ if (unlikely(debugger_fault_handler(regs)))
+ ret = true;
return ret;
}
-#else
-static inline int notify_page_fault(struct pt_regs *regs)
-{
- return 0;
-}
-#endif
/*
* Check whether the instruction at regs->nip is a store using
* an update addressing form which will update r1.
*/
-static int store_updates_sp(struct pt_regs *regs)
+static bool store_updates_sp(struct pt_regs *regs)
{
unsigned int inst;
if (get_user(inst, (unsigned int __user *)regs->nip))
- return 0;
+ return false;
/* check for 1 in the rA field */
if (((inst >> 16) & 0x1f) != 1)
- return 0;
+ return false;
/* check major opcode */
switch (inst >> 26) {
case 37: /* stwu */
@@ -89,7 +85,7 @@ static int store_updates_sp(struct pt_regs *regs)
case 45: /* sthu */
case 53: /* stfsu */
case 55: /* stfdu */
- return 1;
+ return true;
case 62: /* std or stdu */
return (inst & 3) == 1;
case 31:
@@ -101,18 +97,53 @@ static int store_updates_sp(struct pt_regs *regs)
case 439: /* sthux */
case 695: /* stfsux */
case 759: /* stfdux */
- return 1;
+ return true;
}
}
- return 0;
+ return false;
}
/*
* do_page_fault error handling helpers
*/
-#define MM_FAULT_RETURN 0
-#define MM_FAULT_CONTINUE -1
-#define MM_FAULT_ERR(sig) (sig)
+static int
+__bad_area_nosemaphore(struct pt_regs *regs, unsigned long address, int si_code)
+{
+ /*
+ * If we are in kernel mode, bail out with a SEGV, this will
+ * be caught by the assembly which will restore the non-volatile
+ * registers before calling bad_page_fault()
+ */
+ if (!user_mode(regs))
+ return SIGSEGV;
+
+ _exception(SIGSEGV, regs, si_code, address);
+
+ return 0;
+}
+
+static noinline int bad_area_nosemaphore(struct pt_regs *regs, unsigned long address)
+{
+ return __bad_area_nosemaphore(regs, address, SEGV_MAPERR);
+}
+
+static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code)
+{
+ struct mm_struct *mm = current->mm;
+
+ /*
+ * Something tried to access memory that isn't in our memory map..
+ * Fix it, but check if it's kernel or user first..
+ */
+ up_read(&mm->mmap_sem);
+
+ return __bad_area_nosemaphore(regs, address, si_code);
+}
+
+static noinline int bad_area(struct pt_regs *regs, unsigned long address)
+{
+ return __bad_area(regs, address, SEGV_MAPERR);
+}
static int do_sigbus(struct pt_regs *regs, unsigned long address,
unsigned int fault)
@@ -121,7 +152,7 @@ static int do_sigbus(struct pt_regs *regs, unsigned long address,
unsigned int lsb = 0;
if (!user_mode(regs))
- return MM_FAULT_ERR(SIGBUS);
+ return SIGBUS;
current->thread.trap_nr = BUS_ADRERR;
info.si_signo = SIGBUS;
@@ -142,25 +173,17 @@ static int do_sigbus(struct pt_regs *regs, unsigned long address,
#endif
info.si_addr_lsb = lsb;
force_sig_info(SIGBUS, &info, current);
- return MM_FAULT_RETURN;
+ return 0;
}
static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault)
{
/*
- * Pagefault was interrupted by SIGKILL. We have no reason to
- * continue the pagefault.
+ * Kernel page fault interrupted by SIGKILL. We have no reason to
+ * continue processing.
*/
- if (fatal_signal_pending(current)) {
- /* Coming from kernel, we need to deal with uaccess fixups */
- if (user_mode(regs))
- return MM_FAULT_RETURN;
- return MM_FAULT_ERR(SIGKILL);
- }
-
- /* No fault: be happy */
- if (!(fault & VM_FAULT_ERROR))
- return MM_FAULT_CONTINUE;
+ if (fatal_signal_pending(current) && !user_mode(regs))
+ return SIGKILL;
/* Out of memory */
if (fault & VM_FAULT_OOM) {
@@ -169,19 +192,176 @@ static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault)
* made us unable to handle the page fault gracefully.
*/
if (!user_mode(regs))
- return MM_FAULT_ERR(SIGKILL);
+ return SIGSEGV;
pagefault_out_of_memory();
- return MM_FAULT_RETURN;
+ } else {
+ if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
+ VM_FAULT_HWPOISON_LARGE))
+ return do_sigbus(regs, addr, fault);
+ else if (fault & VM_FAULT_SIGSEGV)
+ return bad_area_nosemaphore(regs, addr);
+ else
+ BUG();
+ }
+ return 0;
+}
+
+/* Is this a bad kernel fault ? */
+static bool bad_kernel_fault(bool is_exec, unsigned long error_code,
+ unsigned long address)
+{
+ if (is_exec && (error_code & (DSISR_NOEXEC_OR_G | DSISR_KEYFAULT))) {
+ printk_ratelimited(KERN_CRIT "kernel tried to execute"
+ " exec-protected page (%lx) -"
+ "exploit attempt? (uid: %d)\n",
+ address, from_kuid(&init_user_ns,
+ current_uid()));
}
+ return is_exec || (address >= TASK_SIZE);
+}
- if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE))
- return do_sigbus(regs, addr, fault);
+static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address,
+ struct vm_area_struct *vma,
+ bool store_update_sp)
+{
+ /*
+ * N.B. The POWER/Open ABI allows programs to access up to
+ * 288 bytes below the stack pointer.
+ * The kernel signal delivery code writes up to about 1.5kB
+ * below the stack pointer (r1) before decrementing it.
+ * The exec code can write slightly over 640kB to the stack
+ * before setting the user r1. Thus we allow the stack to
+ * expand to 1MB without further checks.
+ */
+ if (address + 0x100000 < vma->vm_end) {
+ /* get user regs even if this fault is in kernel mode */
+ struct pt_regs *uregs = current->thread.regs;
+ if (uregs == NULL)
+ return true;
- /* We don't understand the fault code, this is fatal */
- BUG();
- return MM_FAULT_CONTINUE;
+ /*
+ * A user-mode access to an address a long way below
+ * the stack pointer is only valid if the instruction
+ * is one which would update the stack pointer to the
+ * address accessed if the instruction completed,
+ * i.e. either stwu rs,n(r1) or stwux rs,r1,rb
+ * (or the byte, halfword, float or double forms).
+ *
+ * If we don't check this then any write to the area
+ * between the last mapped region and the stack will
+ * expand the stack rather than segfaulting.
+ */
+ if (address + 2048 < uregs->gpr[1] && !store_update_sp)
+ return true;
+ }
+ return false;
+}
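
The stack-expansion heuristic above can be modelled in isolation: the 2048-byte slack below r1 covers the 288-byte red zone and the signal-frame writes described in the comment, and the 1MB cap bounds how far the stack may grow without a stack-update store. The constants below come from that code; the helper name and the addresses in main() are made up purely for demonstration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Standalone model of bad_stack_expansion(); not kernel code. */
static bool stack_expansion_rejected(uint64_t address, uint64_t vma_end,
                                     uint64_t user_r1, bool store_update_sp)
{
        if (address + 0x100000 < vma_end) {             /* more than 1MB below the stack VMA end */
                if (address + 2048 < user_r1 && !store_update_sp)
                        return true;                    /* not a stwu-style push: reject */
        }
        return false;
}

int main(void)
{
        /* a write 4kB below r1 that is not a stack-update store gets rejected */
        printf("%d\n", stack_expansion_rejected(0x7fff0000, 0x80100000,
                                                0x7fff1000, false));
        return 0;
}
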
+
+static bool access_error(bool is_write, bool is_exec,
+ struct vm_area_struct *vma)
+{
+ /*
+ * Allow execution from readable areas if the MMU does not
+ * provide separate controls over reading and executing.
+ *
+ * Note: That code used to not be enabled for 4xx/BookE.
+ * It is now as I/D cache coherency for these is done at
+ * set_pte_at() time and I see no reason why the test
+ * below wouldn't be valid on those processors. This -may-
+ * break programs compiled with a really old ABI though.
+ */
+ if (is_exec) {
+ return !(vma->vm_flags & VM_EXEC) &&
+ (cpu_has_feature(CPU_FTR_NOEXECUTE) ||
+ !(vma->vm_flags & (VM_READ | VM_WRITE)));
+ }
+
+ if (is_write) {
+ if (unlikely(!(vma->vm_flags & VM_WRITE)))
+ return true;
+ return false;
+ }
+
+ if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
+ return true;
+
+ return false;
}
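+/*
+ * Editor's note (sketch, not from the patch): for an instruction fault
+ * on a mapping that is VM_READ|VM_WRITE but not VM_EXEC, the check above
+ * reports an error on CPUs with CPU_FTR_NOEXECUTE (per-page execute
+ * control), but lets the access through on MMUs without it, preserving
+ * the old "exec from readable" behaviour for very old ABIs.
+ */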
+#ifdef CONFIG_PPC_SMLPAR
+static inline void cmo_account_page_fault(void)
+{
+ if (firmware_has_feature(FW_FEATURE_CMO)) {
+ u32 page_ins;
+
+ preempt_disable();
+ page_ins = be32_to_cpu(get_lppaca()->page_ins);
+ page_ins += 1 << PAGE_FACTOR;
+ get_lppaca()->page_ins = cpu_to_be32(page_ins);
+ preempt_enable();
+ }
+}
+#else
+static inline void cmo_account_page_fault(void) { }
+#endif /* CONFIG_PPC_SMLPAR */
+
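+/*
+ * Editor's note (assumption, not part of the patch): page_ins is kept in
+ * 4kB hardware-page units, so with 64kB kernel pages PAGE_FACTOR is 4 and
+ * each major fault accounted above bumps the count by 16.
+ */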
+#ifdef CONFIG_PPC_STD_MMU
+static void sanity_check_fault(bool is_write, unsigned long error_code)
+{
+ /*
+ * For hash translation mode, we should never get a
+ * PROTFAULT. Any update to pte to reduce access will result in us
+ * removing the hash page table entry, thus resulting in a DSISR_NOHPTE
+ * fault instead of DSISR_PROTFAULT.
+ *
+ * A pte update to relax the access will not result in a hash page table
+ * entry invalidation and hence can result in DSISR_PROTFAULT.
+ * ptep_set_access_flags() doesn't do a hpte flush. This is why we have
+ * the special !is_write in the below conditional.
+ *
+ * For platforms that don't support a coherent icache but do support a
+ * per-page noexec bit, we set things up such that we do the
+ * sync between D/I cache via fault. But that is handled via low level
+ * hash fault code (hash_page_do_lazy_icache()) and we should not reach
+ * here in such case.
+ *
+ * For wrong access that can result in PROTFAULT, the above vma->vm_flags
+ * check should handle those and hence we should fall to the bad_area
+ * handling correctly.
+ *
+ * For embedded with per page exec support that doesn't support coherent
+ * icache we do get PROTFAULT and we handle that D/I cache sync in
+ * set_pte_at while taking the noexec/prot fault. Hence this WARN_ON
+ * is conditional on the server MMU.
+ *
+ * For radix, we can get prot fault for autonuma case, because radix
+ * page table will have them marked noaccess for user.
+ */
+ if (!radix_enabled() && !is_write)
+ WARN_ON_ONCE(error_code & DSISR_PROTFAULT);
+}
+#else
+static void sanity_check_fault(bool is_write, unsigned long error_code) { }
+#endif /* CONFIG_PPC_STD_MMU */
+
+/*
+ * Define the correct "is_write" bit in error_code based
+ * on the processor family
+ */
+#if (defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
+#define page_fault_is_write(__err) ((__err) & ESR_DST)
+#define page_fault_is_bad(__err) (0)
+#else
+#define page_fault_is_write(__err) ((__err) & DSISR_ISSTORE)
+#if defined(CONFIG_PPC_8xx)
+#define page_fault_is_bad(__err) ((__err) & DSISR_NOEXEC_OR_G)
+#elif defined(CONFIG_PPC64)
+#define page_fault_is_bad(__err) ((__err) & DSISR_BAD_FAULT_64S)
+#else
+#define page_fault_is_bad(__err) ((__err) & DSISR_BAD_FAULT_32S)
+#endif
+#endif
+
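+/*
+ * Editor's example (assumption): on 64-bit Book3S a normal store fault
+ * arrives with something like DSISR = DSISR_ISSTORE | DSISR_NOHPTE, so
+ * page_fault_is_write() is non-zero, page_fault_is_bad() is zero, and
+ * the fault proceeds to the VMA checks; only the error bits collected
+ * in DSISR_BAD_FAULT_64S short-circuit to SIGBUS below.
+ */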
/*
* For 600- and 800-family processors, the error_code parameter is DSISR
* for a data fault, SRR1 for an instruction fault. For 400-family processors
@@ -195,92 +375,56 @@ static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault)
* The return value is 0 if the fault was handled, or the signal
* number if this is a kernel fault that can't be handled here.
*/
-int do_page_fault(struct pt_regs *regs, unsigned long address,
- unsigned long error_code)
+static int __do_page_fault(struct pt_regs *regs, unsigned long address,
+ unsigned long error_code)
{
- enum ctx_state prev_state = exception_enter();
struct vm_area_struct * vma;
struct mm_struct *mm = current->mm;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
- int code = SEGV_MAPERR;
- int is_write = 0;
- int trap = TRAP(regs);
- int is_exec = trap == 0x400;
+ int is_exec = TRAP(regs) == 0x400;
int is_user = user_mode(regs);
- int fault;
- int rc = 0, store_update_sp = 0;
+ int is_write = page_fault_is_write(error_code);
+ int fault, major = 0;
+ bool store_update_sp = false;
-#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
- /*
- * Fortunately the bit assignments in SRR1 for an instruction
- * fault and DSISR for a data fault are mostly the same for the
- * bits we are interested in. But there are some bits which
- * indicate errors in DSISR but can validly be set in SRR1.
- */
- if (is_exec)
- error_code &= 0x48200000;
- else
- is_write = error_code & DSISR_ISSTORE;
-#else
- is_write = error_code & ESR_DST;
-#endif /* CONFIG_4xx || CONFIG_BOOKE */
+ if (notify_page_fault(regs))
+ return 0;
-#ifdef CONFIG_PPC_ICSWX
- /*
- * we need to do this early because this "data storage
- * interrupt" does not update the DAR/DEAR so we don't want to
- * look at it
- */
- if (error_code & ICSWX_DSI_UCT) {
- rc = acop_handle_fault(regs, address, error_code);
- if (rc)
- goto bail;
+ if (unlikely(page_fault_is_bad(error_code))) {
+ if (is_user) {
+ _exception(SIGBUS, regs, BUS_OBJERR, address);
+ return 0;
+ }
+ return SIGBUS;
}
-#endif /* CONFIG_PPC_ICSWX */
-
- if (notify_page_fault(regs))
- goto bail;
- if (unlikely(debugger_fault_handler(regs)))
- goto bail;
+ /* Additional sanity check(s) */
+ sanity_check_fault(is_write, error_code);
/*
* The kernel should never take an execute fault nor should it
* take a page fault to a kernel address.
*/
- if (!is_user && (is_exec || (address >= TASK_SIZE))) {
- rc = SIGSEGV;
- goto bail;
- }
+ if (unlikely(!is_user && bad_kernel_fault(is_exec, error_code, address)))
+ return SIGSEGV;
-#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE) || \
- defined(CONFIG_PPC_BOOK3S_64) || defined(CONFIG_PPC_8xx))
- if (error_code & DSISR_DABRMATCH) {
- /* breakpoint match */
- do_break(regs, address, error_code);
- goto bail;
+ /*
+ * If we're in an interrupt, have no user context or are running
+ * in a region with pagefaults disabled then we must not take the fault
+ */
+ if (unlikely(faulthandler_disabled() || !mm)) {
+ if (is_user)
+ printk_ratelimited(KERN_ERR "Page fault in user mode"
+ " with faulthandler_disabled()=%d"
+ " mm=%p\n",
+ faulthandler_disabled(), mm);
+ return bad_area_nosemaphore(regs, address);
}
-#endif
/* We restore the interrupt state now */
if (!arch_irq_disabled_regs(regs))
local_irq_enable();
- if (faulthandler_disabled() || mm == NULL) {
- if (!is_user) {
- rc = SIGSEGV;
- goto bail;
- }
- /* faulthandler_disabled() in user mode is really bad,
- as is current->mm == NULL. */
- printk(KERN_EMERG "Page fault in user mode with "
- "faulthandler_disabled() = %d mm = %p\n",
- faulthandler_disabled(), mm);
- printk(KERN_EMERG "NIP = %lx MSR = %lx\n",
- regs->nip, regs->msr);
- die("Weird page fault", regs, SIGSEGV);
- }
-
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
/*
@@ -293,6 +437,10 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
if (is_user)
flags |= FAULT_FLAG_USER;
+ if (is_write)
+ flags |= FAULT_FLAG_WRITE;
+ if (is_exec)
+ flags |= FAULT_FLAG_INSTRUCTION;
/* When running in the kernel we expect faults to occur only to
* addresses in user space. All other faults represent errors in the
@@ -309,9 +457,9 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
* source. If this is invalid we can skip the address space check,
* thus avoiding the deadlock.
*/
- if (!down_read_trylock(&mm->mmap_sem)) {
+ if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
if (!is_user && !search_exception_tables(regs->nip))
- goto bad_area_nosemaphore;
+ return bad_area_nosemaphore(regs, address);
retry:
down_read(&mm->mmap_sem);
@@ -325,122 +473,24 @@ retry:
}
vma = find_vma(mm, address);
- if (!vma)
- goto bad_area;
- if (vma->vm_start <= address)
+ if (unlikely(!vma))
+ return bad_area(regs, address);
+ if (likely(vma->vm_start <= address))
goto good_area;
- if (!(vma->vm_flags & VM_GROWSDOWN))
- goto bad_area;
+ if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
+ return bad_area(regs, address);
- /*
- * N.B. The POWER/Open ABI allows programs to access up to
- * 288 bytes below the stack pointer.
- * The kernel signal delivery code writes up to about 1.5kB
- * below the stack pointer (r1) before decrementing it.
- * The exec code can write slightly over 640kB to the stack
- * before setting the user r1. Thus we allow the stack to
- * expand to 1MB without further checks.
- */
- if (address + 0x100000 < vma->vm_end) {
- /* get user regs even if this fault is in kernel mode */
- struct pt_regs *uregs = current->thread.regs;
- if (uregs == NULL)
- goto bad_area;
+ /* The stack is being expanded, check if it's valid */
+ if (unlikely(bad_stack_expansion(regs, address, vma, store_update_sp)))
+ return bad_area(regs, address);
- /*
- * A user-mode access to an address a long way below
- * the stack pointer is only valid if the instruction
- * is one which would update the stack pointer to the
- * address accessed if the instruction completed,
- * i.e. either stwu rs,n(r1) or stwux rs,r1,rb
- * (or the byte, halfword, float or double forms).
- *
- * If we don't check this then any write to the area
- * between the last mapped region and the stack will
- * expand the stack rather than segfaulting.
- */
- if (address + 2048 < uregs->gpr[1] && !store_update_sp)
- goto bad_area;
- }
- if (expand_stack(vma, address))
- goto bad_area;
+ /* Try to expand it */
+ if (unlikely(expand_stack(vma, address)))
+ return bad_area(regs, address);
good_area:
- code = SEGV_ACCERR;
-#if defined(CONFIG_6xx)
- if (error_code & 0x95700000)
- /* an error such as lwarx to I/O controller space,
- address matching DABR, eciwx, etc. */
- goto bad_area;
-#endif /* CONFIG_6xx */
-#if defined(CONFIG_8xx)
- /* The MPC8xx seems to always set 0x80000000, which is
- * "undefined". Of those that can be set, this is the only
- * one which seems bad.
- */
- if (error_code & 0x10000000)
- /* Guarded storage error. */
- goto bad_area;
-#endif /* CONFIG_8xx */
-
- if (is_exec) {
- /*
- * Allow execution from readable areas if the MMU does not
- * provide separate controls over reading and executing.
- *
- * Note: That code used to not be enabled for 4xx/BookE.
- * It is now as I/D cache coherency for these is done at
- * set_pte_at() time and I see no reason why the test
- * below wouldn't be valid on those processors. This -may-
- * break programs compiled with a really old ABI though.
- */
- if (!(vma->vm_flags & VM_EXEC) &&
- (cpu_has_feature(CPU_FTR_NOEXECUTE) ||
- !(vma->vm_flags & (VM_READ | VM_WRITE))))
- goto bad_area;
- /* a write */
- } else if (is_write) {
- if (!(vma->vm_flags & VM_WRITE))
- goto bad_area;
- flags |= FAULT_FLAG_WRITE;
- /* a read */
- } else {
- if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
- goto bad_area;
- }
-#ifdef CONFIG_PPC_STD_MMU
- /*
- * For hash translation mode, we should never get a
- * PROTFAULT. Any update to pte to reduce access will result in us
- * removing the hash page table entry, thus resulting in a DSISR_NOHPTE
- * fault instead of DSISR_PROTFAULT.
- *
- * A pte update to relax the access will not result in a hash page table
- * entry invalidate and hence can result in DSISR_PROTFAULT.
- * ptep_set_access_flags() doesn't do a hpte flush. This is why we have
- * the special !is_write in the below conditional.
- *
- * For platforms that doesn't supports coherent icache and do support
- * per page noexec bit, we do setup things such that we do the
- * sync between D/I cache via fault. But that is handled via low level
- * hash fault code (hash_page_do_lazy_icache()) and we should not reach
- * here in such case.
- *
- * For wrong access that can result in PROTFAULT, the above vma->vm_flags
- * check should handle those and hence we should fall to the bad_area
- * handling correctly.
- *
- * For embedded with per page exec support that doesn't support coherent
- * icache we do get PROTFAULT and we handle that D/I cache sync in
- * set_pte_at while taking the noexec/prot fault. Hence this is WARN_ON
- * is conditional for server MMU.
- *
- * For radix, we can get prot fault for autonuma case, because radix
- * page table will have them marked noaccess for user.
- */
- if (!radix_enabled() && !is_write)
- WARN_ON_ONCE(error_code & DSISR_PROTFAULT);
-#endif /* CONFIG_PPC_STD_MMU */
+ if (unlikely(access_error(is_write, is_exec, vma)))
+ return bad_area(regs, address);
/*
* If for any reason at all we couldn't handle the fault,
@@ -448,6 +498,7 @@ good_area:
* the fault.
*/
fault = handle_mm_fault(vma, address, flags);
+ major |= fault & VM_FAULT_MAJOR;
/*
* Handle the retry right now, the mmap_sem has been released in that
@@ -465,64 +516,39 @@ good_area:
if (!fatal_signal_pending(current))
goto retry;
}
- /* We will enter mm_fault_error() below */
- } else
- up_read(&current->mm->mmap_sem);
-
- if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
- if (fault & VM_FAULT_SIGSEGV)
- goto bad_area_nosemaphore;
- rc = mm_fault_error(regs, address, fault);
- if (rc >= MM_FAULT_RETURN)
- goto bail;
- else
- rc = 0;
+
+ /*
+ * User mode? Just return to handle the fatal exception otherwise
+ * return to bad_page_fault
+ */
+ return is_user ? 0 : SIGBUS;
}
+ up_read(&current->mm->mmap_sem);
+
+ if (unlikely(fault & VM_FAULT_ERROR))
+ return mm_fault_error(regs, address, fault);
+
/*
* Major/minor page fault accounting.
*/
- if (fault & VM_FAULT_MAJOR) {
+ if (major) {
current->maj_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
- regs, address);
-#ifdef CONFIG_PPC_SMLPAR
- if (firmware_has_feature(FW_FEATURE_CMO)) {
- u32 page_ins;
-
- preempt_disable();
- page_ins = be32_to_cpu(get_lppaca()->page_ins);
- page_ins += 1 << PAGE_FACTOR;
- get_lppaca()->page_ins = cpu_to_be32(page_ins);
- preempt_enable();
- }
-#endif /* CONFIG_PPC_SMLPAR */
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
+ cmo_account_page_fault();
} else {
current->min_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
- regs, address);
- }
-
- goto bail;
-
-bad_area:
- up_read(&mm->mmap_sem);
-
-bad_area_nosemaphore:
- /* User mode accesses cause a SIGSEGV */
- if (is_user) {
- _exception(SIGSEGV, regs, code, address);
- goto bail;
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
}
+ return 0;
+}
+NOKPROBE_SYMBOL(__do_page_fault);
- if (is_exec && (error_code & DSISR_PROTFAULT))
- printk_ratelimited(KERN_CRIT "kernel tried to execute NX-protected"
- " page (%lx) - exploit attempt? (uid: %d)\n",
- address, from_kuid(&init_user_ns, current_uid()));
-
- rc = SIGSEGV;
-
-bail:
+int do_page_fault(struct pt_regs *regs, unsigned long address,
+ unsigned long error_code)
+{
+ enum ctx_state prev_state = exception_enter();
+ int rc = __do_page_fault(regs, address, error_code);
exception_exit(prev_state);
return rc;
}
diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/hash_low_32.S
index 6f962e5cb5e1..ffbd7c0bda96 100644
--- a/arch/powerpc/mm/hash_low_32.S
+++ b/arch/powerpc/mm/hash_low_32.S
@@ -575,7 +575,6 @@ _GLOBAL(flush_hash_pages)
rlwinm r8,r8,0,31,29 /* clear HASHPTE bit */
stwcx. r8,0,r5 /* update the pte */
bne- 33b
-EXPORT_SYMBOL(flush_hash_pages)
/* Get the address of the primary PTE group in the hash table (r3) */
_GLOBAL(flush_hash_patch_A)
@@ -634,6 +633,7 @@ _GLOBAL(flush_hash_patch_B)
SYNC_601
isync
blr
+EXPORT_SYMBOL(flush_hash_pages)
/*
* Flush an entry from the TLB
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 7a20669c19e7..67ec2e927253 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -61,6 +61,7 @@
#include <asm/tm.h>
#include <asm/trace.h>
#include <asm/ps3.h>
+#include <asm/pte-walk.h>
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
@@ -507,9 +508,9 @@ static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
printk(KERN_INFO "Huge page(16GB) memory: "
"addr = 0x%lX size = 0x%lX pages = %d\n",
phys_addr, block_size, expected_pages);
- if (phys_addr + (16 * GB) <= memblock_end_of_DRAM()) {
+ if (phys_addr + block_size * expected_pages <= memblock_end_of_DRAM()) {
memblock_reserve(phys_addr, block_size * expected_pages);
- add_gpage(phys_addr, block_size, expected_pages);
+ pseries_add_gpage(phys_addr, block_size, expected_pages);
}
return 0;
}
@@ -1019,6 +1020,7 @@ void __init hash__early_init_mmu(void)
__kernel_virt_size = H_KERN_VIRT_SIZE;
__vmalloc_start = H_VMALLOC_START;
__vmalloc_end = H_VMALLOC_END;
+ __kernel_io_start = H_KERN_IO_START;
vmemmap = (struct page *)H_VMEMMAP_BASE;
ioremap_bot = IOREMAP_BASE;
@@ -1228,7 +1230,6 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
unsigned long vsid;
pte_t *ptep;
unsigned hugeshift;
- const struct cpumask *tmp;
int rc, user_region = 0;
int psize, ssize;
@@ -1280,8 +1281,7 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
}
/* Check CPU locality */
- tmp = cpumask_of(smp_processor_id());
- if (user_region && cpumask_equal(mm_cpumask(mm), tmp))
+ if (user_region && mm_is_thread_local(mm))
flags |= HPTE_LOCAL_UPDATE;
#ifndef CONFIG_PPC_64K_PAGES
@@ -1297,7 +1297,7 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
#endif /* CONFIG_PPC_64K_PAGES */
/* Get PTE and page size from page tables */
- ptep = __find_linux_pte_or_hugepte(pgdir, ea, &is_thp, &hugeshift);
+ ptep = find_linux_pte(pgdir, ea, &is_thp, &hugeshift);
if (ptep == NULL || !pte_present(*ptep)) {
DBG_LOW(" no PTE !\n");
rc = 1;
@@ -1526,7 +1526,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
* THP pages use update_mmu_cache_pmd. We don't do
* hash preload there. Hence can ignore THP here
*/
- ptep = find_linux_pte_or_hugepte(pgdir, ea, NULL, &hugepage_shift);
+ ptep = find_current_mm_pte(pgdir, ea, NULL, &hugepage_shift);
if (!ptep)
goto out_exit;
@@ -1543,7 +1543,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
#endif /* CONFIG_PPC_64K_PAGES */
/* Is that local to this CPU ? */
- if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
+ if (mm_is_thread_local(mm))
update_flags |= HPTE_LOCAL_UPDATE;
/* Hash it in */
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index e1bf5ca397fe..1571a498a33f 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -24,6 +24,8 @@
#include <asm/tlb.h>
#include <asm/setup.h>
#include <asm/hugetlb.h>
+#include <asm/pte-walk.h>
+
#ifdef CONFIG_HUGETLB_PAGE
@@ -36,32 +38,15 @@
unsigned int HPAGE_SHIFT;
EXPORT_SYMBOL(HPAGE_SHIFT);
-/*
- * Tracks gpages after the device tree is scanned and before the
- * huge_boot_pages list is ready. On non-Freescale implementations, this is
- * just used to track 16G pages and so is a single array. FSL-based
- * implementations may have more than one gpage size, so we need multiple
- * arrays
- */
-#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
-#define MAX_NUMBER_GPAGES 128
-struct psize_gpages {
- u64 gpage_list[MAX_NUMBER_GPAGES];
- unsigned int nr_gpages;
-};
-static struct psize_gpages gpage_freearray[MMU_PAGE_COUNT];
-#else
-#define MAX_NUMBER_GPAGES 1024
-static u64 gpage_freearray[MAX_NUMBER_GPAGES];
-static unsigned nr_gpages;
-#endif
-
#define hugepd_none(hpd) (hpd_val(hpd) == 0)
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
- /* Only called for hugetlbfs pages, hence can ignore THP */
- return __find_linux_pte_or_hugepte(mm->pgd, addr, NULL, NULL);
+ /*
+ * Only called for hugetlbfs pages, hence we can ignore THP and don't
+ * need the irq-disabled walk.
+ */
+ return __find_linux_pte(mm->pgd, addr, NULL, NULL);
}
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
@@ -210,145 +195,20 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
return hugepte_offset(*hpdp, addr, pdshift);
}
-#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
-/* Build list of addresses of gigantic pages. This function is used in early
- * boot before the buddy allocator is setup.
- */
-void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
-{
- unsigned int idx = shift_to_mmu_psize(__ffs(page_size));
- int i;
-
- if (addr == 0)
- return;
-
- gpage_freearray[idx].nr_gpages = number_of_pages;
-
- for (i = 0; i < number_of_pages; i++) {
- gpage_freearray[idx].gpage_list[i] = addr;
- addr += page_size;
- }
-}
-
-/*
- * Moves the gigantic page addresses from the temporary list to the
- * huge_boot_pages list.
- */
-int alloc_bootmem_huge_page(struct hstate *hstate)
-{
- struct huge_bootmem_page *m;
- int idx = shift_to_mmu_psize(huge_page_shift(hstate));
- int nr_gpages = gpage_freearray[idx].nr_gpages;
-
- if (nr_gpages == 0)
- return 0;
-
-#ifdef CONFIG_HIGHMEM
- /*
- * If gpages can be in highmem we can't use the trick of storing the
- * data structure in the page; allocate space for this
- */
- m = memblock_virt_alloc(sizeof(struct huge_bootmem_page), 0);
- m->phys = gpage_freearray[idx].gpage_list[--nr_gpages];
-#else
- m = phys_to_virt(gpage_freearray[idx].gpage_list[--nr_gpages]);
-#endif
-
- list_add(&m->list, &huge_boot_pages);
- gpage_freearray[idx].nr_gpages = nr_gpages;
- gpage_freearray[idx].gpage_list[nr_gpages] = 0;
- m->hstate = hstate;
-
- return 1;
-}
+#ifdef CONFIG_PPC_BOOK3S_64
/*
- * Scan the command line hugepagesz= options for gigantic pages; store those in
- * a list that we use to allocate the memory once all options are parsed.
+ * Tracks gpages after the device tree is scanned and before the
+ * huge_boot_pages list is ready on pseries.
*/
-
-unsigned long gpage_npages[MMU_PAGE_COUNT];
-
-static int __init do_gpage_early_setup(char *param, char *val,
- const char *unused, void *arg)
-{
- static phys_addr_t size;
- unsigned long npages;
-
- /*
- * The hugepagesz and hugepages cmdline options are interleaved. We
- * use the size variable to keep track of whether or not this was done
- * properly and skip over instances where it is incorrect. Other
- * command-line parsing code will issue warnings, so we don't need to.
- *
- */
- if ((strcmp(param, "default_hugepagesz") == 0) ||
- (strcmp(param, "hugepagesz") == 0)) {
- size = memparse(val, NULL);
- } else if (strcmp(param, "hugepages") == 0) {
- if (size != 0) {
- if (sscanf(val, "%lu", &npages) <= 0)
- npages = 0;
- if (npages > MAX_NUMBER_GPAGES) {
- pr_warn("MMU: %lu pages requested for page "
-#ifdef CONFIG_PHYS_ADDR_T_64BIT
- "size %llu KB, limiting to "
-#else
- "size %u KB, limiting to "
-#endif
- __stringify(MAX_NUMBER_GPAGES) "\n",
- npages, size / 1024);
- npages = MAX_NUMBER_GPAGES;
- }
- gpage_npages[shift_to_mmu_psize(__ffs(size))] = npages;
- size = 0;
- }
- }
- return 0;
-}
-
+#define MAX_NUMBER_GPAGES 1024
+__initdata static u64 gpage_freearray[MAX_NUMBER_GPAGES];
+__initdata static unsigned nr_gpages;
/*
- * This function allocates physical space for pages that are larger than the
- * buddy allocator can handle. We want to allocate these in highmem because
- * the amount of lowmem is limited. This means that this function MUST be
- * called before lowmem_end_addr is set up in MMU_init() in order for the lmb
- * allocate to grab highmem.
- */
-void __init reserve_hugetlb_gpages(void)
-{
- static __initdata char cmdline[COMMAND_LINE_SIZE];
- phys_addr_t size, base;
- int i;
-
- strlcpy(cmdline, boot_command_line, COMMAND_LINE_SIZE);
- parse_args("hugetlb gpages", cmdline, NULL, 0, 0, 0,
- NULL, &do_gpage_early_setup);
-
- /*
- * Walk gpage list in reverse, allocating larger page sizes first.
- * Skip over unsupported sizes, or sizes that have 0 gpages allocated.
- * When we reach the point in the list where pages are no longer
- * considered gpages, we're done.
- */
- for (i = MMU_PAGE_COUNT-1; i >= 0; i--) {
- if (mmu_psize_defs[i].shift == 0 || gpage_npages[i] == 0)
- continue;
- else if (mmu_psize_to_shift(i) < (MAX_ORDER + PAGE_SHIFT))
- break;
-
- size = (phys_addr_t)(1ULL << mmu_psize_to_shift(i));
- base = memblock_alloc_base(size * gpage_npages[i], size,
- MEMBLOCK_ALLOC_ANYWHERE);
- add_gpage(base, size, gpage_npages[i]);
- }
-}
-
-#else /* !PPC_FSL_BOOK3E */
-
-/* Build list of addresses of gigantic pages. This function is used in early
+ * Build list of addresses of gigantic pages. This function is used in early
* boot before the buddy allocator is setup.
*/
-void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
+void __init pseries_add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
if (!addr)
return;
@@ -360,10 +220,7 @@ void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
}
}
-/* Moves the gigantic page addresses from the temporary list to the
- * huge_boot_pages list.
- */
-int alloc_bootmem_huge_page(struct hstate *hstate)
+int __init pseries_alloc_bootmem_huge_page(struct hstate *hstate)
{
struct huge_bootmem_page *m;
if (nr_gpages == 0)
@@ -376,6 +233,17 @@ int alloc_bootmem_huge_page(struct hstate *hstate)
}
#endif
+
+int __init alloc_bootmem_huge_page(struct hstate *h)
+{
+
+#ifdef CONFIG_PPC_BOOK3S_64
+ if (firmware_has_feature(FW_FEATURE_LPAR) && !radix_enabled())
+ return pseries_alloc_bootmem_huge_page(h);
+#endif
+ return __alloc_bootmem_huge_page(h);
+}
+
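+/*
+ * Editor's note, derived from the code above: on a pseries
+ * (FW_FEATURE_LPAR) guest running the hash MMU, boot-time gigantic pages
+ * come from the list built by pseries_add_gpage() while scanning the
+ * device tree; radix guests and bare-metal systems fall back to the
+ * generic memblock-based __alloc_bootmem_huge_page().
+ */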
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
#define HUGEPD_FREELIST_SIZE \
((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))
@@ -407,8 +275,7 @@ static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
batchp = &get_cpu_var(hugepd_freelist_cur);
if (atomic_read(&tlb->mm->mm_users) < 2 ||
- cpumask_equal(mm_cpumask(tlb->mm),
- cpumask_of(smp_processor_id()))) {
+ mm_is_thread_local(tlb->mm)) {
kmem_cache_free(hugepte_cache, hugepte);
put_cpu_var(hugepd_freelist_cur);
return;
@@ -886,9 +753,8 @@ void flush_dcache_icache_hugepage(struct page *page)
* This function need to be called with interrupts disabled. We use this variant
* when we have MSR[EE] = 0 but the paca->soft_enabled = 1
*/
-
-pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
- bool *is_thp, unsigned *shift)
+pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
+ bool *is_thp, unsigned *hpage_shift)
{
pgd_t pgd, *pgdp;
pud_t pud, *pudp;
@@ -897,8 +763,8 @@ pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
hugepd_t *hpdp = NULL;
unsigned pdshift = PGDIR_SHIFT;
- if (shift)
- *shift = 0;
+ if (hpage_shift)
+ *hpage_shift = 0;
if (is_thp)
*is_thp = false;
@@ -968,16 +834,15 @@ pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
ret_pte = hugepte_offset(*hpdp, ea, pdshift);
pdshift = hugepd_shift(*hpdp);
out:
- if (shift)
- *shift = pdshift;
+ if (hpage_shift)
+ *hpage_shift = pdshift;
return ret_pte;
}
-EXPORT_SYMBOL_GPL(__find_linux_pte_or_hugepte);
+EXPORT_SYMBOL_GPL(__find_linux_pte);
int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
unsigned long end, int write, struct page **pages, int *nr)
{
- unsigned long mask;
unsigned long pte_end;
struct page *head, *page;
pte_t pte;
@@ -988,18 +853,10 @@ int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
end = pte_end;
pte = READ_ONCE(*ptep);
- mask = _PAGE_PRESENT | _PAGE_READ;
- /*
- * On some CPUs like the 8xx, _PAGE_RW hence _PAGE_WRITE is defined
- * as 0 and _PAGE_RO has to be set when a page is not writable
- */
- if (write)
- mask |= _PAGE_WRITE;
- else
- mask |= _PAGE_RO;
-
- if ((pte_val(pte) & mask) != mask)
+ if (!pte_present(pte) || !pte_read(pte))
+ return 0;
+ if (write && !pte_write(pte))
return 0;
/* hugepages are never "special" */
diff --git a/arch/powerpc/mm/icswx.c b/arch/powerpc/mm/icswx.c
deleted file mode 100644
index 1fa794d7d59f..000000000000
--- a/arch/powerpc/mm/icswx.c
+++ /dev/null
@@ -1,292 +0,0 @@
-/*
- * ICSWX and ACOP Management
- *
- * Copyright (C) 2011 Anton Blanchard, IBM Corp. <anton@samba.org>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- */
-
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/spinlock.h>
-#include <linux/module.h>
-#include <linux/uaccess.h>
-
-#include "icswx.h"
-
-/*
- * The processor and its L2 cache cause the icswx instruction to
- * generate a COP_REQ transaction on PowerBus. The transaction has no
- * address, and the processor does not perform an MMU access to
- * authenticate the transaction. The command portion of the PowerBus
- * COP_REQ transaction includes the LPAR_ID (LPID) and the coprocessor
- * Process ID (PID), which the coprocessor compares to the authorized
- * LPID and PID held in the coprocessor, to determine if the process
- * is authorized to generate the transaction. The data of the COP_REQ
- * transaction is 128-byte or less in size and is placed in cacheable
- * memory on a 128-byte cache line boundary.
- *
- * The task to use a coprocessor should use use_cop() to mark the use
- * of the Coprocessor Type (CT) and context switching. On a server
- * class processor, the PID register is used only for coprocessor
- * management + * and so a coprocessor PID is allocated before
- * executing icswx + * instruction. Drop_cop() is used to free the
- * coprocessor PID.
- *
- * Example:
- * Host Fabric Interface (HFI) is a PowerPC network coprocessor.
- * Each HFI have multiple windows. Each HFI window serves as a
- * network device sending to and receiving from HFI network.
- * HFI immediate send function uses icswx instruction. The immediate
- * send function allows small (single cache-line) packets be sent
- * without using the regular HFI send FIFO and doorbell, which are
- * much slower than immediate send.
- *
- * For each task intending to use HFI immediate send, the HFI driver
- * calls use_cop() to obtain a coprocessor PID for the task.
- * The HFI driver then allocate a free HFI window and save the
- * coprocessor PID to the HFI window to allow the task to use the
- * HFI window.
- *
- * The HFI driver repeatedly creates immediate send packets and
- * issues icswx instruction to send data through the HFI window.
- * The HFI compares the coprocessor PID in the CPU PID register
- * to the PID held in the HFI window to determine if the transaction
- * is allowed.
- *
- * When the task to release the HFI window, the HFI driver calls
- * drop_cop() to release the coprocessor PID.
- */
-
-void switch_cop(struct mm_struct *next)
-{
-#ifdef CONFIG_PPC_ICSWX_PID
- mtspr(SPRN_PID, next->context.cop_pid);
-#endif
- mtspr(SPRN_ACOP, next->context.acop);
-}
-
-/**
- * Start using a coprocessor.
- * @acop: mask of coprocessor to be used.
- * @mm: The mm the coprocessor to associate with. Most likely current mm.
- *
- * Return a positive PID if successful. Negative errno otherwise.
- * The returned PID will be fed to the coprocessor to determine if an
- * icswx transaction is authenticated.
- */
-int use_cop(unsigned long acop, struct mm_struct *mm)
-{
- int ret;
-
- if (!cpu_has_feature(CPU_FTR_ICSWX))
- return -ENODEV;
-
- if (!mm || !acop)
- return -EINVAL;
-
- /* The page_table_lock ensures mm_users won't change under us */
- spin_lock(&mm->page_table_lock);
- spin_lock(mm->context.cop_lockp);
-
- ret = get_cop_pid(mm);
- if (ret < 0)
- goto out;
-
- /* update acop */
- mm->context.acop |= acop;
-
- sync_cop(mm);
-
- /*
- * If this is a threaded process then there might be other threads
- * running. We need to send an IPI to force them to pick up any
- * change in PID and ACOP.
- */
- if (atomic_read(&mm->mm_users) > 1)
- smp_call_function(sync_cop, mm, 1);
-
-out:
- spin_unlock(mm->context.cop_lockp);
- spin_unlock(&mm->page_table_lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(use_cop);
-
-/**
- * Stop using a coprocessor.
- * @acop: mask of coprocessor to be stopped.
- * @mm: The mm the coprocessor associated with.
- */
-void drop_cop(unsigned long acop, struct mm_struct *mm)
-{
- int free_pid;
-
- if (!cpu_has_feature(CPU_FTR_ICSWX))
- return;
-
- if (WARN_ON_ONCE(!mm))
- return;
-
- /* The page_table_lock ensures mm_users won't change under us */
- spin_lock(&mm->page_table_lock);
- spin_lock(mm->context.cop_lockp);
-
- mm->context.acop &= ~acop;
-
- free_pid = disable_cop_pid(mm);
- sync_cop(mm);
-
- /*
- * If this is a threaded process then there might be other threads
- * running. We need to send an IPI to force them to pick up any
- * change in PID and ACOP.
- */
- if (atomic_read(&mm->mm_users) > 1)
- smp_call_function(sync_cop, mm, 1);
-
- if (free_pid != COP_PID_NONE)
- free_cop_pid(free_pid);
-
- spin_unlock(mm->context.cop_lockp);
- spin_unlock(&mm->page_table_lock);
-}
-EXPORT_SYMBOL_GPL(drop_cop);
-
-static int acop_use_cop(int ct)
-{
- /* There is no alternate policy, yet */
- return -1;
-}
-
-/*
- * Get the instruction word at the NIP
- */
-static u32 acop_get_inst(struct pt_regs *regs)
-{
- u32 inst;
- u32 __user *p;
-
- p = (u32 __user *)regs->nip;
- if (!access_ok(VERIFY_READ, p, sizeof(*p)))
- return 0;
-
- if (__get_user(inst, p))
- return 0;
-
- return inst;
-}
-
-/**
- * @regs: registers at time of interrupt
- * @address: storage address
- * @error_code: Fault code, usually the DSISR or ESR depending on
- * processor type
- *
- * Return 0 if we are able to resolve the data storage fault that
- * results from a CT miss in the ACOP register.
- */
-int acop_handle_fault(struct pt_regs *regs, unsigned long address,
- unsigned long error_code)
-{
- int ct;
- u32 inst = 0;
-
- if (!cpu_has_feature(CPU_FTR_ICSWX)) {
- pr_info("No coprocessors available");
- _exception(SIGILL, regs, ILL_ILLOPN, address);
- }
-
- if (!user_mode(regs)) {
- /* this could happen if the HV denies the
- * kernel access, for now we just die */
- die("ICSWX from kernel failed", regs, SIGSEGV);
- }
-
- /* Some implementations leave us a hint for the CT */
- ct = ICSWX_GET_CT_HINT(error_code);
- if (ct < 0) {
- /* we have to peek at the instruction word to figure out CT */
- u32 ccw;
- u32 rs;
-
- inst = acop_get_inst(regs);
- if (inst == 0)
- return -1;
-
- rs = (inst >> (31 - 10)) & 0x1f;
- ccw = regs->gpr[rs];
- ct = (ccw >> 16) & 0x3f;
- }
-
- /*
- * We could be here because another thread has enabled acop
- * but the ACOP register has yet to be updated.
- *
- * This should have been taken care of by the IPI to sync all
- * the threads (see smp_call_function(sync_cop, mm, 1)), but
- * that could take forever if there are a significant amount
- * of threads.
- *
- * Given the number of threads on some of these systems,
- * perhaps this is the best way to sync ACOP rather than whack
- * every thread with an IPI.
- */
- if ((acop_copro_type_bit(ct) & current->active_mm->context.acop) != 0) {
- sync_cop(current->active_mm);
- return 0;
- }
-
- /* check for alternate policy */
- if (!acop_use_cop(ct))
- return 0;
-
- /* at this point the CT is unknown to the system */
- pr_warn("%s[%d]: Coprocessor %d is unavailable\n",
- current->comm, current->pid, ct);
-
- /* get inst if we don't already have it */
- if (inst == 0) {
- inst = acop_get_inst(regs);
- if (inst == 0)
- return -1;
- }
-
- /* Check if the instruction is the "record form" */
- if (inst & 1) {
- /*
- * the instruction is "record" form so we can reject
- * using CR0
- */
- regs->ccr &= ~(0xful << 28);
- regs->ccr |= ICSWX_RC_NOT_FOUND << 28;
-
- /* Move on to the next instruction */
- regs->nip += 4;
- } else {
- /*
- * There is no architected mechanism to report a bad
- * CT so we could either SIGILL or report nothing.
- * Since the non-record version should only bu used
- * for "hints" or "don't care" we should probably do
- * nothing. However, I could see how some people
- * might want an SIGILL so it here if you want it.
- */
-#ifdef CONFIG_PPC_ICSWX_USE_SIGILL
- _exception(SIGILL, regs, ILL_ILLOPN, address);
-#else
- regs->nip += 4;
-#endif
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(acop_handle_fault);
diff --git a/arch/powerpc/mm/icswx.h b/arch/powerpc/mm/icswx.h
deleted file mode 100644
index 6dedc08e62c8..000000000000
--- a/arch/powerpc/mm/icswx.h
+++ /dev/null
@@ -1,68 +0,0 @@
-#ifndef _ARCH_POWERPC_MM_ICSWX_H_
-#define _ARCH_POWERPC_MM_ICSWX_H_
-
-/*
- * ICSWX and ACOP Management
- *
- * Copyright (C) 2011 Anton Blanchard, IBM Corp. <anton@samba.org>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- */
-
-#include <asm/mmu_context.h>
-
-/* also used to denote that PIDs are not used */
-#define COP_PID_NONE 0
-
-static inline void sync_cop(void *arg)
-{
- struct mm_struct *mm = arg;
-
- if (mm == current->active_mm)
- switch_cop(current->active_mm);
-}
-
-#ifdef CONFIG_PPC_ICSWX_PID
-extern int get_cop_pid(struct mm_struct *mm);
-extern int disable_cop_pid(struct mm_struct *mm);
-extern void free_cop_pid(int free_pid);
-#else
-#define get_cop_pid(m) (COP_PID_NONE)
-#define disable_cop_pid(m) (COP_PID_NONE)
-#define free_cop_pid(p)
-#endif
-
-/*
- * These are implementation bits for architected registers. If this
- * ever becomes architecture the should be moved to reg.h et. al.
- */
-/* UCT is the same bit for Server and Embedded */
-#define ICSWX_DSI_UCT 0x00004000 /* Unavailable Coprocessor Type */
-
-#ifdef CONFIG_PPC_BOOK3E
-/* Embedded implementation gives us no hints as to what the CT is */
-#define ICSWX_GET_CT_HINT(x) (-1)
-#else
-/* Server implementation contains the CT value in the DSISR */
-#define ICSWX_DSISR_CTMASK 0x00003f00
-#define ICSWX_GET_CT_HINT(x) (((x) & ICSWX_DSISR_CTMASK) >> 8)
-#endif
-
-#define ICSWX_RC_STARTED 0x8 /* The request has been started */
-#define ICSWX_RC_NOT_IDLE 0x4 /* No coprocessor found idle */
-#define ICSWX_RC_NOT_FOUND 0x2 /* No coprocessor found */
-#define ICSWX_RC_UNDEFINED 0x1 /* Reserved */
-
-extern int acop_handle_fault(struct pt_regs *regs, unsigned long address,
- unsigned long error_code);
-
-static inline u64 acop_copro_type_bit(unsigned int type)
-{
- return 1ULL << (63 - type);
-}
-
-#endif /* !_ARCH_POWERPC_MM_ICSWX_H_ */
diff --git a/arch/powerpc/mm/icswx_pid.c b/arch/powerpc/mm/icswx_pid.c
deleted file mode 100644
index 91e30eb7d054..000000000000
--- a/arch/powerpc/mm/icswx_pid.c
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * ICSWX and ACOP/PID Management
- *
- * Copyright (C) 2011 Anton Blanchard, IBM Corp. <anton@samba.org>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- */
-
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/spinlock.h>
-#include <linux/idr.h>
-#include <linux/module.h>
-#include "icswx.h"
-
-#define COP_PID_MIN (COP_PID_NONE + 1)
-#define COP_PID_MAX (0xFFFF)
-
-static DEFINE_SPINLOCK(mmu_context_acop_lock);
-static DEFINE_IDA(cop_ida);
-
-static int new_cop_pid(struct ida *ida, int min_id, int max_id,
- spinlock_t *lock)
-{
- int index;
- int err;
-
-again:
- if (!ida_pre_get(ida, GFP_KERNEL))
- return -ENOMEM;
-
- spin_lock(lock);
- err = ida_get_new_above(ida, min_id, &index);
- spin_unlock(lock);
-
- if (err == -EAGAIN)
- goto again;
- else if (err)
- return err;
-
- if (index > max_id) {
- spin_lock(lock);
- ida_remove(ida, index);
- spin_unlock(lock);
- return -ENOMEM;
- }
-
- return index;
-}
-
-int get_cop_pid(struct mm_struct *mm)
-{
- int pid;
-
- if (mm->context.cop_pid == COP_PID_NONE) {
- pid = new_cop_pid(&cop_ida, COP_PID_MIN, COP_PID_MAX,
- &mmu_context_acop_lock);
- if (pid >= 0)
- mm->context.cop_pid = pid;
- }
- return mm->context.cop_pid;
-}
-
-int disable_cop_pid(struct mm_struct *mm)
-{
- int free_pid = COP_PID_NONE;
-
- if ((!mm->context.acop) && (mm->context.cop_pid != COP_PID_NONE)) {
- free_pid = mm->context.cop_pid;
- mm->context.cop_pid = COP_PID_NONE;
- }
- return free_pid;
-}
-
-void free_cop_pid(int free_pid)
-{
- spin_lock(&mmu_context_acop_lock);
- ida_remove(&cop_ida, free_pid);
- spin_unlock(&mmu_context_acop_lock);
-}
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 8a7c38b8d335..6419b33ca309 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -113,6 +113,12 @@ void __init MMU_setup(void)
__map_without_bats = 1;
__map_without_ltlbs = 1;
}
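+ /*
+ * Editor's note (assumed rationale): BATs and large-TLB entries map the
+ * kernel with a single, uniform permission, which would defeat marking
+ * rodata read-only at page granularity, so both are disabled when
+ * CONFIG_STRICT_KERNEL_RWX is in effect and rodata protection is enabled.
+ */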
+#ifdef CONFIG_STRICT_KERNEL_RWX
+ if (rodata_enabled) {
+ __map_without_bats = 1;
+ __map_without_ltlbs = 1;
+ }
+#endif
}
/*
@@ -132,8 +138,6 @@ void __init MMU_init(void)
* Reserve gigantic pages for hugetlb. This MUST occur before
* lowmem_end_addr is initialized below.
*/
- reserve_hugetlb_gpages();
-
if (memblock.memory.cnt > 1) {
#ifndef CONFIG_WII
memblock_enforce_memory_limit(memblock.memory.regions[0].size);
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 5b4c25d12ff3..588a521966ec 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -356,7 +356,7 @@ struct page *realmode_pfn_to_page(unsigned long pfn)
}
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);
-#elif defined(CONFIG_FLATMEM)
+#else
struct page *realmode_pfn_to_page(unsigned long pfn)
{
@@ -365,7 +365,7 @@ struct page *realmode_pfn_to_page(unsigned long pfn)
}
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);
-#endif /* CONFIG_SPARSEMEM_VMEMMAP/CONFIG_FLATMEM */
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */
#ifdef CONFIG_PPC_STD_MMU_64
static bool disable_radix;
@@ -381,7 +381,7 @@ early_param("disable_radix", parse_disable_radix);
* /chosen/ibm,architecture-vec-5 to see if the hypervisor is willing to do
* radix. If not, we clear the radix feature bit so we fall back to hash.
*/
-static void early_check_vec5(void)
+static void __init early_check_vec5(void)
{
unsigned long root, chosen;
int size;
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 8541f18694a4..4362b86ef84c 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -402,6 +402,7 @@ void __init mem_init(void)
void free_initmem(void)
{
ppc_md.progress = ppc_printk_progress;
+ mark_initmem_nx();
free_initmem_default(POISON_FREE_INITMEM);
}
@@ -435,7 +436,7 @@ void flush_dcache_icache_page(struct page *page)
return;
}
#endif
-#if defined(CONFIG_8xx) || defined(CONFIG_PPC64)
+#if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC64)
/* On 8xx there is no need to kmap since highmem is not supported */
__flush_dcache_icache(page_address(page));
#else
diff --git a/arch/powerpc/mm/mmu_context.c b/arch/powerpc/mm/mmu_context.c
new file mode 100644
index 000000000000..0f613bc63c50
--- /dev/null
+++ b/arch/powerpc/mm/mmu_context.c
@@ -0,0 +1,99 @@
+/*
+ * Common implementation of switch_mm_irqs_off
+ *
+ * Copyright IBM Corp. 2017
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/mm.h>
+#include <linux/cpu.h>
+
+#include <asm/mmu_context.h>
+
+#if defined(CONFIG_PPC32)
+static inline void switch_mm_pgdir(struct task_struct *tsk,
+ struct mm_struct *mm)
+{
+ /* 32-bit keeps track of the current PGDIR in the thread struct */
+ tsk->thread.pgdir = mm->pgd;
+}
+#elif defined(CONFIG_PPC_BOOK3E_64)
+static inline void switch_mm_pgdir(struct task_struct *tsk,
+ struct mm_struct *mm)
+{
+ /* 64-bit Book3E keeps track of current PGD in the PACA */
+ get_paca()->pgd = mm->pgd;
+}
+#else
+static inline void switch_mm_pgdir(struct task_struct *tsk,
+ struct mm_struct *mm) { }
+#endif
+
+#ifdef CONFIG_PPC_BOOK3S_64
+static inline void inc_mm_active_cpus(struct mm_struct *mm)
+{
+ atomic_inc(&mm->context.active_cpus);
+}
+#else
+static inline void inc_mm_active_cpus(struct mm_struct *mm) { }
+#endif
+
+void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ bool new_on_cpu = false;
+
+ /* Mark that this context has been used on the new CPU */
+ if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) {
+ cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
+ inc_mm_active_cpus(next);
+
+ /*
+ * This full barrier orders the store to the cpumask above vs
+ * a subsequent operation which allows this CPU to begin loading
+ * translations for next.
+ *
+ * When using the radix MMU that operation is the load of the
+ * MMU context id, which is then moved to SPRN_PID.
+ *
+ * For the hash MMU it is either the first load from slb_cache
+ * in switch_slb(), and/or the store of paca->mm_ctx_id in
+ * copy_mm_to_paca().
+ *
+ * On the read side the barrier is in pte_xchg(), which orders
+ * the store to the PTE vs the load of mm_cpumask.
+ */
+ smp_mb();
+
+ new_on_cpu = true;
+ }
+
+ /* Some subarchs need to track the PGD elsewhere */
+ switch_mm_pgdir(tsk, next);
+
+ /* Nothing else to do if we aren't actually switching */
+ if (prev == next)
+ return;
+
+ /*
+ * We must stop all altivec streams before changing the HW
+ * context
+ */
+ if (cpu_has_feature(CPU_FTR_ALTIVEC))
+ asm volatile ("dssall");
+
+ if (new_on_cpu)
+ radix_kvm_prefetch_workaround(next);
+
+ /*
+ * The actual HW switching method differs between the various
+ * sub architectures. Out of line for now
+ */
+ switch_mmu_context(prev, next, tsk);
+}
+
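+/*
+ * Editor's note (sketch of the calling convention, not from the patch):
+ * as the name says, callers must already have interrupts disabled; the
+ * generic scheduler calls this directly from context_switch(), and the
+ * powerpc switch_mm()/activate_mm() wrappers disable interrupts around it.
+ */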
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index abed1fe6992f..05e15386d4cb 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -25,8 +25,6 @@
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
-#include "icswx.h"
-
static DEFINE_SPINLOCK(mmu_context_lock);
static DEFINE_IDA(mmu_context_ida);
@@ -126,9 +124,10 @@ static int hash__init_new_context(struct mm_struct *mm)
static int radix__init_new_context(struct mm_struct *mm)
{
unsigned long rts_field;
- int index;
+ int index, max_id;
- index = alloc_context_id(1, PRTB_ENTRIES - 1);
+ max_id = (1 << mmu_pid_bits) - 1;
+ index = alloc_context_id(mmu_base_pid, max_id);
if (index < 0)
return index;
@@ -164,16 +163,6 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
return index;
mm->context.id = index;
-#ifdef CONFIG_PPC_ICSWX
- mm->context.cop_lockp = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
- if (!mm->context.cop_lockp) {
- __destroy_context(index);
- subpage_prot_free(mm);
- mm->context.id = MMU_NO_CONTEXT;
- return -ENOMEM;
- }
- spin_lock_init(mm->context.cop_lockp);
-#endif /* CONFIG_PPC_ICSWX */
#ifdef CONFIG_PPC_64K_PAGES
mm->context.pte_frag = NULL;
@@ -181,6 +170,8 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
#ifdef CONFIG_SPAPR_TCE_IOMMU
mm_iommu_init(mm);
#endif
+ atomic_set(&mm->context.active_cpus, 0);
+
return 0;
}
@@ -225,12 +216,6 @@ void destroy_context(struct mm_struct *mm)
#ifdef CONFIG_SPAPR_TCE_IOMMU
WARN_ON_ONCE(!list_empty(&mm->context.iommu_group_mem_list));
#endif
-#ifdef CONFIG_PPC_ICSWX
- drop_cop(mm->context.acop, mm);
- kfree(mm->context.cop_lockp);
- mm->context.cop_lockp = NULL;
-#endif /* CONFIG_PPC_ICSWX */
-
if (radix_enabled()) {
/*
* Radix doesn't have a valid bit in the process table
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index d46128b22150..57fbc554c785 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -27,7 +27,7 @@
/*
* On 40x and 8xx, we directly inline tlbia and tlbivax
*/
-#if defined(CONFIG_40x) || defined(CONFIG_8xx)
+#if defined(CONFIG_40x) || defined(CONFIG_PPC_8xx)
static inline void _tlbil_all(void)
{
asm volatile ("sync; tlbia; isync" : : : "memory");
@@ -38,7 +38,7 @@ static inline void _tlbil_pid(unsigned int pid)
}
#define _tlbil_pid_noind(pid) _tlbil_pid(pid)
-#else /* CONFIG_40x || CONFIG_8xx */
+#else /* CONFIG_40x || CONFIG_PPC_8xx */
extern void _tlbil_all(void);
extern void _tlbil_pid(unsigned int pid);
#ifdef CONFIG_PPC_BOOK3E
@@ -46,12 +46,12 @@ extern void _tlbil_pid_noind(unsigned int pid);
#else
#define _tlbil_pid_noind(pid) _tlbil_pid(pid)
#endif
-#endif /* !(CONFIG_40x || CONFIG_8xx) */
+#endif /* !(CONFIG_40x || CONFIG_PPC_8xx) */
/*
* On 8xx, we directly inline tlbie, on others, it's extern
*/
-#ifdef CONFIG_8xx
+#ifdef CONFIG_PPC_8xx
static inline void _tlbil_va(unsigned long address, unsigned int pid,
unsigned int tsize, unsigned int ind)
{
@@ -67,7 +67,7 @@ static inline void _tlbil_va(unsigned long address, unsigned int pid,
{
__tlbil_va(address, pid);
}
-#endif /* CONFIG_8xx */
+#endif /* CONFIG_PPC_8xx */
#if defined(CONFIG_PPC_BOOK3E) || defined(CONFIG_PPC_47x)
extern void _tlbivax_bcast(unsigned long address, unsigned int pid,
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index 31eed8fa8e99..3b65917785a5 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -9,6 +9,7 @@
#include <linux/sched.h>
#include <linux/mm_types.h>
+#include <misc/cxl-base.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
@@ -64,6 +65,27 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
trace_hugepage_set_pmd(addr, pmd_val(pmd));
return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}
+
+static void do_nothing(void *unused)
+{
+
+}
+/*
+ * Serialize against find_current_mm_pte which does lock-less
+ * lookup in page tables with local interrupts disabled. For huge pages
+ * it casts pmd_t to pte_t. Since format of pte_t is different from
+ * pmd_t we want to prevent transit from pmd pointing to page table
+ * to pmd pointing to huge page (and back) while interrupts are disabled.
+ * We clear pmd to possibly replace it with page table pointer in
+ * different code paths. So make sure we wait for the parallel
+ * find_current_mm_pte to finish.
+ */
+void serialize_against_pte_lookup(struct mm_struct *mm)
+{
+ smp_mb();
+ smp_call_function_many(mm_cpumask(mm), do_nothing, NULL, 1);
+}
+
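+/*
+ * Editor's illustration of the ordering this provides: a typical caller
+ * (e.g. pmdp_invalidate() below) first clears the pmd, then calls
+ * serialize_against_pte_lookup(). Any CPU still inside a lockless walk
+ * has interrupts off and therefore delays the do_nothing IPI, so once
+ * smp_call_function_many() returns no walker can still be using the old
+ * pmd-as-pte view.
+ */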
/*
* We use this to invalidate a pmdp entry before switching from a
* hugepte to regular pmd entry.
@@ -77,7 +99,7 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
* This ensures that generic code that rely on IRQ disabling
* to prevent a parallel THP split work as expected.
*/
- kick_all_cpus_sync();
+ serialize_against_pte_lookup(vma->vm_mm);
}
static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
diff --git a/arch/powerpc/mm/pgtable-hash64.c b/arch/powerpc/mm/pgtable-hash64.c
index 188b4107584d..ec277913e01b 100644
--- a/arch/powerpc/mm/pgtable-hash64.c
+++ b/arch/powerpc/mm/pgtable-hash64.c
@@ -239,7 +239,7 @@ pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addres
* by sending an IPI to all the cpus and executing a dummy
* function there.
*/
- kick_all_cpus_sync();
+ serialize_against_pte_lookup(vma->vm_mm);
/*
* Now invalidate the hpte entries in the range
* covered by pmd. This make sure we take a
@@ -329,7 +329,6 @@ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
unsigned int psize;
unsigned long vsid;
unsigned long flags = 0;
- const struct cpumask *tmp;
/* get the base page size,vsid and segment size */
#ifdef CONFIG_DEBUG_VM
@@ -350,8 +349,7 @@ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
ssize = mmu_kernel_ssize;
}
- tmp = cpumask_of(smp_processor_id());
- if (cpumask_equal(mm_cpumask(mm), tmp))
+ if (mm_is_thread_local(mm))
flags |= HPTE_LOCAL_UPDATE;
return flush_hash_hugepage(vsid, addr, pmdp, psize, ssize, flags);
@@ -380,16 +378,16 @@ pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
*/
memset(pgtable, 0, PTE_FRAG_SIZE);
/*
- * Serialize against find_linux_pte_or_hugepte which does lock-less
+ * Serialize against the find_current_mm_pte variants, which do lock-less
* lookup in page tables with local interrupts disabled. For huge pages
* it casts pmd_t to pte_t. Since format of pte_t is different from
* pmd_t we want to prevent transit from pmd pointing to page table
* to pmd pointing to huge page (and back) while interrupts are disabled.
* We clear pmd to possibly replace it with page table pointer in
* different code paths. So make sure we wait for the parallel
- * find_linux_pte_or_hugepage to finish.
+ * find_current_mm_pte to finish.
*/
- kick_all_cpus_sync();
+ serialize_against_pte_lookup(mm);
return old_pmd;
}
@@ -425,33 +423,51 @@ int hash__has_transparent_hugepage(void)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#ifdef CONFIG_STRICT_KERNEL_RWX
-void hash__mark_rodata_ro(void)
+static bool hash__change_memory_range(unsigned long start, unsigned long end,
+ unsigned long newpp)
{
- unsigned long start = (unsigned long)_stext;
- unsigned long end = (unsigned long)__init_begin;
unsigned long idx;
unsigned int step, shift;
- unsigned long newpp = PP_RXXX;
shift = mmu_psize_defs[mmu_linear_psize].shift;
step = 1 << shift;
- start = ((start + step - 1) >> shift) << shift;
- end = (end >> shift) << shift;
+ start = ALIGN_DOWN(start, step);
+ end = ALIGN(end, step); // aligns up
- pr_devel("marking ro start %lx, end %lx, step %x\n",
- start, end, step);
+ if (start >= end)
+ return false;
- if (start == end) {
- pr_warn("could not set rodata ro, relocate the start"
- " of the kernel to a 0x%x boundary\n", step);
- return;
- }
+ pr_debug("Changing page protection on range 0x%lx-0x%lx, to 0x%lx, step 0x%x\n",
+ start, end, newpp, step);
for (idx = start; idx < end; idx += step)
/* Not sure if we can do much with the return value */
mmu_hash_ops.hpte_updateboltedpp(newpp, idx, mmu_linear_psize,
mmu_kernel_ssize);
+ return true;
+}
+
+void hash__mark_rodata_ro(void)
+{
+ unsigned long start, end;
+
+ start = (unsigned long)_stext;
+ end = (unsigned long)__init_begin;
+
+ WARN_ON(!hash__change_memory_range(start, end, PP_RXXX));
+}
+
+void hash__mark_initmem_nx(void)
+{
+ unsigned long start, end, pp;
+
+ start = (unsigned long)__init_begin;
+ end = (unsigned long)__init_end;
+
+ pp = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL));
+
+ WARN_ON(!hash__change_memory_range(start, end, pp));
}
#endif
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index 8c13e4282308..39c252b54d16 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -8,10 +8,15 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
+
+#define pr_fmt(fmt) "radix-mmu: " fmt
+
+#include <linux/kernel.h>
#include <linux/sched/mm.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/mm.h>
+#include <linux/string_helpers.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
@@ -25,12 +30,19 @@
#include <trace/events/thp.h>
+unsigned int mmu_pid_bits;
+unsigned int mmu_base_pid;
+
static int native_register_process_table(unsigned long base, unsigned long pg_sz,
unsigned long table_size)
{
- unsigned long patb1 = base | table_size | PATB_GR;
+ unsigned long patb0, patb1;
+
+ patb0 = be64_to_cpu(partition_tb[0].patb0);
+ patb1 = base | table_size | PATB_GR;
+
+ mmu_partition_table_set_entry(0, patb0, patb1);
- partition_tb->patb1 = cpu_to_be64(patb1);
return 0;
}
@@ -112,10 +124,9 @@ set_the_pte:
}
#ifdef CONFIG_STRICT_KERNEL_RWX
-void radix__mark_rodata_ro(void)
+void radix__change_memory_range(unsigned long start, unsigned long end,
+ unsigned long clear)
{
- unsigned long start = (unsigned long)_stext;
- unsigned long end = (unsigned long)__init_begin;
unsigned long idx;
pgd_t *pgdp;
pud_t *pudp;
@@ -125,7 +136,8 @@ void radix__mark_rodata_ro(void)
start = ALIGN_DOWN(start, PAGE_SIZE);
end = PAGE_ALIGN(end); // aligns up
- pr_devel("marking ro start %lx, end %lx\n", start, end);
+ pr_debug("Changing flags on range %lx-%lx removing 0x%lx\n",
+ start, end, clear);
for (idx = start; idx < end; idx += PAGE_SIZE) {
pgdp = pgd_offset_k(idx);
@@ -147,21 +159,43 @@ void radix__mark_rodata_ro(void)
if (!ptep)
continue;
update_the_pte:
- radix__pte_update(&init_mm, idx, ptep, _PAGE_WRITE, 0, 0);
+ radix__pte_update(&init_mm, idx, ptep, clear, 0, 0);
}
radix__flush_tlb_kernel_range(start, end);
}
+
+void radix__mark_rodata_ro(void)
+{
+ unsigned long start, end;
+
+ start = (unsigned long)_stext;
+ end = (unsigned long)__init_begin;
+
+ radix__change_memory_range(start, end, _PAGE_WRITE);
+}
+
+void radix__mark_initmem_nx(void)
+{
+ unsigned long start = (unsigned long)__init_begin;
+ unsigned long end = (unsigned long)__init_end;
+
+ radix__change_memory_range(start, end, _PAGE_EXEC);
+}
#endif /* CONFIG_STRICT_KERNEL_RWX */
static inline void __meminit print_mapping(unsigned long start,
unsigned long end,
unsigned long size)
{
+ char buf[10];
+
if (end <= start)
return;
- pr_info("Mapped range 0x%lx - 0x%lx with 0x%lx\n", start, end, size);
+ string_get_size(size, 1, STRING_UNITS_2, buf, sizeof(buf));
+
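+ /*
+ * Editor's example (assumption about string_get_size() formatting):
+ * mapping the first 1GB with 1GB pages would log something like
+ * "radix-mmu: Mapped 0x0000000000000000-0x0000000040000000 with 1.00 GiB pages".
+ */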
+ pr_info("Mapped 0x%016lx-0x%016lx with %s pages\n", start, end, buf);
}
static int __meminit create_physical_mapping(unsigned long start,
@@ -243,11 +277,34 @@ static void __init radix_init_pgtable(void)
for_each_memblock(memory, reg)
WARN_ON(create_physical_mapping(reg->base,
reg->base + reg->size));
+
+ /* Find out how many PID bits are supported */
+ if (cpu_has_feature(CPU_FTR_HVMODE)) {
+ if (!mmu_pid_bits)
+ mmu_pid_bits = 20;
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ /*
+ * When KVM is possible, we only use the top half of the
+ * PID space to avoid collisions between host and guest PIDs
+ * which can cause problems due to prefetch when exiting the
+ * guest with AIL=3
+ */
+ mmu_base_pid = 1 << (mmu_pid_bits - 1);
+#else
+ mmu_base_pid = 1;
+#endif
+ } else {
+ /* The guest uses the bottom half of the PID space */
+ if (!mmu_pid_bits)
+ mmu_pid_bits = 19;
+ mmu_base_pid = 1;
+ }
+
/*
* Allocate Partition table and process table for the
* host.
*/
- BUILD_BUG_ON_MSG((PRTB_SIZE_SHIFT > 36), "Process table size too large.");
+ BUG_ON(PRTB_SIZE_SHIFT > 36);
process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT);
/*
* Fill in the process table.
@@ -321,6 +378,12 @@ static int __init radix_dt_scan_page_sizes(unsigned long node,
if (type == NULL || strcmp(type, "cpu") != 0)
return 0;
+ /* Find MMU PID size */
+ prop = of_get_flat_dt_prop(node, "ibm,mmu-pid-bits", &size);
+ if (prop && size == 4)
+ mmu_pid_bits = be32_to_cpup(prop);
+
+ /* Grab page size encodings */
prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size);
if (!prop)
return 0;
@@ -476,6 +539,7 @@ void __init radix__early_init_mmu(void)
__kernel_virt_size = RADIX_KERN_VIRT_SIZE;
__vmalloc_start = RADIX_VMALLOC_START;
__vmalloc_end = RADIX_VMALLOC_END;
+ __kernel_io_start = RADIX_KERN_IO_START;
vmemmap = (struct page *)RADIX_VMEMMAP_BASE;
ioremap_bot = IOREMAP_BASE;
@@ -786,9 +850,12 @@ pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addre
*/
pmd = *pmdp;
pmd_clear(pmdp);
+
/*FIXME!! Verify whether we need this kick below */
- kick_all_cpus_sync();
- flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+ serialize_against_pte_lookup(vma->vm_mm);
+
+ radix__flush_tlb_collapsed_pmd(vma->vm_mm, address);
+
return pmd;
}
@@ -847,16 +914,16 @@ pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
old = radix__pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
old_pmd = __pmd(old);
/*
- * Serialize against find_linux_pte_or_hugepte which does lock-less
+ * Serialize against find_current_mm_pte which does lock-less
* lookup in page tables with local interrupts disabled. For huge pages
* it casts pmd_t to pte_t. Since format of pte_t is different from
* pmd_t we want to prevent transit from pmd pointing to page table
* to pmd pointing to huge page (and back) while interrupts are disabled.
* We clear pmd to possibly replace it with page table pointer in
* different code paths. So make sure we wait for the parallel
- * find_linux_pte_or_hugepage to finish.
+ * find_current_mm_pte to finish.
*/
- kick_all_cpus_sync();
+ serialize_against_pte_lookup(mm);
return old_pmd;
}
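The host/guest PID-space split added to radix_init_pgtable() above is easiest to see with concrete numbers. The following stand-alone sketch is not part of the patch; it simply mirrors the mmu_pid_bits/mmu_base_pid assignments, assuming the 20-bit default used when the device tree does not report ibm,mmu-pid-bits and KVM is possible:

#include <stdio.h>

int main(void)
{
	unsigned int mmu_pid_bits = 20;				/* assumed HV-mode default */
	unsigned int mmu_base_pid = 1u << (mmu_pid_bits - 1);
	unsigned int max_pid = (1u << mmu_pid_bits) - 1;

	/* Host allocations start at the top half; guests keep the bottom half. */
	printf("host PIDs : 0x%x..0x%x\n", mmu_base_pid, max_pid);	/* 0x80000..0xfffff */
	printf("guest PIDs: 0x1..0x%x\n", mmu_base_pid - 1);		/* 0x1..0x7ffff */
	return 0;
}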
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index a9e4bfc025bc..65eda1997c3f 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -34,6 +34,7 @@
#include <asm/fixmap.h>
#include <asm/io.h>
#include <asm/setup.h>
+#include <asm/sections.h>
#include "mmu_decl.h"
@@ -242,7 +243,7 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, int flags)
/*
* Map in a chunk of physical memory starting at start.
*/
-void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
+static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
{
unsigned long v, s, f;
phys_addr_t p;
@@ -294,7 +295,7 @@ void __init mapin_ram(void)
* Returns true (1) if PTE was found, zero otherwise. The pointer to
* the PTE pointer is unmodified if PTE is not found.
*/
-int
+static int
get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp)
{
pgd_t *pgd;
@@ -323,9 +324,7 @@ get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp)
return(retval);
}
-#ifdef CONFIG_DEBUG_PAGEALLOC
-
-static int __change_page_attr(struct page *page, pgprot_t prot)
+static int __change_page_attr_noflush(struct page *page, pgprot_t prot)
{
pte_t *kpte;
pmd_t *kpmd;
@@ -339,8 +338,6 @@ static int __change_page_attr(struct page *page, pgprot_t prot)
if (!get_pteptr(&init_mm, address, &kpte, &kpmd))
return -EINVAL;
__set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0);
- wmb();
- flush_tlb_page(NULL, address);
pte_unmap(kpte);
return 0;
@@ -349,44 +346,65 @@ static int __change_page_attr(struct page *page, pgprot_t prot)
/*
* Change the page attributes of an page in the linear mapping.
*
- * THIS CONFLICTS WITH BAT MAPPINGS, DEBUG USE ONLY
+ * THIS DOES NOTHING WITH BAT MAPPINGS, DEBUG USE ONLY
*/
static int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
int i, err = 0;
unsigned long flags;
+ struct page *start = page;
local_irq_save(flags);
for (i = 0; i < numpages; i++, page++) {
- err = __change_page_attr(page, prot);
+ err = __change_page_attr_noflush(page, prot);
if (err)
break;
}
+ wmb();
+ flush_tlb_kernel_range((unsigned long)page_address(start),
+ (unsigned long)page_address(page));
local_irq_restore(flags);
return err;
}
-
-void __kernel_map_pages(struct page *page, int numpages, int enable)
+void mark_initmem_nx(void)
{
- if (PageHighMem(page))
- return;
+ struct page *page = virt_to_page(_sinittext);
+ unsigned long numpages = PFN_UP((unsigned long)_einittext) -
+ PFN_DOWN((unsigned long)_sinittext);
- change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
+ change_page_attr(page, numpages, PAGE_KERNEL);
}
-#endif /* CONFIG_DEBUG_PAGEALLOC */
-static int fixmaps;
-
-void __set_fixmap (enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
+#ifdef CONFIG_STRICT_KERNEL_RWX
+void mark_rodata_ro(void)
{
- unsigned long address = __fix_to_virt(idx);
+ struct page *page;
+ unsigned long numpages;
+
+ page = virt_to_page(_stext);
+ numpages = PFN_UP((unsigned long)_etext) -
+ PFN_DOWN((unsigned long)_stext);
- if (idx >= __end_of_fixed_addresses) {
- BUG();
+ change_page_attr(page, numpages, PAGE_KERNEL_ROX);
+ /*
+ * mark .rodata as read only. Use __init_begin rather than __end_rodata
+ * to cover NOTES and EXCEPTION_TABLE.
+ */
+ page = virt_to_page(__start_rodata);
+ numpages = PFN_UP((unsigned long)__init_begin) -
+ PFN_DOWN((unsigned long)__start_rodata);
+
+ change_page_attr(page, numpages, PAGE_KERNEL_RO);
+}
+#endif
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+void __kernel_map_pages(struct page *page, int numpages, int enable)
+{
+ if (PageHighMem(page))
return;
- }
- map_kernel_page(address, phys, pgprot_val(flags));
- fixmaps++;
+ change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
}
+#endif /* CONFIG_DEBUG_PAGEALLOC */
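The 32-bit mark_rodata_ro()/mark_initmem_nx() added above compute their page counts with PFN_UP()/PFN_DOWN() so a range whose end is not page aligned is still fully covered. A stand-alone sketch of that arithmetic, using hypothetical section addresses and an assumed 4 KB page size (not part of the patch):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

int main(void)
{
	unsigned long stext = 0xc0000000UL;	/* hypothetical _stext */
	unsigned long etext = 0xc051234cUL;	/* hypothetical _etext, not page aligned */

	/* Round the end up and the start down so the whole range is covered. */
	unsigned long numpages = PFN_UP(etext) - PFN_DOWN(stext);

	printf("numpages = %lu\n", numpages);	/* 0x513 = 1299 pages */
	return 0;
}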
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 5c0b795d656c..ac0717a90ca6 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -104,6 +104,8 @@ unsigned long __vmalloc_start;
EXPORT_SYMBOL(__vmalloc_start);
unsigned long __vmalloc_end;
EXPORT_SYMBOL(__vmalloc_end);
+unsigned long __kernel_io_start;
+EXPORT_SYMBOL(__kernel_io_start);
struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);
unsigned long __pte_frag_nr;
@@ -505,4 +507,12 @@ void mark_rodata_ro(void)
else
hash__mark_rodata_ro();
}
+
+void mark_initmem_nx(void)
+{
+ if (radix_enabled())
+ radix__mark_initmem_nx();
+ else
+ hash__mark_initmem_nx();
+}
#endif
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index bde378559d01..906a86fe457b 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -121,12 +121,25 @@ slb_miss_kernel_load_vmemmap:
1:
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
- /* vmalloc mapping gets the encoding from the PACA as the mapping
- * can be demoted from 64K -> 4K dynamically on some machines
+ /*
+ * r10 contains the ESID, which is the original faulting EA shifted
+ * right by 28 bits. We need to compare that with (H_VMALLOC_END >> 28)
+ * which is 0xd00038000. That can't be used as an immediate, even if we
+ * ignored the 0xd, so we have to load it into a register, and we only
+ * have one register free. So we must load all of (H_VMALLOC_END >> 28)
+ * into a register and compare ESID against that.
+ */
+ lis r11,(H_VMALLOC_END >> 32)@h // r11 = 0xffffffffd0000000
+ ori r11,r11,(H_VMALLOC_END >> 32)@l // r11 = 0xffffffffd0003800
+ // Rotate left 4, then mask with 0xffffffff0
+ rldic r11,r11,4,28 // r11 = 0xd00038000
+ cmpld r10,r11 // if r10 >= r11
+ bge 5f // goto io_mapping
+
+ /*
+ * vmalloc mapping gets the encoding from the PACA as the mapping
+ * can be demoted from 64K -> 4K dynamically on some machines.
*/
- clrldi r11,r10,48
- cmpldi r11,(H_VMALLOC_SIZE >> 28) - 1
- bgt 5f
lhz r11,PACAVMALLOCSLLP(r13)
b 6f
5:
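One way to sanity-check the lis/ori/rldic sequence described in the comment above is to redo the constant construction in C. The sketch below is not part of the patch and assumes H_VMALLOC_END is 0xd000380000000000, which is what the comment's 0xd00038000 value implies:

#include <stdio.h>

int main(void)
{
	unsigned long h_vmalloc_end = 0xd000380000000000UL;	/* assumed value */
	unsigned long want = h_vmalloc_end >> 28;		/* 0xd00038000 */

	/* lis/ori load (H_VMALLOC_END >> 32); bit 31 is set, so lis sign-extends */
	unsigned long r11 = (h_vmalloc_end >> 32) | 0xffffffff00000000UL;
	/* rldic r11,r11,4,28: rotate left by 4, then mask with 0xffffffff0 */
	unsigned long rot = (r11 << 4) | (r11 >> 60);
	unsigned long got = rot & 0xffffffff0UL;

	printf("%lx %lx\n", want, got);		/* both print d00038000 */
	return 0;
}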
diff --git a/arch/powerpc/mm/subpage-prot.c b/arch/powerpc/mm/subpage-prot.c
index e94fbd4c8845..781532d7bc4d 100644
--- a/arch/powerpc/mm/subpage-prot.c
+++ b/arch/powerpc/mm/subpage-prot.c
@@ -36,7 +36,7 @@ void subpage_prot_free(struct mm_struct *mm)
}
}
addr = 0;
- for (i = 0; i < 2; ++i) {
+ for (i = 0; i < (TASK_SIZE_USER64 >> 43); ++i) {
p = spt->protptrs[i];
if (!p)
continue;
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index 744e0164ecf5..b3e849c4886e 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -12,12 +12,12 @@
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>
-#include <asm/ppc-opcode.h>
+#include <asm/ppc-opcode.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/trace.h>
-
+#include <asm/cputhreads.h>
#define RIC_FLUSH_TLB 0
#define RIC_FLUSH_PWC 1
@@ -54,23 +54,15 @@ static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
*/
__tlbiel_pid(pid, 0, ric);
- if (ric == RIC_FLUSH_ALL)
- /* For the remaining sets, just flush the TLB */
- ric = RIC_FLUSH_TLB;
+ /* For PWC, only one flush is needed */
+ if (ric == RIC_FLUSH_PWC) {
+ asm volatile("ptesync": : :"memory");
+ return;
+ }
+ /* For the remaining sets, just flush the TLB */
for (set = 1; set < POWER9_TLB_SETS_RADIX ; set++)
- __tlbiel_pid(pid, set, ric);
-
- asm volatile("ptesync": : :"memory");
- asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
-}
-
-static inline void tlbiel_pwc(unsigned long pid)
-{
- asm volatile("ptesync": : :"memory");
-
- /* For PWC flush, we don't look at set number */
- __tlbiel_pid(pid, 0, RIC_FLUSH_PWC);
+ __tlbiel_pid(pid, set, RIC_FLUSH_TLB);
asm volatile("ptesync": : :"memory");
asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
@@ -146,31 +138,23 @@ void radix__local_flush_tlb_mm(struct mm_struct *mm)
preempt_disable();
pid = mm->context.id;
if (pid != MMU_NO_CONTEXT)
- _tlbiel_pid(pid, RIC_FLUSH_ALL);
+ _tlbiel_pid(pid, RIC_FLUSH_TLB);
preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_tlb_mm);
-void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
+#ifndef CONFIG_SMP
+static void radix__local_flush_all_mm(struct mm_struct *mm)
{
unsigned long pid;
- struct mm_struct *mm = tlb->mm;
- /*
- * If we are doing a full mm flush, we will do a tlb flush
- * with RIC_FLUSH_ALL later.
- */
- if (tlb->fullmm)
- return;
preempt_disable();
-
pid = mm->context.id;
if (pid != MMU_NO_CONTEXT)
- tlbiel_pwc(pid);
-
+ _tlbiel_pid(pid, RIC_FLUSH_ALL);
preempt_enable();
}
-EXPORT_SYMBOL(radix__local_flush_tlb_pwc);
+#endif /* CONFIG_SMP */
void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
int psize)
@@ -208,38 +192,35 @@ void radix__flush_tlb_mm(struct mm_struct *mm)
goto no_context;
if (!mm_is_thread_local(mm))
- _tlbie_pid(pid, RIC_FLUSH_ALL);
+ _tlbie_pid(pid, RIC_FLUSH_TLB);
else
- _tlbiel_pid(pid, RIC_FLUSH_ALL);
+ _tlbiel_pid(pid, RIC_FLUSH_TLB);
no_context:
preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_mm);
-void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
+static void radix__flush_all_mm(struct mm_struct *mm)
{
unsigned long pid;
- struct mm_struct *mm = tlb->mm;
- /*
- * If we are doing a full mm flush, we will do a tlb flush
- * with RIC_FLUSH_ALL later.
- */
- if (tlb->fullmm)
- return;
preempt_disable();
-
pid = mm->context.id;
if (unlikely(pid == MMU_NO_CONTEXT))
goto no_context;
if (!mm_is_thread_local(mm))
- _tlbie_pid(pid, RIC_FLUSH_PWC);
+ _tlbie_pid(pid, RIC_FLUSH_ALL);
else
- tlbiel_pwc(pid);
+ _tlbiel_pid(pid, RIC_FLUSH_ALL);
no_context:
preempt_enable();
}
+
+void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
+{
+ tlb->need_flush_all = 1;
+}
EXPORT_SYMBOL(radix__flush_tlb_pwc);
void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
@@ -271,6 +252,8 @@ void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
}
EXPORT_SYMBOL(radix__flush_tlb_page);
+#else /* CONFIG_SMP */
+#define radix__flush_all_mm radix__local_flush_all_mm
#endif /* CONFIG_SMP */
void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end)
@@ -288,6 +271,7 @@ void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
{
struct mm_struct *mm = vma->vm_mm;
+
radix__flush_tlb_mm(mm);
}
EXPORT_SYMBOL(radix__flush_tlb_range);
@@ -319,7 +303,10 @@ void radix__tlb_flush(struct mmu_gather *tlb)
*/
if (psize != -1 && !tlb->fullmm && !tlb->need_flush_all)
radix__flush_tlb_range_psize(mm, tlb->start, tlb->end, psize);
- else
+ else if (tlb->need_flush_all) {
+ tlb->need_flush_all = 0;
+ radix__flush_all_mm(mm);
+ } else
radix__flush_tlb_mm(mm);
}
@@ -364,6 +351,43 @@ err_out:
preempt_enable();
}
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr)
+{
+ int local = mm_is_thread_local(mm);
+ unsigned long ap = mmu_get_ap(mmu_virtual_psize);
+ unsigned long pid, end;
+
+
+ pid = mm ? mm->context.id : 0;
+ if (unlikely(pid == MMU_NO_CONTEXT))
+ goto no_context;
+
+ /* 4k page size, just blow the world */
+ if (PAGE_SIZE == 0x1000) {
+ radix__flush_all_mm(mm);
+ return;
+ }
+
+ /* Otherwise first do the PWC */
+ if (local)
+ _tlbiel_pid(pid, RIC_FLUSH_PWC);
+ else
+ _tlbie_pid(pid, RIC_FLUSH_PWC);
+
+ /* Then iterate the pages */
+ end = addr + HPAGE_PMD_SIZE;
+ for (; addr < end; addr += PAGE_SIZE) {
+ if (local)
+ _tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
+ else
+ _tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
+ }
+no_context:
+ preempt_enable();
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
unsigned long page_size)
{
@@ -454,3 +478,44 @@ void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
else
radix__flush_tlb_page_psize(mm, address, mmu_virtual_psize);
}
+
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+extern void radix_kvm_prefetch_workaround(struct mm_struct *mm)
+{
+ unsigned int pid = mm->context.id;
+
+ if (unlikely(pid == MMU_NO_CONTEXT))
+ return;
+
+ /*
+ * If this context hasn't run on that CPU before and KVM is
+ * around, there's a slim chance that the guest on another
+ * CPU just brought in obsolete translation into the TLB of
+ * this CPU due to a bad prefetch using the guest PID on
+ * the way into the hypervisor.
+ *
+ * We work around this here. If KVM is possible, we check if
+ * any sibling thread is in KVM. If it is, the window may exist
+ * and thus we flush that PID from the core.
+ *
+ * A potential future improvement would be to mark which PIDs
+ * have never been used on the system and avoid it if the PID
+ * is new and the process has no other cpumask bit set.
+ */
+ if (cpu_has_feature(CPU_FTR_HVMODE) && radix_enabled()) {
+ int cpu = smp_processor_id();
+ int sib = cpu_first_thread_sibling(cpu);
+ bool flush = false;
+
+ for (; sib <= cpu_last_thread_sibling(cpu) && !flush; sib++) {
+ if (sib == cpu)
+ continue;
+ if (paca[sib].kvm_hstate.kvm_vcpu)
+ flush = true;
+ }
+ if (flush)
+ _tlbiel_pid(pid, RIC_FLUSH_ALL);
+ }
+}
+EXPORT_SYMBOL_GPL(radix_kvm_prefetch_workaround);
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
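For the new radix__flush_tlb_collapsed_pmd() above, the cost on 64K base pages is one PWC flush plus one invalidation per base page in the PMD range; a 4K base-page kernel takes the radix__flush_all_mm() path instead. A small sketch of the count, assuming the usual 2 MB PMD span (not part of the patch):

#include <stdio.h>

int main(void)
{
	unsigned long hpage_pmd_size = 2UL << 20;	/* assumed 2 MB PMD span */
	unsigned long page_size = 64UL << 10;		/* assumed 64 KB base pages */

	/* one tlbie(l) PWC flush, then one per base page in the collapsed PMD */
	printf("per-page invalidations: %lu\n", hpage_pmd_size / page_size);	/* 32 */
	return 0;
}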
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index b5b0fb97b9c0..881ebd53ffc2 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -29,6 +29,8 @@
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/bug.h>
+#include <asm/pte-walk.h>
+
#include <trace/events/thp.h>
@@ -138,13 +140,10 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
*/
void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
{
- const struct cpumask *tmp;
- int i, local = 0;
+ int i, local;
i = batch->index;
- tmp = cpumask_of(smp_processor_id());
- if (cpumask_equal(mm_cpumask(batch->mm), tmp))
- local = 1;
+ local = mm_is_thread_local(batch->mm);
if (i == 1)
flush_hash_page(batch->vpn[0], batch->pte[0],
batch->psize, batch->ssize, local);
@@ -207,8 +206,8 @@ void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
local_irq_save(flags);
arch_enter_lazy_mmu_mode();
for (; start < end; start += PAGE_SIZE) {
- pte_t *ptep = find_linux_pte_or_hugepte(mm->pgd, start, &is_thp,
- &hugepage_shift);
+ pte_t *ptep = find_current_mm_pte(mm->pgd, start, &is_thp,
+ &hugepage_shift);
unsigned long pte;
if (ptep == NULL)
diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S
index eabecfcaef7c..048b8e9f4492 100644
--- a/arch/powerpc/mm/tlb_nohash_low.S
+++ b/arch/powerpc/mm/tlb_nohash_low.S
@@ -60,7 +60,7 @@ _GLOBAL(__tlbil_va)
isync
1: blr
-#elif defined(CONFIG_8xx)
+#elif defined(CONFIG_PPC_8xx)
/*
* Nothing to do for 8xx, everything is inline
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index 30cf03f53428..47fc6660845d 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -263,6 +263,7 @@ static inline bool is_nearbranch(int offset)
#define COND_EQ (CR0_EQ | COND_CMP_TRUE)
#define COND_NE (CR0_EQ | COND_CMP_FALSE)
#define COND_LT (CR0_LT | COND_CMP_TRUE)
+#define COND_LE (CR0_GT | COND_CMP_FALSE)
#endif
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 861c5af1c9c4..a66e64b0b251 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -25,11 +25,7 @@ int bpf_jit_enable __read_mostly;
static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
{
- int *p = area;
-
- /* Fill whole space with trap instructions */
- while (p < (int *)((char *)area + size))
- *p++ = BREAKPOINT_INSTRUCTION;
+ memset32(area, BREAKPOINT_INSTRUCTION, size/4);
}
static inline void bpf_flush_icache(void *start, void *end)
@@ -795,12 +791,24 @@ emit_clear:
case BPF_JMP | BPF_JSGT | BPF_X:
true_cond = COND_GT;
goto cond_branch;
+ case BPF_JMP | BPF_JLT | BPF_K:
+ case BPF_JMP | BPF_JLT | BPF_X:
+ case BPF_JMP | BPF_JSLT | BPF_K:
+ case BPF_JMP | BPF_JSLT | BPF_X:
+ true_cond = COND_LT;
+ goto cond_branch;
case BPF_JMP | BPF_JGE | BPF_K:
case BPF_JMP | BPF_JGE | BPF_X:
case BPF_JMP | BPF_JSGE | BPF_K:
case BPF_JMP | BPF_JSGE | BPF_X:
true_cond = COND_GE;
goto cond_branch;
+ case BPF_JMP | BPF_JLE | BPF_K:
+ case BPF_JMP | BPF_JLE | BPF_X:
+ case BPF_JMP | BPF_JSLE | BPF_K:
+ case BPF_JMP | BPF_JSLE | BPF_X:
+ true_cond = COND_LE;
+ goto cond_branch;
case BPF_JMP | BPF_JEQ | BPF_K:
case BPF_JMP | BPF_JEQ | BPF_X:
true_cond = COND_EQ;
@@ -817,14 +825,18 @@ emit_clear:
cond_branch:
switch (code) {
case BPF_JMP | BPF_JGT | BPF_X:
+ case BPF_JMP | BPF_JLT | BPF_X:
case BPF_JMP | BPF_JGE | BPF_X:
+ case BPF_JMP | BPF_JLE | BPF_X:
case BPF_JMP | BPF_JEQ | BPF_X:
case BPF_JMP | BPF_JNE | BPF_X:
/* unsigned comparison */
PPC_CMPLD(dst_reg, src_reg);
break;
case BPF_JMP | BPF_JSGT | BPF_X:
+ case BPF_JMP | BPF_JSLT | BPF_X:
case BPF_JMP | BPF_JSGE | BPF_X:
+ case BPF_JMP | BPF_JSLE | BPF_X:
/* signed comparison */
PPC_CMPD(dst_reg, src_reg);
break;
@@ -834,7 +846,9 @@ cond_branch:
case BPF_JMP | BPF_JNE | BPF_K:
case BPF_JMP | BPF_JEQ | BPF_K:
case BPF_JMP | BPF_JGT | BPF_K:
+ case BPF_JMP | BPF_JLT | BPF_K:
case BPF_JMP | BPF_JGE | BPF_K:
+ case BPF_JMP | BPF_JLE | BPF_K:
/*
* Need sign-extended load, so only positive
* values can be used as imm in cmpldi
@@ -849,7 +863,9 @@ cond_branch:
}
break;
case BPF_JMP | BPF_JSGT | BPF_K:
+ case BPF_JMP | BPF_JSLT | BPF_K:
case BPF_JMP | BPF_JSGE | BPF_K:
+ case BPF_JMP | BPF_JSLE | BPF_K:
/*
* signed comparison, so any 16-bit value
* can be used in cmpdi
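The new COND_LE in bpf_jit.h encodes "branch if the CR0 GT bit is clear", so BPF_JLE/JSLE reuse the same compare instructions as the existing cases. The immediate variants keep the restriction noted in the comment above: cmpldi sign-extends its 16-bit immediate before the unsigned compare, so only "positive" immediates are usable. A stand-alone illustration (not part of the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int16_t imm = -0x8000;				/* the bit pattern 0x8000 */
	unsigned long cmp = (unsigned long)(long)imm;	/* value cmpldi would compare against */

	printf("0x%lx\n", cmp);		/* 0xffffffffffff8000, not 0x8000 */
	return 0;
}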
diff --git a/arch/powerpc/perf/Makefile b/arch/powerpc/perf/Makefile
index 4d606b99a5cb..3f3a5ce66495 100644
--- a/arch/powerpc/perf/Makefile
+++ b/arch/powerpc/perf/Makefile
@@ -8,6 +8,7 @@ obj64-$(CONFIG_PPC_PERF_CTRS) += power4-pmu.o ppc970-pmu.o power5-pmu.o \
isa207-common.o power8-pmu.o power9-pmu.o
obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o
+obj-$(CONFIG_PPC_POWERNV) += imc-pmu.o
obj-$(CONFIG_FSL_EMB_PERF_EVENT) += core-fsl-emb.o
obj-$(CONFIG_FSL_EMB_PERF_EVENT_E500) += e500-pmu.o e6500-pmu.o
diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
index 0fc26714780a..0af051a1974e 100644
--- a/arch/powerpc/perf/callchain.c
+++ b/arch/powerpc/perf/callchain.c
@@ -22,6 +22,7 @@
#ifdef CONFIG_PPC64
#include "../kernel/ppc32.h"
#endif
+#include <asm/pte-walk.h>
/*
@@ -127,7 +128,7 @@ static int read_user_stack_slow(void __user *ptr, void *buf, int nb)
return -EFAULT;
local_irq_save(flags);
- ptep = find_linux_pte_or_hugepte(pgdir, addr, NULL, &shift);
+ ptep = find_current_mm_pte(pgdir, addr, NULL, &shift);
if (!ptep)
goto err_out;
if (!shift)
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 6c2d4168daec..2e3eb7431571 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -2039,7 +2039,8 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
perf_sample_data_init(&data, ~0ULL, event->hw.last_period);
- if (event->attr.sample_type & PERF_SAMPLE_ADDR)
+ if (event->attr.sample_type &
+ (PERF_SAMPLE_ADDR | PERF_SAMPLE_PHYS_ADDR))
perf_get_data_addr(regs, &data.addr);
if (event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK) {
diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
new file mode 100644
index 000000000000..9ccac86f3463
--- /dev/null
+++ b/arch/powerpc/perf/imc-pmu.c
@@ -0,0 +1,1306 @@
+/*
+ * In-Memory Collection (IMC) Performance Monitor counter support.
+ *
+ * Copyright (C) 2017 Madhavan Srinivasan, IBM Corporation.
+ * (C) 2017 Anju T Sudhakar, IBM Corporation.
+ * (C) 2017 Hemant K Shaw, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or later version.
+ */
+#include <linux/perf_event.h>
+#include <linux/slab.h>
+#include <asm/opal.h>
+#include <asm/imc-pmu.h>
+#include <asm/cputhreads.h>
+#include <asm/smp.h>
+#include <linux/string.h>
+
+/* Nest IMC data structures and variables */
+
+/*
+ * Used to avoid races in counting the nest-pmu units during hotplug
+ * register and unregister
+ */
+static DEFINE_MUTEX(nest_init_lock);
+static DEFINE_PER_CPU(struct imc_pmu_ref *, local_nest_imc_refc);
+static struct imc_pmu *per_nest_pmu_arr[IMC_MAX_PMUS];
+static cpumask_t nest_imc_cpumask;
+struct imc_pmu_ref *nest_imc_refc;
+static int nest_pmus;
+
+/* Core IMC data structures and variables */
+
+static cpumask_t core_imc_cpumask;
+struct imc_pmu_ref *core_imc_refc;
+static struct imc_pmu *core_imc_pmu;
+
+/* Thread IMC data structures and variables */
+
+static DEFINE_PER_CPU(u64 *, thread_imc_mem);
+static struct imc_pmu *thread_imc_pmu;
+static int thread_imc_mem_size;
+
+struct imc_pmu *imc_event_to_pmu(struct perf_event *event)
+{
+ return container_of(event->pmu, struct imc_pmu, pmu);
+}
+
+PMU_FORMAT_ATTR(event, "config:0-40");
+PMU_FORMAT_ATTR(offset, "config:0-31");
+PMU_FORMAT_ATTR(rvalue, "config:32");
+PMU_FORMAT_ATTR(mode, "config:33-40");
+static struct attribute *imc_format_attrs[] = {
+ &format_attr_event.attr,
+ &format_attr_offset.attr,
+ &format_attr_rvalue.attr,
+ &format_attr_mode.attr,
+ NULL,
+};
+
+static struct attribute_group imc_format_group = {
+ .name = "format",
+ .attrs = imc_format_attrs,
+};
+
+/* Get the cpumask printed to a buffer "buf" */
+static ssize_t imc_pmu_cpumask_get_attr(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pmu *pmu = dev_get_drvdata(dev);
+ struct imc_pmu *imc_pmu = container_of(pmu, struct imc_pmu, pmu);
+ cpumask_t *active_mask;
+
+ switch(imc_pmu->domain){
+ case IMC_DOMAIN_NEST:
+ active_mask = &nest_imc_cpumask;
+ break;
+ case IMC_DOMAIN_CORE:
+ active_mask = &core_imc_cpumask;
+ break;
+ default:
+ return 0;
+ }
+
+ return cpumap_print_to_pagebuf(true, buf, active_mask);
+}
+
+static DEVICE_ATTR(cpumask, S_IRUGO, imc_pmu_cpumask_get_attr, NULL);
+
+static struct attribute *imc_pmu_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL,
+};
+
+static struct attribute_group imc_pmu_cpumask_attr_group = {
+ .attrs = imc_pmu_cpumask_attrs,
+};
+
+/* device_str_attr_create : Populate event "name" and string "str" in attribute */
+static struct attribute *device_str_attr_create(const char *name, const char *str)
+{
+ struct perf_pmu_events_attr *attr;
+
+ attr = kzalloc(sizeof(*attr), GFP_KERNEL);
+ if (!attr)
+ return NULL;
+ sysfs_attr_init(&attr->attr.attr);
+
+ attr->event_str = str;
+ attr->attr.attr.name = name;
+ attr->attr.attr.mode = 0444;
+ attr->attr.show = perf_event_sysfs_show;
+
+ return &attr->attr.attr;
+}
+
+struct imc_events *imc_parse_event(struct device_node *np, const char *scale,
+ const char *unit, const char *prefix, u32 base)
+{
+ struct imc_events *event;
+ const char *s;
+ u32 reg;
+
+ event = kzalloc(sizeof(struct imc_events), GFP_KERNEL);
+ if (!event)
+ return NULL;
+
+ if (of_property_read_u32(np, "reg", &reg))
+ goto error;
+ /* Add the base_reg value to the "reg" */
+ event->value = base + reg;
+
+ if (of_property_read_string(np, "event-name", &s))
+ goto error;
+
+ event->name = kasprintf(GFP_KERNEL, "%s%s", prefix, s);
+ if (!event->name)
+ goto error;
+
+ if (of_property_read_string(np, "scale", &s))
+ s = scale;
+
+ if (s) {
+ event->scale = kstrdup(s, GFP_KERNEL);
+ if (!event->scale)
+ goto error;
+ }
+
+ if (of_property_read_string(np, "unit", &s))
+ s = unit;
+
+ if (s) {
+ event->unit = kstrdup(s, GFP_KERNEL);
+ if (!event->unit)
+ goto error;
+ }
+
+ return event;
+error:
+ kfree(event->unit);
+ kfree(event->scale);
+ kfree(event->name);
+ kfree(event);
+
+ return NULL;
+}
+
+/*
+ * update_events_in_group: Update the "events" information in an attr_group
+ * and assign the attr_group to the pmu "pmu".
+ */
+static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu)
+{
+ struct attribute_group *attr_group;
+ struct attribute **attrs, *dev_str;
+ struct device_node *np, *pmu_events;
+ struct imc_events *ev;
+ u32 handle, base_reg;
+ int i=0, j=0, ct;
+ const char *prefix, *g_scale, *g_unit;
+ const char *ev_val_str, *ev_scale_str, *ev_unit_str;
+
+ if (!of_property_read_u32(node, "events", &handle))
+ pmu_events = of_find_node_by_phandle(handle);
+ else
+ return 0;
+
+ /* Did not find any node with a given phandle */
+ if (!pmu_events)
+ return 0;
+
+ /* Get a count of number of child nodes */
+ ct = of_get_child_count(pmu_events);
+
+ /* Get the event prefix */
+ if (of_property_read_string(node, "events-prefix", &prefix))
+ return 0;
+
+ /* Get a global unit and scale data if available */
+ if (of_property_read_string(node, "scale", &g_scale))
+ g_scale = NULL;
+
+ if (of_property_read_string(node, "unit", &g_unit))
+ g_unit = NULL;
+
+ /* "reg" property gives out the base offset of the counters data */
+ of_property_read_u32(node, "reg", &base_reg);
+
+ /* Allocate memory for the events */
+ pmu->events = kcalloc(ct, sizeof(struct imc_events), GFP_KERNEL);
+ if (!pmu->events)
+ return -ENOMEM;
+
+ ct = 0;
+ /* Parse the events and update the struct */
+ for_each_child_of_node(pmu_events, np) {
+ ev = imc_parse_event(np, g_scale, g_unit, prefix, base_reg);
+ if (ev)
+ pmu->events[ct++] = ev;
+ }
+
+ /* Allocate memory for attribute group */
+ attr_group = kzalloc(sizeof(*attr_group), GFP_KERNEL);
+ if (!attr_group)
+ return -ENOMEM;
+
+ /*
+ * Allocate memory for attributes.
+ * Since we have count of events for this pmu, we also allocate
+ * memory for the scale and unit attribute for now.
+ * "ct" has the total event structs added from the events-parent node.
+ * So allocate three times the "ct" (this includes event, event_scale and
+ * event_unit).
+ */
+ attrs = kcalloc(((ct * 3) + 1), sizeof(struct attribute *), GFP_KERNEL);
+ if (!attrs) {
+ kfree(attr_group);
+ kfree(pmu->events);
+ return -ENOMEM;
+ }
+
+ attr_group->name = "events";
+ attr_group->attrs = attrs;
+ do {
+ ev_val_str = kasprintf(GFP_KERNEL, "event=0x%x", pmu->events[i]->value);
+ dev_str = device_str_attr_create(pmu->events[i]->name, ev_val_str);
+ if (!dev_str)
+ continue;
+
+ attrs[j++] = dev_str;
+ if (pmu->events[i]->scale) {
+ ev_scale_str = kasprintf(GFP_KERNEL, "%s.scale",pmu->events[i]->name);
+ dev_str = device_str_attr_create(ev_scale_str, pmu->events[i]->scale);
+ if (!dev_str)
+ continue;
+
+ attrs[j++] = dev_str;
+ }
+
+ if (pmu->events[i]->unit) {
+ ev_unit_str = kasprintf(GFP_KERNEL, "%s.unit",pmu->events[i]->name);
+ dev_str = device_str_attr_create(ev_unit_str, pmu->events[i]->unit);
+ if (!dev_str)
+ continue;
+
+ attrs[j++] = dev_str;
+ }
+ } while (++i < ct);
+
+ /* Save the event attribute */
+ pmu->attr_groups[IMC_EVENT_ATTR] = attr_group;
+
+ kfree(pmu->events);
+ return 0;
+}
+
+/* get_nest_pmu_ref: Return the imc_pmu_ref struct for the given node */
+static struct imc_pmu_ref *get_nest_pmu_ref(int cpu)
+{
+ return per_cpu(local_nest_imc_refc, cpu);
+}
+
+static void nest_change_cpu_context(int old_cpu, int new_cpu)
+{
+ struct imc_pmu **pn = per_nest_pmu_arr;
+ int i;
+
+ if (old_cpu < 0 || new_cpu < 0)
+ return;
+
+ for (i = 0; *pn && i < IMC_MAX_PMUS; i++, pn++)
+ perf_pmu_migrate_context(&(*pn)->pmu, old_cpu, new_cpu);
+}
+
+static int ppc_nest_imc_cpu_offline(unsigned int cpu)
+{
+ int nid, target = -1;
+ const struct cpumask *l_cpumask;
+ struct imc_pmu_ref *ref;
+
+ /*
+ * Check in the designated list for this cpu. Don't bother
+ * if it is not one of them.
+ */
+ if (!cpumask_test_and_clear_cpu(cpu, &nest_imc_cpumask))
+ return 0;
+
+ /*
+ * Now that this cpu is one of the designated cpus,
+ * find the next cpu which is a) online and b) in the same chip.
+ */
+ nid = cpu_to_node(cpu);
+ l_cpumask = cpumask_of_node(nid);
+ target = cpumask_any_but(l_cpumask, cpu);
+
+ /*
+ * Update the cpumask with the target cpu and
+ * migrate the context if needed
+ */
+ if (target >= 0 && target < nr_cpu_ids) {
+ cpumask_set_cpu(target, &nest_imc_cpumask);
+ nest_change_cpu_context(cpu, target);
+ } else {
+ opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
+ get_hard_smp_processor_id(cpu));
+ /*
+ * If this is the last cpu in this chip, skip taking the reference
+ * count mutex lock and make the reference count on this chip zero.
+ */
+ ref = get_nest_pmu_ref(cpu);
+ if (!ref)
+ return -EINVAL;
+
+ ref->refc = 0;
+ }
+ return 0;
+}
+
+static int ppc_nest_imc_cpu_online(unsigned int cpu)
+{
+ const struct cpumask *l_cpumask;
+ static struct cpumask tmp_mask;
+ int res;
+
+ /* Get the cpumask of this node */
+ l_cpumask = cpumask_of_node(cpu_to_node(cpu));
+
+ /*
+ * If this is not the first online CPU on this node, then
+ * just return.
+ */
+ if (cpumask_and(&tmp_mask, l_cpumask, &nest_imc_cpumask))
+ return 0;
+
+ /*
+ * If this is the first online cpu on this node,
+ * disable the nest counters by making an OPAL call.
+ */
+ res = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
+ get_hard_smp_processor_id(cpu));
+ if (res)
+ return res;
+
+ /* Make this CPU the designated target for counter collection */
+ cpumask_set_cpu(cpu, &nest_imc_cpumask);
+ return 0;
+}
+
+static int nest_pmu_cpumask_init(void)
+{
+ return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE,
+ "perf/powerpc/imc:online",
+ ppc_nest_imc_cpu_online,
+ ppc_nest_imc_cpu_offline);
+}
+
+static void nest_imc_counters_release(struct perf_event *event)
+{
+ int rc, node_id;
+ struct imc_pmu_ref *ref;
+
+ if (event->cpu < 0)
+ return;
+
+ node_id = cpu_to_node(event->cpu);
+
+ /*
+ * See if we need to disable the nest PMU.
+ * If no events are currently in use, then we have to take a
+ * mutex to ensure that we don't race with another task
+ * enabling or disabling the nest counters.
+ */
+ ref = get_nest_pmu_ref(event->cpu);
+ if (!ref)
+ return;
+
+ /* Take the mutex lock for this node and then decrement the reference count */
+ mutex_lock(&ref->lock);
+ ref->refc--;
+ if (ref->refc == 0) {
+ rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
+ get_hard_smp_processor_id(event->cpu));
+ if (rc) {
+ mutex_unlock(&ref->lock);
+ pr_err("nest-imc: Unable to stop the counters for core %d\n", node_id);
+ return;
+ }
+ } else if (ref->refc < 0) {
+ WARN(1, "nest-imc: Invalid event reference count\n");
+ ref->refc = 0;
+ }
+ mutex_unlock(&ref->lock);
+}
+
+static int nest_imc_event_init(struct perf_event *event)
+{
+ int chip_id, rc, node_id;
+ u32 l_config, config = event->attr.config;
+ struct imc_mem_info *pcni;
+ struct imc_pmu *pmu;
+ struct imc_pmu_ref *ref;
+ bool flag = false;
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ /* Sampling not supported */
+ if (event->hw.sample_period)
+ return -EINVAL;
+
+ /* unsupported modes and filters */
+ if (event->attr.exclude_user ||
+ event->attr.exclude_kernel ||
+ event->attr.exclude_hv ||
+ event->attr.exclude_idle ||
+ event->attr.exclude_host ||
+ event->attr.exclude_guest)
+ return -EINVAL;
+
+ if (event->cpu < 0)
+ return -EINVAL;
+
+ pmu = imc_event_to_pmu(event);
+
+ /* Sanity check for config (event offset) */
+ if ((config & IMC_EVENT_OFFSET_MASK) > pmu->counter_mem_size)
+ return -EINVAL;
+
+ /*
+ * Nest HW counter memory resides in a per-chip reserve-memory (HOMER).
+ * Get the base memory address for this cpu.
+ */
+ chip_id = topology_physical_package_id(event->cpu);
+ pcni = pmu->mem_info;
+ do {
+ if (pcni->id == chip_id) {
+ flag = true;
+ break;
+ }
+ pcni++;
+ } while (pcni);
+
+ if (!flag)
+ return -ENODEV;
+
+ /*
+ * Add the event offset to the base address.
+ */
+ l_config = config & IMC_EVENT_OFFSET_MASK;
+ event->hw.event_base = (u64)pcni->vbase + l_config;
+ node_id = cpu_to_node(event->cpu);
+
+ /*
+ * Get the imc_pmu_ref struct for this node.
+ * Take the mutex lock and then increment the count of nest pmu events
+ * initialized.
+ */
+ ref = get_nest_pmu_ref(event->cpu);
+ if (!ref)
+ return -EINVAL;
+
+ mutex_lock(&ref->lock);
+ if (ref->refc == 0) {
+ rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_NEST,
+ get_hard_smp_processor_id(event->cpu));
+ if (rc) {
+ mutex_unlock(&ref->lock);
+ pr_err("nest-imc: Unable to start the counters for node %d\n",
+ node_id);
+ return rc;
+ }
+ }
+ ++ref->refc;
+ mutex_unlock(&ref->lock);
+
+ event->destroy = nest_imc_counters_release;
+ return 0;
+}
+
+/*
+ * core_imc_mem_init : Initializes memory for the current core.
+ *
+ * Uses alloc_pages_node() and uses the returned address as an argument to
+ * an opal call to configure the pdbar. The address sent as an argument is
+ * converted to physical address before the opal call is made. This is the
+ * base address at which the core imc counters are populated.
+ */
+static int core_imc_mem_init(int cpu, int size)
+{
+ int phys_id, rc = 0, core_id = (cpu / threads_per_core);
+ struct imc_mem_info *mem_info;
+
+ /*
+ * alloc_pages_node() will allocate memory for core in the
+ * local node only.
+ */
+ phys_id = topology_physical_package_id(cpu);
+ mem_info = &core_imc_pmu->mem_info[core_id];
+ mem_info->id = core_id;
+
+ /* We need only vbase for core counters */
+ mem_info->vbase = page_address(alloc_pages_node(phys_id,
+ GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
+ get_order(size)));
+ if (!mem_info->vbase)
+ return -ENOMEM;
+
+ /* Init the mutex */
+ core_imc_refc[core_id].id = core_id;
+ mutex_init(&core_imc_refc[core_id].lock);
+
+ rc = opal_imc_counters_init(OPAL_IMC_COUNTERS_CORE,
+ __pa((void *)mem_info->vbase),
+ get_hard_smp_processor_id(cpu));
+ if (rc) {
+ free_pages((u64)mem_info->vbase, get_order(size));
+ mem_info->vbase = NULL;
+ }
+
+ return rc;
+}
+
+static bool is_core_imc_mem_inited(int cpu)
+{
+ struct imc_mem_info *mem_info;
+ int core_id = (cpu / threads_per_core);
+
+ mem_info = &core_imc_pmu->mem_info[core_id];
+ if (!mem_info->vbase)
+ return false;
+
+ return true;
+}
+
+static int ppc_core_imc_cpu_online(unsigned int cpu)
+{
+ const struct cpumask *l_cpumask;
+ static struct cpumask tmp_mask;
+ int ret = 0;
+
+ /* Get the cpumask for this core */
+ l_cpumask = cpu_sibling_mask(cpu);
+
+ /* If a cpu for this core is already set, then, don't do anything */
+ if (cpumask_and(&tmp_mask, l_cpumask, &core_imc_cpumask))
+ return 0;
+
+ if (!is_core_imc_mem_inited(cpu)) {
+ ret = core_imc_mem_init(cpu, core_imc_pmu->counter_mem_size);
+ if (ret) {
+ pr_info("core_imc memory allocation for cpu %d failed\n", cpu);
+ return ret;
+ }
+ }
+
+ /* set the cpu in the mask */
+ cpumask_set_cpu(cpu, &core_imc_cpumask);
+ return 0;
+}
+
+static int ppc_core_imc_cpu_offline(unsigned int cpu)
+{
+ unsigned int ncpu, core_id;
+ struct imc_pmu_ref *ref;
+
+ /*
+ * clear this cpu out of the mask, if not present in the mask,
+ * don't bother doing anything.
+ */
+ if (!cpumask_test_and_clear_cpu(cpu, &core_imc_cpumask))
+ return 0;
+
+ /* Find any online cpu in that core except the current "cpu" */
+ ncpu = cpumask_any_but(cpu_sibling_mask(cpu), cpu);
+
+ if (ncpu >= 0 && ncpu < nr_cpu_ids) {
+ cpumask_set_cpu(ncpu, &core_imc_cpumask);
+ perf_pmu_migrate_context(&core_imc_pmu->pmu, cpu, ncpu);
+ } else {
+ /*
+ * If this is the last cpu in this core, skip taking the reference
+ * count mutex lock for this core and directly zero "refc" for
+ * this core.
+ */
+ opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
+ get_hard_smp_processor_id(cpu));
+ core_id = cpu / threads_per_core;
+ ref = &core_imc_refc[core_id];
+ if (!ref)
+ return -EINVAL;
+
+ ref->refc = 0;
+ }
+ return 0;
+}
+
+static int core_imc_pmu_cpumask_init(void)
+{
+ return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE,
+ "perf/powerpc/imc_core:online",
+ ppc_core_imc_cpu_online,
+ ppc_core_imc_cpu_offline);
+}
+
+static void core_imc_counters_release(struct perf_event *event)
+{
+ int rc, core_id;
+ struct imc_pmu_ref *ref;
+
+ if (event->cpu < 0)
+ return;
+ /*
+ * See if we need to disable the IMC PMU.
+ * If no events are currently in use, then we have to take a
+ * mutex to ensure that we don't race with another task
+ * enabling or disabling the core counters.
+ */
+ core_id = event->cpu / threads_per_core;
+
+ /* Take the mutex lock and decrement the reference count for this core */
+ ref = &core_imc_refc[core_id];
+ if (!ref)
+ return;
+
+ mutex_lock(&ref->lock);
+ ref->refc--;
+ if (ref->refc == 0) {
+ rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
+ get_hard_smp_processor_id(event->cpu));
+ if (rc) {
+ mutex_unlock(&ref->lock);
+ pr_err("IMC: Unable to stop the counters for core %d\n", core_id);
+ return;
+ }
+ } else if (ref->refc < 0) {
+ WARN(1, "core-imc: Invalid event reference count\n");
+ ref->refc = 0;
+ }
+ mutex_unlock(&ref->lock);
+}
+
+static int core_imc_event_init(struct perf_event *event)
+{
+ int core_id, rc;
+ u64 config = event->attr.config;
+ struct imc_mem_info *pcmi;
+ struct imc_pmu *pmu;
+ struct imc_pmu_ref *ref;
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ /* Sampling not supported */
+ if (event->hw.sample_period)
+ return -EINVAL;
+
+ /* unsupported modes and filters */
+ if (event->attr.exclude_user ||
+ event->attr.exclude_kernel ||
+ event->attr.exclude_hv ||
+ event->attr.exclude_idle ||
+ event->attr.exclude_host ||
+ event->attr.exclude_guest)
+ return -EINVAL;
+
+ if (event->cpu < 0)
+ return -EINVAL;
+
+ event->hw.idx = -1;
+ pmu = imc_event_to_pmu(event);
+
+ /* Sanity check for config (event offset) */
+ if (((config & IMC_EVENT_OFFSET_MASK) > pmu->counter_mem_size))
+ return -EINVAL;
+
+ if (!is_core_imc_mem_inited(event->cpu))
+ return -ENODEV;
+
+ core_id = event->cpu / threads_per_core;
+ pcmi = &core_imc_pmu->mem_info[core_id];
+ if ((!pcmi->vbase))
+ return -ENODEV;
+
+ /* Get the core_imc mutex for this core */
+ ref = &core_imc_refc[core_id];
+ if (!ref)
+ return -EINVAL;
+
+ /*
+ * Core pmu units are enabled only when they are used.
+ * See if this is triggered for the first time.
+ * If yes, take the mutex lock and enable the core counters.
+ * If not, just increment the count in core_imc_refc struct.
+ */
+ mutex_lock(&ref->lock);
+ if (ref->refc == 0) {
+ rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
+ get_hard_smp_processor_id(event->cpu));
+ if (rc) {
+ mutex_unlock(&ref->lock);
+ pr_err("core-imc: Unable to start the counters for core %d\n",
+ core_id);
+ return rc;
+ }
+ }
+ ++ref->refc;
+ mutex_unlock(&ref->lock);
+
+ event->hw.event_base = (u64)pcmi->vbase + (config & IMC_EVENT_OFFSET_MASK);
+ event->destroy = core_imc_counters_release;
+ return 0;
+}
+
+/*
+ * Allocates a page of memory for each of the online cpus, and write the
+ * physical base address of that page to the LDBAR for that cpu.
+ *
+ * LDBAR Register Layout:
+ *
+ * 0 4 8 12 16 20 24 28
+ * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
+ * | | [ ] [ Counter Address [8:50]
+ * | * Mode |
+ * | * PB Scope
+ * * Enable/Disable
+ *
+ * 32 36 40 44 48 52 56 60
+ * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
+ * Counter Address [8:50] ]
+ *
+ */
+static int thread_imc_mem_alloc(int cpu_id, int size)
+{
+ u64 ldbar_value, *local_mem = per_cpu(thread_imc_mem, cpu_id);
+ int phys_id = topology_physical_package_id(cpu_id);
+
+ if (!local_mem) {
+ /*
+ * This case could happen only once at start, since we don't
+ * free the memory in the cpu offline path.
+ */
+ local_mem = page_address(alloc_pages_node(phys_id,
+ GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
+ get_order(size)));
+ if (!local_mem)
+ return -ENOMEM;
+
+ per_cpu(thread_imc_mem, cpu_id) = local_mem;
+ }
+
+ ldbar_value = ((u64)local_mem & THREAD_IMC_LDBAR_MASK) | THREAD_IMC_ENABLE;
+
+ mtspr(SPRN_LDBAR, ldbar_value);
+ return 0;
+}
+
+static int ppc_thread_imc_cpu_online(unsigned int cpu)
+{
+ return thread_imc_mem_alloc(cpu, thread_imc_mem_size);
+}
+
+static int ppc_thread_imc_cpu_offline(unsigned int cpu)
+{
+ mtspr(SPRN_LDBAR, 0);
+ return 0;
+}
+
+static int thread_imc_cpu_init(void)
+{
+ return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
+ "perf/powerpc/imc_thread:online",
+ ppc_thread_imc_cpu_online,
+ ppc_thread_imc_cpu_offline);
+}
+
+void thread_imc_pmu_sched_task(struct perf_event_context *ctx,
+ bool sched_in)
+{
+ int core_id;
+ struct imc_pmu_ref *ref;
+
+ if (!is_core_imc_mem_inited(smp_processor_id()))
+ return;
+
+ core_id = smp_processor_id() / threads_per_core;
+ /*
+ * imc pmus are enabled only when they are used.
+ * See if this is triggered for the first time.
+ * If yes, take the mutex lock and enable the counters.
+ * If not, just increment the count in ref count struct.
+ */
+ ref = &core_imc_refc[core_id];
+ if (!ref)
+ return;
+
+ if (sched_in) {
+ mutex_lock(&ref->lock);
+ if (ref->refc == 0) {
+ if (opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
+ get_hard_smp_processor_id(smp_processor_id()))) {
+ mutex_unlock(&ref->lock);
+ pr_err("thread-imc: Unable to start the counter\
+ for core %d\n", core_id);
+ return;
+ }
+ }
+ ++ref->refc;
+ mutex_unlock(&ref->lock);
+ } else {
+ mutex_lock(&ref->lock);
+ ref->refc--;
+ if (ref->refc == 0) {
+ if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
+ get_hard_smp_processor_id(smp_processor_id()))) {
+ mutex_unlock(&ref->lock);
+ pr_err("thread-imc: Unable to stop the counters\
+ for core %d\n", core_id);
+ return;
+ }
+ } else if (ref->refc < 0) {
+ ref->refc = 0;
+ }
+ mutex_unlock(&ref->lock);
+ }
+
+ return;
+}
+
+static int thread_imc_event_init(struct perf_event *event)
+{
+ u32 config = event->attr.config;
+ struct task_struct *target;
+ struct imc_pmu *pmu;
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ /* Sampling not supported */
+ if (event->hw.sample_period)
+ return -EINVAL;
+
+ event->hw.idx = -1;
+ pmu = imc_event_to_pmu(event);
+
+ /* Sanity check for config offset */
+ if (((config & IMC_EVENT_OFFSET_MASK) > pmu->counter_mem_size))
+ return -EINVAL;
+
+ target = event->hw.target;
+ if (!target)
+ return -EINVAL;
+
+ event->pmu->task_ctx_nr = perf_sw_context;
+ return 0;
+}
+
+static bool is_thread_imc_pmu(struct perf_event *event)
+{
+ if (!strncmp(event->pmu->name, "thread_imc", strlen("thread_imc")))
+ return true;
+
+ return false;
+}
+
+static u64 * get_event_base_addr(struct perf_event *event)
+{
+ u64 addr;
+
+ if (is_thread_imc_pmu(event)) {
+ addr = (u64)per_cpu(thread_imc_mem, smp_processor_id());
+ return (u64 *)(addr + (event->attr.config & IMC_EVENT_OFFSET_MASK));
+ }
+
+ return (u64 *)event->hw.event_base;
+}
+
+static void thread_imc_pmu_start_txn(struct pmu *pmu,
+ unsigned int txn_flags)
+{
+ if (txn_flags & ~PERF_PMU_TXN_ADD)
+ return;
+ perf_pmu_disable(pmu);
+}
+
+static void thread_imc_pmu_cancel_txn(struct pmu *pmu)
+{
+ perf_pmu_enable(pmu);
+}
+
+static int thread_imc_pmu_commit_txn(struct pmu *pmu)
+{
+ perf_pmu_enable(pmu);
+ return 0;
+}
+
+static u64 imc_read_counter(struct perf_event *event)
+{
+ u64 *addr, data;
+
+ /*
+ * In-Memory Collection (IMC) counters are free flowing counters.
+ * So we take a snapshot of the counter value on enable and save it
+ * to calculate the delta at later stage to present the event counter
+ * value.
+ */
+ addr = get_event_base_addr(event);
+ data = be64_to_cpu(READ_ONCE(*addr));
+ local64_set(&event->hw.prev_count, data);
+
+ return data;
+}
+
+static void imc_event_update(struct perf_event *event)
+{
+ u64 counter_prev, counter_new, final_count;
+
+ counter_prev = local64_read(&event->hw.prev_count);
+ counter_new = imc_read_counter(event);
+ final_count = counter_new - counter_prev;
+
+ /* Update the delta to the event count */
+ local64_add(final_count, &event->count);
+}
+
+static void imc_event_start(struct perf_event *event, int flags)
+{
+ /*
+ * In Memory Counters are free flowing counters. HW or the microcode
+ * keeps adding to the counter offset in memory. To get event
+ * counter value, we snapshot the value here and we calculate
+ * delta at later point.
+ */
+ imc_read_counter(event);
+}
+
+static void imc_event_stop(struct perf_event *event, int flags)
+{
+ /*
+ * Take a snapshot and calculate the delta and update
+ * the event counter values.
+ */
+ imc_event_update(event);
+}
+
+static int imc_event_add(struct perf_event *event, int flags)
+{
+ if (flags & PERF_EF_START)
+ imc_event_start(event, flags);
+
+ return 0;
+}
+
+static int thread_imc_event_add(struct perf_event *event, int flags)
+{
+ if (flags & PERF_EF_START)
+ imc_event_start(event, flags);
+
+ /* Enable the sched_task to start the engine */
+ perf_sched_cb_inc(event->ctx->pmu);
+ return 0;
+}
+
+static void thread_imc_event_del(struct perf_event *event, int flags)
+{
+ /*
+ * Take a snapshot and calculate the delta and update
+ * the event counter values.
+ */
+ imc_event_update(event);
+ perf_sched_cb_dec(event->ctx->pmu);
+}
+
+/* update_pmu_ops : Populate the appropriate operations for "pmu" */
+static int update_pmu_ops(struct imc_pmu *pmu)
+{
+ pmu->pmu.task_ctx_nr = perf_invalid_context;
+ pmu->pmu.add = imc_event_add;
+ pmu->pmu.del = imc_event_stop;
+ pmu->pmu.start = imc_event_start;
+ pmu->pmu.stop = imc_event_stop;
+ pmu->pmu.read = imc_event_update;
+ pmu->pmu.attr_groups = pmu->attr_groups;
+ pmu->attr_groups[IMC_FORMAT_ATTR] = &imc_format_group;
+
+ switch (pmu->domain) {
+ case IMC_DOMAIN_NEST:
+ pmu->pmu.event_init = nest_imc_event_init;
+ pmu->attr_groups[IMC_CPUMASK_ATTR] = &imc_pmu_cpumask_attr_group;
+ break;
+ case IMC_DOMAIN_CORE:
+ pmu->pmu.event_init = core_imc_event_init;
+ pmu->attr_groups[IMC_CPUMASK_ATTR] = &imc_pmu_cpumask_attr_group;
+ break;
+ case IMC_DOMAIN_THREAD:
+ pmu->pmu.event_init = thread_imc_event_init;
+ pmu->pmu.sched_task = thread_imc_pmu_sched_task;
+ pmu->pmu.add = thread_imc_event_add;
+ pmu->pmu.del = thread_imc_event_del;
+ pmu->pmu.start_txn = thread_imc_pmu_start_txn;
+ pmu->pmu.cancel_txn = thread_imc_pmu_cancel_txn;
+ pmu->pmu.commit_txn = thread_imc_pmu_commit_txn;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/* init_nest_pmu_ref: Initialize the imc_pmu_ref struct for all the nodes */
+static int init_nest_pmu_ref(void)
+{
+ int nid, i, cpu;
+
+ nest_imc_refc = kcalloc(num_possible_nodes(), sizeof(*nest_imc_refc),
+ GFP_KERNEL);
+
+ if (!nest_imc_refc)
+ return -ENOMEM;
+
+ i = 0;
+ for_each_node(nid) {
+ /*
+ * Mutex lock to avoid races while tracking the number of
+ * sessions using the chip's nest pmu units.
+ */
+ mutex_init(&nest_imc_refc[i].lock);
+
+ /*
+ * Loop to init the "id" with the node_id. Variable "i" is initialized to
+ * 0 and used as an index into the array. "i" will not go off the
+ * end of the array since the "for_each_node" loops for "N_POSSIBLE"
+ * nodes only.
+ */
+ nest_imc_refc[i++].id = nid;
+ }
+
+ /*
+ * Loop to init the per_cpu "local_nest_imc_refc" with the proper
+ * "nest_imc_refc" index. This makes get_nest_pmu_ref() alot simple.
+ */
+ for_each_possible_cpu(cpu) {
+ nid = cpu_to_node(cpu);
+ for (i = 0; i < num_possible_nodes(); i++) {
+ if (nest_imc_refc[i].id == nid) {
+ per_cpu(local_nest_imc_refc, cpu) = &nest_imc_refc[i];
+ break;
+ }
+ }
+ }
+ return 0;
+}
+
+static void cleanup_all_core_imc_memory(void)
+{
+ int i, nr_cores = num_present_cpus() / threads_per_core;
+ struct imc_mem_info *ptr = core_imc_pmu->mem_info;
+ int size = core_imc_pmu->counter_mem_size;
+
+ /* mem_info will never be NULL */
+ for (i = 0; i < nr_cores; i++) {
+ if (ptr[i].vbase)
+ free_pages((u64)ptr[i].vbase, get_order(size));
+ }
+
+ kfree(ptr);
+ kfree(core_imc_refc);
+}
+
+static void thread_imc_ldbar_disable(void *dummy)
+{
+ /*
+ * By Zeroing LDBAR, we disable thread-imc
+ * updates.
+ */
+ mtspr(SPRN_LDBAR, 0);
+}
+
+void thread_imc_disable(void)
+{
+ on_each_cpu(thread_imc_ldbar_disable, NULL, 1);
+}
+
+static void cleanup_all_thread_imc_memory(void)
+{
+ int i, order = get_order(thread_imc_mem_size);
+
+ for_each_online_cpu(i) {
+ if (per_cpu(thread_imc_mem, i))
+ free_pages((u64)per_cpu(thread_imc_mem, i), order);
+
+ }
+}
+
+/*
+ * Common function to unregister cpu hotplug callback and
+ * free the memory.
+ * TODO: Need to handle pmu unregistering, which will be
+ * done in a follow-up series.
+ */
+static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
+{
+ if (pmu_ptr->domain == IMC_DOMAIN_NEST) {
+ mutex_lock(&nest_init_lock);
+ if (nest_pmus == 1) {
+ cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE);
+ kfree(nest_imc_refc);
+ }
+
+ if (nest_pmus > 0)
+ nest_pmus--;
+ mutex_unlock(&nest_init_lock);
+ }
+
+ /* Free core_imc memory */
+ if (pmu_ptr->domain == IMC_DOMAIN_CORE) {
+ cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE);
+ cleanup_all_core_imc_memory();
+ }
+
+ /* Free thread_imc memory */
+ if (pmu_ptr->domain == IMC_DOMAIN_THREAD) {
+ cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE);
+ cleanup_all_thread_imc_memory();
+ }
+
+ /* Only free the attr_groups which are dynamically allocated */
+ kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs);
+ kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]);
+ kfree(pmu_ptr);
+ return;
+}
+
+
+/*
+ * imc_mem_init : Function to support memory allocation for core imc.
+ */
+static int imc_mem_init(struct imc_pmu *pmu_ptr, struct device_node *parent,
+ int pmu_index)
+{
+ const char *s;
+ int nr_cores, cpu, res;
+
+ if (of_property_read_string(parent, "name", &s))
+ return -ENODEV;
+
+ switch (pmu_ptr->domain) {
+ case IMC_DOMAIN_NEST:
+ /* Update the pmu name */
+ pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s_imc", "nest_", s);
+ if (!pmu_ptr->pmu.name)
+ return -ENOMEM;
+
+ /* Needed for hotplug/migration */
+ per_nest_pmu_arr[pmu_index] = pmu_ptr;
+ break;
+ case IMC_DOMAIN_CORE:
+ /* Update the pmu name */
+ pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s", s, "_imc");
+ if (!pmu_ptr->pmu.name)
+ return -ENOMEM;
+
+ nr_cores = num_present_cpus() / threads_per_core;
+ pmu_ptr->mem_info = kcalloc(nr_cores, sizeof(struct imc_mem_info),
+ GFP_KERNEL);
+
+ if (!pmu_ptr->mem_info)
+ return -ENOMEM;
+
+ core_imc_refc = kcalloc(nr_cores, sizeof(struct imc_pmu_ref),
+ GFP_KERNEL);
+
+ if (!core_imc_refc)
+ return -ENOMEM;
+
+ core_imc_pmu = pmu_ptr;
+ break;
+ case IMC_DOMAIN_THREAD:
+ /* Update the pmu name */
+ pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s", s, "_imc");
+ if (!pmu_ptr->pmu.name)
+ return -ENOMEM;
+
+ thread_imc_mem_size = pmu_ptr->counter_mem_size;
+ for_each_online_cpu(cpu) {
+ res = thread_imc_mem_alloc(cpu, pmu_ptr->counter_mem_size);
+ if (res)
+ return res;
+ }
+
+ thread_imc_pmu = pmu_ptr;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * init_imc_pmu : Setup and register the IMC pmu device.
+ *
+ * @parent: Device tree unit node
+ * @pmu_ptr: memory allocated for this pmu
+ * @pmu_idx: Count of nest pmc registered
+ *
+ * init_imc_pmu() setup pmu cpumask and registers for a cpu hotplug callback.
+ * Handles failure cases and accordingly frees memory.
+ */
+int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_idx)
+{
+ int ret;
+
+ ret = imc_mem_init(pmu_ptr, parent, pmu_idx);
+ if (ret)
+ goto err_free;
+
+ switch (pmu_ptr->domain) {
+ case IMC_DOMAIN_NEST:
+ /*
+ * Nest imc pmus need only one cpu per chip, so we initialize the
+ * cpumask for the first nest imc pmu and use the same for the
+ * rest. To handle the cpuhotplug callback unregister, we track
+ * the number of nest pmus in "nest_pmus".
+ */
+ mutex_lock(&nest_init_lock);
+ if (nest_pmus == 0) {
+ ret = init_nest_pmu_ref();
+ if (ret) {
+ mutex_unlock(&nest_init_lock);
+ goto err_free;
+ }
+ /* Register for cpu hotplug notification. */
+ ret = nest_pmu_cpumask_init();
+ if (ret) {
+ mutex_unlock(&nest_init_lock);
+ goto err_free;
+ }
+ }
+ nest_pmus++;
+ mutex_unlock(&nest_init_lock);
+ break;
+ case IMC_DOMAIN_CORE:
+ ret = core_imc_pmu_cpumask_init();
+ if (ret) {
+ cleanup_all_core_imc_memory();
+ return ret;
+ }
+
+ break;
+ case IMC_DOMAIN_THREAD:
+ ret = thread_imc_cpu_init();
+ if (ret) {
+ cleanup_all_thread_imc_memory();
+ return ret;
+ }
+
+ break;
+ default:
+ return -1; /* Unknown domain */
+ }
+
+ ret = update_events_in_group(parent, pmu_ptr);
+ if (ret)
+ goto err_free;
+
+ ret = update_pmu_ops(pmu_ptr);
+ if (ret)
+ goto err_free;
+
+ ret = perf_pmu_register(&pmu_ptr->pmu, pmu_ptr->pmu.name, -1);
+ if (ret)
+ goto err_free;
+
+ pr_info("%s performance monitor hardware support registered\n",
+ pmu_ptr->pmu.name);
+
+ return 0;
+
+err_free:
+ imc_common_cpuhp_mem_free(pmu_ptr);
+ return ret;
+}
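The format attributes registered near the top of imc-pmu.c ("event" config:0-40, "offset" config:0-31, "rvalue" config:32, "mode" config:33-40) define how perf_event_attr.config is carved up before the driver masks it with IMC_EVENT_OFFSET_MASK. A user-space style decode of a hypothetical encoding, not part of the patch:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t config = 0x100000f40ULL;		/* hypothetical event encoding */

	uint32_t offset = config & 0xffffffffULL;	/* config:0-31 */
	uint32_t rvalue = (config >> 32) & 0x1;		/* config:32 */
	uint32_t mode   = (config >> 33) & 0xff;	/* config:33-40 */

	printf("offset=0x%x rvalue=%u mode=0x%x\n", offset, rvalue, mode);	/* 0xf40, 1, 0 */
	return 0;
}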
diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
index 3f3aa9a7063a..2efee3f196f5 100644
--- a/arch/powerpc/perf/isa207-common.c
+++ b/arch/powerpc/perf/isa207-common.c
@@ -99,7 +99,7 @@ static void mmcra_sdar_mode(u64 event, unsigned long *mmcra)
else if (!cpu_has_feature(CPU_FTR_POWER9_DD1) && p9_SDAR_MODE(event))
*mmcra |= p9_SDAR_MODE(event) << MMCRA_SDAR_MODE_SHIFT;
else
- *mmcra |= MMCRA_SDAR_MODE_TLB;
+ *mmcra |= MMCRA_SDAR_MODE_DCACHE;
} else
*mmcra |= MMCRA_SDAR_MODE_TLB;
}
@@ -488,8 +488,8 @@ static int find_alternative(u64 event, const unsigned int ev_alt[][MAX_ALT], int
return -1;
}
-int isa207_get_alternatives(u64 event, u64 alt[],
- const unsigned int ev_alt[][MAX_ALT], int size)
+int isa207_get_alternatives(u64 event, u64 alt[], int size, unsigned int flags,
+ const unsigned int ev_alt[][MAX_ALT])
{
int i, j, num_alt = 0;
u64 alt_event;
@@ -505,5 +505,30 @@ int isa207_get_alternatives(u64 event, u64 alt[],
}
}
+ if (flags & PPMU_ONLY_COUNT_RUN) {
+ /*
+ * We're only counting in RUN state, so PM_CYC is equivalent to
+ * PM_RUN_CYC and PM_INST_CMPL === PM_RUN_INST_CMPL.
+ */
+ j = num_alt;
+ for (i = 0; i < num_alt; ++i) {
+ switch (alt[i]) {
+ case 0x1e: /* PMC_CYC */
+ alt[j++] = 0x600f4; /* PM_RUN_CYC */
+ break;
+ case 0x600f4:
+ alt[j++] = 0x1e;
+ break;
+ case 0x2: /* PM_INST_CMPL */
+ alt[j++] = 0x500fa; /* PM_RUN_INST_CMPL */
+ break;
+ case 0x500fa:
+ alt[j++] = 0x2;
+ break;
+ }
+ }
+ num_alt = j;
+ }
+
return num_alt;
}
diff --git a/arch/powerpc/perf/isa207-common.h b/arch/powerpc/perf/isa207-common.h
index 8acbe6e802c7..6c737d675792 100644
--- a/arch/powerpc/perf/isa207-common.h
+++ b/arch/powerpc/perf/isa207-common.h
@@ -247,6 +247,7 @@
#define MMCRA_SDAR_MODE_SHIFT 42
#define MMCRA_SDAR_MODE_TLB (1ull << MMCRA_SDAR_MODE_SHIFT)
#define MMCRA_SDAR_MODE_NO_UPDATES ~(0x3ull << MMCRA_SDAR_MODE_SHIFT)
+#define MMCRA_SDAR_MODE_DCACHE (2ull << MMCRA_SDAR_MODE_SHIFT)
#define MMCRA_IFM_SHIFT 30
#define MMCRA_THR_CTR_MANT_SHIFT 19
#define MMCRA_THR_CTR_MANT_MASK 0x7Ful
@@ -287,8 +288,8 @@ int isa207_compute_mmcr(u64 event[], int n_ev,
unsigned int hwc[], unsigned long mmcr[],
struct perf_event *pevents[]);
void isa207_disable_pmc(unsigned int pmc, unsigned long mmcr[]);
-int isa207_get_alternatives(u64 event, u64 alt[],
- const unsigned int ev_alt[][MAX_ALT], int size);
+int isa207_get_alternatives(u64 event, u64 alt[], int size, unsigned int flags,
+ const unsigned int ev_alt[][MAX_ALT]);
void isa207_get_mem_data_src(union perf_mem_data_src *dsrc, u32 flags,
struct pt_regs *regs);
void isa207_get_mem_weight(u64 *weight);
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index 5463516e369b..c9356955cab4 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -50,34 +50,11 @@ static const unsigned int event_alternatives[][MAX_ALT] = {
static int power8_get_alternatives(u64 event, unsigned int flags, u64 alt[])
{
- int i, j, num_alt = 0;
-
- num_alt = isa207_get_alternatives(event, alt, event_alternatives,
- (int)ARRAY_SIZE(event_alternatives));
- if (flags & PPMU_ONLY_COUNT_RUN) {
- /*
- * We're only counting in RUN state, so PM_CYC is equivalent to
- * PM_RUN_CYC and PM_INST_CMPL === PM_RUN_INST_CMPL.
- */
- j = num_alt;
- for (i = 0; i < num_alt; ++i) {
- switch (alt[i]) {
- case PM_CYC:
- alt[j++] = PM_RUN_CYC;
- break;
- case PM_RUN_CYC:
- alt[j++] = PM_CYC;
- break;
- case PM_INST_CMPL:
- alt[j++] = PM_RUN_INST_CMPL;
- break;
- case PM_RUN_INST_CMPL:
- alt[j++] = PM_INST_CMPL;
- break;
- }
- }
- num_alt = j;
- }
+ int num_alt = 0;
+
+ num_alt = isa207_get_alternatives(event, alt,
+ ARRAY_SIZE(event_alternatives), flags,
+ event_alternatives);
return num_alt;
}
diff --git a/arch/powerpc/perf/power9-events-list.h b/arch/powerpc/perf/power9-events-list.h
index 50689180a6c1..e99c6bf4d391 100644
--- a/arch/powerpc/perf/power9-events-list.h
+++ b/arch/powerpc/perf/power9-events-list.h
@@ -16,13 +16,16 @@ EVENT(PM_CYC, 0x0001e)
EVENT(PM_ICT_NOSLOT_CYC, 0x100f8)
EVENT(PM_CMPLU_STALL, 0x1e054)
EVENT(PM_INST_CMPL, 0x00002)
-EVENT(PM_BRU_CMPL, 0x4d05e)
+EVENT(PM_BR_CMPL, 0x4d05e)
EVENT(PM_BR_MPRED_CMPL, 0x400f6)
/* All L1 D cache load references counted at finish, gated by reject */
EVENT(PM_LD_REF_L1, 0x100fc)
/* Load Missed L1 */
EVENT(PM_LD_MISS_L1_FIN, 0x2c04e)
+EVENT(PM_LD_MISS_L1, 0x3e054)
+/* Alternate event code for PM_LD_MISS_L1 */
+EVENT(PM_LD_MISS_L1_ALT, 0x400f0)
/* Store Missed L1 */
EVENT(PM_ST_MISS_L1, 0x300f0)
/* L1 cache data prefetches */
@@ -62,3 +65,7 @@ EVENT(PM_INST_DISP, 0x200f2)
EVENT(PM_INST_DISP_ALT, 0x300f2)
/* Alternate Branch event code */
EVENT(PM_BR_CMPL_ALT, 0x10012)
+/* Branch events that are not strongly biased */
+EVENT(PM_BR_2PATH, 0x20036)
+/* Alternate branch events that are not strongly biased */
+EVENT(PM_BR_2PATH_ALT, 0x40036)
diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
index 2280cf87ff9c..24b5b5b7a206 100644
--- a/arch/powerpc/perf/power9-pmu.c
+++ b/arch/powerpc/perf/power9-pmu.c
@@ -109,14 +109,17 @@ static const unsigned int power9_event_alternatives[][MAX_ALT] = {
{ PM_INST_DISP, PM_INST_DISP_ALT },
{ PM_RUN_CYC_ALT, PM_RUN_CYC },
{ PM_RUN_INST_CMPL_ALT, PM_RUN_INST_CMPL },
+ { PM_LD_MISS_L1, PM_LD_MISS_L1_ALT },
+ { PM_BR_2PATH, PM_BR_2PATH_ALT },
};
static int power9_get_alternatives(u64 event, unsigned int flags, u64 alt[])
{
int num_alt = 0;
- num_alt = isa207_get_alternatives(event, alt, power9_event_alternatives,
- (int)ARRAY_SIZE(power9_event_alternatives));
+ num_alt = isa207_get_alternatives(event, alt,
+ ARRAY_SIZE(power9_event_alternatives), flags,
+ power9_event_alternatives);
return num_alt;
}
@@ -125,7 +128,7 @@ GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-frontend, PM_ICT_NOSLOT_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-backend, PM_CMPLU_STALL);
GENERIC_EVENT_ATTR(instructions, PM_INST_CMPL);
-GENERIC_EVENT_ATTR(branch-instructions, PM_BRU_CMPL);
+GENERIC_EVENT_ATTR(branch-instructions, PM_BR_CMPL);
GENERIC_EVENT_ATTR(branch-misses, PM_BR_MPRED_CMPL);
GENERIC_EVENT_ATTR(cache-references, PM_LD_REF_L1);
GENERIC_EVENT_ATTR(cache-misses, PM_LD_MISS_L1_FIN);
@@ -143,7 +146,7 @@ CACHE_EVENT_ATTR(LLC-prefetches, PM_L3_PREF_ALL);
CACHE_EVENT_ATTR(LLC-store-misses, PM_L2_ST_MISS);
CACHE_EVENT_ATTR(LLC-stores, PM_L2_ST);
CACHE_EVENT_ATTR(branch-load-misses, PM_BR_MPRED_CMPL);
-CACHE_EVENT_ATTR(branch-loads, PM_BRU_CMPL);
+CACHE_EVENT_ATTR(branch-loads, PM_BR_CMPL);
CACHE_EVENT_ATTR(dTLB-load-misses, PM_DTLB_MISS);
CACHE_EVENT_ATTR(iTLB-load-misses, PM_ITLB_MISS);
@@ -152,7 +155,7 @@ static struct attribute *power9_events_attr[] = {
GENERIC_EVENT_PTR(PM_ICT_NOSLOT_CYC),
GENERIC_EVENT_PTR(PM_CMPLU_STALL),
GENERIC_EVENT_PTR(PM_INST_CMPL),
- GENERIC_EVENT_PTR(PM_BRU_CMPL),
+ GENERIC_EVENT_PTR(PM_BR_CMPL),
GENERIC_EVENT_PTR(PM_BR_MPRED_CMPL),
GENERIC_EVENT_PTR(PM_LD_REF_L1),
GENERIC_EVENT_PTR(PM_LD_MISS_L1_FIN),
@@ -169,7 +172,7 @@ static struct attribute *power9_events_attr[] = {
CACHE_EVENT_PTR(PM_L2_ST_MISS),
CACHE_EVENT_PTR(PM_L2_ST),
CACHE_EVENT_PTR(PM_BR_MPRED_CMPL),
- CACHE_EVENT_PTR(PM_BRU_CMPL),
+ CACHE_EVENT_PTR(PM_BR_CMPL),
CACHE_EVENT_PTR(PM_DTLB_MISS),
CACHE_EVENT_PTR(PM_ITLB_MISS),
NULL
@@ -244,7 +247,7 @@ static int power9_generic_events[] = {
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = PM_ICT_NOSLOT_CYC,
[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = PM_CMPLU_STALL,
[PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_CMPL,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BRU_CMPL,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BR_CMPL,
[PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED_CMPL,
[PERF_COUNT_HW_CACHE_REFERENCES] = PM_LD_REF_L1,
[PERF_COUNT_HW_CACHE_MISSES] = PM_LD_MISS_L1_FIN,
@@ -370,7 +373,7 @@ static int power9_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
},
[ C(BPU) ] = {
[ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = PM_BRU_CMPL,
+ [ C(RESULT_ACCESS) ] = PM_BR_CMPL,
[ C(RESULT_MISS) ] = PM_BR_MPRED_CMPL,
},
[ C(OP_WRITE) ] = {
@@ -459,8 +462,8 @@ static int __init init_power9_pmu(void)
* Power9 DD1 should use PM_BR_CMPL_ALT event code for
* "branches" to provide correct counter value.
*/
- EVENT_VAR(PM_BRU_CMPL, _g).id = PM_BR_CMPL_ALT;
- EVENT_VAR(PM_BRU_CMPL, _c).id = PM_BR_CMPL_ALT;
+ EVENT_VAR(PM_BR_CMPL, _g).id = PM_BR_CMPL_ALT;
+ EVENT_VAR(PM_BR_CMPL, _c).id = PM_BR_CMPL_ALT;
rc = register_power_pmu(&power9_isa207_pmu);
} else {
rc = register_power_pmu(&power9_pmu);
diff --git a/arch/powerpc/platforms/44x/Makefile b/arch/powerpc/platforms/44x/Makefile
index 72b824160660..2c5651992369 100644
--- a/arch/powerpc/platforms/44x/Makefile
+++ b/arch/powerpc/platforms/44x/Makefile
@@ -1,6 +1,6 @@
-obj-$(CONFIG_44x) += misc_44x.o
+obj-y += misc_44x.o machine_check.o
ifneq ($(CONFIG_PPC4xx_CPM),y)
-obj-$(CONFIG_44x) += idle.o
+obj-y += idle.o
endif
obj-$(CONFIG_PPC44x_SIMPLE) += ppc44x_simple.o
obj-$(CONFIG_EBONY) += ebony.o
diff --git a/arch/powerpc/platforms/44x/machine_check.c b/arch/powerpc/platforms/44x/machine_check.c
new file mode 100644
index 000000000000..034d70d6d335
--- /dev/null
+++ b/arch/powerpc/platforms/44x/machine_check.c
@@ -0,0 +1,89 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/ptrace.h>
+
+#include <asm/reg.h>
+
+int machine_check_440A(struct pt_regs *regs)
+{
+ unsigned long reason = regs->dsisr;
+
+ printk("Machine check in kernel mode.\n");
+ if (reason & ESR_IMCP){
+ printk("Instruction Synchronous Machine Check exception\n");
+ mtspr(SPRN_ESR, reason & ~ESR_IMCP);
+ }
+ else {
+ u32 mcsr = mfspr(SPRN_MCSR);
+ if (mcsr & MCSR_IB)
+ printk("Instruction Read PLB Error\n");
+ if (mcsr & MCSR_DRB)
+ printk("Data Read PLB Error\n");
+ if (mcsr & MCSR_DWB)
+ printk("Data Write PLB Error\n");
+ if (mcsr & MCSR_TLBP)
+ printk("TLB Parity Error\n");
+ if (mcsr & MCSR_ICP){
+ flush_instruction_cache();
+ printk("I-Cache Parity Error\n");
+ }
+ if (mcsr & MCSR_DCSP)
+ printk("D-Cache Search Parity Error\n");
+ if (mcsr & MCSR_DCFP)
+ printk("D-Cache Flush Parity Error\n");
+ if (mcsr & MCSR_IMPE)
+ printk("Machine Check exception is imprecise\n");
+
+ /* Clear MCSR */
+ mtspr(SPRN_MCSR, mcsr);
+ }
+ return 0;
+}
+
+#ifdef CONFIG_PPC_47x
+int machine_check_47x(struct pt_regs *regs)
+{
+ unsigned long reason = regs->dsisr;
+ u32 mcsr;
+
+ printk(KERN_ERR "Machine check in kernel mode.\n");
+ if (reason & ESR_IMCP) {
+ printk(KERN_ERR "Instruction Synchronous Machine Check exception\n");
+ mtspr(SPRN_ESR, reason & ~ESR_IMCP);
+ return 0;
+ }
+ mcsr = mfspr(SPRN_MCSR);
+ if (mcsr & MCSR_IB)
+ printk(KERN_ERR "Instruction Read PLB Error\n");
+ if (mcsr & MCSR_DRB)
+ printk(KERN_ERR "Data Read PLB Error\n");
+ if (mcsr & MCSR_DWB)
+ printk(KERN_ERR "Data Write PLB Error\n");
+ if (mcsr & MCSR_TLBP)
+ printk(KERN_ERR "TLB Parity Error\n");
+ if (mcsr & MCSR_ICP) {
+ flush_instruction_cache();
+ printk(KERN_ERR "I-Cache Parity Error\n");
+ }
+ if (mcsr & MCSR_DCSP)
+ printk(KERN_ERR "D-Cache Search Parity Error\n");
+ if (mcsr & PPC47x_MCSR_GPR)
+ printk(KERN_ERR "GPR Parity Error\n");
+ if (mcsr & PPC47x_MCSR_FPR)
+ printk(KERN_ERR "FPR Parity Error\n");
+ if (mcsr & PPC47x_MCSR_IPR)
+ printk(KERN_ERR "Machine Check exception is imprecise\n");
+
+ /* Clear MCSR */
+ mtspr(SPRN_MCSR, mcsr);
+
+ return 0;
+}
+#endif /* CONFIG_PPC_47x */
diff --git a/arch/powerpc/platforms/4xx/Makefile b/arch/powerpc/platforms/4xx/Makefile
new file mode 100644
index 000000000000..9779c32db34e
--- /dev/null
+++ b/arch/powerpc/platforms/4xx/Makefile
@@ -0,0 +1,8 @@
+obj-y += uic.o machine_check.o
+obj-$(CONFIG_PPC4xx_OCM) += ocm.o
+obj-$(CONFIG_4xx_SOC) += soc.o
+obj-$(CONFIG_PCI) += pci.o
+obj-$(CONFIG_PPC4xx_HSTA_MSI) += hsta_msi.o
+obj-$(CONFIG_PPC4xx_MSI) += msi.o
+obj-$(CONFIG_PPC4xx_CPM) += cpm.o
+obj-$(CONFIG_PPC4xx_GPIO) += gpio.o
diff --git a/arch/powerpc/sysdev/ppc4xx_cpm.c b/arch/powerpc/platforms/4xx/cpm.c
index ba95adf81d8d..53ff81ca8a3c 100644
--- a/arch/powerpc/sysdev/ppc4xx_cpm.c
+++ b/arch/powerpc/platforms/4xx/cpm.c
@@ -240,7 +240,7 @@ static int cpm_suspend_enter(suspend_state_t state)
return 0;
}
-static struct platform_suspend_ops cpm_suspend_ops = {
+static const struct platform_suspend_ops cpm_suspend_ops = {
.valid = cpm_suspend_valid,
.enter = cpm_suspend_enter,
};
@@ -278,8 +278,8 @@ static int __init cpm_init(void)
dcr_len = dcr_resource_len(np, 0);
if (dcr_base == 0 || dcr_len == 0) {
- printk(KERN_ERR "cpm: could not parse dcr property for %s\n",
- np->full_name);
+ printk(KERN_ERR "cpm: could not parse dcr property for %pOF\n",
+ np);
ret = -EINVAL;
goto node_put;
}
@@ -287,8 +287,8 @@ static int __init cpm_init(void)
cpm.dcr_host = dcr_map(np, dcr_base, dcr_len);
if (!DCR_MAP_OK(cpm.dcr_host)) {
- printk(KERN_ERR "cpm: failed to map dcr property for %s\n",
- np->full_name);
+ printk(KERN_ERR "cpm: failed to map dcr property for %pOF\n",
+ np);
ret = -EINVAL;
goto node_put;
}
diff --git a/arch/powerpc/sysdev/ppc4xx_gpio.c b/arch/powerpc/platforms/4xx/gpio.c
index 5382d04dd872..2238e369cde4 100644
--- a/arch/powerpc/sysdev/ppc4xx_gpio.c
+++ b/arch/powerpc/platforms/4xx/gpio.c
@@ -198,8 +198,7 @@ static int __init ppc4xx_add_gpiochips(void)
goto err;
continue;
err:
- pr_err("%s: registration failed with status %d\n",
- np->full_name, ret);
+ pr_err("%pOF: registration failed with status %d\n", np, ret);
kfree(ppc4xx_gc);
/* try others anyway */
}
diff --git a/arch/powerpc/sysdev/ppc4xx_hsta_msi.c b/arch/powerpc/platforms/4xx/hsta_msi.c
index 9926ad67af76..9926ad67af76 100644
--- a/arch/powerpc/sysdev/ppc4xx_hsta_msi.c
+++ b/arch/powerpc/platforms/4xx/hsta_msi.c
diff --git a/arch/powerpc/platforms/4xx/machine_check.c b/arch/powerpc/platforms/4xx/machine_check.c
new file mode 100644
index 000000000000..aa039dfaf82f
--- /dev/null
+++ b/arch/powerpc/platforms/4xx/machine_check.c
@@ -0,0 +1,26 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/ptrace.h>
+
+#include <asm/reg.h>
+
+int machine_check_4xx(struct pt_regs *regs)
+{
+ unsigned long reason = regs->dsisr;
+
+ if (reason & ESR_IMCP) {
+ printk("Instruction");
+ mtspr(SPRN_ESR, reason & ~ESR_IMCP);
+ } else
+ printk("Data");
+ printk(" machine check in kernel mode.\n");
+
+ return 0;
+}
diff --git a/arch/powerpc/sysdev/ppc4xx_msi.c b/arch/powerpc/platforms/4xx/msi.c
index 590dab4f47d6..d50417e23add 100644
--- a/arch/powerpc/sysdev/ppc4xx_msi.c
+++ b/arch/powerpc/platforms/4xx/msi.c
@@ -233,8 +233,7 @@ static int ppc4xx_msi_probe(struct platform_device *dev)
/* Get MSI ranges */
err = of_address_to_resource(dev->dev.of_node, 0, &res);
if (err) {
- dev_err(&dev->dev, "%s resource error!\n",
- dev->dev.of_node->full_name);
+ dev_err(&dev->dev, "%pOF resource error!\n", dev->dev.of_node);
goto error_out;
}
diff --git a/arch/powerpc/sysdev/ppc4xx_ocm.c b/arch/powerpc/platforms/4xx/ocm.c
index 85d9e37f5ccb..85d9e37f5ccb 100644
--- a/arch/powerpc/sysdev/ppc4xx_ocm.c
+++ b/arch/powerpc/platforms/4xx/ocm.c
diff --git a/arch/powerpc/sysdev/ppc4xx_pci.c b/arch/powerpc/platforms/4xx/pci.c
index 086aca69ecae..73e6b36bcd51 100644
--- a/arch/powerpc/sysdev/ppc4xx_pci.c
+++ b/arch/powerpc/platforms/4xx/pci.c
@@ -32,7 +32,7 @@
#include <asm/dcr-regs.h>
#include <mm/mmu_decl.h>
-#include "ppc4xx_pci.h"
+#include "pci.h"
static int dma_offset_set;
@@ -127,9 +127,9 @@ static int __init ppc4xx_parse_dma_ranges(struct pci_controller *hose,
* within 32 bits space
*/
if (cpu_addr != 0 || pci_addr > 0xffffffff) {
- printk(KERN_WARNING "%s: Ignored unsupported dma range"
+ printk(KERN_WARNING "%pOF: Ignored unsupported dma range"
" 0x%016llx...0x%016llx -> 0x%016llx\n",
- hose->dn->full_name,
+ hose->dn,
pci_addr, pci_addr + size - 1, cpu_addr);
continue;
}
@@ -152,8 +152,7 @@ static int __init ppc4xx_parse_dma_ranges(struct pci_controller *hose,
/* We only support one global DMA offset */
if (dma_offset_set && pci_dram_offset != res->start) {
- printk(KERN_ERR "%s: dma-ranges(s) mismatch\n",
- hose->dn->full_name);
+ printk(KERN_ERR "%pOF: dma-ranges(s) mismatch\n", hose->dn);
return -ENXIO;
}
@@ -161,17 +160,16 @@ static int __init ppc4xx_parse_dma_ranges(struct pci_controller *hose,
* DMA bounce buffers
*/
if (size < total_memory) {
- printk(KERN_ERR "%s: dma-ranges too small "
+ printk(KERN_ERR "%pOF: dma-ranges too small "
"(size=%llx total_memory=%llx)\n",
- hose->dn->full_name, size, (u64)total_memory);
+ hose->dn, size, (u64)total_memory);
return -ENXIO;
}
/* Check we are a power of 2 size and that base is a multiple of size*/
if ((size & (size - 1)) != 0 ||
(res->start & (size - 1)) != 0) {
- printk(KERN_ERR "%s: dma-ranges unaligned\n",
- hose->dn->full_name);
+ printk(KERN_ERR "%pOF: dma-ranges unaligned\n", hose->dn);
return -ENXIO;
}
@@ -181,8 +179,8 @@ static int __init ppc4xx_parse_dma_ranges(struct pci_controller *hose,
if (res->end > 0xffffffff &&
!(of_device_is_compatible(hose->dn, "ibm,plb-pciex-460sx")
|| of_device_is_compatible(hose->dn, "ibm,plb-pciex-476fpe"))) {
- printk(KERN_ERR "%s: dma-ranges outside of 32 bits space\n",
- hose->dn->full_name);
+ printk(KERN_ERR "%pOF: dma-ranges outside of 32 bits space\n",
+ hose->dn);
return -ENXIO;
}
out:
@@ -233,8 +231,7 @@ static int __init ppc4xx_setup_one_pci_PMM(struct pci_controller *hose,
*/
if ((plb_addr + size) > 0xffffffffull || !is_power_of_2(size) ||
size < 0x1000 || (plb_addr & (size - 1)) != 0) {
- printk(KERN_WARNING "%s: Resource out of range\n",
- hose->dn->full_name);
+ printk(KERN_WARNING "%pOF: Resource out of range\n", hose->dn);
return -1;
}
ma = (0xffffffffu << ilog2(size)) | 1;
@@ -266,8 +263,7 @@ static void __init ppc4xx_configure_pci_PMMs(struct pci_controller *hose,
if (!(res->flags & IORESOURCE_MEM))
continue;
if (j > 2) {
- printk(KERN_WARNING "%s: Too many ranges\n",
- hose->dn->full_name);
+ printk(KERN_WARNING "%pOF: Too many ranges\n", hose->dn);
break;
}
@@ -292,8 +288,8 @@ static void __init ppc4xx_configure_pci_PMMs(struct pci_controller *hose,
if (j <= 2 && !found_isa_hole && hose->isa_mem_size)
if (ppc4xx_setup_one_pci_PMM(hose, reg, hose->isa_mem_phys, 0,
hose->isa_mem_size, 0, j) == 0)
- printk(KERN_INFO "%s: Legacy ISA memory support enabled\n",
- hose->dn->full_name);
+ printk(KERN_INFO "%pOF: Legacy ISA memory support enabled\n",
+ hose->dn);
}
static void __init ppc4xx_configure_pci_PTMs(struct pci_controller *hose,
@@ -333,21 +329,20 @@ static void __init ppc4xx_probe_pci_bridge(struct device_node *np)
/* Check if device is enabled */
if (!of_device_is_available(np)) {
- printk(KERN_INFO "%s: Port disabled via device-tree\n",
- np->full_name);
+ printk(KERN_INFO "%pOF: Port disabled via device-tree\n", np);
return;
}
/* Fetch config space registers address */
if (of_address_to_resource(np, 0, &rsrc_cfg)) {
- printk(KERN_ERR "%s: Can't get PCI config register base !",
- np->full_name);
+ printk(KERN_ERR "%pOF: Can't get PCI config register base !",
+ np);
return;
}
/* Fetch host bridge internal registers address */
if (of_address_to_resource(np, 3, &rsrc_reg)) {
- printk(KERN_ERR "%s: Can't get PCI internal register base !",
- np->full_name);
+ printk(KERN_ERR "%pOF: Can't get PCI internal register base !",
+ np);
return;
}
@@ -361,7 +356,7 @@ static void __init ppc4xx_probe_pci_bridge(struct device_node *np)
/* Map registers */
reg = ioremap(rsrc_reg.start, resource_size(&rsrc_reg));
if (reg == NULL) {
- printk(KERN_ERR "%s: Can't map registers !", np->full_name);
+ printk(KERN_ERR "%pOF: Can't map registers !", np);
goto fail;
}
@@ -423,8 +418,8 @@ static int __init ppc4xx_setup_one_pcix_POM(struct pci_controller *hose,
if (!is_power_of_2(size) || size < 0x1000 ||
(plb_addr & (size - 1)) != 0) {
- printk(KERN_WARNING "%s: Resource out of range\n",
- hose->dn->full_name);
+ printk(KERN_WARNING "%pOF: Resource out of range\n",
+ hose->dn);
return -1;
}
@@ -467,8 +462,7 @@ static void __init ppc4xx_configure_pcix_POMs(struct pci_controller *hose,
if (!(res->flags & IORESOURCE_MEM))
continue;
if (j > 1) {
- printk(KERN_WARNING "%s: Too many ranges\n",
- hose->dn->full_name);
+ printk(KERN_WARNING "%pOF: Too many ranges\n", hose->dn);
break;
}
@@ -493,8 +487,8 @@ static void __init ppc4xx_configure_pcix_POMs(struct pci_controller *hose,
if (j <= 1 && !found_isa_hole && hose->isa_mem_size)
if (ppc4xx_setup_one_pcix_POM(hose, reg, hose->isa_mem_phys, 0,
hose->isa_mem_size, 0, j) == 0)
- printk(KERN_INFO "%s: Legacy ISA memory support enabled\n",
- hose->dn->full_name);
+ printk(KERN_INFO "%pOF: Legacy ISA memory support enabled\n",
+ hose->dn);
}
static void __init ppc4xx_configure_pcix_PIMs(struct pci_controller *hose,
@@ -539,14 +533,14 @@ static void __init ppc4xx_probe_pcix_bridge(struct device_node *np)
/* Fetch config space registers address */
if (of_address_to_resource(np, 0, &rsrc_cfg)) {
- printk(KERN_ERR "%s:Can't get PCI-X config register base !",
- np->full_name);
+ printk(KERN_ERR "%pOF: Can't get PCI-X config register base !",
+ np);
return;
}
/* Fetch host bridge internal registers address */
if (of_address_to_resource(np, 3, &rsrc_reg)) {
- printk(KERN_ERR "%s: Can't get PCI-X internal register base !",
- np->full_name);
+ printk(KERN_ERR "%pOF: Can't get PCI-X internal register base !",
+ np);
return;
}
@@ -568,7 +562,7 @@ static void __init ppc4xx_probe_pcix_bridge(struct device_node *np)
/* Map registers */
reg = ioremap(rsrc_reg.start, resource_size(&rsrc_reg));
if (reg == NULL) {
- printk(KERN_ERR "%s: Can't map registers !", np->full_name);
+ printk(KERN_ERR "%pOF: Can't map registers !", np);
goto fail;
}
@@ -1246,8 +1240,8 @@ static void __init ppc460sx_pciex_check_link(struct ppc4xx_pciex_port *port)
mbase = ioremap(port->cfg_space.start + 0x10000000, 0x1000);
if (mbase == NULL) {
- printk(KERN_ERR "%s: Can't map internal config space !",
- port->node->full_name);
+ printk(KERN_ERR "%pOF: Can't map internal config space !",
+ port->node);
goto done;
}
@@ -1389,7 +1383,7 @@ static void __init ppc_476fpe_pciex_check_link(struct ppc4xx_pciex_port *port)
port->index);
return;
}
-
+
while (timeout_ms--) {
val = in_le32(mbase + PECFG_TLDLP);
@@ -1448,8 +1442,7 @@ static int __init ppc4xx_pciex_check_core_init(struct device_node *np)
ppc4xx_pciex_hwops = &ppc_476fpe_pcie_hwops;
#endif
if (ppc4xx_pciex_hwops == NULL) {
- printk(KERN_WARNING "PCIE: unknown host type %s\n",
- np->full_name);
+ printk(KERN_WARNING "PCIE: unknown host type %pOF\n", np);
return -ENODEV;
}
@@ -1730,8 +1723,7 @@ static int __init ppc4xx_setup_one_pciex_POM(struct ppc4xx_pciex_port *port,
(index < 2 && size < 0x100000) ||
(index == 2 && size < 0x100) ||
(plb_addr & (size - 1)) != 0) {
- printk(KERN_WARNING "%s: Resource out of range\n",
- hose->dn->full_name);
+ printk(KERN_WARNING "%pOF: Resource out of range\n", hose->dn);
return -1;
}
@@ -1807,8 +1799,8 @@ static void __init ppc4xx_configure_pciex_POMs(struct ppc4xx_pciex_port *port,
if (!(res->flags & IORESOURCE_MEM))
continue;
if (j > 1) {
- printk(KERN_WARNING "%s: Too many ranges\n",
- port->node->full_name);
+ printk(KERN_WARNING "%pOF: Too many ranges\n",
+ port->node);
break;
}
@@ -1834,8 +1826,8 @@ static void __init ppc4xx_configure_pciex_POMs(struct ppc4xx_pciex_port *port,
if (ppc4xx_setup_one_pciex_POM(port, hose, mbase,
hose->isa_mem_phys, 0,
hose->isa_mem_size, 0, j) == 0)
- printk(KERN_INFO "%s: Legacy ISA memory support enabled\n",
- hose->dn->full_name);
+ printk(KERN_INFO "%pOF: Legacy ISA memory support enabled\n",
+ hose->dn);
/* Configure IO, always 64K starting at 0. We hard wire it to 64K !
* Note also that it -has- to be region index 2 on this HW
@@ -1970,8 +1962,8 @@ static void __init ppc4xx_pciex_port_setup_hose(struct ppc4xx_pciex_port *port)
(hose->first_busno + 1) * 0x100000,
busses * 0x100000);
if (cfg_data == NULL) {
- printk(KERN_ERR "%s: Can't map external config space !",
- port->node->full_name);
+ printk(KERN_ERR "%pOF: Can't map external config space !",
+ port->node);
goto fail;
}
hose->cfg_data = cfg_data;
@@ -1982,13 +1974,13 @@ static void __init ppc4xx_pciex_port_setup_hose(struct ppc4xx_pciex_port *port)
*/
mbase = ioremap(port->cfg_space.start + 0x10000000, 0x1000);
if (mbase == NULL) {
- printk(KERN_ERR "%s: Can't map internal config space !",
- port->node->full_name);
+ printk(KERN_ERR "%pOF: Can't map internal config space !",
+ port->node);
goto fail;
}
hose->cfg_addr = mbase;
- pr_debug("PCIE %s, bus %d..%d\n", port->node->full_name,
+ pr_debug("PCIE %pOF, bus %d..%d\n", port->node,
hose->first_busno, hose->last_busno);
pr_debug(" config space mapped at: root @0x%p, other @0x%p\n",
hose->cfg_addr, hose->cfg_data);
@@ -2100,14 +2092,13 @@ static void __init ppc4xx_probe_pciex_bridge(struct device_node *np)
/* Get the port number from the device-tree */
pval = of_get_property(np, "port", NULL);
if (pval == NULL) {
- printk(KERN_ERR "PCIE: Can't find port number for %s\n",
- np->full_name);
+ printk(KERN_ERR "PCIE: Can't find port number for %pOF\n", np);
return;
}
portno = *pval;
if (portno >= ppc4xx_pciex_port_count) {
- printk(KERN_ERR "PCIE: port number out of range for %s\n",
- np->full_name);
+ printk(KERN_ERR "PCIE: port number out of range for %pOF\n",
+ np);
return;
}
port = &ppc4xx_pciex_ports[portno];
@@ -2125,8 +2116,8 @@ static void __init ppc4xx_probe_pciex_bridge(struct device_node *np)
if (ppc4xx_pciex_hwops->want_sdr) {
pval = of_get_property(np, "sdr-base", NULL);
if (pval == NULL) {
- printk(KERN_ERR "PCIE: missing sdr-base for %s\n",
- np->full_name);
+ printk(KERN_ERR "PCIE: missing sdr-base for %pOF\n",
+ np);
return;
}
port->sdr_base = *pval;
@@ -2142,29 +2133,26 @@ static void __init ppc4xx_probe_pciex_bridge(struct device_node *np)
} else if (!strcmp(val, "pci")) {
port->endpoint = 0;
} else {
- printk(KERN_ERR "PCIE: missing or incorrect device_type for %s\n",
- np->full_name);
+ printk(KERN_ERR "PCIE: missing or incorrect device_type for %pOF\n",
+ np);
return;
}
/* Fetch config space registers address */
if (of_address_to_resource(np, 0, &port->cfg_space)) {
- printk(KERN_ERR "%s: Can't get PCI-E config space !",
- np->full_name);
+ printk(KERN_ERR "%pOF: Can't get PCI-E config space !", np);
return;
}
/* Fetch host bridge internal registers address */
if (of_address_to_resource(np, 1, &port->utl_regs)) {
- printk(KERN_ERR "%s: Can't get UTL register base !",
- np->full_name);
+ printk(KERN_ERR "%pOF: Can't get UTL register base !", np);
return;
}
/* Map DCRs */
dcrs = dcr_resource_start(np, 0);
if (dcrs == 0) {
- printk(KERN_ERR "%s: Can't get DCR register base !",
- np->full_name);
+ printk(KERN_ERR "%pOF: Can't get DCR register base !", np);
return;
}
port->dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
diff --git a/arch/powerpc/sysdev/ppc4xx_pci.h b/arch/powerpc/platforms/4xx/pci.h
index bb4821938ab1..bb4821938ab1 100644
--- a/arch/powerpc/sysdev/ppc4xx_pci.h
+++ b/arch/powerpc/platforms/4xx/pci.h
diff --git a/arch/powerpc/sysdev/ppc4xx_soc.c b/arch/powerpc/platforms/4xx/soc.c
index d41134d2f786..5e36508b2a70 100644
--- a/arch/powerpc/sysdev/ppc4xx_soc.c
+++ b/arch/powerpc/platforms/4xx/soc.c
@@ -90,7 +90,7 @@ static int __init ppc4xx_l2c_probe(void)
/* Get l2 cache size */
prop = of_get_property(np, "cache-size", NULL);
if (prop == NULL) {
- printk(KERN_ERR "%s: Can't get cache-size!\n", np->full_name);
+ printk(KERN_ERR "%pOF: Can't get cache-size!\n", np);
of_node_put(np);
return -ENODEV;
}
@@ -99,8 +99,7 @@ static int __init ppc4xx_l2c_probe(void)
/* Map DCRs */
dcrreg = of_get_property(np, "dcr-reg", &len);
if (!dcrreg || (len != 4 * sizeof(u32))) {
- printk(KERN_ERR "%s: Can't get DCR register base !",
- np->full_name);
+ printk(KERN_ERR "%pOF: Can't get DCR register base !", np);
of_node_put(np);
return -ENODEV;
}
diff --git a/arch/powerpc/sysdev/uic.c b/arch/powerpc/platforms/4xx/uic.c
index a00949f3e378..8b4dd0da0839 100644
--- a/arch/powerpc/sysdev/uic.c
+++ b/arch/powerpc/platforms/4xx/uic.c
@@ -243,16 +243,16 @@ static struct uic * __init uic_init_one(struct device_node *node)
raw_spin_lock_init(&uic->lock);
indexp = of_get_property(node, "cell-index", &len);
if (!indexp || (len != sizeof(u32))) {
- printk(KERN_ERR "uic: Device node %s has missing or invalid "
- "cell-index property\n", node->full_name);
+ printk(KERN_ERR "uic: Device node %pOF has missing or invalid "
+ "cell-index property\n", node);
return NULL;
}
uic->index = *indexp;
dcrreg = of_get_property(node, "dcr-reg", &len);
if (!dcrreg || (len != 2*sizeof(u32))) {
- printk(KERN_ERR "uic: Device node %s has missing or invalid "
- "dcr-reg property\n", node->full_name);
+ printk(KERN_ERR "uic: Device node %pOF has missing or invalid "
+ "dcr-reg property\n", node);
return NULL;
}
uic->dcrbase = *dcrreg;
@@ -292,7 +292,7 @@ void __init uic_init_tree(void)
* top-level interrupt controller */
primary_uic = uic_init_one(np);
if (!primary_uic)
- panic("Unable to initialize primary UIC %s\n", np->full_name);
+ panic("Unable to initialize primary UIC %pOF\n", np);
irq_set_default_host(primary_uic->irqhost);
of_node_put(np);
@@ -306,8 +306,8 @@ void __init uic_init_tree(void)
uic = uic_init_one(np);
if (! uic)
- panic("Unable to initialize a secondary UIC %s\n",
- np->full_name);
+ panic("Unable to initialize a secondary UIC %pOF\n",
+ np);
cascade_virq = irq_of_parse_and_map(np, 0);
diff --git a/arch/powerpc/platforms/512x/clock-commonclk.c b/arch/powerpc/platforms/512x/clock-commonclk.c
index add5a5374fa0..b3097fe6441b 100644
--- a/arch/powerpc/platforms/512x/clock-commonclk.c
+++ b/arch/powerpc/platforms/512x/clock-commonclk.c
@@ -363,7 +363,7 @@ static int get_cpmf_mult_x2(void)
*/
/* applies to the IPS_DIV, and PCI_DIV values */
-static struct clk_div_table divtab_2346[] = {
+static const struct clk_div_table divtab_2346[] = {
{ .val = 2, .div = 2, },
{ .val = 3, .div = 3, },
{ .val = 4, .div = 4, },
@@ -372,7 +372,7 @@ static struct clk_div_table divtab_2346[] = {
};
/* applies to the MBX_DIV, LPC_DIV, and NFC_DIV values */
-static struct clk_div_table divtab_1234[] = {
+static const struct clk_div_table divtab_1234[] = {
{ .val = 1, .div = 1, },
{ .val = 2, .div = 2, },
{ .val = 3, .div = 3, },
diff --git a/arch/powerpc/platforms/512x/mpc512x_shared.c b/arch/powerpc/platforms/512x/mpc512x_shared.c
index 6b4f4cb7009a..f99e79ee060e 100644
--- a/arch/powerpc/platforms/512x/mpc512x_shared.c
+++ b/arch/powerpc/platforms/512x/mpc512x_shared.c
@@ -387,8 +387,8 @@ static unsigned int __init get_fifo_size(struct device_node *np,
if (fp)
return *fp;
- pr_warning("no %s property in %s node, defaulting to %d\n",
- prop_name, np->full_name, DEFAULT_FIFO_SIZE);
+ pr_warning("no %s property in %pOF node, defaulting to %d\n",
+ prop_name, np, DEFAULT_FIFO_SIZE);
return DEFAULT_FIFO_SIZE;
}
@@ -426,15 +426,15 @@ static void __init mpc512x_psc_fifo_init(void)
psc = of_iomap(np, 0);
if (!psc) {
- pr_err("%s: Can't map %s device\n",
- __func__, np->full_name);
+ pr_err("%s: Can't map %pOF device\n",
+ __func__, np);
continue;
}
/* FIFO space is 4KiB, check if requested size is available */
if ((fifobase + tx_fifo_size + rx_fifo_size) > 0x1000) {
- pr_err("%s: no fifo space available for %s\n",
- __func__, np->full_name);
+ pr_err("%s: no fifo space available for %pOF\n",
+ __func__, np);
iounmap(psc);
/*
* chances are that another device requests less
diff --git a/arch/powerpc/platforms/52xx/efika.c b/arch/powerpc/platforms/52xx/efika.c
index 39b49822ace1..1ecbf176d35a 100644
--- a/arch/powerpc/platforms/52xx/efika.c
+++ b/arch/powerpc/platforms/52xx/efika.c
@@ -99,7 +99,7 @@ static void __init efika_pcisetup(void)
bus_range = of_get_property(pcictrl, "bus-range", &len);
if (bus_range == NULL || len < 2 * sizeof(int)) {
printk(KERN_WARNING EFIKA_PLATFORM_NAME
- ": Can't get bus-range for %s\n", pcictrl->full_name);
+ ": Can't get bus-range for %pOF\n", pcictrl);
goto out_put;
}
@@ -109,14 +109,14 @@ static void __init efika_pcisetup(void)
else
printk(KERN_INFO EFIKA_PLATFORM_NAME ": PCI buses %d..%d",
bus_range[0], bus_range[1]);
- printk(" controlled by %s\n", pcictrl->full_name);
+ printk(" controlled by %pOF\n", pcictrl);
printk("\n");
hose = pcibios_alloc_controller(pcictrl);
if (!hose) {
printk(KERN_WARNING EFIKA_PLATFORM_NAME
- ": Can't allocate PCI controller structure for %s\n",
- pcictrl->full_name);
+ ": Can't allocate PCI controller structure for %pOF\n",
+ pcictrl);
goto out_put;
}
diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c
index a3227040cc86..1fcab233d2f2 100644
--- a/arch/powerpc/platforms/52xx/media5200.c
+++ b/arch/powerpc/platforms/52xx/media5200.c
@@ -156,7 +156,7 @@ static void __init media5200_init_irq(void)
fpga_np = of_find_compatible_node(NULL, NULL, "fsl,media5200-fpga");
if (!fpga_np)
goto out;
- pr_debug("%s: found fpga node: %s\n", __func__, fpga_np->full_name);
+ pr_debug("%s: found fpga node: %pOF\n", __func__, fpga_np);
media5200_irq.regs = of_iomap(fpga_np, 0);
if (!media5200_irq.regs)
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
index 22645a7c6b8a..9e974b1e1697 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
@@ -226,7 +226,7 @@ static int mpc52xx_gpt_irq_xlate(struct irq_domain *h, struct device_node *ct,
dev_dbg(gpt->dev, "%s: flags=%i\n", __func__, intspec[0]);
if ((intsize < 1) || (intspec[0] > 3)) {
- dev_err(gpt->dev, "bad irq specifier in %s\n", ct->full_name);
+ dev_err(gpt->dev, "bad irq specifier in %pOF\n", ct);
return -EINVAL;
}
@@ -331,7 +331,7 @@ mpc52xx_gpt_gpio_setup(struct mpc52xx_gpt_priv *gpt, struct device_node *node)
if (!of_find_property(node, "gpio-controller", NULL))
return;
- gpt->gc.label = kstrdup(node->full_name, GFP_KERNEL);
+ gpt->gc.label = kasprintf(GFP_KERNEL, "%pOF", node);
if (!gpt->gc.label) {
dev_err(gpt->dev, "out of memory\n");
return;
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pci.c b/arch/powerpc/platforms/52xx/mpc52xx_pci.c
index 00282c2b0cae..af0f79995214 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pci.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pci.c
@@ -369,19 +369,19 @@ mpc52xx_add_bridge(struct device_node *node)
const int *bus_range;
struct resource rsrc;
- pr_debug("Adding MPC52xx PCI host bridge %s\n", node->full_name);
+ pr_debug("Adding MPC52xx PCI host bridge %pOF\n", node);
pci_add_flags(PCI_REASSIGN_ALL_BUS);
if (of_address_to_resource(node, 0, &rsrc) != 0) {
- printk(KERN_ERR "Can't get %s resources\n", node->full_name);
+ printk(KERN_ERR "Can't get %pOF resources\n", node);
return -EINVAL;
}
bus_range = of_get_property(node, "bus-range", &len);
if (bus_range == NULL || len < 2 * sizeof(int)) {
- printk(KERN_WARNING "Can't get %s bus-range, assume bus 0\n",
- node->full_name);
+ printk(KERN_WARNING "Can't get %pOF bus-range, assume bus 0\n",
+ node);
bus_range = NULL;
}
diff --git a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
index 63c5ab6489c9..96bb55ca61d3 100644
--- a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
+++ b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
@@ -128,7 +128,7 @@ static int mcu_gpiochip_add(struct mcu *mcu)
return -ENODEV;
gc->owner = THIS_MODULE;
- gc->label = np->full_name;
+ gc->label = kasprintf(GFP_KERNEL, "%pOF", np);
gc->can_sleep = 1;
gc->ngpio = MCU_NUM_GPIO;
gc->base = -1;
@@ -141,6 +141,7 @@ static int mcu_gpiochip_add(struct mcu *mcu)
static int mcu_gpiochip_remove(struct mcu *mcu)
{
+ kfree(mcu->gc.label);
gpiochip_remove(&mcu->gc);
return 0;
}
diff --git a/arch/powerpc/platforms/83xx/mpc832x_rdb.c b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
index d7c9b186954d..a4539c5accb0 100644
--- a/arch/powerpc/platforms/83xx/mpc832x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
@@ -89,7 +89,7 @@ static int __init of_fsl_spi_probe(char *type, char *compatible, u32 sysclk,
goto err;
ret = of_irq_to_resource(np, 0, &res[1]);
- if (!ret)
+ if (ret <= 0)
goto err;
pdev = platform_device_alloc("mpc83xx_spi", i);
@@ -113,7 +113,7 @@ static int __init of_fsl_spi_probe(char *type, char *compatible, u32 sysclk,
unreg:
platform_device_del(pdev);
err:
- pr_err("%s: registration failed\n", np->full_name);
+ pr_err("%pOF: registration failed\n", np);
next:
i++;
}
diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
index 978b85bb3233..7fa3e197871a 100644
--- a/arch/powerpc/platforms/83xx/suspend.c
+++ b/arch/powerpc/platforms/83xx/suspend.c
@@ -361,7 +361,7 @@ static int pmc_probe(struct platform_device *ofdev)
return -EBUSY;
}
- pmc_regs = ioremap(res.start, sizeof(struct mpc83xx_pmc));
+ pmc_regs = ioremap(res.start, sizeof(*pmc_regs));
if (!pmc_regs) {
ret = -ENOMEM;
@@ -374,7 +374,7 @@ static int pmc_probe(struct platform_device *ofdev)
goto out_pmc;
}
- clock_regs = ioremap(res.start, sizeof(struct mpc83xx_pmc));
+ clock_regs = ioremap(res.start, sizeof(*clock_regs));
if (!clock_regs) {
ret = -ENOMEM;
diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c
index 0908abd7e36f..9fb57f78cdbe 100644
--- a/arch/powerpc/platforms/85xx/p1022_ds.c
+++ b/arch/powerpc/platforms/85xx/p1022_ds.c
@@ -508,8 +508,8 @@ static void __init p1022_ds_setup_arch(void)
* allocate one static local variable for each
* call to this function.
*/
- pr_info("p1022ds: disabling %s node",
- np2->full_name);
+ pr_info("p1022ds: disabling %pOF node",
+ np2);
of_update_property(np2, &nor_status);
of_node_put(np2);
}
@@ -524,8 +524,8 @@ static void __init p1022_ds_setup_arch(void)
.length = sizeof("disabled"),
};
- pr_info("p1022ds: disabling %s node",
- np2->full_name);
+ pr_info("p1022ds: disabling %pOF node",
+ np2);
of_update_property(np2, &nand_status);
of_node_put(np2);
}
diff --git a/arch/powerpc/platforms/85xx/xes_mpc85xx.c b/arch/powerpc/platforms/85xx/xes_mpc85xx.c
index cd6ce845f398..77e618dce4a8 100644
--- a/arch/powerpc/platforms/85xx/xes_mpc85xx.c
+++ b/arch/powerpc/platforms/85xx/xes_mpc85xx.c
@@ -100,8 +100,8 @@ static void xes_mpc85xx_fixups(void)
err = of_address_to_resource(np, 0, &r[0]);
if (err) {
printk(KERN_WARNING "xes_mpc85xx: Could not get "
- "resource for device tree node '%s'",
- np->full_name);
+ "resource for device tree node '%pOF'",
+ np);
continue;
}
diff --git a/arch/powerpc/platforms/8xx/Kconfig b/arch/powerpc/platforms/8xx/Kconfig
index 80cbcb0ad9b1..536b0c5d5ce3 100644
--- a/arch/powerpc/platforms/8xx/Kconfig
+++ b/arch/powerpc/platforms/8xx/Kconfig
@@ -5,7 +5,6 @@ config CPM1
choice
prompt "8xx Machine Type"
depends on PPC_8xx
- depends on 8xx
default MPC885ADS
config MPC8XXFADS
@@ -92,7 +91,7 @@ endmenu
#
menu "MPC8xx CPM Options"
- depends on 8xx
+ depends on PPC_8xx
# This doesn't really belong here, but it is convenient to ask
# 8xx specific questions.
diff --git a/arch/powerpc/platforms/8xx/Makefile b/arch/powerpc/platforms/8xx/Makefile
index 76a81c3350a8..f9af3218bd9c 100644
--- a/arch/powerpc/platforms/8xx/Makefile
+++ b/arch/powerpc/platforms/8xx/Makefile
@@ -1,7 +1,7 @@
#
# Makefile for the PowerPC 8xx linux kernel.
#
-obj-$(CONFIG_PPC_8xx) += m8xx_setup.o
+obj-y += m8xx_setup.o machine_check.o pic.o
obj-$(CONFIG_MPC885ADS) += mpc885ads_setup.o
obj-$(CONFIG_MPC86XADS) += mpc86xads_setup.o
obj-$(CONFIG_PPC_EP88XC) += ep88xc.o
diff --git a/arch/powerpc/platforms/8xx/m8xx_setup.c b/arch/powerpc/platforms/8xx/m8xx_setup.c
index f81069f79a94..1917d69f84df 100644
--- a/arch/powerpc/platforms/8xx/m8xx_setup.c
+++ b/arch/powerpc/platforms/8xx/m8xx_setup.c
@@ -23,7 +23,7 @@
#include <asm/fs_pd.h>
#include <mm/mmu_decl.h>
-#include <sysdev/mpc8xx_pic.h>
+#include "pic.h"
#include "mpc8xx.h"
diff --git a/arch/powerpc/platforms/8xx/machine_check.c b/arch/powerpc/platforms/8xx/machine_check.c
new file mode 100644
index 000000000000..402016705a39
--- /dev/null
+++ b/arch/powerpc/platforms/8xx/machine_check.c
@@ -0,0 +1,37 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/ptrace.h>
+
+#include <asm/reg.h>
+
+int machine_check_8xx(struct pt_regs *regs)
+{
+ unsigned long reason = regs->msr;
+
+ pr_err("Machine check in kernel mode.\n");
+ pr_err("Caused by (from SRR1=%lx): ", reason);
+ if (reason & 0x40000000)
+ pr_err("Fetch error at address %lx\n", regs->nip);
+ else
+ pr_err("Data access error at address %lx\n", regs->dar);
+
+#ifdef CONFIG_PCI
+ /* the qspan pci read routines can cause machine checks -- Cort
+ *
+ * yuck !!! that totally needs to go away ! There are better ways
+ * to deal with that than having a wart in the mcheck handler.
+ * -- BenH
+ */
+ bad_page_fault(regs, regs->dar, SIGBUS);
+ return 1;
+#else
+ return 0;
+#endif
+}
diff --git a/arch/powerpc/sysdev/mpc8xx_pic.c b/arch/powerpc/platforms/8xx/pic.c
index 2842f9d63d21..8d5a25d43ef3 100644
--- a/arch/powerpc/sysdev/mpc8xx_pic.c
+++ b/arch/powerpc/platforms/8xx/pic.c
@@ -9,7 +9,7 @@
#include <asm/io.h>
#include <asm/8xx_immap.h>
-#include "mpc8xx_pic.h"
+#include "pic.h"
#define PIC_VEC_SPURRIOUS 15
diff --git a/arch/powerpc/sysdev/mpc8xx_pic.h b/arch/powerpc/platforms/8xx/pic.h
index 9fe00eebdc8b..9fe00eebdc8b 100644
--- a/arch/powerpc/sysdev/mpc8xx_pic.h
+++ b/arch/powerpc/platforms/8xx/pic.h
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 2f629e0551e9..13663efc1d31 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -32,7 +32,6 @@ config PPC_85xx
config PPC_8xx
bool "Freescale 8xx"
select FSL_SOC
- select 8xx
select PPC_LIB_RHEAP
select SYS_SUPPORTS_HUGETLBFS
@@ -149,10 +148,6 @@ config 6xx
depends on PPC32 && PPC_BOOK3S
select PPC_HAVE_PMU_SUPPORT
-# this is temp to handle compat with arch=ppc
-config 8xx
- bool
-
config E500
select FSL_EMB_PERFMON
select PPC_FSL_BOOK3E
@@ -271,44 +266,6 @@ config VSX
If in doubt, say Y here.
-config PPC_ICSWX
- bool "Support for PowerPC icswx coprocessor instruction"
- depends on PPC_BOOK3S_64
- default n
- ---help---
-
- This option enables kernel support for the PowerPC Initiate
- Coprocessor Store Word (icswx) coprocessor instruction on POWER7
- and POWER8 processors. POWER9 uses new copy/paste instructions
- to invoke the coprocessor.
-
- This option is only useful if you have a processor that supports
- the icswx coprocessor instruction. It does not have any effect
- on processors without the icswx coprocessor instruction.
-
- This option slightly increases kernel memory usage.
-
- If in doubt, say N here.
-
-config PPC_ICSWX_PID
- bool "icswx requires direct PID management"
- depends on PPC_ICSWX
- default y
- ---help---
- The PID register in server is used explicitly for ICSWX. In
- embedded systems PID management is done by the system.
-
-config PPC_ICSWX_USE_SIGILL
- bool "Should a bad CT cause a SIGILL?"
- depends on PPC_ICSWX
- default n
- ---help---
- Should a bad CT used for "non-record form ICSWX" cause an
- illegal instruction signal or should it be silent as
- architected.
-
- If in doubt, say N here.
-
config SPE_POSSIBLE
def_bool y
depends on E200 || (E500 && !PPC_E500MC)
@@ -413,7 +370,7 @@ config NR_CPUS
config NOT_COHERENT_CACHE
bool
- depends on 4xx || 8xx || E200 || PPC_MPC512x || GAMECUBE_COMMON
+ depends on 4xx || PPC_8xx || E200 || PPC_MPC512x || GAMECUBE_COMMON
default n if PPC_47x
default y
diff --git a/arch/powerpc/platforms/Makefile b/arch/powerpc/platforms/Makefile
index 469ef170d218..d7a55ecfaee5 100644
--- a/arch/powerpc/platforms/Makefile
+++ b/arch/powerpc/platforms/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_FSL_ULI1575) += fsl_uli1575.o
obj-$(CONFIG_PPC_PMAC) += powermac/
obj-$(CONFIG_PPC_CHRP) += chrp/
+obj-$(CONFIG_4xx) += 4xx/
obj-$(CONFIG_40x) += 40x/
obj-$(CONFIG_44x) += 44x/
obj-$(CONFIG_PPC_MPC512x) += 512x/
diff --git a/arch/powerpc/platforms/amigaone/setup.c b/arch/powerpc/platforms/amigaone/setup.c
index 45cb9821173c..b9d466cc2b8a 100644
--- a/arch/powerpc/platforms/amigaone/setup.c
+++ b/arch/powerpc/platforms/amigaone/setup.c
@@ -40,7 +40,7 @@ static int __init amigaone_add_bridge(struct device_node *dev)
const int *bus_range;
struct pci_controller *hose;
- printk(KERN_INFO "Adding PCI host bridge %s\n", dev->full_name);
+ printk(KERN_INFO "Adding PCI host bridge %pOF\n", dev);
cfg_addr = of_get_address(dev, 0, NULL, NULL);
cfg_data = of_get_address(dev, 1, NULL, NULL);
@@ -49,8 +49,8 @@ static int __init amigaone_add_bridge(struct device_node *dev)
bus_range = of_get_property(dev, "bus-range", &len);
if ((bus_range == NULL) || (len < 2 * sizeof(int)))
- printk(KERN_WARNING "Can't get bus-range for %s, assume"
- " bus 0\n", dev->full_name);
+ printk(KERN_WARNING "Can't get bus-range for %pOF, assume"
+ " bus 0\n", dev);
hose = pcibios_alloc_controller(dev);
if (hose == NULL)
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index 8d3ae2cc52bf..6ea3f248b155 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -187,8 +187,8 @@ static struct axon_msic *find_msi_translator(struct pci_dev *dev)
irq_domain = irq_find_host(dn);
if (!irq_domain) {
- dev_dbg(&dev->dev, "axon_msi: no irq_domain found for node %s\n",
- dn->full_name);
+ dev_dbg(&dev->dev, "axon_msi: no irq_domain found for node %pOF\n",
+ dn);
goto out_error;
}
@@ -326,8 +326,8 @@ static void axon_msi_shutdown(struct platform_device *device)
struct axon_msic *msic = dev_get_drvdata(&device->dev);
u32 tmp;
- pr_devel("axon_msi: disabling %s\n",
- irq_domain_get_of_node(msic->irq_domain)->full_name);
+ pr_devel("axon_msi: disabling %pOF\n",
+ irq_domain_get_of_node(msic->irq_domain));
tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG);
tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE;
msic_dcr_write(msic, MSIC_CTRL_REG, tmp);
@@ -340,12 +340,12 @@ static int axon_msi_probe(struct platform_device *device)
unsigned int virq;
int dcr_base, dcr_len;
- pr_devel("axon_msi: setting up dn %s\n", dn->full_name);
+ pr_devel("axon_msi: setting up dn %pOF\n", dn);
msic = kzalloc(sizeof(struct axon_msic), GFP_KERNEL);
if (!msic) {
- printk(KERN_ERR "axon_msi: couldn't allocate msic for %s\n",
- dn->full_name);
+ printk(KERN_ERR "axon_msi: couldn't allocate msic for %pOF\n",
+ dn);
goto out;
}
@@ -354,30 +354,30 @@ static int axon_msi_probe(struct platform_device *device)
if (dcr_base == 0 || dcr_len == 0) {
printk(KERN_ERR
- "axon_msi: couldn't parse dcr properties on %s\n",
- dn->full_name);
+ "axon_msi: couldn't parse dcr properties on %pOF\n",
+ dn);
goto out_free_msic;
}
msic->dcr_host = dcr_map(dn, dcr_base, dcr_len);
if (!DCR_MAP_OK(msic->dcr_host)) {
- printk(KERN_ERR "axon_msi: dcr_map failed for %s\n",
- dn->full_name);
+ printk(KERN_ERR "axon_msi: dcr_map failed for %pOF\n",
+ dn);
goto out_free_msic;
}
msic->fifo_virt = dma_alloc_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES,
&msic->fifo_phys, GFP_KERNEL);
if (!msic->fifo_virt) {
- printk(KERN_ERR "axon_msi: couldn't allocate fifo for %s\n",
- dn->full_name);
+ printk(KERN_ERR "axon_msi: couldn't allocate fifo for %pOF\n",
+ dn);
goto out_free_msic;
}
virq = irq_of_parse_and_map(dn, 0);
if (!virq) {
- printk(KERN_ERR "axon_msi: irq parse and map failed for %s\n",
- dn->full_name);
+ printk(KERN_ERR "axon_msi: irq parse and map failed for %pOF\n",
+ dn);
goto out_free_fifo;
}
memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);
@@ -385,8 +385,8 @@ static int axon_msi_probe(struct platform_device *device)
/* We rely on being able to stash a virq in a u16, so limit irqs to < 65536 */
msic->irq_domain = irq_domain_add_nomap(dn, 65536, &msic_host_ops, msic);
if (!msic->irq_domain) {
- printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %s\n",
- dn->full_name);
+ printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %pOF\n",
+ dn);
goto out_free_fifo;
}
@@ -412,7 +412,7 @@ static int axon_msi_probe(struct platform_device *device)
axon_msi_debug_setup(dn, msic);
- printk(KERN_DEBUG "axon_msi: setup MSIC on %s\n", dn->full_name);
+ printk(KERN_DEBUG "axon_msi: setup MSIC on %pOF\n", dn);
return 0;
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 871d38479a25..6fc85e29dc08 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -303,8 +303,8 @@ static void __init init_one_iic(unsigned int hw_cpu, unsigned long addr,
iic->node = of_node_get(node);
out_be64(&iic->regs->prio, 0);
- printk(KERN_INFO "IIC for CPU %d target id 0x%x : %s\n",
- hw_cpu, iic->target_id, node->full_name);
+ printk(KERN_INFO "IIC for CPU %d target id 0x%x : %pOF\n",
+ hw_cpu, iic->target_id, node);
}
static int __init setup_iic(void)
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 29d4f96ed33e..4b91ad08eefd 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -278,8 +278,8 @@ static int cell_iommu_find_ioc(int nid, unsigned long *base)
if (of_node_to_nid(np) != nid)
continue;
if (of_address_to_resource(np, 0, &r)) {
- printk(KERN_ERR "iommu: can't get address for %s\n",
- np->full_name);
+ printk(KERN_ERR "iommu: can't get address for %pOF\n",
+ np);
continue;
}
*base = r.start;
@@ -458,8 +458,8 @@ static inline u32 cell_iommu_get_ioid(struct device_node *np)
ioid = of_get_property(np, "ioid", NULL);
if (ioid == NULL) {
- printk(KERN_WARNING "iommu: missing ioid for %s using 0\n",
- np->full_name);
+ printk(KERN_WARNING "iommu: missing ioid for %pOF using 0\n",
+ np);
return 0;
}
@@ -559,8 +559,8 @@ static struct iommu_table *cell_get_iommu_table(struct device *dev)
*/
iommu = cell_iommu_for_node(dev_to_node(dev));
if (iommu == NULL || list_empty(&iommu->windows)) {
- dev_err(dev, "iommu: missing iommu for %s (node %d)\n",
- of_node_full_name(dev->of_node), dev_to_node(dev));
+ dev_err(dev, "iommu: missing iommu for %pOF (node %d)\n",
+ dev->of_node, dev_to_node(dev));
return NULL;
}
window = list_entry(iommu->windows.next, struct iommu_window, list);
@@ -720,12 +720,12 @@ static struct cbe_iommu * __init cell_iommu_alloc(struct device_node *np)
/* Get node ID */
nid = of_node_to_nid(np);
if (nid < 0) {
- printk(KERN_ERR "iommu: failed to get node for %s\n",
- np->full_name);
+ printk(KERN_ERR "iommu: failed to get node for %pOF\n",
+ np);
return NULL;
}
- pr_debug("iommu: setting up iommu for node %d (%s)\n",
- nid, np->full_name);
+ pr_debug("iommu: setting up iommu for node %d (%pOF)\n",
+ nid, np);
/* XXX todo: If we can have multiple windows on the same IOMMU, which
* isn't the case today, we probably want here to check whether the
@@ -736,8 +736,8 @@ static struct cbe_iommu * __init cell_iommu_alloc(struct device_node *np)
*/
if (cbe_nr_iommus >= NR_IOMMUS) {
- printk(KERN_ERR "iommu: too many IOMMUs detected ! (%s)\n",
- np->full_name);
+ printk(KERN_ERR "iommu: too many IOMMUs detected ! (%pOF)\n",
+ np);
return NULL;
}
diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c
index 460ab392f0e7..2f704afe9af3 100644
--- a/arch/powerpc/platforms/cell/ras.c
+++ b/arch/powerpc/platforms/cell/ras.c
@@ -196,8 +196,8 @@ static int __init cbe_ptcal_enable(void)
for_each_node_by_type(np, "cpu") {
const u32 *nid = of_get_property(np, "node-id", NULL);
if (!nid) {
- printk(KERN_ERR "%s: node %s is missing node-id?\n",
- __func__, np->full_name);
+ printk(KERN_ERR "%s: node %pOF is missing node-id?\n",
+ __func__, np);
continue;
}
cbe_ptcal_enable_on_node(*nid, order);
diff --git a/arch/powerpc/platforms/cell/spider-pci.c b/arch/powerpc/platforms/cell/spider-pci.c
index f1f7878893f3..d1e61e273e64 100644
--- a/arch/powerpc/platforms/cell/spider-pci.c
+++ b/arch/powerpc/platforms/cell/spider-pci.c
@@ -130,8 +130,8 @@ int __init spiderpci_iowa_init(struct iowa_bus *bus, void *data)
struct resource r;
unsigned long offset = (unsigned long)data;
- pr_debug("SPIDERPCI-IOWA:Bus initialize for spider(%s)\n",
- np->full_name);
+ pr_debug("SPIDERPCI-IOWA:Bus initialize for spider(%pOF)\n",
+ np);
priv = kzalloc(sizeof(struct spiderpci_iowa_private), GFP_KERNEL);
if (!priv) {
diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c
index ff924af00e78..aa44bfc46467 100644
--- a/arch/powerpc/platforms/cell/spider-pic.c
+++ b/arch/powerpc/platforms/cell/spider-pic.c
@@ -323,8 +323,8 @@ static void __init spider_init_one(struct device_node *of_node, int chip,
irq_set_handler_data(virq, pic);
irq_set_chained_handler(virq, spider_irq_cascade);
- printk(KERN_INFO "spider_pic: node %d, addr: 0x%lx %s\n",
- pic->node_id, addr, of_node->full_name);
+ printk(KERN_INFO "spider_pic: node %d, addr: 0x%lx %pOF\n",
+ pic->node_id, addr, of_node);
/* Enable the interrupt detection enable bit. Do this last! */
out_be32(pic->regs + TIR_DEN, in_be32(pic->regs + TIR_DEN) | 0x1);
diff --git a/arch/powerpc/platforms/cell/spu_manage.c b/arch/powerpc/platforms/cell/spu_manage.c
index 672d310dcf14..f636ee22b203 100644
--- a/arch/powerpc/platforms/cell/spu_manage.c
+++ b/arch/powerpc/platforms/cell/spu_manage.c
@@ -191,8 +191,8 @@ static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
goto err;
}
ret = -EINVAL;
- pr_debug(" irq %d no 0x%x on %s\n", i, oirq.args[0],
- oirq.np->full_name);
+ pr_debug(" irq %d no 0x%x on %pOF\n", i, oirq.args[0],
+ oirq.np);
spu->irqs[i] = irq_create_of_mapping(&oirq);
if (!spu->irqs[i]) {
pr_debug("spu_new: failed to map it !\n");
@@ -243,32 +243,32 @@ static int __init spu_map_device(struct spu *spu)
ret = spu_map_resource(spu, 0, (void __iomem**)&spu->local_store,
&spu->local_store_phys);
if (ret) {
- pr_debug("spu_new: failed to map %s resource 0\n",
- np->full_name);
+ pr_debug("spu_new: failed to map %pOF resource 0\n",
+ np);
goto out;
}
ret = spu_map_resource(spu, 1, (void __iomem**)&spu->problem,
&spu->problem_phys);
if (ret) {
- pr_debug("spu_new: failed to map %s resource 1\n",
- np->full_name);
+ pr_debug("spu_new: failed to map %pOF resource 1\n",
+ np);
goto out_unmap;
}
ret = spu_map_resource(spu, 2, (void __iomem**)&spu->priv2, NULL);
if (ret) {
- pr_debug("spu_new: failed to map %s resource 2\n",
- np->full_name);
+ pr_debug("spu_new: failed to map %pOF resource 2\n",
+ np);
goto out_unmap;
}
if (!firmware_has_feature(FW_FEATURE_LPAR))
ret = spu_map_resource(spu, 3,
(void __iomem**)&spu->priv1, NULL);
if (ret) {
- pr_debug("spu_new: failed to map %s resource 3\n",
- np->full_name);
+ pr_debug("spu_new: failed to map %pOF resource 3\n",
+ np);
goto out_unmap;
}
- pr_debug("spu_new: %s maps:\n", np->full_name);
+ pr_debug("spu_new: %pOF maps:\n", np);
pr_debug(" local store : 0x%016lx -> 0x%p\n",
spu->local_store_phys, spu->local_store);
pr_debug(" problem state : 0x%016lx -> 0x%p\n",
@@ -316,8 +316,8 @@ static int __init of_create_spu(struct spu *spu, void *data)
spu->node = of_node_to_nid(spe);
if (spu->node >= MAX_NUMNODES) {
- printk(KERN_WARNING "SPE %s on node %d ignored,"
- " node number too big\n", spe->full_name, spu->node);
+ printk(KERN_WARNING "SPE %pOF on node %d ignored,"
+ " node number too big\n", spe, spu->node);
printk(KERN_WARNING "Check if CONFIG_NUMA is enabled.\n");
ret = -ENODEV;
goto out;
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index ae2f740a82f1..5ffcdeb1eb17 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -1749,7 +1749,7 @@ out:
static int spufs_mfc_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
struct inode *inode = file_inode(file);
- int err = filemap_write_and_wait_range(inode->i_mapping, start, end);
+ int err = file_write_and_wait_range(file, start, end);
if (!err) {
inode_lock(inode);
err = spufs_mfc_flush(file, NULL);
diff --git a/arch/powerpc/platforms/chrp/pci.c b/arch/powerpc/platforms/chrp/pci.c
index 1b87e198faa7..27264794f5c0 100644
--- a/arch/powerpc/platforms/chrp/pci.c
+++ b/arch/powerpc/platforms/chrp/pci.c
@@ -235,14 +235,14 @@ chrp_find_bridges(void)
++index;
/* The GG2 bridge on the LongTrail doesn't have an address */
if (of_address_to_resource(dev, 0, &r) && !is_longtrail) {
- printk(KERN_WARNING "Can't use %s: no address\n",
- dev->full_name);
+ printk(KERN_WARNING "Can't use %pOF: no address\n",
+ dev);
continue;
}
bus_range = of_get_property(dev, "bus-range", &len);
if (bus_range == NULL || len < 2 * sizeof(int)) {
- printk(KERN_WARNING "Can't get bus-range for %s\n",
- dev->full_name);
+ printk(KERN_WARNING "Can't get bus-range for %pOF\n",
+ dev);
continue;
}
if (bus_range[1] == bus_range[0])
@@ -250,15 +250,15 @@ chrp_find_bridges(void)
else
printk(KERN_INFO "PCI buses %d..%d",
bus_range[0], bus_range[1]);
- printk(" controlled by %s", dev->full_name);
+ printk(" controlled by %pOF", dev);
if (!is_longtrail)
printk(" at %llx", (unsigned long long)r.start);
printk("\n");
hose = pcibios_alloc_controller(dev);
if (!hose) {
- printk("Can't allocate PCI controller structure for %s\n",
- dev->full_name);
+ printk("Can't allocate PCI controller structure for %pOF\n",
+ dev);
continue;
}
hose->first_busno = hose->self_busno = bus_range[0];
@@ -297,8 +297,8 @@ chrp_find_bridges(void)
}
}
} else {
- printk("No methods for %s (model %s), using RTAS\n",
- dev->full_name, model);
+ printk("No methods for %pOF (model %s), using RTAS\n",
+ dev, model);
hose->ops = &rtas_pci_ops;
}
diff --git a/arch/powerpc/platforms/chrp/pegasos_eth.c b/arch/powerpc/platforms/chrp/pegasos_eth.c
index 2b4dc6abde6c..19760712b39d 100644
--- a/arch/powerpc/platforms/chrp/pegasos_eth.c
+++ b/arch/powerpc/platforms/chrp/pegasos_eth.c
@@ -63,7 +63,7 @@ static struct platform_device mv643xx_eth_mvmdio_device = {
.name = "orion-mdio",
.id = -1,
.num_resources = ARRAY_SIZE(mv643xx_eth_mvmdio_resources),
- .resource = mv643xx_eth_shared_resources,
+ .resource = mv643xx_eth_mvmdio_resources,
};
static struct resource mv643xx_eth_port1_resources[] = {
diff --git a/arch/powerpc/platforms/embedded6xx/linkstation.c b/arch/powerpc/platforms/embedded6xx/linkstation.c
index f29cf29b11f8..f514d5d28cd4 100644
--- a/arch/powerpc/platforms/embedded6xx/linkstation.c
+++ b/arch/powerpc/platforms/embedded6xx/linkstation.c
@@ -41,12 +41,12 @@ static int __init linkstation_add_bridge(struct device_node *dev)
struct pci_controller *hose;
const int *bus_range;
- printk("Adding PCI host bridge %s\n", dev->full_name);
+ printk("Adding PCI host bridge %pOF\n", dev);
bus_range = of_get_property(dev, "bus-range", &len);
if (bus_range == NULL || len < 2 * sizeof(int))
- printk(KERN_WARNING "Can't get bus-range for %s, assume"
- " bus 0\n", dev->full_name);
+ printk(KERN_WARNING "Can't get bus-range for %pOF, assume"
+ " bus 0\n", dev);
hose = pcibios_alloc_controller(dev);
if (hose == NULL)
diff --git a/arch/powerpc/platforms/embedded6xx/mvme5100.c b/arch/powerpc/platforms/embedded6xx/mvme5100.c
index 8e3590941960..273dfa3f0252 100644
--- a/arch/powerpc/platforms/embedded6xx/mvme5100.c
+++ b/arch/powerpc/platforms/embedded6xx/mvme5100.c
@@ -115,7 +115,7 @@ static int __init mvme5100_add_bridge(struct device_node *dev)
struct pci_controller *hose;
unsigned short devid;
- pr_info("Adding PCI host bridge %s\n", dev->full_name);
+ pr_info("Adding PCI host bridge %pOF\n", dev);
bus_range = of_get_property(dev, "bus-range", &len);
diff --git a/arch/powerpc/platforms/embedded6xx/storcenter.c b/arch/powerpc/platforms/embedded6xx/storcenter.c
index 471a50bcd074..ed1914dd34bb 100644
--- a/arch/powerpc/platforms/embedded6xx/storcenter.c
+++ b/arch/powerpc/platforms/embedded6xx/storcenter.c
@@ -44,7 +44,7 @@ static int __init storcenter_add_bridge(struct device_node *dev)
struct pci_controller *hose;
const int *bus_range;
- printk("Adding PCI host bridge %s\n", dev->full_name);
+ printk("Adding PCI host bridge %pOF\n", dev);
hose = pcibios_alloc_controller(dev);
if (hose == NULL)
diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c
index 69794d9389c2..e3821379e86f 100644
--- a/arch/powerpc/platforms/maple/pci.c
+++ b/arch/powerpc/platforms/maple/pci.c
@@ -73,8 +73,8 @@ static void __init fixup_bus_range(struct device_node *bridge)
/* Lookup the "bus-range" property for the hose */
prop = of_find_property(bridge, "bus-range", &len);
if (prop == NULL || prop->value == NULL || len < 2 * sizeof(int)) {
- printk(KERN_WARNING "Can't get bus-range for %s\n",
- bridge->full_name);
+ printk(KERN_WARNING "Can't get bus-range for %pOF\n",
+ bridge);
return;
}
bus_range = prop->value;
@@ -498,12 +498,12 @@ static int __init maple_add_bridge(struct device_node *dev)
const int *bus_range;
int primary = 1;
- DBG("Adding PCI host bridge %s\n", dev->full_name);
+ DBG("Adding PCI host bridge %pOF\n", dev);
bus_range = of_get_property(dev, "bus-range", &len);
if (bus_range == NULL || len < 2 * sizeof(int)) {
- printk(KERN_WARNING "Can't get bus-range for %s, assume bus 0\n",
- dev->full_name);
+ printk(KERN_WARNING "Can't get bus-range for %pOF, assume bus 0\n",
+ dev);
}
hose = pcibios_alloc_controller(dev);
diff --git a/arch/powerpc/platforms/pasemi/pci.c b/arch/powerpc/platforms/pasemi/pci.c
index 10c4e8fc6ea9..5ff6108f19e9 100644
--- a/arch/powerpc/platforms/pasemi/pci.c
+++ b/arch/powerpc/platforms/pasemi/pci.c
@@ -193,7 +193,7 @@ static int __init pas_add_bridge(struct device_node *dev)
{
struct pci_controller *hose;
- pr_debug("Adding PCI host bridge %s\n", dev->full_name);
+ pr_debug("Adding PCI host bridge %pOF\n", dev);
hose = pcibios_alloc_controller(dev);
if (!hose)
diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c
index 1e02328c3f2d..9e3f39d36e88 100644
--- a/arch/powerpc/platforms/powermac/feature.c
+++ b/arch/powerpc/platforms/powermac/feature.c
@@ -2658,25 +2658,25 @@ static void __init probe_one_macio(const char *name, const char *compat, int typ
if (i >= MAX_MACIO_CHIPS) {
printk(KERN_ERR "pmac_feature: Please increase MAX_MACIO_CHIPS !\n");
- printk(KERN_ERR "pmac_feature: %s skipped\n", node->full_name);
+ printk(KERN_ERR "pmac_feature: %pOF skipped\n", node);
return;
}
addrp = of_get_pci_address(node, 0, &size, NULL);
if (addrp == NULL) {
- printk(KERN_ERR "pmac_feature: %s: can't find base !\n",
- node->full_name);
+ printk(KERN_ERR "pmac_feature: %pOF: can't find base !\n",
+ node);
return;
}
addr = of_translate_address(node, addrp);
if (addr == 0) {
- printk(KERN_ERR "pmac_feature: %s, can't translate base !\n",
- node->full_name);
+ printk(KERN_ERR "pmac_feature: %pOF, can't translate base !\n",
+ node);
return;
}
base = ioremap(addr, (unsigned long)size);
if (!base) {
- printk(KERN_ERR "pmac_feature: %s, can't map mac-io chip !\n",
- node->full_name);
+ printk(KERN_ERR "pmac_feature: %pOF, can't map mac-io chip !\n",
+ node);
return;
}
if (type == macio_keylargo || type == macio_keylargo2) {
diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c
index f627c9fd7b48..70183eb3d5c8 100644
--- a/arch/powerpc/platforms/powermac/low_i2c.c
+++ b/arch/powerpc/platforms/powermac/low_i2c.c
@@ -494,8 +494,8 @@ static struct pmac_i2c_host_kw *__init kw_i2c_host_init(struct device_node *np)
host = kzalloc(sizeof(struct pmac_i2c_host_kw), GFP_KERNEL);
if (host == NULL) {
- printk(KERN_ERR "low_i2c: Can't allocate host for %s\n",
- np->full_name);
+ printk(KERN_ERR "low_i2c: Can't allocate host for %pOF\n",
+ np);
return NULL;
}
@@ -505,8 +505,8 @@ static struct pmac_i2c_host_kw *__init kw_i2c_host_init(struct device_node *np)
*/
addrp = of_get_property(np, "AAPL,address", NULL);
if (addrp == NULL) {
- printk(KERN_ERR "low_i2c: Can't find address for %s\n",
- np->full_name);
+ printk(KERN_ERR "low_i2c: Can't find address for %pOF\n",
+ np);
kfree(host);
return NULL;
}
@@ -538,13 +538,13 @@ static struct pmac_i2c_host_kw *__init kw_i2c_host_init(struct device_node *np)
host->irq = irq_of_parse_and_map(np, 0);
if (!host->irq)
printk(KERN_WARNING
- "low_i2c: Failed to map interrupt for %s\n",
- np->full_name);
+ "low_i2c: Failed to map interrupt for %pOF\n",
+ np);
host->base = ioremap((*addrp), 0x1000);
if (host->base == NULL) {
- printk(KERN_ERR "low_i2c: Can't map registers for %s\n",
- np->full_name);
+ printk(KERN_ERR "low_i2c: Can't map registers for %pOF\n",
+ np);
kfree(host);
return NULL;
}
@@ -560,8 +560,8 @@ static struct pmac_i2c_host_kw *__init kw_i2c_host_init(struct device_node *np)
"keywest i2c", host))
host->irq = 0;
- printk(KERN_INFO "KeyWest i2c @0x%08x irq %d %s\n",
- *addrp, host->irq, np->full_name);
+ printk(KERN_INFO "KeyWest i2c @0x%08x irq %d %pOF\n",
+ *addrp, host->irq, np);
return host;
}
@@ -798,7 +798,7 @@ static void __init pmu_i2c_probe(void)
if (busnode == NULL)
return;
- printk(KERN_INFO "PMU i2c %s\n", busnode->full_name);
+ printk(KERN_INFO "PMU i2c %pOF\n", busnode);
/*
* We add bus 1 and 2 only for now, bus 0 is "special"
@@ -913,7 +913,7 @@ static void __init smu_i2c_probe(void)
if (controller == NULL)
return;
- printk(KERN_INFO "SMU i2c %s\n", controller->full_name);
+ printk(KERN_INFO "SMU i2c %pOF\n", controller);
/* Look for childs, note that they might not be of the right
* type as older device trees mix i2c busses and other things
@@ -945,8 +945,8 @@ static void __init smu_i2c_probe(void)
bus->flags = 0;
list_add(&bus->link, &pmac_i2c_busses);
- printk(KERN_INFO " channel %x bus %s\n",
- bus->channel, busnode->full_name);
+ printk(KERN_INFO " channel %x bus %pOF\n",
+ bus->channel, busnode);
}
}
@@ -1129,7 +1129,7 @@ int pmac_i2c_setmode(struct pmac_i2c_bus *bus, int mode)
*/
if (mode < pmac_i2c_mode_dumb || mode > pmac_i2c_mode_combined) {
printk(KERN_ERR "low_i2c: Invalid mode %d requested on"
- " bus %s !\n", mode, bus->busnode->full_name);
+ " bus %pOF !\n", mode, bus->busnode);
return -EINVAL;
}
bus->mode = mode;
@@ -1146,8 +1146,8 @@ int pmac_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize,
WARN_ON(!bus->opened);
DBG("xfer() chan=%d, addrdir=0x%x, mode=%d, subsize=%d, subaddr=0x%x,"
- " %d bytes, bus %s\n", bus->channel, addrdir, bus->mode, subsize,
- subaddr, len, bus->busnode->full_name);
+ " %d bytes, bus %pOF\n", bus->channel, addrdir, bus->mode, subsize,
+ subaddr, len, bus->busnode);
rc = bus->xfer(bus, addrdir, subsize, subaddr, data, len);
@@ -1241,13 +1241,13 @@ static void* pmac_i2c_do_begin(struct pmf_function *func, struct pmf_args *args)
bus = pmac_i2c_find_bus(func->node);
if (bus == NULL) {
- printk(KERN_ERR "low_i2c: Can't find bus for %s (pfunc)\n",
- func->node->full_name);
+ printk(KERN_ERR "low_i2c: Can't find bus for %pOF (pfunc)\n",
+ func->node);
return NULL;
}
if (pmac_i2c_open(bus, 0)) {
- printk(KERN_ERR "low_i2c: Can't open i2c bus for %s (pfunc)\n",
- func->node->full_name);
+ printk(KERN_ERR "low_i2c: Can't open i2c bus for %pOF (pfunc)\n",
+ func->node);
return NULL;
}
@@ -1417,7 +1417,7 @@ static struct pmf_handlers pmac_i2c_pfunc_handlers = {
static void __init pmac_i2c_dev_create(struct device_node *np, int quirks)
{
- DBG("dev_create(%s)\n", np->full_name);
+ DBG("dev_create(%pOF)\n", np);
pmf_register_driver(np, &pmac_i2c_pfunc_handlers,
(void *)(long)quirks);
@@ -1425,20 +1425,20 @@ static void __init pmac_i2c_dev_create(struct device_node *np, int quirks)
static void __init pmac_i2c_dev_init(struct device_node *np, int quirks)
{
- DBG("dev_create(%s)\n", np->full_name);
+ DBG("dev_create(%pOF)\n", np);
pmf_do_functions(np, NULL, 0, PMF_FLAGS_ON_INIT, NULL);
}
static void pmac_i2c_dev_suspend(struct device_node *np, int quirks)
{
- DBG("dev_suspend(%s)\n", np->full_name);
+ DBG("dev_suspend(%pOF)\n", np);
pmf_do_functions(np, NULL, 0, PMF_FLAGS_ON_SLEEP, NULL);
}
static void pmac_i2c_dev_resume(struct device_node *np, int quirks)
{
- DBG("dev_resume(%s)\n", np->full_name);
+ DBG("dev_resume(%pOF)\n", np);
pmf_do_functions(np, NULL, 0, PMF_FLAGS_ON_WAKE, NULL);
}
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index 6e06c3be2e9a..0b8174a79993 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -783,7 +783,7 @@ static int __init pmac_add_bridge(struct device_node *dev)
const int *bus_range;
int primary = 1, has_address = 0;
- DBG("Adding PCI host bridge %s\n", dev->full_name);
+ DBG("Adding PCI host bridge %pOF\n", dev);
/* Fetch host bridge registers address */
has_address = (of_address_to_resource(dev, 0, &rsrc) == 0);
@@ -791,8 +791,8 @@ static int __init pmac_add_bridge(struct device_node *dev)
/* Get bus range if any */
bus_range = of_get_property(dev, "bus-range", &len);
if (bus_range == NULL || len < 2 * sizeof(int)) {
- printk(KERN_WARNING "Can't get bus-range for %s, assume"
- " bus 0\n", dev->full_name);
+ printk(KERN_WARNING "Can't get bus-range for %pOF, assume"
+ " bus 0\n", dev);
}
hose = pcibios_alloc_controller(dev);
diff --git a/arch/powerpc/platforms/powermac/pfunc_base.c b/arch/powerpc/platforms/powermac/pfunc_base.c
index 459138ed4571..860159d46ab8 100644
--- a/arch/powerpc/platforms/powermac/pfunc_base.c
+++ b/arch/powerpc/platforms/powermac/pfunc_base.c
@@ -54,8 +54,8 @@ static int macio_do_gpio_write(PMF_STD_ARGS, u8 value, u8 mask)
raw_spin_lock_irqsave(&feature_lock, flags);
tmp = readb(addr);
tmp = (tmp & ~mask) | (value & mask);
- DBG("Do write 0x%02x to GPIO %s (%p)\n",
- tmp, func->node->full_name, addr);
+ DBG("Do write 0x%02x to GPIO %pOF (%p)\n",
+ tmp, func->node, addr);
writeb(tmp, addr);
raw_spin_unlock_irqrestore(&feature_lock, flags);
@@ -107,8 +107,8 @@ static void macio_gpio_init_one(struct macio_chip *macio)
if (gparent == NULL)
return;
- DBG("Installing GPIO functions for macio %s\n",
- macio->of_node->full_name);
+ DBG("Installing GPIO functions for macio %pOF\n",
+ macio->of_node);
/*
* Ok, got one, we dont need anything special to track them down, so
@@ -129,8 +129,8 @@ static void macio_gpio_init_one(struct macio_chip *macio)
pmf_register_driver(gp, &macio_gpio_handlers, (void *)offset);
}
- DBG("Calling initial GPIO functions for macio %s\n",
- macio->of_node->full_name);
+ DBG("Calling initial GPIO functions for macio %pOF\n",
+ macio->of_node);
/* And now we run all the init ones */
for (gp = NULL; (gp = of_get_next_child(gparent, gp)) != NULL;)
@@ -267,8 +267,8 @@ static struct pmf_handlers macio_mmio_handlers = {
static void macio_mmio_init_one(struct macio_chip *macio)
{
- DBG("Installing MMIO functions for macio %s\n",
- macio->of_node->full_name);
+ DBG("Installing MMIO functions for macio %pOF\n",
+ macio->of_node);
pmf_register_driver(macio->of_node, &macio_mmio_handlers, macio);
}
@@ -298,8 +298,8 @@ static void uninorth_install_pfunc(void)
{
struct device_node *np;
- DBG("Installing functions for UniN %s\n",
- uninorth_node->full_name);
+ DBG("Installing functions for UniN %pOF\n",
+ uninorth_node);
/*
* Install handlers for the bridge itself
@@ -317,8 +317,8 @@ static void uninorth_install_pfunc(void)
break;
}
if (unin_hwclock) {
- DBG("Installing functions for UniN clock %s\n",
- unin_hwclock->full_name);
+ DBG("Installing functions for UniN clock %pOF\n",
+ unin_hwclock);
pmf_register_driver(unin_hwclock, &unin_mmio_handlers, NULL);
pmf_do_functions(unin_hwclock, NULL, 0, PMF_FLAGS_ON_INIT,
NULL);
diff --git a/arch/powerpc/platforms/powermac/pfunc_core.c b/arch/powerpc/platforms/powermac/pfunc_core.c
index 695e8c4d4224..df3c93bef228 100644
--- a/arch/powerpc/platforms/powermac/pfunc_core.c
+++ b/arch/powerpc/platforms/powermac/pfunc_core.c
@@ -708,7 +708,7 @@ int pmf_register_driver(struct device_node *np,
if (handlers == NULL)
return -EINVAL;
- DBG("pmf: registering driver for node %s\n", np->full_name);
+ DBG("pmf: registering driver for node %pOF\n", np);
spin_lock_irqsave(&pmf_lock, flags);
dev = pmf_find_device(np);
@@ -781,7 +781,7 @@ void pmf_unregister_driver(struct device_node *np)
struct pmf_device *dev;
unsigned long flags;
- DBG("pmf: unregistering driver for node %s\n", np->full_name);
+ DBG("pmf: unregistering driver for node %pOF\n", np);
spin_lock_irqsave(&pmf_lock, flags);
dev = pmf_find_device(np);
@@ -940,7 +940,7 @@ int pmf_call_one(struct pmf_function *func, struct pmf_args *args)
void *instdata = NULL;
int rc = 0;
- DBG(" ** pmf_call_one(%s/%s) **\n", dev->node->full_name, func->name);
+ DBG(" ** pmf_call_one(%pOF/%s) **\n", dev->node, func->name);
if (dev->handlers->begin)
instdata = dev->handlers->begin(func, args);
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index f5f9ad7c3398..5e0719b27294 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -364,8 +364,8 @@ static void __init pmac_pic_probe_oldstyle(void)
(addr + 0x10);
of_node_put(master);
- printk(KERN_INFO "irq: Found primary Apple PIC %s for %d irqs\n",
- master->full_name, max_real_irqs);
+ printk(KERN_INFO "irq: Found primary Apple PIC %pOF for %d irqs\n",
+ master, max_real_irqs);
/* Map interrupts of cascaded controller */
if (slave && !of_address_to_resource(slave, 0, &r)) {
@@ -378,8 +378,8 @@ static void __init pmac_pic_probe_oldstyle(void)
(addr + 0x10);
pmac_irq_cascade = irq_of_parse_and_map(slave, 0);
- printk(KERN_INFO "irq: Found slave Apple PIC %s for %d irqs"
- " cascade: %d\n", slave->full_name,
+ printk(KERN_INFO "irq: Found slave Apple PIC %pOF for %d irqs"
+ " cascade: %d\n", slave,
max_irqs - max_real_irqs, pmac_irq_cascade);
}
of_node_put(slave);
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 6b4e9d181126..ab668cb72263 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -556,7 +556,7 @@ static int __init check_pmac_serial_console(void)
pr_debug(" can't find stdout package %s !\n", name);
return -ENODEV;
}
- pr_debug("stdout is %s\n", prom_stdout->full_name);
+ pr_debug("stdout is %pOF\n", prom_stdout);
name = of_get_property(prom_stdout, "name", NULL);
if (!name) {
diff --git a/arch/powerpc/platforms/powernv/Kconfig b/arch/powerpc/platforms/powernv/Kconfig
index 6a6f4ef46b9e..340cbe263b33 100644
--- a/arch/powerpc/platforms/powernv/Kconfig
+++ b/arch/powerpc/platforms/powernv/Kconfig
@@ -30,3 +30,25 @@ config OPAL_PRD
help
This enables the opal-prd driver, a facility to run processor
recovery diagnostics on OpenPower machines
+
+config PPC_MEMTRACE
+ bool "Enable removal of RAM from kernel mappings for tracing"
+ depends on PPC_POWERNV && MEMORY_HOTREMOVE
+ default n
+ help
+ Enabling this option allows for the removal of memory (RAM)
+ from the kernel mappings to be used for hardware tracing.
+
+config PPC_VAS
+ bool "IBM Virtual Accelerator Switchboard (VAS)"
+ depends on PPC_POWERNV && PPC_64K_PAGES
+ default y
+ help
+ This enables support for IBM Virtual Accelerator Switchboard (VAS).
+
+ VAS allows accelerators in co-processors like NX-GZIP and NX-842
+ to be accessible to kernel subsystems and user processes.
+
+ VAS adapters are found in POWER9 based systems.
+
+ If unsure, say N.
diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile
index b5d98cb3f482..37d60f7dd86d 100644
--- a/arch/powerpc/platforms/powernv/Makefile
+++ b/arch/powerpc/platforms/powernv/Makefile
@@ -2,7 +2,7 @@ obj-y += setup.o opal-wrappers.o opal.o opal-async.o idle.o
obj-y += opal-rtc.o opal-nvram.o opal-lpc.o opal-flash.o
obj-y += rng.o opal-elog.o opal-dump.o opal-sysparam.o opal-sensor.o
obj-y += opal-msglog.o opal-hmi.o opal-power.o opal-irqchip.o
-obj-y += opal-kmsg.o
+obj-y += opal-kmsg.o opal-powercap.o opal-psr.o opal-sensor-groups.o
obj-$(CONFIG_SMP) += smp.o subcore.o subcore-asm.o
obj-$(CONFIG_PCI) += pci.o pci-ioda.o npu-dma.o
@@ -12,3 +12,6 @@ obj-$(CONFIG_PPC_SCOM) += opal-xscom.o
obj-$(CONFIG_MEMORY_FAILURE) += opal-memory-errors.o
obj-$(CONFIG_TRACEPOINTS) += opal-tracepoints.o
obj-$(CONFIG_OPAL_PRD) += opal-prd.o
+obj-$(CONFIG_PERF_EVENTS) += opal-imc.o
+obj-$(CONFIG_PPC_MEMTRACE) += memtrace.o
+obj-$(CONFIG_PPC_VAS) += vas.o vas-window.o
diff --git a/arch/powerpc/platforms/powernv/copy-paste.h b/arch/powerpc/platforms/powernv/copy-paste.h
new file mode 100644
index 000000000000..c9a503623431
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/copy-paste.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2016-17 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <asm/ppc-opcode.h>
+
+#define CR0_SHIFT 28
+#define CR0_MASK 0xF
+/*
+ * Copy/paste instructions:
+ *
+ * copy RA,RB
+ * Copy contents of address (RA) + effective_address(RB)
+ * to internal copy-buffer.
+ *
+ * paste RA,RB
+ * Paste contents of internal copy-buffer to the address
+ * (RA) + effective_address(RB)
+ */
+static inline int vas_copy(void *crb, int offset)
+{
+ asm volatile(PPC_COPY(%0, %1)";"
+ :
+ : "b" (offset), "b" (crb)
+ : "memory");
+
+ return 0;
+}
+
+static inline int vas_paste(void *paste_address, int offset)
+{
+ u32 cr;
+
+ cr = 0;
+ asm volatile(PPC_PASTE(%1, %2)";"
+ "mfocrf %0, 0x80;"
+ : "=r" (cr)
+ : "b" (offset), "b" (paste_address)
+ : "memory", "cr0");
+
+ return (cr >> CR0_SHIFT) & CR0_MASK;
+}
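
For illustration, a minimal caller sketch built on the two helpers above (not part of this patch): load a coprocessor request block with vas_copy(), push it through a window's paste address with vas_paste(), and check the returned CR0 field. The CRB layout, the paste-address mapping and the CR0 encoding that means "request accepted" are assumptions here, not defined by this header.

	#include <linux/errno.h>
	#include "copy-paste.h"

	/* Hypothetical: which CR0 value signals acceptance is accelerator-specific. */
	#define CSB_CC_ACCEPTED		2

	static int submit_crb(void *crb, void *paste_addr)
	{
		int cr0;

		vas_copy(crb, 0);		/* fill the core's copy buffer */
		cr0 = vas_paste(paste_addr, 0);	/* push it through the paste window */

		return (cr0 == CSB_CC_ACCEPTED) ? 0 : -EBUSY;
	}
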
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index 3f48f6df1cf3..8864065eba22 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -113,7 +113,6 @@ static ssize_t pnv_eeh_ei_write(struct file *filp,
size_t count, loff_t *ppos)
{
struct pci_controller *hose = filp->private_data;
- struct eeh_dev *edev;
struct eeh_pe *pe;
int pe_no, type, func;
unsigned long addr, mask;
@@ -135,13 +134,7 @@ static ssize_t pnv_eeh_ei_write(struct file *filp,
return -EINVAL;
/* Retrieve PE */
- edev = kzalloc(sizeof(*edev), GFP_KERNEL);
- if (!edev)
- return -ENOMEM;
- edev->phb = hose;
- edev->pe_config_addr = pe_no;
- pe = eeh_pe_get(edev);
- kfree(edev);
+ pe = eeh_pe_get(hose, pe_no, 0);
if (!pe)
return -ENODEV;
@@ -359,6 +352,7 @@ static void *pnv_eeh_probe(struct pci_dn *pdn, void *data)
struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
uint32_t pcie_flags;
int ret;
+ int config_addr = (pdn->busno << 8) | (pdn->devfn);
/*
* When probing the root bridge, which doesn't have any
@@ -393,8 +387,7 @@ static void *pnv_eeh_probe(struct pci_dn *pdn, void *data)
}
}
- edev->config_addr = (pdn->busno << 8) | (pdn->devfn);
- edev->pe_config_addr = phb->ioda.pe_rmap[edev->config_addr];
+ edev->pe_config_addr = phb->ioda.pe_rmap[config_addr];
/* Create PE */
ret = eeh_add_to_parent_pe(edev);
@@ -933,7 +926,6 @@ void pnv_pci_reset_secondary_bus(struct pci_dev *dev)
static void pnv_eeh_wait_for_pending(struct pci_dn *pdn, const char *type,
int pos, u16 mask)
{
- struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
int i, status = 0;
/* Wait for Transaction Pending bit to be cleared */
@@ -947,7 +939,7 @@ static void pnv_eeh_wait_for_pending(struct pci_dn *pdn, const char *type,
pr_warn("%s: Pending transaction while issuing %sFLR to %04x:%02x:%02x.%01x\n",
__func__, type,
- edev->phb->global_number, pdn->busno,
+ pdn->phb->global_number, pdn->busno,
PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));
}
@@ -1381,7 +1373,6 @@ static int pnv_eeh_get_pe(struct pci_controller *hose,
struct pnv_phb *phb = hose->private_data;
struct pnv_ioda_pe *pnv_pe;
struct eeh_pe *dev_pe;
- struct eeh_dev edev;
/*
* If PHB supports compound PE, to fetch
@@ -1397,10 +1388,7 @@ static int pnv_eeh_get_pe(struct pci_controller *hose,
}
/* Find the PE according to PE# */
- memset(&edev, 0, sizeof(struct eeh_dev));
- edev.phb = hose;
- edev.pe_config_addr = pe_no;
- dev_pe = eeh_pe_get(&edev);
+ dev_pe = eeh_pe_get(hose, pe_no, 0);
if (!dev_pe)
return -EEXIST;
@@ -1711,6 +1699,7 @@ static int pnv_eeh_restore_config(struct pci_dn *pdn)
struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
struct pnv_phb *phb;
s64 ret;
+ int config_addr = (pdn->busno << 8) | (pdn->devfn);
if (!edev)
return -EEXIST;
@@ -1725,14 +1714,14 @@ static int pnv_eeh_restore_config(struct pci_dn *pdn)
if (edev->physfn) {
ret = pnv_eeh_restore_vf_config(pdn);
} else {
- phb = edev->phb->private_data;
+ phb = pdn->phb->private_data;
ret = opal_pci_reinit(phb->opal_id,
- OPAL_REINIT_PCI_DEV, edev->config_addr);
+ OPAL_REINIT_PCI_DEV, config_addr);
}
if (ret) {
pr_warn("%s: Can't reinit PCI dev 0x%x (%lld)\n",
- __func__, edev->config_addr, ret);
+ __func__, config_addr, ret);
return -EIO;
}
diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c
index 2abee070373f..9f59041a172b 100644
--- a/arch/powerpc/platforms/powernv/idle.c
+++ b/arch/powerpc/platforms/powernv/idle.c
@@ -56,6 +56,7 @@ u64 pnv_first_deep_stop_state = MAX_STOP_STATE;
*/
static u64 pnv_deepest_stop_psscr_val;
static u64 pnv_deepest_stop_psscr_mask;
+static u64 pnv_deepest_stop_flag;
static bool deepest_stop_found;
static int pnv_save_sprs_for_deep_states(void)
@@ -68,7 +69,7 @@ static int pnv_save_sprs_for_deep_states(void)
* all cpus at boot. Get these reg values of current cpu and use the
* same across all cpus.
*/
- uint64_t lpcr_val = mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1;
+ uint64_t lpcr_val = mfspr(SPRN_LPCR);
uint64_t hid0_val = mfspr(SPRN_HID0);
uint64_t hid1_val = mfspr(SPRN_HID1);
uint64_t hid4_val = mfspr(SPRN_HID4);
@@ -185,8 +186,40 @@ static void pnv_alloc_idle_core_states(void)
update_subcore_sibling_mask();
- if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT)
- pnv_save_sprs_for_deep_states();
+ if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT) {
+ int rc = pnv_save_sprs_for_deep_states();
+
+ if (likely(!rc))
+ return;
+
+ /*
+ * The stop-api is unable to restore hypervisor
+ * resources on wakeup from platform idle states which
+ * lose full context. So disable such states.
+ */
+ supported_cpuidle_states &= ~OPAL_PM_LOSE_FULL_CONTEXT;
+ pr_warn("cpuidle-powernv: Disabling idle states that lose full context\n");
+ pr_warn("cpuidle-powernv: Idle power-savings, CPU-Hotplug affected\n");
+
+ if (cpu_has_feature(CPU_FTR_ARCH_300) &&
+ (pnv_deepest_stop_flag & OPAL_PM_LOSE_FULL_CONTEXT)) {
+ /*
+ * Use the default stop state for CPU-Hotplug
+ * if available.
+ */
+ if (default_stop_found) {
+ pnv_deepest_stop_psscr_val =
+ pnv_default_stop_val;
+ pnv_deepest_stop_psscr_mask =
+ pnv_default_stop_mask;
+ pr_warn("cpuidle-powernv: Offlined CPUs will stop with psscr = 0x%016llx\n",
+ pnv_deepest_stop_psscr_val);
+ } else { /* Fallback to snooze loop for CPU-Hotplug */
+ deepest_stop_found = false;
+ pr_warn("cpuidle-powernv: Offlined CPUs will busy wait\n");
+ }
+ }
+ }
}
u32 pnv_get_supported_cpuidle_states(void)
@@ -355,6 +388,14 @@ void power9_idle(void)
}
#ifdef CONFIG_HOTPLUG_CPU
+static void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val)
+{
+ u64 pir = get_hard_smp_processor_id(cpu);
+
+ mtspr(SPRN_LPCR, lpcr_val);
+ opal_slw_set_reg(pir, SPRN_LPCR, lpcr_val);
+}
+
/*
* pnv_cpu_offline: A function that puts the CPU into the deepest
* available platform idle state on a CPU-Offline.
@@ -364,6 +405,20 @@ unsigned long pnv_cpu_offline(unsigned int cpu)
{
unsigned long srr1;
u32 idle_states = pnv_get_supported_cpuidle_states();
+ u64 lpcr_val;
+
+ /*
+ * We don't want to take decrementer interrupts while we are
+ * offline, so clear LPCR:PECE1. We keep PECE2 (and
+ * LPCR_PECE_HVEE on P9) enabled as to let IPIs in.
+ *
+ * If the CPU gets woken up by a special wakeup, ensure that
+ * the SLW engine sets LPCR with decrementer bit cleared, else
+ * the CPU will come back to the kernel due to a spurious
+ * wakeup.
+ */
+ lpcr_val = mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1;
+ pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val);
__ppc64_runlatch_off();
@@ -375,7 +430,8 @@ unsigned long pnv_cpu_offline(unsigned int cpu)
pnv_deepest_stop_psscr_val;
srr1 = power9_idle_stop(psscr);
- } else if (idle_states & OPAL_PM_WINKLE_ENABLED) {
+ } else if ((idle_states & OPAL_PM_WINKLE_ENABLED) &&
+ (idle_states & OPAL_PM_LOSE_FULL_CONTEXT)) {
srr1 = power7_idle_insn(PNV_THREAD_WINKLE);
} else if ((idle_states & OPAL_PM_SLEEP_ENABLED) ||
(idle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
@@ -394,6 +450,16 @@ unsigned long pnv_cpu_offline(unsigned int cpu)
__ppc64_runlatch_on();
+ /*
+ * Re-enable decrementer interrupts in LPCR.
+ *
+ * Further, we want stop states to be woken up by decrementer
+ * for non-hotplug cases. So program the LPCR via stop api as
+ * well.
+ */
+ lpcr_val = mfspr(SPRN_LPCR) | (u64)LPCR_PECE1;
+ pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val);
+
return srr1;
}
#endif
@@ -553,6 +619,7 @@ static int __init pnv_power9_idle_init(struct device_node *np, u32 *flags,
max_residency_ns = residency_ns[i];
pnv_deepest_stop_psscr_val = psscr_val[i];
pnv_deepest_stop_psscr_mask = psscr_mask[i];
+ pnv_deepest_stop_flag = flags[i];
deepest_stop_found = true;
}
diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c
new file mode 100644
index 000000000000..de470caf0784
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/memtrace.c
@@ -0,0 +1,282 @@
+/*
+ * Copyright (C) IBM Corporation, 2014, 2017
+ * Anton Blanchard, Rashmica Gupta.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) "memtrace: " fmt
+
+#include <linux/bitops.h>
+#include <linux/string.h>
+#include <linux/memblock.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/memory.h>
+#include <linux/memory_hotplug.h>
+#include <asm/machdep.h>
+#include <asm/debugfs.h>
+
+/* This enables us to keep track of the memory removed from each node. */
+struct memtrace_entry {
+ void *mem;
+ u64 start;
+ u64 size;
+ u32 nid;
+ struct dentry *dir;
+ char name[16];
+};
+
+static u64 memtrace_size;
+
+static struct memtrace_entry *memtrace_array;
+static unsigned int memtrace_array_nr;
+
+
+static ssize_t memtrace_read(struct file *filp, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct memtrace_entry *ent = filp->private_data;
+
+ return simple_read_from_buffer(ubuf, count, ppos, ent->mem, ent->size);
+}
+
+static bool valid_memtrace_range(struct memtrace_entry *dev,
+ unsigned long start, unsigned long size)
+{
+ if ((start >= dev->start) &&
+ ((start + size) <= (dev->start + dev->size)))
+ return true;
+
+ return false;
+}
+
+static int memtrace_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ unsigned long size = vma->vm_end - vma->vm_start;
+ struct memtrace_entry *dev = filp->private_data;
+
+ if (!valid_memtrace_range(dev, vma->vm_pgoff << PAGE_SHIFT, size))
+ return -EINVAL;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ if (remap_pfn_range(vma, vma->vm_start,
+ vma->vm_pgoff + (dev->start >> PAGE_SHIFT),
+ size, vma->vm_page_prot))
+ return -EAGAIN;
+
+ return 0;
+}
+
+static const struct file_operations memtrace_fops = {
+ .llseek = default_llseek,
+ .read = memtrace_read,
+ .mmap = memtrace_mmap,
+ .open = simple_open,
+};
+
+static void flush_memory_region(u64 base, u64 size)
+{
+ unsigned long line_size = ppc64_caches.l1d.size;
+ u64 end = base + size;
+ u64 addr;
+
+ base = round_down(base, line_size);
+ end = round_up(end, line_size);
+
+ for (addr = base; addr < end; addr += line_size)
+ asm volatile("dcbf 0,%0" : "=r" (addr) :: "memory");
+}
+
+static int check_memblock_online(struct memory_block *mem, void *arg)
+{
+ if (mem->state != MEM_ONLINE)
+ return -1;
+
+ return 0;
+}
+
+static int change_memblock_state(struct memory_block *mem, void *arg)
+{
+ unsigned long state = (unsigned long)arg;
+
+ mem->state = state;
+
+ return 0;
+}
+
+static bool memtrace_offline_pages(u32 nid, u64 start_pfn, u64 nr_pages)
+{
+ u64 end_pfn = start_pfn + nr_pages - 1;
+
+ if (walk_memory_range(start_pfn, end_pfn, NULL,
+ check_memblock_online))
+ return false;
+
+ walk_memory_range(start_pfn, end_pfn, (void *)MEM_GOING_OFFLINE,
+ change_memblock_state);
+
+ if (offline_pages(start_pfn, nr_pages)) {
+ walk_memory_range(start_pfn, end_pfn, (void *)MEM_ONLINE,
+ change_memblock_state);
+ return false;
+ }
+
+ walk_memory_range(start_pfn, end_pfn, (void *)MEM_OFFLINE,
+ change_memblock_state);
+
+ /* RCU grace period? */
+ flush_memory_region((u64)__va(start_pfn << PAGE_SHIFT),
+ nr_pages << PAGE_SHIFT);
+
+ lock_device_hotplug();
+ remove_memory(nid, start_pfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT);
+ unlock_device_hotplug();
+
+ return true;
+}
+
+static u64 memtrace_alloc_node(u32 nid, u64 size)
+{
+ u64 start_pfn, end_pfn, nr_pages;
+ u64 base_pfn;
+
+ if (!NODE_DATA(nid) || !node_spanned_pages(nid))
+ return 0;
+
+ start_pfn = node_start_pfn(nid);
+ end_pfn = node_end_pfn(nid);
+ nr_pages = size >> PAGE_SHIFT;
+
+ /* Trace memory needs to be aligned to the size */
+ end_pfn = round_down(end_pfn - nr_pages, nr_pages);
+
+ for (base_pfn = end_pfn; base_pfn > start_pfn; base_pfn -= nr_pages) {
+ if (memtrace_offline_pages(nid, base_pfn, nr_pages) == true)
+ return base_pfn << PAGE_SHIFT;
+ }
+
+ return 0;
+}
+
+static int memtrace_init_regions_runtime(u64 size)
+{
+ u32 nid;
+ u64 m;
+
+ memtrace_array = kcalloc(num_online_nodes(),
+ sizeof(struct memtrace_entry), GFP_KERNEL);
+ if (!memtrace_array) {
+ pr_err("Failed to allocate memtrace_array\n");
+ return -EINVAL;
+ }
+
+ for_each_online_node(nid) {
+ m = memtrace_alloc_node(nid, size);
+
+ /*
+ * A node might not have any local memory, so warn but
+ * continue on.
+ */
+ if (!m) {
+ pr_err("Failed to allocate trace memory on node %d\n", nid);
+ continue;
+ }
+
+ pr_info("Allocated trace memory on node %d at 0x%016llx\n", nid, m);
+
+ memtrace_array[memtrace_array_nr].start = m;
+ memtrace_array[memtrace_array_nr].size = size;
+ memtrace_array[memtrace_array_nr].nid = nid;
+ memtrace_array_nr++;
+ }
+
+ return 0;
+}
+
+static struct dentry *memtrace_debugfs_dir;
+
+static int memtrace_init_debugfs(void)
+{
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < memtrace_array_nr; i++) {
+ struct dentry *dir;
+ struct memtrace_entry *ent = &memtrace_array[i];
+
+ ent->mem = ioremap(ent->start, ent->size);
+ /* Warn but continue on */
+ if (!ent->mem) {
+ pr_err("Failed to map trace memory at 0x%llx\n",
+ ent->start);
+ ret = -1;
+ continue;
+ }
+
+ snprintf(ent->name, 16, "%08x", ent->nid);
+ dir = debugfs_create_dir(ent->name, memtrace_debugfs_dir);
+ if (!dir)
+ return -1;
+
+ ent->dir = dir;
+ debugfs_create_file("trace", 0400, dir, ent, &memtrace_fops);
+ debugfs_create_x64("start", 0400, dir, &ent->start);
+ debugfs_create_x64("size", 0400, dir, &ent->size);
+ }
+
+ return ret;
+}
+
+static int memtrace_enable_set(void *data, u64 val)
+{
+ if (memtrace_size)
+ return -EINVAL;
+
+ if (!val)
+ return -EINVAL;
+
+ /* Make sure size is aligned to a memory block */
+ if (val & (memory_block_size_bytes() - 1))
+ return -EINVAL;
+
+ if (memtrace_init_regions_runtime(val))
+ return -EINVAL;
+
+ if (memtrace_init_debugfs())
+ return -EINVAL;
+
+ memtrace_size = val;
+
+ return 0;
+}
+
+static int memtrace_enable_get(void *data, u64 *val)
+{
+ *val = memtrace_size;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(memtrace_init_fops, memtrace_enable_get,
+ memtrace_enable_set, "0x%016llx\n");
+
+static int memtrace_init(void)
+{
+ memtrace_debugfs_dir = debugfs_create_dir("memtrace",
+ powerpc_debugfs_root);
+ if (!memtrace_debugfs_dir)
+ return -1;
+
+ debugfs_create_file("enable", 0600, memtrace_debugfs_dir,
+ NULL, &memtrace_init_fops);
+
+ return 0;
+}
+machine_device_initcall(powernv, memtrace_init);
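
A user-space sketch of exercising this interface (paths assumed from the debugfs hierarchy created above, with debugfs mounted at /sys/kernel/debug): write a memory-block-aligned size to "enable", then read a node's "trace" file. The 256MB size and the node-id directory name "00000000" are assumptions.

	/* Hypothetical user-space example; the size must be a multiple of the
	 * memory block size, and node directories are named "%08x" by node id. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[4096];
		int fd;

		fd = open("/sys/kernel/debug/powerpc/memtrace/enable", O_WRONLY);
		if (fd < 0)
			return 1;
		if (write(fd, "0x10000000", 10) < 0)	/* 256MB per node */
			perror("write");
		close(fd);

		fd = open("/sys/kernel/debug/powerpc/memtrace/00000000/trace", O_RDONLY);
		if (fd < 0)
			return 1;
		if (read(fd, buf, sizeof(buf)) < 0)
			perror("read");
		close(fd);
		return 0;
	}
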
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index b5d960d6db3d..2cb6cbea4b3b 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -546,6 +546,12 @@ static void mmio_invalidate(struct npu_context *npu_context, int va,
unsigned long pid = npu_context->mm->context.id;
/*
+ * Unfortunately the nest mmu does not support flushing specific
+ * addresses so we have to flush the whole mm.
+ */
+ flush_tlb_mm(npu_context->mm);
+
+ /*
* Loop over all the NPUs this process is active on and launch
* an invalidate.
*/
@@ -576,12 +582,6 @@ static void mmio_invalidate(struct npu_context *npu_context, int va,
}
}
- /*
- * Unfortunately the nest mmu does not support flushing specific
- * addresses so we have to flush the whole mm.
- */
- flush_tlb_mm(npu_context->mm);
-
mmio_invalidate_wait(mmio_atsd_reg, flush);
if (flush)
/* Wait for the flush to complete */
@@ -614,15 +614,6 @@ static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
mmio_invalidate(npu_context, 1, address, true);
}
-static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long address)
-{
- struct npu_context *npu_context = mn_to_npu_context(mn);
-
- mmio_invalidate(npu_context, 1, address, true);
-}
-
static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start, unsigned long end)
@@ -640,7 +631,6 @@ static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {
.release = pnv_npu2_mn_release,
.change_pte = pnv_npu2_mn_change_pte,
- .invalidate_page = pnv_npu2_mn_invalidate_page,
.invalidate_range = pnv_npu2_mn_invalidate_range,
};
diff --git a/arch/powerpc/platforms/powernv/opal-async.c b/arch/powerpc/platforms/powernv/opal-async.c
index 83bebeec0fea..cf33769a7b72 100644
--- a/arch/powerpc/platforms/powernv/opal-async.c
+++ b/arch/powerpc/platforms/powernv/opal-async.c
@@ -171,8 +171,8 @@ int __init opal_async_comp_init(void)
async = of_get_property(opal_node, "opal-msg-async-num", NULL);
if (!async) {
- pr_err("%s: %s has no opal-msg-async-num\n",
- __func__, opal_node->full_name);
+ pr_err("%s: %pOF has no opal-msg-async-num\n",
+ __func__, opal_node);
err = -ENOENT;
goto out_opal_node;
}
diff --git a/arch/powerpc/platforms/powernv/opal-flash.c b/arch/powerpc/platforms/powernv/opal-flash.c
index 4ec6219287fc..2fa3ac80cb4e 100644
--- a/arch/powerpc/platforms/powernv/opal-flash.c
+++ b/arch/powerpc/platforms/powernv/opal-flash.c
@@ -520,7 +520,7 @@ out:
* update_flash : Flash new firmware image
*
*/
-static struct bin_attribute image_data_attr = {
+static const struct bin_attribute image_data_attr = {
.attr = {.name = "image", .mode = 0200},
.size = MAX_IMAGE_SIZE, /* Limit image size */
.write = image_data_write,
diff --git a/arch/powerpc/platforms/powernv/opal-hmi.c b/arch/powerpc/platforms/powernv/opal-hmi.c
index 88f3c61eec95..d78fed728cdf 100644
--- a/arch/powerpc/platforms/powernv/opal-hmi.c
+++ b/arch/powerpc/platforms/powernv/opal-hmi.c
@@ -30,6 +30,8 @@
#include <asm/cputable.h>
#include <asm/machdep.h>
+#include "powernv.h"
+
static int opal_hmi_handler_nb_init;
struct OpalHmiEvtNode {
struct list_head list;
@@ -267,8 +269,6 @@ static void hmi_event_handler(struct work_struct *work)
spin_unlock_irqrestore(&opal_hmi_evt_lock, flags);
if (unrecoverable) {
- int ret;
-
/* Pull all HMI events from OPAL before we panic. */
while (opal_get_msg(__pa(&msg), sizeof(msg)) == OPAL_SUCCESS) {
u32 type;
@@ -284,23 +284,7 @@ static void hmi_event_handler(struct work_struct *work)
print_hmi_event_info(hmi_evt);
}
- /*
- * Unrecoverable HMI exception. We need to inform BMC/OCC
- * about this error so that it can collect relevant data
- * for error analysis before rebooting.
- */
- ret = opal_cec_reboot2(OPAL_REBOOT_PLATFORM_ERROR,
- "Unrecoverable HMI exception");
- if (ret == OPAL_UNSUPPORTED) {
- pr_emerg("Reboot type %d not supported\n",
- OPAL_REBOOT_PLATFORM_ERROR);
- }
-
- /*
- * Fall through and panic if opal_cec_reboot2() returns
- * OPAL_UNSUPPORTED.
- */
- panic("Unrecoverable HMI exception");
+ pnv_platform_error_reboot(NULL, "Unrecoverable HMI exception");
}
}
diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c
new file mode 100644
index 000000000000..21f6531fae20
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/opal-imc.c
@@ -0,0 +1,226 @@
+/*
+ * OPAL IMC interface detection driver
+ * Supported on POWERNV platform
+ *
+ * Copyright (C) 2017 Madhavan Srinivasan, IBM Corporation.
+ * (C) 2017 Anju T Sudhakar, IBM Corporation.
+ * (C) 2017 Hemant K Shaw, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or later version.
+ */
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/crash_dump.h>
+#include <asm/opal.h>
+#include <asm/io.h>
+#include <asm/imc-pmu.h>
+#include <asm/cputhreads.h>
+
+/*
+ * imc_get_mem_addr_nest: Function to get nest counter memory region
+ * for each chip
+ */
+static int imc_get_mem_addr_nest(struct device_node *node,
+ struct imc_pmu *pmu_ptr,
+ u32 offset)
+{
+ int nr_chips = 0, i;
+ u64 *base_addr_arr, baddr;
+ u32 *chipid_arr;
+
+ nr_chips = of_property_count_u32_elems(node, "chip-id");
+ if (nr_chips <= 0)
+ return -ENODEV;
+
+ base_addr_arr = kcalloc(nr_chips, sizeof(u64), GFP_KERNEL);
+ if (!base_addr_arr)
+ return -ENOMEM;
+
+ chipid_arr = kcalloc(nr_chips, sizeof(u32), GFP_KERNEL);
+ if (!chipid_arr)
+ return -ENOMEM;
+
+ if (of_property_read_u32_array(node, "chip-id", chipid_arr, nr_chips))
+ goto error;
+
+ if (of_property_read_u64_array(node, "base-addr", base_addr_arr,
+ nr_chips))
+ goto error;
+
+ pmu_ptr->mem_info = kcalloc(nr_chips, sizeof(struct imc_mem_info),
+ GFP_KERNEL);
+ if (!pmu_ptr->mem_info)
+ goto error;
+
+ for (i = 0; i < nr_chips; i++) {
+ pmu_ptr->mem_info[i].id = chipid_arr[i];
+ baddr = base_addr_arr[i] + offset;
+ pmu_ptr->mem_info[i].vbase = phys_to_virt(baddr);
+ }
+
+ pmu_ptr->imc_counter_mmaped = true;
+ kfree(base_addr_arr);
+ kfree(chipid_arr);
+ return 0;
+
+error:
+ kfree(pmu_ptr->mem_info);
+ kfree(base_addr_arr);
+ kfree(chipid_arr);
+ return -1;
+}
+
+/*
+ * imc_pmu_create : Takes the parent device which is the pmu unit, pmu_index
+ * and domain as the inputs.
+ * Allocates memory for the struct imc_pmu, sets up its domain, size and offsets
+ */
+static int imc_pmu_create(struct device_node *parent, int pmu_index, int domain)
+{
+ int ret = 0;
+ struct imc_pmu *pmu_ptr;
+ u32 offset;
+
+ /* memory for pmu */
+ pmu_ptr = kzalloc(sizeof(struct imc_pmu), GFP_KERNEL);
+ if (!pmu_ptr)
+ return -ENOMEM;
+
+ /* Set the domain */
+ pmu_ptr->domain = domain;
+
+ ret = of_property_read_u32(parent, "size", &pmu_ptr->counter_mem_size);
+ if (ret) {
+ ret = -EINVAL;
+ goto free_pmu;
+ }
+
+ if (!of_property_read_u32(parent, "offset", &offset)) {
+ if (imc_get_mem_addr_nest(parent, pmu_ptr, offset)) {
+ ret = -EINVAL;
+ goto free_pmu;
+ }
+ }
+
+ /* Function to register IMC pmu */
+ ret = init_imc_pmu(parent, pmu_ptr, pmu_index);
+ if (ret)
+ pr_err("IMC PMU %s Register failed\n", pmu_ptr->pmu.name);
+
+ return 0;
+
+free_pmu:
+ kfree(pmu_ptr);
+ return ret;
+}
+
+static void disable_nest_pmu_counters(void)
+{
+ int nid, cpu;
+ const struct cpumask *l_cpumask;
+
+ get_online_cpus();
+ for_each_online_node(nid) {
+ l_cpumask = cpumask_of_node(nid);
+ cpu = cpumask_first(l_cpumask);
+ opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
+ get_hard_smp_processor_id(cpu));
+ }
+ put_online_cpus();
+}
+
+static void disable_core_pmu_counters(void)
+{
+ cpumask_t cores_map;
+ int cpu, rc;
+
+ get_online_cpus();
+ /* Disable the IMC Core functions */
+ cores_map = cpu_online_cores_map();
+ for_each_cpu(cpu, &cores_map) {
+ rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
+ get_hard_smp_processor_id(cpu));
+ if (rc)
+ pr_err("%s: Failed to stop Core (cpu = %d)\n",
+ __FUNCTION__, cpu);
+ }
+ put_online_cpus();
+}
+
+static int opal_imc_counters_probe(struct platform_device *pdev)
+{
+ struct device_node *imc_dev = pdev->dev.of_node;
+ int pmu_count = 0, domain;
+ u32 type;
+
+ /*
+ * Check whether this is kdump kernel. If yes, force the engines to
+ * stop and return.
+ */
+ if (is_kdump_kernel()) {
+ disable_nest_pmu_counters();
+ disable_core_pmu_counters();
+ return -ENODEV;
+ }
+
+ for_each_compatible_node(imc_dev, NULL, IMC_DTB_UNIT_COMPAT) {
+ if (of_property_read_u32(imc_dev, "type", &type)) {
+ pr_warn("IMC Device without type property\n");
+ continue;
+ }
+
+ switch (type) {
+ case IMC_TYPE_CHIP:
+ domain = IMC_DOMAIN_NEST;
+ break;
+ case IMC_TYPE_CORE:
+ domain = IMC_DOMAIN_CORE;
+ break;
+ case IMC_TYPE_THREAD:
+ domain = IMC_DOMAIN_THREAD;
+ break;
+ default:
+ pr_warn("IMC Unknown Device type \n");
+ domain = -1;
+ break;
+ }
+
+ if (!imc_pmu_create(imc_dev, pmu_count, domain))
+ pmu_count++;
+ }
+
+ return 0;
+}
+
+static void opal_imc_counters_shutdown(struct platform_device *pdev)
+{
+ /*
+ * Function only stops the engines which is bare minimum.
+ * TODO: Need to handle proper memory cleanup and pmu
+ * unregister.
+ */
+ disable_nest_pmu_counters();
+ disable_core_pmu_counters();
+}
+
+static const struct of_device_id opal_imc_match[] = {
+ { .compatible = IMC_DTB_COMPAT },
+ {},
+};
+
+static struct platform_driver opal_imc_driver = {
+ .driver = {
+ .name = "opal-imc-counters",
+ .of_match_table = opal_imc_match,
+ },
+ .probe = opal_imc_counters_probe,
+ .shutdown = opal_imc_counters_shutdown,
+};
+
+builtin_platform_driver(opal_imc_driver);
diff --git a/arch/powerpc/platforms/powernv/opal-powercap.c b/arch/powerpc/platforms/powernv/opal-powercap.c
new file mode 100644
index 000000000000..badb29bde93f
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/opal-powercap.c
@@ -0,0 +1,244 @@
+/*
+ * PowerNV OPAL Powercap interface
+ *
+ * Copyright 2017 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) "opal-powercap: " fmt
+
+#include <linux/of.h>
+#include <linux/kobject.h>
+#include <linux/slab.h>
+
+#include <asm/opal.h>
+
+DEFINE_MUTEX(powercap_mutex);
+
+static struct kobject *powercap_kobj;
+
+struct powercap_attr {
+ u32 handle;
+ struct kobj_attribute attr;
+};
+
+static struct pcap {
+ struct attribute_group pg;
+ struct powercap_attr *pattrs;
+} *pcaps;
+
+static ssize_t powercap_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct powercap_attr *pcap_attr = container_of(attr,
+ struct powercap_attr, attr);
+ struct opal_msg msg;
+ u32 pcap;
+ int ret, token;
+
+ token = opal_async_get_token_interruptible();
+ if (token < 0) {
+ pr_devel("Failed to get token\n");
+ return token;
+ }
+
+ ret = mutex_lock_interruptible(&powercap_mutex);
+ if (ret)
+ goto out_token;
+
+ ret = opal_get_powercap(pcap_attr->handle, token, (u32 *)__pa(&pcap));
+ switch (ret) {
+ case OPAL_ASYNC_COMPLETION:
+ ret = opal_async_wait_response(token, &msg);
+ if (ret) {
+ pr_devel("Failed to wait for the async response\n");
+ ret = -EIO;
+ goto out;
+ }
+ ret = opal_error_code(opal_get_async_rc(msg));
+ if (!ret) {
+ ret = sprintf(buf, "%u\n", be32_to_cpu(pcap));
+ if (ret < 0)
+ ret = -EIO;
+ }
+ break;
+ case OPAL_SUCCESS:
+ ret = sprintf(buf, "%u\n", be32_to_cpu(pcap));
+ if (ret < 0)
+ ret = -EIO;
+ break;
+ default:
+ ret = opal_error_code(ret);
+ }
+
+out:
+ mutex_unlock(&powercap_mutex);
+out_token:
+ opal_async_release_token(token);
+ return ret;
+}
+
+static ssize_t powercap_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct powercap_attr *pcap_attr = container_of(attr,
+ struct powercap_attr, attr);
+ struct opal_msg msg;
+ u32 pcap;
+ int ret, token;
+
+ ret = kstrtoint(buf, 0, &pcap);
+ if (ret)
+ return ret;
+
+ token = opal_async_get_token_interruptible();
+ if (token < 0) {
+ pr_devel("Failed to get token\n");
+ return token;
+ }
+
+ ret = mutex_lock_interruptible(&powercap_mutex);
+ if (ret)
+ goto out_token;
+
+ ret = opal_set_powercap(pcap_attr->handle, token, pcap);
+ switch (ret) {
+ case OPAL_ASYNC_COMPLETION:
+ ret = opal_async_wait_response(token, &msg);
+ if (ret) {
+ pr_devel("Failed to wait for the async response\n");
+ ret = -EIO;
+ goto out;
+ }
+ ret = opal_error_code(opal_get_async_rc(msg));
+ if (!ret)
+ ret = count;
+ break;
+ case OPAL_SUCCESS:
+ ret = count;
+ break;
+ default:
+ ret = opal_error_code(ret);
+ }
+
+out:
+ mutex_unlock(&powercap_mutex);
+out_token:
+ opal_async_release_token(token);
+ return ret;
+}
+
+static void powercap_add_attr(int handle, const char *name,
+ struct powercap_attr *attr)
+{
+ attr->handle = handle;
+ sysfs_attr_init(&attr->attr.attr);
+ attr->attr.attr.name = name;
+ attr->attr.attr.mode = 0444;
+ attr->attr.show = powercap_show;
+}
+
+void __init opal_powercap_init(void)
+{
+ struct device_node *powercap, *node;
+ int i = 0;
+
+ powercap = of_find_compatible_node(NULL, NULL, "ibm,opal-powercap");
+ if (!powercap) {
+ pr_devel("Powercap node not found\n");
+ return;
+ }
+
+ pcaps = kcalloc(of_get_child_count(powercap), sizeof(*pcaps),
+ GFP_KERNEL);
+ if (!pcaps)
+ return;
+
+ powercap_kobj = kobject_create_and_add("powercap", opal_kobj);
+ if (!powercap_kobj) {
+ pr_warn("Failed to create powercap kobject\n");
+ goto out_pcaps;
+ }
+
+ i = 0;
+ for_each_child_of_node(powercap, node) {
+ u32 cur, min, max;
+ int j = 0;
+ bool has_cur = false, has_min = false, has_max = false;
+
+ if (!of_property_read_u32(node, "powercap-min", &min)) {
+ j++;
+ has_min = true;
+ }
+
+ if (!of_property_read_u32(node, "powercap-max", &max)) {
+ j++;
+ has_max = true;
+ }
+
+ if (!of_property_read_u32(node, "powercap-current", &cur)) {
+ j++;
+ has_cur = true;
+ }
+
+ pcaps[i].pattrs = kcalloc(j, sizeof(struct powercap_attr),
+ GFP_KERNEL);
+ if (!pcaps[i].pattrs)
+ goto out_pcaps_pattrs;
+
+ pcaps[i].pg.attrs = kcalloc(j + 1, sizeof(struct attribute *),
+ GFP_KERNEL);
+ if (!pcaps[i].pg.attrs) {
+ kfree(pcaps[i].pattrs);
+ goto out_pcaps_pattrs;
+ }
+
+ j = 0;
+ pcaps[i].pg.name = node->name;
+ if (has_min) {
+ powercap_add_attr(min, "powercap-min",
+ &pcaps[i].pattrs[j]);
+ pcaps[i].pg.attrs[j] = &pcaps[i].pattrs[j].attr.attr;
+ j++;
+ }
+
+ if (has_max) {
+ powercap_add_attr(max, "powercap-max",
+ &pcaps[i].pattrs[j]);
+ pcaps[i].pg.attrs[j] = &pcaps[i].pattrs[j].attr.attr;
+ j++;
+ }
+
+ if (has_cur) {
+ powercap_add_attr(cur, "powercap-current",
+ &pcaps[i].pattrs[j]);
+ pcaps[i].pattrs[j].attr.attr.mode |= 0220;
+ pcaps[i].pattrs[j].attr.store = powercap_store;
+ pcaps[i].pg.attrs[j] = &pcaps[i].pattrs[j].attr.attr;
+ j++;
+ }
+
+ if (sysfs_create_group(powercap_kobj, &pcaps[i].pg)) {
+ pr_warn("Failed to create powercap attribute group %s\n",
+ pcaps[i].pg.name);
+ goto out_pcaps_pattrs;
+ }
+ i++;
+ }
+
+ return;
+
+out_pcaps_pattrs:
+ while (--i >= 0) {
+ kfree(pcaps[i].pattrs);
+ kfree(pcaps[i].pg.attrs);
+ }
+ kobject_put(powercap_kobj);
+out_pcaps:
+ kfree(pcaps);
+}
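
The attribute group registered above appears under /sys/firmware/opal/powercap/<group-name>/ (opal_kobj is the "opal" kobject under /sys/firmware), with powercap-min and powercap-max read-only and powercap-current also writable. A user-space sketch follows; the group directory name "system-powercap" and the watts unit are placeholder assumptions.

	/* Hypothetical example; the group directory name comes from the device
	 * tree node name and is assumed here. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[32];
		int fd, n;

		fd = open("/sys/firmware/opal/powercap/system-powercap/powercap-current",
			  O_RDWR);
		if (fd < 0)
			return 1;
		n = read(fd, buf, sizeof(buf) - 1);
		if (n > 0) {
			buf[n] = '\0';
			printf("current cap: %s", buf);
		}
		/* Request a new cap (assumed to be in watts). */
		if (pwrite(fd, "2000", 4, 0) < 0)
			perror("pwrite");
		close(fd);
		return 0;
	}
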
diff --git a/arch/powerpc/platforms/powernv/opal-prd.c b/arch/powerpc/platforms/powernv/opal-prd.c
index 2d6ee1c5ad85..de4dd09f4a15 100644
--- a/arch/powerpc/platforms/powernv/opal-prd.c
+++ b/arch/powerpc/platforms/powernv/opal-prd.c
@@ -241,15 +241,9 @@ static ssize_t opal_prd_write(struct file *file, const char __user *buf,
size = be16_to_cpu(hdr.size);
- msg = kmalloc(size, GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
-
- rc = copy_from_user(msg, buf, size);
- if (rc) {
- size = -EFAULT;
- goto out_free;
- }
+ msg = memdup_user(buf, size);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
rc = opal_prd_msg(msg);
if (rc) {
@@ -257,7 +251,6 @@ static ssize_t opal_prd_write(struct file *file, const char __user *buf,
size = -EIO;
}
-out_free:
kfree(msg);
return size;
diff --git a/arch/powerpc/platforms/powernv/opal-psr.c b/arch/powerpc/platforms/powernv/opal-psr.c
new file mode 100644
index 000000000000..7313b7fc9071
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/opal-psr.c
@@ -0,0 +1,175 @@
+/*
+ * PowerNV OPAL Power-Shift-Ratio interface
+ *
+ * Copyright 2017 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) "opal-psr: " fmt
+
+#include <linux/of.h>
+#include <linux/kobject.h>
+#include <linux/slab.h>
+
+#include <asm/opal.h>
+
+DEFINE_MUTEX(psr_mutex);
+
+static struct kobject *psr_kobj;
+
+struct psr_attr {
+ u32 handle;
+ struct kobj_attribute attr;
+} *psr_attrs;
+
+static ssize_t psr_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct psr_attr *psr_attr = container_of(attr, struct psr_attr, attr);
+ struct opal_msg msg;
+ int psr, ret, token;
+
+ token = opal_async_get_token_interruptible();
+ if (token < 0) {
+ pr_devel("Failed to get token\n");
+ return token;
+ }
+
+ ret = mutex_lock_interruptible(&psr_mutex);
+ if (ret)
+ goto out_token;
+
+ ret = opal_get_power_shift_ratio(psr_attr->handle, token,
+ (u32 *)__pa(&psr));
+ switch (ret) {
+ case OPAL_ASYNC_COMPLETION:
+ ret = opal_async_wait_response(token, &msg);
+ if (ret) {
+ pr_devel("Failed to wait for the async response\n");
+ ret = -EIO;
+ goto out;
+ }
+ ret = opal_error_code(opal_get_async_rc(msg));
+ if (!ret) {
+ ret = sprintf(buf, "%u\n", be32_to_cpu(psr));
+ if (ret < 0)
+ ret = -EIO;
+ }
+ break;
+ case OPAL_SUCCESS:
+ ret = sprintf(buf, "%u\n", be32_to_cpu(psr));
+ if (ret < 0)
+ ret = -EIO;
+ break;
+ default:
+ ret = opal_error_code(ret);
+ }
+
+out:
+ mutex_unlock(&psr_mutex);
+out_token:
+ opal_async_release_token(token);
+ return ret;
+}
+
+static ssize_t psr_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct psr_attr *psr_attr = container_of(attr, struct psr_attr, attr);
+ struct opal_msg msg;
+ int psr, ret, token;
+
+ ret = kstrtoint(buf, 0, &psr);
+ if (ret)
+ return ret;
+
+ token = opal_async_get_token_interruptible();
+ if (token < 0) {
+ pr_devel("Failed to get token\n");
+ return token;
+ }
+
+ ret = mutex_lock_interruptible(&psr_mutex);
+ if (ret)
+ goto out_token;
+
+ ret = opal_set_power_shift_ratio(psr_attr->handle, token, psr);
+ switch (ret) {
+ case OPAL_ASYNC_COMPLETION:
+ ret = opal_async_wait_response(token, &msg);
+ if (ret) {
+ pr_devel("Failed to wait for the async response\n");
+ ret = -EIO;
+ goto out;
+ }
+ ret = opal_error_code(opal_get_async_rc(msg));
+ if (!ret)
+ ret = count;
+ break;
+ case OPAL_SUCCESS:
+ ret = count;
+ break;
+ default:
+ ret = opal_error_code(ret);
+ }
+
+out:
+ mutex_unlock(&psr_mutex);
+out_token:
+ opal_async_release_token(token);
+ return ret;
+}
+
+void __init opal_psr_init(void)
+{
+ struct device_node *psr, *node;
+ int i = 0;
+
+ psr = of_find_compatible_node(NULL, NULL,
+ "ibm,opal-power-shift-ratio");
+ if (!psr) {
+ pr_devel("Power-shift-ratio node not found\n");
+ return;
+ }
+
+ psr_attrs = kcalloc(of_get_child_count(psr), sizeof(struct psr_attr),
+ GFP_KERNEL);
+ if (!psr_attrs)
+ return;
+
+ psr_kobj = kobject_create_and_add("psr", opal_kobj);
+ if (!psr_kobj) {
+ pr_warn("Failed to create psr kobject\n");
+ goto out;
+ }
+
+ for_each_child_of_node(psr, node) {
+ if (of_property_read_u32(node, "handle",
+ &psr_attrs[i].handle))
+ goto out_kobj;
+
+ sysfs_attr_init(&psr_attrs[i].attr.attr);
+ if (of_property_read_string(node, "label",
+ &psr_attrs[i].attr.attr.name))
+ goto out_kobj;
+ psr_attrs[i].attr.attr.mode = 0664;
+ psr_attrs[i].attr.show = psr_show;
+ psr_attrs[i].attr.store = psr_store;
+ if (sysfs_create_file(psr_kobj, &psr_attrs[i].attr.attr)) {
+ pr_devel("Failed to create psr sysfs file %s\n",
+ psr_attrs[i].attr.attr.name);
+ goto out_kobj;
+ }
+ i++;
+ }
+
+ return;
+out_kobj:
+ kobject_put(psr_kobj);
+out:
+ kfree(psr_attrs);
+}
diff --git a/arch/powerpc/platforms/powernv/opal-sensor-groups.c b/arch/powerpc/platforms/powernv/opal-sensor-groups.c
new file mode 100644
index 000000000000..7e5a235ebf76
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/opal-sensor-groups.c
@@ -0,0 +1,212 @@
+/*
+ * PowerNV OPAL Sensor-groups interface
+ *
+ * Copyright 2017 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) "opal-sensor-groups: " fmt
+
+#include <linux/of.h>
+#include <linux/kobject.h>
+#include <linux/slab.h>
+
+#include <asm/opal.h>
+
+DEFINE_MUTEX(sg_mutex);
+
+static struct kobject *sg_kobj;
+
+struct sg_attr {
+ u32 handle;
+ struct kobj_attribute attr;
+};
+
+static struct sensor_group {
+ char name[20];
+ struct attribute_group sg;
+ struct sg_attr *sgattrs;
+} *sgs;
+
+static ssize_t sg_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct sg_attr *sattr = container_of(attr, struct sg_attr, attr);
+ struct opal_msg msg;
+ u32 data;
+ int ret, token;
+
+ ret = kstrtoint(buf, 0, &data);
+ if (ret)
+ return ret;
+
+ if (data != 1)
+ return -EINVAL;
+
+ token = opal_async_get_token_interruptible();
+ if (token < 0) {
+ pr_devel("Failed to get token\n");
+ return token;
+ }
+
+ ret = mutex_lock_interruptible(&sg_mutex);
+ if (ret)
+ goto out_token;
+
+ ret = opal_sensor_group_clear(sattr->handle, token);
+ switch (ret) {
+ case OPAL_ASYNC_COMPLETION:
+ ret = opal_async_wait_response(token, &msg);
+ if (ret) {
+ pr_devel("Failed to wait for the async response\n");
+ ret = -EIO;
+ goto out;
+ }
+ ret = opal_error_code(opal_get_async_rc(msg));
+ if (!ret)
+ ret = count;
+ break;
+ case OPAL_SUCCESS:
+ ret = count;
+ break;
+ default:
+ ret = opal_error_code(ret);
+ }
+
+out:
+ mutex_unlock(&sg_mutex);
+out_token:
+ opal_async_release_token(token);
+ return ret;
+}
+
+static struct sg_ops_info {
+ int opal_no;
+ const char *attr_name;
+ ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count);
+} ops_info[] = {
+ { OPAL_SENSOR_GROUP_CLEAR, "clear", sg_store },
+};
+
+static void add_attr(int handle, struct sg_attr *attr, int index)
+{
+ attr->handle = handle;
+ sysfs_attr_init(&attr->attr.attr);
+ attr->attr.attr.name = ops_info[index].attr_name;
+ attr->attr.attr.mode = 0220;
+ attr->attr.store = ops_info[index].store;
+}
+
+static int add_attr_group(const __be32 *ops, int len, struct sensor_group *sg,
+ u32 handle)
+{
+ int i, j;
+ int count = 0;
+
+ for (i = 0; i < len; i++)
+ for (j = 0; j < ARRAY_SIZE(ops_info); j++)
+ if (be32_to_cpu(ops[i]) == ops_info[j].opal_no) {
+ add_attr(handle, &sg->sgattrs[count], j);
+ sg->sg.attrs[count] =
+ &sg->sgattrs[count].attr.attr;
+ count++;
+ }
+
+ return sysfs_create_group(sg_kobj, &sg->sg);
+}
+
+static int get_nr_attrs(const __be32 *ops, int len)
+{
+ int i, j;
+ int nr_attrs = 0;
+
+ for (i = 0; i < len; i++)
+ for (j = 0; j < ARRAY_SIZE(ops_info); j++)
+ if (be32_to_cpu(ops[i]) == ops_info[j].opal_no)
+ nr_attrs++;
+
+ return nr_attrs;
+}
+
+void __init opal_sensor_groups_init(void)
+{
+ struct device_node *sg, *node;
+ int i = 0;
+
+ sg = of_find_compatible_node(NULL, NULL, "ibm,opal-sensor-group");
+ if (!sg) {
+ pr_devel("Sensor groups node not found\n");
+ return;
+ }
+
+ sgs = kcalloc(of_get_child_count(sg), sizeof(*sgs), GFP_KERNEL);
+ if (!sgs)
+ return;
+
+ sg_kobj = kobject_create_and_add("sensor_groups", opal_kobj);
+ if (!sg_kobj) {
+ pr_warn("Failed to create sensor group kobject\n");
+ goto out_sgs;
+ }
+
+ for_each_child_of_node(sg, node) {
+ const __be32 *ops;
+ u32 sgid, len, nr_attrs, chipid;
+
+ ops = of_get_property(node, "ops", &len);
+ if (!ops)
+ continue;
+
+ nr_attrs = get_nr_attrs(ops, len);
+ if (!nr_attrs)
+ continue;
+
+ sgs[i].sgattrs = kcalloc(nr_attrs, sizeof(struct sg_attr),
+ GFP_KERNEL);
+ if (!sgs[i].sgattrs)
+ goto out_sgs_sgattrs;
+
+ sgs[i].sg.attrs = kcalloc(nr_attrs + 1,
+ sizeof(struct attribute *),
+ GFP_KERNEL);
+
+ if (!sgs[i].sg.attrs) {
+ kfree(sgs[i].sgattrs);
+ goto out_sgs_sgattrs;
+ }
+
+ if (of_property_read_u32(node, "sensor-group-id", &sgid)) {
+ pr_warn("sensor-group-id property not found\n");
+ goto out_sgs_sgattrs;
+ }
+
+ if (!of_property_read_u32(node, "ibm,chip-id", &chipid))
+ sprintf(sgs[i].name, "%s%d", node->name, chipid);
+ else
+ sprintf(sgs[i].name, "%s", node->name);
+
+ sgs[i].sg.name = sgs[i].name;
+ if (add_attr_group(ops, len, &sgs[i], sgid)) {
+ pr_warn("Failed to create sensor attribute group %s\n",
+ sgs[i].sg.name);
+ goto out_sgs_sgattrs;
+ }
+ i++;
+ }
+
+ return;
+
+out_sgs_sgattrs:
+ while (--i >= 0) {
+ kfree(sgs[i].sgattrs);
+ kfree(sgs[i].sg.attrs);
+ }
+ kobject_put(sg_kobj);
+out_sgs:
+ kfree(sgs);
+}
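
The sg_store() handler above is a compact example of the OPAL asynchronous-call pattern: reserve a token, issue the call, and if firmware answers OPAL_ASYNC_COMPLETION wait for the completion message before translating the result. A minimal sketch of that sequence, with locking and error handling trimmed (illustrative only, not part of the patch; the handle is assumed to come from the sensor-group device tree node):

static int clear_sensor_group(u32 handle)
{
        struct opal_msg msg;
        int ret, token;

        token = opal_async_get_token_interruptible();
        if (token < 0)
                return token;

        ret = opal_sensor_group_clear(handle, token);
        if (ret == OPAL_ASYNC_COMPLETION) {
                if (opal_async_wait_response(token, &msg))
                        ret = OPAL_HARDWARE;            /* lost response -> -EIO */
                else
                        ret = opal_get_async_rc(msg);
        }

        opal_async_release_token(token);
        return opal_error_code(ret);                    /* OPAL rc -> -errno */
}

From user space the same operation is a single write to the attribute created by opal_sensor_groups_init() above, e.g. echo 1 > /sys/firmware/opal/sensor_groups/<group>/clear.
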
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index 4ca6c26a56d5..8c1ede2d3f7e 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -27,7 +27,7 @@
.globl opal_tracepoint_refcount
opal_tracepoint_refcount:
- .llong 0
+ .8byte 0
.section ".text"
@@ -310,3 +310,12 @@ OPAL_CALL(opal_xive_dump, OPAL_XIVE_DUMP);
OPAL_CALL(opal_npu_init_context, OPAL_NPU_INIT_CONTEXT);
OPAL_CALL(opal_npu_destroy_context, OPAL_NPU_DESTROY_CONTEXT);
OPAL_CALL(opal_npu_map_lpar, OPAL_NPU_MAP_LPAR);
+OPAL_CALL(opal_imc_counters_init, OPAL_IMC_COUNTERS_INIT);
+OPAL_CALL(opal_imc_counters_start, OPAL_IMC_COUNTERS_START);
+OPAL_CALL(opal_imc_counters_stop, OPAL_IMC_COUNTERS_STOP);
+OPAL_CALL(opal_pci_set_p2p, OPAL_PCI_SET_P2P);
+OPAL_CALL(opal_get_powercap, OPAL_GET_POWERCAP);
+OPAL_CALL(opal_set_powercap, OPAL_SET_POWERCAP);
+OPAL_CALL(opal_get_power_shift_ratio, OPAL_GET_POWER_SHIFT_RATIO);
+OPAL_CALL(opal_set_power_shift_ratio, OPAL_SET_POWER_SHIFT_RATIO);
+OPAL_CALL(opal_sensor_group_clear, OPAL_SENSOR_GROUP_CLEAR);
diff --git a/arch/powerpc/platforms/powernv/opal-xscom.c b/arch/powerpc/platforms/powernv/opal-xscom.c
index 28651fb25417..81c0a943dea9 100644
--- a/arch/powerpc/platforms/powernv/opal-xscom.c
+++ b/arch/powerpc/platforms/powernv/opal-xscom.c
@@ -36,14 +36,14 @@ static scom_map_t opal_scom_map(struct device_node *dev, u64 reg, u64 count)
const __be32 *gcid;
if (!of_get_property(dev, "scom-controller", NULL)) {
- pr_err("%s: device %s is not a SCOM controller\n",
- __func__, dev->full_name);
+ pr_err("%s: device %pOF is not a SCOM controller\n",
+ __func__, dev);
return SCOM_MAP_INVALID;
}
gcid = of_get_property(dev, "ibm,chip-id", NULL);
if (!gcid) {
- pr_err("%s: device %s has no ibm,chip-id\n",
- __func__, dev->full_name);
+ pr_err("%s: device %pOF has no ibm,chip-id\n",
+ __func__, dev);
return SCOM_MAP_INVALID;
}
m = kmalloc(sizeof(struct opal_scom_map), GFP_KERNEL);
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 9b87abb178f0..65c79ecf5a4d 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -16,6 +16,7 @@
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
+#include <linux/of_address.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/slab.h>
@@ -25,11 +26,17 @@
#include <linux/memblock.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
+#include <linux/printk.h>
+#include <linux/kmsg_dump.h>
+#include <linux/console.h>
+#include <linux/sched/debug.h>
#include <asm/machdep.h>
#include <asm/opal.h>
#include <asm/firmware.h>
#include <asm/mce.h>
+#include <asm/imc-pmu.h>
+#include <asm/bug.h>
#include "powernv.h"
@@ -78,7 +85,7 @@ void opal_configure_cores(void)
* ie. Host hash supports hash guests
* Host radix supports hash/radix guests
*/
- if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ if (early_cpu_has_feature(CPU_FTR_ARCH_300)) {
reinit_flags |= OPAL_REINIT_CPUS_MMU_HASH;
if (early_radix_enabled())
reinit_flags |= OPAL_REINIT_CPUS_MMU_RADIX;
@@ -162,12 +169,9 @@ int __init early_init_dt_scan_recoverable_ranges(unsigned long node,
sizeof(struct mcheck_recoverable_range);
/*
- * Allocate a buffer to hold the MC recoverable ranges. We would be
- * accessing them in real mode, hence it needs to be within
- * RMO region.
+ * Allocate a buffer to hold the MC recoverable ranges.
*/
- mc_recoverable_range =__va(memblock_alloc_base(size, __alignof__(u64),
- ppc64_rma_size));
+ mc_recoverable_range = __va(memblock_alloc(size, __alignof__(u64)));
memset(mc_recoverable_range, 0, size);
for (i = 0; i < mc_recoverable_range_len; i++) {
@@ -422,24 +426,88 @@ static int opal_recover_mce(struct pt_regs *regs,
/* Fatal machine check */
pr_err("Machine check interrupt is fatal\n");
recovered = 0;
- } else if ((evt->severity == MCE_SEV_ERROR_SYNC) &&
- (user_mode(regs) && !is_global_init(current))) {
+ }
+
+ if (!recovered && evt->severity == MCE_SEV_ERROR_SYNC) {
/*
- * For now, kill the task if we have received exception when
- * in userspace.
+ * Try to kill processes if we get a synchronous machine check
+ * (e.g., one caused by execution of this instruction). This
+ * will devolve into a panic if we try to kill init or are in
+ * an interrupt etc.
*
 * TODO: Queue up this address for hwpoisoning later.
+ * TODO: This is not quite right for d-side machine
+ * checks ->nip is not necessarily the important
+ * address.
*/
- _exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
- recovered = 1;
+ if ((user_mode(regs))) {
+ _exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
+ recovered = 1;
+ } else if (die_will_crash()) {
+ /*
+ * die() would kill the kernel, so better to go via
+ * the platform reboot code that will log the
+ * machine check.
+ */
+ recovered = 0;
+ } else {
+ die("Machine check", regs, SIGBUS);
+ recovered = 1;
+ }
}
+
return recovered;
}
+void pnv_platform_error_reboot(struct pt_regs *regs, const char *msg)
+{
+ /*
+ * This is mostly taken from kernel/panic.c, but tries to do
+ * relatively minimal work. Don't use delay functions (TB may
+ * be broken), don't crash dump (need to set a firmware log),
+ * don't run notifiers. We do want to get some information to
+ * Linux console.
+ */
+ console_verbose();
+ bust_spinlocks(1);
+ pr_emerg("Hardware platform error: %s\n", msg);
+ if (regs)
+ show_regs(regs);
+ smp_send_stop();
+ printk_safe_flush_on_panic();
+ kmsg_dump(KMSG_DUMP_PANIC);
+ bust_spinlocks(0);
+ debug_locks_off();
+ console_flush_on_panic();
+
+ /*
+ * Don't bother to shut things down because this will
+ * xstop the system.
+ */
+ if (opal_cec_reboot2(OPAL_REBOOT_PLATFORM_ERROR, msg)
+ == OPAL_UNSUPPORTED) {
+ pr_emerg("Reboot type %d not supported for %s\n",
+ OPAL_REBOOT_PLATFORM_ERROR, msg);
+ }
+
+ /*
+ * If we reach here, there are three possibilities:
+ * 1. We are running on a firmware level that does not support
+ * opal_cec_reboot2()
+ * 2. We are running on a firmware level that does not support
+ * the OPAL_REBOOT_PLATFORM_ERROR reboot type.
+ * 3. We are running on an FSP-based system that does not need
+ * OPAL to trigger a checkstop explicitly for error analysis.
+ * The FSP PRD component will already have been notified
+ * about this error through other channels.
+ */
+
+ ppc_md.restart(NULL);
+}
+
int opal_machine_check(struct pt_regs *regs)
{
struct machine_check_event evt;
- int ret;
if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
return 0;
@@ -455,43 +523,7 @@ int opal_machine_check(struct pt_regs *regs)
if (opal_recover_mce(regs, &evt))
return 1;
- /*
- * Unrecovered machine check, we are heading to panic path.
- *
- * We may have hit this MCE in very early stage of kernel
- * initialization even before opal-prd has started running. If
- * this is the case then this MCE error may go un-noticed or
- * un-analyzed if we go down panic path. We need to inform
- * BMC/OCC about this error so that they can collect relevant
- * data for error analysis before rebooting.
- * Use opal_cec_reboot2(OPAL_REBOOT_PLATFORM_ERROR) to do so.
- * This function may not return on BMC based system.
- */
- ret = opal_cec_reboot2(OPAL_REBOOT_PLATFORM_ERROR,
- "Unrecoverable Machine Check exception");
- if (ret == OPAL_UNSUPPORTED) {
- pr_emerg("Reboot type %d not supported\n",
- OPAL_REBOOT_PLATFORM_ERROR);
- }
-
- /*
- * We reached here. There can be three possibilities:
- * 1. We are running on a firmware level that do not support
- * opal_cec_reboot2()
- * 2. We are running on a firmware level that do not support
- * OPAL_REBOOT_PLATFORM_ERROR reboot type.
- * 3. We are running on FSP based system that does not need opal
- * to trigger checkstop explicitly for error analysis. The FSP
- * PRD component would have already got notified about this
- * error through other channels.
- *
- * If hardware marked this as an unrecoverable MCE, we are
- * going to panic anyway. Even if it didn't, it's not safe to
- * continue at this point, so we should explicitly panic.
- */
-
- panic("PowerNV Unrecovered Machine Check");
- return 0;
+ pnv_platform_error_reboot(regs, "Unrecoverable Machine Check exception");
}
/* Early hmi handler called in real mode. */
@@ -720,6 +752,15 @@ static void opal_pdev_init(const char *compatible)
of_platform_device_create(np, NULL, NULL);
}
+static void __init opal_imc_init_dev(void)
+{
+ struct device_node *np;
+
+ np = of_find_compatible_node(NULL, NULL, IMC_DTB_COMPAT);
+ if (np)
+ of_platform_device_create(np, NULL, NULL);
+}
+
static int kopald(void *unused)
{
unsigned long timeout = msecs_to_jiffies(opal_heartbeat) + 1;
@@ -793,6 +834,9 @@ static int __init opal_init(void)
/* Setup a heartbeat thread if requested by OPAL */
opal_init_heartbeat();
+ /* Detect In-Memory Collection counters and create devices */
+ opal_imc_init_dev();
+
/* Create leds platform devices */
leds = of_find_node_by_path("/ibm,opal/leds");
if (leds) {
@@ -836,6 +880,15 @@ static int __init opal_init(void)
/* Initialise OPAL kmsg dumper for flushing console on panic */
opal_kmsg_init();
+ /* Initialise OPAL powercap interface */
+ opal_powercap_init();
+
+ /* Initialise OPAL Power-Shifting-Ratio interface */
+ opal_psr_init();
+
+ /* Initialise OPAL sensor groups */
+ opal_sensor_groups_init();
+
return 0;
}
machine_subsys_initcall(powernv, opal_init);
@@ -952,6 +1005,7 @@ int opal_error_code(int rc)
case OPAL_UNSUPPORTED: return -EIO;
case OPAL_HARDWARE: return -EIO;
case OPAL_INTERNAL_ERROR: return -EIO;
+ case OPAL_TIMEOUT: return -ETIMEDOUT;
default:
pr_err("%s: unexpected OPAL error %d\n", __func__, rc);
return -EIO;
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 437613588df1..57f9e55f4352 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -444,8 +444,8 @@ static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb)
r = of_get_property(dn, "ibm,opal-m64-window", NULL);
if (!r) {
- pr_info(" No <ibm,opal-m64-window> on %s\n",
- dn->full_name);
+ pr_info(" No <ibm,opal-m64-window> on %pOF\n",
+ dn);
return;
}
@@ -1408,7 +1408,6 @@ m64_failed:
static long pnv_pci_ioda2_unset_window(struct iommu_table_group *table_group,
int num);
-static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable);
static void pnv_pci_ioda2_release_dma_pe(struct pci_dev *dev, struct pnv_ioda_pe *pe)
{
@@ -1852,6 +1851,14 @@ static int pnv_pci_ioda_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
/* 4GB offset bypasses 32-bit space */
set_dma_offset(&pdev->dev, (1ULL << 32));
set_dma_ops(&pdev->dev, &dma_direct_ops);
+ } else if (dma_mask >> 32 && dma_mask != DMA_BIT_MASK(64)) {
+ /*
+ * Fail the request if a DMA mask between 32 and 64 bits
+ * was requested but couldn't be fulfilled. Ideally we
+ * would do this for 64-bits but historically we have
+ * always fallen back to 32-bits.
+ */
+ return -ENOMEM;
} else {
dev_info(&pdev->dev, "Using 32-bit DMA via iommu\n");
set_dma_ops(&pdev->dev, &dma_iommu_ops);
@@ -2394,7 +2401,7 @@ static long pnv_pci_ioda2_set_window(struct iommu_table_group *table_group,
return 0;
}
-static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable)
+void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable)
{
uint16_t window_id = (pe->pe_number << 1 ) + 1;
int64_t rc;
@@ -3789,8 +3796,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
if (!of_device_is_available(np))
return;
- pr_info("Initializing %s PHB (%s)\n",
- pnv_phb_names[ioda_type], of_node_full_name(np));
+ pr_info("Initializing %s PHB (%pOF)\n", pnv_phb_names[ioda_type], np);
prop64 = of_get_property(np, "ibm,opal-phbid", NULL);
if (!prop64) {
@@ -3805,8 +3811,8 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
/* Allocate PCI controller */
phb->hose = hose = pcibios_alloc_controller(np);
if (!phb->hose) {
- pr_err(" Can't allocate PCI controller for %s\n",
- np->full_name);
+ pr_err(" Can't allocate PCI controller for %pOF\n",
+ np);
memblock_free(__pa(phb), sizeof(struct pnv_phb));
return;
}
@@ -3817,7 +3823,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
hose->first_busno = be32_to_cpu(prop32[0]);
hose->last_busno = be32_to_cpu(prop32[1]);
} else {
- pr_warn(" Broken <bus-range> on %s\n", np->full_name);
+ pr_warn(" Broken <bus-range> on %pOF\n", np);
hose->first_busno = 0;
hose->last_busno = 0xff;
}
@@ -4038,7 +4044,7 @@ void __init pnv_pci_init_ioda_hub(struct device_node *np)
const __be64 *prop64;
u64 hub_id;
- pr_info("Probing IODA IO-Hub %s\n", np->full_name);
+ pr_info("Probing IODA IO-Hub %pOF\n", np);
prop64 = of_get_property(np, "ibm,opal-hubid", NULL);
if (!prop64) {
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index 7905d179d036..5422f4a6317c 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -37,6 +37,8 @@
#include "powernv.h"
#include "pci.h"
+static DEFINE_MUTEX(p2p_mutex);
+
int pnv_pci_get_slot_id(struct device_node *np, uint64_t *id)
{
struct device_node *parent = np;
@@ -1017,6 +1019,79 @@ void pnv_pci_dma_bus_setup(struct pci_bus *bus)
}
}
+int pnv_pci_set_p2p(struct pci_dev *initiator, struct pci_dev *target, u64 desc)
+{
+ struct pci_controller *hose;
+ struct pnv_phb *phb_init, *phb_target;
+ struct pnv_ioda_pe *pe_init;
+ int rc;
+
+ if (!opal_check_token(OPAL_PCI_SET_P2P))
+ return -ENXIO;
+
+ hose = pci_bus_to_host(initiator->bus);
+ phb_init = hose->private_data;
+
+ hose = pci_bus_to_host(target->bus);
+ phb_target = hose->private_data;
+
+ pe_init = pnv_ioda_get_pe(initiator);
+ if (!pe_init)
+ return -ENODEV;
+
+ /*
+ * Configuring the initiator's PHB requires adjusting its
+ * TVE#1 setting. Since the same device can be an initiator
+ * several times for different target devices, we need to keep
+ * a reference count to know when we can restore the default
+ * bypass setting on its TVE#1 when disabling. OPAL does not
+ * track PE states, so we keep a reference count on the PE
+ * in Linux.
+ *
+ * For the target, the configuration is per PHB, so we keep a
+ * target reference count on the PHB.
+ */
+ mutex_lock(&p2p_mutex);
+
+ if (desc & OPAL_PCI_P2P_ENABLE) {
+ /* always go to opal to validate the configuration */
+ rc = opal_pci_set_p2p(phb_init->opal_id, phb_target->opal_id,
+ desc, pe_init->pe_number);
+
+ if (rc != OPAL_SUCCESS) {
+ rc = -EIO;
+ goto out;
+ }
+
+ pe_init->p2p_initiator_count++;
+ phb_target->p2p_target_count++;
+ } else {
+ if (!pe_init->p2p_initiator_count ||
+ !phb_target->p2p_target_count) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (--pe_init->p2p_initiator_count == 0)
+ pnv_pci_ioda2_set_bypass(pe_init, true);
+
+ if (--phb_target->p2p_target_count == 0) {
+ rc = opal_pci_set_p2p(phb_init->opal_id,
+ phb_target->opal_id, desc,
+ pe_init->pe_number);
+ if (rc != OPAL_SUCCESS) {
+ rc = -EIO;
+ goto out;
+ }
+ }
+ }
+ rc = 0;
+out:
+ mutex_unlock(&p2p_mutex);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(pnv_pci_set_p2p);
+
void pnv_pci_shutdown(void)
{
struct pci_controller *hose;
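
Because pnv_pci_set_p2p() above reference-counts the initiator PE and the target PHB, enable and disable calls must be balanced. A hedged sketch of a caller (the device pointers are placeholders, and passing a descriptor without OPAL_PCI_P2P_ENABLE for the disable step is an assumption based on the code above):

/* Illustrative caller, not part of the patch. */
static int do_p2p_transfer(struct pci_dev *initiator, struct pci_dev *target)
{
        int rc;

        /* Enable: takes a reference on the initiator PE and the target PHB. */
        rc = pnv_pci_set_p2p(initiator, target, OPAL_PCI_P2P_ENABLE);
        if (rc)
                return rc;

        /* ... perform the peer-to-peer DMA between the two devices ... */

        /*
         * Disable: drops the references; TVE#1 bypass on the initiator is
         * restored once its count reaches zero.
         */
        return pnv_pci_set_p2p(initiator, target, 0);
}
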
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index f16bc403ec03..a95273c524f6 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -78,6 +78,9 @@ struct pnv_ioda_pe {
struct pnv_ioda_pe *master;
struct list_head slaves;
+ /* PCI peer-to-peer */
+ int p2p_initiator_count;
+
/* Link in list of PE#s */
struct list_head list;
};
@@ -189,6 +192,7 @@ struct pnv_phb {
#ifdef CONFIG_CXL_BASE
struct cxl_afu *cxl_afu;
#endif
+ int p2p_target_count;
};
extern struct pci_ops pnv_pci_ops;
@@ -229,6 +233,7 @@ extern void pnv_teardown_msi_irqs(struct pci_dev *pdev);
extern struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev);
extern void pnv_set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq);
extern bool pnv_pci_enable_device_hook(struct pci_dev *dev);
+extern void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable);
extern void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
const char *fmt, ...);
diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h
index 6dbc0a1da1f6..a159d48573d7 100644
--- a/arch/powerpc/platforms/powernv/powernv.h
+++ b/arch/powerpc/platforms/powernv/powernv.h
@@ -7,6 +7,8 @@ extern void pnv_smp_init(void);
static inline void pnv_smp_init(void) { }
#endif
+extern void pnv_platform_error_reboot(struct pt_regs *regs, const char *msg) __noreturn;
+
struct pci_dev;
#ifdef CONFIG_PCI
diff --git a/arch/powerpc/platforms/powernv/rng.c b/arch/powerpc/platforms/powernv/rng.c
index 1a9d84371a4d..718f50ed22f1 100644
--- a/arch/powerpc/platforms/powernv/rng.c
+++ b/arch/powerpc/platforms/powernv/rng.c
@@ -16,11 +16,13 @@
#include <linux/slab.h>
#include <linux/smp.h>
#include <asm/archrandom.h>
+#include <asm/cputable.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/smp.h>
+#define DARN_ERR 0xFFFFFFFFFFFFFFFFul
struct powernv_rng {
void __iomem *regs;
@@ -67,6 +69,41 @@ int powernv_get_random_real_mode(unsigned long *v)
return 1;
}
+int powernv_get_random_darn(unsigned long *v)
+{
+ unsigned long val;
+
+ /* Using DARN with L=1 - 64-bit conditioned random number */
+ asm volatile(PPC_DARN(%0, 1) : "=r"(val));
+
+ if (val == DARN_ERR)
+ return 0;
+
+ *v = val;
+
+ return 1;
+}
+
+static int initialise_darn(void)
+{
+ unsigned long val;
+ int i;
+
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ return -ENODEV;
+
+ for (i = 0; i < 10; i++) {
+ if (powernv_get_random_darn(&val)) {
+ ppc_md.get_random_seed = powernv_get_random_darn;
+ return 0;
+ }
+ }
+
+ pr_warn("Unable to use DARN for get_random_seed()\n");
+
+ return -EIO;
+}
+
int powernv_get_random_long(unsigned long *v)
{
struct powernv_rng *rng;
@@ -88,7 +125,7 @@ static __init void rng_init_per_cpu(struct powernv_rng *rng,
chip_id = of_get_ibm_chip_id(dn);
if (chip_id == -1)
- pr_warn("No ibm,chip-id found for %s.\n", dn->full_name);
+ pr_warn("No ibm,chip-id found for %pOF.\n", dn);
for_each_possible_cpu(cpu) {
if (per_cpu(powernv_rng, cpu) == NULL ||
@@ -141,8 +178,8 @@ static __init int rng_init(void)
for_each_compatible_node(dn, NULL, "ibm,power-rng") {
rc = rng_create(dn);
if (rc) {
- pr_err("Failed creating rng for %s (%d).\n",
- dn->full_name, rc);
+ pr_err("Failed creating rng for %pOF (%d).\n",
+ dn, rc);
continue;
}
@@ -150,6 +187,8 @@ static __init int rng_init(void)
of_platform_device_create(dn, NULL, NULL);
}
+ initialise_darn();
+
return 0;
}
machine_subsys_initcall(powernv, rng_init);
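
Once initialise_darn() above has installed powernv_get_random_darn() as ppc_md.get_random_seed, callers reach DARN through that hook (typically via the powerpc archrandom wrappers). A small illustration, not part of the patch:

static void sample_darn_seed(void)
{
        unsigned long seed;

        if (ppc_md.get_random_seed && ppc_md.get_random_seed(&seed))
                pr_info("DARN sample: 0x%lx\n", seed);
        else
                pr_info("DARN-based seed not available\n");
}
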
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index 40dae96f7e20..c17f81e433f7 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -57,7 +57,7 @@ static void pnv_smp_setup_cpu(int cpu)
static int pnv_smp_kick_cpu(int nr)
{
- unsigned int pcpu = get_hard_smp_processor_id(nr);
+ unsigned int pcpu;
unsigned long start_here =
__pa(ppc_function_entry(generic_secondary_smp_init));
long rc;
@@ -66,6 +66,7 @@ static int pnv_smp_kick_cpu(int nr)
if (nr < 0 || nr >= nr_cpu_ids)
return -EINVAL;
+ pcpu = get_hard_smp_processor_id(nr);
/*
* If we already started or OPAL is not supported, we just
* kick the CPU via the PACA
@@ -164,12 +165,6 @@ static void pnv_smp_cpu_kill_self(void)
if (cpu_has_feature(CPU_FTR_ARCH_207S))
wmask = SRR1_WAKEMASK_P8;
- /* We don't want to take decrementer interrupts while we are offline,
- * so clear LPCR:PECE1. We keep PECE2 (and LPCR_PECE_HVEE on P9)
- * enabled as to let IPIs in.
- */
- mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
-
while (!generic_check_cpu_restart(cpu)) {
/*
* Clear IPI flag, since we don't handle IPIs while
@@ -219,8 +214,6 @@ static void pnv_smp_cpu_kill_self(void)
}
- /* Re-enable decrementer interrupts */
- mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_PECE1);
DBG("CPU%d coming online...\n", cpu);
}
diff --git a/arch/powerpc/platforms/powernv/vas-window.c b/arch/powerpc/platforms/powernv/vas-window.c
new file mode 100644
index 000000000000..5aae845b8cd9
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/vas-window.c
@@ -0,0 +1,1134 @@
+/*
+ * Copyright 2016-17 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) "vas: " fmt
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/log2.h>
+#include <linux/rcupdate.h>
+#include <linux/cred.h>
+
+#include "vas.h"
+#include "copy-paste.h"
+
+/*
+ * Compute the paste address region for the window @window using the
+ * ->paste_base_addr and ->paste_win_id_shift we got from device tree.
+ */
+static void compute_paste_address(struct vas_window *window, u64 *addr, int *len)
+{
+ int winid;
+ u64 base, shift;
+
+ base = window->vinst->paste_base_addr;
+ shift = window->vinst->paste_win_id_shift;
+ winid = window->winid;
+
+ *addr = base + (winid << shift);
+ if (len)
+ *len = PAGE_SIZE;
+
+ pr_debug("Txwin #%d: Paste addr 0x%llx\n", winid, *addr);
+}
+
+static inline void get_hvwc_mmio_bar(struct vas_window *window,
+ u64 *start, int *len)
+{
+ u64 pbaddr;
+
+ pbaddr = window->vinst->hvwc_bar_start;
+ *start = pbaddr + window->winid * VAS_HVWC_SIZE;
+ *len = VAS_HVWC_SIZE;
+}
+
+static inline void get_uwc_mmio_bar(struct vas_window *window,
+ u64 *start, int *len)
+{
+ u64 pbaddr;
+
+ pbaddr = window->vinst->uwc_bar_start;
+ *start = pbaddr + window->winid * VAS_UWC_SIZE;
+ *len = VAS_UWC_SIZE;
+}
+
+/*
+ * Map the paste bus address of the given send window into kernel address
+ * space. Unlike MMIO regions (map_mmio_region() below), the paste region
+ * must be mapped cacheable and is only applicable to send windows.
+ */
+static void *map_paste_region(struct vas_window *txwin)
+{
+ int len;
+ void *map;
+ char *name;
+ u64 start;
+
+ name = kasprintf(GFP_KERNEL, "window-v%d-w%d", txwin->vinst->vas_id,
+ txwin->winid);
+ if (!name)
+ goto free_name;
+
+ txwin->paste_addr_name = name;
+ compute_paste_address(txwin, &start, &len);
+
+ if (!request_mem_region(start, len, name)) {
+ pr_devel("%s(): request_mem_region(0x%llx, %d) failed\n",
+ __func__, start, len);
+ goto free_name;
+ }
+
+ map = ioremap_cache(start, len);
+ if (!map) {
+ pr_devel("%s(): ioremap_cache(0x%llx, %d) failed\n", __func__,
+ start, len);
+ goto free_name;
+ }
+
+ pr_devel("Mapped paste addr 0x%llx to kaddr 0x%p\n", start, map);
+ return map;
+
+free_name:
+ kfree(name);
+ return ERR_PTR(-ENOMEM);
+}
+
+static void *map_mmio_region(char *name, u64 start, int len)
+{
+ void *map;
+
+ if (!request_mem_region(start, len, name)) {
+ pr_devel("%s(): request_mem_region(0x%llx, %d) failed\n",
+ __func__, start, len);
+ return NULL;
+ }
+
+ map = ioremap(start, len);
+ if (!map) {
+ pr_devel("%s(): ioremap(0x%llx, %d) failed\n", __func__, start,
+ len);
+ return NULL;
+ }
+
+ return map;
+}
+
+static void unmap_region(void *addr, u64 start, int len)
+{
+ iounmap(addr);
+ release_mem_region((phys_addr_t)start, len);
+}
+
+/*
+ * Unmap the paste address region for a window.
+ */
+static void unmap_paste_region(struct vas_window *window)
+{
+ int len;
+ u64 busaddr_start;
+
+ if (window->paste_kaddr) {
+ compute_paste_address(window, &busaddr_start, &len);
+ unmap_region(window->paste_kaddr, busaddr_start, len);
+ window->paste_kaddr = NULL;
+ kfree(window->paste_addr_name);
+ window->paste_addr_name = NULL;
+ }
+}
+
+/*
+ * Unmap the MMIO regions for a window.
+ */
+static void unmap_winctx_mmio_bars(struct vas_window *window)
+{
+ int len;
+ u64 busaddr_start;
+
+ if (window->hvwc_map) {
+ get_hvwc_mmio_bar(window, &busaddr_start, &len);
+ unmap_region(window->hvwc_map, busaddr_start, len);
+ window->hvwc_map = NULL;
+ }
+
+ if (window->uwc_map) {
+ get_uwc_mmio_bar(window, &busaddr_start, &len);
+ unmap_region(window->uwc_map, busaddr_start, len);
+ window->uwc_map = NULL;
+ }
+}
+
+/*
+ * Find the Hypervisor Window Context (HVWC) MMIO Base Address Region and the
+ * OS/User Window Context (UWC) MMIO Base Address Region for the given window.
+ * Map these bus addresses and save the mapped kernel addresses in @window.
+ */
+int map_winctx_mmio_bars(struct vas_window *window)
+{
+ int len;
+ u64 start;
+
+ get_hvwc_mmio_bar(window, &start, &len);
+ window->hvwc_map = map_mmio_region("HVWCM_Window", start, len);
+
+ get_uwc_mmio_bar(window, &start, &len);
+ window->uwc_map = map_mmio_region("UWCM_Window", start, len);
+
+ if (!window->hvwc_map || !window->uwc_map) {
+ unmap_winctx_mmio_bars(window);
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Reset all valid registers in the HV and OS/User Window Contexts for
+ * the window identified by @window.
+ *
+ * NOTE: We cannot really use a for loop to reset window context. Not all
+ * offsets in a window context are valid registers and the valid
+ * registers are not sequential. And, we can only write to offsets
+ * with valid registers.
+ */
+void reset_window_regs(struct vas_window *window)
+{
+ write_hvwc_reg(window, VREG(LPID), 0ULL);
+ write_hvwc_reg(window, VREG(PID), 0ULL);
+ write_hvwc_reg(window, VREG(XLATE_MSR), 0ULL);
+ write_hvwc_reg(window, VREG(XLATE_LPCR), 0ULL);
+ write_hvwc_reg(window, VREG(XLATE_CTL), 0ULL);
+ write_hvwc_reg(window, VREG(AMR), 0ULL);
+ write_hvwc_reg(window, VREG(SEIDR), 0ULL);
+ write_hvwc_reg(window, VREG(FAULT_TX_WIN), 0ULL);
+ write_hvwc_reg(window, VREG(OSU_INTR_SRC_RA), 0ULL);
+ write_hvwc_reg(window, VREG(HV_INTR_SRC_RA), 0ULL);
+ write_hvwc_reg(window, VREG(PSWID), 0ULL);
+ write_hvwc_reg(window, VREG(LFIFO_BAR), 0ULL);
+ write_hvwc_reg(window, VREG(LDATA_STAMP_CTL), 0ULL);
+ write_hvwc_reg(window, VREG(LDMA_CACHE_CTL), 0ULL);
+ write_hvwc_reg(window, VREG(LRFIFO_PUSH), 0ULL);
+ write_hvwc_reg(window, VREG(CURR_MSG_COUNT), 0ULL);
+ write_hvwc_reg(window, VREG(LNOTIFY_AFTER_COUNT), 0ULL);
+ write_hvwc_reg(window, VREG(LRX_WCRED), 0ULL);
+ write_hvwc_reg(window, VREG(LRX_WCRED_ADDER), 0ULL);
+ write_hvwc_reg(window, VREG(TX_WCRED), 0ULL);
+ write_hvwc_reg(window, VREG(TX_WCRED_ADDER), 0ULL);
+ write_hvwc_reg(window, VREG(LFIFO_SIZE), 0ULL);
+ write_hvwc_reg(window, VREG(WINCTL), 0ULL);
+ write_hvwc_reg(window, VREG(WIN_STATUS), 0ULL);
+ write_hvwc_reg(window, VREG(WIN_CTX_CACHING_CTL), 0ULL);
+ write_hvwc_reg(window, VREG(TX_RSVD_BUF_COUNT), 0ULL);
+ write_hvwc_reg(window, VREG(LRFIFO_WIN_PTR), 0ULL);
+ write_hvwc_reg(window, VREG(LNOTIFY_CTL), 0ULL);
+ write_hvwc_reg(window, VREG(LNOTIFY_PID), 0ULL);
+ write_hvwc_reg(window, VREG(LNOTIFY_LPID), 0ULL);
+ write_hvwc_reg(window, VREG(LNOTIFY_TID), 0ULL);
+ write_hvwc_reg(window, VREG(LNOTIFY_SCOPE), 0ULL);
+ write_hvwc_reg(window, VREG(NX_UTIL_ADDER), 0ULL);
+
+ /* Skip read-only registers: NX_UTIL and NX_UTIL_SE */
+
+ /*
+ * The send and receive window credit adder registers are also
+ * accessible from HVWC and have been initialized above. We don't
+ * need to initialize from the OS/User Window Context, so skip
+ * following calls:
+ *
+ * write_uwc_reg(window, VREG(TX_WCRED_ADDER), 0ULL);
+ * write_uwc_reg(window, VREG(LRX_WCRED_ADDER), 0ULL);
+ */
+}
+
+/*
+ * Initialize window context registers related to Address Translation.
+ * These registers are common to send/receive windows although they
+ * differ for user/kernel windows. As we resolve the TODOs we may
+ * want to add fields to vas_winctx and move the initialization to
+ * init_vas_winctx_regs().
+ */
+static void init_xlate_regs(struct vas_window *window, bool user_win)
+{
+ u64 lpcr, val;
+
+ /*
+ * MSR_TA, MSR_US are false for both kernel and user.
+ * MSR_DR and MSR_PR are false for kernel.
+ */
+ val = 0ULL;
+ val = SET_FIELD(VAS_XLATE_MSR_HV, val, 1);
+ val = SET_FIELD(VAS_XLATE_MSR_SF, val, 1);
+ if (user_win) {
+ val = SET_FIELD(VAS_XLATE_MSR_DR, val, 1);
+ val = SET_FIELD(VAS_XLATE_MSR_PR, val, 1);
+ }
+ write_hvwc_reg(window, VREG(XLATE_MSR), val);
+
+ lpcr = mfspr(SPRN_LPCR);
+ val = 0ULL;
+ /*
+ * NOTE: From Section 5.7.8.1 Segment Lookaside Buffer of the
+ * Power ISA, v3.0B, Page size encoding is 0 = 4KB, 5 = 64KB.
+ *
+ * NOTE: From Section 1.3.1, Address Translation Context of the
+ * Nest MMU Workbook, LPCR_SC should be 0 for Power9.
+ */
+ val = SET_FIELD(VAS_XLATE_LPCR_PAGE_SIZE, val, 5);
+ val = SET_FIELD(VAS_XLATE_LPCR_ISL, val, lpcr & LPCR_ISL);
+ val = SET_FIELD(VAS_XLATE_LPCR_TC, val, lpcr & LPCR_TC);
+ val = SET_FIELD(VAS_XLATE_LPCR_SC, val, 0);
+ write_hvwc_reg(window, VREG(XLATE_LPCR), val);
+
+ /*
+ * Section 1.3.1 (Address translation Context) of NMMU workbook.
+ * 0b00 Hashed Page Table mode
+ * 0b01 Reserved
+ * 0b10 Radix on HPT
+ * 0b11 Radix on Radix
+ */
+ val = 0ULL;
+ val = SET_FIELD(VAS_XLATE_MODE, val, radix_enabled() ? 3 : 2);
+ write_hvwc_reg(window, VREG(XLATE_CTL), val);
+
+ /*
+ * TODO: Can we mfspr(AMR) even for user windows?
+ */
+ val = 0ULL;
+ val = SET_FIELD(VAS_AMR, val, mfspr(SPRN_AMR));
+ write_hvwc_reg(window, VREG(AMR), val);
+
+ val = 0ULL;
+ val = SET_FIELD(VAS_SEIDR, val, 0);
+ write_hvwc_reg(window, VREG(SEIDR), val);
+}
+
+/*
+ * Initialize Reserved Send Buffer Count for the send window. It involves
+ * writing to the register and reading it back to confirm that the hardware
+ * has enough buffers to reserve. See section 1.3.1.2.1 of VAS workbook.
+ *
+ * Since we can only make a best-effort attempt to fulfill the request,
+ * we don't return any errors if we cannot.
+ *
+ * TODO: Reserved (aka dedicated) send buffers are not supported yet.
+ */
+static void init_rsvd_tx_buf_count(struct vas_window *txwin,
+ struct vas_winctx *winctx)
+{
+ write_hvwc_reg(txwin, VREG(TX_RSVD_BUF_COUNT), 0ULL);
+}
+
+/*
+ * init_winctx_regs()
+ * Initialize window context registers for a receive window.
+ * Except for caching control and marking window open, the registers
+ * are initialized in the order listed in Section 3.1.4 (Window Context
+ * Cache Register Details) of the VAS workbook although they don't need
+ * to be.
+ *
+ * Design note: For NX receive windows, NX allocates the FIFO buffer in OPAL
+ * (so that it can get a large contiguous area) and passes that buffer
+ * to kernel via device tree. We now write that buffer address to the
+ * FIFO BAR. Would it make sense to do this all in OPAL? i.e. have OPAL
+ * write the per-chip RX FIFO addresses to the windows during boot-up
+ * as a one-time task? That could work for NX but what about other
+ * receivers? Let the receivers tell us the rx-fifo buffers for now.
+ */
+int init_winctx_regs(struct vas_window *window, struct vas_winctx *winctx)
+{
+ u64 val;
+ int fifo_size;
+
+ reset_window_regs(window);
+
+ val = 0ULL;
+ val = SET_FIELD(VAS_LPID, val, winctx->lpid);
+ write_hvwc_reg(window, VREG(LPID), val);
+
+ val = 0ULL;
+ val = SET_FIELD(VAS_PID_ID, val, winctx->pidr);
+ write_hvwc_reg(window, VREG(PID), val);
+
+ init_xlate_regs(window, winctx->user_win);
+
+ val = 0ULL;
+ val = SET_FIELD(VAS_FAULT_TX_WIN, val, 0);
+ write_hvwc_reg(window, VREG(FAULT_TX_WIN), val);
+
+ /* In PowerNV, interrupts go to HV. */
+ write_hvwc_reg(window, VREG(OSU_INTR_SRC_RA), 0ULL);
+
+ val = 0ULL;
+ val = SET_FIELD(VAS_HV_INTR_SRC_RA, val, winctx->irq_port);
+ write_hvwc_reg(window, VREG(HV_INTR_SRC_RA), val);
+
+ val = 0ULL;
+ val = SET_FIELD(VAS_PSWID_EA_HANDLE, val, winctx->pswid);
+ write_hvwc_reg(window, VREG(PSWID), val);
+
+ write_hvwc_reg(window, VREG(SPARE1), 0ULL);
+ write_hvwc_reg(window, VREG(SPARE2), 0ULL);
+ write_hvwc_reg(window, VREG(SPARE3), 0ULL);
+
+ /*
+ * NOTE: VAS expects the FIFO address to be copied into the LFIFO_BAR
+ * register as is - do NOT shift the address into VAS_LFIFO_BAR
+ * bit fields! Ok to set the page migration select fields -
+ * VAS ignores the lower 10+ bits in the address anyway, because
+ * the minimum FIFO size is 1K?
+ *
+ * See also: Design note in function header.
+ */
+ val = __pa(winctx->rx_fifo);
+ val = SET_FIELD(VAS_PAGE_MIGRATION_SELECT, val, 0);
+ write_hvwc_reg(window, VREG(LFIFO_BAR), val);
+
+ val = 0ULL;
+ val = SET_FIELD(VAS_LDATA_STAMP, val, winctx->data_stamp);
+ write_hvwc_reg(window, VREG(LDATA_STAMP_CTL), val);
+
+ val = 0ULL;
+ val = SET_FIELD(VAS_LDMA_TYPE, val, winctx->dma_type);
+ val = SET_FIELD(VAS_LDMA_FIFO_DISABLE, val, winctx->fifo_disable);
+ write_hvwc_reg(window, VREG(LDMA_CACHE_CTL), val);
+
+ write_hvwc_reg(window, VREG(LRFIFO_PUSH), 0ULL);
+ write_hvwc_reg(window, VREG(CURR_MSG_COUNT), 0ULL);
+ write_hvwc_reg(window, VREG(LNOTIFY_AFTER_COUNT), 0ULL);
+
+ val = 0ULL;
+ val = SET_FIELD(VAS_LRX_WCRED, val, winctx->wcreds_max);
+ write_hvwc_reg(window, VREG(LRX_WCRED), val);
+
+ val = 0ULL;
+ val = SET_FIELD(VAS_TX_WCRED, val, winctx->wcreds_max);
+ write_hvwc_reg(window, VREG(TX_WCRED), val);
+
+ write_hvwc_reg(window, VREG(LRX_WCRED_ADDER), 0ULL);
+ write_hvwc_reg(window, VREG(TX_WCRED_ADDER), 0ULL);
+
+ fifo_size = winctx->rx_fifo_size / 1024;
+
+ val = 0ULL;
+ val = SET_FIELD(VAS_LFIFO_SIZE, val, ilog2(fifo_size));
+ write_hvwc_reg(window, VREG(LFIFO_SIZE), val);
+
+ /* Update window control and caching control registers last so
+ * we mark the window open only after fully initializing it and
+ * pushing context to cache.
+ */
+
+ write_hvwc_reg(window, VREG(WIN_STATUS), 0ULL);
+
+ init_rsvd_tx_buf_count(window, winctx);
+
+ /* for a send window, point to the matching receive window */
+ val = 0ULL;
+ val = SET_FIELD(VAS_LRX_WIN_ID, val, winctx->rx_win_id);
+ write_hvwc_reg(window, VREG(LRFIFO_WIN_PTR), val);
+
+ write_hvwc_reg(window, VREG(SPARE4), 0ULL);
+
+ val = 0ULL;
+ val = SET_FIELD(VAS_NOTIFY_DISABLE, val, winctx->notify_disable);
+ val = SET_FIELD(VAS_INTR_DISABLE, val, winctx->intr_disable);
+ val = SET_FIELD(VAS_NOTIFY_EARLY, val, winctx->notify_early);
+ val = SET_FIELD(VAS_NOTIFY_OSU_INTR, val, winctx->notify_os_intr_reg);
+ write_hvwc_reg(window, VREG(LNOTIFY_CTL), val);
+
+ val = 0ULL;
+ val = SET_FIELD(VAS_LNOTIFY_PID, val, winctx->lnotify_pid);
+ write_hvwc_reg(window, VREG(LNOTIFY_PID), val);
+
+ val = 0ULL;
+ val = SET_FIELD(VAS_LNOTIFY_LPID, val, winctx->lnotify_lpid);
+ write_hvwc_reg(window, VREG(LNOTIFY_LPID), val);
+
+ val = 0ULL;
+ val = SET_FIELD(VAS_LNOTIFY_TID, val, winctx->lnotify_tid);
+ write_hvwc_reg(window, VREG(LNOTIFY_TID), val);
+
+ val = 0ULL;
+ val = SET_FIELD(VAS_LNOTIFY_MIN_SCOPE, val, winctx->min_scope);
+ val = SET_FIELD(VAS_LNOTIFY_MAX_SCOPE, val, winctx->max_scope);
+ write_hvwc_reg(window, VREG(LNOTIFY_SCOPE), val);
+
+ /* Skip read-only registers NX_UTIL and NX_UTIL_SE */
+
+ write_hvwc_reg(window, VREG(SPARE5), 0ULL);
+ write_hvwc_reg(window, VREG(NX_UTIL_ADDER), 0ULL);
+ write_hvwc_reg(window, VREG(SPARE6), 0ULL);
+
+ /* Finally, push window context to memory and... */
+ val = 0ULL;
+ val = SET_FIELD(VAS_PUSH_TO_MEM, val, 1);
+ write_hvwc_reg(window, VREG(WIN_CTX_CACHING_CTL), val);
+
+ /* ... mark the window open for business */
+ val = 0ULL;
+ val = SET_FIELD(VAS_WINCTL_REJ_NO_CREDIT, val, winctx->rej_no_credit);
+ val = SET_FIELD(VAS_WINCTL_PIN, val, winctx->pin_win);
+ val = SET_FIELD(VAS_WINCTL_TX_WCRED_MODE, val, winctx->tx_wcred_mode);
+ val = SET_FIELD(VAS_WINCTL_RX_WCRED_MODE, val, winctx->rx_wcred_mode);
+ val = SET_FIELD(VAS_WINCTL_TX_WORD_MODE, val, winctx->tx_word_mode);
+ val = SET_FIELD(VAS_WINCTL_RX_WORD_MODE, val, winctx->rx_word_mode);
+ val = SET_FIELD(VAS_WINCTL_FAULT_WIN, val, winctx->fault_win);
+ val = SET_FIELD(VAS_WINCTL_NX_WIN, val, winctx->nx_win);
+ val = SET_FIELD(VAS_WINCTL_OPEN, val, 1);
+ write_hvwc_reg(window, VREG(WINCTL), val);
+
+ return 0;
+}
+
+static DEFINE_SPINLOCK(vas_ida_lock);
+
+static void vas_release_window_id(struct ida *ida, int winid)
+{
+ spin_lock(&vas_ida_lock);
+ ida_remove(ida, winid);
+ spin_unlock(&vas_ida_lock);
+}
+
+static int vas_assign_window_id(struct ida *ida)
+{
+ int rc, winid;
+
+ do {
+ rc = ida_pre_get(ida, GFP_KERNEL);
+ if (!rc)
+ return -EAGAIN;
+
+ spin_lock(&vas_ida_lock);
+ rc = ida_get_new(ida, &winid);
+ spin_unlock(&vas_ida_lock);
+ } while (rc == -EAGAIN);
+
+ if (rc)
+ return rc;
+
+ if (winid > VAS_WINDOWS_PER_CHIP) {
+ pr_err("Too many (%d) open windows\n", winid);
+ vas_release_window_id(ida, winid);
+ return -EAGAIN;
+ }
+
+ return winid;
+}
+
+static void vas_window_free(struct vas_window *window)
+{
+ int winid = window->winid;
+ struct vas_instance *vinst = window->vinst;
+
+ unmap_winctx_mmio_bars(window);
+ kfree(window);
+
+ vas_release_window_id(&vinst->ida, winid);
+}
+
+static struct vas_window *vas_window_alloc(struct vas_instance *vinst)
+{
+ int winid;
+ struct vas_window *window;
+
+ winid = vas_assign_window_id(&vinst->ida);
+ if (winid < 0)
+ return ERR_PTR(winid);
+
+ window = kzalloc(sizeof(*window), GFP_KERNEL);
+ if (!window)
+ goto out_free;
+
+ window->vinst = vinst;
+ window->winid = winid;
+
+ if (map_winctx_mmio_bars(window))
+ goto out_free;
+
+ return window;
+
+out_free:
+ kfree(window);
+ vas_release_window_id(&vinst->ida, winid);
+ return ERR_PTR(-ENOMEM);
+}
+
+static void put_rx_win(struct vas_window *rxwin)
+{
+ /* Better not be a send window! */
+ WARN_ON_ONCE(rxwin->tx_win);
+
+ atomic_dec(&rxwin->num_txwins);
+}
+
+/*
+ * Get the VAS receive window associated with NX engine identified
+ * by @cop and if applicable, @pswid.
+ *
+ * See also function header of set_vinst_win().
+ */
+static struct vas_window *get_vinst_rxwin(struct vas_instance *vinst,
+ enum vas_cop_type cop, u32 pswid)
+{
+ struct vas_window *rxwin;
+
+ mutex_lock(&vinst->mutex);
+
+ if (cop == VAS_COP_TYPE_842 || cop == VAS_COP_TYPE_842_HIPRI)
+ rxwin = vinst->rxwin[cop] ?: ERR_PTR(-EINVAL);
+ else
+ rxwin = ERR_PTR(-EINVAL);
+
+ if (!IS_ERR(rxwin))
+ atomic_inc(&rxwin->num_txwins);
+
+ mutex_unlock(&vinst->mutex);
+
+ return rxwin;
+}
+
+/*
+ * We have two tables of windows in a VAS instance. The first one,
+ * ->windows[], contains all the windows in the instance and allows
+ * looking up a window by its id. It is used to look up send windows
+ * during fault handling and receive windows when pairing user space
+ * send/receive windows.
+ *
+ * The second table, ->rxwin[], contains receive windows that are
+ * associated with NX engines. This table has VAS_COP_TYPE_MAX
+ * entries and is used to look up a receive window by its
+ * coprocessor type.
+ *
+ * Here, we save @window in the ->windows[] table. If it is a receive
+ * window, we also save the window in the ->rxwin[] table.
+ */
+static void set_vinst_win(struct vas_instance *vinst,
+ struct vas_window *window)
+{
+ int id = window->winid;
+
+ mutex_lock(&vinst->mutex);
+
+ /*
+ * There should only be one receive window for a coprocessor type
+ * unless it's a user (FTW) window.
+ */
+ if (!window->user_win && !window->tx_win) {
+ WARN_ON_ONCE(vinst->rxwin[window->cop]);
+ vinst->rxwin[window->cop] = window;
+ }
+
+ WARN_ON_ONCE(vinst->windows[id] != NULL);
+ vinst->windows[id] = window;
+
+ mutex_unlock(&vinst->mutex);
+}
+
+/*
+ * Clear this window from the table(s) of windows for this VAS instance.
+ * See also function header of set_vinst_win().
+ */
+static void clear_vinst_win(struct vas_window *window)
+{
+ int id = window->winid;
+ struct vas_instance *vinst = window->vinst;
+
+ mutex_lock(&vinst->mutex);
+
+ if (!window->user_win && !window->tx_win) {
+ WARN_ON_ONCE(!vinst->rxwin[window->cop]);
+ vinst->rxwin[window->cop] = NULL;
+ }
+
+ WARN_ON_ONCE(vinst->windows[id] != window);
+ vinst->windows[id] = NULL;
+
+ mutex_unlock(&vinst->mutex);
+}
+
+static void init_winctx_for_rxwin(struct vas_window *rxwin,
+ struct vas_rx_win_attr *rxattr,
+ struct vas_winctx *winctx)
+{
+ /*
+ * We first zero (memset()) all fields and only set non-zero fields.
+ * Following fields are 0/false but maybe deserve a comment:
+ *
+ * ->notify_os_intr_reg In powerNV, send intrs to HV
+ * ->notify_disable False for NX windows
+ * ->intr_disable False for Fault Windows
+ * ->xtra_write False for NX windows
+ * ->notify_early NA for NX windows
+ * ->rsvd_txbuf_count NA for Rx windows
+ * ->lpid, ->pid, ->tid NA for Rx windows
+ */
+
+ memset(winctx, 0, sizeof(struct vas_winctx));
+
+ winctx->rx_fifo = rxattr->rx_fifo;
+ winctx->rx_fifo_size = rxattr->rx_fifo_size;
+ winctx->wcreds_max = rxattr->wcreds_max ?: VAS_WCREDS_DEFAULT;
+ winctx->pin_win = rxattr->pin_win;
+
+ winctx->nx_win = rxattr->nx_win;
+ winctx->fault_win = rxattr->fault_win;
+ winctx->rx_word_mode = rxattr->rx_win_ord_mode;
+ winctx->tx_word_mode = rxattr->tx_win_ord_mode;
+ winctx->rx_wcred_mode = rxattr->rx_wcred_mode;
+ winctx->tx_wcred_mode = rxattr->tx_wcred_mode;
+
+ if (winctx->nx_win) {
+ winctx->data_stamp = true;
+ winctx->intr_disable = true;
+ winctx->pin_win = true;
+
+ WARN_ON_ONCE(winctx->fault_win);
+ WARN_ON_ONCE(!winctx->rx_word_mode);
+ WARN_ON_ONCE(!winctx->tx_word_mode);
+ WARN_ON_ONCE(winctx->notify_after_count);
+ } else if (winctx->fault_win) {
+ winctx->notify_disable = true;
+ } else if (winctx->user_win) {
+ /*
+ * Section 1.8.1 Low Latency Core-Core Wake up of
+ * the VAS workbook:
+ *
+ * - disable credit checks ([tr]x_wcred_mode = false)
+ * - disable FIFO writes
+ * - enable ASB_Notify, disable interrupt
+ */
+ winctx->fifo_disable = true;
+ winctx->intr_disable = true;
+ winctx->rx_fifo = NULL;
+ }
+
+ winctx->lnotify_lpid = rxattr->lnotify_lpid;
+ winctx->lnotify_pid = rxattr->lnotify_pid;
+ winctx->lnotify_tid = rxattr->lnotify_tid;
+ winctx->pswid = rxattr->pswid;
+ winctx->dma_type = VAS_DMA_TYPE_INJECT;
+ winctx->tc_mode = rxattr->tc_mode;
+
+ winctx->min_scope = VAS_SCOPE_LOCAL;
+ winctx->max_scope = VAS_SCOPE_VECTORED_GROUP;
+}
+
+static bool rx_win_args_valid(enum vas_cop_type cop,
+ struct vas_rx_win_attr *attr)
+{
+ dump_rx_win_attr(attr);
+
+ if (cop >= VAS_COP_TYPE_MAX)
+ return false;
+
+ if (cop != VAS_COP_TYPE_FTW &&
+ attr->rx_fifo_size < VAS_RX_FIFO_SIZE_MIN)
+ return false;
+
+ if (attr->rx_fifo_size > VAS_RX_FIFO_SIZE_MAX)
+ return false;
+
+ if (attr->nx_win) {
+ /* cannot be fault or user window if it is nx */
+ if (attr->fault_win || attr->user_win)
+ return false;
+ /*
+ * Section 3.1.4.32: NX Windows must not disable notification,
+ * and must not enable interrupts or early notification.
+ */
+ if (attr->notify_disable || !attr->intr_disable ||
+ attr->notify_early)
+ return false;
+ } else if (attr->fault_win) {
+ /* cannot be both fault and user window */
+ if (attr->user_win)
+ return false;
+
+ /*
+ * Section 3.1.4.32: Fault windows must disable notification
+ * but not interrupts.
+ */
+ if (!attr->notify_disable || attr->intr_disable)
+ return false;
+
+ } else if (attr->user_win) {
+ /*
+ * User receive windows are only for fast-thread-wakeup
+ * (FTW). They don't need a FIFO and must disable interrupts
+ */
+ if (attr->rx_fifo || attr->rx_fifo_size || !attr->intr_disable)
+ return false;
+ } else {
+ /* Rx window must be one of NX or Fault or User window. */
+ return false;
+ }
+
+ return true;
+}
+
+void vas_init_rx_win_attr(struct vas_rx_win_attr *rxattr, enum vas_cop_type cop)
+{
+ memset(rxattr, 0, sizeof(*rxattr));
+
+ if (cop == VAS_COP_TYPE_842 || cop == VAS_COP_TYPE_842_HIPRI) {
+ rxattr->pin_win = true;
+ rxattr->nx_win = true;
+ rxattr->fault_win = false;
+ rxattr->intr_disable = true;
+ rxattr->rx_wcred_mode = true;
+ rxattr->tx_wcred_mode = true;
+ rxattr->rx_win_ord_mode = true;
+ rxattr->tx_win_ord_mode = true;
+ } else if (cop == VAS_COP_TYPE_FAULT) {
+ rxattr->pin_win = true;
+ rxattr->fault_win = true;
+ rxattr->notify_disable = true;
+ rxattr->rx_wcred_mode = true;
+ rxattr->tx_wcred_mode = true;
+ rxattr->rx_win_ord_mode = true;
+ rxattr->tx_win_ord_mode = true;
+ } else if (cop == VAS_COP_TYPE_FTW) {
+ rxattr->user_win = true;
+ rxattr->intr_disable = true;
+
+ /*
+ * As noted in the VAS Workbook we disable credit checks.
+ * If we enable credit checks in the future, we must also
+ * implement a mechanism to return the user credits or new
+ * paste operations will fail.
+ */
+ }
+}
+EXPORT_SYMBOL_GPL(vas_init_rx_win_attr);
+
+struct vas_window *vas_rx_win_open(int vasid, enum vas_cop_type cop,
+ struct vas_rx_win_attr *rxattr)
+{
+ struct vas_window *rxwin;
+ struct vas_winctx winctx;
+ struct vas_instance *vinst;
+
+ if (!rx_win_args_valid(cop, rxattr))
+ return ERR_PTR(-EINVAL);
+
+ vinst = find_vas_instance(vasid);
+ if (!vinst) {
+ pr_devel("vasid %d not found!\n", vasid);
+ return ERR_PTR(-EINVAL);
+ }
+ pr_devel("Found instance %d\n", vasid);
+
+ rxwin = vas_window_alloc(vinst);
+ if (IS_ERR(rxwin)) {
+ pr_devel("Unable to allocate memory for Rx window\n");
+ return rxwin;
+ }
+
+ rxwin->tx_win = false;
+ rxwin->nx_win = rxattr->nx_win;
+ rxwin->user_win = rxattr->user_win;
+ rxwin->cop = cop;
+ if (rxattr->user_win)
+ rxwin->pid = task_pid_vnr(current);
+
+ init_winctx_for_rxwin(rxwin, rxattr, &winctx);
+ init_winctx_regs(rxwin, &winctx);
+
+ set_vinst_win(vinst, rxwin);
+
+ return rxwin;
+}
+EXPORT_SYMBOL_GPL(vas_rx_win_open);
+
+void vas_init_tx_win_attr(struct vas_tx_win_attr *txattr, enum vas_cop_type cop)
+{
+ memset(txattr, 0, sizeof(*txattr));
+
+ if (cop == VAS_COP_TYPE_842 || cop == VAS_COP_TYPE_842_HIPRI) {
+ txattr->rej_no_credit = false;
+ txattr->rx_wcred_mode = true;
+ txattr->tx_wcred_mode = true;
+ txattr->rx_win_ord_mode = true;
+ txattr->tx_win_ord_mode = true;
+ } else if (cop == VAS_COP_TYPE_FTW) {
+ txattr->user_win = true;
+ }
+}
+EXPORT_SYMBOL_GPL(vas_init_tx_win_attr);
+
+static void init_winctx_for_txwin(struct vas_window *txwin,
+ struct vas_tx_win_attr *txattr,
+ struct vas_winctx *winctx)
+{
+ /*
+ * We first zero all fields and only set non-zero ones. Following
+ * are some fields set to 0/false for the stated reason:
+ *
+ * ->notify_os_intr_reg In powernv, send intrs to HV
+ * ->rsvd_txbuf_count Not supported yet.
+ * ->notify_disable False for NX windows
+ * ->xtra_write False for NX windows
+ * ->notify_early NA for NX windows
+ * ->lnotify_lpid NA for Tx windows
+ * ->lnotify_pid NA for Tx windows
+ * ->lnotify_tid NA for Tx windows
+ * ->tx_win_cred_mode Ignore for now for NX windows
+ * ->rx_win_cred_mode Ignore for now for NX windows
+ */
+ memset(winctx, 0, sizeof(struct vas_winctx));
+
+ winctx->wcreds_max = txattr->wcreds_max ?: VAS_WCREDS_DEFAULT;
+
+ winctx->user_win = txattr->user_win;
+ winctx->nx_win = txwin->rxwin->nx_win;
+ winctx->pin_win = txattr->pin_win;
+
+ winctx->rx_wcred_mode = txattr->rx_wcred_mode;
+ winctx->tx_wcred_mode = txattr->tx_wcred_mode;
+ winctx->rx_word_mode = txattr->rx_win_ord_mode;
+ winctx->tx_word_mode = txattr->tx_win_ord_mode;
+
+ if (winctx->nx_win) {
+ winctx->data_stamp = true;
+ winctx->intr_disable = true;
+ }
+
+ winctx->lpid = txattr->lpid;
+ winctx->pidr = txattr->pidr;
+ winctx->rx_win_id = txwin->rxwin->winid;
+
+ winctx->dma_type = VAS_DMA_TYPE_INJECT;
+ winctx->tc_mode = txattr->tc_mode;
+ winctx->min_scope = VAS_SCOPE_LOCAL;
+ winctx->max_scope = VAS_SCOPE_VECTORED_GROUP;
+
+ winctx->pswid = 0;
+}
+
+static bool tx_win_args_valid(enum vas_cop_type cop,
+ struct vas_tx_win_attr *attr)
+{
+ if (attr->tc_mode != VAS_THRESH_DISABLED)
+ return false;
+
+ if (cop > VAS_COP_TYPE_MAX)
+ return false;
+
+ if (attr->user_win &&
+ (cop != VAS_COP_TYPE_FTW || attr->rsvd_txbuf_count))
+ return false;
+
+ return true;
+}
+
+struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop,
+ struct vas_tx_win_attr *attr)
+{
+ int rc;
+ struct vas_window *txwin;
+ struct vas_window *rxwin;
+ struct vas_winctx winctx;
+ struct vas_instance *vinst;
+
+ if (!tx_win_args_valid(cop, attr))
+ return ERR_PTR(-EINVAL);
+
+ vinst = find_vas_instance(vasid);
+ if (!vinst) {
+ pr_devel("vasid %d not found!\n", vasid);
+ return ERR_PTR(-EINVAL);
+ }
+
+ rxwin = get_vinst_rxwin(vinst, cop, attr->pswid);
+ if (IS_ERR(rxwin)) {
+ pr_devel("No RxWin for vasid %d, cop %d\n", vasid, cop);
+ return rxwin;
+ }
+
+ txwin = vas_window_alloc(vinst);
+ if (IS_ERR(txwin)) {
+ rc = PTR_ERR(txwin);
+ goto put_rxwin;
+ }
+
+ txwin->tx_win = 1;
+ txwin->rxwin = rxwin;
+ txwin->nx_win = txwin->rxwin->nx_win;
+ txwin->pid = attr->pid;
+ txwin->user_win = attr->user_win;
+
+ init_winctx_for_txwin(txwin, attr, &winctx);
+
+ init_winctx_regs(txwin, &winctx);
+
+ /*
+ * If it's a kernel send window, map the window address into the
+ * kernel's address space. For user windows, the user must issue an
+ * mmap() to map the window into their address space.
+ *
+ * NOTE: If kernel ever resubmits a user CRB after handling a page
+ * fault, we will need to map this into kernel as well.
+ */
+ if (!txwin->user_win) {
+ txwin->paste_kaddr = map_paste_region(txwin);
+ if (IS_ERR(txwin->paste_kaddr)) {
+ rc = PTR_ERR(txwin->paste_kaddr);
+ goto free_window;
+ }
+ }
+
+ set_vinst_win(vinst, txwin);
+
+ return txwin;
+
+free_window:
+ vas_window_free(txwin);
+
+put_rxwin:
+ put_rx_win(rxwin);
+ return ERR_PTR(rc);
+
+}
+EXPORT_SYMBOL_GPL(vas_tx_win_open);
+
+int vas_copy_crb(void *crb, int offset)
+{
+ return vas_copy(crb, offset);
+}
+EXPORT_SYMBOL_GPL(vas_copy_crb);
+
+#define RMA_LSMP_REPORT_ENABLE PPC_BIT(53)
+int vas_paste_crb(struct vas_window *txwin, int offset, bool re)
+{
+ int rc;
+ void *addr;
+ uint64_t val;
+
+ /*
+ * Only NX windows are supported for now, and the hardware assumes
+ * the report-enable flag is set for NX windows. Ensure software
+ * complies too.
+ */
+ WARN_ON_ONCE(txwin->nx_win && !re);
+
+ addr = txwin->paste_kaddr;
+ if (re) {
+ /*
+ * Set the REPORT_ENABLE bit (equivalent to writing
+ * to 1K offset of the paste address)
+ */
+ val = SET_FIELD(RMA_LSMP_REPORT_ENABLE, 0ULL, 1);
+ addr += val;
+ }
+
+ /*
+ * Map the raw CR value from vas_paste() to an error code (there
+ * is just pass or fail for now though).
+ */
+ rc = vas_paste(addr, offset);
+ if (rc == 2)
+ rc = 0;
+ else
+ rc = -EINVAL;
+
+ print_fifo_msg_count(txwin);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(vas_paste_crb);
+
+static void poll_window_busy_state(struct vas_window *window)
+{
+ int busy;
+ u64 val;
+
+retry:
+ /*
+ * Poll Window Busy flag
+ */
+ val = read_hvwc_reg(window, VREG(WIN_STATUS));
+ busy = GET_FIELD(VAS_WIN_BUSY, val);
+ if (busy) {
+ val = 0;
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ);
+ goto retry;
+ }
+}
+
+static void poll_window_castout(struct vas_window *window)
+{
+ int cached;
+ u64 val;
+
+ /* Cast window context out of the cache */
+retry:
+ val = read_hvwc_reg(window, VREG(WIN_CTX_CACHING_CTL));
+ cached = GET_FIELD(VAS_WIN_CACHE_STATUS, val);
+ if (cached) {
+ val = 0ULL;
+ val = SET_FIELD(VAS_CASTOUT_REQ, val, 1);
+ val = SET_FIELD(VAS_PUSH_TO_MEM, val, 0);
+ write_hvwc_reg(window, VREG(WIN_CTX_CACHING_CTL), val);
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ);
+ goto retry;
+ }
+}
+
+/*
+ * Close a window.
+ *
+ * See Section 1.12.1 of VAS workbook v1.05 for details on closing window:
+ * - Disable new paste operations (unmap paste address)
+ * - Poll for the "Window Busy" bit to be cleared
+ * - Clear the Open/Enable bit for the Window.
+ * - Poll for return of window Credits (implies FIFO empty for Rx win?)
+ * - Unpin and cast window context out of cache
+ *
+ * Besides the hardware, the kernel has some bookkeeping of course.
+ */
+int vas_win_close(struct vas_window *window)
+{
+ u64 val;
+
+ if (!window)
+ return 0;
+
+ if (!window->tx_win && atomic_read(&window->num_txwins) != 0) {
+ pr_devel("Attempting to close an active Rx window!\n");
+ WARN_ON_ONCE(1);
+ return -EBUSY;
+ }
+
+ unmap_paste_region(window);
+
+ clear_vinst_win(window);
+
+ poll_window_busy_state(window);
+
+ /* Unpin window from cache and close it */
+ val = read_hvwc_reg(window, VREG(WINCTL));
+ val = SET_FIELD(VAS_WINCTL_PIN, val, 0);
+ val = SET_FIELD(VAS_WINCTL_OPEN, val, 0);
+ write_hvwc_reg(window, VREG(WINCTL), val);
+
+ poll_window_castout(window);
+
+ /* if send window, drop reference to matching receive window */
+ if (window->tx_win)
+ put_rx_win(window->rxwin);
+
+ vas_window_free(window);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vas_win_close);
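
Taken together, the exports above give an in-kernel user a complete send/receive pairing flow. A hedged end-to-end sketch (error handling omitted; rx_fifo_buf, rx_fifo_len, crb and vasid are placeholders, not part of the patch):

/* Illustrative pairing of windows for an NX 842 engine. */
struct vas_rx_win_attr rxattr;
struct vas_tx_win_attr txattr;
struct vas_window *rxwin, *txwin;

vas_init_rx_win_attr(&rxattr, VAS_COP_TYPE_842);
rxattr.rx_fifo = rx_fifo_buf;           /* receiver-supplied FIFO buffer */
rxattr.rx_fifo_size = rx_fifo_len;      /* must be >= VAS_RX_FIFO_SIZE_MIN */
rxwin = vas_rx_win_open(vasid, VAS_COP_TYPE_842, &rxattr);

vas_init_tx_win_attr(&txattr, VAS_COP_TYPE_842);
txwin = vas_tx_win_open(vasid, VAS_COP_TYPE_842, &txattr);

/* Submit one request: copy the CRB, then paste it to the send window. */
vas_copy_crb(crb, 0);
vas_paste_crb(txwin, 0, true);          /* re must be true for NX windows */

vas_win_close(txwin);                   /* drops its reference on rxwin */
vas_win_close(rxwin);
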
diff --git a/arch/powerpc/platforms/powernv/vas.c b/arch/powerpc/platforms/powernv/vas.c
new file mode 100644
index 000000000000..565a4878fefa
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/vas.c
@@ -0,0 +1,151 @@
+/*
+ * Copyright 2016-17 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) "vas: " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of.h>
+
+#include "vas.h"
+
+static DEFINE_MUTEX(vas_mutex);
+static LIST_HEAD(vas_instances);
+
+static int init_vas_instance(struct platform_device *pdev)
+{
+ int rc, vasid;
+ struct resource *res;
+ struct vas_instance *vinst;
+ struct device_node *dn = pdev->dev.of_node;
+
+ rc = of_property_read_u32(dn, "ibm,vas-id", &vasid);
+ if (rc) {
+ pr_err("No ibm,vas-id property for %s?\n", pdev->name);
+ return -ENODEV;
+ }
+
+ if (pdev->num_resources != 4) {
+ pr_err("Unexpected DT configuration for [%s, %d]\n",
+ pdev->name, vasid);
+ return -ENODEV;
+ }
+
+ vinst = kzalloc(sizeof(*vinst), GFP_KERNEL);
+ if (!vinst)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&vinst->node);
+ ida_init(&vinst->ida);
+ mutex_init(&vinst->mutex);
+ vinst->vas_id = vasid;
+ vinst->pdev = pdev;
+
+ res = &pdev->resource[0];
+ vinst->hvwc_bar_start = res->start;
+
+ res = &pdev->resource[1];
+ vinst->uwc_bar_start = res->start;
+
+ res = &pdev->resource[2];
+ vinst->paste_base_addr = res->start;
+
+ res = &pdev->resource[3];
+ if (res->end > 62) {
+ pr_err("Bad 'paste_win_id_shift' in DT, %llx\n", res->end);
+ goto free_vinst;
+ }
+
+ vinst->paste_win_id_shift = 63 - res->end;
+
+ pr_devel("Initialized instance [%s, %d], paste_base 0x%llx, "
+ "paste_win_id_shift 0x%llx\n", pdev->name, vasid,
+ vinst->paste_base_addr, vinst->paste_win_id_shift);
+
+ mutex_lock(&vas_mutex);
+ list_add(&vinst->node, &vas_instances);
+ mutex_unlock(&vas_mutex);
+
+ dev_set_drvdata(&pdev->dev, vinst);
+
+ return 0;
+
+free_vinst:
+ kfree(vinst);
+ return -ENODEV;
+
+}
+
+/*
+ * Although this is read/used multiple times, it is written to only
+ * during initialization.
+ */
+struct vas_instance *find_vas_instance(int vasid)
+{
+ struct list_head *ent;
+ struct vas_instance *vinst;
+
+ mutex_lock(&vas_mutex);
+ list_for_each(ent, &vas_instances) {
+ vinst = list_entry(ent, struct vas_instance, node);
+ if (vinst->vas_id == vasid) {
+ mutex_unlock(&vas_mutex);
+ return vinst;
+ }
+ }
+ mutex_unlock(&vas_mutex);
+
+ pr_devel("Instance %d not found\n", vasid);
+ return NULL;
+}
+
+static int vas_probe(struct platform_device *pdev)
+{
+ return init_vas_instance(pdev);
+}
+
+static const struct of_device_id powernv_vas_match[] = {
+ { .compatible = "ibm,vas",},
+ {},
+};
+
+static struct platform_driver vas_driver = {
+ .driver = {
+ .name = "vas",
+ .of_match_table = powernv_vas_match,
+ },
+ .probe = vas_probe,
+};
+
+static int __init vas_init(void)
+{
+ int found = 0;
+ struct device_node *dn;
+
+ platform_driver_register(&vas_driver);
+
+ for_each_compatible_node(dn, NULL, "ibm,vas") {
+ of_platform_device_create(dn, NULL, NULL);
+ found++;
+ }
+
+ if (!found)
+ return -ENODEV;
+
+ pr_devel("Found %d instances\n", found);
+
+ return 0;
+}
+device_initcall(vas_init);
diff --git a/arch/powerpc/platforms/powernv/vas.h b/arch/powerpc/platforms/powernv/vas.h
new file mode 100644
index 000000000000..38dee5d50f31
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/vas.h
@@ -0,0 +1,467 @@
+/*
+ * Copyright 2016-17 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _VAS_H
+#define _VAS_H
+#include <linux/atomic.h>
+#include <linux/idr.h>
+#include <asm/vas.h>
+#include <linux/io.h>
+
+/*
+ * Overview of Virtual Accelerator Switchboard (VAS).
+ *
+ * VAS is a hardware "switchboard" that allows senders and receivers to
+ * exchange messages with _minimal_ kernel involvement. The receivers are
+ * typically NX coprocessor engines that perform compression or encryption
+ * in hardware, but receivers can also be other software threads.
+ *
+ * Senders are user/kernel threads that submit compression/encryption or
+ * other requests to the receivers. Senders must format their messages as
+ * Coprocessor Request Blocks (CRB)s and submit them using the "copy" and
+ * "paste" instructions which were introduced in Power9.
+ *
+ * A Power node can have (up to?) 8 Power chips. There is one instance of
+ * VAS in each Power9 chip. Each instance of VAS has 64K windows or ports.
+ * Senders and receivers must each connect to a separate window before they
+ * can exchange messages through the switchboard.
+ *
+ * Each window is described by two types of window contexts:
+ *
+ * Hypervisor Window Context (HVWC) of size VAS_HVWC_SIZE bytes
+ *
+ * OS/User Window Context (UWC) of size VAS_UWC_SIZE bytes.
+ *
+ * A window context can be viewed as a set of 64-bit registers. The settings
+ * in these registers configure/control/determine the behavior of the VAS
+ * hardware when messages are sent/received through the window. The registers
+ * in the HVWC are configured by the kernel while the registers in the UWC can
+ * be configured by the kernel or by the user space application that is using
+ * the window.
+ *
+ * The HVWCs for all windows on a specific instance of VAS are in a contiguous
+ * range of hardware addresses or Base address region (BAR) referred to as the
+ * HVWC BAR for the instance. Similarly, the contiguous range holding the
+ * UWCs for all windows on an instance is referred to as the UWC BAR.
+ *
+ * The two BARs for each instance are defined in the Power9 MMIO Ranges
+ * spreadsheet and are available to the kernel in the VAS node's "reg"
+ * property in the device tree:
+ *
+ * /proc/device-tree/vas@.../reg
+ *
+ * (see vas_probe() for details on the reg property).
+ *
+ * The kernel maps the HVWC and UWC BAR regions into the kernel address
+ * space (hvwc_map and uwc_map). The kernel can then access the window
+ * contexts of a specific window using:
+ *
+ * hvwc = hvwc_map + winid * VAS_HVWC_SIZE.
+ * uwc = uwc_map + winid * VAS_UWC_SIZE.
+ *
+ * where winid is the window index (0..64K).
+ *
+ * As mentioned, a window context is used to "configure" a window. Besides
+ * this configuration address, each _send_ window also has a unique hardware
+ * "paste" address that is used to submit requests/CRBs (see vas_paste_crb()).
+ *
+ * The hardware paste address for a window is computed using the "paste
+ * base address" and "paste win id shift" reg properties in the VAS device
+ * tree node using:
+ *
+ * paste_addr = paste_base + (winid << paste_win_id_shift)
+ *
+ * (again, see vas_probe() for ->paste_base_addr and ->paste_win_id_shift).
+ *
+ * The kernel maps this hardware address into the sender's address space,
+ * after which the sender can use the 'paste' instruction (new in Power9) to
+ * send a message (i.e. submit a request, aka a CRB) to the coprocessor.
+ *
+ * NOTE: In the initial version, senders can only be in-kernel drivers/threads.
+ * Support for user space threads will be added in follow-on patches.
+ *
+ * TODO: Do we need to map the UWC into user address space so receivers can
+ * return credits? It's N/A for NX but may be needed for other receive windows.
+ *
+ */
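As a concrete illustration of the address computations described in the comment above, here is a minimal sketch; the helper names are hypothetical, and the vas_instance fields they use are the ones defined further down in this header, so this is illustrative only rather than code from the patch:

static inline u64 winid_to_hvwc_addr(struct vas_instance *vinst, int winid)
{
	/* HVWC of window 'winid' within this instance's HVWC BAR */
	return vinst->hvwc_bar_start + (u64)winid * VAS_HVWC_SIZE;
}

static inline u64 winid_to_paste_addr(struct vas_instance *vinst, int winid)
{
	/* paste_addr = paste_base + (winid << paste_win_id_shift) */
	return vinst->paste_base_addr +
		((u64)winid << vinst->paste_win_id_shift);
}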
+
+#define VAS_WINDOWS_PER_CHIP (64 << 10)
+
+/*
+ * Hypervisor and OS/User Window Context sizes
+ */
+#define VAS_HVWC_SIZE 512
+#define VAS_UWC_SIZE PAGE_SIZE
+
+/*
+ * Initial per-process credits.
+ * Max send window credits: 4K-1 (12 bits in VAS_TX_WCRED)
+ * Max receive window credits: 64K-1 (16 bits in VAS_LRX_WCRED)
+ *
+ * TODO: Needs tuning for per-process credits
+ */
+#define VAS_WCREDS_MIN 16
+#define VAS_WCREDS_MAX ((64 << 10) - 1)
+#define VAS_WCREDS_DEFAULT (1 << 10)
+
+/*
+ * VAS Window Context Register Offsets and bitmasks.
+ * See Section 3.1.4 of VAS Work book
+ */
+#define VAS_LPID_OFFSET 0x010
+#define VAS_LPID PPC_BITMASK(0, 11)
+
+#define VAS_PID_OFFSET 0x018
+#define VAS_PID_ID PPC_BITMASK(0, 19)
+
+#define VAS_XLATE_MSR_OFFSET 0x020
+#define VAS_XLATE_MSR_DR PPC_BIT(0)
+#define VAS_XLATE_MSR_TA PPC_BIT(1)
+#define VAS_XLATE_MSR_PR PPC_BIT(2)
+#define VAS_XLATE_MSR_US PPC_BIT(3)
+#define VAS_XLATE_MSR_HV PPC_BIT(4)
+#define VAS_XLATE_MSR_SF PPC_BIT(5)
+
+#define VAS_XLATE_LPCR_OFFSET 0x028
+#define VAS_XLATE_LPCR_PAGE_SIZE PPC_BITMASK(0, 2)
+#define VAS_XLATE_LPCR_ISL PPC_BIT(3)
+#define VAS_XLATE_LPCR_TC PPC_BIT(4)
+#define VAS_XLATE_LPCR_SC PPC_BIT(5)
+
+#define VAS_XLATE_CTL_OFFSET 0x030
+#define VAS_XLATE_MODE PPC_BITMASK(0, 1)
+
+#define VAS_AMR_OFFSET 0x040
+#define VAS_AMR PPC_BITMASK(0, 63)
+
+#define VAS_SEIDR_OFFSET 0x048
+#define VAS_SEIDR PPC_BITMASK(0, 63)
+
+#define VAS_FAULT_TX_WIN_OFFSET 0x050
+#define VAS_FAULT_TX_WIN PPC_BITMASK(48, 63)
+
+#define VAS_OSU_INTR_SRC_RA_OFFSET 0x060
+#define VAS_OSU_INTR_SRC_RA PPC_BITMASK(8, 63)
+
+#define VAS_HV_INTR_SRC_RA_OFFSET 0x070
+#define VAS_HV_INTR_SRC_RA PPC_BITMASK(8, 63)
+
+#define VAS_PSWID_OFFSET 0x078
+#define VAS_PSWID_EA_HANDLE PPC_BITMASK(0, 31)
+
+#define VAS_SPARE1_OFFSET 0x080
+#define VAS_SPARE2_OFFSET 0x088
+#define VAS_SPARE3_OFFSET 0x090
+#define VAS_SPARE4_OFFSET 0x130
+#define VAS_SPARE5_OFFSET 0x160
+#define VAS_SPARE6_OFFSET 0x188
+
+#define VAS_LFIFO_BAR_OFFSET 0x0A0
+#define VAS_LFIFO_BAR PPC_BITMASK(8, 53)
+#define VAS_PAGE_MIGRATION_SELECT PPC_BITMASK(54, 56)
+
+#define VAS_LDATA_STAMP_CTL_OFFSET 0x0A8
+#define VAS_LDATA_STAMP PPC_BITMASK(0, 1)
+#define VAS_XTRA_WRITE PPC_BIT(2)
+
+#define VAS_LDMA_CACHE_CTL_OFFSET 0x0B0
+#define VAS_LDMA_TYPE PPC_BITMASK(0, 1)
+#define VAS_LDMA_FIFO_DISABLE PPC_BIT(2)
+
+#define VAS_LRFIFO_PUSH_OFFSET 0x0B8
+#define VAS_LRFIFO_PUSH PPC_BITMASK(0, 15)
+
+#define VAS_CURR_MSG_COUNT_OFFSET 0x0C0
+#define VAS_CURR_MSG_COUNT PPC_BITMASK(0, 7)
+
+#define VAS_LNOTIFY_AFTER_COUNT_OFFSET 0x0C8
+#define VAS_LNOTIFY_AFTER_COUNT PPC_BITMASK(0, 7)
+
+#define VAS_LRX_WCRED_OFFSET 0x0E0
+#define VAS_LRX_WCRED PPC_BITMASK(0, 15)
+
+#define VAS_LRX_WCRED_ADDER_OFFSET 0x190
+#define VAS_LRX_WCRED_ADDER PPC_BITMASK(0, 15)
+
+#define VAS_TX_WCRED_OFFSET 0x0F0
+#define VAS_TX_WCRED PPC_BITMASK(4, 15)
+
+#define VAS_TX_WCRED_ADDER_OFFSET 0x1A0
+#define VAS_TX_WCRED_ADDER PPC_BITMASK(4, 15)
+
+#define VAS_LFIFO_SIZE_OFFSET 0x100
+#define VAS_LFIFO_SIZE PPC_BITMASK(0, 3)
+
+#define VAS_WINCTL_OFFSET 0x108
+#define VAS_WINCTL_OPEN PPC_BIT(0)
+#define VAS_WINCTL_REJ_NO_CREDIT PPC_BIT(1)
+#define VAS_WINCTL_PIN PPC_BIT(2)
+#define VAS_WINCTL_TX_WCRED_MODE PPC_BIT(3)
+#define VAS_WINCTL_RX_WCRED_MODE PPC_BIT(4)
+#define VAS_WINCTL_TX_WORD_MODE PPC_BIT(5)
+#define VAS_WINCTL_RX_WORD_MODE PPC_BIT(6)
+#define VAS_WINCTL_RSVD_TXBUF PPC_BIT(7)
+#define VAS_WINCTL_THRESH_CTL PPC_BITMASK(8, 9)
+#define VAS_WINCTL_FAULT_WIN PPC_BIT(10)
+#define VAS_WINCTL_NX_WIN PPC_BIT(11)
+
+#define VAS_WIN_STATUS_OFFSET 0x110
+#define VAS_WIN_BUSY PPC_BIT(1)
+
+#define VAS_WIN_CTX_CACHING_CTL_OFFSET 0x118
+#define VAS_CASTOUT_REQ PPC_BIT(0)
+#define VAS_PUSH_TO_MEM PPC_BIT(1)
+#define VAS_WIN_CACHE_STATUS PPC_BIT(4)
+
+#define VAS_TX_RSVD_BUF_COUNT_OFFSET 0x120
+#define VAS_RXVD_BUF_COUNT PPC_BITMASK(58, 63)
+
+#define VAS_LRFIFO_WIN_PTR_OFFSET 0x128
+#define VAS_LRX_WIN_ID PPC_BITMASK(0, 15)
+
+/*
+ * Local Notification Control Register controls what happens in _response_
+ * to a paste command and hence applies only to receive windows.
+ */
+#define VAS_LNOTIFY_CTL_OFFSET 0x138
+#define VAS_NOTIFY_DISABLE PPC_BIT(0)
+#define VAS_INTR_DISABLE PPC_BIT(1)
+#define VAS_NOTIFY_EARLY PPC_BIT(2)
+#define VAS_NOTIFY_OSU_INTR PPC_BIT(3)
+
+#define VAS_LNOTIFY_PID_OFFSET 0x140
+#define VAS_LNOTIFY_PID PPC_BITMASK(0, 19)
+
+#define VAS_LNOTIFY_LPID_OFFSET 0x148
+#define VAS_LNOTIFY_LPID PPC_BITMASK(0, 11)
+
+#define VAS_LNOTIFY_TID_OFFSET 0x150
+#define VAS_LNOTIFY_TID PPC_BITMASK(0, 15)
+
+#define VAS_LNOTIFY_SCOPE_OFFSET 0x158
+#define VAS_LNOTIFY_MIN_SCOPE PPC_BITMASK(0, 1)
+#define VAS_LNOTIFY_MAX_SCOPE PPC_BITMASK(2, 3)
+
+#define VAS_NX_UTIL_OFFSET 0x1B0
+#define VAS_NX_UTIL PPC_BITMASK(0, 63)
+
+/* SE: Side effects */
+#define VAS_NX_UTIL_SE_OFFSET 0x1B8
+#define VAS_NX_UTIL_SE PPC_BITMASK(0, 63)
+
+#define VAS_NX_UTIL_ADDER_OFFSET 0x180
+#define VAS_NX_UTIL_ADDER PPC_BITMASK(32, 63)
+
+/*
+ * Local Notify Scope Control Register. (Receive windows only).
+ */
+enum vas_notify_scope {
+ VAS_SCOPE_LOCAL,
+ VAS_SCOPE_GROUP,
+ VAS_SCOPE_VECTORED_GROUP,
+ VAS_SCOPE_UNUSED,
+};
+
+/*
+ * Local DMA Cache Control Register (Receive windows only).
+ */
+enum vas_dma_type {
+ VAS_DMA_TYPE_INJECT,
+ VAS_DMA_TYPE_WRITE,
+};
+
+/*
+ * Local Notify After Count Register. (Receive windows only).
+ * Not applicable to NX receive windows.
+ */
+enum vas_notify_after_count {
+ VAS_NOTIFY_AFTER_256 = 0,
+ VAS_NOTIFY_NONE,
+ VAS_NOTIFY_AFTER_2
+};
+
+/*
+ * One per instance of VAS. Each instance will have a separate set of
+ * receive windows, one per coprocessor type.
+ *
+ * See also function header of set_vinst_win() for details on ->windows[]
+ * and ->rxwin[] tables.
+ */
+struct vas_instance {
+ int vas_id;
+ struct ida ida;
+ struct list_head node;
+ struct platform_device *pdev;
+
+ u64 hvwc_bar_start;
+ u64 uwc_bar_start;
+ u64 paste_base_addr;
+ u64 paste_win_id_shift;
+
+ struct mutex mutex;
+ struct vas_window *rxwin[VAS_COP_TYPE_MAX];
+ struct vas_window *windows[VAS_WINDOWS_PER_CHIP];
+};
+
+/*
+ * In-kernel state of a VAS window. One per window.
+ */
+struct vas_window {
+ /* Fields common to send and receive windows */
+ struct vas_instance *vinst;
+ int winid;
+ bool tx_win; /* True if send window */
+ bool nx_win; /* True if NX window */
+ bool user_win; /* True if user space window */
+ void *hvwc_map; /* HV window context */
+ void *uwc_map; /* OS/User window context */
+ pid_t pid; /* Linux process id of owner */
+
+ /* Fields applicable only to send windows */
+ void *paste_kaddr;
+ char *paste_addr_name;
+ struct vas_window *rxwin;
+
+ /* Fields applicable only to receive windows */
+ enum vas_cop_type cop;
+ atomic_t num_txwins;
+};
+
+/*
+ * Container for the hardware state of a window. One per window.
+ *
+ * A VAS Window context is a 512-byte area in the hardware that contains
+ * a set of 64-bit registers. Individual bit-fields in these registers
+ * determine the configuration/operation of the hardware. struct vas_winctx
+ * is a container for the register fields in the window context.
+ */
+struct vas_winctx {
+ void *rx_fifo;
+ int rx_fifo_size;
+ int wcreds_max;
+ int rsvd_txbuf_count;
+
+ bool user_win;
+ bool nx_win;
+ bool fault_win;
+ bool rsvd_txbuf_enable;
+ bool pin_win;
+ bool rej_no_credit;
+ bool tx_wcred_mode;
+ bool rx_wcred_mode;
+ bool tx_word_mode;
+ bool rx_word_mode;
+ bool data_stamp;
+ bool xtra_write;
+ bool notify_disable;
+ bool intr_disable;
+ bool fifo_disable;
+ bool notify_early;
+ bool notify_os_intr_reg;
+
+ int lpid;
+ int pidr; /* value from SPRN_PID, not linux pid */
+ int lnotify_lpid;
+ int lnotify_pid;
+ int lnotify_tid;
+ u32 pswid;
+ int rx_win_id;
+ int fault_win_id;
+ int tc_mode;
+
+ u64 irq_port;
+
+ enum vas_dma_type dma_type;
+ enum vas_notify_scope min_scope;
+ enum vas_notify_scope max_scope;
+ enum vas_notify_after_count notify_after_count;
+};
+
+extern struct vas_instance *find_vas_instance(int vasid);
+
+/*
+ * VREG(x):
+ * Expand a register's short name (eg: LPID) into two parameters:
+ * - the register's short name in string form ("LPID"), and
+ * - the name of the macro (eg: VAS_LPID_OFFSET), defining the
+ * register's offset in the window context
+ */
+#define VREG_SFX(n, s) __stringify(n), VAS_##n##s
+#define VREG(r) VREG_SFX(r, _OFFSET)
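For example, assuming win is a struct vas_window *, a register read written with VREG() expands as shown in this worked sketch (LPID chosen arbitrarily):

/* VREG(LPID) expands to the two arguments:  "LPID", VAS_LPID_OFFSET */
u64 val = read_hvwc_reg(win, VREG(LPID));
/* ...and is therefore the same call as: */
u64 val2 = read_hvwc_reg(win, "LPID", VAS_LPID_OFFSET);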
+
+#ifdef vas_debug
+static inline void dump_rx_win_attr(struct vas_rx_win_attr *attr)
+{
+ pr_err("fault %d, notify %d, intr %d early %d\n",
+ attr->fault_win, attr->notify_disable,
+ attr->intr_disable, attr->notify_early);
+
+ pr_err("rx_fifo_size %d, max value %d\n",
+ attr->rx_fifo_size, VAS_RX_FIFO_SIZE_MAX);
+}
+
+static inline void vas_log_write(struct vas_window *win, char *name,
+ void *regptr, u64 val)
+{
+ if (val)
+ pr_err("%swin #%d: %s reg %p, val 0x%016llx\n",
+ win->tx_win ? "Tx" : "Rx", win->winid, name,
+ regptr, val);
+}
+
+#else /* vas_debug */
+
+#define vas_log_write(win, name, reg, val)
+#define dump_rx_win_attr(attr)
+
+#endif /* vas_debug */
+
+static inline void write_uwc_reg(struct vas_window *win, char *name,
+ s32 reg, u64 val)
+{
+ void *regptr;
+
+ regptr = win->uwc_map + reg;
+ vas_log_write(win, name, regptr, val);
+
+ out_be64(regptr, val);
+}
+
+static inline void write_hvwc_reg(struct vas_window *win, char *name,
+ s32 reg, u64 val)
+{
+ void *regptr;
+
+ regptr = win->hvwc_map + reg;
+ vas_log_write(win, name, regptr, val);
+
+ out_be64(regptr, val);
+}
+
+static inline u64 read_hvwc_reg(struct vas_window *win,
+ char *name __maybe_unused, s32 reg)
+{
+ return in_be64(win->hvwc_map+reg);
+}
+
+#ifdef vas_debug
+
+static void print_fifo_msg_count(struct vas_window *txwin)
+{
+ pr_devel("Winid %d, Msg count %llu\n", txwin->winid,
+ (uint64_t)read_hvwc_reg(txwin, VREG(LRFIFO_PUSH)));
+}
+#else /* vas_debug */
+
+#define print_fifo_msg_count(window)
+
+#endif /* vas_debug */
+
+#endif /* _VAS_H */
diff --git a/arch/powerpc/platforms/ps3/repository.c b/arch/powerpc/platforms/ps3/repository.c
index 814a7eaa7769..50dbaf24b1ee 100644
--- a/arch/powerpc/platforms/ps3/repository.c
+++ b/arch/powerpc/platforms/ps3/repository.c
@@ -170,14 +170,8 @@ int ps3_repository_read_bus_str(unsigned int bus_index, const char *bus_str,
int ps3_repository_read_bus_id(unsigned int bus_index, u64 *bus_id)
{
- int result;
-
- result = read_node(PS3_LPAR_ID_PME,
- make_first_field("bus", bus_index),
- make_field("id", 0),
- 0, 0,
- bus_id, NULL);
- return result;
+ return read_node(PS3_LPAR_ID_PME, make_first_field("bus", bus_index),
+ make_field("id", 0), 0, 0, bus_id, NULL);
}
int ps3_repository_read_bus_type(unsigned int bus_index,
@@ -224,15 +218,9 @@ int ps3_repository_read_dev_str(unsigned int bus_index,
int ps3_repository_read_dev_id(unsigned int bus_index, unsigned int dev_index,
u64 *dev_id)
{
- int result;
-
- result = read_node(PS3_LPAR_ID_PME,
- make_first_field("bus", bus_index),
- make_field("dev", dev_index),
- make_field("id", 0),
- 0,
- dev_id, NULL);
- return result;
+ return read_node(PS3_LPAR_ID_PME, make_first_field("bus", bus_index),
+ make_field("dev", dev_index), make_field("id", 0), 0,
+ dev_id, NULL);
}
int ps3_repository_read_dev_type(unsigned int bus_index,
diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c
index 6244bc849469..9dabea6e1443 100644
--- a/arch/powerpc/platforms/ps3/setup.c
+++ b/arch/powerpc/platforms/ps3/setup.c
@@ -104,20 +104,6 @@ static void __noreturn ps3_halt(void)
ps3_sys_manager_halt(); /* never returns */
}
-static void ps3_panic(char *str)
-{
- DBG("%s:%d %s\n", __func__, __LINE__, str);
-
- smp_send_stop();
- printk("\n");
- printk(" System does not reboot automatically.\n");
- printk(" Please press POWER button.\n");
- printk("\n");
-
- while(1)
- lv1_pause(1);
-}
-
#if defined(CONFIG_FB_PS3) || defined(CONFIG_FB_PS3_MODULE) || \
defined(CONFIG_PS3_FLASH) || defined(CONFIG_PS3_FLASH_MODULE)
static void __init prealloc(struct ps3_prealloc *p)
@@ -269,7 +255,6 @@ define_machine(ps3) {
.probe = ps3_probe,
.setup_arch = ps3_setup_arch,
.init_IRQ = ps3_init_IRQ,
- .panic = ps3_panic,
.get_boot_time = ps3_get_boot_time,
.set_dabr = ps3_set_dabr,
.calibrate_decr = ps3_calibrate_decr,
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 3a6dfd14f64b..71dd69d9ec64 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -7,6 +7,7 @@ config PPC_PSERIES
select PCI
select PCI_MSI
select PPC_XICS
+ select PPC_XIVE_SPAPR
select PPC_ICP_NATIVE
select PPC_ICP_HV
select PPC_ICS_RTAS
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 39187696ee74..783f36364690 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -254,18 +254,15 @@ cc_error:
return first_dn;
}
-int dlpar_attach_node(struct device_node *dn)
+int dlpar_attach_node(struct device_node *dn, struct device_node *parent)
{
int rc;
- dn->parent = pseries_of_derive_parent(dn->full_name);
- if (IS_ERR(dn->parent))
- return PTR_ERR(dn->parent);
+ dn->parent = parent;
rc = of_attach_node(dn);
if (rc) {
- printk(KERN_ERR "Failed to add device node %s\n",
- dn->full_name);
+ printk(KERN_ERR "Failed to add device node %pOF\n", dn);
return rc;
}
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index 1eef46d9cf30..6b812ad990e4 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -247,14 +247,13 @@ static void *pseries_eeh_probe(struct pci_dn *pdn, void *data)
/* Initialize the fake PE */
memset(&pe, 0, sizeof(struct eeh_pe));
- pe.phb = edev->phb;
+ pe.phb = pdn->phb;
pe.config_addr = (pdn->busno << 16) | (pdn->devfn << 8);
/* Enable EEH on the device */
ret = eeh_ops->set_option(&pe, EEH_OPT_ENABLE);
if (!ret) {
/* Retrieve PE address */
- edev->config_addr = (pdn->busno << 16) | (pdn->devfn << 8);
edev->pe_config_addr = eeh_ops->get_pe_addr(&pe);
pe.addr = edev->pe_config_addr;
@@ -279,7 +278,6 @@ static void *pseries_eeh_probe(struct pci_dn *pdn, void *data)
/* This device doesn't support EEH, but it may have an
* EEH parent, in which case we mark it as supported.
*/
- edev->config_addr = pdn_to_eeh_dev(pdn->parent)->config_addr;
edev->pe_config_addr = pdn_to_eeh_dev(pdn->parent)->pe_config_addr;
eeh_add_to_parent_pe(edev);
}
diff --git a/arch/powerpc/platforms/pseries/event_sources.c b/arch/powerpc/platforms/pseries/event_sources.c
index 32187dc76730..6eeb0d4bab61 100644
--- a/arch/powerpc/platforms/pseries/event_sources.c
+++ b/arch/powerpc/platforms/pseries/event_sources.c
@@ -36,8 +36,8 @@ void request_event_sources_irqs(struct device_node *np,
virqs[count] = irq_create_of_mapping(&oirq);
if (!virqs[count]) {
pr_err("event-sources: Unable to allocate "
- "interrupt number for %s\n",
- np->full_name);
+ "interrupt number for %pOF\n",
+ np);
WARN_ON(1);
} else {
count++;
@@ -48,7 +48,7 @@ void request_event_sources_irqs(struct device_node *np,
for (i = 0; i < count; i++) {
if (request_irq(virqs[i], handler, 0, name, NULL)) {
pr_err("event-sources: Unable to request interrupt "
- "%d for %s\n", virqs[i], np->full_name);
+ "%d for %pOF\n", virqs[i], np);
WARN_ON(1);
return;
}
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index 6afd1efd3633..fc0d8f97c03a 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -34,6 +34,7 @@
#include <asm/machdep.h>
#include <asm/vdso_datapage.h>
#include <asm/xics.h>
+#include <asm/xive.h>
#include <asm/plpar_wrappers.h>
#include "pseries.h"
@@ -109,7 +110,10 @@ static void pseries_mach_cpu_die(void)
local_irq_disable();
idle_task_exit();
- xics_teardown_cpu();
+ if (xive_enabled())
+ xive_teardown_cpu();
+ else
+ xics_teardown_cpu();
if (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
set_cpu_current_state(cpu, CPU_STATE_INACTIVE);
@@ -174,7 +178,10 @@ static int pseries_cpu_disable(void)
boot_cpuid = cpumask_any(cpu_online_mask);
/* FIXME: abstract this to not be platform specific later on */
- xics_migrate_irqs_away();
+ if (xive_enabled())
+ xive_smp_disable_cpu();
+ else
+ xics_migrate_irqs_away();
return 0;
}
@@ -264,8 +271,8 @@ static int pseries_add_processor(struct device_node *np)
/* If we get here, it most likely means that NR_CPUS is
* less than the partition's max processors setting.
*/
- printk(KERN_ERR "Cannot add cpu %s; this system configuration"
- " supports %d logical cpus.\n", np->full_name,
+ printk(KERN_ERR "Cannot add cpu %pOF; this system configuration"
+ " supports %d logical cpus.\n", np,
num_possible_cpus());
goto out_unlock;
}
@@ -463,7 +470,7 @@ static ssize_t dlpar_cpu_add(u32 drc_index)
return -EINVAL;
}
- rc = dlpar_attach_node(dn);
+ rc = dlpar_attach_node(dn, parent);
if (rc) {
saved_rc = rc;
pr_warn("Failed to attach node %s, rc: %d, drc index: %x\n",
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index ca9b2f4aaa22..1d48ab424bd9 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -336,7 +336,38 @@ static struct memory_block *lmb_to_memblock(struct of_drconf_cell *lmb)
return mem_block;
}
+static int dlpar_change_lmb_state(struct of_drconf_cell *lmb, bool online)
+{
+ struct memory_block *mem_block;
+ int rc;
+
+ mem_block = lmb_to_memblock(lmb);
+ if (!mem_block)
+ return -EINVAL;
+
+ if (online && mem_block->dev.offline)
+ rc = device_online(&mem_block->dev);
+ else if (!online && !mem_block->dev.offline)
+ rc = device_offline(&mem_block->dev);
+ else
+ rc = 0;
+
+ put_device(&mem_block->dev);
+
+ return rc;
+}
+
+static int dlpar_online_lmb(struct of_drconf_cell *lmb)
+{
+ return dlpar_change_lmb_state(lmb, true);
+}
+
#ifdef CONFIG_MEMORY_HOTREMOVE
+static int dlpar_offline_lmb(struct of_drconf_cell *lmb)
+{
+ return dlpar_change_lmb_state(lmb, false);
+}
+
static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
{
unsigned long block_sz, start_pfn;
@@ -431,19 +462,13 @@ static int dlpar_add_lmb(struct of_drconf_cell *);
static int dlpar_remove_lmb(struct of_drconf_cell *lmb)
{
- struct memory_block *mem_block;
unsigned long block_sz;
int nid, rc;
if (!lmb_is_removable(lmb))
return -EINVAL;
- mem_block = lmb_to_memblock(lmb);
- if (!mem_block)
- return -EINVAL;
-
- rc = device_offline(&mem_block->dev);
- put_device(&mem_block->dev);
+ rc = dlpar_offline_lmb(lmb);
if (rc)
return rc;
@@ -737,20 +762,6 @@ static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index,
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
-static int dlpar_online_lmb(struct of_drconf_cell *lmb)
-{
- struct memory_block *mem_block;
- int rc;
-
- mem_block = lmb_to_memblock(lmb);
- if (!mem_block)
- return -EINVAL;
-
- rc = device_online(&mem_block->dev);
- put_device(&mem_block->dev);
- return rc;
-}
-
static int dlpar_add_lmb(struct of_drconf_cell *lmb)
{
unsigned long block_sz;
@@ -817,6 +828,9 @@ static int dlpar_memory_add_by_count(u32 lmbs_to_add, struct property *prop)
return -EINVAL;
for (i = 0; i < num_lmbs && lmbs_to_add != lmbs_added; i++) {
+ if (lmbs[i].flags & DRCONF_MEM_ASSIGNED)
+ continue;
+
rc = dlpar_acquire_drc(lmbs[i].drc_index);
if (rc)
continue;
@@ -859,6 +873,7 @@ static int dlpar_memory_add_by_count(u32 lmbs_to_add, struct property *prop)
lmbs[i].base_addr, lmbs[i].drc_index);
lmbs[i].reserved = 0;
}
+ rc = 0;
}
return rc;
diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S
index 74b5b8e239c8..c511a1743a44 100644
--- a/arch/powerpc/platforms/pseries/hvCall.S
+++ b/arch/powerpc/platforms/pseries/hvCall.S
@@ -23,7 +23,7 @@
.globl hcall_tracepoint_refcount
hcall_tracepoint_refcount:
- .llong 0
+ .8byte 0
.section ".text"
#endif
diff --git a/arch/powerpc/platforms/pseries/ibmebus.c b/arch/powerpc/platforms/pseries/ibmebus.c
index 52146b1356d2..408a86044133 100644
--- a/arch/powerpc/platforms/pseries/ibmebus.c
+++ b/arch/powerpc/platforms/pseries/ibmebus.c
@@ -150,8 +150,7 @@ static const struct dma_map_ops ibmebus_dma_ops = {
static int ibmebus_match_path(struct device *dev, void *data)
{
struct device_node *dn = to_platform_device(dev)->dev.of_node;
- return (dn->full_name &&
- (strcasecmp((char *)data, dn->full_name) == 0));
+ return (of_find_node_by_path(data) == dn);
}
static int ibmebus_match_node(struct device *dev, void *data)
@@ -395,7 +394,7 @@ static ssize_t devspec_show(struct device *dev,
struct platform_device *ofdev;
ofdev = to_platform_device(dev);
- return sprintf(buf, "%s\n", ofdev->dev.of_node->full_name);
+ return sprintf(buf, "%pOF\n", ofdev->dev.of_node);
}
static DEVICE_ATTR_RO(devspec);
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 8374adee27e3..7c181467d0ad 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -511,8 +511,8 @@ static void iommu_table_setparms(struct pci_controller *phb,
basep = of_get_property(node, "linux,tce-base", NULL);
sizep = of_get_property(node, "linux,tce-size", NULL);
if (basep == NULL || sizep == NULL) {
- printk(KERN_ERR "PCI_DMA: iommu_table_setparms: %s has "
- "missing tce entries !\n", dn->full_name);
+ printk(KERN_ERR "PCI_DMA: iommu_table_setparms: %pOF has "
+ "missing tce entries !\n", dn);
return;
}
@@ -587,7 +587,7 @@ static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
dn = pci_bus_to_OF_node(bus);
- pr_debug("pci_dma_bus_setup_pSeries: setting up bus %s\n", dn->full_name);
+ pr_debug("pci_dma_bus_setup_pSeries: setting up bus %pOF\n", dn);
if (bus->self) {
/* This is not a root bus, any setup will be done for the
@@ -701,8 +701,8 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
dn = pci_bus_to_OF_node(bus);
- pr_debug("pci_dma_bus_setup_pSeriesLP: setting up bus %s\n",
- dn->full_name);
+ pr_debug("pci_dma_bus_setup_pSeriesLP: setting up bus %pOF\n",
+ dn);
/* Find nearest ibm,dma-window, walking up the device tree */
for (pdn = dn; pdn != NULL; pdn = pdn->parent) {
@@ -718,8 +718,8 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
ppci = PCI_DN(pdn);
- pr_debug(" parent is %s, iommu_table: 0x%p\n",
- pdn->full_name, ppci->table_group);
+ pr_debug(" parent is %pOF, iommu_table: 0x%p\n",
+ pdn, ppci->table_group);
if (!ppci->table_group) {
ppci->table_group = iommu_pseries_alloc_group(ppci->phb->node);
@@ -817,28 +817,28 @@ static void remove_ddw(struct device_node *np, bool remove_prop)
ret = tce_clearrange_multi_pSeriesLP(0,
1ULL << (be32_to_cpu(dwp->window_shift) - PAGE_SHIFT), dwp);
if (ret)
- pr_warning("%s failed to clear tces in window.\n",
- np->full_name);
+ pr_warning("%pOF failed to clear tces in window.\n",
+ np);
else
- pr_debug("%s successfully cleared tces in window.\n",
- np->full_name);
+ pr_debug("%pOF successfully cleared tces in window.\n",
+ np);
ret = rtas_call(ddw_avail[2], 1, 1, NULL, liobn);
if (ret)
- pr_warning("%s: failed to remove direct window: rtas returned "
+ pr_warning("%pOF: failed to remove direct window: rtas returned "
"%d to ibm,remove-pe-dma-window(%x) %llx\n",
- np->full_name, ret, ddw_avail[2], liobn);
+ np, ret, ddw_avail[2], liobn);
else
- pr_debug("%s: successfully removed direct window: rtas returned "
+ pr_debug("%pOF: successfully removed direct window: rtas returned "
"%d to ibm,remove-pe-dma-window(%x) %llx\n",
- np->full_name, ret, ddw_avail[2], liobn);
+ np, ret, ddw_avail[2], liobn);
delprop:
if (remove_prop)
ret = of_remove_property(np, win64);
if (ret)
- pr_warning("%s: failed to remove direct window property: %d\n",
- np->full_name, ret);
+ pr_warning("%pOF: failed to remove direct window property: %d\n",
+ np, ret);
}
static u64 find_existing_ddw(struct device_node *pdn)
@@ -1004,7 +1004,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
* list.
*/
list_for_each_entry(fpdn, &failed_ddw_pdn_list, list) {
- if (!strcmp(fpdn->pdn->full_name, pdn->full_name))
+ if (fpdn->pdn == pdn)
goto out_unlock;
}
@@ -1087,8 +1087,8 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
ddwprop->tce_shift = cpu_to_be32(page_shift);
ddwprop->window_shift = cpu_to_be32(len);
- dev_dbg(&dev->dev, "created tce table LIOBN 0x%x for %s\n",
- create.liobn, dn->full_name);
+ dev_dbg(&dev->dev, "created tce table LIOBN 0x%x for %pOF\n",
+ create.liobn, dn);
window = kzalloc(sizeof(*window), GFP_KERNEL);
if (!window)
@@ -1097,15 +1097,15 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
ret = walk_system_ram_range(0, memblock_end_of_DRAM() >> PAGE_SHIFT,
win64->value, tce_setrange_multi_pSeriesLP_walk);
if (ret) {
- dev_info(&dev->dev, "failed to map direct window for %s: %d\n",
- dn->full_name, ret);
+ dev_info(&dev->dev, "failed to map direct window for %pOF: %d\n",
+ dn, ret);
goto out_free_window;
}
ret = of_add_property(pdn, win64);
if (ret) {
- dev_err(&dev->dev, "unable to add dma window property for %s: %d",
- pdn->full_name, ret);
+ dev_err(&dev->dev, "unable to add dma window property for %pOF: %d",
+ pdn, ret);
goto out_free_window;
}
@@ -1158,7 +1158,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
* already allocated.
*/
dn = pci_device_to_OF_node(dev);
- pr_debug(" node is %s\n", dn->full_name);
+ pr_debug(" node is %pOF\n", dn);
for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->table_group;
pdn = pdn->parent) {
@@ -1169,11 +1169,11 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
if (!pdn || !PCI_DN(pdn)) {
printk(KERN_WARNING "pci_dma_dev_setup_pSeriesLP: "
- "no DMA window found for pci dev=%s dn=%s\n",
- pci_name(dev), of_node_full_name(dn));
+ "no DMA window found for pci dev=%s dn=%pOF\n",
+ pci_name(dev), dn);
return;
}
- pr_debug(" parent is %s\n", pdn->full_name);
+ pr_debug(" parent is %pOF\n", pdn);
pci = PCI_DN(pdn);
if (!pci->table_group) {
@@ -1213,7 +1213,7 @@ static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask)
/* only attempt to use a new window if 64-bit DMA is requested */
if (!disable_ddw && dma_mask == DMA_BIT_MASK(64)) {
dn = pci_device_to_OF_node(pdev);
- dev_dbg(dev, "node is %s\n", dn->full_name);
+ dev_dbg(dev, "node is %pOF\n", dn);
/*
* the device tree might contain the dma-window properties
diff --git a/arch/powerpc/platforms/pseries/kexec.c b/arch/powerpc/platforms/pseries/kexec.c
index 6681ac97fb18..eeb13429d685 100644
--- a/arch/powerpc/platforms/pseries/kexec.c
+++ b/arch/powerpc/platforms/pseries/kexec.c
@@ -15,6 +15,7 @@
#include <asm/firmware.h>
#include <asm/kexec.h>
#include <asm/xics.h>
+#include <asm/xive.h>
#include <asm/smp.h>
#include <asm/plpar_wrappers.h>
@@ -51,5 +52,8 @@ void pseries_kexec_cpu_down(int crash_shutdown, int secondary)
}
}
- xics_kexec_teardown_cpu(secondary);
+ if (xive_enabled())
+ xive_kexec_teardown_cpu(secondary);
+ else
+ xics_kexec_teardown_cpu(secondary);
}
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index 2da4851eff99..210ce632d63e 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -229,7 +229,7 @@ static int add_dt_node(__be32 parent_phandle, __be32 drc_index)
if (!dn)
return -ENOENT;
- rc = dlpar_attach_node(dn);
+ rc = dlpar_attach_node(dn, parent_dn);
if (rc)
dlpar_free_cc_nodes(dn);
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
index 326ef0dd6038..b7496948129e 100644
--- a/arch/powerpc/platforms/pseries/msi.c
+++ b/arch/powerpc/platforms/pseries/msi.c
@@ -132,19 +132,14 @@ static void rtas_teardown_msi_irqs(struct pci_dev *pdev)
static int check_req(struct pci_dev *pdev, int nvec, char *prop_name)
{
struct device_node *dn;
- struct pci_dn *pdn;
const __be32 *p;
u32 req_msi;
- pdn = pci_get_pdn(pdev);
- if (!pdn)
- return -ENODEV;
-
- dn = pdn->node;
+ dn = pci_device_to_OF_node(pdev);
p = of_get_property(dn, prop_name, NULL);
if (!p) {
- pr_debug("rtas_msi: No %s on %s\n", prop_name, dn->full_name);
+ pr_debug("rtas_msi: No %s on %pOF\n", prop_name, dn);
return -ENOENT;
}
@@ -182,8 +177,8 @@ static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total)
while (dn) {
p = of_get_property(dn, "ibm,pe-total-#msi", NULL);
if (p) {
- pr_debug("rtas_msi: found prop on dn %s\n",
- dn->full_name);
+ pr_debug("rtas_msi: found prop on dn %pOF\n",
+ dn);
*total = be32_to_cpup(p);
return dn;
}
@@ -197,7 +192,6 @@ static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total)
static struct device_node *find_pe_dn(struct pci_dev *dev, int *total)
{
struct device_node *dn;
- struct pci_dn *pdn;
struct eeh_dev *edev;
/* Found our PE and assume 8 at that point. */
@@ -210,8 +204,7 @@ static struct device_node *find_pe_dn(struct pci_dev *dev, int *total)
edev = pdn_to_eeh_dev(PCI_DN(dn));
if (edev->pe)
edev = list_first_entry(&edev->pe->edevs, struct eeh_dev, list);
- pdn = eeh_dev_to_pdn(edev);
- dn = pdn ? pdn->node : NULL;
+ dn = pci_device_to_OF_node(edev->pdev);
if (!dn)
return NULL;
@@ -222,7 +215,7 @@ static struct device_node *find_pe_dn(struct pci_dev *dev, int *total)
/* Hardcode of 8 for old firmwares */
*total = 8;
- pr_debug("rtas_msi: using PE dn %s\n", dn->full_name);
+ pr_debug("rtas_msi: using PE dn %pOF\n", dn);
return dn;
}
@@ -242,7 +235,7 @@ static void *count_non_bridge_devices(struct device_node *dn, void *data)
const __be32 *p;
u32 class;
- pr_debug("rtas_msi: counting %s\n", dn->full_name);
+ pr_debug("rtas_msi: counting %pOF\n", dn);
p = of_get_property(dn, "class-code", NULL);
class = p ? be32_to_cpup(p) : 0;
@@ -300,7 +293,7 @@ static int msi_quota_for_device(struct pci_dev *dev, int request)
goto out;
}
- pr_debug("rtas_msi: found PE %s\n", pe_dn->full_name);
+ pr_debug("rtas_msi: found PE %pOF\n", pe_dn);
memset(&counts, 0, sizeof(struct msi_counts));
diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
index 547fd13e4f8e..561917fa54a8 100644
--- a/arch/powerpc/platforms/pseries/pci_dlpar.c
+++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
@@ -38,7 +38,7 @@ struct pci_controller *init_phb_dynamic(struct device_node *dn)
{
struct pci_controller *phb;
- pr_debug("PCI: Initializing new hotplug PHB %s\n", dn->full_name);
+ pr_debug("PCI: Initializing new hotplug PHB %pOF\n", dn);
phb = pcibios_alloc_controller(dn);
if (!phb)
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
index 1361a9db534b..4470a3194311 100644
--- a/arch/powerpc/platforms/pseries/pseries.h
+++ b/arch/powerpc/platforms/pseries/pseries.h
@@ -46,7 +46,7 @@ extern void dlpar_free_cc_nodes(struct device_node *);
extern void dlpar_free_cc_property(struct property *);
extern struct device_node *dlpar_configure_connector(__be32,
struct device_node *);
-extern int dlpar_attach_node(struct device_node *);
+extern int dlpar_attach_node(struct device_node *, struct device_node *);
extern int dlpar_detach_node(struct device_node *);
extern int dlpar_acquire_drc(u32 drc_index);
extern int dlpar_release_drc(u32 drc_index);
diff --git a/arch/powerpc/platforms/pseries/pseries_energy.c b/arch/powerpc/platforms/pseries/pseries_energy.c
index 164a13d3998a..35c891aabef0 100644
--- a/arch/powerpc/platforms/pseries/pseries_energy.c
+++ b/arch/powerpc/platforms/pseries/pseries_energy.c
@@ -229,10 +229,9 @@ static int __init pseries_energy_init(void)
int cpu, err;
struct device *cpu_dev;
- if (!firmware_has_feature(FW_FEATURE_BEST_ENERGY)) {
- printk(KERN_INFO "Hypercall H_BEST_ENERGY not supported\n");
- return 0;
- }
+ if (!firmware_has_feature(FW_FEATURE_BEST_ENERGY))
+ return 0; /* H_BEST_ENERGY hcall not supported */
+
/* Create the sysfs files */
err = device_create_file(cpu_subsys.dev_root,
&attr_cpu_activate_hint_list);
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index bb70b26334f0..4923ffe230cf 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -379,6 +379,21 @@ static void fwnmi_release_errinfo(void)
int pSeries_system_reset_exception(struct pt_regs *regs)
{
+#ifdef __LITTLE_ENDIAN__
+ /*
+ * Some firmware byteswaps SRR registers and gives incorrect SRR1. Try
+ * to detect the bad SRR1 pattern here. Flip the NIP back to correct
+ * endian for reporting purposes. Unfortunately the MSR can't be fixed,
+ * so clear it. It will be missing MSR_RI so we won't try to recover.
+ */
+ if ((be64_to_cpu(regs->msr) &
+ (MSR_LE|MSR_RI|MSR_DR|MSR_IR|MSR_ME|MSR_PR|
+ MSR_ILE|MSR_HV|MSR_SF)) == (MSR_DR|MSR_SF)) {
+ regs->nip = be64_to_cpu((__be64)regs->nip);
+ regs->msr = 0;
+ }
+#endif
+
if (fwnmi_active) {
struct rtas_error_log *errhdr = fwnmi_get_errinfo(regs);
if (errhdr) {
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index e5bf1e84047f..296c188fd5ca 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -82,7 +82,6 @@ static int pSeries_reconfig_remove_node(struct device_node *np)
of_detach_node(np);
of_node_put(parent);
- of_node_put(np); /* Must decrement the refcount */
return 0;
}
@@ -363,20 +362,13 @@ static int do_update_property(char *buf, size_t bufsize)
static ssize_t ofdt_write(struct file *file, const char __user *buf, size_t count,
loff_t *off)
{
- int rv = 0;
+ int rv;
char *kbuf;
char *tmp;
- if (!(kbuf = kmalloc(count + 1, GFP_KERNEL))) {
- rv = -ENOMEM;
- goto out;
- }
- if (copy_from_user(kbuf, buf, count)) {
- rv = -EFAULT;
- goto out;
- }
-
- kbuf[count] = '\0';
+ kbuf = memdup_user_nul(buf, count);
+ if (IS_ERR(kbuf))
+ return PTR_ERR(kbuf);
tmp = strchr(kbuf, ' ');
if (!tmp) {
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index b5d86426e97b..5f1beb8367ac 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -57,6 +57,7 @@
#include <asm/nvram.h>
#include <asm/pmc.h>
#include <asm/xics.h>
+#include <asm/xive.h>
#include <asm/ppc-pci.h>
#include <asm/i8259.h>
#include <asm/udbg.h>
@@ -176,8 +177,11 @@ static void __init pseries_setup_i8259_cascade(void)
static void __init pseries_init_irq(void)
{
- xics_init();
- pseries_setup_i8259_cascade();
+ /* Try using a XIVE if available, otherwise use a XICS */
+ if (!xive_spapr_init()) {
+ xics_init();
+ pseries_setup_i8259_cascade();
+ }
}
static void pseries_lpar_enable_pmcs(void)
@@ -722,7 +726,6 @@ define_machine(pseries) {
.pcibios_fixup = pSeries_final_fixup,
.restart = rtas_restart,
.halt = rtas_halt,
- .panic = rtas_os_term,
.get_boot_time = rtas_get_boot_time,
.get_rtc_time = rtas_get_rtc_time,
.set_rtc_time = rtas_set_rtc_time,
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index 24785f63fb40..2e184829e5d4 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -41,6 +41,7 @@
#include <asm/vdso_datapage.h>
#include <asm/cputhreads.h>
#include <asm/xics.h>
+#include <asm/xive.h>
#include <asm/dbell.h>
#include <asm/plpar_wrappers.h>
#include <asm/code-patching.h>
@@ -136,7 +137,9 @@ out:
static void smp_setup_cpu(int cpu)
{
- if (cpu != boot_cpuid)
+ if (xive_enabled())
+ xive_smp_setup_cpu();
+ else if (cpu != boot_cpuid)
xics_setup_cpu();
if (firmware_has_feature(FW_FEATURE_SPLPAR))
@@ -181,6 +184,13 @@ static int smp_pSeries_kick_cpu(int nr)
return 0;
}
+static int pseries_smp_prepare_cpu(int cpu)
+{
+ if (xive_enabled())
+ return xive_smp_prepare_cpu(cpu);
+ return 0;
+}
+
static void smp_pseries_cause_ipi(int cpu)
{
/* POWER9 should not use this handler */
@@ -211,7 +221,7 @@ static int pseries_cause_nmi_ipi(int cpu)
return 0;
}
-static __init void pSeries_smp_probe(void)
+static __init void pSeries_smp_probe_xics(void)
{
xics_smp_probe();
@@ -221,11 +231,24 @@ static __init void pSeries_smp_probe(void)
smp_ops->cause_ipi = icp_ops->cause_ipi;
}
+static __init void pSeries_smp_probe(void)
+{
+ if (xive_enabled())
+ /*
+ * Don't use P9 doorbells when XIVE is enabled. IPIs
+ * using MMIOs should be faster
+ */
+ xive_smp_probe();
+ else
+ pSeries_smp_probe_xics();
+}
+
static struct smp_ops_t pseries_smp_ops = {
.message_pass = NULL, /* Use smp_muxed_ipi_message_pass */
.cause_ipi = NULL, /* Filled at runtime by pSeries_smp_probe() */
.cause_nmi_ipi = pseries_cause_nmi_ipi,
.probe = pSeries_smp_probe,
+ .prepare_cpu = pseries_smp_prepare_cpu,
.kick_cpu = smp_pSeries_kick_cpu,
.setup_cpu = smp_setup_cpu,
.cpu_bootable = smp_generic_cpu_bootable,
diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c
index 8a47f168476b..12277bc9fd9e 100644
--- a/arch/powerpc/platforms/pseries/vio.c
+++ b/arch/powerpc/platforms/pseries/vio.c
@@ -1357,14 +1357,14 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
*/
parent_node = of_get_parent(of_node);
if (parent_node) {
- if (!strcmp(parent_node->full_name, "/ibm,platform-facilities"))
+ if (!strcmp(parent_node->type, "ibm,platform-facilities"))
family = PFO;
- else if (!strcmp(parent_node->full_name, "/vdevice"))
+ else if (!strcmp(parent_node->type, "vdevice"))
family = VDEVICE;
else {
- pr_warn("%s: parent(%s) of %s not recognized.\n",
+ pr_warn("%s: parent(%pOF) of %s not recognized.\n",
__func__,
- parent_node->full_name,
+ parent_node,
of_node_name);
of_node_put(parent_node);
return NULL;
@@ -1555,7 +1555,7 @@ static ssize_t devspec_show(struct device *dev,
{
struct device_node *of_node = dev->of_node;
- return sprintf(buf, "%s\n", of_node_full_name(of_node));
+ return sprintf(buf, "%pOF\n", of_node);
}
static DEVICE_ATTR_RO(devspec);
diff --git a/arch/powerpc/purgatory/trampoline.S b/arch/powerpc/purgatory/trampoline.S
index 3696ea6c4826..4aad9dd10ace 100644
--- a/arch/powerpc/purgatory/trampoline.S
+++ b/arch/powerpc/purgatory/trampoline.S
@@ -67,7 +67,7 @@ master:
mr %r16,%r3 /* save dt address in reg16 */
li %r4,20
LWZX_BE %r6,%r3,%r4 /* fetch __be32 version number at byte 20 */
- cmpwi %r0,%r6,2 /* v2 or later? */
+ cmpwi %cr0,%r6,2 /* v2 or later? */
blt 1f
li %r4,28
STWX_BE %r17,%r3,%r4 /* Store my cpu as __be32 at byte 28 */
@@ -104,13 +104,13 @@ master:
.balign 8
.globl kernel
kernel:
- .llong 0x0
+ .8byte 0x0
.size kernel, . - kernel
.balign 8
.globl dt_offset
dt_offset:
- .llong 0x0
+ .8byte 0x0
.size dt_offset, . - dt_offset
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index c0ae11d4f62f..79416fa2e3ba 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -36,25 +36,15 @@ obj-$(CONFIG_AXON_RAM) += axonram.o
obj-$(CONFIG_PPC_INDIRECT_PCI) += indirect_pci.o
obj-$(CONFIG_PPC_I8259) += i8259.o
obj-$(CONFIG_IPIC) += ipic.o
-obj-$(CONFIG_4xx) += uic.o
-obj-$(CONFIG_PPC4xx_OCM) += ppc4xx_ocm.o
-obj-$(CONFIG_4xx_SOC) += ppc4xx_soc.o
obj-$(CONFIG_XILINX_VIRTEX) += xilinx_intc.o
obj-$(CONFIG_XILINX_PCI) += xilinx_pci.o
obj-$(CONFIG_OF_RTC) += of_rtc.o
-ifeq ($(CONFIG_PCI),y)
-obj-$(CONFIG_4xx) += ppc4xx_pci.o
-endif
-obj-$(CONFIG_PPC4xx_HSTA_MSI) += ppc4xx_hsta_msi.o
-obj-$(CONFIG_PPC4xx_MSI) += ppc4xx_msi.o
-obj-$(CONFIG_PPC4xx_CPM) += ppc4xx_cpm.o
-obj-$(CONFIG_PPC4xx_GPIO) += ppc4xx_gpio.o
obj-$(CONFIG_CPM) += cpm_common.o
+obj-$(CONFIG_CPM1) += cpm1.o
obj-$(CONFIG_CPM2) += cpm2.o cpm2_pic.o
obj-$(CONFIG_QUICC_ENGINE) += cpm_common.o
obj-$(CONFIG_PPC_DCR) += dcr.o
-obj-$(CONFIG_8xx) += mpc8xx_pic.o cpm1.o
obj-$(CONFIG_UCODE_PATCH) += micropatch.o
obj-$(CONFIG_PPC_MPC512x) += mpc5xxx_clocks.o
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index 2799706106c6..c60e84e4558d 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -110,7 +110,7 @@ axon_ram_irq_handler(int irq, void *dev)
static blk_qc_t
axon_ram_make_request(struct request_queue *queue, struct bio *bio)
{
- struct axon_ram_bank *bank = bio->bi_bdev->bd_disk->private_data;
+ struct axon_ram_bank *bank = bio->bi_disk->private_data;
unsigned long phys_mem, phys_end;
void *user_mem;
struct bio_vec vec;
@@ -188,15 +188,12 @@ static int axon_ram_probe(struct platform_device *device)
axon_ram_bank_id++;
- dev_info(&device->dev, "Found memory controller on %s\n",
- device->dev.of_node->full_name);
+ dev_info(&device->dev, "Found memory controller on %pOF\n",
+ device->dev.of_node);
- bank = kzalloc(sizeof(struct axon_ram_bank), GFP_KERNEL);
- if (bank == NULL) {
- dev_err(&device->dev, "Out of memory\n");
- rc = -ENOMEM;
- goto failed;
- }
+ bank = kzalloc(sizeof(*bank), GFP_KERNEL);
+ if (!bank)
+ return -ENOMEM;
device->dev.platform_data = bank;
@@ -292,25 +289,22 @@ static int axon_ram_probe(struct platform_device *device)
return 0;
failed:
- if (bank != NULL) {
- if (bank->irq_id)
- free_irq(bank->irq_id, device);
- if (bank->disk != NULL) {
- if (bank->disk->major > 0)
- unregister_blkdev(bank->disk->major,
- bank->disk->disk_name);
- if (bank->disk->flags & GENHD_FL_UP)
- del_gendisk(bank->disk);
- put_disk(bank->disk);
- }
- kill_dax(bank->dax_dev);
- put_dax(bank->dax_dev);
- device->dev.platform_data = NULL;
- if (bank->io_addr != 0)
- iounmap((void __iomem *) bank->io_addr);
- kfree(bank);
+ if (bank->irq_id)
+ free_irq(bank->irq_id, device);
+ if (bank->disk != NULL) {
+ if (bank->disk->major > 0)
+ unregister_blkdev(bank->disk->major,
+ bank->disk->disk_name);
+ if (bank->disk->flags & GENHD_FL_UP)
+ del_gendisk(bank->disk);
+ put_disk(bank->disk);
}
-
+ kill_dax(bank->dax_dev);
+ put_dax(bank->dax_dev);
+ device->dev.platform_data = NULL;
+ if (bank->io_addr != 0)
+ iounmap((void __iomem *) bank->io_addr);
+ kfree(bank);
return rc;
}
diff --git a/arch/powerpc/sysdev/dcr.c b/arch/powerpc/sysdev/dcr.c
index 121e26fffd50..d72eda568b7d 100644
--- a/arch/powerpc/sysdev/dcr.c
+++ b/arch/powerpc/sysdev/dcr.c
@@ -195,8 +195,8 @@ dcr_host_mmio_t dcr_map_mmio(struct device_node *dev,
dcr_host_mmio_t ret = { .token = NULL, .stride = 0, .base = dcr_n };
u64 addr;
- pr_debug("dcr_map(%s, 0x%x, 0x%x)\n",
- dev->full_name, dcr_n, dcr_c);
+ pr_debug("dcr_map(%pOF, 0x%x, 0x%x)\n",
+ dev, dcr_n, dcr_c);
addr = of_translate_dcr_address(dev, dcr_n, &ret.stride);
pr_debug("translates to addr: 0x%llx, stride: 0x%x\n",
diff --git a/arch/powerpc/sysdev/fsl_85xx_cache_sram.c b/arch/powerpc/sysdev/fsl_85xx_cache_sram.c
index 37a69097e022..00ccf3e4fcb4 100644
--- a/arch/powerpc/sysdev/fsl_85xx_cache_sram.c
+++ b/arch/powerpc/sysdev/fsl_85xx_cache_sram.c
@@ -101,8 +101,8 @@ int __init instantiate_cache_sram(struct platform_device *dev,
if (!request_mem_region(cache_sram->base_phys, cache_sram->size,
"fsl_85xx_cache_sram")) {
- dev_err(&dev->dev, "%s: request memory failed\n",
- dev->dev.of_node->full_name);
+ dev_err(&dev->dev, "%pOF: request memory failed\n",
+ dev->dev.of_node);
ret = -ENXIO;
goto out_free;
}
@@ -110,16 +110,16 @@ int __init instantiate_cache_sram(struct platform_device *dev,
cache_sram->base_virt = ioremap_prot(cache_sram->base_phys,
cache_sram->size, _PAGE_COHERENT | PAGE_KERNEL);
if (!cache_sram->base_virt) {
- dev_err(&dev->dev, "%s: ioremap_prot failed\n",
- dev->dev.of_node->full_name);
+ dev_err(&dev->dev, "%pOF: ioremap_prot failed\n",
+ dev->dev.of_node);
ret = -ENOMEM;
goto out_release;
}
cache_sram->rh = rh_create(sizeof(unsigned int));
if (IS_ERR(cache_sram->rh)) {
- dev_err(&dev->dev, "%s: Unable to create remote heap\n",
- dev->dev.of_node->full_name);
+ dev_err(&dev->dev, "%pOF: Unable to create remote heap\n",
+ dev->dev.of_node);
ret = PTR_ERR(cache_sram->rh);
goto out_unmap;
}
diff --git a/arch/powerpc/sysdev/fsl_gtm.c b/arch/powerpc/sysdev/fsl_gtm.c
index a6f0b96ce2c9..d902306f4718 100644
--- a/arch/powerpc/sysdev/fsl_gtm.c
+++ b/arch/powerpc/sysdev/fsl_gtm.c
@@ -388,8 +388,8 @@ static int __init fsl_gtm_init(void)
gtm = kzalloc(sizeof(*gtm), GFP_KERNEL);
if (!gtm) {
- pr_err("%s: unable to allocate memory\n",
- np->full_name);
+ pr_err("%pOF: unable to allocate memory\n",
+ np);
continue;
}
@@ -397,7 +397,7 @@ static int __init fsl_gtm_init(void)
clock = of_get_property(np, "clock-frequency", &size);
if (!clock || size != sizeof(*clock)) {
- pr_err("%s: no clock-frequency\n", np->full_name);
+ pr_err("%pOF: no clock-frequency\n", np);
goto err;
}
gtm->clock = *clock;
@@ -407,8 +407,8 @@ static int __init fsl_gtm_init(void)
irq = irq_of_parse_and_map(np, i);
if (!irq) {
- pr_err("%s: not enough interrupts specified\n",
- np->full_name);
+ pr_err("%pOF: not enough interrupts specified\n",
+ np);
goto err;
}
gtm->timers[i].irq = irq;
@@ -417,8 +417,8 @@ static int __init fsl_gtm_init(void)
gtm->regs = of_iomap(np, 0);
if (!gtm->regs) {
- pr_err("%s: unable to iomap registers\n",
- np->full_name);
+ pr_err("%pOF: unable to iomap registers\n",
+ np);
goto err;
}
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index 8a244828782e..44cbf4c12ea1 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -214,8 +214,8 @@ static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
phandle = np->phandle;
else {
dev_err(&pdev->dev,
- "node %s has an invalid fsl,msi phandle %u\n",
- hose->dn->full_name, np->phandle);
+ "node %pOF has an invalid fsl,msi phandle %u\n",
+ hose->dn, np->phandle);
return -EINVAL;
}
}
@@ -438,16 +438,16 @@ static int fsl_of_msi_probe(struct platform_device *dev)
if ((features->fsl_pic_ip & FSL_PIC_IP_MASK) != FSL_PIC_IP_VMPIC) {
err = of_address_to_resource(dev->dev.of_node, 0, &res);
if (err) {
- dev_err(&dev->dev, "invalid resource for node %s\n",
- dev->dev.of_node->full_name);
+ dev_err(&dev->dev, "invalid resource for node %pOF\n",
+ dev->dev.of_node);
goto error_out;
}
msi->msi_regs = ioremap(res.start, resource_size(&res));
if (!msi->msi_regs) {
err = -ENOMEM;
- dev_err(&dev->dev, "could not map node %s\n",
- dev->dev.of_node->full_name);
+ dev_err(&dev->dev, "could not map node %pOF\n",
+ dev->dev.of_node);
goto error_out;
}
msi->msiir_offset =
@@ -522,8 +522,8 @@ static int fsl_of_msi_probe(struct platform_device *dev)
for (irq_index = 0, i = 0; i < len / (2 * sizeof(u32)); i++) {
if (p[i * 2] % IRQS_PER_MSI_REG ||
p[i * 2 + 1] % IRQS_PER_MSI_REG) {
- pr_warn("%s: %s: msi available range of %u at %u is not IRQ-aligned\n",
- __func__, dev->dev.of_node->full_name,
+ pr_warn("%s: %pOF: msi available range of %u at %u is not IRQ-aligned\n",
+ __func__, dev->dev.of_node,
p[i * 2 + 1], p[i * 2]);
err = -EINVAL;
goto error_out;
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index d3a597456b6e..22d98057f773 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -202,7 +202,6 @@ static void setup_pci_atmu(struct pci_controller *hose)
u32 pcicsrbar = 0, pcicsrbar_sz;
u32 piwar = PIWAR_EN | PIWAR_PF | PIWAR_TGI_LOCAL |
PIWAR_READ_SNOOP | PIWAR_WRITE_SNOOP;
- const char *name = hose->dn->full_name;
const u64 *reg;
int len;
bool setup_inbound;
@@ -290,12 +289,12 @@ static void setup_pci_atmu(struct pci_controller *hose)
paddr_lo -= offset;
if (paddr_hi == paddr_lo) {
- pr_err("%s: No outbound window space\n", name);
+ pr_err("%pOF: No outbound window space\n", hose->dn);
return;
}
if (paddr_lo == 0) {
- pr_err("%s: No space for inbound window\n", name);
+ pr_err("%pOF: No space for inbound window\n", hose->dn);
return;
}
@@ -313,7 +312,7 @@ static void setup_pci_atmu(struct pci_controller *hose)
paddr_lo = min(paddr_lo, (u64)pcicsrbar);
- pr_info("%s: PCICSRBAR @ 0x%x\n", name, pcicsrbar);
+ pr_info("%pOF: PCICSRBAR @ 0x%x\n", hose->dn, pcicsrbar);
/* Setup inbound mem window */
mem = memblock_end_of_DRAM();
@@ -336,12 +335,12 @@ static void setup_pci_atmu(struct pci_controller *hose)
u64 address = be64_to_cpup(reg);
if ((address >= mem) && (address < (mem + PAGE_SIZE))) {
- pr_info("%s: extending DDR ATMU to cover MSIIR", name);
+ pr_info("%pOF: extending DDR ATMU to cover MSIIR", hose->dn);
mem += PAGE_SIZE;
} else {
/* TODO: Create a new ATMU for MSIIR */
- pr_warn("%s: msi-address-64 address of %llx is "
- "unsupported\n", name, address);
+ pr_warn("%pOF: msi-address-64 address of %llx is "
+ "unsupported\n", hose->dn, address);
}
}
@@ -354,8 +353,8 @@ static void setup_pci_atmu(struct pci_controller *hose)
if ((1ull << mem_log) != mem) {
mem_log++;
if ((1ull << mem_log) > mem)
- pr_info("%s: Setting PCI inbound window "
- "greater than memory size\n", name);
+ pr_info("%pOF: Setting PCI inbound window "
+ "greater than memory size\n", hose->dn);
}
piwar |= ((mem_log - 1) & PIWAR_SZ_MASK);
@@ -402,7 +401,7 @@ static void setup_pci_atmu(struct pci_controller *hose)
*/
ppc_md.dma_set_mask = fsl_pci_dma_set_mask;
- pr_info("%s: Setup 64-bit PCI DMA window\n", name);
+ pr_info("%pOF: Setup 64-bit PCI DMA window\n", hose->dn);
}
} else {
u64 paddr = 0;
@@ -443,18 +442,18 @@ static void setup_pci_atmu(struct pci_controller *hose)
#ifdef CONFIG_SWIOTLB
ppc_swiotlb_enable = 1;
#else
- pr_err("%s: ERROR: Memory size exceeds PCI ATMU ability to "
+ pr_err("%pOF: ERROR: Memory size exceeds PCI ATMU ability to "
"map - enable CONFIG_SWIOTLB to avoid dma errors.\n",
- name);
+ hose->dn);
#endif
/* adjusting outbound windows could reclaim space in mem map */
if (paddr_hi < 0xffffffffull)
- pr_warning("%s: WARNING: Outbound window cfg leaves "
+ pr_warning("%pOF: WARNING: Outbound window cfg leaves "
"gaps in memory map. Adjusting the memory map "
"could reduce unnecessary bounce buffering.\n",
- name);
+ hose->dn);
- pr_info("%s: DMA window size is 0x%llx\n", name,
+ pr_info("%pOF: DMA window size is 0x%llx\n", hose->dn,
(u64)hose->dma_window_size);
}
}
@@ -532,11 +531,11 @@ int fsl_add_bridge(struct platform_device *pdev, int is_primary)
dev = pdev->dev.of_node;
if (!of_device_is_available(dev)) {
- pr_warning("%s: disabled\n", dev->full_name);
+ pr_warning("%pOF: disabled\n", dev);
return -ENODEV;
}
- pr_debug("Adding PCI host bridge %s\n", dev->full_name);
+ pr_debug("Adding PCI host bridge %pOF\n", dev);
/* Fetch host bridge registers address */
if (of_address_to_resource(dev, 0, &rsrc)) {
@@ -547,8 +546,8 @@ int fsl_add_bridge(struct platform_device *pdev, int is_primary)
/* Get bus range if any */
bus_range = of_get_property(dev, "bus-range", &len);
if (bus_range == NULL || len < 2 * sizeof(int))
- printk(KERN_WARNING "Can't get bus-range for %s, assume"
- " bus 0\n", dev->full_name);
+ printk(KERN_WARNING "Can't get bus-range for %pOF, assume"
+ " bus 0\n", dev);
pci_add_flags(PCI_REASSIGN_ALL_BUS);
hose = pcibios_alloc_controller(dev);
@@ -809,11 +808,11 @@ int __init mpc83xx_add_bridge(struct device_node *dev)
is_mpc83xx_pci = 1;
if (!of_device_is_available(dev)) {
- pr_warning("%s: disabled by the firmware.\n",
- dev->full_name);
+ pr_warning("%pOF: disabled by the firmware.\n",
+ dev);
return -ENODEV;
}
- pr_debug("Adding PCI host bridge %s\n", dev->full_name);
+ pr_debug("Adding PCI host bridge %pOF\n", dev);
/* Fetch host bridge registers address */
if (of_address_to_resource(dev, 0, &rsrc_reg)) {
@@ -848,8 +847,8 @@ int __init mpc83xx_add_bridge(struct device_node *dev)
/* Get bus range if any */
bus_range = of_get_property(dev, "bus-range", &len);
if (bus_range == NULL || len < 2 * sizeof(int)) {
- printk(KERN_WARNING "Can't get bus-range for %s, assume"
- " bus 0\n", dev->full_name);
+ printk(KERN_WARNING "Can't get bus-range for %pOF, assume"
+ " bus 0\n", dev);
}
pci_add_flags(PCI_REASSIGN_ALL_BUS);
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index 1c41c51f22cb..9234be1e66f5 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -450,12 +450,12 @@ int fsl_rio_setup(struct platform_device *dev)
rc = of_address_to_resource(dev->dev.of_node, 0, &regs);
if (rc) {
- dev_err(&dev->dev, "Can't get %s property 'reg'\n",
- dev->dev.of_node->full_name);
+ dev_err(&dev->dev, "Can't get %pOF property 'reg'\n",
+ dev->dev.of_node);
return -EFAULT;
}
- dev_info(&dev->dev, "Of-device full name %s\n",
- dev->dev.of_node->full_name);
+ dev_info(&dev->dev, "Of-device full name %pOF\n",
+ dev->dev.of_node);
dev_info(&dev->dev, "Regs: %pR\n", &regs);
rio_regs_win = ioremap(regs.start, resource_size(&regs));
@@ -494,8 +494,8 @@ int fsl_rio_setup(struct platform_device *dev)
}
rc = of_address_to_resource(rmu_node, 0, &rmu_regs);
if (rc) {
- dev_err(&dev->dev, "Can't get %s property 'reg'\n",
- rmu_node->full_name);
+ dev_err(&dev->dev, "Can't get %pOF property 'reg'\n",
+ rmu_node);
goto err_rmu;
}
rmu_regs_win = ioremap(rmu_regs.start, resource_size(&rmu_regs));
@@ -529,8 +529,8 @@ int fsl_rio_setup(struct platform_device *dev)
aw = of_n_addr_cells(np);
dt_range = of_get_property(np, "reg", &rlen);
if (!dt_range) {
- pr_err("%s: unable to find 'reg' property\n",
- np->full_name);
+ pr_err("%pOF: unable to find 'reg' property\n",
+ np);
rc = -ENOMEM;
goto err_pw;
}
@@ -557,8 +557,8 @@ int fsl_rio_setup(struct platform_device *dev)
aw = of_n_addr_cells(np);
dt_range = of_get_property(np, "reg", &rlen);
if (!dt_range) {
- pr_err("%s: unable to find 'reg' property\n",
- np->full_name);
+ pr_err("%pOF: unable to find 'reg' property\n",
+ np);
rc = -ENOMEM;
goto err;
}
@@ -569,15 +569,15 @@ int fsl_rio_setup(struct platform_device *dev)
for_each_child_of_node(dev->dev.of_node, np) {
port_index = of_get_property(np, "cell-index", NULL);
if (!port_index) {
- dev_err(&dev->dev, "Can't get %s property 'cell-index'\n",
- np->full_name);
+ dev_err(&dev->dev, "Can't get %pOF property 'cell-index'\n",
+ np);
continue;
}
dt_range = of_get_property(np, "ranges", &rlen);
if (!dt_range) {
- dev_err(&dev->dev, "Can't get %s property 'ranges'\n",
- np->full_name);
+ dev_err(&dev->dev, "Can't get %pOF property 'ranges'\n",
+ np);
continue;
}
@@ -598,8 +598,8 @@ int fsl_rio_setup(struct platform_device *dev)
range_start = of_read_number(dt_range + aw, paw);
range_size = of_read_number(dt_range + aw + paw, sw);
- dev_info(&dev->dev, "%s: LAW start 0x%016llx, size 0x%016llx.\n",
- np->full_name, range_start, range_size);
+ dev_info(&dev->dev, "%pOF: LAW start 0x%016llx, size 0x%016llx.\n",
+ np, range_start, range_size);
port = kzalloc(sizeof(struct rio_mport), GFP_KERNEL);
if (!port)
@@ -757,8 +757,8 @@ err_rio_regs:
*/
static int fsl_of_rio_rpn_probe(struct platform_device *dev)
{
- printk(KERN_INFO "Setting up RapidIO peer-to-peer network %s\n",
- dev->dev.of_node->full_name);
+ printk(KERN_INFO "Setting up RapidIO peer-to-peer network %pOF\n",
+ dev->dev.of_node);
return fsl_rio_setup(dev);
};
diff --git a/arch/powerpc/sysdev/fsl_rmu.c b/arch/powerpc/sysdev/fsl_rmu.c
index c1826de4e749..ab7a74c75be8 100644
--- a/arch/powerpc/sysdev/fsl_rmu.c
+++ b/arch/powerpc/sysdev/fsl_rmu.c
@@ -1074,8 +1074,8 @@ int fsl_rio_setup_rmu(struct rio_mport *mport, struct device_node *node)
priv = mport->priv;
if (!node) {
- dev_warn(priv->dev, "Can't get %s property 'fsl,rmu'\n",
- priv->dev->of_node->full_name);
+ dev_warn(priv->dev, "Can't get %pOF property 'fsl,rmu'\n",
+ priv->dev->of_node);
return -EINVAL;
}
@@ -1086,8 +1086,8 @@ int fsl_rio_setup_rmu(struct rio_mport *mport, struct device_node *node)
aw = of_n_addr_cells(node);
msg_addr = of_get_property(node, "reg", &mlen);
if (!msg_addr) {
- pr_err("%s: unable to find 'reg' property of message-unit\n",
- node->full_name);
+ pr_err("%pOF: unable to find 'reg' property of message-unit\n",
+ node);
kfree(rmu);
return -ENOMEM;
}
@@ -1098,8 +1098,8 @@ int fsl_rio_setup_rmu(struct rio_mport *mport, struct device_node *node)
rmu->txirq = irq_of_parse_and_map(node, 0);
rmu->rxirq = irq_of_parse_and_map(node, 1);
- printk(KERN_INFO "%s: txirq: %d, rxirq %d\n",
- node->full_name, rmu->txirq, rmu->rxirq);
+ printk(KERN_INFO "%pOF: txirq: %d, rxirq %d\n",
+ node, rmu->txirq, rmu->rxirq);
priv->rmm_handle = rmu;
diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c
index 19101f9cfcfc..1f614fb2be56 100644
--- a/arch/powerpc/sysdev/fsl_soc.c
+++ b/arch/powerpc/sysdev/fsl_soc.c
@@ -98,7 +98,7 @@ u32 fsl_get_sys_freq(void)
}
EXPORT_SYMBOL(fsl_get_sys_freq);
-#if defined(CONFIG_CPM2) || defined(CONFIG_QUICC_ENGINE) || defined(CONFIG_8xx)
+#if defined(CONFIG_CPM) || defined(CONFIG_QUICC_ENGINE)
u32 get_brgfreq(void)
{
diff --git a/arch/powerpc/sysdev/fsl_soc.h b/arch/powerpc/sysdev/fsl_soc.h
index d73daa4f0ccf..2640446f8bc4 100644
--- a/arch/powerpc/sysdev/fsl_soc.h
+++ b/arch/powerpc/sysdev/fsl_soc.h
@@ -7,7 +7,7 @@
struct spi_device;
extern phys_addr_t get_immrbase(void);
-#if defined(CONFIG_CPM2) || defined(CONFIG_QUICC_ENGINE) || defined(CONFIG_8xx)
+#if defined(CONFIG_CPM) || defined(CONFIG_QUICC_ENGINE)
extern u32 get_brgfreq(void);
extern u32 get_baudrate(void);
#else
diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c
index f267ee0afc08..16f1edd78c40 100644
--- a/arch/powerpc/sysdev/ipic.c
+++ b/arch/powerpc/sysdev/ipic.c
@@ -315,6 +315,7 @@ static struct ipic_info ipic_info[] = {
.prio_mask = 7,
},
[48] = {
+ .ack = IPIC_SEPNR,
.mask = IPIC_SEMSR,
.prio = IPIC_SMPRR_A,
.force = IPIC_SEFCR,
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index b9aac951a90f..ead3e2549ebf 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -1650,8 +1650,8 @@ void __init mpic_init(struct mpic *mpic)
if (mpic->flags & MPIC_SECONDARY) {
int virq = irq_of_parse_and_map(mpic->node, 0);
if (virq) {
- printk(KERN_INFO "%s: hooking up to IRQ %d\n",
- mpic->node->full_name, virq);
+ printk(KERN_INFO "%pOF: hooking up to IRQ %d\n",
+ mpic->node, virq);
irq_set_handler_data(virq, mpic);
irq_set_chained_handler(virq, &mpic_cascade);
}
diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c
index db2286be5d9a..eb69a5186243 100644
--- a/arch/powerpc/sysdev/mpic_msgr.c
+++ b/arch/powerpc/sysdev/mpic_msgr.c
@@ -192,7 +192,7 @@ static int mpic_msgr_probe(struct platform_device *dev)
return -ENOMEM;
}
}
- dev_info(&dev->dev, "Of-device full name %s\n", np->full_name);
+ dev_info(&dev->dev, "Of-device full name %pOF\n", np);
/* IO map the message register block. */
of_address_to_resource(np, 0, &rsrc);
diff --git a/arch/powerpc/sysdev/mpic_msi.c b/arch/powerpc/sysdev/mpic_msi.c
index 1d48a5385905..9ed860aee9c3 100644
--- a/arch/powerpc/sysdev/mpic_msi.c
+++ b/arch/powerpc/sysdev/mpic_msi.c
@@ -60,7 +60,7 @@ static int mpic_msi_reserve_u3_hwirqs(struct mpic *mpic)
np = NULL;
while ((np = of_find_all_nodes(np))) {
- pr_debug("mpic: mapping hwirqs for %s\n", np->full_name);
+ pr_debug("mpic: mapping hwirqs for %pOF\n", np);
index = 0;
while (of_irq_parse_one(np, index++, &oirq) == 0) {
diff --git a/arch/powerpc/sysdev/mpic_timer.c b/arch/powerpc/sysdev/mpic_timer.c
index 9d9b06217f8b..a418579591be 100644
--- a/arch/powerpc/sysdev/mpic_timer.c
+++ b/arch/powerpc/sysdev/mpic_timer.c
@@ -466,8 +466,7 @@ static int timer_group_get_irq(struct device_node *np,
p = of_get_property(np, "fsl,available-ranges", &len);
if (p && len % (2 * sizeof(u32)) != 0) {
- pr_err("%s: malformed available-ranges property.\n",
- np->full_name);
+ pr_err("%pOF: malformed available-ranges property.\n", np);
return -EINVAL;
}
@@ -484,8 +483,7 @@ static int timer_group_get_irq(struct device_node *np,
for (j = 0; j < count; j++) {
irq = irq_of_parse_and_map(np, irq_index);
if (!irq) {
- pr_err("%s: irq parse and map failed.\n",
- np->full_name);
+ pr_err("%pOF: irq parse and map failed.\n", np);
return -EINVAL;
}
@@ -508,8 +506,7 @@ static void timer_group_init(struct device_node *np)
priv = kzalloc(sizeof(struct timer_group_priv), GFP_KERNEL);
if (!priv) {
- pr_err("%s: cannot allocate memory for group.\n",
- np->full_name);
+ pr_err("%pOF: cannot allocate memory for group.\n", np);
return;
}
@@ -518,29 +515,27 @@ static void timer_group_init(struct device_node *np)
priv->regs = of_iomap(np, i++);
if (!priv->regs) {
- pr_err("%s: cannot ioremap timer register address.\n",
- np->full_name);
+ pr_err("%pOF: cannot ioremap timer register address.\n", np);
goto out;
}
if (priv->flags & FSL_GLOBAL_TIMER) {
priv->group_tcr = of_iomap(np, i++);
if (!priv->group_tcr) {
- pr_err("%s: cannot ioremap tcr address.\n",
- np->full_name);
+ pr_err("%pOF: cannot ioremap tcr address.\n", np);
goto out;
}
}
ret = timer_group_get_freq(np, priv);
if (ret < 0) {
- pr_err("%s: cannot get timer frequency.\n", np->full_name);
+ pr_err("%pOF: cannot get timer frequency.\n", np);
goto out;
}
ret = timer_group_get_irq(np, priv);
if (ret < 0) {
- pr_err("%s: cannot get timer irqs.\n", np->full_name);
+ pr_err("%pOF: cannot get timer irqs.\n", np);
goto out;
}
diff --git a/arch/powerpc/sysdev/msi_bitmap.c b/arch/powerpc/sysdev/msi_bitmap.c
index 5ebd3f018295..c4dae27172b3 100644
--- a/arch/powerpc/sysdev/msi_bitmap.c
+++ b/arch/powerpc/sysdev/msi_bitmap.c
@@ -86,13 +86,13 @@ int msi_bitmap_reserve_dt_hwirqs(struct msi_bitmap *bmp)
p = of_get_property(bmp->of_node, "msi-available-ranges", &len);
if (!p) {
pr_debug("msi_bitmap: no msi-available-ranges property " \
- "found on %s\n", bmp->of_node->full_name);
+ "found on %pOF\n", bmp->of_node);
return 1;
}
if (len % (2 * sizeof(u32)) != 0) {
printk(KERN_WARNING "msi_bitmap: Malformed msi-available-ranges"
- " property on %s\n", bmp->of_node->full_name);
+ " property on %pOF\n", bmp->of_node);
return -EINVAL;
}
diff --git a/arch/powerpc/sysdev/mv64x60_dev.c b/arch/powerpc/sysdev/mv64x60_dev.c
index 026bbc3b2c47..185a67e742a6 100644
--- a/arch/powerpc/sysdev/mv64x60_dev.c
+++ b/arch/powerpc/sysdev/mv64x60_dev.c
@@ -452,8 +452,8 @@ static int __init mv64x60_device_setup(void)
err = mv64x60_mpsc_device_setup(np, id++);
if (err)
printk(KERN_ERR "Failed to initialize MV64x60 "
- "serial device %s: error %d.\n",
- np->full_name, err);
+ "serial device %pOF: error %d.\n",
+ np, err);
}
id = 0;
@@ -463,8 +463,8 @@ static int __init mv64x60_device_setup(void)
if (IS_ERR(pdev)) {
err = PTR_ERR(pdev);
printk(KERN_ERR "Failed to initialize MV64x60 "
- "network block %s: error %d.\n",
- np->full_name, err);
+ "network block %pOF: error %d.\n",
+ np, err);
continue;
}
for_each_child_of_node(np, np2) {
@@ -474,9 +474,9 @@ static int __init mv64x60_device_setup(void)
err = mv64x60_eth_device_setup(np2, id2++, pdev);
if (err)
printk(KERN_ERR "Failed to initialize "
- "MV64x60 network device %s: "
+ "MV64x60 network device %pOF: "
"error %d.\n",
- np2->full_name, err);
+ np2, err);
}
}
@@ -485,8 +485,8 @@ static int __init mv64x60_device_setup(void)
err = mv64x60_i2c_device_setup(np, id++);
if (err)
printk(KERN_ERR "Failed to initialize MV64x60 I2C "
- "bus %s: error %d.\n",
- np->full_name, err);
+ "bus %pOF: error %d.\n",
+ np, err);
}
/* support up to one watchdog timer */
@@ -494,8 +494,8 @@ static int __init mv64x60_device_setup(void)
if (np) {
if ((err = mv64x60_wdt_device_setup(np, id)))
printk(KERN_ERR "Failed to initialize MV64x60 "
- "Watchdog %s: error %d.\n",
- np->full_name, err);
+ "Watchdog %pOF: error %d.\n",
+ np, err);
of_node_put(np);
}
diff --git a/arch/powerpc/sysdev/mv64x60_pci.c b/arch/powerpc/sysdev/mv64x60_pci.c
index 330d56613c5a..d52b3b81e05f 100644
--- a/arch/powerpc/sysdev/mv64x60_pci.c
+++ b/arch/powerpc/sysdev/mv64x60_pci.c
@@ -70,7 +70,7 @@ static ssize_t mv64x60_hs_reg_write(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute mv64x60_hs_reg_attr = { /* Hotswap register */
+static const struct bin_attribute mv64x60_hs_reg_attr = { /* Hotswap register */
.attr = {
.name = "hs_reg",
.mode = S_IRUGO | S_IWUSR,
@@ -136,8 +136,8 @@ static int __init mv64x60_add_bridge(struct device_node *dev)
/* Get bus range if any */
bus_range = of_get_property(dev, "bus-range", &len);
if (bus_range == NULL || len < 2 * sizeof(int))
- printk(KERN_WARNING "Can't get bus-range for %s, assume"
- " bus 0\n", dev->full_name);
+ printk(KERN_WARNING "Can't get bus-range for %pOF, assume"
+ " bus 0\n", dev);
hose = pcibios_alloc_controller(dev);
if (!hose)
diff --git a/arch/powerpc/sysdev/of_rtc.c b/arch/powerpc/sysdev/of_rtc.c
index 6f54b54b1328..153fdac4720f 100644
--- a/arch/powerpc/sysdev/of_rtc.c
+++ b/arch/powerpc/sysdev/of_rtc.c
@@ -38,21 +38,21 @@ void __init of_instantiate_rtc(void)
res = kmalloc(sizeof(*res), GFP_KERNEL);
if (!res) {
printk(KERN_ERR "OF RTC: Out of memory "
- "allocating resource structure for %s\n",
- node->full_name);
+ "allocating resource structure for %pOF\n",
+ node);
continue;
}
err = of_address_to_resource(node, 0, res);
if (err) {
printk(KERN_ERR "OF RTC: Error "
- "translating resources for %s\n",
- node->full_name);
+ "translating resources for %pOF\n",
+ node);
continue;
}
- printk(KERN_INFO "OF_RTC: %s is a %s @ 0x%llx-0x%llx\n",
- node->full_name, plat_name,
+ printk(KERN_INFO "OF_RTC: %pOF is a %s @ 0x%llx-0x%llx\n",
+ node, plat_name,
(unsigned long long)res->start,
(unsigned long long)res->end);
platform_device_register_simple(plat_name, -1, res, 1);
diff --git a/arch/powerpc/sysdev/scom.c b/arch/powerpc/sysdev/scom.c
index 76ea32c1b664..0f6fd5d04d33 100644
--- a/arch/powerpc/sysdev/scom.c
+++ b/arch/powerpc/sysdev/scom.c
@@ -194,12 +194,13 @@ static int scom_debug_init_one(struct dentry *root, struct device_node *dn,
ent->dn = of_node_get(dn);
snprintf(ent->name, 16, "%08x", i);
- ent->path.data = (void*) dn->full_name;
- ent->path.size = strlen(dn->full_name);
+ ent->path.data = (void*)kasprintf(GFP_KERNEL, "%pOF", dn);
+ ent->path.size = strlen((char *)ent->path.data);
dir = debugfs_create_dir(ent->name, root);
if (!dir) {
of_node_put(dn);
+ kfree(ent->path.data);
kfree(ent);
return -1;
}
diff --git a/arch/powerpc/sysdev/simple_gpio.c b/arch/powerpc/sysdev/simple_gpio.c
index 6afddae2fb47..f02d4576138c 100644
--- a/arch/powerpc/sysdev/simple_gpio.c
+++ b/arch/powerpc/sysdev/simple_gpio.c
@@ -142,7 +142,6 @@ void __init simple_gpiochip_init(const char *compatible)
}
continue;
err:
- pr_err("%s: registration failed, status %d\n",
- np->full_name, ret);
+ pr_err("%pOF: registration failed, status %d\n", np, ret);
}
}
diff --git a/arch/powerpc/sysdev/tsi108_pci.c b/arch/powerpc/sysdev/tsi108_pci.c
index 5692dd569b9b..28ff1f53cefc 100644
--- a/arch/powerpc/sysdev/tsi108_pci.c
+++ b/arch/powerpc/sysdev/tsi108_pci.c
@@ -213,8 +213,8 @@ int __init tsi108_setup_pci(struct device_node *dev, u32 cfg_phys, int primary)
/* Get bus range if any */
bus_range = of_get_property(dev, "bus-range", &len);
if (bus_range == NULL || len < 2 * sizeof(int)) {
- printk(KERN_WARNING "Can't get bus-range for %s, assume"
- " bus 0\n", dev->full_name);
+ printk(KERN_WARNING "Can't get bus-range for %pOF, assume"
+ " bus 0\n", dev);
}
hose = pcibios_alloc_controller(dev);
diff --git a/arch/powerpc/sysdev/xive/Kconfig b/arch/powerpc/sysdev/xive/Kconfig
index 12ccd7373d2f..3e3e25b5e30d 100644
--- a/arch/powerpc/sysdev/xive/Kconfig
+++ b/arch/powerpc/sysdev/xive/Kconfig
@@ -9,3 +9,8 @@ config PPC_XIVE_NATIVE
default n
select PPC_XIVE
depends on PPC_POWERNV
+
+config PPC_XIVE_SPAPR
+ bool
+ default n
+ select PPC_XIVE
diff --git a/arch/powerpc/sysdev/xive/Makefile b/arch/powerpc/sysdev/xive/Makefile
index 3fab303fc169..536d6e5706e3 100644
--- a/arch/powerpc/sysdev/xive/Makefile
+++ b/arch/powerpc/sysdev/xive/Makefile
@@ -2,3 +2,4 @@ subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
obj-y += common.o
obj-$(CONFIG_PPC_XIVE_NATIVE) += native.o
+obj-$(CONFIG_PPC_XIVE_SPAPR) += spapr.o
diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
index 6595462b1fc8..f387318678b9 100644
--- a/arch/powerpc/sysdev/xive/common.c
+++ b/arch/powerpc/sysdev/xive/common.c
@@ -40,7 +40,8 @@
#undef DEBUG_ALL
#ifdef DEBUG_ALL
-#define DBG_VERBOSE(fmt...) pr_devel(fmt)
+#define DBG_VERBOSE(fmt, ...) pr_devel("cpu %d - " fmt, \
+ smp_processor_id(), ## __VA_ARGS__)
#else
#define DBG_VERBOSE(fmt...) do { } while(0)
#endif
@@ -190,7 +191,7 @@ static u32 xive_scan_interrupts(struct xive_cpu *xc, bool just_peek)
* This is used to perform the magic loads from an ESB
* described in xive.h
*/
-static u8 xive_poke_esb(struct xive_irq_data *xd, u32 offset)
+static notrace u8 xive_esb_read(struct xive_irq_data *xd, u32 offset)
{
u64 val;
@@ -198,13 +199,28 @@ static u8 xive_poke_esb(struct xive_irq_data *xd, u32 offset)
if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
offset |= offset << 4;
- val = in_be64(xd->eoi_mmio + offset);
+ if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
+ val = xive_ops->esb_rw(xd->hw_irq, offset, 0, 0);
+ else
+ val = in_be64(xd->eoi_mmio + offset);
return (u8)val;
}
+static void xive_esb_write(struct xive_irq_data *xd, u32 offset, u64 data)
+{
+ /* Handle HW errata */
+ if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
+ offset |= offset << 4;
+
+ if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
+ xive_ops->esb_rw(xd->hw_irq, offset, data, 1);
+ else
+ out_be64(xd->eoi_mmio + offset, data);
+}
+
#ifdef CONFIG_XMON
-static void xive_dump_eq(const char *name, struct xive_q *q)
+static notrace void xive_dump_eq(const char *name, struct xive_q *q)
{
u32 i0, i1, idx;
@@ -218,7 +234,7 @@ static void xive_dump_eq(const char *name, struct xive_q *q)
q->toggle, i0, i1);
}
-void xmon_xive_do_dump(int cpu)
+notrace void xmon_xive_do_dump(int cpu)
{
struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
@@ -227,7 +243,7 @@ void xmon_xive_do_dump(int cpu)
xive_dump_eq("IRQ", &xc->queue[xive_irq_priority]);
#ifdef CONFIG_SMP
{
- u64 val = xive_poke_esb(&xc->ipi_data, XIVE_ESB_GET);
+ u64 val = xive_esb_read(&xc->ipi_data, XIVE_ESB_GET);
xmon_printf(" IPI state: %x:%c%c\n", xc->hw_ipi,
val & XIVE_ESB_VAL_P ? 'P' : 'p',
val & XIVE_ESB_VAL_Q ? 'Q' : 'q');
@@ -297,7 +313,7 @@ void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
{
/* If the XIVE supports the new "store EOI facility, use it */
if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
- out_be64(xd->eoi_mmio + XIVE_ESB_STORE_EOI, 0);
+ xive_esb_write(xd, XIVE_ESB_STORE_EOI, 0);
else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
/*
* The FW told us to call it. This happens for some
@@ -326,10 +342,10 @@ void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
* properly.
*/
if (xd->flags & XIVE_IRQ_FLAG_LSI)
- in_be64(xd->eoi_mmio);
+ xive_esb_read(xd, XIVE_ESB_LOAD_EOI);
else {
- eoi_val = xive_poke_esb(xd, XIVE_ESB_SET_PQ_00);
- DBG_VERBOSE("eoi_val=%x\n", offset, eoi_val);
+ eoi_val = xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
+ DBG_VERBOSE("eoi_val=%x\n", eoi_val);
/* Re-trigger if needed */
if ((eoi_val & XIVE_ESB_VAL_Q) && xd->trig_mmio)
@@ -383,12 +399,12 @@ static void xive_do_source_set_mask(struct xive_irq_data *xd,
* ESB accordingly on unmask.
*/
if (mask) {
- val = xive_poke_esb(xd, XIVE_ESB_SET_PQ_01);
+ val = xive_esb_read(xd, XIVE_ESB_SET_PQ_01);
xd->saved_p = !!(val & XIVE_ESB_VAL_P);
} else if (xd->saved_p)
- xive_poke_esb(xd, XIVE_ESB_SET_PQ_10);
+ xive_esb_read(xd, XIVE_ESB_SET_PQ_10);
else
- xive_poke_esb(xd, XIVE_ESB_SET_PQ_00);
+ xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
}
/*
@@ -447,7 +463,7 @@ static int xive_find_target_in_mask(const struct cpumask *mask,
int cpu, first, num, i;
/* Pick up a starting point CPU in the mask based on fuzz */
- num = cpumask_weight(mask);
+ num = min_t(int, cpumask_weight(mask), nr_cpu_ids);
first = fuzz % num;
/* Locate it */
@@ -672,6 +688,10 @@ static int xive_irq_set_affinity(struct irq_data *d,
if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids)
return -EINVAL;
+ /* Don't do anything if the interrupt isn't started */
+ if (!irqd_is_started(d))
+ return IRQ_SET_MASK_OK;
+
/*
* If existing target is already in the new mask, and is
* online then do nothing.
@@ -768,7 +788,7 @@ static int xive_irq_retrigger(struct irq_data *d)
* To perform a retrigger, we first set the PQ bits to
* 11, then perform an EOI.
*/
- xive_poke_esb(xd, XIVE_ESB_SET_PQ_11);
+ xive_esb_read(xd, XIVE_ESB_SET_PQ_11);
/*
* Note: We pass "0" to the hw_irq argument in order to
@@ -803,7 +823,7 @@ static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
irqd_set_forwarded_to_vcpu(d);
/* Set it to PQ=10 state to prevent further sends */
- pq = xive_poke_esb(xd, XIVE_ESB_SET_PQ_10);
+ pq = xive_esb_read(xd, XIVE_ESB_SET_PQ_10);
/* No target ? nothing to do */
if (xd->target == XIVE_INVALID_TARGET) {
@@ -832,7 +852,7 @@ static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
* for sure the queue slot is no longer in use.
*/
if (pq & 2) {
- pq = xive_poke_esb(xd, XIVE_ESB_SET_PQ_11);
+ pq = xive_esb_read(xd, XIVE_ESB_SET_PQ_11);
xd->saved_p = true;
/*
@@ -989,6 +1009,9 @@ static void xive_ipi_eoi(struct irq_data *d)
{
struct xive_cpu *xc = __this_cpu_read(xive_cpu);
+ DBG_VERBOSE("IPI eoi: irq=%d [0x%lx] (HW IRQ 0x%x) pending=%02x\n",
+ d->irq, irqd_to_hwirq(d), xc->hw_ipi, xc->pending_prio);
+
/* Handle possible race with unplug and drop stale IPIs */
if (!xc)
return;
@@ -1368,6 +1391,19 @@ void xive_flush_interrupt(void)
#endif /* CONFIG_SMP */
+void xive_teardown_cpu(void)
+{
+ struct xive_cpu *xc = __this_cpu_read(xive_cpu);
+ unsigned int cpu = smp_processor_id();
+
+ /* Set CPPR to 0 to disable flow of interrupts */
+ xc->cppr = 0;
+ out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);
+
+ if (xive_ops->teardown_cpu)
+ xive_ops->teardown_cpu(cpu, xc);
+}
+
void xive_kexec_teardown_cpu(int secondary)
{
struct xive_cpu *xc = __this_cpu_read(xive_cpu);
@@ -1395,8 +1431,8 @@ void xive_shutdown(void)
xive_ops->shutdown();
}
-bool xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 offset,
- u8 max_prio)
+bool __init xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 offset,
+ u8 max_prio)
{
xive_tima = area;
xive_tima_offset = offset;
@@ -1424,6 +1460,22 @@ bool xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 offset,
return true;
}
+__be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift)
+{
+ unsigned int alloc_order;
+ struct page *pages;
+ __be32 *qpage;
+
+ alloc_order = xive_alloc_order(queue_shift);
+ pages = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, alloc_order);
+ if (!pages)
+ return ERR_PTR(-ENOMEM);
+ qpage = (__be32 *)page_address(pages);
+ memset(qpage, 0, 1 << queue_shift);
+
+ return qpage;
+}
+
static int __init xive_off(char *arg)
{
xive_cmdline_disabled = true;
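
xive_queue_page_alloc() above factors the zeroed, node-local event-queue page allocation out of the native backend so it can be shared; the matching xive_alloc_order() helper is added to xive-internal.h further down. A minimal sketch of how a backend is expected to pair the two (the example_* names are hypothetical, not part of the patch):

#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include "xive-internal.h"

static int example_setup_queue(unsigned int cpu, u32 queue_shift)
{
	__be32 *qpage = xive_queue_page_alloc(cpu, queue_shift);

	if (IS_ERR(qpage))
		return PTR_ERR(qpage);	/* -ENOMEM from the helper */

	/* ... hand qpage to the hypervisor or hardware here ... */
	return 0;
}

static void example_cleanup_queue(__be32 *qpage, u32 queue_shift)
{
	free_pages((unsigned long)qpage, xive_alloc_order(queue_shift));
}
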
diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
index 0f95476b01f6..44f3a25ca630 100644
--- a/arch/powerpc/sysdev/xive/native.c
+++ b/arch/powerpc/sysdev/xive/native.c
@@ -82,6 +82,8 @@ int xive_native_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
return -ENOMEM;
}
+ data->hw_irq = hw_irq;
+
if (!data->trig_page)
return 0;
if (data->trig_page == data->eoi_page) {
@@ -202,17 +204,12 @@ EXPORT_SYMBOL_GPL(xive_native_disable_queue);
static int xive_native_setup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
{
struct xive_q *q = &xc->queue[prio];
- unsigned int alloc_order;
- struct page *pages;
__be32 *qpage;
- alloc_order = (xive_queue_shift > PAGE_SHIFT) ?
- (xive_queue_shift - PAGE_SHIFT) : 0;
- pages = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, alloc_order);
- if (!pages)
- return -ENOMEM;
- qpage = (__be32 *)page_address(pages);
- memset(qpage, 0, 1 << xive_queue_shift);
+ qpage = xive_queue_page_alloc(cpu, xive_queue_shift);
+ if (IS_ERR(qpage))
+ return PTR_ERR(qpage);
+
return xive_native_configure_queue(get_hard_smp_processor_id(cpu),
q, prio, qpage, xive_queue_shift, false);
}
@@ -227,8 +224,7 @@ static void xive_native_cleanup_queue(unsigned int cpu, struct xive_cpu *xc, u8
* from an IPI and iounmap isn't safe
*/
__xive_native_disable_queue(get_hard_smp_processor_id(cpu), q, prio);
- alloc_order = (xive_queue_shift > PAGE_SHIFT) ?
- (xive_queue_shift - PAGE_SHIFT) : 0;
+ alloc_order = xive_alloc_order(xive_queue_shift);
free_pages((unsigned long)q->qpage, alloc_order);
q->qpage = NULL;
}
@@ -531,7 +527,7 @@ u32 xive_native_default_eq_shift(void)
}
EXPORT_SYMBOL_GPL(xive_native_default_eq_shift);
-bool xive_native_init(void)
+bool __init xive_native_init(void)
{
struct device_node *np;
struct resource r;
@@ -551,7 +547,7 @@ bool xive_native_init(void)
pr_devel("not found !\n");
return false;
}
- pr_devel("Found %s\n", np->full_name);
+ pr_devel("Found %pOF\n", np);
/* Resource 1 is HV window */
if (of_address_to_resource(np, 1, &r)) {
diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c
new file mode 100644
index 000000000000..f24a70bc6855
--- /dev/null
+++ b/arch/powerpc/sysdev/xive/spapr.c
@@ -0,0 +1,662 @@
+/*
+ * Copyright 2016,2017 IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) "xive: " fmt
+
+#include <linux/types.h>
+#include <linux/irq.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/cpumask.h>
+#include <linux/mm.h>
+
+#include <asm/prom.h>
+#include <asm/io.h>
+#include <asm/smp.h>
+#include <asm/irq.h>
+#include <asm/errno.h>
+#include <asm/xive.h>
+#include <asm/xive-regs.h>
+#include <asm/hvcall.h>
+
+#include "xive-internal.h"
+
+static u32 xive_queue_shift;
+
+struct xive_irq_bitmap {
+ unsigned long *bitmap;
+ unsigned int base;
+ unsigned int count;
+ spinlock_t lock;
+ struct list_head list;
+};
+
+static LIST_HEAD(xive_irq_bitmaps);
+
+static int xive_irq_bitmap_add(int base, int count)
+{
+ struct xive_irq_bitmap *xibm;
+
+ xibm = kzalloc(sizeof(*xibm), GFP_ATOMIC);
+ if (!xibm)
+ return -ENOMEM;
+
+ spin_lock_init(&xibm->lock);
+ xibm->base = base;
+ xibm->count = count;
+ xibm->bitmap = kzalloc(xibm->count, GFP_KERNEL);
+ list_add(&xibm->list, &xive_irq_bitmaps);
+
+ pr_info("Using IRQ range [%x-%x]\n", xibm->base,
+ xibm->base + xibm->count - 1);
+ return 0;
+}
+
+static int __xive_irq_bitmap_alloc(struct xive_irq_bitmap *xibm)
+{
+ int irq;
+
+ irq = find_first_zero_bit(xibm->bitmap, xibm->count);
+ if (irq != xibm->count) {
+ set_bit(irq, xibm->bitmap);
+ irq += xibm->base;
+ } else {
+ irq = -ENOMEM;
+ }
+
+ return irq;
+}
+
+static int xive_irq_bitmap_alloc(void)
+{
+ struct xive_irq_bitmap *xibm;
+ unsigned long flags;
+ int irq = -ENOENT;
+
+ list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
+ spin_lock_irqsave(&xibm->lock, flags);
+ irq = __xive_irq_bitmap_alloc(xibm);
+ spin_unlock_irqrestore(&xibm->lock, flags);
+ if (irq >= 0)
+ break;
+ }
+ return irq;
+}
+
+static void xive_irq_bitmap_free(int irq)
+{
+ unsigned long flags;
+ struct xive_irq_bitmap *xibm;
+
+ list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
+ if ((irq >= xibm->base) && (irq < xibm->base + xibm->count)) {
+ spin_lock_irqsave(&xibm->lock, flags);
+ clear_bit(irq - xibm->base, xibm->bitmap);
+ spin_unlock_irqrestore(&xibm->lock, flags);
+ break;
+ }
+ }
+}
+
+static long plpar_int_get_source_info(unsigned long flags,
+ unsigned long lisn,
+ unsigned long *src_flags,
+ unsigned long *eoi_page,
+ unsigned long *trig_page,
+ unsigned long *esb_shift)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ long rc;
+
+ rc = plpar_hcall(H_INT_GET_SOURCE_INFO, retbuf, flags, lisn);
+ if (rc) {
+ pr_err("H_INT_GET_SOURCE_INFO lisn=%ld failed %ld\n", lisn, rc);
+ return rc;
+ }
+
+ *src_flags = retbuf[0];
+ *eoi_page = retbuf[1];
+ *trig_page = retbuf[2];
+ *esb_shift = retbuf[3];
+
+ pr_devel("H_INT_GET_SOURCE_INFO flags=%lx eoi=%lx trig=%lx shift=%lx\n",
+ retbuf[0], retbuf[1], retbuf[2], retbuf[3]);
+
+ return 0;
+}
+
+#define XIVE_SRC_SET_EISN (1ull << (63 - 62))
+#define XIVE_SRC_MASK (1ull << (63 - 63)) /* unused */
+
+static long plpar_int_set_source_config(unsigned long flags,
+ unsigned long lisn,
+ unsigned long target,
+ unsigned long prio,
+ unsigned long sw_irq)
+{
+ long rc;
+
+
+ pr_devel("H_INT_SET_SOURCE_CONFIG flags=%lx lisn=%lx target=%lx prio=%lx sw_irq=%lx\n",
+ flags, lisn, target, prio, sw_irq);
+
+
+ rc = plpar_hcall_norets(H_INT_SET_SOURCE_CONFIG, flags, lisn,
+ target, prio, sw_irq);
+ if (rc) {
+ pr_err("H_INT_SET_SOURCE_CONFIG lisn=%ld target=%lx prio=%lx failed %ld\n",
+ lisn, target, prio, rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static long plpar_int_get_queue_info(unsigned long flags,
+ unsigned long target,
+ unsigned long priority,
+ unsigned long *esn_page,
+ unsigned long *esn_size)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ long rc;
+
+ rc = plpar_hcall(H_INT_GET_QUEUE_INFO, retbuf, flags, target, priority);
+ if (rc) {
+ pr_err("H_INT_GET_QUEUE_INFO cpu=%ld prio=%ld failed %ld\n",
+ target, priority, rc);
+ return rc;
+ }
+
+ *esn_page = retbuf[0];
+ *esn_size = retbuf[1];
+
+ pr_devel("H_INT_GET_QUEUE_INFO page=%lx size=%lx\n",
+ retbuf[0], retbuf[1]);
+
+ return 0;
+}
+
+#define XIVE_EQ_ALWAYS_NOTIFY (1ull << (63 - 63))
+
+static long plpar_int_set_queue_config(unsigned long flags,
+ unsigned long target,
+ unsigned long priority,
+ unsigned long qpage,
+ unsigned long qsize)
+{
+ long rc;
+
+ pr_devel("H_INT_SET_QUEUE_CONFIG flags=%lx target=%lx priority=%lx qpage=%lx qsize=%lx\n",
+ flags, target, priority, qpage, qsize);
+
+ rc = plpar_hcall_norets(H_INT_SET_QUEUE_CONFIG, flags, target,
+ priority, qpage, qsize);
+ if (rc) {
+ pr_err("H_INT_SET_QUEUE_CONFIG cpu=%ld prio=%ld qpage=%lx returned %ld\n",
+ target, priority, qpage, rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static long plpar_int_sync(unsigned long flags, unsigned long lisn)
+{
+ long rc;
+
+ rc = plpar_hcall_norets(H_INT_SYNC, flags, lisn);
+ if (rc) {
+ pr_err("H_INT_SYNC lisn=%ld returned %ld\n", lisn, rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+#define XIVE_ESB_FLAG_STORE (1ull << (63 - 63))
+
+static long plpar_int_esb(unsigned long flags,
+ unsigned long lisn,
+ unsigned long offset,
+ unsigned long in_data,
+ unsigned long *out_data)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ long rc;
+
+ pr_devel("H_INT_ESB flags=%lx lisn=%lx offset=%lx in=%lx\n",
+ flags, lisn, offset, in_data);
+
+ rc = plpar_hcall(H_INT_ESB, retbuf, flags, lisn, offset, in_data);
+ if (rc) {
+ pr_err("H_INT_ESB lisn=%ld offset=%ld returned %ld\n",
+ lisn, offset, rc);
+ return rc;
+ }
+
+ *out_data = retbuf[0];
+
+ return 0;
+}
+
+static u64 xive_spapr_esb_rw(u32 lisn, u32 offset, u64 data, bool write)
+{
+ unsigned long read_data;
+ long rc;
+
+ rc = plpar_int_esb(write ? XIVE_ESB_FLAG_STORE : 0,
+ lisn, offset, data, &read_data);
+ if (rc)
+ return -1;
+
+ return write ? 0 : read_data;
+}
+
+#define XIVE_SRC_H_INT_ESB (1ull << (63 - 60))
+#define XIVE_SRC_LSI (1ull << (63 - 61))
+#define XIVE_SRC_TRIGGER (1ull << (63 - 62))
+#define XIVE_SRC_STORE_EOI (1ull << (63 - 63))
+
+static int xive_spapr_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
+{
+ long rc;
+ unsigned long flags;
+ unsigned long eoi_page;
+ unsigned long trig_page;
+ unsigned long esb_shift;
+
+ memset(data, 0, sizeof(*data));
+
+ rc = plpar_int_get_source_info(0, hw_irq, &flags, &eoi_page, &trig_page,
+ &esb_shift);
+ if (rc)
+ return -EINVAL;
+
+ if (flags & XIVE_SRC_H_INT_ESB)
+ data->flags |= XIVE_IRQ_FLAG_H_INT_ESB;
+ if (flags & XIVE_SRC_STORE_EOI)
+ data->flags |= XIVE_IRQ_FLAG_STORE_EOI;
+ if (flags & XIVE_SRC_LSI)
+ data->flags |= XIVE_IRQ_FLAG_LSI;
+ data->eoi_page = eoi_page;
+ data->esb_shift = esb_shift;
+ data->trig_page = trig_page;
+
+ /*
+ * No chip-id for the sPAPR backend. This has an impact on how we
+ * pick a target. See xive_pick_irq_target().
+ */
+ data->src_chip = XIVE_INVALID_CHIP_ID;
+
+ data->eoi_mmio = ioremap(data->eoi_page, 1u << data->esb_shift);
+ if (!data->eoi_mmio) {
+ pr_err("Failed to map EOI page for irq 0x%x\n", hw_irq);
+ return -ENOMEM;
+ }
+
+ data->hw_irq = hw_irq;
+
+ /* Full function page supports trigger */
+ if (flags & XIVE_SRC_TRIGGER) {
+ data->trig_mmio = data->eoi_mmio;
+ return 0;
+ }
+
+ data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift);
+ if (!data->trig_mmio) {
+ pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int xive_spapr_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
+{
+ long rc;
+
+ rc = plpar_int_set_source_config(XIVE_SRC_SET_EISN, hw_irq, target,
+ prio, sw_irq);
+
+ return rc == 0 ? 0 : -ENXIO;
+}
+
+/* This can be called multiple time to change a queue configuration */
+static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio,
+ __be32 *qpage, u32 order)
+{
+ s64 rc = 0;
+ unsigned long esn_page;
+ unsigned long esn_size;
+ u64 flags, qpage_phys;
+
+ /* If there's an actual queue page, clean it */
+ if (order) {
+ if (WARN_ON(!qpage))
+ return -EINVAL;
+ qpage_phys = __pa(qpage);
+ } else {
+ qpage_phys = 0;
+ }
+
+ /* Initialize the rest of the fields */
+ q->msk = order ? ((1u << (order - 2)) - 1) : 0;
+ q->idx = 0;
+ q->toggle = 0;
+
+ rc = plpar_int_get_queue_info(0, target, prio, &esn_page, &esn_size);
+ if (rc) {
+ pr_err("Error %lld getting queue info prio %d\n", rc, prio);
+ rc = -EIO;
+ goto fail;
+ }
+
+ /* TODO: add support for the notification page */
+ q->eoi_phys = esn_page;
+
+ /* Default is to always notify */
+ flags = XIVE_EQ_ALWAYS_NOTIFY;
+
+ /* Configure and enable the queue in HW */
+ rc = plpar_int_set_queue_config(flags, target, prio, qpage_phys, order);
+ if (rc) {
+ pr_err("Error %lld setting queue for prio %d\n", rc, prio);
+ rc = -EIO;
+ } else {
+ q->qpage = qpage;
+ }
+fail:
+ return rc;
+}
+
+static int xive_spapr_setup_queue(unsigned int cpu, struct xive_cpu *xc,
+ u8 prio)
+{
+ struct xive_q *q = &xc->queue[prio];
+ __be32 *qpage;
+
+ qpage = xive_queue_page_alloc(cpu, xive_queue_shift);
+ if (IS_ERR(qpage))
+ return PTR_ERR(qpage);
+
+ return xive_spapr_configure_queue(cpu, q, prio, qpage,
+ xive_queue_shift);
+}
+
+static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc,
+ u8 prio)
+{
+ struct xive_q *q = &xc->queue[prio];
+ unsigned int alloc_order;
+ long rc;
+
+ rc = plpar_int_set_queue_config(0, cpu, prio, 0, 0);
+ if (rc)
+ pr_err("Error %ld setting queue for prio %d\n", rc, prio);
+
+ alloc_order = xive_alloc_order(xive_queue_shift);
+ free_pages((unsigned long)q->qpage, alloc_order);
+ q->qpage = NULL;
+}
+
+static bool xive_spapr_match(struct device_node *node)
+{
+ /* Ignore cascaded controllers for the moment */
+ return 1;
+}
+
+#ifdef CONFIG_SMP
+static int xive_spapr_get_ipi(unsigned int cpu, struct xive_cpu *xc)
+{
+ int irq = xive_irq_bitmap_alloc();
+
+ if (irq < 0) {
+ pr_err("Failed to allocate IPI on CPU %d\n", cpu);
+ return -ENXIO;
+ }
+
+ xc->hw_ipi = irq;
+ return 0;
+}
+
+static void xive_spapr_put_ipi(unsigned int cpu, struct xive_cpu *xc)
+{
+ xive_irq_bitmap_free(xc->hw_ipi);
+}
+#endif /* CONFIG_SMP */
+
+static void xive_spapr_shutdown(void)
+{
+ long rc;
+
+ rc = plpar_hcall_norets(H_INT_RESET, 0);
+ if (rc)
+ pr_err("H_INT_RESET failed %ld\n", rc);
+}
+
+/*
+ * Perform an "ack" cycle on the current thread. Grab the pending
+ * active priorities and update the CPPR to the most favored one.
+ */
+static void xive_spapr_update_pending(struct xive_cpu *xc)
+{
+ u8 nsr, cppr;
+ u16 ack;
+
+ /*
+ * Perform the "Acknowledge O/S to Register" cycle.
+ *
+ * Let's speed up the access to the TIMA using the raw I/O
+ * accessor as we don't need the synchronisation routine of
+ * the higher level ones
+ */
+ ack = be16_to_cpu(__raw_readw(xive_tima + TM_SPC_ACK_OS_REG));
+
+ /* Synchronize subsequent queue accesses */
+ mb();
+
+ /*
+ * Grab the CPPR and the "NSR" field which indicates the source
+ * of the interrupt (if any)
+ */
+ cppr = ack & 0xff;
+ nsr = ack >> 8;
+
+ if (nsr & TM_QW1_NSR_EO) {
+ if (cppr == 0xff)
+ return;
+ /* Mark the priority pending */
+ xc->pending_prio |= 1 << cppr;
+
+ /*
+ * A new interrupt should never have a CPPR less favored
+ * than our current one.
+ */
+ if (cppr >= xc->cppr)
+ pr_err("CPU %d odd ack CPPR, got %d at %d\n",
+ smp_processor_id(), cppr, xc->cppr);
+
+ /* Update our idea of what the CPPR is */
+ xc->cppr = cppr;
+ }
+}
+
+static void xive_spapr_eoi(u32 hw_irq)
+{
+ /* Not used */;
+}
+
+static void xive_spapr_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
+{
+ /* Only some debug on the TIMA settings */
+ pr_debug("(HW value: %08x %08x %08x)\n",
+ in_be32(xive_tima + TM_QW1_OS + TM_WORD0),
+ in_be32(xive_tima + TM_QW1_OS + TM_WORD1),
+ in_be32(xive_tima + TM_QW1_OS + TM_WORD2));
+}
+
+static void xive_spapr_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
+{
+ /* Nothing to do */;
+}
+
+static void xive_spapr_sync_source(u32 hw_irq)
+{
+ /* Specs are unclear on what this is doing */
+ plpar_int_sync(0, hw_irq);
+}
+
+static const struct xive_ops xive_spapr_ops = {
+ .populate_irq_data = xive_spapr_populate_irq_data,
+ .configure_irq = xive_spapr_configure_irq,
+ .setup_queue = xive_spapr_setup_queue,
+ .cleanup_queue = xive_spapr_cleanup_queue,
+ .match = xive_spapr_match,
+ .shutdown = xive_spapr_shutdown,
+ .update_pending = xive_spapr_update_pending,
+ .eoi = xive_spapr_eoi,
+ .setup_cpu = xive_spapr_setup_cpu,
+ .teardown_cpu = xive_spapr_teardown_cpu,
+ .sync_source = xive_spapr_sync_source,
+ .esb_rw = xive_spapr_esb_rw,
+#ifdef CONFIG_SMP
+ .get_ipi = xive_spapr_get_ipi,
+ .put_ipi = xive_spapr_put_ipi,
+#endif /* CONFIG_SMP */
+ .name = "spapr",
+};
+
+/*
+ * get max priority from "/ibm,plat-res-int-priorities"
+ */
+static bool xive_get_max_prio(u8 *max_prio)
+{
+ struct device_node *rootdn;
+ const __be32 *reg;
+ u32 len;
+ int prio, found;
+
+ rootdn = of_find_node_by_path("/");
+ if (!rootdn) {
+ pr_err("root node not found!\n");
+ return false;
+ }
+
+ reg = of_get_property(rootdn, "ibm,plat-res-int-priorities", &len);
+ if (!reg) {
+ pr_err("Failed to read 'ibm,plat-res-int-priorities' property\n");
+ return false;
+ }
+
+ if (len % (2 * sizeof(u32)) != 0) {
+ pr_err("invalid 'ibm,plat-res-int-priorities' property\n");
+ return false;
+ }
+
+ /* HW supports priorities in the range [0-7] and 0xFF is a
+ * wildcard priority used to mask. We scan the ranges reserved
+ * by the hypervisor to find the lowest priority we can use.
+ */
+ found = 0xFF;
+ for (prio = 0; prio < 8; prio++) {
+ int reserved = 0;
+ int i;
+
+ for (i = 0; i < len / (2 * sizeof(u32)); i++) {
+ int base = be32_to_cpu(reg[2 * i]);
+ int range = be32_to_cpu(reg[2 * i + 1]);
+
+ if (prio >= base && prio < base + range)
+ reserved++;
+ }
+
+ if (!reserved)
+ found = prio;
+ }
+
+ if (found == 0xFF) {
+ pr_err("no valid priority found in 'ibm,plat-res-int-priorities'\n");
+ return false;
+ }
+
+ *max_prio = found;
+ return true;
+}
+
+bool __init xive_spapr_init(void)
+{
+ struct device_node *np;
+ struct resource r;
+ void __iomem *tima;
+ struct property *prop;
+ u8 max_prio;
+ u32 val;
+ u32 len;
+ const __be32 *reg;
+ int i;
+
+ if (xive_cmdline_disabled)
+ return false;
+
+ pr_devel("%s()\n", __func__);
+ np = of_find_compatible_node(NULL, NULL, "ibm,power-ivpe");
+ if (!np) {
+ pr_devel("not found !\n");
+ return false;
+ }
+ pr_devel("Found %s\n", np->full_name);
+
+ /* Resource 1 is the OS ring TIMA */
+ if (of_address_to_resource(np, 1, &r)) {
+ pr_err("Failed to get thread mgmnt area resource\n");
+ return false;
+ }
+ tima = ioremap(r.start, resource_size(&r));
+ if (!tima) {
+ pr_err("Failed to map thread mgmnt area\n");
+ return false;
+ }
+
+ if (!xive_get_max_prio(&max_prio))
+ return false;
+
+ /* Feed the IRQ number allocator with the ranges given in the DT */
+ reg = of_get_property(np, "ibm,xive-lisn-ranges", &len);
+ if (!reg) {
+ pr_err("Failed to read 'ibm,xive-lisn-ranges' property\n");
+ return false;
+ }
+
+ if (len % (2 * sizeof(u32)) != 0) {
+ pr_err("invalid 'ibm,xive-lisn-ranges' property\n");
+ return false;
+ }
+
+ for (i = 0; i < len / (2 * sizeof(u32)); i++, reg += 2)
+ xive_irq_bitmap_add(be32_to_cpu(reg[0]),
+ be32_to_cpu(reg[1]));
+
+ /* Iterate the EQ sizes and pick one */
+ of_property_for_each_u32(np, "ibm,xive-eq-sizes", prop, reg, val) {
+ xive_queue_shift = val;
+ if (val == PAGE_SHIFT)
+ break;
+ }
+
+ /* Initialize XIVE core with our backend */
+ if (!xive_core_init(&xive_spapr_ops, tima, TM_QW1_OS, max_prio))
+ return false;
+
+ pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10));
+ return true;
+}
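
The XIVE_SRC_*, XIVE_EQ_* and XIVE_ESB_FLAG_* masks in the new sPAPR backend use IBM's MSB-0 bit numbering, where bit n of a 64-bit doubleword is (1ull << (63 - n)) and bit 63 is the least-significant bit. A few worked values from the definitions above:

/* MSB-0 numbering used by the hcall flag definitions:
 *   bit 63 -> 1ull << (63 - 63) == 0x1  (XIVE_SRC_STORE_EOI)
 *   bit 62 -> 1ull << (63 - 62) == 0x2  (XIVE_SRC_SET_EISN)
 *   bit 60 -> 1ull << (63 - 60) == 0x8  (XIVE_SRC_H_INT_ESB)
 */

Similarly, xive_get_max_prio() keeps the highest non-reserved priority: with a hypothetical ibm,plat-res-int-priorities value of <1 2 6 2>, reserving priorities 1-2 and 6-7, the scan ends with max_prio = 5.
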
diff --git a/arch/powerpc/sysdev/xive/xive-internal.h b/arch/powerpc/sysdev/xive/xive-internal.h
index d07ef2d29caf..f34abed0c05f 100644
--- a/arch/powerpc/sysdev/xive/xive-internal.h
+++ b/arch/powerpc/sysdev/xive/xive-internal.h
@@ -47,6 +47,7 @@ struct xive_ops {
void (*update_pending)(struct xive_cpu *xc);
void (*eoi)(u32 hw_irq);
void (*sync_source)(u32 hw_irq);
+ u64 (*esb_rw)(u32 hw_irq, u32 offset, u64 data, bool write);
#ifdef CONFIG_SMP
int (*get_ipi)(unsigned int cpu, struct xive_cpu *xc);
void (*put_ipi)(unsigned int cpu, struct xive_cpu *xc);
@@ -56,6 +57,12 @@ struct xive_ops {
bool xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 offset,
u8 max_prio);
+__be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift);
+
+static inline u32 xive_alloc_order(u32 queue_shift)
+{
+ return (queue_shift > PAGE_SHIFT) ? (queue_shift - PAGE_SHIFT) : 0;
+}
extern bool xive_cmdline_disabled;
diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile
index 0b2f771593eb..1dd88315cff4 100644
--- a/arch/powerpc/xmon/Makefile
+++ b/arch/powerpc/xmon/Makefile
@@ -5,6 +5,10 @@ subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
GCOV_PROFILE := n
UBSAN_SANITIZE := n
+# Disable ftrace for the entire directory
+ORIG_CFLAGS := $(KBUILD_CFLAGS)
+KBUILD_CFLAGS = $(subst -mno-sched-epilog,,$(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS)))
+
ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
obj-y += xmon.o nonstdio.o spr_access.o
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 08e367e3e8c3..33351c6704b1 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -89,6 +89,7 @@ static unsigned long nidump = 16;
static unsigned long ncsum = 4096;
static int termch;
static char tmpstr[128];
+static int tracing_enabled;
static long bus_error_jmp[JMP_BUF_LEN];
static int catch_memory_errors;
@@ -234,6 +235,7 @@ Commands:\n\
"\
dr dump stream of raw bytes\n\
dt dump the tracing buffers (uses printk)\n\
+ dtc dump the tracing buffers for current CPU (uses printk)\n\
"
#ifdef CONFIG_PPC_POWERNV
" dx# dump xive on CPU #\n\
@@ -461,6 +463,9 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
local_irq_save(flags);
hard_irq_disable();
+ tracing_enabled = tracing_is_on();
+ tracing_off();
+
bp = in_breakpoint_table(regs->nip, &offset);
if (bp != NULL) {
regs->nip = bp->address + offset;
@@ -981,6 +986,8 @@ cmds(struct pt_regs *excp)
break;
case 'x':
case 'X':
+ if (tracing_enabled)
+ tracing_on();
return cmd;
case EOF:
printf(" <no input ...>\n");
@@ -1732,23 +1739,25 @@ static void dump_206_sprs(void)
/* Actually some of these pre-date 2.06, but whatevs */
- printf("srr0 = %.16x srr1 = %.16x dsisr = %.8x\n",
+ printf("srr0 = %.16lx srr1 = %.16lx dsisr = %.8x\n",
mfspr(SPRN_SRR0), mfspr(SPRN_SRR1), mfspr(SPRN_DSISR));
- printf("dscr = %.16x ppr = %.16x pir = %.8x\n",
+ printf("dscr = %.16lx ppr = %.16lx pir = %.8x\n",
mfspr(SPRN_DSCR), mfspr(SPRN_PPR), mfspr(SPRN_PIR));
+ printf("amr = %.16lx uamor = %.16lx\n",
+ mfspr(SPRN_AMR), mfspr(SPRN_UAMOR));
if (!(mfmsr() & MSR_HV))
return;
- printf("sdr1 = %.16x hdar = %.16x hdsisr = %.8x\n",
+ printf("sdr1 = %.16lx hdar = %.16lx hdsisr = %.8x\n",
mfspr(SPRN_SDR1), mfspr(SPRN_HDAR), mfspr(SPRN_HDSISR));
- printf("hsrr0 = %.16x hsrr1 = %.16x hdec = %.8x\n",
+ printf("hsrr0 = %.16lx hsrr1 = %.16lx hdec = %.16lx\n",
mfspr(SPRN_HSRR0), mfspr(SPRN_HSRR1), mfspr(SPRN_HDEC));
- printf("lpcr = %.16x pcr = %.16x lpidr = %.8x\n",
+ printf("lpcr = %.16lx pcr = %.16lx lpidr = %.8x\n",
mfspr(SPRN_LPCR), mfspr(SPRN_PCR), mfspr(SPRN_LPID));
- printf("hsprg0 = %.16x hsprg1 = %.16x\n",
- mfspr(SPRN_HSPRG0), mfspr(SPRN_HSPRG1));
- printf("dabr = %.16x dabrx = %.16x\n",
+ printf("hsprg0 = %.16lx hsprg1 = %.16lx amor = %.16lx\n",
+ mfspr(SPRN_HSPRG0), mfspr(SPRN_HSPRG1), mfspr(SPRN_AMOR));
+ printf("dabr = %.16lx dabrx = %.16lx\n",
mfspr(SPRN_DABR), mfspr(SPRN_DABRX));
#endif
}
@@ -1761,42 +1770,65 @@ static void dump_207_sprs(void)
if (!cpu_has_feature(CPU_FTR_ARCH_207S))
return;
- printf("dpdes = %.16x tir = %.16x cir = %.8x\n",
+ printf("dpdes = %.16lx tir = %.16lx cir = %.8x\n",
mfspr(SPRN_DPDES), mfspr(SPRN_TIR), mfspr(SPRN_CIR));
- printf("fscr = %.16x tar = %.16x pspb = %.8x\n",
+ printf("fscr = %.16lx tar = %.16lx pspb = %.8x\n",
mfspr(SPRN_FSCR), mfspr(SPRN_TAR), mfspr(SPRN_PSPB));
msr = mfmsr();
if (msr & MSR_TM) {
/* Only if TM has been enabled in the kernel */
- printf("tfhar = %.16x tfiar = %.16x texasr = %.16x\n",
+ printf("tfhar = %.16lx tfiar = %.16lx texasr = %.16lx\n",
mfspr(SPRN_TFHAR), mfspr(SPRN_TFIAR),
mfspr(SPRN_TEXASR));
}
- printf("mmcr0 = %.16x mmcr1 = %.16x mmcr2 = %.16x\n",
+ printf("mmcr0 = %.16lx mmcr1 = %.16lx mmcr2 = %.16lx\n",
mfspr(SPRN_MMCR0), mfspr(SPRN_MMCR1), mfspr(SPRN_MMCR2));
printf("pmc1 = %.8x pmc2 = %.8x pmc3 = %.8x pmc4 = %.8x\n",
mfspr(SPRN_PMC1), mfspr(SPRN_PMC2),
mfspr(SPRN_PMC3), mfspr(SPRN_PMC4));
- printf("mmcra = %.16x siar = %.16x pmc5 = %.8x\n",
+ printf("mmcra = %.16lx siar = %.16lx pmc5 = %.8x\n",
mfspr(SPRN_MMCRA), mfspr(SPRN_SIAR), mfspr(SPRN_PMC5));
- printf("sdar = %.16x sier = %.16x pmc6 = %.8x\n",
+ printf("sdar = %.16lx sier = %.16lx pmc6 = %.8x\n",
mfspr(SPRN_SDAR), mfspr(SPRN_SIER), mfspr(SPRN_PMC6));
- printf("ebbhr = %.16x ebbrr = %.16x bescr = %.16x\n",
+ printf("ebbhr = %.16lx ebbrr = %.16lx bescr = %.16lx\n",
mfspr(SPRN_EBBHR), mfspr(SPRN_EBBRR), mfspr(SPRN_BESCR));
+ printf("iamr = %.16lx\n", mfspr(SPRN_IAMR));
if (!(msr & MSR_HV))
return;
- printf("hfscr = %.16x dhdes = %.16x rpr = %.16x\n",
+ printf("hfscr = %.16lx dhdes = %.16lx rpr = %.16lx\n",
mfspr(SPRN_HFSCR), mfspr(SPRN_DHDES), mfspr(SPRN_RPR));
- printf("dawr = %.16x dawrx = %.16x ciabr = %.16x\n",
+ printf("dawr = %.16lx dawrx = %.16lx ciabr = %.16lx\n",
mfspr(SPRN_DAWR), mfspr(SPRN_DAWRX), mfspr(SPRN_CIABR));
#endif
}
+static void dump_300_sprs(void)
+{
+#ifdef CONFIG_PPC64
+ bool hv = mfmsr() & MSR_HV;
+
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ return;
+
+ printf("pidr = %.16lx tidr = %.16lx\n",
+ mfspr(SPRN_PID), mfspr(SPRN_TIDR));
+ printf("asdr = %.16lx psscr = %.16lx\n",
+ mfspr(SPRN_ASDR), hv ? mfspr(SPRN_PSSCR)
+ : mfspr(SPRN_PSSCR_PR));
+
+ if (!hv)
+ return;
+
+ printf("ptcr = %.16lx\n",
+ mfspr(SPRN_PTCR));
+#endif
+}
+
static void dump_one_spr(int spr, bool show_unimplemented)
{
unsigned long val;
@@ -1850,6 +1882,7 @@ static void super_regs(void)
dump_206_sprs();
dump_207_sprs();
+ dump_300_sprs();
return;
}
@@ -2231,6 +2264,17 @@ static void xmon_rawdump (unsigned long adrs, long ndump)
printf("\n");
}
+static void dump_tracing(void)
+{
+ int c;
+
+ c = inchar();
+ if (c == 'c')
+ ftrace_dump(DUMP_ORIG);
+ else
+ ftrace_dump(DUMP_ALL);
+}
+
#ifdef CONFIG_PPC64
static void dump_one_paca(int cpu)
{
@@ -2507,6 +2551,11 @@ dump(void)
}
#endif
+ if (c == 't') {
+ dump_tracing();
+ return;
+ }
+
if (c == '\n')
termch = c;
@@ -2525,9 +2574,6 @@ dump(void)
dump_log_buf();
} else if (c == 'o') {
dump_opal_msglog();
- } else if (c == 't') {
- ftrace_dump(DUMP_ALL);
- tracing_on();
} else if (c == 'r') {
scanhex(&ndump);
if (ndump == 0)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 7eeb75d758c1..48af970320cb 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -222,6 +222,10 @@ config HAVE_MARCH_Z13_FEATURES
def_bool n
select HAVE_MARCH_ZEC12_FEATURES
+config HAVE_MARCH_Z14_FEATURES
+ def_bool n
+ select HAVE_MARCH_Z13_FEATURES
+
choice
prompt "Processor type"
default MARCH_Z196
@@ -282,6 +286,14 @@ config MARCH_Z13
2964 series). The kernel will be slightly faster but will not work on
older machines.
+config MARCH_Z14
+ bool "IBM z14"
+ select HAVE_MARCH_Z14_FEATURES
+ help
+ Select this to enable optimizations for IBM z14 (3906 series).
+ The kernel will be slightly faster but will not work on older
+ machines.
+
endchoice
config MARCH_Z900_TUNE
@@ -305,6 +317,9 @@ config MARCH_ZEC12_TUNE
config MARCH_Z13_TUNE
def_bool TUNE_Z13 || MARCH_Z13 && TUNE_DEFAULT
+config MARCH_Z14_TUNE
+ def_bool TUNE_Z14 || MARCH_Z14 && TUNE_DEFAULT
+
choice
prompt "Tune code generation"
default TUNE_DEFAULT
@@ -343,6 +358,9 @@ config TUNE_ZEC12
config TUNE_Z13
bool "IBM z13"
+config TUNE_Z14
+ bool "IBM z14"
+
endchoice
config 64BIT
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 54e00526b8df..dac821cfcd43 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -31,7 +31,8 @@ mflags-$(CONFIG_MARCH_Z9_109) := -march=z9-109
mflags-$(CONFIG_MARCH_Z10) := -march=z10
mflags-$(CONFIG_MARCH_Z196) := -march=z196
mflags-$(CONFIG_MARCH_ZEC12) := -march=zEC12
-mflags-$(CONFIG_MARCH_Z13) := -march=z13
+mflags-$(CONFIG_MARCH_Z13) := -march=z13
+mflags-$(CONFIG_MARCH_Z14) := -march=z14
export CC_FLAGS_MARCH := $(mflags-y)
@@ -44,7 +45,8 @@ cflags-$(CONFIG_MARCH_Z9_109_TUNE) += -mtune=z9-109
cflags-$(CONFIG_MARCH_Z10_TUNE) += -mtune=z10
cflags-$(CONFIG_MARCH_Z196_TUNE) += -mtune=z196
cflags-$(CONFIG_MARCH_ZEC12_TUNE) += -mtune=zEC12
-cflags-$(CONFIG_MARCH_Z13_TUNE) += -mtune=z13
+cflags-$(CONFIG_MARCH_Z13_TUNE) += -mtune=z13
+cflags-$(CONFIG_MARCH_Z14_TUNE) += -mtune=z14
cflags-y += -Wa,-I$(srctree)/arch/$(ARCH)/include
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index b3c88479feba..6e2c9f7e47fa 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -16,4 +16,5 @@ generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
generic-y += preempt.h
generic-y += trace_clock.h
+generic-y += unaligned.h
generic-y += word-at-a-time.h
diff --git a/arch/s390/include/asm/bug.h b/arch/s390/include/asm/bug.h
index 1bbd9dbfe4e0..ce9cc123988b 100644
--- a/arch/s390/include/asm/bug.h
+++ b/arch/s390/include/asm/bug.h
@@ -14,7 +14,7 @@
".section .rodata.str,\"aMS\",@progbits,1\n" \
"2: .asciz \""__FILE__"\"\n" \
".previous\n" \
- ".section __bug_table,\"a\"\n" \
+ ".section __bug_table,\"aw\"\n" \
"3: .long 1b-3b,2b-3b\n" \
" .short %0,%1\n" \
" .org 3b+%2\n" \
@@ -30,7 +30,7 @@
asm volatile( \
"0: j 0b+2\n" \
"1:\n" \
- ".section __bug_table,\"a\"\n" \
+ ".section __bug_table,\"aw\"\n" \
"2: .long 1b-2b\n" \
" .short %0\n" \
" .org 2b+%1\n" \
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index b9300f8aee10..07a82bc933a7 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -8,11 +8,12 @@
#include <linux/sched/task_stack.h>
#include <linux/thread_info.h>
-#define __TYPE_IS_PTR(t) (!__builtin_types_compatible_p(typeof(0?(t)0:0ULL), u64))
+#define __TYPE_IS_PTR(t) (!__builtin_types_compatible_p( \
+ typeof(0?(__force t)0:0ULL), u64))
#define __SC_DELOUSE(t,v) ({ \
BUILD_BUG_ON(sizeof(t) > 4 && !__TYPE_IS_PTR(t)); \
- (t)(__TYPE_IS_PTR(t) ? ((v) & 0x7fffffff) : (v)); \
+ (__force t)(__TYPE_IS_PTR(t) ? ((v) & 0x7fffffff) : (v)); \
})
#define PSW32_MASK_PER 0x40000000UL
diff --git a/arch/s390/include/asm/cpcmd.h b/arch/s390/include/asm/cpcmd.h
index 3dfadb5d648f..ca2b0624ad46 100644
--- a/arch/s390/include/asm/cpcmd.h
+++ b/arch/s390/include/asm/cpcmd.h
@@ -10,9 +10,8 @@
/*
* the lowlevel function for cpcmd
- * the caller of __cpcmd has to ensure that the response buffer is below 2 GB
*/
-extern int __cpcmd(const char *cmd, char *response, int rlen, int *response_code);
+int __cpcmd(const char *cmd, char *response, int rlen, int *response_code);
/*
* cpcmd is the in-kernel interface for issuing CP commands
@@ -25,8 +24,8 @@ extern int __cpcmd(const char *cmd, char *response, int rlen, int *response_code
* response_code: return pointer for VM's error code
* return value: the size of the response. The caller can check if the buffer
* was large enough by comparing the return value and rlen
- * NOTE: If the response buffer is not below 2 GB, cpcmd can sleep
+ * NOTE: If the response buffer is not in real storage, cpcmd can sleep
*/
-extern int cpcmd(const char *cmd, char *response, int rlen, int *response_code);
+int cpcmd(const char *cmd, char *response, int rlen, int *response_code);
#endif /* _ASM_S390_CPCMD_H */
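
cpcmd() issues a CP command from the kernel and copies the response into the supplied buffer, returning the full response size so callers can detect truncation. A minimal, hypothetical call site (QUERY USERID is only an example command):

#include <linux/printk.h>
#include <asm/cpcmd.h>

static void example_query_userid(void)
{
	char response[128];
	int cprc, len;

	len = cpcmd("QUERY USERID", response, sizeof(response), &cprc);
	if (len > (int)sizeof(response))
		pr_warn("CP response truncated, %d bytes needed\n", len);
}
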
diff --git a/arch/s390/include/asm/ebcdic.h b/arch/s390/include/asm/ebcdic.h
index c5befc5a3bf5..b71735eab23f 100644
--- a/arch/s390/include/asm/ebcdic.h
+++ b/arch/s390/include/asm/ebcdic.h
@@ -9,9 +9,7 @@
#ifndef _EBCDIC_H
#define _EBCDIC_H
-#ifndef _S390_TYPES_H
-#include <types.h>
-#endif
+#include <linux/types.h>
extern __u8 _ascebc_500[256]; /* ASCII -> EBCDIC 500 conversion table */
extern __u8 _ebcasc_500[256]; /* EBCDIC 500 -> ASCII conversion table */
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index c92ed0170be2..65998a1f5d43 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -191,7 +191,7 @@ struct arch_elf_state {
} while (0)
#define CORE_DUMP_USE_REGSET
-#define ELF_EXEC_PAGESIZE 4096
+#define ELF_EXEC_PAGESIZE PAGE_SIZE
/*
* This is the base location for PIE (ET_DYN with INTERP) loads. On
diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h
index a4811aa0304d..8f8eec9e1198 100644
--- a/arch/s390/include/asm/futex.h
+++ b/arch/s390/include/asm/futex.h
@@ -21,17 +21,12 @@
: "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
"m" (*uaddr) : "cc");
-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+ u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, newval, ret;
load_kernel_asce();
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
pagefault_disable();
switch (op) {
@@ -60,17 +55,9 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
}
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
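
This hunk is part of the cross-architecture futex cleanup: decoding encoded_op and performing the FUTEX_OP_CMP_* comparison move into generic code, and the architecture only implements arch_futex_atomic_op_inuser(op, oparg, &oval, uaddr). A sketch of the generic caller's shape, reusing the decode from the removed s390 lines (the real generic helper also range-checks oparg; details may differ):

#include <linux/futex.h>
#include <linux/uaccess.h>
#include <asm/futex.h>

static int example_futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
{
	int op = (encoded_op >> 28) & 7;
	int cmp = (encoded_op >> 24) & 15;
	int oparg = (encoded_op << 8) >> 20;
	int cmparg = (encoded_op << 20) >> 20;
	int oldval, ret;

	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
		oparg = 1 << oparg;

	ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
	if (ret)
		return ret;

	switch (cmp) {
	case FUTEX_OP_CMP_EQ: return oldval == cmparg;
	case FUTEX_OP_CMP_NE: return oldval != cmparg;
	case FUTEX_OP_CMP_LT: return oldval < cmparg;
	case FUTEX_OP_CMP_GE: return oldval >= cmparg;
	case FUTEX_OP_CMP_LE: return oldval <= cmparg;
	case FUTEX_OP_CMP_GT: return oldval > cmparg;
	default: return -ENOSYS;
	}
}
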
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index edb5161df7e2..6810bd757312 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -81,7 +81,7 @@ struct ipl_parameter_block {
struct ipl_block_fcp fcp;
struct ipl_block_ccw ccw;
} ipl_info;
-} __attribute__((packed,aligned(4096)));
+} __packed __aligned(PAGE_SIZE);
/*
* IPL validity flags
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 8a5b082797f8..a6870ea6ea8b 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -95,46 +95,46 @@ struct lowcore {
__u64 int_clock; /* 0x0310 */
__u64 mcck_clock; /* 0x0318 */
__u64 clock_comparator; /* 0x0320 */
+ __u64 boot_clock[2]; /* 0x0328 */
/* Current process. */
- __u64 current_task; /* 0x0328 */
- __u8 pad_0x318[0x320-0x318]; /* 0x0330 */
- __u64 kernel_stack; /* 0x0338 */
+ __u64 current_task; /* 0x0338 */
+ __u64 kernel_stack; /* 0x0340 */
/* Interrupt, panic and restart stack. */
- __u64 async_stack; /* 0x0340 */
- __u64 panic_stack; /* 0x0348 */
- __u64 restart_stack; /* 0x0350 */
+ __u64 async_stack; /* 0x0348 */
+ __u64 panic_stack; /* 0x0350 */
+ __u64 restart_stack; /* 0x0358 */
/* Restart function and parameter. */
- __u64 restart_fn; /* 0x0358 */
- __u64 restart_data; /* 0x0360 */
- __u64 restart_source; /* 0x0368 */
+ __u64 restart_fn; /* 0x0360 */
+ __u64 restart_data; /* 0x0368 */
+ __u64 restart_source; /* 0x0370 */
/* Address space pointer. */
- __u64 kernel_asce; /* 0x0370 */
- __u64 user_asce; /* 0x0378 */
+ __u64 kernel_asce; /* 0x0378 */
+ __u64 user_asce; /* 0x0380 */
/*
* The lpp and current_pid fields form a
* 64-bit value that is set as program
* parameter with the LPP instruction.
*/
- __u32 lpp; /* 0x0380 */
- __u32 current_pid; /* 0x0384 */
+ __u32 lpp; /* 0x0388 */
+ __u32 current_pid; /* 0x038c */
/* SMP info area */
- __u32 cpu_nr; /* 0x0388 */
- __u32 softirq_pending; /* 0x038c */
- __u64 percpu_offset; /* 0x0390 */
- __u64 vdso_per_cpu_data; /* 0x0398 */
- __u64 machine_flags; /* 0x03a0 */
- __u32 preempt_count; /* 0x03a8 */
- __u8 pad_0x03ac[0x03b0-0x03ac]; /* 0x03ac */
- __u64 gmap; /* 0x03b0 */
- __u32 spinlock_lockval; /* 0x03b8 */
- __u32 fpu_flags; /* 0x03bc */
- __u8 pad_0x03c0[0x0400-0x03c0]; /* 0x03c0 */
+ __u32 cpu_nr; /* 0x0390 */
+ __u32 softirq_pending; /* 0x0394 */
+ __u64 percpu_offset; /* 0x0398 */
+ __u64 vdso_per_cpu_data; /* 0x03a0 */
+ __u64 machine_flags; /* 0x03a8 */
+ __u32 preempt_count; /* 0x03b0 */
+ __u8 pad_0x03b4[0x03b8-0x03b4]; /* 0x03b4 */
+ __u64 gmap; /* 0x03b8 */
+ __u32 spinlock_lockval; /* 0x03c0 */
+ __u32 fpu_flags; /* 0x03c4 */
+ __u8 pad_0x03c8[0x0400-0x03c8]; /* 0x03c8 */
/* Per cpu primary space access list */
__u32 paste[16]; /* 0x0400 */
diff --git a/arch/s390/include/asm/mman.h b/arch/s390/include/asm/mman.h
deleted file mode 100644
index b79813d9cf68..000000000000
--- a/arch/s390/include/asm/mman.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/*
- * S390 version
- *
- * Derived from "include/asm-i386/mman.h"
- */
-#ifndef __S390_MMAN_H__
-#define __S390_MMAN_H__
-
-#include <uapi/asm/mman.h>
-
-#endif /* __S390_MMAN_H__ */
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 4541ac44b35f..72e9ca83a668 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -12,6 +12,7 @@
#include <linux/mm_types.h>
#include <asm/tlbflush.h>
#include <asm/ctl_reg.h>
+#include <asm-generic/mm_hooks.h>
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
@@ -33,7 +34,7 @@ static inline int init_new_context(struct task_struct *tsk,
mm->context.use_cmma = 0;
#endif
switch (mm->context.asce_limit) {
- case 1UL << 42:
+ case _REGION2_SIZE:
/*
* forked 3-level task, fall through to set new asce with new
* mm->pgd
@@ -44,12 +45,17 @@ static inline int init_new_context(struct task_struct *tsk,
mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
_ASCE_USER_BITS | _ASCE_TYPE_REGION3;
break;
- case 1UL << 53:
+ case -PAGE_SIZE:
+ /* forked 5-level task, set new asce with new_mm->pgd */
+ mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+ _ASCE_USER_BITS | _ASCE_TYPE_REGION1;
+ break;
+ case _REGION1_SIZE:
/* forked 4-level task, set new asce with new mm->pgd */
mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
break;
- case 1UL << 31:
+ case _REGION3_SIZE:
/* forked 2-level compat task, set new asce with new mm->pgd */
mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
_ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
@@ -133,30 +139,4 @@ static inline void activate_mm(struct mm_struct *prev,
set_user_asce(next);
}
-static inline void arch_dup_mmap(struct mm_struct *oldmm,
- struct mm_struct *mm)
-{
-}
-
-static inline void arch_exit_mmap(struct mm_struct *mm)
-{
-}
-
-static inline void arch_unmap(struct mm_struct *mm,
- struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
-{
-}
-
-static inline void arch_bprm_mm_init(struct mm_struct *mm,
- struct vm_area_struct *vma)
-{
-}
-
-static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
- bool write, bool execute, bool foreign)
-{
- /* by default, allow everything */
- return true;
-}
#endif /* __S390_MMU_CONTEXT_H */
diff --git a/arch/s390/include/asm/nmi.h b/arch/s390/include/asm/nmi.h
index 9d91cf3e427f..c8e211b9a002 100644
--- a/arch/s390/include/asm/nmi.h
+++ b/arch/s390/include/asm/nmi.h
@@ -72,7 +72,7 @@ union mci {
u64 ar : 1; /* 33 access register validity */
u64 da : 1; /* 34 delayed access exception */
u64 : 1; /* 35 */
- u64 gs : 1; /* 36 guarded storage registers */
+ u64 gs : 1; /* 36 guarded storage registers validity */
u64 : 5; /* 37-41 */
u64 pr : 1; /* 42 tod programmable register validity */
u64 fc : 1; /* 43 fp control register validity */
diff --git a/arch/s390/include/asm/page-states.h b/arch/s390/include/asm/page-states.h
index 42267a2fe29e..ca21b28a7b17 100644
--- a/arch/s390/include/asm/page-states.h
+++ b/arch/s390/include/asm/page-states.h
@@ -13,6 +13,7 @@
#define ESSA_SET_POT_VOLATILE 4
#define ESSA_SET_STABLE_RESIDENT 5
#define ESSA_SET_STABLE_IF_RESIDENT 6
+#define ESSA_SET_STABLE_NODAT 7
#define ESSA_MAX ESSA_SET_STABLE_IF_RESIDENT
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 624deaa44230..5d5c2b3500a4 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -10,10 +10,14 @@
#include <linux/const.h>
#include <asm/types.h>
+#define _PAGE_SHIFT 12
+#define _PAGE_SIZE (_AC(1, UL) << _PAGE_SHIFT)
+#define _PAGE_MASK (~(_PAGE_SIZE - 1))
+
/* PAGE_SHIFT determines the page size */
-#define PAGE_SHIFT 12
-#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
-#define PAGE_MASK (~(PAGE_SIZE-1))
+#define PAGE_SHIFT _PAGE_SHIFT
+#define PAGE_SIZE _PAGE_SIZE
+#define PAGE_MASK _PAGE_MASK
#define PAGE_DEFAULT_ACC 0
#define PAGE_DEFAULT_KEY (PAGE_DEFAULT_ACC << 4)
@@ -133,6 +137,9 @@ static inline int page_reset_referenced(unsigned long addr)
struct page;
void arch_free_page(struct page *page, int order);
void arch_alloc_page(struct page *page, int order);
+void arch_set_page_dat(struct page *page, int order);
+void arch_set_page_nodat(struct page *page, int order);
+int arch_test_page_nodat(struct page *page);
void arch_set_page_states(int make_stable);
static inline int devmem_is_allowed(unsigned long pfn)
@@ -145,16 +152,26 @@ static inline int devmem_is_allowed(unsigned long pfn)
#endif /* !__ASSEMBLY__ */
-#define __PAGE_OFFSET 0x0UL
-#define PAGE_OFFSET 0x0UL
-#define __pa(x) (unsigned long)(x)
-#define __va(x) (void *)(unsigned long)(x)
-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
-#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+#define __PAGE_OFFSET 0x0UL
+#define PAGE_OFFSET 0x0UL
+
+#define __pa(x) ((unsigned long)(x))
+#define __va(x) ((void *)(unsigned long)(x))
+
+#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
+
+#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
#define page_to_virt(page) pfn_to_virt(page_to_pfn(page))
+#define phys_to_pfn(kaddr) ((kaddr) >> PAGE_SHIFT)
+#define pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT)
+
+#define phys_to_page(kaddr) pfn_to_page(phys_to_pfn(kaddr))
+#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
+
+#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
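A side note on the conversion helpers introduced above: they are all plain shifts by PAGE_SHIFT. A minimal stand-alone sketch of the same arithmetic (ordinary user-space C with made-up values, not the kernel macros themselves):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Mirrors the virt_to_pfn()/pfn_to_virt() pair added by the patch:
 * s390 keeps an identity mapping, so converting between a kernel
 * virtual address and a page frame number is a shift by PAGE_SHIFT. */
static unsigned long virt_to_pfn(unsigned long kaddr)
{
	return kaddr >> PAGE_SHIFT;
}

static unsigned long pfn_to_virt(unsigned long pfn)
{
	return pfn << PAGE_SHIFT;
}

int main(void)
{
	unsigned long kaddr = 0x12345000UL;	/* hypothetical address */
	unsigned long pfn = virt_to_pfn(kaddr);

	printf("kaddr 0x%lx -> pfn 0x%lx -> kaddr 0x%lx\n",
	       kaddr, pfn, pfn_to_virt(pfn));
	return 0;
}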
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index bb0ff1bb0c4a..a0d9167519b1 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -15,6 +15,8 @@
#include <linux/gfp.h>
#include <linux/mm.h>
+#define CRST_ALLOC_ORDER 2
+
unsigned long *crst_table_alloc(struct mm_struct *);
void crst_table_free(struct mm_struct *, unsigned long *);
@@ -42,16 +44,16 @@ static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
static inline void crst_table_init(unsigned long *crst, unsigned long entry)
{
- clear_table(crst, entry, sizeof(unsigned long)*2048);
+ clear_table(crst, entry, _CRST_TABLE_SIZE);
}
static inline unsigned long pgd_entry_type(struct mm_struct *mm)
{
- if (mm->context.asce_limit <= (1UL << 31))
+ if (mm->context.asce_limit <= _REGION3_SIZE)
return _SEGMENT_ENTRY_EMPTY;
- if (mm->context.asce_limit <= (1UL << 42))
+ if (mm->context.asce_limit <= _REGION2_SIZE)
return _REGION3_ENTRY_EMPTY;
- if (mm->context.asce_limit <= (1UL << 53))
+ if (mm->context.asce_limit <= _REGION1_SIZE)
return _REGION2_ENTRY_EMPTY;
return _REGION1_ENTRY_EMPTY;
}
@@ -119,7 +121,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
if (!table)
return NULL;
- if (mm->context.asce_limit == (1UL << 31)) {
+ if (mm->context.asce_limit == _REGION3_SIZE) {
/* Forking a compat process with 2 page table levels */
if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
crst_table_free(mm, table);
@@ -131,7 +133,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
- if (mm->context.asce_limit == (1UL << 31))
+ if (mm->context.asce_limit == _REGION3_SIZE)
pgtable_pmd_page_dtor(virt_to_page(pgd));
crst_table_free(mm, (unsigned long *) pgd);
}
@@ -158,4 +160,8 @@ static inline void pmd_populate(struct mm_struct *mm,
extern void rcu_table_freelist_finish(void);
+void vmem_map_init(void);
+void *vmem_crst_alloc(unsigned long val);
+pte_t *vmem_pte_alloc(void);
+
#endif /* _S390_PGALLOC_H */
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 57057fb1cc07..dce708e061ea 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -11,19 +11,6 @@
#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H
-/*
- * The Linux memory management assumes a three-level page table setup.
- * For s390 64 bit we use up to four of the five levels the hardware
- * provides (region first tables are not used).
- *
- * The "pgd_xxx()" functions are trivial for a folded two-level
- * setup: the pgd is never bad, and a pmd always exists (as it's folded
- * into the pgd entry)
- *
- * This file contains the functions and defines necessary to modify and use
- * the S390 page table tree.
- */
-#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
@@ -34,9 +21,6 @@
extern pgd_t swapper_pg_dir[];
extern void paging_init(void);
-extern void vmem_map_init(void);
-pmd_t *vmem_pmd_alloc(void);
-pte_t *vmem_pte_alloc(void);
enum {
PG_DIRECT_MAP_4K = 0,
@@ -77,38 +61,6 @@ extern unsigned long zero_page_mask;
#define __HAVE_COLOR_ZERO_PAGE
/* TODO: s390 cannot support io_remap_pfn_range... */
-#endif /* !__ASSEMBLY__ */
-
-/*
- * PMD_SHIFT determines the size of the area a second-level page
- * table can map
- * PGDIR_SHIFT determines what a third-level page table entry can map
- */
-#define PMD_SHIFT 20
-#define PUD_SHIFT 31
-#define P4D_SHIFT 42
-#define PGDIR_SHIFT 53
-
-#define PMD_SIZE (1UL << PMD_SHIFT)
-#define PMD_MASK (~(PMD_SIZE-1))
-#define PUD_SIZE (1UL << PUD_SHIFT)
-#define PUD_MASK (~(PUD_SIZE-1))
-#define P4D_SIZE (1UL << P4D_SHIFT)
-#define P4D_MASK (~(P4D_SIZE-1))
-#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
-#define PGDIR_MASK (~(PGDIR_SIZE-1))
-
-/*
- * entries per page directory level: the S390 is two-level, so
- * we don't really have any PMD directory physically.
- * for S390 segment-table entries are combined to one PGD
- * that leads to 1024 pte per pgd
- */
-#define PTRS_PER_PTE 256
-#define PTRS_PER_PMD 2048
-#define PTRS_PER_PUD 2048
-#define PTRS_PER_P4D 2048
-#define PTRS_PER_PGD 2048
#define FIRST_USER_ADDRESS 0UL
@@ -123,7 +75,6 @@ extern unsigned long zero_page_mask;
#define pgd_ERROR(e) \
printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))
-#ifndef __ASSEMBLY__
/*
* The vmalloc and module area will always be on the topmost area of the
* kernel mapping. We reserve 128GB (64bit) for vmalloc and modules.
@@ -269,7 +220,7 @@ static inline int is_module_addr(void *addr)
*/
/* Bits in the segment/region table address-space-control-element */
-#define _ASCE_ORIGIN ~0xfffUL/* segment table origin */
+#define _ASCE_ORIGIN ~0xfffUL/* region/segment table origin */
#define _ASCE_PRIVATE_SPACE 0x100 /* private space control */
#define _ASCE_ALT_EVENT 0x80 /* storage alteration event control */
#define _ASCE_SPACE_SWITCH 0x40 /* space switch event */
@@ -320,9 +271,9 @@ static inline int is_module_addr(void *addr)
#define _SEGMENT_ENTRY_BITS 0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff0ff33UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */
-#define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* segment table origin */
-#define _SEGMENT_ENTRY_PROTECT 0x200 /* page protection bit */
-#define _SEGMENT_ENTRY_NOEXEC 0x100 /* region no-execute bit */
+#define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* page table origin */
+#define _SEGMENT_ENTRY_PROTECT 0x200 /* segment protection bit */
+#define _SEGMENT_ENTRY_NOEXEC 0x100 /* segment no-execute bit */
#define _SEGMENT_ENTRY_INVALID 0x20 /* invalid segment table entry */
#define _SEGMENT_ENTRY (0)
@@ -340,6 +291,54 @@ static inline int is_module_addr(void *addr)
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
#endif
+#define _CRST_ENTRIES 2048 /* number of region/segment table entries */
+#define _PAGE_ENTRIES 256 /* number of page table entries */
+
+#define _CRST_TABLE_SIZE (_CRST_ENTRIES * 8)
+#define _PAGE_TABLE_SIZE (_PAGE_ENTRIES * 8)
+
+#define _REGION1_SHIFT 53
+#define _REGION2_SHIFT 42
+#define _REGION3_SHIFT 31
+#define _SEGMENT_SHIFT 20
+
+#define _REGION1_INDEX (0x7ffUL << _REGION1_SHIFT)
+#define _REGION2_INDEX (0x7ffUL << _REGION2_SHIFT)
+#define _REGION3_INDEX (0x7ffUL << _REGION3_SHIFT)
+#define _SEGMENT_INDEX (0x7ffUL << _SEGMENT_SHIFT)
+#define _PAGE_INDEX (0xffUL << _PAGE_SHIFT)
+
+#define _REGION1_SIZE (1UL << _REGION1_SHIFT)
+#define _REGION2_SIZE (1UL << _REGION2_SHIFT)
+#define _REGION3_SIZE (1UL << _REGION3_SHIFT)
+#define _SEGMENT_SIZE (1UL << _SEGMENT_SHIFT)
+
+#define _REGION1_MASK (~(_REGION1_SIZE - 1))
+#define _REGION2_MASK (~(_REGION2_SIZE - 1))
+#define _REGION3_MASK (~(_REGION3_SIZE - 1))
+#define _SEGMENT_MASK (~(_SEGMENT_SIZE - 1))
+
+#define PMD_SHIFT _SEGMENT_SHIFT
+#define PUD_SHIFT _REGION3_SHIFT
+#define P4D_SHIFT _REGION2_SHIFT
+#define PGDIR_SHIFT _REGION1_SHIFT
+
+#define PMD_SIZE _SEGMENT_SIZE
+#define PUD_SIZE _REGION3_SIZE
+#define P4D_SIZE _REGION2_SIZE
+#define PGDIR_SIZE _REGION1_SIZE
+
+#define PMD_MASK _SEGMENT_MASK
+#define PUD_MASK _REGION3_MASK
+#define P4D_MASK _REGION2_MASK
+#define PGDIR_MASK _REGION1_MASK
+
+#define PTRS_PER_PTE _PAGE_ENTRIES
+#define PTRS_PER_PMD _CRST_ENTRIES
+#define PTRS_PER_PUD _CRST_ENTRIES
+#define PTRS_PER_P4D _CRST_ENTRIES
+#define PTRS_PER_PGD _CRST_ENTRIES
+
/*
* Segment table and region3 table entry encoding
* (R = read-only, I = invalid, y = young bit):
@@ -376,6 +375,7 @@ static inline int is_module_addr(void *addr)
/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO 0x0000000080000000UL
+#define _PGSTE_GPS_NODAT 0x0000000040000000UL
#define _PGSTE_GPS_USAGE_MASK 0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE 0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED 0x0000000001000000UL
@@ -505,7 +505,7 @@ static inline int mm_alloc_pgste(struct mm_struct *mm)
* In the case that a guest uses storage keys
* faults should no longer be backed by zero pages
*/
-#define mm_forbids_zeropage mm_use_skey
+#define mm_forbids_zeropage mm_has_pgste
static inline int mm_use_skey(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
@@ -952,15 +952,30 @@ static inline pte_t pte_mkhuge(pte_t pte)
#define IPTE_GLOBAL 0
#define IPTE_LOCAL 1
-static inline void __ptep_ipte(unsigned long address, pte_t *ptep, int local)
+#define IPTE_NODAT 0x400
+#define IPTE_GUEST_ASCE 0x800
+
+static inline void __ptep_ipte(unsigned long address, pte_t *ptep,
+ unsigned long opt, unsigned long asce,
+ int local)
{
unsigned long pto = (unsigned long) ptep;
- /* Invalidation + TLB flush for the pte */
+ if (__builtin_constant_p(opt) && opt == 0) {
+ /* Invalidation + TLB flush for the pte */
+ asm volatile(
+ " .insn rrf,0xb2210000,%[r1],%[r2],0,%[m4]"
+ : "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
+ [m4] "i" (local));
+ return;
+ }
+
+ /* Invalidate ptes with options + TLB flush of the ptes */
+ opt = opt | (asce & _ASCE_ORIGIN);
asm volatile(
- " .insn rrf,0xb2210000,%[r1],%[r2],0,%[m4]"
- : "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
- [m4] "i" (local));
+ " .insn rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
+ : [r2] "+a" (address), [r3] "+a" (opt)
+ : [r1] "a" (pto), [m4] "i" (local) : "memory");
}
static inline void __ptep_ipte_range(unsigned long address, int nr,
@@ -1341,31 +1356,61 @@ static inline void __pmdp_csp(pmd_t *pmdp)
#define IDTE_GLOBAL 0
#define IDTE_LOCAL 1
-static inline void __pmdp_idte(unsigned long address, pmd_t *pmdp, int local)
+#define IDTE_PTOA 0x0800
+#define IDTE_NODAT 0x1000
+#define IDTE_GUEST_ASCE 0x2000
+
+static inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
+ unsigned long opt, unsigned long asce,
+ int local)
{
unsigned long sto;
- sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
- asm volatile(
- " .insn rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
- : "+m" (*pmdp)
- : [r1] "a" (sto), [r2] "a" ((address & HPAGE_MASK)),
- [m4] "i" (local)
- : "cc" );
+ sto = (unsigned long) pmdp - pmd_index(addr) * sizeof(pmd_t);
+ if (__builtin_constant_p(opt) && opt == 0) {
+ /* flush without guest asce */
+ asm volatile(
+ " .insn rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
+ : "+m" (*pmdp)
+ : [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK)),
+ [m4] "i" (local)
+ : "cc" );
+ } else {
+ /* flush with guest asce */
+ asm volatile(
+ " .insn rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
+ : "+m" (*pmdp)
+ : [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK) | opt),
+ [r3] "a" (asce), [m4] "i" (local)
+ : "cc" );
+ }
}
-static inline void __pudp_idte(unsigned long address, pud_t *pudp, int local)
+static inline void __pudp_idte(unsigned long addr, pud_t *pudp,
+ unsigned long opt, unsigned long asce,
+ int local)
{
unsigned long r3o;
- r3o = (unsigned long) pudp - pud_index(address) * sizeof(pud_t);
+ r3o = (unsigned long) pudp - pud_index(addr) * sizeof(pud_t);
r3o |= _ASCE_TYPE_REGION3;
- asm volatile(
- " .insn rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
- : "+m" (*pudp)
- : [r1] "a" (r3o), [r2] "a" ((address & PUD_MASK)),
- [m4] "i" (local)
- : "cc");
+ if (__builtin_constant_p(opt) && opt == 0) {
+ /* flush without guest asce */
+ asm volatile(
+ " .insn rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
+ : "+m" (*pudp)
+ : [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK)),
+ [m4] "i" (local)
+ : "cc");
+ } else {
+ /* flush with guest asce */
+ asm volatile(
+ " .insn rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
+ : "+m" (*pudp)
+ : [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK) | opt),
+ [r3] "a" (asce), [m4] "i" (local)
+ : "cc" );
+ }
}
pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
@@ -1548,8 +1593,6 @@ static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-#endif /* !__ASSEMBLY__ */
-
#define kern_addr_valid(addr) (1)
extern int vmem_add_mapping(unsigned long start, unsigned long size);
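For orientation on the new _REGIONx/_SEGMENT constants above: each region/segment (CRST) table holds 2048 eight-byte entries and therefore resolves 11 address bits, which is where the 53/42/31/20 shifts come from; the page table resolves the remaining 8 bits with 256 entries. A small stand-alone sketch (plain C, not kernel code) of the resulting mapping sizes:

#include <stdio.h>

/* Shifts as defined in the patch; each CRST level covers 11 more
 * address bits (2048 entries of 8 bytes), the page table covers 8. */
#define _REGION1_SHIFT 53
#define _REGION2_SHIFT 42
#define _REGION3_SHIFT 31
#define _SEGMENT_SHIFT 20

int main(void)
{
	printf("region-first  entry maps %lu PiB\n", (1UL << _REGION1_SHIFT) >> 50); /* 8 */
	printf("region-second entry maps %lu TiB\n", (1UL << _REGION2_SHIFT) >> 40); /* 4 */
	printf("region-third  entry maps %lu GiB\n", (1UL << _REGION3_SHIFT) >> 30); /* 2 */
	printf("segment       entry maps %lu MiB\n", (1UL << _SEGMENT_SHIFT) >> 20); /* 1 */
	return 0;
}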
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index 998b61cd0e56..eaee69e7c42a 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -80,7 +80,7 @@ struct qdr {
u32 qkey : 4;
u32 : 28;
struct qdesfmt0 qdf0[126];
-} __attribute__ ((packed, aligned(4096)));
+} __packed __aligned(PAGE_SIZE);
#define QIB_AC_OUTBOUND_PCI_SUPPORTED 0x40
#define QIB_RFLAGS_ENABLE_QEBSM 0x80
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index cd78155b1829..490e035b3716 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -29,8 +29,10 @@
#define MACHINE_FLAG_TE _BITUL(11)
#define MACHINE_FLAG_TLB_LC _BITUL(12)
#define MACHINE_FLAG_VX _BITUL(13)
-#define MACHINE_FLAG_NX _BITUL(14)
-#define MACHINE_FLAG_GS _BITUL(15)
+#define MACHINE_FLAG_TLB_GUEST _BITUL(14)
+#define MACHINE_FLAG_NX _BITUL(15)
+#define MACHINE_FLAG_GS _BITUL(16)
+#define MACHINE_FLAG_SCC _BITUL(17)
#define LPP_MAGIC _BITUL(31)
#define LPP_PFAULT_PID_MASK _AC(0xffffffff, UL)
@@ -68,8 +70,10 @@ extern void detect_memory_memblock(void);
#define MACHINE_HAS_TE (S390_lowcore.machine_flags & MACHINE_FLAG_TE)
#define MACHINE_HAS_TLB_LC (S390_lowcore.machine_flags & MACHINE_FLAG_TLB_LC)
#define MACHINE_HAS_VX (S390_lowcore.machine_flags & MACHINE_FLAG_VX)
+#define MACHINE_HAS_TLB_GUEST (S390_lowcore.machine_flags & MACHINE_FLAG_TLB_GUEST)
#define MACHINE_HAS_NX (S390_lowcore.machine_flags & MACHINE_FLAG_NX)
#define MACHINE_HAS_GS (S390_lowcore.machine_flags & MACHINE_FLAG_GS)
+#define MACHINE_HAS_SCC (S390_lowcore.machine_flags & MACHINE_FLAG_SCC)
/*
* Console mode. Override with conmode=
@@ -104,9 +108,16 @@ extern void pfault_fini(void);
#define pfault_fini() do { } while (0)
#endif /* CONFIG_PFAULT */
+#ifdef CONFIG_VMCP
+void vmcp_cma_reserve(void);
+#else
+static inline void vmcp_cma_reserve(void) { }
+#endif
+
void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault);
-extern void cmma_init(void);
+void cmma_init(void);
+void cmma_init_nodat(void);
extern void (*_machine_restart)(char *command);
extern void (*_machine_halt)(void);
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index f7838ecd83c6..8182b521c42f 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -92,17 +92,11 @@ static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
typecheck(int, lp->lock);
asm volatile(
- "st %1,%0\n"
- : "+Q" (lp->lock)
- : "d" (0)
- : "cc", "memory");
-}
-
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
- while (arch_spin_is_locked(lock))
- arch_spin_relax(lock);
- smp_acquire__after_ctrl_dep();
+#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
+ " .long 0xb2fa0070\n" /* NIAI 7 */
+#endif
+ " st %1,%0\n"
+ : "=Q" (lp->lock) : "d" (0) : "cc", "memory");
}
/*
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 118535123f34..93f2eb3f277c 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -15,6 +15,8 @@
/* The value of the TOD clock for 1.1.1970. */
#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
+extern u64 clock_comparator_max;
+
/* Inline functions for clock register access. */
static inline int set_tod_clock(__u64 time)
{
@@ -126,7 +128,7 @@ static inline unsigned long long local_tick_disable(void)
unsigned long long old;
old = S390_lowcore.clock_comparator;
- S390_lowcore.clock_comparator = -1ULL;
+ S390_lowcore.clock_comparator = clock_comparator_max;
set_clock_comparator(S390_lowcore.clock_comparator);
return old;
}
@@ -174,24 +176,24 @@ static inline cycles_t get_cycles(void)
return (cycles_t) get_tod_clock() >> 2;
}
-int get_phys_clock(unsigned long long *clock);
+int get_phys_clock(unsigned long *clock);
void init_cpu_timer(void);
unsigned long long monotonic_clock(void);
-extern u64 sched_clock_base_cc;
+extern unsigned char tod_clock_base[16] __aligned(8);
/**
* get_clock_monotonic - returns current time in clock rate units
*
* The caller must ensure that preemption is disabled.
- * The clock and sched_clock_base get changed via stop_machine.
+ * The clock and tod_clock_base get changed via stop_machine.
* Therefore preemption must be disabled when calling this
* function, otherwise the returned value is not guaranteed to
* be monotonic.
*/
static inline unsigned long long get_tod_clock_monotonic(void)
{
- return get_tod_clock() - sched_clock_base_cc;
+ return get_tod_clock() - *(unsigned long long *) &tod_clock_base[1];
}
/**
@@ -218,4 +220,32 @@ static inline unsigned long long tod_to_ns(unsigned long long todval)
return ((todval >> 9) * 125) + (((todval & 0x1ff) * 125) >> 9);
}
+/**
+ * tod_after - compare two 64 bit TOD values
+ * @a: first 64 bit TOD timestamp
+ * @b: second 64 bit TOD timestamp
+ *
+ * Returns: true if a is later than b
+ */
+static inline int tod_after(unsigned long long a, unsigned long long b)
+{
+ if (MACHINE_HAS_SCC)
+ return (long long) a > (long long) b;
+ return a > b;
+}
+
+/**
+ * tod_after_eq - compare two 64 bit TOD values
+ * @a: first 64 bit TOD timestamp
+ * @b: second 64 bit TOD timestamp
+ *
+ * Returns: true if a is later than or equal to b
+ */
+static inline int tod_after_eq(unsigned long long a, unsigned long long b)
+{
+ if (MACHINE_HAS_SCC)
+ return (long long) a >= (long long) b;
+ return a >= b;
+}
+
#endif
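A note on tod_after()/tod_after_eq() above: when MACHINE_HAS_SCC is set (which, going by detect_machine_facilities() later in this patch, corresponds to facility 139 plus CR0 bit 53), the 64-bit TOD word is compared as a signed value, so timestamps taken after the word wraps still order after current, top-bit-set timestamps. A stand-alone sketch of the idea (user-space C, made-up values):

#include <stdio.h>
#include <stdint.h>

/* Illustration of the signed TOD comparison: values just before the
 * 64-bit wrap have the top bit set and compare as negative, so a
 * value taken just after the wrap still compares as "later". */
static int tod_after_signed(uint64_t a, uint64_t b)
{
	return (int64_t)a > (int64_t)b;
}

int main(void)
{
	uint64_t before_wrap = 0xffffffffffffff00ULL;	/* hypothetical */
	uint64_t after_wrap  = 0x0000000000000100ULL;	/* hypothetical */

	printf("unsigned says before_wrap is later: %d\n",
	       before_wrap > after_wrap);
	printf("signed   says after_wrap  is later: %d\n",
	       tod_after_signed(after_wrap, before_wrap));
	return 0;
}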
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 7317b3108a88..3a14b864b2e3 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -47,10 +47,9 @@ struct mmu_table_batch {
extern void tlb_table_flush(struct mmu_gather *tlb);
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
-static inline void tlb_gather_mmu(struct mmu_gather *tlb,
- struct mm_struct *mm,
- unsigned long start,
- unsigned long end)
+static inline void
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+ unsigned long start, unsigned long end)
{
tlb->mm = mm;
tlb->start = start;
@@ -76,9 +75,15 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
tlb_flush_mmu_free(tlb);
}
-static inline void tlb_finish_mmu(struct mmu_gather *tlb,
- unsigned long start, unsigned long end)
+static inline void
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+ unsigned long start, unsigned long end, bool force)
{
+ if (force) {
+ tlb->start = start;
+ tlb->end = end;
+ }
+
tlb_flush_mmu(tlb);
}
@@ -130,7 +135,7 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
unsigned long address)
{
- if (tlb->mm->context.asce_limit <= (1UL << 31))
+ if (tlb->mm->context.asce_limit <= _REGION3_SIZE)
return;
pgtable_pmd_page_dtor(virt_to_page(pmd));
tlb_remove_table(tlb, pmd);
@@ -146,7 +151,7 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
unsigned long address)
{
- if (tlb->mm->context.asce_limit <= (1UL << 53))
+ if (tlb->mm->context.asce_limit <= _REGION1_SIZE)
return;
tlb_remove_table(tlb, p4d);
}
@@ -161,7 +166,7 @@ static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
unsigned long address)
{
- if (tlb->mm->context.asce_limit <= (1UL << 42))
+ if (tlb->mm->context.asce_limit <= _REGION2_SIZE)
return;
tlb_remove_table(tlb, pud);
}
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index 39846100682a..4d759f8f4bc7 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -20,10 +20,15 @@ static inline void __tlb_flush_local(void)
*/
static inline void __tlb_flush_idte(unsigned long asce)
{
+ unsigned long opt;
+
+ opt = IDTE_PTOA;
+ if (MACHINE_HAS_TLB_GUEST)
+ opt |= IDTE_GUEST_ASCE;
/* Global TLB flush for the mm */
asm volatile(
" .insn rrf,0xb98e0000,0,%0,%1,0"
- : : "a" (2048), "a" (asce) : "cc");
+ : : "a" (opt), "a" (asce) : "cc");
}
#ifdef CONFIG_SMP
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index fa1bfce10370..5222da162b69 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -77,12 +77,6 @@ static inline const struct cpumask *cpumask_of_node(int node)
return &node_to_cpumask_map[node];
}
-/*
- * Returns the number of the node containing node 'node'. This
- * architecture is flat, so it is a pretty simple function!
- */
-#define parent_node(node) (node)
-
#define pcibus_to_node(bus) __pcibus_to_node(bus)
#define node_distance(a, b) __node_distance(a, b)
diff --git a/arch/s390/include/asm/types.h b/arch/s390/include/asm/types.h
deleted file mode 100644
index 6740f4f9781f..000000000000
--- a/arch/s390/include/asm/types.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/*
- * S390 version
- *
- * Derived from "include/asm-i386/types.h"
- */
-#ifndef _S390_TYPES_H
-#define _S390_TYPES_H
-
-#include <uapi/asm/types.h>
-
-#endif /* _S390_TYPES_H */
diff --git a/arch/s390/include/asm/unaligned.h b/arch/s390/include/asm/unaligned.h
deleted file mode 100644
index da9627afe5d8..000000000000
--- a/arch/s390/include/asm/unaligned.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef _ASM_S390_UNALIGNED_H
-#define _ASM_S390_UNALIGNED_H
-
-/*
- * The S390 can do unaligned accesses itself.
- */
-#include <linux/unaligned/access_ok.h>
-#include <linux/unaligned/generic.h>
-
-#define get_unaligned __get_unaligned_be
-#define put_unaligned __put_unaligned_be
-
-#endif /* _ASM_S390_UNALIGNED_H */
diff --git a/arch/s390/include/uapi/asm/Kbuild b/arch/s390/include/uapi/asm/Kbuild
index ca62066895e0..098f28778a13 100644
--- a/arch/s390/include/uapi/asm/Kbuild
+++ b/arch/s390/include/uapi/asm/Kbuild
@@ -9,4 +9,5 @@ generic-y += param.h
generic-y += poll.h
generic-y += resource.h
generic-y += sockios.h
+generic-y += swab.h
generic-y += termbits.h
diff --git a/arch/s390/include/uapi/asm/dasd.h b/arch/s390/include/uapi/asm/dasd.h
index 1340311dab77..ab5797cdc1b7 100644
--- a/arch/s390/include/uapi/asm/dasd.h
+++ b/arch/s390/include/uapi/asm/dasd.h
@@ -72,7 +72,10 @@ typedef struct dasd_information2_t {
* 0x02: use diag discipline (diag)
* 0x04: set the device initially online (internal use only)
* 0x08: enable ERP related logging
- * 0x20: give access to raw eckd data
+ * 0x10: allow I/O to fail on lost paths
+ * 0x20: allow I/O to fail when a lock was stolen
+ * 0x40: give access to raw eckd data
+ * 0x80: enable discard support
*/
#define DASD_FEATURE_DEFAULT 0x00
#define DASD_FEATURE_READONLY 0x01
@@ -82,6 +85,7 @@ typedef struct dasd_information2_t {
#define DASD_FEATURE_FAILFAST 0x10
#define DASD_FEATURE_FAILONSLCK 0x20
#define DASD_FEATURE_USERAW 0x40
+#define DASD_FEATURE_DISCARD 0x80
#define DASD_PARTN_BITS 2
diff --git a/arch/s390/include/uapi/asm/socket.h b/arch/s390/include/uapi/asm/socket.h
index 52a63f4175cb..a56916c83565 100644
--- a/arch/s390/include/uapi/asm/socket.h
+++ b/arch/s390/include/uapi/asm/socket.h
@@ -108,4 +108,6 @@
#define SO_PEERGROUPS 59
+#define SO_ZEROCOPY 60
+
#endif /* _ASM_SOCKET_H */
diff --git a/arch/s390/include/uapi/asm/swab.h b/arch/s390/include/uapi/asm/swab.h
deleted file mode 100644
index da3bfe5cc161..000000000000
--- a/arch/s390/include/uapi/asm/swab.h
+++ /dev/null
@@ -1,89 +0,0 @@
-#ifndef _S390_SWAB_H
-#define _S390_SWAB_H
-
-/*
- * S390 version
- * Copyright IBM Corp. 1999
- * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- */
-
-#include <linux/types.h>
-
-#ifndef __s390x__
-# define __SWAB_64_THRU_32__
-#endif
-
-#ifdef __s390x__
-static inline __u64 __arch_swab64p(const __u64 *x)
-{
- __u64 result;
-
- asm volatile("lrvg %0,%1" : "=d" (result) : "m" (*x));
- return result;
-}
-#define __arch_swab64p __arch_swab64p
-
-static inline __u64 __arch_swab64(__u64 x)
-{
- __u64 result;
-
- asm volatile("lrvgr %0,%1" : "=d" (result) : "d" (x));
- return result;
-}
-#define __arch_swab64 __arch_swab64
-
-static inline void __arch_swab64s(__u64 *x)
-{
- *x = __arch_swab64p(x);
-}
-#define __arch_swab64s __arch_swab64s
-#endif /* __s390x__ */
-
-static inline __u32 __arch_swab32p(const __u32 *x)
-{
- __u32 result;
-
- asm volatile(
-#ifndef __s390x__
- " icm %0,8,%O1+3(%R1)\n"
- " icm %0,4,%O1+2(%R1)\n"
- " icm %0,2,%O1+1(%R1)\n"
- " ic %0,%1"
- : "=&d" (result) : "Q" (*x) : "cc");
-#else /* __s390x__ */
- " lrv %0,%1"
- : "=d" (result) : "m" (*x));
-#endif /* __s390x__ */
- return result;
-}
-#define __arch_swab32p __arch_swab32p
-
-#ifdef __s390x__
-static inline __u32 __arch_swab32(__u32 x)
-{
- __u32 result;
-
- asm volatile("lrvr %0,%1" : "=d" (result) : "d" (x));
- return result;
-}
-#define __arch_swab32 __arch_swab32
-#endif /* __s390x__ */
-
-static inline __u16 __arch_swab16p(const __u16 *x)
-{
- __u16 result;
-
- asm volatile(
-#ifndef __s390x__
- " icm %0,2,%O1+1(%R1)\n"
- " ic %0,%1\n"
- : "=&d" (result) : "Q" (*x) : "cc");
-#else /* __s390x__ */
- " lrvh %0,%1"
- : "=d" (result) : "m" (*x));
-#endif /* __s390x__ */
- return result;
-}
-#define __arch_swab16p __arch_swab16p
-
-#endif /* _S390_SWAB_H */
diff --git a/drivers/s390/char/vmcp.h b/arch/s390/include/uapi/asm/vmcp.h
index 1e29b0418382..4caf71714a55 100644
--- a/drivers/s390/char/vmcp.h
+++ b/arch/s390/include/uapi/asm/vmcp.h
@@ -12,19 +12,13 @@
* The idea of this driver is based on cpint from Neale Ferguson
*/
+#ifndef _UAPI_ASM_VMCP_H
+#define _UAPI_ASM_VMCP_H
+
#include <linux/ioctl.h>
-#include <linux/mutex.h>
-#define VMCP_GETCODE _IOR(0x10, 1, int)
-#define VMCP_SETBUF _IOW(0x10, 2, int)
-#define VMCP_GETSIZE _IOR(0x10, 3, int)
+#define VMCP_GETCODE _IOR(0x10, 1, int)
+#define VMCP_SETBUF _IOW(0x10, 2, int)
+#define VMCP_GETSIZE _IOR(0x10, 3, int)
-struct vmcp_session {
- unsigned int bufsize;
- char *response;
- int resp_size;
- int resp_code;
- /* As we use copy_from/to_user, which might *
- * sleep and cannot use a spinlock */
- struct mutex mutex;
-};
+#endif /* _UAPI_ASM_VMCP_H */
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index b65c414b6c0e..3d42f91c95fd 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -158,6 +158,7 @@ int main(void)
OFFSET(__LC_LAST_UPDATE_CLOCK, lowcore, last_update_clock);
OFFSET(__LC_INT_CLOCK, lowcore, int_clock);
OFFSET(__LC_MCCK_CLOCK, lowcore, mcck_clock);
+ OFFSET(__LC_BOOT_CLOCK, lowcore, boot_clock);
OFFSET(__LC_CURRENT, lowcore, current_task);
OFFSET(__LC_KERNEL_STACK, lowcore, kernel_stack);
OFFSET(__LC_ASYNC_STACK, lowcore, async_stack);
diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c
index 9f0e4a2785f7..63bc6603e0ed 100644
--- a/arch/s390/kernel/cpcmd.c
+++ b/arch/s390/kernel/cpcmd.c
@@ -14,6 +14,7 @@
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
+#include <linux/mm.h>
#include <asm/diag.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
@@ -28,9 +29,7 @@ static int diag8_noresponse(int cmdlen)
register unsigned long reg3 asm ("3") = cmdlen;
asm volatile(
- " sam31\n"
" diag %1,%0,0x8\n"
- " sam64\n"
: "+d" (reg3) : "d" (reg2) : "cc");
return reg3;
}
@@ -43,9 +42,7 @@ static int diag8_response(int cmdlen, char *response, int *rlen)
register unsigned long reg5 asm ("5") = *rlen;
asm volatile(
- " sam31\n"
" diag %2,%0,0x8\n"
- " sam64\n"
" brc 8,1f\n"
" agr %1,%4\n"
"1:\n"
@@ -57,7 +54,6 @@ static int diag8_response(int cmdlen, char *response, int *rlen)
/*
* __cpcmd has some restrictions over cpcmd
- * - the response buffer must reside below 2GB (if any)
* - __cpcmd is unlocked and therefore not SMP-safe
*/
int __cpcmd(const char *cmd, char *response, int rlen, int *response_code)
@@ -88,13 +84,12 @@ EXPORT_SYMBOL(__cpcmd);
int cpcmd(const char *cmd, char *response, int rlen, int *response_code)
{
+ unsigned long flags;
char *lowbuf;
int len;
- unsigned long flags;
- if ((virt_to_phys(response) != (unsigned long) response) ||
- (((unsigned long)response + rlen) >> 31)) {
- lowbuf = kmalloc(rlen, GFP_KERNEL | GFP_DMA);
+ if (is_vmalloc_or_module_addr(response)) {
+ lowbuf = kmalloc(rlen, GFP_KERNEL);
if (!lowbuf) {
pr_warn("The cpcmd kernel function failed to allocate a response buffer\n");
return -ENOMEM;
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index 86b3e74f569e..1d9e83c401fc 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -866,7 +866,8 @@ static inline void
debug_finish_entry(debug_info_t * id, debug_entry_t* active, int level,
int exception)
{
- active->id.stck = get_tod_clock_fast() - sched_clock_base_cc;
+ active->id.stck = get_tod_clock_fast() -
+ *(unsigned long long *) &tod_clock_base[1];
active->id.fields.cpuid = smp_processor_id();
active->caller = __builtin_return_address(0);
active->id.fields.exception = exception;
@@ -1455,15 +1456,15 @@ int
debug_dflt_header_fn(debug_info_t * id, struct debug_view *view,
int area, debug_entry_t * entry, char *out_buf)
{
- unsigned long sec, usec;
+ unsigned long base, sec, usec;
char *except_str;
unsigned long caller;
int rc = 0;
unsigned int level;
level = entry->id.fields.level;
- sec = (entry->id.stck >> 12) + (sched_clock_base_cc >> 12);
- sec = sec - (TOD_UNIX_EPOCH >> 12);
+ base = (*(unsigned long *) &tod_clock_base[0]) >> 4;
+ sec = (entry->id.stck >> 12) + base - (TOD_UNIX_EPOCH >> 12);
usec = do_div(sec, USEC_PER_SEC);
if (entry->id.fields.exception)
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index dab78babfab6..2aa545dca4d5 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -76,7 +76,7 @@ void dump_trace(dump_trace_func_t func, void *data, struct task_struct *task,
frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
#ifdef CONFIG_CHECK_STACK
sp = __dump_trace(func, data, sp,
- S390_lowcore.panic_stack + frame_size - 4096,
+ S390_lowcore.panic_stack + frame_size - PAGE_SIZE,
S390_lowcore.panic_stack + frame_size);
#endif
sp = __dump_trace(func, data, sp,
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 5d20182ee8ae..ca8cd80e8feb 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -53,8 +53,9 @@ static void __init reset_tod_clock(void)
if (set_tod_clock(TOD_UNIX_EPOCH) != 0 || store_tod_clock(&time) != 0)
disabled_wait(0);
- sched_clock_base_cc = TOD_UNIX_EPOCH;
- S390_lowcore.last_update_clock = sched_clock_base_cc;
+ memset(tod_clock_base, 0, 16);
+ *(__u64 *) &tod_clock_base[1] = TOD_UNIX_EPOCH;
+ S390_lowcore.last_update_clock = TOD_UNIX_EPOCH;
}
#ifdef CONFIG_SHARED_KERNEL
@@ -165,8 +166,8 @@ static noinline __init void create_kernel_nss(void)
}
/* re-initialize cputime accounting. */
- sched_clock_base_cc = get_tod_clock();
- S390_lowcore.last_update_clock = sched_clock_base_cc;
+ get_tod_clock_ext(tod_clock_base);
+ S390_lowcore.last_update_clock = *(__u64 *) &tod_clock_base[1];
S390_lowcore.last_update_timer = 0x7fffffffffffffffULL;
S390_lowcore.user_timer = 0;
S390_lowcore.system_timer = 0;
@@ -387,6 +388,12 @@ static __init void detect_machine_facilities(void)
}
if (test_facility(133))
S390_lowcore.machine_flags |= MACHINE_FLAG_GS;
+ if (test_facility(139) && (tod_clock_base[1] & 0x80)) {
+ /* Enable signed clock comparator comparisons */
+ S390_lowcore.machine_flags |= MACHINE_FLAG_SCC;
+ clock_comparator_max = -1ULL >> 1;
+ __ctl_set_bit(0, 53);
+ }
}
static inline void save_vector_registers(void)
@@ -413,7 +420,7 @@ static int __init disable_vector_extension(char *str)
{
S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX;
__ctl_clear_bit(0, 17);
- return 1;
+ return 0;
}
early_param("novx", disable_vector_extension);
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index eff5b31671d4..8ed753c72d9b 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -302,7 +302,8 @@ ENTRY(startup_kdump)
xc 0xe00(256),0xe00
xc 0xf00(256),0xf00
lctlg %c0,%c15,0x200(%r0) # initialize control registers
- stck __LC_LAST_UPDATE_CLOCK
+ stcke __LC_BOOT_CLOCK
+ mvc __LC_LAST_UPDATE_CLOCK(8),__LC_BOOT_CLOCK+1
spt 6f-.LPG0(%r13)
mvc __LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13)
l %r15,.Lstack-.LPG0(%r13)
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 31c91f24e562..0d8f2a858ced 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -21,8 +21,8 @@ ENTRY(startup_continue)
xc __LC_LPP+1(7,0),__LC_LPP+1 # clear lpp and current_pid
mvi __LC_LPP,0x80 # and set LPP_MAGIC
.insn s,0xb2800000,__LC_LPP # load program parameter
-0: larl %r1,sched_clock_base_cc
- mvc 0(8,%r1),__LC_LAST_UPDATE_CLOCK
+0: larl %r1,tod_clock_base
+ mvc 0(16,%r1),__LC_BOOT_CLOCK
larl %r13,.LPG1 # get base
lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
lg %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 6dca93b29bed..a2fdff0e730b 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -105,7 +105,8 @@ void do_IRQ(struct pt_regs *regs, int irq)
old_regs = set_irq_regs(regs);
irq_enter();
- if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
+ if (tod_after_eq(S390_lowcore.int_clock,
+ S390_lowcore.clock_comparator))
/* Serve timer interrupts first. */
clock_comparator_work();
generic_handle_irq(irq);
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 0c82f7903fc7..c1bf75ffb875 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -998,7 +998,7 @@ static int perf_push_sample(struct perf_event *event, struct sf_raw_sample *sfr)
psw_bits(regs.psw).ia = sfr->basic.ia;
psw_bits(regs.psw).dat = sfr->basic.T;
psw_bits(regs.psw).wait = sfr->basic.W;
- psw_bits(regs.psw).per = sfr->basic.P;
+ psw_bits(regs.psw).pstate = sfr->basic.P;
psw_bits(regs.psw).as = sfr->basic.AS;
/*
diff --git a/arch/s390/kernel/relocate_kernel.S b/arch/s390/kernel/relocate_kernel.S
index cfac28330b03..4bdc65636603 100644
--- a/arch/s390/kernel/relocate_kernel.S
+++ b/arch/s390/kernel/relocate_kernel.S
@@ -7,6 +7,7 @@
*/
#include <linux/linkage.h>
+#include <asm/page.h>
#include <asm/sigp.h>
/*
@@ -55,8 +56,8 @@ ENTRY(relocate_kernel)
.back_pgm:
lmg %r0,%r15,gprregs-.base(%r13)
.top:
- lghi %r7,4096 # load PAGE_SIZE in r7
- lghi %r9,4096 # load PAGE_SIZE in r9
+ lghi %r7,PAGE_SIZE # load PAGE_SIZE in r7
+ lghi %r9,PAGE_SIZE # load PAGE_SIZE in r9
lg %r5,0(%r2) # read another word for indirection page
aghi %r2,8 # increment pointer
tml %r5,0x1 # is it a destination page?
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 3d1d808ea8a9..164a1e16b53e 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -305,7 +305,7 @@ static void __init setup_lowcore(void)
/*
* Setup lowcore for boot cpu
*/
- BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * 4096);
+ BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * PAGE_SIZE);
lc = memblock_virt_alloc_low(sizeof(*lc), sizeof(*lc));
lc->restart_psw.mask = PSW_KERNEL_BITS;
lc->restart_psw.addr = (unsigned long) restart_int_handler;
@@ -323,7 +323,7 @@ static void __init setup_lowcore(void)
lc->io_new_psw.mask = PSW_KERNEL_BITS |
PSW_MASK_DAT | PSW_MASK_MCHECK;
lc->io_new_psw.addr = (unsigned long) io_int_handler;
- lc->clock_comparator = -1ULL;
+ lc->clock_comparator = clock_comparator_max;
lc->kernel_stack = ((unsigned long) &init_thread_union)
+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->async_stack = (unsigned long)
@@ -469,10 +469,10 @@ static void __init setup_memory_end(void)
vmalloc_size = VMALLOC_END ?: (128UL << 30) - MODULES_LEN;
tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE;
tmp = tmp * (sizeof(struct page) + PAGE_SIZE);
- if (tmp + vmalloc_size + MODULES_LEN <= (1UL << 42))
- vmax = 1UL << 42; /* 3-level kernel page table */
+ if (tmp + vmalloc_size + MODULES_LEN <= _REGION2_SIZE)
+ vmax = _REGION2_SIZE; /* 3-level kernel page table */
else
- vmax = 1UL << 53; /* 4-level kernel page table */
+ vmax = _REGION1_SIZE; /* 4-level kernel page table */
/* module area is at the end of the kernel address space. */
MODULES_END = vmax;
MODULES_VADDR = MODULES_END - MODULES_LEN;
@@ -818,6 +818,9 @@ static int __init setup_hwcaps(void)
case 0x2965:
strcpy(elf_platform, "z13");
break;
+ case 0x3906:
+ strcpy(elf_platform, "z14");
+ break;
}
/*
@@ -922,6 +925,7 @@ void __init setup_arch(char **cmdline_p)
setup_memory_end();
setup_memory();
dma_contiguous_reserve(memory_end);
+ vmcp_cma_reserve();
check_initrd();
reserve_crashkernel();
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 1020a11a24e5..1cee6753d47a 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -1181,6 +1181,7 @@ static int __init s390_smp_init(void)
rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "s390/smp:online",
smp_cpu_online, smp_cpu_pre_down);
+ rc = rc <= 0 ? rc : 0;
out:
return rc;
}
diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c
index 39e2f41b6cf0..c8ea715bfe10 100644
--- a/arch/s390/kernel/suspend.c
+++ b/arch/s390/kernel/suspend.c
@@ -98,10 +98,16 @@ int page_key_alloc(unsigned long pages)
*/
void page_key_read(unsigned long *pfn)
{
+ struct page *page;
unsigned long addr;
-
- addr = (unsigned long) page_address(pfn_to_page(*pfn));
- *(unsigned char *) pfn = (unsigned char) page_get_storage_key(addr);
+ unsigned char key;
+
+ page = pfn_to_page(*pfn);
+ addr = (unsigned long) page_address(page);
+ key = (unsigned char) page_get_storage_key(addr) & 0x7f;
+ if (arch_test_page_nodat(page))
+ key |= 0x80;
+ *(unsigned char *) pfn = key;
}
/*
@@ -126,8 +132,16 @@ void page_key_memorize(unsigned long *pfn)
*/
void page_key_write(void *address)
{
- page_set_storage_key((unsigned long) address,
- page_key_rp->data[page_key_rx], 0);
+ struct page *page;
+ unsigned char key;
+
+ key = page_key_rp->data[page_key_rx];
+ page_set_storage_key((unsigned long) address, key & 0x7f, 0);
+ page = virt_to_page(address);
+ if (key & 0x80)
+ arch_set_page_nodat(page, 0);
+ else
+ arch_set_page_dat(page, 0);
if (++page_key_rx >= PAGE_KEY_DATA_SIZE)
return;
page_key_rp = page_key_rp->next;
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 192efdfac918..5cbd52169348 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -51,8 +51,15 @@
#include <asm/cio.h>
#include "entry.h"
-u64 sched_clock_base_cc = -1; /* Force to data section. */
-EXPORT_SYMBOL_GPL(sched_clock_base_cc);
+unsigned char tod_clock_base[16] __aligned(8) = {
+ /* Force to data section. */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+EXPORT_SYMBOL_GPL(tod_clock_base);
+
+u64 clock_comparator_max = -1ULL;
+EXPORT_SYMBOL_GPL(clock_comparator_max);
static DEFINE_PER_CPU(struct clock_event_device, comparators);
@@ -75,7 +82,7 @@ void __init time_early_init(void)
struct ptff_qui qui;
/* Initialize TOD steering parameters */
- tod_steering_end = sched_clock_base_cc;
+ tod_steering_end = *(unsigned long long *) &tod_clock_base[1];
vdso_data->ts_end = tod_steering_end;
if (!test_facility(28))
@@ -111,22 +118,27 @@ unsigned long long monotonic_clock(void)
}
EXPORT_SYMBOL(monotonic_clock);
-static void tod_to_timeval(__u64 todval, struct timespec64 *xt)
+static void ext_to_timespec64(unsigned char *clk, struct timespec64 *xt)
{
- unsigned long long sec;
+ unsigned long long high, low, rem, sec, nsec;
+
+ /* Split extended TOD clock into micro-seconds and sub-micro-seconds */
+ high = (*(unsigned long long *) clk) >> 4;
+ low = (*(unsigned long long *)&clk[7]) << 4;
+ /* Calculate seconds and nano-seconds */
+ sec = high;
+ rem = do_div(sec, 1000000);
+ nsec = (((low >> 32) + (rem << 32)) * 1000) >> 32;
- sec = todval >> 12;
- do_div(sec, 1000000);
xt->tv_sec = sec;
- todval -= (sec * 1000000) << 12;
- xt->tv_nsec = ((todval * 1000) >> 12);
+ xt->tv_nsec = nsec;
}
void clock_comparator_work(void)
{
struct clock_event_device *cd;
- S390_lowcore.clock_comparator = -1ULL;
+ S390_lowcore.clock_comparator = clock_comparator_max;
cd = this_cpu_ptr(&comparators);
cd->event_handler(cd);
}
@@ -148,7 +160,7 @@ void init_cpu_timer(void)
struct clock_event_device *cd;
int cpu;
- S390_lowcore.clock_comparator = -1ULL;
+ S390_lowcore.clock_comparator = clock_comparator_max;
set_clock_comparator(S390_lowcore.clock_comparator);
cpu = smp_processor_id();
@@ -179,7 +191,7 @@ static void clock_comparator_interrupt(struct ext_code ext_code,
unsigned long param64)
{
inc_irq_stat(IRQEXT_CLK);
- if (S390_lowcore.clock_comparator == -1ULL)
+ if (S390_lowcore.clock_comparator == clock_comparator_max)
set_clock_comparator(S390_lowcore.clock_comparator);
}
@@ -197,18 +209,28 @@ static void stp_reset(void);
void read_persistent_clock64(struct timespec64 *ts)
{
- __u64 clock;
+ unsigned char clk[STORE_CLOCK_EXT_SIZE];
+ __u64 delta;
- clock = get_tod_clock() - initial_leap_seconds;
- tod_to_timeval(clock - TOD_UNIX_EPOCH, ts);
+ delta = initial_leap_seconds + TOD_UNIX_EPOCH;
+ get_tod_clock_ext(clk);
+ *(__u64 *) &clk[1] -= delta;
+ if (*(__u64 *) &clk[1] > delta)
+ clk[0]--;
+ ext_to_timespec64(clk, ts);
}
void read_boot_clock64(struct timespec64 *ts)
{
- __u64 clock;
+ unsigned char clk[STORE_CLOCK_EXT_SIZE];
+ __u64 delta;
- clock = sched_clock_base_cc - initial_leap_seconds;
- tod_to_timeval(clock - TOD_UNIX_EPOCH, ts);
+ delta = initial_leap_seconds + TOD_UNIX_EPOCH;
+ memcpy(clk, tod_clock_base, 16);
+ *(__u64 *) &clk[1] -= delta;
+ if (*(__u64 *) &clk[1] > delta)
+ clk[0]--;
+ ext_to_timespec64(clk, ts);
}
static u64 read_tod_clock(struct clocksource *cs)
@@ -335,7 +357,7 @@ static unsigned long clock_sync_flags;
* source. If the clock mode is local it will return -EOPNOTSUPP and
* -EAGAIN if the clock is not in sync with the external reference.
*/
-int get_phys_clock(unsigned long long *clock)
+int get_phys_clock(unsigned long *clock)
{
atomic_t *sw_ptr;
unsigned int sw0, sw1;
@@ -406,7 +428,10 @@ static void clock_sync_global(unsigned long long delta)
struct ptff_qto qto;
/* Fixup the monotonic sched clock. */
- sched_clock_base_cc += delta;
+ *(unsigned long long *) &tod_clock_base[1] += delta;
+ if (*(unsigned long long *) &tod_clock_base[1] < delta)
+ /* Epoch overflow */
+ tod_clock_base[0]++;
/* Adjust TOD steering parameters. */
vdso_data->tb_update_count++;
now = get_tod_clock();
@@ -437,7 +462,7 @@ static void clock_sync_global(unsigned long long delta)
static void clock_sync_local(unsigned long long delta)
{
/* Add the delta to the clock comparator. */
- if (S390_lowcore.clock_comparator != -1ULL) {
+ if (S390_lowcore.clock_comparator != clock_comparator_max) {
S390_lowcore.clock_comparator += delta;
set_clock_comparator(S390_lowcore.clock_comparator);
}
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index b89d19f6f2ab..eacda05b45d7 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -157,6 +157,8 @@ int vdso_alloc_per_cpu(struct lowcore *lowcore)
page_frame = get_zeroed_page(GFP_KERNEL);
if (!segment_table || !page_table || !page_frame)
goto out;
+ arch_set_page_dat(virt_to_page(segment_table), SEGMENT_ORDER);
+ arch_set_page_dat(virt_to_page(page_table), 0);
/* Initialize per-cpu vdso data page */
vd = (struct vdso_per_cpu_data *) page_frame;
diff --git a/arch/s390/kernel/vdso32/vdso32.lds.S b/arch/s390/kernel/vdso32/vdso32.lds.S
index 8f048c2d6d13..263a7f9eee1e 100644
--- a/arch/s390/kernel/vdso32/vdso32.lds.S
+++ b/arch/s390/kernel/vdso32/vdso32.lds.S
@@ -2,6 +2,8 @@
* This is the infamous ld script for the 32 bits vdso
* library
*/
+
+#include <asm/page.h>
#include <asm/vdso.h>
OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390")
@@ -91,7 +93,7 @@ SECTIONS
.debug_ranges 0 : { *(.debug_ranges) }
.gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
- . = ALIGN(4096);
+ . = ALIGN(PAGE_SIZE);
PROVIDE(_vdso_data = .);
/DISCARD/ : {
diff --git a/arch/s390/kernel/vdso64/vdso64.lds.S b/arch/s390/kernel/vdso64/vdso64.lds.S
index f35455d497fe..9e3dbbcc1cfc 100644
--- a/arch/s390/kernel/vdso64/vdso64.lds.S
+++ b/arch/s390/kernel/vdso64/vdso64.lds.S
@@ -2,6 +2,8 @@
* This is the infamous ld script for the 64 bits vdso
* library
*/
+
+#include <asm/page.h>
#include <asm/vdso.h>
OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
@@ -91,7 +93,7 @@ SECTIONS
.debug_ranges 0 : { *(.debug_ranges) }
.gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
- . = ALIGN(4096);
+ . = ALIGN(PAGE_SIZE);
PROVIDE(_vdso_data = .);
/DISCARD/ : {
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index ce865bd4f81d..e4d36094aceb 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -27,7 +27,7 @@ static int diag_release_pages(struct kvm_vcpu *vcpu)
unsigned long prefix = kvm_s390_get_prefix(vcpu);
start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
- end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + 4096;
+ end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + PAGE_SIZE;
vcpu->stat.diagnose_10++;
if (start & ~PAGE_MASK || end & ~PAGE_MASK || start >= end
@@ -51,9 +51,9 @@ static int diag_release_pages(struct kvm_vcpu *vcpu)
*/
gmap_discard(vcpu->arch.gmap, start, prefix);
if (start <= prefix)
- gmap_discard(vcpu->arch.gmap, 0, 4096);
- if (end > prefix + 4096)
- gmap_discard(vcpu->arch.gmap, 4096, 8192);
+ gmap_discard(vcpu->arch.gmap, 0, PAGE_SIZE);
+ if (end > prefix + PAGE_SIZE)
+ gmap_discard(vcpu->arch.gmap, PAGE_SIZE, 2 * PAGE_SIZE);
gmap_discard(vcpu->arch.gmap, prefix + 2 * PAGE_SIZE, end);
}
return 0;
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 653cae5e1ee1..3cc77391a102 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -629,7 +629,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
iep = ctlreg0.iep && test_kvm_facility(vcpu->kvm, 130);
if (asce.r)
goto real_address;
- ptr = asce.origin * 4096;
+ ptr = asce.origin * PAGE_SIZE;
switch (asce.dt) {
case ASCE_TYPE_REGION1:
if (vaddr.rfx01 > asce.tl)
@@ -674,7 +674,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
return PGM_REGION_SECOND_TRANS;
if (edat1)
dat_protection |= rfte.p;
- ptr = rfte.rto * 4096 + vaddr.rsx * 8;
+ ptr = rfte.rto * PAGE_SIZE + vaddr.rsx * 8;
}
/* fallthrough */
case ASCE_TYPE_REGION2: {
@@ -692,7 +692,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
return PGM_REGION_THIRD_TRANS;
if (edat1)
dat_protection |= rste.p;
- ptr = rste.rto * 4096 + vaddr.rtx * 8;
+ ptr = rste.rto * PAGE_SIZE + vaddr.rtx * 8;
}
/* fallthrough */
case ASCE_TYPE_REGION3: {
@@ -720,7 +720,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
return PGM_SEGMENT_TRANSLATION;
if (edat1)
dat_protection |= rtte.fc0.p;
- ptr = rtte.fc0.sto * 4096 + vaddr.sx * 8;
+ ptr = rtte.fc0.sto * PAGE_SIZE + vaddr.sx * 8;
}
/* fallthrough */
case ASCE_TYPE_SEGMENT: {
@@ -743,7 +743,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
goto absolute_address;
}
dat_protection |= ste.fc0.p;
- ptr = ste.fc0.pto * 2048 + vaddr.px * 8;
+ ptr = ste.fc0.pto * (PAGE_SIZE / 2) + vaddr.px * 8;
}
}
if (kvm_is_error_gpa(vcpu->kvm, ptr))
@@ -993,7 +993,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
parent = sg->parent;
vaddr.addr = saddr;
asce.val = sg->orig_asce;
- ptr = asce.origin * 4096;
+ ptr = asce.origin * PAGE_SIZE;
if (asce.r) {
*fake = 1;
ptr = 0;
@@ -1029,7 +1029,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
union region1_table_entry rfte;
if (*fake) {
- ptr += (unsigned long) vaddr.rfx << 53;
+ ptr += vaddr.rfx * _REGION1_SIZE;
rfte.val = ptr;
goto shadow_r2t;
}
@@ -1044,7 +1044,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
return PGM_REGION_SECOND_TRANS;
if (sg->edat_level >= 1)
*dat_protection |= rfte.p;
- ptr = rfte.rto << 12UL;
+ ptr = rfte.rto * PAGE_SIZE;
shadow_r2t:
rc = gmap_shadow_r2t(sg, saddr, rfte.val, *fake);
if (rc)
@@ -1055,7 +1055,7 @@ shadow_r2t:
union region2_table_entry rste;
if (*fake) {
- ptr += (unsigned long) vaddr.rsx << 42;
+ ptr += vaddr.rsx * _REGION2_SIZE;
rste.val = ptr;
goto shadow_r3t;
}
@@ -1070,7 +1070,7 @@ shadow_r2t:
return PGM_REGION_THIRD_TRANS;
if (sg->edat_level >= 1)
*dat_protection |= rste.p;
- ptr = rste.rto << 12UL;
+ ptr = rste.rto * PAGE_SIZE;
shadow_r3t:
rste.p |= *dat_protection;
rc = gmap_shadow_r3t(sg, saddr, rste.val, *fake);
@@ -1082,7 +1082,7 @@ shadow_r3t:
union region3_table_entry rtte;
if (*fake) {
- ptr += (unsigned long) vaddr.rtx << 31;
+ ptr += vaddr.rtx * _REGION3_SIZE;
rtte.val = ptr;
goto shadow_sgt;
}
@@ -1098,7 +1098,7 @@ shadow_r3t:
if (rtte.fc && sg->edat_level >= 2) {
*dat_protection |= rtte.fc0.p;
*fake = 1;
- ptr = rtte.fc1.rfaa << 31UL;
+ ptr = rtte.fc1.rfaa * _REGION3_SIZE;
rtte.val = ptr;
goto shadow_sgt;
}
@@ -1106,7 +1106,7 @@ shadow_r3t:
return PGM_SEGMENT_TRANSLATION;
if (sg->edat_level >= 1)
*dat_protection |= rtte.fc0.p;
- ptr = rtte.fc0.sto << 12UL;
+ ptr = rtte.fc0.sto * PAGE_SIZE;
shadow_sgt:
rtte.fc0.p |= *dat_protection;
rc = gmap_shadow_sgt(sg, saddr, rtte.val, *fake);
@@ -1118,7 +1118,7 @@ shadow_sgt:
union segment_table_entry ste;
if (*fake) {
- ptr += (unsigned long) vaddr.sx << 20;
+ ptr += vaddr.sx * _SEGMENT_SIZE;
ste.val = ptr;
goto shadow_pgt;
}
@@ -1134,11 +1134,11 @@ shadow_sgt:
*dat_protection |= ste.fc0.p;
if (ste.fc && sg->edat_level >= 1) {
*fake = 1;
- ptr = ste.fc1.sfaa << 20UL;
+ ptr = ste.fc1.sfaa * _SEGMENT_SIZE;
ste.val = ptr;
goto shadow_pgt;
}
- ptr = ste.fc0.pto << 11UL;
+ ptr = ste.fc0.pto * (PAGE_SIZE / 2);
shadow_pgt:
ste.fc0.p |= *dat_protection;
rc = gmap_shadow_pgt(sg, saddr, ste.val, *fake);
@@ -1187,8 +1187,7 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
vaddr.addr = saddr;
if (fake) {
- /* offset in 1MB guest memory block */
- pte.val = pgt + ((unsigned long) vaddr.px << 12UL);
+ pte.val = pgt + vaddr.px * PAGE_SIZE;
goto shadow_page;
}
if (!rc)
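The arithmetic rewrites in guest_translate() and kvm_s390_shadow_tables() above are pure notation changes; a quick stand-alone check (plain C, hypothetical origin value) that the new multiplications match the shifts they replace:

#include <assert.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long origin = 0x1234;	/* hypothetical table-origin field */

	/* Region/segment table origins count 4 KiB units, page table
	 * origins count 2 KiB units, so the multiplications equal the
	 * old shifts. */
	assert(origin * PAGE_SIZE == (origin << 12));
	assert(origin * (PAGE_SIZE / 2) == (origin << 11));
	return 0;
}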
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 3f2884e99ed4..af09d3437631 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1324,7 +1324,7 @@ static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
uint8_t *keys;
uint64_t hva;
- int i, r = 0;
+ int srcu_idx, i, r = 0;
if (args->flags != 0)
return -EINVAL;
@@ -1342,6 +1342,7 @@ static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
return -ENOMEM;
down_read(&current->mm->mmap_sem);
+ srcu_idx = srcu_read_lock(&kvm->srcu);
for (i = 0; i < args->count; i++) {
hva = gfn_to_hva(kvm, args->start_gfn + i);
if (kvm_is_error_hva(hva)) {
@@ -1353,6 +1354,7 @@ static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
if (r)
break;
}
+ srcu_read_unlock(&kvm->srcu, srcu_idx);
up_read(&current->mm->mmap_sem);
if (!r) {
@@ -1370,7 +1372,7 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
uint8_t *keys;
uint64_t hva;
- int i, r = 0;
+ int srcu_idx, i, r = 0;
if (args->flags != 0)
return -EINVAL;
@@ -1396,6 +1398,7 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
goto out;
down_read(&current->mm->mmap_sem);
+ srcu_idx = srcu_read_lock(&kvm->srcu);
for (i = 0; i < args->count; i++) {
hva = gfn_to_hva(kvm, args->start_gfn + i);
if (kvm_is_error_hva(hva)) {
@@ -1413,6 +1416,7 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
if (r)
break;
}
+ srcu_read_unlock(&kvm->srcu, srcu_idx);
up_read(&current->mm->mmap_sem);
out:
kvfree(keys);
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 8a1dac793d6b..785ad028bde6 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -329,7 +329,7 @@ static int handle_sske(struct kvm_vcpu *vcpu)
start = kvm_s390_logical_to_effective(vcpu, start);
if (m3 & SSKE_MB) {
/* start already designates an absolute address */
- end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
+ end = (start + _SEGMENT_SIZE) & ~(_SEGMENT_SIZE - 1);
} else {
start = kvm_s390_real_to_abs(vcpu, start);
end = start + PAGE_SIZE;
@@ -893,10 +893,10 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
case 0x00000000:
/* only 4k frames specify a real address */
start = kvm_s390_real_to_abs(vcpu, start);
- end = (start + (1UL << 12)) & ~((1UL << 12) - 1);
+ end = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
break;
case 0x00001000:
- end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
+ end = (start + _SEGMENT_SIZE) & ~(_SEGMENT_SIZE - 1);
break;
case 0x00002000:
/* only support 2G frame size if EDAT2 is available and we are
@@ -904,7 +904,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
if (!test_kvm_facility(vcpu->kvm, 78) ||
psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_24BIT)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
+ end = (start + _REGION3_SIZE) & ~(_REGION3_SIZE - 1);
break;
default:
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
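/*
 * The conversions above only replace open-coded shifts with the named s390
 * block sizes; the rounding arithmetic is unchanged.  For a power-of-two
 * SIZE, (start + SIZE) & ~(SIZE - 1) is the next SIZE-aligned address above
 * start.  A stand-alone sketch, assuming the values implied by the constants
 * that were replaced (PAGE_SIZE = 1UL << 12, _SEGMENT_SIZE = 1UL << 20,
 * _REGION3_SIZE = 1UL << 31):
 */
#include <assert.h>

#define SKETCH_PAGE_SIZE	(1ULL << 12)
#define SKETCH_SEGMENT_SIZE	(1ULL << 20)
#define SKETCH_REGION3_SIZE	(1ULL << 31)

static unsigned long long next_boundary(unsigned long long start,
					unsigned long long size)
{
	return (start + size) & ~(size - 1);	/* same form as handle_pfmf() */
}

int main(void)
{
	assert(next_boundary(0x12345, SKETCH_PAGE_SIZE) == 0x13000);
	assert(next_boundary(0x12345, SKETCH_SEGMENT_SIZE) == 0x100000);
	assert(next_boundary(0x12345, SKETCH_REGION3_SIZE) == 0x80000000ULL);
	return 0;
}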
diff --git a/arch/s390/kvm/sthyi.c b/arch/s390/kvm/sthyi.c
index 926b5244263e..a2e5c24f47a7 100644
--- a/arch/s390/kvm/sthyi.c
+++ b/arch/s390/kvm/sthyi.c
@@ -394,7 +394,7 @@ static int sthyi(u64 vaddr)
"srl %[cc],28\n"
: [cc] "=d" (cc)
: [code] "d" (code), [addr] "a" (addr)
- : "memory", "cc");
+ : "3", "memory", "cc");
return cc;
}
@@ -425,7 +425,7 @@ int handle_sthyi(struct kvm_vcpu *vcpu)
VCPU_EVENT(vcpu, 3, "STHYI: fc: %llu addr: 0x%016llx", code, addr);
trace_kvm_s390_handle_sthyi(vcpu, code, addr);
- if (reg1 == reg2 || reg1 & 1 || reg2 & 1 || addr & ~PAGE_MASK)
+ if (reg1 == reg2 || reg1 & 1 || reg2 & 1)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
if (code & 0xffff) {
@@ -433,6 +433,9 @@ int handle_sthyi(struct kvm_vcpu *vcpu)
goto out;
}
+ if (addr & ~PAGE_MASK)
+ return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
/*
* If the page has not yet been faulted in, we want to do that
* now and not after all the expensive calculations.
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index 715c19c45d9a..ba8203e4d516 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -1069,7 +1069,7 @@ int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu)
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- BUILD_BUG_ON(sizeof(struct vsie_page) != 4096);
+ BUILD_BUG_ON(sizeof(struct vsie_page) != PAGE_SIZE);
scb_addr = kvm_s390_get_base_disp_s(vcpu, NULL);
/* 512 byte alignment */
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index 92e90e40b6fb..7f17555ad4d5 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -57,7 +57,7 @@ static void __udelay_enabled(unsigned long long usecs)
end = get_tod_clock_fast() + (usecs << 12);
do {
clock_saved = 0;
- if (end < S390_lowcore.clock_comparator) {
+ if (tod_after(S390_lowcore.clock_comparator, end)) {
clock_saved = local_tick_disable();
set_clock_comparator(end);
}
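/*
 * The __udelay_enabled() change stops comparing TOD values with a plain
 * unsigned '<' and asks tod_after() whether the current clock comparator
 * already lies past 'end'.  tod_after() itself is defined in asm/timex.h
 * and is not shown in this hunk; the helper below is only an illustration
 * of why a signed interpretation of the difference is the usual wrap-safe
 * ordering test (valid while the two values are less than half the counter
 * range apart), not a claim about the actual kernel definition:
 */
static inline int sketch_tod_after(unsigned long long a, unsigned long long b)
{
	return (long long)(a - b) > 0;	/* "a is later than b", wrap-safe */
}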
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index ffb15bd4c593..b12663d653d8 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -32,42 +32,63 @@ static int __init spin_retry_setup(char *str)
}
__setup("spin_retry=", spin_retry_setup);
+static inline int arch_load_niai4(int *lock)
+{
+ int owner;
+
+ asm volatile(
+#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
+ " .long 0xb2fa0040\n" /* NIAI 4 */
+#endif
+ " l %0,%1\n"
+ : "=d" (owner) : "Q" (*lock) : "memory");
+ return owner;
+}
+
+static inline int arch_cmpxchg_niai8(int *lock, int old, int new)
+{
+ int expected = old;
+
+ asm volatile(
+#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
+ " .long 0xb2fa0080\n" /* NIAI 8 */
+#endif
+ " cs %0,%3,%1\n"
+ : "=d" (old), "=Q" (*lock)
+ : "0" (old), "d" (new), "Q" (*lock)
+ : "cc", "memory");
+ return expected == old;
+}
+
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
int cpu = SPINLOCK_LOCKVAL;
- int owner, count, first_diag;
+ int owner, count;
+
+ /* Pass the virtual CPU to the lock holder if it is not running */
+ owner = arch_load_niai4(&lp->lock);
+ if (owner && arch_vcpu_is_preempted(~owner))
+ smp_yield_cpu(~owner);
- first_diag = 1;
+ count = spin_retry;
while (1) {
- owner = ACCESS_ONCE(lp->lock);
+ owner = arch_load_niai4(&lp->lock);
/* Try to get the lock if it is free. */
if (!owner) {
- if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
+ if (arch_cmpxchg_niai8(&lp->lock, 0, cpu))
return;
continue;
}
- /* First iteration: check if the lock owner is running. */
- if (first_diag && arch_vcpu_is_preempted(~owner)) {
- smp_yield_cpu(~owner);
- first_diag = 0;
+ if (count-- >= 0)
continue;
- }
- /* Loop for a while on the lock value. */
count = spin_retry;
- do {
- owner = ACCESS_ONCE(lp->lock);
- } while (owner && count-- > 0);
- if (!owner)
- continue;
/*
* For multiple layers of hypervisors, e.g. z/VM + LPAR
* yield the CPU unconditionally. For LPAR rely on the
* sense running status.
*/
- if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
+ if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner))
smp_yield_cpu(~owner);
- first_diag = 0;
- }
}
}
EXPORT_SYMBOL(arch_spin_lock_wait);
@@ -75,42 +96,36 @@ EXPORT_SYMBOL(arch_spin_lock_wait);
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
int cpu = SPINLOCK_LOCKVAL;
- int owner, count, first_diag;
+ int owner, count;
local_irq_restore(flags);
- first_diag = 1;
+
+ /* Pass the virtual CPU to the lock holder if it is not running */
+ owner = arch_load_niai4(&lp->lock);
+ if (owner && arch_vcpu_is_preempted(~owner))
+ smp_yield_cpu(~owner);
+
+ count = spin_retry;
while (1) {
- owner = ACCESS_ONCE(lp->lock);
+ owner = arch_load_niai4(&lp->lock);
/* Try to get the lock if it is free. */
if (!owner) {
local_irq_disable();
- if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
+ if (arch_cmpxchg_niai8(&lp->lock, 0, cpu))
return;
local_irq_restore(flags);
continue;
}
- /* Check if the lock owner is running. */
- if (first_diag && arch_vcpu_is_preempted(~owner)) {
- smp_yield_cpu(~owner);
- first_diag = 0;
+ if (count-- >= 0)
continue;
- }
- /* Loop for a while on the lock value. */
count = spin_retry;
- do {
- owner = ACCESS_ONCE(lp->lock);
- } while (owner && count-- > 0);
- if (!owner)
- continue;
/*
* For multiple layers of hypervisors, e.g. z/VM + LPAR
* yield the CPU unconditionally. For LPAR rely on the
* sense running status.
*/
- if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
+ if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner))
smp_yield_cpu(~owner);
- first_diag = 0;
- }
}
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);
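/*
 * Structurally, the rewritten wait loops are a "load, then compare-and-swap
 * only when the lock looks free" spinlock with a bounded spin before each
 * yield, plus one up-front yield if the owning virtual CPU is not running.
 * The NIAI 4/8 prefixes emitted as raw opcodes above are s390 cache-access
 * hints with no portable equivalent.  A generic user-space sketch of the
 * same control flow, using C11 atomics and sched_yield() as a stand-in for
 * the directed smp_yield_cpu(~owner) (assumes 'me' is a non-zero CPU id):
 */
#include <stdatomic.h>
#include <sched.h>

#define SKETCH_SPIN_RETRY 1000		/* stand-in for the spin_retry tunable */

static void sketch_spin_lock(atomic_int *lock, int me)
{
	int owner, count = SKETCH_SPIN_RETRY;

	owner = atomic_load_explicit(lock, memory_order_relaxed);
	if (owner)
		sched_yield();		/* pass the CPU to the holder once */

	for (;;) {
		owner = atomic_load_explicit(lock, memory_order_relaxed);
		if (!owner) {
			int expected = 0;

			if (atomic_compare_exchange_strong(lock, &expected, me))
				return;	/* lock acquired */
			continue;
		}
		if (count-- >= 0)
			continue;	/* keep spinning on plain loads */
		count = SKETCH_SPIN_RETRY;
		sched_yield();		/* give up the CPU before retrying */
	}
}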
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index b3bd3f23b8e8..4ea9106417ee 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -15,8 +15,30 @@
#include <asm/mmu_context.h>
#include <asm/facility.h>
+#ifndef CONFIG_HAVE_MARCH_Z10_FEATURES
static DEFINE_STATIC_KEY_FALSE(have_mvcos);
+static int __init uaccess_init(void)
+{
+ if (test_facility(27))
+ static_branch_enable(&have_mvcos);
+ return 0;
+}
+early_initcall(uaccess_init);
+
+static inline int copy_with_mvcos(void)
+{
+ if (static_branch_likely(&have_mvcos))
+ return 1;
+ return 0;
+}
+#else
+static inline int copy_with_mvcos(void)
+{
+ return 1;
+}
+#endif
+
static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr,
unsigned long size)
{
@@ -84,7 +106,7 @@ static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
- if (static_branch_likely(&have_mvcos))
+ if (copy_with_mvcos())
return copy_from_user_mvcos(to, from, n);
return copy_from_user_mvcp(to, from, n);
}
@@ -157,7 +179,7 @@ static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
- if (static_branch_likely(&have_mvcos))
+ if (copy_with_mvcos())
return copy_to_user_mvcos(to, from, n);
return copy_to_user_mvcs(to, from, n);
}
@@ -220,7 +242,7 @@ static inline unsigned long copy_in_user_mvc(void __user *to, const void __user
unsigned long raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
- if (static_branch_likely(&have_mvcos))
+ if (copy_with_mvcos())
return copy_in_user_mvcos(to, from, n);
return copy_in_user_mvc(to, from, n);
}
@@ -292,7 +314,7 @@ static inline unsigned long clear_user_xc(void __user *to, unsigned long size)
unsigned long __clear_user(void __user *to, unsigned long size)
{
- if (static_branch_likely(&have_mvcos))
+ if (copy_with_mvcos())
return clear_user_mvcos(to, size);
return clear_user_xc(to, size);
}
@@ -349,11 +371,3 @@ long __strncpy_from_user(char *dst, const char __user *src, long size)
return done;
}
EXPORT_SYMBOL(__strncpy_from_user);
-
-static int __init uaccess_init(void)
-{
- if (test_facility(27))
- static_branch_enable(&have_mvcos);
- return 0;
-}
-early_initcall(uaccess_init);
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 14f25798b001..bdabb013537b 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -135,7 +135,7 @@ static void dump_pagetable(unsigned long asce, unsigned long address)
pr_alert("AS:%016lx ", asce);
switch (asce & _ASCE_TYPE_MASK) {
case _ASCE_TYPE_REGION1:
- table = table + ((address >> 53) & 0x7ff);
+ table += (address & _REGION1_INDEX) >> _REGION1_SHIFT;
if (bad_address(table))
goto bad;
pr_cont("R1:%016lx ", *table);
@@ -144,7 +144,7 @@ static void dump_pagetable(unsigned long asce, unsigned long address)
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
/* fallthrough */
case _ASCE_TYPE_REGION2:
- table = table + ((address >> 42) & 0x7ff);
+ table += (address & _REGION2_INDEX) >> _REGION2_SHIFT;
if (bad_address(table))
goto bad;
pr_cont("R2:%016lx ", *table);
@@ -153,7 +153,7 @@ static void dump_pagetable(unsigned long asce, unsigned long address)
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
/* fallthrough */
case _ASCE_TYPE_REGION3:
- table = table + ((address >> 31) & 0x7ff);
+ table += (address & _REGION3_INDEX) >> _REGION3_SHIFT;
if (bad_address(table))
goto bad;
pr_cont("R3:%016lx ", *table);
@@ -162,7 +162,7 @@ static void dump_pagetable(unsigned long asce, unsigned long address)
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
/* fallthrough */
case _ASCE_TYPE_SEGMENT:
- table = table + ((address >> 20) & 0x7ff);
+ table += (address & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
if (bad_address(table))
goto bad;
pr_cont("S:%016lx ", *table);
@@ -170,7 +170,7 @@ static void dump_pagetable(unsigned long asce, unsigned long address)
goto out;
table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
}
- table = table + ((address >> 12) & 0xff);
+ table += (address & _PAGE_INDEX) >> _PAGE_SHIFT;
if (bad_address(table))
goto bad;
pr_cont("P:%016lx ", *table);
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 4fb3d3cdb370..9e1494e3d849 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -36,16 +36,16 @@ static struct gmap *gmap_alloc(unsigned long limit)
unsigned long *table;
unsigned long etype, atype;
- if (limit < (1UL << 31)) {
- limit = (1UL << 31) - 1;
+ if (limit < _REGION3_SIZE) {
+ limit = _REGION3_SIZE - 1;
atype = _ASCE_TYPE_SEGMENT;
etype = _SEGMENT_ENTRY_EMPTY;
- } else if (limit < (1UL << 42)) {
- limit = (1UL << 42) - 1;
+ } else if (limit < _REGION2_SIZE) {
+ limit = _REGION2_SIZE - 1;
atype = _ASCE_TYPE_REGION3;
etype = _REGION3_ENTRY_EMPTY;
- } else if (limit < (1UL << 53)) {
- limit = (1UL << 53) - 1;
+ } else if (limit < _REGION1_SIZE) {
+ limit = _REGION1_SIZE - 1;
atype = _ASCE_TYPE_REGION2;
etype = _REGION2_ENTRY_EMPTY;
} else {
@@ -65,7 +65,7 @@ static struct gmap *gmap_alloc(unsigned long limit)
spin_lock_init(&gmap->guest_table_lock);
spin_lock_init(&gmap->shadow_lock);
atomic_set(&gmap->ref_count, 1);
- page = alloc_pages(GFP_KERNEL, 2);
+ page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
if (!page)
goto out_free;
page->index = 0;
@@ -186,7 +186,7 @@ static void gmap_free(struct gmap *gmap)
gmap_flush_tlb(gmap);
/* Free all segment & region tables. */
list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
- __free_pages(page, 2);
+ __free_pages(page, CRST_ALLOC_ORDER);
gmap_radix_tree_free(&gmap->guest_to_host);
gmap_radix_tree_free(&gmap->host_to_guest);
@@ -306,7 +306,7 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
unsigned long *new;
/* since we don't free the gmap table until gmap_free we can unlock */
- page = alloc_pages(GFP_KERNEL, 2);
+ page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
if (!page)
return -ENOMEM;
new = (unsigned long *) page_to_phys(page);
@@ -321,7 +321,7 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
}
spin_unlock(&gmap->guest_table_lock);
if (page)
- __free_pages(page, 2);
+ __free_pages(page, CRST_ALLOC_ORDER);
return 0;
}
@@ -546,30 +546,30 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
/* Create higher level tables in the gmap page table */
table = gmap->table;
if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
- table += (gaddr >> 53) & 0x7ff;
+ table += (gaddr & _REGION1_INDEX) >> _REGION1_SHIFT;
if ((*table & _REGION_ENTRY_INVALID) &&
gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
- gaddr & 0xffe0000000000000UL))
+ gaddr & _REGION1_MASK))
return -ENOMEM;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
}
if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
- table += (gaddr >> 42) & 0x7ff;
+ table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT;
if ((*table & _REGION_ENTRY_INVALID) &&
gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
- gaddr & 0xfffffc0000000000UL))
+ gaddr & _REGION2_MASK))
return -ENOMEM;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
}
if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
- table += (gaddr >> 31) & 0x7ff;
+ table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT;
if ((*table & _REGION_ENTRY_INVALID) &&
gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
- gaddr & 0xffffffff80000000UL))
+ gaddr & _REGION3_MASK))
return -ENOMEM;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
}
- table += (gaddr >> 20) & 0x7ff;
+ table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
/* Walk the parent mm page table */
mm = gmap->mm;
pgd = pgd_offset(mm, vmaddr);
@@ -771,7 +771,7 @@ static inline unsigned long *gmap_table_walk(struct gmap *gmap,
table = gmap->table;
switch (gmap->asce & _ASCE_TYPE_MASK) {
case _ASCE_TYPE_REGION1:
- table += (gaddr >> 53) & 0x7ff;
+ table += (gaddr & _REGION1_INDEX) >> _REGION1_SHIFT;
if (level == 4)
break;
if (*table & _REGION_ENTRY_INVALID)
@@ -779,7 +779,7 @@ static inline unsigned long *gmap_table_walk(struct gmap *gmap,
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
/* Fallthrough */
case _ASCE_TYPE_REGION2:
- table += (gaddr >> 42) & 0x7ff;
+ table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT;
if (level == 3)
break;
if (*table & _REGION_ENTRY_INVALID)
@@ -787,7 +787,7 @@ static inline unsigned long *gmap_table_walk(struct gmap *gmap,
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
/* Fallthrough */
case _ASCE_TYPE_REGION3:
- table += (gaddr >> 31) & 0x7ff;
+ table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT;
if (level == 2)
break;
if (*table & _REGION_ENTRY_INVALID)
@@ -795,13 +795,13 @@ static inline unsigned long *gmap_table_walk(struct gmap *gmap,
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
/* Fallthrough */
case _ASCE_TYPE_SEGMENT:
- table += (gaddr >> 20) & 0x7ff;
+ table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
if (level == 1)
break;
if (*table & _REGION_ENTRY_INVALID)
return NULL;
table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
- table += (gaddr >> 12) & 0xff;
+ table += (gaddr & _PAGE_INDEX) >> _PAGE_SHIFT;
}
return table;
}
@@ -1126,7 +1126,7 @@ static void gmap_unshadow_page(struct gmap *sg, unsigned long raddr)
table = gmap_table_walk(sg, raddr, 0); /* get page table pointer */
if (!table || *table & _PAGE_INVALID)
return;
- gmap_call_notifier(sg, raddr, raddr + (1UL << 12) - 1);
+ gmap_call_notifier(sg, raddr, raddr + _PAGE_SIZE - 1);
ptep_unshadow_pte(sg->mm, raddr, (pte_t *) table);
}
@@ -1144,7 +1144,7 @@ static void __gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr,
int i;
BUG_ON(!gmap_is_shadow(sg));
- for (i = 0; i < 256; i++, raddr += 1UL << 12)
+ for (i = 0; i < _PAGE_ENTRIES; i++, raddr += _PAGE_SIZE)
pgt[i] = _PAGE_INVALID;
}
@@ -1164,8 +1164,8 @@ static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
ste = gmap_table_walk(sg, raddr, 1); /* get segment pointer */
if (!ste || !(*ste & _SEGMENT_ENTRY_ORIGIN))
return;
- gmap_call_notifier(sg, raddr, raddr + (1UL << 20) - 1);
- sto = (unsigned long) (ste - ((raddr >> 20) & 0x7ff));
+ gmap_call_notifier(sg, raddr, raddr + _SEGMENT_SIZE - 1);
+ sto = (unsigned long) (ste - ((raddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT));
gmap_idte_one(sto | _ASCE_TYPE_SEGMENT, raddr);
pgt = (unsigned long *)(*ste & _SEGMENT_ENTRY_ORIGIN);
*ste = _SEGMENT_ENTRY_EMPTY;
@@ -1193,7 +1193,7 @@ static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr,
BUG_ON(!gmap_is_shadow(sg));
asce = (unsigned long) sgt | _ASCE_TYPE_SEGMENT;
- for (i = 0; i < 2048; i++, raddr += 1UL << 20) {
+ for (i = 0; i < _CRST_ENTRIES; i++, raddr += _SEGMENT_SIZE) {
if (!(sgt[i] & _SEGMENT_ENTRY_ORIGIN))
continue;
pgt = (unsigned long *)(sgt[i] & _REGION_ENTRY_ORIGIN);
@@ -1222,8 +1222,8 @@ static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
r3e = gmap_table_walk(sg, raddr, 2); /* get region-3 pointer */
if (!r3e || !(*r3e & _REGION_ENTRY_ORIGIN))
return;
- gmap_call_notifier(sg, raddr, raddr + (1UL << 31) - 1);
- r3o = (unsigned long) (r3e - ((raddr >> 31) & 0x7ff));
+ gmap_call_notifier(sg, raddr, raddr + _REGION3_SIZE - 1);
+ r3o = (unsigned long) (r3e - ((raddr & _REGION3_INDEX) >> _REGION3_SHIFT));
gmap_idte_one(r3o | _ASCE_TYPE_REGION3, raddr);
sgt = (unsigned long *)(*r3e & _REGION_ENTRY_ORIGIN);
*r3e = _REGION3_ENTRY_EMPTY;
@@ -1231,7 +1231,7 @@ static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
/* Free segment table */
page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
list_del(&page->lru);
- __free_pages(page, 2);
+ __free_pages(page, CRST_ALLOC_ORDER);
}
/**
@@ -1251,7 +1251,7 @@ static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
BUG_ON(!gmap_is_shadow(sg));
asce = (unsigned long) r3t | _ASCE_TYPE_REGION3;
- for (i = 0; i < 2048; i++, raddr += 1UL << 31) {
+ for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION3_SIZE) {
if (!(r3t[i] & _REGION_ENTRY_ORIGIN))
continue;
sgt = (unsigned long *)(r3t[i] & _REGION_ENTRY_ORIGIN);
@@ -1260,7 +1260,7 @@ static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
/* Free segment table */
page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
list_del(&page->lru);
- __free_pages(page, 2);
+ __free_pages(page, CRST_ALLOC_ORDER);
}
}
@@ -1280,8 +1280,8 @@ static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
r2e = gmap_table_walk(sg, raddr, 3); /* get region-2 pointer */
if (!r2e || !(*r2e & _REGION_ENTRY_ORIGIN))
return;
- gmap_call_notifier(sg, raddr, raddr + (1UL << 42) - 1);
- r2o = (unsigned long) (r2e - ((raddr >> 42) & 0x7ff));
+ gmap_call_notifier(sg, raddr, raddr + _REGION2_SIZE - 1);
+ r2o = (unsigned long) (r2e - ((raddr & _REGION2_INDEX) >> _REGION2_SHIFT));
gmap_idte_one(r2o | _ASCE_TYPE_REGION2, raddr);
r3t = (unsigned long *)(*r2e & _REGION_ENTRY_ORIGIN);
*r2e = _REGION2_ENTRY_EMPTY;
@@ -1289,7 +1289,7 @@ static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
/* Free region 3 table */
page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
list_del(&page->lru);
- __free_pages(page, 2);
+ __free_pages(page, CRST_ALLOC_ORDER);
}
/**
@@ -1309,7 +1309,7 @@ static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
BUG_ON(!gmap_is_shadow(sg));
asce = (unsigned long) r2t | _ASCE_TYPE_REGION2;
- for (i = 0; i < 2048; i++, raddr += 1UL << 42) {
+ for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION2_SIZE) {
if (!(r2t[i] & _REGION_ENTRY_ORIGIN))
continue;
r3t = (unsigned long *)(r2t[i] & _REGION_ENTRY_ORIGIN);
@@ -1318,7 +1318,7 @@ static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
/* Free region 3 table */
page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
list_del(&page->lru);
- __free_pages(page, 2);
+ __free_pages(page, CRST_ALLOC_ORDER);
}
}
@@ -1338,8 +1338,8 @@ static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr)
r1e = gmap_table_walk(sg, raddr, 4); /* get region-1 pointer */
if (!r1e || !(*r1e & _REGION_ENTRY_ORIGIN))
return;
- gmap_call_notifier(sg, raddr, raddr + (1UL << 53) - 1);
- r1o = (unsigned long) (r1e - ((raddr >> 53) & 0x7ff));
+ gmap_call_notifier(sg, raddr, raddr + _REGION1_SIZE - 1);
+ r1o = (unsigned long) (r1e - ((raddr & _REGION1_INDEX) >> _REGION1_SHIFT));
gmap_idte_one(r1o | _ASCE_TYPE_REGION1, raddr);
r2t = (unsigned long *)(*r1e & _REGION_ENTRY_ORIGIN);
*r1e = _REGION1_ENTRY_EMPTY;
@@ -1347,7 +1347,7 @@ static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr)
/* Free region 2 table */
page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
list_del(&page->lru);
- __free_pages(page, 2);
+ __free_pages(page, CRST_ALLOC_ORDER);
}
/**
@@ -1367,7 +1367,7 @@ static void __gmap_unshadow_r1t(struct gmap *sg, unsigned long raddr,
BUG_ON(!gmap_is_shadow(sg));
asce = (unsigned long) r1t | _ASCE_TYPE_REGION1;
- for (i = 0; i < 2048; i++, raddr += 1UL << 53) {
+ for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION1_SIZE) {
if (!(r1t[i] & _REGION_ENTRY_ORIGIN))
continue;
r2t = (unsigned long *)(r1t[i] & _REGION_ENTRY_ORIGIN);
@@ -1378,7 +1378,7 @@ static void __gmap_unshadow_r1t(struct gmap *sg, unsigned long raddr,
/* Free region 2 table */
page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
list_del(&page->lru);
- __free_pages(page, 2);
+ __free_pages(page, CRST_ALLOC_ORDER);
}
}
@@ -1535,7 +1535,7 @@ struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
/* protect after insertion, so it will get properly invalidated */
down_read(&parent->mm->mmap_sem);
rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
- ((asce & _ASCE_TABLE_LENGTH) + 1) * 4096,
+ ((asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE,
PROT_READ, PGSTE_VSIE_BIT);
up_read(&parent->mm->mmap_sem);
spin_lock(&parent->shadow_lock);
@@ -1578,7 +1578,7 @@ int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
BUG_ON(!gmap_is_shadow(sg));
/* Allocate a shadow region second table */
- page = alloc_pages(GFP_KERNEL, 2);
+ page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
if (!page)
return -ENOMEM;
page->index = r2t & _REGION_ENTRY_ORIGIN;
@@ -1614,10 +1614,10 @@ int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
}
spin_unlock(&sg->guest_table_lock);
/* Make r2t read-only in parent gmap page table */
- raddr = (saddr & 0xffe0000000000000UL) | _SHADOW_RMAP_REGION1;
+ raddr = (saddr & _REGION1_MASK) | _SHADOW_RMAP_REGION1;
origin = r2t & _REGION_ENTRY_ORIGIN;
- offset = ((r2t & _REGION_ENTRY_OFFSET) >> 6) * 4096;
- len = ((r2t & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset;
+ offset = ((r2t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
+ len = ((r2t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
spin_lock(&sg->guest_table_lock);
if (!rc) {
@@ -1634,7 +1634,7 @@ int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
return rc;
out_free:
spin_unlock(&sg->guest_table_lock);
- __free_pages(page, 2);
+ __free_pages(page, CRST_ALLOC_ORDER);
return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_r2t);
@@ -1662,7 +1662,7 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
BUG_ON(!gmap_is_shadow(sg));
/* Allocate a shadow region second table */
- page = alloc_pages(GFP_KERNEL, 2);
+ page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
if (!page)
return -ENOMEM;
page->index = r3t & _REGION_ENTRY_ORIGIN;
@@ -1697,10 +1697,10 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
}
spin_unlock(&sg->guest_table_lock);
/* Make r3t read-only in parent gmap page table */
- raddr = (saddr & 0xfffffc0000000000UL) | _SHADOW_RMAP_REGION2;
+ raddr = (saddr & _REGION2_MASK) | _SHADOW_RMAP_REGION2;
origin = r3t & _REGION_ENTRY_ORIGIN;
- offset = ((r3t & _REGION_ENTRY_OFFSET) >> 6) * 4096;
- len = ((r3t & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset;
+ offset = ((r3t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
+ len = ((r3t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
spin_lock(&sg->guest_table_lock);
if (!rc) {
@@ -1717,7 +1717,7 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
return rc;
out_free:
spin_unlock(&sg->guest_table_lock);
- __free_pages(page, 2);
+ __free_pages(page, CRST_ALLOC_ORDER);
return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_r3t);
@@ -1745,7 +1745,7 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
BUG_ON(!gmap_is_shadow(sg) || (sgt & _REGION3_ENTRY_LARGE));
/* Allocate a shadow segment table */
- page = alloc_pages(GFP_KERNEL, 2);
+ page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
if (!page)
return -ENOMEM;
page->index = sgt & _REGION_ENTRY_ORIGIN;
@@ -1781,10 +1781,10 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
}
spin_unlock(&sg->guest_table_lock);
/* Make sgt read-only in parent gmap page table */
- raddr = (saddr & 0xffffffff80000000UL) | _SHADOW_RMAP_REGION3;
+ raddr = (saddr & _REGION3_MASK) | _SHADOW_RMAP_REGION3;
origin = sgt & _REGION_ENTRY_ORIGIN;
- offset = ((sgt & _REGION_ENTRY_OFFSET) >> 6) * 4096;
- len = ((sgt & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset;
+ offset = ((sgt & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
+ len = ((sgt & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
spin_lock(&sg->guest_table_lock);
if (!rc) {
@@ -1801,7 +1801,7 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
return rc;
out_free:
spin_unlock(&sg->guest_table_lock);
- __free_pages(page, 2);
+ __free_pages(page, CRST_ALLOC_ORDER);
return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_sgt);
@@ -1902,7 +1902,7 @@ int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
}
spin_unlock(&sg->guest_table_lock);
/* Make pgt read-only in parent gmap page table (not the pgste) */
- raddr = (saddr & 0xfffffffffff00000UL) | _SHADOW_RMAP_SEGMENT;
+ raddr = (saddr & _SEGMENT_MASK) | _SHADOW_RMAP_SEGMENT;
origin = pgt & _SEGMENT_ENTRY_ORIGIN & PAGE_MASK;
rc = gmap_protect_rmap(sg, raddr, origin, PAGE_SIZE, PROT_READ);
spin_lock(&sg->guest_table_lock);
@@ -2021,7 +2021,7 @@ static void gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr,
}
/* Check for top level table */
start = sg->orig_asce & _ASCE_ORIGIN;
- end = start + ((sg->orig_asce & _ASCE_TABLE_LENGTH) + 1) * 4096;
+ end = start + ((sg->orig_asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE;
if (!(sg->orig_asce & _ASCE_REAL_SPACE) && gaddr >= start &&
gaddr < end) {
/* The complete shadow table has to go */
@@ -2032,7 +2032,7 @@ static void gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr,
return;
}
/* Remove the page table tree from one specific entry */
- head = radix_tree_delete(&sg->host_to_rmap, vmaddr >> 12);
+ head = radix_tree_delete(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
gmap_for_each_rmap_safe(rmap, rnext, head) {
bits = rmap->raddr & _SHADOW_RMAP_MASK;
raddr = rmap->raddr ^ bits;
@@ -2076,7 +2076,7 @@ void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
struct gmap *gmap, *sg, *next;
offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
- offset = offset * (4096 / sizeof(pte_t));
+ offset = offset * (PAGE_SIZE / sizeof(pte_t));
rcu_read_lock();
list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
spin_lock(&gmap->guest_table_lock);
@@ -2121,6 +2121,37 @@ static inline void thp_split_mm(struct mm_struct *mm)
}
/*
+ * Remove all empty zero pages from the mapping for lazy refaulting
+ * - This must be called after mm->context.has_pgste is set, to avoid
+ * future creation of zero pages
+ * - This must be called after THP was enabled
+ */
+static int __zap_zero_pages(pmd_t *pmd, unsigned long start,
+ unsigned long end, struct mm_walk *walk)
+{
+ unsigned long addr;
+
+ for (addr = start; addr != end; addr += PAGE_SIZE) {
+ pte_t *ptep;
+ spinlock_t *ptl;
+
+ ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+ if (is_zero_pfn(pte_pfn(*ptep)))
+ ptep_xchg_direct(walk->mm, addr, ptep, __pte(_PAGE_INVALID));
+ pte_unmap_unlock(ptep, ptl);
+ }
+ return 0;
+}
+
+static inline void zap_zero_pages(struct mm_struct *mm)
+{
+ struct mm_walk walk = { .pmd_entry = __zap_zero_pages };
+
+ walk.mm = mm;
+ walk_page_range(0, TASK_SIZE, &walk);
+}
+
+/*
* switch on pgstes for its userspace process (for kvm)
*/
int s390_enable_sie(void)
@@ -2137,6 +2168,7 @@ int s390_enable_sie(void)
mm->context.has_pgste = 1;
/* split thp mappings and disable thp for future mappings */
thp_split_mm(mm);
+ zap_zero_pages(mm);
up_write(&mm->mmap_sem);
return 0;
}
@@ -2149,13 +2181,6 @@ EXPORT_SYMBOL_GPL(s390_enable_sie);
static int __s390_enable_skey(pte_t *pte, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
- /*
- * Remove all zero page mappings,
- * after establishing a policy to forbid zero page mappings
- * following faults for that page will get fresh anonymous pages
- */
- if (is_zero_pfn(pte_pfn(*pte)))
- ptep_xchg_direct(walk->mm, addr, pte, __pte(_PAGE_INVALID));
/* Clear storage key */
ptep_zap_key(walk->mm, addr, pte);
return 0;
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 8111694ce55a..3b567838b905 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -26,6 +26,7 @@
#include <linux/poison.h>
#include <linux/initrd.h>
#include <linux/export.h>
+#include <linux/cma.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <asm/processor.h>
@@ -84,7 +85,7 @@ void __init paging_init(void)
psw_t psw;
init_mm.pgd = swapper_pg_dir;
- if (VMALLOC_END > (1UL << 42)) {
+ if (VMALLOC_END > _REGION2_SIZE) {
asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
pgd_type = _REGION2_ENTRY_EMPTY;
} else {
@@ -93,8 +94,7 @@ void __init paging_init(void)
}
init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
S390_lowcore.kernel_asce = init_mm.context.asce;
- clear_table((unsigned long *) init_mm.pgd, pgd_type,
- sizeof(unsigned long)*2048);
+ crst_table_init((unsigned long *) init_mm.pgd, pgd_type);
vmem_map_init();
/* enable virtual mapping in kernel mode */
@@ -137,6 +137,8 @@ void __init mem_init(void)
free_all_bootmem();
setup_zero_pages(); /* Setup zeroed pages. */
+ cmma_init_nodat();
+
mem_init_print_info(NULL);
}
@@ -166,6 +168,58 @@ unsigned long memory_block_size_bytes(void)
}
#ifdef CONFIG_MEMORY_HOTPLUG
+
+#ifdef CONFIG_CMA
+
+/* Prevent memory blocks which contain cma regions from going offline */
+
+struct s390_cma_mem_data {
+ unsigned long start;
+ unsigned long end;
+};
+
+static int s390_cma_check_range(struct cma *cma, void *data)
+{
+ struct s390_cma_mem_data *mem_data;
+ unsigned long start, end;
+
+ mem_data = data;
+ start = cma_get_base(cma);
+ end = start + cma_get_size(cma);
+ if (end < mem_data->start)
+ return 0;
+ if (start >= mem_data->end)
+ return 0;
+ return -EBUSY;
+}
+
+static int s390_cma_mem_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct s390_cma_mem_data mem_data;
+ struct memory_notify *arg;
+ int rc = 0;
+
+ arg = data;
+ mem_data.start = arg->start_pfn << PAGE_SHIFT;
+ mem_data.end = mem_data.start + (arg->nr_pages << PAGE_SHIFT);
+ if (action == MEM_GOING_OFFLINE)
+ rc = cma_for_each_area(s390_cma_check_range, &mem_data);
+ return notifier_from_errno(rc);
+}
+
+static struct notifier_block s390_cma_mem_nb = {
+ .notifier_call = s390_cma_mem_notifier,
+};
+
+static int __init s390_cma_mem_init(void)
+{
+ return register_memory_notifier(&s390_cma_mem_nb);
+}
+device_initcall(s390_cma_mem_init);
+
+#endif /* CONFIG_CMA */
+
int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
{
unsigned long start_pfn = PFN_DOWN(start);
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 2e10d2b8ad35..5bea139517a2 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -119,7 +119,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
return addr;
check_asce_limit:
- if (addr + len > current->mm->context.asce_limit) {
+ if (addr + len > current->mm->context.asce_limit &&
+ addr + len <= TASK_SIZE) {
rc = crst_table_upgrade(mm, addr + len);
if (rc)
return (unsigned long) rc;
@@ -183,7 +184,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
}
check_asce_limit:
- if (addr + len > current->mm->context.asce_limit) {
+ if (addr + len > current->mm->context.asce_limit &&
+ addr + len <= TASK_SIZE) {
rc = crst_table_upgrade(mm, addr + len);
if (rc)
return (unsigned long) rc;
diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
index 69a7b01ae746..07fa7b8ae233 100644
--- a/arch/s390/mm/page-states.c
+++ b/arch/s390/mm/page-states.c
@@ -10,9 +10,10 @@
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/mm.h>
+#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/init.h>
-
+#include <asm/facility.h>
#include <asm/page-states.h>
static int cmma_flag = 1;
@@ -36,14 +37,16 @@ __setup("cmma=", cmma);
static inline int cmma_test_essa(void)
{
register unsigned long tmp asm("0") = 0;
- register int rc asm("1") = -EOPNOTSUPP;
+ register int rc asm("1");
+ /* test ESSA_GET_STATE */
asm volatile(
- " .insn rrf,0xb9ab0000,%1,%1,0,0\n"
+ " .insn rrf,0xb9ab0000,%1,%1,%2,0\n"
"0: la %0,0\n"
"1:\n"
EX_TABLE(0b,1b)
- : "+&d" (rc), "+&d" (tmp));
+ : "=&d" (rc), "+&d" (tmp)
+ : "i" (ESSA_GET_STATE), "0" (-EOPNOTSUPP));
return rc;
}
@@ -51,11 +54,26 @@ void __init cmma_init(void)
{
if (!cmma_flag)
return;
- if (cmma_test_essa())
+ if (cmma_test_essa()) {
cmma_flag = 0;
+ return;
+ }
+ if (test_facility(147))
+ cmma_flag = 2;
}
-static inline void set_page_unstable(struct page *page, int order)
+static inline unsigned char get_page_state(struct page *page)
+{
+ unsigned char state;
+
+ asm volatile(" .insn rrf,0xb9ab0000,%0,%1,%2,0"
+ : "=&d" (state)
+ : "a" (page_to_phys(page)),
+ "i" (ESSA_GET_STATE));
+ return state & 0x3f;
+}
+
+static inline void set_page_unused(struct page *page, int order)
{
int i, rc;
@@ -66,14 +84,18 @@ static inline void set_page_unstable(struct page *page, int order)
"i" (ESSA_SET_UNUSED));
}
-void arch_free_page(struct page *page, int order)
+static inline void set_page_stable_dat(struct page *page, int order)
{
- if (!cmma_flag)
- return;
- set_page_unstable(page, order);
+ int i, rc;
+
+ for (i = 0; i < (1 << order); i++)
+ asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
+ : "=&d" (rc)
+ : "a" (page_to_phys(page + i)),
+ "i" (ESSA_SET_STABLE));
}
-static inline void set_page_stable(struct page *page, int order)
+static inline void set_page_stable_nodat(struct page *page, int order)
{
int i, rc;
@@ -81,14 +103,154 @@ static inline void set_page_stable(struct page *page, int order)
asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
: "=&d" (rc)
: "a" (page_to_phys(page + i)),
- "i" (ESSA_SET_STABLE));
+ "i" (ESSA_SET_STABLE_NODAT));
+}
+
+static void mark_kernel_pmd(pud_t *pud, unsigned long addr, unsigned long end)
+{
+ unsigned long next;
+ struct page *page;
+ pmd_t *pmd;
+
+ pmd = pmd_offset(pud, addr);
+ do {
+ next = pmd_addr_end(addr, end);
+ if (pmd_none(*pmd) || pmd_large(*pmd))
+ continue;
+ page = virt_to_page(pmd_val(*pmd));
+ set_bit(PG_arch_1, &page->flags);
+ } while (pmd++, addr = next, addr != end);
+}
+
+static void mark_kernel_pud(p4d_t *p4d, unsigned long addr, unsigned long end)
+{
+ unsigned long next;
+ struct page *page;
+ pud_t *pud;
+ int i;
+
+ pud = pud_offset(p4d, addr);
+ do {
+ next = pud_addr_end(addr, end);
+ if (pud_none(*pud) || pud_large(*pud))
+ continue;
+ if (!pud_folded(*pud)) {
+ page = virt_to_page(pud_val(*pud));
+ for (i = 0; i < 3; i++)
+ set_bit(PG_arch_1, &page[i].flags);
+ }
+ mark_kernel_pmd(pud, addr, next);
+ } while (pud++, addr = next, addr != end);
+}
+
+static void mark_kernel_p4d(pgd_t *pgd, unsigned long addr, unsigned long end)
+{
+ unsigned long next;
+ struct page *page;
+ p4d_t *p4d;
+ int i;
+
+ p4d = p4d_offset(pgd, addr);
+ do {
+ next = p4d_addr_end(addr, end);
+ if (p4d_none(*p4d))
+ continue;
+ if (!p4d_folded(*p4d)) {
+ page = virt_to_page(p4d_val(*p4d));
+ for (i = 0; i < 3; i++)
+ set_bit(PG_arch_1, &page[i].flags);
+ }
+ mark_kernel_pud(p4d, addr, next);
+ } while (p4d++, addr = next, addr != end);
+}
+
+static void mark_kernel_pgd(void)
+{
+ unsigned long addr, next;
+ struct page *page;
+ pgd_t *pgd;
+ int i;
+
+ addr = 0;
+ pgd = pgd_offset_k(addr);
+ do {
+ next = pgd_addr_end(addr, MODULES_END);
+ if (pgd_none(*pgd))
+ continue;
+ if (!pgd_folded(*pgd)) {
+ page = virt_to_page(pgd_val(*pgd));
+ for (i = 0; i < 3; i++)
+ set_bit(PG_arch_1, &page[i].flags);
+ }
+ mark_kernel_p4d(pgd, addr, next);
+ } while (pgd++, addr = next, addr != MODULES_END);
+}
+
+void __init cmma_init_nodat(void)
+{
+ struct memblock_region *reg;
+ struct page *page;
+ unsigned long start, end, ix;
+
+ if (cmma_flag < 2)
+ return;
+ /* Mark pages used in kernel page tables */
+ mark_kernel_pgd();
+
+ /* Set all kernel pages not used for page tables to stable/no-dat */
+ for_each_memblock(memory, reg) {
+ start = memblock_region_memory_base_pfn(reg);
+ end = memblock_region_memory_end_pfn(reg);
+ page = pfn_to_page(start);
+ for (ix = start; ix < end; ix++, page++) {
+ if (__test_and_clear_bit(PG_arch_1, &page->flags))
+ continue; /* skip page table pages */
+ if (!list_empty(&page->lru))
+ continue; /* skip free pages */
+ set_page_stable_nodat(page, 0);
+ }
+ }
+}
+
+void arch_free_page(struct page *page, int order)
+{
+ if (!cmma_flag)
+ return;
+ set_page_unused(page, order);
}
void arch_alloc_page(struct page *page, int order)
{
if (!cmma_flag)
return;
- set_page_stable(page, order);
+ if (cmma_flag < 2)
+ set_page_stable_dat(page, order);
+ else
+ set_page_stable_nodat(page, order);
+}
+
+void arch_set_page_dat(struct page *page, int order)
+{
+ if (!cmma_flag)
+ return;
+ set_page_stable_dat(page, order);
+}
+
+void arch_set_page_nodat(struct page *page, int order)
+{
+ if (cmma_flag < 2)
+ return;
+ set_page_stable_nodat(page, order);
+}
+
+int arch_test_page_nodat(struct page *page)
+{
+ unsigned char state;
+
+ if (cmma_flag < 2)
+ return 0;
+ state = get_page_state(page);
+ return !!(state & 0x20);
}
void arch_set_page_states(int make_stable)
@@ -108,9 +270,9 @@ void arch_set_page_states(int make_stable)
list_for_each(l, &zone->free_area[order].free_list[t]) {
page = list_entry(l, struct page, lru);
if (make_stable)
- set_page_stable(page, order);
+ set_page_stable_dat(page, 0);
else
- set_page_unstable(page, order);
+ set_page_unused(page, order);
}
}
spin_unlock_irqrestore(&zone->lock, flags);
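/*
 * After the rework above, cmma_flag encodes three levels: 0 when ESSA is
 * unavailable or "cmma=off" was given, 1 when plain CMMA works, and 2 when
 * facility 147 additionally provides the no-DAT page states.  The
 * allocation hook then picks the ESSA target state from that level; a
 * condensed restatement of arch_alloc_page() using the helpers defined in
 * the hunks above:
 */
void sketch_arch_alloc_page(struct page *page, int order)
{
	if (!cmma_flag)				/* 0: CMMA disabled */
		return;
	if (cmma_flag < 2)			/* 1: CMMA without no-DAT */
		set_page_stable_dat(page, order);
	else					/* 2: CMMA with no-DAT states */
		set_page_stable_nodat(page, order);
}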
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 180481589246..552f898dfa74 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -7,6 +7,7 @@
#include <asm/cacheflush.h>
#include <asm/facility.h>
#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/set_memory.h>
@@ -191,7 +192,7 @@ static int split_pud_page(pud_t *pudp, unsigned long addr)
pud_t new;
int i, ro, nx;
- pm_dir = vmem_pmd_alloc();
+ pm_dir = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
if (!pm_dir)
return -ENOMEM;
pmd_addr = pud_pfn(*pudp) << PAGE_SHIFT;
@@ -328,7 +329,7 @@ static void ipte_range(pte_t *pte, unsigned long address, int nr)
return;
}
for (i = 0; i < nr; i++) {
- __ptep_ipte(address, pte, IPTE_GLOBAL);
+ __ptep_ipte(address, pte, 0, 0, IPTE_GLOBAL);
address += PAGE_SIZE;
pte++;
}
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index 18918e394ce4..c5b74dd61197 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -57,6 +57,7 @@ unsigned long *crst_table_alloc(struct mm_struct *mm)
if (!page)
return NULL;
+ arch_set_page_dat(page, 2);
return (unsigned long *) page_to_phys(page);
}
@@ -82,7 +83,7 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
int rc, notify;
/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
- BUG_ON(mm->context.asce_limit < (1UL << 42));
+ BUG_ON(mm->context.asce_limit < _REGION2_SIZE);
if (end >= TASK_SIZE_MAX)
return -ENOMEM;
rc = 0;
@@ -95,11 +96,11 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
}
spin_lock_bh(&mm->page_table_lock);
pgd = (unsigned long *) mm->pgd;
- if (mm->context.asce_limit == (1UL << 42)) {
+ if (mm->context.asce_limit == _REGION2_SIZE) {
crst_table_init(table, _REGION2_ENTRY_EMPTY);
p4d_populate(mm, (p4d_t *) table, (pud_t *) pgd);
mm->pgd = (pgd_t *) table;
- mm->context.asce_limit = 1UL << 53;
+ mm->context.asce_limit = _REGION1_SIZE;
mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
} else {
@@ -123,7 +124,7 @@ void crst_table_downgrade(struct mm_struct *mm)
pgd_t *pgd;
/* downgrade should only happen from 3 to 2 levels (compat only) */
- BUG_ON(mm->context.asce_limit != (1UL << 42));
+ BUG_ON(mm->context.asce_limit != _REGION2_SIZE);
if (current->active_mm == mm) {
clear_user_asce();
@@ -132,7 +133,7 @@ void crst_table_downgrade(struct mm_struct *mm)
pgd = mm->pgd;
mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
- mm->context.asce_limit = 1UL << 31;
+ mm->context.asce_limit = _REGION3_SIZE;
mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
_ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
crst_table_free(mm, (unsigned long *) pgd);
@@ -214,6 +215,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
__free_page(page);
return NULL;
}
+ arch_set_page_dat(page, 0);
/* Initialize page table */
table = (unsigned long *) page_to_phys(page);
if (mm_alloc_pgste(mm)) {
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index d4d409ba206b..4198a71b8fdd 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -25,8 +25,49 @@
#include <asm/mmu_context.h>
#include <asm/page-states.h>
+static inline void ptep_ipte_local(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, int nodat)
+{
+ unsigned long opt, asce;
+
+ if (MACHINE_HAS_TLB_GUEST) {
+ opt = 0;
+ asce = READ_ONCE(mm->context.gmap_asce);
+ if (asce == 0UL || nodat)
+ opt |= IPTE_NODAT;
+ if (asce != -1UL) {
+ asce = asce ? : mm->context.asce;
+ opt |= IPTE_GUEST_ASCE;
+ }
+ __ptep_ipte(addr, ptep, opt, asce, IPTE_LOCAL);
+ } else {
+ __ptep_ipte(addr, ptep, 0, 0, IPTE_LOCAL);
+ }
+}
+
+static inline void ptep_ipte_global(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, int nodat)
+{
+ unsigned long opt, asce;
+
+ if (MACHINE_HAS_TLB_GUEST) {
+ opt = 0;
+ asce = READ_ONCE(mm->context.gmap_asce);
+ if (asce == 0UL || nodat)
+ opt |= IPTE_NODAT;
+ if (asce != -1UL) {
+ asce = asce ? : mm->context.asce;
+ opt |= IPTE_GUEST_ASCE;
+ }
+ __ptep_ipte(addr, ptep, opt, asce, IPTE_GLOBAL);
+ } else {
+ __ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
+ }
+}
+
static inline pte_t ptep_flush_direct(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
+ unsigned long addr, pte_t *ptep,
+ int nodat)
{
pte_t old;
@@ -36,15 +77,16 @@ static inline pte_t ptep_flush_direct(struct mm_struct *mm,
atomic_inc(&mm->context.flush_count);
if (MACHINE_HAS_TLB_LC &&
cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
- __ptep_ipte(addr, ptep, IPTE_LOCAL);
+ ptep_ipte_local(mm, addr, ptep, nodat);
else
- __ptep_ipte(addr, ptep, IPTE_GLOBAL);
+ ptep_ipte_global(mm, addr, ptep, nodat);
atomic_dec(&mm->context.flush_count);
return old;
}
static inline pte_t ptep_flush_lazy(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
+ unsigned long addr, pte_t *ptep,
+ int nodat)
{
pte_t old;
@@ -57,7 +99,7 @@ static inline pte_t ptep_flush_lazy(struct mm_struct *mm,
pte_val(*ptep) |= _PAGE_INVALID;
mm->context.flush_mm = 1;
} else
- __ptep_ipte(addr, ptep, IPTE_GLOBAL);
+ ptep_ipte_global(mm, addr, ptep, nodat);
atomic_dec(&mm->context.flush_count);
return old;
}
@@ -229,10 +271,12 @@ pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
{
pgste_t pgste;
pte_t old;
+ int nodat;
preempt_disable();
pgste = ptep_xchg_start(mm, addr, ptep);
- old = ptep_flush_direct(mm, addr, ptep);
+ nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
+ old = ptep_flush_direct(mm, addr, ptep, nodat);
old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
preempt_enable();
return old;
@@ -244,10 +288,12 @@ pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
{
pgste_t pgste;
pte_t old;
+ int nodat;
preempt_disable();
pgste = ptep_xchg_start(mm, addr, ptep);
- old = ptep_flush_lazy(mm, addr, ptep);
+ nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
+ old = ptep_flush_lazy(mm, addr, ptep, nodat);
old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
preempt_enable();
return old;
@@ -259,10 +305,12 @@ pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
{
pgste_t pgste;
pte_t old;
+ int nodat;
preempt_disable();
pgste = ptep_xchg_start(mm, addr, ptep);
- old = ptep_flush_lazy(mm, addr, ptep);
+ nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
+ old = ptep_flush_lazy(mm, addr, ptep, nodat);
if (mm_has_pgste(mm)) {
pgste = pgste_update_all(old, pgste, mm);
pgste_set(ptep, pgste);
@@ -290,6 +338,28 @@ void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
}
EXPORT_SYMBOL(ptep_modify_prot_commit);
+static inline void pmdp_idte_local(struct mm_struct *mm,
+ unsigned long addr, pmd_t *pmdp)
+{
+ if (MACHINE_HAS_TLB_GUEST)
+ __pmdp_idte(addr, pmdp, IDTE_NODAT | IDTE_GUEST_ASCE,
+ mm->context.asce, IDTE_LOCAL);
+ else
+ __pmdp_idte(addr, pmdp, 0, 0, IDTE_LOCAL);
+}
+
+static inline void pmdp_idte_global(struct mm_struct *mm,
+ unsigned long addr, pmd_t *pmdp)
+{
+ if (MACHINE_HAS_TLB_GUEST)
+ __pmdp_idte(addr, pmdp, IDTE_NODAT | IDTE_GUEST_ASCE,
+ mm->context.asce, IDTE_GLOBAL);
+ else if (MACHINE_HAS_IDTE)
+ __pmdp_idte(addr, pmdp, 0, 0, IDTE_GLOBAL);
+ else
+ __pmdp_csp(pmdp);
+}
+
static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
unsigned long addr, pmd_t *pmdp)
{
@@ -298,16 +368,12 @@ static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
old = *pmdp;
if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
return old;
- if (!MACHINE_HAS_IDTE) {
- __pmdp_csp(pmdp);
- return old;
- }
atomic_inc(&mm->context.flush_count);
if (MACHINE_HAS_TLB_LC &&
cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
- __pmdp_idte(addr, pmdp, IDTE_LOCAL);
+ pmdp_idte_local(mm, addr, pmdp);
else
- __pmdp_idte(addr, pmdp, IDTE_GLOBAL);
+ pmdp_idte_global(mm, addr, pmdp);
atomic_dec(&mm->context.flush_count);
return old;
}
@@ -325,10 +391,9 @@ static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
cpumask_of(smp_processor_id()))) {
pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
mm->context.flush_mm = 1;
- } else if (MACHINE_HAS_IDTE)
- __pmdp_idte(addr, pmdp, IDTE_GLOBAL);
- else
- __pmdp_csp(pmdp);
+ } else {
+ pmdp_idte_global(mm, addr, pmdp);
+ }
atomic_dec(&mm->context.flush_count);
return old;
}
@@ -359,28 +424,46 @@ pmd_t pmdp_xchg_lazy(struct mm_struct *mm, unsigned long addr,
}
EXPORT_SYMBOL(pmdp_xchg_lazy);
-static inline pud_t pudp_flush_direct(struct mm_struct *mm,
- unsigned long addr, pud_t *pudp)
+static inline void pudp_idte_local(struct mm_struct *mm,
+ unsigned long addr, pud_t *pudp)
{
- pud_t old;
+ if (MACHINE_HAS_TLB_GUEST)
+ __pudp_idte(addr, pudp, IDTE_NODAT | IDTE_GUEST_ASCE,
+ mm->context.asce, IDTE_LOCAL);
+ else
+ __pudp_idte(addr, pudp, 0, 0, IDTE_LOCAL);
+}
- old = *pudp;
- if (pud_val(old) & _REGION_ENTRY_INVALID)
- return old;
- if (!MACHINE_HAS_IDTE) {
+static inline void pudp_idte_global(struct mm_struct *mm,
+ unsigned long addr, pud_t *pudp)
+{
+ if (MACHINE_HAS_TLB_GUEST)
+ __pudp_idte(addr, pudp, IDTE_NODAT | IDTE_GUEST_ASCE,
+ mm->context.asce, IDTE_GLOBAL);
+ else if (MACHINE_HAS_IDTE)
+ __pudp_idte(addr, pudp, 0, 0, IDTE_GLOBAL);
+ else
/*
* Invalid bit position is the same for pmd and pud, so we can
* re-use _pmd_csp() here
*/
__pmdp_csp((pmd_t *) pudp);
+}
+
+static inline pud_t pudp_flush_direct(struct mm_struct *mm,
+ unsigned long addr, pud_t *pudp)
+{
+ pud_t old;
+
+ old = *pudp;
+ if (pud_val(old) & _REGION_ENTRY_INVALID)
return old;
- }
atomic_inc(&mm->context.flush_count);
if (MACHINE_HAS_TLB_LC &&
cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
- __pudp_idte(addr, pudp, IDTE_LOCAL);
+ pudp_idte_local(mm, addr, pudp);
else
- __pudp_idte(addr, pudp, IDTE_GLOBAL);
+ pudp_idte_global(mm, addr, pudp);
atomic_dec(&mm->context.flush_count);
return old;
}
@@ -482,7 +565,7 @@ int ptep_force_prot(struct mm_struct *mm, unsigned long addr,
{
pte_t entry;
pgste_t pgste;
- int pte_i, pte_p;
+ int pte_i, pte_p, nodat;
pgste = pgste_get_lock(ptep);
entry = *ptep;
@@ -495,13 +578,14 @@ int ptep_force_prot(struct mm_struct *mm, unsigned long addr,
return -EAGAIN;
}
/* Change access rights and set pgste bit */
+ nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
if (prot == PROT_NONE && !pte_i) {
- ptep_flush_direct(mm, addr, ptep);
+ ptep_flush_direct(mm, addr, ptep, nodat);
pgste = pgste_update_all(entry, pgste, mm);
pte_val(entry) |= _PAGE_INVALID;
}
if (prot == PROT_READ && !pte_p) {
- ptep_flush_direct(mm, addr, ptep);
+ ptep_flush_direct(mm, addr, ptep, nodat);
pte_val(entry) &= ~_PAGE_INVALID;
pte_val(entry) |= _PAGE_PROTECT;
}
@@ -541,10 +625,12 @@ int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep)
{
pgste_t pgste;
+ int nodat;
pgste = pgste_get_lock(ptep);
/* notifier is called by the caller */
- ptep_flush_direct(mm, saddr, ptep);
+ nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
+ ptep_flush_direct(mm, saddr, ptep, nodat);
/* don't touch the storage key - it belongs to parent pgste */
pgste = pgste_set_pte(ptep, pgste, __pte(_PAGE_INVALID));
pgste_set_unlock(ptep, pgste);
@@ -591,11 +677,11 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
unsigned long ptev;
pgste_t pgste;
- /* Clear storage key */
+ /* Clear storage key ACC and F, but set R/C */
preempt_disable();
pgste = pgste_get_lock(ptep);
- pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
- PGSTE_GR_BIT | PGSTE_GC_BIT);
+ pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
+ pgste_val(pgste) |= PGSTE_GR_BIT | PGSTE_GC_BIT;
ptev = pte_val(*ptep);
if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
@@ -617,6 +703,7 @@ bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
pte_t *ptep;
pte_t pte;
bool dirty;
+ int nodat;
pgd = pgd_offset(mm, addr);
p4d = p4d_alloc(mm, pgd, addr);
@@ -645,7 +732,8 @@ bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
pte = *ptep;
if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
pgste = pgste_pte_notify(mm, addr, ptep, pgste);
- __ptep_ipte(addr, ptep, IPTE_GLOBAL);
+ nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
+ ptep_ipte_global(mm, addr, ptep, nodat);
if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
pte_val(pte) |= _PAGE_PROTECT;
else
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index d8398962a723..c0af0d7b6e5f 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -38,37 +38,14 @@ static void __ref *vmem_alloc_pages(unsigned int order)
return (void *) memblock_alloc(size, size);
}
-static inline p4d_t *vmem_p4d_alloc(void)
+void *vmem_crst_alloc(unsigned long val)
{
- p4d_t *p4d = NULL;
+ unsigned long *table;
- p4d = vmem_alloc_pages(2);
- if (!p4d)
- return NULL;
- clear_table((unsigned long *) p4d, _REGION2_ENTRY_EMPTY, PAGE_SIZE * 4);
- return p4d;
-}
-
-static inline pud_t *vmem_pud_alloc(void)
-{
- pud_t *pud = NULL;
-
- pud = vmem_alloc_pages(2);
- if (!pud)
- return NULL;
- clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
- return pud;
-}
-
-pmd_t *vmem_pmd_alloc(void)
-{
- pmd_t *pmd = NULL;
-
- pmd = vmem_alloc_pages(2);
- if (!pmd)
- return NULL;
- clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
- return pmd;
+ table = vmem_alloc_pages(CRST_ALLOC_ORDER);
+ if (table)
+ crst_table_init(table, val);
+ return table;
}
pte_t __ref *vmem_pte_alloc(void)
@@ -114,14 +91,14 @@ static int vmem_add_mem(unsigned long start, unsigned long size)
while (address < end) {
pg_dir = pgd_offset_k(address);
if (pgd_none(*pg_dir)) {
- p4_dir = vmem_p4d_alloc();
+ p4_dir = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
if (!p4_dir)
goto out;
pgd_populate(&init_mm, pg_dir, p4_dir);
}
p4_dir = p4d_offset(pg_dir, address);
if (p4d_none(*p4_dir)) {
- pu_dir = vmem_pud_alloc();
+ pu_dir = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
if (!pu_dir)
goto out;
p4d_populate(&init_mm, p4_dir, pu_dir);
@@ -136,7 +113,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size)
continue;
}
if (pud_none(*pu_dir)) {
- pm_dir = vmem_pmd_alloc();
+ pm_dir = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
if (!pm_dir)
goto out;
pud_populate(&init_mm, pu_dir, pm_dir);
@@ -253,7 +230,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
for (address = start; address < end;) {
pg_dir = pgd_offset_k(address);
if (pgd_none(*pg_dir)) {
- p4_dir = vmem_p4d_alloc();
+ p4_dir = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
if (!p4_dir)
goto out;
pgd_populate(&init_mm, pg_dir, p4_dir);
@@ -261,7 +238,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
p4_dir = p4d_offset(pg_dir, address);
if (p4d_none(*p4_dir)) {
- pu_dir = vmem_pud_alloc();
+ pu_dir = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
if (!pu_dir)
goto out;
p4d_populate(&init_mm, p4_dir, pu_dir);
@@ -269,7 +246,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
pu_dir = pud_offset(p4_dir, address);
if (pud_none(*pu_dir)) {
- pm_dir = vmem_pmd_alloc();
+ pm_dir = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
if (!pm_dir)
goto out;
pud_populate(&init_mm, pu_dir, pm_dir);
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 01c6fbc3e85b..8ec88497a28d 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -1093,15 +1093,27 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
case BPF_JMP | BPF_JSGT | BPF_K: /* ((s64) dst > (s64) imm) */
mask = 0x2000; /* jh */
goto branch_ks;
+ case BPF_JMP | BPF_JSLT | BPF_K: /* ((s64) dst < (s64) imm) */
+ mask = 0x4000; /* jl */
+ goto branch_ks;
case BPF_JMP | BPF_JSGE | BPF_K: /* ((s64) dst >= (s64) imm) */
mask = 0xa000; /* jhe */
goto branch_ks;
+ case BPF_JMP | BPF_JSLE | BPF_K: /* ((s64) dst <= (s64) imm) */
+ mask = 0xc000; /* jle */
+ goto branch_ks;
case BPF_JMP | BPF_JGT | BPF_K: /* (dst_reg > imm) */
mask = 0x2000; /* jh */
goto branch_ku;
+ case BPF_JMP | BPF_JLT | BPF_K: /* (dst_reg < imm) */
+ mask = 0x4000; /* jl */
+ goto branch_ku;
case BPF_JMP | BPF_JGE | BPF_K: /* (dst_reg >= imm) */
mask = 0xa000; /* jhe */
goto branch_ku;
+ case BPF_JMP | BPF_JLE | BPF_K: /* (dst_reg <= imm) */
+ mask = 0xc000; /* jle */
+ goto branch_ku;
case BPF_JMP | BPF_JNE | BPF_K: /* (dst_reg != imm) */
mask = 0x7000; /* jne */
goto branch_ku;
@@ -1119,15 +1131,27 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
case BPF_JMP | BPF_JSGT | BPF_X: /* ((s64) dst > (s64) src) */
mask = 0x2000; /* jh */
goto branch_xs;
+ case BPF_JMP | BPF_JSLT | BPF_X: /* ((s64) dst < (s64) src) */
+ mask = 0x4000; /* jl */
+ goto branch_xs;
case BPF_JMP | BPF_JSGE | BPF_X: /* ((s64) dst >= (s64) src) */
mask = 0xa000; /* jhe */
goto branch_xs;
+ case BPF_JMP | BPF_JSLE | BPF_X: /* ((s64) dst <= (s64) src) */
+ mask = 0xc000; /* jle */
+ goto branch_xs;
case BPF_JMP | BPF_JGT | BPF_X: /* (dst > src) */
mask = 0x2000; /* jh */
goto branch_xu;
+ case BPF_JMP | BPF_JLT | BPF_X: /* (dst < src) */
+ mask = 0x4000; /* jl */
+ goto branch_xu;
case BPF_JMP | BPF_JGE | BPF_X: /* (dst >= src) */
mask = 0xa000; /* jhe */
goto branch_xu;
+ case BPF_JMP | BPF_JLE | BPF_X: /* (dst <= src) */
+ mask = 0xc000; /* jle */
+ goto branch_xu;
case BPF_JMP | BPF_JNE | BPF_X: /* (dst != src) */
mask = 0x7000; /* jne */
goto branch_xu;
@@ -1253,7 +1277,8 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp)
insn_count = bpf_jit_insn(jit, fp, i);
if (insn_count < 0)
return -1;
- jit->addrs[i + 1] = jit->prg; /* Next instruction address */
+ /* Next instruction address */
+ jit->addrs[i + insn_count] = jit->prg;
}
bpf_jit_epilogue(jit);
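/*
 * The new BPF_JLT/JLE/JSLT/JSLE cases reuse the JIT's existing branch-mask
 * scheme.  After an s390 compare, the condition code is 0 for "equal", 1
 * for "first operand low" and 2 for "first operand high"; the JIT keeps the
 * 4-bit branch mask pre-shifted into bits 12-15 of the halfword it emits
 * (mask bit 8 selects CC0, 4 selects CC1, 2 selects CC2, 1 selects CC3):
 *
 *	0x2000  mask 0010  CC2          jh   (>)
 *	0x4000  mask 0100  CC1          jl   (<)
 *	0xa000  mask 1010  CC0|CC2      jhe  (>=)
 *	0xc000  mask 1100  CC0|CC1      jle  (<=)
 *	0x7000  mask 0111  CC1|CC2|CC3  jne  (!=)
 *
 * A tiny helper expressing that mapping, for illustration only:
 */
static unsigned int sketch_cc_mask(int cc0, int cc1, int cc2, int cc3)
{
	return (unsigned int)((cc0 << 3) | (cc1 << 2) | (cc2 << 1) | cc3) << 12;
}
/* e.g. sketch_cc_mask(1, 1, 0, 0) == 0xc000, the "jle" mask used above */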
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index bd534b4d40e3..0ae3936e266f 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -24,6 +24,14 @@
bool zpci_unique_uid;
+static void update_uid_checking(bool new)
+{
+ if (zpci_unique_uid != new)
+ zpci_dbg(1, "uid checking:%d\n", new);
+
+ zpci_unique_uid = new;
+}
+
static inline void zpci_err_clp(unsigned int rsp, int rc)
{
struct {
@@ -319,7 +327,7 @@ static int clp_list_pci(struct clp_req_rsp_list_pci *rrb, void *data,
goto out;
}
- zpci_unique_uid = rrb->response.uid_checking;
+ update_uid_checking(rrb->response.uid_checking);
WARN_ON_ONCE(rrb->response.entry_size !=
sizeof(struct clp_fh_list_entry));
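
update_uid_checking() is the usual "log only when the value actually changes" pattern, so repeated CLP list operations do not flood the debug log. A minimal user-space sketch of the same pattern, with fprintf() standing in for zpci_dbg():

#include <stdbool.h>
#include <stdio.h>

static bool uid_checking;

static void update_uid_checking_sketch(bool new_state)
{
	if (uid_checking != new_state)
		fprintf(stderr, "uid checking:%d\n", new_state);

	uid_checking = new_state;
}

int main(void)
{
	update_uid_checking_sketch(true);	/* logs once */
	update_uid_checking_sketch(true);	/* state unchanged: no log */
	return 0;
}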
diff --git a/arch/s390/tools/gen_facilities.c b/arch/s390/tools/gen_facilities.c
index 025ea20fc4b4..29d72bf8ed2b 100644
--- a/arch/s390/tools/gen_facilities.c
+++ b/arch/s390/tools/gen_facilities.c
@@ -41,7 +41,7 @@ static struct facility_def facility_defs[] = {
27, /* mvcos */
32, /* compare and swap and store */
33, /* compare and swap and store 2 */
- 34, /* general extension facility */
+ 34, /* general instructions extension */
35, /* execute extensions */
#endif
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
@@ -54,6 +54,9 @@ static struct facility_def facility_defs[] = {
#ifdef CONFIG_HAVE_MARCH_Z13_FEATURES
53, /* load-and-zero-rightmost-byte, etc. */
#endif
+#ifdef CONFIG_HAVE_MARCH_Z14_FEATURES
+ 58, /* miscellaneous-instruction-extension 2 */
+#endif
-1 /* END */
}
},
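
For reference, facility 58 lands in the facility-list doublewords the way the sketch below computes it, assuming the usual s390 big-endian facility-bit numbering (bit 0 is the most significant bit of the first doubleword). This mirrors what gen_facilities emits, but it is an illustration rather than the tool's actual code.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int facility = 58;	/* miscellaneous-instruction-extension 2 */
	unsigned int word = facility / 64;	/* doubleword index */
	uint64_t mask = 1ULL << (63 - (facility % 64));

	printf("facility %u -> doubleword %u, mask 0x%016llx\n",
	       facility, word, (unsigned long long)mask);
	return 0;
}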
diff --git a/arch/sh/configs/se7751_defconfig b/arch/sh/configs/se7751_defconfig
index 75c92fc1876b..56b5e4ce8d4a 100644
--- a/arch/sh/configs/se7751_defconfig
+++ b/arch/sh/configs/se7751_defconfig
@@ -28,7 +28,6 @@ CONFIG_IP_PNP_RARP=y
# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_NETFILTER=y
-CONFIG_NETFILTER_DEBUG=y
CONFIG_IP_NF_QUEUE=y
CONFIG_MTD=y
CONFIG_MTD_PARTITIONS=y
diff --git a/arch/sh/include/asm/bug.h b/arch/sh/include/asm/bug.h
index c9828f785ca0..5b5086367639 100644
--- a/arch/sh/include/asm/bug.h
+++ b/arch/sh/include/asm/bug.h
@@ -24,14 +24,14 @@
*/
#ifdef CONFIG_DEBUG_BUGVERBOSE
#define _EMIT_BUG_ENTRY \
- "\t.pushsection __bug_table,\"a\"\n" \
+ "\t.pushsection __bug_table,\"aw\"\n" \
"2:\t.long 1b, %O1\n" \
"\t.short %O2, %O3\n" \
"\t.org 2b+%O4\n" \
"\t.popsection\n"
#else
#define _EMIT_BUG_ENTRY \
- "\t.pushsection __bug_table,\"a\"\n" \
+ "\t.pushsection __bug_table,\"aw\"\n" \
"2:\t.long 1b\n" \
"\t.short %O3\n" \
"\t.org 2b+%O4\n" \
diff --git a/arch/sh/include/asm/futex.h b/arch/sh/include/asm/futex.h
index d0078747d308..8f8cf941a8cd 100644
--- a/arch/sh/include/asm/futex.h
+++ b/arch/sh/include/asm/futex.h
@@ -27,21 +27,12 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
return atomic_futex_op_cmpxchg_inatomic(uval, uaddr, oldval, newval);
}
-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval,
+ u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- u32 oparg = (encoded_op << 8) >> 20;
- u32 cmparg = (encoded_op << 20) >> 20;
u32 oldval, newval, prev;
int ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
-
pagefault_disable();
do {
@@ -80,17 +71,8 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = ((int)oldval < (int)cmparg); break;
- case FUTEX_OP_CMP_GE: ret = ((int)oldval >= (int)cmparg); break;
- case FUTEX_OP_CMP_LE: ret = ((int)oldval <= (int)cmparg); break;
- case FUTEX_OP_CMP_GT: ret = ((int)oldval > (int)cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
return ret;
}
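
The arch helper no longer decodes encoded_op or performs the comparison; that work moved to the generic futex code. Below is a loose, kernel-style sketch of how the generic side is assumed to drive the new arch_futex_atomic_op_inuser(). It relies on the FUTEX_OP_* definitions from <linux/futex.h> and is an illustration, not the actual kernel/futex.c code.

/* Loose sketch of the assumed generic-side flow; the real code lives in
 * kernel/futex.c.  FUTEX_OP_* come from <linux/futex.h>. */
static int futex_op_inuser_sketch(unsigned int encoded_op, unsigned int *uaddr)
{
	int op = (encoded_op >> 28) & 7;
	int cmp = (encoded_op >> 24) & 15;
	int oparg = (encoded_op << 8) >> 20;
	int cmparg = (encoded_op << 20) >> 20;
	int oldval, ret;

	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
		oparg = 1 << oparg;

	ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
	if (ret)
		return ret;

	switch (cmp) {
	case FUTEX_OP_CMP_EQ: return oldval == cmparg;
	case FUTEX_OP_CMP_NE: return oldval != cmparg;
	case FUTEX_OP_CMP_LT: return oldval < cmparg;
	case FUTEX_OP_CMP_GE: return oldval >= cmparg;
	case FUTEX_OP_CMP_LE: return oldval <= cmparg;
	case FUTEX_OP_CMP_GT: return oldval > cmparg;
	default: return -ENOSYS;
	}
}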
diff --git a/arch/sh/include/asm/spinlock-cas.h b/arch/sh/include/asm/spinlock-cas.h
index c46e8cc7b515..5ed7dbbd94ff 100644
--- a/arch/sh/include/asm/spinlock-cas.h
+++ b/arch/sh/include/asm/spinlock-cas.h
@@ -29,11 +29,6 @@ static inline unsigned __sl_cas(volatile unsigned *p, unsigned old, unsigned new
#define arch_spin_is_locked(x) ((x)->lock <= 0)
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
- smp_cond_load_acquire(&lock->lock, VAL > 0);
-}
-
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
while (!__sl_cas(&lock->lock, 1, 0));
diff --git a/arch/sh/include/asm/spinlock-llsc.h b/arch/sh/include/asm/spinlock-llsc.h
index cec78143fa83..f77263aae760 100644
--- a/arch/sh/include/asm/spinlock-llsc.h
+++ b/arch/sh/include/asm/spinlock-llsc.h
@@ -21,11 +21,6 @@
#define arch_spin_is_locked(x) ((x)->lock <= 0)
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
- smp_cond_load_acquire(&lock->lock, VAL > 0);
-}
-
/*
* Simple spin lock operations. There are two variants, one clears IRQ's
* on the local processor, one does not.
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index 46e0d635e36f..51a8bc967e75 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -36,7 +36,8 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
}
static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+ unsigned long start, unsigned long end)
{
tlb->mm = mm;
tlb->start = start;
@@ -47,9 +48,10 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start
}
static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+ unsigned long start, unsigned long end, bool force)
{
- if (tlb->fullmm)
+ if (tlb->fullmm || force)
flush_tlb_mm(tlb->mm);
/* keep the page table cache within bounds */
diff --git a/arch/sh/include/uapi/asm/ioctls.h b/arch/sh/include/uapi/asm/ioctls.h
index eec7901e9e65..787bac9f67da 100644
--- a/arch/sh/include/uapi/asm/ioctls.h
+++ b/arch/sh/include/uapi/asm/ioctls.h
@@ -93,7 +93,7 @@
#define TIOCGPKT _IOR('T', 0x38, int) /* Get packet mode state */
#define TIOCGPTLCK _IOR('T', 0x39, int) /* Get Pty lock state */
#define TIOCGEXCL _IOR('T', 0x40, int) /* Get exclusive mode state */
-#define TIOCGPTPEER _IOR('T', 0x41, int) /* Safely open the slave */
+#define TIOCGPTPEER _IO('T', 0x41) /* Safely open the slave */
#define TIOCSERCONFIG _IO('T', 83) /* 0x5453 */
#define TIOCSERGWILD _IOR('T', 84, int) /* 0x5454 */
diff --git a/arch/sparc/configs/sparc32_defconfig b/arch/sparc/configs/sparc32_defconfig
index c74d3701ad68..207a43a2d8b3 100644
--- a/arch/sparc/configs/sparc32_defconfig
+++ b/arch/sparc/configs/sparc32_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_LOG_BUF_SHIFT=14
@@ -23,7 +22,6 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_INET_AH=y
CONFIG_INET_ESP=y
CONFIG_INET_IPCOMP=y
-# CONFIG_INET_LRO is not set
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
@@ -69,7 +67,6 @@ CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
-CONFIG_AUTOFS_FS=m
CONFIG_AUTOFS4_FS=m
CONFIG_ISO9660_FS=m
CONFIG_PROC_KCORE=y
@@ -82,7 +79,6 @@ CONFIG_NLS=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_SCHED_DEBUG is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_KGDB=y
CONFIG_KGDB_TESTS=y
CONFIG_CRYPTO_NULL=m
diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig
index b2e650d1764f..ca8609d7292f 100644
--- a/arch/sparc/configs/sparc64_defconfig
+++ b/arch/sparc/configs/sparc64_defconfig
@@ -1,5 +1,4 @@
CONFIG_64BIT=y
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
@@ -184,7 +183,6 @@ CONFIG_HID_TOPSEED=y
CONFIG_HID_THRUSTMASTER=y
CONFIG_HID_ZEROPLUS=y
CONFIG_USB=y
-# CONFIG_USB_DEVICE_CLASS is not set
CONFIG_USB_EHCI_HCD=m
# CONFIG_USB_EHCI_TT_NEWSCHED is not set
CONFIG_USB_OHCI_HCD=y
@@ -210,8 +208,6 @@ CONFIG_LOCKUP_DETECTOR=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_SCHEDSTATS=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_UPROBE_EVENTS=y
CONFIG_KEYS=y
diff --git a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c
index c90930de76ba..3cd4f6b198b6 100644
--- a/arch/sparc/crypto/aes_glue.c
+++ b/arch/sparc/crypto/aes_glue.c
@@ -344,8 +344,7 @@ static void ctr_crypt_final(struct crypto_sparc64_aes_ctx *ctx,
ctx->ops->ecb_encrypt(&ctx->key[0], (const u64 *)ctrblk,
keystream, AES_BLOCK_SIZE);
- crypto_xor((u8 *) keystream, src, nbytes);
- memcpy(dst, keystream, nbytes);
+ crypto_xor_cpy(dst, (u8 *) keystream, src, nbytes);
crypto_inc(ctrblk, AES_BLOCK_SIZE);
}
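
crypto_xor_cpy() collapses the previous crypto_xor()-into-a-buffer plus memcpy() into one destination-writing XOR. A naive byte-wise sketch of its semantics (the real helper works on larger word-sized chunks):

#include <stddef.h>

/* dst = src1 ^ src2, byte by byte */
static void xor_cpy_sketch(unsigned char *dst, const unsigned char *src1,
			   const unsigned char *src2, size_t len)
{
	for (size_t i = 0; i < len; i++)
		dst[i] = src1[i] ^ src2[i];
}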
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
index ee3f11c43cda..7643e979e333 100644
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -29,6 +29,8 @@ int atomic_xchg(atomic_t *, int);
int __atomic_add_unless(atomic_t *, int, int);
void atomic_set(atomic_t *, int);
+#define atomic_set_release(v, i) atomic_set((v), (i))
+
#define atomic_read(v) ACCESS_ONCE((v)->counter)
#define atomic_add(i, v) ((void)atomic_add_return( (int)(i), (v)))
diff --git a/arch/sparc/include/asm/futex_64.h b/arch/sparc/include/asm/futex_64.h
index 4e899b0dabf7..1cfd89d92208 100644
--- a/arch/sparc/include/asm/futex_64.h
+++ b/arch/sparc/include/asm/futex_64.h
@@ -29,22 +29,14 @@
: "r" (uaddr), "r" (oparg), "i" (-EFAULT) \
: "memory")
-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+ u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret, tem;
- if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
- return -EFAULT;
if (unlikely((((unsigned long) uaddr) & 0x3UL)))
return -EINVAL;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
pagefault_disable();
switch (op) {
@@ -69,17 +61,9 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
index 2cddcda4f85f..87841d687f8d 100644
--- a/arch/sparc/include/asm/mmu_context_64.h
+++ b/arch/sparc/include/asm/mmu_context_64.h
@@ -27,9 +27,11 @@ void destroy_context(struct mm_struct *mm);
void __tsb_context_switch(unsigned long pgd_pa,
struct tsb_config *tsb_base,
struct tsb_config *tsb_huge,
- unsigned long tsb_descr_pa);
+ unsigned long tsb_descr_pa,
+ unsigned long secondary_ctx);
-static inline void tsb_context_switch(struct mm_struct *mm)
+static inline void tsb_context_switch_ctx(struct mm_struct *mm,
+ unsigned long ctx)
{
__tsb_context_switch(__pa(mm->pgd),
&mm->context.tsb_block[MM_TSB_BASE],
@@ -40,9 +42,12 @@ static inline void tsb_context_switch(struct mm_struct *mm)
#else
NULL
#endif
- , __pa(&mm->context.tsb_descr[MM_TSB_BASE]));
+ , __pa(&mm->context.tsb_descr[MM_TSB_BASE]),
+ ctx);
}
+#define tsb_context_switch(X) tsb_context_switch_ctx(X, 0)
+
void tsb_grow(struct mm_struct *mm,
unsigned long tsb_index,
unsigned long mm_rss);
@@ -112,8 +117,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
* cpu0 to update it's TSB because at that point the cpu_vm_mask
* only had cpu1 set in it.
*/
- load_secondary_context(mm);
- tsb_context_switch(mm);
+ tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));
/* Any time a processor runs a context on an address space
* for the first time, we must flush that context out of the
diff --git a/arch/sparc/include/asm/page_32.h b/arch/sparc/include/asm/page_32.h
index 0efd0583a8c9..6249214148c2 100644
--- a/arch/sparc/include/asm/page_32.h
+++ b/arch/sparc/include/asm/page_32.h
@@ -68,6 +68,7 @@ typedef struct { unsigned long iopgprot; } iopgprot_t;
#define iopgprot_val(x) ((x).iopgprot)
#define __pte(x) ((pte_t) { (x) } )
+#define __pmd(x) ((pmd_t) { { (x) }, })
#define __iopte(x) ((iopte_t) { (x) } )
#define __pgd(x) ((pgd_t) { (x) } )
#define __ctxd(x) ((ctxd_t) { (x) } )
@@ -95,6 +96,7 @@ typedef unsigned long iopgprot_t;
#define iopgprot_val(x) (x)
#define __pte(x) (x)
+#define __pmd(x) ((pmd_t) { { (x) }, })
#define __iopte(x) (x)
#define __pgd(x) (x)
#define __ctxd(x) (x)
diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h
index 8011e79f59c9..67345b2dc408 100644
--- a/arch/sparc/include/asm/spinlock_32.h
+++ b/arch/sparc/include/asm/spinlock_32.h
@@ -14,11 +14,6 @@
#define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
- smp_cond_load_acquire(&lock->lock, !VAL);
-}
-
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
__asm__ __volatile__(
diff --git a/arch/sparc/include/asm/spitfire.h b/arch/sparc/include/asm/spitfire.h
index 1d8321c827a8..1b1286d05069 100644
--- a/arch/sparc/include/asm/spitfire.h
+++ b/arch/sparc/include/asm/spitfire.h
@@ -47,10 +47,26 @@
#define SUN4V_CHIP_NIAGARA5 0x05
#define SUN4V_CHIP_SPARC_M6 0x06
#define SUN4V_CHIP_SPARC_M7 0x07
+#define SUN4V_CHIP_SPARC_M8 0x08
#define SUN4V_CHIP_SPARC64X 0x8a
#define SUN4V_CHIP_SPARC_SN 0x8b
#define SUN4V_CHIP_UNKNOWN 0xff
+/*
+ * The following CPU_ID_xxx constants are used
+ * to identify the CPU type in the setup phase
+ * (see head_64.S)
+ */
+#define CPU_ID_NIAGARA1 ('1')
+#define CPU_ID_NIAGARA2 ('2')
+#define CPU_ID_NIAGARA3 ('3')
+#define CPU_ID_NIAGARA4 ('4')
+#define CPU_ID_NIAGARA5 ('5')
+#define CPU_ID_M6 ('6')
+#define CPU_ID_M7 ('7')
+#define CPU_ID_M8 ('8')
+#define CPU_ID_SONOMA1 ('N')
+
#ifndef __ASSEMBLY__
enum ultra_tlb_layout {
diff --git a/arch/sparc/include/asm/trap_block.h b/arch/sparc/include/asm/trap_block.h
index ec9c04de3664..ff05992dae7a 100644
--- a/arch/sparc/include/asm/trap_block.h
+++ b/arch/sparc/include/asm/trap_block.h
@@ -54,6 +54,7 @@ extern struct trap_per_cpu trap_block[NR_CPUS];
void init_cur_cpu_trap(struct thread_info *);
void setup_tba(void);
extern int ncpus_probed;
+extern u64 cpu_mondo_counter[NR_CPUS];
unsigned long real_hard_smp_processor_id(void);
diff --git a/arch/sparc/include/uapi/asm/ioctls.h b/arch/sparc/include/uapi/asm/ioctls.h
index 6d27398632ea..f5df72b93bb2 100644
--- a/arch/sparc/include/uapi/asm/ioctls.h
+++ b/arch/sparc/include/uapi/asm/ioctls.h
@@ -88,7 +88,7 @@
#define TIOCGPTN _IOR('t', 134, unsigned int) /* Get Pty Number */
#define TIOCSPTLCK _IOW('t', 135, int) /* Lock/unlock PTY */
#define TIOCSIG _IOW('t', 136, int) /* Generate signal on Pty slave */
-#define TIOCGPTPEER _IOR('t', 137, int) /* Safely open the slave */
+#define TIOCGPTPEER _IO('t', 137) /* Safely open the slave */
/* Little f */
#define FIOCLEX _IO('f', 1)
diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h
index 186fd8199f54..b2f5c50d0947 100644
--- a/arch/sparc/include/uapi/asm/socket.h
+++ b/arch/sparc/include/uapi/asm/socket.h
@@ -98,6 +98,8 @@
#define SO_PEERGROUPS 0x003d
+#define SO_ZEROCOPY 0x003e
+
/* Security levels - as per NRL IPv6 - don't actually do anything */
#define SO_SECURITY_AUTHENTICATION 0x5001
#define SO_SECURITY_ENCRYPTION_TRANSPORT 0x5002
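
SPARC picks up the SO_ZEROCOPY option number added this cycle. A hedged usage sketch, assuming a libc whose headers already expose SO_ZEROCOPY; completion notifications, which arrive on the socket error queue when MSG_ZEROCOPY sends are used, are omitted.

#include <sys/socket.h>

/* Opt a socket in to zero-copy transmission. */
static int enable_zerocopy(int fd)
{
	int one = 1;

	return setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one));
}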
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c
index 493e023a468a..ef4f18f7a674 100644
--- a/arch/sparc/kernel/cpu.c
+++ b/arch/sparc/kernel/cpu.c
@@ -506,6 +506,12 @@ static void __init sun4v_cpu_probe(void)
sparc_pmu_type = "sparc-m7";
break;
+ case SUN4V_CHIP_SPARC_M8:
+ sparc_cpu_type = "SPARC-M8";
+ sparc_fpu_type = "SPARC-M8 integrated FPU";
+ sparc_pmu_type = "sparc-m8";
+ break;
+
case SUN4V_CHIP_SPARC_SN:
sparc_cpu_type = "SPARC-SN";
sparc_fpu_type = "SPARC-SN integrated FPU";
diff --git a/arch/sparc/kernel/cpumap.c b/arch/sparc/kernel/cpumap.c
index 45c820e1cba5..90d550bbfeef 100644
--- a/arch/sparc/kernel/cpumap.c
+++ b/arch/sparc/kernel/cpumap.c
@@ -328,6 +328,7 @@ static int iterate_cpu(struct cpuinfo_tree *t, unsigned int root_index)
case SUN4V_CHIP_NIAGARA5:
case SUN4V_CHIP_SPARC_M6:
case SUN4V_CHIP_SPARC_M7:
+ case SUN4V_CHIP_SPARC_M8:
case SUN4V_CHIP_SPARC_SN:
case SUN4V_CHIP_SPARC64X:
rover_inc_table = niagara_iterate_method;
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index 41a407328667..78e0211753d2 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -424,22 +424,25 @@ EXPORT_SYMBOL(sun4v_chip_type)
nop
70: ldub [%g1 + 7], %g2
- cmp %g2, '3'
+ cmp %g2, CPU_ID_NIAGARA3
be,pt %xcc, 5f
mov SUN4V_CHIP_NIAGARA3, %g4
- cmp %g2, '4'
+ cmp %g2, CPU_ID_NIAGARA4
be,pt %xcc, 5f
mov SUN4V_CHIP_NIAGARA4, %g4
- cmp %g2, '5'
+ cmp %g2, CPU_ID_NIAGARA5
be,pt %xcc, 5f
mov SUN4V_CHIP_NIAGARA5, %g4
- cmp %g2, '6'
+ cmp %g2, CPU_ID_M6
be,pt %xcc, 5f
mov SUN4V_CHIP_SPARC_M6, %g4
- cmp %g2, '7'
+ cmp %g2, CPU_ID_M7
be,pt %xcc, 5f
mov SUN4V_CHIP_SPARC_M7, %g4
- cmp %g2, 'N'
+ cmp %g2, CPU_ID_M8
+ be,pt %xcc, 5f
+ mov SUN4V_CHIP_SPARC_M8, %g4
+ cmp %g2, CPU_ID_SONOMA1
be,pt %xcc, 5f
mov SUN4V_CHIP_SPARC_SN, %g4
ba,pt %xcc, 49f
@@ -448,10 +451,10 @@ EXPORT_SYMBOL(sun4v_chip_type)
91: sethi %hi(prom_cpu_compatible), %g1
or %g1, %lo(prom_cpu_compatible), %g1
ldub [%g1 + 17], %g2
- cmp %g2, '1'
+ cmp %g2, CPU_ID_NIAGARA1
be,pt %xcc, 5f
mov SUN4V_CHIP_NIAGARA1, %g4
- cmp %g2, '2'
+ cmp %g2, CPU_ID_NIAGARA2
be,pt %xcc, 5f
mov SUN4V_CHIP_NIAGARA2, %g4
@@ -602,6 +605,9 @@ niagara_tlb_fixup:
cmp %g1, SUN4V_CHIP_SPARC_M7
be,pt %xcc, niagara4_patch
nop
+ cmp %g1, SUN4V_CHIP_SPARC_M8
+ be,pt %xcc, niagara4_patch
+ nop
cmp %g1, SUN4V_CHIP_SPARC_SN
be,pt %xcc, niagara4_patch
nop
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index 24f21c726dfa..9ebebf1fd93d 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -673,12 +673,14 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
static int dma_4v_supported(struct device *dev, u64 device_mask)
{
struct iommu *iommu = dev->archdata.iommu;
- u64 dma_addr_mask;
+ u64 dma_addr_mask = iommu->dma_addr_mask;
- if (device_mask > DMA_BIT_MASK(32) && iommu->atu)
- dma_addr_mask = iommu->atu->dma_addr_mask;
- else
- dma_addr_mask = iommu->dma_addr_mask;
+ if (device_mask > DMA_BIT_MASK(32)) {
+ if (iommu->atu)
+ dma_addr_mask = iommu->atu->dma_addr_mask;
+ else
+ return 0;
+ }
if ((device_mask & dma_addr_mask) == dma_addr_mask)
return 1;
@@ -1264,8 +1266,6 @@ static int pci_sun4v_probe(struct platform_device *op)
* ATU group, but ATU hcalls won't be available.
*/
hv_atu = false;
- pr_err(PFX "Could not register hvapi ATU err=%d\n",
- err);
} else {
pr_info(PFX "Registered hvapi ATU major[%lu] minor[%lu]\n",
vatu_major, vatu_minor);
diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c
index a38787b84322..732af9a9f6dd 100644
--- a/arch/sparc/kernel/pcic.c
+++ b/arch/sparc/kernel/pcic.c
@@ -602,7 +602,7 @@ void pcibios_fixup_bus(struct pci_bus *bus)
{
struct pci_dev *dev;
int i, has_io, has_mem;
- unsigned int cmd;
+ unsigned int cmd = 0;
struct linux_pcic *pcic;
/* struct linux_pbm_info* pbm = &pcic->pbm; */
int node;
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index 4d9c3e13c150..150ee7d4b059 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -288,10 +288,17 @@ static void __init sun4v_patch(void)
sun4v_patch_2insn_range(&__sun4v_2insn_patch,
&__sun4v_2insn_patch_end);
- if (sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
- sun4v_chip_type == SUN4V_CHIP_SPARC_SN)
+
+ switch (sun4v_chip_type) {
+ case SUN4V_CHIP_SPARC_M7:
+ case SUN4V_CHIP_SPARC_M8:
+ case SUN4V_CHIP_SPARC_SN:
sun_m7_patch_2insn_range(&__sun_m7_2insn_patch,
&__sun_m7_2insn_patch_end);
+ break;
+ default:
+ break;
+ }
sun4v_hvapi_init();
}
@@ -529,6 +536,7 @@ static void __init init_sparc64_elf_hwcap(void)
sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
sun4v_chip_type == SUN4V_CHIP_SPARC64X)
cap |= HWCAP_SPARC_BLKINIT;
@@ -538,6 +546,7 @@ static void __init init_sparc64_elf_hwcap(void)
sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
sun4v_chip_type == SUN4V_CHIP_SPARC64X)
cap |= HWCAP_SPARC_N2;
@@ -568,6 +577,7 @@ static void __init init_sparc64_elf_hwcap(void)
sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
sun4v_chip_type == SUN4V_CHIP_SPARC64X)
cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 |
@@ -578,6 +588,7 @@ static void __init init_sparc64_elf_hwcap(void)
sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
sun4v_chip_type == SUN4V_CHIP_SPARC64X)
cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC |
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index fdf31040a7dc..3218bc43302e 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -622,22 +622,48 @@ retry:
}
}
-/* Multi-cpu list version. */
+#define CPU_MONDO_COUNTER(cpuid) (cpu_mondo_counter[cpuid])
+#define MONDO_USEC_WAIT_MIN 2
+#define MONDO_USEC_WAIT_MAX 100
+#define MONDO_RETRY_LIMIT 500000
+
+/* Multi-cpu list version.
+ *
+ * Deliver xcalls to the 'cnt' cpus listed in 'cpu_list'.
+ * Sometimes not all cpus receive the mondo, requiring us to re-send
+ * it until every cpu has received it, or until the remaining cpus are
+ * truly stuck and we time out.
+ * Occasionally a target cpu strand is borrowed briefly by the hypervisor
+ * to perform a guest service, such as PCIe error handling. Considering
+ * that service time, an overall wait of 1 second is reasonable for 1 cpu.
+ * Two in-between mondo check wait times are defined here: 2 usec for a
+ * quick single-cpu turnaround and up to 100 usec for large cpu counts.
+ * Delivering mondos to a large number of cpus can take longer, so we
+ * adjust the retry count as long as the target cpus are making forward
+ * progress.
+ */
static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
- int retries, this_cpu, prev_sent, i, saw_cpu_error;
+ int this_cpu, tot_cpus, prev_sent, i, rem;
+ int usec_wait, retries, tot_retries;
+ u16 first_cpu = 0xffff;
+ unsigned long xc_rcvd = 0;
unsigned long status;
+ int ecpuerror_id = 0;
+ int enocpu_id = 0;
u16 *cpu_list;
+ u16 cpu;
this_cpu = smp_processor_id();
-
cpu_list = __va(tb->cpu_list_pa);
-
- saw_cpu_error = 0;
- retries = 0;
+ usec_wait = cnt * MONDO_USEC_WAIT_MIN;
+ if (usec_wait > MONDO_USEC_WAIT_MAX)
+ usec_wait = MONDO_USEC_WAIT_MAX;
+ retries = tot_retries = 0;
+ tot_cpus = cnt;
prev_sent = 0;
+
do {
- int forward_progress, n_sent;
+ int n_sent, mondo_delivered, target_cpu_busy;
status = sun4v_cpu_mondo_send(cnt,
tb->cpu_list_pa,
@@ -645,94 +671,113 @@ static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
/* HV_EOK means all cpus received the xcall, we're done. */
if (likely(status == HV_EOK))
- break;
+ goto xcall_done;
+
+ /* If not these non-fatal errors, panic */
+ if (unlikely((status != HV_EWOULDBLOCK) &&
+ (status != HV_ECPUERROR) &&
+ (status != HV_ENOCPU)))
+ goto fatal_errors;
/* First, see if we made any forward progress.
*
+ * Go through the cpu_list, count the target cpus that have
+ * received our mondo (n_sent), and those that did not (rem).
+ * Re-pack cpu_list with the cpus that remain to be retried at the
+ * front - this simplifies tracking the truly stalled cpus.
+ *
* The hypervisor indicates successful sends by setting
* cpu list entries to the value 0xffff.
+ *
+ * EWOULDBLOCK means some target cpus did not receive the
+ * mondo and retry usually helps.
+ *
+ * ECPUERROR means at least one target cpu is in error state,
+ * it's usually safe to skip the faulty cpu and retry.
+ *
+ * ENOCPU means one of the target cpus does not belong to the
+ * domain, perhaps because it was offlined, which is unexpected but
+ * not fatal; it's okay to skip the offlined cpu.
*/
+ rem = 0;
n_sent = 0;
for (i = 0; i < cnt; i++) {
- if (likely(cpu_list[i] == 0xffff))
+ cpu = cpu_list[i];
+ if (likely(cpu == 0xffff)) {
n_sent++;
+ } else if ((status == HV_ECPUERROR) &&
+ (sun4v_cpu_state(cpu) == HV_CPU_STATE_ERROR)) {
+ ecpuerror_id = cpu + 1;
+ } else if (status == HV_ENOCPU && !cpu_online(cpu)) {
+ enocpu_id = cpu + 1;
+ } else {
+ cpu_list[rem++] = cpu;
+ }
}
- forward_progress = 0;
- if (n_sent > prev_sent)
- forward_progress = 1;
+ /* No cpu remained, we're done. */
+ if (rem == 0)
+ break;
- prev_sent = n_sent;
+ /* Otherwise, update the cpu count for retry. */
+ cnt = rem;
- /* If we get a HV_ECPUERROR, then one or more of the cpus
- * in the list are in error state. Use the cpu_state()
- * hypervisor call to find out which cpus are in error state.
+ /* Record the overall number of mondos received by the
+ * first of the remaining cpus.
*/
- if (unlikely(status == HV_ECPUERROR)) {
- for (i = 0; i < cnt; i++) {
- long err;
- u16 cpu;
+ if (first_cpu != cpu_list[0]) {
+ first_cpu = cpu_list[0];
+ xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
+ }
- cpu = cpu_list[i];
- if (cpu == 0xffff)
- continue;
+ /* Was any mondo delivered successfully? */
+ mondo_delivered = (n_sent > prev_sent);
+ prev_sent = n_sent;
- err = sun4v_cpu_state(cpu);
- if (err == HV_CPU_STATE_ERROR) {
- saw_cpu_error = (cpu + 1);
- cpu_list[i] = 0xffff;
- }
- }
- } else if (unlikely(status != HV_EWOULDBLOCK))
- goto fatal_mondo_error;
+ /* or, was any target cpu busy processing other mondos? */
+ target_cpu_busy = (xc_rcvd < CPU_MONDO_COUNTER(first_cpu));
+ xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
- /* Don't bother rewriting the CPU list, just leave the
- * 0xffff and non-0xffff entries in there and the
- * hypervisor will do the right thing.
- *
- * Only advance timeout state if we didn't make any
- * forward progress.
+ /* The retry count only tracks lack of progress. If we're making
+ * progress, reset the retry count.
*/
- if (unlikely(!forward_progress)) {
- if (unlikely(++retries > 10000))
- goto fatal_mondo_timeout;
-
- /* Delay a little bit to let other cpus catch up
- * on their cpu mondo queue work.
- */
- udelay(2 * cnt);
+ if (likely(mondo_delivered || target_cpu_busy)) {
+ tot_retries += retries;
+ retries = 0;
+ } else if (unlikely(retries > MONDO_RETRY_LIMIT)) {
+ goto fatal_mondo_timeout;
}
- } while (1);
- if (unlikely(saw_cpu_error))
- goto fatal_mondo_cpu_error;
+ /* Delay a little bit to let other cpus catch up on
+ * their cpu mondo queue work.
+ */
+ if (!mondo_delivered)
+ udelay(usec_wait);
- return;
+ retries++;
+ } while (1);
-fatal_mondo_cpu_error:
- printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
- "(including %d) were in error state\n",
- this_cpu, saw_cpu_error - 1);
+xcall_done:
+ if (unlikely(ecpuerror_id > 0)) {
+ pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) was in error state\n",
+ this_cpu, ecpuerror_id - 1);
+ } else if (unlikely(enocpu_id > 0)) {
+ pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) does not belong to the domain\n",
+ this_cpu, enocpu_id - 1);
+ }
return;
+fatal_errors:
+ /* fatal errors include bad alignment, etc */
+ pr_crit("CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) mondo_block_pa(%lx)\n",
+ this_cpu, tot_cpus, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
+ panic("Unexpected SUN4V mondo error %lu\n", status);
+
fatal_mondo_timeout:
- printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
- " progress after %d retries.\n",
- this_cpu, retries);
- goto dump_cpu_list_and_out;
-
-fatal_mondo_error:
- printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
- this_cpu, status);
- printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
- "mondo_block_pa(%lx)\n",
- this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
-
-dump_cpu_list_and_out:
- printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
- for (i = 0; i < cnt; i++)
- printk("%u ", cpu_list[i]);
- printk("]\n");
+ /* some cpus are not responding to the cpu mondo */
+ pr_crit("CPU[%d]: SUN4V mondo timeout, cpu(%d) made no forward progress after %d retries. Total target cpus(%d).\n",
+ this_cpu, first_cpu, (tot_retries + retries), tot_cpus);
+ panic("SUN4V mondo timeout panic\n");
}
static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);
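
The rewritten delivery loop keys its retry budget to forward progress: retries only accumulate while nothing improves, and the short delay is applied only when no mondo was delivered. A self-contained toy simulation of that policy (the limits and the simulated "send" are made up):

#include <stdbool.h>
#include <stdio.h>

#define RETRY_LIMIT 5

/* Simulated "send": one extra target receives the message every third
 * attempt, so progress is intermittent. */
static int received, targets = 4, attempt;

static int send_once(void)
{
	attempt++;
	if (attempt % 3 == 0 && received < targets)
		received++;
	return received;
}

int main(void)
{
	int retries = 0, prev = 0;

	while (received < targets) {
		int now = send_once();
		bool progress = (now > prev);

		prev = now;
		if (progress)
			retries = 0;		/* progress resets the budget */
		else if (++retries > RETRY_LIMIT) {
			puts("timed out: no forward progress");
			return 1;
		}
	}
	puts("all targets reached");
	return 0;
}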
diff --git a/arch/sparc/kernel/sun4v_ivec.S b/arch/sparc/kernel/sun4v_ivec.S
index 559bc5e9c199..34631995859a 100644
--- a/arch/sparc/kernel/sun4v_ivec.S
+++ b/arch/sparc/kernel/sun4v_ivec.S
@@ -26,6 +26,21 @@ sun4v_cpu_mondo:
ldxa [%g0] ASI_SCRATCHPAD, %g4
sub %g4, TRAP_PER_CPU_FAULT_INFO, %g4
+ /* Get smp_processor_id() into %g3 */
+ sethi %hi(trap_block), %g5
+ or %g5, %lo(trap_block), %g5
+ sub %g4, %g5, %g3
+ srlx %g3, TRAP_BLOCK_SZ_SHIFT, %g3
+
+ /* Increment cpu_mondo_counter[smp_processor_id()] */
+ sethi %hi(cpu_mondo_counter), %g5
+ or %g5, %lo(cpu_mondo_counter), %g5
+ sllx %g3, 3, %g3
+ add %g5, %g3, %g5
+ ldx [%g5], %g3
+ add %g3, 1, %g3
+ stx %g3, [%g5]
+
/* Get CPU mondo queue base phys address into %g7. */
ldx [%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7
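
The added assembly derives the cpu id from the trap_block slot and increments that cpu's entry in cpu_mondo_counter[], which the sender side reads to detect that a busy target is still consuming mondos. A C-level reading of those lines, as a sketch rather than a drop-in replacement:

/* Sketch only: assumes sizeof(struct trap_per_cpu) == 1 << TRAP_BLOCK_SZ_SHIFT,
 * so that pointer subtraction yields the cpu id, matching the shift done in
 * the assembly above. */
static inline void count_cpu_mondo(struct trap_per_cpu *tb)
{
	unsigned long cpu = tb - trap_block;	/* index into trap_block[] */

	cpu_mondo_counter[cpu]++;
}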
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index 196ee5eb4d48..ad31af1dd726 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -2733,6 +2733,7 @@ void do_getpsr(struct pt_regs *regs)
}
}
+u64 cpu_mondo_counter[NR_CPUS] = {0};
struct trap_per_cpu trap_block[NR_CPUS];
EXPORT_SYMBOL(trap_block);
diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S
index 07c0df924960..db872dbfafe9 100644
--- a/arch/sparc/kernel/tsb.S
+++ b/arch/sparc/kernel/tsb.S
@@ -360,6 +360,7 @@ tsb_flush:
* %o1: TSB base config pointer
* %o2: TSB huge config pointer, or NULL if none
* %o3: Hypervisor TSB descriptor physical address
+ * %o4: Secondary context to load, if non-zero
*
* We have to run this whole thing with interrupts
* disabled so that the current cpu doesn't change
@@ -372,6 +373,17 @@ __tsb_context_switch:
rdpr %pstate, %g1
wrpr %g1, PSTATE_IE, %pstate
+ brz,pn %o4, 1f
+ mov SECONDARY_CONTEXT, %o5
+
+661: stxa %o4, [%o5] ASI_DMMU
+ .section .sun4v_1insn_patch, "ax"
+ .word 661b
+ stxa %o4, [%o5] ASI_MMU
+ .previous
+ flush %g6
+
+1:
TRAP_LOAD_TRAP_BLOCK(%g2, %g3)
stx %o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]
diff --git a/arch/sparc/lib/U3memcpy.S b/arch/sparc/lib/U3memcpy.S
index 54f98706b03b..5a8cb37f0a3b 100644
--- a/arch/sparc/lib/U3memcpy.S
+++ b/arch/sparc/lib/U3memcpy.S
@@ -145,13 +145,13 @@ ENDPROC(U3_retl_o2_plus_GS_plus_0x08)
ENTRY(U3_retl_o2_and_7_plus_GS)
and %o2, 7, %o2
retl
- add %o2, GLOBAL_SPARE, %o2
+ add %o2, GLOBAL_SPARE, %o0
ENDPROC(U3_retl_o2_and_7_plus_GS)
ENTRY(U3_retl_o2_and_7_plus_GS_plus_8)
add GLOBAL_SPARE, 8, GLOBAL_SPARE
and %o2, 7, %o2
retl
- add %o2, GLOBAL_SPARE, %o2
+ add %o2, GLOBAL_SPARE, %o0
ENDPROC(U3_retl_o2_and_7_plus_GS_plus_8)
#endif
diff --git a/arch/sparc/lib/multi3.S b/arch/sparc/lib/multi3.S
index d6b6c97fe3c7..703127aaf4a5 100644
--- a/arch/sparc/lib/multi3.S
+++ b/arch/sparc/lib/multi3.S
@@ -5,26 +5,26 @@
.align 4
ENTRY(__multi3) /* %o0 = u, %o1 = v */
mov %o1, %g1
- srl %o3, 0, %g4
- mulx %g4, %g1, %o1
+ srl %o3, 0, %o4
+ mulx %o4, %g1, %o1
srlx %g1, 0x20, %g3
- mulx %g3, %g4, %g5
- sllx %g5, 0x20, %o5
- srl %g1, 0, %g4
+ mulx %g3, %o4, %g7
+ sllx %g7, 0x20, %o5
+ srl %g1, 0, %o4
sub %o1, %o5, %o5
srlx %o5, 0x20, %o5
- addcc %g5, %o5, %g5
+ addcc %g7, %o5, %g7
srlx %o3, 0x20, %o5
- mulx %g4, %o5, %g4
+ mulx %o4, %o5, %o4
mulx %g3, %o5, %o5
sethi %hi(0x80000000), %g3
- addcc %g5, %g4, %g5
- srlx %g5, 0x20, %g5
+ addcc %g7, %o4, %g7
+ srlx %g7, 0x20, %g7
add %g3, %g3, %g3
movcc %xcc, %g0, %g3
- addcc %o5, %g5, %o5
- sllx %g4, 0x20, %g4
- add %o1, %g4, %o1
+ addcc %o5, %g7, %o5
+ sllx %o4, 0x20, %o4
+ add %o1, %o4, %o1
add %o5, %g3, %g2
mulx %g1, %o2, %g1
add %g1, %g2, %g1
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 3c40ebd50f92..afa0099f3748 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -325,6 +325,29 @@ static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_inde
}
#ifdef CONFIG_HUGETLB_PAGE
+static void __init add_huge_page_size(unsigned long size)
+{
+ unsigned int order;
+
+ if (size_to_hstate(size))
+ return;
+
+ order = ilog2(size) - PAGE_SHIFT;
+ hugetlb_add_hstate(order);
+}
+
+static int __init hugetlbpage_init(void)
+{
+ add_huge_page_size(1UL << HPAGE_64K_SHIFT);
+ add_huge_page_size(1UL << HPAGE_SHIFT);
+ add_huge_page_size(1UL << HPAGE_256MB_SHIFT);
+ add_huge_page_size(1UL << HPAGE_2GB_SHIFT);
+
+ return 0;
+}
+
+arch_initcall(hugetlbpage_init);
+
static int __init setup_hugepagesz(char *string)
{
unsigned long long hugepage_size;
@@ -364,7 +387,7 @@ static int __init setup_hugepagesz(char *string)
goto out;
}
- hugetlb_add_hstate(hugepage_shift - PAGE_SHIFT);
+ add_huge_page_size(hugepage_size);
rc = 1;
out:
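
add_huge_page_size() registers an hstate of order ilog2(size) - PAGE_SHIFT. A small worked example, assuming sparc64's 8 KB base pages and the hugepage shifts named above:

#include <stdio.h>

int main(void)
{
	unsigned long page_shift = 13;	/* sparc64: 8 KB base pages */
	unsigned long sizes[] = {
		1UL << 16,	/* 64 KB */
		1UL << 23,	/* 8 MB (HPAGE_SHIFT) */
		1UL << 28,	/* 256 MB */
		1UL << 31,	/* 2 GB */
	};

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		unsigned int order = __builtin_ctzl(sizes[i]) - page_shift;

		printf("huge page %lu bytes -> hstate order %u\n",
		       sizes[i], order);
	}
	return 0;
}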
@@ -1921,12 +1944,22 @@ static void __init setup_page_offset(void)
break;
case SUN4V_CHIP_SPARC_M7:
case SUN4V_CHIP_SPARC_SN:
- default:
/* M7 and later support 52-bit virtual addresses. */
sparc64_va_hole_top = 0xfff8000000000000UL;
sparc64_va_hole_bottom = 0x0008000000000000UL;
max_phys_bits = 49;
break;
+ case SUN4V_CHIP_SPARC_M8:
+ default:
+ /* M8 and later support 54-bit virtual addresses.
+ * However, we restrict M8 and later to 53 VA bits
+ * because a 4-level page table cannot support more
+ * than 53 VA bits.
+ */
+ sparc64_va_hole_top = 0xfff0000000000000UL;
+ sparc64_va_hole_bottom = 0x0010000000000000UL;
+ max_phys_bits = 51;
+ break;
}
}
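
The M8 constants follow directly from restricting the usable virtual address space to 53 bits: the canonical hole spans everything between 2^52 and -2^52. A quick arithmetic check:

#include <stdio.h>

int main(void)
{
	unsigned int va_bits = 53;
	unsigned long hole_bottom = 1UL << (va_bits - 1);	/* 0x0010000000000000 */
	unsigned long hole_top = ~(hole_bottom - 1);		/* 0xfff0000000000000 */

	printf("va hole: [%#lx, %#lx)\n", hole_bottom, hole_top);
	return 0;
}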
@@ -2138,6 +2171,7 @@ static void __init sun4v_linear_pte_xor_finalize(void)
*/
switch (sun4v_chip_type) {
case SUN4V_CHIP_SPARC_M7:
+ case SUN4V_CHIP_SPARC_M8:
case SUN4V_CHIP_SPARC_SN:
pagecv_flag = 0x00;
break;
@@ -2290,6 +2324,7 @@ void __init paging_init(void)
*/
switch (sun4v_chip_type) {
case SUN4V_CHIP_SPARC_M7:
+ case SUN4V_CHIP_SPARC_M8:
case SUN4V_CHIP_SPARC_SN:
page_cache4v_flag = _PAGE_CP_4V;
break;
diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c
index 8799ae9a8788..c340af7b1371 100644
--- a/arch/sparc/net/bpf_jit_comp_64.c
+++ b/arch/sparc/net/bpf_jit_comp_64.c
@@ -128,6 +128,8 @@ static u32 WDISP10(u32 off)
#define BA (BRANCH | CONDA)
#define BG (BRANCH | CONDG)
+#define BL (BRANCH | CONDL)
+#define BLE (BRANCH | CONDLE)
#define BGU (BRANCH | CONDGU)
#define BLEU (BRANCH | CONDLEU)
#define BGE (BRANCH | CONDGE)
@@ -715,9 +717,15 @@ static int emit_compare_and_branch(const u8 code, const u8 dst, u8 src,
case BPF_JGT:
br_opcode = BGU;
break;
+ case BPF_JLT:
+ br_opcode = BLU;
+ break;
case BPF_JGE:
br_opcode = BGEU;
break;
+ case BPF_JLE:
+ br_opcode = BLEU;
+ break;
case BPF_JSET:
case BPF_JNE:
br_opcode = BNE;
@@ -725,9 +733,15 @@ static int emit_compare_and_branch(const u8 code, const u8 dst, u8 src,
case BPF_JSGT:
br_opcode = BG;
break;
+ case BPF_JSLT:
+ br_opcode = BL;
+ break;
case BPF_JSGE:
br_opcode = BGE;
break;
+ case BPF_JSLE:
+ br_opcode = BLE;
+ break;
default:
/* Make sure we dont leak kernel information to the
* user.
@@ -746,18 +760,30 @@ static int emit_compare_and_branch(const u8 code, const u8 dst, u8 src,
case BPF_JGT:
cbcond_opcode = CBCONDGU;
break;
+ case BPF_JLT:
+ cbcond_opcode = CBCONDLU;
+ break;
case BPF_JGE:
cbcond_opcode = CBCONDGEU;
break;
+ case BPF_JLE:
+ cbcond_opcode = CBCONDLEU;
+ break;
case BPF_JNE:
cbcond_opcode = CBCONDNE;
break;
case BPF_JSGT:
cbcond_opcode = CBCONDG;
break;
+ case BPF_JSLT:
+ cbcond_opcode = CBCONDL;
+ break;
case BPF_JSGE:
cbcond_opcode = CBCONDGE;
break;
+ case BPF_JSLE:
+ cbcond_opcode = CBCONDLE;
+ break;
default:
/* Make sure we dont leak kernel information to the
* user.
@@ -1176,10 +1202,14 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
/* IF (dst COND src) JUMP off */
case BPF_JMP | BPF_JEQ | BPF_X:
case BPF_JMP | BPF_JGT | BPF_X:
+ case BPF_JMP | BPF_JLT | BPF_X:
case BPF_JMP | BPF_JGE | BPF_X:
+ case BPF_JMP | BPF_JLE | BPF_X:
case BPF_JMP | BPF_JNE | BPF_X:
case BPF_JMP | BPF_JSGT | BPF_X:
+ case BPF_JMP | BPF_JSLT | BPF_X:
case BPF_JMP | BPF_JSGE | BPF_X:
+ case BPF_JMP | BPF_JSLE | BPF_X:
case BPF_JMP | BPF_JSET | BPF_X: {
int err;
@@ -1191,10 +1221,14 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
/* IF (dst COND imm) JUMP off */
case BPF_JMP | BPF_JEQ | BPF_K:
case BPF_JMP | BPF_JGT | BPF_K:
+ case BPF_JMP | BPF_JLT | BPF_K:
case BPF_JMP | BPF_JGE | BPF_K:
+ case BPF_JMP | BPF_JLE | BPF_K:
case BPF_JMP | BPF_JNE | BPF_K:
case BPF_JMP | BPF_JSGT | BPF_K:
+ case BPF_JMP | BPF_JSLT | BPF_K:
case BPF_JMP | BPF_JSGE | BPF_K:
+ case BPF_JMP | BPF_JSLE | BPF_K:
case BPF_JMP | BPF_JSET | BPF_K: {
int err;
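
As on s390, the sparc JIT gains the new "less than" jump flavours. The reason both unsigned (BPF_JLT/BPF_JLE) and signed (BPF_JSLT/BPF_JSLE) variants exist is that the same bit pattern orders differently under the two interpretations; a plain C illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t a = 0xffffffffffffffffULL;	/* -1 when viewed as signed */
	uint64_t b = 1;

	printf("unsigned (BPF_JLT):  a < b is %d\n", a < b);
	printf("signed   (BPF_JSLT): a < b is %d\n",
	       (int64_t)a < (int64_t)b);
	return 0;
}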
diff --git a/arch/sparc/power/hibernate.c b/arch/sparc/power/hibernate.c
index 17bd2e167e07..df707a8ad311 100644
--- a/arch/sparc/power/hibernate.c
+++ b/arch/sparc/power/hibernate.c
@@ -35,6 +35,5 @@ void restore_processor_state(void)
{
struct mm_struct *mm = current->active_mm;
- load_secondary_context(mm);
- tsb_context_switch(mm);
+ tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));
}
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index a93774255136..53a423e7cb92 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -101,6 +101,8 @@ static inline void atomic_set(atomic_t *v, int n)
_atomic_xchg(&v->counter, n);
}
+#define atomic_set_release(v, i) atomic_set((v), (i))
+
/* A 64bit atomic type */
typedef struct {
diff --git a/arch/tile/include/asm/futex.h b/arch/tile/include/asm/futex.h
index e64a1b75fc38..83c1e639b411 100644
--- a/arch/tile/include/asm/futex.h
+++ b/arch/tile/include/asm/futex.h
@@ -106,12 +106,9 @@
lock = __atomic_hashed_lock((int __force *)uaddr)
#endif
-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval,
+ u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int uninitialized_var(val), ret;
__futex_prolog();
@@ -119,12 +116,6 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
/* The 32-bit futex code makes this assumption, so validate it here. */
BUILD_BUG_ON(sizeof(atomic_t) != sizeof(int));
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
-
pagefault_disable();
switch (op) {
case FUTEX_OP_SET:
@@ -148,30 +139,9 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
}
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ:
- ret = (val == cmparg);
- break;
- case FUTEX_OP_CMP_NE:
- ret = (val != cmparg);
- break;
- case FUTEX_OP_CMP_LT:
- ret = (val < cmparg);
- break;
- case FUTEX_OP_CMP_GE:
- ret = (val >= cmparg);
- break;
- case FUTEX_OP_CMP_LE:
- ret = (val <= cmparg);
- break;
- case FUTEX_OP_CMP_GT:
- ret = (val > cmparg);
- break;
- default:
- ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = val;
+
return ret;
}
diff --git a/arch/tile/include/asm/spinlock_32.h b/arch/tile/include/asm/spinlock_32.h
index b14b1ba5bf9c..cba8ba9b8da6 100644
--- a/arch/tile/include/asm/spinlock_32.h
+++ b/arch/tile/include/asm/spinlock_32.h
@@ -64,8 +64,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
lock->current_ticket = old_ticket + TICKET_QUANTUM;
}
-void arch_spin_unlock_wait(arch_spinlock_t *lock);
-
/*
* Read-write spinlocks, allowing multiple readers
* but only one writer.
diff --git a/arch/tile/include/asm/spinlock_64.h b/arch/tile/include/asm/spinlock_64.h
index b9718fb4e74a..9a2c2d605752 100644
--- a/arch/tile/include/asm/spinlock_64.h
+++ b/arch/tile/include/asm/spinlock_64.h
@@ -58,8 +58,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
__insn_fetchadd4(&lock->lock, 1U << __ARCH_SPIN_CURRENT_SHIFT);
}
-void arch_spin_unlock_wait(arch_spinlock_t *lock);
-
void arch_spin_lock_slow(arch_spinlock_t *lock, u32 val);
/* Grab the "next" ticket number and bump it atomically.
diff --git a/arch/tile/lib/spinlock_32.c b/arch/tile/lib/spinlock_32.c
index 076c6cc43113..db9333f2447c 100644
--- a/arch/tile/lib/spinlock_32.c
+++ b/arch/tile/lib/spinlock_32.c
@@ -62,29 +62,6 @@ int arch_spin_trylock(arch_spinlock_t *lock)
}
EXPORT_SYMBOL(arch_spin_trylock);
-void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
- u32 iterations = 0;
- int curr = READ_ONCE(lock->current_ticket);
- int next = READ_ONCE(lock->next_ticket);
-
- /* Return immediately if unlocked. */
- if (next == curr)
- return;
-
- /* Wait until the current locker has released the lock. */
- do {
- delay_backoff(iterations++);
- } while (READ_ONCE(lock->current_ticket) == curr);
-
- /*
- * The TILE architecture doesn't do read speculation; therefore
- * a control dependency guarantees a LOAD->{LOAD,STORE} order.
- */
- barrier();
-}
-EXPORT_SYMBOL(arch_spin_unlock_wait);
-
/*
* The low byte is always reserved to be the marker for a "tns" operation
* since the low bit is set to "1" by a tns. The next seven bits are
diff --git a/arch/tile/lib/spinlock_64.c b/arch/tile/lib/spinlock_64.c
index a4b5b2cbce93..de414c22892f 100644
--- a/arch/tile/lib/spinlock_64.c
+++ b/arch/tile/lib/spinlock_64.c
@@ -62,28 +62,6 @@ int arch_spin_trylock(arch_spinlock_t *lock)
}
EXPORT_SYMBOL(arch_spin_trylock);
-void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
- u32 iterations = 0;
- u32 val = READ_ONCE(lock->lock);
- u32 curr = arch_spin_current(val);
-
- /* Return immediately if unlocked. */
- if (arch_spin_next(val) == curr)
- return;
-
- /* Wait until the current locker has released the lock. */
- do {
- delay_backoff(iterations++);
- } while (arch_spin_current(READ_ONCE(lock->lock)) == curr);
-
- /*
- * The TILE architecture doesn't do read speculation; therefore
- * a control dependency guarantees a LOAD->{LOAD,STORE} order.
- */
- barrier();
-}
-EXPORT_SYMBOL(arch_spin_unlock_wait);
/*
* If the read lock fails due to a writer, we retry periodically
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
index 600a2e9bfee2..344d95619d03 100644
--- a/arch/um/include/asm/tlb.h
+++ b/arch/um/include/asm/tlb.h
@@ -45,7 +45,8 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
}
static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+ unsigned long start, unsigned long end)
{
tlb->mm = mm;
tlb->start = start;
@@ -80,13 +81,19 @@ tlb_flush_mmu(struct mmu_gather *tlb)
tlb_flush_mmu_free(tlb);
}
-/* tlb_finish_mmu
+/* arch_tlb_finish_mmu
* Called at the end of the shootdown operation to free up any resources
* that were required.
*/
static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+ unsigned long start, unsigned long end, bool force)
{
+ if (force) {
+ tlb->start = start;
+ tlb->end = end;
+ tlb->need_flush = 1;
+ }
tlb_flush_mmu(tlb);
/* keep the page table cache within bounds */
diff --git a/arch/um/include/asm/unwind.h b/arch/um/include/asm/unwind.h
new file mode 100644
index 000000000000..7ffa5437b761
--- /dev/null
+++ b/arch/um/include/asm/unwind.h
@@ -0,0 +1,8 @@
+#ifndef _ASM_UML_UNWIND_H
+#define _ASM_UML_UNWIND_H
+
+static inline void
+unwind_module_init(struct module *mod, void *orc_ip, size_t orc_ip_size,
+ void *orc, size_t orc_size) {}
+
+#endif /* _ASM_UML_UNWIND_H */
diff --git a/arch/x86/Kbuild b/arch/x86/Kbuild
index 586b786b3edf..0038a2d10a7a 100644
--- a/arch/x86/Kbuild
+++ b/arch/x86/Kbuild
@@ -8,10 +8,7 @@ obj-$(CONFIG_KVM) += kvm/
obj-$(CONFIG_XEN) += xen/
# Hyper-V paravirtualization support
-obj-$(CONFIG_HYPERVISOR_GUEST) += hyperv/
-
-# lguest paravirtualization support
-obj-$(CONFIG_LGUEST_GUEST) += lguest/
+obj-$(subst m,y,$(CONFIG_HYPERV)) += hyperv/
obj-y += realmode/
obj-y += kernel/
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 781521b7cf9e..4b278a33ccbb 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -55,6 +55,8 @@ config X86
select ARCH_HAS_KCOV if X86_64
select ARCH_HAS_MMIO_FLUSH
select ARCH_HAS_PMEM_API if X86_64
+ # Causing hangs/crashes, see the commit that added this change for details.
+ select ARCH_HAS_REFCOUNT if BROKEN
select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64
select ARCH_HAS_SET_MEMORY
select ARCH_HAS_SG_CHAIN
@@ -73,7 +75,6 @@ config X86
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS
select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
- select ARCH_WANT_FRAME_POINTERS
select ARCH_WANTS_DYNAMIC_TASK_STRUCT
select ARCH_WANTS_THP_SWAP if X86_64
select BUILDTIME_EXTABLE_SORT
@@ -100,6 +101,7 @@ config X86
select GENERIC_STRNCPY_FROM_USER
select GENERIC_STRNLEN_USER
select GENERIC_TIME_VSYSCALL
+ select HARDLOCKUP_CHECK_TIMESTAMP if X86_64
select HAVE_ACPI_APEI if ACPI
select HAVE_ACPI_APEI_NMI if ACPI
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
@@ -157,17 +159,19 @@ config X86
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_MIXED_BREAKPOINTS_REGS
+ select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
select HAVE_OPROFILE
select HAVE_OPTPROBES
select HAVE_PCSPKR_PLATFORM
select HAVE_PERF_EVENTS
select HAVE_PERF_EVENTS_NMI
- select HAVE_HARDLOCKUP_DETECTOR_PERF if HAVE_PERF_EVENTS_NMI
+ select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
+ select HAVE_RCU_TABLE_FREE
select HAVE_REGS_AND_STACK_ACCESS_API
- select HAVE_RELIABLE_STACKTRACE if X86_64 && FRAME_POINTER && STACK_VALIDATION
+ select HAVE_RELIABLE_STACKTRACE if X86_64 && FRAME_POINTER_UNWINDER && STACK_VALIDATION
select HAVE_STACK_VALIDATION if X86_64
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_UNSTABLE_SCHED_CLOCK
@@ -326,6 +330,7 @@ config FIX_EARLYCON_MEM
config PGTABLE_LEVELS
int
+ default 5 if X86_5LEVEL
default 4 if X86_64
default 3 if X86_PAE
default 2
@@ -424,16 +429,16 @@ config GOLDFISH
def_bool y
depends on X86_GOLDFISH
-config INTEL_RDT_A
- bool "Intel Resource Director Technology Allocation support"
+config INTEL_RDT
+ bool "Intel Resource Director Technology support"
default n
depends on X86 && CPU_SUP_INTEL
select KERNFS
help
- Select to enable resource allocation which is a sub-feature of
- Intel Resource Director Technology(RDT). More information about
- RDT can be found in the Intel x86 Architecture Software
- Developer Manual.
+ Select to enable resource allocation and monitoring which are
+ sub-features of Intel Resource Director Technology(RDT). More
+ information about RDT can be found in the Intel x86
+ Architecture Software Developer Manual.
Say N if unsure.
@@ -777,8 +782,6 @@ config KVM_DEBUG_FS
Statistics are displayed in debugfs filesystem. Enabling this option
may incur significant overhead.
-source "arch/x86/lguest/Kconfig"
-
config PARAVIRT_TIME_ACCOUNTING
bool "Paravirtual steal time accounting"
depends on PARAVIRT
@@ -1398,6 +1401,24 @@ config X86_PAE
has the cost of more pagetable lookup overhead, and also
consumes more pagetable space per process.
+config X86_5LEVEL
+ bool "Enable 5-level page tables support"
+ depends on X86_64
+ ---help---
+ 5-level paging enables access to a larger address space:
+ up to 128 PiB of virtual address space and 4 PiB of
+ physical address space.
+
+ It will be supported by future Intel CPUs.
+
+ Note: a kernel with this option enabled can only be booted
+ on machines that support the feature.
+
+ See Documentation/x86/x86_64/5level-paging.txt for more
+ information.
+
+ Say N if unsure.
+
config ARCH_PHYS_ADDR_T_64BIT
def_bool y
depends on X86_64 || X86_PAE
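
The figures in the help text come straight from the paging geometry: five 9-bit table levels plus the 12-bit page offset give 57 virtual address bits, and the physical limit is 52 bits. A quick check:

#include <stdio.h>

int main(void)
{
	unsigned int levels = 5, bits_per_level = 9, page_offset_bits = 12;
	unsigned int va_bits = levels * bits_per_level + page_offset_bits;

	printf("virtual bits: %u -> %llu PiB\n", va_bits,
	       (1ULL << va_bits) >> 50);
	printf("physical bits: 52 -> %llu PiB\n", (1ULL << 52) >> 50);
	return 0;
}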
@@ -1415,6 +1436,35 @@ config X86_DIRECT_GBPAGES
supports them), so don't confuse the user by printing
that we have them enabled.
+config ARCH_HAS_MEM_ENCRYPT
+ def_bool y
+
+config AMD_MEM_ENCRYPT
+ bool "AMD Secure Memory Encryption (SME) support"
+ depends on X86_64 && CPU_SUP_AMD
+ ---help---
+ Say yes to enable support for the encryption of system memory.
+ This requires an AMD processor that supports Secure Memory
+ Encryption (SME).
+
+config AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT
+ bool "Activate AMD Secure Memory Encryption (SME) by default"
+ default y
+ depends on AMD_MEM_ENCRYPT
+ ---help---
+ Say yes to have system memory encrypted by default if running on
+ an AMD processor that supports Secure Memory Encryption (SME).
+
+ If set to Y, then the encryption of system memory can be
+ deactivated with the mem_encrypt=off command line option.
+
+ If set to N, then the encryption of system memory can be
+ activated with the mem_encrypt=on command line option.
+
+config ARCH_USE_MEMREMAP_PROT
+ def_bool y
+ depends on AMD_MEM_ENCRYPT
+
# Common NUMA Features
config NUMA
bool "Numa Memory Allocation and Scheduler Support"
@@ -1756,7 +1806,9 @@ config X86_SMAP
config X86_INTEL_MPX
prompt "Intel MPX (Memory Protection Extensions)"
def_bool n
- depends on CPU_SUP_INTEL
+ # Note: only available in 64-bit mode due to VMA flags shortage
+ depends on CPU_SUP_INTEL && X86_64
+ select ARCH_USES_HIGH_VMA_FLAGS
---help---
MPX provides hardware features that can be used in
conjunction with compiler-instrumented code to check
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index fcb7604172ce..71a48a30fc84 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -305,8 +305,6 @@ config DEBUG_ENTRY
Some of these sanity checks may slow down kernel entries and
exits or otherwise impact performance.
- This is currently used to help test NMI code.
-
If unsure, say N.
config DEBUG_NMI_SELFTEST
@@ -348,6 +346,7 @@ config X86_DEBUG_FPU
config PUNIT_ATOM_DEBUG
tristate "ATOM Punit debug driver"
+ depends on PCI
select DEBUG_FS
select IOSF_MBI
---help---
@@ -357,4 +356,61 @@ config PUNIT_ATOM_DEBUG
The current power state can be read from
/sys/kernel/debug/punit_atom/dev_power_state
+choice
+ prompt "Choose kernel unwinder"
+ default FRAME_POINTER_UNWINDER
+ ---help---
+ This determines which method will be used for unwinding kernel stack
+ traces for panics, oopses, bugs, warnings, perf, /proc/<pid>/stack,
+ livepatch, lockdep, and more.
+
+config FRAME_POINTER_UNWINDER
+ bool "Frame pointer unwinder"
+ select FRAME_POINTER
+ ---help---
+ This option enables the frame pointer unwinder for unwinding kernel
+ stack traces.
+
+ The unwinder itself is fast and it uses less RAM than the ORC
+ unwinder, but the kernel text size will grow by ~3% and the kernel's
+ overall performance will degrade by roughly 5-10%.
+
+ This option is recommended if you want to use the livepatch
+ consistency model, as this is currently the only way to get a
+ reliable stack trace (CONFIG_HAVE_RELIABLE_STACKTRACE).
+
+config ORC_UNWINDER
+ bool "ORC unwinder"
+ depends on X86_64
+ select STACK_VALIDATION
+ ---help---
+ This option enables the ORC (Oops Rewind Capability) unwinder for
+ unwinding kernel stack traces. It uses a custom data format which is
+ a simplified version of the DWARF Call Frame Information standard.
+
+ This unwinder is more accurate across interrupt entry frames than the
+ frame pointer unwinder. It also enables a 5-10% performance
+ improvement across the entire kernel compared to frame pointers.
+
+ Enabling this option will increase the kernel's runtime memory usage
+ by roughly 2-4MB, depending on your kernel config.
+
+config GUESS_UNWINDER
+ bool "Guess unwinder"
+ depends on EXPERT
+ ---help---
+ This option enables the "guess" unwinder for unwinding kernel stack
+ traces. It scans the stack and reports every kernel text address it
+ finds. Some of the addresses it reports may be incorrect.
+
+ While this option often produces false positives, it can still be
+ useful in many cases. Unlike the other unwinders, it has no runtime
+ overhead.
+
+endchoice
+
+config FRAME_POINTER
+ depends on !ORC_UNWINDER && !GUESS_UNWINDER
+ bool
+
endmenu
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 1e902f926be3..6276572259c8 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -14,9 +14,11 @@ endif
# For gcc stack alignment is specified with -mpreferred-stack-boundary,
# clang has the option -mstack-alignment for that purpose.
ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
- cc_stack_align_opt := -mpreferred-stack-boundary
-else ifneq ($(call cc-option, -mstack-alignment=4),)
- cc_stack_align_opt := -mstack-alignment
+ cc_stack_align4 := -mpreferred-stack-boundary=2
+ cc_stack_align8 := -mpreferred-stack-boundary=3
+else ifneq ($(call cc-option, -mstack-alignment=16),)
+ cc_stack_align4 := -mstack-alignment=4
+ cc_stack_align8 := -mstack-alignment=8
endif
# How to compile the 16-bit code. Note we always compile for -march=i386;
@@ -36,7 +38,7 @@ REALMODE_CFLAGS := $(M16_CFLAGS) -g -Os -D__KERNEL__ \
REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -ffreestanding)
REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -fno-stack-protector)
-REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align_opt)=2)
+REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align4))
export REALMODE_CFLAGS
# BITS is used as extension for files which are available in a 32 bit
@@ -76,7 +78,7 @@ ifeq ($(CONFIG_X86_32),y)
# Align the stack to the register width instead of using the default
# alignment of 16 bytes. This reduces stack usage and the number of
# alignment instructions.
- KBUILD_CFLAGS += $(call cc-option,$(cc_stack_align_opt)=2)
+ KBUILD_CFLAGS += $(call cc-option,$(cc_stack_align4))
# Disable unit-at-a-time mode on pre-gcc-4.0 compilers, it makes gcc use
# a lot more stack due to the lack of sharing of stacklots:
@@ -115,7 +117,7 @@ else
# default alignment which keep the stack *mis*aligned.
# Furthermore an alignment to the register width reduces stack usage
# and the number of alignment instructions.
- KBUILD_CFLAGS += $(call cc-option,$(cc_stack_align_opt)=3)
+ KBUILD_CFLAGS += $(call cc-option,$(cc_stack_align8))
# Use -mskip-rax-setup if supported.
KBUILD_CFLAGS += $(call cc-option,-mskip-rax-setup)
@@ -232,9 +234,6 @@ KBUILD_CFLAGS += -Wno-sign-compare
#
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
-KBUILD_CFLAGS += $(mflags-y)
-KBUILD_AFLAGS += $(mflags-y)
-
archscripts: scripts_basic
$(Q)$(MAKE) $(build)=arch/x86/tools relocs
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index 0d810fb15eac..d88a2fddba8c 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -73,12 +73,13 @@ UBSAN_SANITIZE := n
$(obj)/bzImage: asflags-y := $(SVGA_MODE)
quiet_cmd_image = BUILD $@
+silent_redirect_image = >/dev/null
cmd_image = $(obj)/tools/build $(obj)/setup.bin $(obj)/vmlinux.bin \
- $(obj)/zoffset.h $@
+ $(obj)/zoffset.h $@ $($(quiet)redirect_image)
$(obj)/bzImage: $(obj)/setup.bin $(obj)/vmlinux.bin $(obj)/tools/build FORCE
$(call if_changed,image)
- @echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
+ @$(kecho) 'Kernel: $@ is ready' ' (#'`cat .version`')'
OBJCOPYFLAGS_vmlinux.bin := -O binary -R .note -R .comment -S
$(obj)/vmlinux.bin: $(obj)/compressed/vmlinux FORCE
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 2c860ad4fe06..8a958274b54c 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -34,6 +34,7 @@ KBUILD_CFLAGS += $(cflags-y)
KBUILD_CFLAGS += -mno-mmx -mno-sse
KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
+KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
GCOV_PROFILE := n
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index c3e869eaef0c..e56dbc67e837 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -767,7 +767,7 @@ static efi_status_t setup_e820(struct boot_params *params,
m |= (u64)efi->efi_memmap_hi << 32;
#endif
- d = (efi_memory_desc_t *)(m + (i * efi->efi_memdesc_size));
+ d = efi_early_memdesc_ptr(m, efi->efi_memdesc_size, i);
switch (d->type) {
case EFI_RESERVED_TYPE:
case EFI_RUNTIME_SERVICES_CODE:
@@ -997,6 +997,9 @@ struct boot_params *efi_main(struct efi_config *c,
if (boot_params->secure_boot == efi_secureboot_mode_unset)
boot_params->secure_boot = efi_get_secureboot(sys_table);
+ /* Ask the firmware to clear memory on unclean shutdown */
+ efi_enable_reset_attack_mitigation(sys_table);
+
setup_graphics(boot_params);
setup_efi_pci(boot_params);
@@ -1058,7 +1061,7 @@ struct boot_params *efi_main(struct efi_config *c,
desc->s = DESC_TYPE_CODE_DATA;
desc->dpl = 0;
desc->p = 1;
- desc->limit = 0xf;
+ desc->limit1 = 0xf;
desc->avl = 0;
desc->l = 0;
desc->d = SEG_OP_SIZE_32BIT;
@@ -1078,7 +1081,7 @@ struct boot_params *efi_main(struct efi_config *c,
desc->s = DESC_TYPE_CODE_DATA;
desc->dpl = 0;
desc->p = 1;
- desc->limit = 0xf;
+ desc->limit1 = 0xf;
desc->avl = 0;
if (IS_ENABLED(CONFIG_X86_64)) {
desc->l = 1;
@@ -1099,7 +1102,7 @@ struct boot_params *efi_main(struct efi_config *c,
desc->s = DESC_TYPE_CODE_DATA;
desc->dpl = 0;
desc->p = 1;
- desc->limit = 0xf;
+ desc->limit1 = 0xf;
desc->avl = 0;
desc->l = 0;
desc->d = SEG_OP_SIZE_32BIT;
@@ -1116,7 +1119,7 @@ struct boot_params *efi_main(struct efi_config *c,
desc->s = 0;
desc->dpl = 0;
desc->p = 1;
- desc->limit = 0x0;
+ desc->limit1 = 0x0;
desc->avl = 0;
desc->l = 0;
desc->d = 0;
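A note on the desc->limit to desc->limit1 renames above: an x86 segment descriptor stores its 20-bit limit in two pieces, and the kernel's descriptor structure now names them limit0 and limit1, so the old single "limit" field no longer exists. The layout below is a generic sketch of that split (field names follow the kernel's convention, but this is not a verbatim copy of struct desc_struct):

    struct gdt_desc_sketch {
            unsigned short  limit0;                 /* limit bits  0..15 */
            unsigned short  base0;                  /* base  bits  0..15 */
            unsigned char   base1;                  /* base  bits 16..23 */
            unsigned char   type:4, s:1, dpl:2, p:1;
            unsigned char   limit1:4;               /* limit bits 16..19 */
            unsigned char   avl:1, l:1, d:1, g:1;
            unsigned char   base2;                  /* base  bits 24..31 */
    } __attribute__((packed));

With 4 KiB granularity (g=1), limit0=0xffff plus limit1=0xf is how these boot GDT entries describe a flat 4 GiB segment; the system-segment entry above keeps limit1 at 0.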
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index d85b9625e836..11c68cf53d4e 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -61,71 +61,6 @@
__HEAD
ENTRY(startup_32)
-#ifdef CONFIG_EFI_STUB
- jmp preferred_addr
-
- /*
- * We don't need the return address, so set up the stack so
- * efi_main() can find its arguments.
- */
-ENTRY(efi_pe_entry)
- add $0x4, %esp
-
- call 1f
-1: popl %esi
- subl $1b, %esi
-
- popl %ecx
- movl %ecx, efi32_config(%esi) /* Handle */
- popl %ecx
- movl %ecx, efi32_config+8(%esi) /* EFI System table pointer */
-
- /* Relocate efi_config->call() */
- leal efi32_config(%esi), %eax
- add %esi, 40(%eax)
- pushl %eax
-
- call make_boot_params
- cmpl $0, %eax
- je fail
- movl %esi, BP_code32_start(%eax)
- popl %ecx
- pushl %eax
- pushl %ecx
- jmp 2f /* Skip efi_config initialization */
-
-ENTRY(efi32_stub_entry)
- add $0x4, %esp
- popl %ecx
- popl %edx
-
- call 1f
-1: popl %esi
- subl $1b, %esi
-
- movl %ecx, efi32_config(%esi) /* Handle */
- movl %edx, efi32_config+8(%esi) /* EFI System table pointer */
-
- /* Relocate efi_config->call() */
- leal efi32_config(%esi), %eax
- add %esi, 40(%eax)
- pushl %eax
-2:
- call efi_main
- cmpl $0, %eax
- movl %eax, %esi
- jne 2f
-fail:
- /* EFI init failed, so hang. */
- hlt
- jmp fail
-2:
- movl BP_code32_start(%esi), %eax
- leal preferred_addr(%eax), %eax
- jmp *%eax
-
-preferred_addr:
-#endif
cld
/*
* Test KEEP_SEGMENTS flag to see if the bootloader is asking
@@ -208,6 +143,70 @@ preferred_addr:
jmp *%eax
ENDPROC(startup_32)
+#ifdef CONFIG_EFI_STUB
+/*
+ * We don't need the return address, so set up the stack so efi_main() can find
+ * its arguments.
+ */
+ENTRY(efi_pe_entry)
+ add $0x4, %esp
+
+ call 1f
+1: popl %esi
+ subl $1b, %esi
+
+ popl %ecx
+ movl %ecx, efi32_config(%esi) /* Handle */
+ popl %ecx
+ movl %ecx, efi32_config+8(%esi) /* EFI System table pointer */
+
+ /* Relocate efi_config->call() */
+ leal efi32_config(%esi), %eax
+ add %esi, 40(%eax)
+ pushl %eax
+
+ call make_boot_params
+ cmpl $0, %eax
+ je fail
+ movl %esi, BP_code32_start(%eax)
+ popl %ecx
+ pushl %eax
+ pushl %ecx
+ jmp 2f /* Skip efi_config initialization */
+ENDPROC(efi_pe_entry)
+
+ENTRY(efi32_stub_entry)
+ add $0x4, %esp
+ popl %ecx
+ popl %edx
+
+ call 1f
+1: popl %esi
+ subl $1b, %esi
+
+ movl %ecx, efi32_config(%esi) /* Handle */
+ movl %edx, efi32_config+8(%esi) /* EFI System table pointer */
+
+ /* Relocate efi_config->call() */
+ leal efi32_config(%esi), %eax
+ add %esi, 40(%eax)
+ pushl %eax
+2:
+ call efi_main
+ cmpl $0, %eax
+ movl %eax, %esi
+ jne 2f
+fail:
+ /* EFI init failed, so hang. */
+ hlt
+ jmp fail
+2:
+ movl BP_code32_start(%esi), %eax
+ leal startup_32(%eax), %eax
+ jmp *%eax
+ENDPROC(efi32_stub_entry)
+#endif
+
.text
relocated:
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index fbf4c32d0b62..b4a5d284391c 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -243,65 +243,6 @@ ENTRY(startup_64)
* that maps our entire kernel(text+data+bss+brk), zero page
* and command line.
*/
-#ifdef CONFIG_EFI_STUB
- /*
- * The entry point for the PE/COFF executable is efi_pe_entry, so
- * only legacy boot loaders will execute this jmp.
- */
- jmp preferred_addr
-
-ENTRY(efi_pe_entry)
- movq %rcx, efi64_config(%rip) /* Handle */
- movq %rdx, efi64_config+8(%rip) /* EFI System table pointer */
-
- leaq efi64_config(%rip), %rax
- movq %rax, efi_config(%rip)
-
- call 1f
-1: popq %rbp
- subq $1b, %rbp
-
- /*
- * Relocate efi_config->call().
- */
- addq %rbp, efi64_config+40(%rip)
-
- movq %rax, %rdi
- call make_boot_params
- cmpq $0,%rax
- je fail
- mov %rax, %rsi
- leaq startup_32(%rip), %rax
- movl %eax, BP_code32_start(%rsi)
- jmp 2f /* Skip the relocation */
-
-handover_entry:
- call 1f
-1: popq %rbp
- subq $1b, %rbp
-
- /*
- * Relocate efi_config->call().
- */
- movq efi_config(%rip), %rax
- addq %rbp, 40(%rax)
-2:
- movq efi_config(%rip), %rdi
- call efi_main
- movq %rax,%rsi
- cmpq $0,%rax
- jne 2f
-fail:
- /* EFI init failed, so hang. */
- hlt
- jmp fail
-2:
- movl BP_code32_start(%esi), %eax
- leaq preferred_addr(%rax), %rax
- jmp *%rax
-
-preferred_addr:
-#endif
/* Setup data segments. */
xorl %eax, %eax
@@ -413,6 +354,59 @@ lvl5:
jmp *%rax
#ifdef CONFIG_EFI_STUB
+
+/* The entry point for the PE/COFF executable is efi_pe_entry. */
+ENTRY(efi_pe_entry)
+ movq %rcx, efi64_config(%rip) /* Handle */
+ movq %rdx, efi64_config+8(%rip) /* EFI System table pointer */
+
+ leaq efi64_config(%rip), %rax
+ movq %rax, efi_config(%rip)
+
+ call 1f
+1: popq %rbp
+ subq $1b, %rbp
+
+ /*
+ * Relocate efi_config->call().
+ */
+ addq %rbp, efi64_config+40(%rip)
+
+ movq %rax, %rdi
+ call make_boot_params
+ cmpq $0,%rax
+ je fail
+ mov %rax, %rsi
+ leaq startup_32(%rip), %rax
+ movl %eax, BP_code32_start(%rsi)
+ jmp 2f /* Skip the relocation */
+
+handover_entry:
+ call 1f
+1: popq %rbp
+ subq $1b, %rbp
+
+ /*
+ * Relocate efi_config->call().
+ */
+ movq efi_config(%rip), %rax
+ addq %rbp, 40(%rax)
+2:
+ movq efi_config(%rip), %rdi
+ call efi_main
+ movq %rax,%rsi
+ cmpq $0,%rax
+ jne 2f
+fail:
+ /* EFI init failed, so hang. */
+ hlt
+ jmp fail
+2:
+ movl BP_code32_start(%esi), %eax
+ leaq startup_64(%rax), %rax
+ jmp *%rax
+ENDPROC(efi_pe_entry)
+
.org 0x390
ENTRY(efi64_stub_entry)
movq %rdi, efi64_config(%rip) /* Handle */
diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
index 91f27ab970ef..17818ba6906f 100644
--- a/arch/x86/boot/compressed/kaslr.c
+++ b/arch/x86/boot/compressed/kaslr.c
@@ -37,7 +37,9 @@
#include <linux/uts.h>
#include <linux/utsname.h>
#include <linux/ctype.h>
+#include <linux/efi.h>
#include <generated/utsrelease.h>
+#include <asm/efi.h>
/* Macros used by the included decompressor code below. */
#define STATIC
@@ -479,35 +481,31 @@ static unsigned long slots_fetch_random(void)
return 0;
}
-static void process_e820_entry(struct boot_e820_entry *entry,
+static void process_mem_region(struct mem_vector *entry,
unsigned long minimum,
unsigned long image_size)
{
struct mem_vector region, overlap;
struct slot_area slot_area;
unsigned long start_orig, end;
- struct boot_e820_entry cur_entry;
-
- /* Skip non-RAM entries. */
- if (entry->type != E820_TYPE_RAM)
- return;
+ struct mem_vector cur_entry;
/* On 32-bit, ignore entries entirely above our maximum. */
- if (IS_ENABLED(CONFIG_X86_32) && entry->addr >= KERNEL_IMAGE_SIZE)
+ if (IS_ENABLED(CONFIG_X86_32) && entry->start >= KERNEL_IMAGE_SIZE)
return;
/* Ignore entries entirely below our minimum. */
- if (entry->addr + entry->size < minimum)
+ if (entry->start + entry->size < minimum)
return;
/* Ignore entries above memory limit */
- end = min(entry->size + entry->addr, mem_limit);
- if (entry->addr >= end)
+ end = min(entry->size + entry->start, mem_limit);
+ if (entry->start >= end)
return;
- cur_entry.addr = entry->addr;
- cur_entry.size = end - entry->addr;
+ cur_entry.start = entry->start;
+ cur_entry.size = end - entry->start;
- region.start = cur_entry.addr;
+ region.start = cur_entry.start;
region.size = cur_entry.size;
/* Give up if slot area array is full. */
@@ -521,8 +519,8 @@ static void process_e820_entry(struct boot_e820_entry *entry,
/* Potentially raise address to meet alignment needs. */
region.start = ALIGN(region.start, CONFIG_PHYSICAL_ALIGN);
- /* Did we raise the address above this e820 region? */
- if (region.start > cur_entry.addr + cur_entry.size)
+ /* Did we raise the address above the passed in memory entry? */
+ if (region.start > cur_entry.start + cur_entry.size)
return;
/* Reduce size by any delta from the original address. */
@@ -562,31 +560,126 @@ static void process_e820_entry(struct boot_e820_entry *entry,
}
}
-static unsigned long find_random_phys_addr(unsigned long minimum,
- unsigned long image_size)
+#ifdef CONFIG_EFI
+/*
+ * Returns true if mirror region found (and must have been processed
+ * for slots adding)
+ */
+static bool
+process_efi_entries(unsigned long minimum, unsigned long image_size)
{
+ struct efi_info *e = &boot_params->efi_info;
+ bool efi_mirror_found = false;
+ struct mem_vector region;
+ efi_memory_desc_t *md;
+ unsigned long pmap;
+ char *signature;
+ u32 nr_desc;
int i;
- unsigned long addr;
- /* Check if we had too many memmaps. */
- if (memmap_too_large) {
- debug_putstr("Aborted e820 scan (more than 4 memmap= args)!\n");
- return 0;
+ signature = (char *)&e->efi_loader_signature;
+ if (strncmp(signature, EFI32_LOADER_SIGNATURE, 4) &&
+ strncmp(signature, EFI64_LOADER_SIGNATURE, 4))
+ return false;
+
+#ifdef CONFIG_X86_32
+ /* Can't handle data above 4GB at this time */
+ if (e->efi_memmap_hi) {
+ warn("EFI memmap is above 4GB, can't be handled now on x86_32. EFI should be disabled.\n");
+ return false;
}
+ pmap = e->efi_memmap;
+#else
+ pmap = (e->efi_memmap | ((__u64)e->efi_memmap_hi << 32));
+#endif
- /* Make sure minimum is aligned. */
- minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN);
+ nr_desc = e->efi_memmap_size / e->efi_memdesc_size;
+ for (i = 0; i < nr_desc; i++) {
+ md = efi_early_memdesc_ptr(pmap, e->efi_memdesc_size, i);
+ if (md->attribute & EFI_MEMORY_MORE_RELIABLE) {
+ efi_mirror_found = true;
+ break;
+ }
+ }
+
+ for (i = 0; i < nr_desc; i++) {
+ md = efi_early_memdesc_ptr(pmap, e->efi_memdesc_size, i);
+
+ /*
+ * Here we are more conservative in picking free memory than
+ * the EFI spec allows:
+ *
+ * According to the spec, EFI_BOOT_SERVICES_{CODE|DATA} are also
+ * free memory and thus available to place the kernel image into,
+ * but in practice there's firmware where using that memory leads
+ * to crashes.
+ *
+ * Only EFI_CONVENTIONAL_MEMORY is guaranteed to be free.
+ */
+ if (md->type != EFI_CONVENTIONAL_MEMORY)
+ continue;
+
+ if (efi_mirror_found &&
+ !(md->attribute & EFI_MEMORY_MORE_RELIABLE))
+ continue;
+
+ region.start = md->phys_addr;
+ region.size = md->num_pages << EFI_PAGE_SHIFT;
+ process_mem_region(&region, minimum, image_size);
+ if (slot_area_index == MAX_SLOT_AREA) {
+ debug_putstr("Aborted EFI scan (slot_areas full)!\n");
+ break;
+ }
+ }
+ return true;
+}
+#else
+static inline bool
+process_efi_entries(unsigned long minimum, unsigned long image_size)
+{
+ return false;
+}
+#endif
+
+static void process_e820_entries(unsigned long minimum,
+ unsigned long image_size)
+{
+ int i;
+ struct mem_vector region;
+ struct boot_e820_entry *entry;
/* Verify potential e820 positions, appending to slots list. */
for (i = 0; i < boot_params->e820_entries; i++) {
- process_e820_entry(&boot_params->e820_table[i], minimum,
- image_size);
+ entry = &boot_params->e820_table[i];
+ /* Skip non-RAM entries. */
+ if (entry->type != E820_TYPE_RAM)
+ continue;
+ region.start = entry->addr;
+ region.size = entry->size;
+ process_mem_region(&region, minimum, image_size);
if (slot_area_index == MAX_SLOT_AREA) {
debug_putstr("Aborted e820 scan (slot_areas full)!\n");
break;
}
}
+}
+
+static unsigned long find_random_phys_addr(unsigned long minimum,
+ unsigned long image_size)
+{
+ /* Check if we had too many memmaps. */
+ if (memmap_too_large) {
+ debug_putstr("Aborted memory entries scan (more than 4 memmap= args)!\n");
+ return 0;
+ }
+
+ /* Make sure minimum is aligned. */
+ minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN);
+
+ if (process_efi_entries(minimum, image_size))
+ return slots_fetch_random();
+ process_e820_entries(minimum, image_size);
return slots_fetch_random();
}
@@ -645,7 +738,7 @@ void choose_random_location(unsigned long input,
*/
min_addr = min(*output, 512UL << 20);
- /* Walk e820 and find a random address. */
+ /* Walk available memory entries to find a random address. */
random_addr = find_random_phys_addr(min_addr, output_size);
if (!random_addr) {
warn("Physical KASLR disabled: no suitable memory region!");
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index a0838ab929f2..c14217cd0155 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -116,8 +116,7 @@ void __putstr(const char *s)
}
}
- if (boot_params->screen_info.orig_video_mode == 0 &&
- lines == 0 && cols == 0)
+ if (lines == 0 || cols == 0)
return;
x = boot_params->screen_info.orig_x;
diff --git a/arch/x86/boot/compressed/pagetable.c b/arch/x86/boot/compressed/pagetable.c
index 28029be47fbb..f1aa43854bed 100644
--- a/arch/x86/boot/compressed/pagetable.c
+++ b/arch/x86/boot/compressed/pagetable.c
@@ -15,6 +15,13 @@
#define __pa(x) ((unsigned long)(x))
#define __va(x) ((void *)((unsigned long)(x)))
+/*
+ * The pgtable.h and mm/ident_map.c includes make use of the SME related
+ * information which is not used in the compressed image support. Un-define
+ * the SME support to avoid any compile and link errors.
+ */
+#undef CONFIG_AMD_MEM_ENCRYPT
+
#include "misc.h"
/* These actually do the work of building the kernel identity maps. */
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index 2ed8f0c25def..1bb08ecffd24 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -520,8 +520,14 @@ pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
# the description in lib/decompressor_xxx.c for specific information.
#
# extra_bytes = (uncompressed_size >> 12) + 65536 + 128
+#
+# LZ4 is even worse: data that cannot be further compressed grows by 0.4%,
+# or one byte per 256 bytes. OTOH, we can safely get rid of the +128 as
+# the size-dependent part now grows so fast.
+#
+# extra_bytes = (uncompressed_size >> 8) + 65536
-#define ZO_z_extra_bytes ((ZO_z_output_len >> 12) + 65536 + 128)
+#define ZO_z_extra_bytes ((ZO_z_output_len >> 8) + 65536)
#if ZO_z_output_len > ZO_z_input_len
# define ZO_z_extract_offset (ZO_z_output_len + ZO_z_extra_bytes - \
ZO_z_input_len)
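To put the new worst-case bound above into numbers, take a 32 MiB decompressed kernel (ZO_z_output_len = 33554432):

    extra_bytes = (33554432 >> 8) + 65536
                = 131072 + 65536
                = 196608 bytes  (~192 KiB)

whereas the old gzip-oriented formula gave (33554432 >> 12) + 65536 + 128 = 73856 bytes (~72 KiB), which may not cover LZ4's up-to-0.4% expansion of incompressible data.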
diff --git a/arch/x86/boot/string.c b/arch/x86/boot/string.c
index 630e3664906b..16f49123d747 100644
--- a/arch/x86/boot/string.c
+++ b/arch/x86/boot/string.c
@@ -16,6 +16,15 @@
#include "ctype.h"
#include "string.h"
+/*
+ * Undef these macros so that the functions that we provide
+ * here will have the correct names regardless of how string.h
+ * may have chosen to #define them.
+ */
+#undef memcpy
+#undef memset
+#undef memcmp
+
int memcmp(const void *s1, const void *s2, size_t len)
{
bool diff;
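The #undef block added above matters because the boot string headers can provide macro forms of these names, and defining a function whose name is still an active function-like macro gets rewritten by the preprocessor. A contrived illustration (the macro shown is hypothetical, not the one the boot headers actually use):

    #define memcmp(s1, s2, n)  __builtin_memcmp(s1, s2, n)   /* hypothetical */

    /*
     * Without the #undef, the definition below would be expanded into a
     * definition of __builtin_memcmp rather than of memcmp itself.
     */
    #undef memcmp
    int memcmp(const void *s1, const void *s2, unsigned long n);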
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index 6cf79e1a6830..0eb9f92f3717 100644
--- a/arch/x86/configs/i386_defconfig
+++ b/arch/x86/configs/i386_defconfig
@@ -1,5 +1,4 @@
# CONFIG_64BIT is not set
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
@@ -125,7 +124,6 @@ CONFIG_NF_CONNTRACK_IPV4=y
CONFIG_IP_NF_IPTABLES=y
CONFIG_IP_NF_FILTER=y
CONFIG_IP_NF_TARGET_REJECT=y
-CONFIG_IP_NF_TARGET_ULOG=y
CONFIG_NF_NAT=y
CONFIG_IP_NF_TARGET_MASQUERADE=y
CONFIG_IP_NF_MANGLE=y
@@ -255,7 +253,6 @@ CONFIG_USB_OHCI_HCD=y
CONFIG_USB_UHCI_HCD=y
CONFIG_USB_PRINTER=y
CONFIG_USB_STORAGE=y
-CONFIG_USB_LIBUSUAL=y
CONFIG_EDAC=y
CONFIG_RTC_CLASS=y
# CONFIG_RTC_HCTOSYS is not set
diff --git a/arch/x86/configs/tiny.config b/arch/x86/configs/tiny.config
index 4b429df40d7a..550cd5012b73 100644
--- a/arch/x86/configs/tiny.config
+++ b/arch/x86/configs/tiny.config
@@ -1,3 +1,5 @@
CONFIG_NOHIGHMEM=y
# CONFIG_HIGHMEM4G is not set
# CONFIG_HIGHMEM64G is not set
+CONFIG_GUESS_UNWINDER=y
+# CONFIG_FRAME_POINTER_UNWINDER is not set
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index de45f57b410d..4a4b16e56d35 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
@@ -124,7 +123,6 @@ CONFIG_NF_CONNTRACK_IPV4=y
CONFIG_IP_NF_IPTABLES=y
CONFIG_IP_NF_FILTER=y
CONFIG_IP_NF_TARGET_REJECT=y
-CONFIG_IP_NF_TARGET_ULOG=y
CONFIG_NF_NAT=y
CONFIG_IP_NF_TARGET_MASQUERADE=y
CONFIG_IP_NF_MANGLE=y
@@ -251,7 +249,6 @@ CONFIG_USB_OHCI_HCD=y
CONFIG_USB_UHCI_HCD=y
CONFIG_USB_PRINTER=y
CONFIG_USB_STORAGE=y
-CONFIG_USB_LIBUSUAL=y
CONFIG_EDAC=y
CONFIG_RTC_CLASS=y
# CONFIG_RTC_HCTOSYS is not set
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 4a55cdcdc008..5c15d6b57329 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -475,8 +475,8 @@ static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
unsigned int nbytes = walk->nbytes;
aesni_enc(ctx, keystream, ctrblk);
- crypto_xor(keystream, src, nbytes);
- memcpy(dst, keystream, nbytes);
+ crypto_xor_cpy(dst, keystream, src, nbytes);
+
crypto_inc(ctrblk, AES_BLOCK_SIZE);
}
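This crypto_xor()+memcpy() pair, and the identical ones in the blowfish, cast5 and des3_ede glue code below, collapse into one call whose meaning is simply dst = src1 ^ src2 with no intermediate copy. A byte-wise sketch of the semantics (the real helper in the crypto core is word-sized and alignment-aware; this is only the model):

    static void xor_cpy_sketch(unsigned char *dst, const unsigned char *src1,
                               const unsigned char *src2, unsigned int len)
    {
            while (len--)                   /* no separate memcpy() pass */
                    *dst++ = *src1++ ^ *src2++;
    }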
diff --git a/arch/x86/crypto/blowfish_glue.c b/arch/x86/crypto/blowfish_glue.c
index 17c05531dfd1..f9eca34301e2 100644
--- a/arch/x86/crypto/blowfish_glue.c
+++ b/arch/x86/crypto/blowfish_glue.c
@@ -271,8 +271,7 @@ static void ctr_crypt_final(struct bf_ctx *ctx, struct blkcipher_walk *walk)
unsigned int nbytes = walk->nbytes;
blowfish_enc_blk(ctx, keystream, ctrblk);
- crypto_xor(keystream, src, nbytes);
- memcpy(dst, keystream, nbytes);
+ crypto_xor_cpy(dst, keystream, src, nbytes);
crypto_inc(ctrblk, BF_BLOCK_SIZE);
}
diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c
index 8648158f3916..dbea6020ffe7 100644
--- a/arch/x86/crypto/cast5_avx_glue.c
+++ b/arch/x86/crypto/cast5_avx_glue.c
@@ -256,8 +256,7 @@ static void ctr_crypt_final(struct blkcipher_desc *desc,
unsigned int nbytes = walk->nbytes;
__cast5_encrypt(ctx, keystream, ctrblk);
- crypto_xor(keystream, src, nbytes);
- memcpy(dst, keystream, nbytes);
+ crypto_xor_cpy(dst, keystream, src, nbytes);
crypto_inc(ctrblk, CAST5_BLOCK_SIZE);
}
diff --git a/arch/x86/crypto/des3_ede_glue.c b/arch/x86/crypto/des3_ede_glue.c
index d6fc59aaaadf..30c0a37f4882 100644
--- a/arch/x86/crypto/des3_ede_glue.c
+++ b/arch/x86/crypto/des3_ede_glue.c
@@ -277,8 +277,7 @@ static void ctr_crypt_final(struct des3_ede_x86_ctx *ctx,
unsigned int nbytes = walk->nbytes;
des3_ede_enc_blk(ctx, keystream, ctrblk);
- crypto_xor(keystream, src, nbytes);
- memcpy(dst, keystream, nbytes);
+ crypto_xor_cpy(dst, keystream, src, nbytes);
crypto_inc(ctrblk, DES3_EDE_BLOCK_SIZE);
}
diff --git a/arch/x86/crypto/sha1_avx2_x86_64_asm.S b/arch/x86/crypto/sha1_avx2_x86_64_asm.S
index 1cd792db15ef..1eab79c9ac48 100644
--- a/arch/x86/crypto/sha1_avx2_x86_64_asm.S
+++ b/arch/x86/crypto/sha1_avx2_x86_64_asm.S
@@ -117,11 +117,10 @@
.set T1, REG_T1
.endm
-#define K_BASE %r8
#define HASH_PTR %r9
+#define BLOCKS_CTR %r8
#define BUFFER_PTR %r10
#define BUFFER_PTR2 %r13
-#define BUFFER_END %r11
#define PRECALC_BUF %r14
#define WK_BUF %r15
@@ -205,14 +204,14 @@
* blended AVX2 and ALU instruction scheduling
* 1 vector iteration per 8 rounds
*/
- vmovdqu ((i * 2) + PRECALC_OFFSET)(BUFFER_PTR), W_TMP
+ vmovdqu (i * 2)(BUFFER_PTR), W_TMP
.elseif ((i & 7) == 1)
- vinsertf128 $1, (((i-1) * 2)+PRECALC_OFFSET)(BUFFER_PTR2),\
+ vinsertf128 $1, ((i-1) * 2)(BUFFER_PTR2),\
WY_TMP, WY_TMP
.elseif ((i & 7) == 2)
vpshufb YMM_SHUFB_BSWAP, WY_TMP, WY
.elseif ((i & 7) == 4)
- vpaddd K_XMM(K_BASE), WY, WY_TMP
+ vpaddd K_XMM + K_XMM_AR(%rip), WY, WY_TMP
.elseif ((i & 7) == 7)
vmovdqu WY_TMP, PRECALC_WK(i&~7)
@@ -255,7 +254,7 @@
vpxor WY, WY_TMP, WY_TMP
.elseif ((i & 7) == 7)
vpxor WY_TMP2, WY_TMP, WY
- vpaddd K_XMM(K_BASE), WY, WY_TMP
+ vpaddd K_XMM + K_XMM_AR(%rip), WY, WY_TMP
vmovdqu WY_TMP, PRECALC_WK(i&~7)
PRECALC_ROTATE_WY
@@ -291,7 +290,7 @@
vpsrld $30, WY, WY
vpor WY, WY_TMP, WY
.elseif ((i & 7) == 7)
- vpaddd K_XMM(K_BASE), WY, WY_TMP
+ vpaddd K_XMM + K_XMM_AR(%rip), WY, WY_TMP
vmovdqu WY_TMP, PRECALC_WK(i&~7)
PRECALC_ROTATE_WY
@@ -446,6 +445,16 @@
.endm
+/* Add constant only if (%2 > %3) condition met (uses RTA as temp)
+ * %1 + %2 >= %3 ? %4 : 0
+ */
+.macro ADD_IF_GE a, b, c, d
+ mov \a, RTA
+ add $\d, RTA
+ cmp $\c, \b
+ cmovge RTA, \a
+.endm
+
/*
* macro implements 80 rounds of SHA-1, for multiple blocks with s/w pipelining
*/
@@ -463,13 +472,16 @@
lea (2*4*80+32)(%rsp), WK_BUF
# Precalc WK for first 2 blocks
- PRECALC_OFFSET = 0
+ ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 2, 64
.set i, 0
.rept 160
PRECALC i
.set i, i + 1
.endr
- PRECALC_OFFSET = 128
+
+ /* Go to next block if needed */
+ ADD_IF_GE BUFFER_PTR, BLOCKS_CTR, 3, 128
+ ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 4, 128
xchg WK_BUF, PRECALC_BUF
.align 32
@@ -479,8 +491,8 @@ _loop:
* we use K_BASE value as a signal of a last block,
* it is set below by: cmovae BUFFER_PTR, K_BASE
*/
- cmp K_BASE, BUFFER_PTR
- jne _begin
+ test BLOCKS_CTR, BLOCKS_CTR
+ jnz _begin
.align 32
jmp _end
.align 32
@@ -512,10 +524,10 @@ _loop0:
.set j, j+2
.endr
- add $(2*64), BUFFER_PTR /* move to next odd-64-byte block */
- cmp BUFFER_END, BUFFER_PTR /* is current block the last one? */
- cmovae K_BASE, BUFFER_PTR /* signal the last iteration smartly */
-
+ /* Update Counter */
+ sub $1, BLOCKS_CTR
+ /* Move to the next block only if needed*/
+ ADD_IF_GE BUFFER_PTR, BLOCKS_CTR, 4, 128
/*
* rounds
* 60,62,64,66,68
@@ -532,8 +544,8 @@ _loop0:
UPDATE_HASH 12(HASH_PTR), D
UPDATE_HASH 16(HASH_PTR), E
- cmp K_BASE, BUFFER_PTR /* is current block the last one? */
- je _loop
+ test BLOCKS_CTR, BLOCKS_CTR
+ jz _loop
mov TB, B
@@ -575,10 +587,10 @@ _loop2:
.set j, j+2
.endr
- add $(2*64), BUFFER_PTR2 /* move to next even-64-byte block */
-
- cmp BUFFER_END, BUFFER_PTR2 /* is current block the last one */
- cmovae K_BASE, BUFFER_PTR /* signal the last iteration smartly */
+ /* update counter */
+ sub $1, BLOCKS_CTR
+ /* Move to the next block only if needed*/
+ ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 4, 128
jmp _loop3
_loop3:
@@ -641,19 +653,12 @@ _loop3:
avx2_zeroupper
- lea K_XMM_AR(%rip), K_BASE
-
+ /* Setup initial values */
mov CTX, HASH_PTR
mov BUF, BUFFER_PTR
- lea 64(BUF), BUFFER_PTR2
-
- shl $6, CNT /* mul by 64 */
- add BUF, CNT
- add $64, CNT
- mov CNT, BUFFER_END
- cmp BUFFER_END, BUFFER_PTR2
- cmovae K_BASE, BUFFER_PTR2
+ mov BUF, BUFFER_PTR2
+ mov CNT, BLOCKS_CTR
xmm_mov BSWAP_SHUFB_CTL(%rip), YMM_SHUFB_BSWAP
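In C terms, the ADD_IF_GE macro introduced above is the branchless (cmovge-based) equivalent of the following, used to advance a buffer pointer by one or two 64-byte blocks only while enough blocks remain to process (names here are illustrative):

    static inline unsigned long add_if_ge(unsigned long ptr, long blocks_left,
                                          long threshold, unsigned long step)
    {
            /* take the incremented pointer only if blocks_left >= threshold */
            return (blocks_left >= threshold) ? ptr + step : ptr;
    }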
diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
index f960a043cdeb..fc61739150e7 100644
--- a/arch/x86/crypto/sha1_ssse3_glue.c
+++ b/arch/x86/crypto/sha1_ssse3_glue.c
@@ -201,7 +201,7 @@ asmlinkage void sha1_transform_avx2(u32 *digest, const char *data,
static bool avx2_usable(void)
{
- if (false && avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
+ if (avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
&& boot_cpu_has(X86_FEATURE_BMI1)
&& boot_cpu_has(X86_FEATURE_BMI2))
return true;
diff --git a/arch/x86/entry/Makefile b/arch/x86/entry/Makefile
index 9976fcecd17e..af28a8a24366 100644
--- a/arch/x86/entry/Makefile
+++ b/arch/x86/entry/Makefile
@@ -2,7 +2,6 @@
# Makefile for the x86 low level entry code
#
-OBJECT_FILES_NON_STANDARD_entry_$(BITS).o := y
OBJECT_FILES_NON_STANDARD_entry_64_compat.o := y
CFLAGS_syscall_64.o += $(call cc-option,-Wno-override-init,)
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 05ed3d393da7..640aafebdc00 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -1,4 +1,5 @@
#include <linux/jump_label.h>
+#include <asm/unwind_hints.h>
/*
@@ -112,6 +113,7 @@ For 32-bit we have the following conventions - kernel is built with
movq %rdx, 12*8+\offset(%rsp)
movq %rsi, 13*8+\offset(%rsp)
movq %rdi, 14*8+\offset(%rsp)
+ UNWIND_HINT_REGS offset=\offset extra=0
.endm
.macro SAVE_C_REGS offset=0
SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1
@@ -136,6 +138,7 @@ For 32-bit we have the following conventions - kernel is built with
movq %r12, 3*8+\offset(%rsp)
movq %rbp, 4*8+\offset(%rsp)
movq %rbx, 5*8+\offset(%rsp)
+ UNWIND_HINT_REGS offset=\offset
.endm
.macro RESTORE_EXTRA_REGS offset=0
@@ -145,6 +148,7 @@ For 32-bit we have the following conventions - kernel is built with
movq 3*8+\offset(%rsp), %r12
movq 4*8+\offset(%rsp), %rbp
movq 5*8+\offset(%rsp), %rbx
+ UNWIND_HINT_REGS offset=\offset extra=0
.endm
.macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1
@@ -167,6 +171,7 @@ For 32-bit we have the following conventions - kernel is built with
.endif
movq 13*8(%rsp), %rsi
movq 14*8(%rsp), %rdi
+ UNWIND_HINT_IRET_REGS offset=16*8
.endm
.macro RESTORE_C_REGS
RESTORE_C_REGS_HELPER 1,1,1,1,1
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index cdefcfdd9e63..03505ffbe1b6 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -23,6 +23,7 @@
#include <linux/user-return-notifier.h>
#include <linux/uprobes.h>
#include <linux/livepatch.h>
+#include <linux/syscalls.h>
#include <asm/desc.h>
#include <asm/traps.h>
@@ -183,6 +184,8 @@ __visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
struct thread_info *ti = current_thread_info();
u32 cached_flags;
+ addr_limit_user_check();
+
if (IS_ENABLED(CONFIG_PROVE_LOCKING) && WARN_ON(!irqs_disabled()))
local_irq_disable();
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 48ef7bb32c42..8a13d468635a 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -673,16 +673,8 @@ ENTRY(name) \
jmp ret_from_intr; \
ENDPROC(name)
-
-#ifdef CONFIG_TRACING
-# define TRACE_BUILD_INTERRUPT(name, nr) BUILD_INTERRUPT3(trace_##name, nr, smp_trace_##name)
-#else
-# define TRACE_BUILD_INTERRUPT(name, nr)
-#endif
-
#define BUILD_INTERRUPT(name, nr) \
BUILD_INTERRUPT3(name, nr, smp_##name); \
- TRACE_BUILD_INTERRUPT(name, nr)
/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>
@@ -880,25 +872,17 @@ ENTRY(xen_failsafe_callback)
ENDPROC(xen_failsafe_callback)
BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
- xen_evtchn_do_upcall)
+ xen_evtchn_do_upcall)
#endif /* CONFIG_XEN */
#if IS_ENABLED(CONFIG_HYPERV)
BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
- hyperv_vector_handler)
+ hyperv_vector_handler)
#endif /* CONFIG_HYPERV */
-#ifdef CONFIG_TRACING
-ENTRY(trace_page_fault)
- ASM_CLAC
- pushl $trace_do_page_fault
- jmp common_exception
-END(trace_page_fault)
-#endif
-
ENTRY(page_fault)
ASM_CLAC
pushl $do_page_fault
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index a9a8027a6c0e..49167258d587 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -36,6 +36,7 @@
#include <asm/smap.h>
#include <asm/pgtable_types.h>
#include <asm/export.h>
+#include <asm/frame.h>
#include <linux/err.h>
.code64
@@ -43,9 +44,10 @@
#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
+ UNWIND_HINT_EMPTY
swapgs
sysretq
-ENDPROC(native_usergs_sysret64)
+END(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */
.macro TRACE_IRQS_IRETQ
@@ -134,19 +136,14 @@ ENDPROC(native_usergs_sysret64)
*/
ENTRY(entry_SYSCALL_64)
+ UNWIND_HINT_EMPTY
/*
* Interrupts are off on entry.
* We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
* it is too small to ever cause noticeable irq latency.
*/
- SWAPGS_UNSAFE_STACK
- /*
- * A hypervisor implementation might want to use a label
- * after the swapgs, so that it can do the swapgs
- * for the guest and jump here on syscall.
- */
-GLOBAL(entry_SYSCALL_64_after_swapgs)
+ swapgs
movq %rsp, PER_CPU_VAR(rsp_scratch)
movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
@@ -158,6 +155,7 @@ GLOBAL(entry_SYSCALL_64_after_swapgs)
pushq %r11 /* pt_regs->flags */
pushq $__USER_CS /* pt_regs->cs */
pushq %rcx /* pt_regs->ip */
+GLOBAL(entry_SYSCALL_64_after_hwframe)
pushq %rax /* pt_regs->orig_ax */
pushq %rdi /* pt_regs->di */
pushq %rsi /* pt_regs->si */
@@ -169,6 +167,7 @@ GLOBAL(entry_SYSCALL_64_after_swapgs)
pushq %r10 /* pt_regs->r10 */
pushq %r11 /* pt_regs->r11 */
sub $(6*8), %rsp /* pt_regs->bp, bx, r12-15 not saved */
+ UNWIND_HINT_REGS extra=0
/*
* If we need to do entry work or if we guess we'll need to do
@@ -223,6 +222,7 @@ entry_SYSCALL_64_fastpath:
movq EFLAGS(%rsp), %r11
RESTORE_C_REGS_EXCEPT_RCX_R11
movq RSP(%rsp), %rsp
+ UNWIND_HINT_EMPTY
USERGS_SYSRET64
1:
@@ -316,6 +316,7 @@ syscall_return_via_sysret:
/* rcx and r11 are already restored (see code above) */
RESTORE_C_REGS_EXCEPT_RCX_R11
movq RSP(%rsp), %rsp
+ UNWIND_HINT_EMPTY
USERGS_SYSRET64
opportunistic_sysret_failed:
@@ -343,6 +344,7 @@ ENTRY(stub_ptregs_64)
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF
popq %rax
+ UNWIND_HINT_REGS extra=0
jmp entry_SYSCALL64_slow_path
1:
@@ -351,6 +353,7 @@ END(stub_ptregs_64)
.macro ptregs_stub func
ENTRY(ptregs_\func)
+ UNWIND_HINT_FUNC
leaq \func(%rip), %rax
jmp stub_ptregs_64
END(ptregs_\func)
@@ -367,6 +370,7 @@ END(ptregs_\func)
* %rsi: next task
*/
ENTRY(__switch_to_asm)
+ UNWIND_HINT_FUNC
/*
* Save callee-saved registers
* This must match the order in inactive_task_frame
@@ -406,6 +410,7 @@ END(__switch_to_asm)
* r12: kernel thread arg
*/
ENTRY(ret_from_fork)
+ UNWIND_HINT_EMPTY
movq %rax, %rdi
call schedule_tail /* rdi: 'prev' task parameter */
@@ -413,6 +418,7 @@ ENTRY(ret_from_fork)
jnz 1f /* kernel threads are uncommon */
2:
+ UNWIND_HINT_REGS
movq %rsp, %rdi
call syscall_return_slowpath /* returns with IRQs disabled */
TRACE_IRQS_ON /* user mode is traced as IRQS on */
@@ -440,13 +446,102 @@ END(ret_from_fork)
ENTRY(irq_entries_start)
vector=FIRST_EXTERNAL_VECTOR
.rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
+ UNWIND_HINT_IRET_REGS
pushq $(~vector+0x80) /* Note: always in signed byte range */
- vector=vector+1
jmp common_interrupt
.align 8
+ vector=vector+1
.endr
END(irq_entries_start)
+.macro DEBUG_ENTRY_ASSERT_IRQS_OFF
+#ifdef CONFIG_DEBUG_ENTRY
+ pushfq
+ testl $X86_EFLAGS_IF, (%rsp)
+ jz .Lokay_\@
+ ud2
+.Lokay_\@:
+ addq $8, %rsp
+#endif
+.endm
+
+/*
+ * Enters the IRQ stack if we're not already using it. NMI-safe. Clobbers
+ * flags and puts old RSP into old_rsp, and leaves all other GPRs alone.
+ * Requires kernel GSBASE.
+ *
+ * The invariant is that, if irq_count != -1, then the IRQ stack is in use.
+ */
+.macro ENTER_IRQ_STACK regs=1 old_rsp
+ DEBUG_ENTRY_ASSERT_IRQS_OFF
+ movq %rsp, \old_rsp
+
+ .if \regs
+ UNWIND_HINT_REGS base=\old_rsp
+ .endif
+
+ incl PER_CPU_VAR(irq_count)
+ jnz .Lirq_stack_push_old_rsp_\@
+
+ /*
+ * Right now, if we just incremented irq_count to zero, we've
+ * claimed the IRQ stack but we haven't switched to it yet.
+ *
+ * If anything is added that can interrupt us here without using IST,
+ * it must be *extremely* careful to limit its stack usage. This
+ * could include kprobes and a hypothetical future IST-less #DB
+ * handler.
+ *
+ * The OOPS unwinder relies on the word at the top of the IRQ
+ * stack linking back to the previous RSP for the entire time we're
+ * on the IRQ stack. For this to work reliably, we need to write
+ * it before we actually move ourselves to the IRQ stack.
+ */
+
+ movq \old_rsp, PER_CPU_VAR(irq_stack_union + IRQ_STACK_SIZE - 8)
+ movq PER_CPU_VAR(irq_stack_ptr), %rsp
+
+#ifdef CONFIG_DEBUG_ENTRY
+ /*
+ * If the first movq above becomes wrong due to IRQ stack layout
+ * changes, the only way we'll notice is if we try to unwind right
+ * here. Assert that we set up the stack right to catch this type
+ * of bug quickly.
+ */
+ cmpq -8(%rsp), \old_rsp
+ je .Lirq_stack_okay\@
+ ud2
+ .Lirq_stack_okay\@:
+#endif
+
+.Lirq_stack_push_old_rsp_\@:
+ pushq \old_rsp
+
+ .if \regs
+ UNWIND_HINT_REGS indirect=1
+ .endif
+.endm
+
+/*
+ * Undoes ENTER_IRQ_STACK.
+ */
+.macro LEAVE_IRQ_STACK regs=1
+ DEBUG_ENTRY_ASSERT_IRQS_OFF
+ /* We need to be off the IRQ stack before decrementing irq_count. */
+ popq %rsp
+
+ .if \regs
+ UNWIND_HINT_REGS
+ .endif
+
+ /*
+ * As in ENTER_IRQ_STACK, irq_count == 0, we are still claiming
+ * the irq stack but we're not on it.
+ */
+
+ decl PER_CPU_VAR(irq_count)
+.endm
+
/*
* Interrupt entry/exit.
*
@@ -485,17 +580,7 @@ END(irq_entries_start)
CALL_enter_from_user_mode
1:
- /*
- * Save previous stack pointer, optionally switch to interrupt stack.
- * irq_count is used to check if a CPU is already on an interrupt stack
- * or not. While this is essentially redundant with preempt_count it is
- * a little cheaper to use a separate counter in the PDA (short of
- * moving irq_enter into assembly, which would be too much work)
- */
- movq %rsp, %rdi
- incl PER_CPU_VAR(irq_count)
- cmovzq PER_CPU_VAR(irq_stack_ptr), %rsp
- pushq %rdi
+ ENTER_IRQ_STACK old_rsp=%rdi
/* We entered an interrupt context - irqs are off: */
TRACE_IRQS_OFF
@@ -515,10 +600,8 @@ common_interrupt:
ret_from_intr:
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF
- decl PER_CPU_VAR(irq_count)
- /* Restore saved previous stack */
- popq %rsp
+ LEAVE_IRQ_STACK
testb $3, CS(%rsp)
jz retint_kernel
@@ -561,6 +644,7 @@ restore_c_regs_and_iret:
INTERRUPT_RETURN
ENTRY(native_iret)
+ UNWIND_HINT_IRET_REGS
/*
* Are we returning to a stack segment from the LDT? Note: in
* 64-bit mode SS:RSP on the exception stack is always valid.
@@ -633,6 +717,7 @@ native_irq_return_ldt:
orq PER_CPU_VAR(espfix_stack), %rax
SWAPGS
movq %rax, %rsp
+ UNWIND_HINT_IRET_REGS offset=8
/*
* At this point, we cannot write to the stack any more, but we can
@@ -654,6 +739,7 @@ END(common_interrupt)
*/
.macro apicinterrupt3 num sym do_sym
ENTRY(\sym)
+ UNWIND_HINT_IRET_REGS
ASM_CLAC
pushq $~(\num)
.Lcommon_\sym:
@@ -662,31 +748,13 @@ ENTRY(\sym)
END(\sym)
.endm
-#ifdef CONFIG_TRACING
-#define trace(sym) trace_##sym
-#define smp_trace(sym) smp_trace_##sym
-
-.macro trace_apicinterrupt num sym
-apicinterrupt3 \num trace(\sym) smp_trace(\sym)
-.endm
-#else
-.macro trace_apicinterrupt num sym do_sym
-.endm
-#endif
-
/* Make sure APIC interrupt handlers end up in the irqentry section: */
-#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN)
-# define PUSH_SECTION_IRQENTRY .pushsection .irqentry.text, "ax"
-# define POP_SECTION_IRQENTRY .popsection
-#else
-# define PUSH_SECTION_IRQENTRY
-# define POP_SECTION_IRQENTRY
-#endif
+#define PUSH_SECTION_IRQENTRY .pushsection .irqentry.text, "ax"
+#define POP_SECTION_IRQENTRY .popsection
.macro apicinterrupt num sym do_sym
PUSH_SECTION_IRQENTRY
apicinterrupt3 \num \sym \do_sym
-trace_apicinterrupt \num \sym
POP_SECTION_IRQENTRY
.endm
@@ -705,6 +773,7 @@ apicinterrupt X86_PLATFORM_IPI_VECTOR x86_platform_ipi smp_x86_platform_ipi
#ifdef CONFIG_HAVE_KVM
apicinterrupt3 POSTED_INTR_VECTOR kvm_posted_intr_ipi smp_kvm_posted_intr_ipi
apicinterrupt3 POSTED_INTR_WAKEUP_VECTOR kvm_posted_intr_wakeup_ipi smp_kvm_posted_intr_wakeup_ipi
+apicinterrupt3 POSTED_INTR_NESTED_VECTOR kvm_posted_intr_nested_ipi smp_kvm_posted_intr_nested_ipi
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
@@ -739,13 +808,14 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt
.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
ENTRY(\sym)
+ UNWIND_HINT_IRET_REGS offset=8
+
/* Sanity check */
.if \shift_ist != -1 && \paranoid == 0
.error "using shift_ist requires paranoid=1"
.endif
ASM_CLAC
- PARAVIRT_ADJUST_EXCEPTION_FRAME
.ifeq \has_error_code
pushq $-1 /* ORIG_RAX: no syscall to restart */
@@ -762,6 +832,7 @@ ENTRY(\sym)
.else
call error_entry
.endif
+ UNWIND_HINT_REGS
/* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */
.if \paranoid
@@ -828,17 +899,6 @@ ENTRY(\sym)
END(\sym)
.endm
-#ifdef CONFIG_TRACING
-.macro trace_idtentry sym do_sym has_error_code:req
-idtentry trace(\sym) trace(\do_sym) has_error_code=\has_error_code
-idtentry \sym \do_sym has_error_code=\has_error_code
-.endm
-#else
-.macro trace_idtentry sym do_sym has_error_code:req
-idtentry \sym \do_sym has_error_code=\has_error_code
-.endm
-#endif
-
idtentry divide_error do_divide_error has_error_code=0
idtentry overflow do_overflow has_error_code=0
idtentry bounds do_bounds has_error_code=0
@@ -859,6 +919,7 @@ idtentry simd_coprocessor_error do_simd_coprocessor_error has_error_code=0
* edi: new selector
*/
ENTRY(native_load_gs_index)
+ FRAME_BEGIN
pushfq
DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
SWAPGS
@@ -867,8 +928,9 @@ ENTRY(native_load_gs_index)
2: ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
SWAPGS
popfq
+ FRAME_END
ret
-END(native_load_gs_index)
+ENDPROC(native_load_gs_index)
EXPORT_SYMBOL(native_load_gs_index)
_ASM_EXTABLE(.Lgs_change, bad_gs)
@@ -891,17 +953,15 @@ bad_gs:
ENTRY(do_softirq_own_stack)
pushq %rbp
mov %rsp, %rbp
- incl PER_CPU_VAR(irq_count)
- cmove PER_CPU_VAR(irq_stack_ptr), %rsp
- push %rbp /* frame pointer backlink */
+ ENTER_IRQ_STACK regs=0 old_rsp=%r11
call __do_softirq
+ LEAVE_IRQ_STACK regs=0
leaveq
- decl PER_CPU_VAR(irq_count)
ret
-END(do_softirq_own_stack)
+ENDPROC(do_softirq_own_stack)
#ifdef CONFIG_XEN
-idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
+idtentry hypervisor_callback xen_do_hypervisor_callback has_error_code=0
/*
* A note on the "critical region" in our callback handler.
@@ -922,14 +982,14 @@ ENTRY(xen_do_hypervisor_callback) /* do_hypervisor_callback(struct *pt_regs) */
* Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will
* see the correct pointer to the pt_regs
*/
+ UNWIND_HINT_FUNC
movq %rdi, %rsp /* we don't return, adjust the stack frame */
-11: incl PER_CPU_VAR(irq_count)
- movq %rsp, %rbp
- cmovzq PER_CPU_VAR(irq_stack_ptr), %rsp
- pushq %rbp /* frame pointer backlink */
+ UNWIND_HINT_REGS
+
+ ENTER_IRQ_STACK old_rsp=%r10
call xen_evtchn_do_upcall
- popq %rsp
- decl PER_CPU_VAR(irq_count)
+ LEAVE_IRQ_STACK
+
#ifndef CONFIG_PREEMPT
call xen_maybe_preempt_hcall
#endif
@@ -950,6 +1010,7 @@ END(xen_do_hypervisor_callback)
* with its current contents: any discrepancy means we in category 1.
*/
ENTRY(xen_failsafe_callback)
+ UNWIND_HINT_EMPTY
movl %ds, %ecx
cmpw %cx, 0x10(%rsp)
jne 1f
@@ -967,13 +1028,13 @@ ENTRY(xen_failsafe_callback)
movq 8(%rsp), %r11
addq $0x30, %rsp
pushq $0 /* RIP */
- pushq %r11
- pushq %rcx
+ UNWIND_HINT_IRET_REGS offset=8
jmp general_protection
1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
movq (%rsp), %rcx
movq 8(%rsp), %r11
addq $0x30, %rsp
+ UNWIND_HINT_IRET_REGS
pushq $-1 /* orig_ax = -1 => not a system call */
ALLOC_PT_GPREGS_ON_STACK
SAVE_C_REGS
@@ -997,13 +1058,12 @@ idtentry int3 do_int3 has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK
idtentry stack_segment do_stack_segment has_error_code=1
#ifdef CONFIG_XEN
-idtentry xen_debug do_debug has_error_code=0
-idtentry xen_int3 do_int3 has_error_code=0
-idtentry xen_stack_segment do_stack_segment has_error_code=1
+idtentry xendebug do_debug has_error_code=0
+idtentry xenint3 do_int3 has_error_code=0
#endif
idtentry general_protection do_general_protection has_error_code=1
-trace_idtentry page_fault do_page_fault has_error_code=1
+idtentry page_fault do_page_fault has_error_code=1
#ifdef CONFIG_KVM_GUEST
idtentry async_page_fault do_async_page_fault has_error_code=1
@@ -1019,6 +1079,7 @@ idtentry machine_check has_error_code=0 paranoid=1 do_sym=*machine_check_vec
* Return: ebx=0: need swapgs on exit, ebx=1: otherwise
*/
ENTRY(paranoid_entry)
+ UNWIND_HINT_FUNC
cld
SAVE_C_REGS 8
SAVE_EXTRA_REGS 8
@@ -1046,6 +1107,7 @@ END(paranoid_entry)
* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
*/
ENTRY(paranoid_exit)
+ UNWIND_HINT_REGS
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF_DEBUG
testl %ebx, %ebx /* swapgs needed? */
@@ -1067,6 +1129,7 @@ END(paranoid_exit)
* Return: EBX=0: came from user mode; EBX=1: otherwise
*/
ENTRY(error_entry)
+ UNWIND_HINT_FUNC
cld
SAVE_C_REGS 8
SAVE_EXTRA_REGS 8
@@ -1151,6 +1214,7 @@ END(error_entry)
* 0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode
*/
ENTRY(error_exit)
+ UNWIND_HINT_REGS
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF
testl %ebx, %ebx
@@ -1159,19 +1223,9 @@ ENTRY(error_exit)
END(error_exit)
/* Runs on exception stack */
+/* XXX: broken on Xen PV */
ENTRY(nmi)
- /*
- * Fix up the exception frame if we're on Xen.
- * PARAVIRT_ADJUST_EXCEPTION_FRAME is guaranteed to push at most
- * one value to the stack on native, so it may clobber the rdx
- * scratch slot, but it won't clobber any of the important
- * slots past it.
- *
- * Xen is a different story, because the Xen frame itself overlaps
- * the "NMI executing" variable.
- */
- PARAVIRT_ADJUST_EXCEPTION_FRAME
-
+ UNWIND_HINT_IRET_REGS
/*
* We allow breakpoints in NMIs. If a breakpoint occurs, then
* the iretq it performs will take us out of NMI context.
@@ -1210,6 +1264,8 @@ ENTRY(nmi)
* other IST entries.
*/
+ ASM_CLAC
+
/* Use %rdx as our temp variable throughout */
pushq %rdx
@@ -1231,11 +1287,13 @@ ENTRY(nmi)
cld
movq %rsp, %rdx
movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+ UNWIND_HINT_IRET_REGS base=%rdx offset=8
pushq 5*8(%rdx) /* pt_regs->ss */
pushq 4*8(%rdx) /* pt_regs->rsp */
pushq 3*8(%rdx) /* pt_regs->flags */
pushq 2*8(%rdx) /* pt_regs->cs */
pushq 1*8(%rdx) /* pt_regs->rip */
+ UNWIND_HINT_IRET_REGS
pushq $-1 /* pt_regs->orig_ax */
pushq %rdi /* pt_regs->di */
pushq %rsi /* pt_regs->si */
@@ -1252,6 +1310,7 @@ ENTRY(nmi)
pushq %r13 /* pt_regs->r13 */
pushq %r14 /* pt_regs->r14 */
pushq %r15 /* pt_regs->r15 */
+ UNWIND_HINT_REGS
ENCODE_FRAME_POINTER
/*
@@ -1406,6 +1465,7 @@ first_nmi:
.rept 5
pushq 11*8(%rsp)
.endr
+ UNWIND_HINT_IRET_REGS
/* Everything up to here is safe from nested NMIs */
@@ -1421,6 +1481,7 @@ first_nmi:
pushq $__KERNEL_CS /* CS */
pushq $1f /* RIP */
INTERRUPT_RETURN /* continues at repeat_nmi below */
+ UNWIND_HINT_IRET_REGS
1:
#endif
@@ -1470,6 +1531,7 @@ end_repeat_nmi:
* exceptions might do.
*/
call paranoid_entry
+ UNWIND_HINT_REGS
/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
movq %rsp, %rdi
@@ -1507,17 +1569,19 @@ nmi_restore:
END(nmi)
ENTRY(ignore_sysret)
+ UNWIND_HINT_EMPTY
mov $-ENOSYS, %eax
sysret
END(ignore_sysret)
ENTRY(rewind_stack_do_exit)
+ UNWIND_HINT_FUNC
/* Prevent any naive code from trying to unwind to our caller. */
xorl %ebp, %ebp
movq PER_CPU_VAR(cpu_current_top_of_stack), %rax
- leaq -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%rax), %rsp
+ leaq -PTREGS_SIZE(%rax), %rsp
+ UNWIND_HINT_FUNC sp_offset=PTREGS_SIZE
call do_exit
-1: jmp 1b
END(rewind_stack_do_exit)
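For readers following the ENTER_IRQ_STACK/LEAVE_IRQ_STACK rework in this file, a conceptual C model of the enter path may help; the accessors below (current_rsp, irq_count, irq_stack_top, set_rsp, push) are hypothetical stand-ins for the per-CPU variables and stack operations the asm touches directly:

    /* hypothetical stand-ins, for illustration only */
    unsigned long current_rsp(void);
    int *irq_count(void);                   /* per-CPU counter, starts at -1 */
    unsigned long irq_stack_top(void);
    void set_rsp(unsigned long rsp);
    void push(unsigned long val);

    static void enter_irq_stack_model(void)
    {
            unsigned long old_rsp = current_rsp();

            if (++*irq_count() == 0) {
                    /*
                     * First entry: publish the back-link at the top of the
                     * IRQ stack before moving RSP, so the unwinder can
                     * always walk across the switch.
                     */
                    *(unsigned long *)(irq_stack_top() - 8) = old_rsp;
                    set_rsp(irq_stack_top());
            }
            push(old_rsp);  /* LEAVE_IRQ_STACK pops this back into RSP and
                             * then decrements irq_count */
    }

Nested entries (irq_count already >= 0) skip the switch and just push the old RSP onto the IRQ stack they are already on.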
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index e1721dafbcb1..e26c25ca7756 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -183,21 +183,20 @@ ENDPROC(entry_SYSENTER_compat)
*/
ENTRY(entry_SYSCALL_compat)
/* Interrupts are off on entry. */
- SWAPGS_UNSAFE_STACK
+ swapgs
/* Stash user ESP and switch to the kernel stack. */
movl %esp, %r8d
movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
- /* Zero-extending 32-bit regs, do not remove */
- movl %eax, %eax
-
/* Construct struct pt_regs on stack */
pushq $__USER32_DS /* pt_regs->ss */
pushq %r8 /* pt_regs->sp */
pushq %r11 /* pt_regs->flags */
pushq $__USER32_CS /* pt_regs->cs */
pushq %rcx /* pt_regs->ip */
+GLOBAL(entry_SYSCALL_compat_after_hwframe)
+ movl %eax, %eax /* discard orig_ax high bits */
pushq %rax /* pt_regs->orig_ax */
pushq %rdi /* pt_regs->di */
pushq %rsi /* pt_regs->si */
@@ -294,7 +293,6 @@ ENTRY(entry_INT80_compat)
/*
* Interrupts are off on entry.
*/
- PARAVIRT_ADJUST_EXCEPTION_FRAME
ASM_CLAC /* Do this early to minimize exposure */
SWAPGS
@@ -342,8 +340,7 @@ ENTRY(entry_INT80_compat)
jmp restore_regs_and_iret
END(entry_INT80_compat)
- ALIGN
-GLOBAL(stub32_clone)
+ENTRY(stub32_clone)
/*
* The 32-bit clone ABI is: clone(..., int tls_val, int *child_tidptr).
* The 64-bit clone ABI is: clone(..., int *child_tidptr, int tls_val).
@@ -353,3 +350,4 @@ GLOBAL(stub32_clone)
*/
xchg %r8, %rcx
jmp sys_clone
+ENDPROC(stub32_clone)
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index 726355ce8497..1911310959f8 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -351,7 +351,7 @@ static void vgetcpu_cpu_init(void *arg)
* and 8 bits for the node)
*/
d.limit0 = cpu | ((node & 0xf) << 12);
- d.limit = node >> 4;
+ d.limit1 = node >> 4;
d.type = 5; /* RO data, expand down, accessed */
d.dpl = 3; /* Visible to user code */
d.s = 1; /* Not a system segment */
diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
index ad44af0dd667..f5cbbba99283 100644
--- a/arch/x86/events/amd/uncore.c
+++ b/arch/x86/events/amd/uncore.c
@@ -400,11 +400,24 @@ static int amd_uncore_cpu_starting(unsigned int cpu)
if (amd_uncore_llc) {
unsigned int apicid = cpu_data(cpu).apicid;
- unsigned int nshared;
+ unsigned int nshared, subleaf, prev_eax = 0;
uncore = *per_cpu_ptr(amd_uncore_llc, cpu);
- cpuid_count(0x8000001d, 2, &eax, &ebx, &ecx, &edx);
- nshared = ((eax >> 14) & 0xfff) + 1;
+ /*
+ * Iterate over Cache Topology Definition leaves until no
+ * more cache descriptions are available.
+ */
+ for (subleaf = 0; subleaf < 5; subleaf++) {
+ cpuid_count(0x8000001d, subleaf, &eax, &ebx, &ecx, &edx);
+
+ /* EAX[0:4] gives type of cache */
+ if (!(eax & 0x1f))
+ break;
+
+ prev_eax = eax;
+ }
+ nshared = ((prev_eax >> 14) & 0xfff) + 1;
+
uncore->id = apicid - (apicid % nshared);
uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_llc);
@@ -555,7 +568,7 @@ static int __init amd_uncore_init(void)
ret = 0;
}
- if (boot_cpu_has(X86_FEATURE_PERFCTR_L2)) {
+ if (boot_cpu_has(X86_FEATURE_PERFCTR_LLC)) {
amd_uncore_llc = alloc_percpu(struct amd_uncore *);
if (!amd_uncore_llc) {
ret = -ENOMEM;
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index ff1ea2fb9705..80534d3c2480 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -191,8 +191,8 @@ static void release_pmc_hardware(void) {}
static bool check_hw_exists(void)
{
- u64 val, val_fail, val_new= ~0;
- int i, reg, reg_fail, ret = 0;
+ u64 val, val_fail = -1, val_new= ~0;
+ int i, reg, reg_fail = -1, ret = 0;
int bios_fail = 0;
int reg_safe = -1;
@@ -487,22 +487,28 @@ static inline int precise_br_compat(struct perf_event *event)
return m == b;
}
-int x86_pmu_hw_config(struct perf_event *event)
+int x86_pmu_max_precise(void)
{
- if (event->attr.precise_ip) {
- int precise = 0;
+ int precise = 0;
- /* Support for constant skid */
- if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) {
+ /* Support for constant skid */
+ if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) {
+ precise++;
+
+ /* Support for IP fixup */
+ if (x86_pmu.lbr_nr || x86_pmu.intel_cap.pebs_format >= 2)
precise++;
- /* Support for IP fixup */
- if (x86_pmu.lbr_nr || x86_pmu.intel_cap.pebs_format >= 2)
- precise++;
+ if (x86_pmu.pebs_prec_dist)
+ precise++;
+ }
+ return precise;
+}
- if (x86_pmu.pebs_prec_dist)
- precise++;
- }
+int x86_pmu_hw_config(struct perf_event *event)
+{
+ if (event->attr.precise_ip) {
+ int precise = x86_pmu_max_precise();
if (event->attr.precise_ip > precise)
return -EOPNOTSUPP;
@@ -1751,6 +1757,7 @@ ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event)
}
static struct attribute_group x86_pmu_attr_group;
+static struct attribute_group x86_pmu_caps_group;
static int __init init_hw_perf_events(void)
{
@@ -1799,6 +1806,14 @@ static int __init init_hw_perf_events(void)
x86_pmu_format_group.attrs = x86_pmu.format_attrs;
+ if (x86_pmu.caps_attrs) {
+ struct attribute **tmp;
+
+ tmp = merge_attr(x86_pmu_caps_group.attrs, x86_pmu.caps_attrs);
+ if (!WARN_ON(!tmp))
+ x86_pmu_caps_group.attrs = tmp;
+ }
+
if (x86_pmu.event_attrs)
x86_pmu_events_group.attrs = x86_pmu.event_attrs;
@@ -2114,7 +2129,7 @@ static void refresh_pce(void *ignored)
load_mm_cr4(this_cpu_read(cpu_tlbstate.loaded_mm));
}
-static void x86_pmu_event_mapped(struct perf_event *event)
+static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
{
if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
return;
@@ -2129,22 +2144,20 @@ static void x86_pmu_event_mapped(struct perf_event *event)
* For now, this can't happen because all callers hold mmap_sem
* for write. If this changes, we'll need a different solution.
*/
- lockdep_assert_held_exclusive(&current->mm->mmap_sem);
+ lockdep_assert_held_exclusive(&mm->mmap_sem);
- if (atomic_inc_return(&current->mm->context.perf_rdpmc_allowed) == 1)
- on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
+ if (atomic_inc_return(&mm->context.perf_rdpmc_allowed) == 1)
+ on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);
}
-static void x86_pmu_event_unmapped(struct perf_event *event)
+static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm)
{
- if (!current->mm)
- return;
if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
return;
- if (atomic_dec_and_test(&current->mm->context.perf_rdpmc_allowed))
- on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
+ if (atomic_dec_and_test(&mm->context.perf_rdpmc_allowed))
+ on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);
}
static int x86_pmu_event_idx(struct perf_event *event)
@@ -2215,10 +2228,30 @@ static struct attribute_group x86_pmu_attr_group = {
.attrs = x86_pmu_attrs,
};
+static ssize_t max_precise_show(struct device *cdev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu_max_precise());
+}
+
+static DEVICE_ATTR_RO(max_precise);
+
+static struct attribute *x86_pmu_caps_attrs[] = {
+ &dev_attr_max_precise.attr,
+ NULL
+};
+
+static struct attribute_group x86_pmu_caps_group = {
+ .name = "caps",
+ .attrs = x86_pmu_caps_attrs,
+};
+
static const struct attribute_group *x86_pmu_attr_groups[] = {
&x86_pmu_attr_group,
&x86_pmu_format_group,
&x86_pmu_events_group,
+ &x86_pmu_caps_group,
NULL,
};
@@ -2337,12 +2370,9 @@ static unsigned long get_segment_base(unsigned int segment)
#ifdef CONFIG_MODIFY_LDT_SYSCALL
struct ldt_struct *ldt;
- if (idx > LDT_ENTRIES)
- return 0;
-
/* IRQs are off, so this synchronizes with smp_store_release */
ldt = lockless_dereference(current->active_mm->context.ldt);
- if (!ldt || idx > ldt->nr_entries)
+ if (!ldt || idx >= ldt->nr_entries)
return 0;
desc = &ldt->entries[idx];
@@ -2350,7 +2380,7 @@ static unsigned long get_segment_base(unsigned int segment)
return 0;
#endif
} else {
- if (idx > GDT_ENTRIES)
+ if (idx >= GDT_ENTRIES)
return 0;
desc = raw_cpu_ptr(gdt_page.gdt) + idx;
diff --git a/arch/x86/events/intel/Makefile b/arch/x86/events/intel/Makefile
index 06c2baa51814..e9d8520a801a 100644
--- a/arch/x86/events/intel/Makefile
+++ b/arch/x86/events/intel/Makefile
@@ -1,4 +1,4 @@
-obj-$(CONFIG_CPU_SUP_INTEL) += core.o bts.o cqm.o
+obj-$(CONFIG_CPU_SUP_INTEL) += core.o bts.o
obj-$(CONFIG_CPU_SUP_INTEL) += ds.o knc.o
obj-$(CONFIG_CPU_SUP_INTEL) += lbr.o p4.o p6.o pt.o
obj-$(CONFIG_PERF_EVENTS_INTEL_RAPL) += intel-rapl-perf.o
diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c
index 8ae8c5ce3a1f..16076eb34699 100644
--- a/arch/x86/events/intel/bts.c
+++ b/arch/x86/events/intel/bts.c
@@ -69,7 +69,7 @@ struct bts_buffer {
struct bts_phys buf[0];
};
-struct pmu bts_pmu;
+static struct pmu bts_pmu;
static size_t buf_size(struct page *page)
{
@@ -268,7 +268,7 @@ static void bts_event_start(struct perf_event *event, int flags)
bts->ds_back.bts_absolute_maximum = cpuc->ds->bts_absolute_maximum;
bts->ds_back.bts_interrupt_threshold = cpuc->ds->bts_interrupt_threshold;
- event->hw.itrace_started = 1;
+ perf_event_itrace_started(event);
event->hw.state = 0;
__bts_event_start(event);
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index aa62437d1aa1..829e89cfcee2 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -1708,6 +1708,120 @@ static __initconst const u64 glm_hw_cache_extra_regs
},
};
+static __initconst const u64 glp_hw_cache_event_ids
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ [C(L1D)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
+ [C(RESULT_MISS)] = 0x0,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
+ [C(RESULT_MISS)] = 0x0,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = 0x0,
+ [C(RESULT_MISS)] = 0x0,
+ },
+ },
+ [C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = 0x0380, /* ICACHE.ACCESSES */
+ [C(RESULT_MISS)] = 0x0280, /* ICACHE.MISSES */
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = 0x0,
+ [C(RESULT_MISS)] = 0x0,
+ },
+ },
+ [C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
+ [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = 0x1b7, /* OFFCORE_RESPONSE */
+ [C(RESULT_MISS)] = 0x1b7, /* OFFCORE_RESPONSE */
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = 0x0,
+ [C(RESULT_MISS)] = 0x0,
+ },
+ },
+ [C(DTLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
+ [C(RESULT_MISS)] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
+ [C(RESULT_MISS)] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = 0x0,
+ [C(RESULT_MISS)] = 0x0,
+ },
+ },
+ [C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = 0x00c0, /* INST_RETIRED.ANY_P */
+ [C(RESULT_MISS)] = 0x0481, /* ITLB.MISS */
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+ },
+ [C(BPU)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
+ [C(RESULT_MISS)] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+ },
+};
+
+static __initconst const u64 glp_hw_cache_extra_regs
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ [C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = GLM_DEMAND_READ|
+ GLM_LLC_ACCESS,
+ [C(RESULT_MISS)] = GLM_DEMAND_READ|
+ GLM_LLC_MISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = GLM_DEMAND_WRITE|
+ GLM_LLC_ACCESS,
+ [C(RESULT_MISS)] = GLM_DEMAND_WRITE|
+ GLM_LLC_MISS,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = 0x0,
+ [C(RESULT_MISS)] = 0x0,
+ },
+ },
+};
+
#define KNL_OT_L2_HITE BIT_ULL(19) /* Other Tile L2 Hit */
#define KNL_OT_L2_HITF BIT_ULL(20) /* Other Tile L2 Hit */
#define KNL_MCDRAM_LOCAL BIT_ULL(21)
@@ -3016,6 +3130,9 @@ static int hsw_hw_config(struct perf_event *event)
return 0;
}
+static struct event_constraint counter0_constraint =
+ INTEL_ALL_EVENT_CONSTRAINT(0, 0x1);
+
static struct event_constraint counter2_constraint =
EVENT_CONSTRAINT(0, 0x4, 0);
@@ -3037,6 +3154,21 @@ hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
return c;
}
+static struct event_constraint *
+glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+ struct perf_event *event)
+{
+ struct event_constraint *c;
+
+ /* :ppp means to do reduced skid PEBS which is PMC0 only. */
+ if (event->attr.precise_ip == 3)
+ return &counter0_constraint;
+
+ c = intel_get_event_constraints(cpuc, idx, event);
+
+ return c;
+}
+
/*
* Broadwell:
*
@@ -3265,10 +3397,8 @@ static void intel_pmu_cpu_dying(int cpu)
static void intel_pmu_sched_task(struct perf_event_context *ctx,
bool sched_in)
{
- if (x86_pmu.pebs_active)
- intel_pmu_pebs_sched_task(ctx, sched_in);
- if (x86_pmu.lbr_nr)
- intel_pmu_lbr_sched_task(ctx, sched_in);
+ intel_pmu_pebs_sched_task(ctx, sched_in);
+ intel_pmu_lbr_sched_task(ctx, sched_in);
}
PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
@@ -3285,12 +3415,26 @@ static struct attribute *intel_arch3_formats_attr[] = {
&format_attr_any.attr,
&format_attr_inv.attr,
&format_attr_cmask.attr,
+ NULL,
+};
+
+static struct attribute *hsw_format_attr[] = {
&format_attr_in_tx.attr,
&format_attr_in_tx_cp.attr,
+ &format_attr_offcore_rsp.attr,
+ &format_attr_ldlat.attr,
+ NULL
+};
- &format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */
- &format_attr_ldlat.attr, /* PEBS load latency */
- NULL,
+static struct attribute *nhm_format_attr[] = {
+ &format_attr_offcore_rsp.attr,
+ &format_attr_ldlat.attr,
+ NULL
+};
+
+static struct attribute *slm_format_attr[] = {
+ &format_attr_offcore_rsp.attr,
+ NULL
};
static struct attribute *skl_format_attr[] = {
@@ -3651,6 +3795,36 @@ done:
static DEVICE_ATTR_RW(freeze_on_smi);
+static ssize_t branches_show(struct device *cdev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu.lbr_nr);
+}
+
+static DEVICE_ATTR_RO(branches);
+
+static struct attribute *lbr_attrs[] = {
+ &dev_attr_branches.attr,
+ NULL
+};
+
+static char pmu_name_str[30];
+
+static ssize_t pmu_name_show(struct device *cdev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n", pmu_name_str);
+}
+
+static DEVICE_ATTR_RO(pmu_name);
+
+static struct attribute *intel_pmu_caps_attrs[] = {
+ &dev_attr_pmu_name.attr,
+ NULL
+};
+
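These caps attributes are exported through the PMU's sysfs device directory. A small sketch of reading them from user space; the /sys/bus/event_source/devices/cpu path is an assumption (the core PMU is normally registered as "cpu"), and the branches file would only appear when LBRs are available.

/* Sketch: read the new caps attributes from sysfs. The exact paths are
 * assumptions, not defined by this patch.
 */
#include <stdio.h>

static void show_cap(const char *path)
{
	char buf[64];
	FILE *f = fopen(path, "r");

	if (!f)
		return;
	if (fgets(buf, sizeof(buf), f))
		printf("%s: %s", path, buf);
	fclose(f);
}

int main(void)
{
	show_cap("/sys/bus/event_source/devices/cpu/caps/pmu_name");
	show_cap("/sys/bus/event_source/devices/cpu/caps/branches");
	return 0;
}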
static struct attribute *intel_pmu_attrs[] = {
&dev_attr_freeze_on_smi.attr,
NULL,
@@ -3665,6 +3839,8 @@ __init int intel_pmu_init(void)
unsigned int unused;
struct extra_reg *er;
int version, i;
+ struct attribute **extra_attr = NULL;
+ char *name;
if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
switch (boot_cpu_data.x86) {
@@ -3732,6 +3908,7 @@ __init int intel_pmu_init(void)
switch (boot_cpu_data.x86_model) {
case INTEL_FAM6_CORE_YONAH:
pr_cont("Core events, ");
+ name = "core";
break;
case INTEL_FAM6_CORE2_MEROM:
@@ -3747,6 +3924,7 @@ __init int intel_pmu_init(void)
x86_pmu.event_constraints = intel_core2_event_constraints;
x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
pr_cont("Core2 events, ");
+ name = "core2";
break;
case INTEL_FAM6_NEHALEM:
@@ -3775,8 +3953,11 @@ __init int intel_pmu_init(void)
intel_pmu_pebs_data_source_nhm();
x86_add_quirk(intel_nehalem_quirk);
+ x86_pmu.pebs_no_tlb = 1;
+ extra_attr = nhm_format_attr;
pr_cont("Nehalem events, ");
+ name = "nehalem";
break;
case INTEL_FAM6_ATOM_PINEVIEW:
@@ -3793,6 +3974,7 @@ __init int intel_pmu_init(void)
x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
x86_pmu.pebs_aliases = intel_pebs_aliases_core2;
pr_cont("Atom events, ");
+ name = "bonnell";
break;
case INTEL_FAM6_ATOM_SILVERMONT1:
@@ -3810,7 +3992,9 @@ __init int intel_pmu_init(void)
x86_pmu.extra_regs = intel_slm_extra_regs;
x86_pmu.flags |= PMU_FL_HAS_RSP_1;
x86_pmu.cpu_events = slm_events_attrs;
+ extra_attr = slm_format_attr;
pr_cont("Silvermont events, ");
+ name = "silvermont";
break;
case INTEL_FAM6_ATOM_GOLDMONT:
@@ -3835,7 +4019,37 @@ __init int intel_pmu_init(void)
x86_pmu.lbr_pt_coexist = true;
x86_pmu.flags |= PMU_FL_HAS_RSP_1;
x86_pmu.cpu_events = glm_events_attrs;
+ extra_attr = slm_format_attr;
pr_cont("Goldmont events, ");
+ name = "goldmont";
+ break;
+
+ case INTEL_FAM6_ATOM_GEMINI_LAKE:
+ memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
+ sizeof(hw_cache_event_ids));
+ memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs,
+ sizeof(hw_cache_extra_regs));
+
+ intel_pmu_lbr_init_skl();
+
+ x86_pmu.event_constraints = intel_slm_event_constraints;
+ x86_pmu.pebs_constraints = intel_glp_pebs_event_constraints;
+ x86_pmu.extra_regs = intel_glm_extra_regs;
+ /*
+ * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
+ * for precise cycles.
+ */
+ x86_pmu.pebs_aliases = NULL;
+ x86_pmu.pebs_prec_dist = true;
+ x86_pmu.lbr_pt_coexist = true;
+ x86_pmu.flags |= PMU_FL_HAS_RSP_1;
+ x86_pmu.get_event_constraints = glp_get_event_constraints;
+ x86_pmu.cpu_events = glm_events_attrs;
+ /* Goldmont Plus has 4-wide pipeline */
+ event_attr_td_total_slots_scale_glm.event_str = "4";
+ extra_attr = slm_format_attr;
+ pr_cont("Goldmont plus events, ");
+ name = "goldmont_plus";
break;
case INTEL_FAM6_WESTMERE:
@@ -3864,7 +4078,9 @@ __init int intel_pmu_init(void)
X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
intel_pmu_pebs_data_source_nhm();
+ extra_attr = nhm_format_attr;
pr_cont("Westmere events, ");
+ name = "westmere";
break;
case INTEL_FAM6_SANDYBRIDGE:
@@ -3900,7 +4116,10 @@ __init int intel_pmu_init(void)
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);
+ extra_attr = nhm_format_attr;
+
pr_cont("SandyBridge events, ");
+ name = "sandybridge";
break;
case INTEL_FAM6_IVYBRIDGE:
@@ -3934,7 +4153,10 @@ __init int intel_pmu_init(void)
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
+ extra_attr = nhm_format_attr;
+
pr_cont("IvyBridge events, ");
+ name = "ivybridge";
break;
@@ -3962,7 +4184,10 @@ __init int intel_pmu_init(void)
x86_pmu.get_event_constraints = hsw_get_event_constraints;
x86_pmu.cpu_events = hsw_events_attrs;
x86_pmu.lbr_double_abort = true;
+ extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
+ hsw_format_attr : nhm_format_attr;
pr_cont("Haswell events, ");
+ name = "haswell";
break;
case INTEL_FAM6_BROADWELL_CORE:
@@ -3998,7 +4223,10 @@ __init int intel_pmu_init(void)
x86_pmu.get_event_constraints = hsw_get_event_constraints;
x86_pmu.cpu_events = hsw_events_attrs;
x86_pmu.limit_period = bdw_limit_period;
+ extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
+ hsw_format_attr : nhm_format_attr;
pr_cont("Broadwell events, ");
+ name = "broadwell";
break;
case INTEL_FAM6_XEON_PHI_KNL:
@@ -4016,8 +4244,9 @@ __init int intel_pmu_init(void)
/* all extra regs are per-cpu when HT is on */
x86_pmu.flags |= PMU_FL_HAS_RSP_1;
x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
-
+ extra_attr = slm_format_attr;
pr_cont("Knights Landing/Mill events, ");
+ name = "knights-landing";
break;
case INTEL_FAM6_SKYLAKE_MOBILE:
@@ -4047,11 +4276,14 @@ __init int intel_pmu_init(void)
x86_pmu.hw_config = hsw_hw_config;
x86_pmu.get_event_constraints = hsw_get_event_constraints;
- x86_pmu.format_attrs = merge_attr(intel_arch3_formats_attr,
- skl_format_attr);
- WARN_ON(!x86_pmu.format_attrs);
+ extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
+ hsw_format_attr : nhm_format_attr;
+ extra_attr = merge_attr(extra_attr, skl_format_attr);
x86_pmu.cpu_events = hsw_events_attrs;
+ intel_pmu_pebs_data_source_skl(
+ boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X);
pr_cont("Skylake events, ");
+ name = "skylake";
break;
default:
@@ -4059,6 +4291,7 @@ __init int intel_pmu_init(void)
case 1:
x86_pmu.event_constraints = intel_v1_event_constraints;
pr_cont("generic architected perfmon v1, ");
+ name = "generic_arch_v1";
break;
default:
/*
@@ -4066,10 +4299,19 @@ __init int intel_pmu_init(void)
*/
x86_pmu.event_constraints = intel_gen_event_constraints;
pr_cont("generic architected perfmon, ");
+ name = "generic_arch_v2+";
break;
}
}
+ snprintf(pmu_name_str, sizeof(pmu_name_str), "%s", name);
+
+ if (version >= 2 && extra_attr) {
+ x86_pmu.format_attrs = merge_attr(intel_arch3_formats_attr,
+ extra_attr);
+ WARN_ON(!x86_pmu.format_attrs);
+ }
+
if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
@@ -4116,8 +4358,13 @@ __init int intel_pmu_init(void)
x86_pmu.lbr_nr = 0;
}
- if (x86_pmu.lbr_nr)
+ x86_pmu.caps_attrs = intel_pmu_caps_attrs;
+
+ if (x86_pmu.lbr_nr) {
+ x86_pmu.caps_attrs = merge_attr(x86_pmu.caps_attrs, lbr_attrs);
pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr);
+ }
+
/*
* Access extra MSR may cause #GP under certain circumstances.
* E.g. KVM doesn't support offcore event
diff --git a/arch/x86/events/intel/cqm.c b/arch/x86/events/intel/cqm.c
deleted file mode 100644
index 2521f771f2f5..000000000000
--- a/arch/x86/events/intel/cqm.c
+++ /dev/null
@@ -1,1766 +0,0 @@
-/*
- * Intel Cache Quality-of-Service Monitoring (CQM) support.
- *
- * Based very, very heavily on work by Peter Zijlstra.
- */
-
-#include <linux/perf_event.h>
-#include <linux/slab.h>
-#include <asm/cpu_device_id.h>
-#include <asm/intel_rdt_common.h>
-#include "../perf_event.h"
-
-#define MSR_IA32_QM_CTR 0x0c8e
-#define MSR_IA32_QM_EVTSEL 0x0c8d
-
-#define MBM_CNTR_WIDTH 24
-/*
- * Guaranteed time in ms as per SDM where MBM counters will not overflow.
- */
-#define MBM_CTR_OVERFLOW_TIME 1000
-
-static u32 cqm_max_rmid = -1;
-static unsigned int cqm_l3_scale; /* supposedly cacheline size */
-static bool cqm_enabled, mbm_enabled;
-unsigned int mbm_socket_max;
-
-/*
- * The cached intel_pqr_state is strictly per CPU and can never be
- * updated from a remote CPU. Both functions which modify the state
- * (intel_cqm_event_start and intel_cqm_event_stop) are called with
- * interrupts disabled, which is sufficient for the protection.
- */
-DEFINE_PER_CPU(struct intel_pqr_state, pqr_state);
-static struct hrtimer *mbm_timers;
-/**
- * struct sample - mbm event's (local or total) data
- * @total_bytes #bytes since we began monitoring
- * @prev_msr previous value of MSR
- */
-struct sample {
- u64 total_bytes;
- u64 prev_msr;
-};
-
-/*
- * samples profiled for total memory bandwidth type events
- */
-static struct sample *mbm_total;
-/*
- * samples profiled for local memory bandwidth type events
- */
-static struct sample *mbm_local;
-
-#define pkg_id topology_physical_package_id(smp_processor_id())
-/*
- * rmid_2_index returns the index for the rmid in mbm_local/mbm_total array.
- * mbm_total[] and mbm_local[] are linearly indexed by socket# * max number of
- * rmids per socket, an example is given below
- * RMID1 of Socket0: vrmid = 1
- * RMID1 of Socket1: vrmid = 1 * (cqm_max_rmid + 1) + 1
- * RMID1 of Socket2: vrmid = 2 * (cqm_max_rmid + 1) + 1
- */
-#define rmid_2_index(rmid) ((pkg_id * (cqm_max_rmid + 1)) + rmid)
-/*
- * Protects cache_cgroups and cqm_rmid_free_lru and cqm_rmid_limbo_lru.
- * Also protects event->hw.cqm_rmid
- *
- * Hold either for stability, both for modification of ->hw.cqm_rmid.
- */
-static DEFINE_MUTEX(cache_mutex);
-static DEFINE_RAW_SPINLOCK(cache_lock);
-
-/*
- * Groups of events that have the same target(s), one RMID per group.
- */
-static LIST_HEAD(cache_groups);
-
-/*
- * Mask of CPUs for reading CQM values. We only need one per-socket.
- */
-static cpumask_t cqm_cpumask;
-
-#define RMID_VAL_ERROR (1ULL << 63)
-#define RMID_VAL_UNAVAIL (1ULL << 62)
-
-/*
- * Event IDs are used to program IA32_QM_EVTSEL before reading event
- * counter from IA32_QM_CTR
- */
-#define QOS_L3_OCCUP_EVENT_ID 0x01
-#define QOS_MBM_TOTAL_EVENT_ID 0x02
-#define QOS_MBM_LOCAL_EVENT_ID 0x03
-
-/*
- * This is central to the rotation algorithm in __intel_cqm_rmid_rotate().
- *
- * This rmid is always free and is guaranteed to have an associated
- * near-zero occupancy value, i.e. no cachelines are tagged with this
- * RMID, once __intel_cqm_rmid_rotate() returns.
- */
-static u32 intel_cqm_rotation_rmid;
-
-#define INVALID_RMID (-1)
-
-/*
- * Is @rmid valid for programming the hardware?
- *
- * rmid 0 is reserved by the hardware for all non-monitored tasks, which
- * means that we should never come across an rmid with that value.
- * Likewise, an rmid value of -1 is used to indicate "no rmid currently
- * assigned" and is used as part of the rotation code.
- */
-static inline bool __rmid_valid(u32 rmid)
-{
- if (!rmid || rmid == INVALID_RMID)
- return false;
-
- return true;
-}
-
-static u64 __rmid_read(u32 rmid)
-{
- u64 val;
-
- /*
- * Ignore the SDM, this thing is _NOTHING_ like a regular perfcnt,
- * it just says that to increase confusion.
- */
- wrmsr(MSR_IA32_QM_EVTSEL, QOS_L3_OCCUP_EVENT_ID, rmid);
- rdmsrl(MSR_IA32_QM_CTR, val);
-
- /*
- * Aside from the ERROR and UNAVAIL bits, assume this thing returns
- * the number of cachelines tagged with @rmid.
- */
- return val;
-}
-
-enum rmid_recycle_state {
- RMID_YOUNG = 0,
- RMID_AVAILABLE,
- RMID_DIRTY,
-};
-
-struct cqm_rmid_entry {
- u32 rmid;
- enum rmid_recycle_state state;
- struct list_head list;
- unsigned long queue_time;
-};
-
-/*
- * cqm_rmid_free_lru - A least recently used list of RMIDs.
- *
- * Oldest entry at the head, newest (most recently used) entry at the
- * tail. This list is never traversed, it's only used to keep track of
- * the lru order. That is, we only pick entries of the head or insert
- * them on the tail.
- *
- * All entries on the list are 'free', and their RMIDs are not currently
- * in use. To mark an RMID as in use, remove its entry from the lru
- * list.
- *
- *
- * cqm_rmid_limbo_lru - list of currently unused but (potentially) dirty RMIDs.
- *
- * This list contains RMIDs that no one is currently using but that
- * may have a non-zero occupancy value associated with them. The
- * rotation worker moves RMIDs from the limbo list to the free list once
- * the occupancy value drops below __intel_cqm_threshold.
- *
- * Both lists are protected by cache_mutex.
- */
-static LIST_HEAD(cqm_rmid_free_lru);
-static LIST_HEAD(cqm_rmid_limbo_lru);
-
-/*
- * We use a simple array of pointers so that we can lookup a struct
- * cqm_rmid_entry in O(1). This alleviates the callers of __get_rmid()
- * and __put_rmid() from having to worry about dealing with struct
- * cqm_rmid_entry - they just deal with rmids, i.e. integers.
- *
- * Once this array is initialized it is read-only. No locks are required
- * to access it.
- *
- * All entries for all RMIDs can be looked up in the this array at all
- * times.
- */
-static struct cqm_rmid_entry **cqm_rmid_ptrs;
-
-static inline struct cqm_rmid_entry *__rmid_entry(u32 rmid)
-{
- struct cqm_rmid_entry *entry;
-
- entry = cqm_rmid_ptrs[rmid];
- WARN_ON(entry->rmid != rmid);
-
- return entry;
-}
-
-/*
- * Returns < 0 on fail.
- *
- * We expect to be called with cache_mutex held.
- */
-static u32 __get_rmid(void)
-{
- struct cqm_rmid_entry *entry;
-
- lockdep_assert_held(&cache_mutex);
-
- if (list_empty(&cqm_rmid_free_lru))
- return INVALID_RMID;
-
- entry = list_first_entry(&cqm_rmid_free_lru, struct cqm_rmid_entry, list);
- list_del(&entry->list);
-
- return entry->rmid;
-}
-
-static void __put_rmid(u32 rmid)
-{
- struct cqm_rmid_entry *entry;
-
- lockdep_assert_held(&cache_mutex);
-
- WARN_ON(!__rmid_valid(rmid));
- entry = __rmid_entry(rmid);
-
- entry->queue_time = jiffies;
- entry->state = RMID_YOUNG;
-
- list_add_tail(&entry->list, &cqm_rmid_limbo_lru);
-}
-
-static void cqm_cleanup(void)
-{
- int i;
-
- if (!cqm_rmid_ptrs)
- return;
-
- for (i = 0; i < cqm_max_rmid; i++)
- kfree(cqm_rmid_ptrs[i]);
-
- kfree(cqm_rmid_ptrs);
- cqm_rmid_ptrs = NULL;
- cqm_enabled = false;
-}
-
-static int intel_cqm_setup_rmid_cache(void)
-{
- struct cqm_rmid_entry *entry;
- unsigned int nr_rmids;
- int r = 0;
-
- nr_rmids = cqm_max_rmid + 1;
- cqm_rmid_ptrs = kzalloc(sizeof(struct cqm_rmid_entry *) *
- nr_rmids, GFP_KERNEL);
- if (!cqm_rmid_ptrs)
- return -ENOMEM;
-
- for (; r <= cqm_max_rmid; r++) {
- struct cqm_rmid_entry *entry;
-
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- goto fail;
-
- INIT_LIST_HEAD(&entry->list);
- entry->rmid = r;
- cqm_rmid_ptrs[r] = entry;
-
- list_add_tail(&entry->list, &cqm_rmid_free_lru);
- }
-
- /*
- * RMID 0 is special and is always allocated. It's used for all
- * tasks that are not monitored.
- */
- entry = __rmid_entry(0);
- list_del(&entry->list);
-
- mutex_lock(&cache_mutex);
- intel_cqm_rotation_rmid = __get_rmid();
- mutex_unlock(&cache_mutex);
-
- return 0;
-
-fail:
- cqm_cleanup();
- return -ENOMEM;
-}
-
-/*
- * Determine if @a and @b measure the same set of tasks.
- *
- * If @a and @b measure the same set of tasks then we want to share a
- * single RMID.
- */
-static bool __match_event(struct perf_event *a, struct perf_event *b)
-{
- /* Per-cpu and task events don't mix */
- if ((a->attach_state & PERF_ATTACH_TASK) !=
- (b->attach_state & PERF_ATTACH_TASK))
- return false;
-
-#ifdef CONFIG_CGROUP_PERF
- if (a->cgrp != b->cgrp)
- return false;
-#endif
-
- /* If not task event, we're machine wide */
- if (!(b->attach_state & PERF_ATTACH_TASK))
- return true;
-
- /*
- * Events that target same task are placed into the same cache group.
- * Mark it as a multi event group, so that we update ->count
- * for every event rather than just the group leader later.
- */
- if (a->hw.target == b->hw.target) {
- b->hw.is_group_event = true;
- return true;
- }
-
- /*
- * Are we an inherited event?
- */
- if (b->parent == a)
- return true;
-
- return false;
-}
-
-#ifdef CONFIG_CGROUP_PERF
-static inline struct perf_cgroup *event_to_cgroup(struct perf_event *event)
-{
- if (event->attach_state & PERF_ATTACH_TASK)
- return perf_cgroup_from_task(event->hw.target, event->ctx);
-
- return event->cgrp;
-}
-#endif
-
-/*
- * Determine if @a's tasks intersect with @b's tasks
- *
- * There are combinations of events that we explicitly prohibit,
- *
- * PROHIBITS
- * system-wide -> cgroup and task
- * cgroup -> system-wide
- * -> task in cgroup
- * task -> system-wide
- * -> task in cgroup
- *
- * Call this function before allocating an RMID.
- */
-static bool __conflict_event(struct perf_event *a, struct perf_event *b)
-{
-#ifdef CONFIG_CGROUP_PERF
- /*
- * We can have any number of cgroups but only one system-wide
- * event at a time.
- */
- if (a->cgrp && b->cgrp) {
- struct perf_cgroup *ac = a->cgrp;
- struct perf_cgroup *bc = b->cgrp;
-
- /*
- * This condition should have been caught in
- * __match_event() and we should be sharing an RMID.
- */
- WARN_ON_ONCE(ac == bc);
-
- if (cgroup_is_descendant(ac->css.cgroup, bc->css.cgroup) ||
- cgroup_is_descendant(bc->css.cgroup, ac->css.cgroup))
- return true;
-
- return false;
- }
-
- if (a->cgrp || b->cgrp) {
- struct perf_cgroup *ac, *bc;
-
- /*
- * cgroup and system-wide events are mutually exclusive
- */
- if ((a->cgrp && !(b->attach_state & PERF_ATTACH_TASK)) ||
- (b->cgrp && !(a->attach_state & PERF_ATTACH_TASK)))
- return true;
-
- /*
- * Ensure neither event is part of the other's cgroup
- */
- ac = event_to_cgroup(a);
- bc = event_to_cgroup(b);
- if (ac == bc)
- return true;
-
- /*
- * Must have cgroup and non-intersecting task events.
- */
- if (!ac || !bc)
- return false;
-
- /*
- * We have cgroup and task events, and the task belongs
- * to a cgroup. Check for overlap.
- */
- if (cgroup_is_descendant(ac->css.cgroup, bc->css.cgroup) ||
- cgroup_is_descendant(bc->css.cgroup, ac->css.cgroup))
- return true;
-
- return false;
- }
-#endif
- /*
- * If one of them is not a task, same story as above with cgroups.
- */
- if (!(a->attach_state & PERF_ATTACH_TASK) ||
- !(b->attach_state & PERF_ATTACH_TASK))
- return true;
-
- /*
- * Must be non-overlapping.
- */
- return false;
-}
-
-struct rmid_read {
- u32 rmid;
- u32 evt_type;
- atomic64_t value;
-};
-
-static void __intel_cqm_event_count(void *info);
-static void init_mbm_sample(u32 rmid, u32 evt_type);
-static void __intel_mbm_event_count(void *info);
-
-static bool is_cqm_event(int e)
-{
- return (e == QOS_L3_OCCUP_EVENT_ID);
-}
-
-static bool is_mbm_event(int e)
-{
- return (e >= QOS_MBM_TOTAL_EVENT_ID && e <= QOS_MBM_LOCAL_EVENT_ID);
-}
-
-static void cqm_mask_call(struct rmid_read *rr)
-{
- if (is_mbm_event(rr->evt_type))
- on_each_cpu_mask(&cqm_cpumask, __intel_mbm_event_count, rr, 1);
- else
- on_each_cpu_mask(&cqm_cpumask, __intel_cqm_event_count, rr, 1);
-}
-
-/*
- * Exchange the RMID of a group of events.
- */
-static u32 intel_cqm_xchg_rmid(struct perf_event *group, u32 rmid)
-{
- struct perf_event *event;
- struct list_head *head = &group->hw.cqm_group_entry;
- u32 old_rmid = group->hw.cqm_rmid;
-
- lockdep_assert_held(&cache_mutex);
-
- /*
- * If our RMID is being deallocated, perform a read now.
- */
- if (__rmid_valid(old_rmid) && !__rmid_valid(rmid)) {
- struct rmid_read rr = {
- .rmid = old_rmid,
- .evt_type = group->attr.config,
- .value = ATOMIC64_INIT(0),
- };
-
- cqm_mask_call(&rr);
- local64_set(&group->count, atomic64_read(&rr.value));
- }
-
- raw_spin_lock_irq(&cache_lock);
-
- group->hw.cqm_rmid = rmid;
- list_for_each_entry(event, head, hw.cqm_group_entry)
- event->hw.cqm_rmid = rmid;
-
- raw_spin_unlock_irq(&cache_lock);
-
- /*
- * If the allocation is for mbm, init the mbm stats.
- * Need to check if each event in the group is mbm event
- * because there could be multiple type of events in the same group.
- */
- if (__rmid_valid(rmid)) {
- event = group;
- if (is_mbm_event(event->attr.config))
- init_mbm_sample(rmid, event->attr.config);
-
- list_for_each_entry(event, head, hw.cqm_group_entry) {
- if (is_mbm_event(event->attr.config))
- init_mbm_sample(rmid, event->attr.config);
- }
- }
-
- return old_rmid;
-}
-
-/*
- * If we fail to assign a new RMID for intel_cqm_rotation_rmid because
- * cachelines are still tagged with RMIDs in limbo, we progressively
- * increment the threshold until we find an RMID in limbo with <=
- * __intel_cqm_threshold lines tagged. This is designed to mitigate the
- * problem where cachelines tagged with an RMID are not steadily being
- * evicted.
- *
- * On successful rotations we decrease the threshold back towards zero.
- *
- * __intel_cqm_max_threshold provides an upper bound on the threshold,
- * and is measured in bytes because it's exposed to userland.
- */
-static unsigned int __intel_cqm_threshold;
-static unsigned int __intel_cqm_max_threshold;
-
-/*
- * Test whether an RMID has a zero occupancy value on this cpu.
- */
-static void intel_cqm_stable(void *arg)
-{
- struct cqm_rmid_entry *entry;
-
- list_for_each_entry(entry, &cqm_rmid_limbo_lru, list) {
- if (entry->state != RMID_AVAILABLE)
- break;
-
- if (__rmid_read(entry->rmid) > __intel_cqm_threshold)
- entry->state = RMID_DIRTY;
- }
-}
-
-/*
- * If we have group events waiting for an RMID that don't conflict with
- * events already running, assign @rmid.
- */
-static bool intel_cqm_sched_in_event(u32 rmid)
-{
- struct perf_event *leader, *event;
-
- lockdep_assert_held(&cache_mutex);
-
- leader = list_first_entry(&cache_groups, struct perf_event,
- hw.cqm_groups_entry);
- event = leader;
-
- list_for_each_entry_continue(event, &cache_groups,
- hw.cqm_groups_entry) {
- if (__rmid_valid(event->hw.cqm_rmid))
- continue;
-
- if (__conflict_event(event, leader))
- continue;
-
- intel_cqm_xchg_rmid(event, rmid);
- return true;
- }
-
- return false;
-}
-
-/*
- * Initially use this constant for both the limbo queue time and the
- * rotation timer interval, pmu::hrtimer_interval_ms.
- *
- * They don't need to be the same, but the two are related since if you
- * rotate faster than you recycle RMIDs, you may run out of available
- * RMIDs.
- */
-#define RMID_DEFAULT_QUEUE_TIME 250 /* ms */
-
-static unsigned int __rmid_queue_time_ms = RMID_DEFAULT_QUEUE_TIME;
-
-/*
- * intel_cqm_rmid_stabilize - move RMIDs from limbo to free list
- * @nr_available: number of freeable RMIDs on the limbo list
- *
- * Quiescent state; wait for all 'freed' RMIDs to become unused, i.e. no
- * cachelines are tagged with those RMIDs. After this we can reuse them
- * and know that the current set of active RMIDs is stable.
- *
- * Return %true or %false depending on whether stabilization needs to be
- * reattempted.
- *
- * If we return %true then @nr_available is updated to indicate the
- * number of RMIDs on the limbo list that have been queued for the
- * minimum queue time (RMID_AVAILABLE), but whose data occupancy values
- * are above __intel_cqm_threshold.
- */
-static bool intel_cqm_rmid_stabilize(unsigned int *available)
-{
- struct cqm_rmid_entry *entry, *tmp;
-
- lockdep_assert_held(&cache_mutex);
-
- *available = 0;
- list_for_each_entry(entry, &cqm_rmid_limbo_lru, list) {
- unsigned long min_queue_time;
- unsigned long now = jiffies;
-
- /*
- * We hold RMIDs placed into limbo for a minimum queue
- * time. Before the minimum queue time has elapsed we do
- * not recycle RMIDs.
- *
- * The reasoning is that until a sufficient time has
- * passed since we stopped using an RMID, any RMID
- * placed onto the limbo list will likely still have
- * data tagged in the cache, which means we'll probably
- * fail to recycle it anyway.
- *
- * We can save ourselves an expensive IPI by skipping
- * any RMIDs that have not been queued for the minimum
- * time.
- */
- min_queue_time = entry->queue_time +
- msecs_to_jiffies(__rmid_queue_time_ms);
-
- if (time_after(min_queue_time, now))
- break;
-
- entry->state = RMID_AVAILABLE;
- (*available)++;
- }
-
- /*
- * Fast return if none of the RMIDs on the limbo list have been
- * sitting on the queue for the minimum queue time.
- */
- if (!*available)
- return false;
-
- /*
- * Test whether an RMID is free for each package.
- */
- on_each_cpu_mask(&cqm_cpumask, intel_cqm_stable, NULL, true);
-
- list_for_each_entry_safe(entry, tmp, &cqm_rmid_limbo_lru, list) {
- /*
- * Exhausted all RMIDs that have waited min queue time.
- */
- if (entry->state == RMID_YOUNG)
- break;
-
- if (entry->state == RMID_DIRTY)
- continue;
-
- list_del(&entry->list); /* remove from limbo */
-
- /*
- * The rotation RMID gets priority if it's
- * currently invalid. In which case, skip adding
- * the RMID to the free lru.
- */
- if (!__rmid_valid(intel_cqm_rotation_rmid)) {
- intel_cqm_rotation_rmid = entry->rmid;
- continue;
- }
-
- /*
- * If we have groups waiting for RMIDs, hand
- * them one now provided they don't conflict.
- */
- if (intel_cqm_sched_in_event(entry->rmid))
- continue;
-
- /*
- * Otherwise place it onto the free list.
- */
- list_add_tail(&entry->list, &cqm_rmid_free_lru);
- }
-
-
- return __rmid_valid(intel_cqm_rotation_rmid);
-}
-
-/*
- * Pick a victim group and move it to the tail of the group list.
- * @next: The first group without an RMID
- */
-static void __intel_cqm_pick_and_rotate(struct perf_event *next)
-{
- struct perf_event *rotor;
- u32 rmid;
-
- lockdep_assert_held(&cache_mutex);
-
- rotor = list_first_entry(&cache_groups, struct perf_event,
- hw.cqm_groups_entry);
-
- /*
- * The group at the front of the list should always have a valid
- * RMID. If it doesn't then no groups have RMIDs assigned and we
- * don't need to rotate the list.
- */
- if (next == rotor)
- return;
-
- rmid = intel_cqm_xchg_rmid(rotor, INVALID_RMID);
- __put_rmid(rmid);
-
- list_rotate_left(&cache_groups);
-}
-
-/*
- * Deallocate the RMIDs from any events that conflict with @event, and
- * place them on the back of the group list.
- */
-static void intel_cqm_sched_out_conflicting_events(struct perf_event *event)
-{
- struct perf_event *group, *g;
- u32 rmid;
-
- lockdep_assert_held(&cache_mutex);
-
- list_for_each_entry_safe(group, g, &cache_groups, hw.cqm_groups_entry) {
- if (group == event)
- continue;
-
- rmid = group->hw.cqm_rmid;
-
- /*
- * Skip events that don't have a valid RMID.
- */
- if (!__rmid_valid(rmid))
- continue;
-
- /*
- * No conflict? No problem! Leave the event alone.
- */
- if (!__conflict_event(group, event))
- continue;
-
- intel_cqm_xchg_rmid(group, INVALID_RMID);
- __put_rmid(rmid);
- }
-}
-
-/*
- * Attempt to rotate the groups and assign new RMIDs.
- *
- * We rotate for two reasons,
- * 1. To handle the scheduling of conflicting events
- * 2. To recycle RMIDs
- *
- * Rotating RMIDs is complicated because the hardware doesn't give us
- * any clues.
- *
- * There's problems with the hardware interface; when you change the
- * task:RMID map cachelines retain their 'old' tags, giving a skewed
- * picture. In order to work around this, we must always keep one free
- * RMID - intel_cqm_rotation_rmid.
- *
- * Rotation works by taking away an RMID from a group (the old RMID),
- * and assigning the free RMID to another group (the new RMID). We must
- * then wait for the old RMID to not be used (no cachelines tagged).
- * This ensures that all cachelines are tagged with 'active' RMIDs. At
- * this point we can start reading values for the new RMID and treat the
- * old RMID as the free RMID for the next rotation.
- *
- * Return %true or %false depending on whether we did any rotating.
- */
-static bool __intel_cqm_rmid_rotate(void)
-{
- struct perf_event *group, *start = NULL;
- unsigned int threshold_limit;
- unsigned int nr_needed = 0;
- unsigned int nr_available;
- bool rotated = false;
-
- mutex_lock(&cache_mutex);
-
-again:
- /*
- * Fast path through this function if there are no groups and no
- * RMIDs that need cleaning.
- */
- if (list_empty(&cache_groups) && list_empty(&cqm_rmid_limbo_lru))
- goto out;
-
- list_for_each_entry(group, &cache_groups, hw.cqm_groups_entry) {
- if (!__rmid_valid(group->hw.cqm_rmid)) {
- if (!start)
- start = group;
- nr_needed++;
- }
- }
-
- /*
- * We have some event groups, but they all have RMIDs assigned
- * and no RMIDs need cleaning.
- */
- if (!nr_needed && list_empty(&cqm_rmid_limbo_lru))
- goto out;
-
- if (!nr_needed)
- goto stabilize;
-
- /*
- * We have more event groups without RMIDs than available RMIDs,
- * or we have event groups that conflict with the ones currently
- * scheduled.
- *
- * We force deallocate the rmid of the group at the head of
- * cache_groups. The first event group without an RMID then gets
- * assigned intel_cqm_rotation_rmid. This ensures we always make
- * forward progress.
- *
- * Rotate the cache_groups list so the previous head is now the
- * tail.
- */
- __intel_cqm_pick_and_rotate(start);
-
- /*
- * If the rotation is going to succeed, reduce the threshold so
- * that we don't needlessly reuse dirty RMIDs.
- */
- if (__rmid_valid(intel_cqm_rotation_rmid)) {
- intel_cqm_xchg_rmid(start, intel_cqm_rotation_rmid);
- intel_cqm_rotation_rmid = __get_rmid();
-
- intel_cqm_sched_out_conflicting_events(start);
-
- if (__intel_cqm_threshold)
- __intel_cqm_threshold--;
- }
-
- rotated = true;
-
-stabilize:
- /*
- * We now need to stabilize the RMID we freed above (if any) to
- * ensure that the next time we rotate we have an RMID with zero
- * occupancy value.
- *
- * Alternatively, if we didn't need to perform any rotation,
- * we'll have a bunch of RMIDs in limbo that need stabilizing.
- */
- threshold_limit = __intel_cqm_max_threshold / cqm_l3_scale;
-
- while (intel_cqm_rmid_stabilize(&nr_available) &&
- __intel_cqm_threshold < threshold_limit) {
- unsigned int steal_limit;
-
- /*
- * Don't spin if nobody is actively waiting for an RMID,
- * the rotation worker will be kicked as soon as an
- * event needs an RMID anyway.
- */
- if (!nr_needed)
- break;
-
- /* Allow max 25% of RMIDs to be in limbo. */
- steal_limit = (cqm_max_rmid + 1) / 4;
-
- /*
- * We failed to stabilize any RMIDs so our rotation
- * logic is now stuck. In order to make forward progress
- * we have a few options:
- *
- * 1. rotate ("steal") another RMID
- * 2. increase the threshold
- * 3. do nothing
- *
- * We do both of 1. and 2. until we hit the steal limit.
- *
- * The steal limit prevents all RMIDs ending up on the
- * limbo list. This can happen if every RMID has a
- * non-zero occupancy above threshold_limit, and the
- * occupancy values aren't dropping fast enough.
- *
- * Note that there is prioritisation at work here - we'd
- * rather increase the number of RMIDs on the limbo list
- * than increase the threshold, because increasing the
- * threshold skews the event data (because we reuse
- * dirty RMIDs) - threshold bumps are a last resort.
- */
- if (nr_available < steal_limit)
- goto again;
-
- __intel_cqm_threshold++;
- }
-
-out:
- mutex_unlock(&cache_mutex);
- return rotated;
-}
-
-static void intel_cqm_rmid_rotate(struct work_struct *work);
-
-static DECLARE_DELAYED_WORK(intel_cqm_rmid_work, intel_cqm_rmid_rotate);
-
-static struct pmu intel_cqm_pmu;
-
-static void intel_cqm_rmid_rotate(struct work_struct *work)
-{
- unsigned long delay;
-
- __intel_cqm_rmid_rotate();
-
- delay = msecs_to_jiffies(intel_cqm_pmu.hrtimer_interval_ms);
- schedule_delayed_work(&intel_cqm_rmid_work, delay);
-}
-
-static u64 update_sample(unsigned int rmid, u32 evt_type, int first)
-{
- struct sample *mbm_current;
- u32 vrmid = rmid_2_index(rmid);
- u64 val, bytes, shift;
- u32 eventid;
-
- if (evt_type == QOS_MBM_LOCAL_EVENT_ID) {
- mbm_current = &mbm_local[vrmid];
- eventid = QOS_MBM_LOCAL_EVENT_ID;
- } else {
- mbm_current = &mbm_total[vrmid];
- eventid = QOS_MBM_TOTAL_EVENT_ID;
- }
-
- wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid);
- rdmsrl(MSR_IA32_QM_CTR, val);
- if (val & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL))
- return mbm_current->total_bytes;
-
- if (first) {
- mbm_current->prev_msr = val;
- mbm_current->total_bytes = 0;
- return mbm_current->total_bytes;
- }
-
- /*
- * The h/w guarantees that counters will not overflow
- * so long as we poll them at least once per second.
- */
- shift = 64 - MBM_CNTR_WIDTH;
- bytes = (val << shift) - (mbm_current->prev_msr << shift);
- bytes >>= shift;
-
- bytes *= cqm_l3_scale;
-
- mbm_current->total_bytes += bytes;
- mbm_current->prev_msr = val;
-
- return mbm_current->total_bytes;
-}
-
-static u64 rmid_read_mbm(unsigned int rmid, u32 evt_type)
-{
- return update_sample(rmid, evt_type, 0);
-}
-
-static void __intel_mbm_event_init(void *info)
-{
- struct rmid_read *rr = info;
-
- update_sample(rr->rmid, rr->evt_type, 1);
-}
-
-static void init_mbm_sample(u32 rmid, u32 evt_type)
-{
- struct rmid_read rr = {
- .rmid = rmid,
- .evt_type = evt_type,
- .value = ATOMIC64_INIT(0),
- };
-
- /* on each socket, init sample */
- on_each_cpu_mask(&cqm_cpumask, __intel_mbm_event_init, &rr, 1);
-}
-
-/*
- * Find a group and setup RMID.
- *
- * If we're part of a group, we use the group's RMID.
- */
-static void intel_cqm_setup_event(struct perf_event *event,
- struct perf_event **group)
-{
- struct perf_event *iter;
- bool conflict = false;
- u32 rmid;
-
- event->hw.is_group_event = false;
- list_for_each_entry(iter, &cache_groups, hw.cqm_groups_entry) {
- rmid = iter->hw.cqm_rmid;
-
- if (__match_event(iter, event)) {
- /* All tasks in a group share an RMID */
- event->hw.cqm_rmid = rmid;
- *group = iter;
- if (is_mbm_event(event->attr.config) && __rmid_valid(rmid))
- init_mbm_sample(rmid, event->attr.config);
- return;
- }
-
- /*
- * We only care about conflicts for events that are
- * actually scheduled in (and hence have a valid RMID).
- */
- if (__conflict_event(iter, event) && __rmid_valid(rmid))
- conflict = true;
- }
-
- if (conflict)
- rmid = INVALID_RMID;
- else
- rmid = __get_rmid();
-
- if (is_mbm_event(event->attr.config) && __rmid_valid(rmid))
- init_mbm_sample(rmid, event->attr.config);
-
- event->hw.cqm_rmid = rmid;
-}
-
-static void intel_cqm_event_read(struct perf_event *event)
-{
- unsigned long flags;
- u32 rmid;
- u64 val;
-
- /*
- * Task events are handled by intel_cqm_event_count().
- */
- if (event->cpu == -1)
- return;
-
- raw_spin_lock_irqsave(&cache_lock, flags);
- rmid = event->hw.cqm_rmid;
-
- if (!__rmid_valid(rmid))
- goto out;
-
- if (is_mbm_event(event->attr.config))
- val = rmid_read_mbm(rmid, event->attr.config);
- else
- val = __rmid_read(rmid);
-
- /*
- * Ignore this reading on error states and do not update the value.
- */
- if (val & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL))
- goto out;
-
- local64_set(&event->count, val);
-out:
- raw_spin_unlock_irqrestore(&cache_lock, flags);
-}
-
-static void __intel_cqm_event_count(void *info)
-{
- struct rmid_read *rr = info;
- u64 val;
-
- val = __rmid_read(rr->rmid);
-
- if (val & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL))
- return;
-
- atomic64_add(val, &rr->value);
-}
-
-static inline bool cqm_group_leader(struct perf_event *event)
-{
- return !list_empty(&event->hw.cqm_groups_entry);
-}
-
-static void __intel_mbm_event_count(void *info)
-{
- struct rmid_read *rr = info;
- u64 val;
-
- val = rmid_read_mbm(rr->rmid, rr->evt_type);
- if (val & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL))
- return;
- atomic64_add(val, &rr->value);
-}
-
-static enum hrtimer_restart mbm_hrtimer_handle(struct hrtimer *hrtimer)
-{
- struct perf_event *iter, *iter1;
- int ret = HRTIMER_RESTART;
- struct list_head *head;
- unsigned long flags;
- u32 grp_rmid;
-
- /*
- * Need to cache_lock as the timer Event Select MSR reads
- * can race with the mbm/cqm count() and mbm_init() reads.
- */
- raw_spin_lock_irqsave(&cache_lock, flags);
-
- if (list_empty(&cache_groups)) {
- ret = HRTIMER_NORESTART;
- goto out;
- }
-
- list_for_each_entry(iter, &cache_groups, hw.cqm_groups_entry) {
- grp_rmid = iter->hw.cqm_rmid;
- if (!__rmid_valid(grp_rmid))
- continue;
- if (is_mbm_event(iter->attr.config))
- update_sample(grp_rmid, iter->attr.config, 0);
-
- head = &iter->hw.cqm_group_entry;
- if (list_empty(head))
- continue;
- list_for_each_entry(iter1, head, hw.cqm_group_entry) {
- if (!iter1->hw.is_group_event)
- break;
- if (is_mbm_event(iter1->attr.config))
- update_sample(iter1->hw.cqm_rmid,
- iter1->attr.config, 0);
- }
- }
-
- hrtimer_forward_now(hrtimer, ms_to_ktime(MBM_CTR_OVERFLOW_TIME));
-out:
- raw_spin_unlock_irqrestore(&cache_lock, flags);
-
- return ret;
-}
-
-static void __mbm_start_timer(void *info)
-{
- hrtimer_start(&mbm_timers[pkg_id], ms_to_ktime(MBM_CTR_OVERFLOW_TIME),
- HRTIMER_MODE_REL_PINNED);
-}
-
-static void __mbm_stop_timer(void *info)
-{
- hrtimer_cancel(&mbm_timers[pkg_id]);
-}
-
-static void mbm_start_timers(void)
-{
- on_each_cpu_mask(&cqm_cpumask, __mbm_start_timer, NULL, 1);
-}
-
-static void mbm_stop_timers(void)
-{
- on_each_cpu_mask(&cqm_cpumask, __mbm_stop_timer, NULL, 1);
-}
-
-static void mbm_hrtimer_init(void)
-{
- struct hrtimer *hr;
- int i;
-
- for (i = 0; i < mbm_socket_max; i++) {
- hr = &mbm_timers[i];
- hrtimer_init(hr, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- hr->function = mbm_hrtimer_handle;
- }
-}
-
-static u64 intel_cqm_event_count(struct perf_event *event)
-{
- unsigned long flags;
- struct rmid_read rr = {
- .evt_type = event->attr.config,
- .value = ATOMIC64_INIT(0),
- };
-
- /*
- * We only need to worry about task events. System-wide events
- * are handled like usual, i.e. entirely with
- * intel_cqm_event_read().
- */
- if (event->cpu != -1)
- return __perf_event_count(event);
-
- /*
- * Only the group leader gets to report values except in case of
- * multiple events in the same group, where we still need to read the
- * other events. This stops us
- * reporting duplicate values to userspace, and gives us a clear
- * rule for which task gets to report the values.
- *
- * Note that it is impossible to attribute these values to
- * specific packages - we forfeit that ability when we create
- * task events.
- */
- if (!cqm_group_leader(event) && !event->hw.is_group_event)
- return 0;
-
- /*
- * Getting up-to-date values requires an SMP IPI which is not
- * possible if we're being called in interrupt context. Return
- * the cached values instead.
- */
- if (unlikely(in_interrupt()))
- goto out;
-
- /*
- * Notice that we don't perform the reading of an RMID
- * atomically, because we can't hold a spin lock across the
- * IPIs.
- *
- * Speculatively perform the read, since @event might be
- * assigned a different (possibly invalid) RMID while we're
- * busy performing the IPI calls. It's therefore necessary to
- * check @event's RMID afterwards, and if it has changed,
- * discard the result of the read.
- */
- rr.rmid = ACCESS_ONCE(event->hw.cqm_rmid);
-
- if (!__rmid_valid(rr.rmid))
- goto out;
-
- cqm_mask_call(&rr);
-
- raw_spin_lock_irqsave(&cache_lock, flags);
- if (event->hw.cqm_rmid == rr.rmid)
- local64_set(&event->count, atomic64_read(&rr.value));
- raw_spin_unlock_irqrestore(&cache_lock, flags);
-out:
- return __perf_event_count(event);
-}
-
-static void intel_cqm_event_start(struct perf_event *event, int mode)
-{
- struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
- u32 rmid = event->hw.cqm_rmid;
-
- if (!(event->hw.cqm_state & PERF_HES_STOPPED))
- return;
-
- event->hw.cqm_state &= ~PERF_HES_STOPPED;
-
- if (state->rmid_usecnt++) {
- if (!WARN_ON_ONCE(state->rmid != rmid))
- return;
- } else {
- WARN_ON_ONCE(state->rmid);
- }
-
- state->rmid = rmid;
- wrmsr(MSR_IA32_PQR_ASSOC, rmid, state->closid);
-}
-
-static void intel_cqm_event_stop(struct perf_event *event, int mode)
-{
- struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
-
- if (event->hw.cqm_state & PERF_HES_STOPPED)
- return;
-
- event->hw.cqm_state |= PERF_HES_STOPPED;
-
- intel_cqm_event_read(event);
-
- if (!--state->rmid_usecnt) {
- state->rmid = 0;
- wrmsr(MSR_IA32_PQR_ASSOC, 0, state->closid);
- } else {
- WARN_ON_ONCE(!state->rmid);
- }
-}
-
-static int intel_cqm_event_add(struct perf_event *event, int mode)
-{
- unsigned long flags;
- u32 rmid;
-
- raw_spin_lock_irqsave(&cache_lock, flags);
-
- event->hw.cqm_state = PERF_HES_STOPPED;
- rmid = event->hw.cqm_rmid;
-
- if (__rmid_valid(rmid) && (mode & PERF_EF_START))
- intel_cqm_event_start(event, mode);
-
- raw_spin_unlock_irqrestore(&cache_lock, flags);
-
- return 0;
-}
-
-static void intel_cqm_event_destroy(struct perf_event *event)
-{
- struct perf_event *group_other = NULL;
- unsigned long flags;
-
- mutex_lock(&cache_mutex);
- /*
- * Hold the cache_lock as mbm timer handlers could be
- * scanning the list of events.
- */
- raw_spin_lock_irqsave(&cache_lock, flags);
-
- /*
- * If there's another event in this group...
- */
- if (!list_empty(&event->hw.cqm_group_entry)) {
- group_other = list_first_entry(&event->hw.cqm_group_entry,
- struct perf_event,
- hw.cqm_group_entry);
- list_del(&event->hw.cqm_group_entry);
- }
-
- /*
- * And we're the group leader..
- */
- if (cqm_group_leader(event)) {
- /*
- * If there was a group_other, make that leader, otherwise
- * destroy the group and return the RMID.
- */
- if (group_other) {
- list_replace(&event->hw.cqm_groups_entry,
- &group_other->hw.cqm_groups_entry);
- } else {
- u32 rmid = event->hw.cqm_rmid;
-
- if (__rmid_valid(rmid))
- __put_rmid(rmid);
- list_del(&event->hw.cqm_groups_entry);
- }
- }
-
- raw_spin_unlock_irqrestore(&cache_lock, flags);
-
- /*
- * Stop the mbm overflow timers when the last event is destroyed.
- */
- if (mbm_enabled && list_empty(&cache_groups))
- mbm_stop_timers();
-
- mutex_unlock(&cache_mutex);
-}
-
-static int intel_cqm_event_init(struct perf_event *event)
-{
- struct perf_event *group = NULL;
- bool rotate = false;
- unsigned long flags;
-
- if (event->attr.type != intel_cqm_pmu.type)
- return -ENOENT;
-
- if ((event->attr.config < QOS_L3_OCCUP_EVENT_ID) ||
- (event->attr.config > QOS_MBM_LOCAL_EVENT_ID))
- return -EINVAL;
-
- if ((is_cqm_event(event->attr.config) && !cqm_enabled) ||
- (is_mbm_event(event->attr.config) && !mbm_enabled))
- return -EINVAL;
-
- /* unsupported modes and filters */
- if (event->attr.exclude_user ||
- event->attr.exclude_kernel ||
- event->attr.exclude_hv ||
- event->attr.exclude_idle ||
- event->attr.exclude_host ||
- event->attr.exclude_guest ||
- event->attr.sample_period) /* no sampling */
- return -EINVAL;
-
- INIT_LIST_HEAD(&event->hw.cqm_group_entry);
- INIT_LIST_HEAD(&event->hw.cqm_groups_entry);
-
- event->destroy = intel_cqm_event_destroy;
-
- mutex_lock(&cache_mutex);
-
- /*
- * Start the mbm overflow timers when the first event is created.
- */
- if (mbm_enabled && list_empty(&cache_groups))
- mbm_start_timers();
-
- /* Will also set rmid */
- intel_cqm_setup_event(event, &group);
-
- /*
- * Hold the cache_lock as mbm timer handlers could be
- * scanning the list of events.
- */
- raw_spin_lock_irqsave(&cache_lock, flags);
-
- if (group) {
- list_add_tail(&event->hw.cqm_group_entry,
- &group->hw.cqm_group_entry);
- } else {
- list_add_tail(&event->hw.cqm_groups_entry,
- &cache_groups);
-
- /*
- * All RMIDs are either in use or have recently been
- * used. Kick the rotation worker to clean/free some.
- *
- * We only do this for the group leader, rather than for
- * every event in a group to save on needless work.
- */
- if (!__rmid_valid(event->hw.cqm_rmid))
- rotate = true;
- }
-
- raw_spin_unlock_irqrestore(&cache_lock, flags);
- mutex_unlock(&cache_mutex);
-
- if (rotate)
- schedule_delayed_work(&intel_cqm_rmid_work, 0);
-
- return 0;
-}
-
-EVENT_ATTR_STR(llc_occupancy, intel_cqm_llc, "event=0x01");
-EVENT_ATTR_STR(llc_occupancy.per-pkg, intel_cqm_llc_pkg, "1");
-EVENT_ATTR_STR(llc_occupancy.unit, intel_cqm_llc_unit, "Bytes");
-EVENT_ATTR_STR(llc_occupancy.scale, intel_cqm_llc_scale, NULL);
-EVENT_ATTR_STR(llc_occupancy.snapshot, intel_cqm_llc_snapshot, "1");
-
-EVENT_ATTR_STR(total_bytes, intel_cqm_total_bytes, "event=0x02");
-EVENT_ATTR_STR(total_bytes.per-pkg, intel_cqm_total_bytes_pkg, "1");
-EVENT_ATTR_STR(total_bytes.unit, intel_cqm_total_bytes_unit, "MB");
-EVENT_ATTR_STR(total_bytes.scale, intel_cqm_total_bytes_scale, "1e-6");
-
-EVENT_ATTR_STR(local_bytes, intel_cqm_local_bytes, "event=0x03");
-EVENT_ATTR_STR(local_bytes.per-pkg, intel_cqm_local_bytes_pkg, "1");
-EVENT_ATTR_STR(local_bytes.unit, intel_cqm_local_bytes_unit, "MB");
-EVENT_ATTR_STR(local_bytes.scale, intel_cqm_local_bytes_scale, "1e-6");
-
-static struct attribute *intel_cqm_events_attr[] = {
- EVENT_PTR(intel_cqm_llc),
- EVENT_PTR(intel_cqm_llc_pkg),
- EVENT_PTR(intel_cqm_llc_unit),
- EVENT_PTR(intel_cqm_llc_scale),
- EVENT_PTR(intel_cqm_llc_snapshot),
- NULL,
-};
-
-static struct attribute *intel_mbm_events_attr[] = {
- EVENT_PTR(intel_cqm_total_bytes),
- EVENT_PTR(intel_cqm_local_bytes),
- EVENT_PTR(intel_cqm_total_bytes_pkg),
- EVENT_PTR(intel_cqm_local_bytes_pkg),
- EVENT_PTR(intel_cqm_total_bytes_unit),
- EVENT_PTR(intel_cqm_local_bytes_unit),
- EVENT_PTR(intel_cqm_total_bytes_scale),
- EVENT_PTR(intel_cqm_local_bytes_scale),
- NULL,
-};
-
-static struct attribute *intel_cmt_mbm_events_attr[] = {
- EVENT_PTR(intel_cqm_llc),
- EVENT_PTR(intel_cqm_total_bytes),
- EVENT_PTR(intel_cqm_local_bytes),
- EVENT_PTR(intel_cqm_llc_pkg),
- EVENT_PTR(intel_cqm_total_bytes_pkg),
- EVENT_PTR(intel_cqm_local_bytes_pkg),
- EVENT_PTR(intel_cqm_llc_unit),
- EVENT_PTR(intel_cqm_total_bytes_unit),
- EVENT_PTR(intel_cqm_local_bytes_unit),
- EVENT_PTR(intel_cqm_llc_scale),
- EVENT_PTR(intel_cqm_total_bytes_scale),
- EVENT_PTR(intel_cqm_local_bytes_scale),
- EVENT_PTR(intel_cqm_llc_snapshot),
- NULL,
-};
-
-static struct attribute_group intel_cqm_events_group = {
- .name = "events",
- .attrs = NULL,
-};
-
-PMU_FORMAT_ATTR(event, "config:0-7");
-static struct attribute *intel_cqm_formats_attr[] = {
- &format_attr_event.attr,
- NULL,
-};
-
-static struct attribute_group intel_cqm_format_group = {
- .name = "format",
- .attrs = intel_cqm_formats_attr,
-};
-
-static ssize_t
-max_recycle_threshold_show(struct device *dev, struct device_attribute *attr,
- char *page)
-{
- ssize_t rv;
-
- mutex_lock(&cache_mutex);
- rv = snprintf(page, PAGE_SIZE-1, "%u\n", __intel_cqm_max_threshold);
- mutex_unlock(&cache_mutex);
-
- return rv;
-}
-
-static ssize_t
-max_recycle_threshold_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- unsigned int bytes, cachelines;
- int ret;
-
- ret = kstrtouint(buf, 0, &bytes);
- if (ret)
- return ret;
-
- mutex_lock(&cache_mutex);
-
- __intel_cqm_max_threshold = bytes;
- cachelines = bytes / cqm_l3_scale;
-
- /*
- * The new maximum takes effect immediately.
- */
- if (__intel_cqm_threshold > cachelines)
- __intel_cqm_threshold = cachelines;
-
- mutex_unlock(&cache_mutex);
-
- return count;
-}
-
-static DEVICE_ATTR_RW(max_recycle_threshold);
-
-static struct attribute *intel_cqm_attrs[] = {
- &dev_attr_max_recycle_threshold.attr,
- NULL,
-};
-
-static const struct attribute_group intel_cqm_group = {
- .attrs = intel_cqm_attrs,
-};
-
-static const struct attribute_group *intel_cqm_attr_groups[] = {
- &intel_cqm_events_group,
- &intel_cqm_format_group,
- &intel_cqm_group,
- NULL,
-};
-
-static struct pmu intel_cqm_pmu = {
- .hrtimer_interval_ms = RMID_DEFAULT_QUEUE_TIME,
- .attr_groups = intel_cqm_attr_groups,
- .task_ctx_nr = perf_sw_context,
- .event_init = intel_cqm_event_init,
- .add = intel_cqm_event_add,
- .del = intel_cqm_event_stop,
- .start = intel_cqm_event_start,
- .stop = intel_cqm_event_stop,
- .read = intel_cqm_event_read,
- .count = intel_cqm_event_count,
-};
-
-static inline void cqm_pick_event_reader(int cpu)
-{
- int reader;
-
- /* First online cpu in package becomes the reader */
- reader = cpumask_any_and(&cqm_cpumask, topology_core_cpumask(cpu));
- if (reader >= nr_cpu_ids)
- cpumask_set_cpu(cpu, &cqm_cpumask);
-}
-
-static int intel_cqm_cpu_starting(unsigned int cpu)
-{
- struct intel_pqr_state *state = &per_cpu(pqr_state, cpu);
- struct cpuinfo_x86 *c = &cpu_data(cpu);
-
- state->rmid = 0;
- state->closid = 0;
- state->rmid_usecnt = 0;
-
- WARN_ON(c->x86_cache_max_rmid != cqm_max_rmid);
- WARN_ON(c->x86_cache_occ_scale != cqm_l3_scale);
-
- cqm_pick_event_reader(cpu);
- return 0;
-}
-
-static int intel_cqm_cpu_exit(unsigned int cpu)
-{
- int target;
-
- /* Is @cpu the current cqm reader for this package ? */
- if (!cpumask_test_and_clear_cpu(cpu, &cqm_cpumask))
- return 0;
-
- /* Find another online reader in this package */
- target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
-
- if (target < nr_cpu_ids)
- cpumask_set_cpu(target, &cqm_cpumask);
-
- return 0;
-}
-
-static const struct x86_cpu_id intel_cqm_match[] = {
- { .vendor = X86_VENDOR_INTEL, .feature = X86_FEATURE_CQM_OCCUP_LLC },
- {}
-};
-
-static void mbm_cleanup(void)
-{
- if (!mbm_enabled)
- return;
-
- kfree(mbm_local);
- kfree(mbm_total);
- mbm_enabled = false;
-}
-
-static const struct x86_cpu_id intel_mbm_local_match[] = {
- { .vendor = X86_VENDOR_INTEL, .feature = X86_FEATURE_CQM_MBM_LOCAL },
- {}
-};
-
-static const struct x86_cpu_id intel_mbm_total_match[] = {
- { .vendor = X86_VENDOR_INTEL, .feature = X86_FEATURE_CQM_MBM_TOTAL },
- {}
-};
-
-static int intel_mbm_init(void)
-{
- int ret = 0, array_size, maxid = cqm_max_rmid + 1;
-
- mbm_socket_max = topology_max_packages();
- array_size = sizeof(struct sample) * maxid * mbm_socket_max;
- mbm_local = kmalloc(array_size, GFP_KERNEL);
- if (!mbm_local)
- return -ENOMEM;
-
- mbm_total = kmalloc(array_size, GFP_KERNEL);
- if (!mbm_total) {
- ret = -ENOMEM;
- goto out;
- }
-
- array_size = sizeof(struct hrtimer) * mbm_socket_max;
- mbm_timers = kmalloc(array_size, GFP_KERNEL);
- if (!mbm_timers) {
- ret = -ENOMEM;
- goto out;
- }
- mbm_hrtimer_init();
-
-out:
- if (ret)
- mbm_cleanup();
-
- return ret;
-}
-
-static int __init intel_cqm_init(void)
-{
- char *str = NULL, scale[20];
- int cpu, ret;
-
- if (x86_match_cpu(intel_cqm_match))
- cqm_enabled = true;
-
- if (x86_match_cpu(intel_mbm_local_match) &&
- x86_match_cpu(intel_mbm_total_match))
- mbm_enabled = true;
-
- if (!cqm_enabled && !mbm_enabled)
- return -ENODEV;
-
- cqm_l3_scale = boot_cpu_data.x86_cache_occ_scale;
-
- /*
- * It's possible that not all resources support the same number
- * of RMIDs. Instead of making scheduling much more complicated
- * (where we have to match a task's RMID to a cpu that supports
- * that many RMIDs) just find the minimum RMIDs supported across
- * all cpus.
- *
- * Also, check that the scales match on all cpus.
- */
- cpus_read_lock();
- for_each_online_cpu(cpu) {
- struct cpuinfo_x86 *c = &cpu_data(cpu);
-
- if (c->x86_cache_max_rmid < cqm_max_rmid)
- cqm_max_rmid = c->x86_cache_max_rmid;
-
- if (c->x86_cache_occ_scale != cqm_l3_scale) {
- pr_err("Multiple LLC scale values, disabling\n");
- ret = -EINVAL;
- goto out;
- }
- }
-
- /*
- * A reasonable upper limit on the max threshold is the number
- * of lines tagged per RMID if all RMIDs have the same number of
- * lines tagged in the LLC.
- *
- * For a 35MB LLC and 56 RMIDs, this is ~1.8% of the LLC.
- */
- __intel_cqm_max_threshold =
- boot_cpu_data.x86_cache_size * 1024 / (cqm_max_rmid + 1);
-
- snprintf(scale, sizeof(scale), "%u", cqm_l3_scale);
- str = kstrdup(scale, GFP_KERNEL);
- if (!str) {
- ret = -ENOMEM;
- goto out;
- }
-
- event_attr_intel_cqm_llc_scale.event_str = str;
-
- ret = intel_cqm_setup_rmid_cache();
- if (ret)
- goto out;
-
- if (mbm_enabled)
- ret = intel_mbm_init();
- if (ret && !cqm_enabled)
- goto out;
-
- if (cqm_enabled && mbm_enabled)
- intel_cqm_events_group.attrs = intel_cmt_mbm_events_attr;
- else if (!cqm_enabled && mbm_enabled)
- intel_cqm_events_group.attrs = intel_mbm_events_attr;
- else if (cqm_enabled && !mbm_enabled)
- intel_cqm_events_group.attrs = intel_cqm_events_attr;
-
- ret = perf_pmu_register(&intel_cqm_pmu, "intel_cqm", -1);
- if (ret) {
- pr_err("Intel CQM perf registration failed: %d\n", ret);
- goto out;
- }
-
- if (cqm_enabled)
- pr_info("Intel CQM monitoring enabled\n");
- if (mbm_enabled)
- pr_info("Intel MBM enabled\n");
-
- /*
- * Setup the hot cpu notifier once we are sure cqm
- * is enabled to avoid notifier leak.
- */
- cpuhp_setup_state_cpuslocked(CPUHP_AP_PERF_X86_CQM_STARTING,
- "perf/x86/cqm:starting",
- intel_cqm_cpu_starting, NULL);
- cpuhp_setup_state_cpuslocked(CPUHP_AP_PERF_X86_CQM_ONLINE,
- "perf/x86/cqm:online",
- NULL, intel_cqm_cpu_exit);
-out:
- cpus_read_unlock();
-
- if (ret) {
- kfree(str);
- cqm_cleanup();
- mbm_cleanup();
- }
-
- return ret;
-}
-device_initcall(intel_cqm_init);
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index 238ae3248ba5..4cf100ff2a37 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -40,16 +40,16 @@
* Model specific counters:
* MSR_CORE_C1_RES: CORE C1 Residency Counter
* perf code: 0x00
- * Available model: SLM,AMT
+ * Available model: SLM,AMT,GLM
* Scope: Core (each processor core has a MSR)
* MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
* perf code: 0x01
- * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL
+ * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,GLM
* Scope: Core
* MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter
* perf code: 0x02
* Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW
- * SKL,KNL
+ * SKL,KNL,GLM
* Scope: Core
* MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
* perf code: 0x03
@@ -57,16 +57,17 @@
* Scope: Core
* MSR_PKG_C2_RESIDENCY: Package C2 Residency Counter.
* perf code: 0x00
- * Available model: SNB,IVB,HSW,BDW,SKL,KNL
+ * Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM
* Scope: Package (physical package)
* MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter.
* perf code: 0x01
* Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL
+ * GLM
* Scope: Package (physical package)
* MSR_PKG_C6_RESIDENCY: Package C6 Residency Counter.
* perf code: 0x02
* Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW
- * SKL,KNL
+ * SKL,KNL,GLM
* Scope: Package (physical package)
* MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter.
* perf code: 0x03
@@ -82,7 +83,7 @@
* Scope: Package (physical package)
* MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
* perf code: 0x06
- * Available model: HSW ULT only
+ * Available model: HSW ULT, GLM
* Scope: Package (physical package)
*
*/
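The perf codes listed above become the config values of the cstate PMU events. A rough sketch of counting core C6 residency (perf code 0x02) on CPU 0 follows; the "cstate_core" PMU name and the sysfs path used to discover its dynamic type are assumptions, not something this patch defines.

/* Sketch: count core C6 residency (perf code 0x02 in the table above)
 * on CPU 0. PMU name and sysfs path are assumptions.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <inttypes.h>
#include <string.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int type, fd;
	FILE *f;

	f = fopen("/sys/bus/event_source/devices/cstate_core/type", "r");
	if (!f)
		return 1;
	if (fscanf(f, "%d", &type) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;
	attr.config = 0x02;	/* MSR_CORE_C6_RESIDENCY per the comment above */

	fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	sleep(1);
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("C6 residency delta: %" PRIu64 "\n", count);
	close(fd);
	return 0;
}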
@@ -504,6 +505,17 @@ static const struct cstate_model knl_cstates __initconst = {
};
+static const struct cstate_model glm_cstates __initconst = {
+ .core_events = BIT(PERF_CSTATE_CORE_C1_RES) |
+ BIT(PERF_CSTATE_CORE_C3_RES) |
+ BIT(PERF_CSTATE_CORE_C6_RES),
+
+ .pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) |
+ BIT(PERF_CSTATE_PKG_C3_RES) |
+ BIT(PERF_CSTATE_PKG_C6_RES) |
+ BIT(PERF_CSTATE_PKG_C10_RES),
+};
+
#define X86_CSTATES_MODEL(model, states) \
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long) &(states) }
@@ -546,6 +558,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNL, knl_cstates),
X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNM, knl_cstates),
+
+ X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT, glm_cstates),
{ },
};
MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index c6d23ffe422d..e1965e5ff570 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -49,34 +49,47 @@ union intel_x86_pebs_dse {
*/
#define P(a, b) PERF_MEM_S(a, b)
#define OP_LH (P(OP, LOAD) | P(LVL, HIT))
+#define LEVEL(x) P(LVLNUM, x)
+#define REM P(REMOTE, REMOTE)
#define SNOOP_NONE_MISS (P(SNOOP, NONE) | P(SNOOP, MISS))
/* Version for Sandy Bridge and later */
static u64 pebs_data_source[] = {
- P(OP, LOAD) | P(LVL, MISS) | P(LVL, L3) | P(SNOOP, NA),/* 0x00:ukn L3 */
- OP_LH | P(LVL, L1) | P(SNOOP, NONE), /* 0x01: L1 local */
- OP_LH | P(LVL, LFB) | P(SNOOP, NONE), /* 0x02: LFB hit */
- OP_LH | P(LVL, L2) | P(SNOOP, NONE), /* 0x03: L2 hit */
- OP_LH | P(LVL, L3) | P(SNOOP, NONE), /* 0x04: L3 hit */
- OP_LH | P(LVL, L3) | P(SNOOP, MISS), /* 0x05: L3 hit, snoop miss */
- OP_LH | P(LVL, L3) | P(SNOOP, HIT), /* 0x06: L3 hit, snoop hit */
- OP_LH | P(LVL, L3) | P(SNOOP, HITM), /* 0x07: L3 hit, snoop hitm */
- OP_LH | P(LVL, REM_CCE1) | P(SNOOP, HIT), /* 0x08: L3 miss snoop hit */
- OP_LH | P(LVL, REM_CCE1) | P(SNOOP, HITM), /* 0x09: L3 miss snoop hitm*/
- OP_LH | P(LVL, LOC_RAM) | P(SNOOP, HIT), /* 0x0a: L3 miss, shared */
- OP_LH | P(LVL, REM_RAM1) | P(SNOOP, HIT), /* 0x0b: L3 miss, shared */
- OP_LH | P(LVL, LOC_RAM) | SNOOP_NONE_MISS,/* 0x0c: L3 miss, excl */
- OP_LH | P(LVL, REM_RAM1) | SNOOP_NONE_MISS,/* 0x0d: L3 miss, excl */
- OP_LH | P(LVL, IO) | P(SNOOP, NONE), /* 0x0e: I/O */
- OP_LH | P(LVL, UNC) | P(SNOOP, NONE), /* 0x0f: uncached */
+ P(OP, LOAD) | P(LVL, MISS) | LEVEL(L3) | P(SNOOP, NA),/* 0x00:ukn L3 */
+ OP_LH | P(LVL, L1) | LEVEL(L1) | P(SNOOP, NONE), /* 0x01: L1 local */
+ OP_LH | P(LVL, LFB) | LEVEL(LFB) | P(SNOOP, NONE), /* 0x02: LFB hit */
+ OP_LH | P(LVL, L2) | LEVEL(L2) | P(SNOOP, NONE), /* 0x03: L2 hit */
+ OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, NONE), /* 0x04: L3 hit */
+ OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, MISS), /* 0x05: L3 hit, snoop miss */
+ OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT), /* 0x06: L3 hit, snoop hit */
+ OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM), /* 0x07: L3 hit, snoop hitm */
+ OP_LH | P(LVL, REM_CCE1) | REM | LEVEL(L3) | P(SNOOP, HIT), /* 0x08: L3 miss snoop hit */
+ OP_LH | P(LVL, REM_CCE1) | REM | LEVEL(L3) | P(SNOOP, HITM), /* 0x09: L3 miss snoop hitm*/
+ OP_LH | P(LVL, LOC_RAM) | LEVEL(RAM) | P(SNOOP, HIT), /* 0x0a: L3 miss, shared */
+ OP_LH | P(LVL, REM_RAM1) | REM | LEVEL(L3) | P(SNOOP, HIT), /* 0x0b: L3 miss, shared */
+ OP_LH | P(LVL, LOC_RAM) | LEVEL(RAM) | SNOOP_NONE_MISS, /* 0x0c: L3 miss, excl */
+ OP_LH | P(LVL, REM_RAM1) | LEVEL(RAM) | REM | SNOOP_NONE_MISS, /* 0x0d: L3 miss, excl */
+ OP_LH | P(LVL, IO) | LEVEL(NA) | P(SNOOP, NONE), /* 0x0e: I/O */
+ OP_LH | P(LVL, UNC) | LEVEL(NA) | P(SNOOP, NONE), /* 0x0f: uncached */
};
/* Patch up minor differences in the bits */
void __init intel_pmu_pebs_data_source_nhm(void)
{
- pebs_data_source[0x05] = OP_LH | P(LVL, L3) | P(SNOOP, HIT);
- pebs_data_source[0x06] = OP_LH | P(LVL, L3) | P(SNOOP, HITM);
- pebs_data_source[0x07] = OP_LH | P(LVL, L3) | P(SNOOP, HITM);
+ pebs_data_source[0x05] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT);
+ pebs_data_source[0x06] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM);
+ pebs_data_source[0x07] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM);
+}
+
+void __init intel_pmu_pebs_data_source_skl(bool pmem)
+{
+ u64 pmem_or_l4 = pmem ? LEVEL(PMEM) : LEVEL(L4);
+
+ pebs_data_source[0x08] = OP_LH | pmem_or_l4 | P(SNOOP, HIT);
+ pebs_data_source[0x09] = OP_LH | pmem_or_l4 | REM | P(SNOOP, HIT);
+ pebs_data_source[0x0b] = OP_LH | LEVEL(RAM) | REM | P(SNOOP, NONE);
+ pebs_data_source[0x0c] = OP_LH | LEVEL(ANY_CACHE) | REM | P(SNOOPX, FWD);
+ pebs_data_source[0x0d] = OP_LH | LEVEL(ANY_CACHE) | REM | P(SNOOP, HITM);
}
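
For orientation (not part of the patch): the table above maps the low 4 bits of the PEBS data-source field to a precomputed perf_mem descriptor, and intel_pmu_pebs_data_source_skl() only patches the entries whose meaning changed on Skylake. A hedged standalone sketch of the lookup-and-compose pattern; the EX_* shifts and values below are invented for the example and are not the real PERF_MEM_* encodings:

#include <stdint.h>
#include <stdio.h>

/* Invented field layout, only to show how a table of composed descriptors is built. */
#define EX_OP_SHIFT    0
#define EX_LVL_SHIFT   8
#define EX_SNOOP_SHIFT 16

#define EX_OP_LOAD   0x1
#define EX_LVL_L1    0x1
#define EX_LVL_L3    0x4
#define EX_SNOOP_HIT 0x2

static const uint64_t example_data_source[16] = {
	[0x01] = (EX_OP_LOAD << EX_OP_SHIFT) | (EX_LVL_L1 << EX_LVL_SHIFT),
	[0x06] = (EX_OP_LOAD << EX_OP_SHIFT) | (EX_LVL_L3 << EX_LVL_SHIFT) |
		 (EX_SNOOP_HIT << EX_SNOOP_SHIFT),
};

int main(void)
{
	unsigned int dse = 0x06;	/* low 4 bits of the PEBS record field */

	printf("source 0x%x -> descriptor 0x%llx\n", dse,
	       (unsigned long long)example_data_source[dse & 0xf]);
	return 0;
}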
static u64 precise_store_data(u64 status)
@@ -149,8 +162,6 @@ static u64 load_latency_data(u64 status)
{
union intel_x86_pebs_dse dse;
u64 val;
- int model = boot_cpu_data.x86_model;
- int fam = boot_cpu_data.x86;
dse.val = status;
@@ -162,8 +173,7 @@ static u64 load_latency_data(u64 status)
/*
* Nehalem models do not support TLB, Lock infos
*/
- if (fam == 0x6 && (model == 26 || model == 30
- || model == 31 || model == 46)) {
+ if (x86_pmu.pebs_no_tlb) {
val |= P(TLB, NA) | P(LOCK, NA);
return val;
}
@@ -606,12 +616,6 @@ static inline void intel_pmu_drain_pebs_buffer(void)
x86_pmu.drain_pebs(&regs);
}
-void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in)
-{
- if (!sched_in)
- intel_pmu_drain_pebs_buffer();
-}
-
/*
* PEBS
*/
@@ -651,6 +655,12 @@ struct event_constraint intel_glm_pebs_event_constraints[] = {
EVENT_CONSTRAINT_END
};
+struct event_constraint intel_glp_pebs_event_constraints[] = {
+ /* Allow all events as PEBS with no flags */
+ INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
+ EVENT_CONSTRAINT_END
+};
+
struct event_constraint intel_nehalem_pebs_event_constraints[] = {
INTEL_PLD_CONSTRAINT(0x100b, 0xf), /* MEM_INST_RETIRED.* */
INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */
@@ -816,6 +826,14 @@ static inline bool pebs_needs_sched_cb(struct cpu_hw_events *cpuc)
return cpuc->n_pebs && (cpuc->n_pebs == cpuc->n_large_pebs);
}
+void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+
+ if (!sched_in && pebs_needs_sched_cb(cpuc))
+ intel_pmu_drain_pebs_buffer();
+}
+
static inline void pebs_update_threshold(struct cpu_hw_events *cpuc)
{
struct debug_store *ds = cpuc->ds;
@@ -889,6 +907,8 @@ void intel_pmu_pebs_enable(struct perf_event *event)
if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
ds->pebs_event_reset[hwc->idx] =
(u64)(-hwc->sample_period) & x86_pmu.cntval_mask;
+ } else {
+ ds->pebs_event_reset[hwc->idx] = 0;
}
}
@@ -1165,7 +1185,7 @@ static void setup_pebs_sample_data(struct perf_event *event,
else
regs->flags &= ~PERF_EFLAGS_EXACT;
- if ((sample_type & PERF_SAMPLE_ADDR) &&
+ if ((sample_type & (PERF_SAMPLE_ADDR | PERF_SAMPLE_PHYS_ADDR)) &&
x86_pmu.intel_cap.pebs_format >= 1)
data->addr = pebs->dla;
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index eb261656a320..8a6bbacd17dc 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -109,6 +109,9 @@ enum {
X86_BR_ZERO_CALL = 1 << 15,/* zero length call */
X86_BR_CALL_STACK = 1 << 16,/* call stack */
X86_BR_IND_JMP = 1 << 17,/* indirect jump */
+
+ X86_BR_TYPE_SAVE = 1 << 18,/* indicate to save branch type */
+
};
#define X86_BR_PLM (X86_BR_USER | X86_BR_KERNEL)
@@ -380,8 +383,12 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct x86_perf_task_context *task_ctx;
+ if (!cpuc->lbr_users)
+ return;
+
/*
* If LBR callstack feature is enabled and the stack was saved when
* the task was scheduled out, restore the stack. Otherwise flush
@@ -510,6 +517,7 @@ static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
cpuc->lbr_entries[i].in_tx = 0;
cpuc->lbr_entries[i].abort = 0;
cpuc->lbr_entries[i].cycles = 0;
+ cpuc->lbr_entries[i].type = 0;
cpuc->lbr_entries[i].reserved = 0;
}
cpuc->lbr_stack.nr = i;
@@ -596,6 +604,7 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
cpuc->lbr_entries[out].in_tx = in_tx;
cpuc->lbr_entries[out].abort = abort;
cpuc->lbr_entries[out].cycles = cycles;
+ cpuc->lbr_entries[out].type = 0;
cpuc->lbr_entries[out].reserved = 0;
out++;
}
@@ -673,6 +682,10 @@ static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
if (br_type & PERF_SAMPLE_BRANCH_CALL)
mask |= X86_BR_CALL | X86_BR_ZERO_CALL;
+
+ if (br_type & PERF_SAMPLE_BRANCH_TYPE_SAVE)
+ mask |= X86_BR_TYPE_SAVE;
+
/*
* stash actual user request into reg, it may
* be used by fixup code for some CPU
@@ -926,6 +939,43 @@ static int branch_type(unsigned long from, unsigned long to, int abort)
return ret;
}
+#define X86_BR_TYPE_MAP_MAX 16
+
+static int branch_map[X86_BR_TYPE_MAP_MAX] = {
+ PERF_BR_CALL, /* X86_BR_CALL */
+ PERF_BR_RET, /* X86_BR_RET */
+ PERF_BR_SYSCALL, /* X86_BR_SYSCALL */
+ PERF_BR_SYSRET, /* X86_BR_SYSRET */
+ PERF_BR_UNKNOWN, /* X86_BR_INT */
+ PERF_BR_UNKNOWN, /* X86_BR_IRET */
+ PERF_BR_COND, /* X86_BR_JCC */
+ PERF_BR_UNCOND, /* X86_BR_JMP */
+ PERF_BR_UNKNOWN, /* X86_BR_IRQ */
+ PERF_BR_IND_CALL, /* X86_BR_IND_CALL */
+ PERF_BR_UNKNOWN, /* X86_BR_ABORT */
+ PERF_BR_UNKNOWN, /* X86_BR_IN_TX */
+ PERF_BR_UNKNOWN, /* X86_BR_NO_TX */
+ PERF_BR_CALL, /* X86_BR_ZERO_CALL */
+ PERF_BR_UNKNOWN, /* X86_BR_CALL_STACK */
+ PERF_BR_IND, /* X86_BR_IND_JMP */
+};
+
+static int
+common_branch_type(int type)
+{
+ int i;
+
+ type >>= 2; /* skip X86_BR_USER and X86_BR_KERNEL */
+
+ if (type) {
+ i = __ffs(type);
+ if (i < X86_BR_TYPE_MAP_MAX)
+ return branch_map[i];
+ }
+
+ return PERF_BR_UNKNOWN;
+}
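
common_branch_type() drops the two privilege bits, then uses the lowest remaining set bit as an index into branch_map. A standalone re-implementation of that index math, with the constants mirrored locally and __builtin_ctz standing in for __ffs (illustration only):

#include <stdio.h>

/* Local mirrors of the relevant X86_BR_* bits for the example. */
#define EX_BR_USER (1 << 0)
#define EX_BR_JCC  (1 << 8)

enum { EX_BR_UNKNOWN = 0, EX_BR_COND = 1 };

static const int example_map[16] = {
	/* index 6 corresponds to JCC once the USER/KERNEL bits are shifted out */
	[6] = EX_BR_COND,
};

static int example_branch_type(int type)
{
	type >>= 2;			/* drop the USER/KERNEL bits */
	if (type) {
		int i = __builtin_ctz(type);
		if (i < 16)
			return example_map[i];
	}
	return EX_BR_UNKNOWN;
}

int main(void)
{
	/* Prints 1, i.e. the conditional-branch slot. */
	printf("%d\n", example_branch_type(EX_BR_USER | EX_BR_JCC));
	return 0;
}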
+
/*
* implement actual branch filter based on user demand.
* Hardware may not exactly satisfy that request, thus
@@ -942,7 +992,8 @@ intel_pmu_lbr_filter(struct cpu_hw_events *cpuc)
bool compress = false;
/* if sampling all branches, then nothing to filter */
- if ((br_sel & X86_BR_ALL) == X86_BR_ALL)
+ if (((br_sel & X86_BR_ALL) == X86_BR_ALL) &&
+ ((br_sel & X86_BR_TYPE_SAVE) != X86_BR_TYPE_SAVE))
return;
for (i = 0; i < cpuc->lbr_stack.nr; i++) {
@@ -963,6 +1014,9 @@ intel_pmu_lbr_filter(struct cpu_hw_events *cpuc)
cpuc->lbr_entries[i].from = 0;
compress = true;
}
+
+ if ((br_sel & X86_BR_TYPE_SAVE) == X86_BR_TYPE_SAVE)
+ cpuc->lbr_entries[i].type = common_branch_type(type);
}
if (!compress)
diff --git a/arch/x86/events/intel/p4.c b/arch/x86/events/intel/p4.c
index eb0533558c2b..d32c0eed38ca 100644
--- a/arch/x86/events/intel/p4.c
+++ b/arch/x86/events/intel/p4.c
@@ -587,7 +587,7 @@ static __initconst const u64 p4_hw_cache_event_ids
* P4_CONFIG_ALIASABLE or bits for P4_PEBS_METRIC, they are
* either up to date automatically or not applicable at all.
*/
-struct p4_event_alias {
+static struct p4_event_alias {
u64 original;
u64 alternative;
} p4_event_aliases[] = {
diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
index ae8324d65e61..81fd41d5a0d9 100644
--- a/arch/x86/events/intel/pt.c
+++ b/arch/x86/events/intel/pt.c
@@ -471,8 +471,9 @@ static void pt_config(struct perf_event *event)
struct pt *pt = this_cpu_ptr(&pt_ctx);
u64 reg;
- if (!event->hw.itrace_started) {
- event->hw.itrace_started = 1;
+ /* First round: clear STATUS, in particular the PSB byte counter. */
+ if (!event->hw.config) {
+ perf_event_itrace_started(event);
wrmsrl(MSR_IA32_RTIT_STATUS, 0);
}
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
index a45e2114a846..8e2457cb6b4a 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -559,7 +559,7 @@ static struct attribute_group rapl_pmu_format_group = {
.attrs = rapl_formats_attr,
};
-const struct attribute_group *rapl_attr_groups[] = {
+static const struct attribute_group *rapl_attr_groups[] = {
&rapl_pmu_attr_group,
&rapl_pmu_format_group,
&rapl_pmu_events_group,
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index 44ec523287f6..1c5390f1cf09 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -721,7 +721,7 @@ static struct attribute *uncore_pmu_attrs[] = {
NULL,
};
-static struct attribute_group uncore_pmu_attr_group = {
+static const struct attribute_group uncore_pmu_attr_group = {
.attrs = uncore_pmu_attrs,
};
diff --git a/arch/x86/events/intel/uncore_nhmex.c b/arch/x86/events/intel/uncore_nhmex.c
index cda569332005..6a5cbe90f859 100644
--- a/arch/x86/events/intel/uncore_nhmex.c
+++ b/arch/x86/events/intel/uncore_nhmex.c
@@ -272,7 +272,7 @@ static struct attribute *nhmex_uncore_ubox_formats_attr[] = {
NULL,
};
-static struct attribute_group nhmex_uncore_ubox_format_group = {
+static const struct attribute_group nhmex_uncore_ubox_format_group = {
.name = "format",
.attrs = nhmex_uncore_ubox_formats_attr,
};
@@ -299,7 +299,7 @@ static struct attribute *nhmex_uncore_cbox_formats_attr[] = {
NULL,
};
-static struct attribute_group nhmex_uncore_cbox_format_group = {
+static const struct attribute_group nhmex_uncore_cbox_format_group = {
.name = "format",
.attrs = nhmex_uncore_cbox_formats_attr,
};
@@ -407,7 +407,7 @@ static struct attribute *nhmex_uncore_bbox_formats_attr[] = {
NULL,
};
-static struct attribute_group nhmex_uncore_bbox_format_group = {
+static const struct attribute_group nhmex_uncore_bbox_format_group = {
.name = "format",
.attrs = nhmex_uncore_bbox_formats_attr,
};
@@ -484,7 +484,7 @@ static struct attribute *nhmex_uncore_sbox_formats_attr[] = {
NULL,
};
-static struct attribute_group nhmex_uncore_sbox_format_group = {
+static const struct attribute_group nhmex_uncore_sbox_format_group = {
.name = "format",
.attrs = nhmex_uncore_sbox_formats_attr,
};
@@ -898,7 +898,7 @@ static struct attribute *nhmex_uncore_mbox_formats_attr[] = {
NULL,
};
-static struct attribute_group nhmex_uncore_mbox_format_group = {
+static const struct attribute_group nhmex_uncore_mbox_format_group = {
.name = "format",
.attrs = nhmex_uncore_mbox_formats_attr,
};
@@ -1163,7 +1163,7 @@ static struct attribute *nhmex_uncore_rbox_formats_attr[] = {
NULL,
};
-static struct attribute_group nhmex_uncore_rbox_format_group = {
+static const struct attribute_group nhmex_uncore_rbox_format_group = {
.name = "format",
.attrs = nhmex_uncore_rbox_formats_attr,
};
diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
index a3dcc12bef4a..db1127ce685e 100644
--- a/arch/x86/events/intel/uncore_snb.c
+++ b/arch/x86/events/intel/uncore_snb.c
@@ -130,7 +130,7 @@ static struct attribute *snb_uncore_formats_attr[] = {
NULL,
};
-static struct attribute_group snb_uncore_format_group = {
+static const struct attribute_group snb_uncore_format_group = {
.name = "format",
.attrs = snb_uncore_formats_attr,
};
@@ -289,7 +289,7 @@ static struct attribute *snb_uncore_imc_formats_attr[] = {
NULL,
};
-static struct attribute_group snb_uncore_imc_format_group = {
+static const struct attribute_group snb_uncore_imc_format_group = {
.name = "format",
.attrs = snb_uncore_imc_formats_attr,
};
@@ -769,7 +769,7 @@ static struct attribute *nhm_uncore_formats_attr[] = {
NULL,
};
-static struct attribute_group nhm_uncore_format_group = {
+static const struct attribute_group nhm_uncore_format_group = {
.name = "format",
.attrs = nhm_uncore_formats_attr,
};
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index dae2fedc1601..db1fe377e6dd 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -316,7 +316,7 @@
#define SKX_UPI_PCI_PMON_CTL0 0x350
#define SKX_UPI_PCI_PMON_CTR0 0x318
#define SKX_UPI_PCI_PMON_BOX_CTL 0x378
-#define SKX_PMON_CTL_UMASK_EXT 0xff
+#define SKX_UPI_CTL_UMASK_EXT 0xffefff
/* SKX M2M */
#define SKX_M2M_PCI_PMON_CTL0 0x228
@@ -328,7 +328,7 @@ DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6");
DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
-DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-39");
+DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-43,45-55");
DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
@@ -351,7 +351,6 @@ DEFINE_UNCORE_FORMAT_ATTR(filter_cid, filter_cid, "config1:5");
DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8");
DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12");
-DEFINE_UNCORE_FORMAT_ATTR(filter_link4, filter_link, "config1:9-12");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47");
DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
@@ -603,27 +602,27 @@ static struct uncore_event_desc snbep_uncore_qpi_events[] = {
{ /* end: all zeroes */ },
};
-static struct attribute_group snbep_uncore_format_group = {
+static const struct attribute_group snbep_uncore_format_group = {
.name = "format",
.attrs = snbep_uncore_formats_attr,
};
-static struct attribute_group snbep_uncore_ubox_format_group = {
+static const struct attribute_group snbep_uncore_ubox_format_group = {
.name = "format",
.attrs = snbep_uncore_ubox_formats_attr,
};
-static struct attribute_group snbep_uncore_cbox_format_group = {
+static const struct attribute_group snbep_uncore_cbox_format_group = {
.name = "format",
.attrs = snbep_uncore_cbox_formats_attr,
};
-static struct attribute_group snbep_uncore_pcu_format_group = {
+static const struct attribute_group snbep_uncore_pcu_format_group = {
.name = "format",
.attrs = snbep_uncore_pcu_formats_attr,
};
-static struct attribute_group snbep_uncore_qpi_format_group = {
+static const struct attribute_group snbep_uncore_qpi_format_group = {
.name = "format",
.attrs = snbep_uncore_qpi_formats_attr,
};
@@ -1432,27 +1431,27 @@ static struct attribute *ivbep_uncore_qpi_formats_attr[] = {
NULL,
};
-static struct attribute_group ivbep_uncore_format_group = {
+static const struct attribute_group ivbep_uncore_format_group = {
.name = "format",
.attrs = ivbep_uncore_formats_attr,
};
-static struct attribute_group ivbep_uncore_ubox_format_group = {
+static const struct attribute_group ivbep_uncore_ubox_format_group = {
.name = "format",
.attrs = ivbep_uncore_ubox_formats_attr,
};
-static struct attribute_group ivbep_uncore_cbox_format_group = {
+static const struct attribute_group ivbep_uncore_cbox_format_group = {
.name = "format",
.attrs = ivbep_uncore_cbox_formats_attr,
};
-static struct attribute_group ivbep_uncore_pcu_format_group = {
+static const struct attribute_group ivbep_uncore_pcu_format_group = {
.name = "format",
.attrs = ivbep_uncore_pcu_formats_attr,
};
-static struct attribute_group ivbep_uncore_qpi_format_group = {
+static const struct attribute_group ivbep_uncore_qpi_format_group = {
.name = "format",
.attrs = ivbep_uncore_qpi_formats_attr,
};
@@ -1888,7 +1887,7 @@ static struct attribute *knl_uncore_ubox_formats_attr[] = {
NULL,
};
-static struct attribute_group knl_uncore_ubox_format_group = {
+static const struct attribute_group knl_uncore_ubox_format_group = {
.name = "format",
.attrs = knl_uncore_ubox_formats_attr,
};
@@ -1928,7 +1927,7 @@ static struct attribute *knl_uncore_cha_formats_attr[] = {
NULL,
};
-static struct attribute_group knl_uncore_cha_format_group = {
+static const struct attribute_group knl_uncore_cha_format_group = {
.name = "format",
.attrs = knl_uncore_cha_formats_attr,
};
@@ -2038,7 +2037,7 @@ static struct attribute *knl_uncore_pcu_formats_attr[] = {
NULL,
};
-static struct attribute_group knl_uncore_pcu_format_group = {
+static const struct attribute_group knl_uncore_pcu_format_group = {
.name = "format",
.attrs = knl_uncore_pcu_formats_attr,
};
@@ -2188,7 +2187,7 @@ static struct attribute *knl_uncore_irp_formats_attr[] = {
NULL,
};
-static struct attribute_group knl_uncore_irp_format_group = {
+static const struct attribute_group knl_uncore_irp_format_group = {
.name = "format",
.attrs = knl_uncore_irp_formats_attr,
};
@@ -2386,7 +2385,7 @@ static struct attribute *hswep_uncore_ubox_formats_attr[] = {
NULL,
};
-static struct attribute_group hswep_uncore_ubox_format_group = {
+static const struct attribute_group hswep_uncore_ubox_format_group = {
.name = "format",
.attrs = hswep_uncore_ubox_formats_attr,
};
@@ -2440,7 +2439,7 @@ static struct attribute *hswep_uncore_cbox_formats_attr[] = {
NULL,
};
-static struct attribute_group hswep_uncore_cbox_format_group = {
+static const struct attribute_group hswep_uncore_cbox_format_group = {
.name = "format",
.attrs = hswep_uncore_cbox_formats_attr,
};
@@ -2622,7 +2621,7 @@ static struct attribute *hswep_uncore_sbox_formats_attr[] = {
NULL,
};
-static struct attribute_group hswep_uncore_sbox_format_group = {
+static const struct attribute_group hswep_uncore_sbox_format_group = {
.name = "format",
.attrs = hswep_uncore_sbox_formats_attr,
};
@@ -3302,7 +3301,6 @@ static struct attribute *skx_uncore_cha_formats_attr[] = {
&format_attr_inv.attr,
&format_attr_thresh8.attr,
&format_attr_filter_tid4.attr,
- &format_attr_filter_link4.attr,
&format_attr_filter_state5.attr,
&format_attr_filter_rem.attr,
&format_attr_filter_loc.attr,
@@ -3312,12 +3310,11 @@ static struct attribute *skx_uncore_cha_formats_attr[] = {
&format_attr_filter_opc_0.attr,
&format_attr_filter_opc_1.attr,
&format_attr_filter_nc.attr,
- &format_attr_filter_c6.attr,
&format_attr_filter_isoc.attr,
NULL,
};
-static struct attribute_group skx_uncore_chabox_format_group = {
+static const struct attribute_group skx_uncore_chabox_format_group = {
.name = "format",
.attrs = skx_uncore_cha_formats_attr,
};
@@ -3333,8 +3330,11 @@ static struct extra_reg skx_uncore_cha_extra_regs[] = {
SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
- SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4),
- SNBEP_CBO_EVENT_EXTRA_REG(0x8134, 0xffff, 0x4),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x3134, 0xffff, 0x4),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x9134, 0xffff, 0x4),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8),
+ EVENT_EXTRA_END
};
static u64 skx_cha_filter_mask(int fields)
@@ -3347,6 +3347,17 @@ static u64 skx_cha_filter_mask(int fields)
mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK;
if (fields & 0x4)
mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE;
+ if (fields & 0x8) {
+ mask |= SKX_CHA_MSR_PMON_BOX_FILTER_REM;
+ mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LOC;
+ mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC;
+ mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NM;
+ mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM;
+ mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC0;
+ mask |= SKX_CHA_MSR_PMON_BOX_FILTER_OPC1;
+ mask |= SKX_CHA_MSR_PMON_BOX_FILTER_NC;
+ mask |= SKX_CHA_MSR_PMON_BOX_FILTER_ISOC;
+ }
return mask;
}
@@ -3416,7 +3427,7 @@ static struct attribute *skx_uncore_iio_formats_attr[] = {
NULL,
};
-static struct attribute_group skx_uncore_iio_format_group = {
+static const struct attribute_group skx_uncore_iio_format_group = {
.name = "format",
.attrs = skx_uncore_iio_formats_attr,
};
@@ -3473,7 +3484,7 @@ static struct attribute *skx_uncore_formats_attr[] = {
NULL,
};
-static struct attribute_group skx_uncore_format_group = {
+static const struct attribute_group skx_uncore_format_group = {
.name = "format",
.attrs = skx_uncore_formats_attr,
};
@@ -3492,6 +3503,26 @@ static struct intel_uncore_type skx_uncore_irp = {
.format_group = &skx_uncore_format_group,
};
+static struct attribute *skx_uncore_pcu_formats_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_umask.attr,
+ &format_attr_edge.attr,
+ &format_attr_inv.attr,
+ &format_attr_thresh8.attr,
+ &format_attr_occ_invert.attr,
+ &format_attr_occ_edge_det.attr,
+ &format_attr_filter_band0.attr,
+ &format_attr_filter_band1.attr,
+ &format_attr_filter_band2.attr,
+ &format_attr_filter_band3.attr,
+ NULL,
+};
+
+static struct attribute_group skx_uncore_pcu_format_group = {
+ .name = "format",
+ .attrs = skx_uncore_pcu_formats_attr,
+};
+
static struct intel_uncore_ops skx_uncore_pcu_ops = {
IVBEP_UNCORE_MSR_OPS_COMMON_INIT(),
.hw_config = hswep_pcu_hw_config,
@@ -3510,7 +3541,7 @@ static struct intel_uncore_type skx_uncore_pcu = {
.box_ctl = HSWEP_PCU_MSR_PMON_BOX_CTL,
.num_shared_regs = 1,
.ops = &skx_uncore_pcu_ops,
- .format_group = &snbep_uncore_pcu_format_group,
+ .format_group = &skx_uncore_pcu_format_group,
};
static struct intel_uncore_type *skx_msr_uncores[] = {
@@ -3574,7 +3605,7 @@ static struct attribute *skx_upi_uncore_formats_attr[] = {
NULL,
};
-static struct attribute_group skx_upi_uncore_format_group = {
+static const struct attribute_group skx_upi_uncore_format_group = {
.name = "format",
.attrs = skx_upi_uncore_formats_attr,
};
@@ -3603,8 +3634,8 @@ static struct intel_uncore_type skx_uncore_upi = {
.perf_ctr_bits = 48,
.perf_ctr = SKX_UPI_PCI_PMON_CTR0,
.event_ctl = SKX_UPI_PCI_PMON_CTL0,
- .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
- .event_mask_ext = SKX_PMON_CTL_UMASK_EXT,
+ .event_mask = SNBEP_PMON_RAW_EVENT_MASK,
+ .event_mask_ext = SKX_UPI_CTL_UMASK_EXT,
.box_ctl = SKX_UPI_PCI_PMON_BOX_CTL,
.ops = &skx_upi_uncore_pci_ops,
.format_group = &skx_upi_uncore_format_group,
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 53728eea1bed..4196f81ec0e1 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -91,7 +91,7 @@ struct amd_nb {
(PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ADDR | \
PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID | \
PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_IDENTIFIER | \
- PERF_SAMPLE_TRANSACTION)
+ PERF_SAMPLE_TRANSACTION | PERF_SAMPLE_PHYS_ADDR)
/*
* A debug store configuration.
@@ -558,6 +558,7 @@ struct x86_pmu {
int attr_rdpmc;
struct attribute **format_attrs;
struct attribute **event_attrs;
+ struct attribute **caps_attrs;
ssize_t (*events_sysfs_show)(char *page, u64 config);
struct attribute **cpu_events;
@@ -591,7 +592,8 @@ struct x86_pmu {
pebs :1,
pebs_active :1,
pebs_broken :1,
- pebs_prec_dist :1;
+ pebs_prec_dist :1,
+ pebs_no_tlb :1;
int pebs_record_size;
int pebs_buffer_size;
void (*drain_pebs)(struct pt_regs *regs);
@@ -741,6 +743,8 @@ int x86_reserve_hardware(void);
void x86_release_hardware(void);
+int x86_pmu_max_precise(void);
+
void hw_perf_lbr_event_destroy(struct perf_event *event);
int x86_setup_perfctr(struct perf_event *event);
@@ -879,6 +883,8 @@ extern struct event_constraint intel_slm_pebs_event_constraints[];
extern struct event_constraint intel_glm_pebs_event_constraints[];
+extern struct event_constraint intel_glp_pebs_event_constraints[];
+
extern struct event_constraint intel_nehalem_pebs_event_constraints[];
extern struct event_constraint intel_westmere_pebs_event_constraints[];
@@ -945,6 +951,8 @@ void intel_pmu_lbr_init_knl(void);
void intel_pmu_pebs_data_source_nhm(void);
+void intel_pmu_pebs_data_source_skl(bool pmem);
+
int intel_pmu_setup_lbr_filter(struct perf_event *event);
void intel_pt_interrupt(void);
diff --git a/arch/x86/hyperv/Makefile b/arch/x86/hyperv/Makefile
index 171ae09864d7..367a8203cfcf 100644
--- a/arch/x86/hyperv/Makefile
+++ b/arch/x86/hyperv/Makefile
@@ -1 +1 @@
-obj-y := hv_init.o
+obj-y := hv_init.o mmu.o
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
index 5b882cc0c0e9..1a8eb550c40f 100644
--- a/arch/x86/hyperv/hv_init.c
+++ b/arch/x86/hyperv/hv_init.c
@@ -26,6 +26,8 @@
#include <linux/mm.h>
#include <linux/clockchips.h>
#include <linux/hyperv.h>
+#include <linux/slab.h>
+#include <linux/cpuhotplug.h>
#ifdef CONFIG_HYPERV_TSCPAGE
@@ -75,10 +77,25 @@ static struct clocksource hyperv_cs_msr = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-static void *hypercall_pg;
+void *hv_hypercall_pg;
+EXPORT_SYMBOL_GPL(hv_hypercall_pg);
struct clocksource *hyperv_cs;
EXPORT_SYMBOL_GPL(hyperv_cs);
+u32 *hv_vp_index;
+EXPORT_SYMBOL_GPL(hv_vp_index);
+
+static int hv_cpu_init(unsigned int cpu)
+{
+ u64 msr_vp_index;
+
+ hv_get_vp_index(msr_vp_index);
+
+ hv_vp_index[smp_processor_id()] = msr_vp_index;
+
+ return 0;
+}
+
/*
* This function is to be invoked early in the boot sequence after the
* hypervisor has been detected.
@@ -94,6 +111,16 @@ void hyperv_init(void)
if (x86_hyper != &x86_hyper_ms_hyperv)
return;
+ /* Allocate percpu VP index */
+ hv_vp_index = kmalloc_array(num_possible_cpus(), sizeof(*hv_vp_index),
+ GFP_KERNEL);
+ if (!hv_vp_index)
+ return;
+
+ if (cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/hyperv_init:online",
+ hv_cpu_init, NULL) < 0)
+ goto free_vp_index;
+
/*
* Setup the hypercall page and enable hypercalls.
* 1. Register the guest ID
@@ -102,17 +129,19 @@ void hyperv_init(void)
guest_id = generate_guest_id(0, LINUX_VERSION_CODE, 0);
wrmsrl(HV_X64_MSR_GUEST_OS_ID, guest_id);
- hypercall_pg = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
- if (hypercall_pg == NULL) {
+ hv_hypercall_pg = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
+ if (hv_hypercall_pg == NULL) {
wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
- return;
+ goto free_vp_index;
}
rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
hypercall_msr.enable = 1;
- hypercall_msr.guest_physical_address = vmalloc_to_pfn(hypercall_pg);
+ hypercall_msr.guest_physical_address = vmalloc_to_pfn(hv_hypercall_pg);
wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
+ hyper_alloc_mmu();
+
/*
* Register Hyper-V specific clocksource.
*/
@@ -148,6 +177,12 @@ register_msr_cs:
hyperv_cs = &hyperv_cs_msr;
if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
clocksource_register_hz(&hyperv_cs_msr, NSEC_PER_SEC/100);
+
+ return;
+
+free_vp_index:
+ kfree(hv_vp_index);
+ hv_vp_index = NULL;
}
/*
@@ -170,51 +205,6 @@ void hyperv_cleanup(void)
}
EXPORT_SYMBOL_GPL(hyperv_cleanup);
-/*
- * hv_do_hypercall- Invoke the specified hypercall
- */
-u64 hv_do_hypercall(u64 control, void *input, void *output)
-{
- u64 input_address = (input) ? virt_to_phys(input) : 0;
- u64 output_address = (output) ? virt_to_phys(output) : 0;
-#ifdef CONFIG_X86_64
- u64 hv_status = 0;
-
- if (!hypercall_pg)
- return (u64)ULLONG_MAX;
-
- __asm__ __volatile__("mov %0, %%r8" : : "r" (output_address) : "r8");
- __asm__ __volatile__("call *%3" : "=a" (hv_status) :
- "c" (control), "d" (input_address),
- "m" (hypercall_pg));
-
- return hv_status;
-
-#else
-
- u32 control_hi = control >> 32;
- u32 control_lo = control & 0xFFFFFFFF;
- u32 hv_status_hi = 1;
- u32 hv_status_lo = 1;
- u32 input_address_hi = input_address >> 32;
- u32 input_address_lo = input_address & 0xFFFFFFFF;
- u32 output_address_hi = output_address >> 32;
- u32 output_address_lo = output_address & 0xFFFFFFFF;
-
- if (!hypercall_pg)
- return (u64)ULLONG_MAX;
-
- __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
- "=a"(hv_status_lo) : "d" (control_hi),
- "a" (control_lo), "b" (input_address_hi),
- "c" (input_address_lo), "D"(output_address_hi),
- "S"(output_address_lo), "m" (hypercall_pg));
-
- return hv_status_lo | ((u64)hv_status_hi << 32);
-#endif /* !x86_64 */
-}
-EXPORT_SYMBOL_GPL(hv_do_hypercall);
-
void hyperv_report_panic(struct pt_regs *regs)
{
static bool panic_reported;
diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c
new file mode 100644
index 000000000000..39e7f6e50919
--- /dev/null
+++ b/arch/x86/hyperv/mmu.c
@@ -0,0 +1,272 @@
+#define pr_fmt(fmt) "Hyper-V: " fmt
+
+#include <linux/hyperv.h>
+#include <linux/log2.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <asm/fpu/api.h>
+#include <asm/mshyperv.h>
+#include <asm/msr.h>
+#include <asm/tlbflush.h>
+
+#define CREATE_TRACE_POINTS
+#include <asm/trace/hyperv.h>
+
+/* HvFlushVirtualAddressSpace, HvFlushVirtualAddressList hypercalls */
+struct hv_flush_pcpu {
+ u64 address_space;
+ u64 flags;
+ u64 processor_mask;
+ u64 gva_list[];
+};
+
+/* HvFlushVirtualAddressSpaceEx, HvFlushVirtualAddressListEx hypercalls */
+struct hv_flush_pcpu_ex {
+ u64 address_space;
+ u64 flags;
+ struct {
+ u64 format;
+ u64 valid_bank_mask;
+ u64 bank_contents[];
+ } hv_vp_set;
+ u64 gva_list[];
+};
+
+/* Each gva in gva_list encodes up to 4096 pages to flush */
+#define HV_TLB_FLUSH_UNIT (4096 * PAGE_SIZE)
+
+static struct hv_flush_pcpu __percpu *pcpu_flush;
+
+static struct hv_flush_pcpu_ex __percpu *pcpu_flush_ex;
+
+/*
+ * Fills in gva_list starting from offset. Returns the number of items added.
+ */
+static inline int fill_gva_list(u64 gva_list[], int offset,
+ unsigned long start, unsigned long end)
+{
+ int gva_n = offset;
+ unsigned long cur = start, diff;
+
+ do {
+ diff = end > cur ? end - cur : 0;
+
+ gva_list[gva_n] = cur & PAGE_MASK;
+ /*
+ * Lower 12 bits encode the number of additional
+ * pages to flush (in addition to the 'cur' page).
+ */
+ if (diff >= HV_TLB_FLUSH_UNIT)
+ gva_list[gva_n] |= ~PAGE_MASK;
+ else if (diff)
+ gva_list[gva_n] |= (diff - 1) >> PAGE_SHIFT;
+
+ cur += HV_TLB_FLUSH_UNIT;
+ gva_n++;
+
+ } while (cur < end);
+
+ return gva_n - offset;
+}
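
Each gva_list entry names a starting page and, in its low 12 bits, up to 4095 additional pages, so one entry covers at most HV_TLB_FLUSH_UNIT bytes (16 MiB with 4 KiB pages). A standalone sketch of the same encoding, assuming 4 KiB pages; it mirrors, rather than reuses, the function above:

#include <stdio.h>
#include <stdint.h>

#define EX_PAGE_SHIFT 12
#define EX_PAGE_SIZE  ((uint64_t)1 << EX_PAGE_SHIFT)
#define EX_PAGE_MASK  (~(EX_PAGE_SIZE - 1))
#define EX_FLUSH_UNIT (4096 * EX_PAGE_SIZE)	/* 16 MiB per list entry */

static int example_fill_gva_list(uint64_t list[], uint64_t start, uint64_t end)
{
	int n = 0;
	uint64_t cur = start, diff;

	do {
		diff = end > cur ? end - cur : 0;
		list[n] = cur & EX_PAGE_MASK;
		if (diff >= EX_FLUSH_UNIT)
			list[n] |= ~EX_PAGE_MASK;	/* "4095 extra pages" */
		else if (diff)
			list[n] |= (diff - 1) >> EX_PAGE_SHIFT;
		cur += EX_FLUSH_UNIT;
		n++;
	} while (cur < end);

	return n;
}

int main(void)
{
	uint64_t list[4];
	/* Flush three pages starting at 0x7f0000000000. */
	int n = example_fill_gva_list(list, 0x7f0000000000ULL, 0x7f0000003000ULL);

	/* -> 1 entry, 0x7f0000000002: the base page plus 2 additional pages. */
	printf("%d entries, first entry = 0x%llx\n", n, (unsigned long long)list[0]);
	return 0;
}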
+
+/* Return the number of banks in the resulting vp_set */
+static inline int cpumask_to_vp_set(struct hv_flush_pcpu_ex *flush,
+ const struct cpumask *cpus)
+{
+ int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;
+
+ /*
+ * Some banks may end up being empty but this is acceptable.
+ */
+ for_each_cpu(cpu, cpus) {
+ vcpu = hv_cpu_number_to_vp_number(cpu);
+ vcpu_bank = vcpu / 64;
+ vcpu_offset = vcpu % 64;
+
+ /* valid_bank_mask can represent up to 64 banks */
+ if (vcpu_bank >= 64)
+ return 0;
+
+ __set_bit(vcpu_offset, (unsigned long *)
+ &flush->hv_vp_set.bank_contents[vcpu_bank]);
+ if (vcpu_bank >= nr_bank)
+ nr_bank = vcpu_bank + 1;
+ }
+ flush->hv_vp_set.valid_bank_mask = GENMASK_ULL(nr_bank - 1, 0);
+
+ return nr_bank;
+}
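
In the extended hypercall format a virtual processor n lands in bank n / 64 at bit n % 64, and valid_bank_mask records which banks are transmitted. A small worked example of that arithmetic (illustration only; GENMASK_ULL(nr_bank - 1, 0) is spelled out by hand):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int vp = 70;
	unsigned int bank = vp / 64;		/* -> 1 */
	unsigned int offset = vp % 64;		/* -> 6 */
	int nr_bank = bank + 1;			/* banks 0..1 are in use */
	uint64_t valid_bank_mask =
		(nr_bank < 64) ? ((1ULL << nr_bank) - 1) : ~0ULL;

	/* prints: vp 70 -> bank 1, bit 6, valid_bank_mask 0x3 */
	printf("vp %u -> bank %u, bit %u, valid_bank_mask 0x%llx\n",
	       vp, bank, offset, (unsigned long long)valid_bank_mask);
	return 0;
}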
+
+static void hyperv_flush_tlb_others(const struct cpumask *cpus,
+ const struct flush_tlb_info *info)
+{
+ int cpu, vcpu, gva_n, max_gvas;
+ struct hv_flush_pcpu *flush;
+ u64 status = U64_MAX;
+ unsigned long flags;
+
+ trace_hyperv_mmu_flush_tlb_others(cpus, info);
+
+ if (!pcpu_flush || !hv_hypercall_pg)
+ goto do_native;
+
+ if (cpumask_empty(cpus))
+ return;
+
+ local_irq_save(flags);
+
+ flush = this_cpu_ptr(pcpu_flush);
+
+ if (info->mm) {
+ flush->address_space = virt_to_phys(info->mm->pgd);
+ flush->flags = 0;
+ } else {
+ flush->address_space = 0;
+ flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
+ }
+
+ flush->processor_mask = 0;
+ if (cpumask_equal(cpus, cpu_present_mask)) {
+ flush->flags |= HV_FLUSH_ALL_PROCESSORS;
+ } else {
+ for_each_cpu(cpu, cpus) {
+ vcpu = hv_cpu_number_to_vp_number(cpu);
+ if (vcpu >= 64)
+ goto do_native;
+
+ __set_bit(vcpu, (unsigned long *)
+ &flush->processor_mask);
+ }
+ }
+
+ /*
+ * We can flush not more than max_gvas with one hypercall. Flush the
+ * whole address space if we were asked to do more.
+ */
+ max_gvas = (PAGE_SIZE - sizeof(*flush)) / sizeof(flush->gva_list[0]);
+
+ if (info->end == TLB_FLUSH_ALL) {
+ flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY;
+ status = hv_do_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE,
+ flush, NULL);
+ } else if (info->end &&
+ ((info->end - info->start)/HV_TLB_FLUSH_UNIT) > max_gvas) {
+ status = hv_do_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE,
+ flush, NULL);
+ } else {
+ gva_n = fill_gva_list(flush->gva_list, 0,
+ info->start, info->end);
+ status = hv_do_rep_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST,
+ gva_n, 0, flush, NULL);
+ }
+
+ local_irq_restore(flags);
+
+ if (!(status & HV_HYPERCALL_RESULT_MASK))
+ return;
+do_native:
+ native_flush_tlb_others(cpus, info);
+}
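
The per-cpu flush argument is a single page, so max_gvas is simply how many u64 list entries fit after the fixed header. Assuming 4 KiB pages and no padding in the three-field header, that is (4096 - 24) / 8 = 509 entries, i.e. roughly 8 GiB of range per hypercall before the code falls back to flushing the whole address space. A sketch of the same arithmetic:

#include <stdio.h>
#include <stdint.h>

/* Mirror of the fixed part of struct hv_flush_pcpu, used for sizing only. */
struct ex_flush_header {
	uint64_t address_space;
	uint64_t flags;
	uint64_t processor_mask;
};

int main(void)
{
	unsigned long page_size = 4096;		/* assumed 4 KiB pages */
	unsigned long max_gvas =
		(page_size - sizeof(struct ex_flush_header)) / sizeof(uint64_t);

	/* -> max_gvas = 509 entries (8144 MiB max range) */
	printf("max_gvas = %lu entries (%lu MiB max range)\n",
	       max_gvas, max_gvas * 16);
	return 0;
}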
+
+static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
+ const struct flush_tlb_info *info)
+{
+ int nr_bank = 0, max_gvas, gva_n;
+ struct hv_flush_pcpu_ex *flush;
+ u64 status = U64_MAX;
+ unsigned long flags;
+
+ trace_hyperv_mmu_flush_tlb_others(cpus, info);
+
+ if (!pcpu_flush_ex || !hv_hypercall_pg)
+ goto do_native;
+
+ if (cpumask_empty(cpus))
+ return;
+
+ local_irq_save(flags);
+
+ flush = this_cpu_ptr(pcpu_flush_ex);
+
+ if (info->mm) {
+ flush->address_space = virt_to_phys(info->mm->pgd);
+ flush->flags = 0;
+ } else {
+ flush->address_space = 0;
+ flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
+ }
+
+ flush->hv_vp_set.valid_bank_mask = 0;
+
+ if (!cpumask_equal(cpus, cpu_present_mask)) {
+ flush->hv_vp_set.format = HV_GENERIC_SET_SPARCE_4K;
+ nr_bank = cpumask_to_vp_set(flush, cpus);
+ }
+
+ if (!nr_bank) {
+ flush->hv_vp_set.format = HV_GENERIC_SET_ALL;
+ flush->flags |= HV_FLUSH_ALL_PROCESSORS;
+ }
+
+ /*
+ * We can flush not more than max_gvas with one hypercall. Flush the
+ * whole address space if we were asked to do more.
+ */
+ max_gvas =
+ (PAGE_SIZE - sizeof(*flush) - nr_bank *
+ sizeof(flush->hv_vp_set.bank_contents[0])) /
+ sizeof(flush->gva_list[0]);
+
+ if (info->end == TLB_FLUSH_ALL) {
+ flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY;
+ status = hv_do_rep_hypercall(
+ HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
+ 0, nr_bank + 2, flush, NULL);
+ } else if (info->end &&
+ ((info->end - info->start)/HV_TLB_FLUSH_UNIT) > max_gvas) {
+ status = hv_do_rep_hypercall(
+ HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
+ 0, nr_bank + 2, flush, NULL);
+ } else {
+ gva_n = fill_gva_list(flush->gva_list, nr_bank,
+ info->start, info->end);
+ status = hv_do_rep_hypercall(
+ HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX,
+ gva_n, nr_bank + 2, flush, NULL);
+ }
+
+ local_irq_restore(flags);
+
+ if (!(status & HV_HYPERCALL_RESULT_MASK))
+ return;
+do_native:
+ native_flush_tlb_others(cpus, info);
+}
+
+void hyperv_setup_mmu_ops(void)
+{
+ if (!(ms_hyperv.hints & HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED))
+ return;
+
+ setup_clear_cpu_cap(X86_FEATURE_PCID);
+
+ if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED)) {
+ pr_info("Using hypercall for remote TLB flush\n");
+ pv_mmu_ops.flush_tlb_others = hyperv_flush_tlb_others;
+ } else {
+ pr_info("Using ext hypercall for remote TLB flush\n");
+ pv_mmu_ops.flush_tlb_others = hyperv_flush_tlb_others_ex;
+ }
+}
+
+void hyper_alloc_mmu(void)
+{
+ if (!(ms_hyperv.hints & HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED))
+ return;
+
+ if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
+ pcpu_flush = __alloc_percpu(PAGE_SIZE, PAGE_SIZE);
+ else
+ pcpu_flush_ex = __alloc_percpu(PAGE_SIZE, PAGE_SIZE);
+}
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index 724153797209..e0bb46c02857 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -226,7 +226,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
if (ksig->ka.sa.sa_flags & SA_ONSTACK)
sp = sigsp(sp, ksig);
/* This is the legacy signal stack switching. */
- else if ((regs->ss & 0xffff) != __USER32_DS &&
+ else if (regs->ss != __USER32_DS &&
!(ksig->ka.sa.sa_flags & SA_RESTORER) &&
ksig->ka.sa.sa_restorer)
sp = (unsigned long) ksig->ka.sa.sa_restorer;
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 2efc768e4362..72d867f6b518 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -150,8 +150,6 @@ static inline void disable_acpi(void) { }
extern int x86_acpi_numa_init(void);
#endif /* CONFIG_ACPI_NUMA */
-#define acpi_unlazy_tlb(x) leave_mm(x)
-
#ifdef CONFIG_ACPI_APEI
static inline pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
{
@@ -162,12 +160,13 @@ static inline pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
* you call efi_mem_attributes() during boot and at runtime,
* you could theoretically see different attributes.
*
- * Since we are yet to see any x86 platforms that require
- * anything other than PAGE_KERNEL (some arm64 platforms
- * require the equivalent of PAGE_KERNEL_NOCACHE), return that
- * until we know differently.
+ * We are yet to see any x86 platforms that require anything
+ * other than PAGE_KERNEL (some ARM64 platforms require the
+ * equivalent of PAGE_KERNEL_NOCACHE). Additionally, if SME
+ * is active, the ACPI information will not be encrypted,
+ * so return PAGE_KERNEL_NOENC until we know differently.
*/
- return PAGE_KERNEL;
+ return PAGE_KERNEL_NOENC;
}
#endif
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 7a9df3beb89b..676ee5807d86 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -74,6 +74,9 @@
# define _ASM_EXTABLE_EX(from, to) \
_ASM_EXTABLE_HANDLE(from, to, ex_handler_ext)
+# define _ASM_EXTABLE_REFCOUNT(from, to) \
+ _ASM_EXTABLE_HANDLE(from, to, ex_handler_refcount)
+
# define _ASM_NOKPROBE(entry) \
.pushsection "_kprobe_blacklist","aw" ; \
_ASM_ALIGN ; \
@@ -123,6 +126,9 @@
# define _ASM_EXTABLE_EX(from, to) \
_ASM_EXTABLE_HANDLE(from, to, ex_handler_ext)
+# define _ASM_EXTABLE_REFCOUNT(from, to) \
+ _ASM_EXTABLE_HANDLE(from, to, ex_handler_refcount)
+
/* For C file, we already have NOKPROBE_SYMBOL macro */
#endif
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 33380b871463..0874ebda3069 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -197,35 +197,56 @@ static inline int atomic_xchg(atomic_t *v, int new)
return xchg(&v->counter, new);
}
-#define ATOMIC_OP(op) \
-static inline void atomic_##op(int i, atomic_t *v) \
-{ \
- asm volatile(LOCK_PREFIX #op"l %1,%0" \
- : "+m" (v->counter) \
- : "ir" (i) \
- : "memory"); \
+static inline void atomic_and(int i, atomic_t *v)
+{
+ asm volatile(LOCK_PREFIX "andl %1,%0"
+ : "+m" (v->counter)
+ : "ir" (i)
+ : "memory");
+}
+
+static inline int atomic_fetch_and(int i, atomic_t *v)
+{
+ int val = atomic_read(v);
+
+ do { } while (!atomic_try_cmpxchg(v, &val, val & i));
+
+ return val;
}
-#define ATOMIC_FETCH_OP(op, c_op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
-{ \
- int val = atomic_read(v); \
- do { \
- } while (!atomic_try_cmpxchg(v, &val, val c_op i)); \
- return val; \
+static inline void atomic_or(int i, atomic_t *v)
+{
+ asm volatile(LOCK_PREFIX "orl %1,%0"
+ : "+m" (v->counter)
+ : "ir" (i)
+ : "memory");
}
-#define ATOMIC_OPS(op, c_op) \
- ATOMIC_OP(op) \
- ATOMIC_FETCH_OP(op, c_op)
+static inline int atomic_fetch_or(int i, atomic_t *v)
+{
+ int val = atomic_read(v);
-ATOMIC_OPS(and, &)
-ATOMIC_OPS(or , |)
-ATOMIC_OPS(xor, ^)
+ do { } while (!atomic_try_cmpxchg(v, &val, val | i));
-#undef ATOMIC_OPS
-#undef ATOMIC_FETCH_OP
-#undef ATOMIC_OP
+ return val;
+}
+
+static inline void atomic_xor(int i, atomic_t *v)
+{
+ asm volatile(LOCK_PREFIX "xorl %1,%0"
+ : "+m" (v->counter)
+ : "ir" (i)
+ : "memory");
+}
+
+static inline int atomic_fetch_xor(int i, atomic_t *v)
+{
+ int val = atomic_read(v);
+
+ do { } while (!atomic_try_cmpxchg(v, &val, val ^ i));
+
+ return val;
+}
/**
* __atomic_add_unless - add unless the number is already a given value
@@ -239,10 +260,12 @@ ATOMIC_OPS(xor, ^)
static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
int c = atomic_read(v);
+
do {
if (unlikely(c == u))
break;
} while (!atomic_try_cmpxchg(v, &c, c + a));
+
return c;
}
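
All of the fetch_*() helpers above follow the same try_cmpxchg loop: read the value once, then retry the compare-and-swap, letting a failed attempt refresh the expected value. A userspace sketch of that idiom using the GCC/Clang __atomic builtins (not the kernel primitives):

#include <stdio.h>

/* fetch_or built from a compare-exchange loop, mirroring the pattern above. */
static int example_fetch_or(int *v, int i)
{
	int val = __atomic_load_n(v, __ATOMIC_RELAXED);

	/* On failure the builtin writes the current value back into 'val'. */
	while (!__atomic_compare_exchange_n(v, &val, val | i, 0,
					    __ATOMIC_SEQ_CST, __ATOMIC_RELAXED))
		;

	return val;
}

int main(void)
{
	int v = 0x3;
	int old = example_fetch_or(&v, 0x4);

	printf("old=%#x new=%#x\n", old, v);	/* old=0x3 new=0x7 */
	return 0;
}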
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index 71d7705fb303..9e206f31ce2a 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -312,37 +312,70 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
#undef alternative_atomic64
#undef __alternative_atomic64
-#define ATOMIC64_OP(op, c_op) \
-static inline void atomic64_##op(long long i, atomic64_t *v) \
-{ \
- long long old, c = 0; \
- while ((old = atomic64_cmpxchg(v, c, c c_op i)) != c) \
- c = old; \
+static inline void atomic64_and(long long i, atomic64_t *v)
+{
+ long long old, c = 0;
+
+ while ((old = atomic64_cmpxchg(v, c, c & i)) != c)
+ c = old;
}
-#define ATOMIC64_FETCH_OP(op, c_op) \
-static inline long long atomic64_fetch_##op(long long i, atomic64_t *v) \
-{ \
- long long old, c = 0; \
- while ((old = atomic64_cmpxchg(v, c, c c_op i)) != c) \
- c = old; \
- return old; \
+static inline long long atomic64_fetch_and(long long i, atomic64_t *v)
+{
+ long long old, c = 0;
+
+ while ((old = atomic64_cmpxchg(v, c, c & i)) != c)
+ c = old;
+
+ return old;
}
-ATOMIC64_FETCH_OP(add, +)
+static inline void atomic64_or(long long i, atomic64_t *v)
+{
+ long long old, c = 0;
-#define atomic64_fetch_sub(i, v) atomic64_fetch_add(-(i), (v))
+ while ((old = atomic64_cmpxchg(v, c, c | i)) != c)
+ c = old;
+}
+
+static inline long long atomic64_fetch_or(long long i, atomic64_t *v)
+{
+ long long old, c = 0;
+
+ while ((old = atomic64_cmpxchg(v, c, c | i)) != c)
+ c = old;
+
+ return old;
+}
-#define ATOMIC64_OPS(op, c_op) \
- ATOMIC64_OP(op, c_op) \
- ATOMIC64_FETCH_OP(op, c_op)
+static inline void atomic64_xor(long long i, atomic64_t *v)
+{
+ long long old, c = 0;
+
+ while ((old = atomic64_cmpxchg(v, c, c ^ i)) != c)
+ c = old;
+}
-ATOMIC64_OPS(and, &)
-ATOMIC64_OPS(or, |)
-ATOMIC64_OPS(xor, ^)
+static inline long long atomic64_fetch_xor(long long i, atomic64_t *v)
+{
+ long long old, c = 0;
+
+ while ((old = atomic64_cmpxchg(v, c, c ^ i)) != c)
+ c = old;
+
+ return old;
+}
-#undef ATOMIC64_OPS
-#undef ATOMIC64_FETCH_OP
-#undef ATOMIC64_OP
+static inline long long atomic64_fetch_add(long long i, atomic64_t *v)
+{
+ long long old, c = 0;
+
+ while ((old = atomic64_cmpxchg(v, c, c + i)) != c)
+ c = old;
+
+ return old;
+}
+
+#define atomic64_fetch_sub(i, v) atomic64_fetch_add(-(i), (v))
#endif /* _ASM_X86_ATOMIC64_32_H */
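
On 32-bit there is no single locked 64-bit logic instruction, so each operation above is emulated with an atomic64_cmpxchg() (cmpxchg8b) loop in which a failed exchange hands back the current value as the next guess. A standalone sketch of that value-returning CAS loop using a compiler builtin (illustration, not the kernel code):

#include <stdio.h>

/* Value-returning CAS loop, mirroring the 32-bit atomic64_fetch_or() above. */
static long long example_atomic64_fetch_or(long long *v, long long i)
{
	long long old, c = 0;

	/* Start from a guess of 0; each failed CAS returns the real value. */
	while ((old = __sync_val_compare_and_swap(v, c, c | i)) != c)
		c = old;

	return old;
}

int main(void)
{
	long long v = 0x100000000LL;
	long long old = example_atomic64_fetch_or(&v, 0x1);

	printf("old=%#llx new=%#llx\n", old, v);	/* old=0x100000000 new=0x100000001 */
	return 0;
}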
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 6189a433c9a9..5d9de36a2f04 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -177,7 +177,7 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
}
#define atomic64_try_cmpxchg atomic64_try_cmpxchg
-static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, long *old, long new)
+static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, long new)
{
return try_cmpxchg(&v->counter, old, new);
}
@@ -198,7 +198,7 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
*/
static inline bool atomic64_add_unless(atomic64_t *v, long a, long u)
{
- long c = atomic64_read(v);
+ s64 c = atomic64_read(v);
do {
if (unlikely(c == u))
return false;
@@ -217,7 +217,7 @@ static inline bool atomic64_add_unless(atomic64_t *v, long a, long u)
*/
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
- long dec, c = atomic64_read(v);
+ s64 dec, c = atomic64_read(v);
do {
dec = c - 1;
if (unlikely(dec < 0))
@@ -226,34 +226,55 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
return dec;
}
-#define ATOMIC64_OP(op) \
-static inline void atomic64_##op(long i, atomic64_t *v) \
-{ \
- asm volatile(LOCK_PREFIX #op"q %1,%0" \
- : "+m" (v->counter) \
- : "er" (i) \
- : "memory"); \
+static inline void atomic64_and(long i, atomic64_t *v)
+{
+ asm volatile(LOCK_PREFIX "andq %1,%0"
+ : "+m" (v->counter)
+ : "er" (i)
+ : "memory");
}
-#define ATOMIC64_FETCH_OP(op, c_op) \
-static inline long atomic64_fetch_##op(long i, atomic64_t *v) \
-{ \
- long val = atomic64_read(v); \
- do { \
- } while (!atomic64_try_cmpxchg(v, &val, val c_op i)); \
- return val; \
+static inline long atomic64_fetch_and(long i, atomic64_t *v)
+{
+ s64 val = atomic64_read(v);
+
+ do {
+ } while (!atomic64_try_cmpxchg(v, &val, val & i));
+ return val;
}
-#define ATOMIC64_OPS(op, c_op) \
- ATOMIC64_OP(op) \
- ATOMIC64_FETCH_OP(op, c_op)
+static inline void atomic64_or(long i, atomic64_t *v)
+{
+ asm volatile(LOCK_PREFIX "orq %1,%0"
+ : "+m" (v->counter)
+ : "er" (i)
+ : "memory");
+}
-ATOMIC64_OPS(and, &)
-ATOMIC64_OPS(or, |)
-ATOMIC64_OPS(xor, ^)
+static inline long atomic64_fetch_or(long i, atomic64_t *v)
+{
+ s64 val = atomic64_read(v);
-#undef ATOMIC64_OPS
-#undef ATOMIC64_FETCH_OP
-#undef ATOMIC64_OP
+ do {
+ } while (!atomic64_try_cmpxchg(v, &val, val | i));
+ return val;
+}
+
+static inline void atomic64_xor(long i, atomic64_t *v)
+{
+ asm volatile(LOCK_PREFIX "xorq %1,%0"
+ : "+m" (v->counter)
+ : "er" (i)
+ : "memory");
+}
+
+static inline long atomic64_fetch_xor(long i, atomic64_t *v)
+{
+ s64 val = atomic64_read(v);
+
+ do {
+ } while (!atomic64_try_cmpxchg(v, &val, val ^ i));
+ return val;
+}
#endif /* _ASM_X86_ATOMIC64_64_H */
diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h
index 39e702d90cdb..aa6b2023d8f8 100644
--- a/arch/x86/include/asm/bug.h
+++ b/arch/x86/include/asm/bug.h
@@ -35,7 +35,7 @@
#define _BUG_FLAGS(ins, flags) \
do { \
asm volatile("1:\t" ins "\n" \
- ".pushsection __bug_table,\"a\"\n" \
+ ".pushsection __bug_table,\"aw\"\n" \
"2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \
"\t" __BUG_REL(%c0) "\t# bug_entry::file\n" \
"\t.word %c1" "\t# bug_entry::line\n" \
@@ -52,7 +52,7 @@ do { \
#define _BUG_FLAGS(ins, flags) \
do { \
asm volatile("1:\t" ins "\n" \
- ".pushsection __bug_table,\"a\"\n" \
+ ".pushsection __bug_table,\"aw\"\n" \
"2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \
"\t.word %c0" "\t# bug_entry::flags\n" \
"\t.org 2b+%c1\n" \
diff --git a/arch/x86/include/asm/cmdline.h b/arch/x86/include/asm/cmdline.h
index e01f7f7ccb0c..84ae170bc3d0 100644
--- a/arch/x86/include/asm/cmdline.h
+++ b/arch/x86/include/asm/cmdline.h
@@ -2,5 +2,7 @@
#define _ASM_X86_CMDLINE_H
int cmdline_find_option_bool(const char *cmdline_ptr, const char *option);
+int cmdline_find_option(const char *cmdline_ptr, const char *option,
+ char *buffer, int bufsize);
#endif /* _ASM_X86_CMDLINE_H */
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index d90296d061e8..b5069e802d5c 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -157,7 +157,7 @@ extern void __add_wrong_size(void)
#define __raw_try_cmpxchg(_ptr, _pold, _new, size, lock) \
({ \
bool success; \
- __typeof__(_ptr) _old = (_pold); \
+ __typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \
__typeof__(*(_ptr)) __old = *_old; \
__typeof__(*(_ptr)) __new = (_new); \
switch (size) { \
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index ca3c48c0872f..42bbbf0f173d 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -177,7 +177,7 @@
#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */
#define X86_FEATURE_BPEXT (6*32+26) /* data breakpoint extension */
#define X86_FEATURE_PTSC ( 6*32+27) /* performance time-stamp counter */
-#define X86_FEATURE_PERFCTR_L2 ( 6*32+28) /* L2 performance counter extensions */
+#define X86_FEATURE_PERFCTR_LLC ( 6*32+28) /* Last Level Cache performance counter extensions */
#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX) */
/*
@@ -196,6 +196,7 @@
#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
+#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */
#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
@@ -286,7 +287,7 @@
#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */
#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */
#define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */
-#define X86_FEATURE_VIRTUAL_VMLOAD_VMSAVE (15*32+15) /* Virtual VMLOAD VMSAVE */
+#define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */
#define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index d0a21b12dd58..1a2ba368da39 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -5,6 +5,7 @@
#include <asm/ldt.h>
#include <asm/mmu.h>
#include <asm/fixmap.h>
+#include <asm/irq_vectors.h>
#include <linux/smp.h>
#include <linux/percpu.h>
@@ -22,7 +23,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
desc->s = 1;
desc->dpl = 0x3;
desc->p = info->seg_not_present ^ 1;
- desc->limit = (info->limit & 0xf0000) >> 16;
+ desc->limit1 = (info->limit & 0xf0000) >> 16;
desc->avl = info->useable;
desc->d = info->seg_32bit;
desc->g = info->limit_in_pages;
@@ -83,33 +84,25 @@ static inline phys_addr_t get_cpu_gdt_paddr(unsigned int cpu)
return per_cpu_ptr_to_phys(get_cpu_gdt_rw(cpu));
}
-#ifdef CONFIG_X86_64
-
static inline void pack_gate(gate_desc *gate, unsigned type, unsigned long func,
unsigned dpl, unsigned ist, unsigned seg)
{
- gate->offset_low = PTR_LOW(func);
+ gate->offset_low = (u16) func;
+ gate->bits.p = 1;
+ gate->bits.dpl = dpl;
+ gate->bits.zero = 0;
+ gate->bits.type = type;
+ gate->offset_middle = (u16) (func >> 16);
+#ifdef CONFIG_X86_64
gate->segment = __KERNEL_CS;
- gate->ist = ist;
- gate->p = 1;
- gate->dpl = dpl;
- gate->zero0 = 0;
- gate->zero1 = 0;
- gate->type = type;
- gate->offset_middle = PTR_MIDDLE(func);
- gate->offset_high = PTR_HIGH(func);
-}
-
+ gate->bits.ist = ist;
+ gate->reserved = 0;
+ gate->offset_high = (u32) (func >> 32);
#else
-static inline void pack_gate(gate_desc *gate, unsigned char type,
- unsigned long base, unsigned dpl, unsigned flags,
- unsigned short seg)
-{
- gate->a = (seg << 16) | (base & 0xffff);
- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
-}
-
+ gate->segment = seg;
+ gate->bits.ist = 0;
#endif
+}
static inline int desc_empty(const void *ptr)
{
@@ -173,35 +166,22 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
memcpy(&gdt[entry], desc, size);
}
-static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
- unsigned long limit, unsigned char type,
- unsigned char flags)
-{
- desc->a = ((base & 0xffff) << 16) | (limit & 0xffff);
- desc->b = (base & 0xff000000) | ((base & 0xff0000) >> 16) |
- (limit & 0x000f0000) | ((type & 0xff) << 8) |
- ((flags & 0xf) << 20);
- desc->p = 1;
-}
-
-
-static inline void set_tssldt_descriptor(void *d, unsigned long addr, unsigned type, unsigned size)
+static inline void set_tssldt_descriptor(void *d, unsigned long addr,
+ unsigned type, unsigned size)
{
-#ifdef CONFIG_X86_64
- struct ldttss_desc64 *desc = d;
+ struct ldttss_desc *desc = d;
memset(desc, 0, sizeof(*desc));
- desc->limit0 = size & 0xFFFF;
- desc->base0 = PTR_LOW(addr);
- desc->base1 = PTR_MIDDLE(addr) & 0xFF;
+ desc->limit0 = (u16) size;
+ desc->base0 = (u16) addr;
+ desc->base1 = (addr >> 16) & 0xFF;
desc->type = type;
desc->p = 1;
desc->limit1 = (size >> 16) & 0xF;
- desc->base2 = (PTR_MIDDLE(addr) >> 8) & 0xFF;
- desc->base3 = PTR_HIGH(addr);
-#else
- pack_descriptor((struct desc_struct *)d, addr, size, 0x80 | type, 0);
+ desc->base2 = (addr >> 24) & 0xFF;
+#ifdef CONFIG_X86_64
+ desc->base3 = (u32) (addr >> 32);
#endif
}
@@ -401,147 +381,20 @@ static inline void set_desc_base(struct desc_struct *desc, unsigned long base)
static inline unsigned long get_desc_limit(const struct desc_struct *desc)
{
- return desc->limit0 | (desc->limit << 16);
+ return desc->limit0 | (desc->limit1 << 16);
}
static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
{
desc->limit0 = limit & 0xffff;
- desc->limit = (limit >> 16) & 0xf;
-}
-
-#ifdef CONFIG_X86_64
-static inline void set_nmi_gate(int gate, void *addr)
-{
- gate_desc s;
-
- pack_gate(&s, GATE_INTERRUPT, (unsigned long)addr, 0, 0, __KERNEL_CS);
- write_idt_entry(debug_idt_table, gate, &s);
+ desc->limit1 = (limit >> 16) & 0xf;
}
-#endif
-#ifdef CONFIG_TRACING
-extern struct desc_ptr trace_idt_descr;
-extern gate_desc trace_idt_table[];
-static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
-{
- write_idt_entry(trace_idt_table, entry, gate);
-}
+void update_intr_gate(unsigned int n, const void *addr);
+void alloc_intr_gate(unsigned int n, const void *addr);
-static inline void _trace_set_gate(int gate, unsigned type, void *addr,
- unsigned dpl, unsigned ist, unsigned seg)
-{
- gate_desc s;
-
- pack_gate(&s, type, (unsigned long)addr, dpl, ist, seg);
- /*
- * does not need to be atomic because it is only done once at
- * setup time
- */
- write_trace_idt_entry(gate, &s);
-}
-#else
-static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
-{
-}
-
-#define _trace_set_gate(gate, type, addr, dpl, ist, seg)
-#endif
-
-static inline void _set_gate(int gate, unsigned type, void *addr,
- unsigned dpl, unsigned ist, unsigned seg)
-{
- gate_desc s;
-
- pack_gate(&s, type, (unsigned long)addr, dpl, ist, seg);
- /*
- * does not need to be atomic because it is only done once at
- * setup time
- */
- write_idt_entry(idt_table, gate, &s);
- write_trace_idt_entry(gate, &s);
-}
-
-/*
- * This needs to use 'idt_table' rather than 'idt', and
- * thus use the _nonmapped_ version of the IDT, as the
- * Pentium F0 0F bugfix can have resulted in the mapped
- * IDT being write-protected.
- */
-#define set_intr_gate_notrace(n, addr) \
- do { \
- BUG_ON((unsigned)n > 0xFF); \
- _set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0, \
- __KERNEL_CS); \
- } while (0)
-
-#define set_intr_gate(n, addr) \
- do { \
- set_intr_gate_notrace(n, addr); \
- _trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\
- 0, 0, __KERNEL_CS); \
- } while (0)
-
-extern int first_system_vector;
-/* used_vectors is BITMAP for irq is not managed by percpu vector_irq */
extern unsigned long used_vectors[];
-static inline void alloc_system_vector(int vector)
-{
- if (!test_bit(vector, used_vectors)) {
- set_bit(vector, used_vectors);
- if (first_system_vector > vector)
- first_system_vector = vector;
- } else {
- BUG();
- }
-}
-
-#define alloc_intr_gate(n, addr) \
- do { \
- alloc_system_vector(n); \
- set_intr_gate(n, addr); \
- } while (0)
-
-/*
- * This routine sets up an interrupt gate at directory privilege level 3.
- */
-static inline void set_system_intr_gate(unsigned int n, void *addr)
-{
- BUG_ON((unsigned)n > 0xFF);
- _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
-}
-
-static inline void set_system_trap_gate(unsigned int n, void *addr)
-{
- BUG_ON((unsigned)n > 0xFF);
- _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
-}
-
-static inline void set_trap_gate(unsigned int n, void *addr)
-{
- BUG_ON((unsigned)n > 0xFF);
- _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
-}
-
-static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
-{
- BUG_ON((unsigned)n > 0xFF);
- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
-}
-
-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
-{
- BUG_ON((unsigned)n > 0xFF);
- _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
-}
-
-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
-{
- BUG_ON((unsigned)n > 0xFF);
- _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
-}
-
#ifdef CONFIG_X86_64
DECLARE_PER_CPU(u32, debug_idt_ctr);
static inline bool is_debug_idt_enabled(void)
@@ -567,31 +420,6 @@ static inline void load_debug_idt(void)
}
#endif
-#ifdef CONFIG_TRACING
-extern atomic_t trace_idt_ctr;
-static inline bool is_trace_idt_enabled(void)
-{
- if (atomic_read(&trace_idt_ctr))
- return true;
-
- return false;
-}
-
-static inline void load_trace_idt(void)
-{
- load_idt((const struct desc_ptr *)&trace_idt_descr);
-}
-#else
-static inline bool is_trace_idt_enabled(void)
-{
- return false;
-}
-
-static inline void load_trace_idt(void)
-{
-}
-#endif
-
/*
* The load_current_idt() must be called with interrupts disabled
* to avoid races. That way the IDT will always be set back to the expected
@@ -603,9 +431,25 @@ static inline void load_current_idt(void)
{
if (is_debug_idt_enabled())
load_debug_idt();
- else if (is_trace_idt_enabled())
- load_trace_idt();
else
load_idt((const struct desc_ptr *)&idt_descr);
}
+
+extern void idt_setup_early_handler(void);
+extern void idt_setup_early_traps(void);
+extern void idt_setup_traps(void);
+extern void idt_setup_apic_and_irq_gates(void);
+
+#ifdef CONFIG_X86_64
+extern void idt_setup_early_pf(void);
+extern void idt_setup_ist_traps(void);
+extern void idt_setup_debugidt_traps(void);
+#else
+static inline void idt_setup_early_pf(void) { }
+static inline void idt_setup_ist_traps(void) { }
+static inline void idt_setup_debugidt_traps(void) { }
+#endif
+
+extern void idt_invalidate(void *addr);
+
#endif /* _ASM_X86_DESC_H */
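The hunks above remove the open-coded gate-programming helpers (set_intr_gate(), set_system_intr_gate(), alloc_intr_gate() and friends) from desc.h and replace them with a small set of idt_setup_*() entry points whose implementations live elsewhere in the series. As a rough sketch of how a boot path might drive the new interface — the exact call sites and ordering are not part of this hunk, so the sequence below is an assumption:

        /* Illustrative boot-time ordering only -- not taken from this diff. */
        void __init example_idt_bringup(void)
        {
                idt_setup_early_traps();        /* minimal early exception handling   */
                idt_setup_early_pf();           /* 64-bit early page-fault handler    */
                idt_setup_traps();              /* the regular exception vectors      */
                idt_setup_ist_traps();          /* IST-based vectors on 64-bit        */
                idt_setup_apic_and_irq_gates(); /* APIC vectors + external IRQ gates  */
        }

On 32-bit builds the 64-bit-only calls compile to the inline stubs declared above, so a caller written this way needs no #ifdefs.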
diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
index 49265345d4d2..346d252029b7 100644
--- a/arch/x86/include/asm/desc_defs.h
+++ b/arch/x86/include/asm/desc_defs.h
@@ -11,34 +11,30 @@
#include <linux/types.h>
-/*
- * FIXME: Accessing the desc_struct through its fields is more elegant,
- * and should be the one valid thing to do. However, a lot of open code
- * still touches the a and b accessors, and doing this allow us to do it
- * incrementally. We keep the signature as a struct, rather than a union,
- * so we can get rid of it transparently in the future -- glommer
- */
/* 8 byte segment descriptor */
struct desc_struct {
- union {
- struct {
- unsigned int a;
- unsigned int b;
- };
- struct {
- u16 limit0;
- u16 base0;
- unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
- unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
- };
- };
+ u16 limit0;
+ u16 base0;
+ u16 base1: 8, type: 4, s: 1, dpl: 2, p: 1;
+ u16 limit1: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
} __attribute__((packed));
-#define GDT_ENTRY_INIT(flags, base, limit) { { { \
- .a = ((limit) & 0xffff) | (((base) & 0xffff) << 16), \
- .b = (((base) & 0xff0000) >> 16) | (((flags) & 0xf0ff) << 8) | \
- ((limit) & 0xf0000) | ((base) & 0xff000000), \
- } } }
+#define GDT_ENTRY_INIT(flags, base, limit) \
+ { \
+ .limit0 = (u16) (limit), \
+ .limit1 = ((limit) >> 16) & 0x0F, \
+ .base0 = (u16) (base), \
+ .base1 = ((base) >> 16) & 0xFF, \
+ .base2 = ((base) >> 24) & 0xFF, \
+ .type = (flags & 0x0f), \
+ .s = (flags >> 4) & 0x01, \
+ .dpl = (flags >> 5) & 0x03, \
+ .p = (flags >> 7) & 0x01, \
+ .avl = (flags >> 12) & 0x01, \
+ .l = (flags >> 13) & 0x01, \
+ .d = (flags >> 14) & 0x01, \
+ .g = (flags >> 15) & 0x01, \
+ }
enum {
GATE_INTERRUPT = 0xE,
@@ -47,49 +43,63 @@ enum {
GATE_TASK = 0x5,
};
-/* 16byte gate */
-struct gate_struct64 {
- u16 offset_low;
- u16 segment;
- unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1;
- u16 offset_middle;
- u32 offset_high;
- u32 zero1;
-} __attribute__((packed));
-
-#define PTR_LOW(x) ((unsigned long long)(x) & 0xFFFF)
-#define PTR_MIDDLE(x) (((unsigned long long)(x) >> 16) & 0xFFFF)
-#define PTR_HIGH(x) ((unsigned long long)(x) >> 32)
-
enum {
DESC_TSS = 0x9,
DESC_LDT = 0x2,
DESCTYPE_S = 0x10, /* !system */
};
-/* LDT or TSS descriptor in the GDT. 16 bytes. */
-struct ldttss_desc64 {
- u16 limit0;
- u16 base0;
- unsigned base1 : 8, type : 5, dpl : 2, p : 1;
- unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
- u32 base3;
- u32 zero1;
+/* LDT or TSS descriptor in the GDT. */
+struct ldttss_desc {
+ u16 limit0;
+ u16 base0;
+
+ u16 base1 : 8, type : 5, dpl : 2, p : 1;
+ u16 limit1 : 4, zero0 : 3, g : 1, base2 : 8;
+#ifdef CONFIG_X86_64
+ u32 base3;
+ u32 zero1;
+#endif
} __attribute__((packed));
+typedef struct ldttss_desc ldt_desc;
+typedef struct ldttss_desc tss_desc;
+
+struct idt_bits {
+ u16 ist : 3,
+ zero : 5,
+ type : 5,
+ dpl : 2,
+ p : 1;
+} __attribute__((packed));
+
+struct gate_struct {
+ u16 offset_low;
+ u16 segment;
+ struct idt_bits bits;
+ u16 offset_middle;
+#ifdef CONFIG_X86_64
+ u32 offset_high;
+ u32 reserved;
+#endif
+} __attribute__((packed));
+
+typedef struct gate_struct gate_desc;
+
+static inline unsigned long gate_offset(const gate_desc *g)
+{
#ifdef CONFIG_X86_64
-typedef struct gate_struct64 gate_desc;
-typedef struct ldttss_desc64 ldt_desc;
-typedef struct ldttss_desc64 tss_desc;
-#define gate_offset(g) ((g).offset_low | ((unsigned long)(g).offset_middle << 16) | ((unsigned long)(g).offset_high << 32))
-#define gate_segment(g) ((g).segment)
+ return g->offset_low | ((unsigned long)g->offset_middle << 16) |
+ ((unsigned long) g->offset_high << 32);
#else
-typedef struct desc_struct gate_desc;
-typedef struct desc_struct ldt_desc;
-typedef struct desc_struct tss_desc;
-#define gate_offset(g) (((g).b & 0xffff0000) | ((g).a & 0x0000ffff))
-#define gate_segment(g) ((g).a >> 16)
+ return g->offset_low | ((unsigned long)g->offset_middle << 16);
#endif
+}
+
+static inline unsigned long gate_segment(const gate_desc *g)
+{
+ return g->segment;
+}
struct desc_ptr {
unsigned short size;
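With the anonymous a/b union gone, GDT_ENTRY_INIT() now spells out every descriptor field by name. A quick worked example, using the flags value 0xc09b that appears elsewhere in this patch (the removed lguest FULL_EXEC_SEGMENT), shows how the flags word is split; the breakdown simply follows the shift/mask expressions in the new macro:

        /* GDT_ENTRY_INIT(0xc09b, 0, 0xfffff) expands to roughly: */
        static const struct desc_struct example_code_seg = {
                .limit0 = 0xffff,       /* limit 15:0                      */
                .base0  = 0x0000,       /* base  15:0                      */
                .base1  = 0x00,         /* base  23:16                     */
                .type   = 0xb,          /* flags 3:0  -> code, read, accessed */
                .s      = 1,            /* flags 4    -> non-system segment   */
                .dpl    = 0,            /* flags 6:5                       */
                .p      = 1,            /* flags 7    -> present           */
                .limit1 = 0xf,          /* limit 19:16                     */
                .avl    = 0,            /* flags 12                        */
                .l      = 0,            /* flags 13                        */
                .d      = 1,            /* flags 14   -> 32-bit default    */
                .g      = 1,            /* flags 15   -> 4K granularity    */
                .base2  = 0x00,         /* base  31:24                     */
        };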
diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
index 5dff775af7cd..c10c9128f54e 100644
--- a/arch/x86/include/asm/disabled-features.h
+++ b/arch/x86/include/asm/disabled-features.h
@@ -21,11 +21,13 @@
# define DISABLE_K6_MTRR (1<<(X86_FEATURE_K6_MTRR & 31))
# define DISABLE_CYRIX_ARR (1<<(X86_FEATURE_CYRIX_ARR & 31))
# define DISABLE_CENTAUR_MCR (1<<(X86_FEATURE_CENTAUR_MCR & 31))
+# define DISABLE_PCID 0
#else
# define DISABLE_VME 0
# define DISABLE_K6_MTRR 0
# define DISABLE_CYRIX_ARR 0
# define DISABLE_CENTAUR_MCR 0
+# define DISABLE_PCID (1<<(X86_FEATURE_PCID & 31))
#endif /* CONFIG_X86_64 */
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
@@ -49,7 +51,7 @@
#define DISABLED_MASK1 0
#define DISABLED_MASK2 0
#define DISABLED_MASK3 (DISABLE_CYRIX_ARR|DISABLE_CENTAUR_MCR|DISABLE_K6_MTRR)
-#define DISABLED_MASK4 0
+#define DISABLED_MASK4 (DISABLE_PCID)
#define DISABLED_MASK5 0
#define DISABLED_MASK6 0
#define DISABLED_MASK7 0
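Adding DISABLE_PCID to DISABLED_MASK4 hides the PCID feature bit from 32-bit kernels at compile time. Assuming X86_FEATURE_PCID lives in feature word 4 (the CPUID 0x1 ECX word), feature checks built on cpu_feature_enabled() can then be folded away entirely on !CONFIG_X86_64; a minimal sketch, modelled on the CR3 construction added later in this patch:

        /* Sketch: building a CR3 value.  On 32-bit kernels the branch below is
         * compiled out, because DISABLED_MASK4 now marks X86_FEATURE_PCID as
         * permanently unavailable. */
        static unsigned long example_build_cr3(struct mm_struct *mm)
        {
                unsigned long cr3 = __pa(mm->pgd);

                if (cpu_feature_enabled(X86_FEATURE_PCID))
                        cr3 |= this_cpu_read(cpu_tlbstate.loaded_mm_asid);

                return cr3;
        }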
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 398c79889f5c..1387dafdba2d 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -12,6 +12,7 @@
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <linux/dma-contiguous.h>
+#include <linux/mem_encrypt.h>
#ifdef CONFIG_ISA
# define ISA_DMA_BIT_MASK DMA_BIT_MASK(24)
@@ -57,12 +58,12 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
- return paddr;
+ return __sme_set(paddr);
}
static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
- return daddr;
+ return __sme_clr(daddr);
}
#endif /* CONFIG_X86_DMA_REMAP */
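phys_to_dma()/dma_to_phys() now route through the SME helpers pulled in from the new <linux/mem_encrypt.h>. Conceptually they are just mask operations on the encryption bit, and sme_me_mask is zero when SME is inactive, so this is a no-op on unaffected systems. A minimal sketch of the helpers (the real definitions live in linux/mem_encrypt.h, not in this hunk):

        /* Sketch only. */
        #define __sme_set(x)    ((x) | sme_me_mask)     /* OR in the encryption mask   */
        #define __sme_clr(x)    ((x) & ~sme_me_mask)    /* strip the encryption mask   */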
diff --git a/arch/x86/include/asm/dmi.h b/arch/x86/include/asm/dmi.h
index 3c69fed215c5..a8e15b04565b 100644
--- a/arch/x86/include/asm/dmi.h
+++ b/arch/x86/include/asm/dmi.h
@@ -13,9 +13,9 @@ static __always_inline __init void *dmi_alloc(unsigned len)
}
/* Use early IO mappings for DMI because it's initialized early */
-#define dmi_early_remap early_ioremap
-#define dmi_early_unmap early_iounmap
-#define dmi_remap ioremap_cache
-#define dmi_unmap iounmap
+#define dmi_early_remap early_memremap
+#define dmi_early_unmap early_memunmap
+#define dmi_remap(_x, _l) memremap(_x, _l, MEMREMAP_WB)
+#define dmi_unmap(_x) memunmap(_x)
#endif /* _ASM_X86_DMI_H */
diff --git a/arch/x86/include/asm/e820/api.h b/arch/x86/include/asm/e820/api.h
index a504adc661a4..cd266d830e49 100644
--- a/arch/x86/include/asm/e820/api.h
+++ b/arch/x86/include/asm/e820/api.h
@@ -39,6 +39,8 @@ extern void e820__setup_pci_gap(void);
extern void e820__reallocate_tables(void);
extern void e820__register_nosave_regions(unsigned long limit_pfn);
+extern int e820__get_entry_type(u64 start, u64 end);
+
/*
* Returns true iff the specified range [start,end) is completely contained inside
* the ISA region.
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 1c18d83d3f09..04330c8d9af9 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -126,15 +126,15 @@ do { \
pr_reg[4] = regs->di; \
pr_reg[5] = regs->bp; \
pr_reg[6] = regs->ax; \
- pr_reg[7] = regs->ds & 0xffff; \
- pr_reg[8] = regs->es & 0xffff; \
- pr_reg[9] = regs->fs & 0xffff; \
+ pr_reg[7] = regs->ds; \
+ pr_reg[8] = regs->es; \
+ pr_reg[9] = regs->fs; \
pr_reg[11] = regs->orig_ax; \
pr_reg[12] = regs->ip; \
- pr_reg[13] = regs->cs & 0xffff; \
+ pr_reg[13] = regs->cs; \
pr_reg[14] = regs->flags; \
pr_reg[15] = regs->sp; \
- pr_reg[16] = regs->ss & 0xffff; \
+ pr_reg[16] = regs->ss; \
} while (0);
#define ELF_CORE_COPY_REGS(pr_reg, regs) \
@@ -204,6 +204,7 @@ void set_personality_ia32(bool);
#define ELF_CORE_COPY_REGS(pr_reg, regs) \
do { \
+ unsigned long base; \
unsigned v; \
(pr_reg)[0] = (regs)->r15; \
(pr_reg)[1] = (regs)->r14; \
@@ -226,8 +227,8 @@ do { \
(pr_reg)[18] = (regs)->flags; \
(pr_reg)[19] = (regs)->sp; \
(pr_reg)[20] = (regs)->ss; \
- (pr_reg)[21] = current->thread.fsbase; \
- (pr_reg)[22] = current->thread.gsbase; \
+ rdmsrl(MSR_FS_BASE, base); (pr_reg)[21] = base; \
+ rdmsrl(MSR_KERNEL_GS_BASE, base); (pr_reg)[22] = base; \
asm("movl %%ds,%0" : "=r" (v)); (pr_reg)[23] = v; \
asm("movl %%es,%0" : "=r" (v)); (pr_reg)[24] = v; \
asm("movl %%fs,%0" : "=r" (v)); (pr_reg)[25] = v; \
@@ -247,11 +248,11 @@ extern int force_personality32;
/*
* This is the base location for PIE (ET_DYN with INTERP) loads. On
- * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * 64-bit, this is above 4GB to leave the entire 32-bit address
* space open for things that want to use the area for 32-bit pointers.
*/
#define ELF_ET_DYN_BASE (mmap_is_ia32() ? 0x000400000UL : \
- 0x100000000UL)
+ (TASK_SIZE / 3 * 2))
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. This could be done in user space,
@@ -304,8 +305,8 @@ static inline int mmap_is_ia32(void)
test_thread_flag(TIF_ADDR32));
}
-extern unsigned long tasksize_32bit(void);
-extern unsigned long tasksize_64bit(void);
+extern unsigned long task_size_32bit(void);
+extern unsigned long task_size_64bit(int full_addr_space);
extern unsigned long get_mmap_base(int is_legacy);
#ifdef CONFIG_X86_32
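The PIE base change is easier to see with numbers. Assuming the usual 64-bit TASK_SIZE of 0x00007ffffffff000 (4-level paging), the new expression works out to:

        /* Rough arithmetic, assuming TASK_SIZE == 0x00007ffffffff000UL: */
        unsigned long elf_et_dyn_base = 0x00007ffffffff000UL / 3 * 2;  /* 0x0000555555554aaa */

The ELF loader then page-aligns this, which is why PIE binaries end up loaded around 0x555555554000 rather than at the old fixed 4 GiB mark, while the low 32-bit range stays free for 32-bit pointers as the comment says.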
diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h
index df002992d8fd..aa15d1f7e530 100644
--- a/arch/x86/include/asm/entry_arch.h
+++ b/arch/x86/include/asm/entry_arch.h
@@ -13,18 +13,14 @@
BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
-BUILD_INTERRUPT3(irq_move_cleanup_interrupt, IRQ_MOVE_CLEANUP_VECTOR,
- smp_irq_move_cleanup_interrupt)
-BUILD_INTERRUPT3(reboot_interrupt, REBOOT_VECTOR, smp_reboot_interrupt)
+BUILD_INTERRUPT(irq_move_cleanup_interrupt, IRQ_MOVE_CLEANUP_VECTOR)
+BUILD_INTERRUPT(reboot_interrupt, REBOOT_VECTOR)
#endif
-BUILD_INTERRUPT(x86_platform_ipi, X86_PLATFORM_IPI_VECTOR)
-
#ifdef CONFIG_HAVE_KVM
-BUILD_INTERRUPT3(kvm_posted_intr_ipi, POSTED_INTR_VECTOR,
- smp_kvm_posted_intr_ipi)
-BUILD_INTERRUPT3(kvm_posted_intr_wakeup_ipi, POSTED_INTR_WAKEUP_VECTOR,
- smp_kvm_posted_intr_wakeup_ipi)
+BUILD_INTERRUPT(kvm_posted_intr_ipi, POSTED_INTR_VECTOR)
+BUILD_INTERRUPT(kvm_posted_intr_wakeup_ipi, POSTED_INTR_WAKEUP_VECTOR)
+BUILD_INTERRUPT(kvm_posted_intr_nested_ipi, POSTED_INTR_NESTED_VECTOR)
#endif
/*
@@ -39,6 +35,7 @@ BUILD_INTERRUPT3(kvm_posted_intr_wakeup_ipi, POSTED_INTR_WAKEUP_VECTOR,
BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
+BUILD_INTERRUPT(x86_platform_ipi, X86_PLATFORM_IPI_VECTOR)
#ifdef CONFIG_IRQ_WORK
BUILD_INTERRUPT(irq_work_interrupt, IRQ_WORK_VECTOR)
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index b65155cc3760..dcd9fb55e679 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -157,6 +157,26 @@ static inline void __set_fixmap(enum fixed_addresses idx,
}
#endif
+/*
+ * FIXMAP_PAGE_NOCACHE is used for MMIO. Memory encryption is not
+ * supported for MMIO addresses, so make sure that the memory encryption
+ * mask is not part of the page attributes.
+ */
+#define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_IO_NOCACHE
+
+/*
+ * Early memremap routines used for in-place encryption. The mappings created
+ * by these routines are intended to be used as temporary mappings.
+ */
+void __init *early_memremap_encrypted(resource_size_t phys_addr,
+ unsigned long size);
+void __init *early_memremap_encrypted_wp(resource_size_t phys_addr,
+ unsigned long size);
+void __init *early_memremap_decrypted(resource_size_t phys_addr,
+ unsigned long size);
+void __init *early_memremap_decrypted_wp(resource_size_t phys_addr,
+ unsigned long size);
+
#include <asm-generic/fixmap.h>
#define __late_set_fixmap(idx, phys, flags) __set_fixmap(idx, phys, flags)
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 255645f60ca2..554cdb205d17 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -450,10 +450,10 @@ static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
return 0;
}
-static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate)
+static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate, u64 mask)
{
if (use_xsave()) {
- copy_kernel_to_xregs(&fpstate->xsave, -1);
+ copy_kernel_to_xregs(&fpstate->xsave, mask);
} else {
if (use_fxsr())
copy_kernel_to_fxregs(&fpstate->fxsave);
@@ -477,7 +477,7 @@ static inline void copy_kernel_to_fpregs(union fpregs_state *fpstate)
: : [addr] "m" (fpstate));
}
- __copy_kernel_to_fpregs(fpstate);
+ __copy_kernel_to_fpregs(fpstate, -1);
}
extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size);
diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
index b4c1f5453436..f4dc9b63bdda 100644
--- a/arch/x86/include/asm/futex.h
+++ b/arch/x86/include/asm/futex.h
@@ -41,20 +41,11 @@
"+m" (*uaddr), "=&r" (tem) \
: "r" (oparg), "i" (-EFAULT), "1" (0))
-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+ u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret, tem;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
-
pagefault_disable();
switch (op) {
@@ -80,30 +71,9 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ:
- ret = (oldval == cmparg);
- break;
- case FUTEX_OP_CMP_NE:
- ret = (oldval != cmparg);
- break;
- case FUTEX_OP_CMP_LT:
- ret = (oldval < cmparg);
- break;
- case FUTEX_OP_CMP_GE:
- ret = (oldval >= cmparg);
- break;
- case FUTEX_OP_CMP_LE:
- ret = (oldval <= cmparg);
- break;
- case FUTEX_OP_CMP_GT:
- ret = (oldval > cmparg);
- break;
- default:
- ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
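This hunk is part of a tree-wide futex cleanup: decoding of encoded_op, the access_ok() check and the post-operation comparison move into common code, so the arch helper only performs the atomic read-modify-write and reports the old value through *oval. A sketch of how the generic caller is expected to use it — the logic mirrors the code removed above; the exact common-code implementation is not shown in this diff:

        /* Sketch of the generic wrapper that now surrounds the arch helper. */
        static int futex_atomic_op_inuser_sketch(int encoded_op, u32 __user *uaddr)
        {
                int op     = (encoded_op >> 28) & 7;
                int cmp    = (encoded_op >> 24) & 15;
                int oparg  = (encoded_op << 8) >> 20;
                int cmparg = (encoded_op << 20) >> 20;
                int oldval, ret;

                if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
                        oparg = 1 << oparg;

                if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                        return -EFAULT;

                ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
                if (ret)
                        return ret;

                switch (cmp) {
                case FUTEX_OP_CMP_EQ: return oldval == cmparg;
                case FUTEX_OP_CMP_NE: return oldval != cmparg;
                case FUTEX_OP_CMP_LT: return oldval <  cmparg;
                case FUTEX_OP_CMP_GE: return oldval >= cmparg;
                case FUTEX_OP_CMP_LE: return oldval <= cmparg;
                case FUTEX_OP_CMP_GT: return oldval >  cmparg;
                default:              return -ENOSYS;
                }
        }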
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index 9b76cd331990..ad1ed531febc 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -15,6 +15,7 @@ typedef struct {
#ifdef CONFIG_HAVE_KVM
unsigned int kvm_posted_intr_ipis;
unsigned int kvm_posted_intr_wakeup_ipis;
+ unsigned int kvm_posted_intr_nested_ipis;
#endif
unsigned int x86_platform_ipis; /* arch dependent */
unsigned int apic_perf_irqs;
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index b90e1053049b..6dfe366a8804 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -30,6 +30,7 @@ extern asmlinkage void apic_timer_interrupt(void);
extern asmlinkage void x86_platform_ipi(void);
extern asmlinkage void kvm_posted_intr_ipi(void);
extern asmlinkage void kvm_posted_intr_wakeup_ipi(void);
+extern asmlinkage void kvm_posted_intr_nested_ipi(void);
extern asmlinkage void error_interrupt(void);
extern asmlinkage void irq_work_interrupt(void);
@@ -45,25 +46,6 @@ extern asmlinkage void deferred_error_interrupt(void);
extern asmlinkage void call_function_interrupt(void);
extern asmlinkage void call_function_single_interrupt(void);
-#ifdef CONFIG_TRACING
-/* Interrupt handlers registered during init_IRQ */
-extern void trace_apic_timer_interrupt(void);
-extern void trace_x86_platform_ipi(void);
-extern void trace_error_interrupt(void);
-extern void trace_irq_work_interrupt(void);
-extern void trace_spurious_interrupt(void);
-extern void trace_thermal_interrupt(void);
-extern void trace_reschedule_interrupt(void);
-extern void trace_threshold_interrupt(void);
-extern void trace_deferred_error_interrupt(void);
-extern void trace_call_function_interrupt(void);
-extern void trace_call_function_single_interrupt(void);
-#define trace_irq_move_cleanup_interrupt irq_move_cleanup_interrupt
-#define trace_reboot_interrupt reboot_interrupt
-#define trace_kvm_posted_intr_ipi kvm_posted_intr_ipi
-#define trace_kvm_posted_intr_wakeup_ipi kvm_posted_intr_wakeup_ipi
-#endif /* CONFIG_TRACING */
-
#ifdef CONFIG_X86_LOCAL_APIC
struct irq_data;
struct pci_dev;
diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
index 21126155a739..0ead9dbb9130 100644
--- a/arch/x86/include/asm/hypervisor.h
+++ b/arch/x86/include/asm/hypervisor.h
@@ -43,6 +43,9 @@ struct hypervisor_x86 {
/* pin current vcpu to specified physical cpu (run rarely) */
void (*pin_vcpu)(int);
+
+ /* called during init_mem_mapping() to setup early mappings. */
+ void (*init_mem_mapping)(void);
};
extern const struct hypervisor_x86 *x86_hyper;
@@ -57,8 +60,15 @@ extern const struct hypervisor_x86 x86_hyper_kvm;
extern void init_hypervisor_platform(void);
extern bool hypervisor_x2apic_available(void);
extern void hypervisor_pin_vcpu(int cpu);
+
+static inline void hypervisor_init_mem_mapping(void)
+{
+ if (x86_hyper && x86_hyper->init_mem_mapping)
+ x86_hyper->init_mem_mapping();
+}
#else
static inline void init_hypervisor_platform(void) { }
static inline bool hypervisor_x2apic_available(void) { return false; }
+static inline void hypervisor_init_mem_mapping(void) { }
#endif /* CONFIG_HYPERVISOR_GUEST */
#endif /* _ASM_X86_HYPERVISOR_H */
diff --git a/arch/x86/include/asm/init.h b/arch/x86/include/asm/init.h
index 474eb8c66fee..05c4aa00cc86 100644
--- a/arch/x86/include/asm/init.h
+++ b/arch/x86/include/asm/init.h
@@ -7,6 +7,7 @@ struct x86_mapping_info {
unsigned long page_flag; /* page flag for PMD or PUD entry */
unsigned long offset; /* ident mapping offset */
bool direct_gbpages; /* PUD level 1GB page support */
+ unsigned long kernpg_flag; /* kernel pagetable flag override */
};
int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
diff --git a/arch/x86/include/asm/intel_rdt.h b/arch/x86/include/asm/intel_rdt.h
deleted file mode 100644
index 597dc4995678..000000000000
--- a/arch/x86/include/asm/intel_rdt.h
+++ /dev/null
@@ -1,286 +0,0 @@
-#ifndef _ASM_X86_INTEL_RDT_H
-#define _ASM_X86_INTEL_RDT_H
-
-#ifdef CONFIG_INTEL_RDT_A
-
-#include <linux/sched.h>
-#include <linux/kernfs.h>
-#include <linux/jump_label.h>
-
-#include <asm/intel_rdt_common.h>
-
-#define IA32_L3_QOS_CFG 0xc81
-#define IA32_L3_CBM_BASE 0xc90
-#define IA32_L2_CBM_BASE 0xd10
-#define IA32_MBA_THRTL_BASE 0xd50
-
-#define L3_QOS_CDP_ENABLE 0x01ULL
-
-/**
- * struct rdtgroup - store rdtgroup's data in resctrl file system.
- * @kn: kernfs node
- * @rdtgroup_list: linked list for all rdtgroups
- * @closid: closid for this rdtgroup
- * @cpu_mask: CPUs assigned to this rdtgroup
- * @flags: status bits
- * @waitcount: how many cpus expect to find this
- * group when they acquire rdtgroup_mutex
- */
-struct rdtgroup {
- struct kernfs_node *kn;
- struct list_head rdtgroup_list;
- int closid;
- struct cpumask cpu_mask;
- int flags;
- atomic_t waitcount;
-};
-
-/* rdtgroup.flags */
-#define RDT_DELETED 1
-
-/* rftype.flags */
-#define RFTYPE_FLAGS_CPUS_LIST 1
-
-/* List of all resource groups */
-extern struct list_head rdt_all_groups;
-
-extern int max_name_width, max_data_width;
-
-int __init rdtgroup_init(void);
-
-/**
- * struct rftype - describe each file in the resctrl file system
- * @name: File name
- * @mode: Access mode
- * @kf_ops: File operations
- * @flags: File specific RFTYPE_FLAGS_* flags
- * @seq_show: Show content of the file
- * @write: Write to the file
- */
-struct rftype {
- char *name;
- umode_t mode;
- struct kernfs_ops *kf_ops;
- unsigned long flags;
-
- int (*seq_show)(struct kernfs_open_file *of,
- struct seq_file *sf, void *v);
- /*
- * write() is the generic write callback which maps directly to
- * kernfs write operation and overrides all other operations.
- * Maximum write size is determined by ->max_write_len.
- */
- ssize_t (*write)(struct kernfs_open_file *of,
- char *buf, size_t nbytes, loff_t off);
-};
-
-/**
- * struct rdt_domain - group of cpus sharing an RDT resource
- * @list: all instances of this resource
- * @id: unique id for this instance
- * @cpu_mask: which cpus share this resource
- * @ctrl_val: array of cache or mem ctrl values (indexed by CLOSID)
- * @new_ctrl: new ctrl value to be loaded
- * @have_new_ctrl: did user provide new_ctrl for this domain
- */
-struct rdt_domain {
- struct list_head list;
- int id;
- struct cpumask cpu_mask;
- u32 *ctrl_val;
- u32 new_ctrl;
- bool have_new_ctrl;
-};
-
-/**
- * struct msr_param - set a range of MSRs from a domain
- * @res: The resource to use
- * @low: Beginning index from base MSR
- * @high: End index
- */
-struct msr_param {
- struct rdt_resource *res;
- int low;
- int high;
-};
-
-/**
- * struct rdt_cache - Cache allocation related data
- * @cbm_len: Length of the cache bit mask
- * @min_cbm_bits: Minimum number of consecutive bits to be set
- * @cbm_idx_mult: Multiplier of CBM index
- * @cbm_idx_offset: Offset of CBM index. CBM index is computed by:
- * closid * cbm_idx_multi + cbm_idx_offset
- * in a cache bit mask
- */
-struct rdt_cache {
- unsigned int cbm_len;
- unsigned int min_cbm_bits;
- unsigned int cbm_idx_mult;
- unsigned int cbm_idx_offset;
-};
-
-/**
- * struct rdt_membw - Memory bandwidth allocation related data
- * @max_delay: Max throttle delay. Delay is the hardware
- * representation for memory bandwidth.
- * @min_bw: Minimum memory bandwidth percentage user can request
- * @bw_gran: Granularity at which the memory bandwidth is allocated
- * @delay_linear: True if memory B/W delay is in linear scale
- * @mb_map: Mapping of memory B/W percentage to memory B/W delay
- */
-struct rdt_membw {
- u32 max_delay;
- u32 min_bw;
- u32 bw_gran;
- u32 delay_linear;
- u32 *mb_map;
-};
-
-/**
- * struct rdt_resource - attributes of an RDT resource
- * @enabled: Is this feature enabled on this machine
- * @capable: Is this feature available on this machine
- * @name: Name to use in "schemata" file
- * @num_closid: Number of CLOSIDs available
- * @cache_level: Which cache level defines scope of this resource
- * @default_ctrl: Specifies default cache cbm or memory B/W percent.
- * @msr_base: Base MSR address for CBMs
- * @msr_update: Function pointer to update QOS MSRs
- * @data_width: Character width of data when displaying
- * @domains: All domains for this resource
- * @cache: Cache allocation related data
- * @info_files: resctrl info files for the resource
- * @nr_info_files: Number of info files
- * @format_str: Per resource format string to show domain value
- * @parse_ctrlval: Per resource function pointer to parse control values
- */
-struct rdt_resource {
- bool enabled;
- bool capable;
- char *name;
- int num_closid;
- int cache_level;
- u32 default_ctrl;
- unsigned int msr_base;
- void (*msr_update) (struct rdt_domain *d, struct msr_param *m,
- struct rdt_resource *r);
- int data_width;
- struct list_head domains;
- struct rdt_cache cache;
- struct rdt_membw membw;
- struct rftype *info_files;
- int nr_info_files;
- const char *format_str;
- int (*parse_ctrlval) (char *buf, struct rdt_resource *r,
- struct rdt_domain *d);
-};
-
-void rdt_get_cache_infofile(struct rdt_resource *r);
-void rdt_get_mba_infofile(struct rdt_resource *r);
-int parse_cbm(char *buf, struct rdt_resource *r, struct rdt_domain *d);
-int parse_bw(char *buf, struct rdt_resource *r, struct rdt_domain *d);
-
-extern struct mutex rdtgroup_mutex;
-
-extern struct rdt_resource rdt_resources_all[];
-extern struct rdtgroup rdtgroup_default;
-DECLARE_STATIC_KEY_FALSE(rdt_enable_key);
-
-int __init rdtgroup_init(void);
-
-enum {
- RDT_RESOURCE_L3,
- RDT_RESOURCE_L3DATA,
- RDT_RESOURCE_L3CODE,
- RDT_RESOURCE_L2,
- RDT_RESOURCE_MBA,
-
- /* Must be the last */
- RDT_NUM_RESOURCES,
-};
-
-#define for_each_capable_rdt_resource(r) \
- for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
- r++) \
- if (r->capable)
-
-#define for_each_enabled_rdt_resource(r) \
- for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
- r++) \
- if (r->enabled)
-
-/* CPUID.(EAX=10H, ECX=ResID=1).EAX */
-union cpuid_0x10_1_eax {
- struct {
- unsigned int cbm_len:5;
- } split;
- unsigned int full;
-};
-
-/* CPUID.(EAX=10H, ECX=ResID=3).EAX */
-union cpuid_0x10_3_eax {
- struct {
- unsigned int max_delay:12;
- } split;
- unsigned int full;
-};
-
-/* CPUID.(EAX=10H, ECX=ResID).EDX */
-union cpuid_0x10_x_edx {
- struct {
- unsigned int cos_max:16;
- } split;
- unsigned int full;
-};
-
-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_closid);
-
-void rdt_ctrl_update(void *arg);
-struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn);
-void rdtgroup_kn_unlock(struct kernfs_node *kn);
-ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
- char *buf, size_t nbytes, loff_t off);
-int rdtgroup_schemata_show(struct kernfs_open_file *of,
- struct seq_file *s, void *v);
-
-/*
- * intel_rdt_sched_in() - Writes the task's CLOSid to IA32_PQR_MSR
- *
- * Following considerations are made so that this has minimal impact
- * on scheduler hot path:
- * - This will stay as no-op unless we are running on an Intel SKU
- * which supports resource control and we enable by mounting the
- * resctrl file system.
- * - Caches the per cpu CLOSid values and does the MSR write only
- * when a task with a different CLOSid is scheduled in.
- *
- * Must be called with preemption disabled.
- */
-static inline void intel_rdt_sched_in(void)
-{
- if (static_branch_likely(&rdt_enable_key)) {
- struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
- int closid;
-
- /*
- * If this task has a closid assigned, use it.
- * Else use the closid assigned to this cpu.
- */
- closid = current->closid;
- if (closid == 0)
- closid = this_cpu_read(cpu_closid);
-
- if (closid != state->closid) {
- state->closid = closid;
- wrmsr(MSR_IA32_PQR_ASSOC, state->rmid, closid);
- }
- }
-}
-
-#else
-
-static inline void intel_rdt_sched_in(void) {}
-
-#endif /* CONFIG_INTEL_RDT_A */
-#endif /* _ASM_X86_INTEL_RDT_H */
diff --git a/arch/x86/include/asm/intel_rdt_common.h b/arch/x86/include/asm/intel_rdt_common.h
deleted file mode 100644
index b31081b89407..000000000000
--- a/arch/x86/include/asm/intel_rdt_common.h
+++ /dev/null
@@ -1,27 +0,0 @@
-#ifndef _ASM_X86_INTEL_RDT_COMMON_H
-#define _ASM_X86_INTEL_RDT_COMMON_H
-
-#define MSR_IA32_PQR_ASSOC 0x0c8f
-
-/**
- * struct intel_pqr_state - State cache for the PQR MSR
- * @rmid: The cached Resource Monitoring ID
- * @closid: The cached Class Of Service ID
- * @rmid_usecnt: The usage counter for rmid
- *
- * The upper 32 bits of MSR_IA32_PQR_ASSOC contain closid and the
- * lower 10 bits rmid. The update to MSR_IA32_PQR_ASSOC always
- * contains both parts, so we need to cache them.
- *
- * The cache also helps to avoid pointless updates if the value does
- * not change.
- */
-struct intel_pqr_state {
- u32 rmid;
- u32 closid;
- int rmid_usecnt;
-};
-
-DECLARE_PER_CPU(struct intel_pqr_state, pqr_state);
-
-#endif /* _ASM_X86_INTEL_RDT_COMMON_H */
diff --git a/arch/x86/include/asm/intel_rdt_sched.h b/arch/x86/include/asm/intel_rdt_sched.h
new file mode 100644
index 000000000000..b4bbf8b21512
--- /dev/null
+++ b/arch/x86/include/asm/intel_rdt_sched.h
@@ -0,0 +1,92 @@
+#ifndef _ASM_X86_INTEL_RDT_SCHED_H
+#define _ASM_X86_INTEL_RDT_SCHED_H
+
+#ifdef CONFIG_INTEL_RDT
+
+#include <linux/sched.h>
+#include <linux/jump_label.h>
+
+#define IA32_PQR_ASSOC 0x0c8f
+
+/**
+ * struct intel_pqr_state - State cache for the PQR MSR
+ * @cur_rmid: The cached Resource Monitoring ID
+ * @cur_closid: The cached Class Of Service ID
+ * @default_rmid: The user assigned Resource Monitoring ID
+ * @default_closid: The user assigned cached Class Of Service ID
+ *
+ * The upper 32 bits of IA32_PQR_ASSOC contain closid and the
+ * lower 10 bits rmid. The update to IA32_PQR_ASSOC always
+ * contains both parts, so we need to cache them. This also
+ * stores the user configured per cpu CLOSID and RMID.
+ *
+ * The cache also helps to avoid pointless updates if the value does
+ * not change.
+ */
+struct intel_pqr_state {
+ u32 cur_rmid;
+ u32 cur_closid;
+ u32 default_rmid;
+ u32 default_closid;
+};
+
+DECLARE_PER_CPU(struct intel_pqr_state, pqr_state);
+
+DECLARE_STATIC_KEY_FALSE(rdt_enable_key);
+DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
+DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key);
+
+/*
+ * __intel_rdt_sched_in() - Writes the task's CLOSid/RMID to IA32_PQR_MSR
+ *
+ * Following considerations are made so that this has minimal impact
+ * on scheduler hot path:
+ * - This will stay as no-op unless we are running on an Intel SKU
+ * which supports resource control or monitoring and we enable by
+ * mounting the resctrl file system.
+ * - Caches the per cpu CLOSid/RMID values and does the MSR write only
+ * when a task with a different CLOSid/RMID is scheduled in.
+ * - We allocate RMIDs/CLOSids globally in order to keep this as
+ * simple as possible.
+ * Must be called with preemption disabled.
+ */
+static void __intel_rdt_sched_in(void)
+{
+ struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
+ u32 closid = state->default_closid;
+ u32 rmid = state->default_rmid;
+
+ /*
+ * If this task has a closid/rmid assigned, use it.
+ * Else use the closid/rmid assigned to this cpu.
+ */
+ if (static_branch_likely(&rdt_alloc_enable_key)) {
+ if (current->closid)
+ closid = current->closid;
+ }
+
+ if (static_branch_likely(&rdt_mon_enable_key)) {
+ if (current->rmid)
+ rmid = current->rmid;
+ }
+
+ if (closid != state->cur_closid || rmid != state->cur_rmid) {
+ state->cur_closid = closid;
+ state->cur_rmid = rmid;
+ wrmsr(IA32_PQR_ASSOC, rmid, closid);
+ }
+}
+
+static inline void intel_rdt_sched_in(void)
+{
+ if (static_branch_likely(&rdt_enable_key))
+ __intel_rdt_sched_in();
+}
+
+#else
+
+static inline void intel_rdt_sched_in(void) {}
+
+#endif /* CONFIG_INTEL_RDT */
+
+#endif /* _ASM_X86_INTEL_RDT_SCHED_H */
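The scheduling hook keeps its external name, intel_rdt_sched_in(), so existing callers need no change; only the header moves and the internals split into separate allocation and monitoring static keys. A minimal sketch of the expected call site, with preemption already disabled by the context-switch path (the actual __switch_to() wiring is outside this hunk):

        #include <asm/intel_rdt_sched.h>

        /* Sketch: tail of the context-switch path, preemption disabled. */
        static void example_switch_to_tail(void)
        {
                /* ... incoming task's register and segment state loaded ... */
                intel_rdt_sched_in();   /* rewrite IA32_PQR_ASSOC only if CLOSID/RMID changed */
        }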
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 7afb0e2f07f4..c40a95c33bb8 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -69,6 +69,9 @@ build_mmio_write(__writeb, "b", unsigned char, "q", )
build_mmio_write(__writew, "w", unsigned short, "r", )
build_mmio_write(__writel, "l", unsigned int, "r", )
+#define readb readb
+#define readw readw
+#define readl readl
#define readb_relaxed(a) __readb(a)
#define readw_relaxed(a) __readw(a)
#define readl_relaxed(a) __readl(a)
@@ -76,6 +79,9 @@ build_mmio_write(__writel, "l", unsigned int, "r", )
#define __raw_readw __readw
#define __raw_readl __readl
+#define writeb writeb
+#define writew writew
+#define writel writel
#define writeb_relaxed(v, a) __writeb(v, a)
#define writew_relaxed(v, a) __writew(v, a)
#define writel_relaxed(v, a) __writel(v, a)
@@ -88,13 +94,15 @@ build_mmio_write(__writel, "l", unsigned int, "r", )
#ifdef CONFIG_X86_64
build_mmio_read(readq, "q", unsigned long, "=r", :"memory")
+build_mmio_read(__readq, "q", unsigned long, "=r", )
build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
+build_mmio_write(__writeq, "q", unsigned long, "r", )
-#define readq_relaxed(a) readq(a)
-#define writeq_relaxed(v, a) writeq(v, a)
+#define readq_relaxed(a) __readq(a)
+#define writeq_relaxed(v, a) __writeq(v, a)
-#define __raw_readq(a) readq(a)
-#define __raw_writeq(val, addr) writeq(val, addr)
+#define __raw_readq __readq
+#define __raw_writeq __writeq
/* Let people know that we have them */
#define readq readq
@@ -119,6 +127,7 @@ static inline phys_addr_t virt_to_phys(volatile void *address)
{
return __pa(address);
}
+#define virt_to_phys virt_to_phys
/**
* phys_to_virt - map physical address to virtual
@@ -137,6 +146,7 @@ static inline void *phys_to_virt(phys_addr_t address)
{
return __va(address);
}
+#define phys_to_virt phys_to_virt
/*
* Change "struct page" to physical address.
@@ -169,11 +179,14 @@ static inline unsigned int isa_virt_to_bus(volatile void *address)
* else, you probably want one of the following.
*/
extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
+#define ioremap_nocache ioremap_nocache
extern void __iomem *ioremap_uc(resource_size_t offset, unsigned long size);
#define ioremap_uc ioremap_uc
extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
+#define ioremap_cache ioremap_cache
extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size, unsigned long prot_val);
+#define ioremap_prot ioremap_prot
/**
* ioremap - map bus memory into CPU space
@@ -193,8 +206,10 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
{
return ioremap_nocache(offset, size);
}
+#define ioremap ioremap
extern void iounmap(volatile void __iomem *addr);
+#define iounmap iounmap
extern void set_iounmap_nonlazy(void);
@@ -203,53 +218,6 @@ extern void set_iounmap_nonlazy(void);
#include <asm-generic/iomap.h>
/*
- * Convert a virtual cached pointer to an uncached pointer
- */
-#define xlate_dev_kmem_ptr(p) p
-
-/**
- * memset_io Set a range of I/O memory to a constant value
- * @addr: The beginning of the I/O-memory range to set
- * @val: The value to set the memory to
- * @count: The number of bytes to set
- *
- * Set a range of I/O memory to a given value.
- */
-static inline void
-memset_io(volatile void __iomem *addr, unsigned char val, size_t count)
-{
- memset((void __force *)addr, val, count);
-}
-
-/**
- * memcpy_fromio Copy a block of data from I/O memory
- * @dst: The (RAM) destination for the copy
- * @src: The (I/O memory) source for the data
- * @count: The number of bytes to copy
- *
- * Copy a block of data from I/O memory.
- */
-static inline void
-memcpy_fromio(void *dst, const volatile void __iomem *src, size_t count)
-{
- memcpy(dst, (const void __force *)src, count);
-}
-
-/**
- * memcpy_toio Copy a block of data into I/O memory
- * @dst: The (I/O memory) destination for the copy
- * @src: The (RAM) source for the data
- * @count: The number of bytes to copy
- *
- * Copy a block of data to I/O memory.
- */
-static inline void
-memcpy_toio(volatile void __iomem *dst, const void *src, size_t count)
-{
- memcpy((void __force *)dst, src, count);
-}
-
-/*
* ISA space is 'always mapped' on a typical x86 system, no need to
* explicitly ioremap() it. The fact that the ISA IO space is mapped
* to PAGE_OFFSET is pure coincidence - it does not mean ISA values
@@ -328,26 +296,51 @@ static inline unsigned type in##bwl##_p(int port) \
static inline void outs##bwl(int port, const void *addr, unsigned long count) \
{ \
asm volatile("rep; outs" #bwl \
- : "+S"(addr), "+c"(count) : "d"(port)); \
+ : "+S"(addr), "+c"(count) : "d"(port) : "memory"); \
} \
\
static inline void ins##bwl(int port, void *addr, unsigned long count) \
{ \
asm volatile("rep; ins" #bwl \
- : "+D"(addr), "+c"(count) : "d"(port)); \
+ : "+D"(addr), "+c"(count) : "d"(port) : "memory"); \
}
BUILDIO(b, b, char)
BUILDIO(w, w, short)
BUILDIO(l, , int)
+#define inb inb
+#define inw inw
+#define inl inl
+#define inb_p inb_p
+#define inw_p inw_p
+#define inl_p inl_p
+#define insb insb
+#define insw insw
+#define insl insl
+
+#define outb outb
+#define outw outw
+#define outl outl
+#define outb_p outb_p
+#define outw_p outw_p
+#define outl_p outl_p
+#define outsb outsb
+#define outsw outsw
+#define outsl outsl
+
extern void *xlate_dev_mem_ptr(phys_addr_t phys);
extern void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);
+#define xlate_dev_mem_ptr xlate_dev_mem_ptr
+#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
+
extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
enum page_cache_mode pcm);
extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);
+#define ioremap_wc ioremap_wc
extern void __iomem *ioremap_wt(resource_size_t offset, unsigned long size);
+#define ioremap_wt ioremap_wt
extern bool is_early_ioremap_ptep(pte_t *ptep);
@@ -365,6 +358,9 @@ extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
#define IO_SPACE_LIMIT 0xffff
+#include <asm-generic/io.h>
+#undef PCI_IOBASE
+
#ifdef CONFIG_MTRR
extern int __must_check arch_phys_wc_index(int handle);
#define arch_phys_wc_index arch_phys_wc_index
@@ -381,4 +377,12 @@ extern void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size)
#define arch_io_reserve_memtype_wc arch_io_reserve_memtype_wc
#endif
+extern bool arch_memremap_can_ram_remap(resource_size_t offset,
+ unsigned long size,
+ unsigned long flags);
+#define arch_memremap_can_ram_remap arch_memremap_can_ram_remap
+
+extern bool phys_mem_access_encrypted(unsigned long phys_addr,
+ unsigned long size);
+
#endif /* _ASM_X86_IO_H */
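Much of the churn in this file is the `#define readb readb` style of self-definition. That is deliberate: the header now includes <asm-generic/io.h> (see the hunk above), and the generic header only supplies a fallback when the corresponding macro name is not already defined, roughly along these lines:

        /* Sketch of the fallback pattern used by asm-generic/io.h. */
        #ifndef readb
        #define readb readb
        static inline u8 readb(const volatile void __iomem *addr)
        {
                return __raw_readb(addr);
        }
        #endif

Defining each name to itself therefore tells the generic layer "the arch version exists, do not add another one", without changing any behaviour.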
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index 668cca540025..9958ceea2fa3 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -42,10 +42,6 @@ extern bool handle_irq(struct irq_desc *desc, struct pt_regs *regs);
extern __visible unsigned int do_IRQ(struct pt_regs *regs);
-/* Interrupt vector management */
-extern DECLARE_BITMAP(used_vectors, NR_VECTORS);
-extern int vector_used_by_percpu_irq(unsigned int vector);
-
extern void init_ISA_irqs(void);
#ifdef CONFIG_X86_LOCAL_APIC
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 6ca9fd6234e1..aaf8d28b5d00 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -83,7 +83,6 @@
*/
#define X86_PLATFORM_IPI_VECTOR 0xf7
-#define POSTED_INTR_WAKEUP_VECTOR 0xf1
/*
* IRQ work vector:
*/
@@ -98,6 +97,8 @@
/* Vector for KVM to deliver posted interrupt IPI */
#ifdef CONFIG_HAVE_KVM
#define POSTED_INTR_VECTOR 0xf2
+#define POSTED_INTR_WAKEUP_VECTOR 0xf1
+#define POSTED_INTR_NESTED_VECTOR 0xf0
#endif
/*
diff --git a/arch/x86/include/asm/irq_work.h b/arch/x86/include/asm/irq_work.h
index f70604125286..ddbb8ea0f5a9 100644
--- a/arch/x86/include/asm/irq_work.h
+++ b/arch/x86/include/asm/irq_work.h
@@ -3,9 +3,17 @@
#include <asm/cpufeature.h>
+#ifdef CONFIG_X86_LOCAL_APIC
static inline bool arch_irq_work_has_interrupt(void)
{
return boot_cpu_has(X86_FEATURE_APIC);
}
+extern void arch_irq_work_raise(void);
+#else
+static inline bool arch_irq_work_has_interrupt(void)
+{
+ return false;
+}
+#endif
#endif /* _ASM_IRQ_WORK_H */
diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h
index 70ef205489f0..942c1f444da8 100644
--- a/arch/x86/include/asm/kexec.h
+++ b/arch/x86/include/asm/kexec.h
@@ -147,7 +147,8 @@ unsigned long
relocate_kernel(unsigned long indirection_page,
unsigned long page_list,
unsigned long start_address,
- unsigned int preserve_context);
+ unsigned int preserve_context,
+ unsigned int sme_active);
#endif
#define ARCH_HAS_KIMAGE_ARCH
@@ -207,6 +208,14 @@ struct kexec_entry64_regs {
uint64_t r15;
uint64_t rip;
};
+
+extern int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages,
+ gfp_t gfp);
+#define arch_kexec_post_alloc_pages arch_kexec_post_alloc_pages
+
+extern void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages);
+#define arch_kexec_pre_free_pages arch_kexec_pre_free_pages
+
#endif
typedef void crash_vmclear_fn(void);
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
index 34b984c60790..6cf65437b5e5 100644
--- a/arch/x86/include/asm/kprobes.h
+++ b/arch/x86/include/asm/kprobes.h
@@ -52,10 +52,10 @@ typedef u8 kprobe_opcode_t;
#define flush_insn_slot(p) do { } while (0)
/* optinsn template addresses */
-extern __visible kprobe_opcode_t optprobe_template_entry;
-extern __visible kprobe_opcode_t optprobe_template_val;
-extern __visible kprobe_opcode_t optprobe_template_call;
-extern __visible kprobe_opcode_t optprobe_template_end;
+extern __visible kprobe_opcode_t optprobe_template_entry[];
+extern __visible kprobe_opcode_t optprobe_template_val[];
+extern __visible kprobe_opcode_t optprobe_template_call[];
+extern __visible kprobe_opcode_t optprobe_template_end[];
#define MAX_OPTIMIZED_LENGTH (MAX_INSN_SIZE + RELATIVE_ADDR_SIZE)
#define MAX_OPTINSN_SIZE \
(((unsigned long)&optprobe_template_end - \
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 87ac4fba6d8e..369e41c23f07 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -492,6 +492,7 @@ struct kvm_vcpu_arch {
unsigned long cr4;
unsigned long cr4_guest_owned_bits;
unsigned long cr8;
+ u32 pkru;
u32 hflags;
u64 efer;
u64 apic_base;
@@ -1078,7 +1079,7 @@ void kvm_mmu_init_vm(struct kvm *kvm);
void kvm_mmu_uninit_vm(struct kvm *kvm);
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask,
- u64 acc_track_mask);
+ u64 acc_track_mask, u64 me_mask);
void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
@@ -1374,8 +1375,6 @@ int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu);
-void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
- unsigned long address);
void kvm_define_shared_msr(unsigned index, u32 msr);
int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
diff --git a/arch/x86/include/asm/lguest.h b/arch/x86/include/asm/lguest.h
deleted file mode 100644
index 73d0c9b92087..000000000000
--- a/arch/x86/include/asm/lguest.h
+++ /dev/null
@@ -1,91 +0,0 @@
-#ifndef _ASM_X86_LGUEST_H
-#define _ASM_X86_LGUEST_H
-
-#define GDT_ENTRY_LGUEST_CS 10
-#define GDT_ENTRY_LGUEST_DS 11
-#define LGUEST_CS (GDT_ENTRY_LGUEST_CS * 8)
-#define LGUEST_DS (GDT_ENTRY_LGUEST_DS * 8)
-
-#ifndef __ASSEMBLY__
-#include <asm/desc.h>
-
-#define GUEST_PL 1
-
-/* Page for Switcher text itself, then two pages per cpu */
-#define SWITCHER_TEXT_PAGES (1)
-#define SWITCHER_STACK_PAGES (2 * nr_cpu_ids)
-#define TOTAL_SWITCHER_PAGES (SWITCHER_TEXT_PAGES + SWITCHER_STACK_PAGES)
-
-/* Where we map the Switcher, in both Host and Guest. */
-extern unsigned long switcher_addr;
-
-/* Found in switcher.S */
-extern unsigned long default_idt_entries[];
-
-/* Declarations for definitions in arch/x86/lguest/head_32.S */
-extern char lguest_noirq_iret[];
-extern const char lgstart_cli[], lgend_cli[];
-extern const char lgstart_pushf[], lgend_pushf[];
-
-extern void lguest_iret(void);
-extern void lguest_init(void);
-
-struct lguest_regs {
- /* Manually saved part. */
- unsigned long eax, ebx, ecx, edx;
- unsigned long esi, edi, ebp;
- unsigned long gs;
- unsigned long fs, ds, es;
- unsigned long trapnum, errcode;
- /* Trap pushed part */
- unsigned long eip;
- unsigned long cs;
- unsigned long eflags;
- unsigned long esp;
- unsigned long ss;
-};
-
-/* This is a guest-specific page (mapped ro) into the guest. */
-struct lguest_ro_state {
- /* Host information we need to restore when we switch back. */
- u32 host_cr3;
- struct desc_ptr host_idt_desc;
- struct desc_ptr host_gdt_desc;
- u32 host_sp;
-
- /* Fields which are used when guest is running. */
- struct desc_ptr guest_idt_desc;
- struct desc_ptr guest_gdt_desc;
- struct x86_hw_tss guest_tss;
- struct desc_struct guest_idt[IDT_ENTRIES];
- struct desc_struct guest_gdt[GDT_ENTRIES];
-};
-
-struct lg_cpu_arch {
- /* The GDT entries copied into lguest_ro_state when running. */
- struct desc_struct gdt[GDT_ENTRIES];
-
- /* The IDT entries: some copied into lguest_ro_state when running. */
- struct desc_struct idt[IDT_ENTRIES];
-
- /* The address of the last guest-visible pagefault (ie. cr2). */
- unsigned long last_pagefault;
-};
-
-static inline void lguest_set_ts(void)
-{
- u32 cr0;
-
- cr0 = read_cr0();
- if (!(cr0 & 8))
- write_cr0(cr0 | 8);
-}
-
-/* Full 4G segment descriptors, suitable for CS and DS. */
-#define FULL_EXEC_SEGMENT \
- ((struct desc_struct)GDT_ENTRY_INIT(0xc09b, 0, 0xfffff))
-#define FULL_SEGMENT ((struct desc_struct)GDT_ENTRY_INIT(0xc093, 0, 0xfffff))
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _ASM_X86_LGUEST_H */
diff --git a/arch/x86/include/asm/lguest_hcall.h b/arch/x86/include/asm/lguest_hcall.h
deleted file mode 100644
index 6c119cfae218..000000000000
--- a/arch/x86/include/asm/lguest_hcall.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/* Architecture specific portion of the lguest hypercalls */
-#ifndef _ASM_X86_LGUEST_HCALL_H
-#define _ASM_X86_LGUEST_HCALL_H
-
-#define LHCALL_FLUSH_ASYNC 0
-#define LHCALL_LGUEST_INIT 1
-#define LHCALL_SHUTDOWN 2
-#define LHCALL_NEW_PGTABLE 4
-#define LHCALL_FLUSH_TLB 5
-#define LHCALL_LOAD_IDT_ENTRY 6
-#define LHCALL_SET_STACK 7
-#define LHCALL_SET_CLOCKEVENT 9
-#define LHCALL_HALT 10
-#define LHCALL_SET_PMD 13
-#define LHCALL_SET_PTE 14
-#define LHCALL_SET_PGD 15
-#define LHCALL_LOAD_TLS 16
-#define LHCALL_LOAD_GDT_ENTRY 18
-#define LHCALL_SEND_INTERRUPTS 19
-
-#define LGUEST_TRAP_ENTRY 0x1F
-
-/* Argument number 3 to LHCALL_LGUEST_SHUTDOWN */
-#define LGUEST_SHUTDOWN_POWEROFF 1
-#define LGUEST_SHUTDOWN_RESTART 2
-
-#ifndef __ASSEMBLY__
-#include <asm/hw_irq.h>
-
-/*G:030
- * But first, how does our Guest contact the Host to ask for privileged
- * operations? There are two ways: the direct way is to make a "hypercall",
- * to make requests of the Host Itself.
- *
- * Our hypercall mechanism uses the highest unused trap code (traps 32 and
- * above are used by real hardware interrupts). Seventeen hypercalls are
- * available: the hypercall number is put in the %eax register, and the
- * arguments (when required) are placed in %ebx, %ecx, %edx and %esi.
- * If a return value makes sense, it's returned in %eax.
- *
- * Grossly invalid calls result in Sudden Death at the hands of the vengeful
- * Host, rather than returning failure. This reflects Winston Churchill's
- * definition of a gentleman: "someone who is only rude intentionally".
- */
-static inline unsigned long
-hcall(unsigned long call,
- unsigned long arg1, unsigned long arg2, unsigned long arg3,
- unsigned long arg4)
-{
- /* "int" is the Intel instruction to trigger a trap. */
- asm volatile("int $" __stringify(LGUEST_TRAP_ENTRY)
- /* The call in %eax (aka "a") might be overwritten */
- : "=a"(call)
- /* The arguments are in %eax, %ebx, %ecx, %edx & %esi */
- : "a"(call), "b"(arg1), "c"(arg2), "d"(arg3), "S"(arg4)
- /* "memory" means this might write somewhere in memory.
- * This isn't true for all calls, but it's safe to tell
- * gcc that it might happen so it doesn't get clever. */
- : "memory");
- return call;
-}
-/*:*/
-
-/* Can't use our min() macro here: needs to be a constant */
-#define LGUEST_IRQS (NR_IRQS < 32 ? NR_IRQS: 32)
-
-#define LHCALL_RING_SIZE 64
-struct hcall_args {
- /* These map directly onto eax/ebx/ecx/edx/esi in struct lguest_regs */
- unsigned long arg0, arg1, arg2, arg3, arg4;
-};
-
-#endif /* !__ASSEMBLY__ */
-#endif /* _ASM_X86_LGUEST_HCALL_H */
diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
new file mode 100644
index 000000000000..8e618fcf1f7c
--- /dev/null
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -0,0 +1,80 @@
+/*
+ * AMD Memory Encryption Support
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __X86_MEM_ENCRYPT_H__
+#define __X86_MEM_ENCRYPT_H__
+
+#ifndef __ASSEMBLY__
+
+#include <linux/init.h>
+
+#include <asm/bootparam.h>
+
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+
+extern unsigned long sme_me_mask;
+
+void sme_encrypt_execute(unsigned long encrypted_kernel_vaddr,
+ unsigned long decrypted_kernel_vaddr,
+ unsigned long kernel_len,
+ unsigned long encryption_wa,
+ unsigned long encryption_pgd);
+
+void __init sme_early_encrypt(resource_size_t paddr,
+ unsigned long size);
+void __init sme_early_decrypt(resource_size_t paddr,
+ unsigned long size);
+
+void __init sme_map_bootdata(char *real_mode_data);
+void __init sme_unmap_bootdata(char *real_mode_data);
+
+void __init sme_early_init(void);
+
+void __init sme_encrypt_kernel(void);
+void __init sme_enable(struct boot_params *bp);
+
+/* Architecture __weak replacement functions */
+void __init mem_encrypt_init(void);
+
+void swiotlb_set_mem_attributes(void *vaddr, unsigned long size);
+
+#else /* !CONFIG_AMD_MEM_ENCRYPT */
+
+#define sme_me_mask 0UL
+
+static inline void __init sme_early_encrypt(resource_size_t paddr,
+ unsigned long size) { }
+static inline void __init sme_early_decrypt(resource_size_t paddr,
+ unsigned long size) { }
+
+static inline void __init sme_map_bootdata(char *real_mode_data) { }
+static inline void __init sme_unmap_bootdata(char *real_mode_data) { }
+
+static inline void __init sme_early_init(void) { }
+
+static inline void __init sme_encrypt_kernel(void) { }
+static inline void __init sme_enable(struct boot_params *bp) { }
+
+#endif /* CONFIG_AMD_MEM_ENCRYPT */
+
+/*
+ * The __sme_pa() and __sme_pa_nodebug() macros are meant for use when
+ * writing to or comparing values from the cr3 register. Having the
+ * encryption mask set in cr3 enables the PGD entry to be encrypted and
+ * avoid special case handling of PGD allocations.
+ */
+#define __sme_pa(x) (__pa(x) | sme_me_mask)
+#define __sme_pa_nodebug(x) (__pa_nodebug(x) | sme_me_mask)
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __X86_MEM_ENCRYPT_H__ */
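The closing comment explains when __sme_pa() is meant to be used. A one-line sketch of that usage pattern when pointing CR3 at a page-global directory — the specific call site shown here is an assumption, not part of this hunk:

        /* Sketch: keep the SME mask set in CR3 so the PGD itself stays encrypted. */
        static void example_load_pgd(pgd_t *pgd)
        {
                write_cr3(__sme_pa(pgd));
        }

Because sme_me_mask is 0 in the !CONFIG_AMD_MEM_ENCRYPT stubs above, the same code compiles to a plain __pa() on kernels without SME support.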
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index 79b647a7ebd0..bb8c597c2248 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -3,12 +3,28 @@
#include <linux/spinlock.h>
#include <linux/mutex.h>
+#include <linux/atomic.h>
/*
- * The x86 doesn't have a mmu context, but
- * we put the segment information here.
+ * x86 has arch-specific MMU state beyond what lives in mm_struct.
*/
typedef struct {
+ /*
+ * ctx_id uniquely identifies this mm_struct. A ctx_id will never
+ * be reused, and zero is not a valid ctx_id.
+ */
+ u64 ctx_id;
+
+ /*
+ * Any code that needs to do any sort of TLB flushing for this
+ * mm will first make its changes to the page tables, then
+ * increment tlb_gen, then flush. This lets the low-level
+ * flushing code keep track of what needs flushing.
+ *
+ * This is not used on Xen PV.
+ */
+ atomic64_t tlb_gen;
+
#ifdef CONFIG_MODIFY_LDT_SYSCALL
struct ldt_struct *ldt;
#endif
@@ -37,6 +53,11 @@ typedef struct {
#endif
} mm_context_t;
+#define INIT_MM_CONTEXT(mm) \
+ .context = { \
+ .ctx_id = 1, \
+ }
+
void leave_mm(int cpu);
#endif /* _ASM_X86_MMU_H */
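The comment on tlb_gen describes a protocol rather than an API: change the page tables first, bump the generation, then flush. A compact sketch of that intent — the helper name below is made up for illustration; the real increment helper is introduced elsewhere in this series:

        /* Illustrative only -- helper name is hypothetical. */
        static inline u64 bump_tlb_gen(struct mm_struct *mm)
        {
                /* page-table modifications must already be visible here */
                return atomic64_inc_return(&mm->context.tlb_gen);
        }
        /* ...the caller then issues flush_tlb_mm_range() or similar, and the
         * low-level flush code compares generations to skip work it has
         * already done for this mm. */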
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index ecfcb6643c9b..7ae318c340d9 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -12,6 +12,9 @@
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>
+
+extern atomic64_t last_mm_ctx_id;
+
#ifndef CONFIG_PARAVIRT
static inline void paravirt_activate_mm(struct mm_struct *prev,
struct mm_struct *next)
@@ -125,13 +128,18 @@ static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
- if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
- this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
+ int cpu = smp_processor_id();
+
+ if (cpumask_test_cpu(cpu, mm_cpumask(mm)))
+ cpumask_clear_cpu(cpu, mm_cpumask(mm));
}
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
{
+ mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
+ atomic64_set(&mm->context.tlb_gen, 0);
+
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
/* pkey 0 is the default and always allocated */
@@ -140,9 +148,7 @@ static inline int init_new_context(struct task_struct *tsk,
mm->context.execute_only_pkey = -1;
}
#endif
- init_new_context_ldt(tsk, mm);
-
- return 0;
+ return init_new_context_ldt(tsk, mm);
}
static inline void destroy_context(struct mm_struct *mm)
{
@@ -292,8 +298,11 @@ static inline unsigned long __get_current_cr3_fast(void)
{
unsigned long cr3 = __pa(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd);
+ if (static_cpu_has(X86_FEATURE_PCID))
+ cr3 |= this_cpu_read(cpu_tlbstate.loaded_mm_asid);
+
/* For now, be very restrictive about when this can be called. */
- VM_WARN_ON(in_nmi() || !in_atomic());
+ VM_WARN_ON(in_nmi() || preemptible());
VM_BUG_ON(cr3 != __read_cr3());
return cr3;
diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
index e3b7819caeef..9eb7c718aaf8 100644
--- a/arch/x86/include/asm/module.h
+++ b/arch/x86/include/asm/module.h
@@ -2,6 +2,15 @@
#define _ASM_X86_MODULE_H
#include <asm-generic/module.h>
+#include <asm/orc_types.h>
+
+struct mod_arch_specific {
+#ifdef CONFIG_ORC_UNWINDER
+ unsigned int num_orcs;
+ int *orc_unwind_ip;
+ struct orc_entry *orc_unwind;
+#endif
+};
#ifdef CONFIG_X86_64
/* X86_64 does not define MODULE_PROC_FAMILY */
diff --git a/arch/x86/include/asm/mpx.h b/arch/x86/include/asm/mpx.h
index a0d662be4c5b..7d7404756bb4 100644
--- a/arch/x86/include/asm/mpx.h
+++ b/arch/x86/include/asm/mpx.h
@@ -73,6 +73,9 @@ static inline void mpx_mm_init(struct mm_struct *mm)
}
void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long start, unsigned long end);
+
+unsigned long mpx_unmapped_area_check(unsigned long addr, unsigned long len,
+ unsigned long flags);
#else
static inline siginfo_t *mpx_generate_siginfo(struct pt_regs *regs)
{
@@ -94,6 +97,12 @@ static inline void mpx_notify_unmap(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
}
+
+static inline unsigned long mpx_unmapped_area_check(unsigned long addr,
+ unsigned long len, unsigned long flags)
+{
+ return addr;
+}
#endif /* CONFIG_X86_INTEL_MPX */
#endif /* _ASM_X86_MPX_H */
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index 2b58c8c1eeaa..63cc96f064dc 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -3,6 +3,8 @@
#include <linux/types.h>
#include <linux/atomic.h>
+#include <linux/nmi.h>
+#include <asm/io.h>
#include <asm/hyperv.h>
/*
@@ -28,6 +30,8 @@ struct ms_hyperv_info {
u32 features;
u32 misc_features;
u32 hints;
+ u32 max_vp_index;
+ u32 max_lp_index;
};
extern struct ms_hyperv_info ms_hyperv;
@@ -168,12 +172,155 @@ void hv_remove_crash_handler(void);
#if IS_ENABLED(CONFIG_HYPERV)
extern struct clocksource *hyperv_cs;
+extern void *hv_hypercall_pg;
+
+static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
+{
+ u64 input_address = input ? virt_to_phys(input) : 0;
+ u64 output_address = output ? virt_to_phys(output) : 0;
+ u64 hv_status;
+ register void *__sp asm(_ASM_SP);
+
+#ifdef CONFIG_X86_64
+ if (!hv_hypercall_pg)
+ return U64_MAX;
+
+ __asm__ __volatile__("mov %4, %%r8\n"
+ "call *%5"
+ : "=a" (hv_status), "+r" (__sp),
+ "+c" (control), "+d" (input_address)
+ : "r" (output_address), "m" (hv_hypercall_pg)
+ : "cc", "memory", "r8", "r9", "r10", "r11");
+#else
+ u32 input_address_hi = upper_32_bits(input_address);
+ u32 input_address_lo = lower_32_bits(input_address);
+ u32 output_address_hi = upper_32_bits(output_address);
+ u32 output_address_lo = lower_32_bits(output_address);
+
+ if (!hv_hypercall_pg)
+ return U64_MAX;
+
+ __asm__ __volatile__("call *%7"
+ : "=A" (hv_status),
+ "+c" (input_address_lo), "+r" (__sp)
+ : "A" (control),
+ "b" (input_address_hi),
+ "D"(output_address_hi), "S"(output_address_lo),
+ "m" (hv_hypercall_pg)
+ : "cc", "memory");
+#endif /* !x86_64 */
+ return hv_status;
+}
+
+#define HV_HYPERCALL_RESULT_MASK GENMASK_ULL(15, 0)
+#define HV_HYPERCALL_FAST_BIT BIT(16)
+#define HV_HYPERCALL_VARHEAD_OFFSET 17
+#define HV_HYPERCALL_REP_COMP_OFFSET 32
+#define HV_HYPERCALL_REP_COMP_MASK GENMASK_ULL(43, 32)
+#define HV_HYPERCALL_REP_START_OFFSET 48
+#define HV_HYPERCALL_REP_START_MASK GENMASK_ULL(59, 48)
+
+/* Fast hypercall with 8 bytes of input and no output */
+static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
+{
+ u64 hv_status, control = (u64)code | HV_HYPERCALL_FAST_BIT;
+ register void *__sp asm(_ASM_SP);
+
+#ifdef CONFIG_X86_64
+ {
+ __asm__ __volatile__("call *%4"
+ : "=a" (hv_status), "+r" (__sp),
+ "+c" (control), "+d" (input1)
+ : "m" (hv_hypercall_pg)
+ : "cc", "r8", "r9", "r10", "r11");
+ }
+#else
+ {
+ u32 input1_hi = upper_32_bits(input1);
+ u32 input1_lo = lower_32_bits(input1);
+
+ __asm__ __volatile__ ("call *%5"
+ : "=A"(hv_status),
+ "+c"(input1_lo),
+ "+r"(__sp)
+ : "A" (control),
+ "b" (input1_hi),
+ "m" (hv_hypercall_pg)
+ : "cc", "edi", "esi");
+ }
+#endif
+ return hv_status;
+}
+
+/*
+ * Rep hypercalls. Callers of this function are expected to ensure that
+ * rep_count and varhead_size comply with the Hyper-V hypercall definition.
+ */
+static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
+ void *input, void *output)
+{
+ u64 control = code;
+ u64 status;
+ u16 rep_comp;
+
+ control |= (u64)varhead_size << HV_HYPERCALL_VARHEAD_OFFSET;
+ control |= (u64)rep_count << HV_HYPERCALL_REP_COMP_OFFSET;
+
+ do {
+ status = hv_do_hypercall(control, input, output);
+ if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS)
+ return status;
+
+ /* Bits 32-43 of status have 'Reps completed' data. */
+ rep_comp = (status & HV_HYPERCALL_REP_COMP_MASK) >>
+ HV_HYPERCALL_REP_COMP_OFFSET;
+
+ control &= ~HV_HYPERCALL_REP_START_MASK;
+ control |= (u64)rep_comp << HV_HYPERCALL_REP_START_OFFSET;
+
+ touch_nmi_watchdog();
+ } while (rep_comp < rep_count);
+
+ return status;
+}
+
+/*
+ * Hypervisor's notion of virtual processor ID is different from
+ * Linux' notion of CPU ID. This information can only be retrieved
+ * in the context of the calling CPU. Set up a map for easy access
+ * to this information.
+ */
+extern u32 *hv_vp_index;
+
+/**
+ * hv_cpu_number_to_vp_number() - Map CPU to VP.
+ * @cpu_number: CPU number in Linux terms
+ *
+ * This function returns the mapping between the Linux processor
+ * number and the hypervisor's virtual processor number, useful
+ * in making hypercalls and such that talk about specific
+ * processors.
+ *
+ * Return: Virtual processor number in Hyper-V terms
+ */
+static inline int hv_cpu_number_to_vp_number(int cpu_number)
+{
+ return hv_vp_index[cpu_number];
+}
void hyperv_init(void);
+void hyperv_setup_mmu_ops(void);
+void hyper_alloc_mmu(void);
void hyperv_report_panic(struct pt_regs *regs);
bool hv_is_hypercall_page_setup(void);
void hyperv_cleanup(void);
-#endif
+#else /* CONFIG_HYPERV */
+static inline void hyperv_init(void) {}
+static inline bool hv_is_hypercall_page_setup(void) { return false; }
+static inline void hyperv_cleanup(void) {}
+static inline void hyperv_setup_mmu_ops(void) {}
+#endif /* CONFIG_HYPERV */
+
#ifdef CONFIG_HYPERV_TSCPAGE
struct ms_hyperv_tsc_page *hv_get_tsc_page(void);
static inline u64 hv_read_tsc_page(const struct ms_hyperv_tsc_page *tsc_pg)
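
(For illustration only, not part of this patch: a hypothetical caller of the
new fast-hypercall helper. Only hv_do_fast_hypercall8(), HV_HYPERCALL_RESULT_MASK
and the HVCALL_*/HV_STATUS_* constants come from the kernel headers; the
function name and error handling are made up for the sketch.)

#include <linux/errno.h>
#include <asm/hyperv.h>
#include <asm/mshyperv.h>

static int example_notify_long_spin_wait(u64 wait_count)
{
	u64 status;

	/* 8 bytes of input, no output: the fast-hypercall case. */
	status = hv_do_fast_hypercall8(HVCALL_NOTIFY_LONG_SPIN_WAIT,
				       wait_count);

	/* Only the low 16 bits of the result carry the hypercall status. */
	if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS)
		return -EIO;

	return 0;
}
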
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 5573c75f8e4c..17f5c12e1afd 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -356,6 +356,8 @@
#define MSR_K8_TOP_MEM1 0xc001001a
#define MSR_K8_TOP_MEM2 0xc001001d
#define MSR_K8_SYSCFG 0xc0010010
+#define MSR_K8_SYSCFG_MEM_ENCRYPT_BIT 23
+#define MSR_K8_SYSCFG_MEM_ENCRYPT BIT_ULL(MSR_K8_SYSCFG_MEM_ENCRYPT_BIT)
#define MSR_K8_INT_PENDING_MSG 0xc0010055
/* C1E active bits in int pending message */
#define K8_INTP_C1E_ACTIVE_MASK 0x18000000
diff --git a/arch/x86/include/asm/orc_lookup.h b/arch/x86/include/asm/orc_lookup.h
new file mode 100644
index 000000000000..91c8d868424d
--- /dev/null
+++ b/arch/x86/include/asm/orc_lookup.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2017 Josh Poimboeuf <jpoimboe@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef _ORC_LOOKUP_H
+#define _ORC_LOOKUP_H
+
+/*
+ * This is a lookup table for speeding up access to the .orc_unwind table.
+ * Given an input address offset, the corresponding lookup table entry
+ * specifies a subset of the .orc_unwind table to search.
+ *
+ * Each block represents the end of the previous range and the start of the
+ * next range. An extra block is added to give the last range an end.
+ *
+ * The block size should be a power of 2 to avoid a costly 'div' instruction.
+ *
+ * A block size of 256 was chosen because it roughly doubles unwinder
+ * performance while only adding ~5% to the ORC data footprint.
+ */
+#define LOOKUP_BLOCK_ORDER 8
+#define LOOKUP_BLOCK_SIZE (1 << LOOKUP_BLOCK_ORDER)
+
+#ifndef LINKER_SCRIPT
+
+extern unsigned int orc_lookup[];
+extern unsigned int orc_lookup_end[];
+
+#define LOOKUP_START_IP (unsigned long)_stext
+#define LOOKUP_STOP_IP (unsigned long)_etext
+
+#endif /* LINKER_SCRIPT */
+
+#endif /* _ORC_LOOKUP_H */
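
(Illustrative sketch, not part of this patch: how the unwinder might use the
lookup table to bound its binary search of .orc_unwind_ip. __orc_find(),
__start_orc_unwind_ip and __start_orc_unwind are assumed helpers and symbols,
not declarations from this header.)

#include <asm/orc_lookup.h>
#include <asm/orc_types.h>

static struct orc_entry *example_orc_find(unsigned long ip)
{
	unsigned int idx, start, stop;

	if (ip < LOOKUP_START_IP || ip >= LOOKUP_STOP_IP)
		return NULL;

	/* Each lookup entry covers one LOOKUP_BLOCK_SIZE chunk of .text. */
	idx = (ip - LOOKUP_START_IP) / LOOKUP_BLOCK_SIZE;

	start = orc_lookup[idx];
	stop = orc_lookup[idx + 1] + 1;		/* end of the previous range */

	/* Binary-search only the [start, stop) slice of the ORC tables. */
	return __orc_find(__start_orc_unwind_ip + start,
			  __start_orc_unwind + start, stop - start, ip);
}
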
diff --git a/arch/x86/include/asm/orc_types.h b/arch/x86/include/asm/orc_types.h
new file mode 100644
index 000000000000..9c9dc579bd7d
--- /dev/null
+++ b/arch/x86/include/asm/orc_types.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2017 Josh Poimboeuf <jpoimboe@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ORC_TYPES_H
+#define _ORC_TYPES_H
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+
+/*
+ * The ORC_REG_* registers are base registers which are used to find other
+ * registers on the stack.
+ *
+ * ORC_REG_PREV_SP, also known as DWARF Call Frame Address (CFA), is the
+ * address of the previous frame: the caller's SP before it called the current
+ * function.
+ *
+ * ORC_REG_UNDEFINED means the corresponding register's value didn't change in
+ * the current frame.
+ *
+ * The most commonly used base registers are SP and BP -- which the previous SP
+ * is usually based on -- and PREV_SP and UNDEFINED -- which the previous BP is
+ * usually based on.
+ *
+ * The rest of the base registers are needed for special cases like entry code
+ * and GCC realigned stacks.
+ */
+#define ORC_REG_UNDEFINED 0
+#define ORC_REG_PREV_SP 1
+#define ORC_REG_DX 2
+#define ORC_REG_DI 3
+#define ORC_REG_BP 4
+#define ORC_REG_SP 5
+#define ORC_REG_R10 6
+#define ORC_REG_R13 7
+#define ORC_REG_BP_INDIRECT 8
+#define ORC_REG_SP_INDIRECT 9
+#define ORC_REG_MAX 15
+
+/*
+ * ORC_TYPE_CALL: Indicates that sp_reg+sp_offset resolves to PREV_SP (the
+ * caller's SP right before it made the call). Used for all callable
+ * functions, i.e. all C code and all callable asm functions.
+ *
+ * ORC_TYPE_REGS: Used in entry code to indicate that sp_reg+sp_offset points
+ * to a fully populated pt_regs from a syscall, interrupt, or exception.
+ *
+ * ORC_TYPE_REGS_IRET: Used in entry code to indicate that sp_reg+sp_offset
+ * points to the iret return frame.
+ *
+ * The UNWIND_HINT macros are used only for the unwind_hint struct. They
+ * aren't used in struct orc_entry due to size and complexity constraints.
+ * Objtool converts them to real types when it converts the hints to orc
+ * entries.
+ */
+#define ORC_TYPE_CALL 0
+#define ORC_TYPE_REGS 1
+#define ORC_TYPE_REGS_IRET 2
+#define UNWIND_HINT_TYPE_SAVE 3
+#define UNWIND_HINT_TYPE_RESTORE 4
+
+#ifndef __ASSEMBLY__
+/*
+ * This struct is more or less a vastly simplified version of the DWARF Call
+ * Frame Information standard. It contains only the necessary parts of DWARF
+ * CFI, simplified for ease of access by the in-kernel unwinder. It tells the
+ * unwinder how to find the previous SP and BP (and sometimes entry regs) on
+ * the stack for a given code address. Each instance of the struct corresponds
+ * to one or more code locations.
+ */
+struct orc_entry {
+ s16 sp_offset;
+ s16 bp_offset;
+ unsigned sp_reg:4;
+ unsigned bp_reg:4;
+ unsigned type:2;
+} __packed;
+
+/*
+ * This struct is used by asm and inline asm code to manually annotate the
+ * location of registers on the stack for the ORC unwinder.
+ *
+ * Type can be either ORC_TYPE_* or UNWIND_HINT_TYPE_*.
+ */
+struct unwind_hint {
+ u32 ip;
+ s16 sp_offset;
+ u8 sp_reg;
+ u8 type;
+};
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ORC_TYPES_H */
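
(Illustrative sketch, not part of this patch: how an unwinder could turn an
orc_entry into the previous stack pointer for the common ORC_TYPE_CALL case
described above. The function name is made up.)

#include <asm/orc_types.h>

static unsigned long example_orc_prev_sp(const struct orc_entry *orc,
					 unsigned long sp, unsigned long bp)
{
	/*
	 * sp_reg names the base register and sp_offset is added to it
	 * to recover the caller's stack pointer (PREV_SP).
	 */
	switch (orc->sp_reg) {
	case ORC_REG_SP:
		return sp + orc->sp_offset;
	case ORC_REG_BP:
		return bp + orc->sp_offset;
	default:
		return 0;	/* other base registers not handled in this sketch */
	}
}
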
diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
index b4a0d43248cf..b50df06ad251 100644
--- a/arch/x86/include/asm/page_64.h
+++ b/arch/x86/include/asm/page_64.h
@@ -51,6 +51,10 @@ static inline void clear_page(void *page)
void copy_page(void *to, void *from);
+#ifdef CONFIG_X86_MCE
+#define arch_unmap_kpfn arch_unmap_kpfn
+#endif
+
#endif /* !__ASSEMBLY__ */
#ifdef CONFIG_X86_VSYSCALL_EMULATION
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
index 7bd0099384ca..b98ed9d14630 100644
--- a/arch/x86/include/asm/page_types.h
+++ b/arch/x86/include/asm/page_types.h
@@ -3,6 +3,7 @@
#include <linux/const.h>
#include <linux/types.h>
+#include <linux/mem_encrypt.h>
/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 12
@@ -15,7 +16,7 @@
#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
-#define __PHYSICAL_MASK ((phys_addr_t)((1ULL << __PHYSICAL_MASK_SHIFT) - 1))
+#define __PHYSICAL_MASK ((phys_addr_t)(__sme_clr((1ULL << __PHYSICAL_MASK_SHIFT) - 1)))
#define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
/* Cast *PAGE_MASK to a signed type so that it is sign-extended if
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 9ccac1926587..c25dd22f7c70 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -960,11 +960,6 @@ extern void default_banner(void);
#define GET_CR2_INTO_RAX \
call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2)
-#define PARAVIRT_ADJUST_EXCEPTION_FRAME \
- PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
- CLBR_NONE, \
- call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))
-
#define USERGS_SYSRET64 \
PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64), \
CLBR_NONE, \
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index cb976bab6299..6b64fc6367f2 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -84,7 +84,7 @@ struct pv_init_ops {
*/
unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
unsigned long addr, unsigned len);
-};
+} __no_randomize_layout;
struct pv_lazy_ops {
@@ -92,12 +92,12 @@ struct pv_lazy_ops {
void (*enter)(void);
void (*leave)(void);
void (*flush)(void);
-};
+} __no_randomize_layout;
struct pv_time_ops {
unsigned long long (*sched_clock)(void);
unsigned long long (*steal_clock)(int cpu);
-};
+} __no_randomize_layout;
struct pv_cpu_ops {
/* hooks for various privileged instructions */
@@ -176,7 +176,7 @@ struct pv_cpu_ops {
void (*start_context_switch)(struct task_struct *prev);
void (*end_context_switch)(struct task_struct *next);
-};
+} __no_randomize_layout;
struct pv_irq_ops {
/*
@@ -196,10 +196,7 @@ struct pv_irq_ops {
void (*safe_halt)(void);
void (*halt)(void);
-#ifdef CONFIG_X86_64
- void (*adjust_exception_frame)(void);
-#endif
-};
+} __no_randomize_layout;
struct pv_mmu_ops {
unsigned long (*read_cr2)(void);
@@ -305,7 +302,7 @@ struct pv_mmu_ops {
an mfn. We can tell which is which from the index. */
void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
phys_addr_t phys, pgprot_t flags);
-};
+} __no_randomize_layout;
struct arch_spinlock;
#ifdef CONFIG_SMP
@@ -322,7 +319,7 @@ struct pv_lock_ops {
void (*kick)(int cpu);
struct paravirt_callee_save vcpu_is_preempted;
-};
+} __no_randomize_layout;
/* This contains all the paravirt structures: we get a convenient
* number for each function using the offset which we use to indicate
@@ -334,7 +331,7 @@ struct paravirt_patch_template {
struct pv_irq_ops pv_irq_ops;
struct pv_mmu_ops pv_mmu_ops;
struct pv_lock_ops pv_lock_ops;
-};
+} __no_randomize_layout;
extern struct pv_info pv_info;
extern struct pv_init_ops pv_init_ops;
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 77037b6f1caa..bbeae4a2bd01 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -1,6 +1,7 @@
#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H
+#include <linux/mem_encrypt.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
@@ -13,9 +14,18 @@
cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS))) \
: (prot))
+/*
+ * Macros to add or remove encryption attribute
+ */
+#define pgprot_encrypted(prot) __pgprot(__sme_set(pgprot_val(prot)))
+#define pgprot_decrypted(prot) __pgprot(__sme_clr(pgprot_val(prot)))
+
#ifndef __ASSEMBLY__
#include <asm/x86_init.h>
+extern pgd_t early_top_pgt[PTRS_PER_PGD];
+int __init __early_make_pgtable(unsigned long address, pmdval_t pmd);
+
void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
void ptdump_walk_pgd_level_checkwx(void);
@@ -38,6 +48,8 @@ extern struct list_head pgd_list;
extern struct mm_struct *pgd_page_get_mm(struct page *page);
+extern pmdval_t early_pmd_flags;
+
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else /* !CONFIG_PARAVIRT */
@@ -195,6 +207,11 @@ static inline unsigned long p4d_pfn(p4d_t p4d)
return (p4d_val(p4d) & p4d_pfn_mask(p4d)) >> PAGE_SHIFT;
}
+static inline unsigned long pgd_pfn(pgd_t pgd)
+{
+ return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
+}
+
static inline int p4d_large(p4d_t p4d)
{
/* No 512 GiB pages yet */
@@ -704,8 +721,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
* Currently stuck as a macro due to indirect forward reference to
* linux/mmzone.h's __section_mem_map_addr() definition:
*/
-#define pmd_page(pmd) \
- pfn_to_page((pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT)
+#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
/*
* the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
@@ -773,8 +789,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
* Currently stuck as a macro due to indirect forward reference to
* linux/mmzone.h's __section_mem_map_addr() definition:
*/
-#define pud_page(pud) \
- pfn_to_page((pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT)
+#define pud_page(pud) pfn_to_page(pud_pfn(pud))
/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
@@ -824,8 +839,7 @@ static inline unsigned long p4d_page_vaddr(p4d_t p4d)
* Currently stuck as a macro due to indirect forward reference to
* linux/mmzone.h's __section_mem_map_addr() definition:
*/
-#define p4d_page(p4d) \
- pfn_to_page((p4d_val(p4d) & p4d_pfn_mask(p4d)) >> PAGE_SHIFT)
+#define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d))
/* Find an entry in the third-level page table.. */
static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
@@ -859,7 +873,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
* Currently stuck as a macro due to indirect forward reference to
* linux/mmzone.h's __section_mem_map_addr() definition:
*/
-#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
+#define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd))
/* to find an entry in a page-table-directory. */
static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index bf9638e1ee42..399261ce904c 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -2,6 +2,8 @@
#define _ASM_X86_PGTABLE_DEFS_H
#include <linux/const.h>
+#include <linux/mem_encrypt.h>
+
#include <asm/page_types.h>
#define FIRST_USER_ADDRESS 0UL
@@ -121,10 +123,10 @@
#define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)
-#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
- _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | \
- _PAGE_DIRTY)
+#define _PAGE_TABLE_NOENC (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |\
+ _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _KERNPG_TABLE_NOENC (_PAGE_PRESENT | _PAGE_RW | \
+ _PAGE_ACCESSED | _PAGE_DIRTY)
/*
* Set of bits not changed in pte_modify. The pte's
@@ -159,6 +161,7 @@ enum page_cache_mode {
#define _PAGE_CACHE_MASK (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)
#define _PAGE_NOCACHE (cachemode2protval(_PAGE_CACHE_MODE_UC))
+#define _PAGE_CACHE_WP (cachemode2protval(_PAGE_CACHE_MODE_WP))
#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
@@ -187,22 +190,42 @@ enum page_cache_mode {
#define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
#define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
+#define __PAGE_KERNEL_WP (__PAGE_KERNEL | _PAGE_CACHE_WP)
#define __PAGE_KERNEL_IO (__PAGE_KERNEL)
#define __PAGE_KERNEL_IO_NOCACHE (__PAGE_KERNEL_NOCACHE)
-#define PAGE_KERNEL __pgprot(__PAGE_KERNEL)
-#define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO)
-#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
-#define PAGE_KERNEL_RX __pgprot(__PAGE_KERNEL_RX)
-#define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE)
-#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE)
-#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC)
-#define PAGE_KERNEL_VSYSCALL __pgprot(__PAGE_KERNEL_VSYSCALL)
-#define PAGE_KERNEL_VVAR __pgprot(__PAGE_KERNEL_VVAR)
+#ifndef __ASSEMBLY__
+
+#define _PAGE_ENC (_AT(pteval_t, sme_me_mask))
+
+#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
+ _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_ENC)
+#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | \
+ _PAGE_DIRTY | _PAGE_ENC)
+
+#define __PAGE_KERNEL_ENC (__PAGE_KERNEL | _PAGE_ENC)
+#define __PAGE_KERNEL_ENC_WP (__PAGE_KERNEL_WP | _PAGE_ENC)
+
+#define __PAGE_KERNEL_NOENC (__PAGE_KERNEL)
+#define __PAGE_KERNEL_NOENC_WP (__PAGE_KERNEL_WP)
+
+#define PAGE_KERNEL __pgprot(__PAGE_KERNEL | _PAGE_ENC)
+#define PAGE_KERNEL_NOENC __pgprot(__PAGE_KERNEL)
+#define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO | _PAGE_ENC)
+#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC | _PAGE_ENC)
+#define PAGE_KERNEL_EXEC_NOENC __pgprot(__PAGE_KERNEL_EXEC)
+#define PAGE_KERNEL_RX __pgprot(__PAGE_KERNEL_RX | _PAGE_ENC)
+#define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE | _PAGE_ENC)
+#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE | _PAGE_ENC)
+#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC | _PAGE_ENC)
+#define PAGE_KERNEL_VSYSCALL __pgprot(__PAGE_KERNEL_VSYSCALL | _PAGE_ENC)
+#define PAGE_KERNEL_VVAR __pgprot(__PAGE_KERNEL_VVAR | _PAGE_ENC)
+
+#define PAGE_KERNEL_IO __pgprot(__PAGE_KERNEL_IO)
+#define PAGE_KERNEL_IO_NOCACHE __pgprot(__PAGE_KERNEL_IO_NOCACHE)
-#define PAGE_KERNEL_IO __pgprot(__PAGE_KERNEL_IO)
-#define PAGE_KERNEL_IO_NOCACHE __pgprot(__PAGE_KERNEL_IO_NOCACHE)
+#endif /* __ASSEMBLY__ */
/* xwr */
#define __P000 PAGE_NONE
@@ -287,6 +310,11 @@ static inline p4dval_t native_p4d_val(p4d_t p4d)
#else
#include <asm-generic/pgtable-nop4d.h>
+static inline p4d_t native_make_p4d(pudval_t val)
+{
+ return (p4d_t) { .pgd = native_make_pgd((pgdval_t)val) };
+}
+
static inline p4dval_t native_p4d_val(p4d_t p4d)
{
return native_pgd_val(p4d.pgd);
diff --git a/arch/x86/include/asm/processor-flags.h b/arch/x86/include/asm/processor-flags.h
index 79aa2f98398d..dc723b64acf0 100644
--- a/arch/x86/include/asm/processor-flags.h
+++ b/arch/x86/include/asm/processor-flags.h
@@ -2,6 +2,7 @@
#define _ASM_X86_PROCESSOR_FLAGS_H
#include <uapi/asm/processor-flags.h>
+#include <linux/mem_encrypt.h>
#ifdef CONFIG_VM86
#define X86_VM_MASK X86_EFLAGS_VM
@@ -32,16 +33,18 @@
* CR3_ADDR_MASK is the mask used by read_cr3_pa().
*/
#ifdef CONFIG_X86_64
-/* Mask off the address space ID bits. */
-#define CR3_ADDR_MASK 0x7FFFFFFFFFFFF000ull
-#define CR3_PCID_MASK 0xFFFull
+/* Mask off the address space ID and SME encryption bits. */
+#define CR3_ADDR_MASK __sme_clr(0x7FFFFFFFFFFFF000ull)
+#define CR3_PCID_MASK 0xFFFull
+#define CR3_NOFLUSH BIT_ULL(63)
#else
/*
* CR3_ADDR_MASK needs at least bits 31:5 set on PAE systems, and we save
* a tiny bit of code size by setting all the bits.
*/
-#define CR3_ADDR_MASK 0xFFFFFFFFull
-#define CR3_PCID_MASK 0ull
+#define CR3_ADDR_MASK 0xFFFFFFFFull
+#define CR3_PCID_MASK 0ull
+#define CR3_NOFLUSH 0
#endif
#endif /* _ASM_X86_PROCESSOR_FLAGS_H */
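
(Illustrative sketch, not part of this patch: how the three masks compose a
CR3 value, i.e. the page-table base with any SME bit cleared, an ASID in the
low 12 bits, and the optional no-flush bit. The helper name is made up.)

static inline unsigned long example_build_cr3(unsigned long pgd_pa, u16 asid,
					      bool noflush)
{
	return (pgd_pa & CR3_ADDR_MASK) |
	       ((unsigned long)asid & CR3_PCID_MASK) |
	       (noflush ? CR3_NOFLUSH : 0);
}
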
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 6a79547e8ee0..3fa26a61eabc 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -22,6 +22,7 @@ struct vm86;
#include <asm/nops.h>
#include <asm/special_insns.h>
#include <asm/fpu/types.h>
+#include <asm/unwind_hints.h>
#include <linux/personality.h>
#include <linux/cache.h>
@@ -29,6 +30,7 @@ struct vm86;
#include <linux/math64.h>
#include <linux/err.h>
#include <linux/irqflags.h>
+#include <linux/mem_encrypt.h>
/*
* We handle most unaligned accesses in hardware. On the other hand
@@ -129,7 +131,7 @@ struct cpuinfo_x86 {
/* Index into per_cpu list: */
u16 cpu_index;
u32 microcode;
-};
+} __randomize_layout;
struct cpuid_regs {
u32 eax, ebx, ecx, edx;
@@ -239,9 +241,14 @@ static inline unsigned long read_cr3_pa(void)
return __read_cr3() & CR3_ADDR_MASK;
}
+static inline unsigned long native_read_cr3_pa(void)
+{
+ return __native_read_cr3() & CR3_ADDR_MASK;
+}
+
static inline void load_cr3(pgd_t *pgdir)
{
- write_cr3(__pa(pgdir));
+ write_cr3(__sme_pa(pgdir));
}
#ifdef CONFIG_X86_32
@@ -661,7 +668,7 @@ static inline void sync_core(void)
* In case NMI unmasking or performance ever becomes a problem,
* the next best option appears to be MOV-to-CR2 and an
* unconditional jump. That sequence also works on all CPUs,
- * but it will fault at CPL3 (i.e. Xen PV and lguest).
+ * but it will fault at CPL3 (i.e. Xen PV).
*
* CPUID is the conventional way, but it's nasty: it doesn't
* exist on some 486-like CPUs, and it usually exits to a
@@ -684,6 +691,7 @@ static inline void sync_core(void)
unsigned int tmp;
asm volatile (
+ UNWIND_HINT_SAVE
"mov %%ss, %0\n\t"
"pushq %q0\n\t"
"pushq %%rsp\n\t"
@@ -693,6 +701,7 @@ static inline void sync_core(void)
"pushq %q0\n\t"
"pushq $1f\n\t"
"iretq\n\t"
+ UNWIND_HINT_RESTORE
"1:"
: "=&r" (tmp), "+r" (__sp) : : "cc", "memory");
#endif
@@ -802,7 +811,9 @@ static inline void spin_lock_prefetch(const void *x)
*/
#define IA32_PAGE_OFFSET PAGE_OFFSET
#define TASK_SIZE PAGE_OFFSET
+#define TASK_SIZE_LOW TASK_SIZE
#define TASK_SIZE_MAX TASK_SIZE
+#define DEFAULT_MAP_WINDOW TASK_SIZE
#define STACK_TOP TASK_SIZE
#define STACK_TOP_MAX STACK_TOP
@@ -842,7 +853,9 @@ static inline void spin_lock_prefetch(const void *x)
* particular problem by preventing anything from being mapped
* at the maximum canonical address.
*/
-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
+#define TASK_SIZE_MAX ((1UL << __VIRTUAL_MASK_SHIFT) - PAGE_SIZE)
+
+#define DEFAULT_MAP_WINDOW ((1UL << 47) - PAGE_SIZE)
/* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
@@ -850,12 +863,14 @@ static inline void spin_lock_prefetch(const void *x)
#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
0xc0000000 : 0xFFFFe000)
+#define TASK_SIZE_LOW (test_thread_flag(TIF_ADDR32) ? \
+ IA32_PAGE_OFFSET : DEFAULT_MAP_WINDOW)
#define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
IA32_PAGE_OFFSET : TASK_SIZE_MAX)
#define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_ADDR32)) ? \
IA32_PAGE_OFFSET : TASK_SIZE_MAX)
-#define STACK_TOP TASK_SIZE
+#define STACK_TOP TASK_SIZE_LOW
#define STACK_TOP_MAX TASK_SIZE_MAX
#define INIT_THREAD { \
@@ -876,7 +891,7 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
* space during mmap's.
*/
#define __TASK_UNMAPPED_BASE(task_size) (PAGE_ALIGN(task_size / 3))
-#define TASK_UNMAPPED_BASE __TASK_UNMAPPED_BASE(TASK_SIZE)
+#define TASK_UNMAPPED_BASE __TASK_UNMAPPED_BASE(TASK_SIZE_LOW)
#define KSTK_EIP(task) (task_pt_regs(task)->ip)
diff --git a/arch/x86/include/asm/proto.h b/arch/x86/include/asm/proto.h
index 8d3964fc5f91..b408b1886195 100644
--- a/arch/x86/include/asm/proto.h
+++ b/arch/x86/include/asm/proto.h
@@ -24,6 +24,9 @@ void entry_SYSENTER_compat(void);
void __end_entry_SYSENTER_compat(void);
void entry_SYSCALL_compat(void);
void entry_INT80_compat(void);
+#if defined(CONFIG_X86_64) && defined(CONFIG_XEN_PV)
+void xen_entry_INT80_compat(void);
+#endif
#endif
void x86_configure_nx(void);
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 2b5d686ea9f3..91c04c8e67fa 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -9,6 +9,20 @@
#ifdef __i386__
struct pt_regs {
+ /*
+ * NB: 32-bit x86 CPUs are inconsistent as to what happens in the
+ * following cases (where %seg represents a segment register):
+ *
+ * - pushl %seg: some do a 16-bit write and leave the high
+ * bits alone
+ * - movl %seg, [mem]: some do a 16-bit write despite the movl
+ * - IDT entry: some (e.g. 486) will leave the high bits of CS
+ * and (if applicable) SS undefined.
+ *
+ * Fortunately, x86-32 doesn't read the high bits on POP or IRET,
+ * so we can just treat all of the segment registers as 16-bit
+ * values.
+ */
unsigned long bx;
unsigned long cx;
unsigned long dx;
@@ -16,16 +30,22 @@ struct pt_regs {
unsigned long di;
unsigned long bp;
unsigned long ax;
- unsigned long ds;
- unsigned long es;
- unsigned long fs;
- unsigned long gs;
+ unsigned short ds;
+ unsigned short __dsh;
+ unsigned short es;
+ unsigned short __esh;
+ unsigned short fs;
+ unsigned short __fsh;
+ unsigned short gs;
+ unsigned short __gsh;
unsigned long orig_ax;
unsigned long ip;
- unsigned long cs;
+ unsigned short cs;
+ unsigned short __csh;
unsigned long flags;
unsigned long sp;
- unsigned long ss;
+ unsigned short ss;
+ unsigned short __ssh;
};
#else /* __i386__ */
@@ -176,6 +196,17 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
if (offset == offsetof(struct pt_regs, sp) &&
regs->cs == __KERNEL_CS)
return kernel_stack_pointer(regs);
+
+ /* The selector fields are 16-bit. */
+ if (offset == offsetof(struct pt_regs, cs) ||
+ offset == offsetof(struct pt_regs, ss) ||
+ offset == offsetof(struct pt_regs, ds) ||
+ offset == offsetof(struct pt_regs, es) ||
+ offset == offsetof(struct pt_regs, fs) ||
+ offset == offsetof(struct pt_regs, gs)) {
+ return *(u16 *)((unsigned long)regs + offset);
+
+ }
#endif
return *(unsigned long *)((unsigned long)regs + offset);
}
diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
index 230e1903acf0..90d91520c13a 100644
--- a/arch/x86/include/asm/realmode.h
+++ b/arch/x86/include/asm/realmode.h
@@ -1,6 +1,15 @@
#ifndef _ARCH_X86_REALMODE_H
#define _ARCH_X86_REALMODE_H
+/*
+ * Flag bit definitions for use with the flags field of the trampoline header
+ * in the CONFIG_X86_64 variant.
+ */
+#define TH_FLAGS_SME_ACTIVE_BIT 0
+#define TH_FLAGS_SME_ACTIVE BIT(TH_FLAGS_SME_ACTIVE_BIT)
+
+#ifndef __ASSEMBLY__
+
#include <linux/types.h>
#include <asm/io.h>
@@ -38,6 +47,7 @@ struct trampoline_header {
u64 start;
u64 efer;
u32 cr4;
+ u32 flags;
#endif
};
@@ -69,4 +79,6 @@ static inline size_t real_mode_size_needed(void)
void set_real_mode_mem(phys_addr_t mem, size_t size);
void reserve_real_mode(void);
+#endif /* __ASSEMBLY__ */
+
#endif /* _ARCH_X86_REALMODE_H */
diff --git a/arch/x86/include/asm/refcount.h b/arch/x86/include/asm/refcount.h
new file mode 100644
index 000000000000..ff871210b9f2
--- /dev/null
+++ b/arch/x86/include/asm/refcount.h
@@ -0,0 +1,109 @@
+#ifndef __ASM_X86_REFCOUNT_H
+#define __ASM_X86_REFCOUNT_H
+/*
+ * x86-specific implementation of refcount_t. Based on PAX_REFCOUNT from
+ * PaX/grsecurity.
+ */
+#include <linux/refcount.h>
+
+/*
+ * This is the first portion of the refcount error handling, which lives in
+ * .text.unlikely, and is jumped to from the CPU flag check (in the
+ * following macros). This saves the refcount value location into CX for
+ * the exception handler to use (in mm/extable.c), and then triggers the
+ * central refcount exception. The fixup address for the exception points
+ * back to the regular execution flow in .text.
+ */
+#define _REFCOUNT_EXCEPTION \
+ ".pushsection .text.unlikely\n" \
+ "111:\tlea %[counter], %%" _ASM_CX "\n" \
+ "112:\t" ASM_UD0 "\n" \
+ ASM_UNREACHABLE \
+ ".popsection\n" \
+ "113:\n" \
+ _ASM_EXTABLE_REFCOUNT(112b, 113b)
+
+/* Trigger refcount exception if refcount result is negative. */
+#define REFCOUNT_CHECK_LT_ZERO \
+ "js 111f\n\t" \
+ _REFCOUNT_EXCEPTION
+
+/* Trigger refcount exception if refcount result is zero or negative. */
+#define REFCOUNT_CHECK_LE_ZERO \
+ "jz 111f\n\t" \
+ REFCOUNT_CHECK_LT_ZERO
+
+/* Trigger refcount exception unconditionally. */
+#define REFCOUNT_ERROR \
+ "jmp 111f\n\t" \
+ _REFCOUNT_EXCEPTION
+
+static __always_inline void refcount_add(unsigned int i, refcount_t *r)
+{
+ asm volatile(LOCK_PREFIX "addl %1,%0\n\t"
+ REFCOUNT_CHECK_LT_ZERO
+ : [counter] "+m" (r->refs.counter)
+ : "ir" (i)
+ : "cc", "cx");
+}
+
+static __always_inline void refcount_inc(refcount_t *r)
+{
+ asm volatile(LOCK_PREFIX "incl %0\n\t"
+ REFCOUNT_CHECK_LT_ZERO
+ : [counter] "+m" (r->refs.counter)
+ : : "cc", "cx");
+}
+
+static __always_inline void refcount_dec(refcount_t *r)
+{
+ asm volatile(LOCK_PREFIX "decl %0\n\t"
+ REFCOUNT_CHECK_LE_ZERO
+ : [counter] "+m" (r->refs.counter)
+ : : "cc", "cx");
+}
+
+static __always_inline __must_check
+bool refcount_sub_and_test(unsigned int i, refcount_t *r)
+{
+ GEN_BINARY_SUFFIXED_RMWcc(LOCK_PREFIX "subl", REFCOUNT_CHECK_LT_ZERO,
+ r->refs.counter, "er", i, "%0", e);
+}
+
+static __always_inline __must_check bool refcount_dec_and_test(refcount_t *r)
+{
+ GEN_UNARY_SUFFIXED_RMWcc(LOCK_PREFIX "decl", REFCOUNT_CHECK_LT_ZERO,
+ r->refs.counter, "%0", e);
+}
+
+static __always_inline __must_check
+bool refcount_add_not_zero(unsigned int i, refcount_t *r)
+{
+ int c, result;
+
+ c = atomic_read(&(r->refs));
+ do {
+ if (unlikely(c == 0))
+ return false;
+
+ result = c + i;
+
+ /* Did we try to increment from/to an undesirable state? */
+ if (unlikely(c < 0 || c == INT_MAX || result < c)) {
+ asm volatile(REFCOUNT_ERROR
+ : : [counter] "m" (r->refs.counter)
+ : "cc", "cx");
+ break;
+ }
+
+ } while (!atomic_try_cmpxchg(&(r->refs), &c, result));
+
+ return c != 0;
+}
+
+static __always_inline __must_check bool refcount_inc_not_zero(refcount_t *r)
+{
+ return refcount_add_not_zero(1, r);
+}
+
+#endif
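
(Usage is unchanged from the generic refcount_t API; a hypothetical object,
not part of this patch, would take and drop references like this:)

#include <linux/refcount.h>
#include <linux/slab.h>

struct foo {
	refcount_t refs;
};

static void foo_get(struct foo *f)
{
	refcount_inc(&f->refs);
}

static void foo_put(struct foo *f)
{
	/* Frees the object on the final put; misuse now traps via UD0. */
	if (refcount_dec_and_test(&f->refs))
		kfree(f);
}
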
diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
index 661dd305694a..045f99211a99 100644
--- a/arch/x86/include/asm/rmwcc.h
+++ b/arch/x86/include/asm/rmwcc.h
@@ -1,45 +1,56 @@
#ifndef _ASM_X86_RMWcc
#define _ASM_X86_RMWcc
+#define __CLOBBERS_MEM "memory"
+#define __CLOBBERS_MEM_CC_CX "memory", "cc", "cx"
+
#if !defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(CC_HAVE_ASM_GOTO)
/* Use asm goto */
-#define __GEN_RMWcc(fullop, var, cc, ...) \
+#define __GEN_RMWcc(fullop, var, cc, clobbers, ...) \
do { \
asm_volatile_goto (fullop "; j" #cc " %l[cc_label]" \
- : : "m" (var), ## __VA_ARGS__ \
- : "memory" : cc_label); \
+ : : [counter] "m" (var), ## __VA_ARGS__ \
+ : clobbers : cc_label); \
return 0; \
cc_label: \
return 1; \
} while (0)
-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
- __GEN_RMWcc(op " " arg0, var, cc)
+#define __BINARY_RMWcc_ARG " %1, "
-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
- __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
#else /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */
/* Use flags output or a set instruction */
-#define __GEN_RMWcc(fullop, var, cc, ...) \
+#define __GEN_RMWcc(fullop, var, cc, clobbers, ...) \
do { \
bool c; \
asm volatile (fullop ";" CC_SET(cc) \
- : "+m" (var), CC_OUT(cc) (c) \
- : __VA_ARGS__ : "memory"); \
+ : [counter] "+m" (var), CC_OUT(cc) (c) \
+ : __VA_ARGS__ : clobbers); \
return c; \
} while (0)
+#define __BINARY_RMWcc_ARG " %2, "
+
+#endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */
+
#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
- __GEN_RMWcc(op " " arg0, var, cc)
+ __GEN_RMWcc(op " " arg0, var, cc, __CLOBBERS_MEM)
+
+#define GEN_UNARY_SUFFIXED_RMWcc(op, suffix, var, arg0, cc) \
+ __GEN_RMWcc(op " " arg0 "\n\t" suffix, var, cc, \
+ __CLOBBERS_MEM_CC_CX)
#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
- __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
+ __GEN_RMWcc(op __BINARY_RMWcc_ARG arg0, var, cc, \
+ __CLOBBERS_MEM, vcon (val))
-#endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */
+#define GEN_BINARY_SUFFIXED_RMWcc(op, suffix, var, vcon, val, arg0, cc) \
+ __GEN_RMWcc(op __BINARY_RMWcc_ARG arg0 "\n\t" suffix, var, cc, \
+ __CLOBBERS_MEM_CC_CX, vcon (val))
#endif /* _ASM_X86_RMWcc */
diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
index 1549caa098f0..066aaf813141 100644
--- a/arch/x86/include/asm/segment.h
+++ b/arch/x86/include/asm/segment.h
@@ -238,9 +238,7 @@
#ifndef __ASSEMBLY__
extern const char early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDLER_SIZE];
-#ifdef CONFIG_TRACING
-# define trace_early_idt_handler_array early_idt_handler_array
-#endif
+extern void early_ignore_irq(void);
/*
* Load a segment. Fall back on loading the zero segment if something goes
diff --git a/arch/x86/include/asm/set_memory.h b/arch/x86/include/asm/set_memory.h
index eaec6c364e42..cd71273ec49d 100644
--- a/arch/x86/include/asm/set_memory.h
+++ b/arch/x86/include/asm/set_memory.h
@@ -11,6 +11,7 @@
* Executability : eXeutable, NoteXecutable
* Read/Write : ReadOnly, ReadWrite
* Presence : NotPresent
+ * Encryption : Encrypted, Decrypted
*
* Within a category, the attributes are mutually exclusive.
*
@@ -42,6 +43,8 @@ int set_memory_wt(unsigned long addr, int numpages);
int set_memory_wb(unsigned long addr, int numpages);
int set_memory_np(unsigned long addr, int numpages);
int set_memory_4k(unsigned long addr, int numpages);
+int set_memory_encrypted(unsigned long addr, int numpages);
+int set_memory_decrypted(unsigned long addr, int numpages);
int set_memory_array_uc(unsigned long *addr, int addrinarray);
int set_memory_array_wc(unsigned long *addr, int addrinarray);
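
(Illustrative sketch, not part of this patch: a hypothetical driver clearing
the encryption attribute on a page it shares with a device. Only
set_memory_decrypted() comes from this header; the matching
set_memory_encrypted() call before freeing is elided.)

#include <linux/gfp.h>
#include <asm/set_memory.h>

static void *example_alloc_shared_page(void)
{
	unsigned long addr = __get_free_page(GFP_KERNEL);

	if (!addr)
		return NULL;

	/* Make the page accessible in the clear when SME is active. */
	if (set_memory_decrypted(addr, 1)) {
		free_page(addr);
		return NULL;
	}

	return (void *)addr;
}
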
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index e4585a393965..a65cf544686a 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -39,6 +39,7 @@ static inline void vsmp_init(void) { }
#endif
void setup_bios_corruption_check(void);
+void early_platform_quirks(void);
extern unsigned long saved_video_mode;
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index e00e1bd6e7b3..5161da1a0fa0 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -98,6 +98,7 @@ struct thread_info {
#define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
#define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
#define TIF_X32 30 /* 32-bit native x86-64 binary */
+#define TIF_FSCHECK 31 /* Check FS is USER_DS on return */
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
@@ -122,6 +123,7 @@ struct thread_info {
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_ADDR32 (1 << TIF_ADDR32)
#define _TIF_X32 (1 << TIF_X32)
+#define _TIF_FSCHECK (1 << TIF_FSCHECK)
/*
* work to do in syscall_trace_enter(). Also includes TIF_NOHZ for
@@ -137,7 +139,8 @@ struct thread_info {
(_TIF_SYSCALL_TRACE | _TIF_NOTIFY_RESUME | _TIF_SIGPENDING | \
_TIF_NEED_RESCHED | _TIF_SINGLESTEP | _TIF_SYSCALL_EMU | \
_TIF_SYSCALL_AUDIT | _TIF_USER_RETURN_NOTIFY | _TIF_UPROBE | \
- _TIF_PATCH_PENDING | _TIF_NOHZ | _TIF_SYSCALL_TRACEPOINT)
+ _TIF_PATCH_PENDING | _TIF_NOHZ | _TIF_SYSCALL_TRACEPOINT | \
+ _TIF_FSCHECK)
/* flags to check in __switch_to() */
#define _TIF_WORK_CTXSW \
diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h
index c7797307fc2b..79a4ca6a9606 100644
--- a/arch/x86/include/asm/tlb.h
+++ b/arch/x86/include/asm/tlb.h
@@ -15,4 +15,18 @@
#include <asm-generic/tlb.h>
+/*
+ * While the x86 architecture in general requires an IPI to perform TLB
+ * shootdown, enablement code for several hypervisors overrides the
+ * .flush_tlb_others hook in pv_mmu_ops and implements it by issuing
+ * a hypercall. To keep software pagetable walkers safe in this case we
+ * switch to RCU based table free (HAVE_RCU_TABLE_FREE). See the comment
+ * below 'ifdef CONFIG_HAVE_RCU_TABLE_FREE' in include/asm-generic/tlb.h
+ * for more details.
+ */
+static inline void __tlb_remove_table(void *table)
+{
+ free_page_and_swap_cache(table);
+}
+
#endif /* _ASM_X86_TLB_H */
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 50ea3482e1d1..4893abf7f74f 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -57,6 +57,23 @@ static inline void invpcid_flush_all_nonglobals(void)
__invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
}
+static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
+{
+ u64 new_tlb_gen;
+
+ /*
+ * Bump the generation count. This also serves as a full barrier
+ * that synchronizes with switch_mm(): callers are required to order
+ * their read of mm_cpumask after their writes to the paging
+ * structures.
+ */
+ smp_mb__before_atomic();
+ new_tlb_gen = atomic64_inc_return(&mm->context.tlb_gen);
+ smp_mb__after_atomic();
+
+ return new_tlb_gen;
+}
+
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
@@ -65,6 +82,17 @@ static inline void invpcid_flush_all_nonglobals(void)
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif
+/*
+ * 6 because 6 should be plenty and struct tlb_state will fit in
+ * two cache lines.
+ */
+#define TLB_NR_DYN_ASIDS 6
+
+struct tlb_context {
+ u64 ctx_id;
+ u64 tlb_gen;
+};
+
struct tlb_state {
/*
* cpu_tlbstate.loaded_mm should match CR3 whenever interrupts
@@ -73,13 +101,35 @@ struct tlb_state {
* mode even if we've already switched back to swapper_pg_dir.
*/
struct mm_struct *loaded_mm;
- int state;
+ u16 loaded_mm_asid;
+ u16 next_asid;
/*
* Access to this CR4 shadow and to H/W CR4 is protected by
* disabling interrupts when modifying either one.
*/
unsigned long cr4;
+
+ /*
+ * This is a list of all contexts that might exist in the TLB.
+ * There is one per ASID that we use, and the ASID (what the
+ * CPU calls PCID) is the index into ctxts.
+ *
+ * For each context, ctx_id indicates which mm the TLB's user
+ * entries came from. As an invariant, the TLB will never
+ * contain entries that were out-of-date as of the time that mm
+ * reached the tlb_gen recorded in the list.
+ *
+ * To be clear, this means that it's legal for the TLB code to
+ * flush the TLB without updating tlb_gen. This can happen
+ * (for now, at least) due to paravirt remote flushes.
+ *
+ * NB: context 0 is a bit special, since it's also used by
+ * various bits of init code. This is fine -- code that
+ * isn't aware of PCID will end up harmlessly flushing
+ * context 0.
+ */
+ struct tlb_context ctxs[TLB_NR_DYN_ASIDS];
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
@@ -148,6 +198,8 @@ static inline void cr4_set_bits_and_update_boot(unsigned long mask)
cr4_set_bits(mask);
}
+extern void initialize_tlbstate_and_flush(void);
+
static inline void __native_flush_tlb(void)
{
/*
@@ -207,6 +259,14 @@ static inline void __flush_tlb_all(void)
__flush_tlb_global();
else
__flush_tlb();
+
+ /*
+ * Note: if we somehow had PCID but not PGE, then this wouldn't work --
+ * we'd end up flushing kernel translations for the current ASID but
+ * we might fail to flush kernel translations for other cached ASIDs.
+ *
+ * To avoid this issue, we force PCID off if PGE is off.
+ */
}
static inline void __flush_tlb_one(unsigned long addr)
@@ -231,9 +291,26 @@ static inline void __flush_tlb_one(unsigned long addr)
* and page-granular flushes are available only on i486 and up.
*/
struct flush_tlb_info {
- struct mm_struct *mm;
- unsigned long start;
- unsigned long end;
+ /*
+ * We support several kinds of flushes.
+ *
+ * - Fully flush a single mm. .mm will be set, .end will be
+ * TLB_FLUSH_ALL, and .new_tlb_gen will be the tlb_gen to
+ * which the IPI sender is trying to catch us up.
+ *
+ * - Partially flush a single mm. .mm will be set, .start and
+ * .end will indicate the range, and .new_tlb_gen will be set
+ * such that the changes between generation .new_tlb_gen-1 and
+ * .new_tlb_gen are entirely contained in the indicated range.
+ *
+ * - Fully flush all mms whose tlb_gens have been updated. .mm
+ * will be NULL, .end will be TLB_FLUSH_ALL, and .new_tlb_gen
+ * will be zero.
+ */
+ struct mm_struct *mm;
+ unsigned long start;
+ unsigned long end;
+ u64 new_tlb_gen;
};
#define local_flush_tlb() __flush_tlb()
@@ -256,12 +333,10 @@ static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
void native_flush_tlb_others(const struct cpumask *cpumask,
const struct flush_tlb_info *info);
-#define TLBSTATE_OK 1
-#define TLBSTATE_LAZY 2
-
static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
struct mm_struct *mm)
{
+ inc_mm_tlb_gen(mm);
cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 6358a85e2270..c1d2a9892352 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -75,12 +75,6 @@ static inline const struct cpumask *cpumask_of_node(int node)
extern void setup_node_to_cpumask_map(void);
-/*
- * Returns the number of the node containing Node 'node'. This
- * architecture is flat, so it is a pretty simple function!
- */
-#define parent_node(node) (node)
-
#define pcibus_to_node(bus) __pcibus_to_node(bus)
extern int __node_distance(int, int);
diff --git a/arch/x86/include/asm/trace/common.h b/arch/x86/include/asm/trace/common.h
new file mode 100644
index 000000000000..57c8da027d99
--- /dev/null
+++ b/arch/x86/include/asm/trace/common.h
@@ -0,0 +1,16 @@
+#ifndef _ASM_TRACE_COMMON_H
+#define _ASM_TRACE_COMMON_H
+
+#ifdef CONFIG_TRACING
+DECLARE_STATIC_KEY_FALSE(trace_pagefault_key);
+#define trace_pagefault_enabled() \
+ static_branch_unlikely(&trace_pagefault_key)
+DECLARE_STATIC_KEY_FALSE(trace_resched_ipi_key);
+#define trace_resched_ipi_enabled() \
+ static_branch_unlikely(&trace_resched_ipi_key)
+#else
+static inline bool trace_pagefault_enabled(void) { return false; }
+static inline bool trace_resched_ipi_enabled(void) { return false; }
+#endif
+
+#endif
diff --git a/arch/x86/include/asm/trace/exceptions.h b/arch/x86/include/asm/trace/exceptions.h
index 2422b14c50a7..5665bf205b8d 100644
--- a/arch/x86/include/asm/trace/exceptions.h
+++ b/arch/x86/include/asm/trace/exceptions.h
@@ -5,9 +5,10 @@
#define _TRACE_PAGE_FAULT_H
#include <linux/tracepoint.h>
+#include <asm/trace/common.h>
-extern int trace_irq_vector_regfunc(void);
-extern void trace_irq_vector_unregfunc(void);
+extern int trace_pagefault_reg(void);
+extern void trace_pagefault_unreg(void);
DECLARE_EVENT_CLASS(x86_exceptions,
@@ -37,8 +38,7 @@ DEFINE_EVENT_FN(x86_exceptions, name, \
TP_PROTO(unsigned long address, struct pt_regs *regs, \
unsigned long error_code), \
TP_ARGS(address, regs, error_code), \
- trace_irq_vector_regfunc, \
- trace_irq_vector_unregfunc);
+ trace_pagefault_reg, trace_pagefault_unreg);
DEFINE_PAGE_FAULT_EVENT(page_fault_user);
DEFINE_PAGE_FAULT_EVENT(page_fault_kernel);
diff --git a/arch/x86/include/asm/trace/hyperv.h b/arch/x86/include/asm/trace/hyperv.h
new file mode 100644
index 000000000000..4253bca99989
--- /dev/null
+++ b/arch/x86/include/asm/trace/hyperv.h
@@ -0,0 +1,40 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hyperv
+
+#if !defined(_TRACE_HYPERV_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HYPERV_H
+
+#include <linux/tracepoint.h>
+
+#if IS_ENABLED(CONFIG_HYPERV)
+
+TRACE_EVENT(hyperv_mmu_flush_tlb_others,
+ TP_PROTO(const struct cpumask *cpus,
+ const struct flush_tlb_info *info),
+ TP_ARGS(cpus, info),
+ TP_STRUCT__entry(
+ __field(unsigned int, ncpus)
+ __field(struct mm_struct *, mm)
+ __field(unsigned long, addr)
+ __field(unsigned long, end)
+ ),
+ TP_fast_assign(__entry->ncpus = cpumask_weight(cpus);
+ __entry->mm = info->mm;
+ __entry->addr = info->start;
+ __entry->end = info->end;
+ ),
+ TP_printk("ncpus %d mm %p addr %lx, end %lx",
+ __entry->ncpus, __entry->mm,
+ __entry->addr, __entry->end)
+ );
+
+#endif /* CONFIG_HYPERV */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH asm/trace/
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE hyperv
+#endif /* _TRACE_HYPERV_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/arch/x86/include/asm/trace/irq_vectors.h b/arch/x86/include/asm/trace/irq_vectors.h
index 32dd6a9e343c..1599d394c8c1 100644
--- a/arch/x86/include/asm/trace/irq_vectors.h
+++ b/arch/x86/include/asm/trace/irq_vectors.h
@@ -5,9 +5,12 @@
#define _TRACE_IRQ_VECTORS_H
#include <linux/tracepoint.h>
+#include <asm/trace/common.h>
-extern int trace_irq_vector_regfunc(void);
-extern void trace_irq_vector_unregfunc(void);
+#ifdef CONFIG_X86_LOCAL_APIC
+
+extern int trace_resched_ipi_reg(void);
+extern void trace_resched_ipi_unreg(void);
DECLARE_EVENT_CLASS(x86_irq_vector,
@@ -28,15 +31,22 @@ DECLARE_EVENT_CLASS(x86_irq_vector,
#define DEFINE_IRQ_VECTOR_EVENT(name) \
DEFINE_EVENT_FN(x86_irq_vector, name##_entry, \
TP_PROTO(int vector), \
+ TP_ARGS(vector), NULL, NULL); \
+DEFINE_EVENT_FN(x86_irq_vector, name##_exit, \
+ TP_PROTO(int vector), \
+ TP_ARGS(vector), NULL, NULL);
+
+#define DEFINE_RESCHED_IPI_EVENT(name) \
+DEFINE_EVENT_FN(x86_irq_vector, name##_entry, \
+ TP_PROTO(int vector), \
TP_ARGS(vector), \
- trace_irq_vector_regfunc, \
- trace_irq_vector_unregfunc); \
+ trace_resched_ipi_reg, \
+ trace_resched_ipi_unreg); \
DEFINE_EVENT_FN(x86_irq_vector, name##_exit, \
TP_PROTO(int vector), \
TP_ARGS(vector), \
- trace_irq_vector_regfunc, \
- trace_irq_vector_unregfunc);
-
+ trace_resched_ipi_reg, \
+ trace_resched_ipi_unreg);
/*
* local_timer - called when entering/exiting a local timer interrupt
@@ -45,11 +55,6 @@ DEFINE_EVENT_FN(x86_irq_vector, name##_exit, \
DEFINE_IRQ_VECTOR_EVENT(local_timer);
/*
- * reschedule - called when entering/exiting a reschedule vector handler
- */
-DEFINE_IRQ_VECTOR_EVENT(reschedule);
-
-/*
* spurious_apic - called when entering/exiting a spurious apic vector handler
*/
DEFINE_IRQ_VECTOR_EVENT(spurious_apic);
@@ -65,6 +70,7 @@ DEFINE_IRQ_VECTOR_EVENT(error_apic);
*/
DEFINE_IRQ_VECTOR_EVENT(x86_platform_ipi);
+#ifdef CONFIG_IRQ_WORK
/*
* irq_work - called when entering/exiting a irq work interrupt
* vector handler
@@ -81,6 +87,18 @@ DEFINE_IRQ_VECTOR_EVENT(irq_work);
* 4) goto 1
*/
TRACE_EVENT_PERF_PERM(irq_work_exit, is_sampling_event(p_event) ? -EPERM : 0);
+#endif
+
+/*
+ * The ifdef is required because that tracepoint macro hell emits tracepoint
+ * code in files which include this header even if the tracepoint is not
+ * enabled. Brilliant stuff that.
+ */
+#ifdef CONFIG_SMP
+/*
+ * reschedule - called when entering/exiting a reschedule vector handler
+ */
+DEFINE_RESCHED_IPI_EVENT(reschedule);
/*
* call_function - called when entering/exiting a call function interrupt
@@ -93,24 +111,33 @@ DEFINE_IRQ_VECTOR_EVENT(call_function);
* single interrupt vector handler
*/
DEFINE_IRQ_VECTOR_EVENT(call_function_single);
+#endif
+#ifdef CONFIG_X86_MCE_THRESHOLD
/*
* threshold_apic - called when entering/exiting a threshold apic interrupt
* vector handler
*/
DEFINE_IRQ_VECTOR_EVENT(threshold_apic);
+#endif
+#ifdef CONFIG_X86_MCE_AMD
/*
* deferred_error_apic - called when entering/exiting a deferred apic interrupt
* vector handler
*/
DEFINE_IRQ_VECTOR_EVENT(deferred_error_apic);
+#endif
+#ifdef CONFIG_X86_THERMAL_VECTOR
/*
* thermal_apic - called when entering/exiting a thermal apic interrupt
* vector handler
*/
DEFINE_IRQ_VECTOR_EVENT(thermal_apic);
+#endif
+
+#endif /* CONFIG_X86_LOCAL_APIC */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index 01fd0a7f48cd..5545f6459bf5 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -13,9 +13,6 @@ asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
-asmlinkage void xen_debug(void);
-asmlinkage void xen_int3(void);
-asmlinkage void xen_stack_segment(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
@@ -38,22 +35,29 @@ asmlinkage void machine_check(void);
#endif /* CONFIG_X86_MCE */
asmlinkage void simd_coprocessor_error(void);
-#ifdef CONFIG_TRACING
-asmlinkage void trace_page_fault(void);
-#define trace_stack_segment stack_segment
-#define trace_divide_error divide_error
-#define trace_bounds bounds
-#define trace_invalid_op invalid_op
-#define trace_device_not_available device_not_available
-#define trace_coprocessor_segment_overrun coprocessor_segment_overrun
-#define trace_invalid_TSS invalid_TSS
-#define trace_segment_not_present segment_not_present
-#define trace_general_protection general_protection
-#define trace_spurious_interrupt_bug spurious_interrupt_bug
-#define trace_coprocessor_error coprocessor_error
-#define trace_alignment_check alignment_check
-#define trace_simd_coprocessor_error simd_coprocessor_error
-#define trace_async_page_fault async_page_fault
+#if defined(CONFIG_X86_64) && defined(CONFIG_XEN_PV)
+asmlinkage void xen_divide_error(void);
+asmlinkage void xen_xendebug(void);
+asmlinkage void xen_xenint3(void);
+asmlinkage void xen_nmi(void);
+asmlinkage void xen_overflow(void);
+asmlinkage void xen_bounds(void);
+asmlinkage void xen_invalid_op(void);
+asmlinkage void xen_device_not_available(void);
+asmlinkage void xen_double_fault(void);
+asmlinkage void xen_coprocessor_segment_overrun(void);
+asmlinkage void xen_invalid_TSS(void);
+asmlinkage void xen_segment_not_present(void);
+asmlinkage void xen_stack_segment(void);
+asmlinkage void xen_general_protection(void);
+asmlinkage void xen_page_fault(void);
+asmlinkage void xen_spurious_interrupt_bug(void);
+asmlinkage void xen_coprocessor_error(void);
+asmlinkage void xen_alignment_check(void);
+#ifdef CONFIG_X86_MCE
+asmlinkage void xen_machine_check(void);
+#endif /* CONFIG_X86_MCE */
+asmlinkage void xen_simd_coprocessor_error(void);
#endif
dotraplinkage void do_divide_error(struct pt_regs *, long);
@@ -74,14 +78,6 @@ asmlinkage struct pt_regs *sync_regs(struct pt_regs *);
#endif
dotraplinkage void do_general_protection(struct pt_regs *, long);
dotraplinkage void do_page_fault(struct pt_regs *, unsigned long);
-#ifdef CONFIG_TRACING
-dotraplinkage void trace_do_page_fault(struct pt_regs *, unsigned long);
-#else
-static inline void trace_do_page_fault(struct pt_regs *regs, unsigned long error)
-{
- do_page_fault(regs, error);
-}
-#endif
dotraplinkage void do_spurious_interrupt_bug(struct pt_regs *, long);
dotraplinkage void do_coprocessor_error(struct pt_regs *, long);
dotraplinkage void do_alignment_check(struct pt_regs *, long);
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 30269dafec47..184eb9894dae 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -26,7 +26,12 @@
#define get_ds() (KERNEL_DS)
#define get_fs() (current->thread.addr_limit)
-#define set_fs(x) (current->thread.addr_limit = (x))
+static inline void set_fs(mm_segment_t fs)
+{
+ current->thread.addr_limit = fs;
+ /* On user-mode return, check fs is correct */
+ set_thread_flag(TIF_FSCHECK);
+}
#define segment_eq(a, b) ((a).seg == (b).seg)
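
(Illustrative sketch, not part of this patch: the classic transient
set_fs(KERNEL_DS) pattern that TIF_FSCHECK now verifies is undone before
returning to user mode. Modeled loosely on kernel_read(); the function name
is made up.)

#include <linux/fs.h>
#include <linux/uaccess.h>

static ssize_t example_kernel_read(struct file *file, void *buf,
				   size_t count, loff_t *pos)
{
	mm_segment_t old_fs = get_fs();
	ssize_t ret;

	set_fs(KERNEL_DS);	/* sets TIF_FSCHECK from now on */
	ret = vfs_read(file, (char __user *)buf, count, pos);
	set_fs(old_fs);		/* must be restored before the user-mode return */

	return ret;
}
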
diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h
index e6676495b125..e9f793e2df7a 100644
--- a/arch/x86/include/asm/unwind.h
+++ b/arch/x86/include/asm/unwind.h
@@ -12,11 +12,14 @@ struct unwind_state {
struct task_struct *task;
int graph_idx;
bool error;
-#ifdef CONFIG_FRAME_POINTER
+#if defined(CONFIG_ORC_UNWINDER)
+ bool signal, full_regs;
+ unsigned long sp, bp, ip;
+ struct pt_regs *regs;
+#elif defined(CONFIG_FRAME_POINTER_UNWINDER)
bool got_irq;
- unsigned long *bp, *orig_sp;
+ unsigned long *bp, *orig_sp, ip;
struct pt_regs *regs;
- unsigned long ip;
#else
unsigned long *sp;
#endif
@@ -24,41 +27,30 @@ struct unwind_state {
void __unwind_start(struct unwind_state *state, struct task_struct *task,
struct pt_regs *regs, unsigned long *first_frame);
-
bool unwind_next_frame(struct unwind_state *state);
-
unsigned long unwind_get_return_address(struct unwind_state *state);
+unsigned long *unwind_get_return_address_ptr(struct unwind_state *state);
static inline bool unwind_done(struct unwind_state *state)
{
return state->stack_info.type == STACK_TYPE_UNKNOWN;
}
-static inline
-void unwind_start(struct unwind_state *state, struct task_struct *task,
- struct pt_regs *regs, unsigned long *first_frame)
-{
- first_frame = first_frame ? : get_stack_pointer(task, regs);
-
- __unwind_start(state, task, regs, first_frame);
-}
-
static inline bool unwind_error(struct unwind_state *state)
{
return state->error;
}
-#ifdef CONFIG_FRAME_POINTER
-
static inline
-unsigned long *unwind_get_return_address_ptr(struct unwind_state *state)
+void unwind_start(struct unwind_state *state, struct task_struct *task,
+ struct pt_regs *regs, unsigned long *first_frame)
{
- if (unwind_done(state))
- return NULL;
+ first_frame = first_frame ? : get_stack_pointer(task, regs);
- return state->regs ? &state->regs->ip : state->bp + 1;
+ __unwind_start(state, task, regs, first_frame);
}
+#if defined(CONFIG_ORC_UNWINDER) || defined(CONFIG_FRAME_POINTER_UNWINDER)
static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state)
{
if (unwind_done(state))
@@ -66,20 +58,46 @@ static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state)
return state->regs;
}
-
-#else /* !CONFIG_FRAME_POINTER */
-
-static inline
-unsigned long *unwind_get_return_address_ptr(struct unwind_state *state)
+#else
+static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state)
{
return NULL;
}
+#endif
-static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state)
+#ifdef CONFIG_ORC_UNWINDER
+void unwind_init(void);
+void unwind_module_init(struct module *mod, void *orc_ip, size_t orc_ip_size,
+ void *orc, size_t orc_size);
+#else
+static inline void unwind_init(void) {}
+static inline
+void unwind_module_init(struct module *mod, void *orc_ip, size_t orc_ip_size,
+ void *orc, size_t orc_size) {}
+#endif
+
+/*
+ * This disables KASAN checking when reading a value from another task's stack,
+ * since the other task could be running on another CPU and could have poisoned
+ * the stack in the meantime.
+ */
+#define READ_ONCE_TASK_STACK(task, x) \
+({ \
+ unsigned long val; \
+ if (task == current) \
+ val = READ_ONCE(x); \
+ else \
+ val = READ_ONCE_NOCHECK(x); \
+ val; \
+})
+
+static inline bool task_on_another_cpu(struct task_struct *task)
{
- return NULL;
+#ifdef CONFIG_SMP
+ return task != current && task->on_cpu;
+#else
+ return false;
+#endif
}
-#endif /* CONFIG_FRAME_POINTER */
-
#endif /* _ASM_X86_UNWIND_H */
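
(The consumer side of the unwind API is unchanged; a stack walker iterates
frames the same way whichever unwinder is configured. Illustrative sketch,
not part of this patch; the function name is made up.)

#include <linux/printk.h>
#include <linux/sched.h>
#include <asm/unwind.h>

static void example_dump_stack(struct task_struct *task, struct pt_regs *regs)
{
	struct unwind_state state;

	for (unwind_start(&state, task, regs, NULL); !unwind_done(&state);
	     unwind_next_frame(&state)) {
		unsigned long addr = unwind_get_return_address(&state);

		if (!addr)
			break;
		printk("  %pS\n", (void *)addr);
	}
}
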
diff --git a/arch/x86/include/asm/unwind_hints.h b/arch/x86/include/asm/unwind_hints.h
new file mode 100644
index 000000000000..bae46fc6b9de
--- /dev/null
+++ b/arch/x86/include/asm/unwind_hints.h
@@ -0,0 +1,105 @@
+#ifndef _ASM_X86_UNWIND_HINTS_H
+#define _ASM_X86_UNWIND_HINTS_H
+
+#include "orc_types.h"
+
+#ifdef __ASSEMBLY__
+
+/*
+ * In asm, there are two kinds of code: normal C-type callable functions and
+ * the rest. The normal callable functions can be called by other code, and
+ * don't do anything unusual with the stack. Such normal callable functions
+ * are annotated with the ENTRY/ENDPROC macros. Most asm code falls in this
+ * category. In this case, no special debugging annotations are needed because
+ * objtool can automatically generate the ORC data for the ORC unwinder to read
+ * at runtime.
+ *
+ * Anything which doesn't fall into the above category, such as syscall and
+ * interrupt handlers, tends to not be called directly by other functions, and
+ * often does unusual non-C-function-type things with the stack pointer. Such
+ * code needs to be annotated such that objtool can understand it. The
+ * following CFI hint macros are for this type of code.
+ *
+ * These macros provide hints to objtool about the state of the stack at each
+ * instruction. Objtool starts from the hints and follows the code flow,
+ * making automatic CFI adjustments when it sees pushes and pops, filling out
+ * the debuginfo as necessary. It will also warn if it sees any
+ * inconsistencies.
+ */
+.macro UNWIND_HINT sp_reg=ORC_REG_SP sp_offset=0 type=ORC_TYPE_CALL
+#ifdef CONFIG_STACK_VALIDATION
+.Lunwind_hint_ip_\@:
+ .pushsection .discard.unwind_hints
+ /* struct unwind_hint */
+ .long .Lunwind_hint_ip_\@ - .
+ .short \sp_offset
+ .byte \sp_reg
+ .byte \type
+ .popsection
+#endif
+.endm
+
+.macro UNWIND_HINT_EMPTY
+ UNWIND_HINT sp_reg=ORC_REG_UNDEFINED
+.endm
+
+.macro UNWIND_HINT_REGS base=%rsp offset=0 indirect=0 extra=1 iret=0
+ .if \base == %rsp
+ .if \indirect
+ .set sp_reg, ORC_REG_SP_INDIRECT
+ .else
+ .set sp_reg, ORC_REG_SP
+ .endif
+ .elseif \base == %rbp
+ .set sp_reg, ORC_REG_BP
+ .elseif \base == %rdi
+ .set sp_reg, ORC_REG_DI
+ .elseif \base == %rdx
+ .set sp_reg, ORC_REG_DX
+ .elseif \base == %r10
+ .set sp_reg, ORC_REG_R10
+ .else
+ .error "UNWIND_HINT_REGS: bad base register"
+ .endif
+
+ .set sp_offset, \offset
+
+ .if \iret
+ .set type, ORC_TYPE_REGS_IRET
+ .elseif \extra == 0
+ .set type, ORC_TYPE_REGS_IRET
+ .set sp_offset, \offset + (16*8)
+ .else
+ .set type, ORC_TYPE_REGS
+ .endif
+
+ UNWIND_HINT sp_reg=sp_reg sp_offset=sp_offset type=type
+.endm
+
+.macro UNWIND_HINT_IRET_REGS base=%rsp offset=0
+ UNWIND_HINT_REGS base=\base offset=\offset iret=1
+.endm
+
+.macro UNWIND_HINT_FUNC sp_offset=8
+ UNWIND_HINT sp_offset=\sp_offset
+.endm
+
+#else /* !__ASSEMBLY__ */
+
+#define UNWIND_HINT(sp_reg, sp_offset, type) \
+ "987: \n\t" \
+ ".pushsection .discard.unwind_hints\n\t" \
+ /* struct unwind_hint */ \
+ ".long 987b - .\n\t" \
+ ".short " __stringify(sp_offset) "\n\t" \
+ ".byte " __stringify(sp_reg) "\n\t" \
+ ".byte " __stringify(type) "\n\t" \
+ ".popsection\n\t"
+
+#define UNWIND_HINT_SAVE UNWIND_HINT(0, 0, UNWIND_HINT_TYPE_SAVE)
+
+#define UNWIND_HINT_RESTORE UNWIND_HINT(0, 0, UNWIND_HINT_TYPE_RESTORE)
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_X86_UNWIND_HINTS_H */
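
On the C side, UNWIND_HINT_SAVE and UNWIND_HINT_RESTORE are string literals meant to be pasted into inline asm around a sequence that temporarily leaves the stack in a state objtool cannot follow on its own. A hedged sketch, assuming a purely illustrative helper with a 64-byte scratch area:

static inline void scratch_area_example(void)
{
	asm volatile(UNWIND_HINT_SAVE		/* record the current unwind state */
		     "sub $64, %%rsp\n\t"	/* stack layout objtool can't track */
		     /* ... use the scratch area ... */
		     "add $64, %%rsp\n\t"
		     UNWIND_HINT_RESTORE	/* back to the saved state */
		     : : : "memory");
}
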
diff --git a/arch/x86/include/asm/vga.h b/arch/x86/include/asm/vga.h
index c4b9dc2f67c5..9f42beefc67a 100644
--- a/arch/x86/include/asm/vga.h
+++ b/arch/x86/include/asm/vga.h
@@ -7,12 +7,24 @@
#ifndef _ASM_X86_VGA_H
#define _ASM_X86_VGA_H
+#include <asm/set_memory.h>
+
/*
* On the PC, we can just recalculate addresses and then
* access the videoram directly without any black magic.
+ * To support memory encryption however, we need to access
+ * the videoram as decrypted memory.
*/
-#define VGA_MAP_MEM(x, s) (unsigned long)phys_to_virt(x)
+#define VGA_MAP_MEM(x, s) \
+({ \
+ unsigned long start = (unsigned long)phys_to_virt(x); \
+ \
+ if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) \
+ set_memory_decrypted(start, (s) >> PAGE_SHIFT); \
+ \
+ start; \
+})
#define vga_readb(x) (*(x))
#define vga_writeb(x, y) (*(y) = (x))
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index 11071fcd630e..9606688caa4b 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -552,6 +552,8 @@ static inline void
MULTI_update_descriptor(struct multicall_entry *mcl, u64 maddr,
struct desc_struct desc)
{
+ u32 *p = (u32 *) &desc;
+
mcl->op = __HYPERVISOR_update_descriptor;
if (sizeof(maddr) == sizeof(long)) {
mcl->args[0] = maddr;
@@ -559,8 +561,8 @@ MULTI_update_descriptor(struct multicall_entry *mcl, u64 maddr,
} else {
mcl->args[0] = maddr;
mcl->args[1] = maddr >> 32;
- mcl->args[2] = desc.a;
- mcl->args[3] = desc.b;
+ mcl->args[2] = *p++;
+ mcl->args[3] = *p;
}
trace_xen_mc_entry(mcl, sizeof(maddr) == sizeof(long) ? 2 : 4);
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 8417ef7c3885..07b6531813c4 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -158,9 +158,6 @@ static inline unsigned long mfn_to_pfn_no_overrides(unsigned long mfn)
unsigned long pfn;
int ret;
- if (xen_feature(XENFEAT_auto_translated_physmap))
- return mfn;
-
if (unlikely(mfn >= machine_to_phys_nr))
return ~0;
@@ -317,8 +314,6 @@ static inline pte_t __pte_ma(pteval_t x)
#define p4d_val_ma(x) ((x).p4d)
#endif
-void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid);
-
xmaddr_t arbitrary_virt_to_machine(void *address);
unsigned long arbitrary_virt_to_mfn(void *vaddr);
void make_lowmem_page_readonly(void *vaddr);
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h
index ddef37b16af2..66b8f93333d1 100644
--- a/arch/x86/include/uapi/asm/bootparam.h
+++ b/arch/x86/include/uapi/asm/bootparam.h
@@ -201,7 +201,7 @@ struct boot_params {
*
* @X86_SUBARCH_PC: Should be used if the hardware is enumerable using standard
* PC mechanisms (PCI, ACPI) and doesn't need a special boot flow.
- * @X86_SUBARCH_LGUEST: Used for x86 hypervisor demo, lguest
+ * @X86_SUBARCH_LGUEST: Used for x86 hypervisor demo, lguest, deprecated
* @X86_SUBARCH_XEN: Used for Xen guest types which follow the PV boot path,
* which start at asm startup_xen() entry point and later jump to the C
* xen_start_kernel() entry point. Both domU and dom0 type of guests are
diff --git a/arch/x86/include/uapi/asm/hyperv.h b/arch/x86/include/uapi/asm/hyperv.h
index 127ddadee1a5..7032f4d8dff3 100644
--- a/arch/x86/include/uapi/asm/hyperv.h
+++ b/arch/x86/include/uapi/asm/hyperv.h
@@ -149,6 +149,9 @@
*/
#define HV_X64_DEPRECATING_AEOI_RECOMMENDED (1 << 9)
+/* Recommend using the newer ExProcessorMasks interface */
+#define HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED (1 << 11)
+
/*
* HV_VP_SET available
*/
@@ -242,7 +245,11 @@
(~((1ull << HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT) - 1))
/* Declare the various hypercall operations. */
+#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE 0x0002
+#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST 0x0003
#define HVCALL_NOTIFY_LONG_SPIN_WAIT 0x0008
+#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX 0x0013
+#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX 0x0014
#define HVCALL_POST_MESSAGE 0x005c
#define HVCALL_SIGNAL_EVENT 0x005d
@@ -259,6 +266,16 @@
#define HV_PROCESSOR_POWER_STATE_C2 2
#define HV_PROCESSOR_POWER_STATE_C3 3
+#define HV_FLUSH_ALL_PROCESSORS BIT(0)
+#define HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES BIT(1)
+#define HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY BIT(2)
+#define HV_FLUSH_USE_EXTENDED_RANGE_FORMAT BIT(3)
+
+enum HV_GENERIC_SET_FORMAT {
+ HV_GENERIC_SET_SPARCE_4K,
+ HV_GENERIC_SET_ALL,
+};
+
/* hypercall status code */
#define HV_STATUS_SUCCESS 0
#define HV_STATUS_INVALID_HYPERCALL_CODE 2
diff --git a/arch/x86/include/uapi/asm/mman.h b/arch/x86/include/uapi/asm/mman.h
index 39bca7fac087..3be08f07695c 100644
--- a/arch/x86/include/uapi/asm/mman.h
+++ b/arch/x86/include/uapi/asm/mman.h
@@ -3,9 +3,6 @@
#define MAP_32BIT 0x40 /* only give out 32bit addresses */
-#define MAP_HUGE_2MB (21 << MAP_HUGE_SHIFT)
-#define MAP_HUGE_1GB (30 << MAP_HUGE_SHIFT)
-
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
/*
* Take the 4 protection key bits out of the vma->vm_flags
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index a01892bdd61a..fd0a7895b63f 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -42,7 +42,7 @@ CFLAGS_irq.o := -I$(src)/../include/asm/trace
obj-y := process_$(BITS).o signal.o
obj-$(CONFIG_COMPAT) += signal_compat.o
-obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
+obj-y += traps.o idt.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
obj-y += time.o ioport.o dumpstack.o nmi.o
obj-$(CONFIG_MODIFY_LDT_SYSCALL) += ldt.o
obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
@@ -111,6 +111,7 @@ obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= paravirt-spinlocks.o
obj-$(CONFIG_PARAVIRT_CLOCK) += pvclock.o
obj-$(CONFIG_X86_PMEM_LEGACY_DEVICE) += pmem.o
+obj-$(CONFIG_EISA) += eisa.o
obj-$(CONFIG_PCSPKR_PLATFORM) += pcspeaker.o
obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o
@@ -126,11 +127,9 @@ obj-$(CONFIG_PERF_EVENTS) += perf_regs.o
obj-$(CONFIG_TRACING) += tracepoint.o
obj-$(CONFIG_SCHED_MC_PRIO) += itmt.o
-ifdef CONFIG_FRAME_POINTER
-obj-y += unwind_frame.o
-else
-obj-y += unwind_guess.o
-endif
+obj-$(CONFIG_ORC_UNWINDER) += unwind_orc.o
+obj-$(CONFIG_FRAME_POINTER_UNWINDER) += unwind_frame.o
+obj-$(CONFIG_GUESS_UNWINDER) += unwind_guess.o
###
# 64 bit specific files
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 6bb680671088..f8ae286c1502 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -115,24 +115,24 @@ static u32 isa_irq_to_gsi[NR_IRQS_LEGACY] __read_mostly = {
#define ACPI_INVALID_GSI INT_MIN
/*
- * This is just a simple wrapper around early_ioremap(),
+ * This is just a simple wrapper around early_memremap(),
* with sanity checks for phys == 0 and size == 0.
*/
-char *__init __acpi_map_table(unsigned long phys, unsigned long size)
+void __init __iomem *__acpi_map_table(unsigned long phys, unsigned long size)
{
if (!phys || !size)
return NULL;
- return early_ioremap(phys, size);
+ return early_memremap(phys, size);
}
-void __init __acpi_unmap_table(char *map, unsigned long size)
+void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
{
if (!map || !size)
return;
- early_iounmap(map, size);
+ early_memunmap(map, size);
}
#ifdef CONFIG_X86_LOCAL_APIC
@@ -199,8 +199,10 @@ static int __init
acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end)
{
struct acpi_madt_local_x2apic *processor = NULL;
+#ifdef CONFIG_X86_X2APIC
int apic_id;
u8 enabled;
+#endif
processor = (struct acpi_madt_local_x2apic *)header;
@@ -209,9 +211,10 @@ acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end)
acpi_table_print_madt_entry(header);
+#ifdef CONFIG_X86_X2APIC
apic_id = processor->local_apic_id;
enabled = processor->lapic_flags & ACPI_MADT_ENABLED;
-#ifdef CONFIG_X86_X2APIC
+
/*
* We need to register disabled CPU as well to permit
* counting disabled CPUs. This allows us to size
@@ -347,6 +350,14 @@ static void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
struct mpc_intsrc mp_irq;
/*
+ * Check bus_irq boundary.
+ */
+ if (bus_irq >= NR_IRQS_LEGACY) {
+ pr_warn("Invalid bus_irq %u for legacy override\n", bus_irq);
+ return;
+ }
+
+ /*
* Convert 'gsi' to 'ioapic.pin'.
*/
ioapic = mp_find_ioapic(gsi);
@@ -1075,7 +1086,7 @@ static void __init mp_config_acpi_legacy_irqs(void)
mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
#endif
set_bit(MP_ISA_BUS, mp_bus_not_pci);
- pr_debug("Bus #%d is ISA\n", MP_ISA_BUS);
+ pr_debug("Bus #%d is ISA (nIRQs: %d)\n", MP_ISA_BUS, nr_legacy_irqs());
/*
* Use the default configuration for the IRQs 0-15. Unless
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 32e14d137416..3344d3382e91 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -742,7 +742,16 @@ static void *bp_int3_handler, *bp_int3_addr;
int poke_int3_handler(struct pt_regs *regs)
{
- /* bp_patching_in_progress */
+ /*
+ * Having observed our INT3 instruction, we now must observe
+ * bp_patching_in_progress.
+ *
+ * in_progress = TRUE INT3
+ * WMB RMB
+ * write INT3 if (in_progress)
+ *
+ * Idem for bp_int3_handler.
+ */
smp_rmb();
if (likely(!bp_patching_in_progress))
@@ -788,9 +797,8 @@ void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
bp_int3_addr = (u8 *)addr + sizeof(int3);
bp_patching_in_progress = true;
/*
- * Corresponding read barrier in int3 notifier for
- * making sure the in_progress flags is correctly ordered wrt.
- * patching
+ * Corresponding read barrier in int3 notifier for making sure the
+ * in_progress and handler are correctly ordered wrt. patching.
*/
smp_wmb();
@@ -815,9 +823,11 @@ void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
text_poke(addr, opcode, sizeof(int3));
on_each_cpu(do_sync_core, NULL, 1);
-
+ /*
+ * sync_core() implies an smp_mb() and orders this store against
+ * the writing of the new instruction.
+ */
bp_patching_in_progress = false;
- smp_wmb();
return addr;
}
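
The ordering described in the comments above is a plain store/load pairing: the writer publishes the flag (and handler) before the INT3 byte can be observed, so the reader, having already hit the INT3, only needs a read barrier before checking the flag. A hedged, self-contained illustration; writer(), reader() and in_progress are illustrative names, not kernel APIs:

static bool in_progress;			/* illustrative flag */

static void writer(u8 *addr)
{
	in_progress = true;
	smp_wmb();				/* order the flag before the INT3 byte */
	WRITE_ONCE(*addr, 0xcc);		/* INT3 opcode becomes visible last */
}

static bool reader(void)
{
	/* Called from the INT3 handler, i.e. the breakpoint was observed. */
	smp_rmb();				/* pairs with the writer's smp_wmb() */
	return READ_ONCE(in_progress);
}
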
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 98b3dd8cf2bf..7834f73efbf1 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -177,8 +177,6 @@ static int disable_apic_timer __initdata;
int local_apic_timer_c2_ok;
EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
-int first_system_vector = FIRST_SYSTEM_VECTOR;
-
/*
* Debug level, exported for io_apic.c
*/
@@ -599,9 +597,13 @@ static const struct x86_cpu_id deadline_match[] = {
static void apic_check_deadline_errata(void)
{
- const struct x86_cpu_id *m = x86_match_cpu(deadline_match);
+ const struct x86_cpu_id *m;
u32 rev;
+ if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
+ return;
+
+ m = x86_match_cpu(deadline_match);
if (!m)
return;
@@ -990,8 +992,7 @@ void setup_secondary_APIC_clock(void)
*/
static void local_apic_timer_interrupt(void)
{
- int cpu = smp_processor_id();
- struct clock_event_device *evt = &per_cpu(lapic_events, cpu);
+ struct clock_event_device *evt = this_cpu_ptr(&lapic_events);
/*
* Normally we should not be here till LAPIC has been initialized but
@@ -1005,7 +1006,8 @@ static void local_apic_timer_interrupt(void)
* spurious.
*/
if (!evt->event_handler) {
- pr_warning("Spurious LAPIC timer interrupt on cpu %d\n", cpu);
+ pr_warning("Spurious LAPIC timer interrupt on cpu %d\n",
+ smp_processor_id());
/* Switch it off */
lapic_timer_shutdown(evt);
return;
@@ -1040,25 +1042,6 @@ __visible void __irq_entry smp_apic_timer_interrupt(struct pt_regs *regs)
* interrupt lock, which is the WrongThing (tm) to do.
*/
entering_ack_irq();
- local_apic_timer_interrupt();
- exiting_irq();
-
- set_irq_regs(old_regs);
-}
-
-__visible void __irq_entry smp_trace_apic_timer_interrupt(struct pt_regs *regs)
-{
- struct pt_regs *old_regs = set_irq_regs(regs);
-
- /*
- * NOTE! We'd better ACK the irq immediately,
- * because timer handling can be slow.
- *
- * update_process_times() expects us to have done irq_enter().
- * Besides, if we don't timer interrupts ignore the global
- * interrupt lock, which is the WrongThing (tm) to do.
- */
- entering_ack_irq();
trace_local_timer_entry(LOCAL_TIMER_VECTOR);
local_apic_timer_interrupt();
trace_local_timer_exit(LOCAL_TIMER_VECTOR);
@@ -1920,10 +1903,14 @@ void __init register_lapic_address(unsigned long address)
/*
* This interrupt should _never_ happen with our APIC/SMP architecture
*/
-static void __smp_spurious_interrupt(u8 vector)
+__visible void __irq_entry smp_spurious_interrupt(struct pt_regs *regs)
{
+ u8 vector = ~regs->orig_ax;
u32 v;
+ entering_irq();
+ trace_spurious_apic_entry(vector);
+
/*
* Check if this really is a spurious interrupt and ACK it
* if it is a vectored one. Just in case...
@@ -1938,22 +1925,7 @@ static void __smp_spurious_interrupt(u8 vector)
/* see sw-dev-man vol 3, chapter 7.4.13.5 */
pr_info("spurious APIC interrupt through vector %02x on CPU#%d, "
"should never happen.\n", vector, smp_processor_id());
-}
-__visible void __irq_entry smp_spurious_interrupt(struct pt_regs *regs)
-{
- entering_irq();
- __smp_spurious_interrupt(~regs->orig_ax);
- exiting_irq();
-}
-
-__visible void __irq_entry smp_trace_spurious_interrupt(struct pt_regs *regs)
-{
- u8 vector = ~regs->orig_ax;
-
- entering_irq();
- trace_spurious_apic_entry(vector);
- __smp_spurious_interrupt(vector);
trace_spurious_apic_exit(vector);
exiting_irq();
}
@@ -1961,10 +1933,8 @@ __visible void __irq_entry smp_trace_spurious_interrupt(struct pt_regs *regs)
/*
* This interrupt should never happen with our APIC/SMP architecture
*/
-static void __smp_error_interrupt(struct pt_regs *regs)
+__visible void __irq_entry smp_error_interrupt(struct pt_regs *regs)
{
- u32 v;
- u32 i = 0;
static const char * const error_interrupt_reason[] = {
"Send CS error", /* APIC Error Bit 0 */
"Receive CS error", /* APIC Error Bit 1 */
@@ -1975,6 +1945,10 @@ static void __smp_error_interrupt(struct pt_regs *regs)
"Received illegal vector", /* APIC Error Bit 6 */
"Illegal register address", /* APIC Error Bit 7 */
};
+ u32 v, i = 0;
+
+ entering_irq();
+ trace_error_apic_entry(ERROR_APIC_VECTOR);
/* First tickle the hardware, only then report what went on. -- REW */
if (lapic_get_maxlvt() > 3) /* Due to the Pentium erratum 3AP. */
@@ -1996,20 +1970,6 @@ static void __smp_error_interrupt(struct pt_regs *regs)
apic_printk(APIC_DEBUG, KERN_CONT "\n");
-}
-
-__visible void __irq_entry smp_error_interrupt(struct pt_regs *regs)
-{
- entering_irq();
- __smp_error_interrupt(regs);
- exiting_irq();
-}
-
-__visible void __irq_entry smp_trace_error_interrupt(struct pt_regs *regs)
-{
- entering_irq();
- trace_error_apic_entry(ERROR_APIC_VECTOR);
- __smp_error_interrupt(regs);
trace_error_apic_exit(ERROR_APIC_VECTOR);
exiting_irq();
}
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index b4f5f73febdb..70e48aa6af98 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1243,7 +1243,7 @@ static void io_apic_print_entries(unsigned int apic, unsigned int nr_entries)
entry.vector, entry.irr, entry.delivery_status);
if (ir_entry->format)
printk(KERN_DEBUG "%s, remapped, I(%04X), Z(%X)\n",
- buf, (ir_entry->index << 15) | ir_entry->index,
+ buf, (ir_entry->index2 << 15) | ir_entry->index,
ir_entry->zero);
else
printk(KERN_DEBUG "%s, %s, D(%02X), M(%1d)\n",
@@ -2093,7 +2093,7 @@ static inline void __init check_timer(void)
int idx;
idx = find_irq_entry(apic1, pin1, mp_INT);
if (idx != -1 && irq_trigger(idx))
- unmask_ioapic_irq(irq_get_chip_data(0));
+ unmask_ioapic_irq(irq_get_irq_data(0));
}
irq_domain_deactivate_irq(irq_data);
irq_domain_activate_irq(irq_data);
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index b3af457ed667..88c214e75a6b 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -166,7 +166,7 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
offset = current_offset;
next:
vector += 16;
- if (vector >= first_system_vector) {
+ if (vector >= FIRST_SYSTEM_VECTOR) {
offset = (offset + 1) % 16;
vector = FIRST_EXTERNAL_VECTOR + offset;
}
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index 880aa093268d..710edab9e644 100644
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -4,9 +4,6 @@
#include <asm/ucontext.h>
-#include <linux/lguest.h>
-#include "../../../drivers/lguest/lg.h"
-
#define __SYSCALL_I386(nr, sym, qual) [nr] = 1,
static char syscalls[] = {
#include <asm/syscalls_32.h>
@@ -62,23 +59,6 @@ void foo(void)
OFFSET(stack_canary_offset, stack_canary, canary);
#endif
-#if defined(CONFIG_LGUEST) || defined(CONFIG_LGUEST_GUEST) || defined(CONFIG_LGUEST_MODULE)
- BLANK();
- OFFSET(LGUEST_DATA_irq_enabled, lguest_data, irq_enabled);
- OFFSET(LGUEST_DATA_irq_pending, lguest_data, irq_pending);
-
- BLANK();
- OFFSET(LGUEST_PAGES_host_gdt_desc, lguest_pages, state.host_gdt_desc);
- OFFSET(LGUEST_PAGES_host_idt_desc, lguest_pages, state.host_idt_desc);
- OFFSET(LGUEST_PAGES_host_cr3, lguest_pages, state.host_cr3);
- OFFSET(LGUEST_PAGES_host_sp, lguest_pages, state.host_sp);
- OFFSET(LGUEST_PAGES_guest_gdt_desc, lguest_pages,state.guest_gdt_desc);
- OFFSET(LGUEST_PAGES_guest_idt_desc, lguest_pages,state.guest_idt_desc);
- OFFSET(LGUEST_PAGES_guest_gdt, lguest_pages, state.guest_gdt);
- OFFSET(LGUEST_PAGES_regs_trapnum, lguest_pages, regs.trapnum);
- OFFSET(LGUEST_PAGES_regs_errcode, lguest_pages, regs.errcode);
- OFFSET(LGUEST_PAGES_regs, lguest_pages, regs);
-#endif
BLANK();
DEFINE(__NR_syscall_max, sizeof(syscalls) - 1);
DEFINE(NR_syscalls, sizeof(syscalls));
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index 99332f550c48..cf42206926af 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -20,7 +20,6 @@ static char syscalls_ia32[] = {
int main(void)
{
#ifdef CONFIG_PARAVIRT
- OFFSET(PV_IRQ_adjust_exception_frame, pv_irq_ops, adjust_exception_frame);
OFFSET(PV_CPU_usergs_sysret64, pv_cpu_ops, usergs_sysret64);
OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
BLANK();
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index cdf82492b770..e17942c131c8 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -33,7 +33,7 @@ obj-$(CONFIG_CPU_SUP_CENTAUR) += centaur.o
obj-$(CONFIG_CPU_SUP_TRANSMETA_32) += transmeta.o
obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o
-obj-$(CONFIG_INTEL_RDT_A) += intel_rdt.o intel_rdt_rdtgroup.o intel_rdt_schemata.o
+obj-$(CONFIG_INTEL_RDT) += intel_rdt.o intel_rdt_rdtgroup.o intel_rdt_monitor.o intel_rdt_ctrlmondata.o
obj-$(CONFIG_X86_MCE) += mcheck/
obj-$(CONFIG_MTRR) += mtrr/
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index bb5abe8f5fd4..9862e2cd6d93 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -134,6 +134,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
n = K6_BUG_LOOP;
f_vide = vide;
+ OPTIMIZER_HIDE_VAR(f_vide);
d = rdtsc();
while (n--)
f_vide();
@@ -296,13 +297,29 @@ static int nearby_node(int apicid)
}
#endif
+#ifdef CONFIG_SMP
+/*
+ * Fix up cpu_core_id for pre-F17h systems to be in the
+ * [0 .. cores_per_node - 1] range. Not really needed but
+ * kept so as not to break existing setups.
+ */
+static void legacy_fixup_core_id(struct cpuinfo_x86 *c)
+{
+ u32 cus_per_node;
+
+ if (c->x86 >= 0x17)
+ return;
+
+ cus_per_node = c->x86_max_cores / nodes_per_socket;
+ c->cpu_core_id %= cus_per_node;
+}
+
/*
* Fixup core topology information for
* (1) AMD multi-node processors
* Assumption: Number of cores in each internal node is the same.
* (2) AMD processors supporting compute units
*/
-#ifdef CONFIG_SMP
static void amd_get_topology(struct cpuinfo_x86 *c)
{
u8 node_id;
@@ -353,15 +370,9 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
} else
return;
- /* fixup multi-node processor information */
if (nodes_per_socket > 1) {
- u32 cus_per_node;
-
set_cpu_cap(c, X86_FEATURE_AMD_DCM);
- cus_per_node = c->x86_max_cores / nodes_per_socket;
-
- /* core id has to be in the [0 .. cores_per_node - 1] range */
- c->cpu_core_id %= cus_per_node;
+ legacy_fixup_core_id(c);
}
}
#endif
@@ -547,8 +558,12 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
static void early_init_amd(struct cpuinfo_x86 *c)
{
+ u32 dummy;
+
early_init_amd_mc(c);
+ rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
+
/*
* c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
* with P/T states and does not stop in deep C-states
@@ -611,6 +626,27 @@ static void early_init_amd(struct cpuinfo_x86 *c)
*/
if (cpu_has_amd_erratum(c, amd_erratum_400))
set_cpu_bug(c, X86_BUG_AMD_E400);
+
+ /*
+ * BIOS support is required for SME. If BIOS has enabled SME then
+ * adjust x86_phys_bits by the SME physical address space reduction
+ * value. If BIOS has not enabled SME then don't advertise the
+ * feature (set in scattered.c). Also, since the SME support requires
+ * long mode, don't advertise the feature under CONFIG_X86_32.
+ */
+ if (cpu_has(c, X86_FEATURE_SME)) {
+ u64 msr;
+
+ /* Check if SME is enabled */
+ rdmsrl(MSR_K8_SYSCFG, msr);
+ if (msr & MSR_K8_SYSCFG_MEM_ENCRYPT) {
+ c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f;
+ if (IS_ENABLED(CONFIG_X86_32))
+ clear_cpu_cap(c, X86_FEATURE_SME);
+ } else {
+ clear_cpu_cap(c, X86_FEATURE_SME);
+ }
+ }
}
static void init_amd_k8(struct cpuinfo_x86 *c)
@@ -729,8 +765,6 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
static void init_amd(struct cpuinfo_x86 *c)
{
- u32 dummy;
-
early_init_amd(c);
/*
@@ -792,8 +826,6 @@ static void init_amd(struct cpuinfo_x86 *c)
if (c->x86 > 0x11)
set_cpu_cap(c, X86_FEATURE_ARAT);
- rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
-
/* 3DNow or LM implies PREFETCHW */
if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
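
The SME hunk above derives the loss in usable physical address bits from CPUID 0x8000001f EBX[11:6], applied only when MSR_K8_SYSCFG reports memory encryption as enabled. A small sketch of just that computation; the function name is hypothetical and the logic mirrors the hunk:

static void sme_adjust_phys_bits(struct cpuinfo_x86 *c)
{
	u64 syscfg;

	rdmsrl(MSR_K8_SYSCFG, syscfg);
	if (!(syscfg & MSR_K8_SYSCFG_MEM_ENCRYPT))
		return;

	/* EBX[11:6]: physical address bits consumed by memory encryption. */
	c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f;
}
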
diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c
index d869c8671e36..0ee83321a313 100644
--- a/arch/x86/kernel/cpu/aperfmperf.c
+++ b/arch/x86/kernel/cpu/aperfmperf.c
@@ -8,20 +8,25 @@
* This file is licensed under GPLv2.
*/
-#include <linux/jiffies.h>
+#include <linux/delay.h>
+#include <linux/ktime.h>
#include <linux/math64.h>
#include <linux/percpu.h>
#include <linux/smp.h>
struct aperfmperf_sample {
unsigned int khz;
- unsigned long jiffies;
+ ktime_t time;
u64 aperf;
u64 mperf;
};
static DEFINE_PER_CPU(struct aperfmperf_sample, samples);
+#define APERFMPERF_CACHE_THRESHOLD_MS 10
+#define APERFMPERF_REFRESH_DELAY_MS 20
+#define APERFMPERF_STALE_THRESHOLD_MS 1000
+
/*
* aperfmperf_snapshot_khz()
* On the current CPU, snapshot APERF, MPERF, and jiffies
@@ -33,13 +38,18 @@ static void aperfmperf_snapshot_khz(void *dummy)
u64 aperf, aperf_delta;
u64 mperf, mperf_delta;
struct aperfmperf_sample *s = this_cpu_ptr(&samples);
+ ktime_t now = ktime_get();
+ s64 time_delta = ktime_ms_delta(now, s->time);
+ unsigned long flags;
- /* Don't bother re-computing within 10 ms */
- if (time_before(jiffies, s->jiffies + HZ/100))
+ /* Don't bother re-computing within the cache threshold time. */
+ if (time_delta < APERFMPERF_CACHE_THRESHOLD_MS)
return;
+ local_irq_save(flags);
rdmsrl(MSR_IA32_APERF, aperf);
rdmsrl(MSR_IA32_MPERF, mperf);
+ local_irq_restore(flags);
aperf_delta = aperf - s->aperf;
mperf_delta = mperf - s->mperf;
@@ -51,22 +61,21 @@ static void aperfmperf_snapshot_khz(void *dummy)
if (mperf_delta == 0)
return;
- /*
- * if (cpu_khz * aperf_delta) fits into ULLONG_MAX, then
- * khz = (cpu_khz * aperf_delta) / mperf_delta
- */
- if (div64_u64(ULLONG_MAX, cpu_khz) > aperf_delta)
- s->khz = div64_u64((cpu_khz * aperf_delta), mperf_delta);
- else /* khz = aperf_delta / (mperf_delta / cpu_khz) */
- s->khz = div64_u64(aperf_delta,
- div64_u64(mperf_delta, cpu_khz));
- s->jiffies = jiffies;
+ s->time = now;
s->aperf = aperf;
s->mperf = mperf;
+
+ /* If the previous iteration was too long ago, discard it. */
+ if (time_delta > APERFMPERF_STALE_THRESHOLD_MS)
+ s->khz = 0;
+ else
+ s->khz = div64_u64((cpu_khz * aperf_delta), mperf_delta);
}
unsigned int arch_freq_get_on_cpu(int cpu)
{
+ unsigned int khz;
+
if (!cpu_khz)
return 0;
@@ -74,6 +83,12 @@ unsigned int arch_freq_get_on_cpu(int cpu)
return 0;
smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, 1);
+ khz = per_cpu(samples.khz, cpu);
+ if (khz)
+ return khz;
+
+ msleep(APERFMPERF_REFRESH_DELAY_MS);
+ smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, 1);
return per_cpu(samples.khz, cpu);
}
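
The reported value is just the base frequency scaled by the APERF/MPERF ratio over the sample window, khz = cpu_khz * aperf_delta / mperf_delta, with a zero result used to flag a stale sample. A worked example with illustrative numbers:

static u64 aperfmperf_khz_example(void)
{
	u64 base_khz = 2000000;		/* 2 GHz base clock, illustrative */
	u64 aperf_delta = 3000000;	/* APERF advanced 1.5x as fast as MPERF */
	u64 mperf_delta = 2000000;

	/* 2000000 * 3000000 / 2000000 = 3000000 kHz, i.e. 3 GHz effective */
	return div64_u64(base_khz * aperf_delta, mperf_delta);
}
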
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 0af86d9242da..db684880d74a 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -21,6 +21,14 @@
void __init check_bugs(void)
{
+#ifdef CONFIG_X86_32
+ /*
+ * Regardless of whether PCID is enumerated, the SDM says
+ * that it can't be enabled in 32-bit mode.
+ */
+ setup_clear_cpu_cap(X86_FEATURE_PCID);
+#endif
+
identify_boot_cpu();
if (!IS_ENABLED(CONFIG_SMP)) {
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index c8b39870f33e..fb1d3358a4af 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -168,6 +168,24 @@ static int __init x86_mpx_setup(char *s)
}
__setup("nompx", x86_mpx_setup);
+#ifdef CONFIG_X86_64
+static int __init x86_pcid_setup(char *s)
+{
+ /* require an exact match without trailing characters */
+ if (strlen(s))
+ return 0;
+
+ /* do not emit a message if the feature is not present */
+ if (!boot_cpu_has(X86_FEATURE_PCID))
+ return 1;
+
+ setup_clear_cpu_cap(X86_FEATURE_PCID);
+ pr_info("nopcid: PCID feature disabled\n");
+ return 1;
+}
+__setup("nopcid", x86_pcid_setup);
+#endif
+
static int __init x86_noinvpcid_setup(char *s)
{
/* noinvpcid doesn't accept parameters */
@@ -311,6 +329,38 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
}
}
+static void setup_pcid(struct cpuinfo_x86 *c)
+{
+ if (cpu_has(c, X86_FEATURE_PCID)) {
+ if (cpu_has(c, X86_FEATURE_PGE)) {
+ /*
+ * We'd like to use cr4_set_bits_and_update_boot(),
+ * but we can't. CR4.PCIDE is special and can only
+ * be set in long mode, and the early CPU init code
+ * doesn't know this and would try to restore CR4.PCIDE
+ * prior to entering long mode.
+ *
+ * Instead, we rely on the fact that hotplug, resume,
+ * etc all fully restore CR4 before they write anything
+ * that could have nonzero PCID bits to CR3. CR4.PCIDE
+ * has no effect on the page tables themselves, so we
+ * don't need it to be restored early.
+ */
+ cr4_set_bits(X86_CR4_PCIDE);
+ } else {
+ /*
+ * flush_tlb_all(), as currently implemented, won't
+ * work if PCID is on but PGE is not. Since that
+ * combination doesn't exist on real hardware, there's
+ * no reason to try to fully support it, but it's
+ * polite to avoid corrupting data if we're on
+ * an improperly configured VM.
+ */
+ clear_cpu_cap(c, X86_FEATURE_PCID);
+ }
+ }
+}
+
/*
* Protection Keys are not available in 32-bit mode.
*/
@@ -1125,6 +1175,9 @@ static void identify_cpu(struct cpuinfo_x86 *c)
setup_smep(c);
setup_smap(c);
+ /* Set up PCID */
+ setup_pcid(c);
+
/*
* The vendor-specific functions might have changed features.
* Now we do "generic changes."
@@ -1289,15 +1342,6 @@ static __init int setup_disablecpuid(char *arg)
__setup("clearcpuid=", setup_disablecpuid);
#ifdef CONFIG_X86_64
-struct desc_ptr idt_descr __ro_after_init = {
- .size = NR_VECTORS * 16 - 1,
- .address = (unsigned long) idt_table,
-};
-const struct desc_ptr debug_idt_descr = {
- .size = NR_VECTORS * 16 - 1,
- .address = (unsigned long) debug_idt_table,
-};
-
DEFINE_PER_CPU_FIRST(union irq_stack_union,
irq_stack_union) __aligned(PAGE_SIZE) __visible;
@@ -1552,6 +1596,7 @@ void cpu_init(void)
mmgrab(&init_mm);
me->active_mm = &init_mm;
BUG_ON(me->mm);
+ initialize_tlbstate_and_flush();
enter_lazy_tlb(&init_mm, me);
load_sp0(t, &current->thread);
@@ -1606,6 +1651,7 @@ void cpu_init(void)
mmgrab(&init_mm);
curr->active_mm = &init_mm;
BUG_ON(curr->mm);
+ initialize_tlbstate_and_flush();
enter_lazy_tlb(&init_mm, curr);
load_sp0(t, thread);
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index c55fb2cb2acc..24f749324c0f 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -811,7 +811,24 @@ static int __cache_amd_cpumap_setup(unsigned int cpu, int index,
struct cacheinfo *this_leaf;
int i, sibling;
- if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
+ /*
+ * For L3, always use the pre-calculated cpu_llc_shared_mask
+ * to derive shared_cpu_map.
+ */
+ if (index == 3) {
+ for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
+ this_cpu_ci = get_cpu_cacheinfo(i);
+ if (!this_cpu_ci->info_list)
+ continue;
+ this_leaf = this_cpu_ci->info_list + index;
+ for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
+ if (!cpu_online(sibling))
+ continue;
+ cpumask_set_cpu(sibling,
+ &this_leaf->shared_cpu_map);
+ }
+ }
+ } else if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
unsigned int apicid, nshared, first, last;
this_leaf = this_cpu_ci->info_list + index;
@@ -839,19 +856,6 @@ static int __cache_amd_cpumap_setup(unsigned int cpu, int index,
&this_leaf->shared_cpu_map);
}
}
- } else if (index == 3) {
- for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
- this_cpu_ci = get_cpu_cacheinfo(i);
- if (!this_cpu_ci->info_list)
- continue;
- this_leaf = this_cpu_ci->info_list + index;
- for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
- if (!cpu_online(sibling))
- continue;
- cpumask_set_cpu(sibling,
- &this_leaf->shared_cpu_map);
- }
- }
} else
return 0;
diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
index 5b366462f579..cd5fc61ba450 100644
--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/intel_rdt.c
@@ -30,7 +30,8 @@
#include <linux/cpuhotplug.h>
#include <asm/intel-family.h>
-#include <asm/intel_rdt.h>
+#include <asm/intel_rdt_sched.h>
+#include "intel_rdt.h"
#define MAX_MBA_BW 100u
#define MBA_IS_LINEAR 0x4
@@ -38,7 +39,13 @@
/* Mutex to protect rdtgroup access. */
DEFINE_MUTEX(rdtgroup_mutex);
-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_closid);
+/*
+ * The cached intel_pqr_state is strictly per CPU and can never be
+ * updated from a remote CPU. Functions which modify the state
+ * are called with interrupts disabled and no preemption, which
+ * is sufficient for the protection.
+ */
+DEFINE_PER_CPU(struct intel_pqr_state, pqr_state);
/*
* Used to store the max resource name width and max resource data width
@@ -46,6 +53,12 @@ DEFINE_PER_CPU_READ_MOSTLY(int, cpu_closid);
*/
int max_name_width, max_data_width;
+/*
+ * Global boolean for rdt_alloc which is true if any
+ * resource allocation is enabled.
+ */
+bool rdt_alloc_capable;
+
static void
mba_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r);
static void
@@ -54,7 +67,9 @@ cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r);
#define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].domains)
struct rdt_resource rdt_resources_all[] = {
+ [RDT_RESOURCE_L3] =
{
+ .rid = RDT_RESOURCE_L3,
.name = "L3",
.domains = domain_init(RDT_RESOURCE_L3),
.msr_base = IA32_L3_CBM_BASE,
@@ -67,8 +82,11 @@ struct rdt_resource rdt_resources_all[] = {
},
.parse_ctrlval = parse_cbm,
.format_str = "%d=%0*x",
+ .fflags = RFTYPE_RES_CACHE,
},
+ [RDT_RESOURCE_L3DATA] =
{
+ .rid = RDT_RESOURCE_L3DATA,
.name = "L3DATA",
.domains = domain_init(RDT_RESOURCE_L3DATA),
.msr_base = IA32_L3_CBM_BASE,
@@ -81,8 +99,11 @@ struct rdt_resource rdt_resources_all[] = {
},
.parse_ctrlval = parse_cbm,
.format_str = "%d=%0*x",
+ .fflags = RFTYPE_RES_CACHE,
},
+ [RDT_RESOURCE_L3CODE] =
{
+ .rid = RDT_RESOURCE_L3CODE,
.name = "L3CODE",
.domains = domain_init(RDT_RESOURCE_L3CODE),
.msr_base = IA32_L3_CBM_BASE,
@@ -95,8 +116,11 @@ struct rdt_resource rdt_resources_all[] = {
},
.parse_ctrlval = parse_cbm,
.format_str = "%d=%0*x",
+ .fflags = RFTYPE_RES_CACHE,
},
+ [RDT_RESOURCE_L2] =
{
+ .rid = RDT_RESOURCE_L2,
.name = "L2",
.domains = domain_init(RDT_RESOURCE_L2),
.msr_base = IA32_L2_CBM_BASE,
@@ -109,8 +133,11 @@ struct rdt_resource rdt_resources_all[] = {
},
.parse_ctrlval = parse_cbm,
.format_str = "%d=%0*x",
+ .fflags = RFTYPE_RES_CACHE,
},
+ [RDT_RESOURCE_MBA] =
{
+ .rid = RDT_RESOURCE_MBA,
.name = "MB",
.domains = domain_init(RDT_RESOURCE_MBA),
.msr_base = IA32_MBA_THRTL_BASE,
@@ -118,6 +145,7 @@ struct rdt_resource rdt_resources_all[] = {
.cache_level = 3,
.parse_ctrlval = parse_bw,
.format_str = "%d=%*d",
+ .fflags = RFTYPE_RES_MB,
},
};
@@ -144,33 +172,28 @@ static unsigned int cbm_idx(struct rdt_resource *r, unsigned int closid)
* is always 20 on hsw server parts. The minimum cache bitmask length
* allowed for HSW server is always 2 bits. Hardcode all of them.
*/
-static inline bool cache_alloc_hsw_probe(void)
+static inline void cache_alloc_hsw_probe(void)
{
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
- boot_cpu_data.x86 == 6 &&
- boot_cpu_data.x86_model == INTEL_FAM6_HASWELL_X) {
- struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3];
- u32 l, h, max_cbm = BIT_MASK(20) - 1;
-
- if (wrmsr_safe(IA32_L3_CBM_BASE, max_cbm, 0))
- return false;
- rdmsr(IA32_L3_CBM_BASE, l, h);
+ struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3];
+ u32 l, h, max_cbm = BIT_MASK(20) - 1;
- /* If all the bits were set in MSR, return success */
- if (l != max_cbm)
- return false;
+ if (wrmsr_safe(IA32_L3_CBM_BASE, max_cbm, 0))
+ return;
+ rdmsr(IA32_L3_CBM_BASE, l, h);
- r->num_closid = 4;
- r->default_ctrl = max_cbm;
- r->cache.cbm_len = 20;
- r->cache.min_cbm_bits = 2;
- r->capable = true;
- r->enabled = true;
+ /* If all the bits were set in MSR, return success */
+ if (l != max_cbm)
+ return;
- return true;
- }
+ r->num_closid = 4;
+ r->default_ctrl = max_cbm;
+ r->cache.cbm_len = 20;
+ r->cache.shareable_bits = 0xc0000;
+ r->cache.min_cbm_bits = 2;
+ r->alloc_capable = true;
+ r->alloc_enabled = true;
- return false;
+ rdt_alloc_capable = true;
}
/*
@@ -213,15 +236,14 @@ static bool rdt_get_mem_config(struct rdt_resource *r)
return false;
}
r->data_width = 3;
- rdt_get_mba_infofile(r);
- r->capable = true;
- r->enabled = true;
+ r->alloc_capable = true;
+ r->alloc_enabled = true;
return true;
}
-static void rdt_get_cache_config(int idx, struct rdt_resource *r)
+static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r)
{
union cpuid_0x10_1_eax eax;
union cpuid_0x10_x_edx edx;
@@ -231,10 +253,10 @@ static void rdt_get_cache_config(int idx, struct rdt_resource *r)
r->num_closid = edx.split.cos_max + 1;
r->cache.cbm_len = eax.split.cbm_len + 1;
r->default_ctrl = BIT_MASK(eax.split.cbm_len + 1) - 1;
+ r->cache.shareable_bits = ebx & r->default_ctrl;
r->data_width = (r->cache.cbm_len + 3) / 4;
- rdt_get_cache_infofile(r);
- r->capable = true;
- r->enabled = true;
+ r->alloc_capable = true;
+ r->alloc_enabled = true;
}
static void rdt_get_cdp_l3_config(int type)
@@ -246,12 +268,12 @@ static void rdt_get_cdp_l3_config(int type)
r->cache.cbm_len = r_l3->cache.cbm_len;
r->default_ctrl = r_l3->default_ctrl;
r->data_width = (r->cache.cbm_len + 3) / 4;
- r->capable = true;
+ r->alloc_capable = true;
/*
* By default, CDP is disabled. CDP can be enabled by mount parameter
* "cdp" during resctrl file system mount time.
*/
- r->enabled = false;
+ r->alloc_enabled = false;
}
static int get_cache_id(int cpu, int level)
@@ -300,6 +322,19 @@ cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
wrmsrl(r->msr_base + cbm_idx(r, i), d->ctrl_val[i]);
}
+struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r)
+{
+ struct rdt_domain *d;
+
+ list_for_each_entry(d, &r->domains, list) {
+ /* Find the domain that contains this CPU */
+ if (cpumask_test_cpu(cpu, &d->cpu_mask))
+ return d;
+ }
+
+ return NULL;
+}
+
void rdt_ctrl_update(void *arg)
{
struct msr_param *m = arg;
@@ -307,12 +342,10 @@ void rdt_ctrl_update(void *arg)
int cpu = smp_processor_id();
struct rdt_domain *d;
- list_for_each_entry(d, &r->domains, list) {
- /* Find the domain that contains this CPU */
- if (cpumask_test_cpu(cpu, &d->cpu_mask)) {
- r->msr_update(d, m, r);
- return;
- }
+ d = get_domain_from_cpu(cpu, r);
+ if (d) {
+ r->msr_update(d, m, r);
+ return;
}
pr_warn_once("cpu %d not found in any domain for resource %s\n",
cpu, r->name);
@@ -326,8 +359,8 @@ void rdt_ctrl_update(void *arg)
* caller, return the first domain whose id is bigger than the input id.
* The domain list is sorted by id in ascending order.
*/
-static struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
- struct list_head **pos)
+struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
+ struct list_head **pos)
{
struct rdt_domain *d;
struct list_head *l;
@@ -377,6 +410,44 @@ static int domain_setup_ctrlval(struct rdt_resource *r, struct rdt_domain *d)
return 0;
}
+static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d)
+{
+ size_t tsize;
+
+ if (is_llc_occupancy_enabled()) {
+ d->rmid_busy_llc = kcalloc(BITS_TO_LONGS(r->num_rmid),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!d->rmid_busy_llc)
+ return -ENOMEM;
+ INIT_DELAYED_WORK(&d->cqm_limbo, cqm_handle_limbo);
+ }
+ if (is_mbm_total_enabled()) {
+ tsize = sizeof(*d->mbm_total);
+ d->mbm_total = kcalloc(r->num_rmid, tsize, GFP_KERNEL);
+ if (!d->mbm_total) {
+ kfree(d->rmid_busy_llc);
+ return -ENOMEM;
+ }
+ }
+ if (is_mbm_local_enabled()) {
+ tsize = sizeof(*d->mbm_local);
+ d->mbm_local = kcalloc(r->num_rmid, tsize, GFP_KERNEL);
+ if (!d->mbm_local) {
+ kfree(d->rmid_busy_llc);
+ kfree(d->mbm_total);
+ return -ENOMEM;
+ }
+ }
+
+ if (is_mbm_enabled()) {
+ INIT_DELAYED_WORK(&d->mbm_over, mbm_handle_overflow);
+ mbm_setup_overflow_handler(d, MBM_OVERFLOW_INTERVAL);
+ }
+
+ return 0;
+}
+
/*
* domain_add_cpu - Add a cpu to a resource's domain list.
*
@@ -412,14 +483,26 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
return;
d->id = id;
+ cpumask_set_cpu(cpu, &d->cpu_mask);
- if (domain_setup_ctrlval(r, d)) {
+ if (r->alloc_capable && domain_setup_ctrlval(r, d)) {
+ kfree(d);
+ return;
+ }
+
+ if (r->mon_capable && domain_setup_mon_state(r, d)) {
kfree(d);
return;
}
- cpumask_set_cpu(cpu, &d->cpu_mask);
list_add_tail(&d->list, add_pos);
+
+ /*
+ * If resctrl is mounted, add
+ * per domain monitor data directories.
+ */
+ if (static_branch_unlikely(&rdt_mon_enable_key))
+ mkdir_mondata_subdir_allrdtgrp(r, d);
}
static void domain_remove_cpu(int cpu, struct rdt_resource *r)
@@ -435,19 +518,58 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
cpumask_clear_cpu(cpu, &d->cpu_mask);
if (cpumask_empty(&d->cpu_mask)) {
+ /*
+ * If resctrl is mounted, remove all the
+ * per domain monitor data directories.
+ */
+ if (static_branch_unlikely(&rdt_mon_enable_key))
+ rmdir_mondata_subdir_allrdtgrp(r, d->id);
kfree(d->ctrl_val);
+ kfree(d->rmid_busy_llc);
+ kfree(d->mbm_total);
+ kfree(d->mbm_local);
list_del(&d->list);
+ if (is_mbm_enabled())
+ cancel_delayed_work(&d->mbm_over);
+ if (is_llc_occupancy_enabled() && has_busy_rmid(r, d)) {
+ /*
+ * When a package is going down, forcefully
+ * decrement rmid->ebusy. There is no way to know
+ * that the L3 was flushed and hence may lead to
+ * incorrect counts in rare scenarios, but leaving
+ * the RMID as busy creates RMID leaks if the
+ * package never comes back.
+ */
+ __check_limbo(d, true);
+ cancel_delayed_work(&d->cqm_limbo);
+ }
+
kfree(d);
+ return;
+ }
+
+ if (r == &rdt_resources_all[RDT_RESOURCE_L3]) {
+ if (is_mbm_enabled() && cpu == d->mbm_work_cpu) {
+ cancel_delayed_work(&d->mbm_over);
+ mbm_setup_overflow_handler(d, 0);
+ }
+ if (is_llc_occupancy_enabled() && cpu == d->cqm_work_cpu &&
+ has_busy_rmid(r, d)) {
+ cancel_delayed_work(&d->cqm_limbo);
+ cqm_setup_limbo_handler(d, 0);
+ }
}
}
-static void clear_closid(int cpu)
+static void clear_closid_rmid(int cpu)
{
struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
- per_cpu(cpu_closid, cpu) = 0;
- state->closid = 0;
- wrmsr(MSR_IA32_PQR_ASSOC, state->rmid, 0);
+ state->default_closid = 0;
+ state->default_rmid = 0;
+ state->cur_closid = 0;
+ state->cur_rmid = 0;
+ wrmsr(IA32_PQR_ASSOC, 0, 0);
}
static int intel_rdt_online_cpu(unsigned int cpu)
@@ -459,12 +581,23 @@ static int intel_rdt_online_cpu(unsigned int cpu)
domain_add_cpu(cpu, r);
/* The cpu is set in default rdtgroup after online. */
cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask);
- clear_closid(cpu);
+ clear_closid_rmid(cpu);
mutex_unlock(&rdtgroup_mutex);
return 0;
}
+static void clear_childcpus(struct rdtgroup *r, unsigned int cpu)
+{
+ struct rdtgroup *cr;
+
+ list_for_each_entry(cr, &r->mon.crdtgrp_list, mon.crdtgrp_list) {
+ if (cpumask_test_and_clear_cpu(cpu, &cr->cpu_mask)) {
+ break;
+ }
+ }
+}
+
static int intel_rdt_offline_cpu(unsigned int cpu)
{
struct rdtgroup *rdtgrp;
@@ -474,10 +607,12 @@ static int intel_rdt_offline_cpu(unsigned int cpu)
for_each_capable_rdt_resource(r)
domain_remove_cpu(cpu, r);
list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
- if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask))
+ if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask)) {
+ clear_childcpus(rdtgrp, cpu);
break;
+ }
}
- clear_closid(cpu);
+ clear_closid_rmid(cpu);
mutex_unlock(&rdtgroup_mutex);
return 0;
@@ -492,7 +627,7 @@ static __init void rdt_init_padding(void)
struct rdt_resource *r;
int cl;
- for_each_capable_rdt_resource(r) {
+ for_each_alloc_capable_rdt_resource(r) {
cl = strlen(r->name);
if (cl > max_name_width)
max_name_width = cl;
@@ -502,38 +637,153 @@ static __init void rdt_init_padding(void)
}
}
-static __init bool get_rdt_resources(void)
+enum {
+ RDT_FLAG_CMT,
+ RDT_FLAG_MBM_TOTAL,
+ RDT_FLAG_MBM_LOCAL,
+ RDT_FLAG_L3_CAT,
+ RDT_FLAG_L3_CDP,
+ RDT_FLAG_L2_CAT,
+ RDT_FLAG_MBA,
+};
+
+#define RDT_OPT(idx, n, f) \
+[idx] = { \
+ .name = n, \
+ .flag = f \
+}
+
+struct rdt_options {
+ char *name;
+ int flag;
+ bool force_off, force_on;
+};
+
+static struct rdt_options rdt_options[] __initdata = {
+ RDT_OPT(RDT_FLAG_CMT, "cmt", X86_FEATURE_CQM_OCCUP_LLC),
+ RDT_OPT(RDT_FLAG_MBM_TOTAL, "mbmtotal", X86_FEATURE_CQM_MBM_TOTAL),
+ RDT_OPT(RDT_FLAG_MBM_LOCAL, "mbmlocal", X86_FEATURE_CQM_MBM_LOCAL),
+ RDT_OPT(RDT_FLAG_L3_CAT, "l3cat", X86_FEATURE_CAT_L3),
+ RDT_OPT(RDT_FLAG_L3_CDP, "l3cdp", X86_FEATURE_CDP_L3),
+ RDT_OPT(RDT_FLAG_L2_CAT, "l2cat", X86_FEATURE_CAT_L2),
+ RDT_OPT(RDT_FLAG_MBA, "mba", X86_FEATURE_MBA),
+};
+#define NUM_RDT_OPTIONS ARRAY_SIZE(rdt_options)
+
+static int __init set_rdt_options(char *str)
+{
+ struct rdt_options *o;
+ bool force_off;
+ char *tok;
+
+ if (*str == '=')
+ str++;
+ while ((tok = strsep(&str, ",")) != NULL) {
+ force_off = *tok == '!';
+ if (force_off)
+ tok++;
+ for (o = rdt_options; o < &rdt_options[NUM_RDT_OPTIONS]; o++) {
+ if (strcmp(tok, o->name) == 0) {
+ if (force_off)
+ o->force_off = true;
+ else
+ o->force_on = true;
+ break;
+ }
+ }
+ }
+ return 1;
+}
+__setup("rdt", set_rdt_options);
+
+static bool __init rdt_cpu_has(int flag)
+{
+ bool ret = boot_cpu_has(flag);
+ struct rdt_options *o;
+
+ if (!ret)
+ return ret;
+
+ for (o = rdt_options; o < &rdt_options[NUM_RDT_OPTIONS]; o++) {
+ if (flag == o->flag) {
+ if (o->force_off)
+ ret = false;
+ if (o->force_on)
+ ret = true;
+ break;
+ }
+ }
+ return ret;
+}
+
+static __init bool get_rdt_alloc_resources(void)
{
bool ret = false;
- if (cache_alloc_hsw_probe())
+ if (rdt_alloc_capable)
return true;
if (!boot_cpu_has(X86_FEATURE_RDT_A))
return false;
- if (boot_cpu_has(X86_FEATURE_CAT_L3)) {
- rdt_get_cache_config(1, &rdt_resources_all[RDT_RESOURCE_L3]);
- if (boot_cpu_has(X86_FEATURE_CDP_L3)) {
+ if (rdt_cpu_has(X86_FEATURE_CAT_L3)) {
+ rdt_get_cache_alloc_cfg(1, &rdt_resources_all[RDT_RESOURCE_L3]);
+ if (rdt_cpu_has(X86_FEATURE_CDP_L3)) {
rdt_get_cdp_l3_config(RDT_RESOURCE_L3DATA);
rdt_get_cdp_l3_config(RDT_RESOURCE_L3CODE);
}
ret = true;
}
- if (boot_cpu_has(X86_FEATURE_CAT_L2)) {
+ if (rdt_cpu_has(X86_FEATURE_CAT_L2)) {
/* CPUID 0x10.2 fields are same format at 0x10.1 */
- rdt_get_cache_config(2, &rdt_resources_all[RDT_RESOURCE_L2]);
+ rdt_get_cache_alloc_cfg(2, &rdt_resources_all[RDT_RESOURCE_L2]);
ret = true;
}
- if (boot_cpu_has(X86_FEATURE_MBA)) {
+ if (rdt_cpu_has(X86_FEATURE_MBA)) {
if (rdt_get_mem_config(&rdt_resources_all[RDT_RESOURCE_MBA]))
ret = true;
}
-
return ret;
}
+static __init bool get_rdt_mon_resources(void)
+{
+ if (rdt_cpu_has(X86_FEATURE_CQM_OCCUP_LLC))
+ rdt_mon_features |= (1 << QOS_L3_OCCUP_EVENT_ID);
+ if (rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL))
+ rdt_mon_features |= (1 << QOS_L3_MBM_TOTAL_EVENT_ID);
+ if (rdt_cpu_has(X86_FEATURE_CQM_MBM_LOCAL))
+ rdt_mon_features |= (1 << QOS_L3_MBM_LOCAL_EVENT_ID);
+
+ if (!rdt_mon_features)
+ return false;
+
+ return !rdt_get_mon_l3_config(&rdt_resources_all[RDT_RESOURCE_L3]);
+}
+
+static __init void rdt_quirks(void)
+{
+ switch (boot_cpu_data.x86_model) {
+ case INTEL_FAM6_HASWELL_X:
+ if (!rdt_options[RDT_FLAG_L3_CAT].force_off)
+ cache_alloc_hsw_probe();
+ break;
+ case INTEL_FAM6_SKYLAKE_X:
+ if (boot_cpu_data.x86_mask <= 4)
+ set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat");
+ }
+}
+
+static __init bool get_rdt_resources(void)
+{
+ rdt_quirks();
+ rdt_alloc_capable = get_rdt_alloc_resources();
+ rdt_mon_capable = get_rdt_mon_resources();
+
+ return (rdt_mon_capable || rdt_alloc_capable);
+}
+
static int __init intel_rdt_late_init(void)
{
struct rdt_resource *r;
@@ -556,9 +806,12 @@ static int __init intel_rdt_late_init(void)
return ret;
}
- for_each_capable_rdt_resource(r)
+ for_each_alloc_capable_rdt_resource(r)
pr_info("Intel RDT %s allocation detected\n", r->name);
+ for_each_mon_capable_rdt_resource(r)
+ pr_info("Intel RDT %s monitoring detected\n", r->name);
+
return 0;
}
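
The new "rdt=" boot option takes a comma-separated list of the feature names in rdt_options[], each optionally prefixed with '!' to force the feature off; for example "rdt=!cmt,mba" would force CMT off and MBA on. A standalone userspace sketch of the same strsep()-based tokenizing, for illustration only (not kernel code):

#define _DEFAULT_SOURCE
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	char arg[] = "!cmt,mba";	/* the text after "rdt=" */
	char *str = arg, *tok;

	while ((tok = strsep(&str, ",")) != NULL) {
		bool force_off = (*tok == '!');

		if (force_off)
			tok++;
		printf("%s -> force_%s\n", tok, force_off ? "off" : "on");
	}
	return 0;
}
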
diff --git a/arch/x86/kernel/cpu/intel_rdt.h b/arch/x86/kernel/cpu/intel_rdt.h
new file mode 100644
index 000000000000..ebaddaeef023
--- /dev/null
+++ b/arch/x86/kernel/cpu/intel_rdt.h
@@ -0,0 +1,440 @@
+#ifndef _ASM_X86_INTEL_RDT_H
+#define _ASM_X86_INTEL_RDT_H
+
+#include <linux/sched.h>
+#include <linux/kernfs.h>
+#include <linux/jump_label.h>
+
+#define IA32_L3_QOS_CFG 0xc81
+#define IA32_L3_CBM_BASE 0xc90
+#define IA32_L2_CBM_BASE 0xd10
+#define IA32_MBA_THRTL_BASE 0xd50
+
+#define L3_QOS_CDP_ENABLE 0x01ULL
+
+/*
+ * Event IDs are used to program IA32_QM_EVTSEL before reading event
+ * counter from IA32_QM_CTR
+ */
+#define QOS_L3_OCCUP_EVENT_ID 0x01
+#define QOS_L3_MBM_TOTAL_EVENT_ID 0x02
+#define QOS_L3_MBM_LOCAL_EVENT_ID 0x03
+
+#define CQM_LIMBOCHECK_INTERVAL 1000
+
+#define MBM_CNTR_WIDTH 24
+#define MBM_OVERFLOW_INTERVAL 1000
+
+#define RMID_VAL_ERROR BIT_ULL(63)
+#define RMID_VAL_UNAVAIL BIT_ULL(62)
+
+DECLARE_STATIC_KEY_FALSE(rdt_enable_key);
+
+/**
+ * struct mon_evt - Entry in the event list of a resource
+ * @evtid: event id
+ * @name: name of the event
+ */
+struct mon_evt {
+ u32 evtid;
+ char *name;
+ struct list_head list;
+};
+
+/**
+ * struct mon_data_bits - Monitoring details for each event file
+ * @rid: Resource id associated with the event file.
+ * @evtid: Event id associated with the event file
+ * @domid: The domain to which the event file belongs
+ */
+union mon_data_bits {
+ void *priv;
+ struct {
+ unsigned int rid : 10;
+ unsigned int evtid : 8;
+ unsigned int domid : 14;
+ } u;
+};
+
+struct rmid_read {
+ struct rdtgroup *rgrp;
+ struct rdt_domain *d;
+ int evtid;
+ bool first;
+ u64 val;
+};
+
+extern unsigned int intel_cqm_threshold;
+extern bool rdt_alloc_capable;
+extern bool rdt_mon_capable;
+extern unsigned int rdt_mon_features;
+
+enum rdt_group_type {
+ RDTCTRL_GROUP = 0,
+ RDTMON_GROUP,
+ RDT_NUM_GROUP,
+};
+
+/**
+ * struct mongroup - store mon group's data in resctrl fs.
+ * @mon_data_kn:	kernfs node for the mon_data directory
+ * @parent: parent rdtgrp
+ * @crdtgrp_list: child rdtgroup node list
+ * @rmid: rmid for this rdtgroup
+ */
+struct mongroup {
+ struct kernfs_node *mon_data_kn;
+ struct rdtgroup *parent;
+ struct list_head crdtgrp_list;
+ u32 rmid;
+};
+
+/**
+ * struct rdtgroup - store rdtgroup's data in resctrl file system.
+ * @kn: kernfs node
+ * @rdtgroup_list: linked list for all rdtgroups
+ * @closid: closid for this rdtgroup
+ * @cpu_mask: CPUs assigned to this rdtgroup
+ * @flags: status bits
+ * @waitcount: how many cpus expect to find this
+ * group when they acquire rdtgroup_mutex
+ * @type: indicates type of this rdtgroup - either
+ * monitor only or ctrl_mon group
+ * @mon: mongroup related data
+ */
+struct rdtgroup {
+ struct kernfs_node *kn;
+ struct list_head rdtgroup_list;
+ u32 closid;
+ struct cpumask cpu_mask;
+ int flags;
+ atomic_t waitcount;
+ enum rdt_group_type type;
+ struct mongroup mon;
+};
+
+/* rdtgroup.flags */
+#define RDT_DELETED 1
+
+/* rftype.flags */
+#define RFTYPE_FLAGS_CPUS_LIST 1
+
+/*
+ * Define the file type flags for base and info directories.
+ */
+#define RFTYPE_INFO BIT(0)
+#define RFTYPE_BASE BIT(1)
+#define RF_CTRLSHIFT 4
+#define RF_MONSHIFT 5
+#define RFTYPE_CTRL BIT(RF_CTRLSHIFT)
+#define RFTYPE_MON BIT(RF_MONSHIFT)
+#define RFTYPE_RES_CACHE BIT(8)
+#define RFTYPE_RES_MB BIT(9)
+#define RF_CTRL_INFO (RFTYPE_INFO | RFTYPE_CTRL)
+#define RF_MON_INFO (RFTYPE_INFO | RFTYPE_MON)
+#define RF_CTRL_BASE (RFTYPE_BASE | RFTYPE_CTRL)
+
+/* List of all resource groups */
+extern struct list_head rdt_all_groups;
+
+extern int max_name_width, max_data_width;
+
+int __init rdtgroup_init(void);
+
+/**
+ * struct rftype - describe each file in the resctrl file system
+ * @name: File name
+ * @mode: Access mode
+ * @kf_ops: File operations
+ * @flags: File specific RFTYPE_FLAGS_* flags
+ * @fflags: File specific RF_* or RFTYPE_* flags
+ * @seq_show: Show content of the file
+ * @write: Write to the file
+ */
+struct rftype {
+ char *name;
+ umode_t mode;
+ struct kernfs_ops *kf_ops;
+ unsigned long flags;
+ unsigned long fflags;
+
+ int (*seq_show)(struct kernfs_open_file *of,
+ struct seq_file *sf, void *v);
+ /*
+ * write() is the generic write callback which maps directly to
+ * kernfs write operation and overrides all other operations.
+ * Maximum write size is determined by ->max_write_len.
+ */
+ ssize_t (*write)(struct kernfs_open_file *of,
+ char *buf, size_t nbytes, loff_t off);
+};
+
+/**
+ * struct mbm_state - status for each MBM counter in each domain
+ * @chunks: Total data moved (multiply by rdt_group.mon_scale to get bytes)
+ * @prev_msr:	Value of IA32_QM_CTR for this RMID last time we read it
+ */
+struct mbm_state {
+ u64 chunks;
+ u64 prev_msr;
+};
+
+/**
+ * struct rdt_domain - group of cpus sharing an RDT resource
+ * @list: all instances of this resource
+ * @id: unique id for this instance
+ * @cpu_mask: which cpus share this resource
+ * @rmid_busy_llc:
+ * bitmap of which limbo RMIDs are above threshold
+ * @mbm_total: saved state for MBM total bandwidth
+ * @mbm_local: saved state for MBM local bandwidth
+ * @mbm_over: worker to periodically read MBM h/w counters
+ * @cqm_limbo: worker to periodically read CQM h/w counters
+ * @mbm_work_cpu:
+ * worker cpu for MBM h/w counters
+ * @cqm_work_cpu:
+ * worker cpu for CQM h/w counters
+ * @ctrl_val: array of cache or mem ctrl values (indexed by CLOSID)
+ * @new_ctrl: new ctrl value to be loaded
+ * @have_new_ctrl: did user provide new_ctrl for this domain
+ */
+struct rdt_domain {
+ struct list_head list;
+ int id;
+ struct cpumask cpu_mask;
+ unsigned long *rmid_busy_llc;
+ struct mbm_state *mbm_total;
+ struct mbm_state *mbm_local;
+ struct delayed_work mbm_over;
+ struct delayed_work cqm_limbo;
+ int mbm_work_cpu;
+ int cqm_work_cpu;
+ u32 *ctrl_val;
+ u32 new_ctrl;
+ bool have_new_ctrl;
+};
+
+/**
+ * struct msr_param - set a range of MSRs from a domain
+ * @res: The resource to use
+ * @low: Beginning index from base MSR
+ * @high: End index
+ */
+struct msr_param {
+ struct rdt_resource *res;
+ int low;
+ int high;
+};
+
+/**
+ * struct rdt_cache - Cache allocation related data
+ * @cbm_len: Length of the cache bit mask
+ * @min_cbm_bits: Minimum number of consecutive bits to be set
+ * @cbm_idx_mult: Multiplier of CBM index
+ * @cbm_idx_offset: Offset of CBM index. CBM index is computed by:
+ * closid * cbm_idx_mult + cbm_idx_offset
+ * in a cache bit mask
+ * @shareable_bits: Bitmask of shareable resource with other
+ * executing entities
+ */
+struct rdt_cache {
+ unsigned int cbm_len;
+ unsigned int min_cbm_bits;
+ unsigned int cbm_idx_mult;
+ unsigned int cbm_idx_offset;
+ unsigned int shareable_bits;
+};
+
+/**
+ * struct rdt_membw - Memory bandwidth allocation related data
+ * @max_delay: Max throttle delay. Delay is the hardware
+ * representation for memory bandwidth.
+ * @min_bw: Minimum memory bandwidth percentage user can request
+ * @bw_gran: Granularity at which the memory bandwidth is allocated
+ * @delay_linear: True if memory B/W delay is in linear scale
+ * @mb_map: Mapping of memory B/W percentage to memory B/W delay
+ */
+struct rdt_membw {
+ u32 max_delay;
+ u32 min_bw;
+ u32 bw_gran;
+ u32 delay_linear;
+ u32 *mb_map;
+};
+
+static inline bool is_llc_occupancy_enabled(void)
+{
+ return (rdt_mon_features & (1 << QOS_L3_OCCUP_EVENT_ID));
+}
+
+static inline bool is_mbm_total_enabled(void)
+{
+ return (rdt_mon_features & (1 << QOS_L3_MBM_TOTAL_EVENT_ID));
+}
+
+static inline bool is_mbm_local_enabled(void)
+{
+ return (rdt_mon_features & (1 << QOS_L3_MBM_LOCAL_EVENT_ID));
+}
+
+static inline bool is_mbm_enabled(void)
+{
+ return (is_mbm_total_enabled() || is_mbm_local_enabled());
+}
+
+static inline bool is_mbm_event(int e)
+{
+ return (e >= QOS_L3_MBM_TOTAL_EVENT_ID &&
+ e <= QOS_L3_MBM_LOCAL_EVENT_ID);
+}
+
+/**
+ * struct rdt_resource - attributes of an RDT resource
+ * @rid: The index of the resource
+ * @alloc_enabled: Is allocation enabled on this machine
+ * @mon_enabled: Is monitoring enabled for this feature
+ * @alloc_capable: Is allocation available on this machine
+ * @mon_capable: Is monitor feature available on this machine
+ * @name: Name to use in "schemata" file
+ * @num_closid: Number of CLOSIDs available
+ * @cache_level: Which cache level defines scope of this resource
+ * @default_ctrl: Specifies default cache cbm or memory B/W percent.
+ * @msr_base: Base MSR address for CBMs
+ * @msr_update: Function pointer to update QOS MSRs
+ * @data_width: Character width of data when displaying
+ * @domains: All domains for this resource
+ * @cache: Cache allocation related data
+ * @format_str: Per resource format string to show domain value
+ * @parse_ctrlval: Per resource function pointer to parse control values
+ * @evt_list: List of monitoring events
+ * @num_rmid: Number of RMIDs available
+ * @mon_scale: cqm counter * mon_scale = occupancy in bytes
+ * @fflags: flags to choose base and info files
+ */
+struct rdt_resource {
+ int rid;
+ bool alloc_enabled;
+ bool mon_enabled;
+ bool alloc_capable;
+ bool mon_capable;
+ char *name;
+ int num_closid;
+ int cache_level;
+ u32 default_ctrl;
+ unsigned int msr_base;
+ void (*msr_update) (struct rdt_domain *d, struct msr_param *m,
+ struct rdt_resource *r);
+ int data_width;
+ struct list_head domains;
+ struct rdt_cache cache;
+ struct rdt_membw membw;
+ const char *format_str;
+ int (*parse_ctrlval) (char *buf, struct rdt_resource *r,
+ struct rdt_domain *d);
+ struct list_head evt_list;
+ int num_rmid;
+ unsigned int mon_scale;
+ unsigned long fflags;
+};
+
+int parse_cbm(char *buf, struct rdt_resource *r, struct rdt_domain *d);
+int parse_bw(char *buf, struct rdt_resource *r, struct rdt_domain *d);
+
+extern struct mutex rdtgroup_mutex;
+
+extern struct rdt_resource rdt_resources_all[];
+extern struct rdtgroup rdtgroup_default;
+DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
+
+int __init rdtgroup_init(void);
+
+enum {
+ RDT_RESOURCE_L3,
+ RDT_RESOURCE_L3DATA,
+ RDT_RESOURCE_L3CODE,
+ RDT_RESOURCE_L2,
+ RDT_RESOURCE_MBA,
+
+ /* Must be the last */
+ RDT_NUM_RESOURCES,
+};
+
+#define for_each_capable_rdt_resource(r) \
+ for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
+ r++) \
+ if (r->alloc_capable || r->mon_capable)
+
+#define for_each_alloc_capable_rdt_resource(r) \
+ for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
+ r++) \
+ if (r->alloc_capable)
+
+#define for_each_mon_capable_rdt_resource(r) \
+ for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
+ r++) \
+ if (r->mon_capable)
+
+#define for_each_alloc_enabled_rdt_resource(r) \
+ for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
+ r++) \
+ if (r->alloc_enabled)
+
+#define for_each_mon_enabled_rdt_resource(r) \
+ for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
+ r++) \
+ if (r->mon_enabled)
+
+/* CPUID.(EAX=10H, ECX=ResID=1).EAX */
+union cpuid_0x10_1_eax {
+ struct {
+ unsigned int cbm_len:5;
+ } split;
+ unsigned int full;
+};
+
+/* CPUID.(EAX=10H, ECX=ResID=3).EAX */
+union cpuid_0x10_3_eax {
+ struct {
+ unsigned int max_delay:12;
+ } split;
+ unsigned int full;
+};
+
+/* CPUID.(EAX=10H, ECX=ResID).EDX */
+union cpuid_0x10_x_edx {
+ struct {
+ unsigned int cos_max:16;
+ } split;
+ unsigned int full;
+};
+
+void rdt_ctrl_update(void *arg);
+struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn);
+void rdtgroup_kn_unlock(struct kernfs_node *kn);
+struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
+ struct list_head **pos);
+ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
+ char *buf, size_t nbytes, loff_t off);
+int rdtgroup_schemata_show(struct kernfs_open_file *of,
+ struct seq_file *s, void *v);
+struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r);
+int alloc_rmid(void);
+void free_rmid(u32 rmid);
+int rdt_get_mon_l3_config(struct rdt_resource *r);
+void mon_event_count(void *info);
+int rdtgroup_mondata_show(struct seq_file *m, void *arg);
+void rmdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
+ unsigned int dom_id);
+void mkdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
+ struct rdt_domain *d);
+void mon_event_read(struct rmid_read *rr, struct rdt_domain *d,
+ struct rdtgroup *rdtgrp, int evtid, int first);
+void mbm_setup_overflow_handler(struct rdt_domain *dom,
+ unsigned long delay_ms);
+void mbm_handle_overflow(struct work_struct *work);
+void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms);
+void cqm_handle_limbo(struct work_struct *work);
+bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d);
+void __check_limbo(struct rdt_domain *d, bool force_free);
+
+#endif /* _ASM_X86_INTEL_RDT_H */
diff --git a/arch/x86/kernel/cpu/intel_rdt_schemata.c b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
index 406d7a6532f9..f6ea94f8954a 100644
--- a/arch/x86/kernel/cpu/intel_rdt_schemata.c
+++ b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
@@ -26,7 +26,7 @@
#include <linux/kernfs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
-#include <asm/intel_rdt.h>
+#include "intel_rdt.h"
/*
* Check whether MBA bandwidth percentage value is correct. The value is
@@ -192,7 +192,7 @@ static int rdtgroup_parse_resource(char *resname, char *tok, int closid)
{
struct rdt_resource *r;
- for_each_enabled_rdt_resource(r) {
+ for_each_alloc_enabled_rdt_resource(r) {
if (!strcmp(resname, r->name) && closid < r->num_closid)
return parse_line(tok, r);
}
@@ -221,7 +221,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
closid = rdtgrp->closid;
- for_each_enabled_rdt_resource(r) {
+ for_each_alloc_enabled_rdt_resource(r) {
list_for_each_entry(dom, &r->domains, list)
dom->have_new_ctrl = false;
}
@@ -237,7 +237,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
goto out;
}
- for_each_enabled_rdt_resource(r) {
+ for_each_alloc_enabled_rdt_resource(r) {
ret = update_domains(r, closid);
if (ret)
goto out;
@@ -269,12 +269,13 @@ int rdtgroup_schemata_show(struct kernfs_open_file *of,
{
struct rdtgroup *rdtgrp;
struct rdt_resource *r;
- int closid, ret = 0;
+ int ret = 0;
+ u32 closid;
rdtgrp = rdtgroup_kn_lock_live(of->kn);
if (rdtgrp) {
closid = rdtgrp->closid;
- for_each_enabled_rdt_resource(r) {
+ for_each_alloc_enabled_rdt_resource(r) {
if (closid < r->num_closid)
show_doms(s, r, closid);
}
@@ -284,3 +285,57 @@ int rdtgroup_schemata_show(struct kernfs_open_file *of,
rdtgroup_kn_unlock(of->kn);
return ret;
}
+
+void mon_event_read(struct rmid_read *rr, struct rdt_domain *d,
+ struct rdtgroup *rdtgrp, int evtid, int first)
+{
+ /*
+ * Set up the parameters to send to the IPI to read the data.
+ */
+ rr->rgrp = rdtgrp;
+ rr->evtid = evtid;
+ rr->d = d;
+ rr->val = 0;
+ rr->first = first;
+
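+ /* Read the h/w counter via IPI on any CPU that belongs to the domain */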
+ smp_call_function_any(&d->cpu_mask, mon_event_count, rr, 1);
+}
+
+int rdtgroup_mondata_show(struct seq_file *m, void *arg)
+{
+ struct kernfs_open_file *of = m->private;
+ u32 resid, evtid, domid;
+ struct rdtgroup *rdtgrp;
+ struct rdt_resource *r;
+ union mon_data_bits md;
+ struct rdt_domain *d;
+ struct rmid_read rr;
+ int ret = 0;
+
+ rdtgrp = rdtgroup_kn_lock_live(of->kn);
+
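+ /*
+ * The resource, domain and event ids were packed into the kernfs
+ * priv pointer when the mon_data files were created.
+ */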
+ md.priv = of->kn->priv;
+ resid = md.u.rid;
+ domid = md.u.domid;
+ evtid = md.u.evtid;
+
+ r = &rdt_resources_all[resid];
+ d = rdt_find_domain(r, domid, NULL);
+ if (!d) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ mon_event_read(&rr, d, rdtgrp, evtid, false);
+
+ if (rr.val & RMID_VAL_ERROR)
+ seq_puts(m, "Error\n");
+ else if (rr.val & RMID_VAL_UNAVAIL)
+ seq_puts(m, "Unavailable\n");
+ else
+ seq_printf(m, "%llu\n", rr.val * r->mon_scale);
+
+out:
+ rdtgroup_kn_unlock(of->kn);
+ return ret;
+}
diff --git a/arch/x86/kernel/cpu/intel_rdt_monitor.c b/arch/x86/kernel/cpu/intel_rdt_monitor.c
new file mode 100644
index 000000000000..30827510094b
--- /dev/null
+++ b/arch/x86/kernel/cpu/intel_rdt_monitor.c
@@ -0,0 +1,499 @@
+/*
+ * Resource Director Technology(RDT)
+ * - Monitoring code
+ *
+ * Copyright (C) 2017 Intel Corporation
+ *
+ * Author:
+ * Vikas Shivappa <vikas.shivappa@intel.com>
+ *
+ * This replaces the perf-based cqm.c, but we reuse a lot of
+ * code and data structures originally from Peter Zijlstra and Matt Fleming.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * More information about RDT can be found in the Intel (R) x86 Architecture
+ * Software Developer Manual June 2016, volume 3, section 17.17.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <asm/cpu_device_id.h>
+#include "intel_rdt.h"
+
+#define MSR_IA32_QM_CTR 0x0c8e
+#define MSR_IA32_QM_EVTSEL 0x0c8d
+
+struct rmid_entry {
+ u32 rmid;
+ int busy;
+ struct list_head list;
+};
+
+/**
+ * @rmid_free_lru A least recently used list of free RMIDs
+ * These RMIDs are guaranteed to have an occupancy less than the
+ * threshold occupancy
+ */
+static LIST_HEAD(rmid_free_lru);
+
+/**
+ * @rmid_limbo_count count of currently unused but (potentially)
+ * dirty RMIDs.
+ * This counts RMIDs that no one is currently using but that
+ * may have an occupancy value > intel_cqm_threshold. The user can
+ * change the threshold occupancy value.
+ */
+unsigned int rmid_limbo_count;
+
+/**
+ * @rmid_entry - The entry in the limbo and free lists.
+ */
+static struct rmid_entry *rmid_ptrs;
+
+/*
+ * Global boolean for rdt_monitor which is true if any
+ * resource monitoring is enabled.
+ */
+bool rdt_mon_capable;
+
+/*
+ * Global to indicate which monitoring events are enabled.
+ */
+unsigned int rdt_mon_features;
+
+/*
+ * This is the threshold cache occupancy at which we will consider an
+ * RMID available for re-allocation.
+ */
+unsigned int intel_cqm_threshold;
+
+static inline struct rmid_entry *__rmid_entry(u32 rmid)
+{
+ struct rmid_entry *entry;
+
+ entry = &rmid_ptrs[rmid];
+ WARN_ON(entry->rmid != rmid);
+
+ return entry;
+}
+
+static u64 __rmid_read(u32 rmid, u32 eventid)
+{
+ u64 val;
+
+ /*
+ * As per the SDM, when IA32_QM_EVTSEL.EvtID (bits 7:0) is configured
+ * with a valid event code for supported resource type and the bits
+ * IA32_QM_EVTSEL.RMID (bits 41:32) are configured with valid RMID,
+ * IA32_QM_CTR.data (bits 61:0) reports the monitored data.
+ * IA32_QM_CTR.Error (bit 63) and IA32_QM_CTR.Unavailable (bit 62)
+ * are error bits.
+ */
+ wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid);
+ rdmsrl(MSR_IA32_QM_CTR, val);
+
+ return val;
+}
+
+static bool rmid_dirty(struct rmid_entry *entry)
+{
+ u64 val = __rmid_read(entry->rmid, QOS_L3_OCCUP_EVENT_ID);
+
+ return val >= intel_cqm_threshold;
+}
+
+/*
+ * Check the RMIDs that are marked as busy for this domain. If the
+ * reported LLC occupancy is below the threshold, clear the busy bit and
+ * decrement the count. If the busy count gets to zero on an RMID, we
+ * free the RMID.
+ */
+void __check_limbo(struct rdt_domain *d, bool force_free)
+{
+ struct rmid_entry *entry;
+ struct rdt_resource *r;
+ u32 crmid = 1, nrmid;
+
+ r = &rdt_resources_all[RDT_RESOURCE_L3];
+
+ /*
+ * Skip RMID 0 and start from RMID 1; check all the RMIDs that are
+ * marked as busy for occupancy < threshold. If the occupancy is less
+ * than the threshold, decrement the busy counter of the RMID and
+ * move it to the free list when the counter reaches 0.
+ */
+ for (;;) {
+ nrmid = find_next_bit(d->rmid_busy_llc, r->num_rmid, crmid);
+ if (nrmid >= r->num_rmid)
+ break;
+
+ entry = __rmid_entry(nrmid);
+ if (force_free || !rmid_dirty(entry)) {
+ clear_bit(entry->rmid, d->rmid_busy_llc);
+ if (!--entry->busy) {
+ rmid_limbo_count--;
+ list_add_tail(&entry->list, &rmid_free_lru);
+ }
+ }
+ crmid = nrmid + 1;
+ }
+}
+
+bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d)
+{
+ return find_first_bit(d->rmid_busy_llc, r->num_rmid) != r->num_rmid;
+}
+
+/*
+ * As of now, RMID allocation is global.
+ * However, we keep track of which packages the RMIDs are used on to
+ * optimize the limbo list management.
+ */
+int alloc_rmid(void)
+{
+ struct rmid_entry *entry;
+
+ lockdep_assert_held(&rdtgroup_mutex);
+
+ if (list_empty(&rmid_free_lru))
+ return rmid_limbo_count ? -EBUSY : -ENOSPC;
+
+ entry = list_first_entry(&rmid_free_lru,
+ struct rmid_entry, list);
+ list_del(&entry->list);
+
+ return entry->rmid;
+}
+
+static void add_rmid_to_limbo(struct rmid_entry *entry)
+{
+ struct rdt_resource *r;
+ struct rdt_domain *d;
+ int cpu;
+ u64 val;
+
+ r = &rdt_resources_all[RDT_RESOURCE_L3];
+
+ entry->busy = 0;
+ cpu = get_cpu();
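+ /*
+ * On the domain that contains the current CPU the occupancy can be
+ * read directly and the RMID skipped if it is already below the
+ * threshold. Remote domains are conservatively marked busy and
+ * re-checked later by the limbo worker.
+ */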
+ list_for_each_entry(d, &r->domains, list) {
+ if (cpumask_test_cpu(cpu, &d->cpu_mask)) {
+ val = __rmid_read(entry->rmid, QOS_L3_OCCUP_EVENT_ID);
+ if (val <= intel_cqm_threshold)
+ continue;
+ }
+
+ /*
+ * For the first limbo RMID in the domain,
+ * set up the limbo worker.
+ */
+ if (!has_busy_rmid(r, d))
+ cqm_setup_limbo_handler(d, CQM_LIMBOCHECK_INTERVAL);
+ set_bit(entry->rmid, d->rmid_busy_llc);
+ entry->busy++;
+ }
+ put_cpu();
+
+ if (entry->busy)
+ rmid_limbo_count++;
+ else
+ list_add_tail(&entry->list, &rmid_free_lru);
+}
+
+void free_rmid(u32 rmid)
+{
+ struct rmid_entry *entry;
+
+ if (!rmid)
+ return;
+
+ lockdep_assert_held(&rdtgroup_mutex);
+
+ entry = __rmid_entry(rmid);
+
+ if (is_llc_occupancy_enabled())
+ add_rmid_to_limbo(entry);
+ else
+ list_add_tail(&entry->list, &rmid_free_lru);
+}
+
+static int __mon_event_count(u32 rmid, struct rmid_read *rr)
+{
+ u64 chunks, shift, tval;
+ struct mbm_state *m;
+
+ tval = __rmid_read(rmid, rr->evtid);
+ if (tval & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL)) {
+ rr->val = tval;
+ return -EINVAL;
+ }
+ switch (rr->evtid) {
+ case QOS_L3_OCCUP_EVENT_ID:
+ rr->val += tval;
+ return 0;
+ case QOS_L3_MBM_TOTAL_EVENT_ID:
+ m = &rr->d->mbm_total[rmid];
+ break;
+ case QOS_L3_MBM_LOCAL_EVENT_ID:
+ m = &rr->d->mbm_local[rmid];
+ break;
+ default:
+ /*
+ * Code would never reach here because
+ * an invalid event id would fail the __rmid_read.
+ */
+ return -EINVAL;
+ }
+
+ if (rr->first) {
+ m->prev_msr = tval;
+ m->chunks = 0;
+ return 0;
+ }
+
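+ /*
+ * The h/w counter is only MBM_CNTR_WIDTH bits wide. Shifting both
+ * values up by (64 - MBM_CNTR_WIDTH) and back down makes the
+ * subtraction wrap correctly when the h/w counter overflows.
+ */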
+ shift = 64 - MBM_CNTR_WIDTH;
+ chunks = (tval << shift) - (m->prev_msr << shift);
+ chunks >>= shift;
+ m->chunks += chunks;
+ m->prev_msr = tval;
+
+ rr->val += m->chunks;
+ return 0;
+}
+
+/*
+ * This is called via IPI to read the CQM/MBM counters
+ * on a domain.
+ */
+void mon_event_count(void *info)
+{
+ struct rdtgroup *rdtgrp, *entry;
+ struct rmid_read *rr = info;
+ struct list_head *head;
+
+ rdtgrp = rr->rgrp;
+
+ if (__mon_event_count(rdtgrp->mon.rmid, rr))
+ return;
+
+ /*
+ * For ctrl groups, also read data from the child monitor groups.
+ */
+ head = &rdtgrp->mon.crdtgrp_list;
+
+ if (rdtgrp->type == RDTCTRL_GROUP) {
+ list_for_each_entry(entry, head, mon.crdtgrp_list) {
+ if (__mon_event_count(entry->mon.rmid, rr))
+ return;
+ }
+ }
+}
+
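+/*
+ * Read the current MBM counters for @rmid on domain @d so that the
+ * software chunk counts stay in sync with the h/w counters.
+ */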
+static void mbm_update(struct rdt_domain *d, int rmid)
+{
+ struct rmid_read rr;
+
+ rr.first = false;
+ rr.d = d;
+
+ /*
+ * This is protected against concurrent reads from user space
+ * as both the user and we hold the global rdtgroup_mutex.
+ */
+ if (is_mbm_total_enabled()) {
+ rr.evtid = QOS_L3_MBM_TOTAL_EVENT_ID;
+ __mon_event_count(rmid, &rr);
+ }
+ if (is_mbm_local_enabled()) {
+ rr.evtid = QOS_L3_MBM_LOCAL_EVENT_ID;
+ __mon_event_count(rmid, &rr);
+ }
+}
+
+/*
+ * Handler to scan the limbo list and move RMIDs whose occupancy
+ * is below threshold_occupancy to the free list.
+ */
+void cqm_handle_limbo(struct work_struct *work)
+{
+ unsigned long delay = msecs_to_jiffies(CQM_LIMBOCHECK_INTERVAL);
+ int cpu = smp_processor_id();
+ struct rdt_resource *r;
+ struct rdt_domain *d;
+
+ mutex_lock(&rdtgroup_mutex);
+
+ r = &rdt_resources_all[RDT_RESOURCE_L3];
+ d = get_domain_from_cpu(cpu, r);
+
+ if (!d) {
+ pr_warn_once("Failure to get domain for limbo worker\n");
+ goto out_unlock;
+ }
+
+ __check_limbo(d, false);
+
+ if (has_busy_rmid(r, d))
+ schedule_delayed_work_on(cpu, &d->cqm_limbo, delay);
+
+out_unlock:
+ mutex_unlock(&rdtgroup_mutex);
+}
+
+void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms)
+{
+ unsigned long delay = msecs_to_jiffies(delay_ms);
+ struct rdt_resource *r;
+ int cpu;
+
+ r = &rdt_resources_all[RDT_RESOURCE_L3];
+
+ cpu = cpumask_any(&dom->cpu_mask);
+ dom->cqm_work_cpu = cpu;
+
+ schedule_delayed_work_on(cpu, &dom->cqm_limbo, delay);
+}
+
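+/*
+ * Periodic worker that reads the MBM counters of all groups on this
+ * domain so that the free running h/w counters never wrap undetected.
+ */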
+void mbm_handle_overflow(struct work_struct *work)
+{
+ unsigned long delay = msecs_to_jiffies(MBM_OVERFLOW_INTERVAL);
+ struct rdtgroup *prgrp, *crgrp;
+ int cpu = smp_processor_id();
+ struct list_head *head;
+ struct rdt_domain *d;
+
+ mutex_lock(&rdtgroup_mutex);
+
+ if (!static_branch_likely(&rdt_enable_key))
+ goto out_unlock;
+
+ d = get_domain_from_cpu(cpu, &rdt_resources_all[RDT_RESOURCE_L3]);
+ if (!d)
+ goto out_unlock;
+
+ list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
+ mbm_update(d, prgrp->mon.rmid);
+
+ head = &prgrp->mon.crdtgrp_list;
+ list_for_each_entry(crgrp, head, mon.crdtgrp_list)
+ mbm_update(d, crgrp->mon.rmid);
+ }
+
+ schedule_delayed_work_on(cpu, &d->mbm_over, delay);
+
+out_unlock:
+ mutex_unlock(&rdtgroup_mutex);
+}
+
+void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms)
+{
+ unsigned long delay = msecs_to_jiffies(delay_ms);
+ int cpu;
+
+ if (!static_branch_likely(&rdt_enable_key))
+ return;
+ cpu = cpumask_any(&dom->cpu_mask);
+ dom->mbm_work_cpu = cpu;
+ schedule_delayed_work_on(cpu, &dom->mbm_over, delay);
+}
+
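+/*
+ * Allocate the rmid_ptrs[] array and put all RMIDs except RMID 0 on
+ * the free list. RMID 0 is reserved for the default/unmonitored case.
+ */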
+static int dom_data_init(struct rdt_resource *r)
+{
+ struct rmid_entry *entry = NULL;
+ int i, nr_rmids;
+
+ nr_rmids = r->num_rmid;
+ rmid_ptrs = kcalloc(nr_rmids, sizeof(struct rmid_entry), GFP_KERNEL);
+ if (!rmid_ptrs)
+ return -ENOMEM;
+
+ for (i = 0; i < nr_rmids; i++) {
+ entry = &rmid_ptrs[i];
+ INIT_LIST_HEAD(&entry->list);
+
+ entry->rmid = i;
+ list_add_tail(&entry->list, &rmid_free_lru);
+ }
+
+ /*
+ * RMID 0 is special and is always allocated. It's used for all
+ * tasks that are not monitored.
+ */
+ entry = __rmid_entry(0);
+ list_del(&entry->list);
+
+ return 0;
+}
+
+static struct mon_evt llc_occupancy_event = {
+ .name = "llc_occupancy",
+ .evtid = QOS_L3_OCCUP_EVENT_ID,
+};
+
+static struct mon_evt mbm_total_event = {
+ .name = "mbm_total_bytes",
+ .evtid = QOS_L3_MBM_TOTAL_EVENT_ID,
+};
+
+static struct mon_evt mbm_local_event = {
+ .name = "mbm_local_bytes",
+ .evtid = QOS_L3_MBM_LOCAL_EVENT_ID,
+};
+
+/*
+ * Initialize the event list for the resource.
+ *
+ * Note that MBM events are also part of the RDT_RESOURCE_L3 resource
+ * because, as per the SDM, the total and local memory bandwidth
+ * are enumerated as part of L3 monitoring.
+ */
+static void l3_mon_evt_init(struct rdt_resource *r)
+{
+ INIT_LIST_HEAD(&r->evt_list);
+
+ if (is_llc_occupancy_enabled())
+ list_add_tail(&llc_occupancy_event.list, &r->evt_list);
+ if (is_mbm_total_enabled())
+ list_add_tail(&mbm_total_event.list, &r->evt_list);
+ if (is_mbm_local_enabled())
+ list_add_tail(&mbm_local_event.list, &r->evt_list);
+}
+
+int rdt_get_mon_l3_config(struct rdt_resource *r)
+{
+ int ret;
+
+ r->mon_scale = boot_cpu_data.x86_cache_occ_scale;
+ r->num_rmid = boot_cpu_data.x86_cache_max_rmid + 1;
+
+ /*
+ * A reasonable upper limit on the max threshold is the number
+ * of lines tagged per RMID if all RMIDs have the same number of
+ * lines tagged in the LLC.
+ *
+ * For a 35MB LLC and 56 RMIDs, this is ~1.8% of the LLC.
+ */
+ intel_cqm_threshold = boot_cpu_data.x86_cache_size * 1024 / r->num_rmid;
+
+ /* h/w works in units of "boot_cpu_data.x86_cache_occ_scale" */
+ intel_cqm_threshold /= r->mon_scale;
+
+ ret = dom_data_init(r);
+ if (ret)
+ return ret;
+
+ l3_mon_evt_init(r);
+
+ r->mon_capable = true;
+ r->mon_enabled = true;
+
+ return 0;
+}
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index 9257bd9dc664..a869d4a073c5 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -32,17 +32,25 @@
#include <uapi/linux/magic.h>
-#include <asm/intel_rdt.h>
-#include <asm/intel_rdt_common.h>
+#include <asm/intel_rdt_sched.h>
+#include "intel_rdt.h"
DEFINE_STATIC_KEY_FALSE(rdt_enable_key);
-struct kernfs_root *rdt_root;
+DEFINE_STATIC_KEY_FALSE(rdt_mon_enable_key);
+DEFINE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
+static struct kernfs_root *rdt_root;
struct rdtgroup rdtgroup_default;
LIST_HEAD(rdt_all_groups);
/* Kernel fs node for "info" directory under root */
static struct kernfs_node *kn_info;
+/* Kernel fs node for "mon_groups" directory under root */
+static struct kernfs_node *kn_mongrp;
+
+/* Kernel fs node for "mon_data" directory under root */
+static struct kernfs_node *kn_mondata;
+
/*
* Trivial allocator for CLOSIDs. Since h/w only supports a small number,
* we can keep a bitmap of free CLOSIDs in a single integer.
@@ -66,7 +74,7 @@ static void closid_init(void)
int rdt_min_closid = 32;
/* Compute rdt_min_closid across all resources */
- for_each_enabled_rdt_resource(r)
+ for_each_alloc_enabled_rdt_resource(r)
rdt_min_closid = min(rdt_min_closid, r->num_closid);
closid_free_map = BIT_MASK(rdt_min_closid) - 1;
@@ -75,9 +83,9 @@ static void closid_init(void)
closid_free_map &= ~1;
}
-int closid_alloc(void)
+static int closid_alloc(void)
{
- int closid = ffs(closid_free_map);
+ u32 closid = ffs(closid_free_map);
if (closid == 0)
return -ENOSPC;
@@ -125,28 +133,6 @@ static int rdtgroup_add_file(struct kernfs_node *parent_kn, struct rftype *rft)
return 0;
}
-static int rdtgroup_add_files(struct kernfs_node *kn, struct rftype *rfts,
- int len)
-{
- struct rftype *rft;
- int ret;
-
- lockdep_assert_held(&rdtgroup_mutex);
-
- for (rft = rfts; rft < rfts + len; rft++) {
- ret = rdtgroup_add_file(kn, rft);
- if (ret)
- goto error;
- }
-
- return 0;
-error:
- pr_warn("Failed to add %s, err=%d\n", rft->name, ret);
- while (--rft >= rfts)
- kernfs_remove_by_name(kn, rft->name);
- return ret;
-}
-
static int rdtgroup_seqfile_show(struct seq_file *m, void *arg)
{
struct kernfs_open_file *of = m->private;
@@ -174,6 +160,11 @@ static struct kernfs_ops rdtgroup_kf_single_ops = {
.seq_show = rdtgroup_seqfile_show,
};
+static struct kernfs_ops kf_mondata_ops = {
+ .atomic_write_len = PAGE_SIZE,
+ .seq_show = rdtgroup_mondata_show,
+};
+
static bool is_cpu_list(struct kernfs_open_file *of)
{
struct rftype *rft = of->kn->priv;
@@ -203,13 +194,18 @@ static int rdtgroup_cpus_show(struct kernfs_open_file *of,
/*
* This is safe against intel_rdt_sched_in() called from __switch_to()
* because __switch_to() is executed with interrupts disabled. A local call
- * from rdt_update_closid() is proteced against __switch_to() because
+ * from update_closid_rmid() is protected against __switch_to() because
* preemption is disabled.
*/
-static void rdt_update_cpu_closid(void *closid)
+static void update_cpu_closid_rmid(void *info)
{
- if (closid)
- this_cpu_write(cpu_closid, *(int *)closid);
+ struct rdtgroup *r = info;
+
+ if (r) {
+ this_cpu_write(pqr_state.default_closid, r->closid);
+ this_cpu_write(pqr_state.default_rmid, r->mon.rmid);
+ }
+
/*
* We cannot unconditionally write the MSR because the current
* executing task might have its own closid selected. Just reuse
@@ -221,28 +217,128 @@ static void rdt_update_cpu_closid(void *closid)
/*
* Update the PGR_ASSOC MSR on all cpus in @cpu_mask,
*
- * Per task closids must have been set up before calling this function.
- *
- * The per cpu closids are updated with the smp function call, when @closid
- * is not NULL. If @closid is NULL then all affected percpu closids must
- * have been set up before calling this function.
+ * Per task closids/rmids must have been set up before calling this function.
*/
static void
-rdt_update_closid(const struct cpumask *cpu_mask, int *closid)
+update_closid_rmid(const struct cpumask *cpu_mask, struct rdtgroup *r)
{
int cpu = get_cpu();
if (cpumask_test_cpu(cpu, cpu_mask))
- rdt_update_cpu_closid(closid);
- smp_call_function_many(cpu_mask, rdt_update_cpu_closid, closid, 1);
+ update_cpu_closid_rmid(r);
+ smp_call_function_many(cpu_mask, update_cpu_closid_rmid, r, 1);
put_cpu();
}
+static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
+ cpumask_var_t tmpmask)
+{
+ struct rdtgroup *prgrp = rdtgrp->mon.parent, *crgrp;
+ struct list_head *head;
+
+ /* Check whether cpus belong to parent ctrl group */
+ cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask);
+ if (cpumask_weight(tmpmask))
+ return -EINVAL;
+
+ /* Check whether cpus are dropped from this group */
+ cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
+ if (cpumask_weight(tmpmask)) {
+ /* Give any dropped cpus to parent rdtgroup */
+ cpumask_or(&prgrp->cpu_mask, &prgrp->cpu_mask, tmpmask);
+ update_closid_rmid(tmpmask, prgrp);
+ }
+
+ /*
+ * If we added cpus, remove them from the previous group that owned
+ * them and update the per-cpu rmid.
+ */
+ cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
+ if (cpumask_weight(tmpmask)) {
+ head = &prgrp->mon.crdtgrp_list;
+ list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
+ if (crgrp == rdtgrp)
+ continue;
+ cpumask_andnot(&crgrp->cpu_mask, &crgrp->cpu_mask,
+ tmpmask);
+ }
+ update_closid_rmid(tmpmask, rdtgrp);
+ }
+
+ /* Done pushing/pulling - update this group with new mask */
+ cpumask_copy(&rdtgrp->cpu_mask, newmask);
+
+ return 0;
+}
+
+static void cpumask_rdtgrp_clear(struct rdtgroup *r, struct cpumask *m)
+{
+ struct rdtgroup *crgrp;
+
+ cpumask_andnot(&r->cpu_mask, &r->cpu_mask, m);
+ /* update the child mon group masks as well */
+ list_for_each_entry(crgrp, &r->mon.crdtgrp_list, mon.crdtgrp_list)
+ cpumask_and(&crgrp->cpu_mask, &r->cpu_mask, &crgrp->cpu_mask);
+}
+
+static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
+ cpumask_var_t tmpmask, cpumask_var_t tmpmask1)
+{
+ struct rdtgroup *r, *crgrp;
+ struct list_head *head;
+
+ /* Check whether cpus are dropped from this group */
+ cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
+ if (cpumask_weight(tmpmask)) {
+ /* Can't drop from default group */
+ if (rdtgrp == &rdtgroup_default)
+ return -EINVAL;
+
+ /* Give any dropped cpus to rdtgroup_default */
+ cpumask_or(&rdtgroup_default.cpu_mask,
+ &rdtgroup_default.cpu_mask, tmpmask);
+ update_closid_rmid(tmpmask, &rdtgroup_default);
+ }
+
+ /*
+ * If we added cpus, remove them from the previous group and that
+ * group's child groups that owned them, and update the per-cpu
+ * closid/rmid.
+ */
+ cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
+ if (cpumask_weight(tmpmask)) {
+ list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) {
+ if (r == rdtgrp)
+ continue;
+ cpumask_and(tmpmask1, &r->cpu_mask, tmpmask);
+ if (cpumask_weight(tmpmask1))
+ cpumask_rdtgrp_clear(r, tmpmask1);
+ }
+ update_closid_rmid(tmpmask, rdtgrp);
+ }
+
+ /* Done pushing/pulling - update this group with new mask */
+ cpumask_copy(&rdtgrp->cpu_mask, newmask);
+
+ /*
+ * Clear child mon group masks since there is a new parent mask
+ * now, and update the rmid for the cpus the child lost.
+ */
+ head = &rdtgrp->mon.crdtgrp_list;
+ list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
+ cpumask_and(tmpmask, &rdtgrp->cpu_mask, &crgrp->cpu_mask);
+ update_closid_rmid(tmpmask, rdtgrp);
+ cpumask_clear(&crgrp->cpu_mask);
+ }
+
+ return 0;
+}
+
static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off)
{
- cpumask_var_t tmpmask, newmask;
- struct rdtgroup *rdtgrp, *r;
+ cpumask_var_t tmpmask, newmask, tmpmask1;
+ struct rdtgroup *rdtgrp;
int ret;
if (!buf)
@@ -254,6 +350,11 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
free_cpumask_var(tmpmask);
return -ENOMEM;
}
+ if (!zalloc_cpumask_var(&tmpmask1, GFP_KERNEL)) {
+ free_cpumask_var(tmpmask);
+ free_cpumask_var(newmask);
+ return -ENOMEM;
+ }
rdtgrp = rdtgroup_kn_lock_live(of->kn);
if (!rdtgrp) {
@@ -276,41 +377,18 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
goto unlock;
}
- /* Check whether cpus are dropped from this group */
- cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
- if (cpumask_weight(tmpmask)) {
- /* Can't drop from default group */
- if (rdtgrp == &rdtgroup_default) {
- ret = -EINVAL;
- goto unlock;
- }
- /* Give any dropped cpus to rdtgroup_default */
- cpumask_or(&rdtgroup_default.cpu_mask,
- &rdtgroup_default.cpu_mask, tmpmask);
- rdt_update_closid(tmpmask, &rdtgroup_default.closid);
- }
-
- /*
- * If we added cpus, remove them from previous group that owned them
- * and update per-cpu closid
- */
- cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
- if (cpumask_weight(tmpmask)) {
- list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) {
- if (r == rdtgrp)
- continue;
- cpumask_andnot(&r->cpu_mask, &r->cpu_mask, tmpmask);
- }
- rdt_update_closid(tmpmask, &rdtgrp->closid);
- }
-
- /* Done pushing/pulling - update this group with new mask */
- cpumask_copy(&rdtgrp->cpu_mask, newmask);
+ if (rdtgrp->type == RDTCTRL_GROUP)
+ ret = cpus_ctrl_write(rdtgrp, newmask, tmpmask, tmpmask1);
+ else if (rdtgrp->type == RDTMON_GROUP)
+ ret = cpus_mon_write(rdtgrp, newmask, tmpmask);
+ else
+ ret = -EINVAL;
unlock:
rdtgroup_kn_unlock(of->kn);
free_cpumask_var(tmpmask);
free_cpumask_var(newmask);
+ free_cpumask_var(tmpmask1);
return ret ?: nbytes;
}
@@ -336,6 +414,7 @@ static void move_myself(struct callback_head *head)
if (atomic_dec_and_test(&rdtgrp->waitcount) &&
(rdtgrp->flags & RDT_DELETED)) {
current->closid = 0;
+ current->rmid = 0;
kfree(rdtgrp);
}
@@ -374,7 +453,20 @@ static int __rdtgroup_move_task(struct task_struct *tsk,
atomic_dec(&rdtgrp->waitcount);
kfree(callback);
} else {
- tsk->closid = rdtgrp->closid;
+ /*
+ * For ctrl_mon groups move both closid and rmid.
+ * For monitor groups, can move the tasks only from
+ * their parent CTRL group.
+ */
+ if (rdtgrp->type == RDTCTRL_GROUP) {
+ tsk->closid = rdtgrp->closid;
+ tsk->rmid = rdtgrp->mon.rmid;
+ } else if (rdtgrp->type == RDTMON_GROUP) {
+ if (rdtgrp->mon.parent->closid == tsk->closid)
+ tsk->rmid = rdtgrp->mon.rmid;
+ else
+ ret = -EINVAL;
+ }
}
return ret;
}
@@ -454,7 +546,8 @@ static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s)
rcu_read_lock();
for_each_process_thread(p, t) {
- if (t->closid == r->closid)
+ if ((r->type == RDTCTRL_GROUP && t->closid == r->closid) ||
+ (r->type == RDTMON_GROUP && t->rmid == r->mon.rmid))
seq_printf(s, "%d\n", t->pid);
}
rcu_read_unlock();
@@ -476,39 +569,6 @@ static int rdtgroup_tasks_show(struct kernfs_open_file *of,
return ret;
}
-/* Files in each rdtgroup */
-static struct rftype rdtgroup_base_files[] = {
- {
- .name = "cpus",
- .mode = 0644,
- .kf_ops = &rdtgroup_kf_single_ops,
- .write = rdtgroup_cpus_write,
- .seq_show = rdtgroup_cpus_show,
- },
- {
- .name = "cpus_list",
- .mode = 0644,
- .kf_ops = &rdtgroup_kf_single_ops,
- .write = rdtgroup_cpus_write,
- .seq_show = rdtgroup_cpus_show,
- .flags = RFTYPE_FLAGS_CPUS_LIST,
- },
- {
- .name = "tasks",
- .mode = 0644,
- .kf_ops = &rdtgroup_kf_single_ops,
- .write = rdtgroup_tasks_write,
- .seq_show = rdtgroup_tasks_show,
- },
- {
- .name = "schemata",
- .mode = 0644,
- .kf_ops = &rdtgroup_kf_single_ops,
- .write = rdtgroup_schemata_write,
- .seq_show = rdtgroup_schemata_show,
- },
-};
-
static int rdt_num_closids_show(struct kernfs_open_file *of,
struct seq_file *seq, void *v)
{
@@ -536,6 +596,15 @@ static int rdt_min_cbm_bits_show(struct kernfs_open_file *of,
return 0;
}
+static int rdt_shareable_bits_show(struct kernfs_open_file *of,
+ struct seq_file *seq, void *v)
+{
+ struct rdt_resource *r = of->kn->parent->priv;
+
+ seq_printf(seq, "%x\n", r->cache.shareable_bits);
+ return 0;
+}
+
static int rdt_min_bw_show(struct kernfs_open_file *of,
struct seq_file *seq, void *v)
{
@@ -545,6 +614,28 @@ static int rdt_min_bw_show(struct kernfs_open_file *of,
return 0;
}
+static int rdt_num_rmids_show(struct kernfs_open_file *of,
+ struct seq_file *seq, void *v)
+{
+ struct rdt_resource *r = of->kn->parent->priv;
+
+ seq_printf(seq, "%d\n", r->num_rmid);
+
+ return 0;
+}
+
+static int rdt_mon_features_show(struct kernfs_open_file *of,
+ struct seq_file *seq, void *v)
+{
+ struct rdt_resource *r = of->kn->parent->priv;
+ struct mon_evt *mevt;
+
+ list_for_each_entry(mevt, &r->evt_list, list)
+ seq_printf(seq, "%s\n", mevt->name);
+
+ return 0;
+}
+
static int rdt_bw_gran_show(struct kernfs_open_file *of,
struct seq_file *seq, void *v)
{
@@ -563,74 +654,200 @@ static int rdt_delay_linear_show(struct kernfs_open_file *of,
return 0;
}
+static int max_threshold_occ_show(struct kernfs_open_file *of,
+ struct seq_file *seq, void *v)
+{
+ struct rdt_resource *r = of->kn->parent->priv;
+
+ seq_printf(seq, "%u\n", intel_cqm_threshold * r->mon_scale);
+
+ return 0;
+}
+
+static ssize_t max_threshold_occ_write(struct kernfs_open_file *of,
+ char *buf, size_t nbytes, loff_t off)
+{
+ struct rdt_resource *r = of->kn->parent->priv;
+ unsigned int bytes;
+ int ret;
+
+ ret = kstrtouint(buf, 0, &bytes);
+ if (ret)
+ return ret;
+
+ if (bytes > (boot_cpu_data.x86_cache_size * 1024))
+ return -EINVAL;
+
+ intel_cqm_threshold = bytes / r->mon_scale;
+
+ return nbytes;
+}
+
/* rdtgroup information files for one cache resource. */
-static struct rftype res_cache_info_files[] = {
+static struct rftype res_common_files[] = {
{
.name = "num_closids",
.mode = 0444,
.kf_ops = &rdtgroup_kf_single_ops,
.seq_show = rdt_num_closids_show,
+ .fflags = RF_CTRL_INFO,
+ },
+ {
+ .name = "mon_features",
+ .mode = 0444,
+ .kf_ops = &rdtgroup_kf_single_ops,
+ .seq_show = rdt_mon_features_show,
+ .fflags = RF_MON_INFO,
+ },
+ {
+ .name = "num_rmids",
+ .mode = 0444,
+ .kf_ops = &rdtgroup_kf_single_ops,
+ .seq_show = rdt_num_rmids_show,
+ .fflags = RF_MON_INFO,
},
{
.name = "cbm_mask",
.mode = 0444,
.kf_ops = &rdtgroup_kf_single_ops,
.seq_show = rdt_default_ctrl_show,
+ .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE,
},
{
.name = "min_cbm_bits",
.mode = 0444,
.kf_ops = &rdtgroup_kf_single_ops,
.seq_show = rdt_min_cbm_bits_show,
+ .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE,
},
-};
-
-/* rdtgroup information files for memory bandwidth. */
-static struct rftype res_mba_info_files[] = {
{
- .name = "num_closids",
+ .name = "shareable_bits",
.mode = 0444,
.kf_ops = &rdtgroup_kf_single_ops,
- .seq_show = rdt_num_closids_show,
+ .seq_show = rdt_shareable_bits_show,
+ .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE,
},
{
.name = "min_bandwidth",
.mode = 0444,
.kf_ops = &rdtgroup_kf_single_ops,
.seq_show = rdt_min_bw_show,
+ .fflags = RF_CTRL_INFO | RFTYPE_RES_MB,
},
{
.name = "bandwidth_gran",
.mode = 0444,
.kf_ops = &rdtgroup_kf_single_ops,
.seq_show = rdt_bw_gran_show,
+ .fflags = RF_CTRL_INFO | RFTYPE_RES_MB,
},
{
.name = "delay_linear",
.mode = 0444,
.kf_ops = &rdtgroup_kf_single_ops,
.seq_show = rdt_delay_linear_show,
+ .fflags = RF_CTRL_INFO | RFTYPE_RES_MB,
+ },
+ {
+ .name = "max_threshold_occupancy",
+ .mode = 0644,
+ .kf_ops = &rdtgroup_kf_single_ops,
+ .write = max_threshold_occ_write,
+ .seq_show = max_threshold_occ_show,
+ .fflags = RF_MON_INFO | RFTYPE_RES_CACHE,
+ },
+ {
+ .name = "cpus",
+ .mode = 0644,
+ .kf_ops = &rdtgroup_kf_single_ops,
+ .write = rdtgroup_cpus_write,
+ .seq_show = rdtgroup_cpus_show,
+ .fflags = RFTYPE_BASE,
+ },
+ {
+ .name = "cpus_list",
+ .mode = 0644,
+ .kf_ops = &rdtgroup_kf_single_ops,
+ .write = rdtgroup_cpus_write,
+ .seq_show = rdtgroup_cpus_show,
+ .flags = RFTYPE_FLAGS_CPUS_LIST,
+ .fflags = RFTYPE_BASE,
+ },
+ {
+ .name = "tasks",
+ .mode = 0644,
+ .kf_ops = &rdtgroup_kf_single_ops,
+ .write = rdtgroup_tasks_write,
+ .seq_show = rdtgroup_tasks_show,
+ .fflags = RFTYPE_BASE,
+ },
+ {
+ .name = "schemata",
+ .mode = 0644,
+ .kf_ops = &rdtgroup_kf_single_ops,
+ .write = rdtgroup_schemata_write,
+ .seq_show = rdtgroup_schemata_show,
+ .fflags = RF_CTRL_BASE,
},
};
-void rdt_get_mba_infofile(struct rdt_resource *r)
+static int rdtgroup_add_files(struct kernfs_node *kn, unsigned long fflags)
{
- r->info_files = res_mba_info_files;
- r->nr_info_files = ARRAY_SIZE(res_mba_info_files);
+ struct rftype *rfts, *rft;
+ int ret, len;
+
+ rfts = res_common_files;
+ len = ARRAY_SIZE(res_common_files);
+
+ lockdep_assert_held(&rdtgroup_mutex);
+
+ for (rft = rfts; rft < rfts + len; rft++) {
+ if ((fflags & rft->fflags) == rft->fflags) {
+ ret = rdtgroup_add_file(kn, rft);
+ if (ret)
+ goto error;
+ }
+ }
+
+ return 0;
+error:
+ pr_warn("Failed to add %s, err=%d\n", rft->name, ret);
+ while (--rft >= rfts) {
+ if ((fflags & rft->fflags) == rft->fflags)
+ kernfs_remove_by_name(kn, rft->name);
+ }
+ return ret;
}
-void rdt_get_cache_infofile(struct rdt_resource *r)
+static int rdtgroup_mkdir_info_resdir(struct rdt_resource *r, char *name,
+ unsigned long fflags)
{
- r->info_files = res_cache_info_files;
- r->nr_info_files = ARRAY_SIZE(res_cache_info_files);
+ struct kernfs_node *kn_subdir;
+ int ret;
+
+ kn_subdir = kernfs_create_dir(kn_info, name,
+ kn_info->mode, r);
+ if (IS_ERR(kn_subdir))
+ return PTR_ERR(kn_subdir);
+
+ kernfs_get(kn_subdir);
+ ret = rdtgroup_kn_set_ugid(kn_subdir);
+ if (ret)
+ return ret;
+
+ ret = rdtgroup_add_files(kn_subdir, fflags);
+ if (!ret)
+ kernfs_activate(kn_subdir);
+
+ return ret;
}
static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
{
- struct kernfs_node *kn_subdir;
- struct rftype *res_info_files;
struct rdt_resource *r;
- int ret, len;
+ unsigned long fflags;
+ char name[32];
+ int ret;
/* create the directory */
kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL);
@@ -638,25 +855,19 @@ static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
return PTR_ERR(kn_info);
kernfs_get(kn_info);
- for_each_enabled_rdt_resource(r) {
- kn_subdir = kernfs_create_dir(kn_info, r->name,
- kn_info->mode, r);
- if (IS_ERR(kn_subdir)) {
- ret = PTR_ERR(kn_subdir);
- goto out_destroy;
- }
- kernfs_get(kn_subdir);
- ret = rdtgroup_kn_set_ugid(kn_subdir);
+ for_each_alloc_enabled_rdt_resource(r) {
+ fflags = r->fflags | RF_CTRL_INFO;
+ ret = rdtgroup_mkdir_info_resdir(r, r->name, fflags);
if (ret)
goto out_destroy;
+ }
- res_info_files = r->info_files;
- len = r->nr_info_files;
-
- ret = rdtgroup_add_files(kn_subdir, res_info_files, len);
+ for_each_mon_enabled_rdt_resource(r) {
+ fflags = r->fflags | RF_MON_INFO;
+ sprintf(name, "%s_MON", r->name);
+ ret = rdtgroup_mkdir_info_resdir(r, name, fflags);
if (ret)
goto out_destroy;
- kernfs_activate(kn_subdir);
}
/*
@@ -678,6 +889,39 @@ out_destroy:
return ret;
}
+static int
+mongroup_create_dir(struct kernfs_node *parent_kn, struct rdtgroup *prgrp,
+ char *name, struct kernfs_node **dest_kn)
+{
+ struct kernfs_node *kn;
+ int ret;
+
+ /* create the directory */
+ kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp);
+ if (IS_ERR(kn))
+ return PTR_ERR(kn);
+
+ if (dest_kn)
+ *dest_kn = kn;
+
+ /*
+ * This extra ref will be put in kernfs_remove() and guarantees
+ * that @rdtgrp->kn is always accessible.
+ */
+ kernfs_get(kn);
+
+ ret = rdtgroup_kn_set_ugid(kn);
+ if (ret)
+ goto out_destroy;
+
+ kernfs_activate(kn);
+
+ return 0;
+
+out_destroy:
+ kernfs_remove(kn);
+ return ret;
+}
static void l3_qos_cfg_update(void *arg)
{
bool *enable = arg;
@@ -718,14 +962,15 @@ static int cdp_enable(void)
struct rdt_resource *r_l3 = &rdt_resources_all[RDT_RESOURCE_L3];
int ret;
- if (!r_l3->capable || !r_l3data->capable || !r_l3code->capable)
+ if (!r_l3->alloc_capable || !r_l3data->alloc_capable ||
+ !r_l3code->alloc_capable)
return -EINVAL;
ret = set_l3_qos_cfg(r_l3, true);
if (!ret) {
- r_l3->enabled = false;
- r_l3data->enabled = true;
- r_l3code->enabled = true;
+ r_l3->alloc_enabled = false;
+ r_l3data->alloc_enabled = true;
+ r_l3code->alloc_enabled = true;
}
return ret;
}
@@ -734,11 +979,11 @@ static void cdp_disable(void)
{
struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3];
- r->enabled = r->capable;
+ r->alloc_enabled = r->alloc_capable;
- if (rdt_resources_all[RDT_RESOURCE_L3DATA].enabled) {
- rdt_resources_all[RDT_RESOURCE_L3DATA].enabled = false;
- rdt_resources_all[RDT_RESOURCE_L3CODE].enabled = false;
+ if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled) {
+ rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled = false;
+ rdt_resources_all[RDT_RESOURCE_L3CODE].alloc_enabled = false;
set_l3_qos_cfg(r, false);
}
}
@@ -823,10 +1068,16 @@ void rdtgroup_kn_unlock(struct kernfs_node *kn)
}
}
+static int mkdir_mondata_all(struct kernfs_node *parent_kn,
+ struct rdtgroup *prgrp,
+ struct kernfs_node **mon_data_kn);
+
static struct dentry *rdt_mount(struct file_system_type *fs_type,
int flags, const char *unused_dev_name,
void *data)
{
+ struct rdt_domain *dom;
+ struct rdt_resource *r;
struct dentry *dentry;
int ret;
@@ -853,15 +1104,54 @@ static struct dentry *rdt_mount(struct file_system_type *fs_type,
goto out_cdp;
}
+ if (rdt_mon_capable) {
+ ret = mongroup_create_dir(rdtgroup_default.kn,
+ NULL, "mon_groups",
+ &kn_mongrp);
+ if (ret) {
+ dentry = ERR_PTR(ret);
+ goto out_info;
+ }
+ kernfs_get(kn_mongrp);
+
+ ret = mkdir_mondata_all(rdtgroup_default.kn,
+ &rdtgroup_default, &kn_mondata);
+ if (ret) {
+ dentry = ERR_PTR(ret);
+ goto out_mongrp;
+ }
+ kernfs_get(kn_mondata);
+ rdtgroup_default.mon.mon_data_kn = kn_mondata;
+ }
+
dentry = kernfs_mount(fs_type, flags, rdt_root,
RDTGROUP_SUPER_MAGIC, NULL);
if (IS_ERR(dentry))
- goto out_destroy;
+ goto out_mondata;
+
+ if (rdt_alloc_capable)
+ static_branch_enable(&rdt_alloc_enable_key);
+ if (rdt_mon_capable)
+ static_branch_enable(&rdt_mon_enable_key);
+
+ if (rdt_alloc_capable || rdt_mon_capable)
+ static_branch_enable(&rdt_enable_key);
+
+ if (is_mbm_enabled()) {
+ r = &rdt_resources_all[RDT_RESOURCE_L3];
+ list_for_each_entry(dom, &r->domains, list)
+ mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL);
+ }
- static_branch_enable(&rdt_enable_key);
goto out;
-out_destroy:
+out_mondata:
+ if (rdt_mon_capable)
+ kernfs_remove(kn_mondata);
+out_mongrp:
+ if (rdt_mon_capable)
+ kernfs_remove(kn_mongrp);
+out_info:
kernfs_remove(kn_info);
out_cdp:
cdp_disable();
@@ -909,6 +1199,18 @@ static int reset_all_ctrls(struct rdt_resource *r)
return 0;
}
+static bool is_closid_match(struct task_struct *t, struct rdtgroup *r)
+{
+ return (rdt_alloc_capable &&
+ (r->type == RDTCTRL_GROUP) && (t->closid == r->closid));
+}
+
+static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r)
+{
+ return (rdt_mon_capable &&
+ (r->type == RDTMON_GROUP) && (t->rmid == r->mon.rmid));
+}
+
/*
* Move tasks from one to the other group. If @from is NULL, then all tasks
* in the systems are moved unconditionally (used for teardown).
@@ -924,8 +1226,11 @@ static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
read_lock(&tasklist_lock);
for_each_process_thread(p, t) {
- if (!from || t->closid == from->closid) {
+ if (!from || is_closid_match(t, from) ||
+ is_rmid_match(t, from)) {
t->closid = to->closid;
+ t->rmid = to->mon.rmid;
+
#ifdef CONFIG_SMP
/*
* This is safe on x86 w/o barriers as the ordering
@@ -944,6 +1249,19 @@ static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
read_unlock(&tasklist_lock);
}
+static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp)
+{
+ struct rdtgroup *sentry, *stmp;
+ struct list_head *head;
+
+ head = &rdtgrp->mon.crdtgrp_list;
+ list_for_each_entry_safe(sentry, stmp, head, mon.crdtgrp_list) {
+ free_rmid(sentry->mon.rmid);
+ list_del(&sentry->mon.crdtgrp_list);
+ kfree(sentry);
+ }
+}
+
/*
* Forcibly remove all of subdirectories under root.
*/
@@ -955,6 +1273,9 @@ static void rmdir_all_sub(void)
rdt_move_group_tasks(NULL, &rdtgroup_default, NULL);
list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) {
+ /* Free any child rmids */
+ free_all_child_rdtgrp(rdtgrp);
+
/* Remove each rdtgroup other than root */
if (rdtgrp == &rdtgroup_default)
continue;
@@ -967,16 +1288,20 @@ static void rmdir_all_sub(void)
cpumask_or(&rdtgroup_default.cpu_mask,
&rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);
+ free_rmid(rdtgrp->mon.rmid);
+
kernfs_remove(rdtgrp->kn);
list_del(&rdtgrp->rdtgroup_list);
kfree(rdtgrp);
}
/* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */
get_online_cpus();
- rdt_update_closid(cpu_online_mask, &rdtgroup_default.closid);
+ update_closid_rmid(cpu_online_mask, &rdtgroup_default);
put_online_cpus();
kernfs_remove(kn_info);
+ kernfs_remove(kn_mongrp);
+ kernfs_remove(kn_mondata);
}
static void rdt_kill_sb(struct super_block *sb)
@@ -986,10 +1311,12 @@ static void rdt_kill_sb(struct super_block *sb)
mutex_lock(&rdtgroup_mutex);
/*Put everything back to default values. */
- for_each_enabled_rdt_resource(r)
+ for_each_alloc_enabled_rdt_resource(r)
reset_all_ctrls(r);
cdp_disable();
rmdir_all_sub();
+ static_branch_disable(&rdt_alloc_enable_key);
+ static_branch_disable(&rdt_mon_enable_key);
static_branch_disable(&rdt_enable_key);
kernfs_kill_sb(sb);
mutex_unlock(&rdtgroup_mutex);
@@ -1001,46 +1328,223 @@ static struct file_system_type rdt_fs_type = {
.kill_sb = rdt_kill_sb,
};
-static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
- umode_t mode)
+static int mon_addfile(struct kernfs_node *parent_kn, const char *name,
+ void *priv)
{
- struct rdtgroup *parent, *rdtgrp;
struct kernfs_node *kn;
- int ret, closid;
+ int ret = 0;
- /* Only allow mkdir in the root directory */
- if (parent_kn != rdtgroup_default.kn)
- return -EPERM;
+ kn = __kernfs_create_file(parent_kn, name, 0444, 0,
+ &kf_mondata_ops, priv, NULL, NULL);
+ if (IS_ERR(kn))
+ return PTR_ERR(kn);
- /* Do not accept '\n' to avoid unparsable situation. */
- if (strchr(name, '\n'))
- return -EINVAL;
+ ret = rdtgroup_kn_set_ugid(kn);
+ if (ret) {
+ kernfs_remove(kn);
+ return ret;
+ }
- parent = rdtgroup_kn_lock_live(parent_kn);
- if (!parent) {
- ret = -ENODEV;
- goto out_unlock;
+ return ret;
+}
+
+/*
+ * Remove all subdirectories of mon_data of ctrl_mon groups
+ * and monitor groups with the given domain id.
+ */
+void rmdir_mondata_subdir_allrdtgrp(struct rdt_resource *r, unsigned int dom_id)
+{
+ struct rdtgroup *prgrp, *crgrp;
+ char name[32];
+
+ if (!r->mon_enabled)
+ return;
+
+ list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
+ sprintf(name, "mon_%s_%02d", r->name, dom_id);
+ kernfs_remove_by_name(prgrp->mon.mon_data_kn, name);
+
+ list_for_each_entry(crgrp, &prgrp->mon.crdtgrp_list, mon.crdtgrp_list)
+ kernfs_remove_by_name(crgrp->mon.mon_data_kn, name);
}
+}
- ret = closid_alloc();
- if (ret < 0)
+static int mkdir_mondata_subdir(struct kernfs_node *parent_kn,
+ struct rdt_domain *d,
+ struct rdt_resource *r, struct rdtgroup *prgrp)
+{
+ union mon_data_bits priv;
+ struct kernfs_node *kn;
+ struct mon_evt *mevt;
+ struct rmid_read rr;
+ char name[32];
+ int ret;
+
+ sprintf(name, "mon_%s_%02d", r->name, d->id);
+ /* create the directory */
+ kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp);
+ if (IS_ERR(kn))
+ return PTR_ERR(kn);
+
+ /*
+ * This extra ref will be put in kernfs_remove() and guarantees
+ * that kn is always accessible.
+ */
+ kernfs_get(kn);
+ ret = rdtgroup_kn_set_ugid(kn);
+ if (ret)
+ goto out_destroy;
+
+ if (WARN_ON(list_empty(&r->evt_list))) {
+ ret = -EPERM;
+ goto out_destroy;
+ }
+
+ priv.u.rid = r->rid;
+ priv.u.domid = d->id;
+ list_for_each_entry(mevt, &r->evt_list, list) {
+ priv.u.evtid = mevt->evtid;
+ ret = mon_addfile(kn, mevt->name, priv.priv);
+ if (ret)
+ goto out_destroy;
+
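+ /* Prime the MBM state so that the first user read starts from zero */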
+ if (is_mbm_event(mevt->evtid))
+ mon_event_read(&rr, d, prgrp, mevt->evtid, true);
+ }
+ kernfs_activate(kn);
+ return 0;
+
+out_destroy:
+ kernfs_remove(kn);
+ return ret;
+}
+
+/*
+ * Add all subdirectories of mon_data for "ctrl_mon" groups
+ * and "monitor" groups with given domain id.
+ */
+void mkdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
+ struct rdt_domain *d)
+{
+ struct kernfs_node *parent_kn;
+ struct rdtgroup *prgrp, *crgrp;
+ struct list_head *head;
+
+ if (!r->mon_enabled)
+ return;
+
+ list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
+ parent_kn = prgrp->mon.mon_data_kn;
+ mkdir_mondata_subdir(parent_kn, d, r, prgrp);
+
+ head = &prgrp->mon.crdtgrp_list;
+ list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
+ parent_kn = crgrp->mon.mon_data_kn;
+ mkdir_mondata_subdir(parent_kn, d, r, crgrp);
+ }
+ }
+}
+
+static int mkdir_mondata_subdir_alldom(struct kernfs_node *parent_kn,
+ struct rdt_resource *r,
+ struct rdtgroup *prgrp)
+{
+ struct rdt_domain *dom;
+ int ret;
+
+ list_for_each_entry(dom, &r->domains, list) {
+ ret = mkdir_mondata_subdir(parent_kn, dom, r, prgrp);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * This creates a directory mon_data which contains the monitored data.
+ *
+ * mon_data has one directory for each domain, named in the format
+ * mon_<domain_name>_<domain_id>. For example, mon_data with an
+ * L3 domain looks as below:
+ * ./mon_data:
+ * mon_L3_00
+ * mon_L3_01
+ * mon_L3_02
+ * ...
+ *
+ * Each domain directory has one file per event:
+ * ./mon_L3_00/:
+ * llc_occupancy
+ *
+ */
+static int mkdir_mondata_all(struct kernfs_node *parent_kn,
+ struct rdtgroup *prgrp,
+ struct kernfs_node **dest_kn)
+{
+ struct rdt_resource *r;
+ struct kernfs_node *kn;
+ int ret;
+
+ /*
+ * Create the mon_data directory first.
+ */
+ ret = mongroup_create_dir(parent_kn, NULL, "mon_data", &kn);
+ if (ret)
+ return ret;
+
+ if (dest_kn)
+ *dest_kn = kn;
+
+ /*
+ * Create the subdirectories for each domain. Note that all events
+ * in a domain like L3 are grouped into a resource whose domain is L3
+ */
+ for_each_mon_enabled_rdt_resource(r) {
+ ret = mkdir_mondata_subdir_alldom(kn, r, prgrp);
+ if (ret)
+ goto out_destroy;
+ }
+
+ return 0;
+
+out_destroy:
+ kernfs_remove(kn);
+ return ret;
+}
+
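+/*
+ * Common preparation for creating a new resource group directory:
+ * allocate the rdtgroup, create its kernfs directory and base files,
+ * and, when monitoring is supported, allocate an RMID and create the
+ * mon_data hierarchy. The parent group's kernfs node stays locked on
+ * success; the caller unlocks it.
+ */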
+static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
+ struct kernfs_node *prgrp_kn,
+ const char *name, umode_t mode,
+ enum rdt_group_type rtype, struct rdtgroup **r)
+{
+ struct rdtgroup *prdtgrp, *rdtgrp;
+ struct kernfs_node *kn;
+ uint files = 0;
+ int ret;
+
+ prdtgrp = rdtgroup_kn_lock_live(prgrp_kn);
+ if (!prdtgrp) {
+ ret = -ENODEV;
goto out_unlock;
- closid = ret;
+ }
/* allocate the rdtgroup. */
rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL);
if (!rdtgrp) {
ret = -ENOSPC;
- goto out_closid_free;
+ goto out_unlock;
}
- rdtgrp->closid = closid;
- list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups);
+ *r = rdtgrp;
+ rdtgrp->mon.parent = prdtgrp;
+ rdtgrp->type = rtype;
+ INIT_LIST_HEAD(&rdtgrp->mon.crdtgrp_list);
/* kernfs creates the directory for rdtgrp */
- kn = kernfs_create_dir(parent->kn, name, mode, rdtgrp);
+ kn = kernfs_create_dir(parent_kn, name, mode, rdtgrp);
if (IS_ERR(kn)) {
ret = PTR_ERR(kn);
- goto out_cancel_ref;
+ goto out_free_rgrp;
}
rdtgrp->kn = kn;
@@ -1056,43 +1560,211 @@ static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
if (ret)
goto out_destroy;
- ret = rdtgroup_add_files(kn, rdtgroup_base_files,
- ARRAY_SIZE(rdtgroup_base_files));
+ files = RFTYPE_BASE | BIT(RF_CTRLSHIFT + rtype);
+ ret = rdtgroup_add_files(kn, files);
if (ret)
goto out_destroy;
+ if (rdt_mon_capable) {
+ ret = alloc_rmid();
+ if (ret < 0)
+ goto out_destroy;
+ rdtgrp->mon.rmid = ret;
+
+ ret = mkdir_mondata_all(kn, rdtgrp, &rdtgrp->mon.mon_data_kn);
+ if (ret)
+ goto out_idfree;
+ }
kernfs_activate(kn);
- ret = 0;
- goto out_unlock;
+ /*
+ * The caller unlocks the prgrp_kn upon success.
+ */
+ return 0;
+out_idfree:
+ free_rmid(rdtgrp->mon.rmid);
out_destroy:
kernfs_remove(rdtgrp->kn);
-out_cancel_ref:
- list_del(&rdtgrp->rdtgroup_list);
+out_free_rgrp:
kfree(rdtgrp);
-out_closid_free:
- closid_free(closid);
out_unlock:
- rdtgroup_kn_unlock(parent_kn);
+ rdtgroup_kn_unlock(prgrp_kn);
return ret;
}
-static int rdtgroup_rmdir(struct kernfs_node *kn)
+static void mkdir_rdt_prepare_clean(struct rdtgroup *rgrp)
+{
+ kernfs_remove(rgrp->kn);
+ free_rmid(rgrp->mon.rmid);
+ kfree(rgrp);
+}
+
+/*
+ * Create a monitor group under the "mon_groups" directory of a control
+ * and monitor group (ctrl_mon). This is a resource group
+ * to monitor a subset of tasks and cpus in its parent ctrl_mon group.
+ */
+static int rdtgroup_mkdir_mon(struct kernfs_node *parent_kn,
+ struct kernfs_node *prgrp_kn,
+ const char *name,
+ umode_t mode)
+{
+ struct rdtgroup *rdtgrp, *prgrp;
+ int ret;
+
+ ret = mkdir_rdt_prepare(parent_kn, prgrp_kn, name, mode, RDTMON_GROUP,
+ &rdtgrp);
+ if (ret)
+ return ret;
+
+ prgrp = rdtgrp->mon.parent;
+ rdtgrp->closid = prgrp->closid;
+
+ /*
+ * Add the rdtgrp to the list of rdtgrps the parent
+ * ctrl_mon group has to track.
+ */
+ list_add_tail(&rdtgrp->mon.crdtgrp_list, &prgrp->mon.crdtgrp_list);
+
+ rdtgroup_kn_unlock(prgrp_kn);
+ return ret;
+}
+
+/*
+ * These are rdtgroups created under the root directory. Can be used
+ * to allocate and monitor resources.
+ */
+static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
+ struct kernfs_node *prgrp_kn,
+ const char *name, umode_t mode)
{
- int ret, cpu, closid = rdtgroup_default.closid;
struct rdtgroup *rdtgrp;
- cpumask_var_t tmpmask;
+ struct kernfs_node *kn;
+ u32 closid;
+ int ret;
- if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
- return -ENOMEM;
+ ret = mkdir_rdt_prepare(parent_kn, prgrp_kn, name, mode, RDTCTRL_GROUP,
+ &rdtgrp);
+ if (ret)
+ return ret;
- rdtgrp = rdtgroup_kn_lock_live(kn);
- if (!rdtgrp) {
- ret = -EPERM;
- goto out;
+ kn = rdtgrp->kn;
+ ret = closid_alloc();
+ if (ret < 0)
+ goto out_common_fail;
+ closid = ret;
+
+ rdtgrp->closid = closid;
+ list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups);
+
+ if (rdt_mon_capable) {
+ /*
+ * Create an empty mon_groups directory to hold the subset
+ * of tasks and cpus to monitor.
+ */
+ ret = mongroup_create_dir(kn, NULL, "mon_groups", NULL);
+ if (ret)
+ goto out_id_free;
}
+ goto out_unlock;
+
+out_id_free:
+ closid_free(closid);
+ list_del(&rdtgrp->rdtgroup_list);
+out_common_fail:
+ mkdir_rdt_prepare_clean(rdtgrp);
+out_unlock:
+ rdtgroup_kn_unlock(prgrp_kn);
+ return ret;
+}
+
+/*
+ * We allow creating mon groups only within a directory called "mon_groups",
+ * which is present in every ctrl_mon group. Check if this is a valid
+ * "mon_groups" directory.
+ *
+ * 1. The directory should be named "mon_groups".
+ * 2. The mon group itself should "not" be named "mon_groups".
+ * This makes sure "mon_groups" directory always has a ctrl_mon group
+ * as parent.
+ */
+static bool is_mon_groups(struct kernfs_node *kn, const char *name)
+{
+ return (!strcmp(kn->name, "mon_groups") &&
+ strcmp(name, "mon_groups"));
+}
+
+static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
+ umode_t mode)
+{
+ /* Do not accept '\n' to avoid unparsable situation. */
+ if (strchr(name, '\n'))
+ return -EINVAL;
+
+ /*
+ * If the parent directory is the root directory and RDT
+ * allocation is supported, add a control and monitoring
+ * subdirectory
+ */
+ if (rdt_alloc_capable && parent_kn == rdtgroup_default.kn)
+ return rdtgroup_mkdir_ctrl_mon(parent_kn, parent_kn, name, mode);
+
+ /*
+ * If RDT monitoring is supported and the parent directory is a valid
+ * "mon_groups" directory, add a monitoring subdirectory.
+ */
+ if (rdt_mon_capable && is_mon_groups(parent_kn, name))
+ return rdtgroup_mkdir_mon(parent_kn, parent_kn->parent, name, mode);
+
+ return -EPERM;
+}
+
+static int rdtgroup_rmdir_mon(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
+ cpumask_var_t tmpmask)
+{
+ struct rdtgroup *prdtgrp = rdtgrp->mon.parent;
+ int cpu;
+
+ /* Give any tasks back to the parent group */
+ rdt_move_group_tasks(rdtgrp, prdtgrp, tmpmask);
+
+ /* Update per cpu rmid of the moved CPUs first */
+ for_each_cpu(cpu, &rdtgrp->cpu_mask)
+ per_cpu(pqr_state.default_rmid, cpu) = prdtgrp->mon.rmid;
+ /*
+ * Update the MSR on moved CPUs and CPUs which have a moved
+ * task running on them.
+ */
+ cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
+ update_closid_rmid(tmpmask, NULL);
+
+ rdtgrp->flags = RDT_DELETED;
+ free_rmid(rdtgrp->mon.rmid);
+
+ /*
+ * Remove the rdtgrp from the parent ctrl_mon group's list
+ */
+ WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list));
+ list_del(&rdtgrp->mon.crdtgrp_list);
+
+ /*
+ * One extra hold on this, will be dropped when we kfree(rdtgrp)
+ * in rdtgroup_kn_unlock().
+ */
+ kernfs_get(kn);
+ kernfs_remove(rdtgrp->kn);
+
+ return 0;
+}
+
+static int rdtgroup_rmdir_ctrl(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
+ cpumask_var_t tmpmask)
+{
+ int cpu;
+
/* Give any tasks back to the default group */
rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask);
@@ -1100,18 +1772,28 @@ static int rdtgroup_rmdir(struct kernfs_node *kn)
cpumask_or(&rdtgroup_default.cpu_mask,
&rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);
- /* Update per cpu closid of the moved CPUs first */
- for_each_cpu(cpu, &rdtgrp->cpu_mask)
- per_cpu(cpu_closid, cpu) = closid;
+ /* Update per cpu closid and rmid of the moved CPUs first */
+ for_each_cpu(cpu, &rdtgrp->cpu_mask) {
+ per_cpu(pqr_state.default_closid, cpu) = rdtgroup_default.closid;
+ per_cpu(pqr_state.default_rmid, cpu) = rdtgroup_default.mon.rmid;
+ }
+
/*
* Update the MSR on moved CPUs and CPUs which have moved
* task running on them.
*/
cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
- rdt_update_closid(tmpmask, NULL);
+ update_closid_rmid(tmpmask, NULL);
rdtgrp->flags = RDT_DELETED;
closid_free(rdtgrp->closid);
+ free_rmid(rdtgrp->mon.rmid);
+
+ /*
+ * Free all the child monitor group rmids.
+ */
+ free_all_child_rdtgrp(rdtgrp);
+
list_del(&rdtgrp->rdtgroup_list);
/*
@@ -1120,7 +1802,41 @@ static int rdtgroup_rmdir(struct kernfs_node *kn)
*/
kernfs_get(kn);
kernfs_remove(rdtgrp->kn);
- ret = 0;
+
+ return 0;
+}
+
+static int rdtgroup_rmdir(struct kernfs_node *kn)
+{
+ struct kernfs_node *parent_kn = kn->parent;
+ struct rdtgroup *rdtgrp;
+ cpumask_var_t tmpmask;
+ int ret = 0;
+
+ if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
+ return -ENOMEM;
+
+ rdtgrp = rdtgroup_kn_lock_live(kn);
+ if (!rdtgrp) {
+ ret = -EPERM;
+ goto out;
+ }
+
+ /*
+ * If the rdtgroup is a ctrl_mon group and parent directory
+ * is the root directory, remove the ctrl_mon group.
+ *
+ * If the rdtgroup is a mon group and parent directory
+ * is a valid "mon_groups" directory, remove the mon group.
+ */
+ if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn)
+ ret = rdtgroup_rmdir_ctrl(kn, rdtgrp, tmpmask);
+ else if (rdtgrp->type == RDTMON_GROUP &&
+ is_mon_groups(parent_kn, kn->name))
+ ret = rdtgroup_rmdir_mon(kn, rdtgrp, tmpmask);
+ else
+ ret = -EPERM;
+
out:
rdtgroup_kn_unlock(kn);
free_cpumask_var(tmpmask);
@@ -1129,7 +1845,7 @@ out:
static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf)
{
- if (rdt_resources_all[RDT_RESOURCE_L3DATA].enabled)
+ if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled)
seq_puts(seq, ",cdp");
return 0;
}
@@ -1153,10 +1869,13 @@ static int __init rdtgroup_setup_root(void)
mutex_lock(&rdtgroup_mutex);
rdtgroup_default.closid = 0;
+ rdtgroup_default.mon.rmid = 0;
+ rdtgroup_default.type = RDTCTRL_GROUP;
+ INIT_LIST_HEAD(&rdtgroup_default.mon.crdtgrp_list);
+
list_add(&rdtgroup_default.rdtgroup_list, &rdt_all_groups);
- ret = rdtgroup_add_files(rdt_root->kn, rdtgroup_base_files,
- ARRAY_SIZE(rdtgroup_base_files));
+ ret = rdtgroup_add_files(rdt_root->kn, RF_CTRL_BASE);
if (ret) {
kernfs_destroy_root(rdt_root);
goto out;
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 6dde0497efc7..3b413065c613 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -51,6 +51,7 @@
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/reboot.h>
+#include <asm/set_memory.h>
#include "mce-internal.h"
@@ -1051,6 +1052,48 @@ static int do_memory_failure(struct mce *m)
return ret;
}
+#if defined(arch_unmap_kpfn) && defined(CONFIG_MEMORY_FAILURE)
+
+void arch_unmap_kpfn(unsigned long pfn)
+{
+ unsigned long decoy_addr;
+
+ /*
+ * Unmap this page from the kernel 1:1 mappings to make sure
+ * we don't log more errors because of speculative access to
+ * the page.
+ * We would like to just call:
+ * set_memory_np((unsigned long)pfn_to_kaddr(pfn), 1);
+ * but doing that would radically increase the odds of a
+ * speculative access to the poison page because we'd have
+ * the virtual address of the kernel 1:1 mapping sitting
+ * around in registers.
+ * Instead we get tricky. We create a non-canonical address
+ * that looks just like the one we want, but has bit 63 flipped.
+ * This relies on set_memory_np() not checking whether we passed
+ * a legal address.
+ */
+
+/*
+ * Build time check to see if we have a spare virtual bit. Don't want
+ * to leave this until run time because most developers don't have a
+ * system that can exercise this code path. This will only become a
+ * problem if/when we move beyond 5-level page tables.
+ *
+ * Hard code "9" here because cpp doesn't grok ilog2(PTRS_PER_PGD)
+ */
+#if PGDIR_SHIFT + 9 < 63
+ decoy_addr = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT(63));
+#else
+#error "no unused virtual bit available"
+#endif
+
+ if (set_memory_np(decoy_addr, 1))
+ pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
+
+}
+#endif
+
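
To make the trick above concrete: with 4-level paging PGDIR_SHIFT is 39, so the build-time check holds (39 + 9 = 48 < 63) and bit 63 is unused by the 1:1 mapping. The stand-alone sketch below shows the decoy-address arithmetic; the direct-map base 0xffff880000000000 and the pfn are example values only (the real base depends on paging mode and KASLR), the point being merely that flipping bit 63 of PAGE_OFFSET yields a non-canonical alias of the poison page:

	#include <stdio.h>
	#include <stdint.h>

	#define EX_PAGE_SHIFT   12
	#define EX_PAGE_OFFSET  0xffff880000000000ULL   /* example direct-map base only */
	#define EX_BIT63        (1ULL << 63)

	int main(void)
	{
		uint64_t pfn   = 0x12345;                   /* example poisoned pfn */
		uint64_t kaddr = (pfn << EX_PAGE_SHIFT) + EX_PAGE_OFFSET;
		/* Same address with bit 63 flipped: non-canonical, so it never
		 * sits in a register as a usable pointer to the poison page. */
		uint64_t decoy = (pfn << EX_PAGE_SHIFT) + (EX_PAGE_OFFSET ^ EX_BIT63);

		printf("1:1 map address: 0x%016llx\n", (unsigned long long)kaddr);
		printf("decoy address:   0x%016llx\n", (unsigned long long)decoy);
		return 0;
	}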
/*
* The actual machine check handler. This only handles real
* exceptions when something got corrupted coming in through int 18.
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 9e314bcf67cc..40e28ed77fbf 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -201,8 +201,8 @@ static void smca_configure(unsigned int bank, unsigned int cpu)
wrmsr(smca_config, low, high);
}
- /* Collect bank_info using CPU 0 for now. */
- if (cpu)
+ /* Return early if this bank was already initialized. */
+ if (smca_banks[bank].hwid)
return;
if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_IPID(bank), &low, &high)) {
@@ -216,11 +216,6 @@ static void smca_configure(unsigned int bank, unsigned int cpu)
for (i = 0; i < ARRAY_SIZE(smca_hwid_mcatypes); i++) {
s_hwid = &smca_hwid_mcatypes[i];
if (hwid_mcatype == s_hwid->hwid_mcatype) {
-
- WARN(smca_banks[bank].hwid,
- "Bank %s already initialized!\n",
- smca_get_name(s_hwid->bank_type));
-
smca_banks[bank].hwid = s_hwid;
smca_banks[bank].id = low;
smca_banks[bank].sysfs_id = s_hwid->count++;
@@ -776,24 +771,12 @@ static void __log_error(unsigned int bank, u64 status, u64 addr, u64 misc)
mce_log(&m);
}
-static inline void __smp_deferred_error_interrupt(void)
-{
- inc_irq_stat(irq_deferred_error_count);
- deferred_error_int_vector();
-}
-
asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(void)
{
entering_irq();
- __smp_deferred_error_interrupt();
- exiting_ack_irq();
-}
-
-asmlinkage __visible void __irq_entry smp_trace_deferred_error_interrupt(void)
-{
- entering_irq();
trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR);
- __smp_deferred_error_interrupt();
+ inc_irq_stat(irq_deferred_error_count);
+ deferred_error_int_vector();
trace_deferred_error_apic_exit(DEFERRED_ERROR_VECTOR);
exiting_ack_irq();
}
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index d7cc190ae457..2da67b70ba98 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -122,7 +122,7 @@ static struct attribute *thermal_throttle_attrs[] = {
NULL
};
-static struct attribute_group thermal_attr_group = {
+static const struct attribute_group thermal_attr_group = {
.attrs = thermal_throttle_attrs,
.name = "thermal_throttle"
};
@@ -390,26 +390,12 @@ static void unexpected_thermal_interrupt(void)
static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt;
-static inline void __smp_thermal_interrupt(void)
-{
- inc_irq_stat(irq_thermal_count);
- smp_thermal_vector();
-}
-
-asmlinkage __visible void __irq_entry
-smp_thermal_interrupt(struct pt_regs *regs)
-{
- entering_irq();
- __smp_thermal_interrupt();
- exiting_ack_irq();
-}
-
-asmlinkage __visible void __irq_entry
-smp_trace_thermal_interrupt(struct pt_regs *regs)
+asmlinkage __visible void __irq_entry smp_thermal_interrupt(struct pt_regs *r)
{
entering_irq();
trace_thermal_apic_entry(THERMAL_APIC_VECTOR);
- __smp_thermal_interrupt();
+ inc_irq_stat(irq_thermal_count);
+ smp_thermal_vector();
trace_thermal_apic_exit(THERMAL_APIC_VECTOR);
exiting_ack_irq();
}
diff --git a/arch/x86/kernel/cpu/mcheck/threshold.c b/arch/x86/kernel/cpu/mcheck/threshold.c
index bb0e75eed10a..5e7249e42f8f 100644
--- a/arch/x86/kernel/cpu/mcheck/threshold.c
+++ b/arch/x86/kernel/cpu/mcheck/threshold.c
@@ -17,24 +17,12 @@ static void default_threshold_interrupt(void)
void (*mce_threshold_vector)(void) = default_threshold_interrupt;
-static inline void __smp_threshold_interrupt(void)
-{
- inc_irq_stat(irq_threshold_count);
- mce_threshold_vector();
-}
-
asmlinkage __visible void __irq_entry smp_threshold_interrupt(void)
{
entering_irq();
- __smp_threshold_interrupt();
- exiting_ack_irq();
-}
-
-asmlinkage __visible void __irq_entry smp_trace_threshold_interrupt(void)
-{
- entering_irq();
trace_threshold_apic_entry(THRESHOLD_APIC_VECTOR);
- __smp_threshold_interrupt();
+ inc_irq_stat(irq_threshold_count);
+ mce_threshold_vector();
trace_threshold_apic_exit(THRESHOLD_APIC_VECTOR);
exiting_ack_irq();
}
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 21b185793c80..c6daec4bdba5 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -400,9 +400,12 @@ static void update_cache(struct ucode_patch *new_patch)
list_for_each_entry(p, &microcode_cache, plist) {
if (p->equiv_cpu == new_patch->equiv_cpu) {
- if (p->patch_id >= new_patch->patch_id)
+ if (p->patch_id >= new_patch->patch_id) {
/* we already have the latest patch */
+ kfree(new_patch->data);
+ kfree(new_patch);
return;
+ }
list_replace(&p->plist, &new_patch->plist);
kfree(p->data);
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 9cb98ee103db..86e8f0b2537b 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -561,7 +561,7 @@ static struct attribute *mc_default_attrs[] = {
NULL
};
-static struct attribute_group mc_attr_group = {
+static const struct attribute_group mc_attr_group = {
.attrs = mc_default_attrs,
.name = "microcode",
};
@@ -707,7 +707,7 @@ static struct attribute *cpu_root_microcode_attrs[] = {
NULL
};
-static struct attribute_group cpu_root_microcode_group = {
+static const struct attribute_group cpu_root_microcode_group = {
.name = "microcode",
.attrs = cpu_root_microcode_attrs,
};
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 59edbe9d4ccb..8f7a9bbad514 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -146,18 +146,18 @@ static bool microcode_matches(struct microcode_header_intel *mc_header,
return false;
}
-static struct ucode_patch *__alloc_microcode_buf(void *data, unsigned int size)
+static struct ucode_patch *memdup_patch(void *data, unsigned int size)
{
struct ucode_patch *p;
p = kzalloc(sizeof(struct ucode_patch), GFP_KERNEL);
if (!p)
- return ERR_PTR(-ENOMEM);
+ return NULL;
p->data = kmemdup(data, size, GFP_KERNEL);
if (!p->data) {
kfree(p);
- return ERR_PTR(-ENOMEM);
+ return NULL;
}
return p;
@@ -183,8 +183,8 @@ static void save_microcode_patch(void *data, unsigned int size)
if (mc_hdr->rev <= mc_saved_hdr->rev)
continue;
- p = __alloc_microcode_buf(data, size);
- if (IS_ERR(p))
+ p = memdup_patch(data, size);
+ if (!p)
pr_err("Error allocating buffer %p\n", data);
else
list_replace(&iter->plist, &p->plist);
@@ -196,24 +196,25 @@ static void save_microcode_patch(void *data, unsigned int size)
* newly found.
*/
if (!prev_found) {
- p = __alloc_microcode_buf(data, size);
- if (IS_ERR(p))
+ p = memdup_patch(data, size);
+ if (!p)
pr_err("Error allocating buffer for %p\n", data);
else
list_add_tail(&p->plist, &microcode_cache);
}
+ if (!p)
+ return;
+
/*
* Save for early loading. On 32-bit, that needs to be a physical
* address as the APs are running from physical addresses, before
* paging has been enabled.
*/
- if (p) {
- if (IS_ENABLED(CONFIG_X86_32))
- intel_ucode_patch = (struct microcode_intel *)__pa_nodebug(p->data);
- else
- intel_ucode_patch = p->data;
- }
+ if (IS_ENABLED(CONFIG_X86_32))
+ intel_ucode_patch = (struct microcode_intel *)__pa_nodebug(p->data);
+ else
+ intel_ucode_patch = p->data;
}
static int microcode_sanity_check(void *mc, int print_err)
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 70e717fccdd6..3b3f713e15e5 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -59,13 +59,8 @@ void hyperv_vector_handler(struct pt_regs *regs)
void hv_setup_vmbus_irq(void (*handler)(void))
{
vmbus_handler = handler;
- /*
- * Setup the IDT for hypervisor callback. Prevent reallocation
- * at module reload.
- */
- if (!test_bit(HYPERVISOR_CALLBACK_VECTOR, used_vectors))
- alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
- hyperv_callback_vector);
+ /* Set up the IDT for hypervisor callback */
+ alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector);
}
void hv_remove_vmbus_irq(void)
@@ -184,9 +179,15 @@ static void __init ms_hyperv_init_platform(void)
ms_hyperv.misc_features = cpuid_edx(HYPERV_CPUID_FEATURES);
ms_hyperv.hints = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO);
- pr_info("HyperV: features 0x%x, hints 0x%x\n",
+ pr_info("Hyper-V: features 0x%x, hints 0x%x\n",
ms_hyperv.features, ms_hyperv.hints);
+ ms_hyperv.max_vp_index = cpuid_eax(HVCPUID_IMPLEMENTATION_LIMITS);
+ ms_hyperv.max_lp_index = cpuid_ebx(HVCPUID_IMPLEMENTATION_LIMITS);
+
+ pr_debug("Hyper-V: max %u virtual processors, %u logical processors\n",
+ ms_hyperv.max_vp_index, ms_hyperv.max_lp_index);
+
/*
* Extract host information.
*/
@@ -219,7 +220,7 @@ static void __init ms_hyperv_init_platform(void)
rdmsrl(HV_X64_MSR_APIC_FREQUENCY, hv_lapic_frequency);
hv_lapic_frequency = div_u64(hv_lapic_frequency, HZ);
lapic_timer_frequency = hv_lapic_frequency;
- pr_info("HyperV: LAPIC Timer Frequency: %#x\n",
+ pr_info("Hyper-V: LAPIC Timer Frequency: %#x\n",
lapic_timer_frequency);
}
@@ -249,11 +250,12 @@ static void __init ms_hyperv_init_platform(void)
* Setup the hook to get control post apic initialization.
*/
x86_platform.apic_post_init = hyperv_init;
+ hyperv_setup_mmu_ops();
#endif
}
const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
- .name = "Microsoft HyperV",
+ .name = "Microsoft Hyper-V",
.detect = ms_hyperv_platform,
.init_platform = ms_hyperv_init_platform,
};
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index c5bb63be4ba1..40d5a8a75212 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -237,6 +237,18 @@ set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type typ
stop_machine(mtrr_rendezvous_handler, &data, cpu_online_mask);
}
+static void set_mtrr_cpuslocked(unsigned int reg, unsigned long base,
+ unsigned long size, mtrr_type type)
+{
+ struct set_mtrr_data data = { .smp_reg = reg,
+ .smp_base = base,
+ .smp_size = size,
+ .smp_type = type
+ };
+
+ stop_machine_cpuslocked(mtrr_rendezvous_handler, &data, cpu_online_mask);
+}
+
static void set_mtrr_from_inactive_cpu(unsigned int reg, unsigned long base,
unsigned long size, mtrr_type type)
{
@@ -370,7 +382,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
/* Search for an empty MTRR */
i = mtrr_if->get_free_region(base, size, replace);
if (i >= 0) {
- set_mtrr(i, base, size, type);
+ set_mtrr_cpuslocked(i, base, size, type);
if (likely(replace < 0)) {
mtrr_usage_table[i] = 1;
} else {
@@ -378,7 +390,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
if (increment)
mtrr_usage_table[i]++;
if (unlikely(replace != i)) {
- set_mtrr(replace, 0, 0, 0);
+ set_mtrr_cpuslocked(replace, 0, 0, 0);
mtrr_usage_table[replace] = 0;
}
}
@@ -506,7 +518,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
goto out;
}
if (--mtrr_usage_table[reg] < 1)
- set_mtrr(reg, 0, 0, 0);
+ set_mtrr_cpuslocked(reg, 0, 0, 0);
error = reg;
out:
mutex_unlock(&mtrr_mutex);
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index 23c23508c012..05459ad3db46 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -31,6 +31,7 @@ static const struct cpuid_bit cpuid_bits[] = {
{ X86_FEATURE_HW_PSTATE, CPUID_EDX, 7, 0x80000007, 0 },
{ X86_FEATURE_CPB, CPUID_EDX, 9, 0x80000007, 0 },
{ X86_FEATURE_PROC_FEEDBACK, CPUID_EDX, 11, 0x80000007, 0 },
+ { X86_FEATURE_SME, CPUID_EAX, 0, 0x8000001f, 0 },
{ 0, 0, 0, 0, 0 }
};
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
index 3fe45f84ced4..cbf1f6ba39a8 100644
--- a/arch/x86/kernel/devicetree.c
+++ b/arch/x86/kernel/devicetree.c
@@ -235,8 +235,7 @@ static void __init dtb_add_ioapic(struct device_node *dn)
ret = of_address_to_resource(dn, 0, &r);
if (ret) {
- printk(KERN_ERR "Can't obtain address from node %s.\n",
- dn->full_name);
+ printk(KERN_ERR "Can't obtain address from device node %pOF.\n", dn);
return;
}
mp_register_ioapic(++ioapic_id, r.start, gsi_top, &cfg);
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index dbce3cca94cb..f13b4c00a5de 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -94,6 +94,9 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
if (stack_name)
printk("%s <%s>\n", log_lvl, stack_name);
+ if (regs && on_stack(&stack_info, regs, sizeof(*regs)))
+ __show_regs(regs, 0);
+
/*
* Scan the stack, printing any text addresses we find. At the
* same time, follow proper stack frames with the unwinder.
@@ -118,10 +121,8 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
* Don't print regs->ip again if it was already printed
* by __show_regs() below.
*/
- if (regs && stack == &regs->ip) {
- unwind_next_frame(&state);
- continue;
- }
+ if (regs && stack == &regs->ip)
+ goto next;
if (stack == ret_addr_p)
reliable = 1;
@@ -144,6 +145,7 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
if (!reliable)
continue;
+next:
/*
* Get the next frame from the unwinder. No need to
* check for an error: if anything goes wrong, the rest
@@ -153,7 +155,7 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
/* if the frame has entry regs, print them */
regs = unwind_get_entry_regs(&state);
- if (regs)
+ if (regs && on_stack(&stack_info, regs, sizeof(*regs)))
__show_regs(regs, 0);
}
@@ -265,7 +267,7 @@ int __die(const char *str, struct pt_regs *regs, long err)
#ifdef CONFIG_X86_32
if (user_mode(regs)) {
sp = regs->sp;
- ss = regs->ss & 0xffff;
+ ss = regs->ss;
} else {
sp = kernel_stack_pointer(regs);
savesegment(ss, ss);
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index e5f0b40e66d2..4f0481474903 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -37,7 +37,7 @@ static bool in_hardirq_stack(unsigned long *stack, struct stack_info *info)
* This is a software stack, so 'end' can be a valid stack pointer.
* It just means the stack is empty.
*/
- if (stack < begin || stack > end)
+ if (stack <= begin || stack > end)
return false;
info->type = STACK_TYPE_IRQ;
@@ -62,7 +62,7 @@ static bool in_softirq_stack(unsigned long *stack, struct stack_info *info)
* This is a software stack, so 'end' can be a valid stack pointer.
* It just means the stack is empty.
*/
- if (stack < begin || stack > end)
+ if (stack <= begin || stack > end)
return false;
info->type = STACK_TYPE_SOFTIRQ;
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 3e1471d57487..225af4184f06 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -55,7 +55,7 @@ static bool in_exception_stack(unsigned long *stack, struct stack_info *info)
begin = end - (exception_stack_sizes[k] / sizeof(long));
regs = (struct pt_regs *)end - 1;
- if (stack < begin || stack >= end)
+ if (stack <= begin || stack >= end)
continue;
info->type = STACK_TYPE_EXCEPTION + k;
@@ -78,7 +78,7 @@ static bool in_irq_stack(unsigned long *stack, struct stack_info *info)
* This is a software stack, so 'end' can be a valid stack pointer.
* It just means the stack is empty.
*/
- if (stack < begin || stack > end)
+ if (stack <= begin || stack > end)
return false;
info->type = STACK_TYPE_IRQ;
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 532da61d605c..71c11ad5643e 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -96,7 +96,8 @@ EXPORT_SYMBOL_GPL(e820__mapped_any);
* Note: this function only works correctly once the E820 table is sorted and
* not-overlapping (at least for the range specified), which is the case normally.
*/
-bool __init e820__mapped_all(u64 start, u64 end, enum e820_type type)
+static struct e820_entry *__e820__mapped_all(u64 start, u64 end,
+ enum e820_type type)
{
int i;
@@ -122,9 +123,28 @@ bool __init e820__mapped_all(u64 start, u64 end, enum e820_type type)
* coverage of the desired range exists:
*/
if (start >= end)
- return 1;
+ return entry;
}
- return 0;
+
+ return NULL;
+}
+
+/*
+ * This function checks if the entire range <start,end> is mapped with type.
+ */
+bool __init e820__mapped_all(u64 start, u64 end, enum e820_type type)
+{
+ return __e820__mapped_all(start, end, type);
+}
+
+/*
+ * This function returns the type associated with the range <start,end>.
+ */
+int e820__get_entry_type(u64 start, u64 end)
+{
+ struct e820_entry *entry = __e820__mapped_all(start, end, 0);
+
+ return entry ? entry->type : -EINVAL;
}
/*
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index d907c3d8633f..927abeaf63e2 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -12,10 +12,10 @@
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/delay.h>
-#include <linux/dmi.h>
#include <linux/pci_ids.h>
#include <linux/bcma/bcma.h>
#include <linux/bcma/bcma_regs.h>
+#include <linux/platform_data/x86/apple.h>
#include <drm/i915_drm.h>
#include <asm/pci-direct.h>
#include <asm/dma.h>
@@ -527,6 +527,7 @@ static const struct pci_device_id intel_early_ids[] __initconst = {
INTEL_BXT_IDS(&gen9_early_ops),
INTEL_KBL_IDS(&gen9_early_ops),
INTEL_GLK_IDS(&gen9_early_ops),
+ INTEL_CNL_IDS(&gen9_early_ops),
};
static void __init
@@ -593,7 +594,7 @@ static void __init apple_airport_reset(int bus, int slot, int func)
u64 addr;
int i;
- if (!dmi_match(DMI_SYS_VENDOR, "Apple Inc."))
+ if (!x86_apple_machine)
return;
/* Card may have been put into PCI_D3hot by grub quirk */
diff --git a/arch/x86/kernel/eisa.c b/arch/x86/kernel/eisa.c
new file mode 100644
index 000000000000..f260e452e4f8
--- /dev/null
+++ b/arch/x86/kernel/eisa.c
@@ -0,0 +1,19 @@
+/*
+ * EISA specific code
+ *
+ * This file is licensed under the GPL V2
+ */
+#include <linux/ioport.h>
+#include <linux/eisa.h>
+#include <linux/io.h>
+
+static __init int eisa_bus_probe(void)
+{
+ void __iomem *p = ioremap(0x0FFFD9, 4);
+
+ if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
+ EISA_bus = 1;
+ iounmap(p);
+ return 0;
+}
+subsys_initcall(eisa_bus_probe);
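
The literal being compared against is simply the string "EISA" read as a little-endian 32-bit word, which is how readl() hands back the value on x86. A minimal host-side sketch, offered purely as illustration, demonstrates the equivalence:

	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>

	int main(void)
	{
		uint32_t sig;
		uint32_t expected = 'E' + ('I' << 8) + ('S' << 16) + ('A' << 24);

		/* Reading the bytes "EISA" as a 32-bit word on a little-endian
		 * machine yields exactly the constant used in eisa_bus_probe(). */
		memcpy(&sig, "EISA", 4);
		printf("read 0x%08x, expected 0x%08x, match=%d\n",
		       sig, expected, sig == expected);
		return 0;
	}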
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
index 6b91e2eb8d3f..9c4e7ba6870c 100644
--- a/arch/x86/kernel/espfix_64.c
+++ b/arch/x86/kernel/espfix_64.c
@@ -195,7 +195,7 @@ void init_espfix_ap(int cpu)
pte_p = pte_offset_kernel(&pmd, addr);
stack_page = page_address(alloc_pages_node(node, GFP_KERNEL, 0));
- pte = __pte(__pa(stack_page) | (__PAGE_KERNEL_RO & ptemask));
+ pte = __pte(__pa(stack_page) | ((__PAGE_KERNEL_RO | _PAGE_ENC) & ptemask));
for (n = 0; n < ESPFIX_PTE_CLONES; n++)
set_pte(&pte_p[n*PTE_STRIDE], pte);
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index 538ec012b371..cf2ce063f65a 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -10,6 +10,7 @@
#include <linux/mm.h>
#include <linux/memblock.h>
+#include <asm/desc.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/e820/api.h>
@@ -30,6 +31,9 @@ static void __init i386_default_early_setup(void)
asmlinkage __visible void __init i386_start_kernel(void)
{
cr4_init_shadow();
+
+ idt_setup_early_handler();
+
sanitize_boot_params(&boot_params);
x86_early_init_platform_quirks();
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 46c3c73e7f43..bab4fa579450 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -14,6 +14,7 @@
#include <linux/start_kernel.h>
#include <linux/io.h>
#include <linux/memblock.h>
+#include <linux/mem_encrypt.h>
#include <asm/processor.h>
#include <asm/proto.h>
@@ -33,7 +34,6 @@
/*
* Manage page tables very early on.
*/
-extern pgd_t early_top_pgt[PTRS_PER_PGD];
extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD];
static unsigned int __initdata next_early_pgt;
pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);
@@ -45,14 +45,17 @@ static void __head *fixup_pointer(void *ptr, unsigned long physaddr)
return ptr - (void *)_text + (void *)physaddr;
}
-void __head __startup_64(unsigned long physaddr)
+unsigned long __head __startup_64(unsigned long physaddr,
+ struct boot_params *bp)
{
unsigned long load_delta, *p;
+ unsigned long pgtable_flags;
pgdval_t *pgd;
p4dval_t *p4d;
pudval_t *pud;
pmdval_t *pmd, pmd_entry;
int i;
+ unsigned int *next_pgt_ptr;
/* Is the address too large? */
if (physaddr >> MAX_PHYSMEM_BITS)
@@ -68,6 +71,12 @@ void __head __startup_64(unsigned long physaddr)
if (load_delta & ~PMD_PAGE_MASK)
for (;;);
+ /* Activate Secure Memory Encryption (SME) if supported and enabled */
+ sme_enable(bp);
+
+ /* Include the SME encryption mask in the fixup value */
+ load_delta += sme_get_me_mask();
+
/* Fixup the physical addresses in the page table */
pgd = fixup_pointer(&early_top_pgt, physaddr);
@@ -92,30 +101,34 @@ void __head __startup_64(unsigned long physaddr)
* it avoids problems around wraparound.
*/
- pud = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr);
- pmd = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr);
+ next_pgt_ptr = fixup_pointer(&next_early_pgt, physaddr);
+ pud = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++], physaddr);
+ pmd = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++], physaddr);
+
+ pgtable_flags = _KERNPG_TABLE_NOENC + sme_get_me_mask();
if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
p4d = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr);
i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
- pgd[i + 0] = (pgdval_t)p4d + _KERNPG_TABLE;
- pgd[i + 1] = (pgdval_t)p4d + _KERNPG_TABLE;
+ pgd[i + 0] = (pgdval_t)p4d + pgtable_flags;
+ pgd[i + 1] = (pgdval_t)p4d + pgtable_flags;
i = (physaddr >> P4D_SHIFT) % PTRS_PER_P4D;
- p4d[i + 0] = (pgdval_t)pud + _KERNPG_TABLE;
- p4d[i + 1] = (pgdval_t)pud + _KERNPG_TABLE;
+ p4d[i + 0] = (pgdval_t)pud + pgtable_flags;
+ p4d[i + 1] = (pgdval_t)pud + pgtable_flags;
} else {
i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
- pgd[i + 0] = (pgdval_t)pud + _KERNPG_TABLE;
- pgd[i + 1] = (pgdval_t)pud + _KERNPG_TABLE;
+ pgd[i + 0] = (pgdval_t)pud + pgtable_flags;
+ pgd[i + 1] = (pgdval_t)pud + pgtable_flags;
}
i = (physaddr >> PUD_SHIFT) % PTRS_PER_PUD;
- pud[i + 0] = (pudval_t)pmd + _KERNPG_TABLE;
- pud[i + 1] = (pudval_t)pmd + _KERNPG_TABLE;
+ pud[i + 0] = (pudval_t)pmd + pgtable_flags;
+ pud[i + 1] = (pudval_t)pmd + pgtable_flags;
pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL;
+ pmd_entry += sme_get_me_mask();
pmd_entry += physaddr;
for (i = 0; i < DIV_ROUND_UP(_end - _text, PMD_SIZE); i++) {
@@ -136,9 +149,30 @@ void __head __startup_64(unsigned long physaddr)
pmd[i] += load_delta;
}
- /* Fixup phys_base */
+ /*
+ * Fixup phys_base - remove the memory encryption mask to obtain
+ * the true physical address.
+ */
p = fixup_pointer(&phys_base, physaddr);
- *p += load_delta;
+ *p += load_delta - sme_get_me_mask();
+
+ /* Encrypt the kernel (if SME is active) */
+ sme_encrypt_kernel();
+
+ /*
+ * Return the SME encryption mask (if SME is active) to be used as a
+ * modifier for the initial pgdir entry programmed into CR3.
+ */
+ return sme_get_me_mask();
+}
+
+unsigned long __startup_secondary_64(void)
+{
+ /*
+ * Return the SME encryption mask (if SME is active) to be used as a
+ * modifier for the initial pgdir entry programmed into CR3.
+ */
+ return sme_get_me_mask();
}
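
The mask returned here is consumed later in this patch by head_64.S, which adds it to the physical address of the top-level page table before loading CR3. A small sketch with made-up values (the C-bit position is reported by CPUID leaf 0x8000001f and is shown as bit 47 purely for illustration) shows what the resulting CR3 value looks like:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t init_top_pgt_pa = 0x2c0a000;       /* example physical address */
		uint64_t sme_me_mask     = 1ULL << 47;      /* example C-bit position   */

		/* Mirrors the "addq $(init_top_pgt - __START_KERNEL_map), %rax"
		 * step: the SME mask returned in %rax becomes part of CR3, so the
		 * page-table root itself is accessed through the encrypted mapping. */
		uint64_t cr3 = init_top_pgt_pa + sme_me_mask;

		printf("cr3 = 0x%016llx\n", (unsigned long long)cr3);
		return 0;
	}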
/* Wipe all early page tables except for the kernel symbol map */
@@ -146,17 +180,17 @@ static void __init reset_early_page_tables(void)
{
memset(early_top_pgt, 0, sizeof(pgd_t)*(PTRS_PER_PGD-1));
next_early_pgt = 0;
- write_cr3(__pa_nodebug(early_top_pgt));
+ write_cr3(__sme_pa_nodebug(early_top_pgt));
}
/* Create a new PMD entry */
-int __init early_make_pgtable(unsigned long address)
+int __init __early_make_pgtable(unsigned long address, pmdval_t pmd)
{
unsigned long physaddr = address - __PAGE_OFFSET;
pgdval_t pgd, *pgd_p;
p4dval_t p4d, *p4d_p;
pudval_t pud, *pud_p;
- pmdval_t pmd, *pmd_p;
+ pmdval_t *pmd_p;
/* Invalid address or early pgt is done ? */
if (physaddr >= MAXMEM || read_cr3_pa() != __pa_nodebug(early_top_pgt))
@@ -215,12 +249,21 @@ again:
memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD);
*pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
}
- pmd = (physaddr & PMD_MASK) + early_pmd_flags;
pmd_p[pmd_index(address)] = pmd;
return 0;
}
+int __init early_make_pgtable(unsigned long address)
+{
+ unsigned long physaddr = address - __PAGE_OFFSET;
+ pmdval_t pmd;
+
+ pmd = (physaddr & PMD_MASK) + early_pmd_flags;
+
+ return __early_make_pgtable(address, pmd);
+}
+
/* Don't add a printk in there. printk relies on the PDA which is not initialized
yet. */
static void __init clear_bss(void)
@@ -243,6 +286,12 @@ static void __init copy_bootdata(char *real_mode_data)
char * command_line;
unsigned long cmd_line_ptr;
+ /*
+ * If SME is active, this will create decrypted mappings of the
+ * boot data in advance of the copy operations.
+ */
+ sme_map_bootdata(real_mode_data);
+
memcpy(&boot_params, real_mode_data, sizeof boot_params);
sanitize_boot_params(&boot_params);
cmd_line_ptr = get_cmd_line_ptr();
@@ -250,12 +299,18 @@ static void __init copy_bootdata(char *real_mode_data)
command_line = __va(cmd_line_ptr);
memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
}
+
+ /*
+ * The old boot data is no longer needed and won't be reserved,
+ * freeing up that memory for use by the system. If SME is active,
+ * we need to remove the mappings that were created so that the
+ * memory doesn't remain mapped as decrypted.
+ */
+ sme_unmap_bootdata(real_mode_data);
}
asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
{
- int i;
-
/*
* Build-time sanity checks on the kernel image and module
* area mappings. (these are purely build-time and produce no code)
@@ -279,11 +334,16 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
clear_page(init_top_pgt);
+ /*
+ * SME support may update early_pmd_flags to include the memory
+ * encryption mask, so it needs to be called before anything
+ * that may generate a page fault.
+ */
+ sme_early_init();
+
kasan_early_init();
- for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
- set_intr_gate(i, early_idt_handler_array[i]);
- load_idt((const struct desc_ptr *)&idt_descr);
+ idt_setup_early_handler();
copy_bootdata(__va(real_mode_data));
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 1f85ee8f9439..9ed3074d0d27 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -155,7 +155,6 @@ ENTRY(startup_32)
jmp *%eax
.Lbad_subarch:
-WEAK(lguest_entry)
WEAK(xen_entry)
/* Unknown implementation; there's really
nothing we can do at this point. */
@@ -165,7 +164,6 @@ WEAK(xen_entry)
subarch_entries:
.long .Ldefault_entry /* normal x86/PC */
- .long lguest_entry /* lguest hypervisor */
.long xen_entry /* Xen hypervisor */
.long .Ldefault_entry /* Moorestown MID */
num_subarch_entries = (. - subarch_entries) / 4
@@ -347,7 +345,6 @@ ENTRY(startup_32_smp)
movl %eax,%cr0
lgdt early_gdt_descr
- lidt idt_descr
ljmp $(__KERNEL_CS),$1f
1: movl $(__KERNEL_DS),%eax # reload all the segment registers
movl %eax,%ss # after changing gdt.
@@ -380,37 +377,6 @@ ENDPROC(startup_32_smp)
*/
__INIT
setup_once:
- /*
- * Set up a idt with 256 interrupt gates that push zero if there
- * is no error code and then jump to early_idt_handler_common.
- * It doesn't actually load the idt - that needs to be done on
- * each CPU. Interrupts are enabled elsewhere, when we can be
- * relatively sure everything is ok.
- */
-
- movl $idt_table,%edi
- movl $early_idt_handler_array,%eax
- movl $NUM_EXCEPTION_VECTORS,%ecx
-1:
- movl %eax,(%edi)
- movl %eax,4(%edi)
- /* interrupt gate, dpl=0, present */
- movl $(0x8E000000 + __KERNEL_CS),2(%edi)
- addl $EARLY_IDT_HANDLER_SIZE,%eax
- addl $8,%edi
- loop 1b
-
- movl $256 - NUM_EXCEPTION_VECTORS,%ecx
- movl $ignore_int,%edx
- movl $(__KERNEL_CS << 16),%eax
- movw %dx,%ax /* selector = 0x0010 = cs */
- movw $0x8E00,%dx /* interrupt gate - dpl=0, present */
-2:
- movl %eax,(%edi)
- movl %edx,4(%edi)
- addl $8,%edi
- loop 2b
-
#ifdef CONFIG_CC_STACKPROTECTOR
/*
* Configure the stack canary. The linker can't handle this by
@@ -457,12 +423,9 @@ early_idt_handler_common:
/* The vector number is in pt_regs->gs */
cld
- pushl %fs /* pt_regs->fs */
- movw $0, 2(%esp) /* clear high bits (some CPUs leave garbage) */
- pushl %es /* pt_regs->es */
- movw $0, 2(%esp) /* clear high bits (some CPUs leave garbage) */
- pushl %ds /* pt_regs->ds */
- movw $0, 2(%esp) /* clear high bits (some CPUs leave garbage) */
+ pushl %fs /* pt_regs->fs (__fsh varies by model) */
+ pushl %es /* pt_regs->es (__esh varies by model) */
+ pushl %ds /* pt_regs->ds (__dsh varies by model) */
pushl %eax /* pt_regs->ax */
pushl %ebp /* pt_regs->bp */
pushl %edi /* pt_regs->di */
@@ -479,9 +442,8 @@ early_idt_handler_common:
/* Load the vector number into EDX */
movl PT_GS(%esp), %edx
- /* Load GS into pt_regs->gs and clear high bits */
+ /* Load GS into pt_regs->gs (and maybe clobber __gsh) */
movw %gs, PT_GS(%esp)
- movw $0, PT_GS+2(%esp)
movl %esp, %eax /* args are pt_regs (EAX), trapnr (EDX) */
call early_fixup_exception
@@ -493,18 +455,17 @@ early_idt_handler_common:
popl %edi /* pt_regs->di */
popl %ebp /* pt_regs->bp */
popl %eax /* pt_regs->ax */
- popl %ds /* pt_regs->ds */
- popl %es /* pt_regs->es */
- popl %fs /* pt_regs->fs */
- popl %gs /* pt_regs->gs */
+ popl %ds /* pt_regs->ds (always ignores __dsh) */
+ popl %es /* pt_regs->es (always ignores __esh) */
+ popl %fs /* pt_regs->fs (always ignores __fsh) */
+ popl %gs /* pt_regs->gs (always ignores __gsh) */
decl %ss:early_recursion_flag
addl $4, %esp /* pop pt_regs->orig_ax */
iret
ENDPROC(early_idt_handler_common)
/* This is the default interrupt "handler" :-) */
- ALIGN
-ignore_int:
+ENTRY(early_ignore_irq)
cld
#ifdef CONFIG_PRINTK
pushl %eax
@@ -539,7 +500,8 @@ ignore_int:
hlt_loop:
hlt
jmp hlt_loop
-ENDPROC(ignore_int)
+ENDPROC(early_ignore_irq)
+
__INITDATA
.align 4
GLOBAL(early_recursion_flag)
@@ -628,7 +590,6 @@ int_msg:
.data
.globl boot_gdt_descr
-.globl idt_descr
ALIGN
# early boot GDT descriptor (must use 1:1 address mapping)
@@ -637,11 +598,6 @@ boot_gdt_descr:
.word __BOOT_DS+7
.long boot_gdt - __PAGE_OFFSET
- .word 0 # 32-bit align idt_desc.address
-idt_descr:
- .word IDT_ENTRIES*8-1 # idt contains 256 entries
- .long idt_table
-
# boot GDT descriptor (later on used by CPU#0):
.word 0 # 32 bit align gdt_desc.address
ENTRY(early_gdt_descr)
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 6225550883df..513cbb012ecc 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -73,12 +73,19 @@ startup_64:
/* Sanitize CPU configuration */
call verify_cpu
+ /*
+ * Perform pagetable fixups. Additionally, if SME is active, encrypt
+ * the kernel and retrieve the modifier (SME encryption mask if SME
+ * is active) to be added to the initial pgdir entry that will be
+ * programmed into CR3.
+ */
leaq _text(%rip), %rdi
pushq %rsi
call __startup_64
popq %rsi
- movq $(early_top_pgt - __START_KERNEL_map), %rax
+ /* Form the CR3 value being sure to include the CR3 modifier */
+ addq $(early_top_pgt - __START_KERNEL_map), %rax
jmp 1f
ENTRY(secondary_startup_64)
/*
@@ -98,7 +105,16 @@ ENTRY(secondary_startup_64)
/* Sanitize CPU configuration */
call verify_cpu
- movq $(init_top_pgt - __START_KERNEL_map), %rax
+ /*
+ * Retrieve the modifier (SME encryption mask if SME is active) to be
+ * added to the initial pgdir entry that will be programmed into CR3.
+ */
+ pushq %rsi
+ call __startup_secondary_64
+ popq %rsi
+
+ /* Form the CR3 value being sure to include the CR3 modifier */
+ addq $(init_top_pgt - __START_KERNEL_map), %rax
1:
/* Enable PAE mode, PGE and LA57 */
@@ -335,9 +351,9 @@ GLOBAL(name)
NEXT_PAGE(early_top_pgt)
.fill 511,8,0
#ifdef CONFIG_X86_5LEVEL
- .quad level4_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
+ .quad level4_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
#else
- .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
+ .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
#endif
NEXT_PAGE(early_dynamic_pgts)
@@ -350,15 +366,15 @@ NEXT_PAGE(init_top_pgt)
.fill 512,8,0
#else
NEXT_PAGE(init_top_pgt)
- .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
+ .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
.org init_top_pgt + PGD_PAGE_OFFSET*8, 0
- .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
+ .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
.org init_top_pgt + PGD_START_KERNEL*8, 0
/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
- .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
+ .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
NEXT_PAGE(level3_ident_pgt)
- .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
+ .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
.fill 511, 8, 0
NEXT_PAGE(level2_ident_pgt)
/* Since I easily can, map the first 1G.
@@ -370,14 +386,14 @@ NEXT_PAGE(level2_ident_pgt)
#ifdef CONFIG_X86_5LEVEL
NEXT_PAGE(level4_kernel_pgt)
.fill 511,8,0
- .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
+ .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
#endif
NEXT_PAGE(level3_kernel_pgt)
.fill L3_START_KERNEL,8,0
/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
- .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
- .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
+ .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
+ .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
NEXT_PAGE(level2_kernel_pgt)
/*
@@ -395,7 +411,7 @@ NEXT_PAGE(level2_kernel_pgt)
NEXT_PAGE(level2_fixmap_pgt)
.fill 506,8,0
- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
+ .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
.fill 5,8,0
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 16f82a3aaec7..8ce4212e2b8d 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -345,21 +345,10 @@ static int hpet_shutdown(struct clock_event_device *evt, int timer)
return 0;
}
-static int hpet_resume(struct clock_event_device *evt, int timer)
-{
- if (!timer) {
- hpet_enable_legacy_int();
- } else {
- struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
-
- irq_domain_deactivate_irq(irq_get_irq_data(hdev->irq));
- irq_domain_activate_irq(irq_get_irq_data(hdev->irq));
- disable_hardirq(hdev->irq);
- irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));
- enable_irq(hdev->irq);
- }
+static int hpet_resume(struct clock_event_device *evt)
+{
+ hpet_enable_legacy_int();
hpet_print_config();
-
return 0;
}
@@ -417,7 +406,7 @@ static int hpet_legacy_set_periodic(struct clock_event_device *evt)
static int hpet_legacy_resume(struct clock_event_device *evt)
{
- return hpet_resume(evt, 0);
+ return hpet_resume(evt);
}
static int hpet_legacy_next_event(unsigned long delta,
@@ -510,8 +499,14 @@ static int hpet_msi_set_periodic(struct clock_event_device *evt)
static int hpet_msi_resume(struct clock_event_device *evt)
{
struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
+ struct irq_data *data = irq_get_irq_data(hdev->irq);
+ struct msi_msg msg;
- return hpet_resume(evt, hdev->num);
+ /* Restore the MSI msg and unmask the interrupt */
+ irq_chip_compose_msi_msg(data, &msg);
+ hpet_msi_write(hdev, &msg);
+ hpet_msi_unmask(data);
+ return 0;
}
static int hpet_msi_next_event(unsigned long delta,
diff --git a/arch/x86/kernel/idt.c b/arch/x86/kernel/idt.c
new file mode 100644
index 000000000000..6107ee1cb8d5
--- /dev/null
+++ b/arch/x86/kernel/idt.c
@@ -0,0 +1,371 @@
+/*
+ * Interrupt descriptor table related code
+ *
+ * This file is licensed under the GPL V2
+ */
+#include <linux/interrupt.h>
+
+#include <asm/traps.h>
+#include <asm/proto.h>
+#include <asm/desc.h>
+
+struct idt_data {
+ unsigned int vector;
+ unsigned int segment;
+ struct idt_bits bits;
+ const void *addr;
+};
+
+#define DPL0 0x0
+#define DPL3 0x3
+
+#define DEFAULT_STACK 0
+
+#define G(_vector, _addr, _ist, _type, _dpl, _segment) \
+ { \
+ .vector = _vector, \
+ .bits.ist = _ist, \
+ .bits.type = _type, \
+ .bits.dpl = _dpl, \
+ .bits.p = 1, \
+ .addr = _addr, \
+ .segment = _segment, \
+ }
+
+/* Interrupt gate */
+#define INTG(_vector, _addr) \
+ G(_vector, _addr, DEFAULT_STACK, GATE_INTERRUPT, DPL0, __KERNEL_CS)
+
+/* System interrupt gate */
+#define SYSG(_vector, _addr) \
+ G(_vector, _addr, DEFAULT_STACK, GATE_INTERRUPT, DPL3, __KERNEL_CS)
+
+/* Interrupt gate with interrupt stack */
+#define ISTG(_vector, _addr, _ist) \
+ G(_vector, _addr, _ist, GATE_INTERRUPT, DPL0, __KERNEL_CS)
+
+/* System interrupt gate with interrupt stack */
+#define SISTG(_vector, _addr, _ist) \
+ G(_vector, _addr, _ist, GATE_INTERRUPT, DPL3, __KERNEL_CS)
+
+/* Task gate */
+#define TSKG(_vector, _gdt) \
+ G(_vector, NULL, DEFAULT_STACK, GATE_TASK, DPL0, _gdt << 3)
+
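
The G()/INTG()/SYSG() wrappers are plain designated initializers. The compile-time sketch below uses simplified stand-ins for the kernel types and selectors (the 0x10 selector and the EX_ prefixed constants are illustrative, not the real __KERNEL_CS or kernel definitions) to show what one table entry expands to:

	#include <stdio.h>

	struct idt_bits { unsigned ist : 3, zero : 5, type : 5, dpl : 2, p : 1; };
	struct idt_data {
		unsigned int	vector;
		unsigned int	segment;
		struct idt_bits	bits;
		const void	*addr;
	};

	#define EX_GATE_INTERRUPT	0xE
	#define EX_DPL0			0x0
	#define EX_DEFAULT_STACK	0
	#define EX_KERNEL_CS		0x10	/* illustrative selector value */

	#define G(_vector, _addr, _ist, _type, _dpl, _segment)			\
		{ .vector = _vector, .bits.ist = _ist, .bits.type = _type,	\
		  .bits.dpl = _dpl, .bits.p = 1, .addr = _addr,		\
		  .segment = _segment, }

	#define INTG(_vector, _addr) \
		G(_vector, _addr, EX_DEFAULT_STACK, EX_GATE_INTERRUPT, EX_DPL0, EX_KERNEL_CS)

	static void divide_error(void) { }

	static const struct idt_data demo[] = {
		INTG(0, divide_error),		/* X86_TRAP_DE */
	};

	int main(void)
	{
		printf("vector=%u segment=0x%x type=0x%x dpl=%u p=%u\n",
		       demo[0].vector, demo[0].segment, demo[0].bits.type,
		       demo[0].bits.dpl, demo[0].bits.p);
		return 0;
	}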
+/*
+ * Early traps running on the DEFAULT_STACK because the other interrupt
+ * stacks work only after cpu_init().
+ */
+static const __initdata struct idt_data early_idts[] = {
+ INTG(X86_TRAP_DB, debug),
+ SYSG(X86_TRAP_BP, int3),
+#ifdef CONFIG_X86_32
+ INTG(X86_TRAP_PF, page_fault),
+#endif
+};
+
+/*
+ * The default IDT entries which are set up in trap_init() before
+ * cpu_init() is invoked. Interrupt stacks cannot be used at that point and
+ * the traps which use them are reinitialized with IST after cpu_init() has
+ * set up TSS.
+ */
+static const __initdata struct idt_data def_idts[] = {
+ INTG(X86_TRAP_DE, divide_error),
+ INTG(X86_TRAP_NMI, nmi),
+ INTG(X86_TRAP_BR, bounds),
+ INTG(X86_TRAP_UD, invalid_op),
+ INTG(X86_TRAP_NM, device_not_available),
+ INTG(X86_TRAP_OLD_MF, coprocessor_segment_overrun),
+ INTG(X86_TRAP_TS, invalid_TSS),
+ INTG(X86_TRAP_NP, segment_not_present),
+ INTG(X86_TRAP_SS, stack_segment),
+ INTG(X86_TRAP_GP, general_protection),
+ INTG(X86_TRAP_SPURIOUS, spurious_interrupt_bug),
+ INTG(X86_TRAP_MF, coprocessor_error),
+ INTG(X86_TRAP_AC, alignment_check),
+ INTG(X86_TRAP_XF, simd_coprocessor_error),
+
+#ifdef CONFIG_X86_32
+ TSKG(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS),
+#else
+ INTG(X86_TRAP_DF, double_fault),
+#endif
+ INTG(X86_TRAP_DB, debug),
+ INTG(X86_TRAP_NMI, nmi),
+ INTG(X86_TRAP_BP, int3),
+
+#ifdef CONFIG_X86_MCE
+ INTG(X86_TRAP_MC, &machine_check),
+#endif
+
+ SYSG(X86_TRAP_OF, overflow),
+#if defined(CONFIG_IA32_EMULATION)
+ SYSG(IA32_SYSCALL_VECTOR, entry_INT80_compat),
+#elif defined(CONFIG_X86_32)
+ SYSG(IA32_SYSCALL_VECTOR, entry_INT80_32),
+#endif
+};
+
+/*
+ * The APIC and SMP idt entries
+ */
+static const __initdata struct idt_data apic_idts[] = {
+#ifdef CONFIG_SMP
+ INTG(RESCHEDULE_VECTOR, reschedule_interrupt),
+ INTG(CALL_FUNCTION_VECTOR, call_function_interrupt),
+ INTG(CALL_FUNCTION_SINGLE_VECTOR, call_function_single_interrupt),
+ INTG(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt),
+ INTG(REBOOT_VECTOR, reboot_interrupt),
+#endif
+
+#ifdef CONFIG_X86_THERMAL_VECTOR
+ INTG(THERMAL_APIC_VECTOR, thermal_interrupt),
+#endif
+
+#ifdef CONFIG_X86_MCE_THRESHOLD
+ INTG(THRESHOLD_APIC_VECTOR, threshold_interrupt),
+#endif
+
+#ifdef CONFIG_X86_MCE_AMD
+ INTG(DEFERRED_ERROR_VECTOR, deferred_error_interrupt),
+#endif
+
+#ifdef CONFIG_X86_LOCAL_APIC
+ INTG(LOCAL_TIMER_VECTOR, apic_timer_interrupt),
+ INTG(X86_PLATFORM_IPI_VECTOR, x86_platform_ipi),
+# ifdef CONFIG_HAVE_KVM
+ INTG(POSTED_INTR_VECTOR, kvm_posted_intr_ipi),
+ INTG(POSTED_INTR_WAKEUP_VECTOR, kvm_posted_intr_wakeup_ipi),
+ INTG(POSTED_INTR_NESTED_VECTOR, kvm_posted_intr_nested_ipi),
+# endif
+# ifdef CONFIG_IRQ_WORK
+ INTG(IRQ_WORK_VECTOR, irq_work_interrupt),
+# endif
+ INTG(SPURIOUS_APIC_VECTOR, spurious_interrupt),
+ INTG(ERROR_APIC_VECTOR, error_interrupt),
+#endif
+};
+
+#ifdef CONFIG_X86_64
+/*
+ * Early traps running on the DEFAULT_STACK because the other interrupt
+ * stacks work only after cpu_init().
+ */
+static const __initdata struct idt_data early_pf_idts[] = {
+ INTG(X86_TRAP_PF, page_fault),
+};
+
+/*
+ * Override for the debug_idt. Same as the default, but with interrupt
+ * stack set to DEFAULT_STACK (0). Required for NMI trap handling.
+ */
+static const __initdata struct idt_data dbg_idts[] = {
+ INTG(X86_TRAP_DB, debug),
+ INTG(X86_TRAP_BP, int3),
+};
+#endif
+
+/* Must be page-aligned because the real IDT is used in a fixmap. */
+gate_desc idt_table[IDT_ENTRIES] __page_aligned_bss;
+
+struct desc_ptr idt_descr __ro_after_init = {
+ .size = (IDT_ENTRIES * 2 * sizeof(unsigned long)) - 1,
+ .address = (unsigned long) idt_table,
+};
+
+#ifdef CONFIG_X86_64
+/* No need to be aligned, but done to keep all IDTs defined the same way. */
+gate_desc debug_idt_table[IDT_ENTRIES] __page_aligned_bss;
+
+/*
+ * The exceptions which use interrupt stacks. They are set up after
+ * cpu_init() when the TSS has been initialized.
+ */
+static const __initdata struct idt_data ist_idts[] = {
+ ISTG(X86_TRAP_DB, debug, DEBUG_STACK),
+ ISTG(X86_TRAP_NMI, nmi, NMI_STACK),
+ SISTG(X86_TRAP_BP, int3, DEBUG_STACK),
+ ISTG(X86_TRAP_DF, double_fault, DOUBLEFAULT_STACK),
+#ifdef CONFIG_X86_MCE
+ ISTG(X86_TRAP_MC, &machine_check, MCE_STACK),
+#endif
+};
+
+/*
+ * Override for the debug_idt. Same as the default, but with interrupt
+ * stack set to DEFAULT_STACK (0). Required for NMI trap handling.
+ */
+const struct desc_ptr debug_idt_descr = {
+ .size = IDT_ENTRIES * 16 - 1,
+ .address = (unsigned long) debug_idt_table,
+};
+#endif
+
+static inline void idt_init_desc(gate_desc *gate, const struct idt_data *d)
+{
+ unsigned long addr = (unsigned long) d->addr;
+
+ gate->offset_low = (u16) addr;
+ gate->segment = (u16) d->segment;
+ gate->bits = d->bits;
+ gate->offset_middle = (u16) (addr >> 16);
+#ifdef CONFIG_X86_64
+ gate->offset_high = (u32) (addr >> 32);
+ gate->reserved = 0;
+#endif
+}
+
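
idt_init_desc() simply scatters the handler address across the three offset fields of the hardware gate layout. The arithmetic is easy to check in isolation; the address below is a made-up kernel-text address used only to show the split:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* Hypothetical handler address, chosen only to show the split. */
		uint64_t addr = 0xffffffff81234567ULL;

		uint16_t offset_low    = (uint16_t)addr;
		uint16_t offset_middle = (uint16_t)(addr >> 16);
		uint32_t offset_high   = (uint32_t)(addr >> 32);

		/* Prints: low=0x4567 middle=0x8123 high=0xffffffff */
		printf("low=0x%04x middle=0x%04x high=0x%08x\n",
		       offset_low, offset_middle, offset_high);
		return 0;
	}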
+static void
+idt_setup_from_table(gate_desc *idt, const struct idt_data *t, int size, bool sys)
+{
+ gate_desc desc;
+
+ for (; size > 0; t++, size--) {
+ idt_init_desc(&desc, t);
+ write_idt_entry(idt, t->vector, &desc);
+ if (sys)
+ set_bit(t->vector, used_vectors);
+ }
+}
+
+static void set_intr_gate(unsigned int n, const void *addr)
+{
+ struct idt_data data;
+
+ BUG_ON(n > 0xFF);
+
+ memset(&data, 0, sizeof(data));
+ data.vector = n;
+ data.addr = addr;
+ data.segment = __KERNEL_CS;
+ data.bits.type = GATE_INTERRUPT;
+ data.bits.p = 1;
+
+ idt_setup_from_table(idt_table, &data, 1, false);
+}
+
+/**
+ * idt_setup_early_traps - Initialize the idt table with early traps
+ *
+ * On x86_64 these traps do not use interrupt stacks as they can't work
+ * before cpu_init() is invoked and sets up TSS. The IST variants are
+ * installed after that.
+ */
+void __init idt_setup_early_traps(void)
+{
+ idt_setup_from_table(idt_table, early_idts, ARRAY_SIZE(early_idts),
+ true);
+ load_idt(&idt_descr);
+}
+
+/**
+ * idt_setup_traps - Initialize the idt table with default traps
+ */
+void __init idt_setup_traps(void)
+{
+ idt_setup_from_table(idt_table, def_idts, ARRAY_SIZE(def_idts), true);
+}
+
+#ifdef CONFIG_X86_64
+/**
+ * idt_setup_early_pf - Initialize the idt table with early pagefault handler
+ *
+ * On x86_64 this does not use interrupt stacks as they can't work before
+ * cpu_init() is invoked and sets up TSS. The IST variant is installed
+ * after that.
+ *
+ * FIXME: Why are 32-bit and 64-bit installing the PF handler at different
+ * places in the early setup code?
+ */
+void __init idt_setup_early_pf(void)
+{
+ idt_setup_from_table(idt_table, early_pf_idts,
+ ARRAY_SIZE(early_pf_idts), true);
+}
+
+/**
+ * idt_setup_ist_traps - Initialize the idt table with traps using IST
+ */
+void __init idt_setup_ist_traps(void)
+{
+ idt_setup_from_table(idt_table, ist_idts, ARRAY_SIZE(ist_idts), true);
+}
+
+/**
+ * idt_setup_debugidt_traps - Initialize the debug idt table with debug traps
+ */
+void __init idt_setup_debugidt_traps(void)
+{
+ memcpy(&debug_idt_table, &idt_table, IDT_ENTRIES * 16);
+
+ idt_setup_from_table(debug_idt_table, dbg_idts, ARRAY_SIZE(dbg_idts), false);
+}
+#endif
+
+/**
+ * idt_setup_apic_and_irq_gates - Setup APIC/SMP and normal interrupt gates
+ */
+void __init idt_setup_apic_and_irq_gates(void)
+{
+ int i = FIRST_EXTERNAL_VECTOR;
+ void *entry;
+
+ idt_setup_from_table(idt_table, apic_idts, ARRAY_SIZE(apic_idts), true);
+
+ for_each_clear_bit_from(i, used_vectors, FIRST_SYSTEM_VECTOR) {
+ entry = irq_entries_start + 8 * (i - FIRST_EXTERNAL_VECTOR);
+ set_intr_gate(i, entry);
+ }
+
+ for_each_clear_bit_from(i, used_vectors, NR_VECTORS) {
+#ifdef CONFIG_X86_LOCAL_APIC
+ set_bit(i, used_vectors);
+ set_intr_gate(i, spurious_interrupt);
+#else
+ entry = irq_entries_start + 8 * (i - FIRST_EXTERNAL_VECTOR);
+ set_intr_gate(i, entry);
+#endif
+ }
+}
+
+/**
+ * idt_setup_early_handler - Initialize the idt table with early handlers
+ */
+void __init idt_setup_early_handler(void)
+{
+ int i;
+
+ for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
+ set_intr_gate(i, early_idt_handler_array[i]);
+#ifdef CONFIG_X86_32
+ for ( ; i < NR_VECTORS; i++)
+ set_intr_gate(i, early_ignore_irq);
+#endif
+ load_idt(&idt_descr);
+}
+
+/**
+ * idt_invalidate - Invalidate interrupt descriptor table
+ * @addr: The virtual address of the 'invalid' IDT
+ */
+void idt_invalidate(void *addr)
+{
+ struct desc_ptr idt = { .address = (unsigned long) addr, .size = 0 };
+
+ load_idt(&idt);
+}
+
+void __init update_intr_gate(unsigned int n, const void *addr)
+{
+ if (WARN_ON_ONCE(!test_bit(n, used_vectors)))
+ return;
+ set_intr_gate(n, addr);
+}
+
+void alloc_intr_gate(unsigned int n, const void *addr)
+{
+ BUG_ON(n < FIRST_SYSTEM_VECTOR);
+ if (!test_and_set_bit(n, used_vectors))
+ set_intr_gate(n, addr);
+}
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 4aa03c5a14c9..52089c043160 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -29,9 +29,6 @@ EXPORT_PER_CPU_SYMBOL(irq_regs);
atomic_t irq_err_count;
-/* Function pointer for generic interrupt vector handling */
-void (*x86_platform_ipi_callback)(void) = NULL;
-
/*
* 'what should we do if we get a hw irq event on an illegal vector'.
* each architecture has to answer this themselves.
@@ -87,13 +84,13 @@ int arch_show_interrupts(struct seq_file *p, int prec)
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->icr_read_retry_count);
seq_puts(p, " APIC ICR read retries\n");
-#endif
if (x86_platform_ipi_callback) {
seq_printf(p, "%*s: ", prec, "PLT");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->x86_platform_ipis);
seq_puts(p, " Platform interrupts\n");
}
+#endif
#ifdef CONFIG_SMP
seq_printf(p, "%*s: ", prec, "RES");
for_each_online_cpu(j)
@@ -155,6 +152,12 @@ int arch_show_interrupts(struct seq_file *p, int prec)
seq_printf(p, "%10u ", irq_stats(j)->kvm_posted_intr_ipis);
seq_puts(p, " Posted-interrupt notification event\n");
+ seq_printf(p, "%*s: ", prec, "NPI");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ",
+ irq_stats(j)->kvm_posted_intr_nested_ipis);
+ seq_puts(p, " Nested posted-interrupt event\n");
+
seq_printf(p, "%*s: ", prec, "PIW");
for_each_online_cpu(j)
seq_printf(p, "%10u ",
@@ -177,9 +180,9 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
sum += irq_stats(cpu)->apic_perf_irqs;
sum += irq_stats(cpu)->apic_irq_work_irqs;
sum += irq_stats(cpu)->icr_read_retry_count;
-#endif
if (x86_platform_ipi_callback)
sum += irq_stats(cpu)->x86_platform_ipis;
+#endif
#ifdef CONFIG_SMP
sum += irq_stats(cpu)->irq_resched_count;
sum += irq_stats(cpu)->irq_call_count;
@@ -253,26 +256,26 @@ __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
return 1;
}
+#ifdef CONFIG_X86_LOCAL_APIC
+/* Function pointer for generic interrupt vector handling */
+void (*x86_platform_ipi_callback)(void) = NULL;
/*
* Handler for X86_PLATFORM_IPI_VECTOR.
*/
-void __smp_x86_platform_ipi(void)
-{
- inc_irq_stat(x86_platform_ipis);
-
- if (x86_platform_ipi_callback)
- x86_platform_ipi_callback();
-}
-
__visible void __irq_entry smp_x86_platform_ipi(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
entering_ack_irq();
- __smp_x86_platform_ipi();
+ trace_x86_platform_ipi_entry(X86_PLATFORM_IPI_VECTOR);
+ inc_irq_stat(x86_platform_ipis);
+ if (x86_platform_ipi_callback)
+ x86_platform_ipi_callback();
+ trace_x86_platform_ipi_exit(X86_PLATFORM_IPI_VECTOR);
exiting_irq();
set_irq_regs(old_regs);
}
+#endif
#ifdef CONFIG_HAVE_KVM
static void dummy_handler(void) {}
@@ -313,21 +316,21 @@ __visible void smp_kvm_posted_intr_wakeup_ipi(struct pt_regs *regs)
exiting_irq();
set_irq_regs(old_regs);
}
-#endif
-__visible void __irq_entry smp_trace_x86_platform_ipi(struct pt_regs *regs)
+/*
+ * Handler for POSTED_INTERRUPT_NESTED_VECTOR.
+ */
+__visible void smp_kvm_posted_intr_nested_ipi(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
entering_ack_irq();
- trace_x86_platform_ipi_entry(X86_PLATFORM_IPI_VECTOR);
- __smp_x86_platform_ipi();
- trace_x86_platform_ipi_exit(X86_PLATFORM_IPI_VECTOR);
+ inc_irq_stat(kvm_posted_intr_nested_ipis);
exiting_irq();
set_irq_regs(old_regs);
}
+#endif
-EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);
#ifdef CONFIG_HOTPLUG_CPU
@@ -412,7 +415,7 @@ int check_irq_vectors_for_cpu_disable(void)
* this w/o holding vector_lock.
*/
for (vector = FIRST_EXTERNAL_VECTOR;
- vector < first_system_vector; vector++) {
+ vector < FIRST_SYSTEM_VECTOR; vector++) {
if (!test_bit(vector, used_vectors) &&
IS_ERR_OR_NULL(per_cpu(vector_irq, cpu)[vector])) {
if (++count == this_count)
diff --git a/arch/x86/kernel/irq_work.c b/arch/x86/kernel/irq_work.c
index 275487872be2..70dee056f92b 100644
--- a/arch/x86/kernel/irq_work.c
+++ b/arch/x86/kernel/irq_work.c
@@ -11,35 +11,23 @@
#include <asm/trace/irq_vectors.h>
#include <linux/interrupt.h>
-static inline void __smp_irq_work_interrupt(void)
-{
- inc_irq_stat(apic_irq_work_irqs);
- irq_work_run();
-}
-
+#ifdef CONFIG_X86_LOCAL_APIC
__visible void __irq_entry smp_irq_work_interrupt(struct pt_regs *regs)
{
ipi_entering_ack_irq();
- __smp_irq_work_interrupt();
- exiting_irq();
-}
-
-__visible void __irq_entry smp_trace_irq_work_interrupt(struct pt_regs *regs)
-{
- ipi_entering_ack_irq();
trace_irq_work_entry(IRQ_WORK_VECTOR);
- __smp_irq_work_interrupt();
+ inc_irq_stat(apic_irq_work_irqs);
+ irq_work_run();
trace_irq_work_exit(IRQ_WORK_VECTOR);
exiting_irq();
}
void arch_irq_work_raise(void)
{
-#ifdef CONFIG_X86_LOCAL_APIC
if (!arch_irq_work_has_interrupt())
return;
apic->send_IPI_self(IRQ_WORK_VECTOR);
apic_wait_icr_idle();
-#endif
}
+#endif
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index 7468c6987547..1add9e08e83e 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -55,18 +55,6 @@ DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
[0 ... NR_VECTORS - 1] = VECTOR_UNUSED,
};
-int vector_used_by_percpu_irq(unsigned int vector)
-{
- int cpu;
-
- for_each_online_cpu(cpu) {
- if (!IS_ERR_OR_NULL(per_cpu(vector_irq, cpu)[vector]))
- return 1;
- }
-
- return 0;
-}
-
void __init init_ISA_irqs(void)
{
struct irq_chip *chip = legacy_pic->chip;
@@ -99,98 +87,12 @@ void __init init_IRQ(void)
x86_init.irqs.intr_init();
}
-static void __init smp_intr_init(void)
-{
-#ifdef CONFIG_SMP
- /*
- * The reschedule interrupt is a CPU-to-CPU reschedule-helper
- * IPI, driven by wakeup.
- */
- alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
-
- /* IPI for generic function call */
- alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
-
- /* IPI for generic single function call */
- alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
- call_function_single_interrupt);
-
- /* Low priority IPI to cleanup after moving an irq */
- set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
- set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors);
-
- /* IPI used for rebooting/stopping */
- alloc_intr_gate(REBOOT_VECTOR, reboot_interrupt);
-#endif /* CONFIG_SMP */
-}
-
-static void __init apic_intr_init(void)
-{
- smp_intr_init();
-
-#ifdef CONFIG_X86_THERMAL_VECTOR
- alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
-#endif
-#ifdef CONFIG_X86_MCE_THRESHOLD
- alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt);
-#endif
-
-#ifdef CONFIG_X86_MCE_AMD
- alloc_intr_gate(DEFERRED_ERROR_VECTOR, deferred_error_interrupt);
-#endif
-
-#ifdef CONFIG_X86_LOCAL_APIC
- /* self generated IPI for local APIC timer */
- alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
-
- /* IPI for X86 platform specific use */
- alloc_intr_gate(X86_PLATFORM_IPI_VECTOR, x86_platform_ipi);
-#ifdef CONFIG_HAVE_KVM
- /* IPI for KVM to deliver posted interrupt */
- alloc_intr_gate(POSTED_INTR_VECTOR, kvm_posted_intr_ipi);
- /* IPI for KVM to deliver interrupt to wake up tasks */
- alloc_intr_gate(POSTED_INTR_WAKEUP_VECTOR, kvm_posted_intr_wakeup_ipi);
-#endif
-
- /* IPI vectors for APIC spurious and error interrupts */
- alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
- alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
-
- /* IRQ work interrupts: */
-# ifdef CONFIG_IRQ_WORK
- alloc_intr_gate(IRQ_WORK_VECTOR, irq_work_interrupt);
-# endif
-
-#endif
-}
-
void __init native_init_IRQ(void)
{
- int i;
-
/* Execute any quirks before the call gates are initialised: */
x86_init.irqs.pre_vector_init();
- apic_intr_init();
-
- /*
- * Cover the whole vector space, no vector can escape
- * us. (some of these will be overridden and become
- * 'special' SMP interrupts)
- */
- i = FIRST_EXTERNAL_VECTOR;
-#ifndef CONFIG_X86_LOCAL_APIC
-#define first_system_vector NR_VECTORS
-#endif
- for_each_clear_bit_from(i, used_vectors, first_system_vector) {
- /* IA32_SYSCALL_VECTOR could be used in trap_init already. */
- set_intr_gate(i, irq_entries_start +
- 8 * (i - FIRST_EXTERNAL_VECTOR));
- }
-#ifdef CONFIG_X86_LOCAL_APIC
- for_each_clear_bit_from(i, used_vectors, NR_VECTORS)
- set_intr_gate(i, spurious_interrupt);
-#endif
+ idt_setup_apic_and_irq_gates();
if (!acpi_ioapic && !of_ioapic && nr_legacy_irqs())
setup_irq(2, &irq2);
diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
index 38b64587b31b..fd6f8fbbe6f2 100644
--- a/arch/x86/kernel/kdebugfs.c
+++ b/arch/x86/kernel/kdebugfs.c
@@ -33,7 +33,6 @@ static ssize_t setup_data_read(struct file *file, char __user *user_buf,
struct setup_data_node *node = file->private_data;
unsigned long remain;
loff_t pos = *ppos;
- struct page *pg;
void *p;
u64 pa;
@@ -47,18 +46,13 @@ static ssize_t setup_data_read(struct file *file, char __user *user_buf,
count = node->len - pos;
pa = node->paddr + sizeof(struct setup_data) + pos;
- pg = pfn_to_page((pa + count - 1) >> PAGE_SHIFT);
- if (PageHighMem(pg)) {
- p = ioremap_cache(pa, count);
- if (!p)
- return -ENXIO;
- } else
- p = __va(pa);
+ p = memremap(pa, count, MEMREMAP_WB);
+ if (!p)
+ return -ENOMEM;
remain = copy_to_user(user_buf, p, count);
- if (PageHighMem(pg))
- iounmap(p);
+ memunmap(p);
if (remain)
return -EFAULT;
@@ -109,7 +103,6 @@ static int __init create_setup_data_nodes(struct dentry *parent)
struct setup_data *data;
int error;
struct dentry *d;
- struct page *pg;
u64 pa_data;
int no = 0;
@@ -126,16 +119,12 @@ static int __init create_setup_data_nodes(struct dentry *parent)
goto err_dir;
}
- pg = pfn_to_page((pa_data+sizeof(*data)-1) >> PAGE_SHIFT);
- if (PageHighMem(pg)) {
- data = ioremap_cache(pa_data, sizeof(*data));
- if (!data) {
- kfree(node);
- error = -ENXIO;
- goto err_dir;
- }
- } else
- data = __va(pa_data);
+ data = memremap(pa_data, sizeof(*data), MEMREMAP_WB);
+ if (!data) {
+ kfree(node);
+ error = -ENOMEM;
+ goto err_dir;
+ }
node->paddr = pa_data;
node->type = data->type;
@@ -143,8 +132,7 @@ static int __init create_setup_data_nodes(struct dentry *parent)
error = create_setup_data_node(d, no, node);
pa_data = data->next;
- if (PageHighMem(pg))
- iounmap(data);
+ memunmap(data);
if (error)
goto err_dir;
no++;
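
The kdebugfs.c hunks above replace the PageHighMem()/ioremap_cache() special-casing with memremap(MEMREMAP_WB), which hands back a cacheable mapping for ordinary RAM whether or not it is direct-mapped, so the highmem branch and the matching iounmap() disappear. A minimal, illustrative sketch of that pattern follows; it is not part of the patch, and read_setup_data_field() is a hypothetical helper name.

#include <linux/io.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Sketch only: map a setup_data region, copy it out, and always unmap. */
static int read_setup_data_field(u64 pa, size_t len, void *out)
{
	void *p = memremap(pa, len, MEMREMAP_WB);	/* cached mapping, works for any RAM */

	if (!p)
		return -ENOMEM;

	memcpy(out, p, len);
	memunmap(p);					/* single cleanup path, no highmem check */
	return 0;
}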
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 6b877807598b..f0153714ddac 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -457,6 +457,8 @@ static int arch_copy_kprobe(struct kprobe *p)
int arch_prepare_kprobe(struct kprobe *p)
{
+ int ret;
+
if (alternatives_text_reserved(p->addr, p->addr))
return -EINVAL;
@@ -467,7 +469,13 @@ int arch_prepare_kprobe(struct kprobe *p)
if (!p->ainsn.insn)
return -ENOMEM;
- return arch_copy_kprobe(p);
+ ret = arch_copy_kprobe(p);
+ if (ret) {
+ free_insn_slot(p->ainsn.insn, 0);
+ p->ainsn.insn = NULL;
+ }
+
+ return ret;
}
void arch_arm_kprobe(struct kprobe *p)
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 69ea0bc1cfa3..4f98aad38237 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -39,6 +39,7 @@
#include <asm/insn.h>
#include <asm/debugreg.h>
#include <asm/set_memory.h>
+#include <asm/sections.h>
#include "common.h"
@@ -251,10 +252,12 @@ static int can_optimize(unsigned long paddr)
/*
* Do not optimize in the entry code due to the unstable
- * stack handling.
+ * stack handling and registers setup.
*/
- if ((paddr >= (unsigned long)__entry_text_start) &&
- (paddr < (unsigned long)__entry_text_end))
+ if (((paddr >= (unsigned long)__entry_text_start) &&
+ (paddr < (unsigned long)__entry_text_end)) ||
+ ((paddr >= (unsigned long)__irqentry_text_start) &&
+ (paddr < (unsigned long)__irqentry_text_end)))
return 0;
/* Check there is enough space for a relative jump. */
diff --git a/arch/x86/kernel/ksysfs.c b/arch/x86/kernel/ksysfs.c
index 4afc67f5facc..4b0592ca9e47 100644
--- a/arch/x86/kernel/ksysfs.c
+++ b/arch/x86/kernel/ksysfs.c
@@ -16,8 +16,8 @@
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/mm.h>
+#include <linux/io.h>
-#include <asm/io.h>
#include <asm/setup.h>
static ssize_t version_show(struct kobject *kobj,
@@ -55,7 +55,7 @@ static struct bin_attribute *boot_params_data_attrs[] = {
NULL,
};
-static struct attribute_group boot_params_attr_group = {
+static const struct attribute_group boot_params_attr_group = {
.attrs = boot_params_version_attrs,
.bin_attrs = boot_params_data_attrs,
};
@@ -79,12 +79,12 @@ static int get_setup_data_paddr(int nr, u64 *paddr)
*paddr = pa_data;
return 0;
}
- data = ioremap_cache(pa_data, sizeof(*data));
+ data = memremap(pa_data, sizeof(*data), MEMREMAP_WB);
if (!data)
return -ENOMEM;
pa_data = data->next;
- iounmap(data);
+ memunmap(data);
i++;
}
return -EINVAL;
@@ -97,17 +97,17 @@ static int __init get_setup_data_size(int nr, size_t *size)
u64 pa_data = boot_params.hdr.setup_data;
while (pa_data) {
- data = ioremap_cache(pa_data, sizeof(*data));
+ data = memremap(pa_data, sizeof(*data), MEMREMAP_WB);
if (!data)
return -ENOMEM;
if (nr == i) {
*size = data->len;
- iounmap(data);
+ memunmap(data);
return 0;
}
pa_data = data->next;
- iounmap(data);
+ memunmap(data);
i++;
}
return -EINVAL;
@@ -127,12 +127,12 @@ static ssize_t type_show(struct kobject *kobj,
ret = get_setup_data_paddr(nr, &paddr);
if (ret)
return ret;
- data = ioremap_cache(paddr, sizeof(*data));
+ data = memremap(paddr, sizeof(*data), MEMREMAP_WB);
if (!data)
return -ENOMEM;
ret = sprintf(buf, "0x%x\n", data->type);
- iounmap(data);
+ memunmap(data);
return ret;
}
@@ -154,7 +154,7 @@ static ssize_t setup_data_data_read(struct file *fp,
ret = get_setup_data_paddr(nr, &paddr);
if (ret)
return ret;
- data = ioremap_cache(paddr, sizeof(*data));
+ data = memremap(paddr, sizeof(*data), MEMREMAP_WB);
if (!data)
return -ENOMEM;
@@ -170,15 +170,15 @@ static ssize_t setup_data_data_read(struct file *fp,
goto out;
ret = count;
- p = ioremap_cache(paddr + sizeof(*data), data->len);
+ p = memremap(paddr + sizeof(*data), data->len, MEMREMAP_WB);
if (!p) {
ret = -ENOMEM;
goto out;
}
memcpy(buf, p + off, count);
- iounmap(p);
+ memunmap(p);
out:
- iounmap(data);
+ memunmap(data);
return ret;
}
@@ -202,7 +202,7 @@ static struct bin_attribute *setup_data_data_attrs[] = {
NULL,
};
-static struct attribute_group setup_data_attr_group = {
+static const struct attribute_group setup_data_attr_group = {
.attrs = setup_data_type_attrs,
.bin_attrs = setup_data_data_attrs,
};
@@ -250,13 +250,13 @@ static int __init get_setup_data_total_num(u64 pa_data, int *nr)
*nr = 0;
while (pa_data) {
*nr += 1;
- data = ioremap_cache(pa_data, sizeof(*data));
+ data = memremap(pa_data, sizeof(*data), MEMREMAP_WB);
if (!data) {
ret = -ENOMEM;
goto out;
}
pa_data = data->next;
- iounmap(data);
+ memunmap(data);
}
out:
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 71c17a5be983..874827b0d7ca 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -151,6 +151,8 @@ void kvm_async_pf_task_wait(u32 token)
if (hlist_unhashed(&n.link))
break;
+ rcu_irq_exit();
+
if (!n.halted) {
local_irq_enable();
schedule();
@@ -159,11 +161,11 @@ void kvm_async_pf_task_wait(u32 token)
/*
* We cannot reschedule. So halt.
*/
- rcu_irq_exit();
native_safe_halt();
local_irq_disable();
- rcu_irq_enter();
}
+
+ rcu_irq_enter();
}
if (!n.halted)
finish_swait(&n.wq, &wait);
@@ -261,7 +263,7 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
switch (kvm_read_and_reset_pf_reason()) {
default:
- trace_do_page_fault(regs, error_code);
+ do_page_fault(regs, error_code);
break;
case KVM_PV_REASON_PAGE_NOT_PRESENT:
/* page is swapped out by the host. */
@@ -453,7 +455,7 @@ static int kvm_cpu_down_prepare(unsigned int cpu)
static void __init kvm_apf_trap_init(void)
{
- set_intr_gate(14, async_page_fault);
+ update_intr_gate(X86_TRAP_PF, async_page_fault);
}
void __init kvm_guest_init(void)
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index a870910c8565..f0e64db18ac8 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -21,6 +21,25 @@
#include <asm/mmu_context.h>
#include <asm/syscalls.h>
+static void refresh_ldt_segments(void)
+{
+#ifdef CONFIG_X86_64
+ unsigned short sel;
+
+ /*
+ * Make sure that the cached DS and ES descriptors match the updated
+ * LDT.
+ */
+ savesegment(ds, sel);
+ if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT)
+ loadsegment(ds, sel);
+
+ savesegment(es, sel);
+ if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT)
+ loadsegment(es, sel);
+#endif
+}
+
/* context.lock is held for us, so we don't need any locking. */
static void flush_ldt(void *__mm)
{
@@ -32,6 +51,8 @@ static void flush_ldt(void *__mm)
pc = &mm->context;
set_ldt(pc->ldt->entries, pc->ldt->nr_entries);
+
+ refresh_ldt_segments();
}
/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index 8c53c5d7a1bc..00bc751c861c 100644
--- a/arch/x86/kernel/machine_kexec_32.c
+++ b/arch/x86/kernel/machine_kexec_32.c
@@ -26,18 +26,6 @@
#include <asm/set_memory.h>
#include <asm/debugreg.h>
-static void set_idt(void *newidt, __u16 limit)
-{
- struct desc_ptr curidt;
-
- /* ia32 supports unaliged loads & stores */
- curidt.size = limit;
- curidt.address = (unsigned long)newidt;
-
- load_idt(&curidt);
-}
-
-
static void set_gdt(void *newgdt, __u16 limit)
{
struct desc_ptr curgdt;
@@ -245,7 +233,7 @@ void machine_kexec(struct kimage *image)
* If you want to load them you must set up your own idt & gdt.
*/
set_gdt(phys_to_virt(0), 0);
- set_idt(phys_to_virt(0), 0);
+ idt_invalidate(phys_to_virt(0));
/* now call it */
image->start = relocate_kernel_ptr((unsigned long)image->head,
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index cb0a30473c23..1f790cf9d38f 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -87,7 +87,7 @@ static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
}
pte = pte_offset_kernel(pmd, vaddr);
- set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
+ set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC_NOENC));
return 0;
err:
free_transition_pgtable(image);
@@ -115,6 +115,7 @@ static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
.alloc_pgt_page = alloc_pgt_page,
.context = image,
.page_flag = __PAGE_KERNEL_LARGE_EXEC,
+ .kernpg_flag = _KERNPG_TABLE_NOENC,
};
unsigned long mstart, mend;
pgd_t *level4p;
@@ -334,7 +335,8 @@ void machine_kexec(struct kimage *image)
image->start = relocate_kernel((unsigned long)image->head,
(unsigned long)page_list,
image->start,
- image->preserve_context);
+ image->preserve_context,
+ sme_active());
#ifdef CONFIG_KEXEC_JUMP
if (image->preserve_context)
@@ -602,3 +604,22 @@ void arch_kexec_unprotect_crashkres(void)
{
kexec_mark_crashkres(false);
}
+
+int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, gfp_t gfp)
+{
+ /*
+ * If SME is active we need to be sure that kexec pages are
+ * not encrypted because when we boot to the new kernel the
+ * pages won't be accessed encrypted (initially).
+ */
+ return set_memory_decrypted((unsigned long)vaddr, pages);
+}
+
+void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages)
+{
+ /*
+ * If SME is active we need to reset the pages back to being
+ * an encrypted mapping before freeing them.
+ */
+ set_memory_encrypted((unsigned long)vaddr, pages);
+}
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index f67bd3205df7..62e7d70aadd5 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -35,6 +35,7 @@
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
+#include <asm/unwind.h>
#if 0
#define DEBUGP(fmt, ...) \
@@ -213,7 +214,7 @@ int module_finalize(const Elf_Ehdr *hdr,
struct module *me)
{
const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL,
- *para = NULL;
+ *para = NULL, *orc = NULL, *orc_ip = NULL;
char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
@@ -225,6 +226,10 @@ int module_finalize(const Elf_Ehdr *hdr,
locks = s;
if (!strcmp(".parainstructions", secstrings + s->sh_name))
para = s;
+ if (!strcmp(".orc_unwind", secstrings + s->sh_name))
+ orc = s;
+ if (!strcmp(".orc_unwind_ip", secstrings + s->sh_name))
+ orc_ip = s;
}
if (alt) {
@@ -248,6 +253,10 @@ int module_finalize(const Elf_Ehdr *hdr,
/* make jump label nops */
jump_label_apply_nops(me);
+ if (orc && orc_ip)
+ unwind_module_init(me, (void *)orc_ip->sh_addr, orc_ip->sh_size,
+ (void *)orc->sh_addr, orc->sh_size);
+
return 0;
}
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 0d904d759ff1..5cbb3177ed17 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -429,16 +429,16 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type)
}
}
-static struct mpf_intel *mpf_found;
+static unsigned long mpf_base;
static unsigned long __init get_mpc_size(unsigned long physptr)
{
struct mpc_table *mpc;
unsigned long size;
- mpc = early_ioremap(physptr, PAGE_SIZE);
+ mpc = early_memremap(physptr, PAGE_SIZE);
size = mpc->length;
- early_iounmap(mpc, PAGE_SIZE);
+ early_memunmap(mpc, PAGE_SIZE);
apic_printk(APIC_VERBOSE, " mpc: %lx-%lx\n", physptr, physptr + size);
return size;
@@ -450,7 +450,8 @@ static int __init check_physptr(struct mpf_intel *mpf, unsigned int early)
unsigned long size;
size = get_mpc_size(mpf->physptr);
- mpc = early_ioremap(mpf->physptr, size);
+ mpc = early_memremap(mpf->physptr, size);
+
/*
* Read the physical hardware table. Anything here will
* override the defaults.
@@ -461,10 +462,10 @@ static int __init check_physptr(struct mpf_intel *mpf, unsigned int early)
#endif
pr_err("BIOS bug, MP table errors detected!...\n");
pr_cont("... disabling SMP support. (tell your hw vendor)\n");
- early_iounmap(mpc, size);
+ early_memunmap(mpc, size);
return -1;
}
- early_iounmap(mpc, size);
+ early_memunmap(mpc, size);
if (early)
return -1;
@@ -497,12 +498,12 @@ static int __init check_physptr(struct mpf_intel *mpf, unsigned int early)
*/
void __init default_get_smp_config(unsigned int early)
{
- struct mpf_intel *mpf = mpf_found;
+ struct mpf_intel *mpf;
if (!smp_found_config)
return;
- if (!mpf)
+ if (!mpf_base)
return;
if (acpi_lapic && early)
@@ -515,6 +516,12 @@ void __init default_get_smp_config(unsigned int early)
if (acpi_lapic && acpi_ioapic)
return;
+ mpf = early_memremap(mpf_base, sizeof(*mpf));
+ if (!mpf) {
+ pr_err("MPTABLE: error mapping MP table\n");
+ return;
+ }
+
pr_info("Intel MultiProcessor Specification v1.%d\n",
mpf->specification);
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
@@ -529,7 +536,7 @@ void __init default_get_smp_config(unsigned int early)
/*
* Now see if we need to read further.
*/
- if (mpf->feature1 != 0) {
+ if (mpf->feature1) {
if (early) {
/*
* local APIC has default address
@@ -542,8 +549,10 @@ void __init default_get_smp_config(unsigned int early)
construct_default_ISA_mptable(mpf->feature1);
} else if (mpf->physptr) {
- if (check_physptr(mpf, early))
+ if (check_physptr(mpf, early)) {
+ early_memunmap(mpf, sizeof(*mpf));
return;
+ }
} else
BUG();
@@ -552,6 +561,8 @@ void __init default_get_smp_config(unsigned int early)
/*
* Only use the first configuration found.
*/
+
+ early_memunmap(mpf, sizeof(*mpf));
}
static void __init smp_reserve_memory(struct mpf_intel *mpf)
@@ -561,15 +572,16 @@ static void __init smp_reserve_memory(struct mpf_intel *mpf)
static int __init smp_scan_config(unsigned long base, unsigned long length)
{
- unsigned int *bp = phys_to_virt(base);
+ unsigned int *bp;
struct mpf_intel *mpf;
- unsigned long mem;
+ int ret = 0;
apic_printk(APIC_VERBOSE, "Scan for SMP in [mem %#010lx-%#010lx]\n",
base, base + length - 1);
BUILD_BUG_ON(sizeof(*mpf) != 16);
while (length > 0) {
+ bp = early_memremap(base, length);
mpf = (struct mpf_intel *)bp;
if ((*bp == SMP_MAGIC_IDENT) &&
(mpf->length == 1) &&
@@ -579,24 +591,26 @@ static int __init smp_scan_config(unsigned long base, unsigned long length)
#ifdef CONFIG_X86_LOCAL_APIC
smp_found_config = 1;
#endif
- mpf_found = mpf;
+ mpf_base = base;
- pr_info("found SMP MP-table at [mem %#010llx-%#010llx] mapped at [%p]\n",
- (unsigned long long) virt_to_phys(mpf),
- (unsigned long long) virt_to_phys(mpf) +
- sizeof(*mpf) - 1, mpf);
+ pr_info("found SMP MP-table at [mem %#010lx-%#010lx] mapped at [%p]\n",
+ base, base + sizeof(*mpf) - 1, mpf);
- mem = virt_to_phys(mpf);
- memblock_reserve(mem, sizeof(*mpf));
+ memblock_reserve(base, sizeof(*mpf));
if (mpf->physptr)
smp_reserve_memory(mpf);
- return 1;
+ ret = 1;
}
- bp += 4;
+ early_memunmap(bp, length);
+
+ if (ret)
+ break;
+
+ base += 16;
length -= 16;
}
- return 0;
+ return ret;
}
void __init default_find_smp_config(void)
@@ -838,29 +852,40 @@ static int __init update_mp_table(void)
char oem[10];
struct mpf_intel *mpf;
struct mpc_table *mpc, *mpc_new;
+ unsigned long size;
if (!enable_update_mptable)
return 0;
- mpf = mpf_found;
- if (!mpf)
+ if (!mpf_base)
return 0;
+ mpf = early_memremap(mpf_base, sizeof(*mpf));
+ if (!mpf) {
+ pr_err("MPTABLE: mpf early_memremap() failed\n");
+ return 0;
+ }
+
/*
* Now see if we need to go further.
*/
- if (mpf->feature1 != 0)
- return 0;
+ if (mpf->feature1)
+ goto do_unmap_mpf;
if (!mpf->physptr)
- return 0;
+ goto do_unmap_mpf;
- mpc = phys_to_virt(mpf->physptr);
+ size = get_mpc_size(mpf->physptr);
+ mpc = early_memremap(mpf->physptr, size);
+ if (!mpc) {
+ pr_err("MPTABLE: mpc early_memremap() failed\n");
+ goto do_unmap_mpf;
+ }
if (!smp_check_mpc(mpc, oem, str))
- return 0;
+ goto do_unmap_mpc;
- pr_info("mpf: %llx\n", (u64)virt_to_phys(mpf));
+ pr_info("mpf: %llx\n", (u64)mpf_base);
pr_info("physptr: %x\n", mpf->physptr);
if (mpc_new_phys && mpc->length > mpc_new_length) {
@@ -878,21 +903,32 @@ static int __init update_mp_table(void)
new = mpf_checksum((unsigned char *)mpc, mpc->length);
if (old == new) {
pr_info("mpc is readonly, please try alloc_mptable instead\n");
- return 0;
+ goto do_unmap_mpc;
}
pr_info("use in-position replacing\n");
} else {
+ mpc_new = early_memremap(mpc_new_phys, mpc_new_length);
+ if (!mpc_new) {
+ pr_err("MPTABLE: new mpc early_memremap() failed\n");
+ goto do_unmap_mpc;
+ }
mpf->physptr = mpc_new_phys;
- mpc_new = phys_to_virt(mpc_new_phys);
memcpy(mpc_new, mpc, mpc->length);
+ early_memunmap(mpc, size);
mpc = mpc_new;
+ size = mpc_new_length;
/* check if we can modify that */
if (mpc_new_phys - mpf->physptr) {
struct mpf_intel *mpf_new;
/* steal 16 bytes from [0, 1k) */
+ mpf_new = early_memremap(0x400 - 16, sizeof(*mpf_new));
+ if (!mpf_new) {
+ pr_err("MPTABLE: new mpf early_memremap() failed\n");
+ goto do_unmap_mpc;
+ }
pr_info("mpf new: %x\n", 0x400 - 16);
- mpf_new = phys_to_virt(0x400 - 16);
memcpy(mpf_new, mpf, 16);
+ early_memunmap(mpf, sizeof(*mpf));
mpf = mpf_new;
mpf->physptr = mpc_new_phys;
}
@@ -909,6 +945,12 @@ static int __init update_mp_table(void)
*/
replace_intsrc_all(mpc, mpc_new_phys, mpc_new_length);
+do_unmap_mpc:
+ early_memunmap(mpc, size);
+
+do_unmap_mpf:
+ early_memunmap(mpf, sizeof(*mpf));
+
return 0;
}
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index 446c8aa09b9b..35aafc95e4b8 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -39,26 +39,26 @@
#include <trace/events/nmi.h>
struct nmi_desc {
- spinlock_t lock;
+ raw_spinlock_t lock;
struct list_head head;
};
static struct nmi_desc nmi_desc[NMI_MAX] =
{
{
- .lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[0].lock),
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[0].lock),
.head = LIST_HEAD_INIT(nmi_desc[0].head),
},
{
- .lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[1].lock),
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[1].lock),
.head = LIST_HEAD_INIT(nmi_desc[1].head),
},
{
- .lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[2].lock),
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[2].lock),
.head = LIST_HEAD_INIT(nmi_desc[2].head),
},
{
- .lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[3].lock),
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(&nmi_desc[3].lock),
.head = LIST_HEAD_INIT(nmi_desc[3].head),
},
@@ -163,7 +163,7 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
init_irq_work(&action->irq_work, nmi_max_handler);
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
/*
* Indicate if there are multiple registrations on the
@@ -181,7 +181,7 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
else
list_add_tail_rcu(&action->list, &desc->head);
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}
EXPORT_SYMBOL(__register_nmi_handler);
@@ -192,7 +192,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
struct nmiaction *n;
unsigned long flags;
- spin_lock_irqsave(&desc->lock, flags);
+ raw_spin_lock_irqsave(&desc->lock, flags);
list_for_each_entry_rcu(n, &desc->head, list) {
/*
@@ -207,7 +207,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
}
}
- spin_unlock_irqrestore(&desc->lock, flags);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
synchronize_rcu();
}
EXPORT_SYMBOL_GPL(unregister_nmi_handler);
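
The nmi.c conversion above switches the NMI descriptor locks from spinlock_t to raw_spinlock_t; a raw spinlock stays a true spinning lock even on PREEMPT_RT, where plain spinlock_t becomes a sleeping lock and is therefore unusable from NMI context. A minimal, illustrative sketch of the pattern, with hypothetical names (my_nmi_list_lock, my_nmi_list_update), not part of the patch:

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(my_nmi_list_lock);

/* Sketch only: protect a list that is also walked from NMI context. */
static void my_nmi_list_update(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&my_nmi_list_lock, flags);
	/* ... modify the list ... */
	raw_spin_unlock_irqrestore(&my_nmi_list_lock, flags);
}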
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index bc0a849589bb..a14df9eecfed 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -319,9 +319,6 @@ __visible struct pv_irq_ops pv_irq_ops = {
.irq_enable = __PV_IS_CALLEE_SAVE(native_irq_enable),
.safe_halt = native_safe_halt,
.halt = native_halt,
-#ifdef CONFIG_X86_64
- .adjust_exception_frame = paravirt_nop,
-#endif
};
__visible struct pv_cpu_ops pv_cpu_ops = {
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 5e16d3f29594..0accc2404b92 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -93,9 +93,12 @@ again:
if (gfpflags_allow_blocking(flag)) {
page = dma_alloc_from_contiguous(dev, count, get_order(size),
flag);
- if (page && page_to_phys(page) + size > dma_mask) {
- dma_release_from_contiguous(dev, page, count);
- page = NULL;
+ if (page) {
+ addr = phys_to_dma(dev, page_to_phys(page));
+ if (addr + size > dma_mask) {
+ dma_release_from_contiguous(dev, page, count);
+ page = NULL;
+ }
}
}
/* fallback */
@@ -104,7 +107,7 @@ again:
if (!page)
return NULL;
- addr = page_to_phys(page);
+ addr = phys_to_dma(dev, page_to_phys(page));
if (addr + size > dma_mask) {
__free_pages(page, get_order(size));
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index a6d404087fe3..4fc3cb60ea11 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -32,7 +32,7 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
enum dma_data_direction dir,
unsigned long attrs)
{
- dma_addr_t bus = page_to_phys(page) + offset;
+ dma_addr_t bus = phys_to_dma(dev, page_to_phys(page)) + offset;
WARN_ON(size == 0);
if (!check_addr("map_single", dev, bus, size))
return NOMMU_MAPPING_ERROR;
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index 1e23577e17cf..677077510e30 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -6,12 +6,14 @@
#include <linux/swiotlb.h>
#include <linux/bootmem.h>
#include <linux/dma-mapping.h>
+#include <linux/mem_encrypt.h>
#include <asm/iommu.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/xen/swiotlb-xen.h>
#include <asm/iommu_table.h>
+
int swiotlb __read_mostly;
void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
@@ -79,8 +81,8 @@ IOMMU_INIT_FINISH(pci_swiotlb_detect_override,
pci_swiotlb_late_init);
/*
- * if 4GB or more detected (and iommu=off not set) return 1
- * and set swiotlb to 1.
+ * If 4GB or more detected (and iommu=off not set) or if SME is active
+ * then set swiotlb to 1 and return 1.
*/
int __init pci_swiotlb_detect_4gb(void)
{
@@ -89,6 +91,15 @@ int __init pci_swiotlb_detect_4gb(void)
if (!no_iommu && max_possible_pfn > MAX_DMA32_PFN)
swiotlb = 1;
#endif
+
+ /*
+ * If SME is active then swiotlb will be set to 1 so that bounce
+ * buffers are allocated and used for devices that do not support
+ * the addressing range required for the encryption mask.
+ */
+ if (sme_active())
+ swiotlb = 1;
+
return swiotlb;
}
IOMMU_INIT(pci_swiotlb_detect_4gb,
diff --git a/arch/x86/kernel/platform-quirks.c b/arch/x86/kernel/platform-quirks.c
index 91271122f0df..502a77d0adb0 100644
--- a/arch/x86/kernel/platform-quirks.c
+++ b/arch/x86/kernel/platform-quirks.c
@@ -16,7 +16,6 @@ void __init x86_early_init_platform_quirks(void)
x86_platform.legacy.reserve_bios_regions = 1;
break;
case X86_SUBARCH_XEN:
- case X86_SUBARCH_LGUEST:
x86_platform.legacy.devices.pnpbios = 0;
x86_platform.legacy.rtc = 0;
break;
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 3ca198080ea9..bd6b85fac666 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -355,6 +355,7 @@ bool xen_set_default_idle(void)
return ret;
}
#endif
+
void stop_this_cpu(void *dummy)
{
local_irq_disable();
@@ -365,8 +366,20 @@ void stop_this_cpu(void *dummy)
disable_local_APIC();
mcheck_cpu_clear(this_cpu_ptr(&cpu_info));
- for (;;)
- halt();
+ for (;;) {
+ /*
+ * Use wbinvd followed by hlt to stop the processor. This
+ * provides support for kexec on a processor that supports
+ * SME. With kexec, going from SME inactive to SME active
+ * requires clearing cache entries so that addresses without
+ * the encryption bit set don't corrupt the same physical
+ * address that has the encryption bit set when caches are
+ * flushed. To achieve this a wbinvd is performed followed by
+ * a hlt. Even if the processor is not in the kexec/SME
+ * scenario this only adds a wbinvd to a halting processor.
+ */
+ asm volatile("wbinvd; hlt" : : : "memory");
+ }
}
/*
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index c6d6dc5f8bb2..11966251cd42 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -56,7 +56,7 @@
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/vm86.h>
-#include <asm/intel_rdt.h>
+#include <asm/intel_rdt_sched.h>
#include <asm/proto.h>
void __show_regs(struct pt_regs *regs, int all)
@@ -68,7 +68,7 @@ void __show_regs(struct pt_regs *regs, int all)
if (user_mode(regs)) {
sp = regs->sp;
- ss = regs->ss & 0xffff;
+ ss = regs->ss;
gs = get_user_gs(regs);
} else {
sp = kernel_stack_pointer(regs);
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index c3169be4c596..302e7b2572d1 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -52,7 +52,7 @@
#include <asm/switch_to.h>
#include <asm/xen/hypervisor.h>
#include <asm/vdso.h>
-#include <asm/intel_rdt.h>
+#include <asm/intel_rdt_sched.h>
#include <asm/unistd.h>
#ifdef CONFIG_IA32_EMULATION
/* Not included via unistd.h */
@@ -69,8 +69,7 @@ void __show_regs(struct pt_regs *regs, int all)
unsigned int fsindex, gsindex;
unsigned int ds, cs, es;
- printk(KERN_DEFAULT "RIP: %04lx:%pS\n", regs->cs & 0xffff,
- (void *)regs->ip);
+ printk(KERN_DEFAULT "RIP: %04lx:%pS\n", regs->cs, (void *)regs->ip);
printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx", regs->ss,
regs->sp, regs->flags);
if (regs->orig_ax != -1)
@@ -149,6 +148,123 @@ void release_thread(struct task_struct *dead_task)
}
}
+enum which_selector {
+ FS,
+ GS
+};
+
+/*
+ * Saves the FS or GS base for an outgoing thread if FSGSBASE extensions are
+ * not available. The goal is to be reasonably fast on non-FSGSBASE systems.
+ * It's forcibly inlined because it'll generate better code and this function
+ * is hot.
+ */
+static __always_inline void save_base_legacy(struct task_struct *prev_p,
+ unsigned short selector,
+ enum which_selector which)
+{
+ if (likely(selector == 0)) {
+ /*
+ * On Intel (without X86_BUG_NULL_SEG), the segment base could
+ * be the pre-existing saved base or it could be zero. On AMD
+ * (with X86_BUG_NULL_SEG), the segment base could be almost
+ * anything.
+ *
+ * This branch is very hot (it's hit twice on almost every
+ * context switch between 64-bit programs), and avoiding
+ * the RDMSR helps a lot, so we just assume that whatever
+ * value is already saved is correct. This matches historical
+ * Linux behavior, so it won't break existing applications.
+ *
+ * To avoid leaking state, on non-X86_BUG_NULL_SEG CPUs, if we
+ * report that the base is zero, it needs to actually be zero:
+ * see the corresponding logic in load_seg_legacy.
+ */
+ } else {
+ /*
+ * If the selector is 1, 2, or 3, then the base is zero on
+ * !X86_BUG_NULL_SEG CPUs and could be anything on
+ * X86_BUG_NULL_SEG CPUs. In the latter case, Linux
+ * has never attempted to preserve the base across context
+ * switches.
+ *
+ * If selector > 3, then it refers to a real segment, and
+ * saving the base isn't necessary.
+ */
+ if (which == FS)
+ prev_p->thread.fsbase = 0;
+ else
+ prev_p->thread.gsbase = 0;
+ }
+}
+
+static __always_inline void save_fsgs(struct task_struct *task)
+{
+ savesegment(fs, task->thread.fsindex);
+ savesegment(gs, task->thread.gsindex);
+ save_base_legacy(task, task->thread.fsindex, FS);
+ save_base_legacy(task, task->thread.gsindex, GS);
+}
+
+static __always_inline void loadseg(enum which_selector which,
+ unsigned short sel)
+{
+ if (which == FS)
+ loadsegment(fs, sel);
+ else
+ load_gs_index(sel);
+}
+
+static __always_inline void load_seg_legacy(unsigned short prev_index,
+ unsigned long prev_base,
+ unsigned short next_index,
+ unsigned long next_base,
+ enum which_selector which)
+{
+ if (likely(next_index <= 3)) {
+ /*
+ * The next task is using 64-bit TLS, is not using this
+ * segment at all, or is having fun with arcane CPU features.
+ */
+ if (next_base == 0) {
+ /*
+ * Nasty case: on AMD CPUs, we need to forcibly zero
+ * the base.
+ */
+ if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
+ loadseg(which, __USER_DS);
+ loadseg(which, next_index);
+ } else {
+ /*
+ * We could try to exhaustively detect cases
+ * under which we can skip the segment load,
+ * but there's really only one case that matters
+ * for performance: if both the previous and
+ * next states are fully zeroed, we can skip
+ * the load.
+ *
+ * (This assumes that prev_base == 0 has no
+ * false positives. This is the case on
+ * Intel-style CPUs.)
+ */
+ if (likely(prev_index | next_index | prev_base))
+ loadseg(which, next_index);
+ }
+ } else {
+ if (prev_index != next_index)
+ loadseg(which, next_index);
+ wrmsrl(which == FS ? MSR_FS_BASE : MSR_KERNEL_GS_BASE,
+ next_base);
+ }
+ } else {
+ /*
+ * The next task is using a real segment. Loading the selector
+ * is sufficient.
+ */
+ loadseg(which, next_index);
+ }
+}
+
int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
unsigned long arg, struct task_struct *p, unsigned long tls)
{
@@ -229,10 +345,19 @@ start_thread_common(struct pt_regs *regs, unsigned long new_ip,
unsigned long new_sp,
unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
+ WARN_ON_ONCE(regs != current_pt_regs());
+
+ if (static_cpu_has(X86_BUG_NULL_SEG)) {
+ /* Loading zero below won't clear the base. */
+ loadsegment(fs, __USER_DS);
+ load_gs_index(__USER_DS);
+ }
+
loadsegment(fs, 0);
loadsegment(es, _ds);
loadsegment(ds, _ds);
load_gs_index(0);
+
regs->ip = new_ip;
regs->sp = new_sp;
regs->cs = _cs;
@@ -277,7 +402,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
struct fpu *next_fpu = &next->fpu;
int cpu = smp_processor_id();
struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
- unsigned prev_fsindex, prev_gsindex;
+
+ WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
+ this_cpu_read(irq_count) != -1);
switch_fpu_prepare(prev_fpu, cpu);
@@ -286,8 +413,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
*
* (e.g. xen_load_tls())
*/
- savesegment(fs, prev_fsindex);
- savesegment(gs, prev_gsindex);
+ save_fsgs(prev_p);
/*
* Load TLS before restoring any segments so that segment loads
@@ -326,108 +452,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
if (unlikely(next->ds | prev->ds))
loadsegment(ds, next->ds);
- /*
- * Switch FS and GS.
- *
- * These are even more complicated than DS and ES: they have
- * 64-bit bases are that controlled by arch_prctl. The bases
- * don't necessarily match the selectors, as user code can do
- * any number of things to cause them to be inconsistent.
- *
- * We don't promise to preserve the bases if the selectors are
- * nonzero. We also don't promise to preserve the base if the
- * selector is zero and the base doesn't match whatever was
- * most recently passed to ARCH_SET_FS/GS. (If/when the
- * FSGSBASE instructions are enabled, we'll need to offer
- * stronger guarantees.)
- *
- * As an invariant,
- * (fsbase != 0 && fsindex != 0) || (gsbase != 0 && gsindex != 0) is
- * impossible.
- */
- if (next->fsindex) {
- /* Loading a nonzero value into FS sets the index and base. */
- loadsegment(fs, next->fsindex);
- } else {
- if (next->fsbase) {
- /* Next index is zero but next base is nonzero. */
- if (prev_fsindex)
- loadsegment(fs, 0);
- wrmsrl(MSR_FS_BASE, next->fsbase);
- } else {
- /* Next base and index are both zero. */
- if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
- /*
- * We don't know the previous base and can't
- * find out without RDMSR. Forcibly clear it.
- */
- loadsegment(fs, __USER_DS);
- loadsegment(fs, 0);
- } else {
- /*
- * If the previous index is zero and ARCH_SET_FS
- * didn't change the base, then the base is
- * also zero and we don't need to do anything.
- */
- if (prev->fsbase || prev_fsindex)
- loadsegment(fs, 0);
- }
- }
- }
- /*
- * Save the old state and preserve the invariant.
- * NB: if prev_fsindex == 0, then we can't reliably learn the base
- * without RDMSR because Intel user code can zero it without telling
- * us and AMD user code can program any 32-bit value without telling
- * us.
- */
- if (prev_fsindex)
- prev->fsbase = 0;
- prev->fsindex = prev_fsindex;
-
- if (next->gsindex) {
- /* Loading a nonzero value into GS sets the index and base. */
- load_gs_index(next->gsindex);
- } else {
- if (next->gsbase) {
- /* Next index is zero but next base is nonzero. */
- if (prev_gsindex)
- load_gs_index(0);
- wrmsrl(MSR_KERNEL_GS_BASE, next->gsbase);
- } else {
- /* Next base and index are both zero. */
- if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
- /*
- * We don't know the previous base and can't
- * find out without RDMSR. Forcibly clear it.
- *
- * This contains a pointless SWAPGS pair.
- * Fixing it would involve an explicit check
- * for Xen or a new pvop.
- */
- load_gs_index(__USER_DS);
- load_gs_index(0);
- } else {
- /*
- * If the previous index is zero and ARCH_SET_GS
- * didn't change the base, then the base is
- * also zero and we don't need to do anything.
- */
- if (prev->gsbase || prev_gsindex)
- load_gs_index(0);
- }
- }
- }
- /*
- * Save the old state and preserve the invariant.
- * NB: if prev_gsindex == 0, then we can't reliably learn the base
- * without RDMSR because Intel user code can zero it without telling
- * us and AMD user code can program any 32-bit value without telling
- * us.
- */
- if (prev_gsindex)
- prev->gsbase = 0;
- prev->gsindex = prev_gsindex;
+ load_seg_legacy(prev->fsindex, prev->fsbase,
+ next->fsindex, next->fsbase, FS);
+ load_seg_legacy(prev->gsindex, prev->gsbase,
+ next->gsindex, next->gsbase, GS);
switch_fpu_finish(next_fpu, cpu);
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 0bee04d41bed..eaa591cfd98b 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -1,6 +1,7 @@
/*
* This file contains work-arounds for x86 and x86_64 platform bugs.
*/
+#include <linux/dmi.h>
#include <linux/pci.h>
#include <linux/irq.h>
@@ -656,3 +657,12 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, quirk_intel_brickland_xeon_
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2083, quirk_intel_purley_xeon_ras_cap);
#endif
#endif
+
+bool x86_apple_machine;
+EXPORT_SYMBOL(x86_apple_machine);
+
+void __init early_platform_quirks(void)
+{
+ x86_apple_machine = dmi_match(DMI_SYS_VENDOR, "Apple Inc.") ||
+ dmi_match(DMI_SYS_VENDOR, "Apple Computer, Inc.");
+}
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 67393fc88353..54984b142641 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -38,8 +38,6 @@
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
-static const struct desc_ptr no_idt = {};
-
/*
* This is set if we need to go through the 'emergency' path.
* When machine_emergency_restart() is called, we may be on
@@ -471,12 +469,12 @@ static int __init reboot_init(void)
/*
* The DMI quirks table takes precedence. If no quirks entry
- * matches and the ACPI Hardware Reduced bit is set, force EFI
- * reboot.
+ * matches and the ACPI Hardware Reduced bit is set and EFI
+ * runtime services are enabled, force EFI reboot.
*/
rv = dmi_check_system(reboot_dmi_table);
- if (!rv && efi_reboot_required())
+ if (!rv && efi_reboot_required() && !efi_runtime_disabled())
reboot_type = BOOT_EFI;
return 0;
@@ -638,7 +636,7 @@ static void native_machine_emergency_restart(void)
break;
case BOOT_TRIPLE:
- load_idt(&no_idt);
+ idt_invalidate(NULL);
__asm__ __volatile__("int3");
/* We're probably dead after this, but... */
diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
index 98111b38ebfd..307d3bac5f04 100644
--- a/arch/x86/kernel/relocate_kernel_64.S
+++ b/arch/x86/kernel/relocate_kernel_64.S
@@ -47,6 +47,7 @@ relocate_kernel:
* %rsi page_list
* %rdx start address
* %rcx preserve_context
+ * %r8 sme_active
*/
/* Save the CPU context, used for jumping back */
@@ -71,6 +72,9 @@ relocate_kernel:
pushq $0
popfq
+ /* Save SME active flag */
+ movq %r8, %r12
+
/*
* get physical address of control page now
* this is impossible after page table switch
@@ -132,6 +136,16 @@ identity_mapped:
/* Flush the TLB (needed?) */
movq %r9, %cr3
+ /*
+ * If SME is active, there could be old encrypted cache line
+ * entries that will conflict with the now unencrypted memory
+ * used by kexec. Flush the caches before copying the kernel.
+ */
+ testq %r12, %r12
+ jz 1f
+ wbinvd
+1:
+
movq %rcx, %r11
call swap_pages
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 3486d0498800..d84afb0a322d 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -69,6 +69,7 @@
#include <linux/crash_dump.h>
#include <linux/tboot.h>
#include <linux/jiffies.h>
+#include <linux/mem_encrypt.h>
#include <linux/usb/xhci-dbgp.h>
#include <video/edid.h>
@@ -115,6 +116,7 @@
#include <asm/microcode.h>
#include <asm/mmu_context.h>
#include <asm/kaslr.h>
+#include <asm/unwind.h>
/*
* max_low_pfn_mapped: highest direct mapped pfn under 4GB
@@ -374,6 +376,14 @@ static void __init reserve_initrd(void)
!ramdisk_image || !ramdisk_size)
return; /* No initrd provided by bootloader */
+ /*
+ * If SME is active, this memory will be marked encrypted by the
+ * kernel when it is accessed (including relocation). However, the
+ * ramdisk image was loaded decrypted by the bootloader, so make
+ * sure that it is encrypted before accessing it.
+ */
+ sme_early_encrypt(ramdisk_image, ramdisk_end - ramdisk_image);
+
initrd_start = 0;
mapped_size = memblock_mem_size(max_pfn_mapped);
@@ -890,7 +900,7 @@ void __init setup_arch(char **cmdline_p)
*/
olpc_ofw_detect();
- early_trap_init();
+ idt_setup_early_traps();
early_cpu_init();
early_ioremap_init();
@@ -1161,7 +1171,7 @@ void __init setup_arch(char **cmdline_p)
init_mem_mapping();
- early_trap_pf_init();
+ idt_setup_early_pf();
/*
* Update mmu_cr4_features (and, indirectly, trampoline_cr4_features)
@@ -1206,6 +1216,8 @@ void __init setup_arch(char **cmdline_p)
io_delay_init();
+ early_platform_quirks();
+
/*
* Parse the ACPI tables for possible boot-time SMP configuration.
*/
@@ -1310,6 +1322,8 @@ void __init setup_arch(char **cmdline_p)
if (efi_enabled(EFI_BOOT))
efi_apply_memmap_quirks();
#endif
+
+ unwind_init();
}
#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 10edd1e69a68..6e8fcb6f7e1e 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -155,13 +155,10 @@ static void __init pcpup_populate_pte(unsigned long addr)
static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
- struct desc_struct gdt;
+ struct desc_struct d = GDT_ENTRY_INIT(0x8092, per_cpu_offset(cpu),
+ 0xFFFFF);
- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
- 0x2 | DESCTYPE_S, 0x8);
- gdt.s = 1;
- write_gdt_entry(get_cpu_gdt_rw(cpu),
- GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
+ write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_PERCPU, &d, DESCTYPE_S);
#endif
}
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index cc30a74e4adb..e04442345fc0 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -256,7 +256,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
sp = current->sas_ss_sp + current->sas_ss_size;
} else if (IS_ENABLED(CONFIG_X86_32) &&
!onsigstack &&
- (regs->ss & 0xffff) != __USER_DS &&
+ regs->ss != __USER_DS &&
!(ka->sa.sa_flags & SA_RESTORER) &&
ka->sa.sa_restorer) {
/* This is the legacy signal stack switching. */
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index d798c0da451c..5c574dff4c1a 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -254,84 +254,45 @@ finish:
}
/*
- * Reschedule call back.
+ * Reschedule call back. KVM uses this interrupt to force a cpu out of
+ * guest mode
*/
-static inline void __smp_reschedule_interrupt(void)
-{
- inc_irq_stat(irq_resched_count);
- scheduler_ipi();
-}
-
__visible void __irq_entry smp_reschedule_interrupt(struct pt_regs *regs)
{
ack_APIC_irq();
- __smp_reschedule_interrupt();
- /*
- * KVM uses this interrupt to force a cpu out of guest mode
- */
-}
-
-__visible void __irq_entry smp_trace_reschedule_interrupt(struct pt_regs *regs)
-{
- /*
- * Need to call irq_enter() before calling the trace point.
- * __smp_reschedule_interrupt() calls irq_enter/exit() too (in
- * scheduler_ipi(). This is OK, since those functions are allowed
- * to nest.
- */
- ipi_entering_ack_irq();
- trace_reschedule_entry(RESCHEDULE_VECTOR);
- __smp_reschedule_interrupt();
- trace_reschedule_exit(RESCHEDULE_VECTOR);
- exiting_irq();
- /*
- * KVM uses this interrupt to force a cpu out of guest mode
- */
-}
+ inc_irq_stat(irq_resched_count);
-static inline void __smp_call_function_interrupt(void)
-{
- generic_smp_call_function_interrupt();
- inc_irq_stat(irq_call_count);
+ if (trace_resched_ipi_enabled()) {
+ /*
+ * scheduler_ipi() might call irq_enter() as well, but
+ * nested calls are fine.
+ */
+ irq_enter();
+ trace_reschedule_entry(RESCHEDULE_VECTOR);
+ scheduler_ipi();
+ trace_reschedule_exit(RESCHEDULE_VECTOR);
+ irq_exit();
+ return;
+ }
+ scheduler_ipi();
}
__visible void __irq_entry smp_call_function_interrupt(struct pt_regs *regs)
{
ipi_entering_ack_irq();
- __smp_call_function_interrupt();
- exiting_irq();
-}
-
-__visible void __irq_entry
-smp_trace_call_function_interrupt(struct pt_regs *regs)
-{
- ipi_entering_ack_irq();
trace_call_function_entry(CALL_FUNCTION_VECTOR);
- __smp_call_function_interrupt();
- trace_call_function_exit(CALL_FUNCTION_VECTOR);
- exiting_irq();
-}
-
-static inline void __smp_call_function_single_interrupt(void)
-{
- generic_smp_call_function_single_interrupt();
inc_irq_stat(irq_call_count);
-}
-
-__visible void __irq_entry
-smp_call_function_single_interrupt(struct pt_regs *regs)
-{
- ipi_entering_ack_irq();
- __smp_call_function_single_interrupt();
+ generic_smp_call_function_interrupt();
+ trace_call_function_exit(CALL_FUNCTION_VECTOR);
exiting_irq();
}
-__visible void __irq_entry
-smp_trace_call_function_single_interrupt(struct pt_regs *regs)
+__visible void __irq_entry smp_call_function_single_interrupt(struct pt_regs *r)
{
ipi_entering_ack_irq();
trace_call_function_single_entry(CALL_FUNCTION_SINGLE_VECTOR);
- __smp_call_function_single_interrupt();
+ inc_irq_stat(irq_call_count);
+ generic_smp_call_function_single_interrupt();
trace_call_function_single_exit(CALL_FUNCTION_SINGLE_VECTOR);
exiting_irq();
}
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index b474c8de7fba..54b9e89d4d6b 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -971,7 +971,8 @@ void common_cpu_up(unsigned int cpu, struct task_struct *idle)
* Returns zero if CPU booted OK, else error code from
* ->wakeup_secondary_cpu.
*/
-static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
+static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle,
+ int *cpu0_nmi_registered)
{
volatile u32 *trampoline_status =
(volatile u32 *) __va(real_mode_header->trampoline_status);
@@ -979,7 +980,6 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
unsigned long start_ip = real_mode_header->trampoline_start;
unsigned long boot_error = 0;
- int cpu0_nmi_registered = 0;
unsigned long timeout;
idle->thread.sp = (unsigned long)task_pt_regs(idle);
@@ -1035,7 +1035,7 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
boot_error = apic->wakeup_secondary_cpu(apicid, start_ip);
else
boot_error = wakeup_cpu_via_init_nmi(cpu, start_ip, apicid,
- &cpu0_nmi_registered);
+ cpu0_nmi_registered);
if (!boot_error) {
/*
@@ -1080,12 +1080,6 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
*/
smpboot_restore_warm_reset_vector();
}
- /*
- * Clean up the nmi handler. Do this after the callin and callout sync
- * to avoid impact of possible long unregister time.
- */
- if (cpu0_nmi_registered)
- unregister_nmi_handler(NMI_LOCAL, "wake_cpu0");
return boot_error;
}
@@ -1093,8 +1087,9 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
{
int apicid = apic->cpu_present_to_apicid(cpu);
+ int cpu0_nmi_registered = 0;
unsigned long flags;
- int err;
+ int err, ret = 0;
WARN_ON(irqs_disabled());
@@ -1131,10 +1126,11 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
common_cpu_up(cpu, tidle);
- err = do_boot_cpu(apicid, cpu, tidle);
+ err = do_boot_cpu(apicid, cpu, tidle, &cpu0_nmi_registered);
if (err) {
pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
- return -EIO;
+ ret = -EIO;
+ goto unreg_nmi;
}
/*
@@ -1150,7 +1146,15 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
touch_nmi_watchdog();
}
- return 0;
+unreg_nmi:
+ /*
+ * Clean up the nmi handler. Do this after the callin and callout sync
+ * to avoid impact of possible long unregister time.
+ */
+ if (cpu0_nmi_registered)
+ unregister_nmi_handler(NMI_LOCAL, "wake_cpu0");
+
+ return ret;
}
/**
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
index 5f25cfbd952e..5ee663836c08 100644
--- a/arch/x86/kernel/step.c
+++ b/arch/x86/kernel/step.c
@@ -13,7 +13,7 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
unsigned long addr, seg;
addr = regs->ip;
- seg = regs->cs & 0xffff;
+ seg = regs->cs;
if (v8086_mode(regs)) {
addr = (addr & 0xffff) + (seg << 4);
return addr;
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 213ddf3e937d..73e4d28112f8 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -21,6 +21,7 @@
#include <asm/compat.h>
#include <asm/ia32.h>
#include <asm/syscalls.h>
+#include <asm/mpx.h>
/*
* Align a virtual address to avoid aliasing in the I$ on AMD F15h.
@@ -100,8 +101,8 @@ out:
return error;
}
-static void find_start_end(unsigned long flags, unsigned long *begin,
- unsigned long *end)
+static void find_start_end(unsigned long addr, unsigned long flags,
+ unsigned long *begin, unsigned long *end)
{
if (!in_compat_syscall() && (flags & MAP_32BIT)) {
/* This is usually used needed to map code in small
@@ -120,7 +121,10 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
}
*begin = get_mmap_base(1);
- *end = in_compat_syscall() ? tasksize_32bit() : tasksize_64bit();
+ if (in_compat_syscall())
+ *end = task_size_32bit();
+ else
+ *end = task_size_64bit(addr > DEFAULT_MAP_WINDOW);
}
unsigned long
@@ -132,10 +136,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
struct vm_unmapped_area_info info;
unsigned long begin, end;
+ addr = mpx_unmapped_area_check(addr, len, flags);
+ if (IS_ERR_VALUE(addr))
+ return addr;
+
if (flags & MAP_FIXED)
return addr;
- find_start_end(flags, &begin, &end);
+ find_start_end(addr, flags, &begin, &end);
if (len > end)
return -ENOMEM;
@@ -171,6 +179,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
unsigned long addr = addr0;
struct vm_unmapped_area_info info;
+ addr = mpx_unmapped_area_check(addr, len, flags);
+ if (IS_ERR_VALUE(addr))
+ return addr;
+
/* requested length too big for entire address space */
if (len > TASK_SIZE)
return -ENOMEM;
@@ -195,6 +207,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
info.length = len;
info.low_limit = PAGE_SIZE;
info.high_limit = get_mmap_base(0);
+
+ /*
+ * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
+ * in the full address space.
+ *
+ * !in_compat_syscall() check to avoid high addresses for x32.
+ */
+ if (addr > DEFAULT_MAP_WINDOW && !in_compat_syscall())
+ info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;
+
info.align_mask = 0;
info.align_offset = pgoff << PAGE_SHIFT;
if (filp) {
diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
index dcd699baea1b..a106b9719c58 100644
--- a/arch/x86/kernel/tls.c
+++ b/arch/x86/kernel/tls.c
@@ -93,7 +93,7 @@ static void set_tls_desc(struct task_struct *p, int idx,
while (n-- > 0) {
if (LDT_empty(info) || LDT_zero(info)) {
- desc->a = desc->b = 0;
+ memset(desc, 0, sizeof(*desc));
} else {
fill_ldt(desc, info);
diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c
index 15515132bf0d..c6636d1f60b9 100644
--- a/arch/x86/kernel/tracepoint.c
+++ b/arch/x86/kernel/tracepoint.c
@@ -4,57 +4,38 @@
* Copyright (C) 2013 Seiji Aguchi <seiji.aguchi@hds.com>
*
*/
-#include <asm/hw_irq.h>
-#include <asm/desc.h>
+#include <linux/jump_label.h>
#include <linux/atomic.h>
-atomic_t trace_idt_ctr = ATOMIC_INIT(0);
-struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
- (unsigned long) trace_idt_table };
-
-/* No need to be aligned, but done to keep all IDTs defined the same way. */
-gate_desc trace_idt_table[NR_VECTORS] __page_aligned_bss;
+#include <asm/hw_irq.h>
+#include <asm/desc.h>
-static int trace_irq_vector_refcount;
-static DEFINE_MUTEX(irq_vector_mutex);
+DEFINE_STATIC_KEY_FALSE(trace_pagefault_key);
-static void set_trace_idt_ctr(int val)
+int trace_pagefault_reg(void)
{
- atomic_set(&trace_idt_ctr, val);
- /* Ensure the trace_idt_ctr is set before sending IPI */
- wmb();
+ static_branch_inc(&trace_pagefault_key);
+ return 0;
}
-static void switch_idt(void *arg)
+void trace_pagefault_unreg(void)
{
- unsigned long flags;
-
- local_irq_save(flags);
- load_current_idt();
- local_irq_restore(flags);
+ static_branch_dec(&trace_pagefault_key);
}
-int trace_irq_vector_regfunc(void)
+#ifdef CONFIG_SMP
+
+DEFINE_STATIC_KEY_FALSE(trace_resched_ipi_key);
+
+int trace_resched_ipi_reg(void)
{
- mutex_lock(&irq_vector_mutex);
- if (!trace_irq_vector_refcount) {
- set_trace_idt_ctr(1);
- smp_call_function(switch_idt, NULL, 0);
- switch_idt(NULL);
- }
- trace_irq_vector_refcount++;
- mutex_unlock(&irq_vector_mutex);
+ static_branch_inc(&trace_resched_ipi_key);
return 0;
}
-void trace_irq_vector_unregfunc(void)
+void trace_resched_ipi_unreg(void)
{
- mutex_lock(&irq_vector_mutex);
- trace_irq_vector_refcount--;
- if (!trace_irq_vector_refcount) {
- set_trace_idt_ctr(0);
- smp_call_function(switch_idt, NULL, 0);
- switch_idt(NULL);
- }
- mutex_unlock(&irq_vector_mutex);
+ static_branch_dec(&trace_resched_ipi_key);
}
+
+#endif
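
The tracepoint.c rewrite above drops the separate trace IDT machinery and instead gates the trace-point slow path behind static keys that the register/unregister hooks flip. A minimal, illustrative sketch of that static-key pattern, with hypothetical names (my_event_key, my_event_reg, my_hot_path), not part of the patch:

#include <linux/jump_label.h>
#include <linux/printk.h>

static DEFINE_STATIC_KEY_FALSE(my_event_key);

/* Sketch only: registration hook increments the key, patching the branch in. */
static int my_event_reg(void)
{
	static_branch_inc(&my_event_key);
	return 0;
}

static void my_event_unreg(void)
{
	static_branch_dec(&my_event_key);
}

/* Fast path compiles to a NOP while no tracer is registered. */
static void my_hot_path(void)
{
	if (static_branch_unlikely(&my_event_key))
		pr_debug("my event fired\n");	/* slow path, only when enabled */
}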
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index bf54309b85da..34ea3651362e 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -38,11 +38,6 @@
#include <linux/smp.h>
#include <linux/io.h>
-#ifdef CONFIG_EISA
-#include <linux/ioport.h>
-#include <linux/eisa.h>
-#endif
-
#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif
@@ -70,20 +65,13 @@
#include <asm/x86_init.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
-
-/* No need to be aligned, but done to keep all IDTs defined the same way. */
-gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
#else
#include <asm/processor-flags.h>
#include <asm/setup.h>
#include <asm/proto.h>
#endif
-/* Must be page-aligned because the real IDT is used in a fixmap. */
-gate_desc idt_table[NR_VECTORS] __page_aligned_bss;
-
DECLARE_BITMAP(used_vectors, NR_VECTORS);
-EXPORT_SYMBOL_GPL(used_vectors);
static inline void cond_local_irq_enable(struct pt_regs *regs)
{
@@ -935,87 +923,9 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
}
#endif
-/* Set of traps needed for early debugging. */
-void __init early_trap_init(void)
-{
- /*
- * Don't use IST to set DEBUG_STACK as it doesn't work until TSS
- * is ready in cpu_init() <-- trap_init(). Before trap_init(),
- * CPU runs at ring 0 so it is impossible to hit an invalid
- * stack. Using the original stack works well enough at this
- * early stage. DEBUG_STACK will be equipped after cpu_init() in
- * trap_init().
- *
- * We don't need to set trace_idt_table like set_intr_gate(),
- * since we don't have trace_debug and it will be reset to
- * 'debug' in trap_init() by set_intr_gate_ist().
- */
- set_intr_gate_notrace(X86_TRAP_DB, debug);
- /* int3 can be called from all */
- set_system_intr_gate(X86_TRAP_BP, &int3);
-#ifdef CONFIG_X86_32
- set_intr_gate(X86_TRAP_PF, page_fault);
-#endif
- load_idt(&idt_descr);
-}
-
-void __init early_trap_pf_init(void)
-{
-#ifdef CONFIG_X86_64
- set_intr_gate(X86_TRAP_PF, page_fault);
-#endif
-}
-
void __init trap_init(void)
{
- int i;
-
-#ifdef CONFIG_EISA
- void __iomem *p = early_ioremap(0x0FFFD9, 4);
-
- if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
- EISA_bus = 1;
- early_iounmap(p, 4);
-#endif
-
- set_intr_gate(X86_TRAP_DE, divide_error);
- set_intr_gate_ist(X86_TRAP_NMI, &nmi, NMI_STACK);
- /* int4 can be called from all */
- set_system_intr_gate(X86_TRAP_OF, &overflow);
- set_intr_gate(X86_TRAP_BR, bounds);
- set_intr_gate(X86_TRAP_UD, invalid_op);
- set_intr_gate(X86_TRAP_NM, device_not_available);
-#ifdef CONFIG_X86_32
- set_task_gate(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS);
-#else
- set_intr_gate_ist(X86_TRAP_DF, &double_fault, DOUBLEFAULT_STACK);
-#endif
- set_intr_gate(X86_TRAP_OLD_MF, coprocessor_segment_overrun);
- set_intr_gate(X86_TRAP_TS, invalid_TSS);
- set_intr_gate(X86_TRAP_NP, segment_not_present);
- set_intr_gate(X86_TRAP_SS, stack_segment);
- set_intr_gate(X86_TRAP_GP, general_protection);
- set_intr_gate(X86_TRAP_SPURIOUS, spurious_interrupt_bug);
- set_intr_gate(X86_TRAP_MF, coprocessor_error);
- set_intr_gate(X86_TRAP_AC, alignment_check);
-#ifdef CONFIG_X86_MCE
- set_intr_gate_ist(X86_TRAP_MC, &machine_check, MCE_STACK);
-#endif
- set_intr_gate(X86_TRAP_XF, simd_coprocessor_error);
-
- /* Reserve all the builtin and the syscall vector: */
- for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
- set_bit(i, used_vectors);
-
-#ifdef CONFIG_IA32_EMULATION
- set_system_intr_gate(IA32_SYSCALL_VECTOR, entry_INT80_compat);
- set_bit(IA32_SYSCALL_VECTOR, used_vectors);
-#endif
-
-#ifdef CONFIG_X86_32
- set_system_intr_gate(IA32_SYSCALL_VECTOR, entry_INT80_32);
- set_bit(IA32_SYSCALL_VECTOR, used_vectors);
-#endif
+ idt_setup_traps();
/*
* Set the IDT descriptor to a fixed read-only location, so that the
@@ -1030,20 +940,9 @@ void __init trap_init(void)
*/
cpu_init();
- /*
- * X86_TRAP_DB and X86_TRAP_BP have been set
- * in early_trap_init(). However, ITS works only after
- * cpu_init() loads TSS. See comments in early_trap_init().
- */
- set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK);
- /* int3 can be called from all */
- set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK);
+ idt_setup_ist_traps();
x86_init.irqs.trap_init();
-#ifdef CONFIG_X86_64
- memcpy(&debug_idt_table, &idt_table, IDT_ENTRIES * 16);
- set_nmi_gate(X86_TRAP_DB, &debug);
- set_nmi_gate(X86_TRAP_BP, &int3);
-#endif
+ idt_setup_debugidt_traps();
}
diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c
index b9389d72b2f7..d145a0b1f529 100644
--- a/arch/x86/kernel/unwind_frame.c
+++ b/arch/x86/kernel/unwind_frame.c
@@ -10,20 +10,22 @@
#define FRAME_HEADER_SIZE (sizeof(long) * 2)
-/*
- * This disables KASAN checking when reading a value from another task's stack,
- * since the other task could be running on another CPU and could have poisoned
- * the stack in the meantime.
- */
-#define READ_ONCE_TASK_STACK(task, x) \
-({ \
- unsigned long val; \
- if (task == current) \
- val = READ_ONCE(x); \
- else \
- val = READ_ONCE_NOCHECK(x); \
- val; \
-})
+unsigned long unwind_get_return_address(struct unwind_state *state)
+{
+ if (unwind_done(state))
+ return 0;
+
+ return __kernel_text_address(state->ip) ? state->ip : 0;
+}
+EXPORT_SYMBOL_GPL(unwind_get_return_address);
+
+unsigned long *unwind_get_return_address_ptr(struct unwind_state *state)
+{
+ if (unwind_done(state))
+ return NULL;
+
+ return state->regs ? &state->regs->ip : state->bp + 1;
+}
static void unwind_dump(struct unwind_state *state)
{
@@ -66,15 +68,6 @@ static void unwind_dump(struct unwind_state *state)
}
}
-unsigned long unwind_get_return_address(struct unwind_state *state)
-{
- if (unwind_done(state))
- return 0;
-
- return __kernel_text_address(state->ip) ? state->ip : 0;
-}
-EXPORT_SYMBOL_GPL(unwind_get_return_address);
-
static size_t regs_size(struct pt_regs *regs)
{
/* x86_32 regs from kernel mode are two words shorter: */
@@ -91,10 +84,8 @@ static bool in_entry_code(unsigned long ip)
if (addr >= __entry_text_start && addr < __entry_text_end)
return true;
-#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN)
if (addr >= __irqentry_text_start && addr < __irqentry_text_end)
return true;
-#endif
return false;
}
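
The `state->bp + 1` slot returned by the new unwind_get_return_address_ptr() above relies on the classic frame-pointer layout: each frame stores the caller's saved bp at *bp and the return address one word above it. A standalone userspace sketch of walking such a chain (not part of the patch; all addresses and values are made up):

	#include <stdio.h>

	int main(void)
	{
		/* frameN[0] = saved bp of the outer frame, frameN[1] = return address */
		unsigned long frame2[2] = { 0, 0xc0de2 };                  /* outermost frame */
		unsigned long frame1[2] = { (unsigned long)frame2, 0xc0de1 };
		unsigned long *bp = frame1;

		while (bp) {
			printf("return address: %#lx\n", *(bp + 1));
			bp = (unsigned long *)*bp;     /* follow the saved-bp chain */
		}
		return 0;
	}
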
diff --git a/arch/x86/kernel/unwind_guess.c b/arch/x86/kernel/unwind_guess.c
index 039f36738e49..4f0e17b90463 100644
--- a/arch/x86/kernel/unwind_guess.c
+++ b/arch/x86/kernel/unwind_guess.c
@@ -19,6 +19,11 @@ unsigned long unwind_get_return_address(struct unwind_state *state)
}
EXPORT_SYMBOL_GPL(unwind_get_return_address);
+unsigned long *unwind_get_return_address_ptr(struct unwind_state *state)
+{
+ return NULL;
+}
+
bool unwind_next_frame(struct unwind_state *state)
{
struct stack_info *info = &state->stack_info;
diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
new file mode 100644
index 000000000000..570b70d3f604
--- /dev/null
+++ b/arch/x86/kernel/unwind_orc.c
@@ -0,0 +1,582 @@
+#include <linux/module.h>
+#include <linux/sort.h>
+#include <asm/ptrace.h>
+#include <asm/stacktrace.h>
+#include <asm/unwind.h>
+#include <asm/orc_types.h>
+#include <asm/orc_lookup.h>
+#include <asm/sections.h>
+
+#define orc_warn(fmt, ...) \
+ printk_deferred_once(KERN_WARNING pr_fmt("WARNING: " fmt), ##__VA_ARGS__)
+
+extern int __start_orc_unwind_ip[];
+extern int __stop_orc_unwind_ip[];
+extern struct orc_entry __start_orc_unwind[];
+extern struct orc_entry __stop_orc_unwind[];
+
+static DEFINE_MUTEX(sort_mutex);
+int *cur_orc_ip_table = __start_orc_unwind_ip;
+struct orc_entry *cur_orc_table = __start_orc_unwind;
+
+unsigned int lookup_num_blocks;
+bool orc_init;
+
+static inline unsigned long orc_ip(const int *ip)
+{
+ return (unsigned long)ip + *ip;
+}
+
+static struct orc_entry *__orc_find(int *ip_table, struct orc_entry *u_table,
+ unsigned int num_entries, unsigned long ip)
+{
+ int *first = ip_table;
+ int *last = ip_table + num_entries - 1;
+ int *mid = first, *found = first;
+
+ if (!num_entries)
+ return NULL;
+
+ /*
+ * Do a binary range search to find the rightmost duplicate of a given
+ * starting address. Some entries are section terminators which are
+ * "weak" entries for ensuring there are no gaps. They should be
+ * ignored when they conflict with a real entry.
+ */
+ while (first <= last) {
+ mid = first + ((last - first) / 2);
+
+ if (orc_ip(mid) <= ip) {
+ found = mid;
+ first = mid + 1;
+ } else
+ last = mid - 1;
+ }
+
+ return u_table + (found - ip_table);
+}
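
The search above returns the rightmost entry whose start IP is less than or equal to the lookup address, so a real entry wins over a "weak" terminator sorted to its left. A minimal userspace sketch of the same rightmost-match binary search over a plain array of start addresses (sample data is hypothetical, and the ip-relative encoding is left out):

	#include <stdio.h>

	/* Index of the rightmost start[] entry that is <= ip, or -1 if none. */
	static int find_rightmost_le(const unsigned long *start, int n, unsigned long ip)
	{
		int first = 0, last = n - 1, found = -1;

		while (first <= last) {
			int mid = first + (last - first) / 2;

			if (start[mid] <= ip) {
				found = mid;      /* candidate; keep looking to the right */
				first = mid + 1;
			} else {
				last = mid - 1;
			}
		}
		return found;
	}

	int main(void)
	{
		/* Hypothetical, already-sorted function start addresses. */
		unsigned long start[] = { 0x1000, 0x1000, 0x1040, 0x10c0 };

		printf("%d\n", find_rightmost_le(start, 4, 0x1010));  /* -> 1 */
		printf("%d\n", find_rightmost_le(start, 4, 0x0fff));  /* -> -1 */
		return 0;
	}
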
+
+#ifdef CONFIG_MODULES
+static struct orc_entry *orc_module_find(unsigned long ip)
+{
+ struct module *mod;
+
+ mod = __module_address(ip);
+ if (!mod || !mod->arch.orc_unwind || !mod->arch.orc_unwind_ip)
+ return NULL;
+ return __orc_find(mod->arch.orc_unwind_ip, mod->arch.orc_unwind,
+ mod->arch.num_orcs, ip);
+}
+#else
+static struct orc_entry *orc_module_find(unsigned long ip)
+{
+ return NULL;
+}
+#endif
+
+static struct orc_entry *orc_find(unsigned long ip)
+{
+ if (!orc_init)
+ return NULL;
+
+ /* For non-init vmlinux addresses, use the fast lookup table: */
+ if (ip >= LOOKUP_START_IP && ip < LOOKUP_STOP_IP) {
+ unsigned int idx, start, stop;
+
+ idx = (ip - LOOKUP_START_IP) / LOOKUP_BLOCK_SIZE;
+
+ if (unlikely((idx >= lookup_num_blocks-1))) {
+ orc_warn("WARNING: bad lookup idx: idx=%u num=%u ip=%lx\n",
+ idx, lookup_num_blocks, ip);
+ return NULL;
+ }
+
+ start = orc_lookup[idx];
+ stop = orc_lookup[idx + 1] + 1;
+
+ if (unlikely((__start_orc_unwind + start >= __stop_orc_unwind) ||
+ (__start_orc_unwind + stop > __stop_orc_unwind))) {
+ orc_warn("WARNING: bad lookup value: idx=%u num=%u start=%u stop=%u ip=%lx\n",
+ idx, lookup_num_blocks, start, stop, ip);
+ return NULL;
+ }
+
+ return __orc_find(__start_orc_unwind_ip + start,
+ __start_orc_unwind + start, stop - start, ip);
+ }
+
+ /* vmlinux .init slow lookup: */
+ if (ip >= (unsigned long)_sinittext && ip < (unsigned long)_einittext)
+ return __orc_find(__start_orc_unwind_ip, __start_orc_unwind,
+ __stop_orc_unwind_ip - __start_orc_unwind_ip, ip);
+
+ /* Module lookup: */
+ return orc_module_find(ip);
+}
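
The fast path above narrows the binary search with a per-block index: the vmlinux text range is split into fixed-size blocks and orc_lookup[idx] records the first ORC entry that can cover block idx. A rough sketch of the index arithmetic, with made-up constants standing in for LOOKUP_START_IP and LOOKUP_BLOCK_SIZE:

	#include <stdio.h>

	#define BLOCK_SIZE 256UL	/* stand-in for LOOKUP_BLOCK_SIZE */

	int main(void)
	{
		unsigned long start_ip = 0x100000;	/* stand-in for LOOKUP_START_IP */
		unsigned long ip = 0x1003a0;
		/* hypothetical per-block start indices into the ORC tables */
		unsigned int lookup[] = { 0, 3, 7, 12, 12, 18, 25, 31 };

		unsigned int idx = (ip - start_ip) / BLOCK_SIZE;
		unsigned int first = lookup[idx];
		unsigned int last = lookup[idx + 1] + 1;	/* include the boundary entry, as orc_find() does */

		printf("block %u -> search %u entries starting at index %u\n",
		       idx, last - first, first);
		return 0;
	}
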
+
+static void orc_sort_swap(void *_a, void *_b, int size)
+{
+ struct orc_entry *orc_a, *orc_b;
+ struct orc_entry orc_tmp;
+ int *a = _a, *b = _b, tmp;
+ int delta = _b - _a;
+
+ /* Swap the .orc_unwind_ip entries: */
+ tmp = *a;
+ *a = *b + delta;
+ *b = tmp - delta;
+
+ /* Swap the corresponding .orc_unwind entries: */
+ orc_a = cur_orc_table + (a - cur_orc_ip_table);
+ orc_b = cur_orc_table + (b - cur_orc_ip_table);
+ orc_tmp = *orc_a;
+ *orc_a = *orc_b;
+ *orc_b = orc_tmp;
+}
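
Because each .orc_unwind_ip slot stores its target as an offset relative to the slot's own address (see orc_ip() above), swapping two slots has to re-bias both values by the distance between the slots. A standalone sketch of that fix-up (the layout and targets are invented for the demo):

	#include <stdio.h>
	#include <stdint.h>

	/* Decode a slot that stores its target relative to its own location. */
	static uintptr_t decode(const int *slot)
	{
		return (uintptr_t)slot + *slot;
	}

	/* Swap two slots while keeping their decoded targets intact. */
	static void swap_relative(int *a, int *b)
	{
		int delta = (int)((char *)b - (char *)a);
		int tmp = *a;

		*a = *b + delta;	/* b's target, re-encoded relative to a */
		*b = tmp - delta;	/* a's target, re-encoded relative to b */
	}

	int main(void)
	{
		int table[2];
		uintptr_t t0 = (uintptr_t)&table[0] + 0x40;	/* fabricated targets */
		uintptr_t t1 = (uintptr_t)&table[1] + 0x90;

		table[0] = (int)(t0 - (uintptr_t)&table[0]);
		table[1] = (int)(t1 - (uintptr_t)&table[1]);

		swap_relative(&table[0], &table[1]);

		/* Slot 0 now decodes to t1 and slot 1 to t0: prints "1 1". */
		printf("%d %d\n", decode(&table[0]) == t1, decode(&table[1]) == t0);
		return 0;
	}
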
+
+static int orc_sort_cmp(const void *_a, const void *_b)
+{
+ struct orc_entry *orc_a;
+ const int *a = _a, *b = _b;
+ unsigned long a_val = orc_ip(a);
+ unsigned long b_val = orc_ip(b);
+
+ if (a_val > b_val)
+ return 1;
+ if (a_val < b_val)
+ return -1;
+
+ /*
+ * The "weak" section terminator entries need to always be on the left
+ * to ensure the lookup code skips them in favor of real entries.
+ * These terminator entries exist to handle any gaps created by
+ * whitelisted .o files which didn't get objtool generation.
+ */
+ orc_a = cur_orc_table + (a - cur_orc_ip_table);
+ return orc_a->sp_reg == ORC_REG_UNDEFINED ? -1 : 1;
+}
+
+#ifdef CONFIG_MODULES
+void unwind_module_init(struct module *mod, void *_orc_ip, size_t orc_ip_size,
+ void *_orc, size_t orc_size)
+{
+ int *orc_ip = _orc_ip;
+ struct orc_entry *orc = _orc;
+ unsigned int num_entries = orc_ip_size / sizeof(int);
+
+ WARN_ON_ONCE(orc_ip_size % sizeof(int) != 0 ||
+ orc_size % sizeof(*orc) != 0 ||
+ num_entries != orc_size / sizeof(*orc));
+
+ /*
+ * The 'cur_orc_*' globals allow the orc_sort_swap() callback to
+ * associate an .orc_unwind_ip table entry with its corresponding
+ * .orc_unwind entry so they can both be swapped.
+ */
+ mutex_lock(&sort_mutex);
+ cur_orc_ip_table = orc_ip;
+ cur_orc_table = orc;
+ sort(orc_ip, num_entries, sizeof(int), orc_sort_cmp, orc_sort_swap);
+ mutex_unlock(&sort_mutex);
+
+ mod->arch.orc_unwind_ip = orc_ip;
+ mod->arch.orc_unwind = orc;
+ mod->arch.num_orcs = num_entries;
+}
+#endif
+
+void __init unwind_init(void)
+{
+ size_t orc_ip_size = (void *)__stop_orc_unwind_ip - (void *)__start_orc_unwind_ip;
+ size_t orc_size = (void *)__stop_orc_unwind - (void *)__start_orc_unwind;
+ size_t num_entries = orc_ip_size / sizeof(int);
+ struct orc_entry *orc;
+ int i;
+
+ if (!num_entries || orc_ip_size % sizeof(int) != 0 ||
+ orc_size % sizeof(struct orc_entry) != 0 ||
+ num_entries != orc_size / sizeof(struct orc_entry)) {
+ orc_warn("WARNING: Bad or missing .orc_unwind table. Disabling unwinder.\n");
+ return;
+ }
+
+ /* Sort the .orc_unwind and .orc_unwind_ip tables: */
+ sort(__start_orc_unwind_ip, num_entries, sizeof(int), orc_sort_cmp,
+ orc_sort_swap);
+
+ /* Initialize the fast lookup table: */
+ lookup_num_blocks = orc_lookup_end - orc_lookup;
+ for (i = 0; i < lookup_num_blocks-1; i++) {
+ orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind,
+ num_entries,
+ LOOKUP_START_IP + (LOOKUP_BLOCK_SIZE * i));
+ if (!orc) {
+ orc_warn("WARNING: Corrupt .orc_unwind table. Disabling unwinder.\n");
+ return;
+ }
+
+ orc_lookup[i] = orc - __start_orc_unwind;
+ }
+
+ /* Initialize the ending block: */
+ orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind, num_entries,
+ LOOKUP_STOP_IP);
+ if (!orc) {
+ orc_warn("WARNING: Corrupt .orc_unwind table. Disabling unwinder.\n");
+ return;
+ }
+ orc_lookup[lookup_num_blocks-1] = orc - __start_orc_unwind;
+
+ orc_init = true;
+}
+
+unsigned long unwind_get_return_address(struct unwind_state *state)
+{
+ if (unwind_done(state))
+ return 0;
+
+ return __kernel_text_address(state->ip) ? state->ip : 0;
+}
+EXPORT_SYMBOL_GPL(unwind_get_return_address);
+
+unsigned long *unwind_get_return_address_ptr(struct unwind_state *state)
+{
+ if (unwind_done(state))
+ return NULL;
+
+ if (state->regs)
+ return &state->regs->ip;
+
+ if (state->sp)
+ return (unsigned long *)state->sp - 1;
+
+ return NULL;
+}
+
+static bool stack_access_ok(struct unwind_state *state, unsigned long addr,
+ size_t len)
+{
+ struct stack_info *info = &state->stack_info;
+
+ /*
+ * If the address isn't on the current stack, switch to the next one.
+ *
+ * We may have to traverse multiple stacks to deal with the possibility
+ * that info->next_sp could point to an empty stack and the address
+ * could be on a subsequent stack.
+ */
+ while (!on_stack(info, (void *)addr, len))
+ if (get_stack_info(info->next_sp, state->task, info,
+ &state->stack_mask))
+ return false;
+
+ return true;
+}
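
The loop above keeps switching to the next stack until the address lands on one of them, since the target word may live on a later stack in the chain. A toy model of that walk, using a simple linked list of stack ranges instead of get_stack_info() (bounds are fabricated):

	#include <stdio.h>
	#include <stdbool.h>

	struct stk { unsigned long begin, end; const struct stk *next; };

	static bool access_ok(const struct stk *s, unsigned long addr, unsigned long len)
	{
		while (addr < s->begin || addr + len > s->end) {
			s = s->next;		/* try the next stack in the chain */
			if (!s)
				return false;
		}
		return true;
	}

	int main(void)
	{
		static const struct stk irq_stack  = { 0x3000, 0x4000, NULL };
		static const struct stk task_stack = { 0x1000, 0x2000, &irq_stack };

		printf("%d %d\n",
		       access_ok(&task_stack, 0x1ff8, 8),	/* on the first stack  */
		       access_ok(&task_stack, 0x3800, 8));	/* on the linked stack */
		return 0;
	}
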
+
+static bool deref_stack_reg(struct unwind_state *state, unsigned long addr,
+ unsigned long *val)
+{
+ if (!stack_access_ok(state, addr, sizeof(long)))
+ return false;
+
+ *val = READ_ONCE_TASK_STACK(state->task, *(unsigned long *)addr);
+ return true;
+}
+
+#define REGS_SIZE (sizeof(struct pt_regs))
+#define SP_OFFSET (offsetof(struct pt_regs, sp))
+#define IRET_REGS_SIZE (REGS_SIZE - offsetof(struct pt_regs, ip))
+#define IRET_SP_OFFSET (SP_OFFSET - offsetof(struct pt_regs, ip))
+
+static bool deref_stack_regs(struct unwind_state *state, unsigned long addr,
+ unsigned long *ip, unsigned long *sp, bool full)
+{
+ size_t regs_size = full ? REGS_SIZE : IRET_REGS_SIZE;
+ size_t sp_offset = full ? SP_OFFSET : IRET_SP_OFFSET;
+ struct pt_regs *regs = (struct pt_regs *)(addr + regs_size - REGS_SIZE);
+
+ if (IS_ENABLED(CONFIG_X86_64)) {
+ if (!stack_access_ok(state, addr, regs_size))
+ return false;
+
+ *ip = regs->ip;
+ *sp = regs->sp;
+
+ return true;
+ }
+
+ if (!stack_access_ok(state, addr, sp_offset))
+ return false;
+
+ *ip = regs->ip;
+
+ if (user_mode(regs)) {
+ if (!stack_access_ok(state, addr + sp_offset,
+ REGS_SIZE - SP_OFFSET))
+ return false;
+
+ *sp = regs->sp;
+ } else
+ *sp = (unsigned long)&regs->sp;
+
+ return true;
+}
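
The pointer bias in deref_stack_regs() can be hard to read: for an iret-only frame, the pt_regs pointer is deliberately moved backwards so that the ip..ss tail of the struct lines up with the words actually present on the stack. A small userspace illustration (the struct layout is invented, not the real pt_regs):

	#include <stdio.h>
	#include <stddef.h>

	struct fake_regs {
		unsigned long bp, di, si, dx, cx, ax;	/* "full regs" part, not saved here */
		unsigned long ip, cs, flags, sp, ss;	/* iret frame at the tail */
	};

	#define REGS_SIZE	(sizeof(struct fake_regs))
	#define IRET_REGS_SIZE	(REGS_SIZE - offsetof(struct fake_regs, ip))

	int main(void)
	{
		unsigned long stack[16] = { 0 };
		/* Pretend only the iret words were pushed, starting here: */
		unsigned long addr = (unsigned long)&stack[8];

		/* Biasing the pointer backwards makes regs->ip land exactly on the
		 * first pushed word even though the earlier fields were never saved. */
		struct fake_regs *regs =
			(struct fake_regs *)(addr + IRET_REGS_SIZE - REGS_SIZE);

		printf("%d\n", (unsigned long)&regs->ip == addr);	/* prints 1 */
		return 0;
	}
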
+
+bool unwind_next_frame(struct unwind_state *state)
+{
+ unsigned long ip_p, sp, orig_ip, prev_sp = state->sp;
+ enum stack_type prev_type = state->stack_info.type;
+ struct orc_entry *orc;
+ struct pt_regs *ptregs;
+ bool indirect = false;
+
+ if (unwind_done(state))
+ return false;
+
+ /* Don't let modules unload while we're reading their ORC data. */
+ preempt_disable();
+
+ /* Have we reached the end? */
+ if (state->regs && user_mode(state->regs))
+ goto done;
+
+ /*
+ * Find the orc_entry associated with the text address.
+ *
+ * Decrement call return addresses by one so they work for sibling
+ * calls and calls to noreturn functions.
+ */
+ orc = orc_find(state->signal ? state->ip : state->ip - 1);
+ if (!orc || orc->sp_reg == ORC_REG_UNDEFINED)
+ goto done;
+ orig_ip = state->ip;
+
+ /* Find the previous frame's stack: */
+ switch (orc->sp_reg) {
+ case ORC_REG_SP:
+ sp = state->sp + orc->sp_offset;
+ break;
+
+ case ORC_REG_BP:
+ sp = state->bp + orc->sp_offset;
+ break;
+
+ case ORC_REG_SP_INDIRECT:
+ sp = state->sp + orc->sp_offset;
+ indirect = true;
+ break;
+
+ case ORC_REG_BP_INDIRECT:
+ sp = state->bp + orc->sp_offset;
+ indirect = true;
+ break;
+
+ case ORC_REG_R10:
+ if (!state->regs || !state->full_regs) {
+ orc_warn("missing regs for base reg R10 at ip %p\n",
+ (void *)state->ip);
+ goto done;
+ }
+ sp = state->regs->r10;
+ break;
+
+ case ORC_REG_R13:
+ if (!state->regs || !state->full_regs) {
+ orc_warn("missing regs for base reg R13 at ip %p\n",
+ (void *)state->ip);
+ goto done;
+ }
+ sp = state->regs->r13;
+ break;
+
+ case ORC_REG_DI:
+ if (!state->regs || !state->full_regs) {
+ orc_warn("missing regs for base reg DI at ip %p\n",
+ (void *)state->ip);
+ goto done;
+ }
+ sp = state->regs->di;
+ break;
+
+ case ORC_REG_DX:
+ if (!state->regs || !state->full_regs) {
+ orc_warn("missing regs for base reg DX at ip %p\n",
+ (void *)state->ip);
+ goto done;
+ }
+ sp = state->regs->dx;
+ break;
+
+ default:
+ orc_warn("unknown SP base reg %d for ip %p\n",
+ orc->sp_reg, (void *)state->ip);
+ goto done;
+ }
+
+ if (indirect) {
+ if (!deref_stack_reg(state, sp, &sp))
+ goto done;
+ }
+
+ /* Find IP, SP and possibly regs: */
+ switch (orc->type) {
+ case ORC_TYPE_CALL:
+ ip_p = sp - sizeof(long);
+
+ if (!deref_stack_reg(state, ip_p, &state->ip))
+ goto done;
+
+ state->ip = ftrace_graph_ret_addr(state->task, &state->graph_idx,
+ state->ip, (void *)ip_p);
+
+ state->sp = sp;
+ state->regs = NULL;
+ state->signal = false;
+ break;
+
+ case ORC_TYPE_REGS:
+ if (!deref_stack_regs(state, sp, &state->ip, &state->sp, true)) {
+ orc_warn("can't dereference registers at %p for ip %p\n",
+ (void *)sp, (void *)orig_ip);
+ goto done;
+ }
+
+ state->regs = (struct pt_regs *)sp;
+ state->full_regs = true;
+ state->signal = true;
+ break;
+
+ case ORC_TYPE_REGS_IRET:
+ if (!deref_stack_regs(state, sp, &state->ip, &state->sp, false)) {
+ orc_warn("can't dereference iret registers at %p for ip %p\n",
+ (void *)sp, (void *)orig_ip);
+ goto done;
+ }
+
+ ptregs = container_of((void *)sp, struct pt_regs, ip);
+ if ((unsigned long)ptregs >= prev_sp &&
+ on_stack(&state->stack_info, ptregs, REGS_SIZE)) {
+ state->regs = ptregs;
+ state->full_regs = false;
+ } else
+ state->regs = NULL;
+
+ state->signal = true;
+ break;
+
+ default:
+ orc_warn("unknown .orc_unwind entry type %d\n", orc->type);
+ break;
+ }
+
+ /* Find BP: */
+ switch (orc->bp_reg) {
+ case ORC_REG_UNDEFINED:
+ if (state->regs && state->full_regs)
+ state->bp = state->regs->bp;
+ break;
+
+ case ORC_REG_PREV_SP:
+ if (!deref_stack_reg(state, sp + orc->bp_offset, &state->bp))
+ goto done;
+ break;
+
+ case ORC_REG_BP:
+ if (!deref_stack_reg(state, state->bp + orc->bp_offset, &state->bp))
+ goto done;
+ break;
+
+ default:
+ orc_warn("unknown BP base reg %d for ip %p\n",
+ orc->bp_reg, (void *)orig_ip);
+ goto done;
+ }
+
+ /* Prevent a recursive loop due to bad ORC data: */
+ if (state->stack_info.type == prev_type &&
+ on_stack(&state->stack_info, (void *)state->sp, sizeof(long)) &&
+ state->sp <= prev_sp) {
+ orc_warn("stack going in the wrong direction? ip=%p\n",
+ (void *)orig_ip);
+ goto done;
+ }
+
+ preempt_enable();
+ return true;
+
+done:
+ preempt_enable();
+ state->stack_info.type = STACK_TYPE_UNKNOWN;
+ return false;
+}
+EXPORT_SYMBOL_GPL(unwind_next_frame);
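
For the common ORC_TYPE_CALL case above, one unwind step boils down to: previous SP = base register + sp_offset, and the return address is the word just below that. A toy model of the step (the offset and stack contents are fabricated; only the ORC_REG_SP base is shown):

	#include <stdio.h>

	struct mini_orc {
		int sp_offset;		/* prev_sp = sp + sp_offset */
	};

	int main(void)
	{
		unsigned long stack[8] = { 0 };
		unsigned long sp = (unsigned long)&stack[0];
		struct mini_orc orc = { .sp_offset = 4 * (int)sizeof(long) };
		unsigned long prev_sp, ret;

		/* Pretend the caller's return address sits just below the point its
		 * stack pointer will be restored to. */
		stack[3] = 0xbeef;

		prev_sp = sp + orc.sp_offset;
		ret = *(unsigned long *)(prev_sp - sizeof(long));	/* reads stack[3] */

		printf("unwound ip=%#lx, sp advanced by %d bytes\n", ret, orc.sp_offset);
		return 0;
	}
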
+
+void __unwind_start(struct unwind_state *state, struct task_struct *task,
+ struct pt_regs *regs, unsigned long *first_frame)
+{
+ memset(state, 0, sizeof(*state));
+ state->task = task;
+
+ /*
+ * Refuse to unwind the stack of a task while it's executing on another
+ * CPU. This check is racy, but that's ok: the unwinder has other
+ * checks to prevent it from going off the rails.
+ */
+ if (task_on_another_cpu(task))
+ goto done;
+
+ if (regs) {
+ if (user_mode(regs))
+ goto done;
+
+ state->ip = regs->ip;
+ state->sp = kernel_stack_pointer(regs);
+ state->bp = regs->bp;
+ state->regs = regs;
+ state->full_regs = true;
+ state->signal = true;
+
+ } else if (task == current) {
+ asm volatile("lea (%%rip), %0\n\t"
+ "mov %%rsp, %1\n\t"
+ "mov %%rbp, %2\n\t"
+ : "=r" (state->ip), "=r" (state->sp),
+ "=r" (state->bp));
+
+ } else {
+ struct inactive_task_frame *frame = (void *)task->thread.sp;
+
+ state->sp = task->thread.sp;
+ state->bp = READ_ONCE_NOCHECK(frame->bp);
+ state->ip = READ_ONCE_NOCHECK(frame->ret_addr);
+ }
+
+ if (get_stack_info((unsigned long *)state->sp, state->task,
+ &state->stack_info, &state->stack_mask))
+ return;
+
+ /*
+ * The caller can provide the address of the first frame directly
+ * (first_frame) or indirectly (regs->sp) to indicate which stack frame
+ * to start unwinding at. Skip ahead until we reach it.
+ */
+
+ /* When starting from regs, skip the regs frame: */
+ if (regs) {
+ unwind_next_frame(state);
+ return;
+ }
+
+ /* Otherwise, skip ahead to the user-specified starting frame: */
+ while (!unwind_done(state) &&
+ (!on_stack(&state->stack_info, first_frame, sizeof(long)) ||
+ state->sp <= (unsigned long)first_frame))
+ unwind_next_frame(state);
+
+ return;
+
+done:
+ state->stack_info.type = STACK_TYPE_UNKNOWN;
+ return;
+}
+EXPORT_SYMBOL_GPL(__unwind_start);
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index c8a3b61be0aa..f05f00acac89 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -24,6 +24,7 @@
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page_types.h>
+#include <asm/orc_lookup.h>
#include <asm/cache.h>
#include <asm/boot.h>
@@ -148,6 +149,8 @@ SECTIONS
BUG_TABLE
+ ORC_UNWIND_TABLE
+
. = ALIGN(PAGE_SIZE);
__vvar_page = .;
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 760433b2574a..3ea624452f93 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -22,7 +22,7 @@ config KVM
depends on HAVE_KVM
depends on HIGH_RES_TIMERS
# for TASKSTATS/TASK_DELAY_ACCT:
- depends on NET
+ depends on NET && MULTIUSER
select PREEMPT_NOTIFIERS
select MMU_NOTIFIER
select ANON_INODES
@@ -89,6 +89,5 @@ config KVM_MMU_AUDIT
# OK, it's a little counter-intuitive to do this, but it puts it neatly under
# the virtualization menu.
source drivers/vhost/Kconfig
-source drivers/lguest/Kconfig
endif # VIRTUALIZATION
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 59ca2eea522c..19adbb418443 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -469,7 +469,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
cpuid_mask(&entry->ecx, CPUID_7_ECX);
/* PKU is not yet implemented for shadow paging. */
- if (!tdp_enabled)
+ if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
entry->ecx &= ~F(PKU);
entry->edx &= kvm_cpuid_7_0_edx_x86_features;
entry->edx &= get_scattered_cpuid_leaf(7, 0, CPUID_EDX);
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 2695a34fa1c5..337b6d2730fa 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -649,9 +649,10 @@ void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
}
if ((stimer->config & HV_STIMER_ENABLE) &&
- stimer->count)
- stimer_start(stimer);
- else
+ stimer->count) {
+ if (!stimer->msg_pending)
+ stimer_start(stimer);
+ } else
stimer_cleanup(stimer);
}
}
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index 762cdf2595f9..e1e89ee4af75 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -84,11 +84,6 @@ static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
| ((u64)(kvm_register_read(vcpu, VCPU_REGS_RDX) & -1u) << 32);
}
-static inline u32 kvm_read_pkru(struct kvm_vcpu *vcpu)
-{
- return kvm_x86_ops->get_pkru(vcpu);
-}
-
static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
vcpu->arch.hflags |= HF_GUEST_MASK;
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 2819d4c123eb..589dcc117086 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1495,11 +1495,10 @@ EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use);
static void cancel_hv_timer(struct kvm_lapic *apic)
{
+ WARN_ON(preemptible());
WARN_ON(!apic->lapic_timer.hv_timer_in_use);
- preempt_disable();
kvm_x86_ops->cancel_hv_timer(apic->vcpu);
apic->lapic_timer.hv_timer_in_use = false;
- preempt_enable();
}
static bool start_hv_timer(struct kvm_lapic *apic)
@@ -1507,6 +1506,7 @@ static bool start_hv_timer(struct kvm_lapic *apic)
struct kvm_timer *ktimer = &apic->lapic_timer;
int r;
+ WARN_ON(preemptible());
if (!kvm_x86_ops->set_hv_timer)
return false;
@@ -1538,6 +1538,8 @@ static bool start_hv_timer(struct kvm_lapic *apic)
static void start_sw_timer(struct kvm_lapic *apic)
{
struct kvm_timer *ktimer = &apic->lapic_timer;
+
+ WARN_ON(preemptible());
if (apic->lapic_timer.hv_timer_in_use)
cancel_hv_timer(apic);
if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending))
@@ -1552,15 +1554,20 @@ static void start_sw_timer(struct kvm_lapic *apic)
static void restart_apic_timer(struct kvm_lapic *apic)
{
+ preempt_disable();
if (!start_hv_timer(apic))
start_sw_timer(apic);
+ preempt_enable();
}
void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
- WARN_ON(!apic->lapic_timer.hv_timer_in_use);
+ preempt_disable();
+ /* If the preempt notifier has already run, it also called apic_timer_expired */
+ if (!apic->lapic_timer.hv_timer_in_use)
+ goto out;
WARN_ON(swait_active(&vcpu->wq));
cancel_hv_timer(apic);
apic_timer_expired(apic);
@@ -1569,6 +1576,8 @@ void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
advance_periodic_target_expiration(apic);
restart_apic_timer(apic);
}
+out:
+ preempt_enable();
}
EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);
@@ -1582,9 +1591,11 @@ void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
+ preempt_disable();
/* Possibly the TSC deadline timer is not enabled yet */
if (apic->lapic_timer.hv_timer_in_use)
start_sw_timer(apic);
+ preempt_enable();
}
EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_sw_timer);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 9b1dd114956a..04d750813c9d 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -108,7 +108,7 @@ module_param(dbg, bool, 0644);
(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
-#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
+#define PT64_BASE_ADDR_MASK __sme_clr((((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1)))
#define PT64_DIR_BASE_ADDR_MASK \
(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
#define PT64_LVL_ADDR_MASK(level) \
@@ -126,7 +126,7 @@ module_param(dbg, bool, 0644);
* PT32_LEVEL_BITS))) - 1))
#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | shadow_user_mask \
- | shadow_x_mask | shadow_nx_mask)
+ | shadow_x_mask | shadow_nx_mask | shadow_me_mask)
#define ACC_EXEC_MASK 1
#define ACC_WRITE_MASK PT_WRITABLE_MASK
@@ -186,6 +186,7 @@ static u64 __read_mostly shadow_dirty_mask;
static u64 __read_mostly shadow_mmio_mask;
static u64 __read_mostly shadow_mmio_value;
static u64 __read_mostly shadow_present_mask;
+static u64 __read_mostly shadow_me_mask;
/*
* SPTEs used by MMUs without A/D bits are marked with shadow_acc_track_value.
@@ -349,7 +350,7 @@ static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
*/
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask,
- u64 acc_track_mask)
+ u64 acc_track_mask, u64 me_mask)
{
BUG_ON(!dirty_mask != !accessed_mask);
BUG_ON(!accessed_mask && !acc_track_mask);
@@ -362,6 +363,7 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
shadow_x_mask = x_mask;
shadow_present_mask = p_mask;
shadow_acc_track_mask = acc_track_mask;
+ shadow_me_mask = me_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
@@ -2433,7 +2435,7 @@ static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
BUILD_BUG_ON(VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);
spte = __pa(sp->spt) | shadow_present_mask | PT_WRITABLE_MASK |
- shadow_user_mask | shadow_x_mask;
+ shadow_user_mask | shadow_x_mask | shadow_me_mask;
if (sp_ad_disabled(sp))
spte |= shadow_acc_track_value;
@@ -2745,6 +2747,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
pte_access &= ~ACC_WRITE_MASK;
spte |= (u64)pfn << PAGE_SHIFT;
+ spte |= shadow_me_mask;
if (pte_access & ACC_WRITE_MASK) {
@@ -4106,16 +4109,28 @@ void
reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
{
bool uses_nx = context->nx || context->base_role.smep_andnot_wp;
+ struct rsvd_bits_validate *shadow_zero_check;
+ int i;
/*
* Passing "true" to the last argument is okay; it adds a check
* on bit 8 of the SPTEs which KVM doesn't use anyway.
*/
- __reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
+ shadow_zero_check = &context->shadow_zero_check;
+ __reset_rsvds_bits_mask(vcpu, shadow_zero_check,
boot_cpu_data.x86_phys_bits,
context->shadow_root_level, uses_nx,
guest_cpuid_has_gbpages(vcpu), is_pse(vcpu),
true);
+
+ if (!shadow_me_mask)
+ return;
+
+ for (i = context->shadow_root_level; --i >= 0;) {
+ shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
+ shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
+ }
+
}
EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask);
@@ -4133,17 +4148,29 @@ static void
reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
struct kvm_mmu *context)
{
+ struct rsvd_bits_validate *shadow_zero_check;
+ int i;
+
+ shadow_zero_check = &context->shadow_zero_check;
+
if (boot_cpu_is_amd())
- __reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
+ __reset_rsvds_bits_mask(vcpu, shadow_zero_check,
boot_cpu_data.x86_phys_bits,
context->shadow_root_level, false,
boot_cpu_has(X86_FEATURE_GBPAGES),
true, true);
else
- __reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
+ __reset_rsvds_bits_mask_ept(shadow_zero_check,
boot_cpu_data.x86_phys_bits,
false);
+ if (!shadow_me_mask)
+ return;
+
+ for (i = context->shadow_root_level; --i >= 0;) {
+ shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
+ shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
+ }
}
/*
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index d7d248a000dd..4b9a3ae6b725 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -185,7 +185,7 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
* index of the protection domain, so pte_pkey * 2 is
* is the index of the first bit for the domain.
*/
- pkru_bits = (kvm_read_pkru(vcpu) >> (pte_pkey * 2)) & 3;
+ pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;
/* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
offset = (pfec & ~1) +
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 4d8141e533c3..8dbd8dbc83eb 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1100,7 +1100,7 @@ static __init int svm_hardware_setup(void)
if (vls) {
if (!npt_enabled ||
- !boot_cpu_has(X86_FEATURE_VIRTUAL_VMLOAD_VMSAVE) ||
+ !boot_cpu_has(X86_FEATURE_V_VMSAVE_VMLOAD) ||
!IS_ENABLED(CONFIG_X86_64)) {
vls = false;
} else {
@@ -1167,9 +1167,9 @@ static void avic_init_vmcb(struct vcpu_svm *svm)
{
struct vmcb *vmcb = svm->vmcb;
struct kvm_arch *vm_data = &svm->vcpu.kvm->arch;
- phys_addr_t bpa = page_to_phys(svm->avic_backing_page);
- phys_addr_t lpa = page_to_phys(vm_data->avic_logical_id_table_page);
- phys_addr_t ppa = page_to_phys(vm_data->avic_physical_id_table_page);
+ phys_addr_t bpa = __sme_set(page_to_phys(svm->avic_backing_page));
+ phys_addr_t lpa = __sme_set(page_to_phys(vm_data->avic_logical_id_table_page));
+ phys_addr_t ppa = __sme_set(page_to_phys(vm_data->avic_physical_id_table_page));
vmcb->control.avic_backing_page = bpa & AVIC_HPA_MASK;
vmcb->control.avic_logical_id = lpa & AVIC_HPA_MASK;
@@ -1232,8 +1232,8 @@ static void init_vmcb(struct vcpu_svm *svm)
set_intercept(svm, INTERCEPT_MWAIT);
}
- control->iopm_base_pa = iopm_base;
- control->msrpm_base_pa = __pa(svm->msrpm);
+ control->iopm_base_pa = __sme_set(iopm_base);
+ control->msrpm_base_pa = __sme_set(__pa(svm->msrpm));
control->int_ctl = V_INTR_MASKING_MASK;
init_seg(&save->es);
@@ -1377,9 +1377,9 @@ static int avic_init_backing_page(struct kvm_vcpu *vcpu)
return -EINVAL;
new_entry = READ_ONCE(*entry);
- new_entry = (page_to_phys(svm->avic_backing_page) &
- AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK) |
- AVIC_PHYSICAL_ID_ENTRY_VALID_MASK;
+ new_entry = __sme_set((page_to_phys(svm->avic_backing_page) &
+ AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK) |
+ AVIC_PHYSICAL_ID_ENTRY_VALID_MASK);
WRITE_ONCE(*entry, new_entry);
svm->avic_physical_id_cache = entry;
@@ -1647,7 +1647,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
svm->vmcb = page_address(page);
clear_page(svm->vmcb);
- svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
+ svm->vmcb_pa = __sme_set(page_to_pfn(page) << PAGE_SHIFT);
svm->asid_generation = 0;
init_vmcb(svm);
@@ -1675,7 +1675,7 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
- __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
+ __free_page(pfn_to_page(__sme_clr(svm->vmcb_pa) >> PAGE_SHIFT));
__free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
__free_page(virt_to_page(svm->nested.hsave));
__free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
@@ -1777,11 +1777,6 @@ static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
to_svm(vcpu)->vmcb->save.rflags = rflags;
}
-static u32 svm_get_pkru(struct kvm_vcpu *vcpu)
-{
- return 0;
-}
-
static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
switch (reg) {
@@ -2335,7 +2330,7 @@ static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
u64 pdpte;
int ret;
- ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte,
+ ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(__sme_clr(cr3)), &pdpte,
offset_in_page(cr3) + index * 8, 8);
if (ret)
return 0;
@@ -2347,7 +2342,7 @@ static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu,
{
struct vcpu_svm *svm = to_svm(vcpu);
- svm->vmcb->control.nested_cr3 = root;
+ svm->vmcb->control.nested_cr3 = __sme_set(root);
mark_dirty(svm->vmcb, VMCB_NPT);
svm_flush_tlb(vcpu);
}
@@ -2430,6 +2425,16 @@ static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
svm->vmcb->control.exit_code_hi = 0;
svm->vmcb->control.exit_info_1 = error_code;
+
+ /*
+ * FIXME: we should not write CR2 when L1 intercepts an L2 #PF exception.
+ * The fix is to add the ancillary datum (CR2 or DR6) to structs
+ * kvm_queued_exception and kvm_vcpu_events, so that CR2 and DR6 can be
+ * written only when inject_pending_event runs (DR6 would written here
+ * too). This should be conditional on a new capability---if the
+ * capability is disabled, kvm_multiple_exception would write the
+ * ancillary information to CR2 or DR6, for backwards ABI-compatibility.
+ */
if (svm->vcpu.arch.exception.nested_apf)
svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
else
@@ -2868,7 +2873,7 @@ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
svm->nested.msrpm[p] = svm->msrpm[p] | value;
}
- svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);
+ svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));
return true;
}
@@ -4501,7 +4506,7 @@ get_pi_vcpu_info(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
pr_debug("SVM: %s: use GA mode for irq %u\n", __func__,
irq.vector);
*svm = to_svm(vcpu);
- vcpu_info->pi_desc_addr = page_to_phys((*svm)->avic_backing_page);
+ vcpu_info->pi_desc_addr = __sme_set(page_to_phys((*svm)->avic_backing_page));
vcpu_info->vector = irq.vector;
return 0;
@@ -4552,7 +4557,8 @@ static int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
struct amd_iommu_pi_data pi;
/* Try to enable guest_mode in IRTE */
- pi.base = page_to_phys(svm->avic_backing_page) & AVIC_HPA_MASK;
+ pi.base = __sme_set(page_to_phys(svm->avic_backing_page) &
+ AVIC_HPA_MASK);
pi.ga_tag = AVIC_GATAG(kvm->arch.avic_vm_id,
svm->vcpu.vcpu_id);
pi.is_guest_mode = true;
@@ -5001,7 +5007,7 @@ static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
struct vcpu_svm *svm = to_svm(vcpu);
- svm->vmcb->save.cr3 = root;
+ svm->vmcb->save.cr3 = __sme_set(root);
mark_dirty(svm->vmcb, VMCB_CR);
svm_flush_tlb(vcpu);
}
@@ -5010,7 +5016,7 @@ static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
struct vcpu_svm *svm = to_svm(vcpu);
- svm->vmcb->control.nested_cr3 = root;
+ svm->vmcb->control.nested_cr3 = __sme_set(root);
mark_dirty(svm->vmcb, VMCB_NPT);
/* Also sync guest cr3 here in case we live migrate */
@@ -5403,8 +5409,6 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.get_rflags = svm_get_rflags,
.set_rflags = svm_set_rflags,
- .get_pkru = svm_get_pkru,
-
.tlb_flush = svm_flush_tlb,
.run = svm_vcpu_run,
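
Several of the MMU and SVM changes above wrap physical addresses in __sme_set()/__sme_clr() and mix shadow_me_mask into SPTEs. The underlying operation is just setting or clearing the memory-encryption bit in a physical address before handing it to hardware, or before using it as an index again. A trivial illustration (bit 47 is an assumption for the demo; the real sme_me_mask position comes from CPUID):

	#include <stdio.h>

	int main(void)
	{
		unsigned long long sme_me_mask = 1ULL << 47;	/* assumed C-bit position */
		unsigned long long pa = 0x12345000ULL;

		unsigned long long hw_pa  = pa | sme_me_mask;	/* like __sme_set()  */
		unsigned long long idx_pa = hw_pa & ~sme_me_mask;	/* like __sme_clr()  */

		printf("hw=%#llx back=%#llx\n", hw_pa, idx_pa);
		return 0;
	}
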
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 84e62acf2dd8..70b90c0810d0 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -198,7 +198,8 @@ struct loaded_vmcs {
struct vmcs *vmcs;
struct vmcs *shadow_vmcs;
int cpu;
- int launched;
+ bool launched;
+ bool nmi_known_unmasked;
struct list_head loaded_vmcss_on_cpu_link;
};
@@ -415,13 +416,10 @@ struct nested_vmx {
/* The guest-physical address of the current VMCS L1 keeps for L2 */
gpa_t current_vmptr;
- /* The host-usable pointer to the above */
- struct page *current_vmcs12_page;
- struct vmcs12 *current_vmcs12;
/*
* Cache of the guest's VMCS, existing outside of guest memory.
* Loaded from guest memory during VMPTRLD. Flushed to guest
- * memory during VMXOFF, VMCLEAR, VMPTRLD.
+ * memory during VMCLEAR and VMPTRLD.
*/
struct vmcs12 *cached_vmcs12;
/*
@@ -562,7 +560,6 @@ struct vcpu_vmx {
struct kvm_vcpu vcpu;
unsigned long host_rsp;
u8 fail;
- bool nmi_known_unmasked;
u32 exit_intr_info;
u32 idt_vectoring_info;
ulong rflags;
@@ -639,8 +636,6 @@ struct vcpu_vmx {
u64 current_tsc_ratio;
- bool guest_pkru_valid;
- u32 guest_pkru;
u32 host_pkru;
/*
@@ -927,6 +922,10 @@ static u32 vmx_segment_access_rights(struct kvm_segment *var);
static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx);
static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);
static int alloc_identity_pagetable(struct kvm *kvm);
+static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
+static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
+static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
+ u16 error_code);
static DEFINE_PER_CPU(struct vmcs *, vmxarea);
static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
@@ -2326,6 +2325,11 @@ static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
__vmx_load_host_state(to_vmx(vcpu));
}
+static bool emulation_required(struct kvm_vcpu *vcpu)
+{
+ return emulate_invalid_guest_state && !guest_state_valid(vcpu);
+}
+
static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);
/*
@@ -2363,6 +2367,8 @@ static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
+ unsigned long old_rflags = vmx_get_rflags(vcpu);
+
__set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
to_vmx(vcpu)->rflags = rflags;
if (to_vmx(vcpu)->rmode.vm86_active) {
@@ -2370,11 +2376,9 @@ static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
}
vmcs_writel(GUEST_RFLAGS, rflags);
-}
-static u32 vmx_get_pkru(struct kvm_vcpu *vcpu)
-{
- return to_vmx(vcpu)->guest_pkru;
+ if ((old_rflags ^ to_vmx(vcpu)->rflags) & X86_EFLAGS_VM)
+ to_vmx(vcpu)->emulation_required = emulation_required(vcpu);
}
static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
@@ -2418,6 +2422,30 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
vmx_set_interrupt_shadow(vcpu, 0);
}
+static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
+ unsigned long exit_qual)
+{
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ unsigned int nr = vcpu->arch.exception.nr;
+ u32 intr_info = nr | INTR_INFO_VALID_MASK;
+
+ if (vcpu->arch.exception.has_error_code) {
+ vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code;
+ intr_info |= INTR_INFO_DELIVER_CODE_MASK;
+ }
+
+ if (kvm_exception_is_soft(nr))
+ intr_info |= INTR_TYPE_SOFT_EXCEPTION;
+ else
+ intr_info |= INTR_TYPE_HARD_EXCEPTION;
+
+ if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) &&
+ vmx_get_nmi_mask(vcpu))
+ intr_info |= INTR_INFO_UNBLOCK_NMI;
+
+ nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual);
+}
+
/*
* KVM wants to inject page-faults which it got to the guest. This function
* checks whether in a nested guest, we need to inject them to L1 or L2.
@@ -2427,23 +2455,38 @@ static int nested_vmx_check_exception(struct kvm_vcpu *vcpu)
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
unsigned int nr = vcpu->arch.exception.nr;
- if (!((vmcs12->exception_bitmap & (1u << nr)) ||
- (nr == PF_VECTOR && vcpu->arch.exception.nested_apf)))
- return 0;
+ if (nr == PF_VECTOR) {
+ if (vcpu->arch.exception.nested_apf) {
+ nested_vmx_inject_exception_vmexit(vcpu,
+ vcpu->arch.apf.nested_apf_token);
+ return 1;
+ }
+ /*
+ * FIXME: we must not write CR2 when L1 intercepts an L2 #PF exception.
+ * The fix is to add the ancillary datum (CR2 or DR6) to structs
+ * kvm_queued_exception and kvm_vcpu_events, so that CR2 and DR6
+ * can be written only when inject_pending_event runs. This should be
+ * conditional on a new capability---if the capability is disabled,
+ * kvm_multiple_exception would write the ancillary information to
+ * CR2 or DR6, for backwards ABI-compatibility.
+ */
+ if (nested_vmx_is_page_fault_vmexit(vmcs12,
+ vcpu->arch.exception.error_code)) {
+ nested_vmx_inject_exception_vmexit(vcpu, vcpu->arch.cr2);
+ return 1;
+ }
+ } else {
+ unsigned long exit_qual = 0;
+ if (nr == DB_VECTOR)
+ exit_qual = vcpu->arch.dr6;
- if (vcpu->arch.exception.nested_apf) {
- vmcs_write32(VM_EXIT_INTR_ERROR_CODE, vcpu->arch.exception.error_code);
- nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
- PF_VECTOR | INTR_TYPE_HARD_EXCEPTION |
- INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK,
- vcpu->arch.apf.nested_apf_token);
- return 1;
+ if (vmcs12->exception_bitmap & (1u << nr)) {
+ nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
+ return 1;
+ }
}
- nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
- vmcs_read32(VM_EXIT_INTR_INFO),
- vmcs_readl(EXIT_QUALIFICATION));
- return 1;
+ return 0;
}
static void vmx_queue_exception(struct kvm_vcpu *vcpu)
@@ -2657,7 +2700,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
* reason is that if one of these bits is necessary, it will appear
* in vmcs01 and prepare_vmcs02, when it bitwise-or's the control
* fields of vmcs01 and vmcs02, will turn these bits off - and
- * nested_vmx_exit_handled() will not pass related exits to L1.
+ * nested_vmx_exit_reflected() will not pass related exits to L1.
* These rules have exceptions below.
*/
@@ -3857,11 +3900,6 @@ static __init int alloc_kvm_area(void)
return 0;
}
-static bool emulation_required(struct kvm_vcpu *vcpu)
-{
- return emulate_invalid_guest_state && !guest_state_valid(vcpu);
-}
-
static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg,
struct kvm_segment *save)
{
@@ -4950,6 +4988,28 @@ static bool vmx_get_enable_apicv(void)
return enable_apicv;
}
+static void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu)
+{
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ gfn_t gfn;
+
+ /*
+ * Don't need to mark the APIC access page dirty; it is never
+ * written to by the CPU during APIC virtualization.
+ */
+
+ if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
+ gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT;
+ kvm_vcpu_mark_page_dirty(vcpu, gfn);
+ }
+
+ if (nested_cpu_has_posted_intr(vmcs12)) {
+ gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT;
+ kvm_vcpu_mark_page_dirty(vcpu, gfn);
+ }
+}
+
+
static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -4957,18 +5017,15 @@ static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
void *vapic_page;
u16 status;
- if (vmx->nested.pi_desc &&
- vmx->nested.pi_pending) {
- vmx->nested.pi_pending = false;
- if (!pi_test_and_clear_on(vmx->nested.pi_desc))
- return;
-
- max_irr = find_last_bit(
- (unsigned long *)vmx->nested.pi_desc->pir, 256);
+ if (!vmx->nested.pi_desc || !vmx->nested.pi_pending)
+ return;
- if (max_irr == 256)
- return;
+ vmx->nested.pi_pending = false;
+ if (!pi_test_and_clear_on(vmx->nested.pi_desc))
+ return;
+ max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256);
+ if (max_irr != 256) {
vapic_page = kmap(vmx->nested.virtual_apic_page);
__kvm_apic_update_irr(vmx->nested.pi_desc->pir, vapic_page);
kunmap(vmx->nested.virtual_apic_page);
@@ -4980,11 +5037,16 @@ static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
vmcs_write16(GUEST_INTR_STATUS, status);
}
}
+
+ nested_mark_vmcs12_pages_dirty(vcpu);
}
-static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu)
+static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu,
+ bool nested)
{
#ifdef CONFIG_SMP
+ int pi_vec = nested ? POSTED_INTR_NESTED_VECTOR : POSTED_INTR_VECTOR;
+
if (vcpu->mode == IN_GUEST_MODE) {
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -5002,8 +5064,7 @@ static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu)
*/
WARN_ON_ONCE(pi_test_sn(&vmx->pi_desc));
- apic->send_IPI_mask(get_cpu_mask(vcpu->cpu),
- POSTED_INTR_VECTOR);
+ apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec);
return true;
}
#endif
@@ -5018,7 +5079,7 @@ static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
if (is_guest_mode(vcpu) &&
vector == vmx->nested.posted_intr_nv) {
/* the PIR and ON have been set by L1. */
- kvm_vcpu_trigger_posted_interrupt(vcpu);
+ kvm_vcpu_trigger_posted_interrupt(vcpu, true);
/*
* If a posted intr is not recognized by hardware,
* we will accomplish it in the next vmentry.
@@ -5052,7 +5113,7 @@ static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
if (pi_test_and_set_on(&vmx->pi_desc))
return;
- if (!kvm_vcpu_trigger_posted_interrupt(vcpu))
+ if (!kvm_vcpu_trigger_posted_interrupt(vcpu, false))
kvm_vcpu_kick(vcpu);
}
@@ -5510,10 +5571,8 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- if (!is_guest_mode(vcpu)) {
- ++vcpu->stat.nmi_injections;
- vmx->nmi_known_unmasked = false;
- }
+ ++vcpu->stat.nmi_injections;
+ vmx->loaded_vmcs->nmi_known_unmasked = false;
if (vmx->rmode.vm86_active) {
if (kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0) != EMULATE_DONE)
@@ -5527,16 +5586,21 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
{
- if (to_vmx(vcpu)->nmi_known_unmasked)
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ bool masked;
+
+ if (vmx->loaded_vmcs->nmi_known_unmasked)
return false;
- return vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
+ masked = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
+ vmx->loaded_vmcs->nmi_known_unmasked = !masked;
+ return masked;
}
static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- vmx->nmi_known_unmasked = !masked;
+ vmx->loaded_vmcs->nmi_known_unmasked = !masked;
if (masked)
vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
GUEST_INTR_STATE_NMI);
@@ -6492,7 +6556,7 @@ void vmx_enable_tdp(void)
enable_ept_ad_bits ? VMX_EPT_DIRTY_BIT : 0ull,
0ull, VMX_EPT_EXECUTABLE_MASK,
cpu_has_vmx_ept_execute_only() ? 0ull : VMX_EPT_READABLE_MASK,
- VMX_EPT_RWX_MASK);
+ VMX_EPT_RWX_MASK, 0ull);
ept_set_mmio_spte_mask();
kvm_enable_tdp();
@@ -7124,34 +7188,32 @@ static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
return 1;
}
+static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
+{
+ vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_SHADOW_VMCS);
+ vmcs_write64(VMCS_LINK_POINTER, -1ull);
+}
+
static inline void nested_release_vmcs12(struct vcpu_vmx *vmx)
{
if (vmx->nested.current_vmptr == -1ull)
return;
- /* current_vmptr and current_vmcs12 are always set/reset together */
- if (WARN_ON(vmx->nested.current_vmcs12 == NULL))
- return;
-
if (enable_shadow_vmcs) {
/* copy to memory all shadowed fields in case
they were modified */
copy_shadow_to_vmcs12(vmx);
vmx->nested.sync_shadow_vmcs = false;
- vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
- SECONDARY_EXEC_SHADOW_VMCS);
- vmcs_write64(VMCS_LINK_POINTER, -1ull);
+ vmx_disable_shadow_vmcs(vmx);
}
vmx->nested.posted_intr_nv = -1;
/* Flush VMCS12 to guest memory */
- memcpy(vmx->nested.current_vmcs12, vmx->nested.cached_vmcs12,
- VMCS12_SIZE);
+ kvm_vcpu_write_guest_page(&vmx->vcpu,
+ vmx->nested.current_vmptr >> PAGE_SHIFT,
+ vmx->nested.cached_vmcs12, 0, VMCS12_SIZE);
- kunmap(vmx->nested.current_vmcs12_page);
- nested_release_page(vmx->nested.current_vmcs12_page);
vmx->nested.current_vmptr = -1ull;
- vmx->nested.current_vmcs12 = NULL;
}
/*
@@ -7165,12 +7227,14 @@ static void free_nested(struct vcpu_vmx *vmx)
vmx->nested.vmxon = false;
free_vpid(vmx->nested.vpid02);
- nested_release_vmcs12(vmx);
+ vmx->nested.posted_intr_nv = -1;
+ vmx->nested.current_vmptr = -1ull;
if (vmx->nested.msr_bitmap) {
free_page((unsigned long)vmx->nested.msr_bitmap);
vmx->nested.msr_bitmap = NULL;
}
if (enable_shadow_vmcs) {
+ vmx_disable_shadow_vmcs(vmx);
vmcs_clear(vmx->vmcs01.shadow_vmcs);
free_vmcs(vmx->vmcs01.shadow_vmcs);
vmx->vmcs01.shadow_vmcs = NULL;
@@ -7569,14 +7633,14 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
}
nested_release_vmcs12(vmx);
- vmx->nested.current_vmcs12 = new_vmcs12;
- vmx->nested.current_vmcs12_page = page;
/*
* Load VMCS12 from guest memory since it is not already
* cached.
*/
- memcpy(vmx->nested.cached_vmcs12,
- vmx->nested.current_vmcs12, VMCS12_SIZE);
+ memcpy(vmx->nested.cached_vmcs12, new_vmcs12, VMCS12_SIZE);
+ kunmap(page);
+ nested_release_page_clean(page);
+
set_current_vmptr(vmx, vmptr);
}
@@ -8009,12 +8073,11 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
* should handle it ourselves in L0 (and then continue L2). Only call this
* when in is_guest_mode (L2).
*/
-static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
+static bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
{
u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
- u32 exit_reason = vmx->exit_reason;
trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason,
vmcs_readl(EXIT_QUALIFICATION),
@@ -8023,6 +8086,18 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
KVM_ISA_VMX);
+ /*
+ * The host physical addresses of some pages of guest memory
+ * are loaded into VMCS02 (e.g. L1's Virtual APIC Page). The CPU
+ * may write to these pages via their host physical address while
+ * L2 is running, bypassing any address-translation-based dirty
+ * tracking (e.g. EPT write protection).
+ *
+ * Mark them dirty on every exit from L2 to prevent them from
+ * getting out of sync with dirty tracking.
+ */
+ nested_mark_vmcs12_pages_dirty(vcpu);
+
if (vmx->nested.nested_run_pending)
return false;
@@ -8159,6 +8234,29 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
}
}
+static int nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason)
+{
+ u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+
+ /*
+ * At this point, the exit interruption info in exit_intr_info
+ * is only valid for EXCEPTION_NMI exits. For EXTERNAL_INTERRUPT
+ * we need to query the in-kernel LAPIC.
+ */
+ WARN_ON(exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT);
+ if ((exit_intr_info &
+ (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) ==
+ (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) {
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ vmcs12->vm_exit_intr_error_code =
+ vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
+ }
+
+ nested_vmx_vmexit(vcpu, exit_reason, exit_intr_info,
+ vmcs_readl(EXIT_QUALIFICATION));
+ return 1;
+}
+
static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
{
*info1 = vmcs_readl(EXIT_QUALIFICATION);
@@ -8405,12 +8503,8 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
if (vmx->emulation_required)
return handle_invalid_guest_state(vcpu);
- if (is_guest_mode(vcpu) && nested_vmx_exit_handled(vcpu)) {
- nested_vmx_vmexit(vcpu, exit_reason,
- vmcs_read32(VM_EXIT_INTR_INFO),
- vmcs_readl(EXIT_QUALIFICATION));
- return 1;
- }
+ if (is_guest_mode(vcpu) && nested_vmx_exit_reflected(vcpu, exit_reason))
+ return nested_vmx_reflect_vmexit(vcpu, exit_reason);
if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
dump_vmcs();
@@ -8685,7 +8779,7 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
desc = (gate_desc *)vmx->host_idt_base + vector;
- entry = gate_offset(*desc);
+ entry = gate_offset(desc);
asm volatile(
#ifdef CONFIG_X86_64
"mov %%" _ASM_SP ", %[sp]\n\t"
@@ -8736,7 +8830,7 @@ static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
- if (vmx->nmi_known_unmasked)
+ if (vmx->loaded_vmcs->nmi_known_unmasked)
return;
/*
* Can't use vmx->exit_intr_info since we're not sure what
@@ -8760,7 +8854,7 @@ static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
GUEST_INTR_STATE_NMI);
else
- vmx->nmi_known_unmasked =
+ vmx->loaded_vmcs->nmi_known_unmasked =
!(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
& GUEST_INTR_STATE_NMI);
}
@@ -8919,8 +9013,10 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
vmx_set_interrupt_shadow(vcpu, 0);
- if (vmx->guest_pkru_valid)
- __write_pkru(vmx->guest_pkru);
+ if (static_cpu_has(X86_FEATURE_PKU) &&
+ kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
+ vcpu->arch.pkru != vmx->host_pkru)
+ __write_pkru(vcpu->arch.pkru);
atomic_switch_perf_msrs(vmx);
debugctlmsr = get_debugctlmsr();
@@ -9068,13 +9164,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
* back on host, so it is safe to read guest PKRU from current
* XSAVE.
*/
- if (boot_cpu_has(X86_FEATURE_OSPKE)) {
- vmx->guest_pkru = __read_pkru();
- if (vmx->guest_pkru != vmx->host_pkru) {
- vmx->guest_pkru_valid = true;
+ if (static_cpu_has(X86_FEATURE_PKU) &&
+ kvm_read_cr4_bits(vcpu, X86_CR4_PKE)) {
+ vcpu->arch.pkru = __read_pkru();
+ if (vcpu->arch.pkru != vmx->host_pkru)
__write_pkru(vmx->host_pkru);
- } else
- vmx->guest_pkru_valid = false;
}
/*
@@ -9213,7 +9307,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
vmx->nested.posted_intr_nv = -1;
vmx->nested.current_vmptr = -1ull;
- vmx->nested.current_vmcs12 = NULL;
vmx->msr_ia32_feature_control_valid_bits = FEATURE_CONTROL_LOCKED;
@@ -9499,12 +9592,15 @@ static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
WARN_ON(!is_guest_mode(vcpu));
- if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code))
- nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason,
- vmcs_read32(VM_EXIT_INTR_INFO),
- vmcs_readl(EXIT_QUALIFICATION));
- else
+ if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code)) {
+ vmcs12->vm_exit_intr_error_code = fault->error_code;
+ nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
+ PF_VECTOR | INTR_TYPE_HARD_EXCEPTION |
+ INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK,
+ fault->address);
+ } else {
kvm_inject_page_fault(vcpu, fault);
+ }
}
static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
@@ -10032,6 +10128,8 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
vmcs12->vm_entry_instruction_len);
vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
vmcs12->guest_interruptibility_info);
+ vmx->loaded_vmcs->nmi_known_unmasked =
+ !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI);
} else {
vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
}
@@ -10056,13 +10154,9 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
/* Posted interrupts setting is only taken from vmcs12. */
if (nested_cpu_has_posted_intr(vmcs12)) {
- /*
- * Note that we use L0's vector here and in
- * vmx_deliver_nested_posted_interrupt.
- */
vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
vmx->nested.pi_pending = false;
- vmcs_write16(POSTED_INTR_NV, POSTED_INTR_VECTOR);
+ vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR);
} else {
exec_control &= ~PIN_BASED_POSTED_INTR;
}
@@ -10086,12 +10180,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
* "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
* vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
* !enable_ept, EB.PF is 1, so the "or" will always be 1.
- *
- * A problem with this approach (when !enable_ept) is that L1 may be
- * injected with more page faults than it asked for. This could have
- * caused problems, but in practice existing hypervisors don't care.
- * To fix this, we will need to emulate the PFEC checking (on the L1
- * page tables), using walk_addr(), when injecting PFs to L1.
*/
vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK,
enable_ept ? vmcs12->page_fault_error_code_mask : 0);
@@ -10488,6 +10576,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
{
struct vmcs12 *vmcs12;
struct vcpu_vmx *vmx = to_vmx(vcpu);
+ u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
u32 exit_qual;
int ret;
@@ -10512,6 +10601,12 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
* for misconfigurations which will anyway be caught by the processor
* when using the merged vmcs02.
*/
+ if (interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS) {
+ nested_vmx_failValid(vcpu,
+ VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS);
+ goto out;
+ }
+
if (vmcs12->launch_state == launch) {
nested_vmx_failValid(vcpu,
launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
@@ -10832,13 +10927,8 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
vmcs12->vm_exit_reason = exit_reason;
vmcs12->exit_qualification = exit_qualification;
-
vmcs12->vm_exit_intr_info = exit_intr_info;
- if ((vmcs12->vm_exit_intr_info &
- (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) ==
- (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK))
- vmcs12->vm_exit_intr_error_code =
- vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
+
vmcs12->idt_vectoring_info_field = 0;
vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
@@ -10926,7 +11016,9 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
*/
vmx_flush_tlb(vcpu);
}
-
+ /* Restore posted intr vector. */
+ if (nested_cpu_has_posted_intr(vmcs12))
+ vmcs_write16(POSTED_INTR_NV, POSTED_INTR_VECTOR);
vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs);
vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp);
@@ -11032,8 +11124,15 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
vmx_switch_vmcs(vcpu, &vmx->vmcs01);
- if ((exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT)
- && nested_exit_intr_ack_set(vcpu)) {
+ /*
+ * TODO: SDM says that with acknowledge interrupt on exit, bit 31 of
+ * the VM-exit interrupt information (valid interrupt) is always set to
+ * 1 on EXIT_REASON_EXTERNAL_INTERRUPT, so we shouldn't need
+ * kvm_cpu_has_interrupt(). See the commit message for details.
+ */
+ if (nested_exit_intr_ack_set(vcpu) &&
+ exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT &&
+ kvm_cpu_has_interrupt(vcpu)) {
int irq = kvm_cpu_get_interrupt(vcpu);
WARN_ON(irq < 0);
vmcs12->vm_exit_intr_info = irq |
@@ -11576,8 +11675,6 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.get_rflags = vmx_get_rflags,
.set_rflags = vmx_set_rflags,
- .get_pkru = vmx_get_pkru,
-
.tlb_flush = vmx_flush_tlb,
.run = vmx_vcpu_run,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5b8f07889f6a..ef5102f80497 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -54,6 +54,7 @@
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
#include <linux/sched/stat.h>
+#include <linux/mem_encrypt.h>
#include <trace/events/kvm.h>
@@ -597,8 +598,8 @@ bool pdptrs_changed(struct kvm_vcpu *vcpu)
(unsigned long *)&vcpu->arch.regs_avail))
return true;
- gfn = (kvm_read_cr3(vcpu) & ~31u) >> PAGE_SHIFT;
- offset = (kvm_read_cr3(vcpu) & ~31u) & (PAGE_SIZE - 1);
+ gfn = (kvm_read_cr3(vcpu) & 0xffffffe0ul) >> PAGE_SHIFT;
+ offset = (kvm_read_cr3(vcpu) & 0xffffffe0ul) & (PAGE_SIZE - 1);
r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
PFERR_USER_MASK | PFERR_WRITE_MASK);
if (r < 0)
@@ -3159,15 +3160,18 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
kvm_set_hflags(vcpu, hflags);
vcpu->arch.smi_pending = events->smi.pending;
- if (events->smi.smm_inside_nmi)
- vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
- else
- vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK;
- if (lapic_in_kernel(vcpu)) {
- if (events->smi.latched_init)
- set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
+
+ if (events->smi.smm) {
+ if (events->smi.smm_inside_nmi)
+ vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
else
- clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
+ vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK;
+ if (lapic_in_kernel(vcpu)) {
+ if (events->smi.latched_init)
+ set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
+ else
+ clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
+ }
}
}
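For context, a hedged userspace-side sketch (not from this patch) of driving the restructured ioctl: the SMM sub-fields only take effect when KVM_VCPUEVENT_VALID_SMM is set in flags, and events->smi.smm says whether the vCPU is in SMM at all, which is what the nesting above enforces on the kernel side.

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

/* Sketch only: take a vCPU out of SMM via KVM_SET_VCPU_EVENTS. */
static int illustrative_leave_smm(int vcpu_fd)
{
	struct kvm_vcpu_events ev;

	memset(&ev, 0, sizeof(ev));
	if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &ev) < 0)
		return -1;

	ev.flags |= KVM_VCPUEVENT_VALID_SMM;
	ev.smi.smm = 0;			/* not in SMM ...           */
	ev.smi.smm_inside_nmi = 0;	/* ... so these become moot */
	ev.smi.latched_init = 0;

	return ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &ev);
}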
@@ -3242,7 +3246,12 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
u32 size, offset, ecx, edx;
cpuid_count(XSTATE_CPUID, index,
&size, &offset, &ecx, &edx);
- memcpy(dest + offset, src, size);
+ if (feature == XFEATURE_MASK_PKRU)
+ memcpy(dest + offset, &vcpu->arch.pkru,
+ sizeof(vcpu->arch.pkru));
+ else
+ memcpy(dest + offset, src, size);
+
}
valid -= feature;
@@ -3280,7 +3289,11 @@ static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
u32 size, offset, ecx, edx;
cpuid_count(XSTATE_CPUID, index,
&size, &offset, &ecx, &edx);
- memcpy(dest, src + offset, size);
+ if (feature == XFEATURE_MASK_PKRU)
+ memcpy(&vcpu->arch.pkru, src + offset,
+ sizeof(vcpu->arch.pkru));
+ else
+ memcpy(dest, src + offset, size);
}
valid -= feature;
@@ -6113,7 +6126,7 @@ int kvm_arch_init(void *opaque)
kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
PT_DIRTY_MASK, PT64_NX_MASK, 0,
- PT_PRESENT_MASK, 0);
+ PT_PRESENT_MASK, 0, sme_me_mask);
kvm_timer_init();
perf_register_guest_info_callbacks(&kvm_guest_cbs);
@@ -6215,6 +6228,7 @@ static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid)
lapic_irq.shorthand = 0;
lapic_irq.dest_mode = 0;
+ lapic_irq.level = 0;
lapic_irq.dest_id = apicid;
lapic_irq.msi_redir_hint = false;
@@ -6721,17 +6735,6 @@ void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_vcpu_reload_apic_access_page);
-void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
- unsigned long address)
-{
- /*
- * The physical address of apic access page is stored in the VMCS.
- * Update it when it becomes invalid.
- */
- if (address == gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT))
- kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
-}
-
/*
* Returns 1 to let vcpu_run() continue the guest execution loop without
* exiting to the userspace. Otherwise, the value will be returned to the
@@ -7629,7 +7632,9 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
*/
vcpu->guest_fpu_loaded = 1;
__kernel_fpu_begin();
- __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state);
+ /* PKRU is separately restored in kvm_x86_ops->run. */
+ __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state,
+ ~XFEATURE_MASK_PKRU);
trace_kvm_fpu(1);
}
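The ~XFEATURE_MASK_PKRU mask above relies on protection keys being a dedicated XSAVE component; as a small sketch with assumed names (not taken from this patch), PKRU is xstate component 9, so the "everything except PKRU" restore mask is just that bit cleared:

/* Sketch only: the protection-keys xstate component and the restore mask. */
#define ILLUSTRATIVE_XFEATURE_PKRU		9
#define ILLUSTRATIVE_XFEATURE_MASK_PKRU	(1ULL << ILLUSTRATIVE_XFEATURE_PKRU)

static const unsigned long long illustrative_restore_mask =
	~ILLUSTRATIVE_XFEATURE_MASK_PKRU;	/* restore all components but PKRU */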
diff --git a/arch/x86/lguest/Kconfig b/arch/x86/lguest/Kconfig
deleted file mode 100644
index 08f41caada45..000000000000
--- a/arch/x86/lguest/Kconfig
+++ /dev/null
@@ -1,14 +0,0 @@
-config LGUEST_GUEST
- bool "Lguest guest support"
- depends on X86_32 && PARAVIRT && PCI
- select TTY
- select VIRTUALIZATION
- select VIRTIO
- select VIRTIO_CONSOLE
- help
- Lguest is a tiny in-kernel hypervisor. Selecting this will
- allow your kernel to boot under lguest. This option will increase
- your kernel size by about 10k. If in doubt, say N.
-
- If you say Y here, make sure you say Y (or M) to the virtio block
- and net drivers which lguest needs.
diff --git a/arch/x86/lguest/Makefile b/arch/x86/lguest/Makefile
deleted file mode 100644
index 8f38d577a2fa..000000000000
--- a/arch/x86/lguest/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-obj-y := head_32.o boot.o
-CFLAGS_boot.o := $(call cc-option, -fno-stack-protector)
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
deleted file mode 100644
index 99472698c931..000000000000
--- a/arch/x86/lguest/boot.c
+++ /dev/null
@@ -1,1558 +0,0 @@
-/*P:010
- * A hypervisor allows multiple Operating Systems to run on a single machine.
- * To quote David Wheeler: "Any problem in computer science can be solved with
- * another layer of indirection."
- *
- * We keep things simple in two ways. First, we start with a normal Linux
- * kernel and insert a module (lg.ko) which allows us to run other Linux
- * kernels the same way we'd run processes. We call the first kernel the Host,
- * and the others the Guests. The program which sets up and configures Guests
- * (such as the example in tools/lguest/lguest.c) is called the Launcher.
- *
- * Secondly, we only run specially modified Guests, not normal kernels: setting
- * CONFIG_LGUEST_GUEST to "y" compiles this file into the kernel so it knows
- * how to be a Guest at boot time. This means that you can use the same kernel
- * you boot normally (ie. as a Host) as a Guest.
- *
- * These Guests know that they cannot do privileged operations, such as disable
- * interrupts, and that they have to ask the Host to do such things explicitly.
- * This file consists of all the replacements for such low-level native
- * hardware operations: these special Guest versions call the Host.
- *
- * So how does the kernel know it's a Guest? We'll see that later, but let's
- * just say that we end up here where we replace the native functions in various
- * "paravirt" structures with our Guest versions, then boot like normal.
-:*/
-
-/*
- * Copyright (C) 2006, Rusty Russell <rusty@rustcorp.com.au> IBM Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-#include <linux/kernel.h>
-#include <linux/start_kernel.h>
-#include <linux/string.h>
-#include <linux/console.h>
-#include <linux/screen_info.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-#include <linux/clocksource.h>
-#include <linux/clockchips.h>
-#include <linux/lguest.h>
-#include <linux/lguest_launcher.h>
-#include <linux/virtio_console.h>
-#include <linux/pm.h>
-#include <linux/export.h>
-#include <linux/pci.h>
-#include <linux/virtio_pci.h>
-#include <asm/acpi.h>
-#include <asm/apic.h>
-#include <asm/lguest.h>
-#include <asm/paravirt.h>
-#include <asm/param.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/desc.h>
-#include <asm/setup.h>
-#include <asm/e820/api.h>
-#include <asm/mce.h>
-#include <asm/io.h>
-#include <asm/fpu/api.h>
-#include <asm/stackprotector.h>
-#include <asm/reboot.h> /* for struct machine_ops */
-#include <asm/kvm_para.h>
-#include <asm/pci_x86.h>
-#include <asm/pci-direct.h>
-
-/*G:010
- * Welcome to the Guest!
- *
- * The Guest in our tale is a simple creature: identical to the Host but
- * behaving in simplified but equivalent ways. In particular, the Guest is the
- * same kernel as the Host (or at least, built from the same source code).
-:*/
-
-struct lguest_data lguest_data = {
- .hcall_status = { [0 ... LHCALL_RING_SIZE-1] = 0xFF },
- .noirq_iret = (u32)lguest_noirq_iret,
- .kernel_address = PAGE_OFFSET,
- .blocked_interrupts = { 1 }, /* Block timer interrupts */
- .syscall_vec = IA32_SYSCALL_VECTOR,
-};
-
-/*G:037
- * async_hcall() is pretty simple: I'm quite proud of it really. We have a
- * ring buffer of stored hypercalls which the Host will run through next time we
- * do a normal hypercall. Each entry in the ring has 5 slots for the hypercall
- * arguments, and a "hcall_status" word which is 0 if the call is ready to go,
- * and 255 once the Host has finished with it.
- *
- * If we come around to a slot which hasn't been finished, then the table is
- * full and we just make the hypercall directly. This has the nice side
- * effect of causing the Host to run all the stored calls in the ring buffer
- * which empties it for next time!
- */
-static void async_hcall(unsigned long call, unsigned long arg1,
- unsigned long arg2, unsigned long arg3,
- unsigned long arg4)
-{
- /* Note: This code assumes we're uniprocessor. */
- static unsigned int next_call;
- unsigned long flags;
-
- /*
- * Disable interrupts if not already disabled: we don't want an
- * interrupt handler making a hypercall while we're already doing
- * one!
- */
- local_irq_save(flags);
- if (lguest_data.hcall_status[next_call] != 0xFF) {
- /* Table full, so do normal hcall which will flush table. */
- hcall(call, arg1, arg2, arg3, arg4);
- } else {
- lguest_data.hcalls[next_call].arg0 = call;
- lguest_data.hcalls[next_call].arg1 = arg1;
- lguest_data.hcalls[next_call].arg2 = arg2;
- lguest_data.hcalls[next_call].arg3 = arg3;
- lguest_data.hcalls[next_call].arg4 = arg4;
- /* Arguments must all be written before we mark it to go */
- wmb();
- lguest_data.hcall_status[next_call] = 0;
- if (++next_call == LHCALL_RING_SIZE)
- next_call = 0;
- }
- local_irq_restore(flags);
-}
-
-/*G:035
- * Notice the lazy_hcall() above, rather than hcall(). This is our first real
- * optimization trick!
- *
- * When lazy_mode is set, it means we're allowed to defer all hypercalls and do
- * them as a batch when lazy_mode is eventually turned off. Because hypercalls
- * are reasonably expensive, batching them up makes sense. For example, a
- * large munmap might update dozens of page table entries: that code calls
- * paravirt_enter_lazy_mmu(), does the dozen updates, then calls
- * lguest_leave_lazy_mode().
- *
- * So, when we're in lazy mode, we call async_hcall() to store the call for
- * future processing:
- */
-static void lazy_hcall1(unsigned long call, unsigned long arg1)
-{
- if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
- hcall(call, arg1, 0, 0, 0);
- else
- async_hcall(call, arg1, 0, 0, 0);
-}
-
-/* You can imagine what lazy_hcall2, 3 and 4 look like. :*/
-static void lazy_hcall2(unsigned long call,
- unsigned long arg1,
- unsigned long arg2)
-{
- if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
- hcall(call, arg1, arg2, 0, 0);
- else
- async_hcall(call, arg1, arg2, 0, 0);
-}
-
-static void lazy_hcall3(unsigned long call,
- unsigned long arg1,
- unsigned long arg2,
- unsigned long arg3)
-{
- if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
- hcall(call, arg1, arg2, arg3, 0);
- else
- async_hcall(call, arg1, arg2, arg3, 0);
-}
-
-#ifdef CONFIG_X86_PAE
-static void lazy_hcall4(unsigned long call,
- unsigned long arg1,
- unsigned long arg2,
- unsigned long arg3,
- unsigned long arg4)
-{
- if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
- hcall(call, arg1, arg2, arg3, arg4);
- else
- async_hcall(call, arg1, arg2, arg3, arg4);
-}
-#endif
-
-/*G:036
- * When lazy mode is turned off, we issue the do-nothing hypercall to
- * flush any stored calls, and call the generic helper to reset the
- * per-cpu lazy mode variable.
- */
-static void lguest_leave_lazy_mmu_mode(void)
-{
- hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0, 0);
- paravirt_leave_lazy_mmu();
-}
-
-/*
- * We also catch the end of context switch; we enter lazy mode for much of
- * that too, so again we need to flush here.
- *
- * (Technically, this is lazy CPU mode, and normally we're in lazy MMU
- * mode, but unlike Xen, lguest doesn't care about the difference).
- */
-static void lguest_end_context_switch(struct task_struct *next)
-{
- hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0, 0);
- paravirt_end_context_switch(next);
-}
-
-/*G:032
- * After that diversion we return to our first native-instruction
- * replacements: four functions for interrupt control.
- *
- * The simplest way of implementing these would be to have "turn interrupts
- * off" and "turn interrupts on" hypercalls. Unfortunately, this is too slow:
- * these are by far the most commonly called functions of those we override.
- *
- * So instead we keep an "irq_enabled" field inside our "struct lguest_data",
- * which the Guest can update with a single instruction. The Host knows to
- * check there before it tries to deliver an interrupt.
- */
-
-/*
- * save_flags() is expected to return the processor state (ie. "flags"). The
- * flags word contains all kind of stuff, but in practice Linux only cares
- * about the interrupt flag. Our "save_flags()" just returns that.
- */
-asmlinkage __visible unsigned long lguest_save_fl(void)
-{
- return lguest_data.irq_enabled;
-}
-
-/* Interrupts go off... */
-asmlinkage __visible void lguest_irq_disable(void)
-{
- lguest_data.irq_enabled = 0;
-}
-
-/*
- * Let's pause a moment. Remember how I said these are called so often?
- * Jeremy Fitzhardinge optimized them so hard early in 2009 that he had to
- * break some rules. In particular, these functions are assumed to save their
- * own registers if they need to: normal C functions assume they can trash the
- * eax register. To use normal C functions, we use
- * PV_CALLEE_SAVE_REGS_THUNK(), which pushes %eax onto the stack, calls the
- * C function, then restores it.
- */
-PV_CALLEE_SAVE_REGS_THUNK(lguest_save_fl);
-PV_CALLEE_SAVE_REGS_THUNK(lguest_irq_disable);
-/*:*/
-
-/* These are in head_32.S */
-extern void lg_irq_enable(void);
-extern void lg_restore_fl(unsigned long flags);
-
-/*M:003
- * We could be more efficient in our checking of outstanding interrupts, rather
- * than using a branch. One way would be to put the "irq_enabled" field in a
- * page by itself, and have the Host write-protect it when an interrupt comes
- * in when irqs are disabled. There will then be a page fault as soon as
- * interrupts are re-enabled.
- *
- * A better method is to implement soft interrupt disable generally for x86:
- * instead of disabling interrupts, we set a flag. If an interrupt does come
- * in, we then disable them for real. This is uncommon, so we could simply use
- * a hypercall for interrupt control and not worry about efficiency.
-:*/
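A rough sketch of the "soft disable" idea described above, purely illustrative (lguest never implemented it, and deliver_pending_interrupt() is a made-up helper): disabling interrupts only sets a flag, and an interrupt that arrives while the flag is set is remembered and replayed when interrupts are notionally re-enabled.

static bool irqs_soft_disabled;
static bool irq_was_pending;

static void soft_irq_disable(void)
{
	irqs_soft_disabled = true;		/* no hypercall, just a flag */
}

static void soft_irq_enable(void)
{
	irqs_soft_disabled = false;
	if (irq_was_pending) {
		irq_was_pending = false;
		deliver_pending_interrupt();	/* hypothetical helper */
	}
}

/* Called from the real interrupt entry path. */
static bool soft_irq_should_defer(void)
{
	if (irqs_soft_disabled) {
		irq_was_pending = true;		/* replay on soft_irq_enable() */
		return true;
	}
	return false;
}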
-
-/*G:034
- * The Interrupt Descriptor Table (IDT).
- *
- * The IDT tells the processor what to do when an interrupt comes in. Each
- * entry in the table is a 64-bit descriptor: this holds the privilege level,
- * address of the handler, and... well, who cares? The Guest just asks the
- * Host to make the change anyway, because the Host controls the real IDT.
- */
-static void lguest_write_idt_entry(gate_desc *dt,
- int entrynum, const gate_desc *g)
-{
- /*
- * The gate_desc structure is 8 bytes long: we hand it to the Host in
- * two 32-bit chunks. The whole 32-bit kernel used to hand descriptors
- * around like this; typesafety wasn't a big concern in Linux's early
- * years.
- */
- u32 *desc = (u32 *)g;
- /* Keep the local copy up to date. */
- native_write_idt_entry(dt, entrynum, g);
- /* Tell Host about this new entry. */
- hcall(LHCALL_LOAD_IDT_ENTRY, entrynum, desc[0], desc[1], 0);
-}
-
-/*
- * Changing to a different IDT is very rare: we keep the IDT up-to-date every
- * time it is written, so we can simply loop through all entries and tell the
- * Host about them.
- */
-static void lguest_load_idt(const struct desc_ptr *desc)
-{
- unsigned int i;
- struct desc_struct *idt = (void *)desc->address;
-
- for (i = 0; i < (desc->size+1)/8; i++)
- hcall(LHCALL_LOAD_IDT_ENTRY, i, idt[i].a, idt[i].b, 0);
-}
-
-/*
- * The Global Descriptor Table.
- *
- * The Intel architecture defines another table, called the Global Descriptor
- * Table (GDT). You tell the CPU where it is (and its size) using the "lgdt"
- * instruction, and then several other instructions refer to entries in the
- * table. There are three entries which the Switcher needs, so the Host simply
- * controls the entire thing and the Guest asks it to make changes using the
- * LOAD_GDT hypercall.
- *
- * This is exactly like the IDT code.
- */
-static void lguest_load_gdt(const struct desc_ptr *desc)
-{
- unsigned int i;
- struct desc_struct *gdt = (void *)desc->address;
-
- for (i = 0; i < (desc->size+1)/8; i++)
- hcall(LHCALL_LOAD_GDT_ENTRY, i, gdt[i].a, gdt[i].b, 0);
-}
-
-/*
- * For a single GDT entry which changes, we simply change our copy and
- * then tell the host about it.
- */
-static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum,
- const void *desc, int type)
-{
- native_write_gdt_entry(dt, entrynum, desc, type);
- /* Tell Host about this new entry. */
- hcall(LHCALL_LOAD_GDT_ENTRY, entrynum,
- dt[entrynum].a, dt[entrynum].b, 0);
-}
-
-/*
- * There are three "thread local storage" GDT entries which change
- * on every context switch (these three entries are how glibc implements
- * __thread variables). As an optimization, we have a hypercall
- * specifically for this case.
- *
- * Wouldn't it be nicer to have a general LOAD_GDT_ENTRIES hypercall
- * which took a range of entries?
- */
-static void lguest_load_tls(struct thread_struct *t, unsigned int cpu)
-{
- /*
- * There's one problem which normal hardware doesn't have: the Host
- * can't handle us removing entries we're currently using. So we clear
- * the GS register here: if it's needed it'll be reloaded anyway.
- */
- lazy_load_gs(0);
- lazy_hcall2(LHCALL_LOAD_TLS, __pa(&t->tls_array), cpu);
-}
-
-/*G:038
- * That's enough excitement for now, back to ploughing through each of the
- * different pv_ops structures (we're about 1/3 of the way through).
- *
- * This is the Local Descriptor Table, another weird Intel thingy. Linux only
- * uses this for some strange applications like Wine. We don't do anything
- * here, so they'll get an informative and friendly Segmentation Fault.
- */
-static void lguest_set_ldt(const void *addr, unsigned entries)
-{
-}
-
-/*
- * This loads a GDT entry into the "Task Register": that entry points to a
- * structure called the Task State Segment. Some comments scattered through the
- * kernel code indicate that this was used for task switching in ages past, along
- * with blood sacrifice and astrology.
- *
- * Now there's nothing interesting in here that we don't get told elsewhere.
- * But the native version uses the "ltr" instruction, which makes the Host
- * complain to the Guest about a Segmentation Fault and it'll oops. So we
- * override the native version with a do-nothing version.
- */
-static void lguest_load_tr_desc(void)
-{
-}
-
-/*
- * The "cpuid" instruction is a way of querying both the CPU identity
- * (manufacturer, model, etc) and its features. It was introduced before the
- * Pentium in 1993 and keeps getting extended by Intel, AMD and others.
- * As you might imagine, after a decade and a half of this treatment, it is now a
- * giant ball of hair. Its entry in the current Intel manual runs to 28 pages.
- *
- * This instruction even has its own Wikipedia entry. The Wikipedia entry
- * has been translated into 6 languages. I am not making this up!
- *
- * We could get funky here and identify ourselves as "GenuineLguest", but
- * instead we just use the real "cpuid" instruction. Then I pretty much turned
- * off feature bits until the Guest booted. (Don't say that: you'll damage
- * lguest sales!) Shut up, inner voice! (Hey, just pointing out that this is
- * hardly future proof.) No one's listening! They don't like you anyway,
- * parenthetic weirdo!
- *
- * Replacing the cpuid so we can turn features off is great for the kernel, but
- * anyone (including userspace) can just use the raw "cpuid" instruction and
- * the Host won't even notice since it isn't privileged. So we try not to get
- * too worked up about it.
- */
-static void lguest_cpuid(unsigned int *ax, unsigned int *bx,
- unsigned int *cx, unsigned int *dx)
-{
- int function = *ax;
-
- native_cpuid(ax, bx, cx, dx);
- switch (function) {
- /*
- * CPUID 0 gives the highest legal CPUID number (and the ID string).
- * We futureproof our code a little by sticking to known CPUID values.
- */
- case 0:
- if (*ax > 5)
- *ax = 5;
- break;
-
- /*
- * CPUID 1 is a basic feature request.
- *
- * CX: we only allow kernel to see SSE3, CMPXCHG16B and SSSE3
- * DX: SSE, SSE2, FXSR, MMX, CMOV, CMPXCHG8B, TSC, FPU and PAE.
- */
- case 1:
- *cx &= 0x00002201;
- *dx &= 0x07808151;
- /*
- * The Host can do a nice optimization if it knows that the
- * kernel mappings (addresses above 0xC0000000 or whatever
- * PAGE_OFFSET is set to) haven't changed. But Linux calls
- * flush_tlb_user() for both user and kernel mappings unless
- * the Page Global Enable (PGE) feature bit is set.
- */
- *dx |= 0x00002000;
- /*
- * We also lie, and say we're family id 5. 6 or greater
- * leads to a rdmsr in early_init_intel which we can't handle.
- * Family ID is returned as bits 8-12 in ax.
- */
- *ax &= 0xFFFFF0FF;
- *ax |= 0x00000500;
- break;
-
- /*
- * This is used to detect if we're running under KVM. We might be,
- * but that's a Host matter, not us. So say we're not.
- */
- case KVM_CPUID_SIGNATURE:
- *bx = *cx = *dx = 0;
- break;
-
- /*
- * 0x80000000 returns the highest Extended Function, so we futureproof
- * like we do above by limiting it to known fields.
- */
- case 0x80000000:
- if (*ax > 0x80000008)
- *ax = 0x80000008;
- break;
-
- /*
- * PAE systems can mark pages as non-executable. Linux calls this the
- * NX bit. Intel calls it XD (eXecute Disable), AMD EVP (Enhanced
- * Virus Protection). We just switch it off here, since we don't
- * support it.
- */
- case 0x80000001:
- *dx &= ~(1 << 20);
- break;
- }
-}
-
-/*
- * Intel has four control registers, imaginatively named cr0, cr2, cr3 and cr4.
- * I assume there's a cr1, but it hasn't bothered us yet, so we'll not bother
- * it. The Host needs to know when the Guest wants to change them, so we have
- * a whole series of functions like read_cr0() and write_cr0().
- *
- * We start with cr0. cr0 allows you to turn on and off all kinds of basic
- * features, but the only cr0 bit that Linux ever used at runtime was the
- * horrifically-named Task Switched (TS) bit at bit 3 (ie. 8)
- *
- * What does the TS bit do? Well, it causes the CPU to trap (interrupt 7) if
- * the floating point unit is used. Which allows us to restore FPU state
- * lazily after a task switch if we wanted to, but wouldn't a name like
- * "FPUTRAP bit" be a little less cryptic?
- *
- * Fortunately, Linux keeps it simple and doesn't use TS, so we can ignore
- * cr0.
- */
-static void lguest_write_cr0(unsigned long val)
-{
-}
-
-static unsigned long lguest_read_cr0(void)
-{
- return 0;
-}
-
-/*
- * cr2 is the virtual address of the last page fault, which the Guest only ever
- * reads. The Host kindly writes this into our "struct lguest_data", so we
- * just read it out of there.
- */
-static unsigned long lguest_read_cr2(void)
-{
- return lguest_data.cr2;
-}
-
-/* See lguest_set_pte() below. */
-static bool cr3_changed = false;
-static unsigned long current_cr3;
-
-/*
- * cr3 is the current toplevel pagetable page: the principle is the same as
- * cr0. Keep a local copy, and tell the Host when it changes.
- */
-static void lguest_write_cr3(unsigned long cr3)
-{
- lazy_hcall1(LHCALL_NEW_PGTABLE, cr3);
- current_cr3 = cr3;
-
- /* These two page tables are simple, linear, and used during boot */
- if (cr3 != __pa_symbol(swapper_pg_dir) &&
- cr3 != __pa_symbol(initial_page_table))
- cr3_changed = true;
-}
-
-static unsigned long lguest_read_cr3(void)
-{
- return current_cr3;
-}
-
-/* cr4 is used to enable and disable PGE, but we don't care. */
-static unsigned long lguest_read_cr4(void)
-{
- return 0;
-}
-
-static void lguest_write_cr4(unsigned long val)
-{
-}
-
-/*
- * Page Table Handling.
- *
- * Now would be a good time to take a rest and grab a coffee or similarly
- * relaxing stimulant. The easy parts are behind us, and the trek gradually
- * winds uphill from here.
- *
- * Quick refresher: memory is divided into "pages" of 4096 bytes each. The CPU
- * maps virtual addresses to physical addresses using "page tables". We could
- * use one huge index of 1 million entries: each address is 4 bytes, so that's
- * 1024 pages just to hold the page tables. But since most virtual addresses
- * are unused, we use a two level index which saves space. The cr3 register
- * contains the physical address of the top level "page directory" page, which
- * contains physical addresses of up to 1024 second-level pages. Each of these
- * second level pages contains up to 1024 physical addresses of actual pages,
- * or Page Table Entries (PTEs).
- *
- * Here's a diagram, where arrows indicate physical addresses:
- *
- * cr3 ---> +---------+
- * | --------->+---------+
- * | | | PADDR1 |
- * Mid-level | | PADDR2 |
- * (PMD) page | | |
- * | | Lower-level |
- * | | (PTE) page |
- * | | | |
- * .... ....
- *
- * So to convert a virtual address to a physical address, we look up the top
- * level, which points us to the second level, which gives us the physical
- * address of that page. If the top level entry was not present, or the second
- * level entry was not present, then the virtual address is invalid (we
- * say "the page was not mapped").
- *
- * Put another way, a 32-bit virtual address is divided up like so:
- *
- * 1 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
- * |<---- 10 bits ---->|<---- 10 bits ---->|<------ 12 bits ------>|
- * Index into top Index into second Offset within page
- * page directory page pagetable page
- *
- * Now, unfortunately, this isn't the whole story: Intel added Physical Address
- * Extension (PAE) to allow 32 bit systems to use 64GB of memory (ie. 36 bits).
- * These are held in 64-bit page table entries, so we can now only fit 512
- * entries in a page, and the neat three-level tree breaks down.
- *
- * The result is a four level page table:
- *
- * cr3 --> [ 4 Upper ]
- * [ Level ]
- * [ Entries ]
- * [(PUD Page)]---> +---------+
- * | --------->+---------+
- * | | | PADDR1 |
- * Mid-level | | PADDR2 |
- * (PMD) page | | |
- * | | Lower-level |
- * | | (PTE) page |
- * | | | |
- * .... ....
- *
- *
- * And the virtual address is decoded as:
- *
- * 1 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
- * |<-2->|<--- 9 bits ---->|<---- 9 bits --->|<------ 12 bits ------>|
- * Index into Index into mid Index into lower Offset within page
- * top entries directory page pagetable page
- *
- * It's too hard to switch between these two formats at runtime, so Linux only
- * supports one or the other depending on whether CONFIG_X86_PAE is set. Many
- * distributions turn it on, and not just for people with silly amounts of
- * memory: the larger PTE entries allow room for the NX bit, which lets the
- * kernel disable execution of pages and increase security.
- *
- * This was a problem for lguest, which couldn't run on these distributions;
- * then Matias Zabaljauregui figured it all out and implemented it, and only a
- * handful of puppies were crushed in the process!
- *
- * Back to our point: the kernel spends a lot of time changing both the
- * top-level page directory and lower-level pagetable pages. The Guest doesn't
- * know physical addresses, so while it maintains these page tables exactly
- * like normal, it also needs to keep the Host informed whenever it makes a
- * change: the Host will create the real page tables based on the Guests'.
- */
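A small worked example of the address splits described above, standalone and illustrative only (not part of lguest): the same 32-bit virtual address decoded with the classic 10/10/12 layout and with the PAE 2/9/9/12 layout.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t vaddr = 0xc0100123;	/* an arbitrary kernel-ish address */

	/* Classic two-level split: 10 + 10 + 12 bits. */
	printf("2-level: pgd=%u pte=%u off=0x%03x\n",
	       vaddr >> 22, (vaddr >> 12) & 0x3ff, vaddr & 0xfff);

	/* PAE split: 2 + 9 + 9 + 12 bits. */
	printf("PAE:     pud=%u pmd=%u pte=%u off=0x%03x\n",
	       vaddr >> 30, (vaddr >> 21) & 0x1ff,
	       (vaddr >> 12) & 0x1ff, vaddr & 0xfff);
	return 0;
}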
-
-/*
- * The Guest calls this after it has set a second-level entry (pte), ie. to map
- * a page into a process' address space. We tell the Host the toplevel and
- * address this corresponds to. The Guest uses one pagetable per process, so
- * we need to tell the Host which one we're changing (mm->pgd).
- */
-static void lguest_pte_update(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep)
-{
-#ifdef CONFIG_X86_PAE
- /* PAE needs to hand a 64 bit page table entry, so it uses two args. */
- lazy_hcall4(LHCALL_SET_PTE, __pa(mm->pgd), addr,
- ptep->pte_low, ptep->pte_high);
-#else
- lazy_hcall3(LHCALL_SET_PTE, __pa(mm->pgd), addr, ptep->pte_low);
-#endif
-}
-
-/* This is the "set and update" combo-meal-deal version. */
-static void lguest_set_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pteval)
-{
- native_set_pte(ptep, pteval);
- lguest_pte_update(mm, addr, ptep);
-}
-
-/*
- * The Guest calls lguest_set_pud to set a top-level entry and lguest_set_pmd
- * to set a middle-level entry when PAE is activated.
- *
- * Again, we set the entry then tell the Host which page we changed,
- * and the index of the entry we changed.
- */
-#ifdef CONFIG_X86_PAE
-static void lguest_set_pud(pud_t *pudp, pud_t pudval)
-{
- native_set_pud(pudp, pudval);
-
- /* 32 bytes aligned pdpt address and the index. */
- lazy_hcall2(LHCALL_SET_PGD, __pa(pudp) & 0xFFFFFFE0,
- (__pa(pudp) & 0x1F) / sizeof(pud_t));
-}
-
-static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
-{
- native_set_pmd(pmdp, pmdval);
- lazy_hcall2(LHCALL_SET_PMD, __pa(pmdp) & PAGE_MASK,
- (__pa(pmdp) & (PAGE_SIZE - 1)) / sizeof(pmd_t));
-}
-#else
-
-/* The Guest calls lguest_set_pmd to set a top-level entry when !PAE. */
-static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
-{
- native_set_pmd(pmdp, pmdval);
- lazy_hcall2(LHCALL_SET_PGD, __pa(pmdp) & PAGE_MASK,
- (__pa(pmdp) & (PAGE_SIZE - 1)) / sizeof(pmd_t));
-}
-#endif
-
-/*
- * There are a couple of legacy places where the kernel sets a PTE, but we
- * don't know the top level any more. This is useless for us, since we don't
- * know which pagetable is changing or what address, so we just tell the Host
- * to forget all of them. Fortunately, this is very rare.
- *
- * ... except in early boot when the kernel sets up the initial pagetables,
- * which makes booting astonishingly slow: 48 seconds! So we don't even tell
- * the Host anything changed until we've done the first real page table switch,
- * which brings boot back to 4.3 seconds.
- */
-static void lguest_set_pte(pte_t *ptep, pte_t pteval)
-{
- native_set_pte(ptep, pteval);
- if (cr3_changed)
- lazy_hcall1(LHCALL_FLUSH_TLB, 1);
-}
-
-#ifdef CONFIG_X86_PAE
-/*
- * With 64-bit PTE values, we need to be careful setting them: if we set 32
- * bits at a time, the hardware could see a weird half-set entry. These
- * versions ensure we update all 64 bits at once.
- */
-static void lguest_set_pte_atomic(pte_t *ptep, pte_t pte)
-{
- native_set_pte_atomic(ptep, pte);
- if (cr3_changed)
- lazy_hcall1(LHCALL_FLUSH_TLB, 1);
-}
-
-static void lguest_pte_clear(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep)
-{
- native_pte_clear(mm, addr, ptep);
- lguest_pte_update(mm, addr, ptep);
-}
-
-static void lguest_pmd_clear(pmd_t *pmdp)
-{
- lguest_set_pmd(pmdp, __pmd(0));
-}
-#endif
-
-/*
- * Unfortunately for Lguest, the pv_mmu_ops for page tables were based on
- * native page table operations. On native hardware you can set a new page
- * table entry whenever you want, but if you want to remove one you have to do
- * a TLB flush (a TLB is a little cache of page table entries kept by the CPU).
- *
- * So the lguest_set_pte_at() and lguest_set_pmd() functions above are only
- * called when a valid entry is written, not when it's removed (ie. marked not
- * present). Instead, this is where we come when the Guest wants to remove a
- * page table entry: we tell the Host to set that entry to 0 (ie. the present
- * bit is zero).
- */
-static void lguest_flush_tlb_single(unsigned long addr)
-{
- /* Simply set it to zero: if it was not, it will fault back in. */
- lazy_hcall3(LHCALL_SET_PTE, current_cr3, addr, 0);
-}
-
-/*
- * This is what happens after the Guest has removed a large number of entries.
- * This tells the Host that any of the page table entries for userspace might
- * have changed, ie. virtual addresses below PAGE_OFFSET.
- */
-static void lguest_flush_tlb_user(void)
-{
- lazy_hcall1(LHCALL_FLUSH_TLB, 0);
-}
-
-/*
- * This is called when the kernel page tables have changed. That's not very
- * common (unless the Guest is using highmem, which makes the Guest extremely
- * slow), so it's worth separating this from the user flushing above.
- */
-static void lguest_flush_tlb_kernel(void)
-{
- lazy_hcall1(LHCALL_FLUSH_TLB, 1);
-}
-
-/*
- * The Unadvanced Programmable Interrupt Controller.
- *
- * This is an attempt to implement the simplest possible interrupt controller.
- * I spent some time looking through routines like set_irq_chip_and_handler,
- * set_irq_chip_and_handler_name, set_irq_chip_data and set_phasers_to_stun and
- * I *think* this is as simple as it gets.
- *
- * We can tell the Host what interrupts we want blocked ready for using the
- * lguest_data.interrupts bitmap, so disabling (aka "masking") them is as
- * simple as setting a bit. We don't actually "ack" interrupts as such, we
- * just mask and unmask them. I wonder if we should be cleverer?
- */
-static void disable_lguest_irq(struct irq_data *data)
-{
- set_bit(data->irq, lguest_data.blocked_interrupts);
-}
-
-static void enable_lguest_irq(struct irq_data *data)
-{
- clear_bit(data->irq, lguest_data.blocked_interrupts);
-}
-
-/* This structure describes the lguest IRQ controller. */
-static struct irq_chip lguest_irq_controller = {
- .name = "lguest",
- .irq_mask = disable_lguest_irq,
- .irq_mask_ack = disable_lguest_irq,
- .irq_unmask = enable_lguest_irq,
-};
-
-/*
- * Interrupt descriptors are allocated as-needed, but low-numbered ones are
- * reserved by the generic x86 code. So we ignore irq_alloc_desc_at if it
- * tells us the irq is already used: other errors (ie. ENOMEM) we take
- * seriously.
- */
-static int lguest_setup_irq(unsigned int irq)
-{
- struct irq_desc *desc;
- int err;
-
- /* Returns -ve error or vector number. */
- err = irq_alloc_desc_at(irq, 0);
- if (err < 0 && err != -EEXIST)
- return err;
-
- /*
- * Tell the Linux infrastructure that the interrupt is
- * controlled by our level-based lguest interrupt controller.
- */
- irq_set_chip_and_handler_name(irq, &lguest_irq_controller,
- handle_level_irq, "level");
-
- /* Some systems map "vectors" to interrupts weirdly. Not us! */
- desc = irq_to_desc(irq);
- __this_cpu_write(vector_irq[FIRST_EXTERNAL_VECTOR + irq], desc);
- return 0;
-}
-
-static int lguest_enable_irq(struct pci_dev *dev)
-{
- int err;
- u8 line = 0;
-
- /* We literally use the PCI interrupt line as the irq number. */
- pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &line);
- err = lguest_setup_irq(line);
- if (!err)
- dev->irq = line;
- return err;
-}
-
-/* We don't do hotplug PCI, so this shouldn't be called. */
-static void lguest_disable_irq(struct pci_dev *dev)
-{
- WARN_ON(1);
-}
-
-/*
- * This sets up the Interrupt Descriptor Table (IDT) entry for each hardware
- * interrupt (except 128, which is used for system calls).
- */
-static void __init lguest_init_IRQ(void)
-{
- unsigned int i;
-
- for (i = FIRST_EXTERNAL_VECTOR; i < FIRST_SYSTEM_VECTOR; i++) {
- if (i != IA32_SYSCALL_VECTOR)
- set_intr_gate(i, irq_entries_start +
- 8 * (i - FIRST_EXTERNAL_VECTOR));
- }
-
- /*
- * This call is required to set up for 4k stacks, where we have
- * separate stacks for hard and soft interrupts.
- */
- irq_ctx_init(smp_processor_id());
-}
-
-/*
- * Time.
- *
- * It would be far better for everyone if the Guest had its own clock, but
- * until then the Host gives us the time on every interrupt.
- */
-static void lguest_get_wallclock(struct timespec *now)
-{
- *now = lguest_data.time;
-}
-
-/*
- * The TSC is an Intel thing called the Time Stamp Counter. The Host tells us
- * what speed it runs at, or 0 if it's unusable as a reliable clock source.
- * This matches what we want here: if we return 0 from this function, the x86
- * TSC clock will give up and not register itself.
- */
-static unsigned long lguest_tsc_khz(void)
-{
- return lguest_data.tsc_khz;
-}
-
-/*
- * If we can't use the TSC, the kernel falls back to our lower-priority
- * "lguest_clock", where we read the time value given to us by the Host.
- */
-static u64 lguest_clock_read(struct clocksource *cs)
-{
- unsigned long sec, nsec;
-
- /*
- * Since the time is in two parts (seconds and nanoseconds), we risk
- * reading it just as it's changing from 99 & 0.999999999 to 100 and 0,
- * and getting 99 and 0. As Linux tends to come apart under the stress
- * of time travel, we must be careful:
- */
- do {
- /* First we read the seconds part. */
- sec = lguest_data.time.tv_sec;
- /*
- * This read memory barrier tells the compiler and the CPU that
- * this can't be reordered: we have to complete the above
- * before going on.
- */
- rmb();
- /* Now we read the nanoseconds part. */
- nsec = lguest_data.time.tv_nsec;
- /* Make sure we've done that. */
- rmb();
- /* Now if the seconds part has changed, try again. */
- } while (unlikely(lguest_data.time.tv_sec != sec));
-
- /* Our lguest clock is in real nanoseconds. */
- return sec*1000000000ULL + nsec;
-}
-
-/* This is the fallback clocksource: lower priority than the TSC clocksource. */
-static struct clocksource lguest_clock = {
- .name = "lguest",
- .rating = 200,
- .read = lguest_clock_read,
- .mask = CLOCKSOURCE_MASK(64),
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
-/*
- * We also need a "struct clock_event_device": Linux asks us to set it to go
- * off some time in the future. Actually, James Morris figured all this out, I
- * just applied the patch.
- */
-static int lguest_clockevent_set_next_event(unsigned long delta,
- struct clock_event_device *evt)
-{
- /* FIXME: I don't think this can ever happen, but James tells me he had
- * to put this code in. Maybe we should remove it now. Anyone? */
- if (delta < LG_CLOCK_MIN_DELTA) {
- if (printk_ratelimit())
- printk(KERN_DEBUG "%s: small delta %lu ns\n",
- __func__, delta);
- return -ETIME;
- }
-
- /* Please wake us this far in the future. */
- hcall(LHCALL_SET_CLOCKEVENT, delta, 0, 0, 0);
- return 0;
-}
-
-static int lguest_clockevent_shutdown(struct clock_event_device *evt)
-{
- /* A 0 argument shuts the clock down. */
- hcall(LHCALL_SET_CLOCKEVENT, 0, 0, 0, 0);
- return 0;
-}
-
-/* This describes our primitive timer chip. */
-static struct clock_event_device lguest_clockevent = {
- .name = "lguest",
- .features = CLOCK_EVT_FEAT_ONESHOT,
- .set_next_event = lguest_clockevent_set_next_event,
- .set_state_shutdown = lguest_clockevent_shutdown,
- .rating = INT_MAX,
- .mult = 1,
- .shift = 0,
- .min_delta_ns = LG_CLOCK_MIN_DELTA,
- .min_delta_ticks = LG_CLOCK_MIN_DELTA,
- .max_delta_ns = LG_CLOCK_MAX_DELTA,
- .max_delta_ticks = LG_CLOCK_MAX_DELTA,
-};
-
-/*
- * This is the Guest timer interrupt handler (hardware interrupt 0). We just
- * call the clockevent infrastructure and it does whatever needs doing.
- */
-static void lguest_time_irq(struct irq_desc *desc)
-{
- unsigned long flags;
-
- /* Don't interrupt us while this is running. */
- local_irq_save(flags);
- lguest_clockevent.event_handler(&lguest_clockevent);
- local_irq_restore(flags);
-}
-
-/*
- * At some point in the boot process, we get asked to set up our timing
- * infrastructure. The kernel doesn't expect timer interrupts before this, but
- * we cleverly initialized the "blocked_interrupts" field of "struct
- * lguest_data" so that timer interrupts were blocked until now.
- */
-static void lguest_time_init(void)
-{
- /* Set up the timer interrupt (0) to go to our simple timer routine */
- if (lguest_setup_irq(0) != 0)
- panic("Could not set up timer irq");
- irq_set_handler(0, lguest_time_irq);
-
- clocksource_register_hz(&lguest_clock, NSEC_PER_SEC);
-
- /* We can't set cpumask in the initializer: damn C limitations! Set it
- * here and register our timer device. */
- lguest_clockevent.cpumask = cpumask_of(0);
- clockevents_register_device(&lguest_clockevent);
-
- /* Finally, we unblock the timer interrupt. */
- clear_bit(0, lguest_data.blocked_interrupts);
-}
-
-/*
- * Miscellaneous bits and pieces.
- *
- * Here is an oddball collection of functions which the Guest needs for things
- * to work. They're pretty simple.
- */
-
-/*
- * The Guest needs to tell the Host what stack it expects traps to use. For
- * native hardware, this is part of the Task State Segment mentioned above in
- * lguest_load_tr_desc(), but to help hypervisors there's this special call.
- *
- * We tell the Host the segment we want to use (__KERNEL_DS is the kernel data
- * segment), the privilege level (we're privilege level 1, the Host is 0 and
- * will not tolerate us trying to use that), the stack pointer, and the number
- * of pages in the stack.
- */
-static void lguest_load_sp0(struct tss_struct *tss,
- struct thread_struct *thread)
-{
- lazy_hcall3(LHCALL_SET_STACK, __KERNEL_DS | 0x1, thread->sp0,
- THREAD_SIZE / PAGE_SIZE);
- tss->x86_tss.sp0 = thread->sp0;
-}
-
-/* Let's just say, I wouldn't do debugging under a Guest. */
-static unsigned long lguest_get_debugreg(int regno)
-{
- /* FIXME: Implement */
- return 0;
-}
-
-static void lguest_set_debugreg(int regno, unsigned long value)
-{
- /* FIXME: Implement */
-}
-
-/*
- * There are times when the kernel wants to make sure that no memory writes are
- * caught in the cache (that they've all reached real hardware devices). This
- * doesn't matter for the Guest which has virtual hardware.
- *
- * On the Pentium 4 and above, cpuid() indicates that the Cache Line Flush
- * (clflush) instruction is available and the kernel uses that. Otherwise, it
- * uses the older "Write Back and Invalidate Cache" (wbinvd) instruction.
- * Unlike clflush, wbinvd can only be run at privilege level 0. So we can
- * ignore clflush, but replace wbinvd.
- */
-static void lguest_wbinvd(void)
-{
-}
-
-/*
- * If the Guest expects to have an Advanced Programmable Interrupt Controller,
- * we play dumb by ignoring writes and returning 0 for reads. So it's no
- * longer Programmable nor Controlling anything, and I don't think 8 lines of
- * code qualifies for Advanced. It will also never interrupt anything. It
- * does, however, allow us to get through the Linux boot code.
- */
-#ifdef CONFIG_X86_LOCAL_APIC
-static void lguest_apic_write(u32 reg, u32 v)
-{
-}
-
-static u32 lguest_apic_read(u32 reg)
-{
- return 0;
-}
-
-static u64 lguest_apic_icr_read(void)
-{
- return 0;
-}
-
-static void lguest_apic_icr_write(u32 low, u32 id)
-{
- /* Warn to see if there's any stray references */
- WARN_ON(1);
-}
-
-static void lguest_apic_wait_icr_idle(void)
-{
- return;
-}
-
-static u32 lguest_apic_safe_wait_icr_idle(void)
-{
- return 0;
-}
-
-static void set_lguest_basic_apic_ops(void)
-{
- apic->read = lguest_apic_read;
- apic->write = lguest_apic_write;
- apic->icr_read = lguest_apic_icr_read;
- apic->icr_write = lguest_apic_icr_write;
- apic->wait_icr_idle = lguest_apic_wait_icr_idle;
- apic->safe_wait_icr_idle = lguest_apic_safe_wait_icr_idle;
-};
-#endif
-
-/* STOP! Until an interrupt comes in. */
-static void lguest_safe_halt(void)
-{
- hcall(LHCALL_HALT, 0, 0, 0, 0);
-}
-
-/*
- * The SHUTDOWN hypercall takes a string to describe what's happening, and
- * an argument which says whether this to restart (reboot) the Guest or not.
- *
- * Note that the Host always prefers that the Guest speak in physical addresses
- * rather than virtual addresses, so we use __pa() here.
- */
-static void lguest_power_off(void)
-{
- hcall(LHCALL_SHUTDOWN, __pa("Power down"),
- LGUEST_SHUTDOWN_POWEROFF, 0, 0);
-}
-
-/*
- * Panicing.
- *
- * Don't. But if you did, this is what happens.
- */
-static int lguest_panic(struct notifier_block *nb, unsigned long l, void *p)
-{
- hcall(LHCALL_SHUTDOWN, __pa(p), LGUEST_SHUTDOWN_POWEROFF, 0, 0);
- /* The hcall won't return, but to keep gcc happy, we're "done". */
- return NOTIFY_DONE;
-}
-
-static struct notifier_block paniced = {
- .notifier_call = lguest_panic
-};
-
-/* Setting up memory is fairly easy. */
-static __init char *lguest_memory_setup(void)
-{
- /*
- * The Linux bootloader header contains an "e820" memory map: the
- * Launcher populated the first entry with our memory limit.
- */
- e820__range_add(boot_params.e820_table[0].addr,
- boot_params.e820_table[0].size,
- boot_params.e820_table[0].type);
-
- /* This string is for the boot messages. */
- return "LGUEST";
-}
-
-/* Offset within PCI config space of BAR access capability. */
-static int console_cfg_offset = 0;
-static int console_access_cap;
-
-/* Set up so that we access off in bar0 (on bus 0, device 1, function 0) */
-static void set_cfg_window(u32 cfg_offset, u32 off)
-{
- write_pci_config_byte(0, 1, 0,
- cfg_offset + offsetof(struct virtio_pci_cap, bar),
- 0);
- write_pci_config(0, 1, 0,
- cfg_offset + offsetof(struct virtio_pci_cap, length),
- 4);
- write_pci_config(0, 1, 0,
- cfg_offset + offsetof(struct virtio_pci_cap, offset),
- off);
-}
-
-static void write_bar_via_cfg(u32 cfg_offset, u32 off, u32 val)
-{
- /*
- * We could set this up once, then leave it; nothing else in the
- * kernel should touch these registers. But if it went wrong, that
- * would be a horrible bug to find.
- */
- set_cfg_window(cfg_offset, off);
- write_pci_config(0, 1, 0,
- cfg_offset + sizeof(struct virtio_pci_cap), val);
-}
-
-static void probe_pci_console(void)
-{
- u8 cap, common_cap = 0, device_cap = 0;
- u32 device_len;
-
- /* Avoid recursive printk into here. */
- console_cfg_offset = -1;
-
- if (!early_pci_allowed()) {
- printk(KERN_ERR "lguest: early PCI access not allowed!\n");
- return;
- }
-
- /* We expect a console PCI device at BUS0, slot 1. */
- if (read_pci_config(0, 1, 0, 0) != 0x10431AF4) {
- printk(KERN_ERR "lguest: PCI device is %#x!\n",
- read_pci_config(0, 1, 0, 0));
- return;
- }
-
- /* Find the capabilities we need (must be in bar0) */
- cap = read_pci_config_byte(0, 1, 0, PCI_CAPABILITY_LIST);
- while (cap) {
- u8 vndr = read_pci_config_byte(0, 1, 0, cap);
- if (vndr == PCI_CAP_ID_VNDR) {
- u8 type, bar;
-
- type = read_pci_config_byte(0, 1, 0,
- cap + offsetof(struct virtio_pci_cap, cfg_type));
- bar = read_pci_config_byte(0, 1, 0,
- cap + offsetof(struct virtio_pci_cap, bar));
-
- switch (type) {
- case VIRTIO_PCI_CAP_DEVICE_CFG:
- if (bar == 0)
- device_cap = cap;
- break;
- case VIRTIO_PCI_CAP_PCI_CFG:
- console_access_cap = cap;
- break;
- }
- }
- cap = read_pci_config_byte(0, 1, 0, cap + PCI_CAP_LIST_NEXT);
- }
- if (!device_cap || !console_access_cap) {
- printk(KERN_ERR "lguest: No caps (%u/%u/%u) in console!\n",
- common_cap, device_cap, console_access_cap);
- return;
- }
-
- /*
- * Note that we can't check features, until we've set the DRIVER
- * status bit. We don't want to do that until we have a real driver,
- * so we just check that the device-specific config has room for
- * emerg_wr. If it doesn't support VIRTIO_CONSOLE_F_EMERG_WRITE
- * it should ignore the access.
- */
- device_len = read_pci_config(0, 1, 0,
- device_cap + offsetof(struct virtio_pci_cap, length));
- if (device_len < (offsetof(struct virtio_console_config, emerg_wr)
- + sizeof(u32))) {
- printk(KERN_ERR "lguest: console missing emerg_wr field\n");
- return;
- }
-
- console_cfg_offset = read_pci_config(0, 1, 0,
- device_cap + offsetof(struct virtio_pci_cap, offset));
- printk(KERN_INFO "lguest: Console via virtio-pci emerg_wr\n");
-}
-
-/*
- * We will eventually use the virtio console device to produce console output,
- * but before that is set up we use the virtio PCI console's backdoor mmio
- * access and the "emergency" write facility (which is legal even before the
- * device is configured).
- */
-static __init int early_put_chars(u32 vtermno, const char *buf, int count)
-{
- /* If we couldn't find PCI console, forget it. */
- if (console_cfg_offset < 0)
- return count;
-
- if (unlikely(!console_cfg_offset)) {
- probe_pci_console();
- if (console_cfg_offset < 0)
- return count;
- }
-
- write_bar_via_cfg(console_access_cap,
- console_cfg_offset
- + offsetof(struct virtio_console_config, emerg_wr),
- buf[0]);
- return 1;
-}
-
-/*
- * Rebooting also tells the Host we're finished, but the RESTART flag tells the
- * Launcher to reboot us.
- */
-static void lguest_restart(char *reason)
-{
- hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
-}
-
-/*G:050
- * Patching (Powerfully Placating Performance Pedants)
- *
- * We have already seen that pv_ops structures let us replace simple native
- * instructions with calls to the appropriate back end all throughout the
- * kernel. This allows the same kernel to run as a Guest and as a native
- * kernel, but it's slow because of all the indirect branches.
- *
- * Remember that David Wheeler quote about "Any problem in computer science can
- * be solved with another layer of indirection"? The rest of that quote is
- * "... But that usually will create another problem." This is the first of
- * those problems.
- *
- * Our current solution is to allow the paravirt back end to optionally patch
- * over the indirect calls to replace them with something more efficient. We
- * patch two of the simplest of the most commonly called functions: disable
- * interrupts and save interrupts. We usually have 6 or 10 bytes to patch
- * into: the Guest versions of these operations are small enough that we can
- * fit comfortably.
- *
- * First we need assembly templates of each of the patchable Guest operations,
- * and these are in head_32.S.
- */
-
-/*G:060 We construct a table from the assembler templates: */
-static const struct lguest_insns
-{
- const char *start, *end;
-} lguest_insns[] = {
- [PARAVIRT_PATCH(pv_irq_ops.irq_disable)] = { lgstart_cli, lgend_cli },
- [PARAVIRT_PATCH(pv_irq_ops.save_fl)] = { lgstart_pushf, lgend_pushf },
-};
-
-/*
- * Now our patch routine is fairly simple (based on the native one in
- * paravirt.c). If we have a replacement, we copy it in and return how much of
- * the available space we used.
- */
-static unsigned lguest_patch(u8 type, u16 clobber, void *ibuf,
- unsigned long addr, unsigned len)
-{
- unsigned int insn_len;
-
- /* Don't do anything special if we don't have a replacement */
- if (type >= ARRAY_SIZE(lguest_insns) || !lguest_insns[type].start)
- return paravirt_patch_default(type, clobber, ibuf, addr, len);
-
- insn_len = lguest_insns[type].end - lguest_insns[type].start;
-
- /* Similarly if it can't fit (doesn't happen, but let's be thorough). */
- if (len < insn_len)
- return paravirt_patch_default(type, clobber, ibuf, addr, len);
-
- /* Copy in our instructions. */
- memcpy(ibuf, lguest_insns[type].start, insn_len);
- return insn_len;
-}
-
-/*G:029
- * Once we get to lguest_init(), we know we're a Guest. The various
- * pv_ops structures in the kernel provide points for (almost) every routine we
- * have to override to avoid privileged instructions.
- */
-__init void lguest_init(void)
-{
- /* We're under lguest. */
- pv_info.name = "lguest";
- /* We're running at privilege level 1, not 0 as normal. */
- pv_info.kernel_rpl = 1;
- /* Everyone except Xen runs with this set. */
- pv_info.shared_kernel_pmd = 1;
-
- /*
- * We set up all the lguest overrides for sensitive operations. These
- * are detailed with the operations themselves.
- */
-
- /* Interrupt-related operations */
- pv_irq_ops.save_fl = PV_CALLEE_SAVE(lguest_save_fl);
- pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(lg_restore_fl);
- pv_irq_ops.irq_disable = PV_CALLEE_SAVE(lguest_irq_disable);
- pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(lg_irq_enable);
- pv_irq_ops.safe_halt = lguest_safe_halt;
-
- /* Setup operations */
- pv_init_ops.patch = lguest_patch;
-
- /* Intercepts of various CPU instructions */
- pv_cpu_ops.load_gdt = lguest_load_gdt;
- pv_cpu_ops.cpuid = lguest_cpuid;
- pv_cpu_ops.load_idt = lguest_load_idt;
- pv_cpu_ops.iret = lguest_iret;
- pv_cpu_ops.load_sp0 = lguest_load_sp0;
- pv_cpu_ops.load_tr_desc = lguest_load_tr_desc;
- pv_cpu_ops.set_ldt = lguest_set_ldt;
- pv_cpu_ops.load_tls = lguest_load_tls;
- pv_cpu_ops.get_debugreg = lguest_get_debugreg;
- pv_cpu_ops.set_debugreg = lguest_set_debugreg;
- pv_cpu_ops.read_cr0 = lguest_read_cr0;
- pv_cpu_ops.write_cr0 = lguest_write_cr0;
- pv_cpu_ops.read_cr4 = lguest_read_cr4;
- pv_cpu_ops.write_cr4 = lguest_write_cr4;
- pv_cpu_ops.write_gdt_entry = lguest_write_gdt_entry;
- pv_cpu_ops.write_idt_entry = lguest_write_idt_entry;
- pv_cpu_ops.wbinvd = lguest_wbinvd;
- pv_cpu_ops.start_context_switch = paravirt_start_context_switch;
- pv_cpu_ops.end_context_switch = lguest_end_context_switch;
-
- /* Pagetable management */
- pv_mmu_ops.write_cr3 = lguest_write_cr3;
- pv_mmu_ops.flush_tlb_user = lguest_flush_tlb_user;
- pv_mmu_ops.flush_tlb_single = lguest_flush_tlb_single;
- pv_mmu_ops.flush_tlb_kernel = lguest_flush_tlb_kernel;
- pv_mmu_ops.set_pte = lguest_set_pte;
- pv_mmu_ops.set_pte_at = lguest_set_pte_at;
- pv_mmu_ops.set_pmd = lguest_set_pmd;
-#ifdef CONFIG_X86_PAE
- pv_mmu_ops.set_pte_atomic = lguest_set_pte_atomic;
- pv_mmu_ops.pte_clear = lguest_pte_clear;
- pv_mmu_ops.pmd_clear = lguest_pmd_clear;
- pv_mmu_ops.set_pud = lguest_set_pud;
-#endif
- pv_mmu_ops.read_cr2 = lguest_read_cr2;
- pv_mmu_ops.read_cr3 = lguest_read_cr3;
- pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu;
- pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mmu_mode;
- pv_mmu_ops.lazy_mode.flush = paravirt_flush_lazy_mmu;
- pv_mmu_ops.pte_update = lguest_pte_update;
-
-#ifdef CONFIG_X86_LOCAL_APIC
- /* APIC read/write intercepts */
- set_lguest_basic_apic_ops();
-#endif
-
- x86_init.resources.memory_setup = lguest_memory_setup;
- x86_init.irqs.intr_init = lguest_init_IRQ;
- x86_init.timers.timer_init = lguest_time_init;
- x86_platform.calibrate_tsc = lguest_tsc_khz;
- x86_platform.get_wallclock = lguest_get_wallclock;
-
- /*
- * Now is a good time to look at the implementations of these functions
- * before returning to the rest of lguest_init().
- */
-
- /*G:070
- * Now we've seen all the paravirt_ops, we return to
- * lguest_init() where the rest of the fairly chaotic boot setup
- * occurs.
- */
-
- /*
- * The stack protector is a weird thing where gcc places a canary
- * value on the stack and then checks it on return. This file is
- * compiled with -fno-stack-protector, so we got this far without
- * problems. The value of the canary is kept at offset 20 from the
- * %gs register, so we need to set that up before calling C functions
- * in other files.
- */
- setup_stack_canary_segment(0);
-
- /*
- * We could just call load_stack_canary_segment(), but we might as well
- * call switch_to_new_gdt() which loads the whole table and sets up the
- * per-cpu segment descriptor register %fs as well.
- */
- switch_to_new_gdt(0);
-
- /*
- * The Host<->Guest Switcher lives at the top of our address space, and
- * the Host told us how big it is when we made LGUEST_INIT hypercall:
- * it put the answer in lguest_data.reserve_mem
- */
- reserve_top_address(lguest_data.reserve_mem);
-
- /* Hook in our special panic hypercall code. */
- atomic_notifier_chain_register(&panic_notifier_list, &paniced);
-
- /*
- * This is messy CPU setup stuff which the native boot code does before
- * start_kernel, so we have to do, too:
- */
- cpu_detect(&new_cpu_data);
- /* head.S usually sets up the first capability word, so do it here. */
- new_cpu_data.x86_capability[CPUID_1_EDX] = cpuid_edx(1);
-
- /* Math is always hard! */
- set_cpu_cap(&new_cpu_data, X86_FEATURE_FPU);
-
- /* We don't have features. We have puppies! Puppies! */
-#ifdef CONFIG_X86_MCE
- mca_cfg.disabled = true;
-#endif
-#ifdef CONFIG_ACPI
- acpi_disabled = 1;
-#endif
-
- /*
- * We set the preferred console to "hvc". This is the "hypervisor
- * virtual console" driver written by the PowerPC people, which we also
- * adapted for lguest's use.
- */
- add_preferred_console("hvc", 0, NULL);
-
- /* Register our very early console. */
- virtio_cons_early_init(early_put_chars);
-
- /* Don't let ACPI try to control our PCI interrupts. */
- disable_acpi();
-
- /* We control them ourselves, by overriding these two hooks. */
- pcibios_enable_irq = lguest_enable_irq;
- pcibios_disable_irq = lguest_disable_irq;
-
- /*
- * Last of all, we set the power management poweroff hook to point to
- * the Guest routine to power off, and the reboot hook to our restart
- * routine.
- */
- pm_power_off = lguest_power_off;
- machine_ops.restart = lguest_restart;
-
- /*
- * Now we're set up, call i386_start_kernel() in head32.c and we proceed
- * to boot as normal. It never returns.
- */
- i386_start_kernel();
-}
-/*
- * This marks the end of stage II of our journey, The Guest.
- *
- * It is now time for us to explore the layer of virtual drivers and complete
- * our understanding of the Guest in "make Drivers".
- */
diff --git a/arch/x86/lguest/head_32.S b/arch/x86/lguest/head_32.S
deleted file mode 100644
index d5ae63f5ec5d..000000000000
--- a/arch/x86/lguest/head_32.S
+++ /dev/null
@@ -1,192 +0,0 @@
-#include <linux/linkage.h>
-#include <linux/lguest.h>
-#include <asm/lguest_hcall.h>
-#include <asm/asm-offsets.h>
-#include <asm/thread_info.h>
-#include <asm/processor-flags.h>
-
-/*G:020
-
- * Our story starts with the bzImage: booting starts at startup_32 in
- * arch/x86/boot/compressed/head_32.S. This merely uncompresses the real
- * kernel in place and then jumps into it: startup_32 in
- * arch/x86/kernel/head_32.S. Both routines expect a boot header in the %esi
- * register, which is created by the bootloader (the Launcher in our case).
- *
- * The startup_32 function does very little: it clears the uninitialized global
- * C variables which we expect to be zero (ie. BSS) and then copies the boot
- * header and kernel command line somewhere safe, and populates some initial
- * page tables. Finally it checks the 'hardware_subarch' field. This was
- * introduced in 2.6.24 for lguest and Xen: if it's set to '1' (lguest's
- * assigned number), then it calls us here.
- *
- * WARNING: be very careful here! We're running at addresses equal to physical
- * addresses (around 0), not above PAGE_OFFSET as most code expects
- * (eg. 0xC0000000). Jumps are relative, so they're OK, but we can't touch any
- * data without remembering to subtract __PAGE_OFFSET!
- *
- * The .section line puts this code in .init.text so it will be discarded after
- * boot.
- */
-.section .init.text, "ax", @progbits
-ENTRY(lguest_entry)
- /*
- * We make the "initialization" hypercall now to tell the Host where
- * our lguest_data struct is.
- */
- movl $LHCALL_LGUEST_INIT, %eax
- movl $lguest_data - __PAGE_OFFSET, %ebx
- int $LGUEST_TRAP_ENTRY
-
- /* Now turn our pagetables on; setup by arch/x86/kernel/head_32.S. */
- movl $LHCALL_NEW_PGTABLE, %eax
- movl $(initial_page_table - __PAGE_OFFSET), %ebx
- int $LGUEST_TRAP_ENTRY
-
- /* Set up the initial stack so we can run C code. */
- movl $(init_thread_union+THREAD_SIZE),%esp
-
- /* Jumps are relative: we're running __PAGE_OFFSET too low. */
- jmp lguest_init+__PAGE_OFFSET
-
-/*G:055
- * We create a macro which puts the assembler code between lgstart_ and lgend_
- * markers. These templates are put in the .text section: they can't be
- * discarded after boot as we may need to patch modules, too.
- */
-.text
-#define LGUEST_PATCH(name, insns...) \
- lgstart_##name: insns; lgend_##name:; \
- .globl lgstart_##name; .globl lgend_##name
-
-LGUEST_PATCH(cli, movl $0, lguest_data+LGUEST_DATA_irq_enabled)
-LGUEST_PATCH(pushf, movl lguest_data+LGUEST_DATA_irq_enabled, %eax)
-
-/*G:033
- * But using those wrappers is inefficient (we'll see why that doesn't matter
- * for save_fl and irq_disable later). If we write our routines carefully in
- * assembler, we can avoid clobbering any registers and avoid jumping through
- * the wrapper functions.
- *
- * I skipped over our first piece of assembler, but this one is worth studying
- * in a bit more detail so I'll describe in easy stages. First, the routine to
- * enable interrupts:
- */
-ENTRY(lg_irq_enable)
- /*
- * The reverse of irq_disable, this sets lguest_data.irq_enabled to
- * X86_EFLAGS_IF (ie. "Interrupts enabled").
- */
- movl $X86_EFLAGS_IF, lguest_data+LGUEST_DATA_irq_enabled
- /*
- * But now we need to check if the Host wants to know: there might have
- * been interrupts waiting to be delivered, in which case it will have
- * set lguest_data.irq_pending to X86_EFLAGS_IF. If it's not zero, we
- * jump to send_interrupts, otherwise we're done.
- */
- cmpl $0, lguest_data+LGUEST_DATA_irq_pending
- jnz send_interrupts
- /*
- * One cool thing about x86 is that you can do many things without using
- * a register. In this case, the normal path hasn't needed to save or
- * restore any registers at all!
- */
- ret
-send_interrupts:
- /*
- * OK, now we need a register: eax is used for the hypercall number,
- * which is LHCALL_SEND_INTERRUPTS.
- *
- * We used not to bother with this pending detection at all, which was
- * much simpler. Sooner or later the Host would realize it had to
- * send us an interrupt. But that turns out to make performance 7
- * times worse on a simple tcp benchmark. So now we do this the hard
- * way.
- */
- pushl %eax
- movl $LHCALL_SEND_INTERRUPTS, %eax
- /* This is the actual hypercall trap. */
- int $LGUEST_TRAP_ENTRY
- /* Put eax back the way we found it. */
- popl %eax
- ret
-
-/*
- * Finally, the "popf" or "restore flags" routine. The %eax register holds the
- * flags (in practice, either X86_EFLAGS_IF or 0): if it's X86_EFLAGS_IF we're
- * enabling interrupts again, if it's 0 we're leaving them off.
- */
-ENTRY(lg_restore_fl)
- /* This is just "lguest_data.irq_enabled = flags;" */
- movl %eax, lguest_data+LGUEST_DATA_irq_enabled
- /*
- * Now, if the %eax value has enabled interrupts and
- * lguest_data.irq_pending is set, we want to tell the Host so it can
- * deliver any outstanding interrupts. Fortunately, both values will
- * be X86_EFLAGS_IF (ie. 512) in that case, and the "testl"
- * instruction will AND them together for us. If both are set, we
- * jump to send_interrupts.
- */
- testl lguest_data+LGUEST_DATA_irq_pending, %eax
- jnz send_interrupts
- /* Again, the normal path has used no extra registers. Clever, huh? */
- ret
-/*:*/
-
-/* This marks the EIP where the Host should never deliver interrupts. */
-.global lguest_noirq_iret
-
-/*M:004
- * When the Host reflects a trap or injects an interrupt into the Guest, it
- * sets the eflags interrupt bit on the stack based on lguest_data.irq_enabled,
- * so the Guest iret logic does the right thing when restoring it. However,
- * when the Host sets the Guest up for direct traps, such as system calls, the
- * processor is the one to push eflags onto the stack, and the interrupt bit
- * will be 1 (in reality, interrupts are always enabled in the Guest).
- *
- * This turns out to be harmless: the only trap which should happen under Linux
- * with interrupts disabled is Page Fault (due to our lazy mapping of vmalloc
- * regions), which has to be reflected through the Host anyway. If another
- * trap *does* go off when interrupts are disabled, the Guest will panic, and
- * we'll never get to this iret!
-:*/
-
-/*G:045
- * There is one final paravirt_op that the Guest implements, and glancing at it
- * you can see why I left it to last. It's *cool*! It's in *assembler*!
- *
- * The "iret" instruction is used to return from an interrupt or trap. The
- * stack looks like this:
- * old address
- * old code segment & privilege level
- * old processor flags ("eflags")
- *
- * The "iret" instruction pops those values off the stack and restores them all
- * at once. The only problem is that eflags includes the Interrupt Flag which
- * the Guest can't change: the CPU will simply ignore it when we do an "iret".
- * So we have to copy eflags from the stack to lguest_data.irq_enabled before
- * we do the "iret".
- *
- * There are two problems with this: firstly, we can't clobber any registers
- * and secondly, the whole thing needs to be atomic. The first problem
- * is solved by using a "push memory"/"pop memory" instruction pair for copying.
- *
- * The second is harder: copying eflags to lguest_data.irq_enabled will turn
- * interrupts on before we're finished, so we could be interrupted before we
- * return to userspace or wherever. Our solution to this is to tell the
- * Host that it is *never* to interrupt us there, even if interrupts seem to be
- * enabled. (It's not necessary to protect the pop instruction, since the
- * data is only updated after it completes, so we only need to protect a
- * single instruction, the iret.)
- */
-ENTRY(lguest_iret)
- pushl 2*4(%esp)
- /*
- * Note the %ss: segment prefix here. Normal data accesses use the
- * "ds" segment, but that will have already been restored for whatever
- * we're returning to (such as userspace): we can't trust it. The %ss:
- * prefix makes sure we use the stack segment, which is still valid.
- */
- popl %ss:lguest_data+LGUEST_DATA_irq_enabled
-lguest_noirq_iret:
- iret
diff --git a/arch/x86/lib/cmdline.c b/arch/x86/lib/cmdline.c
index 5cc78bf57232..3261abb21ef4 100644
--- a/arch/x86/lib/cmdline.c
+++ b/arch/x86/lib/cmdline.c
@@ -104,7 +104,112 @@ __cmdline_find_option_bool(const char *cmdline, int max_cmdline_size,
return 0; /* Buffer overrun */
}
+/*
+ * Find a non-boolean option (i.e. option=argument). In accordance with
+ * standard Linux practice, if this option is repeated, this returns the
+ * last instance on the command line.
+ *
+ * @cmdline: the cmdline string
+ * @max_cmdline_size: the maximum size of cmdline
+ * @option: option string to look for
+ * @buffer: memory buffer to return the option argument
+ * @bufsize: size of the supplied memory buffer
+ *
+ * Returns the length of the argument (regardless of whether it was
+ * truncated to fit in the buffer), or -1 if the option was not found.
+ */
+static int
+__cmdline_find_option(const char *cmdline, int max_cmdline_size,
+ const char *option, char *buffer, int bufsize)
+{
+ char c;
+ int pos = 0, len = -1;
+ const char *opptr = NULL;
+ char *bufptr = buffer;
+ enum {
+ st_wordstart = 0, /* Start of word/after whitespace */
+ st_wordcmp, /* Comparing this word */
+ st_wordskip, /* Miscompare, skip */
+ st_bufcpy, /* Copying this to buffer */
+ } state = st_wordstart;
+
+ if (!cmdline)
+ return -1; /* No command line */
+
+ /*
+ * This 'pos' check ensures we do not overrun
+ * a non-NULL-terminated 'cmdline'
+ */
+ while (pos++ < max_cmdline_size) {
+ c = *(char *)cmdline++;
+ if (!c)
+ break;
+
+ switch (state) {
+ case st_wordstart:
+ if (myisspace(c))
+ break;
+
+ state = st_wordcmp;
+ opptr = option;
+ /* fall through */
+
+ case st_wordcmp:
+ if ((c == '=') && !*opptr) {
+ /*
+ * We matched all the way to the end of the
+ * option we were looking for, prepare to
+ * copy the argument.
+ */
+ len = 0;
+ bufptr = buffer;
+ state = st_bufcpy;
+ break;
+ } else if (c == *opptr++) {
+ /*
+ * We are currently matching, so continue
+ * to the next character on the cmdline.
+ */
+ break;
+ }
+ state = st_wordskip;
+ /* fall through */
+
+ case st_wordskip:
+ if (myisspace(c))
+ state = st_wordstart;
+ break;
+
+ case st_bufcpy:
+ if (myisspace(c)) {
+ state = st_wordstart;
+ } else {
+ /*
+ * Increment len, but don't overrun the
+ * supplied buffer and leave room for the
+ * NULL terminator.
+ */
+ if (++len < bufsize)
+ *bufptr++ = c;
+ }
+ break;
+ }
+ }
+
+ if (bufsize)
+ *bufptr = '\0';
+
+ return len;
+}
+
int cmdline_find_option_bool(const char *cmdline, const char *option)
{
return __cmdline_find_option_bool(cmdline, COMMAND_LINE_SIZE, option);
}
+
+int cmdline_find_option(const char *cmdline, const char *option, char *buffer,
+ int bufsize)
+{
+ return __cmdline_find_option(cmdline, COMMAND_LINE_SIZE, option,
+ buffer, bufsize);
+}
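As a hedged aside for readers of this hunk, here is a minimal usage sketch for the new helper. The option name "foo", the example function and the pr_info() reporting are illustrative only and not part of this patch; boot_command_line is assumed to be the usual kernel command-line buffer.

	/* Hypothetical early caller looking for "foo=<argument>". */
	static int __init foo_example_setup(void)
	{
		char arg[32];
		int len;

		len = cmdline_find_option(boot_command_line, "foo", arg, sizeof(arg));
		if (len < 0)
			return 0;			/* "foo=" not present */
		if (len >= (int)sizeof(arg))
			pr_info("foo= argument truncated\n");
		else
			pr_info("foo=%s (length %d)\n", arg, len);
		return 0;
	}

Note that a return value of len >= bufsize means the argument was present but only the first bufsize - 1 characters were copied before NUL termination.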
diff --git a/arch/x86/math-emu/Makefile b/arch/x86/math-emu/Makefile
index 9b0c63b60302..1b2dac174321 100644
--- a/arch/x86/math-emu/Makefile
+++ b/arch/x86/math-emu/Makefile
@@ -5,8 +5,8 @@
#DEBUG = -DDEBUGGING
DEBUG =
PARANOID = -DPARANOID
-EXTRA_CFLAGS := $(PARANOID) $(DEBUG) -fno-builtin $(MATH_EMULATION)
-EXTRA_AFLAGS := $(PARANOID)
+ccflags-y += $(PARANOID) $(DEBUG) -fno-builtin $(MATH_EMULATION)
+asflags-y += $(PARANOID)
# From 'C' language sources:
C_OBJS =fpu_entry.o errors.o \
diff --git a/arch/x86/math-emu/div_Xsig.S b/arch/x86/math-emu/div_Xsig.S
index f77ba3058b31..066996dba6a2 100644
--- a/arch/x86/math-emu/div_Xsig.S
+++ b/arch/x86/math-emu/div_Xsig.S
@@ -363,3 +363,4 @@ L_bugged_2:
pop %ebx
jmp L_exit
#endif /* PARANOID */
+ENDPROC(div_Xsig)
diff --git a/arch/x86/math-emu/div_small.S b/arch/x86/math-emu/div_small.S
index 47099628fa4c..2c71527bd917 100644
--- a/arch/x86/math-emu/div_small.S
+++ b/arch/x86/math-emu/div_small.S
@@ -44,4 +44,4 @@ ENTRY(FPU_div_small)
leave
ret
-
+ENDPROC(FPU_div_small)
diff --git a/arch/x86/math-emu/fpu_emu.h b/arch/x86/math-emu/fpu_emu.h
index afbc4d805d66..c9c320dccca1 100644
--- a/arch/x86/math-emu/fpu_emu.h
+++ b/arch/x86/math-emu/fpu_emu.h
@@ -157,7 +157,7 @@ extern u_char const data_sizes_16[32];
#define signbyte(a) (((u_char *)(a))[9])
#define getsign(a) (signbyte(a) & 0x80)
-#define setsign(a,b) { if (b) signbyte(a) |= 0x80; else signbyte(a) &= 0x7f; }
+#define setsign(a,b) { if ((b) != 0) signbyte(a) |= 0x80; else signbyte(a) &= 0x7f; }
#define copysign(a,b) { if (getsign(a)) signbyte(b) |= 0x80; \
else signbyte(b) &= 0x7f; }
#define changesign(a) { signbyte(a) ^= 0x80; }
diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c
index 0203baefb5c0..d4a7df2205b8 100644
--- a/arch/x86/math-emu/fpu_entry.c
+++ b/arch/x86/math-emu/fpu_entry.c
@@ -147,7 +147,7 @@ void math_emulate(struct math_emu_info *info)
}
code_descriptor = FPU_get_ldt_descriptor(FPU_CS);
- if (SEG_D_SIZE(code_descriptor)) {
+ if (code_descriptor.d) {
/* The above test may be wrong, the book is not clear */
/* Segmented 32 bit protected mode */
addr_modes.default_mode = SEG32;
@@ -155,11 +155,10 @@ void math_emulate(struct math_emu_info *info)
/* 16 bit protected mode */
addr_modes.default_mode = PM16;
}
- FPU_EIP += code_base = SEG_BASE_ADDR(code_descriptor);
- code_limit = code_base
- + (SEG_LIMIT(code_descriptor) +
- 1) * SEG_GRANULARITY(code_descriptor)
- - 1;
+ FPU_EIP += code_base = seg_get_base(&code_descriptor);
+ code_limit = seg_get_limit(&code_descriptor) + 1;
+ code_limit *= seg_get_granularity(&code_descriptor);
+ code_limit += code_base - 1;
if (code_limit < code_base)
code_limit = 0xffffffff;
}
diff --git a/arch/x86/math-emu/fpu_system.h b/arch/x86/math-emu/fpu_system.h
index a179254a5122..699f329f1d40 100644
--- a/arch/x86/math-emu/fpu_system.h
+++ b/arch/x86/math-emu/fpu_system.h
@@ -34,17 +34,43 @@ static inline struct desc_struct FPU_get_ldt_descriptor(unsigned seg)
return ret;
}
-#define SEG_D_SIZE(x) ((x).b & (3 << 21))
-#define SEG_G_BIT(x) ((x).b & (1 << 23))
-#define SEG_GRANULARITY(x) (((x).b & (1 << 23)) ? 4096 : 1)
-#define SEG_286_MODE(x) ((x).b & ( 0xff000000 | 0xf0000 | (1 << 23)))
-#define SEG_BASE_ADDR(s) (((s).b & 0xff000000) \
- | (((s).b & 0xff) << 16) | ((s).a >> 16))
-#define SEG_LIMIT(s) (((s).b & 0xff0000) | ((s).a & 0xffff))
-#define SEG_EXECUTE_ONLY(s) (((s).b & ((1 << 11) | (1 << 9))) == (1 << 11))
-#define SEG_WRITE_PERM(s) (((s).b & ((1 << 11) | (1 << 9))) == (1 << 9))
-#define SEG_EXPAND_DOWN(s) (((s).b & ((1 << 11) | (1 << 10))) \
- == (1 << 10))
+#define SEG_TYPE_WRITABLE (1U << 1)
+#define SEG_TYPE_EXPANDS_DOWN (1U << 2)
+#define SEG_TYPE_EXECUTE (1U << 3)
+#define SEG_TYPE_EXPAND_MASK (SEG_TYPE_EXPANDS_DOWN | SEG_TYPE_EXECUTE)
+#define SEG_TYPE_EXECUTE_MASK (SEG_TYPE_WRITABLE | SEG_TYPE_EXECUTE)
+
+static inline unsigned long seg_get_base(struct desc_struct *d)
+{
+ unsigned long base = (unsigned long)d->base2 << 24;
+
+ return base | ((unsigned long)d->base1 << 16) | d->base0;
+}
+
+static inline unsigned long seg_get_limit(struct desc_struct *d)
+{
+ return ((unsigned long)d->limit1 << 16) | d->limit0;
+}
+
+static inline unsigned long seg_get_granularity(struct desc_struct *d)
+{
+ return d->g ? 4096 : 1;
+}
+
+static inline bool seg_expands_down(struct desc_struct *d)
+{
+ return (d->type & SEG_TYPE_EXPAND_MASK) == SEG_TYPE_EXPANDS_DOWN;
+}
+
+static inline bool seg_execute_only(struct desc_struct *d)
+{
+ return (d->type & SEG_TYPE_EXECUTE_MASK) == SEG_TYPE_EXECUTE;
+}
+
+static inline bool seg_writable(struct desc_struct *d)
+{
+ return (d->type & SEG_TYPE_EXECUTE_MASK) == SEG_TYPE_WRITABLE;
+}
#define I387 (&current->thread.fpu.state)
#define FPU_info (I387->soft.info)
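A hedged worked example of how the new accessors compose, mirroring the limit computation in the fpu_entry.c hunk above and the get_address.c hunk below. The descriptor contents are invented, and desc stands for a struct desc_struct obtained e.g. from FPU_get_ldt_descriptor(); math-emu is 32-bit only, so unsigned long is 32 bits wide here.

	/* Suppose desc has base 0x1000, limit field 0xfffff and the G bit set. */
	unsigned long base  = seg_get_base(&desc);	/* 0x00001000           */
	unsigned long limit = seg_get_limit(&desc) + 1;	/* 0x00100000 granules  */

	limit *= seg_get_granularity(&desc);	/* x 4096: wraps to 0 on 32-bit */
	limit += base - 1;			/* 0x00000fff, now below base   */
	if (limit < base)			/* wrap detected, so clamp      */
		limit = 0xffffffff;		/* segment runs to the 4 GiB top */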
diff --git a/arch/x86/math-emu/get_address.c b/arch/x86/math-emu/get_address.c
index b8ef9f9d2ffc..c48967c6a0e2 100644
--- a/arch/x86/math-emu/get_address.c
+++ b/arch/x86/math-emu/get_address.c
@@ -159,17 +159,18 @@ static long pm_address(u_char FPU_modrm, u_char segment,
}
descriptor = FPU_get_ldt_descriptor(addr->selector);
- base_address = SEG_BASE_ADDR(descriptor);
+ base_address = seg_get_base(&descriptor);
address = base_address + offset;
- limit = base_address
- + (SEG_LIMIT(descriptor) + 1) * SEG_GRANULARITY(descriptor) - 1;
+ limit = seg_get_limit(&descriptor) + 1;
+ limit *= seg_get_granularity(&descriptor);
+ limit += base_address - 1;
if (limit < base_address)
limit = 0xffffffff;
- if (SEG_EXPAND_DOWN(descriptor)) {
- if (SEG_G_BIT(descriptor))
+ if (seg_expands_down(&descriptor)) {
+ if (descriptor.g) {
seg_top = 0xffffffff;
- else {
+ } else {
seg_top = base_address + (1 << 20);
if (seg_top < base_address)
seg_top = 0xffffffff;
@@ -182,8 +183,8 @@ static long pm_address(u_char FPU_modrm, u_char segment,
(address > limit) || (address < base_address) ? 0 :
((limit - address) >= 254 ? 255 : limit - address + 1);
}
- if (SEG_EXECUTE_ONLY(descriptor) ||
- (!SEG_WRITE_PERM(descriptor) && (FPU_modrm & FPU_WRITE_BIT))) {
+ if (seg_execute_only(&descriptor) ||
+ (!seg_writable(&descriptor) && (FPU_modrm & FPU_WRITE_BIT))) {
access_limit = 0;
}
return address;
diff --git a/arch/x86/math-emu/mul_Xsig.S b/arch/x86/math-emu/mul_Xsig.S
index 717785a53eb4..22e0631bb85a 100644
--- a/arch/x86/math-emu/mul_Xsig.S
+++ b/arch/x86/math-emu/mul_Xsig.S
@@ -62,6 +62,7 @@ ENTRY(mul32_Xsig)
popl %esi
leave
ret
+ENDPROC(mul32_Xsig)
ENTRY(mul64_Xsig)
@@ -114,6 +115,7 @@ ENTRY(mul64_Xsig)
popl %esi
leave
ret
+ENDPROC(mul64_Xsig)
@@ -173,4 +175,4 @@ ENTRY(mul_Xsig_Xsig)
popl %esi
leave
ret
-
+ENDPROC(mul_Xsig_Xsig)
diff --git a/arch/x86/math-emu/polynom_Xsig.S b/arch/x86/math-emu/polynom_Xsig.S
index 17315c89ff3d..a9aaf414135d 100644
--- a/arch/x86/math-emu/polynom_Xsig.S
+++ b/arch/x86/math-emu/polynom_Xsig.S
@@ -133,3 +133,4 @@ L_accum_done:
popl %esi
leave
ret
+ENDPROC(polynomial_Xsig)
diff --git a/arch/x86/math-emu/reg_compare.c b/arch/x86/math-emu/reg_compare.c
index b77360fdbf4a..19b33b50adfa 100644
--- a/arch/x86/math-emu/reg_compare.c
+++ b/arch/x86/math-emu/reg_compare.c
@@ -168,7 +168,7 @@ static int compare(FPU_REG const *b, int tagb)
/* This function requires that st(0) is not empty */
int FPU_compare_st_data(FPU_REG const *loaded_data, u_char loaded_tag)
{
- int f = 0, c;
+ int f, c;
c = compare(loaded_data, loaded_tag);
@@ -189,12 +189,12 @@ int FPU_compare_st_data(FPU_REG const *loaded_data, u_char loaded_tag)
case COMP_No_Comp:
f = SW_C3 | SW_C2 | SW_C0;
break;
-#ifdef PARANOID
default:
+#ifdef PARANOID
EXCEPTION(EX_INTERNAL | 0x121);
+#endif /* PARANOID */
f = SW_C3 | SW_C2 | SW_C0;
break;
-#endif /* PARANOID */
}
setcc(f);
if (c & COMP_Denormal) {
@@ -205,7 +205,7 @@ int FPU_compare_st_data(FPU_REG const *loaded_data, u_char loaded_tag)
static int compare_st_st(int nr)
{
- int f = 0, c;
+ int f, c;
FPU_REG *st_ptr;
if (!NOT_EMPTY(0) || !NOT_EMPTY(nr)) {
@@ -235,12 +235,12 @@ static int compare_st_st(int nr)
case COMP_No_Comp:
f = SW_C3 | SW_C2 | SW_C0;
break;
-#ifdef PARANOID
default:
+#ifdef PARANOID
EXCEPTION(EX_INTERNAL | 0x122);
+#endif /* PARANOID */
f = SW_C3 | SW_C2 | SW_C0;
break;
-#endif /* PARANOID */
}
setcc(f);
if (c & COMP_Denormal) {
@@ -283,12 +283,12 @@ static int compare_i_st_st(int nr)
case COMP_No_Comp:
f = X86_EFLAGS_ZF | X86_EFLAGS_PF | X86_EFLAGS_CF;
break;
-#ifdef PARANOID
default:
+#ifdef PARANOID
EXCEPTION(EX_INTERNAL | 0x122);
+#endif /* PARANOID */
f = 0;
break;
-#endif /* PARANOID */
}
FPU_EFLAGS = (FPU_EFLAGS & ~(X86_EFLAGS_ZF | X86_EFLAGS_PF | X86_EFLAGS_CF)) | f;
if (c & COMP_Denormal) {
diff --git a/arch/x86/math-emu/reg_norm.S b/arch/x86/math-emu/reg_norm.S
index 8b6352efceef..53ac1a343c69 100644
--- a/arch/x86/math-emu/reg_norm.S
+++ b/arch/x86/math-emu/reg_norm.S
@@ -94,6 +94,7 @@ L_overflow:
call arith_overflow
pop %ebx
jmp L_exit
+ENDPROC(FPU_normalize)
@@ -145,3 +146,4 @@ L_exit_nuo_zero:
popl %ebx
leave
ret
+ENDPROC(FPU_normalize_nuo)
diff --git a/arch/x86/math-emu/reg_round.S b/arch/x86/math-emu/reg_round.S
index d1d4e48b4f67..41af5b208d88 100644
--- a/arch/x86/math-emu/reg_round.S
+++ b/arch/x86/math-emu/reg_round.S
@@ -706,3 +706,5 @@ L_exception_exit:
mov $-1,%eax
jmp fpu_reg_round_special_exit
#endif /* PARANOID */
+
+ENDPROC(FPU_round)
diff --git a/arch/x86/math-emu/reg_u_add.S b/arch/x86/math-emu/reg_u_add.S
index 47c4c2434d85..3b1bc5e9b2f6 100644
--- a/arch/x86/math-emu/reg_u_add.S
+++ b/arch/x86/math-emu/reg_u_add.S
@@ -165,3 +165,4 @@ L_exit:
leave
ret
#endif /* PARANOID */
+ENDPROC(FPU_u_add)
diff --git a/arch/x86/math-emu/reg_u_div.S b/arch/x86/math-emu/reg_u_div.S
index cc00654b6f9a..796eb5ab921b 100644
--- a/arch/x86/math-emu/reg_u_div.S
+++ b/arch/x86/math-emu/reg_u_div.S
@@ -469,3 +469,5 @@ L_exit:
leave
ret
#endif /* PARANOID */
+
+ENDPROC(FPU_u_div)
diff --git a/arch/x86/math-emu/reg_u_mul.S b/arch/x86/math-emu/reg_u_mul.S
index 973f12af97df..6196f68cf3c1 100644
--- a/arch/x86/math-emu/reg_u_mul.S
+++ b/arch/x86/math-emu/reg_u_mul.S
@@ -146,3 +146,4 @@ L_exit:
ret
#endif /* PARANOID */
+ENDPROC(FPU_u_mul)
diff --git a/arch/x86/math-emu/reg_u_sub.S b/arch/x86/math-emu/reg_u_sub.S
index 1b6c24801d22..d115b900919a 100644
--- a/arch/x86/math-emu/reg_u_sub.S
+++ b/arch/x86/math-emu/reg_u_sub.S
@@ -270,3 +270,4 @@ L_exit:
popl %esi
leave
ret
+ENDPROC(FPU_u_sub)
diff --git a/arch/x86/math-emu/round_Xsig.S b/arch/x86/math-emu/round_Xsig.S
index bbe0e87718e4..87c99749a495 100644
--- a/arch/x86/math-emu/round_Xsig.S
+++ b/arch/x86/math-emu/round_Xsig.S
@@ -78,7 +78,7 @@ L_exit:
popl %ebx
leave
ret
-
+ENDPROC(round_Xsig)
@@ -138,4 +138,4 @@ L_n_exit:
popl %ebx
leave
ret
-
+ENDPROC(norm_Xsig)
diff --git a/arch/x86/math-emu/shr_Xsig.S b/arch/x86/math-emu/shr_Xsig.S
index 31cdd118e918..c8552edeec75 100644
--- a/arch/x86/math-emu/shr_Xsig.S
+++ b/arch/x86/math-emu/shr_Xsig.S
@@ -85,3 +85,4 @@ L_more_than_95:
popl %esi
leave
ret
+ENDPROC(shr_Xsig)
diff --git a/arch/x86/math-emu/wm_shrx.S b/arch/x86/math-emu/wm_shrx.S
index 518428317985..340dd6897f85 100644
--- a/arch/x86/math-emu/wm_shrx.S
+++ b/arch/x86/math-emu/wm_shrx.S
@@ -92,6 +92,7 @@ L_more_than_95:
popl %esi
leave
ret
+ENDPROC(FPU_shrx)
/*---------------------------------------------------------------------------+
@@ -202,3 +203,4 @@ Ls_more_than_95:
popl %esi
leave
ret
+ENDPROC(FPU_shrxs)
diff --git a/arch/x86/math-emu/wm_sqrt.S b/arch/x86/math-emu/wm_sqrt.S
index d258f59564e1..695afae38fdf 100644
--- a/arch/x86/math-emu/wm_sqrt.S
+++ b/arch/x86/math-emu/wm_sqrt.S
@@ -468,3 +468,4 @@ sqrt_more_prec_large:
/* Our estimate is too large */
movl $0x7fffff00,%eax
jmp sqrt_round_result
+ENDPROC(wm_sqrt)
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 0fbdcb64f9f8..72bf8c01c6e3 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -39,3 +39,5 @@ obj-$(CONFIG_X86_INTEL_MPX) += mpx.o
obj-$(CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS) += pkeys.o
obj-$(CONFIG_RANDOMIZE_MEMORY) += kaslr.o
+obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt.o
+obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_boot.o
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index 0470826d2bdc..5e3ac6fe6c9e 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -13,12 +13,12 @@
*/
#include <linux/debugfs.h>
+#include <linux/kasan.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
-#include <asm/kasan.h>
#include <asm/pgtable.h>
/*
@@ -138,7 +138,7 @@ static void printk_prot(struct seq_file *m, pgprot_t prot, int level, bool dmsg)
{
pgprotval_t pr = pgprot_val(prot);
static const char * const level_name[] =
- { "cr3", "pgd", "pud", "pmd", "pte" };
+ { "cr3", "pgd", "p4d", "pud", "pmd", "pte" };
if (!pgprot_val(prot)) {
/* Not present */
@@ -162,12 +162,12 @@ static void printk_prot(struct seq_file *m, pgprot_t prot, int level, bool dmsg)
pt_dump_cont_printf(m, dmsg, " ");
/* Bit 7 has a different meaning on level 3 vs 4 */
- if (level <= 3 && pr & _PAGE_PSE)
+ if (level <= 4 && pr & _PAGE_PSE)
pt_dump_cont_printf(m, dmsg, "PSE ");
else
pt_dump_cont_printf(m, dmsg, " ");
- if ((level == 4 && pr & _PAGE_PAT) ||
- ((level == 3 || level == 2) && pr & _PAGE_PAT_LARGE))
+ if ((level == 5 && pr & _PAGE_PAT) ||
+ ((level == 4 || level == 3) && pr & _PAGE_PAT_LARGE))
pt_dump_cont_printf(m, dmsg, "PAT ");
else
pt_dump_cont_printf(m, dmsg, " ");
@@ -188,11 +188,12 @@ static void printk_prot(struct seq_file *m, pgprot_t prot, int level, bool dmsg)
*/
static unsigned long normalize_addr(unsigned long u)
{
-#ifdef CONFIG_X86_64
- return (signed long)(u << 16) >> 16;
-#else
- return u;
-#endif
+ int shift;
+ if (!IS_ENABLED(CONFIG_X86_64))
+ return u;
+
+ shift = 64 - (__VIRTUAL_MASK_SHIFT + 1);
+ return (signed long)(u << shift) >> shift;
}
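For reference, assuming the usual x86-64 constants (which this patch does not change): with 4-level paging __VIRTUAL_MASK_SHIFT is 47, so shift = 64 - 48 = 16 and the helper sign-extends from bit 47; with CONFIG_X86_5LEVEL it is 56, giving shift = 7 and sign extension from bit 56. That is exactly the canonical-address handling the dumper needs at either paging depth.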
/*
@@ -297,32 +298,62 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr,
for (i = 0; i < PTRS_PER_PTE; i++) {
prot = pte_flags(*start);
st->current_address = normalize_addr(P + i * PTE_LEVEL_MULT);
- note_page(m, st, __pgprot(prot), 4);
+ note_page(m, st, __pgprot(prot), 5);
start++;
}
}
+#ifdef CONFIG_KASAN
+
+/*
+ * This is an optimization for the KASAN=y case. Since all KASAN page tables
+ * eventually point to kasan_zero_page, we can call note_page() right away
+ * without walking through the lower-level page tables. This saves us dozens
+ * of seconds (minutes for a 5-level config) when checking for W+X mappings
+ * or reading the kernel_page_tables debugfs file.
+ */
+static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st,
+ void *pt)
+{
+ if (__pa(pt) == __pa(kasan_zero_pmd) ||
+#ifdef CONFIG_X86_5LEVEL
+ __pa(pt) == __pa(kasan_zero_p4d) ||
+#endif
+ __pa(pt) == __pa(kasan_zero_pud)) {
+ pgprotval_t prot = pte_flags(kasan_zero_pte[0]);
+ note_page(m, st, __pgprot(prot), 5);
+ return true;
+ }
+ return false;
+}
+#else
+static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st,
+ void *pt)
+{
+ return false;
+}
+#endif
#if PTRS_PER_PMD > 1
static void walk_pmd_level(struct seq_file *m, struct pg_state *st, pud_t addr, unsigned long P)
{
int i;
- pmd_t *start;
+ pmd_t *start, *pmd_start;
pgprotval_t prot;
- start = (pmd_t *)pud_page_vaddr(addr);
+ pmd_start = start = (pmd_t *)pud_page_vaddr(addr);
for (i = 0; i < PTRS_PER_PMD; i++) {
st->current_address = normalize_addr(P + i * PMD_LEVEL_MULT);
if (!pmd_none(*start)) {
if (pmd_large(*start) || !pmd_present(*start)) {
prot = pmd_flags(*start);
- note_page(m, st, __pgprot(prot), 3);
- } else {
+ note_page(m, st, __pgprot(prot), 4);
+ } else if (!kasan_page_table(m, st, pmd_start)) {
walk_pte_level(m, st, *start,
P + i * PMD_LEVEL_MULT);
}
} else
- note_page(m, st, __pgprot(0), 3);
+ note_page(m, st, __pgprot(0), 4);
start++;
}
}
@@ -335,39 +366,27 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st, pud_t addr,
#if PTRS_PER_PUD > 1
-/*
- * This is an optimization for CONFIG_DEBUG_WX=y + CONFIG_KASAN=y
- * KASAN fills page tables with the same values. Since there is no
- * point in checking page table more than once we just skip repeated
- * entries. This saves us dozens of seconds during boot.
- */
-static bool pud_already_checked(pud_t *prev_pud, pud_t *pud, bool checkwx)
-{
- return checkwx && prev_pud && (pud_val(*prev_pud) == pud_val(*pud));
-}
-
static void walk_pud_level(struct seq_file *m, struct pg_state *st, p4d_t addr, unsigned long P)
{
int i;
- pud_t *start;
+ pud_t *start, *pud_start;
pgprotval_t prot;
pud_t *prev_pud = NULL;
- start = (pud_t *)p4d_page_vaddr(addr);
+ pud_start = start = (pud_t *)p4d_page_vaddr(addr);
for (i = 0; i < PTRS_PER_PUD; i++) {
st->current_address = normalize_addr(P + i * PUD_LEVEL_MULT);
- if (!pud_none(*start) &&
- !pud_already_checked(prev_pud, start, st->check_wx)) {
+ if (!pud_none(*start)) {
if (pud_large(*start) || !pud_present(*start)) {
prot = pud_flags(*start);
- note_page(m, st, __pgprot(prot), 2);
- } else {
+ note_page(m, st, __pgprot(prot), 3);
+ } else if (!kasan_page_table(m, st, pud_start)) {
walk_pmd_level(m, st, *start,
P + i * PUD_LEVEL_MULT);
}
} else
- note_page(m, st, __pgprot(0), 2);
+ note_page(m, st, __pgprot(0), 3);
prev_pud = start;
start++;
@@ -385,10 +404,10 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st, p4d_t addr,
static void walk_p4d_level(struct seq_file *m, struct pg_state *st, pgd_t addr, unsigned long P)
{
int i;
- p4d_t *start;
+ p4d_t *start, *p4d_start;
pgprotval_t prot;
- start = (p4d_t *)pgd_page_vaddr(addr);
+ p4d_start = start = (p4d_t *)pgd_page_vaddr(addr);
for (i = 0; i < PTRS_PER_P4D; i++) {
st->current_address = normalize_addr(P + i * P4D_LEVEL_MULT);
@@ -396,7 +415,7 @@ static void walk_p4d_level(struct seq_file *m, struct pg_state *st, pgd_t addr,
if (p4d_large(*start) || !p4d_present(*start)) {
prot = p4d_flags(*start);
note_page(m, st, __pgprot(prot), 2);
- } else {
+ } else if (!kasan_page_table(m, st, p4d_start)) {
walk_pud_level(m, st, *start,
P + i * P4D_LEVEL_MULT);
}
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index 0ea8afcb929c..c076f710de4c 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -36,6 +36,48 @@ bool ex_handler_fault(const struct exception_table_entry *fixup,
}
EXPORT_SYMBOL_GPL(ex_handler_fault);
+/*
+ * Handler for UD0 exception following a failed test against the
+ * result of a refcount inc/dec/add/sub.
+ */
+bool ex_handler_refcount(const struct exception_table_entry *fixup,
+ struct pt_regs *regs, int trapnr)
+{
+ /* First unconditionally saturate the refcount. */
+ *(int *)regs->cx = INT_MIN / 2;
+
+ /*
+ * Strictly speaking, this reports the fixup destination, not the fault
+ * location, and not the actually overflowing instruction (the one just
+ * before the "js"). Since that instruction could be a variety of lengths,
+ * just report the location after the overflow, which should be close
+ * enough for finding the overflow: it is at least back in the calling
+ * function, having returned from .text.unlikely.
+ */
+ regs->ip = ex_fixup_addr(fixup);
+
+ /*
+ * This function has been called because either a negative refcount
+ * value was seen by any of the refcount functions, or a zero
+ * refcount value was seen by refcount_dec().
+ *
+ * If we crossed from INT_MAX to INT_MIN, OF (Overflow Flag: result
+ * wrapped around) will be set. Additionally, seeing the refcount
+ * reach 0 will set ZF (Zero Flag: result was zero). In each of
+ * these cases we want a report, since it's a boundary condition.
+ *
+ */
+ if (regs->flags & (X86_EFLAGS_OF | X86_EFLAGS_ZF)) {
+ bool zero = regs->flags & X86_EFLAGS_ZF;
+
+ refcount_error_report(regs, zero ? "hit zero" : "overflow");
+ }
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(ex_handler_refcount);
+
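For reference, INT_MIN / 2 evaluates to -1073741824 (0xC0000000 as a 32-bit pattern), which parks the saturated counter roughly halfway into the negative range; presumably this is so that subsequent increments or decrements by racing users cannot plausibly bring the refcount back near zero, or back across the signed-overflow boundary, before the report is acted on.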
bool ex_handler_ext(const struct exception_table_entry *fixup,
struct pt_regs *regs, int trapnr)
{
@@ -142,7 +184,7 @@ void __init early_fixup_exception(struct pt_regs *regs, int trapnr)
* undefined. I'm not sure which CPUs do this, but at least
* the 486 DX works this way.
*/
- if ((regs->cs & 0xFFFF) != __KERNEL_CS)
+ if (regs->cs != __KERNEL_CS)
goto fail;
/*
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 2a1fa10c6a98..b836a7274e12 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -396,14 +396,18 @@ static void dump_pagetable(unsigned long address)
pte_t *pte;
#ifdef CONFIG_X86_PAE
- printk("*pdpt = %016Lx ", pgd_val(*pgd));
+ pr_info("*pdpt = %016Lx ", pgd_val(*pgd));
if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
goto out;
+#define pr_pde pr_cont
+#else
+#define pr_pde pr_info
#endif
p4d = p4d_offset(pgd, address);
pud = pud_offset(p4d, address);
pmd = pmd_offset(pud, address);
- printk(KERN_CONT "*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));
+ pr_pde("*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));
+#undef pr_pde
/*
* We must not directly access the pte in the highpte
@@ -415,9 +419,9 @@ static void dump_pagetable(unsigned long address)
goto out;
pte = pte_offset_kernel(pmd, address);
- printk("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
+ pr_cont("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
out:
- printk("\n");
+ pr_cont("\n");
}
#else /* CONFIG_X86_64: */
@@ -565,7 +569,7 @@ static void dump_pagetable(unsigned long address)
if (bad_address(pgd))
goto bad;
- printk("PGD %lx ", pgd_val(*pgd));
+ pr_info("PGD %lx ", pgd_val(*pgd));
if (!pgd_present(*pgd))
goto out;
@@ -574,7 +578,7 @@ static void dump_pagetable(unsigned long address)
if (bad_address(p4d))
goto bad;
- printk("P4D %lx ", p4d_val(*p4d));
+ pr_cont("P4D %lx ", p4d_val(*p4d));
if (!p4d_present(*p4d) || p4d_large(*p4d))
goto out;
@@ -582,7 +586,7 @@ static void dump_pagetable(unsigned long address)
if (bad_address(pud))
goto bad;
- printk("PUD %lx ", pud_val(*pud));
+ pr_cont("PUD %lx ", pud_val(*pud));
if (!pud_present(*pud) || pud_large(*pud))
goto out;
@@ -590,7 +594,7 @@ static void dump_pagetable(unsigned long address)
if (bad_address(pmd))
goto bad;
- printk("PMD %lx ", pmd_val(*pmd));
+ pr_cont("PMD %lx ", pmd_val(*pmd));
if (!pmd_present(*pmd) || pmd_large(*pmd))
goto out;
@@ -598,12 +602,12 @@ static void dump_pagetable(unsigned long address)
if (bad_address(pte))
goto bad;
- printk("PTE %lx", pte_val(*pte));
+ pr_cont("PTE %lx", pte_val(*pte));
out:
- printk("\n");
+ pr_cont("\n");
return;
bad:
- printk("BAD\n");
+ pr_info("BAD\n");
}
#endif /* CONFIG_X86_64 */
@@ -1254,10 +1258,6 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
* routines.
- *
- * This function must have noinline because both callers
- * {,trace_}do_page_fault() have notrace on. Having this an actual function
- * guarantees there's a function trace entry.
*/
static noinline void
__do_page_fault(struct pt_regs *regs, unsigned long error_code,
@@ -1490,27 +1490,6 @@ good_area:
}
NOKPROBE_SYMBOL(__do_page_fault);
-dotraplinkage void notrace
-do_page_fault(struct pt_regs *regs, unsigned long error_code)
-{
- unsigned long address = read_cr2(); /* Get the faulting address */
- enum ctx_state prev_state;
-
- /*
- * We must have this function tagged with __kprobes, notrace and call
- * read_cr2() before calling anything else. To avoid calling any kind
- * of tracing machinery before we've observed the CR2 value.
- *
- * exception_{enter,exit}() contain all sorts of tracepoints.
- */
-
- prev_state = exception_enter();
- __do_page_fault(regs, error_code, address);
- exception_exit(prev_state);
-}
-NOKPROBE_SYMBOL(do_page_fault);
-
-#ifdef CONFIG_TRACING
static nokprobe_inline void
trace_page_fault_entries(unsigned long address, struct pt_regs *regs,
unsigned long error_code)
@@ -1521,22 +1500,24 @@ trace_page_fault_entries(unsigned long address, struct pt_regs *regs,
trace_page_fault_kernel(address, regs, error_code);
}
+/*
+ * We must have this function blacklisted from kprobes, tagged with notrace
+ * and call read_cr2() before calling anything else, to avoid invoking any
+ * kind of tracing machinery before we've observed the CR2 value.
+ *
+ * exception_{enter,exit}() contain all sorts of tracepoints.
+ */
dotraplinkage void notrace
-trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
+do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
- /*
- * The exception_enter and tracepoint processing could
- * trigger another page faults (user space callchain
- * reading) and destroy the original cr2 value, so read
- * the faulting address now.
- */
- unsigned long address = read_cr2();
+ unsigned long address = read_cr2(); /* Get the faulting address */
enum ctx_state prev_state;
prev_state = exception_enter();
- trace_page_fault_entries(address, regs, error_code);
+ if (trace_pagefault_enabled())
+ trace_page_fault_entries(address, regs, error_code);
+
__do_page_fault(regs, error_code, address);
exception_exit(prev_state);
}
-NOKPROBE_SYMBOL(trace_do_page_fault);
-#endif /* CONFIG_TRACING */
+NOKPROBE_SYMBOL(do_page_fault);
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index 2824607df108..6d06cf33e3de 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -18,6 +18,7 @@
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/elf.h>
+#include <asm/mpx.h>
#if 0 /* This is just for testing */
struct page *
@@ -85,25 +86,38 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
info.flags = 0;
info.length = len;
info.low_limit = get_mmap_base(1);
+
+ /*
+ * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
+ * in the full address space.
+ */
info.high_limit = in_compat_syscall() ?
- tasksize_32bit() : tasksize_64bit();
+ task_size_32bit() : task_size_64bit(addr > DEFAULT_MAP_WINDOW);
+
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
info.align_offset = 0;
return vm_unmapped_area(&info);
}
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
- unsigned long addr0, unsigned long len,
+ unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags)
{
struct hstate *h = hstate_file(file);
struct vm_unmapped_area_info info;
- unsigned long addr;
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.length = len;
info.low_limit = PAGE_SIZE;
info.high_limit = get_mmap_base(0);
+
+ /*
+ * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
+ * in the full address space.
+ */
+ if (addr > DEFAULT_MAP_WINDOW && !in_compat_syscall())
+ info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;
+
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
info.align_offset = 0;
addr = vm_unmapped_area(&info);
@@ -118,7 +132,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
VM_BUG_ON(addr != -ENOMEM);
info.flags = 0;
info.low_limit = TASK_UNMAPPED_BASE;
- info.high_limit = TASK_SIZE;
+ info.high_limit = TASK_SIZE_LOW;
addr = vm_unmapped_area(&info);
}
@@ -135,6 +149,11 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
if (len & ~huge_page_mask(h))
return -EINVAL;
+
+ addr = mpx_unmapped_area_check(addr, len, flags);
+ if (IS_ERR_VALUE(addr))
+ return addr;
+
if (len > TASK_SIZE)
return -ENOMEM;
diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c
index adab1595f4bd..31cea988fa36 100644
--- a/arch/x86/mm/ident_map.c
+++ b/arch/x86/mm/ident_map.c
@@ -51,7 +51,7 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
if (!pmd)
return -ENOMEM;
ident_pmd_init(info, pmd, addr, next);
- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
+ set_pud(pud, __pud(__pa(pmd) | info->kernpg_flag));
}
return 0;
@@ -79,7 +79,7 @@ static int ident_p4d_init(struct x86_mapping_info *info, p4d_t *p4d_page,
if (!pud)
return -ENOMEM;
ident_pud_init(info, pud, addr, next);
- set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE));
+ set_p4d(p4d, __p4d(__pa(pud) | info->kernpg_flag));
}
return 0;
@@ -93,6 +93,10 @@ int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
unsigned long next;
int result;
+ /* Set the default pagetable flags if not supplied */
+ if (!info->kernpg_flag)
+ info->kernpg_flag = _KERNPG_TABLE;
+
for (; addr < end; addr = next) {
pgd_t *pgd = pgd_page + pgd_index(addr);
p4d_t *p4d;
@@ -116,14 +120,14 @@ int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
if (result)
return result;
if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
- set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE));
+ set_pgd(pgd, __pgd(__pa(p4d) | info->kernpg_flag));
} else {
/*
* With p4d folded, pgd is equal to p4d.
* The pgd entry has to point to the pud page table in this case.
*/
pud_t *pud = pud_offset(p4d, 0);
- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
+ set_pgd(pgd, __pgd(__pa(pud) | info->kernpg_flag));
}
}
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 673541eb3b3f..7777ccc0e9f9 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -18,6 +18,7 @@
#include <asm/dma.h> /* for MAX_DMA_PFN */
#include <asm/microcode.h>
#include <asm/kaslr.h>
+#include <asm/hypervisor.h>
/*
* We need to define the tracepoints somewhere, and tlb.c
@@ -636,6 +637,8 @@ void __init init_mem_mapping(void)
load_cr3(swapper_pg_dir);
__flush_tlb_all();
+ hypervisor_init_mem_mapping();
+
early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
}
@@ -812,7 +815,7 @@ void __init zone_sizes_init(void)
DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
.loaded_mm = &init_mm,
- .state = 0,
+ .next_asid = 1,
.cr4 = ~0UL, /* fail hard if we screw up cr4 shadow initialization */
};
EXPORT_SYMBOL_GPL(cpu_tlbstate);
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 4c1b5fd0c7ad..34f0e1847dd6 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -13,6 +13,8 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>
+#include <linux/mem_encrypt.h>
+#include <linux/efi.h>
#include <asm/set_memory.h>
#include <asm/e820/api.h>
@@ -21,6 +23,7 @@
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>
+#include <asm/setup.h>
#include "physaddr.h"
@@ -106,12 +109,6 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
}
/*
- * Don't remap the low PCI/ISA area, it's always mapped..
- */
- if (is_ISA_range(phys_addr, last_addr))
- return (__force void __iomem *)phys_to_virt(phys_addr);
-
- /*
* Don't allow anybody to remap normal RAM that we're using..
*/
pfn = phys_addr >> PAGE_SHIFT;
@@ -340,13 +337,17 @@ void iounmap(volatile void __iomem *addr)
return;
/*
- * __ioremap special-cases the PCI/ISA range by not instantiating a
- * vm_area and by simply returning an address into the kernel mapping
- * of ISA space. So handle that here.
+ * The PCI/ISA range special-casing was removed from __ioremap()
+ * so this check, in theory, can be removed. However, there are
+ * cases where iounmap() is called for addresses not obtained via
+ * ioremap() (vga16fb for example). Add a warning so that these
+ * cases can be caught and fixed.
*/
if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
- (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
+ (void __force *)addr < phys_to_virt(ISA_END_ADDRESS)) {
+ WARN(1, "iounmap() called for ISA range not obtained using ioremap()\n");
return;
+ }
addr = (volatile void __iomem *)
(PAGE_MASK & (unsigned long __force)addr);
@@ -399,12 +400,10 @@ void *xlate_dev_mem_ptr(phys_addr_t phys)
unsigned long offset = phys & ~PAGE_MASK;
void *vaddr;
- /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
- if (page_is_ram(start >> PAGE_SHIFT))
- return __va(phys);
+ /* memremap() maps if RAM, otherwise falls back to ioremap() */
+ vaddr = memremap(start, PAGE_SIZE, MEMREMAP_WB);
- vaddr = ioremap_cache(start, PAGE_SIZE);
- /* Only add the offset on success and return NULL if the ioremap() failed: */
+ /* Only add the offset on success and return NULL if memremap() failed */
if (vaddr)
vaddr += offset;
@@ -413,11 +412,263 @@ void *xlate_dev_mem_ptr(phys_addr_t phys)
void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
- if (page_is_ram(phys >> PAGE_SHIFT))
- return;
+ memunmap((void *)((unsigned long)addr & PAGE_MASK));
+}
+
+/*
+ * Examine the physical address to determine if it is an area of memory
+ * that should be mapped decrypted. If the memory is not part of the
+ * kernel usable area it was accessed and created decrypted, so these
+ * areas should be mapped decrypted. And since the encryption key can
+ * change across reboots, persistent memory should also be mapped
+ * decrypted.
+ */
+static bool memremap_should_map_decrypted(resource_size_t phys_addr,
+ unsigned long size)
+{
+ int is_pmem;
+
+ /*
+ * Check if the address is part of a persistent memory region.
+ * This check covers areas added by E820, EFI and ACPI.
+ */
+ is_pmem = region_intersects(phys_addr, size, IORESOURCE_MEM,
+ IORES_DESC_PERSISTENT_MEMORY);
+ if (is_pmem != REGION_DISJOINT)
+ return true;
+
+ /*
+ * Check if the non-volatile attribute is set for an EFI
+ * reserved area.
+ */
+ if (efi_enabled(EFI_BOOT)) {
+ switch (efi_mem_type(phys_addr)) {
+ case EFI_RESERVED_TYPE:
+ if (efi_mem_attributes(phys_addr) & EFI_MEMORY_NV)
+ return true;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* Check if the address is outside kernel usable area */
+ switch (e820__get_entry_type(phys_addr, phys_addr + size - 1)) {
+ case E820_TYPE_RESERVED:
+ case E820_TYPE_ACPI:
+ case E820_TYPE_NVS:
+ case E820_TYPE_UNUSABLE:
+ case E820_TYPE_PRAM:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+/*
+ * Examine the physical address to determine if it is EFI data. Check
+ * it against the boot params structure and EFI tables and memory types.
+ */
+static bool memremap_is_efi_data(resource_size_t phys_addr,
+ unsigned long size)
+{
+ u64 paddr;
+
+ /* Check if the address is part of EFI boot/runtime data */
+ if (!efi_enabled(EFI_BOOT))
+ return false;
+
+ paddr = boot_params.efi_info.efi_memmap_hi;
+ paddr <<= 32;
+ paddr |= boot_params.efi_info.efi_memmap;
+ if (phys_addr == paddr)
+ return true;
+
+ paddr = boot_params.efi_info.efi_systab_hi;
+ paddr <<= 32;
+ paddr |= boot_params.efi_info.efi_systab;
+ if (phys_addr == paddr)
+ return true;
+
+ if (efi_is_table_address(phys_addr))
+ return true;
+
+ switch (efi_mem_type(phys_addr)) {
+ case EFI_BOOT_SERVICES_DATA:
+ case EFI_RUNTIME_SERVICES_DATA:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+/*
+ * Examine the physical address to determine if it is boot data by checking
+ * it against the boot params setup_data chain.
+ */
+static bool memremap_is_setup_data(resource_size_t phys_addr,
+ unsigned long size)
+{
+ struct setup_data *data;
+ u64 paddr, paddr_next;
+
+ paddr = boot_params.hdr.setup_data;
+ while (paddr) {
+ unsigned int len;
+
+ if (phys_addr == paddr)
+ return true;
+
+ data = memremap(paddr, sizeof(*data),
+ MEMREMAP_WB | MEMREMAP_DEC);
+
+ paddr_next = data->next;
+ len = data->len;
+
+ memunmap(data);
+
+ if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
+ return true;
+
+ paddr = paddr_next;
+ }
+
+ return false;
+}
+
+/*
+ * Examine the physical address to determine if it is boot data by checking
+ * it against the boot params setup_data chain (early boot version).
+ */
+static bool __init early_memremap_is_setup_data(resource_size_t phys_addr,
+ unsigned long size)
+{
+ struct setup_data *data;
+ u64 paddr, paddr_next;
+
+ paddr = boot_params.hdr.setup_data;
+ while (paddr) {
+ unsigned int len;
+
+ if (phys_addr == paddr)
+ return true;
+
+ data = early_memremap_decrypted(paddr, sizeof(*data));
+
+ paddr_next = data->next;
+ len = data->len;
+
+ early_memunmap(data, sizeof(*data));
+
+ if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
+ return true;
+
+ paddr = paddr_next;
+ }
+
+ return false;
+}
+
+/*
+ * Architecture function to determine if RAM remap is allowed. By default, a
+ * RAM remap will map the data as encrypted. Determine if a RAM remap should
+ * not be done so that the data will be mapped decrypted.
+ */
+bool arch_memremap_can_ram_remap(resource_size_t phys_addr, unsigned long size,
+ unsigned long flags)
+{
+ if (!sme_active())
+ return true;
+
+ if (flags & MEMREMAP_ENC)
+ return true;
+
+ if (flags & MEMREMAP_DEC)
+ return false;
+
+ if (memremap_is_setup_data(phys_addr, size) ||
+ memremap_is_efi_data(phys_addr, size) ||
+ memremap_should_map_decrypted(phys_addr, size))
+ return false;
+
+ return true;
+}
+
+/*
+ * Architecture override of __weak function to adjust the protection attributes
+ * used when remapping memory. By default, early_memremap() will map the data
+ * as encrypted. Determine if an encrypted mapping should not be done and set
+ * the appropriate protection attributes.
+ */
+pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
+ unsigned long size,
+ pgprot_t prot)
+{
+ if (!sme_active())
+ return prot;
+
+ if (early_memremap_is_setup_data(phys_addr, size) ||
+ memremap_is_efi_data(phys_addr, size) ||
+ memremap_should_map_decrypted(phys_addr, size))
+ prot = pgprot_decrypted(prot);
+ else
+ prot = pgprot_encrypted(prot);
+
+ return prot;
+}
+
+bool phys_mem_access_encrypted(unsigned long phys_addr, unsigned long size)
+{
+ return arch_memremap_can_ram_remap(phys_addr, size, 0);
+}
+
+#ifdef CONFIG_ARCH_USE_MEMREMAP_PROT
+/* Remap memory with encryption */
+void __init *early_memremap_encrypted(resource_size_t phys_addr,
+ unsigned long size)
+{
+ return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC);
+}
+
+/*
+ * Remap memory with encryption and write-protected - cannot be called
+ * before pat_init() is called
+ */
+void __init *early_memremap_encrypted_wp(resource_size_t phys_addr,
+ unsigned long size)
+{
+ /* Be sure the write-protect PAT entry is set for write-protect */
+ if (__pte2cachemode_tbl[_PAGE_CACHE_MODE_WP] != _PAGE_CACHE_MODE_WP)
+ return NULL;
+
+ return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC_WP);
+}
+
+/* Remap memory without encryption */
+void __init *early_memremap_decrypted(resource_size_t phys_addr,
+ unsigned long size)
+{
+ return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC);
+}
+
+/*
+ * Remap memory without encryption and write-protected - cannot be called
+ * before pat_init() is called
+ */
+void __init *early_memremap_decrypted_wp(resource_size_t phys_addr,
+ unsigned long size)
+{
+ /* Be sure the write-protect PAT entry is set for write-protect */
+ if (__pte2cachemode_tbl[_PAGE_CACHE_MODE_WP] != _PAGE_CACHE_MODE_WP)
+ return NULL;
- iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
+ return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC_WP);
}
+#endif /* CONFIG_ARCH_USE_MEMREMAP_PROT */
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
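As a hedged aside, a minimal sketch of how callers can steer the new mapping policy; phys and len are placeholders, while MEMREMAP_ENC and MEMREMAP_DEC are the flags consulted by arch_memremap_can_ram_remap() above.

	/* Explicitly request a decrypted mapping (as done for setup_data above). */
	void *dec = memremap(phys, len, MEMREMAP_WB | MEMREMAP_DEC);

	/* Explicitly request an encrypted mapping. */
	void *enc = memremap(phys, len, MEMREMAP_WB | MEMREMAP_ENC);

	/* With neither flag, the setup_data/EFI/E820 checks above decide. */
	void *def = memremap(phys, len, MEMREMAP_WB);

	if (def)
		memunmap(def);
	if (enc)
		memunmap(enc);
	if (dec)
		memunmap(dec);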
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 02c9d7553409..bc84b73684b7 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -11,8 +11,8 @@
#include <asm/e820/types.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
+#include <asm/pgtable.h>
-extern pgd_t early_top_pgt[PTRS_PER_PGD];
extern struct range pfn_mapped[E820_MAX_ENTRIES];
static int __init map_range(struct range *range)
@@ -87,7 +87,7 @@ static struct notifier_block kasan_die_notifier = {
void __init kasan_early_init(void)
{
int i;
- pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL;
+ pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL | _PAGE_ENC;
pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;
p4dval_t p4d_val = __pa_nodebug(kasan_zero_pud) | _KERNPG_TABLE;
@@ -153,7 +153,7 @@ void __init kasan_init(void)
*/
memset(kasan_zero_page, 0, PAGE_SIZE);
for (i = 0; i < PTRS_PER_PTE; i++) {
- pte_t pte = __pte(__pa(kasan_zero_page) | __PAGE_KERNEL_RO);
+ pte_t pte = __pte(__pa(kasan_zero_page) | __PAGE_KERNEL_RO | _PAGE_ENC);
set_pte(&kasan_zero_pte[i], pte);
}
/* Flush TLBs again to be sure that write protection applied. */
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
new file mode 100644
index 000000000000..0fbd09269757
--- /dev/null
+++ b/arch/x86/mm/mem_encrypt.c
@@ -0,0 +1,593 @@
+/*
+ * AMD Memory Encryption Support
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/swiotlb.h>
+#include <linux/mem_encrypt.h>
+
+#include <asm/tlbflush.h>
+#include <asm/fixmap.h>
+#include <asm/setup.h>
+#include <asm/bootparam.h>
+#include <asm/set_memory.h>
+#include <asm/cacheflush.h>
+#include <asm/sections.h>
+#include <asm/processor-flags.h>
+#include <asm/msr.h>
+#include <asm/cmdline.h>
+
+static char sme_cmdline_arg[] __initdata = "mem_encrypt";
+static char sme_cmdline_on[] __initdata = "on";
+static char sme_cmdline_off[] __initdata = "off";
+
+/*
+ * Since SME related variables are set early in the boot process they must
+ * reside in the .data section so as not to be zeroed out when the .bss
+ * section is later cleared.
+ */
+unsigned long sme_me_mask __section(.data) = 0;
+EXPORT_SYMBOL_GPL(sme_me_mask);
+
+/* Buffer used for early in-place encryption by BSP, no locking needed */
+static char sme_early_buffer[PAGE_SIZE] __aligned(PAGE_SIZE);
+
+/*
+ * This routine does not change the underlying encryption setting of the
+ * page(s) that map this memory. It assumes that eventually the memory is
+ * meant to be accessed as either encrypted or decrypted but the contents
+ * are currently not in the desired state.
+ *
+ * This routine follows the steps outlined in the AMD64 Architecture
+ * Programmer's Manual Volume 2, Section 7.10.8 Encrypt-in-Place.
+ */
+static void __init __sme_early_enc_dec(resource_size_t paddr,
+ unsigned long size, bool enc)
+{
+ void *src, *dst;
+ size_t len;
+
+ if (!sme_me_mask)
+ return;
+
+ local_flush_tlb();
+ wbinvd();
+
+ /*
+ * There are a limited number of early mapping slots, so map (at most)
+ * one page at a time.
+ */
+ while (size) {
+ len = min_t(size_t, sizeof(sme_early_buffer), size);
+
+ /*
+ * Create mappings for the current and desired format of
+ * the memory. Use a write-protected mapping for the source.
+ */
+ src = enc ? early_memremap_decrypted_wp(paddr, len) :
+ early_memremap_encrypted_wp(paddr, len);
+
+ dst = enc ? early_memremap_encrypted(paddr, len) :
+ early_memremap_decrypted(paddr, len);
+
+ /*
+ * If a mapping can't be obtained to perform the operation,
+ * then eventual access of that area in the desired mode
+ * will cause a crash.
+ */
+ BUG_ON(!src || !dst);
+
+ /*
+ * Use a temporary buffer, of cache-line multiple size, to
+ * avoid data corruption as documented in the APM.
+ */
+ memcpy(sme_early_buffer, src, len);
+ memcpy(dst, sme_early_buffer, len);
+
+ early_memunmap(dst, len);
+ early_memunmap(src, len);
+
+ paddr += len;
+ size -= len;
+ }
+}
+
+void __init sme_early_encrypt(resource_size_t paddr, unsigned long size)
+{
+ __sme_early_enc_dec(paddr, size, true);
+}
+
+void __init sme_early_decrypt(resource_size_t paddr, unsigned long size)
+{
+ __sme_early_enc_dec(paddr, size, false);
+}
+
+static void __init __sme_early_map_unmap_mem(void *vaddr, unsigned long size,
+ bool map)
+{
+ unsigned long paddr = (unsigned long)vaddr - __PAGE_OFFSET;
+ pmdval_t pmd_flags, pmd;
+
+ /* Use early_pmd_flags but remove the encryption mask */
+ pmd_flags = __sme_clr(early_pmd_flags);
+
+ do {
+ pmd = map ? (paddr & PMD_MASK) + pmd_flags : 0;
+ __early_make_pgtable((unsigned long)vaddr, pmd);
+
+ vaddr += PMD_SIZE;
+ paddr += PMD_SIZE;
+ size = (size <= PMD_SIZE) ? 0 : size - PMD_SIZE;
+ } while (size);
+
+ __native_flush_tlb();
+}
+
+void __init sme_unmap_bootdata(char *real_mode_data)
+{
+ struct boot_params *boot_data;
+ unsigned long cmdline_paddr;
+
+ if (!sme_active())
+ return;
+
+ /* Get the command line address before unmapping the real_mode_data */
+ boot_data = (struct boot_params *)real_mode_data;
+ cmdline_paddr = boot_data->hdr.cmd_line_ptr | ((u64)boot_data->ext_cmd_line_ptr << 32);
+
+ __sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), false);
+
+ if (!cmdline_paddr)
+ return;
+
+ __sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, false);
+}
+
+void __init sme_map_bootdata(char *real_mode_data)
+{
+ struct boot_params *boot_data;
+ unsigned long cmdline_paddr;
+
+ if (!sme_active())
+ return;
+
+ __sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), true);
+
+ /* Get the command line address after mapping the real_mode_data */
+ boot_data = (struct boot_params *)real_mode_data;
+ cmdline_paddr = boot_data->hdr.cmd_line_ptr | ((u64)boot_data->ext_cmd_line_ptr << 32);
+
+ if (!cmdline_paddr)
+ return;
+
+ __sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, true);
+}
+
+void __init sme_early_init(void)
+{
+ unsigned int i;
+
+ if (!sme_me_mask)
+ return;
+
+ early_pmd_flags = __sme_set(early_pmd_flags);
+
+ __supported_pte_mask = __sme_set(__supported_pte_mask);
+
+ /* Update the protection map with memory encryption mask */
+ for (i = 0; i < ARRAY_SIZE(protection_map); i++)
+ protection_map[i] = pgprot_encrypted(protection_map[i]);
+}
+
+/* Architecture __weak replacement functions */
+void __init mem_encrypt_init(void)
+{
+ if (!sme_me_mask)
+ return;
+
+ /* Call into SWIOTLB to update the SWIOTLB DMA buffers */
+ swiotlb_update_mem_attributes();
+
+ pr_info("AMD Secure Memory Encryption (SME) active\n");
+}
+
+void swiotlb_set_mem_attributes(void *vaddr, unsigned long size)
+{
+ WARN(PAGE_ALIGN(size) != size,
+ "size is not page-aligned (%#lx)\n", size);
+
+ /* Make the SWIOTLB buffer area decrypted */
+ set_memory_decrypted((unsigned long)vaddr, size >> PAGE_SHIFT);
+}
+
+static void __init sme_clear_pgd(pgd_t *pgd_base, unsigned long start,
+ unsigned long end)
+{
+ unsigned long pgd_start, pgd_end, pgd_size;
+ pgd_t *pgd_p;
+
+ pgd_start = start & PGDIR_MASK;
+ pgd_end = end & PGDIR_MASK;
+
+ pgd_size = (((pgd_end - pgd_start) / PGDIR_SIZE) + 1);
+ pgd_size *= sizeof(pgd_t);
+
+ pgd_p = pgd_base + pgd_index(start);
+
+ memset(pgd_p, 0, pgd_size);
+}
+
+#define PGD_FLAGS _KERNPG_TABLE_NOENC
+#define P4D_FLAGS _KERNPG_TABLE_NOENC
+#define PUD_FLAGS _KERNPG_TABLE_NOENC
+#define PMD_FLAGS (__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)
+
+static void __init *sme_populate_pgd(pgd_t *pgd_base, void *pgtable_area,
+ unsigned long vaddr, pmdval_t pmd_val)
+{
+ pgd_t *pgd_p;
+ p4d_t *p4d_p;
+ pud_t *pud_p;
+ pmd_t *pmd_p;
+
+ pgd_p = pgd_base + pgd_index(vaddr);
+ if (native_pgd_val(*pgd_p)) {
+ if (IS_ENABLED(CONFIG_X86_5LEVEL))
+ p4d_p = (p4d_t *)(native_pgd_val(*pgd_p) & ~PTE_FLAGS_MASK);
+ else
+ pud_p = (pud_t *)(native_pgd_val(*pgd_p) & ~PTE_FLAGS_MASK);
+ } else {
+ pgd_t pgd;
+
+ if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
+ p4d_p = pgtable_area;
+ memset(p4d_p, 0, sizeof(*p4d_p) * PTRS_PER_P4D);
+ pgtable_area += sizeof(*p4d_p) * PTRS_PER_P4D;
+
+ pgd = native_make_pgd((pgdval_t)p4d_p + PGD_FLAGS);
+ } else {
+ pud_p = pgtable_area;
+ memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
+ pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD;
+
+ pgd = native_make_pgd((pgdval_t)pud_p + PGD_FLAGS);
+ }
+ native_set_pgd(pgd_p, pgd);
+ }
+
+ if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
+ p4d_p += p4d_index(vaddr);
+ if (native_p4d_val(*p4d_p)) {
+ pud_p = (pud_t *)(native_p4d_val(*p4d_p) & ~PTE_FLAGS_MASK);
+ } else {
+ p4d_t p4d;
+
+ pud_p = pgtable_area;
+ memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
+ pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD;
+
+ p4d = native_make_p4d((pudval_t)pud_p + P4D_FLAGS);
+ native_set_p4d(p4d_p, p4d);
+ }
+ }
+
+ pud_p += pud_index(vaddr);
+ if (native_pud_val(*pud_p)) {
+ if (native_pud_val(*pud_p) & _PAGE_PSE)
+ goto out;
+
+ pmd_p = (pmd_t *)(native_pud_val(*pud_p) & ~PTE_FLAGS_MASK);
+ } else {
+ pud_t pud;
+
+ pmd_p = pgtable_area;
+ memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD);
+ pgtable_area += sizeof(*pmd_p) * PTRS_PER_PMD;
+
+ pud = native_make_pud((pmdval_t)pmd_p + PUD_FLAGS);
+ native_set_pud(pud_p, pud);
+ }
+
+ pmd_p += pmd_index(vaddr);
+ if (!native_pmd_val(*pmd_p) || !(native_pmd_val(*pmd_p) & _PAGE_PSE))
+ native_set_pmd(pmd_p, native_make_pmd(pmd_val));
+
+out:
+ return pgtable_area;
+}
+
+static unsigned long __init sme_pgtable_calc(unsigned long len)
+{
+ unsigned long p4d_size, pud_size, pmd_size;
+ unsigned long total;
+
+ /*
+ * Perform a relatively simplistic calculation of the pagetable
+ * entries that are needed. The mappings will be covered by 2MB
+ * PMD entries so we can conservatively calculate the required
+ * number of P4D, PUD and PMD structures needed to perform the
+ * mappings. Incrementing the count for each covers the case where
+ * the addresses cross entries.
+ */
+ if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
+ p4d_size = (ALIGN(len, PGDIR_SIZE) / PGDIR_SIZE) + 1;
+ p4d_size *= sizeof(p4d_t) * PTRS_PER_P4D;
+ pud_size = (ALIGN(len, P4D_SIZE) / P4D_SIZE) + 1;
+ pud_size *= sizeof(pud_t) * PTRS_PER_PUD;
+ } else {
+ p4d_size = 0;
+ pud_size = (ALIGN(len, PGDIR_SIZE) / PGDIR_SIZE) + 1;
+ pud_size *= sizeof(pud_t) * PTRS_PER_PUD;
+ }
+ pmd_size = (ALIGN(len, PUD_SIZE) / PUD_SIZE) + 1;
+ pmd_size *= sizeof(pmd_t) * PTRS_PER_PMD;
+
+ total = p4d_size + pud_size + pmd_size;
+
+ /*
+ * Now calculate the added pagetable structures needed to populate
+ * the new pagetables.
+ */
+ if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
+ p4d_size = ALIGN(total, PGDIR_SIZE) / PGDIR_SIZE;
+ p4d_size *= sizeof(p4d_t) * PTRS_PER_P4D;
+ pud_size = ALIGN(total, P4D_SIZE) / P4D_SIZE;
+ pud_size *= sizeof(pud_t) * PTRS_PER_PUD;
+ } else {
+ p4d_size = 0;
+ pud_size = ALIGN(total, PGDIR_SIZE) / PGDIR_SIZE;
+ pud_size *= sizeof(pud_t) * PTRS_PER_PUD;
+ }
+ pmd_size = ALIGN(total, PUD_SIZE) / PUD_SIZE;
+ pmd_size *= sizeof(pmd_t) * PTRS_PER_PMD;
+
+ total += p4d_size + pud_size + pmd_size;
+
+ return total;
+}
+
+void __init sme_encrypt_kernel(void)
+{
+ unsigned long workarea_start, workarea_end, workarea_len;
+ unsigned long execute_start, execute_end, execute_len;
+ unsigned long kernel_start, kernel_end, kernel_len;
+ unsigned long pgtable_area_len;
+ unsigned long paddr, pmd_flags;
+ unsigned long decrypted_base;
+ void *pgtable_area;
+ pgd_t *pgd;
+
+ if (!sme_active())
+ return;
+
+ /*
+ * Prepare for encrypting the kernel by building new pagetables with
+ * the necessary attributes needed to encrypt the kernel in place.
+ *
+ * One range of virtual addresses will map the memory occupied
+ * by the kernel as encrypted.
+ *
+ * Another range of virtual addresses will map the memory occupied
+ * by the kernel as decrypted and write-protected.
+ *
+ * The use of the write-protect attribute will prevent any of the
+ * memory from being cached.
+ */
+
+ /* Physical addresses give us the identity mapped virtual addresses */
+ kernel_start = __pa_symbol(_text);
+ kernel_end = ALIGN(__pa_symbol(_end), PMD_PAGE_SIZE);
+ kernel_len = kernel_end - kernel_start;
+
+ /* Set the encryption workarea to be immediately after the kernel */
+ workarea_start = kernel_end;
+
+ /*
+ * Calculate the number of workarea bytes needed:
+ * executable encryption area size:
+ * stack page (PAGE_SIZE)
+ * encryption routine page (PAGE_SIZE)
+ * intermediate copy buffer (PMD_PAGE_SIZE)
+ * pagetable structures for the encryption of the kernel
+ * pagetable structures for workarea (in case not currently mapped)
+ */
+ execute_start = workarea_start;
+ execute_end = execute_start + (PAGE_SIZE * 2) + PMD_PAGE_SIZE;
+ execute_len = execute_end - execute_start;
+
+ /*
+ * One PGD for both encrypted and decrypted mappings and a set of
+ * PUDs and PMDs for each of the encrypted and decrypted mappings.
+ */
+ pgtable_area_len = sizeof(pgd_t) * PTRS_PER_PGD;
+ pgtable_area_len += sme_pgtable_calc(execute_end - kernel_start) * 2;
+
+ /* PUDs and PMDs needed in the current pagetables for the workarea */
+ pgtable_area_len += sme_pgtable_calc(execute_len + pgtable_area_len);
+
+ /*
+ * The total workarea includes the executable encryption area and
+ * the pagetable area.
+ */
+ workarea_len = execute_len + pgtable_area_len;
+ workarea_end = workarea_start + workarea_len;
+
+ /*
+ * Set the address to the start of where newly created pagetable
+ * structures (PGDs, PUDs and PMDs) will be allocated. New pagetable
+ * structures are created when the workarea is added to the current
+ * pagetables and when the new encrypted and decrypted kernel
+ * mappings are populated.
+ */
+ pgtable_area = (void *)execute_end;
+
+ /*
+ * Make sure the current pagetable structure has entries for
+ * addressing the workarea.
+ */
+ pgd = (pgd_t *)native_read_cr3_pa();
+ paddr = workarea_start;
+ while (paddr < workarea_end) {
+ pgtable_area = sme_populate_pgd(pgd, pgtable_area,
+ paddr,
+ paddr + PMD_FLAGS);
+
+ paddr += PMD_PAGE_SIZE;
+ }
+
+ /* Flush the TLB - no globals so cr3 is enough */
+ native_write_cr3(__native_read_cr3());
+
+ /*
+ * A new pagetable structure is being built to allow for the kernel
+ * to be encrypted. It starts with an empty PGD that will then be
+ * populated with new PUDs and PMDs as the encrypted and decrypted
+ * kernel mappings are created.
+ */
+ pgd = pgtable_area;
+ memset(pgd, 0, sizeof(*pgd) * PTRS_PER_PGD);
+ pgtable_area += sizeof(*pgd) * PTRS_PER_PGD;
+
+ /* Add encrypted kernel (identity) mappings */
+ pmd_flags = PMD_FLAGS | _PAGE_ENC;
+ paddr = kernel_start;
+ while (paddr < kernel_end) {
+ pgtable_area = sme_populate_pgd(pgd, pgtable_area,
+ paddr,
+ paddr + pmd_flags);
+
+ paddr += PMD_PAGE_SIZE;
+ }
+
+ /*
+ * A different PGD index/entry must be used to get different
+ * pagetable entries for the decrypted mapping. Choose the next
+ * PGD index and convert it to a virtual address to be used as
+ * the base of the mapping.
+ */
+ decrypted_base = (pgd_index(workarea_end) + 1) & (PTRS_PER_PGD - 1);
+ decrypted_base <<= PGDIR_SHIFT;
+
+ /* Add decrypted, write-protected kernel (non-identity) mappings */
+ pmd_flags = (PMD_FLAGS & ~_PAGE_CACHE_MASK) | (_PAGE_PAT | _PAGE_PWT);
+ paddr = kernel_start;
+ while (paddr < kernel_end) {
+ pgtable_area = sme_populate_pgd(pgd, pgtable_area,
+ paddr + decrypted_base,
+ paddr + pmd_flags);
+
+ paddr += PMD_PAGE_SIZE;
+ }
+
+ /* Add decrypted workarea mappings to both kernel mappings */
+ paddr = workarea_start;
+ while (paddr < workarea_end) {
+ pgtable_area = sme_populate_pgd(pgd, pgtable_area,
+ paddr,
+ paddr + PMD_FLAGS);
+
+ pgtable_area = sme_populate_pgd(pgd, pgtable_area,
+ paddr + decrypted_base,
+ paddr + PMD_FLAGS);
+
+ paddr += PMD_PAGE_SIZE;
+ }
+
+ /* Perform the encryption */
+ sme_encrypt_execute(kernel_start, kernel_start + decrypted_base,
+ kernel_len, workarea_start, (unsigned long)pgd);
+
+ /*
+ * At this point we are running encrypted. Remove the mappings for
+ * the decrypted areas - all that is needed for this is to remove
+ * the PGD entry/entries.
+ */
+ sme_clear_pgd(pgd, kernel_start + decrypted_base,
+ kernel_end + decrypted_base);
+
+ sme_clear_pgd(pgd, workarea_start + decrypted_base,
+ workarea_end + decrypted_base);
+
+ /* Flush the TLB - no globals so cr3 is enough */
+ native_write_cr3(__native_read_cr3());
+}
+
+void __init __nostackprotector sme_enable(struct boot_params *bp)
+{
+ const char *cmdline_ptr, *cmdline_arg, *cmdline_on, *cmdline_off;
+ unsigned int eax, ebx, ecx, edx;
+ bool active_by_default;
+ unsigned long me_mask;
+ char buffer[16];
+ u64 msr;
+
+ /* Check for the SME support leaf */
+ eax = 0x80000000;
+ ecx = 0;
+ native_cpuid(&eax, &ebx, &ecx, &edx);
+ if (eax < 0x8000001f)
+ return;
+
+ /*
+ * Check for the SME feature:
+ * CPUID Fn8000_001F[EAX] - Bit 0
+ * Secure Memory Encryption support
+ * CPUID Fn8000_001F[EBX] - Bits 5:0
+ * Pagetable bit position used to indicate encryption
+ */
+ eax = 0x8000001f;
+ ecx = 0;
+ native_cpuid(&eax, &ebx, &ecx, &edx);
+ if (!(eax & 1))
+ return;
+
+ me_mask = 1UL << (ebx & 0x3f);
+
+ /* Check if SME is enabled */
+ msr = __rdmsr(MSR_K8_SYSCFG);
+ if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
+ return;
+
+ /*
+ * Fixups have not been applied to phys_base yet and we're running
+ * identity mapped, so we must obtain the address to the SME command
+ * line argument data using rip-relative addressing.
+ */
+ asm ("lea sme_cmdline_arg(%%rip), %0"
+ : "=r" (cmdline_arg)
+ : "p" (sme_cmdline_arg));
+ asm ("lea sme_cmdline_on(%%rip), %0"
+ : "=r" (cmdline_on)
+ : "p" (sme_cmdline_on));
+ asm ("lea sme_cmdline_off(%%rip), %0"
+ : "=r" (cmdline_off)
+ : "p" (sme_cmdline_off));
+
+ if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT))
+ active_by_default = true;
+ else
+ active_by_default = false;
+
+ cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr |
+ ((u64)bp->ext_cmd_line_ptr << 32));
+
+ cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer));
+
+ if (!strncmp(buffer, cmdline_on, sizeof(buffer)))
+ sme_me_mask = me_mask;
+ else if (!strncmp(buffer, cmdline_off, sizeof(buffer)))
+ sme_me_mask = 0;
+ else
+ sme_me_mask = active_by_default ? me_mask : 0;
+}
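
For reference, the CPUID probe performed by sme_enable() above can be exercised from user space. The following minimal sketch is illustration only and is not part of the patch; it uses GCC's __get_cpuid() helper from <cpuid.h>, with the leaf numbers and bit positions taken from the comments above.

/* sme_probe.c -- user-space sketch of the sme_enable() CPUID checks. */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned long long me_mask;

	/* Check that the 0x8000001f leaf exists */
	if (!__get_cpuid(0x80000000, &eax, &ebx, &ecx, &edx) ||
	    eax < 0x8000001f)
		return 1;

	/*
	 * Fn8000_001F[EAX] bit 0    = SME supported
	 * Fn8000_001F[EBX] bits 5:0 = pagetable bit position used for encryption
	 */
	__get_cpuid(0x8000001f, &eax, &ebx, &ecx, &edx);
	if (!(eax & 1))
		return 1;

	me_mask = 1ULL << (ebx & 0x3f);
	printf("SME supported, encryption mask %#llx\n", me_mask);
	return 0;
}

Whether SME is actually enabled still depends on the SYSCFG MSR check and the mem_encrypt= command-line handling shown in sme_enable() itself.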
diff --git a/arch/x86/mm/mem_encrypt_boot.S b/arch/x86/mm/mem_encrypt_boot.S
new file mode 100644
index 000000000000..730e6d541df1
--- /dev/null
+++ b/arch/x86/mm/mem_encrypt_boot.S
@@ -0,0 +1,149 @@
+/*
+ * AMD Memory Encryption Support
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/processor-flags.h>
+#include <asm/msr-index.h>
+
+ .text
+ .code64
+ENTRY(sme_encrypt_execute)
+
+ /*
+ * Entry parameters:
+ * RDI - virtual address for the encrypted kernel mapping
+ * RSI - virtual address for the decrypted kernel mapping
+ * RDX - length of kernel
+ * RCX - virtual address of the encryption workarea, including:
+ * - stack page (PAGE_SIZE)
+ * - encryption routine page (PAGE_SIZE)
+ * - intermediate copy buffer (PMD_PAGE_SIZE)
+ * R8 - physical address of the pagetables to use for encryption
+ */
+
+ push %rbp
+ movq %rsp, %rbp /* RBP now has original stack pointer */
+
+ /* Set up a one page stack in the non-encrypted memory area */
+ movq %rcx, %rax /* Workarea stack page */
+ leaq PAGE_SIZE(%rax), %rsp /* Set new stack pointer */
+ addq $PAGE_SIZE, %rax /* Workarea encryption routine */
+
+ push %r12
+ movq %rdi, %r10 /* Encrypted kernel */
+ movq %rsi, %r11 /* Decrypted kernel */
+ movq %rdx, %r12 /* Kernel length */
+
+ /* Copy encryption routine into the workarea */
+ movq %rax, %rdi /* Workarea encryption routine */
+ leaq __enc_copy(%rip), %rsi /* Encryption routine */
+ movq $(.L__enc_copy_end - __enc_copy), %rcx /* Encryption routine length */
+ rep movsb
+
+ /* Setup registers for call */
+ movq %r10, %rdi /* Encrypted kernel */
+ movq %r11, %rsi /* Decrypted kernel */
+ movq %r8, %rdx /* Pagetables used for encryption */
+ movq %r12, %rcx /* Kernel length */
+ movq %rax, %r8 /* Workarea encryption routine */
+ addq $PAGE_SIZE, %r8 /* Workarea intermediate copy buffer */
+
+ call *%rax /* Call the encryption routine */
+
+ pop %r12
+
+ movq %rbp, %rsp /* Restore original stack pointer */
+ pop %rbp
+
+ ret
+ENDPROC(sme_encrypt_execute)
+
+ENTRY(__enc_copy)
+/*
+ * Routine used to encrypt kernel.
+ * This routine must be run outside of the kernel proper since
+ * the kernel will be encrypted during the process. So this
+ * routine is defined here and then copied to an area outside
+ * of the kernel where it will remain and run decrypted
+ * during execution.
+ *
+ * On entry the registers must be:
+ * RDI - virtual address for the encrypted kernel mapping
+ * RSI - virtual address for the decrypted kernel mapping
+ * RDX - address of the pagetables to use for encryption
+ * RCX - length of kernel
+ * R8 - intermediate copy buffer
+ *
+ * RAX - points to this routine
+ *
+ * The kernel will be encrypted by copying from the non-encrypted
+ * kernel space to an intermediate buffer and then copying from the
+ * intermediate buffer back to the encrypted kernel space. The physical
+ * addresses of the two kernel space mappings are the same which
+ * results in the kernel being encrypted "in place".
+ */
+ /* Enable the new page tables */
+ mov %rdx, %cr3
+
+ /* Flush any global TLBs */
+ mov %cr4, %rdx
+ andq $~X86_CR4_PGE, %rdx
+ mov %rdx, %cr4
+ orq $X86_CR4_PGE, %rdx
+ mov %rdx, %cr4
+
+ /* Set the PAT register PA5 entry to write-protect */
+ push %rcx
+ movl $MSR_IA32_CR_PAT, %ecx
+ rdmsr
+ push %rdx /* Save original PAT value */
+ andl $0xffff00ff, %edx /* Clear PA5 */
+ orl $0x00000500, %edx /* Set PA5 to WP */
+ wrmsr
+ pop %rdx /* RDX contains original PAT value */
+ pop %rcx
+
+ movq %rcx, %r9 /* Save kernel length */
+ movq %rdi, %r10 /* Save encrypted kernel address */
+ movq %rsi, %r11 /* Save decrypted kernel address */
+
+ wbinvd /* Invalidate any cache entries */
+
+ /* Copy/encrypt 2MB at a time */
+1:
+ movq %r11, %rsi /* Source - decrypted kernel */
+ movq %r8, %rdi /* Dest - intermediate copy buffer */
+ movq $PMD_PAGE_SIZE, %rcx /* 2MB length */
+ rep movsb
+
+ movq %r8, %rsi /* Source - intermediate copy buffer */
+ movq %r10, %rdi /* Dest - encrypted kernel */
+ movq $PMD_PAGE_SIZE, %rcx /* 2MB length */
+ rep movsb
+
+ addq $PMD_PAGE_SIZE, %r11
+ addq $PMD_PAGE_SIZE, %r10
+ subq $PMD_PAGE_SIZE, %r9 /* Kernel length decrement */
+ jnz 1b /* Kernel length not zero? */
+
+ /* Restore PAT register */
+ push %rdx /* Save original PAT value */
+ movl $MSR_IA32_CR_PAT, %ecx
+ rdmsr
+ pop %rdx /* Restore original PAT value */
+ wrmsr
+
+ ret
+.L__enc_copy_end:
+ENDPROC(__enc_copy)
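
The copy loop in __enc_copy is easier to follow in C. The sketch below is a conceptual restatement only, not part of the patch: enc and dec stand for the encrypted and decrypted virtual aliases of the same physical region, buf is the PMD_PAGE_SIZE intermediate buffer in the workarea, and len is assumed to be PMD_PAGE_SIZE aligned (as kernel_end is above).

/* Conceptual, user-space compilable restatement of the __enc_copy loop. */
#include <string.h>

#define PMD_PAGE_SIZE	(2UL * 1024 * 1024)	/* 2MB, as in the kernel */

static void enc_copy(unsigned char *enc, const unsigned char *dec,
		     unsigned char *buf, unsigned long len)
{
	while (len) {
		memcpy(buf, dec, PMD_PAGE_SIZE);	/* read through the decrypted mapping  */
		memcpy(enc, buf, PMD_PAGE_SIZE);	/* write through the encrypted mapping */

		dec += PMD_PAGE_SIZE;
		enc += PMD_PAGE_SIZE;
		len -= PMD_PAGE_SIZE;
	}
}

Because both aliases resolve to the same physical pages, rewriting the data through the encrypted (C-bit set) mapping is what encrypts the kernel "in place".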
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 229d04a83f85..a99679826846 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -37,22 +37,21 @@ struct va_alignment __read_mostly va_align = {
.flags = -1,
};
-unsigned long tasksize_32bit(void)
+unsigned long task_size_32bit(void)
{
return IA32_PAGE_OFFSET;
}
-unsigned long tasksize_64bit(void)
+unsigned long task_size_64bit(int full_addr_space)
{
- return TASK_SIZE_MAX;
+ return full_addr_space ? TASK_SIZE_MAX : DEFAULT_MAP_WINDOW;
}
static unsigned long stack_maxrandom_size(unsigned long task_size)
{
unsigned long max = 0;
- if ((current->flags & PF_RANDOMIZE) &&
- !(current->personality & ADDR_NO_RANDOMIZE)) {
- max = (-1UL) & __STACK_RND_MASK(task_size == tasksize_32bit());
+ if (current->flags & PF_RANDOMIZE) {
+ max = (-1UL) & __STACK_RND_MASK(task_size == task_size_32bit());
max <<= PAGE_SHIFT;
}
@@ -79,13 +78,13 @@ static int mmap_is_legacy(void)
static unsigned long arch_rnd(unsigned int rndbits)
{
+ if (!(current->flags & PF_RANDOMIZE))
+ return 0;
return (get_random_long() & ((1UL << rndbits) - 1)) << PAGE_SHIFT;
}
unsigned long arch_mmap_rnd(void)
{
- if (!(current->flags & PF_RANDOMIZE))
- return 0;
return arch_rnd(mmap_is_ia32() ? mmap32_rnd_bits : mmap64_rnd_bits);
}
@@ -142,7 +141,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
mm->get_unmapped_area = arch_get_unmapped_area_topdown;
arch_pick_mmap_base(&mm->mmap_base, &mm->mmap_legacy_base,
- arch_rnd(mmap64_rnd_bits), tasksize_64bit());
+ arch_rnd(mmap64_rnd_bits), task_size_64bit(0));
#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
/*
@@ -152,7 +151,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
* mmap_base, the compat syscall uses mmap_compat_base.
*/
arch_pick_mmap_base(&mm->mmap_compat_base, &mm->mmap_compat_legacy_base,
- arch_rnd(mmap32_rnd_bits), tasksize_32bit());
+ arch_rnd(mmap32_rnd_bits), task_size_32bit());
#endif
}
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index 1c34b767c84c..9ceaa955d2ba 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -355,10 +355,19 @@ int mpx_enable_management(void)
*/
bd_base = mpx_get_bounds_dir();
down_write(&mm->mmap_sem);
+
+ /* MPX doesn't support addresses above 47 bits yet. */
+ if (find_vma(mm, DEFAULT_MAP_WINDOW)) {
+ pr_warn_once("%s (%d): MPX cannot handle addresses "
+ "above 47-bits. Disabling.",
+ current->comm, current->pid);
+ ret = -ENXIO;
+ goto out;
+ }
mm->context.bd_addr = bd_base;
if (mm->context.bd_addr == MPX_INVALID_BOUNDS_DIR)
ret = -ENXIO;
-
+out:
up_write(&mm->mmap_sem);
return ret;
}
@@ -1030,3 +1039,25 @@ void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
if (ret)
force_sig(SIGSEGV, current);
}
+
+/* MPX cannot handle addresses above 47 bits yet. */
+unsigned long mpx_unmapped_area_check(unsigned long addr, unsigned long len,
+ unsigned long flags)
+{
+ if (!kernel_managing_mpx_tables(current->mm))
+ return addr;
+ if (addr + len <= DEFAULT_MAP_WINDOW)
+ return addr;
+ if (flags & MAP_FIXED)
+ return -ENOMEM;
+
+ /*
+ * Requested len is larger than the whole area we're allowed to map in.
+ * Resetting hinting address wouldn't do much good -- fail early.
+ */
+ if (len > DEFAULT_MAP_WINDOW)
+ return -ENOMEM;
+
+ /* Look for unmap area within DEFAULT_MAP_WINDOW */
+ return 0;
+}
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c
index a8f90ce3dedf..d805162e6045 100644
--- a/arch/x86/mm/numa_emulation.c
+++ b/arch/x86/mm/numa_emulation.c
@@ -75,13 +75,15 @@ static int __init emu_setup_memblk(struct numa_meminfo *ei,
/*
* Sets up nr_nodes fake nodes interleaved over physical nodes ranging from addr
- * to max_addr. The return value is the number of nodes allocated.
+ * to max_addr.
+ *
+ * Returns zero on success or negative on error.
*/
static int __init split_nodes_interleave(struct numa_meminfo *ei,
struct numa_meminfo *pi,
u64 addr, u64 max_addr, int nr_nodes)
{
- nodemask_t physnode_mask = NODE_MASK_NONE;
+ nodemask_t physnode_mask = numa_nodes_parsed;
u64 size;
int big;
int nid = 0;
@@ -116,9 +118,6 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei,
return -1;
}
- for (i = 0; i < pi->nr_blks; i++)
- node_set(pi->blk[i].nid, physnode_mask);
-
/*
* Continue to fill physical nodes with fake nodes until there is no
* memory left on any of them.
@@ -200,13 +199,15 @@ static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size)
/*
* Sets up fake nodes of `size' interleaved over physical nodes ranging from
- * `addr' to `max_addr'. The return value is the number of nodes allocated.
+ * `addr' to `max_addr'.
+ *
+ * Returns zero on success or negative on error.
*/
static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
struct numa_meminfo *pi,
u64 addr, u64 max_addr, u64 size)
{
- nodemask_t physnode_mask = NODE_MASK_NONE;
+ nodemask_t physnode_mask = numa_nodes_parsed;
u64 min_size;
int nid = 0;
int i, ret;
@@ -231,9 +232,6 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
}
size &= FAKE_NODE_MIN_HASH_MASK;
- for (i = 0; i < pi->nr_blks; i++)
- node_set(pi->blk[i].nid, physnode_mask);
-
/*
* Fill physical nodes with fake nodes of size until there is no memory
* left on any of them.
@@ -280,6 +278,22 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
return 0;
}
+int __init setup_emu2phys_nid(int *dfl_phys_nid)
+{
+ int i, max_emu_nid = 0;
+
+ *dfl_phys_nid = NUMA_NO_NODE;
+ for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++) {
+ if (emu_nid_to_phys[i] != NUMA_NO_NODE) {
+ max_emu_nid = i;
+ if (*dfl_phys_nid == NUMA_NO_NODE)
+ *dfl_phys_nid = emu_nid_to_phys[i];
+ }
+ }
+
+ return max_emu_nid;
+}
+
/**
* numa_emulation - Emulate NUMA nodes
* @numa_meminfo: NUMA configuration to massage
@@ -376,23 +390,18 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
* Determine the max emulated nid and the default phys nid to use
* for unmapped nodes.
*/
- max_emu_nid = 0;
- dfl_phys_nid = NUMA_NO_NODE;
- for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++) {
- if (emu_nid_to_phys[i] != NUMA_NO_NODE) {
- max_emu_nid = i;
- if (dfl_phys_nid == NUMA_NO_NODE)
- dfl_phys_nid = emu_nid_to_phys[i];
- }
- }
- if (dfl_phys_nid == NUMA_NO_NODE) {
- pr_warning("NUMA: Warning: can't determine default physical node, disabling emulation\n");
- goto no_emu;
- }
+ max_emu_nid = setup_emu2phys_nid(&dfl_phys_nid);
/* commit */
*numa_meminfo = ei;
+ /* Make sure numa_nodes_parsed only contains emulated nodes */
+ nodes_clear(numa_nodes_parsed);
+ for (i = 0; i < ARRAY_SIZE(ei.blk); i++)
+ if (ei.blk[i].start != ei.blk[i].end &&
+ ei.blk[i].nid != NUMA_NO_NODE)
+ node_set(ei.blk[i].nid, numa_nodes_parsed);
+
/*
* Transform __apicid_to_node table to use emulated nids by
* reverse-mapping phys_nid. The maps should always exist but fall
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 757b0bcdf712..dfb7d657cf43 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -1775,6 +1775,70 @@ int set_memory_4k(unsigned long addr, int numpages)
__pgprot(0), 1, 0, NULL);
}
+static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
+{
+ struct cpa_data cpa;
+ unsigned long start;
+ int ret;
+
+ /* Nothing to do if SME is not active */
+ if (!sme_active())
+ return 0;
+
+ /* Should not be working on unaligned addresses */
+ if (WARN_ONCE(addr & ~PAGE_MASK, "misaligned address: %#lx\n", addr))
+ addr &= PAGE_MASK;
+
+ start = addr;
+
+ memset(&cpa, 0, sizeof(cpa));
+ cpa.vaddr = &addr;
+ cpa.numpages = numpages;
+ cpa.mask_set = enc ? __pgprot(_PAGE_ENC) : __pgprot(0);
+ cpa.mask_clr = enc ? __pgprot(0) : __pgprot(_PAGE_ENC);
+ cpa.pgd = init_mm.pgd;
+
+ /* Must avoid aliasing mappings in the highmem code */
+ kmap_flush_unused();
+ vm_unmap_aliases();
+
+ /*
+ * Before changing the encryption attribute, we need to flush caches.
+ */
+ if (static_cpu_has(X86_FEATURE_CLFLUSH))
+ cpa_flush_range(start, numpages, 1);
+ else
+ cpa_flush_all(1);
+
+ ret = __change_page_attr_set_clr(&cpa, 1);
+
+ /*
+ * After changing the encryption attribute, we need to flush TLBs
+ * again in case any speculative TLB caching occurred (but no need
+ * to flush caches again). We could just use cpa_flush_all(), but
+ * in case TLB flushing gets optimized in the cpa_flush_range()
+ * path use the same logic as above.
+ */
+ if (static_cpu_has(X86_FEATURE_CLFLUSH))
+ cpa_flush_range(start, numpages, 0);
+ else
+ cpa_flush_all(0);
+
+ return ret;
+}
+
+int set_memory_encrypted(unsigned long addr, int numpages)
+{
+ return __set_memory_enc_dec(addr, numpages, true);
+}
+EXPORT_SYMBOL_GPL(set_memory_encrypted);
+
+int set_memory_decrypted(unsigned long addr, int numpages)
+{
+ return __set_memory_enc_dec(addr, numpages, false);
+}
+EXPORT_SYMBOL_GPL(set_memory_decrypted);
+
int set_pages_uc(struct page *page, int numpages)
{
unsigned long addr = (unsigned long)page_address(page);
@@ -2020,6 +2084,9 @@ int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
if (!(page_flags & _PAGE_RW))
cpa.mask_clr = __pgprot(_PAGE_RW);
+ if (!(page_flags & _PAGE_ENC))
+ cpa.mask_clr = pgprot_encrypted(cpa.mask_clr);
+
cpa.mask_set = __pgprot(_PAGE_PRESENT | page_flags);
retval = __change_page_attr_set_clr(&cpa, 0);
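
The new set_memory_encrypted()/set_memory_decrypted() helpers are what swiotlb_set_mem_attributes() earlier in this patch builds on. Below is a hedged sketch of a typical caller that shares a buffer with a device and restores it afterwards; the function names are made up for illustration and are not part of the patch.

/* Illustrative only: allocate a buffer, clear its C-bit so a device can
 * see plaintext data, and re-encrypt it before freeing.
 */
static void *alloc_shared_buffer(size_t size)
{
	unsigned int order = get_order(size);
	unsigned long vaddr = __get_free_pages(GFP_KERNEL, order);

	if (!vaddr)
		return NULL;

	if (set_memory_decrypted(vaddr, 1 << order)) {
		free_pages(vaddr, order);
		return NULL;
	}
	return (void *)vaddr;
}

static void free_shared_buffer(void *buf, size_t size)
{
	unsigned int order = get_order(size);

	set_memory_encrypted((unsigned long)buf, 1 << order);
	free_pages((unsigned long)buf, order);
}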
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 45979502f64b..fe7d57a8fb60 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -293,7 +293,7 @@ void init_cache_modes(void)
* pat_init - Initialize PAT MSR and PAT table
*
* This function initializes PAT MSR and PAT table with an OS-defined value
- * to enable additional cache attributes, WC and WT.
+ * to enable additional cache attributes, WC, WT and WP.
*
* This function must be called on all CPUs using the specific sequence of
* operations defined in Intel SDM. mtrr_rendezvous_handler() provides this
@@ -352,7 +352,7 @@ void pat_init(void)
* 010 2 UC-: _PAGE_CACHE_MODE_UC_MINUS
* 011 3 UC : _PAGE_CACHE_MODE_UC
* 100 4 WB : Reserved
- * 101 5 WC : Reserved
+ * 101 5 WP : _PAGE_CACHE_MODE_WP
* 110 6 UC-: Reserved
* 111 7 WT : _PAGE_CACHE_MODE_WT
*
@@ -360,7 +360,7 @@ void pat_init(void)
* corresponding types in the presence of PAT errata.
*/
pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
- PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, WT);
+ PAT(4, WB) | PAT(5, WP) | PAT(6, UC_MINUS) | PAT(7, WT);
}
if (!boot_cpu_done) {
@@ -744,6 +744,9 @@ EXPORT_SYMBOL(arch_io_free_memtype_wc);
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot)
{
+ if (!phys_mem_access_encrypted(pfn << PAGE_SHIFT, size))
+ vma_prot = pgprot_decrypted(vma_prot);
+
return vma_prot;
}
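
For clarity, the PAT entry used by a mapping is selected by the PAT, PCD and PWT bits of the pagetable entry. The small sketch below is illustration only; it shows why the table change above matters: index 5 (PAT=1, PCD=0, PWT=1) now resolves to Write-Protect instead of being reserved, which is the cache mode the SME code wants for its decrypted, write-protected kernel mapping.

/* Illustrative only: PAT entry selection from the pagetable cache bits. */
#include <stdbool.h>

static unsigned int pat_index(bool pat, bool pcd, bool pwt)
{
	return ((unsigned int)pat << 2) | ((unsigned int)pcd << 1) | pwt;
}

/* pat_index(1, 0, 1) == 5 -> _PAGE_CACHE_MODE_WP per the new table above */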
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 508a708eb9a6..218834a3e9ad 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -56,7 +56,7 @@ void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
pgtable_page_dtor(pte);
paravirt_release_pte(page_to_pfn(pte));
- tlb_remove_page(tlb, pte);
+ tlb_remove_table(tlb, pte);
}
#if CONFIG_PGTABLE_LEVELS > 2
@@ -72,21 +72,21 @@ void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
tlb->need_flush_all = 1;
#endif
pgtable_pmd_page_dtor(page);
- tlb_remove_page(tlb, page);
+ tlb_remove_table(tlb, page);
}
#if CONFIG_PGTABLE_LEVELS > 3
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
- tlb_remove_page(tlb, virt_to_page(pud));
+ tlb_remove_table(tlb, virt_to_page(pud));
}
#if CONFIG_PGTABLE_LEVELS > 4
void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d)
{
paravirt_release_p4d(__pa(p4d) >> PAGE_SHIFT);
- tlb_remove_page(tlb, virt_to_page(p4d));
+ tlb_remove_table(tlb, virt_to_page(p4d));
}
#endif /* CONFIG_PGTABLE_LEVELS > 4 */
#endif /* CONFIG_PGTABLE_LEVELS > 3 */
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 014d07a80053..dbbcfd59726a 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -28,6 +28,42 @@
* Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
*/
+atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);
+
+static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
+ u16 *new_asid, bool *need_flush)
+{
+ u16 asid;
+
+ if (!static_cpu_has(X86_FEATURE_PCID)) {
+ *new_asid = 0;
+ *need_flush = true;
+ return;
+ }
+
+ for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
+ if (this_cpu_read(cpu_tlbstate.ctxs[asid].ctx_id) !=
+ next->context.ctx_id)
+ continue;
+
+ *new_asid = asid;
+ *need_flush = (this_cpu_read(cpu_tlbstate.ctxs[asid].tlb_gen) <
+ next_tlb_gen);
+ return;
+ }
+
+ /*
+ * We don't currently own an ASID slot on this CPU.
+ * Allocate a slot.
+ */
+ *new_asid = this_cpu_add_return(cpu_tlbstate.next_asid, 1) - 1;
+ if (*new_asid >= TLB_NR_DYN_ASIDS) {
+ *new_asid = 0;
+ this_cpu_write(cpu_tlbstate.next_asid, 1);
+ }
+ *need_flush = true;
+}
+
void leave_mm(int cpu)
{
struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
@@ -43,12 +79,11 @@ void leave_mm(int cpu)
if (loaded_mm == &init_mm)
return;
- if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
- BUG();
+ /* Warn if we're not lazy. */
+ WARN_ON(cpumask_test_cpu(smp_processor_id(), mm_cpumask(loaded_mm)));
switch_mm(NULL, &init_mm, NULL);
}
-EXPORT_SYMBOL_GPL(leave_mm);
void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
@@ -63,115 +98,263 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
- unsigned cpu = smp_processor_id();
struct mm_struct *real_prev = this_cpu_read(cpu_tlbstate.loaded_mm);
+ u16 prev_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
+ unsigned cpu = smp_processor_id();
+ u64 next_tlb_gen;
/*
- * NB: The scheduler will call us with prev == next when
- * switching from lazy TLB mode to normal mode if active_mm
- * isn't changing. When this happens, there is no guarantee
- * that CR3 (and hence cpu_tlbstate.loaded_mm) matches next.
+ * NB: The scheduler will call us with prev == next when switching
+ * from lazy TLB mode to normal mode if active_mm isn't changing.
+ * When this happens, we don't assume that CR3 (and hence
+ * cpu_tlbstate.loaded_mm) matches next.
*
* NB: leave_mm() calls us with prev == NULL and tsk == NULL.
*/
- this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+ /* We don't want flush_tlb_func_* to run concurrently with us. */
+ if (IS_ENABLED(CONFIG_PROVE_LOCKING))
+ WARN_ON_ONCE(!irqs_disabled());
+
+ /*
+ * Verify that CR3 is what we think it is. This will catch
+ * hypothetical buggy code that directly switches to swapper_pg_dir
+ * without going through leave_mm() / switch_mm_irqs_off() or that
+ * does something like write_cr3(read_cr3_pa()).
+ */
+ VM_BUG_ON(__read_cr3() != (__sme_pa(real_prev->pgd) | prev_asid));
if (real_prev == next) {
+ VM_BUG_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
+ next->context.ctx_id);
+
+ if (cpumask_test_cpu(cpu, mm_cpumask(next))) {
+ /*
+ * There's nothing to do: we weren't lazy, and we
+ * aren't changing our mm. We don't need to flush
+ * anything, nor do we need to update CR3, CR4, or
+ * LDTR.
+ */
+ return;
+ }
+
+ /* Resume remote flushes and then read tlb_gen. */
+ cpumask_set_cpu(cpu, mm_cpumask(next));
+ next_tlb_gen = atomic64_read(&next->context.tlb_gen);
+
+ if (this_cpu_read(cpu_tlbstate.ctxs[prev_asid].tlb_gen) <
+ next_tlb_gen) {
+ /*
+ * Ideally, we'd have a flush_tlb() variant that
+ * takes the known CR3 value as input. This would
+ * be faster on Xen PV and on hypothetical CPUs
+ * on which INVPCID is fast.
+ */
+ this_cpu_write(cpu_tlbstate.ctxs[prev_asid].tlb_gen,
+ next_tlb_gen);
+ write_cr3(__sme_pa(next->pgd) | prev_asid);
+ trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH,
+ TLB_FLUSH_ALL);
+ }
+
/*
- * There's nothing to do: we always keep the per-mm control
- * regs in sync with cpu_tlbstate.loaded_mm. Just
- * sanity-check mm_cpumask.
+ * We just exited lazy mode, which means that CR4 and/or LDTR
+ * may be stale. (Changes to the required CR4 and LDTR states
+ * are not reflected in tlb_gen.)
*/
- if (WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(next))))
- cpumask_set_cpu(cpu, mm_cpumask(next));
- return;
- }
+ } else {
+ u16 new_asid;
+ bool need_flush;
+
+ if (IS_ENABLED(CONFIG_VMAP_STACK)) {
+ /*
+ * If our current stack is in vmalloc space and isn't
+ * mapped in the new pgd, we'll double-fault. Forcibly
+ * map it.
+ */
+ unsigned int index = pgd_index(current_stack_pointer());
+ pgd_t *pgd = next->pgd + index;
+
+ if (unlikely(pgd_none(*pgd)))
+ set_pgd(pgd, init_mm.pgd[index]);
+ }
+
+ /* Stop remote flushes for the previous mm */
+ if (cpumask_test_cpu(cpu, mm_cpumask(real_prev)))
+ cpumask_clear_cpu(cpu, mm_cpumask(real_prev));
+
+ VM_WARN_ON_ONCE(cpumask_test_cpu(cpu, mm_cpumask(next)));
- if (IS_ENABLED(CONFIG_VMAP_STACK)) {
/*
- * If our current stack is in vmalloc space and isn't
- * mapped in the new pgd, we'll double-fault. Forcibly
- * map it.
+ * Start remote flushes and then read tlb_gen.
*/
- unsigned int stack_pgd_index = pgd_index(current_stack_pointer());
-
- pgd_t *pgd = next->pgd + stack_pgd_index;
+ cpumask_set_cpu(cpu, mm_cpumask(next));
+ next_tlb_gen = atomic64_read(&next->context.tlb_gen);
+
+ choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);
+
+ if (need_flush) {
+ this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
+ this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
+ write_cr3(__sme_pa(next->pgd) | new_asid);
+ trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH,
+ TLB_FLUSH_ALL);
+ } else {
+ /* The new ASID is already up to date. */
+ write_cr3(__sme_pa(next->pgd) | new_asid | CR3_NOFLUSH);
+ trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, 0);
+ }
- if (unlikely(pgd_none(*pgd)))
- set_pgd(pgd, init_mm.pgd[stack_pgd_index]);
+ this_cpu_write(cpu_tlbstate.loaded_mm, next);
+ this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
}
- this_cpu_write(cpu_tlbstate.loaded_mm, next);
+ load_mm_cr4(next);
+ switch_ldt(real_prev, next);
+}
- WARN_ON_ONCE(cpumask_test_cpu(cpu, mm_cpumask(next)));
- cpumask_set_cpu(cpu, mm_cpumask(next));
+/*
+ * Call this when reinitializing a CPU. It fixes the following potential
+ * problems:
+ *
+ * - The ASID changed from what cpu_tlbstate thinks it is (most likely
+ * because the CPU was taken down and came back up with CR3's PCID
+ * bits clear. CPU hotplug can do this.)
+ *
+ * - The TLB contains junk in slots corresponding to inactive ASIDs.
+ *
+ * - The CPU went so far out to lunch that it may have missed a TLB
+ * flush.
+ */
+void initialize_tlbstate_and_flush(void)
+{
+ int i;
+ struct mm_struct *mm = this_cpu_read(cpu_tlbstate.loaded_mm);
+ u64 tlb_gen = atomic64_read(&init_mm.context.tlb_gen);
+ unsigned long cr3 = __read_cr3();
- /*
- * Re-load page tables.
- *
- * This logic has an ordering constraint:
- *
- * CPU 0: Write to a PTE for 'next'
- * CPU 0: load bit 1 in mm_cpumask. if nonzero, send IPI.
- * CPU 1: set bit 1 in next's mm_cpumask
- * CPU 1: load from the PTE that CPU 0 writes (implicit)
- *
- * We need to prevent an outcome in which CPU 1 observes
- * the new PTE value and CPU 0 observes bit 1 clear in
- * mm_cpumask. (If that occurs, then the IPI will never
- * be sent, and CPU 0's TLB will contain a stale entry.)
- *
- * The bad outcome can occur if either CPU's load is
- * reordered before that CPU's store, so both CPUs must
- * execute full barriers to prevent this from happening.
- *
- * Thus, switch_mm needs a full barrier between the
- * store to mm_cpumask and any operation that could load
- * from next->pgd. TLB fills are special and can happen
- * due to instruction fetches or for no reason at all,
- * and neither LOCK nor MFENCE orders them.
- * Fortunately, load_cr3() is serializing and gives the
- * ordering guarantee we need.
- */
- load_cr3(next->pgd);
+ /* Assert that CR3 already references the right mm. */
+ WARN_ON((cr3 & CR3_ADDR_MASK) != __pa(mm->pgd));
/*
- * This gets called via leave_mm() in the idle path where RCU
- * functions differently. Tracing normally uses RCU, so we have to
- * call the tracepoint specially here.
+ * Assert that CR4.PCIDE is set if needed. (CR4.PCIDE initialization
+ * doesn't work like other CR4 bits because it can only be set from
+ * long mode.)
*/
- trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+ WARN_ON(boot_cpu_has(X86_CR4_PCIDE) &&
+ !(cr4_read_shadow() & X86_CR4_PCIDE));
- /* Stop flush ipis for the previous mm */
- WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(real_prev)) &&
- real_prev != &init_mm);
- cpumask_clear_cpu(cpu, mm_cpumask(real_prev));
+ /* Force ASID 0 and force a TLB flush. */
+ write_cr3(cr3 & ~CR3_PCID_MASK);
- /* Load per-mm CR4 and LDTR state */
- load_mm_cr4(next);
- switch_ldt(real_prev, next);
+ /* Reinitialize tlbstate. */
+ this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0);
+ this_cpu_write(cpu_tlbstate.next_asid, 1);
+ this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, mm->context.ctx_id);
+ this_cpu_write(cpu_tlbstate.ctxs[0].tlb_gen, tlb_gen);
+
+ for (i = 1; i < TLB_NR_DYN_ASIDS; i++)
+ this_cpu_write(cpu_tlbstate.ctxs[i].ctx_id, 0);
}
+/*
+ * flush_tlb_func_common()'s memory ordering requirement is that any
+ * TLB fills that happen after we flush the TLB are ordered after we
+ * read active_mm's tlb_gen. We don't need any explicit barriers
+ * because all x86 flush operations are serializing and the
+ * atomic64_read operation won't be reordered by the compiler.
+ */
static void flush_tlb_func_common(const struct flush_tlb_info *f,
bool local, enum tlb_flush_reason reason)
{
+ /*
+ * We have three different tlb_gen values in here. They are:
+ *
+ * - mm_tlb_gen: the latest generation.
+ * - local_tlb_gen: the generation that this CPU has already caught
+ * up to.
+ * - f->new_tlb_gen: the generation that the requester of the flush
+ * wants us to catch up to.
+ */
+ struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
+ u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
+ u64 mm_tlb_gen = atomic64_read(&loaded_mm->context.tlb_gen);
+ u64 local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen);
+
/* This code cannot presently handle being reentered. */
VM_WARN_ON(!irqs_disabled());
- if (this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK) {
- leave_mm(smp_processor_id());
+ VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) !=
+ loaded_mm->context.ctx_id);
+
+ if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(loaded_mm))) {
+ /*
+ * We're in lazy mode -- don't flush. We can get here on
+ * remote flushes due to races and on local flushes if a
+ * kernel thread coincidentally flushes the mm it's lazily
+ * still using.
+ */
return;
}
- if (f->end == TLB_FLUSH_ALL) {
- local_flush_tlb();
- if (local)
- count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
- trace_tlb_flush(reason, TLB_FLUSH_ALL);
- } else {
+ if (unlikely(local_tlb_gen == mm_tlb_gen)) {
+ /*
+ * There's nothing to do: we're already up to date. This can
+ * happen if two concurrent flushes happen -- the first flush to
+ * be handled can catch us all the way up, leaving no work for
+ * the second flush.
+ */
+ trace_tlb_flush(reason, 0);
+ return;
+ }
+
+ WARN_ON_ONCE(local_tlb_gen > mm_tlb_gen);
+ WARN_ON_ONCE(f->new_tlb_gen > mm_tlb_gen);
+
+ /*
+ * If we get to this point, we know that our TLB is out of date.
+ * This does not strictly imply that we need to flush (it's
+ * possible that f->new_tlb_gen <= local_tlb_gen), but we're
+ * going to need to flush in the very near future, so we might
+ * as well get it over with.
+ *
+ * The only question is whether to do a full or partial flush.
+ *
+ * We do a partial flush if requested and two extra conditions
+ * are met:
+ *
+ * 1. f->new_tlb_gen == local_tlb_gen + 1. We have an invariant that
+ * we've always done all needed flushes to catch up to
+ * local_tlb_gen. If, for example, local_tlb_gen == 2 and
+ * f->new_tlb_gen == 3, then we know that the flush needed to bring
+ * us up to date for tlb_gen 3 is the partial flush we're
+ * processing.
+ *
+ * As an example of why this check is needed, suppose that there
+ * are two concurrent flushes. The first is a full flush that
+ * changes context.tlb_gen from 1 to 2. The second is a partial
+ * flush that changes context.tlb_gen from 2 to 3. If they get
+ * processed on this CPU in reverse order, we'll see
+ * local_tlb_gen == 1, mm_tlb_gen == 3, and end != TLB_FLUSH_ALL.
+ * If we were to use __flush_tlb_single() and set local_tlb_gen to
+ * 3, we'd break the invariant: we'd update local_tlb_gen above
+ * 1 without the full flush that's needed for tlb_gen 2.
+ *
+ * 2. f->new_tlb_gen == mm_tlb_gen. This is purely an optimization.
+ * Partial TLB flushes are not all that much cheaper than full TLB
+ * flushes, so it seems unlikely that it would be a performance win
+ * to do a partial flush if that won't bring our TLB fully up to
+ * date. By doing a full flush instead, we can increase
+ * local_tlb_gen all the way to mm_tlb_gen and we can probably
+ * avoid another flush in the very near future.
+ */
+ if (f->end != TLB_FLUSH_ALL &&
+ f->new_tlb_gen == local_tlb_gen + 1 &&
+ f->new_tlb_gen == mm_tlb_gen) {
+ /* Partial flush */
unsigned long addr;
unsigned long nr_pages = (f->end - f->start) >> PAGE_SHIFT;
+
addr = f->start;
while (addr < f->end) {
__flush_tlb_single(addr);
@@ -180,7 +363,16 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
if (local)
count_vm_tlb_events(NR_TLB_LOCAL_FLUSH_ONE, nr_pages);
trace_tlb_flush(reason, nr_pages);
+ } else {
+ /* Full flush. */
+ local_flush_tlb();
+ if (local)
+ count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+ trace_tlb_flush(reason, TLB_FLUSH_ALL);
}
+
+ /* Both paths above update our state to mm_tlb_gen. */
+ this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen);
}
static void flush_tlb_func_local(void *info, enum tlb_flush_reason reason)
@@ -214,6 +406,21 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
(info->end - info->start) >> PAGE_SHIFT);
if (is_uv_system()) {
+ /*
+ * This whole special case is confused. UV has a "Broadcast
+ * Assist Unit", which seems to be a fancy way to send IPIs.
+ * Back when x86 used an explicit TLB flush IPI, UV was
+ * optimized to use its own mechanism. These days, x86 uses
+ * smp_call_function_many(), but UV still uses a manual IPI,
+ * and that IPI's action is out of date -- it does a manual
+ * flush instead of calling flush_tlb_func_remote(). This
+ * means that the percpu tlb_gen variables won't be updated
+ * and we'll do pointless flushes on future context switches.
+ *
+ * Rather than hooking native_flush_tlb_others() here, I think
+ * that UV should be updated so that smp_call_function_many(),
+ * etc, are optimal on UV.
+ */
unsigned int cpu;
cpu = smp_processor_id();
@@ -250,8 +457,8 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
cpu = get_cpu();
- /* Synchronize with switch_mm. */
- smp_mb();
+ /* This is also a barrier that synchronizes with switch_mm(). */
+ info.new_tlb_gen = inc_mm_tlb_gen(mm);
/* Should we flush just the requested range? */
if ((end != TLB_FLUSH_ALL) &&
@@ -273,6 +480,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
flush_tlb_others(mm_cpumask(mm), &info);
+
put_cpu();
}
@@ -281,8 +489,6 @@ static void do_flush_tlb_all(void *info)
{
count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
__flush_tlb_all();
- if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
- leave_mm(smp_processor_id());
}
void flush_tlb_all(void)
@@ -335,6 +541,7 @@ void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids)
flush_tlb_others(&batch->cpumask, &info);
+
cpumask_clear(&batch->cpumask);
put_cpu();
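
The long comment in flush_tlb_func_common() boils down to a three-way condition. The sketch below is an illustrative condensation only (standalone types, not kernel code): a partial flush is taken only for a ranged request that is exactly the next generation and that brings this CPU fully up to date with mm_tlb_gen; anything else falls back to a full flush.

/* Illustrative only: the partial-vs-full flush decision. */
#include <stdbool.h>
#include <stdint.h>

static bool use_partial_flush(uint64_t local_tlb_gen, uint64_t mm_tlb_gen,
			      uint64_t new_tlb_gen, bool ranged)
{
	return ranged &&
	       new_tlb_gen == local_tlb_gen + 1 &&
	       new_tlb_gen == mm_tlb_gen;
}

Either path then advances the per-ASID tlb_gen to mm_tlb_gen, as the code above does.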
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index e1324f280e06..8c9573660d51 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -94,7 +94,9 @@ static int bpf_size_to_x86_bytes(int bpf_size)
#define X86_JNE 0x75
#define X86_JBE 0x76
#define X86_JA 0x77
+#define X86_JL 0x7C
#define X86_JGE 0x7D
+#define X86_JLE 0x7E
#define X86_JG 0x7F
static void bpf_flush_icache(void *start, void *end)
@@ -285,7 +287,7 @@ static void emit_bpf_tail_call(u8 **pprog)
EMIT4(0x48, 0x8B, 0x46, /* mov rax, qword ptr [rsi + 16] */
offsetof(struct bpf_array, map.max_entries));
EMIT3(0x48, 0x39, 0xD0); /* cmp rax, rdx */
-#define OFFSET1 47 /* number of bytes to jump */
+#define OFFSET1 43 /* number of bytes to jump */
EMIT2(X86_JBE, OFFSET1); /* jbe out */
label1 = cnt;
@@ -294,21 +296,20 @@ static void emit_bpf_tail_call(u8 **pprog)
*/
EMIT2_off32(0x8B, 0x85, 36); /* mov eax, dword ptr [rbp + 36] */
EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
-#define OFFSET2 36
+#define OFFSET2 32
EMIT2(X86_JA, OFFSET2); /* ja out */
label2 = cnt;
EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
EMIT2_off32(0x89, 0x85, 36); /* mov dword ptr [rbp + 36], eax */
/* prog = array->ptrs[index]; */
- EMIT4_off32(0x48, 0x8D, 0x84, 0xD6, /* lea rax, [rsi + rdx * 8 + offsetof(...)] */
+ EMIT4_off32(0x48, 0x8B, 0x84, 0xD6, /* mov rax, [rsi + rdx * 8 + offsetof(...)] */
offsetof(struct bpf_array, ptrs));
- EMIT3(0x48, 0x8B, 0x00); /* mov rax, qword ptr [rax] */
/* if (prog == NULL)
* goto out;
*/
- EMIT4(0x48, 0x83, 0xF8, 0x00); /* cmp rax, 0 */
+ EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */
#define OFFSET3 10
EMIT2(X86_JE, OFFSET3); /* je out */
label3 = cnt;
@@ -888,9 +889,13 @@ xadd: if (is_imm8(insn->off))
case BPF_JMP | BPF_JEQ | BPF_X:
case BPF_JMP | BPF_JNE | BPF_X:
case BPF_JMP | BPF_JGT | BPF_X:
+ case BPF_JMP | BPF_JLT | BPF_X:
case BPF_JMP | BPF_JGE | BPF_X:
+ case BPF_JMP | BPF_JLE | BPF_X:
case BPF_JMP | BPF_JSGT | BPF_X:
+ case BPF_JMP | BPF_JSLT | BPF_X:
case BPF_JMP | BPF_JSGE | BPF_X:
+ case BPF_JMP | BPF_JSLE | BPF_X:
/* cmp dst_reg, src_reg */
EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x39,
add_2reg(0xC0, dst_reg, src_reg));
@@ -911,9 +916,13 @@ xadd: if (is_imm8(insn->off))
case BPF_JMP | BPF_JEQ | BPF_K:
case BPF_JMP | BPF_JNE | BPF_K:
case BPF_JMP | BPF_JGT | BPF_K:
+ case BPF_JMP | BPF_JLT | BPF_K:
case BPF_JMP | BPF_JGE | BPF_K:
+ case BPF_JMP | BPF_JLE | BPF_K:
case BPF_JMP | BPF_JSGT | BPF_K:
+ case BPF_JMP | BPF_JSLT | BPF_K:
case BPF_JMP | BPF_JSGE | BPF_K:
+ case BPF_JMP | BPF_JSLE | BPF_K:
/* cmp dst_reg, imm8/32 */
EMIT1(add_1mod(0x48, dst_reg));
@@ -935,18 +944,34 @@ emit_cond_jmp: /* convert BPF opcode to x86 */
/* GT is unsigned '>', JA in x86 */
jmp_cond = X86_JA;
break;
+ case BPF_JLT:
+ /* LT is unsigned '<', JB in x86 */
+ jmp_cond = X86_JB;
+ break;
case BPF_JGE:
/* GE is unsigned '>=', JAE in x86 */
jmp_cond = X86_JAE;
break;
+ case BPF_JLE:
+ /* LE is unsigned '<=', JBE in x86 */
+ jmp_cond = X86_JBE;
+ break;
case BPF_JSGT:
/* signed '>', GT in x86 */
jmp_cond = X86_JG;
break;
+ case BPF_JSLT:
+ /* signed '<', LT in x86 */
+ jmp_cond = X86_JL;
+ break;
case BPF_JSGE:
/* signed '>=', GE in x86 */
jmp_cond = X86_JGE;
break;
+ case BPF_JSLE:
+ /* signed '<=', LE in x86 */
+ jmp_cond = X86_JLE;
+ break;
default: /* to silence gcc warning */
return -EFAULT;
}
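
The new opcodes are the "less than" counterparts of the existing unsigned and signed "greater than" jumps. A hedged example of how one of them might be used, written with the kernel's instruction macros from include/linux/filter.h (illustrative only; the surrounding program is made up):

/* r0 = 1; if (r1 < 100) goto exit; r0 = 0; exit;
 * i.e. "r0 = (r1 < 100)" expressed directly with BPF_JLT instead of
 * swapping operands to reuse BPF_JGT/BPF_JGE.
 */
struct bpf_insn prog[] = {
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 100, 1),	/* skip the clear if r1 < 100 */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
};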
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index dbe2132b0ed4..7a5350d08cef 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -674,7 +674,7 @@ int pcibios_add_device(struct pci_dev *dev)
pa_data = boot_params.hdr.setup_data;
while (pa_data) {
- data = ioremap(pa_data, sizeof(*rom));
+ data = memremap(pa_data, sizeof(*rom), MEMREMAP_WB);
if (!data)
return -ENOMEM;
@@ -693,7 +693,7 @@ int pcibios_add_device(struct pci_dev *dev)
}
}
pa_data = data->next;
- iounmap(data);
+ memunmap(data);
}
set_dma_domain_ops(dev);
set_dev_domain_options(dev);
diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
index 5a18aedcb341..b901ece278dd 100644
--- a/arch/x86/pci/intel_mid_pci.c
+++ b/arch/x86/pci/intel_mid_pci.c
@@ -215,16 +215,23 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
struct irq_alloc_info info;
int polarity;
int ret;
+ u8 gsi;
if (dev->irq_managed && dev->irq > 0)
return 0;
+ ret = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi);
+ if (ret < 0) {
+ dev_warn(&dev->dev, "Failed to read interrupt line: %d\n", ret);
+ return ret;
+ }
+
switch (intel_mid_identify_cpu()) {
case INTEL_MID_CPU_CHIP_TANGIER:
polarity = IOAPIC_POL_HIGH;
/* Special treatment for IRQ0 */
- if (dev->irq == 0) {
+ if (gsi == 0) {
/*
* Skip HS UART common registers device since it has
* IRQ0 assigned and not used by the kernel.
@@ -253,10 +260,11 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
* MRST only have IOAPIC, the PCI irq lines are 1:1 mapped to
* IOAPIC RTE entries, so we just enable RTE for the device.
*/
- ret = mp_map_gsi_to_irq(dev->irq, IOAPIC_MAP_ALLOC, &info);
+ ret = mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC, &info);
if (ret < 0)
return ret;
+ dev->irq = ret;
dev->irq_managed = 1;
return 0;
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index f084d8718ac4..928b6dceeca0 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -1032,25 +1032,6 @@ void __init efi_enter_virtual_mode(void)
efi_dump_pagetable();
}
-/*
- * Convenience functions to obtain memory types and attributes
- */
-u32 efi_mem_type(unsigned long phys_addr)
-{
- efi_memory_desc_t *md;
-
- if (!efi_enabled(EFI_MEMMAP))
- return 0;
-
- for_each_efi_memory_desc(md) {
- if ((md->phys_addr <= phys_addr) &&
- (phys_addr < (md->phys_addr +
- (md->num_pages << EFI_PAGE_SHIFT))))
- return md->type;
- }
- return 0;
-}
-
static int __init arch_parse_efi_cmdline(char *str)
{
if (!str) {
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 9bf72f5bfedb..12e83888e5b9 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -327,7 +327,7 @@ virt_to_phys_or_null_size(void *va, unsigned long size)
int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
{
- unsigned long pfn, text;
+ unsigned long pfn, text, pf;
struct page *page;
unsigned npages;
pgd_t *pgd;
@@ -335,7 +335,12 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
if (efi_enabled(EFI_OLD_MEMMAP))
return 0;
- efi_scratch.efi_pgt = (pgd_t *)__pa(efi_pgd);
+ /*
+ * Since the PGD is encrypted, set the encryption mask so that when
+ * this value is loaded into cr3 the PGD will be decrypted during
+ * the pagetable walk.
+ */
+ efi_scratch.efi_pgt = (pgd_t *)__sme_pa(efi_pgd);
pgd = efi_pgd;
/*
@@ -345,7 +350,8 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
* phys_efi_set_virtual_address_map().
*/
pfn = pa_memmap >> PAGE_SHIFT;
- if (kernel_map_pages_in_pgd(pgd, pfn, pa_memmap, num_pages, _PAGE_NX | _PAGE_RW)) {
+ pf = _PAGE_NX | _PAGE_RW | _PAGE_ENC;
+ if (kernel_map_pages_in_pgd(pgd, pfn, pa_memmap, num_pages, pf)) {
pr_err("Error ident-mapping new memmap (0x%lx)!\n", pa_memmap);
return 1;
}
@@ -388,7 +394,8 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
text = __pa(_text);
pfn = text >> PAGE_SHIFT;
- if (kernel_map_pages_in_pgd(pgd, pfn, text, npages, _PAGE_RW)) {
+ pf = _PAGE_RW | _PAGE_ENC;
+ if (kernel_map_pages_in_pgd(pgd, pfn, text, npages, pf)) {
pr_err("Failed to map kernel text 1:1\n");
return 1;
}
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_bt.c b/arch/x86/platform/intel-mid/device_libs/platform_bt.c
index 5a0483e7bf66..dc036e511f48 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_bt.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_bt.c
@@ -60,7 +60,7 @@ static int __init tng_bt_sfi_setup(struct bt_sfi_data *ddata)
return 0;
}
-static struct bt_sfi_data tng_bt_sfi_data __initdata = {
+static const struct bt_sfi_data tng_bt_sfi_data __initdata = {
.setup = tng_bt_sfi_setup,
};
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_max7315.c b/arch/x86/platform/intel-mid/device_libs/platform_max7315.c
index 6e075afa7877..58337b2bc682 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_max7315.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_max7315.c
@@ -38,8 +38,10 @@ static void __init *max7315_platform_data(void *info)
*/
strcpy(i2c_info->type, "max7315");
if (nr++) {
- sprintf(base_pin_name, "max7315_%d_base", nr);
- sprintf(intr_pin_name, "max7315_%d_int", nr);
+ snprintf(base_pin_name, sizeof(base_pin_name),
+ "max7315_%d_base", nr);
+ snprintf(intr_pin_name, sizeof(intr_pin_name),
+ "max7315_%d_int", nr);
} else {
strcpy(base_pin_name, "max7315_base");
strcpy(intr_pin_name, "max7315_int");
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
index 9e304e2ea4f5..4f5fa65a1011 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
@@ -30,13 +30,13 @@ static int tangier_probe(struct platform_device *pdev)
{
struct irq_alloc_info info;
struct intel_mid_wdt_pdata *pdata = pdev->dev.platform_data;
- int gsi, irq;
+ int gsi = TANGIER_EXT_TIMER0_MSI;
+ int irq;
if (!pdata)
return -EINVAL;
/* IOAPIC builds identity mapping between GSI and IRQ on MID */
- gsi = pdata->irq;
ioapic_set_alloc_attr(&info, cpu_to_node(0), 1, 0);
irq = mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC, &info);
if (irq < 0) {
@@ -44,11 +44,11 @@ static int tangier_probe(struct platform_device *pdev)
return irq;
}
+ pdata->irq = irq;
return 0;
}
static struct intel_mid_wdt_pdata tangier_pdata = {
- .irq = TANGIER_EXT_TIMER0_MSI,
.probe = tangier_probe,
};
diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
index 12a272582cdc..86676cec99a1 100644
--- a/arch/x86/platform/intel-mid/intel-mid.c
+++ b/arch/x86/platform/intel-mid/intel-mid.c
@@ -183,6 +183,7 @@ void __init x86_intel_mid_early_setup(void)
x86_init.timers.timer_init = intel_mid_time_init;
x86_init.timers.setup_percpu_clockev = x86_init_noop;
+ x86_init.timers.wallclock_init = intel_mid_rtc_init;
x86_init.irqs.pre_vector_init = x86_init_noop;
@@ -191,7 +192,6 @@ void __init x86_intel_mid_early_setup(void)
x86_cpuinit.setup_percpu_clockev = apbt_setup_secondary_clock;
x86_platform.calibrate_tsc = intel_mid_calibrate_tsc;
- x86_init.timers.wallclock_init = intel_mid_rtc_init;
x86_platform.get_nmi_reason = intel_mid_get_nmi_reason;
x86_init.pci.init = intel_mid_pci_init;
diff --git a/arch/x86/platform/intel-mid/pwr.c b/arch/x86/platform/intel-mid/pwr.c
index ef03852ea6e8..49ec5b94c71f 100644
--- a/arch/x86/platform/intel-mid/pwr.c
+++ b/arch/x86/platform/intel-mid/pwr.c
@@ -444,7 +444,7 @@ static int mid_set_initial_state(struct mid_pwr *pwr, const u32 *states)
static int pnw_set_initial_state(struct mid_pwr *pwr)
{
/* On Penwell SRAM must stay powered on */
- const u32 states[] = {
+ static const u32 states[] = {
0xf00fffff, /* PM_SSC(0) */
0xffffffff, /* PM_SSC(1) */
0xffffffff, /* PM_SSC(2) */
@@ -455,7 +455,7 @@ static int pnw_set_initial_state(struct mid_pwr *pwr)
static int tng_set_initial_state(struct mid_pwr *pwr)
{
- const u32 states[] = {
+ static const u32 states[] = {
0xffffffff, /* PM_SSC(0) */
0xffffffff, /* PM_SSC(1) */
0xffffffff, /* PM_SSC(2) */
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index d4a61ddf9e62..f44c0bc95aa2 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -26,7 +26,7 @@
static struct bau_operations ops __ro_after_init;
/* timeouts in nanoseconds (indexed by UVH_AGING_PRESCALE_SEL urgency7 30:28) */
-static int timeout_base_ns[] = {
+static const int timeout_base_ns[] = {
20,
160,
1280,
@@ -40,7 +40,6 @@ static int timeout_base_ns[] = {
static int timeout_us;
static bool nobau = true;
static int nobau_perm;
-static cycles_t congested_cycles;
/* tunables: */
static int max_concurr = MAX_BAU_CONCURRENT;
@@ -829,10 +828,10 @@ static void record_send_stats(cycles_t time1, cycles_t time2,
if ((completion_status == FLUSH_COMPLETE) && (try == 1)) {
bcp->period_requests++;
bcp->period_time += elapsed;
- if ((elapsed > congested_cycles) &&
+ if ((elapsed > usec_2_cycles(bcp->cong_response_us)) &&
(bcp->period_requests > bcp->cong_reps) &&
((bcp->period_time / bcp->period_requests) >
- congested_cycles)) {
+ usec_2_cycles(bcp->cong_response_us))) {
stat->s_congested++;
disable_for_period(bcp, stat);
}
@@ -1217,7 +1216,7 @@ static struct bau_pq_entry *find_another_by_swack(struct bau_pq_entry *msg,
* set a bit in the UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE register.
* Such a message must be ignored.
*/
-void process_uv2_message(struct msg_desc *mdp, struct bau_control *bcp)
+static void process_uv2_message(struct msg_desc *mdp, struct bau_control *bcp)
{
unsigned long mmr_image;
unsigned char swack_vec;
@@ -2222,14 +2221,17 @@ static int __init uv_bau_init(void)
else if (is_uv1_hub())
ops = uv1_bau_ops;
+ nuvhubs = uv_num_possible_blades();
+ if (nuvhubs < 2) {
+ pr_crit("UV: BAU disabled - insufficient hub count\n");
+ goto err_bau_disable;
+ }
+
for_each_possible_cpu(cur_cpu) {
mask = &per_cpu(uv_flush_tlb_mask, cur_cpu);
zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cur_cpu));
}
- nuvhubs = uv_num_possible_blades();
- congested_cycles = usec_2_cycles(congested_respns_us);
-
uv_base_pnode = 0x7fffffff;
for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
cpus = uv_blade_nr_possible_cpus(uvhub);
@@ -2242,9 +2244,8 @@ static int __init uv_bau_init(void)
enable_timeouts();
if (init_per_cpu(nuvhubs, uv_base_pnode)) {
- set_bau_off();
- nobau_perm = 1;
- return 0;
+ pr_crit("UV: BAU disabled - per CPU init failed\n");
+ goto err_bau_disable;
}
vector = UV_BAU_MESSAGE;
@@ -2270,6 +2271,16 @@ static int __init uv_bau_init(void)
}
return 0;
+
+err_bau_disable:
+
+ for_each_possible_cpu(cur_cpu)
+ free_cpumask_var(per_cpu(uv_flush_tlb_mask, cur_cpu));
+
+ set_bau_off();
+ nobau_perm = 1;
+
+ return -EINVAL;
}
core_initcall(uv_bau_init);
fs_initcall(uv_ptc_init);
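The reworked uv_bau_init() above funnels both failure cases (too few hubs, per-CPU init failure) into a single err_bau_disable label that frees the per-CPU cpumasks and permanently disables the BAU via set_bau_off()/nobau_perm before returning an error. A minimal, self-contained sketch of that single-exit cleanup idiom; names are illustrative, not kernel APIs:

#include <stdio.h>
#include <stdlib.h>

#define NCPUS 4

static int feature_off;
static unsigned long *masks[NCPUS];

static int per_cpu_setup(void)
{
	return 0;	/* pretend the per-CPU init succeeds */
}

static int feature_init(int nhubs)
{
	int cpu;

	if (nhubs < 2) {
		fprintf(stderr, "feature disabled - insufficient hub count\n");
		goto err_disable;
	}

	for (cpu = 0; cpu < NCPUS; cpu++)
		masks[cpu] = calloc(1, sizeof(unsigned long));

	if (per_cpu_setup() < 0) {
		fprintf(stderr, "feature disabled - per CPU init failed\n");
		goto err_disable;
	}

	return 0;

err_disable:
	/* free(NULL) is a no-op, so this is safe even if allocation never ran */
	for (cpu = 0; cpu < NCPUS; cpu++) {
		free(masks[cpu]);
		masks[cpu] = NULL;
	}
	feature_off = 1;
	return -1;
}

int main(void)
{
	int ret = feature_init(1);

	printf("init: %d, feature disabled: %d\n", ret, feature_off);
	return 0;
}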
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 78459a6d455a..4d68d59f457d 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -181,6 +181,7 @@ static void fix_processor_context(void)
#endif
load_TR_desc(); /* This does ltr */
load_mm_ldt(current->active_mm); /* This does lldt */
+ initialize_tlbstate_and_flush();
fpu__resume_cpu();
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
index cd4be19c36dc..1f71980fc5e0 100644
--- a/arch/x86/realmode/init.c
+++ b/arch/x86/realmode/init.c
@@ -1,6 +1,7 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/memblock.h>
+#include <linux/mem_encrypt.h>
#include <asm/set_memory.h>
#include <asm/pgtable.h>
@@ -59,6 +60,13 @@ static void __init setup_real_mode(void)
base = (unsigned char *)real_mode_header;
+ /*
+ * If SME is active, the trampoline area will need to be in
+ * decrypted memory in order to bring up other processors
+ * successfully.
+ */
+ set_memory_decrypted((unsigned long)base, size >> PAGE_SHIFT);
+
memcpy(base, real_mode_blob, size);
phys_base = __pa(base);
@@ -100,6 +108,10 @@ static void __init setup_real_mode(void)
trampoline_cr4_features = &trampoline_header->cr4;
*trampoline_cr4_features = mmu_cr4_features;
+ trampoline_header->flags = 0;
+ if (sme_active())
+ trampoline_header->flags |= TH_FLAGS_SME_ACTIVE;
+
trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
trampoline_pgd[0] = trampoline_pgd_entry.pgd;
trampoline_pgd[511] = init_top_pgt[511].pgd;
diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
index dac7b20d2f9d..614fd7064d0a 100644
--- a/arch/x86/realmode/rm/trampoline_64.S
+++ b/arch/x86/realmode/rm/trampoline_64.S
@@ -30,6 +30,7 @@
#include <asm/msr.h>
#include <asm/segment.h>
#include <asm/processor-flags.h>
+#include <asm/realmode.h>
#include "realmode.h"
.text
@@ -92,6 +93,28 @@ ENTRY(startup_32)
movl %edx, %fs
movl %edx, %gs
+ /*
+ * Check for memory encryption support. This is a safety net in
+ * case BIOS hasn't done the necessary step of setting the bit in
+ * the MSR for this AP. If SME is active and we've gotten this far
+ * then it is safe for us to set the MSR bit and continue. If we
+ * don't we'll eventually crash trying to execute encrypted
+ * instructions.
+ */
+ bt $TH_FLAGS_SME_ACTIVE_BIT, pa_tr_flags
+ jnc .Ldone
+ movl $MSR_K8_SYSCFG, %ecx
+ rdmsr
+ bts $MSR_K8_SYSCFG_MEM_ENCRYPT_BIT, %eax
+ jc .Ldone
+
+ /*
+ * Memory encryption is enabled but the SME enable bit for this
+ * CPU has not been set. It is safe to set it, so do so.
+ */
+ wrmsr
+.Ldone:
+
movl pa_tr_cr4, %eax
movl %eax, %cr4 # Enable PAE mode
@@ -147,6 +170,7 @@ GLOBAL(trampoline_header)
tr_start: .space 8
GLOBAL(tr_efer) .space 8
GLOBAL(tr_cr4) .space 4
+ GLOBAL(tr_flags) .space 4
END(trampoline_header)
#include "trampoline_common.S"
diff --git a/arch/x86/um/user-offsets.c b/arch/x86/um/user-offsets.c
index ae4cd58c0c7a..02250b2633b8 100644
--- a/arch/x86/um/user-offsets.c
+++ b/arch/x86/um/user-offsets.c
@@ -50,7 +50,7 @@ void foo(void)
DEFINE(HOST_GS, GS);
DEFINE(HOST_ORIG_AX, ORIG_EAX);
#else
-#if defined(PTRACE_GETREGSET) && defined(PTRACE_SETREGSET)
+#ifdef FP_XSTATE_MAGIC1
DEFINE(HOST_FP_SIZE, sizeof(struct _xstate) / sizeof(unsigned long));
#else
DEFINE(HOST_FP_SIZE, sizeof(struct _fpstate) / sizeof(unsigned long));
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
index 027987638e98..1ecd419811a2 100644
--- a/arch/x86/xen/Kconfig
+++ b/arch/x86/xen/Kconfig
@@ -17,6 +17,9 @@ config XEN_PV
bool "Xen PV guest support"
default y
depends on XEN
+ # XEN_PV is not ready to work with 5-level paging.
+ # Changes to hypervisor are also required.
+ depends on !X86_5LEVEL
select XEN_HAVE_PVMMU
select XEN_HAVE_VPMU
help
@@ -75,4 +78,6 @@ config XEN_DEBUG_FS
config XEN_PVH
bool "Support for running as a PVH guest"
depends on XEN && XEN_PVHVM && ACPI
+ # Pre-built page tables are not ready to handle 5-level paging.
+ depends on !X86_5LEVEL
def_bool n
diff --git a/arch/x86/xen/enlighten_hvm.c b/arch/x86/xen/enlighten_hvm.c
index 87d791356ea9..de503c225ae1 100644
--- a/arch/x86/xen/enlighten_hvm.c
+++ b/arch/x86/xen/enlighten_hvm.c
@@ -12,6 +12,7 @@
#include <asm/setup.h>
#include <asm/hypervisor.h>
#include <asm/e820/api.h>
+#include <asm/early_ioremap.h>
#include <asm/xen/cpuid.h>
#include <asm/xen/hypervisor.h>
@@ -21,38 +22,50 @@
#include "mmu.h"
#include "smp.h"
-void __ref xen_hvm_init_shared_info(void)
+static unsigned long shared_info_pfn;
+
+void xen_hvm_init_shared_info(void)
{
struct xen_add_to_physmap xatp;
- u64 pa;
-
- if (HYPERVISOR_shared_info == &xen_dummy_shared_info) {
- /*
- * Search for a free page starting at 4kB physical address.
- * Low memory is preferred to avoid an EPT large page split up
- * by the mapping.
- * Starting below X86_RESERVE_LOW (usually 64kB) is fine as
- * the BIOS used for HVM guests is well behaved and won't
- * clobber memory other than the first 4kB.
- */
- for (pa = PAGE_SIZE;
- !e820__mapped_all(pa, pa + PAGE_SIZE, E820_TYPE_RAM) ||
- memblock_is_reserved(pa);
- pa += PAGE_SIZE)
- ;
-
- memblock_reserve(pa, PAGE_SIZE);
- HYPERVISOR_shared_info = __va(pa);
- }
xatp.domid = DOMID_SELF;
xatp.idx = 0;
xatp.space = XENMAPSPACE_shared_info;
- xatp.gpfn = virt_to_pfn(HYPERVISOR_shared_info);
+ xatp.gpfn = shared_info_pfn;
if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
BUG();
}
+static void __init reserve_shared_info(void)
+{
+ u64 pa;
+
+ /*
+ * Search for a free page starting at 4kB physical address.
+ * Low memory is preferred to avoid an EPT large page split up
+ * by the mapping.
+ * Starting below X86_RESERVE_LOW (usually 64kB) is fine as
+ * the BIOS used for HVM guests is well behaved and won't
+ * clobber memory other than the first 4kB.
+ */
+ for (pa = PAGE_SIZE;
+ !e820__mapped_all(pa, pa + PAGE_SIZE, E820_TYPE_RAM) ||
+ memblock_is_reserved(pa);
+ pa += PAGE_SIZE)
+ ;
+
+ shared_info_pfn = PHYS_PFN(pa);
+
+ memblock_reserve(pa, PAGE_SIZE);
+ HYPERVISOR_shared_info = early_memremap(pa, PAGE_SIZE);
+}
+
+static void __init xen_hvm_init_mem_mapping(void)
+{
+ early_memunmap(HYPERVISOR_shared_info, PAGE_SIZE);
+ HYPERVISOR_shared_info = __va(PFN_PHYS(shared_info_pfn));
+}
+
static void __init init_hvm_pv_info(void)
{
int major, minor;
@@ -153,6 +166,7 @@ static void __init xen_hvm_guest_init(void)
init_hvm_pv_info();
+ reserve_shared_info();
xen_hvm_init_shared_info();
/*
@@ -218,5 +232,6 @@ const struct hypervisor_x86 x86_hyper_xen_hvm = {
.init_platform = xen_hvm_guest_init,
.pin_vcpu = xen_pin_vcpu,
.x2apic_available = xen_x2apic_para_available,
+ .init_mem_mapping = xen_hvm_init_mem_mapping,
};
EXPORT_SYMBOL(x86_hyper_xen_hvm);
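The enlighten_hvm.c change above splits shared-info setup into two phases: reserve_shared_info() runs before the kernel's direct mapping exists, so it records the PFN and maps the page with early_memremap(), and the new init_mem_mapping hook later drops that temporary mapping and points HYPERVISOR_shared_info at the permanent __va() address. A rough, hypothetical userspace model of that hand-off; the early_map/direct_map helpers are stand-ins, not kernel interfaces:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL

static unsigned char backing_page[PAGE_SIZE];	/* pretend physical page */

/* Stand-ins for early_memremap()/early_memunmap()/__va(). */
static void *early_map(uint64_t pa)  { (void)pa; return backing_page; }
static void  early_unmap(void *va)   { (void)va; }
static void *direct_map(uint64_t pa) { (void)pa; return backing_page; }

static uint64_t shared_info_pfn;
static void *shared_info;

/* Phase 1: called from guest init, before the direct map is available. */
static void reserve_shared_info(void)
{
	uint64_t pa = PAGE_SIZE;	/* page found by the e820/memblock search */

	shared_info_pfn = pa / PAGE_SIZE;
	shared_info = early_map(pa);
}

/* Phase 2: called once the direct map is set up; switch to the final mapping. */
static void init_mem_mapping(void)
{
	early_unmap(shared_info);
	shared_info = direct_map(shared_info_pfn * PAGE_SIZE);
}

int main(void)
{
	reserve_shared_info();
	/* ... early boot uses the temporary mapping here ... */
	init_mem_mapping();
	printf("shared info mapped at %p\n", shared_info);
	return 0;
}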
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 811e4ddb3f37..ae2a2e2d6362 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -263,6 +263,13 @@ static void __init xen_init_capabilities(void)
setup_clear_cpu_cap(X86_FEATURE_MTRR);
setup_clear_cpu_cap(X86_FEATURE_ACC);
setup_clear_cpu_cap(X86_FEATURE_X2APIC);
+ setup_clear_cpu_cap(X86_FEATURE_SME);
+
+ /*
+ * Xen PV would need some work to support PCID: CR3 handling as well
+ * as xen_flush_tlb_others() would need updating.
+ */
+ setup_clear_cpu_cap(X86_FEATURE_PCID);
if (!xen_initial_domain())
setup_clear_cpu_cap(X86_FEATURE_ACPI);
@@ -494,7 +501,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
static inline bool desc_equal(const struct desc_struct *d1,
const struct desc_struct *d2)
{
- return d1->a == d2->a && d1->b == d2->b;
+ return !memcmp(d1, d2, sizeof(*d1));
}
static void load_TLS_descriptor(struct thread_struct *t,
@@ -579,59 +586,91 @@ static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
preempt_enable();
}
+#ifdef CONFIG_X86_64
+struct trap_array_entry {
+ void (*orig)(void);
+ void (*xen)(void);
+ bool ist_okay;
+};
+
+static struct trap_array_entry trap_array[] = {
+ { debug, xen_xendebug, true },
+ { int3, xen_xenint3, true },
+ { double_fault, xen_double_fault, true },
+#ifdef CONFIG_X86_MCE
+ { machine_check, xen_machine_check, true },
+#endif
+ { nmi, xen_nmi, true },
+ { overflow, xen_overflow, false },
+#ifdef CONFIG_IA32_EMULATION
+ { entry_INT80_compat, xen_entry_INT80_compat, false },
+#endif
+ { page_fault, xen_page_fault, false },
+ { divide_error, xen_divide_error, false },
+ { bounds, xen_bounds, false },
+ { invalid_op, xen_invalid_op, false },
+ { device_not_available, xen_device_not_available, false },
+ { coprocessor_segment_overrun, xen_coprocessor_segment_overrun, false },
+ { invalid_TSS, xen_invalid_TSS, false },
+ { segment_not_present, xen_segment_not_present, false },
+ { stack_segment, xen_stack_segment, false },
+ { general_protection, xen_general_protection, false },
+ { spurious_interrupt_bug, xen_spurious_interrupt_bug, false },
+ { coprocessor_error, xen_coprocessor_error, false },
+ { alignment_check, xen_alignment_check, false },
+ { simd_coprocessor_error, xen_simd_coprocessor_error, false },
+};
+
+static bool get_trap_addr(void **addr, unsigned int ist)
+{
+ unsigned int nr;
+ bool ist_okay = false;
+
+ /*
+ * Replace trap handler addresses by Xen specific ones.
+ * Check for known traps using IST and whitelist them.
+ * The debugger ones are the only ones we care about.
+ * Xen will handle faults like double_fault, so we should never see
+ * them. Warn if there's an unexpected IST-using fault handler.
+ */
+ for (nr = 0; nr < ARRAY_SIZE(trap_array); nr++) {
+ struct trap_array_entry *entry = trap_array + nr;
+
+ if (*addr == entry->orig) {
+ *addr = entry->xen;
+ ist_okay = entry->ist_okay;
+ break;
+ }
+ }
+
+ if (WARN_ON(ist != 0 && !ist_okay))
+ return false;
+
+ return true;
+}
+#endif
+
static int cvt_gate_to_trap(int vector, const gate_desc *val,
struct trap_info *info)
{
unsigned long addr;
- if (val->type != GATE_TRAP && val->type != GATE_INTERRUPT)
+ if (val->bits.type != GATE_TRAP && val->bits.type != GATE_INTERRUPT)
return 0;
info->vector = vector;
- addr = gate_offset(*val);
+ addr = gate_offset(val);
#ifdef CONFIG_X86_64
- /*
- * Look for known traps using IST, and substitute them
- * appropriately. The debugger ones are the only ones we care
- * about. Xen will handle faults like double_fault,
- * so we should never see them. Warn if
- * there's an unexpected IST-using fault handler.
- */
- if (addr == (unsigned long)debug)
- addr = (unsigned long)xen_debug;
- else if (addr == (unsigned long)int3)
- addr = (unsigned long)xen_int3;
- else if (addr == (unsigned long)stack_segment)
- addr = (unsigned long)xen_stack_segment;
- else if (addr == (unsigned long)double_fault) {
- /* Don't need to handle these */
+ if (!get_trap_addr((void **)&addr, val->bits.ist))
return 0;
-#ifdef CONFIG_X86_MCE
- } else if (addr == (unsigned long)machine_check) {
- /*
- * when xen hypervisor inject vMCE to guest,
- * use native mce handler to handle it
- */
- ;
-#endif
- } else if (addr == (unsigned long)nmi)
- /*
- * Use the native version as well.
- */
- ;
- else {
- /* Some other trap using IST? */
- if (WARN_ON(val->ist != 0))
- return 0;
- }
#endif /* CONFIG_X86_64 */
info->address = addr;
- info->cs = gate_segment(*val);
- info->flags = val->dpl;
+ info->cs = gate_segment(val);
+ info->flags = val->bits.dpl;
/* interrupt gates clear IF */
- if (val->type == GATE_INTERRUPT)
+ if (val->bits.type == GATE_INTERRUPT)
info->flags |= 1 << 2;
return 1;
@@ -981,59 +1020,6 @@ void __ref xen_setup_vcpu_info_placement(void)
}
}
-static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
- unsigned long addr, unsigned len)
-{
- char *start, *end, *reloc;
- unsigned ret;
-
- start = end = reloc = NULL;
-
-#define SITE(op, x) \
- case PARAVIRT_PATCH(op.x): \
- if (xen_have_vcpu_info_placement) { \
- start = (char *)xen_##x##_direct; \
- end = xen_##x##_direct_end; \
- reloc = xen_##x##_direct_reloc; \
- } \
- goto patch_site
-
- switch (type) {
- SITE(pv_irq_ops, irq_enable);
- SITE(pv_irq_ops, irq_disable);
- SITE(pv_irq_ops, save_fl);
- SITE(pv_irq_ops, restore_fl);
-#undef SITE
-
- patch_site:
- if (start == NULL || (end-start) > len)
- goto default_patch;
-
- ret = paravirt_patch_insns(insnbuf, len, start, end);
-
- /* Note: because reloc is assigned from something that
- appears to be an array, gcc assumes it's non-null,
- but doesn't know its relationship with start and
- end. */
- if (reloc > start && reloc < end) {
- int reloc_off = reloc - start;
- long *relocp = (long *)(insnbuf + reloc_off);
- long delta = start - (char *)addr;
-
- *relocp += delta;
- }
- break;
-
- default_patch:
- default:
- ret = paravirt_patch_default(type, clobbers, insnbuf,
- addr, len);
- break;
- }
-
- return ret;
-}
-
static const struct pv_info xen_info __initconst = {
.shared_kernel_pmd = 0,
@@ -1043,10 +1029,6 @@ static const struct pv_info xen_info __initconst = {
.name = "Xen",
};
-static const struct pv_init_ops xen_init_ops __initconst = {
- .patch = xen_patch,
-};
-
static const struct pv_cpu_ops xen_cpu_ops __initconst = {
.cpuid = xen_cpuid,
@@ -1244,7 +1226,7 @@ asmlinkage __visible void __init xen_start_kernel(void)
/* Install Xen paravirt ops */
pv_info = xen_info;
- pv_init_ops = xen_init_ops;
+ pv_init_ops.patch = paravirt_patch_default;
pv_cpu_ops = xen_cpu_ops;
x86_platform.get_nmi_reason = xen_get_nmi_reason;
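Earlier in this file the open-coded if/else chain in cvt_gate_to_trap() is replaced by the trap_array[] table and get_trap_addr(), which both substitutes the Xen entry stub for the native handler and whitelists the handlers that may legitimately use an IST. A small standalone sketch of that table-driven substitution, using dummy handlers in place of the real entry points:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef void (*handler_t)(void);

struct trap_entry {
	handler_t orig;		/* native handler */
	handler_t repl;		/* paravirtualized replacement */
	bool ist_okay;		/* may legitimately be registered with an IST */
};

/* Dummy handlers standing in for the real entry points. */
static void native_debug(void) {}
static void pv_debug(void)     {}
static void native_nmi(void)   {}
static void pv_nmi(void)       {}

static const struct trap_entry trap_table[] = {
	{ native_debug, pv_debug, true },
	{ native_nmi,   pv_nmi,   true },
};

/* Swap *addr for its replacement; refuse unknown handlers that request an IST. */
static bool get_trap_addr(handler_t *addr, unsigned int ist)
{
	bool ist_okay = false;
	size_t i;

	for (i = 0; i < sizeof(trap_table) / sizeof(trap_table[0]); i++) {
		if (*addr == trap_table[i].orig) {
			*addr = trap_table[i].repl;
			ist_okay = trap_table[i].ist_okay;
			break;
		}
	}

	return !(ist != 0 && !ist_okay);
}

int main(void)
{
	handler_t h = native_debug;
	bool ok = get_trap_addr(&h, 1);

	printf("accepted=%d substituted=%d\n", ok, h == pv_debug);
	return 0;
}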
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
index 33e92955e09d..d4eff5676cfa 100644
--- a/arch/x86/xen/irq.c
+++ b/arch/x86/xen/irq.c
@@ -123,9 +123,6 @@ static const struct pv_irq_ops xen_irq_ops __initconst = {
.safe_halt = xen_safe_halt,
.halt = xen_halt,
-#ifdef CONFIG_X86_64
- .adjust_exception_frame = xen_adjust_exception_frame,
-#endif
};
void __init xen_init_irq_ops(void)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 3be06f3caf3c..3e15345abfe7 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -84,7 +84,7 @@ static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
else
rmd->mfn++;
- rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
+ rmd->mmu_update->ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
rmd->mmu_update->val = pte_val_ma(pte);
rmd->mmu_update++;
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index cab28cf2cffb..6b983b300666 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -162,26 +162,6 @@ static bool xen_page_pinned(void *ptr)
return PagePinned(page);
}
-void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
-{
- struct multicall_space mcs;
- struct mmu_update *u;
-
- trace_xen_mmu_set_domain_pte(ptep, pteval, domid);
-
- mcs = xen_mc_entry(sizeof(*u));
- u = mcs.args;
-
- /* ptep might be kmapped when using 32-bit HIGHPTE */
- u->ptr = virt_to_machine(ptep).maddr;
- u->val = pte_val_ma(pteval);
-
- MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);
-
- xen_mc_issue(PARAVIRT_LAZY_MMU);
-}
-EXPORT_SYMBOL_GPL(xen_set_domain_pte);
-
static void xen_extend_mmu_update(const struct mmu_update *update)
{
struct multicall_space mcs;
@@ -1005,14 +985,12 @@ static void xen_drop_mm_ref(struct mm_struct *mm)
/* Get the "official" set of cpus referring to our pagetable. */
if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
for_each_online_cpu(cpu) {
- if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
- && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
+ if (per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
continue;
smp_call_function_single(cpu, drop_mm_ref_this_cpu, mm, 1);
}
return;
}
- cpumask_copy(mask, mm_cpumask(mm));
/*
* It's possible that a vcpu may have a stale reference to our
@@ -1021,6 +999,7 @@ static void xen_drop_mm_ref(struct mm_struct *mm)
* look at its actual current cr3 value, and force it to flush
* if needed.
*/
+ cpumask_clear(mask);
for_each_online_cpu(cpu) {
if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
cpumask_set_cpu(cpu, mask);
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 276da636dd39..6083ba462f35 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -212,8 +212,7 @@ void __ref xen_build_mfn_list_list(void)
unsigned int level, topidx, mididx;
unsigned long *mid_mfn_p;
- if (xen_feature(XENFEAT_auto_translated_physmap) ||
- xen_start_info->flags & SIF_VIRT_P2M_4TOOLS)
+ if (xen_start_info->flags & SIF_VIRT_P2M_4TOOLS)
return;
/* Pre-initialize p2m_top_mfn to be completely missing */
@@ -269,9 +268,6 @@ void __ref xen_build_mfn_list_list(void)
void xen_setup_mfn_list_list(void)
{
- if (xen_feature(XENFEAT_auto_translated_physmap))
- return;
-
BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
if (xen_start_info->flags & SIF_VIRT_P2M_4TOOLS)
@@ -291,9 +287,6 @@ void __init xen_build_dynamic_phys_to_machine(void)
{
unsigned long pfn;
- if (xen_feature(XENFEAT_auto_translated_physmap))
- return;
-
xen_p2m_addr = (unsigned long *)xen_start_info->mfn_list;
xen_p2m_size = ALIGN(xen_start_info->nr_pages, P2M_PER_PAGE);
@@ -540,9 +533,6 @@ int xen_alloc_p2m_entry(unsigned long pfn)
unsigned long addr = (unsigned long)(xen_p2m_addr + pfn);
unsigned long p2m_pfn;
- if (xen_feature(XENFEAT_auto_translated_physmap))
- return 0;
-
ptep = lookup_address(addr, &level);
BUG_ON(!ptep || level != PG_LEVEL_4K);
pte_pg = (pte_t *)((unsigned long)ptep & ~(PAGE_SIZE - 1));
@@ -640,9 +630,6 @@ unsigned long __init set_phys_range_identity(unsigned long pfn_s,
if (unlikely(pfn_s >= xen_p2m_size))
return 0;
- if (unlikely(xen_feature(XENFEAT_auto_translated_physmap)))
- return pfn_e - pfn_s;
-
if (pfn_s > pfn_e)
return 0;
@@ -660,10 +647,6 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
pte_t *ptep;
unsigned int level;
- /* don't track P2M changes in autotranslate guests */
- if (unlikely(xen_feature(XENFEAT_auto_translated_physmap)))
- return true;
-
if (unlikely(pfn >= xen_p2m_size)) {
BUG_ON(mfn != INVALID_P2M_ENTRY);
return true;
@@ -711,9 +694,6 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
int i, ret = 0;
pte_t *pte;
- if (xen_feature(XENFEAT_auto_translated_physmap))
- return 0;
-
if (kmap_ops) {
ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
kmap_ops, count);
@@ -756,9 +736,6 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
{
int i, ret = 0;
- if (xen_feature(XENFEAT_auto_translated_physmap))
- return 0;
-
for (i = 0; i < count; i++) {
unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i]));
unsigned long pfn = page_to_pfn(pages[i]);
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index c81046323ebc..ac55c02f98e9 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -340,8 +340,6 @@ static void __init xen_do_set_identity_and_remap_chunk(
WARN_ON(size == 0);
- BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));
-
mfn_save = virt_to_mfn(buf);
for (ident_pfn_iter = start_pfn, remap_pfn_iter = remap_pfn;
@@ -1024,8 +1022,7 @@ void __init xen_pvmmu_arch_setup(void)
void __init xen_arch_setup(void)
{
xen_panic_handler_init();
- if (!xen_feature(XENFEAT_auto_translated_physmap))
- xen_pvmmu_arch_setup();
+ xen_pvmmu_arch_setup();
#ifdef CONFIG_ACPI
if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
index 1ea598e5f030..51471408fdd1 100644
--- a/arch/x86/xen/smp_pv.c
+++ b/arch/x86/xen/smp_pv.c
@@ -19,6 +19,7 @@
#include <linux/irq_work.h>
#include <linux/tick.h>
#include <linux/nmi.h>
+#include <linux/cpuhotplug.h>
#include <asm/paravirt.h>
#include <asm/desc.h>
@@ -413,7 +414,7 @@ static void xen_pv_play_dead(void) /* used only with HOTPLUG_CPU */
*/
tick_nohz_idle_enter();
- cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
+ cpuhp_online_idle(CPUHP_AP_ONLINE_IDLE);
}
#else /* !CONFIG_HOTPLUG_CPU */
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index a1895a8e85c1..1ecb05db3632 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -309,7 +309,6 @@ static irqreturn_t xen_timer_interrupt(int irq, void *dev_id)
void xen_teardown_timer(int cpu)
{
struct clock_event_device *evt;
- BUG_ON(cpu == 0);
evt = &per_cpu(xen_clock_events, cpu).evt;
if (evt->irq >= 0) {
diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
index eff224df813f..dcd31fa39b5d 100644
--- a/arch/x86/xen/xen-asm.S
+++ b/arch/x86/xen/xen-asm.S
@@ -1,14 +1,8 @@
/*
- * Asm versions of Xen pv-ops, suitable for either direct use or
- * inlining. The inline versions are the same as the direct-use
- * versions, with the pre- and post-amble chopped off.
- *
- * This code is encoded for size rather than absolute efficiency, with
- * a view to being able to inline as much as possible.
+ * Asm versions of Xen pv-ops, suitable for direct use.
*
* We only bother with direct forms (ie, vcpu in percpu data) of the
- * operations here; the indirect forms are better handled in C, since
- * they're generally too large to inline anyway.
+ * operations here; the indirect forms are better handled in C.
*/
#include <asm/asm-offsets.h>
@@ -16,7 +10,7 @@
#include <asm/processor-flags.h>
#include <asm/frame.h>
-#include "xen-asm.h"
+#include <linux/linkage.h>
/*
* Enable events. This clears the event mask and tests the pending
@@ -38,13 +32,11 @@ ENTRY(xen_irq_enable_direct)
testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
jz 1f
-2: call check_events
+ call check_events
1:
-ENDPATCH(xen_irq_enable_direct)
FRAME_END
ret
ENDPROC(xen_irq_enable_direct)
- RELOC(xen_irq_enable_direct, 2b+1)
/*
@@ -53,10 +45,8 @@ ENDPATCH(xen_irq_enable_direct)
*/
ENTRY(xen_irq_disable_direct)
movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
-ENDPATCH(xen_irq_disable_direct)
ret
- ENDPROC(xen_irq_disable_direct)
- RELOC(xen_irq_disable_direct, 0)
+ENDPROC(xen_irq_disable_direct)
/*
* (xen_)save_fl is used to get the current interrupt enable status.
@@ -71,10 +61,8 @@ ENTRY(xen_save_fl_direct)
testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
setz %ah
addb %ah, %ah
-ENDPATCH(xen_save_fl_direct)
ret
ENDPROC(xen_save_fl_direct)
- RELOC(xen_save_fl_direct, 0)
/*
@@ -101,13 +89,11 @@ ENTRY(xen_restore_fl_direct)
/* check for unmasked and pending */
cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
jnz 1f
-2: call check_events
+ call check_events
1:
-ENDPATCH(xen_restore_fl_direct)
FRAME_END
ret
ENDPROC(xen_restore_fl_direct)
- RELOC(xen_restore_fl_direct, 2b+1)
/*
diff --git a/arch/x86/xen/xen-asm.h b/arch/x86/xen/xen-asm.h
deleted file mode 100644
index 465276467a47..000000000000
--- a/arch/x86/xen/xen-asm.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef _XEN_XEN_ASM_H
-#define _XEN_XEN_ASM_H
-
-#include <linux/linkage.h>
-
-#define RELOC(x, v) .globl x##_reloc; x##_reloc=v
-#define ENDPATCH(x) .globl x##_end; x##_end=.
-
-/* Pseudo-flag used for virtual NMI, which we don't implement yet */
-#define XEN_EFLAGS_NMI 0x80000000
-
-#endif
diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
index feb6d40a0860..1200e262a116 100644
--- a/arch/x86/xen/xen-asm_32.S
+++ b/arch/x86/xen/xen-asm_32.S
@@ -1,14 +1,8 @@
/*
- * Asm versions of Xen pv-ops, suitable for either direct use or
- * inlining. The inline versions are the same as the direct-use
- * versions, with the pre- and post-amble chopped off.
- *
- * This code is encoded for size rather than absolute efficiency, with
- * a view to being able to inline as much as possible.
+ * Asm versions of Xen pv-ops, suitable for direct use.
*
* We only bother with direct forms (ie, vcpu in pda) of the
- * operations here; the indirect forms are better handled in C, since
- * they're generally too large to inline anyway.
+ * operations here; the indirect forms are better handled in C.
*/
#include <asm/thread_info.h>
@@ -18,21 +12,10 @@
#include <xen/interface/xen.h>
-#include "xen-asm.h"
+#include <linux/linkage.h>
-/*
- * Force an event check by making a hypercall, but preserve regs
- * before making the call.
- */
-check_events:
- push %eax
- push %ecx
- push %edx
- call xen_force_evtchn_callback
- pop %edx
- pop %ecx
- pop %eax
- ret
+/* Pseudo-flag used for virtual NMI, which we don't implement yet */
+#define XEN_EFLAGS_NMI 0x80000000
/*
* This is run where a normal iret would be run, with the same stack setup:
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index c3df43141e70..dae2cc33afb5 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -1,14 +1,8 @@
/*
- * Asm versions of Xen pv-ops, suitable for either direct use or
- * inlining. The inline versions are the same as the direct-use
- * versions, with the pre- and post-amble chopped off.
- *
- * This code is encoded for size rather than absolute efficiency, with
- * a view to being able to inline as much as possible.
+ * Asm versions of Xen pv-ops, suitable for direct use.
*
* We only bother with direct forms (ie, vcpu in pda) of the
- * operations here; the indirect forms are better handled in C, since
- * they're generally too large to inline anyway.
+ * operations here; the indirect forms are better handled in C.
*/
#include <asm/errno.h>
@@ -20,13 +14,44 @@
#include <xen/interface/xen.h>
-#include "xen-asm.h"
+#include <linux/linkage.h>
+
+.macro xen_pv_trap name
+ENTRY(xen_\name)
+ pop %rcx
+ pop %r11
+ jmp \name
+END(xen_\name)
+.endm
-ENTRY(xen_adjust_exception_frame)
- mov 8+0(%rsp), %rcx
- mov 8+8(%rsp), %r11
- ret $16
-ENDPROC(xen_adjust_exception_frame)
+xen_pv_trap divide_error
+xen_pv_trap debug
+xen_pv_trap xendebug
+xen_pv_trap int3
+xen_pv_trap xenint3
+xen_pv_trap nmi
+xen_pv_trap overflow
+xen_pv_trap bounds
+xen_pv_trap invalid_op
+xen_pv_trap device_not_available
+xen_pv_trap double_fault
+xen_pv_trap coprocessor_segment_overrun
+xen_pv_trap invalid_TSS
+xen_pv_trap segment_not_present
+xen_pv_trap stack_segment
+xen_pv_trap general_protection
+xen_pv_trap page_fault
+xen_pv_trap spurious_interrupt_bug
+xen_pv_trap coprocessor_error
+xen_pv_trap alignment_check
+#ifdef CONFIG_X86_MCE
+xen_pv_trap machine_check
+#endif /* CONFIG_X86_MCE */
+xen_pv_trap simd_coprocessor_error
+#ifdef CONFIG_IA32_EMULATION
+xen_pv_trap entry_INT80_compat
+#endif
+xen_pv_trap hypervisor_callback
hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
/*
@@ -46,9 +71,7 @@ hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
*/
ENTRY(xen_iret)
pushq $0
-1: jmp hypercall_iret
-ENDPATCH(xen_iret)
-RELOC(xen_iret, 1b+1)
+ jmp hypercall_iret
ENTRY(xen_sysret64)
/*
@@ -65,9 +88,7 @@ ENTRY(xen_sysret64)
pushq %rcx
pushq $VGCF_in_syscall
-1: jmp hypercall_iret
-ENDPATCH(xen_sysret64)
-RELOC(xen_sysret64, 1b+1)
+ jmp hypercall_iret
/*
* Xen handles syscall callbacks much like ordinary exceptions, which
@@ -82,34 +103,47 @@ RELOC(xen_sysret64, 1b+1)
* rip
* r11
* rsp->rcx
- *
- * In all the entrypoints, we undo all that to make it look like a
- * CPU-generated syscall/sysenter and jump to the normal entrypoint.
*/
-.macro undo_xen_syscall
- mov 0*8(%rsp), %rcx
- mov 1*8(%rsp), %r11
- mov 5*8(%rsp), %rsp
-.endm
-
/* Normal 64-bit system call target */
ENTRY(xen_syscall_target)
- undo_xen_syscall
- jmp entry_SYSCALL_64_after_swapgs
+ popq %rcx
+ popq %r11
+
+ /*
+ * Neither Xen nor the kernel really knows what the old SS and
+ * CS were. The kernel expects __USER_DS and __USER_CS, so
+ * report those values even though Xen will guess its own values.
+ */
+ movq $__USER_DS, 4*8(%rsp)
+ movq $__USER_CS, 1*8(%rsp)
+
+ jmp entry_SYSCALL_64_after_hwframe
ENDPROC(xen_syscall_target)
#ifdef CONFIG_IA32_EMULATION
/* 32-bit compat syscall target */
ENTRY(xen_syscall32_target)
- undo_xen_syscall
- jmp entry_SYSCALL_compat
+ popq %rcx
+ popq %r11
+
+ /*
+ * Neither Xen nor the kernel really knows what the old SS and
+ * CS were. The kernel expects __USER32_DS and __USER32_CS, so
+ * report those values even though Xen will guess its own values.
+ */
+ movq $__USER32_DS, 4*8(%rsp)
+ movq $__USER32_CS, 1*8(%rsp)
+
+ jmp entry_SYSCALL_compat_after_hwframe
ENDPROC(xen_syscall32_target)
/* 32-bit compat sysenter target */
ENTRY(xen_sysenter_target)
- undo_xen_syscall
+ mov 0*8(%rsp), %rcx
+ mov 1*8(%rsp), %r11
+ mov 5*8(%rsp), %rsp
jmp entry_SYSENTER_compat
ENDPROC(xen_sysenter_target)
diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
index 72a8e6adebe6..a7525e95d53f 100644
--- a/arch/x86/xen/xen-head.S
+++ b/arch/x86/xen/xen-head.S
@@ -58,7 +58,7 @@ ENTRY(hypercall_page)
#else
ELFNOTE(Xen, XEN_ELFNOTE_VIRT_BASE, _ASM_PTR __START_KERNEL_map)
/* Map the p2m table to a 512GB-aligned user address. */
- ELFNOTE(Xen, XEN_ELFNOTE_INIT_P2M, .quad PGDIR_SIZE)
+ ELFNOTE(Xen, XEN_ELFNOTE_INIT_P2M, .quad (PUD_SIZE * PTRS_PER_PUD))
#endif
#ifdef CONFIG_XEN_PV
ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, _ASM_PTR startup_xen)
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 0d5004477db6..c8a6d224f7ed 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -129,23 +129,15 @@ static inline void __init xen_efi_init(void)
}
#endif
-/* Declare an asm function, along with symbols needed to make it
- inlineable */
-#define DECL_ASM(ret, name, ...) \
- __visible ret name(__VA_ARGS__); \
- extern char name##_end[] __visible; \
- extern char name##_reloc[] __visible
-
-DECL_ASM(void, xen_irq_enable_direct, void);
-DECL_ASM(void, xen_irq_disable_direct, void);
-DECL_ASM(unsigned long, xen_save_fl_direct, void);
-DECL_ASM(void, xen_restore_fl_direct, unsigned long);
+__visible void xen_irq_enable_direct(void);
+__visible void xen_irq_disable_direct(void);
+__visible unsigned long xen_save_fl_direct(void);
+__visible void xen_restore_fl_direct(unsigned long);
/* These are not functions, and cannot be called normally */
__visible void xen_iret(void);
__visible void xen_sysret32(void);
__visible void xen_sysret64(void);
-__visible void xen_adjust_exception_frame(void);
extern int xen_panic_handler_init(void);
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index 2d716ebc5a5e..dff7cc39437c 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -1,5 +1,6 @@
generic-y += bug.h
generic-y += clkdev.h
+generic-y += device.h
generic-y += div64.h
generic-y += dma-contiguous.h
generic-y += emergency-restart.h
@@ -17,6 +18,7 @@ generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
+generic-y += param.h
generic-y += percpu.h
generic-y += preempt.h
generic-y += rwsem.h
diff --git a/arch/xtensa/include/asm/device.h b/arch/xtensa/include/asm/device.h
deleted file mode 100644
index 1deeb8ebbb1b..000000000000
--- a/arch/xtensa/include/asm/device.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Arch specific extensions to struct device
- *
- * This file is released under the GPLv2
- */
-#ifndef _ASM_XTENSA_DEVICE_H
-#define _ASM_XTENSA_DEVICE_H
-
-struct dev_archdata {
-};
-
-struct pdev_archdata {
-};
-
-#endif /* _ASM_XTENSA_DEVICE_H */
diff --git a/arch/xtensa/include/asm/futex.h b/arch/xtensa/include/asm/futex.h
index b39531babec0..eaaf1ebcc7a4 100644
--- a/arch/xtensa/include/asm/futex.h
+++ b/arch/xtensa/include/asm/futex.h
@@ -44,18 +44,10 @@
: "r" (uaddr), "I" (-EFAULT), "r" (oparg) \
: "memory")
-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+ u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
#if !XCHAL_HAVE_S32C1I
return -ENOSYS;
@@ -89,19 +81,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
pagefault_enable();
- if (ret)
- return ret;
+ if (!ret)
+ *oval = oldval;
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: return (oldval == cmparg);
- case FUTEX_OP_CMP_NE: return (oldval != cmparg);
- case FUTEX_OP_CMP_LT: return (oldval < cmparg);
- case FUTEX_OP_CMP_GE: return (oldval >= cmparg);
- case FUTEX_OP_CMP_LE: return (oldval <= cmparg);
- case FUTEX_OP_CMP_GT: return (oldval > cmparg);
- }
-
- return -ENOSYS;
+ return ret;
}
static inline int
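The xtensa futex change above is part of the tree-wide split in which each architecture only implements arch_futex_atomic_op_inuser() (the atomic operation itself, reporting the old value through *oval), while decoding encoded_op and evaluating the comparison moved into common code. A simplified, non-atomic userspace model of how the generic caller presumably drives the arch hook; the field layout follows the classic FUTEX_OP encoding visible in the removed lines, and this is illustrative rather than the kernel's exact helper:

#include <stdint.h>
#include <stdio.h>

/* Operation and comparison codes, mirroring the classic FUTEX_OP_* values. */
enum { OP_SET, OP_ADD, OP_OR, OP_ANDN, OP_XOR };
enum { CMP_EQ, CMP_NE, CMP_LT, CMP_GE, CMP_LE, CMP_GT };

/* Arch hook: perform the operation, report the old value (non-atomic stand-in). */
static int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, uint32_t *uaddr)
{
	uint32_t old = *uaddr;

	switch (op) {
	case OP_SET:  *uaddr = (uint32_t)oparg;        break;
	case OP_ADD:  *uaddr = old + (uint32_t)oparg;  break;
	case OP_OR:   *uaddr = old | (uint32_t)oparg;  break;
	case OP_ANDN: *uaddr = old & ~(uint32_t)oparg; break;
	case OP_XOR:  *uaddr = old ^ (uint32_t)oparg;  break;
	default:
		return -1;
	}

	*oval = (int)old;
	return 0;
}

/* Generic caller: decode encoded_op, run the arch op, then evaluate the comparison. */
static int futex_atomic_op_inuser(unsigned int encoded_op, uint32_t *uaddr)
{
	int op     = (encoded_op >> 28) & 7;
	int cmp    = (encoded_op >> 24) & 15;
	int oparg  = (int)(encoded_op << 8) >> 20;	/* sign-extend the 12-bit field */
	int cmparg = (int)(encoded_op << 20) >> 20;	/* sign-extend the 12-bit field */
	int oldval, ret;

	if (encoded_op & (8u << 28))	/* FUTEX_OP_OPARG_SHIFT: oparg is a shift count */
		oparg = 1 << oparg;

	ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
	if (ret)
		return ret;

	switch (cmp) {
	case CMP_EQ: return oldval == cmparg;
	case CMP_NE: return oldval != cmparg;
	case CMP_LT: return oldval <  cmparg;
	case CMP_GE: return oldval >= cmparg;
	case CMP_LE: return oldval <= cmparg;
	case CMP_GT: return oldval >  cmparg;
	}
	return -1;
}

int main(void)
{
	uint32_t val = 3;
	/* op = ADD, cmp = GT, oparg = 2, cmparg = 0 */
	unsigned int encoded_op = (1u << 28) | (5u << 24) | (2u << 12);
	int cmp_result = futex_atomic_op_inuser(encoded_op, &val);

	printf("comparison: %d, new value: %u\n", cmp_result, val);
	return 0;
}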
diff --git a/arch/xtensa/include/asm/param.h b/arch/xtensa/include/asm/param.h
deleted file mode 100644
index 0a70e780ef2a..000000000000
--- a/arch/xtensa/include/asm/param.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * include/asm-xtensa/param.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-#ifndef _XTENSA_PARAM_H
-#define _XTENSA_PARAM_H
-
-#include <uapi/asm/param.h>
-
-# define HZ CONFIG_HZ /* internal timer frequency */
-# define USER_HZ 100 /* for user interfaces in "ticks" */
-# define CLOCKS_PER_SEC (USER_HZ) /* frequnzy at which times() counts */
-#endif /* _XTENSA_PARAM_H */
diff --git a/arch/xtensa/include/asm/spinlock.h b/arch/xtensa/include/asm/spinlock.h
index a36221cf6363..3bb49681ee24 100644
--- a/arch/xtensa/include/asm/spinlock.h
+++ b/arch/xtensa/include/asm/spinlock.h
@@ -33,11 +33,6 @@
#define arch_spin_is_locked(x) ((x)->slock != 0)
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
- smp_cond_load_acquire(&lock->slock, !VAL);
-}
-
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
static inline void arch_spin_lock(arch_spinlock_t *lock)
diff --git a/arch/xtensa/include/uapi/asm/ioctls.h b/arch/xtensa/include/uapi/asm/ioctls.h
index 98b004e24e85..47d82c09be7b 100644
--- a/arch/xtensa/include/uapi/asm/ioctls.h
+++ b/arch/xtensa/include/uapi/asm/ioctls.h
@@ -105,7 +105,7 @@
#define TIOCGPKT _IOR('T', 0x38, int) /* Get packet mode state */
#define TIOCGPTLCK _IOR('T', 0x39, int) /* Get Pty lock state */
#define TIOCGEXCL _IOR('T', 0x40, int) /* Get exclusive mode state */
-#define TIOCGPTPEER _IOR('T', 0x41, int) /* Safely open the slave */
+#define TIOCGPTPEER _IO('T', 0x41) /* Safely open the slave */
#define TIOCSERCONFIG _IO('T', 83)
#define TIOCSERGWILD _IOR('T', 84, int)
diff --git a/arch/xtensa/include/uapi/asm/mman.h b/arch/xtensa/include/uapi/asm/mman.h
index 24365b30aae9..b15b278aa314 100644
--- a/arch/xtensa/include/uapi/asm/mman.h
+++ b/arch/xtensa/include/uapi/asm/mman.h
@@ -103,20 +103,12 @@
overrides the coredump filter bits */
#define MADV_DODUMP 17 /* Clear the MADV_NODUMP flag */
+#define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */
+#define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */
+
/* compatibility flags */
#define MAP_FILE 0
-/*
- * When MAP_HUGETLB is set bits [26:31] encode the log2 of the huge page size.
- * This gives us 6 bits, which is enough until someone invents 128 bit address
- * spaces.
- *
- * Assume these are all power of twos.
- * When 0 use the default page size.
- */
-#define MAP_HUGE_SHIFT 26
-#define MAP_HUGE_MASK 0x3f
-
#define PKEY_DISABLE_ACCESS 0x1
#define PKEY_DISABLE_WRITE 0x2
#define PKEY_ACCESS_MASK (PKEY_DISABLE_ACCESS |\
diff --git a/arch/xtensa/include/uapi/asm/socket.h b/arch/xtensa/include/uapi/asm/socket.h
index 3eed2761c149..220059999e74 100644
--- a/arch/xtensa/include/uapi/asm/socket.h
+++ b/arch/xtensa/include/uapi/asm/socket.h
@@ -113,4 +113,6 @@
#define SO_PEERGROUPS 59
+#define SO_ZEROCOPY 60
+
#endif /* _XTENSA_SOCKET_H */
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 33bfa5270d95..08175df7a69e 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -273,8 +273,8 @@ void __init init_arch(bp_tag_t *bp_start)
* Initialize system. Setup memory and reserve regions.
*/
-extern char _end;
-extern char _stext;
+extern char _end[];
+extern char _stext[];
extern char _WindowVectors_text_start;
extern char _WindowVectors_text_end;
extern char _DebugInterruptVector_literal_start;
@@ -333,7 +333,7 @@ void __init setup_arch(char **cmdline_p)
}
#endif
- mem_reserve(__pa(&_stext), __pa(&_end));
+ mem_reserve(__pa(_stext), __pa(_end));
#ifdef CONFIG_VECTORS_OFFSET
mem_reserve(__pa(&_WindowVectors_text_start),
diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c
index d159e9b9c018..672391003e40 100644
--- a/arch/xtensa/kernel/xtensa_ksyms.c
+++ b/arch/xtensa/kernel/xtensa_ksyms.c
@@ -94,13 +94,11 @@ unsigned long __sync_fetch_and_or_4(unsigned long *p, unsigned long v)
}
EXPORT_SYMBOL(__sync_fetch_and_or_4);
-#ifdef CONFIG_NET
/*
* Networking support
*/
EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(csum_partial_copy_generic);
-#endif /* CONFIG_NET */
/*
* Architecture-specific symbols
diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
index 1a804a2f9a5b..3c75c4e597da 100644
--- a/arch/xtensa/mm/cache.c
+++ b/arch/xtensa/mm/cache.c
@@ -103,6 +103,7 @@ void clear_user_highpage(struct page *page, unsigned long vaddr)
clear_page_alias(kvaddr, paddr);
preempt_enable();
}
+EXPORT_SYMBOL(clear_user_highpage);
void copy_user_highpage(struct page *dst, struct page *src,
unsigned long vaddr, struct vm_area_struct *vma)
@@ -119,10 +120,7 @@ void copy_user_highpage(struct page *dst, struct page *src,
copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr);
preempt_enable();
}
-
-#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */
-
-#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+EXPORT_SYMBOL(copy_user_highpage);
/*
* Any time the kernel writes to a user page cache page, or it is about to
@@ -176,7 +174,7 @@ void flush_dcache_page(struct page *page)
/* There shouldn't be an entry in the cache for this page anymore. */
}
-
+EXPORT_SYMBOL(flush_dcache_page);
/*
* For now, flush the whole cache. FIXME??
@@ -188,6 +186,7 @@ void local_flush_cache_range(struct vm_area_struct *vma,
__flush_invalidate_dcache_all();
__invalidate_icache_all();
}
+EXPORT_SYMBOL(local_flush_cache_range);
/*
* Remove any entry in the cache for this page.
@@ -207,8 +206,9 @@ void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address,
__flush_invalidate_dcache_page_alias(virt, phys);
__invalidate_icache_page_alias(virt, phys);
}
+EXPORT_SYMBOL(local_flush_cache_page);
-#endif
+#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */
void
update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
@@ -225,7 +225,7 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
flush_tlb_page(vma, addr);
-#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {
unsigned long phys = page_to_phys(page);
@@ -256,7 +256,7 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
* flush_dcache_page() on the page.
*/
-#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long vaddr, void *dst, const void *src,
diff --git a/block/Kconfig b/block/Kconfig
index 89cd28f8d051..3ab42bbb06d5 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -206,4 +206,9 @@ config BLK_MQ_VIRTIO
depends on BLOCK && VIRTIO
default y
+config BLK_MQ_RDMA
+ bool
+ depends on BLOCK && INFINIBAND
+ default y
+
source block/Kconfig.iosched
diff --git a/block/Makefile b/block/Makefile
index 2b281cf258a0..9396ebc85d24 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_BLK_CMDLINE_PARSER) += cmdline-parser.o
obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o blk-integrity.o t10-pi.o
obj-$(CONFIG_BLK_MQ_PCI) += blk-mq-pci.o
obj-$(CONFIG_BLK_MQ_VIRTIO) += blk-mq-virtio.o
+obj-$(CONFIG_BLK_MQ_RDMA) += blk-mq-rdma.o
obj-$(CONFIG_BLK_DEV_ZONED) += blk-zoned.o
obj-$(CONFIG_BLK_WBT) += blk-wbt.o
obj-$(CONFIG_BLK_DEBUG_FS) += blk-mq-debugfs.o
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 60a6835265fc..6a7a26b6cec1 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -128,7 +128,7 @@ BFQ_BFQQ_FNS(busy);
BFQ_BFQQ_FNS(wait_request);
BFQ_BFQQ_FNS(non_blocking_wait_rq);
BFQ_BFQQ_FNS(fifo_expire);
-BFQ_BFQQ_FNS(idle_window);
+BFQ_BFQQ_FNS(has_short_ttime);
BFQ_BFQQ_FNS(sync);
BFQ_BFQQ_FNS(IO_bound);
BFQ_BFQQ_FNS(in_large_burst);
@@ -731,10 +731,10 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
unsigned int old_wr_coeff = bfqq->wr_coeff;
bool busy = bfq_already_existing && bfq_bfqq_busy(bfqq);
- if (bic->saved_idle_window)
- bfq_mark_bfqq_idle_window(bfqq);
+ if (bic->saved_has_short_ttime)
+ bfq_mark_bfqq_has_short_ttime(bfqq);
else
- bfq_clear_bfqq_idle_window(bfqq);
+ bfq_clear_bfqq_has_short_ttime(bfqq);
if (bic->saved_IO_bound)
bfq_mark_bfqq_IO_bound(bfqq);
@@ -2012,7 +2012,7 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
return;
bic->saved_ttime = bfqq->ttime;
- bic->saved_idle_window = bfq_bfqq_idle_window(bfqq);
+ bic->saved_has_short_ttime = bfq_bfqq_has_short_ttime(bfqq);
bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq);
bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq);
bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node);
@@ -3038,8 +3038,8 @@ void bfq_bfqq_expire(struct bfq_data *bfqd,
}
bfq_log_bfqq(bfqd, bfqq,
- "expire (%d, slow %d, num_disp %d, idle_win %d)", reason,
- slow, bfqq->dispatched, bfq_bfqq_idle_window(bfqq));
+ "expire (%d, slow %d, num_disp %d, short_ttime %d)", reason,
+ slow, bfqq->dispatched, bfq_bfqq_has_short_ttime(bfqq));
/*
* Increase, decrease or leave budget unchanged according to
@@ -3114,7 +3114,10 @@ static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
{
struct bfq_data *bfqd = bfqq->bfqd;
- bool idling_boosts_thr, idling_boosts_thr_without_issues,
+ bool rot_without_queueing =
+ !blk_queue_nonrot(bfqd->queue) && !bfqd->hw_tag,
+ bfqq_sequential_and_IO_bound,
+ idling_boosts_thr, idling_boosts_thr_without_issues,
idling_needed_for_service_guarantees,
asymmetric_scenario;
@@ -3122,27 +3125,45 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
return true;
/*
+ * Idling is performed only if slice_idle > 0. In addition, we
+ * do not idle if
+ * (a) bfqq is async
+ * (b) bfqq is in the idle io prio class: in this case we do
+ * not idle because we want to minimize the bandwidth that
+ * queues in this class can steal to higher-priority queues
+ */
+ if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_sync(bfqq) ||
+ bfq_class_idle(bfqq))
+ return false;
+
+ bfqq_sequential_and_IO_bound = !BFQQ_SEEKY(bfqq) &&
+ bfq_bfqq_IO_bound(bfqq) && bfq_bfqq_has_short_ttime(bfqq);
+
+ /*
* The next variable takes into account the cases where idling
* boosts the throughput.
*
* The value of the variable is computed considering, first, that
* idling is virtually always beneficial for the throughput if:
- * (a) the device is not NCQ-capable, or
- * (b) regardless of the presence of NCQ, the device is rotational
- * and the request pattern for bfqq is I/O-bound and sequential.
+ * (a) the device is not NCQ-capable and rotational, or
+ * (b) regardless of the presence of NCQ, the device is rotational and
+ * the request pattern for bfqq is I/O-bound and sequential, or
+ * (c) regardless of whether it is rotational, the device is
+ * not NCQ-capable and the request pattern for bfqq is
+ * I/O-bound and sequential.
*
* Secondly, and in contrast to the above item (b), idling an
* NCQ-capable flash-based device would not boost the
* throughput even with sequential I/O; rather it would lower
* the throughput in proportion to how fast the device
* is. Accordingly, the next variable is true if any of the
- * above conditions (a) and (b) is true, and, in particular,
- * happens to be false if bfqd is an NCQ-capable flash-based
- * device.
+ * above conditions (a), (b) or (c) is true, and, in
+ * particular, happens to be false if bfqd is an NCQ-capable
+ * flash-based device.
*/
- idling_boosts_thr = !bfqd->hw_tag ||
- (!blk_queue_nonrot(bfqd->queue) && bfq_bfqq_IO_bound(bfqq) &&
- bfq_bfqq_idle_window(bfqq));
+ idling_boosts_thr = rot_without_queueing ||
+ ((!blk_queue_nonrot(bfqd->queue) || !bfqd->hw_tag) &&
+ bfqq_sequential_and_IO_bound);
/*
* The value of the next variable,
@@ -3313,16 +3334,13 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
asymmetric_scenario && !bfq_bfqq_in_large_burst(bfqq);
/*
- * We have now all the components we need to compute the return
- * value of the function, which is true only if both the following
- * conditions hold:
- * 1) bfqq is sync, because idling make sense only for sync queues;
- * 2) idling either boosts the throughput (without issues), or
- * is necessary to preserve service guarantees.
+ * We have now all the components we need to compute the
+ * return value of the function, which is true only if idling
+ * either boosts the throughput (without issues), or is
+ * necessary to preserve service guarantees.
*/
- return bfq_bfqq_sync(bfqq) &&
- (idling_boosts_thr_without_issues ||
- idling_needed_for_service_guarantees);
+ return idling_boosts_thr_without_issues ||
+ idling_needed_for_service_guarantees;
}
/*
@@ -3338,10 +3356,7 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
*/
static bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
{
- struct bfq_data *bfqd = bfqq->bfqd;
-
- return RB_EMPTY_ROOT(&bfqq->sort_list) && bfqd->bfq_slice_idle != 0 &&
- bfq_bfqq_may_idle(bfqq);
+ return RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_bfqq_may_idle(bfqq);
}
/*
@@ -3783,7 +3798,6 @@ bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
case IOPRIO_CLASS_IDLE:
bfqq->new_ioprio_class = IOPRIO_CLASS_IDLE;
bfqq->new_ioprio = 7;
- bfq_clear_bfqq_idle_window(bfqq);
break;
}
@@ -3843,8 +3857,14 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bfq_set_next_ioprio_data(bfqq, bic);
if (is_sync) {
+ /*
+ * No need to mark as has_short_ttime if in
+ * idle_class, because no device idling is performed
+ * for queues in idle class
+ */
if (!bfq_class_idle(bfqq))
- bfq_mark_bfqq_idle_window(bfqq);
+ /* tentatively mark as has_short_ttime */
+ bfq_mark_bfqq_has_short_ttime(bfqq);
bfq_mark_bfqq_sync(bfqq);
bfq_mark_bfqq_just_created(bfqq);
} else
@@ -3985,18 +4005,19 @@ bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq,
blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT);
}
-/*
- * Disable idle window if the process thinks too long or seeks so much that
- * it doesn't matter.
- */
-static void bfq_update_idle_window(struct bfq_data *bfqd,
- struct bfq_queue *bfqq,
- struct bfq_io_cq *bic)
+static void bfq_update_has_short_ttime(struct bfq_data *bfqd,
+ struct bfq_queue *bfqq,
+ struct bfq_io_cq *bic)
{
- int enable_idle;
+ bool has_short_ttime = true;
- /* Don't idle for async or idle io prio class. */
- if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq))
+ /*
+ * No need to update has_short_ttime if bfqq is async or in
+ * idle io prio class, or if bfq_slice_idle is zero, because
+ * no device idling is performed for bfqq in this case.
+ */
+ if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq) ||
+ bfqd->bfq_slice_idle == 0)
return;
/* Idle window just restored, statistics are meaningless. */
@@ -4004,27 +4025,22 @@ static void bfq_update_idle_window(struct bfq_data *bfqd,
bfqd->bfq_wr_min_idle_time))
return;
- enable_idle = bfq_bfqq_idle_window(bfqq);
-
+ /* Think time is infinite if no process is linked to
+ * bfqq. Otherwise check average think time to
+ * decide whether to mark as has_short_ttime
+ */
if (atomic_read(&bic->icq.ioc->active_ref) == 0 ||
- bfqd->bfq_slice_idle == 0 ||
- (bfqd->hw_tag && BFQQ_SEEKY(bfqq) &&
- bfqq->wr_coeff == 1))
- enable_idle = 0;
- else if (bfq_sample_valid(bfqq->ttime.ttime_samples)) {
- if (bfqq->ttime.ttime_mean > bfqd->bfq_slice_idle &&
- bfqq->wr_coeff == 1)
- enable_idle = 0;
- else
- enable_idle = 1;
- }
- bfq_log_bfqq(bfqd, bfqq, "update_idle_window: enable_idle %d",
- enable_idle);
+ (bfq_sample_valid(bfqq->ttime.ttime_samples) &&
+ bfqq->ttime.ttime_mean > bfqd->bfq_slice_idle))
+ has_short_ttime = false;
+
+ bfq_log_bfqq(bfqd, bfqq, "update_has_short_ttime: has_short_ttime %d",
+ has_short_ttime);
- if (enable_idle)
- bfq_mark_bfqq_idle_window(bfqq);
+ if (has_short_ttime)
+ bfq_mark_bfqq_has_short_ttime(bfqq);
else
- bfq_clear_bfqq_idle_window(bfqq);
+ bfq_clear_bfqq_has_short_ttime(bfqq);
}
/*
@@ -4040,14 +4056,12 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bfqq->meta_pending++;
bfq_update_io_thinktime(bfqd, bfqq);
+ bfq_update_has_short_ttime(bfqd, bfqq, bic);
bfq_update_io_seektime(bfqd, bfqq, rq);
- if (bfqq->entity.service > bfq_max_budget(bfqd) / 8 ||
- !BFQQ_SEEKY(bfqq))
- bfq_update_idle_window(bfqd, bfqq, bic);
bfq_log_bfqq(bfqd, bfqq,
- "rq_enqueued: idle_window=%d (seeky %d)",
- bfq_bfqq_idle_window(bfqq), BFQQ_SEEKY(bfqq));
+ "rq_enqueued: has_short_ttime=%d (seeky %d)",
+ bfq_bfqq_has_short_ttime(bfqq), BFQQ_SEEKY(bfqq));
bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
@@ -4299,6 +4313,9 @@ static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
bfq_bfqq_expire(bfqd, bfqq, false,
BFQQE_NO_MORE_REQUESTS);
}
+
+ if (!bfqd->rq_in_driver)
+ bfq_schedule_dispatch(bfqd);
}
static void bfq_put_rq_priv_body(struct bfq_queue *bfqq)
@@ -4784,16 +4801,13 @@ static ssize_t bfq_var_show(unsigned int var, char *page)
return sprintf(page, "%u\n", var);
}
-static ssize_t bfq_var_store(unsigned long *var, const char *page,
- size_t count)
+static void bfq_var_store(unsigned long *var, const char *page)
{
unsigned long new_val;
int ret = kstrtoul(page, 10, &new_val);
if (ret == 0)
*var = new_val;
-
- return count;
}
#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
@@ -4835,7 +4849,7 @@ __FUNC(struct elevator_queue *e, const char *page, size_t count) \
{ \
struct bfq_data *bfqd = e->elevator_data; \
unsigned long uninitialized_var(__data); \
- int ret = bfq_var_store(&__data, (page), count); \
+ bfq_var_store(&__data, (page)); \
if (__data < (MIN)) \
__data = (MIN); \
else if (__data > (MAX)) \
@@ -4846,7 +4860,7 @@ __FUNC(struct elevator_queue *e, const char *page, size_t count) \
*(__PTR) = (u64)__data * NSEC_PER_MSEC; \
else \
*(__PTR) = __data; \
- return ret; \
+ return count; \
}
STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1,
INT_MAX, 2);
@@ -4863,13 +4877,13 @@ static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)\
{ \
struct bfq_data *bfqd = e->elevator_data; \
unsigned long uninitialized_var(__data); \
- int ret = bfq_var_store(&__data, (page), count); \
+ bfq_var_store(&__data, (page)); \
if (__data < (MIN)) \
__data = (MIN); \
else if (__data > (MAX)) \
__data = (MAX); \
*(__PTR) = (u64)__data * NSEC_PER_USEC; \
- return ret; \
+ return count; \
}
USEC_STORE_FUNCTION(bfq_slice_idle_us_store, &bfqd->bfq_slice_idle, 0,
UINT_MAX);
@@ -4880,7 +4894,8 @@ static ssize_t bfq_max_budget_store(struct elevator_queue *e,
{
struct bfq_data *bfqd = e->elevator_data;
unsigned long uninitialized_var(__data);
- int ret = bfq_var_store(&__data, (page), count);
+
+ bfq_var_store(&__data, (page));
if (__data == 0)
bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd);
@@ -4892,7 +4907,7 @@ static ssize_t bfq_max_budget_store(struct elevator_queue *e,
bfqd->bfq_user_max_budget = __data;
- return ret;
+ return count;
}
/*
@@ -4904,7 +4919,8 @@ static ssize_t bfq_timeout_sync_store(struct elevator_queue *e,
{
struct bfq_data *bfqd = e->elevator_data;
unsigned long uninitialized_var(__data);
- int ret = bfq_var_store(&__data, (page), count);
+
+ bfq_var_store(&__data, (page));
if (__data < 1)
__data = 1;
@@ -4915,7 +4931,7 @@ static ssize_t bfq_timeout_sync_store(struct elevator_queue *e,
if (bfqd->bfq_user_max_budget == 0)
bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd);
- return ret;
+ return count;
}
static ssize_t bfq_strict_guarantees_store(struct elevator_queue *e,
@@ -4923,7 +4939,8 @@ static ssize_t bfq_strict_guarantees_store(struct elevator_queue *e,
{
struct bfq_data *bfqd = e->elevator_data;
unsigned long uninitialized_var(__data);
- int ret = bfq_var_store(&__data, (page), count);
+
+ bfq_var_store(&__data, (page));
if (__data > 1)
__data = 1;
@@ -4933,7 +4950,7 @@ static ssize_t bfq_strict_guarantees_store(struct elevator_queue *e,
bfqd->strict_guarantees = __data;
- return ret;
+ return count;
}
static ssize_t bfq_low_latency_store(struct elevator_queue *e,
@@ -4941,7 +4958,8 @@ static ssize_t bfq_low_latency_store(struct elevator_queue *e,
{
struct bfq_data *bfqd = e->elevator_data;
unsigned long uninitialized_var(__data);
- int ret = bfq_var_store(&__data, (page), count);
+
+ bfq_var_store(&__data, (page));
if (__data > 1)
__data = 1;
@@ -4949,7 +4967,7 @@ static ssize_t bfq_low_latency_store(struct elevator_queue *e,
bfq_end_wr(bfqd);
bfqd->low_latency = __data;
- return ret;
+ return count;
}
#define BFQ_ATTR(name) \
@@ -4995,6 +5013,7 @@ static struct elevator_type iosched_bfq_mq = {
.elevator_name = "bfq",
.elevator_owner = THIS_MODULE,
};
+MODULE_ALIAS("bfq-iosched");
static int __init bfq_init(void)
{
@@ -5045,10 +5064,12 @@ static int __init bfq_init(void)
ret = elv_register(&iosched_bfq_mq);
if (ret)
- goto err_pol_unreg;
+ goto slab_kill;
return 0;
+slab_kill:
+ bfq_slab_kill();
err_pol_unreg:
#ifdef CONFIG_BFQ_GROUP_IOSCHED
blkcg_policy_unregister(&blkcg_policy_bfq);
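The bfq-iosched.c changes above rename the per-queue idle_window flag to has_short_ttime and recompute it in bfq_update_has_short_ttime(): a sync queue keeps the flag as long as a process is attached and, once enough think-time samples exist, its mean think time does not exceed bfq_slice_idle. A small standalone sketch of that heuristic; the sample threshold and example values are illustrative:

#include <stdbool.h>
#include <stdio.h>

struct queue_ttime {
	unsigned int samples;		/* think-time samples collected so far */
	unsigned long long mean_ns;	/* current mean think time */
};

static bool has_short_ttime(const struct queue_ttime *tt,
			    unsigned long long slice_idle_ns,
			    bool process_attached)
{
	if (!process_attached)
		return false;			/* think time is effectively infinite */
	if (tt->samples < 80)
		return true;			/* too few samples: keep the tentative mark */
	return tt->mean_ns <= slice_idle_ns;	/* idle only for quick thinkers */
}

int main(void)
{
	struct queue_ttime fast = { .samples = 200, .mean_ns = 2000000ULL };
	struct queue_ttime slow = { .samples = 200, .mean_ns = 20000000ULL };
	unsigned long long slice_idle_ns = 8000000ULL;	/* 8 ms, bfq's default */

	printf("fast queue: %d, slow queue: %d\n",
	       has_short_ttime(&fast, slice_idle_ns, true),
	       has_short_ttime(&slow, slice_idle_ns, true));
	return 0;
}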
diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
index 8fd83b885774..cc4ea8574483 100644
--- a/block/bfq-iosched.h
+++ b/block/bfq-iosched.h
@@ -52,7 +52,7 @@ struct bfq_entity;
struct bfq_service_tree {
/* tree for active entities (i.e., those backlogged) */
struct rb_root active;
- /* tree for idle entities (i.e., not backlogged, with V <= F_i)*/
+ /* tree for idle entities (i.e., not backlogged, with V < F_i)*/
struct rb_root idle;
/* idle entity with minimum F_i */
@@ -71,17 +71,29 @@ struct bfq_service_tree {
*
* bfq_sched_data is the basic scheduler queue. It supports three
* ioprio_classes, and can be used either as a toplevel queue or as an
- * intermediate queue on a hierarchical setup. @next_in_service
- * points to the active entity of the sched_data service trees that
- * will be scheduled next. It is used to reduce the number of steps
- * needed for each hierarchical-schedule update.
+ * intermediate queue in a hierarchical setup.
*
* The supported ioprio_classes are the same as in CFQ, in descending
* priority order, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE.
* Requests from higher priority queues are served before all the
* requests from lower priority queues; among requests of the same
* queue requests are served according to B-WF2Q+.
- * All the fields are protected by the queue lock of the containing bfqd.
+ *
+ * The schedule is implemented by the service trees, plus the field
+ * @next_in_service, which points to the entity on the active trees
+ * that will be served next, if 1) no changes in the schedule occurs
+ * before the current in-service entity is expired, 2) the in-service
+ * queue becomes idle when it expires, and 3) if the entity pointed by
+ * in_service_entity is not a queue, then the in-service child entity
+ * of the entity pointed by in_service_entity becomes idle on
+ * expiration. This peculiar definition allows for the following
+ * optimization, not yet exploited: while a given entity is still in
+ * service, we already know which is the best candidate for next
+ * service among the other active entities in the same parent
+ * entity. We can then quickly compare the timestamps of the
+ * in-service entity with those of such best candidate.
+ *
+ * All fields are protected by the lock of the containing bfqd.
*/
struct bfq_sched_data {
/* entity in service */
@@ -348,11 +360,11 @@ struct bfq_io_cq {
uint64_t blkcg_serial_nr; /* the current blkcg serial */
#endif
/*
- * Snapshot of the idle window before merging; taken to
- * remember this value while the queue is merged, so as to be
- * able to restore it in case of split.
+ * Snapshot of the has_short_time flag before merging; taken
+ * to remember its value while the queue is merged, so as to
+ * be able to restore it in case of split.
*/
- bool saved_idle_window;
+ bool saved_has_short_ttime;
/*
* Same purpose as the previous two fields for the I/O bound
* classification of a queue.
@@ -626,7 +638,7 @@ enum bfqq_state_flags {
* without idling the device
*/
BFQQF_fifo_expire, /* FIFO checked in this slice */
- BFQQF_idle_window, /* slice idling enabled */
+ BFQQF_has_short_ttime, /* queue has a short think time */
BFQQF_sync, /* synchronous queue */
BFQQF_IO_bound, /*
* bfqq has timed-out at least once
@@ -655,7 +667,7 @@ BFQ_BFQQ_FNS(busy);
BFQ_BFQQ_FNS(wait_request);
BFQ_BFQQ_FNS(non_blocking_wait_rq);
BFQ_BFQQ_FNS(fifo_expire);
-BFQ_BFQQ_FNS(idle_window);
+BFQ_BFQQ_FNS(has_short_ttime);
BFQ_BFQQ_FNS(sync);
BFQ_BFQQ_FNS(IO_bound);
BFQ_BFQQ_FNS(in_large_burst);
@@ -917,13 +929,16 @@ void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq);
struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \
- blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s " fmt, (bfqq)->pid,\
- bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
- bfqq_group(bfqq)->blkg_path, ##args); \
+ blk_add_cgroup_trace_msg((bfqd)->queue, \
+ bfqg_to_blkg(bfqq_group(bfqq))->blkcg, \
+ "bfq%d%c " fmt, (bfqq)->pid, \
+ bfq_bfqq_sync((bfqq)) ? 'S' : 'A', ##args); \
} while (0)
-#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) \
- blk_add_trace_msg((bfqd)->queue, "%s " fmt, (bfqg)->blkg_path, ##args)
+#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \
+ blk_add_cgroup_trace_msg((bfqd)->queue, \
+ bfqg_to_blkg(bfqg)->blkcg, fmt, ##args); \
+} while (0)
#else /* CONFIG_BFQ_GROUP_IOSCHED */
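For illustration, a hedged sketch of call sites for the reworked logging macros: the blkcg is now resolved inside blk_add_cgroup_trace_msg() rather than from a pre-rendered blkg_path buffer. The message text is illustrative only; bfqd, bfqq and bfqg are assumed to be valid at the call site.

	bfq_log_bfqq(bfqd, bfqq, "expire, budget exhausted");
	bfq_log_bfqg(bfqd, bfqg, "%d active entities", bfqg->active_entities);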
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
index 5ec05cd42b80..911aa7431dbe 100644
--- a/block/bfq-wf2q.c
+++ b/block/bfq-wf2q.c
@@ -188,21 +188,23 @@ static bool bfq_update_parent_budget(struct bfq_entity *next_in_service)
/*
* This function tells whether entity stops being a candidate for next
- * service, according to the following logic.
+ * service, according to the restrictive definition of the field
+ * next_in_service. In particular, this function is invoked for an
+ * entity that is about to be set in service.
*
- * This function is invoked for an entity that is about to be set in
- * service. If such an entity is a queue, then the entity is no longer
- * a candidate for next service (i.e, a candidate entity to serve
- * after the in-service entity is expired). The function then returns
- * true.
+ * If entity is a queue, then the entity is no longer a candidate for
+ * next service according to that definition, because entity is
+ * about to become the in-service queue. This function then returns
+ * true if entity is a queue.
*
- * In contrast, the entity could stil be a candidate for next service
- * if it is not a queue, and has more than one child. In fact, even if
- * one of its children is about to be set in service, other children
- * may still be the next to serve. As a consequence, a non-queue
- * entity is not a candidate for next-service only if it has only one
- * child. And only if this condition holds, then the function returns
- * true for a non-queue entity.
+ * In contrast, entity could still be a candidate for next service if
+ * it is not a queue, and has more than one active child. In fact,
+ * even if one of its children is about to be set in service, other
+ * active children may still be the next to serve, for the parent
+ * entity, even according to the above definition. As a consequence, a
+ * non-queue entity is not a candidate for next-service only if it has
+ * only one active child. And only if this condition holds, then this
+ * function returns true for a non-queue entity.
*/
static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
{
@@ -213,6 +215,18 @@ static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
bfqg = container_of(entity, struct bfq_group, entity);
+ /*
+ * The field active_entities does not always contain the
+ * actual number of active children entities: it happens to
+ * not account for the in-service entity in case the latter is
+ * removed from its active tree (which may get done after
+ * invoking the function bfq_no_longer_next_in_service in
+ * bfq_get_next_queue). Fortunately, here, i.e., while
+ * bfq_no_longer_next_in_service is not yet completed in
+ * bfq_get_next_queue, bfq_active_extract has not yet been
+ * invoked, and thus active_entities still coincides with the
+ * actual number of active entities.
+ */
if (bfqg->active_entities == 1)
return true;
@@ -954,7 +968,7 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
* one of its children receives a new request.
*
* Basically, this function updates the timestamps of entity and
- * inserts entity into its active tree, ater possible extracting it
+ * inserts entity into its active tree, after possibly extracting it
* from its idle tree.
*/
static void __bfq_activate_entity(struct bfq_entity *entity,
@@ -1048,7 +1062,7 @@ static void __bfq_requeue_entity(struct bfq_entity *entity)
entity->start = entity->finish;
/*
* In addition, if the entity had more than one child
- * when set in service, then was not extracted from
+ * when set in service, then it was not extracted from
* the active tree. This implies that the position of
* the entity in the active tree may need to be
* changed now, because we have just updated the start
@@ -1056,9 +1070,8 @@ static void __bfq_requeue_entity(struct bfq_entity *entity)
* time in a moment (the requeueing is then, more
* precisely, a repositioning in this case). To
* implement this repositioning, we: 1) dequeue the
- * entity here, 2) update the finish time and
- * requeue the entity according to the new
- * timestamps below.
+ * entity here, 2) update the finish time and requeue
+ * the entity according to the new timestamps below.
*/
if (entity->tree)
bfq_active_extract(st, entity);
@@ -1105,9 +1118,10 @@ static void __bfq_activate_requeue_entity(struct bfq_entity *entity,
/**
- * bfq_activate_entity - activate or requeue an entity representing a bfq_queue,
- * and activate, requeue or reposition all ancestors
- * for which such an update becomes necessary.
+ * bfq_activate_requeue_entity - activate or requeue an entity representing a
+ * bfq_queue, and activate, requeue or reposition
+ * all ancestors for which such an update becomes
+ * necessary.
* @entity: the entity to activate.
* @non_blocking_wait_rq: true if this entity was waiting for a request
* @requeue: true if this is a requeue, which implies that bfqq is
@@ -1135,9 +1149,9 @@ static void bfq_activate_requeue_entity(struct bfq_entity *entity,
* @ins_into_idle_tree: if false, the entity will not be put into the
* idle tree.
*
- * Deactivates an entity, independently from its previous state. Must
+ * Deactivates an entity, independently of its previous state. Must
* be invoked only if entity is on a service tree. Extracts the entity
- * from that tree, and if necessary and allowed, puts it on the idle
+ * from that tree, and if necessary and allowed, puts it into the idle
* tree.
*/
bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
@@ -1158,8 +1172,10 @@ bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
st = bfq_entity_service_tree(entity);
is_in_service = entity == sd->in_service_entity;
- if (is_in_service)
+ if (is_in_service) {
bfq_calc_finish(entity, entity->service);
+ sd->in_service_entity = NULL;
+ }
if (entity->tree == &st->active)
bfq_active_extract(st, entity);
@@ -1177,7 +1193,7 @@ bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
/**
* bfq_deactivate_entity - deactivate an entity representing a bfq_queue.
* @entity: the entity to deactivate.
- * @ins_into_idle_tree: true if the entity can be put on the idle tree
+ * @ins_into_idle_tree: true if the entity can be put into the idle tree
*/
static void bfq_deactivate_entity(struct bfq_entity *entity,
bool ins_into_idle_tree,
@@ -1208,16 +1224,29 @@ static void bfq_deactivate_entity(struct bfq_entity *entity,
*/
bfq_update_next_in_service(sd, NULL);
- if (sd->next_in_service)
+ if (sd->next_in_service || sd->in_service_entity) {
/*
- * The parent entity is still backlogged,
- * because next_in_service is not NULL. So, no
- * further upwards deactivation must be
- * performed. Yet, next_in_service has
- * changed. Then the schedule does need to be
- * updated upwards.
+ * The parent entity is still active, because
+ * either next_in_service or in_service_entity
+ * is not NULL. So, no further upwards
+ * deactivation must be performed. Yet,
+ * next_in_service has changed. Then the
+ * schedule does need to be updated upwards.
+ *
+ * NOTE If in_service_entity is not NULL, then
+ * next_in_service may happen to be NULL,
+ * although the parent entity is evidently
+ * active. This happens if 1) the entity
+ * pointed by in_service_entity is the only
+ * active entity in the parent entity, and 2)
+ * according to the definition of
+ * next_in_service, the in_service_entity
+ * cannot be considered as
+ * next_in_service. See the comments on the
+ * definition of next_in_service for details.
*/
break;
+ }
/*
* If we get here, then the parent is no more
@@ -1297,7 +1326,7 @@ static void bfq_update_vtime(struct bfq_service_tree *st, u64 new_value)
*
* This function searches the first schedulable entity, starting from the
* root of the tree and going on the left every time on this side there is
- * a subtree with at least one eligible (start >= vtime) entity. The path on
+ * a subtree with at least one eligible (start <= vtime) entity. The path on
* the right is followed only if a) the left subtree contains no eligible
* entities and b) no eligible entity has been found yet.
*/
@@ -1494,47 +1523,34 @@ struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
/*
* If entity is no longer a candidate for next
- * service, then we extract it from its active tree,
- * for the following reason. To further boost the
- * throughput in some special case, BFQ needs to know
- * which is the next candidate entity to serve, while
- * there is already an entity in service. In this
- * respect, to make it easy to compute/update the next
- * candidate entity to serve after the current
- * candidate has been set in service, there is a case
- * where it is necessary to extract the current
- * candidate from its service tree. Such a case is
- * when the entity just set in service cannot be also
- * a candidate for next service. Details about when
- * this conditions holds are reported in the comments
- * on the function bfq_no_longer_next_in_service()
- * invoked below.
+ * service, then it must be extracted from its active
+ * tree, so as to make sure that it won't be
+ * considered when computing next_in_service. See the
+ * comments on the function
+ * bfq_no_longer_next_in_service() for details.
*/
if (bfq_no_longer_next_in_service(entity))
bfq_active_extract(bfq_entity_service_tree(entity),
entity);
/*
- * For the same reason why we may have just extracted
- * entity from its active tree, we may need to update
- * next_in_service for the sched_data of entity too,
- * regardless of whether entity has been extracted.
- * In fact, even if entity has not been extracted, a
- * descendant entity may get extracted. Such an event
- * would cause a change in next_in_service for the
- * level of the descendant entity, and thus possibly
- * back to upper levels.
+ * Even if entity is not to be extracted according to
+ * the above check, a descendant entity may get
+ * extracted in one of the next iterations of this
+ * loop. Such an event could cause a change in
+ * next_in_service for the level of the descendant
+ * entity, and thus possibly back to this level.
*
- * We cannot perform the resulting needed update
- * before the end of this loop, because, to know which
- * is the correct next-to-serve candidate entity for
- * each level, we need first to find the leaf entity
- * to set in service. In fact, only after we know
- * which is the next-to-serve leaf entity, we can
- * discover whether the parent entity of the leaf
- * entity becomes the next-to-serve, and so on.
+ * However, we cannot perform the resulting needed
+ * update of next_in_service for this level before the
+ * end of the whole loop, because, to know which is
+ * the correct next-to-serve candidate entity for each
+ * level, we need first to find the leaf entity to set
+ * in service. In fact, only after we know which is
+ * the next-to-serve leaf entity, we can discover
+ * whether the parent entity of the leaf entity
+ * becomes the next-to-serve, and so on.
*/
-
}
bfqq = bfq_entity_to_bfqq(entity);
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 83e92beb3c9f..5df32907ff3b 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -146,7 +146,7 @@ int bio_integrity_add_page(struct bio *bio, struct page *page,
iv = bip->bip_vec + bip->bip_vcnt;
if (bip->bip_vcnt &&
- bvec_gap_to_prev(bdev_get_queue(bio->bi_bdev),
+ bvec_gap_to_prev(bio->bi_disk->queue,
&bip->bip_vec[bip->bip_vcnt - 1], offset))
return 0;
@@ -190,7 +190,7 @@ static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
static blk_status_t bio_integrity_process(struct bio *bio,
struct bvec_iter *proc_iter, integrity_processing_fn *proc_fn)
{
- struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
+ struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
struct blk_integrity_iter iter;
struct bvec_iter bviter;
struct bio_vec bv;
@@ -199,7 +199,7 @@ static blk_status_t bio_integrity_process(struct bio *bio,
void *prot_buf = page_address(bip->bip_vec->bv_page) +
bip->bip_vec->bv_offset;
- iter.disk_name = bio->bi_bdev->bd_disk->disk_name;
+ iter.disk_name = bio->bi_disk->disk_name;
iter.interval = 1 << bi->interval_exp;
iter.seed = proc_iter->bi_sector;
iter.prot_buf = prot_buf;
@@ -236,8 +236,8 @@ static blk_status_t bio_integrity_process(struct bio *bio,
bool bio_integrity_prep(struct bio *bio)
{
struct bio_integrity_payload *bip;
- struct blk_integrity *bi;
- struct request_queue *q;
+ struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
+ struct request_queue *q = bio->bi_disk->queue;
void *buf;
unsigned long start, end;
unsigned int len, nr_pages;
@@ -245,8 +245,9 @@ bool bio_integrity_prep(struct bio *bio)
unsigned int intervals;
blk_status_t status;
- bi = bdev_get_integrity(bio->bi_bdev);
- q = bdev_get_queue(bio->bi_bdev);
+ if (!bi)
+ return true;
+
if (bio_op(bio) != REQ_OP_READ && bio_op(bio) != REQ_OP_WRITE)
return true;
@@ -257,9 +258,6 @@ bool bio_integrity_prep(struct bio *bio)
if (bio_integrity(bio))
return true;
- if (bi == NULL)
- return true;
-
if (bio_data_dir(bio) == READ) {
if (!bi->profile->verify_fn ||
!(bi->flags & BLK_INTEGRITY_VERIFY))
@@ -354,7 +352,7 @@ static void bio_integrity_verify_fn(struct work_struct *work)
struct bio_integrity_payload *bip =
container_of(work, struct bio_integrity_payload, bip_work);
struct bio *bio = bip->bip_bio;
- struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
+ struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
struct bvec_iter iter = bio->bi_iter;
/*
@@ -387,9 +385,11 @@ static void bio_integrity_verify_fn(struct work_struct *work)
*/
bool __bio_integrity_endio(struct bio *bio)
{
- if (bio_op(bio) == REQ_OP_READ && !bio->bi_status) {
- struct bio_integrity_payload *bip = bio_integrity(bio);
+ struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
+ struct bio_integrity_payload *bip = bio_integrity(bio);
+ if (bio_op(bio) == REQ_OP_READ && !bio->bi_status &&
+ (bip->bip_flags & BIP_BLOCK_INTEGRITY) && bi->profile->verify_fn) {
INIT_WORK(&bip->bip_work, bio_integrity_verify_fn);
queue_work(kintegrityd_wq, &bip->bip_work);
return false;
@@ -411,7 +411,7 @@ bool __bio_integrity_endio(struct bio *bio)
void bio_integrity_advance(struct bio *bio, unsigned int bytes_done)
{
struct bio_integrity_payload *bip = bio_integrity(bio);
- struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
+ struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);
bip->bip_iter.bi_sector += bytes_done >> 9;
@@ -428,7 +428,7 @@ EXPORT_SYMBOL(bio_integrity_advance);
void bio_integrity_trim(struct bio *bio)
{
struct bio_integrity_payload *bip = bio_integrity(bio);
- struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
+ struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
}
diff --git a/block/bio.c b/block/bio.c
index 9a63597aaacc..b38e962fa83e 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -593,10 +593,10 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio));
/*
- * most users will be overriding ->bi_bdev with a new target,
+ * most users will be overriding ->bi_disk with a new target,
* so we don't set nor calculate new physical/hw segment counts here
*/
- bio->bi_bdev = bio_src->bi_bdev;
+ bio->bi_disk = bio_src->bi_disk;
bio_set_flag(bio, BIO_CLONED);
bio->bi_opf = bio_src->bi_opf;
bio->bi_write_hint = bio_src->bi_write_hint;
@@ -681,7 +681,7 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
if (!bio)
return NULL;
- bio->bi_bdev = bio_src->bi_bdev;
+ bio->bi_disk = bio_src->bi_disk;
bio->bi_opf = bio_src->bi_opf;
bio->bi_write_hint = bio_src->bi_write_hint;
bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
@@ -936,6 +936,10 @@ static void submit_bio_wait_endio(struct bio *bio)
*
* Simple wrapper around submit_bio(). Returns 0 on success, or the error from
* bio_endio() on failure.
+ *
+ * WARNING: Unlike how submit_bio() is usually used, this function does not
+ * consume the bio reference. The caller must drop the reference on its
+ * own.
*/
int submit_bio_wait(struct bio *bio)
{
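A minimal sketch of the calling convention the new warning describes, assuming bdev is a valid block_device and using the bio_set_dev() helper introduced later in this series; unlike plain submit_bio(), the synchronous wrapper leaves the bio reference with the caller:

static int example_flush(struct block_device *bdev)
{
	struct bio *bio;
	int ret;

	bio = bio_alloc(GFP_KERNEL, 0);
	bio_set_dev(bio, bdev);
	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

	ret = submit_bio_wait(bio);	/* waits for completion, reference survives */
	bio_put(bio);			/* the caller drops the reference itself */
	return ret;
}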
@@ -1732,29 +1736,29 @@ void bio_check_pages_dirty(struct bio *bio)
}
}
-void generic_start_io_acct(int rw, unsigned long sectors,
- struct hd_struct *part)
+void generic_start_io_acct(struct request_queue *q, int rw,
+ unsigned long sectors, struct hd_struct *part)
{
int cpu = part_stat_lock();
- part_round_stats(cpu, part);
+ part_round_stats(q, cpu, part);
part_stat_inc(cpu, part, ios[rw]);
part_stat_add(cpu, part, sectors[rw], sectors);
- part_inc_in_flight(part, rw);
+ part_inc_in_flight(q, part, rw);
part_stat_unlock();
}
EXPORT_SYMBOL(generic_start_io_acct);
-void generic_end_io_acct(int rw, struct hd_struct *part,
- unsigned long start_time)
+void generic_end_io_acct(struct request_queue *q, int rw,
+ struct hd_struct *part, unsigned long start_time)
{
unsigned long duration = jiffies - start_time;
int cpu = part_stat_lock();
part_stat_add(cpu, part, ticks[rw], duration);
- part_round_stats(cpu, part);
- part_dec_in_flight(part, rw);
+ part_round_stats(q, cpu, part);
+ part_dec_in_flight(q, part, rw);
part_stat_unlock();
}
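A hedged sketch of how a bio-based driver's make_request path would look after this signature change; the mydev structure and its gendisk pointer are hypothetical:

static blk_qc_t mydev_make_request(struct request_queue *q, struct bio *bio)
{
	struct mydev *dev = q->queuedata;
	unsigned long start_time = jiffies;
	int rw = bio_data_dir(bio);

	generic_start_io_acct(q, rw, bio_sectors(bio), &dev->disk->part0);

	/* ... carry out the transfer ... */

	generic_end_io_acct(q, rw, &dev->disk->part0, start_time);
	bio_endio(bio);
	return BLK_QC_T_NONE;
}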
@@ -1826,8 +1830,8 @@ again:
goto again;
}
- if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
- trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio,
+ if (bio->bi_disk && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
+ trace_block_bio_complete(bio->bi_disk->queue, bio,
blk_status_to_errno(bio->bi_status));
bio_clear_flag(bio, BIO_TRACE_COMPLETION);
}
@@ -2085,7 +2089,7 @@ void bio_clone_blkcg_association(struct bio *dst, struct bio *src)
if (src->bi_css)
WARN_ON(bio_associate_blkcg(dst, src->bi_css));
}
-
+EXPORT_SYMBOL_GPL(bio_clone_blkcg_association);
#endif /* CONFIG_BLK_CGROUP */
static void __init biovec_init_slabs(void)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 0480892e97e5..d3f56baee936 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1067,7 +1067,7 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
if (!blkcg) {
ret = ERR_PTR(-ENOMEM);
- goto free_blkcg;
+ goto unlock;
}
}
@@ -1111,8 +1111,10 @@ free_pd_blkcg:
for (i--; i >= 0; i--)
if (blkcg->cpd[i])
blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
-free_blkcg:
- kfree(blkcg);
+
+ if (blkcg != &blkcg_root)
+ kfree(blkcg);
+unlock:
mutex_unlock(&blkcg_pol_mutex);
return ret;
}
diff --git a/block/blk-core.c b/block/blk-core.c
index 970b9c9638c5..d709c0e3a2ac 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -280,7 +280,7 @@ EXPORT_SYMBOL(blk_start_queue_async);
void blk_start_queue(struct request_queue *q)
{
lockdep_assert_held(q->queue_lock);
- WARN_ON(!irqs_disabled());
+ WARN_ON(!in_interrupt() && !irqs_disabled());
WARN_ON_ONCE(q->mq_ops);
queue_flag_clear(QUEUE_FLAG_STOPPED, q);
@@ -1469,15 +1469,10 @@ static void add_acct_request(struct request_queue *q, struct request *rq,
__elv_add_request(q, rq, where);
}
-static void part_round_stats_single(int cpu, struct hd_struct *part,
- unsigned long now)
+static void part_round_stats_single(struct request_queue *q, int cpu,
+ struct hd_struct *part, unsigned long now,
+ unsigned int inflight)
{
- int inflight;
-
- if (now == part->stamp)
- return;
-
- inflight = part_in_flight(part);
if (inflight) {
__part_stat_add(cpu, part, time_in_queue,
inflight * (now - part->stamp));
@@ -1488,6 +1483,7 @@ static void part_round_stats_single(int cpu, struct hd_struct *part,
/**
* part_round_stats() - Round off the performance stats on a struct disk_stats.
+ * @q: target block queue
* @cpu: cpu number for stats access
* @part: target partition
*
@@ -1502,13 +1498,31 @@ static void part_round_stats_single(int cpu, struct hd_struct *part,
* /proc/diskstats. This accounts immediately for all queue usage up to
* the current jiffies and restarts the counters again.
*/
-void part_round_stats(int cpu, struct hd_struct *part)
+void part_round_stats(struct request_queue *q, int cpu, struct hd_struct *part)
{
+ struct hd_struct *part2 = NULL;
unsigned long now = jiffies;
+ unsigned int inflight[2];
+ int stats = 0;
+
+ if (part->stamp != now)
+ stats |= 1;
- if (part->partno)
- part_round_stats_single(cpu, &part_to_disk(part)->part0, now);
- part_round_stats_single(cpu, part, now);
+ if (part->partno) {
+ part2 = &part_to_disk(part)->part0;
+ if (part2->stamp != now)
+ stats |= 2;
+ }
+
+ if (!stats)
+ return;
+
+ part_in_flight(q, part, inflight);
+
+ if (stats & 2)
+ part_round_stats_single(q, cpu, part2, now, inflight[1]);
+ if (stats & 1)
+ part_round_stats_single(q, cpu, part, now, inflight[0]);
}
EXPORT_SYMBOL_GPL(part_round_stats);
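The calling pattern stays the same apart from the extra queue argument; a short sketch mirroring the accounting sites updated later in this patch (the wrapper name is hypothetical, req is a completed request):

static void example_account_done(struct request *req)
{
	struct hd_struct *part = req->part;
	int cpu;

	cpu = part_stat_lock();
	part_round_stats(req->q, cpu, part);	/* fold elapsed time into the stats */
	part_dec_in_flight(req->q, part, rq_data_dir(req));
	part_stat_unlock();
}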
@@ -1896,40 +1910,15 @@ out_unlock:
return BLK_QC_T_NONE;
}
-/*
- * If bio->bi_dev is a partition, remap the location
- */
-static inline void blk_partition_remap(struct bio *bio)
-{
- struct block_device *bdev = bio->bi_bdev;
-
- /*
- * Zone reset does not include bi_size so bio_sectors() is always 0.
- * Include a test for the reset op code and perform the remap if needed.
- */
- if (bdev != bdev->bd_contains &&
- (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET)) {
- struct hd_struct *p = bdev->bd_part;
-
- bio->bi_iter.bi_sector += p->start_sect;
- bio->bi_bdev = bdev->bd_contains;
-
- trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio,
- bdev->bd_dev,
- bio->bi_iter.bi_sector - p->start_sect);
- }
-}
-
static void handle_bad_sector(struct bio *bio)
{
char b[BDEVNAME_SIZE];
printk(KERN_INFO "attempt to access beyond end of device\n");
printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n",
- bdevname(bio->bi_bdev, b),
- bio->bi_opf,
+ bio_devname(bio, b), bio->bi_opf,
(unsigned long long)bio_end_sector(bio),
- (long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
+ (long long)get_capacity(bio->bi_disk));
}
#ifdef CONFIG_FAIL_MAKE_REQUEST
@@ -1968,6 +1957,38 @@ static inline bool should_fail_request(struct hd_struct *part,
#endif /* CONFIG_FAIL_MAKE_REQUEST */
/*
+ * Remap block n of partition p to block n+start(p) of the disk.
+ */
+static inline int blk_partition_remap(struct bio *bio)
+{
+ struct hd_struct *p;
+ int ret = 0;
+
+ /*
+ * Zone reset does not include bi_size so bio_sectors() is always 0.
+ * Include a test for the reset op code and perform the remap if needed.
+ */
+ if (!bio->bi_partno ||
+ (!bio_sectors(bio) && bio_op(bio) != REQ_OP_ZONE_RESET))
+ return 0;
+
+ rcu_read_lock();
+ p = __disk_get_part(bio->bi_disk, bio->bi_partno);
+ if (likely(p && !should_fail_request(p, bio->bi_iter.bi_size))) {
+ bio->bi_iter.bi_sector += p->start_sect;
+ bio->bi_partno = 0;
+ trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p),
+ bio->bi_iter.bi_sector - p->start_sect);
+ } else {
+ printk("%s: fail for partition %d\n", __func__, bio->bi_partno);
+ ret = -EIO;
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+
+/*
* Check whether this bio extends beyond the end of the device.
*/
static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
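A short worked example of the remap above, with illustrative numbers only:

/*
 * A bio addressed to sector 100 of a partition that starts at disk
 * sector 2048 leaves blk_partition_remap() as an absolute disk bio:
 *
 *   bio->bi_partno:           2  ->  0
 *   bio->bi_iter.bi_sector: 100  ->  100 + 2048 = 2148
 *
 * Zero-sized bios are left alone unless they are REQ_OP_ZONE_RESET,
 * matching the early return at the top of the function.
 */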
@@ -1978,7 +1999,7 @@ static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
return 0;
/* Test device or partition size, when known. */
- maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
+ maxsector = get_capacity(bio->bi_disk);
if (maxsector) {
sector_t sector = bio->bi_iter.bi_sector;
@@ -2003,20 +2024,18 @@ generic_make_request_checks(struct bio *bio)
int nr_sectors = bio_sectors(bio);
blk_status_t status = BLK_STS_IOERR;
char b[BDEVNAME_SIZE];
- struct hd_struct *part;
might_sleep();
if (bio_check_eod(bio, nr_sectors))
goto end_io;
- q = bdev_get_queue(bio->bi_bdev);
+ q = bio->bi_disk->queue;
if (unlikely(!q)) {
printk(KERN_ERR
"generic_make_request: Trying to access "
"nonexistent block-device %s (%Lu)\n",
- bdevname(bio->bi_bdev, b),
- (long long) bio->bi_iter.bi_sector);
+ bio_devname(bio, b), (long long)bio->bi_iter.bi_sector);
goto end_io;
}
@@ -2028,17 +2047,11 @@ generic_make_request_checks(struct bio *bio)
if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_rq_based(q))
goto not_supported;
- part = bio->bi_bdev->bd_part;
- if (should_fail_request(part, bio->bi_iter.bi_size) ||
- should_fail_request(&part_to_disk(part)->part0,
- bio->bi_iter.bi_size))
+ if (should_fail_request(&bio->bi_disk->part0, bio->bi_iter.bi_size))
goto end_io;
- /*
- * If this device has partitions, remap block n
- * of partition p to block n+start(p) of the disk.
- */
- blk_partition_remap(bio);
+ if (blk_partition_remap(bio))
+ goto end_io;
if (bio_check_eod(bio, nr_sectors))
goto end_io;
@@ -2067,16 +2080,16 @@ generic_make_request_checks(struct bio *bio)
goto not_supported;
break;
case REQ_OP_WRITE_SAME:
- if (!bdev_write_same(bio->bi_bdev))
+ if (!q->limits.max_write_same_sectors)
goto not_supported;
break;
case REQ_OP_ZONE_REPORT:
case REQ_OP_ZONE_RESET:
- if (!bdev_is_zoned(bio->bi_bdev))
+ if (!blk_queue_is_zoned(q))
goto not_supported;
break;
case REQ_OP_WRITE_ZEROES:
- if (!bdev_write_zeroes_sectors(bio->bi_bdev))
+ if (!q->limits.max_write_zeroes_sectors)
goto not_supported;
break;
default:
@@ -2183,7 +2196,7 @@ blk_qc_t generic_make_request(struct bio *bio)
bio_list_init(&bio_list_on_stack[0]);
current->bio_list = bio_list_on_stack;
do {
- struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+ struct request_queue *q = bio->bi_disk->queue;
if (likely(blk_queue_enter(q, bio->bi_opf & REQ_NOWAIT) == 0)) {
struct bio_list lower, same;
@@ -2201,7 +2214,7 @@ blk_qc_t generic_make_request(struct bio *bio)
bio_list_init(&lower);
bio_list_init(&same);
while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
- if (q == bdev_get_queue(bio->bi_bdev))
+ if (q == bio->bi_disk->queue)
bio_list_add(&same, bio);
else
bio_list_add(&lower, bio);
@@ -2244,7 +2257,7 @@ blk_qc_t submit_bio(struct bio *bio)
unsigned int count;
if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
- count = bdev_logical_block_size(bio->bi_bdev) >> 9;
+ count = queue_logical_block_size(bio->bi_disk->queue);
else
count = bio_sectors(bio);
@@ -2261,8 +2274,7 @@ blk_qc_t submit_bio(struct bio *bio)
current->comm, task_pid_nr(current),
op_is_write(bio_op(bio)) ? "WRITE" : "READ",
(unsigned long long)bio->bi_iter.bi_sector,
- bdevname(bio->bi_bdev, b),
- count);
+ bio_devname(bio, b), count);
}
}
@@ -2431,8 +2443,8 @@ void blk_account_io_done(struct request *req)
part_stat_inc(cpu, part, ios[rw]);
part_stat_add(cpu, part, ticks[rw], duration);
- part_round_stats(cpu, part);
- part_dec_in_flight(part, rw);
+ part_round_stats(req->q, cpu, part);
+ part_dec_in_flight(req->q, part, rw);
hd_struct_put(part);
part_stat_unlock();
@@ -2489,8 +2501,8 @@ void blk_account_io_start(struct request *rq, bool new_io)
part = &rq->rq_disk->part0;
hd_struct_get(part);
}
- part_round_stats(cpu, part);
- part_inc_in_flight(part, rw);
+ part_round_stats(rq->q, cpu, part);
+ part_inc_in_flight(rq->q, part, rw);
rq->part = part;
}
@@ -2603,7 +2615,7 @@ struct request *blk_peek_request(struct request_queue *q)
}
EXPORT_SYMBOL(blk_peek_request);
-void blk_dequeue_request(struct request *rq)
+static void blk_dequeue_request(struct request *rq)
{
struct request_queue *q = rq->q;
@@ -2630,9 +2642,6 @@ void blk_dequeue_request(struct request *rq)
* Description:
* Dequeue @req and start timeout timer on it. This hands off the
* request to the driver.
- *
- * Block internal functions which don't want to start timer should
- * call blk_dequeue_request().
*/
void blk_start_request(struct request *req)
{
@@ -3035,8 +3044,8 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
rq->__data_len = bio->bi_iter.bi_size;
rq->bio = rq->biotail = bio;
- if (bio->bi_bdev)
- rq->rq_disk = bio->bi_bdev->bd_disk;
+ if (bio->bi_disk)
+ rq->rq_disk = bio->bi_disk;
}
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
@@ -3421,6 +3430,10 @@ EXPORT_SYMBOL(blk_finish_plug);
*/
void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
{
+ /* not support for RQF_PM and ->rpm_status in blk-mq yet */
+ if (q->mq_ops)
+ return;
+
q->dev = dev;
q->rpm_status = RPM_ACTIVE;
pm_runtime_set_autosuspend_delay(q->dev, -1);
diff --git a/block/blk-flush.c b/block/blk-flush.c
index ed5fe322abba..4938bec8cfef 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -1,12 +1,12 @@
/*
- * Functions to sequence FLUSH and FUA writes.
+ * Functions to sequence PREFLUSH and FUA writes.
*
* Copyright (C) 2011 Max Planck Institute for Gravitational Physics
* Copyright (C) 2011 Tejun Heo <tj@kernel.org>
*
* This file is released under the GPLv2.
*
- * REQ_{FLUSH|FUA} requests are decomposed to sequences consisted of three
+ * REQ_{PREFLUSH|FUA} requests are decomposed into sequences consisting of three
* optional steps - PREFLUSH, DATA and POSTFLUSH - according to the request
* properties and hardware capability.
*
@@ -16,9 +16,9 @@
* REQ_FUA means that the data must be on non-volatile media on request
* completion.
*
- * If the device doesn't have writeback cache, FLUSH and FUA don't make any
- * difference. The requests are either completed immediately if there's no
- * data or executed as normal requests otherwise.
+ * If the device doesn't have writeback cache, PREFLUSH and FUA don't make any
+ * difference. The requests are either completed immediately if there's no data
+ * or executed as normal requests otherwise.
*
* If the device has writeback cache and supports FUA, REQ_PREFLUSH is
* translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
@@ -31,7 +31,7 @@
* fq->flush_queue[fq->flush_pending_idx]. Once certain criteria are met, a
* REQ_OP_FLUSH is issued and the pending_idx is toggled. When the flush
* completes, all the requests which were pending are proceeded to the next
- * step. This allows arbitrary merging of different types of FLUSH/FUA
+ * step. This allows arbitrary merging of different types of PREFLUSH/FUA
* requests.
*
* Currently, the following conditions are used to determine when to issue
@@ -47,19 +47,19 @@
* C3. The second condition is ignored if there is a request which has
* waited longer than FLUSH_PENDING_TIMEOUT. This is to avoid
* starvation in the unlikely case where there are continuous stream of
- * FUA (without FLUSH) requests.
+ * FUA (without PREFLUSH) requests.
*
* For devices which support FUA, it isn't clear whether C2 (and thus C3)
* is beneficial.
*
- * Note that a sequenced FLUSH/FUA request with DATA is completed twice.
+ * Note that a sequenced PREFLUSH/FUA request with DATA is completed twice.
* Once while executing DATA and again after the whole sequence is
* complete. The first completion updates the contained bio but doesn't
* finish it so that the bio submitter is notified only after the whole
* sequence is complete. This is implemented by testing RQF_FLUSH_SEQ in
* req_bio_endio().
*
- * The above peculiarity requires that each FLUSH/FUA request has only one
+ * The above peculiarity requires that each PREFLUSH/FUA request has only one
* bio attached to it, which is guaranteed as they aren't allowed to be
* merged in the usual way.
*/
@@ -76,7 +76,7 @@
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"
-/* FLUSH/FUA sequences */
+/* PREFLUSH/FUA sequences */
enum {
REQ_FSEQ_PREFLUSH = (1 << 0), /* pre-flushing in progress */
REQ_FSEQ_DATA = (1 << 1), /* data write in progress */
@@ -148,7 +148,7 @@ static bool blk_flush_queue_rq(struct request *rq, bool add_front)
/**
* blk_flush_complete_seq - complete flush sequence
- * @rq: FLUSH/FUA request being sequenced
+ * @rq: PREFLUSH/FUA request being sequenced
* @fq: flush queue
* @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
* @error: whether an error occurred
@@ -406,7 +406,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
}
/**
- * blk_insert_flush - insert a new FLUSH/FUA request
+ * blk_insert_flush - insert a new PREFLUSH/FUA request
* @rq: request to insert
*
* To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH insertions.
@@ -525,7 +525,7 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
return -ENXIO;
bio = bio_alloc(gfp_mask, 0);
- bio->bi_bdev = bdev;
+ bio_set_dev(bio, bdev);
bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
ret = submit_bio_wait(bio);
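The bio_set_dev() conversion seen here and in the hunks below replaces direct assignment of bi_bdev. A hedged sketch of the change at a call site; the expansion shown is an assumption about the helper defined elsewhere in this series, not part of this hunk:

	/* before: point the bio at the block_device */
	bio->bi_bdev = bdev;

	/* after: go through the helper, which is assumed to expand to roughly
	 *
	 *	bio->bi_disk   = bdev->bd_disk;
	 *	bio->bi_partno = bdev->bd_partno;
	 */
	bio_set_dev(bio, bdev);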
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 3fe0aec90597..e01adb5145b3 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -77,7 +77,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
bio = next_bio(bio, 0, gfp_mask);
bio->bi_iter.bi_sector = sector;
- bio->bi_bdev = bdev;
+ bio_set_dev(bio, bdev);
bio_set_op_attrs(bio, op, 0);
bio->bi_iter.bi_size = req_sects << 9;
@@ -168,7 +168,7 @@ static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
while (nr_sects) {
bio = next_bio(bio, 1, gfp_mask);
bio->bi_iter.bi_sector = sector;
- bio->bi_bdev = bdev;
+ bio_set_dev(bio, bdev);
bio->bi_vcnt = 1;
bio->bi_io_vec->bv_page = page;
bio->bi_io_vec->bv_offset = 0;
@@ -241,7 +241,7 @@ static int __blkdev_issue_write_zeroes(struct block_device *bdev,
while (nr_sects) {
bio = next_bio(bio, 0, gfp_mask);
bio->bi_iter.bi_sector = sector;
- bio->bi_bdev = bdev;
+ bio_set_dev(bio, bdev);
bio->bi_opf = REQ_OP_WRITE_ZEROES;
if (flags & BLKDEV_ZERO_NOUNMAP)
bio->bi_opf |= REQ_NOUNMAP;
@@ -323,7 +323,7 @@ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
bio = next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
gfp_mask);
bio->bi_iter.bi_sector = sector;
- bio->bi_bdev = bdev;
+ bio_set_dev(bio, bdev);
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
while (nr_sects != 0) {
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 99038830fb42..aa524cad5bea 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -633,8 +633,8 @@ static void blk_account_io_merge(struct request *req)
cpu = part_stat_lock();
part = req->part;
- part_round_stats(cpu, part);
- part_dec_in_flight(part, rq_data_dir(req));
+ part_round_stats(req->q, cpu, part);
+ part_dec_in_flight(req->q, part, rq_data_dir(req));
hd_struct_put(part);
part_stat_unlock();
@@ -786,7 +786,7 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
return false;
/* must be same device and not a special request */
- if (rq->rq_disk != bio->bi_bdev->bd_disk || req_no_special_merge(rq))
+ if (rq->rq_disk != bio->bi_disk || req_no_special_merge(rq))
return false;
/* only merge integrity protected bio into ditto rq */
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
index 4891f042a22f..9f8cffc8a701 100644
--- a/block/blk-mq-cpumap.c
+++ b/block/blk-mq-cpumap.c
@@ -17,9 +17,9 @@
static int cpu_to_queue_index(unsigned int nr_queues, const int cpu)
{
/*
- * Non online CPU will be mapped to queue index 0.
+ * Non present CPU will be mapped to queue index 0.
*/
- if (!cpu_online(cpu))
+ if (!cpu_present(cpu))
return 0;
return cpu % nr_queues;
}
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 9ebc2945f991..980e73095643 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -48,8 +48,6 @@ static int blk_flags_show(struct seq_file *m, const unsigned long flags,
static const char *const blk_queue_flag_name[] = {
QUEUE_FLAG_NAME(QUEUED),
QUEUE_FLAG_NAME(STOPPED),
- QUEUE_FLAG_NAME(SYNCFULL),
- QUEUE_FLAG_NAME(ASYNCFULL),
QUEUE_FLAG_NAME(DYING),
QUEUE_FLAG_NAME(BYPASS),
QUEUE_FLAG_NAME(BIDI),
@@ -75,6 +73,8 @@ static const char *const blk_queue_flag_name[] = {
QUEUE_FLAG_NAME(STATS),
QUEUE_FLAG_NAME(POLL_STATS),
QUEUE_FLAG_NAME(REGISTERED),
+ QUEUE_FLAG_NAME(SCSI_PASSTHROUGH),
+ QUEUE_FLAG_NAME(QUIESCED),
};
#undef QUEUE_FLAG_NAME
@@ -265,6 +265,7 @@ static const char *const cmd_flag_name[] = {
CMD_FLAG_NAME(RAHEAD),
CMD_FLAG_NAME(BACKGROUND),
CMD_FLAG_NAME(NOUNMAP),
+ CMD_FLAG_NAME(NOWAIT),
};
#undef CMD_FLAG_NAME
@@ -741,7 +742,7 @@ static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
return seq_release(inode, file);
}
-const struct file_operations blk_mq_debugfs_fops = {
+static const struct file_operations blk_mq_debugfs_fops = {
.open = blk_mq_debugfs_open,
.read = seq_read,
.write = blk_mq_debugfs_write,
diff --git a/block/blk-mq-pci.c b/block/blk-mq-pci.c
index 0c3354cf3552..76944e3271bf 100644
--- a/block/blk-mq-pci.c
+++ b/block/blk-mq-pci.c
@@ -36,12 +36,18 @@ int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev)
for (queue = 0; queue < set->nr_hw_queues; queue++) {
mask = pci_irq_get_affinity(pdev, queue);
if (!mask)
- return -EINVAL;
+ goto fallback;
for_each_cpu(cpu, mask)
set->mq_map[cpu] = queue;
}
return 0;
+
+fallback:
+ WARN_ON_ONCE(set->nr_hw_queues > 1);
+ for_each_possible_cpu(cpu)
+ set->mq_map[cpu] = 0;
+ return 0;
}
EXPORT_SYMBOL_GPL(blk_mq_pci_map_queues);
diff --git a/block/blk-mq-rdma.c b/block/blk-mq-rdma.c
new file mode 100644
index 000000000000..996167f1de18
--- /dev/null
+++ b/block/blk-mq-rdma.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2017 Sagi Grimberg.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#include <linux/blk-mq.h>
+#include <linux/blk-mq-rdma.h>
+#include <rdma/ib_verbs.h>
+
+/**
+ * blk_mq_rdma_map_queues - provide a default queue mapping for rdma device
+ * @set: tagset to provide the mapping for
+ * @dev: rdma device associated with @set.
+ * @first_vec: first interrupt vector to use for queues (usually 0)
+ *
+ * This function assumes the rdma device @dev has at least as many available
+ * interrupt vectors as @set has queues. It will then query its affinity mask
+ * and build a queue mapping that maps each queue to the CPUs that have irq affinity
+ * for the corresponding vector.
+ *
+ * In case either the driver passed a @dev with fewer vectors than
+ * @set->nr_hw_queues, or @dev does not provide an affinity mask for a
+ * vector, we fall back to the naive mapping.
+ */
+int blk_mq_rdma_map_queues(struct blk_mq_tag_set *set,
+ struct ib_device *dev, int first_vec)
+{
+ const struct cpumask *mask;
+ unsigned int queue, cpu;
+
+ for (queue = 0; queue < set->nr_hw_queues; queue++) {
+ mask = ib_get_vector_affinity(dev, first_vec + queue);
+ if (!mask)
+ goto fallback;
+
+ for_each_cpu(cpu, mask)
+ set->mq_map[cpu] = queue;
+ }
+
+ return 0;
+
+fallback:
+ return blk_mq_map_queues(set);
+}
+EXPORT_SYMBOL_GPL(blk_mq_rdma_map_queues);
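A hedged sketch of how an RDMA block driver might plug this into its tag set mapping; the mydrv names are hypothetical and first_vec is assumed to be 0:

static int mydrv_map_queues(struct blk_mq_tag_set *set)
{
	struct mydrv_ctrl *ctrl = set->driver_data;

	/* map each hw queue to the CPUs with affinity for its IB vector */
	return blk_mq_rdma_map_queues(set, ctrl->ib_device, 0);
}

static const struct blk_mq_ops mydrv_mq_ops = {
	.queue_rq	= mydrv_queue_rq,
	.map_queues	= mydrv_map_queues,
	/* ... */
};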
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index d0be72ccb091..6714507aa6c7 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -214,7 +214,11 @@ static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
bitnr += tags->nr_reserved_tags;
rq = tags->rqs[bitnr];
- if (rq->q == hctx->queue)
+ /*
+ * We can hit rq == NULL here, because the tagging functions
+ * test and set the bit before assigning ->rqs[].
+ */
+ if (rq && rq->q == hctx->queue)
iter_data->fn(hctx, rq, iter_data->data, reserved);
return true;
}
@@ -248,9 +252,15 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
if (!reserved)
bitnr += tags->nr_reserved_tags;
+
+ /*
+ * We can hit rq == NULL here, because the tagging functions
+ * test and set the bit before assigning ->rqs[].
+ */
rq = tags->rqs[bitnr];
+ if (rq)
+ iter_data->fn(rq, iter_data->data, reserved);
- iter_data->fn(rq, iter_data->data, reserved);
return true;
}
@@ -288,11 +298,12 @@ void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
-int blk_mq_reinit_tagset(struct blk_mq_tag_set *set)
+int blk_mq_reinit_tagset(struct blk_mq_tag_set *set,
+ int (reinit_request)(void *, struct request *))
{
int i, j, ret = 0;
- if (!set->ops->reinit_request)
+ if (WARN_ON_ONCE(!reinit_request))
goto out;
for (i = 0; i < set->nr_hw_queues; i++) {
@@ -305,8 +316,8 @@ int blk_mq_reinit_tagset(struct blk_mq_tag_set *set)
if (!tags->static_rqs[j])
continue;
- ret = set->ops->reinit_request(set->driver_data,
- tags->static_rqs[j]);
+ ret = reinit_request(set->driver_data,
+ tags->static_rqs[j]);
if (ret)
goto out;
}
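With the reinit callback now passed explicitly instead of living in blk_mq_ops, a caller looks roughly like the sketch below; the mydrv names are hypothetical:

static int mydrv_reinit_request(void *data, struct request *rq)
{
	struct mydrv_ctrl *ctrl = data;

	/* refresh per-request driver state (e.g. DMA mappings) */
	return mydrv_init_request_data(ctrl, blk_mq_rq_to_pdu(rq));
}

static void mydrv_reset(struct mydrv_ctrl *ctrl)
{
	blk_mq_reinit_tagset(&ctrl->tag_set, mydrv_reinit_request);
}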
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 041f7b7fa0d6..3f18cff80050 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -83,6 +83,41 @@ static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw);
}
+struct mq_inflight {
+ struct hd_struct *part;
+ unsigned int *inflight;
+};
+
+static void blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
+ struct request *rq, void *priv,
+ bool reserved)
+{
+ struct mq_inflight *mi = priv;
+
+ if (test_bit(REQ_ATOM_STARTED, &rq->atomic_flags) &&
+ !test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags)) {
+ /*
+ * index[0] counts the specific partition that was asked
+ * for. index[1] counts the ones that are active on the
+ * whole device, so increment that if mi->part is indeed
+ * a partition, and not a whole device.
+ */
+ if (rq->part == mi->part)
+ mi->inflight[0]++;
+ if (mi->part->partno)
+ mi->inflight[1]++;
+ }
+}
+
+void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
+ unsigned int inflight[2])
+{
+ struct mq_inflight mi = { .part = part, .inflight = inflight, };
+
+ inflight[0] = inflight[1] = 0;
+ blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
+}
+
void blk_freeze_queue_start(struct request_queue *q)
{
int freeze_depth;
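A hedged sketch of the intended consumer: a part_in_flight()-style helper that walks the tags on blk-mq queues and keeps per-partition counters for the legacy path. The legacy branch is an assumption about the genhd side, not part of this hunk.

static void example_part_in_flight(struct request_queue *q,
				   struct hd_struct *part,
				   unsigned int inflight[2])
{
	if (q->mq_ops) {
		/* count started, not yet completed requests via the tags */
		blk_mq_in_flight(q, part, inflight);
		return;
	}

	/* legacy request_fn path: per-partition atomic counters */
	inflight[0] = atomic_read(&part->in_flight[0]) +
		      atomic_read(&part->in_flight[1]);
	inflight[1] = 0;
	if (part->partno) {
		part = &part_to_disk(part)->part0;
		inflight[1] = atomic_read(&part->in_flight[0]) +
			      atomic_read(&part->in_flight[1]);
	}
}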
@@ -301,11 +336,12 @@ static struct request *blk_mq_get_request(struct request_queue *q,
struct elevator_queue *e = q->elevator;
struct request *rq;
unsigned int tag;
+ struct blk_mq_ctx *local_ctx = NULL;
blk_queue_enter_live(q);
data->q = q;
if (likely(!data->ctx))
- data->ctx = blk_mq_get_ctx(q);
+ data->ctx = local_ctx = blk_mq_get_ctx(q);
if (likely(!data->hctx))
data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
if (op & REQ_NOWAIT)
@@ -324,6 +360,10 @@ static struct request *blk_mq_get_request(struct request_queue *q,
tag = blk_mq_get_tag(data);
if (tag == BLK_MQ_TAG_FAIL) {
+ if (local_ctx) {
+ blk_mq_put_ctx(local_ctx);
+ data->ctx = NULL;
+ }
blk_queue_exit(q);
return NULL;
}
@@ -355,13 +395,13 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
return ERR_PTR(ret);
rq = blk_mq_get_request(q, NULL, op, &alloc_data);
-
- blk_mq_put_ctx(alloc_data.ctx);
blk_queue_exit(q);
if (!rq)
return ERR_PTR(-EWOULDBLOCK);
+ blk_mq_put_ctx(alloc_data.ctx);
+
rq->__data_len = 0;
rq->__sector = (sector_t) -1;
rq->bio = rq->biotail = NULL;
@@ -406,7 +446,6 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
alloc_data.ctx = __blk_mq_get_ctx(q, cpu);
rq = blk_mq_get_request(q, NULL, op, &alloc_data);
-
blk_queue_exit(q);
if (!rq)
@@ -620,11 +659,10 @@ static void blk_mq_requeue_work(struct work_struct *work)
container_of(work, struct request_queue, requeue_work.work);
LIST_HEAD(rq_list);
struct request *rq, *next;
- unsigned long flags;
- spin_lock_irqsave(&q->requeue_lock, flags);
+ spin_lock_irq(&q->requeue_lock);
list_splice_init(&q->requeue_list, &rq_list);
- spin_unlock_irqrestore(&q->requeue_lock, flags);
+ spin_unlock_irq(&q->requeue_lock);
list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
if (!(rq->rq_flags & RQF_SOFTBARRIER))
@@ -679,8 +717,8 @@ EXPORT_SYMBOL(blk_mq_kick_requeue_list);
void blk_mq_delay_kick_requeue_list(struct request_queue *q,
unsigned long msecs)
{
- kblockd_schedule_delayed_work(&q->requeue_work,
- msecs_to_jiffies(msecs));
+ kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
+ msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
@@ -1098,9 +1136,19 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
{
int srcu_idx;
+ /*
+ * We should be running this queue from one of the CPUs that
+ * are mapped to it.
+ */
WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
cpu_online(hctx->next_cpu));
+ /*
+ * We can't run the queue inline with ints disabled. Ensure that
+ * we catch bad users of this early.
+ */
+ WARN_ON_ONCE(in_interrupt());
+
if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
rcu_read_lock();
blk_mq_sched_dispatch_requests(hctx);
@@ -1214,7 +1262,7 @@ EXPORT_SYMBOL(blk_mq_queue_stopped);
/*
* This function is often used for pausing .queue_rq() by driver when
* there isn't enough resource or some conditions aren't satisfied, and
- * BLK_MQ_RQ_QUEUE_BUSY is usually returned.
+ * BLK_STS_RESOURCE is usually returned.
*
* We do not guarantee that dispatch can be drained or blocked
* after blk_mq_stop_hw_queue() returns. Please use
@@ -1231,7 +1279,7 @@ EXPORT_SYMBOL(blk_mq_stop_hw_queue);
/*
* This function is often used for pausing .queue_rq() by driver when
* there isn't enough resource or some conditions aren't satisfied, and
- * BLK_MQ_RQ_QUEUE_BUSY is usually returned.
+ * BLK_STS_RESOURCE is usually returned.
*
* We do not guarantee that dispatch can be drained or blocked
* after blk_mq_stop_hw_queues() returns. Please use
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 60b01c0309bc..98252b79b80b 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -133,4 +133,7 @@ static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
return hctx->nr_ctx && hctx->tags;
}
+void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
+ unsigned int inflight[2]);
+
#endif
diff --git a/block/blk-settings.c b/block/blk-settings.c
index be1f115b538b..8559e9563c52 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -68,6 +68,7 @@ EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
{
+ WARN_ON_ONCE(q->mq_ops);
q->rq_timed_out_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index 87b7df4851bf..07125e7941f4 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -60,7 +60,7 @@ static void trigger_softirq(void *data)
static int raise_blk_irq(int cpu, struct request *rq)
{
if (cpu_online(cpu)) {
- struct call_single_data *data = &rq->csd;
+ call_single_data_t *data = &rq->csd;
data->func = trigger_softirq;
data->info = rq;
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 27aceab1cc31..b8362c0df51d 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -931,7 +931,9 @@ void blk_unregister_queue(struct gendisk *disk)
if (WARN_ON(!q))
return;
+ mutex_lock(&q->sysfs_lock);
queue_flag_clear_unlocked(QUEUE_FLAG_REGISTERED, q);
+ mutex_unlock(&q->sysfs_lock);
wbt_exit(q);
diff --git a/block/blk-tag.c b/block/blk-tag.c
index 2290f65b9d73..e1a9c15eb1b8 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -290,7 +290,6 @@ void blk_queue_end_tag(struct request_queue *q, struct request *rq)
*/
clear_bit_unlock(tag, bqt->tag_map);
}
-EXPORT_SYMBOL(blk_queue_end_tag);
/**
* blk_queue_start_tag - find a free tag and assign it
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index a7285bf2831c..0fea76aa0f3f 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -373,15 +373,21 @@ static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
if (likely(!blk_trace_note_message_enabled(__td->queue))) \
break; \
if ((__tg)) { \
- char __pbuf[128]; \
- \
- blkg_path(tg_to_blkg(__tg), __pbuf, sizeof(__pbuf)); \
- blk_add_trace_msg(__td->queue, "throtl %s " fmt, __pbuf, ##args); \
+ blk_add_cgroup_trace_msg(__td->queue, \
+ tg_to_blkg(__tg)->blkcg, "throtl " fmt, ##args);\
} else { \
blk_add_trace_msg(__td->queue, "throtl " fmt, ##args); \
} \
} while (0)
+static inline unsigned int throtl_bio_data_size(struct bio *bio)
+{
+ /* assume it's one sector */
+ if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
+ return 512;
+ return bio->bi_iter.bi_size;
+}
+
static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
{
INIT_LIST_HEAD(&qn->node);
@@ -934,6 +940,7 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
bool rw = bio_data_dir(bio);
u64 bytes_allowed, extra_bytes, tmp;
unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
+ unsigned int bio_size = throtl_bio_data_size(bio);
jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
@@ -947,14 +954,14 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
do_div(tmp, HZ);
bytes_allowed = tmp;
- if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) {
+ if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) {
if (wait)
*wait = 0;
return true;
}
/* Calc approx time to dispatch */
- extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed;
+ extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
jiffy_wait = div64_u64(extra_bytes * HZ, tg_bps_limit(tg, rw));
if (!jiffy_wait)
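A worked example of the slice arithmetic above, with illustrative numbers only (HZ = 1000, 1 MiB/s limit):

/*
 *   tg_bps_limit          = 1048576 bytes/s
 *   jiffy_elapsed_rnd     = 100 jiffies            (0.1 s into the slice)
 *   bytes_allowed         = 1048576 * 100 / 1000  ~= 104857
 *   bytes_disp + bio_size = 90000 + 65536          = 155536  (over the allowance)
 *   extra_bytes           = 155536 - 104857        = 50679
 *   jiffy_wait            = 50679 * 1000 / 1048576 ~= 48 jiffies ~= 48 ms
 *
 * A discard bio would contribute only 512 bytes to bio_size here, per the
 * new throtl_bio_data_size() helper, rather than its full bi_size.
 */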
@@ -1034,11 +1041,12 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
bool rw = bio_data_dir(bio);
+ unsigned int bio_size = throtl_bio_data_size(bio);
/* Charge the bio to the group */
- tg->bytes_disp[rw] += bio->bi_iter.bi_size;
+ tg->bytes_disp[rw] += bio_size;
tg->io_disp[rw]++;
- tg->last_bytes_disp[rw] += bio->bi_iter.bi_size;
+ tg->last_bytes_disp[rw] += bio_size;
tg->last_io_disp[rw]++;
/*
@@ -2104,14 +2112,9 @@ static inline void throtl_update_latency_buckets(struct throtl_data *td)
static void blk_throtl_assoc_bio(struct throtl_grp *tg, struct bio *bio)
{
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
- int ret;
-
- ret = bio_associate_current(bio);
- if (ret == 0 || ret == -EBUSY)
+ if (bio->bi_css)
bio->bi_cg_private = tg;
blk_stat_set_issue(&bio->bi_issue_stat, bio_sectors(bio));
-#else
- bio_associate_current(bio);
#endif
}
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index 3bd15d8095b1..ff57fb51b338 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -116,7 +116,7 @@ int blkdev_report_zones(struct block_device *bdev,
if (!bio)
return -ENOMEM;
- bio->bi_bdev = bdev;
+ bio_set_dev(bio, bdev);
bio->bi_iter.bi_sector = blk_zone_start(q, sector);
bio_set_op_attrs(bio, REQ_OP_ZONE_REPORT, 0);
@@ -234,7 +234,7 @@ int blkdev_reset_zones(struct block_device *bdev,
bio = bio_alloc(gfp_mask, 0);
bio->bi_iter.bi_sector = sector;
- bio->bi_bdev = bdev;
+ bio_set_dev(bio, bdev);
bio_set_op_attrs(bio, REQ_OP_ZONE_RESET, 0);
ret = submit_bio_wait(bio);
diff --git a/block/blk.h b/block/blk.h
index 3a3d715bd725..fcb9775b997d 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -64,7 +64,6 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
struct bio *bio);
void blk_queue_bypass_start(struct request_queue *q);
void blk_queue_bypass_end(struct request_queue *q);
-void blk_dequeue_request(struct request *rq);
void __blk_queue_free_tags(struct request_queue *q);
void blk_freeze_queue(struct request_queue *q);
@@ -204,6 +203,8 @@ static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq
e->type->ops.sq.elevator_deactivate_req_fn(q, rq);
}
+struct hd_struct *__disk_get_part(struct gendisk *disk, int partno);
+
#ifdef CONFIG_FAIL_IO_TIMEOUT
int blk_should_fake_timeout(struct request_queue *);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index c4513b23f57a..dd56d7460cb9 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -29,26 +29,25 @@
#include <scsi/scsi_cmnd.h>
/**
- * bsg_destroy_job - routine to teardown/delete a bsg job
+ * bsg_teardown_job - routine to teardown a bsg job
* @job: bsg_job that is to be torn down
*/
-static void bsg_destroy_job(struct kref *kref)
+static void bsg_teardown_job(struct kref *kref)
{
struct bsg_job *job = container_of(kref, struct bsg_job, kref);
struct request *rq = job->req;
- blk_end_request_all(rq, BLK_STS_OK);
-
put_device(job->dev); /* release reference for the request */
kfree(job->request_payload.sg_list);
kfree(job->reply_payload.sg_list);
- kfree(job);
+
+ blk_end_request_all(rq, BLK_STS_OK);
}
void bsg_job_put(struct bsg_job *job)
{
- kref_put(&job->kref, bsg_destroy_job);
+ kref_put(&job->kref, bsg_teardown_job);
}
EXPORT_SYMBOL_GPL(bsg_job_put);
@@ -100,7 +99,7 @@ EXPORT_SYMBOL_GPL(bsg_job_done);
*/
static void bsg_softirq_done(struct request *rq)
{
- struct bsg_job *job = rq->special;
+ struct bsg_job *job = blk_mq_rq_to_pdu(rq);
bsg_job_put(job);
}
@@ -122,33 +121,20 @@ static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
}
/**
- * bsg_create_job - create the bsg_job structure for the bsg request
+ * bsg_prepare_job - create the bsg_job structure for the bsg request
* @dev: device that is being sent the bsg request
* @req: BSG request that needs a job structure
*/
-static int bsg_create_job(struct device *dev, struct request *req)
+static int bsg_prepare_job(struct device *dev, struct request *req)
{
struct request *rsp = req->next_rq;
- struct request_queue *q = req->q;
struct scsi_request *rq = scsi_req(req);
- struct bsg_job *job;
+ struct bsg_job *job = blk_mq_rq_to_pdu(req);
int ret;
- BUG_ON(req->special);
-
- job = kzalloc(sizeof(struct bsg_job) + q->bsg_job_size, GFP_KERNEL);
- if (!job)
- return -ENOMEM;
-
- req->special = job;
- job->req = req;
- if (q->bsg_job_size)
- job->dd_data = (void *)&job[1];
job->request = rq->cmd;
job->request_len = rq->cmd_len;
- job->reply = rq->sense;
- job->reply_len = SCSI_SENSE_BUFFERSIZE; /* Size of sense buffer
- * allocated */
+
if (req->bio) {
ret = bsg_map_buffer(&job->request_payload, req);
if (ret)
@@ -187,7 +173,6 @@ static void bsg_request_fn(struct request_queue *q)
{
struct device *dev = q->queuedata;
struct request *req;
- struct bsg_job *job;
int ret;
if (!get_device(dev))
@@ -199,7 +184,7 @@ static void bsg_request_fn(struct request_queue *q)
break;
spin_unlock_irq(q->queue_lock);
- ret = bsg_create_job(dev, req);
+ ret = bsg_prepare_job(dev, req);
if (ret) {
scsi_req(req)->result = ret;
blk_end_request_all(req, BLK_STS_OK);
@@ -207,8 +192,7 @@ static void bsg_request_fn(struct request_queue *q)
continue;
}
- job = req->special;
- ret = q->bsg_job_fn(job);
+ ret = q->bsg_job_fn(blk_mq_rq_to_pdu(req));
spin_lock_irq(q->queue_lock);
if (ret)
break;
@@ -219,6 +203,35 @@ static void bsg_request_fn(struct request_queue *q)
spin_lock_irq(q->queue_lock);
}
+static int bsg_init_rq(struct request_queue *q, struct request *req, gfp_t gfp)
+{
+ struct bsg_job *job = blk_mq_rq_to_pdu(req);
+ struct scsi_request *sreq = &job->sreq;
+
+ memset(job, 0, sizeof(*job));
+
+ scsi_req_init(sreq);
+ sreq->sense_len = SCSI_SENSE_BUFFERSIZE;
+ sreq->sense = kzalloc(sreq->sense_len, gfp);
+ if (!sreq->sense)
+ return -ENOMEM;
+
+ job->req = req;
+ job->reply = sreq->sense;
+ job->reply_len = sreq->sense_len;
+ job->dd_data = job + 1;
+
+ return 0;
+}
+
+static void bsg_exit_rq(struct request_queue *q, struct request *req)
+{
+ struct bsg_job *job = blk_mq_rq_to_pdu(req);
+ struct scsi_request *sreq = &job->sreq;
+
+ kfree(sreq->sense);
+}
+
/**
* bsg_setup_queue - Create and add the bsg hooks so we can receive requests
* @dev: device to attach bsg device to
@@ -235,7 +248,9 @@ struct request_queue *bsg_setup_queue(struct device *dev, char *name,
q = blk_alloc_queue(GFP_KERNEL);
if (!q)
return ERR_PTR(-ENOMEM);
- q->cmd_size = sizeof(struct scsi_request);
+ q->cmd_size = sizeof(struct bsg_job) + dd_job_size;
+ q->init_rq_fn = bsg_init_rq;
+ q->exit_rq_fn = bsg_exit_rq;
q->request_fn = bsg_request_fn;
ret = blk_init_allocated_queue(q);
@@ -243,7 +258,6 @@ struct request_queue *bsg_setup_queue(struct device *dev, char *name,
goto out_cleanup_queue;
q->queuedata = dev;
- q->bsg_job_size = dd_job_size;
q->bsg_job_fn = job_fn;
queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
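A hedged sketch of a transport driver attaching a bsg queue under the new per-request allocation scheme; the myhost names and helpers are hypothetical, and dd_job_size still sizes the per-job driver data that ends up behind job->dd_data:

static int myhost_bsg_job_fn(struct bsg_job *job)
{
	struct myhost *host = dev_get_drvdata(job->dev);

	/* hand the pass-through command in job->request to the hardware */
	return myhost_queue_passthrough(host, job);
}

static int myhost_bsg_init(struct device *dev)
{
	struct request_queue *q;

	q = bsg_setup_queue(dev, "myhost_bsg", myhost_bsg_job_fn,
			    sizeof(struct myhost_job_data));
	if (IS_ERR(q))
		return PTR_ERR(q);

	return 0;
}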
diff --git a/block/bsg.c b/block/bsg.c
index 37663b664666..ee1335c68de7 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -932,15 +932,8 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return ret;
}
- /*
- * block device ioctls
- */
default:
-#if 0
- return ioctl_by_bdev(bd->bdev, cmd, arg);
-#else
return -ENOTTY;
-#endif
}
}
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 3d5c28945719..9b86e9b352e9 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -656,20 +656,17 @@ static inline void cfqg_put(struct cfq_group *cfqg)
}
#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) do { \
- char __pbuf[128]; \
- \
- blkg_path(cfqg_to_blkg((cfqq)->cfqg), __pbuf, sizeof(__pbuf)); \
- blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c %s " fmt, (cfqq)->pid, \
+ blk_add_cgroup_trace_msg((cfqd)->queue, \
+ cfqg_to_blkg((cfqq)->cfqg)->blkcg, \
+ "cfq%d%c%c " fmt, (cfqq)->pid, \
cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
- __pbuf, ##args); \
+ ##args); \
} while (0)
#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do { \
- char __pbuf[128]; \
- \
- blkg_path(cfqg_to_blkg(cfqg), __pbuf, sizeof(__pbuf)); \
- blk_add_trace_msg((cfqd)->queue, "%s " fmt, __pbuf, ##args); \
+ blk_add_cgroup_trace_msg((cfqd)->queue, \
+ cfqg_to_blkg(cfqg)->blkcg, fmt, ##args); \
} while (0)
static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
@@ -2937,7 +2934,8 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
* for devices that support queuing, otherwise we still have a problem
* with sync vs async workloads.
*/
- if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
+ if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag &&
+ !cfqd->cfq_group_idle)
return;
WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
@@ -4714,13 +4712,12 @@ cfq_var_show(unsigned int var, char *page)
return sprintf(page, "%u\n", var);
}
-static ssize_t
-cfq_var_store(unsigned int *var, const char *page, size_t count)
+static void
+cfq_var_store(unsigned int *var, const char *page)
{
char *p = (char *) page;
*var = simple_strtoul(p, &p, 10);
- return count;
}
#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
@@ -4766,7 +4763,7 @@ static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)
{ \
struct cfq_data *cfqd = e->elevator_data; \
unsigned int __data; \
- int ret = cfq_var_store(&__data, (page), count); \
+ cfq_var_store(&__data, (page)); \
if (__data < (MIN)) \
__data = (MIN); \
else if (__data > (MAX)) \
@@ -4775,7 +4772,7 @@ static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)
*(__PTR) = (u64)__data * NSEC_PER_MSEC; \
else \
*(__PTR) = __data; \
- return ret; \
+ return count; \
}
STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
@@ -4800,13 +4797,13 @@ static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)
{ \
struct cfq_data *cfqd = e->elevator_data; \
unsigned int __data; \
- int ret = cfq_var_store(&__data, (page), count); \
+ cfq_var_store(&__data, (page)); \
if (__data < (MIN)) \
__data = (MIN); \
else if (__data > (MAX)) \
__data = (MAX); \
*(__PTR) = (u64)__data * NSEC_PER_USEC; \
- return ret; \
+ return count; \
}
USEC_STORE_FUNCTION(cfq_slice_idle_us_store, &cfqd->cfq_slice_idle, 0, UINT_MAX);
USEC_STORE_FUNCTION(cfq_group_idle_us_store, &cfqd->cfq_group_idle, 0, UINT_MAX);
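
For readability, roughly what one STORE_FUNCTION instance expands to after this change (an illustrative expansion, with the redundant UINT_MAX clamp and the __CONV branch elided): the parsing helper no longer reports a length, and the macro body always acknowledges the whole sysfs write.

static ssize_t cfq_quantum_store(struct elevator_queue *e, const char *page,
				 size_t count)
{
	struct cfq_data *cfqd = e->elevator_data;
	unsigned int __data;

	cfq_var_store(&__data, page);	/* parse only; no length comes back */
	if (__data < 1)			/* clamp to the macro's MIN */
		__data = 1;
	cfqd->cfq_quantum = __data;	/* __CONV == 0: stored unconverted */
	return count;			/* always consume the full write */
}
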
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index 38554c2ea38a..abaf9d78a206 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -79,7 +79,7 @@ static int compat_hdio_getgeo(struct gendisk *disk, struct block_device *bdev,
static int compat_hdio_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
- unsigned long *__user p;
+ unsigned long __user *p;
int error;
p = compat_alloc_user_space(sizeof(unsigned long));
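
The one-character move above only matters to sparse: the __user address-space attribute binds to what the pointer points at, so it belongs before the *. Side by side (not part of the patch):

unsigned long __user *p;	/* kernel pointer to an unsigned long in user space (intended) */
unsigned long * __user q;	/* pointer itself tagged __user, pointee treated as kernel data */
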
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index c68f6bbc0dcd..b83f77460d28 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -373,13 +373,12 @@ deadline_var_show(int var, char *page)
return sprintf(page, "%d\n", var);
}
-static ssize_t
-deadline_var_store(int *var, const char *page, size_t count)
+static void
+deadline_var_store(int *var, const char *page)
{
char *p = (char *) page;
*var = simple_strtol(p, &p, 10);
- return count;
}
#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
@@ -403,7 +402,7 @@ static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)
{ \
struct deadline_data *dd = e->elevator_data; \
int __data; \
- int ret = deadline_var_store(&__data, (page), count); \
+ deadline_var_store(&__data, (page)); \
if (__data < (MIN)) \
__data = (MIN); \
else if (__data > (MAX)) \
@@ -412,7 +411,7 @@ static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)
*(__PTR) = msecs_to_jiffies(__data); \
else \
*(__PTR) = __data; \
- return ret; \
+ return count; \
}
STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
diff --git a/block/elevator.c b/block/elevator.c
index 4bb2f0c93fa6..153926a90901 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -1055,6 +1055,10 @@ static int __elevator_change(struct request_queue *q, const char *name)
char elevator_name[ELV_NAME_MAX];
struct elevator_type *e;
+ /* Make sure queue is not in the middle of being removed */
+ if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
+ return -ENOENT;
+
/*
* Special case for mq, turn off scheduling
*/
diff --git a/block/genhd.c b/block/genhd.c
index 7f520fa25d16..dd305c65ffb0 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -45,6 +45,52 @@ static void disk_add_events(struct gendisk *disk);
static void disk_del_events(struct gendisk *disk);
static void disk_release_events(struct gendisk *disk);
+void part_inc_in_flight(struct request_queue *q, struct hd_struct *part, int rw)
+{
+ if (q->mq_ops)
+ return;
+
+ atomic_inc(&part->in_flight[rw]);
+ if (part->partno)
+ atomic_inc(&part_to_disk(part)->part0.in_flight[rw]);
+}
+
+void part_dec_in_flight(struct request_queue *q, struct hd_struct *part, int rw)
+{
+ if (q->mq_ops)
+ return;
+
+ atomic_dec(&part->in_flight[rw]);
+ if (part->partno)
+ atomic_dec(&part_to_disk(part)->part0.in_flight[rw]);
+}
+
+void part_in_flight(struct request_queue *q, struct hd_struct *part,
+ unsigned int inflight[2])
+{
+ if (q->mq_ops) {
+ blk_mq_in_flight(q, part, inflight);
+ return;
+ }
+
+ inflight[0] = atomic_read(&part->in_flight[0]) +
+ atomic_read(&part->in_flight[1]);
+ if (part->partno) {
+ part = &part_to_disk(part)->part0;
+ inflight[1] = atomic_read(&part->in_flight[0]) +
+ atomic_read(&part->in_flight[1]);
+ }
+}
+
+struct hd_struct *__disk_get_part(struct gendisk *disk, int partno)
+{
+ struct disk_part_tbl *ptbl = rcu_dereference(disk->part_tbl);
+
+ if (unlikely(partno < 0 || partno >= ptbl->len))
+ return NULL;
+ return rcu_dereference(ptbl->part[partno]);
+}
+
/**
* disk_get_part - get partition
* @disk: disk to look partition from
@@ -61,21 +107,12 @@ static void disk_release_events(struct gendisk *disk);
*/
struct hd_struct *disk_get_part(struct gendisk *disk, int partno)
{
- struct hd_struct *part = NULL;
- struct disk_part_tbl *ptbl;
-
- if (unlikely(partno < 0))
- return NULL;
+ struct hd_struct *part;
rcu_read_lock();
-
- ptbl = rcu_dereference(disk->part_tbl);
- if (likely(partno < ptbl->len)) {
- part = rcu_dereference(ptbl->part[partno]);
- if (part)
- get_device(part_to_dev(part));
- }
-
+ part = __disk_get_part(disk, partno);
+ if (part)
+ get_device(part_to_dev(part));
rcu_read_unlock();
return part;
@@ -242,6 +279,7 @@ EXPORT_SYMBOL_GPL(disk_map_sector_rcu);
* Can be deleted altogether. Later.
*
*/
+#define BLKDEV_MAJOR_HASH_SIZE 255
static struct blk_major_name {
struct blk_major_name *next;
int major;
@@ -259,12 +297,11 @@ void blkdev_show(struct seq_file *seqf, off_t offset)
{
struct blk_major_name *dp;
- if (offset < BLKDEV_MAJOR_HASH_SIZE) {
- mutex_lock(&block_class_lock);
- for (dp = major_names[offset]; dp; dp = dp->next)
+ mutex_lock(&block_class_lock);
+ for (dp = major_names[major_to_index(offset)]; dp; dp = dp->next)
+ if (dp->major == offset)
seq_printf(seqf, "%3d %s\n", dp->major, dp->name);
- mutex_unlock(&block_class_lock);
- }
+ mutex_unlock(&block_class_lock);
}
#endif /* CONFIG_PROC_FS */
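
blkdev_show() now walks a single hash chain and filters on dp->major, relying on the same index helper that register_blkdev() uses. That helper is not visible in this hunk; it is assumed to be the usual modulo over the 255-entry table, roughly:

static inline int major_to_index(unsigned major)
{
	return major % BLKDEV_MAJOR_HASH_SIZE;
}
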
@@ -309,6 +346,14 @@ int register_blkdev(unsigned int major, const char *name)
ret = major;
}
+ if (major >= BLKDEV_MAJOR_MAX) {
+ pr_err("register_blkdev: major requested (%d) is greater than the maximum (%d) for %s\n",
+ major, BLKDEV_MAJOR_MAX, name);
+
+ ret = -EINVAL;
+ goto out;
+ }
+
p = kmalloc(sizeof(struct blk_major_name), GFP_KERNEL);
if (p == NULL) {
ret = -ENOMEM;
@@ -1090,12 +1135,13 @@ static const struct attribute_group *disk_attr_groups[] = {
* original ptbl is freed using RCU callback.
*
* LOCKING:
- * Matching bd_mutx locked.
+ * Matching bd_mutex locked or the caller is the only user of @disk.
*/
static void disk_replace_part_tbl(struct gendisk *disk,
struct disk_part_tbl *new_ptbl)
{
- struct disk_part_tbl *old_ptbl = disk->part_tbl;
+ struct disk_part_tbl *old_ptbl =
+ rcu_dereference_protected(disk->part_tbl, 1);
rcu_assign_pointer(disk->part_tbl, new_ptbl);
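
The rcu_dereference_protected(..., 1) conversions here and in partition-generic.c below all follow one pattern; a condensed sketch of the reader/writer split these hunks enforce (illustrative fragment, variables as in the surrounding functions):

struct disk_part_tbl *ptbl, *old;
struct hd_struct *part;

/* reader side: lookups and stats, any context */
rcu_read_lock();
ptbl = rcu_dereference(disk->part_tbl);
part = rcu_dereference(ptbl->part[partno]);
rcu_read_unlock();

/* writer side: bd_mutex held, or the disk not yet (or no longer) reachable */
old = rcu_dereference_protected(disk->part_tbl, 1);
rcu_assign_pointer(disk->part_tbl, new_ptbl);
/* "old" is freed from an RCU callback once readers have drained */
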
@@ -1114,14 +1160,16 @@ static void disk_replace_part_tbl(struct gendisk *disk,
* uses RCU to allow unlocked dereferencing for stats and other stuff.
*
* LOCKING:
- * Matching bd_mutex locked, might sleep.
+ * Matching bd_mutex locked or the caller is the only user of @disk.
+ * Might sleep.
*
* RETURNS:
* 0 on success, -errno on failure.
*/
int disk_expand_part_tbl(struct gendisk *disk, int partno)
{
- struct disk_part_tbl *old_ptbl = disk->part_tbl;
+ struct disk_part_tbl *old_ptbl =
+ rcu_dereference_protected(disk->part_tbl, 1);
struct disk_part_tbl *new_ptbl;
int len = old_ptbl ? old_ptbl->len : 0;
int i, target;
@@ -1204,6 +1252,7 @@ static int diskstats_show(struct seq_file *seqf, void *v)
struct disk_part_iter piter;
struct hd_struct *hd;
char buf[BDEVNAME_SIZE];
+ unsigned int inflight[2];
int cpu;
/*
@@ -1217,8 +1266,9 @@ static int diskstats_show(struct seq_file *seqf, void *v)
disk_part_iter_init(&piter, gp, DISK_PITER_INCL_EMPTY_PART0);
while ((hd = disk_part_iter_next(&piter))) {
cpu = part_stat_lock();
- part_round_stats(cpu, hd);
+ part_round_stats(gp->queue, cpu, hd);
part_stat_unlock();
+ part_in_flight(gp->queue, hd, inflight);
seq_printf(seqf, "%4d %7d %s %lu %lu %lu "
"%u %lu %lu %lu %u %u %u %u\n",
MAJOR(part_devt(hd)), MINOR(part_devt(hd)),
@@ -1231,7 +1281,7 @@ static int diskstats_show(struct seq_file *seqf, void *v)
part_stat_read(hd, merges[WRITE]),
part_stat_read(hd, sectors[WRITE]),
jiffies_to_msecs(part_stat_read(hd, ticks[WRITE])),
- part_in_flight(hd),
+ inflight[0],
jiffies_to_msecs(part_stat_read(hd, io_ticks)),
jiffies_to_msecs(part_stat_read(hd, time_in_queue))
);
@@ -1313,6 +1363,14 @@ EXPORT_SYMBOL(alloc_disk);
struct gendisk *alloc_disk_node(int minors, int node_id)
{
struct gendisk *disk;
+ struct disk_part_tbl *ptbl;
+
+ if (minors > DISK_MAX_PARTS) {
+ printk(KERN_ERR
+ "block: can't allocate more than %d partitions\n",
+ DISK_MAX_PARTS);
+ minors = DISK_MAX_PARTS;
+ }
disk = kzalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id);
if (disk) {
@@ -1326,7 +1384,8 @@ struct gendisk *alloc_disk_node(int minors, int node_id)
kfree(disk);
return NULL;
}
- disk->part_tbl->part[0] = &disk->part0;
+ ptbl = rcu_dereference_protected(disk->part_tbl, 1);
+ rcu_assign_pointer(ptbl->part[0], &disk->part0);
/*
* set_capacity() and get_capacity() currently don't use
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index 1b964a387afe..a1cad4331edd 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -457,13 +457,12 @@ deadline_var_show(int var, char *page)
return sprintf(page, "%d\n", var);
}
-static ssize_t
-deadline_var_store(int *var, const char *page, size_t count)
+static void
+deadline_var_store(int *var, const char *page)
{
char *p = (char *) page;
*var = simple_strtol(p, &p, 10);
- return count;
}
#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
@@ -487,7 +486,7 @@ static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)
{ \
struct deadline_data *dd = e->elevator_data; \
int __data; \
- int ret = deadline_var_store(&__data, (page), count); \
+ deadline_var_store(&__data, (page)); \
if (__data < (MIN)) \
__data = (MIN); \
else if (__data > (MAX)) \
@@ -496,7 +495,7 @@ static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)
*(__PTR) = msecs_to_jiffies(__data); \
else \
*(__PTR) = __data; \
- return ret; \
+ return count; \
}
STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
@@ -660,6 +659,7 @@ static struct elevator_type mq_deadline = {
.elevator_name = "mq-deadline",
.elevator_owner = THIS_MODULE,
};
+MODULE_ALIAS("mq-deadline-iosched");
static int __init deadline_init(void)
{
diff --git a/block/partition-generic.c b/block/partition-generic.c
index c5ec8246e25e..86e8fe1adcdb 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -112,11 +112,14 @@ ssize_t part_stat_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hd_struct *p = dev_to_part(dev);
+ struct request_queue *q = dev_to_disk(dev)->queue;
+ unsigned int inflight[2];
int cpu;
cpu = part_stat_lock();
- part_round_stats(cpu, p);
+ part_round_stats(q, cpu, p);
part_stat_unlock();
+ part_in_flight(q, p, inflight);
return sprintf(buf,
"%8lu %8lu %8llu %8u "
"%8lu %8lu %8llu %8u "
@@ -130,7 +133,7 @@ ssize_t part_stat_show(struct device *dev,
part_stat_read(p, merges[WRITE]),
(unsigned long long)part_stat_read(p, sectors[WRITE]),
jiffies_to_msecs(part_stat_read(p, ticks[WRITE])),
- part_in_flight(p),
+ inflight[0],
jiffies_to_msecs(part_stat_read(p, io_ticks)),
jiffies_to_msecs(part_stat_read(p, time_in_queue)));
}
@@ -249,15 +252,20 @@ void __delete_partition(struct percpu_ref *ref)
call_rcu(&part->rcu_head, delete_partition_rcu_cb);
}
+/*
+ * Must be called either with bd_mutex held, before a disk can be opened or
+ * after all disk users are gone.
+ */
void delete_partition(struct gendisk *disk, int partno)
{
- struct disk_part_tbl *ptbl = disk->part_tbl;
+ struct disk_part_tbl *ptbl =
+ rcu_dereference_protected(disk->part_tbl, 1);
struct hd_struct *part;
if (partno >= ptbl->len)
return;
- part = ptbl->part[partno];
+ part = rcu_dereference_protected(ptbl->part[partno], 1);
if (!part)
return;
@@ -277,6 +285,10 @@ static ssize_t whole_disk_show(struct device *dev,
static DEVICE_ATTR(whole_disk, S_IRUSR | S_IRGRP | S_IROTH,
whole_disk_show, NULL);
+/*
+ * Must be called either with bd_mutex held, before a disk can be opened or
+ * after all disk users are gone.
+ */
struct hd_struct *add_partition(struct gendisk *disk, int partno,
sector_t start, sector_t len, int flags,
struct partition_meta_info *info)
@@ -292,7 +304,7 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
err = disk_expand_part_tbl(disk, partno);
if (err)
return ERR_PTR(err);
- ptbl = disk->part_tbl;
+ ptbl = rcu_dereference_protected(disk->part_tbl, 1);
if (ptbl->part[partno])
return ERR_PTR(-EBUSY);
@@ -391,7 +403,6 @@ out_del:
device_del(pdev);
out_put:
put_device(pdev);
- blk_free_devt(devt);
return ERR_PTR(err);
}
diff --git a/crypto/Kconfig b/crypto/Kconfig
index caa770e535a2..0a121f9ddf8e 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -1753,6 +1753,8 @@ config CRYPTO_USER_API_AEAD
tristate "User-space interface for AEAD cipher algorithms"
depends on NET
select CRYPTO_AEAD
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_NULL
select CRYPTO_USER_API
help
 This option enables the user-space interface for AEAD
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 92a3d540d920..ffa9f4ccd9b4 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -21,6 +21,7 @@
#include <linux/module.h>
#include <linux/net.h>
#include <linux/rwsem.h>
+#include <linux/sched/signal.h>
#include <linux/security.h>
struct alg_type_list {
@@ -507,6 +508,696 @@ void af_alg_complete(struct crypto_async_request *req, int err)
}
EXPORT_SYMBOL_GPL(af_alg_complete);
+/**
+ * af_alg_alloc_tsgl - allocate the TX SGL
+ *
+ * @sk socket of connection to user space
+ * @return: 0 upon success, < 0 upon error
+ */
+int af_alg_alloc_tsgl(struct sock *sk)
+{
+ struct alg_sock *ask = alg_sk(sk);
+ struct af_alg_ctx *ctx = ask->private;
+ struct af_alg_tsgl *sgl;
+ struct scatterlist *sg = NULL;
+
+ sgl = list_entry(ctx->tsgl_list.prev, struct af_alg_tsgl, list);
+ if (!list_empty(&ctx->tsgl_list))
+ sg = sgl->sg;
+
+ if (!sg || sgl->cur >= MAX_SGL_ENTS) {
+ sgl = sock_kmalloc(sk, sizeof(*sgl) +
+ sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1),
+ GFP_KERNEL);
+ if (!sgl)
+ return -ENOMEM;
+
+ sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
+ sgl->cur = 0;
+
+ if (sg)
+ sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
+
+ list_add_tail(&sgl->list, &ctx->tsgl_list);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(af_alg_alloc_tsgl);
+
+/**
+ * af_alg_count_tsgl - Count number of TX SG entries
+ *
+ * The counting starts from the beginning of the SGL to @bytes. If
+ * an offset is provided, the counting of the SG entries starts at the offset.
+ *
+ * @sk socket of connection to user space
+ * @bytes Count the number of SG entries holding given number of bytes.
+ * @offset Start the counting of SG entries from the given offset.
+ * @return Number of TX SG entries found given the constraints
+ */
+unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset)
+{
+ struct alg_sock *ask = alg_sk(sk);
+ struct af_alg_ctx *ctx = ask->private;
+ struct af_alg_tsgl *sgl, *tmp;
+ unsigned int i;
+ unsigned int sgl_count = 0;
+
+ if (!bytes)
+ return 0;
+
+ list_for_each_entry_safe(sgl, tmp, &ctx->tsgl_list, list) {
+ struct scatterlist *sg = sgl->sg;
+
+ for (i = 0; i < sgl->cur; i++) {
+ size_t bytes_count;
+
+ /* Skip offset */
+ if (offset >= sg[i].length) {
+ offset -= sg[i].length;
+ bytes -= sg[i].length;
+ continue;
+ }
+
+ bytes_count = sg[i].length - offset;
+
+ offset = 0;
+ sgl_count++;
+
+ /* If we have seen requested number of bytes, stop */
+ if (bytes_count >= bytes)
+ return sgl_count;
+
+ bytes -= bytes_count;
+ }
+ }
+
+ return sgl_count;
+}
+EXPORT_SYMBOL_GPL(af_alg_count_tsgl);
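
A small worked example of the counting semantics (illustrative numbers only):

/* Three 4 KiB TX entries, af_alg_count_tsgl(sk, 6144, 2048):
 *  - entry 0: the 2048-byte offset is consumed, 2048 payload bytes remain -> count 1
 *  - entry 1: its 4096 bytes cover the remaining 4096 requested bytes     -> count 2
 * The call returns 2; entry 2 is never inspected.
 */
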
+
+/**
+ * af_alg_pull_tsgl - Release the specified buffers from TX SGL
+ *
+ * If @dst is non-null, reassign the pages to dst. The caller must release
+ * the pages. If @dst_offset is given, only reassign the pages to @dst starting
+ * at the @dst_offset (byte). The caller must ensure that @dst is large
+ * enough (e.g. by using af_alg_count_tsgl with the same offset).
+ *
+ * @sk socket of connection to user space
+ * @used Number of bytes to pull from TX SGL
+ * @dst If non-NULL, buffer is reassigned to dst SGL instead of releasing. The
+ * caller must release the buffers in dst.
+ * @dst_offset Reassign the TX SGL from given offset. All buffers before
+ * reaching the offset are released.
+ */
+void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
+ size_t dst_offset)
+{
+ struct alg_sock *ask = alg_sk(sk);
+ struct af_alg_ctx *ctx = ask->private;
+ struct af_alg_tsgl *sgl;
+ struct scatterlist *sg;
+ unsigned int i, j;
+
+ while (!list_empty(&ctx->tsgl_list)) {
+ sgl = list_first_entry(&ctx->tsgl_list, struct af_alg_tsgl,
+ list);
+ sg = sgl->sg;
+
+ for (i = 0, j = 0; i < sgl->cur; i++) {
+ size_t plen = min_t(size_t, used, sg[i].length);
+ struct page *page = sg_page(sg + i);
+
+ if (!page)
+ continue;
+
+ /*
+ * Assumption: caller created af_alg_count_tsgl(len)
+ * SG entries in dst.
+ */
+ if (dst) {
+ if (dst_offset >= plen) {
+ /* discard page before offset */
+ dst_offset -= plen;
+ } else {
+ /* reassign page to dst after offset */
+ get_page(page);
+ sg_set_page(dst + j, page,
+ plen - dst_offset,
+ sg[i].offset + dst_offset);
+ dst_offset = 0;
+ j++;
+ }
+ }
+
+ sg[i].length -= plen;
+ sg[i].offset += plen;
+
+ used -= plen;
+ ctx->used -= plen;
+
+ if (sg[i].length)
+ return;
+
+ put_page(page);
+ sg_assign_page(sg + i, NULL);
+ }
+
+ list_del(&sgl->list);
+ sock_kfree_s(sk, sgl, sizeof(*sgl) + sizeof(sgl->sg[0]) *
+ (MAX_SGL_ENTS + 1));
+ }
+
+ if (!ctx->used)
+ ctx->merge = 0;
+}
+EXPORT_SYMBOL_GPL(af_alg_pull_tsgl);
+
+/**
+ * af_alg_free_areq_sgls - Release TX and RX SGLs of the request
+ *
+ * @areq Request holding the TX and RX SGL
+ */
+void af_alg_free_areq_sgls(struct af_alg_async_req *areq)
+{
+ struct sock *sk = areq->sk;
+ struct alg_sock *ask = alg_sk(sk);
+ struct af_alg_ctx *ctx = ask->private;
+ struct af_alg_rsgl *rsgl, *tmp;
+ struct scatterlist *tsgl;
+ struct scatterlist *sg;
+ unsigned int i;
+
+ list_for_each_entry_safe(rsgl, tmp, &areq->rsgl_list, list) {
+ ctx->rcvused -= rsgl->sg_num_bytes;
+ af_alg_free_sg(&rsgl->sgl);
+ list_del(&rsgl->list);
+ if (rsgl != &areq->first_rsgl)
+ sock_kfree_s(sk, rsgl, sizeof(*rsgl));
+ }
+
+ tsgl = areq->tsgl;
+ for_each_sg(tsgl, sg, areq->tsgl_entries, i) {
+ if (!sg_page(sg))
+ continue;
+ put_page(sg_page(sg));
+ }
+
+ if (areq->tsgl && areq->tsgl_entries)
+ sock_kfree_s(sk, tsgl, areq->tsgl_entries * sizeof(*tsgl));
+}
+EXPORT_SYMBOL_GPL(af_alg_free_areq_sgls);
+
+/**
+ * af_alg_wait_for_wmem - wait for availability of writable memory
+ *
+ * @sk socket of connection to user space
+ * @flags If MSG_DONTWAIT is set, then only report if function would sleep
+ * @return 0 when writable memory is available, < 0 upon error
+ */
+int af_alg_wait_for_wmem(struct sock *sk, unsigned int flags)
+{
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ int err = -ERESTARTSYS;
+ long timeout;
+
+ if (flags & MSG_DONTWAIT)
+ return -EAGAIN;
+
+ sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
+
+ add_wait_queue(sk_sleep(sk), &wait);
+ for (;;) {
+ if (signal_pending(current))
+ break;
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ if (sk_wait_event(sk, &timeout, af_alg_writable(sk), &wait)) {
+ err = 0;
+ break;
+ }
+ }
+ remove_wait_queue(sk_sleep(sk), &wait);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(af_alg_wait_for_wmem);
+
+/**
+ * af_alg_wmem_wakeup - wakeup caller when writable memory is available
+ *
+ * @sk socket of connection to user space
+ */
+void af_alg_wmem_wakeup(struct sock *sk)
+{
+ struct socket_wq *wq;
+
+ if (!af_alg_writable(sk))
+ return;
+
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (skwq_has_sleeper(wq))
+ wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
+ POLLRDNORM |
+ POLLRDBAND);
+ sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(af_alg_wmem_wakeup);
+
+/**
+ * af_alg_wait_for_data - wait for availability of TX data
+ *
+ * @sk socket of connection to user space
+ * @flags If MSG_DONTWAIT is set, then only report if function would sleep
+ * @return 0 when data is available, < 0 upon error
+ */
+int af_alg_wait_for_data(struct sock *sk, unsigned flags)
+{
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ struct alg_sock *ask = alg_sk(sk);
+ struct af_alg_ctx *ctx = ask->private;
+ long timeout;
+ int err = -ERESTARTSYS;
+
+ if (flags & MSG_DONTWAIT)
+ return -EAGAIN;
+
+ sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+
+ add_wait_queue(sk_sleep(sk), &wait);
+ for (;;) {
+ if (signal_pending(current))
+ break;
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ if (sk_wait_event(sk, &timeout, (ctx->used || !ctx->more),
+ &wait)) {
+ err = 0;
+ break;
+ }
+ }
+ remove_wait_queue(sk_sleep(sk), &wait);
+
+ sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(af_alg_wait_for_data);
+
+/**
+ * af_alg_data_wakeup - wakeup caller when new data can be sent to kernel
+ *
+ * @sk socket of connection to user space
+ */
+
+void af_alg_data_wakeup(struct sock *sk)
+{
+ struct alg_sock *ask = alg_sk(sk);
+ struct af_alg_ctx *ctx = ask->private;
+ struct socket_wq *wq;
+
+ if (!ctx->used)
+ return;
+
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (skwq_has_sleeper(wq))
+ wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
+ POLLRDNORM |
+ POLLRDBAND);
+ sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(af_alg_data_wakeup);
+
+/**
+ * af_alg_sendmsg - implementation of sendmsg system call handler
+ *
+ * The sendmsg system call handler obtains the user data and stores it
+ * in ctx->tsgl_list. This implies allocation of the required number of
+ * struct af_alg_tsgl.
+ *
+ * In addition, the ctx is filled with the information sent via CMSG.
+ *
+ * @sock socket of connection to user space
+ * @msg message from user space
+ * @size size of message from user space
+ * @ivsize the size of the IV for the cipher operation to verify that the
+ * user-space-provided IV has the right size
+ * @return number of bytes copied upon success, < 0 upon error
+ */
+int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ unsigned int ivsize)
+{
+ struct sock *sk = sock->sk;
+ struct alg_sock *ask = alg_sk(sk);
+ struct af_alg_ctx *ctx = ask->private;
+ struct af_alg_tsgl *sgl;
+ struct af_alg_control con = {};
+ long copied = 0;
+ bool enc = 0;
+ bool init = 0;
+ int err = 0;
+
+ if (msg->msg_controllen) {
+ err = af_alg_cmsg_send(msg, &con);
+ if (err)
+ return err;
+
+ init = 1;
+ switch (con.op) {
+ case ALG_OP_ENCRYPT:
+ enc = 1;
+ break;
+ case ALG_OP_DECRYPT:
+ enc = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (con.iv && con.iv->ivlen != ivsize)
+ return -EINVAL;
+ }
+
+ lock_sock(sk);
+ if (!ctx->more && ctx->used) {
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ if (init) {
+ ctx->enc = enc;
+ if (con.iv)
+ memcpy(ctx->iv, con.iv->iv, ivsize);
+
+ ctx->aead_assoclen = con.aead_assoclen;
+ }
+
+ while (size) {
+ struct scatterlist *sg;
+ size_t len = size;
+ size_t plen;
+
+ /* use the existing memory in an allocated page */
+ if (ctx->merge) {
+ sgl = list_entry(ctx->tsgl_list.prev,
+ struct af_alg_tsgl, list);
+ sg = sgl->sg + sgl->cur - 1;
+ len = min_t(size_t, len,
+ PAGE_SIZE - sg->offset - sg->length);
+
+ err = memcpy_from_msg(page_address(sg_page(sg)) +
+ sg->offset + sg->length,
+ msg, len);
+ if (err)
+ goto unlock;
+
+ sg->length += len;
+ ctx->merge = (sg->offset + sg->length) &
+ (PAGE_SIZE - 1);
+
+ ctx->used += len;
+ copied += len;
+ size -= len;
+ continue;
+ }
+
+ if (!af_alg_writable(sk)) {
+ err = af_alg_wait_for_wmem(sk, msg->msg_flags);
+ if (err)
+ goto unlock;
+ }
+
+ /* allocate a new page */
+ len = min_t(unsigned long, len, af_alg_sndbuf(sk));
+
+ err = af_alg_alloc_tsgl(sk);
+ if (err)
+ goto unlock;
+
+ sgl = list_entry(ctx->tsgl_list.prev, struct af_alg_tsgl,
+ list);
+ sg = sgl->sg;
+ if (sgl->cur)
+ sg_unmark_end(sg + sgl->cur - 1);
+
+ do {
+ unsigned int i = sgl->cur;
+
+ plen = min_t(size_t, len, PAGE_SIZE);
+
+ sg_assign_page(sg + i, alloc_page(GFP_KERNEL));
+ if (!sg_page(sg + i)) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ err = memcpy_from_msg(page_address(sg_page(sg + i)),
+ msg, plen);
+ if (err) {
+ __free_page(sg_page(sg + i));
+ sg_assign_page(sg + i, NULL);
+ goto unlock;
+ }
+
+ sg[i].length = plen;
+ len -= plen;
+ ctx->used += plen;
+ copied += plen;
+ size -= plen;
+ sgl->cur++;
+ } while (len && sgl->cur < MAX_SGL_ENTS);
+
+ if (!size)
+ sg_mark_end(sg + sgl->cur - 1);
+
+ ctx->merge = plen & (PAGE_SIZE - 1);
+ }
+
+ err = 0;
+
+ ctx->more = msg->msg_flags & MSG_MORE;
+
+unlock:
+ af_alg_data_wakeup(sk);
+ release_sock(sk);
+
+ return copied ?: err;
+}
+EXPORT_SYMBOL_GPL(af_alg_sendmsg);
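
A hedged user-space sketch of what feeds this handler: the operation type and IV arrive as SOL_ALG control messages and are parsed by af_alg_cmsg_send() before the data is copied into the TX SGL. Error handling is omitted, opfd is assumed to be the accept()ed operation socket, and an IV of at most 16 bytes is assumed for the buffer sizing; this is illustrative, not taken from the patch.

#include <linux/if_alg.h>
#include <stddef.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static int send_op(int opfd, const void *in, size_t inlen,
		   const unsigned char *iv, size_t ivlen)	/* ivlen <= 16 assumed */
{
	char cbuf[CMSG_SPACE(sizeof(__u32)) +
		  CMSG_SPACE(offsetof(struct af_alg_iv, iv) + 16)] = { 0 };
	struct iovec iov = { .iov_base = (void *)in, .iov_len = inlen };
	struct msghdr msg = {
		.msg_control	= cbuf,
		.msg_controllen	= sizeof(cbuf),
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
	struct af_alg_iv *aiv;

	cmsg->cmsg_level = SOL_ALG;			/* consumed by af_alg_cmsg_send() */
	cmsg->cmsg_type	 = ALG_SET_OP;
	cmsg->cmsg_len	 = CMSG_LEN(sizeof(__u32));
	*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type	 = ALG_SET_IV;
	cmsg->cmsg_len	 = CMSG_LEN(offsetof(struct af_alg_iv, iv) + ivlen);
	aiv = (struct af_alg_iv *)CMSG_DATA(cmsg);
	aiv->ivlen = ivlen;
	memcpy(aiv->iv, iv, ivlen);

	return sendmsg(opfd, &msg, 0);			/* lands in af_alg_sendmsg() */
}
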
+
+/**
+ * af_alg_sendpage - sendpage system call handler
+ *
+ * This is a generic implementation of sendpage to fill ctx->tsgl_list.
+ */
+ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
+ int offset, size_t size, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct alg_sock *ask = alg_sk(sk);
+ struct af_alg_ctx *ctx = ask->private;
+ struct af_alg_tsgl *sgl;
+ int err = -EINVAL;
+
+ if (flags & MSG_SENDPAGE_NOTLAST)
+ flags |= MSG_MORE;
+
+ lock_sock(sk);
+ if (!ctx->more && ctx->used)
+ goto unlock;
+
+ if (!size)
+ goto done;
+
+ if (!af_alg_writable(sk)) {
+ err = af_alg_wait_for_wmem(sk, flags);
+ if (err)
+ goto unlock;
+ }
+
+ err = af_alg_alloc_tsgl(sk);
+ if (err)
+ goto unlock;
+
+ ctx->merge = 0;
+ sgl = list_entry(ctx->tsgl_list.prev, struct af_alg_tsgl, list);
+
+ if (sgl->cur)
+ sg_unmark_end(sgl->sg + sgl->cur - 1);
+
+ sg_mark_end(sgl->sg + sgl->cur);
+
+ get_page(page);
+ sg_set_page(sgl->sg + sgl->cur, page, size, offset);
+ sgl->cur++;
+ ctx->used += size;
+
+done:
+ ctx->more = flags & MSG_MORE;
+
+unlock:
+ af_alg_data_wakeup(sk);
+ release_sock(sk);
+
+ return err ?: size;
+}
+EXPORT_SYMBOL_GPL(af_alg_sendpage);
+
+/**
+ * af_alg_async_cb - AIO callback handler
+ *
+ * This handler cleans up the struct af_alg_async_req upon completion of the
+ * AIO operation.
+ *
+ * The number of bytes to be generated with the AIO operation must be set
+ * in areq->outlen before the AIO callback handler is invoked.
+ */
+void af_alg_async_cb(struct crypto_async_request *_req, int err)
+{
+ struct af_alg_async_req *areq = _req->data;
+ struct sock *sk = areq->sk;
+ struct kiocb *iocb = areq->iocb;
+ unsigned int resultlen;
+
+ lock_sock(sk);
+
+ /* Buffer size written by crypto operation. */
+ resultlen = areq->outlen;
+
+ af_alg_free_areq_sgls(areq);
+ sock_kfree_s(sk, areq, areq->areqlen);
+ __sock_put(sk);
+
+ iocb->ki_complete(iocb, err ? err : resultlen, 0);
+
+ release_sock(sk);
+}
+EXPORT_SYMBOL_GPL(af_alg_async_cb);
+
+/**
+ * af_alg_poll - poll system call handler
+ */
+unsigned int af_alg_poll(struct file *file, struct socket *sock,
+ poll_table *wait)
+{
+ struct sock *sk = sock->sk;
+ struct alg_sock *ask = alg_sk(sk);
+ struct af_alg_ctx *ctx = ask->private;
+ unsigned int mask;
+
+ sock_poll_wait(file, sk_sleep(sk), wait);
+ mask = 0;
+
+ if (!ctx->more || ctx->used)
+ mask |= POLLIN | POLLRDNORM;
+
+ if (af_alg_writable(sk))
+ mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+
+ return mask;
+}
+EXPORT_SYMBOL_GPL(af_alg_poll);
+
+/**
+ * af_alg_alloc_areq - allocate struct af_alg_async_req
+ *
+ * @sk socket of connection to user space
+ * @areqlen size of struct af_alg_async_req + crypto_*_reqsize
+ * @return allocated data structure or ERR_PTR upon error
+ */
+struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
+ unsigned int areqlen)
+{
+ struct af_alg_async_req *areq = sock_kmalloc(sk, areqlen, GFP_KERNEL);
+
+ if (unlikely(!areq))
+ return ERR_PTR(-ENOMEM);
+
+ areq->areqlen = areqlen;
+ areq->sk = sk;
+ areq->last_rsgl = NULL;
+ INIT_LIST_HEAD(&areq->rsgl_list);
+ areq->tsgl = NULL;
+ areq->tsgl_entries = 0;
+
+ return areq;
+}
+EXPORT_SYMBOL_GPL(af_alg_alloc_areq);
+
+/**
+ * af_alg_get_rsgl - create the RX SGL for the output data from the crypto
+ * operation
+ *
+ * @sk socket of connection to user space
+ * @msg user space message
+ * @flags flags used to invoke recvmsg with
+ * @areq instance of the cryptographic request that will hold the RX SGL
+ * @maxsize maximum number of bytes to be pulled from user space
+ * @outlen number of bytes in the RX SGL
+ * @return 0 on success, < 0 upon error
+ */
+int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
+ struct af_alg_async_req *areq, size_t maxsize,
+ size_t *outlen)
+{
+ struct alg_sock *ask = alg_sk(sk);
+ struct af_alg_ctx *ctx = ask->private;
+ size_t len = 0;
+
+ while (maxsize > len && msg_data_left(msg)) {
+ struct af_alg_rsgl *rsgl;
+ size_t seglen;
+ int err;
+
+ /* limit the amount of readable buffers */
+ if (!af_alg_readable(sk))
+ break;
+
+ if (!ctx->used) {
+ err = af_alg_wait_for_data(sk, flags);
+ if (err)
+ return err;
+ }
+
+ seglen = min_t(size_t, (maxsize - len),
+ msg_data_left(msg));
+
+ if (list_empty(&areq->rsgl_list)) {
+ rsgl = &areq->first_rsgl;
+ } else {
+ rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
+ if (unlikely(!rsgl))
+ return -ENOMEM;
+ }
+
+ rsgl->sgl.npages = 0;
+ list_add_tail(&rsgl->list, &areq->rsgl_list);
+
+ /* make one iovec available as scatterlist */
+ err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
+ if (err < 0)
+ return err;
+
+ /* chain the new scatterlist with previous one */
+ if (areq->last_rsgl)
+ af_alg_link_sg(&areq->last_rsgl->sgl, &rsgl->sgl);
+
+ areq->last_rsgl = rsgl;
+ len += err;
+ ctx->rcvused += err;
+ rsgl->sg_num_bytes = err;
+ iov_iter_advance(&msg->msg_iter, err);
+ }
+
+ *outlen = len;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(af_alg_get_rsgl);
+
static int __init af_alg_init(void)
{
int err = proto_register(&alg_proto, 0);
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 826cd7ab4d4a..5e8666e6ccae 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -588,6 +588,35 @@ int crypto_unregister_ahash(struct ahash_alg *alg)
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);
+int crypto_register_ahashes(struct ahash_alg *algs, int count)
+{
+ int i, ret;
+
+ for (i = 0; i < count; i++) {
+ ret = crypto_register_ahash(&algs[i]);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ for (--i; i >= 0; --i)
+ crypto_unregister_ahash(&algs[i]);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_register_ahashes);
+
+void crypto_unregister_ahashes(struct ahash_alg *algs, int count)
+{
+ int i;
+
+ for (i = count - 1; i >= 0; --i)
+ crypto_unregister_ahash(&algs[i]);
+}
+EXPORT_SYMBOL_GPL(crypto_unregister_ahashes);
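
The new pair is intended to replace the open-coded register/unwind loops hash drivers carry today; a minimal, hypothetical caller (my_ahash_algs and the module names are illustrative):

#include <crypto/internal/hash.h>
#include <linux/module.h>

static struct ahash_alg my_ahash_algs[2];	/* .halg, .init, .update, ... filled elsewhere */

static int __init my_hash_mod_init(void)
{
	return crypto_register_ahashes(my_ahash_algs, ARRAY_SIZE(my_ahash_algs));
}
module_init(my_hash_mod_init);

static void __exit my_hash_mod_exit(void)
{
	crypto_unregister_ahashes(my_ahash_algs, ARRAY_SIZE(my_ahash_algs));
}
module_exit(my_hash_mod_exit);
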
+
int ahash_register_instance(struct crypto_template *tmpl,
struct ahash_instance *inst)
{
diff --git a/crypto/algapi.c b/crypto/algapi.c
index e4cc7615a139..aa699ff6c876 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -975,13 +975,15 @@ void crypto_inc(u8 *a, unsigned int size)
}
EXPORT_SYMBOL_GPL(crypto_inc);
-void __crypto_xor(u8 *dst, const u8 *src, unsigned int len)
+void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int len)
{
int relalign = 0;
if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
int size = sizeof(unsigned long);
- int d = ((unsigned long)dst ^ (unsigned long)src) & (size - 1);
+ int d = (((unsigned long)dst ^ (unsigned long)src1) |
+ ((unsigned long)dst ^ (unsigned long)src2)) &
+ (size - 1);
relalign = d ? 1 << __ffs(d) : size;
@@ -992,34 +994,37 @@ void __crypto_xor(u8 *dst, const u8 *src, unsigned int len)
* process the remainder of the input using optimal strides.
*/
while (((unsigned long)dst & (relalign - 1)) && len > 0) {
- *dst++ ^= *src++;
+ *dst++ = *src1++ ^ *src2++;
len--;
}
}
while (IS_ENABLED(CONFIG_64BIT) && len >= 8 && !(relalign & 7)) {
- *(u64 *)dst ^= *(u64 *)src;
+ *(u64 *)dst = *(u64 *)src1 ^ *(u64 *)src2;
dst += 8;
- src += 8;
+ src1 += 8;
+ src2 += 8;
len -= 8;
}
while (len >= 4 && !(relalign & 3)) {
- *(u32 *)dst ^= *(u32 *)src;
+ *(u32 *)dst = *(u32 *)src1 ^ *(u32 *)src2;
dst += 4;
- src += 4;
+ src1 += 4;
+ src2 += 4;
len -= 4;
}
while (len >= 2 && !(relalign & 1)) {
- *(u16 *)dst ^= *(u16 *)src;
+ *(u16 *)dst = *(u16 *)src1 ^ *(u16 *)src2;
dst += 2;
- src += 2;
+ src1 += 2;
+ src2 += 2;
len -= 2;
}
while (len--)
- *dst++ ^= *src++;
+ *dst++ = *src1++ ^ *src2++;
}
EXPORT_SYMBOL_GPL(__crypto_xor);
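
With a second source operand, callers can XOR into a distinct destination without first copying one input over the other; the previous behaviour is simply the dst == src1 case. Two illustrative calls (not from the patch):

u8 ks[16], ct[16], pt[16];

__crypto_xor(pt, ct, ks, sizeof(pt));	/* out-of-place: pt = ct ^ ks */
__crypto_xor(pt, pt, ks, sizeof(pt));	/* the old in-place form */
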
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index be117495eb43..516b38c3a169 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -5,88 +5,56 @@
*
* This file provides the user-space API for AEAD ciphers.
*
- * This file is derived from algif_skcipher.c.
- *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
+ *
+ * The following concept of the memory management is used:
+ *
+ * The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is
+ * filled by user space with the data submitted via sendpage/sendmsg. Filling
+ * up the TX SGL does not cause a crypto operation -- the data will only be
+ * tracked by the kernel. Upon receipt of one recvmsg call, the caller must
+ * provide a buffer which is tracked with the RX SGL.
+ *
+ * During the processing of the recvmsg operation, the cipher request is
+ * allocated and prepared. As part of the recvmsg operation, the processed
+ * TX buffers are extracted from the TX SGL into a separate SGL.
+ *
+ * After the completion of the crypto operation, the RX SGL and the cipher
+ * request are released. The extracted TX SGL parts are released together with
+ * the RX SGL release.
*/
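
To connect this comment to the other end of the interface, a hedged user-space sketch of how such an AEAD socket is set up before the sendmsg/recvmsg cycle described above; error handling is omitted and "gcm(aes)" is only an example algorithm name:

#include <linux/if_alg.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int open_aead(const unsigned char *key, size_t keylen, unsigned int taglen)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "aead",
		.salg_name   = "gcm(aes)",
	};
	int tfmfd, opfd;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, keylen);
	setsockopt(tfmfd, SOL_ALG, ALG_SET_AEAD_AUTHSIZE, NULL, taglen);
	opfd = accept(tfmfd, NULL, 0);	/* per-operation socket backing ask->private */
	close(tfmfd);
	return opfd;	/* sendmsg() fills the TX SGL, recvmsg() runs the cipher */
}
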
#include <crypto/internal/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/if_alg.h>
+#include <crypto/skcipher.h>
+#include <crypto/null.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
-#include <linux/sched/signal.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>
-struct aead_sg_list {
- unsigned int cur;
- struct scatterlist sg[ALG_MAX_PAGES];
-};
-
-struct aead_async_rsgl {
- struct af_alg_sgl sgl;
- struct list_head list;
-};
-
-struct aead_async_req {
- struct scatterlist *tsgl;
- struct aead_async_rsgl first_rsgl;
- struct list_head list;
- struct kiocb *iocb;
- struct sock *sk;
- unsigned int tsgls;
- char iv[];
-};
-
struct aead_tfm {
struct crypto_aead *aead;
bool has_key;
+ struct crypto_skcipher *null_tfm;
};
-struct aead_ctx {
- struct aead_sg_list tsgl;
- struct aead_async_rsgl first_rsgl;
- struct list_head list;
-
- void *iv;
-
- struct af_alg_completion completion;
-
- unsigned long used;
-
- unsigned int len;
- bool more;
- bool merge;
- bool enc;
-
- size_t aead_assoclen;
- struct aead_request aead_req;
-};
-
-static inline int aead_sndbuf(struct sock *sk)
+static inline bool aead_sufficient_data(struct sock *sk)
{
struct alg_sock *ask = alg_sk(sk);
- struct aead_ctx *ctx = ask->private;
-
- return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
- ctx->used, 0);
-}
-
-static inline bool aead_writable(struct sock *sk)
-{
- return PAGE_SIZE <= aead_sndbuf(sk);
-}
-
-static inline bool aead_sufficient_data(struct aead_ctx *ctx)
-{
- unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
+ struct sock *psk = ask->parent;
+ struct alg_sock *pask = alg_sk(psk);
+ struct af_alg_ctx *ctx = ask->private;
+ struct aead_tfm *aeadc = pask->private;
+ struct crypto_aead *tfm = aeadc->aead;
+ unsigned int as = crypto_aead_authsize(tfm);
/*
* The minimum amount of memory needed for an AEAD cipher is
@@ -95,484 +63,58 @@ static inline bool aead_sufficient_data(struct aead_ctx *ctx)
return ctx->used >= ctx->aead_assoclen + (ctx->enc ? 0 : as);
}
-static void aead_reset_ctx(struct aead_ctx *ctx)
-{
- struct aead_sg_list *sgl = &ctx->tsgl;
-
- sg_init_table(sgl->sg, ALG_MAX_PAGES);
- sgl->cur = 0;
- ctx->used = 0;
- ctx->more = 0;
- ctx->merge = 0;
-}
-
-static void aead_put_sgl(struct sock *sk)
-{
- struct alg_sock *ask = alg_sk(sk);
- struct aead_ctx *ctx = ask->private;
- struct aead_sg_list *sgl = &ctx->tsgl;
- struct scatterlist *sg = sgl->sg;
- unsigned int i;
-
- for (i = 0; i < sgl->cur; i++) {
- if (!sg_page(sg + i))
- continue;
-
- put_page(sg_page(sg + i));
- sg_assign_page(sg + i, NULL);
- }
- aead_reset_ctx(ctx);
-}
-
-static void aead_wmem_wakeup(struct sock *sk)
-{
- struct socket_wq *wq;
-
- if (!aead_writable(sk))
- return;
-
- rcu_read_lock();
- wq = rcu_dereference(sk->sk_wq);
- if (skwq_has_sleeper(wq))
- wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
- POLLRDNORM |
- POLLRDBAND);
- sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
- rcu_read_unlock();
-}
-
-static int aead_wait_for_data(struct sock *sk, unsigned flags)
-{
- DEFINE_WAIT_FUNC(wait, woken_wake_function);
- struct alg_sock *ask = alg_sk(sk);
- struct aead_ctx *ctx = ask->private;
- long timeout;
- int err = -ERESTARTSYS;
-
- if (flags & MSG_DONTWAIT)
- return -EAGAIN;
-
- sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
- add_wait_queue(sk_sleep(sk), &wait);
- for (;;) {
- if (signal_pending(current))
- break;
- timeout = MAX_SCHEDULE_TIMEOUT;
- if (sk_wait_event(sk, &timeout, !ctx->more, &wait)) {
- err = 0;
- break;
- }
- }
- remove_wait_queue(sk_sleep(sk), &wait);
-
- sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
-
- return err;
-}
-
-static void aead_data_wakeup(struct sock *sk)
-{
- struct alg_sock *ask = alg_sk(sk);
- struct aead_ctx *ctx = ask->private;
- struct socket_wq *wq;
-
- if (ctx->more)
- return;
- if (!ctx->used)
- return;
-
- rcu_read_lock();
- wq = rcu_dereference(sk->sk_wq);
- if (skwq_has_sleeper(wq))
- wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
- POLLRDNORM |
- POLLRDBAND);
- sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
- rcu_read_unlock();
-}
-
static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
- struct aead_ctx *ctx = ask->private;
- unsigned ivsize =
- crypto_aead_ivsize(crypto_aead_reqtfm(&ctx->aead_req));
- struct aead_sg_list *sgl = &ctx->tsgl;
- struct af_alg_control con = {};
- long copied = 0;
- bool enc = 0;
- bool init = 0;
- int err = -EINVAL;
-
- if (msg->msg_controllen) {
- err = af_alg_cmsg_send(msg, &con);
- if (err)
- return err;
-
- init = 1;
- switch (con.op) {
- case ALG_OP_ENCRYPT:
- enc = 1;
- break;
- case ALG_OP_DECRYPT:
- enc = 0;
- break;
- default:
- return -EINVAL;
- }
-
- if (con.iv && con.iv->ivlen != ivsize)
- return -EINVAL;
- }
-
- lock_sock(sk);
- if (!ctx->more && ctx->used)
- goto unlock;
-
- if (init) {
- ctx->enc = enc;
- if (con.iv)
- memcpy(ctx->iv, con.iv->iv, ivsize);
-
- ctx->aead_assoclen = con.aead_assoclen;
- }
-
- while (size) {
- size_t len = size;
- struct scatterlist *sg = NULL;
-
- /* use the existing memory in an allocated page */
- if (ctx->merge) {
- sg = sgl->sg + sgl->cur - 1;
- len = min_t(unsigned long, len,
- PAGE_SIZE - sg->offset - sg->length);
- err = memcpy_from_msg(page_address(sg_page(sg)) +
- sg->offset + sg->length,
- msg, len);
- if (err)
- goto unlock;
-
- sg->length += len;
- ctx->merge = (sg->offset + sg->length) &
- (PAGE_SIZE - 1);
-
- ctx->used += len;
- copied += len;
- size -= len;
- continue;
- }
-
- if (!aead_writable(sk)) {
- /* user space sent too much data */
- aead_put_sgl(sk);
- err = -EMSGSIZE;
- goto unlock;
- }
-
- /* allocate a new page */
- len = min_t(unsigned long, size, aead_sndbuf(sk));
- while (len) {
- size_t plen = 0;
-
- if (sgl->cur >= ALG_MAX_PAGES) {
- aead_put_sgl(sk);
- err = -E2BIG;
- goto unlock;
- }
-
- sg = sgl->sg + sgl->cur;
- plen = min_t(size_t, len, PAGE_SIZE);
-
- sg_assign_page(sg, alloc_page(GFP_KERNEL));
- err = -ENOMEM;
- if (!sg_page(sg))
- goto unlock;
-
- err = memcpy_from_msg(page_address(sg_page(sg)),
- msg, plen);
- if (err) {
- __free_page(sg_page(sg));
- sg_assign_page(sg, NULL);
- goto unlock;
- }
-
- sg->offset = 0;
- sg->length = plen;
- len -= plen;
- ctx->used += plen;
- copied += plen;
- sgl->cur++;
- size -= plen;
- ctx->merge = plen & (PAGE_SIZE - 1);
- }
- }
-
- err = 0;
-
- ctx->more = msg->msg_flags & MSG_MORE;
- if (!ctx->more && !aead_sufficient_data(ctx)) {
- aead_put_sgl(sk);
- err = -EMSGSIZE;
- }
-
-unlock:
- aead_data_wakeup(sk);
- release_sock(sk);
-
- return err ?: copied;
-}
-
-static ssize_t aead_sendpage(struct socket *sock, struct page *page,
- int offset, size_t size, int flags)
-{
- struct sock *sk = sock->sk;
- struct alg_sock *ask = alg_sk(sk);
- struct aead_ctx *ctx = ask->private;
- struct aead_sg_list *sgl = &ctx->tsgl;
- int err = -EINVAL;
-
- if (flags & MSG_SENDPAGE_NOTLAST)
- flags |= MSG_MORE;
-
- if (sgl->cur >= ALG_MAX_PAGES)
- return -E2BIG;
-
- lock_sock(sk);
- if (!ctx->more && ctx->used)
- goto unlock;
-
- if (!size)
- goto done;
-
- if (!aead_writable(sk)) {
- /* user space sent too much data */
- aead_put_sgl(sk);
- err = -EMSGSIZE;
- goto unlock;
- }
-
- ctx->merge = 0;
-
- get_page(page);
- sg_set_page(sgl->sg + sgl->cur, page, size, offset);
- sgl->cur++;
- ctx->used += size;
-
- err = 0;
-
-done:
- ctx->more = flags & MSG_MORE;
- if (!ctx->more && !aead_sufficient_data(ctx)) {
- aead_put_sgl(sk);
- err = -EMSGSIZE;
- }
+ struct sock *psk = ask->parent;
+ struct alg_sock *pask = alg_sk(psk);
+ struct aead_tfm *aeadc = pask->private;
+ struct crypto_aead *tfm = aeadc->aead;
+ unsigned int ivsize = crypto_aead_ivsize(tfm);
-unlock:
- aead_data_wakeup(sk);
- release_sock(sk);
-
- return err ?: size;
+ return af_alg_sendmsg(sock, msg, size, ivsize);
}
-#define GET_ASYM_REQ(req, tfm) (struct aead_async_req *) \
- ((char *)req + sizeof(struct aead_request) + \
- crypto_aead_reqsize(tfm))
-
- #define GET_REQ_SIZE(tfm) sizeof(struct aead_async_req) + \
- crypto_aead_reqsize(tfm) + crypto_aead_ivsize(tfm) + \
- sizeof(struct aead_request)
-
-static void aead_async_cb(struct crypto_async_request *_req, int err)
+static int crypto_aead_copy_sgl(struct crypto_skcipher *null_tfm,
+ struct scatterlist *src,
+ struct scatterlist *dst, unsigned int len)
{
- struct aead_request *req = _req->data;
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct aead_async_req *areq = GET_ASYM_REQ(req, tfm);
- struct sock *sk = areq->sk;
- struct scatterlist *sg = areq->tsgl;
- struct aead_async_rsgl *rsgl;
- struct kiocb *iocb = areq->iocb;
- unsigned int i, reqlen = GET_REQ_SIZE(tfm);
-
- list_for_each_entry(rsgl, &areq->list, list) {
- af_alg_free_sg(&rsgl->sgl);
- if (rsgl != &areq->first_rsgl)
- sock_kfree_s(sk, rsgl, sizeof(*rsgl));
- }
+ SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm);
- for (i = 0; i < areq->tsgls; i++)
- put_page(sg_page(sg + i));
+ skcipher_request_set_tfm(skreq, null_tfm);
+ skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ NULL, NULL);
+ skcipher_request_set_crypt(skreq, src, dst, len, NULL);
- sock_kfree_s(sk, areq->tsgl, sizeof(*areq->tsgl) * areq->tsgls);
- sock_kfree_s(sk, req, reqlen);
- __sock_put(sk);
- iocb->ki_complete(iocb, err, err);
+ return crypto_skcipher_encrypt(skreq);
}
-static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
- int flags)
+static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
+ size_t ignored, int flags)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
- struct aead_ctx *ctx = ask->private;
- struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
- struct aead_async_req *areq;
- struct aead_request *req = NULL;
- struct aead_sg_list *sgl = &ctx->tsgl;
- struct aead_async_rsgl *last_rsgl = NULL, *rsgl;
+ struct sock *psk = ask->parent;
+ struct alg_sock *pask = alg_sk(psk);
+ struct af_alg_ctx *ctx = ask->private;
+ struct aead_tfm *aeadc = pask->private;
+ struct crypto_aead *tfm = aeadc->aead;
+ struct crypto_skcipher *null_tfm = aeadc->null_tfm;
unsigned int as = crypto_aead_authsize(tfm);
- unsigned int i, reqlen = GET_REQ_SIZE(tfm);
- int err = -ENOMEM;
- unsigned long used;
- size_t outlen = 0;
- size_t usedpages = 0;
-
- lock_sock(sk);
- if (ctx->more) {
- err = aead_wait_for_data(sk, flags);
- if (err)
- goto unlock;
- }
-
- if (!aead_sufficient_data(ctx))
- goto unlock;
-
- used = ctx->used;
- if (ctx->enc)
- outlen = used + as;
- else
- outlen = used - as;
-
- req = sock_kmalloc(sk, reqlen, GFP_KERNEL);
- if (unlikely(!req))
- goto unlock;
-
- areq = GET_ASYM_REQ(req, tfm);
- memset(&areq->first_rsgl, '\0', sizeof(areq->first_rsgl));
- INIT_LIST_HEAD(&areq->list);
- areq->iocb = msg->msg_iocb;
- areq->sk = sk;
- memcpy(areq->iv, ctx->iv, crypto_aead_ivsize(tfm));
- aead_request_set_tfm(req, tfm);
- aead_request_set_ad(req, ctx->aead_assoclen);
- aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- aead_async_cb, req);
- used -= ctx->aead_assoclen;
-
- /* take over all tx sgls from ctx */
- areq->tsgl = sock_kmalloc(sk,
- sizeof(*areq->tsgl) * max_t(u32, sgl->cur, 1),
- GFP_KERNEL);
- if (unlikely(!areq->tsgl))
- goto free;
-
- sg_init_table(areq->tsgl, max_t(u32, sgl->cur, 1));
- for (i = 0; i < sgl->cur; i++)
- sg_set_page(&areq->tsgl[i], sg_page(&sgl->sg[i]),
- sgl->sg[i].length, sgl->sg[i].offset);
-
- areq->tsgls = sgl->cur;
-
- /* create rx sgls */
- while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) {
- size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
- (outlen - usedpages));
-
- if (list_empty(&areq->list)) {
- rsgl = &areq->first_rsgl;
-
- } else {
- rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
- if (unlikely(!rsgl)) {
- err = -ENOMEM;
- goto free;
- }
- }
- rsgl->sgl.npages = 0;
- list_add_tail(&rsgl->list, &areq->list);
-
- /* make one iovec available as scatterlist */
- err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
- if (err < 0)
- goto free;
-
- usedpages += err;
-
- /* chain the new scatterlist with previous one */
- if (last_rsgl)
- af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);
-
- last_rsgl = rsgl;
-
- iov_iter_advance(&msg->msg_iter, err);
- }
-
- /* ensure output buffer is sufficiently large */
- if (usedpages < outlen) {
- err = -EINVAL;
- goto unlock;
- }
-
- aead_request_set_crypt(req, areq->tsgl, areq->first_rsgl.sgl.sg, used,
- areq->iv);
- err = ctx->enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
- if (err) {
- if (err == -EINPROGRESS) {
- sock_hold(sk);
- err = -EIOCBQUEUED;
- aead_reset_ctx(ctx);
- goto unlock;
- } else if (err == -EBADMSG) {
- aead_put_sgl(sk);
- }
- goto free;
- }
- aead_put_sgl(sk);
-
-free:
- list_for_each_entry(rsgl, &areq->list, list) {
- af_alg_free_sg(&rsgl->sgl);
- if (rsgl != &areq->first_rsgl)
- sock_kfree_s(sk, rsgl, sizeof(*rsgl));
- }
- if (areq->tsgl)
- sock_kfree_s(sk, areq->tsgl, sizeof(*areq->tsgl) * areq->tsgls);
- if (req)
- sock_kfree_s(sk, req, reqlen);
-unlock:
- aead_wmem_wakeup(sk);
- release_sock(sk);
- return err ? err : outlen;
-}
-
-static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
-{
- struct sock *sk = sock->sk;
- struct alg_sock *ask = alg_sk(sk);
- struct aead_ctx *ctx = ask->private;
- unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
- struct aead_sg_list *sgl = &ctx->tsgl;
- struct aead_async_rsgl *last_rsgl = NULL;
- struct aead_async_rsgl *rsgl, *tmp;
- int err = -EINVAL;
- unsigned long used = 0;
- size_t outlen = 0;
- size_t usedpages = 0;
-
- lock_sock(sk);
+ struct af_alg_async_req *areq;
+ struct af_alg_tsgl *tsgl;
+ struct scatterlist *src;
+ int err = 0;
+ size_t used = 0; /* [in] TX bufs to be en/decrypted */
+ size_t outlen = 0; /* [out] RX bufs produced by kernel */
+ size_t usedpages = 0; /* [in] RX bufs to be used from user */
+ size_t processed = 0; /* [in] TX bufs to be consumed */
/*
- * Please see documentation of aead_request_set_crypt for the
- * description of the AEAD memory structure expected from the caller.
+ * Data length provided by caller via sendmsg/sendpage that has not
+ * yet been processed.
*/
-
- if (ctx->more) {
- err = aead_wait_for_data(sk, flags);
- if (err)
- goto unlock;
- }
-
- /* data length provided by caller via sendmsg/sendpage */
used = ctx->used;
/*
@@ -584,8 +126,8 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
* the error message in sendmsg/sendpage and still call recvmsg. This
* check here protects the kernel integrity.
*/
- if (!aead_sufficient_data(ctx))
- goto unlock;
+ if (!aead_sufficient_data(sk))
+ return -EINVAL;
/*
* Calculate the minimum output buffer size holding the result of the
@@ -606,104 +148,191 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
*/
used -= ctx->aead_assoclen;
- /* convert iovecs of output buffers into scatterlists */
- while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) {
- size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
- (outlen - usedpages));
-
- if (list_empty(&ctx->list)) {
- rsgl = &ctx->first_rsgl;
- } else {
- rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
- if (unlikely(!rsgl)) {
- err = -ENOMEM;
- goto unlock;
- }
- }
- rsgl->sgl.npages = 0;
- list_add_tail(&rsgl->list, &ctx->list);
+ /* Allocate cipher request for current operation. */
+ areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) +
+ crypto_aead_reqsize(tfm));
+ if (IS_ERR(areq))
+ return PTR_ERR(areq);
- /* make one iovec available as scatterlist */
- err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
- if (err < 0)
- goto unlock;
- usedpages += err;
- /* chain the new scatterlist with previous one */
- if (last_rsgl)
- af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);
+ /* convert iovecs of output buffers into RX SGL */
+ err = af_alg_get_rsgl(sk, msg, flags, areq, outlen, &usedpages);
+ if (err)
+ goto free;
- last_rsgl = rsgl;
+ /*
+ * Ensure output buffer is sufficiently large. If the caller provides
+ * less buffer space, only use the relative required input size. This
+ * allows AIO operation where the caller sent all data to be processed
+ * and the AIO operation performs the operation on the different chunks
+ * of the input data.
+ */
+ if (usedpages < outlen) {
+ size_t less = outlen - usedpages;
- iov_iter_advance(&msg->msg_iter, err);
+ if (used < less) {
+ err = -EINVAL;
+ goto free;
+ }
+ used -= less;
+ outlen -= less;
}
- /* ensure output buffer is sufficiently large */
- if (usedpages < outlen) {
- err = -EINVAL;
- goto unlock;
- }
+ processed = used + ctx->aead_assoclen;
+ tsgl = list_first_entry(&ctx->tsgl_list, struct af_alg_tsgl, list);
- sg_mark_end(sgl->sg + sgl->cur - 1);
- aead_request_set_crypt(&ctx->aead_req, sgl->sg, ctx->first_rsgl.sgl.sg,
- used, ctx->iv);
- aead_request_set_ad(&ctx->aead_req, ctx->aead_assoclen);
+ /*
+ * Copy of AAD from source to destination
+ *
+ * The AAD is copied to the destination buffer without change. Even
+ * when user space uses an in-place cipher operation, the kernel
+ * will copy the data as it does not see whether such in-place operation
+ * is initiated.
+ *
+ * To ensure efficiency, the following implementation ensures that the
+ * ciphers are invoked to perform a crypto operation in-place. This
+ * is achieved by memory management specified as follows.
+ */
- err = af_alg_wait_for_completion(ctx->enc ?
- crypto_aead_encrypt(&ctx->aead_req) :
- crypto_aead_decrypt(&ctx->aead_req),
- &ctx->completion);
+ /* Use the RX SGL as source (and destination) for crypto op. */
+ src = areq->first_rsgl.sgl.sg;
+
+ if (ctx->enc) {
+ /*
+ * Encryption operation - The in-place cipher operation is
+ * achieved by the following operation:
+ *
+ * TX SGL: AAD || PT
+ * | |
+ * | copy |
+ * v v
+ * RX SGL: AAD || PT || Tag
+ */
+ err = crypto_aead_copy_sgl(null_tfm, tsgl->sg,
+ areq->first_rsgl.sgl.sg, processed);
+ if (err)
+ goto free;
+ af_alg_pull_tsgl(sk, processed, NULL, 0);
+ } else {
+ /*
+ * Decryption operation - To achieve an in-place cipher
+ * operation, the following SGL structure is used:
+ *
+ * TX SGL: AAD || CT || Tag
+ * | | ^
+ * | copy | | Create SGL link.
+ * v v |
+ * RX SGL: AAD || CT ----+
+ */
+
+ /* Copy AAD || CT to RX SGL buffer for in-place operation. */
+ err = crypto_aead_copy_sgl(null_tfm, tsgl->sg,
+ areq->first_rsgl.sgl.sg, outlen);
+ if (err)
+ goto free;
- if (err) {
- /* EBADMSG implies a valid cipher operation took place */
- if (err == -EBADMSG)
- aead_put_sgl(sk);
+ /* Create TX SGL for tag and chain it to RX SGL. */
+ areq->tsgl_entries = af_alg_count_tsgl(sk, processed,
+ processed - as);
+ if (!areq->tsgl_entries)
+ areq->tsgl_entries = 1;
+ areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) *
+ areq->tsgl_entries,
+ GFP_KERNEL);
+ if (!areq->tsgl) {
+ err = -ENOMEM;
+ goto free;
+ }
+ sg_init_table(areq->tsgl, areq->tsgl_entries);
+
+ /* Release TX SGL, except for tag data and reassign tag data. */
+ af_alg_pull_tsgl(sk, processed, areq->tsgl, processed - as);
+
+ /* chain the areq TX SGL holding the tag with RX SGL */
+ if (usedpages) {
+ /* RX SGL present */
+ struct af_alg_sgl *sgl_prev = &areq->last_rsgl->sgl;
+
+ sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1);
+ sg_chain(sgl_prev->sg, sgl_prev->npages + 1,
+ areq->tsgl);
+ } else
+ /* no RX SGL present (e.g. authentication only) */
+ src = areq->tsgl;
+ }
- goto unlock;
+ /* Initialize the crypto operation */
+ aead_request_set_crypt(&areq->cra_u.aead_req, src,
+ areq->first_rsgl.sgl.sg, used, ctx->iv);
+ aead_request_set_ad(&areq->cra_u.aead_req, ctx->aead_assoclen);
+ aead_request_set_tfm(&areq->cra_u.aead_req, tfm);
+
+ if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
+ /* AIO operation */
+ areq->iocb = msg->msg_iocb;
+ aead_request_set_callback(&areq->cra_u.aead_req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+ af_alg_async_cb, areq);
+ err = ctx->enc ? crypto_aead_encrypt(&areq->cra_u.aead_req) :
+ crypto_aead_decrypt(&areq->cra_u.aead_req);
+ } else {
+ /* Synchronous operation */
+ aead_request_set_callback(&areq->cra_u.aead_req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+ af_alg_complete, &ctx->completion);
+ err = af_alg_wait_for_completion(ctx->enc ?
+ crypto_aead_encrypt(&areq->cra_u.aead_req) :
+ crypto_aead_decrypt(&areq->cra_u.aead_req),
+ &ctx->completion);
}
- aead_put_sgl(sk);
- err = 0;
+ /* AIO operation in progress */
+ if (err == -EINPROGRESS) {
+ sock_hold(sk);
-unlock:
- list_for_each_entry_safe(rsgl, tmp, &ctx->list, list) {
- af_alg_free_sg(&rsgl->sgl);
- list_del(&rsgl->list);
- if (rsgl != &ctx->first_rsgl)
- sock_kfree_s(sk, rsgl, sizeof(*rsgl));
+ /* Remember output size that will be generated. */
+ areq->outlen = outlen;
+
+ return -EIOCBQUEUED;
}
- INIT_LIST_HEAD(&ctx->list);
- aead_wmem_wakeup(sk);
- release_sock(sk);
- return err ? err : outlen;
-}
+free:
+ af_alg_free_areq_sgls(areq);
+ sock_kfree_s(sk, areq, areq->areqlen);
-static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored,
- int flags)
-{
- return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ?
- aead_recvmsg_async(sock, msg, flags) :
- aead_recvmsg_sync(sock, msg, flags);
+ return err ? err : outlen;
}
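The TX/RX SGL handling above is driven entirely from user space through the AF_ALG socket interface. The following stand-alone sketch shows one way the new recvmsg path could be exercised for encryption; it is illustrative only and not part of the patch. The algorithm name "gcm(aes)", the all-zero key and IV, and the 8-byte AAD / 16-byte plaintext sizes are arbitrary assumptions, error handling is omitted for brevity, and CONFIG_CRYPTO_USER_API_AEAD is assumed to be enabled.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type = "aead",
		.salg_name = "gcm(aes)",
	};
	unsigned char key[16] = { 0 };		/* demo key (assumption) */
	unsigned char in[8 + 16] = { 0 };	/* AAD || PT, fills the TX SGL */
	unsigned char out[8 + 16 + 16];		/* AAD || CT || tag, the RX SGL */
	char cbuf[CMSG_SPACE(4) + CMSG_SPACE(4) +
		  CMSG_SPACE(sizeof(struct af_alg_iv) + 12)] = { 0 };
	struct msghdr msg = { 0 };
	struct iovec iov = { .iov_base = in, .iov_len = sizeof(in) };
	struct cmsghdr *cmsg;
	struct af_alg_iv *ivp;
	int tfmfd, opfd;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
	opfd = accept(tfmfd, NULL, 0);

	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_OP;
	cmsg->cmsg_len = CMSG_LEN(4);
	*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_AEAD_ASSOCLEN;
	cmsg->cmsg_len = CMSG_LEN(4);
	*(__u32 *)CMSG_DATA(cmsg) = 8;		/* AAD length */

	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_IV;
	cmsg->cmsg_len = CMSG_LEN(sizeof(struct af_alg_iv) + 12);
	ivp = (struct af_alg_iv *)CMSG_DATA(cmsg);
	ivp->ivlen = 12;			/* GCM nonce size */
	memset(ivp->iv, 0, 12);

	/* sendmsg() only queues AAD || PT in the TX SGL ... */
	sendmsg(opfd, &msg, 0);
	/* ... recvmsg()/read() allocates the request and runs the cipher. */
	read(opfd, out, sizeof(out));

	printf("received %zu bytes (AAD || CT || tag)\n", sizeof(out));
	close(opfd);
	close(tfmfd);
	return 0;
}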
-static unsigned int aead_poll(struct file *file, struct socket *sock,
- poll_table *wait)
+static int aead_recvmsg(struct socket *sock, struct msghdr *msg,
+ size_t ignored, int flags)
{
struct sock *sk = sock->sk;
- struct alg_sock *ask = alg_sk(sk);
- struct aead_ctx *ctx = ask->private;
- unsigned int mask;
-
- sock_poll_wait(file, sk_sleep(sk), wait);
- mask = 0;
+ int ret = 0;
- if (!ctx->more)
- mask |= POLLIN | POLLRDNORM;
+ lock_sock(sk);
+ while (msg_data_left(msg)) {
+ int err = _aead_recvmsg(sock, msg, ignored, flags);
+
+ /*
+ * This error covers -EIOCBQUEUED which implies that we can
+ * only handle one AIO request. If the caller wants to have
+ * multiple AIO requests in parallel, he must make multiple
+ * separate AIO calls.
+ *
+ * Also return the error if no data has been processed so far.
+ */
+ if (err <= 0) {
+ if (err == -EIOCBQUEUED || err == -EBADMSG || !ret)
+ ret = err;
+ goto out;
+ }
- if (aead_writable(sk))
- mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+ ret += err;
+ }
- return mask;
+out:
+ af_alg_wmem_wakeup(sk);
+ release_sock(sk);
+ return ret;
}
static struct proto_ops algif_aead_ops = {
@@ -723,9 +352,9 @@ static struct proto_ops algif_aead_ops = {
.release = af_alg_release,
.sendmsg = aead_sendmsg,
- .sendpage = aead_sendpage,
+ .sendpage = af_alg_sendpage,
.recvmsg = aead_recvmsg,
- .poll = aead_poll,
+ .poll = af_alg_poll,
};
static int aead_check_key(struct socket *sock)
@@ -787,7 +416,7 @@ static ssize_t aead_sendpage_nokey(struct socket *sock, struct page *page,
if (err)
return err;
- return aead_sendpage(sock, page, offset, size, flags);
+ return af_alg_sendpage(sock, page, offset, size, flags);
}
static int aead_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
@@ -821,13 +450,14 @@ static struct proto_ops algif_aead_ops_nokey = {
.sendmsg = aead_sendmsg_nokey,
.sendpage = aead_sendpage_nokey,
.recvmsg = aead_recvmsg_nokey,
- .poll = aead_poll,
+ .poll = af_alg_poll,
};
static void *aead_bind(const char *name, u32 type, u32 mask)
{
struct aead_tfm *tfm;
struct crypto_aead *aead;
+ struct crypto_skcipher *null_tfm;
tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
if (!tfm)
@@ -839,7 +469,15 @@ static void *aead_bind(const char *name, u32 type, u32 mask)
return ERR_CAST(aead);
}
+ null_tfm = crypto_get_default_null_skcipher2();
+ if (IS_ERR(null_tfm)) {
+ crypto_free_aead(aead);
+ kfree(tfm);
+ return ERR_CAST(null_tfm);
+ }
+
tfm->aead = aead;
+ tfm->null_tfm = null_tfm;
return tfm;
}
@@ -873,12 +511,15 @@ static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
static void aead_sock_destruct(struct sock *sk)
{
struct alg_sock *ask = alg_sk(sk);
- struct aead_ctx *ctx = ask->private;
- unsigned int ivlen = crypto_aead_ivsize(
- crypto_aead_reqtfm(&ctx->aead_req));
-
- WARN_ON(refcount_read(&sk->sk_refcnt) != 0);
- aead_put_sgl(sk);
+ struct af_alg_ctx *ctx = ask->private;
+ struct sock *psk = ask->parent;
+ struct alg_sock *pask = alg_sk(psk);
+ struct aead_tfm *aeadc = pask->private;
+ struct crypto_aead *tfm = aeadc->aead;
+ unsigned int ivlen = crypto_aead_ivsize(tfm);
+
+ af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
+ crypto_put_default_null_skcipher2();
sock_kzfree_s(sk, ctx->iv, ivlen);
sock_kfree_s(sk, ctx, ctx->len);
af_alg_release_parent(sk);
@@ -886,11 +527,11 @@ static void aead_sock_destruct(struct sock *sk)
static int aead_accept_parent_nokey(void *private, struct sock *sk)
{
- struct aead_ctx *ctx;
+ struct af_alg_ctx *ctx;
struct alg_sock *ask = alg_sk(sk);
struct aead_tfm *tfm = private;
struct crypto_aead *aead = tfm->aead;
- unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(aead);
+ unsigned int len = sizeof(*ctx);
unsigned int ivlen = crypto_aead_ivsize(aead);
ctx = sock_kmalloc(sk, len, GFP_KERNEL);
@@ -905,23 +546,18 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk)
}
memset(ctx->iv, 0, ivlen);
+ INIT_LIST_HEAD(&ctx->tsgl_list);
ctx->len = len;
ctx->used = 0;
+ ctx->rcvused = 0;
ctx->more = 0;
ctx->merge = 0;
ctx->enc = 0;
- ctx->tsgl.cur = 0;
ctx->aead_assoclen = 0;
af_alg_init_completion(&ctx->completion);
- sg_init_table(ctx->tsgl.sg, ALG_MAX_PAGES);
- INIT_LIST_HEAD(&ctx->list);
ask->private = ctx;
- aead_request_set_tfm(&ctx->aead_req, aead);
- aead_request_set_callback(&ctx->aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- af_alg_complete, &ctx->completion);
-
sk->sk_destruct = aead_sock_destruct;
return 0;
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 43839b00fe6c..8ae4170aaeb4 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -10,6 +10,21 @@
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
+ * The following concept of the memory management is used:
+ *
+ * The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is
+ * filled by user space with the data submitted via sendpage/sendmsg. Filling
+ * up the TX SGL does not cause a crypto operation -- the data will only be
+ * tracked by the kernel. Upon receipt of one recvmsg call, the caller must
+ * provide a buffer which is tracked with the RX SGL.
+ *
+ * During the processing of the recvmsg operation, the cipher request is
+ * allocated and prepared. As part of the recvmsg operation, the processed
+ * TX buffers are extracted from the TX SGL into a separate SGL.
+ *
+ * After the completion of the crypto operation, the RX SGL and the cipher
+ * request is released. The extracted TX SGL parts are released together with
+ * the RX SGL release.
*/
#include <crypto/scatterwalk.h>
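The same TX/RX SGL split applies to the skcipher interface described in the comment above: sendmsg()/sendpage() only queue data, and each recvmsg() turns queued data into one cipher request. A minimal hypothetical user-space sketch follows (not part of the patch; "cbc(aes)", the zero key/IV and the 32-byte payload are assumptions, error handling omitted). It also shows how MSG_MORE lets the TX SGL grow across several system calls before a single recvmsg() consumes it.

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type = "skcipher",
		.salg_name = "cbc(aes)",
	};
	unsigned char key[16] = { 0 }, pt[32] = { 0 }, ct[32];
	char cbuf[CMSG_SPACE(4) +
		  CMSG_SPACE(sizeof(struct af_alg_iv) + 16)] = { 0 };
	struct msghdr msg = { 0 };
	struct iovec iov = { .iov_base = pt, .iov_len = 16 };
	struct cmsghdr *cmsg;
	struct af_alg_iv *ivp;
	int tfmfd, opfd;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
	opfd = accept(tfmfd, NULL, 0);

	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_OP;
	cmsg->cmsg_len = CMSG_LEN(4);
	*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_IV;
	cmsg->cmsg_len = CMSG_LEN(sizeof(struct af_alg_iv) + 16);
	ivp = (struct af_alg_iv *)CMSG_DATA(cmsg);
	ivp->ivlen = 16;			/* CBC IV = AES block size */
	memset(ivp->iv, 0, 16);

	/* First half of the plaintext; MSG_MORE only grows the TX SGL. */
	sendmsg(opfd, &msg, MSG_MORE);
	/* Second half; still no cipher operation is triggered. */
	send(opfd, pt + 16, 16, 0);
	/* recvmsg() allocates the request and runs the cipher over 32 bytes. */
	read(opfd, ct, sizeof(ct));

	close(opfd);
	close(tfmfd);
	return 0;
}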
@@ -18,279 +33,16 @@
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
-#include <linux/sched/signal.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>
-struct skcipher_sg_list {
- struct list_head list;
-
- int cur;
-
- struct scatterlist sg[0];
-};
-
struct skcipher_tfm {
struct crypto_skcipher *skcipher;
bool has_key;
};
-struct skcipher_ctx {
- struct list_head tsgl;
- struct af_alg_sgl rsgl;
-
- void *iv;
-
- struct af_alg_completion completion;
-
- atomic_t inflight;
- size_t used;
-
- unsigned int len;
- bool more;
- bool merge;
- bool enc;
-
- struct skcipher_request req;
-};
-
-struct skcipher_async_rsgl {
- struct af_alg_sgl sgl;
- struct list_head list;
-};
-
-struct skcipher_async_req {
- struct kiocb *iocb;
- struct skcipher_async_rsgl first_sgl;
- struct list_head list;
- struct scatterlist *tsg;
- atomic_t *inflight;
- struct skcipher_request req;
-};
-
-#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
- sizeof(struct scatterlist) - 1)
-
-static void skcipher_free_async_sgls(struct skcipher_async_req *sreq)
-{
- struct skcipher_async_rsgl *rsgl, *tmp;
- struct scatterlist *sgl;
- struct scatterlist *sg;
- int i, n;
-
- list_for_each_entry_safe(rsgl, tmp, &sreq->list, list) {
- af_alg_free_sg(&rsgl->sgl);
- if (rsgl != &sreq->first_sgl)
- kfree(rsgl);
- }
- sgl = sreq->tsg;
- n = sg_nents(sgl);
- for_each_sg(sgl, sg, n, i)
- put_page(sg_page(sg));
-
- kfree(sreq->tsg);
-}
-
-static void skcipher_async_cb(struct crypto_async_request *req, int err)
-{
- struct skcipher_async_req *sreq = req->data;
- struct kiocb *iocb = sreq->iocb;
-
- atomic_dec(sreq->inflight);
- skcipher_free_async_sgls(sreq);
- kzfree(sreq);
- iocb->ki_complete(iocb, err, err);
-}
-
-static inline int skcipher_sndbuf(struct sock *sk)
-{
- struct alg_sock *ask = alg_sk(sk);
- struct skcipher_ctx *ctx = ask->private;
-
- return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
- ctx->used, 0);
-}
-
-static inline bool skcipher_writable(struct sock *sk)
-{
- return PAGE_SIZE <= skcipher_sndbuf(sk);
-}
-
-static int skcipher_alloc_sgl(struct sock *sk)
-{
- struct alg_sock *ask = alg_sk(sk);
- struct skcipher_ctx *ctx = ask->private;
- struct skcipher_sg_list *sgl;
- struct scatterlist *sg = NULL;
-
- sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
- if (!list_empty(&ctx->tsgl))
- sg = sgl->sg;
-
- if (!sg || sgl->cur >= MAX_SGL_ENTS) {
- sgl = sock_kmalloc(sk, sizeof(*sgl) +
- sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1),
- GFP_KERNEL);
- if (!sgl)
- return -ENOMEM;
-
- sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
- sgl->cur = 0;
-
- if (sg)
- sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
-
- list_add_tail(&sgl->list, &ctx->tsgl);
- }
-
- return 0;
-}
-
-static void skcipher_pull_sgl(struct sock *sk, size_t used, int put)
-{
- struct alg_sock *ask = alg_sk(sk);
- struct skcipher_ctx *ctx = ask->private;
- struct skcipher_sg_list *sgl;
- struct scatterlist *sg;
- int i;
-
- while (!list_empty(&ctx->tsgl)) {
- sgl = list_first_entry(&ctx->tsgl, struct skcipher_sg_list,
- list);
- sg = sgl->sg;
-
- for (i = 0; i < sgl->cur; i++) {
- size_t plen = min_t(size_t, used, sg[i].length);
-
- if (!sg_page(sg + i))
- continue;
-
- sg[i].length -= plen;
- sg[i].offset += plen;
-
- used -= plen;
- ctx->used -= plen;
-
- if (sg[i].length)
- return;
- if (put)
- put_page(sg_page(sg + i));
- sg_assign_page(sg + i, NULL);
- }
-
- list_del(&sgl->list);
- sock_kfree_s(sk, sgl,
- sizeof(*sgl) + sizeof(sgl->sg[0]) *
- (MAX_SGL_ENTS + 1));
- }
-
- if (!ctx->used)
- ctx->merge = 0;
-}
-
-static void skcipher_free_sgl(struct sock *sk)
-{
- struct alg_sock *ask = alg_sk(sk);
- struct skcipher_ctx *ctx = ask->private;
-
- skcipher_pull_sgl(sk, ctx->used, 1);
-}
-
-static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags)
-{
- DEFINE_WAIT_FUNC(wait, woken_wake_function);
- int err = -ERESTARTSYS;
- long timeout;
-
- if (flags & MSG_DONTWAIT)
- return -EAGAIN;
-
- sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
-
- add_wait_queue(sk_sleep(sk), &wait);
- for (;;) {
- if (signal_pending(current))
- break;
- timeout = MAX_SCHEDULE_TIMEOUT;
- if (sk_wait_event(sk, &timeout, skcipher_writable(sk), &wait)) {
- err = 0;
- break;
- }
- }
- remove_wait_queue(sk_sleep(sk), &wait);
-
- return err;
-}
-
-static void skcipher_wmem_wakeup(struct sock *sk)
-{
- struct socket_wq *wq;
-
- if (!skcipher_writable(sk))
- return;
-
- rcu_read_lock();
- wq = rcu_dereference(sk->sk_wq);
- if (skwq_has_sleeper(wq))
- wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
- POLLRDNORM |
- POLLRDBAND);
- sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
- rcu_read_unlock();
-}
-
-static int skcipher_wait_for_data(struct sock *sk, unsigned flags)
-{
- DEFINE_WAIT_FUNC(wait, woken_wake_function);
- struct alg_sock *ask = alg_sk(sk);
- struct skcipher_ctx *ctx = ask->private;
- long timeout;
- int err = -ERESTARTSYS;
-
- if (flags & MSG_DONTWAIT) {
- return -EAGAIN;
- }
-
- sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
-
- add_wait_queue(sk_sleep(sk), &wait);
- for (;;) {
- if (signal_pending(current))
- break;
- timeout = MAX_SCHEDULE_TIMEOUT;
- if (sk_wait_event(sk, &timeout, ctx->used, &wait)) {
- err = 0;
- break;
- }
- }
- remove_wait_queue(sk_sleep(sk), &wait);
-
- sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
-
- return err;
-}
-
-static void skcipher_data_wakeup(struct sock *sk)
-{
- struct alg_sock *ask = alg_sk(sk);
- struct skcipher_ctx *ctx = ask->private;
- struct socket_wq *wq;
-
- if (!ctx->used)
- return;
-
- rcu_read_lock();
- wq = rcu_dereference(sk->sk_wq);
- if (skwq_has_sleeper(wq))
- wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
- POLLRDNORM |
- POLLRDBAND);
- sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
- rcu_read_unlock();
-}
-
static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
size_t size)
{
@@ -298,445 +50,143 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
struct alg_sock *ask = alg_sk(sk);
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
- struct skcipher_ctx *ctx = ask->private;
struct skcipher_tfm *skc = pask->private;
struct crypto_skcipher *tfm = skc->skcipher;
unsigned ivsize = crypto_skcipher_ivsize(tfm);
- struct skcipher_sg_list *sgl;
- struct af_alg_control con = {};
- long copied = 0;
- bool enc = 0;
- bool init = 0;
- int err;
- int i;
-
- if (msg->msg_controllen) {
- err = af_alg_cmsg_send(msg, &con);
- if (err)
- return err;
-
- init = 1;
- switch (con.op) {
- case ALG_OP_ENCRYPT:
- enc = 1;
- break;
- case ALG_OP_DECRYPT:
- enc = 0;
- break;
- default:
- return -EINVAL;
- }
-
- if (con.iv && con.iv->ivlen != ivsize)
- return -EINVAL;
- }
-
- err = -EINVAL;
-
- lock_sock(sk);
- if (!ctx->more && ctx->used)
- goto unlock;
-
- if (init) {
- ctx->enc = enc;
- if (con.iv)
- memcpy(ctx->iv, con.iv->iv, ivsize);
- }
-
- while (size) {
- struct scatterlist *sg;
- unsigned long len = size;
- size_t plen;
-
- if (ctx->merge) {
- sgl = list_entry(ctx->tsgl.prev,
- struct skcipher_sg_list, list);
- sg = sgl->sg + sgl->cur - 1;
- len = min_t(unsigned long, len,
- PAGE_SIZE - sg->offset - sg->length);
-
- err = memcpy_from_msg(page_address(sg_page(sg)) +
- sg->offset + sg->length,
- msg, len);
- if (err)
- goto unlock;
-
- sg->length += len;
- ctx->merge = (sg->offset + sg->length) &
- (PAGE_SIZE - 1);
-
- ctx->used += len;
- copied += len;
- size -= len;
- continue;
- }
- if (!skcipher_writable(sk)) {
- err = skcipher_wait_for_wmem(sk, msg->msg_flags);
- if (err)
- goto unlock;
- }
-
- len = min_t(unsigned long, len, skcipher_sndbuf(sk));
-
- err = skcipher_alloc_sgl(sk);
- if (err)
- goto unlock;
-
- sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
- sg = sgl->sg;
- if (sgl->cur)
- sg_unmark_end(sg + sgl->cur - 1);
- do {
- i = sgl->cur;
- plen = min_t(size_t, len, PAGE_SIZE);
-
- sg_assign_page(sg + i, alloc_page(GFP_KERNEL));
- err = -ENOMEM;
- if (!sg_page(sg + i))
- goto unlock;
-
- err = memcpy_from_msg(page_address(sg_page(sg + i)),
- msg, plen);
- if (err) {
- __free_page(sg_page(sg + i));
- sg_assign_page(sg + i, NULL);
- goto unlock;
- }
-
- sg[i].length = plen;
- len -= plen;
- ctx->used += plen;
- copied += plen;
- size -= plen;
- sgl->cur++;
- } while (len && sgl->cur < MAX_SGL_ENTS);
-
- if (!size)
- sg_mark_end(sg + sgl->cur - 1);
-
- ctx->merge = plen & (PAGE_SIZE - 1);
- }
-
- err = 0;
-
- ctx->more = msg->msg_flags & MSG_MORE;
-
-unlock:
- skcipher_data_wakeup(sk);
- release_sock(sk);
-
- return copied ?: err;
+ return af_alg_sendmsg(sock, msg, size, ivsize);
}
-static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
- int offset, size_t size, int flags)
-{
- struct sock *sk = sock->sk;
- struct alg_sock *ask = alg_sk(sk);
- struct skcipher_ctx *ctx = ask->private;
- struct skcipher_sg_list *sgl;
- int err = -EINVAL;
-
- if (flags & MSG_SENDPAGE_NOTLAST)
- flags |= MSG_MORE;
-
- lock_sock(sk);
- if (!ctx->more && ctx->used)
- goto unlock;
-
- if (!size)
- goto done;
-
- if (!skcipher_writable(sk)) {
- err = skcipher_wait_for_wmem(sk, flags);
- if (err)
- goto unlock;
- }
-
- err = skcipher_alloc_sgl(sk);
- if (err)
- goto unlock;
-
- ctx->merge = 0;
- sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
-
- if (sgl->cur)
- sg_unmark_end(sgl->sg + sgl->cur - 1);
-
- sg_mark_end(sgl->sg + sgl->cur);
- get_page(page);
- sg_set_page(sgl->sg + sgl->cur, page, size, offset);
- sgl->cur++;
- ctx->used += size;
-
-done:
- ctx->more = flags & MSG_MORE;
-
-unlock:
- skcipher_data_wakeup(sk);
- release_sock(sk);
-
- return err ?: size;
-}
-
-static int skcipher_all_sg_nents(struct skcipher_ctx *ctx)
-{
- struct skcipher_sg_list *sgl;
- struct scatterlist *sg;
- int nents = 0;
-
- list_for_each_entry(sgl, &ctx->tsgl, list) {
- sg = sgl->sg;
-
- while (!sg->length)
- sg++;
-
- nents += sg_nents(sg);
- }
- return nents;
-}
-
-static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
- int flags)
+static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
+ size_t ignored, int flags)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
- struct skcipher_ctx *ctx = ask->private;
+ struct af_alg_ctx *ctx = ask->private;
struct skcipher_tfm *skc = pask->private;
struct crypto_skcipher *tfm = skc->skcipher;
- struct skcipher_sg_list *sgl;
- struct scatterlist *sg;
- struct skcipher_async_req *sreq;
- struct skcipher_request *req;
- struct skcipher_async_rsgl *last_rsgl = NULL;
- unsigned int txbufs = 0, len = 0, tx_nents;
- unsigned int reqsize = crypto_skcipher_reqsize(tfm);
- unsigned int ivsize = crypto_skcipher_ivsize(tfm);
- int err = -ENOMEM;
- bool mark = false;
- char *iv;
-
- sreq = kzalloc(sizeof(*sreq) + reqsize + ivsize, GFP_KERNEL);
- if (unlikely(!sreq))
- goto out;
-
- req = &sreq->req;
- iv = (char *)(req + 1) + reqsize;
- sreq->iocb = msg->msg_iocb;
- INIT_LIST_HEAD(&sreq->list);
- sreq->inflight = &ctx->inflight;
+ unsigned int bs = crypto_skcipher_blocksize(tfm);
+ struct af_alg_async_req *areq;
+ int err = 0;
+ size_t len = 0;
- lock_sock(sk);
- tx_nents = skcipher_all_sg_nents(ctx);
- sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL);
- if (unlikely(!sreq->tsg))
- goto unlock;
- sg_init_table(sreq->tsg, tx_nents);
- memcpy(iv, ctx->iv, ivsize);
- skcipher_request_set_tfm(req, tfm);
- skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
- skcipher_async_cb, sreq);
-
- while (iov_iter_count(&msg->msg_iter)) {
- struct skcipher_async_rsgl *rsgl;
- int used;
-
- if (!ctx->used) {
- err = skcipher_wait_for_data(sk, flags);
- if (err)
- goto free;
- }
- sgl = list_first_entry(&ctx->tsgl,
- struct skcipher_sg_list, list);
- sg = sgl->sg;
-
- while (!sg->length)
- sg++;
-
- used = min_t(unsigned long, ctx->used,
- iov_iter_count(&msg->msg_iter));
- used = min_t(unsigned long, used, sg->length);
-
- if (txbufs == tx_nents) {
- struct scatterlist *tmp;
- int x;
- /* Ran out of tx slots in async request
- * need to expand */
- tmp = kcalloc(tx_nents * 2, sizeof(*tmp),
- GFP_KERNEL);
- if (!tmp) {
- err = -ENOMEM;
- goto free;
- }
-
- sg_init_table(tmp, tx_nents * 2);
- for (x = 0; x < tx_nents; x++)
- sg_set_page(&tmp[x], sg_page(&sreq->tsg[x]),
- sreq->tsg[x].length,
- sreq->tsg[x].offset);
- kfree(sreq->tsg);
- sreq->tsg = tmp;
- tx_nents *= 2;
- mark = true;
- }
- /* Need to take over the tx sgl from ctx
- * to the asynch req - these sgls will be freed later */
- sg_set_page(sreq->tsg + txbufs++, sg_page(sg), sg->length,
- sg->offset);
-
- if (list_empty(&sreq->list)) {
- rsgl = &sreq->first_sgl;
- list_add_tail(&rsgl->list, &sreq->list);
- } else {
- rsgl = kmalloc(sizeof(*rsgl), GFP_KERNEL);
- if (!rsgl) {
- err = -ENOMEM;
- goto free;
- }
- list_add_tail(&rsgl->list, &sreq->list);
- }
+ /* Allocate cipher request for current operation. */
+ areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) +
+ crypto_skcipher_reqsize(tfm));
+ if (IS_ERR(areq))
+ return PTR_ERR(areq);
- used = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, used);
- err = used;
- if (used < 0)
- goto free;
- if (last_rsgl)
- af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);
-
- last_rsgl = rsgl;
- len += used;
- skcipher_pull_sgl(sk, used, 0);
- iov_iter_advance(&msg->msg_iter, used);
+ /* convert iovecs of output buffers into RX SGL */
+ err = af_alg_get_rsgl(sk, msg, flags, areq, -1, &len);
+ if (err)
+ goto free;
+
+ /* Process only as many RX buffers as there is TX data for. */
+ if (len > ctx->used)
+ len = ctx->used;
+
+ /*
+ * If more data is expected to be processed, only operate on full
+ * block-size buffers here.
+ */
+ if (ctx->more || len < ctx->used)
+ len -= len % bs;
+
+ /*
+ * Create a per request TX SGL for this request which tracks the
+ * SG entries from the global TX SGL.
+ */
+ areq->tsgl_entries = af_alg_count_tsgl(sk, len, 0);
+ if (!areq->tsgl_entries)
+ areq->tsgl_entries = 1;
+ areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * areq->tsgl_entries,
+ GFP_KERNEL);
+ if (!areq->tsgl) {
+ err = -ENOMEM;
+ goto free;
+ }
+ sg_init_table(areq->tsgl, areq->tsgl_entries);
+ af_alg_pull_tsgl(sk, len, areq->tsgl, 0);
+
+ /* Initialize the crypto operation */
+ skcipher_request_set_tfm(&areq->cra_u.skcipher_req, tfm);
+ skcipher_request_set_crypt(&areq->cra_u.skcipher_req, areq->tsgl,
+ areq->first_rsgl.sgl.sg, len, ctx->iv);
+
+ if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
+ /* AIO operation */
+ areq->iocb = msg->msg_iocb;
+ skcipher_request_set_callback(&areq->cra_u.skcipher_req,
+ CRYPTO_TFM_REQ_MAY_SLEEP,
+ af_alg_async_cb, areq);
+ err = ctx->enc ?
+ crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) :
+ crypto_skcipher_decrypt(&areq->cra_u.skcipher_req);
+ } else {
+ /* Synchronous operation */
+ skcipher_request_set_callback(&areq->cra_u.skcipher_req,
+ CRYPTO_TFM_REQ_MAY_SLEEP |
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+ af_alg_complete,
+ &ctx->completion);
+ err = af_alg_wait_for_completion(ctx->enc ?
+ crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) :
+ crypto_skcipher_decrypt(&areq->cra_u.skcipher_req),
+ &ctx->completion);
}
- if (mark)
- sg_mark_end(sreq->tsg + txbufs - 1);
-
- skcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
- len, iv);
- err = ctx->enc ? crypto_skcipher_encrypt(req) :
- crypto_skcipher_decrypt(req);
+ /* AIO operation in progress */
if (err == -EINPROGRESS) {
- atomic_inc(&ctx->inflight);
- err = -EIOCBQUEUED;
- sreq = NULL;
- goto unlock;
+ sock_hold(sk);
+
+ /* Remember output size that will be generated. */
+ areq->outlen = len;
+
+ return -EIOCBQUEUED;
}
+
free:
- skcipher_free_async_sgls(sreq);
-unlock:
- skcipher_wmem_wakeup(sk);
- release_sock(sk);
- kzfree(sreq);
-out:
- return err;
+ af_alg_free_areq_sgls(areq);
+ sock_kfree_s(sk, areq, areq->areqlen);
+
+ return err ? err : len;
}
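The length adjustment above keeps partial blocks queued whenever more data has been announced (MSG_MORE) or not all queued data fits into the supplied RX buffers. A small stand-alone illustration of the clamping and rounding, with made-up numbers (16-byte block size, 70 bytes queued in the TX SGL); it is a sketch of the rule, not code from the patch:

#include <stdio.h>

static size_t usable_len(size_t len, size_t used, size_t bs, int more)
{
	if (len > used)
		len = used;		/* never more than is queued in the TX SGL */
	if (more || len < used)
		len -= len % bs;	/* keep partial blocks queued */
	return len;
}

int main(void)
{
	/* 128 bytes of RX space, 70 bytes queued, 16-byte blocks, MSG_MORE set */
	printf("%zu\n", usable_len(128, 70, 16, 1));	/* prints 64 */
	/* only 50 bytes of RX space for the same 70 queued bytes */
	printf("%zu\n", usable_len(50, 70, 16, 0));	/* prints 48 */
	return 0;
}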
-static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
- int flags)
+static int skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
+ size_t ignored, int flags)
{
struct sock *sk = sock->sk;
- struct alg_sock *ask = alg_sk(sk);
- struct sock *psk = ask->parent;
- struct alg_sock *pask = alg_sk(psk);
- struct skcipher_ctx *ctx = ask->private;
- struct skcipher_tfm *skc = pask->private;
- struct crypto_skcipher *tfm = skc->skcipher;
- unsigned bs = crypto_skcipher_blocksize(tfm);
- struct skcipher_sg_list *sgl;
- struct scatterlist *sg;
- int err = -EAGAIN;
- int used;
- long copied = 0;
+ int ret = 0;
lock_sock(sk);
while (msg_data_left(msg)) {
- if (!ctx->used) {
- err = skcipher_wait_for_data(sk, flags);
- if (err)
- goto unlock;
+ int err = _skcipher_recvmsg(sock, msg, ignored, flags);
+
+ /*
+ * This error covers -EIOCBQUEUED which implies that we can
+ * only handle one AIO request. If the caller wants to have
+ * multiple AIO requests in parallel, he must make multiple
+ * separate AIO calls.
+ *
+ * Also return the error if no data has been processed so far.
+ */
+ if (err <= 0) {
+ if (err == -EIOCBQUEUED || !ret)
+ ret = err;
+ goto out;
}
- used = min_t(unsigned long, ctx->used, msg_data_left(msg));
-
- used = af_alg_make_sg(&ctx->rsgl, &msg->msg_iter, used);
- err = used;
- if (err < 0)
- goto unlock;
-
- if (ctx->more || used < ctx->used)
- used -= used % bs;
-
- err = -EINVAL;
- if (!used)
- goto free;
-
- sgl = list_first_entry(&ctx->tsgl,
- struct skcipher_sg_list, list);
- sg = sgl->sg;
-
- while (!sg->length)
- sg++;
-
- skcipher_request_set_crypt(&ctx->req, sg, ctx->rsgl.sg, used,
- ctx->iv);
-
- err = af_alg_wait_for_completion(
- ctx->enc ?
- crypto_skcipher_encrypt(&ctx->req) :
- crypto_skcipher_decrypt(&ctx->req),
- &ctx->completion);
-
-free:
- af_alg_free_sg(&ctx->rsgl);
-
- if (err)
- goto unlock;
-
- copied += used;
- skcipher_pull_sgl(sk, used, 1);
- iov_iter_advance(&msg->msg_iter, used);
+ ret += err;
}
- err = 0;
-
-unlock:
- skcipher_wmem_wakeup(sk);
+out:
+ af_alg_wmem_wakeup(sk);
release_sock(sk);
-
- return copied ?: err;
-}
-
-static int skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
- size_t ignored, int flags)
-{
- return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ?
- skcipher_recvmsg_async(sock, msg, flags) :
- skcipher_recvmsg_sync(sock, msg, flags);
+ return ret;
}
-static unsigned int skcipher_poll(struct file *file, struct socket *sock,
- poll_table *wait)
-{
- struct sock *sk = sock->sk;
- struct alg_sock *ask = alg_sk(sk);
- struct skcipher_ctx *ctx = ask->private;
- unsigned int mask;
-
- sock_poll_wait(file, sk_sleep(sk), wait);
- mask = 0;
-
- if (ctx->used)
- mask |= POLLIN | POLLRDNORM;
-
- if (skcipher_writable(sk))
- mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
-
- return mask;
-}
static struct proto_ops algif_skcipher_ops = {
.family = PF_ALG,
@@ -755,9 +205,9 @@ static struct proto_ops algif_skcipher_ops = {
.release = af_alg_release,
.sendmsg = skcipher_sendmsg,
- .sendpage = skcipher_sendpage,
+ .sendpage = af_alg_sendpage,
.recvmsg = skcipher_recvmsg,
- .poll = skcipher_poll,
+ .poll = af_alg_poll,
};
static int skcipher_check_key(struct socket *sock)
@@ -819,7 +269,7 @@ static ssize_t skcipher_sendpage_nokey(struct socket *sock, struct page *page,
if (err)
return err;
- return skcipher_sendpage(sock, page, offset, size, flags);
+ return af_alg_sendpage(sock, page, offset, size, flags);
}
static int skcipher_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
@@ -853,7 +303,7 @@ static struct proto_ops algif_skcipher_ops_nokey = {
.sendmsg = skcipher_sendmsg_nokey,
.sendpage = skcipher_sendpage_nokey,
.recvmsg = skcipher_recvmsg_nokey,
- .poll = skcipher_poll,
+ .poll = af_alg_poll,
};
static void *skcipher_bind(const char *name, u32 type, u32 mask)
@@ -895,26 +345,16 @@ static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
return err;
}
-static void skcipher_wait(struct sock *sk)
-{
- struct alg_sock *ask = alg_sk(sk);
- struct skcipher_ctx *ctx = ask->private;
- int ctr = 0;
-
- while (atomic_read(&ctx->inflight) && ctr++ < 100)
- msleep(100);
-}
-
static void skcipher_sock_destruct(struct sock *sk)
{
struct alg_sock *ask = alg_sk(sk);
- struct skcipher_ctx *ctx = ask->private;
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req);
-
- if (atomic_read(&ctx->inflight))
- skcipher_wait(sk);
+ struct af_alg_ctx *ctx = ask->private;
+ struct sock *psk = ask->parent;
+ struct alg_sock *pask = alg_sk(psk);
+ struct skcipher_tfm *skc = pask->private;
+ struct crypto_skcipher *tfm = skc->skcipher;
- skcipher_free_sgl(sk);
+ af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm));
sock_kfree_s(sk, ctx, ctx->len);
af_alg_release_parent(sk);
@@ -922,11 +362,11 @@ static void skcipher_sock_destruct(struct sock *sk)
static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
{
- struct skcipher_ctx *ctx;
+ struct af_alg_ctx *ctx;
struct alg_sock *ask = alg_sk(sk);
struct skcipher_tfm *tfm = private;
struct crypto_skcipher *skcipher = tfm->skcipher;
- unsigned int len = sizeof(*ctx) + crypto_skcipher_reqsize(skcipher);
+ unsigned int len = sizeof(*ctx);
ctx = sock_kmalloc(sk, len, GFP_KERNEL);
if (!ctx)
@@ -941,22 +381,17 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
memset(ctx->iv, 0, crypto_skcipher_ivsize(skcipher));
- INIT_LIST_HEAD(&ctx->tsgl);
+ INIT_LIST_HEAD(&ctx->tsgl_list);
ctx->len = len;
ctx->used = 0;
+ ctx->rcvused = 0;
ctx->more = 0;
ctx->merge = 0;
ctx->enc = 0;
- atomic_set(&ctx->inflight, 0);
af_alg_init_completion(&ctx->completion);
ask->private = ctx;
- skcipher_request_set_tfm(&ctx->req, skcipher);
- skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_SLEEP |
- CRYPTO_TFM_REQ_MAY_BACKLOG,
- af_alg_complete, &ctx->completion);
-
sk->sk_destruct = skcipher_sock_destruct;
return 0;
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index 6f8f6b86bfe2..0cf5fefdb859 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -248,6 +248,9 @@ static int crypto_authenc_esn_decrypt_tail(struct aead_request *req,
u8 *ihash = ohash + crypto_ahash_digestsize(auth);
u32 tmp[2];
+ if (!authsize)
+ goto decrypt;
+
/* Move high-order bits of sequence number back. */
scatterwalk_map_and_copy(tmp, dst, 4, 4, 0);
scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 0);
@@ -256,6 +259,8 @@ static int crypto_authenc_esn_decrypt_tail(struct aead_request *req,
if (crypto_memneq(ihash, ohash, authsize))
return -EBADMSG;
+decrypt:
+
sg_init_table(areq_ctx->dst, 2);
dst = scatterwalk_ffwd(areq_ctx->dst, dst, assoclen);
diff --git a/crypto/chacha20_generic.c b/crypto/chacha20_generic.c
index 8b3c04d625c3..4a45fa4890c0 100644
--- a/crypto/chacha20_generic.c
+++ b/crypto/chacha20_generic.c
@@ -91,9 +91,14 @@ int crypto_chacha20_crypt(struct skcipher_request *req)
crypto_chacha20_init(state, ctx, walk.iv);
while (walk.nbytes > 0) {
+ unsigned int nbytes = walk.nbytes;
+
+ if (nbytes < walk.total)
+ nbytes = round_down(nbytes, walk.stride);
+
chacha20_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr,
- walk.nbytes);
- err = skcipher_walk_done(&walk, 0);
+ nbytes);
+ err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
}
return err;
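The fix matters for requests that the walk hands out in several chunks (e.g. split scatterlists): every intermediate chunk must be cut down to a multiple of walk.stride, and the remainder is handed back to skcipher_walk_done() so it is re-presented together with the next chunk. A worked example with made-up chunk sizes; the 64-byte stride matches the ChaCha20 block size, and round_down here is the usual power-of-two variant:

#include <stdio.h>

#define round_down(x, y) ((x) & ~((y) - 1))

int main(void)
{
	unsigned int nbytes = 100, total = 375, stride = 64;
	unsigned int step = nbytes;

	if (step < total)			/* not the final chunk */
		step = round_down(step, stride);

	/* 64 bytes are encrypted now; 36 go back to skcipher_walk_done(). */
	printf("process %u, carry over %u\n", step, nbytes - step);
	return 0;
}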
diff --git a/crypto/ctr.c b/crypto/ctr.c
index 477d9226ccaa..854d924f9d8e 100644
--- a/crypto/ctr.c
+++ b/crypto/ctr.c
@@ -65,8 +65,7 @@ static void crypto_ctr_crypt_final(struct blkcipher_walk *walk,
unsigned int nbytes = walk->nbytes;
crypto_cipher_encrypt_one(tfm, keystream, ctrblk);
- crypto_xor(keystream, src, nbytes);
- memcpy(dst, keystream, nbytes);
+ crypto_xor_cpy(dst, keystream, src, nbytes);
crypto_inc(ctrblk, bsize);
}
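crypto_xor_cpy(dst, a, b, n) writes a XOR b directly into dst, so the separate memcpy() of the keystream is no longer needed; the same transformation is applied to pcbc.c further down. A byte-wise sketch of the helper's semantics, for illustration only -- the in-kernel helper typically processes word-sized chunks at a time:

#include <stddef.h>

static void xor_cpy(unsigned char *dst, const unsigned char *a,
		    const unsigned char *b, size_t n)
{
	while (n--)
		*dst++ = *a++ ^ *b++;
}

int main(void)
{
	unsigned char keystream[4] = { 1, 2, 3, 4 };
	unsigned char src[4] = { 5, 6, 7, 8 };
	unsigned char dst[4];

	xor_cpy(dst, keystream, src, sizeof(dst));	/* dst = keystream ^ src */
	return 0;
}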
diff --git a/crypto/ecdh.c b/crypto/ecdh.c
index 61c7708905d0..4271fc77d261 100644
--- a/crypto/ecdh.c
+++ b/crypto/ecdh.c
@@ -20,8 +20,6 @@ struct ecdh_ctx {
unsigned int curve_id;
unsigned int ndigits;
u64 private_key[ECC_MAX_DIGITS];
- u64 public_key[2 * ECC_MAX_DIGITS];
- u64 shared_secret[ECC_MAX_DIGITS];
};
static inline struct ecdh_ctx *ecdh_get_ctx(struct crypto_kpp *tfm)
@@ -70,41 +68,58 @@ static int ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
static int ecdh_compute_value(struct kpp_request *req)
{
- int ret = 0;
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
struct ecdh_ctx *ctx = ecdh_get_ctx(tfm);
- size_t copied, nbytes;
+ u64 *public_key;
+ u64 *shared_secret = NULL;
void *buf;
+ size_t copied, nbytes, public_key_sz;
+ int ret = -ENOMEM;
nbytes = ctx->ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
+ /* Public part is a point thus it has both coordinates */
+ public_key_sz = 2 * nbytes;
+
+ public_key = kmalloc(public_key_sz, GFP_KERNEL);
+ if (!public_key)
+ return -ENOMEM;
if (req->src) {
- copied = sg_copy_to_buffer(req->src, 1, ctx->public_key,
- 2 * nbytes);
- if (copied != 2 * nbytes)
- return -EINVAL;
+ shared_secret = kmalloc(nbytes, GFP_KERNEL);
+ if (!shared_secret)
+ goto free_pubkey;
+
+ copied = sg_copy_to_buffer(req->src, 1, public_key,
+ public_key_sz);
+ if (copied != public_key_sz) {
+ ret = -EINVAL;
+ goto free_all;
+ }
ret = crypto_ecdh_shared_secret(ctx->curve_id, ctx->ndigits,
- ctx->private_key,
- ctx->public_key,
- ctx->shared_secret);
+ ctx->private_key, public_key,
+ shared_secret);
- buf = ctx->shared_secret;
+ buf = shared_secret;
} else {
ret = ecc_make_pub_key(ctx->curve_id, ctx->ndigits,
- ctx->private_key, ctx->public_key);
- buf = ctx->public_key;
- /* Public part is a point thus it has both coordinates */
- nbytes *= 2;
+ ctx->private_key, public_key);
+ buf = public_key;
+ nbytes = public_key_sz;
}
if (ret < 0)
- return ret;
+ goto free_all;
copied = sg_copy_from_buffer(req->dst, 1, buf, nbytes);
if (copied != nbytes)
- return -EINVAL;
+ ret = -EINVAL;
+ /* fall through */
+free_all:
+ kzfree(shared_secret);
+free_pubkey:
+ kfree(public_key);
return ret;
}
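For reference, the buffer sizes involved follow from the curve's digit count: one ECC digit is a u64, and the public key is a point, i.e. two coordinates. Note also that the per-request shared_secret is released with kzfree() so the secret is wiped, while the non-sensitive public_key only needs kfree(). A tiny sketch of the size arithmetic, with the NIST P-256 numbers assumed purely as an example:

#include <stdio.h>

#define ECC_DIGITS_TO_BYTES_SHIFT 3	/* one ECC digit is a u64 */

int main(void)
{
	unsigned int ndigits = 4;	/* NIST P-256, assumed for this example */
	size_t nbytes = (size_t)ndigits << ECC_DIGITS_TO_BYTES_SHIFT;	/* 32 */
	size_t public_key_sz = 2 * nbytes;	/* x and y coordinates: 64 */

	printf("shared secret: %zu bytes, public key: %zu bytes\n",
	       nbytes, public_key_sz);
	return 0;
}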
diff --git a/crypto/pcbc.c b/crypto/pcbc.c
index 29dd2b4a3b85..d9e45a958720 100644
--- a/crypto/pcbc.c
+++ b/crypto/pcbc.c
@@ -55,8 +55,7 @@ static int crypto_pcbc_encrypt_segment(struct skcipher_request *req,
do {
crypto_xor(iv, src, bsize);
crypto_cipher_encrypt_one(tfm, dst, iv);
- memcpy(iv, dst, bsize);
- crypto_xor(iv, src, bsize);
+ crypto_xor_cpy(iv, dst, src, bsize);
src += bsize;
dst += bsize;
@@ -79,8 +78,7 @@ static int crypto_pcbc_encrypt_inplace(struct skcipher_request *req,
memcpy(tmpbuf, src, bsize);
crypto_xor(iv, src, bsize);
crypto_cipher_encrypt_one(tfm, src, iv);
- memcpy(iv, tmpbuf, bsize);
- crypto_xor(iv, src, bsize);
+ crypto_xor_cpy(iv, tmpbuf, src, bsize);
src += bsize;
} while ((nbytes -= bsize) >= bsize);
@@ -127,8 +125,7 @@ static int crypto_pcbc_decrypt_segment(struct skcipher_request *req,
do {
crypto_cipher_decrypt_one(tfm, dst, src);
crypto_xor(dst, iv, bsize);
- memcpy(iv, src, bsize);
- crypto_xor(iv, dst, bsize);
+ crypto_xor_cpy(iv, dst, src, bsize);
src += bsize;
dst += bsize;
@@ -153,8 +150,7 @@ static int crypto_pcbc_decrypt_inplace(struct skcipher_request *req,
memcpy(tmpbuf, src, bsize);
crypto_cipher_decrypt_one(tfm, src, src);
crypto_xor(src, iv, bsize);
- memcpy(iv, tmpbuf, bsize);
- crypto_xor(iv, src, bsize);
+ crypto_xor_cpy(iv, src, tmpbuf, bsize);
src += bsize;
} while ((nbytes -= bsize) >= bsize);
diff --git a/crypto/rng.c b/crypto/rng.c
index 5e8469244960..b4a618668161 100644
--- a/crypto/rng.c
+++ b/crypto/rng.c
@@ -43,12 +43,14 @@ int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
if (!buf)
return -ENOMEM;
- get_random_bytes(buf, slen);
+ err = get_random_bytes_wait(buf, slen);
+ if (err)
+ goto out;
seed = buf;
}
err = crypto_rng_alg(tfm)->seed(tfm, seed, slen);
-
+out:
kzfree(buf);
return err;
}
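get_random_bytes_wait() blocks until the kernel CRNG is initialized and returns an error if the wait is interrupted, which is why the result now has to be propagated instead of silently seeding from possibly unseeded output. The user-space analogue is getrandom(2) without GRND_NONBLOCK; a minimal hypothetical sketch (glibc 2.25+ assumed):

#include <stdio.h>
#include <sys/random.h>

int main(void)
{
	unsigned char seed[16];
	/* Flags 0: block until the kernel CRNG is initialized, then return. */
	ssize_t n = getrandom(seed, sizeof(seed), 0);

	if (n != (ssize_t)sizeof(seed)) {
		perror("getrandom");
		return 1;
	}
	printf("got %zd seed bytes\n", n);
	return 0;
}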
diff --git a/crypto/scompress.c b/crypto/scompress.c
index ae1d3cf209e4..2075e2c4e7df 100644
--- a/crypto/scompress.c
+++ b/crypto/scompress.c
@@ -65,11 +65,6 @@ static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
seq_puts(m, "type : scomp\n");
}
-static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
-{
- return 0;
-}
-
static void crypto_scomp_free_scratches(void * __percpu *scratches)
{
int i;
@@ -125,12 +120,26 @@ static int crypto_scomp_alloc_all_scratches(void)
if (!scomp_src_scratches)
return -ENOMEM;
scomp_dst_scratches = crypto_scomp_alloc_scratches();
- if (!scomp_dst_scratches)
+ if (!scomp_dst_scratches) {
+ crypto_scomp_free_scratches(scomp_src_scratches);
+ scomp_src_scratches = NULL;
return -ENOMEM;
+ }
}
return 0;
}
+static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
+{
+ int ret;
+
+ mutex_lock(&scomp_lock);
+ ret = crypto_scomp_alloc_all_scratches();
+ mutex_unlock(&scomp_lock);
+
+ return ret;
+}
+
static void crypto_scomp_sg_free(struct scatterlist *sgl)
{
int i, n;
@@ -211,9 +220,7 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
scratch_dst, &req->dlen, *ctx);
if (!ret) {
if (!req->dst) {
- req->dst = crypto_scomp_sg_alloc(req->dlen,
- req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
- GFP_KERNEL : GFP_ATOMIC);
+ req->dst = crypto_scomp_sg_alloc(req->dlen, GFP_ATOMIC);
if (!req->dst)
goto out;
}
@@ -240,6 +247,10 @@ static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
crypto_free_scomp(*ctx);
+
+ mutex_lock(&scomp_lock);
+ crypto_scomp_free_all_scratches();
+ mutex_unlock(&scomp_lock);
}
int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
@@ -316,40 +327,18 @@ static const struct crypto_type crypto_scomp_type = {
int crypto_register_scomp(struct scomp_alg *alg)
{
struct crypto_alg *base = &alg->base;
- int ret = -ENOMEM;
-
- mutex_lock(&scomp_lock);
- if (crypto_scomp_alloc_all_scratches())
- goto error;
base->cra_type = &crypto_scomp_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;
- ret = crypto_register_alg(base);
- if (ret)
- goto error;
-
- mutex_unlock(&scomp_lock);
- return ret;
-
-error:
- crypto_scomp_free_all_scratches();
- mutex_unlock(&scomp_lock);
- return ret;
+ return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_scomp);
int crypto_unregister_scomp(struct scomp_alg *alg)
{
- int ret;
-
- mutex_lock(&scomp_lock);
- ret = crypto_unregister_alg(&alg->base);
- crypto_scomp_free_all_scratches();
- mutex_unlock(&scomp_lock);
-
- return ret;
+ return crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomp);
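With this change the per-CPU scratch buffers are no longer allocated when an scomp algorithm is registered but when a transform is instantiated, and they are released again on transform exit, both under scomp_lock. A generic kernel-style sketch of that mutex-protected lazy-allocation pattern; the my_* names and the buffer size are placeholders, not the real scompress symbols:

#include <linux/mutex.h>
#include <linux/vmalloc.h>

#define MY_SCRATCH_SIZE	131072		/* placeholder size, not the real one */

static DEFINE_MUTEX(my_scratch_lock);
static void *my_scratch;

/* Allocate the shared scratch area the first time a transform is created. */
static int my_get_scratch(void)
{
	int ret = 0;

	mutex_lock(&my_scratch_lock);
	if (!my_scratch) {
		my_scratch = vmalloc(MY_SCRATCH_SIZE);
		if (!my_scratch)
			ret = -ENOMEM;
	}
	mutex_unlock(&my_scratch_lock);
	return ret;
}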
diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c
index 94970a794975..7c3382facc82 100644
--- a/crypto/serpent_generic.c
+++ b/crypto/serpent_generic.c
@@ -229,6 +229,46 @@
x4 ^= x2; \
})
+static void __serpent_setkey_sbox(u32 r0, u32 r1, u32 r2, u32 r3, u32 r4, u32 *k)
+{
+ k += 100;
+ S3(r3, r4, r0, r1, r2); store_and_load_keys(r1, r2, r4, r3, 28, 24);
+ S4(r1, r2, r4, r3, r0); store_and_load_keys(r2, r4, r3, r0, 24, 20);
+ S5(r2, r4, r3, r0, r1); store_and_load_keys(r1, r2, r4, r0, 20, 16);
+ S6(r1, r2, r4, r0, r3); store_and_load_keys(r4, r3, r2, r0, 16, 12);
+ S7(r4, r3, r2, r0, r1); store_and_load_keys(r1, r2, r0, r4, 12, 8);
+ S0(r1, r2, r0, r4, r3); store_and_load_keys(r0, r2, r4, r1, 8, 4);
+ S1(r0, r2, r4, r1, r3); store_and_load_keys(r3, r4, r1, r0, 4, 0);
+ S2(r3, r4, r1, r0, r2); store_and_load_keys(r2, r4, r3, r0, 0, -4);
+ S3(r2, r4, r3, r0, r1); store_and_load_keys(r0, r1, r4, r2, -4, -8);
+ S4(r0, r1, r4, r2, r3); store_and_load_keys(r1, r4, r2, r3, -8, -12);
+ S5(r1, r4, r2, r3, r0); store_and_load_keys(r0, r1, r4, r3, -12, -16);
+ S6(r0, r1, r4, r3, r2); store_and_load_keys(r4, r2, r1, r3, -16, -20);
+ S7(r4, r2, r1, r3, r0); store_and_load_keys(r0, r1, r3, r4, -20, -24);
+ S0(r0, r1, r3, r4, r2); store_and_load_keys(r3, r1, r4, r0, -24, -28);
+ k -= 50;
+ S1(r3, r1, r4, r0, r2); store_and_load_keys(r2, r4, r0, r3, 22, 18);
+ S2(r2, r4, r0, r3, r1); store_and_load_keys(r1, r4, r2, r3, 18, 14);
+ S3(r1, r4, r2, r3, r0); store_and_load_keys(r3, r0, r4, r1, 14, 10);
+ S4(r3, r0, r4, r1, r2); store_and_load_keys(r0, r4, r1, r2, 10, 6);
+ S5(r0, r4, r1, r2, r3); store_and_load_keys(r3, r0, r4, r2, 6, 2);
+ S6(r3, r0, r4, r2, r1); store_and_load_keys(r4, r1, r0, r2, 2, -2);
+ S7(r4, r1, r0, r2, r3); store_and_load_keys(r3, r0, r2, r4, -2, -6);
+ S0(r3, r0, r2, r4, r1); store_and_load_keys(r2, r0, r4, r3, -6, -10);
+ S1(r2, r0, r4, r3, r1); store_and_load_keys(r1, r4, r3, r2, -10, -14);
+ S2(r1, r4, r3, r2, r0); store_and_load_keys(r0, r4, r1, r2, -14, -18);
+ S3(r0, r4, r1, r2, r3); store_and_load_keys(r2, r3, r4, r0, -18, -22);
+ k -= 50;
+ S4(r2, r3, r4, r0, r1); store_and_load_keys(r3, r4, r0, r1, 28, 24);
+ S5(r3, r4, r0, r1, r2); store_and_load_keys(r2, r3, r4, r1, 24, 20);
+ S6(r2, r3, r4, r1, r0); store_and_load_keys(r4, r0, r3, r1, 20, 16);
+ S7(r4, r0, r3, r1, r2); store_and_load_keys(r2, r3, r1, r4, 16, 12);
+ S0(r2, r3, r1, r4, r0); store_and_load_keys(r1, r3, r4, r2, 12, 8);
+ S1(r1, r3, r4, r2, r0); store_and_load_keys(r0, r4, r2, r1, 8, 4);
+ S2(r0, r4, r2, r1, r3); store_and_load_keys(r3, r4, r0, r1, 4, 0);
+ S3(r3, r4, r0, r1, r2); storekeys(r1, r2, r4, r3, 0);
+}
+
int __serpent_setkey(struct serpent_ctx *ctx, const u8 *key,
unsigned int keylen)
{
@@ -395,42 +435,7 @@ int __serpent_setkey(struct serpent_ctx *ctx, const u8 *key,
keyiter(k[23], r1, r0, r3, 131, 31);
/* Apply S-boxes */
-
- S3(r3, r4, r0, r1, r2); store_and_load_keys(r1, r2, r4, r3, 28, 24);
- S4(r1, r2, r4, r3, r0); store_and_load_keys(r2, r4, r3, r0, 24, 20);
- S5(r2, r4, r3, r0, r1); store_and_load_keys(r1, r2, r4, r0, 20, 16);
- S6(r1, r2, r4, r0, r3); store_and_load_keys(r4, r3, r2, r0, 16, 12);
- S7(r4, r3, r2, r0, r1); store_and_load_keys(r1, r2, r0, r4, 12, 8);
- S0(r1, r2, r0, r4, r3); store_and_load_keys(r0, r2, r4, r1, 8, 4);
- S1(r0, r2, r4, r1, r3); store_and_load_keys(r3, r4, r1, r0, 4, 0);
- S2(r3, r4, r1, r0, r2); store_and_load_keys(r2, r4, r3, r0, 0, -4);
- S3(r2, r4, r3, r0, r1); store_and_load_keys(r0, r1, r4, r2, -4, -8);
- S4(r0, r1, r4, r2, r3); store_and_load_keys(r1, r4, r2, r3, -8, -12);
- S5(r1, r4, r2, r3, r0); store_and_load_keys(r0, r1, r4, r3, -12, -16);
- S6(r0, r1, r4, r3, r2); store_and_load_keys(r4, r2, r1, r3, -16, -20);
- S7(r4, r2, r1, r3, r0); store_and_load_keys(r0, r1, r3, r4, -20, -24);
- S0(r0, r1, r3, r4, r2); store_and_load_keys(r3, r1, r4, r0, -24, -28);
- k -= 50;
- S1(r3, r1, r4, r0, r2); store_and_load_keys(r2, r4, r0, r3, 22, 18);
- S2(r2, r4, r0, r3, r1); store_and_load_keys(r1, r4, r2, r3, 18, 14);
- S3(r1, r4, r2, r3, r0); store_and_load_keys(r3, r0, r4, r1, 14, 10);
- S4(r3, r0, r4, r1, r2); store_and_load_keys(r0, r4, r1, r2, 10, 6);
- S5(r0, r4, r1, r2, r3); store_and_load_keys(r3, r0, r4, r2, 6, 2);
- S6(r3, r0, r4, r2, r1); store_and_load_keys(r4, r1, r0, r2, 2, -2);
- S7(r4, r1, r0, r2, r3); store_and_load_keys(r3, r0, r2, r4, -2, -6);
- S0(r3, r0, r2, r4, r1); store_and_load_keys(r2, r0, r4, r3, -6, -10);
- S1(r2, r0, r4, r3, r1); store_and_load_keys(r1, r4, r3, r2, -10, -14);
- S2(r1, r4, r3, r2, r0); store_and_load_keys(r0, r4, r1, r2, -14, -18);
- S3(r0, r4, r1, r2, r3); store_and_load_keys(r2, r3, r4, r0, -18, -22);
- k -= 50;
- S4(r2, r3, r4, r0, r1); store_and_load_keys(r3, r4, r0, r1, 28, 24);
- S5(r3, r4, r0, r1, r2); store_and_load_keys(r2, r3, r4, r1, 24, 20);
- S6(r2, r3, r4, r1, r0); store_and_load_keys(r4, r0, r3, r1, 20, 16);
- S7(r4, r0, r3, r1, r2); store_and_load_keys(r2, r3, r1, r4, 16, 12);
- S0(r2, r3, r1, r4, r0); store_and_load_keys(r1, r3, r4, r2, 12, 8);
- S1(r1, r3, r4, r2, r0); store_and_load_keys(r0, r4, r2, r1, 8, 4);
- S2(r0, r4, r2, r1, r3); store_and_load_keys(r3, r4, r0, r1, 4, 0);
- S3(r3, r4, r0, r1, r2); storekeys(r1, r2, r4, r3, 0);
+ __serpent_setkey_sbox(r0, r1, r2, r3, r4, ctx->expkey);
return 0;
}
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 0dd6a432d6ca..0022a18d36ee 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -1404,9 +1404,9 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
test_cipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0,
speed_template_32_40_48);
test_cipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0,
- speed_template_32_48_64);
+ speed_template_32_64);
test_cipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
- speed_template_32_48_64);
+ speed_template_32_64);
test_cipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0,
@@ -1837,9 +1837,9 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
test_acipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0,
speed_template_32_40_48);
test_acipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0,
- speed_template_32_48_64);
+ speed_template_32_64);
test_acipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
- speed_template_32_48_64);
+ speed_template_32_64);
test_acipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0,
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 6ceb0e2758bb..d54971d2d1c8 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -32675,6 +32675,10 @@ static const struct cipher_testvec chacha20_enc_tv_template[] = {
"\x5b\x86\x2f\x37\x30\xe3\x7c\xfd"
"\xc4\xfd\x80\x6c\x22\xf2\x21",
.rlen = 375,
+ .also_non_np = 1,
+ .np = 3,
+ .tap = { 375 - 20, 4, 16 },
+
}, { /* RFC7539 A.2. Test Vector #3 */
.key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a"
"\xf3\x33\x88\x86\x04\xf6\xb5\xf0"
@@ -33049,6 +33053,9 @@ static const struct cipher_testvec chacha20_enc_tv_template[] = {
"\xa1\xed\xad\xd5\x76\xfa\x24\x8f"
"\x98",
.rlen = 1281,
+ .also_non_np = 1,
+ .np = 3,
+ .tap = { 1200, 1, 80 },
},
};
diff --git a/drivers/Makefile b/drivers/Makefile
index dfdcda00bfe3..d90fdc413648 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -125,7 +125,6 @@ obj-$(CONFIG_ACCESSIBILITY) += accessibility/
obj-$(CONFIG_ISDN) += isdn/
obj-$(CONFIG_EDAC) += edac/
obj-$(CONFIG_EISA) += eisa/
-obj-y += lguest/
obj-$(CONFIG_CPU_FREQ) += cpufreq/
obj-$(CONFIG_CPU_IDLE) += cpuidle/
obj-y += mmc/
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index b1aacfc62b1f..90265ab4437a 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -50,6 +50,7 @@ acpi-$(CONFIG_ACPI_REDUCED_HARDWARE_ONLY) += evged.o
acpi-y += sysfs.o
acpi-y += property.o
acpi-$(CONFIG_X86) += acpi_cmos_rtc.o
+acpi-$(CONFIG_X86) += x86/apple.o
acpi-$(CONFIG_X86) += x86/utils.o
acpi-$(CONFIG_DEBUG_FS) += debugfs.o
acpi-$(CONFIG_ACPI_NUMA) += numa.o
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
index fc6c416f8724..d5999eb41c00 100644
--- a/drivers/acpi/acpi_apd.c
+++ b/drivers/acpi/acpi_apd.c
@@ -180,8 +180,8 @@ static const struct acpi_device_id acpi_apd_device_ids[] = {
{ "APMC0D0F", APD_ADDR(xgene_i2c_desc) },
{ "BRCM900D", APD_ADDR(vulcan_spi_desc) },
{ "CAV900D", APD_ADDR(vulcan_spi_desc) },
- { "HISI0A21", APD_ADDR(hip07_i2c_desc) },
- { "HISI0A22", APD_ADDR(hip08_i2c_desc) },
+ { "HISI02A1", APD_ADDR(hip07_i2c_desc) },
+ { "HISI02A2", APD_ADDR(hip08_i2c_desc) },
#endif
{ }
};
diff --git a/drivers/acpi/acpi_lpat.c b/drivers/acpi/acpi_lpat.c
index c1c4877ca96c..2cd9f738812b 100644
--- a/drivers/acpi/acpi_lpat.c
+++ b/drivers/acpi/acpi_lpat.c
@@ -25,7 +25,7 @@
* @raw: the raw value, used as a key to get the temperature from the
* above mapping table
*
- * A positive converted temperarure value will be returned on success,
+ * A positive converted temperature value will be returned on success,
* a negative errno will be returned in error cases.
*/
int acpi_lpat_raw_to_temp(struct acpi_lpat_conversion_table *lpat_table,
@@ -55,11 +55,11 @@ EXPORT_SYMBOL_GPL(acpi_lpat_raw_to_temp);
* acpi_lpat_temp_to_raw(): Return raw value from temperature through
* LPAT conversion table
*
- * @lpat: the temperature_raw mapping table
+ * @lpat_table: the temperature_raw mapping table
* @temp: the temperature, used as a key to get the raw value from the
* above mapping table
*
- * A positive converted temperature value will be returned on success,
+ * The raw value will be returned on success,
* a negative errno will be returned in error cases.
*/
int acpi_lpat_temp_to_raw(struct acpi_lpat_conversion_table *lpat_table,
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index e51a1e98e62f..032ae44710e5 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -85,6 +85,7 @@ static const struct lpss_device_desc lpss_dma_desc = {
};
struct lpss_private_data {
+ struct acpi_device *adev;
void __iomem *mmio_base;
resource_size_t mmio_size;
unsigned int fixed_clk_rate;
@@ -155,6 +156,12 @@ static struct pwm_lookup byt_pwm_lookup[] = {
static void byt_pwm_setup(struct lpss_private_data *pdata)
{
+ struct acpi_device *adev = pdata->adev;
+
+ /* Only call pwm_add_table for the first PWM controller */
+ if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1"))
+ return;
+
if (!acpi_dev_present("INT33FD", NULL, -1))
pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup));
}
@@ -180,6 +187,12 @@ static struct pwm_lookup bsw_pwm_lookup[] = {
static void bsw_pwm_setup(struct lpss_private_data *pdata)
{
+ struct acpi_device *adev = pdata->adev;
+
+ /* Only call pwm_add_table for the first PWM controller */
+ if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1"))
+ return;
+
pwm_add_table(bsw_pwm_lookup, ARRAY_SIZE(bsw_pwm_lookup));
}
@@ -452,10 +465,12 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
acpi_dev_free_resource_list(&resource_list);
if (!pdata->mmio_base) {
- ret = -ENOMEM;
+ /* Skip the device, but continue the namespace scan. */
+ ret = 0;
goto err_out;
}
+ pdata->adev = adev;
pdata->dev_desc = dev_desc;
if (dev_desc->setup)
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index f098e25b6b41..86c10599d9f8 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -670,7 +670,7 @@ err:
}
-void __init acpi_processor_check_duplicates(void)
+static void __init acpi_processor_check_duplicates(void)
{
/* check the correctness for all processors in ACPI namespace */
acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c
index 8c4e0a18460a..bf22c29d2517 100644
--- a/drivers/acpi/acpi_watchdog.c
+++ b/drivers/acpi/acpi_watchdog.c
@@ -86,7 +86,12 @@ void __init acpi_watchdog_init(void)
found = false;
resource_list_for_each_entry(rentry, &resource_list) {
- if (resource_contains(rentry->res, &res)) {
+ if (rentry->res->flags == res.flags &&
+ resource_overlaps(rentry->res, &res)) {
+ if (res.start < rentry->res->start)
+ rentry->res->start = res.start;
+ if (res.end > rentry->res->end)
+ rentry->res->end = res.end;
found = true;
break;
}
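Instead of requiring the WDAT-declared register region to be fully contained in an already known resource, the code now widens any overlapping resource of the same type to cover it. A stand-alone sketch of that merge with hypothetical MMIO ranges (not the kernel's struct resource helpers, just the arithmetic):

#include <stdio.h>

struct range { unsigned long start, end; };

static int overlaps(const struct range *a, const struct range *b)
{
	return a->start <= b->end && b->start <= a->end;
}

int main(void)
{
	struct range known    = { 0x1000, 0x100f };	/* already discovered */
	struct range incoming = { 0x1008, 0x1017 };	/* WDAT register region */

	if (overlaps(&known, &incoming)) {
		if (incoming.start < known.start)
			known.start = incoming.start;
		if (incoming.end > known.end)
			known.end = incoming.end;
	}
	printf("merged: [0x%lx, 0x%lx]\n", known.start, known.end); /* 0x1000..0x1017 */
	return 0;
}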
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index b125bdd3d58b..1709551bc4aa 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -18,6 +18,7 @@ acpi-y := \
dsmthdat.o \
dsobject.o \
dsopcode.o \
+ dspkginit.o \
dsutils.o \
dswexec.o \
dswload.o \
diff --git a/drivers/acpi/acpica/acapps.h b/drivers/acpi/acpica/acapps.h
index bb6a84b0b4b3..7a1a68b5ac5c 100644
--- a/drivers/acpi/acpica/acapps.h
+++ b/drivers/acpi/acpica/acapps.h
@@ -114,6 +114,8 @@ ac_get_all_tables_from_file(char *filename,
u8 get_only_aml_tables,
struct acpi_new_table_desc **return_list_head);
+void ac_delete_table_list(struct acpi_new_table_desc *list_head);
+
u8 ac_is_file_binary(FILE * file);
acpi_status ac_validate_table_header(FILE * file, long table_offset);
diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h
index 0d95c85cce06..f8f3a6e74128 100644
--- a/drivers/acpi/acpica/acdispat.h
+++ b/drivers/acpi/acpica/acdispat.h
@@ -237,6 +237,11 @@ acpi_ds_initialize_objects(u32 table_index,
* dsobject - Parser/Interpreter interface - object initialization and conversion
*/
acpi_status
+acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op,
+ union acpi_operand_object **obj_desc_ptr);
+
+acpi_status
acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
union acpi_parse_object *op,
u32 buffer_length,
@@ -259,6 +264,14 @@ acpi_ds_create_node(struct acpi_walk_state *walk_state,
union acpi_parse_object *op);
/*
+ * dspkginit - Package object initialization
+ */
+acpi_status
+acpi_ds_init_package_element(u8 object_type,
+ union acpi_operand_object *source_object,
+ union acpi_generic_state *state, void *context);
+
+/*
* dsutils - Parser/Interpreter interface utility routines
*/
void acpi_ds_clear_implicit_return(struct acpi_walk_state *walk_state);
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 8ddd3b20e0c6..0d45b8bb1678 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -199,6 +199,7 @@ struct acpi_namespace_node {
#define ANOBJ_EVALUATED 0x20 /* Set on first evaluation of node */
#define ANOBJ_ALLOCATED_BUFFER 0x40 /* Method AML buffer is dynamic (install_method) */
+#define IMPLICIT_EXTERNAL 0x02 /* iASL only: This object created implicitly via External */
#define ANOBJ_IS_EXTERNAL 0x08 /* iASL only: This object created via External() */
#define ANOBJ_METHOD_NO_RETVAL 0x10 /* iASL only: Method has no return value */
#define ANOBJ_METHOD_SOME_NO_RETVAL 0x20 /* iASL only: Method has at least one return value */
@@ -604,7 +605,7 @@ struct acpi_update_state {
* Pkg state - used to traverse nested package structures
*/
struct acpi_pkg_state {
- ACPI_STATE_COMMON u16 index;
+ ACPI_STATE_COMMON u32 index;
union acpi_operand_object *source_object;
union acpi_operand_object *dest_object;
struct acpi_walk_state *walk_state;
@@ -867,7 +868,7 @@ struct acpi_parse_obj_named {
/* This version is used by the iASL compiler only */
-#define ACPI_MAX_PARSEOP_NAME 20
+#define ACPI_MAX_PARSEOP_NAME 20
struct acpi_parse_obj_asl {
ACPI_PARSE_COMMON union acpi_parse_object *child;
@@ -907,7 +908,7 @@ union acpi_parse_object {
struct asl_comment_state {
u8 comment_type;
u32 spaces_before;
- union acpi_parse_object *latest_parse_node;
+ union acpi_parse_object *latest_parse_op;
union acpi_parse_object *parsing_paren_brace_node;
u8 capture_comments;
};
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index 27c3f982d810..5226146190bf 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -122,7 +122,9 @@ struct acpi_object_integer {
_type *pointer; \
u32 length;
-struct acpi_object_string { /* Null terminated, ASCII characters only */
+/* Null terminated, ASCII characters only */
+
+struct acpi_object_string {
ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_BUFFER_INFO(char) /* String in AML stream or allocated string */
};
@@ -211,7 +213,9 @@ struct acpi_object_method {
union acpi_operand_object *notify_list[2]; /* Handlers for system/device notifies */\
union acpi_operand_object *handler; /* Handler for Address space */
-struct acpi_object_notify_common { /* COMMON NOTIFY for POWER, PROCESSOR, DEVICE, and THERMAL */
+/* COMMON NOTIFY for POWER, PROCESSOR, DEVICE, and THERMAL */
+
+struct acpi_object_notify_common {
ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO};
struct acpi_object_device {
@@ -258,7 +262,9 @@ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO};
u8 access_length; /* For serial regions/fields */
-struct acpi_object_field_common { /* COMMON FIELD (for BUFFER, REGION, BANK, and INDEX fields) */
+/* COMMON FIELD (for BUFFER, REGION, BANK, and INDEX fields) */
+
+struct acpi_object_field_common {
ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO union acpi_operand_object *region_obj; /* Parent Operation Region object (REGION/BANK fields only) */
};
@@ -333,11 +339,12 @@ struct acpi_object_addr_handler {
struct acpi_object_reference {
ACPI_OBJECT_COMMON_HEADER u8 class; /* Reference Class */
u8 target_type; /* Used for Index Op */
- u8 reserved;
+ u8 resolved; /* Reference has been resolved to a value */
void *object; /* name_op=>HANDLE to obj, index_op=>union acpi_operand_object */
struct acpi_namespace_node *node; /* ref_of or Namepath */
union acpi_operand_object **where; /* Target of Index */
u8 *index_pointer; /* Used for Buffers and Strings */
+ u8 *aml; /* Used for deferred resolution of the ref */
u32 value; /* Used for Local/Arg/Index/ddb_handle */
};
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index c8da453bd960..84a3ceb6e384 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -76,7 +76,8 @@ void acpi_tb_release_temp_table(struct acpi_table_desc *table_desc);
acpi_status acpi_tb_validate_temp_table(struct acpi_table_desc *table_desc);
acpi_status
-acpi_tb_verify_temp_table(struct acpi_table_desc *table_desc, char *signature);
+acpi_tb_verify_temp_table(struct acpi_table_desc *table_desc,
+ char *signature, u32 *table_index);
u8 acpi_tb_is_table_loaded(u32 table_index);
@@ -132,6 +133,8 @@ acpi_tb_install_and_load_table(acpi_physical_address address,
acpi_status acpi_tb_unload_table(u32 table_index);
+void acpi_tb_notify_table(u32 event, void *table);
+
void acpi_tb_terminate(void);
acpi_status acpi_tb_delete_namespace_by_owner(u32 table_index);
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 2a3cc4296481..745134ade35f 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -516,7 +516,7 @@ union acpi_generic_state *acpi_ut_create_update_state(union acpi_operand_object
union acpi_generic_state *acpi_ut_create_pkg_state(void *internal_object,
void *external_object,
- u16 index);
+ u32 index);
acpi_status
acpi_ut_create_update_state_and_push(union acpi_operand_object *object,
@@ -538,6 +538,13 @@ acpi_status
acpi_ut_short_divide(u64 in_dividend,
u32 divisor, u64 *out_quotient, u32 *out_remainder);
+acpi_status
+acpi_ut_short_multiply(u64 in_multiplicand, u32 multiplier, u64 *outproduct);
+
+acpi_status acpi_ut_short_shift_left(u64 operand, u32 count, u64 *out_result);
+
+acpi_status acpi_ut_short_shift_right(u64 operand, u32 count, u64 *out_result);
+
/*
* utmisc
*/
diff --git a/drivers/acpi/acpica/dbdisply.c b/drivers/acpi/acpica/dbdisply.c
index 46bf270ac525..5a606eac0c22 100644
--- a/drivers/acpi/acpica/dbdisply.c
+++ b/drivers/acpi/acpica/dbdisply.c
@@ -310,7 +310,7 @@ dump_node:
}
else {
- acpi_os_printf("Object (%p) Pathname: %s\n",
+ acpi_os_printf("Object %p: Namespace Node - Pathname: %s\n",
node, (char *)ret_buf.pointer);
}
@@ -326,7 +326,7 @@ dump_node:
obj_desc = acpi_ns_get_attached_object(node);
if (obj_desc) {
- acpi_os_printf("\nAttached Object (%p):\n", obj_desc);
+ acpi_os_printf("\nAttached Object %p:", obj_desc);
if (!acpi_os_readable
(obj_desc, sizeof(union acpi_operand_object))) {
acpi_os_printf
@@ -335,9 +335,36 @@ dump_node:
return;
}
- acpi_ut_debug_dump_buffer((void *)obj_desc,
- sizeof(union acpi_operand_object),
- display, ACPI_UINT32_MAX);
+ if (ACPI_GET_DESCRIPTOR_TYPE(((struct acpi_namespace_node *)
+ obj_desc)) ==
+ ACPI_DESC_TYPE_NAMED) {
+ acpi_os_printf(" Namespace Node - ");
+ status =
+ acpi_get_name((struct acpi_namespace_node *)
+ obj_desc,
+ ACPI_FULL_PATHNAME_NO_TRAILING,
+ &ret_buf);
+ if (ACPI_FAILURE(status)) {
+ acpi_os_printf
+ ("Could not convert name to pathname\n");
+ } else {
+ acpi_os_printf("Pathname: %s",
+ (char *)ret_buf.pointer);
+ }
+
+ acpi_os_printf("\n");
+ acpi_ut_debug_dump_buffer((void *)obj_desc,
+ sizeof(struct
+ acpi_namespace_node),
+ display, ACPI_UINT32_MAX);
+ } else {
+ acpi_os_printf("\n");
+ acpi_ut_debug_dump_buffer((void *)obj_desc,
+ sizeof(union
+ acpi_operand_object),
+ display, ACPI_UINT32_MAX);
+ }
+
acpi_ex_dump_object_descriptor(obj_desc, 1);
}
}
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index c5dccc54307d..7bcf5f5ea029 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -184,6 +184,7 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
/* Execute flag should always be set when this function is entered */
if (!(walk_state->parse_flags & ACPI_PARSE_EXECUTE)) {
+ ACPI_ERROR((AE_INFO, "Parse execute mode is not set"));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
@@ -556,6 +557,7 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
return_ACPI_STATUS(AE_OK);
}
+ ACPI_ERROR((AE_INFO, "Parse deferred mode is not set"));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index 7df3152ed856..82448551781b 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -52,12 +52,6 @@
#define _COMPONENT ACPI_DISPATCHER
ACPI_MODULE_NAME("dsobject")
-/* Local prototypes */
-static acpi_status
-acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
- union acpi_parse_object *op,
- union acpi_operand_object **obj_desc_ptr);
-
#ifndef ACPI_NO_METHOD_EXECUTION
/*******************************************************************************
*
@@ -73,15 +67,13 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
* Simple objects are any objects other than a package object!
*
******************************************************************************/
-
-static acpi_status
+acpi_status
acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
union acpi_parse_object *op,
union acpi_operand_object **obj_desc_ptr)
{
union acpi_operand_object *obj_desc;
acpi_status status;
- acpi_object_type type;
ACPI_FUNCTION_TRACE(ds_build_internal_object);
@@ -89,140 +81,47 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
if (op->common.aml_opcode == AML_INT_NAMEPATH_OP) {
/*
* This is a named object reference. If this name was
- * previously looked up in the namespace, it was stored in this op.
- * Otherwise, go ahead and look it up now
+ * previously looked up in the namespace, it was stored in
+ * this op. Otherwise, go ahead and look it up now
*/
if (!op->common.node) {
- status = acpi_ns_lookup(walk_state->scope_info,
- op->common.value.string,
- ACPI_TYPE_ANY,
- ACPI_IMODE_EXECUTE,
- ACPI_NS_SEARCH_PARENT |
- ACPI_NS_DONT_OPEN_SCOPE, NULL,
- ACPI_CAST_INDIRECT_PTR(struct
- acpi_namespace_node,
- &(op->
- common.
- node)));
- if (ACPI_FAILURE(status)) {
-
- /* Check if we are resolving a named reference within a package */
-
- if ((status == AE_NOT_FOUND)
- && (acpi_gbl_enable_interpreter_slack)
- &&
- ((op->common.parent->common.aml_opcode ==
- AML_PACKAGE_OP)
- || (op->common.parent->common.aml_opcode ==
- AML_VARIABLE_PACKAGE_OP))) {
- /*
- * We didn't find the target and we are populating elements
- * of a package - ignore if slack enabled. Some ASL code
- * contains dangling invalid references in packages and
- * expects that no exception will be issued. Leave the
- * element as a null element. It cannot be used, but it
- * can be overwritten by subsequent ASL code - this is
- * typically the case.
- */
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Ignoring unresolved reference in package [%4.4s]\n",
- walk_state->
- scope_info->scope.
- node->name.ascii));
-
- return_ACPI_STATUS(AE_OK);
- } else {
- ACPI_ERROR_NAMESPACE(op->common.value.
- string, status);
- }
-
- return_ACPI_STATUS(status);
- }
- }
-
- /* Special object resolution for elements of a package */
-
- if ((op->common.parent->common.aml_opcode == AML_PACKAGE_OP) ||
- (op->common.parent->common.aml_opcode ==
- AML_VARIABLE_PACKAGE_OP)) {
- /*
- * Attempt to resolve the node to a value before we insert it into
- * the package. If this is a reference to a common data type,
- * resolve it immediately. According to the ACPI spec, package
- * elements can only be "data objects" or method references.
- * Attempt to resolve to an Integer, Buffer, String or Package.
- * If cannot, return the named reference (for things like Devices,
- * Methods, etc.) Buffer Fields and Fields will resolve to simple
- * objects (int/buf/str/pkg).
- *
- * NOTE: References to things like Devices, Methods, Mutexes, etc.
- * will remain as named references. This behavior is not described
- * in the ACPI spec, but it appears to be an oversight.
- */
- obj_desc =
- ACPI_CAST_PTR(union acpi_operand_object,
- op->common.node);
-
- status =
- acpi_ex_resolve_node_to_value(ACPI_CAST_INDIRECT_PTR
- (struct
- acpi_namespace_node,
- &obj_desc),
- walk_state);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- /*
- * Special handling for Alias objects. We need to setup the type
- * and the Op->Common.Node to point to the Alias target. Note,
- * Alias has at most one level of indirection internally.
- */
- type = op->common.node->type;
- if (type == ACPI_TYPE_LOCAL_ALIAS) {
- type = obj_desc->common.type;
- op->common.node =
- ACPI_CAST_PTR(struct acpi_namespace_node,
- op->common.node->object);
- }
-
- switch (type) {
- /*
- * For these types, we need the actual node, not the subobject.
- * However, the subobject did not get an extra reference count above.
- *
- * TBD: should ex_resolve_node_to_value be changed to fix this?
- */
- case ACPI_TYPE_DEVICE:
- case ACPI_TYPE_THERMAL:
-
- acpi_ut_add_reference(op->common.node->object);
- /*lint -fallthrough */
- /*
- * For these types, we need the actual node, not the subobject.
- * The subobject got an extra reference count in ex_resolve_node_to_value.
- */
- case ACPI_TYPE_MUTEX:
- case ACPI_TYPE_METHOD:
- case ACPI_TYPE_POWER:
- case ACPI_TYPE_PROCESSOR:
- case ACPI_TYPE_EVENT:
- case ACPI_TYPE_REGION:
-
- /* We will create a reference object for these types below */
- break;
+ /* Check if we are resolving a named reference within a package */
- default:
+ if ((op->common.parent->common.aml_opcode ==
+ AML_PACKAGE_OP)
+ || (op->common.parent->common.aml_opcode ==
+ AML_VARIABLE_PACKAGE_OP)) {
/*
- * All other types - the node was resolved to an actual
- * object, we are done.
+ * We won't resolve package elements here, we will do this
+ * after all ACPI tables are loaded into the namespace. This
+ * behavior supports both forward references to named objects
+ * and external references to objects in other tables.
*/
- goto exit;
+ goto create_new_object;
+ } else {
+ status = acpi_ns_lookup(walk_state->scope_info,
+ op->common.value.string,
+ ACPI_TYPE_ANY,
+ ACPI_IMODE_EXECUTE,
+ ACPI_NS_SEARCH_PARENT |
+ ACPI_NS_DONT_OPEN_SCOPE,
+ NULL,
+ ACPI_CAST_INDIRECT_PTR
+ (struct
+ acpi_namespace_node,
+ &(op->common.node)));
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR_NAMESPACE(op->common.value.
+ string, status);
+ return_ACPI_STATUS(status);
+ }
}
}
}
+create_new_object:
+
/* Create and init a new internal ACPI object */
obj_desc = acpi_ut_create_internal_object((acpi_ps_get_opcode_info
@@ -240,7 +139,27 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
return_ACPI_STATUS(status);
}
-exit:
+ /*
+ * Handling for unresolved package reference elements.
+ * These are elements that are namepaths.
+ */
+ if ((op->common.parent->common.aml_opcode == AML_PACKAGE_OP) ||
+ (op->common.parent->common.aml_opcode == AML_VARIABLE_PACKAGE_OP)) {
+ obj_desc->reference.resolved = TRUE;
+
+ if ((op->common.aml_opcode == AML_INT_NAMEPATH_OP) &&
+ !obj_desc->reference.node) {
+ /*
+ * Name was unresolved above.
+ * Get the prefix node for later lookup
+ */
+ obj_desc->reference.node =
+ walk_state->scope_info->scope.node;
+ obj_desc->reference.aml = op->common.aml;
+ obj_desc->reference.resolved = FALSE;
+ }
+ }
+
*obj_desc_ptr = obj_desc;
return_ACPI_STATUS(status);
}
@@ -351,200 +270,6 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
/*******************************************************************************
*
- * FUNCTION: acpi_ds_build_internal_package_obj
- *
- * PARAMETERS: walk_state - Current walk state
- * op - Parser object to be translated
- * element_count - Number of elements in the package - this is
- * the num_elements argument to Package()
- * obj_desc_ptr - Where the ACPI internal object is returned
- *
- * RETURN: Status
- *
- * DESCRIPTION: Translate a parser Op package object to the equivalent
- * namespace object
- *
- * NOTE: The number of elements in the package will be always be the num_elements
- * count, regardless of the number of elements in the package list. If
- * num_elements is smaller, only that many package list elements are used.
- * if num_elements is larger, the Package object is padded out with
- * objects of type Uninitialized (as per ACPI spec.)
- *
- * Even though the ASL compilers do not allow num_elements to be smaller
- * than the Package list length (for the fixed length package opcode), some
- * BIOS code modifies the AML on the fly to adjust the num_elements, and
- * this code compensates for that. This also provides compatibility with
- * other AML interpreters.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
- union acpi_parse_object *op,
- u32 element_count,
- union acpi_operand_object **obj_desc_ptr)
-{
- union acpi_parse_object *arg;
- union acpi_parse_object *parent;
- union acpi_operand_object *obj_desc = NULL;
- acpi_status status = AE_OK;
- u32 i;
- u16 index;
- u16 reference_count;
-
- ACPI_FUNCTION_TRACE(ds_build_internal_package_obj);
-
- /* Find the parent of a possibly nested package */
-
- parent = op->common.parent;
- while ((parent->common.aml_opcode == AML_PACKAGE_OP) ||
- (parent->common.aml_opcode == AML_VARIABLE_PACKAGE_OP)) {
- parent = parent->common.parent;
- }
-
- /*
- * If we are evaluating a Named package object "Name (xxxx, Package)",
- * the package object already exists, otherwise it must be created.
- */
- obj_desc = *obj_desc_ptr;
- if (!obj_desc) {
- obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_PACKAGE);
- *obj_desc_ptr = obj_desc;
- if (!obj_desc) {
- return_ACPI_STATUS(AE_NO_MEMORY);
- }
-
- obj_desc->package.node = parent->common.node;
- }
-
- /*
- * Allocate the element array (array of pointers to the individual
- * objects) based on the num_elements parameter. Add an extra pointer slot
- * so that the list is always null terminated.
- */
- obj_desc->package.elements = ACPI_ALLOCATE_ZEROED(((acpi_size)
- element_count +
- 1) * sizeof(void *));
-
- if (!obj_desc->package.elements) {
- acpi_ut_delete_object_desc(obj_desc);
- return_ACPI_STATUS(AE_NO_MEMORY);
- }
-
- obj_desc->package.count = element_count;
-
- /*
- * Initialize the elements of the package, up to the num_elements count.
- * Package is automatically padded with uninitialized (NULL) elements
- * if num_elements is greater than the package list length. Likewise,
- * Package is truncated if num_elements is less than the list length.
- */
- arg = op->common.value.arg;
- arg = arg->common.next;
- for (i = 0; arg && (i < element_count); i++) {
- if (arg->common.aml_opcode == AML_INT_RETURN_VALUE_OP) {
- if (arg->common.node->type == ACPI_TYPE_METHOD) {
- /*
- * A method reference "looks" to the parser to be a method
- * invocation, so we special case it here
- */
- arg->common.aml_opcode = AML_INT_NAMEPATH_OP;
- status =
- acpi_ds_build_internal_object(walk_state,
- arg,
- &obj_desc->
- package.
- elements[i]);
- } else {
- /* This package element is already built, just get it */
-
- obj_desc->package.elements[i] =
- ACPI_CAST_PTR(union acpi_operand_object,
- arg->common.node);
- }
- } else {
- status =
- acpi_ds_build_internal_object(walk_state, arg,
- &obj_desc->package.
- elements[i]);
- }
-
- if (*obj_desc_ptr) {
-
- /* Existing package, get existing reference count */
-
- reference_count =
- (*obj_desc_ptr)->common.reference_count;
- if (reference_count > 1) {
-
- /* Make new element ref count match original ref count */
-
- for (index = 0; index < (reference_count - 1);
- index++) {
- acpi_ut_add_reference((obj_desc->
- package.
- elements[i]));
- }
- }
- }
-
- arg = arg->common.next;
- }
-
- /* Check for match between num_elements and actual length of package_list */
-
- if (arg) {
- /*
- * num_elements was exhausted, but there are remaining elements in the
- * package_list. Truncate the package to num_elements.
- *
- * Note: technically, this is an error, from ACPI spec: "It is an error
- * for NumElements to be less than the number of elements in the
- * PackageList". However, we just print a message and
- * no exception is returned. This provides Windows compatibility. Some
- * BIOSs will alter the num_elements on the fly, creating this type
- * of ill-formed package object.
- */
- while (arg) {
- /*
- * We must delete any package elements that were created earlier
- * and are not going to be used because of the package truncation.
- */
- if (arg->common.node) {
- acpi_ut_remove_reference(ACPI_CAST_PTR
- (union
- acpi_operand_object,
- arg->common.node));
- arg->common.node = NULL;
- }
-
- /* Find out how many elements there really are */
-
- i++;
- arg = arg->common.next;
- }
-
- ACPI_INFO(("Actual Package length (%u) is larger than "
- "NumElements field (%u), truncated",
- i, element_count));
- } else if (i < element_count) {
- /*
- * Arg list (elements) was exhausted, but we did not reach num_elements count.
- * Note: this is not an error, the package is padded out with NULLs.
- */
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Package List length (%u) smaller than NumElements "
- "count (%u), padded with null elements\n",
- i, element_count));
- }
-
- obj_desc->package.flags |= AOPOBJ_DATA_VALID;
- op->common.node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_desc);
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ds_create_node
*
* PARAMETERS: walk_state - Current walk state
@@ -662,11 +387,20 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
case ACPI_TYPE_PACKAGE:
/*
- * Defer evaluation of Package term_arg operand
+ * Defer evaluation of Package term_arg operand and all
+ * package elements. (01/2017): We defer the element
+ * resolution to allow forward references from the package
+ * in order to provide compatibility with other ACPI
+ * implementations.
*/
obj_desc->package.node =
ACPI_CAST_PTR(struct acpi_namespace_node,
walk_state->operands[0]);
+
+ if (!op->named.data) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
obj_desc->package.aml_start = op->named.data;
obj_desc->package.aml_length = op->named.length;
break;
@@ -818,9 +552,11 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
/* Node was saved in Op */
obj_desc->reference.node = op->common.node;
- obj_desc->reference.object =
- op->common.node->object;
obj_desc->reference.class = ACPI_REFCLASS_NAME;
+ if (op->common.node) {
+ obj_desc->reference.object =
+ op->common.node->object;
+ }
break;
case AML_DEBUG_OP:
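The dsobject.c changes above defer the resolution of named references inside Package objects until the whole namespace has been loaded: an unresolved element keeps its prefix node and AML name path and is marked unresolved. The following is a minimal, standalone sketch (not ACPICA code; all names are illustrative) of that two-phase idea -- record the deferred references first, then resolve them in a later pass once the "namespace" is complete.

/*
 * Minimal sketch of deferred package element resolution: phase 1
 * stores unresolved name paths, phase 2 looks them up once every
 * "table" has populated the namespace.
 */
#include <stdio.h>
#include <string.h>

struct ns_entry { const char *path; int value; };      /* fake namespace */
struct pkg_element {
        int resolved;                                   /* like reference.resolved */
        const char *aml_path;                           /* like reference.aml */
        int value;
};

static const struct ns_entry namespace[] = {
        { "\\_SB.DEV0.VAL0", 42 },
        { "\\_SB.DEV1.VAL1", 7 },
};

static int lookup(const char *path, int *out)
{
        size_t i;

        for (i = 0; i < sizeof(namespace) / sizeof(namespace[0]); i++) {
                if (!strcmp(namespace[i].path, path)) {
                        *out = namespace[i].value;
                        return 0;
                }
        }
        return -1;                                      /* AE_NOT_FOUND analogue */
}

int main(void)
{
        /* Phase 1: build the package, leaving forward references unresolved */
        struct pkg_element pkg[2] = {
                { 0, "\\_SB.DEV0.VAL0", 0 },
                { 0, "\\_SB.DEV1.VAL1", 0 },
        };
        size_t i;

        /* Phase 2: after all tables are loaded, walk the package and resolve */
        for (i = 0; i < 2; i++) {
                if (!pkg[i].resolved &&
                    lookup(pkg[i].aml_path, &pkg[i].value) == 0)
                        pkg[i].resolved = 1;
        }

        for (i = 0; i < 2; i++)
                printf("element %zu: %s -> %d\n", i,
                       pkg[i].aml_path, pkg[i].value);
        return 0;
}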
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index dfc3c25a083d..0336df7ac47d 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -599,6 +599,15 @@ acpi_ds_eval_data_object_operands(struct acpi_walk_state *walk_state,
*/
walk_state->operand_index = walk_state->num_operands;
+ /* Ignore if child is not valid */
+
+ if (!op->common.value.arg) {
+ ACPI_ERROR((AE_INFO,
+ "Dispatch: Missing child while executing TermArg for %X",
+ op->common.aml_opcode));
+ return_ACPI_STATUS(AE_OK);
+ }
+
status = acpi_ds_create_operand(walk_state, op->common.value.arg, 1);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
diff --git a/drivers/acpi/acpica/dspkginit.c b/drivers/acpi/acpica/dspkginit.c
new file mode 100644
index 000000000000..6d487edfe2de
--- /dev/null
+++ b/drivers/acpi/acpica/dspkginit.c
@@ -0,0 +1,496 @@
+/******************************************************************************
+ *
+ * Module Name: dspkginit - Completion of deferred package initialization
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2017, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+#include "amlcode.h"
+#include "acdispat.h"
+#include "acinterp.h"
+
+#define _COMPONENT ACPI_NAMESPACE
+ACPI_MODULE_NAME("dspkginit")
+
+/* Local prototypes */
+static void
+acpi_ds_resolve_package_element(union acpi_operand_object **element);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_build_internal_package_obj
+ *
+ * PARAMETERS: walk_state - Current walk state
+ * op - Parser object to be translated
+ * element_count - Number of elements in the package - this is
+ * the num_elements argument to Package()
+ * obj_desc_ptr - Where the ACPI internal object is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Translate a parser Op package object to the equivalent
+ * namespace object
+ *
+ * NOTE: The number of elements in the package will always be the num_elements
+ * count, regardless of the number of elements in the package list. If
+ * num_elements is smaller, only that many package list elements are used.
+ * if num_elements is larger, the Package object is padded out with
+ * objects of type Uninitialized (as per ACPI spec.)
+ *
+ * Even though the ASL compilers do not allow num_elements to be smaller
+ * than the Package list length (for the fixed length package opcode), some
+ * BIOS code modifies the AML on the fly to adjust the num_elements, and
+ * this code compensates for that. This also provides compatibility with
+ * other AML interpreters.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op,
+ u32 element_count,
+ union acpi_operand_object **obj_desc_ptr)
+{
+ union acpi_parse_object *arg;
+ union acpi_parse_object *parent;
+ union acpi_operand_object *obj_desc = NULL;
+ acpi_status status = AE_OK;
+ u16 reference_count;
+ u32 index;
+ u32 i;
+
+ ACPI_FUNCTION_TRACE(ds_build_internal_package_obj);
+
+ /* Find the parent of a possibly nested package */
+
+ parent = op->common.parent;
+ while ((parent->common.aml_opcode == AML_PACKAGE_OP) ||
+ (parent->common.aml_opcode == AML_VARIABLE_PACKAGE_OP)) {
+ parent = parent->common.parent;
+ }
+
+ /*
+ * If we are evaluating a Named package object of the form:
+ * Name (xxxx, Package)
+ * the package object already exists, otherwise it must be created.
+ */
+ obj_desc = *obj_desc_ptr;
+ if (!obj_desc) {
+ obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_PACKAGE);
+ *obj_desc_ptr = obj_desc;
+ if (!obj_desc) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ obj_desc->package.node = parent->common.node;
+ }
+
+ if (obj_desc->package.flags & AOPOBJ_DATA_VALID) { /* Just in case */
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /*
+ * Allocate the element array (array of pointers to the individual
+ * objects) based on the num_elements parameter. Add an extra pointer slot
+ * so that the list is always null terminated.
+ */
+ obj_desc->package.elements = ACPI_ALLOCATE_ZEROED(((acpi_size)
+ element_count +
+ 1) * sizeof(void *));
+
+ if (!obj_desc->package.elements) {
+ acpi_ut_delete_object_desc(obj_desc);
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ obj_desc->package.count = element_count;
+ arg = op->common.value.arg;
+ arg = arg->common.next;
+
+ if (arg) {
+ obj_desc->package.flags |= AOPOBJ_DATA_VALID;
+ }
+
+ /*
+ * Initialize the elements of the package, up to the num_elements count.
+ * Package is automatically padded with uninitialized (NULL) elements
+ * if num_elements is greater than the package list length. Likewise,
+ * Package is truncated if num_elements is less than the list length.
+ */
+ for (i = 0; arg && (i < element_count); i++) {
+ if (arg->common.aml_opcode == AML_INT_RETURN_VALUE_OP) {
+ if (arg->common.node->type == ACPI_TYPE_METHOD) {
+ /*
+ * A method reference "looks" to the parser to be a method
+ * invocation, so we special case it here
+ */
+ arg->common.aml_opcode = AML_INT_NAMEPATH_OP;
+ status =
+ acpi_ds_build_internal_object(walk_state,
+ arg,
+ &obj_desc->
+ package.
+ elements[i]);
+ } else {
+ /* This package element is already built, just get it */
+
+ obj_desc->package.elements[i] =
+ ACPI_CAST_PTR(union acpi_operand_object,
+ arg->common.node);
+ }
+ } else {
+ status =
+ acpi_ds_build_internal_object(walk_state, arg,
+ &obj_desc->package.
+ elements[i]);
+ if (status == AE_NOT_FOUND) {
+ ACPI_ERROR((AE_INFO, "%-48s",
+ "****DS namepath not found"));
+ }
+
+ /*
+ * Initialize this package element. This function handles the
+ * resolution of named references within the package.
+ */
+ acpi_ds_init_package_element(0,
+ obj_desc->package.
+ elements[i], NULL,
+ &obj_desc->package.
+ elements[i]);
+ }
+
+ if (*obj_desc_ptr) {
+
+ /* Existing package, get existing reference count */
+
+ reference_count =
+ (*obj_desc_ptr)->common.reference_count;
+ if (reference_count > 1) {
+
+ /* Make new element ref count match original ref count */
+ /* TBD: Probably need an acpi_ut_add_references function */
+
+ for (index = 0;
+ index < ((u32)reference_count - 1);
+ index++) {
+ acpi_ut_add_reference((obj_desc->
+ package.
+ elements[i]));
+ }
+ }
+ }
+
+ arg = arg->common.next;
+ }
+
+ /* Check for match between num_elements and actual length of package_list */
+
+ if (arg) {
+ /*
+ * num_elements was exhausted, but there are remaining elements in
+ * the package_list. Truncate the package to num_elements.
+ *
+ * Note: technically, this is an error, from ACPI spec: "It is an
+ * error for NumElements to be less than the number of elements in
+ * the PackageList". However, we just print a message and no
+ * exception is returned. This provides compatibility with other
+ * ACPI implementations. Some firmware implementations will alter
+ * the num_elements on the fly, possibly creating this type of
+ * ill-formed package object.
+ */
+ while (arg) {
+ /*
+ * We must delete any package elements that were created earlier
+ * and are not going to be used because of the package truncation.
+ */
+ if (arg->common.node) {
+ acpi_ut_remove_reference(ACPI_CAST_PTR
+ (union
+ acpi_operand_object,
+ arg->common.node));
+ arg->common.node = NULL;
+ }
+
+ /* Find out how many elements there really are */
+
+ i++;
+ arg = arg->common.next;
+ }
+
+ ACPI_INFO(("Actual Package length (%u) is larger than "
+ "NumElements field (%u), truncated",
+ i, element_count));
+ } else if (i < element_count) {
+ /*
+ * Arg list (elements) was exhausted, but we did not reach
+ * num_elements count.
+ *
+ * Note: this is not an error, the package is padded out
+ * with NULLs.
+ */
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Package List length (%u) smaller than NumElements "
+ "count (%u), padded with null elements\n",
+ i, element_count));
+ }
+
+ obj_desc->package.flags |= AOPOBJ_DATA_VALID;
+ op->common.node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_desc);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_init_package_element
+ *
+ * PARAMETERS: acpi_pkg_callback
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Resolve a named reference element within a package object
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_init_package_element(u8 object_type,
+ union acpi_operand_object *source_object,
+ union acpi_generic_state *state, void *context)
+{
+ union acpi_operand_object **element_ptr;
+
+ if (!source_object) {
+ return (AE_OK);
+ }
+
+ /*
+ * The following code is a bit of a hack to work around a (current)
+ * limitation of the acpi_pkg_callback interface. We need a pointer
+ * to the location within the element array because a new object
+ * may be created and stored there.
+ */
+ if (context) {
+
+ /* A direct call was made to this function */
+
+ element_ptr = (union acpi_operand_object **)context;
+ } else {
+ /* Call came from acpi_ut_walk_package_tree */
+
+ element_ptr = state->pkg.this_target_obj;
+ }
+
+ /* We are only interested in reference objects/elements */
+
+ if (source_object->common.type == ACPI_TYPE_LOCAL_REFERENCE) {
+
+ /* Attempt to resolve the (named) reference to a namespace node */
+
+ acpi_ds_resolve_package_element(element_ptr);
+ } else if (source_object->common.type == ACPI_TYPE_PACKAGE) {
+ source_object->package.flags |= AOPOBJ_DATA_VALID;
+ }
+
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_resolve_package_element
+ *
+ * PARAMETERS: element_ptr - Pointer to a reference object
+ *
+ * RETURN: Possible new element is stored to the indirect element_ptr
+ *
+ * DESCRIPTION: Resolve a package element that is a reference to a named
+ * object.
+ *
+ ******************************************************************************/
+
+static void
+acpi_ds_resolve_package_element(union acpi_operand_object **element_ptr)
+{
+ acpi_status status;
+ union acpi_generic_state scope_info;
+ union acpi_operand_object *element = *element_ptr;
+ struct acpi_namespace_node *resolved_node;
+ char *external_path = NULL;
+ acpi_object_type type;
+
+ ACPI_FUNCTION_TRACE(ds_resolve_package_element);
+
+ /* Check if reference element is already resolved */
+
+ if (element->reference.resolved) {
+ return_VOID;
+ }
+
+ /* Element must be a reference object of correct type */
+
+ scope_info.scope.node = element->reference.node; /* Prefix node */
+
+ status = acpi_ns_lookup(&scope_info, (char *)element->reference.aml, /* Pointer to AML path */
+ ACPI_TYPE_ANY, ACPI_IMODE_EXECUTE,
+ ACPI_NS_SEARCH_PARENT | ACPI_NS_DONT_OPEN_SCOPE,
+ NULL, &resolved_node);
+ if (ACPI_FAILURE(status)) {
+ status = acpi_ns_externalize_name(ACPI_UINT32_MAX,
+ (char *)element->reference.
+ aml, NULL, &external_path);
+
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not find/resolve named package element: %s",
+ external_path));
+
+ ACPI_FREE(external_path);
+ *element_ptr = NULL;
+ return_VOID;
+ } else if (resolved_node->type == ACPI_TYPE_ANY) {
+
+ /* Named reference not resolved, return a NULL package element */
+
+ ACPI_ERROR((AE_INFO,
+ "Could not resolve named package element [%4.4s] in [%4.4s]",
+ resolved_node->name.ascii,
+ scope_info.scope.node->name.ascii));
+ *element_ptr = NULL;
+ return_VOID;
+ }
+#if 0
+ else if (resolved_node->flags & ANOBJ_TEMPORARY) {
+ /*
+ * A temporary node found here indicates that the reference is
+ * to a node that was created within this method. We are not
+ * going to allow it (especially if the package is returned
+ * from the method) -- the temporary node will be deleted out
+ * from under the method. (05/2017).
+ */
+ ACPI_ERROR((AE_INFO,
+ "Package element refers to a temporary name [%4.4s], "
+ "inserting a NULL element",
+ resolved_node->name.ascii));
+ *element_ptr = NULL;
+ return_VOID;
+ }
+#endif
+
+ /*
+ * Special handling for Alias objects. We need resolved_node to point
+ * to the Alias target. This effectively "resolves" the alias.
+ */
+ if (resolved_node->type == ACPI_TYPE_LOCAL_ALIAS) {
+ resolved_node = ACPI_CAST_PTR(struct acpi_namespace_node,
+ resolved_node->object);
+ }
+
+ /* Update the reference object */
+
+ element->reference.resolved = TRUE;
+ element->reference.node = resolved_node;
+ type = element->reference.node->type;
+
+ /*
+ * Attempt to resolve the node to a value before we insert it into
+ * the package. If this is a reference to a common data type,
+ * resolve it immediately. According to the ACPI spec, package
+ * elements can only be "data objects" or method references.
+ * Attempt to resolve to an Integer, Buffer, String or Package.
+ * If cannot, return the named reference (for things like Devices,
+ * Methods, etc.) Buffer Fields and Fields will resolve to simple
+ * objects (int/buf/str/pkg).
+ *
+ * NOTE: References to things like Devices, Methods, Mutexes, etc.
+ * will remain as named references. This behavior is not described
+ * in the ACPI spec, but it appears to be an oversight.
+ */
+ status = acpi_ex_resolve_node_to_value(&resolved_node, NULL);
+ if (ACPI_FAILURE(status)) {
+ return_VOID;
+ }
+#if 0
+/* TBD - alias support */
+ /*
+ * Special handling for Alias objects. We need to setup the type
+ * and the Op->Common.Node to point to the Alias target. Note,
+ * Alias has at most one level of indirection internally.
+ */
+ type = op->common.node->type;
+ if (type == ACPI_TYPE_LOCAL_ALIAS) {
+ type = obj_desc->common.type;
+ op->common.node = ACPI_CAST_PTR(struct acpi_namespace_node,
+ op->common.node->object);
+ }
+#endif
+
+ switch (type) {
+ /*
+ * These object types are a result of named references, so we will
+ * leave them as reference objects. In other words, these types
+ * have no intrinsic "value".
+ */
+ case ACPI_TYPE_DEVICE:
+ case ACPI_TYPE_THERMAL:
+
+ /* TBD: This may not be necessary */
+
+ acpi_ut_add_reference(resolved_node->object);
+ break;
+
+ case ACPI_TYPE_MUTEX:
+ case ACPI_TYPE_METHOD:
+ case ACPI_TYPE_POWER:
+ case ACPI_TYPE_PROCESSOR:
+ case ACPI_TYPE_EVENT:
+ case ACPI_TYPE_REGION:
+
+ break;
+
+ default:
+ /*
+ * For all other types - the node was resolved to an actual
+ * operand object with a value, return the object
+ */
+ *element_ptr = (union acpi_operand_object *)resolved_node;
+ break;
+ }
+
+ return_VOID;
+}
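The NOTE in acpi_ds_build_internal_package_obj above describes how the declared NumElements always wins over the actual package list length: extra list entries are dropped with a message, missing ones are left as NULL (uninitialized) slots. Below is a small standalone sketch of that behavior (not ACPICA code; build_package and its parameters are illustrative only).

/*
 * Sketch of the NumElements handling: the declared count always wins --
 * extra list entries are dropped, missing ones stay NULL.
 */
#include <stdio.h>
#include <stdlib.h>

static int **build_package(unsigned int num_elements,
                           const int *list, unsigned int list_len)
{
        /* One extra slot so the array is always NULL terminated */
        int **elements = calloc((size_t)num_elements + 1, sizeof(*elements));
        unsigned int i;

        if (!elements)
                return NULL;

        for (i = 0; i < num_elements && i < list_len; i++) {
                elements[i] = malloc(sizeof(int));
                if (elements[i])
                        *elements[i] = list[i];
        }

        if (list_len > num_elements)
                printf("Actual Package length (%u) is larger than "
                       "NumElements field (%u), truncated\n",
                       list_len, num_elements);
        else if (i < num_elements)
                printf("Package List length (%u) smaller than NumElements "
                       "count (%u), padded with null elements\n",
                       list_len, num_elements);

        return elements;
}

int main(void)
{
        const int list[] = { 1, 2, 3, 4, 5 };
        int **pkg = build_package(3, list, 5);   /* truncated to 3 elements */
        unsigned int i;

        for (i = 0; pkg && pkg[i]; i++)
                printf("element %u = %d\n", i, *pkg[i]);
        return 0;
}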
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index 9c941947a063..3a3cb8624f41 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -440,9 +440,11 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
void *ignored)
{
acpi_status status;
+ acpi_event_status event_status;
struct acpi_gpe_event_info *gpe_event_info;
u32 gpe_enabled_count;
u32 gpe_index;
+ u32 gpe_number;
u32 i;
u32 j;
@@ -470,30 +472,40 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j;
gpe_event_info = &gpe_block->event_info[gpe_index];
+ gpe_number = gpe_block->block_base_number + gpe_index;
/*
* Ignore GPEs that have no corresponding _Lxx/_Exx method
- * and GPEs that are used to wake the system
+ * and GPEs that are used for wakeup
*/
- if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
- ACPI_GPE_DISPATCH_NONE)
- || (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
- ACPI_GPE_DISPATCH_HANDLER)
- || (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
- ACPI_GPE_DISPATCH_RAW_HANDLER)
+ if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) !=
+ ACPI_GPE_DISPATCH_METHOD)
|| (gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
continue;
}
+ event_status = 0;
+ (void)acpi_hw_get_gpe_status(gpe_event_info,
+ &event_status);
+
status = acpi_ev_add_gpe_reference(gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Could not enable GPE 0x%02X",
- gpe_index +
- gpe_block->block_base_number));
+ gpe_number));
continue;
}
+ gpe_event_info->flags |= ACPI_GPE_AUTO_ENABLED;
+
+ if (event_status & ACPI_EVENT_FLAG_STATUS_SET) {
+ ACPI_INFO(("GPE 0x%02X active on init",
+ gpe_number));
+ (void)acpi_ev_gpe_dispatch(gpe_block->node,
+ gpe_event_info,
+ gpe_number);
+ }
+
gpe_enabled_count++;
}
}
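The evgpeblk.c hunk above samples each GPE's status before auto-enabling it, and dispatches an event that is already pending so it is not silently lost across the enable. A minimal model of that check-then-dispatch pattern follows (not ACPICA code; the register layout and names are invented for illustration).

/*
 * Model of the change above: when a GPE is auto-enabled at init, its
 * status bit is sampled first and an already-pending event is
 * dispatched immediately instead of being lost.
 */
#include <stdio.h>

#define GPE_COUNT 8

static unsigned char status_reg = 0x05;     /* GPEs 0 and 2 already pending */

static void dispatch_gpe(unsigned int gpe_number)
{
        printf("GPE 0x%02X active on init, dispatching\n", gpe_number);
        status_reg &= (unsigned char)~(1u << gpe_number);   /* clear status */
}

int main(void)
{
        unsigned int gpe;

        for (gpe = 0; gpe < GPE_COUNT; gpe++) {
                int pending = status_reg & (1u << gpe);

                /* enable the GPE (omitted), then handle a pending event */
                if (pending)
                        dispatch_gpe(gpe);
        }
        return 0;
}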
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 57718a3e029a..67c7c4ce276c 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -435,6 +435,14 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
*/
gpe_event_info->flags =
(ACPI_GPE_DISPATCH_NOTIFY | ACPI_GPE_LEVEL_TRIGGERED);
+ } else if (gpe_event_info->flags & ACPI_GPE_AUTO_ENABLED) {
+ /*
+ * A reference to this GPE has been added during the GPE block
+ * initialization, so drop it now to prevent the GPE from being
+ * permanently enabled and clear its ACPI_GPE_AUTO_ENABLED flag.
+ */
+ (void)acpi_ev_remove_gpe_reference(gpe_event_info);
+ gpe_event_info->flags &= ~ACPI_GPE_AUTO_ENABLED;
}
/*
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index d43d7da4c734..b8adb11f1b07 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -87,68 +87,40 @@ acpi_status acpi_ex_create_alias(struct acpi_walk_state *walk_state)
target_node->object);
}
- /*
- * For objects that can never change (i.e., the NS node will
- * permanently point to the same object), we can simply attach
- * the object to the new NS node. For other objects (such as
- * Integers, buffers, etc.), we have to point the Alias node
- * to the original Node.
- */
- switch (target_node->type) {
+ /* Ensure that the target node is valid */
- /* For these types, the sub-object can change dynamically via a Store */
+ if (!target_node) {
+ return_ACPI_STATUS(AE_NULL_OBJECT);
+ }
- case ACPI_TYPE_INTEGER:
- case ACPI_TYPE_STRING:
- case ACPI_TYPE_BUFFER:
- case ACPI_TYPE_PACKAGE:
- case ACPI_TYPE_BUFFER_FIELD:
- /*
- * These types open a new scope, so we need the NS node in order to access
- * any children.
- */
- case ACPI_TYPE_DEVICE:
- case ACPI_TYPE_POWER:
- case ACPI_TYPE_PROCESSOR:
- case ACPI_TYPE_THERMAL:
- case ACPI_TYPE_LOCAL_SCOPE:
- /*
- * The new alias has the type ALIAS and points to the original
- * NS node, not the object itself.
- */
- alias_node->type = ACPI_TYPE_LOCAL_ALIAS;
- alias_node->object =
- ACPI_CAST_PTR(union acpi_operand_object, target_node);
- break;
+ /* Construct the alias object (a namespace node) */
+ switch (target_node->type) {
case ACPI_TYPE_METHOD:
/*
- * Control method aliases need to be differentiated
+ * Control method aliases need to be differentiated with
+ * a special type
*/
alias_node->type = ACPI_TYPE_LOCAL_METHOD_ALIAS;
- alias_node->object =
- ACPI_CAST_PTR(union acpi_operand_object, target_node);
break;
default:
-
- /* Attach the original source object to the new Alias Node */
-
/*
- * The new alias assumes the type of the target, and it points
- * to the same object. The reference count of the object has an
- * additional reference to prevent deletion out from under either the
- * target node or the alias Node
+ * All other object types.
+ *
+ * The new alias has the type ALIAS and points to the original
+ * NS node, not the object itself.
*/
- status = acpi_ns_attach_object(alias_node,
- acpi_ns_get_attached_object
- (target_node),
- target_node->type);
+ alias_node->type = ACPI_TYPE_LOCAL_ALIAS;
+ alias_node->object =
+ ACPI_CAST_PTR(union acpi_operand_object, target_node);
break;
}
/* Since both operands are Nodes, we don't need to delete them */
+ alias_node->object =
+ ACPI_CAST_PTR(union acpi_operand_object, target_node);
return_ACPI_STATUS(status);
}
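The excreate.c rework above makes every non-method alias a node of type ALIAS that points at the original namespace node (method aliases get their own type), so dereferencing an alias never has to chase more than one level of indirection. The sketch below illustrates that model only; the types and helpers are invented and simplify away ACPICA's object/node split.

/*
 * Sketch of the alias model: an alias node records a pointer to the
 * original node, so at most one level of indirection is followed.
 */
#include <stdio.h>

enum node_type { TYPE_INTEGER, TYPE_METHOD, TYPE_ALIAS, TYPE_METHOD_ALIAS };

struct node {
        const char *name;
        enum node_type type;
        struct node *target;         /* valid for alias nodes only */
        int value;                   /* valid for integer nodes only */
};

static struct node *create_alias(struct node *alias, struct node *target)
{
        alias->type = (target->type == TYPE_METHOD) ?
                      TYPE_METHOD_ALIAS : TYPE_ALIAS;
        alias->target = target;      /* point at the node, not its object */
        return alias;
}

static struct node *deref(struct node *n)
{
        return (n->type == TYPE_ALIAS || n->type == TYPE_METHOD_ALIAS) ?
               n->target : n;
}

int main(void)
{
        struct node original = { "VAL0", TYPE_INTEGER, NULL, 42 };
        struct node alias = { "ALI0", TYPE_INTEGER, NULL, 0 };

        create_alias(&alias, &original);
        printf("%s resolves to %s = %d\n",
               alias.name, deref(&alias)->name, deref(&alias)->value);
        return 0;
}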
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index 44092f744477..83398dc4b7c2 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -102,7 +102,7 @@ static struct acpi_exdump_info acpi_ex_dump_package[6] = {
{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_package), NULL},
{ACPI_EXD_NODE, ACPI_EXD_OFFSET(package.node), "Parent Node"},
{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(package.flags), "Flags"},
- {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(package.count), "Elements"},
+ {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(package.count), "Element Count"},
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(package.elements), "Element List"},
{ACPI_EXD_PACKAGE, 0, NULL}
};
@@ -384,6 +384,10 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
count = info->offset;
while (count) {
+ if (!obj_desc) {
+ return;
+ }
+
target = ACPI_ADD_PTR(u8, obj_desc, info->offset);
name = info->name;
@@ -469,9 +473,9 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
start = *ACPI_CAST_PTR(void *, target);
next = start;
- acpi_os_printf("%20s : %p", name, next);
+ acpi_os_printf("%20s : %p ", name, next);
if (next) {
- acpi_os_printf("(%s %2.2X)",
+ acpi_os_printf("%s (Type %2.2X)",
acpi_ut_get_object_type_name
(next), next->common.type);
@@ -493,6 +497,8 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
break;
}
}
+ } else {
+ acpi_os_printf("- No attached objects");
}
acpi_os_printf("\n");
@@ -1129,7 +1135,9 @@ acpi_ex_dump_package_obj(union acpi_operand_object *obj_desc,
default:
- acpi_os_printf("[Unknown Type] %X\n", obj_desc->common.type);
+ acpi_os_printf("[%s] Type: %2.2X\n",
+ acpi_ut_get_type_name(obj_desc->common.type),
+ obj_desc->common.type);
break;
}
}
@@ -1167,11 +1175,17 @@ acpi_ex_dump_object_descriptor(union acpi_operand_object *obj_desc, u32 flags)
acpi_ex_dump_namespace_node((struct acpi_namespace_node *)
obj_desc, flags);
- acpi_os_printf("\nAttached Object (%p):\n",
- ((struct acpi_namespace_node *)obj_desc)->
- object);
-
obj_desc = ((struct acpi_namespace_node *)obj_desc)->object;
+ if (!obj_desc) {
+ return_VOID;
+ }
+
+ acpi_os_printf("\nAttached Object %p", obj_desc);
+ if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) == ACPI_DESC_TYPE_NAMED) {
+ acpi_os_printf(" - Namespace Node");
+ }
+
+ acpi_os_printf(":\n");
goto dump_object;
}
@@ -1191,6 +1205,10 @@ acpi_ex_dump_object_descriptor(union acpi_operand_object *obj_desc, u32 flags)
dump_object:
+ if (!obj_desc) {
+ return_VOID;
+ }
+
/* Common Fields */
acpi_ex_dump_object(obj_desc, acpi_ex_dump_common);
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index f222a80ca38e..1e7649ce0a7b 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -265,6 +265,8 @@ acpi_ex_do_logical_numeric_op(u16 opcode,
default:
+ ACPI_ERROR((AE_INFO,
+ "Invalid numeric logical opcode: %X", opcode));
status = AE_AML_INTERNAL;
break;
}
@@ -345,6 +347,9 @@ acpi_ex_do_logical_op(u16 opcode,
default:
+ ACPI_ERROR((AE_INFO,
+ "Invalid object type for logical operator: %X",
+ operand0->common.type));
status = AE_AML_INTERNAL;
break;
}
@@ -388,6 +393,8 @@ acpi_ex_do_logical_op(u16 opcode,
default:
+ ACPI_ERROR((AE_INFO,
+ "Invalid comparison opcode: %X", opcode));
status = AE_AML_INTERNAL;
break;
}
@@ -456,6 +463,8 @@ acpi_ex_do_logical_op(u16 opcode,
default:
+ ACPI_ERROR((AE_INFO,
+ "Invalid comparison opcode: %X", opcode));
status = AE_AML_INTERNAL;
break;
}
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index eecb3bff7fd7..57980b7d3594 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -414,6 +414,9 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
default:
+ ACPI_ERROR((AE_INFO,
+ "Invalid object type: %X",
+ (operand[0])->common.type));
status = AE_AML_INTERNAL;
goto cleanup;
}
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index de74a4c25085..acb417b58bbb 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -107,7 +107,7 @@ acpi_hw_get_access_bit_width(u64 address,
ACPI_IS_ALIGNED(reg->bit_width, 8)) {
access_bit_width = reg->bit_width;
} else if (reg->access_width) {
- access_bit_width = (1 << (reg->access_width + 2));
+ access_bit_width = ACPI_ACCESS_BIT_WIDTH(reg->access_width);
} else {
access_bit_width =
ACPI_ROUND_UP_POWER_OF_TWO_8(reg->bit_offset +
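The hwregs.c hunk replaces the open-coded shift with ACPI_ACCESS_BIT_WIDTH(). Assuming the macro expands to the same value as the 1 << (access_width + 2) it replaces, the GAS access_width encoding 1..4 (byte/word/dword/qword) maps to 8/16/32/64 bits, as the short worked example below shows (the local ACCESS_BIT_WIDTH macro is a stand-in, not the ACPICA definition).

/* Worked example of the access_width -> bit width mapping */
#include <stdio.h>

#define ACCESS_BIT_WIDTH(w)  (1u << ((w) + 2))

int main(void)
{
        unsigned int w;

        for (w = 1; w <= 4; w++)
                printf("access_width %u -> %2u bits\n", w, ACCESS_BIT_WIDTH(w));
        /* prints 8, 16, 32, 64 */
        return 0;
}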
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index 7ef13934968f..e5c095ca6083 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -72,13 +72,16 @@ static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
static struct acpi_sleep_functions acpi_sleep_dispatch[] = {
{ACPI_STRUCT_INIT(legacy_function,
ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep)),
- ACPI_STRUCT_INIT(extended_function, acpi_hw_extended_sleep) },
+ ACPI_STRUCT_INIT(extended_function,
+ acpi_hw_extended_sleep)},
{ACPI_STRUCT_INIT(legacy_function,
ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep)),
- ACPI_STRUCT_INIT(extended_function, acpi_hw_extended_wake_prep) },
+ ACPI_STRUCT_INIT(extended_function,
+ acpi_hw_extended_wake_prep)},
{ACPI_STRUCT_INIT(legacy_function,
ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake)),
- ACPI_STRUCT_INIT(extended_function, acpi_hw_extended_wake) }
+ ACPI_STRUCT_INIT(extended_function,
+ acpi_hw_extended_wake)}
};
/*
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index e5f4fa496572..f2733f51ca8d 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -292,6 +292,7 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
{
acpi_status status;
char *path = pathname;
+ char *external_path;
struct acpi_namespace_node *prefix_node;
struct acpi_namespace_node *current_node = NULL;
struct acpi_namespace_node *this_node = NULL;
@@ -427,13 +428,22 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
num_carats++;
this_node = this_node->parent;
if (!this_node) {
+ /*
+ * Current scope has no parent scope. Externalize
+ * the internal path for the error message.
+ */
+ status =
+ acpi_ns_externalize_name
+ (ACPI_UINT32_MAX, pathname, NULL,
+ &external_path);
+ if (ACPI_SUCCESS(status)) {
+ ACPI_ERROR((AE_INFO,
+ "%s: Path has too many parent prefixes (^)",
+ external_path));
+
+ ACPI_FREE(external_path);
+ }
- /* Current scope has no parent scope */
-
- ACPI_ERROR((AE_INFO,
- "%s: Path has too many parent prefixes (^) "
- "- reached beyond root node",
- pathname));
return_ACPI_STATUS(AE_NOT_FOUND);
}
}
@@ -634,6 +644,12 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
this_node->object;
}
}
+#ifdef ACPI_ASL_COMPILER
+ if (!acpi_gbl_disasm_flag &&
+ (this_node->flags & ANOBJ_IS_EXTERNAL)) {
+ this_node->flags |= IMPLICIT_EXTERNAL;
+ }
+#endif
}
/* Special handling for the last segment (num_segments == 0) */
diff --git a/drivers/acpi/acpica/nsarguments.c b/drivers/acpi/acpica/nsarguments.c
index 9095d51f6b37..67b7370dcae5 100644
--- a/drivers/acpi/acpica/nsarguments.c
+++ b/drivers/acpi/acpica/nsarguments.c
@@ -69,9 +69,14 @@ void acpi_ns_check_argument_types(struct acpi_evaluate_info *info)
u8 user_arg_type;
u32 i;
- /* If not a predefined name, cannot typecheck args */
-
- if (!info->predefined) {
+ /*
+ * If not a predefined name, cannot typecheck args, because
+ * we have no idea what argument types are expected.
+ * Also, ignore typecheck warnings/errors if this method
+ * has already been evaluated at least once -- in order
+ * to suppress repetitive messages.
+ */
+ if (!info->predefined || (info->node->flags & ANOBJ_EVALUATED)) {
return;
}
@@ -93,6 +98,10 @@ void acpi_ns_check_argument_types(struct acpi_evaluate_info *info)
acpi_ut_get_type_name
(user_arg_type),
acpi_ut_get_type_name(arg_type)));
+
+ /* Prevent any additional typechecking for this method */
+
+ info->node->flags |= ANOBJ_EVALUATED;
}
}
}
@@ -121,7 +130,7 @@ acpi_ns_check_acpi_compliance(char *pathname,
u32 aml_param_count;
u32 required_param_count;
- if (!predefined) {
+ if (!predefined || (node->flags & ANOBJ_EVALUATED)) {
return;
}
@@ -215,6 +224,10 @@ acpi_ns_check_argument_count(char *pathname,
u32 aml_param_count;
u32 required_param_count;
+ if (node->flags & ANOBJ_EVALUATED) {
+ return;
+ }
+
if (!predefined) {
/*
* Not a predefined name. Check the incoming user argument count
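The nsarguments.c changes above implement a warn-once policy: after the first argument-type complaint, an "already evaluated" flag is set on the method node so repeated evaluations stay quiet. A standalone sketch of that pattern follows (not ACPICA code; NODE_EVALUATED and the node layout are illustrative).

/*
 * Sketch of the warn-once pattern: a per-node flag is set after the
 * first complaint so the same method does not warn on every call.
 */
#include <stdio.h>

#define NODE_EVALUATED 0x01

struct method_node {
        const char *name;
        unsigned char flags;
};

static void check_argument_types(struct method_node *node, int bad_arg)
{
        if (node->flags & NODE_EVALUATED)
                return;                          /* already warned once */

        if (bad_arg) {
                printf("%s: argument type mismatch\n", node->name);
                node->flags |= NODE_EVALUATED;   /* suppress further warnings */
        }
}

int main(void)
{
        struct method_node node = { "_OSC", 0 };

        check_argument_types(&node, 1);          /* warns */
        check_argument_types(&node, 1);          /* silent */
        return 0;
}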
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index ce33e7297ea7..9c6297949712 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -396,6 +396,20 @@ acpi_ns_init_one_object(acpi_handle obj_handle,
info->package_init++;
status = acpi_ds_get_package_arguments(obj_desc);
+ if (ACPI_FAILURE(status)) {
+ break;
+ }
+
+ /*
+ * Resolve all named references in package objects (and all
+ * sub-packages). This action has been deferred until the entire
+ * namespace has been loaded, in order to support external and
+ * forward references from individual package elements (05/2017).
+ */
+ status = acpi_ut_walk_package_tree(obj_desc, NULL,
+ acpi_ds_init_package_element,
+ NULL);
+ obj_desc->package.flags |= AOPOBJ_DATA_VALID;
break;
default:
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index aa16aeaa8937..a410760a0308 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -89,7 +89,14 @@ acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node)
{
acpi_size size;
- ACPI_FUNCTION_ENTRY();
+ /* Validate the Node */
+
+ if (ACPI_GET_DESCRIPTOR_TYPE(node) != ACPI_DESC_TYPE_NAMED) {
+ ACPI_ERROR((AE_INFO,
+ "Invalid/cached reference target node: %p, descriptor type %d",
+ node, ACPI_GET_DESCRIPTOR_TYPE(node)));
+ return (0);
+ }
size = acpi_ns_build_normalized_path(node, NULL, 0, FALSE);
return (size);
diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c
index 4954cb6c9090..a8ea8fb1d299 100644
--- a/drivers/acpi/acpica/nsprepkg.c
+++ b/drivers/acpi/acpica/nsprepkg.c
@@ -614,6 +614,8 @@ acpi_ns_check_package_list(struct acpi_evaluate_info *info,
default: /* Should not get here, type was validated by caller */
+ ACPI_ERROR((AE_INFO, "Invalid Package type: %X",
+ package->ret_info.type));
return (AE_AML_INTERNAL);
}
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index 538c61677c10..783f4c838aee 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -100,9 +100,13 @@ acpi_evaluate_object_typed(acpi_handle handle,
free_buffer_on_error = TRUE;
}
- status = acpi_get_handle(handle, pathname, &target_handle);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
+ if (pathname) {
+ status = acpi_get_handle(handle, pathname, &target_handle);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ } else {
+ target_handle = handle;
}
full_pathname = acpi_ns_get_external_pathname(target_handle);
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index b4224005783c..bb04dec168ad 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -164,6 +164,11 @@ acpi_ps_get_arguments(struct acpi_walk_state *walk_state,
INCREMENT_ARG_LIST(walk_state->arg_types);
}
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
+ "Final argument count: %u pass %u\n",
+ walk_state->arg_count,
+ walk_state->pass_number));
+
/*
* Handle executable code at "module-level". This refers to
* executable opcodes that appear outside of any control method.
@@ -277,6 +282,11 @@ acpi_ps_get_arguments(struct acpi_walk_state *walk_state,
AML_NAME_OP)
&& (walk_state->pass_number <=
ACPI_IMODE_LOAD_PASS2)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
+ "Setup Package/Buffer: Pass %u, AML Ptr: %p\n",
+ walk_state->pass_number,
+ aml_op_start));
+
/*
* Skip parsing of Buffers and Packages because we don't have
* enough info in the first pass to parse them correctly.
@@ -570,6 +580,10 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
/* Check for arguments that need to be processed */
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
+ "Parseloop: argument count: %u\n",
+ walk_state->arg_count));
+
if (walk_state->arg_count) {
/*
* There are arguments (complex ones), push Op and
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
index ef6384e374fc..0bef6df71bba 100644
--- a/drivers/acpi/acpica/psobject.c
+++ b/drivers/acpi/acpica/psobject.c
@@ -359,6 +359,32 @@ acpi_ps_create_op(struct acpi_walk_state *walk_state,
acpi_ps_build_named_op(walk_state, aml_op_start, op,
&named_op);
acpi_ps_free_op(op);
+
+#ifdef ACPI_ASL_COMPILER
+ if (acpi_gbl_disasm_flag
+ && walk_state->opcode == AML_EXTERNAL_OP
+ && status == AE_NOT_FOUND) {
+ /*
+ * If parsing of AML_EXTERNAL_OP's name path fails, then skip
+ * past this opcode and keep parsing. This is a much better
+ * alternative than to abort the entire disassembler. At this
+ * point, the parser_state is at the end of the namepath of the
+ * external declaration opcode. Setting walk_state->Aml to
+ * walk_state->parser_state.Aml + 2 moves increments the
+ * walk_state->Aml past the object type and the paramcount of the
+ * external opcode. For the error message, only print the AML
+ * offset. We could attempt to print the name but this may cause
+ * a segmentation fault when printing the namepath because the
+ * AML may be incorrect.
+ */
+ acpi_os_printf
+ ("// Invalid external declaration at AML offset 0x%x.\n",
+ walk_state->aml -
+ walk_state->parser_state.aml_start);
+ walk_state->aml = walk_state->parser_state.aml + 2;
+ return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
+ }
+#endif
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c
index 59a4f9ed06a7..be65e65e216e 100644
--- a/drivers/acpi/acpica/rsxface.c
+++ b/drivers/acpi/acpica/rsxface.c
@@ -615,7 +615,7 @@ ACPI_EXPORT_SYMBOL(acpi_walk_resource_buffer)
* device we are querying
* name - Method name of the resources we want.
* (METHOD_NAME__CRS, METHOD_NAME__PRS, or
- * METHOD_NAME__AEI)
+ * METHOD_NAME__AEI or METHOD_NAME__DMA)
* user_function - Called for each resource
* context - Passed to user_function
*
@@ -641,11 +641,12 @@ acpi_walk_resources(acpi_handle device_handle,
if (!device_handle || !user_function || !name ||
(!ACPI_COMPARE_NAME(name, METHOD_NAME__CRS) &&
!ACPI_COMPARE_NAME(name, METHOD_NAME__PRS) &&
- !ACPI_COMPARE_NAME(name, METHOD_NAME__AEI))) {
+ !ACPI_COMPARE_NAME(name, METHOD_NAME__AEI) &&
+ !ACPI_COMPARE_NAME(name, METHOD_NAME__DMA))) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- /* Get the _CRS/_PRS/_AEI resource list */
+ /* Get the _CRS/_PRS/_AEI/_DMA resource list */
buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
status = acpi_rs_get_method_data(device_handle, name, &buffer);
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
index c9d6fa6d7cc6..b19a2f0ea331 100644
--- a/drivers/acpi/acpica/tbdata.c
+++ b/drivers/acpi/acpica/tbdata.c
@@ -50,6 +50,57 @@
#define _COMPONENT ACPI_TABLES
ACPI_MODULE_NAME("tbdata")
+/* Local prototypes */
+static acpi_status
+acpi_tb_check_duplication(struct acpi_table_desc *table_desc, u32 *table_index);
+
+static u8
+acpi_tb_compare_tables(struct acpi_table_desc *table_desc, u32 table_index);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_compare_tables
+ *
+ * PARAMETERS: table_desc - Table 1 descriptor to be compared
+ * table_index - Index of table 2 to be compared
+ *
+ * RETURN: TRUE if both tables are identical.
+ *
+ * DESCRIPTION: This function compares a table with another table that has
+ * already been installed in the root table list.
+ *
+ ******************************************************************************/
+
+static u8
+acpi_tb_compare_tables(struct acpi_table_desc *table_desc, u32 table_index)
+{
+ acpi_status status = AE_OK;
+ u8 is_identical;
+ struct acpi_table_header *table;
+ u32 table_length;
+ u8 table_flags;
+
+ status =
+ acpi_tb_acquire_table(&acpi_gbl_root_table_list.tables[table_index],
+ &table, &table_length, &table_flags);
+ if (ACPI_FAILURE(status)) {
+ return (FALSE);
+ }
+
+ /*
+ * Check for a table match on the entire table length,
+ * not just the header.
+ */
+ is_identical = (u8)((table_desc->length != table_length ||
+ memcmp(table_desc->pointer, table, table_length)) ?
+ FALSE : TRUE);
+
+ /* Release the acquired table */
+
+ acpi_tb_release_table(table, table_length, table_flags);
+ return (is_identical);
+}
+
/*******************************************************************************
*
* FUNCTION: acpi_tb_init_table_descriptor
@@ -64,6 +115,7 @@ ACPI_MODULE_NAME("tbdata")
* DESCRIPTION: Initialize a new table descriptor
*
******************************************************************************/
+
void
acpi_tb_init_table_descriptor(struct acpi_table_desc *table_desc,
acpi_physical_address address,
@@ -338,7 +390,7 @@ void acpi_tb_invalidate_table(struct acpi_table_desc *table_desc)
acpi_status acpi_tb_validate_temp_table(struct acpi_table_desc *table_desc)
{
- if (!table_desc->pointer && !acpi_gbl_verify_table_checksum) {
+ if (!table_desc->pointer && !acpi_gbl_enable_table_validation) {
/*
* Only validates the header of the table.
* Note that Length contains the size of the mapping after invoking
@@ -354,22 +406,100 @@ acpi_status acpi_tb_validate_temp_table(struct acpi_table_desc *table_desc)
return (acpi_tb_validate_table(table_desc));
}
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_check_duplication
+ *
+ * PARAMETERS: table_desc - Table descriptor
+ * table_index - Where the table index is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Avoid installing duplicated tables. However table override and
+ * user aided dynamic table load is allowed, thus comparing the
+ * address of the table is not sufficient, and checking the entire
+ * table content is required.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_tb_check_duplication(struct acpi_table_desc *table_desc, u32 *table_index)
+{
+ u32 i;
+
+ ACPI_FUNCTION_TRACE(tb_check_duplication);
+
+ /* Check if table is already registered */
+
+ for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
+
+ /* Do not compare with unverified tables */
+
+ if (!
+ (acpi_gbl_root_table_list.tables[i].
+ flags & ACPI_TABLE_IS_VERIFIED)) {
+ continue;
+ }
+
+ /*
+ * Check for a table match on the entire table length,
+ * not just the header.
+ */
+ if (!acpi_tb_compare_tables(table_desc, i)) {
+ continue;
+ }
+
+ /*
+ * Note: the current mechanism does not unregister a table if it is
+ * dynamically unloaded. The related namespace entries are deleted,
+ * but the table remains in the root table list.
+ *
+ * The assumption here is that the number of different tables that
+ * will be loaded is actually small, and there is minimal overhead
+ * in just keeping the table in case it is needed again.
+ *
+ * If this assumption changes in the future (perhaps on large
+ * machines with many table load/unload operations), tables will
+ * need to be unregistered when they are unloaded, and slots in the
+ * root table list should be reused when empty.
+ */
+ if (acpi_gbl_root_table_list.tables[i].flags &
+ ACPI_TABLE_IS_LOADED) {
+
+ /* Table is still loaded, this is an error */
+
+ return_ACPI_STATUS(AE_ALREADY_EXISTS);
+ } else {
+ *table_index = i;
+ return_ACPI_STATUS(AE_CTRL_TERMINATE);
+ }
+ }
+
+ /* Indicate no duplication to the caller */
+
+ return_ACPI_STATUS(AE_OK);
+}
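As the comments above explain, table override and user-aided dynamic loads mean the table address is not enough to detect a duplicate, so the candidate is compared against every verified table over its full length. The sketch below shows that comparison pattern in isolation (not ACPICA code; blob, blobs_identical and find_duplicate are illustrative names).

/*
 * Sketch of the duplication check: a candidate blob is compared
 * against every registered blob over its full length (length check
 * plus memcmp), not just a header.
 */
#include <stdio.h>
#include <string.h>

struct blob { const unsigned char *data; unsigned int length; };

static int blobs_identical(const struct blob *a, const struct blob *b)
{
        return a->length == b->length &&
               memcmp(a->data, b->data, a->length) == 0;
}

/* Returns the index of a duplicate, or -1 if the candidate is new */
static int find_duplicate(const struct blob *candidate,
                          const struct blob *installed, unsigned int count)
{
        unsigned int i;

        for (i = 0; i < count; i++) {
                if (blobs_identical(candidate, &installed[i]))
                        return (int)i;
        }
        return -1;
}

int main(void)
{
        static const unsigned char t1[] = { 'S', 'S', 'D', 'T', 1, 2, 3 };
        static const unsigned char t2[] = { 'S', 'S', 'D', 'T', 9, 9, 9 };
        struct blob installed[] = { { t1, sizeof(t1) }, { t2, sizeof(t2) } };
        struct blob candidate = { t1, sizeof(t1) };

        printf("duplicate index: %d\n",
               find_duplicate(&candidate, installed, 2));
        return 0;
}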
+
/******************************************************************************
*
* FUNCTION: acpi_tb_verify_temp_table
*
* PARAMETERS: table_desc - Table descriptor
* signature - Table signature to verify
+ * table_index - Where the table index is returned
*
* RETURN: Status
*
* DESCRIPTION: This function is called to validate and verify the table, the
* returned table descriptor is in "VALIDATED" state.
+ * Note that 'TableIndex' must be non-NULL to enable the
+ * duplication check.
*
*****************************************************************************/
acpi_status
-acpi_tb_verify_temp_table(struct acpi_table_desc *table_desc, char *signature)
+acpi_tb_verify_temp_table(struct acpi_table_desc *table_desc,
+ char *signature, u32 *table_index)
{
acpi_status status = AE_OK;
@@ -392,9 +522,10 @@ acpi_tb_verify_temp_table(struct acpi_table_desc *table_desc, char *signature)
goto invalidate_and_exit;
}
- /* Verify the checksum */
+ if (acpi_gbl_enable_table_validation) {
+
+ /* Verify the checksum */
- if (acpi_gbl_verify_table_checksum) {
status =
acpi_tb_verify_checksum(table_desc->pointer,
table_desc->length);
@@ -411,9 +542,34 @@ acpi_tb_verify_temp_table(struct acpi_table_desc *table_desc, char *signature)
goto invalidate_and_exit;
}
+
+ /* Avoid duplications */
+
+ if (table_index) {
+ status =
+ acpi_tb_check_duplication(table_desc, table_index);
+ if (ACPI_FAILURE(status)) {
+ if (status != AE_CTRL_TERMINATE) {
+ ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY,
+ "%4.4s 0x%8.8X%8.8X"
+ " Table is duplicated",
+ acpi_ut_valid_nameseg
+ (table_desc->signature.
+ ascii) ? table_desc->
+ signature.
+ ascii : "????",
+ ACPI_FORMAT_UINT64
+ (table_desc->address)));
+ }
+
+ goto invalidate_and_exit;
+ }
+ }
+
+ table_desc->flags |= ACPI_TABLE_IS_VERIFIED;
}
- return_ACPI_STATUS(AE_OK);
+ return_ACPI_STATUS(status);
invalidate_and_exit:
acpi_tb_invalidate_table(table_desc);
@@ -436,6 +592,8 @@ acpi_status acpi_tb_resize_root_table_list(void)
{
struct acpi_table_desc *tables;
u32 table_count;
+ u32 current_table_count, max_table_count;
+ u32 i;
ACPI_FUNCTION_TRACE(tb_resize_root_table_list);
@@ -455,8 +613,8 @@ acpi_status acpi_tb_resize_root_table_list(void)
table_count = acpi_gbl_root_table_list.current_table_count;
}
- tables = ACPI_ALLOCATE_ZEROED(((acpi_size)table_count +
- ACPI_ROOT_TABLE_SIZE_INCREMENT) *
+ max_table_count = table_count + ACPI_ROOT_TABLE_SIZE_INCREMENT;
+ tables = ACPI_ALLOCATE_ZEROED(((acpi_size)max_table_count) *
sizeof(struct acpi_table_desc));
if (!tables) {
ACPI_ERROR((AE_INFO,
@@ -466,9 +624,16 @@ acpi_status acpi_tb_resize_root_table_list(void)
/* Copy and free the previous table array */
+ current_table_count = 0;
if (acpi_gbl_root_table_list.tables) {
- memcpy(tables, acpi_gbl_root_table_list.tables,
- (acpi_size)table_count * sizeof(struct acpi_table_desc));
+ for (i = 0; i < table_count; i++) {
+ if (acpi_gbl_root_table_list.tables[i].address) {
+ memcpy(tables + current_table_count,
+ acpi_gbl_root_table_list.tables + i,
+ sizeof(struct acpi_table_desc));
+ current_table_count++;
+ }
+ }
if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) {
ACPI_FREE(acpi_gbl_root_table_list.tables);
@@ -476,8 +641,8 @@ acpi_status acpi_tb_resize_root_table_list(void)
}
acpi_gbl_root_table_list.tables = tables;
- acpi_gbl_root_table_list.max_table_count =
- table_count + ACPI_ROOT_TABLE_SIZE_INCREMENT;
+ acpi_gbl_root_table_list.max_table_count = max_table_count;
+ acpi_gbl_root_table_list.current_table_count = current_table_count;
acpi_gbl_root_table_list.flags |= ACPI_ROOT_ORIGIN_ALLOCATED;
return_ACPI_STATUS(AE_OK);
@@ -818,13 +983,9 @@ acpi_tb_load_table(u32 table_index, struct acpi_namespace_node *parent_node)
acpi_ev_update_gpes(owner_id);
}
- /* Invoke table handler if present */
-
- if (acpi_gbl_table_handler) {
- (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_LOAD, table,
- acpi_gbl_table_handler_context);
- }
+ /* Invoke table handler */
+ acpi_tb_notify_table(ACPI_TABLE_EVENT_LOAD, table);
return_ACPI_STATUS(status);
}
@@ -894,15 +1055,11 @@ acpi_status acpi_tb_unload_table(u32 table_index)
return_ACPI_STATUS(AE_NOT_EXIST);
}
- /* Invoke table handler if present */
+ /* Invoke table handler */
- if (acpi_gbl_table_handler) {
- status = acpi_get_table_by_index(table_index, &table);
- if (ACPI_SUCCESS(status)) {
- (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_UNLOAD,
- table,
- acpi_gbl_table_handler_context);
- }
+ status = acpi_get_table_by_index(table_index, &table);
+ if (ACPI_SUCCESS(status)) {
+ acpi_tb_notify_table(ACPI_TABLE_EVENT_UNLOAD, table);
}
/* Delete the portion of the namespace owned by this table */
@@ -918,3 +1075,26 @@ acpi_status acpi_tb_unload_table(u32 table_index)
}
ACPI_EXPORT_SYMBOL(acpi_tb_unload_table)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_notify_table
+ *
+ * PARAMETERS: event - Table event
+ * table - Validated table pointer
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Notify registered users of a table event.
+ *
+ ******************************************************************************/
+
+void acpi_tb_notify_table(u32 event, void *table)
+{
+ /* Invoke table handler if present */
+
+ if (acpi_gbl_table_handler) {
+ (void)acpi_gbl_table_handler(event, table,
+ acpi_gbl_table_handler_context);
+ }
+}
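The helper above centralizes the "invoke table handler if present" pattern, so a single host-registered handler now observes the ACPI_TABLE_EVENT_INSTALL, _LOAD and _UNLOAD notifications issued from the hunks above. A rough, hedged sketch of the consumer side (not part of this patch; it assumes the usual acpi_install_table_handler()/acpi_table_handler interface):

#include <linux/acpi.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/printk.h>

/* Invoked via acpi_tb_notify_table() whenever a table event occurs */
static acpi_status example_table_handler(u32 event, void *table, void *context)
{
	struct acpi_table_header *header = table;

	switch (event) {
	case ACPI_TABLE_EVENT_INSTALL:
	case ACPI_TABLE_EVENT_LOAD:
		pr_info("table [%4.4s]: event %u\n", header->signature, event);
		break;
	case ACPI_TABLE_EVENT_UNLOAD:
		pr_info("table [%4.4s]: unloaded\n", header->signature);
		break;
	default:
		break;
	}

	return AE_OK;
}

static int __init example_table_notify_init(void)
{
	/* ACPICA supports one global table handler at a time */
	acpi_status status = acpi_install_table_handler(example_table_handler, NULL);

	return ACPI_SUCCESS(status) ? 0 : -ENODEV;
}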
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 4620f3c68c13..0dfc0ac3c141 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -48,54 +48,6 @@
#define _COMPONENT ACPI_TABLES
ACPI_MODULE_NAME("tbinstal")
-/* Local prototypes */
-static u8
-acpi_tb_compare_tables(struct acpi_table_desc *table_desc, u32 table_index);
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_tb_compare_tables
- *
- * PARAMETERS: table_desc - Table 1 descriptor to be compared
- * table_index - Index of table 2 to be compared
- *
- * RETURN: TRUE if both tables are identical.
- *
- * DESCRIPTION: This function compares a table with another table that has
- * already been installed in the root table list.
- *
- ******************************************************************************/
-
-static u8
-acpi_tb_compare_tables(struct acpi_table_desc *table_desc, u32 table_index)
-{
- acpi_status status = AE_OK;
- u8 is_identical;
- struct acpi_table_header *table;
- u32 table_length;
- u8 table_flags;
-
- status =
- acpi_tb_acquire_table(&acpi_gbl_root_table_list.tables[table_index],
- &table, &table_length, &table_flags);
- if (ACPI_FAILURE(status)) {
- return (FALSE);
- }
-
- /*
- * Check for a table match on the entire table length,
- * not just the header.
- */
- is_identical = (u8)((table_desc->length != table_length ||
- memcmp(table_desc->pointer, table, table_length)) ?
- FALSE : TRUE);
-
- /* Release the acquired table */
-
- acpi_tb_release_table(table, table_length, table_flags);
- return (is_identical);
-}
-
/*******************************************************************************
*
* FUNCTION: acpi_tb_install_table_with_override
@@ -112,7 +64,6 @@ acpi_tb_compare_tables(struct acpi_table_desc *table_desc, u32 table_index)
* table array.
*
******************************************************************************/
-
void
acpi_tb_install_table_with_override(struct acpi_table_desc *new_table_desc,
u8 override, u32 *table_index)
@@ -210,95 +161,29 @@ acpi_tb_install_standard_table(acpi_physical_address address,
goto release_and_exit;
}
- /* Validate and verify a table before installation */
-
- status = acpi_tb_verify_temp_table(&new_table_desc, NULL);
- if (ACPI_FAILURE(status)) {
- goto release_and_exit;
- }
-
/* Acquire the table lock */
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
- if (reload) {
- /*
- * Validate the incoming table signature.
- *
- * 1) Originally, we checked the table signature for "SSDT" or "PSDT".
- * 2) We added support for OEMx tables, signature "OEM".
- * 3) Valid tables were encountered with a null signature, so we just
- * gave up on validating the signature, (05/2008).
- * 4) We encountered non-AML tables such as the MADT, which caused
- * interpreter errors and kernel faults. So now, we once again allow
- * only "SSDT", "OEMx", and now, also a null signature. (05/2011).
- */
- if ((new_table_desc.signature.ascii[0] != 0x00) &&
- (!ACPI_COMPARE_NAME
- (&new_table_desc.signature, ACPI_SIG_SSDT))
- && (strncmp(new_table_desc.signature.ascii, "OEM", 3))) {
- ACPI_BIOS_ERROR((AE_INFO,
- "Table has invalid signature [%4.4s] (0x%8.8X), "
- "must be SSDT or OEMx",
- acpi_ut_valid_nameseg(new_table_desc.
- signature.
- ascii) ?
- new_table_desc.signature.
- ascii : "????",
- new_table_desc.signature.integer));
-
- status = AE_BAD_SIGNATURE;
- goto unlock_and_exit;
- }
-
- /* Check if table is already registered */
-
- for (i = 0; i < acpi_gbl_root_table_list.current_table_count;
- ++i) {
- /*
- * Check for a table match on the entire table length,
- * not just the header.
- */
- if (!acpi_tb_compare_tables(&new_table_desc, i)) {
- continue;
- }
+ /* Validate and verify a table before installation */
+ status = acpi_tb_verify_temp_table(&new_table_desc, NULL, &i);
+ if (ACPI_FAILURE(status)) {
+ if (status == AE_CTRL_TERMINATE) {
/*
- * Note: the current mechanism does not unregister a table if it is
- * dynamically unloaded. The related namespace entries are deleted,
- * but the table remains in the root table list.
- *
- * The assumption here is that the number of different tables that
- * will be loaded is actually small, and there is minimal overhead
- * in just keeping the table in case it is needed again.
- *
- * If this assumption changes in the future (perhaps on large
- * machines with many table load/unload operations), tables will
- * need to be unregistered when they are unloaded, and slots in the
- * root table list should be reused when empty.
+ * Table was unloaded, allow it to be reloaded.
+ * As we are going to return AE_OK to the caller, we should
+ * take the responsibility of freeing the input descriptor.
+ * Refill the input descriptor to ensure
+ * acpi_tb_install_table_with_override() can be called again to
+ * indicate the re-installation.
*/
- if (acpi_gbl_root_table_list.tables[i].flags &
- ACPI_TABLE_IS_LOADED) {
-
- /* Table is still loaded, this is an error */
-
- status = AE_ALREADY_EXISTS;
- goto unlock_and_exit;
- } else {
- /*
- * Table was unloaded, allow it to be reloaded.
- * As we are going to return AE_OK to the caller, we should
- * take the responsibility of freeing the input descriptor.
- * Refill the input descriptor to ensure
- * acpi_tb_install_table_with_override() can be called again to
- * indicate the re-installation.
- */
- acpi_tb_uninstall_table(&new_table_desc);
- (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
- *table_index = i;
- return_ACPI_STATUS(AE_OK);
- }
+ acpi_tb_uninstall_table(&new_table_desc);
+ (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+ *table_index = i;
+ return_ACPI_STATUS(AE_OK);
}
+ goto unlock_and_exit;
}
/* Add the table to the global root table list */
@@ -306,14 +191,10 @@ acpi_tb_install_standard_table(acpi_physical_address address,
acpi_tb_install_table_with_override(&new_table_desc, override,
table_index);
- /* Invoke table handler if present */
+ /* Invoke table handler */
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
- if (acpi_gbl_table_handler) {
- (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_INSTALL,
- new_table_desc.pointer,
- acpi_gbl_table_handler_context);
- }
+ acpi_tb_notify_table(ACPI_TABLE_EVENT_INSTALL, new_table_desc.pointer);
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
unlock_and_exit:
@@ -382,9 +263,11 @@ void acpi_tb_override_table(struct acpi_table_desc *old_table_desc)
finish_override:
- /* Validate and verify a table before overriding */
-
- status = acpi_tb_verify_temp_table(&new_table_desc, NULL);
+ /*
+ * Validate and verify a table before overriding, no nested table
+ * duplication check as it's too complicated and unnecessary.
+ */
+ status = acpi_tb_verify_temp_table(&new_table_desc, NULL, NULL);
if (ACPI_FAILURE(status)) {
return;
}
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index 010b1c43df92..26ad596c973e 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -167,7 +167,8 @@ ACPI_EXPORT_SYMBOL_INIT(acpi_initialize_tables)
acpi_status ACPI_INIT_FUNCTION acpi_reallocate_root_table(void)
{
acpi_status status;
- u32 i;
+ struct acpi_table_desc *table_desc;
+ u32 i, j;
ACPI_FUNCTION_TRACE(acpi_reallocate_root_table);
@@ -179,6 +180,8 @@ acpi_status ACPI_INIT_FUNCTION acpi_reallocate_root_table(void)
return_ACPI_STATUS(AE_SUPPORT);
}
+ (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+
/*
* Ensure OS early boot logic, which is required by some hosts. If the
* table state is reported to be wrong, developers should fix the
@@ -186,17 +189,39 @@ acpi_status ACPI_INIT_FUNCTION acpi_reallocate_root_table(void)
* early stage.
*/
for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
- if (acpi_gbl_root_table_list.tables[i].pointer) {
+ table_desc = &acpi_gbl_root_table_list.tables[i];
+ if (table_desc->pointer) {
ACPI_ERROR((AE_INFO,
"Table [%4.4s] is not invalidated during early boot stage",
- acpi_gbl_root_table_list.tables[i].
- signature.ascii));
+ table_desc->signature.ascii));
}
}
- acpi_gbl_root_table_list.flags |= ACPI_ROOT_ALLOW_RESIZE;
+ if (!acpi_gbl_enable_table_validation) {
+ /*
+ * Now it's safe to do full table validation. We can do deferred
+ * table initialization here once the flag is set.
+ */
+ acpi_gbl_enable_table_validation = TRUE;
+ for (i = 0; i < acpi_gbl_root_table_list.current_table_count;
+ ++i) {
+ table_desc = &acpi_gbl_root_table_list.tables[i];
+ if (!(table_desc->flags & ACPI_TABLE_IS_VERIFIED)) {
+ status =
+ acpi_tb_verify_temp_table(table_desc, NULL,
+ &j);
+ if (ACPI_FAILURE(status)) {
+ acpi_tb_uninstall_table(table_desc);
+ }
+ }
+ }
+ }
+ acpi_gbl_root_table_list.flags |= ACPI_ROOT_ALLOW_RESIZE;
status = acpi_tb_resize_root_table_list();
+ acpi_gbl_root_table_list.flags |= ACPI_ROOT_ORIGIN_ALLOCATED;
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
return_ACPI_STATUS(status);
}
@@ -369,6 +394,10 @@ void acpi_put_table(struct acpi_table_header *table)
ACPI_FUNCTION_TRACE(acpi_put_table);
+ if (!table) {
+ return_VOID;
+ }
+
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
/* Walk the root table list */
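With the NULL check added to acpi_put_table() above, callers can unconditionally release a table reference on their error path. A minimal, hedged sketch of such a caller (illustrative only, not taken from this patch):

#include <linux/acpi.h>
#include <linux/errno.h>
#include <linux/printk.h>

static int example_read_madt(void)
{
	struct acpi_table_header *madt = NULL;
	acpi_status status;

	status = acpi_get_table(ACPI_SIG_MADT, 0, &madt);
	if (ACPI_SUCCESS(status))
		pr_info("MADT: length %u, revision %u\n",
			madt->length, madt->revision);

	/* A no-op when madt is still NULL, thanks to the check added above */
	acpi_put_table(madt);

	return ACPI_SUCCESS(status) ? 0 : -ENODEV;
}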
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index b71ce3b817ea..d81f442228b8 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -206,7 +206,7 @@ acpi_status acpi_tb_load_namespace(void)
for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
table = &acpi_gbl_root_table_list.tables[i];
- if (!acpi_gbl_root_table_list.tables[i].address ||
+ if (!table->address ||
(!ACPI_COMPARE_NAME(table->signature.ascii, ACPI_SIG_SSDT)
&& !ACPI_COMPARE_NAME(table->signature.ascii,
ACPI_SIG_PSDT)
diff --git a/drivers/acpi/acpica/uthex.c b/drivers/acpi/acpica/uthex.c
index 6600bc257516..fb406daf47fa 100644
--- a/drivers/acpi/acpica/uthex.c
+++ b/drivers/acpi/acpica/uthex.c
@@ -69,8 +69,10 @@ static const char acpi_gbl_hex_to_ascii[] = {
char acpi_ut_hex_to_ascii_char(u64 integer, u32 position)
{
+ u64 index;
- return (acpi_gbl_hex_to_ascii[(integer >> position) & 0xF]);
+ acpi_ut_short_shift_right(integer, position, &index);
+ return (acpi_gbl_hex_to_ascii[index & 0xF]);
}
/*******************************************************************************
diff --git a/drivers/acpi/acpica/utmath.c b/drivers/acpi/acpica/utmath.c
index aa0502d1d019..5f9c680076c4 100644
--- a/drivers/acpi/acpica/utmath.c
+++ b/drivers/acpi/acpica/utmath.c
@@ -47,15 +47,6 @@
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utmath")
-/*
- * Optional support for 64-bit double-precision integer divide. This code
- * is configurable and is implemented in order to support 32-bit kernel
- * environments where a 64-bit double-precision math library is not available.
- *
- * Support for a more normal 64-bit divide/modulo (with check for a divide-
- * by-zero) appears after this optional section of code.
- */
-#ifndef ACPI_USE_NATIVE_DIVIDE
/* Structures used only for 64-bit divide */
typedef struct uint64_struct {
u32 lo;
@@ -69,6 +60,217 @@ typedef union uint64_overlay {
} uint64_overlay;
+/*
+ * Optional support for 64-bit double-precision integer multiply and shift.
+ * This code is configurable and is implemented in order to support 32-bit
+ * kernel environments where a 64-bit double-precision math library is not
+ * available.
+ */
+#ifndef ACPI_USE_NATIVE_MATH64
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_short_multiply
+ *
+ * PARAMETERS: multiplicand - 64-bit multiplicand
+ * multiplier - 32-bit multiplier
+ * out_product - Pointer to where the product is returned
+ *
+ * DESCRIPTION: Perform a short multiply.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_short_multiply(u64 multiplicand, u32 multiplier, u64 *out_product)
+{
+ union uint64_overlay multiplicand_ovl;
+ union uint64_overlay product;
+ u32 carry32;
+
+ ACPI_FUNCTION_TRACE(ut_short_multiply);
+
+ multiplicand_ovl.full = multiplicand;
+
+ /*
+ * The Product is 64 bits, the carry is always 32 bits,
+ * and is generated by the second multiply.
+ */
+ ACPI_MUL_64_BY_32(0, multiplicand_ovl.part.hi, multiplier,
+ product.part.hi, carry32);
+
+ ACPI_MUL_64_BY_32(0, multiplicand_ovl.part.lo, multiplier,
+ product.part.lo, carry32);
+
+ product.part.hi += carry32;
+
+ /* Return only what was requested */
+
+ if (out_product) {
+ *out_product = product.full;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_short_shift_left
+ *
+ * PARAMETERS: operand - 64-bit shift operand
+ * count - 32-bit shift count
+ * out_result - Pointer to where the result is returned
+ *
+ * DESCRIPTION: Perform a short left shift.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_short_shift_left(u64 operand, u32 count, u64 *out_result)
+{
+ union uint64_overlay operand_ovl;
+
+ ACPI_FUNCTION_TRACE(ut_short_shift_left);
+
+ operand_ovl.full = operand;
+
+ if ((count & 63) >= 32) {
+ operand_ovl.part.hi = operand_ovl.part.lo;
+ operand_ovl.part.lo ^= operand_ovl.part.lo;
+ count = (count & 63) - 32;
+ }
+ ACPI_SHIFT_LEFT_64_BY_32(operand_ovl.part.hi,
+ operand_ovl.part.lo, count);
+
+ /* Return only what was requested */
+
+ if (out_result) {
+ *out_result = operand_ovl.full;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_short_shift_right
+ *
+ * PARAMETERS: operand - 64-bit shift operand
+ * count - 32-bit shift count
+ * out_result - Pointer to where the result is returned
+ *
+ * DESCRIPTION: Perform a short right shift.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_short_shift_right(u64 operand, u32 count, u64 *out_result)
+{
+ union uint64_overlay operand_ovl;
+
+ ACPI_FUNCTION_TRACE(ut_short_shift_right);
+
+ operand_ovl.full = operand;
+
+ if ((count & 63) >= 32) {
+ operand_ovl.part.lo = operand_ovl.part.hi;
+ operand_ovl.part.hi ^= operand_ovl.part.hi;
+ count = (count & 63) - 32;
+ }
+ ACPI_SHIFT_RIGHT_64_BY_32(operand_ovl.part.hi,
+ operand_ovl.part.lo, count);
+
+ /* Return only what was requested */
+
+ if (out_result) {
+ *out_result = operand_ovl.full;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+#else
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_short_multiply
+ *
+ * PARAMETERS: See function headers above
+ *
+ * DESCRIPTION: Native version of the ut_short_multiply function.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_short_multiply(u64 multiplicand, u32 multiplier, u64 *out_product)
+{
+
+ ACPI_FUNCTION_TRACE(ut_short_multiply);
+
+ /* Return only what was requested */
+
+ if (out_product) {
+ *out_product = multiplicand * multiplier;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_short_shift_left
+ *
+ * PARAMETERS: See function headers above
+ *
+ * DESCRIPTION: Native version of the ut_short_shift_left function.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_short_shift_left(u64 operand, u32 count, u64 *out_result)
+{
+
+ ACPI_FUNCTION_TRACE(ut_short_shift_left);
+
+ /* Return only what was requested */
+
+ if (out_result) {
+ *out_result = operand << count;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_short_shift_right
+ *
+ * PARAMETERS: See function headers above
+ *
+ * DESCRIPTION: Native version of the ut_short_shift_right function.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_short_shift_right(u64 operand, u32 count, u64 *out_result)
+{
+
+ ACPI_FUNCTION_TRACE(ut_short_shift_right);
+
+ /* Return only what was requested */
+
+ if (out_result) {
+ *out_result = operand >> count;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+#endif
+
+/*
+ * Optional support for 64-bit double-precision integer divide. This code
+ * is configurable and is implemented in order to support 32-bit kernel
+ * environments where a 64-bit double-precision math library is not available.
+ *
+ * Support for a more normal 64-bit divide/modulo (with check for a divide-
+ * by-zero) appears after this optional section of code.
+ */
+#ifndef ACPI_USE_NATIVE_DIVIDE
+
/*******************************************************************************
*
* FUNCTION: acpi_ut_short_divide
@@ -258,6 +460,7 @@ acpi_ut_divide(u64 in_dividend,
}
#else
+
/*******************************************************************************
*
* FUNCTION: acpi_ut_short_divide, acpi_ut_divide
@@ -272,6 +475,7 @@ acpi_ut_divide(u64 in_dividend,
* perform the divide.
*
******************************************************************************/
+
acpi_status
acpi_ut_short_divide(u64 in_dividend,
u32 divisor, u64 *out_quotient, u32 *out_remainder)
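The non-native helpers above split a u64 into two u32 halves and rely only on 32-bit operations (the ACPI_MUL_64_BY_32 and ACPI_SHIFT_*_64_BY_32 macros), which is the point of supporting 32-bit kernels without a 64-bit math library. A standalone, hedged illustration of the same decomposition in plain C, independent of the ACPICA macros:

#include <stdint.h>
#include <stdio.h>

/* 64 x 32 multiply built from 32-bit multiplies, mirroring acpi_ut_short_multiply() */
static uint64_t mul64_by_32(uint64_t multiplicand, uint32_t multiplier)
{
	uint32_t lo = (uint32_t)multiplicand;
	uint32_t hi = (uint32_t)(multiplicand >> 32);
	uint64_t lo_product = (uint64_t)lo * multiplier;
	uint32_t carry = (uint32_t)(lo_product >> 32);	/* carry into the high half */

	return ((uint64_t)(hi * multiplier + carry) << 32) | (uint32_t)lo_product;
}

/* Right shift that handles counts >= 32 by moving the halves, as in acpi_ut_short_shift_right() */
static uint64_t shr64(uint64_t operand, uint32_t count)
{
	uint32_t lo = (uint32_t)operand;
	uint32_t hi = (uint32_t)(operand >> 32);

	count &= 63;
	if (count >= 32) {
		lo = hi;	/* the high half becomes the low half */
		hi = 0;
		count -= 32;
	}
	if (count) {
		lo = (lo >> count) | (hi << (32 - count));
		hi >>= count;
	}

	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	printf("%llx\n", (unsigned long long)mul64_by_32(0x123456789abcdef0ULL, 10));
	printf("%llx\n", (unsigned long long)shr64(0x123456789abcdef0ULL, 36));
	return 0;
}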
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index 443ffad01209..45c78c2adbf0 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -224,7 +224,7 @@ acpi_ut_create_update_state_and_push(union acpi_operand_object *object,
*
* RETURN: Status
*
- * DESCRIPTION: Walk through a package
+ * DESCRIPTION: Walk through a package, including subpackages
*
******************************************************************************/
@@ -236,8 +236,8 @@ acpi_ut_walk_package_tree(union acpi_operand_object *source_object,
acpi_status status = AE_OK;
union acpi_generic_state *state_list = NULL;
union acpi_generic_state *state;
- u32 this_index;
union acpi_operand_object *this_source_obj;
+ u32 this_index;
ACPI_FUNCTION_TRACE(ut_walk_package_tree);
@@ -251,8 +251,10 @@ acpi_ut_walk_package_tree(union acpi_operand_object *source_object,
/* Get one element of the package */
this_index = state->pkg.index;
- this_source_obj = (union acpi_operand_object *)
+ this_source_obj =
state->pkg.source_object->package.elements[this_index];
+ state->pkg.this_target_obj =
+ &state->pkg.source_object->package.elements[this_index];
/*
* Check for:
@@ -339,6 +341,8 @@ acpi_ut_walk_package_tree(union acpi_operand_object *source_object,
/* We should never get here */
+ ACPI_ERROR((AE_INFO, "State list did not terminate correctly"));
+
return_ACPI_STATUS(AE_AML_INTERNAL);
}
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index 64e6641bfe82..cb3db9fed50d 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -483,6 +483,11 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
/* A namespace node should never get here */
+ ACPI_ERROR((AE_INFO,
+ "Received a namespace node [%4.4s] "
+ "where an operand object is required",
+ ACPI_CAST_PTR(struct acpi_namespace_node,
+ internal_object)->name.ascii));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
diff --git a/drivers/acpi/acpica/utprint.c b/drivers/acpi/acpica/utprint.c
index 7e6e1ae6140f..c008589b41bd 100644
--- a/drivers/acpi/acpica/utprint.c
+++ b/drivers/acpi/acpica/utprint.c
@@ -176,7 +176,7 @@ const char *acpi_ut_scan_number(const char *string, u64 *number_ptr)
u64 number = 0;
while (isdigit((int)*string)) {
- number *= 10;
+ acpi_ut_short_multiply(number, 10, &number);
number += *(string++) - '0';
}
@@ -286,7 +286,7 @@ static char *acpi_ut_format_number(char *string,
/* Generate full string in reverse order */
pos = acpi_ut_put_number(reversed_string, number, base, upper);
- i = ACPI_PTR_DIFF(pos, reversed_string);
+ i = (s32)ACPI_PTR_DIFF(pos, reversed_string);
/* Printing 100 using %2d gives "100", not "00" */
@@ -475,7 +475,7 @@ int vsnprintf(char *string, acpi_size size, const char *format, va_list args)
if (!s) {
s = "<NULL>";
}
- length = acpi_ut_bound_string_length(s, precision);
+ length = (s32)acpi_ut_bound_string_length(s, precision);
if (!(type & ACPI_FORMAT_LEFT)) {
while (length < width--) {
pos =
@@ -579,7 +579,7 @@ int vsnprintf(char *string, acpi_size size, const char *format, va_list args)
}
}
- return (ACPI_PTR_DIFF(pos, string));
+ return ((int)ACPI_PTR_DIFF(pos, string));
}
/*******************************************************************************
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index 70f78a4bf13b..f9801d13547f 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -237,6 +237,13 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
}
+ /*
+ * Don't attempt to perform any validation on the 2nd byte.
+ * Although all known ASL compilers insert a zero for the 2nd
+ * byte, it can also be a checksum (as per the ACPI spec),
+ * and this is occasionally seen in the field. July 2017.
+ */
+
/* Return the pointer to the end_tag if requested */
if (!user_function) {
diff --git a/drivers/acpi/acpica/utstate.c b/drivers/acpi/acpica/utstate.c
index 64308c304ade..eafabcd2fada 100644
--- a/drivers/acpi/acpica/utstate.c
+++ b/drivers/acpi/acpica/utstate.c
@@ -226,7 +226,7 @@ union acpi_generic_state *acpi_ut_create_update_state(union acpi_operand_object
union acpi_generic_state *acpi_ut_create_pkg_state(void *internal_object,
void *external_object,
- u16 index)
+ u32 index)
{
union acpi_generic_state *state;
diff --git a/drivers/acpi/acpica/utstrtoul64.c b/drivers/acpi/acpica/utstrtoul64.c
index f42be01d99fd..9633ee142855 100644
--- a/drivers/acpi/acpica/utstrtoul64.c
+++ b/drivers/acpi/acpica/utstrtoul64.c
@@ -276,8 +276,8 @@ static u64 acpi_ut_strtoul_base10(char *string, u32 flags)
/* Convert and insert (add) the decimal digit */
- next_value =
- (return_value * 10) + (ascii_digit - ACPI_ASCII_ZERO);
+ acpi_ut_short_multiply(return_value, 10, &next_value);
+ next_value += (ascii_digit - ACPI_ASCII_ZERO);
/* Check for overflow (32 or 64 bit) - return current converted value */
@@ -335,9 +335,8 @@ static u64 acpi_ut_strtoul_base16(char *string, u32 flags)
/* Convert and insert the hex digit */
- return_value =
- (return_value << 4) |
- acpi_ut_ascii_char_to_hex(ascii_digit);
+ acpi_ut_short_shift_left(return_value, 4, &return_value);
+ return_value |= acpi_ut_ascii_char_to_hex(ascii_digit);
string++;
valid_digits++;
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c
index 9a07a42cae34..3c8de88ecbd5 100644
--- a/drivers/acpi/acpica/uttrack.c
+++ b/drivers/acpi/acpica/uttrack.c
@@ -591,6 +591,10 @@ void acpi_ut_dump_allocations(u32 component, const char *module)
return_VOID;
}
+ if (!acpi_gbl_global_list) {
+ goto exit;
+ }
+
element = acpi_gbl_global_list->list_head;
while (element) {
if ((element->component & component) &&
@@ -602,7 +606,7 @@ void acpi_ut_dump_allocations(u32 component, const char *module)
if (element->size <
sizeof(struct acpi_common_descriptor)) {
- acpi_os_printf("%p Length 0x%04X %9.9s-%u "
+ acpi_os_printf("%p Length 0x%04X %9.9s-%4.4u "
"[Not a Descriptor - too small]\n",
descriptor, element->size,
element->module, element->line);
@@ -612,7 +616,7 @@ void acpi_ut_dump_allocations(u32 component, const char *module)
if (ACPI_GET_DESCRIPTOR_TYPE(descriptor) !=
ACPI_DESC_TYPE_CACHED) {
acpi_os_printf
- ("%p Length 0x%04X %9.9s-%u [%s] ",
+ ("%p Length 0x%04X %9.9s-%4.4u [%s] ",
descriptor, element->size,
element->module, element->line,
acpi_ut_get_descriptor_name
@@ -705,6 +709,7 @@ void acpi_ut_dump_allocations(u32 component, const char *module)
element = element->next;
}
+exit:
(void)acpi_ut_release_mutex(ACPI_MTX_MEMORY);
/* Print summary */
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
index 6e9f14c0a71b..cb4126051f62 100644
--- a/drivers/acpi/apei/apei-internal.h
+++ b/drivers/acpi/apei/apei-internal.h
@@ -120,11 +120,6 @@ int apei_exec_collect_resources(struct apei_exec_context *ctx,
struct dentry;
struct dentry *apei_get_debugfs_dir(void);
-#define apei_estatus_for_each_section(estatus, section) \
- for (section = (struct acpi_hest_generic_data *)(estatus + 1); \
- (void *)section - (void *)estatus < estatus->data_length; \
- section = (void *)(section+1) + section->error_data_length)
-
static inline u32 cper_estatus_len(struct acpi_hest_generic_status *estatus)
{
if (estatus->raw_data_length)
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index ec50c32ea3da..b38737c83a24 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -281,7 +281,7 @@ static struct acpi_generic_address *einj_get_trigger_parameter_region(
((char *)trigger_tab + sizeof(struct acpi_einj_trigger));
for (i = 0; i < trigger_tab->entry_count; i++) {
if (entry->action == ACPI_EINJ_TRIGGER_ERROR &&
- entry->instruction == ACPI_EINJ_WRITE_REGISTER_VALUE &&
+ entry->instruction <= ACPI_EINJ_WRITE_REGISTER_VALUE &&
entry->register_region.space_id ==
ACPI_ADR_SPACE_SYSTEM_MEMORY &&
(entry->register_region.address & param2) == (param1 & param2))
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index d661d452b238..077f9bad6f44 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -1157,7 +1157,8 @@ static int ghes_probe(struct platform_device *ghes_dev)
generic->header.source_id);
goto err_edac_unreg;
}
- rc = request_irq(ghes->irq, ghes_irq_func, 0, "GHES IRQ", ghes);
+ rc = request_irq(ghes->irq, ghes_irq_func, IRQF_SHARED,
+ "GHES IRQ", ghes);
if (rc) {
pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n",
generic->header.source_id);
@@ -1265,9 +1266,14 @@ static int __init ghes_init(void)
if (acpi_disabled)
return -ENODEV;
- if (hest_disable) {
+ switch (hest_disable) {
+ case HEST_NOT_FOUND:
+ return -ENODEV;
+ case HEST_DISABLED:
pr_info(GHES_PFX "HEST is not enabled!\n");
return -EINVAL;
+ default:
+ break;
}
if (ghes_disable) {
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index 456b488eb1df..9cb74115a43d 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -37,7 +37,7 @@
#define HEST_PFX "HEST: "
-bool hest_disable;
+int hest_disable;
EXPORT_SYMBOL_GPL(hest_disable);
/* HEST table parsing */
@@ -213,7 +213,7 @@ err:
static int __init setup_hest_disable(char *str)
{
- hest_disable = 1;
+ hest_disable = HEST_DISABLED;
return 0;
}
@@ -232,9 +232,10 @@ void __init acpi_hest_init(void)
status = acpi_get_table(ACPI_SIG_HEST, 0,
(struct acpi_table_header **)&hest_tab);
- if (status == AE_NOT_FOUND)
- goto err;
- else if (ACPI_FAILURE(status)) {
+ if (status == AE_NOT_FOUND) {
+ hest_disable = HEST_NOT_FOUND;
+ return;
+ } else if (ACPI_FAILURE(status)) {
const char *msg = acpi_format_exception(status);
pr_err(HEST_PFX "Failed to get table, %s\n", msg);
rc = -EINVAL;
@@ -257,5 +258,5 @@ void __init acpi_hest_init(void)
pr_info(HEST_PFX "Table parsing has been initialized.\n");
return;
err:
- hest_disable = 1;
+ hest_disable = HEST_DISABLED;
}
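The change above turns hest_disable from a bool into a tri-state so that ghes_init() can distinguish a missing HEST table from one disabled on the command line. The actual definition lives in apei-internal.h, which is not part of this hunk; it is assumed to look roughly like the following sketch, with the zero value as the enabled default:

/* Assumed tri-state behind the hest_disable conversion (see apei-internal.h) */
enum hest_status {
	HEST_ENABLED,		/* default: table found and parsed */
	HEST_DISABLED,		/* hest_disable= on the command line, or a parse error */
	HEST_NOT_FOUND,		/* firmware provides no HEST table at all */
};

extern int hest_disable;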
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index a3215ee671c1..9565d572f8dd 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -588,7 +588,8 @@ void acpi_configure_pmsi_domain(struct device *dev)
dev_set_msi_domain(dev, msi_domain);
}
-static int __get_pci_rid(struct pci_dev *pdev, u16 alias, void *data)
+static int __maybe_unused __get_pci_rid(struct pci_dev *pdev, u16 alias,
+ void *data)
{
u32 *rid = data;
@@ -633,8 +634,7 @@ int iort_add_device_replay(const struct iommu_ops *ops, struct device *dev)
{
int err = 0;
- if (!IS_ERR_OR_NULL(ops) && ops->add_device && dev->bus &&
- !dev->iommu_group)
+ if (ops->add_device && dev->bus && !dev->iommu_group)
err = ops->add_device(dev);
return err;
@@ -648,45 +648,81 @@ int iort_add_device_replay(const struct iommu_ops *ops, struct device *dev)
{ return 0; }
#endif
-static const struct iommu_ops *iort_iommu_xlate(struct device *dev,
- struct acpi_iort_node *node,
- u32 streamid)
+static int iort_iommu_xlate(struct device *dev, struct acpi_iort_node *node,
+ u32 streamid)
{
- const struct iommu_ops *ops = NULL;
- int ret = -ENODEV;
+ const struct iommu_ops *ops;
struct fwnode_handle *iort_fwnode;
- if (node) {
- iort_fwnode = iort_get_fwnode(node);
- if (!iort_fwnode)
- return NULL;
+ if (!node)
+ return -ENODEV;
- ops = iommu_ops_from_fwnode(iort_fwnode);
- /*
- * If the ops look-up fails, this means that either
- * the SMMU drivers have not been probed yet or that
- * the SMMU drivers are not built in the kernel;
- * Depending on whether the SMMU drivers are built-in
- * in the kernel or not, defer the IOMMU configuration
- * or just abort it.
- */
- if (!ops)
- return iort_iommu_driver_enabled(node->type) ?
- ERR_PTR(-EPROBE_DEFER) : NULL;
+ iort_fwnode = iort_get_fwnode(node);
+ if (!iort_fwnode)
+ return -ENODEV;
- ret = arm_smmu_iort_xlate(dev, streamid, iort_fwnode, ops);
- }
+ /*
+ * If the ops look-up fails, this means that either
+ * the SMMU drivers have not been probed yet or that
+ * the SMMU drivers are not built in the kernel;
+ * Depending on whether the SMMU drivers are built-in
+ * in the kernel or not, defer the IOMMU configuration
+ * or just abort it.
+ */
+ ops = iommu_ops_from_fwnode(iort_fwnode);
+ if (!ops)
+ return iort_iommu_driver_enabled(node->type) ?
+ -EPROBE_DEFER : -ENODEV;
+
+ return arm_smmu_iort_xlate(dev, streamid, iort_fwnode, ops);
+}
+
+struct iort_pci_alias_info {
+ struct device *dev;
+ struct acpi_iort_node *node;
+};
+
+static int iort_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
+{
+ struct iort_pci_alias_info *info = data;
+ struct acpi_iort_node *parent;
+ u32 streamid;
- return ret ? NULL : ops;
+ parent = iort_node_map_id(info->node, alias, &streamid,
+ IORT_IOMMU_TYPE);
+ return iort_iommu_xlate(info->dev, parent, streamid);
+}
+
+static int nc_dma_get_range(struct device *dev, u64 *size)
+{
+ struct acpi_iort_node *node;
+ struct acpi_iort_named_component *ncomp;
+
+ node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
+ iort_match_node_callback, dev);
+ if (!node)
+ return -ENODEV;
+
+ ncomp = (struct acpi_iort_named_component *)node->node_data;
+
+ *size = ncomp->memory_address_limit >= 64 ? U64_MAX :
+ 1ULL<<ncomp->memory_address_limit;
+
+ return 0;
}
/**
- * iort_set_dma_mask - Set-up dma mask for a device.
+ * iort_dma_setup() - Set-up device DMA parameters.
*
* @dev: device to configure
+ * @dma_addr: device DMA address result pointer
+ * @size: DMA range size result pointer
*/
-void iort_set_dma_mask(struct device *dev)
+void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
{
+ u64 mask, dmaaddr = 0, size = 0, offset = 0;
+ int ret, msb;
+
/*
* Set default coherent_dma_mask to 32 bit. Drivers are expected to
* setup the correct supported mask.
@@ -700,6 +736,36 @@ void iort_set_dma_mask(struct device *dev)
*/
if (!dev->dma_mask)
dev->dma_mask = &dev->coherent_dma_mask;
+
+ size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
+
+ if (dev_is_pci(dev))
+ ret = acpi_dma_get_range(dev, &dmaaddr, &offset, &size);
+ else
+ ret = nc_dma_get_range(dev, &size);
+
+ if (!ret) {
+ msb = fls64(dmaaddr + size - 1);
+ /*
+ * Round-up to the power-of-two mask or set
+ * the mask to the whole 64-bit address space
+ * in case the DMA region covers the full
+ * memory window.
+ */
+ mask = msb == 64 ? U64_MAX : (1ULL << msb) - 1;
+ /*
+ * Limit coherent and dma mask based on size
+ * retrieved from firmware.
+ */
+ dev->coherent_dma_mask = mask;
+ *dev->dma_mask = mask;
+ }
+
+ *dma_addr = dmaaddr;
+ *dma_size = size;
+
+ dev->dma_pfn_offset = PFN_DOWN(offset);
+ dev_dbg(dev, "dma_pfn_offset(%#08llx)\n", offset);
}
/**
@@ -713,9 +779,9 @@ void iort_set_dma_mask(struct device *dev)
const struct iommu_ops *iort_iommu_configure(struct device *dev)
{
struct acpi_iort_node *node, *parent;
- const struct iommu_ops *ops = NULL;
+ const struct iommu_ops *ops;
u32 streamid = 0;
- int err;
+ int err = -ENODEV;
/*
* If we already translated the fwspec there
@@ -727,21 +793,16 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
if (dev_is_pci(dev)) {
struct pci_bus *bus = to_pci_dev(dev)->bus;
- u32 rid;
-
- pci_for_each_dma_alias(to_pci_dev(dev), __get_pci_rid,
- &rid);
+ struct iort_pci_alias_info info = { .dev = dev };
node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
iort_match_node_callback, &bus->dev);
if (!node)
return NULL;
- parent = iort_node_map_id(node, rid, &streamid,
- IORT_IOMMU_TYPE);
-
- ops = iort_iommu_xlate(dev, parent, streamid);
-
+ info.node = node;
+ err = pci_for_each_dma_alias(to_pci_dev(dev),
+ iort_pci_iommu_init, &info);
} else {
int i = 0;
@@ -750,31 +811,30 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
if (!node)
return NULL;
- parent = iort_node_map_platform_id(node, &streamid,
- IORT_IOMMU_TYPE, i++);
-
- while (parent) {
- ops = iort_iommu_xlate(dev, parent, streamid);
- if (IS_ERR_OR_NULL(ops))
- return ops;
-
+ do {
parent = iort_node_map_platform_id(node, &streamid,
IORT_IOMMU_TYPE,
i++);
- }
+
+ if (parent)
+ err = iort_iommu_xlate(dev, parent, streamid);
+ } while (parent && !err);
}
/*
* If we have reason to believe the IOMMU driver missed the initial
* add_device callback for dev, replay it to get things in order.
*/
- err = iort_add_device_replay(ops, dev);
- if (err)
- ops = ERR_PTR(err);
+ if (!err) {
+ ops = iort_fwspec_iommu_ops(dev->iommu_fwspec);
+ err = iort_add_device_replay(ops, dev);
+ }
/* Ignore all other errors apart from EPROBE_DEFER */
- if (IS_ERR(ops) && (PTR_ERR(ops) != -EPROBE_DEFER)) {
- dev_dbg(dev, "Adding to IOMMU failed: %ld\n", PTR_ERR(ops));
+ if (err == -EPROBE_DEFER) {
+ ops = ERR_PTR(err);
+ } else if (err) {
+ dev_dbg(dev, "Adding to IOMMU failed: %d\n", err);
ops = NULL;
}
@@ -908,6 +968,27 @@ static bool __init arm_smmu_v3_is_coherent(struct acpi_iort_node *node)
return smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE;
}
+#if defined(CONFIG_ACPI_NUMA) && defined(ACPI_IORT_SMMU_V3_PXM_VALID)
+/*
+ * Set the NUMA proximity domain for an SMMUv3 device
+ */
+static void __init arm_smmu_v3_set_proximity(struct device *dev,
+ struct acpi_iort_node *node)
+{
+ struct acpi_iort_smmu_v3 *smmu;
+
+ smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
+ if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) {
+ set_dev_node(dev, acpi_map_pxm_to_node(smmu->pxm));
+ pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n",
+ smmu->base_address,
+ smmu->pxm);
+ }
+}
+#else
+#define arm_smmu_v3_set_proximity NULL
+#endif
+
static int __init arm_smmu_count_resources(struct acpi_iort_node *node)
{
struct acpi_iort_smmu *smmu;
@@ -977,13 +1058,16 @@ struct iort_iommu_config {
int (*iommu_count_resources)(struct acpi_iort_node *node);
void (*iommu_init_resources)(struct resource *res,
struct acpi_iort_node *node);
+ void (*iommu_set_proximity)(struct device *dev,
+ struct acpi_iort_node *node);
};
static const struct iort_iommu_config iort_arm_smmu_v3_cfg __initconst = {
.name = "arm-smmu-v3",
.iommu_is_coherent = arm_smmu_v3_is_coherent,
.iommu_count_resources = arm_smmu_v3_count_resources,
- .iommu_init_resources = arm_smmu_v3_init_resources
+ .iommu_init_resources = arm_smmu_v3_init_resources,
+ .iommu_set_proximity = arm_smmu_v3_set_proximity,
};
static const struct iort_iommu_config iort_arm_smmu_cfg __initconst = {
@@ -1028,6 +1112,9 @@ static int __init iort_add_smmu_platform_device(struct acpi_iort_node *node)
if (!pdev)
return -ENOMEM;
+ if (ops->iommu_set_proximity)
+ ops->iommu_set_proximity(&pdev->dev, node);
+
count = ops->iommu_count_resources(node);
r = kcalloc(count, sizeof(*r), GFP_KERNEL);
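iort_dma_setup() above rounds the firmware-described DMA window up to a power-of-two address mask with fls64(). A small, hedged illustration of that msb/mask computation in plain C (mirroring the logic in the hunk, not reusing kernel helpers):

#include <stdint.h>
#include <stdio.h>

/* Position of the highest set bit, 1-based; 0 for an input of 0 (the fls64() contract) */
static int fls64_sketch(uint64_t value)
{
	int msb = 0;

	while (value) {
		msb++;
		value >>= 1;
	}
	return msb;
}

/* Round a DMA window [dmaaddr, dmaaddr + size) up to a power-of-two address mask */
static uint64_t dma_window_to_mask(uint64_t dmaaddr, uint64_t size)
{
	int msb = fls64_sketch(dmaaddr + size - 1);

	/* A window reaching the top of the 64-bit space maps to an all-ones mask */
	return msb == 64 ? UINT64_MAX : (1ULL << msb) - 1;
}

int main(void)
{
	/* A 3 GiB window starting at 0 needs a 32-bit (4 GiB - 1) mask */
	printf("%#llx\n", (unsigned long long)dma_window_to_mask(0, 3ULL << 30));
	return 0;
}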
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 1cbb88d938e5..13e7b56e33ae 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -620,7 +620,7 @@ static ssize_t acpi_battery_alarm_store(struct device *dev,
return count;
}
-static struct device_attribute alarm_attr = {
+static const struct device_attribute alarm_attr = {
.attr = {.name = "alarm", .mode = 0644},
.show = acpi_battery_alarm_show,
.store = acpi_battery_alarm_store,
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index bb542acc0574..037fd537bbf6 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -30,30 +30,13 @@
#include "internal.h"
-enum acpi_blacklist_predicates {
- all_versions,
- less_than_or_equal,
- equal,
- greater_than_or_equal,
-};
-
-struct acpi_blacklist_item {
- char oem_id[7];
- char oem_table_id[9];
- u32 oem_revision;
- char *table;
- enum acpi_blacklist_predicates oem_revision_predicate;
- char *reason;
- u32 is_critical_error;
-};
-
static struct dmi_system_id acpi_rev_dmi_table[] __initdata;
/*
* POLICY: If *anything* doesn't work, put it on the blacklist.
* If they are critical errors, mark it critical, and abort driver load.
*/
-static struct acpi_blacklist_item acpi_blacklist[] __initdata = {
+static struct acpi_platform_list acpi_blacklist[] __initdata = {
/* Compaq Presario 1700 */
{"PTLTD ", " DSDT ", 0x06040000, ACPI_SIG_DSDT, less_than_or_equal,
"Multiple problems", 1},
@@ -67,65 +50,27 @@ static struct acpi_blacklist_item acpi_blacklist[] __initdata = {
{"IBM ", "TP600E ", 0x00000105, ACPI_SIG_DSDT, less_than_or_equal,
"Incorrect _ADR", 1},
- {""}
+ { }
};
int __init acpi_blacklisted(void)
{
- int i = 0;
+ int i;
int blacklisted = 0;
- struct acpi_table_header table_header;
-
- while (acpi_blacklist[i].oem_id[0] != '\0') {
- if (acpi_get_table_header(acpi_blacklist[i].table, 0, &table_header)) {
- i++;
- continue;
- }
-
- if (strncmp(acpi_blacklist[i].oem_id, table_header.oem_id, 6)) {
- i++;
- continue;
- }
-
- if (strncmp
- (acpi_blacklist[i].oem_table_id, table_header.oem_table_id,
- 8)) {
- i++;
- continue;
- }
-
- if ((acpi_blacklist[i].oem_revision_predicate == all_versions)
- || (acpi_blacklist[i].oem_revision_predicate ==
- less_than_or_equal
- && table_header.oem_revision <=
- acpi_blacklist[i].oem_revision)
- || (acpi_blacklist[i].oem_revision_predicate ==
- greater_than_or_equal
- && table_header.oem_revision >=
- acpi_blacklist[i].oem_revision)
- || (acpi_blacklist[i].oem_revision_predicate == equal
- && table_header.oem_revision ==
- acpi_blacklist[i].oem_revision)) {
- printk(KERN_ERR PREFIX
- "Vendor \"%6.6s\" System \"%8.8s\" "
- "Revision 0x%x has a known ACPI BIOS problem.\n",
- acpi_blacklist[i].oem_id,
- acpi_blacklist[i].oem_table_id,
- acpi_blacklist[i].oem_revision);
+ i = acpi_match_platform_list(acpi_blacklist);
+ if (i >= 0) {
+ pr_err(PREFIX "Vendor \"%6.6s\" System \"%8.8s\" Revision 0x%x has a known ACPI BIOS problem.\n",
+ acpi_blacklist[i].oem_id,
+ acpi_blacklist[i].oem_table_id,
+ acpi_blacklist[i].oem_revision);
- printk(KERN_ERR PREFIX
- "Reason: %s. This is a %s error\n",
- acpi_blacklist[i].reason,
- (acpi_blacklist[i].
- is_critical_error ? "non-recoverable" :
- "recoverable"));
+ pr_err(PREFIX "Reason: %s. This is a %s error\n",
+ acpi_blacklist[i].reason,
+ (acpi_blacklist[i].data ?
+ "non-recoverable" : "recoverable"));
- blacklisted = acpi_blacklist[i].is_critical_error;
- break;
- } else {
- i++;
- }
+ blacklisted = acpi_blacklist[i].data;
}
(void)early_acpi_osi_init();
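The conversion above replaces the driver-local acpi_blacklist_item table walk with the generic acpi_match_platform_list() helper and its struct acpi_platform_list entries. That structure is defined on the ACPICA side and is not shown in this diff; judging from the positional initializers kept above, its shape is assumed to be roughly:

/* Assumed layout matching the initializers above; the authoritative definition is in ACPICA headers */
struct acpi_platform_list {
	char oem_id[7];			/* OEM ID from the table header */
	char oem_table_id[9];		/* OEM table ID from the table header */
	u32 oem_revision;		/* revision compared using 'pred' */
	char *table;			/* table signature, e.g. ACPI_SIG_DSDT */
	enum acpi_predicate pred;	/* all_versions, less_than_or_equal, ... */
	char *reason;			/* human-readable description */
	u32 data;			/* caller-private; the 'is critical' flag here */
};

/* Returns the index of the first matching entry, or a negative value if none match */
int acpi_match_platform_list(const struct acpi_platform_list *plat);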
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index af74b420ec83..59f2f96fdb7e 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -995,9 +995,6 @@ void __init acpi_early_init(void)
printk(KERN_INFO PREFIX "Core revision %08x\n", ACPI_CA_VERSION);
- /* It's safe to verify table checksums during late stage */
- acpi_gbl_verify_table_checksum = TRUE;
-
/* enable workarounds, unless strict ACPI spec. compliance */
if (!acpi_strict)
acpi_gbl_enable_interpreter_slack = TRUE;
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 2ed6935d4483..fbcc73f7a099 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -401,6 +401,8 @@ static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used)
if (val != ACPI_NOTIFY_DEVICE_WAKE)
return;
+ acpi_handle_debug(handle, "Wake notify\n");
+
adev = acpi_bus_get_acpi_device(handle);
if (!adev)
return;
@@ -409,8 +411,12 @@ static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used)
if (adev->wakeup.flags.notifier_present) {
pm_wakeup_ws_event(adev->wakeup.ws, 0, acpi_s2idle_wakeup());
- if (adev->wakeup.context.func)
+ if (adev->wakeup.context.func) {
+ acpi_handle_debug(handle, "Running %pF for %s\n",
+ adev->wakeup.context.func,
+ dev_name(adev->wakeup.context.dev));
adev->wakeup.context.func(&adev->wakeup.context);
+ }
}
mutex_unlock(&acpi_pm_notifier_lock);
@@ -682,55 +688,88 @@ static void acpi_pm_notify_work_func(struct acpi_device_wakeup_context *context)
}
}
+static DEFINE_MUTEX(acpi_wakeup_lock);
+
+static int __acpi_device_wakeup_enable(struct acpi_device *adev,
+ u32 target_state, int max_count)
+{
+ struct acpi_device_wakeup *wakeup = &adev->wakeup;
+ acpi_status status;
+ int error = 0;
+
+ mutex_lock(&acpi_wakeup_lock);
+
+ if (wakeup->enable_count >= max_count)
+ goto out;
+
+ if (wakeup->enable_count > 0)
+ goto inc;
+
+ error = acpi_enable_wakeup_device_power(adev, target_state);
+ if (error)
+ goto out;
+
+ status = acpi_enable_gpe(wakeup->gpe_device, wakeup->gpe_number);
+ if (ACPI_FAILURE(status)) {
+ acpi_disable_wakeup_device_power(adev);
+ error = -EIO;
+ goto out;
+ }
+
+inc:
+ wakeup->enable_count++;
+
+out:
+ mutex_unlock(&acpi_wakeup_lock);
+ return error;
+}
+
/**
- * acpi_device_wakeup - Enable/disable wakeup functionality for device.
- * @adev: ACPI device to enable/disable wakeup functionality for.
+ * acpi_device_wakeup_enable - Enable wakeup functionality for device.
+ * @adev: ACPI device to enable wakeup functionality for.
* @target_state: State the system is transitioning into.
- * @enable: Whether to enable or disable the wakeup functionality.
*
- * Enable/disable the GPE associated with @adev so that it can generate
- * wakeup signals for the device in response to external (remote) events and
- * enable/disable device wakeup power.
+ * Enable the GPE associated with @adev so that it can generate wakeup signals
+ * for the device in response to external (remote) events and enable wakeup
+ * power for it.
+ *
+ * Callers must ensure that @adev is a valid ACPI device node before executing
+ * this function.
+ */
+static int acpi_device_wakeup_enable(struct acpi_device *adev, u32 target_state)
+{
+ return __acpi_device_wakeup_enable(adev, target_state, 1);
+}
+
+/**
+ * acpi_device_wakeup_disable - Disable wakeup functionality for device.
+ * @adev: ACPI device to disable wakeup functionality for.
+ *
+ * Disable the GPE associated with @adev and disable wakeup power for it.
*
* Callers must ensure that @adev is a valid ACPI device node before executing
* this function.
*/
-static int acpi_device_wakeup(struct acpi_device *adev, u32 target_state,
- bool enable)
+static void acpi_device_wakeup_disable(struct acpi_device *adev)
{
struct acpi_device_wakeup *wakeup = &adev->wakeup;
- if (enable) {
- acpi_status res;
- int error;
+ mutex_lock(&acpi_wakeup_lock);
- if (adev->wakeup.flags.enabled)
- return 0;
+ if (!wakeup->enable_count)
+ goto out;
- error = acpi_enable_wakeup_device_power(adev, target_state);
- if (error)
- return error;
+ acpi_disable_gpe(wakeup->gpe_device, wakeup->gpe_number);
+ acpi_disable_wakeup_device_power(adev);
- res = acpi_enable_gpe(wakeup->gpe_device, wakeup->gpe_number);
- if (ACPI_FAILURE(res)) {
- acpi_disable_wakeup_device_power(adev);
- return -EIO;
- }
- adev->wakeup.flags.enabled = 1;
- } else if (adev->wakeup.flags.enabled) {
- acpi_disable_gpe(wakeup->gpe_device, wakeup->gpe_number);
- acpi_disable_wakeup_device_power(adev);
- adev->wakeup.flags.enabled = 0;
- }
- return 0;
+ wakeup->enable_count--;
+
+out:
+ mutex_unlock(&acpi_wakeup_lock);
}
-/**
- * acpi_pm_set_device_wakeup - Enable/disable remote wakeup for given device.
- * @dev: Device to enable/disable to generate wakeup events.
- * @enable: Whether to enable or disable the wakeup functionality.
- */
-int acpi_pm_set_device_wakeup(struct device *dev, bool enable)
+static int __acpi_pm_set_device_wakeup(struct device *dev, bool enable,
+ int max_count)
{
struct acpi_device *adev;
int error;
@@ -744,13 +783,41 @@ int acpi_pm_set_device_wakeup(struct device *dev, bool enable)
if (!acpi_device_can_wakeup(adev))
return -EINVAL;
- error = acpi_device_wakeup(adev, acpi_target_system_state(), enable);
+ if (!enable) {
+ acpi_device_wakeup_disable(adev);
+ dev_dbg(dev, "Wakeup disabled by ACPI\n");
+ return 0;
+ }
+
+ error = __acpi_device_wakeup_enable(adev, acpi_target_system_state(),
+ max_count);
if (!error)
- dev_dbg(dev, "Wakeup %s by ACPI\n", enable ? "enabled" : "disabled");
+ dev_dbg(dev, "Wakeup enabled by ACPI\n");
return error;
}
-EXPORT_SYMBOL(acpi_pm_set_device_wakeup);
+
+/**
+ * acpi_pm_set_device_wakeup - Enable/disable remote wakeup for given device.
+ * @dev: Device to enable/disable to generate wakeup events.
+ * @enable: Whether to enable or disable the wakeup functionality.
+ */
+int acpi_pm_set_device_wakeup(struct device *dev, bool enable)
+{
+ return __acpi_pm_set_device_wakeup(dev, enable, 1);
+}
+EXPORT_SYMBOL_GPL(acpi_pm_set_device_wakeup);
+
+/**
+ * acpi_pm_set_bridge_wakeup - Enable/disable remote wakeup for given bridge.
+ * @dev: Bridge device to enable/disable to generate wakeup events.
+ * @enable: Whether to enable or disable the wakeup functionality.
+ */
+int acpi_pm_set_bridge_wakeup(struct device *dev, bool enable)
+{
+ return __acpi_pm_set_device_wakeup(dev, enable, INT_MAX);
+}
+EXPORT_SYMBOL_GPL(acpi_pm_set_bridge_wakeup);
/**
* acpi_dev_pm_low_power - Put ACPI device into a low-power state.
@@ -800,13 +867,15 @@ int acpi_dev_runtime_suspend(struct device *dev)
remote_wakeup = dev_pm_qos_flags(dev, PM_QOS_FLAG_REMOTE_WAKEUP) >
PM_QOS_FLAGS_NONE;
- error = acpi_device_wakeup(adev, ACPI_STATE_S0, remote_wakeup);
- if (remote_wakeup && error)
- return -EAGAIN;
+ if (remote_wakeup) {
+ error = acpi_device_wakeup_enable(adev, ACPI_STATE_S0);
+ if (error)
+ return -EAGAIN;
+ }
error = acpi_dev_pm_low_power(dev, adev, ACPI_STATE_S0);
- if (error)
- acpi_device_wakeup(adev, ACPI_STATE_S0, false);
+ if (error && remote_wakeup)
+ acpi_device_wakeup_disable(adev);
return error;
}
@@ -829,7 +898,7 @@ int acpi_dev_runtime_resume(struct device *dev)
return 0;
error = acpi_dev_pm_full_power(adev);
- acpi_device_wakeup(adev, ACPI_STATE_S0, false);
+ acpi_device_wakeup_disable(adev);
return error;
}
EXPORT_SYMBOL_GPL(acpi_dev_runtime_resume);
@@ -884,13 +953,15 @@ int acpi_dev_suspend_late(struct device *dev)
target_state = acpi_target_system_state();
wakeup = device_may_wakeup(dev) && acpi_device_can_wakeup(adev);
- error = acpi_device_wakeup(adev, target_state, wakeup);
- if (wakeup && error)
- return error;
+ if (wakeup) {
+ error = acpi_device_wakeup_enable(adev, target_state);
+ if (error)
+ return error;
+ }
error = acpi_dev_pm_low_power(dev, adev, target_state);
- if (error)
- acpi_device_wakeup(adev, ACPI_STATE_UNKNOWN, false);
+ if (error && wakeup)
+ acpi_device_wakeup_disable(adev);
return error;
}
@@ -913,7 +984,7 @@ int acpi_dev_resume_early(struct device *dev)
return 0;
error = acpi_dev_pm_full_power(adev);
- acpi_device_wakeup(adev, ACPI_STATE_UNKNOWN, false);
+ acpi_device_wakeup_disable(adev);
return error;
}
EXPORT_SYMBOL_GPL(acpi_dev_resume_early);
@@ -1056,7 +1127,7 @@ static void acpi_dev_pm_detach(struct device *dev, bool power_off)
*/
dev_pm_qos_hide_latency_limit(dev);
dev_pm_qos_hide_flags(dev);
- acpi_device_wakeup(adev, ACPI_STATE_S0, false);
+ acpi_device_wakeup_disable(adev);
acpi_dev_pm_low_power(dev, adev, ACPI_STATE_S0);
}
}
@@ -1100,7 +1171,7 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
dev_pm_domain_set(dev, &acpi_general_pm_domain);
if (power_on) {
acpi_dev_pm_full_power(adev);
- acpi_device_wakeup(adev, ACPI_STATE_S0, false);
+ acpi_device_wakeup_disable(adev);
}
dev->pm_domain->detach = acpi_dev_pm_detach;
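With the enable counting introduced above, acpi_pm_set_device_wakeup() still arms the wakeup GPE at most once per device (max_count of 1), while acpi_pm_set_bridge_wakeup() lets a bridge accumulate one enable per child that needs wakeup and disarms only when the count drops back to zero. A rough caller-side sketch (illustrative, not from this patch):

#include <linux/acpi.h>
#include <linux/pm_wakeup.h>

static int example_driver_suspend(struct device *dev)
{
	/* Repeated enables from the same device do not stack beyond max_count == 1 */
	if (device_may_wakeup(dev))
		return acpi_pm_set_device_wakeup(dev, true);

	return 0;
}

static int example_driver_resume(struct device *dev)
{
	if (device_may_wakeup(dev))
		acpi_pm_set_device_wakeup(dev, false);

	return 0;
}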
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 0c00208b423e..2305e1ab978e 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -585,7 +585,7 @@ static struct attribute *dock_attributes[] = {
NULL
};
-static struct attribute_group dock_attribute_group = {
+static const struct attribute_group dock_attribute_group = {
.attrs = dock_attributes
};
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index ddb01e9fa5b2..fdfae6f3c0b1 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -112,8 +112,7 @@ enum {
EC_FLAGS_EVT_HANDLER_INSTALLED, /* _Qxx handlers installed */
EC_FLAGS_STARTED, /* Driver is started */
EC_FLAGS_STOPPED, /* Driver is stopped */
- EC_FLAGS_COMMAND_STORM, /* GPE storms occurred to the
- * current command processing */
+ EC_FLAGS_GPE_MASKED, /* GPE masked */
};
#define ACPI_EC_COMMAND_POLL 0x01 /* Available for command byte */
@@ -151,6 +150,10 @@ static bool ec_freeze_events __read_mostly = false;
module_param(ec_freeze_events, bool, 0644);
MODULE_PARM_DESC(ec_freeze_events, "Disabling event handling during suspend/resume");
+static bool ec_no_wakeup __read_mostly;
+module_param(ec_no_wakeup, bool, 0644);
+MODULE_PARM_DESC(ec_no_wakeup, "Do not wake up from suspend-to-idle");
+
struct acpi_ec_query_handler {
struct list_head node;
acpi_ec_query_func func;
@@ -421,19 +424,19 @@ static void acpi_ec_complete_request(struct acpi_ec *ec)
wake_up(&ec->wait);
}
-static void acpi_ec_set_storm(struct acpi_ec *ec, u8 flag)
+static void acpi_ec_mask_gpe(struct acpi_ec *ec)
{
- if (!test_bit(flag, &ec->flags)) {
+ if (!test_bit(EC_FLAGS_GPE_MASKED, &ec->flags)) {
acpi_ec_disable_gpe(ec, false);
ec_dbg_drv("Polling enabled");
- set_bit(flag, &ec->flags);
+ set_bit(EC_FLAGS_GPE_MASKED, &ec->flags);
}
}
-static void acpi_ec_clear_storm(struct acpi_ec *ec, u8 flag)
+static void acpi_ec_unmask_gpe(struct acpi_ec *ec)
{
- if (test_bit(flag, &ec->flags)) {
- clear_bit(flag, &ec->flags);
+ if (test_bit(EC_FLAGS_GPE_MASKED, &ec->flags)) {
+ clear_bit(EC_FLAGS_GPE_MASKED, &ec->flags);
acpi_ec_enable_gpe(ec, false);
ec_dbg_drv("Polling disabled");
}
@@ -460,7 +463,7 @@ static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec)
static void acpi_ec_submit_query(struct acpi_ec *ec)
{
- acpi_ec_set_storm(ec, EC_FLAGS_COMMAND_STORM);
+ acpi_ec_mask_gpe(ec);
if (!acpi_ec_event_enabled(ec))
return;
if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
@@ -476,7 +479,7 @@ static void acpi_ec_complete_query(struct acpi_ec *ec)
if (test_and_clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
ec_dbg_evt("Command(%s) unblocked",
acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
- acpi_ec_clear_storm(ec, EC_FLAGS_COMMAND_STORM);
+ acpi_ec_unmask_gpe(ec);
}
static inline void __acpi_ec_enable_event(struct acpi_ec *ec)
@@ -535,6 +538,14 @@ static void acpi_ec_disable_event(struct acpi_ec *ec)
spin_unlock_irqrestore(&ec->lock, flags);
__acpi_ec_flush_event(ec);
}
+
+void acpi_ec_flush_work(void)
+{
+ if (first_ec)
+ __acpi_ec_flush_event(first_ec);
+
+ flush_scheduled_work();
+}
#endif /* CONFIG_PM_SLEEP */
static bool acpi_ec_guard_event(struct acpi_ec *ec)
@@ -688,7 +699,7 @@ err:
++t->irq_count;
/* Allow triggering on 0 threshold */
if (t->irq_count == ec_storm_threshold)
- acpi_ec_set_storm(ec, EC_FLAGS_COMMAND_STORM);
+ acpi_ec_mask_gpe(ec);
}
}
out:
@@ -786,7 +797,7 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
spin_lock_irqsave(&ec->lock, tmp);
if (t->irq_count == ec_storm_threshold)
- acpi_ec_clear_storm(ec, EC_FLAGS_COMMAND_STORM);
+ acpi_ec_unmask_gpe(ec);
ec_dbg_req("Command(%s) stopped", acpi_ec_cmd_string(t->command));
ec->curr = NULL;
/* Disable GPE for command processing (IBF=0/OBF=1) */
@@ -1574,9 +1585,7 @@ static bool acpi_is_boot_ec(struct acpi_ec *ec)
{
if (!boot_ec)
return false;
- if (ec->handle == boot_ec->handle &&
- ec->gpe == boot_ec->gpe &&
- ec->command_addr == boot_ec->command_addr &&
+ if (ec->command_addr == boot_ec->command_addr &&
ec->data_addr == boot_ec->data_addr)
return true;
return false;
@@ -1601,6 +1610,13 @@ static int acpi_ec_add(struct acpi_device *device)
if (acpi_is_boot_ec(ec)) {
boot_ec_is_ecdt = false;
+ /*
+ * Trust PNP0C09 namespace location rather than ECDT ID.
+ *
+ * But trust ECDT GPE rather than _GPE because of ASUS quirks,
+ * so do not change boot_ec->gpe to ec->gpe.
+ */
+ boot_ec->handle = ec->handle;
acpi_handle_debug(ec->handle, "duplicated.\n");
acpi_ec_free(ec);
ec = boot_ec;
@@ -1729,24 +1745,26 @@ error:
* functioning ECDT EC first in order to handle the events.
* https://bugzilla.kernel.org/show_bug.cgi?id=115021
*/
-int __init acpi_ec_ecdt_start(void)
+static int __init acpi_ec_ecdt_start(void)
{
acpi_handle handle;
if (!boot_ec)
return -ENODEV;
- /*
- * The DSDT EC should have already been started in
- * acpi_ec_add().
- */
+ /* In case acpi_ec_ecdt_start() is called after acpi_ec_add() */
if (!boot_ec_is_ecdt)
return -ENODEV;
/*
* At this point, the namespace and the GPE is initialized, so
* start to find the namespace objects and handle the events.
+ *
+ * Note: ec->handle can be valid if this function is called after
+ * acpi_ec_add(), hence the fast path.
*/
- if (!acpi_ec_ecdt_get_handle(&handle))
+ if (boot_ec->handle != ACPI_ROOT_OBJECT)
+ handle = boot_ec->handle;
+ else if (!acpi_ec_ecdt_get_handle(&handle))
return -ENODEV;
return acpi_config_boot_ec(boot_ec, handle, true, true);
}
@@ -1880,6 +1898,32 @@ static int acpi_ec_suspend(struct device *dev)
return 0;
}
+static int acpi_ec_suspend_noirq(struct device *dev)
+{
+ struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));
+
+ /*
+ * The SCI handler doesn't run at this point, so the GPE can be
+ * masked at the low level without side effects.
+ */
+ if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
+ ec->reference_count >= 1)
+ acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
+
+ return 0;
+}
+
+static int acpi_ec_resume_noirq(struct device *dev)
+{
+ struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));
+
+ if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
+ ec->reference_count >= 1)
+ acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
+
+ return 0;
+}
+
static int acpi_ec_resume(struct device *dev)
{
struct acpi_ec *ec =
@@ -1891,6 +1935,7 @@ static int acpi_ec_resume(struct device *dev)
#endif
static const struct dev_pm_ops acpi_ec_pm = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend_noirq, acpi_ec_resume_noirq)
SET_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend, acpi_ec_resume)
};
@@ -1964,20 +2009,17 @@ static inline void acpi_ec_query_exit(void)
int __init acpi_ec_init(void)
{
int result;
+ int ecdt_fail, dsdt_fail;
/* register workqueue for _Qxx evaluations */
result = acpi_ec_query_init();
if (result)
- goto err_exit;
- /* Now register the driver for the EC */
- result = acpi_bus_register_driver(&acpi_ec_driver);
- if (result)
- goto err_exit;
+ return result;
-err_exit:
- if (result)
- acpi_ec_query_exit();
- return result;
+ /* Drivers must be started after acpi_ec_query_init() */
+ dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver);
+ ecdt_fail = acpi_ec_ecdt_start();
+ return ecdt_fail && dsdt_fail ? -ENODEV : 0;
}
/* EC driver currently not unloadable */
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 9531d3276f65..4361c4415b4f 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -185,7 +185,6 @@ typedef int (*acpi_ec_query_func) (void *data);
int acpi_ec_init(void);
int acpi_ec_ecdt_probe(void);
int acpi_ec_dsdt_probe(void);
-int acpi_ec_ecdt_start(void);
void acpi_ec_block_transactions(void);
void acpi_ec_unblock_transactions(void);
int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
@@ -193,6 +192,10 @@ int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
void *data);
void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit);
+#ifdef CONFIG_PM_SLEEP
+void acpi_ec_flush_work(void);
+#endif
+
/*--------------------------------------------------------------------------
Suspend/Resume
@@ -229,6 +232,12 @@ static inline void suspend_nvs_restore(void) {}
void acpi_init_properties(struct acpi_device *adev);
void acpi_free_properties(struct acpi_device *adev);
+#ifdef CONFIG_X86
+void acpi_extract_apple_properties(struct acpi_device *adev);
+#else
+static inline void acpi_extract_apple_properties(struct acpi_device *adev) {}
+#endif
+
/*--------------------------------------------------------------------------
Watchdog
-------------------------------------------------------------------------- */
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index b75b734ee73a..1893e416e7c0 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -2884,7 +2884,7 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
* need to be interruptible while waiting.
*/
INIT_WORK_ONSTACK(&flush.work, flush_probe);
- COMPLETION_INITIALIZER_ONSTACK(flush.cmp);
+ init_completion(&flush.cmp);
queue_work(nfit_wq, &flush.work);
mutex_unlock(&acpi_desc->init_mutex);
@@ -3160,6 +3160,8 @@ static struct acpi_driver acpi_nfit_driver = {
static __init int nfit_init(void)
{
+ int ret;
+
BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
@@ -3187,8 +3189,14 @@ static __init int nfit_init(void)
return -ENOMEM;
nfit_mce_register();
+ ret = acpi_bus_register_driver(&acpi_nfit_driver);
+ if (ret) {
+ nfit_mce_unregister();
+ destroy_workqueue(nfit_wq);
+ }
+
+ return ret;
- return acpi_bus_register_driver(&acpi_nfit_driver);
}
static __exit void nfit_exit(void)
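The flush-probe hunk above fixes an initialization bug: COMPLETION_INITIALIZER_ONSTACK() only yields an initializer value, so using it as a bare statement leaves flush.cmp uninitialized, whereas init_completion() initializes the object in place. A minimal sketch of the two correct patterns, assuming nothing beyond <linux/completion.h>; the flush_probe work item itself is omitted:

#include <linux/completion.h>

struct flush_ctx {
	struct completion cmp;
};

static void flush_ctx_setup(struct flush_ctx *ctx)
{
	/* Initialize an embedded completion in place. */
	init_completion(&ctx->cmp);
}

/*
 * For a local variable, the initializer macro belongs on the right-hand
 * side of the declaration instead:
 *
 *	struct completion done = COMPLETION_INITIALIZER_ONSTACK(done);
 *
 * Used as a standalone statement it compiles but initializes nothing,
 * which is the bug the hunk above removes.
 */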
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index edb0c79f7c64..917f1cc0fda4 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -443,7 +443,7 @@ int __init acpi_numa_init(void)
* So go over all cpu entries in SRAT to get apicid to node mapping.
*/
- /* SRAT: Static Resource Affinity Table */
+ /* SRAT: System Resource Affinity Table */
if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
struct acpi_subtable_proc srat_proc[3];
diff --git a/drivers/acpi/osi.c b/drivers/acpi/osi.c
index 723bee58bbcf..19cdd8a783a9 100644
--- a/drivers/acpi/osi.c
+++ b/drivers/acpi/osi.c
@@ -27,6 +27,7 @@
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
+#include <linux/platform_data/x86/apple.h>
#include "internal.h"
@@ -257,12 +258,11 @@ bool acpi_osi_is_win8(void)
}
EXPORT_SYMBOL(acpi_osi_is_win8);
-static void __init acpi_osi_dmi_darwin(bool enable,
- const struct dmi_system_id *d)
+static void __init acpi_osi_dmi_darwin(void)
{
- pr_notice("DMI detected to setup _OSI(\"Darwin\"): %s\n", d->ident);
+ pr_notice("DMI detected to setup _OSI(\"Darwin\"): Apple hardware\n");
osi_config.darwin_dmi = 1;
- __acpi_osi_setup_darwin(enable);
+ __acpi_osi_setup_darwin(true);
}
static void __init acpi_osi_dmi_linux(bool enable,
@@ -273,13 +273,6 @@ static void __init acpi_osi_dmi_linux(bool enable,
__acpi_osi_setup_linux(enable);
}
-static int __init dmi_enable_osi_darwin(const struct dmi_system_id *d)
-{
- acpi_osi_dmi_darwin(true, d);
-
- return 0;
-}
-
static int __init dmi_enable_osi_linux(const struct dmi_system_id *d)
{
acpi_osi_dmi_linux(true, d);
@@ -481,30 +474,16 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_NAME, "1015PX"),
},
},
-
- /*
- * Enable _OSI("Darwin") for all apple platforms.
- */
- {
- .callback = dmi_enable_osi_darwin,
- .ident = "Apple hardware",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
- },
- },
- {
- .callback = dmi_enable_osi_darwin,
- .ident = "Apple hardware",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
- },
- },
{}
};
static __init void acpi_osi_dmi_blacklisted(void)
{
dmi_check_system(acpi_osi_dmi_table);
+
+ /* Enable _OSI("Darwin") for Apple platforms. */
+ if (x86_apple_machine)
+ acpi_osi_dmi_darwin();
}
int __init early_acpi_osi_init(void)
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 9eec3095e6c3..6fc204a52493 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -33,6 +33,7 @@
#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/dmi.h>
+#include <linux/platform_data/x86/apple.h>
#include <acpi/apei.h> /* for acpi_hest_init() */
#include "internal.h"
@@ -431,8 +432,7 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm)
* been called successfully. We know the feature set supported by the
* platform, so avoid calling _OSC at all
*/
-
- if (dmi_match(DMI_SYS_VENDOR, "Apple Inc.")) {
+ if (x86_apple_machine) {
root->osc_control_set = ~OSC_PCI_EXPRESS_PME_CONTROL;
decode_osc_control(root, "OS assumes control of",
root->osc_control_set);
diff --git a/drivers/acpi/pmic/intel_pmic_xpower.c b/drivers/acpi/pmic/intel_pmic_xpower.c
index 3b7d5be5b7ed..6c99d3f81095 100644
--- a/drivers/acpi/pmic/intel_pmic_xpower.c
+++ b/drivers/acpi/pmic/intel_pmic_xpower.c
@@ -27,6 +27,9 @@
#define GPI1_LDO_ON (3 << 0)
#define GPI1_LDO_OFF (4 << 0)
+#define AXP288_ADC_TS_PIN_GPADC 0xf2
+#define AXP288_ADC_TS_PIN_ON 0xf3
+
static struct pmic_table power_table[] = {
{
.address = 0x00,
@@ -209,11 +212,23 @@ static int intel_xpower_pmic_update_power(struct regmap *regmap, int reg,
static int intel_xpower_pmic_get_raw_temp(struct regmap *regmap, int reg)
{
u8 buf[2];
+ int ret;
- if (regmap_bulk_read(regmap, AXP288_GP_ADC_H, buf, 2))
- return -EIO;
+ ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL,
+ AXP288_ADC_TS_PIN_GPADC);
+ if (ret)
+ return ret;
+
+ /* After switching to the GPADC pin give things some time to settle */
+ usleep_range(6000, 10000);
+
+ ret = regmap_bulk_read(regmap, AXP288_GP_ADC_H, buf, 2);
+ if (ret == 0)
+ ret = (buf[0] << 4) + ((buf[1] >> 4) & 0x0f);
+
+ regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON);
- return (buf[0] << 4) + ((buf[1] >> 4) & 0x0F);
+ return ret;
}
static struct intel_pmic_opregion_data intel_xpower_pmic_opregion_data = {
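The reworked intel_xpower_pmic_get_raw_temp() above switches the TS pin to GPADC mode, lets the input settle, reads the two GP ADC registers and packs them into a 12-bit raw value. A throwaway userspace model of just that bit packing, mirroring the (buf[0] << 4) + ((buf[1] >> 4) & 0x0f) expression; the register contents are made up:

#include <stdio.h>
#include <stdint.h>

/* Combine the GP ADC high/low register pair into a 12-bit sample. */
static unsigned int gpadc_raw(uint8_t high, uint8_t low)
{
	return ((unsigned int)high << 4) | ((low >> 4) & 0x0f);
}

int main(void)
{
	uint8_t buf[2] = { 0xab, 0xc0 };	/* hypothetical AXP288_GP_ADC_H/L values */

	printf("raw = 0x%03x\n", gpadc_raw(buf[0], buf[1]));	/* prints raw = 0xabc */
	return 0;
}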
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 591d1dd3f04e..9d6aff22684e 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -237,7 +237,7 @@ static int __acpi_processor_start(struct acpi_device *device)
result = acpi_cppc_processor_probe(pr);
if (result && !IS_ENABLED(CONFIG_ACPI_CPU_FREQ_PSS))
- dev_warn(&device->dev, "CPPC data invalid or not present\n");
+ dev_dbg(&device->dev, "CPPC data invalid or not present\n");
if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver)
acpi_processor_power_init(pr);
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 5c8aa9cf62d7..2736e25e9dc6 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -48,6 +48,8 @@
#define _COMPONENT ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");
+#define ACPI_IDLE_STATE_START (IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX) ? 1 : 0)
+
static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
module_param(max_cstate, uint, 0000);
static unsigned int nocst __read_mostly;
@@ -708,8 +710,6 @@ static DEFINE_RAW_SPINLOCK(c3_lock);
static void acpi_idle_enter_bm(struct acpi_processor *pr,
struct acpi_processor_cx *cx, bool timer_bc)
{
- acpi_unlazy_tlb(smp_processor_id());
-
/*
* Must be done before busmaster disable as we might need to
* access HPET !
@@ -761,7 +761,7 @@ static int acpi_idle_enter(struct cpuidle_device *dev,
if (cx->type != ACPI_STATE_C1) {
if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) {
- index = CPUIDLE_DRIVER_STATE_START;
+ index = ACPI_IDLE_STATE_START;
cx = per_cpu(acpi_cstate[index], dev->cpu);
} else if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) {
if (cx->bm_sts_skip || !acpi_idle_bm_check()) {
@@ -789,7 +789,7 @@ static int acpi_idle_enter(struct cpuidle_device *dev,
return index;
}
-static void acpi_idle_enter_freeze(struct cpuidle_device *dev,
+static void acpi_idle_enter_s2idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
@@ -813,7 +813,7 @@ static void acpi_idle_enter_freeze(struct cpuidle_device *dev,
static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
struct cpuidle_device *dev)
{
- int i, count = CPUIDLE_DRIVER_STATE_START;
+ int i, count = ACPI_IDLE_STATE_START;
struct acpi_processor_cx *cx;
if (max_cstate == 0)
@@ -840,7 +840,7 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
static int acpi_processor_setup_cstates(struct acpi_processor *pr)
{
- int i, count = CPUIDLE_DRIVER_STATE_START;
+ int i, count;
struct acpi_processor_cx *cx;
struct cpuidle_state *state;
struct cpuidle_driver *drv = &acpi_idle_driver;
@@ -848,6 +848,13 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr)
if (max_cstate == 0)
max_cstate = 1;
+ if (IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX)) {
+ cpuidle_poll_state_init(drv);
+ count = 1;
+ } else {
+ count = 0;
+ }
+
for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
cx = &pr->power.states[i];
@@ -867,14 +874,14 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr)
drv->safe_state_index = count;
}
/*
- * Halt-induced C1 is not good for ->enter_freeze, because it
+ * Halt-induced C1 is not good for ->enter_s2idle, because it
* re-enables interrupts on exit. Moreover, C1 is generally not
* particularly interesting from the suspend-to-idle angle, so
* avoid C1 and the situations in which we may need to fall back
* to it altogether.
*/
if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr))
- state->enter_freeze = acpi_idle_enter_freeze;
+ state->enter_s2idle = acpi_idle_enter_s2idle;
count++;
if (count == CPUIDLE_STATE_MAX)
@@ -1291,7 +1298,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
return -EINVAL;
drv->safe_state_index = -1;
- for (i = CPUIDLE_DRIVER_STATE_START; i < CPUIDLE_STATE_MAX; i++) {
+ for (i = ACPI_IDLE_STATE_START; i < CPUIDLE_STATE_MAX; i++) {
drv->states[i].name[0] = '\0';
drv->states[i].desc[0] = '\0';
}
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 917c789f953d..c1c216163de3 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -19,21 +19,19 @@
#include "internal.h"
-static int acpi_data_get_property_array(struct acpi_device_data *data,
+static int acpi_data_get_property_array(const struct acpi_device_data *data,
const char *name,
acpi_object_type type,
const union acpi_object **obj);
-/* ACPI _DSD device properties UUID: daffd814-6eba-4d8c-8a91-bc9bbf4aa301 */
-static const u8 prp_uuid[16] = {
- 0x14, 0xd8, 0xff, 0xda, 0xba, 0x6e, 0x8c, 0x4d,
- 0x8a, 0x91, 0xbc, 0x9b, 0xbf, 0x4a, 0xa3, 0x01
-};
-/* ACPI _DSD data subnodes UUID: dbb8e3e6-5886-4ba6-8795-1319f52a966b */
-static const u8 ads_uuid[16] = {
- 0xe6, 0xe3, 0xb8, 0xdb, 0x86, 0x58, 0xa6, 0x4b,
- 0x87, 0x95, 0x13, 0x19, 0xf5, 0x2a, 0x96, 0x6b
-};
+/* ACPI _DSD device properties GUID: daffd814-6eba-4d8c-8a91-bc9bbf4aa301 */
+static const guid_t prp_guid =
+ GUID_INIT(0xdaffd814, 0x6eba, 0x4d8c,
+ 0x8a, 0x91, 0xbc, 0x9b, 0xbf, 0x4a, 0xa3, 0x01);
+/* ACPI _DSD data subnodes GUID: dbb8e3e6-5886-4ba6-8795-1319f52a966b */
+static const guid_t ads_guid =
+ GUID_INIT(0xdbb8e3e6, 0x5886, 0x4ba6,
+ 0x87, 0x95, 0x13, 0x19, 0xf5, 0x2a, 0x96, 0x6b);
static bool acpi_enumerate_nondev_subnodes(acpi_handle scope,
const union acpi_object *desc,
@@ -56,8 +54,7 @@ static bool acpi_nondev_subnode_extract(const union acpi_object *desc,
return false;
dn->name = link->package.elements[0].string.pointer;
- dn->fwnode.type = FWNODE_ACPI_DATA;
- dn->fwnode.ops = &acpi_fwnode_ops;
+ dn->fwnode.ops = &acpi_data_fwnode_ops;
dn->parent = parent;
INIT_LIST_HEAD(&dn->data.subnodes);
@@ -190,22 +187,23 @@ static bool acpi_enumerate_nondev_subnodes(acpi_handle scope,
{
int i;
- /* Look for the ACPI data subnodes UUID. */
+ /* Look for the ACPI data subnodes GUID. */
for (i = 0; i < desc->package.count; i += 2) {
- const union acpi_object *uuid, *links;
+ const union acpi_object *guid, *links;
- uuid = &desc->package.elements[i];
+ guid = &desc->package.elements[i];
links = &desc->package.elements[i + 1];
/*
- * The first element must be a UUID and the second one must be
+ * The first element must be a GUID and the second one must be
* a package.
*/
- if (uuid->type != ACPI_TYPE_BUFFER || uuid->buffer.length != 16
- || links->type != ACPI_TYPE_PACKAGE)
+ if (guid->type != ACPI_TYPE_BUFFER ||
+ guid->buffer.length != 16 ||
+ links->type != ACPI_TYPE_PACKAGE)
break;
- if (memcmp(uuid->buffer.pointer, ads_uuid, sizeof(ads_uuid)))
+ if (!guid_equal((guid_t *)guid->buffer.pointer, &ads_guid))
continue;
return acpi_add_nondev_subnodes(scope, links, &data->subnodes,
@@ -298,26 +296,27 @@ static bool acpi_extract_properties(const union acpi_object *desc,
if (desc->package.count % 2)
return false;
- /* Look for the device properties UUID. */
+ /* Look for the device properties GUID. */
for (i = 0; i < desc->package.count; i += 2) {
- const union acpi_object *uuid, *properties;
+ const union acpi_object *guid, *properties;
- uuid = &desc->package.elements[i];
+ guid = &desc->package.elements[i];
properties = &desc->package.elements[i + 1];
/*
- * The first element must be a UUID and the second one must be
+ * The first element must be a GUID and the second one must be
* a package.
*/
- if (uuid->type != ACPI_TYPE_BUFFER || uuid->buffer.length != 16
- || properties->type != ACPI_TYPE_PACKAGE)
+ if (guid->type != ACPI_TYPE_BUFFER ||
+ guid->buffer.length != 16 ||
+ properties->type != ACPI_TYPE_PACKAGE)
break;
- if (memcmp(uuid->buffer.pointer, prp_uuid, sizeof(prp_uuid)))
+ if (!guid_equal((guid_t *)guid->buffer.pointer, &prp_guid))
continue;
/*
- * We found the matching UUID. Now validate the format of the
+ * We found the matching GUID. Now validate the format of the
* package immediately following it.
*/
if (!acpi_properties_format_valid(properties))
@@ -339,6 +338,9 @@ void acpi_init_properties(struct acpi_device *adev)
INIT_LIST_HEAD(&adev->data.subnodes);
+ if (!adev->handle)
+ return;
+
/*
* Check if ACPI_DT_NAMESPACE_HID is present and in that case we fill in
* Device Tree compatible properties for this device.
@@ -373,6 +375,9 @@ void acpi_init_properties(struct acpi_device *adev)
if (acpi_of && !adev->flags.of_compatible_ok)
acpi_handle_info(adev->handle,
ACPI_DT_NAMESPACE_HID " requires 'compatible' property\n");
+
+ if (!adev->data.pointer)
+ acpi_extract_apple_properties(adev);
}
static void acpi_destroy_nondev_subnodes(struct list_head *list)
@@ -418,7 +423,7 @@ void acpi_free_properties(struct acpi_device *adev)
* %-EINVAL if the property doesn't exist,
* %-EPROTO if the property value type doesn't match @type.
*/
-static int acpi_data_get_property(struct acpi_device_data *data,
+static int acpi_data_get_property(const struct acpi_device_data *data,
const char *name, acpi_object_type type,
const union acpi_object **obj)
{
@@ -460,20 +465,21 @@ static int acpi_data_get_property(struct acpi_device_data *data,
* @type: Expected property type.
* @obj: Location to store the property value (if not %NULL).
*/
-int acpi_dev_get_property(struct acpi_device *adev, const char *name,
+int acpi_dev_get_property(const struct acpi_device *adev, const char *name,
acpi_object_type type, const union acpi_object **obj)
{
return adev ? acpi_data_get_property(&adev->data, name, type, obj) : -EINVAL;
}
EXPORT_SYMBOL_GPL(acpi_dev_get_property);
-static struct acpi_device_data *acpi_device_data_of_node(struct fwnode_handle *fwnode)
+static const struct acpi_device_data *
+acpi_device_data_of_node(const struct fwnode_handle *fwnode)
{
- if (fwnode->type == FWNODE_ACPI) {
- struct acpi_device *adev = to_acpi_device_node(fwnode);
+ if (is_acpi_device_node(fwnode)) {
+ const struct acpi_device *adev = to_acpi_device_node(fwnode);
return &adev->data;
- } else if (fwnode->type == FWNODE_ACPI_DATA) {
- struct acpi_data_node *dn = to_acpi_data_node(fwnode);
+ } else if (is_acpi_data_node(fwnode)) {
+ const struct acpi_data_node *dn = to_acpi_data_node(fwnode);
return &dn->data;
}
return NULL;
@@ -485,8 +491,8 @@ static struct acpi_device_data *acpi_device_data_of_node(struct fwnode_handle *f
* @propname: Name of the property.
* @valptr: Location to store a pointer to the property value (if not %NULL).
*/
-int acpi_node_prop_get(struct fwnode_handle *fwnode, const char *propname,
- void **valptr)
+int acpi_node_prop_get(const struct fwnode_handle *fwnode,
+ const char *propname, void **valptr)
{
return acpi_data_get_property(acpi_device_data_of_node(fwnode),
propname, ACPI_TYPE_ANY,
@@ -512,7 +518,7 @@ int acpi_node_prop_get(struct fwnode_handle *fwnode, const char *propname,
* %-EPROTO if the property is not a package or the type of its elements
* doesn't match @type.
*/
-static int acpi_data_get_property_array(struct acpi_device_data *data,
+static int acpi_data_get_property_array(const struct acpi_device_data *data,
const char *name,
acpi_object_type type,
const union acpi_object **obj)
@@ -572,13 +578,13 @@ static int acpi_data_get_property_array(struct acpi_device_data *data,
*
* Return: %0 on success, negative error code on failure.
*/
-int __acpi_node_get_property_reference(struct fwnode_handle *fwnode,
+int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
const char *propname, size_t index, size_t num_args,
struct acpi_reference_args *args)
{
const union acpi_object *element, *end;
const union acpi_object *obj;
- struct acpi_device_data *data;
+ const struct acpi_device_data *data;
struct acpi_device *device;
int ret, idx = 0;
@@ -674,7 +680,7 @@ int __acpi_node_get_property_reference(struct fwnode_handle *fwnode,
}
EXPORT_SYMBOL_GPL(__acpi_node_get_property_reference);
-static int acpi_data_prop_read_single(struct acpi_device_data *data,
+static int acpi_data_prop_read_single(const struct acpi_device_data *data,
const char *propname,
enum dev_prop_type proptype, void *val)
{
@@ -813,7 +819,7 @@ static int acpi_copy_property_array_string(const union acpi_object *items,
return nval;
}
-static int acpi_data_prop_read(struct acpi_device_data *data,
+static int acpi_data_prop_read(const struct acpi_device_data *data,
const char *propname,
enum dev_prop_type proptype,
void *val, size_t nval)
@@ -867,7 +873,7 @@ static int acpi_data_prop_read(struct acpi_device_data *data,
return ret;
}
-int acpi_dev_prop_read(struct acpi_device *adev, const char *propname,
+int acpi_dev_prop_read(const struct acpi_device *adev, const char *propname,
enum dev_prop_type proptype, void *val, size_t nval)
{
return adev ? acpi_data_prop_read(&adev->data, propname, proptype, val, nval) : -EINVAL;
@@ -885,8 +891,9 @@ int acpi_dev_prop_read(struct acpi_device *adev, const char *propname,
* of the property. Otherwise, read at most @nval values to the array at the
* location pointed to by @val.
*/
-int acpi_node_prop_read(struct fwnode_handle *fwnode, const char *propname,
- enum dev_prop_type proptype, void *val, size_t nval)
+int acpi_node_prop_read(const struct fwnode_handle *fwnode,
+ const char *propname, enum dev_prop_type proptype,
+ void *val, size_t nval)
{
return acpi_data_prop_read(acpi_device_data_of_node(fwnode),
propname, proptype, val, nval);
@@ -897,13 +904,15 @@ int acpi_node_prop_read(struct fwnode_handle *fwnode, const char *propname,
* @fwnode: Firmware node to find the next child node for.
* @child: Handle to one of the device's child nodes or a null handle.
*/
-struct fwnode_handle *acpi_get_next_subnode(struct fwnode_handle *fwnode,
+struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
struct fwnode_handle *child)
{
- struct acpi_device *adev = to_acpi_device_node(fwnode);
- struct list_head *head, *next;
+ const struct acpi_device *adev = to_acpi_device_node(fwnode);
+ struct acpi_device *child_adev = NULL;
+ const struct list_head *head;
+ struct list_head *next;
- if (!child || child->type == FWNODE_ACPI) {
+ if (!child || is_acpi_device_node(child)) {
if (adev)
head = &adev->children;
else
@@ -913,26 +922,27 @@ struct fwnode_handle *acpi_get_next_subnode(struct fwnode_handle *fwnode,
goto nondev;
if (child) {
- adev = to_acpi_device_node(child);
- next = adev->node.next;
+ child_adev = to_acpi_device_node(child);
+ next = child_adev->node.next;
if (next == head) {
child = NULL;
goto nondev;
}
- adev = list_entry(next, struct acpi_device, node);
+ child_adev = list_entry(next, struct acpi_device, node);
} else {
- adev = list_first_entry(head, struct acpi_device, node);
+ child_adev = list_first_entry(head, struct acpi_device,
+ node);
}
- return acpi_fwnode_handle(adev);
+ return acpi_fwnode_handle(child_adev);
}
nondev:
- if (!child || child->type == FWNODE_ACPI_DATA) {
- struct acpi_data_node *data = to_acpi_data_node(fwnode);
+ if (!child || is_acpi_data_node(child)) {
+ const struct acpi_data_node *data = to_acpi_data_node(fwnode);
struct acpi_data_node *dn;
- if (adev)
- head = &adev->data.subnodes;
+ if (child_adev)
+ head = &child_adev->data.subnodes;
else if (data)
head = &data->data.subnodes;
else
@@ -963,7 +973,7 @@ struct fwnode_handle *acpi_get_next_subnode(struct fwnode_handle *fwnode,
* Returns parent node of an ACPI device or data firmware node or %NULL if
* not available.
*/
-struct fwnode_handle *acpi_node_get_parent(struct fwnode_handle *fwnode)
+struct fwnode_handle *acpi_node_get_parent(const struct fwnode_handle *fwnode)
{
if (is_acpi_data_node(fwnode)) {
/* All data nodes have parent pointer so just return that */
@@ -992,8 +1002,8 @@ struct fwnode_handle *acpi_node_get_parent(struct fwnode_handle *fwnode)
* %NULL if there is no next endpoint, ERR_PTR() in case of error. In case
* of success the next endpoint is returned.
*/
-struct fwnode_handle *acpi_graph_get_next_endpoint(struct fwnode_handle *fwnode,
- struct fwnode_handle *prev)
+struct fwnode_handle *acpi_graph_get_next_endpoint(
+ const struct fwnode_handle *fwnode, struct fwnode_handle *prev)
{
struct fwnode_handle *port = NULL;
struct fwnode_handle *endpoint;
@@ -1040,14 +1050,15 @@ struct fwnode_handle *acpi_graph_get_next_endpoint(struct fwnode_handle *fwnode,
* the child node on success, NULL otherwise.
*/
static struct fwnode_handle *acpi_graph_get_child_prop_value(
- struct fwnode_handle *fwnode, const char *prop_name, unsigned int val)
+ const struct fwnode_handle *fwnode, const char *prop_name,
+ unsigned int val)
{
struct fwnode_handle *child;
fwnode_for_each_child_node(fwnode, child) {
u32 nr;
- if (!fwnode_property_read_u32(fwnode, prop_name, &nr))
+ if (fwnode_property_read_u32(child, prop_name, &nr))
continue;
if (val == nr)
@@ -1069,17 +1080,18 @@ static struct fwnode_handle *acpi_graph_get_child_prop_value(
* fields requested by the caller. Returns %0 in case of success and
* negative errno otherwise.
*/
-int acpi_graph_get_remote_endpoint(struct fwnode_handle *fwnode,
+int acpi_graph_get_remote_endpoint(const struct fwnode_handle *__fwnode,
struct fwnode_handle **parent,
struct fwnode_handle **port,
struct fwnode_handle **endpoint)
{
+ struct fwnode_handle *fwnode;
unsigned int port_nr, endpoint_nr;
struct acpi_reference_args args;
int ret;
memset(&args, 0, sizeof(args));
- ret = acpi_node_get_property_reference(fwnode, "remote-endpoint", 0,
+ ret = acpi_node_get_property_reference(__fwnode, "remote-endpoint", 0,
&args);
if (ret)
return ret;
@@ -1121,7 +1133,7 @@ int acpi_graph_get_remote_endpoint(struct fwnode_handle *fwnode,
return 0;
}
-static bool acpi_fwnode_device_is_available(struct fwnode_handle *fwnode)
+static bool acpi_fwnode_device_is_available(const struct fwnode_handle *fwnode)
{
if (!is_acpi_device_node(fwnode))
return false;
@@ -1129,16 +1141,17 @@ static bool acpi_fwnode_device_is_available(struct fwnode_handle *fwnode)
return acpi_device_is_present(to_acpi_device_node(fwnode));
}
-static bool acpi_fwnode_property_present(struct fwnode_handle *fwnode,
+static bool acpi_fwnode_property_present(const struct fwnode_handle *fwnode,
const char *propname)
{
return !acpi_node_prop_get(fwnode, propname, NULL);
}
-static int acpi_fwnode_property_read_int_array(struct fwnode_handle *fwnode,
- const char *propname,
- unsigned int elem_size,
- void *val, size_t nval)
+static int
+acpi_fwnode_property_read_int_array(const struct fwnode_handle *fwnode,
+ const char *propname,
+ unsigned int elem_size, void *val,
+ size_t nval)
{
enum dev_prop_type type;
@@ -1162,16 +1175,17 @@ static int acpi_fwnode_property_read_int_array(struct fwnode_handle *fwnode,
return acpi_node_prop_read(fwnode, propname, type, val, nval);
}
-static int acpi_fwnode_property_read_string_array(struct fwnode_handle *fwnode,
- const char *propname,
- const char **val, size_t nval)
+static int
+acpi_fwnode_property_read_string_array(const struct fwnode_handle *fwnode,
+ const char *propname, const char **val,
+ size_t nval)
{
return acpi_node_prop_read(fwnode, propname, DEV_PROP_STRING,
val, nval);
}
static struct fwnode_handle *
-acpi_fwnode_get_named_child_node(struct fwnode_handle *fwnode,
+acpi_fwnode_get_named_child_node(const struct fwnode_handle *fwnode,
const char *childname)
{
struct fwnode_handle *child;
@@ -1187,8 +1201,34 @@ acpi_fwnode_get_named_child_node(struct fwnode_handle *fwnode,
return NULL;
}
+static int
+acpi_fwnode_get_reference_args(const struct fwnode_handle *fwnode,
+ const char *prop, const char *nargs_prop,
+ unsigned int args_count, unsigned int index,
+ struct fwnode_reference_args *args)
+{
+ struct acpi_reference_args acpi_args;
+ unsigned int i;
+ int ret;
+
+ ret = __acpi_node_get_property_reference(fwnode, prop, index,
+ args_count, &acpi_args);
+ if (ret < 0)
+ return ret;
+ if (!args)
+ return 0;
+
+ args->nargs = acpi_args.nargs;
+ args->fwnode = acpi_fwnode_handle(acpi_args.adev);
+
+ for (i = 0; i < NR_FWNODE_REFERENCE_ARGS; i++)
+ args->args[i] = i < acpi_args.nargs ? acpi_args.args[i] : 0;
+
+ return 0;
+}
+
static struct fwnode_handle *
-acpi_fwnode_graph_get_next_endpoint(struct fwnode_handle *fwnode,
+acpi_fwnode_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_handle *prev)
{
struct fwnode_handle *endpoint;
@@ -1201,7 +1241,7 @@ acpi_fwnode_graph_get_next_endpoint(struct fwnode_handle *fwnode,
}
static struct fwnode_handle *
-acpi_fwnode_graph_get_remote_endpoint(struct fwnode_handle *fwnode)
+acpi_fwnode_graph_get_remote_endpoint(const struct fwnode_handle *fwnode)
{
struct fwnode_handle *endpoint = NULL;
@@ -1210,7 +1250,13 @@ acpi_fwnode_graph_get_remote_endpoint(struct fwnode_handle *fwnode)
return endpoint;
}
-static int acpi_fwnode_graph_parse_endpoint(struct fwnode_handle *fwnode,
+static struct fwnode_handle *
+acpi_fwnode_get_parent(struct fwnode_handle *fwnode)
+{
+ return acpi_node_get_parent(fwnode);
+}
+
+static int acpi_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_endpoint *endpoint)
{
struct fwnode_handle *port_fwnode = fwnode_get_parent(fwnode);
@@ -1223,16 +1269,27 @@ static int acpi_fwnode_graph_parse_endpoint(struct fwnode_handle *fwnode,
return 0;
}
-const struct fwnode_operations acpi_fwnode_ops = {
- .device_is_available = acpi_fwnode_device_is_available,
- .property_present = acpi_fwnode_property_present,
- .property_read_int_array = acpi_fwnode_property_read_int_array,
- .property_read_string_array = acpi_fwnode_property_read_string_array,
- .get_parent = acpi_node_get_parent,
- .get_next_child_node = acpi_get_next_subnode,
- .get_named_child_node = acpi_fwnode_get_named_child_node,
- .graph_get_next_endpoint = acpi_fwnode_graph_get_next_endpoint,
- .graph_get_remote_endpoint = acpi_fwnode_graph_get_remote_endpoint,
- .graph_get_port_parent = acpi_node_get_parent,
- .graph_parse_endpoint = acpi_fwnode_graph_parse_endpoint,
-};
+#define DECLARE_ACPI_FWNODE_OPS(ops) \
+ const struct fwnode_operations ops = { \
+ .device_is_available = acpi_fwnode_device_is_available, \
+ .property_present = acpi_fwnode_property_present, \
+ .property_read_int_array = \
+ acpi_fwnode_property_read_int_array, \
+ .property_read_string_array = \
+ acpi_fwnode_property_read_string_array, \
+ .get_parent = acpi_node_get_parent, \
+ .get_next_child_node = acpi_get_next_subnode, \
+ .get_named_child_node = acpi_fwnode_get_named_child_node, \
+ .get_reference_args = acpi_fwnode_get_reference_args, \
+ .graph_get_next_endpoint = \
+ acpi_fwnode_graph_get_next_endpoint, \
+ .graph_get_remote_endpoint = \
+ acpi_fwnode_graph_get_remote_endpoint, \
+ .graph_get_port_parent = acpi_fwnode_get_parent, \
+ .graph_parse_endpoint = acpi_fwnode_graph_parse_endpoint, \
+ }; \
+ EXPORT_SYMBOL_GPL(ops)
+
+DECLARE_ACPI_FWNODE_OPS(acpi_device_fwnode_ops);
+DECLARE_ACPI_FWNODE_OPS(acpi_data_fwnode_ops);
+const struct fwnode_operations acpi_static_fwnode_ops;
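The property.c hunks above replace the hand-written little-endian byte arrays with guid_t constants built via GUID_INIT(). The first three GUID fields are stored little-endian, which is why 0xdaffd814 shows up as the bytes 14 d8 ff da in the removed prp_uuid array. A standalone userspace check of that layout; the packing helper below is a local re-statement of the expected byte order, not the kernel's <linux/uuid.h> implementation:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Pack a GUID as GUID_INIT() lays it out: first three fields little-endian,
 * the remaining eight bytes verbatim. */
static void guid_pack(uint8_t out[16], uint32_t a, uint16_t b, uint16_t c,
		      const uint8_t tail[8])
{
	out[0] = a; out[1] = a >> 8; out[2] = a >> 16; out[3] = a >> 24;
	out[4] = b; out[5] = b >> 8;
	out[6] = c; out[7] = c >> 8;
	memcpy(out + 8, tail, 8);
}

int main(void)
{
	/* _DSD device properties GUID daffd814-6eba-4d8c-8a91-bc9bbf4aa301,
	 * raw bytes copied from the removed prp_uuid array above. */
	static const uint8_t prp_raw[16] = {
		0x14, 0xd8, 0xff, 0xda, 0xba, 0x6e, 0x8c, 0x4d,
		0x8a, 0x91, 0xbc, 0x9b, 0xbf, 0x4a, 0xa3, 0x01
	};
	static const uint8_t tail[8] = {
		0x8a, 0x91, 0xbc, 0x9b, 0xbf, 0x4a, 0xa3, 0x01
	};
	uint8_t packed[16];

	guid_pack(packed, 0xdaffd814, 0x6eba, 0x4d8c, tail);
	printf("layouts %s\n", memcmp(packed, prp_raw, 16) ? "differ" : "match");
	return 0;
}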
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index cd4c4271dc4c..d85e010ee2cc 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -573,6 +573,35 @@ static acpi_status acpi_dev_process_resource(struct acpi_resource *ares,
return AE_OK;
}
+static int __acpi_dev_get_resources(struct acpi_device *adev,
+ struct list_head *list,
+ int (*preproc)(struct acpi_resource *, void *),
+ void *preproc_data, char *method)
+{
+ struct res_proc_context c;
+ acpi_status status;
+
+ if (!adev || !adev->handle || !list_empty(list))
+ return -EINVAL;
+
+ if (!acpi_has_method(adev->handle, method))
+ return 0;
+
+ c.list = list;
+ c.preproc = preproc;
+ c.preproc_data = preproc_data;
+ c.count = 0;
+ c.error = 0;
+ status = acpi_walk_resources(adev->handle, method,
+ acpi_dev_process_resource, &c);
+ if (ACPI_FAILURE(status)) {
+ acpi_dev_free_resource_list(list);
+ return c.error ? c.error : -EIO;
+ }
+
+ return c.count;
+}
+
/**
* acpi_dev_get_resources - Get current resources of a device.
* @adev: ACPI device node to get the resources for.
@@ -601,30 +630,45 @@ int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list,
int (*preproc)(struct acpi_resource *, void *),
void *preproc_data)
{
- struct res_proc_context c;
- acpi_status status;
+ return __acpi_dev_get_resources(adev, list, preproc, preproc_data,
+ METHOD_NAME__CRS);
+}
+EXPORT_SYMBOL_GPL(acpi_dev_get_resources);
- if (!adev || !adev->handle || !list_empty(list))
- return -EINVAL;
+static int is_memory(struct acpi_resource *ares, void *not_used)
+{
+ struct resource_win win;
+ struct resource *res = &win.res;
- if (!acpi_has_method(adev->handle, METHOD_NAME__CRS))
- return 0;
+ memset(&win, 0, sizeof(win));
- c.list = list;
- c.preproc = preproc;
- c.preproc_data = preproc_data;
- c.count = 0;
- c.error = 0;
- status = acpi_walk_resources(adev->handle, METHOD_NAME__CRS,
- acpi_dev_process_resource, &c);
- if (ACPI_FAILURE(status)) {
- acpi_dev_free_resource_list(list);
- return c.error ? c.error : -EIO;
- }
+ return !(acpi_dev_resource_memory(ares, res)
+ || acpi_dev_resource_address_space(ares, &win)
+ || acpi_dev_resource_ext_address_space(ares, &win));
+}
- return c.count;
+/**
+ * acpi_dev_get_dma_resources - Get current DMA resources of a device.
+ * @adev: ACPI device node to get the resources for.
+ * @list: Head of the resultant list of resources (must be empty).
+ *
+ * Evaluate the _DMA method for the given device node and process its
+ * output.
+ *
+ * The resultant struct resource objects are put on the list pointed to
+ * by @list, that must be empty initially, as members of struct
+ * resource_entry objects. Callers of this routine should use
+ * %acpi_dev_free_resource_list() to free that list.
+ *
+ * The number of resources in the output list is returned on success,
+ * an error code reflecting the error condition is returned otherwise.
+ */
+int acpi_dev_get_dma_resources(struct acpi_device *adev, struct list_head *list)
+{
+ return __acpi_dev_get_resources(adev, list, is_memory, NULL,
+ METHOD_NAME__DMA);
}
-EXPORT_SYMBOL_GPL(acpi_dev_get_resources);
+EXPORT_SYMBOL_GPL(acpi_dev_get_dma_resources);
/**
* acpi_dev_filter_resource_type - Filter ACPI resource according to resource
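The new acpi_dev_get_dma_resources() follows the same calling convention as acpi_dev_get_resources(): pass an empty list head, get back struct resource_entry objects, and release them with acpi_dev_free_resource_list(). A hedged sketch of a caller, assuming an already-resolved struct acpi_device *adev and that the declaration is visible through <linux/acpi.h>; error handling is reduced to the essentials:

#include <linux/acpi.h>
#include <linux/resource_ext.h>

static int dump_dma_windows(struct acpi_device *adev)
{
	struct resource_entry *entry;
	LIST_HEAD(list);
	int count;

	count = acpi_dev_get_dma_resources(adev, &list);
	if (count <= 0)
		return count;	/* no _DMA windows, or a negative error code */

	list_for_each_entry(entry, &list, node)
		acpi_handle_info(adev->handle, "_DMA window %pR, offset %llu\n",
				 entry->res, (unsigned long long)entry->offset);

	acpi_dev_free_resource_list(&list);
	return 0;
}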
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index ad0b13ad4bbb..a2428e9462dd 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -31,7 +31,7 @@
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/power_supply.h>
-#include <linux/dmi.h>
+#include <linux/platform_data/x86/apple.h>
#include "sbshc.h"
#include "battery.h"
@@ -58,8 +58,6 @@ static unsigned int cache_time = 1000;
module_param(cache_time, uint, 0644);
MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
-static bool sbs_manager_broken;
-
#define MAX_SBS_BAT 4
#define ACPI_SBS_BLOCK_MAX 32
@@ -476,7 +474,7 @@ static ssize_t acpi_battery_alarm_store(struct device *dev,
return count;
}
-static struct device_attribute alarm_attr = {
+static const struct device_attribute alarm_attr = {
.attr = {.name = "alarm", .mode = 0644},
.show = acpi_battery_alarm_show,
.store = acpi_battery_alarm_store,
@@ -632,31 +630,12 @@ static void acpi_sbs_callback(void *context)
}
}
-static int disable_sbs_manager(const struct dmi_system_id *d)
-{
- sbs_manager_broken = true;
- return 0;
-}
-
-static struct dmi_system_id acpi_sbs_dmi_table[] = {
- {
- .callback = disable_sbs_manager,
- .ident = "Apple",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc.")
- },
- },
- { },
-};
-
static int acpi_sbs_add(struct acpi_device *device)
{
struct acpi_sbs *sbs;
int result = 0;
int id;
- dmi_check_system(acpi_sbs_dmi_table);
-
sbs = kzalloc(sizeof(struct acpi_sbs), GFP_KERNEL);
if (!sbs) {
result = -ENOMEM;
@@ -677,7 +656,7 @@ static int acpi_sbs_add(struct acpi_device *device)
result = 0;
- if (!sbs_manager_broken) {
+ if (!x86_apple_machine) {
result = acpi_manager_get_info(sbs);
if (!result) {
sbs->manager_present = 1;
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 33897298f03e..602f8ff212f2 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -13,6 +13,7 @@
#include <linux/dmi.h>
#include <linux/nls.h>
#include <linux/dma-mapping.h>
+#include <linux/platform_data/x86/apple.h>
#include <asm/pgtable.h>
@@ -1360,6 +1361,85 @@ enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
}
/**
+ * acpi_dma_get_range() - Get device DMA parameters.
+ *
+ * @dev: device to configure
+ * @dma_addr: pointer device DMA address result
+ * @offset: pointer to the DMA offset result
+ * @size: pointer to DMA range size result
+ *
+ * Evaluate DMA regions and, on success, return the DMA region start,
+ * offset and size through @dma_addr, @offset and @size respectively;
+ * the passed-in values are not updated on failure.
+ *
+ * Return 0 on success, < 0 on failure.
+ */
+int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset,
+ u64 *size)
+{
+ struct acpi_device *adev;
+ LIST_HEAD(list);
+ struct resource_entry *rentry;
+ int ret;
+ struct device *dma_dev = dev;
+ u64 len, dma_start = U64_MAX, dma_end = 0, dma_offset = 0;
+
+ /*
+ * Walk the device tree chasing an ACPI companion with a _DMA
+ * object while we go. Stop if we find a device with an ACPI
+ * companion containing a _DMA method.
+ */
+ do {
+ adev = ACPI_COMPANION(dma_dev);
+ if (adev && acpi_has_method(adev->handle, METHOD_NAME__DMA))
+ break;
+
+ dma_dev = dma_dev->parent;
+ } while (dma_dev);
+
+ if (!dma_dev)
+ return -ENODEV;
+
+ if (!acpi_has_method(adev->handle, METHOD_NAME__CRS)) {
+ acpi_handle_warn(adev->handle, "_DMA is valid only if _CRS is present\n");
+ return -EINVAL;
+ }
+
+ ret = acpi_dev_get_dma_resources(adev, &list);
+ if (ret > 0) {
+ list_for_each_entry(rentry, &list, node) {
+ if (dma_offset && rentry->offset != dma_offset) {
+ ret = -EINVAL;
+ dev_warn(dma_dev, "Can't handle multiple windows with different offsets\n");
+ goto out;
+ }
+ dma_offset = rentry->offset;
+
+ /* Take lower and upper limits */
+ if (rentry->res->start < dma_start)
+ dma_start = rentry->res->start;
+ if (rentry->res->end > dma_end)
+ dma_end = rentry->res->end;
+ }
+
+ if (dma_start >= dma_end) {
+ ret = -EINVAL;
+ dev_dbg(dma_dev, "Invalid DMA regions configuration\n");
+ goto out;
+ }
+
+ *dma_addr = dma_start - dma_offset;
+ len = dma_end - dma_start;
+ *size = max(len, len + 1);
+ *offset = dma_offset;
+ }
+ out:
+ acpi_dev_free_resource_list(&list);
+
+ return ret >= 0 ? 0 : ret;
+}
+
+/**
* acpi_dma_configure - Set-up DMA configuration for the device.
* @dev: The pointer to the device
* @attr: device dma attributes
@@ -1367,20 +1447,16 @@ enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr)
{
const struct iommu_ops *iommu;
- u64 size;
+ u64 dma_addr = 0, size = 0;
- iort_set_dma_mask(dev);
+ iort_dma_setup(dev, &dma_addr, &size);
iommu = iort_iommu_configure(dev);
if (IS_ERR(iommu) && PTR_ERR(iommu) == -EPROBE_DEFER)
return -EPROBE_DEFER;
- size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
- /*
- * Assume dma valid range starts at 0 and covers the whole
- * coherent_dma_mask.
- */
- arch_setup_dma_ops(dev, 0, size, iommu, attr == DEV_DMA_COHERENT);
+ arch_setup_dma_ops(dev, dma_addr, size,
+ iommu, attr == DEV_DMA_COHERENT);
return 0;
}
@@ -1452,6 +1528,12 @@ static bool acpi_is_spi_i2c_slave(struct acpi_device *device)
struct list_head resource_list;
bool is_spi_i2c_slave = false;
+ /* Macs use device properties in lieu of _CRS resources */
+ if (x86_apple_machine &&
+ (fwnode_property_present(&device->fwnode, "spiSclkPeriod") ||
+ fwnode_property_present(&device->fwnode, "i2cAddress")))
+ return true;
+
INIT_LIST_HEAD(&resource_list);
acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave,
&is_spi_i2c_slave);
@@ -1467,8 +1549,7 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
device->device_type = type;
device->handle = handle;
device->parent = acpi_bus_get_parent(handle);
- device->fwnode.type = FWNODE_ACPI;
- device->fwnode.ops = &acpi_fwnode_ops;
+ device->fwnode.ops = &acpi_device_fwnode_ops;
acpi_set_device_status(device, sta);
acpi_device_get_busid(device);
acpi_set_pnp_ids(handle, &device->pnp, type);
@@ -2058,6 +2139,9 @@ int __init acpi_scan_init(void)
acpi_get_spcr_uart_addr();
}
+ acpi_gpe_apply_masked_gpes();
+ acpi_update_all_gpes();
+
mutex_lock(&acpi_scan_lock);
/*
* Enumerate devices in the ACPI namespace.
@@ -2082,10 +2166,6 @@ int __init acpi_scan_init(void)
}
}
- acpi_gpe_apply_masked_gpes();
- acpi_update_all_gpes();
- acpi_ec_ecdt_start();
-
acpi_scan_initialized = true;
out:
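acpi_dma_get_range() above folds all _DMA windows into a single range: the lowest start and the highest end across the windows, one common translation offset, a device-side base of dma_start - dma_offset, and a size of end - start + 1 that saturates when the range spans the whole 64-bit space. A small worked example of that arithmetic in plain C, using made-up window values and a hypothetical offset:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Two hypothetical _DMA windows sharing one translation offset. */
	uint64_t starts[] = { 0x80000000ull, 0xc0000000ull };
	uint64_t ends[]   = { 0xbfffffffull, 0xffffffffull };
	uint64_t offset   = 0x80000000ull;
	uint64_t start = UINT64_MAX, end = 0, len, size, dma_addr;
	int i;

	for (i = 0; i < 2; i++) {
		if (starts[i] < start)
			start = starts[i];
		if (ends[i] > end)
			end = ends[i];
	}

	dma_addr = start - offset;		/* mirrors *dma_addr = dma_start - dma_offset */
	len = end - start;
	size = len > len + 1 ? len : len + 1;	/* mirrors max(len, len + 1) */

	printf("dma_addr=%#llx size=%#llx offset=%#llx\n",
	       (unsigned long long)dma_addr, (unsigned long long)size,
	       (unsigned long long)offset);
	return 0;
}

With these numbers the merged range is 0x80000000-0xffffffff, so the example prints dma_addr=0 and size=0x80000000.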
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index be17664736b2..9fdd014759f8 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -669,6 +669,7 @@ static const struct acpi_device_id lps0_device_ids[] = {
#define ACPI_LPS0_DSM_UUID "c4eb40a0-6cd2-11e2-bcfd-0800200c9a66"
+#define ACPI_LPS0_GET_DEVICE_CONSTRAINTS 1
#define ACPI_LPS0_SCREEN_OFF 3
#define ACPI_LPS0_SCREEN_ON 4
#define ACPI_LPS0_ENTRY 5
@@ -680,6 +681,166 @@ static acpi_handle lps0_device_handle;
static guid_t lps0_dsm_guid;
static char lps0_dsm_func_mask;
+/* Device constraint entry structure */
+struct lpi_device_info {
+ char *name;
+ int enabled;
+ union acpi_object *package;
+};
+
+/* Constraint package structure */
+struct lpi_device_constraint {
+ int uid;
+ int min_dstate;
+ int function_states;
+};
+
+struct lpi_constraints {
+ acpi_handle handle;
+ int min_dstate;
+};
+
+static struct lpi_constraints *lpi_constraints_table;
+static int lpi_constraints_table_size;
+
+static void lpi_device_get_constraints(void)
+{
+ union acpi_object *out_obj;
+ int i;
+
+ out_obj = acpi_evaluate_dsm_typed(lps0_device_handle, &lps0_dsm_guid,
+ 1, ACPI_LPS0_GET_DEVICE_CONSTRAINTS,
+ NULL, ACPI_TYPE_PACKAGE);
+
+ acpi_handle_debug(lps0_device_handle, "_DSM function 1 eval %s\n",
+ out_obj ? "successful" : "failed");
+
+ if (!out_obj)
+ return;
+
+ lpi_constraints_table = kcalloc(out_obj->package.count,
+ sizeof(*lpi_constraints_table),
+ GFP_KERNEL);
+ if (!lpi_constraints_table)
+ goto free_acpi_buffer;
+
+ acpi_handle_debug(lps0_device_handle, "LPI: constraints list begin:\n");
+
+ for (i = 0; i < out_obj->package.count; i++) {
+ struct lpi_constraints *constraint;
+ acpi_status status;
+ union acpi_object *package = &out_obj->package.elements[i];
+ struct lpi_device_info info = { };
+ int package_count = 0, j;
+
+ if (!package)
+ continue;
+
+ for (j = 0; j < package->package.count; ++j) {
+ union acpi_object *element =
+ &(package->package.elements[j]);
+
+ switch (element->type) {
+ case ACPI_TYPE_INTEGER:
+ info.enabled = element->integer.value;
+ break;
+ case ACPI_TYPE_STRING:
+ info.name = element->string.pointer;
+ break;
+ case ACPI_TYPE_PACKAGE:
+ package_count = element->package.count;
+ info.package = element->package.elements;
+ break;
+ }
+ }
+
+ if (!info.enabled || !info.package || !info.name)
+ continue;
+
+ constraint = &lpi_constraints_table[lpi_constraints_table_size];
+
+ status = acpi_get_handle(NULL, info.name, &constraint->handle);
+ if (ACPI_FAILURE(status))
+ continue;
+
+ acpi_handle_debug(lps0_device_handle,
+ "index:%d Name:%s\n", i, info.name);
+
+ constraint->min_dstate = -1;
+
+ for (j = 0; j < package_count; ++j) {
+ union acpi_object *info_obj = &info.package[j];
+ union acpi_object *cnstr_pkg;
+ union acpi_object *obj;
+ struct lpi_device_constraint dev_info;
+
+ switch (info_obj->type) {
+ case ACPI_TYPE_INTEGER:
+ /* version */
+ break;
+ case ACPI_TYPE_PACKAGE:
+ if (info_obj->package.count < 2)
+ break;
+
+ cnstr_pkg = info_obj->package.elements;
+ obj = &cnstr_pkg[0];
+ dev_info.uid = obj->integer.value;
+ obj = &cnstr_pkg[1];
+ dev_info.min_dstate = obj->integer.value;
+
+ acpi_handle_debug(lps0_device_handle,
+ "uid:%d min_dstate:%s\n",
+ dev_info.uid,
+ acpi_power_state_string(dev_info.min_dstate));
+
+ constraint->min_dstate = dev_info.min_dstate;
+ break;
+ }
+ }
+
+ if (constraint->min_dstate < 0) {
+ acpi_handle_debug(lps0_device_handle,
+ "Incomplete constraint defined\n");
+ continue;
+ }
+
+ lpi_constraints_table_size++;
+ }
+
+ acpi_handle_debug(lps0_device_handle, "LPI: constraints list end\n");
+
+free_acpi_buffer:
+ ACPI_FREE(out_obj);
+}
+
+static void lpi_check_constraints(void)
+{
+ int i;
+
+ for (i = 0; i < lpi_constraints_table_size; ++i) {
+ struct acpi_device *adev;
+
+ if (acpi_bus_get_device(lpi_constraints_table[i].handle, &adev))
+ continue;
+
+ acpi_handle_debug(adev->handle,
+ "LPI: required min power state:%s current power state:%s\n",
+ acpi_power_state_string(lpi_constraints_table[i].min_dstate),
+ acpi_power_state_string(adev->power.state));
+
+ if (!adev->flags.power_manageable) {
+ acpi_handle_info(adev->handle, "LPI: Device not power manageable\n");
+ continue;
+ }
+
+ if (adev->power.state < lpi_constraints_table[i].min_dstate)
+ acpi_handle_info(adev->handle,
+ "LPI: Constraint not met; min power state:%s current power state:%s\n",
+ acpi_power_state_string(lpi_constraints_table[i].min_dstate),
+ acpi_power_state_string(adev->power.state));
+ }
+}
+
static void acpi_sleep_run_lps0_dsm(unsigned int func)
{
union acpi_object *out_obj;
@@ -714,6 +875,12 @@ static int lps0_device_attach(struct acpi_device *adev,
if ((bitmask & ACPI_S2IDLE_FUNC_MASK) == ACPI_S2IDLE_FUNC_MASK) {
lps0_dsm_func_mask = bitmask;
lps0_device_handle = adev->handle;
+ /*
+ * Use suspend-to-idle by default if the default
+ * suspend mode was not set from the command line.
+ */
+ if (mem_sleep_default > PM_SUSPEND_MEM)
+ mem_sleep_current = PM_SUSPEND_TO_IDLE;
}
acpi_handle_debug(adev->handle, "_DSM function mask: 0x%x\n",
@@ -723,6 +890,9 @@ static int lps0_device_attach(struct acpi_device *adev,
"_DSM function 0 evaluation failed\n");
}
ACPI_FREE(out_obj);
+
+ lpi_device_get_constraints();
+
return 0;
}
@@ -731,14 +901,14 @@ static struct acpi_scan_handler lps0_handler = {
.attach = lps0_device_attach,
};
-static int acpi_freeze_begin(void)
+static int acpi_s2idle_begin(void)
{
acpi_scan_lock_acquire();
s2idle_in_progress = true;
return 0;
}
-static int acpi_freeze_prepare(void)
+static int acpi_s2idle_prepare(void)
{
if (lps0_device_handle) {
acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF);
@@ -758,8 +928,12 @@ static int acpi_freeze_prepare(void)
return 0;
}
-static void acpi_freeze_wake(void)
+static void acpi_s2idle_wake(void)
{
+
+ if (pm_debug_messages_on)
+ lpi_check_constraints();
+
/*
* If IRQD_WAKEUP_ARMED is not set for the SCI at this point, it means
* that the SCI has triggered while suspended, so cancel the wakeup in
@@ -772,20 +946,20 @@ static void acpi_freeze_wake(void)
}
}
-static void acpi_freeze_sync(void)
+static void acpi_s2idle_sync(void)
{
/*
* Process all pending events in case there are any wakeup ones.
*
- * The EC driver uses the system workqueue, so that one needs to be
- * flushed too.
+ * The EC driver uses the system workqueue and an additional special
+ * one, so those need to be flushed too.
*/
+ acpi_ec_flush_work();
acpi_os_wait_events_complete();
- flush_scheduled_work();
s2idle_wakeup = false;
}
-static void acpi_freeze_restore(void)
+static void acpi_s2idle_restore(void)
{
if (acpi_sci_irq_valid())
disable_irq_wake(acpi_sci_irq);
@@ -798,19 +972,19 @@ static void acpi_freeze_restore(void)
}
}
-static void acpi_freeze_end(void)
+static void acpi_s2idle_end(void)
{
s2idle_in_progress = false;
acpi_scan_lock_release();
}
-static const struct platform_freeze_ops acpi_freeze_ops = {
- .begin = acpi_freeze_begin,
- .prepare = acpi_freeze_prepare,
- .wake = acpi_freeze_wake,
- .sync = acpi_freeze_sync,
- .restore = acpi_freeze_restore,
- .end = acpi_freeze_end,
+static const struct platform_s2idle_ops acpi_s2idle_ops = {
+ .begin = acpi_s2idle_begin,
+ .prepare = acpi_s2idle_prepare,
+ .wake = acpi_s2idle_wake,
+ .sync = acpi_s2idle_sync,
+ .restore = acpi_s2idle_restore,
+ .end = acpi_s2idle_end,
};
static void acpi_sleep_suspend_setup(void)
@@ -825,7 +999,7 @@ static void acpi_sleep_suspend_setup(void)
&acpi_suspend_ops_old : &acpi_suspend_ops);
acpi_scan_add_handler(&lps0_handler);
- freeze_set_ops(&acpi_freeze_ops);
+ s2idle_set_ops(&acpi_s2idle_ops);
}
#else /* !CONFIG_SUSPEND */
@@ -870,7 +1044,7 @@ static struct syscore_ops acpi_sleep_syscore_ops = {
.resume = acpi_restore_bm_rld,
};
-void acpi_sleep_syscore_init(void)
+static void acpi_sleep_syscore_init(void)
{
register_syscore_ops(&acpi_sleep_syscore_ops);
}
diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c
index 4ac3e06b41d8..324b35bfe781 100644
--- a/drivers/acpi/spcr.c
+++ b/drivers/acpi/spcr.c
@@ -17,6 +17,16 @@
#include <linux/serial_core.h>
/*
+ * Erratum 44 for QDF2432v1 and QDF2400v1 SoCs describes the BUSY bit as
+ * occasionally getting stuck as 1. To avoid the potential for a hang, check
+ * TXFE == 0 instead of BUSY == 1. This may not be suitable for all UART
+ * implementations, so only do so if an affected platform is detected in
+ * parse_spcr().
+ */
+bool qdf2400_e44_present;
+EXPORT_SYMBOL(qdf2400_e44_present);
+
+/*
* Some Qualcomm Datacenter Technologies SoCs have a defective UART BUSY bit.
* Detect them by examining the OEM fields in the SPCR header, similar to PCI
* quirk detection in pci_mcfg.c.
@@ -43,17 +53,24 @@ static bool qdf2400_erratum_44_present(struct acpi_table_header *h)
*/
static bool xgene_8250_erratum_present(struct acpi_table_spcr *tb)
{
+ bool xgene_8250 = false;
+
if (tb->interface_type != ACPI_DBG2_16550_COMPATIBLE)
return false;
- if (memcmp(tb->header.oem_id, "APMC0D", ACPI_OEM_ID_SIZE))
+ if (memcmp(tb->header.oem_id, "APMC0D", ACPI_OEM_ID_SIZE) &&
+ memcmp(tb->header.oem_id, "HPE ", ACPI_OEM_ID_SIZE))
return false;
if (!memcmp(tb->header.oem_table_id, "XGENESPC",
ACPI_OEM_TABLE_ID_SIZE) && tb->header.oem_revision == 0)
- return true;
+ xgene_8250 = true;
- return false;
+ if (!memcmp(tb->header.oem_table_id, "ProLiant",
+ ACPI_OEM_TABLE_ID_SIZE) && tb->header.oem_revision == 1)
+ xgene_8250 = true;
+
+ return xgene_8250;
}
/**
@@ -95,16 +112,17 @@ int __init parse_spcr(bool earlycon)
}
if (table->serial_port.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
- switch (table->serial_port.access_width) {
+ switch (ACPI_ACCESS_BIT_WIDTH((
+ table->serial_port.access_width))) {
default:
pr_err("Unexpected SPCR Access Width. Defaulting to byte size\n");
- case ACPI_ACCESS_SIZE_BYTE:
+ case 8:
iotype = "mmio";
break;
- case ACPI_ACCESS_SIZE_WORD:
+ case 16:
iotype = "mmio16";
break;
- case ACPI_ACCESS_SIZE_DWORD:
+ case 32:
iotype = "mmio32";
break;
}
@@ -147,13 +165,43 @@ int __init parse_spcr(bool earlycon)
goto done;
}
- if (qdf2400_erratum_44_present(&table->header))
- uart = "qdf2400_e44";
- if (xgene_8250_erratum_present(table))
+ /*
+ * If the E44 erratum is required, then we need to tell the pl011
+ * driver to implement the work-around.
+ *
+ * The global variable is used by the probe function when it
+ * creates the UARTs, whether or not they're used as a console.
+ *
+ * If the user specifies "traditional" earlycon, the qdf2400_e44
+ * console name matches the EARLYCON_DECLARE() statement, and
+ * SPCR is not used. Parameter "earlycon" is false.
+ *
+ * If the user specifies "SPCR" earlycon, then we need to update
+ * the console name so that it also says "qdf2400_e44". Parameter
+ * "earlycon" is true.
+ *
+ * For consistency, if we change the console name, then we do it
+ * for everyone, not just earlycon.
+ */
+ if (qdf2400_erratum_44_present(&table->header)) {
+ qdf2400_e44_present = true;
+ if (earlycon)
+ uart = "qdf2400_e44";
+ }
+
+ if (xgene_8250_erratum_present(table)) {
iotype = "mmio32";
- snprintf(opts, sizeof(opts), "%s,%s,0x%llx,%d", uart, iotype,
- table->serial_port.address, baud_rate);
+ /* for xgene v1 and v2 we don't know the clock rate of the
+ * UART, so don't attempt to set the baud rate stated in the
+ * table because the driver cannot calculate the divisors
+ */
+ snprintf(opts, sizeof(opts), "%s,%s,0x%llx", uart, iotype,
+ table->serial_port.address);
+ } else {
+ snprintf(opts, sizeof(opts), "%s,%s,0x%llx,%d", uart, iotype,
+ table->serial_port.address, baud_rate);
+ }
pr_info("console: %s\n", opts);
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index e414fabf7315..78a5a23010ab 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -2,6 +2,8 @@
* sysfs.c - ACPI sysfs interface to userspace.
*/
+#define pr_fmt(fmt) "ACPI: " fmt
+
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
@@ -306,11 +308,13 @@ module_param_call(acpica_version, NULL, param_get_acpica_version, NULL, 0444);
/*
* ACPI table sysfs I/F:
* /sys/firmware/acpi/tables/
+ * /sys/firmware/acpi/tables/data/
* /sys/firmware/acpi/tables/dynamic/
*/
static LIST_HEAD(acpi_table_attr_list);
static struct kobject *tables_kobj;
+static struct kobject *tables_data_kobj;
static struct kobject *dynamic_tables_kobj;
static struct kobject *hotplug_kobj;
@@ -325,6 +329,11 @@ struct acpi_table_attr {
struct list_head node;
};
+struct acpi_data_attr {
+ struct bin_attribute attr;
+ u64 addr;
+};
+
static ssize_t acpi_table_show(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t offset, size_t count)
@@ -420,6 +429,70 @@ acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context)
return AE_OK;
}
+static ssize_t acpi_data_show(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t offset, size_t count)
+{
+ struct acpi_data_attr *data_attr;
+ void __iomem *base;
+ ssize_t rc;
+
+ data_attr = container_of(bin_attr, struct acpi_data_attr, attr);
+
+ base = acpi_os_map_memory(data_attr->addr, data_attr->attr.size);
+ if (!base)
+ return -ENOMEM;
+ rc = memory_read_from_buffer(buf, count, &offset, base,
+ data_attr->attr.size);
+ acpi_os_unmap_memory(base, data_attr->attr.size);
+
+ return rc;
+}
+
+static int acpi_bert_data_init(void *th, struct acpi_data_attr *data_attr)
+{
+ struct acpi_table_bert *bert = th;
+
+ if (bert->header.length < sizeof(struct acpi_table_bert) ||
+ bert->region_length < sizeof(struct acpi_hest_generic_status)) {
+ kfree(data_attr);
+ return -EINVAL;
+ }
+ data_attr->addr = bert->address;
+ data_attr->attr.size = bert->region_length;
+ data_attr->attr.attr.name = "BERT";
+
+ return sysfs_create_bin_file(tables_data_kobj, &data_attr->attr);
+}
+
+static struct acpi_data_obj {
+ char *name;
+ int (*fn)(void *, struct acpi_data_attr *);
+} acpi_data_objs[] = {
+ { ACPI_SIG_BERT, acpi_bert_data_init },
+};
+
+#define NUM_ACPI_DATA_OBJS ARRAY_SIZE(acpi_data_objs)
+
+static int acpi_table_data_init(struct acpi_table_header *th)
+{
+ struct acpi_data_attr *data_attr;
+ int i;
+
+ for (i = 0; i < NUM_ACPI_DATA_OBJS; i++) {
+ if (ACPI_COMPARE_NAME(th->signature, acpi_data_objs[i].name)) {
+ data_attr = kzalloc(sizeof(*data_attr), GFP_KERNEL);
+ if (!data_attr)
+ return -ENOMEM;
+ sysfs_attr_init(&data_attr->attr.attr);
+ data_attr->attr.read = acpi_data_show;
+ data_attr->attr.attr.mode = 0400;
+ return acpi_data_objs[i].fn(th, data_attr);
+ }
+ }
+ return 0;
+}
+
static int acpi_tables_sysfs_init(void)
{
struct acpi_table_attr *table_attr;
@@ -432,6 +505,10 @@ static int acpi_tables_sysfs_init(void)
if (!tables_kobj)
goto err;
+ tables_data_kobj = kobject_create_and_add("data", tables_kobj);
+ if (!tables_data_kobj)
+ goto err_tables_data;
+
dynamic_tables_kobj = kobject_create_and_add("dynamic", tables_kobj);
if (!dynamic_tables_kobj)
goto err_dynamic_tables;
@@ -456,13 +533,17 @@ static int acpi_tables_sysfs_init(void)
return ret;
}
list_add_tail(&table_attr->node, &acpi_table_attr_list);
+ acpi_table_data_init(table_header);
}
kobject_uevent(tables_kobj, KOBJ_ADD);
+ kobject_uevent(tables_data_kobj, KOBJ_ADD);
kobject_uevent(dynamic_tables_kobj, KOBJ_ADD);
return 0;
err_dynamic_tables:
+ kobject_put(tables_data_kobj);
+err_tables_data:
kobject_put(tables_kobj);
err:
return -ENOMEM;
@@ -552,11 +633,15 @@ static void fixed_event_count(u32 event_number)
static void acpi_global_event_handler(u32 event_type, acpi_handle device,
u32 event_number, void *context)
{
- if (event_type == ACPI_EVENT_TYPE_GPE)
+ if (event_type == ACPI_EVENT_TYPE_GPE) {
gpe_count(event_number);
-
- if (event_type == ACPI_EVENT_TYPE_FIXED)
+ pr_debug("GPE event 0x%02x\n", event_number);
+ } else if (event_type == ACPI_EVENT_TYPE_FIXED) {
fixed_event_count(event_number);
+ pr_debug("Fixed event 0x%02x\n", event_number);
+ } else {
+ pr_debug("Other event 0x%02x\n", event_number);
+ }
}
static int get_status(u32 index, acpi_event_status *status,
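The acpi_data_objs[] table makes the new tables/data/ directory extensible: any ACPI table that points at an out-of-band memory region can be exposed by adding a handler that fills in the address and size, following the BERT pattern above. A hedged sketch of such a handler; the table type, its field names and the "FOO" signature are purely illustrative, not part of this patch:

static int acpi_foo_data_init(void *th, struct acpi_data_attr *data_attr)
{
	struct acpi_table_foo *foo = th;	/* hypothetical table type */

	/* reject truncated tables before trusting the region fields */
	if (foo->header.length < sizeof(*foo)) {
		kfree(data_attr);
		return -EINVAL;
	}
	data_attr->addr = foo->region_address;		/* physical address of the blob */
	data_attr->attr.size = foo->region_length;	/* length exposed via sysfs */
	data_attr->attr.attr.name = "FOO";

	return sysfs_create_bin_file(tables_data_kobj, &data_attr->attr);
}

/* plus one extra entry in acpi_data_objs[]: { "FOO ", acpi_foo_data_init }, */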
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index ff425390bfa8..80ce2a7d224b 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -740,10 +740,10 @@ int __init acpi_table_init(void)
if (acpi_verify_table_checksum) {
pr_info("Early table checksum verification enabled\n");
- acpi_gbl_verify_table_checksum = TRUE;
+ acpi_gbl_enable_table_validation = TRUE;
} else {
pr_info("Early table checksum verification disabled\n");
- acpi_gbl_verify_table_checksum = FALSE;
+ acpi_gbl_enable_table_validation = FALSE;
}
status = acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0);
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index b9d956c916f5..0a9e5979aaa9 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -816,3 +816,39 @@ static int __init acpi_backlight(char *str)
return 1;
}
__setup("acpi_backlight=", acpi_backlight);
+
+/**
+ * acpi_match_platform_list - Check if the system matches with a given list
+ * @plat: pointer to acpi_platform_list table terminated by a NULL entry
+ *
+ * Return the matched index if the system is found in the platform list.
+ * Otherwise, return a negative error code.
+ */
+int acpi_match_platform_list(const struct acpi_platform_list *plat)
+{
+ struct acpi_table_header hdr;
+ int idx = 0;
+
+ if (acpi_disabled)
+ return -ENODEV;
+
+ for (; plat->oem_id[0]; plat++, idx++) {
+ if (ACPI_FAILURE(acpi_get_table_header(plat->table, 0, &hdr)))
+ continue;
+
+ if (strncmp(plat->oem_id, hdr.oem_id, ACPI_OEM_ID_SIZE))
+ continue;
+
+ if (strncmp(plat->oem_table_id, hdr.oem_table_id, ACPI_OEM_TABLE_ID_SIZE))
+ continue;
+
+ if ((plat->pred == all_versions) ||
+ (plat->pred == less_than_or_equal && hdr.oem_revision <= plat->oem_revision) ||
+ (plat->pred == greater_than_or_equal && hdr.oem_revision >= plat->oem_revision) ||
+ (plat->pred == equal && hdr.oem_revision == plat->oem_revision))
+ return idx;
+ }
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL(acpi_match_platform_list);
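A hedged usage sketch for the helper above. The OEM strings and the choice of DSDT are illustrative only, and struct acpi_platform_list is assumed to take designated initializers for the fields the loop references (oem_id, oem_table_id, oem_revision, table, pred); the real structure may carry additional fields not shown here:

static const struct acpi_platform_list example_plat[] = {
	{ .oem_id = "VENDOR", .oem_table_id = "TABLE-ID",
	  .oem_revision = 0, .table = ACPI_SIG_DSDT, .pred = all_versions },
	{ }	/* terminator: an empty oem_id ends the scan */
};

static bool example_platform_matches(void)
{
	/* returns the matched index (>= 0) or a negative error code */
	return acpi_match_platform_list(example_plat) >= 0;
}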
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index d179e8d9177d..601e5d372887 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -103,6 +103,12 @@ static int video_detect_force_native(const struct dmi_system_id *d)
return 0;
}
+static int video_detect_force_none(const struct dmi_system_id *d)
+{
+ acpi_backlight_dmi = acpi_backlight_none;
+ return 0;
+}
+
static const struct dmi_system_id video_detect_dmi_table[] = {
/* On Samsung X360, the BIOS will set a flag (VDRV) if generic
* ACPI backlight device is used. This flag will definitively break
@@ -313,6 +319,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Precision 7510"),
},
},
+ {
+ .callback = video_detect_force_none,
+ .ident = "Dell OptiPlex 9020M",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 9020M"),
+ },
+ },
{ },
 };

diff --git a/drivers/acpi/x86/apple.c b/drivers/acpi/x86/apple.c
new file mode 100644
index 000000000000..51b4cf9f25da
--- /dev/null
+++ b/drivers/acpi/x86/apple.c
@@ -0,0 +1,141 @@
+/*
+ * apple.c - Apple ACPI quirks
+ * Copyright (C) 2017 Lukas Wunner <lukas@wunner.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2) as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitmap.h>
+#include <linux/platform_data/x86/apple.h>
+#include <linux/uuid.h>
+
+/* Apple _DSM device properties GUID */
+static const guid_t apple_prp_guid =
+ GUID_INIT(0xa0b5b7c6, 0x1318, 0x441c,
+ 0xb0, 0xc9, 0xfe, 0x69, 0x5e, 0xaf, 0x94, 0x9b);
+
+/**
+ * acpi_extract_apple_properties - retrieve and convert Apple _DSM properties
+ * @adev: ACPI device for which to retrieve the properties
+ *
+ * Invoke Apple's custom _DSM once to check the protocol version and once more
+ * to retrieve the properties. They are marshalled up in a single package as
+ * alternating key/value elements, unlike _DSD which stores them as a package
+ * of 2-element packages. Convert to _DSD format and make them available under
+ * the primary fwnode.
+ */
+void acpi_extract_apple_properties(struct acpi_device *adev)
+{
+ unsigned int i, j = 0, newsize = 0, numprops, numvalid;
+ union acpi_object *props, *newprops;
+ unsigned long *valid = NULL;
+ void *free_space;
+
+ if (!x86_apple_machine)
+ return;
+
+ props = acpi_evaluate_dsm_typed(adev->handle, &apple_prp_guid, 1, 0,
+ NULL, ACPI_TYPE_BUFFER);
+ if (!props)
+ return;
+
+ if (!props->buffer.length)
+ goto out_free;
+
+ if (props->buffer.pointer[0] != 3) {
+ acpi_handle_info(adev->handle, FW_INFO
+ "unsupported properties version %*ph\n",
+ props->buffer.length, props->buffer.pointer);
+ goto out_free;
+ }
+
+ ACPI_FREE(props);
+ props = acpi_evaluate_dsm_typed(adev->handle, &apple_prp_guid, 1, 1,
+ NULL, ACPI_TYPE_PACKAGE);
+ if (!props)
+ return;
+
+ numprops = props->package.count / 2;
+ if (!numprops)
+ goto out_free;
+
+ valid = kcalloc(BITS_TO_LONGS(numprops), sizeof(long), GFP_KERNEL);
+ if (!valid)
+ goto out_free;
+
+ /* newsize = key length + value length of each tuple */
+ for (i = 0; i < numprops; i++) {
+ union acpi_object *key = &props->package.elements[i * 2];
+ union acpi_object *val = &props->package.elements[i * 2 + 1];
+
+ if (key->type != ACPI_TYPE_STRING ||
+ (val->type != ACPI_TYPE_INTEGER &&
+ val->type != ACPI_TYPE_BUFFER))
+ continue; /* skip invalid properties */
+
+ __set_bit(i, valid);
+ newsize += key->string.length + 1;
+ if (val->type == ACPI_TYPE_BUFFER)
+ newsize += val->buffer.length;
+ }
+
+ numvalid = bitmap_weight(valid, numprops);
+ if (numprops > numvalid)
+ acpi_handle_info(adev->handle, FW_INFO
+ "skipped %u properties: wrong type\n",
+ numprops - numvalid);
+ if (numvalid == 0)
+ goto out_free;
+
+ /* newsize += top-level package + 3 objects for each key/value tuple */
+ newsize += (1 + 3 * numvalid) * sizeof(union acpi_object);
+ newprops = ACPI_ALLOCATE_ZEROED(newsize);
+ if (!newprops)
+ goto out_free;
+
+ /* layout: top-level package | packages | key/value tuples | strings */
+ newprops->type = ACPI_TYPE_PACKAGE;
+ newprops->package.count = numvalid;
+ newprops->package.elements = &newprops[1];
+ free_space = &newprops[1 + 3 * numvalid];
+
+ for_each_set_bit(i, valid, numprops) {
+ union acpi_object *key = &props->package.elements[i * 2];
+ union acpi_object *val = &props->package.elements[i * 2 + 1];
+ unsigned int k = 1 + numvalid + j * 2; /* index into newprops */
+ unsigned int v = k + 1;
+
+ newprops[1 + j].type = ACPI_TYPE_PACKAGE;
+ newprops[1 + j].package.count = 2;
+ newprops[1 + j].package.elements = &newprops[k];
+
+ newprops[k].type = ACPI_TYPE_STRING;
+ newprops[k].string.length = key->string.length;
+ newprops[k].string.pointer = free_space;
+ memcpy(free_space, key->string.pointer, key->string.length);
+ free_space += key->string.length + 1;
+
+ newprops[v].type = val->type;
+ if (val->type == ACPI_TYPE_INTEGER) {
+ newprops[v].integer.value = val->integer.value;
+ } else {
+ newprops[v].buffer.length = val->buffer.length;
+ newprops[v].buffer.pointer = free_space;
+ memcpy(free_space, val->buffer.pointer,
+ val->buffer.length);
+ free_space += val->buffer.length;
+ }
+ j++; /* count valid properties */
+ }
+ WARN_ON(free_space != (void *)newprops + newsize);
+
+ adev->data.properties = newprops;
+ adev->data.pointer = newprops;
+
+out_free:
+ ACPI_FREE(props);
+ kfree(valid);
+}
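Once converted, the properties land on the device's primary fwnode and can be read through the normal ACPI device-property path. A minimal hedged consumer sketch: the property name "device-id" is purely illustrative, and acpi_dev_get_property() is assumed to be the usual accessor with the signature used below:

static void example_read_apple_property(struct acpi_device *adev)
{
	const union acpi_object *obj;

	/* returns 0 on success and fills *obj with the typed value */
	if (!acpi_dev_get_property(adev, "device-id", ACPI_TYPE_INTEGER, &obj))
		dev_info(&adev->dev, "device-id = %llu\n", obj->integer.value);
}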
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
index 832e885349b1..9801d852bd56 100644
--- a/drivers/android/Kconfig
+++ b/drivers/android/Kconfig
@@ -22,7 +22,7 @@ config ANDROID_BINDER_IPC
config ANDROID_BINDER_DEVICES
string "Android Binder devices"
depends on ANDROID_BINDER_IPC
- default "binder,hwbinder"
+ default "binder,hwbinder,vndbinder"
---help---
Default value for the binder.devices parameter.
@@ -32,7 +32,7 @@ config ANDROID_BINDER_DEVICES
therefore logically separated from the other devices.
config ANDROID_BINDER_IPC_32BIT
- bool
+ bool "Use old (Android 4.4 and earlier) 32-bit binder API"
depends on !64BIT && ANDROID_BINDER_IPC
default y
---help---
@@ -44,6 +44,16 @@ config ANDROID_BINDER_IPC_32BIT
Note that enabling this will break newer Android user-space.
+config ANDROID_BINDER_IPC_SELFTEST
+ bool "Android Binder IPC Driver Selftest"
+ depends on ANDROID_BINDER_IPC
+ ---help---
+ This feature allows binder selftest to run.
+
+ Binder selftest checks the allocation and free of binder buffers
+ exhaustively with combinations of various buffer sizes and
+ alignments.
+
endif # if ANDROID
endmenu
diff --git a/drivers/android/Makefile b/drivers/android/Makefile
index 3b7e4b072c58..a01254c43ee3 100644
--- a/drivers/android/Makefile
+++ b/drivers/android/Makefile
@@ -1,3 +1,4 @@
ccflags-y += -I$(src) # needed for trace events
-obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o
+obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o binder_alloc.o
+obj-$(CONFIG_ANDROID_BINDER_IPC_SELFTEST) += binder_alloc_selftest.o
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index aae4d8d4be36..d055b3f2a207 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -15,6 +15,40 @@
*
*/
+/*
+ * Locking overview
+ *
+ * There are 3 main spinlocks which must be acquired in the
+ * order shown:
+ *
+ * 1) proc->outer_lock : protects binder_ref
+ * binder_proc_lock() and binder_proc_unlock() are
+ * used to acq/rel.
+ * 2) node->lock : protects most fields of binder_node.
+ * binder_node_lock() and binder_node_unlock() are
+ * used to acq/rel
+ * 3) proc->inner_lock : protects the thread and node lists
+ * (proc->threads, proc->waiting_threads, proc->nodes)
+ * and all todo lists associated with the binder_proc
+ * (proc->todo, thread->todo, proc->delivered_death and
+ * node->async_todo), as well as thread->transaction_stack
+ * binder_inner_proc_lock() and binder_inner_proc_unlock()
+ * are used to acq/rel
+ *
+ * Any lock under procA must never be nested under any lock at the same
+ * level or below on procB.
+ *
+ * Functions that require a lock held on entry indicate which lock
+ * in the suffix of the function name:
+ *
+ * foo_olocked() : requires node->outer_lock
+ * foo_nlocked() : requires node->lock
+ * foo_ilocked() : requires proc->inner_lock
+ * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
+ * foo_nilocked(): requires node->lock and proc->inner_lock
+ * ...
+ */
+
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <asm/cacheflush.h>
@@ -24,7 +58,6 @@
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
-#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
@@ -35,30 +68,31 @@
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
+#include <linux/spinlock.h>
#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
#endif
#include <uapi/linux/android/binder.h>
+#include "binder_alloc.h"
#include "binder_trace.h"
-static DEFINE_MUTEX(binder_main_lock);
+static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);
-static DEFINE_MUTEX(binder_mmap_lock);
static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
-static HLIST_HEAD(binder_deferred_list);
+static DEFINE_MUTEX(binder_procs_lock);
+
static HLIST_HEAD(binder_dead_nodes);
+static DEFINE_SPINLOCK(binder_dead_nodes_lock);
static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
-static int binder_last_id;
+static atomic_t binder_last_id;
#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
@@ -88,8 +122,6 @@ BINDER_DEBUG_ENTRY(proc);
#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
-#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
-
enum {
BINDER_DEBUG_USER_ERROR = 1U << 0,
BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
@@ -104,17 +136,13 @@ enum {
BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
BINDER_DEBUG_FREE_BUFFER = 1U << 11,
BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
- BINDER_DEBUG_BUFFER_ALLOC = 1U << 13,
- BINDER_DEBUG_PRIORITY_CAP = 1U << 14,
- BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 15,
+ BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
+ BINDER_DEBUG_SPINLOCKS = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
-static bool binder_debug_no_lock;
-module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
-
static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);
@@ -171,26 +199,27 @@ enum binder_stat_types {
};
struct binder_stats {
- int br[_IOC_NR(BR_FAILED_REPLY) + 1];
- int bc[_IOC_NR(BC_REPLY_SG) + 1];
- int obj_created[BINDER_STAT_COUNT];
- int obj_deleted[BINDER_STAT_COUNT];
+ atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
+ atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
+ atomic_t obj_created[BINDER_STAT_COUNT];
+ atomic_t obj_deleted[BINDER_STAT_COUNT];
};
static struct binder_stats binder_stats;
static inline void binder_stats_deleted(enum binder_stat_types type)
{
- binder_stats.obj_deleted[type]++;
+ atomic_inc(&binder_stats.obj_deleted[type]);
}
static inline void binder_stats_created(enum binder_stat_types type)
{
- binder_stats.obj_created[type]++;
+ atomic_inc(&binder_stats.obj_created[type]);
}
struct binder_transaction_log_entry {
int debug_id;
+ int debug_id_done;
int call_type;
int from_proc;
int from_thread;
@@ -200,11 +229,14 @@ struct binder_transaction_log_entry {
int to_node;
int data_size;
int offsets_size;
+ int return_error_line;
+ uint32_t return_error;
+ uint32_t return_error_param;
const char *context_name;
};
struct binder_transaction_log {
- int next;
- int full;
+ atomic_t cur;
+ bool full;
struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
@@ -214,19 +246,26 @@ static struct binder_transaction_log_entry *binder_transaction_log_add(
struct binder_transaction_log *log)
{
struct binder_transaction_log_entry *e;
+ unsigned int cur = atomic_inc_return(&log->cur);
- e = &log->entry[log->next];
- memset(e, 0, sizeof(*e));
- log->next++;
- if (log->next == ARRAY_SIZE(log->entry)) {
- log->next = 0;
+ if (cur >= ARRAY_SIZE(log->entry))
log->full = 1;
- }
+ e = &log->entry[cur % ARRAY_SIZE(log->entry)];
+ WRITE_ONCE(e->debug_id_done, 0);
+ /*
+ * write-barrier to synchronize access to e->debug_id_done.
+ * We make sure the initialized 0 value is seen before
+ * the other fields are zeroed by memset().
+ */
+ smp_wmb();
+ memset(e, 0, sizeof(*e));
return e;
}
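For context, a hedged sketch of the reader side this write barrier pairs with: a consumer snapshots debug_id_done first, issues a read barrier, and then treats the entry as incomplete if the saved value does not match the entry's debug_id. The code that sets debug_id_done when a transaction finishes is not part of this hunk, so only the fields added to the struct above are used:

static bool example_log_entry_complete(struct binder_transaction_log_entry *e)
{
	int done = READ_ONCE(e->debug_id_done);

	/* pairs with the smp_wmb() in binder_transaction_log_add() */
	smp_rmb();
	return done && done == e->debug_id;
}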
struct binder_context {
struct binder_node *binder_context_mgr_node;
+ struct mutex context_mgr_node_lock;
+
kuid_t binder_context_mgr_uid;
const char *name;
};
@@ -237,11 +276,20 @@ struct binder_device {
struct binder_context context;
};
+/**
+ * struct binder_work - work enqueued on a worklist
+ * @entry: node enqueued on list
+ * @type: type of work to be performed
+ *
+ * There are separate work lists for proc, thread, and node (async).
+ */
struct binder_work {
struct list_head entry;
+
enum {
BINDER_WORK_TRANSACTION = 1,
BINDER_WORK_TRANSACTION_COMPLETE,
+ BINDER_WORK_RETURN_ERROR,
BINDER_WORK_NODE,
BINDER_WORK_DEAD_BINDER,
BINDER_WORK_DEAD_BINDER_AND_CLEAR,
@@ -249,8 +297,72 @@ struct binder_work {
} type;
};
+struct binder_error {
+ struct binder_work work;
+ uint32_t cmd;
+};
+
+/**
+ * struct binder_node - binder node bookkeeping
+ * @debug_id: unique ID for debugging
+ * (invariant after initialized)
+ * @lock: lock for node fields
+ * @work: worklist element for node work
+ * (protected by @proc->inner_lock)
+ * @rb_node: element for proc->nodes tree
+ * (protected by @proc->inner_lock)
+ * @dead_node: element for binder_dead_nodes list
+ * (protected by binder_dead_nodes_lock)
+ * @proc: binder_proc that owns this node
+ * (invariant after initialized)
+ * @refs: list of references on this node
+ * (protected by @lock)
+ * @internal_strong_refs: used to take strong references when
+ * initiating a transaction
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @local_weak_refs: weak user refs from local process
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @local_strong_refs: strong user refs from local process
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @tmp_refs: temporary kernel refs
+ * (protected by @proc->inner_lock while @proc
+ * is valid, and by binder_dead_nodes_lock
+ * if @proc is NULL. During inc/dec and node release
+ * it is also protected by @lock to provide safety
+ * as the node dies and @proc becomes NULL)
+ * @ptr: userspace pointer for node
+ * (invariant, no lock needed)
+ * @cookie: userspace cookie for node
+ * (invariant, no lock needed)
+ * @has_strong_ref: userspace notified of strong ref
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @pending_strong_ref: userspace has acked notification of strong ref
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @has_weak_ref: userspace notified of weak ref
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @pending_weak_ref: userspace has acked notification of weak ref
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @has_async_transaction: async transaction to node in progress
+ * (protected by @lock)
+ * @accept_fds: file descriptor operations supported for node
+ * (invariant after initialized)
+ * @min_priority: minimum scheduling priority
+ * (invariant after initialized)
+ * @async_todo: list of async work items
+ * (protected by @proc->inner_lock)
+ *
+ * Bookkeeping structure for binder nodes.
+ */
struct binder_node {
int debug_id;
+ spinlock_t lock;
struct binder_work work;
union {
struct rb_node rb_node;
@@ -261,88 +373,167 @@ struct binder_node {
int internal_strong_refs;
int local_weak_refs;
int local_strong_refs;
+ int tmp_refs;
binder_uintptr_t ptr;
binder_uintptr_t cookie;
- unsigned has_strong_ref:1;
- unsigned pending_strong_ref:1;
- unsigned has_weak_ref:1;
- unsigned pending_weak_ref:1;
- unsigned has_async_transaction:1;
- unsigned accept_fds:1;
- unsigned min_priority:8;
+ struct {
+ /*
+ * bitfield elements protected by
+ * proc inner_lock
+ */
+ u8 has_strong_ref:1;
+ u8 pending_strong_ref:1;
+ u8 has_weak_ref:1;
+ u8 pending_weak_ref:1;
+ };
+ struct {
+ /*
+ * invariant after initialization
+ */
+ u8 accept_fds:1;
+ u8 min_priority;
+ };
+ bool has_async_transaction;
struct list_head async_todo;
};
struct binder_ref_death {
+ /**
+ * @work: worklist element for death notifications
+ * (protected by inner_lock of the proc that
+ * this ref belongs to)
+ */
struct binder_work work;
binder_uintptr_t cookie;
};
+/**
+ * struct binder_ref_data - binder_ref counts and id
+ * @debug_id: unique ID for the ref
+ * @desc: unique userspace handle for ref
+ * @strong: strong ref count (debugging only if not locked)
+ * @weak: weak ref count (debugging only if not locked)
+ *
+ * Structure to hold ref count and ref id information. Since
+ * the actual ref can only be accessed with a lock, this structure
+ * is used to return information about the ref to callers of
+ * ref inc/dec functions.
+ */
+struct binder_ref_data {
+ int debug_id;
+ uint32_t desc;
+ int strong;
+ int weak;
+};
+
+/**
+ * struct binder_ref - struct to track references on nodes
+ * @data: binder_ref_data containing id, handle, and current refcounts
+ * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
+ * @rb_node_node: node for lookup by @node in proc's rb_tree
+ * @node_entry: list entry for node->refs list in target node
+ * (protected by @node->lock)
+ * @proc: binder_proc containing ref
+ * @node: binder_node of target node. When cleaning up a
+ * ref for deletion in binder_cleanup_ref, a non-NULL
+ * @node indicates the node must be freed
+ * @death: pointer to death notification (ref_death) if requested
+ * (protected by @node->lock)
+ *
+ * Structure to track references from procA to target node (on procB). This
+ * structure is unsafe to access without holding @proc->outer_lock.
+ */
struct binder_ref {
/* Lookups needed: */
/* node + proc => ref (transaction) */
/* desc + proc => ref (transaction, inc/dec ref) */
/* node => refs + procs (proc exit) */
- int debug_id;
+ struct binder_ref_data data;
struct rb_node rb_node_desc;
struct rb_node rb_node_node;
struct hlist_node node_entry;
struct binder_proc *proc;
struct binder_node *node;
- uint32_t desc;
- int strong;
- int weak;
struct binder_ref_death *death;
};
-struct binder_buffer {
- struct list_head entry; /* free and allocated entries by address */
- struct rb_node rb_node; /* free entry by size or allocated entry */
- /* by address */
- unsigned free:1;
- unsigned allow_user_free:1;
- unsigned async_transaction:1;
- unsigned debug_id:29;
-
- struct binder_transaction *transaction;
-
- struct binder_node *target_node;
- size_t data_size;
- size_t offsets_size;
- size_t extra_buffers_size;
- uint8_t data[0];
-};
-
enum binder_deferred_state {
BINDER_DEFERRED_PUT_FILES = 0x01,
BINDER_DEFERRED_FLUSH = 0x02,
BINDER_DEFERRED_RELEASE = 0x04,
};
+/**
+ * struct binder_proc - binder process bookkeeping
+ * @proc_node: element for binder_procs list
+ * @threads: rbtree of binder_threads in this proc
+ * (protected by @inner_lock)
+ * @nodes: rbtree of binder nodes associated with
+ * this proc ordered by node->ptr
+ * (protected by @inner_lock)
+ * @refs_by_desc: rbtree of refs ordered by ref->desc
+ * (protected by @outer_lock)
+ * @refs_by_node: rbtree of refs ordered by ref->node
+ * (protected by @outer_lock)
+ * @waiting_threads: threads currently waiting for proc work
+ * (protected by @inner_lock)
+ * @pid: PID of group_leader of process
+ * (invariant after initialized)
+ * @tsk: task_struct for group_leader of process
+ * (invariant after initialized)
+ * @files: files_struct for process
+ * (invariant after initialized)
+ * @deferred_work_node: element for binder_deferred_list
+ * (protected by binder_deferred_lock)
+ * @deferred_work: bitmap of deferred work to perform
+ * (protected by binder_deferred_lock)
+ * @is_dead: process is dead and awaiting free
+ * when outstanding transactions are cleaned up
+ * (protected by @inner_lock)
+ * @todo: list of work for this process
+ * (protected by @inner_lock)
+ * @wait: wait queue head to wait for proc work
+ * (invariant after initialized)
+ * @stats: per-process binder statistics
+ * (atomics, no lock needed)
+ * @delivered_death: list of delivered death notifications
+ * (protected by @inner_lock)
+ * @max_threads: cap on number of binder threads
+ * (protected by @inner_lock)
+ * @requested_threads: number of binder threads requested but not
+ * yet started. In current implementation, can
+ * only be 0 or 1.
+ * (protected by @inner_lock)
+ * @requested_threads_started: number of binder threads started
+ * (protected by @inner_lock)
+ * @tmp_ref: temporary reference to indicate proc is in use
+ * (protected by @inner_lock)
+ * @default_priority: default scheduler priority
+ * (invariant after initialized)
+ * @debugfs_entry: debugfs node
+ * @alloc: binder allocator bookkeeping
+ * @context: binder_context for this proc
+ * (invariant after initialized)
+ * @inner_lock: can nest under outer_lock and/or node lock
+ * @outer_lock: no nesting under inner or node lock
+ * Lock order: 1) outer, 2) node, 3) inner
+ *
+ * Bookkeeping structure for binder processes
+ */
struct binder_proc {
struct hlist_node proc_node;
struct rb_root threads;
struct rb_root nodes;
struct rb_root refs_by_desc;
struct rb_root refs_by_node;
+ struct list_head waiting_threads;
int pid;
- struct vm_area_struct *vma;
- struct mm_struct *vma_vm_mm;
struct task_struct *tsk;
struct files_struct *files;
struct hlist_node deferred_work_node;
int deferred_work;
- void *buffer;
- ptrdiff_t user_buffer_offset;
-
- struct list_head buffers;
- struct rb_root free_buffers;
- struct rb_root allocated_buffers;
- size_t free_async_space;
+ bool is_dead;
- struct page **pages;
- size_t buffer_size;
- uint32_t buffer_free;
struct list_head todo;
wait_queue_head_t wait;
struct binder_stats stats;
@@ -350,10 +541,13 @@ struct binder_proc {
int max_threads;
int requested_threads;
int requested_threads_started;
- int ready_threads;
+ int tmp_ref;
long default_priority;
struct dentry *debugfs_entry;
+ struct binder_alloc alloc;
struct binder_context *context;
+ spinlock_t inner_lock;
+ spinlock_t outer_lock;
};
enum {
@@ -362,22 +556,58 @@ enum {
BINDER_LOOPER_STATE_EXITED = 0x04,
BINDER_LOOPER_STATE_INVALID = 0x08,
BINDER_LOOPER_STATE_WAITING = 0x10,
- BINDER_LOOPER_STATE_NEED_RETURN = 0x20
+ BINDER_LOOPER_STATE_POLL = 0x20,
};
+/**
+ * struct binder_thread - binder thread bookkeeping
+ * @proc: binder process for this thread
+ * (invariant after initialization)
+ * @rb_node: element for proc->threads rbtree
+ * (protected by @proc->inner_lock)
+ * @waiting_thread_node: element for @proc->waiting_threads list
+ * (protected by @proc->inner_lock)
+ * @pid: PID for this thread
+ * (invariant after initialization)
+ * @looper: bitmap of looping state
+ * (only accessed by this thread)
+ * @looper_needs_return: looping thread needs to exit driver
+ * (no lock needed)
+ * @transaction_stack: stack of in-progress transactions for this thread
+ * (protected by @proc->inner_lock)
+ * @todo: list of work to do for this thread
+ * (protected by @proc->inner_lock)
+ * @return_error: transaction errors reported by this thread
+ * (only accessed by this thread)
+ * @reply_error: transaction errors reported by target thread
+ * (protected by @proc->inner_lock)
+ * @wait: wait queue for thread work
+ * @stats: per-thread statistics
+ * (atomics, no lock needed)
+ * @tmp_ref: temporary reference to indicate thread is in use
+ * (atomic since @proc->inner_lock cannot
+ * always be acquired)
+ * @is_dead: thread is dead and awaiting free
+ * when outstanding transactions are cleaned up
+ * (protected by @proc->inner_lock)
+ *
+ * Bookkeeping structure for binder threads.
+ */
struct binder_thread {
struct binder_proc *proc;
struct rb_node rb_node;
+ struct list_head waiting_thread_node;
int pid;
- int looper;
+ int looper; /* only modified by this thread */
+ bool looper_need_return; /* can be written by other thread */
struct binder_transaction *transaction_stack;
struct list_head todo;
- uint32_t return_error; /* Write failed, return error code in read buf */
- uint32_t return_error2; /* Write failed, return error code in read */
- /* buffer. Used when sending a reply to a dead process that */
- /* we are also waiting on */
+ struct binder_error return_error;
+ struct binder_error reply_error;
wait_queue_head_t wait;
struct binder_stats stats;
+ atomic_t tmp_ref;
+ bool is_dead;
};
struct binder_transaction {
@@ -397,10 +627,253 @@ struct binder_transaction {
long priority;
long saved_priority;
kuid_t sender_euid;
+ /**
+ * @lock: protects @from, @to_proc, and @to_thread
+ *
+ * @from, @to_proc, and @to_thread can be set to NULL
+ * during thread teardown
+ */
+ spinlock_t lock;
};
+/**
+ * binder_proc_lock() - Acquire outer lock for given binder_proc
+ * @proc: struct binder_proc to acquire
+ *
+ * Acquires proc->outer_lock. Used to protect binder_ref
+ * structures associated with the given proc.
+ */
+#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
+static void
+_binder_proc_lock(struct binder_proc *proc, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_lock(&proc->outer_lock);
+}
+
+/**
+ * binder_proc_unlock() - Release spinlock for given binder_proc
+ * @proc: struct binder_proc to release
+ *
+ * Release lock acquired via binder_proc_lock()
+ */
+#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
+static void
+_binder_proc_unlock(struct binder_proc *proc, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_unlock(&proc->outer_lock);
+}
+
+/**
+ * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
+ * @proc: struct binder_proc to acquire
+ *
+ * Acquires proc->inner_lock. Used to protect todo lists
+ */
+#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
+static void
+_binder_inner_proc_lock(struct binder_proc *proc, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_lock(&proc->inner_lock);
+}
+
+/**
+ * binder_inner_proc_unlock() - Release inner lock for given binder_proc
+ * @proc: struct binder_proc to release
+ *
+ * Release lock acquired via binder_inner_proc_lock()
+ */
+#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
+static void
+_binder_inner_proc_unlock(struct binder_proc *proc, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_unlock(&proc->inner_lock);
+}
+
+/**
+ * binder_node_lock() - Acquire spinlock for given binder_node
+ * @node: struct binder_node to acquire
+ *
+ * Acquires node->lock. Used to protect binder_node fields
+ */
+#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
+static void
+_binder_node_lock(struct binder_node *node, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_lock(&node->lock);
+}
+
+/**
+ * binder_node_unlock() - Release spinlock for given binder_node
+ * @node: struct binder_node to release
+ *
+ * Release lock acquired via binder_node_lock()
+ */
+#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
+static void
+_binder_node_unlock(struct binder_node *node, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_unlock(&node->lock);
+}
+
+/**
+ * binder_node_inner_lock() - Acquire node and inner locks
+ * @node: struct binder_node to acquire
+ *
+ * Acquires node->lock. If node->proc is non-NULL, also acquires
+ * node->proc->inner_lock. Used to protect binder_node fields.
+ */
+#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
+static void
+_binder_node_inner_lock(struct binder_node *node, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_lock(&node->lock);
+ if (node->proc)
+ binder_inner_proc_lock(node->proc);
+}
+
+/**
+ * binder_node_inner_unlock() - Release node and inner locks
+ * @node: struct binder_node to release
+ *
+ * Release locks acquired via binder_node_inner_lock()
+ */
+#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
+static void
+_binder_node_inner_unlock(struct binder_node *node, int line)
+{
+ struct binder_proc *proc = node->proc;
+
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ if (proc)
+ binder_inner_proc_unlock(proc);
+ spin_unlock(&node->lock);
+}
+
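A hedged sketch of the nesting these helpers are meant to enforce, following the 1) outer, 2) node, 3) inner order from the locking overview; this is not a real call site, and most paths only need a subset of the locks:

	binder_proc_lock(proc);		/* 1) proc->outer_lock */
	binder_node_lock(node);		/* 2) node->lock */
	binder_inner_proc_lock(proc);	/* 3) proc->inner_lock */

	/* ... manipulate refs, node state and todo lists ... */

	binder_inner_proc_unlock(proc);
	binder_node_unlock(node);
	binder_proc_unlock(proc);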
+static bool binder_worklist_empty_ilocked(struct list_head *list)
+{
+ return list_empty(list);
+}
+
+/**
+ * binder_worklist_empty() - Check if no items on the work list
+ * @proc: binder_proc associated with list
+ * @list: list to check
+ *
+ * Return: true if there are no items on list, else false
+ */
+static bool binder_worklist_empty(struct binder_proc *proc,
+ struct list_head *list)
+{
+ bool ret;
+
+ binder_inner_proc_lock(proc);
+ ret = binder_worklist_empty_ilocked(list);
+ binder_inner_proc_unlock(proc);
+ return ret;
+}
+
+static void
+binder_enqueue_work_ilocked(struct binder_work *work,
+ struct list_head *target_list)
+{
+ BUG_ON(target_list == NULL);
+ BUG_ON(work->entry.next && !list_empty(&work->entry));
+ list_add_tail(&work->entry, target_list);
+}
+
+/**
+ * binder_enqueue_work() - Add an item to the work list
+ * @proc: binder_proc associated with list
+ * @work: struct binder_work to add to list
+ * @target_list: list to add work to
+ *
+ * Adds the work to the specified list. Asserts that work
+ * is not already on a list.
+ */
+static void
+binder_enqueue_work(struct binder_proc *proc,
+ struct binder_work *work,
+ struct list_head *target_list)
+{
+ binder_inner_proc_lock(proc);
+ binder_enqueue_work_ilocked(work, target_list);
+ binder_inner_proc_unlock(proc);
+}
+
+static void
+binder_dequeue_work_ilocked(struct binder_work *work)
+{
+ list_del_init(&work->entry);
+}
+
+/**
+ * binder_dequeue_work() - Removes an item from the work list
+ * @proc: binder_proc associated with list
+ * @work: struct binder_work to remove from list
+ *
+ * Removes the specified work item from whatever list it is on.
+ * Can safely be called if work is not on any list.
+ */
+static void
+binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
+{
+ binder_inner_proc_lock(proc);
+ binder_dequeue_work_ilocked(work);
+ binder_inner_proc_unlock(proc);
+}
+
+static struct binder_work *binder_dequeue_work_head_ilocked(
+ struct list_head *list)
+{
+ struct binder_work *w;
+
+ w = list_first_entry_or_null(list, struct binder_work, entry);
+ if (w)
+ list_del_init(&w->entry);
+ return w;
+}
+
+/**
+ * binder_dequeue_work_head() - Dequeues the item at head of list
+ * @proc: binder_proc associated with list
+ * @list: list to dequeue head
+ *
+ * Removes the head of the list if there are items on the list
+ *
+ * Return: pointer to the dequeued binder_work, or NULL if the list was empty
+ */
+static struct binder_work *binder_dequeue_work_head(
+ struct binder_proc *proc,
+ struct list_head *list)
+{
+ struct binder_work *w;
+
+ binder_inner_proc_lock(proc);
+ w = binder_dequeue_work_head_ilocked(list);
+ binder_inner_proc_unlock(proc);
+ return w;
+}
+
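A hedged sketch of how the locked and _ilocked variants split between producer and consumer; the handler is hypothetical, and real call sites also wake up a target thread, as shown further below:

	struct binder_work *w;

	/* producer: queue an item on the process todo list */
	binder_enqueue_work(proc, work, &proc->todo);

	/* consumer: drain the list; NULL signals that it is empty */
	while ((w = binder_dequeue_work_head(proc, &proc->todo)) != NULL)
		example_handle_work(w);	/* hypothetical handler */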
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
+static void binder_free_thread(struct binder_thread *thread);
+static void binder_free_proc(struct binder_proc *proc);
+static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
@@ -451,462 +924,159 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd)
return retval;
}
-static inline void binder_lock(const char *tag)
-{
- trace_binder_lock(tag);
- mutex_lock(&binder_main_lock);
- trace_binder_locked(tag);
-}
-
-static inline void binder_unlock(const char *tag)
-{
- trace_binder_unlock(tag);
- mutex_unlock(&binder_main_lock);
-}
-
-static void binder_set_nice(long nice)
-{
- long min_nice;
-
- if (can_nice(current, nice)) {
- set_user_nice(current, nice);
- return;
- }
- min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
- binder_debug(BINDER_DEBUG_PRIORITY_CAP,
- "%d: nice value %ld not allowed use %ld instead\n",
- current->pid, nice, min_nice);
- set_user_nice(current, min_nice);
- if (min_nice <= MAX_NICE)
- return;
- binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
-}
-
-static size_t binder_buffer_size(struct binder_proc *proc,
- struct binder_buffer *buffer)
+static bool binder_has_work_ilocked(struct binder_thread *thread,
+ bool do_proc_work)
{
- if (list_is_last(&buffer->entry, &proc->buffers))
- return proc->buffer + proc->buffer_size - (void *)buffer->data;
- return (size_t)list_entry(buffer->entry.next,
- struct binder_buffer, entry) - (size_t)buffer->data;
+ return !binder_worklist_empty_ilocked(&thread->todo) ||
+ thread->looper_need_return ||
+ (do_proc_work &&
+ !binder_worklist_empty_ilocked(&thread->proc->todo));
}
-static void binder_insert_free_buffer(struct binder_proc *proc,
- struct binder_buffer *new_buffer)
+static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
- struct rb_node **p = &proc->free_buffers.rb_node;
- struct rb_node *parent = NULL;
- struct binder_buffer *buffer;
- size_t buffer_size;
- size_t new_buffer_size;
+ bool has_work;
- BUG_ON(!new_buffer->free);
+ binder_inner_proc_lock(thread->proc);
+ has_work = binder_has_work_ilocked(thread, do_proc_work);
+ binder_inner_proc_unlock(thread->proc);
- new_buffer_size = binder_buffer_size(proc, new_buffer);
-
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: add free buffer, size %zd, at %p\n",
- proc->pid, new_buffer_size, new_buffer);
-
- while (*p) {
- parent = *p;
- buffer = rb_entry(parent, struct binder_buffer, rb_node);
- BUG_ON(!buffer->free);
-
- buffer_size = binder_buffer_size(proc, buffer);
-
- if (new_buffer_size < buffer_size)
- p = &parent->rb_left;
- else
- p = &parent->rb_right;
- }
- rb_link_node(&new_buffer->rb_node, parent, p);
- rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
+ return has_work;
}
-static void binder_insert_allocated_buffer(struct binder_proc *proc,
- struct binder_buffer *new_buffer)
+static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
- struct rb_node **p = &proc->allocated_buffers.rb_node;
- struct rb_node *parent = NULL;
- struct binder_buffer *buffer;
-
- BUG_ON(new_buffer->free);
-
- while (*p) {
- parent = *p;
- buffer = rb_entry(parent, struct binder_buffer, rb_node);
- BUG_ON(buffer->free);
-
- if (new_buffer < buffer)
- p = &parent->rb_left;
- else if (new_buffer > buffer)
- p = &parent->rb_right;
- else
- BUG();
- }
- rb_link_node(&new_buffer->rb_node, parent, p);
- rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
+ return !thread->transaction_stack &&
+ binder_worklist_empty_ilocked(&thread->todo) &&
+ (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
+ BINDER_LOOPER_STATE_REGISTERED));
}
-static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
- uintptr_t user_ptr)
+static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
+ bool sync)
{
- struct rb_node *n = proc->allocated_buffers.rb_node;
- struct binder_buffer *buffer;
- struct binder_buffer *kern_ptr;
-
- kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset
- - offsetof(struct binder_buffer, data));
-
- while (n) {
- buffer = rb_entry(n, struct binder_buffer, rb_node);
- BUG_ON(buffer->free);
+ struct rb_node *n;
+ struct binder_thread *thread;
- if (kern_ptr < buffer)
- n = n->rb_left;
- else if (kern_ptr > buffer)
- n = n->rb_right;
- else
- return buffer;
+ for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
+ thread = rb_entry(n, struct binder_thread, rb_node);
+ if (thread->looper & BINDER_LOOPER_STATE_POLL &&
+ binder_available_for_proc_work_ilocked(thread)) {
+ if (sync)
+ wake_up_interruptible_sync(&thread->wait);
+ else
+ wake_up_interruptible(&thread->wait);
+ }
}
- return NULL;
}
-static int binder_update_page_range(struct binder_proc *proc, int allocate,
- void *start, void *end,
- struct vm_area_struct *vma)
+/**
+ * binder_select_thread_ilocked() - selects a thread for doing proc work.
+ * @proc: process to select a thread from
+ *
+ * Note that calling this function moves the thread off the waiting_threads
+ * list, so it can only be woken up by the caller of this function, or a
+ * signal. Therefore, callers *should* always wake up the thread this function
+ * returns.
+ *
+ * Return: If there's a thread currently waiting for process work,
+ * returns that thread. Otherwise returns NULL.
+ */
+static struct binder_thread *
+binder_select_thread_ilocked(struct binder_proc *proc)
{
- void *page_addr;
- unsigned long user_page_addr;
- struct page **page;
- struct mm_struct *mm;
-
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: %s pages %p-%p\n", proc->pid,
- allocate ? "allocate" : "free", start, end);
-
- if (end <= start)
- return 0;
-
- trace_binder_update_page_range(proc, allocate, start, end);
-
- if (vma)
- mm = NULL;
- else
- mm = get_task_mm(proc->tsk);
-
- if (mm) {
- down_write(&mm->mmap_sem);
- vma = proc->vma;
- if (vma && mm != proc->vma_vm_mm) {
- pr_err("%d: vma mm and task mm mismatch\n",
- proc->pid);
- vma = NULL;
- }
- }
-
- if (allocate == 0)
- goto free_range;
-
- if (vma == NULL) {
- pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
- proc->pid);
- goto err_no_vma;
- }
-
- for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
- int ret;
+ struct binder_thread *thread;
- page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
+ assert_spin_locked(&proc->inner_lock);
+ thread = list_first_entry_or_null(&proc->waiting_threads,
+ struct binder_thread,
+ waiting_thread_node);
- BUG_ON(*page);
- *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
- if (*page == NULL) {
- pr_err("%d: binder_alloc_buf failed for page at %p\n",
- proc->pid, page_addr);
- goto err_alloc_page_failed;
- }
- ret = map_kernel_range_noflush((unsigned long)page_addr,
- PAGE_SIZE, PAGE_KERNEL, page);
- flush_cache_vmap((unsigned long)page_addr,
- (unsigned long)page_addr + PAGE_SIZE);
- if (ret != 1) {
- pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
- proc->pid, page_addr);
- goto err_map_kernel_failed;
- }
- user_page_addr =
- (uintptr_t)page_addr + proc->user_buffer_offset;
- ret = vm_insert_page(vma, user_page_addr, page[0]);
- if (ret) {
- pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
- proc->pid, user_page_addr);
- goto err_vm_insert_page_failed;
- }
- /* vm_insert_page does not seem to increment the refcount */
- }
- if (mm) {
- up_write(&mm->mmap_sem);
- mmput(mm);
- }
- return 0;
+ if (thread)
+ list_del_init(&thread->waiting_thread_node);
-free_range:
- for (page_addr = end - PAGE_SIZE; page_addr >= start;
- page_addr -= PAGE_SIZE) {
- page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
- if (vma)
- zap_page_range(vma, (uintptr_t)page_addr +
- proc->user_buffer_offset, PAGE_SIZE);
-err_vm_insert_page_failed:
- unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
-err_map_kernel_failed:
- __free_page(*page);
- *page = NULL;
-err_alloc_page_failed:
- ;
- }
-err_no_vma:
- if (mm) {
- up_write(&mm->mmap_sem);
- mmput(mm);
- }
- return -ENOMEM;
+ return thread;
}
-static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
- size_t data_size,
- size_t offsets_size,
- size_t extra_buffers_size,
- int is_async)
+/**
+ * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
+ * @proc: process to wake up a thread in
+ * @thread: specific thread to wake up (may be NULL)
+ * @sync: whether to do a synchronous wake-up
+ *
+ * This function wakes up a thread in the @proc process.
+ * The caller may provide a specific thread to wake up in
+ * the @thread parameter. If @thread is NULL, this function
+ * will wake up threads that have called poll().
+ *
+ * Note that for this function to work as expected, callers
+ * should first call binder_select_thread() to find a thread
+ * to handle the work (if they don't have a thread already),
+ * and pass the result into the @thread parameter.
+ */
+static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
+ struct binder_thread *thread,
+ bool sync)
{
- struct rb_node *n = proc->free_buffers.rb_node;
- struct binder_buffer *buffer;
- size_t buffer_size;
- struct rb_node *best_fit = NULL;
- void *has_page_addr;
- void *end_page_addr;
- size_t size, data_offsets_size;
-
- if (proc->vma == NULL) {
- pr_err("%d: binder_alloc_buf, no vma\n",
- proc->pid);
- return NULL;
- }
-
- data_offsets_size = ALIGN(data_size, sizeof(void *)) +
- ALIGN(offsets_size, sizeof(void *));
-
- if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
- binder_user_error("%d: got transaction with invalid size %zd-%zd\n",
- proc->pid, data_size, offsets_size);
- return NULL;
- }
- size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
- if (size < data_offsets_size || size < extra_buffers_size) {
- binder_user_error("%d: got transaction with invalid extra_buffers_size %zd\n",
- proc->pid, extra_buffers_size);
- return NULL;
- }
- if (is_async &&
- proc->free_async_space < size + sizeof(struct binder_buffer)) {
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: binder_alloc_buf size %zd failed, no async space left\n",
- proc->pid, size);
- return NULL;
- }
+ assert_spin_locked(&proc->inner_lock);
- while (n) {
- buffer = rb_entry(n, struct binder_buffer, rb_node);
- BUG_ON(!buffer->free);
- buffer_size = binder_buffer_size(proc, buffer);
-
- if (size < buffer_size) {
- best_fit = n;
- n = n->rb_left;
- } else if (size > buffer_size)
- n = n->rb_right;
- else {
- best_fit = n;
- break;
- }
- }
- if (best_fit == NULL) {
- pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
- proc->pid, size);
- return NULL;
- }
- if (n == NULL) {
- buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
- buffer_size = binder_buffer_size(proc, buffer);
- }
-
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: binder_alloc_buf size %zd got buffer %p size %zd\n",
- proc->pid, size, buffer, buffer_size);
-
- has_page_addr =
- (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
- if (n == NULL) {
- if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
- buffer_size = size; /* no room for other buffers */
+ if (thread) {
+ if (sync)
+ wake_up_interruptible_sync(&thread->wait);
else
- buffer_size = size + sizeof(struct binder_buffer);
- }
- end_page_addr =
- (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
- if (end_page_addr > has_page_addr)
- end_page_addr = has_page_addr;
- if (binder_update_page_range(proc, 1,
- (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
- return NULL;
-
- rb_erase(best_fit, &proc->free_buffers);
- buffer->free = 0;
- binder_insert_allocated_buffer(proc, buffer);
- if (buffer_size != size) {
- struct binder_buffer *new_buffer = (void *)buffer->data + size;
-
- list_add(&new_buffer->entry, &buffer->entry);
- new_buffer->free = 1;
- binder_insert_free_buffer(proc, new_buffer);
- }
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: binder_alloc_buf size %zd got %p\n",
- proc->pid, size, buffer);
- buffer->data_size = data_size;
- buffer->offsets_size = offsets_size;
- buffer->extra_buffers_size = extra_buffers_size;
- buffer->async_transaction = is_async;
- if (is_async) {
- proc->free_async_space -= size + sizeof(struct binder_buffer);
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
- "%d: binder_alloc_buf size %zd async free %zd\n",
- proc->pid, size, proc->free_async_space);
+ wake_up_interruptible(&thread->wait);
+ return;
}
- return buffer;
-}
-
-static void *buffer_start_page(struct binder_buffer *buffer)
-{
- return (void *)((uintptr_t)buffer & PAGE_MASK);
+ /* Didn't find a thread waiting for proc work; this can happen
+ * in two scenarios:
+ * 1. All threads are busy handling transactions
+ * In that case, one of those threads should call back into
+ * the kernel driver soon and pick up this work.
+ * 2. Threads are using the (e)poll interface, in which case
+ * they may be blocked on the waitqueue without having been
+ * added to waiting_threads. For this case, we just iterate
+ * over all threads not handling transaction work, and
+ * wake them all up. We wake all because we don't know whether
+ * a thread that called into (e)poll is handling non-binder
+ * work currently.
+ */
+ binder_wakeup_poll_threads_ilocked(proc, sync);
}
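A hedged sketch of the calling pattern the kernel-doc above describes: select a waiting thread and queue the work while holding the inner lock, then wake exactly that thread; the work item here is illustrative, not lifted from a real call site:

	binder_inner_proc_lock(proc);
	thread = binder_select_thread_ilocked(proc);	/* may be NULL */
	binder_enqueue_work_ilocked(work, &proc->todo);
	binder_wakeup_thread_ilocked(proc, thread, /* sync = */ true);
	binder_inner_proc_unlock(proc);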
-static void *buffer_end_page(struct binder_buffer *buffer)
+static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
- return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
-}
+ struct binder_thread *thread = binder_select_thread_ilocked(proc);
-static void binder_delete_free_buffer(struct binder_proc *proc,
- struct binder_buffer *buffer)
-{
- struct binder_buffer *prev, *next = NULL;
- int free_page_end = 1;
- int free_page_start = 1;
-
- BUG_ON(proc->buffers.next == &buffer->entry);
- prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
- BUG_ON(!prev->free);
- if (buffer_end_page(prev) == buffer_start_page(buffer)) {
- free_page_start = 0;
- if (buffer_end_page(prev) == buffer_end_page(buffer))
- free_page_end = 0;
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %p share page with %p\n",
- proc->pid, buffer, prev);
- }
-
- if (!list_is_last(&buffer->entry, &proc->buffers)) {
- next = list_entry(buffer->entry.next,
- struct binder_buffer, entry);
- if (buffer_start_page(next) == buffer_end_page(buffer)) {
- free_page_end = 0;
- if (buffer_start_page(next) ==
- buffer_start_page(buffer))
- free_page_start = 0;
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %p share page with %p\n",
- proc->pid, buffer, prev);
- }
- }
- list_del(&buffer->entry);
- if (free_page_start || free_page_end) {
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %p do not share page%s%s with %p or %p\n",
- proc->pid, buffer, free_page_start ? "" : " end",
- free_page_end ? "" : " start", prev, next);
- binder_update_page_range(proc, 0, free_page_start ?
- buffer_start_page(buffer) : buffer_end_page(buffer),
- (free_page_end ? buffer_end_page(buffer) :
- buffer_start_page(buffer)) + PAGE_SIZE, NULL);
- }
+ binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}
-static void binder_free_buf(struct binder_proc *proc,
- struct binder_buffer *buffer)
+static void binder_set_nice(long nice)
{
- size_t size, buffer_size;
-
- buffer_size = binder_buffer_size(proc, buffer);
-
- size = ALIGN(buffer->data_size, sizeof(void *)) +
- ALIGN(buffer->offsets_size, sizeof(void *)) +
- ALIGN(buffer->extra_buffers_size, sizeof(void *));
-
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: binder_free_buf %p size %zd buffer_size %zd\n",
- proc->pid, buffer, size, buffer_size);
-
- BUG_ON(buffer->free);
- BUG_ON(size > buffer_size);
- BUG_ON(buffer->transaction != NULL);
- BUG_ON((void *)buffer < proc->buffer);
- BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);
-
- if (buffer->async_transaction) {
- proc->free_async_space += size + sizeof(struct binder_buffer);
-
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
- "%d: binder_free_buf size %zd async free %zd\n",
- proc->pid, size, proc->free_async_space);
- }
+ long min_nice;
- binder_update_page_range(proc, 0,
- (void *)PAGE_ALIGN((uintptr_t)buffer->data),
- (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
- NULL);
- rb_erase(&buffer->rb_node, &proc->allocated_buffers);
- buffer->free = 1;
- if (!list_is_last(&buffer->entry, &proc->buffers)) {
- struct binder_buffer *next = list_entry(buffer->entry.next,
- struct binder_buffer, entry);
-
- if (next->free) {
- rb_erase(&next->rb_node, &proc->free_buffers);
- binder_delete_free_buffer(proc, next);
- }
- }
- if (proc->buffers.next != &buffer->entry) {
- struct binder_buffer *prev = list_entry(buffer->entry.prev,
- struct binder_buffer, entry);
-
- if (prev->free) {
- binder_delete_free_buffer(proc, buffer);
- rb_erase(&prev->rb_node, &proc->free_buffers);
- buffer = prev;
- }
+ if (can_nice(current, nice)) {
+ set_user_nice(current, nice);
+ return;
}
- binder_insert_free_buffer(proc, buffer);
+ min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
+ binder_debug(BINDER_DEBUG_PRIORITY_CAP,
+ "%d: nice value %ld not allowed use %ld instead\n",
+ current->pid, nice, min_nice);
+ set_user_nice(current, min_nice);
+ if (min_nice <= MAX_NICE)
+ return;
+ binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}
-static struct binder_node *binder_get_node(struct binder_proc *proc,
- binder_uintptr_t ptr)
+static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
+ binder_uintptr_t ptr)
{
struct rb_node *n = proc->nodes.rb_node;
struct binder_node *node;
+ assert_spin_locked(&proc->inner_lock);
+
while (n) {
node = rb_entry(n, struct binder_node, rb_node);
@@ -914,21 +1084,46 @@ static struct binder_node *binder_get_node(struct binder_proc *proc,
n = n->rb_left;
else if (ptr > node->ptr)
n = n->rb_right;
- else
+ else {
+ /*
+ * take an implicit weak reference
+ * to ensure node stays alive until
+ * call to binder_put_node()
+ */
+ binder_inc_node_tmpref_ilocked(node);
return node;
+ }
}
return NULL;
}
-static struct binder_node *binder_new_node(struct binder_proc *proc,
- binder_uintptr_t ptr,
- binder_uintptr_t cookie)
+static struct binder_node *binder_get_node(struct binder_proc *proc,
+ binder_uintptr_t ptr)
+{
+ struct binder_node *node;
+
+ binder_inner_proc_lock(proc);
+ node = binder_get_node_ilocked(proc, ptr);
+ binder_inner_proc_unlock(proc);
+ return node;
+}
+
+static struct binder_node *binder_init_node_ilocked(
+ struct binder_proc *proc,
+ struct binder_node *new_node,
+ struct flat_binder_object *fp)
{
struct rb_node **p = &proc->nodes.rb_node;
struct rb_node *parent = NULL;
struct binder_node *node;
+ binder_uintptr_t ptr = fp ? fp->binder : 0;
+ binder_uintptr_t cookie = fp ? fp->cookie : 0;
+ __u32 flags = fp ? fp->flags : 0;
+
+ assert_spin_locked(&proc->inner_lock);
while (*p) {
+
parent = *p;
node = rb_entry(parent, struct binder_node, rb_node);
@@ -936,33 +1131,74 @@ static struct binder_node *binder_new_node(struct binder_proc *proc,
p = &(*p)->rb_left;
else if (ptr > node->ptr)
p = &(*p)->rb_right;
- else
- return NULL;
+ else {
+ /*
+ * A matching node is already in
+ * the rb tree. Abandon the init
+ * and return it.
+ */
+ binder_inc_node_tmpref_ilocked(node);
+ return node;
+ }
}
-
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (node == NULL)
- return NULL;
+ node = new_node;
binder_stats_created(BINDER_STAT_NODE);
+ node->tmp_refs++;
rb_link_node(&node->rb_node, parent, p);
rb_insert_color(&node->rb_node, &proc->nodes);
- node->debug_id = ++binder_last_id;
+ node->debug_id = atomic_inc_return(&binder_last_id);
node->proc = proc;
node->ptr = ptr;
node->cookie = cookie;
node->work.type = BINDER_WORK_NODE;
+ node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
+ node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
+ spin_lock_init(&node->lock);
INIT_LIST_HEAD(&node->work.entry);
INIT_LIST_HEAD(&node->async_todo);
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
"%d:%d node %d u%016llx c%016llx created\n",
proc->pid, current->pid, node->debug_id,
(u64)node->ptr, (u64)node->cookie);
+
return node;
}
-static int binder_inc_node(struct binder_node *node, int strong, int internal,
- struct list_head *target_list)
+static struct binder_node *binder_new_node(struct binder_proc *proc,
+ struct flat_binder_object *fp)
{
+ struct binder_node *node;
+ struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
+
+ if (!new_node)
+ return NULL;
+ binder_inner_proc_lock(proc);
+ node = binder_init_node_ilocked(proc, new_node, fp);
+ binder_inner_proc_unlock(proc);
+ if (node != new_node)
+ /*
+ * The node was already added by another thread
+ */
+ kfree(new_node);
+
+ return node;
+}
+
+static void binder_free_node(struct binder_node *node)
+{
+ kfree(node);
+ binder_stats_deleted(BINDER_STAT_NODE);
+}
+
+static int binder_inc_node_nilocked(struct binder_node *node, int strong,
+ int internal,
+ struct list_head *target_list)
+{
+ struct binder_proc *proc = node->proc;
+
+ assert_spin_locked(&node->lock);
+ if (proc)
+ assert_spin_locked(&proc->inner_lock);
if (strong) {
if (internal) {
if (target_list == NULL &&
@@ -978,8 +1214,8 @@ static int binder_inc_node(struct binder_node *node, int strong, int internal,
} else
node->local_strong_refs++;
if (!node->has_strong_ref && target_list) {
- list_del_init(&node->work.entry);
- list_add_tail(&node->work.entry, target_list);
+ binder_dequeue_work_ilocked(&node->work);
+ binder_enqueue_work_ilocked(&node->work, target_list);
}
} else {
if (!internal)
@@ -990,58 +1226,169 @@ static int binder_inc_node(struct binder_node *node, int strong, int internal,
node->debug_id);
return -EINVAL;
}
- list_add_tail(&node->work.entry, target_list);
+ binder_enqueue_work_ilocked(&node->work, target_list);
}
}
return 0;
}
-static int binder_dec_node(struct binder_node *node, int strong, int internal)
+static int binder_inc_node(struct binder_node *node, int strong, int internal,
+ struct list_head *target_list)
+{
+ int ret;
+
+ binder_node_inner_lock(node);
+ ret = binder_inc_node_nilocked(node, strong, internal, target_list);
+ binder_node_inner_unlock(node);
+
+ return ret;
+}
+
+static bool binder_dec_node_nilocked(struct binder_node *node,
+ int strong, int internal)
{
+ struct binder_proc *proc = node->proc;
+
+ assert_spin_locked(&node->lock);
+ if (proc)
+ assert_spin_locked(&proc->inner_lock);
if (strong) {
if (internal)
node->internal_strong_refs--;
else
node->local_strong_refs--;
if (node->local_strong_refs || node->internal_strong_refs)
- return 0;
+ return false;
} else {
if (!internal)
node->local_weak_refs--;
- if (node->local_weak_refs || !hlist_empty(&node->refs))
- return 0;
+ if (node->local_weak_refs || node->tmp_refs ||
+ !hlist_empty(&node->refs))
+ return false;
}
- if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
+
+ if (proc && (node->has_strong_ref || node->has_weak_ref)) {
if (list_empty(&node->work.entry)) {
- list_add_tail(&node->work.entry, &node->proc->todo);
- wake_up_interruptible(&node->proc->wait);
+ binder_enqueue_work_ilocked(&node->work, &proc->todo);
+ binder_wakeup_proc_ilocked(proc);
}
} else {
if (hlist_empty(&node->refs) && !node->local_strong_refs &&
- !node->local_weak_refs) {
- list_del_init(&node->work.entry);
- if (node->proc) {
- rb_erase(&node->rb_node, &node->proc->nodes);
+ !node->local_weak_refs && !node->tmp_refs) {
+ if (proc) {
+ binder_dequeue_work_ilocked(&node->work);
+ rb_erase(&node->rb_node, &proc->nodes);
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
"refless node %d deleted\n",
node->debug_id);
} else {
+ BUG_ON(!list_empty(&node->work.entry));
+ spin_lock(&binder_dead_nodes_lock);
+ /*
+ * tmp_refs could have changed so
+ * check it again
+ */
+ if (node->tmp_refs) {
+ spin_unlock(&binder_dead_nodes_lock);
+ return false;
+ }
hlist_del(&node->dead_node);
+ spin_unlock(&binder_dead_nodes_lock);
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
"dead node %d deleted\n",
node->debug_id);
}
- kfree(node);
- binder_stats_deleted(BINDER_STAT_NODE);
+ return true;
}
}
+ return false;
+}
- return 0;
+static void binder_dec_node(struct binder_node *node, int strong, int internal)
+{
+ bool free_node;
+
+ binder_node_inner_lock(node);
+ free_node = binder_dec_node_nilocked(node, strong, internal);
+ binder_node_inner_unlock(node);
+ if (free_node)
+ binder_free_node(node);
+}
+
+static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
+{
+ /*
+ * No call to binder_inc_node() is needed since we
+ * don't need to inform userspace of any changes to
+ * tmp_refs
+ */
+ node->tmp_refs++;
+}
+
+/**
+ * binder_inc_node_tmpref() - take a temporary reference on node
+ * @node: node to reference
+ *
+ * Take reference on node to prevent the node from being freed
+ * while referenced only by a local variable. The inner lock is
+ * needed to serialize with the node work on the queue (which
+ * isn't needed after the node is dead). If the node is dead
+ * (node->proc is NULL), use binder_dead_nodes_lock to protect
+ * node->tmp_refs against dead-node-only cases where the node
+ * lock cannot be acquired (e.g. traversing the dead node list to
+ * print nodes)
+ */
+static void binder_inc_node_tmpref(struct binder_node *node)
+{
+ binder_node_lock(node);
+ if (node->proc)
+ binder_inner_proc_lock(node->proc);
+ else
+ spin_lock(&binder_dead_nodes_lock);
+ binder_inc_node_tmpref_ilocked(node);
+ if (node->proc)
+ binder_inner_proc_unlock(node->proc);
+ else
+ spin_unlock(&binder_dead_nodes_lock);
+ binder_node_unlock(node);
+}
+
+/**
+ * binder_dec_node_tmpref() - remove a temporary reference on node
+ * @node: node to reference
+ *
+ * Release temporary reference on node taken via binder_inc_node_tmpref()
+ */
+static void binder_dec_node_tmpref(struct binder_node *node)
+{
+ bool free_node;
+
+ binder_node_inner_lock(node);
+ if (!node->proc)
+ spin_lock(&binder_dead_nodes_lock);
+ node->tmp_refs--;
+ BUG_ON(node->tmp_refs < 0);
+ if (!node->proc)
+ spin_unlock(&binder_dead_nodes_lock);
+ /*
+ * Call binder_dec_node_nilocked() to check if all refcounts are 0
+ * and cleanup is needed. Calling with strong=0 and internal=1
+ * causes no actual reference to be released in binder_dec_node_nilocked().
+ * If that changes, a change is needed here too.
+ */
+ free_node = binder_dec_node_nilocked(node, 0, 1);
+ binder_node_inner_unlock(node);
+ if (free_node)
+ binder_free_node(node);
}
+static void binder_put_node(struct binder_node *node)
+{
+ binder_dec_node_tmpref(node);
+}
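
/*
 * Editor's illustrative sketch (not part of this patch): the node lookup
 * lifecycle that callers such as binder_translate_binder() follow later in
 * this diff. Both binder_get_node() and binder_new_node() return the node
 * with tmp_refs elevated, so it cannot be freed until binder_put_node().
 * example_lookup_or_create() is a hypothetical helper and assumes the
 * surrounding binder.c definitions.
 */
static struct binder_node *example_lookup_or_create(struct binder_proc *proc,
                                                    struct flat_binder_object *fp)
{
        struct binder_node *node = binder_get_node(proc, fp->binder);

        if (!node)
                node = binder_new_node(proc, fp); /* NULL on allocation failure */
        /*
         * ... caller uses node and must eventually drop the temporary
         * reference with binder_put_node(node) ...
         */
        return node;
}
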
-static struct binder_ref *binder_get_ref(struct binder_proc *proc,
- u32 desc, bool need_strong_ref)
+static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
+ u32 desc, bool need_strong_ref)
{
struct rb_node *n = proc->refs_by_desc.rb_node;
struct binder_ref *ref;
@@ -1049,11 +1396,11 @@ static struct binder_ref *binder_get_ref(struct binder_proc *proc,
while (n) {
ref = rb_entry(n, struct binder_ref, rb_node_desc);
- if (desc < ref->desc) {
+ if (desc < ref->data.desc) {
n = n->rb_left;
- } else if (desc > ref->desc) {
+ } else if (desc > ref->data.desc) {
n = n->rb_right;
- } else if (need_strong_ref && !ref->strong) {
+ } else if (need_strong_ref && !ref->data.strong) {
binder_user_error("tried to use weak ref as strong ref\n");
return NULL;
} else {
@@ -1063,14 +1410,34 @@ static struct binder_ref *binder_get_ref(struct binder_proc *proc,
return NULL;
}
-static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
- struct binder_node *node)
+/**
+ * binder_get_ref_for_node_olocked() - get the ref associated with given node
+ * @proc: binder_proc that owns the ref
+ * @node: binder_node of target
+ * @new_ref: newly allocated binder_ref to be initialized or %NULL
+ *
+ * Look up the ref for the given node and return it if it exists
+ *
+ * If it doesn't exist and the caller provides a newly allocated
+ * ref, initialize the fields of the newly allocated ref and insert
+ * into the given proc rb_trees and node refs list.
+ *
+ * Return: the ref for node. It is possible that another thread
+ * allocated/initialized the ref first in which case the
+ * returned ref would be different than the passed-in
+ * new_ref. new_ref must be kfree'd by the caller in
+ * this case.
+ */
+static struct binder_ref *binder_get_ref_for_node_olocked(
+ struct binder_proc *proc,
+ struct binder_node *node,
+ struct binder_ref *new_ref)
{
- struct rb_node *n;
+ struct binder_context *context = proc->context;
struct rb_node **p = &proc->refs_by_node.rb_node;
struct rb_node *parent = NULL;
- struct binder_ref *ref, *new_ref;
- struct binder_context *context = proc->context;
+ struct binder_ref *ref;
+ struct rb_node *n;
while (*p) {
parent = *p;
@@ -1083,22 +1450,22 @@ static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
else
return ref;
}
- new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
- if (new_ref == NULL)
+ if (!new_ref)
return NULL;
+
binder_stats_created(BINDER_STAT_REF);
- new_ref->debug_id = ++binder_last_id;
+ new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
new_ref->proc = proc;
new_ref->node = node;
rb_link_node(&new_ref->rb_node_node, parent, p);
rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
- new_ref->desc = (node == context->binder_context_mgr_node) ? 0 : 1;
+ new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
ref = rb_entry(n, struct binder_ref, rb_node_desc);
- if (ref->desc > new_ref->desc)
+ if (ref->data.desc > new_ref->data.desc)
break;
- new_ref->desc = ref->desc + 1;
+ new_ref->data.desc = ref->data.desc + 1;
}
p = &proc->refs_by_desc.rb_node;
@@ -1106,121 +1473,423 @@ static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
parent = *p;
ref = rb_entry(parent, struct binder_ref, rb_node_desc);
- if (new_ref->desc < ref->desc)
+ if (new_ref->data.desc < ref->data.desc)
p = &(*p)->rb_left;
- else if (new_ref->desc > ref->desc)
+ else if (new_ref->data.desc > ref->data.desc)
p = &(*p)->rb_right;
else
BUG();
}
rb_link_node(&new_ref->rb_node_desc, parent, p);
rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
- if (node) {
- hlist_add_head(&new_ref->node_entry, &node->refs);
- binder_debug(BINDER_DEBUG_INTERNAL_REFS,
- "%d new ref %d desc %d for node %d\n",
- proc->pid, new_ref->debug_id, new_ref->desc,
- node->debug_id);
- } else {
- binder_debug(BINDER_DEBUG_INTERNAL_REFS,
- "%d new ref %d desc %d for dead node\n",
- proc->pid, new_ref->debug_id, new_ref->desc);
- }
+ binder_node_lock(node);
+ hlist_add_head(&new_ref->node_entry, &node->refs);
+
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "%d new ref %d desc %d for node %d\n",
+ proc->pid, new_ref->data.debug_id, new_ref->data.desc,
+ node->debug_id);
+ binder_node_unlock(node);
return new_ref;
}
-static void binder_delete_ref(struct binder_ref *ref)
+static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
+ bool delete_node = false;
+
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
"%d delete ref %d desc %d for node %d\n",
- ref->proc->pid, ref->debug_id, ref->desc,
+ ref->proc->pid, ref->data.debug_id, ref->data.desc,
ref->node->debug_id);
rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
- if (ref->strong)
- binder_dec_node(ref->node, 1, 1);
+
+ binder_node_inner_lock(ref->node);
+ if (ref->data.strong)
+ binder_dec_node_nilocked(ref->node, 1, 1);
+
hlist_del(&ref->node_entry);
- binder_dec_node(ref->node, 0, 1);
+ delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
+ binder_node_inner_unlock(ref->node);
+ /*
+ * Clear ref->node unless we want the caller to free the node
+ */
+ if (!delete_node) {
+ /*
+ * The caller uses ref->node to determine
+ * whether the node needs to be freed. Clear
+ * it since the node is still alive.
+ */
+ ref->node = NULL;
+ }
+
if (ref->death) {
binder_debug(BINDER_DEBUG_DEAD_BINDER,
"%d delete ref %d desc %d has death notification\n",
- ref->proc->pid, ref->debug_id, ref->desc);
- list_del(&ref->death->work.entry);
- kfree(ref->death);
+ ref->proc->pid, ref->data.debug_id,
+ ref->data.desc);
+ binder_dequeue_work(ref->proc, &ref->death->work);
binder_stats_deleted(BINDER_STAT_DEATH);
}
- kfree(ref);
binder_stats_deleted(BINDER_STAT_REF);
}
-static int binder_inc_ref(struct binder_ref *ref, int strong,
- struct list_head *target_list)
+/**
+ * binder_inc_ref_olocked() - increment the ref for given handle
+ * @ref: ref to be incremented
+ * @strong: if true, strong increment, else weak
+ * @target_list: list to queue node work on
+ *
+ * Increment the ref. @ref->proc->outer_lock must be held on entry
+ *
+ * Return: 0, if successful, else errno
+ */
+static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
+ struct list_head *target_list)
{
int ret;
if (strong) {
- if (ref->strong == 0) {
+ if (ref->data.strong == 0) {
ret = binder_inc_node(ref->node, 1, 1, target_list);
if (ret)
return ret;
}
- ref->strong++;
+ ref->data.strong++;
} else {
- if (ref->weak == 0) {
+ if (ref->data.weak == 0) {
ret = binder_inc_node(ref->node, 0, 1, target_list);
if (ret)
return ret;
}
- ref->weak++;
+ ref->data.weak++;
}
return 0;
}
-
-static int binder_dec_ref(struct binder_ref *ref, int strong)
+/**
+ * binder_dec_ref_olocked() - dec the ref for given handle
+ * @ref: ref to be decremented
+ * @strong: if true, strong decrement, else weak
+ *
+ * Decrement the ref. @ref->proc->outer_lock must be held on entry.
+ *
+ * Return: true if ref is cleaned up and ready to be freed
+ */
+static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
if (strong) {
- if (ref->strong == 0) {
+ if (ref->data.strong == 0) {
binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
- ref->proc->pid, ref->debug_id,
- ref->desc, ref->strong, ref->weak);
- return -EINVAL;
- }
- ref->strong--;
- if (ref->strong == 0) {
- int ret;
-
- ret = binder_dec_node(ref->node, strong, 1);
- if (ret)
- return ret;
+ ref->proc->pid, ref->data.debug_id,
+ ref->data.desc, ref->data.strong,
+ ref->data.weak);
+ return false;
}
+ ref->data.strong--;
+ if (ref->data.strong == 0)
+ binder_dec_node(ref->node, strong, 1);
} else {
- if (ref->weak == 0) {
+ if (ref->data.weak == 0) {
binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
- ref->proc->pid, ref->debug_id,
- ref->desc, ref->strong, ref->weak);
- return -EINVAL;
+ ref->proc->pid, ref->data.debug_id,
+ ref->data.desc, ref->data.strong,
+ ref->data.weak);
+ return false;
}
- ref->weak--;
+ ref->data.weak--;
}
- if (ref->strong == 0 && ref->weak == 0)
- binder_delete_ref(ref);
- return 0;
+ if (ref->data.strong == 0 && ref->data.weak == 0) {
+ binder_cleanup_ref_olocked(ref);
+ return true;
+ }
+ return false;
}
-static void binder_pop_transaction(struct binder_thread *target_thread,
- struct binder_transaction *t)
+/**
+ * binder_get_node_from_ref() - get the node from the given proc/desc
+ * @proc: proc containing the ref
+ * @desc: the handle associated with the ref
+ * @need_strong_ref: if true, only return node if ref is strong
+ * @rdata: the id/refcount data for the ref
+ *
+ * Given a proc and ref handle, return the associated binder_node
+ *
+ * Return: a binder_node, or NULL if not found or if only a weak ref exists where a strong one was required
+ */
+static struct binder_node *binder_get_node_from_ref(
+ struct binder_proc *proc,
+ u32 desc, bool need_strong_ref,
+ struct binder_ref_data *rdata)
{
- if (target_thread) {
- BUG_ON(target_thread->transaction_stack != t);
- BUG_ON(target_thread->transaction_stack->from != target_thread);
- target_thread->transaction_stack =
- target_thread->transaction_stack->from_parent;
- t->from = NULL;
+ struct binder_node *node;
+ struct binder_ref *ref;
+
+ binder_proc_lock(proc);
+ ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
+ if (!ref)
+ goto err_no_ref;
+ node = ref->node;
+ /*
+ * Take an implicit reference on the node to ensure
+ * it stays alive until the call to binder_put_node()
+ */
+ binder_inc_node_tmpref(node);
+ if (rdata)
+ *rdata = ref->data;
+ binder_proc_unlock(proc);
+
+ return node;
+
+err_no_ref:
+ binder_proc_unlock(proc);
+ return NULL;
+}
+
+/**
+ * binder_free_ref() - free the binder_ref
+ * @ref: ref to free
+ *
+ * Free the binder_ref. Free the binder_node indicated by ref->node
+ * (if non-NULL) and the binder_ref_death indicated by ref->death.
+ */
+static void binder_free_ref(struct binder_ref *ref)
+{
+ if (ref->node)
+ binder_free_node(ref->node);
+ kfree(ref->death);
+ kfree(ref);
+}
+
+/**
+ * binder_update_ref_for_handle() - inc/dec the ref for given handle
+ * @proc: proc containing the ref
+ * @desc: the handle associated with the ref
+ * @increment: true=inc reference, false=dec reference
+ * @strong: true=strong reference, false=weak reference
+ * @rdata: the id/refcount data for the ref
+ *
+ * Given a proc and ref handle, increment or decrement the ref
+ * according to "increment" arg.
+ *
+ * Return: 0 if successful, else errno
+ */
+static int binder_update_ref_for_handle(struct binder_proc *proc,
+ uint32_t desc, bool increment, bool strong,
+ struct binder_ref_data *rdata)
+{
+ int ret = 0;
+ struct binder_ref *ref;
+ bool delete_ref = false;
+
+ binder_proc_lock(proc);
+ ref = binder_get_ref_olocked(proc, desc, strong);
+ if (!ref) {
+ ret = -EINVAL;
+ goto err_no_ref;
+ }
+ if (increment)
+ ret = binder_inc_ref_olocked(ref, strong, NULL);
+ else
+ delete_ref = binder_dec_ref_olocked(ref, strong);
+
+ if (rdata)
+ *rdata = ref->data;
+ binder_proc_unlock(proc);
+
+ if (delete_ref)
+ binder_free_ref(ref);
+ return ret;
+
+err_no_ref:
+ binder_proc_unlock(proc);
+ return ret;
+}
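
/*
 * Editor's illustrative sketch (not part of this patch): how the
 * binder_thread_write() hunk later in this diff maps the four userspace
 * refcount commands onto the increment/strong arguments of
 * binder_update_ref_for_handle(). example_user_refcount() is a
 * hypothetical wrapper, shown only to make the mapping explicit.
 */
static int example_user_refcount(struct binder_proc *proc, uint32_t cmd,
                                 uint32_t target)
{
        bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
        bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
        struct binder_ref_data rdata;

        /* rdata is a stable snapshot that is safe to report to userspace */
        return binder_update_ref_for_handle(proc, target, increment, strong,
                                            &rdata);
}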
+
+/**
+ * binder_dec_ref_for_handle() - dec the ref for given handle
+ * @proc: proc containing the ref
+ * @desc: the handle associated with the ref
+ * @strong: true=strong reference, false=weak reference
+ * @rdata: the id/refcount data for the ref
+ *
+ * Just calls binder_update_ref_for_handle() to decrement the ref.
+ *
+ * Return: 0 if successful, else errno
+ */
+static int binder_dec_ref_for_handle(struct binder_proc *proc,
+ uint32_t desc, bool strong, struct binder_ref_data *rdata)
+{
+ return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
+}
+
+
+/**
+ * binder_inc_ref_for_node() - increment the ref for given proc/node
+ * @proc: proc containing the ref
+ * @node: target node
+ * @strong: true=strong reference, false=weak reference
+ * @target_list: worklist to use if node is incremented
+ * @rdata: the id/refcount data for the ref
+ *
+ * Given a proc and node, increment the ref. Create the ref if it
+ * doesn't already exist
+ *
+ * Return: 0 if successful, else errno
+ */
+static int binder_inc_ref_for_node(struct binder_proc *proc,
+ struct binder_node *node,
+ bool strong,
+ struct list_head *target_list,
+ struct binder_ref_data *rdata)
+{
+ struct binder_ref *ref;
+ struct binder_ref *new_ref = NULL;
+ int ret = 0;
+
+ binder_proc_lock(proc);
+ ref = binder_get_ref_for_node_olocked(proc, node, NULL);
+ if (!ref) {
+ binder_proc_unlock(proc);
+ new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+ if (!new_ref)
+ return -ENOMEM;
+ binder_proc_lock(proc);
+ ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
+ }
+ ret = binder_inc_ref_olocked(ref, strong, target_list);
+ *rdata = ref->data;
+ binder_proc_unlock(proc);
+ if (new_ref && ref != new_ref)
+ /*
+ * Another thread created the ref first so
+ * free the one we allocated
+ */
+ kfree(new_ref);
+ return ret;
+}
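
/*
 * Editor's illustrative sketch (not part of this patch): the pattern
 * binder_translate_binder() uses further down to hand a handle to the
 * target process. Only the binder_ref_data snapshot is used after the
 * proc lock has been dropped inside binder_inc_ref_for_node().
 * example_publish_handle() is a hypothetical helper.
 */
static int example_publish_handle(struct binder_proc *target_proc,
                                  struct binder_node *node,
                                  struct binder_thread *thread,
                                  struct flat_binder_object *fp)
{
        struct binder_ref_data rdata;
        int ret;

        ret = binder_inc_ref_for_node(target_proc, node,
                                      fp->hdr.type == BINDER_TYPE_BINDER,
                                      &thread->todo, &rdata);
        if (ret)
                return ret;

        fp->handle = rdata.desc; /* descriptor copied out of the ref */
        fp->cookie = 0;
        return 0;
}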
+
+static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
+ struct binder_transaction *t)
+{
+ BUG_ON(!target_thread);
+ assert_spin_locked(&target_thread->proc->inner_lock);
+ BUG_ON(target_thread->transaction_stack != t);
+ BUG_ON(target_thread->transaction_stack->from != target_thread);
+ target_thread->transaction_stack =
+ target_thread->transaction_stack->from_parent;
+ t->from = NULL;
+}
+
+/**
+ * binder_thread_dec_tmpref() - decrement thread->tmp_ref
+ * @thread: thread to decrement
+ *
+ * A thread needs to be kept alive while being used to create or
+ * handle a transaction. binder_get_txn_from() is used to safely
+ * extract t->from from a binder_transaction and keep the thread
+ * indicated by t->from from being freed. When done with that
+ * binder_thread, this function is called to decrement the
+ * tmp_ref and free if appropriate (thread has been released
+ * and no transaction being processed by the driver)
+ */
+static void binder_thread_dec_tmpref(struct binder_thread *thread)
+{
+ /*
+ * atomic is used to protect the counter value while
+ * it cannot reach zero or thread->is_dead is false
+ */
+ binder_inner_proc_lock(thread->proc);
+ atomic_dec(&thread->tmp_ref);
+ if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
+ binder_inner_proc_unlock(thread->proc);
+ binder_free_thread(thread);
+ return;
+ }
+ binder_inner_proc_unlock(thread->proc);
+}
+
+/**
+ * binder_proc_dec_tmpref() - decrement proc->tmp_ref
+ * @proc: proc to decrement
+ *
+ * A binder_proc needs to be kept alive while being used to create or
+ * handle a transaction. proc->tmp_ref is incremented when
+ * creating a new transaction or the binder_proc is currently in use
+ * by threads that are being released. When done with the binder_proc,
+ * this function is called to decrement the counter and free the
+ * proc if appropriate (proc has been released, all threads have
+ * been released and not currently in use to process a transaction).
+ */
+static void binder_proc_dec_tmpref(struct binder_proc *proc)
+{
+ binder_inner_proc_lock(proc);
+ proc->tmp_ref--;
+ if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
+ !proc->tmp_ref) {
+ binder_inner_proc_unlock(proc);
+ binder_free_proc(proc);
+ return;
+ }
+ binder_inner_proc_unlock(proc);
+}
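
/*
 * Editor's illustrative sketch (not part of this patch): how the
 * binder_transaction() rework below pins the target proc for the lifetime
 * of a transaction. The increment happens under the inner lock; the
 * matching binder_proc_dec_tmpref() runs on both the success and the
 * error paths. example_pin_target_proc() is a hypothetical helper.
 */
static void example_pin_target_proc(struct binder_proc *target_proc)
{
        binder_inner_proc_lock(target_proc);
        target_proc->tmp_ref++;
        binder_inner_proc_unlock(target_proc);

        /* ... build and queue the transaction ... */

        binder_proc_dec_tmpref(target_proc);
}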
+
+/**
+ * binder_get_txn_from() - safely extract the "from" thread in transaction
+ * @t: binder transaction for t->from
+ *
+ * Atomically return the "from" thread and increment the tmp_ref
+ * count for the thread to ensure it stays alive until
+ * binder_thread_dec_tmpref() is called.
+ *
+ * Return: the value of t->from
+ */
+static struct binder_thread *binder_get_txn_from(
+ struct binder_transaction *t)
+{
+ struct binder_thread *from;
+
+ spin_lock(&t->lock);
+ from = t->from;
+ if (from)
+ atomic_inc(&from->tmp_ref);
+ spin_unlock(&t->lock);
+ return from;
+}
+
+/**
+ * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
+ * @t: binder transaction for t->from
+ *
+ * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
+ * to guarantee that the thread cannot be released while operating on it.
+ * The caller must call binder_inner_proc_unlock() to release the inner lock
+ * as well as call binder_thread_dec_tmpref() to release the reference.
+ *
+ * Return: the value of t->from
+ */
+static struct binder_thread *binder_get_txn_from_and_acq_inner(
+ struct binder_transaction *t)
+{
+ struct binder_thread *from;
+
+ from = binder_get_txn_from(t);
+ if (!from)
+ return NULL;
+ binder_inner_proc_lock(from->proc);
+ if (t->from) {
+ BUG_ON(from != t->from);
+ return from;
}
- t->need_reply = 0;
+ binder_inner_proc_unlock(from->proc);
+ binder_thread_dec_tmpref(from);
+ return NULL;
+}
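
/*
 * Editor's illustrative sketch (not part of this patch): the release
 * pairing expected of binder_get_txn_from_and_acq_inner() callers, as
 * binder_send_failed_reply() does below. Both the inner lock and the
 * temporary thread reference must be dropped when done.
 * example_reply_to_sender() is a hypothetical helper.
 */
static void example_reply_to_sender(struct binder_transaction *t)
{
        struct binder_thread *target_thread;

        target_thread = binder_get_txn_from_and_acq_inner(t);
        if (!target_thread)
                return; /* sending thread is already gone */

        /* ... queue work for target_thread; its proc inner lock is held ... */

        binder_inner_proc_unlock(target_thread->proc);
        binder_thread_dec_tmpref(target_thread);
}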
+
+static void binder_free_transaction(struct binder_transaction *t)
+{
if (t->buffer)
t->buffer->transaction = NULL;
kfree(t);
@@ -1235,30 +1904,28 @@ static void binder_send_failed_reply(struct binder_transaction *t,
BUG_ON(t->flags & TF_ONE_WAY);
while (1) {
- target_thread = t->from;
+ target_thread = binder_get_txn_from_and_acq_inner(t);
if (target_thread) {
- if (target_thread->return_error != BR_OK &&
- target_thread->return_error2 == BR_OK) {
- target_thread->return_error2 =
- target_thread->return_error;
- target_thread->return_error = BR_OK;
- }
- if (target_thread->return_error == BR_OK) {
- binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
- "send failed reply for transaction %d to %d:%d\n",
- t->debug_id,
- target_thread->proc->pid,
- target_thread->pid);
-
- binder_pop_transaction(target_thread, t);
- target_thread->return_error = error_code;
+ binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
+ "send failed reply for transaction %d to %d:%d\n",
+ t->debug_id,
+ target_thread->proc->pid,
+ target_thread->pid);
+
+ binder_pop_transaction_ilocked(target_thread, t);
+ if (target_thread->reply_error.cmd == BR_OK) {
+ target_thread->reply_error.cmd = error_code;
+ binder_enqueue_work_ilocked(
+ &target_thread->reply_error.work,
+ &target_thread->todo);
wake_up_interruptible(&target_thread->wait);
} else {
- pr_err("reply failed, target thread, %d:%d, has error code %d already\n",
- target_thread->proc->pid,
- target_thread->pid,
- target_thread->return_error);
+ WARN(1, "Unexpected reply error: %u\n",
+ target_thread->reply_error.cmd);
}
+ binder_inner_proc_unlock(target_thread->proc);
+ binder_thread_dec_tmpref(target_thread);
+ binder_free_transaction(t);
return;
}
next = t->from_parent;
@@ -1267,7 +1934,7 @@ static void binder_send_failed_reply(struct binder_transaction *t,
"send failed reply for transaction %d, target dead\n",
t->debug_id);
- binder_pop_transaction(target_thread, t);
+ binder_free_transaction(t);
if (next == NULL) {
binder_debug(BINDER_DEBUG_DEAD_BINDER,
"reply failed, no target thread at root\n");
@@ -1476,24 +2143,26 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
node->debug_id, (u64)node->ptr);
binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
0);
+ binder_put_node(node);
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
struct flat_binder_object *fp;
- struct binder_ref *ref;
+ struct binder_ref_data rdata;
+ int ret;
fp = to_flat_binder_object(hdr);
- ref = binder_get_ref(proc, fp->handle,
- hdr->type == BINDER_TYPE_HANDLE);
- if (ref == NULL) {
- pr_err("transaction release %d bad handle %d\n",
- debug_id, fp->handle);
+ ret = binder_dec_ref_for_handle(proc, fp->handle,
+ hdr->type == BINDER_TYPE_HANDLE, &rdata);
+
+ if (ret) {
+ pr_err("transaction release %d bad handle %d, ret = %d\n",
+ debug_id, fp->handle, ret);
break;
}
binder_debug(BINDER_DEBUG_TRANSACTION,
- " ref %d desc %d (node %d)\n",
- ref->debug_id, ref->desc, ref->node->debug_id);
- binder_dec_ref(ref, hdr->type == BINDER_TYPE_HANDLE);
+ " ref %d desc %d\n",
+ rdata.debug_id, rdata.desc);
} break;
case BINDER_TYPE_FD: {
@@ -1532,7 +2201,8 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
* back to kernel address space to access it
*/
parent_buffer = parent->buffer -
- proc->user_buffer_offset;
+ binder_alloc_get_user_buffer_offset(
+ &proc->alloc);
fd_buf_size = sizeof(u32) * fda->num_fds;
if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
@@ -1564,102 +2234,122 @@ static int binder_translate_binder(struct flat_binder_object *fp,
struct binder_thread *thread)
{
struct binder_node *node;
- struct binder_ref *ref;
struct binder_proc *proc = thread->proc;
struct binder_proc *target_proc = t->to_proc;
+ struct binder_ref_data rdata;
+ int ret = 0;
node = binder_get_node(proc, fp->binder);
if (!node) {
- node = binder_new_node(proc, fp->binder, fp->cookie);
+ node = binder_new_node(proc, fp);
if (!node)
return -ENOMEM;
-
- node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
- node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
}
if (fp->cookie != node->cookie) {
binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
proc->pid, thread->pid, (u64)fp->binder,
node->debug_id, (u64)fp->cookie,
(u64)node->cookie);
- return -EINVAL;
+ ret = -EINVAL;
+ goto done;
+ }
+ if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
+ ret = -EPERM;
+ goto done;
}
- if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
- return -EPERM;
- ref = binder_get_ref_for_node(target_proc, node);
- if (!ref)
- return -EINVAL;
+ ret = binder_inc_ref_for_node(target_proc, node,
+ fp->hdr.type == BINDER_TYPE_BINDER,
+ &thread->todo, &rdata);
+ if (ret)
+ goto done;
if (fp->hdr.type == BINDER_TYPE_BINDER)
fp->hdr.type = BINDER_TYPE_HANDLE;
else
fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
fp->binder = 0;
- fp->handle = ref->desc;
+ fp->handle = rdata.desc;
fp->cookie = 0;
- binder_inc_ref(ref, fp->hdr.type == BINDER_TYPE_HANDLE, &thread->todo);
- trace_binder_transaction_node_to_ref(t, node, ref);
+ trace_binder_transaction_node_to_ref(t, node, &rdata);
binder_debug(BINDER_DEBUG_TRANSACTION,
" node %d u%016llx -> ref %d desc %d\n",
node->debug_id, (u64)node->ptr,
- ref->debug_id, ref->desc);
-
- return 0;
+ rdata.debug_id, rdata.desc);
+done:
+ binder_put_node(node);
+ return ret;
}
static int binder_translate_handle(struct flat_binder_object *fp,
struct binder_transaction *t,
struct binder_thread *thread)
{
- struct binder_ref *ref;
struct binder_proc *proc = thread->proc;
struct binder_proc *target_proc = t->to_proc;
+ struct binder_node *node;
+ struct binder_ref_data src_rdata;
+ int ret = 0;
- ref = binder_get_ref(proc, fp->handle,
- fp->hdr.type == BINDER_TYPE_HANDLE);
- if (!ref) {
+ node = binder_get_node_from_ref(proc, fp->handle,
+ fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
+ if (!node) {
binder_user_error("%d:%d got transaction with invalid handle, %d\n",
proc->pid, thread->pid, fp->handle);
return -EINVAL;
}
- if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
- return -EPERM;
+ if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
+ ret = -EPERM;
+ goto done;
+ }
- if (ref->node->proc == target_proc) {
+ binder_node_lock(node);
+ if (node->proc == target_proc) {
if (fp->hdr.type == BINDER_TYPE_HANDLE)
fp->hdr.type = BINDER_TYPE_BINDER;
else
fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
- fp->binder = ref->node->ptr;
- fp->cookie = ref->node->cookie;
- binder_inc_node(ref->node, fp->hdr.type == BINDER_TYPE_BINDER,
- 0, NULL);
- trace_binder_transaction_ref_to_node(t, ref);
+ fp->binder = node->ptr;
+ fp->cookie = node->cookie;
+ if (node->proc)
+ binder_inner_proc_lock(node->proc);
+ binder_inc_node_nilocked(node,
+ fp->hdr.type == BINDER_TYPE_BINDER,
+ 0, NULL);
+ if (node->proc)
+ binder_inner_proc_unlock(node->proc);
+ trace_binder_transaction_ref_to_node(t, node, &src_rdata);
binder_debug(BINDER_DEBUG_TRANSACTION,
" ref %d desc %d -> node %d u%016llx\n",
- ref->debug_id, ref->desc, ref->node->debug_id,
- (u64)ref->node->ptr);
+ src_rdata.debug_id, src_rdata.desc, node->debug_id,
+ (u64)node->ptr);
+ binder_node_unlock(node);
} else {
- struct binder_ref *new_ref;
+ int ret;
+ struct binder_ref_data dest_rdata;
- new_ref = binder_get_ref_for_node(target_proc, ref->node);
- if (!new_ref)
- return -EINVAL;
+ binder_node_unlock(node);
+ ret = binder_inc_ref_for_node(target_proc, node,
+ fp->hdr.type == BINDER_TYPE_HANDLE,
+ NULL, &dest_rdata);
+ if (ret)
+ goto done;
fp->binder = 0;
- fp->handle = new_ref->desc;
+ fp->handle = dest_rdata.desc;
fp->cookie = 0;
- binder_inc_ref(new_ref, fp->hdr.type == BINDER_TYPE_HANDLE,
- NULL);
- trace_binder_transaction_ref_to_ref(t, ref, new_ref);
+ trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
+ &dest_rdata);
binder_debug(BINDER_DEBUG_TRANSACTION,
" ref %d desc %d -> ref %d desc %d (node %d)\n",
- ref->debug_id, ref->desc, new_ref->debug_id,
- new_ref->desc, ref->node->debug_id);
+ src_rdata.debug_id, src_rdata.desc,
+ dest_rdata.debug_id, dest_rdata.desc,
+ node->debug_id);
}
- return 0;
+done:
+ binder_put_node(node);
+ return ret;
}
static int binder_translate_fd(int fd,
@@ -1750,7 +2440,8 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
* Since the parent was already fixed up, convert it
* back to the kernel address space to access it
*/
- parent_buffer = parent->buffer - target_proc->user_buffer_offset;
+ parent_buffer = parent->buffer -
+ binder_alloc_get_user_buffer_offset(&target_proc->alloc);
fd_array = (u32 *)(parent_buffer + fda->parent_offset);
if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
binder_user_error("%d:%d parent offset not aligned correctly.\n",
@@ -1818,12 +2509,80 @@ static int binder_fixup_parent(struct binder_transaction *t,
return -EINVAL;
}
parent_buffer = (u8 *)(parent->buffer -
- target_proc->user_buffer_offset);
+ binder_alloc_get_user_buffer_offset(
+ &target_proc->alloc));
*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
return 0;
}
+/**
+ * binder_proc_transaction() - sends a transaction to a process and wakes it up
+ * @t: transaction to send
+ * @proc: process to send the transaction to
+ * @thread: thread in @proc to send the transaction to (may be NULL)
+ *
+ * This function queues a transaction to the specified process. It will try
+ * to find a thread in the target process to handle the transaction and
+ * wake it up. If no thread is found, the work is queued to the proc
+ * waitqueue.
+ *
+ * If the @thread parameter is not NULL, the transaction is always queued
+ * to the waitlist of that specific thread.
+ *
+ * Return: true if the transaction was successfully queued
+ * false if the target process or thread is dead
+ */
+static bool binder_proc_transaction(struct binder_transaction *t,
+ struct binder_proc *proc,
+ struct binder_thread *thread)
+{
+ struct list_head *target_list = NULL;
+ struct binder_node *node = t->buffer->target_node;
+ bool oneway = !!(t->flags & TF_ONE_WAY);
+ bool wakeup = true;
+
+ BUG_ON(!node);
+ binder_node_lock(node);
+ if (oneway) {
+ BUG_ON(thread);
+ if (node->has_async_transaction) {
+ target_list = &node->async_todo;
+ wakeup = false;
+ } else {
+ node->has_async_transaction = 1;
+ }
+ }
+
+ binder_inner_proc_lock(proc);
+
+ if (proc->is_dead || (thread && thread->is_dead)) {
+ binder_inner_proc_unlock(proc);
+ binder_node_unlock(node);
+ return false;
+ }
+
+ if (!thread && !target_list)
+ thread = binder_select_thread_ilocked(proc);
+
+ if (thread)
+ target_list = &thread->todo;
+ else if (!target_list)
+ target_list = &proc->todo;
+ else
+ BUG_ON(target_list != &node->async_todo);
+
+ binder_enqueue_work_ilocked(&t->work, target_list);
+
+ if (wakeup)
+ binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
+
+ binder_inner_proc_unlock(proc);
+ binder_node_unlock(node);
+
+ return true;
+}
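
/*
 * Editor's illustrative sketch (not part of this patch): how the
 * binder_transaction() rework below consumes this helper. One-way work may
 * not name a specific thread; a false return means the target proc or
 * thread died, so the caller unwinds through err_dead_proc_or_thread.
 * example_queue_transaction() is a hypothetical helper.
 */
static bool example_queue_transaction(struct binder_transaction *t,
                                      struct binder_proc *target_proc,
                                      struct binder_thread *target_thread)
{
        if (t->flags & TF_ONE_WAY)
                return binder_proc_transaction(t, target_proc, NULL);

        return binder_proc_transaction(t, target_proc, target_thread);
}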
+
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply,
@@ -1835,19 +2594,21 @@ static void binder_transaction(struct binder_proc *proc,
binder_size_t *offp, *off_end, *off_start;
binder_size_t off_min;
u8 *sg_bufp, *sg_buf_end;
- struct binder_proc *target_proc;
+ struct binder_proc *target_proc = NULL;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
- struct list_head *target_list;
- wait_queue_head_t *target_wait;
struct binder_transaction *in_reply_to = NULL;
struct binder_transaction_log_entry *e;
- uint32_t return_error;
+ uint32_t return_error = 0;
+ uint32_t return_error_param = 0;
+ uint32_t return_error_line = 0;
struct binder_buffer_object *last_fixup_obj = NULL;
binder_size_t last_fixup_min_off = 0;
struct binder_context *context = proc->context;
+ int t_debug_id = atomic_inc_return(&binder_last_id);
e = binder_transaction_log_add(&binder_transaction_log);
+ e->debug_id = t_debug_id;
e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
e->from_proc = proc->pid;
e->from_thread = thread->pid;
@@ -1857,29 +2618,40 @@ static void binder_transaction(struct binder_proc *proc,
e->context_name = proc->context->name;
if (reply) {
+ binder_inner_proc_lock(proc);
in_reply_to = thread->transaction_stack;
if (in_reply_to == NULL) {
+ binder_inner_proc_unlock(proc);
binder_user_error("%d:%d got reply transaction with no transaction stack\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EPROTO;
+ return_error_line = __LINE__;
goto err_empty_call_stack;
}
- binder_set_nice(in_reply_to->saved_priority);
if (in_reply_to->to_thread != thread) {
+ spin_lock(&in_reply_to->lock);
binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
proc->pid, thread->pid, in_reply_to->debug_id,
in_reply_to->to_proc ?
in_reply_to->to_proc->pid : 0,
in_reply_to->to_thread ?
in_reply_to->to_thread->pid : 0);
+ spin_unlock(&in_reply_to->lock);
+ binder_inner_proc_unlock(proc);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EPROTO;
+ return_error_line = __LINE__;
in_reply_to = NULL;
goto err_bad_call_stack;
}
thread->transaction_stack = in_reply_to->to_parent;
- target_thread = in_reply_to->from;
+ binder_inner_proc_unlock(proc);
+ binder_set_nice(in_reply_to->saved_priority);
+ target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
if (target_thread == NULL) {
return_error = BR_DEAD_REPLY;
+ return_error_line = __LINE__;
goto err_dead_binder;
}
if (target_thread->transaction_stack != in_reply_to) {
@@ -1888,89 +2660,137 @@ static void binder_transaction(struct binder_proc *proc,
target_thread->transaction_stack ?
target_thread->transaction_stack->debug_id : 0,
in_reply_to->debug_id);
+ binder_inner_proc_unlock(target_thread->proc);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EPROTO;
+ return_error_line = __LINE__;
in_reply_to = NULL;
target_thread = NULL;
goto err_dead_binder;
}
target_proc = target_thread->proc;
+ target_proc->tmp_ref++;
+ binder_inner_proc_unlock(target_thread->proc);
} else {
if (tr->target.handle) {
struct binder_ref *ref;
- ref = binder_get_ref(proc, tr->target.handle, true);
- if (ref == NULL) {
+ /*
+ * There must already be a strong ref
+ * on this node. If so, do a strong
+ * increment on the node to ensure it
+ * stays alive until the transaction is
+ * done.
+ */
+ binder_proc_lock(proc);
+ ref = binder_get_ref_olocked(proc, tr->target.handle,
+ true);
+ if (ref) {
+ binder_inc_node(ref->node, 1, 0, NULL);
+ target_node = ref->node;
+ }
+ binder_proc_unlock(proc);
+ if (target_node == NULL) {
binder_user_error("%d:%d got transaction to invalid handle\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_invalid_target_handle;
}
- target_node = ref->node;
} else {
+ mutex_lock(&context->context_mgr_node_lock);
target_node = context->binder_context_mgr_node;
if (target_node == NULL) {
return_error = BR_DEAD_REPLY;
+ mutex_unlock(&context->context_mgr_node_lock);
+ return_error_line = __LINE__;
goto err_no_context_mgr_node;
}
+ binder_inc_node(target_node, 1, 0, NULL);
+ mutex_unlock(&context->context_mgr_node_lock);
}
e->to_node = target_node->debug_id;
+ binder_node_lock(target_node);
target_proc = target_node->proc;
if (target_proc == NULL) {
+ binder_node_unlock(target_node);
return_error = BR_DEAD_REPLY;
+ return_error_line = __LINE__;
goto err_dead_binder;
}
+ binder_inner_proc_lock(target_proc);
+ target_proc->tmp_ref++;
+ binder_inner_proc_unlock(target_proc);
+ binder_node_unlock(target_node);
if (security_binder_transaction(proc->tsk,
target_proc->tsk) < 0) {
return_error = BR_FAILED_REPLY;
+ return_error_param = -EPERM;
+ return_error_line = __LINE__;
goto err_invalid_target_handle;
}
+ binder_inner_proc_lock(proc);
if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
struct binder_transaction *tmp;
tmp = thread->transaction_stack;
if (tmp->to_thread != thread) {
+ spin_lock(&tmp->lock);
binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
proc->pid, thread->pid, tmp->debug_id,
tmp->to_proc ? tmp->to_proc->pid : 0,
tmp->to_thread ?
tmp->to_thread->pid : 0);
+ spin_unlock(&tmp->lock);
+ binder_inner_proc_unlock(proc);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EPROTO;
+ return_error_line = __LINE__;
goto err_bad_call_stack;
}
while (tmp) {
- if (tmp->from && tmp->from->proc == target_proc)
- target_thread = tmp->from;
+ struct binder_thread *from;
+
+ spin_lock(&tmp->lock);
+ from = tmp->from;
+ if (from && from->proc == target_proc) {
+ atomic_inc(&from->tmp_ref);
+ target_thread = from;
+ spin_unlock(&tmp->lock);
+ break;
+ }
+ spin_unlock(&tmp->lock);
tmp = tmp->from_parent;
}
}
+ binder_inner_proc_unlock(proc);
}
- if (target_thread) {
+ if (target_thread)
e->to_thread = target_thread->pid;
- target_list = &target_thread->todo;
- target_wait = &target_thread->wait;
- } else {
- target_list = &target_proc->todo;
- target_wait = &target_proc->wait;
- }
e->to_proc = target_proc->pid;
/* TODO: reuse incoming transaction for reply */
t = kzalloc(sizeof(*t), GFP_KERNEL);
if (t == NULL) {
return_error = BR_FAILED_REPLY;
+ return_error_param = -ENOMEM;
+ return_error_line = __LINE__;
goto err_alloc_t_failed;
}
binder_stats_created(BINDER_STAT_TRANSACTION);
+ spin_lock_init(&t->lock);
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
if (tcomplete == NULL) {
return_error = BR_FAILED_REPLY;
+ return_error_param = -ENOMEM;
+ return_error_line = __LINE__;
goto err_alloc_tcomplete_failed;
}
binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
- t->debug_id = ++binder_last_id;
- e->debug_id = t->debug_id;
+ t->debug_id = t_debug_id;
if (reply)
binder_debug(BINDER_DEBUG_TRANSACTION,
@@ -2004,11 +2824,18 @@ static void binder_transaction(struct binder_proc *proc,
trace_binder_transaction(reply, t, target_node);
- t->buffer = binder_alloc_buf(target_proc, tr->data_size,
+ t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
tr->offsets_size, extra_buffers_size,
!reply && (t->flags & TF_ONE_WAY));
- if (t->buffer == NULL) {
- return_error = BR_FAILED_REPLY;
+ if (IS_ERR(t->buffer)) {
+ /*
+ * -ESRCH indicates VMA cleared. The target is dying.
+ */
+ return_error_param = PTR_ERR(t->buffer);
+ return_error = return_error_param == -ESRCH ?
+ BR_DEAD_REPLY : BR_FAILED_REPLY;
+ return_error_line = __LINE__;
+ t->buffer = NULL;
goto err_binder_alloc_buf_failed;
}
t->buffer->allow_user_free = 0;
@@ -2016,9 +2843,6 @@ static void binder_transaction(struct binder_proc *proc,
t->buffer->transaction = t;
t->buffer->target_node = target_node;
trace_binder_transaction_alloc_buf(t->buffer);
- if (target_node)
- binder_inc_node(target_node, 1, 0, NULL);
-
off_start = (binder_size_t *)(t->buffer->data +
ALIGN(tr->data_size, sizeof(void *)));
offp = off_start;
@@ -2028,6 +2852,8 @@ static void binder_transaction(struct binder_proc *proc,
binder_user_error("%d:%d got transaction with invalid data ptr\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EFAULT;
+ return_error_line = __LINE__;
goto err_copy_data_failed;
}
if (copy_from_user(offp, (const void __user *)(uintptr_t)
@@ -2035,12 +2861,16 @@ static void binder_transaction(struct binder_proc *proc,
binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EFAULT;
+ return_error_line = __LINE__;
goto err_copy_data_failed;
}
if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
proc->pid, thread->pid, (u64)tr->offsets_size);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_offset;
}
if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
@@ -2048,6 +2878,8 @@ static void binder_transaction(struct binder_proc *proc,
proc->pid, thread->pid,
(u64)extra_buffers_size);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_offset;
}
off_end = (void *)off_start + tr->offsets_size;
@@ -2064,6 +2896,8 @@ static void binder_transaction(struct binder_proc *proc,
(u64)off_min,
(u64)t->buffer->data_size);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_offset;
}
@@ -2078,6 +2912,8 @@ static void binder_transaction(struct binder_proc *proc,
ret = binder_translate_binder(fp, t, thread);
if (ret < 0) {
return_error = BR_FAILED_REPLY;
+ return_error_param = ret;
+ return_error_line = __LINE__;
goto err_translate_failed;
}
} break;
@@ -2089,6 +2925,8 @@ static void binder_transaction(struct binder_proc *proc,
ret = binder_translate_handle(fp, t, thread);
if (ret < 0) {
return_error = BR_FAILED_REPLY;
+ return_error_param = ret;
+ return_error_line = __LINE__;
goto err_translate_failed;
}
} break;
@@ -2100,6 +2938,8 @@ static void binder_transaction(struct binder_proc *proc,
if (target_fd < 0) {
return_error = BR_FAILED_REPLY;
+ return_error_param = target_fd;
+ return_error_line = __LINE__;
goto err_translate_failed;
}
fp->pad_binder = 0;
@@ -2116,6 +2956,8 @@ static void binder_transaction(struct binder_proc *proc,
binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_parent;
}
if (!binder_validate_fixup(t->buffer, off_start,
@@ -2125,12 +2967,16 @@ static void binder_transaction(struct binder_proc *proc,
binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_parent;
}
ret = binder_translate_fd_array(fda, parent, t, thread,
in_reply_to);
if (ret < 0) {
return_error = BR_FAILED_REPLY;
+ return_error_param = ret;
+ return_error_line = __LINE__;
goto err_translate_failed;
}
last_fixup_obj = parent;
@@ -2146,6 +2992,8 @@ static void binder_transaction(struct binder_proc *proc,
binder_user_error("%d:%d got transaction with too large buffer\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_offset;
}
if (copy_from_user(sg_bufp,
@@ -2153,12 +3001,15 @@ static void binder_transaction(struct binder_proc *proc,
bp->buffer, bp->length)) {
binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
proc->pid, thread->pid);
+ return_error_param = -EFAULT;
return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
goto err_copy_data_failed;
}
/* Fixup buffer pointer to target proc address space */
bp->buffer = (uintptr_t)sg_bufp +
- target_proc->user_buffer_offset;
+ binder_alloc_get_user_buffer_offset(
+ &target_proc->alloc);
sg_bufp += ALIGN(bp->length, sizeof(u64));
ret = binder_fixup_parent(t, thread, bp, off_start,
@@ -2167,6 +3018,8 @@ static void binder_transaction(struct binder_proc *proc,
last_fixup_min_off);
if (ret < 0) {
return_error = BR_FAILED_REPLY;
+ return_error_param = ret;
+ return_error_line = __LINE__;
goto err_translate_failed;
}
last_fixup_obj = bp;
@@ -2176,34 +3029,60 @@ static void binder_transaction(struct binder_proc *proc,
binder_user_error("%d:%d got transaction with invalid object type, %x\n",
proc->pid, thread->pid, hdr->type);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_object_type;
}
}
+ tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
+ binder_enqueue_work(proc, tcomplete, &thread->todo);
+ t->work.type = BINDER_WORK_TRANSACTION;
+
if (reply) {
+ binder_inner_proc_lock(target_proc);
+ if (target_thread->is_dead) {
+ binder_inner_proc_unlock(target_proc);
+ goto err_dead_proc_or_thread;
+ }
BUG_ON(t->buffer->async_transaction != 0);
- binder_pop_transaction(target_thread, in_reply_to);
+ binder_pop_transaction_ilocked(target_thread, in_reply_to);
+ binder_enqueue_work_ilocked(&t->work, &target_thread->todo);
+ binder_inner_proc_unlock(target_proc);
+ wake_up_interruptible_sync(&target_thread->wait);
+ binder_free_transaction(in_reply_to);
} else if (!(t->flags & TF_ONE_WAY)) {
BUG_ON(t->buffer->async_transaction != 0);
+ binder_inner_proc_lock(proc);
t->need_reply = 1;
t->from_parent = thread->transaction_stack;
thread->transaction_stack = t;
+ binder_inner_proc_unlock(proc);
+ if (!binder_proc_transaction(t, target_proc, target_thread)) {
+ binder_inner_proc_lock(proc);
+ binder_pop_transaction_ilocked(thread, t);
+ binder_inner_proc_unlock(proc);
+ goto err_dead_proc_or_thread;
+ }
} else {
BUG_ON(target_node == NULL);
BUG_ON(t->buffer->async_transaction != 1);
- if (target_node->has_async_transaction) {
- target_list = &target_node->async_todo;
- target_wait = NULL;
- } else
- target_node->has_async_transaction = 1;
+ if (!binder_proc_transaction(t, target_proc, NULL))
+ goto err_dead_proc_or_thread;
}
- t->work.type = BINDER_WORK_TRANSACTION;
- list_add_tail(&t->work.entry, target_list);
- tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
- list_add_tail(&tcomplete->entry, &thread->todo);
- if (target_wait)
- wake_up_interruptible(target_wait);
+ if (target_thread)
+ binder_thread_dec_tmpref(target_thread);
+ binder_proc_dec_tmpref(target_proc);
+ /*
+ * write barrier to synchronize with initialization
+ * of log entry
+ */
+ smp_wmb();
+ WRITE_ONCE(e->debug_id_done, t_debug_id);
return;
+err_dead_proc_or_thread:
+ return_error = BR_DEAD_REPLY;
+ return_error_line = __LINE__;
err_translate_failed:
err_bad_object_type:
err_bad_offset:
@@ -2211,8 +3090,9 @@ err_bad_parent:
err_copy_data_failed:
trace_binder_transaction_failed_buffer_release(t->buffer);
binder_transaction_buffer_release(target_proc, t->buffer, offp);
+ target_node = NULL;
t->buffer->transaction = NULL;
- binder_free_buf(target_proc, t->buffer);
+ binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
kfree(tcomplete);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
@@ -2225,24 +3105,49 @@ err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
err_no_context_mgr_node:
+ if (target_thread)
+ binder_thread_dec_tmpref(target_thread);
+ if (target_proc)
+ binder_proc_dec_tmpref(target_proc);
+ if (target_node)
+ binder_dec_node(target_node, 1, 0);
+
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
- "%d:%d transaction failed %d, size %lld-%lld\n",
- proc->pid, thread->pid, return_error,
- (u64)tr->data_size, (u64)tr->offsets_size);
+ "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
+ proc->pid, thread->pid, return_error, return_error_param,
+ (u64)tr->data_size, (u64)tr->offsets_size,
+ return_error_line);
{
struct binder_transaction_log_entry *fe;
+ e->return_error = return_error;
+ e->return_error_param = return_error_param;
+ e->return_error_line = return_error_line;
fe = binder_transaction_log_add(&binder_transaction_log_failed);
*fe = *e;
+ /*
+ * write barrier to synchronize with initialization
+ * of log entry
+ */
+ smp_wmb();
+ WRITE_ONCE(e->debug_id_done, t_debug_id);
+ WRITE_ONCE(fe->debug_id_done, t_debug_id);
}
- BUG_ON(thread->return_error != BR_OK);
+ BUG_ON(thread->return_error.cmd != BR_OK);
if (in_reply_to) {
- thread->return_error = BR_TRANSACTION_COMPLETE;
+ thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
+ binder_enqueue_work(thread->proc,
+ &thread->return_error.work,
+ &thread->todo);
binder_send_failed_reply(in_reply_to, return_error);
- } else
- thread->return_error = return_error;
+ } else {
+ thread->return_error.cmd = return_error;
+ binder_enqueue_work(thread->proc,
+ &thread->return_error.work,
+ &thread->todo);
+ }
}
static int binder_thread_write(struct binder_proc *proc,
@@ -2256,15 +3161,17 @@ static int binder_thread_write(struct binder_proc *proc,
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
- while (ptr < end && thread->return_error == BR_OK) {
+ while (ptr < end && thread->return_error.cmd == BR_OK) {
+ int ret;
+
if (get_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
trace_binder_command(cmd);
if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
- binder_stats.bc[_IOC_NR(cmd)]++;
- proc->stats.bc[_IOC_NR(cmd)]++;
- thread->stats.bc[_IOC_NR(cmd)]++;
+ atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
+ atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
+ atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
}
switch (cmd) {
case BC_INCREFS:
@@ -2272,53 +3179,61 @@ static int binder_thread_write(struct binder_proc *proc,
case BC_RELEASE:
case BC_DECREFS: {
uint32_t target;
- struct binder_ref *ref;
const char *debug_string;
+ bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
+ bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
+ struct binder_ref_data rdata;
if (get_user(target, (uint32_t __user *)ptr))
return -EFAULT;
+
ptr += sizeof(uint32_t);
- if (target == 0 && context->binder_context_mgr_node &&
- (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
- ref = binder_get_ref_for_node(proc,
- context->binder_context_mgr_node);
- if (ref->desc != target) {
- binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
- proc->pid, thread->pid,
- ref->desc);
- }
- } else
- ref = binder_get_ref(proc, target,
- cmd == BC_ACQUIRE ||
- cmd == BC_RELEASE);
- if (ref == NULL) {
- binder_user_error("%d:%d refcount change on invalid ref %d\n",
- proc->pid, thread->pid, target);
- break;
+ ret = -1;
+ if (increment && !target) {
+ struct binder_node *ctx_mgr_node;
+ mutex_lock(&context->context_mgr_node_lock);
+ ctx_mgr_node = context->binder_context_mgr_node;
+ if (ctx_mgr_node)
+ ret = binder_inc_ref_for_node(
+ proc, ctx_mgr_node,
+ strong, NULL, &rdata);
+ mutex_unlock(&context->context_mgr_node_lock);
+ }
+ if (ret)
+ ret = binder_update_ref_for_handle(
+ proc, target, increment, strong,
+ &rdata);
+ if (!ret && rdata.desc != target) {
+ binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
+ proc->pid, thread->pid,
+ target, rdata.desc);
}
switch (cmd) {
case BC_INCREFS:
debug_string = "IncRefs";
- binder_inc_ref(ref, 0, NULL);
break;
case BC_ACQUIRE:
debug_string = "Acquire";
- binder_inc_ref(ref, 1, NULL);
break;
case BC_RELEASE:
debug_string = "Release";
- binder_dec_ref(ref, 1);
break;
case BC_DECREFS:
default:
debug_string = "DecRefs";
- binder_dec_ref(ref, 0);
+ break;
+ }
+ if (ret) {
+ binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
+ proc->pid, thread->pid, debug_string,
+ strong, target, ret);
break;
}
binder_debug(BINDER_DEBUG_USER_REFS,
- "%d:%d %s ref %d desc %d s %d w %d for node %d\n",
- proc->pid, thread->pid, debug_string, ref->debug_id,
- ref->desc, ref->strong, ref->weak, ref->node->debug_id);
+ "%d:%d %s ref %d desc %d s %d w %d\n",
+ proc->pid, thread->pid, debug_string,
+ rdata.debug_id, rdata.desc, rdata.strong,
+ rdata.weak);
break;
}
case BC_INCREFS_DONE:
@@ -2326,6 +3241,7 @@ static int binder_thread_write(struct binder_proc *proc,
binder_uintptr_t node_ptr;
binder_uintptr_t cookie;
struct binder_node *node;
+ bool free_node;
if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
return -EFAULT;
@@ -2350,13 +3266,17 @@ static int binder_thread_write(struct binder_proc *proc,
"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
(u64)node_ptr, node->debug_id,
(u64)cookie, (u64)node->cookie);
+ binder_put_node(node);
break;
}
+ binder_node_inner_lock(node);
if (cmd == BC_ACQUIRE_DONE) {
if (node->pending_strong_ref == 0) {
binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
proc->pid, thread->pid,
node->debug_id);
+ binder_node_inner_unlock(node);
+ binder_put_node(node);
break;
}
node->pending_strong_ref = 0;
@@ -2365,16 +3285,23 @@ static int binder_thread_write(struct binder_proc *proc,
binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
proc->pid, thread->pid,
node->debug_id);
+ binder_node_inner_unlock(node);
+ binder_put_node(node);
break;
}
node->pending_weak_ref = 0;
}
- binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
+ free_node = binder_dec_node_nilocked(node,
+ cmd == BC_ACQUIRE_DONE, 0);
+ WARN_ON(free_node);
binder_debug(BINDER_DEBUG_USER_REFS,
- "%d:%d %s node %d ls %d lw %d\n",
+ "%d:%d %s node %d ls %d lw %d tr %d\n",
proc->pid, thread->pid,
cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
- node->debug_id, node->local_strong_refs, node->local_weak_refs);
+ node->debug_id, node->local_strong_refs,
+ node->local_weak_refs, node->tmp_refs);
+ binder_node_inner_unlock(node);
+ binder_put_node(node);
break;
}
case BC_ATTEMPT_ACQUIRE:
@@ -2392,7 +3319,8 @@ static int binder_thread_write(struct binder_proc *proc,
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
- buffer = binder_buffer_lookup(proc, data_ptr);
+ buffer = binder_alloc_prepare_to_free(&proc->alloc,
+ data_ptr);
if (buffer == NULL) {
binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
proc->pid, thread->pid, (u64)data_ptr);
@@ -2414,15 +3342,27 @@ static int binder_thread_write(struct binder_proc *proc,
buffer->transaction = NULL;
}
if (buffer->async_transaction && buffer->target_node) {
- BUG_ON(!buffer->target_node->has_async_transaction);
- if (list_empty(&buffer->target_node->async_todo))
- buffer->target_node->has_async_transaction = 0;
- else
- list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
+ struct binder_node *buf_node;
+ struct binder_work *w;
+
+ buf_node = buffer->target_node;
+ binder_node_inner_lock(buf_node);
+ BUG_ON(!buf_node->has_async_transaction);
+ BUG_ON(buf_node->proc != proc);
+ w = binder_dequeue_work_head_ilocked(
+ &buf_node->async_todo);
+ if (!w) {
+ buf_node->has_async_transaction = 0;
+ } else {
+ binder_enqueue_work_ilocked(
+ w, &proc->todo);
+ binder_wakeup_proc_ilocked(proc);
+ }
+ binder_node_inner_unlock(buf_node);
}
trace_binder_transaction_buffer_release(buffer);
binder_transaction_buffer_release(proc, buffer, NULL);
- binder_free_buf(proc, buffer);
+ binder_alloc_free_buf(&proc->alloc, buffer);
break;
}
@@ -2453,6 +3393,7 @@ static int binder_thread_write(struct binder_proc *proc,
binder_debug(BINDER_DEBUG_THREADS,
"%d:%d BC_REGISTER_LOOPER\n",
proc->pid, thread->pid);
+ binder_inner_proc_lock(proc);
if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
thread->looper |= BINDER_LOOPER_STATE_INVALID;
binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
@@ -2466,6 +3407,7 @@ static int binder_thread_write(struct binder_proc *proc,
proc->requested_threads_started++;
}
thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
+ binder_inner_proc_unlock(proc);
break;
case BC_ENTER_LOOPER:
binder_debug(BINDER_DEBUG_THREADS,
@@ -2490,7 +3432,7 @@ static int binder_thread_write(struct binder_proc *proc,
uint32_t target;
binder_uintptr_t cookie;
struct binder_ref *ref;
- struct binder_ref_death *death;
+ struct binder_ref_death *death = NULL;
if (get_user(target, (uint32_t __user *)ptr))
return -EFAULT;
@@ -2498,7 +3440,29 @@ static int binder_thread_write(struct binder_proc *proc,
if (get_user(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
- ref = binder_get_ref(proc, target, false);
+ if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
+ /*
+ * Allocate memory for death notification
+ * before taking lock
+ */
+ death = kzalloc(sizeof(*death), GFP_KERNEL);
+ if (death == NULL) {
+ WARN_ON(thread->return_error.cmd !=
+ BR_OK);
+ thread->return_error.cmd = BR_ERROR;
+ binder_enqueue_work(
+ thread->proc,
+ &thread->return_error.work,
+ &thread->todo);
+ binder_debug(
+ BINDER_DEBUG_FAILED_TRANSACTION,
+ "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
+ proc->pid, thread->pid);
+ break;
+ }
+ }
+ binder_proc_lock(proc);
+ ref = binder_get_ref_olocked(proc, target, false);
if (ref == NULL) {
binder_user_error("%d:%d %s invalid ref %d\n",
proc->pid, thread->pid,
@@ -2506,6 +3470,8 @@ static int binder_thread_write(struct binder_proc *proc,
"BC_REQUEST_DEATH_NOTIFICATION" :
"BC_CLEAR_DEATH_NOTIFICATION",
target);
+ binder_proc_unlock(proc);
+ kfree(death);
break;
}
@@ -2515,21 +3481,18 @@ static int binder_thread_write(struct binder_proc *proc,
cmd == BC_REQUEST_DEATH_NOTIFICATION ?
"BC_REQUEST_DEATH_NOTIFICATION" :
"BC_CLEAR_DEATH_NOTIFICATION",
- (u64)cookie, ref->debug_id, ref->desc,
- ref->strong, ref->weak, ref->node->debug_id);
+ (u64)cookie, ref->data.debug_id,
+ ref->data.desc, ref->data.strong,
+ ref->data.weak, ref->node->debug_id);
+ binder_node_lock(ref->node);
if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
if (ref->death) {
binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
proc->pid, thread->pid);
- break;
- }
- death = kzalloc(sizeof(*death), GFP_KERNEL);
- if (death == NULL) {
- thread->return_error = BR_ERROR;
- binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
- "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
- proc->pid, thread->pid);
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
+ kfree(death);
break;
}
binder_stats_created(BINDER_STAT_DEATH);
@@ -2538,17 +3501,19 @@ static int binder_thread_write(struct binder_proc *proc,
ref->death = death;
if (ref->node->proc == NULL) {
ref->death->work.type = BINDER_WORK_DEAD_BINDER;
- if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
- list_add_tail(&ref->death->work.entry, &thread->todo);
- } else {
- list_add_tail(&ref->death->work.entry, &proc->todo);
- wake_up_interruptible(&proc->wait);
- }
+
+ binder_inner_proc_lock(proc);
+ binder_enqueue_work_ilocked(
+ &ref->death->work, &proc->todo);
+ binder_wakeup_proc_ilocked(proc);
+ binder_inner_proc_unlock(proc);
}
} else {
if (ref->death == NULL) {
binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
proc->pid, thread->pid);
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
break;
}
death = ref->death;
@@ -2557,22 +3522,35 @@ static int binder_thread_write(struct binder_proc *proc,
proc->pid, thread->pid,
(u64)death->cookie,
(u64)cookie);
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
break;
}
ref->death = NULL;
+ binder_inner_proc_lock(proc);
if (list_empty(&death->work.entry)) {
death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
- if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
- list_add_tail(&death->work.entry, &thread->todo);
- } else {
- list_add_tail(&death->work.entry, &proc->todo);
- wake_up_interruptible(&proc->wait);
+ if (thread->looper &
+ (BINDER_LOOPER_STATE_REGISTERED |
+ BINDER_LOOPER_STATE_ENTERED))
+ binder_enqueue_work_ilocked(
+ &death->work,
+ &thread->todo);
+ else {
+ binder_enqueue_work_ilocked(
+ &death->work,
+ &proc->todo);
+ binder_wakeup_proc_ilocked(
+ proc);
}
} else {
BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
}
+ binder_inner_proc_unlock(proc);
}
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
} break;
case BC_DEAD_BINDER_DONE: {
struct binder_work *w;
@@ -2583,8 +3561,13 @@ static int binder_thread_write(struct binder_proc *proc,
return -EFAULT;
ptr += sizeof(cookie);
- list_for_each_entry(w, &proc->delivered_death, entry) {
- struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
+ binder_inner_proc_lock(proc);
+ list_for_each_entry(w, &proc->delivered_death,
+ entry) {
+ struct binder_ref_death *tmp_death =
+ container_of(w,
+ struct binder_ref_death,
+ work);
if (tmp_death->cookie == cookie) {
death = tmp_death;
@@ -2598,19 +3581,25 @@ static int binder_thread_write(struct binder_proc *proc,
if (death == NULL) {
binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
proc->pid, thread->pid, (u64)cookie);
+ binder_inner_proc_unlock(proc);
break;
}
-
- list_del_init(&death->work.entry);
+ binder_dequeue_work_ilocked(&death->work);
if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
- if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
- list_add_tail(&death->work.entry, &thread->todo);
- } else {
- list_add_tail(&death->work.entry, &proc->todo);
- wake_up_interruptible(&proc->wait);
+ if (thread->looper &
+ (BINDER_LOOPER_STATE_REGISTERED |
+ BINDER_LOOPER_STATE_ENTERED))
+ binder_enqueue_work_ilocked(
+ &death->work, &thread->todo);
+ else {
+ binder_enqueue_work_ilocked(
+ &death->work,
+ &proc->todo);
+ binder_wakeup_proc_ilocked(proc);
}
}
+ binder_inner_proc_unlock(proc);
} break;
default:
@@ -2628,23 +3617,79 @@ static void binder_stat_br(struct binder_proc *proc,
{
trace_binder_return(cmd);
if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
- binder_stats.br[_IOC_NR(cmd)]++;
- proc->stats.br[_IOC_NR(cmd)]++;
- thread->stats.br[_IOC_NR(cmd)]++;
+ atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
+ atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
+ atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
}
}
-static int binder_has_proc_work(struct binder_proc *proc,
- struct binder_thread *thread)
+static int binder_has_thread_work(struct binder_thread *thread)
{
- return !list_empty(&proc->todo) ||
- (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
+ return !binder_worklist_empty(thread->proc, &thread->todo) ||
+ thread->looper_need_return;
}
-static int binder_has_thread_work(struct binder_thread *thread)
+static int binder_put_node_cmd(struct binder_proc *proc,
+ struct binder_thread *thread,
+ void __user **ptrp,
+ binder_uintptr_t node_ptr,
+ binder_uintptr_t node_cookie,
+ int node_debug_id,
+ uint32_t cmd, const char *cmd_name)
{
- return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
- (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
+ void __user *ptr = *ptrp;
+
+ if (put_user(cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+
+ if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(binder_uintptr_t);
+
+ if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(binder_uintptr_t);
+
+ binder_stat_br(proc, thread, cmd);
+ binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
+ proc->pid, thread->pid, cmd_name, node_debug_id,
+ (u64)node_ptr, (u64)node_cookie);
+
+ *ptrp = ptr;
+ return 0;
+}
+
+static int binder_wait_for_work(struct binder_thread *thread,
+ bool do_proc_work)
+{
+ DEFINE_WAIT(wait);
+ struct binder_proc *proc = thread->proc;
+ int ret = 0;
+
+ freezer_do_not_count();
+ binder_inner_proc_lock(proc);
+ for (;;) {
+ prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
+ if (binder_has_work_ilocked(thread, do_proc_work))
+ break;
+ if (do_proc_work)
+ list_add(&thread->waiting_thread_node,
+ &proc->waiting_threads);
+ binder_inner_proc_unlock(proc);
+ schedule();
+ binder_inner_proc_lock(proc);
+ list_del_init(&thread->waiting_thread_node);
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ }
+ finish_wait(&thread->wait, &wait);
+ binder_inner_proc_unlock(proc);
+ freezer_count();
+
+ return ret;
}
static int binder_thread_read(struct binder_proc *proc,
@@ -2666,37 +3711,15 @@ static int binder_thread_read(struct binder_proc *proc,
}
retry:
- wait_for_proc_work = thread->transaction_stack == NULL &&
- list_empty(&thread->todo);
-
- if (thread->return_error != BR_OK && ptr < end) {
- if (thread->return_error2 != BR_OK) {
- if (put_user(thread->return_error2, (uint32_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(uint32_t);
- binder_stat_br(proc, thread, thread->return_error2);
- if (ptr == end)
- goto done;
- thread->return_error2 = BR_OK;
- }
- if (put_user(thread->return_error, (uint32_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(uint32_t);
- binder_stat_br(proc, thread, thread->return_error);
- thread->return_error = BR_OK;
- goto done;
- }
-
+ binder_inner_proc_lock(proc);
+ wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
+ binder_inner_proc_unlock(proc);
thread->looper |= BINDER_LOOPER_STATE_WAITING;
- if (wait_for_proc_work)
- proc->ready_threads++;
-
- binder_unlock(__func__);
trace_binder_wait_for_work(wait_for_proc_work,
!!thread->transaction_stack,
- !list_empty(&thread->todo));
+ !binder_worklist_empty(proc, &thread->todo));
if (wait_for_proc_work) {
if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED))) {
@@ -2706,23 +3729,15 @@ retry:
binder_stop_on_user_error < 2);
}
binder_set_nice(proc->default_priority);
- if (non_block) {
- if (!binder_has_proc_work(proc, thread))
- ret = -EAGAIN;
- } else
- ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
- } else {
- if (non_block) {
- if (!binder_has_thread_work(thread))
- ret = -EAGAIN;
- } else
- ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
}
- binder_lock(__func__);
+ if (non_block) {
+ if (!binder_has_work(thread, wait_for_proc_work))
+ ret = -EAGAIN;
+ } else {
+ ret = binder_wait_for_work(thread, wait_for_proc_work);
+ }
- if (wait_for_proc_work)
- proc->ready_threads--;
thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
if (ret)
@@ -2731,31 +3746,52 @@ retry:
while (1) {
uint32_t cmd;
struct binder_transaction_data tr;
- struct binder_work *w;
+ struct binder_work *w = NULL;
+ struct list_head *list = NULL;
struct binder_transaction *t = NULL;
+ struct binder_thread *t_from;
+
+ binder_inner_proc_lock(proc);
+ if (!binder_worklist_empty_ilocked(&thread->todo))
+ list = &thread->todo;
+ else if (!binder_worklist_empty_ilocked(&proc->todo) &&
+ wait_for_proc_work)
+ list = &proc->todo;
+ else {
+ binder_inner_proc_unlock(proc);
- if (!list_empty(&thread->todo)) {
- w = list_first_entry(&thread->todo, struct binder_work,
- entry);
- } else if (!list_empty(&proc->todo) && wait_for_proc_work) {
- w = list_first_entry(&proc->todo, struct binder_work,
- entry);
- } else {
/* no data added */
- if (ptr - buffer == 4 &&
- !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
+ if (ptr - buffer == 4 && !thread->looper_need_return)
goto retry;
break;
}
- if (end - ptr < sizeof(tr) + 4)
+ if (end - ptr < sizeof(tr) + 4) {
+ binder_inner_proc_unlock(proc);
break;
+ }
+ w = binder_dequeue_work_head_ilocked(list);
switch (w->type) {
case BINDER_WORK_TRANSACTION: {
+ binder_inner_proc_unlock(proc);
t = container_of(w, struct binder_transaction, work);
} break;
+ case BINDER_WORK_RETURN_ERROR: {
+ struct binder_error *e = container_of(
+ w, struct binder_error, work);
+
+ WARN_ON(e->cmd == BR_OK);
+ binder_inner_proc_unlock(proc);
+ if (put_user(e->cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ e->cmd = BR_OK;
+ ptr += sizeof(uint32_t);
+
+ binder_stat_br(proc, thread, e->cmd);
+ } break;
case BINDER_WORK_TRANSACTION_COMPLETE: {
+ binder_inner_proc_unlock(proc);
cmd = BR_TRANSACTION_COMPLETE;
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
@@ -2765,113 +3801,134 @@ retry:
binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
"%d:%d BR_TRANSACTION_COMPLETE\n",
proc->pid, thread->pid);
-
- list_del(&w->entry);
kfree(w);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
} break;
case BINDER_WORK_NODE: {
struct binder_node *node = container_of(w, struct binder_node, work);
- uint32_t cmd = BR_NOOP;
- const char *cmd_name;
- int strong = node->internal_strong_refs || node->local_strong_refs;
- int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
-
- if (weak && !node->has_weak_ref) {
- cmd = BR_INCREFS;
- cmd_name = "BR_INCREFS";
+ int strong, weak;
+ binder_uintptr_t node_ptr = node->ptr;
+ binder_uintptr_t node_cookie = node->cookie;
+ int node_debug_id = node->debug_id;
+ int has_weak_ref;
+ int has_strong_ref;
+ void __user *orig_ptr = ptr;
+
+ BUG_ON(proc != node->proc);
+ strong = node->internal_strong_refs ||
+ node->local_strong_refs;
+ weak = !hlist_empty(&node->refs) ||
+ node->local_weak_refs ||
+ node->tmp_refs || strong;
+ has_strong_ref = node->has_strong_ref;
+ has_weak_ref = node->has_weak_ref;
+
+ if (weak && !has_weak_ref) {
node->has_weak_ref = 1;
node->pending_weak_ref = 1;
node->local_weak_refs++;
- } else if (strong && !node->has_strong_ref) {
- cmd = BR_ACQUIRE;
- cmd_name = "BR_ACQUIRE";
+ }
+ if (strong && !has_strong_ref) {
node->has_strong_ref = 1;
node->pending_strong_ref = 1;
node->local_strong_refs++;
- } else if (!strong && node->has_strong_ref) {
- cmd = BR_RELEASE;
- cmd_name = "BR_RELEASE";
+ }
+ if (!strong && has_strong_ref)
node->has_strong_ref = 0;
- } else if (!weak && node->has_weak_ref) {
- cmd = BR_DECREFS;
- cmd_name = "BR_DECREFS";
+ if (!weak && has_weak_ref)
node->has_weak_ref = 0;
- }
- if (cmd != BR_NOOP) {
- if (put_user(cmd, (uint32_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(uint32_t);
- if (put_user(node->ptr,
- (binder_uintptr_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(binder_uintptr_t);
- if (put_user(node->cookie,
- (binder_uintptr_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(binder_uintptr_t);
-
- binder_stat_br(proc, thread, cmd);
- binder_debug(BINDER_DEBUG_USER_REFS,
- "%d:%d %s %d u%016llx c%016llx\n",
- proc->pid, thread->pid, cmd_name,
- node->debug_id,
- (u64)node->ptr, (u64)node->cookie);
- } else {
- list_del_init(&w->entry);
- if (!weak && !strong) {
- binder_debug(BINDER_DEBUG_INTERNAL_REFS,
- "%d:%d node %d u%016llx c%016llx deleted\n",
- proc->pid, thread->pid,
- node->debug_id,
- (u64)node->ptr,
- (u64)node->cookie);
- rb_erase(&node->rb_node, &proc->nodes);
- kfree(node);
- binder_stats_deleted(BINDER_STAT_NODE);
- } else {
- binder_debug(BINDER_DEBUG_INTERNAL_REFS,
- "%d:%d node %d u%016llx c%016llx state unchanged\n",
- proc->pid, thread->pid,
- node->debug_id,
- (u64)node->ptr,
- (u64)node->cookie);
- }
- }
+ if (!weak && !strong) {
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "%d:%d node %d u%016llx c%016llx deleted\n",
+ proc->pid, thread->pid,
+ node_debug_id,
+ (u64)node_ptr,
+ (u64)node_cookie);
+ rb_erase(&node->rb_node, &proc->nodes);
+ binder_inner_proc_unlock(proc);
+ binder_node_lock(node);
+ /*
+ * Acquire the node lock before freeing the
+ * node to serialize with other threads that
+ * may have been holding the node lock while
+ * decrementing this node (avoids race where
+ * this thread frees while the other thread
+ * is unlocking the node after the final
+ * decrement)
+ */
+ binder_node_unlock(node);
+ binder_free_node(node);
+ } else
+ binder_inner_proc_unlock(proc);
+
+ if (weak && !has_weak_ref)
+ ret = binder_put_node_cmd(
+ proc, thread, &ptr, node_ptr,
+ node_cookie, node_debug_id,
+ BR_INCREFS, "BR_INCREFS");
+ if (!ret && strong && !has_strong_ref)
+ ret = binder_put_node_cmd(
+ proc, thread, &ptr, node_ptr,
+ node_cookie, node_debug_id,
+ BR_ACQUIRE, "BR_ACQUIRE");
+ if (!ret && !strong && has_strong_ref)
+ ret = binder_put_node_cmd(
+ proc, thread, &ptr, node_ptr,
+ node_cookie, node_debug_id,
+ BR_RELEASE, "BR_RELEASE");
+ if (!ret && !weak && has_weak_ref)
+ ret = binder_put_node_cmd(
+ proc, thread, &ptr, node_ptr,
+ node_cookie, node_debug_id,
+ BR_DECREFS, "BR_DECREFS");
+ if (orig_ptr == ptr)
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "%d:%d node %d u%016llx c%016llx state unchanged\n",
+ proc->pid, thread->pid,
+ node_debug_id,
+ (u64)node_ptr,
+ (u64)node_cookie);
+ if (ret)
+ return ret;
} break;
case BINDER_WORK_DEAD_BINDER:
case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
struct binder_ref_death *death;
uint32_t cmd;
+ binder_uintptr_t cookie;
death = container_of(w, struct binder_ref_death, work);
if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
else
cmd = BR_DEAD_BINDER;
- if (put_user(cmd, (uint32_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(uint32_t);
- if (put_user(death->cookie,
- (binder_uintptr_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(binder_uintptr_t);
- binder_stat_br(proc, thread, cmd);
+ cookie = death->cookie;
+
binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
"%d:%d %s %016llx\n",
proc->pid, thread->pid,
cmd == BR_DEAD_BINDER ?
"BR_DEAD_BINDER" :
"BR_CLEAR_DEATH_NOTIFICATION_DONE",
- (u64)death->cookie);
-
+ (u64)cookie);
if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
- list_del(&w->entry);
+ binder_inner_proc_unlock(proc);
kfree(death);
binder_stats_deleted(BINDER_STAT_DEATH);
- } else
- list_move(&w->entry, &proc->delivered_death);
+ } else {
+ binder_enqueue_work_ilocked(
+ w, &proc->delivered_death);
+ binder_inner_proc_unlock(proc);
+ }
+ if (put_user(cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (put_user(cookie,
+ (binder_uintptr_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(binder_uintptr_t);
+ binder_stat_br(proc, thread, cmd);
if (cmd == BR_DEAD_BINDER)
goto done; /* DEAD_BINDER notifications can cause transactions */
} break;
@@ -2903,8 +3960,9 @@ retry:
tr.flags = t->flags;
tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
- if (t->from) {
- struct task_struct *sender = t->from->proc->tsk;
+ t_from = binder_get_txn_from(t);
+ if (t_from) {
+ struct task_struct *sender = t_from->proc->tsk;
tr.sender_pid = task_tgid_nr_ns(sender,
task_active_pid_ns(current));
@@ -2914,18 +3972,24 @@ retry:
tr.data_size = t->buffer->data_size;
tr.offsets_size = t->buffer->offsets_size;
- tr.data.ptr.buffer = (binder_uintptr_t)(
- (uintptr_t)t->buffer->data +
- proc->user_buffer_offset);
+ tr.data.ptr.buffer = (binder_uintptr_t)
+ ((uintptr_t)t->buffer->data +
+ binder_alloc_get_user_buffer_offset(&proc->alloc));
tr.data.ptr.offsets = tr.data.ptr.buffer +
ALIGN(t->buffer->data_size,
sizeof(void *));
- if (put_user(cmd, (uint32_t __user *)ptr))
+ if (put_user(cmd, (uint32_t __user *)ptr)) {
+ if (t_from)
+ binder_thread_dec_tmpref(t_from);
return -EFAULT;
+ }
ptr += sizeof(uint32_t);
- if (copy_to_user(ptr, &tr, sizeof(tr)))
+ if (copy_to_user(ptr, &tr, sizeof(tr))) {
+ if (t_from)
+ binder_thread_dec_tmpref(t_from);
return -EFAULT;
+ }
ptr += sizeof(tr);
trace_binder_transaction_received(t);
@@ -2935,21 +3999,22 @@ retry:
proc->pid, thread->pid,
(cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
"BR_REPLY",
- t->debug_id, t->from ? t->from->proc->pid : 0,
- t->from ? t->from->pid : 0, cmd,
+ t->debug_id, t_from ? t_from->proc->pid : 0,
+ t_from ? t_from->pid : 0, cmd,
t->buffer->data_size, t->buffer->offsets_size,
(u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
- list_del(&t->work.entry);
+ if (t_from)
+ binder_thread_dec_tmpref(t_from);
t->buffer->allow_user_free = 1;
if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
+ binder_inner_proc_lock(thread->proc);
t->to_parent = thread->transaction_stack;
t->to_thread = thread;
thread->transaction_stack = t;
+ binder_inner_proc_unlock(thread->proc);
} else {
- t->buffer->transaction = NULL;
- kfree(t);
- binder_stats_deleted(BINDER_STAT_TRANSACTION);
+ binder_free_transaction(t);
}
break;
}
@@ -2957,29 +4022,36 @@ retry:
done:
*consumed = ptr - buffer;
- if (proc->requested_threads + proc->ready_threads == 0 &&
+ binder_inner_proc_lock(proc);
+ if (proc->requested_threads == 0 &&
+ list_empty(&thread->proc->waiting_threads) &&
proc->requested_threads_started < proc->max_threads &&
(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
/*spawn a new thread if we leave this out */) {
proc->requested_threads++;
+ binder_inner_proc_unlock(proc);
binder_debug(BINDER_DEBUG_THREADS,
"%d:%d BR_SPAWN_LOOPER\n",
proc->pid, thread->pid);
if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
return -EFAULT;
binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
- }
+ } else
+ binder_inner_proc_unlock(proc);
return 0;
}
-static void binder_release_work(struct list_head *list)
+static void binder_release_work(struct binder_proc *proc,
+ struct list_head *list)
{
struct binder_work *w;
- while (!list_empty(list)) {
- w = list_first_entry(list, struct binder_work, entry);
- list_del_init(&w->entry);
+ while (1) {
+ w = binder_dequeue_work_head(proc, list);
+ if (!w)
+ return;
+
switch (w->type) {
case BINDER_WORK_TRANSACTION: {
struct binder_transaction *t;
@@ -2992,11 +4064,17 @@ static void binder_release_work(struct list_head *list)
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
"undelivered transaction %d\n",
t->debug_id);
- t->buffer->transaction = NULL;
- kfree(t);
- binder_stats_deleted(BINDER_STAT_TRANSACTION);
+ binder_free_transaction(t);
}
} break;
+ case BINDER_WORK_RETURN_ERROR: {
+ struct binder_error *e = container_of(
+ w, struct binder_error, work);
+
+ binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
+ "undelivered TRANSACTION_ERROR: %u\n",
+ e->cmd);
+ } break;
case BINDER_WORK_TRANSACTION_COMPLETE: {
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
"undelivered TRANSACTION_COMPLETE\n");
@@ -3023,7 +4101,8 @@ static void binder_release_work(struct list_head *list)
}
-static struct binder_thread *binder_get_thread(struct binder_proc *proc)
+static struct binder_thread *binder_get_thread_ilocked(
+ struct binder_proc *proc, struct binder_thread *new_thread)
{
struct binder_thread *thread = NULL;
struct rb_node *parent = NULL;
@@ -3038,38 +4117,99 @@ static struct binder_thread *binder_get_thread(struct binder_proc *proc)
else if (current->pid > thread->pid)
p = &(*p)->rb_right;
else
- break;
+ return thread;
}
- if (*p == NULL) {
- thread = kzalloc(sizeof(*thread), GFP_KERNEL);
- if (thread == NULL)
+ if (!new_thread)
+ return NULL;
+ thread = new_thread;
+ binder_stats_created(BINDER_STAT_THREAD);
+ thread->proc = proc;
+ thread->pid = current->pid;
+ atomic_set(&thread->tmp_ref, 0);
+ init_waitqueue_head(&thread->wait);
+ INIT_LIST_HEAD(&thread->todo);
+ rb_link_node(&thread->rb_node, parent, p);
+ rb_insert_color(&thread->rb_node, &proc->threads);
+ thread->looper_need_return = true;
+ thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
+ thread->return_error.cmd = BR_OK;
+ thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
+ thread->reply_error.cmd = BR_OK;
+ INIT_LIST_HEAD(&new_thread->waiting_thread_node);
+ return thread;
+}
+
+static struct binder_thread *binder_get_thread(struct binder_proc *proc)
+{
+ struct binder_thread *thread;
+ struct binder_thread *new_thread;
+
+ binder_inner_proc_lock(proc);
+ thread = binder_get_thread_ilocked(proc, NULL);
+ binder_inner_proc_unlock(proc);
+ if (!thread) {
+ new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
+ if (new_thread == NULL)
return NULL;
- binder_stats_created(BINDER_STAT_THREAD);
- thread->proc = proc;
- thread->pid = current->pid;
- init_waitqueue_head(&thread->wait);
- INIT_LIST_HEAD(&thread->todo);
- rb_link_node(&thread->rb_node, parent, p);
- rb_insert_color(&thread->rb_node, &proc->threads);
- thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
- thread->return_error = BR_OK;
- thread->return_error2 = BR_OK;
+ binder_inner_proc_lock(proc);
+ thread = binder_get_thread_ilocked(proc, new_thread);
+ binder_inner_proc_unlock(proc);
+ if (thread != new_thread)
+ kfree(new_thread);
}
return thread;
}
-static int binder_free_thread(struct binder_proc *proc,
- struct binder_thread *thread)
+static void binder_free_proc(struct binder_proc *proc)
+{
+ BUG_ON(!list_empty(&proc->todo));
+ BUG_ON(!list_empty(&proc->delivered_death));
+ binder_alloc_deferred_release(&proc->alloc);
+ put_task_struct(proc->tsk);
+ binder_stats_deleted(BINDER_STAT_PROC);
+ kfree(proc);
+}
+
+static void binder_free_thread(struct binder_thread *thread)
+{
+ BUG_ON(!list_empty(&thread->todo));
+ binder_stats_deleted(BINDER_STAT_THREAD);
+ binder_proc_dec_tmpref(thread->proc);
+ kfree(thread);
+}
+
+static int binder_thread_release(struct binder_proc *proc,
+ struct binder_thread *thread)
{
struct binder_transaction *t;
struct binder_transaction *send_reply = NULL;
int active_transactions = 0;
+ struct binder_transaction *last_t = NULL;
+ binder_inner_proc_lock(thread->proc);
+ /*
+ * take a ref on the proc so it survives
+ * after we remove this thread from proc->threads.
+ * The corresponding dec is when we actually
+ * free the thread in binder_free_thread()
+ */
+ proc->tmp_ref++;
+ /*
+ * take a ref on this thread to ensure it
+ * survives while we are releasing it
+ */
+ atomic_inc(&thread->tmp_ref);
rb_erase(&thread->rb_node, &proc->threads);
t = thread->transaction_stack;
- if (t && t->to_thread == thread)
- send_reply = t;
+ if (t) {
+ spin_lock(&t->lock);
+ if (t->to_thread == thread)
+ send_reply = t;
+ }
+ thread->is_dead = true;
+
while (t) {
+ last_t = t;
active_transactions++;
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
"release %d:%d transaction %d %s, still active\n",
@@ -3090,12 +4230,16 @@ static int binder_free_thread(struct binder_proc *proc,
t = t->from_parent;
} else
BUG();
+ spin_unlock(&last_t->lock);
+ if (t)
+ spin_lock(&t->lock);
}
+ binder_inner_proc_unlock(thread->proc);
+
if (send_reply)
binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
- binder_release_work(&thread->todo);
- kfree(thread);
- binder_stats_deleted(BINDER_STAT_THREAD);
+ binder_release_work(proc, &thread->todo);
+ binder_thread_dec_tmpref(thread);
return active_transactions;
}
@@ -3104,30 +4248,24 @@ static unsigned int binder_poll(struct file *filp,
{
struct binder_proc *proc = filp->private_data;
struct binder_thread *thread = NULL;
- int wait_for_proc_work;
-
- binder_lock(__func__);
+ bool wait_for_proc_work;
thread = binder_get_thread(proc);
- wait_for_proc_work = thread->transaction_stack == NULL &&
- list_empty(&thread->todo) && thread->return_error == BR_OK;
+ binder_inner_proc_lock(thread->proc);
+ thread->looper |= BINDER_LOOPER_STATE_POLL;
+ wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
- binder_unlock(__func__);
+ binder_inner_proc_unlock(thread->proc);
+
+ if (binder_has_work(thread, wait_for_proc_work))
+ return POLLIN;
+
+ poll_wait(filp, &thread->wait, wait);
+
+ if (binder_has_thread_work(thread))
+ return POLLIN;
- if (wait_for_proc_work) {
- if (binder_has_proc_work(proc, thread))
- return POLLIN;
- poll_wait(filp, &proc->wait, wait);
- if (binder_has_proc_work(proc, thread))
- return POLLIN;
- } else {
- if (binder_has_thread_work(thread))
- return POLLIN;
- poll_wait(filp, &thread->wait, wait);
- if (binder_has_thread_work(thread))
- return POLLIN;
- }
return 0;
}
@@ -3174,8 +4312,10 @@ static int binder_ioctl_write_read(struct file *filp,
&bwr.read_consumed,
filp->f_flags & O_NONBLOCK);
trace_binder_read_done(ret);
- if (!list_empty(&proc->todo))
- wake_up_interruptible(&proc->wait);
+ binder_inner_proc_lock(proc);
+ if (!binder_worklist_empty_ilocked(&proc->todo))
+ binder_wakeup_proc_ilocked(proc);
+ binder_inner_proc_unlock(proc);
if (ret < 0) {
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
@@ -3200,9 +4340,10 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp)
int ret = 0;
struct binder_proc *proc = filp->private_data;
struct binder_context *context = proc->context;
-
+ struct binder_node *new_node;
kuid_t curr_euid = current_euid();
+ mutex_lock(&context->context_mgr_node_lock);
if (context->binder_context_mgr_node) {
pr_err("BINDER_SET_CONTEXT_MGR already set\n");
ret = -EBUSY;
@@ -3223,19 +4364,49 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp)
} else {
context->binder_context_mgr_uid = curr_euid;
}
- context->binder_context_mgr_node = binder_new_node(proc, 0, 0);
- if (!context->binder_context_mgr_node) {
+ new_node = binder_new_node(proc, NULL);
+ if (!new_node) {
ret = -ENOMEM;
goto out;
}
- context->binder_context_mgr_node->local_weak_refs++;
- context->binder_context_mgr_node->local_strong_refs++;
- context->binder_context_mgr_node->has_strong_ref = 1;
- context->binder_context_mgr_node->has_weak_ref = 1;
+ binder_node_lock(new_node);
+ new_node->local_weak_refs++;
+ new_node->local_strong_refs++;
+ new_node->has_strong_ref = 1;
+ new_node->has_weak_ref = 1;
+ context->binder_context_mgr_node = new_node;
+ binder_node_unlock(new_node);
+ binder_put_node(new_node);
out:
+ mutex_unlock(&context->context_mgr_node_lock);
return ret;
}
+static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
+ struct binder_node_debug_info *info)
+{
+ struct rb_node *n;
+ binder_uintptr_t ptr = info->ptr;
+
+ memset(info, 0, sizeof(*info));
+
+ binder_inner_proc_lock(proc);
+ for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
+ struct binder_node *node = rb_entry(n, struct binder_node,
+ rb_node);
+ if (node->ptr > ptr) {
+ info->ptr = node->ptr;
+ info->cookie = node->cookie;
+ info->has_strong_ref = node->has_strong_ref;
+ info->has_weak_ref = node->has_weak_ref;
+ break;
+ }
+ }
+ binder_inner_proc_unlock(proc);
+
+ return 0;
+}
+
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int ret;
@@ -3247,17 +4418,14 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/*pr_info("binder_ioctl: %d:%d %x %lx\n",
proc->pid, current->pid, cmd, arg);*/
- if (unlikely(current->mm != proc->vma_vm_mm)) {
- pr_err("current mm mismatch proc mm\n");
- return -EINVAL;
- }
+ binder_selftest_alloc(&proc->alloc);
+
trace_binder_ioctl(cmd, arg);
ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
if (ret)
goto err_unlocked;
- binder_lock(__func__);
thread = binder_get_thread(proc);
if (thread == NULL) {
ret = -ENOMEM;
@@ -3270,12 +4438,19 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (ret)
goto err;
break;
- case BINDER_SET_MAX_THREADS:
- if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
+ case BINDER_SET_MAX_THREADS: {
+ int max_threads;
+
+ if (copy_from_user(&max_threads, ubuf,
+ sizeof(max_threads))) {
ret = -EINVAL;
goto err;
}
+ binder_inner_proc_lock(proc);
+ proc->max_threads = max_threads;
+ binder_inner_proc_unlock(proc);
break;
+ }
case BINDER_SET_CONTEXT_MGR:
ret = binder_ioctl_set_ctx_mgr(filp);
if (ret)
@@ -3284,7 +4459,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case BINDER_THREAD_EXIT:
binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
proc->pid, thread->pid);
- binder_free_thread(proc, thread);
+ binder_thread_release(proc, thread);
thread = NULL;
break;
case BINDER_VERSION: {
@@ -3301,6 +4476,24 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
break;
}
+ case BINDER_GET_NODE_DEBUG_INFO: {
+ struct binder_node_debug_info info;
+
+ if (copy_from_user(&info, ubuf, sizeof(info))) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ ret = binder_ioctl_get_node_debug_info(proc, &info);
+ if (ret < 0)
+ goto err;
+
+ if (copy_to_user(ubuf, &info, sizeof(info))) {
+ ret = -EFAULT;
+ goto err;
+ }
+ break;
+ }
default:
ret = -EINVAL;
goto err;
@@ -3308,8 +4501,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
ret = 0;
err:
if (thread)
- thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
- binder_unlock(__func__);
+ thread->looper_need_return = false;
wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
if (ret && ret != -ERESTARTSYS)
pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
@@ -3338,8 +4530,7 @@ static void binder_vma_close(struct vm_area_struct *vma)
proc->pid, vma->vm_start, vma->vm_end,
(vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
(unsigned long)pgprot_val(vma->vm_page_prot));
- proc->vma = NULL;
- proc->vma_vm_mm = NULL;
+ binder_alloc_vma_close(&proc->alloc);
binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}
@@ -3357,20 +4548,18 @@ static const struct vm_operations_struct binder_vm_ops = {
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
int ret;
- struct vm_struct *area;
struct binder_proc *proc = filp->private_data;
const char *failure_string;
- struct binder_buffer *buffer;
- if (proc->tsk != current)
+ if (proc->tsk != current->group_leader)
return -EINVAL;
if ((vma->vm_end - vma->vm_start) > SZ_4M)
vma->vm_end = vma->vm_start + SZ_4M;
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
- "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
- proc->pid, vma->vm_start, vma->vm_end,
+ "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
+ __func__, proc->pid, vma->vm_start, vma->vm_end,
(vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
(unsigned long)pgprot_val(vma->vm_page_prot));
@@ -3380,73 +4569,15 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
goto err_bad_arg;
}
vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
-
- mutex_lock(&binder_mmap_lock);
- if (proc->buffer) {
- ret = -EBUSY;
- failure_string = "already mapped";
- goto err_already_mapped;
- }
-
- area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
- if (area == NULL) {
- ret = -ENOMEM;
- failure_string = "get_vm_area";
- goto err_get_vm_area_failed;
- }
- proc->buffer = area->addr;
- proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
- mutex_unlock(&binder_mmap_lock);
-
-#ifdef CONFIG_CPU_CACHE_VIPT
- if (cache_is_vipt_aliasing()) {
- while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
- pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
- vma->vm_start += PAGE_SIZE;
- }
- }
-#endif
- proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
- if (proc->pages == NULL) {
- ret = -ENOMEM;
- failure_string = "alloc page array";
- goto err_alloc_pages_failed;
- }
- proc->buffer_size = vma->vm_end - vma->vm_start;
-
vma->vm_ops = &binder_vm_ops;
vma->vm_private_data = proc;
- if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
- ret = -ENOMEM;
- failure_string = "alloc small buf";
- goto err_alloc_small_buf_failed;
- }
- buffer = proc->buffer;
- INIT_LIST_HEAD(&proc->buffers);
- list_add(&buffer->entry, &proc->buffers);
- buffer->free = 1;
- binder_insert_free_buffer(proc, buffer);
- proc->free_async_space = proc->buffer_size / 2;
- barrier();
+ ret = binder_alloc_mmap_handler(&proc->alloc, vma);
+ if (ret)
+ return ret;
proc->files = get_files_struct(current);
- proc->vma = vma;
- proc->vma_vm_mm = vma->vm_mm;
-
- /*pr_info("binder_mmap: %d %lx-%lx maps %p\n",
- proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
return 0;
-err_alloc_small_buf_failed:
- kfree(proc->pages);
- proc->pages = NULL;
-err_alloc_pages_failed:
- mutex_lock(&binder_mmap_lock);
- vfree(proc->buffer);
- proc->buffer = NULL;
-err_get_vm_area_failed:
-err_already_mapped:
- mutex_unlock(&binder_mmap_lock);
err_bad_arg:
pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
@@ -3464,25 +4595,26 @@ static int binder_open(struct inode *nodp, struct file *filp)
proc = kzalloc(sizeof(*proc), GFP_KERNEL);
if (proc == NULL)
return -ENOMEM;
- get_task_struct(current);
- proc->tsk = current;
- proc->vma_vm_mm = current->mm;
+ spin_lock_init(&proc->inner_lock);
+ spin_lock_init(&proc->outer_lock);
+ get_task_struct(current->group_leader);
+ proc->tsk = current->group_leader;
INIT_LIST_HEAD(&proc->todo);
- init_waitqueue_head(&proc->wait);
proc->default_priority = task_nice(current);
binder_dev = container_of(filp->private_data, struct binder_device,
miscdev);
proc->context = &binder_dev->context;
-
- binder_lock(__func__);
+ binder_alloc_init(&proc->alloc);
binder_stats_created(BINDER_STAT_PROC);
- hlist_add_head(&proc->proc_node, &binder_procs);
proc->pid = current->group_leader->pid;
INIT_LIST_HEAD(&proc->delivered_death);
+ INIT_LIST_HEAD(&proc->waiting_threads);
filp->private_data = proc;
- binder_unlock(__func__);
+ mutex_lock(&binder_procs_lock);
+ hlist_add_head(&proc->proc_node, &binder_procs);
+ mutex_unlock(&binder_procs_lock);
if (binder_debugfs_dir_entry_proc) {
char strbuf[11];
@@ -3518,16 +4650,17 @@ static void binder_deferred_flush(struct binder_proc *proc)
struct rb_node *n;
int wake_count = 0;
+ binder_inner_proc_lock(proc);
for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
- thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
+ thread->looper_need_return = true;
if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
wake_up_interruptible(&thread->wait);
wake_count++;
}
}
- wake_up_interruptible_all(&proc->wait);
+ binder_inner_proc_unlock(proc);
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
"binder_flush: %d woke %d threads\n", proc->pid,
@@ -3548,13 +4681,21 @@ static int binder_node_release(struct binder_node *node, int refs)
{
struct binder_ref *ref;
int death = 0;
+ struct binder_proc *proc = node->proc;
- list_del_init(&node->work.entry);
- binder_release_work(&node->async_todo);
+ binder_release_work(proc, &node->async_todo);
- if (hlist_empty(&node->refs)) {
- kfree(node);
- binder_stats_deleted(BINDER_STAT_NODE);
+ binder_node_lock(node);
+ binder_inner_proc_lock(proc);
+ binder_dequeue_work_ilocked(&node->work);
+ /*
+ * The caller must have taken a temporary ref on the node.
+ */
+ BUG_ON(!node->tmp_refs);
+ if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
+ binder_inner_proc_unlock(proc);
+ binder_node_unlock(node);
+ binder_free_node(node);
return refs;
}
@@ -3562,45 +4703,58 @@ static int binder_node_release(struct binder_node *node, int refs)
node->proc = NULL;
node->local_strong_refs = 0;
node->local_weak_refs = 0;
+ binder_inner_proc_unlock(proc);
+
+ spin_lock(&binder_dead_nodes_lock);
hlist_add_head(&node->dead_node, &binder_dead_nodes);
+ spin_unlock(&binder_dead_nodes_lock);
hlist_for_each_entry(ref, &node->refs, node_entry) {
refs++;
-
- if (!ref->death)
+ /*
+ * Need the node lock to synchronize
+ * with new notification requests and the
+ * inner lock to synchronize with queued
+ * death notifications.
+ */
+ binder_inner_proc_lock(ref->proc);
+ if (!ref->death) {
+ binder_inner_proc_unlock(ref->proc);
continue;
+ }
death++;
- if (list_empty(&ref->death->work.entry)) {
- ref->death->work.type = BINDER_WORK_DEAD_BINDER;
- list_add_tail(&ref->death->work.entry,
- &ref->proc->todo);
- wake_up_interruptible(&ref->proc->wait);
- } else
- BUG();
+ BUG_ON(!list_empty(&ref->death->work.entry));
+ ref->death->work.type = BINDER_WORK_DEAD_BINDER;
+ binder_enqueue_work_ilocked(&ref->death->work,
+ &ref->proc->todo);
+ binder_wakeup_proc_ilocked(ref->proc);
+ binder_inner_proc_unlock(ref->proc);
}
binder_debug(BINDER_DEBUG_DEAD_BINDER,
"node %d now dead, refs %d, death %d\n",
node->debug_id, refs, death);
+ binder_node_unlock(node);
+ binder_put_node(node);
return refs;
}
static void binder_deferred_release(struct binder_proc *proc)
{
- struct binder_transaction *t;
struct binder_context *context = proc->context;
struct rb_node *n;
- int threads, nodes, incoming_refs, outgoing_refs, buffers,
- active_transactions, page_count;
+ int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
- BUG_ON(proc->vma);
BUG_ON(proc->files);
+ mutex_lock(&binder_procs_lock);
hlist_del(&proc->proc_node);
+ mutex_unlock(&binder_procs_lock);
+ mutex_lock(&context->context_mgr_node_lock);
if (context->binder_context_mgr_node &&
context->binder_context_mgr_node->proc == proc) {
binder_debug(BINDER_DEBUG_DEAD_BINDER,
@@ -3608,15 +4762,25 @@ static void binder_deferred_release(struct binder_proc *proc)
__func__, proc->pid);
context->binder_context_mgr_node = NULL;
}
+ mutex_unlock(&context->context_mgr_node_lock);
+ binder_inner_proc_lock(proc);
+ /*
+ * Make sure proc stays alive after we
+ * remove all the threads
+ */
+ proc->tmp_ref++;
+ proc->is_dead = true;
threads = 0;
active_transactions = 0;
while ((n = rb_first(&proc->threads))) {
struct binder_thread *thread;
thread = rb_entry(n, struct binder_thread, rb_node);
+ binder_inner_proc_unlock(proc);
threads++;
- active_transactions += binder_free_thread(proc, thread);
+ active_transactions += binder_thread_release(proc, thread);
+ binder_inner_proc_lock(proc);
}
nodes = 0;
@@ -3626,73 +4790,42 @@ static void binder_deferred_release(struct binder_proc *proc)
node = rb_entry(n, struct binder_node, rb_node);
nodes++;
+ /*
+ * take a temporary ref on the node before
+ * calling binder_node_release() which will either
+ * kfree() the node or call binder_put_node()
+ */
+ binder_inc_node_tmpref_ilocked(node);
rb_erase(&node->rb_node, &proc->nodes);
+ binder_inner_proc_unlock(proc);
incoming_refs = binder_node_release(node, incoming_refs);
+ binder_inner_proc_lock(proc);
}
+ binder_inner_proc_unlock(proc);
outgoing_refs = 0;
+ binder_proc_lock(proc);
while ((n = rb_first(&proc->refs_by_desc))) {
struct binder_ref *ref;
ref = rb_entry(n, struct binder_ref, rb_node_desc);
outgoing_refs++;
- binder_delete_ref(ref);
+ binder_cleanup_ref_olocked(ref);
+ binder_proc_unlock(proc);
+ binder_free_ref(ref);
+ binder_proc_lock(proc);
}
+ binder_proc_unlock(proc);
- binder_release_work(&proc->todo);
- binder_release_work(&proc->delivered_death);
-
- buffers = 0;
- while ((n = rb_first(&proc->allocated_buffers))) {
- struct binder_buffer *buffer;
-
- buffer = rb_entry(n, struct binder_buffer, rb_node);
-
- t = buffer->transaction;
- if (t) {
- t->buffer = NULL;
- buffer->transaction = NULL;
- pr_err("release proc %d, transaction %d, not freed\n",
- proc->pid, t->debug_id);
- /*BUG();*/
- }
-
- binder_free_buf(proc, buffer);
- buffers++;
- }
-
- binder_stats_deleted(BINDER_STAT_PROC);
-
- page_count = 0;
- if (proc->pages) {
- int i;
-
- for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
- void *page_addr;
-
- if (!proc->pages[i])
- continue;
-
- page_addr = proc->buffer + i * PAGE_SIZE;
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%s: %d: page %d at %p not freed\n",
- __func__, proc->pid, i, page_addr);
- unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
- __free_page(proc->pages[i]);
- page_count++;
- }
- kfree(proc->pages);
- vfree(proc->buffer);
- }
-
- put_task_struct(proc->tsk);
+ binder_release_work(proc, &proc->todo);
+ binder_release_work(proc, &proc->delivered_death);
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
- "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n",
+ "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
__func__, proc->pid, threads, nodes, incoming_refs,
- outgoing_refs, active_transactions, buffers, page_count);
+ outgoing_refs, active_transactions);
- kfree(proc);
+ binder_proc_dec_tmpref(proc);
}
static void binder_deferred_func(struct work_struct *work)
@@ -3703,7 +4836,6 @@ static void binder_deferred_func(struct work_struct *work)
int defer;
do {
- binder_lock(__func__);
mutex_lock(&binder_deferred_lock);
if (!hlist_empty(&binder_deferred_list)) {
proc = hlist_entry(binder_deferred_list.first,
@@ -3730,7 +4862,6 @@ static void binder_deferred_func(struct work_struct *work)
if (defer & BINDER_DEFERRED_RELEASE)
binder_deferred_release(proc); /* frees proc */
- binder_unlock(__func__);
if (files)
put_files_struct(files);
} while (proc);
@@ -3750,41 +4881,51 @@ binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
mutex_unlock(&binder_deferred_lock);
}
-static void print_binder_transaction(struct seq_file *m, const char *prefix,
- struct binder_transaction *t)
+static void print_binder_transaction_ilocked(struct seq_file *m,
+ struct binder_proc *proc,
+ const char *prefix,
+ struct binder_transaction *t)
{
+ struct binder_proc *to_proc;
+ struct binder_buffer *buffer = t->buffer;
+
+ spin_lock(&t->lock);
+ to_proc = t->to_proc;
seq_printf(m,
"%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
prefix, t->debug_id, t,
t->from ? t->from->proc->pid : 0,
t->from ? t->from->pid : 0,
- t->to_proc ? t->to_proc->pid : 0,
+ to_proc ? to_proc->pid : 0,
t->to_thread ? t->to_thread->pid : 0,
t->code, t->flags, t->priority, t->need_reply);
- if (t->buffer == NULL) {
+ spin_unlock(&t->lock);
+
+ if (proc != to_proc) {
+ /*
+ * Can only safely deref buffer if we are holding the
+ * correct proc inner lock for this node
+ */
+ seq_puts(m, "\n");
+ return;
+ }
+
+ if (buffer == NULL) {
seq_puts(m, " buffer free\n");
return;
}
- if (t->buffer->target_node)
- seq_printf(m, " node %d",
- t->buffer->target_node->debug_id);
+ if (buffer->target_node)
+ seq_printf(m, " node %d", buffer->target_node->debug_id);
seq_printf(m, " size %zd:%zd data %p\n",
- t->buffer->data_size, t->buffer->offsets_size,
- t->buffer->data);
-}
-
-static void print_binder_buffer(struct seq_file *m, const char *prefix,
- struct binder_buffer *buffer)
-{
- seq_printf(m, "%s %d: %p size %zd:%zd %s\n",
- prefix, buffer->debug_id, buffer->data,
buffer->data_size, buffer->offsets_size,
- buffer->transaction ? "active" : "delivered");
+ buffer->data);
}
-static void print_binder_work(struct seq_file *m, const char *prefix,
- const char *transaction_prefix,
- struct binder_work *w)
+static void print_binder_work_ilocked(struct seq_file *m,
+ struct binder_proc *proc,
+ const char *prefix,
+ const char *transaction_prefix,
+ struct binder_work *w)
{
struct binder_node *node;
struct binder_transaction *t;
@@ -3792,8 +4933,16 @@ static void print_binder_work(struct seq_file *m, const char *prefix,
switch (w->type) {
case BINDER_WORK_TRANSACTION:
t = container_of(w, struct binder_transaction, work);
- print_binder_transaction(m, transaction_prefix, t);
+ print_binder_transaction_ilocked(
+ m, proc, transaction_prefix, t);
break;
+ case BINDER_WORK_RETURN_ERROR: {
+ struct binder_error *e = container_of(
+ w, struct binder_error, work);
+
+ seq_printf(m, "%stransaction error: %u\n",
+ prefix, e->cmd);
+ } break;
case BINDER_WORK_TRANSACTION_COMPLETE:
seq_printf(m, "%stransaction complete\n", prefix);
break;
@@ -3818,40 +4967,46 @@ static void print_binder_work(struct seq_file *m, const char *prefix,
}
}
-static void print_binder_thread(struct seq_file *m,
- struct binder_thread *thread,
- int print_always)
+static void print_binder_thread_ilocked(struct seq_file *m,
+ struct binder_thread *thread,
+ int print_always)
{
struct binder_transaction *t;
struct binder_work *w;
size_t start_pos = m->count;
size_t header_pos;
- seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper);
+ seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
+ thread->pid, thread->looper,
+ thread->looper_need_return,
+ atomic_read(&thread->tmp_ref));
header_pos = m->count;
t = thread->transaction_stack;
while (t) {
if (t->from == thread) {
- print_binder_transaction(m,
- " outgoing transaction", t);
+ print_binder_transaction_ilocked(m, thread->proc,
+ " outgoing transaction", t);
t = t->from_parent;
} else if (t->to_thread == thread) {
- print_binder_transaction(m,
+ print_binder_transaction_ilocked(m, thread->proc,
" incoming transaction", t);
t = t->to_parent;
} else {
- print_binder_transaction(m, " bad transaction", t);
+ print_binder_transaction_ilocked(m, thread->proc,
+ " bad transaction", t);
t = NULL;
}
}
list_for_each_entry(w, &thread->todo, entry) {
- print_binder_work(m, " ", " pending transaction", w);
+ print_binder_work_ilocked(m, thread->proc, " ",
+ " pending transaction", w);
}
if (!print_always && m->count == header_pos)
m->count = start_pos;
}
-static void print_binder_node(struct seq_file *m, struct binder_node *node)
+static void print_binder_node_nilocked(struct seq_file *m,
+ struct binder_node *node)
{
struct binder_ref *ref;
struct binder_work *w;
@@ -3861,27 +5016,34 @@ static void print_binder_node(struct seq_file *m, struct binder_node *node)
hlist_for_each_entry(ref, &node->refs, node_entry)
count++;
- seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
+ seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
node->debug_id, (u64)node->ptr, (u64)node->cookie,
node->has_strong_ref, node->has_weak_ref,
node->local_strong_refs, node->local_weak_refs,
- node->internal_strong_refs, count);
+ node->internal_strong_refs, count, node->tmp_refs);
if (count) {
seq_puts(m, " proc");
hlist_for_each_entry(ref, &node->refs, node_entry)
seq_printf(m, " %d", ref->proc->pid);
}
seq_puts(m, "\n");
- list_for_each_entry(w, &node->async_todo, entry)
- print_binder_work(m, " ",
- " pending async transaction", w);
+ if (node->proc) {
+ list_for_each_entry(w, &node->async_todo, entry)
+ print_binder_work_ilocked(m, node->proc, " ",
+ " pending async transaction", w);
+ }
}
-static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
+static void print_binder_ref_olocked(struct seq_file *m,
+ struct binder_ref *ref)
{
- seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %p\n",
- ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
- ref->node->debug_id, ref->strong, ref->weak, ref->death);
+ binder_node_lock(ref->node);
+ seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
+ ref->data.debug_id, ref->data.desc,
+ ref->node->proc ? "" : "dead ",
+ ref->node->debug_id, ref->data.strong,
+ ref->data.weak, ref->death);
+ binder_node_unlock(ref->node);
}
static void print_binder_proc(struct seq_file *m,
@@ -3891,36 +5053,60 @@ static void print_binder_proc(struct seq_file *m,
struct rb_node *n;
size_t start_pos = m->count;
size_t header_pos;
+ struct binder_node *last_node = NULL;
seq_printf(m, "proc %d\n", proc->pid);
seq_printf(m, "context %s\n", proc->context->name);
header_pos = m->count;
+ binder_inner_proc_lock(proc);
for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
- print_binder_thread(m, rb_entry(n, struct binder_thread,
+ print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
rb_node), print_all);
+
for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
struct binder_node *node = rb_entry(n, struct binder_node,
rb_node);
- if (print_all || node->has_async_transaction)
- print_binder_node(m, node);
- }
+ /*
+ * take a temporary reference on the node so it
+ * survives and isn't removed from the tree
+ * while we print it.
+ */
+ binder_inc_node_tmpref_ilocked(node);
+ /* Need to drop inner lock to take node lock */
+ binder_inner_proc_unlock(proc);
+ if (last_node)
+ binder_put_node(last_node);
+ binder_node_inner_lock(node);
+ print_binder_node_nilocked(m, node);
+ binder_node_inner_unlock(node);
+ last_node = node;
+ binder_inner_proc_lock(proc);
+ }
+ binder_inner_proc_unlock(proc);
+ if (last_node)
+ binder_put_node(last_node);
+
if (print_all) {
+ binder_proc_lock(proc);
for (n = rb_first(&proc->refs_by_desc);
n != NULL;
n = rb_next(n))
- print_binder_ref(m, rb_entry(n, struct binder_ref,
- rb_node_desc));
+ print_binder_ref_olocked(m, rb_entry(n,
+ struct binder_ref,
+ rb_node_desc));
+ binder_proc_unlock(proc);
}
- for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
- print_binder_buffer(m, " buffer",
- rb_entry(n, struct binder_buffer, rb_node));
+ binder_alloc_print_allocated(m, &proc->alloc);
+ binder_inner_proc_lock(proc);
list_for_each_entry(w, &proc->todo, entry)
- print_binder_work(m, " ", " pending transaction", w);
+ print_binder_work_ilocked(m, proc, " ",
+ " pending transaction", w);
list_for_each_entry(w, &proc->delivered_death, entry) {
seq_puts(m, " has delivered dead binder\n");
break;
}
+ binder_inner_proc_unlock(proc);
if (!print_all && m->count == header_pos)
m->count = start_pos;
}
@@ -3986,17 +5172,21 @@ static void print_binder_stats(struct seq_file *m, const char *prefix,
BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
ARRAY_SIZE(binder_command_strings));
for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
- if (stats->bc[i])
+ int temp = atomic_read(&stats->bc[i]);
+
+ if (temp)
seq_printf(m, "%s%s: %d\n", prefix,
- binder_command_strings[i], stats->bc[i]);
+ binder_command_strings[i], temp);
}
BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
ARRAY_SIZE(binder_return_strings));
for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
- if (stats->br[i])
+ int temp = atomic_read(&stats->br[i]);
+
+ if (temp)
seq_printf(m, "%s%s: %d\n", prefix,
- binder_return_strings[i], stats->br[i]);
+ binder_return_strings[i], temp);
}
BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
@@ -4004,11 +5194,15 @@ static void print_binder_stats(struct seq_file *m, const char *prefix,
BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
ARRAY_SIZE(stats->obj_deleted));
for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
- if (stats->obj_created[i] || stats->obj_deleted[i])
- seq_printf(m, "%s%s: active %d total %d\n", prefix,
+ int created = atomic_read(&stats->obj_created[i]);
+ int deleted = atomic_read(&stats->obj_deleted[i]);
+
+ if (created || deleted)
+ seq_printf(m, "%s%s: active %d total %d\n",
+ prefix,
binder_objstat_strings[i],
- stats->obj_created[i] - stats->obj_deleted[i],
- stats->obj_created[i]);
+ created - deleted,
+ created);
}
}
@@ -4016,51 +5210,61 @@ static void print_binder_proc_stats(struct seq_file *m,
struct binder_proc *proc)
{
struct binder_work *w;
+ struct binder_thread *thread;
struct rb_node *n;
- int count, strong, weak;
+ int count, strong, weak, ready_threads;
+ size_t free_async_space =
+ binder_alloc_get_free_async_space(&proc->alloc);
seq_printf(m, "proc %d\n", proc->pid);
seq_printf(m, "context %s\n", proc->context->name);
count = 0;
+ ready_threads = 0;
+ binder_inner_proc_lock(proc);
for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
count++;
+
+ list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
+ ready_threads++;
+
seq_printf(m, " threads: %d\n", count);
seq_printf(m, " requested threads: %d+%d/%d\n"
" ready threads %d\n"
" free async space %zd\n", proc->requested_threads,
proc->requested_threads_started, proc->max_threads,
- proc->ready_threads, proc->free_async_space);
+ ready_threads,
+ free_async_space);
count = 0;
for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
count++;
+ binder_inner_proc_unlock(proc);
seq_printf(m, " nodes: %d\n", count);
count = 0;
strong = 0;
weak = 0;
+ binder_proc_lock(proc);
for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
struct binder_ref *ref = rb_entry(n, struct binder_ref,
rb_node_desc);
count++;
- strong += ref->strong;
- weak += ref->weak;
+ strong += ref->data.strong;
+ weak += ref->data.weak;
}
+ binder_proc_unlock(proc);
seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
- count = 0;
- for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
- count++;
+ count = binder_alloc_get_allocated_count(&proc->alloc);
seq_printf(m, " buffers: %d\n", count);
+ binder_alloc_print_pages(m, &proc->alloc);
+
count = 0;
+ binder_inner_proc_lock(proc);
list_for_each_entry(w, &proc->todo, entry) {
- switch (w->type) {
- case BINDER_WORK_TRANSACTION:
+ if (w->type == BINDER_WORK_TRANSACTION)
count++;
- break;
- default:
- break;
- }
}
+ binder_inner_proc_unlock(proc);
seq_printf(m, " pending transactions: %d\n", count);
print_binder_stats(m, " ", &proc->stats);
@@ -4071,57 +5275,67 @@ static int binder_state_show(struct seq_file *m, void *unused)
{
struct binder_proc *proc;
struct binder_node *node;
- int do_lock = !binder_debug_no_lock;
-
- if (do_lock)
- binder_lock(__func__);
+ struct binder_node *last_node = NULL;
seq_puts(m, "binder state:\n");
+ spin_lock(&binder_dead_nodes_lock);
if (!hlist_empty(&binder_dead_nodes))
seq_puts(m, "dead nodes:\n");
- hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
- print_binder_node(m, node);
-
+ hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
+ /*
+ * take a temporary reference on the node so it
+ * survives and isn't removed from the list
+ * while we print it.
+ */
+ node->tmp_refs++;
+ spin_unlock(&binder_dead_nodes_lock);
+ if (last_node)
+ binder_put_node(last_node);
+ binder_node_lock(node);
+ print_binder_node_nilocked(m, node);
+ binder_node_unlock(node);
+ last_node = node;
+ spin_lock(&binder_dead_nodes_lock);
+ }
+ spin_unlock(&binder_dead_nodes_lock);
+ if (last_node)
+ binder_put_node(last_node);
+
+ mutex_lock(&binder_procs_lock);
hlist_for_each_entry(proc, &binder_procs, proc_node)
print_binder_proc(m, proc, 1);
- if (do_lock)
- binder_unlock(__func__);
+ mutex_unlock(&binder_procs_lock);
+
return 0;
}
static int binder_stats_show(struct seq_file *m, void *unused)
{
struct binder_proc *proc;
- int do_lock = !binder_debug_no_lock;
-
- if (do_lock)
- binder_lock(__func__);
seq_puts(m, "binder stats:\n");
print_binder_stats(m, "", &binder_stats);
+ mutex_lock(&binder_procs_lock);
hlist_for_each_entry(proc, &binder_procs, proc_node)
print_binder_proc_stats(m, proc);
- if (do_lock)
- binder_unlock(__func__);
+ mutex_unlock(&binder_procs_lock);
+
return 0;
}
static int binder_transactions_show(struct seq_file *m, void *unused)
{
struct binder_proc *proc;
- int do_lock = !binder_debug_no_lock;
-
- if (do_lock)
- binder_lock(__func__);
seq_puts(m, "binder transactions:\n");
+ mutex_lock(&binder_procs_lock);
hlist_for_each_entry(proc, &binder_procs, proc_node)
print_binder_proc(m, proc, 0);
- if (do_lock)
- binder_unlock(__func__);
+ mutex_unlock(&binder_procs_lock);
+
return 0;
}
@@ -4129,44 +5343,63 @@ static int binder_proc_show(struct seq_file *m, void *unused)
{
struct binder_proc *itr;
int pid = (unsigned long)m->private;
- int do_lock = !binder_debug_no_lock;
-
- if (do_lock)
- binder_lock(__func__);
+ mutex_lock(&binder_procs_lock);
hlist_for_each_entry(itr, &binder_procs, proc_node) {
if (itr->pid == pid) {
seq_puts(m, "binder proc state:\n");
print_binder_proc(m, itr, 1);
}
}
- if (do_lock)
- binder_unlock(__func__);
+ mutex_unlock(&binder_procs_lock);
+
return 0;
}
static void print_binder_transaction_log_entry(struct seq_file *m,
struct binder_transaction_log_entry *e)
{
+ int debug_id = READ_ONCE(e->debug_id_done);
+ /*
+ * read barrier to guarantee debug_id_done read before
+ * we print the log values
+ */
+ smp_rmb();
seq_printf(m,
- "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d\n",
+ "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
e->debug_id, (e->call_type == 2) ? "reply" :
((e->call_type == 1) ? "async" : "call "), e->from_proc,
e->from_thread, e->to_proc, e->to_thread, e->context_name,
- e->to_node, e->target_handle, e->data_size, e->offsets_size);
+ e->to_node, e->target_handle, e->data_size, e->offsets_size,
+ e->return_error, e->return_error_param,
+ e->return_error_line);
+ /*
+ * read barrier to guarantee the read of debug_id_done happens after
+ * we are done printing the fields of the entry
+ */
+ smp_rmb();
+ seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
+ "\n" : " (incomplete)\n");
}
static int binder_transaction_log_show(struct seq_file *m, void *unused)
{
struct binder_transaction_log *log = m->private;
+ unsigned int log_cur = atomic_read(&log->cur);
+ unsigned int count;
+ unsigned int cur;
int i;
- if (log->full) {
- for (i = log->next; i < ARRAY_SIZE(log->entry); i++)
- print_binder_transaction_log_entry(m, &log->entry[i]);
+ count = log_cur + 1;
+ cur = count < ARRAY_SIZE(log->entry) && !log->full ?
+ 0 : count % ARRAY_SIZE(log->entry);
+ if (count > ARRAY_SIZE(log->entry) || log->full)
+ count = ARRAY_SIZE(log->entry);
+ for (i = 0; i < count; i++) {
+ unsigned int index = cur++ % ARRAY_SIZE(log->entry);
+
+ print_binder_transaction_log_entry(m, &log->entry[index]);
}
- for (i = 0; i < log->next; i++)
- print_binder_transaction_log_entry(m, &log->entry[i]);
return 0;
}
@@ -4201,6 +5434,7 @@ static int __init init_binder_device(const char *name)
binder_device->context.binder_context_mgr_uid = INVALID_UID;
binder_device->context.name = name;
+ mutex_init(&binder_device->context.context_mgr_node_lock);
ret = misc_register(&binder_device->miscdev);
if (ret < 0) {
@@ -4216,10 +5450,15 @@ static int __init init_binder_device(const char *name)
static int __init binder_init(void)
{
int ret;
- char *device_name, *device_names;
+ char *device_name, *device_names, *device_tmp;
struct binder_device *device;
struct hlist_node *tmp;
+ binder_alloc_shrinker_init();
+
+ atomic_set(&binder_transaction_log.cur, ~0U);
+ atomic_set(&binder_transaction_log_failed.cur, ~0U);
+
binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
if (binder_debugfs_dir_entry_root)
binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
@@ -4264,7 +5503,8 @@ static int __init binder_init(void)
}
strcpy(device_names, binder_devices_param);
- while ((device_name = strsep(&device_names, ","))) {
+ device_tmp = device_names;
+ while ((device_name = strsep(&device_tmp, ","))) {
ret = init_binder_device(device_name);
if (ret)
goto err_init_binder_device_failed;
@@ -4278,6 +5518,9 @@ err_init_binder_device_failed:
hlist_del(&device->hlist);
kfree(device);
}
+
+ kfree(device_names);
+
err_alloc_device_names_failed:
debugfs_remove_recursive(binder_debugfs_dir_entry_root);
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
new file mode 100644
index 000000000000..8fe165844e47
--- /dev/null
+++ b/drivers/android/binder_alloc.c
@@ -0,0 +1,1009 @@
+/* binder_alloc.c
+ *
+ * Android IPC Subsystem
+ *
+ * Copyright (C) 2007-2017 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <asm/cacheflush.h>
+#include <linux/list.h>
+#include <linux/sched/mm.h>
+#include <linux/module.h>
+#include <linux/rtmutex.h>
+#include <linux/rbtree.h>
+#include <linux/seq_file.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/list_lru.h>
+#include "binder_alloc.h"
+#include "binder_trace.h"
+
+struct list_lru binder_alloc_lru;
+
+static DEFINE_MUTEX(binder_alloc_mmap_lock);
+
+enum {
+ BINDER_DEBUG_OPEN_CLOSE = 1U << 1,
+ BINDER_DEBUG_BUFFER_ALLOC = 1U << 2,
+ BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 3,
+};
+static uint32_t binder_alloc_debug_mask;
+
+module_param_named(debug_mask, binder_alloc_debug_mask,
+ uint, 0644);
+
+#define binder_alloc_debug(mask, x...) \
+ do { \
+ if (binder_alloc_debug_mask & mask) \
+ pr_info(x); \
+ } while (0)
+
+static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
+{
+ return list_entry(buffer->entry.next, struct binder_buffer, entry);
+}
+
+static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
+{
+ return list_entry(buffer->entry.prev, struct binder_buffer, entry);
+}
+
+static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
+ struct binder_buffer *buffer)
+{
+ if (list_is_last(&buffer->entry, &alloc->buffers))
+ return (u8 *)alloc->buffer +
+ alloc->buffer_size - (u8 *)buffer->data;
+ return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
+}
+
+static void binder_insert_free_buffer(struct binder_alloc *alloc,
+ struct binder_buffer *new_buffer)
+{
+ struct rb_node **p = &alloc->free_buffers.rb_node;
+ struct rb_node *parent = NULL;
+ struct binder_buffer *buffer;
+ size_t buffer_size;
+ size_t new_buffer_size;
+
+ BUG_ON(!new_buffer->free);
+
+ new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);
+
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: add free buffer, size %zd, at %pK\n",
+ alloc->pid, new_buffer_size, new_buffer);
+
+ while (*p) {
+ parent = *p;
+ buffer = rb_entry(parent, struct binder_buffer, rb_node);
+ BUG_ON(!buffer->free);
+
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+
+ if (new_buffer_size < buffer_size)
+ p = &parent->rb_left;
+ else
+ p = &parent->rb_right;
+ }
+ rb_link_node(&new_buffer->rb_node, parent, p);
+ rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
+}
+
+static void binder_insert_allocated_buffer_locked(
+ struct binder_alloc *alloc, struct binder_buffer *new_buffer)
+{
+ struct rb_node **p = &alloc->allocated_buffers.rb_node;
+ struct rb_node *parent = NULL;
+ struct binder_buffer *buffer;
+
+ BUG_ON(new_buffer->free);
+
+ while (*p) {
+ parent = *p;
+ buffer = rb_entry(parent, struct binder_buffer, rb_node);
+ BUG_ON(buffer->free);
+
+ if (new_buffer->data < buffer->data)
+ p = &parent->rb_left;
+ else if (new_buffer->data > buffer->data)
+ p = &parent->rb_right;
+ else
+ BUG();
+ }
+ rb_link_node(&new_buffer->rb_node, parent, p);
+ rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
+}
+
+static struct binder_buffer *binder_alloc_prepare_to_free_locked(
+ struct binder_alloc *alloc,
+ uintptr_t user_ptr)
+{
+ struct rb_node *n = alloc->allocated_buffers.rb_node;
+ struct binder_buffer *buffer;
+ void *kern_ptr;
+
+ kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);
+
+ while (n) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ BUG_ON(buffer->free);
+
+ if (kern_ptr < buffer->data)
+ n = n->rb_left;
+ else if (kern_ptr > buffer->data)
+ n = n->rb_right;
+ else {
+ /*
+ * Guard against user threads attempting to
+ * free the buffer twice
+ */
+ if (buffer->free_in_progress) {
+ pr_err("%d:%d FREE_BUFFER u%016llx user freed buffer twice\n",
+ alloc->pid, current->pid, (u64)user_ptr);
+ return NULL;
+ }
+ buffer->free_in_progress = 1;
+ return buffer;
+ }
+ }
+ return NULL;
+}
+
+/**
+ * binder_alloc_prepare_to_free() - get buffer given user ptr
+ * @alloc: binder_alloc for this proc
+ * @user_ptr: User pointer to buffer data
+ *
+ * Validate userspace pointer to buffer data and return buffer corresponding to
+ * that user pointer. Search the rb tree for buffer that matches user data
+ * pointer.
+ *
+ * Return: Pointer to buffer or NULL
+ */
+struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
+ uintptr_t user_ptr)
+{
+ struct binder_buffer *buffer;
+
+ mutex_lock(&alloc->mutex);
+ buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
+ mutex_unlock(&alloc->mutex);
+ return buffer;
+}
+
+static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
+ void *start, void *end,
+ struct vm_area_struct *vma)
+{
+ void *page_addr;
+ unsigned long user_page_addr;
+ struct binder_lru_page *page;
+ struct mm_struct *mm = NULL;
+ bool need_mm = false;
+
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: %s pages %pK-%pK\n", alloc->pid,
+ allocate ? "allocate" : "free", start, end);
+
+ if (end <= start)
+ return 0;
+
+ trace_binder_update_page_range(alloc, allocate, start, end);
+
+ if (allocate == 0)
+ goto free_range;
+
+ for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
+ page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
+ if (!page->page_ptr) {
+ need_mm = true;
+ break;
+ }
+ }
+
+ if (!vma && need_mm)
+ mm = get_task_mm(alloc->tsk);
+
+ if (mm) {
+ down_write(&mm->mmap_sem);
+ vma = alloc->vma;
+ if (vma && mm != alloc->vma_vm_mm) {
+ pr_err("%d: vma mm and task mm mismatch\n",
+ alloc->pid);
+ vma = NULL;
+ }
+ }
+
+ if (!vma && need_mm) {
+ pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
+ alloc->pid);
+ goto err_no_vma;
+ }
+
+ for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
+ int ret;
+ bool on_lru;
+ size_t index;
+
+ index = (page_addr - alloc->buffer) / PAGE_SIZE;
+ page = &alloc->pages[index];
+
+ if (page->page_ptr) {
+ trace_binder_alloc_lru_start(alloc, index);
+
+ on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
+ WARN_ON(!on_lru);
+
+ trace_binder_alloc_lru_end(alloc, index);
+ continue;
+ }
+
+ if (WARN_ON(!vma))
+ goto err_page_ptr_cleared;
+
+ trace_binder_alloc_page_start(alloc, index);
+ page->page_ptr = alloc_page(GFP_KERNEL |
+ __GFP_HIGHMEM |
+ __GFP_ZERO);
+ if (!page->page_ptr) {
+ pr_err("%d: binder_alloc_buf failed for page at %pK\n",
+ alloc->pid, page_addr);
+ goto err_alloc_page_failed;
+ }
+ page->alloc = alloc;
+ INIT_LIST_HEAD(&page->lru);
+
+ ret = map_kernel_range_noflush((unsigned long)page_addr,
+ PAGE_SIZE, PAGE_KERNEL,
+ &page->page_ptr);
+ flush_cache_vmap((unsigned long)page_addr,
+ (unsigned long)page_addr + PAGE_SIZE);
+ if (ret != 1) {
+ pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
+ alloc->pid, page_addr);
+ goto err_map_kernel_failed;
+ }
+ user_page_addr =
+ (uintptr_t)page_addr + alloc->user_buffer_offset;
+ ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
+ if (ret) {
+ pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
+ alloc->pid, user_page_addr);
+ goto err_vm_insert_page_failed;
+ }
+
+ trace_binder_alloc_page_end(alloc, index);
+ /* vm_insert_page does not seem to increment the refcount */
+ }
+ if (mm) {
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ }
+ return 0;
+
+free_range:
+ for (page_addr = end - PAGE_SIZE; page_addr >= start;
+ page_addr -= PAGE_SIZE) {
+ bool ret;
+ size_t index;
+
+ index = (page_addr - alloc->buffer) / PAGE_SIZE;
+ page = &alloc->pages[index];
+
+ trace_binder_free_lru_start(alloc, index);
+
+ ret = list_lru_add(&binder_alloc_lru, &page->lru);
+ WARN_ON(!ret);
+
+ trace_binder_free_lru_end(alloc, index);
+ continue;
+
+err_vm_insert_page_failed:
+ unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
+err_map_kernel_failed:
+ __free_page(page->page_ptr);
+ page->page_ptr = NULL;
+err_alloc_page_failed:
+err_page_ptr_cleared:
+ ;
+ }
+err_no_vma:
+ if (mm) {
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ }
+ return vma ? -ENOMEM : -ESRCH;
+}
+
+struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
+ size_t data_size,
+ size_t offsets_size,
+ size_t extra_buffers_size,
+ int is_async)
+{
+ struct rb_node *n = alloc->free_buffers.rb_node;
+ struct binder_buffer *buffer;
+ size_t buffer_size;
+ struct rb_node *best_fit = NULL;
+ void *has_page_addr;
+ void *end_page_addr;
+ size_t size, data_offsets_size;
+ int ret;
+
+ if (alloc->vma == NULL) {
+ pr_err("%d: binder_alloc_buf, no vma\n",
+ alloc->pid);
+ return ERR_PTR(-ESRCH);
+ }
+
+ data_offsets_size = ALIGN(data_size, sizeof(void *)) +
+ ALIGN(offsets_size, sizeof(void *));
+
+ if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: got transaction with invalid size %zd-%zd\n",
+ alloc->pid, data_size, offsets_size);
+ return ERR_PTR(-EINVAL);
+ }
+ size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
+ if (size < data_offsets_size || size < extra_buffers_size) {
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: got transaction with invalid extra_buffers_size %zd\n",
+ alloc->pid, extra_buffers_size);
+ return ERR_PTR(-EINVAL);
+ }
+ if (is_async &&
+ alloc->free_async_space < size + sizeof(struct binder_buffer)) {
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: binder_alloc_buf size %zd failed, no async space left\n",
+ alloc->pid, size);
+ return ERR_PTR(-ENOSPC);
+ }
+
+ /* Pad 0-size buffers so they get assigned unique addresses */
+ size = max(size, sizeof(void *));
+
+ while (n) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ BUG_ON(!buffer->free);
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+
+ if (size < buffer_size) {
+ best_fit = n;
+ n = n->rb_left;
+ } else if (size > buffer_size)
+ n = n->rb_right;
+ else {
+ best_fit = n;
+ break;
+ }
+ }
+ if (best_fit == NULL) {
+ size_t allocated_buffers = 0;
+ size_t largest_alloc_size = 0;
+ size_t total_alloc_size = 0;
+ size_t free_buffers = 0;
+ size_t largest_free_size = 0;
+ size_t total_free_size = 0;
+
+ for (n = rb_first(&alloc->allocated_buffers); n != NULL;
+ n = rb_next(n)) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+ allocated_buffers++;
+ total_alloc_size += buffer_size;
+ if (buffer_size > largest_alloc_size)
+ largest_alloc_size = buffer_size;
+ }
+ for (n = rb_first(&alloc->free_buffers); n != NULL;
+ n = rb_next(n)) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+ free_buffers++;
+ total_free_size += buffer_size;
+ if (buffer_size > largest_free_size)
+ largest_free_size = buffer_size;
+ }
+ pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
+ alloc->pid, size);
+ pr_err("allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
+ total_alloc_size, allocated_buffers, largest_alloc_size,
+ total_free_size, free_buffers, largest_free_size);
+ return ERR_PTR(-ENOSPC);
+ }
+ if (n == NULL) {
+ buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+ }
+
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
+ alloc->pid, size, buffer, buffer_size);
+
+ has_page_addr =
+ (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
+ WARN_ON(n && buffer_size != size);
+ end_page_addr =
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
+ if (end_page_addr > has_page_addr)
+ end_page_addr = has_page_addr;
+ ret = binder_update_page_range(alloc, 1,
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (buffer_size != size) {
+ struct binder_buffer *new_buffer;
+
+ new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!new_buffer) {
+ pr_err("%s: %d failed to alloc new buffer struct\n",
+ __func__, alloc->pid);
+ goto err_alloc_buf_struct_failed;
+ }
+ new_buffer->data = (u8 *)buffer->data + size;
+ list_add(&new_buffer->entry, &buffer->entry);
+ new_buffer->free = 1;
+ binder_insert_free_buffer(alloc, new_buffer);
+ }
+
+ rb_erase(best_fit, &alloc->free_buffers);
+ buffer->free = 0;
+ buffer->free_in_progress = 0;
+ binder_insert_allocated_buffer_locked(alloc, buffer);
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: binder_alloc_buf size %zd got %pK\n",
+ alloc->pid, size, buffer);
+ buffer->data_size = data_size;
+ buffer->offsets_size = offsets_size;
+ buffer->async_transaction = is_async;
+ buffer->extra_buffers_size = extra_buffers_size;
+ if (is_async) {
+ alloc->free_async_space -= size + sizeof(struct binder_buffer);
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+ "%d: binder_alloc_buf size %zd async free %zd\n",
+ alloc->pid, size, alloc->free_async_space);
+ }
+ return buffer;
+
+err_alloc_buf_struct_failed:
+ binder_update_page_range(alloc, 0,
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data),
+ end_page_addr, NULL);
+ return ERR_PTR(-ENOMEM);
+}
+
+/**
+ * binder_alloc_new_buf() - Allocate a new binder buffer
+ * @alloc: binder_alloc for this proc
+ * @data_size: size of user data buffer
+ * @offsets_size: user specified buffer offset
+ * @extra_buffers_size: size of extra space for meta-data (eg, security context)
+ * @is_async: buffer for async transaction
+ *
+ * Allocate a new buffer given the requested sizes. Returns
+ * the kernel version of the buffer pointer. The size allocated
+ * is the sum of the three given sizes (each rounded up to
+ * a pointer-sized boundary)
+ *
+ * Return: The allocated buffer or %ERR_PTR(-errno) on error
+ */
+struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
+ size_t data_size,
+ size_t offsets_size,
+ size_t extra_buffers_size,
+ int is_async)
+{
+ struct binder_buffer *buffer;
+
+ mutex_lock(&alloc->mutex);
+ buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
+ extra_buffers_size, is_async);
+ mutex_unlock(&alloc->mutex);
+ return buffer;
+}
+
+static void *buffer_start_page(struct binder_buffer *buffer)
+{
+ return (void *)((uintptr_t)buffer->data & PAGE_MASK);
+}
+
+static void *prev_buffer_end_page(struct binder_buffer *buffer)
+{
+ return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK);
+}
+
+static void binder_delete_free_buffer(struct binder_alloc *alloc,
+ struct binder_buffer *buffer)
+{
+ struct binder_buffer *prev, *next = NULL;
+ bool to_free = true;
+ BUG_ON(alloc->buffers.next == &buffer->entry);
+ prev = binder_buffer_prev(buffer);
+ BUG_ON(!prev->free);
+ if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
+ to_free = false;
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: merge free, buffer %pK share page with %pK\n",
+ alloc->pid, buffer->data, prev->data);
+ }
+
+ if (!list_is_last(&buffer->entry, &alloc->buffers)) {
+ next = binder_buffer_next(buffer);
+ if (buffer_start_page(next) == buffer_start_page(buffer)) {
+ to_free = false;
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: merge free, buffer %pK share page with %pK\n",
+ alloc->pid,
+ buffer->data,
+ next->data);
+ }
+ }
+
+ if (PAGE_ALIGNED(buffer->data)) {
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: merge free, buffer start %pK is page aligned\n",
+ alloc->pid, buffer->data);
+ to_free = false;
+ }
+
+ if (to_free) {
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
+ alloc->pid, buffer->data,
+ prev->data, next->data);
+ binder_update_page_range(alloc, 0, buffer_start_page(buffer),
+ buffer_start_page(buffer) + PAGE_SIZE,
+ NULL);
+ }
+ list_del(&buffer->entry);
+ kfree(buffer);
+}
+
+static void binder_free_buf_locked(struct binder_alloc *alloc,
+ struct binder_buffer *buffer)
+{
+ size_t size, buffer_size;
+
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+
+ size = ALIGN(buffer->data_size, sizeof(void *)) +
+ ALIGN(buffer->offsets_size, sizeof(void *)) +
+ ALIGN(buffer->extra_buffers_size, sizeof(void *));
+
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
+ alloc->pid, buffer, size, buffer_size);
+
+ BUG_ON(buffer->free);
+ BUG_ON(size > buffer_size);
+ BUG_ON(buffer->transaction != NULL);
+ BUG_ON(buffer->data < alloc->buffer);
+ BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size);
+
+ if (buffer->async_transaction) {
+ alloc->free_async_space += size + sizeof(struct binder_buffer);
+
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+ "%d: binder_free_buf size %zd async free %zd\n",
+ alloc->pid, size, alloc->free_async_space);
+ }
+
+ binder_update_page_range(alloc, 0,
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data),
+ (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
+ NULL);
+
+ rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
+ buffer->free = 1;
+ if (!list_is_last(&buffer->entry, &alloc->buffers)) {
+ struct binder_buffer *next = binder_buffer_next(buffer);
+
+ if (next->free) {
+ rb_erase(&next->rb_node, &alloc->free_buffers);
+ binder_delete_free_buffer(alloc, next);
+ }
+ }
+ if (alloc->buffers.next != &buffer->entry) {
+ struct binder_buffer *prev = binder_buffer_prev(buffer);
+
+ if (prev->free) {
+ binder_delete_free_buffer(alloc, buffer);
+ rb_erase(&prev->rb_node, &alloc->free_buffers);
+ buffer = prev;
+ }
+ }
+ binder_insert_free_buffer(alloc, buffer);
+}
+
+/**
+ * binder_alloc_free_buf() - free a binder buffer
+ * @alloc: binder_alloc for this proc
+ * @buffer: kernel pointer to buffer
+ *
+ * Free the buffer allocated via binder_alloc_new_buffer()
+ */
+void binder_alloc_free_buf(struct binder_alloc *alloc,
+ struct binder_buffer *buffer)
+{
+ mutex_lock(&alloc->mutex);
+ binder_free_buf_locked(alloc, buffer);
+ mutex_unlock(&alloc->mutex);
+}
+
+/**
+ * binder_alloc_mmap_handler() - map virtual address space for proc
+ * @alloc: alloc structure for this proc
+ * @vma: vma passed to mmap()
+ *
+ * Called by binder_mmap() to initialize the space specified in
+ * vma for allocating binder buffers
+ *
+ * Return:
+ * 0 = success
+ * -EBUSY = address space already mapped
+ * -ENOMEM = failed to map memory to given address space
+ */
+int binder_alloc_mmap_handler(struct binder_alloc *alloc,
+ struct vm_area_struct *vma)
+{
+ int ret;
+ struct vm_struct *area;
+ const char *failure_string;
+ struct binder_buffer *buffer;
+
+ mutex_lock(&binder_alloc_mmap_lock);
+ if (alloc->buffer) {
+ ret = -EBUSY;
+ failure_string = "already mapped";
+ goto err_already_mapped;
+ }
+
+ area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
+ if (area == NULL) {
+ ret = -ENOMEM;
+ failure_string = "get_vm_area";
+ goto err_get_vm_area_failed;
+ }
+ alloc->buffer = area->addr;
+ alloc->user_buffer_offset =
+ vma->vm_start - (uintptr_t)alloc->buffer;
+ mutex_unlock(&binder_alloc_mmap_lock);
+
+#ifdef CONFIG_CPU_CACHE_VIPT
+ if (cache_is_vipt_aliasing()) {
+ while (CACHE_COLOUR(
+ (vma->vm_start ^ (uint32_t)alloc->buffer))) {
+ pr_info("%s: %d %lx-%lx maps %pK bad alignment\n",
+ __func__, alloc->pid, vma->vm_start,
+ vma->vm_end, alloc->buffer);
+ vma->vm_start += PAGE_SIZE;
+ }
+ }
+#endif
+ alloc->pages = kzalloc(sizeof(alloc->pages[0]) *
+ ((vma->vm_end - vma->vm_start) / PAGE_SIZE),
+ GFP_KERNEL);
+ if (alloc->pages == NULL) {
+ ret = -ENOMEM;
+ failure_string = "alloc page array";
+ goto err_alloc_pages_failed;
+ }
+ alloc->buffer_size = vma->vm_end - vma->vm_start;
+
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer) {
+ ret = -ENOMEM;
+ failure_string = "alloc buffer struct";
+ goto err_alloc_buf_struct_failed;
+ }
+
+ buffer->data = alloc->buffer;
+ list_add(&buffer->entry, &alloc->buffers);
+ buffer->free = 1;
+ binder_insert_free_buffer(alloc, buffer);
+ alloc->free_async_space = alloc->buffer_size / 2;
+ barrier();
+ alloc->vma = vma;
+ alloc->vma_vm_mm = vma->vm_mm;
+
+ return 0;
+
+err_alloc_buf_struct_failed:
+ kfree(alloc->pages);
+ alloc->pages = NULL;
+err_alloc_pages_failed:
+ mutex_lock(&binder_alloc_mmap_lock);
+ vfree(alloc->buffer);
+ alloc->buffer = NULL;
+err_get_vm_area_failed:
+err_already_mapped:
+ mutex_unlock(&binder_alloc_mmap_lock);
+ pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
+ alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
+ return ret;
+}
+
+
+void binder_alloc_deferred_release(struct binder_alloc *alloc)
+{
+ struct rb_node *n;
+ int buffers, page_count;
+ struct binder_buffer *buffer;
+
+ BUG_ON(alloc->vma);
+
+ buffers = 0;
+ mutex_lock(&alloc->mutex);
+ while ((n = rb_first(&alloc->allocated_buffers))) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+
+ /* Transaction should already have been freed */
+ BUG_ON(buffer->transaction);
+
+ binder_free_buf_locked(alloc, buffer);
+ buffers++;
+ }
+
+ while (!list_empty(&alloc->buffers)) {
+ buffer = list_first_entry(&alloc->buffers,
+ struct binder_buffer, entry);
+ WARN_ON(!buffer->free);
+
+ list_del(&buffer->entry);
+ WARN_ON_ONCE(!list_empty(&alloc->buffers));
+ kfree(buffer);
+ }
+
+ page_count = 0;
+ if (alloc->pages) {
+ int i;
+
+ for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+ void *page_addr;
+ bool on_lru;
+
+ if (!alloc->pages[i].page_ptr)
+ continue;
+
+ on_lru = list_lru_del(&binder_alloc_lru,
+ &alloc->pages[i].lru);
+ page_addr = alloc->buffer + i * PAGE_SIZE;
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%s: %d: page %d at %pK %s\n",
+ __func__, alloc->pid, i, page_addr,
+ on_lru ? "on lru" : "active");
+ unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
+ __free_page(alloc->pages[i].page_ptr);
+ page_count++;
+ }
+ kfree(alloc->pages);
+ vfree(alloc->buffer);
+ }
+ mutex_unlock(&alloc->mutex);
+
+ binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
+ "%s: %d buffers %d, pages %d\n",
+ __func__, alloc->pid, buffers, page_count);
+}
+
+static void print_binder_buffer(struct seq_file *m, const char *prefix,
+ struct binder_buffer *buffer)
+{
+ seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
+ prefix, buffer->debug_id, buffer->data,
+ buffer->data_size, buffer->offsets_size,
+ buffer->extra_buffers_size,
+ buffer->transaction ? "active" : "delivered");
+}
+
+/**
+ * binder_alloc_print_allocated() - print buffer info
+ * @m: seq_file for output via seq_printf()
+ * @alloc: binder_alloc for this proc
+ *
+ * Prints information about every buffer associated with
+ * the binder_alloc state to the given seq_file
+ */
+void binder_alloc_print_allocated(struct seq_file *m,
+ struct binder_alloc *alloc)
+{
+ struct rb_node *n;
+
+ mutex_lock(&alloc->mutex);
+ for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
+ print_binder_buffer(m, " buffer",
+ rb_entry(n, struct binder_buffer, rb_node));
+ mutex_unlock(&alloc->mutex);
+}
+
+/**
+ * binder_alloc_print_pages() - print page usage
+ * @m: seq_file for output via seq_printf()
+ * @alloc: binder_alloc for this proc
+ */
+void binder_alloc_print_pages(struct seq_file *m,
+ struct binder_alloc *alloc)
+{
+ struct binder_lru_page *page;
+ int i;
+ int active = 0;
+ int lru = 0;
+ int free = 0;
+
+ mutex_lock(&alloc->mutex);
+ for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+ page = &alloc->pages[i];
+ if (!page->page_ptr)
+ free++;
+ else if (list_empty(&page->lru))
+ active++;
+ else
+ lru++;
+ }
+ mutex_unlock(&alloc->mutex);
+ seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
+}
+
+/**
+ * binder_alloc_get_allocated_count() - return count of buffers
+ * @alloc: binder_alloc for this proc
+ *
+ * Return: count of allocated buffers
+ */
+int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
+{
+ struct rb_node *n;
+ int count = 0;
+
+ mutex_lock(&alloc->mutex);
+ for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
+ count++;
+ mutex_unlock(&alloc->mutex);
+ return count;
+}
+
+
+/**
+ * binder_alloc_vma_close() - invalidate address space
+ * @alloc: binder_alloc for this proc
+ *
+ * Called from binder_vma_close() when releasing address space.
+ * Clears alloc->vma to prevent new incoming transactions from
+ * allocating more buffers.
+ */
+void binder_alloc_vma_close(struct binder_alloc *alloc)
+{
+ WRITE_ONCE(alloc->vma, NULL);
+ WRITE_ONCE(alloc->vma_vm_mm, NULL);
+}
+
+/**
+ * binder_alloc_free_page() - shrinker callback to free pages
+ * @item: item to free
+ * @lru: list_lru instance of the item
+ * @lock: lock protecting the item
+ * @cb_arg: callback argument
+ *
+ * Called from list_lru_walk() in binder_shrink_scan() to free
+ * up pages when the system is under memory pressure.
+ */
+enum lru_status binder_alloc_free_page(struct list_head *item,
+ struct list_lru_one *lru,
+ spinlock_t *lock,
+ void *cb_arg)
+{
+ struct mm_struct *mm = NULL;
+ struct binder_lru_page *page = container_of(item,
+ struct binder_lru_page,
+ lru);
+ struct binder_alloc *alloc;
+ uintptr_t page_addr;
+ size_t index;
+
+ alloc = page->alloc;
+ if (!mutex_trylock(&alloc->mutex))
+ goto err_get_alloc_mutex_failed;
+
+ if (!page->page_ptr)
+ goto err_page_already_freed;
+
+ index = page - alloc->pages;
+ page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
+ if (alloc->vma) {
+ mm = get_task_mm(alloc->tsk);
+ if (!mm)
+ goto err_get_task_mm_failed;
+ if (!down_write_trylock(&mm->mmap_sem))
+ goto err_down_write_mmap_sem_failed;
+
+ trace_binder_unmap_user_start(alloc, index);
+
+ zap_page_range(alloc->vma,
+ page_addr + alloc->user_buffer_offset,
+ PAGE_SIZE);
+
+ trace_binder_unmap_user_end(alloc, index);
+
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ }
+
+ trace_binder_unmap_kernel_start(alloc, index);
+
+ unmap_kernel_range(page_addr, PAGE_SIZE);
+ __free_page(page->page_ptr);
+ page->page_ptr = NULL;
+
+ trace_binder_unmap_kernel_end(alloc, index);
+
+ list_lru_isolate(lru, item);
+
+ mutex_unlock(&alloc->mutex);
+ return LRU_REMOVED;
+
+err_down_write_mmap_sem_failed:
+ mmput(mm);
+err_get_task_mm_failed:
+err_page_already_freed:
+ mutex_unlock(&alloc->mutex);
+err_get_alloc_mutex_failed:
+ return LRU_SKIP;
+}
+
+static unsigned long
+binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+ unsigned long ret = list_lru_count(&binder_alloc_lru);
+ return ret;
+}
+
+static unsigned long
+binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+{
+ unsigned long ret;
+
+ ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
+ NULL, sc->nr_to_scan);
+ return ret;
+}
+
+struct shrinker binder_shrinker = {
+ .count_objects = binder_shrink_count,
+ .scan_objects = binder_shrink_scan,
+ .seeks = DEFAULT_SEEKS,
+};
+
+/**
+ * binder_alloc_init() - called by binder_open() for per-proc initialization
+ * @alloc: binder_alloc for this proc
+ *
+ * Called from binder_open() to initialize binder_alloc fields for
+ * new binder proc
+ */
+void binder_alloc_init(struct binder_alloc *alloc)
+{
+ alloc->tsk = current->group_leader;
+ alloc->pid = current->group_leader->pid;
+ mutex_init(&alloc->mutex);
+ INIT_LIST_HEAD(&alloc->buffers);
+}
+
+void binder_alloc_shrinker_init(void)
+{
+ list_lru_init(&binder_alloc_lru);
+ register_shrinker(&binder_shrinker);
+}
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
new file mode 100644
index 000000000000..a3a3602c689c
--- /dev/null
+++ b/drivers/android/binder_alloc.h
@@ -0,0 +1,187 @@
+/*
+ * Copyright (C) 2017 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_BINDER_ALLOC_H
+#define _LINUX_BINDER_ALLOC_H
+
+#include <linux/rbtree.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/rtmutex.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/list_lru.h>
+
+extern struct list_lru binder_alloc_lru;
+struct binder_transaction;
+
+/**
+ * struct binder_buffer - buffer used for binder transactions
+ * @entry: entry in alloc->buffers list
+ * @rb_node: node for allocated_buffers/free_buffers rb trees
+ * @free: true if buffer is free
+ * @allow_user_free: true if userspace is allowed to free this buffer
+ * @async_transaction: true if buffer is in use for an async transaction
+ * @debug_id: unique ID for debugging
+ * @transaction: pointer to associated struct binder_transaction
+ * @target_node: struct binder_node associated with this buffer
+ * @data_size: size of @transaction data
+ * @offsets_size: size of array of offsets in the buffer
+ * @extra_buffers_size: size of space for other objects (like sg lists)
+ * @data: pointer to base of buffer space
+ *
+ * Bookkeeping structure for binder transaction buffers
+ */
+struct binder_buffer {
+ struct list_head entry; /* free and allocated entries by address */
+ struct rb_node rb_node; /* free entry by size or allocated entry */
+ /* by address */
+ unsigned free:1;
+ unsigned allow_user_free:1;
+ unsigned async_transaction:1;
+ unsigned free_in_progress:1;
+ unsigned debug_id:28;
+
+ struct binder_transaction *transaction;
+
+ struct binder_node *target_node;
+ size_t data_size;
+ size_t offsets_size;
+ size_t extra_buffers_size;
+ void *data;
+};
+
+/**
+ * struct binder_lru_page - page object used for binder shrinker
+ * @page_ptr: pointer to physical page in mmap'd space
+ * @lru: entry in binder_alloc_lru
+ * @alloc: binder_alloc for a proc
+ */
+struct binder_lru_page {
+ struct list_head lru;
+ struct page *page_ptr;
+ struct binder_alloc *alloc;
+};
+
+/**
+ * struct binder_alloc - per-binder proc state for binder allocator
+ * @vma: vm_area_struct passed to mmap_handler
+ * (invariant after mmap)
+ * @tsk: task that called init for this proc
+ * (invariant after init)
+ * @vma_vm_mm: copy of vma->vm_mm (invariant after mmap)
+ * @buffer: base of per-proc address space mapped via mmap
+ * @user_buffer_offset: offset between user and kernel VAs for buffer
+ * @buffers: list of all buffers for this proc
+ * @free_buffers: rb tree of buffers available for allocation
+ * sorted by size
+ * @allocated_buffers: rb tree of allocated buffers sorted by address
+ * @free_async_space: VA space available for async buffers. This is
+ * initialized at mmap time to 1/2 the full VA space
+ * @pages: array of binder_lru_page
+ * @buffer_size: size of address space specified via mmap
+ * @pid: pid for associated binder_proc (invariant after init)
+ *
+ * Bookkeeping structure for per-proc address space management for binder
+ * buffers. It is normally initialized during binder_open() and binder_mmap()
+ * calls. The address space is used for both user-visible buffers and for
+ * struct binder_buffer objects used to track the user buffers
+ */
+struct binder_alloc {
+ struct mutex mutex;
+ struct task_struct *tsk;
+ struct vm_area_struct *vma;
+ struct mm_struct *vma_vm_mm;
+ void *buffer;
+ ptrdiff_t user_buffer_offset;
+ struct list_head buffers;
+ struct rb_root free_buffers;
+ struct rb_root allocated_buffers;
+ size_t free_async_space;
+ struct binder_lru_page *pages;
+ size_t buffer_size;
+ uint32_t buffer_free;
+ int pid;
+};
+
+#ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST
+void binder_selftest_alloc(struct binder_alloc *alloc);
+#else
+static inline void binder_selftest_alloc(struct binder_alloc *alloc) {}
+#endif
+enum lru_status binder_alloc_free_page(struct list_head *item,
+ struct list_lru_one *lru,
+ spinlock_t *lock, void *cb_arg);
+extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
+ size_t data_size,
+ size_t offsets_size,
+ size_t extra_buffers_size,
+ int is_async);
+extern void binder_alloc_init(struct binder_alloc *alloc);
+void binder_alloc_shrinker_init(void);
+extern void binder_alloc_vma_close(struct binder_alloc *alloc);
+extern struct binder_buffer *
+binder_alloc_prepare_to_free(struct binder_alloc *alloc,
+ uintptr_t user_ptr);
+extern void binder_alloc_free_buf(struct binder_alloc *alloc,
+ struct binder_buffer *buffer);
+extern int binder_alloc_mmap_handler(struct binder_alloc *alloc,
+ struct vm_area_struct *vma);
+extern void binder_alloc_deferred_release(struct binder_alloc *alloc);
+extern int binder_alloc_get_allocated_count(struct binder_alloc *alloc);
+extern void binder_alloc_print_allocated(struct seq_file *m,
+ struct binder_alloc *alloc);
+void binder_alloc_print_pages(struct seq_file *m,
+ struct binder_alloc *alloc);
+
+/**
+ * binder_alloc_get_free_async_space() - get free space available for async
+ * @alloc: binder_alloc for this proc
+ *
+ * Return: the bytes remaining in the address-space for async transactions
+ */
+static inline size_t
+binder_alloc_get_free_async_space(struct binder_alloc *alloc)
+{
+ size_t free_async_space;
+
+ mutex_lock(&alloc->mutex);
+ free_async_space = alloc->free_async_space;
+ mutex_unlock(&alloc->mutex);
+ return free_async_space;
+}
+
+/**
+ * binder_alloc_get_user_buffer_offset() - get offset between kernel/user addrs
+ * @alloc: binder_alloc for this proc
+ *
+ * Return: the offset between kernel and user-space addresses to use for
+ * virtual address conversion
+ */
+static inline ptrdiff_t
+binder_alloc_get_user_buffer_offset(struct binder_alloc *alloc)
+{
+ /*
+ * user_buffer_offset is constant if vma is set and
+ * undefined if vma is not set. It is possible to
+ * get here with !alloc->vma if the target process
+ * is dying while a transaction is being initiated.
+ * Returning the old value is ok in this case and
+ * the transaction will fail.
+ */
+ return alloc->user_buffer_offset;
+}
+
+#endif /* _LINUX_BINDER_ALLOC_H */
+
diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c
new file mode 100644
index 000000000000..8bd7bcef967d
--- /dev/null
+++ b/drivers/android/binder_alloc_selftest.c
@@ -0,0 +1,310 @@
+/* binder_alloc_selftest.c
+ *
+ * Android IPC Subsystem
+ *
+ * Copyright (C) 2017 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/mm_types.h>
+#include <linux/err.h>
+#include "binder_alloc.h"
+
+#define BUFFER_NUM 5
+#define BUFFER_MIN_SIZE (PAGE_SIZE / 8)
+
+static bool binder_selftest_run = true;
+static int binder_selftest_failures;
+static DEFINE_MUTEX(binder_selftest_lock);
+
+/**
+ * enum buf_end_align_type - Page alignment of a buffer
+ * end with regard to the end of the previous buffer.
+ *
+ * In the pictures below, buf2 refers to the buffer we
+ * are aligning. buf1 refers to the previous buffer by address.
+ * The symbol [ marks the start of a buffer, ] marks the end
+ * of a buffer, and | marks a page boundary.
+ */
+enum buf_end_align_type {
+ /**
+ * @SAME_PAGE_UNALIGNED: The end of this buffer is on
+ * the same page as the end of the previous buffer and
+ * is not page aligned. Examples:
+ * buf1 ][ buf2 ][ ...
+ * buf1 ]|[ buf2 ][ ...
+ */
+ SAME_PAGE_UNALIGNED = 0,
+ /**
+ * @SAME_PAGE_ALIGNED: When the end of the previous buffer
+ * is not page aligned, the end of this buffer is on the
+ * same page as the end of the previous buffer and is page
+ * aligned. When the previous buffer is page aligned, the
+ * end of this buffer is aligned to the next page boundary.
+ * Examples:
+ * buf1 ][ buf2 ]| ...
+ * buf1 ]|[ buf2 ]| ...
+ */
+ SAME_PAGE_ALIGNED,
+ /**
+ * @NEXT_PAGE_UNALIGNED: The end of this buffer is on
+ * the page next to the end of the previous buffer and
+ * is not page aligned. Examples:
+ * buf1 ][ buf2 | buf2 ][ ...
+ * buf1 ]|[ buf2 | buf2 ][ ...
+ */
+ NEXT_PAGE_UNALIGNED,
+ /**
+ * @NEXT_PAGE_ALIGNED: The end of this buffer is on
+ * the page next to the end of the previous buffer and
+ * is page aligned. Examples:
+ * buf1 ][ buf2 | buf2 ]| ...
+ * buf1 ]|[ buf2 | buf2 ]| ...
+ */
+ NEXT_PAGE_ALIGNED,
+ /**
+ * @NEXT_NEXT_UNALIGNED: The end of this buffer is on
+ * the page that follows the page after the end of the
+ * previous buffer and is not page aligned. Examples:
+ * buf1 ][ buf2 | buf2 | buf2 ][ ...
+ * buf1 ]|[ buf2 | buf2 | buf2 ][ ...
+ */
+ NEXT_NEXT_UNALIGNED,
+ LOOP_END,
+};
+
+static void pr_err_size_seq(size_t *sizes, int *seq)
+{
+ int i;
+
+ pr_err("alloc sizes: ");
+ for (i = 0; i < BUFFER_NUM; i++)
+ pr_cont("[%zu]", sizes[i]);
+ pr_cont("\n");
+ pr_err("free seq: ");
+ for (i = 0; i < BUFFER_NUM; i++)
+ pr_cont("[%d]", seq[i]);
+ pr_cont("\n");
+}
+
+static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
+ struct binder_buffer *buffer,
+ size_t size)
+{
+ void *page_addr, *end;
+ int page_index;
+
+ end = (void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
+ page_addr = buffer->data;
+ for (; page_addr < end; page_addr += PAGE_SIZE) {
+ page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
+ if (!alloc->pages[page_index].page_ptr ||
+ !list_empty(&alloc->pages[page_index].lru)) {
+ pr_err("expect alloc but is %s at page index %d\n",
+ alloc->pages[page_index].page_ptr ?
+ "lru" : "free", page_index);
+ return false;
+ }
+ }
+ return true;
+}
+
+static void binder_selftest_alloc_buf(struct binder_alloc *alloc,
+ struct binder_buffer *buffers[],
+ size_t *sizes, int *seq)
+{
+ int i;
+
+ for (i = 0; i < BUFFER_NUM; i++) {
+ buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0);
+ if (IS_ERR(buffers[i]) ||
+ !check_buffer_pages_allocated(alloc, buffers[i],
+ sizes[i])) {
+ pr_err_size_seq(sizes, seq);
+ binder_selftest_failures++;
+ }
+ }
+}
+
+static void binder_selftest_free_buf(struct binder_alloc *alloc,
+ struct binder_buffer *buffers[],
+ size_t *sizes, int *seq, size_t end)
+{
+ int i;
+
+ for (i = 0; i < BUFFER_NUM; i++)
+ binder_alloc_free_buf(alloc, buffers[seq[i]]);
+
+ for (i = 0; i < end / PAGE_SIZE; i++) {
+ /*
+ * An error message on a free page can be a false positive
+ * if the binder shrinker ran during the binder_alloc_free_buf
+ * calls above.
+ */
+ if (list_empty(&alloc->pages[i].lru)) {
+ pr_err_size_seq(sizes, seq);
+ pr_err("expect lru but is %s at page index %d\n",
+ alloc->pages[i].page_ptr ? "alloc" : "free", i);
+ binder_selftest_failures++;
+ }
+ }
+}
+
+static void binder_selftest_free_page(struct binder_alloc *alloc)
+{
+ int i;
+ unsigned long count;
+
+ while ((count = list_lru_count(&binder_alloc_lru))) {
+ list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
+ NULL, count);
+ }
+
+ for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) {
+ if (alloc->pages[i].page_ptr) {
+ pr_err("expect free but is %s at page index %d\n",
+ list_empty(&alloc->pages[i].lru) ?
+ "alloc" : "lru", i);
+ binder_selftest_failures++;
+ }
+ }
+}
+
+static void binder_selftest_alloc_free(struct binder_alloc *alloc,
+ size_t *sizes, int *seq, size_t end)
+{
+ struct binder_buffer *buffers[BUFFER_NUM];
+
+ binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
+ binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
+
+ /* Allocate from lru. */
+ binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
+ if (list_lru_count(&binder_alloc_lru))
+ pr_err("lru list should be empty but is not\n");
+
+ binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
+ binder_selftest_free_page(alloc);
+}
+
+static bool is_dup(int *seq, int index, int val)
+{
+ int i;
+
+ for (i = 0; i < index; i++) {
+ if (seq[i] == val)
+ return true;
+ }
+ return false;
+}
+
+/* Generate BUFFER_NUM factorial free orders. */
+static void binder_selftest_free_seq(struct binder_alloc *alloc,
+ size_t *sizes, int *seq,
+ int index, size_t end)
+{
+ int i;
+
+ if (index == BUFFER_NUM) {
+ binder_selftest_alloc_free(alloc, sizes, seq, end);
+ return;
+ }
+ for (i = 0; i < BUFFER_NUM; i++) {
+ if (is_dup(seq, index, i))
+ continue;
+ seq[index] = i;
+ binder_selftest_free_seq(alloc, sizes, seq, index + 1, end);
+ }
+}
+
+static void binder_selftest_alloc_size(struct binder_alloc *alloc,
+ size_t *end_offset)
+{
+ int i;
+ int seq[BUFFER_NUM] = {0};
+ size_t front_sizes[BUFFER_NUM];
+ size_t back_sizes[BUFFER_NUM];
+ size_t last_offset, offset = 0;
+
+ for (i = 0; i < BUFFER_NUM; i++) {
+ last_offset = offset;
+ offset = end_offset[i];
+ front_sizes[i] = offset - last_offset;
+ back_sizes[BUFFER_NUM - i - 1] = front_sizes[i];
+ }
+ /*
+ * Buffers share the first or last few pages.
+ * Only BUFFER_NUM - 1 buffer sizes are adjustable since
+ * we need one giant buffer before getting to the last page.
+ */
+ back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1];
+ binder_selftest_free_seq(alloc, front_sizes, seq, 0,
+ end_offset[BUFFER_NUM - 1]);
+ binder_selftest_free_seq(alloc, back_sizes, seq, 0, alloc->buffer_size);
+}
+
+static void binder_selftest_alloc_offset(struct binder_alloc *alloc,
+ size_t *end_offset, int index)
+{
+ int align;
+ size_t end, prev;
+
+ if (index == BUFFER_NUM) {
+ binder_selftest_alloc_size(alloc, end_offset);
+ return;
+ }
+ prev = index == 0 ? 0 : end_offset[index - 1];
+ end = prev;
+
+ BUILD_BUG_ON(BUFFER_MIN_SIZE * BUFFER_NUM >= PAGE_SIZE);
+
+ for (align = SAME_PAGE_UNALIGNED; align < LOOP_END; align++) {
+ if (align % 2)
+ end = ALIGN(end, PAGE_SIZE);
+ else
+ end += BUFFER_MIN_SIZE;
+ end_offset[index] = end;
+ binder_selftest_alloc_offset(alloc, end_offset, index + 1);
+ }
+}
+
+/**
+ * binder_selftest_alloc() - Test alloc and free of buffer pages.
+ * @alloc: Pointer to alloc struct.
+ *
+ * Allocate BUFFER_NUM buffers to cover all page alignment cases,
+ * then free them in all orders possible. Check that pages are
+ * correctly allocated, put onto lru when buffers are freed, and
+ * are freed when binder_alloc_free_page is called.
+ */
+void binder_selftest_alloc(struct binder_alloc *alloc)
+{
+ size_t end_offset[BUFFER_NUM];
+
+ if (!binder_selftest_run)
+ return;
+ mutex_lock(&binder_selftest_lock);
+ if (!binder_selftest_run || !alloc->vma)
+ goto done;
+ pr_info("STARTED\n");
+ binder_selftest_alloc_offset(alloc, end_offset, 0);
+ binder_selftest_run = false;
+ if (binder_selftest_failures > 0)
+ pr_info("%d tests FAILED\n", binder_selftest_failures);
+ else
+ pr_info("PASSED\n");
+
+done:
+ mutex_unlock(&binder_selftest_lock);
+}
diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h
index 7f20f3dc8369..76e3b9c8a8a2 100644
--- a/drivers/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
@@ -23,7 +23,8 @@
struct binder_buffer;
struct binder_node;
struct binder_proc;
-struct binder_ref;
+struct binder_alloc;
+struct binder_ref_data;
struct binder_thread;
struct binder_transaction;
@@ -146,8 +147,8 @@ TRACE_EVENT(binder_transaction_received,
TRACE_EVENT(binder_transaction_node_to_ref,
TP_PROTO(struct binder_transaction *t, struct binder_node *node,
- struct binder_ref *ref),
- TP_ARGS(t, node, ref),
+ struct binder_ref_data *rdata),
+ TP_ARGS(t, node, rdata),
TP_STRUCT__entry(
__field(int, debug_id)
@@ -160,8 +161,8 @@ TRACE_EVENT(binder_transaction_node_to_ref,
__entry->debug_id = t->debug_id;
__entry->node_debug_id = node->debug_id;
__entry->node_ptr = node->ptr;
- __entry->ref_debug_id = ref->debug_id;
- __entry->ref_desc = ref->desc;
+ __entry->ref_debug_id = rdata->debug_id;
+ __entry->ref_desc = rdata->desc;
),
TP_printk("transaction=%d node=%d src_ptr=0x%016llx ==> dest_ref=%d dest_desc=%d",
__entry->debug_id, __entry->node_debug_id,
@@ -170,8 +171,9 @@ TRACE_EVENT(binder_transaction_node_to_ref,
);
TRACE_EVENT(binder_transaction_ref_to_node,
- TP_PROTO(struct binder_transaction *t, struct binder_ref *ref),
- TP_ARGS(t, ref),
+ TP_PROTO(struct binder_transaction *t, struct binder_node *node,
+ struct binder_ref_data *rdata),
+ TP_ARGS(t, node, rdata),
TP_STRUCT__entry(
__field(int, debug_id)
@@ -182,10 +184,10 @@ TRACE_EVENT(binder_transaction_ref_to_node,
),
TP_fast_assign(
__entry->debug_id = t->debug_id;
- __entry->ref_debug_id = ref->debug_id;
- __entry->ref_desc = ref->desc;
- __entry->node_debug_id = ref->node->debug_id;
- __entry->node_ptr = ref->node->ptr;
+ __entry->ref_debug_id = rdata->debug_id;
+ __entry->ref_desc = rdata->desc;
+ __entry->node_debug_id = node->debug_id;
+ __entry->node_ptr = node->ptr;
),
TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ptr=0x%016llx",
__entry->debug_id, __entry->node_debug_id,
@@ -194,9 +196,10 @@ TRACE_EVENT(binder_transaction_ref_to_node,
);
TRACE_EVENT(binder_transaction_ref_to_ref,
- TP_PROTO(struct binder_transaction *t, struct binder_ref *src_ref,
- struct binder_ref *dest_ref),
- TP_ARGS(t, src_ref, dest_ref),
+ TP_PROTO(struct binder_transaction *t, struct binder_node *node,
+ struct binder_ref_data *src_ref,
+ struct binder_ref_data *dest_ref),
+ TP_ARGS(t, node, src_ref, dest_ref),
TP_STRUCT__entry(
__field(int, debug_id)
@@ -208,7 +211,7 @@ TRACE_EVENT(binder_transaction_ref_to_ref,
),
TP_fast_assign(
__entry->debug_id = t->debug_id;
- __entry->node_debug_id = src_ref->node->debug_id;
+ __entry->node_debug_id = node->debug_id;
__entry->src_ref_debug_id = src_ref->debug_id;
__entry->src_ref_desc = src_ref->desc;
__entry->dest_ref_debug_id = dest_ref->debug_id;
@@ -268,9 +271,9 @@ DEFINE_EVENT(binder_buffer_class, binder_transaction_failed_buffer_release,
TP_ARGS(buffer));
TRACE_EVENT(binder_update_page_range,
- TP_PROTO(struct binder_proc *proc, bool allocate,
+ TP_PROTO(struct binder_alloc *alloc, bool allocate,
void *start, void *end),
- TP_ARGS(proc, allocate, start, end),
+ TP_ARGS(alloc, allocate, start, end),
TP_STRUCT__entry(
__field(int, proc)
__field(bool, allocate)
@@ -278,9 +281,9 @@ TRACE_EVENT(binder_update_page_range,
__field(size_t, size)
),
TP_fast_assign(
- __entry->proc = proc->pid;
+ __entry->proc = alloc->pid;
__entry->allocate = allocate;
- __entry->offset = start - proc->buffer;
+ __entry->offset = start - alloc->buffer;
__entry->size = end - start;
),
TP_printk("proc=%d allocate=%d offset=%zu size=%zu",
@@ -288,6 +291,61 @@ TRACE_EVENT(binder_update_page_range,
__entry->offset, __entry->size)
);
+DECLARE_EVENT_CLASS(binder_lru_page_class,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index),
+ TP_STRUCT__entry(
+ __field(int, proc)
+ __field(size_t, page_index)
+ ),
+ TP_fast_assign(
+ __entry->proc = alloc->pid;
+ __entry->page_index = page_index;
+ ),
+ TP_printk("proc=%d page_index=%zu",
+ __entry->proc, __entry->page_index)
+);
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_lru_start,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_lru_end,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_free_lru_start,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_free_lru_end,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_page_start,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_page_end,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_user_start,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_user_end,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_kernel_start,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_kernel_end,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
TRACE_EVENT(binder_command,
TP_PROTO(uint32_t cmd),
TP_ARGS(cmd),
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 948fc86980a1..488c93724220 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -153,6 +153,16 @@ config AHCI_CEVA
If unsure, say N.
+config AHCI_MTK
+ tristate "MediaTek AHCI SATA support"
+ depends on ARCH_MEDIATEK
+ select MFD_SYSCON
+ help
+ This option enables support for the MediaTek SoC's
+ onboard AHCI SATA controller.
+
+ If unsure, say N.
+
config AHCI_MVEBU
tristate "Marvell EBU AHCI SATA support"
depends on ARCH_MVEBU
@@ -215,7 +225,7 @@ config SATA_FSL
config SATA_GEMINI
tristate "Gemini SATA bridge support"
- depends on PATA_FTIDE010
+ depends on ARCH_GEMINI || COMPILE_TEST
default ARCH_GEMINI
help
This enables support for the FTIDE010 to SATA bridge
@@ -613,7 +623,7 @@ config PATA_FTIDE010
tristate "Faraday Technology FTIDE010 PATA support"
depends on OF
depends on ARM
- default ARCH_GEMINI
+ depends on SATA_GEMINI
help
This option enables support for the Faraday FTIDE010
PATA controller found in the Cortina Gemini SoCs.
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index a26ef5a93919..ff9cd2e37458 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_AHCI_CEVA) += ahci_ceva.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_DA850) += ahci_da850.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_DM816) += ahci_dm816.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_IMX) += ahci_imx.o libahci.o libahci_platform.o
+obj-$(CONFIG_AHCI_MTK) += ahci_mtk.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_MVEBU) += ahci_mvebu.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_OCTEON) += ahci_octeon.o
obj-$(CONFIG_AHCI_SUNXI) += ahci_sunxi.o libahci.o libahci_platform.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 5a5fd0b404eb..cb9b0e9090e3 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1469,7 +1469,14 @@ static void ahci_remap_check(struct pci_dev *pdev, int bar,
return;
dev_warn(&pdev->dev, "Found %d remapped NVMe devices.\n", count);
- dev_warn(&pdev->dev, "Switch your BIOS from RAID to AHCI mode to use them.\n");
+ dev_warn(&pdev->dev,
+ "Switch your BIOS from RAID to AHCI mode to use them.\n");
+
+ /*
+ * Don't rely on the msi-x capability in the remap case,
+ * share the legacy interrupt across ahci and remapped devices.
+ */
+ hpriv->flags |= AHCI_HFLAG_NO_MSI;
}
static int ahci_get_irq_vector(struct ata_host *host, int port)
diff --git a/drivers/ata/ahci_da850.c b/drivers/ata/ahci_da850.c
index 1a50cd3b4233..9b34dff64536 100644
--- a/drivers/ata/ahci_da850.c
+++ b/drivers/ata/ahci_da850.c
@@ -216,12 +216,16 @@ static int ahci_da850_probe(struct platform_device *pdev)
return rc;
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res)
+ if (!res) {
+ rc = -ENODEV;
goto disable_resources;
+ }
pwrdn_reg = devm_ioremap(dev, res->start, resource_size(res));
- if (!pwrdn_reg)
+ if (!pwrdn_reg) {
+ rc = -ENOMEM;
goto disable_resources;
+ }
da850_sata_init(dev, pwrdn_reg, hpriv->mmio, mpy);
diff --git a/drivers/ata/ahci_mtk.c b/drivers/ata/ahci_mtk.c
new file mode 100644
index 000000000000..80854f71559a
--- /dev/null
+++ b/drivers/ata/ahci_mtk.c
@@ -0,0 +1,196 @@
+/*
+ * MediaTek AHCI SATA driver
+ *
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Ryder Lee <ryder.lee@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ahci_platform.h>
+#include <linux/kernel.h>
+#include <linux/libata.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include "ahci.h"
+
+#define DRV_NAME "ahci"
+
+#define SYS_CFG 0x14
+#define SYS_CFG_SATA_MSK GENMASK(31, 30)
+#define SYS_CFG_SATA_EN BIT(31)
+
+struct mtk_ahci_plat {
+ struct regmap *mode;
+ struct reset_control *axi_rst;
+ struct reset_control *sw_rst;
+ struct reset_control *reg_rst;
+};
+
+static const struct ata_port_info ahci_port_info = {
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_platform_ops,
+};
+
+static struct scsi_host_template ahci_platform_sht = {
+ AHCI_SHT(DRV_NAME),
+};
+
+static int mtk_ahci_platform_resets(struct ahci_host_priv *hpriv,
+ struct device *dev)
+{
+ struct mtk_ahci_plat *plat = hpriv->plat_data;
+ int err;
+
+ /* reset AXI bus and PHY part */
+ plat->axi_rst = devm_reset_control_get_optional_exclusive(dev, "axi");
+ if (PTR_ERR(plat->axi_rst) == -EPROBE_DEFER)
+ return PTR_ERR(plat->axi_rst);
+
+ plat->sw_rst = devm_reset_control_get_optional_exclusive(dev, "sw");
+ if (PTR_ERR(plat->sw_rst) == -EPROBE_DEFER)
+ return PTR_ERR(plat->sw_rst);
+
+ plat->reg_rst = devm_reset_control_get_optional_exclusive(dev, "reg");
+ if (PTR_ERR(plat->reg_rst) == -EPROBE_DEFER)
+ return PTR_ERR(plat->reg_rst);
+
+ err = reset_control_assert(plat->axi_rst);
+ if (err) {
+ dev_err(dev, "failed to assert AXI bus\n");
+ return err;
+ }
+
+ err = reset_control_assert(plat->sw_rst);
+ if (err) {
+ dev_err(dev, "failed to assert PHY digital part\n");
+ return err;
+ }
+
+ err = reset_control_assert(plat->reg_rst);
+ if (err) {
+ dev_err(dev, "failed to assert PHY register part\n");
+ return err;
+ }
+
+ err = reset_control_deassert(plat->reg_rst);
+ if (err) {
+ dev_err(dev, "failed to deassert PHY register part\n");
+ return err;
+ }
+
+ err = reset_control_deassert(plat->sw_rst);
+ if (err) {
+ dev_err(dev, "failed to deassert PHY digital part\n");
+ return err;
+ }
+
+ err = reset_control_deassert(plat->axi_rst);
+ if (err) {
+ dev_err(dev, "failed to deassert AXI bus\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int mtk_ahci_parse_property(struct ahci_host_priv *hpriv,
+ struct device *dev)
+{
+ struct mtk_ahci_plat *plat = hpriv->plat_data;
+ struct device_node *np = dev->of_node;
+
+ /* enable SATA function if needed */
+ if (of_find_property(np, "mediatek,phy-mode", NULL)) {
+ plat->mode = syscon_regmap_lookup_by_phandle(
+ np, "mediatek,phy-mode");
+ if (IS_ERR(plat->mode)) {
+ dev_err(dev, "missing phy-mode phandle\n");
+ return PTR_ERR(plat->mode);
+ }
+
+ regmap_update_bits(plat->mode, SYS_CFG, SYS_CFG_SATA_MSK,
+ SYS_CFG_SATA_EN);
+ }
+
+ of_property_read_u32(np, "ports-implemented", &hpriv->force_port_map);
+
+ return 0;
+}
+
+static int mtk_ahci_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_ahci_plat *plat;
+ struct ahci_host_priv *hpriv;
+ int err;
+
+ plat = devm_kzalloc(dev, sizeof(*plat), GFP_KERNEL);
+ if (!plat)
+ return -ENOMEM;
+
+ hpriv = ahci_platform_get_resources(pdev);
+ if (IS_ERR(hpriv))
+ return PTR_ERR(hpriv);
+
+ hpriv->plat_data = plat;
+
+ err = mtk_ahci_parse_property(hpriv, dev);
+ if (err)
+ return err;
+
+ err = mtk_ahci_platform_resets(hpriv, dev);
+ if (err)
+ return err;
+
+ err = ahci_platform_enable_resources(hpriv);
+ if (err)
+ return err;
+
+ err = ahci_platform_init_host(pdev, hpriv, &ahci_port_info,
+ &ahci_platform_sht);
+ if (err)
+ goto disable_resources;
+
+ return 0;
+
+disable_resources:
+ ahci_platform_disable_resources(hpriv);
+ return err;
+}
+
+static SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_platform_suspend,
+ ahci_platform_resume);
+
+static const struct of_device_id ahci_of_match[] = {
+ { .compatible = "mediatek,mtk-ahci", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ahci_of_match);
+
+static struct platform_driver mtk_ahci_driver = {
+ .probe = mtk_ahci_probe,
+ .remove = ata_platform_remove_one,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = ahci_of_match,
+ .pm = &ahci_pm_ops,
+ },
+};
+module_platform_driver(mtk_ahci_driver);
+
+MODULE_DESCRIPTION("MeidaTek SATA AHCI Driver");
+MODULE_LICENSE("GPL v2");
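The reset sequence in mtk_ahci_platform_resets() leans on the optional reset API: devm_reset_control_get_optional_exclusive() returns NULL rather than an error when the named reset is absent from the device tree, and assert/deassert on a NULL control are no-ops, which is why only -EPROBE_DEFER needs special handling above. A minimal sketch of that pattern, independent of this driver and using a made-up "bus" reset name:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

static int example_optional_reset(struct device *dev)
{
	struct reset_control *rst;
	int err;

	/* NULL (not an error) is returned when no "bus" reset is described in DT. */
	rst = devm_reset_control_get_optional_exclusive(dev, "bus");
	if (IS_ERR(rst))
		return PTR_ERR(rst);		/* e.g. -EPROBE_DEFER */

	/* Both calls are no-ops and return 0 when rst is NULL. */
	err = reset_control_assert(rst);
	if (err)
		return err;

	return reset_control_deassert(rst);
}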
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 62a04c8fb5c9..99f9a895a459 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -93,6 +93,7 @@ MODULE_DEVICE_TABLE(acpi, ahci_acpi_match);
static struct platform_driver ahci_driver = {
.probe = ahci_probe,
.remove = ata_platform_remove_one,
+ .shutdown = ahci_platform_shutdown,
.driver = {
.name = DRV_NAME,
.of_match_table = ahci_of_match,
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index cd2eab6aa92e..a270a1173c8c 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -602,6 +602,40 @@ static void ahci_host_stop(struct ata_host *host)
ahci_platform_disable_resources(hpriv);
}
+/**
+ * ahci_platform_shutdown - Disable interrupts and stop DMA for host ports
+ * @dev: platform device pointer for the host
+ *
+ * This function is called during system shutdown and performs the minimal
+ * deconfiguration required to ensure that an ahci_platform host cannot
+ * corrupt or otherwise interfere with a new kernel being started with kexec.
+ */
+void ahci_platform_shutdown(struct platform_device *pdev)
+{
+ struct ata_host *host = platform_get_drvdata(pdev);
+ struct ahci_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+ int i;
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+
+ /* Disable port interrupts */
+ if (ap->ops->freeze)
+ ap->ops->freeze(ap);
+
+ /* Stop the port DMA engines */
+ if (ap->ops->port_stop)
+ ap->ops->port_stop(ap);
+ }
+
+ /* Disable and clear host interrupts */
+ writel(readl(mmio + HOST_CTL) & ~HOST_IRQ_EN, mmio + HOST_CTL);
+ readl(mmio + HOST_CTL); /* flush */
+ writel(GENMASK(host->n_ports, 0), mmio + HOST_IRQ_STAT);
+}
+EXPORT_SYMBOL_GPL(ahci_platform_shutdown);
+
#ifdef CONFIG_PM_SLEEP
/**
* ahci_platform_suspend_host - Suspend an ahci-platform host
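The exported ahci_platform_shutdown() above is intended to be wired into a platform driver's .shutdown callback, exactly as the ahci_platform.c hunk earlier does; it freezes each port, stops its DMA engine and masks host interrupts so a kexec'd kernel starts from a quiet controller. A sketch of how another libahci_platform user could adopt it (driver name and probe body are placeholders, and it assumes the declaration is available from ahci_platform.h alongside this export):

#include <linux/ahci_platform.h>
#include <linux/libata.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int example_ahci_probe(struct platform_device *pdev)
{
	/* normal ahci_platform_get_resources()/ahci_platform_init_host() flow */
	return -ENODEV;			/* placeholder body for this sketch */
}

static struct platform_driver example_ahci_driver = {
	.probe    = example_ahci_probe,
	.remove   = ata_platform_remove_one,
	/* Quiesce port DMA and controller interrupts on system shutdown/kexec. */
	.shutdown = ahci_platform_shutdown,
	.driver = {
		.name = "example-ahci",
	},
};
module_platform_driver(example_ahci_driver);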
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 8453f9a4682f..1945a8ea2099 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2083,7 +2083,7 @@ unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
retry:
ata_tf_init(dev, &tf);
if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) &&
- !(dev->horkage & ATA_HORKAGE_NO_NCQ_LOG)) {
+ !(dev->horkage & ATA_HORKAGE_NO_DMA_LOG)) {
tf.command = ATA_CMD_READ_LOG_DMA_EXT;
tf.protocol = ATA_PROT_DMA;
dma = true;
@@ -2102,8 +2102,8 @@ retry:
buf, sectors * ATA_SECT_SIZE, 0);
if (err_mask && dma) {
- dev->horkage |= ATA_HORKAGE_NO_NCQ_LOG;
- ata_dev_warn(dev, "READ LOG DMA EXT failed, trying unqueued\n");
+ dev->horkage |= ATA_HORKAGE_NO_DMA_LOG;
+ ata_dev_warn(dev, "READ LOG DMA EXT failed, trying PIO\n");
goto retry;
}
@@ -2411,6 +2411,9 @@ static void ata_dev_config_trusted(struct ata_device *dev)
u64 trusted_cap;
unsigned int err;
+ if (!ata_id_has_trusted(dev->id))
+ return;
+
if (!ata_identify_page_supported(dev, ATA_LOG_SECURITY)) {
ata_dev_warn(dev,
"Security Log not supported\n");
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index b70bcf6d2914..e4effef0c83f 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -645,12 +645,11 @@ void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
* completions are honored. A scmd is determined to have
* timed out iff its associated qc is active and not failed.
*/
+ spin_lock_irqsave(ap->lock, flags);
if (ap->ops->error_handler) {
struct scsi_cmnd *scmd, *tmp;
int nr_timedout = 0;
- spin_lock_irqsave(ap->lock, flags);
-
/* This must occur under the ap->lock as we don't want
a polled recovery to race the real interrupt handler
@@ -700,12 +699,11 @@ void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
if (nr_timedout)
__ata_port_freeze(ap);
- spin_unlock_irqrestore(ap->lock, flags);
/* initialize eh_tries */
ap->eh_tries = ATA_EH_MAX_TRIES;
- } else
- spin_unlock_wait(ap->lock);
+ }
+ spin_unlock_irqrestore(ap->lock, flags);
}
EXPORT_SYMBOL(ata_scsi_cmd_error_handler);
@@ -1434,7 +1432,7 @@ void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
/**
* ata_eh_done - EH action complete
-* @ap: target ATA port
+ * @link: ATA link for which EH actions are complete
* @dev: target ATA dev for per-dev action (can be NULL)
* @action: action just completed
*
@@ -1576,7 +1574,7 @@ unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
/**
* ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
- * @dev: device to perform REQUEST_SENSE_SENSE_DATA_EXT to
+ * @qc: qc to perform REQUEST_SENSE_SENSE_DATA_EXT to
* @cmd: scsi command for which the sense code should be set
*
* Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK
@@ -4175,7 +4173,6 @@ static void ata_eh_handle_port_resume(struct ata_port *ap)
struct ata_link *link;
struct ata_device *dev;
unsigned long flags;
- int rc = 0;
/* are we resuming? */
spin_lock_irqsave(ap->lock, flags);
@@ -4202,7 +4199,7 @@ static void ata_eh_handle_port_resume(struct ata_port *ap)
ata_acpi_set_state(ap, ap->pm_mesg);
if (ap->ops->port_resume)
- rc = ap->ops->port_resume(ap);
+ ap->ops->port_resume(ap);
/* tell ACPI that we're resuming */
ata_acpi_on_resume(ap);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index d462c5a3a7ef..44ba292f2cd7 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3030,10 +3030,12 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
static struct ata_device *ata_find_dev(struct ata_port *ap, int devno)
{
if (!sata_pmp_attached(ap)) {
- if (likely(devno < ata_link_max_devices(&ap->link)))
+ if (likely(devno >= 0 &&
+ devno < ata_link_max_devices(&ap->link)))
return &ap->link.device[devno];
} else {
- if (likely(devno < ap->nr_pmp_links))
+ if (likely(devno >= 0 &&
+ devno < ap->nr_pmp_links))
return &ap->pmp_link[devno].device[0];
}
diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
index 8a01d09ac4db..23a62e4015d0 100644
--- a/drivers/ata/libata-zpodd.c
+++ b/drivers/ata/libata-zpodd.c
@@ -34,7 +34,7 @@ struct zpodd {
static int eject_tray(struct ata_device *dev)
{
struct ata_taskfile tf;
- const char cdb[] = { GPCMD_START_STOP_UNIT,
+ static const char cdb[] = { GPCMD_START_STOP_UNIT,
0, 0, 0,
0x02, /* LoEj */
0, 0, 0, 0, 0, 0, 0,
@@ -55,7 +55,7 @@ static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
unsigned int ret;
struct rm_feature_desc *desc = (void *)(buf + 8);
struct ata_taskfile tf;
- char cdb[] = { GPCMD_GET_CONFIGURATION,
+ static const char cdb[] = { GPCMD_GET_CONFIGURATION,
2, /* only 1 feature descriptor requested */
0, 3, /* 3, removable medium feature */
0, 0, 0,/* reserved */
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index 8d4d959a821c..8706533db57b 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -616,6 +616,7 @@ static const struct pci_device_id amd[] = {
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE), 8 },
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE), 8 },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_IDE), 9 },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_DEV_IDE), 9 },
{ },
};
diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
index 6c15a554efbe..dc1255294628 100644
--- a/drivers/ata/pata_cs5536.c
+++ b/drivers/ata/pata_cs5536.c
@@ -289,6 +289,7 @@ static int cs5536_init_one(struct pci_dev *dev, const struct pci_device_id *id)
static const struct pci_device_id cs5536[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_IDE), },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_DEV_IDE), },
{ },
};
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 1ba03d6df951..d3d851b014a3 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -840,7 +840,6 @@ static int octeon_cf_probe(struct platform_device *pdev)
struct property *reg_prop;
int n_addr, n_size, reg_len;
struct device_node *node;
- const void *prop;
void __iomem *cs0;
void __iomem *cs1 = NULL;
struct ata_host *host;
@@ -850,7 +849,7 @@ static int octeon_cf_probe(struct platform_device *pdev)
void __iomem *base;
struct octeon_cf_port *cf_port;
int rv = -ENOMEM;
-
+ u32 bus_width;
node = pdev->dev.of_node;
if (node == NULL)
@@ -860,11 +859,10 @@ static int octeon_cf_probe(struct platform_device *pdev)
if (!cf_port)
return -ENOMEM;
- cf_port->is_true_ide = (of_find_property(node, "cavium,true-ide", NULL) != NULL);
+ cf_port->is_true_ide = of_property_read_bool(node, "cavium,true-ide");
- prop = of_get_property(node, "cavium,bus-width", NULL);
- if (prop)
- is_16bit = (be32_to_cpup(prop) == 16);
+ if (of_property_read_u32(node, "cavium,bus-width", &bus_width) == 0)
+ is_16bit = (bus_width == 16);
else
is_16bit = false;
diff --git a/drivers/ata/sata_gemini.c b/drivers/ata/sata_gemini.c
index 8c704523bae7..46950e0267e0 100644
--- a/drivers/ata/sata_gemini.c
+++ b/drivers/ata/sata_gemini.c
@@ -15,6 +15,7 @@
#include <linux/of_device.h>
#include <linux/clk.h>
#include <linux/io.h>
+#include <linux/pinctrl/consumer.h>
#include "sata_gemini.h"
#define DRV_NAME "gemini_sata_bridge"
@@ -43,17 +44,6 @@ struct sata_gemini {
struct clk *sata1_pclk;
};
-/* Global IDE PAD Skew Control Register */
-#define GEMINI_GLOBAL_IDE_SKEW_CTRL 0x18
-#define GEMINI_IDE1_HOST_STROBE_DELAY_SHIFT 28
-#define GEMINI_IDE1_DEVICE_STROBE_DELAY_SHIFT 24
-#define GEMINI_IDE1_OUTPUT_IO_SKEW_SHIFT 20
-#define GEMINI_IDE1_INPUT_IO_SKEW_SHIFT 16
-#define GEMINI_IDE0_HOST_STROBE_DELAY_SHIFT 12
-#define GEMINI_IDE0_DEVICE_STROBE_DELAY_SHIFT 8
-#define GEMINI_IDE0_OUTPUT_IO_SKEW_SHIFT 4
-#define GEMINI_IDE0_INPUT_IO_SKEW_SHIFT 0
-
/* Miscellaneous Control Register */
#define GEMINI_GLOBAL_MISC_CTRL 0x30
/*
@@ -91,8 +81,6 @@ struct sata_gemini {
#define GEMINI_IDE_IOMUX_MODE2 (2 << 24)
#define GEMINI_IDE_IOMUX_MODE3 (3 << 24)
#define GEMINI_IDE_IOMUX_SHIFT (24)
-#define GEMINI_IDE_PADS_ENABLE BIT(4)
-#define GEMINI_PFLASH_PADS_DISABLE BIT(1)
/*
* Registers directly controlling the PATA<->SATA adapters
@@ -274,14 +262,14 @@ static int gemini_sata_bridge_init(struct sata_gemini *sg)
return ret;
}
- sg->sata0_reset = devm_reset_control_get(dev, "sata0");
+ sg->sata0_reset = devm_reset_control_get_exclusive(dev, "sata0");
if (IS_ERR(sg->sata0_reset)) {
dev_err(dev, "no SATA0 reset controller\n");
clk_disable_unprepare(sg->sata1_pclk);
clk_disable_unprepare(sg->sata0_pclk);
return PTR_ERR(sg->sata0_reset);
}
- sg->sata1_reset = devm_reset_control_get(dev, "sata1");
+ sg->sata1_reset = devm_reset_control_get_exclusive(dev, "sata1");
if (IS_ERR(sg->sata1_reset)) {
dev_err(dev, "no SATA1 reset controller\n");
clk_disable_unprepare(sg->sata1_pclk);
@@ -300,17 +288,39 @@ static int gemini_sata_bridge_init(struct sata_gemini *sg)
return 0;
}
+static int gemini_setup_ide_pins(struct device *dev)
+{
+ struct pinctrl *p;
+ struct pinctrl_state *ide_state;
+ int ret;
+
+ p = devm_pinctrl_get(dev);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
+
+ ide_state = pinctrl_lookup_state(p, "ide");
+ if (IS_ERR(ide_state))
+ return PTR_ERR(ide_state);
+
+ ret = pinctrl_select_state(p, ide_state);
+ if (ret) {
+ dev_err(dev, "could not select IDE state\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static int gemini_sata_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct sata_gemini *sg;
- static struct regmap *map;
+ struct regmap *map;
struct resource *res;
enum gemini_muxmode muxmode;
u32 gmode;
u32 gmask;
- u32 val;
int ret;
sg = devm_kzalloc(dev, sizeof(*sg), GFP_KERNEL);
@@ -362,16 +372,6 @@ static int gemini_sata_probe(struct platform_device *pdev)
gmask = GEMINI_IDE_IOMUX_MASK;
gmode = (muxmode << GEMINI_IDE_IOMUX_SHIFT);
- /*
- * If we mux out the IDE, parallel flash must be disabled.
- * SATA0 and SATA1 have dedicated pins and may coexist with
- * parallel flash.
- */
- if (sg->ide_pins)
- gmode |= GEMINI_IDE_PADS_ENABLE | GEMINI_PFLASH_PADS_DISABLE;
- else
- gmask |= GEMINI_IDE_PADS_ENABLE;
-
ret = regmap_update_bits(map, GEMINI_GLOBAL_MISC_CTRL, gmask, gmode);
if (ret) {
dev_err(dev, "unable to set up IDE muxing\n");
@@ -379,14 +379,15 @@ static int gemini_sata_probe(struct platform_device *pdev)
goto out_unprep_clk;
}
- /* FIXME: add more elaborate IDE skew control handling */
+ /*
+ * Route out the IDE pins if desired.
+ * This is done by looking up a special pin control state called
+ * "ide" that will route out the IDE pins.
+ */
if (sg->ide_pins) {
- ret = regmap_read(map, GEMINI_GLOBAL_IDE_SKEW_CTRL, &val);
- if (ret) {
- dev_err(dev, "cannot read IDE skew control register\n");
+ ret = gemini_setup_ide_pins(dev);
+ if (ret)
return ret;
- }
- dev_info(dev, "IDE skew control: %08x\n", val);
}
dev_info(dev, "set up the Gemini IDE/SATA nexus\n");
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index ee9844758736..537d11869069 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -858,6 +858,14 @@ static const struct of_device_id sata_rcar_match[] = {
.compatible = "renesas,sata-r8a7795",
.data = (void *)RCAR_GEN2_SATA
},
+ {
+ .compatible = "renesas,rcar-gen2-sata",
+ .data = (void *)RCAR_GEN2_SATA
+ },
+ {
+ .compatible = "renesas,rcar-gen3-sata",
+ .data = (void *)RCAR_GEN2_SATA
+ },
{ },
};
MODULE_DEVICE_TABLE(of, sata_rcar_match);
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
index 0fd6ac7e57ba..a9d692c6c182 100644
--- a/drivers/ata/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -339,7 +339,7 @@ static int k2_sata_show_info(struct seq_file *m, struct Scsi_Host *shost)
if (!reg)
continue;
if (index == *reg) {
- seq_printf(m, "devspec: %s\n", np->full_name);
+ seq_printf(m, "devspec: %pOF\n", np);
break;
}
}
diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
index 1fd25e872ece..8d98130ecd40 100644
--- a/drivers/atm/adummy.c
+++ b/drivers/atm/adummy.c
@@ -71,7 +71,7 @@ static struct attribute *adummy_attrs[] = {
NULL
};
-static struct attribute_group adummy_group_attrs = {
+static const struct attribute_group adummy_group_attrs = {
.name = NULL, /* We want them in dev's root folder */
.attrs = adummy_attrs
};
@@ -130,7 +130,7 @@ adummy_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
return 0;
}
-static struct atmdev_ops adummy_ops =
+static const struct atmdev_ops adummy_ops =
{
.open = adummy_open,
.close = adummy_close,
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index 906705e5f776..acf16c323e38 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -2374,7 +2374,7 @@ MODULE_PARM_DESC(pci_lat, "PCI latency in bus cycles");
/********** module entry **********/
-static struct pci_device_id amb_pci_tbl[] = {
+static const struct pci_device_id amb_pci_tbl[] = {
{ PCI_VDEVICE(MADGE, PCI_DEVICE_ID_MADGE_AMBASSADOR), 0 },
{ PCI_VDEVICE(MADGE, PCI_DEVICE_ID_MADGE_AMBASSADOR_BAD), 0 },
{ 0, }
diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
index 56fa16c85ebf..afebeb1c3e1e 100644
--- a/drivers/atm/atmtcp.c
+++ b/drivers/atm/atmtcp.c
@@ -342,7 +342,7 @@ static struct atmdev_ops atmtcp_v_dev_ops = {
*/
-static struct atmdev_ops atmtcp_c_dev_ops = {
+static const struct atmdev_ops atmtcp_c_dev_ops = {
.close = atmtcp_c_close,
.send = atmtcp_c_send
};
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index b042ec458544..ce47eb17901d 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -2292,7 +2292,7 @@ err_disable:
}
-static struct pci_device_id eni_pci_tbl[] = {
+static const struct pci_device_id eni_pci_tbl[] = {
{ PCI_VDEVICE(EF, PCI_DEVICE_ID_EF_ATM_FPGA), 0 /* FPGA */ },
{ PCI_VDEVICE(EF, PCI_DEVICE_ID_EF_ATM_ASIC), 1 /* ASIC */ },
{ 0, }
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index 22dcab952a24..6b6368a56526 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -2030,7 +2030,7 @@ static void firestream_remove_one(struct pci_dev *pdev)
func_exit ();
}
-static struct pci_device_id firestream_pci_tbl[] = {
+static const struct pci_device_id firestream_pci_tbl[] = {
{ PCI_VDEVICE(FUJITSU_ME, PCI_DEVICE_ID_FUJITSU_FS50), FS_IS50},
{ PCI_VDEVICE(FUJITSU_ME, PCI_DEVICE_ID_FUJITSU_FS155), FS_IS155},
{ 0, }
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index f0433adcd8fc..f8b7e86907cc 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -2757,7 +2757,7 @@ static void fore200e_pca_remove_one(struct pci_dev *pci_dev)
}
-static struct pci_device_id fore200e_pca_tbl[] = {
+static const struct pci_device_id fore200e_pca_tbl[] = {
{ PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_PCA200E, PCI_ANY_ID, PCI_ANY_ID,
0, 0, (unsigned long) &fore200e_bus[0] },
{ 0, }
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 37ee21c5a5ca..e58538c29377 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -161,7 +161,7 @@ static unsigned int clocktab[] = {
CLK_LOW
};
-static struct atmdev_ops he_ops =
+static const struct atmdev_ops he_ops =
{
.open = he_open,
.close = he_close,
@@ -2851,7 +2851,7 @@ MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
module_param(sdh, bool, 0);
MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");
-static struct pci_device_id he_pci_tbl[] = {
+static const struct pci_device_id he_pci_tbl[] = {
{ PCI_VDEVICE(FORE, PCI_DEVICE_ID_FORE_HE), 0 },
{ 0, }
};
diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
index 0f18480b33b5..7e76b35f422c 100644
--- a/drivers/atm/horizon.c
+++ b/drivers/atm/horizon.c
@@ -2867,7 +2867,7 @@ MODULE_PARM_DESC(max_tx_size, "maximum size of TX AAL5 frames");
MODULE_PARM_DESC(max_rx_size, "maximum size of RX AAL5 frames");
MODULE_PARM_DESC(pci_lat, "PCI latency in bus cycles");
-static struct pci_device_id hrz_pci_tbl[] = {
+static const struct pci_device_id hrz_pci_tbl[] = {
{ PCI_VENDOR_ID_MADGE, PCI_DEVICE_ID_MADGE_HORIZON, PCI_ANY_ID, PCI_ANY_ID,
0, 0, 0 },
{ 0, }
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 60bacba03d17..47f3c4ae0594 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -134,7 +134,7 @@ static int idt77252_proc_read(struct atm_dev *dev, loff_t * pos,
static void idt77252_softint(struct work_struct *work);
-static struct atmdev_ops idt77252_ops =
+static const struct atmdev_ops idt77252_ops =
{
.dev_close = idt77252_dev_close,
.open = idt77252_open,
@@ -3725,7 +3725,7 @@ err_out_disable_pdev:
return err;
}
-static struct pci_device_id idt77252_pci_tbl[] =
+static const struct pci_device_id idt77252_pci_tbl[] =
{
{ PCI_VDEVICE(IDT, PCI_DEVICE_ID_IDT_IDT77252), 0 },
{ 0, }
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index a4fa6c82261e..fc72b763fdd7 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -3266,7 +3266,7 @@ static void ia_remove_one(struct pci_dev *pdev)
kfree(iadev);
}
-static struct pci_device_id ia_pci_tbl[] = {
+static const struct pci_device_id ia_pci_tbl[] = {
{ PCI_VENDOR_ID_IPHASE, 0x0008, PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_IPHASE, 0x0009, PCI_ANY_ID, PCI_ANY_ID, },
{ 0,}
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
index 1a9bc51284b0..2351dad78ff5 100644
--- a/drivers/atm/lanai.c
+++ b/drivers/atm/lanai.c
@@ -2589,7 +2589,7 @@ static int lanai_init_one(struct pci_dev *pci,
return result;
}
-static struct pci_device_id lanai_pci_tbl[] = {
+static const struct pci_device_id lanai_pci_tbl[] = {
{ PCI_VDEVICE(EF, PCI_DEVICE_ID_EF_ATM_LANAI2) },
{ PCI_VDEVICE(EF, PCI_DEVICE_ID_EF_ATM_LANAIHB) },
{ 0, } /* terminal entry */
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index d879f3bca107..a9702836cbae 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -154,7 +154,7 @@ static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr);
static struct ns_dev *cards[NS_MAX_CARDS];
static unsigned num_cards;
-static struct atmdev_ops atm_ops = {
+static const struct atmdev_ops atm_ops = {
.open = ns_open,
.close = ns_close,
.ioctl = ns_ioctl,
@@ -253,7 +253,7 @@ static void nicstar_remove_one(struct pci_dev *pcidev)
kfree(card);
}
-static struct pci_device_id nicstar_pci_tbl[] = {
+static const struct pci_device_id nicstar_pci_tbl[] = {
{ PCI_VDEVICE(IDT, PCI_DEVICE_ID_IDT_IDT77201), 0 },
{0,} /* terminate list */
};
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index c8f2ca6d8b29..0df1a1c80b00 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -611,7 +611,7 @@ static struct attribute *solos_attrs[] = {
NULL
};
-static struct attribute_group solos_attr_group = {
+static const struct attribute_group solos_attr_group = {
.attrs = solos_attrs,
.name = "parameters",
};
@@ -628,7 +628,7 @@ static struct attribute *gpio_attrs[] = {
NULL
};
-static struct attribute_group gpio_attr_group = {
+static const struct attribute_group gpio_attr_group = {
.attrs = gpio_attrs,
.name = "gpio",
};
@@ -1187,7 +1187,7 @@ static int psend(struct atm_vcc *vcc, struct sk_buff *skb)
return 0;
}
-static struct atmdev_ops fpga_ops = {
+static const struct atmdev_ops fpga_ops = {
.open = popen,
.close = pclose,
.ioctl = NULL,
@@ -1476,7 +1476,7 @@ static void fpga_remove(struct pci_dev *dev)
kfree(card);
}
-static struct pci_device_id fpga_pci_tbl[] = {
+static const struct pci_device_id fpga_pci_tbl[] = {
{ 0x10ee, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0, }
};
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index 292dec18ffb8..1ef67db03c8e 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -1613,7 +1613,7 @@ static int zatm_init_one(struct pci_dev *pci_dev,
ret = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
if (ret < 0)
- goto out_disable;
+ goto out_release;
zatm_dev->pci_dev = pci_dev;
dev->dev_data = zatm_dev;
@@ -1642,7 +1642,7 @@ out_free:
MODULE_LICENSE("GPL");
-static struct pci_device_id zatm_pci_tbl[] = {
+static const struct pci_device_id zatm_pci_tbl[] = {
{ PCI_VDEVICE(ZEITNET, PCI_DEVICE_ID_ZEITNET_1221), ZATM_COPPER },
{ PCI_VDEVICE(ZEITNET, PCI_DEVICE_ID_ZEITNET_1225), 0 },
{ 0, }
diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c
index 7a8b8fb2f572..df126dcdaf18 100644
--- a/drivers/auxdisplay/panel.c
+++ b/drivers/auxdisplay/panel.c
@@ -877,21 +877,21 @@ static void lcd_clear_fast_tilcd(struct charlcd *charlcd)
spin_unlock_irq(&pprt_lock);
}
-static struct charlcd_ops charlcd_serial_ops = {
+static const struct charlcd_ops charlcd_serial_ops = {
.write_cmd = lcd_write_cmd_s,
.write_data = lcd_write_data_s,
.clear_fast = lcd_clear_fast_s,
.backlight = lcd_backlight,
};
-static struct charlcd_ops charlcd_parallel_ops = {
+static const struct charlcd_ops charlcd_parallel_ops = {
.write_cmd = lcd_write_cmd_p8,
.write_data = lcd_write_data_p8,
.clear_fast = lcd_clear_fast_p8,
.backlight = lcd_backlight,
};
-static struct charlcd_ops charlcd_tilcd_ops = {
+static const struct charlcd_ops charlcd_tilcd_ops = {
.write_cmd = lcd_write_cmd_tilcd,
.write_data = lcd_write_data_tilcd,
.clear_fast = lcd_clear_fast_tilcd,
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index d1c33a85059e..41be9ff7d70a 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -41,8 +41,7 @@ static ssize_t cpu_capacity_show(struct device *dev,
{
struct cpu *cpu = container_of(dev, struct cpu, dev);
- return sprintf(buf, "%lu\n",
- topology_get_cpu_scale(NULL, cpu->dev.id));
+ return sprintf(buf, "%lu\n", topology_get_cpu_scale(NULL, cpu->dev.id));
}
static ssize_t cpu_capacity_store(struct device *dev,
@@ -96,14 +95,21 @@ subsys_initcall(register_cpu_capacity_sysctl);
static u32 capacity_scale;
static u32 *raw_capacity;
-static bool cap_parsing_failed;
+
+static int __init free_raw_capacity(void)
+{
+ kfree(raw_capacity);
+ raw_capacity = NULL;
+
+ return 0;
+}
void topology_normalize_cpu_scale(void)
{
u64 capacity;
int cpu;
- if (!raw_capacity || cap_parsing_failed)
+ if (!raw_capacity)
return;
pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale);
@@ -120,16 +126,16 @@ void topology_normalize_cpu_scale(void)
mutex_unlock(&cpu_scale_mutex);
}
-int __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
+bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{
- int ret = 1;
+ static bool cap_parsing_failed;
+ int ret;
u32 cpu_capacity;
if (cap_parsing_failed)
- return !ret;
+ return false;
- ret = of_property_read_u32(cpu_node,
- "capacity-dmips-mhz",
+ ret = of_property_read_u32(cpu_node, "capacity-dmips-mhz",
&cpu_capacity);
if (!ret) {
if (!raw_capacity) {
@@ -139,21 +145,21 @@ int __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
if (!raw_capacity) {
pr_err("cpu_capacity: failed to allocate memory for raw capacities\n");
cap_parsing_failed = true;
- return 0;
+ return false;
}
}
capacity_scale = max(cpu_capacity, capacity_scale);
raw_capacity[cpu] = cpu_capacity;
- pr_debug("cpu_capacity: %s cpu_capacity=%u (raw)\n",
- cpu_node->full_name, raw_capacity[cpu]);
+ pr_debug("cpu_capacity: %pOF cpu_capacity=%u (raw)\n",
+ cpu_node, raw_capacity[cpu]);
} else {
if (raw_capacity) {
- pr_err("cpu_capacity: missing %s raw capacity\n",
- cpu_node->full_name);
+ pr_err("cpu_capacity: missing %pOF raw capacity\n",
+ cpu_node);
pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
}
cap_parsing_failed = true;
- kfree(raw_capacity);
+ free_raw_capacity();
}
return !ret;
@@ -161,7 +167,6 @@ int __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
#ifdef CONFIG_CPU_FREQ
static cpumask_var_t cpus_to_visit;
-static bool cap_parsing_done;
static void parsing_done_workfn(struct work_struct *work);
static DECLARE_WORK(parsing_done_work, parsing_done_workfn);
@@ -173,30 +178,31 @@ init_cpu_capacity_callback(struct notifier_block *nb,
struct cpufreq_policy *policy = data;
int cpu;
- if (cap_parsing_failed || cap_parsing_done)
+ if (!raw_capacity)
return 0;
- switch (val) {
- case CPUFREQ_NOTIFY:
- pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
- cpumask_pr_args(policy->related_cpus),
- cpumask_pr_args(cpus_to_visit));
- cpumask_andnot(cpus_to_visit,
- cpus_to_visit,
- policy->related_cpus);
- for_each_cpu(cpu, policy->related_cpus) {
- raw_capacity[cpu] = topology_get_cpu_scale(NULL, cpu) *
- policy->cpuinfo.max_freq / 1000UL;
- capacity_scale = max(raw_capacity[cpu], capacity_scale);
- }
- if (cpumask_empty(cpus_to_visit)) {
- topology_normalize_cpu_scale();
- kfree(raw_capacity);
- pr_debug("cpu_capacity: parsing done\n");
- cap_parsing_done = true;
- schedule_work(&parsing_done_work);
- }
+ if (val != CPUFREQ_NOTIFY)
+ return 0;
+
+ pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
+ cpumask_pr_args(policy->related_cpus),
+ cpumask_pr_args(cpus_to_visit));
+
+ cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);
+
+ for_each_cpu(cpu, policy->related_cpus) {
+ raw_capacity[cpu] = topology_get_cpu_scale(NULL, cpu) *
+ policy->cpuinfo.max_freq / 1000UL;
+ capacity_scale = max(raw_capacity[cpu], capacity_scale);
+ }
+
+ if (cpumask_empty(cpus_to_visit)) {
+ topology_normalize_cpu_scale();
+ free_raw_capacity();
+ pr_debug("cpu_capacity: parsing done\n");
+ schedule_work(&parsing_done_work);
}
+
return 0;
}
@@ -233,11 +239,5 @@ static void parsing_done_workfn(struct work_struct *work)
}
#else
-static int __init free_raw_capacity(void)
-{
- kfree(raw_capacity);
-
- return 0;
-}
core_initcall(free_raw_capacity);
#endif
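The cpufreq notifier above converts each CPU's capacity-dmips-mhz value into a raw capacity by folding in the maximum frequency (raw = dmips * cpuinfo.max_freq / 1000, with the frequency in kHz), and topology_normalize_cpu_scale() then rescales so the largest CPU reads the scheduler capacity scale of 1024. An illustrative calculation with made-up numbers for a big.LITTLE pair (a standalone sketch, not kernel code):

#include <stdio.h>

int main(void)
{
	/* hypothetical DT values: capacity-dmips-mhz, max frequency in kHz */
	unsigned long big_dmips = 1024, big_khz = 2000000;
	unsigned long little_dmips = 512, little_khz = 1500000;

	unsigned long big_raw = big_dmips * big_khz / 1000;		/* 2048000 */
	unsigned long little_raw = little_dmips * little_khz / 1000;	/* 768000 */
	unsigned long scale = big_raw;	/* capacity_scale = max of the raw values */

	/* normalized to 1024: big -> 1024, LITTLE -> 384 */
	printf("big=%lu little=%lu\n",
	       (big_raw << 10) / scale, (little_raw << 10) / scale);
	return 0;
}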
diff --git a/drivers/base/base.h b/drivers/base/base.h
index e19b1008e5fb..539432a14b5c 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -126,11 +126,6 @@ extern int driver_add_groups(struct device_driver *drv,
extern void driver_remove_groups(struct device_driver *drv,
const struct attribute_group **groups);
-extern int device_add_groups(struct device *dev,
- const struct attribute_group **groups);
-extern void device_remove_groups(struct device *dev,
- const struct attribute_group **groups);
-
extern char *make_class_name(const char *name, struct kobject *kobj);
extern int devres_release_all(struct device *dev);
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index e162c9a789ba..22a64fd3309b 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -698,7 +698,7 @@ int bus_add_driver(struct device_driver *drv)
out_unregister:
kobject_put(&priv->kobj);
- kfree(drv->p);
+ /* drv->p is freed in driver_release() */
drv->p = NULL;
out_put_bus:
bus_put(bus);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 755451f684bc..12ebd055724c 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1023,12 +1023,144 @@ int device_add_groups(struct device *dev, const struct attribute_group **groups)
{
return sysfs_create_groups(&dev->kobj, groups);
}
+EXPORT_SYMBOL_GPL(device_add_groups);
void device_remove_groups(struct device *dev,
const struct attribute_group **groups)
{
sysfs_remove_groups(&dev->kobj, groups);
}
+EXPORT_SYMBOL_GPL(device_remove_groups);
+
+union device_attr_group_devres {
+ const struct attribute_group *group;
+ const struct attribute_group **groups;
+};
+
+static int devm_attr_group_match(struct device *dev, void *res, void *data)
+{
+ return ((union device_attr_group_devres *)res)->group == data;
+}
+
+static void devm_attr_group_remove(struct device *dev, void *res)
+{
+ union device_attr_group_devres *devres = res;
+ const struct attribute_group *group = devres->group;
+
+ dev_dbg(dev, "%s: removing group %p\n", __func__, group);
+ sysfs_remove_group(&dev->kobj, group);
+}
+
+static void devm_attr_groups_remove(struct device *dev, void *res)
+{
+ union device_attr_group_devres *devres = res;
+ const struct attribute_group **groups = devres->groups;
+
+ dev_dbg(dev, "%s: removing groups %p\n", __func__, groups);
+ sysfs_remove_groups(&dev->kobj, groups);
+}
+
+/**
+ * devm_device_add_group - given a device, create a managed attribute group
+ * @dev: The device to create the group for
+ * @grp: The attribute group to create
+ *
+ * This function creates a group for the first time. It will explicitly
+ * warn and error if any of the attribute files being created already exist.
+ *
+ * Returns 0 on success or error code on failure.
+ */
+int devm_device_add_group(struct device *dev, const struct attribute_group *grp)
+{
+ union device_attr_group_devres *devres;
+ int error;
+
+ devres = devres_alloc(devm_attr_group_remove,
+ sizeof(*devres), GFP_KERNEL);
+ if (!devres)
+ return -ENOMEM;
+
+ error = sysfs_create_group(&dev->kobj, grp);
+ if (error) {
+ devres_free(devres);
+ return error;
+ }
+
+ devres->group = grp;
+ devres_add(dev, devres);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_device_add_group);
+
+/**
+ * devm_device_remove_group: remove a managed group from a device
+ * @dev: device to remove the group from
+ * @grp: group to remove
+ *
+ * This function removes a group of attributes from a device. The group must
+ * previously have been created for the device; otherwise removal will fail.
+ */
+void devm_device_remove_group(struct device *dev,
+ const struct attribute_group *grp)
+{
+ WARN_ON(devres_release(dev, devm_attr_group_remove,
+ devm_attr_group_match,
+ /* cast away const */ (void *)grp));
+}
+EXPORT_SYMBOL_GPL(devm_device_remove_group);
+
+/**
+ * devm_device_add_groups - create a bunch of managed attribute groups
+ * @dev: The device to create the group for
+ * @groups: The attribute groups to create, NULL terminated
+ *
+ * This function creates a bunch of managed attribute groups. If an error
+ * occurs when creating a group, all previously created groups will be
+ * removed, unwinding everything back to the original state when this
+ * function was called. It will explicitly warn and error if any of the
+ * attribute files being created already exist.
+ *
+ * Returns 0 on success or error code from sysfs_create_group on failure.
+ */
+int devm_device_add_groups(struct device *dev,
+ const struct attribute_group **groups)
+{
+ union device_attr_group_devres *devres;
+ int error;
+
+ devres = devres_alloc(devm_attr_groups_remove,
+ sizeof(*devres), GFP_KERNEL);
+ if (!devres)
+ return -ENOMEM;
+
+ error = sysfs_create_groups(&dev->kobj, groups);
+ if (error) {
+ devres_free(devres);
+ return error;
+ }
+
+ devres->groups = groups;
+ devres_add(dev, devres);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_device_add_groups);
+
+/**
+ * devm_device_remove_groups - remove a list of managed groups
+ *
+ * @dev: The device for the groups to be removed from
+ * @groups: NULL terminated list of groups to be removed
+ *
+ * If groups is not NULL, remove the specified groups from the device.
+ */
+void devm_device_remove_groups(struct device *dev,
+ const struct attribute_group **groups)
+{
+ WARN_ON(devres_release(dev, devm_attr_groups_remove,
+ devm_attr_group_match,
+ /* cast away const */ (void *)groups));
+}
+EXPORT_SYMBOL_GPL(devm_device_remove_groups);
static int device_add_attrs(struct device *dev)
{
@@ -2664,11 +2796,12 @@ void device_shutdown(void)
pm_runtime_get_noresume(dev);
pm_runtime_barrier(dev);
- if (dev->class && dev->class->shutdown) {
+ if (dev->class && dev->class->shutdown_pre) {
if (initcall_debug)
- dev_info(dev, "shutdown\n");
- dev->class->shutdown(dev);
- } else if (dev->bus && dev->bus->shutdown) {
+ dev_info(dev, "shutdown_pre\n");
+ dev->class->shutdown_pre(dev);
+ }
+ if (dev->bus && dev->bus->shutdown) {
if (initcall_debug)
dev_info(dev, "shutdown\n");
dev->bus->shutdown(dev);
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 4882f06d12df..ad44b40fe284 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -20,6 +20,7 @@
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
+#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/wait.h>
@@ -53,6 +54,7 @@ static DEFINE_MUTEX(deferred_probe_mutex);
static LIST_HEAD(deferred_probe_pending_list);
static LIST_HEAD(deferred_probe_active_list);
static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
+static bool initcalls_done;
/*
* In some cases, like suspend to RAM or hibernation, It might be reasonable
@@ -62,6 +64,26 @@ static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
static bool defer_all_probes;
/*
+ * For initcall_debug, show the deferred probes executed in late_initcall
+ * processing.
+ */
+static void deferred_probe_debug(struct device *dev)
+{
+ ktime_t calltime, delta, rettime;
+ unsigned long long duration;
+
+ printk(KERN_DEBUG "deferred probe %s @ %i\n", dev_name(dev),
+ task_pid_nr(current));
+ calltime = ktime_get();
+ bus_probe_device(dev);
+ rettime = ktime_get();
+ delta = ktime_sub(rettime, calltime);
+ duration = (unsigned long long) ktime_to_ns(delta) >> 10;
+ printk(KERN_DEBUG "deferred probe %s returned after %lld usecs\n",
+ dev_name(dev), duration);
+}
+
+/*
* deferred_probe_work_func() - Retry probing devices in the active list.
*/
static void deferred_probe_work_func(struct work_struct *work)
@@ -106,7 +128,10 @@ static void deferred_probe_work_func(struct work_struct *work)
device_pm_unlock();
dev_dbg(dev, "Retrying from deferred list\n");
- bus_probe_device(dev);
+ if (initcall_debug && !initcalls_done)
+ deferred_probe_debug(dev);
+ else
+ bus_probe_device(dev);
mutex_lock(&deferred_probe_mutex);
@@ -215,6 +240,7 @@ static int deferred_probe_initcall(void)
driver_deferred_probe_trigger();
/* Sort as many dependencies as possible before exiting initcalls */
flush_work(&deferred_probe_work);
+ initcalls_done = true;
return 0;
}
late_initcall(deferred_probe_initcall);
@@ -259,6 +285,8 @@ static void driver_bound(struct device *dev)
if (dev->bus)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_BOUND_DRIVER, dev);
+
+ kobject_uevent(&dev->kobj, KOBJ_BIND);
}
static int driver_sysfs_add(struct device *dev)
@@ -848,6 +876,8 @@ static void __device_release_driver(struct device *dev, struct device *parent)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_UNBOUND_DRIVER,
dev);
+
+ kobject_uevent(&dev->kobj, KOBJ_UNBIND);
}
}
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index 2ae24c28e70c..1c152aed6b82 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -25,7 +25,7 @@ static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *de
{
if (dev && dev->dma_mem)
return dev->dma_mem;
- return dma_coherent_default_memory;
+ return NULL;
}
static inline dma_addr_t dma_get_device_base(struct device *dev,
@@ -165,34 +165,15 @@ void *dma_mark_declared_memory_occupied(struct device *dev,
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
-/**
- * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area
- *
- * @dev: device from which we allocate memory
- * @size: size of requested memory area
- * @dma_handle: This will be filled with the correct dma handle
- * @ret: This pointer will be filled with the virtual address
- * to allocated area.
- *
- * This function should be only called from per-arch dma_alloc_coherent()
- * to support allocation from per-device coherent memory pools.
- *
- * Returns 0 if dma_alloc_coherent should continue with allocating from
- * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
- */
-int dma_alloc_from_coherent(struct device *dev, ssize_t size,
- dma_addr_t *dma_handle, void **ret)
+static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
+ ssize_t size, dma_addr_t *dma_handle)
{
- struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
int order = get_order(size);
unsigned long flags;
int pageno;
int dma_memory_map;
+ void *ret;
- if (!mem)
- return 0;
-
- *ret = NULL;
spin_lock_irqsave(&mem->spinlock, flags);
if (unlikely(size > (mem->size << PAGE_SHIFT)))
@@ -203,21 +184,50 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
goto err;
/*
- * Memory was found in the per-device area.
+ * Memory was found in the coherent area.
*/
- *dma_handle = dma_get_device_base(dev, mem) + (pageno << PAGE_SHIFT);
- *ret = mem->virt_base + (pageno << PAGE_SHIFT);
+ *dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
+ ret = mem->virt_base + (pageno << PAGE_SHIFT);
dma_memory_map = (mem->flags & DMA_MEMORY_MAP);
spin_unlock_irqrestore(&mem->spinlock, flags);
if (dma_memory_map)
- memset(*ret, 0, size);
+ memset(ret, 0, size);
else
- memset_io(*ret, 0, size);
+ memset_io(ret, 0, size);
- return 1;
+ return ret;
err:
spin_unlock_irqrestore(&mem->spinlock, flags);
+ return NULL;
+}
+
+/**
+ * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
+ * @dev: device from which we allocate memory
+ * @size: size of requested memory area
+ * @dma_handle: This will be filled with the correct dma handle
+ * @ret: This pointer will be filled with the virtual address
+ * to allocated area.
+ *
+ * This function should be only called from per-arch dma_alloc_coherent()
+ * to support allocation from per-device coherent memory pools.
+ *
+ * Returns 0 if dma_alloc_coherent should continue with allocating from
+ * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
+ */
+int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
+ dma_addr_t *dma_handle, void **ret)
+{
+ struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+
+ if (!mem)
+ return 0;
+
+ *ret = __dma_alloc_from_coherent(mem, size, dma_handle);
+ if (*ret)
+ return 1;
+
/*
* In the case where the allocation can not be satisfied from the
* per-device area, try to fall back to generic memory if the
@@ -225,25 +235,20 @@ err:
*/
return mem->flags & DMA_MEMORY_EXCLUSIVE;
}
-EXPORT_SYMBOL(dma_alloc_from_coherent);
+EXPORT_SYMBOL(dma_alloc_from_dev_coherent);
-/**
- * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
- * @dev: device from which the memory was allocated
- * @order: the order of pages allocated
- * @vaddr: virtual address of allocated pages
- *
- * This checks whether the memory was allocated from the per-device
- * coherent memory pool and if so, releases that memory.
- *
- * Returns 1 if we correctly released the memory, or 0 if
- * dma_release_coherent() should proceed with releasing memory from
- * generic pools.
- */
-int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
+void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
{
- struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+ if (!dma_coherent_default_memory)
+ return NULL;
+
+ return __dma_alloc_from_coherent(dma_coherent_default_memory, size,
+ dma_handle);
+}
+static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
+ int order, void *vaddr)
+{
if (mem && vaddr >= mem->virt_base && vaddr <
(mem->virt_base + (mem->size << PAGE_SHIFT))) {
int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
@@ -256,28 +261,39 @@ int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
}
return 0;
}
-EXPORT_SYMBOL(dma_release_from_coherent);
/**
- * dma_mmap_from_coherent() - try to mmap the memory allocated from
- * per-device coherent memory pool to userspace
+ * dma_release_from_dev_coherent() - free memory to device coherent memory pool
* @dev: device from which the memory was allocated
- * @vma: vm_area for the userspace memory
- * @vaddr: cpu address returned by dma_alloc_from_coherent
- * @size: size of the memory buffer allocated by dma_alloc_from_coherent
- * @ret: result from remap_pfn_range()
+ * @order: the order of pages allocated
+ * @vaddr: virtual address of allocated pages
*
* This checks whether the memory was allocated from the per-device
- * coherent memory pool and if so, maps that memory to the provided vma.
+ * coherent memory pool and if so, releases that memory.
*
- * Returns 1 if we correctly mapped the memory, or 0 if the caller should
- * proceed with mapping memory from generic pools.
+ * Returns 1 if we correctly released the memory, or 0 if the caller should
+ * proceed with releasing memory from generic pools.
*/
-int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
- void *vaddr, size_t size, int *ret)
+int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
{
struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+ return __dma_release_from_coherent(mem, order, vaddr);
+}
+EXPORT_SYMBOL(dma_release_from_dev_coherent);
+
+int dma_release_from_global_coherent(int order, void *vaddr)
+{
+ if (!dma_coherent_default_memory)
+ return 0;
+
+ return __dma_release_from_coherent(dma_coherent_default_memory, order,
+ vaddr);
+}
+
+static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
+ struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
+{
if (mem && vaddr >= mem->virt_base && vaddr + size <=
(mem->virt_base + (mem->size << PAGE_SHIFT))) {
unsigned long off = vma->vm_pgoff;
@@ -296,7 +312,39 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
}
return 0;
}
-EXPORT_SYMBOL(dma_mmap_from_coherent);
+
+/**
+ * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
+ * @dev: device from which the memory was allocated
+ * @vma: vm_area for the userspace memory
+ * @vaddr: cpu address returned by dma_alloc_from_dev_coherent
+ * @size: size of the memory buffer allocated
+ * @ret: result from remap_pfn_range()
+ *
+ * This checks whether the memory was allocated from the per-device
+ * coherent memory pool and if so, maps that memory to the provided vma.
+ *
+ * Returns 1 if we correctly mapped the memory, or 0 if the caller should
+ * proceed with mapping memory from generic pools.
+ */
+int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
+ void *vaddr, size_t size, int *ret)
+{
+ struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+
+ return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
+}
+EXPORT_SYMBOL(dma_mmap_from_dev_coherent);
+
+int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
+ size_t size, int *ret)
+{
+ if (!dma_coherent_default_memory)
+ return 0;
+
+ return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
+ vaddr, size, ret);
+}
/*
* Support for reserved memory regions defined in device tree
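The split above keeps the original calling convention for the per-device pool: a non-zero return from dma_alloc_from_dev_coherent() means the request was handled (possibly unsuccessfully, with *ret left NULL when an exclusive pool is exhausted) and the arch allocator must not fall back to generic memory. A simplified sketch of the expected call site, assuming the renamed helpers are declared via dma-mapping.h:

#include <linux/dma-mapping.h>

static void *example_arch_dma_alloc(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t gfp)
{
	void *vaddr;

	/* Try the per-device coherent pool first; non-zero means "handled". */
	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &vaddr))
		return vaddr;		/* may be NULL if the pool is exhausted */

	/* ...otherwise continue with the generic page allocator / IOMMU path... */
	return NULL;
}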
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index 5096755d185e..b555ff9dd8fc 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -235,7 +235,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+ if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
return ret;
if (off < count && user_count <= (count - off)) {
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index b9f907eedbf7..a5fb884a136d 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -7,6 +7,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/module.h>
@@ -30,7 +32,6 @@
#include <linux/syscore_ops.h>
#include <linux/reboot.h>
#include <linux/security.h>
-#include <linux/swait.h>
#include <generated/utsrelease.h>
@@ -112,13 +113,13 @@ static inline long firmware_loading_timeout(void)
* state of the firmware loading.
*/
struct fw_state {
- struct swait_queue_head wq;
+ struct completion completion;
enum fw_status status;
};
static void fw_state_init(struct fw_state *fw_st)
{
- init_swait_queue_head(&fw_st->wq);
+ init_completion(&fw_st->completion);
fw_st->status = FW_STATUS_UNKNOWN;
}
@@ -131,9 +132,7 @@ static int __fw_state_wait_common(struct fw_state *fw_st, long timeout)
{
long ret;
- ret = swait_event_interruptible_timeout(fw_st->wq,
- __fw_state_is_done(READ_ONCE(fw_st->status)),
- timeout);
+ ret = wait_for_completion_killable_timeout(&fw_st->completion, timeout);
if (ret != 0 && fw_st->status == FW_STATUS_ABORTED)
return -ENOENT;
if (!ret)
@@ -148,35 +147,34 @@ static void __fw_state_set(struct fw_state *fw_st,
WRITE_ONCE(fw_st->status, status);
if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED)
- swake_up(&fw_st->wq);
+ complete_all(&fw_st->completion);
}
#define fw_state_start(fw_st) \
__fw_state_set(fw_st, FW_STATUS_LOADING)
#define fw_state_done(fw_st) \
__fw_state_set(fw_st, FW_STATUS_DONE)
+#define fw_state_aborted(fw_st) \
+ __fw_state_set(fw_st, FW_STATUS_ABORTED)
#define fw_state_wait(fw_st) \
__fw_state_wait_common(fw_st, MAX_SCHEDULE_TIMEOUT)
-#ifndef CONFIG_FW_LOADER_USER_HELPER
-
-#define fw_state_is_aborted(fw_st) false
-
-#else /* CONFIG_FW_LOADER_USER_HELPER */
-
static int __fw_state_check(struct fw_state *fw_st, enum fw_status status)
{
return fw_st->status == status;
}
+#define fw_state_is_aborted(fw_st) \
+ __fw_state_check(fw_st, FW_STATUS_ABORTED)
+
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+
#define fw_state_aborted(fw_st) \
__fw_state_set(fw_st, FW_STATUS_ABORTED)
#define fw_state_is_done(fw_st) \
__fw_state_check(fw_st, FW_STATUS_DONE)
#define fw_state_is_loading(fw_st) \
__fw_state_check(fw_st, FW_STATUS_LOADING)
-#define fw_state_is_aborted(fw_st) \
- __fw_state_check(fw_st, FW_STATUS_ABORTED)
#define fw_state_wait_timeout(fw_st, timeout) \
__fw_state_wait_common(fw_st, timeout)
@@ -335,6 +333,7 @@ static struct firmware_buf *__fw_lookup_buf(const char *fw_name)
return NULL;
}
+/* Returns 1 for batching firmware requests with the same name */
static int fw_lookup_and_allocate_buf(const char *fw_name,
struct firmware_cache *fwc,
struct firmware_buf **buf, void *dbuf,
@@ -348,6 +347,7 @@ static int fw_lookup_and_allocate_buf(const char *fw_name,
kref_get(&tmp->ref);
spin_unlock(&fwc->lock);
*buf = tmp;
+ pr_debug("batched request - sharing the same struct firmware_buf and lookup for multiple requests\n");
return 1;
}
tmp = __allocate_fw_buf(fw_name, fwc, dbuf, size);
@@ -1089,9 +1089,12 @@ static int _request_firmware_load(struct firmware_priv *fw_priv,
mutex_unlock(&fw_lock);
}
- if (fw_state_is_aborted(&buf->fw_st))
- retval = -EAGAIN;
- else if (buf->is_paged_buf && !buf->data)
+ if (fw_state_is_aborted(&buf->fw_st)) {
+ if (retval == -ERESTARTSYS)
+ retval = -EINTR;
+ else
+ retval = -EAGAIN;
+ } else if (buf->is_paged_buf && !buf->data)
retval = -ENOMEM;
device_del(f_dev);
@@ -1200,6 +1203,28 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name,
return 1; /* need to load */
}
+/*
+ * Batched requests need only one wake; we need to do this step last due to the
+ * fallback mechanism. The buf is protected with kref_get(), and it won't be
+ * released until the last user calls release_firmware().
+ *
+ * Failed batched requests are possible as well, in such cases we just share
+ * the struct firmware_buf and won't release it until all requests are woken
+ * and have gone through this same path.
+ */
+static void fw_abort_batch_reqs(struct firmware *fw)
+{
+ struct firmware_buf *buf;
+
+ /* Loaded directly? */
+ if (!fw || !fw->priv)
+ return;
+
+ buf = fw->priv;
+ if (!fw_state_is_aborted(&buf->fw_st))
+ fw_state_aborted(&buf->fw_st);
+}
+
/* called from request_firmware() and request_firmware_work_func() */
static int
_request_firmware(const struct firmware **firmware_p, const char *name,
@@ -1243,6 +1268,7 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
out:
if (ret < 0) {
+ fw_abort_batch_reqs(fw);
release_firmware(fw);
fw = NULL;
}
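With the batching changes above, concurrent request_firmware() calls for the same file share a single struct firmware_buf, and an aborted wait in the user-helper path can now surface as -EINTR instead of -EAGAIN. From a driver's perspective the usual pattern is unchanged; a brief sketch with a placeholder firmware name:

#include <linux/device.h>
#include <linux/firmware.h>

static int example_load_fw(struct device *dev)
{
	const struct firmware *fw;
	int err;

	/* concurrent callers requesting the same name share one cached buffer */
	err = request_firmware(&fw, "example/fw.bin", dev);
	if (err)
		return err;	/* may be -EINTR if the wait was interrupted */

	/* ...program fw->data (fw->size bytes) into the device here... */

	release_firmware(fw);
	return 0;
}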
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index c7c4e0325cdb..4e3b61cda520 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -388,6 +388,19 @@ static ssize_t show_phys_device(struct device *dev,
}
#ifdef CONFIG_MEMORY_HOTREMOVE
+static void print_allowed_zone(char *buf, int nid, unsigned long start_pfn,
+ unsigned long nr_pages, int online_type,
+ struct zone *default_zone)
+{
+ struct zone *zone;
+
+ zone = zone_for_pfn_range(online_type, nid, start_pfn, nr_pages);
+ if (zone != default_zone) {
+ strcat(buf, " ");
+ strcat(buf, zone->name);
+ }
+}
+
static ssize_t show_valid_zones(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -395,7 +408,7 @@ static ssize_t show_valid_zones(struct device *dev,
unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
unsigned long valid_start_pfn, valid_end_pfn;
- bool append = false;
+ struct zone *default_zone;
int nid;
/*
@@ -418,16 +431,13 @@ static ssize_t show_valid_zones(struct device *dev,
}
nid = pfn_to_nid(start_pfn);
- if (allow_online_pfn_range(nid, start_pfn, nr_pages, MMOP_ONLINE_KERNEL)) {
- strcat(buf, default_zone_for_pfn(nid, start_pfn, nr_pages)->name);
- append = true;
- }
+ default_zone = zone_for_pfn_range(MMOP_ONLINE_KEEP, nid, start_pfn, nr_pages);
+ strcat(buf, default_zone->name);
- if (allow_online_pfn_range(nid, start_pfn, nr_pages, MMOP_ONLINE_MOVABLE)) {
- if (append)
- strcat(buf, " ");
- strcat(buf, NODE_DATA(nid)->node_zones[ZONE_MOVABLE].name);
- }
+ print_allowed_zone(buf, nid, start_pfn, nr_pages, MMOP_ONLINE_KERNEL,
+ default_zone);
+ print_allowed_zone(buf, nid, start_pfn, nr_pages, MMOP_ONLINE_MOVABLE,
+ default_zone);
out:
strcat(buf, "\n");
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 3b8210ebb50e..e8ca5e2cf1e5 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -209,6 +209,34 @@ static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
smp_mb__after_atomic();
}
+#ifdef CONFIG_DEBUG_FS
+static void genpd_update_accounting(struct generic_pm_domain *genpd)
+{
+ ktime_t delta, now;
+
+ now = ktime_get();
+ delta = ktime_sub(now, genpd->accounting_time);
+
+ /*
+ * If genpd->status is active, it means we are just
+ * out of off and so update the idle time and vice
+ * versa.
+ */
+ if (genpd->status == GPD_STATE_ACTIVE) {
+ int state_idx = genpd->state_idx;
+
+ genpd->states[state_idx].idle_time =
+ ktime_add(genpd->states[state_idx].idle_time, delta);
+ } else {
+ genpd->on_time = ktime_add(genpd->on_time, delta);
+ }
+
+ genpd->accounting_time = now;
+}
+#else
+static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
+#endif
+
static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
unsigned int state_idx = genpd->state_idx;
@@ -361,6 +389,7 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
}
genpd->status = GPD_STATE_POWER_OFF;
+ genpd_update_accounting(genpd);
list_for_each_entry(link, &genpd->slave_links, slave_node) {
genpd_sd_counter_dec(link->master);
@@ -413,6 +442,8 @@ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
goto err;
genpd->status = GPD_STATE_ACTIVE;
+ genpd_update_accounting(genpd);
+
return 0;
err:
@@ -1222,8 +1253,6 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
spin_unlock_irq(&dev->power.lock);
- dev_pm_domain_set(dev, &genpd->domain);
-
return gpd_data;
err_free:
@@ -1237,8 +1266,6 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
static void genpd_free_dev_data(struct device *dev,
struct generic_pm_domain_data *gpd_data)
{
- dev_pm_domain_set(dev, NULL);
-
spin_lock_irq(&dev->power.lock);
dev->power.subsys_data->domain_data = NULL;
@@ -1275,6 +1302,8 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
if (ret)
goto out;
+ dev_pm_domain_set(dev, &genpd->domain);
+
genpd->device_count++;
genpd->max_off_time_changed = true;
@@ -1336,6 +1365,8 @@ static int genpd_remove_device(struct generic_pm_domain *genpd,
if (genpd->detach_dev)
genpd->detach_dev(genpd, dev);
+ dev_pm_domain_set(dev, NULL);
+
list_del_init(&pdd->list_node);
genpd_unlock(genpd);
@@ -1540,6 +1571,7 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
genpd->max_off_time_changed = true;
genpd->provider = NULL;
genpd->has_provider = false;
+ genpd->accounting_time = ktime_get();
genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
genpd->domain.ops.runtime_resume = genpd_runtime_resume;
genpd->domain.ops.prepare = pm_genpd_prepare;
@@ -1743,7 +1775,7 @@ static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
mutex_lock(&of_genpd_mutex);
list_add(&cp->link, &of_genpd_providers);
mutex_unlock(&of_genpd_mutex);
- pr_debug("Added domain provider from %s\n", np->full_name);
+ pr_debug("Added domain provider from %pOF\n", np);
return 0;
}
@@ -2149,16 +2181,16 @@ static int genpd_parse_state(struct genpd_power_state *genpd_state,
err = of_property_read_u32(state_node, "entry-latency-us",
&entry_latency);
if (err) {
- pr_debug(" * %s missing entry-latency-us property\n",
- state_node->full_name);
+ pr_debug(" * %pOF missing entry-latency-us property\n",
+ state_node);
return -EINVAL;
}
err = of_property_read_u32(state_node, "exit-latency-us",
&exit_latency);
if (err) {
- pr_debug(" * %s missing exit-latency-us property\n",
- state_node->full_name);
+ pr_debug(" * %pOF missing exit-latency-us property\n",
+ state_node);
return -EINVAL;
}
@@ -2212,8 +2244,8 @@ int of_genpd_parse_idle_states(struct device_node *dn,
ret = genpd_parse_state(&st[i++], np);
if (ret) {
pr_err
- ("Parsing idle state node %s failed with err %d\n",
- np->full_name, ret);
+ ("Parsing idle state node %pOF failed with err %d\n",
+ np, ret);
of_node_put(np);
kfree(st);
return ret;
@@ -2327,7 +2359,7 @@ exit:
return 0;
}
-static int pm_genpd_summary_show(struct seq_file *s, void *data)
+static int genpd_summary_show(struct seq_file *s, void *data)
{
struct generic_pm_domain *genpd;
int ret = 0;
@@ -2350,21 +2382,187 @@ static int pm_genpd_summary_show(struct seq_file *s, void *data)
return ret;
}
-static int pm_genpd_summary_open(struct inode *inode, struct file *file)
+static int genpd_status_show(struct seq_file *s, void *data)
{
- return single_open(file, pm_genpd_summary_show, NULL);
+ static const char * const status_lookup[] = {
+ [GPD_STATE_ACTIVE] = "on",
+ [GPD_STATE_POWER_OFF] = "off"
+ };
+
+ struct generic_pm_domain *genpd = s->private;
+ int ret = 0;
+
+ ret = genpd_lock_interruptible(genpd);
+ if (ret)
+ return -ERESTARTSYS;
+
+ if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
+ goto exit;
+
+ if (genpd->status == GPD_STATE_POWER_OFF)
+ seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
+ genpd->state_idx);
+ else
+ seq_printf(s, "%s\n", status_lookup[genpd->status]);
+exit:
+ genpd_unlock(genpd);
+ return ret;
}
-static const struct file_operations pm_genpd_summary_fops = {
- .open = pm_genpd_summary_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+static int genpd_sub_domains_show(struct seq_file *s, void *data)
+{
+ struct generic_pm_domain *genpd = s->private;
+ struct gpd_link *link;
+ int ret = 0;
+
+ ret = genpd_lock_interruptible(genpd);
+ if (ret)
+ return -ERESTARTSYS;
+
+ list_for_each_entry(link, &genpd->master_links, master_node)
+ seq_printf(s, "%s\n", link->slave->name);
+
+ genpd_unlock(genpd);
+ return ret;
+}
+
+static int genpd_idle_states_show(struct seq_file *s, void *data)
+{
+ struct generic_pm_domain *genpd = s->private;
+ unsigned int i;
+ int ret = 0;
+
+ ret = genpd_lock_interruptible(genpd);
+ if (ret)
+ return -ERESTARTSYS;
+
+ seq_puts(s, "State Time Spent(ms)\n");
+
+ for (i = 0; i < genpd->state_count; i++) {
+ ktime_t delta = 0;
+ s64 msecs;
+
+ if ((genpd->status == GPD_STATE_POWER_OFF) &&
+ (genpd->state_idx == i))
+ delta = ktime_sub(ktime_get(), genpd->accounting_time);
+
+ msecs = ktime_to_ms(
+ ktime_add(genpd->states[i].idle_time, delta));
+ seq_printf(s, "S%-13i %lld\n", i, msecs);
+ }
+
+ genpd_unlock(genpd);
+ return ret;
+}
+
+static int genpd_active_time_show(struct seq_file *s, void *data)
+{
+ struct generic_pm_domain *genpd = s->private;
+ ktime_t delta = 0;
+ int ret = 0;
+
+ ret = genpd_lock_interruptible(genpd);
+ if (ret)
+ return -ERESTARTSYS;
+
+ if (genpd->status == GPD_STATE_ACTIVE)
+ delta = ktime_sub(ktime_get(), genpd->accounting_time);
+
+ seq_printf(s, "%lld ms\n", ktime_to_ms(
+ ktime_add(genpd->on_time, delta)));
+
+ genpd_unlock(genpd);
+ return ret;
+}
+
+static int genpd_total_idle_time_show(struct seq_file *s, void *data)
+{
+ struct generic_pm_domain *genpd = s->private;
+ ktime_t delta = 0, total = 0;
+ unsigned int i;
+ int ret = 0;
+
+ ret = genpd_lock_interruptible(genpd);
+ if (ret)
+ return -ERESTARTSYS;
+
+ for (i = 0; i < genpd->state_count; i++) {
+
+ if ((genpd->status == GPD_STATE_POWER_OFF) &&
+ (genpd->state_idx == i))
+ delta = ktime_sub(ktime_get(), genpd->accounting_time);
+
+ total = ktime_add(total, genpd->states[i].idle_time);
+ }
+ total = ktime_add(total, delta);
+
+ seq_printf(s, "%lld ms\n", ktime_to_ms(total));
+
+ genpd_unlock(genpd);
+ return ret;
+}
+
+
+static int genpd_devices_show(struct seq_file *s, void *data)
+{
+ struct generic_pm_domain *genpd = s->private;
+ struct pm_domain_data *pm_data;
+ const char *kobj_path;
+ int ret = 0;
+
+ ret = genpd_lock_interruptible(genpd);
+ if (ret)
+ return -ERESTARTSYS;
+
+ list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
+ kobj_path = kobject_get_path(&pm_data->dev->kobj,
+ genpd_is_irq_safe(genpd) ?
+ GFP_ATOMIC : GFP_KERNEL);
+ if (kobj_path == NULL)
+ continue;
+
+ seq_printf(s, "%s\n", kobj_path);
+ kfree(kobj_path);
+ }
+
+ genpd_unlock(genpd);
+ return ret;
+}
+
+#define define_genpd_open_function(name) \
+static int genpd_##name##_open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, genpd_##name##_show, inode->i_private); \
+}
+
+define_genpd_open_function(summary);
+define_genpd_open_function(status);
+define_genpd_open_function(sub_domains);
+define_genpd_open_function(idle_states);
+define_genpd_open_function(active_time);
+define_genpd_open_function(total_idle_time);
+define_genpd_open_function(devices);
+
+#define define_genpd_debugfs_fops(name) \
+static const struct file_operations genpd_##name##_fops = { \
+ .open = genpd_##name##_open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
+
+define_genpd_debugfs_fops(summary);
+define_genpd_debugfs_fops(status);
+define_genpd_debugfs_fops(sub_domains);
+define_genpd_debugfs_fops(idle_states);
+define_genpd_debugfs_fops(active_time);
+define_genpd_debugfs_fops(total_idle_time);
+define_genpd_debugfs_fops(devices);
static int __init pm_genpd_debug_init(void)
{
struct dentry *d;
+ struct generic_pm_domain *genpd;
pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
@@ -2372,10 +2570,29 @@ static int __init pm_genpd_debug_init(void)
return -ENOMEM;
d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
- pm_genpd_debugfs_dir, NULL, &pm_genpd_summary_fops);
+ pm_genpd_debugfs_dir, NULL, &genpd_summary_fops);
if (!d)
return -ENOMEM;
+ list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
+ d = debugfs_create_dir(genpd->name, pm_genpd_debugfs_dir);
+ if (!d)
+ return -ENOMEM;
+
+ debugfs_create_file("current_state", 0444,
+ d, genpd, &genpd_status_fops);
+ debugfs_create_file("sub_domains", 0444,
+ d, genpd, &genpd_sub_domains_fops);
+ debugfs_create_file("idle_states", 0444,
+ d, genpd, &genpd_idle_states_fops);
+ debugfs_create_file("active_time", 0444,
+ d, genpd, &genpd_active_time_fops);
+ debugfs_create_file("total_idle_time", 0444,
+ d, genpd, &genpd_total_idle_time_fops);
+ debugfs_create_file("devices", 0444,
+ d, genpd, &genpd_devices_fops);
+ }
+
return 0;
}
late_initcall(pm_genpd_debug_init);
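
The define_genpd_open_function()/define_genpd_debugfs_fops() pair above stamps out one open() helper and one file_operations instance per per-domain debugfs file. Written out, the "status" instance expands to roughly this:

/* Expansion sketch of the "status" instance of the two macros above. */
static int genpd_status_open(struct inode *inode, struct file *file)
{
	return single_open(file, genpd_status_show, inode->i_private);
}

static const struct file_operations genpd_status_fops = {
	.open		= genpd_status_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};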
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index c99f8730de82..ea1732ed7a9d 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -418,8 +418,7 @@ static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
dev_name(dev), pm_verb(state.event), info, error);
}
-#ifdef CONFIG_PM_DEBUG
-static void dpm_show_time(ktime_t starttime, pm_message_t state,
+static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
const char *info)
{
ktime_t calltime;
@@ -432,14 +431,12 @@ static void dpm_show_time(ktime_t starttime, pm_message_t state,
usecs = usecs64;
if (usecs == 0)
usecs = 1;
- pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
- info ?: "", info ? " " : "", pm_verb(state.event),
- usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
+
+ pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
+ info ?: "", info ? " " : "", pm_verb(state.event),
+ error ? "aborted" : "complete",
+ usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}
-#else
-static inline void dpm_show_time(ktime_t starttime, pm_message_t state,
- const char *info) {}
-#endif /* CONFIG_PM_DEBUG */
static int dpm_run_callback(pm_callback_t cb, struct device *dev,
pm_message_t state, const char *info)
@@ -602,14 +599,7 @@ static void async_resume_noirq(void *data, async_cookie_t cookie)
put_device(dev);
}
-/**
- * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
- * @state: PM transition of the system being carried out.
- *
- * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
- * enable device drivers to receive interrupts.
- */
-void dpm_resume_noirq(pm_message_t state)
+void dpm_noirq_resume_devices(pm_message_t state)
{
struct device *dev;
ktime_t starttime = ktime_get();
@@ -654,11 +644,28 @@ void dpm_resume_noirq(pm_message_t state)
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
- dpm_show_time(starttime, state, "noirq");
+ dpm_show_time(starttime, state, 0, "noirq");
+ trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
+}
+
+void dpm_noirq_end(void)
+{
resume_device_irqs();
device_wakeup_disarm_wake_irqs();
cpuidle_resume();
- trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
+}
+
+/**
+ * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
+ * @state: PM transition of the system being carried out.
+ *
+ * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
+ * allow device drivers' interrupt handlers to be called.
+ */
+void dpm_resume_noirq(pm_message_t state)
+{
+ dpm_noirq_resume_devices(state);
+ dpm_noirq_end();
}
/**
@@ -776,7 +783,7 @@ void dpm_resume_early(pm_message_t state)
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
- dpm_show_time(starttime, state, "early");
+ dpm_show_time(starttime, state, 0, "early");
trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}
@@ -948,7 +955,7 @@ void dpm_resume(pm_message_t state)
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
- dpm_show_time(starttime, state, NULL);
+ dpm_show_time(starttime, state, 0, NULL);
cpufreq_resume();
trace_suspend_resume(TPS("dpm_resume"), state.event, false);
@@ -1098,6 +1105,11 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
if (async_error)
goto Complete;
+ if (pm_wakeup_pending()) {
+ async_error = -EBUSY;
+ goto Complete;
+ }
+
if (dev->power.syscore || dev->power.direct_complete)
goto Complete;
@@ -1158,22 +1170,19 @@ static int device_suspend_noirq(struct device *dev)
return __device_suspend_noirq(dev, pm_transition, false);
}
-/**
- * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
- * @state: PM transition of the system being carried out.
- *
- * Prevent device drivers from receiving interrupts and call the "noirq" suspend
- * handlers for all non-sysdev devices.
- */
-int dpm_suspend_noirq(pm_message_t state)
+void dpm_noirq_begin(void)
+{
+ cpuidle_pause();
+ device_wakeup_arm_wake_irqs();
+ suspend_device_irqs();
+}
+
+int dpm_noirq_suspend_devices(pm_message_t state)
{
ktime_t starttime = ktime_get();
int error = 0;
trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
- cpuidle_pause();
- device_wakeup_arm_wake_irqs();
- suspend_device_irqs();
mutex_lock(&dpm_list_mtx);
pm_transition = state;
async_error = 0;
@@ -1208,15 +1217,32 @@ int dpm_suspend_noirq(pm_message_t state)
if (error) {
suspend_stats.failed_suspend_noirq++;
dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
- dpm_resume_noirq(resume_event(state));
- } else {
- dpm_show_time(starttime, state, "noirq");
}
+ dpm_show_time(starttime, state, error, "noirq");
trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
return error;
}
/**
+ * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
+ * @state: PM transition of the system being carried out.
+ *
+ * Prevent device drivers' interrupt handlers from being called and invoke
+ * "noirq" suspend callbacks for all non-sysdev devices.
+ */
+int dpm_suspend_noirq(pm_message_t state)
+{
+ int ret;
+
+ dpm_noirq_begin();
+ ret = dpm_noirq_suspend_devices(state);
+ if (ret)
+ dpm_resume_noirq(resume_event(state));
+
+ return ret;
+}
+
+/**
* device_suspend_late - Execute a "late suspend" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
@@ -1350,9 +1376,8 @@ int dpm_suspend_late(pm_message_t state)
suspend_stats.failed_suspend_late++;
dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
dpm_resume_early(resume_event(state));
- } else {
- dpm_show_time(starttime, state, "late");
}
+ dpm_show_time(starttime, state, error, "late");
trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
return error;
}
@@ -1618,8 +1643,8 @@ int dpm_suspend(pm_message_t state)
if (error) {
suspend_stats.failed_suspend++;
dpm_save_failed_step(SUSPEND_SUSPEND);
- } else
- dpm_show_time(starttime, state, NULL);
+ }
+ dpm_show_time(starttime, state, error, NULL);
trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
return error;
}
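
dpm_suspend_noirq() is now a thin wrapper around dpm_noirq_begin() plus dpm_noirq_suspend_devices(), and the resume side is split the same way into dpm_noirq_resume_devices() and dpm_noirq_end(). A hedged sketch of a caller that wants to cycle devices through the noirq phase while device interrupts stay disabled in between — do_platform_work() is a made-up placeholder, not a real kernel function:

/* Hypothetical user of the split helpers; do_platform_work() is a
 * placeholder. Devices are suspended to the noirq level, some work is
 * done while device IRQs remain disabled, then devices are resumed
 * without leaving the noirq phase until dpm_noirq_end(). */
static int example_noirq_cycle(void)
{
	int error;

	dpm_noirq_begin();
	error = dpm_noirq_suspend_devices(PMSG_SUSPEND);
	if (error) {
		dpm_resume_noirq(PMSG_RESUME);
		return error;
	}

	do_platform_work();

	dpm_noirq_resume_devices(PMSG_RESUME);
	dpm_noirq_end();
	return 0;
}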
diff --git a/drivers/base/power/opp/of.c b/drivers/base/power/opp/of.c
index 57eec1ca0569..0b718886479b 100644
--- a/drivers/base/power/opp/of.c
+++ b/drivers/base/power/opp/of.c
@@ -248,15 +248,22 @@ void dev_pm_opp_of_remove_table(struct device *dev)
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
-/* Returns opp descriptor node for a device, caller must do of_node_put() */
-struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev)
+/* Returns opp descriptor node for a device node, caller must
+ * do of_node_put() */
+static struct device_node *_opp_of_get_opp_desc_node(struct device_node *np)
{
/*
* There should be only ONE phandle present in "operating-points-v2"
* property.
*/
- return of_parse_phandle(dev->of_node, "operating-points-v2", 0);
+ return of_parse_phandle(np, "operating-points-v2", 0);
+}
+
+/* Returns opp descriptor node for a device, caller must do of_node_put() */
+struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev)
+{
+ return _opp_of_get_opp_desc_node(dev->of_node);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_opp_desc_node);
@@ -539,8 +546,12 @@ int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask)
ret = dev_pm_opp_of_add_table(cpu_dev);
if (ret) {
- pr_err("%s: couldn't find opp table for cpu:%d, %d\n",
- __func__, cpu, ret);
+ /*
+ * OPP may get registered dynamically, don't print error
+ * message here.
+ */
+ pr_debug("%s: couldn't find opp table for cpu:%d, %d\n",
+ __func__, cpu, ret);
/* Free all other OPPs */
dev_pm_opp_of_cpumask_remove_table(cpumask);
@@ -572,8 +583,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table);
int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
struct cpumask *cpumask)
{
- struct device_node *np, *tmp_np;
- struct device *tcpu_dev;
+ struct device_node *np, *tmp_np, *cpu_np;
int cpu, ret = 0;
/* Get OPP descriptor node */
@@ -593,19 +603,18 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
if (cpu == cpu_dev->id)
continue;
- tcpu_dev = get_cpu_device(cpu);
- if (!tcpu_dev) {
- dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
+ cpu_np = of_get_cpu_node(cpu, NULL);
+ if (!cpu_np) {
+ dev_err(cpu_dev, "%s: failed to get cpu%d node\n",
__func__, cpu);
- ret = -ENODEV;
+ ret = -ENOENT;
goto put_cpu_node;
}
/* Get OPP descriptor node */
- tmp_np = dev_pm_opp_of_get_opp_desc_node(tcpu_dev);
+ tmp_np = _opp_of_get_opp_desc_node(cpu_np);
if (!tmp_np) {
- dev_err(tcpu_dev, "%s: Couldn't find opp node.\n",
- __func__);
+ pr_err("%pOF: Couldn't find opp node\n", cpu_np);
ret = -ENOENT;
goto put_cpu_node;
}
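
dev_pm_opp_of_get_sharing_cpus() now resolves the related CPUs through their device-tree nodes, so it works even before every CPU has a registered struct device. A hedged sketch of the usual cpufreq-driver call site — the function name and error handling are illustrative:

/* Illustrative caller: fill policy->cpus with all CPUs that share the
 * OPP table of policy->cpu. get_cpu_device() and
 * dev_pm_opp_of_get_sharing_cpus() are the existing APIs. */
static int example_init_shared_cpus(struct cpufreq_policy *policy)
{
	struct device *cpu_dev = get_cpu_device(policy->cpu);
	int ret;

	if (!cpu_dev)
		return -ENODEV;

	ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, policy->cpus);
	if (ret)
		dev_warn(cpu_dev, "no shared OPP table found: %d\n", ret);
	return ret;
}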
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 144e6d8fafc8..cdd6f256da59 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -412,15 +412,17 @@ void device_set_wakeup_capable(struct device *dev, bool capable)
if (!!dev->power.can_wakeup == !!capable)
return;
+ dev->power.can_wakeup = capable;
if (device_is_registered(dev) && !list_empty(&dev->power.entry)) {
if (capable) {
- if (wakeup_sysfs_add(dev))
- return;
+ int ret = wakeup_sysfs_add(dev);
+
+ if (ret)
+ dev_info(dev, "Wakeup sysfs attributes not added\n");
} else {
wakeup_sysfs_remove(dev);
}
}
- dev->power.can_wakeup = capable;
}
EXPORT_SYMBOL_GPL(device_set_wakeup_capable);
@@ -863,7 +865,7 @@ bool pm_wakeup_pending(void)
void pm_system_wakeup(void)
{
atomic_inc(&pm_abort_suspend);
- freeze_wake();
+ s2idle_wake();
}
EXPORT_SYMBOL_GPL(pm_system_wakeup);
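
Moving the power.can_wakeup update ahead of the sysfs handling means the capability is recorded even if adding the wakeup attributes fails; that failure is now only logged. A hedged sketch of the driver-side sequence that depends on the flag being set (probe-time, names illustrative):

/* Typical probe-time sequence: mark the device wakeup-capable, then
 * enable wakeup; device_wakeup_enable() requires can_wakeup to be set. */
static int example_setup_wakeup(struct device *dev)
{
	device_set_wakeup_capable(dev, true);
	return device_wakeup_enable(dev);
}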
diff --git a/drivers/base/property.c b/drivers/base/property.c
index edf02c1b5845..d0b65bbe7e15 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -25,19 +25,25 @@ struct property_set {
const struct property_entry *properties;
};
-static inline bool is_pset_node(struct fwnode_handle *fwnode)
-{
- return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_PDATA;
-}
+static const struct fwnode_operations pset_fwnode_ops;
-static inline struct property_set *to_pset_node(struct fwnode_handle *fwnode)
+static inline bool is_pset_node(const struct fwnode_handle *fwnode)
{
- return is_pset_node(fwnode) ?
- container_of(fwnode, struct property_set, fwnode) : NULL;
+ return !IS_ERR_OR_NULL(fwnode) && fwnode->ops == &pset_fwnode_ops;
}
-static const struct property_entry *pset_prop_get(struct property_set *pset,
- const char *name)
+#define to_pset_node(__fwnode) \
+ ({ \
+ typeof(__fwnode) __to_pset_node_fwnode = __fwnode; \
+ \
+ is_pset_node(__to_pset_node_fwnode) ? \
+ container_of(__to_pset_node_fwnode, \
+ struct property_set, fwnode) : \
+ NULL; \
+ })
+
+static const struct property_entry *
+pset_prop_get(const struct property_set *pset, const char *name)
{
const struct property_entry *prop;
@@ -51,7 +57,7 @@ static const struct property_entry *pset_prop_get(struct property_set *pset,
return NULL;
}
-static const void *pset_prop_find(struct property_set *pset,
+static const void *pset_prop_find(const struct property_set *pset,
const char *propname, size_t length)
{
const struct property_entry *prop;
@@ -71,7 +77,7 @@ static const void *pset_prop_find(struct property_set *pset,
return pointer;
}
-static int pset_prop_read_u8_array(struct property_set *pset,
+static int pset_prop_read_u8_array(const struct property_set *pset,
const char *propname,
u8 *values, size_t nval)
{
@@ -86,7 +92,7 @@ static int pset_prop_read_u8_array(struct property_set *pset,
return 0;
}
-static int pset_prop_read_u16_array(struct property_set *pset,
+static int pset_prop_read_u16_array(const struct property_set *pset,
const char *propname,
u16 *values, size_t nval)
{
@@ -101,7 +107,7 @@ static int pset_prop_read_u16_array(struct property_set *pset,
return 0;
}
-static int pset_prop_read_u32_array(struct property_set *pset,
+static int pset_prop_read_u32_array(const struct property_set *pset,
const char *propname,
u32 *values, size_t nval)
{
@@ -116,7 +122,7 @@ static int pset_prop_read_u32_array(struct property_set *pset,
return 0;
}
-static int pset_prop_read_u64_array(struct property_set *pset,
+static int pset_prop_read_u64_array(const struct property_set *pset,
const char *propname,
u64 *values, size_t nval)
{
@@ -131,7 +137,7 @@ static int pset_prop_read_u64_array(struct property_set *pset,
return 0;
}
-static int pset_prop_count_elems_of_size(struct property_set *pset,
+static int pset_prop_count_elems_of_size(const struct property_set *pset,
const char *propname, size_t length)
{
const struct property_entry *prop;
@@ -143,7 +149,7 @@ static int pset_prop_count_elems_of_size(struct property_set *pset,
return prop->length / length;
}
-static int pset_prop_read_string_array(struct property_set *pset,
+static int pset_prop_read_string_array(const struct property_set *pset,
const char *propname,
const char **strings, size_t nval)
{
@@ -187,18 +193,18 @@ struct fwnode_handle *dev_fwnode(struct device *dev)
}
EXPORT_SYMBOL_GPL(dev_fwnode);
-static bool pset_fwnode_property_present(struct fwnode_handle *fwnode,
+static bool pset_fwnode_property_present(const struct fwnode_handle *fwnode,
const char *propname)
{
return !!pset_prop_get(to_pset_node(fwnode), propname);
}
-static int pset_fwnode_read_int_array(struct fwnode_handle *fwnode,
+static int pset_fwnode_read_int_array(const struct fwnode_handle *fwnode,
const char *propname,
unsigned int elem_size, void *val,
size_t nval)
{
- struct property_set *node = to_pset_node(fwnode);
+ const struct property_set *node = to_pset_node(fwnode);
if (!val)
return pset_prop_count_elems_of_size(node, propname, elem_size);
@@ -217,9 +223,10 @@ static int pset_fwnode_read_int_array(struct fwnode_handle *fwnode,
return -ENXIO;
}
-static int pset_fwnode_property_read_string_array(struct fwnode_handle *fwnode,
- const char *propname,
- const char **val, size_t nval)
+static int
+pset_fwnode_property_read_string_array(const struct fwnode_handle *fwnode,
+ const char *propname,
+ const char **val, size_t nval)
{
return pset_prop_read_string_array(to_pset_node(fwnode), propname,
val, nval);
@@ -249,7 +256,8 @@ EXPORT_SYMBOL_GPL(device_property_present);
* @fwnode: Firmware node whose property to check
* @propname: Name of the property
*/
-bool fwnode_property_present(struct fwnode_handle *fwnode, const char *propname)
+bool fwnode_property_present(const struct fwnode_handle *fwnode,
+ const char *propname)
{
bool ret;
@@ -431,7 +439,7 @@ int device_property_match_string(struct device *dev, const char *propname,
}
EXPORT_SYMBOL_GPL(device_property_match_string);
-static int fwnode_property_read_int_array(struct fwnode_handle *fwnode,
+static int fwnode_property_read_int_array(const struct fwnode_handle *fwnode,
const char *propname,
unsigned int elem_size, void *val,
size_t nval)
@@ -467,7 +475,7 @@ static int fwnode_property_read_int_array(struct fwnode_handle *fwnode,
* %-EOVERFLOW if the size of the property is not as expected,
* %-ENXIO if no suitable firmware interface is present.
*/
-int fwnode_property_read_u8_array(struct fwnode_handle *fwnode,
+int fwnode_property_read_u8_array(const struct fwnode_handle *fwnode,
const char *propname, u8 *val, size_t nval)
{
return fwnode_property_read_int_array(fwnode, propname, sizeof(u8),
@@ -493,7 +501,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_u8_array);
* %-EOVERFLOW if the size of the property is not as expected,
* %-ENXIO if no suitable firmware interface is present.
*/
-int fwnode_property_read_u16_array(struct fwnode_handle *fwnode,
+int fwnode_property_read_u16_array(const struct fwnode_handle *fwnode,
const char *propname, u16 *val, size_t nval)
{
return fwnode_property_read_int_array(fwnode, propname, sizeof(u16),
@@ -519,7 +527,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_u16_array);
* %-EOVERFLOW if the size of the property is not as expected,
* %-ENXIO if no suitable firmware interface is present.
*/
-int fwnode_property_read_u32_array(struct fwnode_handle *fwnode,
+int fwnode_property_read_u32_array(const struct fwnode_handle *fwnode,
const char *propname, u32 *val, size_t nval)
{
return fwnode_property_read_int_array(fwnode, propname, sizeof(u32),
@@ -545,7 +553,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_u32_array);
* %-EOVERFLOW if the size of the property is not as expected,
* %-ENXIO if no suitable firmware interface is present.
*/
-int fwnode_property_read_u64_array(struct fwnode_handle *fwnode,
+int fwnode_property_read_u64_array(const struct fwnode_handle *fwnode,
const char *propname, u64 *val, size_t nval)
{
return fwnode_property_read_int_array(fwnode, propname, sizeof(u64),
@@ -571,7 +579,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_u64_array);
* %-EOVERFLOW if the size of the property is not as expected,
* %-ENXIO if no suitable firmware interface is present.
*/
-int fwnode_property_read_string_array(struct fwnode_handle *fwnode,
+int fwnode_property_read_string_array(const struct fwnode_handle *fwnode,
const char *propname, const char **val,
size_t nval)
{
@@ -603,7 +611,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_string_array);
* %-EPROTO or %-EILSEQ if the property is not a string,
* %-ENXIO if no suitable firmware interface is present.
*/
-int fwnode_property_read_string(struct fwnode_handle *fwnode,
+int fwnode_property_read_string(const struct fwnode_handle *fwnode,
const char *propname, const char **val)
{
int ret = fwnode_property_read_string_array(fwnode, propname, val, 1);
@@ -627,7 +635,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_string);
* %-EPROTO if the property is not an array of strings,
* %-ENXIO if no suitable firmware interface is present.
*/
-int fwnode_property_match_string(struct fwnode_handle *fwnode,
+int fwnode_property_match_string(const struct fwnode_handle *fwnode,
const char *propname, const char *string)
{
const char **values;
@@ -657,6 +665,34 @@ out:
}
EXPORT_SYMBOL_GPL(fwnode_property_match_string);
+/**
+ * fwnode_property_get_reference_args() - Find a reference with arguments
+ * @fwnode: Firmware node where to look for the reference
+ * @prop: The name of the property
+ * @nargs_prop: The name of the property telling the number of
+ * arguments in the referred node. NULL if @nargs is known,
+ * otherwise @nargs is ignored. Only relevant on OF.
+ * @nargs: Number of arguments. Ignored if @nargs_prop is non-NULL.
+ * @index: Index of the reference, from zero onwards.
+ * @args: Result structure with reference and integer arguments.
+ *
+ * Obtain a reference based on a named property in an fwnode, with
+ * integer arguments.
+ *
+ * Caller is responsible to call fwnode_handle_put() on the returned
+ * args->fwnode pointer.
+ *
+ */
+int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode,
+ const char *prop, const char *nargs_prop,
+ unsigned int nargs, unsigned int index,
+ struct fwnode_reference_args *args)
+{
+ return fwnode_call_int_op(fwnode, get_reference_args, prop, nargs_prop,
+ nargs, index, args);
+}
+EXPORT_SYMBOL_GPL(fwnode_property_get_reference_args);
+
static int property_copy_string_array(struct property_entry *dst,
const struct property_entry *src)
{
@@ -900,7 +936,6 @@ int device_add_properties(struct device *dev,
if (IS_ERR(p))
return PTR_ERR(p);
- p->fwnode.type = FWNODE_PDATA;
p->fwnode.ops = &pset_fwnode_ops;
set_secondary_fwnode(dev, &p->fwnode);
return 0;
@@ -935,7 +970,7 @@ EXPORT_SYMBOL_GPL(fwnode_get_next_parent);
* Return parent firmware node of the given node if possible or %NULL if no
* parent was available.
*/
-struct fwnode_handle *fwnode_get_parent(struct fwnode_handle *fwnode)
+struct fwnode_handle *fwnode_get_parent(const struct fwnode_handle *fwnode)
{
return fwnode_call_ptr_op(fwnode, get_parent);
}
@@ -946,8 +981,9 @@ EXPORT_SYMBOL_GPL(fwnode_get_parent);
* @fwnode: Firmware node to find the next child node for.
* @child: Handle to one of the node's child nodes or a %NULL handle.
*/
-struct fwnode_handle *fwnode_get_next_child_node(struct fwnode_handle *fwnode,
- struct fwnode_handle *child)
+struct fwnode_handle *
+fwnode_get_next_child_node(const struct fwnode_handle *fwnode,
+ struct fwnode_handle *child)
{
return fwnode_call_ptr_op(fwnode, get_next_child_node, child);
}
@@ -978,8 +1014,9 @@ EXPORT_SYMBOL_GPL(device_get_next_child_node);
* @fwnode: Firmware node to find the named child node for.
* @childname: String to match child node name against.
*/
-struct fwnode_handle *fwnode_get_named_child_node(struct fwnode_handle *fwnode,
- const char *childname)
+struct fwnode_handle *
+fwnode_get_named_child_node(const struct fwnode_handle *fwnode,
+ const char *childname)
{
return fwnode_call_ptr_op(fwnode, get_named_child_node, childname);
}
@@ -1025,7 +1062,7 @@ EXPORT_SYMBOL_GPL(fwnode_handle_put);
* fwnode_device_is_available - check if a device is available for use
* @fwnode: Pointer to the fwnode of the device.
*/
-bool fwnode_device_is_available(struct fwnode_handle *fwnode)
+bool fwnode_device_is_available(const struct fwnode_handle *fwnode)
{
return fwnode_call_bool_op(fwnode, device_is_available);
}
@@ -1163,7 +1200,7 @@ EXPORT_SYMBOL(device_get_mac_address);
* are available.
*/
struct fwnode_handle *
-fwnode_graph_get_next_endpoint(struct fwnode_handle *fwnode,
+fwnode_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_handle *prev)
{
return fwnode_call_ptr_op(fwnode, graph_get_next_endpoint, prev);
@@ -1177,7 +1214,7 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_next_endpoint);
* Return: the firmware node of the device the @endpoint belongs to.
*/
struct fwnode_handle *
-fwnode_graph_get_port_parent(struct fwnode_handle *endpoint)
+fwnode_graph_get_port_parent(const struct fwnode_handle *endpoint)
{
struct fwnode_handle *port, *parent;
@@ -1197,7 +1234,7 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_port_parent);
* Extracts firmware node of a remote device the @fwnode points to.
*/
struct fwnode_handle *
-fwnode_graph_get_remote_port_parent(struct fwnode_handle *fwnode)
+fwnode_graph_get_remote_port_parent(const struct fwnode_handle *fwnode)
{
struct fwnode_handle *endpoint, *parent;
@@ -1216,7 +1253,8 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_port_parent);
*
* Extracts firmware node of a remote port the @fwnode points to.
*/
-struct fwnode_handle *fwnode_graph_get_remote_port(struct fwnode_handle *fwnode)
+struct fwnode_handle *
+fwnode_graph_get_remote_port(const struct fwnode_handle *fwnode)
{
return fwnode_get_next_parent(fwnode_graph_get_remote_endpoint(fwnode));
}
@@ -1229,7 +1267,7 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_port);
* Extracts firmware node of a remote endpoint the @fwnode points to.
*/
struct fwnode_handle *
-fwnode_graph_get_remote_endpoint(struct fwnode_handle *fwnode)
+fwnode_graph_get_remote_endpoint(const struct fwnode_handle *fwnode)
{
return fwnode_call_ptr_op(fwnode, graph_get_remote_endpoint);
}
@@ -1244,8 +1282,9 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_endpoint);
* Return: Remote fwnode handle associated with remote endpoint node linked
* to @node. Use fwnode_node_put() on it when done.
*/
-struct fwnode_handle *fwnode_graph_get_remote_node(struct fwnode_handle *fwnode,
- u32 port_id, u32 endpoint_id)
+struct fwnode_handle *
+fwnode_graph_get_remote_node(const struct fwnode_handle *fwnode, u32 port_id,
+ u32 endpoint_id)
{
struct fwnode_handle *endpoint = NULL;
@@ -1281,7 +1320,7 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_node);
* information in @endpoint. The caller must hold a reference to
* @fwnode.
*/
-int fwnode_graph_parse_endpoint(struct fwnode_handle *fwnode,
+int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_endpoint *endpoint)
{
memset(endpoint, 0, sizeof(*endpoint));
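
fwnode_property_get_reference_args() gives fwnode users a firmware-agnostic way to resolve a phandle-like reference with arguments. A hedged usage sketch — the property names "io-channels" and "#io-channel-cells" are examples only:

/* Example consumer; property names are illustrative. The reference in
 * args.fwnode must be dropped with fwnode_handle_put() when done. */
static int example_get_reference(struct fwnode_handle *fwnode)
{
	struct fwnode_reference_args args;
	int ret;

	ret = fwnode_property_get_reference_args(fwnode, "io-channels",
						 "#io-channel-cells",
						 0, 0, &args);
	if (ret)
		return ret;

	pr_info("reference resolved with %u args\n", args.nargs);
	fwnode_handle_put(args.fwnode);
	return 0;
}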
diff --git a/drivers/base/regmap/regmap-w1.c b/drivers/base/regmap/regmap-w1.c
index 5f04e7bf063e..e6c64b0be5b2 100644
--- a/drivers/base/regmap/regmap-w1.c
+++ b/drivers/base/regmap/regmap-w1.c
@@ -1,7 +1,7 @@
/*
* Register map access API - W1 (1-Wire) support
*
- * Copyright (C) 2017 OAO Radioavionica
+ * Copyright (c) 2017 Radioavionica Corporation
* Author: Alex A. Mihaylov <minimumlaw@rambler.ru>
*
* This program is free software; you can redistribute it and/or modify
@@ -11,7 +11,7 @@
#include <linux/regmap.h>
#include <linux/module.h>
-#include "../../w1/w1.h"
+#include <linux/w1.h>
#include "internal.h"
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index d6ec1c546f5b..d936fcf9f1fb 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -105,7 +105,7 @@ static struct attribute *default_attrs[] = {
NULL
};
-static struct attribute_group topology_attr_group = {
+static const struct attribute_group topology_attr_group = {
.attrs = default_attrs,
.name = "topology"
};
diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig
index b5c48a8d485f..54f81c554815 100644
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -3,11 +3,8 @@ config BCMA_POSSIBLE
depends on HAS_IOMEM && HAS_DMA
default y
-menu "Broadcom specific AMBA"
- depends on BCMA_POSSIBLE
-
-config BCMA
- tristate "BCMA support"
+menuconfig BCMA
+ tristate "Broadcom specific AMBA"
depends on BCMA_POSSIBLE
help
Bus driver for Broadcom specific Advanced Microcontroller Bus
@@ -117,5 +114,3 @@ config BCMA_DEBUG
This turns on additional debugging messages.
If unsure, say N
-
-endmenu
diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c
index 7bde8d7a2816..982d5781d3ce 100644
--- a/drivers/bcma/driver_gpio.c
+++ b/drivers/bcma/driver_gpio.c
@@ -191,6 +191,7 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
case BCMA_CHIP_ID_BCM4707:
case BCMA_CHIP_ID_BCM5357:
case BCMA_CHIP_ID_BCM53572:
+ case BCMA_CHIP_ID_BCM53573:
case BCMA_CHIP_ID_BCM47094:
chip->ngpio = 32;
break;
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 245a879b036e..255591ab3716 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -1678,9 +1678,12 @@ static bool DAC960_V1_ReadControllerConfiguration(DAC960_Controller_T
Enquiry2->FirmwareID.FirmwareType = '0';
Enquiry2->FirmwareID.TurnID = 0;
}
- sprintf(Controller->FirmwareVersion, "%d.%02d-%c-%02d",
- Enquiry2->FirmwareID.MajorVersion, Enquiry2->FirmwareID.MinorVersion,
- Enquiry2->FirmwareID.FirmwareType, Enquiry2->FirmwareID.TurnID);
+ snprintf(Controller->FirmwareVersion, sizeof(Controller->FirmwareVersion),
+ "%d.%02d-%c-%02d",
+ Enquiry2->FirmwareID.MajorVersion,
+ Enquiry2->FirmwareID.MinorVersion,
+ Enquiry2->FirmwareID.FirmwareType,
+ Enquiry2->FirmwareID.TurnID);
if (!((Controller->FirmwareVersion[0] == '5' &&
strcmp(Controller->FirmwareVersion, "5.06") >= 0) ||
(Controller->FirmwareVersion[0] == '4' &&
@@ -6588,7 +6591,8 @@ static void DAC960_CreateProcEntries(DAC960_Controller_T *Controller)
&dac960_proc_fops);
}
- sprintf(Controller->ControllerName, "c%d", Controller->ControllerNumber);
+ snprintf(Controller->ControllerName, sizeof(Controller->ControllerName),
+ "c%d", Controller->ControllerNumber);
ControllerProcEntry = proc_mkdir(Controller->ControllerName,
DAC960_ProcDirectoryEntry);
proc_create_data("initial_status", 0, ControllerProcEntry, &dac960_initial_status_proc_fops, Controller);
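
The sprintf() to snprintf() conversions above bound every write to the size of the destination buffer. The pattern, reduced to a trivial sketch with made-up names:

/* Pattern sketch only: the buffer size bounds the write, so an
 * unexpectedly large value is truncated instead of overflowing. */
static void example_set_name(int nr)
{
	char name[16];

	snprintf(name, sizeof(name), "c%d", nr);
	pr_debug("controller name: %s\n", name);
}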
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 8ddc98279c8f..104180e3c55e 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -17,6 +17,7 @@ if BLK_DEV
config BLK_DEV_NULL_BLK
tristate "Null test block driver"
+ depends on CONFIGFS_FS
config BLK_DEV_FD
tristate "Normal floppy disk support"
@@ -470,7 +471,7 @@ config VIRTIO_BLK
depends on VIRTIO
---help---
This is the virtual block driver for virtio. It can be used with
- lguest or QEMU based VMMs (like KVM or Xen). Say Y or M.
+ QEMU based VMMs (like KVM or Xen). Say Y or M.
config VIRTIO_BLK_SCSI
bool "SCSI passthrough request for the Virtio block driver"
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 104b71c0490d..bbd0d186cfc0 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -294,14 +294,13 @@ out:
static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
{
- struct block_device *bdev = bio->bi_bdev;
- struct brd_device *brd = bdev->bd_disk->private_data;
+ struct brd_device *brd = bio->bi_disk->private_data;
struct bio_vec bvec;
sector_t sector;
struct bvec_iter iter;
sector = bio->bi_iter.bi_sector;
- if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
+ if (bio_end_sector(bio) > get_capacity(bio->bi_disk))
goto io_error;
bio_for_each_segment(bvec, bio, iter) {
@@ -326,7 +325,11 @@ static int brd_rw_page(struct block_device *bdev, sector_t sector,
struct page *page, bool is_write)
{
struct brd_device *brd = bdev->bd_disk->private_data;
- int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, is_write, sector);
+ int err;
+
+ if (PageTransHuge(page))
+ return -ENOTSUPP;
+ err = brd_do_bvec(brd, page, PAGE_SIZE, 0, is_write, sector);
page_endio(page, is_write, err);
return err;
}
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index e02c45cd3c5a..5f0eaee8c8a7 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -151,7 +151,7 @@ static int _drbd_md_sync_page_io(struct drbd_device *device,
op_flags |= REQ_SYNC;
bio = bio_alloc_drbd(GFP_NOIO);
- bio->bi_bdev = bdev->md_bdev;
+ bio_set_dev(bio, bdev->md_bdev);
bio->bi_iter.bi_sector = sector;
err = -EIO;
if (bio_add_page(bio, device->md_io.page, size, 0) != size)
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 809fd245c3dc..bd97908c766f 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -1019,7 +1019,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
bm_store_page_idx(page, page_nr);
} else
page = b->bm_pages[page_nr];
- bio->bi_bdev = device->ldev->md_bdev;
+ bio_set_dev(bio, device->ldev->md_bdev);
bio->bi_iter.bi_sector = on_disk_sector;
/* bio_add_page of a single page to an empty bio will always succeed,
* according to api. Do we want to assert that? */
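
bio_set_dev() replaces direct assignments to bio->bi_bdev now that a bio tracks its target as a gendisk plus partition number rather than a block_device pointer. A minimal sketch of the new idiom, assuming the caller already holds a block_device, a page and a sector:

/* Sketch, assuming bdev, page and sector are provided by the caller:
 * point the bio at the device with bio_set_dev() rather than writing
 * bio->bi_bdev, then submit as before. */
struct bio *bio = bio_alloc(GFP_NOIO, 1);

bio_set_dev(bio, bdev);
bio->bi_iter.bi_sector = sector;
bio_add_page(bio, page, PAGE_SIZE, 0);
submit_bio(bio);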
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index d17b6e6393c7..7e8589ce631c 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -63,19 +63,15 @@
# define __must_hold(x)
#endif
-/* module parameter, defined in drbd_main.c */
-extern unsigned int minor_count;
-extern bool disable_sendpage;
-extern bool allow_oos;
-void tl_abort_disk_io(struct drbd_device *device);
-
+/* shared module parameters, defined in drbd_main.c */
#ifdef CONFIG_DRBD_FAULT_INJECTION
-extern int enable_faults;
-extern int fault_rate;
-extern int fault_devs;
+extern int drbd_enable_faults;
+extern int drbd_fault_rate;
#endif
-extern char usermode_helper[];
+extern unsigned int drbd_minor_count;
+extern char drbd_usermode_helper[];
+extern int drbd_proc_details;
/* This is used to stop/restart our threads.
@@ -181,8 +177,8 @@ _drbd_insert_fault(struct drbd_device *device, unsigned int type);
static inline int
drbd_insert_fault(struct drbd_device *device, unsigned int type) {
#ifdef CONFIG_DRBD_FAULT_INJECTION
- return fault_rate &&
- (enable_faults & (1<<type)) &&
+ return drbd_fault_rate &&
+ (drbd_enable_faults & (1<<type)) &&
_drbd_insert_fault(device, type);
#else
return 0;
@@ -745,6 +741,8 @@ struct drbd_connection {
unsigned current_tle_writes; /* writes seen within this tl epoch */
unsigned long last_reconnect_jif;
+ /* empty member on older kernels without blk_start_plug() */
+ struct blk_plug receiver_plug;
struct drbd_thread receiver;
struct drbd_thread worker;
struct drbd_thread ack_receiver;
@@ -1131,7 +1129,8 @@ extern void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_sta
extern int drbd_send_rs_deallocated(struct drbd_peer_device *, struct drbd_peer_request *);
extern void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev);
extern void drbd_device_cleanup(struct drbd_device *device);
-void drbd_print_uuids(struct drbd_device *device, const char *text);
+extern void drbd_print_uuids(struct drbd_device *device, const char *text);
+extern void drbd_queue_unplug(struct drbd_device *device);
extern void conn_md_sync(struct drbd_connection *connection);
extern void drbd_md_write(struct drbd_device *device, void *buffer);
@@ -1463,8 +1462,6 @@ extern struct drbd_resource *drbd_find_resource(const char *name);
extern void drbd_destroy_resource(struct kref *kref);
extern void conn_free_crypto(struct drbd_connection *connection);
-extern int proc_details;
-
/* drbd_req */
extern void do_submit(struct work_struct *ws);
extern void __drbd_make_request(struct drbd_device *, struct bio *, unsigned long);
@@ -1628,8 +1625,8 @@ static inline void drbd_generic_make_request(struct drbd_device *device,
int fault_type, struct bio *bio)
{
__release(local);
- if (!bio->bi_bdev) {
- drbd_err(device, "drbd_generic_make_request: bio->bi_bdev == NULL\n");
+ if (!bio->bi_disk) {
+ drbd_err(device, "drbd_generic_make_request: bio->bi_disk == NULL\n");
bio->bi_status = BLK_STS_IOERR;
bio_endio(bio);
return;
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index e2ed28d45ce1..8cb3791898ae 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -77,41 +77,41 @@ MODULE_PARM_DESC(minor_count, "Approximate number of drbd devices ("
MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);
#include <linux/moduleparam.h>
-/* allow_open_on_secondary */
-MODULE_PARM_DESC(allow_oos, "DONT USE!");
/* thanks to these macros, if compiled into the kernel (not-module),
- * this becomes the boot parameter drbd.minor_count */
-module_param(minor_count, uint, 0444);
-module_param(disable_sendpage, bool, 0644);
-module_param(allow_oos, bool, 0);
-module_param(proc_details, int, 0644);
+ * these become boot parameters (e.g., drbd.minor_count) */
#ifdef CONFIG_DRBD_FAULT_INJECTION
-int enable_faults;
-int fault_rate;
-static int fault_count;
-int fault_devs;
+int drbd_enable_faults;
+int drbd_fault_rate;
+static int drbd_fault_count;
+static int drbd_fault_devs;
/* bitmap of enabled faults */
-module_param(enable_faults, int, 0664);
+module_param_named(enable_faults, drbd_enable_faults, int, 0664);
/* fault rate % value - applies to all enabled faults */
-module_param(fault_rate, int, 0664);
+module_param_named(fault_rate, drbd_fault_rate, int, 0664);
/* count of faults inserted */
-module_param(fault_count, int, 0664);
+module_param_named(fault_count, drbd_fault_count, int, 0664);
/* bitmap of devices to insert faults on */
-module_param(fault_devs, int, 0644);
+module_param_named(fault_devs, drbd_fault_devs, int, 0644);
#endif
-/* module parameter, defined */
-unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
-bool disable_sendpage;
-bool allow_oos;
-int proc_details; /* Detail level in proc drbd*/
-
+/* module parameters we can keep static */
+static bool drbd_allow_oos; /* allow_open_on_secondary */
+static bool drbd_disable_sendpage;
+MODULE_PARM_DESC(allow_oos, "DONT USE!");
+module_param_named(allow_oos, drbd_allow_oos, bool, 0);
+module_param_named(disable_sendpage, drbd_disable_sendpage, bool, 0644);
+
+/* module parameters we share */
+int drbd_proc_details; /* Detail level in proc drbd*/
+module_param_named(proc_details, drbd_proc_details, int, 0644);
+/* module parameters shared with defaults */
+unsigned int drbd_minor_count = DRBD_MINOR_COUNT_DEF;
/* Module parameter for setting the user mode helper program
* to run. Default is /sbin/drbdadm */
-char usermode_helper[80] = "/sbin/drbdadm";
-
-module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);
+char drbd_usermode_helper[80] = "/sbin/drbdadm";
+module_param_named(minor_count, drbd_minor_count, uint, 0444);
+module_param_string(usermode_helper, drbd_usermode_helper, sizeof(drbd_usermode_helper), 0644);
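
The module_param_named() conversions above let the in-module variables take a drbd_ prefix while the user-visible parameter names — and boot parameters such as drbd.minor_count — stay unchanged. The pattern in isolation, with a made-up example_debug variable:

/* Generic sketch: the variable is namespaced inside the module, but
 * userspace still sees /sys/module/<module>/parameters/debug and the
 * <module>.debug boot parameter. "example_debug" is a made-up name. */
static int example_debug;
module_param_named(debug, example_debug, int, 0644);
MODULE_PARM_DESC(debug, "enable debug output");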
/* in 2.6.x, our device mapping and config info contains our virtual gendisks
* as member "struct gendisk *vdisk;"
@@ -923,7 +923,9 @@ void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *peer_device)
}
/* communicated if (agreed_features & DRBD_FF_WSAME) */
-void assign_p_sizes_qlim(struct drbd_device *device, struct p_sizes *p, struct request_queue *q)
+static void
+assign_p_sizes_qlim(struct drbd_device *device, struct p_sizes *p,
+ struct request_queue *q)
{
if (q) {
p->qlim->physical_block_size = cpu_to_be32(queue_physical_block_size(q));
@@ -1560,7 +1562,7 @@ static int _drbd_send_page(struct drbd_peer_device *peer_device, struct page *pa
* put_page(); and would cause either a VM_BUG directly, or
* __page_cache_release a page that would actually still be referenced
* by someone, leading to some obscure delayed Oops somewhere else. */
- if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
+ if (drbd_disable_sendpage || (page_count(page) < 1) || PageSlab(page))
return _drbd_no_send_page(peer_device, page, offset, size, msg_flags);
msg_flags |= MSG_NOSIGNAL;
@@ -1932,7 +1934,7 @@ static int drbd_open(struct block_device *bdev, fmode_t mode)
if (device->state.role != R_PRIMARY) {
if (mode & FMODE_WRITE)
rv = -EROFS;
- else if (!allow_oos)
+ else if (!drbd_allow_oos)
rv = -EMEDIUMTYPE;
}
@@ -1952,6 +1954,19 @@ static void drbd_release(struct gendisk *gd, fmode_t mode)
mutex_unlock(&drbd_main_mutex);
}
+/* need to hold resource->req_lock */
+void drbd_queue_unplug(struct drbd_device *device)
+{
+ if (device->state.pdsk >= D_INCONSISTENT && device->state.conn >= C_CONNECTED) {
+ D_ASSERT(device, device->state.role == R_PRIMARY);
+ if (test_and_clear_bit(UNPLUG_REMOTE, &device->flags)) {
+ drbd_queue_work_if_unqueued(
+ &first_peer_device(device)->connection->sender_work,
+ &device->unplug_work);
+ }
+ }
+}
+
static void drbd_set_defaults(struct drbd_device *device)
{
/* Beware! The actual layout differs
@@ -2008,18 +2023,14 @@ void drbd_init_set_defaults(struct drbd_device *device)
device->unplug_work.cb = w_send_write_hint;
device->bm_io_work.w.cb = w_bitmap_io;
- init_timer(&device->resync_timer);
- init_timer(&device->md_sync_timer);
- init_timer(&device->start_resync_timer);
- init_timer(&device->request_timer);
- device->resync_timer.function = resync_timer_fn;
- device->resync_timer.data = (unsigned long) device;
- device->md_sync_timer.function = md_sync_timer_fn;
- device->md_sync_timer.data = (unsigned long) device;
- device->start_resync_timer.function = start_resync_timer_fn;
- device->start_resync_timer.data = (unsigned long) device;
- device->request_timer.function = request_timer_fn;
- device->request_timer.data = (unsigned long) device;
+ setup_timer(&device->resync_timer, resync_timer_fn,
+ (unsigned long)device);
+ setup_timer(&device->md_sync_timer, md_sync_timer_fn,
+ (unsigned long)device);
+ setup_timer(&device->start_resync_timer, start_resync_timer_fn,
+ (unsigned long)device);
+ setup_timer(&device->request_timer, request_timer_fn,
+ (unsigned long)device);
init_waitqueue_head(&device->misc_wait);
init_waitqueue_head(&device->state_wait);
@@ -2131,7 +2142,7 @@ static void drbd_destroy_mempools(void)
static int drbd_create_mempools(void)
{
struct page *page;
- const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count;
+ const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count;
int i;
/* prepare our caches and mempools */
@@ -2167,13 +2178,12 @@ static int drbd_create_mempools(void)
goto Enomem;
/* mempools */
- drbd_io_bio_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_RESCUER);
+ drbd_io_bio_set = bioset_create(BIO_POOL_SIZE, 0, 0);
if (drbd_io_bio_set == NULL)
goto Enomem;
drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0,
- BIOSET_NEED_BVECS |
- BIOSET_NEED_RESCUER);
+ BIOSET_NEED_BVECS);
if (drbd_md_io_bio_set == NULL)
goto Enomem;
@@ -2409,7 +2419,6 @@ static void drbd_cleanup(void)
destroy_workqueue(retry.wq);
drbd_genl_unregister();
- drbd_debugfs_cleanup();
idr_for_each_entry(&drbd_devices, device, i)
drbd_delete_device(device);
@@ -2420,6 +2429,8 @@ static void drbd_cleanup(void)
drbd_free_resource(resource);
}
+ drbd_debugfs_cleanup();
+
drbd_destroy_mempools();
unregister_blkdev(DRBD_MAJOR, "drbd");
@@ -2972,12 +2983,12 @@ static int __init drbd_init(void)
{
int err;
- if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) {
- pr_err("invalid minor_count (%d)\n", minor_count);
+ if (drbd_minor_count < DRBD_MINOR_COUNT_MIN || drbd_minor_count > DRBD_MINOR_COUNT_MAX) {
+ pr_err("invalid minor_count (%d)\n", drbd_minor_count);
#ifdef MODULE
return -EINVAL;
#else
- minor_count = DRBD_MINOR_COUNT_DEF;
+ drbd_minor_count = DRBD_MINOR_COUNT_DEF;
#endif
}
@@ -3900,12 +3911,12 @@ _drbd_insert_fault(struct drbd_device *device, unsigned int type)
static struct fault_random_state rrs = {0, 0};
unsigned int ret = (
- (fault_devs == 0 ||
- ((1 << device_to_minor(device)) & fault_devs) != 0) &&
- (((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));
+ (drbd_fault_devs == 0 ||
+ ((1 << device_to_minor(device)) & drbd_fault_devs) != 0) &&
+ (((_drbd_fault_random(&rrs) % 100) + 1) <= drbd_fault_rate));
if (ret) {
- fault_count++;
+ drbd_fault_count++;
if (__ratelimit(&drbd_ratelimit_state))
drbd_warn(device, "***Simulating %s failure\n",
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index ad0fcb43e45c..a12f77e6891e 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -344,7 +344,7 @@ int drbd_khelper(struct drbd_device *device, char *cmd)
(char[60]) { }, /* address */
NULL };
char mb[14];
- char *argv[] = {usermode_helper, cmd, mb, NULL };
+ char *argv[] = {drbd_usermode_helper, cmd, mb, NULL };
struct drbd_connection *connection = first_peer_device(device)->connection;
struct sib_info sib;
int ret;
@@ -359,19 +359,19 @@ int drbd_khelper(struct drbd_device *device, char *cmd)
* write out any unsynced meta data changes now */
drbd_md_sync(device);
- drbd_info(device, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
+ drbd_info(device, "helper command: %s %s %s\n", drbd_usermode_helper, cmd, mb);
sib.sib_reason = SIB_HELPER_PRE;
sib.helper_name = cmd;
drbd_bcast_event(device, &sib);
notify_helper(NOTIFY_CALL, device, connection, cmd, 0);
- ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
+ ret = call_usermodehelper(drbd_usermode_helper, argv, envp, UMH_WAIT_PROC);
if (ret)
drbd_warn(device, "helper command: %s %s %s exit code %u (0x%x)\n",
- usermode_helper, cmd, mb,
+ drbd_usermode_helper, cmd, mb,
(ret >> 8) & 0xff, ret);
else
drbd_info(device, "helper command: %s %s %s exit code %u (0x%x)\n",
- usermode_helper, cmd, mb,
+ drbd_usermode_helper, cmd, mb,
(ret >> 8) & 0xff, ret);
sib.sib_reason = SIB_HELPER_POST;
sib.helper_exit_code = ret;
@@ -396,24 +396,24 @@ enum drbd_peer_state conn_khelper(struct drbd_connection *connection, char *cmd)
(char[60]) { }, /* address */
NULL };
char *resource_name = connection->resource->name;
- char *argv[] = {usermode_helper, cmd, resource_name, NULL };
+ char *argv[] = {drbd_usermode_helper, cmd, resource_name, NULL };
int ret;
setup_khelper_env(connection, envp);
conn_md_sync(connection);
- drbd_info(connection, "helper command: %s %s %s\n", usermode_helper, cmd, resource_name);
+ drbd_info(connection, "helper command: %s %s %s\n", drbd_usermode_helper, cmd, resource_name);
/* TODO: conn_bcast_event() ?? */
notify_helper(NOTIFY_CALL, NULL, connection, cmd, 0);
- ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
+ ret = call_usermodehelper(drbd_usermode_helper, argv, envp, UMH_WAIT_PROC);
if (ret)
drbd_warn(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
- usermode_helper, cmd, resource_name,
+ drbd_usermode_helper, cmd, resource_name,
(ret >> 8) & 0xff, ret);
else
drbd_info(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
- usermode_helper, cmd, resource_name,
+ drbd_usermode_helper, cmd, resource_name,
(ret >> 8) & 0xff, ret);
/* TODO: conn_bcast_event() ?? */
notify_helper(NOTIFY_RESPONSE, NULL, connection, cmd, ret);
@@ -1236,12 +1236,18 @@ static void fixup_discard_if_not_supported(struct request_queue *q)
static void decide_on_write_same_support(struct drbd_device *device,
struct request_queue *q,
- struct request_queue *b, struct o_qlim *o)
+ struct request_queue *b, struct o_qlim *o,
+ bool disable_write_same)
{
struct drbd_peer_device *peer_device = first_peer_device(device);
struct drbd_connection *connection = peer_device->connection;
bool can_do = b ? b->limits.max_write_same_sectors : true;
+ if (can_do && disable_write_same) {
+ can_do = false;
+ drbd_info(peer_device, "WRITE_SAME disabled by config\n");
+ }
+
if (can_do && connection->cstate >= C_CONNECTED && !(connection->agreed_features & DRBD_FF_WSAME)) {
can_do = false;
drbd_info(peer_device, "peer does not support WRITE_SAME\n");
@@ -1302,6 +1308,7 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
struct request_queue *b = NULL;
struct disk_conf *dc;
bool discard_zeroes_if_aligned = true;
+ bool disable_write_same = false;
if (bdev) {
b = bdev->backing_bdev->bd_disk->queue;
@@ -1311,6 +1318,7 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
dc = rcu_dereference(device->ldev->disk_conf);
max_segments = dc->max_bio_bvecs;
discard_zeroes_if_aligned = dc->discard_zeroes_if_aligned;
+ disable_write_same = dc->disable_write_same;
rcu_read_unlock();
blk_set_stacking_limits(&q->limits);
@@ -1321,7 +1329,7 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
blk_queue_segment_boundary(q, PAGE_SIZE-1);
decide_on_discard_support(device, q, b, discard_zeroes_if_aligned);
- decide_on_write_same_support(device, q, b, o);
+ decide_on_write_same_support(device, q, b, o, disable_write_same);
if (b) {
blk_queue_stack_limits(q, b);
@@ -1612,7 +1620,8 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
if (write_ordering_changed(old_disk_conf, new_disk_conf))
drbd_bump_write_ordering(device->resource, NULL, WO_BDEV_FLUSH);
- if (old_disk_conf->discard_zeroes_if_aligned != new_disk_conf->discard_zeroes_if_aligned)
+ if (old_disk_conf->discard_zeroes_if_aligned != new_disk_conf->discard_zeroes_if_aligned
+ || old_disk_conf->disable_write_same != new_disk_conf->disable_write_same)
drbd_reconsider_queue_parameters(device, device->ldev, NULL);
drbd_md_sync(device);
@@ -2140,34 +2149,13 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
static int adm_detach(struct drbd_device *device, int force)
{
- enum drbd_state_rv retcode;
- void *buffer;
- int ret;
-
if (force) {
set_bit(FORCE_DETACH, &device->flags);
drbd_force_state(device, NS(disk, D_FAILED));
- retcode = SS_SUCCESS;
- goto out;
+ return SS_SUCCESS;
}
- drbd_suspend_io(device); /* so no-one is stuck in drbd_al_begin_io */
- buffer = drbd_md_get_buffer(device, __func__); /* make sure there is no in-flight meta-data IO */
- if (buffer) {
- retcode = drbd_request_state(device, NS(disk, D_FAILED));
- drbd_md_put_buffer(device);
- } else /* already <= D_FAILED */
- retcode = SS_NOTHING_TO_DO;
- /* D_FAILED will transition to DISKLESS. */
- drbd_resume_io(device);
- ret = wait_event_interruptible(device->misc_wait,
- device->state.disk != D_FAILED);
- if ((int)retcode == (int)SS_IS_DISKLESS)
- retcode = SS_NOTHING_TO_DO;
- if (ret)
- retcode = ERR_INTR;
-out:
- return retcode;
+ return drbd_request_detach_interruptible(device);
}
/* Detaching the disk is a process in multiple stages. First we need to lock
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index 8378142f7a55..582caeb0de86 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -127,7 +127,7 @@ static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *se
seq_putc(seq, '=');
seq_putc(seq, '>');
for (i = 0; i < y; i++)
- seq_printf(seq, ".");
+ seq_putc(seq, '.');
seq_puts(seq, "] ");
if (state.conn == C_VERIFY_S || state.conn == C_VERIFY_T)
@@ -179,7 +179,7 @@ static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *se
seq_printf_with_thousands_grouping(seq, dbdt);
seq_puts(seq, " (");
/* ------------------------- ~3s average ------------------------ */
- if (proc_details >= 1) {
+ if (drbd_proc_details >= 1) {
/* this is what drbd_rs_should_slow_down() uses */
i = (device->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
dt = (jiffies - device->rs_mark_time[i]) / HZ;
@@ -209,7 +209,7 @@ static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *se
}
seq_printf(seq, " K/sec%s\n", stalled ? " (stalled)" : "");
- if (proc_details >= 1) {
+ if (drbd_proc_details >= 1) {
/* 64 bit:
* we convert to sectors in the display below. */
unsigned long bm_bits = drbd_bm_bits(device);
@@ -332,13 +332,13 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
state.conn == C_VERIFY_T)
drbd_syncer_progress(device, seq, state);
- if (proc_details >= 1 && get_ldev_if_state(device, D_FAILED)) {
+ if (drbd_proc_details >= 1 && get_ldev_if_state(device, D_FAILED)) {
lc_seq_printf_stats(seq, device->resync);
lc_seq_printf_stats(seq, device->act_log);
put_ldev(device);
}
- if (proc_details >= 2)
+ if (drbd_proc_details >= 2)
seq_printf(seq, "\tblocked on activity log: %d\n", atomic_read(&device->ap_actlog_cnt));
}
rcu_read_unlock();
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index c7e95e6380fb..796eaf347dc0 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -332,7 +332,7 @@ static void drbd_free_pages(struct drbd_device *device, struct page *page, int i
if (page == NULL)
return;
- if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count)
+ if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count)
i = page_chain_free(page);
else {
struct page *tmp;
@@ -1100,7 +1100,10 @@ randomize:
idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
mutex_lock(peer_device->device->state_mutex);
+ /* avoid a race with conn_request_state( C_DISCONNECTING ) */
+ spin_lock_irq(&connection->resource->req_lock);
set_bit(STATE_SENT, &connection->flags);
+ spin_unlock_irq(&connection->resource->req_lock);
idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
mutex_unlock(peer_device->device->state_mutex);
@@ -1194,6 +1197,14 @@ static int decode_header(struct drbd_connection *connection, void *header, struc
return 0;
}
+static void drbd_unplug_all_devices(struct drbd_connection *connection)
+{
+ if (current->plug == &connection->receiver_plug) {
+ blk_finish_plug(&connection->receiver_plug);
+ blk_start_plug(&connection->receiver_plug);
+ } /* else: maybe just schedule() ?? */
+}
+
static int drbd_recv_header(struct drbd_connection *connection, struct packet_info *pi)
{
void *buffer = connection->data.rbuf;
@@ -1209,6 +1220,36 @@ static int drbd_recv_header(struct drbd_connection *connection, struct packet_in
return err;
}
+static int drbd_recv_header_maybe_unplug(struct drbd_connection *connection, struct packet_info *pi)
+{
+ void *buffer = connection->data.rbuf;
+ unsigned int size = drbd_header_size(connection);
+ int err;
+
+ err = drbd_recv_short(connection->data.socket, buffer, size, MSG_NOSIGNAL|MSG_DONTWAIT);
+ if (err != size) {
+ /* If we have nothing in the receive buffer now, to reduce
+ * application latency, try to drain the backend queues as
+ * quickly as possible, and let remote TCP know what we have
+ * received so far. */
+ if (err == -EAGAIN) {
+ drbd_tcp_quickack(connection->data.socket);
+ drbd_unplug_all_devices(connection);
+ }
+ if (err > 0) {
+ buffer += err;
+ size -= err;
+ }
+ err = drbd_recv_all_warn(connection, buffer, size);
+ if (err)
+ return err;
+ }
+
+ err = decode_header(connection, connection->data.rbuf, pi);
+ connection->last_received = jiffies;
+
+ return err;
+}
/* This is blkdev_issue_flush, but asynchronous.
* We want to submit to all component volumes in parallel,
* then wait for all completions.
@@ -1223,7 +1264,7 @@ struct one_flush_context {
struct issue_flush_context *ctx;
};
-void one_flush_endio(struct bio *bio)
+static void one_flush_endio(struct bio *bio)
{
struct one_flush_context *octx = bio->bi_private;
struct drbd_device *device = octx->device;
@@ -1265,7 +1306,7 @@ static void submit_one_flush(struct drbd_device *device, struct issue_flush_cont
octx->device = device;
octx->ctx = ctx;
- bio->bi_bdev = device->ldev->backing_bdev;
+ bio_set_dev(bio, device->ldev->backing_bdev);
bio->bi_private = octx;
bio->bi_end_io = one_flush_endio;
bio->bi_opf = REQ_OP_FLUSH | REQ_PREFLUSH;
@@ -1548,7 +1589,7 @@ next_bio:
}
/* > peer_req->i.sector, unless this is the first bio */
bio->bi_iter.bi_sector = sector;
- bio->bi_bdev = device->ldev->backing_bdev;
+ bio_set_dev(bio, device->ldev->backing_bdev);
bio_set_op_attrs(bio, op, op_flags);
bio->bi_private = peer_req;
bio->bi_end_io = drbd_peer_request_endio;
@@ -4085,7 +4126,7 @@ static int receive_uuids(struct drbd_connection *connection, struct packet_info
return config_unknown_volume(connection, pi);
device = peer_device->device;
- p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
+ p_uuid = kmalloc_array(UI_EXTENDED_SIZE, sizeof(*p_uuid), GFP_NOIO);
if (!p_uuid) {
drbd_err(device, "kmalloc of p_uuid failed\n");
return false;
@@ -4882,8 +4923,8 @@ static void drbdd(struct drbd_connection *connection)
struct data_cmd const *cmd;
drbd_thread_current_set_cpu(&connection->receiver);
- update_receiver_timing_details(connection, drbd_recv_header);
- if (drbd_recv_header(connection, &pi))
+ update_receiver_timing_details(connection, drbd_recv_header_maybe_unplug);
+ if (drbd_recv_header_maybe_unplug(connection, &pi))
goto err_out;
cmd = &drbd_cmd_handler[pi.cmd];
@@ -5375,8 +5416,11 @@ int drbd_receiver(struct drbd_thread *thi)
}
} while (h == 0);
- if (h > 0)
+ if (h > 0) {
+ blk_start_plug(&connection->receiver_plug);
drbdd(connection);
+ blk_finish_plug(&connection->receiver_plug);
+ }
conn_disconnect(connection);
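
The receiver path above now opens a blk_plug around drbdd() and, whenever the data socket would block (-EAGAIN on the MSG_DONTWAIT peek), sends a TCP quickack and restarts the plug so already-queued backend bios are dispatched instead of waiting behind an idle receiver. A minimal sketch of the plugging idiom itself, generic code rather than drbd's (the demo_* names are made up for illustration):

#include <linux/bio.h>
#include <linux/blkdev.h>

static void demo_end_io(struct bio *bio)
{
	bio_put(bio);
}

/* Queue a batch of reads under one plug; blk_finish_plug() (or an implicit
 * flush when the task schedules) releases them to the device together. */
static void demo_submit_batch(struct block_device *bdev,
			      struct page **pages, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++) {
		struct bio *bio = bio_alloc(GFP_NOIO, 1);

		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector = i * (PAGE_SIZE >> 9);
		bio_add_page(bio, pages[i], PAGE_SIZE, 0);
		bio_set_op_attrs(bio, REQ_OP_READ, 0);
		bio->bi_end_io = demo_end_io;
		submit_bio(bio);
	}
	blk_finish_plug(&plug);
}
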
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index f6e865b2d543..de8566e55334 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -36,14 +36,18 @@ static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector,
/* Update disk stats at start of I/O request */
static void _drbd_start_io_acct(struct drbd_device *device, struct drbd_request *req)
{
- generic_start_io_acct(bio_data_dir(req->master_bio), req->i.size >> 9,
- &device->vdisk->part0);
+ struct request_queue *q = device->rq_queue;
+
+ generic_start_io_acct(q, bio_data_dir(req->master_bio),
+ req->i.size >> 9, &device->vdisk->part0);
}
/* Update disk stats when completing request upwards */
static void _drbd_end_io_acct(struct drbd_device *device, struct drbd_request *req)
{
- generic_end_io_acct(bio_data_dir(req->master_bio),
+ struct request_queue *q = device->rq_queue;
+
+ generic_end_io_acct(q, bio_data_dir(req->master_bio),
&device->vdisk->part0, req->start_jif);
}
@@ -1175,7 +1179,7 @@ drbd_submit_req_private_bio(struct drbd_request *req)
else
type = DRBD_FAULT_DT_RD;
- bio->bi_bdev = device->ldev->backing_bdev;
+ bio_set_dev(bio, device->ldev->backing_bdev);
/* State may have changed since we grabbed our reference on the
* ->ldev member. Double check, and short-circuit to endio.
@@ -1275,6 +1279,57 @@ static bool may_do_writes(struct drbd_device *device)
return s.disk == D_UP_TO_DATE || s.pdsk == D_UP_TO_DATE;
}
+struct drbd_plug_cb {
+ struct blk_plug_cb cb;
+ struct drbd_request *most_recent_req;
+ /* do we need more? */
+};
+
+static void drbd_unplug(struct blk_plug_cb *cb, bool from_schedule)
+{
+ struct drbd_plug_cb *plug = container_of(cb, struct drbd_plug_cb, cb);
+ struct drbd_resource *resource = plug->cb.data;
+ struct drbd_request *req = plug->most_recent_req;
+
+ kfree(cb);
+ if (!req)
+ return;
+
+ spin_lock_irq(&resource->req_lock);
+ /* In case the sender did not process it yet, raise the flag to
+ * have it followed with P_UNPLUG_REMOTE just after. */
+ req->rq_state |= RQ_UNPLUG;
+ /* but also queue a generic unplug */
+ drbd_queue_unplug(req->device);
+ kref_put(&req->kref, drbd_req_destroy);
+ spin_unlock_irq(&resource->req_lock);
+}
+
+static struct drbd_plug_cb* drbd_check_plugged(struct drbd_resource *resource)
+{
+ /* A lot of text to say
+ * return (struct drbd_plug_cb*)blk_check_plugged(); */
+ struct drbd_plug_cb *plug;
+ struct blk_plug_cb *cb = blk_check_plugged(drbd_unplug, resource, sizeof(*plug));
+
+ if (cb)
+ plug = container_of(cb, struct drbd_plug_cb, cb);
+ else
+ plug = NULL;
+ return plug;
+}
+
+static void drbd_update_plug(struct drbd_plug_cb *plug, struct drbd_request *req)
+{
+ struct drbd_request *tmp = plug->most_recent_req;
+ /* Will be sent to some peer.
+ * Remember to tag it with UNPLUG_REMOTE on unplug */
+ kref_get(&req->kref);
+ plug->most_recent_req = req;
+ if (tmp)
+ kref_put(&tmp->kref, drbd_req_destroy);
+}
+
static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request *req)
{
struct drbd_resource *resource = device->resource;
@@ -1347,6 +1402,12 @@ static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request
no_remote = true;
}
+ if (no_remote == false) {
+ struct drbd_plug_cb *plug = drbd_check_plugged(resource);
+ if (plug)
+ drbd_update_plug(plug, req);
+ }
+
/* If it took the fast path in drbd_request_prepare, add it here.
* The slow path has added it already. */
if (list_empty(&req->req_pending_master_completion))
@@ -1395,7 +1456,10 @@ void __drbd_make_request(struct drbd_device *device, struct bio *bio, unsigned l
static void submit_fast_path(struct drbd_device *device, struct list_head *incoming)
{
+ struct blk_plug plug;
struct drbd_request *req, *tmp;
+
+ blk_start_plug(&plug);
list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
const int rw = bio_data_dir(req->master_bio);
@@ -1413,6 +1477,7 @@ static void submit_fast_path(struct drbd_device *device, struct list_head *incom
list_del_init(&req->tl_requests);
drbd_send_and_submit(device, req);
}
+ blk_finish_plug(&plug);
}
static bool prepare_al_transaction_nonblock(struct drbd_device *device,
@@ -1420,12 +1485,12 @@ static bool prepare_al_transaction_nonblock(struct drbd_device *device,
struct list_head *pending,
struct list_head *later)
{
- struct drbd_request *req, *tmp;
+ struct drbd_request *req;
int wake = 0;
int err;
spin_lock_irq(&device->al_lock);
- list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
+ while ((req = list_first_entry_or_null(incoming, struct drbd_request, tl_requests))) {
err = drbd_al_begin_io_nonblock(device, &req->i);
if (err == -ENOBUFS)
break;
@@ -1442,17 +1507,20 @@ static bool prepare_al_transaction_nonblock(struct drbd_device *device,
return !list_empty(pending);
}
-void send_and_submit_pending(struct drbd_device *device, struct list_head *pending)
+static void send_and_submit_pending(struct drbd_device *device, struct list_head *pending)
{
- struct drbd_request *req, *tmp;
+ struct blk_plug plug;
+ struct drbd_request *req;
- list_for_each_entry_safe(req, tmp, pending, tl_requests) {
+ blk_start_plug(&plug);
+ while ((req = list_first_entry_or_null(pending, struct drbd_request, tl_requests))) {
req->rq_state |= RQ_IN_ACT_LOG;
req->in_actlog_jif = jiffies;
atomic_dec(&device->ap_actlog_cnt);
list_del_init(&req->tl_requests);
drbd_send_and_submit(device, req);
}
+ blk_finish_plug(&plug);
}
void do_submit(struct work_struct *ws)
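
The drbd_plug_cb hunks above are an instance of the generic blk_check_plugged() idiom: the driver embeds a struct blk_plug_cb in its own per-plug structure, blk_check_plugged() registers (and zero-allocates) it once per task plug, and the callback fires when the plug is flushed, where the driver recovers its structure with container_of() and frees it. A hedged, generic sketch of that idiom (the demo_* names are invented; this is not the drbd code):

#include <linux/blkdev.h>
#include <linux/slab.h>

struct demo_plug_cb {
	struct blk_plug_cb cb;		/* embedded so container_of() works */
	unsigned int nr_batched;	/* arbitrary per-plug state */
};

static void demo_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct demo_plug_cb *dcb = container_of(cb, struct demo_plug_cb, cb);

	/* Runs when the current task's plug is flushed, explicitly or because
	 * the task scheduled; the callback owns and must free the allocation. */
	pr_info("demo: plug flushed, %u requests batched\n", dcb->nr_batched);
	kfree(cb);
}

static void demo_note_request(void *driver_data)
{
	struct blk_plug_cb *cb;

	/* Returns the callback already registered for (fn, data) on the current
	 * plug, or allocates a zeroed one of the requested size; NULL if no
	 * plug is active or the allocation failed. */
	cb = blk_check_plugged(demo_unplug, driver_data,
			       sizeof(struct demo_plug_cb));
	if (cb)
		container_of(cb, struct demo_plug_cb, cb)->nr_batched++;
}
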
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index 9e1866ab238f..a2254f825601 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -212,6 +212,11 @@ enum drbd_req_state_bits {
/* Should call drbd_al_complete_io() for this request... */
__RQ_IN_ACT_LOG,
+ /* This was the most recent request during some blk_finish_plug()
+ * or its implicit from-schedule equivalent.
+ * We may use it as a hint to send a P_UNPLUG_REMOTE */
+ __RQ_UNPLUG,
+
/* The peer has sent a retry ACK */
__RQ_POSTPONED,
@@ -249,6 +254,7 @@ enum drbd_req_state_bits {
#define RQ_WSAME (1UL << __RQ_WSAME)
#define RQ_UNMAP (1UL << __RQ_UNMAP)
#define RQ_IN_ACT_LOG (1UL << __RQ_IN_ACT_LOG)
+#define RQ_UNPLUG (1UL << __RQ_UNPLUG)
#define RQ_POSTPONED (1UL << __RQ_POSTPONED)
#define RQ_COMPLETION_SUSP (1UL << __RQ_COMPLETION_SUSP)
#define RQ_EXP_RECEIVE_ACK (1UL << __RQ_EXP_RECEIVE_ACK)
diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c
index eea0c4aec978..0813c654c893 100644
--- a/drivers/block/drbd/drbd_state.c
+++ b/drivers/block/drbd/drbd_state.c
@@ -346,7 +346,7 @@ static enum drbd_role min_role(enum drbd_role role1, enum drbd_role role2)
enum drbd_role conn_highest_role(struct drbd_connection *connection)
{
- enum drbd_role role = R_UNKNOWN;
+ enum drbd_role role = R_SECONDARY;
struct drbd_peer_device *peer_device;
int vnr;
@@ -579,11 +579,14 @@ drbd_req_state(struct drbd_device *device, union drbd_state mask,
unsigned long flags;
union drbd_state os, ns;
enum drbd_state_rv rv;
+ void *buffer = NULL;
init_completion(&done);
if (f & CS_SERIALIZE)
mutex_lock(device->state_mutex);
+ if (f & CS_INHIBIT_MD_IO)
+ buffer = drbd_md_get_buffer(device, __func__);
spin_lock_irqsave(&device->resource->req_lock, flags);
os = drbd_read_state(device);
@@ -636,6 +639,8 @@ drbd_req_state(struct drbd_device *device, union drbd_state mask,
}
abort:
+ if (buffer)
+ drbd_md_put_buffer(device);
if (f & CS_SERIALIZE)
mutex_unlock(device->state_mutex);
@@ -664,6 +669,47 @@ _drbd_request_state(struct drbd_device *device, union drbd_state mask,
return rv;
}
+/*
+ * We grab drbd_md_get_buffer(), because we don't want to "fail" the disk while
+ * there is IO in-flight: the transition into D_FAILED for detach purposes
+ * may get misinterpreted as actual IO error in a confused endio function.
+ *
+ * We wrap it all into wait_event(), to retry in case the drbd_req_state()
+ * returns SS_IN_TRANSIENT_STATE.
+ *
+ * To avoid potential deadlock with e.g. the receiver thread trying to grab
+ * drbd_md_get_buffer() while trying to get out of the "transient state", we
+ * need to grab and release the meta data buffer inside of that wait_event loop.
+ */
+static enum drbd_state_rv
+request_detach(struct drbd_device *device)
+{
+ return drbd_req_state(device, NS(disk, D_FAILED),
+ CS_VERBOSE | CS_ORDERED | CS_INHIBIT_MD_IO);
+}
+
+enum drbd_state_rv
+drbd_request_detach_interruptible(struct drbd_device *device)
+{
+ enum drbd_state_rv rv;
+ int ret;
+
+ drbd_suspend_io(device); /* so no-one is stuck in drbd_al_begin_io */
+ wait_event_interruptible(device->state_wait,
+ (rv = request_detach(device)) != SS_IN_TRANSIENT_STATE);
+ drbd_resume_io(device);
+
+ ret = wait_event_interruptible(device->misc_wait,
+ device->state.disk != D_FAILED);
+
+ if (rv == SS_IS_DISKLESS)
+ rv = SS_NOTHING_TO_DO;
+ if (ret)
+ rv = ERR_INTR;
+
+ return rv;
+}
+
enum drbd_state_rv
_drbd_request_state_holding_state_mutex(struct drbd_device *device, union drbd_state mask,
union drbd_state val, enum chg_state_flags f)
diff --git a/drivers/block/drbd/drbd_state.h b/drivers/block/drbd/drbd_state.h
index 6c9d5d4a8a75..0276c98fbbdd 100644
--- a/drivers/block/drbd/drbd_state.h
+++ b/drivers/block/drbd/drbd_state.h
@@ -71,6 +71,10 @@ enum chg_state_flags {
CS_DC_SUSP = 1 << 10,
CS_DC_MASK = CS_DC_ROLE + CS_DC_PEER + CS_DC_CONN + CS_DC_DISK + CS_DC_PDSK,
CS_IGN_OUTD_FAIL = 1 << 11,
+
+ /* Make sure no meta data IO is in flight, by calling
+ * drbd_md_get_buffer(). Used for graceful detach. */
+ CS_INHIBIT_MD_IO = 1 << 12,
};
/* drbd_dev_state and drbd_state are different types. This is to stress the
@@ -156,6 +160,10 @@ static inline int drbd_request_state(struct drbd_device *device,
return _drbd_request_state(device, mask, val, CS_VERBOSE + CS_ORDERED);
}
+/* for use in adm_detach() (drbd_adm_detach(), drbd_adm_down()) */
+enum drbd_state_rv
+drbd_request_detach_interruptible(struct drbd_device *device);
+
enum drbd_role conn_highest_role(struct drbd_connection *connection);
enum drbd_role conn_highest_peer(struct drbd_connection *connection);
enum drbd_disk_state conn_highest_disk(struct drbd_connection *connection);
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 1d8726a8df34..03471b3fce86 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -65,6 +65,11 @@ void drbd_md_endio(struct bio *bio)
device = bio->bi_private;
device->md_io.error = blk_status_to_errno(bio->bi_status);
+ /* special case: drbd_md_read() during drbd_adm_attach() */
+ if (device->ldev)
+ put_ldev(device);
+ bio_put(bio);
+
/* We grabbed an extra reference in _drbd_md_sync_page_io() to be able
* to timeout on the lower level device, and eventually detach from it.
* If this io completion runs after that timeout expired, this
@@ -79,9 +84,6 @@ void drbd_md_endio(struct bio *bio)
drbd_md_put_buffer(device);
device->md_io.done = 1;
wake_up(&device->misc_wait);
- bio_put(bio);
- if (device->ldev) /* special case: drbd_md_read() during drbd_adm_attach() */
- put_ldev(device);
}
/* reads on behalf of the partner,
@@ -128,6 +130,14 @@ void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(l
block_id = peer_req->block_id;
peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO;
+ if (peer_req->flags & EE_WAS_ERROR) {
+ /* In protocol != C, we usually do not send write acks.
+ * In case of a write error, send the neg ack anyway. */
+ if (!__test_and_set_bit(__EE_SEND_WRITE_ACK, &peer_req->flags))
+ inc_unacked(device);
+ drbd_set_out_of_sync(device, peer_req->i.sector, peer_req->i.size);
+ }
+
spin_lock_irqsave(&device->resource->req_lock, flags);
device->writ_cnt += peer_req->i.size >> 9;
list_move_tail(&peer_req->w.list, &device->done_ee);
@@ -195,7 +205,8 @@ void drbd_peer_request_endio(struct bio *bio)
}
}
-void drbd_panic_after_delayed_completion_of_aborted_request(struct drbd_device *device)
+static void
+drbd_panic_after_delayed_completion_of_aborted_request(struct drbd_device *device)
{
panic("drbd%u %s/%u potential random memory corruption caused by delayed completion of aborted local request\n",
device->minor, device->resource->name, device->vnr);
@@ -1382,18 +1393,22 @@ static int drbd_send_barrier(struct drbd_connection *connection)
return conn_send_command(connection, sock, P_BARRIER, sizeof(*p), NULL, 0);
}
+static int pd_send_unplug_remote(struct drbd_peer_device *pd)
+{
+ struct drbd_socket *sock = &pd->connection->data;
+ if (!drbd_prepare_command(pd, sock))
+ return -EIO;
+ return drbd_send_command(pd, sock, P_UNPLUG_REMOTE, 0, NULL, 0);
+}
+
int w_send_write_hint(struct drbd_work *w, int cancel)
{
struct drbd_device *device =
container_of(w, struct drbd_device, unplug_work);
- struct drbd_socket *sock;
if (cancel)
return 0;
- sock = &first_peer_device(device)->connection->data;
- if (!drbd_prepare_command(first_peer_device(device), sock))
- return -EIO;
- return drbd_send_command(first_peer_device(device), sock, P_UNPLUG_REMOTE, 0, NULL, 0);
+ return pd_send_unplug_remote(first_peer_device(device));
}
static void re_init_if_first_write(struct drbd_connection *connection, unsigned int epoch)
@@ -1455,6 +1470,7 @@ int w_send_dblock(struct drbd_work *w, int cancel)
struct drbd_device *device = req->device;
struct drbd_peer_device *const peer_device = first_peer_device(device);
struct drbd_connection *connection = peer_device->connection;
+ bool do_send_unplug = req->rq_state & RQ_UNPLUG;
int err;
if (unlikely(cancel)) {
@@ -1470,6 +1486,9 @@ int w_send_dblock(struct drbd_work *w, int cancel)
err = drbd_send_dblock(peer_device, req);
req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK);
+ if (do_send_unplug && !err)
+ pd_send_unplug_remote(peer_device);
+
return err;
}
@@ -1484,6 +1503,7 @@ int w_send_read_req(struct drbd_work *w, int cancel)
struct drbd_device *device = req->device;
struct drbd_peer_device *const peer_device = first_peer_device(device);
struct drbd_connection *connection = peer_device->connection;
+ bool do_send_unplug = req->rq_state & RQ_UNPLUG;
int err;
if (unlikely(cancel)) {
@@ -1501,6 +1521,9 @@ int w_send_read_req(struct drbd_work *w, int cancel)
req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK);
+ if (do_send_unplug && !err)
+ pd_send_unplug_remote(peer_device);
+
return err;
}
@@ -1513,7 +1536,7 @@ int w_restart_disk_io(struct drbd_work *w, int cancel)
drbd_al_begin_io(device, &req->i);
drbd_req_make_private_bio(req, req->master_bio);
- req->private_bio->bi_bdev = device->ldev->backing_bdev;
+ bio_set_dev(req->private_bio, device->ldev->backing_bdev);
generic_make_request(req->private_bio);
return 0;
@@ -1733,6 +1756,11 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
return;
}
+ if (!connection) {
+ drbd_err(device, "No connection to peer, aborting!\n");
+ return;
+ }
+
if (!test_bit(B_RS_H_DONE, &device->flags)) {
if (side == C_SYNC_TARGET) {
/* Since application IO was locked out during C_WF_BITMAP_T and
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 9c00f29e40c1..60c086a53609 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4134,7 +4134,7 @@ static int __floppy_read_block_0(struct block_device *bdev, int drive)
cbdata.drive = drive;
bio_init(&bio, &bio_vec, 1);
- bio.bi_bdev = bdev;
+ bio_set_dev(&bio, bdev);
bio_add_page(&bio, page, size, 0);
bio.bi_iter.bi_sector = 0;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index ef8334949b42..407cb172d6e3 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -221,8 +221,7 @@ static void __loop_update_dio(struct loop_device *lo, bool dio)
}
static int
-figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit,
- loff_t logical_blocksize)
+figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit)
{
loff_t size = get_size(offset, sizelimit, lo->lo_backing_file);
sector_t x = (sector_t)size;
@@ -234,12 +233,6 @@ figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit,
lo->lo_offset = offset;
if (lo->lo_sizelimit != sizelimit)
lo->lo_sizelimit = sizelimit;
- if (lo->lo_flags & LO_FLAGS_BLOCKSIZE) {
- lo->lo_logical_blocksize = logical_blocksize;
- blk_queue_physical_block_size(lo->lo_queue, lo->lo_blocksize);
- blk_queue_logical_block_size(lo->lo_queue,
- lo->lo_logical_blocksize);
- }
set_capacity(lo->lo_disk, x);
bd_set_size(bdev, (loff_t)get_capacity(bdev->bd_disk) << 9);
/* let user-space know about the new size */
@@ -820,7 +813,6 @@ static void loop_config_discard(struct loop_device *lo)
struct file *file = lo->lo_backing_file;
struct inode *inode = file->f_mapping->host;
struct request_queue *q = lo->lo_queue;
- int lo_bits = 9;
/*
* We use punch hole to reclaim the free space used by the
@@ -840,11 +832,9 @@ static void loop_config_discard(struct loop_device *lo)
q->limits.discard_granularity = inode->i_sb->s_blocksize;
q->limits.discard_alignment = 0;
- if (lo->lo_flags & LO_FLAGS_BLOCKSIZE)
- lo_bits = blksize_bits(lo->lo_logical_blocksize);
- blk_queue_max_discard_sectors(q, UINT_MAX >> lo_bits);
- blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> lo_bits);
+ blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
+ blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
}
@@ -938,7 +928,6 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
lo->use_dio = false;
lo->lo_blocksize = lo_blocksize;
- lo->lo_logical_blocksize = 512;
lo->lo_device = bdev;
lo->lo_flags = lo_flags;
lo->lo_backing_file = file;
@@ -1104,7 +1093,6 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
int err;
struct loop_func_table *xfer;
kuid_t uid = current_uid();
- int lo_flags = lo->lo_flags;
if (lo->lo_encrypt_key_size &&
!uid_eq(lo->lo_key_owner, uid) &&
@@ -1137,26 +1125,9 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
if (err)
goto exit;
- if (info->lo_flags & LO_FLAGS_BLOCKSIZE) {
- if (!(lo->lo_flags & LO_FLAGS_BLOCKSIZE))
- lo->lo_logical_blocksize = 512;
- lo->lo_flags |= LO_FLAGS_BLOCKSIZE;
- if (LO_INFO_BLOCKSIZE(info) != 512 &&
- LO_INFO_BLOCKSIZE(info) != 1024 &&
- LO_INFO_BLOCKSIZE(info) != 2048 &&
- LO_INFO_BLOCKSIZE(info) != 4096)
- return -EINVAL;
- if (LO_INFO_BLOCKSIZE(info) > lo->lo_blocksize)
- return -EINVAL;
- }
-
if (lo->lo_offset != info->lo_offset ||
- lo->lo_sizelimit != info->lo_sizelimit ||
- lo->lo_flags != lo_flags ||
- ((lo->lo_flags & LO_FLAGS_BLOCKSIZE) &&
- lo->lo_logical_blocksize != LO_INFO_BLOCKSIZE(info))) {
- if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit,
- LO_INFO_BLOCKSIZE(info))) {
+ lo->lo_sizelimit != info->lo_sizelimit) {
+ if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) {
err = -EFBIG;
goto exit;
}
@@ -1348,8 +1319,7 @@ static int loop_set_capacity(struct loop_device *lo)
if (unlikely(lo->lo_state != Lo_bound))
return -ENXIO;
- return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit,
- lo->lo_logical_blocksize);
+ return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit);
}
static int loop_set_dio(struct loop_device *lo, unsigned long arg)
@@ -1996,10 +1966,6 @@ static int __init loop_init(void)
struct loop_device *lo;
int err;
- err = misc_register(&loop_misc);
- if (err < 0)
- return err;
-
part_shift = 0;
if (max_part > 0) {
part_shift = fls(max_part);
@@ -2017,12 +1983,12 @@ static int __init loop_init(void)
if ((1UL << part_shift) > DISK_MAX_PARTS) {
err = -EINVAL;
- goto misc_out;
+ goto err_out;
}
if (max_loop > 1UL << (MINORBITS - part_shift)) {
err = -EINVAL;
- goto misc_out;
+ goto err_out;
}
/*
@@ -2041,6 +2007,11 @@ static int __init loop_init(void)
range = 1UL << MINORBITS;
}
+ err = misc_register(&loop_misc);
+ if (err < 0)
+ goto err_out;
+
+
if (register_blkdev(LOOP_MAJOR, "loop")) {
err = -EIO;
goto misc_out;
@@ -2060,6 +2031,7 @@ static int __init loop_init(void)
misc_out:
misc_deregister(&loop_misc);
+err_out:
return err;
}
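
The loop_init() change above moves misc_register() below the parameter validation, so the early -EINVAL exits no longer have to undo it, and adds a separate err_out label for failures that occur before anything was registered. A minimal sketch of that register-in-unwind-order pattern (the demo_* names are invented, not the loop driver's):

#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>

static int demo_max_part;
module_param(demo_max_part, int, 0444);

static const struct file_operations demo_fops = { .owner = THIS_MODULE };
static struct miscdevice demo_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "demo-ctl",
	.fops = &demo_fops,
};
static int demo_major;

static int __init demo_init(void)
{
	int err;

	/* validate parameters first: nothing to unwind yet */
	if (demo_max_part < 0)
		return -EINVAL;

	err = misc_register(&demo_misc);
	if (err)
		return err;

	demo_major = register_blkdev(0, "demo");
	if (demo_major < 0) {
		err = demo_major;
		goto out_misc;		/* unwind in reverse registration order */
	}
	return 0;

out_misc:
	misc_deregister(&demo_misc);
	return err;
}
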
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
index 2c096b9a17b8..fecd3f97ef8c 100644
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -49,7 +49,6 @@ struct loop_device {
struct file * lo_backing_file;
struct block_device *lo_device;
unsigned lo_blocksize;
- unsigned lo_logical_blocksize;
void *key_data;
gfp_t old_gfp_mask;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index dea7d85134ee..2aa87cbdede0 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -128,7 +128,7 @@ static struct dentry *nbd_dbg_dir;
#define NBD_MAGIC 0x68797548
static unsigned int nbds_max = 16;
-static int max_part;
+static int max_part = 16;
static struct workqueue_struct *recv_workqueue;
static int part_shift;
@@ -165,7 +165,7 @@ static ssize_t pid_show(struct device *dev,
return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
}
-static struct device_attribute pid_attr = {
+static const struct device_attribute pid_attr = {
.attr = { .name = "pid", .mode = S_IRUGO},
.show = pid_show,
};
@@ -626,7 +626,6 @@ static void recv_work(struct work_struct *work)
struct nbd_device *nbd = args->nbd;
struct nbd_config *config = nbd->config;
struct nbd_cmd *cmd;
- int ret = 0;
while (1) {
cmd = nbd_read_stat(nbd, args->index);
@@ -636,7 +635,6 @@ static void recv_work(struct work_struct *work)
mutex_lock(&nsock->tx_lock);
nbd_mark_nsock_dead(nbd, nsock, 1);
mutex_unlock(&nsock->tx_lock);
- ret = PTR_ERR(cmd);
break;
}
@@ -910,7 +908,8 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
continue;
}
sk_set_memalloc(sock->sk);
- sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
+ if (nbd->tag_set.timeout)
+ sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
atomic_inc(&config->recv_threads);
refcount_inc(&nbd->config_refs);
old = nsock->sock;
@@ -924,6 +923,8 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
mutex_unlock(&nsock->tx_lock);
sockfd_put(old);
+ clear_bit(NBD_DISCONNECTED, &config->runtime_flags);
+
/* We take the tx_mutex in an error path in the recv_work, so we
* need to queue_work outside of the tx_mutex.
*/
@@ -980,11 +981,15 @@ static void send_disconnects(struct nbd_device *nbd)
int i, ret;
for (i = 0; i < config->num_connections; i++) {
+ struct nbd_sock *nsock = config->socks[i];
+
iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
+ mutex_lock(&nsock->tx_lock);
ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
if (ret <= 0)
dev_err(disk_to_dev(nbd->disk),
"Send disconnect failed %d\n", ret);
+ mutex_unlock(&nsock->tx_lock);
}
}
@@ -993,9 +998,8 @@ static int nbd_disconnect(struct nbd_device *nbd)
struct nbd_config *config = nbd->config;
dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
- if (!test_and_set_bit(NBD_DISCONNECT_REQUESTED,
- &config->runtime_flags))
- send_disconnects(nbd);
+ set_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags);
+ send_disconnects(nbd);
return 0;
}
@@ -1076,7 +1080,9 @@ static int nbd_start_device(struct nbd_device *nbd)
return -ENOMEM;
}
sk_set_memalloc(config->socks[i]->sock->sk);
- config->socks[i]->sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
+ if (nbd->tag_set.timeout)
+ config->socks[i]->sock->sk->sk_sndtimeo =
+ nbd->tag_set.timeout;
atomic_inc(&config->recv_threads);
refcount_inc(&nbd->config_refs);
INIT_WORK(&args->work, recv_work);
@@ -1578,6 +1584,15 @@ again:
}
} else {
nbd = idr_find(&nbd_index_idr, index);
+ if (!nbd) {
+ ret = nbd_dev_add(index);
+ if (ret < 0) {
+ mutex_unlock(&nbd_index_mutex);
+ printk(KERN_ERR "nbd: failed to add new device\n");
+ return ret;
+ }
+ nbd = idr_find(&nbd_index_idr, index);
+ }
}
if (!nbd) {
printk(KERN_ERR "nbd: couldn't find device at index %d\n",
@@ -2131,4 +2146,4 @@ MODULE_LICENSE("GPL");
module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
-MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");
+MODULE_PARM_DESC(max_part, "number of partitions per device (default: 16)");
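
The nbd change above takes nsock->tx_lock around the disconnect sock_xmit(), so the NBD_CMD_DISC request can no longer interleave with a command another thread is writing on the same socket, and a reconnect now clears NBD_DISCONNECTED again. The locking idea, sketched generically with kernel_sendmsg() (the demo_* names are invented; nbd's own sock_xmit() is internal to the driver):

#include <linux/net.h>
#include <linux/mutex.h>
#include <linux/uio.h>

struct demo_sock {
	struct socket *sock;
	struct mutex tx_lock;	/* serializes all writers of this socket */
};

/* Send one whole buffer; the mutex guarantees request boundaries are never
 * interleaved when several threads transmit on the same socket. */
static int demo_send(struct demo_sock *ds, void *buf, size_t len)
{
	struct kvec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = { .msg_flags = MSG_NOSIGNAL };
	int ret;

	mutex_lock(&ds->tx_lock);
	ret = kernel_sendmsg(ds->sock, &msg, &iov, 1, len);
	mutex_unlock(&ds->tx_lock);

	return ret;
}
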
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 85c24cace973..8042c26ea9e6 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -1,3 +1,7 @@
+/*
+ * Add configfs and memory store: Kyungchan Koh <kkc6196@fb.com> and
+ * Shaohua Li <shli@fb.com>
+ */
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -9,27 +13,110 @@
#include <linux/blk-mq.h>
#include <linux/hrtimer.h>
#include <linux/lightnvm.h>
+#include <linux/configfs.h>
+#include <linux/badblocks.h>
+
+#define SECTOR_SHIFT 9
+#define PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
+#define PAGE_SECTORS (1 << PAGE_SECTORS_SHIFT)
+#define SECTOR_SIZE (1 << SECTOR_SHIFT)
+#define SECTOR_MASK (PAGE_SECTORS - 1)
+
+#define FREE_BATCH 16
+
+#define TICKS_PER_SEC 50ULL
+#define TIMER_INTERVAL (NSEC_PER_SEC / TICKS_PER_SEC)
+
+static inline u64 mb_per_tick(int mbps)
+{
+ return (1 << 20) / TICKS_PER_SEC * ((u64) mbps);
+}
struct nullb_cmd {
struct list_head list;
struct llist_node ll_list;
- struct call_single_data csd;
+ call_single_data_t csd;
struct request *rq;
struct bio *bio;
unsigned int tag;
struct nullb_queue *nq;
struct hrtimer timer;
+ blk_status_t error;
};
struct nullb_queue {
unsigned long *tag_map;
wait_queue_head_t wait;
unsigned int queue_depth;
+ struct nullb_device *dev;
struct nullb_cmd *cmds;
};
+/*
+ * Status flags for nullb_device.
+ *
+ * CONFIGURED: Device has been configured and turned on. Cannot reconfigure.
+ * UP: Device is currently on and visible in userspace.
+ * THROTTLED: Device is being throttled.
+ * CACHE: Device is using a write-back cache.
+ */
+enum nullb_device_flags {
+ NULLB_DEV_FL_CONFIGURED = 0,
+ NULLB_DEV_FL_UP = 1,
+ NULLB_DEV_FL_THROTTLED = 2,
+ NULLB_DEV_FL_CACHE = 3,
+};
+
+/*
+ * nullb_page is a page in memory for nullb devices.
+ *
+ * @page: The page holding the data.
+ * @bitmap: The bitmap represents which sectors in the page hold data.
+ * Each bit represents one block size. For example, sector 8
+ * will use the 7th bit.
+ * The highest 2 bits of the bitmap serve a special purpose. LOCK means the cache
+ * page is being flushed to storage. FREE means the cache page has been freed and
+ * should be skipped when flushing to storage. Please see
+ * null_make_cache_space().
+ */
+struct nullb_page {
+ struct page *page;
+ unsigned long bitmap;
+};
+#define NULLB_PAGE_LOCK (sizeof(unsigned long) * 8 - 1)
+#define NULLB_PAGE_FREE (sizeof(unsigned long) * 8 - 2)
+
+struct nullb_device {
+ struct nullb *nullb;
+ struct config_item item;
+ struct radix_tree_root data; /* data stored in the disk */
+ struct radix_tree_root cache; /* disk cache data */
+ unsigned long flags; /* device flags */
+ unsigned int curr_cache;
+ struct badblocks badblocks;
+
+ unsigned long size; /* device size in MB */
+ unsigned long completion_nsec; /* time in ns to complete a request */
+ unsigned long cache_size; /* disk cache size in MB */
+ unsigned int submit_queues; /* number of submission queues */
+ unsigned int home_node; /* home node for the device */
+ unsigned int queue_mode; /* block interface */
+ unsigned int blocksize; /* block size */
+ unsigned int irqmode; /* IRQ completion handler */
+ unsigned int hw_queue_depth; /* queue depth */
+ unsigned int index; /* index of the disk, only valid with a disk */
+ unsigned int mbps; /* Bandwidth throttle cap (in MB/s) */
+ bool use_lightnvm; /* register as a LightNVM device */
+ bool blocking; /* blocking blk-mq device */
+ bool use_per_node_hctx; /* use per-node allocation for hardware context */
+ bool power; /* power on/off the device */
+ bool memory_backed; /* if data is stored in memory */
+ bool discard; /* if support discard */
+};
+
struct nullb {
+ struct nullb_device *dev;
struct list_head list;
unsigned int index;
struct request_queue *q;
@@ -37,8 +124,10 @@ struct nullb {
struct nvm_dev *ndev;
struct blk_mq_tag_set *tag_set;
struct blk_mq_tag_set __tag_set;
- struct hrtimer timer;
unsigned int queue_depth;
+ atomic_long_t cur_bytes;
+ struct hrtimer bw_timer;
+ unsigned long cache_flush_pos;
spinlock_t lock;
struct nullb_queue *queues;
@@ -49,7 +138,7 @@ struct nullb {
static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
-static int nullb_indexes;
+static DEFINE_IDA(nullb_indexes);
static struct kmem_cache *ppa_cache;
static struct blk_mq_tag_set tag_set;
@@ -65,15 +154,15 @@ enum {
NULL_Q_MQ = 2,
};
-static int submit_queues;
-module_param(submit_queues, int, S_IRUGO);
+static int g_submit_queues = 1;
+module_param_named(submit_queues, g_submit_queues, int, S_IRUGO);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");
-static int home_node = NUMA_NO_NODE;
-module_param(home_node, int, S_IRUGO);
+static int g_home_node = NUMA_NO_NODE;
+module_param_named(home_node, g_home_node, int, S_IRUGO);
MODULE_PARM_DESC(home_node, "Home node for the device");
-static int queue_mode = NULL_Q_MQ;
+static int g_queue_mode = NULL_Q_MQ;
static int null_param_store_val(const char *str, int *val, int min, int max)
{
@@ -92,7 +181,7 @@ static int null_param_store_val(const char *str, int *val, int min, int max)
static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
{
- return null_param_store_val(str, &queue_mode, NULL_Q_BIO, NULL_Q_MQ);
+ return null_param_store_val(str, &g_queue_mode, NULL_Q_BIO, NULL_Q_MQ);
}
static const struct kernel_param_ops null_queue_mode_param_ops = {
@@ -100,38 +189,38 @@ static const struct kernel_param_ops null_queue_mode_param_ops = {
.get = param_get_int,
};
-device_param_cb(queue_mode, &null_queue_mode_param_ops, &queue_mode, S_IRUGO);
+device_param_cb(queue_mode, &null_queue_mode_param_ops, &g_queue_mode, S_IRUGO);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");
-static int gb = 250;
-module_param(gb, int, S_IRUGO);
+static int g_gb = 250;
+module_param_named(gb, g_gb, int, S_IRUGO);
MODULE_PARM_DESC(gb, "Size in GB");
-static int bs = 512;
-module_param(bs, int, S_IRUGO);
+static int g_bs = 512;
+module_param_named(bs, g_bs, int, S_IRUGO);
MODULE_PARM_DESC(bs, "Block size (in bytes)");
static int nr_devices = 1;
module_param(nr_devices, int, S_IRUGO);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");
-static bool use_lightnvm;
-module_param(use_lightnvm, bool, S_IRUGO);
+static bool g_use_lightnvm;
+module_param_named(use_lightnvm, g_use_lightnvm, bool, S_IRUGO);
MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device");
-static bool blocking;
-module_param(blocking, bool, S_IRUGO);
+static bool g_blocking;
+module_param_named(blocking, g_blocking, bool, S_IRUGO);
MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");
static bool shared_tags;
module_param(shared_tags, bool, S_IRUGO);
MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq");
-static int irqmode = NULL_IRQ_SOFTIRQ;
+static int g_irqmode = NULL_IRQ_SOFTIRQ;
static int null_set_irqmode(const char *str, const struct kernel_param *kp)
{
- return null_param_store_val(str, &irqmode, NULL_IRQ_NONE,
+ return null_param_store_val(str, &g_irqmode, NULL_IRQ_NONE,
NULL_IRQ_TIMER);
}
@@ -140,21 +229,358 @@ static const struct kernel_param_ops null_irqmode_param_ops = {
.get = param_get_int,
};
-device_param_cb(irqmode, &null_irqmode_param_ops, &irqmode, S_IRUGO);
+device_param_cb(irqmode, &null_irqmode_param_ops, &g_irqmode, S_IRUGO);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");
-static unsigned long completion_nsec = 10000;
-module_param(completion_nsec, ulong, S_IRUGO);
+static unsigned long g_completion_nsec = 10000;
+module_param_named(completion_nsec, g_completion_nsec, ulong, S_IRUGO);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");
-static int hw_queue_depth = 64;
-module_param(hw_queue_depth, int, S_IRUGO);
+static int g_hw_queue_depth = 64;
+module_param_named(hw_queue_depth, g_hw_queue_depth, int, S_IRUGO);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");
-static bool use_per_node_hctx = false;
-module_param(use_per_node_hctx, bool, S_IRUGO);
+static bool g_use_per_node_hctx;
+module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, S_IRUGO);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");
+static struct nullb_device *null_alloc_dev(void);
+static void null_free_dev(struct nullb_device *dev);
+static void null_del_dev(struct nullb *nullb);
+static int null_add_dev(struct nullb_device *dev);
+static void null_free_device_storage(struct nullb_device *dev, bool is_cache);
+
+static inline struct nullb_device *to_nullb_device(struct config_item *item)
+{
+ return item ? container_of(item, struct nullb_device, item) : NULL;
+}
+
+static inline ssize_t nullb_device_uint_attr_show(unsigned int val, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n", val);
+}
+
+static inline ssize_t nullb_device_ulong_attr_show(unsigned long val,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%lu\n", val);
+}
+
+static inline ssize_t nullb_device_bool_attr_show(bool val, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n", val);
+}
+
+static ssize_t nullb_device_uint_attr_store(unsigned int *val,
+ const char *page, size_t count)
+{
+ unsigned int tmp;
+ int result;
+
+ result = kstrtouint(page, 0, &tmp);
+ if (result)
+ return result;
+
+ *val = tmp;
+ return count;
+}
+
+static ssize_t nullb_device_ulong_attr_store(unsigned long *val,
+ const char *page, size_t count)
+{
+ int result;
+ unsigned long tmp;
+
+ result = kstrtoul(page, 0, &tmp);
+ if (result)
+ return result;
+
+ *val = tmp;
+ return count;
+}
+
+static ssize_t nullb_device_bool_attr_store(bool *val, const char *page,
+ size_t count)
+{
+ bool tmp;
+ int result;
+
+ result = kstrtobool(page, &tmp);
+ if (result)
+ return result;
+
+ *val = tmp;
+ return count;
+}
+
+/* The following macro should only be used with TYPE = {uint, ulong, bool}. */
+#define NULLB_DEVICE_ATTR(NAME, TYPE) \
+static ssize_t \
+nullb_device_##NAME##_show(struct config_item *item, char *page) \
+{ \
+ return nullb_device_##TYPE##_attr_show( \
+ to_nullb_device(item)->NAME, page); \
+} \
+static ssize_t \
+nullb_device_##NAME##_store(struct config_item *item, const char *page, \
+ size_t count) \
+{ \
+ if (test_bit(NULLB_DEV_FL_CONFIGURED, &to_nullb_device(item)->flags)) \
+ return -EBUSY; \
+ return nullb_device_##TYPE##_attr_store( \
+ &to_nullb_device(item)->NAME, page, count); \
+} \
+CONFIGFS_ATTR(nullb_device_, NAME);
+
+NULLB_DEVICE_ATTR(size, ulong);
+NULLB_DEVICE_ATTR(completion_nsec, ulong);
+NULLB_DEVICE_ATTR(submit_queues, uint);
+NULLB_DEVICE_ATTR(home_node, uint);
+NULLB_DEVICE_ATTR(queue_mode, uint);
+NULLB_DEVICE_ATTR(blocksize, uint);
+NULLB_DEVICE_ATTR(irqmode, uint);
+NULLB_DEVICE_ATTR(hw_queue_depth, uint);
+NULLB_DEVICE_ATTR(index, uint);
+NULLB_DEVICE_ATTR(use_lightnvm, bool);
+NULLB_DEVICE_ATTR(blocking, bool);
+NULLB_DEVICE_ATTR(use_per_node_hctx, bool);
+NULLB_DEVICE_ATTR(memory_backed, bool);
+NULLB_DEVICE_ATTR(discard, bool);
+NULLB_DEVICE_ATTR(mbps, uint);
+NULLB_DEVICE_ATTR(cache_size, ulong);
+
+static ssize_t nullb_device_power_show(struct config_item *item, char *page)
+{
+ return nullb_device_bool_attr_show(to_nullb_device(item)->power, page);
+}
+
+static ssize_t nullb_device_power_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nullb_device *dev = to_nullb_device(item);
+ bool newp = false;
+ ssize_t ret;
+
+ ret = nullb_device_bool_attr_store(&newp, page, count);
+ if (ret < 0)
+ return ret;
+
+ if (!dev->power && newp) {
+ if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags))
+ return count;
+ if (null_add_dev(dev)) {
+ clear_bit(NULLB_DEV_FL_UP, &dev->flags);
+ return -ENOMEM;
+ }
+
+ set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
+ dev->power = newp;
+ } else if (dev->power && !newp) {
+ mutex_lock(&lock);
+ dev->power = newp;
+ null_del_dev(dev->nullb);
+ mutex_unlock(&lock);
+ clear_bit(NULLB_DEV_FL_UP, &dev->flags);
+ }
+
+ return count;
+}
+
+CONFIGFS_ATTR(nullb_device_, power);
+
+static ssize_t nullb_device_badblocks_show(struct config_item *item, char *page)
+{
+ struct nullb_device *t_dev = to_nullb_device(item);
+
+ return badblocks_show(&t_dev->badblocks, page, 0);
+}
+
+static ssize_t nullb_device_badblocks_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nullb_device *t_dev = to_nullb_device(item);
+ char *orig, *buf, *tmp;
+ u64 start, end;
+ int ret;
+
+ orig = kstrndup(page, count, GFP_KERNEL);
+ if (!orig)
+ return -ENOMEM;
+
+ buf = strstrip(orig);
+
+ ret = -EINVAL;
+ if (buf[0] != '+' && buf[0] != '-')
+ goto out;
+ tmp = strchr(&buf[1], '-');
+ if (!tmp)
+ goto out;
+ *tmp = '\0';
+ ret = kstrtoull(buf + 1, 0, &start);
+ if (ret)
+ goto out;
+ ret = kstrtoull(tmp + 1, 0, &end);
+ if (ret)
+ goto out;
+ ret = -EINVAL;
+ if (start > end)
+ goto out;
+ /* enable badblocks */
+ cmpxchg(&t_dev->badblocks.shift, -1, 0);
+ if (buf[0] == '+')
+ ret = badblocks_set(&t_dev->badblocks, start,
+ end - start + 1, 1);
+ else
+ ret = badblocks_clear(&t_dev->badblocks, start,
+ end - start + 1);
+ if (ret == 0)
+ ret = count;
+out:
+ kfree(orig);
+ return ret;
+}
+CONFIGFS_ATTR(nullb_device_, badblocks);
+
+static struct configfs_attribute *nullb_device_attrs[] = {
+ &nullb_device_attr_size,
+ &nullb_device_attr_completion_nsec,
+ &nullb_device_attr_submit_queues,
+ &nullb_device_attr_home_node,
+ &nullb_device_attr_queue_mode,
+ &nullb_device_attr_blocksize,
+ &nullb_device_attr_irqmode,
+ &nullb_device_attr_hw_queue_depth,
+ &nullb_device_attr_index,
+ &nullb_device_attr_use_lightnvm,
+ &nullb_device_attr_blocking,
+ &nullb_device_attr_use_per_node_hctx,
+ &nullb_device_attr_power,
+ &nullb_device_attr_memory_backed,
+ &nullb_device_attr_discard,
+ &nullb_device_attr_mbps,
+ &nullb_device_attr_cache_size,
+ &nullb_device_attr_badblocks,
+ NULL,
+};
+
+static void nullb_device_release(struct config_item *item)
+{
+ struct nullb_device *dev = to_nullb_device(item);
+
+ badblocks_exit(&dev->badblocks);
+ null_free_device_storage(dev, false);
+ null_free_dev(dev);
+}
+
+static struct configfs_item_operations nullb_device_ops = {
+ .release = nullb_device_release,
+};
+
+static struct config_item_type nullb_device_type = {
+ .ct_item_ops = &nullb_device_ops,
+ .ct_attrs = nullb_device_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct
+config_item *nullb_group_make_item(struct config_group *group, const char *name)
+{
+ struct nullb_device *dev;
+
+ dev = null_alloc_dev();
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ config_item_init_type_name(&dev->item, name, &nullb_device_type);
+
+ return &dev->item;
+}
+
+static void
+nullb_group_drop_item(struct config_group *group, struct config_item *item)
+{
+ struct nullb_device *dev = to_nullb_device(item);
+
+ if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
+ mutex_lock(&lock);
+ dev->power = false;
+ null_del_dev(dev->nullb);
+ mutex_unlock(&lock);
+ }
+
+ config_item_put(item);
+}
+
+static ssize_t memb_group_features_show(struct config_item *item, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "memory_backed,discard,bandwidth,cache,badblocks\n");
+}
+
+CONFIGFS_ATTR_RO(memb_group_, features);
+
+static struct configfs_attribute *nullb_group_attrs[] = {
+ &memb_group_attr_features,
+ NULL,
+};
+
+static struct configfs_group_operations nullb_group_ops = {
+ .make_item = nullb_group_make_item,
+ .drop_item = nullb_group_drop_item,
+};
+
+static struct config_item_type nullb_group_type = {
+ .ct_group_ops = &nullb_group_ops,
+ .ct_attrs = nullb_group_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct configfs_subsystem nullb_subsys = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = "nullb",
+ .ci_type = &nullb_group_type,
+ },
+ },
+};
+
+static inline int null_cache_active(struct nullb *nullb)
+{
+ return test_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
+}
+
+static struct nullb_device *null_alloc_dev(void)
+{
+ struct nullb_device *dev;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return NULL;
+ INIT_RADIX_TREE(&dev->data, GFP_ATOMIC);
+ INIT_RADIX_TREE(&dev->cache, GFP_ATOMIC);
+ if (badblocks_init(&dev->badblocks, 0)) {
+ kfree(dev);
+ return NULL;
+ }
+
+ dev->size = g_gb * 1024;
+ dev->completion_nsec = g_completion_nsec;
+ dev->submit_queues = g_submit_queues;
+ dev->home_node = g_home_node;
+ dev->queue_mode = g_queue_mode;
+ dev->blocksize = g_bs;
+ dev->irqmode = g_irqmode;
+ dev->hw_queue_depth = g_hw_queue_depth;
+ dev->use_lightnvm = g_use_lightnvm;
+ dev->blocking = g_blocking;
+ dev->use_per_node_hctx = g_use_per_node_hctx;
+ return dev;
+}
+
+static void null_free_dev(struct nullb_device *dev)
+{
+ kfree(dev);
+}
+
static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
clear_bit_unlock(tag, nq->tag_map);
@@ -193,7 +619,7 @@ static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
cmd = &nq->cmds[tag];
cmd->tag = tag;
cmd->nq = nq;
- if (irqmode == NULL_IRQ_TIMER) {
+ if (nq->dev->irqmode == NULL_IRQ_TIMER) {
hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL);
cmd->timer.function = null_cmd_timer_expired;
@@ -229,19 +655,21 @@ static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
static void end_cmd(struct nullb_cmd *cmd)
{
struct request_queue *q = NULL;
+ int queue_mode = cmd->nq->dev->queue_mode;
if (cmd->rq)
q = cmd->rq->q;
switch (queue_mode) {
case NULL_Q_MQ:
- blk_mq_end_request(cmd->rq, BLK_STS_OK);
+ blk_mq_end_request(cmd->rq, cmd->error);
return;
case NULL_Q_RQ:
INIT_LIST_HEAD(&cmd->rq->queuelist);
- blk_end_request_all(cmd->rq, BLK_STS_OK);
+ blk_end_request_all(cmd->rq, cmd->error);
break;
case NULL_Q_BIO:
+ cmd->bio->bi_status = cmd->error;
bio_endio(cmd->bio);
break;
}
@@ -267,25 +695,582 @@ static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
- ktime_t kt = completion_nsec;
+ ktime_t kt = cmd->nq->dev->completion_nsec;
hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
}
static void null_softirq_done_fn(struct request *rq)
{
- if (queue_mode == NULL_Q_MQ)
+ struct nullb *nullb = rq->q->queuedata;
+
+ if (nullb->dev->queue_mode == NULL_Q_MQ)
end_cmd(blk_mq_rq_to_pdu(rq));
else
end_cmd(rq->special);
}
-static inline void null_handle_cmd(struct nullb_cmd *cmd)
+static struct nullb_page *null_alloc_page(gfp_t gfp_flags)
+{
+ struct nullb_page *t_page;
+
+ t_page = kmalloc(sizeof(struct nullb_page), gfp_flags);
+ if (!t_page)
+ goto out;
+
+ t_page->page = alloc_pages(gfp_flags, 0);
+ if (!t_page->page)
+ goto out_freepage;
+
+ t_page->bitmap = 0;
+ return t_page;
+out_freepage:
+ kfree(t_page);
+out:
+ return NULL;
+}
+
+static void null_free_page(struct nullb_page *t_page)
+{
+ __set_bit(NULLB_PAGE_FREE, &t_page->bitmap);
+ if (test_bit(NULLB_PAGE_LOCK, &t_page->bitmap))
+ return;
+ __free_page(t_page->page);
+ kfree(t_page);
+}
+
+static void null_free_sector(struct nullb *nullb, sector_t sector,
+ bool is_cache)
+{
+ unsigned int sector_bit;
+ u64 idx;
+ struct nullb_page *t_page, *ret;
+ struct radix_tree_root *root;
+
+ root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
+ idx = sector >> PAGE_SECTORS_SHIFT;
+ sector_bit = (sector & SECTOR_MASK);
+
+ t_page = radix_tree_lookup(root, idx);
+ if (t_page) {
+ __clear_bit(sector_bit, &t_page->bitmap);
+
+ if (!t_page->bitmap) {
+ ret = radix_tree_delete_item(root, idx, t_page);
+ WARN_ON(ret != t_page);
+ null_free_page(ret);
+ if (is_cache)
+ nullb->dev->curr_cache -= PAGE_SIZE;
+ }
+ }
+}
+
+static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx,
+ struct nullb_page *t_page, bool is_cache)
+{
+ struct radix_tree_root *root;
+
+ root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
+
+ if (radix_tree_insert(root, idx, t_page)) {
+ null_free_page(t_page);
+ t_page = radix_tree_lookup(root, idx);
+ WARN_ON(!t_page || t_page->page->index != idx);
+ } else if (is_cache)
+ nullb->dev->curr_cache += PAGE_SIZE;
+
+ return t_page;
+}
+
+static void null_free_device_storage(struct nullb_device *dev, bool is_cache)
+{
+ unsigned long pos = 0;
+ int nr_pages;
+ struct nullb_page *ret, *t_pages[FREE_BATCH];
+ struct radix_tree_root *root;
+
+ root = is_cache ? &dev->cache : &dev->data;
+
+ do {
+ int i;
+
+ nr_pages = radix_tree_gang_lookup(root,
+ (void **)t_pages, pos, FREE_BATCH);
+
+ for (i = 0; i < nr_pages; i++) {
+ pos = t_pages[i]->page->index;
+ ret = radix_tree_delete_item(root, pos, t_pages[i]);
+ WARN_ON(ret != t_pages[i]);
+ null_free_page(ret);
+ }
+
+ pos++;
+ } while (nr_pages == FREE_BATCH);
+
+ if (is_cache)
+ dev->curr_cache = 0;
+}
+
+static struct nullb_page *__null_lookup_page(struct nullb *nullb,
+ sector_t sector, bool for_write, bool is_cache)
+{
+ unsigned int sector_bit;
+ u64 idx;
+ struct nullb_page *t_page;
+ struct radix_tree_root *root;
+
+ idx = sector >> PAGE_SECTORS_SHIFT;
+ sector_bit = (sector & SECTOR_MASK);
+
+ root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
+ t_page = radix_tree_lookup(root, idx);
+ WARN_ON(t_page && t_page->page->index != idx);
+
+ if (t_page && (for_write || test_bit(sector_bit, &t_page->bitmap)))
+ return t_page;
+
+ return NULL;
+}
+
+static struct nullb_page *null_lookup_page(struct nullb *nullb,
+ sector_t sector, bool for_write, bool ignore_cache)
+{
+ struct nullb_page *page = NULL;
+
+ if (!ignore_cache)
+ page = __null_lookup_page(nullb, sector, for_write, true);
+ if (page)
+ return page;
+ return __null_lookup_page(nullb, sector, for_write, false);
+}
+
+static struct nullb_page *null_insert_page(struct nullb *nullb,
+ sector_t sector, bool ignore_cache)
+{
+ u64 idx;
+ struct nullb_page *t_page;
+
+ t_page = null_lookup_page(nullb, sector, true, ignore_cache);
+ if (t_page)
+ return t_page;
+
+ spin_unlock_irq(&nullb->lock);
+
+ t_page = null_alloc_page(GFP_NOIO);
+ if (!t_page)
+ goto out_lock;
+
+ if (radix_tree_preload(GFP_NOIO))
+ goto out_freepage;
+
+ spin_lock_irq(&nullb->lock);
+ idx = sector >> PAGE_SECTORS_SHIFT;
+ t_page->page->index = idx;
+ t_page = null_radix_tree_insert(nullb, idx, t_page, !ignore_cache);
+ radix_tree_preload_end();
+
+ return t_page;
+out_freepage:
+ null_free_page(t_page);
+out_lock:
+ spin_lock_irq(&nullb->lock);
+ return null_lookup_page(nullb, sector, true, ignore_cache);
+}
+
+static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
+{
+ int i;
+ unsigned int offset;
+ u64 idx;
+ struct nullb_page *t_page, *ret;
+ void *dst, *src;
+
+ idx = c_page->page->index;
+
+ t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true);
+
+ __clear_bit(NULLB_PAGE_LOCK, &c_page->bitmap);
+ if (test_bit(NULLB_PAGE_FREE, &c_page->bitmap)) {
+ null_free_page(c_page);
+ if (t_page && t_page->bitmap == 0) {
+ ret = radix_tree_delete_item(&nullb->dev->data,
+ idx, t_page);
+ null_free_page(t_page);
+ }
+ return 0;
+ }
+
+ if (!t_page)
+ return -ENOMEM;
+
+ src = kmap_atomic(c_page->page);
+ dst = kmap_atomic(t_page->page);
+
+ for (i = 0; i < PAGE_SECTORS;
+ i += (nullb->dev->blocksize >> SECTOR_SHIFT)) {
+ if (test_bit(i, &c_page->bitmap)) {
+ offset = (i << SECTOR_SHIFT);
+ memcpy(dst + offset, src + offset,
+ nullb->dev->blocksize);
+ __set_bit(i, &t_page->bitmap);
+ }
+ }
+
+ kunmap_atomic(dst);
+ kunmap_atomic(src);
+
+ ret = radix_tree_delete_item(&nullb->dev->cache, idx, c_page);
+ null_free_page(ret);
+ nullb->dev->curr_cache -= PAGE_SIZE;
+
+ return 0;
+}
+
+static int null_make_cache_space(struct nullb *nullb, unsigned long n)
{
+ int i, err, nr_pages;
+ struct nullb_page *c_pages[FREE_BATCH];
+ unsigned long flushed = 0, one_round;
+
+again:
+ if ((nullb->dev->cache_size * 1024 * 1024) >
+ nullb->dev->curr_cache + n || nullb->dev->curr_cache == 0)
+ return 0;
+
+ nr_pages = radix_tree_gang_lookup(&nullb->dev->cache,
+ (void **)c_pages, nullb->cache_flush_pos, FREE_BATCH);
+ /*
+ * null_flush_cache_page() could drop the lock before using the c_pages. To
+ * avoid a race, we don't allow the pages to be freed in the meantime
+ */
+ for (i = 0; i < nr_pages; i++) {
+ nullb->cache_flush_pos = c_pages[i]->page->index;
+ /*
+ * We found the page which is being flushed to disk by other
+ * threads
+ */
+ if (test_bit(NULLB_PAGE_LOCK, &c_pages[i]->bitmap))
+ c_pages[i] = NULL;
+ else
+ __set_bit(NULLB_PAGE_LOCK, &c_pages[i]->bitmap);
+ }
+
+ one_round = 0;
+ for (i = 0; i < nr_pages; i++) {
+ if (c_pages[i] == NULL)
+ continue;
+ err = null_flush_cache_page(nullb, c_pages[i]);
+ if (err)
+ return err;
+ one_round++;
+ }
+ flushed += one_round << PAGE_SHIFT;
+
+ if (n > flushed) {
+ if (nr_pages == 0)
+ nullb->cache_flush_pos = 0;
+ if (one_round == 0) {
+ /* give other threads a chance */
+ spin_unlock_irq(&nullb->lock);
+ spin_lock_irq(&nullb->lock);
+ }
+ goto again;
+ }
+ return 0;
+}
+
+static int copy_to_nullb(struct nullb *nullb, struct page *source,
+ unsigned int off, sector_t sector, size_t n, bool is_fua)
+{
+ size_t temp, count = 0;
+ unsigned int offset;
+ struct nullb_page *t_page;
+ void *dst, *src;
+
+ while (count < n) {
+ temp = min_t(size_t, nullb->dev->blocksize, n - count);
+
+ if (null_cache_active(nullb) && !is_fua)
+ null_make_cache_space(nullb, PAGE_SIZE);
+
+ offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
+ t_page = null_insert_page(nullb, sector,
+ !null_cache_active(nullb) || is_fua);
+ if (!t_page)
+ return -ENOSPC;
+
+ src = kmap_atomic(source);
+ dst = kmap_atomic(t_page->page);
+ memcpy(dst + offset, src + off + count, temp);
+ kunmap_atomic(dst);
+ kunmap_atomic(src);
+
+ __set_bit(sector & SECTOR_MASK, &t_page->bitmap);
+
+ if (is_fua)
+ null_free_sector(nullb, sector, true);
+
+ count += temp;
+ sector += temp >> SECTOR_SHIFT;
+ }
+ return 0;
+}
+
+static int copy_from_nullb(struct nullb *nullb, struct page *dest,
+ unsigned int off, sector_t sector, size_t n)
+{
+ size_t temp, count = 0;
+ unsigned int offset;
+ struct nullb_page *t_page;
+ void *dst, *src;
+
+ while (count < n) {
+ temp = min_t(size_t, nullb->dev->blocksize, n - count);
+
+ offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
+ t_page = null_lookup_page(nullb, sector, false,
+ !null_cache_active(nullb));
+
+ dst = kmap_atomic(dest);
+ if (!t_page) {
+ memset(dst + off + count, 0, temp);
+ goto next;
+ }
+ src = kmap_atomic(t_page->page);
+ memcpy(dst + off + count, src + offset, temp);
+ kunmap_atomic(src);
+next:
+ kunmap_atomic(dst);
+
+ count += temp;
+ sector += temp >> SECTOR_SHIFT;
+ }
+ return 0;
+}
+
+static void null_handle_discard(struct nullb *nullb, sector_t sector, size_t n)
+{
+ size_t temp;
+
+ spin_lock_irq(&nullb->lock);
+ while (n > 0) {
+ temp = min_t(size_t, n, nullb->dev->blocksize);
+ null_free_sector(nullb, sector, false);
+ if (null_cache_active(nullb))
+ null_free_sector(nullb, sector, true);
+ sector += temp >> SECTOR_SHIFT;
+ n -= temp;
+ }
+ spin_unlock_irq(&nullb->lock);
+}
+
+static int null_handle_flush(struct nullb *nullb)
+{
+ int err;
+
+ if (!null_cache_active(nullb))
+ return 0;
+
+ spin_lock_irq(&nullb->lock);
+ while (true) {
+ err = null_make_cache_space(nullb,
+ nullb->dev->cache_size * 1024 * 1024);
+ if (err || nullb->dev->curr_cache == 0)
+ break;
+ }
+
+ WARN_ON(!radix_tree_empty(&nullb->dev->cache));
+ spin_unlock_irq(&nullb->lock);
+ return err;
+}
+
+static int null_transfer(struct nullb *nullb, struct page *page,
+ unsigned int len, unsigned int off, bool is_write, sector_t sector,
+ bool is_fua)
+{
+ int err = 0;
+
+ if (!is_write) {
+ err = copy_from_nullb(nullb, page, off, sector, len);
+ flush_dcache_page(page);
+ } else {
+ flush_dcache_page(page);
+ err = copy_to_nullb(nullb, page, off, sector, len, is_fua);
+ }
+
+ return err;
+}
+
+static int null_handle_rq(struct nullb_cmd *cmd)
+{
+ struct request *rq = cmd->rq;
+ struct nullb *nullb = cmd->nq->dev->nullb;
+ int err;
+ unsigned int len;
+ sector_t sector;
+ struct req_iterator iter;
+ struct bio_vec bvec;
+
+ sector = blk_rq_pos(rq);
+
+ if (req_op(rq) == REQ_OP_DISCARD) {
+ null_handle_discard(nullb, sector, blk_rq_bytes(rq));
+ return 0;
+ }
+
+ spin_lock_irq(&nullb->lock);
+ rq_for_each_segment(bvec, rq, iter) {
+ len = bvec.bv_len;
+ err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
+ op_is_write(req_op(rq)), sector,
+ req_op(rq) & REQ_FUA);
+ if (err) {
+ spin_unlock_irq(&nullb->lock);
+ return err;
+ }
+ sector += len >> SECTOR_SHIFT;
+ }
+ spin_unlock_irq(&nullb->lock);
+
+ return 0;
+}
+
+static int null_handle_bio(struct nullb_cmd *cmd)
+{
+ struct bio *bio = cmd->bio;
+ struct nullb *nullb = cmd->nq->dev->nullb;
+ int err;
+ unsigned int len;
+ sector_t sector;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
+
+ sector = bio->bi_iter.bi_sector;
+
+ if (bio_op(bio) == REQ_OP_DISCARD) {
+ null_handle_discard(nullb, sector,
+ bio_sectors(bio) << SECTOR_SHIFT);
+ return 0;
+ }
+
+ spin_lock_irq(&nullb->lock);
+ bio_for_each_segment(bvec, bio, iter) {
+ len = bvec.bv_len;
+ err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
+ op_is_write(bio_op(bio)), sector,
+ bio_op(bio) & REQ_FUA);
+ if (err) {
+ spin_unlock_irq(&nullb->lock);
+ return err;
+ }
+ sector += len >> SECTOR_SHIFT;
+ }
+ spin_unlock_irq(&nullb->lock);
+ return 0;
+}
+
+static void null_stop_queue(struct nullb *nullb)
+{
+ struct request_queue *q = nullb->q;
+
+ if (nullb->dev->queue_mode == NULL_Q_MQ)
+ blk_mq_stop_hw_queues(q);
+ else {
+ spin_lock_irq(q->queue_lock);
+ blk_stop_queue(q);
+ spin_unlock_irq(q->queue_lock);
+ }
+}
+
+static void null_restart_queue_async(struct nullb *nullb)
+{
+ struct request_queue *q = nullb->q;
+ unsigned long flags;
+
+ if (nullb->dev->queue_mode == NULL_Q_MQ)
+ blk_mq_start_stopped_hw_queues(q, true);
+ else {
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_start_queue_async(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ }
+}
+
+static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
+{
+ struct nullb_device *dev = cmd->nq->dev;
+ struct nullb *nullb = dev->nullb;
+ int err = 0;
+
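+ /*
+ * Bandwidth throttling: charge this request against the per-tick byte
+ * budget and stop the queue when the budget runs out;
+ * nullb_bwtimer_fn() refills the budget and restarts the queue.
+ */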
+ if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
+ struct request *rq = cmd->rq;
+
+ if (!hrtimer_active(&nullb->bw_timer))
+ hrtimer_restart(&nullb->bw_timer);
+
+ if (atomic_long_sub_return(blk_rq_bytes(rq),
+ &nullb->cur_bytes) < 0) {
+ null_stop_queue(nullb);
+ /* race with timer */
+ if (atomic_long_read(&nullb->cur_bytes) > 0)
+ null_restart_queue_async(nullb);
+ if (dev->queue_mode == NULL_Q_RQ) {
+ struct request_queue *q = nullb->q;
+
+ spin_lock_irq(q->queue_lock);
+ rq->rq_flags |= RQF_DONTPREP;
+ blk_requeue_request(q, rq);
+ spin_unlock_irq(q->queue_lock);
+ return BLK_STS_OK;
+ } else
+ /* requeue request */
+ return BLK_STS_RESOURCE;
+ }
+ }
+
+ if (nullb->dev->badblocks.shift != -1) {
+ int bad_sectors;
+ sector_t sector, size, first_bad;
+ bool is_flush = true;
+
+ if (dev->queue_mode == NULL_Q_BIO &&
+ bio_op(cmd->bio) != REQ_OP_FLUSH) {
+ is_flush = false;
+ sector = cmd->bio->bi_iter.bi_sector;
+ size = bio_sectors(cmd->bio);
+ }
+ if (dev->queue_mode != NULL_Q_BIO &&
+ req_op(cmd->rq) != REQ_OP_FLUSH) {
+ is_flush = false;
+ sector = blk_rq_pos(cmd->rq);
+ size = blk_rq_sectors(cmd->rq);
+ }
+ if (!is_flush && badblocks_check(&nullb->dev->badblocks, sector,
+ size, &first_bad, &bad_sectors)) {
+ cmd->error = BLK_STS_IOERR;
+ goto out;
+ }
+ }
+
+ if (dev->memory_backed) {
+ if (dev->queue_mode == NULL_Q_BIO) {
+ if (bio_op(cmd->bio) == REQ_OP_FLUSH)
+ err = null_handle_flush(nullb);
+ else
+ err = null_handle_bio(cmd);
+ } else {
+ if (req_op(cmd->rq) == REQ_OP_FLUSH)
+ err = null_handle_flush(nullb);
+ else
+ err = null_handle_rq(cmd);
+ }
+ }
+ cmd->error = errno_to_blk_status(err);
+out:
/* Complete IO by inline, softirq or timer */
- switch (irqmode) {
+ switch (dev->irqmode) {
case NULL_IRQ_SOFTIRQ:
- switch (queue_mode) {
+ switch (dev->queue_mode) {
case NULL_Q_MQ:
blk_mq_complete_request(cmd->rq);
break;
@@ -307,6 +1292,34 @@ static inline void null_handle_cmd(struct nullb_cmd *cmd)
null_cmd_end_timer(cmd);
break;
}
+ return BLK_STS_OK;
+}
+
+static enum hrtimer_restart nullb_bwtimer_fn(struct hrtimer *timer)
+{
+ struct nullb *nullb = container_of(timer, struct nullb, bw_timer);
+ ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);
+ unsigned int mbps = nullb->dev->mbps;
+
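+ /* The budget was not touched since the last tick, so stop the timer. */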
+ if (atomic_long_read(&nullb->cur_bytes) == mb_per_tick(mbps))
+ return HRTIMER_NORESTART;
+
+ atomic_long_set(&nullb->cur_bytes, mb_per_tick(mbps));
+ null_restart_queue_async(nullb);
+
+ hrtimer_forward_now(&nullb->bw_timer, timer_interval);
+
+ return HRTIMER_RESTART;
+}
+
+static void nullb_setup_bwtimer(struct nullb *nullb)
+{
+ ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);
+
+ hrtimer_init(&nullb->bw_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ nullb->bw_timer.function = nullb_bwtimer_fn;
+ atomic_long_set(&nullb->cur_bytes, mb_per_tick(nullb->dev->mbps));
+ hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL);
}
static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
@@ -366,20 +1379,20 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
+ struct nullb_queue *nq = hctx->driver_data;
might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
- if (irqmode == NULL_IRQ_TIMER) {
+ if (nq->dev->irqmode == NULL_IRQ_TIMER) {
hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
cmd->timer.function = null_cmd_timer_expired;
}
cmd->rq = bd->rq;
- cmd->nq = hctx->driver_data;
+ cmd->nq = nq;
blk_mq_start_request(bd->rq);
- null_handle_cmd(cmd);
- return BLK_STS_OK;
+ return null_handle_cmd(cmd);
}
static const struct blk_mq_ops null_mq_ops = {
@@ -438,7 +1451,8 @@ static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
{
- sector_t size = gb * 1024 * 1024 * 1024ULL;
+ struct nullb *nullb = dev->q->queuedata;
+ sector_t size = (sector_t)nullb->dev->size * 1024 * 1024ULL;
sector_t blksize;
struct nvm_id_group *grp;
@@ -460,7 +1474,7 @@ static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
id->ppaf.ch_offset = 56;
id->ppaf.ch_len = 8;
- sector_div(size, bs); /* convert size to pages */
+ sector_div(size, nullb->dev->blocksize); /* convert size to pages */
size >>= 8; /* convert size to pgs per blk */
grp = &id->grp;
grp->mtype = 0;
@@ -474,8 +1488,8 @@ static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
grp->num_blk = blksize;
grp->num_pln = 1;
- grp->fpg_sz = bs;
- grp->csecs = bs;
+ grp->fpg_sz = nullb->dev->blocksize;
+ grp->csecs = nullb->dev->blocksize;
grp->trdt = 25000;
grp->trdm = 25000;
grp->tprt = 500000;
@@ -483,7 +1497,7 @@ static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
grp->tbet = 1500000;
grp->tbem = 1500000;
grp->mpos = 0x010101; /* single plane rwe */
- grp->cpar = hw_queue_depth;
+ grp->cpar = nullb->dev->hw_queue_depth;
return 0;
}
@@ -568,19 +1582,44 @@ static void null_nvm_unregister(struct nullb *nullb) {}
static void null_del_dev(struct nullb *nullb)
{
+ struct nullb_device *dev = nullb->dev;
+
+ ida_simple_remove(&nullb_indexes, nullb->index);
+
list_del_init(&nullb->list);
- if (use_lightnvm)
+ if (dev->use_lightnvm)
null_nvm_unregister(nullb);
else
del_gendisk(nullb->disk);
+
+ if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) {
+ hrtimer_cancel(&nullb->bw_timer);
+ atomic_long_set(&nullb->cur_bytes, LONG_MAX);
+ null_restart_queue_async(nullb);
+ }
+
blk_cleanup_queue(nullb->q);
- if (queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
+ if (dev->queue_mode == NULL_Q_MQ &&
+ nullb->tag_set == &nullb->__tag_set)
blk_mq_free_tag_set(nullb->tag_set);
- if (!use_lightnvm)
+ if (!dev->use_lightnvm)
put_disk(nullb->disk);
cleanup_queues(nullb);
+ if (null_cache_active(nullb))
+ null_free_device_storage(nullb->dev, true);
kfree(nullb);
+ dev->nullb = NULL;
+}
+
+static void null_config_discard(struct nullb *nullb)
+{
+ if (nullb->dev->discard == false)
+ return;
+ nullb->q->limits.discard_granularity = nullb->dev->blocksize;
+ nullb->q->limits.discard_alignment = nullb->dev->blocksize;
+ blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9);
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nullb->q);
}
static int null_open(struct block_device *bdev, fmode_t mode)
@@ -605,6 +1644,7 @@ static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
init_waitqueue_head(&nq->wait);
nq->queue_depth = nullb->queue_depth;
+ nq->dev = nullb->dev;
}
static void null_init_queues(struct nullb *nullb)
@@ -652,13 +1692,13 @@ static int setup_commands(struct nullb_queue *nq)
static int setup_queues(struct nullb *nullb)
{
- nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
- GFP_KERNEL);
+ nullb->queues = kzalloc(nullb->dev->submit_queues *
+ sizeof(struct nullb_queue), GFP_KERNEL);
if (!nullb->queues)
return -ENOMEM;
nullb->nr_queues = 0;
- nullb->queue_depth = hw_queue_depth;
+ nullb->queue_depth = nullb->dev->hw_queue_depth;
return 0;
}
@@ -668,7 +1708,7 @@ static int init_driver_queues(struct nullb *nullb)
struct nullb_queue *nq;
int i, ret = 0;
- for (i = 0; i < submit_queues; i++) {
+ for (i = 0; i < nullb->dev->submit_queues; i++) {
nq = &nullb->queues[i];
null_init_queue(nullb, nq);
@@ -686,10 +1726,10 @@ static int null_gendisk_register(struct nullb *nullb)
struct gendisk *disk;
sector_t size;
- disk = nullb->disk = alloc_disk_node(1, home_node);
+ disk = nullb->disk = alloc_disk_node(1, nullb->dev->home_node);
if (!disk)
return -ENOMEM;
- size = gb * 1024 * 1024 * 1024ULL;
+ size = (sector_t)nullb->dev->size * 1024 * 1024ULL;
set_capacity(disk, size >> 9);
disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
@@ -704,49 +1744,86 @@ static int null_gendisk_register(struct nullb *nullb)
return 0;
}
-static int null_init_tag_set(struct blk_mq_tag_set *set)
+static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
{
set->ops = &null_mq_ops;
- set->nr_hw_queues = submit_queues;
- set->queue_depth = hw_queue_depth;
- set->numa_node = home_node;
+ set->nr_hw_queues = nullb ? nullb->dev->submit_queues :
+ g_submit_queues;
+ set->queue_depth = nullb ? nullb->dev->hw_queue_depth :
+ g_hw_queue_depth;
+ set->numa_node = nullb ? nullb->dev->home_node : g_home_node;
set->cmd_size = sizeof(struct nullb_cmd);
set->flags = BLK_MQ_F_SHOULD_MERGE;
set->driver_data = NULL;
- if (blocking)
+ if ((nullb && nullb->dev->blocking) || g_blocking)
set->flags |= BLK_MQ_F_BLOCKING;
return blk_mq_alloc_tag_set(set);
}
-static int null_add_dev(void)
+static void null_validate_conf(struct nullb_device *dev)
+{
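+ /* Clamp the per-device configuration to values the driver supports. */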
+ dev->blocksize = round_down(dev->blocksize, 512);
+ dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096);
+ if (dev->use_lightnvm && dev->blocksize != 4096)
+ dev->blocksize = 4096;
+
+ if (dev->use_lightnvm && dev->queue_mode != NULL_Q_MQ)
+ dev->queue_mode = NULL_Q_MQ;
+
+ if (dev->queue_mode == NULL_Q_MQ && dev->use_per_node_hctx) {
+ if (dev->submit_queues != nr_online_nodes)
+ dev->submit_queues = nr_online_nodes;
+ } else if (dev->submit_queues > nr_cpu_ids)
+ dev->submit_queues = nr_cpu_ids;
+ else if (dev->submit_queues == 0)
+ dev->submit_queues = 1;
+
+ dev->queue_mode = min_t(unsigned int, dev->queue_mode, NULL_Q_MQ);
+ dev->irqmode = min_t(unsigned int, dev->irqmode, NULL_IRQ_TIMER);
+
+ /* Do memory allocation, so set blocking */
+ if (dev->memory_backed)
+ dev->blocking = true;
+ else /* cache is meaningless */
+ dev->cache_size = 0;
+ dev->cache_size = min_t(unsigned long, ULONG_MAX / 1024 / 1024,
+ dev->cache_size);
+ dev->mbps = min_t(unsigned int, 1024 * 40, dev->mbps);
+ /* can not stop a queue */
+ if (dev->queue_mode == NULL_Q_BIO)
+ dev->mbps = 0;
+}
+
+static int null_add_dev(struct nullb_device *dev)
{
struct nullb *nullb;
int rv;
- nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
+ null_validate_conf(dev);
+
+ nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, dev->home_node);
if (!nullb) {
rv = -ENOMEM;
goto out;
}
+ nullb->dev = dev;
+ dev->nullb = nullb;
spin_lock_init(&nullb->lock);
- if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
- submit_queues = nr_online_nodes;
-
rv = setup_queues(nullb);
if (rv)
goto out_free_nullb;
- if (queue_mode == NULL_Q_MQ) {
+ if (dev->queue_mode == NULL_Q_MQ) {
if (shared_tags) {
nullb->tag_set = &tag_set;
rv = 0;
} else {
nullb->tag_set = &nullb->__tag_set;
- rv = null_init_tag_set(nullb->tag_set);
+ rv = null_init_tag_set(nullb, nullb->tag_set);
}
if (rv)
@@ -758,8 +1835,8 @@ static int null_add_dev(void)
goto out_cleanup_tags;
}
null_init_queues(nullb);
- } else if (queue_mode == NULL_Q_BIO) {
- nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
+ } else if (dev->queue_mode == NULL_Q_BIO) {
+ nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node);
if (!nullb->q) {
rv = -ENOMEM;
goto out_cleanup_queues;
@@ -769,7 +1846,8 @@ static int null_add_dev(void)
if (rv)
goto out_cleanup_blk_queue;
} else {
- nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
+ nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock,
+ dev->home_node);
if (!nullb->q) {
rv = -ENOMEM;
goto out_cleanup_queues;
@@ -781,20 +1859,34 @@ static int null_add_dev(void)
goto out_cleanup_blk_queue;
}
+ if (dev->mbps) {
+ set_bit(NULLB_DEV_FL_THROTTLED, &dev->flags);
+ nullb_setup_bwtimer(nullb);
+ }
+
+ if (dev->cache_size > 0) {
+ set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
+ blk_queue_write_cache(nullb->q, true, true);
+ blk_queue_flush_queueable(nullb->q, true);
+ }
+
nullb->q->queuedata = nullb;
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);
mutex_lock(&lock);
- nullb->index = nullb_indexes++;
+ nullb->index = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);
+ dev->index = nullb->index;
mutex_unlock(&lock);
- blk_queue_logical_block_size(nullb->q, bs);
- blk_queue_physical_block_size(nullb->q, bs);
+ blk_queue_logical_block_size(nullb->q, dev->blocksize);
+ blk_queue_physical_block_size(nullb->q, dev->blocksize);
+
+ null_config_discard(nullb);
sprintf(nullb->disk_name, "nullb%d", nullb->index);
- if (use_lightnvm)
+ if (dev->use_lightnvm)
rv = null_nvm_register(nullb);
else
rv = null_gendisk_register(nullb);
@@ -810,7 +1902,7 @@ static int null_add_dev(void)
out_cleanup_blk_queue:
blk_cleanup_queue(nullb->q);
out_cleanup_tags:
- if (queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
+ if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
blk_mq_free_tag_set(nullb->tag_set);
out_cleanup_queues:
cleanup_queues(nullb);
@@ -825,51 +1917,63 @@ static int __init null_init(void)
int ret = 0;
unsigned int i;
struct nullb *nullb;
+ struct nullb_device *dev;
+
+ /* check for nullb_page.bitmap */
+ if (sizeof(unsigned long) * 8 - 2 < (PAGE_SIZE >> SECTOR_SHIFT))
+ return -EINVAL;
- if (bs > PAGE_SIZE) {
+ if (g_bs > PAGE_SIZE) {
pr_warn("null_blk: invalid block size\n");
pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);
- bs = PAGE_SIZE;
+ g_bs = PAGE_SIZE;
}
- if (use_lightnvm && bs != 4096) {
+ if (g_use_lightnvm && g_bs != 4096) {
pr_warn("null_blk: LightNVM only supports 4k block size\n");
pr_warn("null_blk: defaults block size to 4k\n");
- bs = 4096;
+ g_bs = 4096;
}
- if (use_lightnvm && queue_mode != NULL_Q_MQ) {
+ if (g_use_lightnvm && g_queue_mode != NULL_Q_MQ) {
pr_warn("null_blk: LightNVM only supported for blk-mq\n");
pr_warn("null_blk: defaults queue mode to blk-mq\n");
- queue_mode = NULL_Q_MQ;
+ g_queue_mode = NULL_Q_MQ;
}
- if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
- if (submit_queues < nr_online_nodes) {
- pr_warn("null_blk: submit_queues param is set to %u.",
+ if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) {
+ if (g_submit_queues != nr_online_nodes) {
+ pr_warn("null_blk: submit_queues param is set to %u.\n",
nr_online_nodes);
- submit_queues = nr_online_nodes;
+ g_submit_queues = nr_online_nodes;
}
- } else if (submit_queues > nr_cpu_ids)
- submit_queues = nr_cpu_ids;
- else if (!submit_queues)
- submit_queues = 1;
+ } else if (g_submit_queues > nr_cpu_ids)
+ g_submit_queues = nr_cpu_ids;
+ else if (g_submit_queues <= 0)
+ g_submit_queues = 1;
- if (queue_mode == NULL_Q_MQ && shared_tags) {
- ret = null_init_tag_set(&tag_set);
+ if (g_queue_mode == NULL_Q_MQ && shared_tags) {
+ ret = null_init_tag_set(NULL, &tag_set);
if (ret)
return ret;
}
+ config_group_init(&nullb_subsys.su_group);
+ mutex_init(&nullb_subsys.su_mutex);
+
+ ret = configfs_register_subsystem(&nullb_subsys);
+ if (ret)
+ goto err_tagset;
+
mutex_init(&lock);
null_major = register_blkdev(0, "nullb");
if (null_major < 0) {
ret = null_major;
- goto err_tagset;
+ goto err_conf;
}
- if (use_lightnvm) {
+ if (g_use_lightnvm) {
ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64),
0, 0, NULL);
if (!ppa_cache) {
@@ -880,9 +1984,14 @@ static int __init null_init(void)
}
for (i = 0; i < nr_devices; i++) {
- ret = null_add_dev();
- if (ret)
+ dev = null_alloc_dev();
+ if (!dev)
+ goto err_dev;
+ ret = null_add_dev(dev);
+ if (ret) {
+ null_free_dev(dev);
goto err_dev;
+ }
}
pr_info("null: module loaded\n");
@@ -891,13 +2000,17 @@ static int __init null_init(void)
err_dev:
while (!list_empty(&nullb_list)) {
nullb = list_entry(nullb_list.next, struct nullb, list);
+ dev = nullb->dev;
null_del_dev(nullb);
+ null_free_dev(dev);
}
kmem_cache_destroy(ppa_cache);
err_ppa:
unregister_blkdev(null_major, "nullb");
+err_conf:
+ configfs_unregister_subsystem(&nullb_subsys);
err_tagset:
- if (queue_mode == NULL_Q_MQ && shared_tags)
+ if (g_queue_mode == NULL_Q_MQ && shared_tags)
blk_mq_free_tag_set(&tag_set);
return ret;
}
@@ -906,16 +2019,22 @@ static void __exit null_exit(void)
{
struct nullb *nullb;
+ configfs_unregister_subsystem(&nullb_subsys);
+
unregister_blkdev(null_major, "nullb");
mutex_lock(&lock);
while (!list_empty(&nullb_list)) {
+ struct nullb_device *dev;
+
nullb = list_entry(nullb_list.next, struct nullb, list);
+ dev = nullb->dev;
null_del_dev(nullb);
+ null_free_dev(dev);
}
mutex_unlock(&lock);
- if (queue_mode == NULL_Q_MQ && shared_tags)
+ if (g_queue_mode == NULL_Q_MQ && shared_tags)
blk_mq_free_tag_set(&tag_set);
kmem_cache_destroy(ppa_cache);
@@ -924,5 +2043,5 @@ static void __exit null_exit(void)
module_init(null_init);
module_exit(null_exit);
-MODULE_AUTHOR("Jens Axboe <jaxboe@fusionio.com>");
+MODULE_AUTHOR("Jens Axboe <axboe@kernel.dk>");
MODULE_LICENSE("GPL");
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 6b8b097abbb9..67974796c350 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -1028,7 +1028,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
bio = pkt->r_bios[f];
bio_reset(bio);
bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
- bio->bi_bdev = pd->bdev;
+ bio_set_dev(bio, pd->bdev);
bio->bi_end_io = pkt_end_io_read;
bio->bi_private = pkt;
@@ -1122,7 +1122,7 @@ static int pkt_start_recovery(struct packet_data *pkt)
pkt->sector = new_sector;
bio_reset(pkt->bio);
- pkt->bio->bi_bdev = pd->bdev;
+ bio_set_dev(pkt->bio, pd->bdev);
bio_set_op_attrs(pkt->bio, REQ_OP_WRITE, 0);
pkt->bio->bi_iter.bi_sector = new_sector;
pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE;
@@ -1267,7 +1267,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
bio_reset(pkt->w_bio);
pkt->w_bio->bi_iter.bi_sector = pkt->sector;
- pkt->w_bio->bi_bdev = pd->bdev;
+ bio_set_dev(pkt->w_bio, pd->bdev);
pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
pkt->w_bio->bi_private = pkt;
@@ -2314,7 +2314,7 @@ static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
psd->pd = pd;
psd->bio = bio;
- cloned_bio->bi_bdev = pd->bdev;
+ bio_set_dev(cloned_bio, pd->bdev);
cloned_bio->bi_private = psd;
cloned_bio->bi_end_io = pkt_end_io_read_cloned;
pd->stats.secs_r += bio_sectors(bio);
@@ -2415,8 +2415,7 @@ static blk_qc_t pkt_make_request(struct request_queue *q, struct bio *bio)
pd = q->queuedata;
if (!pd) {
- pr_err("%s incorrect request queue\n",
- bdevname(bio->bi_bdev, b));
+ pr_err("%s incorrect request queue\n", bio_devname(bio, b));
goto end_io;
}
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index e0e81cacd781..6a55959cbf78 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -409,10 +409,8 @@ static int ps3vram_cache_init(struct ps3_system_bus_device *dev)
priv->cache.page_size = CACHE_PAGE_SIZE;
priv->cache.tags = kzalloc(sizeof(struct ps3vram_tag) *
CACHE_PAGE_COUNT, GFP_KERNEL);
- if (priv->cache.tags == NULL) {
- dev_err(&dev->core, "Could not allocate cache tags\n");
+ if (!priv->cache.tags)
return -ENOMEM;
- }
dev_info(&dev->core, "Created ram cache: %d entries, %d KiB each\n",
CACHE_PAGE_COUNT, CACHE_PAGE_SIZE / 1024);
@@ -743,7 +741,11 @@ static int ps3vram_probe(struct ps3_system_bus_device *dev)
goto out_unmap_reports;
}
- ps3vram_cache_init(dev);
+ error = ps3vram_cache_init(dev);
+ if (error < 0)
+ goto out_unmap_reports;
+
ps3vram_proc_init(dev);
queue = blk_alloc_queue(GFP_KERNEL);
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index 7f4acebf4657..e397d3ee7308 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -112,7 +112,7 @@ static const struct block_device_operations rsxx_fops = {
static void disk_stats_start(struct rsxx_cardinfo *card, struct bio *bio)
{
- generic_start_io_acct(bio_data_dir(bio), bio_sectors(bio),
+ generic_start_io_acct(card->queue, bio_data_dir(bio), bio_sectors(bio),
&card->gendisk->part0);
}
@@ -120,8 +120,8 @@ static void disk_stats_complete(struct rsxx_cardinfo *card,
struct bio *bio,
unsigned long start_time)
{
- generic_end_io_acct(bio_data_dir(bio), &card->gendisk->part0,
- start_time);
+ generic_end_io_acct(card->queue, bio_data_dir(bio),
+ &card->gendisk->part0, start_time);
}
static void bio_dma_done_cb(struct rsxx_cardinfo *card,
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index d0368682bd43..7cedb4295e9d 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -1,19 +1,12 @@
-/* Copyright 2012 STEC, Inc.
+/*
+ * Driver for sTec s1120 PCIe SSDs. sTec was acquired in 2013 by HGST and HGST
+ * was acquired by Western Digital in 2012.
+ *
+ * Copyright 2012 sTec, Inc.
+ * Copyright (c) 2017 Western Digital Corporation or its affiliates.
*
- * This file is licensed under the terms of the 3-clause
- * BSD License (http://opensource.org/licenses/BSD-3-Clause)
- * or the GNU GPL-2.0 (http://www.gnu.org/licenses/gpl-2.0.html),
- * at your option. Both licenses are also available in the LICENSE file
- * distributed with this project. This file may not be copied, modified,
- * or distributed except in accordance with those terms.
- * Gordoni Waidhofer <gwaidhofer@stec-inc.com>
- * Initial Driver Design!
- * Thomas Swann <tswann@stec-inc.com>
- * Interrupt handling.
- * Ramprasad Chinthekindi <rchinthekindi@stec-inc.com>
- * biomode implementation.
- * Akhil Bhansali <abhansali@stec-inc.com>
- * Added support for DISCARD / FLUSH and FUA.
+ * This file is part of the Linux kernel, and is made available under
+ * the terms of the GNU General Public License version 2.
*/
#include <linux/kernel.h>
@@ -23,11 +16,11 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/workqueue.h>
-#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/hdreg.h>
@@ -37,9 +30,9 @@
#include <linux/version.h>
#include <linux/err.h>
#include <linux/aer.h>
-#include <linux/ctype.h>
#include <linux/wait.h>
-#include <linux/uio.h>
+#include <linux/stringify.h>
+#include <linux/slab_def.h>
#include <scsi/scsi.h>
#include <scsi/sg.h>
#include <linux/io.h>
@@ -51,19 +44,6 @@
static int skd_dbg_level;
static int skd_isr_comp_limit = 4;
-enum {
- STEC_LINK_2_5GTS = 0,
- STEC_LINK_5GTS = 1,
- STEC_LINK_8GTS = 2,
- STEC_LINK_UNKNOWN = 0xFF
-};
-
-enum {
- SKD_FLUSH_INITIALIZER,
- SKD_FLUSH_ZERO_SIZE_FIRST,
- SKD_FLUSH_DATA_SECOND,
-};
-
#define SKD_ASSERT(expr) \
do { \
if (unlikely(!(expr))) { \
@@ -73,17 +53,11 @@ enum {
} while (0)
#define DRV_NAME "skd"
-#define DRV_VERSION "2.2.1"
-#define DRV_BUILD_ID "0260"
#define PFX DRV_NAME ": "
-#define DRV_BIN_VERSION 0x100
-#define DRV_VER_COMPL "2.2.1." DRV_BUILD_ID
-MODULE_AUTHOR("bug-reports: support@stec-inc.com");
-MODULE_LICENSE("Dual BSD/GPL");
+MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver (b" DRV_BUILD_ID ")");
-MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID);
+MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver");
#define PCI_VENDOR_ID_STEC 0x1B39
#define PCI_DEVICE_ID_S1120 0x0001
@@ -96,34 +70,32 @@ MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID);
#define SKD_PAUSE_TIMEOUT (5 * 1000)
#define SKD_N_FITMSG_BYTES (512u)
+#define SKD_MAX_REQ_PER_MSG 14
-#define SKD_N_SPECIAL_CONTEXT 32u
#define SKD_N_SPECIAL_FITMSG_BYTES (128u)
/* SG elements are 32 bytes, so we can make this 4096 and still be under the
* 128KB limit. That allows 4096*4K = 16M xfer size
*/
#define SKD_N_SG_PER_REQ_DEFAULT 256u
-#define SKD_N_SG_PER_SPECIAL 256u
#define SKD_N_COMPLETION_ENTRY 256u
#define SKD_N_READ_CAP_BYTES (8u)
#define SKD_N_INTERNAL_BYTES (512u)
+#define SKD_SKCOMP_SIZE \
+ ((sizeof(struct fit_completion_entry_v1) + \
+ sizeof(struct fit_comp_error_info)) * SKD_N_COMPLETION_ENTRY)
+
/* 5 bits of uniqifier, 0xF800 */
-#define SKD_ID_INCR (0x400)
#define SKD_ID_TABLE_MASK (3u << 8u)
#define SKD_ID_RW_REQUEST (0u << 8u)
#define SKD_ID_INTERNAL (1u << 8u)
-#define SKD_ID_SPECIAL_REQUEST (2u << 8u)
#define SKD_ID_FIT_MSG (3u << 8u)
#define SKD_ID_SLOT_MASK 0x00FFu
#define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu
-#define SKD_N_TIMEOUT_SLOT 4u
-#define SKD_TIMEOUT_SLOT_MASK 3u
-
#define SKD_N_MAX_SECTORS 2048u
#define SKD_MAX_RETRIES 2u
@@ -141,7 +113,6 @@ enum skd_drvr_state {
SKD_DRVR_STATE_ONLINE,
SKD_DRVR_STATE_PAUSING,
SKD_DRVR_STATE_PAUSED,
- SKD_DRVR_STATE_DRAINING_TIMEOUT,
SKD_DRVR_STATE_RESTARTING,
SKD_DRVR_STATE_RESUMING,
SKD_DRVR_STATE_STOPPING,
@@ -158,7 +129,6 @@ enum skd_drvr_state {
#define SKD_WAIT_BOOT_TIMO SKD_TIMER_SECONDS(90u)
#define SKD_STARTING_TIMO SKD_TIMER_SECONDS(8u)
#define SKD_RESTARTING_TIMO SKD_TIMER_MINUTES(4u)
-#define SKD_DRAINING_TIMO SKD_TIMER_SECONDS(6u)
#define SKD_BUSY_TIMO SKD_TIMER_MINUTES(20u)
#define SKD_STARTED_BUSY_TIMO SKD_TIMER_SECONDS(60u)
#define SKD_START_WAIT_SECONDS 90u
@@ -169,12 +139,6 @@ enum skd_req_state {
SKD_REQ_STATE_BUSY,
SKD_REQ_STATE_COMPLETED,
SKD_REQ_STATE_TIMEOUT,
- SKD_REQ_STATE_ABORTED,
-};
-
-enum skd_fit_msg_state {
- SKD_MSG_STATE_IDLE,
- SKD_MSG_STATE_BUSY,
};
enum skd_check_status_action {
@@ -185,34 +149,29 @@ enum skd_check_status_action {
SKD_CHECK_STATUS_BUSY_IMMINENT,
};
-struct skd_fitmsg_context {
- enum skd_fit_msg_state state;
-
- struct skd_fitmsg_context *next;
+struct skd_msg_buf {
+ struct fit_msg_hdr fmh;
+ struct skd_scsi_request scsi[SKD_MAX_REQ_PER_MSG];
+};
+struct skd_fitmsg_context {
u32 id;
- u16 outstanding;
u32 length;
- u32 offset;
- u8 *msg_buf;
+ struct skd_msg_buf *msg_buf;
dma_addr_t mb_dma_address;
};
struct skd_request_context {
enum skd_req_state state;
- struct skd_request_context *next;
-
u16 id;
u32 fitmsg_id;
- struct request *req;
u8 flush_cmd;
- u32 timeout_stamp;
- u8 sg_data_dir;
+ enum dma_data_direction data_dir;
struct scatterlist *sg;
u32 n_sg;
u32 sg_byte_count;
@@ -224,38 +183,19 @@ struct skd_request_context {
struct fit_comp_error_info err_info;
+ blk_status_t status;
};
-#define SKD_DATA_DIR_HOST_TO_CARD 1
-#define SKD_DATA_DIR_CARD_TO_HOST 2
struct skd_special_context {
struct skd_request_context req;
- u8 orphaned;
-
void *data_buf;
dma_addr_t db_dma_address;
- u8 *msg_buf;
+ struct skd_msg_buf *msg_buf;
dma_addr_t mb_dma_address;
};
-struct skd_sg_io {
- fmode_t mode;
- void __user *argp;
-
- struct sg_io_hdr sg;
-
- u8 cdb[16];
-
- u32 dxfer_len;
- u32 iovcnt;
- struct sg_iovec *iov;
- struct sg_iovec no_iov_iov;
-
- struct skd_special_context *skspcl;
-};
-
typedef enum skd_irq_type {
SKD_IRQ_LEGACY,
SKD_IRQ_MSI,
@@ -265,7 +205,7 @@ typedef enum skd_irq_type {
#define SKD_MAX_BARS 2
struct skd_device {
- volatile void __iomem *mem_map[SKD_MAX_BARS];
+ void __iomem *mem_map[SKD_MAX_BARS];
resource_size_t mem_phys[SKD_MAX_BARS];
u32 mem_size[SKD_MAX_BARS];
@@ -276,21 +216,20 @@ struct skd_device {
spinlock_t lock;
struct gendisk *disk;
+ struct blk_mq_tag_set tag_set;
struct request_queue *queue;
+ struct skd_fitmsg_context *skmsg;
struct device *class_dev;
int gendisk_on;
int sync_done;
- atomic_t device_count;
u32 devno;
u32 major;
- char name[32];
char isr_name[30];
enum skd_drvr_state state;
u32 drive_state;
- u32 in_flight;
u32 cur_max_queue_depth;
u32 queue_low_water_mark;
u32 dev_max_queue_depth;
@@ -298,27 +237,20 @@ struct skd_device {
u32 num_fitmsg_context;
u32 num_req_context;
- u32 timeout_slot[SKD_N_TIMEOUT_SLOT];
- u32 timeout_stamp;
- struct skd_fitmsg_context *skmsg_free_list;
struct skd_fitmsg_context *skmsg_table;
- struct skd_request_context *skreq_free_list;
- struct skd_request_context *skreq_table;
-
- struct skd_special_context *skspcl_free_list;
- struct skd_special_context *skspcl_table;
-
struct skd_special_context internal_skspcl;
u32 read_cap_blocksize;
u32 read_cap_last_lba;
int read_cap_is_valid;
int inquiry_is_valid;
u8 inq_serial_num[13]; /*12 chars plus null term */
- u8 id_str[80]; /* holds a composite name (pci + sernum) */
u8 skcomp_cycle;
u32 skcomp_ix;
+ struct kmem_cache *msgbuf_cache;
+ struct kmem_cache *sglist_cache;
+ struct kmem_cache *databuf_cache;
struct fit_completion_entry_v1 *skcomp_table;
struct fit_comp_error_info *skerr_table;
dma_addr_t cq_dma_address;
@@ -329,7 +261,6 @@ struct skd_device {
u32 timer_countdown;
u32 timer_substate;
- int n_special;
int sgs_per_request;
u32 last_mtd;
@@ -343,7 +274,7 @@ struct skd_device {
u32 timo_slot;
-
+ struct work_struct start_queue;
struct work_struct completion_worker;
};
@@ -353,53 +284,32 @@ struct skd_device {
static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
{
- u32 val;
-
- if (likely(skdev->dbg_level < 2))
- return readl(skdev->mem_map[1] + offset);
- else {
- barrier();
- val = readl(skdev->mem_map[1] + offset);
- barrier();
- pr_debug("%s:%s:%d offset %x = %x\n",
- skdev->name, __func__, __LINE__, offset, val);
- return val;
- }
+ u32 val = readl(skdev->mem_map[1] + offset);
+ if (unlikely(skdev->dbg_level >= 2))
+ dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);
+ return val;
}
static inline void skd_reg_write32(struct skd_device *skdev, u32 val,
u32 offset)
{
- if (likely(skdev->dbg_level < 2)) {
- writel(val, skdev->mem_map[1] + offset);
- barrier();
- } else {
- barrier();
- writel(val, skdev->mem_map[1] + offset);
- barrier();
- pr_debug("%s:%s:%d offset %x = %x\n",
- skdev->name, __func__, __LINE__, offset, val);
- }
+ writel(val, skdev->mem_map[1] + offset);
+ if (unlikely(skdev->dbg_level >= 2))
+ dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);
}
static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
u32 offset)
{
- if (likely(skdev->dbg_level < 2)) {
- writeq(val, skdev->mem_map[1] + offset);
- barrier();
- } else {
- barrier();
- writeq(val, skdev->mem_map[1] + offset);
- barrier();
- pr_debug("%s:%s:%d offset %x = %016llx\n",
- skdev->name, __func__, __LINE__, offset, val);
- }
+ writeq(val, skdev->mem_map[1] + offset);
+ if (unlikely(skdev->dbg_level >= 2))
+ dev_dbg(&skdev->pdev->dev, "offset %x = %016llx\n", offset,
+ val);
}
-#define SKD_IRQ_DEFAULT SKD_IRQ_MSI
+#define SKD_IRQ_DEFAULT SKD_IRQ_MSIX
static int skd_isr_type = SKD_IRQ_DEFAULT;
module_param(skd_isr_type, int, 0444);
@@ -412,7 +322,7 @@ static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
module_param(skd_max_req_per_msg, int, 0444);
MODULE_PARM_DESC(skd_max_req_per_msg,
"Maximum SCSI requests packed in a single message."
- " (1-14, default==1)");
+ " (1-" __stringify(SKD_MAX_REQ_PER_MSG) ", default==1)");
#define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
#define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64"
@@ -429,10 +339,10 @@ MODULE_PARM_DESC(skd_sgs_per_request,
"Maximum SG elements per block request."
" (1-4096, default==256)");
-static int skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
+static int skd_max_pass_thru = 1;
module_param(skd_max_pass_thru, int, 0444);
MODULE_PARM_DESC(skd_max_pass_thru,
- "Maximum SCSI pass-thru at a time." " (1-50, default==32)");
+ "Maximum SCSI pass-thru at a time. IGNORED");
module_param(skd_dbg_level, int, 0444);
MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");
@@ -449,9 +359,6 @@ static void skd_send_fitmsg(struct skd_device *skdev,
struct skd_fitmsg_context *skmsg);
static void skd_send_special_fitmsg(struct skd_device *skdev,
struct skd_special_context *skspcl);
-static void skd_request_fn(struct request_queue *rq);
-static void skd_end_request(struct skd_device *skdev,
- struct skd_request_context *skreq, blk_status_t status);
static bool skd_preop_sg_list(struct skd_device *skdev,
struct skd_request_context *skreq);
static void skd_postop_sg_list(struct skd_device *skdev,
@@ -460,19 +367,14 @@ static void skd_postop_sg_list(struct skd_device *skdev,
static void skd_restart_device(struct skd_device *skdev);
static int skd_quiesce_dev(struct skd_device *skdev);
static int skd_unquiesce_dev(struct skd_device *skdev);
-static void skd_release_special(struct skd_device *skdev,
- struct skd_special_context *skspcl);
static void skd_disable_interrupts(struct skd_device *skdev);
static void skd_isr_fwstate(struct skd_device *skdev);
-static void skd_recover_requests(struct skd_device *skdev, int requeue);
+static void skd_recover_requests(struct skd_device *skdev);
static void skd_soft_reset(struct skd_device *skdev);
-static const char *skd_name(struct skd_device *skdev);
const char *skd_drive_state_to_str(int state);
const char *skd_skdev_state_to_str(enum skd_drvr_state state);
static void skd_log_skdev(struct skd_device *skdev, const char *event);
-static void skd_log_skmsg(struct skd_device *skdev,
- struct skd_fitmsg_context *skmsg, const char *event);
static void skd_log_skreq(struct skd_device *skdev,
struct skd_request_context *skreq, const char *event);
@@ -481,18 +383,20 @@ static void skd_log_skreq(struct skd_device *skdev,
* READ/WRITE REQUESTS
*****************************************************************************
*/
-static void skd_fail_all_pending(struct skd_device *skdev)
+static void skd_inc_in_flight(struct request *rq, void *data, bool reserved)
{
- struct request_queue *q = skdev->queue;
- struct request *req;
+ int *count = data;
- for (;; ) {
- req = blk_peek_request(q);
- if (req == NULL)
- break;
- blk_start_request(req);
- __blk_end_request_all(req, BLK_STS_IOERR);
- }
+ (*count)++;
+}
+
+static int skd_in_flight(struct skd_device *skdev)
+{
+ int count = 0;
+
+ blk_mq_tagset_busy_iter(&skdev->tag_set, skd_inc_in_flight, &count);
+
+ return count;
}
static void
@@ -501,9 +405,9 @@ skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
unsigned count)
{
if (data_dir == READ)
- scsi_req->cdb[0] = 0x28;
+ scsi_req->cdb[0] = READ_10;
else
- scsi_req->cdb[0] = 0x2a;
+ scsi_req->cdb[0] = WRITE_10;
scsi_req->cdb[1] = 0;
scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
@@ -522,7 +426,7 @@ skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
{
skreq->flush_cmd = 1;
- scsi_req->cdb[0] = 0x35;
+ scsi_req->cdb[0] = SYNCHRONIZE_CACHE;
scsi_req->cdb[1] = 0;
scsi_req->cdb[2] = 0;
scsi_req->cdb[3] = 0;
@@ -534,307 +438,194 @@ skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
scsi_req->cdb[9] = 0;
}
-static void skd_request_fn_not_online(struct request_queue *q);
-
-static void skd_request_fn(struct request_queue *q)
+/*
+ * Return true if and only if all pending requests should be failed.
+ */
+static bool skd_fail_all(struct request_queue *q)
{
struct skd_device *skdev = q->queuedata;
- struct skd_fitmsg_context *skmsg = NULL;
- struct fit_msg_hdr *fmh = NULL;
- struct skd_request_context *skreq;
- struct request *req = NULL;
- struct skd_scsi_request *scsi_req;
- unsigned long io_flags;
- u32 lba;
- u32 count;
- int data_dir;
- u32 be_lba;
- u32 be_count;
- u64 be_dmaa;
- u64 cmdctxt;
- u32 timo_slot;
- void *cmd_ptr;
- int flush, fua;
-
- if (skdev->state != SKD_DRVR_STATE_ONLINE) {
- skd_request_fn_not_online(q);
- return;
- }
- if (blk_queue_stopped(skdev->queue)) {
- if (skdev->skmsg_free_list == NULL ||
- skdev->skreq_free_list == NULL ||
- skdev->in_flight >= skdev->queue_low_water_mark)
- /* There is still some kind of shortage */
- return;
-
- queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue);
- }
+ SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);
- /*
- * Stop conditions:
- * - There are no more native requests
- * - There are already the maximum number of requests in progress
- * - There are no more skd_request_context entries
- * - There are no more FIT msg buffers
+ skd_log_skdev(skdev, "req_not_online");
+ switch (skdev->state) {
+ case SKD_DRVR_STATE_PAUSING:
+ case SKD_DRVR_STATE_PAUSED:
+ case SKD_DRVR_STATE_STARTING:
+ case SKD_DRVR_STATE_RESTARTING:
+ case SKD_DRVR_STATE_WAIT_BOOT:
+ /* In case of starting, we haven't started the queue,
+ * so we can't get here... but requests are
+ * possibly hanging out waiting for us because we
+ * reported the dev/skd0 already. They'll wait
+ * forever if connect doesn't complete.
+ * What to do??? delay dev/skd0 ??
*/
- for (;; ) {
-
- flush = fua = 0;
-
- req = blk_peek_request(q);
-
- /* Are there any native requests to start? */
- if (req == NULL)
- break;
-
- lba = (u32)blk_rq_pos(req);
- count = blk_rq_sectors(req);
- data_dir = rq_data_dir(req);
- io_flags = req->cmd_flags;
-
- if (req_op(req) == REQ_OP_FLUSH)
- flush++;
-
- if (io_flags & REQ_FUA)
- fua++;
-
- pr_debug("%s:%s:%d new req=%p lba=%u(0x%x) "
- "count=%u(0x%x) dir=%d\n",
- skdev->name, __func__, __LINE__,
- req, lba, lba, count, count, data_dir);
-
- /* At this point we know there is a request */
+ case SKD_DRVR_STATE_BUSY:
+ case SKD_DRVR_STATE_BUSY_IMMINENT:
+ case SKD_DRVR_STATE_BUSY_ERASE:
+ return false;
- /* Are too many requets already in progress? */
- if (skdev->in_flight >= skdev->cur_max_queue_depth) {
- pr_debug("%s:%s:%d qdepth %d, limit %d\n",
- skdev->name, __func__, __LINE__,
- skdev->in_flight, skdev->cur_max_queue_depth);
- break;
- }
+ case SKD_DRVR_STATE_BUSY_SANITIZE:
+ case SKD_DRVR_STATE_STOPPING:
+ case SKD_DRVR_STATE_SYNCING:
+ case SKD_DRVR_STATE_FAULT:
+ case SKD_DRVR_STATE_DISAPPEARED:
+ default:
+ return true;
+ }
+}
- /* Is a skd_request_context available? */
- skreq = skdev->skreq_free_list;
- if (skreq == NULL) {
- pr_debug("%s:%s:%d Out of req=%p\n",
- skdev->name, __func__, __LINE__, q);
- break;
- }
- SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
- SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0);
-
- /* Now we check to see if we can get a fit msg */
- if (skmsg == NULL) {
- if (skdev->skmsg_free_list == NULL) {
- pr_debug("%s:%s:%d Out of msg\n",
- skdev->name, __func__, __LINE__);
- break;
- }
- }
+static blk_status_t skd_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *mqd)
+{
+ struct request *const req = mqd->rq;
+ struct request_queue *const q = req->q;
+ struct skd_device *skdev = q->queuedata;
+ struct skd_fitmsg_context *skmsg;
+ struct fit_msg_hdr *fmh;
+ const u32 tag = blk_mq_unique_tag(req);
+ struct skd_request_context *const skreq = blk_mq_rq_to_pdu(req);
+ struct skd_scsi_request *scsi_req;
+ unsigned long flags = 0;
+ const u32 lba = blk_rq_pos(req);
+ const u32 count = blk_rq_sectors(req);
+ const int data_dir = rq_data_dir(req);
- skreq->flush_cmd = 0;
- skreq->n_sg = 0;
- skreq->sg_byte_count = 0;
+ if (unlikely(skdev->state != SKD_DRVR_STATE_ONLINE))
+ return skd_fail_all(q) ? BLK_STS_IOERR : BLK_STS_RESOURCE;
- /*
- * OK to now dequeue request from q.
- *
- * At this point we are comitted to either start or reject
- * the native request. Note that skd_request_context is
- * available but is still at the head of the free list.
- */
- blk_start_request(req);
- skreq->req = req;
- skreq->fitmsg_id = 0;
-
- /* Either a FIT msg is in progress or we have to start one. */
- if (skmsg == NULL) {
- /* Are there any FIT msg buffers available? */
- skmsg = skdev->skmsg_free_list;
- if (skmsg == NULL) {
- pr_debug("%s:%s:%d Out of msg skdev=%p\n",
- skdev->name, __func__, __LINE__,
- skdev);
- break;
- }
- SKD_ASSERT(skmsg->state == SKD_MSG_STATE_IDLE);
- SKD_ASSERT((skmsg->id & SKD_ID_INCR) == 0);
+ blk_mq_start_request(req);
- skdev->skmsg_free_list = skmsg->next;
+ WARN_ONCE(tag >= skd_max_queue_depth, "%#x > %#x (nr_requests = %lu)\n",
+ tag, skd_max_queue_depth, q->nr_requests);
- skmsg->state = SKD_MSG_STATE_BUSY;
- skmsg->id += SKD_ID_INCR;
+ SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
- /* Initialize the FIT msg header */
- fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
- memset(fmh, 0, sizeof(*fmh));
- fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
- skmsg->length = sizeof(*fmh);
- }
+ dev_dbg(&skdev->pdev->dev,
+ "new req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req, lba,
+ lba, count, count, data_dir);
- skreq->fitmsg_id = skmsg->id;
+ skreq->id = tag + SKD_ID_RW_REQUEST;
+ skreq->flush_cmd = 0;
+ skreq->n_sg = 0;
+ skreq->sg_byte_count = 0;
- /*
- * Note that a FIT msg may have just been started
- * but contains no SoFIT requests yet.
- */
+ skreq->fitmsg_id = 0;
- /*
- * Transcode the request, checking as we go. The outcome of
- * the transcoding is represented by the error variable.
- */
- cmd_ptr = &skmsg->msg_buf[skmsg->length];
- memset(cmd_ptr, 0, 32);
+ skreq->data_dir = data_dir == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
- be_lba = cpu_to_be32(lba);
- be_count = cpu_to_be32(count);
- be_dmaa = cpu_to_be64((u64)skreq->sksg_dma_address);
- cmdctxt = skreq->id + SKD_ID_INCR;
+ if (req->bio && !skd_preop_sg_list(skdev, skreq)) {
+ dev_dbg(&skdev->pdev->dev, "error Out\n");
+ skreq->status = BLK_STS_RESOURCE;
+ blk_mq_complete_request(req);
+ return BLK_STS_OK;
+ }
- scsi_req = cmd_ptr;
- scsi_req->hdr.tag = cmdctxt;
- scsi_req->hdr.sg_list_dma_address = be_dmaa;
+ dma_sync_single_for_device(&skdev->pdev->dev, skreq->sksg_dma_address,
+ skreq->n_sg *
+ sizeof(struct fit_sg_descriptor),
+ DMA_TO_DEVICE);
- if (data_dir == READ)
- skreq->sg_data_dir = SKD_DATA_DIR_CARD_TO_HOST;
- else
- skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD;
+ /* Either a FIT msg is in progress or we have to start one. */
+ if (skd_max_req_per_msg == 1) {
+ skmsg = NULL;
+ } else {
+ spin_lock_irqsave(&skdev->lock, flags);
+ skmsg = skdev->skmsg;
+ }
+ if (!skmsg) {
+ skmsg = &skdev->skmsg_table[tag];
+ skdev->skmsg = skmsg;
- if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
- skd_prep_zerosize_flush_cdb(scsi_req, skreq);
- SKD_ASSERT(skreq->flush_cmd == 1);
+ /* Initialize the FIT msg header */
+ fmh = &skmsg->msg_buf->fmh;
+ memset(fmh, 0, sizeof(*fmh));
+ fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
+ skmsg->length = sizeof(*fmh);
+ } else {
+ fmh = &skmsg->msg_buf->fmh;
+ }
- } else {
- skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
- }
+ skreq->fitmsg_id = skmsg->id;
- if (fua)
- scsi_req->cdb[1] |= SKD_FUA_NV;
+ scsi_req = &skmsg->msg_buf->scsi[fmh->num_protocol_cmds_coalesced];
+ memset(scsi_req, 0, sizeof(*scsi_req));
- if (!req->bio)
- goto skip_sg;
+ scsi_req->hdr.tag = skreq->id;
+ scsi_req->hdr.sg_list_dma_address =
+ cpu_to_be64(skreq->sksg_dma_address);
- if (!skd_preop_sg_list(skdev, skreq)) {
- /*
- * Complete the native request with error.
- * Note that the request context is still at the
- * head of the free list, and that the SoFIT request
- * was encoded into the FIT msg buffer but the FIT
- * msg length has not been updated. In short, the
- * only resource that has been allocated but might
- * not be used is that the FIT msg could be empty.
- */
- pr_debug("%s:%s:%d error Out\n",
- skdev->name, __func__, __LINE__);
- skd_end_request(skdev, skreq, BLK_STS_RESOURCE);
- continue;
- }
+ if (req_op(req) == REQ_OP_FLUSH) {
+ skd_prep_zerosize_flush_cdb(scsi_req, skreq);
+ SKD_ASSERT(skreq->flush_cmd == 1);
+ } else {
+ skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
+ }
-skip_sg:
- scsi_req->hdr.sg_list_len_bytes =
- cpu_to_be32(skreq->sg_byte_count);
+ if (req->cmd_flags & REQ_FUA)
+ scsi_req->cdb[1] |= SKD_FUA_NV;
- /* Complete resource allocations. */
- skdev->skreq_free_list = skreq->next;
- skreq->state = SKD_REQ_STATE_BUSY;
- skreq->id += SKD_ID_INCR;
+ scsi_req->hdr.sg_list_len_bytes = cpu_to_be32(skreq->sg_byte_count);
- skmsg->length += sizeof(struct skd_scsi_request);
- fmh->num_protocol_cmds_coalesced++;
+ /* Complete resource allocations. */
+ skreq->state = SKD_REQ_STATE_BUSY;
- /*
- * Update the active request counts.
- * Capture the timeout timestamp.
- */
- skreq->timeout_stamp = skdev->timeout_stamp;
- timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
- skdev->timeout_slot[timo_slot]++;
- skdev->in_flight++;
- pr_debug("%s:%s:%d req=0x%x busy=%d\n",
- skdev->name, __func__, __LINE__,
- skreq->id, skdev->in_flight);
+ skmsg->length += sizeof(struct skd_scsi_request);
+ fmh->num_protocol_cmds_coalesced++;
- /*
- * If the FIT msg buffer is full send it.
- */
- if (skmsg->length >= SKD_N_FITMSG_BYTES ||
- fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
- skd_send_fitmsg(skdev, skmsg);
- skmsg = NULL;
- fmh = NULL;
- }
- }
+ dev_dbg(&skdev->pdev->dev, "req=0x%x busy=%d\n", skreq->id,
+ skd_in_flight(skdev));
/*
- * Is a FIT msg in progress? If it is empty put the buffer back
- * on the free list. If it is non-empty send what we got.
- * This minimizes latency when there are fewer requests than
- * what fits in a FIT msg.
+ * If the FIT msg buffer is full send it.
*/
- if (skmsg != NULL) {
- /* Bigger than just a FIT msg header? */
- if (skmsg->length > sizeof(struct fit_msg_hdr)) {
- pr_debug("%s:%s:%d sending msg=%p, len %d\n",
- skdev->name, __func__, __LINE__,
- skmsg, skmsg->length);
+ if (skd_max_req_per_msg == 1) {
+ skd_send_fitmsg(skdev, skmsg);
+ } else {
+ if (mqd->last ||
+ fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
skd_send_fitmsg(skdev, skmsg);
- } else {
- /*
- * The FIT msg is empty. It means we got started
- * on the msg, but the requests were rejected.
- */
- skmsg->state = SKD_MSG_STATE_IDLE;
- skmsg->id += SKD_ID_INCR;
- skmsg->next = skdev->skmsg_free_list;
- skdev->skmsg_free_list = skmsg;
+ skdev->skmsg = NULL;
}
- skmsg = NULL;
- fmh = NULL;
+ spin_unlock_irqrestore(&skdev->lock, flags);
}
- /*
- * If req is non-NULL it means there is something to do but
- * we are out of a resource.
- */
- if (req)
- blk_stop_queue(skdev->queue);
+ return BLK_STS_OK;
}
-static void skd_end_request(struct skd_device *skdev,
- struct skd_request_context *skreq, blk_status_t error)
+static enum blk_eh_timer_return skd_timed_out(struct request *req,
+ bool reserved)
{
- if (unlikely(error)) {
- struct request *req = skreq->req;
- char *cmd = (rq_data_dir(req) == READ) ? "read" : "write";
- u32 lba = (u32)blk_rq_pos(req);
- u32 count = blk_rq_sectors(req);
-
- pr_err("(%s): Error cmd=%s sect=%u count=%u id=0x%x\n",
- skd_name(skdev), cmd, lba, count, skreq->id);
- } else
- pr_debug("%s:%s:%d id=0x%x error=%d\n",
- skdev->name, __func__, __LINE__, skreq->id, error);
+ struct skd_device *skdev = req->q->queuedata;
+
+ dev_err(&skdev->pdev->dev, "request with tag %#x timed out\n",
+ blk_mq_unique_tag(req));
- __blk_end_request_all(skreq->req, error);
+ return BLK_EH_RESET_TIMER;
+}
+
+static void skd_complete_rq(struct request *req)
+{
+ struct skd_request_context *skreq = blk_mq_rq_to_pdu(req);
+
+ blk_mq_end_request(req, skreq->status);
}
static bool skd_preop_sg_list(struct skd_device *skdev,
struct skd_request_context *skreq)
{
- struct request *req = skreq->req;
- int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
- int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
- struct scatterlist *sg = &skreq->sg[0];
+ struct request *req = blk_mq_rq_from_pdu(skreq);
+ struct scatterlist *sgl = &skreq->sg[0], *sg;
int n_sg;
int i;
skreq->sg_byte_count = 0;
- /* SKD_ASSERT(skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD ||
- skreq->sg_data_dir == SKD_DATA_DIR_CARD_TO_HOST); */
+ WARN_ON_ONCE(skreq->data_dir != DMA_TO_DEVICE &&
+ skreq->data_dir != DMA_FROM_DEVICE);
- n_sg = blk_rq_map_sg(skdev->queue, req, sg);
+ n_sg = blk_rq_map_sg(skdev->queue, req, sgl);
if (n_sg <= 0)
return false;
@@ -842,7 +633,7 @@ static bool skd_preop_sg_list(struct skd_device *skdev,
* Map scatterlist to PCI bus addresses.
* Note PCI might change the number of entries.
*/
- n_sg = pci_map_sg(skdev->pdev, sg, n_sg, pci_dir);
+ n_sg = pci_map_sg(skdev->pdev, sgl, n_sg, skreq->data_dir);
if (n_sg <= 0)
return false;
@@ -850,10 +641,10 @@ static bool skd_preop_sg_list(struct skd_device *skdev,
skreq->n_sg = n_sg;
- for (i = 0; i < n_sg; i++) {
+ for_each_sg(sgl, sg, n_sg, i) {
struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
- u32 cnt = sg_dma_len(&sg[i]);
- uint64_t dma_addr = sg_dma_address(&sg[i]);
+ u32 cnt = sg_dma_len(sg);
+ uint64_t dma_addr = sg_dma_address(sg);
sgd->control = FIT_SGD_CONTROL_NOT_LAST;
sgd->byte_count = cnt;
@@ -866,16 +657,16 @@ static bool skd_preop_sg_list(struct skd_device *skdev,
skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;
if (unlikely(skdev->dbg_level > 1)) {
- pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
- skdev->name, __func__, __LINE__,
- skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
+ dev_dbg(&skdev->pdev->dev,
+ "skreq=%x sksg_list=%p sksg_dma=%llx\n",
+ skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
for (i = 0; i < n_sg; i++) {
struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
- pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
- "addr=0x%llx next=0x%llx\n",
- skdev->name, __func__, __LINE__,
- i, sgd->byte_count, sgd->control,
- sgd->host_side_addr, sgd->next_desc_ptr);
+
+ dev_dbg(&skdev->pdev->dev,
+ " sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
+ i, sgd->byte_count, sgd->control,
+ sgd->host_side_addr, sgd->next_desc_ptr);
}
}
@@ -885,9 +676,6 @@ static bool skd_preop_sg_list(struct skd_device *skdev,
static void skd_postop_sg_list(struct skd_device *skdev,
struct skd_request_context *skreq)
{
- int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
- int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
-
/*
* restore the next ptr for next IO request so we
* don't have to set it every time.
@@ -895,51 +683,7 @@ static void skd_postop_sg_list(struct skd_device *skdev,
skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
skreq->sksg_dma_address +
((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
- pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, pci_dir);
-}
-
-static void skd_request_fn_not_online(struct request_queue *q)
-{
- struct skd_device *skdev = q->queuedata;
- int error;
-
- SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);
-
- skd_log_skdev(skdev, "req_not_online");
- switch (skdev->state) {
- case SKD_DRVR_STATE_PAUSING:
- case SKD_DRVR_STATE_PAUSED:
- case SKD_DRVR_STATE_STARTING:
- case SKD_DRVR_STATE_RESTARTING:
- case SKD_DRVR_STATE_WAIT_BOOT:
- /* In case of starting, we haven't started the queue,
- * so we can't get here... but requests are
- * possibly hanging out waiting for us because we
- * reported the dev/skd0 already. They'll wait
- * forever if connect doesn't complete.
- * What to do??? delay dev/skd0 ??
- */
- case SKD_DRVR_STATE_BUSY:
- case SKD_DRVR_STATE_BUSY_IMMINENT:
- case SKD_DRVR_STATE_BUSY_ERASE:
- case SKD_DRVR_STATE_DRAINING_TIMEOUT:
- return;
-
- case SKD_DRVR_STATE_BUSY_SANITIZE:
- case SKD_DRVR_STATE_STOPPING:
- case SKD_DRVR_STATE_SYNCING:
- case SKD_DRVR_STATE_FAULT:
- case SKD_DRVR_STATE_DISAPPEARED:
- default:
- error = -EIO;
- break;
- }
-
- /* If we get here, terminate all pending block requeusts
- * with EIO and any scsi pass thru with appropriate sense
- */
-
- skd_fail_all_pending(skdev);
+ pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, skreq->data_dir);
}
/*
@@ -950,12 +694,22 @@ static void skd_request_fn_not_online(struct request_queue *q)
static void skd_timer_tick_not_online(struct skd_device *skdev);
+static void skd_start_queue(struct work_struct *work)
+{
+ struct skd_device *skdev = container_of(work, typeof(*skdev),
+ start_queue);
+
+ /*
+ * Although it is safe to call blk_start_queue() from interrupt
+ * context, blk_mq_start_hw_queues() must not be called from
+ * interrupt context.
+ */
+ blk_mq_start_hw_queues(skdev->queue);
+}
+
static void skd_timer_tick(ulong arg)
{
struct skd_device *skdev = (struct skd_device *)arg;
-
- u32 timo_slot;
- u32 overdue_timestamp;
unsigned long reqflags;
u32 state;
@@ -972,37 +726,9 @@ static void skd_timer_tick(ulong arg)
if (state != skdev->drive_state)
skd_isr_fwstate(skdev);
- if (skdev->state != SKD_DRVR_STATE_ONLINE) {
+ if (skdev->state != SKD_DRVR_STATE_ONLINE)
skd_timer_tick_not_online(skdev);
- goto timer_func_out;
- }
- skdev->timeout_stamp++;
- timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
-
- /*
- * All requests that happened during the previous use of
- * this slot should be done by now. The previous use was
- * over 7 seconds ago.
- */
- if (skdev->timeout_slot[timo_slot] == 0)
- goto timer_func_out;
-
- /* Something is overdue */
- overdue_timestamp = skdev->timeout_stamp - SKD_N_TIMEOUT_SLOT;
-
- pr_debug("%s:%s:%d found %d timeouts, draining busy=%d\n",
- skdev->name, __func__, __LINE__,
- skdev->timeout_slot[timo_slot], skdev->in_flight);
- pr_err("(%s): Overdue IOs (%d), busy %d\n",
- skd_name(skdev), skdev->timeout_slot[timo_slot],
- skdev->in_flight);
- skdev->timer_countdown = SKD_DRAINING_TIMO;
- skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT;
- skdev->timo_slot = timo_slot;
- blk_stop_queue(skdev->queue);
-
-timer_func_out:
mod_timer(&skdev->timer, (jiffies + HZ));
spin_unlock_irqrestore(&skdev->lock, reqflags);
@@ -1015,9 +741,9 @@ static void skd_timer_tick_not_online(struct skd_device *skdev)
case SKD_DRVR_STATE_LOAD:
break;
case SKD_DRVR_STATE_BUSY_SANITIZE:
- pr_debug("%s:%s:%d drive busy sanitize[%x], driver[%x]\n",
- skdev->name, __func__, __LINE__,
- skdev->drive_state, skdev->state);
+ dev_dbg(&skdev->pdev->dev,
+ "drive busy sanitize[%x], driver[%x]\n",
+ skdev->drive_state, skdev->state);
/* If we've been in sanitize for 3 seconds, we figure we're not
* going to get anymore completions, so recover requests now
*/
@@ -1025,22 +751,21 @@ static void skd_timer_tick_not_online(struct skd_device *skdev)
skdev->timer_countdown--;
return;
}
- skd_recover_requests(skdev, 0);
+ skd_recover_requests(skdev);
break;
case SKD_DRVR_STATE_BUSY:
case SKD_DRVR_STATE_BUSY_IMMINENT:
case SKD_DRVR_STATE_BUSY_ERASE:
- pr_debug("%s:%s:%d busy[%x], countdown=%d\n",
- skdev->name, __func__, __LINE__,
- skdev->state, skdev->timer_countdown);
+ dev_dbg(&skdev->pdev->dev, "busy[%x], countdown=%d\n",
+ skdev->state, skdev->timer_countdown);
if (skdev->timer_countdown > 0) {
skdev->timer_countdown--;
return;
}
- pr_debug("%s:%s:%d busy[%x], timedout=%d, restarting device.",
- skdev->name, __func__, __LINE__,
- skdev->state, skdev->timer_countdown);
+ dev_dbg(&skdev->pdev->dev,
+ "busy[%x], timedout=%d, restarting device.",
+ skdev->state, skdev->timer_countdown);
skd_restart_device(skdev);
break;
@@ -1054,12 +779,12 @@ static void skd_timer_tick_not_online(struct skd_device *skdev)
* revcover at some point. */
skdev->state = SKD_DRVR_STATE_FAULT;
- pr_err("(%s): DriveFault Connect Timeout (%x)\n",
- skd_name(skdev), skdev->drive_state);
+ dev_err(&skdev->pdev->dev, "DriveFault Connect Timeout (%x)\n",
+ skdev->drive_state);
/*start the queue so we can respond with error to requests */
/* wakeup anyone waiting for startup complete */
- blk_start_queue(skdev->queue);
+ schedule_work(&skdev->start_queue);
skdev->gendisk_on = -1;
wake_up_interruptible(&skdev->waitq);
break;
@@ -1072,29 +797,6 @@ static void skd_timer_tick_not_online(struct skd_device *skdev)
case SKD_DRVR_STATE_PAUSED:
break;
- case SKD_DRVR_STATE_DRAINING_TIMEOUT:
- pr_debug("%s:%s:%d "
- "draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n",
- skdev->name, __func__, __LINE__,
- skdev->timo_slot,
- skdev->timer_countdown,
- skdev->in_flight,
- skdev->timeout_slot[skdev->timo_slot]);
- /* if the slot has cleared we can let the I/O continue */
- if (skdev->timeout_slot[skdev->timo_slot] == 0) {
- pr_debug("%s:%s:%d Slot drained, starting queue.\n",
- skdev->name, __func__, __LINE__);
- skdev->state = SKD_DRVR_STATE_ONLINE;
- blk_start_queue(skdev->queue);
- return;
- }
- if (skdev->timer_countdown > 0) {
- skdev->timer_countdown--;
- return;
- }
- skd_restart_device(skdev);
- break;
-
case SKD_DRVR_STATE_RESTARTING:
if (skdev->timer_countdown > 0) {
skdev->timer_countdown--;
@@ -1103,8 +805,9 @@ static void skd_timer_tick_not_online(struct skd_device *skdev)
/* For now, we fault the drive. Could attempt resets to
 * recover at some point. */
skdev->state = SKD_DRVR_STATE_FAULT;
- pr_err("(%s): DriveFault Reconnect Timeout (%x)\n",
- skd_name(skdev), skdev->drive_state);
+ dev_err(&skdev->pdev->dev,
+ "DriveFault Reconnect Timeout (%x)\n",
+ skdev->drive_state);
/*
* Recovering does two things:
@@ -1124,18 +827,18 @@ static void skd_timer_tick_not_online(struct skd_device *skdev)
/* It never came out of soft reset. Try to
* recover the requests and then let them
* fail. This is to mitigate hung processes. */
- skd_recover_requests(skdev, 0);
+ skd_recover_requests(skdev);
else {
- pr_err("(%s): Disable BusMaster (%x)\n",
- skd_name(skdev), skdev->drive_state);
+ dev_err(&skdev->pdev->dev, "Disable BusMaster (%x)\n",
+ skdev->drive_state);
pci_disable_device(skdev->pdev);
skd_disable_interrupts(skdev);
- skd_recover_requests(skdev, 0);
+ skd_recover_requests(skdev);
}
/*start the queue so we can respond with error to requests */
/* wakeup anyone waiting for startup complete */
- blk_start_queue(skdev->queue);
+ schedule_work(&skdev->start_queue);
skdev->gendisk_on = -1;
wake_up_interruptible(&skdev->waitq);
break;
@@ -1154,13 +857,11 @@ static int skd_start_timer(struct skd_device *skdev)
{
int rc;
- init_timer(&skdev->timer);
setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev);
rc = mod_timer(&skdev->timer, (jiffies + HZ));
if (rc)
- pr_err("%s: failed to start timer %d\n",
- __func__, rc);
+ dev_err(&skdev->pdev->dev, "failed to start timer %d\n", rc);
return rc;
}
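
The hunk above drops the redundant init_timer() call: setup_timer() already initializes the timer_list before installing the callback, so calling both did the same work twice. A minimal sketch of that pre-timer_setup() idiom, assuming a hypothetical my_dev structure that embeds the timer:

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    struct my_dev {                          /* hypothetical driver-private data */
            struct timer_list timer;
    };

    static void my_tick(unsigned long data)
    {
            struct my_dev *dev = (struct my_dev *)data;

            /* ... periodic housekeeping ... */
            mod_timer(&dev->timer, jiffies + HZ);   /* re-arm one second out */
    }

    static void my_start_timer(struct my_dev *dev)
    {
            /* setup_timer() initializes the timer and sets the callback in one step */
            setup_timer(&dev->timer, my_tick, (unsigned long)dev);
            mod_timer(&dev->timer, jiffies + HZ);
    }
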
@@ -1171,634 +872,6 @@ static void skd_kill_timer(struct skd_device *skdev)
/*
*****************************************************************************
- * IOCTL
- *****************************************************************************
- */
-static int skd_ioctl_sg_io(struct skd_device *skdev,
- fmode_t mode, void __user *argp);
-static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
- struct skd_sg_io *sksgio);
-static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
- struct skd_sg_io *sksgio);
-static int skd_sg_io_prep_buffering(struct skd_device *skdev,
- struct skd_sg_io *sksgio);
-static int skd_sg_io_copy_buffer(struct skd_device *skdev,
- struct skd_sg_io *sksgio, int dxfer_dir);
-static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
- struct skd_sg_io *sksgio);
-static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio);
-static int skd_sg_io_release_skspcl(struct skd_device *skdev,
- struct skd_sg_io *sksgio);
-static int skd_sg_io_put_status(struct skd_device *skdev,
- struct skd_sg_io *sksgio);
-
-static void skd_complete_special(struct skd_device *skdev,
- volatile struct fit_completion_entry_v1
- *skcomp,
- volatile struct fit_comp_error_info *skerr,
- struct skd_special_context *skspcl);
-
-static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode,
- uint cmd_in, ulong arg)
-{
- static const int sg_version_num = 30527;
- int rc = 0, timeout;
- struct gendisk *disk = bdev->bd_disk;
- struct skd_device *skdev = disk->private_data;
- int __user *p = (int __user *)arg;
-
- pr_debug("%s:%s:%d %s: CMD[%s] ioctl mode 0x%x, cmd 0x%x arg %0lx\n",
- skdev->name, __func__, __LINE__,
- disk->disk_name, current->comm, mode, cmd_in, arg);
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- switch (cmd_in) {
- case SG_SET_TIMEOUT:
- rc = get_user(timeout, p);
- if (!rc)
- disk->queue->sg_timeout = clock_t_to_jiffies(timeout);
- break;
- case SG_GET_TIMEOUT:
- rc = jiffies_to_clock_t(disk->queue->sg_timeout);
- break;
- case SG_GET_VERSION_NUM:
- rc = put_user(sg_version_num, p);
- break;
- case SG_IO:
- rc = skd_ioctl_sg_io(skdev, mode, (void __user *)arg);
- break;
-
- default:
- rc = -ENOTTY;
- break;
- }
-
- pr_debug("%s:%s:%d %s: completion rc %d\n",
- skdev->name, __func__, __LINE__, disk->disk_name, rc);
- return rc;
-}
-
-static int skd_ioctl_sg_io(struct skd_device *skdev, fmode_t mode,
- void __user *argp)
-{
- int rc;
- struct skd_sg_io sksgio;
-
- memset(&sksgio, 0, sizeof(sksgio));
- sksgio.mode = mode;
- sksgio.argp = argp;
- sksgio.iov = &sksgio.no_iov_iov;
-
- switch (skdev->state) {
- case SKD_DRVR_STATE_ONLINE:
- case SKD_DRVR_STATE_BUSY_IMMINENT:
- break;
-
- default:
- pr_debug("%s:%s:%d drive not online\n",
- skdev->name, __func__, __LINE__);
- rc = -ENXIO;
- goto out;
- }
-
- rc = skd_sg_io_get_and_check_args(skdev, &sksgio);
- if (rc)
- goto out;
-
- rc = skd_sg_io_obtain_skspcl(skdev, &sksgio);
- if (rc)
- goto out;
-
- rc = skd_sg_io_prep_buffering(skdev, &sksgio);
- if (rc)
- goto out;
-
- rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_TO_DEV);
- if (rc)
- goto out;
-
- rc = skd_sg_io_send_fitmsg(skdev, &sksgio);
- if (rc)
- goto out;
-
- rc = skd_sg_io_await(skdev, &sksgio);
- if (rc)
- goto out;
-
- rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_FROM_DEV);
- if (rc)
- goto out;
-
- rc = skd_sg_io_put_status(skdev, &sksgio);
- if (rc)
- goto out;
-
- rc = 0;
-
-out:
- skd_sg_io_release_skspcl(skdev, &sksgio);
-
- if (sksgio.iov != NULL && sksgio.iov != &sksgio.no_iov_iov)
- kfree(sksgio.iov);
- return rc;
-}
-
-static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
- struct skd_sg_io *sksgio)
-{
- struct sg_io_hdr *sgp = &sksgio->sg;
- int i, acc;
-
- if (!access_ok(VERIFY_WRITE, sksgio->argp, sizeof(sg_io_hdr_t))) {
- pr_debug("%s:%s:%d access sg failed %p\n",
- skdev->name, __func__, __LINE__, sksgio->argp);
- return -EFAULT;
- }
-
- if (__copy_from_user(sgp, sksgio->argp, sizeof(sg_io_hdr_t))) {
- pr_debug("%s:%s:%d copy_from_user sg failed %p\n",
- skdev->name, __func__, __LINE__, sksgio->argp);
- return -EFAULT;
- }
-
- if (sgp->interface_id != SG_INTERFACE_ID_ORIG) {
- pr_debug("%s:%s:%d interface_id invalid 0x%x\n",
- skdev->name, __func__, __LINE__, sgp->interface_id);
- return -EINVAL;
- }
-
- if (sgp->cmd_len > sizeof(sksgio->cdb)) {
- pr_debug("%s:%s:%d cmd_len invalid %d\n",
- skdev->name, __func__, __LINE__, sgp->cmd_len);
- return -EINVAL;
- }
-
- if (sgp->iovec_count > 256) {
- pr_debug("%s:%s:%d iovec_count invalid %d\n",
- skdev->name, __func__, __LINE__, sgp->iovec_count);
- return -EINVAL;
- }
-
- if (sgp->dxfer_len > (PAGE_SIZE * SKD_N_SG_PER_SPECIAL)) {
- pr_debug("%s:%s:%d dxfer_len invalid %d\n",
- skdev->name, __func__, __LINE__, sgp->dxfer_len);
- return -EINVAL;
- }
-
- switch (sgp->dxfer_direction) {
- case SG_DXFER_NONE:
- acc = -1;
- break;
-
- case SG_DXFER_TO_DEV:
- acc = VERIFY_READ;
- break;
-
- case SG_DXFER_FROM_DEV:
- case SG_DXFER_TO_FROM_DEV:
- acc = VERIFY_WRITE;
- break;
-
- default:
- pr_debug("%s:%s:%d dxfer_dir invalid %d\n",
- skdev->name, __func__, __LINE__, sgp->dxfer_direction);
- return -EINVAL;
- }
-
- if (copy_from_user(sksgio->cdb, sgp->cmdp, sgp->cmd_len)) {
- pr_debug("%s:%s:%d copy_from_user cmdp failed %p\n",
- skdev->name, __func__, __LINE__, sgp->cmdp);
- return -EFAULT;
- }
-
- if (sgp->mx_sb_len != 0) {
- if (!access_ok(VERIFY_WRITE, sgp->sbp, sgp->mx_sb_len)) {
- pr_debug("%s:%s:%d access sbp failed %p\n",
- skdev->name, __func__, __LINE__, sgp->sbp);
- return -EFAULT;
- }
- }
-
- if (sgp->iovec_count == 0) {
- sksgio->iov[0].iov_base = sgp->dxferp;
- sksgio->iov[0].iov_len = sgp->dxfer_len;
- sksgio->iovcnt = 1;
- sksgio->dxfer_len = sgp->dxfer_len;
- } else {
- struct sg_iovec *iov;
- uint nbytes = sizeof(*iov) * sgp->iovec_count;
- size_t iov_data_len;
-
- iov = kmalloc(nbytes, GFP_KERNEL);
- if (iov == NULL) {
- pr_debug("%s:%s:%d alloc iovec failed %d\n",
- skdev->name, __func__, __LINE__,
- sgp->iovec_count);
- return -ENOMEM;
- }
- sksgio->iov = iov;
- sksgio->iovcnt = sgp->iovec_count;
-
- if (copy_from_user(iov, sgp->dxferp, nbytes)) {
- pr_debug("%s:%s:%d copy_from_user iovec failed %p\n",
- skdev->name, __func__, __LINE__, sgp->dxferp);
- return -EFAULT;
- }
-
- /*
- * Sum up the vecs, making sure they don't overflow
- */
- iov_data_len = 0;
- for (i = 0; i < sgp->iovec_count; i++) {
- if (iov_data_len + iov[i].iov_len < iov_data_len)
- return -EINVAL;
- iov_data_len += iov[i].iov_len;
- }
-
- /* SG_IO howto says that the shorter of the two wins */
- if (sgp->dxfer_len < iov_data_len) {
- sksgio->iovcnt = iov_shorten((struct iovec *)iov,
- sgp->iovec_count,
- sgp->dxfer_len);
- sksgio->dxfer_len = sgp->dxfer_len;
- } else
- sksgio->dxfer_len = iov_data_len;
- }
-
- if (sgp->dxfer_direction != SG_DXFER_NONE) {
- struct sg_iovec *iov = sksgio->iov;
- for (i = 0; i < sksgio->iovcnt; i++, iov++) {
- if (!access_ok(acc, iov->iov_base, iov->iov_len)) {
- pr_debug("%s:%s:%d access data failed %p/%d\n",
- skdev->name, __func__, __LINE__,
- iov->iov_base, (int)iov->iov_len);
- return -EFAULT;
- }
- }
- }
-
- return 0;
-}
-
-static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
- struct skd_sg_io *sksgio)
-{
- struct skd_special_context *skspcl = NULL;
- int rc;
-
- for (;;) {
- ulong flags;
-
- spin_lock_irqsave(&skdev->lock, flags);
- skspcl = skdev->skspcl_free_list;
- if (skspcl != NULL) {
- skdev->skspcl_free_list =
- (struct skd_special_context *)skspcl->req.next;
- skspcl->req.id += SKD_ID_INCR;
- skspcl->req.state = SKD_REQ_STATE_SETUP;
- skspcl->orphaned = 0;
- skspcl->req.n_sg = 0;
- }
- spin_unlock_irqrestore(&skdev->lock, flags);
-
- if (skspcl != NULL) {
- rc = 0;
- break;
- }
-
- pr_debug("%s:%s:%d blocking\n",
- skdev->name, __func__, __LINE__);
-
- rc = wait_event_interruptible_timeout(
- skdev->waitq,
- (skdev->skspcl_free_list != NULL),
- msecs_to_jiffies(sksgio->sg.timeout));
-
- pr_debug("%s:%s:%d unblocking, rc=%d\n",
- skdev->name, __func__, __LINE__, rc);
-
- if (rc <= 0) {
- if (rc == 0)
- rc = -ETIMEDOUT;
- else
- rc = -EINTR;
- break;
- }
- /*
- * If we get here rc > 0 meaning the timeout to
- * wait_event_interruptible_timeout() had time left, hence the
- * sought event -- non-empty free list -- happened.
- * Retry the allocation.
- */
- }
- sksgio->skspcl = skspcl;
-
- return rc;
-}
-
-static int skd_skreq_prep_buffering(struct skd_device *skdev,
- struct skd_request_context *skreq,
- u32 dxfer_len)
-{
- u32 resid = dxfer_len;
-
- /*
- * The DMA engine must have aligned addresses and byte counts.
- */
- resid += (-resid) & 3;
- skreq->sg_byte_count = resid;
-
- skreq->n_sg = 0;
-
- while (resid > 0) {
- u32 nbytes = PAGE_SIZE;
- u32 ix = skreq->n_sg;
- struct scatterlist *sg = &skreq->sg[ix];
- struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
- struct page *page;
-
- if (nbytes > resid)
- nbytes = resid;
-
- page = alloc_page(GFP_KERNEL);
- if (page == NULL)
- return -ENOMEM;
-
- sg_set_page(sg, page, nbytes, 0);
-
- /* TODO: This should be going through a pci_???()
- * routine to do proper mapping. */
- sksg->control = FIT_SGD_CONTROL_NOT_LAST;
- sksg->byte_count = nbytes;
-
- sksg->host_side_addr = sg_phys(sg);
-
- sksg->dev_side_addr = 0;
- sksg->next_desc_ptr = skreq->sksg_dma_address +
- (ix + 1) * sizeof(*sksg);
-
- skreq->n_sg++;
- resid -= nbytes;
- }
-
- if (skreq->n_sg > 0) {
- u32 ix = skreq->n_sg - 1;
- struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
-
- sksg->control = FIT_SGD_CONTROL_LAST;
- sksg->next_desc_ptr = 0;
- }
-
- if (unlikely(skdev->dbg_level > 1)) {
- u32 i;
-
- pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
- skdev->name, __func__, __LINE__,
- skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
- for (i = 0; i < skreq->n_sg; i++) {
- struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
-
- pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
- "addr=0x%llx next=0x%llx\n",
- skdev->name, __func__, __LINE__,
- i, sgd->byte_count, sgd->control,
- sgd->host_side_addr, sgd->next_desc_ptr);
- }
- }
-
- return 0;
-}
-
-static int skd_sg_io_prep_buffering(struct skd_device *skdev,
- struct skd_sg_io *sksgio)
-{
- struct skd_special_context *skspcl = sksgio->skspcl;
- struct skd_request_context *skreq = &skspcl->req;
- u32 dxfer_len = sksgio->dxfer_len;
- int rc;
-
- rc = skd_skreq_prep_buffering(skdev, skreq, dxfer_len);
- /*
- * Eventually, errors or not, skd_release_special() is called
- * to recover allocations including partial allocations.
- */
- return rc;
-}
-
-static int skd_sg_io_copy_buffer(struct skd_device *skdev,
- struct skd_sg_io *sksgio, int dxfer_dir)
-{
- struct skd_special_context *skspcl = sksgio->skspcl;
- u32 iov_ix = 0;
- struct sg_iovec curiov;
- u32 sksg_ix = 0;
- u8 *bufp = NULL;
- u32 buf_len = 0;
- u32 resid = sksgio->dxfer_len;
- int rc;
-
- curiov.iov_len = 0;
- curiov.iov_base = NULL;
-
- if (dxfer_dir != sksgio->sg.dxfer_direction) {
- if (dxfer_dir != SG_DXFER_TO_DEV ||
- sksgio->sg.dxfer_direction != SG_DXFER_TO_FROM_DEV)
- return 0;
- }
-
- while (resid > 0) {
- u32 nbytes = PAGE_SIZE;
-
- if (curiov.iov_len == 0) {
- curiov = sksgio->iov[iov_ix++];
- continue;
- }
-
- if (buf_len == 0) {
- struct page *page;
- page = sg_page(&skspcl->req.sg[sksg_ix++]);
- bufp = page_address(page);
- buf_len = PAGE_SIZE;
- }
-
- nbytes = min_t(u32, nbytes, resid);
- nbytes = min_t(u32, nbytes, curiov.iov_len);
- nbytes = min_t(u32, nbytes, buf_len);
-
- if (dxfer_dir == SG_DXFER_TO_DEV)
- rc = __copy_from_user(bufp, curiov.iov_base, nbytes);
- else
- rc = __copy_to_user(curiov.iov_base, bufp, nbytes);
-
- if (rc)
- return -EFAULT;
-
- resid -= nbytes;
- curiov.iov_len -= nbytes;
- curiov.iov_base += nbytes;
- buf_len -= nbytes;
- }
-
- return 0;
-}
-
-static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
- struct skd_sg_io *sksgio)
-{
- struct skd_special_context *skspcl = sksgio->skspcl;
- struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
- struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
-
- memset(skspcl->msg_buf, 0, SKD_N_SPECIAL_FITMSG_BYTES);
-
- /* Initialize the FIT msg header */
- fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
- fmh->num_protocol_cmds_coalesced = 1;
-
- /* Initialize the SCSI request */
- if (sksgio->sg.dxfer_direction != SG_DXFER_NONE)
- scsi_req->hdr.sg_list_dma_address =
- cpu_to_be64(skspcl->req.sksg_dma_address);
- scsi_req->hdr.tag = skspcl->req.id;
- scsi_req->hdr.sg_list_len_bytes =
- cpu_to_be32(skspcl->req.sg_byte_count);
- memcpy(scsi_req->cdb, sksgio->cdb, sizeof(scsi_req->cdb));
-
- skspcl->req.state = SKD_REQ_STATE_BUSY;
- skd_send_special_fitmsg(skdev, skspcl);
-
- return 0;
-}
-
-static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio)
-{
- unsigned long flags;
- int rc;
-
- rc = wait_event_interruptible_timeout(skdev->waitq,
- (sksgio->skspcl->req.state !=
- SKD_REQ_STATE_BUSY),
- msecs_to_jiffies(sksgio->sg.
- timeout));
-
- spin_lock_irqsave(&skdev->lock, flags);
-
- if (sksgio->skspcl->req.state == SKD_REQ_STATE_ABORTED) {
- pr_debug("%s:%s:%d skspcl %p aborted\n",
- skdev->name, __func__, __LINE__, sksgio->skspcl);
-
- /* Build check cond, sense and let command finish. */
- /* For a timeout, we must fabricate completion and sense
- * data to complete the command */
- sksgio->skspcl->req.completion.status =
- SAM_STAT_CHECK_CONDITION;
-
- memset(&sksgio->skspcl->req.err_info, 0,
- sizeof(sksgio->skspcl->req.err_info));
- sksgio->skspcl->req.err_info.type = 0x70;
- sksgio->skspcl->req.err_info.key = ABORTED_COMMAND;
- sksgio->skspcl->req.err_info.code = 0x44;
- sksgio->skspcl->req.err_info.qual = 0;
- rc = 0;
- } else if (sksgio->skspcl->req.state != SKD_REQ_STATE_BUSY)
- /* No longer on the adapter. We finish. */
- rc = 0;
- else {
- /* Something's gone wrong. Still busy. Timeout or
- * user interrupted (control-C). Mark as an orphan
- * so it will be disposed when completed. */
- sksgio->skspcl->orphaned = 1;
- sksgio->skspcl = NULL;
- if (rc == 0) {
- pr_debug("%s:%s:%d timed out %p (%u ms)\n",
- skdev->name, __func__, __LINE__,
- sksgio, sksgio->sg.timeout);
- rc = -ETIMEDOUT;
- } else {
- pr_debug("%s:%s:%d cntlc %p\n",
- skdev->name, __func__, __LINE__, sksgio);
- rc = -EINTR;
- }
- }
-
- spin_unlock_irqrestore(&skdev->lock, flags);
-
- return rc;
-}
-
-static int skd_sg_io_put_status(struct skd_device *skdev,
- struct skd_sg_io *sksgio)
-{
- struct sg_io_hdr *sgp = &sksgio->sg;
- struct skd_special_context *skspcl = sksgio->skspcl;
- int resid = 0;
-
- u32 nb = be32_to_cpu(skspcl->req.completion.num_returned_bytes);
-
- sgp->status = skspcl->req.completion.status;
- resid = sksgio->dxfer_len - nb;
-
- sgp->masked_status = sgp->status & STATUS_MASK;
- sgp->msg_status = 0;
- sgp->host_status = 0;
- sgp->driver_status = 0;
- sgp->resid = resid;
- if (sgp->masked_status || sgp->host_status || sgp->driver_status)
- sgp->info |= SG_INFO_CHECK;
-
- pr_debug("%s:%s:%d status %x masked %x resid 0x%x\n",
- skdev->name, __func__, __LINE__,
- sgp->status, sgp->masked_status, sgp->resid);
-
- if (sgp->masked_status == SAM_STAT_CHECK_CONDITION) {
- if (sgp->mx_sb_len > 0) {
- struct fit_comp_error_info *ei = &skspcl->req.err_info;
- u32 nbytes = sizeof(*ei);
-
- nbytes = min_t(u32, nbytes, sgp->mx_sb_len);
-
- sgp->sb_len_wr = nbytes;
-
- if (__copy_to_user(sgp->sbp, ei, nbytes)) {
- pr_debug("%s:%s:%d copy_to_user sense failed %p\n",
- skdev->name, __func__, __LINE__,
- sgp->sbp);
- return -EFAULT;
- }
- }
- }
-
- if (__copy_to_user(sksgio->argp, sgp, sizeof(sg_io_hdr_t))) {
- pr_debug("%s:%s:%d copy_to_user sg failed %p\n",
- skdev->name, __func__, __LINE__, sksgio->argp);
- return -EFAULT;
- }
-
- return 0;
-}
-
-static int skd_sg_io_release_skspcl(struct skd_device *skdev,
- struct skd_sg_io *sksgio)
-{
- struct skd_special_context *skspcl = sksgio->skspcl;
-
- if (skspcl != NULL) {
- ulong flags;
-
- sksgio->skspcl = NULL;
-
- spin_lock_irqsave(&skdev->lock, flags);
- skd_release_special(skdev, skspcl);
- spin_unlock_irqrestore(&skdev->lock, flags);
- }
-
- return 0;
-}
-
-/*
- *****************************************************************************
* INTERNAL REQUESTS -- generated by driver itself
*****************************************************************************
*/
@@ -1811,14 +884,15 @@ static int skd_format_internal_skspcl(struct skd_device *skdev)
uint64_t dma_address;
struct skd_scsi_request *scsi;
- fmh = (struct fit_msg_hdr *)&skspcl->msg_buf[0];
+ fmh = &skspcl->msg_buf->fmh;
fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
fmh->num_protocol_cmds_coalesced = 1;
- scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
+ scsi = &skspcl->msg_buf->scsi[0];
memset(scsi, 0, sizeof(*scsi));
dma_address = skspcl->req.sksg_dma_address;
scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
+ skspcl->req.n_sg = 1;
sgd->control = FIT_SGD_CONTROL_LAST;
sgd->byte_count = 0;
sgd->host_side_addr = skspcl->db_dma_address;
@@ -1846,11 +920,9 @@ static void skd_send_internal_skspcl(struct skd_device *skdev,
*/
return;
- SKD_ASSERT((skspcl->req.id & SKD_ID_INCR) == 0);
skspcl->req.state = SKD_REQ_STATE_BUSY;
- skspcl->req.id += SKD_ID_INCR;
- scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
+ scsi = &skspcl->msg_buf->scsi[0];
scsi->hdr.tag = skspcl->req.id;
memset(scsi->cdb, 0, sizeof(scsi->cdb));
@@ -1940,32 +1012,35 @@ static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key,
/* If the check condition is of special interest, log a message */
if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02)
&& (code == 0x04) && (qual == 0x06)) {
- pr_err("(%s): *** LOST_WRITE_DATA ERROR *** key/asc/"
- "ascq/fruc %02x/%02x/%02x/%02x\n",
- skd_name(skdev), key, code, qual, fruc);
+ dev_err(&skdev->pdev->dev,
+ "*** LOST_WRITE_DATA ERROR *** key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
+ key, code, qual, fruc);
}
}
static void skd_complete_internal(struct skd_device *skdev,
- volatile struct fit_completion_entry_v1
- *skcomp,
- volatile struct fit_comp_error_info *skerr,
+ struct fit_completion_entry_v1 *skcomp,
+ struct fit_comp_error_info *skerr,
struct skd_special_context *skspcl)
{
u8 *buf = skspcl->data_buf;
u8 status;
int i;
- struct skd_scsi_request *scsi =
- (struct skd_scsi_request *)&skspcl->msg_buf[64];
+ struct skd_scsi_request *scsi = &skspcl->msg_buf->scsi[0];
+
+ lockdep_assert_held(&skdev->lock);
SKD_ASSERT(skspcl == &skdev->internal_skspcl);
- pr_debug("%s:%s:%d complete internal %x\n",
- skdev->name, __func__, __LINE__, scsi->cdb[0]);
+ dev_dbg(&skdev->pdev->dev, "complete internal %x\n", scsi->cdb[0]);
+
+ dma_sync_single_for_cpu(&skdev->pdev->dev,
+ skspcl->db_dma_address,
+ skspcl->req.sksg_list[0].byte_count,
+ DMA_BIDIRECTIONAL);
skspcl->req.completion = *skcomp;
skspcl->req.state = SKD_REQ_STATE_IDLE;
- skspcl->req.id += SKD_ID_INCR;
status = skspcl->req.completion.status;
@@ -1981,14 +1056,15 @@ static void skd_complete_internal(struct skd_device *skdev,
skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
else {
if (skdev->state == SKD_DRVR_STATE_STOPPING) {
- pr_debug("%s:%s:%d TUR failed, don't send anymore state 0x%x\n",
- skdev->name, __func__, __LINE__,
- skdev->state);
+ dev_dbg(&skdev->pdev->dev,
+ "TUR failed, don't send anymore state 0x%x\n",
+ skdev->state);
return;
}
- pr_debug("%s:%s:%d **** TUR failed, retry skerr\n",
- skdev->name, __func__, __LINE__);
- skd_send_internal_skspcl(skdev, skspcl, 0x00);
+ dev_dbg(&skdev->pdev->dev,
+ "**** TUR failed, retry skerr\n");
+ skd_send_internal_skspcl(skdev, skspcl,
+ TEST_UNIT_READY);
}
break;
@@ -1997,14 +1073,15 @@ static void skd_complete_internal(struct skd_device *skdev,
skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);
else {
if (skdev->state == SKD_DRVR_STATE_STOPPING) {
- pr_debug("%s:%s:%d write buffer failed, don't send anymore state 0x%x\n",
- skdev->name, __func__, __LINE__,
- skdev->state);
+ dev_dbg(&skdev->pdev->dev,
+ "write buffer failed, don't send anymore state 0x%x\n",
+ skdev->state);
return;
}
- pr_debug("%s:%s:%d **** write buffer failed, retry skerr\n",
- skdev->name, __func__, __LINE__);
- skd_send_internal_skspcl(skdev, skspcl, 0x00);
+ dev_dbg(&skdev->pdev->dev,
+ "**** write buffer failed, retry skerr\n");
+ skd_send_internal_skspcl(skdev, skspcl,
+ TEST_UNIT_READY);
}
break;
@@ -2014,33 +1091,31 @@ static void skd_complete_internal(struct skd_device *skdev,
skd_send_internal_skspcl(skdev, skspcl,
READ_CAPACITY);
else {
- pr_err(
- "(%s):*** W/R Buffer mismatch %d ***\n",
- skd_name(skdev), skdev->connect_retries);
+ dev_err(&skdev->pdev->dev,
+ "*** W/R Buffer mismatch %d ***\n",
+ skdev->connect_retries);
if (skdev->connect_retries <
SKD_MAX_CONNECT_RETRIES) {
skdev->connect_retries++;
skd_soft_reset(skdev);
} else {
- pr_err(
- "(%s): W/R Buffer Connect Error\n",
- skd_name(skdev));
+ dev_err(&skdev->pdev->dev,
+ "W/R Buffer Connect Error\n");
return;
}
}
} else {
if (skdev->state == SKD_DRVR_STATE_STOPPING) {
- pr_debug("%s:%s:%d "
- "read buffer failed, don't send anymore state 0x%x\n",
- skdev->name, __func__, __LINE__,
- skdev->state);
+ dev_dbg(&skdev->pdev->dev,
+ "read buffer failed, don't send anymore state 0x%x\n",
+ skdev->state);
return;
}
- pr_debug("%s:%s:%d "
- "**** read buffer failed, retry skerr\n",
- skdev->name, __func__, __LINE__);
- skd_send_internal_skspcl(skdev, skspcl, 0x00);
+ dev_dbg(&skdev->pdev->dev,
+ "**** read buffer failed, retry skerr\n");
+ skd_send_internal_skspcl(skdev, skspcl,
+ TEST_UNIT_READY);
}
break;
@@ -2054,10 +1129,9 @@ static void skd_complete_internal(struct skd_device *skdev,
(buf[4] << 24) | (buf[5] << 16) |
(buf[6] << 8) | buf[7];
- pr_debug("%s:%s:%d last lba %d, bs %d\n",
- skdev->name, __func__, __LINE__,
- skdev->read_cap_last_lba,
- skdev->read_cap_blocksize);
+ dev_dbg(&skdev->pdev->dev, "last lba %d, bs %d\n",
+ skdev->read_cap_last_lba,
+ skdev->read_cap_blocksize);
set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
@@ -2068,13 +1142,10 @@ static void skd_complete_internal(struct skd_device *skdev,
(skerr->key == MEDIUM_ERROR)) {
skdev->read_cap_last_lba = ~0;
set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
- pr_debug("%s:%s:%d "
- "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n",
- skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n");
skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
} else {
- pr_debug("%s:%s:%d **** READCAP failed, retry TUR\n",
- skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "**** READCAP failed, retry TUR\n");
skd_send_internal_skspcl(skdev, skspcl,
TEST_UNIT_READY);
}
@@ -2091,8 +1162,7 @@ static void skd_complete_internal(struct skd_device *skdev,
}
if (skd_unquiesce_dev(skdev) < 0)
- pr_debug("%s:%s:%d **** failed, to ONLINE device\n",
- skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "**** failed to ONLINE device\n");
/* connection is complete */
skdev->connect_retries = 0;
break;
@@ -2120,27 +1190,20 @@ static void skd_send_fitmsg(struct skd_device *skdev,
struct skd_fitmsg_context *skmsg)
{
u64 qcmd;
- struct fit_msg_hdr *fmh;
- pr_debug("%s:%s:%d dma address 0x%llx, busy=%d\n",
- skdev->name, __func__, __LINE__,
- skmsg->mb_dma_address, skdev->in_flight);
- pr_debug("%s:%s:%d msg_buf 0x%p, offset %x\n",
- skdev->name, __func__, __LINE__,
- skmsg->msg_buf, skmsg->offset);
+ dev_dbg(&skdev->pdev->dev, "dma address 0x%llx, busy=%d\n",
+ skmsg->mb_dma_address, skd_in_flight(skdev));
+ dev_dbg(&skdev->pdev->dev, "msg_buf %p\n", skmsg->msg_buf);
qcmd = skmsg->mb_dma_address;
qcmd |= FIT_QCMD_QID_NORMAL;
- fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
- skmsg->outstanding = fmh->num_protocol_cmds_coalesced;
-
if (unlikely(skdev->dbg_level > 1)) {
u8 *bp = (u8 *)skmsg->msg_buf;
int i;
for (i = 0; i < skmsg->length; i += 8) {
- pr_debug("%s:%s:%d msg[%2d] %8ph\n",
- skdev->name, __func__, __LINE__, i, &bp[i]);
+ dev_dbg(&skdev->pdev->dev, "msg[%2d] %8ph\n", i,
+ &bp[i]);
if (i == 0)
i = 64 - 8;
}
@@ -2160,6 +1223,12 @@ static void skd_send_fitmsg(struct skd_device *skdev,
*/
qcmd |= FIT_QCMD_MSGSIZE_64;
+ dma_sync_single_for_device(&skdev->pdev->dev, skmsg->mb_dma_address,
+ skmsg->length, DMA_TO_DEVICE);
+
+ /* Make sure skd_msg_buf is written before the doorbell is triggered. */
+ smp_wmb();
+
SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
}
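
The dma_sync_single_for_device() and smp_wmb() added above follow the usual ordering rule for streaming DMA: hand the message buffer back to the device, make the writes visible, and only then ring the doorbell register. A hedged sketch of that pattern, with my_dev, MY_QCMD_FLAGS and MY_DOORBELL as hypothetical stand-ins for the driver's own names:

    #include <linux/pci.h>
    #include <linux/dma-mapping.h>
    #include <linux/io.h>

    #define MY_QCMD_FLAGS   0x1ULL           /* hypothetical queue/command flags */
    #define MY_DOORBELL     0x400            /* hypothetical doorbell register offset */

    struct my_dev {                          /* hypothetical */
            struct pci_dev *pdev;
            void __iomem *mmio_base;
    };

    static void my_post_message(struct my_dev *dev, dma_addr_t msg_dma, size_t len)
    {
            /* Give ownership of the DMA buffer back to the device. */
            dma_sync_single_for_device(&dev->pdev->dev, msg_dma, len, DMA_TO_DEVICE);

            /* Make sure the buffer contents are visible before the doorbell
             * write, mirroring the smp_wmb() used in this patch. */
            smp_wmb();

            /* MMIO doorbell: tells the device where to fetch the message. */
            writeq(msg_dma | MY_QCMD_FLAGS, dev->mmio_base + MY_DOORBELL);
    }
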
@@ -2168,30 +1237,31 @@ static void skd_send_special_fitmsg(struct skd_device *skdev,
{
u64 qcmd;
+ WARN_ON_ONCE(skspcl->req.n_sg != 1);
+
if (unlikely(skdev->dbg_level > 1)) {
u8 *bp = (u8 *)skspcl->msg_buf;
int i;
for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
- pr_debug("%s:%s:%d spcl[%2d] %8ph\n",
- skdev->name, __func__, __LINE__, i, &bp[i]);
+ dev_dbg(&skdev->pdev->dev, " spcl[%2d] %8ph\n", i,
+ &bp[i]);
if (i == 0)
i = 64 - 8;
}
- pr_debug("%s:%s:%d skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n",
- skdev->name, __func__, __LINE__,
- skspcl, skspcl->req.id, skspcl->req.sksg_list,
- skspcl->req.sksg_dma_address);
+ dev_dbg(&skdev->pdev->dev,
+ "skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n",
+ skspcl, skspcl->req.id, skspcl->req.sksg_list,
+ skspcl->req.sksg_dma_address);
for (i = 0; i < skspcl->req.n_sg; i++) {
struct fit_sg_descriptor *sgd =
&skspcl->req.sksg_list[i];
- pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
- "addr=0x%llx next=0x%llx\n",
- skdev->name, __func__, __LINE__,
- i, sgd->byte_count, sgd->control,
- sgd->host_side_addr, sgd->next_desc_ptr);
+ dev_dbg(&skdev->pdev->dev,
+ " sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
+ i, sgd->byte_count, sgd->control,
+ sgd->host_side_addr, sgd->next_desc_ptr);
}
}
@@ -2202,6 +1272,20 @@ static void skd_send_special_fitmsg(struct skd_device *skdev,
qcmd = skspcl->mb_dma_address;
qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;
+ dma_sync_single_for_device(&skdev->pdev->dev, skspcl->mb_dma_address,
+ SKD_N_SPECIAL_FITMSG_BYTES, DMA_TO_DEVICE);
+ dma_sync_single_for_device(&skdev->pdev->dev,
+ skspcl->req.sksg_dma_address,
+ 1 * sizeof(struct fit_sg_descriptor),
+ DMA_TO_DEVICE);
+ dma_sync_single_for_device(&skdev->pdev->dev,
+ skspcl->db_dma_address,
+ skspcl->req.sksg_list[0].byte_count,
+ DMA_BIDIRECTIONAL);
+
+ /* Make sure skd_msg_buf is written before the doorbell is triggered. */
+ smp_wmb();
+
SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
}
@@ -2212,8 +1296,8 @@ static void skd_send_special_fitmsg(struct skd_device *skdev,
*/
static void skd_complete_other(struct skd_device *skdev,
- volatile struct fit_completion_entry_v1 *skcomp,
- volatile struct fit_comp_error_info *skerr);
+ struct fit_completion_entry_v1 *skcomp,
+ struct fit_comp_error_info *skerr);
struct sns_info {
u8 type;
@@ -2262,21 +1346,20 @@ static struct sns_info skd_chkstat_table[] = {
static enum skd_check_status_action
skd_check_status(struct skd_device *skdev,
- u8 cmp_status, volatile struct fit_comp_error_info *skerr)
+ u8 cmp_status, struct fit_comp_error_info *skerr)
{
- int i, n;
+ int i;
- pr_err("(%s): key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
- skd_name(skdev), skerr->key, skerr->code, skerr->qual,
- skerr->fruc);
+ dev_err(&skdev->pdev->dev, "key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
+ skerr->key, skerr->code, skerr->qual, skerr->fruc);
- pr_debug("%s:%s:%d stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n",
- skdev->name, __func__, __LINE__, skerr->type, cmp_status,
- skerr->key, skerr->code, skerr->qual, skerr->fruc);
+ dev_dbg(&skdev->pdev->dev,
+ "stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n",
+ skerr->type, cmp_status, skerr->key, skerr->code, skerr->qual,
+ skerr->fruc);
/* Does the info match an entry in the good category? */
- n = sizeof(skd_chkstat_table) / sizeof(skd_chkstat_table[0]);
- for (i = 0; i < n; i++) {
+ for (i = 0; i < ARRAY_SIZE(skd_chkstat_table); i++) {
struct sns_info *sns = &skd_chkstat_table[i];
if (sns->mask & 0x10)
@@ -2300,10 +1383,9 @@ skd_check_status(struct skd_device *skdev,
continue;
if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
- pr_err("(%s): SMART Alert: sense key/asc/ascq "
- "%02x/%02x/%02x\n",
- skd_name(skdev), skerr->key,
- skerr->code, skerr->qual);
+ dev_err(&skdev->pdev->dev,
+ "SMART Alert: sense key/asc/ascq %02x/%02x/%02x\n",
+ skerr->key, skerr->code, skerr->qual);
}
return sns->action;
}
@@ -2312,335 +1394,80 @@ skd_check_status(struct skd_device *skdev,
* zero status means good
*/
if (cmp_status) {
- pr_debug("%s:%s:%d status check: error\n",
- skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "status check: error\n");
return SKD_CHECK_STATUS_REPORT_ERROR;
}
- pr_debug("%s:%s:%d status check good default\n",
- skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "status check good default\n");
return SKD_CHECK_STATUS_REPORT_GOOD;
}
static void skd_resolve_req_exception(struct skd_device *skdev,
- struct skd_request_context *skreq)
+ struct skd_request_context *skreq,
+ struct request *req)
{
u8 cmp_status = skreq->completion.status;
switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
case SKD_CHECK_STATUS_REPORT_GOOD:
case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
- skd_end_request(skdev, skreq, BLK_STS_OK);
+ skreq->status = BLK_STS_OK;
+ blk_mq_complete_request(req);
break;
case SKD_CHECK_STATUS_BUSY_IMMINENT:
skd_log_skreq(skdev, skreq, "retry(busy)");
- blk_requeue_request(skdev->queue, skreq->req);
- pr_info("(%s) drive BUSY imminent\n", skd_name(skdev));
+ blk_requeue_request(skdev->queue, req);
+ dev_info(&skdev->pdev->dev, "drive BUSY imminent\n");
skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
skdev->timer_countdown = SKD_TIMER_MINUTES(20);
skd_quiesce_dev(skdev);
break;
case SKD_CHECK_STATUS_REQUEUE_REQUEST:
- if ((unsigned long) ++skreq->req->special < SKD_MAX_RETRIES) {
+ if ((unsigned long) ++req->special < SKD_MAX_RETRIES) {
skd_log_skreq(skdev, skreq, "retry");
- blk_requeue_request(skdev->queue, skreq->req);
+ blk_requeue_request(skdev->queue, req);
break;
}
- /* fall through to report error */
+ /* fall through */
case SKD_CHECK_STATUS_REPORT_ERROR:
default:
- skd_end_request(skdev, skreq, BLK_STS_IOERR);
+ skreq->status = BLK_STS_IOERR;
+ blk_mq_complete_request(req);
break;
}
}
-/* assume spinlock is already held */
static void skd_release_skreq(struct skd_device *skdev,
struct skd_request_context *skreq)
{
- u32 msg_slot;
- struct skd_fitmsg_context *skmsg;
-
- u32 timo_slot;
-
- /*
- * Reclaim the FIT msg buffer if this is
- * the first of the requests it carried to
- * be completed. The FIT msg buffer used to
- * send this request cannot be reused until
- * we are sure the s1120 card has copied
- * it to its memory. The FIT msg might have
- * contained several requests. As soon as
- * any of them are completed we know that
- * the entire FIT msg was transferred.
- * Only the first completed request will
- * match the FIT msg buffer id. The FIT
- * msg buffer id is immediately updated.
- * When subsequent requests complete the FIT
- * msg buffer id won't match, so we know
- * quite cheaply that it is already done.
- */
- msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK;
- SKD_ASSERT(msg_slot < skdev->num_fitmsg_context);
-
- skmsg = &skdev->skmsg_table[msg_slot];
- if (skmsg->id == skreq->fitmsg_id) {
- SKD_ASSERT(skmsg->state == SKD_MSG_STATE_BUSY);
- SKD_ASSERT(skmsg->outstanding > 0);
- skmsg->outstanding--;
- if (skmsg->outstanding == 0) {
- skmsg->state = SKD_MSG_STATE_IDLE;
- skmsg->id += SKD_ID_INCR;
- skmsg->next = skdev->skmsg_free_list;
- skdev->skmsg_free_list = skmsg;
- }
- }
-
- /*
- * Decrease the number of active requests.
- * Also decrements the count in the timeout slot.
- */
- SKD_ASSERT(skdev->in_flight > 0);
- skdev->in_flight -= 1;
-
- timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
- SKD_ASSERT(skdev->timeout_slot[timo_slot] > 0);
- skdev->timeout_slot[timo_slot] -= 1;
-
- /*
- * Reset backpointer
- */
- skreq->req = NULL;
-
/*
* Reclaim the skd_request_context
*/
skreq->state = SKD_REQ_STATE_IDLE;
- skreq->id += SKD_ID_INCR;
- skreq->next = skdev->skreq_free_list;
- skdev->skreq_free_list = skreq;
}
-#define DRIVER_INQ_EVPD_PAGE_CODE 0xDA
-
-static void skd_do_inq_page_00(struct skd_device *skdev,
- volatile struct fit_completion_entry_v1 *skcomp,
- volatile struct fit_comp_error_info *skerr,
- uint8_t *cdb, uint8_t *buf)
-{
- uint16_t insert_pt, max_bytes, drive_pages, drive_bytes, new_size;
-
- /* Caller requested "supported pages". The driver needs to insert
- * its page.
- */
- pr_debug("%s:%s:%d skd_do_driver_inquiry: modify supported pages.\n",
- skdev->name, __func__, __LINE__);
-
- /* If the device rejected the request because the CDB was
- * improperly formed, then just leave.
- */
- if (skcomp->status == SAM_STAT_CHECK_CONDITION &&
- skerr->key == ILLEGAL_REQUEST && skerr->code == 0x24)
- return;
-
- /* Get the amount of space the caller allocated */
- max_bytes = (cdb[3] << 8) | cdb[4];
-
- /* Get the number of pages actually returned by the device */
- drive_pages = (buf[2] << 8) | buf[3];
- drive_bytes = drive_pages + 4;
- new_size = drive_pages + 1;
-
- /* Supported pages must be in numerical order, so find where
- * the driver page needs to be inserted into the list of
- * pages returned by the device.
- */
- for (insert_pt = 4; insert_pt < drive_bytes; insert_pt++) {
- if (buf[insert_pt] == DRIVER_INQ_EVPD_PAGE_CODE)
- return; /* Device using this page code. abort */
- else if (buf[insert_pt] > DRIVER_INQ_EVPD_PAGE_CODE)
- break;
- }
-
- if (insert_pt < max_bytes) {
- uint16_t u;
-
- /* Shift everything up one byte to make room. */
- for (u = new_size + 3; u > insert_pt; u--)
- buf[u] = buf[u - 1];
- buf[insert_pt] = DRIVER_INQ_EVPD_PAGE_CODE;
-
- /* SCSI byte order increment of num_returned_bytes by 1 */
- skcomp->num_returned_bytes =
- be32_to_cpu(skcomp->num_returned_bytes) + 1;
- skcomp->num_returned_bytes =
- be32_to_cpu(skcomp->num_returned_bytes);
- }
-
- /* update page length field to reflect the driver's page too */
- buf[2] = (uint8_t)((new_size >> 8) & 0xFF);
- buf[3] = (uint8_t)((new_size >> 0) & 0xFF);
-}
-
-static void skd_get_link_info(struct pci_dev *pdev, u8 *speed, u8 *width)
-{
- int pcie_reg;
- u16 pci_bus_speed;
- u8 pci_lanes;
-
- pcie_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
- if (pcie_reg) {
- u16 linksta;
- pci_read_config_word(pdev, pcie_reg + PCI_EXP_LNKSTA, &linksta);
-
- pci_bus_speed = linksta & 0xF;
- pci_lanes = (linksta & 0x3F0) >> 4;
- } else {
- *speed = STEC_LINK_UNKNOWN;
- *width = 0xFF;
- return;
- }
-
- switch (pci_bus_speed) {
- case 1:
- *speed = STEC_LINK_2_5GTS;
- break;
- case 2:
- *speed = STEC_LINK_5GTS;
- break;
- case 3:
- *speed = STEC_LINK_8GTS;
- break;
- default:
- *speed = STEC_LINK_UNKNOWN;
- break;
- }
-
- if (pci_lanes <= 0x20)
- *width = pci_lanes;
- else
- *width = 0xFF;
-}
-
-static void skd_do_inq_page_da(struct skd_device *skdev,
- volatile struct fit_completion_entry_v1 *skcomp,
- volatile struct fit_comp_error_info *skerr,
- uint8_t *cdb, uint8_t *buf)
-{
- struct pci_dev *pdev = skdev->pdev;
- unsigned max_bytes;
- struct driver_inquiry_data inq;
- u16 val;
-
- pr_debug("%s:%s:%d skd_do_driver_inquiry: return driver page\n",
- skdev->name, __func__, __LINE__);
-
- memset(&inq, 0, sizeof(inq));
-
- inq.page_code = DRIVER_INQ_EVPD_PAGE_CODE;
-
- skd_get_link_info(pdev, &inq.pcie_link_speed, &inq.pcie_link_lanes);
- inq.pcie_bus_number = cpu_to_be16(pdev->bus->number);
- inq.pcie_device_number = PCI_SLOT(pdev->devfn);
- inq.pcie_function_number = PCI_FUNC(pdev->devfn);
-
- pci_read_config_word(pdev, PCI_VENDOR_ID, &val);
- inq.pcie_vendor_id = cpu_to_be16(val);
-
- pci_read_config_word(pdev, PCI_DEVICE_ID, &val);
- inq.pcie_device_id = cpu_to_be16(val);
-
- pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &val);
- inq.pcie_subsystem_vendor_id = cpu_to_be16(val);
-
- pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &val);
- inq.pcie_subsystem_device_id = cpu_to_be16(val);
-
- /* Driver version, fixed lenth, padded with spaces on the right */
- inq.driver_version_length = sizeof(inq.driver_version);
- memset(&inq.driver_version, ' ', sizeof(inq.driver_version));
- memcpy(inq.driver_version, DRV_VER_COMPL,
- min(sizeof(inq.driver_version), strlen(DRV_VER_COMPL)));
-
- inq.page_length = cpu_to_be16((sizeof(inq) - 4));
-
- /* Clear the error set by the device */
- skcomp->status = SAM_STAT_GOOD;
- memset((void *)skerr, 0, sizeof(*skerr));
-
- /* copy response into output buffer */
- max_bytes = (cdb[3] << 8) | cdb[4];
- memcpy(buf, &inq, min_t(unsigned, max_bytes, sizeof(inq)));
-
- skcomp->num_returned_bytes =
- be32_to_cpu(min_t(uint16_t, max_bytes, sizeof(inq)));
-}
-
-static void skd_do_driver_inq(struct skd_device *skdev,
- volatile struct fit_completion_entry_v1 *skcomp,
- volatile struct fit_comp_error_info *skerr,
- uint8_t *cdb, uint8_t *buf)
-{
- if (!buf)
- return;
- else if (cdb[0] != INQUIRY)
- return; /* Not an INQUIRY */
- else if ((cdb[1] & 1) == 0)
- return; /* EVPD not set */
- else if (cdb[2] == 0)
- /* Need to add driver's page to supported pages list */
- skd_do_inq_page_00(skdev, skcomp, skerr, cdb, buf);
- else if (cdb[2] == DRIVER_INQ_EVPD_PAGE_CODE)
- /* Caller requested driver's page */
- skd_do_inq_page_da(skdev, skcomp, skerr, cdb, buf);
-}
-
-static unsigned char *skd_sg_1st_page_ptr(struct scatterlist *sg)
-{
- if (!sg)
- return NULL;
- if (!sg_page(sg))
- return NULL;
- return sg_virt(sg);
-}
-
-static void skd_process_scsi_inq(struct skd_device *skdev,
- volatile struct fit_completion_entry_v1
- *skcomp,
- volatile struct fit_comp_error_info *skerr,
- struct skd_special_context *skspcl)
-{
- uint8_t *buf;
- struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
- struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
-
- dma_sync_sg_for_cpu(skdev->class_dev, skspcl->req.sg, skspcl->req.n_sg,
- skspcl->req.sg_data_dir);
- buf = skd_sg_1st_page_ptr(skspcl->req.sg);
-
- if (buf)
- skd_do_driver_inq(skdev, skcomp, skerr, scsi_req->cdb, buf);
-}
-
-
static int skd_isr_completion_posted(struct skd_device *skdev,
int limit, int *enqueued)
{
- volatile struct fit_completion_entry_v1 *skcmp = NULL;
- volatile struct fit_comp_error_info *skerr;
+ struct fit_completion_entry_v1 *skcmp;
+ struct fit_comp_error_info *skerr;
u16 req_id;
- u32 req_slot;
+ u32 tag;
+ u16 hwq = 0;
+ struct request *rq;
struct skd_request_context *skreq;
- u16 cmp_cntxt = 0;
- u8 cmp_status = 0;
- u8 cmp_cycle = 0;
- u32 cmp_bytes = 0;
+ u16 cmp_cntxt;
+ u8 cmp_status;
+ u8 cmp_cycle;
+ u32 cmp_bytes;
int rc = 0;
int processed = 0;
+ lockdep_assert_held(&skdev->lock);
+
for (;; ) {
SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);
@@ -2652,16 +1479,14 @@ static int skd_isr_completion_posted(struct skd_device *skdev,
skerr = &skdev->skerr_table[skdev->skcomp_ix];
- pr_debug("%s:%s:%d "
- "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d "
- "busy=%d rbytes=0x%x proto=%d\n",
- skdev->name, __func__, __LINE__, skdev->skcomp_cycle,
- skdev->skcomp_ix, cmp_cycle, cmp_cntxt, cmp_status,
- skdev->in_flight, cmp_bytes, skdev->proto_ver);
+ dev_dbg(&skdev->pdev->dev,
+ "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d busy=%d rbytes=0x%x proto=%d\n",
+ skdev->skcomp_cycle, skdev->skcomp_ix, cmp_cycle,
+ cmp_cntxt, cmp_status, skd_in_flight(skdev),
+ cmp_bytes, skdev->proto_ver);
if (cmp_cycle != skdev->skcomp_cycle) {
- pr_debug("%s:%s:%d end of completions\n",
- skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "end of completions\n");
break;
}
/*
@@ -2680,49 +1505,38 @@ static int skd_isr_completion_posted(struct skd_device *skdev,
* r/w request (see skd_start() above) or a special request.
*/
req_id = cmp_cntxt;
- req_slot = req_id & SKD_ID_SLOT_AND_TABLE_MASK;
+ tag = req_id & SKD_ID_SLOT_AND_TABLE_MASK;
/* Is this other than a r/w request? */
- if (req_slot >= skdev->num_req_context) {
+ if (tag >= skdev->num_req_context) {
/*
* This is not a completion for a r/w request.
*/
+ WARN_ON_ONCE(blk_mq_tag_to_rq(skdev->tag_set.tags[hwq],
+ tag));
skd_complete_other(skdev, skcmp, skerr);
continue;
}
- skreq = &skdev->skreq_table[req_slot];
+ rq = blk_mq_tag_to_rq(skdev->tag_set.tags[hwq], tag);
+ if (WARN(!rq, "No request for tag %#x -> %#x\n", cmp_cntxt,
+ tag))
+ continue;
+ skreq = blk_mq_rq_to_pdu(rq);
/*
* Make sure the request ID for the slot matches.
*/
if (skreq->id != req_id) {
- pr_debug("%s:%s:%d mismatch comp_id=0x%x req_id=0x%x\n",
- skdev->name, __func__, __LINE__,
- req_id, skreq->id);
- {
- u16 new_id = cmp_cntxt;
- pr_err("(%s): Completion mismatch "
- "comp_id=0x%04x skreq=0x%04x new=0x%04x\n",
- skd_name(skdev), req_id,
- skreq->id, new_id);
+ dev_err(&skdev->pdev->dev,
+ "Completion mismatch comp_id=0x%04x skreq=0x%04x new=0x%04x\n",
+ req_id, skreq->id, cmp_cntxt);
- continue;
- }
+ continue;
}
SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY);
- if (skreq->state == SKD_REQ_STATE_ABORTED) {
- pr_debug("%s:%s:%d reclaim req %p id=%04x\n",
- skdev->name, __func__, __LINE__,
- skreq, skreq->id);
- /* a previously timed out command can
- * now be cleaned up */
- skd_release_skreq(skdev, skreq);
- continue;
- }
-
skreq->completion = *skcmp;
if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) {
skreq->err_info = *skerr;
@@ -2734,27 +1548,17 @@ static int skd_isr_completion_posted(struct skd_device *skdev,
if (skreq->n_sg > 0)
skd_postop_sg_list(skdev, skreq);
- if (!skreq->req) {
- pr_debug("%s:%s:%d NULL backptr skdreq %p, "
- "req=0x%x req_id=0x%x\n",
- skdev->name, __func__, __LINE__,
- skreq, skreq->id, req_id);
- } else {
- /*
- * Capture the outcome and post it back to the
- * native request.
- */
- if (likely(cmp_status == SAM_STAT_GOOD))
- skd_end_request(skdev, skreq, BLK_STS_OK);
- else
- skd_resolve_req_exception(skdev, skreq);
- }
+ skd_release_skreq(skdev, skreq);
/*
- * Release the skreq, its FIT msg (if one), timeout slot,
- * and queue depth.
+ * Capture the outcome and post it back to the native request.
*/
- skd_release_skreq(skdev, skreq);
+ if (likely(cmp_status == SAM_STAT_GOOD)) {
+ skreq->status = BLK_STS_OK;
+ blk_mq_complete_request(rq);
+ } else {
+ skd_resolve_req_exception(skdev, skreq, rq);
+ }
/* skd_isr_comp_limit equal zero means no limit */
if (limit) {
@@ -2765,8 +1569,8 @@ static int skd_isr_completion_posted(struct skd_device *skdev,
}
}
- if ((skdev->state == SKD_DRVR_STATE_PAUSING)
- && (skdev->in_flight) == 0) {
+ if (skdev->state == SKD_DRVR_STATE_PAUSING &&
+ skd_in_flight(skdev) == 0) {
skdev->state = SKD_DRVR_STATE_PAUSED;
wake_up_interruptible(&skdev->waitq);
}
@@ -2775,21 +1579,22 @@ static int skd_isr_completion_posted(struct skd_device *skdev,
}
static void skd_complete_other(struct skd_device *skdev,
- volatile struct fit_completion_entry_v1 *skcomp,
- volatile struct fit_comp_error_info *skerr)
+ struct fit_completion_entry_v1 *skcomp,
+ struct fit_comp_error_info *skerr)
{
u32 req_id = 0;
u32 req_table;
u32 req_slot;
struct skd_special_context *skspcl;
+ lockdep_assert_held(&skdev->lock);
+
req_id = skcomp->tag;
req_table = req_id & SKD_ID_TABLE_MASK;
req_slot = req_id & SKD_ID_SLOT_MASK;
- pr_debug("%s:%s:%d table=0x%x id=0x%x slot=%d\n",
- skdev->name, __func__, __LINE__,
- req_table, req_id, req_slot);
+ dev_dbg(&skdev->pdev->dev, "table=0x%x id=0x%x slot=%d\n", req_table,
+ req_id, req_slot);
/*
* Based on the request id, determine how to dispatch this completion.
@@ -2799,28 +1604,12 @@ static void skd_complete_other(struct skd_device *skdev,
switch (req_table) {
case SKD_ID_RW_REQUEST:
/*
- * The caller, skd_completion_posted_isr() above,
+ * The caller, skd_isr_completion_posted() above,
* handles r/w requests. The only way we get here
* is if the req_slot is out of bounds.
*/
break;
- case SKD_ID_SPECIAL_REQUEST:
- /*
- * Make sure the req_slot is in bounds and that the id
- * matches.
- */
- if (req_slot < skdev->n_special) {
- skspcl = &skdev->skspcl_table[req_slot];
- if (skspcl->req.id == req_id &&
- skspcl->req.state == SKD_REQ_STATE_BUSY) {
- skd_complete_special(skdev,
- skcomp, skerr, skspcl);
- return;
- }
- }
- break;
-
case SKD_ID_INTERNAL:
if (req_slot == 0) {
skspcl = &skdev->internal_skspcl;
@@ -2851,72 +1640,9 @@ static void skd_complete_other(struct skd_device *skdev,
*/
}
-static void skd_complete_special(struct skd_device *skdev,
- volatile struct fit_completion_entry_v1
- *skcomp,
- volatile struct fit_comp_error_info *skerr,
- struct skd_special_context *skspcl)
-{
- pr_debug("%s:%s:%d completing special request %p\n",
- skdev->name, __func__, __LINE__, skspcl);
- if (skspcl->orphaned) {
- /* Discard orphaned request */
- /* ?: Can this release directly or does it need
- * to use a worker? */
- pr_debug("%s:%s:%d release orphaned %p\n",
- skdev->name, __func__, __LINE__, skspcl);
- skd_release_special(skdev, skspcl);
- return;
- }
-
- skd_process_scsi_inq(skdev, skcomp, skerr, skspcl);
-
- skspcl->req.state = SKD_REQ_STATE_COMPLETED;
- skspcl->req.completion = *skcomp;
- skspcl->req.err_info = *skerr;
-
- skd_log_check_status(skdev, skspcl->req.completion.status, skerr->key,
- skerr->code, skerr->qual, skerr->fruc);
-
- wake_up_interruptible(&skdev->waitq);
-}
-
-/* assume spinlock is already held */
-static void skd_release_special(struct skd_device *skdev,
- struct skd_special_context *skspcl)
-{
- int i, was_depleted;
-
- for (i = 0; i < skspcl->req.n_sg; i++) {
- struct page *page = sg_page(&skspcl->req.sg[i]);
- __free_page(page);
- }
-
- was_depleted = (skdev->skspcl_free_list == NULL);
-
- skspcl->req.state = SKD_REQ_STATE_IDLE;
- skspcl->req.id += SKD_ID_INCR;
- skspcl->req.next =
- (struct skd_request_context *)skdev->skspcl_free_list;
- skdev->skspcl_free_list = (struct skd_special_context *)skspcl;
-
- if (was_depleted) {
- pr_debug("%s:%s:%d skspcl was depleted\n",
- skdev->name, __func__, __LINE__);
- /* Free list was depleted. Their might be waiters. */
- wake_up_interruptible(&skdev->waitq);
- }
-}
-
static void skd_reset_skcomp(struct skd_device *skdev)
{
- u32 nbytes;
- struct fit_completion_entry_v1 *skcomp;
-
- nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
- nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
-
- memset(skdev->skcomp_table, 0, nbytes);
+ memset(skdev->skcomp_table, 0, SKD_SKCOMP_SIZE);
skdev->skcomp_ix = 0;
skdev->skcomp_cycle = 1;
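
SKD_SKCOMP_SIZE replaces the byte count that the deleted lines computed by hand. Its definition is not visible in this hunk; assuming it simply folds the two sizeof() terms from the removed code into one constant, a plausible form would be:

    /* Assumed definition -- one completion entry plus one error entry per slot. */
    #define SKD_SKCOMP_SIZE                                          \
            ((sizeof(struct fit_completion_entry_v1) +               \
              sizeof(struct fit_comp_error_info)) *                  \
             SKD_N_COMPLETION_ENTRY)
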
@@ -2941,7 +1667,7 @@ static void skd_completion_worker(struct work_struct *work)
* process everything in compq
*/
skd_isr_completion_posted(skdev, 0, &flush_enqueued);
- skd_request_fn(skdev->queue);
+ schedule_work(&skdev->start_queue);
spin_unlock_irqrestore(&skdev->lock, flags);
}
@@ -2951,14 +1677,13 @@ static void skd_isr_msg_from_dev(struct skd_device *skdev);
static irqreturn_t
skd_isr(int irq, void *ptr)
{
- struct skd_device *skdev;
+ struct skd_device *skdev = ptr;
u32 intstat;
u32 ack;
int rc = 0;
int deferred = 0;
int flush_enqueued = 0;
- skdev = (struct skd_device *)ptr;
spin_lock(&skdev->lock);
for (;; ) {
@@ -2967,8 +1692,8 @@ skd_isr(int irq, void *ptr)
ack = FIT_INT_DEF_MASK;
ack &= intstat;
- pr_debug("%s:%s:%d intstat=0x%x ack=0x%x\n",
- skdev->name, __func__, __LINE__, intstat, ack);
+ dev_dbg(&skdev->pdev->dev, "intstat=0x%x ack=0x%x\n", intstat,
+ ack);
/* As long as there is an int pending on device, keep
* running loop. When none, get out, but if we've never
@@ -3018,12 +1743,12 @@ skd_isr(int irq, void *ptr)
}
if (unlikely(flush_enqueued))
- skd_request_fn(skdev->queue);
+ schedule_work(&skdev->start_queue);
if (deferred)
schedule_work(&skdev->completion_worker);
else if (!flush_enqueued)
- skd_request_fn(skdev->queue);
+ schedule_work(&skdev->start_queue);
spin_unlock(&skdev->lock);
@@ -3033,13 +1758,13 @@ skd_isr(int irq, void *ptr)
static void skd_drive_fault(struct skd_device *skdev)
{
skdev->state = SKD_DRVR_STATE_FAULT;
- pr_err("(%s): Drive FAULT\n", skd_name(skdev));
+ dev_err(&skdev->pdev->dev, "Drive FAULT\n");
}
static void skd_drive_disappeared(struct skd_device *skdev)
{
skdev->state = SKD_DRVR_STATE_DISAPPEARED;
- pr_err("(%s): Drive DISAPPEARED\n", skd_name(skdev));
+ dev_err(&skdev->pdev->dev, "Drive DISAPPEARED\n");
}
static void skd_isr_fwstate(struct skd_device *skdev)
@@ -3052,10 +1777,9 @@ static void skd_isr_fwstate(struct skd_device *skdev)
sense = SKD_READL(skdev, FIT_STATUS);
state = sense & FIT_SR_DRIVE_STATE_MASK;
- pr_err("(%s): s1120 state %s(%d)=>%s(%d)\n",
- skd_name(skdev),
- skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
- skd_drive_state_to_str(state), state);
+ dev_err(&skdev->pdev->dev, "s1120 state %s(%d)=>%s(%d)\n",
+ skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
+ skd_drive_state_to_str(state), state);
skdev->drive_state = state;
@@ -3066,7 +1790,7 @@ static void skd_isr_fwstate(struct skd_device *skdev)
break;
}
if (skdev->state == SKD_DRVR_STATE_RESTARTING)
- skd_recover_requests(skdev, 0);
+ skd_recover_requests(skdev);
if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
skdev->timer_countdown = SKD_STARTING_TIMO;
skdev->state = SKD_DRVR_STATE_STARTING;
@@ -3087,11 +1811,11 @@ static void skd_isr_fwstate(struct skd_device *skdev)
skdev->cur_max_queue_depth * 2 / 3 + 1;
if (skdev->queue_low_water_mark < 1)
skdev->queue_low_water_mark = 1;
- pr_info(
- "(%s): Queue depth limit=%d dev=%d lowat=%d\n",
- skd_name(skdev),
- skdev->cur_max_queue_depth,
- skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
+ dev_info(&skdev->pdev->dev,
+ "Queue depth limit=%d dev=%d lowat=%d\n",
+ skdev->cur_max_queue_depth,
+ skdev->dev_max_queue_depth,
+ skdev->queue_low_water_mark);
skd_refresh_device_data(skdev);
break;
@@ -3107,7 +1831,7 @@ static void skd_isr_fwstate(struct skd_device *skdev)
*/
skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
skdev->timer_countdown = SKD_TIMER_SECONDS(3);
- blk_start_queue(skdev->queue);
+ schedule_work(&skdev->start_queue);
break;
case FIT_SR_DRIVE_BUSY_ERASE:
skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
@@ -3128,8 +1852,7 @@ static void skd_isr_fwstate(struct skd_device *skdev)
}
break;
case FIT_SR_DRIVE_FW_BOOTING:
- pr_debug("%s:%s:%d ISR FIT_SR_DRIVE_FW_BOOTING %s\n",
- skdev->name, __func__, __LINE__, skdev->name);
+ dev_dbg(&skdev->pdev->dev, "ISR FIT_SR_DRIVE_FW_BOOTING\n");
skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
break;
@@ -3141,17 +1864,17 @@ static void skd_isr_fwstate(struct skd_device *skdev)
case FIT_SR_DRIVE_FAULT:
skd_drive_fault(skdev);
- skd_recover_requests(skdev, 0);
- blk_start_queue(skdev->queue);
+ skd_recover_requests(skdev);
+ schedule_work(&skdev->start_queue);
break;
/* PCIe bus returned all Fs? */
case 0xFF:
- pr_info("(%s): state=0x%x sense=0x%x\n",
- skd_name(skdev), state, sense);
+ dev_info(&skdev->pdev->dev, "state=0x%x sense=0x%x\n", state,
+ sense);
skd_drive_disappeared(skdev);
- skd_recover_requests(skdev, 0);
- blk_start_queue(skdev->queue);
+ skd_recover_requests(skdev);
+ schedule_work(&skdev->start_queue);
break;
default:
/*
@@ -3159,92 +1882,33 @@ static void skd_isr_fwstate(struct skd_device *skdev)
*/
break;
}
- pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
- skd_name(skdev),
- skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
- skd_skdev_state_to_str(skdev->state), skdev->state);
+ dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n",
+ skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
+ skd_skdev_state_to_str(skdev->state), skdev->state);
}
-static void skd_recover_requests(struct skd_device *skdev, int requeue)
+static void skd_recover_request(struct request *req, void *data, bool reserved)
{
- int i;
-
- for (i = 0; i < skdev->num_req_context; i++) {
- struct skd_request_context *skreq = &skdev->skreq_table[i];
-
- if (skreq->state == SKD_REQ_STATE_BUSY) {
- skd_log_skreq(skdev, skreq, "recover");
-
- SKD_ASSERT((skreq->id & SKD_ID_INCR) != 0);
- SKD_ASSERT(skreq->req != NULL);
-
- /* Release DMA resources for the request. */
- if (skreq->n_sg > 0)
- skd_postop_sg_list(skdev, skreq);
-
- if (requeue &&
- (unsigned long) ++skreq->req->special <
- SKD_MAX_RETRIES)
- blk_requeue_request(skdev->queue, skreq->req);
- else
- skd_end_request(skdev, skreq, BLK_STS_IOERR);
-
- skreq->req = NULL;
-
- skreq->state = SKD_REQ_STATE_IDLE;
- skreq->id += SKD_ID_INCR;
- }
- if (i > 0)
- skreq[-1].next = skreq;
- skreq->next = NULL;
- }
- skdev->skreq_free_list = skdev->skreq_table;
+ struct skd_device *const skdev = data;
+ struct skd_request_context *skreq = blk_mq_rq_to_pdu(req);
- for (i = 0; i < skdev->num_fitmsg_context; i++) {
- struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i];
+ if (skreq->state != SKD_REQ_STATE_BUSY)
+ return;
- if (skmsg->state == SKD_MSG_STATE_BUSY) {
- skd_log_skmsg(skdev, skmsg, "salvaged");
- SKD_ASSERT((skmsg->id & SKD_ID_INCR) != 0);
- skmsg->state = SKD_MSG_STATE_IDLE;
- skmsg->id += SKD_ID_INCR;
- }
- if (i > 0)
- skmsg[-1].next = skmsg;
- skmsg->next = NULL;
- }
- skdev->skmsg_free_list = skdev->skmsg_table;
+ skd_log_skreq(skdev, skreq, "recover");
- for (i = 0; i < skdev->n_special; i++) {
- struct skd_special_context *skspcl = &skdev->skspcl_table[i];
+ /* Release DMA resources for the request. */
+ if (skreq->n_sg > 0)
+ skd_postop_sg_list(skdev, skreq);
- /* If orphaned, reclaim it because it has already been reported
- * to the process as an error (it was just waiting for
- * a completion that didn't come, and now it will never come)
- * If busy, change to a state that will cause it to error
- * out in the wait routine and let it do the normal
- * reporting and reclaiming
- */
- if (skspcl->req.state == SKD_REQ_STATE_BUSY) {
- if (skspcl->orphaned) {
- pr_debug("%s:%s:%d orphaned %p\n",
- skdev->name, __func__, __LINE__,
- skspcl);
- skd_release_special(skdev, skspcl);
- } else {
- pr_debug("%s:%s:%d not orphaned %p\n",
- skdev->name, __func__, __LINE__,
- skspcl);
- skspcl->req.state = SKD_REQ_STATE_ABORTED;
- }
- }
- }
- skdev->skspcl_free_list = skdev->skspcl_table;
-
- for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++)
- skdev->timeout_slot[i] = 0;
+ skreq->state = SKD_REQ_STATE_IDLE;
+ skreq->status = BLK_STS_IOERR;
+ blk_mq_complete_request(req);
+}
- skdev->in_flight = 0;
+static void skd_recover_requests(struct skd_device *skdev)
+{
+ blk_mq_tagset_busy_iter(&skdev->tag_set, skd_recover_request, skdev);
}
static void skd_isr_msg_from_dev(struct skd_device *skdev)
@@ -3255,8 +1919,8 @@ static void skd_isr_msg_from_dev(struct skd_device *skdev)
mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
- pr_debug("%s:%s:%d mfd=0x%x last_mtd=0x%x\n",
- skdev->name, __func__, __LINE__, mfd, skdev->last_mtd);
+ dev_dbg(&skdev->pdev->dev, "mfd=0x%x last_mtd=0x%x\n", mfd,
+ skdev->last_mtd);
/* ignore any mtd that is an ack for something we didn't send */
if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd))
@@ -3267,13 +1931,10 @@ static void skd_isr_msg_from_dev(struct skd_device *skdev)
skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);
if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
- pr_err("(%s): protocol mismatch\n",
- skdev->name);
- pr_err("(%s): got=%d support=%d\n",
- skdev->name, skdev->proto_ver,
- FIT_PROTOCOL_VERSION_1);
- pr_err("(%s): please upgrade driver\n",
- skdev->name);
+ dev_err(&skdev->pdev->dev, "protocol mismatch\n");
+ dev_err(&skdev->pdev->dev, " got=%d support=%d\n",
+ skdev->proto_ver, FIT_PROTOCOL_VERSION_1);
+ dev_err(&skdev->pdev->dev, " please upgrade driver\n");
skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
skd_soft_reset(skdev);
break;
@@ -3327,9 +1988,8 @@ static void skd_isr_msg_from_dev(struct skd_device *skdev)
SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
skdev->last_mtd = mtd;
- pr_err("(%s): Time sync driver=0x%x device=0x%x\n",
- skd_name(skdev),
- skdev->connect_time_stamp, skdev->drive_jiffies);
+ dev_err(&skdev->pdev->dev, "Time sync driver=0x%x device=0x%x\n",
+ skdev->connect_time_stamp, skdev->drive_jiffies);
break;
case FIT_MTD_ARM_QUEUE:
@@ -3351,8 +2011,7 @@ static void skd_disable_interrupts(struct skd_device *skdev)
sense = SKD_READL(skdev, FIT_CONTROL);
sense &= ~FIT_CR_ENABLE_INTERRUPTS;
SKD_WRITEL(skdev, sense, FIT_CONTROL);
- pr_debug("%s:%s:%d sense 0x%x\n",
- skdev->name, __func__, __LINE__, sense);
+ dev_dbg(&skdev->pdev->dev, "sense 0x%x\n", sense);
/* Note that all 1s are written. A 1-bit means
* disable, a 0 means enable.
@@ -3371,13 +2030,11 @@ static void skd_enable_interrupts(struct skd_device *skdev)
/* Note that the complement of mask is written. A 1-bit means
* disable, a 0 means enable. */
SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
- pr_debug("%s:%s:%d interrupt mask=0x%x\n",
- skdev->name, __func__, __LINE__, ~val);
+ dev_dbg(&skdev->pdev->dev, "interrupt mask=0x%x\n", ~val);
val = SKD_READL(skdev, FIT_CONTROL);
val |= FIT_CR_ENABLE_INTERRUPTS;
- pr_debug("%s:%s:%d control=0x%x\n",
- skdev->name, __func__, __LINE__, val);
+ dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val);
SKD_WRITEL(skdev, val, FIT_CONTROL);
}
@@ -3393,8 +2050,7 @@ static void skd_soft_reset(struct skd_device *skdev)
val = SKD_READL(skdev, FIT_CONTROL);
val |= (FIT_CR_SOFT_RESET);
- pr_debug("%s:%s:%d control=0x%x\n",
- skdev->name, __func__, __LINE__, val);
+ dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val);
SKD_WRITEL(skdev, val, FIT_CONTROL);
}
@@ -3411,8 +2067,7 @@ static void skd_start_device(struct skd_device *skdev)
sense = SKD_READL(skdev, FIT_STATUS);
- pr_debug("%s:%s:%d initial status=0x%x\n",
- skdev->name, __func__, __LINE__, sense);
+ dev_dbg(&skdev->pdev->dev, "initial status=0x%x\n", sense);
state = sense & FIT_SR_DRIVE_STATE_MASK;
skdev->drive_state = state;
@@ -3425,25 +2080,23 @@ static void skd_start_device(struct skd_device *skdev)
switch (skdev->drive_state) {
case FIT_SR_DRIVE_OFFLINE:
- pr_err("(%s): Drive offline...\n", skd_name(skdev));
+ dev_err(&skdev->pdev->dev, "Drive offline...\n");
break;
case FIT_SR_DRIVE_FW_BOOTING:
- pr_debug("%s:%s:%d FIT_SR_DRIVE_FW_BOOTING %s\n",
- skdev->name, __func__, __LINE__, skdev->name);
+ dev_dbg(&skdev->pdev->dev, "FIT_SR_DRIVE_FW_BOOTING\n");
skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
break;
case FIT_SR_DRIVE_BUSY_SANITIZE:
- pr_info("(%s): Start: BUSY_SANITIZE\n",
- skd_name(skdev));
+ dev_info(&skdev->pdev->dev, "Start: BUSY_SANITIZE\n");
skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
break;
case FIT_SR_DRIVE_BUSY_ERASE:
- pr_info("(%s): Start: BUSY_ERASE\n", skd_name(skdev));
+ dev_info(&skdev->pdev->dev, "Start: BUSY_ERASE\n");
skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
break;
@@ -3454,14 +2107,13 @@ static void skd_start_device(struct skd_device *skdev)
break;
case FIT_SR_DRIVE_BUSY:
- pr_err("(%s): Drive Busy...\n", skd_name(skdev));
+ dev_err(&skdev->pdev->dev, "Drive Busy...\n");
skdev->state = SKD_DRVR_STATE_BUSY;
skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
break;
case FIT_SR_DRIVE_SOFT_RESET:
- pr_err("(%s) drive soft reset in prog\n",
- skd_name(skdev));
+ dev_err(&skdev->pdev->dev, "drive soft reset in prog\n");
break;
case FIT_SR_DRIVE_FAULT:
@@ -3471,9 +2123,8 @@ static void skd_start_device(struct skd_device *skdev)
*/
skd_drive_fault(skdev);
/* start the queue so we can respond with error to requests */
- pr_debug("%s:%s:%d starting %s queue\n",
- skdev->name, __func__, __LINE__, skdev->name);
- blk_start_queue(skdev->queue);
+ dev_dbg(&skdev->pdev->dev, "starting queue\n");
+ schedule_work(&skdev->start_queue);
skdev->gendisk_on = -1;
wake_up_interruptible(&skdev->waitq);
break;
@@ -3483,38 +2134,33 @@ static void skd_start_device(struct skd_device *skdev)
* to the BAR1 addresses. */
skd_drive_disappeared(skdev);
/* start the queue so we can respond with error to requests */
- pr_debug("%s:%s:%d starting %s queue to error-out reqs\n",
- skdev->name, __func__, __LINE__, skdev->name);
- blk_start_queue(skdev->queue);
+ dev_dbg(&skdev->pdev->dev,
+ "starting queue to error-out reqs\n");
+ schedule_work(&skdev->start_queue);
skdev->gendisk_on = -1;
wake_up_interruptible(&skdev->waitq);
break;
default:
- pr_err("(%s) Start: unknown state %x\n",
- skd_name(skdev), skdev->drive_state);
+ dev_err(&skdev->pdev->dev, "Start: unknown state %x\n",
+ skdev->drive_state);
break;
}
state = SKD_READL(skdev, FIT_CONTROL);
- pr_debug("%s:%s:%d FIT Control Status=0x%x\n",
- skdev->name, __func__, __LINE__, state);
+ dev_dbg(&skdev->pdev->dev, "FIT Control Status=0x%x\n", state);
state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
- pr_debug("%s:%s:%d Intr Status=0x%x\n",
- skdev->name, __func__, __LINE__, state);
+ dev_dbg(&skdev->pdev->dev, "Intr Status=0x%x\n", state);
state = SKD_READL(skdev, FIT_INT_MASK_HOST);
- pr_debug("%s:%s:%d Intr Mask=0x%x\n",
- skdev->name, __func__, __LINE__, state);
+ dev_dbg(&skdev->pdev->dev, "Intr Mask=0x%x\n", state);
state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
- pr_debug("%s:%s:%d Msg from Dev=0x%x\n",
- skdev->name, __func__, __LINE__, state);
+ dev_dbg(&skdev->pdev->dev, "Msg from Dev=0x%x\n", state);
state = SKD_READL(skdev, FIT_HW_VERSION);
- pr_debug("%s:%s:%d HW version=0x%x\n",
- skdev->name, __func__, __LINE__, state);
+ dev_dbg(&skdev->pdev->dev, "HW version=0x%x\n", state);
spin_unlock_irqrestore(&skdev->lock, flags);
}
@@ -3529,14 +2175,12 @@ static void skd_stop_device(struct skd_device *skdev)
spin_lock_irqsave(&skdev->lock, flags);
if (skdev->state != SKD_DRVR_STATE_ONLINE) {
- pr_err("(%s): skd_stop_device not online no sync\n",
- skd_name(skdev));
+ dev_err(&skdev->pdev->dev, "%s not online no sync\n", __func__);
goto stop_out;
}
if (skspcl->req.state != SKD_REQ_STATE_IDLE) {
- pr_err("(%s): skd_stop_device no special\n",
- skd_name(skdev));
+ dev_err(&skdev->pdev->dev, "%s no special\n", __func__);
goto stop_out;
}
@@ -3554,16 +2198,13 @@ static void skd_stop_device(struct skd_device *skdev)
switch (skdev->sync_done) {
case 0:
- pr_err("(%s): skd_stop_device no sync\n",
- skd_name(skdev));
+ dev_err(&skdev->pdev->dev, "%s no sync\n", __func__);
break;
case 1:
- pr_err("(%s): skd_stop_device sync done\n",
- skd_name(skdev));
+ dev_err(&skdev->pdev->dev, "%s sync done\n", __func__);
break;
default:
- pr_err("(%s): skd_stop_device sync error\n",
- skd_name(skdev));
+ dev_err(&skdev->pdev->dev, "%s sync error\n", __func__);
}
stop_out:
@@ -3593,8 +2234,8 @@ stop_out:
}
if (dev_state != FIT_SR_DRIVE_INIT)
- pr_err("(%s): skd_stop_device state error 0x%02x\n",
- skd_name(skdev), dev_state);
+ dev_err(&skdev->pdev->dev, "%s state error 0x%02x\n", __func__,
+ dev_state);
}
/* assume spinlock is held */
@@ -3607,8 +2248,7 @@ static void skd_restart_device(struct skd_device *skdev)
state = SKD_READL(skdev, FIT_STATUS);
- pr_debug("%s:%s:%d drive status=0x%x\n",
- skdev->name, __func__, __LINE__, state);
+ dev_dbg(&skdev->pdev->dev, "drive status=0x%x\n", state);
state &= FIT_SR_DRIVE_STATE_MASK;
skdev->drive_state = state;
@@ -3628,9 +2268,8 @@ static int skd_quiesce_dev(struct skd_device *skdev)
switch (skdev->state) {
case SKD_DRVR_STATE_BUSY:
case SKD_DRVR_STATE_BUSY_IMMINENT:
- pr_debug("%s:%s:%d stopping %s queue\n",
- skdev->name, __func__, __LINE__, skdev->name);
- blk_stop_queue(skdev->queue);
+ dev_dbg(&skdev->pdev->dev, "stopping queue\n");
+ blk_mq_stop_hw_queues(skdev->queue);
break;
case SKD_DRVR_STATE_ONLINE:
case SKD_DRVR_STATE_STOPPING:
@@ -3642,8 +2281,8 @@ static int skd_quiesce_dev(struct skd_device *skdev)
case SKD_DRVR_STATE_RESUMING:
default:
rc = -EINVAL;
- pr_debug("%s:%s:%d state [%d] not implemented\n",
- skdev->name, __func__, __LINE__, skdev->state);
+ dev_dbg(&skdev->pdev->dev, "state [%d] not implemented\n",
+ skdev->state);
}
return rc;
}
@@ -3655,8 +2294,7 @@ static int skd_unquiesce_dev(struct skd_device *skdev)
skd_log_skdev(skdev, "unquiesce");
if (skdev->state == SKD_DRVR_STATE_ONLINE) {
- pr_debug("%s:%s:%d **** device already ONLINE\n",
- skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "**** device already ONLINE\n");
return 0;
}
if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
@@ -3669,8 +2307,7 @@ static int skd_unquiesce_dev(struct skd_device *skdev)
* to become available.
*/
skdev->state = SKD_DRVR_STATE_BUSY;
- pr_debug("%s:%s:%d drive BUSY state\n",
- skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "drive BUSY state\n");
return 0;
}
@@ -3689,26 +2326,24 @@ static int skd_unquiesce_dev(struct skd_device *skdev)
case SKD_DRVR_STATE_IDLE:
case SKD_DRVR_STATE_LOAD:
skdev->state = SKD_DRVR_STATE_ONLINE;
- pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
- skd_name(skdev),
- skd_skdev_state_to_str(prev_driver_state),
- prev_driver_state, skd_skdev_state_to_str(skdev->state),
- skdev->state);
- pr_debug("%s:%s:%d **** device ONLINE...starting block queue\n",
- skdev->name, __func__, __LINE__);
- pr_debug("%s:%s:%d starting %s queue\n",
- skdev->name, __func__, __LINE__, skdev->name);
- pr_info("(%s): STEC s1120 ONLINE\n", skd_name(skdev));
- blk_start_queue(skdev->queue);
+ dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n",
+ skd_skdev_state_to_str(prev_driver_state),
+ prev_driver_state, skd_skdev_state_to_str(skdev->state),
+ skdev->state);
+ dev_dbg(&skdev->pdev->dev,
+ "**** device ONLINE...starting block queue\n");
+ dev_dbg(&skdev->pdev->dev, "starting queue\n");
+ dev_info(&skdev->pdev->dev, "STEC s1120 ONLINE\n");
+ schedule_work(&skdev->start_queue);
skdev->gendisk_on = 1;
wake_up_interruptible(&skdev->waitq);
break;
case SKD_DRVR_STATE_DISAPPEARED:
default:
- pr_debug("%s:%s:%d **** driver state %d, not implemented \n",
- skdev->name, __func__, __LINE__,
- skdev->state);
+ dev_dbg(&skdev->pdev->dev,
+ "**** driver state %d, not implemented\n",
+ skdev->state);
return -EBUSY;
}
return 0;
@@ -3726,11 +2361,10 @@ static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data)
unsigned long flags;
spin_lock_irqsave(&skdev->lock, flags);
- pr_debug("%s:%s:%d MSIX = 0x%x\n",
- skdev->name, __func__, __LINE__,
- SKD_READL(skdev, FIT_INT_STATUS_HOST));
- pr_err("(%s): MSIX reserved irq %d = 0x%x\n", skd_name(skdev),
- irq, SKD_READL(skdev, FIT_INT_STATUS_HOST));
+ dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
+ SKD_READL(skdev, FIT_INT_STATUS_HOST));
+ dev_err(&skdev->pdev->dev, "MSIX reserved irq %d = 0x%x\n", irq,
+ SKD_READL(skdev, FIT_INT_STATUS_HOST));
SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST);
spin_unlock_irqrestore(&skdev->lock, flags);
return IRQ_HANDLED;
@@ -3742,9 +2376,8 @@ static irqreturn_t skd_statec_isr(int irq, void *skd_host_data)
unsigned long flags;
spin_lock_irqsave(&skdev->lock, flags);
- pr_debug("%s:%s:%d MSIX = 0x%x\n",
- skdev->name, __func__, __LINE__,
- SKD_READL(skdev, FIT_INT_STATUS_HOST));
+ dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
+ SKD_READL(skdev, FIT_INT_STATUS_HOST));
SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST);
skd_isr_fwstate(skdev);
spin_unlock_irqrestore(&skdev->lock, flags);
@@ -3759,19 +2392,18 @@ static irqreturn_t skd_comp_q(int irq, void *skd_host_data)
int deferred;
spin_lock_irqsave(&skdev->lock, flags);
- pr_debug("%s:%s:%d MSIX = 0x%x\n",
- skdev->name, __func__, __LINE__,
- SKD_READL(skdev, FIT_INT_STATUS_HOST));
+ dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
+ SKD_READL(skdev, FIT_INT_STATUS_HOST));
SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST);
deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
&flush_enqueued);
if (flush_enqueued)
- skd_request_fn(skdev->queue);
+ schedule_work(&skdev->start_queue);
if (deferred)
schedule_work(&skdev->completion_worker);
else if (!flush_enqueued)
- skd_request_fn(skdev->queue);
+ schedule_work(&skdev->start_queue);
spin_unlock_irqrestore(&skdev->lock, flags);
@@ -3784,9 +2416,8 @@ static irqreturn_t skd_msg_isr(int irq, void *skd_host_data)
unsigned long flags;
spin_lock_irqsave(&skdev->lock, flags);
- pr_debug("%s:%s:%d MSIX = 0x%x\n",
- skdev->name, __func__, __LINE__,
- SKD_READL(skdev, FIT_INT_STATUS_HOST));
+ dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
+ SKD_READL(skdev, FIT_INT_STATUS_HOST));
SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST);
skd_isr_msg_from_dev(skdev);
spin_unlock_irqrestore(&skdev->lock, flags);
@@ -3799,9 +2430,8 @@ static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
unsigned long flags;
spin_lock_irqsave(&skdev->lock, flags);
- pr_debug("%s:%s:%d MSIX = 0x%x\n",
- skdev->name, __func__, __LINE__,
- SKD_READL(skdev, FIT_INT_STATUS_HOST));
+ dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
+ SKD_READL(skdev, FIT_INT_STATUS_HOST));
SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST);
spin_unlock_irqrestore(&skdev->lock, flags);
return IRQ_HANDLED;
@@ -3850,8 +2480,7 @@ static int skd_acquire_msix(struct skd_device *skdev)
rc = pci_alloc_irq_vectors(pdev, SKD_MAX_MSIX_COUNT, SKD_MAX_MSIX_COUNT,
PCI_IRQ_MSIX);
if (rc < 0) {
- pr_err("(%s): failed to enable MSI-X %d\n",
- skd_name(skdev), rc);
+ dev_err(&skdev->pdev->dev, "failed to enable MSI-X %d\n", rc);
goto out;
}
@@ -3859,8 +2488,7 @@ static int skd_acquire_msix(struct skd_device *skdev)
sizeof(struct skd_msix_entry), GFP_KERNEL);
if (!skdev->msix_entries) {
rc = -ENOMEM;
- pr_err("(%s): msix table allocation error\n",
- skd_name(skdev));
+ dev_err(&skdev->pdev->dev, "msix table allocation error\n");
goto out;
}
@@ -3877,16 +2505,15 @@ static int skd_acquire_msix(struct skd_device *skdev)
msix_entries[i].handler, 0,
qentry->isr_name, skdev);
if (rc) {
- pr_err("(%s): Unable to register(%d) MSI-X "
- "handler %d: %s\n",
- skd_name(skdev), rc, i, qentry->isr_name);
+ dev_err(&skdev->pdev->dev,
+ "Unable to register(%d) MSI-X handler %d: %s\n",
+ rc, i, qentry->isr_name);
goto msix_out;
}
}
- pr_debug("%s:%s:%d %s: <%s> msix %d irq(s) enabled\n",
- skdev->name, __func__, __LINE__,
- pci_name(pdev), skdev->name, SKD_MAX_MSIX_COUNT);
+ dev_dbg(&skdev->pdev->dev, "%d msix irq(s) enabled\n",
+ SKD_MAX_MSIX_COUNT);
return 0;
msix_out:
@@ -3909,8 +2536,8 @@ static int skd_acquire_irq(struct skd_device *skdev)
if (!rc)
return 0;
- pr_err("(%s): failed to enable MSI-X, re-trying with MSI %d\n",
- skd_name(skdev), rc);
+ dev_err(&skdev->pdev->dev,
+ "failed to enable MSI-X, re-trying with MSI %d\n", rc);
}
snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d", DRV_NAME,
@@ -3920,8 +2547,8 @@ static int skd_acquire_irq(struct skd_device *skdev)
irq_flag |= PCI_IRQ_MSI;
rc = pci_alloc_irq_vectors(pdev, 1, 1, irq_flag);
if (rc < 0) {
- pr_err("(%s): failed to allocate the MSI interrupt %d\n",
- skd_name(skdev), rc);
+ dev_err(&skdev->pdev->dev,
+ "failed to allocate the MSI interrupt %d\n", rc);
return rc;
}
@@ -3930,8 +2557,8 @@ static int skd_acquire_irq(struct skd_device *skdev)
skdev->isr_name, skdev);
if (rc) {
pci_free_irq_vectors(pdev);
- pr_err("(%s): failed to allocate interrupt %d\n",
- skd_name(skdev), rc);
+ dev_err(&skdev->pdev->dev, "failed to allocate interrupt %d\n",
+ rc);
return rc;
}
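
The interrupt-setup hunks above only change the log calls, but they sit on top of the usual pci_alloc_irq_vectors() pattern: try MSI-X, fall back to MSI or legacy INTx. A condensed sketch under those assumptions (foo_isr and the single-vector request are illustrative, not the driver's real topology):

#include <linux/pci.h>
#include <linux/interrupt.h>

static irqreturn_t foo_isr(int irq, void *data)
{
	/* A real handler would read and acknowledge the device here. */
	return IRQ_HANDLED;
}

static int foo_setup_irq(struct pci_dev *pdev, void *drvdata)
{
	int rc;

	/* Ask for exactly one vector; allow MSI-X, MSI or legacy INTx. */
	rc = pci_alloc_irq_vectors(pdev, 1, 1,
				   PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
	if (rc < 0) {
		dev_err(&pdev->dev, "failed to allocate IRQ vector: %d\n", rc);
		return rc;
	}

	rc = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, 0), foo_isr,
			      0, "foo", drvdata);
	if (rc) {
		dev_err(&pdev->dev, "failed to request IRQ: %d\n", rc);
		pci_free_irq_vectors(pdev);
	}
	return rc;
}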
@@ -3965,20 +2592,45 @@ static void skd_release_irq(struct skd_device *skdev)
*****************************************************************************
*/
+static void *skd_alloc_dma(struct skd_device *skdev, struct kmem_cache *s,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ enum dma_data_direction dir)
+{
+ struct device *dev = &skdev->pdev->dev;
+ void *buf;
+
+ buf = kmem_cache_alloc(s, gfp);
+ if (!buf)
+ return NULL;
+ *dma_handle = dma_map_single(dev, buf, s->size, dir);
+ if (dma_mapping_error(dev, *dma_handle)) {
+ kmem_cache_free(s, buf);
+ buf = NULL;
+ }
+ return buf;
+}
+
+static void skd_free_dma(struct skd_device *skdev, struct kmem_cache *s,
+ void *vaddr, dma_addr_t dma_handle,
+ enum dma_data_direction dir)
+{
+ if (!vaddr)
+ return;
+
+ dma_unmap_single(&skdev->pdev->dev, dma_handle, s->size, dir);
+ kmem_cache_free(s, vaddr);
+}
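
skd_alloc_dma()/skd_free_dma() above swap coherent pci_alloc_consistent() buffers for slab objects that are streaming-mapped with dma_map_single(). A self-contained sketch of the same idea, with hypothetical names and GFP_KERNEL allocation for simplicity:

#include <linux/slab.h>
#include <linux/dma-mapping.h>

static void *foo_alloc_dma(struct device *dev, struct kmem_cache *cache,
			   dma_addr_t *dma, enum dma_data_direction dir)
{
	void *buf = kmem_cache_zalloc(cache, GFP_KERNEL);

	if (!buf)
		return NULL;

	/*
	 * Streaming mapping: cheaper than a coherent allocation, but the
	 * caller must respect the declared DMA direction and sync points.
	 */
	*dma = dma_map_single(dev, buf, kmem_cache_size(cache), dir);
	if (dma_mapping_error(dev, *dma)) {
		kmem_cache_free(cache, buf);
		return NULL;
	}
	return buf;
}

static void foo_free_dma(struct device *dev, struct kmem_cache *cache,
			 void *buf, dma_addr_t dma,
			 enum dma_data_direction dir)
{
	if (!buf)
		return;

	dma_unmap_single(dev, dma, kmem_cache_size(cache), dir);
	kmem_cache_free(cache, buf);
}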
+
static int skd_cons_skcomp(struct skd_device *skdev)
{
int rc = 0;
struct fit_completion_entry_v1 *skcomp;
- u32 nbytes;
- nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
- nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
+ dev_dbg(&skdev->pdev->dev,
+ "comp pci_alloc, total bytes %zd entries %d\n",
+ SKD_SKCOMP_SIZE, SKD_N_COMPLETION_ENTRY);
- pr_debug("%s:%s:%d comp pci_alloc, total bytes %d entries %d\n",
- skdev->name, __func__, __LINE__,
- nbytes, SKD_N_COMPLETION_ENTRY);
-
- skcomp = pci_zalloc_consistent(skdev->pdev, nbytes,
+ skcomp = pci_zalloc_consistent(skdev->pdev, SKD_SKCOMP_SIZE,
&skdev->cq_dma_address);
if (skcomp == NULL) {
@@ -4000,14 +2652,14 @@ static int skd_cons_skmsg(struct skd_device *skdev)
int rc = 0;
u32 i;
- pr_debug("%s:%s:%d skmsg_table kzalloc, struct %lu, count %u total %lu\n",
- skdev->name, __func__, __LINE__,
- sizeof(struct skd_fitmsg_context),
- skdev->num_fitmsg_context,
- sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);
+ dev_dbg(&skdev->pdev->dev,
+ "skmsg_table kcalloc, struct %lu, count %u total %lu\n",
+ sizeof(struct skd_fitmsg_context), skdev->num_fitmsg_context,
+ sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);
- skdev->skmsg_table = kzalloc(sizeof(struct skd_fitmsg_context)
- *skdev->num_fitmsg_context, GFP_KERNEL);
+ skdev->skmsg_table = kcalloc(skdev->num_fitmsg_context,
+ sizeof(struct skd_fitmsg_context),
+ GFP_KERNEL);
if (skdev->skmsg_table == NULL) {
rc = -ENOMEM;
goto err_out;
@@ -4020,9 +2672,8 @@ static int skd_cons_skmsg(struct skd_device *skdev)
skmsg->id = i + SKD_ID_FIT_MSG;
- skmsg->state = SKD_MSG_STATE_IDLE;
skmsg->msg_buf = pci_alloc_consistent(skdev->pdev,
- SKD_N_FITMSG_BYTES + 64,
+ SKD_N_FITMSG_BYTES,
&skmsg->mb_dma_address);
if (skmsg->msg_buf == NULL) {
@@ -4030,22 +2681,13 @@ static int skd_cons_skmsg(struct skd_device *skdev)
goto err_out;
}
- skmsg->offset = (u32)((u64)skmsg->msg_buf &
- (~FIT_QCMD_BASE_ADDRESS_MASK));
- skmsg->msg_buf += ~FIT_QCMD_BASE_ADDRESS_MASK;
- skmsg->msg_buf = (u8 *)((u64)skmsg->msg_buf &
- FIT_QCMD_BASE_ADDRESS_MASK);
- skmsg->mb_dma_address += ~FIT_QCMD_BASE_ADDRESS_MASK;
- skmsg->mb_dma_address &= FIT_QCMD_BASE_ADDRESS_MASK;
+ WARN(((uintptr_t)skmsg->msg_buf | skmsg->mb_dma_address) &
+ (FIT_QCMD_ALIGN - 1),
+ "not aligned: msg_buf %p mb_dma_address %#llx\n",
+ skmsg->msg_buf, skmsg->mb_dma_address);
memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES);
-
- skmsg->next = &skmsg[1];
}
- /* Free list is in order starting with the 0th entry. */
- skdev->skmsg_table[i - 1].next = NULL;
- skdev->skmsg_free_list = skdev->skmsg_table;
-
err_out:
return rc;
}
@@ -4055,18 +2697,14 @@ static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
dma_addr_t *ret_dma_addr)
{
struct fit_sg_descriptor *sg_list;
- u32 nbytes;
-
- nbytes = sizeof(*sg_list) * n_sg;
- sg_list = pci_alloc_consistent(skdev->pdev, nbytes, ret_dma_addr);
+ sg_list = skd_alloc_dma(skdev, skdev->sglist_cache, ret_dma_addr,
+ GFP_DMA | __GFP_ZERO, DMA_TO_DEVICE);
if (sg_list != NULL) {
uint64_t dma_address = *ret_dma_addr;
u32 i;
- memset(sg_list, 0, nbytes);
-
for (i = 0; i < n_sg - 1; i++) {
uint64_t ndp_off;
ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor);
@@ -4079,153 +2717,63 @@ static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
return sg_list;
}
-static int skd_cons_skreq(struct skd_device *skdev)
+static void skd_free_sg_list(struct skd_device *skdev,
+ struct fit_sg_descriptor *sg_list,
+ dma_addr_t dma_addr)
{
- int rc = 0;
- u32 i;
-
- pr_debug("%s:%s:%d skreq_table kzalloc, struct %lu, count %u total %lu\n",
- skdev->name, __func__, __LINE__,
- sizeof(struct skd_request_context),
- skdev->num_req_context,
- sizeof(struct skd_request_context) * skdev->num_req_context);
-
- skdev->skreq_table = kzalloc(sizeof(struct skd_request_context)
- * skdev->num_req_context, GFP_KERNEL);
- if (skdev->skreq_table == NULL) {
- rc = -ENOMEM;
- goto err_out;
- }
-
- pr_debug("%s:%s:%d alloc sg_table sg_per_req %u scatlist %lu total %lu\n",
- skdev->name, __func__, __LINE__,
- skdev->sgs_per_request, sizeof(struct scatterlist),
- skdev->sgs_per_request * sizeof(struct scatterlist));
-
- for (i = 0; i < skdev->num_req_context; i++) {
- struct skd_request_context *skreq;
-
- skreq = &skdev->skreq_table[i];
-
- skreq->id = i + SKD_ID_RW_REQUEST;
- skreq->state = SKD_REQ_STATE_IDLE;
-
- skreq->sg = kzalloc(sizeof(struct scatterlist) *
- skdev->sgs_per_request, GFP_KERNEL);
- if (skreq->sg == NULL) {
- rc = -ENOMEM;
- goto err_out;
- }
- sg_init_table(skreq->sg, skdev->sgs_per_request);
-
- skreq->sksg_list = skd_cons_sg_list(skdev,
- skdev->sgs_per_request,
- &skreq->sksg_dma_address);
-
- if (skreq->sksg_list == NULL) {
- rc = -ENOMEM;
- goto err_out;
- }
-
- skreq->next = &skreq[1];
- }
-
- /* Free list is in order starting with the 0th entry. */
- skdev->skreq_table[i - 1].next = NULL;
- skdev->skreq_free_list = skdev->skreq_table;
+ if (WARN_ON_ONCE(!sg_list))
+ return;
-err_out:
- return rc;
+ skd_free_dma(skdev, skdev->sglist_cache, sg_list, dma_addr,
+ DMA_TO_DEVICE);
}
-static int skd_cons_skspcl(struct skd_device *skdev)
+static int skd_init_request(struct blk_mq_tag_set *set, struct request *rq,
+ unsigned int hctx_idx, unsigned int numa_node)
{
- int rc = 0;
- u32 i, nbytes;
-
- pr_debug("%s:%s:%d skspcl_table kzalloc, struct %lu, count %u total %lu\n",
- skdev->name, __func__, __LINE__,
- sizeof(struct skd_special_context),
- skdev->n_special,
- sizeof(struct skd_special_context) * skdev->n_special);
-
- skdev->skspcl_table = kzalloc(sizeof(struct skd_special_context)
- * skdev->n_special, GFP_KERNEL);
- if (skdev->skspcl_table == NULL) {
- rc = -ENOMEM;
- goto err_out;
- }
+ struct skd_device *skdev = set->driver_data;
+ struct skd_request_context *skreq = blk_mq_rq_to_pdu(rq);
- for (i = 0; i < skdev->n_special; i++) {
- struct skd_special_context *skspcl;
-
- skspcl = &skdev->skspcl_table[i];
-
- skspcl->req.id = i + SKD_ID_SPECIAL_REQUEST;
- skspcl->req.state = SKD_REQ_STATE_IDLE;
-
- skspcl->req.next = &skspcl[1].req;
-
- nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
-
- skspcl->msg_buf =
- pci_zalloc_consistent(skdev->pdev, nbytes,
- &skspcl->mb_dma_address);
- if (skspcl->msg_buf == NULL) {
- rc = -ENOMEM;
- goto err_out;
- }
-
- skspcl->req.sg = kzalloc(sizeof(struct scatterlist) *
- SKD_N_SG_PER_SPECIAL, GFP_KERNEL);
- if (skspcl->req.sg == NULL) {
- rc = -ENOMEM;
- goto err_out;
- }
-
- skspcl->req.sksg_list = skd_cons_sg_list(skdev,
- SKD_N_SG_PER_SPECIAL,
- &skspcl->req.
- sksg_dma_address);
- if (skspcl->req.sksg_list == NULL) {
- rc = -ENOMEM;
- goto err_out;
- }
- }
+ skreq->state = SKD_REQ_STATE_IDLE;
+ skreq->sg = (void *)(skreq + 1);
+ sg_init_table(skreq->sg, skd_sgs_per_request);
+ skreq->sksg_list = skd_cons_sg_list(skdev, skd_sgs_per_request,
+ &skreq->sksg_dma_address);
- /* Free list is in order starting with the 0th entry. */
- skdev->skspcl_table[i - 1].req.next = NULL;
- skdev->skspcl_free_list = skdev->skspcl_table;
+ return skreq->sksg_list ? 0 : -ENOMEM;
+}
- return rc;
+static void skd_exit_request(struct blk_mq_tag_set *set, struct request *rq,
+ unsigned int hctx_idx)
+{
+ struct skd_device *skdev = set->driver_data;
+ struct skd_request_context *skreq = blk_mq_rq_to_pdu(rq);
-err_out:
- return rc;
+ skd_free_sg_list(skdev, skreq->sksg_list, skreq->sksg_dma_address);
}
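
skd_init_request()/skd_exit_request() above move per-request setup into blk-mq's ->init_request()/->exit_request() hooks, with the scatterlist carved out of the same allocation as the PDU (skreq->sg = (void *)(skreq + 1)). A minimal sketch of that layout with invented names; the callback signatures match this kernel generation:

#include <linux/blk-mq.h>
#include <linux/scatterlist.h>

#define FOO_MAX_SEGS	32

struct foo_request {
	int n_sg;
	struct scatterlist *sg;	/* storage follows this struct in the PDU */
};

static int foo_init_request(struct blk_mq_tag_set *set, struct request *rq,
			    unsigned int hctx_idx, unsigned int numa_node)
{
	struct foo_request *frq = blk_mq_rq_to_pdu(rq);

	/*
	 * Works because the tag set's cmd_size was declared as
	 * sizeof(*frq) + FOO_MAX_SEGS * sizeof(struct scatterlist).
	 */
	frq->sg = (struct scatterlist *)(frq + 1);
	sg_init_table(frq->sg, FOO_MAX_SEGS);
	return 0;
}

static void foo_exit_request(struct blk_mq_tag_set *set, struct request *rq,
			     unsigned int hctx_idx)
{
	/* Nothing was allocated separately, so nothing to free here. */
}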
static int skd_cons_sksb(struct skd_device *skdev)
{
int rc = 0;
struct skd_special_context *skspcl;
- u32 nbytes;
skspcl = &skdev->internal_skspcl;
skspcl->req.id = 0 + SKD_ID_INTERNAL;
skspcl->req.state = SKD_REQ_STATE_IDLE;
- nbytes = SKD_N_INTERNAL_BYTES;
-
- skspcl->data_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
- &skspcl->db_dma_address);
+ skspcl->data_buf = skd_alloc_dma(skdev, skdev->databuf_cache,
+ &skspcl->db_dma_address,
+ GFP_DMA | __GFP_ZERO,
+ DMA_BIDIRECTIONAL);
if (skspcl->data_buf == NULL) {
rc = -ENOMEM;
goto err_out;
}
- nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
- skspcl->msg_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
- &skspcl->mb_dma_address);
+ skspcl->msg_buf = skd_alloc_dma(skdev, skdev->msgbuf_cache,
+ &skspcl->mb_dma_address,
+ GFP_DMA | __GFP_ZERO, DMA_TO_DEVICE);
if (skspcl->msg_buf == NULL) {
rc = -ENOMEM;
goto err_out;
@@ -4247,6 +2795,14 @@ err_out:
return rc;
}
+static const struct blk_mq_ops skd_mq_ops = {
+ .queue_rq = skd_mq_queue_rq,
+ .complete = skd_complete_rq,
+ .timeout = skd_timed_out,
+ .init_request = skd_init_request,
+ .exit_request = skd_exit_request,
+};
+
static int skd_cons_disk(struct skd_device *skdev)
{
int rc = 0;
@@ -4268,31 +2824,46 @@ static int skd_cons_disk(struct skd_device *skdev)
disk->fops = &skd_blockdev_ops;
disk->private_data = skdev;
- q = blk_init_queue(skd_request_fn, &skdev->lock);
- if (!q) {
- rc = -ENOMEM;
+ memset(&skdev->tag_set, 0, sizeof(skdev->tag_set));
+ skdev->tag_set.ops = &skd_mq_ops;
+ skdev->tag_set.nr_hw_queues = 1;
+ skdev->tag_set.queue_depth = skd_max_queue_depth;
+ skdev->tag_set.cmd_size = sizeof(struct skd_request_context) +
+ skdev->sgs_per_request * sizeof(struct scatterlist);
+ skdev->tag_set.numa_node = NUMA_NO_NODE;
+ skdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
+ BLK_MQ_F_SG_MERGE |
+ BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_FIFO);
+ skdev->tag_set.driver_data = skdev;
+ rc = blk_mq_alloc_tag_set(&skdev->tag_set);
+ if (rc)
+ goto err_out;
+ q = blk_mq_init_queue(&skdev->tag_set);
+ if (IS_ERR(q)) {
+ blk_mq_free_tag_set(&skdev->tag_set);
+ rc = PTR_ERR(q);
goto err_out;
}
- blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
+ q->queuedata = skdev;
skdev->queue = q;
disk->queue = q;
- q->queuedata = skdev;
blk_queue_write_cache(q, true, true);
blk_queue_max_segments(q, skdev->sgs_per_request);
blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);
- /* set sysfs ptimal_io_size to 8K */
+ /* set optimal I/O size to 8KB */
blk_queue_io_opt(q, 8192);
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
+ blk_queue_rq_timeout(q, 8 * HZ);
+
spin_lock_irqsave(&skdev->lock, flags);
- pr_debug("%s:%s:%d stopping %s queue\n",
- skdev->name, __func__, __LINE__, skdev->name);
- blk_stop_queue(skdev->queue);
+ dev_dbg(&skdev->pdev->dev, "stopping queue\n");
+ blk_mq_stop_hw_queues(skdev->queue);
spin_unlock_irqrestore(&skdev->lock, flags);
err_out:
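
The skd_cons_disk() hunk above replaces blk_init_queue() with an explicitly filled-in blk_mq_tag_set followed by blk_mq_init_queue(). A sketch of that setup sequence under invented names and arbitrary sizes (queue depth, segment count and the trivial ->queue_rq are placeholders):

#include <linux/blk-mq.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/err.h>

#define FOO_QUEUE_DEPTH	64
#define FOO_MAX_SEGS	32

struct foo_pdu {
	struct scatterlist *sg;
};

struct foo_device {
	struct blk_mq_tag_set tag_set;
	struct request_queue *queue;
};

static blk_status_t foo_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	/* A real driver would map bd->rq and issue it to hardware here. */
	blk_mq_start_request(bd->rq);
	return BLK_STS_IOERR;
}

static const struct blk_mq_ops foo_mq_ops = {
	.queue_rq	= foo_queue_rq,
};

static int foo_setup_queue(struct foo_device *foo)
{
	struct request_queue *q;
	int rc;

	memset(&foo->tag_set, 0, sizeof(foo->tag_set));
	foo->tag_set.ops = &foo_mq_ops;
	foo->tag_set.nr_hw_queues = 1;
	foo->tag_set.queue_depth = FOO_QUEUE_DEPTH;
	foo->tag_set.numa_node = NUMA_NO_NODE;
	/* Per-request driver context plus an inline scatterlist. */
	foo->tag_set.cmd_size = sizeof(struct foo_pdu) +
				FOO_MAX_SEGS * sizeof(struct scatterlist);
	foo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	foo->tag_set.driver_data = foo;

	rc = blk_mq_alloc_tag_set(&foo->tag_set);
	if (rc)
		return rc;

	q = blk_mq_init_queue(&foo->tag_set);
	if (IS_ERR(q)) {
		blk_mq_free_tag_set(&foo->tag_set);
		return PTR_ERR(q);
	}
	q->queuedata = foo;
	foo->queue = q;
	return 0;
}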
@@ -4306,13 +2877,13 @@ static struct skd_device *skd_construct(struct pci_dev *pdev)
{
struct skd_device *skdev;
int blk_major = skd_major;
+ size_t size;
int rc;
skdev = kzalloc(sizeof(*skdev), GFP_KERNEL);
if (!skdev) {
- pr_err(PFX "(%s): memory alloc failure\n",
- pci_name(pdev));
+ dev_err(&pdev->dev, "memory alloc failure\n");
return NULL;
}
@@ -4320,60 +2891,71 @@ static struct skd_device *skd_construct(struct pci_dev *pdev)
skdev->pdev = pdev;
skdev->devno = skd_next_devno++;
skdev->major = blk_major;
- sprintf(skdev->name, DRV_NAME "%d", skdev->devno);
skdev->dev_max_queue_depth = 0;
skdev->num_req_context = skd_max_queue_depth;
skdev->num_fitmsg_context = skd_max_queue_depth;
- skdev->n_special = skd_max_pass_thru;
skdev->cur_max_queue_depth = 1;
skdev->queue_low_water_mark = 1;
skdev->proto_ver = 99;
skdev->sgs_per_request = skd_sgs_per_request;
skdev->dbg_level = skd_dbg_level;
- atomic_set(&skdev->device_count, 0);
-
spin_lock_init(&skdev->lock);
+ INIT_WORK(&skdev->start_queue, skd_start_queue);
INIT_WORK(&skdev->completion_worker, skd_completion_worker);
- pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
- rc = skd_cons_skcomp(skdev);
- if (rc < 0)
+ size = max(SKD_N_FITMSG_BYTES, SKD_N_SPECIAL_FITMSG_BYTES);
+ skdev->msgbuf_cache = kmem_cache_create("skd-msgbuf", size, 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!skdev->msgbuf_cache)
goto err_out;
-
- pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
- rc = skd_cons_skmsg(skdev);
- if (rc < 0)
+ WARN_ONCE(kmem_cache_size(skdev->msgbuf_cache) < size,
+ "skd-msgbuf: %d < %zd\n",
+ kmem_cache_size(skdev->msgbuf_cache), size);
+ size = skd_sgs_per_request * sizeof(struct fit_sg_descriptor);
+ skdev->sglist_cache = kmem_cache_create("skd-sglist", size, 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!skdev->sglist_cache)
+ goto err_out;
+ WARN_ONCE(kmem_cache_size(skdev->sglist_cache) < size,
+ "skd-sglist: %d < %zd\n",
+ kmem_cache_size(skdev->sglist_cache), size);
+ size = SKD_N_INTERNAL_BYTES;
+ skdev->databuf_cache = kmem_cache_create("skd-databuf", size, 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!skdev->databuf_cache)
goto err_out;
+ WARN_ONCE(kmem_cache_size(skdev->databuf_cache) < size,
+ "skd-databuf: %d < %zd\n",
+ kmem_cache_size(skdev->databuf_cache), size);
- pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
- rc = skd_cons_skreq(skdev);
+ dev_dbg(&skdev->pdev->dev, "skcomp\n");
+ rc = skd_cons_skcomp(skdev);
if (rc < 0)
goto err_out;
- pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
- rc = skd_cons_skspcl(skdev);
+ dev_dbg(&skdev->pdev->dev, "skmsg\n");
+ rc = skd_cons_skmsg(skdev);
if (rc < 0)
goto err_out;
- pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "sksb\n");
rc = skd_cons_sksb(skdev);
if (rc < 0)
goto err_out;
- pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "disk\n");
rc = skd_cons_disk(skdev);
if (rc < 0)
goto err_out;
- pr_debug("%s:%s:%d VICTORY\n", skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "VICTORY\n");
return skdev;
err_out:
- pr_debug("%s:%s:%d construct failed\n",
- skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "construct failed\n");
skd_destruct(skdev);
return NULL;
}
@@ -4386,14 +2968,9 @@ err_out:
static void skd_free_skcomp(struct skd_device *skdev)
{
- if (skdev->skcomp_table != NULL) {
- u32 nbytes;
-
- nbytes = sizeof(skdev->skcomp_table[0]) *
- SKD_N_COMPLETION_ENTRY;
- pci_free_consistent(skdev->pdev, nbytes,
+ if (skdev->skcomp_table)
+ pci_free_consistent(skdev->pdev, SKD_SKCOMP_SIZE,
skdev->skcomp_table, skdev->cq_dma_address);
- }
skdev->skcomp_table = NULL;
skdev->cq_dma_address = 0;
@@ -4412,8 +2989,6 @@ static void skd_free_skmsg(struct skd_device *skdev)
skmsg = &skdev->skmsg_table[i];
if (skmsg->msg_buf != NULL) {
- skmsg->msg_buf += skmsg->offset;
- skmsg->mb_dma_address += skmsg->offset;
pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES,
skmsg->msg_buf,
skmsg->mb_dma_address);
@@ -4426,109 +3001,23 @@ static void skd_free_skmsg(struct skd_device *skdev)
skdev->skmsg_table = NULL;
}
-static void skd_free_sg_list(struct skd_device *skdev,
- struct fit_sg_descriptor *sg_list,
- u32 n_sg, dma_addr_t dma_addr)
-{
- if (sg_list != NULL) {
- u32 nbytes;
-
- nbytes = sizeof(*sg_list) * n_sg;
-
- pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr);
- }
-}
-
-static void skd_free_skreq(struct skd_device *skdev)
-{
- u32 i;
-
- if (skdev->skreq_table == NULL)
- return;
-
- for (i = 0; i < skdev->num_req_context; i++) {
- struct skd_request_context *skreq;
-
- skreq = &skdev->skreq_table[i];
-
- skd_free_sg_list(skdev, skreq->sksg_list,
- skdev->sgs_per_request,
- skreq->sksg_dma_address);
-
- skreq->sksg_list = NULL;
- skreq->sksg_dma_address = 0;
-
- kfree(skreq->sg);
- }
-
- kfree(skdev->skreq_table);
- skdev->skreq_table = NULL;
-}
-
-static void skd_free_skspcl(struct skd_device *skdev)
-{
- u32 i;
- u32 nbytes;
-
- if (skdev->skspcl_table == NULL)
- return;
-
- for (i = 0; i < skdev->n_special; i++) {
- struct skd_special_context *skspcl;
-
- skspcl = &skdev->skspcl_table[i];
-
- if (skspcl->msg_buf != NULL) {
- nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
- pci_free_consistent(skdev->pdev, nbytes,
- skspcl->msg_buf,
- skspcl->mb_dma_address);
- }
-
- skspcl->msg_buf = NULL;
- skspcl->mb_dma_address = 0;
-
- skd_free_sg_list(skdev, skspcl->req.sksg_list,
- SKD_N_SG_PER_SPECIAL,
- skspcl->req.sksg_dma_address);
-
- skspcl->req.sksg_list = NULL;
- skspcl->req.sksg_dma_address = 0;
-
- kfree(skspcl->req.sg);
- }
-
- kfree(skdev->skspcl_table);
- skdev->skspcl_table = NULL;
-}
-
static void skd_free_sksb(struct skd_device *skdev)
{
- struct skd_special_context *skspcl;
- u32 nbytes;
-
- skspcl = &skdev->internal_skspcl;
-
- if (skspcl->data_buf != NULL) {
- nbytes = SKD_N_INTERNAL_BYTES;
+ struct skd_special_context *skspcl = &skdev->internal_skspcl;
- pci_free_consistent(skdev->pdev, nbytes,
- skspcl->data_buf, skspcl->db_dma_address);
- }
+ skd_free_dma(skdev, skdev->databuf_cache, skspcl->data_buf,
+ skspcl->db_dma_address, DMA_BIDIRECTIONAL);
skspcl->data_buf = NULL;
skspcl->db_dma_address = 0;
- if (skspcl->msg_buf != NULL) {
- nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
- pci_free_consistent(skdev->pdev, nbytes,
- skspcl->msg_buf, skspcl->mb_dma_address);
- }
+ skd_free_dma(skdev, skdev->msgbuf_cache, skspcl->msg_buf,
+ skspcl->mb_dma_address, DMA_TO_DEVICE);
skspcl->msg_buf = NULL;
skspcl->mb_dma_address = 0;
- skd_free_sg_list(skdev, skspcl->req.sksg_list, 1,
+ skd_free_sg_list(skdev, skspcl->req.sksg_list,
skspcl->req.sksg_dma_address);
skspcl->req.sksg_list = NULL;
@@ -4539,15 +3028,20 @@ static void skd_free_disk(struct skd_device *skdev)
{
struct gendisk *disk = skdev->disk;
- if (disk != NULL) {
- struct request_queue *q = disk->queue;
+ if (disk && (disk->flags & GENHD_FL_UP))
+ del_gendisk(disk);
- if (disk->flags & GENHD_FL_UP)
- del_gendisk(disk);
- if (q)
- blk_cleanup_queue(q);
- put_disk(disk);
+ if (skdev->queue) {
+ blk_cleanup_queue(skdev->queue);
+ skdev->queue = NULL;
+ if (disk)
+ disk->queue = NULL;
}
+
+ if (skdev->tag_set.tags)
+ blk_mq_free_tag_set(&skdev->tag_set);
+
+ put_disk(disk);
skdev->disk = NULL;
}
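
The reworked skd_free_disk() above also fixes the teardown order: the gendisk is unregistered first, the queue is drained and freed before the tag set it was built from, and put_disk() drops the last reference at the end. A commented sketch of that ordering, assuming a foo_device that mirrors these three members:

#include <linux/blk-mq.h>
#include <linux/genhd.h>

struct foo_device {
	struct gendisk *disk;
	struct request_queue *queue;
	struct blk_mq_tag_set tag_set;
};

static void foo_free_disk(struct foo_device *foo)
{
	struct gendisk *disk = foo->disk;

	/* Remove the disk from userspace first, if add_disk() ever ran. */
	if (disk && (disk->flags & GENHD_FL_UP))
		del_gendisk(disk);

	/* Drain and free the queue before the tag set backing it. */
	if (foo->queue) {
		blk_cleanup_queue(foo->queue);
		foo->queue = NULL;
		if (disk)
			disk->queue = NULL;
	}

	if (foo->tag_set.tags)
		blk_mq_free_tag_set(&foo->tag_set);

	/* put_disk() tolerates NULL, so this is safe on error paths too. */
	put_disk(disk);
	foo->disk = NULL;
}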
@@ -4556,26 +3050,25 @@ static void skd_destruct(struct skd_device *skdev)
if (skdev == NULL)
return;
+ cancel_work_sync(&skdev->start_queue);
- pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "disk\n");
skd_free_disk(skdev);
- pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "sksb\n");
skd_free_sksb(skdev);
- pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
- skd_free_skspcl(skdev);
-
- pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
- skd_free_skreq(skdev);
-
- pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "skmsg\n");
skd_free_skmsg(skdev);
- pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "skcomp\n");
skd_free_skcomp(skdev);
- pr_debug("%s:%s:%d skdev\n", skdev->name, __func__, __LINE__);
+ kmem_cache_destroy(skdev->databuf_cache);
+ kmem_cache_destroy(skdev->sglist_cache);
+ kmem_cache_destroy(skdev->msgbuf_cache);
+
+ dev_dbg(&skdev->pdev->dev, "skdev\n");
kfree(skdev);
}
@@ -4592,9 +3085,8 @@ static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
skdev = bdev->bd_disk->private_data;
- pr_debug("%s:%s:%d %s: CMD[%s] getgeo device\n",
- skdev->name, __func__, __LINE__,
- bdev->bd_disk->disk_name, current->comm);
+ dev_dbg(&skdev->pdev->dev, "%s: CMD[%s] getgeo device\n",
+ bdev->bd_disk->disk_name, current->comm);
if (skdev->read_cap_is_valid) {
capacity = get_capacity(skdev->disk);
@@ -4609,18 +3101,16 @@ static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
static int skd_bdev_attach(struct device *parent, struct skd_device *skdev)
{
- pr_debug("%s:%s:%d add_disk\n", skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "add_disk\n");
device_add_disk(parent, skdev->disk);
return 0;
}
static const struct block_device_operations skd_blockdev_ops = {
.owner = THIS_MODULE,
- .ioctl = skd_bdev_ioctl,
.getgeo = skd_bdev_getgeo,
};
-
/*
*****************************************************************************
* PCIe DRIVER GLUE
@@ -4671,10 +3161,8 @@ static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
char pci_str[32];
struct skd_device *skdev;
- pr_info("STEC s1120 Driver(%s) version %s-b%s\n",
- DRV_NAME, DRV_VERSION, DRV_BUILD_ID);
- pr_info("(skd?:??:[%s]): vendor=%04X device=%04x\n",
- pci_name(pdev), pdev->vendor, pdev->device);
+ dev_dbg(&pdev->dev, "vendor=%04X device=%04x\n", pdev->vendor,
+ pdev->device);
rc = pci_enable_device(pdev);
if (rc)
@@ -4685,16 +3173,13 @@ static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (!rc) {
if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
-
- pr_err("(%s): consistent DMA mask error %d\n",
- pci_name(pdev), rc);
+ dev_err(&pdev->dev, "consistent DMA mask error %d\n",
+ rc);
}
} else {
- (rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)));
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
-
- pr_err("(%s): DMA mask error %d\n",
- pci_name(pdev), rc);
+ dev_err(&pdev->dev, "DMA mask error %d\n", rc);
goto err_out_regions;
}
}
@@ -4714,19 +3199,17 @@ static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
skd_pci_info(skdev, pci_str);
- pr_info("(%s): %s 64bit\n", skd_name(skdev), pci_str);
+ dev_info(&pdev->dev, "%s 64bit\n", pci_str);
pci_set_master(pdev);
rc = pci_enable_pcie_error_reporting(pdev);
if (rc) {
- pr_err(
- "(%s): bad enable of PCIe error reporting rc=%d\n",
- skd_name(skdev), rc);
+ dev_err(&pdev->dev,
+ "bad enable of PCIe error reporting rc=%d\n", rc);
skdev->pcie_error_reporting_is_enabled = 0;
} else
skdev->pcie_error_reporting_is_enabled = 1;
-
pci_set_drvdata(pdev, skdev);
for (i = 0; i < SKD_MAX_BARS; i++) {
@@ -4735,21 +3218,19 @@ static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
skdev->mem_size[i]);
if (!skdev->mem_map[i]) {
- pr_err("(%s): Unable to map adapter memory!\n",
- skd_name(skdev));
+ dev_err(&pdev->dev,
+ "Unable to map adapter memory!\n");
rc = -ENODEV;
goto err_out_iounmap;
}
- pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n",
- skdev->name, __func__, __LINE__,
- skdev->mem_map[i],
- (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
+ dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n",
+ skdev->mem_map[i], (uint64_t)skdev->mem_phys[i],
+ skdev->mem_size[i]);
}
rc = skd_acquire_irq(skdev);
if (rc) {
- pr_err("(%s): interrupt resource error %d\n",
- skd_name(skdev), rc);
+ dev_err(&pdev->dev, "interrupt resource error %d\n", rc);
goto err_out_iounmap;
}
@@ -4771,29 +3252,14 @@ static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
} else {
/* we timed out, something is wrong with the device,
don't add the disk structure */
- pr_err(
- "(%s): error: waiting for s1120 timed out %d!\n",
- skd_name(skdev), rc);
+ dev_err(&pdev->dev, "error: waiting for s1120 timed out %d!\n",
+ rc);
/* in case of no error; we timeout with ENXIO */
if (!rc)
rc = -ENXIO;
goto err_out_timer;
}
-
-#ifdef SKD_VMK_POLL_HANDLER
- if (skdev->irq_type == SKD_IRQ_MSIX) {
- /* MSIX completion handler is being used for coredump */
- vmklnx_scsi_register_poll_handler(skdev->scsi_host,
- skdev->msix_entries[5].vector,
- skd_comp_q, skdev);
- } else {
- vmklnx_scsi_register_poll_handler(skdev->scsi_host,
- skdev->pdev->irq, skd_isr,
- skdev);
- }
-#endif /* SKD_VMK_POLL_HANDLER */
-
return rc;
err_out_timer:
@@ -4826,7 +3292,7 @@ static void skd_pci_remove(struct pci_dev *pdev)
skdev = pci_get_drvdata(pdev);
if (!skdev) {
- pr_err("%s: no device data for PCI\n", pci_name(pdev));
+ dev_err(&pdev->dev, "no device data for PCI\n");
return;
}
skd_stop_device(skdev);
@@ -4834,7 +3300,7 @@ static void skd_pci_remove(struct pci_dev *pdev)
for (i = 0; i < SKD_MAX_BARS; i++)
if (skdev->mem_map[i])
- iounmap((u32 *)skdev->mem_map[i]);
+ iounmap(skdev->mem_map[i]);
if (skdev->pcie_error_reporting_is_enabled)
pci_disable_pcie_error_reporting(pdev);
@@ -4855,7 +3321,7 @@ static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
skdev = pci_get_drvdata(pdev);
if (!skdev) {
- pr_err("%s: no device data for PCI\n", pci_name(pdev));
+ dev_err(&pdev->dev, "no device data for PCI\n");
return -EIO;
}
@@ -4865,7 +3331,7 @@ static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
for (i = 0; i < SKD_MAX_BARS; i++)
if (skdev->mem_map[i])
- iounmap((u32 *)skdev->mem_map[i]);
+ iounmap(skdev->mem_map[i]);
if (skdev->pcie_error_reporting_is_enabled)
pci_disable_pcie_error_reporting(pdev);
@@ -4885,7 +3351,7 @@ static int skd_pci_resume(struct pci_dev *pdev)
skdev = pci_get_drvdata(pdev);
if (!skdev) {
- pr_err("%s: no device data for PCI\n", pci_name(pdev));
+ dev_err(&pdev->dev, "no device data for PCI\n");
return -1;
}
@@ -4903,15 +3369,14 @@ static int skd_pci_resume(struct pci_dev *pdev)
if (!rc) {
if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
- pr_err("(%s): consistent DMA mask error %d\n",
- pci_name(pdev), rc);
+ dev_err(&pdev->dev, "consistent DMA mask error %d\n",
+ rc);
}
} else {
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
- pr_err("(%s): DMA mask error %d\n",
- pci_name(pdev), rc);
+ dev_err(&pdev->dev, "DMA mask error %d\n", rc);
goto err_out_regions;
}
}
@@ -4919,8 +3384,8 @@ static int skd_pci_resume(struct pci_dev *pdev)
pci_set_master(pdev);
rc = pci_enable_pcie_error_reporting(pdev);
if (rc) {
- pr_err("(%s): bad enable of PCIe error reporting rc=%d\n",
- skdev->name, rc);
+ dev_err(&pdev->dev,
+ "bad enable of PCIe error reporting rc=%d\n", rc);
skdev->pcie_error_reporting_is_enabled = 0;
} else
skdev->pcie_error_reporting_is_enabled = 1;
@@ -4932,21 +3397,17 @@ static int skd_pci_resume(struct pci_dev *pdev)
skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
skdev->mem_size[i]);
if (!skdev->mem_map[i]) {
- pr_err("(%s): Unable to map adapter memory!\n",
- skd_name(skdev));
+ dev_err(&pdev->dev, "Unable to map adapter memory!\n");
rc = -ENODEV;
goto err_out_iounmap;
}
- pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n",
- skdev->name, __func__, __LINE__,
- skdev->mem_map[i],
- (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
+ dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n",
+ skdev->mem_map[i], (uint64_t)skdev->mem_phys[i],
+ skdev->mem_size[i]);
}
rc = skd_acquire_irq(skdev);
if (rc) {
-
- pr_err("(%s): interrupt resource error %d\n",
- pci_name(pdev), rc);
+ dev_err(&pdev->dev, "interrupt resource error %d\n", rc);
goto err_out_iounmap;
}
@@ -4984,15 +3445,15 @@ static void skd_pci_shutdown(struct pci_dev *pdev)
{
struct skd_device *skdev;
- pr_err("skd_pci_shutdown called\n");
+ dev_err(&pdev->dev, "%s called\n", __func__);
skdev = pci_get_drvdata(pdev);
if (!skdev) {
- pr_err("%s: no device data for PCI\n", pci_name(pdev));
+ dev_err(&pdev->dev, "no device data for PCI\n");
return;
}
- pr_err("%s: calling stop\n", skd_name(skdev));
+ dev_err(&pdev->dev, "calling stop\n");
skd_stop_device(skdev);
}
@@ -5012,21 +3473,6 @@ static struct pci_driver skd_driver = {
*****************************************************************************
*/
-static const char *skd_name(struct skd_device *skdev)
-{
- memset(skdev->id_str, 0, sizeof(skdev->id_str));
-
- if (skdev->inquiry_is_valid)
- snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:%s:[%s]",
- skdev->name, skdev->inq_serial_num,
- pci_name(skdev->pdev));
- else
- snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:??:[%s]",
- skdev->name, pci_name(skdev->pdev));
-
- return skdev->id_str;
-}
-
const char *skd_drive_state_to_str(int state)
{
switch (state) {
@@ -5078,8 +3524,6 @@ const char *skd_skdev_state_to_str(enum skd_drvr_state state)
return "PAUSING";
case SKD_DRVR_STATE_PAUSED:
return "PAUSED";
- case SKD_DRVR_STATE_DRAINING_TIMEOUT:
- return "DRAINING_TIMEOUT";
case SKD_DRVR_STATE_RESTARTING:
return "RESTARTING";
case SKD_DRVR_STATE_RESUMING:
@@ -5106,18 +3550,6 @@ const char *skd_skdev_state_to_str(enum skd_drvr_state state)
}
}
-static const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
-{
- switch (state) {
- case SKD_MSG_STATE_IDLE:
- return "IDLE";
- case SKD_MSG_STATE_BUSY:
- return "BUSY";
- default:
- return "???";
- }
-}
-
static const char *skd_skreq_state_to_str(enum skd_req_state state)
{
switch (state) {
@@ -5131,8 +3563,6 @@ static const char *skd_skreq_state_to_str(enum skd_req_state state)
return "COMPLETED";
case SKD_REQ_STATE_TIMEOUT:
return "TIMEOUT";
- case SKD_REQ_STATE_ABORTED:
- return "ABORTED";
default:
return "???";
}
@@ -5140,58 +3570,34 @@ static const char *skd_skreq_state_to_str(enum skd_req_state state)
static void skd_log_skdev(struct skd_device *skdev, const char *event)
{
- pr_debug("%s:%s:%d (%s) skdev=%p event='%s'\n",
- skdev->name, __func__, __LINE__, skdev->name, skdev, event);
- pr_debug("%s:%s:%d drive_state=%s(%d) driver_state=%s(%d)\n",
- skdev->name, __func__, __LINE__,
- skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
- skd_skdev_state_to_str(skdev->state), skdev->state);
- pr_debug("%s:%s:%d busy=%d limit=%d dev=%d lowat=%d\n",
- skdev->name, __func__, __LINE__,
- skdev->in_flight, skdev->cur_max_queue_depth,
- skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
- pr_debug("%s:%s:%d timestamp=0x%x cycle=%d cycle_ix=%d\n",
- skdev->name, __func__, __LINE__,
- skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix);
-}
-
-static void skd_log_skmsg(struct skd_device *skdev,
- struct skd_fitmsg_context *skmsg, const char *event)
-{
- pr_debug("%s:%s:%d (%s) skmsg=%p event='%s'\n",
- skdev->name, __func__, __LINE__, skdev->name, skmsg, event);
- pr_debug("%s:%s:%d state=%s(%d) id=0x%04x length=%d\n",
- skdev->name, __func__, __LINE__,
- skd_skmsg_state_to_str(skmsg->state), skmsg->state,
- skmsg->id, skmsg->length);
+ dev_dbg(&skdev->pdev->dev, "skdev=%p event='%s'\n", skdev, event);
+ dev_dbg(&skdev->pdev->dev, " drive_state=%s(%d) driver_state=%s(%d)\n",
+ skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
+ skd_skdev_state_to_str(skdev->state), skdev->state);
+ dev_dbg(&skdev->pdev->dev, " busy=%d limit=%d dev=%d lowat=%d\n",
+ skd_in_flight(skdev), skdev->cur_max_queue_depth,
+ skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
+ dev_dbg(&skdev->pdev->dev, " cycle=%d cycle_ix=%d\n",
+ skdev->skcomp_cycle, skdev->skcomp_ix);
}
static void skd_log_skreq(struct skd_device *skdev,
struct skd_request_context *skreq, const char *event)
{
- pr_debug("%s:%s:%d (%s) skreq=%p event='%s'\n",
- skdev->name, __func__, __LINE__, skdev->name, skreq, event);
- pr_debug("%s:%s:%d state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
- skdev->name, __func__, __LINE__,
- skd_skreq_state_to_str(skreq->state), skreq->state,
- skreq->id, skreq->fitmsg_id);
- pr_debug("%s:%s:%d timo=0x%x sg_dir=%d n_sg=%d\n",
- skdev->name, __func__, __LINE__,
- skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg);
-
- if (skreq->req != NULL) {
- struct request *req = skreq->req;
- u32 lba = (u32)blk_rq_pos(req);
- u32 count = blk_rq_sectors(req);
-
- pr_debug("%s:%s:%d "
- "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
- skdev->name, __func__, __LINE__,
- req, lba, lba, count, count,
- (int)rq_data_dir(req));
- } else
- pr_debug("%s:%s:%d req=NULL\n",
- skdev->name, __func__, __LINE__);
+ struct request *req = blk_mq_rq_from_pdu(skreq);
+ u32 lba = blk_rq_pos(req);
+ u32 count = blk_rq_sectors(req);
+
+ dev_dbg(&skdev->pdev->dev, "skreq=%p event='%s'\n", skreq, event);
+ dev_dbg(&skdev->pdev->dev, " state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
+ skd_skreq_state_to_str(skreq->state), skreq->state, skreq->id,
+ skreq->fitmsg_id);
+ dev_dbg(&skdev->pdev->dev, " sg_dir=%d n_sg=%d\n",
+ skreq->data_dir, skreq->n_sg);
+
+ dev_dbg(&skdev->pdev->dev,
+ "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req, lba, lba,
+ count, count, (int)rq_data_dir(req));
}
/*
@@ -5202,7 +3608,14 @@ static void skd_log_skreq(struct skd_device *skdev,
static int __init skd_init(void)
{
- pr_info(PFX " v%s-b%s loaded\n", DRV_VERSION, DRV_BUILD_ID);
+ BUILD_BUG_ON(sizeof(struct fit_completion_entry_v1) != 8);
+ BUILD_BUG_ON(sizeof(struct fit_comp_error_info) != 32);
+ BUILD_BUG_ON(sizeof(struct skd_command_header) != 16);
+ BUILD_BUG_ON(sizeof(struct skd_scsi_request) != 32);
+ BUILD_BUG_ON(sizeof(struct driver_inquiry_data) != 44);
+ BUILD_BUG_ON(offsetof(struct skd_msg_buf, fmh) != 0);
+ BUILD_BUG_ON(offsetof(struct skd_msg_buf, scsi) != 64);
+ BUILD_BUG_ON(sizeof(struct skd_msg_buf) != SKD_N_FITMSG_BYTES);
switch (skd_isr_type) {
case SKD_IRQ_LEGACY:
@@ -5222,7 +3635,8 @@ static int __init skd_init(void)
skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
}
- if (skd_max_req_per_msg < 1 || skd_max_req_per_msg > 14) {
+ if (skd_max_req_per_msg < 1 ||
+ skd_max_req_per_msg > SKD_MAX_REQ_PER_MSG) {
pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n",
skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
@@ -5246,19 +3660,11 @@ static int __init skd_init(void)
skd_isr_comp_limit = 0;
}
- if (skd_max_pass_thru < 1 || skd_max_pass_thru > 50) {
- pr_err(PFX "skd_max_pass_thru %d invalid, re-set to %d\n",
- skd_max_pass_thru, SKD_N_SPECIAL_CONTEXT);
- skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
- }
-
return pci_register_driver(&skd_driver);
}
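
The BUILD_BUG_ON() checks added at the top of skd_init() pin the sizes and offsets of the on-the-wire structures at compile time, so a layout change fails the build rather than corrupting FIT messages. The same technique applies to any structure shared with hardware; a small hypothetical example:

#include <linux/bug.h>
#include <linux/init.h>
#include <linux/stddef.h>
#include <linux/types.h>

/* A made-up 8-byte command descriptor shared with a device. */
struct foo_hw_desc {
	__le32	addr_lo;
	__le16	len;
	u8	opcode;
	u8	flags;
} __packed;

static int __init foo_init(void)
{
	/* Any drift in the layout turns into a compile-time error. */
	BUILD_BUG_ON(sizeof(struct foo_hw_desc) != 8);
	BUILD_BUG_ON(offsetof(struct foo_hw_desc, opcode) != 6);
	return 0;
}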
static void __exit skd_exit(void)
{
- pr_info(PFX " v%s-b%s unloading\n", DRV_VERSION, DRV_BUILD_ID);
-
pci_unregister_driver(&skd_driver);
if (skd_major)
diff --git a/drivers/block/skd_s1120.h b/drivers/block/skd_s1120.h
index 61c757ff0161..de35f47e953c 100644
--- a/drivers/block/skd_s1120.h
+++ b/drivers/block/skd_s1120.h
@@ -1,19 +1,15 @@
-/* Copyright 2012 STEC, Inc.
+/*
+ * Copyright 2012 STEC, Inc.
+ * Copyright (c) 2017 Western Digital Corporation or its affiliates.
*
- * This file is licensed under the terms of the 3-clause
- * BSD License (http://opensource.org/licenses/BSD-3-Clause)
- * or the GNU GPL-2.0 (http://www.gnu.org/licenses/gpl-2.0.html),
- * at your option. Both licenses are also available in the LICENSE file
- * distributed with this project. This file may not be copied, modified,
- * or distributed except in accordance with those terms.
+ * This file is part of the Linux kernel, and is made available under
+ * the terms of the GNU General Public License version 2.
*/
#ifndef SKD_S1120_H
#define SKD_S1120_H
-#pragma pack(push, s1120_h, 1)
-
/*
* Q-channel, 64-bit r/w
*/
@@ -30,7 +26,7 @@
#define FIT_QCMD_MSGSIZE_128 (0x1 << 4)
#define FIT_QCMD_MSGSIZE_256 (0x2 << 4)
#define FIT_QCMD_MSGSIZE_512 (0x3 << 4)
-#define FIT_QCMD_BASE_ADDRESS_MASK (0xFFFFFFFFFFFFFFC0ull)
+#define FIT_QCMD_ALIGN L1_CACHE_BYTES
/*
* Control, 32-bit r/w
@@ -250,7 +246,7 @@ struct fit_msg_hdr {
* 20-23 of the FIT_MTD_FITFW_INIT response.
*/
struct fit_completion_entry_v1 {
- uint32_t num_returned_bytes;
+ __be32 num_returned_bytes;
uint16_t tag;
uint8_t status; /* SCSI status */
uint8_t cycle;
@@ -278,7 +274,7 @@ struct fit_comp_error_info {
uint16_t sks_low; /* 10: Sense Key Specific (LSW) */
uint16_t reserved3; /* 12: Part of additional sense bytes (unused) */
uint16_t uec; /* 14: Additional Sense Bytes */
- uint64_t per; /* 16: Additional Sense Bytes */
+ uint64_t per __packed; /* 16: Additional Sense Bytes */
uint8_t reserved4[2]; /* 1E: Additional Sense Bytes (unused) */
};
@@ -292,11 +288,11 @@ struct fit_comp_error_info {
* Version one has the last 32 bits sg_list_len_bytes;
*/
struct skd_command_header {
- uint64_t sg_list_dma_address;
+ __be64 sg_list_dma_address;
uint16_t tag;
uint8_t attribute;
uint8_t add_cdb_len; /* In 32 bit words */
- uint32_t sg_list_len_bytes;
+ __be32 sg_list_len_bytes;
};
struct skd_scsi_request {
@@ -309,22 +305,20 @@ struct driver_inquiry_data {
uint8_t peripheral_device_type:5;
uint8_t qualifier:3;
uint8_t page_code;
- uint16_t page_length;
- uint16_t pcie_bus_number;
+ __be16 page_length;
+ __be16 pcie_bus_number;
uint8_t pcie_device_number;
uint8_t pcie_function_number;
uint8_t pcie_link_speed;
uint8_t pcie_link_lanes;
- uint16_t pcie_vendor_id;
- uint16_t pcie_device_id;
- uint16_t pcie_subsystem_vendor_id;
- uint16_t pcie_subsystem_device_id;
+ __be16 pcie_vendor_id;
+ __be16 pcie_device_id;
+ __be16 pcie_subsystem_vendor_id;
+ __be16 pcie_subsystem_device_id;
uint8_t reserved1[2];
uint8_t reserved2[3];
uint8_t driver_version_length;
uint8_t driver_version[0x14];
};
-#pragma pack(pop, s1120_h)
-
#endif /* SKD_S1120_H */
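
The header changes above annotate device-endian fields as __be16/__be32/__be64 so sparse can flag a missing byte swap instead of it surfacing as data corruption on little-endian hosts. A short sketch of how such fields are accessed (the structure is hypothetical):

#include <linux/types.h>
#include <asm/byteorder.h>

struct foo_wire_hdr {
	__be32	payload_len;	/* big-endian on the wire */
	__be16	tag;
	u8	opcode;
	u8	reserved;
};

static u32 foo_payload_len(const struct foo_wire_hdr *hdr)
{
	/* Convert from device (big-endian) to CPU order on every read... */
	return be32_to_cpu(hdr->payload_len);
}

static void foo_set_tag(struct foo_wire_hdr *hdr, u16 tag)
{
	/* ...and back to big-endian on every write. */
	hdr->tag = cpu_to_be16(tag);
}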
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 6b16ead1da58..ad9749463d4f 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -875,6 +875,56 @@ static void print_version(void)
printk(KERN_INFO "%s", version);
}
+struct vdc_check_port_data {
+ int dev_no;
+ char *type;
+};
+
+static int vdc_device_probed(struct device *dev, void *arg)
+{
+ struct vio_dev *vdev = to_vio_dev(dev);
+ struct vdc_check_port_data *port_data;
+
+ port_data = (struct vdc_check_port_data *)arg;
+
+ if ((vdev->dev_no == port_data->dev_no) &&
+ (!(strcmp((char *)&vdev->type, port_data->type))) &&
+ dev_get_drvdata(dev)) {
+ /* This device has already been configured
+ * by vdc_port_probe()
+ */
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+/* Determine whether the VIO device is part of an mpgroup
+ * by locating all the virtual-device-port nodes associated
+ * with the parent virtual-device node for the VIO device
+ * and checking whether any of these nodes are vdc-ports
+ * which have already been configured.
+ *
+ * Returns true if this device is part of an mpgroup and has
+ * already been probed.
+ */
+static bool vdc_port_mpgroup_check(struct vio_dev *vdev)
+{
+ struct vdc_check_port_data port_data;
+ struct device *dev;
+
+ port_data.dev_no = vdev->dev_no;
+ port_data.type = (char *)&vdev->type;
+
+ dev = device_find_child(vdev->dev.parent, &port_data,
+ vdc_device_probed);
+
+ if (dev)
+ return true;
+
+ return false;
+}
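
vdc_port_mpgroup_check() leans on device_find_child(), which walks a parent's children and stops at the first one for which the match callback returns non-zero. A condensed sketch of that API; here the match criterion is simply whether driver data has been set, as an illustration:

#include <linux/device.h>

/*
 * Return non-zero to stop the walk; device_find_child() then returns
 * this device with an extra reference taken.
 */
static int foo_match_configured(struct device *dev, void *arg)
{
	return dev_get_drvdata(dev) != NULL;
}

static bool foo_has_configured_child(struct device *parent)
{
	struct device *child = device_find_child(parent, NULL,
						 foo_match_configured);

	if (!child)
		return false;

	/* Drop the reference device_find_child() took on the match. */
	put_device(child);
	return true;
}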
+
static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
struct mdesc_handle *hp;
@@ -893,6 +943,14 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
goto err_out_release_mdesc;
}
+ /* Check if this device is part of an mpgroup */
+ if (vdc_port_mpgroup_check(vdev)) {
+ printk(KERN_WARNING
+ "VIO: Ignoring extra vdisk port %s",
+ dev_name(&vdev->dev));
+ goto err_out_release_mdesc;
+ }
+
port = kzalloc(sizeof(*port), GFP_KERNEL);
err = -ENOMEM;
if (!port) {
@@ -943,6 +1001,9 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
if (err)
goto err_out_free_tx_ring;
+ /* Note that the device driver_data is used to determine
+ * whether the port has been probed.
+ */
dev_set_drvdata(&vdev->dev, port);
mdesc_release(hp);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 4e02aa5fdac0..34e17ee799be 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -265,7 +265,7 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
}
spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
- if (req_op(req) == REQ_OP_SCSI_IN || req_op(req) == REQ_OP_SCSI_OUT)
+ if (blk_rq_is_scsi(req))
err = virtblk_add_req_scsi(vblk->vqs[qid].vq, vbr, vbr->sg, num);
else
err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
@@ -381,6 +381,7 @@ static void virtblk_config_changed_work(struct work_struct *work)
struct request_queue *q = vblk->disk->queue;
char cap_str_2[10], cap_str_10[10];
char *envp[] = { "RESIZE=1", NULL };
+ unsigned long long nblocks;
u64 capacity;
/* Host must always specify the capacity. */
@@ -393,16 +394,19 @@ static void virtblk_config_changed_work(struct work_struct *work)
capacity = (sector_t)-1;
}
- string_get_size(capacity, queue_logical_block_size(q),
+ nblocks = DIV_ROUND_UP_ULL(capacity, queue_logical_block_size(q) >> 9);
+
+ string_get_size(nblocks, queue_logical_block_size(q),
STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
- string_get_size(capacity, queue_logical_block_size(q),
+ string_get_size(nblocks, queue_logical_block_size(q),
STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));
dev_notice(&vdev->dev,
- "new size: %llu %d-byte logical blocks (%s/%s)\n",
- (unsigned long long)capacity,
- queue_logical_block_size(q),
- cap_str_10, cap_str_2);
+ "new size: %llu %d-byte logical blocks (%s/%s)\n",
+ nblocks,
+ queue_logical_block_size(q),
+ cap_str_10,
+ cap_str_2);
set_capacity(vblk->disk, capacity);
revalidate_disk(vblk->disk);
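/*
 * Aside: virtio reports capacity in 512-byte sectors, so the hunk above
 * converts it to logical-block units before pretty-printing. A made-up
 * worked example (illustrative only, not part of the patch):
 */
static unsigned long long example_nblocks(void)
{
	u64 capacity = 8388608;		/* 512-byte sectors, i.e. 4 GiB */
	unsigned int lbs = 4096;	/* logical block size in bytes */

	/* lbs >> 9 == 8 sectors per logical block, so this yields 1048576 */
	return DIV_ROUND_UP_ULL(capacity, lbs >> 9);
}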
@@ -541,12 +545,9 @@ virtblk_cache_type_store(struct device *dev, struct device_attribute *attr,
int i;
BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
- for (i = ARRAY_SIZE(virtblk_cache_types); --i >= 0; )
- if (sysfs_streq(buf, virtblk_cache_types[i]))
- break;
-
+ i = sysfs_match_string(virtblk_cache_types, buf);
if (i < 0)
- return -EINVAL;
+ return i;
virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
virtblk_update_cache_mode(vdev);
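/*
 * Aside: sysfs_match_string() (from <linux/string.h>) returns the index
 * of the matching array entry or -EINVAL, and it compares with
 * sysfs_streq() so a trailing newline from a sysfs write is tolerated.
 * A minimal, hypothetical store handler using the same pattern:
 */
static const char * const example_modes[] = { "off", "on", "auto" };

static ssize_t example_mode_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	int i = sysfs_match_string(example_modes, buf);

	if (i < 0)
		return i;	/* -EINVAL: no entry matched */

	/* apply example_modes[i] here */
	return count;
}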
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index fe7cd58c43d0..987d665e82de 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -705,9 +705,9 @@ static unsigned int xen_blkbk_unmap_prepare(
GNTMAP_host_map, pages[i]->handle);
pages[i]->handle = BLKBACK_INVALID_HANDLE;
invcount++;
- }
+ }
- return invcount;
+ return invcount;
}
static void xen_blkbk_unmap_and_respond_callback(int result, struct gntab_unmap_queue_data *data)
@@ -1251,6 +1251,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
break;
case BLKIF_OP_WRITE_BARRIER:
drain = true;
+ /* fall through */
case BLKIF_OP_FLUSH_DISKCACHE:
ring->st_f_req++;
operation = REQ_OP_WRITE;
@@ -1362,7 +1363,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
goto fail_put_bio;
biolist[nbio++] = bio;
- bio->bi_bdev = preq.bdev;
+ bio_set_dev(bio, preq.bdev);
bio->bi_private = pending_req;
bio->bi_end_io = end_block_io_op;
bio->bi_iter.bi_sector = preq.sector_number;
@@ -1381,7 +1382,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
goto fail_put_bio;
biolist[nbio++] = bio;
- bio->bi_bdev = preq.bdev;
+ bio_set_dev(bio, preq.bdev);
bio->bi_private = pending_req;
bio->bi_end_io = end_block_io_op;
bio_set_op_attrs(bio, operation, operation_flags);
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 792da683e70d..21c1be1eb226 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -244,6 +244,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
{
struct pending_req *req, *n;
unsigned int j, r;
+ bool busy = false;
for (r = 0; r < blkif->nr_rings; r++) {
struct xen_blkif_ring *ring = &blkif->rings[r];
@@ -261,8 +262,10 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
* don't have any discard_io or other_io requests. So, checking
* for inflight IO is enough.
*/
- if (atomic_read(&ring->inflight) > 0)
- return -EBUSY;
+ if (atomic_read(&ring->inflight) > 0) {
+ busy = true;
+ continue;
+ }
if (ring->irq) {
unbind_from_irqhandler(ring->irq, ring);
@@ -300,6 +303,9 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
ring->active = false;
}
+ if (busy)
+ return -EBUSY;
+
blkif->nr_ring_pages = 0;
/*
* blkif->rings was allocated in connect_ring, so we should free it in
@@ -810,7 +816,8 @@ static void frontend_changed(struct xenbus_device *dev,
xenbus_switch_state(dev, XenbusStateClosed);
if (xenbus_dev_is_online(dev))
break;
- /* fall through if not online */
+ /* fall through */
+ /* if not online */
case XenbusStateUnknown:
/* implies xen_blkif_disconnect() via xen_blkbk_remove() */
device_unregister(&dev->dev);
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index c852ed3c01d5..891265acb10e 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -111,7 +111,7 @@ struct blk_shadow {
};
struct blkif_req {
- int error;
+ blk_status_t error;
};
static inline struct blkif_req *blkif_req(struct request *rq)
@@ -708,6 +708,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
* existing persistent grants, or if we have to get new grants,
* as there are not sufficiently many free.
*/
+ bool new_persistent_gnts = false;
struct scatterlist *sg;
int num_sg, max_grefs, num_grant;
@@ -719,19 +720,21 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
*/
max_grefs += INDIRECT_GREFS(max_grefs);
- /*
- * We have to reserve 'max_grefs' grants because persistent
- * grants are shared by all rings.
- */
- if (max_grefs > 0)
- if (gnttab_alloc_grant_references(max_grefs, &setup.gref_head) < 0) {
+ /* Check if we have enough persistent grants to allocate a request */
+ if (rinfo->persistent_gnts_c < max_grefs) {
+ new_persistent_gnts = true;
+
+ if (gnttab_alloc_grant_references(
+ max_grefs - rinfo->persistent_gnts_c,
+ &setup.gref_head) < 0) {
gnttab_request_free_callback(
&rinfo->callback,
blkif_restart_queue_callback,
rinfo,
- max_grefs);
+ max_grefs - rinfo->persistent_gnts_c);
return 1;
}
+ }
/* Fill out a communications ring structure. */
id = blkif_ring_get_request(rinfo, req, &ring_req);
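/*
 * Aside: the hunk above allocates only the shortfall in grant references,
 * since persistent grants already held by the ring can be reused. A toy
 * illustration with invented numbers (not part of the patch):
 */
static unsigned int example_grefs_to_alloc(unsigned int max_grefs,
					   unsigned int persistent_gnts_c)
{
	if (persistent_gnts_c >= max_grefs)
		return 0;			/* nothing new needed */

	return max_grefs - persistent_gnts_c;	/* e.g. 10 - 6 == 4 */
}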
@@ -832,7 +835,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
if (unlikely(require_extra_req))
rinfo->shadow[extra_id].req = *extra_ring_req;
- if (max_grefs > 0)
+ if (new_persistent_gnts)
gnttab_free_grant_references(setup.gref_head);
return 0;
@@ -906,8 +909,8 @@ out_err:
return BLK_STS_IOERR;
out_busy:
- spin_unlock_irqrestore(&rinfo->ring_lock, flags);
blk_mq_stop_hw_queue(hctx);
+ spin_unlock_irqrestore(&rinfo->ring_lock, flags);
return BLK_STS_RESOURCE;
}
@@ -1616,7 +1619,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
printk(KERN_WARNING "blkfront: %s: %s op failed\n",
info->gd->disk_name, op_name(bret->operation));
- blkif_req(req)->error = -EOPNOTSUPP;
+ blkif_req(req)->error = BLK_STS_NOTSUPP;
}
if (unlikely(bret->status == BLKIF_RSP_ERROR &&
rinfo->shadow[id].req.u.rw.nr_segments == 0)) {
@@ -2072,9 +2075,9 @@ static int blkfront_resume(struct xenbus_device *dev)
/*
* Get the bios in the request so we can re-queue them.
*/
- if (req_op(shadow[i].request) == REQ_OP_FLUSH ||
- req_op(shadow[i].request) == REQ_OP_DISCARD ||
- req_op(shadow[i].request) == REQ_OP_SECURE_ERASE ||
+ if (req_op(shadow[j].request) == REQ_OP_FLUSH ||
+ req_op(shadow[j].request) == REQ_OP_DISCARD ||
+ req_op(shadow[j].request) == REQ_OP_SECURE_ERASE ||
shadow[j].request->cmd_flags & REQ_FUA) {
/*
* Flush operations don't contain bios, so
@@ -2453,7 +2456,7 @@ static void blkback_changed(struct xenbus_device *dev,
case XenbusStateClosed:
if (dev->state == XenbusStateClosed)
break;
- /* Missed the backend's Closing state -- fallthrough */
+ /* fall through */
case XenbusStateClosing:
if (info)
blkfront_closing(info);
diff --git a/drivers/block/zram/Kconfig b/drivers/block/zram/Kconfig
index b8ecba6dcd3b..7cd4a8ec3c8f 100644
--- a/drivers/block/zram/Kconfig
+++ b/drivers/block/zram/Kconfig
@@ -13,3 +13,15 @@ config ZRAM
disks and maybe many more.
See zram.txt for more information.
+
+config ZRAM_WRITEBACK
+ bool "Write back incompressible page to backing device"
+ depends on ZRAM
+ default n
+ help
+ With an incompressible page, there is no memory saving in keeping it
+ in memory. Instead, write it out to the backing device.
+ For this feature, the admin should set up a backing device via
+ /sys/block/zramX/backing_dev.
+
+ See zram.txt for more information.
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 856d5dc02451..4063f3f59f4f 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -270,6 +270,349 @@ static ssize_t mem_used_max_store(struct device *dev,
return len;
}
+#ifdef CONFIG_ZRAM_WRITEBACK
+static bool zram_wb_enabled(struct zram *zram)
+{
+ return zram->backing_dev;
+}
+
+static void reset_bdev(struct zram *zram)
+{
+ struct block_device *bdev;
+
+ if (!zram_wb_enabled(zram))
+ return;
+
+ bdev = zram->bdev;
+ if (zram->old_block_size)
+ set_blocksize(bdev, zram->old_block_size);
+ blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
+ /* hope filp_close flushes all of the IO */
+ filp_close(zram->backing_dev, NULL);
+ zram->backing_dev = NULL;
+ zram->old_block_size = 0;
+ zram->bdev = NULL;
+
+ kvfree(zram->bitmap);
+ zram->bitmap = NULL;
+}
+
+static ssize_t backing_dev_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct zram *zram = dev_to_zram(dev);
+ struct file *file = zram->backing_dev;
+ char *p;
+ ssize_t ret;
+
+ down_read(&zram->init_lock);
+ if (!zram_wb_enabled(zram)) {
+ memcpy(buf, "none\n", 5);
+ up_read(&zram->init_lock);
+ return 5;
+ }
+
+ p = file_path(file, buf, PAGE_SIZE - 1);
+ if (IS_ERR(p)) {
+ ret = PTR_ERR(p);
+ goto out;
+ }
+
+ ret = strlen(p);
+ memmove(buf, p, ret);
+ buf[ret++] = '\n';
+out:
+ up_read(&zram->init_lock);
+ return ret;
+}
+
+static ssize_t backing_dev_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ char *file_name;
+ struct file *backing_dev = NULL;
+ struct inode *inode;
+ struct address_space *mapping;
+ unsigned int bitmap_sz, old_block_size = 0;
+ unsigned long nr_pages, *bitmap = NULL;
+ struct block_device *bdev = NULL;
+ int err;
+ struct zram *zram = dev_to_zram(dev);
+
+ file_name = kmalloc(PATH_MAX, GFP_KERNEL);
+ if (!file_name)
+ return -ENOMEM;
+
+ down_write(&zram->init_lock);
+ if (init_done(zram)) {
+ pr_info("Can't setup backing device for initialized device\n");
+ err = -EBUSY;
+ goto out;
+ }
+
+ strlcpy(file_name, buf, len);
+
+ backing_dev = filp_open(file_name, O_RDWR|O_LARGEFILE, 0);
+ if (IS_ERR(backing_dev)) {
+ err = PTR_ERR(backing_dev);
+ backing_dev = NULL;
+ goto out;
+ }
+
+ mapping = backing_dev->f_mapping;
+ inode = mapping->host;
+
+ /* Only block devices are supported at the moment */
+ if (!S_ISBLK(inode->i_mode)) {
+ err = -ENOTBLK;
+ goto out;
+ }
+
+ bdev = bdgrab(I_BDEV(inode));
+ err = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram);
+ if (err < 0)
+ goto out;
+
+ nr_pages = i_size_read(inode) >> PAGE_SHIFT;
+ bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long);
+ bitmap = kvzalloc(bitmap_sz, GFP_KERNEL);
+ if (!bitmap) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ old_block_size = block_size(bdev);
+ err = set_blocksize(bdev, PAGE_SIZE);
+ if (err)
+ goto out;
+
+ reset_bdev(zram);
+ spin_lock_init(&zram->bitmap_lock);
+
+ zram->old_block_size = old_block_size;
+ zram->bdev = bdev;
+ zram->backing_dev = backing_dev;
+ zram->bitmap = bitmap;
+ zram->nr_pages = nr_pages;
+ up_write(&zram->init_lock);
+
+ pr_info("setup backing device %s\n", file_name);
+ kfree(file_name);
+
+ return len;
+out:
+ if (bitmap)
+ kvfree(bitmap);
+
+ if (bdev)
+ blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
+
+ if (backing_dev)
+ filp_close(backing_dev, NULL);
+
+ up_write(&zram->init_lock);
+
+ kfree(file_name);
+
+ return err;
+}
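/*
 * Aside: one bit in the bitmap above tracks one page-sized slot of the
 * backing device. An illustrative sizing, assuming a 64-bit machine with
 * 4 KiB pages (numbers invented, not part of the patch):
 */
static size_t example_bitmap_bytes(void)
{
	u64 backing_bytes = 1ULL << 30;				/* 1 GiB backing device */
	unsigned long nr_pages = backing_bytes >> PAGE_SHIFT;	/* 262144 slots */

	/* BITS_TO_LONGS(262144) == 4096 longs == 32 KiB of bitmap */
	return BITS_TO_LONGS(nr_pages) * sizeof(long);
}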
+
+static unsigned long get_entry_bdev(struct zram *zram)
+{
+ unsigned long entry;
+
+ spin_lock(&zram->bitmap_lock);
+ /* skip bit 0 to avoid confusing it with a zram handle of 0 */
+ entry = find_next_zero_bit(zram->bitmap, zram->nr_pages, 1);
+ if (entry == zram->nr_pages) {
+ spin_unlock(&zram->bitmap_lock);
+ return 0;
+ }
+
+ set_bit(entry, zram->bitmap);
+ spin_unlock(&zram->bitmap_lock);
+
+ return entry;
+}
+
+static void put_entry_bdev(struct zram *zram, unsigned long entry)
+{
+ int was_set;
+
+ spin_lock(&zram->bitmap_lock);
+ was_set = test_and_clear_bit(entry, zram->bitmap);
+ spin_unlock(&zram->bitmap_lock);
+ WARN_ON_ONCE(!was_set);
+}
+
+void zram_page_end_io(struct bio *bio)
+{
+ struct page *page = bio->bi_io_vec[0].bv_page;
+
+ page_endio(page, op_is_write(bio_op(bio)),
+ blk_status_to_errno(bio->bi_status));
+ bio_put(bio);
+}
+
+/*
+ * Returns 1 if the submission is successful.
+ */
+static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec,
+ unsigned long entry, struct bio *parent)
+{
+ struct bio *bio;
+
+ bio = bio_alloc(GFP_ATOMIC, 1);
+ if (!bio)
+ return -ENOMEM;
+
+ bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
+ bio_set_dev(bio, zram->bdev);
+ if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, bvec->bv_offset)) {
+ bio_put(bio);
+ return -EIO;
+ }
+
+ if (!parent) {
+ bio->bi_opf = REQ_OP_READ;
+ bio->bi_end_io = zram_page_end_io;
+ } else {
+ bio->bi_opf = parent->bi_opf;
+ bio_chain(bio, parent);
+ }
+
+ submit_bio(bio);
+ return 1;
+}
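/*
 * Aside: the bi_sector arithmetic above maps bitmap slot N to the Nth
 * page-sized region of the backing device. A hypothetical helper
 * spelling that out (not part of the patch):
 */
static inline sector_t example_slot_to_sector(unsigned long slot)
{
	/* PAGE_SIZE >> 9 == 8 sectors per slot with 4 KiB pages */
	return (sector_t)slot * (PAGE_SIZE >> 9);
}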
+
+struct zram_work {
+ struct work_struct work;
+ struct zram *zram;
+ unsigned long entry;
+ struct bio *bio;
+};
+
+#if PAGE_SIZE != 4096
+static void zram_sync_read(struct work_struct *work)
+{
+ struct bio_vec bvec;
+ struct zram_work *zw = container_of(work, struct zram_work, work);
+ struct zram *zram = zw->zram;
+ unsigned long entry = zw->entry;
+ struct bio *bio = zw->bio;
+
+ read_from_bdev_async(zram, &bvec, entry, bio);
+}
+
+/*
+ * The block layer wants only one ->make_request_fn to be active at a
+ * time, so chaining this IO to the parent IO in the same context would
+ * deadlock. To avoid that, the read is issued from a worker thread.
+ */
+static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
+ unsigned long entry, struct bio *bio)
+{
+ struct zram_work work;
+
+ work.zram = zram;
+ work.entry = entry;
+ work.bio = bio;
+
+ INIT_WORK_ONSTACK(&work.work, zram_sync_read);
+ queue_work(system_unbound_wq, &work.work);
+ flush_work(&work.work);
+ destroy_work_on_stack(&work.work);
+
+ return 1;
+}
+#else
+static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
+ unsigned long entry, struct bio *bio)
+{
+ WARN_ON(1);
+ return -EIO;
+}
+#endif
+
+static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
+ unsigned long entry, struct bio *parent, bool sync)
+{
+ if (sync)
+ return read_from_bdev_sync(zram, bvec, entry, parent);
+ else
+ return read_from_bdev_async(zram, bvec, entry, parent);
+}
+
+static int write_to_bdev(struct zram *zram, struct bio_vec *bvec,
+ u32 index, struct bio *parent,
+ unsigned long *pentry)
+{
+ struct bio *bio;
+ unsigned long entry;
+
+ bio = bio_alloc(GFP_ATOMIC, 1);
+ if (!bio)
+ return -ENOMEM;
+
+ entry = get_entry_bdev(zram);
+ if (!entry) {
+ bio_put(bio);
+ return -ENOSPC;
+ }
+
+ bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
+ bio_set_dev(bio, zram->bdev);
+ if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len,
+ bvec->bv_offset)) {
+ bio_put(bio);
+ put_entry_bdev(zram, entry);
+ return -EIO;
+ }
+
+ if (!parent) {
+ bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
+ bio->bi_end_io = zram_page_end_io;
+ } else {
+ bio->bi_opf = parent->bi_opf;
+ bio_chain(bio, parent);
+ }
+
+ submit_bio(bio);
+ *pentry = entry;
+
+ return 0;
+}
+
+static void zram_wb_clear(struct zram *zram, u32 index)
+{
+ unsigned long entry;
+
+ zram_clear_flag(zram, index, ZRAM_WB);
+ entry = zram_get_element(zram, index);
+ zram_set_element(zram, index, 0);
+ put_entry_bdev(zram, entry);
+}
+
+#else
+static bool zram_wb_enabled(struct zram *zram) { return false; }
+static inline void reset_bdev(struct zram *zram) {};
+static int write_to_bdev(struct zram *zram, struct bio_vec *bvec,
+ u32 index, struct bio *parent,
+ unsigned long *pentry)
+
+{
+ return -EIO;
+}
+
+static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
+ unsigned long entry, struct bio *parent, bool sync)
+{
+ return -EIO;
+}
+static void zram_wb_clear(struct zram *zram, u32 index) {}
+#endif
+
+
/*
* We switched to per-cpu streams and this attr is not needed anymore.
* However, we will keep it around for some time, because:
@@ -308,7 +651,7 @@ static ssize_t comp_algorithm_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
struct zram *zram = dev_to_zram(dev);
- char compressor[CRYPTO_MAX_ALG_NAME];
+ char compressor[ARRAY_SIZE(zram->compressor)];
size_t sz;
strlcpy(compressor, buf, sizeof(compressor));
@@ -327,7 +670,7 @@ static ssize_t comp_algorithm_store(struct device *dev,
return -EBUSY;
}
- strlcpy(zram->compressor, compressor, sizeof(compressor));
+ strcpy(zram->compressor, compressor);
up_write(&zram->init_lock);
return len;
}
@@ -453,30 +796,6 @@ static bool zram_same_page_read(struct zram *zram, u32 index,
return false;
}
-static bool zram_same_page_write(struct zram *zram, u32 index,
- struct page *page)
-{
- unsigned long element;
- void *mem = kmap_atomic(page);
-
- if (page_same_filled(mem, &element)) {
- kunmap_atomic(mem);
- /* Free memory associated with this sector now. */
- zram_slot_lock(zram, index);
- zram_free_page(zram, index);
- zram_set_flag(zram, index, ZRAM_SAME);
- zram_set_element(zram, index, element);
- zram_slot_unlock(zram, index);
-
- atomic64_inc(&zram->stats.same_pages);
- atomic64_inc(&zram->stats.pages_stored);
- return true;
- }
- kunmap_atomic(mem);
-
- return false;
-}
-
static void zram_meta_free(struct zram *zram, u64 disksize)
{
size_t num_pages = disksize >> PAGE_SHIFT;
@@ -515,7 +834,13 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize)
*/
static void zram_free_page(struct zram *zram, size_t index)
{
- unsigned long handle = zram_get_handle(zram, index);
+ unsigned long handle;
+
+ if (zram_wb_enabled(zram) && zram_test_flag(zram, index, ZRAM_WB)) {
+ zram_wb_clear(zram, index);
+ atomic64_dec(&zram->stats.pages_stored);
+ return;
+ }
/*
* No memory is allocated for same element filled pages.
@@ -529,6 +854,7 @@ static void zram_free_page(struct zram *zram, size_t index)
return;
}
+ handle = zram_get_handle(zram, index);
if (!handle)
return;
@@ -542,13 +868,31 @@ static void zram_free_page(struct zram *zram, size_t index)
zram_set_obj_size(zram, index, 0);
}
-static int zram_decompress_page(struct zram *zram, struct page *page, u32 index)
+static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
+ struct bio *bio, bool partial_io)
{
int ret;
unsigned long handle;
unsigned int size;
void *src, *dst;
+ if (zram_wb_enabled(zram)) {
+ zram_slot_lock(zram, index);
+ if (zram_test_flag(zram, index, ZRAM_WB)) {
+ struct bio_vec bvec;
+
+ zram_slot_unlock(zram, index);
+
+ bvec.bv_page = page;
+ bvec.bv_len = PAGE_SIZE;
+ bvec.bv_offset = 0;
+ return read_from_bdev(zram, &bvec,
+ zram_get_element(zram, index),
+ bio, partial_io);
+ }
+ zram_slot_unlock(zram, index);
+ }
+
if (zram_same_page_read(zram, index, page, 0, PAGE_SIZE))
return 0;
@@ -581,7 +925,7 @@ static int zram_decompress_page(struct zram *zram, struct page *page, u32 index)
}
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
- u32 index, int offset)
+ u32 index, int offset, struct bio *bio)
{
int ret;
struct page *page;
@@ -594,7 +938,7 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
return -ENOMEM;
}
- ret = zram_decompress_page(zram, page, index);
+ ret = __zram_bvec_read(zram, page, index, bio, is_partial_io(bvec));
if (unlikely(ret))
goto out;
@@ -613,30 +957,57 @@ out:
return ret;
}
-static int zram_compress(struct zram *zram, struct zcomp_strm **zstrm,
- struct page *page,
- unsigned long *out_handle, unsigned int *out_comp_len)
+static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
+ u32 index, struct bio *bio)
{
- int ret;
- unsigned int comp_len;
- void *src;
+ int ret = 0;
unsigned long alloced_pages;
unsigned long handle = 0;
+ unsigned int comp_len = 0;
+ void *src, *dst, *mem;
+ struct zcomp_strm *zstrm;
+ struct page *page = bvec->bv_page;
+ unsigned long element = 0;
+ enum zram_pageflags flags = 0;
+ bool allow_wb = true;
+
+ mem = kmap_atomic(page);
+ if (page_same_filled(mem, &element)) {
+ kunmap_atomic(mem);
+ /* Free memory associated with this sector now. */
+ flags = ZRAM_SAME;
+ atomic64_inc(&zram->stats.same_pages);
+ goto out;
+ }
+ kunmap_atomic(mem);
compress_again:
+ zstrm = zcomp_stream_get(zram->comp);
src = kmap_atomic(page);
- ret = zcomp_compress(*zstrm, src, &comp_len);
+ ret = zcomp_compress(zstrm, src, &comp_len);
kunmap_atomic(src);
if (unlikely(ret)) {
+ zcomp_stream_put(zram->comp);
pr_err("Compression failed! err=%d\n", ret);
- if (handle)
- zs_free(zram->mem_pool, handle);
+ zs_free(zram->mem_pool, handle);
return ret;
}
- if (unlikely(comp_len > max_zpage_size))
+ if (unlikely(comp_len > max_zpage_size)) {
+ if (zram_wb_enabled(zram) && allow_wb) {
+ zcomp_stream_put(zram->comp);
+ ret = write_to_bdev(zram, bvec, index, bio, &element);
+ if (!ret) {
+ flags = ZRAM_WB;
+ ret = 1;
+ goto out;
+ }
+ allow_wb = false;
+ goto compress_again;
+ }
comp_len = PAGE_SIZE;
+ }
/*
* handle allocation has 2 paths:
@@ -663,7 +1034,6 @@ compress_again:
handle = zs_malloc(zram->mem_pool, comp_len,
GFP_NOIO | __GFP_HIGHMEM |
__GFP_MOVABLE);
- *zstrm = zcomp_stream_get(zram->comp);
if (handle)
goto compress_again;
return -ENOMEM;
@@ -673,34 +1043,11 @@ compress_again:
update_used_max(zram, alloced_pages);
if (zram->limit_pages && alloced_pages > zram->limit_pages) {
+ zcomp_stream_put(zram->comp);
zs_free(zram->mem_pool, handle);
return -ENOMEM;
}
- *out_handle = handle;
- *out_comp_len = comp_len;
- return 0;
-}
-
-static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index)
-{
- int ret;
- unsigned long handle;
- unsigned int comp_len;
- void *src, *dst;
- struct zcomp_strm *zstrm;
- struct page *page = bvec->bv_page;
-
- if (zram_same_page_write(zram, index, page))
- return 0;
-
- zstrm = zcomp_stream_get(zram->comp);
- ret = zram_compress(zram, &zstrm, page, &handle, &comp_len);
- if (ret) {
- zcomp_stream_put(zram->comp);
- return ret;
- }
-
dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
src = zstrm->buffer;
@@ -712,25 +1059,31 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index)
zcomp_stream_put(zram->comp);
zs_unmap_object(zram->mem_pool, handle);
-
+ atomic64_add(comp_len, &zram->stats.compr_data_size);
+out:
/*
* Free memory associated with this sector
* before overwriting unused sectors.
*/
zram_slot_lock(zram, index);
zram_free_page(zram, index);
- zram_set_handle(zram, index, handle);
- zram_set_obj_size(zram, index, comp_len);
+
+ if (flags) {
+ zram_set_flag(zram, index, flags);
+ zram_set_element(zram, index, element);
+ } else {
+ zram_set_handle(zram, index, handle);
+ zram_set_obj_size(zram, index, comp_len);
+ }
zram_slot_unlock(zram, index);
/* Update stats */
- atomic64_add(comp_len, &zram->stats.compr_data_size);
atomic64_inc(&zram->stats.pages_stored);
- return 0;
+ return ret;
}
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
- u32 index, int offset)
+ u32 index, int offset, struct bio *bio)
{
int ret;
struct page *page = NULL;
@@ -748,7 +1101,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
if (!page)
return -ENOMEM;
- ret = zram_decompress_page(zram, page, index);
+ ret = __zram_bvec_read(zram, page, index, bio, true);
if (ret)
goto out;
@@ -763,7 +1116,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
vec.bv_offset = 0;
}
- ret = __zram_bvec_write(zram, &vec, index);
+ ret = __zram_bvec_write(zram, &vec, index, bio);
out:
if (is_partial_io(bvec))
__free_page(page);
@@ -808,28 +1161,34 @@ static void zram_bio_discard(struct zram *zram, u32 index,
}
}
+/*
+ * Returns a negative errno if something goes wrong. Otherwise returns 0 or 1.
+ * Returns 0 if the IO request was completed synchronously.
+ * Returns 1 if the IO request was successfully submitted (asynchronously).
+ */
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
- int offset, bool is_write)
+ int offset, bool is_write, struct bio *bio)
{
unsigned long start_time = jiffies;
int rw_acct = is_write ? REQ_OP_WRITE : REQ_OP_READ;
+ struct request_queue *q = zram->disk->queue;
int ret;
- generic_start_io_acct(rw_acct, bvec->bv_len >> SECTOR_SHIFT,
+ generic_start_io_acct(q, rw_acct, bvec->bv_len >> SECTOR_SHIFT,
&zram->disk->part0);
if (!is_write) {
atomic64_inc(&zram->stats.num_reads);
- ret = zram_bvec_read(zram, bvec, index, offset);
+ ret = zram_bvec_read(zram, bvec, index, offset, bio);
flush_dcache_page(bvec->bv_page);
} else {
atomic64_inc(&zram->stats.num_writes);
- ret = zram_bvec_write(zram, bvec, index, offset);
+ ret = zram_bvec_write(zram, bvec, index, offset, bio);
}
- generic_end_io_acct(rw_acct, &zram->disk->part0, start_time);
+ generic_end_io_acct(q, rw_acct, &zram->disk->part0, start_time);
- if (unlikely(ret)) {
+ if (unlikely(ret < 0)) {
if (!is_write)
atomic64_inc(&zram->stats.failed_reads);
else
@@ -868,7 +1227,7 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
bv.bv_len = min_t(unsigned int, PAGE_SIZE - offset,
unwritten);
if (zram_bvec_rw(zram, &bv, index, offset,
- op_is_write(bio_op(bio))) < 0)
+ op_is_write(bio_op(bio)), bio) < 0)
goto out;
bv.bv_offset += bv.bv_len;
@@ -922,16 +1281,18 @@ static void zram_slot_free_notify(struct block_device *bdev,
static int zram_rw_page(struct block_device *bdev, sector_t sector,
struct page *page, bool is_write)
{
- int offset, err = -EIO;
+ int offset, ret;
u32 index;
struct zram *zram;
struct bio_vec bv;
+ if (PageTransHuge(page))
+ return -ENOTSUPP;
zram = bdev->bd_disk->private_data;
if (!valid_io_request(zram, sector, PAGE_SIZE)) {
atomic64_inc(&zram->stats.invalid_io);
- err = -EINVAL;
+ ret = -EINVAL;
goto out;
}
@@ -942,7 +1303,7 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
bv.bv_len = PAGE_SIZE;
bv.bv_offset = 0;
- err = zram_bvec_rw(zram, &bv, index, offset, is_write);
+ ret = zram_bvec_rw(zram, &bv, index, offset, is_write, NULL);
out:
/*
* If I/O fails, just return error(ie, non-zero) without
@@ -952,9 +1313,20 @@ out:
* bio->bi_end_io does things to handle the error
* (e.g., SetPageError, set_page_dirty and extra works).
*/
- if (err == 0)
+ if (unlikely(ret < 0))
+ return ret;
+
+ switch (ret) {
+ case 0:
page_endio(page, is_write, 0);
- return err;
+ break;
+ case 1:
+ ret = 0;
+ break;
+ default:
+ WARN_ON(1);
+ }
+ return ret;
}
static void zram_reset_device(struct zram *zram)
@@ -983,6 +1355,7 @@ static void zram_reset_device(struct zram *zram)
zram_meta_free(zram, disksize);
memset(&zram->stats, 0, sizeof(zram->stats));
zcomp_destroy(comp);
+ reset_bdev(zram);
}
static ssize_t disksize_store(struct device *dev,
@@ -1108,6 +1481,9 @@ static DEVICE_ATTR_WO(mem_limit);
static DEVICE_ATTR_WO(mem_used_max);
static DEVICE_ATTR_RW(max_comp_streams);
static DEVICE_ATTR_RW(comp_algorithm);
+#ifdef CONFIG_ZRAM_WRITEBACK
+static DEVICE_ATTR_RW(backing_dev);
+#endif
static struct attribute *zram_disk_attrs[] = {
&dev_attr_disksize.attr,
@@ -1118,6 +1494,9 @@ static struct attribute *zram_disk_attrs[] = {
&dev_attr_mem_used_max.attr,
&dev_attr_max_comp_streams.attr,
&dev_attr_comp_algorithm.attr,
+#ifdef CONFIG_ZRAM_WRITEBACK
+ &dev_attr_backing_dev.attr,
+#endif
&dev_attr_io_stat.attr,
&dev_attr_mm_stat.attr,
&dev_attr_debug_stat.attr,
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index e34e44d02e3e..31762db861e3 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -60,9 +60,10 @@ static const size_t max_zpage_size = PAGE_SIZE / 4 * 3;
/* Flags for zram pages (table[page_no].value) */
enum zram_pageflags {
- /* Page consists entirely of zeros */
+ /* Page consists of the same element */
ZRAM_SAME = ZRAM_FLAG_SHIFT,
ZRAM_ACCESS, /* page is now accessed */
+ ZRAM_WB, /* page is stored on backing_device */
__NR_ZRAM_PAGEFLAGS,
};
@@ -115,5 +116,13 @@ struct zram {
* zram is claimed so open request will be failed
*/
bool claim; /* Protected by bdev->bd_mutex */
+#ifdef CONFIG_ZRAM_WRITEBACK
+ struct file *backing_dev;
+ struct block_device *bdev;
+ unsigned int old_block_size;
+ unsigned long *bitmap;
+ unsigned long nr_pages;
+ spinlock_t bitmap_lock;
+#endif
};
#endif
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 35952a94875e..fae5a74dc737 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -98,6 +98,7 @@ config BT_HCIUART_NOKIA
depends on BT_HCIUART_SERDEV
depends on PM
select BT_HCIUART_H4
+ select BT_BCM
help
Nokia H4+ is serial protocol for communication between Bluetooth
device and host. This protocol is required for Bluetooth devices
@@ -167,6 +168,7 @@ config BT_HCIUART_INTEL
config BT_HCIUART_BCM
bool "Broadcom protocol support"
depends on BT_HCIUART
+ depends on BT_HCIUART_SERDEV
select BT_HCIUART_H4
select BT_BCM
help
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index b793853ff05f..204afe66de92 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -140,7 +140,8 @@ MODULE_DEVICE_TABLE(usb, ath3k_table);
#define BTUSB_ATH3012 0x80
/* This table is to load patch and sysconfig files
- * for AR3012 */
+ * for AR3012
+ */
static const struct usb_device_id ath3k_blist_tbl[] = {
/* Atheros AR3012 with sflash firmware*/
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index d4b0b655dde6..b07ca9565291 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -93,6 +93,7 @@ static void bluecard_detach(struct pcmcia_device *p_dev);
/* Hardware states */
#define CARD_READY 1
+#define CARD_ACTIVITY 2
#define CARD_HAS_PCCARD_ID 4
#define CARD_HAS_POWER_LED 5
#define CARD_HAS_ACTIVITY_LED 6
@@ -160,16 +161,14 @@ static void bluecard_activity_led_timeout(u_long arg)
struct bluecard_info *info = (struct bluecard_info *)arg;
unsigned int iobase = info->p_dev->resource[0]->start;
- if (!test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state)))
- return;
-
- if (test_bit(CARD_HAS_ACTIVITY_LED, &(info->hw_state))) {
- /* Disable activity LED */
- outb(0x08 | 0x20, iobase + 0x30);
- } else {
- /* Disable power LED */
- outb(0x00, iobase + 0x30);
+ if (test_bit(CARD_ACTIVITY, &(info->hw_state))) {
+ /* leave LED in inactive state for HZ/10 for blink effect */
+ clear_bit(CARD_ACTIVITY, &(info->hw_state));
+ mod_timer(&(info->timer), jiffies + HZ / 10);
}
+
+ /* Disable activity LED, enable power LED */
+ outb(0x08 | 0x20, iobase + 0x30);
}
@@ -177,22 +176,22 @@ static void bluecard_enable_activity_led(struct bluecard_info *info)
{
unsigned int iobase = info->p_dev->resource[0]->start;
- if (!test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state)))
+ /* don't disturb running blink timer */
+ if (timer_pending(&(info->timer)))
return;
- if (test_bit(CARD_HAS_ACTIVITY_LED, &(info->hw_state))) {
- /* Enable activity LED */
- outb(0x10 | 0x40, iobase + 0x30);
+ set_bit(CARD_ACTIVITY, &(info->hw_state));
- /* Stop the LED after HZ/4 */
- mod_timer(&(info->timer), jiffies + HZ / 4);
+ if (test_bit(CARD_HAS_ACTIVITY_LED, &(info->hw_state))) {
+ /* Enable activity LED, keep power LED enabled */
+ outb(0x18 | 0x60, iobase + 0x30);
} else {
- /* Enable power LED */
- outb(0x08 | 0x20, iobase + 0x30);
-
- /* Stop the LED after HZ/2 */
- mod_timer(&(info->timer), jiffies + HZ / 2);
+ /* Disable power LED */
+ outb(0x00, iobase + 0x30);
}
+
+ /* Stop the LED after HZ/10 */
+ mod_timer(&(info->timer), jiffies + HZ / 10);
}
@@ -625,16 +624,13 @@ static int bluecard_hci_flush(struct hci_dev *hdev)
static int bluecard_hci_open(struct hci_dev *hdev)
{
struct bluecard_info *info = hci_get_drvdata(hdev);
+ unsigned int iobase = info->p_dev->resource[0]->start;
if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state)))
bluecard_hci_set_baud_rate(hdev, DEFAULT_BAUD_RATE);
- if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) {
- unsigned int iobase = info->p_dev->resource[0]->start;
-
- /* Enable LED */
- outb(0x08 | 0x20, iobase + 0x30);
- }
+ /* Enable power LED */
+ outb(0x08 | 0x20, iobase + 0x30);
return 0;
}
@@ -643,15 +639,15 @@ static int bluecard_hci_open(struct hci_dev *hdev)
static int bluecard_hci_close(struct hci_dev *hdev)
{
struct bluecard_info *info = hci_get_drvdata(hdev);
+ unsigned int iobase = info->p_dev->resource[0]->start;
bluecard_hci_flush(hdev);
- if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) {
- unsigned int iobase = info->p_dev->resource[0]->start;
+ /* Stop LED timer */
+ del_timer_sync(&(info->timer));
- /* Disable LED */
- outb(0x00, iobase + 0x30);
- }
+ /* Disable power LED */
+ outb(0x00, iobase + 0x30);
return 0;
}
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 32dcac017395..194788739a83 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -684,14 +684,16 @@ static int bt3c_config(struct pcmcia_device *link)
unsigned long try;
/* First pass: look for a config entry that looks normal.
- Two tries: without IO aliases, then with aliases */
+ * Two tries: without IO aliases, then with aliases
+ */
for (try = 0; try < 2; try++)
if (!pcmcia_loop_config(link, bt3c_check_config, (void *) try))
goto found_port;
/* Second pass: try to find an entry that isn't picky about
- its base address, then try to grab any standard serial port
- address, and finally try to get any free port. */
+ * its base address, then try to grab any standard serial port
+ * address, and finally try to get any free port.
+ */
if (!pcmcia_loop_config(link, bt3c_check_config_notpicky, NULL))
goto found_port;
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index 9ab6cfbb831d..cc4bdefa6648 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -287,6 +287,37 @@ static struct sk_buff *btbcm_read_usb_product(struct hci_dev *hdev)
return skb;
}
+static int btbcm_read_info(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+
+ /* Read Verbose Config Version Info */
+ skb = btbcm_read_verbose_config(hdev);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ BT_INFO("%s: BCM: chip id %u", hdev->name, skb->data[1]);
+ kfree_skb(skb);
+
+ /* Read Controller Features */
+ skb = btbcm_read_controller_features(hdev);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ BT_INFO("%s: BCM: features 0x%2.2x", hdev->name, skb->data[1]);
+ kfree_skb(skb);
+
+ /* Read Local Name */
+ skb = btbcm_read_local_name(hdev);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ BT_INFO("%s: %s", hdev->name, (char *)(skb->data + 1));
+ kfree_skb(skb);
+
+ return 0;
+}
+
static const struct {
u16 subver;
const char *name;
@@ -322,13 +353,10 @@ int btbcm_initialize(struct hci_dev *hdev, char *fw_name, size_t len)
subver = le16_to_cpu(ver->lmp_subver);
kfree_skb(skb);
- /* Read Verbose Config Version Info */
- skb = btbcm_read_verbose_config(hdev);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- BT_INFO("%s: BCM: chip id %u", hdev->name, skb->data[1]);
- kfree_skb(skb);
+ /* Read controller information */
+ err = btbcm_read_info(hdev);
+ if (err)
+ return err;
switch ((rev & 0xf000) >> 12) {
case 0:
@@ -431,29 +459,10 @@ int btbcm_setup_patchram(struct hci_dev *hdev)
subver = le16_to_cpu(ver->lmp_subver);
kfree_skb(skb);
- /* Read Verbose Config Version Info */
- skb = btbcm_read_verbose_config(hdev);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- BT_INFO("%s: BCM: chip id %u", hdev->name, skb->data[1]);
- kfree_skb(skb);
-
- /* Read Controller Features */
- skb = btbcm_read_controller_features(hdev);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- BT_INFO("%s: BCM: features 0x%2.2x", hdev->name, skb->data[1]);
- kfree_skb(skb);
-
- /* Read Local Name */
- skb = btbcm_read_local_name(hdev);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- BT_INFO("%s: %s", hdev->name, (char *)(skb->data + 1));
- kfree_skb(skb);
+ /* Read controller information */
+ err = btbcm_read_info(hdev);
+ if (err)
+ return err;
switch ((rev & 0xf000) >> 12) {
case 0:
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index eb794f08b238..03341ce98c32 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -1455,7 +1455,8 @@ done:
fw_dump_ptr = fw_dump_data;
/* Dump all the memory data into single file, a userspace script will
- be used to split all the memory data to multiple files*/
+ * be used to split all the memory data to multiple files
+ */
BT_INFO("== btmrvl firmware dump to /sys/class/devcoredump start");
for (idx = 0; idx < dump_num; idx++) {
struct memory_type_mapping *entry = &mem_type_mapping_tbl[idx];
@@ -1482,7 +1483,8 @@ done:
}
/* fw_dump_data will be free in device coredump release function
- after 5 min*/
+ * after 5 min
+ */
dev_coredumpv(&card->func->dev, fw_dump_data, fw_dump_len, GFP_KERNEL);
BT_INFO("== btmrvl firmware dump to /sys/class/devcoredump end");
}
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index 28afd5d585f9..0bbdfcef2aa8 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -81,7 +81,7 @@ static int rome_patch_ver_req(struct hci_dev *hdev, u32 *rome_version)
* and lower 2 bytes from patch will be used.
*/
*rome_version = (le32_to_cpu(ver->soc_id) << 16) |
- (le16_to_cpu(ver->rome_ver) & 0x0000ffff);
+ (le16_to_cpu(ver->rome_ver) & 0x0000ffff);
out:
kfree_skb(skb);
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index 8279094dd713..d9a99b4302ea 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -279,6 +279,8 @@ static int rtl_load_config(struct hci_dev *hdev, const char *name, u8 **buff)
return ret;
ret = fw->size;
*buff = kmemdup(fw->data, ret, GFP_KERNEL);
+ if (!*buff)
+ ret = -ENOMEM;
release_firmware(fw);
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index 1cb958e199eb..c8e945d19ffe 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -144,7 +144,8 @@ static int btsdio_rx_packet(struct btsdio_data *data)
if (!skb) {
/* Out of memory. Prepare a read retry and just
* return with the expectation that the next time
- * we're called we'll have more memory. */
+ * we're called we'll have more memory.
+ */
return -ENOMEM;
}
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index 7df79bb12350..310e9c2e09b6 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -614,14 +614,16 @@ static int btuart_config(struct pcmcia_device *link)
int try;
/* First pass: look for a config entry that looks normal.
- Two tries: without IO aliases, then with aliases */
+ * Two tries: without IO aliases, then with aliases
+ */
for (try = 0; try < 2; try++)
if (!pcmcia_loop_config(link, btuart_check_config, &try))
goto found_port;
/* Second pass: try to find an entry that isn't picky about
- its base address, then try to grab any standard serial port
- address, and finally try to get any free port. */
+ * its base address, then try to grab any standard serial port
+ * address, and finally try to get any free port.
+ */
if (!pcmcia_loop_config(link, btuart_check_config_notpicky, NULL))
goto found_port;
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index fa24d693af24..7a5c06aaa181 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -66,6 +66,7 @@ static struct usb_driver btusb_driver;
#define BTUSB_BCM2045 0x40000
#define BTUSB_IFNUM_2 0x80000
#define BTUSB_CW6622 0x100000
+#define BTUSB_BCM_NO_PRODID 0x200000
static const struct usb_device_id btusb_table[] = {
/* Generic Bluetooth USB device */
@@ -131,7 +132,8 @@ static const struct usb_device_id btusb_table[] = {
{ USB_DEVICE(0x19ff, 0x0239), .driver_info = BTUSB_BCM_PATCHRAM },
/* Broadcom BCM43142A0 (Foxconn/Lenovo) */
- { USB_DEVICE(0x105b, 0xe065), .driver_info = BTUSB_BCM_PATCHRAM },
+ { USB_VENDOR_AND_INTERFACE_INFO(0x105b, 0xff, 0x01, 0x01),
+ .driver_info = BTUSB_BCM_PATCHRAM },
/* Broadcom BCM920703 (HTC Vive) */
{ USB_VENDOR_AND_INTERFACE_INFO(0x0bb4, 0xff, 0x01, 0x01),
@@ -169,6 +171,10 @@ static const struct usb_device_id btusb_table[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(0x0930, 0xff, 0x01, 0x01),
.driver_info = BTUSB_BCM_PATCHRAM },
+ /* Broadcom devices with missing product id */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0000, 0x0000, 0xff, 0x01, 0x01),
+ .driver_info = BTUSB_BCM_PATCHRAM | BTUSB_BCM_NO_PRODID },
+
/* Intel Bluetooth USB Bootloader (RAM module) */
{ USB_DEVICE(0x8087, 0x0a5a),
.driver_info = BTUSB_INTEL_BOOT | BTUSB_BROKEN_ISOC },
@@ -268,6 +274,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0489, 0xe092), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0489, 0xe0a2), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x04ca, 0x3011), .driver_info = BTUSB_QCA_ROME },
+ { USB_DEVICE(0x04ca, 0x3016), .driver_info = BTUSB_QCA_ROME },
/* Broadcom BCM2035 */
{ USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 },
@@ -357,6 +364,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x13d3, 0x3410), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x13d3, 0x3416), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x13d3, 0x3459), .driver_info = BTUSB_REALTEK },
+ { USB_DEVICE(0x13d3, 0x3494), .driver_info = BTUSB_REALTEK },
/* Additional Realtek 8821AE Bluetooth devices */
{ USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK },
@@ -656,7 +664,8 @@ static void btusb_intr_complete(struct urb *urb)
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err < 0) {
/* -EPERM: urb is being killed;
- * -ENODEV: device got disconnected */
+ * -ENODEV: device got disconnected
+ */
if (err != -EPERM && err != -ENODEV)
BT_ERR("%s urb %p failed to resubmit (%d)",
hdev->name, urb, -err);
@@ -745,7 +754,8 @@ static void btusb_bulk_complete(struct urb *urb)
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err < 0) {
/* -EPERM: urb is being killed;
- * -ENODEV: device got disconnected */
+ * -ENODEV: device got disconnected
+ */
if (err != -EPERM && err != -ENODEV)
BT_ERR("%s urb %p failed to resubmit (%d)",
hdev->name, urb, -err);
@@ -840,7 +850,8 @@ static void btusb_isoc_complete(struct urb *urb)
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err < 0) {
/* -EPERM: urb is being killed;
- * -ENODEV: device got disconnected */
+ * -ENODEV: device got disconnected
+ */
if (err != -EPERM && err != -ENODEV)
BT_ERR("%s urb %p failed to resubmit (%d)",
hdev->name, urb, -err);
@@ -952,7 +963,8 @@ static void btusb_diag_complete(struct urb *urb)
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err < 0) {
/* -EPERM: urb is being killed;
- * -ENODEV: device got disconnected */
+ * -ENODEV: device got disconnected
+ */
if (err != -EPERM && err != -ENODEV)
BT_ERR("%s urb %p failed to resubmit (%d)",
hdev->name, urb, -err);
@@ -1076,6 +1088,10 @@ static int btusb_open(struct hci_dev *hdev)
}
data->intf->needs_remote_wakeup = 1;
+ /* A device-specific wakeup source is enabled here; it is required for
+ * USB remote wakeup while the host is suspended
+ */
+ device_wakeup_enable(&data->udev->dev);
if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags))
goto done;
@@ -1139,6 +1155,7 @@ static int btusb_close(struct hci_dev *hdev)
goto failed;
data->intf->needs_remote_wakeup = 0;
+ device_wakeup_disable(&data->udev->dev);
usb_autopm_put_interface(data->intf);
failed:
@@ -2892,11 +2909,25 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info == BTUSB_IGNORE)
return -ENODEV;
+ if (id->driver_info & BTUSB_BCM_NO_PRODID) {
+ struct usb_device *udev = interface_to_usbdev(intf);
+
+ /* For the broken Broadcom devices that show 0000:0000
+ * as USB vendor and product information, check that the
+ * manufacturer string identifies them as Broadcom based
+ * devices.
+ */
+ if (!udev->manufacturer ||
+ strcmp(udev->manufacturer, "Broadcom Corp"))
+ return -ENODEV;
+ }
+
if (id->driver_info & BTUSB_ATH3012) {
struct usb_device *udev = interface_to_usbdev(intf);
/* Old firmware would otherwise let ath3k driver load
- * patch and sysconfig files */
+ * patch and sysconfig files
+ */
if (le16_to_cpu(udev->descriptor.bcdDevice) <= 0x0001)
return -ENODEV;
}
@@ -3067,6 +3098,12 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_QCA_ROME) {
data->setup_on_usb = btusb_setup_qca;
hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
+
+ /* QCA Rome devices lose their updated firmware over suspend,
+ * but the USB hub doesn't notice any status change.
+ * Explicitly request a device reset on resume.
+ */
+ set_bit(BTUSB_RESET_RESUME, &data->flags);
}
#ifdef CONFIG_BT_HCIBTUSB_RTL
@@ -3259,13 +3296,28 @@ static void play_deferred(struct btusb_data *data)
int err;
while ((urb = usb_get_from_anchor(&data->deferred))) {
+ usb_anchor_urb(urb, &data->tx_anchor);
+
err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err < 0)
+ if (err < 0) {
+ if (err != -EPERM && err != -ENODEV)
+ BT_ERR("%s urb %p submission failed (%d)",
+ data->hdev->name, urb, -err);
+ kfree(urb->setup_packet);
+ usb_unanchor_urb(urb);
+ usb_free_urb(urb);
break;
+ }
data->tx_in_flight++;
+ usb_free_urb(urb);
+ }
+
+ /* Clean up the remaining deferred urbs. */
+ while ((urb = usb_get_from_anchor(&data->deferred))) {
+ kfree(urb->setup_packet);
+ usb_free_urb(urb);
}
- usb_scuttle_anchored_urbs(&data->deferred);
}
static int btusb_resume(struct usb_interface *intf)
diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
index 85a3978b064f..5ef8000f90a9 100644
--- a/drivers/bluetooth/btwilink.c
+++ b/drivers/bluetooth/btwilink.c
@@ -93,8 +93,7 @@ static void st_reg_completion_cb(void *priv_data, int data)
complete(&lhst->wait_reg_completion);
}
-/* Called by Shared Transport layer when receive data is
- * available */
+/* Called by Shared Transport layer when receive data is available */
static long st_receive(void *priv_data, struct sk_buff *skb)
{
struct ti_st *lhst = priv_data;
@@ -198,7 +197,8 @@ static int ti_st_open(struct hci_dev *hdev)
}
/* Is ST registration callback
- * called with ERROR status? */
+ * called with ERROR status?
+ */
if (hst->reg_status != 0) {
BT_ERR("ST registration completed with invalid "
"status %d", hst->reg_status);
@@ -276,7 +276,7 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
static int bt_ti_probe(struct platform_device *pdev)
{
- static struct ti_st *hst;
+ struct ti_st *hst;
struct hci_dev *hdev;
int err;
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 6a662d0161b4..e2540113d0da 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -27,6 +27,8 @@
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/acpi.h>
+#include <linux/of.h>
+#include <linux/property.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/gpio/consumer.h>
@@ -34,6 +36,7 @@
#include <linux/interrupt.h>
#include <linux/dmi.h>
#include <linux/pm_runtime.h>
+#include <linux/serdev.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -41,11 +44,15 @@
#include "btbcm.h"
#include "hci_uart.h"
+#define BCM_NULL_PKT 0x00
+#define BCM_NULL_SIZE 0
+
#define BCM_LM_DIAG_PKT 0x07
#define BCM_LM_DIAG_SIZE 63
#define BCM_AUTOSUSPEND_DELAY 5000 /* default autosleep delay */
+/* platform device driver resources */
struct bcm_device {
struct list_head list;
@@ -59,6 +66,7 @@ struct bcm_device {
bool clk_enabled;
u32 init_speed;
+ u32 oper_speed;
int irq;
u8 irq_polarity;
@@ -68,6 +76,12 @@ struct bcm_device {
#endif
};
+/* serdev driver resources */
+struct bcm_serdev {
+ struct hci_uart hu;
+};
+
+/* generic bcm uart resources */
struct bcm_data {
struct sk_buff *rx_skb;
struct sk_buff_head txq;
@@ -79,6 +93,14 @@ struct bcm_data {
static DEFINE_MUTEX(bcm_device_lock);
static LIST_HEAD(bcm_device_list);
+static inline void host_set_baudrate(struct hci_uart *hu, unsigned int speed)
+{
+ if (hu->serdev)
+ serdev_device_set_baudrate(hu->serdev, speed);
+ else
+ hci_uart_set_baudrate(hu, speed);
+}
+
static int bcm_set_baudrate(struct hci_uart *hu, unsigned int speed)
{
struct hci_dev *hdev = hu->hdev;
@@ -176,7 +198,7 @@ static irqreturn_t bcm_host_wake(int irq, void *data)
static int bcm_request_irq(struct bcm_data *bcm)
{
struct bcm_device *bdev = bcm->dev;
- int err = 0;
+ int err;
/* If this is not a platform device, do not enable PM functionalities */
mutex_lock(&bcm_device_lock);
@@ -185,21 +207,23 @@ static int bcm_request_irq(struct bcm_data *bcm)
goto unlock;
}
- if (bdev->irq > 0) {
- err = devm_request_irq(&bdev->pdev->dev, bdev->irq,
- bcm_host_wake, IRQF_TRIGGER_RISING,
- "host_wake", bdev);
- if (err)
- goto unlock;
+ if (bdev->irq <= 0) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ err = devm_request_irq(&bdev->pdev->dev, bdev->irq, bcm_host_wake,
+ IRQF_TRIGGER_RISING, "host_wake", bdev);
+ if (err)
+ goto unlock;
- device_init_wakeup(&bdev->pdev->dev, true);
+ device_init_wakeup(&bdev->pdev->dev, true);
- pm_runtime_set_autosuspend_delay(&bdev->pdev->dev,
- BCM_AUTOSUSPEND_DELAY);
- pm_runtime_use_autosuspend(&bdev->pdev->dev);
- pm_runtime_set_active(&bdev->pdev->dev);
- pm_runtime_enable(&bdev->pdev->dev);
- }
+ pm_runtime_set_autosuspend_delay(&bdev->pdev->dev,
+ BCM_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(&bdev->pdev->dev);
+ pm_runtime_set_active(&bdev->pdev->dev);
+ pm_runtime_enable(&bdev->pdev->dev);
unlock:
mutex_unlock(&bcm_device_lock);
@@ -287,6 +311,14 @@ static int bcm_open(struct hci_uart *hu)
hu->priv = bcm;
+ /* If this is a serdev defined device, then only use
+ * serdev open primitive and skip the rest.
+ */
+ if (hu->serdev) {
+ serdev_device_open(hu->serdev);
+ goto out;
+ }
+
if (!hu->tty->dev)
goto out;
@@ -301,6 +333,7 @@ static int bcm_open(struct hci_uart *hu)
if (hu->tty->dev->parent == dev->pdev->dev.parent) {
bcm->dev = dev;
hu->init_speed = dev->init_speed;
+ hu->oper_speed = dev->oper_speed;
#ifdef CONFIG_PM
dev->hu = hu;
#endif
@@ -321,6 +354,12 @@ static int bcm_close(struct hci_uart *hu)
bt_dev_dbg(hu->hdev, "hu %p", hu);
+ /* If this is a serdev defined device, only use serdev
+ * close primitive and then continue as usual.
+ */
+ if (hu->serdev)
+ serdev_device_close(hu->serdev);
+
/* Protect bcm->dev against removal of the device or driver */
mutex_lock(&bcm_device_lock);
if (bcm_device_exists(bdev)) {
@@ -396,7 +435,7 @@ static int bcm_setup(struct hci_uart *hu)
speed = 0;
if (speed)
- hci_uart_set_baudrate(hu, speed);
+ host_set_baudrate(hu, speed);
/* Operational speed if any */
if (hu->oper_speed)
@@ -409,7 +448,7 @@ static int bcm_setup(struct hci_uart *hu)
if (speed) {
err = bcm_set_baudrate(hu, speed);
if (!err)
- hci_uart_set_baudrate(hu, speed);
+ host_set_baudrate(hu, speed);
}
finalize:
@@ -432,11 +471,19 @@ finalize:
.lsize = 0, \
.maxlen = BCM_LM_DIAG_SIZE
+#define BCM_RECV_NULL \
+ .type = BCM_NULL_PKT, \
+ .hlen = BCM_NULL_SIZE, \
+ .loff = 0, \
+ .lsize = 0, \
+ .maxlen = BCM_NULL_SIZE
+
static const struct h4_recv_pkt bcm_recv_pkts[] = {
{ H4_RECV_ACL, .recv = hci_recv_frame },
{ H4_RECV_SCO, .recv = hci_recv_frame },
{ H4_RECV_EVENT, .recv = hci_recv_frame },
{ BCM_RECV_LM_DIAG, .recv = hci_recv_diag },
+ { BCM_RECV_NULL, .recv = hci_recv_diag },
};
static int bcm_recv(struct hci_uart *hu, const void *data, int count)
@@ -697,8 +744,10 @@ static int bcm_resource(struct acpi_resource *ares, void *data)
case ACPI_RESOURCE_TYPE_SERIAL_BUS:
sb = &ares->data.uart_serial_bus;
- if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_UART)
+ if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_UART) {
dev->init_speed = sb->default_baud_rate;
+ dev->oper_speed = 4000000;
+ }
break;
default:
@@ -851,7 +900,6 @@ static const struct hci_uart_proto bcm_proto = {
.name = "Broadcom",
.manufacturer = 15,
.init_speed = 115200,
- .oper_speed = 4000000,
.open = bcm_open,
.close = bcm_close,
.flush = bcm_flush,
@@ -901,9 +949,57 @@ static struct platform_driver bcm_driver = {
},
};
+static int bcm_serdev_probe(struct serdev_device *serdev)
+{
+ struct bcm_serdev *bcmdev;
+ u32 speed;
+ int err;
+
+ bcmdev = devm_kzalloc(&serdev->dev, sizeof(*bcmdev), GFP_KERNEL);
+ if (!bcmdev)
+ return -ENOMEM;
+
+ bcmdev->hu.serdev = serdev;
+ serdev_device_set_drvdata(serdev, bcmdev);
+
+ err = device_property_read_u32(&serdev->dev, "max-speed", &speed);
+ if (!err)
+ bcmdev->hu.oper_speed = speed;
+
+ return hci_uart_register_device(&bcmdev->hu, &bcm_proto);
+}
+
+static void bcm_serdev_remove(struct serdev_device *serdev)
+{
+ struct bcm_serdev *bcmdev = serdev_device_get_drvdata(serdev);
+
+ hci_uart_unregister_device(&bcmdev->hu);
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id bcm_bluetooth_of_match[] = {
+ { .compatible = "brcm,bcm43438-bt" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, bcm_bluetooth_of_match);
+#endif
+
+static struct serdev_device_driver bcm_serdev_driver = {
+ .probe = bcm_serdev_probe,
+ .remove = bcm_serdev_remove,
+ .driver = {
+ .name = "hci_uart_bcm",
+ .of_match_table = of_match_ptr(bcm_bluetooth_of_match),
+ },
+};
+
int __init bcm_init(void)
{
+ /* For now, we need to keep both platform device
+ * driver (ACPI generated) and serdev driver (DT).
+ */
platform_driver_register(&bcm_driver);
+ serdev_device_driver_register(&bcm_serdev_driver);
return hci_uart_register_proto(&bcm_proto);
}
@@ -911,6 +1007,7 @@ int __init bcm_init(void)
int __exit bcm_deinit(void)
{
platform_driver_unregister(&bcm_driver);
+ serdev_device_driver_unregister(&bcm_serdev_driver);
return hci_uart_unregister_proto(&bcm_proto);
}
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index 4e328d7d47bb..3b82a87224a9 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -172,7 +172,7 @@ struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
const struct h4_recv_pkt *pkts, int pkts_count)
{
struct hci_uart *hu = hci_get_drvdata(hdev);
- u8 alignment = hu->alignment;
+ u8 alignment = hu->alignment ? hu->alignment : 1;
while (count) {
int i, len;
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 8397b716fa65..a746627e784e 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -457,7 +457,8 @@ static int hci_uart_tty_open(struct tty_struct *tty)
BT_DBG("tty %p", tty);
/* Error if the tty has no write op instead of leaving an exploitable
- hole */
+ * hole
+ */
if (tty->ops->write == NULL)
return -EOPNOTSUPP;
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index c982943f0747..424c15aa7bb7 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -622,7 +622,8 @@ static int download_firmware(struct ll_device *lldev)
cmd = (struct hci_command *)action_ptr;
if (cmd->opcode == 0xff36) {
/* ignore remote change
- * baud rate HCI VS command */
+ * baud rate HCI VS command
+ */
bt_dev_warn(lldev->hu.hdev, "change remote baud rate command in firmware");
break;
}
@@ -742,14 +743,8 @@ static int hci_ti_probe(struct serdev_device *serdev)
static void hci_ti_remove(struct serdev_device *serdev)
{
struct ll_device *lldev = serdev_device_get_drvdata(serdev);
- struct hci_uart *hu = &lldev->hu;
- struct hci_dev *hdev = hu->hdev;
- cancel_work_sync(&hu->write_work);
-
- hci_unregister_dev(hdev);
- hci_free_dev(hdev);
- hu->proto->close(hu);
+ hci_uart_unregister_device(&lldev->hu);
}
static const struct of_device_id hci_ti_of_match[] = {
diff --git a/drivers/bluetooth/hci_nokia.c b/drivers/bluetooth/hci_nokia.c
index 181a15b549e5..3539fd03f47e 100644
--- a/drivers/bluetooth/hci_nokia.c
+++ b/drivers/bluetooth/hci_nokia.c
@@ -767,16 +767,8 @@ static int nokia_bluetooth_serdev_probe(struct serdev_device *serdev)
static void nokia_bluetooth_serdev_remove(struct serdev_device *serdev)
{
struct nokia_bt_dev *btdev = serdev_device_get_drvdata(serdev);
- struct hci_uart *hu = &btdev->hu;
- struct hci_dev *hdev = hu->hdev;
- cancel_work_sync(&hu->write_work);
-
- hci_unregister_dev(hdev);
- hci_free_dev(hdev);
- hu->proto->close(hu);
-
- pm_runtime_disable(&btdev->serdev->dev);
+ hci_uart_unregister_device(&btdev->hu);
}
static int nokia_bluetooth_runtime_suspend(struct device *dev)
diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
index aea930101dd2..b725ac4f7ff6 100644
--- a/drivers/bluetooth/hci_serdev.c
+++ b/drivers/bluetooth/hci_serdev.c
@@ -354,3 +354,16 @@ err_alloc:
return err;
}
EXPORT_SYMBOL_GPL(hci_uart_register_device);
+
+void hci_uart_unregister_device(struct hci_uart *hu)
+{
+ struct hci_dev *hdev = hu->hdev;
+
+ hci_unregister_dev(hdev);
+ hci_free_dev(hdev);
+
+ cancel_work_sync(&hu->write_work);
+
+ hu->proto->close(hu);
+}
+EXPORT_SYMBOL_GPL(hci_uart_unregister_device);
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index c6e9e1cf63f8..d9cd95d81149 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -112,6 +112,7 @@ struct hci_uart {
int hci_uart_register_proto(const struct hci_uart_proto *p);
int hci_uart_unregister_proto(const struct hci_uart_proto *p);
int hci_uart_register_device(struct hci_uart *hu, const struct hci_uart_proto *p);
+void hci_uart_unregister_device(struct hci_uart *hu);
int hci_uart_tx_wakeup(struct hci_uart *hu);
int hci_uart_init_ready(struct hci_uart *hu);
diff --git a/drivers/bus/uniphier-system-bus.c b/drivers/bus/uniphier-system-bus.c
index 1e6e0269edcc..f76be6bd6eb3 100644
--- a/drivers/bus/uniphier-system-bus.c
+++ b/drivers/bus/uniphier-system-bus.c
@@ -256,10 +256,23 @@ static int uniphier_system_bus_probe(struct platform_device *pdev)
uniphier_system_bus_set_reg(priv);
+ platform_set_drvdata(pdev, priv);
+
/* Now, the bus is configured. Populate platform_devices below it */
return of_platform_default_populate(dev->of_node, NULL, dev);
}
+static int __maybe_unused uniphier_system_bus_resume(struct device *dev)
+{
+ uniphier_system_bus_set_reg(dev_get_drvdata(dev));
+
+ return 0;
+}
+
+static const struct dev_pm_ops uniphier_system_bus_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(NULL, uniphier_system_bus_resume)
+};
+
static const struct of_device_id uniphier_system_bus_match[] = {
{ .compatible = "socionext,uniphier-system-bus" },
{ /* sentinel */ }
@@ -271,6 +284,7 @@ static struct platform_driver uniphier_system_bus_driver = {
.driver = {
.name = "uniphier-system-bus",
.of_match_table = uniphier_system_bus_match,
+ .pm = &uniphier_system_bus_pm_ops,
},
};
module_platform_driver(uniphier_system_bus_driver);
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index ccd239ab879f..623714344600 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -161,7 +161,7 @@ config VIRTIO_CONSOLE
depends on VIRTIO && TTY
select HVC_DRIVER
help
- Virtio console for use with lguest and other hypervisors.
+ Virtio console for use with hypervisors.
Also serves as a general-purpose serial device for data
transfer between the guest and host. Character devices at
diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c
index dcbbb4ea3cc1..89527bae4602 100644
--- a/drivers/char/agp/ali-agp.c
+++ b/drivers/char/agp/ali-agp.c
@@ -381,7 +381,7 @@ static void agp_ali_remove(struct pci_dev *pdev)
agp_put_bridge(bridge);
}
-static struct pci_device_id agp_ali_pci_table[] = {
+static const struct pci_device_id agp_ali_pci_table[] = {
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
index 5fbd333e4c6d..b450544dcaf0 100644
--- a/drivers/char/agp/amd-k7-agp.c
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -21,7 +21,7 @@
#define AMD_TLBFLUSH 0x0c /* In mmio region (32-bit register) */
#define AMD_CACHEENTRY 0x10 /* In mmio region (32-bit register) */
-static struct pci_device_id agp_amdk7_pci_table[];
+static const struct pci_device_id agp_amdk7_pci_table[];
struct amd_page_map {
unsigned long *real;
@@ -508,7 +508,7 @@ static int agp_amdk7_resume(struct pci_dev *pdev)
#endif /* CONFIG_PM */
/* must be the same order as name table above */
-static struct pci_device_id agp_amdk7_pci_table[] = {
+static const struct pci_device_id agp_amdk7_pci_table[] = {
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index c99cd19d9147..e50c29c97ca7 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -610,7 +610,7 @@ static int agp_amd64_resume(struct pci_dev *pdev)
#endif /* CONFIG_PM */
-static struct pci_device_id agp_amd64_pci_table[] = {
+static const struct pci_device_id agp_amd64_pci_table[] = {
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c
index 0b5ec7af2414..88b4cbee4dac 100644
--- a/drivers/char/agp/ati-agp.c
+++ b/drivers/char/agp/ati-agp.c
@@ -540,7 +540,7 @@ static void agp_ati_remove(struct pci_dev *pdev)
agp_put_bridge(bridge);
}
-static struct pci_device_id agp_ati_pci_table[] = {
+static const struct pci_device_id agp_ati_pci_table[] = {
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c
index 533cb6d229b8..7f88490b5479 100644
--- a/drivers/char/agp/efficeon-agp.c
+++ b/drivers/char/agp/efficeon-agp.c
@@ -427,7 +427,7 @@ static int agp_efficeon_resume(struct pci_dev *pdev)
}
#endif
-static struct pci_device_id agp_efficeon_pci_table[] = {
+static const struct pci_device_id agp_efficeon_pci_table[] = {
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 0a21daed5b62..9e4f27a6cb5a 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -828,7 +828,7 @@ static int agp_intel_resume(struct pci_dev *pdev)
}
#endif
-static struct pci_device_id agp_intel_pci_table[] = {
+static const struct pci_device_id agp_intel_pci_table[] = {
#define ID(x) \
{ \
.class = (PCI_CLASS_BRIDGE_HOST << 8), \
diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c
index 6c8d39cb566e..828b34445203 100644
--- a/drivers/char/agp/nvidia-agp.c
+++ b/drivers/char/agp/nvidia-agp.c
@@ -420,7 +420,7 @@ static int agp_nvidia_resume(struct pci_dev *pdev)
#endif
-static struct pci_device_id agp_nvidia_pci_table[] = {
+static const struct pci_device_id agp_nvidia_pci_table[] = {
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
diff --git a/drivers/char/agp/sis-agp.c b/drivers/char/agp/sis-agp.c
index 2c74038da459..14909fc5d767 100644
--- a/drivers/char/agp/sis-agp.c
+++ b/drivers/char/agp/sis-agp.c
@@ -237,7 +237,7 @@ static int agp_sis_resume(struct pci_dev *pdev)
#endif /* CONFIG_PM */
-static struct pci_device_id agp_sis_pci_table[] = {
+static const struct pci_device_id agp_sis_pci_table[] = {
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c
index fdced547ad59..c381c8e396fc 100644
--- a/drivers/char/agp/uninorth-agp.c
+++ b/drivers/char/agp/uninorth-agp.c
@@ -679,7 +679,7 @@ static void agp_uninorth_remove(struct pci_dev *pdev)
agp_put_bridge(bridge);
}
-static struct pci_device_id agp_uninorth_pci_table[] = {
+static const struct pci_device_id agp_uninorth_pci_table[] = {
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
index b67263d6e34b..c0a5b1f3a986 100644
--- a/drivers/char/applicom.c
+++ b/drivers/char/applicom.c
@@ -67,7 +67,7 @@ static char *applicom_pci_devnames[] = {
"PCI2000PFB"
};
-static struct pci_device_id applicom_pci_tbl[] = {
+static const struct pci_device_id applicom_pci_tbl[] = {
{ PCI_VDEVICE(APPLICOM, PCI_DEVICE_ID_APPLICOM_PCIGENERIC) },
{ PCI_VDEVICE(APPLICOM, PCI_DEVICE_ID_APPLICOM_PCI2000IBS_CAN) },
{ PCI_VDEVICE(APPLICOM, PCI_DEVICE_ID_APPLICOM_PCI2000PFB) },
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 1b223c32a8ae..95a031e9eced 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -13,10 +13,8 @@ menuconfig HW_RANDOM
that's usually called /dev/hwrng, and which exposes one
of possibly several hardware random number generators.
- These hardware random number generators do not feed directly
- into the kernel's random number generator. That is usually
- handled by the "rngd" daemon. Documentation/hw_random.txt
- has more information.
+ These hardware random number generators do feed into the
+ kernel's random number generator entropy pool.
If unsure, say Y.
@@ -255,6 +253,20 @@ config HW_RANDOM_MXC_RNGA
If unsure, say Y.
+config HW_RANDOM_IMX_RNGC
+ tristate "Freescale i.MX RNGC Random Number Generator"
+ depends on ARCH_MXC
+ default HW_RANDOM
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator Version C hardware found on some Freescale i.MX
+ processors. Version B is also supported by this driver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called imx-rngc.
+
+ If unsure, say Y.
+
config HW_RANDOM_NOMADIK
tristate "ST-Ericsson Nomadik Random Number Generator support"
depends on ARCH_NOMADIK
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index b085975ec1d2..39a67defac67 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_HW_RANDOM_PASEMI) += pasemi-rng.o
obj-$(CONFIG_HW_RANDOM_VIRTIO) += virtio-rng.o
obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o
obj-$(CONFIG_HW_RANDOM_MXC_RNGA) += mxc-rnga.o
+obj-$(CONFIG_HW_RANDOM_IMX_RNGC) += imx-rngc.o
obj-$(CONFIG_HW_RANDOM_OCTEON) += octeon-rng.o
obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 503a41dfa193..9701ac7d8b47 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -28,7 +28,10 @@
#define RNG_MODULE_NAME "hw_random"
static struct hwrng *current_rng;
+/* the current rng has been explicitly chosen by user via sysfs */
+static int cur_rng_set_by_user;
static struct task_struct *hwrng_fill;
+/* list of registered rngs, sorted by decreasing quality */
static LIST_HEAD(rng_list);
/* Protects rng_list and current_rng */
static DEFINE_MUTEX(rng_mutex);
@@ -303,6 +306,7 @@ static ssize_t hwrng_attr_current_store(struct device *dev,
list_for_each_entry(rng, &rng_list, list) {
if (sysfs_streq(rng->name, buf)) {
err = 0;
+ cur_rng_set_by_user = 1;
if (rng != current_rng)
err = set_current_rng(rng);
break;
@@ -351,16 +355,27 @@ static ssize_t hwrng_attr_available_show(struct device *dev,
return strlen(buf);
}
+static ssize_t hwrng_attr_selected_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", cur_rng_set_by_user);
+}
+
static DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR,
hwrng_attr_current_show,
hwrng_attr_current_store);
static DEVICE_ATTR(rng_available, S_IRUGO,
hwrng_attr_available_show,
NULL);
+static DEVICE_ATTR(rng_selected, S_IRUGO,
+ hwrng_attr_selected_show,
+ NULL);
static struct attribute *rng_dev_attrs[] = {
&dev_attr_rng_current.attr,
&dev_attr_rng_available.attr,
+ &dev_attr_rng_selected.attr,
NULL
};
@@ -417,6 +432,7 @@ int hwrng_register(struct hwrng *rng)
{
int err = -EINVAL;
struct hwrng *old_rng, *tmp;
+ struct list_head *rng_list_ptr;
if (!rng->name || (!rng->data_read && !rng->read))
goto out;
@@ -432,14 +448,27 @@ int hwrng_register(struct hwrng *rng)
init_completion(&rng->cleanup_done);
complete(&rng->cleanup_done);
+ /* rng_list is sorted by decreasing quality */
+ list_for_each(rng_list_ptr, &rng_list) {
+ tmp = list_entry(rng_list_ptr, struct hwrng, list);
+ if (tmp->quality < rng->quality)
+ break;
+ }
+ list_add_tail(&rng->list, rng_list_ptr);
+
old_rng = current_rng;
err = 0;
- if (!old_rng) {
+ if (!old_rng ||
+ (!cur_rng_set_by_user && rng->quality > old_rng->quality)) {
+ /*
+ * Set the new rng as current: it provides better entropy
+ * quality and the current rng was not explicitly chosen
+ * by userspace.
+ */
err = set_current_rng(rng);
if (err)
goto out_unlock;
}
- list_add_tail(&rng->list, &rng_list);
if (old_rng && !rng->init) {
/*
@@ -466,12 +495,13 @@ void hwrng_unregister(struct hwrng *rng)
list_del(&rng->list);
if (current_rng == rng) {
drop_current_rng();
+ cur_rng_set_by_user = 0;
+ /* rng_list is sorted by quality, use the best (=first) one */
if (!list_empty(&rng_list)) {
- struct hwrng *tail;
-
- tail = list_entry(rng_list.prev, struct hwrng, list);
+ struct hwrng *new_rng;
- set_current_rng(tail);
+ new_rng = list_entry(rng_list.next, struct hwrng, list);
+ set_current_rng(new_rng);
}
}
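
With rng_list now kept sorted by quality and the best device auto-selected unless userspace has pinned one through rng_current, a driver influences selection via the quality field it reports at registration. A hedged sketch (device name, read callback and quality value are illustrative only):

#include <linux/hw_random.h>

static int my_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
	/* fill buf with up to max bytes of hardware entropy,
	 * return the number of bytes written or a negative errno */
	return 0;
}

static struct hwrng my_rng = {
	.name    = "my-rng",
	.read    = my_rng_read,
	.quality = 900,	/* estimated entropy, in bits per 1024 bits of output */
};

/* hwrng_register(&my_rng) inserts the device into rng_list at the
 * position given by its quality and makes it the current rng if it
 * beats the existing one and no rng was chosen via sysfs. */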
diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c
new file mode 100644
index 000000000000..88db42d30760
--- /dev/null
+++ b/drivers/char/hw_random/imx-rngc.c
@@ -0,0 +1,331 @@
+/*
+ * RNG driver for Freescale RNGC
+ *
+ * Copyright (C) 2008-2012 Freescale Semiconductor, Inc.
+ * Copyright (C) 2017 Martin Kaiser <martin@kaiser.cx>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/hw_random.h>
+#include <linux/completion.h>
+#include <linux/io.h>
+
+#define RNGC_COMMAND 0x0004
+#define RNGC_CONTROL 0x0008
+#define RNGC_STATUS 0x000C
+#define RNGC_ERROR 0x0010
+#define RNGC_FIFO 0x0014
+
+#define RNGC_CMD_CLR_ERR 0x00000020
+#define RNGC_CMD_CLR_INT 0x00000010
+#define RNGC_CMD_SEED 0x00000002
+#define RNGC_CMD_SELF_TEST 0x00000001
+
+#define RNGC_CTRL_MASK_ERROR 0x00000040
+#define RNGC_CTRL_MASK_DONE 0x00000020
+
+#define RNGC_STATUS_ERROR 0x00010000
+#define RNGC_STATUS_FIFO_LEVEL_MASK 0x00000f00
+#define RNGC_STATUS_FIFO_LEVEL_SHIFT 8
+#define RNGC_STATUS_SEED_DONE 0x00000020
+#define RNGC_STATUS_ST_DONE 0x00000010
+
+#define RNGC_ERROR_STATUS_STAT_ERR 0x00000008
+
+#define RNGC_TIMEOUT 3000 /* 3 sec */
+
+
+static bool self_test = true;
+module_param(self_test, bool, 0);
+
+struct imx_rngc {
+ struct device *dev;
+ struct clk *clk;
+ void __iomem *base;
+ struct hwrng rng;
+ struct completion rng_op_done;
+ /*
+ * err_reg is written only by the irq handler and read only
+ * when interrupts are masked, so no spinlock is needed
+ */
+ u32 err_reg;
+};
+
+
+static inline void imx_rngc_irq_mask_clear(struct imx_rngc *rngc)
+{
+ u32 ctrl, cmd;
+
+ /* mask interrupts */
+ ctrl = readl(rngc->base + RNGC_CONTROL);
+ ctrl |= RNGC_CTRL_MASK_DONE | RNGC_CTRL_MASK_ERROR;
+ writel(ctrl, rngc->base + RNGC_CONTROL);
+
+ /*
+ * CLR_INT clears the interrupt only if there's no error.
+ * CLR_ERR clears the interrupt and the error register if
+ * there is an error.
+ */
+ cmd = readl(rngc->base + RNGC_COMMAND);
+ cmd |= RNGC_CMD_CLR_INT | RNGC_CMD_CLR_ERR;
+ writel(cmd, rngc->base + RNGC_COMMAND);
+}
+
+static inline void imx_rngc_irq_unmask(struct imx_rngc *rngc)
+{
+ u32 ctrl;
+
+ ctrl = readl(rngc->base + RNGC_CONTROL);
+ ctrl &= ~(RNGC_CTRL_MASK_DONE | RNGC_CTRL_MASK_ERROR);
+ writel(ctrl, rngc->base + RNGC_CONTROL);
+}
+
+static int imx_rngc_self_test(struct imx_rngc *rngc)
+{
+ u32 cmd;
+ int ret;
+
+ imx_rngc_irq_unmask(rngc);
+
+ /* run self test */
+ cmd = readl(rngc->base + RNGC_COMMAND);
+ writel(cmd | RNGC_CMD_SELF_TEST, rngc->base + RNGC_COMMAND);
+
+ ret = wait_for_completion_timeout(&rngc->rng_op_done, RNGC_TIMEOUT);
+ if (!ret) {
+ imx_rngc_irq_mask_clear(rngc);
+ return -ETIMEDOUT;
+ }
+
+ if (rngc->err_reg != 0)
+ return -EIO;
+
+ return 0;
+}
+
+static int imx_rngc_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng);
+ unsigned int status;
+ unsigned int level;
+ int retval = 0;
+
+ while (max >= sizeof(u32)) {
+ status = readl(rngc->base + RNGC_STATUS);
+
+ /* is there some error while reading this random number? */
+ if (status & RNGC_STATUS_ERROR)
+ break;
+
+ /* how many random numbers are in FIFO? [0-16] */
+ level = (status & RNGC_STATUS_FIFO_LEVEL_MASK) >>
+ RNGC_STATUS_FIFO_LEVEL_SHIFT;
+
+ if (level) {
+ /* retrieve a random number from FIFO */
+ *(u32 *)data = readl(rngc->base + RNGC_FIFO);
+
+ retval += sizeof(u32);
+ data += sizeof(u32);
+ max -= sizeof(u32);
+ }
+ }
+
+ return retval ? retval : -EIO;
+}
+
+static irqreturn_t imx_rngc_irq(int irq, void *priv)
+{
+ struct imx_rngc *rngc = (struct imx_rngc *)priv;
+ u32 status;
+
+ /*
+ * Clearing the interrupt will also clear the error register;
+ * read the error and status registers before clearing.
+ */
+ status = readl(rngc->base + RNGC_STATUS);
+ rngc->err_reg = readl(rngc->base + RNGC_ERROR);
+
+ imx_rngc_irq_mask_clear(rngc);
+
+ if (status & (RNGC_STATUS_SEED_DONE | RNGC_STATUS_ST_DONE))
+ complete(&rngc->rng_op_done);
+
+ return IRQ_HANDLED;
+}
+
+static int imx_rngc_init(struct hwrng *rng)
+{
+ struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng);
+ u32 cmd;
+ int ret;
+
+ /* clear error */
+ cmd = readl(rngc->base + RNGC_COMMAND);
+ writel(cmd | RNGC_CMD_CLR_ERR, rngc->base + RNGC_COMMAND);
+
+ /* create seed, repeat while there is some statistical error */
+ do {
+ imx_rngc_irq_unmask(rngc);
+
+ /* seed creation */
+ cmd = readl(rngc->base + RNGC_COMMAND);
+ writel(cmd | RNGC_CMD_SEED, rngc->base + RNGC_COMMAND);
+
+ ret = wait_for_completion_timeout(&rngc->rng_op_done,
+ RNGC_TIMEOUT);
+
+ if (!ret) {
+ imx_rngc_irq_mask_clear(rngc);
+ return -ETIMEDOUT;
+ }
+
+ } while (rngc->err_reg == RNGC_ERROR_STATUS_STAT_ERR);
+
+ return rngc->err_reg ? -EIO : 0;
+}
+
+static int imx_rngc_probe(struct platform_device *pdev)
+{
+ struct imx_rngc *rngc;
+ struct resource *res;
+ int ret;
+ int irq;
+
+ rngc = devm_kzalloc(&pdev->dev, sizeof(*rngc), GFP_KERNEL);
+ if (!rngc)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ rngc->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(rngc->base))
+ return PTR_ERR(rngc->base);
+
+ rngc->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(rngc->clk)) {
+ dev_err(&pdev->dev, "Can not get rng_clk\n");
+ return PTR_ERR(rngc->clk);
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(&pdev->dev, "Couldn't get irq %d\n", irq);
+ return irq;
+ }
+
+ ret = clk_prepare_enable(rngc->clk);
+ if (ret)
+ return ret;
+
+ ret = devm_request_irq(&pdev->dev,
+ irq, imx_rngc_irq, 0, pdev->name, (void *)rngc);
+ if (ret) {
+ dev_err(rngc->dev, "Can't get interrupt working.\n");
+ goto err;
+ }
+
+ init_completion(&rngc->rng_op_done);
+
+ rngc->rng.name = pdev->name;
+ rngc->rng.init = imx_rngc_init;
+ rngc->rng.read = imx_rngc_read;
+
+ rngc->dev = &pdev->dev;
+ platform_set_drvdata(pdev, rngc);
+
+ imx_rngc_irq_mask_clear(rngc);
+
+ if (self_test) {
+ ret = imx_rngc_self_test(rngc);
+ if (ret) {
+ dev_err(rngc->dev, "FSL RNGC self test failed.\n");
+ goto err;
+ }
+ }
+
+ ret = hwrng_register(&rngc->rng);
+ if (ret) {
+ dev_err(&pdev->dev, "FSL RNGC registering failed (%d)\n", ret);
+ goto err;
+ }
+
+ dev_info(&pdev->dev, "Freescale RNGC registered.\n");
+ return 0;
+
+err:
+ clk_disable_unprepare(rngc->clk);
+
+ return ret;
+}
+
+static int __exit imx_rngc_remove(struct platform_device *pdev)
+{
+ struct imx_rngc *rngc = platform_get_drvdata(pdev);
+
+ hwrng_unregister(&rngc->rng);
+
+ clk_disable_unprepare(rngc->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int imx_rngc_suspend(struct device *dev)
+{
+ struct imx_rngc *rngc = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(rngc->clk);
+
+ return 0;
+}
+
+static int imx_rngc_resume(struct device *dev)
+{
+ struct imx_rngc *rngc = dev_get_drvdata(dev);
+
+ clk_prepare_enable(rngc->clk);
+
+ return 0;
+}
+
+static const struct dev_pm_ops imx_rngc_pm_ops = {
+ .suspend = imx_rngc_suspend,
+ .resume = imx_rngc_resume,
+};
+#endif
+
+static const struct of_device_id imx_rngc_dt_ids[] = {
+ { .compatible = "fsl,imx25-rngb", .data = NULL, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx_rngc_dt_ids);
+
+static struct platform_driver imx_rngc_driver = {
+ .driver = {
+ .name = "imx_rngc",
+#ifdef CONFIG_PM
+ .pm = &imx_rngc_pm_ops,
+#endif
+ .of_match_table = imx_rngc_dt_ids,
+ },
+ .remove = __exit_p(imx_rngc_remove),
+};
+
+module_platform_driver_probe(imx_rngc_driver, imx_rngc_probe);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("H/W RNGC driver for i.MX");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 985973855005..36f47e8d06a3 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -2812,7 +2812,7 @@ static struct platform_driver ipmi_driver = {
};
#ifdef CONFIG_PARISC
-static int ipmi_parisc_probe(struct parisc_device *dev)
+static int __init ipmi_parisc_probe(struct parisc_device *dev)
{
struct smi_info *info;
int rv;
@@ -2850,22 +2850,24 @@ static int ipmi_parisc_probe(struct parisc_device *dev)
return 0;
}
-static int ipmi_parisc_remove(struct parisc_device *dev)
+static int __exit ipmi_parisc_remove(struct parisc_device *dev)
{
cleanup_one_si(dev_get_drvdata(&dev->dev));
return 0;
}
-static const struct parisc_device_id ipmi_parisc_tbl[] = {
+static const struct parisc_device_id ipmi_parisc_tbl[] __initconst = {
{ HPHW_MC, HVERSION_REV_ANY_ID, 0x004, 0xC0 },
{ 0, }
};
-static struct parisc_driver ipmi_parisc_driver = {
+MODULE_DEVICE_TABLE(parisc, ipmi_parisc_tbl);
+
+static struct parisc_driver ipmi_parisc_driver __refdata = {
.name = "ipmi",
.id_table = ipmi_parisc_tbl,
.probe = ipmi_parisc_probe,
- .remove = ipmi_parisc_remove,
+ .remove = __exit_p(ipmi_parisc_remove),
};
#endif /* CONFIG_PARISC */
diff --git a/drivers/char/mwave/smapi.c b/drivers/char/mwave/smapi.c
index 8c5411a8f33f..691f5898bb32 100644
--- a/drivers/char/mwave/smapi.c
+++ b/drivers/char/mwave/smapi.c
@@ -128,10 +128,11 @@ int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings)
{
int bRC = -EIO;
unsigned short usAX, usBX, usCX, usDX, usDI, usSI;
- unsigned short ausDspBases[] = { 0x0030, 0x4E30, 0x8E30, 0xCE30, 0x0130, 0x0350, 0x0070, 0x0DB0 };
- unsigned short ausUartBases[] = { 0x03F8, 0x02F8, 0x03E8, 0x02E8 };
- unsigned short numDspBases = 8;
- unsigned short numUartBases = 4;
+ static const unsigned short ausDspBases[] = {
+ 0x0030, 0x4E30, 0x8E30, 0xCE30,
+ 0x0130, 0x0350, 0x0070, 0x0DB0 };
+ static const unsigned short ausUartBases[] = {
+ 0x03F8, 0x02F8, 0x03E8, 0x02E8 };
PRINTK_1(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg entry\n");
@@ -148,7 +149,7 @@ int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings)
pSettings->bDSPEnabled = ((usCX & 0x0001) != 0);
pSettings->usDspIRQ = usSI & 0x00FF;
pSettings->usDspDMA = (usSI & 0xFF00) >> 8;
- if ((usDI & 0x00FF) < numDspBases) {
+ if ((usDI & 0x00FF) < ARRAY_SIZE(ausDspBases)) {
pSettings->usDspBaseIO = ausDspBases[usDI & 0x00FF];
} else {
pSettings->usDspBaseIO = 0;
@@ -176,7 +177,7 @@ int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings)
pSettings->bModemEnabled = ((usCX & 0x0001) != 0);
pSettings->usUartIRQ = usSI & 0x000F;
- if (((usSI & 0xFF00) >> 8) < numUartBases) {
+ if (((usSI & 0xFF00) >> 8) < ARRAY_SIZE(ausUartBases)) {
pSettings->usUartBaseIO = ausUartBases[(usSI & 0xFF00) >> 8];
} else {
pSettings->usUartBaseIO = 0;
@@ -205,15 +206,16 @@ int smapi_set_DSP_cfg(void)
int bRC = -EIO;
int i;
unsigned short usAX, usBX, usCX, usDX, usDI, usSI;
- unsigned short ausDspBases[] = { 0x0030, 0x4E30, 0x8E30, 0xCE30, 0x0130, 0x0350, 0x0070, 0x0DB0 };
- unsigned short ausUartBases[] = { 0x03F8, 0x02F8, 0x03E8, 0x02E8 };
- unsigned short ausDspIrqs[] = { 5, 7, 10, 11, 15 };
- unsigned short ausUartIrqs[] = { 3, 4 };
-
- unsigned short numDspBases = 8;
- unsigned short numUartBases = 4;
- unsigned short numDspIrqs = 5;
- unsigned short numUartIrqs = 2;
+ static const unsigned short ausDspBases[] = {
+ 0x0030, 0x4E30, 0x8E30, 0xCE30,
+ 0x0130, 0x0350, 0x0070, 0x0DB0 };
+ static const unsigned short ausUartBases[] = {
+ 0x03F8, 0x02F8, 0x03E8, 0x02E8 };
+ static const unsigned short ausDspIrqs[] = {
+ 5, 7, 10, 11, 15 };
+ static const unsigned short ausUartIrqs[] = {
+ 3, 4 };
+
unsigned short dspio_index = 0, uartio_index = 0;
PRINTK_5(TRACE_SMAPI,
@@ -221,11 +223,11 @@ int smapi_set_DSP_cfg(void)
mwave_3780i_irq, mwave_3780i_io, mwave_uart_irq, mwave_uart_io);
if (mwave_3780i_io) {
- for (i = 0; i < numDspBases; i++) {
+ for (i = 0; i < ARRAY_SIZE(ausDspBases); i++) {
if (mwave_3780i_io == ausDspBases[i])
break;
}
- if (i == numDspBases) {
+ if (i == ARRAY_SIZE(ausDspBases)) {
PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_3780i_io address %x. Aborting.\n", mwave_3780i_io);
return bRC;
}
@@ -233,22 +235,22 @@ int smapi_set_DSP_cfg(void)
}
if (mwave_3780i_irq) {
- for (i = 0; i < numDspIrqs; i++) {
+ for (i = 0; i < ARRAY_SIZE(ausDspIrqs); i++) {
if (mwave_3780i_irq == ausDspIrqs[i])
break;
}
- if (i == numDspIrqs) {
+ if (i == ARRAY_SIZE(ausDspIrqs)) {
PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_3780i_irq %x. Aborting.\n", mwave_3780i_irq);
return bRC;
}
}
if (mwave_uart_io) {
- for (i = 0; i < numUartBases; i++) {
+ for (i = 0; i < ARRAY_SIZE(ausUartBases); i++) {
if (mwave_uart_io == ausUartBases[i])
break;
}
- if (i == numUartBases) {
+ if (i == ARRAY_SIZE(ausUartBases)) {
PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_uart_io address %x. Aborting.\n", mwave_uart_io);
return bRC;
}
@@ -257,11 +259,11 @@ int smapi_set_DSP_cfg(void)
if (mwave_uart_irq) {
- for (i = 0; i < numUartIrqs; i++) {
+ for (i = 0; i < ARRAY_SIZE(ausUartIrqs); i++) {
if (mwave_uart_irq == ausUartIrqs[i])
break;
}
- if (i == numUartIrqs) {
+ if (i == ARRAY_SIZE(ausUartIrqs)) {
PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_uart_irq %x. Aborting.\n", mwave_uart_irq);
return bRC;
}
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index 3e73bcdf9e65..d256110ba672 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -101,9 +101,6 @@ static DEFINE_IDA(ida_index);
#define PP_BUFFER_SIZE 1024
#define PARDEVICE_MAX 8
-/* ROUND_UP macro from fs/select.c */
-#define ROUND_UP(x,y) (((x)+(y)-1)/(y))
-
static DEFINE_MUTEX(pp_do_mutex);
/* define fixed sized ioctl cmd for y2038 migration */
diff --git a/drivers/char/random.c b/drivers/char/random.c
index afa3ce7d3e72..8ad92707e45f 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1492,7 +1492,7 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
print_once = true;
#endif
- pr_notice("random: %s called from %pF with crng_init=%d\n",
+ pr_notice("random: %s called from %pS with crng_init=%d\n",
func_name, caller, crng_init);
}
diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c
index 572a51704e67..6210bff46341 100644
--- a/drivers/char/tlclk.c
+++ b/drivers/char/tlclk.c
@@ -766,7 +766,7 @@ static struct attribute *tlclk_sysfs_entries[] = {
NULL
};
-static struct attribute_group tlclk_attribute_group = {
+static const struct attribute_group tlclk_attribute_group = {
.name = NULL, /* put in device directory */
.attrs = tlclk_sysfs_entries,
};
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 67ec9d3d04f5..0eca20c5a80c 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -164,14 +164,7 @@ static int tpm_class_shutdown(struct device *dev)
chip->ops = NULL;
up_write(&chip->ops_sem);
}
- /* Allow bus- and device-specific code to run. Note: since chip->ops
- * is NULL, more-specific shutdown code will not be able to issue TPM
- * commands.
- */
- if (dev->bus && dev->bus->shutdown)
- dev->bus->shutdown(dev);
- else if (dev->driver && dev->driver->shutdown)
- dev->driver->shutdown(dev);
+
return 0;
}
@@ -214,7 +207,7 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev,
device_initialize(&chip->devs);
chip->dev.class = tpm_class;
- chip->dev.class->shutdown = tpm_class_shutdown;
+ chip->dev.class->shutdown_pre = tpm_class_shutdown;
chip->dev.release = tpm_dev_release;
chip->dev.parent = pdev;
chip->dev.groups = chip->groups;
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index ad843eb02ae7..23f33f95d4a6 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1130,7 +1130,7 @@ static const struct file_operations port_fops = {
* We turn the characters into a scatter-gather list, add it to the
* output queue and then kick the Host. Then we sit here waiting for
* it to finish: inefficient in theory, but in practice
- * implementations will do it immediately (lguest's Launcher does).
+ * implementations will do it immediately.
*/
static int put_chars(u32 vtermno, const char *buf, int count)
{
@@ -1308,7 +1308,7 @@ static struct attribute *port_sysfs_entries[] = {
NULL
};
-static struct attribute_group port_attribute_group = {
+static const struct attribute_group port_attribute_group = {
.name = NULL, /* put in device directory */
.attrs = port_sysfs_entries,
};
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index 3e6b23c3453c..067396bedf22 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -86,8 +86,7 @@
#include <linux/cdev.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/uaccess.h>
#ifdef CONFIG_OF
@@ -222,6 +221,8 @@ static const struct config_registers v6_config_registers = {
* hwicap_command_desync - Send a DESYNC command to the ICAP port.
* @drvdata: a pointer to the drvdata.
*
+ * Returns: '0' on success and failure value on error
+ *
* This command desynchronizes the ICAP After this command, a
* bitstream containing a NULL packet, followed by a SYNCH packet is
* required before the ICAP will recognize commands.
@@ -251,10 +252,12 @@ static int hwicap_command_desync(struct hwicap_drvdata *drvdata)
* hwicap_get_configuration_register - Query a configuration register.
* @drvdata: a pointer to the drvdata.
* @reg: a constant which represents the configuration
- * register value to be returned.
- * Examples: XHI_IDCODE, XHI_FLR.
+ * register value to be returned.
+ * Examples: XHI_IDCODE, XHI_FLR.
* @reg_data: returns the value of the register.
*
+ * Returns: '0' on success and failure value on error
+ *
* Sends a query packet to the ICAP and then receives the response.
* The icap is left in Synched state.
*/
@@ -320,7 +323,8 @@ static int hwicap_initialize_hwicap(struct hwicap_drvdata *drvdata)
dev_dbg(drvdata->dev, "initializing\n");
/* Abort any current transaction, to make sure we have the
- * ICAP in a good state. */
+ * ICAP in a good state.
+ */
dev_dbg(drvdata->dev, "Reset...\n");
drvdata->config->reset(drvdata);
@@ -632,7 +636,6 @@ static int hwicap_setup(struct device *dev, int id,
drvdata = kzalloc(sizeof(struct hwicap_drvdata), GFP_KERNEL);
if (!drvdata) {
- dev_err(dev, "Couldn't allocate device private record\n");
retval = -ENOMEM;
goto failed0;
}
@@ -759,20 +762,20 @@ static int hwicap_of_probe(struct platform_device *op,
id = of_get_property(op->dev.of_node, "port-number", NULL);
/* It's most likely that we're using V4, if the family is not
- specified */
+ * specified
+ */
regs = &v4_config_registers;
family = of_get_property(op->dev.of_node, "xlnx,family", NULL);
if (family) {
- if (!strcmp(family, "virtex2p")) {
+ if (!strcmp(family, "virtex2p"))
regs = &v2_config_registers;
- } else if (!strcmp(family, "virtex4")) {
+ else if (!strcmp(family, "virtex4"))
regs = &v4_config_registers;
- } else if (!strcmp(family, "virtex5")) {
+ else if (!strcmp(family, "virtex5"))
regs = &v5_config_registers;
- } else if (!strcmp(family, "virtex6")) {
+ else if (!strcmp(family, "virtex6"))
regs = &v6_config_registers;
- }
}
return hwicap_setup(&op->dev, id ? *id : -1, &res, config,
regs);
@@ -802,20 +805,20 @@ static int hwicap_drv_probe(struct platform_device *pdev)
return -ENODEV;
/* It's most likely that we're using V4, if the family is not
- specified */
+ * specified
+ */
regs = &v4_config_registers;
family = pdev->dev.platform_data;
if (family) {
- if (!strcmp(family, "virtex2p")) {
+ if (!strcmp(family, "virtex2p"))
regs = &v2_config_registers;
- } else if (!strcmp(family, "virtex4")) {
+ else if (!strcmp(family, "virtex4"))
regs = &v4_config_registers;
- } else if (!strcmp(family, "virtex5")) {
+ else if (!strcmp(family, "virtex5"))
regs = &v5_config_registers;
- } else if (!strcmp(family, "virtex6")) {
+ else if (!strcmp(family, "virtex6"))
regs = &v6_config_registers;
- }
}
return hwicap_setup(&pdev->dev, pdev->id, res,
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.h b/drivers/char/xilinx_hwicap/xilinx_hwicap.h
index 38b145eaf24d..6b963d1c8ba3 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.h
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.h
@@ -62,11 +62,13 @@ struct hwicap_drvdata {
struct hwicap_driver_config {
/* Read configuration data given by size into the data buffer.
- Return 0 if successful. */
+ * Return 0 if successful.
+ */
int (*get_configuration)(struct hwicap_drvdata *drvdata, u32 *data,
u32 size);
/* Write configuration data given by size from the data buffer.
- Return 0 if successful. */
+ * Return 0 if successful.
+ */
int (*set_configuration)(struct hwicap_drvdata *drvdata, u32 *data,
u32 size);
/* Get the status register, bit pattern given by:
@@ -193,11 +195,12 @@ struct config_registers {
* hwicap_type_1_read - Generates a Type 1 read packet header.
* @reg: is the address of the register to be read back.
*
+ * Return:
* Generates a Type 1 read packet header, which is used to indirectly
* read registers in the configuration logic. This packet must then
* be sent through the icap device, and a return packet received with
* the information.
- **/
+ */
static inline u32 hwicap_type_1_read(u32 reg)
{
return (XHI_TYPE_1 << XHI_TYPE_SHIFT) |
@@ -208,7 +211,9 @@ static inline u32 hwicap_type_1_read(u32 reg)
/**
* hwicap_type_1_write - Generates a Type 1 write packet header
* @reg: is the address of the register to be read back.
- **/
+ *
+ * Return: Type 1 write packet header
+ */
static inline u32 hwicap_type_1_write(u32 reg)
{
return (XHI_TYPE_1 << XHI_TYPE_SHIFT) |
diff --git a/drivers/clk/clk-gemini.c b/drivers/clk/clk-gemini.c
index c391a49aaaff..b4cf2f699a21 100644
--- a/drivers/clk/clk-gemini.c
+++ b/drivers/clk/clk-gemini.c
@@ -237,6 +237,18 @@ static int gemini_reset(struct reset_controller_dev *rcdev,
BIT(GEMINI_RESET_CPU1) | BIT(id));
}
+static int gemini_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return 0;
+}
+
+static int gemini_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return 0;
+}
+
static int gemini_reset_status(struct reset_controller_dev *rcdev,
unsigned long id)
{
@@ -253,6 +265,8 @@ static int gemini_reset_status(struct reset_controller_dev *rcdev,
static const struct reset_control_ops gemini_reset_ops = {
.reset = gemini_reset,
+ .assert = gemini_reset_assert,
+ .deassert = gemini_reset_deassert,
.status = gemini_reset_status,
};
diff --git a/drivers/clk/keystone/sci-clk.c b/drivers/clk/keystone/sci-clk.c
index 43b0f2f08df2..9cdf9d5050ac 100644
--- a/drivers/clk/keystone/sci-clk.c
+++ b/drivers/clk/keystone/sci-clk.c
@@ -22,6 +22,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/soc/ti/ti_sci_protocol.h>
+#include <linux/bsearch.h>
#define SCI_CLK_SSC_ENABLE BIT(0)
#define SCI_CLK_ALLOW_FREQ_CHANGE BIT(1)
@@ -44,6 +45,7 @@ struct sci_clk_data {
* @dev: Device pointer for the clock provider
* @clk_data: Clock data
* @clocks: Clocks array for this device
+ * @num_clocks: Total number of clocks for this provider
*/
struct sci_clk_provider {
const struct ti_sci_handle *sci;
@@ -51,6 +53,7 @@ struct sci_clk_provider {
struct device *dev;
const struct sci_clk_data *clk_data;
struct clk_hw **clocks;
+ int num_clocks;
};
/**
@@ -58,7 +61,6 @@ struct sci_clk_provider {
* @hw: Hardware clock cookie for common clock framework
* @dev_id: Device index
* @clk_id: Clock index
- * @node: Clocks list link
* @provider: Master clock provider
* @flags: Flags for the clock
*/
@@ -66,7 +68,6 @@ struct sci_clk {
struct clk_hw hw;
u16 dev_id;
u8 clk_id;
- struct list_head node;
struct sci_clk_provider *provider;
u8 flags;
};
@@ -367,6 +368,19 @@ err:
return &sci_clk->hw;
}
+static int _cmp_sci_clk(const void *a, const void *b)
+{
+ const struct sci_clk *ca = a;
+ const struct sci_clk *cb = *(struct sci_clk **)b;
+
+ if (ca->dev_id == cb->dev_id && ca->clk_id == cb->clk_id)
+ return 0;
+ if (ca->dev_id > cb->dev_id ||
+ (ca->dev_id == cb->dev_id && ca->clk_id > cb->clk_id))
+ return 1;
+ return -1;
+}
+
/**
* sci_clk_get - Xlate function for getting clock handles
* @clkspec: device tree clock specifier
@@ -380,29 +394,22 @@ err:
static struct clk_hw *sci_clk_get(struct of_phandle_args *clkspec, void *data)
{
struct sci_clk_provider *provider = data;
- u16 dev_id;
- u8 clk_id;
- const struct sci_clk_data *clks = provider->clk_data;
- struct clk_hw **clocks = provider->clocks;
+ struct sci_clk **clk;
+ struct sci_clk key;
if (clkspec->args_count != 2)
return ERR_PTR(-EINVAL);
- dev_id = clkspec->args[0];
- clk_id = clkspec->args[1];
+ key.dev_id = clkspec->args[0];
+ key.clk_id = clkspec->args[1];
- while (clks->num_clks) {
- if (clks->dev == dev_id) {
- if (clk_id >= clks->num_clks)
- return ERR_PTR(-EINVAL);
-
- return clocks[clk_id];
- }
+ clk = bsearch(&key, provider->clocks, provider->num_clocks,
+ sizeof(clk), _cmp_sci_clk);
- clks++;
- }
+ if (!clk)
+ return ERR_PTR(-ENODEV);
- return ERR_PTR(-ENODEV);
+ return &(*clk)->hw;
}
static int ti_sci_init_clocks(struct sci_clk_provider *p)
@@ -410,18 +417,29 @@ static int ti_sci_init_clocks(struct sci_clk_provider *p)
const struct sci_clk_data *data = p->clk_data;
struct clk_hw *hw;
int i;
+ int num_clks = 0;
while (data->num_clks) {
- p->clocks = devm_kcalloc(p->dev, data->num_clks,
- sizeof(struct sci_clk),
- GFP_KERNEL);
- if (!p->clocks)
- return -ENOMEM;
+ num_clks += data->num_clks;
+ data++;
+ }
+ p->num_clocks = num_clks;
+
+ p->clocks = devm_kcalloc(p->dev, num_clks, sizeof(struct sci_clk),
+ GFP_KERNEL);
+ if (!p->clocks)
+ return -ENOMEM;
+
+ num_clks = 0;
+
+ data = p->clk_data;
+
+ while (data->num_clks) {
for (i = 0; i < data->num_clks; i++) {
hw = _sci_clk_build(p, data->dev, i);
if (!IS_ERR(hw)) {
- p->clocks[i] = hw;
+ p->clocks[num_clks++] = hw;
continue;
}
diff --git a/drivers/clk/meson/clk-mpll.c b/drivers/clk/meson/clk-mpll.c
index 39eab69fe51a..44a5a535ca63 100644
--- a/drivers/clk/meson/clk-mpll.c
+++ b/drivers/clk/meson/clk-mpll.c
@@ -161,6 +161,13 @@ static int mpll_set_rate(struct clk_hw *hw,
reg = PARM_SET(p->width, p->shift, reg, 1);
writel(reg, mpll->base + p->reg_off);
+ p = &mpll->ssen;
+ if (p->width != 0) {
+ reg = readl(mpll->base + p->reg_off);
+ reg = PARM_SET(p->width, p->shift, reg, 1);
+ writel(reg, mpll->base + p->reg_off);
+ }
+
p = &mpll->n2;
reg = readl(mpll->base + p->reg_off);
reg = PARM_SET(p->width, p->shift, reg, n2);
diff --git a/drivers/clk/meson/clkc.h b/drivers/clk/meson/clkc.h
index d6feafe8bd6c..1629da9b4141 100644
--- a/drivers/clk/meson/clkc.h
+++ b/drivers/clk/meson/clkc.h
@@ -118,6 +118,7 @@ struct meson_clk_mpll {
struct parm sdm_en;
struct parm n2;
struct parm en;
+ struct parm ssen;
spinlock_t *lock;
};
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index a897ea45327c..a7ea5f3da89d 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -528,6 +528,11 @@ static struct meson_clk_mpll gxbb_mpll0 = {
.shift = 14,
.width = 1,
},
+ .ssen = {
+ .reg_off = HHI_MPLL_CNTL,
+ .shift = 25,
+ .width = 1,
+ },
.lock = &clk_lock,
.hw.init = &(struct clk_init_data){
.name = "mpll0",
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
index bb3f1de876b1..6ec512ad2598 100644
--- a/drivers/clk/meson/meson8b.c
+++ b/drivers/clk/meson/meson8b.c
@@ -267,6 +267,11 @@ static struct meson_clk_mpll meson8b_mpll0 = {
.shift = 14,
.width = 1,
},
+ .ssen = {
+ .reg_off = HHI_MPLL_CNTL,
+ .shift = 25,
+ .width = 1,
+ },
.lock = &clk_lock,
.hw.init = &(struct clk_init_data){
.name = "mpll0",
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 0748a0b333c5..9a6476aa7d81 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -1283,16 +1283,16 @@ static const struct samsung_pll_rate_table exynos5420_pll2550x_24mhz_tbl[] __ini
static const struct samsung_pll_rate_table exynos5420_epll_24mhz_tbl[] = {
PLL_36XX_RATE(600000000U, 100, 2, 1, 0),
PLL_36XX_RATE(400000000U, 200, 3, 2, 0),
- PLL_36XX_RATE(393216000U, 197, 3, 2, 25690),
- PLL_36XX_RATE(361267200U, 301, 5, 2, 3671),
+ PLL_36XX_RATE(393216003U, 197, 3, 2, -25690),
+ PLL_36XX_RATE(361267218U, 301, 5, 2, 3671),
PLL_36XX_RATE(200000000U, 200, 3, 3, 0),
- PLL_36XX_RATE(196608000U, 197, 3, 3, -25690),
- PLL_36XX_RATE(180633600U, 301, 5, 3, 3671),
- PLL_36XX_RATE(131072000U, 131, 3, 3, 4719),
+ PLL_36XX_RATE(196608001U, 197, 3, 3, -25690),
+ PLL_36XX_RATE(180633609U, 301, 5, 3, 3671),
+ PLL_36XX_RATE(131072006U, 131, 3, 3, 4719),
PLL_36XX_RATE(100000000U, 200, 3, 4, 0),
- PLL_36XX_RATE(65536000U, 131, 3, 4, 4719),
- PLL_36XX_RATE(49152000U, 197, 3, 5, 25690),
- PLL_36XX_RATE(32768000U, 131, 3, 5, 4719),
+ PLL_36XX_RATE( 65536003U, 131, 3, 4, 4719),
+ PLL_36XX_RATE( 49152000U, 197, 3, 5, -25690),
+ PLL_36XX_RATE( 32768001U, 131, 3, 5, 4719),
};
static struct samsung_pll_clock exynos5x_plls[nr_plls] __initdata = {
diff --git a/drivers/clk/sunxi-ng/Makefile b/drivers/clk/sunxi-ng/Makefile
index 0c45fa50283d..45a5910379a5 100644
--- a/drivers/clk/sunxi-ng/Makefile
+++ b/drivers/clk/sunxi-ng/Makefile
@@ -1,5 +1,6 @@
# Common objects
lib-$(CONFIG_SUNXI_CCU) += ccu_common.o
+lib-$(CONFIG_SUNXI_CCU) += ccu_mmc_timing.o
lib-$(CONFIG_SUNXI_CCU) += ccu_reset.o
# Base clock types
diff --git a/drivers/clk/sunxi-ng/ccu-sun5i.c b/drivers/clk/sunxi-ng/ccu-sun5i.c
index 5372bf8be5e6..31d7ffda9aab 100644
--- a/drivers/clk/sunxi-ng/ccu-sun5i.c
+++ b/drivers/clk/sunxi-ng/ccu-sun5i.c
@@ -184,7 +184,7 @@ static struct ccu_mux cpu_clk = {
.hw.init = CLK_HW_INIT_PARENTS("cpu",
cpu_parents,
&ccu_mux_ops,
- CLK_IS_CRITICAL),
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL),
}
};
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
index 947f9f6e05d2..e43acebdfbcd 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
@@ -418,14 +418,8 @@ static SUNXI_CCU_PHASE(mmc1_sample_clk, "mmc1-sample", "mmc1",
static SUNXI_CCU_PHASE(mmc1_output_clk, "mmc1-output", "mmc1",
0x08c, 8, 3, 0);
-/* TODO Support MMC2 clock's new timing mode. */
-static SUNXI_CCU_MP_WITH_MUX_GATE(mmc2_clk, "mmc2", mod0_default_parents,
- 0x090,
- 0, 4, /* M */
- 16, 2, /* P */
- 24, 2, /* mux */
- BIT(31), /* gate */
- 0);
+static SUNXI_CCU_MP_MMC_WITH_MUX_GATE(mmc2_clk, "mmc2", mod0_default_parents,
+ 0x090, 0);
static SUNXI_CCU_PHASE(mmc2_sample_clk, "mmc2-sample", "mmc2",
0x090, 20, 3, 0);
diff --git a/drivers/clk/sunxi-ng/ccu_common.h b/drivers/clk/sunxi-ng/ccu_common.h
index d6fdd7a789aa..cadd1a9f93b6 100644
--- a/drivers/clk/sunxi-ng/ccu_common.h
+++ b/drivers/clk/sunxi-ng/ccu_common.h
@@ -23,6 +23,10 @@
#define CCU_FEATURE_FIXED_POSTDIV BIT(3)
#define CCU_FEATURE_ALL_PREDIV BIT(4)
#define CCU_FEATURE_LOCK_REG BIT(5)
+#define CCU_FEATURE_MMC_TIMING_SWITCH BIT(6)
+
+/* MMC timing mode switch bit */
+#define CCU_MMC_NEW_TIMING_MODE BIT(30)
struct device_node;
diff --git a/drivers/clk/sunxi-ng/ccu_mmc_timing.c b/drivers/clk/sunxi-ng/ccu_mmc_timing.c
new file mode 100644
index 000000000000..f9869f7353c0
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu_mmc_timing.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2017 Chen-Yu Tsai. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clk/sunxi-ng.h>
+
+#include "ccu_common.h"
+
+/**
+ * sunxi_ccu_set_mmc_timing_mode: Configure the MMC clock timing mode
+ * @clk: clock to be configured
+ * @new_mode: true for new timing mode introduced in A83T and later
+ *
+ * Returns 0 on success, -ENOTSUPP if the clock does not support
+ * switching modes.
+ */
+int sunxi_ccu_set_mmc_timing_mode(struct clk *clk, bool new_mode)
+{
+ struct clk_hw *hw = __clk_get_hw(clk);
+ struct ccu_common *cm = hw_to_ccu_common(hw);
+ unsigned long flags;
+ u32 val;
+
+ if (!(cm->features & CCU_FEATURE_MMC_TIMING_SWITCH))
+ return -ENOTSUPP;
+
+ spin_lock_irqsave(cm->lock, flags);
+
+ val = readl(cm->base + cm->reg);
+ if (new_mode)
+ val |= CCU_MMC_NEW_TIMING_MODE;
+ else
+ val &= ~CCU_MMC_NEW_TIMING_MODE;
+ writel(val, cm->base + cm->reg);
+
+ spin_unlock_irqrestore(cm->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sunxi_ccu_set_mmc_timing_mode);
+
+/**
+ * sunxi_ccu_get_mmc_timing_mode: Get the current MMC clock timing mode
+ * @clk: clock to query
+ *
+ * Returns 0 if the clock is in old timing mode, > 0 if it is in
+ * new timing mode, and -ENOTSUPP if the clock does not support
+ * this function.
+ */
+int sunxi_ccu_get_mmc_timing_mode(struct clk *clk)
+{
+ struct clk_hw *hw = __clk_get_hw(clk);
+ struct ccu_common *cm = hw_to_ccu_common(hw);
+
+ if (!(cm->features & CCU_FEATURE_MMC_TIMING_SWITCH))
+ return -ENOTSUPP;
+
+ return !!(readl(cm->base + cm->reg) & CCU_MMC_NEW_TIMING_MODE);
+}
+EXPORT_SYMBOL_GPL(sunxi_ccu_get_mmc_timing_mode);
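
An MMC host driver would use the two exported helpers on its module clock roughly as sketched below; this is illustrative only, and the calls succeed only on clocks created with CCU_FEATURE_MMC_TIMING_SWITCH:

#include <linux/clk.h>
#include <linux/clk/sunxi-ng.h>

static int enable_new_timing_mode(struct clk *mmc_clk)
{
	int ret;

	ret = sunxi_ccu_set_mmc_timing_mode(mmc_clk, true);
	if (ret)	/* -ENOTSUPP if the clock has no timing switch */
		return ret;

	/* returns > 0 when the new timing mode is active, 0 for the old mode */
	return sunxi_ccu_get_mmc_timing_mode(mmc_clk);
}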
diff --git a/drivers/clk/sunxi-ng/ccu_mp.c b/drivers/clk/sunxi-ng/ccu_mp.c
index b917ad7a386c..688855e7dc8c 100644
--- a/drivers/clk/sunxi-ng/ccu_mp.c
+++ b/drivers/clk/sunxi-ng/ccu_mp.c
@@ -172,3 +172,83 @@ const struct clk_ops ccu_mp_ops = {
.recalc_rate = ccu_mp_recalc_rate,
.set_rate = ccu_mp_set_rate,
};
+
+/*
+ * Support for MMC timing mode switching
+ *
+ * The MMC clocks on some SoCs support switching between old and
+ * new timing modes. A platform specific API is provided to query
+ * and set the timing mode on supported SoCs.
+ *
+ * In addition, a special class of ccu_mp_ops is provided, which
+ * takes into account the timing mode switch. When the new timing
+ * mode is active, the clock output rate is halved. This new class
+ * is a wrapper around the generic ccu_mp_ops. When clock rates
+ * are passed through to ccu_mp_ops callbacks, they are doubled
+ * if the new timing mode bit is set, to account for the post
+ * divider. Conversely, when clock rates are passed back, they
+ * are halved if the mode bit is set.
+ */
+
+static unsigned long ccu_mp_mmc_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ unsigned long rate = ccu_mp_recalc_rate(hw, parent_rate);
+ struct ccu_common *cm = hw_to_ccu_common(hw);
+ u32 val = readl(cm->base + cm->reg);
+
+ if (val & CCU_MMC_NEW_TIMING_MODE)
+ return rate / 2;
+ return rate;
+}
+
+static int ccu_mp_mmc_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct ccu_common *cm = hw_to_ccu_common(hw);
+ u32 val = readl(cm->base + cm->reg);
+ int ret;
+
+ /* adjust the requested clock rate */
+ if (val & CCU_MMC_NEW_TIMING_MODE) {
+ req->rate *= 2;
+ req->min_rate *= 2;
+ req->max_rate *= 2;
+ }
+
+ ret = ccu_mp_determine_rate(hw, req);
+
+ /* re-adjust the requested clock rate back */
+ if (val & CCU_MMC_NEW_TIMING_MODE) {
+ req->rate /= 2;
+ req->min_rate /= 2;
+ req->max_rate /= 2;
+ }
+
+ return ret;
+}
+
+static int ccu_mp_mmc_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct ccu_common *cm = hw_to_ccu_common(hw);
+ u32 val = readl(cm->base + cm->reg);
+
+ if (val & CCU_MMC_NEW_TIMING_MODE)
+ rate *= 2;
+
+ return ccu_mp_set_rate(hw, rate, parent_rate);
+}
+
+const struct clk_ops ccu_mp_mmc_ops = {
+ .disable = ccu_mp_disable,
+ .enable = ccu_mp_enable,
+ .is_enabled = ccu_mp_is_enabled,
+
+ .get_parent = ccu_mp_get_parent,
+ .set_parent = ccu_mp_set_parent,
+
+ .determine_rate = ccu_mp_mmc_determine_rate,
+ .recalc_rate = ccu_mp_mmc_recalc_rate,
+ .set_rate = ccu_mp_mmc_set_rate,
+};
diff --git a/drivers/clk/sunxi-ng/ccu_mp.h b/drivers/clk/sunxi-ng/ccu_mp.h
index 915625e97d98..aaef11d747ea 100644
--- a/drivers/clk/sunxi-ng/ccu_mp.h
+++ b/drivers/clk/sunxi-ng/ccu_mp.h
@@ -14,6 +14,7 @@
#ifndef _CCU_MP_H_
#define _CCU_MP_H_
+#include <linux/bitops.h>
#include <linux/clk-provider.h>
#include "ccu_common.h"
@@ -74,4 +75,33 @@ static inline struct ccu_mp *hw_to_ccu_mp(struct clk_hw *hw)
extern const struct clk_ops ccu_mp_ops;
+/*
+ * Special class of M-P clock that supports MMC timing modes
+ *
+ * Since the MMC clock registers all follow the same layout, we can
+ * simplify the macro for this particular case. In addition, as
+ * switching modes also affects the output clock rate, we need to
+ * have CLK_GET_RATE_NOCACHE for all these types of clocks.
+ */
+
+#define SUNXI_CCU_MP_MMC_WITH_MUX_GATE(_struct, _name, _parents, _reg, \
+ _flags) \
+ struct ccu_mp _struct = { \
+ .enable = BIT(31), \
+ .m = _SUNXI_CCU_DIV(0, 4), \
+ .p = _SUNXI_CCU_DIV(16, 2), \
+ .mux = _SUNXI_CCU_MUX(24, 2), \
+ .common = { \
+ .reg = _reg, \
+ .features = CCU_FEATURE_MMC_TIMING_SWITCH, \
+ .hw.init = CLK_HW_INIT_PARENTS(_name, \
+ _parents, \
+ &ccu_mp_mmc_ops, \
+ CLK_GET_RATE_NOCACHE | \
+ _flags), \
+ } \
+ }
+
+extern const struct clk_ops ccu_mp_mmc_ops;
+
#endif /* _CCU_MP_H_ */
diff --git a/drivers/clk/sunxi-ng/ccu_reset.c b/drivers/clk/sunxi-ng/ccu_reset.c
index 6c31d48783a7..1dc4e98ea802 100644
--- a/drivers/clk/sunxi-ng/ccu_reset.c
+++ b/drivers/clk/sunxi-ng/ccu_reset.c
@@ -8,6 +8,7 @@
* the License, or (at your option) any later version.
*/
+#include <linux/delay.h>
#include <linux/io.h>
#include <linux/reset-controller.h>
@@ -49,7 +50,18 @@ static int ccu_reset_deassert(struct reset_controller_dev *rcdev,
return 0;
}
+static int ccu_reset_reset(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ ccu_reset_assert(rcdev, id);
+ udelay(10);
+ ccu_reset_deassert(rcdev, id);
+
+ return 0;
+}
+
const struct reset_control_ops ccu_reset_ops = {
.assert = ccu_reset_assert,
.deassert = ccu_reset_deassert,
+ .reset = ccu_reset_reset,
};
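
With a .reset callback wired up, a consumer can trigger the assert/10us-delay/deassert pulse through the generic reset API instead of open-coding it; a hedged sketch with device handling reduced to the minimum:

#include <linux/reset.h>

static int my_periph_hw_reset(struct device *dev)
{
	struct reset_control *rst;

	rst = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	/* ends up in ccu_reset_reset(): assert, ~10us delay, deassert */
	return reset_control_reset(rst);
}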
diff --git a/drivers/clk/x86/clk-pmc-atom.c b/drivers/clk/x86/clk-pmc-atom.c
index f99abc1106f0..08ef69945ffb 100644
--- a/drivers/clk/x86/clk-pmc-atom.c
+++ b/drivers/clk/x86/clk-pmc-atom.c
@@ -186,6 +186,13 @@ static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id,
pclk->reg = base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE;
spin_lock_init(&pclk->lock);
+ /*
+ * If the clock was already enabled by the firmware, mark it as critical
+ * to avoid it being gated by the clock framework if no driver owns it.
+ */
+ if (plt_clk_is_enabled(&pclk->hw))
+ init.flags |= CLK_IS_CRITICAL;
+
ret = devm_clk_hw_register(&pdev->dev, &pclk->hw);
if (ret) {
pclk = ERR_PTR(ret);
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index fcae5ca6ac92..cc6062049170 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -262,7 +262,7 @@ config CLKSRC_LPC32XX
config CLKSRC_PISTACHIO
bool "Clocksource for Pistachio SoC" if COMPILE_TEST
- depends on HAS_IOMEM
+ depends on GENERIC_CLOCKEVENTS && HAS_IOMEM
select TIMER_OF
help
Enables the clocksource for the Pistachio SoC.
@@ -598,6 +598,14 @@ config CLKSRC_IMX_GPT
depends on ARM && CLKDEV_LOOKUP
select CLKSRC_MMIO
+config CLKSRC_IMX_TPM
+ bool "Clocksource using i.MX TPM" if COMPILE_TEST
+ depends on ARM && CLKDEV_LOOKUP && GENERIC_CLOCKEVENTS
+ select CLKSRC_MMIO
+ help
+ Enable this option to use the i.MX Timer/PWM Module (TPM) timer
+ as a clocksource.
+
config CLKSRC_ST_LPC
bool "Low power clocksource found in the LPC" if COMPILE_TEST
select TIMER_OF if OF
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 6df949402dfc..dbc1ad14515e 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -67,6 +67,7 @@ obj-$(CONFIG_CLKSRC_VERSATILE) += versatile.o
obj-$(CONFIG_CLKSRC_MIPS_GIC) += mips-gic-timer.o
obj-$(CONFIG_CLKSRC_TANGO_XTAL) += tango_xtal.o
obj-$(CONFIG_CLKSRC_IMX_GPT) += timer-imx-gpt.o
+obj-$(CONFIG_CLKSRC_IMX_TPM) += timer-imx-tpm.o
obj-$(CONFIG_ASM9260_TIMER) += asm9260_timer.o
obj-$(CONFIG_H8300_TMR8) += h8300_timer8.o
obj-$(CONFIG_H8300_TMR16) += h8300_timer16.o
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index aae87c4c546e..fd4b7f684bd0 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -455,7 +455,11 @@ void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa
per_cpu(timer_unstable_counter_workaround, i) = wa;
}
- static_branch_enable(&arch_timer_read_ool_enabled);
+ /*
+ * Use the locked version, as we're called from the CPU
+ * hotplug framework. Otherwise, we end-up in deadlock-land.
+ */
+ static_branch_enable_cpuslocked(&arch_timer_read_ool_enabled);
/*
* Don't use the vdso fastpath if errata require using the
@@ -1440,7 +1444,7 @@ static int __init arch_timer_mem_acpi_init(int platform_timer_count)
* While unlikely, it's theoretically possible that none of the frames
* in a timer expose the combination of feature we want.
*/
- for (i = i; i < timer_count; i++) {
+ for (i = 0; i < timer_count; i++) {
timer = &timers[i];
frame = arch_timer_mem_find_best_frame(timer);
diff --git a/drivers/clocksource/bcm2835_timer.c b/drivers/clocksource/bcm2835_timer.c
index 82828d3a4739..39e489a96ad7 100644
--- a/drivers/clocksource/bcm2835_timer.c
+++ b/drivers/clocksource/bcm2835_timer.c
@@ -114,7 +114,6 @@ static int __init bcm2835_timer_init(struct device_node *node)
timer = kzalloc(sizeof(*timer), GFP_KERNEL);
if (!timer) {
- pr_err("Can't allocate timer struct\n");
ret = -ENOMEM;
goto err_iounmap;
}
diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c
index bc48cbf6a795..269db74a0658 100644
--- a/drivers/clocksource/em_sti.c
+++ b/drivers/clocksource/em_sti.c
@@ -305,7 +305,7 @@ static int em_sti_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "failed to get irq\n");
- return -EINVAL;
+ return irq;
}
/* map memory, let base point to the STI instance */
@@ -314,11 +314,12 @@ static int em_sti_probe(struct platform_device *pdev)
if (IS_ERR(p->base))
return PTR_ERR(p->base);
- if (devm_request_irq(&pdev->dev, irq, em_sti_interrupt,
- IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
- dev_name(&pdev->dev), p)) {
+ ret = devm_request_irq(&pdev->dev, irq, em_sti_interrupt,
+ IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
+ dev_name(&pdev->dev), p);
+ if (ret) {
dev_err(&pdev->dev, "failed to request low IRQ\n");
- return -ENOENT;
+ return ret;
}
/* get hold of clock */
diff --git a/drivers/clocksource/tango_xtal.c b/drivers/clocksource/tango_xtal.c
index c4e1c2e6046f..6a8d9838ce33 100644
--- a/drivers/clocksource/tango_xtal.c
+++ b/drivers/clocksource/tango_xtal.c
@@ -26,13 +26,13 @@ static int __init tango_clocksource_init(struct device_node *np)
xtal_in_cnt = of_iomap(np, 0);
if (xtal_in_cnt == NULL) {
- pr_err("%s: invalid address\n", np->full_name);
+ pr_err("%pOF: invalid address\n", np);
return -ENXIO;
}
clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
- pr_err("%s: invalid clock\n", np->full_name);
+ pr_err("%pOF: invalid clock\n", np);
return PTR_ERR(clk);
}
@@ -43,7 +43,7 @@ static int __init tango_clocksource_init(struct device_node *np)
ret = clocksource_mmio_init(xtal_in_cnt, "tango-xtal", xtal_freq, 350,
32, clocksource_mmio_readl_up);
if (ret) {
- pr_err("%s: registration failed\n", np->full_name);
+ pr_err("%pOF: registration failed\n", np);
return ret;
}
diff --git a/drivers/clocksource/timer-imx-tpm.c b/drivers/clocksource/timer-imx-tpm.c
new file mode 100644
index 000000000000..21bffdcb2f20
--- /dev/null
+++ b/drivers/clocksource/timer-imx-tpm.c
@@ -0,0 +1,239 @@
+/*
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/sched_clock.h>
+
+#define TPM_SC 0x10
+#define TPM_SC_CMOD_INC_PER_CNT (0x1 << 3)
+#define TPM_SC_CMOD_DIV_DEFAULT 0x3
+#define TPM_CNT 0x14
+#define TPM_MOD 0x18
+#define TPM_STATUS 0x1c
+#define TPM_STATUS_CH0F BIT(0)
+#define TPM_C0SC 0x20
+#define TPM_C0SC_CHIE BIT(6)
+#define TPM_C0SC_MODE_SHIFT 2
+#define TPM_C0SC_MODE_MASK 0x3c
+#define TPM_C0SC_MODE_SW_COMPARE 0x4
+#define TPM_C0V 0x24
+
+static void __iomem *timer_base;
+static struct clock_event_device clockevent_tpm;
+
+static inline void tpm_timer_disable(void)
+{
+ unsigned int val;
+
+ /* channel disable */
+ val = readl(timer_base + TPM_C0SC);
+ val &= ~(TPM_C0SC_MODE_MASK | TPM_C0SC_CHIE);
+ writel(val, timer_base + TPM_C0SC);
+}
+
+static inline void tpm_timer_enable(void)
+{
+ unsigned int val;
+
+ /* channel enabled in sw compare mode */
+ val = readl(timer_base + TPM_C0SC);
+ val |= (TPM_C0SC_MODE_SW_COMPARE << TPM_C0SC_MODE_SHIFT) |
+ TPM_C0SC_CHIE;
+ writel(val, timer_base + TPM_C0SC);
+}
+
+static inline void tpm_irq_acknowledge(void)
+{
+ writel(TPM_STATUS_CH0F, timer_base + TPM_STATUS);
+}
+
+static struct delay_timer tpm_delay_timer;
+
+static inline unsigned long tpm_read_counter(void)
+{
+ return readl(timer_base + TPM_CNT);
+}
+
+static unsigned long tpm_read_current_timer(void)
+{
+ return tpm_read_counter();
+}
+
+static u64 notrace tpm_read_sched_clock(void)
+{
+ return tpm_read_counter();
+}
+
+static int __init tpm_clocksource_init(unsigned long rate)
+{
+ tpm_delay_timer.read_current_timer = &tpm_read_current_timer;
+ tpm_delay_timer.freq = rate;
+ register_current_timer_delay(&tpm_delay_timer);
+
+ sched_clock_register(tpm_read_sched_clock, 32, rate);
+
+ return clocksource_mmio_init(timer_base + TPM_CNT, "imx-tpm",
+ rate, 200, 32, clocksource_mmio_readl_up);
+}
+
+static int tpm_set_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ unsigned long next, now;
+
+ next = tpm_read_counter();
+ next += delta;
+ writel(next, timer_base + TPM_C0V);
+ now = tpm_read_counter();
+
+ /*
+ * NOTE: We have observed, with a very small probability, that bus
+ * fabric contention between the GPU and the A7 can delay the write
+ * to the CNT register by a few cycles, which may cause the min_delta
+ * event to be missed, so add an -ETIME check here to handle that case.
+ */
+ return (int)(next - now) <= 0 ? -ETIME : 0;
+}
+
+static int tpm_set_state_oneshot(struct clock_event_device *evt)
+{
+ tpm_timer_enable();
+
+ return 0;
+}
+
+static int tpm_set_state_shutdown(struct clock_event_device *evt)
+{
+ tpm_timer_disable();
+
+ return 0;
+}
+
+static irqreturn_t tpm_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+
+ tpm_irq_acknowledge();
+
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static struct clock_event_device clockevent_tpm = {
+ .name = "i.MX7ULP TPM Timer",
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .set_state_oneshot = tpm_set_state_oneshot,
+ .set_next_event = tpm_set_next_event,
+ .set_state_shutdown = tpm_set_state_shutdown,
+ .rating = 200,
+};
+
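+/* Request the channel 0 compare interrupt and register the clockevent device */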
+static int __init tpm_clockevent_init(unsigned long rate, int irq)
+{
+ int ret;
+
+ ret = request_irq(irq, tpm_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL,
+ "i.MX7ULP TPM Timer", &clockevent_tpm);
+
+ clockevent_tpm.cpumask = cpumask_of(0);
+ clockevent_tpm.irq = irq;
+ clockevents_config_and_register(&clockevent_tpm,
+ rate, 300, 0xfffffffe);
+
+ return ret;
+}
+
+static int __init tpm_timer_init(struct device_node *np)
+{
+ struct clk *ipg, *per;
+ int irq, ret;
+ u32 rate;
+
+ timer_base = of_iomap(np, 0);
+ if (!timer_base) {
+ pr_err("tpm: failed to get base address\n");
+ return -ENXIO;
+ }
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (!irq) {
+ pr_err("tpm: failed to get irq\n");
+ ret = -ENOENT;
+ goto err_iomap;
+ }
+
+ ipg = of_clk_get_by_name(np, "ipg");
+ per = of_clk_get_by_name(np, "per");
+ if (IS_ERR(ipg) || IS_ERR(per)) {
+ pr_err("tpm: failed to get igp or per clk\n");
+ ret = -ENODEV;
+ goto err_clk_get;
+ }
+
+ /* enable clk before accessing registers */
+ ret = clk_prepare_enable(ipg);
+ if (ret) {
+ pr_err("tpm: ipg clock enable failed (%d)\n", ret);
+ goto err_clk_get;
+ }
+
+ ret = clk_prepare_enable(per);
+ if (ret) {
+ pr_err("tpm: per clock enable failed (%d)\n", ret);
+ goto err_per_clk_enable;
+ }
+
+ /*
+ * Initialize tpm module to a known state
+ * 1) Counter disabled
+ * 2) TPM counter operates in up counting mode
+ * 3) Timer Overflow Interrupt disabled
+ * 4) Channel0 disabled
+ * 5) DMA transfers disabled
+ */
+ writel(0, timer_base + TPM_SC);
+ writel(0, timer_base + TPM_CNT);
+ writel(0, timer_base + TPM_C0SC);
+
+ /* increase per cnt, div 8 by default */
+ writel(TPM_SC_CMOD_INC_PER_CNT | TPM_SC_CMOD_DIV_DEFAULT,
+ timer_base + TPM_SC);
+
+ /* set MOD register to maximum for free running mode */
+ writel(0xffffffff, timer_base + TPM_MOD);
+
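+ /* The counter runs at the per clock divided by 8 (CMOD divider set above) */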
+ rate = clk_get_rate(per) >> 3;
+ ret = tpm_clocksource_init(rate);
+ if (ret)
+ goto err_per_clk_enable;
+
+ ret = tpm_clockevent_init(rate, irq);
+ if (ret)
+ goto err_per_clk_enable;
+
+ return 0;
+
+err_per_clk_enable:
+ clk_disable_unprepare(ipg);
+err_clk_get:
+ clk_put(per);
+ clk_put(ipg);
+err_iomap:
+ iounmap(timer_base);
+ return ret;
+}
+TIMER_OF_DECLARE(imx7ulp, "fsl,imx7ulp-tpm", tpm_timer_init);
diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c
index f6e7491c873c..c79122d8e10d 100644
--- a/drivers/clocksource/timer-of.c
+++ b/drivers/clocksource/timer-of.c
@@ -41,10 +41,18 @@ static __init int timer_irq_init(struct device_node *np,
struct timer_of *to = container_of(of_irq, struct timer_of, of_irq);
struct clock_event_device *clkevt = &to->clkevt;
- of_irq->irq = of_irq->name ? of_irq_get_byname(np, of_irq->name):
- irq_of_parse_and_map(np, of_irq->index);
+ if (of_irq->name) {
+ of_irq->irq = ret = of_irq_get_byname(np, of_irq->name);
+ if (ret < 0) {
+ pr_err("Failed to get interrupt %s for %s\n",
+ of_irq->name, np->full_name);
+ return ret;
+ }
+ } else {
+ of_irq->irq = irq_of_parse_and_map(np, of_irq->index);
+ }
if (!of_irq->irq) {
- pr_err("Failed to map interrupt for %s\n", np->full_name);
+ pr_err("Failed to map interrupt for %pOF\n", np);
return -EINVAL;
}
@@ -55,8 +63,7 @@ static __init int timer_irq_init(struct device_node *np,
of_irq->flags ? of_irq->flags : IRQF_TIMER,
np->full_name, clkevt);
if (ret) {
- pr_err("Failed to request irq %d for %s\n", of_irq->irq,
- np->full_name);
+ pr_err("Failed to request irq %d for %pOF\n", of_irq->irq, np);
return ret;
}
@@ -80,20 +87,20 @@ static __init int timer_clk_init(struct device_node *np,
of_clk->clk = of_clk->name ? of_clk_get_by_name(np, of_clk->name) :
of_clk_get(np, of_clk->index);
if (IS_ERR(of_clk->clk)) {
- pr_err("Failed to get clock for %s\n", np->full_name);
+ pr_err("Failed to get clock for %pOF\n", np);
return PTR_ERR(of_clk->clk);
}
ret = clk_prepare_enable(of_clk->clk);
if (ret) {
- pr_err("Failed for enable clock for %s\n", np->full_name);
+ pr_err("Failed for enable clock for %pOF\n", np);
goto out_clk_put;
}
of_clk->rate = clk_get_rate(of_clk->clk);
if (!of_clk->rate) {
ret = -EINVAL;
- pr_err("Failed to get clock rate for %s\n", np->full_name);
+ pr_err("Failed to get clock rate for %pOF\n", np);
goto out_clk_disable;
}
@@ -120,9 +127,9 @@ static __init int timer_base_init(struct device_node *np,
const char *name = of_base->name ? of_base->name : np->full_name;
of_base->base = of_io_request_and_map(np, of_base->index, name);
- if (!of_base->base) {
+ if (IS_ERR(of_base->base)) {
pr_err("Failed to iomap (%s)\n", name);
- return -ENXIO;
+ return PTR_ERR(of_base->base);
}
return 0;
diff --git a/drivers/clocksource/timer-probe.c b/drivers/clocksource/timer-probe.c
index da81e5de74fe..028075720334 100644
--- a/drivers/clocksource/timer-probe.c
+++ b/drivers/clocksource/timer-probe.c
@@ -40,8 +40,7 @@ void __init timer_probe(void)
ret = init_func_ret(np);
if (ret) {
- pr_err("Failed to initialize '%s': %d\n",
- of_node_full_name(np), ret);
+ pr_err("Failed to initialize '%pOF': %d\n", np, ret);
continue;
}
diff --git a/drivers/clocksource/timer-stm32.c b/drivers/clocksource/timer-stm32.c
index 174d1243ea93..8f2423789ba9 100644
--- a/drivers/clocksource/timer-stm32.c
+++ b/drivers/clocksource/timer-stm32.c
@@ -138,7 +138,7 @@ static int __init stm32_clockevent_init(struct device_node *np)
irq = irq_of_parse_and_map(np, 0);
if (!irq) {
ret = -EINVAL;
- pr_err("%s: failed to get irq.\n", np->full_name);
+ pr_err("%pOF: failed to get irq.\n", np);
goto err_get_irq;
}
@@ -168,12 +168,12 @@ static int __init stm32_clockevent_init(struct device_node *np)
ret = request_irq(irq, stm32_clock_event_handler, IRQF_TIMER,
"stm32 clockevent", data);
if (ret) {
- pr_err("%s: failed to request irq.\n", np->full_name);
+ pr_err("%pOF: failed to request irq.\n", np);
goto err_get_irq;
}
- pr_info("%s: STM32 clockevent driver initialized (%d bits)\n",
- np->full_name, bits);
+ pr_info("%pOF: STM32 clockevent driver initialized (%d bits)\n",
+ np, bits);
return ret;
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 2011fec2d6ad..bdce4488ded1 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -71,15 +71,6 @@ config ARM_HIGHBANK_CPUFREQ
If in doubt, say N.
-config ARM_DB8500_CPUFREQ
- tristate "ST-Ericsson DB8500 cpufreq" if COMPILE_TEST && !ARCH_U8500
- default ARCH_U8500
- depends on HAS_IOMEM
- depends on !CPU_THERMAL || THERMAL
- help
- This adds the CPUFreq driver for ST-Ericsson Ux500 (DB8500) SoC
- series.
-
config ARM_IMX6Q_CPUFREQ
tristate "Freescale i.MX6 cpufreq support"
depends on ARCH_MXC
@@ -96,14 +87,13 @@ config ARM_KIRKWOOD_CPUFREQ
This adds the CPUFreq driver for Marvell Kirkwood
SoCs.
-config ARM_MT8173_CPUFREQ
- tristate "Mediatek MT8173 CPUFreq support"
+config ARM_MEDIATEK_CPUFREQ
+ tristate "CPU Frequency scaling support for MediaTek SoCs"
depends on ARCH_MEDIATEK && REGULATOR
- depends on ARM64 || (ARM_CPU_TOPOLOGY && COMPILE_TEST)
depends on !CPU_THERMAL || THERMAL
select PM_OPP
help
- This adds the CPUFreq driver support for Mediatek MT8173 SoC.
+ This adds the CPUFreq driver support for MediaTek SoCs.
config ARM_OMAP2PLUS_CPUFREQ
bool "TI OMAP2+"
@@ -242,6 +232,11 @@ config ARM_STI_CPUFREQ
this config option if you wish to add CPUFreq support for STi based
SoCs.
+config ARM_TANGO_CPUFREQ
+ bool
+ depends on CPUFREQ_DT && ARCH_TANGO
+ default y
+
config ARM_TEGRA20_CPUFREQ
bool "Tegra20 CPUFreq support"
depends on ARCH_TEGRA
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index ab3a42cd29ef..c7af9b2a255e 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -53,12 +53,11 @@ obj-$(CONFIG_ARM_DT_BL_CPUFREQ) += arm_big_little_dt.o
obj-$(CONFIG_ARM_BRCMSTB_AVS_CPUFREQ) += brcmstb-avs-cpufreq.o
obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o
-obj-$(CONFIG_ARM_DB8500_CPUFREQ) += dbx500-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ) += exynos5440-cpufreq.o
obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o
obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o
obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o
-obj-$(CONFIG_ARM_MT8173_CPUFREQ) += mt8173-cpufreq.o
+obj-$(CONFIG_ARM_MEDIATEK_CPUFREQ) += mediatek-cpufreq.o
obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
obj-$(CONFIG_ARM_PXA2xx_CPUFREQ) += pxa2xx-cpufreq.o
obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o
@@ -75,6 +74,7 @@ obj-$(CONFIG_ARM_SA1110_CPUFREQ) += sa1110-cpufreq.o
obj-$(CONFIG_ARM_SCPI_CPUFREQ) += scpi-cpufreq.o
obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o
obj-$(CONFIG_ARM_STI_CPUFREQ) += sti-cpufreq.o
+obj-$(CONFIG_ARM_TANGO_CPUFREQ) += tango-cpufreq.o
obj-$(CONFIG_ARM_TEGRA20_CPUFREQ) += tegra20-cpufreq.o
obj-$(CONFIG_ARM_TEGRA124_CPUFREQ) += tegra124-cpufreq.o
obj-$(CONFIG_ARM_TEGRA186_CPUFREQ) += tegra186-cpufreq.o
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
index ea6d62547b10..17504129fd77 100644
--- a/drivers/cpufreq/arm_big_little.c
+++ b/drivers/cpufreq/arm_big_little.c
@@ -483,11 +483,8 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy)
return ret;
}
- if (arm_bL_ops->get_transition_latency)
- policy->cpuinfo.transition_latency =
- arm_bL_ops->get_transition_latency(cpu_dev);
- else
- policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+ policy->cpuinfo.transition_latency =
+ arm_bL_ops->get_transition_latency(cpu_dev);
if (is_bL_switching_enabled())
per_cpu(cpu_last_req_freq, policy->cpu) = clk_get_cpu_rate(policy->cpu);
@@ -622,7 +619,8 @@ int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops)
return -EBUSY;
}
- if (!ops || !strlen(ops->name) || !ops->init_opp_table) {
+ if (!ops || !strlen(ops->name) || !ops->init_opp_table ||
+ !ops->get_transition_latency) {
pr_err("%s: Invalid arm_bL_ops, exiting\n", __func__);
return -ENODEV;
}
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 10be285c9055..a1c3025f9df7 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -172,7 +172,6 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
return -EFAULT;
}
- cpumask_set_cpu(policy->cpu, policy->cpus);
cpu->cur_policy = policy;
/* Set policy->cur to max now. The governors will adjust later. */
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index 1c262923fe58..a020da7940d6 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -9,11 +9,16 @@
#include <linux/err.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include "cpufreq-dt.h"
-static const struct of_device_id machines[] __initconst = {
+/*
+ * Machines for which the cpufreq device is *always* created, mostly used for
+ * platforms using "operating-points" (V1) property.
+ */
+static const struct of_device_id whitelist[] __initconst = {
{ .compatible = "allwinner,sun4i-a10", },
{ .compatible = "allwinner,sun5i-a10s", },
{ .compatible = "allwinner,sun5i-a13", },
@@ -22,7 +27,6 @@ static const struct of_device_id machines[] __initconst = {
{ .compatible = "allwinner,sun6i-a31s", },
{ .compatible = "allwinner,sun7i-a20", },
{ .compatible = "allwinner,sun8i-a23", },
- { .compatible = "allwinner,sun8i-a33", },
{ .compatible = "allwinner,sun8i-a83t", },
{ .compatible = "allwinner,sun8i-h3", },
@@ -32,7 +36,6 @@ static const struct of_device_id machines[] __initconst = {
{ .compatible = "arm,integrator-cp", },
{ .compatible = "hisilicon,hi3660", },
- { .compatible = "hisilicon,hi6220", },
{ .compatible = "fsl,imx27", },
{ .compatible = "fsl,imx51", },
@@ -46,11 +49,8 @@ static const struct of_device_id machines[] __initconst = {
{ .compatible = "samsung,exynos3250", },
{ .compatible = "samsung,exynos4210", },
{ .compatible = "samsung,exynos4212", },
- { .compatible = "samsung,exynos4412", },
{ .compatible = "samsung,exynos5250", },
#ifndef CONFIG_BL_SWITCHER
- { .compatible = "samsung,exynos5420", },
- { .compatible = "samsung,exynos5433", },
{ .compatible = "samsung,exynos5800", },
#endif
@@ -67,6 +67,8 @@ static const struct of_device_id machines[] __initconst = {
{ .compatible = "renesas,r8a7792", },
{ .compatible = "renesas,r8a7793", },
{ .compatible = "renesas,r8a7794", },
+ { .compatible = "renesas,r8a7795", },
+ { .compatible = "renesas,r8a7796", },
{ .compatible = "renesas,sh73a0", },
{ .compatible = "rockchip,rk2928", },
@@ -76,17 +78,17 @@ static const struct of_device_id machines[] __initconst = {
{ .compatible = "rockchip,rk3188", },
{ .compatible = "rockchip,rk3228", },
{ .compatible = "rockchip,rk3288", },
+ { .compatible = "rockchip,rk3328", },
{ .compatible = "rockchip,rk3366", },
{ .compatible = "rockchip,rk3368", },
{ .compatible = "rockchip,rk3399", },
- { .compatible = "sigma,tango4" },
-
- { .compatible = "socionext,uniphier-pro5", },
- { .compatible = "socionext,uniphier-pxs2", },
{ .compatible = "socionext,uniphier-ld6b", },
- { .compatible = "socionext,uniphier-ld11", },
- { .compatible = "socionext,uniphier-ld20", },
+
+ { .compatible = "st-ericsson,u8500", },
+ { .compatible = "st-ericsson,u8540", },
+ { .compatible = "st-ericsson,u9500", },
+ { .compatible = "st-ericsson,u9540", },
{ .compatible = "ti,omap2", },
{ .compatible = "ti,omap3", },
@@ -94,27 +96,56 @@ static const struct of_device_id machines[] __initconst = {
{ .compatible = "ti,omap5", },
{ .compatible = "xlnx,zynq-7000", },
+ { .compatible = "xlnx,zynqmp", },
- { .compatible = "zte,zx296718", },
+ { }
+};
+/*
+ * Machines for which the cpufreq device is *not* created, mostly used for
+ * platforms using "operating-points-v2" property.
+ */
+static const struct of_device_id blacklist[] __initconst = {
{ }
};
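+/* Check whether CPU0's DT node uses the "operating-points-v2" (OPP v2) bindings */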
+static bool __init cpu0_node_has_opp_v2_prop(void)
+{
+ struct device_node *np = of_cpu_device_node_get(0);
+ bool ret = false;
+
+ if (of_get_property(np, "operating-points-v2", NULL))
+ ret = true;
+
+ of_node_put(np);
+ return ret;
+}
+
static int __init cpufreq_dt_platdev_init(void)
{
struct device_node *np = of_find_node_by_path("/");
const struct of_device_id *match;
+ const void *data = NULL;
if (!np)
return -ENODEV;
- match = of_match_node(machines, np);
+ match = of_match_node(whitelist, np);
+ if (match) {
+ data = match->data;
+ goto create_pdev;
+ }
+
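+ /* Also create the device for OPP v2 platforms that are not blacklisted */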
+ if (cpu0_node_has_opp_v2_prop() && !of_match_node(blacklist, np))
+ goto create_pdev;
+
of_node_put(np);
- if (!match)
- return -ENODEV;
+ return -ENODEV;
+create_pdev:
+ of_node_put(np);
return PTR_ERR_OR_ZERO(platform_device_register_data(NULL, "cpufreq-dt",
- -1, match->data,
+ -1, data,
sizeof(struct cpufreq_dt_platform_data)));
}
device_initcall(cpufreq_dt_platdev_init);
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index fef3c2160691..d83ab94d041a 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -274,6 +274,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
transition_latency = CPUFREQ_ETERNAL;
policy->cpuinfo.transition_latency = transition_latency;
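+ /* Any CPU sharing this policy may perform the frequency update */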
+ policy->dvfs_possible_from_any_cpu = true;
return 0;
diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c
index 5503d491b016..dbf82f36d270 100644
--- a/drivers/cpufreq/cpufreq-nforce2.c
+++ b/drivers/cpufreq/cpufreq-nforce2.c
@@ -357,7 +357,6 @@ static int nforce2_cpu_init(struct cpufreq_policy *policy)
/* cpuinfo and default policy values */
policy->min = policy->cpuinfo.min_freq = min_fsb * fid * 100;
policy->max = policy->cpuinfo.max_freq = max_fsb * fid * 100;
- policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
return 0;
}
@@ -369,6 +368,7 @@ static int nforce2_cpu_exit(struct cpufreq_policy *policy)
static struct cpufreq_driver nforce2_driver = {
.name = "nforce2",
+ .flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
.verify = nforce2_verify,
.target = nforce2_target,
.get = nforce2_get,
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 9bf97a366029..ea43b147a7fe 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -524,6 +524,32 @@ unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
}
EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
+unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy)
+{
+ unsigned int latency;
+
+ if (policy->transition_delay_us)
+ return policy->transition_delay_us;
+
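+ /* cpuinfo.transition_latency is in ns; convert it to us */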
+ latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
+ if (latency) {
+ /*
+ * For platforms that can change the frequency very fast (< 10
+ * us), the above formula gives a decent transition delay. But
+ * for platforms where transition_latency is in milliseconds, it
+ * ends up giving unrealistic values.
+ *
+ * Cap the default transition delay to 10 ms, which seems to be
+ * a reasonable amount of time after which we should reevaluate
+ * the frequency.
+ */
+ return min(latency * LATENCY_MULTIPLIER, (unsigned int)10000);
+ }
+
+ return LATENCY_MULTIPLIER;
+}
+EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
+
/*********************************************************************
* SYSFS INTERFACE *
*********************************************************************/
@@ -1817,9 +1843,10 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier);
* twice in parallel for the same policy and that it will never be called in
* parallel with either ->target() or ->target_index() for the same policy.
*
- * If CPUFREQ_ENTRY_INVALID is returned by the driver's ->fast_switch()
- * callback to indicate an error condition, the hardware configuration must be
- * preserved.
+ * Returns the actual frequency set for the CPU.
+ *
+ * If 0 is returned by the driver's ->fast_switch() callback to indicate an
+ * error condition, the hardware configuration must be preserved.
*/
unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq)
@@ -1988,13 +2015,13 @@ static int cpufreq_init_governor(struct cpufreq_policy *policy)
if (!policy->governor)
return -EINVAL;
- if (policy->governor->max_transition_latency &&
- policy->cpuinfo.transition_latency >
- policy->governor->max_transition_latency) {
+ /* Platform doesn't want dynamic frequency switching ? */
+ if (policy->governor->dynamic_switching &&
+ cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) {
struct cpufreq_governor *gov = cpufreq_fallback_governor();
if (gov) {
- pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
+ pr_warn("Can't use %s governor as dynamic switching is disallowed. Fallback to %s governor\n",
policy->governor->name, gov->name);
policy->governor = gov;
} else {
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 88220ff3e1c2..f20f20a77d4d 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -246,7 +246,6 @@ gov_show_one_common(sampling_rate);
gov_show_one_common(sampling_down_factor);
gov_show_one_common(up_threshold);
gov_show_one_common(ignore_nice_load);
-gov_show_one_common(min_sampling_rate);
gov_show_one(cs, down_threshold);
gov_show_one(cs, freq_step);
@@ -254,12 +253,10 @@ gov_attr_rw(sampling_rate);
gov_attr_rw(sampling_down_factor);
gov_attr_rw(up_threshold);
gov_attr_rw(ignore_nice_load);
-gov_attr_ro(min_sampling_rate);
gov_attr_rw(down_threshold);
gov_attr_rw(freq_step);
static struct attribute *cs_attributes[] = {
- &min_sampling_rate.attr,
&sampling_rate.attr,
&sampling_down_factor.attr,
&up_threshold.attr,
@@ -297,10 +294,7 @@ static int cs_init(struct dbs_data *dbs_data)
dbs_data->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
dbs_data->ignore_nice_load = 0;
-
dbs_data->tuners = tuners;
- dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
- jiffies_to_usecs(10);
return 0;
}
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 47e24b5384b3..58d4f4e1ad6a 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -47,14 +47,11 @@ ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
struct policy_dbs_info *policy_dbs;
- unsigned int rate;
int ret;
- ret = sscanf(buf, "%u", &rate);
+ ret = sscanf(buf, "%u", &dbs_data->sampling_rate);
if (ret != 1)
return -EINVAL;
- dbs_data->sampling_rate = max(rate, dbs_data->min_sampling_rate);
-
/*
* We are operating under dbs_data->mutex and so the list and its
* entries can't be freed concurrently.
@@ -275,6 +272,9 @@ static void dbs_update_util_handler(struct update_util_data *data, u64 time,
struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
u64 delta_ns, lst;
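+ /* Ignore the update if this CPU may not do remote DVFS for the policy */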
+ if (!cpufreq_can_do_remote_dvfs(policy_dbs->policy))
+ return;
+
/*
* The work may not be allowed to be queued up right now.
* Possible reasons:
@@ -392,7 +392,6 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
struct dbs_governor *gov = dbs_governor_of(policy);
struct dbs_data *dbs_data;
struct policy_dbs_info *policy_dbs;
- unsigned int latency;
int ret = 0;
/* State should be equivalent to EXIT */
@@ -431,16 +430,7 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
if (ret)
goto free_policy_dbs_info;
- /* policy latency is in ns. Convert it to us first */
- latency = policy->cpuinfo.transition_latency / 1000;
- if (latency == 0)
- latency = 1;
-
- /* Bring kernel and HW constraints together */
- dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
- MIN_LATENCY_MULTIPLIER * latency);
- dbs_data->sampling_rate = max(dbs_data->min_sampling_rate,
- LATENCY_MULTIPLIER * latency);
+ dbs_data->sampling_rate = cpufreq_policy_transition_delay_us(policy);
if (!have_governor_per_policy())
gov->gdbs_data = dbs_data;
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index 0236ec2cd654..8463f5def0f5 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -41,7 +41,6 @@ enum {OD_NORMAL_SAMPLE, OD_SUB_SAMPLE};
struct dbs_data {
struct gov_attr_set attr_set;
void *tuners;
- unsigned int min_sampling_rate;
unsigned int ignore_nice_load;
unsigned int sampling_rate;
unsigned int sampling_down_factor;
@@ -160,7 +159,7 @@ void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy);
#define CPUFREQ_DBS_GOVERNOR_INITIALIZER(_name_) \
{ \
.name = _name_, \
- .max_transition_latency = TRANSITION_LATENCY_LIMIT, \
+ .dynamic_switching = true, \
.owner = THIS_MODULE, \
.init = cpufreq_dbs_governor_init, \
.exit = cpufreq_dbs_governor_exit, \
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 3937acf7e026..6b423eebfd5d 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -319,7 +319,6 @@ gov_show_one_common(sampling_rate);
gov_show_one_common(up_threshold);
gov_show_one_common(sampling_down_factor);
gov_show_one_common(ignore_nice_load);
-gov_show_one_common(min_sampling_rate);
gov_show_one_common(io_is_busy);
gov_show_one(od, powersave_bias);
@@ -329,10 +328,8 @@ gov_attr_rw(up_threshold);
gov_attr_rw(sampling_down_factor);
gov_attr_rw(ignore_nice_load);
gov_attr_rw(powersave_bias);
-gov_attr_ro(min_sampling_rate);
static struct attribute *od_attributes[] = {
- &min_sampling_rate.attr,
&sampling_rate.attr,
&up_threshold.attr,
&sampling_down_factor.attr,
@@ -373,17 +370,8 @@ static int od_init(struct dbs_data *dbs_data)
if (idle_time != -1ULL) {
/* Idle micro accounting is supported. Use finer thresholds */
dbs_data->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
- /*
- * In nohz/micro accounting case we set the minimum frequency
- * not depending on HZ, but fixed (very low).
- */
- dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
} else {
dbs_data->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
-
- /* For correct statistics, we need 10 ticks for each measure */
- dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
- jiffies_to_usecs(10);
}
dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
diff --git a/drivers/cpufreq/dbx500-cpufreq.c b/drivers/cpufreq/dbx500-cpufreq.c
deleted file mode 100644
index 4ee0431579c1..000000000000
--- a/drivers/cpufreq/dbx500-cpufreq.c
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics 2009
- * Copyright (C) ST-Ericsson SA 2010-2012
- *
- * License Terms: GNU General Public License v2
- * Author: Sundar Iyer <sundar.iyer@stericsson.com>
- * Author: Martin Persson <martin.persson@stericsson.com>
- * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/cpufreq.h>
-#include <linux/cpu_cooling.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-
-static struct cpufreq_frequency_table *freq_table;
-static struct clk *armss_clk;
-static struct thermal_cooling_device *cdev;
-
-static int dbx500_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int index)
-{
- /* update armss clk frequency */
- return clk_set_rate(armss_clk, freq_table[index].frequency * 1000);
-}
-
-static int dbx500_cpufreq_init(struct cpufreq_policy *policy)
-{
- policy->clk = armss_clk;
- return cpufreq_generic_init(policy, freq_table, 20 * 1000);
-}
-
-static int dbx500_cpufreq_exit(struct cpufreq_policy *policy)
-{
- if (!IS_ERR(cdev))
- cpufreq_cooling_unregister(cdev);
- return 0;
-}
-
-static void dbx500_cpufreq_ready(struct cpufreq_policy *policy)
-{
- cdev = cpufreq_cooling_register(policy);
- if (IS_ERR(cdev))
- pr_err("Failed to register cooling device %ld\n", PTR_ERR(cdev));
- else
- pr_info("Cooling device registered: %s\n", cdev->type);
-}
-
-static struct cpufreq_driver dbx500_cpufreq_driver = {
- .flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS |
- CPUFREQ_NEED_INITIAL_FREQ_CHECK,
- .verify = cpufreq_generic_frequency_table_verify,
- .target_index = dbx500_cpufreq_target,
- .get = cpufreq_generic_get,
- .init = dbx500_cpufreq_init,
- .exit = dbx500_cpufreq_exit,
- .ready = dbx500_cpufreq_ready,
- .name = "DBX500",
- .attr = cpufreq_generic_attr,
-};
-
-static int dbx500_cpufreq_probe(struct platform_device *pdev)
-{
- struct cpufreq_frequency_table *pos;
-
- freq_table = dev_get_platdata(&pdev->dev);
- if (!freq_table) {
- pr_err("dbx500-cpufreq: Failed to fetch cpufreq table\n");
- return -ENODEV;
- }
-
- armss_clk = clk_get(&pdev->dev, "armss");
- if (IS_ERR(armss_clk)) {
- pr_err("dbx500-cpufreq: Failed to get armss clk\n");
- return PTR_ERR(armss_clk);
- }
-
- pr_info("dbx500-cpufreq: Available frequencies:\n");
- cpufreq_for_each_entry(pos, freq_table)
- pr_info(" %d Mhz\n", pos->frequency / 1000);
-
- return cpufreq_register_driver(&dbx500_cpufreq_driver);
-}
-
-static struct platform_driver dbx500_cpufreq_plat_driver = {
- .driver = {
- .name = "cpufreq-ux500",
- },
- .probe = dbx500_cpufreq_probe,
-};
-
-static int __init dbx500_cpufreq_register(void)
-{
- return platform_driver_register(&dbx500_cpufreq_plat_driver);
-}
-device_initcall(dbx500_cpufreq_register);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("cpufreq driver for DBX500");
diff --git a/drivers/cpufreq/elanfreq.c b/drivers/cpufreq/elanfreq.c
index bfce11cba1df..45e2ca62515e 100644
--- a/drivers/cpufreq/elanfreq.c
+++ b/drivers/cpufreq/elanfreq.c
@@ -165,9 +165,6 @@ static int elanfreq_cpu_init(struct cpufreq_policy *policy)
if (pos->frequency > max_freq)
pos->frequency = CPUFREQ_ENTRY_INVALID;
- /* cpuinfo and default policy values */
- policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
-
return cpufreq_table_validate_and_show(policy, elanfreq_table);
}
@@ -196,6 +193,7 @@ __setup("elanfreq=", elanfreq_setup);
static struct cpufreq_driver elanfreq_driver = {
.get = elanfreq_get_cpu_frequency,
+ .flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = elanfreq_target,
.init = elanfreq_cpu_init,
diff --git a/drivers/cpufreq/gx-suspmod.c b/drivers/cpufreq/gx-suspmod.c
index 3488c9c175eb..8f52a06664e3 100644
--- a/drivers/cpufreq/gx-suspmod.c
+++ b/drivers/cpufreq/gx-suspmod.c
@@ -428,7 +428,6 @@ static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy)
policy->max = maxfreq;
policy->cpuinfo.min_freq = maxfreq / max_duration;
policy->cpuinfo.max_freq = maxfreq;
- policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
return 0;
}
@@ -438,6 +437,7 @@ static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy)
* MediaGX/Geode GX initialize cpufreq driver
*/
static struct cpufreq_driver gx_suspmod_driver = {
+ .flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
.get = gx_get_cpuspeed,
.verify = cpufreq_gx_verify,
.target = cpufreq_gx_target,
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index b6edd3ccaa55..14466a9b01c0 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -47,6 +47,7 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
struct dev_pm_opp *opp;
unsigned long freq_hz, volt, volt_old;
unsigned int old_freq, new_freq;
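+ /* Track whether pll1_sys was temporarily enabled for the rate change */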
+ bool pll1_sys_temp_enabled = false;
int ret;
new_freq = freq_table[index].frequency;
@@ -124,6 +125,10 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk)) {
clk_set_rate(pll1_sys_clk, new_freq * 1000);
clk_set_parent(pll1_sw_clk, pll1_sys_clk);
+ } else {
+ /* pll1_sys needs to be enabled for divider rate change to work. */
+ pll1_sys_temp_enabled = true;
+ clk_prepare_enable(pll1_sys_clk);
}
}
@@ -135,6 +140,10 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
return ret;
}
+ /* PLL1 is only needed until after ARM-PODF is set. */
+ if (pll1_sys_temp_enabled)
+ clk_disable_unprepare(pll1_sys_clk);
+
/* scaling down? scale voltage after frequency */
if (new_freq < old_freq) {
ret = regulator_set_voltage_tol(arm_reg, volt, 0);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index b7fb8b7c980d..93a0e88bef76 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -37,8 +37,7 @@
#include <asm/cpufeature.h>
#include <asm/intel-family.h>
-#define INTEL_PSTATE_DEFAULT_SAMPLING_INTERVAL (10 * NSEC_PER_MSEC)
-#define INTEL_PSTATE_HWP_SAMPLING_INTERVAL (50 * NSEC_PER_MSEC)
+#define INTEL_PSTATE_SAMPLING_INTERVAL (10 * NSEC_PER_MSEC)
#define INTEL_CPUFREQ_TRANSITION_LATENCY 20000
#define INTEL_CPUFREQ_TRANSITION_DELAY 500
@@ -173,28 +172,6 @@ struct vid_data {
};
/**
- * struct _pid - Stores PID data
- * @setpoint: Target set point for busyness or performance
- * @integral: Storage for accumulated error values
- * @p_gain: PID proportional gain
- * @i_gain: PID integral gain
- * @d_gain: PID derivative gain
- * @deadband: PID deadband
- * @last_err: Last error storage for integral part of PID calculation
- *
- * Stores PID coefficients and last error for PID controller.
- */
-struct _pid {
- int setpoint;
- int32_t integral;
- int32_t p_gain;
- int32_t i_gain;
- int32_t d_gain;
- int deadband;
- int32_t last_err;
-};
-
-/**
* struct global_params - Global parameters, mostly tunable via sysfs.
* @no_turbo: Whether or not to use turbo P-states.
* @turbo_disabled: Whethet or not turbo P-states are available at all,
@@ -223,8 +200,10 @@ struct global_params {
* @last_update: Time of the last update.
* @pstate: Stores P state limits for this CPU
* @vid: Stores VID limits for this CPU
- * @pid: Stores PID parameters for this CPU
* @last_sample_time: Last Sample time
+ * @aperf_mperf_shift: Number of clock cycles after which aperf and mperf
+ * are incremented. The shift is applied as a 2^shift
+ * multiplier to the mperf delta when calculating CPU busy.
* @prev_aperf: Last APERF value read from APERF MSR
* @prev_mperf: Last MPERF value read from MPERF MSR
* @prev_tsc: Last timestamp counter (TSC) value
@@ -255,10 +234,10 @@ struct cpudata {
struct pstate_data pstate;
struct vid_data vid;
- struct _pid pid;
u64 last_update;
u64 last_sample_time;
+ u64 aperf_mperf_shift;
u64 prev_aperf;
u64 prev_mperf;
u64 prev_tsc;
@@ -280,28 +259,6 @@ struct cpudata {
static struct cpudata **all_cpu_data;
/**
- * struct pstate_adjust_policy - Stores static PID configuration data
- * @sample_rate_ms: PID calculation sample rate in ms
- * @sample_rate_ns: Sample rate calculation in ns
- * @deadband: PID deadband
- * @setpoint: PID Setpoint
- * @p_gain_pct: PID proportional gain
- * @i_gain_pct: PID integral gain
- * @d_gain_pct: PID derivative gain
- *
- * Stores per CPU model static PID configuration data.
- */
-struct pstate_adjust_policy {
- int sample_rate_ms;
- s64 sample_rate_ns;
- int deadband;
- int setpoint;
- int p_gain_pct;
- int d_gain_pct;
- int i_gain_pct;
-};
-
-/**
* struct pstate_funcs - Per CPU model specific callbacks
* @get_max: Callback to get maximum non turbo effective P state
* @get_max_physical: Callback to get maximum non turbo physical P state
@@ -310,7 +267,6 @@ struct pstate_adjust_policy {
* @get_scaling: Callback to get frequency scaling factor
* @get_val: Callback to convert P state to actual MSR write value
* @get_vid: Callback to get VID data for Atom platforms
- * @update_util: Active mode utilization update callback.
*
* Core and Atom CPU models have different way to get P State limits. This
* structure is used to store those callbacks.
@@ -321,22 +277,12 @@ struct pstate_funcs {
int (*get_min)(void);
int (*get_turbo)(void);
int (*get_scaling)(void);
+ int (*get_aperf_mperf_shift)(void);
u64 (*get_val)(struct cpudata*, int pstate);
void (*get_vid)(struct cpudata *);
- void (*update_util)(struct update_util_data *data, u64 time,
- unsigned int flags);
};
static struct pstate_funcs pstate_funcs __read_mostly;
-static struct pstate_adjust_policy pid_params __read_mostly = {
- .sample_rate_ms = 10,
- .sample_rate_ns = 10 * NSEC_PER_MSEC,
- .deadband = 0,
- .setpoint = 97,
- .p_gain_pct = 20,
- .d_gain_pct = 0,
- .i_gain_pct = 0,
-};
static int hwp_active __read_mostly;
static bool per_cpu_limits __read_mostly;
@@ -504,56 +450,6 @@ static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
}
#endif
-static signed int pid_calc(struct _pid *pid, int32_t busy)
-{
- signed int result;
- int32_t pterm, dterm, fp_error;
- int32_t integral_limit;
-
- fp_error = pid->setpoint - busy;
-
- if (abs(fp_error) <= pid->deadband)
- return 0;
-
- pterm = mul_fp(pid->p_gain, fp_error);
-
- pid->integral += fp_error;
-
- /*
- * We limit the integral here so that it will never
- * get higher than 30. This prevents it from becoming
- * too large an input over long periods of time and allows
- * it to get factored out sooner.
- *
- * The value of 30 was chosen through experimentation.
- */
- integral_limit = int_tofp(30);
- if (pid->integral > integral_limit)
- pid->integral = integral_limit;
- if (pid->integral < -integral_limit)
- pid->integral = -integral_limit;
-
- dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
- pid->last_err = fp_error;
-
- result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
- result = result + (1 << (FRAC_BITS-1));
- return (signed int)fp_toint(result);
-}
-
-static inline void intel_pstate_pid_reset(struct cpudata *cpu)
-{
- struct _pid *pid = &cpu->pid;
-
- pid->p_gain = percent_fp(pid_params.p_gain_pct);
- pid->d_gain = percent_fp(pid_params.d_gain_pct);
- pid->i_gain = percent_fp(pid_params.i_gain_pct);
- pid->setpoint = int_tofp(pid_params.setpoint);
- pid->last_err = pid->setpoint - int_tofp(100);
- pid->deadband = int_tofp(pid_params.deadband);
- pid->integral = 0;
-}
-
static inline void update_turbo_state(void)
{
u64 misc_en;
@@ -906,82 +802,6 @@ static void intel_pstate_update_policies(void)
cpufreq_update_policy(cpu);
}
-/************************** debugfs begin ************************/
-static int pid_param_set(void *data, u64 val)
-{
- unsigned int cpu;
-
- *(u32 *)data = val;
- pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC;
- for_each_possible_cpu(cpu)
- if (all_cpu_data[cpu])
- intel_pstate_pid_reset(all_cpu_data[cpu]);
-
- return 0;
-}
-
-static int pid_param_get(void *data, u64 *val)
-{
- *val = *(u32 *)data;
- return 0;
-}
-DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n");
-
-static struct dentry *debugfs_parent;
-
-struct pid_param {
- char *name;
- void *value;
- struct dentry *dentry;
-};
-
-static struct pid_param pid_files[] = {
- {"sample_rate_ms", &pid_params.sample_rate_ms, },
- {"d_gain_pct", &pid_params.d_gain_pct, },
- {"i_gain_pct", &pid_params.i_gain_pct, },
- {"deadband", &pid_params.deadband, },
- {"setpoint", &pid_params.setpoint, },
- {"p_gain_pct", &pid_params.p_gain_pct, },
- {NULL, NULL, }
-};
-
-static void intel_pstate_debug_expose_params(void)
-{
- int i;
-
- debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
- if (IS_ERR_OR_NULL(debugfs_parent))
- return;
-
- for (i = 0; pid_files[i].name; i++) {
- struct dentry *dentry;
-
- dentry = debugfs_create_file(pid_files[i].name, 0660,
- debugfs_parent, pid_files[i].value,
- &fops_pid_param);
- if (!IS_ERR(dentry))
- pid_files[i].dentry = dentry;
- }
-}
-
-static void intel_pstate_debug_hide_params(void)
-{
- int i;
-
- if (IS_ERR_OR_NULL(debugfs_parent))
- return;
-
- for (i = 0; pid_files[i].name; i++) {
- debugfs_remove(pid_files[i].dentry);
- pid_files[i].dentry = NULL;
- }
-
- debugfs_remove(debugfs_parent);
- debugfs_parent = NULL;
-}
-
-/************************** debugfs end ************************/
-
/************************** sysfs begin ************************/
#define show_one(file_name, object) \
static ssize_t show_##file_name \
@@ -1486,6 +1306,11 @@ static u64 core_get_val(struct cpudata *cpudata, int pstate)
return val;
}
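+/* On KNL, APERF and MPERF are incremented only once every 1024 (2^10) clock cycles */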
+static int knl_get_aperf_mperf_shift(void)
+{
+ return 10;
+}
+
static int knl_get_turbo_pstate(void)
{
u64 value;
@@ -1543,6 +1368,9 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+ if (pstate_funcs.get_aperf_mperf_shift)
+ cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();
+
if (pstate_funcs.get_vid)
pstate_funcs.get_vid(cpu);
@@ -1600,8 +1428,7 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
static inline int32_t get_avg_frequency(struct cpudata *cpu)
{
- return mul_ext_fp(cpu->sample.core_avg_perf,
- cpu->pstate.max_pstate_physical * cpu->pstate.scaling);
+ return mul_ext_fp(cpu->sample.core_avg_perf, cpu_khz);
}
static inline int32_t get_avg_pstate(struct cpudata *cpu)
@@ -1610,13 +1437,14 @@ static inline int32_t get_avg_pstate(struct cpudata *cpu)
cpu->sample.core_avg_perf);
}
-static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
+static inline int32_t get_target_pstate(struct cpudata *cpu)
{
struct sample *sample = &cpu->sample;
int32_t busy_frac, boost;
int target, avg_pstate;
- busy_frac = div_fp(sample->mperf, sample->tsc);
+ busy_frac = div_fp(sample->mperf << cpu->aperf_mperf_shift,
+ sample->tsc);
boost = cpu->iowait_boost;
cpu->iowait_boost >>= 1;
@@ -1647,43 +1475,6 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
return target;
}
-static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
-{
- int32_t perf_scaled, max_pstate, current_pstate, sample_ratio;
- u64 duration_ns;
-
- /*
- * perf_scaled is the ratio of the average P-state during the last
- * sampling period to the P-state requested last time (in percent).
- *
- * That measures the system's response to the previous P-state
- * selection.
- */
- max_pstate = cpu->pstate.max_pstate_physical;
- current_pstate = cpu->pstate.current_pstate;
- perf_scaled = mul_ext_fp(cpu->sample.core_avg_perf,
- div_fp(100 * max_pstate, current_pstate));
-
- /*
- * Since our utilization update callback will not run unless we are
- * in C0, check if the actual elapsed time is significantly greater (3x)
- * than our sample interval. If it is, then we were idle for a long
- * enough period of time to adjust our performance metric.
- */
- duration_ns = cpu->sample.time - cpu->last_sample_time;
- if ((s64)duration_ns > pid_params.sample_rate_ns * 3) {
- sample_ratio = div_fp(pid_params.sample_rate_ns, duration_ns);
- perf_scaled = mul_fp(perf_scaled, sample_ratio);
- } else {
- sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc);
- if (sample_ratio < int_tofp(1))
- perf_scaled = 0;
- }
-
- cpu->sample.busy_scaled = perf_scaled;
- return cpu->pstate.current_pstate - pid_calc(&cpu->pid, perf_scaled);
-}
-
static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate)
{
int max_pstate = intel_pstate_get_base_pstate(cpu);
@@ -1703,13 +1494,15 @@ static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
}
-static void intel_pstate_adjust_pstate(struct cpudata *cpu, int target_pstate)
+static void intel_pstate_adjust_pstate(struct cpudata *cpu)
{
int from = cpu->pstate.current_pstate;
struct sample *sample;
+ int target_pstate;
update_turbo_state();
+ target_pstate = get_target_pstate(cpu);
target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu);
intel_pstate_update_pstate(cpu, target_pstate);
@@ -1726,31 +1519,27 @@ static void intel_pstate_adjust_pstate(struct cpudata *cpu, int target_pstate)
fp_toint(cpu->iowait_boost * 100));
}
-static void intel_pstate_update_util_pid(struct update_util_data *data,
- u64 time, unsigned int flags)
-{
- struct cpudata *cpu = container_of(data, struct cpudata, update_util);
- u64 delta_ns = time - cpu->sample.time;
-
- if ((s64)delta_ns < pid_params.sample_rate_ns)
- return;
-
- if (intel_pstate_sample(cpu, time)) {
- int target_pstate;
-
- target_pstate = get_target_pstate_use_performance(cpu);
- intel_pstate_adjust_pstate(cpu, target_pstate);
- }
-}
-
static void intel_pstate_update_util(struct update_util_data *data, u64 time,
unsigned int flags)
{
struct cpudata *cpu = container_of(data, struct cpudata, update_util);
u64 delta_ns;
+ /* Don't allow remote callbacks */
+ if (smp_processor_id() != cpu->cpu)
+ return;
+
if (flags & SCHED_CPUFREQ_IOWAIT) {
cpu->iowait_boost = int_tofp(1);
+ cpu->last_update = time;
+ /*
+ * Busy was 100% during the last sample, so the P-state was
+ * already at its maximum; skip the computation to avoid the
+ * overhead.
+ */
+ if (fp_toint(cpu->sample.busy_scaled) == 100)
+ return;
+
+ goto set_pstate;
} else if (cpu->iowait_boost) {
/* Clear iowait_boost if the CPU may have been idle. */
delta_ns = time - cpu->last_update;
@@ -1759,15 +1548,12 @@ static void intel_pstate_update_util(struct update_util_data *data, u64 time,
}
cpu->last_update = time;
delta_ns = time - cpu->sample.time;
- if ((s64)delta_ns < INTEL_PSTATE_DEFAULT_SAMPLING_INTERVAL)
+ if ((s64)delta_ns < INTEL_PSTATE_SAMPLING_INTERVAL)
return;
- if (intel_pstate_sample(cpu, time)) {
- int target_pstate;
-
- target_pstate = get_target_pstate_use_cpu_load(cpu);
- intel_pstate_adjust_pstate(cpu, target_pstate);
- }
+set_pstate:
+ if (intel_pstate_sample(cpu, time))
+ intel_pstate_adjust_pstate(cpu);
}
static struct pstate_funcs core_funcs = {
@@ -1777,7 +1563,6 @@ static struct pstate_funcs core_funcs = {
.get_turbo = core_get_turbo_pstate,
.get_scaling = core_get_scaling,
.get_val = core_get_val,
- .update_util = intel_pstate_update_util_pid,
};
static const struct pstate_funcs silvermont_funcs = {
@@ -1788,7 +1573,6 @@ static const struct pstate_funcs silvermont_funcs = {
.get_val = atom_get_val,
.get_scaling = silvermont_get_scaling,
.get_vid = atom_get_vid,
- .update_util = intel_pstate_update_util,
};
static const struct pstate_funcs airmont_funcs = {
@@ -1799,7 +1583,6 @@ static const struct pstate_funcs airmont_funcs = {
.get_val = atom_get_val,
.get_scaling = airmont_get_scaling,
.get_vid = atom_get_vid,
- .update_util = intel_pstate_update_util,
};
static const struct pstate_funcs knl_funcs = {
@@ -1807,9 +1590,9 @@ static const struct pstate_funcs knl_funcs = {
.get_max_physical = core_get_max_pstate_physical,
.get_min = core_get_min_pstate,
.get_turbo = knl_get_turbo_pstate,
+ .get_aperf_mperf_shift = knl_get_aperf_mperf_shift,
.get_scaling = core_get_scaling,
.get_val = core_get_val,
- .update_util = intel_pstate_update_util_pid,
};
static const struct pstate_funcs bxt_funcs = {
@@ -1819,7 +1602,6 @@ static const struct pstate_funcs bxt_funcs = {
.get_turbo = core_get_turbo_pstate,
.get_scaling = core_get_scaling,
.get_val = core_get_val,
- .update_util = intel_pstate_update_util,
};
#define ICPU(model, policy) \
@@ -1863,8 +1645,6 @@ static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
{}
};
-static bool pid_in_use(void);
-
static int intel_pstate_init_cpu(unsigned int cpunum)
{
struct cpudata *cpu;
@@ -1895,8 +1675,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
intel_pstate_disable_ee(cpunum);
intel_pstate_hwp_enable(cpu);
- } else if (pid_in_use()) {
- intel_pstate_pid_reset(cpu);
}
intel_pstate_get_cpu_pstates(cpu);
@@ -1906,13 +1684,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
return 0;
}
-static unsigned int intel_pstate_get(unsigned int cpu_num)
-{
- struct cpudata *cpu = all_cpu_data[cpu_num];
-
- return cpu ? get_avg_frequency(cpu) : 0;
-}
-
static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
{
struct cpudata *cpu = all_cpu_data[cpu_num];
@@ -1926,7 +1697,7 @@ static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
/* Prevent intel_pstate_update_util() from using stale data. */
cpu->sample.time = 0;
cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
- pstate_funcs.update_util);
+ intel_pstate_update_util);
cpu->update_util_set = true;
}
@@ -2124,7 +1895,6 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
policy->cpuinfo.max_freq *= cpu->pstate.scaling;
intel_pstate_init_acpi_perf_limits(policy);
- cpumask_set_cpu(policy->cpu, policy->cpus);
policy->fast_switch_possible = true;
@@ -2138,7 +1908,6 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
if (ret)
return ret;
- policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE))
policy->policy = CPUFREQ_POLICY_PERFORMANCE;
else
@@ -2153,7 +1922,6 @@ static struct cpufreq_driver intel_pstate = {
.setpolicy = intel_pstate_set_policy,
.suspend = intel_pstate_hwp_save_state,
.resume = intel_pstate_resume,
- .get = intel_pstate_get,
.init = intel_pstate_cpu_init,
.exit = intel_pstate_cpu_exit,
.stop_cpu = intel_pstate_stop_cpu,
@@ -2254,12 +2022,6 @@ static struct cpufreq_driver intel_cpufreq = {
static struct cpufreq_driver *default_driver = &intel_pstate;
-static bool pid_in_use(void)
-{
- return intel_pstate_driver == &intel_pstate &&
- pstate_funcs.update_util == intel_pstate_update_util_pid;
-}
-
static void intel_pstate_driver_cleanup(void)
{
unsigned int cpu;
@@ -2294,9 +2056,6 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver)
global.min_perf_pct = min_perf_pct_min();
- if (pid_in_use())
- intel_pstate_debug_expose_params();
-
return 0;
}
@@ -2305,9 +2064,6 @@ static int intel_pstate_unregister_driver(void)
if (hwp_active)
return -EBUSY;
- if (pid_in_use())
- intel_pstate_debug_hide_params();
-
cpufreq_unregister_driver(intel_pstate_driver);
intel_pstate_driver_cleanup();
@@ -2375,24 +2131,6 @@ static int __init intel_pstate_msrs_not_valid(void)
return 0;
}
-#ifdef CONFIG_ACPI
-static void intel_pstate_use_acpi_profile(void)
-{
- switch (acpi_gbl_FADT.preferred_profile) {
- case PM_MOBILE:
- case PM_TABLET:
- case PM_APPLIANCE_PC:
- case PM_DESKTOP:
- case PM_WORKSTATION:
- pstate_funcs.update_util = intel_pstate_update_util;
- }
-}
-#else
-static void intel_pstate_use_acpi_profile(void)
-{
-}
-#endif
-
static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
{
pstate_funcs.get_max = funcs->get_max;
@@ -2402,9 +2140,7 @@ static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
pstate_funcs.get_scaling = funcs->get_scaling;
pstate_funcs.get_val = funcs->get_val;
pstate_funcs.get_vid = funcs->get_vid;
- pstate_funcs.update_util = funcs->update_util;
-
- intel_pstate_use_acpi_profile();
+ pstate_funcs.get_aperf_mperf_shift = funcs->get_aperf_mperf_shift;
}
#ifdef CONFIG_ACPI
@@ -2458,39 +2194,31 @@ enum {
PPC,
};
-struct hw_vendor_info {
- u16 valid;
- char oem_id[ACPI_OEM_ID_SIZE];
- char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
- int oem_pwr_table;
-};
-
/* Hardware vendor-specific info that has its own power management modes */
-static struct hw_vendor_info vendor_info[] __initdata = {
- {1, "HP ", "ProLiant", PSS},
- {1, "ORACLE", "X4-2 ", PPC},
- {1, "ORACLE", "X4-2L ", PPC},
- {1, "ORACLE", "X4-2B ", PPC},
- {1, "ORACLE", "X3-2 ", PPC},
- {1, "ORACLE", "X3-2L ", PPC},
- {1, "ORACLE", "X3-2B ", PPC},
- {1, "ORACLE", "X4470M2 ", PPC},
- {1, "ORACLE", "X4270M3 ", PPC},
- {1, "ORACLE", "X4270M2 ", PPC},
- {1, "ORACLE", "X4170M2 ", PPC},
- {1, "ORACLE", "X4170 M3", PPC},
- {1, "ORACLE", "X4275 M3", PPC},
- {1, "ORACLE", "X6-2 ", PPC},
- {1, "ORACLE", "Sudbury ", PPC},
- {0, "", ""},
+static struct acpi_platform_list plat_info[] __initdata = {
+ {"HP ", "ProLiant", 0, ACPI_SIG_FADT, all_versions, 0, PSS},
+ {"ORACLE", "X4-2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
+ {"ORACLE", "X4-2L ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
+ {"ORACLE", "X4-2B ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
+ {"ORACLE", "X3-2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
+ {"ORACLE", "X3-2L ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
+ {"ORACLE", "X3-2B ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
+ {"ORACLE", "X4470M2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
+ {"ORACLE", "X4270M3 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
+ {"ORACLE", "X4270M2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
+ {"ORACLE", "X4170M2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
+ {"ORACLE", "X4170 M3", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
+ {"ORACLE", "X4275 M3", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
+ {"ORACLE", "X6-2 ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
+ {"ORACLE", "Sudbury ", 0, ACPI_SIG_FADT, all_versions, 0, PPC},
+ { } /* End */
};
static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
{
- struct acpi_table_header hdr;
- struct hw_vendor_info *v_info;
const struct x86_cpu_id *id;
u64 misc_pwr;
+ int idx;
id = x86_match_cpu(intel_pstate_cpu_oob_ids);
if (id) {
@@ -2499,21 +2227,15 @@ static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
return true;
}
- if (acpi_disabled ||
- ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
+ idx = acpi_match_platform_list(plat_info);
+ if (idx < 0)
return false;
- for (v_info = vendor_info; v_info->valid; v_info++) {
- if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) &&
- !strncmp(hdr.oem_table_id, v_info->oem_table_id,
- ACPI_OEM_TABLE_ID_SIZE))
- switch (v_info->oem_pwr_table) {
- case PSS:
- return intel_pstate_no_acpi_pss();
- case PPC:
- return intel_pstate_has_acpi_ppc() &&
- (!force_load);
- }
+ switch (plat_info[idx].data) {
+ case PSS:
+ return intel_pstate_no_acpi_pss();
+ case PPC:
+ return intel_pstate_has_acpi_ppc() && !force_load;
}
return false;
@@ -2548,9 +2270,7 @@ static int __init intel_pstate_init(void)
if (x86_match_cpu(hwp_support_ids)) {
copy_cpu_funcs(&core_funcs);
- if (no_hwp) {
- pstate_funcs.update_util = intel_pstate_update_util;
- } else {
+ if (!no_hwp) {
hwp_active++;
intel_pstate.attr = hwp_cpufreq_attrs;
goto hwp_cpu_matched;
diff --git a/drivers/cpufreq/longrun.c b/drivers/cpufreq/longrun.c
index 074971b12635..542aa9adba1a 100644
--- a/drivers/cpufreq/longrun.c
+++ b/drivers/cpufreq/longrun.c
@@ -270,7 +270,6 @@ static int longrun_cpu_init(struct cpufreq_policy *policy)
/* cpuinfo and default policy values */
policy->cpuinfo.min_freq = longrun_low_freq;
policy->cpuinfo.max_freq = longrun_high_freq;
- policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
longrun_get_policy(policy);
return 0;
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index 9ac27b22476c..da344696beed 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -114,7 +114,7 @@ static struct cpufreq_driver loongson2_cpufreq_driver = {
.attr = cpufreq_generic_attr,
};
-static struct platform_device_id platform_device_ids[] = {
+static const struct platform_device_id platform_device_ids[] = {
{
.name = "loongson2_cpufreq",
},
diff --git a/drivers/cpufreq/mt8173-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index f9f00fb4bc3a..18c4bd9a5c65 100644
--- a/drivers/cpufreq/mt8173-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -507,7 +507,7 @@ static int mtk_cpufreq_exit(struct cpufreq_policy *policy)
return 0;
}
-static struct cpufreq_driver mt8173_cpufreq_driver = {
+static struct cpufreq_driver mtk_cpufreq_driver = {
.flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
.verify = cpufreq_generic_frequency_table_verify,
@@ -520,7 +520,7 @@ static struct cpufreq_driver mt8173_cpufreq_driver = {
.attr = cpufreq_generic_attr,
};
-static int mt8173_cpufreq_probe(struct platform_device *pdev)
+static int mtk_cpufreq_probe(struct platform_device *pdev)
{
struct mtk_cpu_dvfs_info *info, *tmp;
int cpu, ret;
@@ -547,7 +547,7 @@ static int mt8173_cpufreq_probe(struct platform_device *pdev)
list_add(&info->list_head, &dvfs_info_list);
}
- ret = cpufreq_register_driver(&mt8173_cpufreq_driver);
+ ret = cpufreq_register_driver(&mtk_cpufreq_driver);
if (ret) {
dev_err(&pdev->dev, "failed to register mtk cpufreq driver\n");
goto release_dvfs_info_list;
@@ -564,15 +564,18 @@ release_dvfs_info_list:
return ret;
}
-static struct platform_driver mt8173_cpufreq_platdrv = {
+static struct platform_driver mtk_cpufreq_platdrv = {
.driver = {
- .name = "mt8173-cpufreq",
+ .name = "mtk-cpufreq",
},
- .probe = mt8173_cpufreq_probe,
+ .probe = mtk_cpufreq_probe,
};
/* List of machines supported by this driver */
-static const struct of_device_id mt8173_cpufreq_machines[] __initconst = {
+static const struct of_device_id mtk_cpufreq_machines[] __initconst = {
+ { .compatible = "mediatek,mt2701", },
+ { .compatible = "mediatek,mt7622", },
+ { .compatible = "mediatek,mt7623", },
{ .compatible = "mediatek,mt817x", },
{ .compatible = "mediatek,mt8173", },
{ .compatible = "mediatek,mt8176", },
@@ -580,7 +583,7 @@ static const struct of_device_id mt8173_cpufreq_machines[] __initconst = {
{ }
};
-static int __init mt8173_cpufreq_driver_init(void)
+static int __init mtk_cpufreq_driver_init(void)
{
struct device_node *np;
const struct of_device_id *match;
@@ -591,14 +594,14 @@ static int __init mt8173_cpufreq_driver_init(void)
if (!np)
return -ENODEV;
- match = of_match_node(mt8173_cpufreq_machines, np);
+ match = of_match_node(mtk_cpufreq_machines, np);
of_node_put(np);
if (!match) {
- pr_warn("Machine is not compatible with mt8173-cpufreq\n");
+ pr_warn("Machine is not compatible with mtk-cpufreq\n");
return -ENODEV;
}
- err = platform_driver_register(&mt8173_cpufreq_platdrv);
+ err = platform_driver_register(&mtk_cpufreq_platdrv);
if (err)
return err;
@@ -608,7 +611,7 @@ static int __init mt8173_cpufreq_driver_init(void)
* and the device registration codes are put here to handle defer
* probing.
*/
- pdev = platform_device_register_simple("mt8173-cpufreq", -1, NULL, 0);
+ pdev = platform_device_register_simple("mtk-cpufreq", -1, NULL, 0);
if (IS_ERR(pdev)) {
pr_err("failed to register mtk-cpufreq platform device\n");
return PTR_ERR(pdev);
@@ -616,4 +619,4 @@ static int __init mt8173_cpufreq_driver_init(void)
return 0;
}
-device_initcall(mt8173_cpufreq_driver_init);
+device_initcall(mtk_cpufreq_driver_init);
diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c
index ff44016ea031..61ae06ca008e 100644
--- a/drivers/cpufreq/pmac32-cpufreq.c
+++ b/drivers/cpufreq/pmac32-cpufreq.c
@@ -442,7 +442,8 @@ static struct cpufreq_driver pmac_cpufreq_driver = {
.init = pmac_cpufreq_cpu_init,
.suspend = pmac_cpufreq_suspend,
.resume = pmac_cpufreq_resume,
- .flags = CPUFREQ_PM_NO_WARN,
+ .flags = CPUFREQ_PM_NO_WARN |
+ CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
.attr = cpufreq_generic_attr,
.name = "powermac",
};
@@ -626,14 +627,16 @@ static int __init pmac_cpufreq_setup(void)
if (!value)
goto out;
cur_freq = (*value) / 1000;
- transition_latency = CPUFREQ_ETERNAL;
/* Check for 7447A based MacRISC3 */
if (of_machine_is_compatible("MacRISC3") &&
of_get_property(cpunode, "dynamic-power-step", NULL) &&
PVR_VER(mfspr(SPRN_PVR)) == 0x8003) {
pmac_cpufreq_init_7447A(cpunode);
+
+ /* Allow dynamic switching */
transition_latency = 8000000;
+ pmac_cpufreq_driver.flags &= ~CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING;
/* Check for other MacRISC3 machines */
} else if (of_machine_is_compatible("PowerBook3,4") ||
of_machine_is_compatible("PowerBook3,5") ||
diff --git a/drivers/cpufreq/pmac64-cpufreq.c b/drivers/cpufreq/pmac64-cpufreq.c
index 267e0894c62d..be623dd7b9f2 100644
--- a/drivers/cpufreq/pmac64-cpufreq.c
+++ b/drivers/cpufreq/pmac64-cpufreq.c
@@ -516,7 +516,7 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode)
goto bail;
}
- DBG("cpufreq: i2c clock chip found: %s\n", hwclock->full_name);
+ DBG("cpufreq: i2c clock chip found: %pOF\n", hwclock);
/* Now get all the platform functions */
pfunc_cpu_getfreq =
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index f82074eea779..5d31c2db12a3 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -602,6 +602,7 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev)
}
clk_base = of_iomap(np, 0);
+ of_node_put(np);
if (!clk_base) {
pr_err("%s: failed to map clock registers\n", __func__);
return -EFAULT;
@@ -612,6 +613,7 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev)
if (id < 0 || id >= ARRAY_SIZE(dmc_base)) {
pr_err("%s: failed to get alias of dmc node '%s'\n",
__func__, np->name);
+ of_node_put(np);
return id;
}
@@ -619,6 +621,7 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev)
if (!dmc_base[id]) {
pr_err("%s: failed to map dmc%d registers\n",
__func__, id);
+ of_node_put(np);
return -EFAULT;
}
}
diff --git a/drivers/cpufreq/sa1100-cpufreq.c b/drivers/cpufreq/sa1100-cpufreq.c
index 728eab77e8e0..e2d8a77c36d5 100644
--- a/drivers/cpufreq/sa1100-cpufreq.c
+++ b/drivers/cpufreq/sa1100-cpufreq.c
@@ -197,11 +197,12 @@ static int sa1100_target(struct cpufreq_policy *policy, unsigned int ppcr)
static int __init sa1100_cpu_init(struct cpufreq_policy *policy)
{
- return cpufreq_generic_init(policy, sa11x0_freq_table, CPUFREQ_ETERNAL);
+ return cpufreq_generic_init(policy, sa11x0_freq_table, 0);
}
static struct cpufreq_driver sa1100_driver __refdata = {
- .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+ CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = sa1100_target,
.get = sa11x0_getspeed,
diff --git a/drivers/cpufreq/sa1110-cpufreq.c b/drivers/cpufreq/sa1110-cpufreq.c
index 2bac9b6cfeea..66e5fb088ecc 100644
--- a/drivers/cpufreq/sa1110-cpufreq.c
+++ b/drivers/cpufreq/sa1110-cpufreq.c
@@ -306,13 +306,14 @@ static int sa1110_target(struct cpufreq_policy *policy, unsigned int ppcr)
static int __init sa1110_cpu_init(struct cpufreq_policy *policy)
{
- return cpufreq_generic_init(policy, sa11x0_freq_table, CPUFREQ_ETERNAL);
+ return cpufreq_generic_init(policy, sa11x0_freq_table, 0);
}
/* sa1110_driver needs __refdata because it must remain after init registers
* it with cpufreq_register_driver() */
static struct cpufreq_driver sa1110_driver __refdata = {
- .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+ CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = sa1110_target,
.get = sa11x0_getspeed,
diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c
index 719c3d9f07fb..28893d435cf5 100644
--- a/drivers/cpufreq/sh-cpufreq.c
+++ b/drivers/cpufreq/sh-cpufreq.c
@@ -137,8 +137,6 @@ static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy)
(clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
}
- policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
-
dev_info(dev, "CPU Frequencies - Minimum %u.%03u MHz, "
"Maximum %u.%03u MHz.\n",
policy->min / 1000, policy->min % 1000,
@@ -159,6 +157,7 @@ static int sh_cpufreq_cpu_exit(struct cpufreq_policy *policy)
static struct cpufreq_driver sh_cpufreq_driver = {
.name = "sh",
+ .flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
.get = sh_cpufreq_get,
.target = sh_cpufreq_target,
.verify = sh_cpufreq_verify,
diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c
index b86953a3ddc4..0412a246a785 100644
--- a/drivers/cpufreq/speedstep-ich.c
+++ b/drivers/cpufreq/speedstep-ich.c
@@ -207,7 +207,7 @@ static unsigned int speedstep_detect_chipset(void)
* 8100 which use a pretty old revision of the 82815
* host bridge. Abort on these systems.
*/
- static struct pci_dev *hostbridge;
+ struct pci_dev *hostbridge;
hostbridge = pci_get_subsys(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_82815_MC,
diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c
index 1b8062182c81..ccab452a4ef5 100644
--- a/drivers/cpufreq/speedstep-lib.c
+++ b/drivers/cpufreq/speedstep-lib.c
@@ -35,7 +35,7 @@ static int relaxed_check;
static unsigned int pentium3_get_frequency(enum speedstep_processor processor)
{
/* See table 14 of p3_ds.pdf and table 22 of 29834003.pdf */
- struct {
+ static const struct {
unsigned int ratio; /* Frequency Multiplier (x10) */
u8 bitmap; /* power on configuration bits
[27, 25:22] (in MSR 0x2a) */
@@ -58,7 +58,7 @@ static unsigned int pentium3_get_frequency(enum speedstep_processor processor)
};
/* PIII(-M) FSB settings: see table b1-b of 24547206.pdf */
- struct {
+ static const struct {
unsigned int value; /* Front Side Bus speed in MHz */
u8 bitmap; /* power on configuration bits [18: 19]
(in MSR 0x2a) */
diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c
index 37b30071c220..d23f24ccff38 100644
--- a/drivers/cpufreq/speedstep-smi.c
+++ b/drivers/cpufreq/speedstep-smi.c
@@ -266,7 +266,6 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
pr_debug("workaround worked.\n");
}
- policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
return cpufreq_table_validate_and_show(policy, speedstep_freqs);
}
@@ -290,6 +289,7 @@ static int speedstep_resume(struct cpufreq_policy *policy)
static struct cpufreq_driver speedstep_driver = {
.name = "speedstep-smi",
+ .flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = speedstep_target,
.init = speedstep_cpu_init,
diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c
index d2d0430d09d4..47105735df12 100644
--- a/drivers/cpufreq/sti-cpufreq.c
+++ b/drivers/cpufreq/sti-cpufreq.c
@@ -65,8 +65,8 @@ static int sti_cpufreq_fetch_major(void) {
ret = of_property_read_u32_index(np, "st,syscfg",
MAJOR_ID_INDEX, &major_offset);
if (ret) {
- dev_err(dev, "No major number offset provided in %s [%d]\n",
- np->full_name, ret);
+ dev_err(dev, "No major number offset provided in %pOF [%d]\n",
+ np, ret);
return ret;
}
@@ -92,8 +92,8 @@ static int sti_cpufreq_fetch_minor(void)
MINOR_ID_INDEX, &minor_offset);
if (ret) {
dev_err(dev,
- "No minor number offset provided %s [%d]\n",
- np->full_name, ret);
+ "No minor number offset provided %pOF [%d]\n",
+ np, ret);
return ret;
}
diff --git a/drivers/cpufreq/tango-cpufreq.c b/drivers/cpufreq/tango-cpufreq.c
new file mode 100644
index 000000000000..89a7f860bfe8
--- /dev/null
+++ b/drivers/cpufreq/tango-cpufreq.c
@@ -0,0 +1,38 @@
+#include <linux/of.h>
+#include <linux/cpu.h>
+#include <linux/clk.h>
+#include <linux/pm_opp.h>
+#include <linux/platform_device.h>
+
+static const struct of_device_id machines[] __initconst = {
+ { .compatible = "sigma,tango4" },
+ { /* sentinel */ }
+};
+
+static int __init tango_cpufreq_init(void)
+{
+ struct device *cpu_dev = get_cpu_device(0);
+ unsigned long max_freq;
+ struct clk *cpu_clk;
+ void *res;
+
+ if (!of_match_node(machines, of_root))
+ return -ENODEV;
+
+ cpu_clk = clk_get(cpu_dev, NULL);
+ if (IS_ERR(cpu_clk))
+ return -ENODEV;
+
+ max_freq = clk_get_rate(cpu_clk);
+
+ dev_pm_opp_add(cpu_dev, max_freq / 1, 0);
+ dev_pm_opp_add(cpu_dev, max_freq / 2, 0);
+ dev_pm_opp_add(cpu_dev, max_freq / 3, 0);
+ dev_pm_opp_add(cpu_dev, max_freq / 5, 0);
+ dev_pm_opp_add(cpu_dev, max_freq / 9, 0);
+
+ res = platform_device_register_data(NULL, "cpufreq-dt", -1, NULL, 0);
+
+ return PTR_ERR_OR_ZERO(res);
+}
+device_initcall(tango_cpufreq_init);
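The new tango driver above only seeds the OPP table with integer divisions of the boot-time CPU clock rate and then defers the actual frequency scaling to the generic cpufreq-dt platform driver it registers. The repeated dev_pm_opp_add() calls could equally be written as a loop over the divisor set; a small sketch, assuming the same divisors:

	static const unsigned int divisors[] = { 1, 2, 3, 5, 9 };
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(divisors); i++)
		dev_pm_opp_add(cpu_dev, max_freq / divisors[i], 0);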
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index a7b5658c0460..b29cd3398463 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -245,8 +245,6 @@ static int ti_cpufreq_init(void)
if (ret)
goto fail_put_node;
- of_node_put(opp_data->opp_node);
-
ret = PTR_ERR_OR_ZERO(dev_pm_opp_set_supported_hw(opp_data->cpu_dev,
version, VERSION_COUNT));
if (ret) {
@@ -255,6 +253,8 @@ static int ti_cpufreq_init(void)
goto fail_put_node;
}
+ of_node_put(opp_data->opp_node);
+
register_cpufreq_dt:
platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
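The ti-cpufreq hunk above is a reference-count fix: the node was released before dev_pm_opp_set_supported_hw() could still fail and jump to fail_put_node (which presumably drops the same reference), so a failure there would have put opp_data->opp_node twice. Moving of_node_put() after the last point that can reach the error label keeps the count balanced on every path. The general idiom, sketched with hypothetical helpers:

	struct device_node *np;
	int ret;

	np = of_find_node_by_name(NULL, "example");	/* hypothetical lookup */
	if (!np)
		return -ENOENT;

	ret = first_step(np);				/* hypothetical helpers */
	if (!ret)
		ret = second_step(np);

	of_node_put(np);	/* drop the reference once, after the last user on every path */
	return ret;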
diff --git a/drivers/cpufreq/unicore2-cpufreq.c b/drivers/cpufreq/unicore2-cpufreq.c
index 6f9dfa80563a..db62d9844751 100644
--- a/drivers/cpufreq/unicore2-cpufreq.c
+++ b/drivers/cpufreq/unicore2-cpufreq.c
@@ -58,13 +58,12 @@ static int __init ucv2_cpu_init(struct cpufreq_policy *policy)
policy->min = policy->cpuinfo.min_freq = 250000;
policy->max = policy->cpuinfo.max_freq = 1000000;
- policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
policy->clk = clk_get(NULL, "MAIN_CLK");
return PTR_ERR_OR_ZERO(policy->clk);
}
static struct cpufreq_driver ucv2_driver = {
- .flags = CPUFREQ_STICKY,
+ .flags = CPUFREQ_STICKY | CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
.verify = ucv2_verify_speed,
.target = ucv2_target,
.get = cpufreq_generic_get,
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index 3ba81b1dffad..0b67a05a7aae 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -5,6 +5,7 @@
obj-y += cpuidle.o driver.o governor.o sysfs.o governors/
obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
obj-$(CONFIG_DT_IDLE_STATES) += dt_idle_states.o
+obj-$(CONFIG_ARCH_HAS_CPU_RELAX) += poll_state.o
##################################################################################
# ARM SoC drivers
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index 71e586d7df71..147f38ea0fcd 100644
--- a/drivers/cpuidle/coupled.c
+++ b/drivers/cpuidle/coupled.c
@@ -119,13 +119,13 @@ struct cpuidle_coupled {
#define CPUIDLE_COUPLED_NOT_IDLE (-1)
-static DEFINE_PER_CPU(struct call_single_data, cpuidle_coupled_poke_cb);
+static DEFINE_PER_CPU(call_single_data_t, cpuidle_coupled_poke_cb);
/*
* The cpuidle_coupled_poke_pending mask is used to avoid calling
- * __smp_call_function_single with the per cpu call_single_data struct already
+ * __smp_call_function_single with the per cpu call_single_data_t struct already
* in use. This prevents a deadlock where two cpus are waiting for each others
- * call_single_data struct to be available
+ * call_single_data_t struct to be available
*/
static cpumask_t cpuidle_coupled_poke_pending;
@@ -339,7 +339,7 @@ static void cpuidle_coupled_handle_poke(void *info)
*/
static void cpuidle_coupled_poke(int cpu)
{
- struct call_single_data *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu);
+ call_single_data_t *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu);
if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poke_pending))
smp_call_function_single_async(cpu, csd);
@@ -651,7 +651,7 @@ int cpuidle_coupled_register_device(struct cpuidle_device *dev)
{
int cpu;
struct cpuidle_device *other_dev;
- struct call_single_data *csd;
+ call_single_data_t *csd;
struct cpuidle_coupled *coupled;
if (cpumask_empty(&dev->coupled_cpus))
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index 37b0698b7193..42896a67aeae 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -235,6 +235,7 @@ static inline int validate_dt_prop_sizes(const char *prop1, int prop1_len,
return -1;
}
+extern u32 pnv_get_supported_cpuidle_states(void);
static int powernv_add_idle_states(void)
{
struct device_node *power_mgt;
@@ -248,6 +249,8 @@ static int powernv_add_idle_states(void)
const char *names[CPUIDLE_STATE_MAX];
u32 has_stop_states = 0;
int i, rc;
+ u32 supported_flags = pnv_get_supported_cpuidle_states();
+
/* Currently we have snooze statically defined */
@@ -362,6 +365,13 @@ static int powernv_add_idle_states(void)
for (i = 0; i < dt_idle_states; i++) {
unsigned int exit_latency, target_residency;
bool stops_timebase = false;
+
+ /*
+ * Skip the platform idle state whose flag isn't in
+ * the supported_cpuidle_states flag mask.
+ */
+ if ((flags[i] & supported_flags) != flags[i])
+ continue;
/*
* If an idle state has exit latency beyond
* POWERNV_THRESHOLD_LATENCY_NS then don't use it
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 60bb64f4329d..484cc8909d5c 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -77,7 +77,7 @@ static int find_deepest_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev,
unsigned int max_latency,
unsigned int forbidden_flags,
- bool freeze)
+ bool s2idle)
{
unsigned int latency_req = 0;
int i, ret = 0;
@@ -89,7 +89,7 @@ static int find_deepest_state(struct cpuidle_driver *drv,
if (s->disabled || su->disable || s->exit_latency <= latency_req
|| s->exit_latency > max_latency
|| (s->flags & forbidden_flags)
- || (freeze && !s->enter_freeze))
+ || (s2idle && !s->enter_s2idle))
continue;
latency_req = s->exit_latency;
@@ -128,7 +128,7 @@ int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
}
#ifdef CONFIG_SUSPEND
-static void enter_freeze_proper(struct cpuidle_driver *drv,
+static void enter_s2idle_proper(struct cpuidle_driver *drv,
struct cpuidle_device *dev, int index)
{
/*
@@ -143,7 +143,7 @@ static void enter_freeze_proper(struct cpuidle_driver *drv,
* suspended is generally unsafe.
*/
stop_critical_timings();
- drv->states[index].enter_freeze(dev, drv, index);
+ drv->states[index].enter_s2idle(dev, drv, index);
WARN_ON(!irqs_disabled());
/*
* timekeeping_resume() that will be called by tick_unfreeze() for the
@@ -155,25 +155,25 @@ static void enter_freeze_proper(struct cpuidle_driver *drv,
}
/**
- * cpuidle_enter_freeze - Enter an idle state suitable for suspend-to-idle.
+ * cpuidle_enter_s2idle - Enter an idle state suitable for suspend-to-idle.
* @drv: cpuidle driver for the given CPU.
* @dev: cpuidle device for the given CPU.
*
- * If there are states with the ->enter_freeze callback, find the deepest of
+ * If there are states with the ->enter_s2idle callback, find the deepest of
* them and enter it with frozen tick.
*/
-int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev)
+int cpuidle_enter_s2idle(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
int index;
/*
- * Find the deepest state with ->enter_freeze present, which guarantees
+ * Find the deepest state with ->enter_s2idle present, which guarantees
* that interrupts won't be enabled when it exits and allows the tick to
* be frozen safely.
*/
index = find_deepest_state(drv, dev, UINT_MAX, 0, true);
if (index > 0)
- enter_freeze_proper(drv, dev, index);
+ enter_s2idle_proper(drv, dev, index);
return index;
}
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index e53fb861beb0..dc32f34e68d9 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -179,36 +179,6 @@ static void __cpuidle_driver_init(struct cpuidle_driver *drv)
}
}
-#ifdef CONFIG_ARCH_HAS_CPU_RELAX
-static int __cpuidle poll_idle(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index)
-{
- local_irq_enable();
- if (!current_set_polling_and_test()) {
- while (!need_resched())
- cpu_relax();
- }
- current_clr_polling();
-
- return index;
-}
-
-static void poll_idle_init(struct cpuidle_driver *drv)
-{
- struct cpuidle_state *state = &drv->states[0];
-
- snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
- snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
- state->exit_latency = 0;
- state->target_residency = 0;
- state->power_usage = -1;
- state->enter = poll_idle;
- state->disabled = false;
-}
-#else
-static void poll_idle_init(struct cpuidle_driver *drv) {}
-#endif /* !CONFIG_ARCH_HAS_CPU_RELAX */
-
/**
* __cpuidle_register_driver: register the driver
* @drv: a valid pointer to a struct cpuidle_driver
@@ -246,8 +216,6 @@ static int __cpuidle_register_driver(struct cpuidle_driver *drv)
on_each_cpu_mask(drv->cpumask, cpuidle_setup_broadcast_timer,
(void *)1, 1);
- poll_idle_init(drv);
-
return 0;
}
diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c
index ae8eb0359889..53342b7f1010 100644
--- a/drivers/cpuidle/dt_idle_states.c
+++ b/drivers/cpuidle/dt_idle_states.c
@@ -41,9 +41,9 @@ static int init_state_node(struct cpuidle_state *idle_state,
/*
* Since this is not a "coupled" state, it's safe to assume interrupts
* won't be enabled when it exits allowing the tick to be frozen
- * safely. So enter() can be also enter_freeze() callback.
+ * safely. So enter() can be also enter_s2idle() callback.
*/
- idle_state->enter_freeze = match_id->data;
+ idle_state->enter_s2idle = match_id->data;
err = of_property_read_u32(state_node, "wakeup-latency-us",
&idle_state->exit_latency);
@@ -53,16 +53,16 @@ static int init_state_node(struct cpuidle_state *idle_state,
err = of_property_read_u32(state_node, "entry-latency-us",
&entry_latency);
if (err) {
- pr_debug(" * %s missing entry-latency-us property\n",
- state_node->full_name);
+ pr_debug(" * %pOF missing entry-latency-us property\n",
+ state_node);
return -EINVAL;
}
err = of_property_read_u32(state_node, "exit-latency-us",
&exit_latency);
if (err) {
- pr_debug(" * %s missing exit-latency-us property\n",
- state_node->full_name);
+ pr_debug(" * %pOF missing exit-latency-us property\n",
+ state_node);
return -EINVAL;
}
/*
@@ -75,8 +75,8 @@ static int init_state_node(struct cpuidle_state *idle_state,
err = of_property_read_u32(state_node, "min-residency-us",
&idle_state->target_residency);
if (err) {
- pr_debug(" * %s missing min-residency-us property\n",
- state_node->full_name);
+ pr_debug(" * %pOF missing min-residency-us property\n",
+ state_node);
return -EINVAL;
}
@@ -186,8 +186,8 @@ int dt_init_idle_driver(struct cpuidle_driver *drv,
}
if (!idle_state_valid(state_node, i, cpumask)) {
- pr_warn("%s idle state not valid, bailing out\n",
- state_node->full_name);
+ pr_warn("%pOF idle state not valid, bailing out\n",
+ state_node);
err = -EINVAL;
break;
}
@@ -200,8 +200,8 @@ int dt_init_idle_driver(struct cpuidle_driver *drv,
idle_state = &drv->states[state_idx++];
err = init_state_node(idle_state, matches, state_node);
if (err) {
- pr_err("Parsing idle state node %s failed with err %d\n",
- state_node->full_name, err);
+ pr_err("Parsing idle state node %pOF failed with err %d\n",
+ state_node, err);
err = -EINVAL;
break;
}
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index ac321f09e717..ce1a2ffffb2a 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -69,6 +69,7 @@ static int ladder_select_state(struct cpuidle_driver *drv,
struct ladder_device *ldev = this_cpu_ptr(&ladder_devices);
struct ladder_device_state *last_state;
int last_residency, last_idx = ldev->last_state_idx;
+ int first_idx = drv->states[0].flags & CPUIDLE_FLAG_POLLING ? 1 : 0;
int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
/* Special case when user has set very strict latency requirement */
@@ -96,13 +97,13 @@ static int ladder_select_state(struct cpuidle_driver *drv,
}
/* consider demotion */
- if (last_idx > CPUIDLE_DRIVER_STATE_START &&
+ if (last_idx > first_idx &&
(drv->states[last_idx].disabled ||
dev->states_usage[last_idx].disable ||
drv->states[last_idx].exit_latency > latency_req)) {
int i;
- for (i = last_idx - 1; i > CPUIDLE_DRIVER_STATE_START; i--) {
+ for (i = last_idx - 1; i > first_idx; i--) {
if (drv->states[i].exit_latency <= latency_req)
break;
}
@@ -110,7 +111,7 @@ static int ladder_select_state(struct cpuidle_driver *drv,
return i;
}
- if (last_idx > CPUIDLE_DRIVER_STATE_START &&
+ if (last_idx > first_idx &&
last_residency < last_state->threshold.demotion_time) {
last_state->stats.demotion_count++;
last_state->stats.promotion_count = 0;
@@ -133,13 +134,14 @@ static int ladder_enable_device(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{
int i;
+ int first_idx = drv->states[0].flags & CPUIDLE_FLAG_POLLING ? 1 : 0;
struct ladder_device *ldev = &per_cpu(ladder_devices, dev->cpu);
struct ladder_device_state *lstate;
struct cpuidle_state *state;
- ldev->last_state_idx = CPUIDLE_DRIVER_STATE_START;
+ ldev->last_state_idx = first_idx;
- for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
+ for (i = first_idx; i < drv->state_count; i++) {
state = &drv->states[i];
lstate = &ldev->states[i];
@@ -151,7 +153,7 @@ static int ladder_enable_device(struct cpuidle_driver *drv,
if (i < drv->state_count - 1)
lstate->threshold.promotion_time = state->exit_latency;
- if (i > CPUIDLE_DRIVER_STATE_START)
+ if (i > first_idx)
lstate->threshold.demotion_time = state->exit_latency;
}
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 61b64c2b2cb8..48eaf2879228 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -324,8 +324,9 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
expected_interval = get_typical_interval(data);
expected_interval = min(expected_interval, data->next_timer_us);
- if (CPUIDLE_DRIVER_STATE_START > 0) {
- struct cpuidle_state *s = &drv->states[CPUIDLE_DRIVER_STATE_START];
+ first_idx = 0;
+ if (drv->states[0].flags & CPUIDLE_FLAG_POLLING) {
+ struct cpuidle_state *s = &drv->states[1];
unsigned int polling_threshold;
/*
@@ -336,12 +337,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
polling_threshold = max_t(unsigned int, 20, s->target_residency);
if (data->next_timer_us > polling_threshold &&
latency_req > s->exit_latency && !s->disabled &&
- !dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable)
- first_idx = CPUIDLE_DRIVER_STATE_START;
- else
- first_idx = CPUIDLE_DRIVER_STATE_START - 1;
- } else {
- first_idx = 0;
+ !dev->states_usage[1].disable)
+ first_idx = 1;
}
/*
diff --git a/drivers/cpuidle/poll_state.c b/drivers/cpuidle/poll_state.c
new file mode 100644
index 000000000000..7416b16287de
--- /dev/null
+++ b/drivers/cpuidle/poll_state.c
@@ -0,0 +1,37 @@
+/*
+ * poll_state.c - Polling idle state
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/cpuidle.h>
+#include <linux/sched.h>
+#include <linux/sched/idle.h>
+
+static int __cpuidle poll_idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+{
+ local_irq_enable();
+ if (!current_set_polling_and_test()) {
+ while (!need_resched())
+ cpu_relax();
+ }
+ current_clr_polling();
+
+ return index;
+}
+
+void cpuidle_poll_state_init(struct cpuidle_driver *drv)
+{
+ struct cpuidle_state *state = &drv->states[0];
+
+ snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
+ snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
+ state->exit_latency = 0;
+ state->target_residency = 0;
+ state->power_usage = -1;
+ state->enter = poll_idle;
+ state->disabled = false;
+ state->flags = CPUIDLE_FLAG_POLLING;
+}
+EXPORT_SYMBOL_GPL(cpuidle_poll_state_init);
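With poll_idle_init() removed from the cpuidle core (see the driver.c hunk above), the polling state becomes opt-in: a driver that wants it installs it explicitly as state 0 via the new exported helper, and the governors detect it through CPUIDLE_FLAG_POLLING rather than the old CPUIDLE_DRIVER_STATE_START offset. A hedged sketch of a driver using the helper (all names hypothetical):

static struct cpuidle_driver my_idle_driver = {
	.name	= "my_idle",			/* hypothetical driver */
	.owner	= THIS_MODULE,
};

static int __init my_idle_init(void)
{
	cpuidle_poll_state_init(&my_idle_driver);	/* states[0] becomes "POLL" */
	/* states[1..n-1] would be filled in with real idle states here */
	my_idle_driver.state_count = 1;

	return cpuidle_register(&my_idle_driver, NULL);
}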
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 193204dfbf3a..fe33c199fc1a 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -525,12 +525,26 @@ config CRYPTO_DEV_ATMEL_SHA
To compile this driver as a module, choose M here: the module
will be called atmel-sha.
+config CRYPTO_DEV_ATMEL_ECC
+ tristate "Support for Microchip / Atmel ECC hw accelerator"
+ depends on ARCH_AT91 || COMPILE_TEST
+ depends on I2C
+ select CRYPTO_ECDH
+ select CRC16
+ help
+	  Microchip / Atmel ECC hw accelerator.
+	  Select this if you want to use the Microchip / Atmel module for
+	  the ECDH algorithm.
+
+ To compile this driver as a module, choose M here: the module
+ will be called atmel-ecc.
+
config CRYPTO_DEV_CCP
- bool "Support for AMD Cryptographic Coprocessor"
+ bool "Support for AMD Secure Processor"
depends on ((X86 && PCI) || (ARM64 && (OF_ADDRESS || ACPI))) && HAS_IOMEM
help
- The AMD Cryptographic Coprocessor provides hardware offload support
- for encryption, hashing and related operations.
+ The AMD Secure Processor provides support for the Cryptographic Coprocessor
+ (CCP) and the Platform Security Processor (PSP) devices.
if CRYPTO_DEV_CCP
source "drivers/crypto/ccp/Kconfig"
@@ -616,6 +630,14 @@ config CRYPTO_DEV_SUN4I_SS
To compile this driver as a module, choose M here: the module
will be called sun4i-ss.
+config CRYPTO_DEV_SUN4I_SS_PRNG
+ bool "Support for Allwinner Security System PRNG"
+ depends on CRYPTO_DEV_SUN4I_SS
+ select CRYPTO_RNG
+ help
+ Select this option if you want to provide kernel-side support for
+ the Pseudo-Random Number Generator found in the Security System.
+
config CRYPTO_DEV_ROCKCHIP
tristate "Rockchip's Cryptographic Engine driver"
depends on OF && ARCH_ROCKCHIP
@@ -655,7 +677,7 @@ source "drivers/crypto/virtio/Kconfig"
config CRYPTO_DEV_BCM_SPU
tristate "Broadcom symmetric crypto/hash acceleration support"
depends on ARCH_BCM_IPROC
- depends on BCM_PDC_MBOX
+ depends on MAILBOX
default m
select CRYPTO_DES
select CRYPTO_MD5
@@ -686,4 +708,25 @@ config CRYPTO_DEV_SAFEXCEL
chain mode, AES cipher mode and SHA1/SHA224/SHA256/SHA512 hash
algorithms.
+config CRYPTO_DEV_ARTPEC6
+ tristate "Support for Axis ARTPEC-6/7 hardware crypto acceleration."
+ depends on ARM && (ARCH_ARTPEC || COMPILE_TEST)
+ depends on HAS_DMA
+ depends on OF
+ select CRYPTO_AEAD
+ select CRYPTO_AES
+ select CRYPTO_ALGAPI
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_CTR
+ select CRYPTO_HASH
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
+ select CRYPTO_SHA384
+ select CRYPTO_SHA512
+ help
+ Enables the driver for the on-chip crypto accelerator
+ of Axis ARTPEC SoCs.
+
+ To compile this driver as a module, choose M here.
+
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 2c555a3393b2..808432b44c6b 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -1,6 +1,7 @@
obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o
obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
+obj-$(CONFIG_CRYPTO_DEV_ATMEL_ECC) += atmel-ecc.o
obj-$(CONFIG_CRYPTO_DEV_BFIN_CRC) += bfin_crc.o
obj-$(CONFIG_CRYPTO_DEV_CAVIUM_ZIP) += cavium/
obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/
@@ -35,7 +36,7 @@ obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/
obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
-obj-$(CONFIG_CRYPTO_DEV_STM32) += stm32/
+obj-$(CONFIG_ARCH_STM32) += stm32/
obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sunxi-ss/
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
@@ -43,3 +44,4 @@ obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio/
obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/
obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += inside-secure/
+obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/
diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c
new file mode 100644
index 000000000000..e66f18a0ddd0
--- /dev/null
+++ b/drivers/crypto/atmel-ecc.c
@@ -0,0 +1,781 @@
+/*
+ * Microchip / Atmel ECC (I2C) driver.
+ *
+ * Copyright (c) 2017, Microchip Technology Inc.
+ * Author: Tudor Ambarus <tudor.ambarus@microchip.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/bitrev.h>
+#include <linux/crc16.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <crypto/internal/kpp.h>
+#include <crypto/ecdh.h>
+#include <crypto/kpp.h>
+#include "atmel-ecc.h"
+
+/* Used for binding tfm objects to i2c clients. */
+struct atmel_ecc_driver_data {
+ struct list_head i2c_client_list;
+ spinlock_t i2c_list_lock;
+} ____cacheline_aligned;
+
+static struct atmel_ecc_driver_data driver_data;
+
+/**
+ * atmel_ecc_i2c_client_priv - i2c_client private data
+ * @client : pointer to i2c client device
+ * @i2c_client_list_node: part of i2c_client_list
+ * @lock : lock for sending i2c commands
+ * @wake_token : wake token array of zeros
+ * @wake_token_sz : size in bytes of the wake_token
+ * @tfm_count : number of active crypto transformations on i2c client
+ *
+ * Reads and writes from/to the i2c client are sequential. The first byte
+ * transmitted to the device is treated as the byte size. Any attempt to send
+ * more than this number of bytes will cause the device to not ACK those bytes.
+ * After the host writes a single command byte to the input buffer, reads are
+ * prohibited until after the device completes command execution. Use a mutex
+ * when sending i2c commands.
+ */
+struct atmel_ecc_i2c_client_priv {
+ struct i2c_client *client;
+ struct list_head i2c_client_list_node;
+ struct mutex lock;
+ u8 wake_token[WAKE_TOKEN_MAX_SIZE];
+ size_t wake_token_sz;
+ atomic_t tfm_count ____cacheline_aligned;
+};
+
+/**
+ * atmel_ecdh_ctx - transformation context
+ * @client : pointer to i2c client device
+ * @fallback : used for unsupported curves or when the user wants to use their
+ * own private key.
+ * @public_key : generated when calling set_secret(). It's the responsibility
+ * of the user to not call set_secret() while
+ * generate_public_key() or compute_shared_secret() are in flight.
+ * @curve_id : elliptic curve id
+ * @n_sz : size in bytes of the n prime
+ * @do_fallback: true when the device doesn't support the curve or when the user
+ * wants to use their own private key.
+ */
+struct atmel_ecdh_ctx {
+ struct i2c_client *client;
+ struct crypto_kpp *fallback;
+ const u8 *public_key;
+ unsigned int curve_id;
+ size_t n_sz;
+ bool do_fallback;
+};
+
+/**
+ * atmel_ecc_work_data - data structure representing the work
+ * @ctx : transformation context.
+ * @cbk : pointer to a callback function to be invoked upon completion of this
+ * request. This has the form:
+ * callback(struct atmel_ecc_work_data *work_data, void *areq, int status)
+ * where:
+ * @work_data: data structure representing the work
+ * @areq : optional pointer to an argument passed with the original
+ * request.
+ * @status : status returned from the i2c client device or i2c error.
+ * @areq: optional pointer to a user argument for use at callback time.
+ * @work: describes the task to be executed.
+ * @cmd : structure used for communicating with the device.
+ */
+struct atmel_ecc_work_data {
+ struct atmel_ecdh_ctx *ctx;
+ void (*cbk)(struct atmel_ecc_work_data *work_data, void *areq,
+ int status);
+ void *areq;
+ struct work_struct work;
+ struct atmel_ecc_cmd cmd;
+};
+
+static u16 atmel_ecc_crc16(u16 crc, const u8 *buffer, size_t len)
+{
+ return cpu_to_le16(bitrev16(crc16(crc, buffer, len)));
+}
+
+/**
+ * atmel_ecc_checksum() - Generate 16-bit CRC as required by ATMEL ECC.
+ * CRC16 verification of the count, opcode, param1, param2 and data bytes.
+ * The checksum is saved in little-endian format in the least significant
+ * two bytes of the command. CRC polynomial is 0x8005 and the initial register
+ * value should be zero.
+ *
+ * @cmd : structure used for communicating with the device.
+ */
+static void atmel_ecc_checksum(struct atmel_ecc_cmd *cmd)
+{
+ u8 *data = &cmd->count;
+ size_t len = cmd->count - CRC_SIZE;
+ u16 *crc16 = (u16 *)(data + len);
+
+ *crc16 = atmel_ecc_crc16(0, data, len);
+}
+
+static void atmel_ecc_init_read_cmd(struct atmel_ecc_cmd *cmd)
+{
+ cmd->word_addr = COMMAND;
+ cmd->opcode = OPCODE_READ;
+ /*
+ * Read the word from Configuration zone that contains the lock bytes
+ * (UserExtra, Selector, LockValue, LockConfig).
+ */
+ cmd->param1 = CONFIG_ZONE;
+ cmd->param2 = DEVICE_LOCK_ADDR;
+ cmd->count = READ_COUNT;
+
+ atmel_ecc_checksum(cmd);
+
+ cmd->msecs = MAX_EXEC_TIME_READ;
+ cmd->rxsize = READ_RSP_SIZE;
+}
+
+static void atmel_ecc_init_genkey_cmd(struct atmel_ecc_cmd *cmd, u16 keyid)
+{
+ cmd->word_addr = COMMAND;
+ cmd->count = GENKEY_COUNT;
+ cmd->opcode = OPCODE_GENKEY;
+ cmd->param1 = GENKEY_MODE_PRIVATE;
+ /* a random private key will be generated and stored in slot keyID */
+ cmd->param2 = cpu_to_le16(keyid);
+
+ atmel_ecc_checksum(cmd);
+
+ cmd->msecs = MAX_EXEC_TIME_GENKEY;
+ cmd->rxsize = GENKEY_RSP_SIZE;
+}
+
+static int atmel_ecc_init_ecdh_cmd(struct atmel_ecc_cmd *cmd,
+ struct scatterlist *pubkey)
+{
+ size_t copied;
+
+ cmd->word_addr = COMMAND;
+ cmd->count = ECDH_COUNT;
+ cmd->opcode = OPCODE_ECDH;
+ cmd->param1 = ECDH_PREFIX_MODE;
+ /* private key slot */
+ cmd->param2 = cpu_to_le16(DATA_SLOT_2);
+
+ /*
+ * The device only supports NIST P256 ECC keys. The public key size will
+ * always be the same. Use a macro for the key size to avoid unnecessary
+ * computations.
+ */
+ copied = sg_copy_to_buffer(pubkey, 1, cmd->data, ATMEL_ECC_PUBKEY_SIZE);
+ if (copied != ATMEL_ECC_PUBKEY_SIZE)
+ return -EINVAL;
+
+ atmel_ecc_checksum(cmd);
+
+ cmd->msecs = MAX_EXEC_TIME_ECDH;
+ cmd->rxsize = ECDH_RSP_SIZE;
+
+ return 0;
+}
+
+/*
+ * After wake and after execution of a command, there will be error, status, or
+ * result bytes in the device's output register that can be retrieved by the
+ * system. When the length of that group is four bytes, the codes returned are
+ * detailed in error_list.
+ */
+static int atmel_ecc_status(struct device *dev, u8 *status)
+{
+ size_t err_list_len = ARRAY_SIZE(error_list);
+ int i;
+ u8 err_id = status[1];
+
+ if (*status != STATUS_SIZE)
+ return 0;
+
+ if (err_id == STATUS_WAKE_SUCCESSFUL || err_id == STATUS_NOERR)
+ return 0;
+
+ for (i = 0; i < err_list_len; i++)
+ if (error_list[i].value == err_id)
+ break;
+
+ /* if err_id is not in the error_list then ignore it */
+ if (i != err_list_len) {
+ dev_err(dev, "%02x: %s:\n", err_id, error_list[i].error_text);
+ return err_id;
+ }
+
+ return 0;
+}
+
+static int atmel_ecc_wakeup(struct i2c_client *client)
+{
+ struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
+ u8 status[STATUS_RSP_SIZE];
+ int ret;
+
+ /*
+ * The device ignores any levels or transitions on the SCL pin when the
+ * device is idle, asleep or during waking up. Don't check for error
+ * when waking up the device.
+ */
+ i2c_master_send(client, i2c_priv->wake_token, i2c_priv->wake_token_sz);
+
+ /*
+ * Wait to wake the device. Typical execution times for ecdh and genkey
+ * are around tens of milliseconds. The delta is chosen to be 50 microseconds.
+ */
+ usleep_range(TWHI_MIN, TWHI_MAX);
+
+ ret = i2c_master_recv(client, status, STATUS_SIZE);
+ if (ret < 0)
+ return ret;
+
+ return atmel_ecc_status(&client->dev, status);
+}
+
+static int atmel_ecc_sleep(struct i2c_client *client)
+{
+ u8 sleep = SLEEP_TOKEN;
+
+ return i2c_master_send(client, &sleep, 1);
+}
+
+static void atmel_ecdh_done(struct atmel_ecc_work_data *work_data, void *areq,
+ int status)
+{
+ struct kpp_request *req = areq;
+ struct atmel_ecdh_ctx *ctx = work_data->ctx;
+ struct atmel_ecc_cmd *cmd = &work_data->cmd;
+ size_t copied;
+ size_t n_sz = ctx->n_sz;
+
+ if (status)
+ goto free_work_data;
+
+ /* copy the shared secret */
+ copied = sg_copy_from_buffer(req->dst, 1, &cmd->data[RSP_DATA_IDX],
+ n_sz);
+ if (copied != n_sz)
+ status = -EINVAL;
+
+ /* fall through */
+free_work_data:
+ kzfree(work_data);
+ kpp_request_complete(req, status);
+}
+
+/*
+ * atmel_ecc_send_receive() - send a command to the device and receive its
+ * response.
+ * @client: i2c client device
+ * @cmd : structure used to communicate with the device
+ *
+ * After the device receives a Wake token, a watchdog counter starts within the
+ * device. After the watchdog timer expires, the device enters sleep mode
+ * regardless of whether some I/O transmission or command execution is in
+ * progress. If a command is attempted when insufficient time remains prior to
+ * watchdog timer execution, the device will return the watchdog timeout error
+ * code without attempting to execute the command. There is no way to reset the
+ * counter other than to put the device into sleep or idle mode and then
+ * wake it up again.
+ */
+static int atmel_ecc_send_receive(struct i2c_client *client,
+ struct atmel_ecc_cmd *cmd)
+{
+ struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
+ int ret;
+
+ mutex_lock(&i2c_priv->lock);
+
+ ret = atmel_ecc_wakeup(client);
+ if (ret)
+ goto err;
+
+ /* send the command */
+ ret = i2c_master_send(client, (u8 *)cmd, cmd->count + WORD_ADDR_SIZE);
+ if (ret < 0)
+ goto err;
+
+ /* delay the appropriate amount of time for command to execute */
+ msleep(cmd->msecs);
+
+ /* receive the response */
+ ret = i2c_master_recv(client, cmd->data, cmd->rxsize);
+ if (ret < 0)
+ goto err;
+
+ /* put the device into low-power mode */
+ ret = atmel_ecc_sleep(client);
+ if (ret < 0)
+ goto err;
+
+ mutex_unlock(&i2c_priv->lock);
+ return atmel_ecc_status(&client->dev, cmd->data);
+err:
+ mutex_unlock(&i2c_priv->lock);
+ return ret;
+}
+
+static void atmel_ecc_work_handler(struct work_struct *work)
+{
+ struct atmel_ecc_work_data *work_data =
+ container_of(work, struct atmel_ecc_work_data, work);
+ struct atmel_ecc_cmd *cmd = &work_data->cmd;
+ struct i2c_client *client = work_data->ctx->client;
+ int status;
+
+ status = atmel_ecc_send_receive(client, cmd);
+ work_data->cbk(work_data, work_data->areq, status);
+}
+
+static void atmel_ecc_enqueue(struct atmel_ecc_work_data *work_data,
+ void (*cbk)(struct atmel_ecc_work_data *work_data,
+ void *areq, int status),
+ void *areq)
+{
+ work_data->cbk = (void *)cbk;
+ work_data->areq = areq;
+
+ INIT_WORK(&work_data->work, atmel_ecc_work_handler);
+ schedule_work(&work_data->work);
+}
+
+static unsigned int atmel_ecdh_supported_curve(unsigned int curve_id)
+{
+ if (curve_id == ECC_CURVE_NIST_P256)
+ return ATMEL_ECC_NIST_P256_N_SIZE;
+
+ return 0;
+}
+
+/*
+ * A random private key is generated and stored in the device. The device
+ * returns the pair public key.
+ */
+static int atmel_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
+ unsigned int len)
+{
+ struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
+ struct atmel_ecc_cmd *cmd;
+ void *public_key;
+ struct ecdh params;
+ int ret = -ENOMEM;
+
+ /* free the old public key, if any */
+ kfree(ctx->public_key);
+ /* make sure you don't free the old public key twice */
+ ctx->public_key = NULL;
+
+ if (crypto_ecdh_decode_key(buf, len, &params) < 0) {
+ dev_err(&ctx->client->dev, "crypto_ecdh_decode_key failed\n");
+ return -EINVAL;
+ }
+
+ ctx->n_sz = atmel_ecdh_supported_curve(params.curve_id);
+ if (!ctx->n_sz || params.key_size) {
+ /* fallback to ecdh software implementation */
+ ctx->do_fallback = true;
+ return crypto_kpp_set_secret(ctx->fallback, buf, len);
+ }
+
+ cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ /*
+ * The device only supports NIST P256 ECC keys. The public key size will
+ * always be the same. Use a macro for the key size to avoid unnecessary
+ * computations.
+ */
+ public_key = kmalloc(ATMEL_ECC_PUBKEY_SIZE, GFP_KERNEL);
+ if (!public_key)
+ goto free_cmd;
+
+ ctx->do_fallback = false;
+ ctx->curve_id = params.curve_id;
+
+ atmel_ecc_init_genkey_cmd(cmd, DATA_SLOT_2);
+
+ ret = atmel_ecc_send_receive(ctx->client, cmd);
+ if (ret)
+ goto free_public_key;
+
+ /* save the public key */
+ memcpy(public_key, &cmd->data[RSP_DATA_IDX], ATMEL_ECC_PUBKEY_SIZE);
+ ctx->public_key = public_key;
+
+ kfree(cmd);
+ return 0;
+
+free_public_key:
+ kfree(public_key);
+free_cmd:
+ kfree(cmd);
+ return ret;
+}
+
+static int atmel_ecdh_generate_public_key(struct kpp_request *req)
+{
+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+ struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
+ size_t copied;
+ int ret = 0;
+
+ if (ctx->do_fallback) {
+ kpp_request_set_tfm(req, ctx->fallback);
+ return crypto_kpp_generate_public_key(req);
+ }
+
+ /* public key was saved at private key generation */
+ copied = sg_copy_from_buffer(req->dst, 1, ctx->public_key,
+ ATMEL_ECC_PUBKEY_SIZE);
+ if (copied != ATMEL_ECC_PUBKEY_SIZE)
+ ret = -EINVAL;
+
+ return ret;
+}
+
+static int atmel_ecdh_compute_shared_secret(struct kpp_request *req)
+{
+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+ struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
+ struct atmel_ecc_work_data *work_data;
+ gfp_t gfp;
+ int ret;
+
+ if (ctx->do_fallback) {
+ kpp_request_set_tfm(req, ctx->fallback);
+ return crypto_kpp_compute_shared_secret(req);
+ }
+
+ gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL :
+ GFP_ATOMIC;
+
+ work_data = kmalloc(sizeof(*work_data), gfp);
+ if (!work_data)
+ return -ENOMEM;
+
+ work_data->ctx = ctx;
+
+ ret = atmel_ecc_init_ecdh_cmd(&work_data->cmd, req->src);
+ if (ret)
+ goto free_work_data;
+
+ atmel_ecc_enqueue(work_data, atmel_ecdh_done, req);
+
+ return -EINPROGRESS;
+
+free_work_data:
+ kfree(work_data);
+ return ret;
+}
+
+static struct i2c_client *atmel_ecc_i2c_client_alloc(void)
+{
+ struct atmel_ecc_i2c_client_priv *i2c_priv, *min_i2c_priv = NULL;
+ struct i2c_client *client = ERR_PTR(-ENODEV);
+ int min_tfm_cnt = INT_MAX;
+ int tfm_cnt;
+
+ spin_lock(&driver_data.i2c_list_lock);
+
+ if (list_empty(&driver_data.i2c_client_list)) {
+ spin_unlock(&driver_data.i2c_list_lock);
+ return ERR_PTR(-ENODEV);
+ }
+
+ list_for_each_entry(i2c_priv, &driver_data.i2c_client_list,
+ i2c_client_list_node) {
+ tfm_cnt = atomic_read(&i2c_priv->tfm_count);
+ if (tfm_cnt < min_tfm_cnt) {
+ min_tfm_cnt = tfm_cnt;
+ min_i2c_priv = i2c_priv;
+ }
+ if (!min_tfm_cnt)
+ break;
+ }
+
+ if (min_i2c_priv) {
+ atomic_inc(&min_i2c_priv->tfm_count);
+ client = min_i2c_priv->client;
+ }
+
+ spin_unlock(&driver_data.i2c_list_lock);
+
+ return client;
+}
+
+static void atmel_ecc_i2c_client_free(struct i2c_client *client)
+{
+ struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
+
+ atomic_dec(&i2c_priv->tfm_count);
+}
+
+static int atmel_ecdh_init_tfm(struct crypto_kpp *tfm)
+{
+ const char *alg = kpp_alg_name(tfm);
+ struct crypto_kpp *fallback;
+ struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ ctx->client = atmel_ecc_i2c_client_alloc();
+ if (IS_ERR(ctx->client)) {
+ pr_err("tfm - i2c_client binding failed\n");
+ return PTR_ERR(ctx->client);
+ }
+
+ fallback = crypto_alloc_kpp(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(fallback)) {
+ dev_err(&ctx->client->dev, "Failed to allocate transformation for '%s': %ld\n",
+ alg, PTR_ERR(fallback));
+ return PTR_ERR(fallback);
+ }
+
+ crypto_kpp_set_flags(fallback, crypto_kpp_get_flags(tfm));
+
+ dev_info(&ctx->client->dev, "Using '%s' as fallback implementation.\n",
+ crypto_tfm_alg_driver_name(crypto_kpp_tfm(fallback)));
+
+ ctx->fallback = fallback;
+
+ return 0;
+}
+
+static void atmel_ecdh_exit_tfm(struct crypto_kpp *tfm)
+{
+ struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ kfree(ctx->public_key);
+ crypto_free_kpp(ctx->fallback);
+ atmel_ecc_i2c_client_free(ctx->client);
+}
+
+static unsigned int atmel_ecdh_max_size(struct crypto_kpp *tfm)
+{
+ struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ if (ctx->fallback)
+ return crypto_kpp_maxsize(ctx->fallback);
+
+ /*
+ * The device only supports NIST P256 ECC keys. The public key size will
+ * always be the same. Use a macro for the key size to avoid unnecessary
+ * computations.
+ */
+ return ATMEL_ECC_PUBKEY_SIZE;
+}
+
+static struct kpp_alg atmel_ecdh = {
+ .set_secret = atmel_ecdh_set_secret,
+ .generate_public_key = atmel_ecdh_generate_public_key,
+ .compute_shared_secret = atmel_ecdh_compute_shared_secret,
+ .init = atmel_ecdh_init_tfm,
+ .exit = atmel_ecdh_exit_tfm,
+ .max_size = atmel_ecdh_max_size,
+ .base = {
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+ .cra_name = "ecdh",
+ .cra_driver_name = "atmel-ecdh",
+ .cra_priority = ATMEL_ECC_PRIORITY,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct atmel_ecdh_ctx),
+ },
+};
+
+static inline size_t atmel_ecc_wake_token_sz(u32 bus_clk_rate)
+{
+ u32 no_of_bits = DIV_ROUND_UP(TWLO_USEC * bus_clk_rate, USEC_PER_SEC);
+
+ /* return the size of the wake_token in bytes */
+ return DIV_ROUND_UP(no_of_bits, 8);
+}
+
+static int device_sanity_check(struct i2c_client *client)
+{
+ struct atmel_ecc_cmd *cmd;
+ int ret;
+
+ cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ atmel_ecc_init_read_cmd(cmd);
+
+ ret = atmel_ecc_send_receive(client, cmd);
+ if (ret)
+ goto free_cmd;
+
+ /*
+ * It is vital that the Configuration, Data and OTP zones be locked
+ * prior to release into the field of the system containing the device.
+ * Failure to lock these zones may permit modification of any secret
+ * keys and may lead to other security problems.
+ */
+ if (cmd->data[LOCK_CONFIG_IDX] || cmd->data[LOCK_VALUE_IDX]) {
+ dev_err(&client->dev, "Configuration or Data and OTP zones are unlocked!\n");
+ ret = -ENOTSUPP;
+ }
+
+ /* fall through */
+free_cmd:
+ kfree(cmd);
+ return ret;
+}
+
+static int atmel_ecc_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct atmel_ecc_i2c_client_priv *i2c_priv;
+ struct device *dev = &client->dev;
+ int ret;
+ u32 bus_clk_rate;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(dev, "I2C_FUNC_I2C not supported\n");
+ return -ENODEV;
+ }
+
+ ret = of_property_read_u32(client->adapter->dev.of_node,
+ "clock-frequency", &bus_clk_rate);
+ if (ret) {
+ dev_err(dev, "of: failed to read clock-frequency property\n");
+ return ret;
+ }
+
+ if (bus_clk_rate > 1000000L) {
+ dev_err(dev, "%d exceeds maximum supported clock frequency (1MHz)\n",
+ bus_clk_rate);
+ return -EINVAL;
+ }
+
+ i2c_priv = devm_kmalloc(dev, sizeof(*i2c_priv), GFP_KERNEL);
+ if (!i2c_priv)
+ return -ENOMEM;
+
+ i2c_priv->client = client;
+ mutex_init(&i2c_priv->lock);
+
+ /*
+ * WAKE_TOKEN_MAX_SIZE was calculated for the maximum bus_clk_rate -
+ * 1MHz. The previous bus_clk_rate check ensures that wake_token_sz
+ * will always be smaller than or equal to WAKE_TOKEN_MAX_SIZE.
+ */
+ i2c_priv->wake_token_sz = atmel_ecc_wake_token_sz(bus_clk_rate);
+
+ memset(i2c_priv->wake_token, 0, sizeof(i2c_priv->wake_token));
+
+ atomic_set(&i2c_priv->tfm_count, 0);
+
+ i2c_set_clientdata(client, i2c_priv);
+
+ ret = device_sanity_check(client);
+ if (ret)
+ return ret;
+
+ spin_lock(&driver_data.i2c_list_lock);
+ list_add_tail(&i2c_priv->i2c_client_list_node,
+ &driver_data.i2c_client_list);
+ spin_unlock(&driver_data.i2c_list_lock);
+
+ ret = crypto_register_kpp(&atmel_ecdh);
+ if (ret) {
+ spin_lock(&driver_data.i2c_list_lock);
+ list_del(&i2c_priv->i2c_client_list_node);
+ spin_unlock(&driver_data.i2c_list_lock);
+
+ dev_err(dev, "%s alg registration failed\n",
+ atmel_ecdh.base.cra_driver_name);
+ } else {
+ dev_info(dev, "atmel ecc algorithms registered in /proc/crypto\n");
+ }
+
+ return ret;
+}
+
+static int atmel_ecc_remove(struct i2c_client *client)
+{
+ struct atmel_ecc_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
+
+ /* Return EBUSY if i2c client already allocated. */
+ if (atomic_read(&i2c_priv->tfm_count)) {
+ dev_err(&client->dev, "Device is busy\n");
+ return -EBUSY;
+ }
+
+ crypto_unregister_kpp(&atmel_ecdh);
+
+ spin_lock(&driver_data.i2c_list_lock);
+ list_del(&i2c_priv->i2c_client_list_node);
+ spin_unlock(&driver_data.i2c_list_lock);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id atmel_ecc_dt_ids[] = {
+ {
+ .compatible = "atmel,atecc508a",
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, atmel_ecc_dt_ids);
+#endif
+
+static const struct i2c_device_id atmel_ecc_id[] = {
+ { "atecc508a", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, atmel_ecc_id);
+
+static struct i2c_driver atmel_ecc_driver = {
+ .driver = {
+ .name = "atmel-ecc",
+ .of_match_table = of_match_ptr(atmel_ecc_dt_ids),
+ },
+ .probe = atmel_ecc_probe,
+ .remove = atmel_ecc_remove,
+ .id_table = atmel_ecc_id,
+};
+
+static int __init atmel_ecc_init(void)
+{
+ spin_lock_init(&driver_data.i2c_list_lock);
+ INIT_LIST_HEAD(&driver_data.i2c_client_list);
+ return i2c_add_driver(&atmel_ecc_driver);
+}
+
+static void __exit atmel_ecc_exit(void)
+{
+ flush_scheduled_work();
+ i2c_del_driver(&atmel_ecc_driver);
+}
+
+module_init(atmel_ecc_init);
+module_exit(atmel_ecc_exit);
+
+MODULE_AUTHOR("Tudor Ambarus <tudor.ambarus@microchip.com>");
+MODULE_DESCRIPTION("Microchip / Atmel ECC (I2C) driver");
+MODULE_LICENSE("GPL v2");
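Since the driver registers a kpp algorithm named "ecdh" at priority 300, in-kernel users reach it through the regular crypto API, and the atmel-ecdh implementation wins over the generic software ecdh whenever the device is present. Passing an empty key to set_secret() lets the device generate and retain the private key, while supplying key material (or an unsupported curve) routes the request to the software fallback. A rough consumer-side sketch, assuming NIST P-256 and the struct ecdh / crypto_ecdh_encode_key() counterpart of the decode helper used by the driver:

	struct crypto_kpp *tfm;
	struct ecdh p = {
		.curve_id = ECC_CURVE_NIST_P256,
		.key	  = NULL,	/* let the device generate the private key */
		.key_size = 0,
	};
	char buf[64];			/* large enough for the encoded parameters */
	unsigned int len;
	int ret;

	tfm = crypto_alloc_kpp("ecdh", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	len = crypto_ecdh_key_len(&p);
	ret = crypto_ecdh_encode_key(buf, len, &p);
	if (!ret)
		ret = crypto_kpp_set_secret(tfm, buf, len);

	crypto_free_kpp(tfm);
	return ret;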
diff --git a/drivers/crypto/atmel-ecc.h b/drivers/crypto/atmel-ecc.h
new file mode 100644
index 000000000000..25232c8abcc2
--- /dev/null
+++ b/drivers/crypto/atmel-ecc.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2017, Microchip Technology Inc.
+ * Author: Tudor Ambarus <tudor.ambarus@microchip.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef __ATMEL_ECC_H__
+#define __ATMEL_ECC_H__
+
+#define ATMEL_ECC_PRIORITY 300
+
+#define COMMAND 0x03 /* packet function */
+#define SLEEP_TOKEN 0x01
+#define WAKE_TOKEN_MAX_SIZE 8
+
+/* Definitions of Data and Command sizes */
+#define WORD_ADDR_SIZE 1
+#define COUNT_SIZE 1
+#define CRC_SIZE 2
+#define CMD_OVERHEAD_SIZE (COUNT_SIZE + CRC_SIZE)
+
+/* size in bytes of the n prime */
+#define ATMEL_ECC_NIST_P256_N_SIZE 32
+#define ATMEL_ECC_PUBKEY_SIZE (2 * ATMEL_ECC_NIST_P256_N_SIZE)
+
+#define STATUS_RSP_SIZE 4
+#define ECDH_RSP_SIZE (32 + CMD_OVERHEAD_SIZE)
+#define GENKEY_RSP_SIZE (ATMEL_ECC_PUBKEY_SIZE + \
+ CMD_OVERHEAD_SIZE)
+#define READ_RSP_SIZE (4 + CMD_OVERHEAD_SIZE)
+#define MAX_RSP_SIZE GENKEY_RSP_SIZE
+
+/**
+ * atmel_ecc_cmd - structure used for communicating with the device.
+ * @word_addr: indicates the function of the packet sent to the device. This
+ * byte should have a value of COMMAND for normal operation.
+ * @count : number of bytes to be transferred to (or from) the device.
+ * @opcode : the command code.
+ * @param1 : the first parameter; always present.
+ * @param2 : the second parameter; always present.
+ * @data : optional remaining input data. Includes a 2-byte CRC.
+ * @rxsize : size of the data received from i2c client.
+ * @msecs : command execution time in milliseconds
+ */
+struct atmel_ecc_cmd {
+ u8 word_addr;
+ u8 count;
+ u8 opcode;
+ u8 param1;
+ u16 param2;
+ u8 data[MAX_RSP_SIZE];
+ u8 msecs;
+ u16 rxsize;
+} __packed;
+
+/* Status/Error codes */
+#define STATUS_SIZE 0x04
+#define STATUS_NOERR 0x00
+#define STATUS_WAKE_SUCCESSFUL 0x11
+
+static const struct {
+ u8 value;
+ const char *error_text;
+} error_list[] = {
+ { 0x01, "CheckMac or Verify miscompare" },
+ { 0x03, "Parse Error" },
+ { 0x05, "ECC Fault" },
+ { 0x0F, "Execution Error" },
+ { 0xEE, "Watchdog about to expire" },
+ { 0xFF, "CRC or other communication error" },
+};
+
+/* Definitions for eeprom organization */
+#define CONFIG_ZONE 0
+
+/* Definitions for Indexes common to all commands */
+#define RSP_DATA_IDX 1 /* buffer index of data in response */
+#define DATA_SLOT_2 2 /* used for ECDH private key */
+
+/* Definitions for the device lock state */
+#define DEVICE_LOCK_ADDR 0x15
+#define LOCK_VALUE_IDX (RSP_DATA_IDX + 2)
+#define LOCK_CONFIG_IDX (RSP_DATA_IDX + 3)
+
+/*
+ * Wake High delay to data communication (microseconds). SDA should be stable
+ * high for this entire duration.
+ */
+#define TWHI_MIN 1500
+#define TWHI_MAX 1550
+
+/* Wake Low duration */
+#define TWLO_USEC 60
+
+/* Command execution time (milliseconds) */
+#define MAX_EXEC_TIME_ECDH 58
+#define MAX_EXEC_TIME_GENKEY 115
+#define MAX_EXEC_TIME_READ 1
+
+/* Command opcode */
+#define OPCODE_ECDH 0x43
+#define OPCODE_GENKEY 0x40
+#define OPCODE_READ 0x02
+
+/* Definitions for the READ Command */
+#define READ_COUNT 7
+
+/* Definitions for the GenKey Command */
+#define GENKEY_COUNT 7
+#define GENKEY_MODE_PRIVATE 0x04
+
+/* Definitions for the ECDH Command */
+#define ECDH_COUNT 71
+#define ECDH_PREFIX_MODE 0x00
+
+#endif /* __ATMEL_ECC_H__ */
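A quick sanity check on the wake-token constants above: the token must hold the bus low for TWLO_USEC (60 us), which atmel_ecc_wake_token_sz() converts into a run of all-zero bytes clocked at the I2C bus rate. At the maximum permitted 1 MHz that is DIV_ROUND_UP(60 * 1000000, 1000000) = 60 bit times, i.e. DIV_ROUND_UP(60, 8) = 8 bytes, exactly WAKE_TOKEN_MAX_SIZE, which is why the probe-time clock-frequency check guarantees wake_token[] can never overflow.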
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index dad4e5bad827..3e2f41b3eaf3 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -2883,7 +2883,7 @@ sha_dd_err:
static int atmel_sha_remove(struct platform_device *pdev)
{
- static struct atmel_sha_dev *sha_dd;
+ struct atmel_sha_dev *sha_dd;
sha_dd = platform_get_drvdata(pdev);
if (!sha_dd)
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index b25f1b3c981f..f4b335dda568 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -1487,7 +1487,7 @@ tdes_dd_err:
static int atmel_tdes_remove(struct platform_device *pdev)
{
- static struct atmel_tdes_dev *tdes_dd;
+ struct atmel_tdes_dev *tdes_dd;
tdes_dd = platform_get_drvdata(pdev);
if (!tdes_dd)
diff --git a/drivers/crypto/axis/Makefile b/drivers/crypto/axis/Makefile
new file mode 100644
index 000000000000..be9a84a4b667
--- /dev/null
+++ b/drivers/crypto/axis/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) := artpec6_crypto.o
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
new file mode 100644
index 000000000000..d9fbbf01062b
--- /dev/null
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -0,0 +1,3192 @@
+/*
+ * Driver for ARTPEC-6 crypto block using the kernel asynchronous crypto api.
+ *
+ * Copyright (C) 2014-2017 Axis Communications AB
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bitfield.h>
+#include <linux/crypto.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/fault-inject.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+
+#include <crypto/aes.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sha.h>
+#include <crypto/xts.h>
+
+/* Max length of a line in all cache levels for Artpec SoCs. */
+#define ARTPEC_CACHE_LINE_MAX 32
+
+#define PDMA_OUT_CFG 0x0000
+#define PDMA_OUT_BUF_CFG 0x0004
+#define PDMA_OUT_CMD 0x0008
+#define PDMA_OUT_DESCRQ_PUSH 0x0010
+#define PDMA_OUT_DESCRQ_STAT 0x0014
+
+#define A6_PDMA_IN_CFG 0x0028
+#define A6_PDMA_IN_BUF_CFG 0x002c
+#define A6_PDMA_IN_CMD 0x0030
+#define A6_PDMA_IN_STATQ_PUSH 0x0038
+#define A6_PDMA_IN_DESCRQ_PUSH 0x0044
+#define A6_PDMA_IN_DESCRQ_STAT 0x0048
+#define A6_PDMA_INTR_MASK 0x0068
+#define A6_PDMA_ACK_INTR 0x006c
+#define A6_PDMA_MASKED_INTR 0x0074
+
+#define A7_PDMA_IN_CFG 0x002c
+#define A7_PDMA_IN_BUF_CFG 0x0030
+#define A7_PDMA_IN_CMD 0x0034
+#define A7_PDMA_IN_STATQ_PUSH 0x003c
+#define A7_PDMA_IN_DESCRQ_PUSH 0x0048
+#define A7_PDMA_IN_DESCRQ_STAT 0x004C
+#define A7_PDMA_INTR_MASK 0x006c
+#define A7_PDMA_ACK_INTR 0x0070
+#define A7_PDMA_MASKED_INTR 0x0078
+
+#define PDMA_OUT_CFG_EN BIT(0)
+
+#define PDMA_OUT_BUF_CFG_DATA_BUF_SIZE GENMASK(4, 0)
+#define PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE GENMASK(9, 5)
+
+#define PDMA_OUT_CMD_START BIT(0)
+#define A6_PDMA_OUT_CMD_STOP BIT(3)
+#define A7_PDMA_OUT_CMD_STOP BIT(2)
+
+#define PDMA_OUT_DESCRQ_PUSH_LEN GENMASK(5, 0)
+#define PDMA_OUT_DESCRQ_PUSH_ADDR GENMASK(31, 6)
+
+#define PDMA_OUT_DESCRQ_STAT_LEVEL GENMASK(3, 0)
+#define PDMA_OUT_DESCRQ_STAT_SIZE GENMASK(7, 4)
+
+#define PDMA_IN_CFG_EN BIT(0)
+
+#define PDMA_IN_BUF_CFG_DATA_BUF_SIZE GENMASK(4, 0)
+#define PDMA_IN_BUF_CFG_DESCR_BUF_SIZE GENMASK(9, 5)
+#define PDMA_IN_BUF_CFG_STAT_BUF_SIZE GENMASK(14, 10)
+
+#define PDMA_IN_CMD_START BIT(0)
+#define A6_PDMA_IN_CMD_FLUSH_STAT BIT(2)
+#define A6_PDMA_IN_CMD_STOP BIT(3)
+#define A7_PDMA_IN_CMD_FLUSH_STAT BIT(1)
+#define A7_PDMA_IN_CMD_STOP BIT(2)
+
+#define PDMA_IN_STATQ_PUSH_LEN GENMASK(5, 0)
+#define PDMA_IN_STATQ_PUSH_ADDR GENMASK(31, 6)
+
+#define PDMA_IN_DESCRQ_PUSH_LEN GENMASK(5, 0)
+#define PDMA_IN_DESCRQ_PUSH_ADDR GENMASK(31, 6)
+
+#define PDMA_IN_DESCRQ_STAT_LEVEL GENMASK(3, 0)
+#define PDMA_IN_DESCRQ_STAT_SIZE GENMASK(7, 4)
+
+#define A6_PDMA_INTR_MASK_IN_DATA BIT(2)
+#define A6_PDMA_INTR_MASK_IN_EOP BIT(3)
+#define A6_PDMA_INTR_MASK_IN_EOP_FLUSH BIT(4)
+
+#define A7_PDMA_INTR_MASK_IN_DATA BIT(3)
+#define A7_PDMA_INTR_MASK_IN_EOP BIT(4)
+#define A7_PDMA_INTR_MASK_IN_EOP_FLUSH BIT(5)
+
+#define A6_CRY_MD_OPER GENMASK(19, 16)
+
+#define A6_CRY_MD_HASH_SEL_CTX GENMASK(21, 20)
+#define A6_CRY_MD_HASH_HMAC_FIN BIT(23)
+
+#define A6_CRY_MD_CIPHER_LEN GENMASK(21, 20)
+#define A6_CRY_MD_CIPHER_DECR BIT(22)
+#define A6_CRY_MD_CIPHER_TWEAK BIT(23)
+#define A6_CRY_MD_CIPHER_DSEQ BIT(24)
+
+#define A7_CRY_MD_OPER GENMASK(11, 8)
+
+#define A7_CRY_MD_HASH_SEL_CTX GENMASK(13, 12)
+#define A7_CRY_MD_HASH_HMAC_FIN BIT(15)
+
+#define A7_CRY_MD_CIPHER_LEN GENMASK(13, 12)
+#define A7_CRY_MD_CIPHER_DECR BIT(14)
+#define A7_CRY_MD_CIPHER_TWEAK BIT(15)
+#define A7_CRY_MD_CIPHER_DSEQ BIT(16)
+
+/* DMA metadata constants */
+#define regk_crypto_aes_cbc 0x00000002
+#define regk_crypto_aes_ctr 0x00000003
+#define regk_crypto_aes_ecb 0x00000001
+#define regk_crypto_aes_gcm 0x00000004
+#define regk_crypto_aes_xts 0x00000005
+#define regk_crypto_cache 0x00000002
+#define a6_regk_crypto_dlkey 0x0000000a
+#define a7_regk_crypto_dlkey 0x0000000e
+#define regk_crypto_ext 0x00000001
+#define regk_crypto_hmac_sha1 0x00000007
+#define regk_crypto_hmac_sha256 0x00000009
+#define regk_crypto_hmac_sha384 0x0000000b
+#define regk_crypto_hmac_sha512 0x0000000d
+#define regk_crypto_init 0x00000000
+#define regk_crypto_key_128 0x00000000
+#define regk_crypto_key_192 0x00000001
+#define regk_crypto_key_256 0x00000002
+#define regk_crypto_null 0x00000000
+#define regk_crypto_sha1 0x00000006
+#define regk_crypto_sha256 0x00000008
+#define regk_crypto_sha384 0x0000000a
+#define regk_crypto_sha512 0x0000000c
+
+/* DMA descriptor structures */
+struct pdma_descr_ctrl {
+ unsigned char short_descr : 1;
+ unsigned char pad1 : 1;
+ unsigned char eop : 1;
+ unsigned char intr : 1;
+ unsigned char short_len : 3;
+ unsigned char pad2 : 1;
+} __packed;
+
+struct pdma_data_descr {
+ unsigned int len : 24;
+ unsigned int buf : 32;
+} __packed;
+
+struct pdma_short_descr {
+ unsigned char data[7];
+} __packed;
+
+struct pdma_descr {
+ struct pdma_descr_ctrl ctrl;
+ union {
+ struct pdma_data_descr data;
+ struct pdma_short_descr shrt;
+ };
+};
+
+struct pdma_stat_descr {
+ unsigned char pad1 : 1;
+ unsigned char pad2 : 1;
+ unsigned char eop : 1;
+ unsigned char pad3 : 5;
+ unsigned int len : 24;
+};
+
+/* Each descriptor array can hold max 64 entries */
+#define PDMA_DESCR_COUNT 64
+
+#define MODULE_NAME "Artpec-6 CA"
+
+/* Hash modes (including HMAC variants) */
+#define ARTPEC6_CRYPTO_HASH_SHA1 1
+#define ARTPEC6_CRYPTO_HASH_SHA256 2
+#define ARTPEC6_CRYPTO_HASH_SHA384 3
+#define ARTPEC6_CRYPTO_HASH_SHA512 4
+
+/* Crypto modes */
+#define ARTPEC6_CRYPTO_CIPHER_AES_ECB 1
+#define ARTPEC6_CRYPTO_CIPHER_AES_CBC 2
+#define ARTPEC6_CRYPTO_CIPHER_AES_CTR 3
+#define ARTPEC6_CRYPTO_CIPHER_AES_XTS 5
+
+/* The PDMA is a DMA-engine tightly coupled with a ciphering engine.
+ * It operates on a descriptor array with up to 64 descriptor entries.
+ * The arrays must be 64 byte aligned in memory.
+ *
+ * The ciphering unit has no registers and is completely controlled by
+ * a 4-byte metadata that is inserted at the beginning of each dma packet.
+ *
+ * A dma packet is a sequence of descriptors terminated by setting the .eop
+ * field in the final descriptor of the packet.
+ *
+ * Multiple packets are used for providing context data, key data and
+ * the plain/ciphertext.
+ *
+ * PDMA Descriptors (Array)
+ * +------+------+------+~~+-------+------+----
+ * | 0 | 1 | 2 |~~| 11 EOP| 12 | ....
+ * +--+---+--+---+----+-+~~+-------+----+-+----
+ * | | | | |
+ * | | | | |
+ * __|__ +-------++-------++-------+ +----+
+ * | MD | |Payload||Payload||Payload| | MD |
+ * +-----+ +-------++-------++-------+ +----+
+ */
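+
+/* Illustrative sketch (not part of the driver flow): using the fields defined
+ * above, the 4-byte ARTPEC-6 metadata word for, e.g., an AES-CBC decryption
+ * with a 128-bit key would be composed roughly as
+ *
+ *	u32 md = FIELD_PREP(A6_CRY_MD_OPER, regk_crypto_aes_cbc) |
+ *		 FIELD_PREP(A6_CRY_MD_CIPHER_LEN, regk_crypto_key_128) |
+ *		 A6_CRY_MD_CIPHER_DECR;
+ *
+ * and sent as the first descriptor of an OUT packet, followed by the IV and
+ * payload descriptors; see artpec6_crypto_prepare_crypto() below.
+ */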
+
+struct artpec6_crypto_bounce_buffer {
+ struct list_head list;
+ size_t length;
+ struct scatterlist *sg;
+ size_t offset;
+ /* buf is aligned to ARTPEC_CACHE_LINE_MAX and
+ * holds up to ARTPEC_CACHE_LINE_MAX bytes data.
+ */
+ void *buf;
+};
+
+struct artpec6_crypto_dma_map {
+ dma_addr_t dma_addr;
+ size_t size;
+ enum dma_data_direction dir;
+};
+
+struct artpec6_crypto_dma_descriptors {
+ struct pdma_descr out[PDMA_DESCR_COUNT] __aligned(64);
+ struct pdma_descr in[PDMA_DESCR_COUNT] __aligned(64);
+ u32 stat[PDMA_DESCR_COUNT] __aligned(64);
+ struct list_head bounce_buffers;
+ /* Enough maps for all out/in buffers, and all three descr. arrays */
+ struct artpec6_crypto_dma_map maps[PDMA_DESCR_COUNT * 2 + 2];
+ dma_addr_t out_dma_addr;
+ dma_addr_t in_dma_addr;
+ dma_addr_t stat_dma_addr;
+ size_t out_cnt;
+ size_t in_cnt;
+ size_t map_count;
+};
+
+enum artpec6_crypto_variant {
+ ARTPEC6_CRYPTO,
+ ARTPEC7_CRYPTO,
+};
+
+struct artpec6_crypto {
+ void __iomem *base;
+ spinlock_t queue_lock;
+ struct list_head queue; /* waiting for pdma fifo space */
+ struct list_head pending; /* submitted to pdma fifo */
+ struct tasklet_struct task;
+ struct kmem_cache *dma_cache;
+ int pending_count;
+ struct timer_list timer;
+ enum artpec6_crypto_variant variant;
+ void *pad_buffer; /* cache-aligned block padding buffer */
+ void *zero_buffer;
+};
+
+enum artpec6_crypto_hash_flags {
+ HASH_FLAG_INIT_CTX = 2,
+ HASH_FLAG_UPDATE = 4,
+ HASH_FLAG_FINALIZE = 8,
+ HASH_FLAG_HMAC = 16,
+ HASH_FLAG_UPDATE_KEY = 32,
+};
+
+struct artpec6_crypto_req_common {
+ struct list_head list;
+ struct artpec6_crypto_dma_descriptors *dma;
+ struct crypto_async_request *req;
+ void (*complete)(struct crypto_async_request *req);
+ gfp_t gfp_flags;
+};
+
+struct artpec6_hash_request_context {
+ char partial_buffer[SHA512_BLOCK_SIZE];
+ char partial_buffer_out[SHA512_BLOCK_SIZE];
+ char key_buffer[SHA512_BLOCK_SIZE];
+ char pad_buffer[SHA512_BLOCK_SIZE + 32];
+ unsigned char digeststate[SHA512_DIGEST_SIZE];
+ size_t partial_bytes;
+ u64 digcnt;
+ u32 key_md;
+ u32 hash_md;
+ enum artpec6_crypto_hash_flags hash_flags;
+ struct artpec6_crypto_req_common common;
+};
+
+struct artpec6_hash_export_state {
+ char partial_buffer[SHA512_BLOCK_SIZE];
+ unsigned char digeststate[SHA512_DIGEST_SIZE];
+ size_t partial_bytes;
+ u64 digcnt;
+ int oper;
+ unsigned int hash_flags;
+};
+
+struct artpec6_hashalg_context {
+ char hmac_key[SHA512_BLOCK_SIZE];
+ size_t hmac_key_length;
+ struct crypto_shash *child_hash;
+};
+
+struct artpec6_crypto_request_context {
+ u32 cipher_md;
+ bool decrypt;
+ struct artpec6_crypto_req_common common;
+};
+
+struct artpec6_cryptotfm_context {
+ unsigned char aes_key[2*AES_MAX_KEY_SIZE];
+ size_t key_length;
+ u32 key_md;
+ int crypto_type;
+ struct crypto_skcipher *fallback;
+};
+
+struct artpec6_crypto_aead_hw_ctx {
+ __be64 aad_length_bits;
+ __be64 text_length_bits;
+ __u8 J0[AES_BLOCK_SIZE];
+};
+
+struct artpec6_crypto_aead_req_ctx {
+ struct artpec6_crypto_aead_hw_ctx hw_ctx;
+ u32 cipher_md;
+ bool decrypt;
+ struct artpec6_crypto_req_common common;
+ __u8 decryption_tag[AES_BLOCK_SIZE] ____cacheline_aligned;
+};
+
+/* The crypto framework makes it hard to avoid this global. */
+static struct device *artpec6_crypto_dev;
+
+static struct dentry *dbgfs_root;
+
+#ifdef CONFIG_FAULT_INJECTION
+static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read);
+static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full);
+#endif
+
+enum {
+ ARTPEC6_CRYPTO_PREPARE_HASH_NO_START,
+ ARTPEC6_CRYPTO_PREPARE_HASH_START,
+};
+
+static int artpec6_crypto_prepare_aead(struct aead_request *areq);
+static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq);
+static int artpec6_crypto_prepare_hash(struct ahash_request *areq);
+
+static void
+artpec6_crypto_complete_crypto(struct crypto_async_request *req);
+static void
+artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req);
+static void
+artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req);
+static void
+artpec6_crypto_complete_aead(struct crypto_async_request *req);
+static void
+artpec6_crypto_complete_hash(struct crypto_async_request *req);
+
+static int
+artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common);
+
+static void
+artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common);
+
+struct artpec6_crypto_walk {
+ struct scatterlist *sg;
+ size_t offset;
+};
+
+static void artpec6_crypto_walk_init(struct artpec6_crypto_walk *awalk,
+ struct scatterlist *sg)
+{
+ awalk->sg = sg;
+ awalk->offset = 0;
+}
+
+static size_t artpec6_crypto_walk_advance(struct artpec6_crypto_walk *awalk,
+ size_t nbytes)
+{
+ while (nbytes && awalk->sg) {
+ size_t piece;
+
+ WARN_ON(awalk->offset > awalk->sg->length);
+
+ piece = min(nbytes, (size_t)awalk->sg->length - awalk->offset);
+ nbytes -= piece;
+ awalk->offset += piece;
+ if (awalk->offset == awalk->sg->length) {
+ awalk->sg = sg_next(awalk->sg);
+ awalk->offset = 0;
+ }
+
+ }
+
+ return nbytes;
+}
+
+static size_t
+artpec6_crypto_walk_chunklen(const struct artpec6_crypto_walk *awalk)
+{
+ WARN_ON(awalk->sg->length == awalk->offset);
+
+ return awalk->sg->length - awalk->offset;
+}
+
+static dma_addr_t
+artpec6_crypto_walk_chunk_phys(const struct artpec6_crypto_walk *awalk)
+{
+ return sg_phys(awalk->sg) + awalk->offset;
+}
+
+static void
+artpec6_crypto_copy_bounce_buffers(struct artpec6_crypto_req_common *common)
+{
+ struct artpec6_crypto_dma_descriptors *dma = common->dma;
+ struct artpec6_crypto_bounce_buffer *b;
+ struct artpec6_crypto_bounce_buffer *next;
+
+ list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) {
+ pr_debug("bounce entry %p: %zu bytes @ %zu from %p\n",
+ b, b->length, b->offset, b->buf);
+ sg_pcopy_from_buffer(b->sg,
+ 1,
+ b->buf,
+ b->length,
+ b->offset);
+
+ list_del(&b->list);
+ kfree(b);
+ }
+}
+
+static inline bool artpec6_crypto_busy(void)
+{
+ struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
+ int fifo_count = ac->pending_count;
+
+ return fifo_count > 6;
+}
+
+static int artpec6_crypto_submit(struct artpec6_crypto_req_common *req)
+{
+ struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
+ int ret = -EBUSY;
+
+ spin_lock_bh(&ac->queue_lock);
+
+ if (!artpec6_crypto_busy()) {
+ list_add_tail(&req->list, &ac->pending);
+ artpec6_crypto_start_dma(req);
+ ret = -EINPROGRESS;
+ } else if (req->req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
+ list_add_tail(&req->list, &ac->queue);
+ } else {
+ artpec6_crypto_common_destroy(req);
+ }
+
+ spin_unlock_bh(&ac->queue_lock);
+
+ return ret;
+}
+
+static void artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common)
+{
+ struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
+ enum artpec6_crypto_variant variant = ac->variant;
+ void __iomem *base = ac->base;
+ struct artpec6_crypto_dma_descriptors *dma = common->dma;
+ u32 ind, statd, outd;
+
+ /* Make descriptor content visible to the DMA before starting it. */
+ wmb();
+
+ ind = FIELD_PREP(PDMA_IN_DESCRQ_PUSH_LEN, dma->in_cnt - 1) |
+ FIELD_PREP(PDMA_IN_DESCRQ_PUSH_ADDR, dma->in_dma_addr >> 6);
+
+ statd = FIELD_PREP(PDMA_IN_STATQ_PUSH_LEN, dma->in_cnt - 1) |
+ FIELD_PREP(PDMA_IN_STATQ_PUSH_ADDR, dma->stat_dma_addr >> 6);
+
+ outd = FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_LEN, dma->out_cnt - 1) |
+ FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_ADDR, dma->out_dma_addr >> 6);
+
+ if (variant == ARTPEC6_CRYPTO) {
+ writel_relaxed(ind, base + A6_PDMA_IN_DESCRQ_PUSH);
+ writel_relaxed(statd, base + A6_PDMA_IN_STATQ_PUSH);
+ writel_relaxed(PDMA_IN_CMD_START, base + A6_PDMA_IN_CMD);
+ } else {
+ writel_relaxed(ind, base + A7_PDMA_IN_DESCRQ_PUSH);
+ writel_relaxed(statd, base + A7_PDMA_IN_STATQ_PUSH);
+ writel_relaxed(PDMA_IN_CMD_START, base + A7_PDMA_IN_CMD);
+ }
+
+ writel_relaxed(outd, base + PDMA_OUT_DESCRQ_PUSH);
+ writel_relaxed(PDMA_OUT_CMD_START, base + PDMA_OUT_CMD);
+
+ ac->pending_count++;
+}
+
+static void
+artpec6_crypto_init_dma_operation(struct artpec6_crypto_req_common *common)
+{
+ struct artpec6_crypto_dma_descriptors *dma = common->dma;
+
+ dma->out_cnt = 0;
+ dma->in_cnt = 0;
+ dma->map_count = 0;
+ INIT_LIST_HEAD(&dma->bounce_buffers);
+}
+
+static bool fault_inject_dma_descr(void)
+{
+#ifdef CONFIG_FAULT_INJECTION
+ return should_fail(&artpec6_crypto_fail_dma_array_full, 1);
+#else
+ return false;
+#endif
+}
+
+/** artpec6_crypto_setup_out_descr_phys - Setup an out channel with a
+ * physical address
+ *
+ * @addr: The physical address of the data buffer
+ * @len: The length of the data buffer
+ * @eop: True if this is the last buffer in the packet
+ *
+ * @return 0 on success or -ENOSPC if there are no more descriptors available
+ */
+static int
+artpec6_crypto_setup_out_descr_phys(struct artpec6_crypto_req_common *common,
+ dma_addr_t addr, size_t len, bool eop)
+{
+ struct artpec6_crypto_dma_descriptors *dma = common->dma;
+ struct pdma_descr *d;
+
+ if (dma->out_cnt >= PDMA_DESCR_COUNT ||
+ fault_inject_dma_descr()) {
+ pr_err("No free OUT DMA descriptors available!\n");
+ return -ENOSPC;
+ }
+
+ d = &dma->out[dma->out_cnt++];
+ memset(d, 0, sizeof(*d));
+
+ d->ctrl.short_descr = 0;
+ d->ctrl.eop = eop;
+ d->data.len = len;
+ d->data.buf = addr;
+ return 0;
+}
+
+/** artpec6_crypto_setup_out_descr_short - Setup a short out descriptor
+ *
+ * @dst: The virtual address of the data
+ * @len: The length of the data, must be between 1 and 7 bytes
+ * @eop: True if this is the last buffer in the packet
+ *
+ * @return 0 on success
+ * -ENOSPC if no more descriptors are available
+ * -EINVAL if the data length exceeds 7 bytes
+ */
+static int
+artpec6_crypto_setup_out_descr_short(struct artpec6_crypto_req_common *common,
+ void *dst, unsigned int len, bool eop)
+{
+ struct artpec6_crypto_dma_descriptors *dma = common->dma;
+ struct pdma_descr *d;
+
+ if (dma->out_cnt >= PDMA_DESCR_COUNT ||
+ fault_inject_dma_descr()) {
+ pr_err("No free OUT DMA descriptors available!\n");
+ return -ENOSPC;
+ } else if (len > 7 || len < 1) {
+ return -EINVAL;
+ }
+ d = &dma->out[dma->out_cnt++];
+ memset(d, 0, sizeof(*d));
+
+ d->ctrl.short_descr = 1;
+ d->ctrl.short_len = len;
+ d->ctrl.eop = eop;
+ memcpy(d->shrt.data, dst, len);
+ return 0;
+}
+
+static int artpec6_crypto_dma_map_page(struct artpec6_crypto_req_common *common,
+ struct page *page, size_t offset,
+ size_t size,
+ enum dma_data_direction dir,
+ dma_addr_t *dma_addr_out)
+{
+ struct artpec6_crypto_dma_descriptors *dma = common->dma;
+ struct device *dev = artpec6_crypto_dev;
+ struct artpec6_crypto_dma_map *map;
+ dma_addr_t dma_addr;
+
+ *dma_addr_out = 0;
+
+ if (dma->map_count >= ARRAY_SIZE(dma->maps))
+ return -ENOMEM;
+
+ dma_addr = dma_map_page(dev, page, offset, size, dir);
+ if (dma_mapping_error(dev, dma_addr))
+ return -ENOMEM;
+
+ map = &dma->maps[dma->map_count++];
+ map->size = size;
+ map->dma_addr = dma_addr;
+ map->dir = dir;
+
+ *dma_addr_out = dma_addr;
+
+ return 0;
+}
+
+static int
+artpec6_crypto_dma_map_single(struct artpec6_crypto_req_common *common,
+ void *ptr, size_t size,
+ enum dma_data_direction dir,
+ dma_addr_t *dma_addr_out)
+{
+ struct page *page = virt_to_page(ptr);
+ size_t offset = (uintptr_t)ptr & ~PAGE_MASK;
+
+ return artpec6_crypto_dma_map_page(common, page, offset, size, dir,
+ dma_addr_out);
+}
+
+static int
+artpec6_crypto_dma_map_descs(struct artpec6_crypto_req_common *common)
+{
+ struct artpec6_crypto_dma_descriptors *dma = common->dma;
+ int ret;
+
+ ret = artpec6_crypto_dma_map_single(common, dma->in,
+ sizeof(dma->in[0]) * dma->in_cnt,
+ DMA_TO_DEVICE, &dma->in_dma_addr);
+ if (ret)
+ return ret;
+
+ ret = artpec6_crypto_dma_map_single(common, dma->out,
+ sizeof(dma->out[0]) * dma->out_cnt,
+ DMA_TO_DEVICE, &dma->out_dma_addr);
+ if (ret)
+ return ret;
+
+ /* We only read one stat descriptor */
+ dma->stat[dma->in_cnt - 1] = 0;
+
+ /*
+ * DMA_BIDIRECTIONAL since we need our zeroing of the stat descriptor
+ * to be written.
+ */
+ return artpec6_crypto_dma_map_single(common,
+ dma->stat + dma->in_cnt - 1,
+ sizeof(dma->stat[0]),
+ DMA_BIDIRECTIONAL,
+ &dma->stat_dma_addr);
+}
+
+static void
+artpec6_crypto_dma_unmap_all(struct artpec6_crypto_req_common *common)
+{
+ struct artpec6_crypto_dma_descriptors *dma = common->dma;
+ struct device *dev = artpec6_crypto_dev;
+ int i;
+
+ for (i = 0; i < dma->map_count; i++) {
+ struct artpec6_crypto_dma_map *map = &dma->maps[i];
+
+ dma_unmap_page(dev, map->dma_addr, map->size, map->dir);
+ }
+
+ dma->map_count = 0;
+}
+
+/** artpec6_crypto_setup_out_descr - Setup an out descriptor
+ *
+ * @dst: The virtual address of the data
+ * @len: The length of the data
+ * @eop: True if this is the last buffer in the packet
+ * @use_short: If this is true and the data length is 7 bytes or less then
+ * a short descriptor will be used
+ *
+ * @return 0 on success
+ * Any errors from artpec6_crypto_setup_out_descr_short() or
+ * artpec6_crypto_setup_out_descr_phys()
+ */
+static int
+artpec6_crypto_setup_out_descr(struct artpec6_crypto_req_common *common,
+ void *dst, unsigned int len, bool eop,
+ bool use_short)
+{
+ if (use_short && len < 7) {
+ return artpec6_crypto_setup_out_descr_short(common, dst, len,
+ eop);
+ } else {
+ int ret;
+ dma_addr_t dma_addr;
+
+ ret = artpec6_crypto_dma_map_single(common, dst, len,
+ DMA_TO_DEVICE,
+ &dma_addr);
+ if (ret)
+ return ret;
+
+ return artpec6_crypto_setup_out_descr_phys(common, dma_addr,
+ len, eop);
+ }
+}
+
+/** artpec6_crypto_setup_in_descr_phys - Setup an in channel with a
+ * physical address
+ *
+ * @addr: The physical address of the data buffer
+ * @len: The length of the data buffer
+ * @intr: True if an interrupt should be fired after HW processing of this
+ * descriptor
+ *
+ */
+static int
+artpec6_crypto_setup_in_descr_phys(struct artpec6_crypto_req_common *common,
+ dma_addr_t addr, unsigned int len, bool intr)
+{
+ struct artpec6_crypto_dma_descriptors *dma = common->dma;
+ struct pdma_descr *d;
+
+ if (dma->in_cnt >= PDMA_DESCR_COUNT ||
+ fault_inject_dma_descr()) {
+ pr_err("No free IN DMA descriptors available!\n");
+ return -ENOSPC;
+ }
+ d = &dma->in[dma->in_cnt++];
+ memset(d, 0, sizeof(*d));
+
+ d->ctrl.intr = intr;
+ d->data.len = len;
+ d->data.buf = addr;
+ return 0;
+}
+
+/** artpec6_crypto_setup_in_descr - Setup an in channel descriptor
+ *
+ * @buffer: The virtual address of the data buffer
+ * @len: The length of the data buffer
+ * @last: If this is the last data buffer in the request (i.e. an interrupt
+ * is needed)
+ *
+ * Short descriptors are not used for the in channel
+ */
+static int
+artpec6_crypto_setup_in_descr(struct artpec6_crypto_req_common *common,
+ void *buffer, unsigned int len, bool last)
+{
+ dma_addr_t dma_addr;
+ int ret;
+
+ ret = artpec6_crypto_dma_map_single(common, buffer, len,
+ DMA_FROM_DEVICE, &dma_addr);
+ if (ret)
+ return ret;
+
+ return artpec6_crypto_setup_in_descr_phys(common, dma_addr, len, last);
+}
+
+static struct artpec6_crypto_bounce_buffer *
+artpec6_crypto_alloc_bounce(gfp_t flags)
+{
+ void *base;
+ size_t alloc_size = sizeof(struct artpec6_crypto_bounce_buffer) +
+ 2 * ARTPEC_CACHE_LINE_MAX;
+ struct artpec6_crypto_bounce_buffer *bbuf = kzalloc(alloc_size, flags);
+
+ if (!bbuf)
+ return NULL;
+
+ base = bbuf + 1;
+ bbuf->buf = PTR_ALIGN(base, ARTPEC_CACHE_LINE_MAX);
+ return bbuf;
+}
+
+static int setup_bounce_buffer_in(struct artpec6_crypto_req_common *common,
+ struct artpec6_crypto_walk *walk, size_t size)
+{
+ struct artpec6_crypto_bounce_buffer *bbuf;
+ int ret;
+
+ bbuf = artpec6_crypto_alloc_bounce(common->gfp_flags);
+ if (!bbuf)
+ return -ENOMEM;
+
+ bbuf->length = size;
+ bbuf->sg = walk->sg;
+ bbuf->offset = walk->offset;
+
+ ret = artpec6_crypto_setup_in_descr(common, bbuf->buf, size, false);
+ if (ret) {
+ kfree(bbuf);
+ return ret;
+ }
+
+ pr_debug("BOUNCE %zu offset %zu\n", size, walk->offset);
+ list_add_tail(&bbuf->list, &common->dma->bounce_buffers);
+ return 0;
+}
+
+static int
+artpec6_crypto_setup_sg_descrs_in(struct artpec6_crypto_req_common *common,
+ struct artpec6_crypto_walk *walk,
+ size_t count)
+{
+ size_t chunk;
+ int ret;
+ dma_addr_t addr;
+
+ while (walk->sg && count) {
+ chunk = min(count, artpec6_crypto_walk_chunklen(walk));
+ addr = artpec6_crypto_walk_chunk_phys(walk);
+
+ /* When destination buffers are not aligned to the cache line
+ * size we need bounce buffers. The DMA-API requires that the
+ * entire line is owned by the DMA buffer and this holds also
+ * for the case when coherent DMA is used.
+ */
+ if (!IS_ALIGNED(addr, ARTPEC_CACHE_LINE_MAX)) {
+ chunk = min_t(dma_addr_t, chunk,
+ ALIGN(addr, ARTPEC_CACHE_LINE_MAX) -
+ addr);
+
+ pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk);
+ ret = setup_bounce_buffer_in(common, walk, chunk);
+ } else if (chunk < ARTPEC_CACHE_LINE_MAX) {
+ pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk);
+ ret = setup_bounce_buffer_in(common, walk, chunk);
+ } else {
+ dma_addr_t dma_addr;
+
+ chunk = chunk & ~(ARTPEC_CACHE_LINE_MAX-1);
+
+ pr_debug("CHUNK %pad:%zu\n", &addr, chunk);
+
+ ret = artpec6_crypto_dma_map_page(common,
+ sg_page(walk->sg),
+ walk->sg->offset +
+ walk->offset,
+ chunk,
+ DMA_FROM_DEVICE,
+ &dma_addr);
+ if (ret)
+ return ret;
+
+ ret = artpec6_crypto_setup_in_descr_phys(common,
+ dma_addr,
+ chunk, false);
+ }
+
+ if (ret)
+ return ret;
+
+ count = count - chunk;
+ artpec6_crypto_walk_advance(walk, chunk);
+ }
+
+ if (count)
+ pr_err("EOL unexpected %zu bytes left\n", count);
+
+ return count ? -EINVAL : 0;
+}
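+
+/* Worked example (hypothetical addresses): with ARTPEC_CACHE_LINE_MAX == 32,
+ * a 100-byte destination chunk starting at physical address 0x1014 is split
+ * by artpec6_crypto_setup_sg_descrs_in() as follows:
+ *
+ *   - 12 bytes (0x1014..0x101f) via a bounce buffer, up to the next line;
+ *   - 64 bytes (0x1020..0x105f) DMA-mapped directly (88 rounded down to 32);
+ *   - 24 bytes (0x1060..0x1077) via a bounce buffer (shorter than a line).
+ *
+ * The bounce buffers are copied back to the scatterlist in
+ * artpec6_crypto_copy_bounce_buffers() once the request has completed.
+ */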
+
+static int
+artpec6_crypto_setup_sg_descrs_out(struct artpec6_crypto_req_common *common,
+ struct artpec6_crypto_walk *walk,
+ size_t count)
+{
+ size_t chunk;
+ int ret;
+ dma_addr_t addr;
+
+ while (walk->sg && count) {
+ chunk = min(count, artpec6_crypto_walk_chunklen(walk));
+ addr = artpec6_crypto_walk_chunk_phys(walk);
+
+ pr_debug("OUT-CHUNK %pad:%zu\n", &addr, chunk);
+
+ if (addr & 3) {
+ char buf[3];
+
+ chunk = min_t(size_t, chunk, (4-(addr&3)));
+
+ sg_pcopy_to_buffer(walk->sg, 1, buf, chunk,
+ walk->offset);
+
+ ret = artpec6_crypto_setup_out_descr_short(common, buf,
+ chunk,
+ false);
+ } else {
+ dma_addr_t dma_addr;
+
+ ret = artpec6_crypto_dma_map_page(common,
+ sg_page(walk->sg),
+ walk->sg->offset +
+ walk->offset,
+ chunk,
+ DMA_TO_DEVICE,
+ &dma_addr);
+ if (ret)
+ return ret;
+
+ ret = artpec6_crypto_setup_out_descr_phys(common,
+ dma_addr,
+ chunk, false);
+ }
+
+ if (ret)
+ return ret;
+
+ count = count - chunk;
+ artpec6_crypto_walk_advance(walk, chunk);
+ }
+
+ if (count)
+ pr_err("EOL unexpected %zu bytes left\n", count);
+
+ return count ? -EINVAL : 0;
+}
+
+
+/** artpec6_crypto_terminate_out_descrs - Set the EOP on the last out descriptor
+ *
+ * If the out descriptor list is non-empty, then the eop flag on the
+ * last used out descriptor will be set.
+ *
+ * @return 0 on success
+ * -EINVAL if the out descriptor list is empty or has overflown
+ */
+static int
+artpec6_crypto_terminate_out_descrs(struct artpec6_crypto_req_common *common)
+{
+ struct artpec6_crypto_dma_descriptors *dma = common->dma;
+ struct pdma_descr *d;
+
+ if (!dma->out_cnt || dma->out_cnt > PDMA_DESCR_COUNT) {
+ pr_err("%s: OUT descriptor list is %s\n",
+ MODULE_NAME, dma->out_cnt ? "full" : "empty");
+ return -EINVAL;
+
+ }
+
+ d = &dma->out[dma->out_cnt-1];
+ d->ctrl.eop = 1;
+
+ return 0;
+}
+
+/** artpec6_crypto_terminate_in_descrs - Set the interrupt flag on the last
+ * in descriptor
+ *
+ * See artpec6_crypto_terminate_out_descrs() for return values
+ */
+static int
+artpec6_crypto_terminate_in_descrs(struct artpec6_crypto_req_common *common)
+{
+ struct artpec6_crypto_dma_descriptors *dma = common->dma;
+ struct pdma_descr *d;
+
+ if (!dma->in_cnt || dma->in_cnt > PDMA_DESCR_COUNT) {
+ pr_err("%s: IN descriptor list is %s\n",
+ MODULE_NAME, dma->in_cnt ? "full" : "empty");
+ return -EINVAL;
+ }
+
+ d = &dma->in[dma->in_cnt-1];
+ d->ctrl.intr = 1;
+ return 0;
+}
+
+/** create_hash_pad - Create a Secure Hash conformant pad
+ *
+ * @dst: The destination buffer to write the pad. Must be at least 64 bytes
+ * @dgstlen: The total length of the hash digest in bytes
+ * @bitcount: The total length of the digest in bits
+ *
+ * @return The total number of padding bytes written to @dst
+ */
+static size_t
+create_hash_pad(int oper, unsigned char *dst, u64 dgstlen, u64 bitcount)
+{
+ unsigned int mod, target, diff, pad_bytes, size_bytes;
+ __be64 bits = __cpu_to_be64(bitcount);
+
+ switch (oper) {
+ case regk_crypto_sha1:
+ case regk_crypto_sha256:
+ case regk_crypto_hmac_sha1:
+ case regk_crypto_hmac_sha256:
+ target = 448 / 8;
+ mod = 512 / 8;
+ size_bytes = 8;
+ break;
+ default:
+ target = 896 / 8;
+ mod = 1024 / 8;
+ size_bytes = 16;
+ break;
+ }
+
+ target -= 1;
+ diff = dgstlen & (mod - 1);
+ pad_bytes = diff > target ? target + mod - diff : target - diff;
+
+ memset(dst + 1, 0, pad_bytes);
+ dst[0] = 0x80;
+
+ if (size_bytes == 16) {
+ memset(dst + 1 + pad_bytes, 0, 8);
+ memcpy(dst + 1 + pad_bytes + 8, &bits, 8);
+ } else {
+ memcpy(dst + 1 + pad_bytes, &bits, 8);
+ }
+
+ return pad_bytes + size_bytes + 1;
+}
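+
+/* Example (illustrative): for SHA-256 (mod = 64, target = 55 after the
+ * adjustment above) and a 3-byte message, diff = 3 and pad_bytes = 52.
+ * The pad is then the byte 0x80, 52 zero bytes and an 8-byte big-endian
+ * bit count, i.e. 61 bytes in total, which brings the padded message to
+ * exactly one 64-byte block.
+ */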
+
+static int artpec6_crypto_common_init(struct artpec6_crypto_req_common *common,
+ struct crypto_async_request *parent,
+ void (*complete)(struct crypto_async_request *req),
+ struct scatterlist *dstsg, unsigned int nbytes)
+{
+ gfp_t flags;
+ struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
+
+ flags = (parent->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+
+ common->gfp_flags = flags;
+ common->dma = kmem_cache_alloc(ac->dma_cache, flags);
+ if (!common->dma)
+ return -ENOMEM;
+
+ common->req = parent;
+ common->complete = complete;
+ return 0;
+}
+
+static void
+artpec6_crypto_bounce_destroy(struct artpec6_crypto_dma_descriptors *dma)
+{
+ struct artpec6_crypto_bounce_buffer *b;
+ struct artpec6_crypto_bounce_buffer *next;
+
+ list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) {
+ kfree(b);
+ }
+}
+
+static int
+artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common)
+{
+ struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
+
+ artpec6_crypto_dma_unmap_all(common);
+ artpec6_crypto_bounce_destroy(common->dma);
+ kmem_cache_free(ac->dma_cache, common->dma);
+ common->dma = NULL;
+ return 0;
+}
+
+/*
+ * Ciphering functions.
+ */
+static int artpec6_crypto_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
+ struct artpec6_crypto_request_context *req_ctx = NULL;
+ void (*complete)(struct crypto_async_request *req);
+ int ret;
+
+ req_ctx = skcipher_request_ctx(req);
+
+ switch (ctx->crypto_type) {
+ case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
+ case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
+ case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
+ req_ctx->decrypt = 0;
+ break;
+ default:
+ break;
+ }
+
+ switch (ctx->crypto_type) {
+ case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
+ complete = artpec6_crypto_complete_cbc_encrypt;
+ break;
+ default:
+ complete = artpec6_crypto_complete_crypto;
+ break;
+ }
+
+ ret = artpec6_crypto_common_init(&req_ctx->common,
+ &req->base,
+ complete,
+ req->dst, req->cryptlen);
+ if (ret)
+ return ret;
+
+ ret = artpec6_crypto_prepare_crypto(req);
+ if (ret) {
+ artpec6_crypto_common_destroy(&req_ctx->common);
+ return ret;
+ }
+
+ return artpec6_crypto_submit(&req_ctx->common);
+}
+
+static int artpec6_crypto_decrypt(struct skcipher_request *req)
+{
+ int ret;
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
+ struct artpec6_crypto_request_context *req_ctx = NULL;
+ void (*complete)(struct crypto_async_request *req);
+
+ req_ctx = skcipher_request_ctx(req);
+
+ switch (ctx->crypto_type) {
+ case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
+ case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
+ case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
+ req_ctx->decrypt = 1;
+ break;
+ default:
+ break;
+ }
+
+
+ switch (ctx->crypto_type) {
+ case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
+ complete = artpec6_crypto_complete_cbc_decrypt;
+ break;
+ default:
+ complete = artpec6_crypto_complete_crypto;
+ break;
+ }
+
+ ret = artpec6_crypto_common_init(&req_ctx->common, &req->base,
+ complete,
+ req->dst, req->cryptlen);
+ if (ret)
+ return ret;
+
+ ret = artpec6_crypto_prepare_crypto(req);
+ if (ret) {
+ artpec6_crypto_common_destroy(&req_ctx->common);
+ return ret;
+ }
+
+ return artpec6_crypto_submit(&req_ctx->common);
+}
+
+static int
+artpec6_crypto_ctr_crypt(struct skcipher_request *req, bool encrypt)
+{
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
+ size_t iv_len = crypto_skcipher_ivsize(cipher);
+ unsigned int counter = be32_to_cpup((__be32 *)
+ (req->iv + iv_len - 4));
+ unsigned int nblks = ALIGN(req->cryptlen, AES_BLOCK_SIZE) /
+ AES_BLOCK_SIZE;
+
+ /*
+ * The hardware uses only the last 32-bits as the counter while the
+ * kernel tests (aes_ctr_enc_tv_template[4] for example) expect that
+ * the whole IV is a counter. So fall back if the counter is going to
+ * overflow.
+ */
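+ /* For example, a request of 3 AES blocks (nblks = 3) with the last
+ * 32 IV bits at 0xfffffffe gives counter + nblks == 1 < counter, so
+ * such a request is handed to the fallback implementation below.
+ */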
+ if (counter + nblks < counter) {
+ int ret;
+
+ pr_debug("counter %x will overflow (nblks %u), falling back\n",
+ counter, nblks);
+
+ ret = crypto_skcipher_setkey(ctx->fallback, ctx->aes_key,
+ ctx->key_length);
+ if (ret)
+ return ret;
+
+ {
+ SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+
+ skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_callback(subreq, req->base.flags,
+ NULL, NULL);
+ skcipher_request_set_crypt(subreq, req->src, req->dst,
+ req->cryptlen, req->iv);
+ ret = encrypt ? crypto_skcipher_encrypt(subreq)
+ : crypto_skcipher_decrypt(subreq);
+ skcipher_request_zero(subreq);
+ }
+ return ret;
+ }
+
+ return encrypt ? artpec6_crypto_encrypt(req)
+ : artpec6_crypto_decrypt(req);
+}
+
+static int artpec6_crypto_ctr_encrypt(struct skcipher_request *req)
+{
+ return artpec6_crypto_ctr_crypt(req, true);
+}
+
+static int artpec6_crypto_ctr_decrypt(struct skcipher_request *req)
+{
+ return artpec6_crypto_ctr_crypt(req, false);
+}
+
+/*
+ * AEAD functions
+ */
+static int artpec6_crypto_aead_init(struct crypto_aead *tfm)
+{
+ struct artpec6_cryptotfm_context *tfm_ctx = crypto_aead_ctx(tfm);
+
+ memset(tfm_ctx, 0, sizeof(*tfm_ctx));
+
+ crypto_aead_set_reqsize(tfm,
+ sizeof(struct artpec6_crypto_aead_req_ctx));
+
+ return 0;
+}
+
+static int artpec6_crypto_aead_set_key(struct crypto_aead *tfm, const u8 *key,
+ unsigned int len)
+{
+ struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(&tfm->base);
+
+ if (len != 16 && len != 24 && len != 32) {
+ crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ ctx->key_length = len;
+
+ memcpy(ctx->aes_key, key, len);
+ return 0;
+}
+
+static int artpec6_crypto_aead_encrypt(struct aead_request *req)
+{
+ int ret;
+ struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req);
+
+ req_ctx->decrypt = false;
+ ret = artpec6_crypto_common_init(&req_ctx->common, &req->base,
+ artpec6_crypto_complete_aead,
+ NULL, 0);
+ if (ret)
+ return ret;
+
+ ret = artpec6_crypto_prepare_aead(req);
+ if (ret) {
+ artpec6_crypto_common_destroy(&req_ctx->common);
+ return ret;
+ }
+
+ return artpec6_crypto_submit(&req_ctx->common);
+}
+
+static int artpec6_crypto_aead_decrypt(struct aead_request *req)
+{
+ int ret;
+ struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req);
+
+ req_ctx->decrypt = true;
+ if (req->cryptlen < AES_BLOCK_SIZE)
+ return -EINVAL;
+
+ ret = artpec6_crypto_common_init(&req_ctx->common,
+ &req->base,
+ artpec6_crypto_complete_aead,
+ NULL, 0);
+ if (ret)
+ return ret;
+
+ ret = artpec6_crypto_prepare_aead(req);
+ if (ret) {
+ artpec6_crypto_common_destroy(&req_ctx->common);
+ return ret;
+ }
+
+ return artpec6_crypto_submit(&req_ctx->common);
+}
+
+static int artpec6_crypto_prepare_hash(struct ahash_request *areq)
+{
+ struct artpec6_hashalg_context *ctx = crypto_tfm_ctx(areq->base.tfm);
+ struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(areq);
+ size_t digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));
+ size_t contextsize = digestsize == SHA384_DIGEST_SIZE ?
+ SHA512_DIGEST_SIZE : digestsize;
+ size_t blocksize = crypto_tfm_alg_blocksize(
+ crypto_ahash_tfm(crypto_ahash_reqtfm(areq)));
+ struct artpec6_crypto_req_common *common = &req_ctx->common;
+ struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
+ enum artpec6_crypto_variant variant = ac->variant;
+ u32 sel_ctx;
+ bool ext_ctx = false;
+ bool run_hw = false;
+ int error = 0;
+
+ artpec6_crypto_init_dma_operation(common);
+
+ /* Upload HMAC key, must be the first packet */
+ if (req_ctx->hash_flags & HASH_FLAG_HMAC) {
+ if (variant == ARTPEC6_CRYPTO) {
+ req_ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER,
+ a6_regk_crypto_dlkey);
+ } else {
+ req_ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER,
+ a7_regk_crypto_dlkey);
+ }
+
+ /* Copy and pad up the key */
+ memcpy(req_ctx->key_buffer, ctx->hmac_key,
+ ctx->hmac_key_length);
+ memset(req_ctx->key_buffer + ctx->hmac_key_length, 0,
+ blocksize - ctx->hmac_key_length);
+
+ error = artpec6_crypto_setup_out_descr(common,
+ (void *)&req_ctx->key_md,
+ sizeof(req_ctx->key_md), false, false);
+ if (error)
+ return error;
+
+ error = artpec6_crypto_setup_out_descr(common,
+ req_ctx->key_buffer, blocksize,
+ true, false);
+ if (error)
+ return error;
+ }
+
+ if (!(req_ctx->hash_flags & HASH_FLAG_INIT_CTX)) {
+ /* Restore context */
+ sel_ctx = regk_crypto_ext;
+ ext_ctx = true;
+ } else {
+ sel_ctx = regk_crypto_init;
+ }
+
+ if (variant == ARTPEC6_CRYPTO) {
+ req_ctx->hash_md &= ~A6_CRY_MD_HASH_SEL_CTX;
+ req_ctx->hash_md |= FIELD_PREP(A6_CRY_MD_HASH_SEL_CTX, sel_ctx);
+
+ /* If this is the final round, set the final flag */
+ if (req_ctx->hash_flags & HASH_FLAG_FINALIZE)
+ req_ctx->hash_md |= A6_CRY_MD_HASH_HMAC_FIN;
+ } else {
+ req_ctx->hash_md &= ~A7_CRY_MD_HASH_SEL_CTX;
+ req_ctx->hash_md |= FIELD_PREP(A7_CRY_MD_HASH_SEL_CTX, sel_ctx);
+
+ /* If this is the final round, set the final flag */
+ if (req_ctx->hash_flags & HASH_FLAG_FINALIZE)
+ req_ctx->hash_md |= A7_CRY_MD_HASH_HMAC_FIN;
+ }
+
+ /* Setup up metadata descriptors */
+ error = artpec6_crypto_setup_out_descr(common,
+ (void *)&req_ctx->hash_md,
+ sizeof(req_ctx->hash_md), false, false);
+ if (error)
+ return error;
+
+ error = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
+ if (error)
+ return error;
+
+ if (ext_ctx) {
+ error = artpec6_crypto_setup_out_descr(common,
+ req_ctx->digeststate,
+ contextsize, false, false);
+
+ if (error)
+ return error;
+ }
+
+ if (req_ctx->hash_flags & HASH_FLAG_UPDATE) {
+ size_t done_bytes = 0;
+ size_t total_bytes = areq->nbytes + req_ctx->partial_bytes;
+ size_t ready_bytes = round_down(total_bytes, blocksize);
+ struct artpec6_crypto_walk walk;
+
+ run_hw = ready_bytes > 0;
+ if (req_ctx->partial_bytes && ready_bytes) {
+ /* We have a partial buffer and will send at least some
+ * bytes to the HW. Empty this partial buffer before
+ * tackling the SG lists.
+ */
+ memcpy(req_ctx->partial_buffer_out,
+ req_ctx->partial_buffer,
+ req_ctx->partial_bytes);
+
+ error = artpec6_crypto_setup_out_descr(common,
+ req_ctx->partial_buffer_out,
+ req_ctx->partial_bytes,
+ false, true);
+ if (error)
+ return error;
+
+ /* Reset partial buffer */
+ done_bytes += req_ctx->partial_bytes;
+ req_ctx->partial_bytes = 0;
+ }
+
+ artpec6_crypto_walk_init(&walk, areq->src);
+
+ error = artpec6_crypto_setup_sg_descrs_out(common, &walk,
+ ready_bytes -
+ done_bytes);
+ if (error)
+ return error;
+
+ if (walk.sg) {
+ size_t sg_skip = ready_bytes - done_bytes;
+ size_t sg_rem = areq->nbytes - sg_skip;
+
+ sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
+ req_ctx->partial_buffer +
+ req_ctx->partial_bytes,
+ sg_rem, sg_skip);
+
+ req_ctx->partial_bytes += sg_rem;
+ }
+
+ req_ctx->digcnt += ready_bytes;
+ req_ctx->hash_flags &= ~(HASH_FLAG_UPDATE);
+ }
+
+ /* Finalize */
+ if (req_ctx->hash_flags & HASH_FLAG_FINALIZE) {
+ bool needtrim = contextsize != digestsize;
+ size_t hash_pad_len;
+ u64 digest_bits;
+ u32 oper;
+
+ if (variant == ARTPEC6_CRYPTO)
+ oper = FIELD_GET(A6_CRY_MD_OPER, req_ctx->hash_md);
+ else
+ oper = FIELD_GET(A7_CRY_MD_OPER, req_ctx->hash_md);
+
+ /* Write out the partial buffer if present */
+ if (req_ctx->partial_bytes) {
+ memcpy(req_ctx->partial_buffer_out,
+ req_ctx->partial_buffer,
+ req_ctx->partial_bytes);
+ error = artpec6_crypto_setup_out_descr(common,
+ req_ctx->partial_buffer_out,
+ req_ctx->partial_bytes,
+ false, true);
+ if (error)
+ return error;
+
+ req_ctx->digcnt += req_ctx->partial_bytes;
+ req_ctx->partial_bytes = 0;
+ }
+
+ if (req_ctx->hash_flags & HASH_FLAG_HMAC)
+ digest_bits = 8 * (req_ctx->digcnt + blocksize);
+ else
+ digest_bits = 8 * req_ctx->digcnt;
+
+ /* Add the hash pad */
+ hash_pad_len = create_hash_pad(oper, req_ctx->pad_buffer,
+ req_ctx->digcnt, digest_bits);
+ error = artpec6_crypto_setup_out_descr(common,
+ req_ctx->pad_buffer,
+ hash_pad_len, false,
+ true);
+ req_ctx->digcnt = 0;
+
+ if (error)
+ return error;
+
+ /* Descriptor for the final result */
+ error = artpec6_crypto_setup_in_descr(common, areq->result,
+ digestsize,
+ !needtrim);
+ if (error)
+ return error;
+
+ if (needtrim) {
+ /* Discard the extra context bytes for SHA-384 */
+ error = artpec6_crypto_setup_in_descr(common,
+ req_ctx->partial_buffer,
+ digestsize - contextsize, true);
+ if (error)
+ return error;
+ }
+
+ } else { /* This is not the final operation for this request */
+ if (!run_hw)
+ return ARTPEC6_CRYPTO_PREPARE_HASH_NO_START;
+
+ /* Save the result to the context */
+ error = artpec6_crypto_setup_in_descr(common,
+ req_ctx->digeststate,
+ contextsize, false);
+ if (error)
+ return error;
+ /* fall through */
+ }
+
+ req_ctx->hash_flags &= ~(HASH_FLAG_INIT_CTX | HASH_FLAG_UPDATE |
+ HASH_FLAG_FINALIZE);
+
+ error = artpec6_crypto_terminate_in_descrs(common);
+ if (error)
+ return error;
+
+ error = artpec6_crypto_terminate_out_descrs(common);
+ if (error)
+ return error;
+
+ error = artpec6_crypto_dma_map_descs(common);
+ if (error)
+ return error;
+
+ return ARTPEC6_CRYPTO_PREPARE_HASH_START;
+}
+
+
+static int artpec6_crypto_aes_ecb_init(struct crypto_skcipher *tfm)
+{
+ struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
+
+ tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
+ ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_ECB;
+
+ return 0;
+}
+
+static int artpec6_crypto_aes_ctr_init(struct crypto_skcipher *tfm)
+{
+ struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
+
+ ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base),
+ 0,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->fallback))
+ return PTR_ERR(ctx->fallback);
+
+ tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
+ ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CTR;
+
+ return 0;
+}
+
+static int artpec6_crypto_aes_cbc_init(struct crypto_skcipher *tfm)
+{
+ struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
+
+ tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
+ ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CBC;
+
+ return 0;
+}
+
+static int artpec6_crypto_aes_xts_init(struct crypto_skcipher *tfm)
+{
+ struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
+
+ tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
+ ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_XTS;
+
+ return 0;
+}
+
+static void artpec6_crypto_aes_exit(struct crypto_skcipher *tfm)
+{
+ struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
+
+ memset(ctx, 0, sizeof(*ctx));
+}
+
+static void artpec6_crypto_aes_ctr_exit(struct crypto_skcipher *tfm)
+{
+ struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
+
+ crypto_free_skcipher(ctx->fallback);
+ artpec6_crypto_aes_exit(tfm);
+}
+
+static int
+artpec6_crypto_cipher_set_key(struct crypto_skcipher *cipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct artpec6_cryptotfm_context *ctx =
+ crypto_skcipher_ctx(cipher);
+
+ switch (keylen) {
+ case 16:
+ case 24:
+ case 32:
+ break;
+ default:
+ crypto_skcipher_set_flags(cipher,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ memcpy(ctx->aes_key, key, keylen);
+ ctx->key_length = keylen;
+ return 0;
+}
+
+static int
+artpec6_crypto_xts_set_key(struct crypto_skcipher *cipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct artpec6_cryptotfm_context *ctx =
+ crypto_skcipher_ctx(cipher);
+ int ret;
+
+ ret = xts_check_key(&cipher->base, key, keylen);
+ if (ret)
+ return ret;
+
+ switch (keylen) {
+ case 32:
+ case 48:
+ case 64:
+ break;
+ default:
+ crypto_skcipher_set_flags(cipher,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ memcpy(ctx->aes_key, key, keylen);
+ ctx->key_length = keylen;
+ return 0;
+}
+
+/** artpec6_crypto_prepare_crypto - Prepare an async block cipher crypto request
+ *
+ * @req: The asynch request to process
+ *
+ * @return 0 if the dma job was successfully prepared
+ * <0 on error
+ *
+ * This function sets up the PDMA descriptors for a block cipher request.
+ *
+ * The required padding is added for AES-CTR using a statically defined
+ * buffer.
+ *
+ * The PDMA descriptor list will be as follows:
+ *
+ * OUT: [KEY_MD][KEY][EOP]<CIPHER_MD>[IV]<data_0>...[data_n][AES-CTR_pad]<eop>
+ * IN: <CIPHER_MD><data_0>...[data_n]<intr>
+ *
+ */
+static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq)
+{
+ int ret;
+ struct artpec6_crypto_walk walk;
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
+ struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
+ struct artpec6_crypto_request_context *req_ctx = NULL;
+ size_t iv_len = crypto_skcipher_ivsize(cipher);
+ struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
+ enum artpec6_crypto_variant variant = ac->variant;
+ struct artpec6_crypto_req_common *common;
+ bool cipher_decr = false;
+ size_t cipher_klen;
+ u32 cipher_len = 0; /* Same as regk_crypto_key_128 for NULL crypto */
+ u32 oper;
+
+ req_ctx = skcipher_request_ctx(areq);
+ common = &req_ctx->common;
+
+ artpec6_crypto_init_dma_operation(common);
+
+ if (variant == ARTPEC6_CRYPTO)
+ ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER, a6_regk_crypto_dlkey);
+ else
+ ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER, a7_regk_crypto_dlkey);
+
+ ret = artpec6_crypto_setup_out_descr(common, (void *)&ctx->key_md,
+ sizeof(ctx->key_md), false, false);
+ if (ret)
+ return ret;
+
+ ret = artpec6_crypto_setup_out_descr(common, ctx->aes_key,
+ ctx->key_length, true, false);
+ if (ret)
+ return ret;
+
+ req_ctx->cipher_md = 0;
+
+ if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS)
+ cipher_klen = ctx->key_length/2;
+ else
+ cipher_klen = ctx->key_length;
+
+ /* Metadata */
+ switch (cipher_klen) {
+ case 16:
+ cipher_len = regk_crypto_key_128;
+ break;
+ case 24:
+ cipher_len = regk_crypto_key_192;
+ break;
+ case 32:
+ cipher_len = regk_crypto_key_256;
+ break;
+ default:
+ pr_err("%s: Invalid key length %d!\n",
+ MODULE_NAME, ctx->key_length);
+ return -EINVAL;
+ }
+
+ switch (ctx->crypto_type) {
+ case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
+ oper = regk_crypto_aes_ecb;
+ cipher_decr = req_ctx->decrypt;
+ break;
+
+ case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
+ oper = regk_crypto_aes_cbc;
+ cipher_decr = req_ctx->decrypt;
+ break;
+
+ case ARTPEC6_CRYPTO_CIPHER_AES_CTR:
+ oper = regk_crypto_aes_ctr;
+ cipher_decr = false;
+ break;
+
+ case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
+ oper = regk_crypto_aes_xts;
+ cipher_decr = req_ctx->decrypt;
+
+ if (variant == ARTPEC6_CRYPTO)
+ req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DSEQ;
+ else
+ req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DSEQ;
+ break;
+
+ default:
+ pr_err("%s: Invalid cipher mode %d!\n",
+ MODULE_NAME, ctx->crypto_type);
+ return -EINVAL;
+ }
+
+ if (variant == ARTPEC6_CRYPTO) {
+ req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_OPER, oper);
+ req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN,
+ cipher_len);
+ if (cipher_decr)
+ req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DECR;
+ } else {
+ req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_OPER, oper);
+ req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_CIPHER_LEN,
+ cipher_len);
+ if (cipher_decr)
+ req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DECR;
+ }
+
+ ret = artpec6_crypto_setup_out_descr(common,
+ &req_ctx->cipher_md,
+ sizeof(req_ctx->cipher_md),
+ false, false);
+ if (ret)
+ return ret;
+
+ ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
+ if (ret)
+ return ret;
+
+ if (iv_len) {
+ ret = artpec6_crypto_setup_out_descr(common, areq->iv, iv_len,
+ false, false);
+ if (ret)
+ return ret;
+ }
+ /* Data out */
+ artpec6_crypto_walk_init(&walk, areq->src);
+ ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, areq->cryptlen);
+ if (ret)
+ return ret;
+
+ /* Data in */
+ artpec6_crypto_walk_init(&walk, areq->dst);
+ ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, areq->cryptlen);
+ if (ret)
+ return ret;
+
+ /* CTR-mode padding required by the HW. */
+ if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_CTR ||
+ ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS) {
+ size_t pad = ALIGN(areq->cryptlen, AES_BLOCK_SIZE) -
+ areq->cryptlen;
+
+ if (pad) {
+ ret = artpec6_crypto_setup_out_descr(common,
+ ac->pad_buffer,
+ pad, false, false);
+ if (ret)
+ return ret;
+
+ ret = artpec6_crypto_setup_in_descr(common,
+ ac->pad_buffer, pad,
+ false);
+ if (ret)
+ return ret;
+ }
+ }
+
+ ret = artpec6_crypto_terminate_out_descrs(common);
+ if (ret)
+ return ret;
+
+ ret = artpec6_crypto_terminate_in_descrs(common);
+ if (ret)
+ return ret;
+
+ return artpec6_crypto_dma_map_descs(common);
+}
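+
+/* Illustrative example: a single-block (16-byte) AES-CBC encryption results
+ * in the following descriptor chains (cf. the layout sketch above):
+ *
+ *   OUT: [key_md 4B][key 16B EOP][cipher_md 4B][IV 16B][data 16B EOP]
+ *   IN:  [pad 4B][data 16B INTR]
+ *
+ * The final EOP/INTR flags are set by artpec6_crypto_terminate_out_descrs()
+ * and artpec6_crypto_terminate_in_descrs().
+ */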
+
+static int artpec6_crypto_prepare_aead(struct aead_request *areq)
+{
+ size_t count;
+ int ret;
+ size_t input_length;
+ struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(areq->base.tfm);
+ struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq);
+ struct crypto_aead *cipher = crypto_aead_reqtfm(areq);
+ struct artpec6_crypto_req_common *common = &req_ctx->common;
+ struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
+ enum artpec6_crypto_variant variant = ac->variant;
+ u32 md_cipher_len;
+
+ artpec6_crypto_init_dma_operation(common);
+
+ /* Key */
+ if (variant == ARTPEC6_CRYPTO) {
+ ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER,
+ a6_regk_crypto_dlkey);
+ } else {
+ ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER,
+ a7_regk_crypto_dlkey);
+ }
+ ret = artpec6_crypto_setup_out_descr(common, (void *)&ctx->key_md,
+ sizeof(ctx->key_md), false, false);
+ if (ret)
+ return ret;
+
+ ret = artpec6_crypto_setup_out_descr(common, ctx->aes_key,
+ ctx->key_length, true, false);
+ if (ret)
+ return ret;
+
+ req_ctx->cipher_md = 0;
+
+ switch (ctx->key_length) {
+ case 16:
+ md_cipher_len = regk_crypto_key_128;
+ break;
+ case 24:
+ md_cipher_len = regk_crypto_key_192;
+ break;
+ case 32:
+ md_cipher_len = regk_crypto_key_256;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (variant == ARTPEC6_CRYPTO) {
+ req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_OPER,
+ regk_crypto_aes_gcm);
+ req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN,
+ md_cipher_len);
+ if (req_ctx->decrypt)
+ req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DECR;
+ } else {
+ req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_OPER,
+ regk_crypto_aes_gcm);
+ req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_CIPHER_LEN,
+ md_cipher_len);
+ if (req_ctx->decrypt)
+ req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DECR;
+ }
+
+ ret = artpec6_crypto_setup_out_descr(common,
+ (void *) &req_ctx->cipher_md,
+ sizeof(req_ctx->cipher_md), false,
+ false);
+ if (ret)
+ return ret;
+
+ ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
+ if (ret)
+ return ret;
+
+ /* For the decryption, cryptlen includes the tag. */
+ input_length = areq->cryptlen;
+ if (req_ctx->decrypt)
+ input_length -= AES_BLOCK_SIZE;
+
+ /* Prepare the context buffer */
+ req_ctx->hw_ctx.aad_length_bits =
+ __cpu_to_be64(8*areq->assoclen);
+
+ req_ctx->hw_ctx.text_length_bits =
+ __cpu_to_be64(8*input_length);
+
+ memcpy(req_ctx->hw_ctx.J0, areq->iv, crypto_aead_ivsize(cipher));
+ /* The HW omits the initial increment of the counter field. */
+ crypto_inc(req_ctx->hw_ctx.J0+12, 4);
+
+ ret = artpec6_crypto_setup_out_descr(common, &req_ctx->hw_ctx,
+ sizeof(struct artpec6_crypto_aead_hw_ctx), false, false);
+ if (ret)
+ return ret;
+
+ {
+ struct artpec6_crypto_walk walk;
+
+ artpec6_crypto_walk_init(&walk, areq->src);
+
+ /* Associated data */
+ count = areq->assoclen;
+ ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count);
+ if (ret)
+ return ret;
+
+ if (!IS_ALIGNED(areq->assoclen, 16)) {
+ size_t assoc_pad = 16 - (areq->assoclen % 16);
+ /* The HW mandates zero padding here */
+ ret = artpec6_crypto_setup_out_descr(common,
+ ac->zero_buffer,
+ assoc_pad, false,
+ false);
+ if (ret)
+ return ret;
+ }
+
+ /* Data to crypto */
+ count = input_length;
+ ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count);
+ if (ret)
+ return ret;
+
+ if (!IS_ALIGNED(input_length, 16)) {
+ size_t crypto_pad = 16 - (input_length % 16);
+ /* The HW mandates zero padding here */
+ ret = artpec6_crypto_setup_out_descr(common,
+ ac->zero_buffer,
+ crypto_pad,
+ false,
+ false);
+ if (ret)
+ return ret;
+ }
+ }
+
+ /* Data from crypto */
+ {
+ struct artpec6_crypto_walk walk;
+ size_t output_len = areq->cryptlen;
+
+ if (req_ctx->decrypt)
+ output_len -= AES_BLOCK_SIZE;
+
+ artpec6_crypto_walk_init(&walk, areq->dst);
+
+ /* skip associated data in the output */
+ count = artpec6_crypto_walk_advance(&walk, areq->assoclen);
+ if (count)
+ return -EINVAL;
+
+ count = output_len;
+ ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, count);
+ if (ret)
+ return ret;
+
+ /* Put padding between the cryptotext and the auth tag */
+ if (!IS_ALIGNED(output_len, 16)) {
+ size_t crypto_pad = 16 - (output_len % 16);
+
+ ret = artpec6_crypto_setup_in_descr(common,
+ ac->pad_buffer,
+ crypto_pad, false);
+ if (ret)
+ return ret;
+ }
+
+ /* The authentication tag shall follow immediately after
+ * the output ciphertext. For decryption it is put in a context
+ * buffer for later compare against the input tag.
+ */
+ count = AES_BLOCK_SIZE;
+
+ if (req_ctx->decrypt) {
+ ret = artpec6_crypto_setup_in_descr(common,
+ req_ctx->decryption_tag, count, false);
+ if (ret)
+ return ret;
+
+ } else {
+ ret = artpec6_crypto_setup_sg_descrs_in(common, &walk,
+ count);
+ if (ret)
+ return ret;
+ }
+
+ }
+
+ ret = artpec6_crypto_terminate_in_descrs(common);
+ if (ret)
+ return ret;
+
+ ret = artpec6_crypto_terminate_out_descrs(common);
+ if (ret)
+ return ret;
+
+ return artpec6_crypto_dma_map_descs(common);
+}
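+
+/* Illustrative example: an AES-GCM encryption with a 20-byte AAD and a
+ * 16-byte plaintext produces roughly the following chains:
+ *
+ *   OUT: [key_md][key EOP][cipher_md][context: lengths + J0]
+ *        [AAD 20B][zero pad 12B][plaintext 16B EOP]
+ *   IN:  [pad 4B][ciphertext 16B][auth tag 16B INTR]
+ *
+ * For decryption the tag is instead captured in req_ctx->decryption_tag and
+ * compared against the input tag in artpec6_crypto_complete_aead().
+ */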
+
+static void artpec6_crypto_process_queue(struct artpec6_crypto *ac)
+{
+ struct artpec6_crypto_req_common *req;
+
+ while (!list_empty(&ac->queue) && !artpec6_crypto_busy()) {
+ req = list_first_entry(&ac->queue,
+ struct artpec6_crypto_req_common,
+ list);
+ list_move_tail(&req->list, &ac->pending);
+ artpec6_crypto_start_dma(req);
+
+ req->req->complete(req->req, -EINPROGRESS);
+ }
+
+ /*
+ * In some cases, the hardware can raise an in_eop_flush interrupt
+ * before actually updating the status, so we have a timer which will
+ * recheck the status on timeout. Since the cases are expected to be
+ * very rare, we use a relatively large timeout value. There should be
+ * no noticeable negative effect if we timeout spuriously.
+ */
+ if (ac->pending_count)
+ mod_timer(&ac->timer, jiffies + msecs_to_jiffies(100));
+ else
+ del_timer(&ac->timer);
+}
+
+static void artpec6_crypto_timeout(unsigned long data)
+{
+ struct artpec6_crypto *ac = (struct artpec6_crypto *) data;
+
+ dev_info_ratelimited(artpec6_crypto_dev, "timeout\n");
+
+ tasklet_schedule(&ac->task);
+}
+
+static void artpec6_crypto_task(unsigned long data)
+{
+ struct artpec6_crypto *ac = (struct artpec6_crypto *)data;
+ struct artpec6_crypto_req_common *req;
+ struct artpec6_crypto_req_common *n;
+
+ if (list_empty(&ac->pending)) {
+ pr_debug("Spurious IRQ\n");
+ return;
+ }
+
+ spin_lock_bh(&ac->queue_lock);
+
+ list_for_each_entry_safe(req, n, &ac->pending, list) {
+ struct artpec6_crypto_dma_descriptors *dma = req->dma;
+ u32 stat;
+
+ dma_sync_single_for_cpu(artpec6_crypto_dev, dma->stat_dma_addr,
+ sizeof(dma->stat[0]),
+ DMA_BIDIRECTIONAL);
+
+ stat = req->dma->stat[req->dma->in_cnt-1];
+
+ /* A non-zero final status descriptor indicates
+ * this job has finished.
+ */
+ pr_debug("Request %p status is %X\n", req, stat);
+ if (!stat)
+ break;
+
+ /* Allow testing of timeout handling with fault injection */
+#ifdef CONFIG_FAULT_INJECTION
+ if (should_fail(&artpec6_crypto_fail_status_read, 1))
+ continue;
+#endif
+
+ pr_debug("Completing request %p\n", req);
+
+ list_del(&req->list);
+
+ artpec6_crypto_dma_unmap_all(req);
+ artpec6_crypto_copy_bounce_buffers(req);
+
+ ac->pending_count--;
+ artpec6_crypto_common_destroy(req);
+ req->complete(req->req);
+ }
+
+ artpec6_crypto_process_queue(ac);
+
+ spin_unlock_bh(&ac->queue_lock);
+}
+
+static void artpec6_crypto_complete_crypto(struct crypto_async_request *req)
+{
+ req->complete(req, 0);
+}
+
+static void
+artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req)
+{
+ struct skcipher_request *cipher_req = container_of(req,
+ struct skcipher_request, base);
+
+ scatterwalk_map_and_copy(cipher_req->iv, cipher_req->src,
+ cipher_req->cryptlen - AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE, 0);
+ req->complete(req, 0);
+}
+
+static void
+artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req)
+{
+ struct skcipher_request *cipher_req = container_of(req,
+ struct skcipher_request, base);
+
+ scatterwalk_map_and_copy(cipher_req->iv, cipher_req->dst,
+ cipher_req->cryptlen - AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE, 0);
+ req->complete(req, 0);
+}
+
+static void artpec6_crypto_complete_aead(struct crypto_async_request *req)
+{
+ int result = 0;
+
+ /* Verify GCM hashtag. */
+ struct aead_request *areq = container_of(req,
+ struct aead_request, base);
+ struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq);
+
+ if (req_ctx->decrypt) {
+ u8 input_tag[AES_BLOCK_SIZE];
+
+ sg_pcopy_to_buffer(areq->src,
+ sg_nents(areq->src),
+ input_tag,
+ AES_BLOCK_SIZE,
+ areq->assoclen + areq->cryptlen -
+ AES_BLOCK_SIZE);
+
+ if (memcmp(req_ctx->decryption_tag,
+ input_tag,
+ AES_BLOCK_SIZE)) {
+ pr_debug("***EBADMSG:\n");
+ print_hex_dump_debug("ref:", DUMP_PREFIX_ADDRESS, 32, 1,
+ input_tag, AES_BLOCK_SIZE, true);
+ print_hex_dump_debug("out:", DUMP_PREFIX_ADDRESS, 32, 1,
+ req_ctx->decryption_tag,
+ AES_BLOCK_SIZE, true);
+
+ result = -EBADMSG;
+ }
+ }
+
+ req->complete(req, result);
+}
+
+static void artpec6_crypto_complete_hash(struct crypto_async_request *req)
+{
+ req->complete(req, 0);
+}
+
+
+/*------------------- Hash functions -----------------------------------------*/
+static int
+artpec6_crypto_hash_set_key(struct crypto_ahash *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(&tfm->base);
+ size_t blocksize;
+ int ret;
+
+ if (!keylen) {
+ pr_err("Invalid length (%d) of HMAC key\n",
+ keylen);
+ return -EINVAL;
+ }
+
+ memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key));
+
+ blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+
+ if (keylen > blocksize) {
+ SHASH_DESC_ON_STACK(hdesc, tfm_ctx->child_hash);
+
+ hdesc->tfm = tfm_ctx->child_hash;
+ hdesc->flags = crypto_ahash_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ tfm_ctx->hmac_key_length = blocksize;
+ ret = crypto_shash_digest(hdesc, key, keylen,
+ tfm_ctx->hmac_key);
+ if (ret)
+ return ret;
+
+ } else {
+ memcpy(tfm_ctx->hmac_key, key, keylen);
+ tfm_ctx->hmac_key_length = keylen;
+ }
+
+ return 0;
+}
+
+static int
+artpec6_crypto_init_hash(struct ahash_request *req, u8 type, int hmac)
+{
+ struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
+ enum artpec6_crypto_variant variant = ac->variant;
+ struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
+ u32 oper;
+
+ memset(req_ctx, 0, sizeof(*req_ctx));
+
+ req_ctx->hash_flags = HASH_FLAG_INIT_CTX;
+ if (hmac)
+ req_ctx->hash_flags |= (HASH_FLAG_HMAC | HASH_FLAG_UPDATE_KEY);
+
+ switch (type) {
+ case ARTPEC6_CRYPTO_HASH_SHA1:
+ oper = hmac ? regk_crypto_hmac_sha1 : regk_crypto_sha1;
+ break;
+ case ARTPEC6_CRYPTO_HASH_SHA256:
+ oper = hmac ? regk_crypto_hmac_sha256 : regk_crypto_sha256;
+ break;
+ case ARTPEC6_CRYPTO_HASH_SHA384:
+ oper = hmac ? regk_crypto_hmac_sha384 : regk_crypto_sha384;
+ break;
+ case ARTPEC6_CRYPTO_HASH_SHA512:
+ oper = hmac ? regk_crypto_hmac_sha512 : regk_crypto_sha512;
+ break;
+
+ default:
+ pr_err("%s: Unsupported hash type 0x%x\n", MODULE_NAME, type);
+ return -EINVAL;
+ }
+
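+ /* The ARTPEC-6 and ARTPEC-7 variants use different layouts for the
+ * operation field in the metadata word.
+ */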
+ if (variant == ARTPEC6_CRYPTO)
+ req_ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, oper);
+ else
+ req_ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, oper);
+
+ return 0;
+}
+
+static int artpec6_crypto_prepare_submit_hash(struct ahash_request *req)
+{
+ struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
+ int ret;
+
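+ /* Initialise the common request state the first time this request
+ * is submitted.
+ */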
+ if (!req_ctx->common.dma) {
+ ret = artpec6_crypto_common_init(&req_ctx->common,
+ &req->base,
+ artpec6_crypto_complete_hash,
+ NULL, 0);
+
+ if (ret)
+ return ret;
+ }
+
+ ret = artpec6_crypto_prepare_hash(req);
+ switch (ret) {
+ case ARTPEC6_CRYPTO_PREPARE_HASH_START:
+ ret = artpec6_crypto_submit(&req_ctx->common);
+ break;
+
+ case ARTPEC6_CRYPTO_PREPARE_HASH_NO_START:
+ ret = 0;
+ /* Fallthrough */
+
+ default:
+ artpec6_crypto_common_destroy(&req_ctx->common);
+ break;
+ }
+
+ return ret;
+}
+
+static int artpec6_crypto_hash_final(struct ahash_request *req)
+{
+ struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
+
+ req_ctx->hash_flags |= HASH_FLAG_FINALIZE;
+
+ return artpec6_crypto_prepare_submit_hash(req);
+}
+
+static int artpec6_crypto_hash_update(struct ahash_request *req)
+{
+ struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
+
+ req_ctx->hash_flags |= HASH_FLAG_UPDATE;
+
+ return artpec6_crypto_prepare_submit_hash(req);
+}
+
+static int artpec6_crypto_sha1_init(struct ahash_request *req)
+{
+ return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0);
+}
+
+static int artpec6_crypto_sha1_digest(struct ahash_request *req)
+{
+ struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
+
+ artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0);
+
+ req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
+
+ return artpec6_crypto_prepare_submit_hash(req);
+}
+
+static int artpec6_crypto_sha256_init(struct ahash_request *req)
+{
+ return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0);
+}
+
+static int artpec6_crypto_sha256_digest(struct ahash_request *req)
+{
+ struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
+
+ artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0);
+ req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
+
+ return artpec6_crypto_prepare_submit_hash(req);
+}
+
+static int __maybe_unused artpec6_crypto_sha384_init(struct ahash_request *req)
+{
+ return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 0);
+}
+
+static int __maybe_unused
+artpec6_crypto_sha384_digest(struct ahash_request *req)
+{
+ struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
+
+ artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 0);
+ req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
+
+ return artpec6_crypto_prepare_submit_hash(req);
+}
+
+static int artpec6_crypto_sha512_init(struct ahash_request *req)
+{
+ return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 0);
+}
+
+static int artpec6_crypto_sha512_digest(struct ahash_request *req)
+{
+ struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
+
+ artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 0);
+ req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
+
+ return artpec6_crypto_prepare_submit_hash(req);
+}
+
+static int artpec6_crypto_hmac_sha256_init(struct ahash_request *req)
+{
+ return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);
+}
+
+static int __maybe_unused
+artpec6_crypto_hmac_sha384_init(struct ahash_request *req)
+{
+ return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 1);
+}
+
+static int artpec6_crypto_hmac_sha512_init(struct ahash_request *req)
+{
+ return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 1);
+}
+
+static int artpec6_crypto_hmac_sha256_digest(struct ahash_request *req)
+{
+ struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
+
+ artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);
+ req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
+
+ return artpec6_crypto_prepare_submit_hash(req);
+}
+
+static int __maybe_unused
+artpec6_crypto_hmac_sha384_digest(struct ahash_request *req)
+{
+ struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
+
+ artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 1);
+ req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
+
+ return artpec6_crypto_prepare_submit_hash(req);
+}
+
+static int artpec6_crypto_hmac_sha512_digest(struct ahash_request *req)
+{
+ struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
+
+ artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 1);
+ req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
+
+ return artpec6_crypto_prepare_submit_hash(req);
+}
+
+static int artpec6_crypto_ahash_init_common(struct crypto_tfm *tfm,
+ const char *base_hash_name)
+{
+ struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm);
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct artpec6_hash_request_context));
+ memset(tfm_ctx, 0, sizeof(*tfm_ctx));
+
+ if (base_hash_name) {
+ struct crypto_shash *child;
+
+ child = crypto_alloc_shash(base_hash_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+
+ if (IS_ERR(child))
+ return PTR_ERR(child);
+
+ tfm_ctx->child_hash = child;
+ }
+
+ return 0;
+}
+
+static int artpec6_crypto_ahash_init(struct crypto_tfm *tfm)
+{
+ return artpec6_crypto_ahash_init_common(tfm, NULL);
+}
+
+static int artpec6_crypto_ahash_init_hmac_sha256(struct crypto_tfm *tfm)
+{
+ return artpec6_crypto_ahash_init_common(tfm, "sha256");
+}
+
+static int __maybe_unused
+artpec6_crypto_ahash_init_hmac_sha384(struct crypto_tfm *tfm)
+{
+ return artpec6_crypto_ahash_init_common(tfm, "sha384");
+}
+
+static int artpec6_crypto_ahash_init_hmac_sha512(struct crypto_tfm *tfm)
+{
+ return artpec6_crypto_ahash_init_common(tfm, "sha512");
+}
+
+static void artpec6_crypto_ahash_exit(struct crypto_tfm *tfm)
+{
+ struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm);
+
+ if (tfm_ctx->child_hash)
+ crypto_free_shash(tfm_ctx->child_hash);
+
+ memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key));
+ tfm_ctx->hmac_key_length = 0;
+}
+
+static int artpec6_crypto_hash_export(struct ahash_request *req, void *out)
+{
+ const struct artpec6_hash_request_context *ctx = ahash_request_ctx(req);
+ struct artpec6_hash_export_state *state = out;
+ struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
+ enum artpec6_crypto_variant variant = ac->variant;
+
+ BUILD_BUG_ON(sizeof(state->partial_buffer) !=
+ sizeof(ctx->partial_buffer));
+ BUILD_BUG_ON(sizeof(state->digeststate) != sizeof(ctx->digeststate));
+
+ state->digcnt = ctx->digcnt;
+ state->partial_bytes = ctx->partial_bytes;
+ state->hash_flags = ctx->hash_flags;
+
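+ /* Only the operation code is exported; import re-encodes it for the
+ * hardware variant in use.
+ */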
+ if (variant == ARTPEC6_CRYPTO)
+ state->oper = FIELD_GET(A6_CRY_MD_OPER, ctx->hash_md);
+ else
+ state->oper = FIELD_GET(A7_CRY_MD_OPER, ctx->hash_md);
+
+ memcpy(state->partial_buffer, ctx->partial_buffer,
+ sizeof(state->partial_buffer));
+ memcpy(state->digeststate, ctx->digeststate,
+ sizeof(state->digeststate));
+
+ return 0;
+}
+
+static int artpec6_crypto_hash_import(struct ahash_request *req, const void *in)
+{
+ struct artpec6_hash_request_context *ctx = ahash_request_ctx(req);
+ const struct artpec6_hash_export_state *state = in;
+ struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
+ enum artpec6_crypto_variant variant = ac->variant;
+
+ memset(ctx, 0, sizeof(*ctx));
+
+ ctx->digcnt = state->digcnt;
+ ctx->partial_bytes = state->partial_bytes;
+ ctx->hash_flags = state->hash_flags;
+
+ if (variant == ARTPEC6_CRYPTO)
+ ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, state->oper);
+ else
+ ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, state->oper);
+
+ memcpy(ctx->partial_buffer, state->partial_buffer,
+ sizeof(state->partial_buffer));
+ memcpy(ctx->digeststate, state->digeststate,
+ sizeof(state->digeststate));
+
+ return 0;
+}
+
+static int init_crypto_hw(struct artpec6_crypto *ac)
+{
+ enum artpec6_crypto_variant variant = ac->variant;
+ void __iomem *base = ac->base;
+ u32 out_descr_buf_size;
+ u32 out_data_buf_size;
+ u32 in_data_buf_size;
+ u32 in_descr_buf_size;
+ u32 in_stat_buf_size;
+ u32 in, out;
+
+ /*
+ * The PDMA unit contains 1984 bytes of internal memory for the OUT
+ * channels and 1024 bytes for the IN channel. This is an elastic
+ * memory used to internally store the descriptors and data. The values
+ * are specified in 64 byte increments. Trustzone buffers are not
+ * used at this stage.
+ */
+ out_data_buf_size = 16; /* 1024 bytes for data */
+ out_descr_buf_size = 15; /* 960 bytes for descriptors */
+ in_data_buf_size = 8; /* 512 bytes for data */
+ in_descr_buf_size = 4; /* 256 bytes for descriptors */
+ in_stat_buf_size = 4; /* 256 bytes for stat descrs */
+
+ BUILD_BUG_ON_MSG((out_data_buf_size
+ + out_descr_buf_size) * 64 > 1984,
+ "Invalid OUT configuration");
+
+ BUILD_BUG_ON_MSG((in_data_buf_size
+ + in_descr_buf_size
+ + in_stat_buf_size) * 64 > 1024,
+ "Invalid IN configuration");
+
+ in = FIELD_PREP(PDMA_IN_BUF_CFG_DATA_BUF_SIZE, in_data_buf_size) |
+ FIELD_PREP(PDMA_IN_BUF_CFG_DESCR_BUF_SIZE, in_descr_buf_size) |
+ FIELD_PREP(PDMA_IN_BUF_CFG_STAT_BUF_SIZE, in_stat_buf_size);
+
+ out = FIELD_PREP(PDMA_OUT_BUF_CFG_DATA_BUF_SIZE, out_data_buf_size) |
+ FIELD_PREP(PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE, out_descr_buf_size);
+
+ writel_relaxed(out, base + PDMA_OUT_BUF_CFG);
+ writel_relaxed(PDMA_OUT_CFG_EN, base + PDMA_OUT_CFG);
+
+ if (variant == ARTPEC6_CRYPTO) {
+ writel_relaxed(in, base + A6_PDMA_IN_BUF_CFG);
+ writel_relaxed(PDMA_IN_CFG_EN, base + A6_PDMA_IN_CFG);
+ writel_relaxed(A6_PDMA_INTR_MASK_IN_DATA |
+ A6_PDMA_INTR_MASK_IN_EOP_FLUSH,
+ base + A6_PDMA_INTR_MASK);
+ } else {
+ writel_relaxed(in, base + A7_PDMA_IN_BUF_CFG);
+ writel_relaxed(PDMA_IN_CFG_EN, base + A7_PDMA_IN_CFG);
+ writel_relaxed(A7_PDMA_INTR_MASK_IN_DATA |
+ A7_PDMA_INTR_MASK_IN_EOP_FLUSH,
+ base + A7_PDMA_INTR_MASK);
+ }
+
+ return 0;
+}
+
+static void artpec6_crypto_disable_hw(struct artpec6_crypto *ac)
+{
+ enum artpec6_crypto_variant variant = ac->variant;
+ void __iomem *base = ac->base;
+
+ if (variant == ARTPEC6_CRYPTO) {
+ writel_relaxed(A6_PDMA_IN_CMD_STOP, base + A6_PDMA_IN_CMD);
+ writel_relaxed(0, base + A6_PDMA_IN_CFG);
+ writel_relaxed(A6_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD);
+ } else {
+ writel_relaxed(A7_PDMA_IN_CMD_STOP, base + A7_PDMA_IN_CMD);
+ writel_relaxed(0, base + A7_PDMA_IN_CFG);
+ writel_relaxed(A7_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD);
+ }
+
+ writel_relaxed(0, base + PDMA_OUT_CFG);
+
+}
+
+static irqreturn_t artpec6_crypto_irq(int irq, void *dev_id)
+{
+ struct artpec6_crypto *ac = dev_id;
+ enum artpec6_crypto_variant variant = ac->variant;
+ void __iomem *base = ac->base;
+ u32 mask_in_data, mask_in_eop_flush;
+ u32 in_cmd_flush_stat, in_cmd_reg;
+ u32 ack_intr_reg;
+ u32 ack = 0;
+ u32 intr;
+
+ if (variant == ARTPEC6_CRYPTO) {
+ intr = readl_relaxed(base + A6_PDMA_MASKED_INTR);
+ mask_in_data = A6_PDMA_INTR_MASK_IN_DATA;
+ mask_in_eop_flush = A6_PDMA_INTR_MASK_IN_EOP_FLUSH;
+ in_cmd_flush_stat = A6_PDMA_IN_CMD_FLUSH_STAT;
+ in_cmd_reg = A6_PDMA_IN_CMD;
+ ack_intr_reg = A6_PDMA_ACK_INTR;
+ } else {
+ intr = readl_relaxed(base + A7_PDMA_MASKED_INTR);
+ mask_in_data = A7_PDMA_INTR_MASK_IN_DATA;
+ mask_in_eop_flush = A7_PDMA_INTR_MASK_IN_EOP_FLUSH;
+ in_cmd_flush_stat = A7_PDMA_IN_CMD_FLUSH_STAT;
+ in_cmd_reg = A7_PDMA_IN_CMD;
+ ack_intr_reg = A7_PDMA_ACK_INTR;
+ }
+
+ /* We get two interrupt notifications from each job.
+ * The in_data interrupt means that all data has been written to
+ * memory; we then request a status flush command to write the
+ * per-job status to its status vector. This ensures that the
+ * tasklet can detect exactly how many submitted jobs have
+ * finished.
+ */
+ if (intr & mask_in_data)
+ ack |= mask_in_data;
+
+ if (intr & mask_in_eop_flush)
+ ack |= mask_in_eop_flush;
+ else
+ writel_relaxed(in_cmd_flush_stat, base + in_cmd_reg);
+
+ writel_relaxed(ack, base + ack_intr_reg);
+
+ if (intr & mask_in_eop_flush)
+ tasklet_schedule(&ac->task);
+
+ return IRQ_HANDLED;
+}
+
+/*------------------- Algorithm definitions ----------------------------------*/
+
+/* Hashes */
+static struct ahash_alg hash_algos[] = {
+ /* SHA-1 */
+ {
+ .init = artpec6_crypto_sha1_init,
+ .update = artpec6_crypto_hash_update,
+ .final = artpec6_crypto_hash_final,
+ .digest = artpec6_crypto_sha1_digest,
+ .import = artpec6_crypto_hash_import,
+ .export = artpec6_crypto_hash_export,
+ .halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct artpec6_hash_export_state),
+ .halg.base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "artpec-sha1",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ .cra_init = artpec6_crypto_ahash_init,
+ .cra_exit = artpec6_crypto_ahash_exit,
+ }
+ },
+ /* SHA-256 */
+ {
+ .init = artpec6_crypto_sha256_init,
+ .update = artpec6_crypto_hash_update,
+ .final = artpec6_crypto_hash_final,
+ .digest = artpec6_crypto_sha256_digest,
+ .import = artpec6_crypto_hash_import,
+ .export = artpec6_crypto_hash_export,
+ .halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct artpec6_hash_export_state),
+ .halg.base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "artpec-sha256",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ .cra_init = artpec6_crypto_ahash_init,
+ .cra_exit = artpec6_crypto_ahash_exit,
+ }
+ },
+ /* HMAC SHA-256 */
+ {
+ .init = artpec6_crypto_hmac_sha256_init,
+ .update = artpec6_crypto_hash_update,
+ .final = artpec6_crypto_hash_final,
+ .digest = artpec6_crypto_hmac_sha256_digest,
+ .import = artpec6_crypto_hash_import,
+ .export = artpec6_crypto_hash_export,
+ .setkey = artpec6_crypto_hash_set_key,
+ .halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct artpec6_hash_export_state),
+ .halg.base = {
+ .cra_name = "hmac(sha256)",
+ .cra_driver_name = "artpec-hmac-sha256",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ .cra_init = artpec6_crypto_ahash_init_hmac_sha256,
+ .cra_exit = artpec6_crypto_ahash_exit,
+ }
+ },
+};
+
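+ /* SHA-384/512 and their HMAC variants are only registered on
+ * ARTPEC-7; see the registration in probe.
+ */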
+static struct ahash_alg artpec7_hash_algos[] = {
+ /* SHA-384 */
+ {
+ .init = artpec6_crypto_sha384_init,
+ .update = artpec6_crypto_hash_update,
+ .final = artpec6_crypto_hash_final,
+ .digest = artpec6_crypto_sha384_digest,
+ .import = artpec6_crypto_hash_import,
+ .export = artpec6_crypto_hash_export,
+ .halg.digestsize = SHA384_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct artpec6_hash_export_state),
+ .halg.base = {
+ .cra_name = "sha384",
+ .cra_driver_name = "artpec-sha384",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ .cra_init = artpec6_crypto_ahash_init,
+ .cra_exit = artpec6_crypto_ahash_exit,
+ }
+ },
+ /* HMAC SHA-384 */
+ {
+ .init = artpec6_crypto_hmac_sha384_init,
+ .update = artpec6_crypto_hash_update,
+ .final = artpec6_crypto_hash_final,
+ .digest = artpec6_crypto_hmac_sha384_digest,
+ .import = artpec6_crypto_hash_import,
+ .export = artpec6_crypto_hash_export,
+ .setkey = artpec6_crypto_hash_set_key,
+ .halg.digestsize = SHA384_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct artpec6_hash_export_state),
+ .halg.base = {
+ .cra_name = "hmac(sha384)",
+ .cra_driver_name = "artpec-hmac-sha384",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ .cra_init = artpec6_crypto_ahash_init_hmac_sha384,
+ .cra_exit = artpec6_crypto_ahash_exit,
+ }
+ },
+ /* SHA-512 */
+ {
+ .init = artpec6_crypto_sha512_init,
+ .update = artpec6_crypto_hash_update,
+ .final = artpec6_crypto_hash_final,
+ .digest = artpec6_crypto_sha512_digest,
+ .import = artpec6_crypto_hash_import,
+ .export = artpec6_crypto_hash_export,
+ .halg.digestsize = SHA512_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct artpec6_hash_export_state),
+ .halg.base = {
+ .cra_name = "sha512",
+ .cra_driver_name = "artpec-sha512",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ .cra_init = artpec6_crypto_ahash_init,
+ .cra_exit = artpec6_crypto_ahash_exit,
+ }
+ },
+ /* HMAC SHA-512 */
+ {
+ .init = artpec6_crypto_hmac_sha512_init,
+ .update = artpec6_crypto_hash_update,
+ .final = artpec6_crypto_hash_final,
+ .digest = artpec6_crypto_hmac_sha512_digest,
+ .import = artpec6_crypto_hash_import,
+ .export = artpec6_crypto_hash_export,
+ .setkey = artpec6_crypto_hash_set_key,
+ .halg.digestsize = SHA512_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct artpec6_hash_export_state),
+ .halg.base = {
+ .cra_name = "hmac(sha512)",
+ .cra_driver_name = "artpec-hmac-sha512",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ .cra_init = artpec6_crypto_ahash_init_hmac_sha512,
+ .cra_exit = artpec6_crypto_ahash_exit,
+ }
+ },
+};
+
+/* Crypto */
+static struct skcipher_alg crypto_algos[] = {
+ /* AES - ECB */
+ {
+ .base = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "artpec6-ecb-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = artpec6_crypto_cipher_set_key,
+ .encrypt = artpec6_crypto_encrypt,
+ .decrypt = artpec6_crypto_decrypt,
+ .init = artpec6_crypto_aes_ecb_init,
+ .exit = artpec6_crypto_aes_exit,
+ },
+ /* AES - CTR */
+ {
+ .base = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "artpec6-ctr-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = artpec6_crypto_cipher_set_key,
+ .encrypt = artpec6_crypto_ctr_encrypt,
+ .decrypt = artpec6_crypto_ctr_decrypt,
+ .init = artpec6_crypto_aes_ctr_init,
+ .exit = artpec6_crypto_aes_ctr_exit,
+ },
+ /* AES - CBC */
+ {
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "artpec6-cbc-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = artpec6_crypto_cipher_set_key,
+ .encrypt = artpec6_crypto_encrypt,
+ .decrypt = artpec6_crypto_decrypt,
+ .init = artpec6_crypto_aes_cbc_init,
+ .exit = artpec6_crypto_aes_exit
+ },
+ /* AES - XTS */
+ {
+ .base = {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "artpec6-xts-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = 2*AES_MIN_KEY_SIZE,
+ .max_keysize = 2*AES_MAX_KEY_SIZE,
+ .ivsize = 16,
+ .setkey = artpec6_crypto_xts_set_key,
+ .encrypt = artpec6_crypto_encrypt,
+ .decrypt = artpec6_crypto_decrypt,
+ .init = artpec6_crypto_aes_xts_init,
+ .exit = artpec6_crypto_aes_exit,
+ },
+};
+
+static struct aead_alg aead_algos[] = {
+ {
+ .init = artpec6_crypto_aead_init,
+ .setkey = artpec6_crypto_aead_set_key,
+ .encrypt = artpec6_crypto_aead_encrypt,
+ .decrypt = artpec6_crypto_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+
+ .base = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "artpec-gcm-aes",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ },
+ }
+};
+
+#ifdef CONFIG_DEBUG_FS
+
+struct dbgfs_u32 {
+ char *name;
+ mode_t mode;
+ u32 *flag;
+ char *desc;
+};
+
+static void artpec6_crypto_init_debugfs(void)
+{
+ dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL);
+
+ if (!dbgfs_root || IS_ERR(dbgfs_root)) {
+ dbgfs_root = NULL;
+ pr_err("%s: Could not initialise debugfs!\n", MODULE_NAME);
+ return;
+ }
+
+#ifdef CONFIG_FAULT_INJECTION
+ fault_create_debugfs_attr("fail_status_read", dbgfs_root,
+ &artpec6_crypto_fail_status_read);
+
+ fault_create_debugfs_attr("fail_dma_array_full", dbgfs_root,
+ &artpec6_crypto_fail_dma_array_full);
+#endif
+}
+
+static void artpec6_crypto_free_debugfs(void)
+{
+ if (!dbgfs_root)
+ return;
+
+ debugfs_remove_recursive(dbgfs_root);
+ dbgfs_root = NULL;
+}
+#endif
+
+static const struct of_device_id artpec6_crypto_of_match[] = {
+ { .compatible = "axis,artpec6-crypto", .data = (void *)ARTPEC6_CRYPTO },
+ { .compatible = "axis,artpec7-crypto", .data = (void *)ARTPEC7_CRYPTO },
+ {}
+};
+MODULE_DEVICE_TABLE(of, artpec6_crypto_of_match);
+
+static int artpec6_crypto_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ enum artpec6_crypto_variant variant;
+ struct artpec6_crypto *ac;
+ struct device *dev = &pdev->dev;
+ void __iomem *base;
+ struct resource *res;
+ int irq;
+ int err;
+
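+ /* Only a single instance of the accelerator is supported. */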
+ if (artpec6_crypto_dev)
+ return -ENODEV;
+
+ match = of_match_node(artpec6_crypto_of_match, dev->of_node);
+ if (!match)
+ return -EINVAL;
+
+ variant = (enum artpec6_crypto_variant)match->data;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return -ENODEV;
+
+ ac = devm_kzalloc(&pdev->dev, sizeof(struct artpec6_crypto),
+ GFP_KERNEL);
+ if (!ac)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ac);
+ ac->variant = variant;
+
+ spin_lock_init(&ac->queue_lock);
+ INIT_LIST_HEAD(&ac->queue);
+ INIT_LIST_HEAD(&ac->pending);
+ setup_timer(&ac->timer, artpec6_crypto_timeout, (unsigned long) ac);
+
+ ac->base = base;
+
+ ac->dma_cache = kmem_cache_create("artpec6_crypto_dma",
+ sizeof(struct artpec6_crypto_dma_descriptors),
+ 64,
+ 0,
+ NULL);
+ if (!ac->dma_cache)
+ return -ENOMEM;
+
+#ifdef CONFIG_DEBUG_FS
+ artpec6_crypto_init_debugfs();
+#endif
+
+ tasklet_init(&ac->task, artpec6_crypto_task,
+ (unsigned long)ac);
+
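+ /* Allocate twice the maximum cache line size so that the pad and
+ * zero buffers can be aligned to a cache line boundary below.
+ */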
+ ac->pad_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX,
+ GFP_KERNEL);
+ if (!ac->pad_buffer)
+ return -ENOMEM;
+ ac->pad_buffer = PTR_ALIGN(ac->pad_buffer, ARTPEC_CACHE_LINE_MAX);
+
+ ac->zero_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX,
+ GFP_KERNEL);
+ if (!ac->zero_buffer)
+ return -ENOMEM;
+ ac->zero_buffer = PTR_ALIGN(ac->zero_buffer, ARTPEC_CACHE_LINE_MAX);
+
+ err = init_crypto_hw(ac);
+ if (err)
+ goto free_cache;
+
+ err = devm_request_irq(&pdev->dev, irq, artpec6_crypto_irq, 0,
+ "artpec6-crypto", ac);
+ if (err)
+ goto disable_hw;
+
+ artpec6_crypto_dev = &pdev->dev;
+
+ err = crypto_register_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
+ if (err) {
+ dev_err(dev, "Failed to register ahashes\n");
+ goto disable_hw;
+ }
+
+ if (variant != ARTPEC6_CRYPTO) {
+ err = crypto_register_ahashes(artpec7_hash_algos,
+ ARRAY_SIZE(artpec7_hash_algos));
+ if (err) {
+ dev_err(dev, "Failed to register ahashes\n");
+ goto unregister_ahashes;
+ }
+ }
+
+ err = crypto_register_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
+ if (err) {
+ dev_err(dev, "Failed to register ciphers\n");
+ goto unregister_a7_ahashes;
+ }
+
+ err = crypto_register_aeads(aead_algos, ARRAY_SIZE(aead_algos));
+ if (err) {
+ dev_err(dev, "Failed to register aeads\n");
+ goto unregister_algs;
+ }
+
+ return 0;
+
+unregister_algs:
+ crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
+unregister_a7_ahashes:
+ if (variant != ARTPEC6_CRYPTO)
+ crypto_unregister_ahashes(artpec7_hash_algos,
+ ARRAY_SIZE(artpec7_hash_algos));
+unregister_ahashes:
+ crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
+disable_hw:
+ artpec6_crypto_disable_hw(ac);
+free_cache:
+ kmem_cache_destroy(ac->dma_cache);
+ return err;
+}
+
+static int artpec6_crypto_remove(struct platform_device *pdev)
+{
+ struct artpec6_crypto *ac = platform_get_drvdata(pdev);
+ int irq = platform_get_irq(pdev, 0);
+
+ crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
+ if (ac->variant != ARTPEC6_CRYPTO)
+ crypto_unregister_ahashes(artpec7_hash_algos,
+ ARRAY_SIZE(artpec7_hash_algos));
+ crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
+ crypto_unregister_aeads(aead_algos, ARRAY_SIZE(aead_algos));
+
+ tasklet_disable(&ac->task);
+ devm_free_irq(&pdev->dev, irq, ac);
+ tasklet_kill(&ac->task);
+ del_timer_sync(&ac->timer);
+
+ artpec6_crypto_disable_hw(ac);
+
+ kmem_cache_destroy(ac->dma_cache);
+#ifdef CONFIG_DEBUG_FS
+ artpec6_crypto_free_debugfs();
+#endif
+ return 0;
+}
+
+static struct platform_driver artpec6_crypto_driver = {
+ .probe = artpec6_crypto_probe,
+ .remove = artpec6_crypto_remove,
+ .driver = {
+ .name = "artpec6-crypto",
+ .owner = THIS_MODULE,
+ .of_match_table = artpec6_crypto_of_match,
+ },
+};
+
+module_platform_driver(artpec6_crypto_driver);
+
+MODULE_AUTHOR("Axis Communications AB");
+MODULE_DESCRIPTION("ARTPEC-6 Crypto driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 9cfd36c1bcb6..8685c7e4debd 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -90,8 +90,6 @@ static int aead_pri = 150;
module_param(aead_pri, int, 0644);
MODULE_PARM_DESC(aead_pri, "Priority for AEAD algos");
-#define MAX_SPUS 16
-
/* A type 3 BCM header, expected to precede the SPU header for SPU-M.
* Bits 3 and 4 in the first byte encode the channel number (the dma ringset).
* 0x60 - ring 0
@@ -120,7 +118,7 @@ static u8 select_channel(void)
{
u8 chan_idx = atomic_inc_return(&iproc_priv.next_chan);
- return chan_idx % iproc_priv.spu.num_spu;
+ return chan_idx % iproc_priv.spu.num_chan;
}
/**
@@ -4528,8 +4526,13 @@ static void spu_functions_register(struct device *dev,
*/
static int spu_mb_init(struct device *dev)
{
- struct mbox_client *mcl = &iproc_priv.mcl[iproc_priv.spu.num_spu];
- int err;
+ struct mbox_client *mcl = &iproc_priv.mcl;
+ int err, i;
+
+ iproc_priv.mbox = devm_kcalloc(dev, iproc_priv.spu.num_chan,
+ sizeof(struct mbox_chan *), GFP_KERNEL);
+ if (!iproc_priv.mbox)
+ return -ENOMEM;
mcl->dev = dev;
mcl->tx_block = false;
@@ -4538,25 +4541,33 @@ static int spu_mb_init(struct device *dev)
mcl->rx_callback = spu_rx_callback;
mcl->tx_done = NULL;
- iproc_priv.mbox[iproc_priv.spu.num_spu] =
- mbox_request_channel(mcl, 0);
- if (IS_ERR(iproc_priv.mbox[iproc_priv.spu.num_spu])) {
- err = (int)PTR_ERR(iproc_priv.mbox[iproc_priv.spu.num_spu]);
- dev_err(dev,
- "Mbox channel %d request failed with err %d",
- iproc_priv.spu.num_spu, err);
- iproc_priv.mbox[iproc_priv.spu.num_spu] = NULL;
- return err;
+ for (i = 0; i < iproc_priv.spu.num_chan; i++) {
+ iproc_priv.mbox[i] = mbox_request_channel(mcl, i);
+ if (IS_ERR(iproc_priv.mbox[i])) {
+ err = (int)PTR_ERR(iproc_priv.mbox[i]);
+ dev_err(dev,
+ "Mbox channel %d request failed with err %d",
+ i, err);
+ iproc_priv.mbox[i] = NULL;
+ goto free_channels;
+ }
}
return 0;
+free_channels:
+ for (i = 0; i < iproc_priv.spu.num_chan; i++) {
+ if (iproc_priv.mbox[i])
+ mbox_free_channel(iproc_priv.mbox[i]);
+ }
+
+ return err;
}
static void spu_mb_release(struct platform_device *pdev)
{
int i;
- for (i = 0; i < iproc_priv.spu.num_spu; i++)
+ for (i = 0; i < iproc_priv.spu.num_chan; i++)
mbox_free_channel(iproc_priv.mbox[i]);
}
@@ -4567,7 +4578,7 @@ static void spu_counters_init(void)
atomic_set(&iproc_priv.session_count, 0);
atomic_set(&iproc_priv.stream_count, 0);
- atomic_set(&iproc_priv.next_chan, (int)iproc_priv.spu.num_spu);
+ atomic_set(&iproc_priv.next_chan, (int)iproc_priv.spu.num_chan);
atomic64_set(&iproc_priv.bytes_in, 0);
atomic64_set(&iproc_priv.bytes_out, 0);
for (i = 0; i < SPU_OP_NUM; i++) {
@@ -4809,47 +4820,38 @@ static int spu_dt_read(struct platform_device *pdev)
struct resource *spu_ctrl_regs;
const struct of_device_id *match;
const struct spu_type_subtype *matched_spu_type;
- void __iomem *spu_reg_vbase[MAX_SPUS];
- int err;
+ struct device_node *dn = pdev->dev.of_node;
+ int err, i;
- match = of_match_device(of_match_ptr(bcm_spu_dt_ids), dev);
- matched_spu_type = match->data;
+ /* Count number of mailbox channels */
+ spu->num_chan = of_count_phandle_with_args(dn, "mboxes", "#mbox-cells");
- if (iproc_priv.spu.num_spu > 1) {
- /* If this is 2nd or later SPU, make sure it's same type */
- if ((spu->spu_type != matched_spu_type->type) ||
- (spu->spu_subtype != matched_spu_type->subtype)) {
- err = -EINVAL;
- dev_err(&pdev->dev, "Multiple SPU types not allowed");
- return err;
- }
- } else {
- /* Record type of first SPU */
- spu->spu_type = matched_spu_type->type;
- spu->spu_subtype = matched_spu_type->subtype;
+ match = of_match_device(of_match_ptr(bcm_spu_dt_ids), dev);
+ if (!match) {
+ dev_err(&pdev->dev, "Failed to match device\n");
+ return -ENODEV;
}
- /* Get and map SPU registers */
- spu_ctrl_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!spu_ctrl_regs) {
- err = -EINVAL;
- dev_err(&pdev->dev, "Invalid/missing registers for SPU\n");
- return err;
- }
+ matched_spu_type = match->data;
- spu_reg_vbase[iproc_priv.spu.num_spu] =
- devm_ioremap_resource(dev, spu_ctrl_regs);
- if (IS_ERR(spu_reg_vbase[iproc_priv.spu.num_spu])) {
- err = PTR_ERR(spu_reg_vbase[iproc_priv.spu.num_spu]);
- dev_err(&pdev->dev, "Failed to map registers: %d\n",
- err);
- spu_reg_vbase[iproc_priv.spu.num_spu] = NULL;
- return err;
- }
+ spu->spu_type = matched_spu_type->type;
+ spu->spu_subtype = matched_spu_type->subtype;
- dev_dbg(dev, "SPU %d detected.", iproc_priv.spu.num_spu);
+ i = 0;
+ for (i = 0; (i < MAX_SPUS) && ((spu_ctrl_regs =
+ platform_get_resource(pdev, IORESOURCE_MEM, i)) != NULL); i++) {
- spu->reg_vbase[iproc_priv.spu.num_spu] = spu_reg_vbase;
+ spu->reg_vbase[i] = devm_ioremap_resource(dev, spu_ctrl_regs);
+ if (IS_ERR(spu->reg_vbase[i])) {
+ err = PTR_ERR(spu->reg_vbase[i]);
+ dev_err(&pdev->dev, "Failed to map registers: %d\n",
+ err);
+ spu->reg_vbase[i] = NULL;
+ return err;
+ }
+ }
+ spu->num_spu = i;
+ dev_dbg(dev, "Device has %d SPUs", spu->num_spu);
return 0;
}
@@ -4860,8 +4862,8 @@ int bcm_spu_probe(struct platform_device *pdev)
struct spu_hw *spu = &iproc_priv.spu;
int err = 0;
- iproc_priv.pdev[iproc_priv.spu.num_spu] = pdev;
- platform_set_drvdata(iproc_priv.pdev[iproc_priv.spu.num_spu],
+ iproc_priv.pdev = pdev;
+ platform_set_drvdata(iproc_priv.pdev,
&iproc_priv);
err = spu_dt_read(pdev);
@@ -4872,12 +4874,6 @@ int bcm_spu_probe(struct platform_device *pdev)
if (err < 0)
goto failure;
- iproc_priv.spu.num_spu++;
-
- /* If already initialized, we've just added another SPU and are done */
- if (iproc_priv.inited)
- return 0;
-
if (spu->spu_type == SPU_TYPE_SPUM)
iproc_priv.bcm_hdr_len = 8;
else if (spu->spu_type == SPU_TYPE_SPU2)
@@ -4893,8 +4889,6 @@ int bcm_spu_probe(struct platform_device *pdev)
if (err < 0)
goto fail_reg;
- iproc_priv.inited = true;
-
return 0;
fail_reg:
diff --git a/drivers/crypto/bcm/cipher.h b/drivers/crypto/bcm/cipher.h
index 51dca529ce8f..57a55eb2a255 100644
--- a/drivers/crypto/bcm/cipher.h
+++ b/drivers/crypto/bcm/cipher.h
@@ -427,10 +427,13 @@ struct spu_hw {
/* The number of SPUs on this platform */
u32 num_spu;
+
+ /* The number of SPU channels on this platform */
+ u32 num_chan;
};
struct device_private {
- struct platform_device *pdev[MAX_SPUS];
+ struct platform_device *pdev;
struct spu_hw spu;
@@ -470,12 +473,10 @@ struct device_private {
/* Number of ICV check failures for AEAD messages */
atomic_t bad_icv;
- struct mbox_client mcl[MAX_SPUS];
- /* Array of mailbox channel pointers, one for each channel */
- struct mbox_chan *mbox[MAX_SPUS];
+ struct mbox_client mcl;
- /* Driver initialized */
- bool inited;
+ /* Array of mailbox channel pointers, one for each channel */
+ struct mbox_chan **mbox;
};
extern struct device_private iproc_priv;
diff --git a/drivers/crypto/bcm/spu2.c b/drivers/crypto/bcm/spu2.c
index ef04c9748317..bf7ac621c591 100644
--- a/drivers/crypto/bcm/spu2.c
+++ b/drivers/crypto/bcm/spu2.c
@@ -302,6 +302,7 @@ spu2_hash_xlate(enum hash_alg hash_alg, enum hash_mode hash_mode,
break;
case HASH_ALG_SHA3_512:
*spu2_type = SPU2_HASH_TYPE_SHA3_512;
+ break;
case HASH_ALG_LAST:
default:
err = -EINVAL;
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 0488b7f81dcf..54f3b375a453 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -81,40 +81,6 @@
#define debug(format, arg...)
#endif
-#ifdef DEBUG
-#include <linux/highmem.h>
-
-static void dbg_dump_sg(const char *level, const char *prefix_str,
- int prefix_type, int rowsize, int groupsize,
- struct scatterlist *sg, size_t tlen, bool ascii)
-{
- struct scatterlist *it;
- void *it_page;
- size_t len;
- void *buf;
-
- for (it = sg; it != NULL && tlen > 0 ; it = sg_next(sg)) {
- /*
- * make sure the scatterlist's page
- * has a valid virtual memory mapping
- */
- it_page = kmap_atomic(sg_page(it));
- if (unlikely(!it_page)) {
- printk(KERN_ERR "dbg_dump_sg: kmap failed\n");
- return;
- }
-
- buf = it_page + it->offset;
- len = min_t(size_t, tlen, it->length);
- print_hex_dump(level, prefix_str, prefix_type, rowsize,
- groupsize, buf, len, ascii);
- tlen -= len;
-
- kunmap_atomic(it_page);
- }
-}
-#endif
-
static struct list_head alg_list;
struct caam_alg_entry {
@@ -898,10 +864,10 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->info,
edesc->src_nents > 1 ? 100 : ivsize, 1);
- dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
- edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif
+ caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+ edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
ablkcipher_unmap(jrdev, edesc, req);
@@ -937,10 +903,10 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->info,
ivsize, 1);
- dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
- edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif
+ caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+ edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
ablkcipher_unmap(jrdev, edesc, req);
@@ -1107,10 +1073,10 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
ivsize, 1);
pr_err("asked=%d, nbytes%d\n",
(int)edesc->src_nents > 1 ? 100 : req->nbytes, req->nbytes);
- dbg_dump_sg(KERN_ERR, "src @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->src,
- edesc->src_nents > 1 ? 100 : req->nbytes, 1);
#endif
+ caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->src,
+ edesc->src_nents > 1 ? 100 : req->nbytes, 1);
len = desc_len(sh_desc);
init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
@@ -1164,10 +1130,10 @@ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->info,
ivsize, 1);
- dbg_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->src,
- edesc->src_nents > 1 ? 100 : req->nbytes, 1);
#endif
+ caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->src,
+ edesc->src_nents > 1 ? 100 : req->nbytes, 1);
len = desc_len(sh_desc);
init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
@@ -1449,11 +1415,9 @@ static int aead_decrypt(struct aead_request *req)
u32 *desc;
int ret = 0;
-#ifdef DEBUG
- dbg_dump_sg(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->src,
- req->assoclen + req->cryptlen, 1);
-#endif
+ caam_dump_sg(KERN_ERR, "dec src@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->src,
+ req->assoclen + req->cryptlen, 1);
/* allocate extended descriptor */
edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
index 6f9c7ec0e339..530c14ee32de 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -599,7 +599,7 @@ void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
/* skip key loading if they are loaded due to sharing */
key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD | JUMP_COND_SELF);
+ JUMP_COND_SHRD);
if (cdata->key_inline)
append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
@@ -688,8 +688,7 @@ void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
/* skip key loading if they are loaded due to sharing */
key_jump_cmd = append_jump(desc, JUMP_JSL |
- JUMP_TEST_ALL | JUMP_COND_SHRD |
- JUMP_COND_SELF);
+ JUMP_TEST_ALL | JUMP_COND_SHRD);
if (cdata->key_inline)
append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index 78c4c0485c58..2eefc4a26bc2 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -12,7 +12,6 @@
#include "intern.h"
#include "desc_constr.h"
#include "error.h"
-#include "sg_sw_sec4.h"
#include "sg_sw_qm.h"
#include "key_gen.h"
#include "qi.h"
@@ -399,6 +398,7 @@ badkey:
* @iv_dma: dma address of iv for checking continuity and link table
* @qm_sg_bytes: length of dma mapped h/w link table
* @qm_sg_dma: bus physical mapped address of h/w link table
+ * @assoclen: associated data length, in CAAM endianness
* @assoclen_dma: bus physical mapped address of req->assoclen
* @drv_req: driver-specific request structure
* @sgt: the h/w link table
@@ -409,8 +409,12 @@ struct aead_edesc {
dma_addr_t iv_dma;
int qm_sg_bytes;
dma_addr_t qm_sg_dma;
+ unsigned int assoclen;
dma_addr_t assoclen_dma;
struct caam_drv_req drv_req;
+#define CAAM_QI_MAX_AEAD_SG \
+ ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) / \
+ sizeof(struct qm_sg_entry))
struct qm_sg_entry sgt[0];
};
@@ -431,6 +435,9 @@ struct ablkcipher_edesc {
int qm_sg_bytes;
dma_addr_t qm_sg_dma;
struct caam_drv_req drv_req;
+#define CAAM_QI_MAX_ABLKCIPHER_SG \
+ ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \
+ sizeof(struct qm_sg_entry))
struct qm_sg_entry sgt[0];
};
@@ -660,6 +667,14 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
*/
qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
(mapped_dst_nents > 1 ? mapped_dst_nents : 0);
+ if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) {
+ dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
+ qm_sg_ents, CAAM_QI_MAX_AEAD_SG);
+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, op_type, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
sg_table = &edesc->sgt[0];
qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
@@ -670,7 +685,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
edesc->drv_req.cbk = aead_done;
edesc->drv_req.drv_ctx = drv_ctx;
- edesc->assoclen_dma = dma_map_single(qidev, &req->assoclen, 4,
+ edesc->assoclen = cpu_to_caam32(req->assoclen);
+ edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
DMA_TO_DEVICE);
if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
dev_err(qidev, "unable to map assoclen\n");
@@ -776,9 +792,9 @@ static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher);
struct device *qidev = caam_ctx->qidev;
-#ifdef DEBUG
int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+#ifdef DEBUG
dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
#endif
@@ -791,14 +807,21 @@ static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->info,
edesc->src_nents > 1 ? 100 : ivsize, 1);
- dbg_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
- edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+ caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+ edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif
ablkcipher_unmap(qidev, edesc, req);
qi_cache_free(edesc);
+ /*
+ * The crypto API expects us to set the IV (req->info) to the last
+ * ciphertext block. This is used e.g. by the CTS mode.
+ */
+ scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
+ ivsize, 0);
+
ablkcipher_request_complete(req, status);
}
@@ -880,6 +903,15 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
}
dst_sg_idx = qm_sg_ents;
+ qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
+ if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
+ dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
+ qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, op_type, 0, 0);
+ return ERR_PTR(-ENOMEM);
+ }
+
/* allocate space for base edesc and link tables */
edesc = qi_cache_alloc(GFP_DMA | flags);
if (unlikely(!edesc)) {
@@ -892,7 +924,6 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
edesc->src_nents = src_nents;
edesc->dst_nents = dst_nents;
edesc->iv_dma = iv_dma;
- qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
sg_table = &edesc->sgt[0];
edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
edesc->drv_req.app_ctx = req;
@@ -1026,6 +1057,14 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
qm_sg_ents += 1 + mapped_dst_nents;
}
+ if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
+ dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
+ qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, GIVENCRYPT, 0, 0);
+ return ERR_PTR(-ENOMEM);
+ }
+
/* allocate space for base edesc and link tables */
edesc = qi_cache_alloc(GFP_DMA | flags);
if (!edesc) {
@@ -1968,7 +2007,7 @@ static struct caam_aead_alg driver_aeads[] = {
.cra_name = "echainiv(authenc(hmac(sha256),"
"cbc(des)))",
.cra_driver_name = "echainiv-authenc-"
- "hmac-sha256-cbc-desi-"
+ "hmac-sha256-cbc-des-"
"caam-qi",
.cra_blocksize = DES_BLOCK_SIZE,
},
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 910ec61cae09..698580b60b2f 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -791,8 +791,8 @@ static int ahash_update_ctx(struct ahash_request *req)
to_hash - *buflen,
*next_buflen, 0);
} else {
- (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
- cpu_to_caam32(SEC4_SG_LEN_FIN);
+ sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
+ 1);
}
desc = edesc->hw_desc;
@@ -882,8 +882,7 @@ static int ahash_final_ctx(struct ahash_request *req)
if (ret)
goto unmap_ctx;
- (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
- cpu_to_caam32(SEC4_SG_LEN_FIN);
+ sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1);
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
sec4_sg_bytes, DMA_TO_DEVICE);
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index 41398da3edf4..fde07d4ff019 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -285,11 +285,7 @@ static int caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev)
if (err)
return err;
- err = caam_init_buf(ctx, 1);
- if (err)
- return err;
-
- return 0;
+ return caam_init_buf(ctx, 1);
}
static struct hwrng caam_rng = {
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index dd353e342c12..dacb53fb690e 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -17,6 +17,8 @@
bool caam_little_end;
EXPORT_SYMBOL(caam_little_end);
+bool caam_dpaa2;
+EXPORT_SYMBOL(caam_dpaa2);
#ifdef CONFIG_CAAM_QI
#include "qi.h"
@@ -319,8 +321,11 @@ static int caam_remove(struct platform_device *pdev)
caam_qi_shutdown(ctrlpriv->qidev);
#endif
- /* De-initialize RNG state handles initialized by this driver. */
- if (ctrlpriv->rng4_sh_init)
+ /*
+ * De-initialize RNG state handles initialized by this driver.
+ * In case of DPAA 2.x, RNG is managed by MC firmware.
+ */
+ if (!caam_dpaa2 && ctrlpriv->rng4_sh_init)
deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
/* Shut down debug views */
@@ -444,7 +449,6 @@ static int caam_probe(struct platform_device *pdev)
dev = &pdev->dev;
dev_set_drvdata(dev, ctrlpriv);
- ctrlpriv->pdev = pdev;
nprop = pdev->dev.of_node;
/* Enable clocking */
@@ -553,12 +557,17 @@ static int caam_probe(struct platform_device *pdev)
/*
* Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
- * long pointers in master configuration register
+ * long pointers in master configuration register.
+ * In case of DPAA 2.x, Management Complex firmware performs
+ * the configuration.
*/
- clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
- MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
- MCFGR_WDENABLE | MCFGR_LARGE_BURST |
- (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
+ caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
+ if (!caam_dpaa2)
+ clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
+ MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
+ MCFGR_WDENABLE | MCFGR_LARGE_BURST |
+ (sizeof(dma_addr_t) == sizeof(u64) ?
+ MCFGR_LONG_PTR : 0));
/*
 * Read the Compile Time parameters and SCFGR to determine
@@ -587,7 +596,9 @@ static int caam_probe(struct platform_device *pdev)
JRSTART_JR3_START);
if (sizeof(dma_addr_t) == sizeof(u64)) {
- if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
+ if (caam_dpaa2)
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
+ else if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
else
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
@@ -630,11 +641,9 @@ static int caam_probe(struct platform_device *pdev)
ring++;
}
- /* Check to see if QI present. If so, enable */
- ctrlpriv->qi_present =
- !!(rd_reg32(&ctrl->perfmon.comp_parms_ms) &
- CTPR_MS_QI_MASK);
- if (ctrlpriv->qi_present) {
+ /* Check to see if (DPAA 1.x) QI present. If so, enable */
+ ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
+ if (ctrlpriv->qi_present && !caam_dpaa2) {
ctrlpriv->qi = (struct caam_queue_if __iomem __force *)
((__force uint8_t *)ctrl +
BLOCK_OFFSET * QI_BLOCK_NUMBER
@@ -662,8 +671,10 @@ static int caam_probe(struct platform_device *pdev)
/*
* If SEC has RNG version >= 4 and RNG state handle has not been
* already instantiated, do RNG instantiation
+ * In case of DPAA 2.x, RNG is managed by MC firmware.
*/
- if ((cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
+ if (!caam_dpaa2 &&
+ (cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
ctrlpriv->rng4_sh_init =
rd_reg32(&ctrl->r4tst[0].rdsta);
/*
@@ -731,63 +742,43 @@ static int caam_probe(struct platform_device *pdev)
/* Report "alive" for developer to see */
dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
caam_get_era());
- dev_info(dev, "job rings = %d, qi = %d\n",
- ctrlpriv->total_jobrs, ctrlpriv->qi_present);
+ dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n",
+ ctrlpriv->total_jobrs, ctrlpriv->qi_present,
+ caam_dpaa2 ? "yes" : "no");
#ifdef CONFIG_DEBUG_FS
-
- ctrlpriv->ctl_rq_dequeued =
- debugfs_create_file("rq_dequeued",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->req_dequeued,
- &caam_fops_u64_ro);
- ctrlpriv->ctl_ob_enc_req =
- debugfs_create_file("ob_rq_encrypted",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ob_enc_req,
- &caam_fops_u64_ro);
- ctrlpriv->ctl_ib_dec_req =
- debugfs_create_file("ib_rq_decrypted",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ib_dec_req,
- &caam_fops_u64_ro);
- ctrlpriv->ctl_ob_enc_bytes =
- debugfs_create_file("ob_bytes_encrypted",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ob_enc_bytes,
- &caam_fops_u64_ro);
- ctrlpriv->ctl_ob_prot_bytes =
- debugfs_create_file("ob_bytes_protected",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ob_prot_bytes,
- &caam_fops_u64_ro);
- ctrlpriv->ctl_ib_dec_bytes =
- debugfs_create_file("ib_bytes_decrypted",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ib_dec_bytes,
- &caam_fops_u64_ro);
- ctrlpriv->ctl_ib_valid_bytes =
- debugfs_create_file("ib_bytes_validated",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->ib_valid_bytes,
- &caam_fops_u64_ro);
+ debugfs_create_file("rq_dequeued", S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->req_dequeued,
+ &caam_fops_u64_ro);
+ debugfs_create_file("ob_rq_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->ob_enc_req,
+ &caam_fops_u64_ro);
+ debugfs_create_file("ib_rq_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->ib_dec_req,
+ &caam_fops_u64_ro);
+ debugfs_create_file("ob_bytes_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->ob_enc_bytes,
+ &caam_fops_u64_ro);
+ debugfs_create_file("ob_bytes_protected", S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->ob_prot_bytes,
+ &caam_fops_u64_ro);
+ debugfs_create_file("ib_bytes_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->ib_dec_bytes,
+ &caam_fops_u64_ro);
+ debugfs_create_file("ib_bytes_validated", S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->ib_valid_bytes,
+ &caam_fops_u64_ro);
/* Controller level - global status values */
- ctrlpriv->ctl_faultaddr =
- debugfs_create_file("fault_addr",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->faultaddr,
- &caam_fops_u32_ro);
- ctrlpriv->ctl_faultdetail =
- debugfs_create_file("fault_detail",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->faultdetail,
- &caam_fops_u32_ro);
- ctrlpriv->ctl_faultstatus =
- debugfs_create_file("fault_status",
- S_IRUSR | S_IRGRP | S_IROTH,
- ctrlpriv->ctl, &perfmon->status,
- &caam_fops_u32_ro);
+ debugfs_create_file("fault_addr", S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->faultaddr,
+ &caam_fops_u32_ro);
+ debugfs_create_file("fault_detail", S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->faultdetail,
+ &caam_fops_u32_ro);
+ debugfs_create_file("fault_status", S_IRUSR | S_IRGRP | S_IROTH,
+ ctrlpriv->ctl, &perfmon->status,
+ &caam_fops_u32_ro);
/* Internal covering keys (useful in non-secure mode only) */
ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0];
diff --git a/drivers/crypto/caam/ctrl.h b/drivers/crypto/caam/ctrl.h
index cac5402a46eb..7e7bf68c9ef5 100644
--- a/drivers/crypto/caam/ctrl.h
+++ b/drivers/crypto/caam/ctrl.h
@@ -10,4 +10,6 @@
/* Prototypes for backend-level services exposed to APIs */
int caam_get_era(void);
+extern bool caam_dpaa2;
+
#endif /* CTRL_H */
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 6f44ccb55c63..3d639f3b45aa 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -9,6 +9,46 @@
#include "desc.h"
#include "error.h"
+#ifdef DEBUG
+#include <linux/highmem.h>
+
+void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
+ int rowsize, int groupsize, struct scatterlist *sg,
+ size_t tlen, bool ascii)
+{
+ struct scatterlist *it;
+ void *it_page;
+ size_t len;
+ void *buf;
+
+ for (it = sg; it && tlen > 0 ; it = sg_next(sg)) {
+ /*
+ * make sure the scatterlist's page
+ * has a valid virtual memory mapping
+ */
+ it_page = kmap_atomic(sg_page(it));
+ if (unlikely(!it_page)) {
+ pr_err("caam_dump_sg: kmap failed\n");
+ return;
+ }
+
+ buf = it_page + it->offset;
+ len = min_t(size_t, tlen, it->length);
+ print_hex_dump(level, prefix_str, prefix_type, rowsize,
+ groupsize, buf, len, ascii);
+ tlen -= len;
+
+ kunmap_atomic(it_page);
+ }
+}
+#else
+void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
+ int rowsize, int groupsize, struct scatterlist *sg,
+ size_t tlen, bool ascii)
+{}
+#endif /* DEBUG */
+EXPORT_SYMBOL(caam_dump_sg);
+
static const struct {
u8 value;
const char *error_text;
diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h
index b6350b0d9153..250e1a21c473 100644
--- a/drivers/crypto/caam/error.h
+++ b/drivers/crypto/caam/error.h
@@ -8,4 +8,8 @@
#define CAAM_ERROR_H
#define CAAM_ERROR_STR_MAX 302
void caam_jr_strstatus(struct device *jrdev, u32 status);
+
+void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
+ int rowsize, int groupsize, struct scatterlist *sg,
+ size_t tlen, bool ascii);
#endif /* CAAM_ERROR_H */
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 85b6c5835b8f..a52361258d3a 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -64,12 +64,9 @@ struct caam_drv_private_jr {
* Driver-private storage for a single CAAM block instance
*/
struct caam_drv_private {
-
- struct device *dev;
#ifdef CONFIG_CAAM_QI
struct device *qidev;
#endif
- struct platform_device *pdev;
/* Physical-presence section */
struct caam_ctrl __iomem *ctrl; /* controller region */
@@ -105,16 +102,8 @@ struct caam_drv_private {
#ifdef CONFIG_DEBUG_FS
struct dentry *dfs_root;
struct dentry *ctl; /* controller dir */
- struct dentry *ctl_rq_dequeued, *ctl_ob_enc_req, *ctl_ib_dec_req;
- struct dentry *ctl_ob_enc_bytes, *ctl_ob_prot_bytes;
- struct dentry *ctl_ib_dec_bytes, *ctl_ib_valid_bytes;
- struct dentry *ctl_faultaddr, *ctl_faultdetail, *ctl_faultstatus;
-
struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap;
struct dentry *ctl_kek, *ctl_tkek, *ctl_tdsk;
-#ifdef CONFIG_CAAM_QI
- struct dentry *qi_congested;
-#endif
#endif
};
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 1ccfb317d468..d258953ff488 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -9,6 +9,7 @@
#include <linux/of_address.h>
#include "compat.h"
+#include "ctrl.h"
#include "regs.h"
#include "jr.h"
#include "desc.h"
@@ -499,7 +500,11 @@ static int caam_jr_probe(struct platform_device *pdev)
jrpriv->rregs = (struct caam_job_ring __iomem __force *)ctrl;
if (sizeof(dma_addr_t) == sizeof(u64)) {
- if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring"))
+ if (caam_dpaa2)
+ error = dma_set_mask_and_coherent(jrdev,
+ DMA_BIT_MASK(49));
+ else if (of_device_is_compatible(nprop,
+ "fsl,sec-v5.0-job-ring"))
error = dma_set_mask_and_coherent(jrdev,
DMA_BIT_MASK(40));
else
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index 1990ed460c46..e4cf00014233 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -24,9 +24,6 @@
*/
#define MAX_RSP_FQ_BACKLOG_PER_CPU 256
-/* Length of a single buffer in the QI driver memory cache */
-#define CAAM_QI_MEMCACHE_SIZE 512
-
#define CAAM_QI_ENQUEUE_RETRIES 10000
#define CAAM_NAPI_WEIGHT 63
@@ -55,6 +52,7 @@ struct caam_qi_pcpu_priv {
} ____cacheline_aligned;
static DEFINE_PER_CPU(struct caam_qi_pcpu_priv, pcpu_qipriv);
+static DEFINE_PER_CPU(int, last_cpu);
/*
* caam_qi_priv - CAAM QI backend private params
@@ -203,8 +201,8 @@ static struct qman_fq *create_caam_req_fq(struct device *qidev,
goto init_req_fq_fail;
}
- dev_info(qidev, "Allocated request FQ %u for CPU %u\n", req_fq->fqid,
- smp_processor_id());
+ dev_dbg(qidev, "Allocated request FQ %u for CPU %u\n", req_fq->fqid,
+ smp_processor_id());
return req_fq;
init_req_fq_fail:
@@ -277,6 +275,7 @@ empty_fq:
dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid);
qman_destroy_fq(fq);
+ kfree(fq);
return ret;
}
@@ -342,8 +341,7 @@ int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
drv_ctx->req_fq = old_fq;
if (kill_fq(qidev, new_fq))
- dev_warn(qidev, "New CAAM FQ: %u kill failed\n",
- new_fq->fqid);
+ dev_warn(qidev, "New CAAM FQ kill failed\n");
return ret;
}
@@ -373,10 +371,9 @@ int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
drv_ctx->req_fq = old_fq;
if (kill_fq(qidev, new_fq))
- dev_warn(qidev, "New CAAM FQ: %u kill failed\n",
- new_fq->fqid);
+ dev_warn(qidev, "New CAAM FQ kill failed\n");
} else if (kill_fq(qidev, old_fq)) {
- dev_warn(qidev, "Old CAAM FQ: %u kill failed\n", old_fq->fqid);
+ dev_warn(qidev, "Old CAAM FQ kill failed\n");
}
return 0;
@@ -392,7 +389,6 @@ struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
dma_addr_t hwdesc;
struct caam_drv_ctx *drv_ctx;
const cpumask_t *cpus = qman_affine_cpus();
- static DEFINE_PER_CPU(int, last_cpu);
num_words = desc_len(sh_desc);
if (num_words > MAX_SDLEN) {
@@ -511,7 +507,6 @@ int caam_qi_shutdown(struct device *qidev)
if (kill_fq(qidev, per_cpu(pcpu_qipriv.rsp_fq, i)))
dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
- kfree(per_cpu(pcpu_qipriv.rsp_fq, i));
}
/*
@@ -646,7 +641,7 @@ static int alloc_rsp_fq_cpu(struct device *qidev, unsigned int cpu)
per_cpu(pcpu_qipriv.rsp_fq, cpu) = fq;
- dev_info(qidev, "Allocated response FQ %u for CPU %u", fq->fqid, cpu);
+ dev_dbg(qidev, "Allocated response FQ %u for CPU %u", fq->fqid, cpu);
return 0;
}
@@ -679,7 +674,7 @@ static int init_cgr(struct device *qidev)
return ret;
}
- dev_info(qidev, "Congestion threshold set to %llu\n", val);
+ dev_dbg(qidev, "Congestion threshold set to %llu\n", val);
return 0;
}
@@ -737,6 +732,7 @@ int caam_qi_init(struct platform_device *caam_pdev)
qi_pdev = platform_device_register_full(&qi_pdev_info);
if (IS_ERR(qi_pdev))
return PTR_ERR(qi_pdev);
+ set_dma_ops(&qi_pdev->dev, get_dma_ops(ctrldev));
ctrlpriv = dev_get_drvdata(ctrldev);
qidev = &qi_pdev->dev;
@@ -795,10 +791,8 @@ int caam_qi_init(struct platform_device *caam_pdev)
/* Done with the CGRs; restore the cpus allowed mask */
set_cpus_allowed_ptr(current, &old_cpumask);
#ifdef CONFIG_DEBUG_FS
- ctrlpriv->qi_congested = debugfs_create_file("qi_congested", 0444,
- ctrlpriv->ctl,
- &times_congested,
- &caam_fops_u64_ro);
+ debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl,
+ &times_congested, &caam_fops_u64_ro);
#endif
dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
return 0;
diff --git a/drivers/crypto/caam/qi.h b/drivers/crypto/caam/qi.h
index 33b0433f5f22..ecb21f207637 100644
--- a/drivers/crypto/caam/qi.h
+++ b/drivers/crypto/caam/qi.h
@@ -39,6 +39,9 @@
*/
#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
+/* Length of a single buffer in the QI driver memory cache */
+#define CAAM_QI_MEMCACHE_SIZE 768
+
extern bool caam_congested __read_mostly;
/*
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index 84d2f838a063..2b5efff9ec3c 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -293,6 +293,7 @@ struct caam_perfmon {
u32 cha_rev_ls; /* CRNR - CHA Rev No. Least significant half*/
#define CTPR_MS_QI_SHIFT 25
#define CTPR_MS_QI_MASK (0x1ull << CTPR_MS_QI_SHIFT)
+#define CTPR_MS_DPAA2 BIT(13)
#define CTPR_MS_VIRT_EN_INCL 0x00000001
#define CTPR_MS_VIRT_EN_POR 0x00000002
#define CTPR_MS_PG_SZ_MASK 0x10
diff --git a/drivers/crypto/caam/sg_sw_qm2.h b/drivers/crypto/caam/sg_sw_qm2.h
new file mode 100644
index 000000000000..31b440757146
--- /dev/null
+++ b/drivers/crypto/caam/sg_sw_qm2.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the names of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SG_SW_QM2_H_
+#define _SG_SW_QM2_H_
+
+#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
+
+static inline void dma_to_qm_sg_one(struct dpaa2_sg_entry *qm_sg_ptr,
+ dma_addr_t dma, u32 len, u16 offset)
+{
+ dpaa2_sg_set_addr(qm_sg_ptr, dma);
+ dpaa2_sg_set_format(qm_sg_ptr, dpaa2_sg_single);
+ dpaa2_sg_set_final(qm_sg_ptr, false);
+ dpaa2_sg_set_len(qm_sg_ptr, len);
+ dpaa2_sg_set_bpid(qm_sg_ptr, 0);
+ dpaa2_sg_set_offset(qm_sg_ptr, offset);
+}
+
+/*
+ * convert scatterlist to h/w link table format
+ * but does not have final bit; instead, returns last entry
+ */
+static inline struct dpaa2_sg_entry *
+sg_to_qm_sg(struct scatterlist *sg, int sg_count,
+ struct dpaa2_sg_entry *qm_sg_ptr, u16 offset)
+{
+ while (sg_count && sg) {
+ dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
+ sg_dma_len(sg), offset);
+ qm_sg_ptr++;
+ sg = sg_next(sg);
+ sg_count--;
+ }
+ return qm_sg_ptr - 1;
+}
+
+/*
+ * convert scatterlist to h/w link table format
+ * scatterlist must have been previously dma mapped
+ */
+static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
+ struct dpaa2_sg_entry *qm_sg_ptr,
+ u16 offset)
+{
+ qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
+ dpaa2_sg_set_final(qm_sg_ptr, true);
+}
+
+#endif /* _SG_SW_QM2_H_ */
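A hedged usage sketch for the new header (dev, src and total_len are assumed caller-side variables, and the table allocation is only illustrative): map the scatterlist for DMA, then emit a DPAA2 S/G table whose last entry carries the final bit.

	struct dpaa2_sg_entry *table;
	int nents;

	nents = sg_nents_for_len(src, total_len);
	if (nents < 0)
		return nents;

	nents = dma_map_sg(dev, src, nents, DMA_TO_DEVICE);
	if (!nents)
		return -ENOMEM;

	/* Hypothetical table buffer; real callers place this in a
	 * DMA-able descriptor area.
	 */
	table = kcalloc(nents, sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	sg_to_qm_sg_last(src, nents, table, 0);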
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
index c6adad09c972..936b1b630058 100644
--- a/drivers/crypto/caam/sg_sw_sec4.h
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -5,7 +5,13 @@
*
*/
+#ifndef _SG_SW_SEC4_H_
+#define _SG_SW_SEC4_H_
+
+#include "ctrl.h"
#include "regs.h"
+#include "sg_sw_qm2.h"
+#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
struct sec4_sg_entry {
u64 ptr;
@@ -19,9 +25,15 @@ struct sec4_sg_entry {
static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
dma_addr_t dma, u32 len, u16 offset)
{
- sec4_sg_ptr->ptr = cpu_to_caam_dma64(dma);
- sec4_sg_ptr->len = cpu_to_caam32(len);
- sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset & SEC4_SG_OFFSET_MASK);
+ if (caam_dpaa2) {
+ dma_to_qm_sg_one((struct dpaa2_sg_entry *)sec4_sg_ptr, dma, len,
+ offset);
+ } else {
+ sec4_sg_ptr->ptr = cpu_to_caam_dma64(dma);
+ sec4_sg_ptr->len = cpu_to_caam32(len);
+ sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset &
+ SEC4_SG_OFFSET_MASK);
+ }
#ifdef DEBUG
print_hex_dump(KERN_ERR, "sec4_sg_ptr@: ",
DUMP_PREFIX_ADDRESS, 16, 4, sec4_sg_ptr,
@@ -47,6 +59,14 @@ sg_to_sec4_sg(struct scatterlist *sg, int sg_count,
return sec4_sg_ptr - 1;
}
+static inline void sg_to_sec4_set_last(struct sec4_sg_entry *sec4_sg_ptr)
+{
+ if (caam_dpaa2)
+ dpaa2_sg_set_final((struct dpaa2_sg_entry *)sec4_sg_ptr, true);
+ else
+ sec4_sg_ptr->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
+}
+
/*
* convert scatterlist to h/w link table format
* scatterlist must have been previously dma mapped
@@ -56,20 +76,7 @@ static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int sg_count,
u16 offset)
{
sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset);
- sec4_sg_ptr->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
+ sg_to_sec4_set_last(sec4_sg_ptr);
}
-static inline struct sec4_sg_entry *sg_to_sec4_sg_len(
- struct scatterlist *sg, unsigned int total,
- struct sec4_sg_entry *sec4_sg_ptr)
-{
- do {
- unsigned int len = min(sg_dma_len(sg), total);
-
- dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg), len, 0);
- sec4_sg_ptr++;
- sg = sg_next(sg);
- total -= len;
- } while (total);
- return sec4_sg_ptr - 1;
-}
+#endif /* _SG_SW_SEC4_H_ */
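As a sketch of the runtime dispatch introduced above (edesc, src and mapped_nents are assumed caller-side names): building a table with sg_to_sec4_sg() and closing it with sg_to_sec4_set_last() now terminates the table either with SEC4_SG_LEN_FIN or, on DPAA2 parts, with the dpaa2_sg_entry final bit, depending on caam_dpaa2.

	struct sec4_sg_entry *last;

	/* Entries were written in whichever format caam_dpaa2 selects;
	 * terminate the table the same way.
	 */
	last = sg_to_sec4_sg(src, mapped_nents, edesc->sec4_sg, 0);
	sg_to_sec4_set_last(last);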
diff --git a/drivers/crypto/cavium/cpt/cptpf_main.c b/drivers/crypto/cavium/cpt/cptpf_main.c
index 4119c40e7c4b..34a6d8bf229e 100644
--- a/drivers/crypto/cavium/cpt/cptpf_main.c
+++ b/drivers/crypto/cavium/cpt/cptpf_main.c
@@ -268,8 +268,10 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
mcode = &cpt->mcode[cpt->next_mc_idx];
memcpy(mcode->version, (u8 *)fw_entry->data, CPT_UCODE_VERSION_SZ);
mcode->code_size = ntohl(ucode->code_length) * 2;
- if (!mcode->code_size)
- return -EINVAL;
+ if (!mcode->code_size) {
+ ret = -EINVAL;
+ goto fw_release;
+ }
mcode->is_ae = is_ae;
mcode->core_mask = 0ULL;
@@ -280,7 +282,8 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
&mcode->phys_base, GFP_KERNEL);
if (!mcode->code) {
dev_err(dev, "Unable to allocate space for microcode");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto fw_release;
}
memcpy((void *)mcode->code, (void *)(fw_entry->data + sizeof(*ucode)),
@@ -302,12 +305,14 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
ret = do_cpt_init(cpt, mcode);
if (ret) {
dev_err(dev, "do_cpt_init failed with ret: %d\n", ret);
- return ret;
+ goto fw_release;
}
dev_info(dev, "Microcode Loaded %s\n", mcode->version);
mcode->is_mc_valid = 1;
cpt->next_mc_idx++;
+
+fw_release:
release_firmware(fw_entry);
return ret;
diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c
index ae44a464cd2d..fee7cb2ce747 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_main.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
@@ -18,8 +18,9 @@
#define SE_GROUP 0
#define DRIVER_VERSION "1.0"
+#define FW_DIR "cavium/"
/* SE microcode */
-#define SE_FW "cnn55xx_se.fw"
+#define SE_FW FW_DIR "cnn55xx_se.fw"
static const char nitrox_driver_name[] = "CNN55XX";
@@ -512,8 +513,10 @@ static int nitrox_probe(struct pci_dev *pdev,
pci_set_master(pdev);
ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
- if (!ndev)
+ if (!ndev) {
+ err = -ENOMEM;
goto ndev_fail;
+ }
pci_set_drvdata(pdev, ndev);
ndev->pdev = pdev;
diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig
index 2238f77aa248..6d626606b9c5 100644
--- a/drivers/crypto/ccp/Kconfig
+++ b/drivers/crypto/ccp/Kconfig
@@ -1,25 +1,33 @@
config CRYPTO_DEV_CCP_DD
- tristate "Cryptographic Coprocessor device driver"
- depends on CRYPTO_DEV_CCP
+ tristate "Secure Processor device driver"
default m
+ help
+ Provides AMD Secure Processor device driver.
+ If you choose 'M' here, this module will be called ccp.
+
+config CRYPTO_DEV_SP_CCP
+ bool "Cryptographic Coprocessor device"
+ default y
+ depends on CRYPTO_DEV_CCP_DD
select HW_RANDOM
select DMA_ENGINE
select DMADEVICES
select CRYPTO_SHA1
select CRYPTO_SHA256
help
- Provides the interface to use the AMD Cryptographic Coprocessor
- which can be used to offload encryption operations such as SHA,
- AES and more. If you choose 'M' here, this module will be called
- ccp.
+ Provides the support for AMD Cryptographic Coprocessor (CCP) device
+ which can be used to offload encryption operations such as SHA, AES
+ and more.
config CRYPTO_DEV_CCP_CRYPTO
tristate "Encryption and hashing offload support"
- depends on CRYPTO_DEV_CCP_DD
default m
+ depends on CRYPTO_DEV_CCP_DD
+ depends on CRYPTO_DEV_SP_CCP
select CRYPTO_HASH
select CRYPTO_BLKCIPHER
select CRYPTO_AUTHENC
+ select CRYPTO_RSA
help
Support for using the cryptographic API with the AMD Cryptographic
Coprocessor. This module supports offload of SHA and AES algorithms.
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index 59493fd3a751..57f8debfcfb3 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -1,12 +1,12 @@
obj-$(CONFIG_CRYPTO_DEV_CCP_DD) += ccp.o
-ccp-objs := ccp-dev.o \
+ccp-objs := sp-dev.o sp-platform.o
+ccp-$(CONFIG_CRYPTO_DEV_SP_CCP) += ccp-dev.o \
ccp-ops.o \
ccp-dev-v3.o \
ccp-dev-v5.o \
- ccp-platform.o \
ccp-dmaengine.o \
ccp-debugfs.o
-ccp-$(CONFIG_PCI) += ccp-pci.o
+ccp-$(CONFIG_PCI) += sp-pci.o
obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
ccp-crypto-objs := ccp-crypto-main.o \
@@ -15,4 +15,5 @@ ccp-crypto-objs := ccp-crypto-main.o \
ccp-crypto-aes-xts.o \
ccp-crypto-aes-galois.o \
ccp-crypto-des3.o \
+ ccp-crypto-rsa.o \
ccp-crypto-sha.o
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
index 38ee6f348ea9..52313524a4dd 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) AES GCM crypto API support
*
- * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ * Copyright (C) 2016,2017 Advanced Micro Devices, Inc.
*
* Author: Gary R Hook <gary.hook@amd.com>
*
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
index 58a4244b4752..94b5bcf5b628 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
@@ -1,8 +1,9 @@
/*
* AMD Cryptographic Coprocessor (CCP) AES XTS crypto API support
*
- * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
*
+ * Author: Gary R Hook <gary.hook@amd.com>
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -15,6 +16,7 @@
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <crypto/aes.h>
+#include <crypto/xts.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
@@ -37,46 +39,26 @@ struct ccp_unit_size_map {
u32 value;
};
-static struct ccp_unit_size_map unit_size_map[] = {
+static struct ccp_unit_size_map xts_unit_sizes[] = {
{
- .size = 4096,
- .value = CCP_XTS_AES_UNIT_SIZE_4096,
- },
- {
- .size = 2048,
- .value = CCP_XTS_AES_UNIT_SIZE_2048,
- },
- {
- .size = 1024,
- .value = CCP_XTS_AES_UNIT_SIZE_1024,
+ .size = 16,
+ .value = CCP_XTS_AES_UNIT_SIZE_16,
},
{
- .size = 512,
+ .size = 512,
.value = CCP_XTS_AES_UNIT_SIZE_512,
},
{
- .size = 256,
- .value = CCP_XTS_AES_UNIT_SIZE__LAST,
- },
- {
- .size = 128,
- .value = CCP_XTS_AES_UNIT_SIZE__LAST,
- },
- {
- .size = 64,
- .value = CCP_XTS_AES_UNIT_SIZE__LAST,
- },
- {
- .size = 32,
- .value = CCP_XTS_AES_UNIT_SIZE__LAST,
+ .size = 1024,
+ .value = CCP_XTS_AES_UNIT_SIZE_1024,
},
{
- .size = 16,
- .value = CCP_XTS_AES_UNIT_SIZE_16,
+ .size = 2048,
+ .value = CCP_XTS_AES_UNIT_SIZE_2048,
},
{
- .size = 1,
- .value = CCP_XTS_AES_UNIT_SIZE__LAST,
+ .size = 4096,
+ .value = CCP_XTS_AES_UNIT_SIZE_4096,
},
};
@@ -96,15 +78,26 @@ static int ccp_aes_xts_complete(struct crypto_async_request *async_req, int ret)
static int ccp_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int key_len)
{
- struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));
+ struct crypto_tfm *xfm = crypto_ablkcipher_tfm(tfm);
+ struct ccp_ctx *ctx = crypto_tfm_ctx(xfm);
+ unsigned int ccpversion = ccp_version();
+ int ret;
- /* Only support 128-bit AES key with a 128-bit Tweak key,
- * otherwise use the fallback
+ ret = xts_check_key(xfm, key, key_len);
+ if (ret)
+ return ret;
+
+ /* Version 3 devices support 128-bit keys; version 5 devices can
+ * accommodate 128- and 256-bit keys.
*/
switch (key_len) {
case AES_KEYSIZE_128 * 2:
memcpy(ctx->u.aes.key, key, key_len);
break;
+ case AES_KEYSIZE_256 * 2:
+ if (ccpversion > CCP_VERSION(3, 0))
+ memcpy(ctx->u.aes.key, key, key_len);
+ break;
}
ctx->u.aes.key_len = key_len / 2;
sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);
@@ -117,6 +110,8 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
{
struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+ unsigned int ccpversion = ccp_version();
+ unsigned int fallback = 0;
unsigned int unit;
u32 unit_size;
int ret;
@@ -130,18 +125,32 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
if (!req->info)
return -EINVAL;
+ /* Check conditions under which the CCP can fulfill a request. The
+ * device can handle input plaintext of a length that is a multiple
+ * of the unit_size, but the crypto implementation only supports
+ * the unit_size being equal to the input length. This limits the
+ * number of scenarios we can handle.
+ */
unit_size = CCP_XTS_AES_UNIT_SIZE__LAST;
- if (req->nbytes <= unit_size_map[0].size) {
- for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++) {
- if (!(req->nbytes & (unit_size_map[unit].size - 1))) {
- unit_size = unit_size_map[unit].value;
- break;
- }
+ for (unit = 0; unit < ARRAY_SIZE(xts_unit_sizes); unit++) {
+ if (req->nbytes == xts_unit_sizes[unit].size) {
+ unit_size = unit;
+ break;
}
}
-
- if ((unit_size == CCP_XTS_AES_UNIT_SIZE__LAST) ||
- (ctx->u.aes.key_len != AES_KEYSIZE_128)) {
+ /* The CCP has restrictions on block sizes. Also, a version 3 device
+ * only supports AES-128 operations; version 5 CCPs support both
+ * AES-128 and -256 operations.
+ */
+ if (unit_size == CCP_XTS_AES_UNIT_SIZE__LAST)
+ fallback = 1;
+ if ((ccpversion < CCP_VERSION(5, 0)) &&
+ (ctx->u.aes.key_len != AES_KEYSIZE_128))
+ fallback = 1;
+ if ((ctx->u.aes.key_len != AES_KEYSIZE_128) &&
+ (ctx->u.aes.key_len != AES_KEYSIZE_256))
+ fallback = 1;
+ if (fallback) {
SKCIPHER_REQUEST_ON_STACK(subreq, ctx->u.aes.tfm_skcipher);
/* Use the fallback to process the request for any
@@ -164,6 +173,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
memset(&rctx->cmd, 0, sizeof(rctx->cmd));
INIT_LIST_HEAD(&rctx->cmd.entry);
rctx->cmd.engine = CCP_ENGINE_XTS_AES_128;
+ rctx->cmd.u.xts.type = CCP_AES_TYPE_128;
rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT
: CCP_AES_ACTION_DECRYPT;
rctx->cmd.u.xts.unit_size = unit_size;
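The three fallback conditions above can be read as a single policy; a condensed sketch (not part of the patch, helper name assumed) of the same logic:

	/* Fall back to the software skcipher unless the request length matches
	 * a supported unit size and the key width is valid for this CCP.
	 */
	static bool ccp_xts_needs_fallback(unsigned int unit_size,
					   unsigned int key_len,
					   unsigned int ccpversion)
	{
		if (unit_size == CCP_XTS_AES_UNIT_SIZE__LAST)
			return true;
		if (ccpversion < CCP_VERSION(5, 0) && key_len != AES_KEYSIZE_128)
			return true;
		return key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_256;
	}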
diff --git a/drivers/crypto/ccp/ccp-crypto-des3.c b/drivers/crypto/ccp/ccp-crypto-des3.c
index 5af7347ae03c..ae87b741f9d5 100644
--- a/drivers/crypto/ccp/ccp-crypto-des3.c
+++ b/drivers/crypto/ccp/ccp-crypto-des3.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) DES3 crypto API support
*
- * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ * Copyright (C) 2016,2017 Advanced Micro Devices, Inc.
*
* Author: Gary R Hook <ghook@amd.com>
*
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index 8dccbddabef1..35a9de7fd475 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) crypto API support
*
- * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*
@@ -17,6 +17,7 @@
#include <linux/ccp.h>
#include <linux/scatterlist.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/akcipher.h>
#include "ccp-crypto.h"
@@ -37,10 +38,15 @@ static unsigned int des3_disable;
module_param(des3_disable, uint, 0444);
MODULE_PARM_DESC(des3_disable, "Disable use of 3DES - any non-zero value");
+static unsigned int rsa_disable;
+module_param(rsa_disable, uint, 0444);
+MODULE_PARM_DESC(rsa_disable, "Disable use of RSA - any non-zero value");
+
/* List heads for the supported algorithms */
static LIST_HEAD(hash_algs);
static LIST_HEAD(cipher_algs);
static LIST_HEAD(aead_algs);
+static LIST_HEAD(akcipher_algs);
/* For any tfm, requests for that tfm must be returned on the order
* received. With multiple queues available, the CCP can process more
@@ -358,6 +364,12 @@ static int ccp_register_algs(void)
return ret;
}
+ if (!rsa_disable) {
+ ret = ccp_register_rsa_algs(&akcipher_algs);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -366,6 +378,7 @@ static void ccp_unregister_algs(void)
struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp;
struct ccp_crypto_ablkcipher_alg *ablk_alg, *ablk_tmp;
struct ccp_crypto_aead *aead_alg, *aead_tmp;
+ struct ccp_crypto_akcipher_alg *akc_alg, *akc_tmp;
list_for_each_entry_safe(ahash_alg, ahash_tmp, &hash_algs, entry) {
crypto_unregister_ahash(&ahash_alg->alg);
@@ -384,6 +397,12 @@ static void ccp_unregister_algs(void)
list_del(&aead_alg->entry);
kfree(aead_alg);
}
+
+ list_for_each_entry_safe(akc_alg, akc_tmp, &akcipher_algs, entry) {
+ crypto_unregister_akcipher(&akc_alg->alg);
+ list_del(&akc_alg->entry);
+ kfree(akc_alg);
+ }
}
static int ccp_crypto_init(void)
diff --git a/drivers/crypto/ccp/ccp-crypto-rsa.c b/drivers/crypto/ccp/ccp-crypto-rsa.c
new file mode 100644
index 000000000000..e6db8672d89c
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-crypto-rsa.c
@@ -0,0 +1,299 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) RSA crypto API support
+ *
+ * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ *
+ * Author: Gary R Hook <gary.hook@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/rsa.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/akcipher.h>
+#include <crypto/scatterwalk.h>
+
+#include "ccp-crypto.h"
+
+static inline struct akcipher_request *akcipher_request_cast(
+ struct crypto_async_request *req)
+{
+ return container_of(req, struct akcipher_request, base);
+}
+
+static inline int ccp_copy_and_save_keypart(u8 **kpbuf, unsigned int *kplen,
+ const u8 *buf, size_t sz)
+{
+ int nskip;
+
+ for (nskip = 0; nskip < sz; nskip++)
+ if (buf[nskip])
+ break;
+ *kplen = sz - nskip;
+ *kpbuf = kzalloc(*kplen, GFP_KERNEL);
+ if (!*kpbuf)
+ return -ENOMEM;
+ memcpy(*kpbuf, buf + nskip, *kplen);
+
+ return 0;
+}
+
+static int ccp_rsa_complete(struct crypto_async_request *async_req, int ret)
+{
+ struct akcipher_request *req = akcipher_request_cast(async_req);
+ struct ccp_rsa_req_ctx *rctx = akcipher_request_ctx(req);
+
+ if (ret)
+ return ret;
+
+ req->dst_len = rctx->cmd.u.rsa.key_size >> 3;
+
+ return 0;
+}
+
+static unsigned int ccp_rsa_maxsize(struct crypto_akcipher *tfm)
+{
+ if (ccp_version() > CCP_VERSION(3, 0))
+ return CCP5_RSA_MAXMOD;
+ else
+ return CCP_RSA_MAXMOD;
+}
+
+static int ccp_rsa_crypt(struct akcipher_request *req, bool encrypt)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct ccp_rsa_req_ctx *rctx = akcipher_request_ctx(req);
+ int ret = 0;
+
+ memset(&rctx->cmd, 0, sizeof(rctx->cmd));
+ INIT_LIST_HEAD(&rctx->cmd.entry);
+ rctx->cmd.engine = CCP_ENGINE_RSA;
+
+ rctx->cmd.u.rsa.key_size = ctx->u.rsa.key_len; /* in bits */
+ if (encrypt) {
+ rctx->cmd.u.rsa.exp = &ctx->u.rsa.e_sg;
+ rctx->cmd.u.rsa.exp_len = ctx->u.rsa.e_len;
+ } else {
+ rctx->cmd.u.rsa.exp = &ctx->u.rsa.d_sg;
+ rctx->cmd.u.rsa.exp_len = ctx->u.rsa.d_len;
+ }
+ rctx->cmd.u.rsa.mod = &ctx->u.rsa.n_sg;
+ rctx->cmd.u.rsa.mod_len = ctx->u.rsa.n_len;
+ rctx->cmd.u.rsa.src = req->src;
+ rctx->cmd.u.rsa.src_len = req->src_len;
+ rctx->cmd.u.rsa.dst = req->dst;
+
+ ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
+
+ return ret;
+}
+
+static int ccp_rsa_encrypt(struct akcipher_request *req)
+{
+ return ccp_rsa_crypt(req, true);
+}
+
+static int ccp_rsa_decrypt(struct akcipher_request *req)
+{
+ return ccp_rsa_crypt(req, false);
+}
+
+static int ccp_check_key_length(unsigned int len)
+{
+ /* In bits */
+ if (len < 8 || len > 4096)
+ return -EINVAL;
+ return 0;
+}
+
+static void ccp_rsa_free_key_bufs(struct ccp_ctx *ctx)
+{
+ /* Clean up old key data */
+ kzfree(ctx->u.rsa.e_buf);
+ ctx->u.rsa.e_buf = NULL;
+ ctx->u.rsa.e_len = 0;
+ kzfree(ctx->u.rsa.n_buf);
+ ctx->u.rsa.n_buf = NULL;
+ ctx->u.rsa.n_len = 0;
+ kzfree(ctx->u.rsa.d_buf);
+ ctx->u.rsa.d_buf = NULL;
+ ctx->u.rsa.d_len = 0;
+}
+
+static int ccp_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen, bool private)
+{
+ struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct rsa_key raw_key;
+ int ret;
+
+ ccp_rsa_free_key_bufs(ctx);
+ memset(&raw_key, 0, sizeof(raw_key));
+
+ /* Code borrowed from crypto/rsa.c */
+ if (private)
+ ret = rsa_parse_priv_key(&raw_key, key, keylen);
+ else
+ ret = rsa_parse_pub_key(&raw_key, key, keylen);
+ if (ret)
+ goto n_key;
+
+ ret = ccp_copy_and_save_keypart(&ctx->u.rsa.n_buf, &ctx->u.rsa.n_len,
+ raw_key.n, raw_key.n_sz);
+ if (ret)
+ goto key_err;
+ sg_init_one(&ctx->u.rsa.n_sg, ctx->u.rsa.n_buf, ctx->u.rsa.n_len);
+
+ ctx->u.rsa.key_len = ctx->u.rsa.n_len << 3; /* convert to bits */
+ if (ccp_check_key_length(ctx->u.rsa.key_len)) {
+ ret = -EINVAL;
+ goto key_err;
+ }
+
+ ret = ccp_copy_and_save_keypart(&ctx->u.rsa.e_buf, &ctx->u.rsa.e_len,
+ raw_key.e, raw_key.e_sz);
+ if (ret)
+ goto key_err;
+ sg_init_one(&ctx->u.rsa.e_sg, ctx->u.rsa.e_buf, ctx->u.rsa.e_len);
+
+ if (private) {
+ ret = ccp_copy_and_save_keypart(&ctx->u.rsa.d_buf,
+ &ctx->u.rsa.d_len,
+ raw_key.d, raw_key.d_sz);
+ if (ret)
+ goto key_err;
+ sg_init_one(&ctx->u.rsa.d_sg,
+ ctx->u.rsa.d_buf, ctx->u.rsa.d_len);
+ }
+
+ return 0;
+
+key_err:
+ ccp_rsa_free_key_bufs(ctx);
+
+n_key:
+ return ret;
+}
+
+static int ccp_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen)
+{
+ return ccp_rsa_setkey(tfm, key, keylen, true);
+}
+
+static int ccp_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen)
+{
+ return ccp_rsa_setkey(tfm, key, keylen, false);
+}
+
+static int ccp_rsa_init_tfm(struct crypto_akcipher *tfm)
+{
+ struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+ akcipher_set_reqsize(tfm, sizeof(struct ccp_rsa_req_ctx));
+ ctx->complete = ccp_rsa_complete;
+
+ return 0;
+}
+
+static void ccp_rsa_exit_tfm(struct crypto_akcipher *tfm)
+{
+ struct ccp_ctx *ctx = crypto_tfm_ctx(&tfm->base);
+
+ ccp_rsa_free_key_bufs(ctx);
+}
+
+static struct akcipher_alg ccp_rsa_defaults = {
+ .encrypt = ccp_rsa_encrypt,
+ .decrypt = ccp_rsa_decrypt,
+ .sign = ccp_rsa_decrypt,
+ .verify = ccp_rsa_encrypt,
+ .set_pub_key = ccp_rsa_setpubkey,
+ .set_priv_key = ccp_rsa_setprivkey,
+ .max_size = ccp_rsa_maxsize,
+ .init = ccp_rsa_init_tfm,
+ .exit = ccp_rsa_exit_tfm,
+ .base = {
+ .cra_name = "rsa",
+ .cra_driver_name = "rsa-ccp",
+ .cra_priority = CCP_CRA_PRIORITY,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = 2 * sizeof(struct ccp_ctx),
+ },
+};
+
+struct ccp_rsa_def {
+ unsigned int version;
+ const char *name;
+ const char *driver_name;
+ unsigned int reqsize;
+ struct akcipher_alg *alg_defaults;
+};
+
+static struct ccp_rsa_def rsa_algs[] = {
+ {
+ .version = CCP_VERSION(3, 0),
+ .name = "rsa",
+ .driver_name = "rsa-ccp",
+ .reqsize = sizeof(struct ccp_rsa_req_ctx),
+ .alg_defaults = &ccp_rsa_defaults,
+ }
+};
+
+int ccp_register_rsa_alg(struct list_head *head, const struct ccp_rsa_def *def)
+{
+ struct ccp_crypto_akcipher_alg *ccp_alg;
+ struct akcipher_alg *alg;
+ int ret;
+
+ ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
+ if (!ccp_alg)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&ccp_alg->entry);
+
+ alg = &ccp_alg->alg;
+ *alg = *def->alg_defaults;
+ snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+ snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ def->driver_name);
+ ret = crypto_register_akcipher(alg);
+ if (ret) {
+ pr_err("%s akcipher algorithm registration error (%d)\n",
+ alg->base.cra_name, ret);
+ kfree(ccp_alg);
+ return ret;
+ }
+
+ list_add(&ccp_alg->entry, head);
+
+ return 0;
+}
+
+int ccp_register_rsa_algs(struct list_head *head)
+{
+ int i, ret;
+ unsigned int ccpversion = ccp_version();
+
+ /* Register the RSA algorithm in standard mode
+ * This works for CCP v3 and later
+ */
+ for (i = 0; i < ARRAY_SIZE(rsa_algs); i++) {
+ if (rsa_algs[i].version > ccpversion)
+ continue;
+ ret = ccp_register_rsa_alg(head, &rsa_algs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
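For orientation, a hedged sketch of a consumer of the newly registered "rsa" akcipher via the generic crypto API (key/keylen are assumed to hold a BER-encoded RSA public key as expected by rsa_parse_pub_key(); error handling is trimmed):

	struct crypto_akcipher *tfm;
	int ret;

	tfm = crypto_alloc_akcipher("rsa", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_akcipher_set_pub_key(tfm, key, keylen);
	if (!ret) {
		/* ... allocate an akcipher_request, set the src/dst
		 * scatterlists, then call crypto_akcipher_encrypt() ...
		 */
	}

	crypto_free_akcipher(tfm);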
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index ce97b3868f4a..8b9b16d433f7 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) SHA crypto API support
*
- * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
index dd5bf15f06e5..b9fd090c46c2 100644
--- a/drivers/crypto/ccp/ccp-crypto.h
+++ b/drivers/crypto/ccp/ccp-crypto.h
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) crypto API support
*
- * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*
@@ -24,6 +24,8 @@
#include <crypto/ctr.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
+#include <crypto/akcipher.h>
+#include <crypto/internal/rsa.h>
#define CCP_LOG_LEVEL KERN_INFO
@@ -58,6 +60,12 @@ struct ccp_crypto_ahash_alg {
struct ahash_alg alg;
};
+struct ccp_crypto_akcipher_alg {
+ struct list_head entry;
+
+ struct akcipher_alg alg;
+};
+
static inline struct ccp_crypto_ablkcipher_alg *
ccp_crypto_ablkcipher_alg(struct crypto_tfm *tfm)
{
@@ -91,7 +99,7 @@ struct ccp_aes_ctx {
struct scatterlist key_sg;
unsigned int key_len;
- u8 key[AES_MAX_KEY_SIZE];
+ u8 key[AES_MAX_KEY_SIZE * 2];
u8 nonce[CTR_RFC3686_NONCE_SIZE];
@@ -227,12 +235,35 @@ struct ccp_sha_exp_ctx {
u8 buf[MAX_SHA_BLOCK_SIZE];
};
+/***** RSA related defines *****/
+
+struct ccp_rsa_ctx {
+ unsigned int key_len; /* in bits */
+ struct scatterlist e_sg;
+ u8 *e_buf;
+ unsigned int e_len;
+ struct scatterlist n_sg;
+ u8 *n_buf;
+ unsigned int n_len;
+ struct scatterlist d_sg;
+ u8 *d_buf;
+ unsigned int d_len;
+};
+
+struct ccp_rsa_req_ctx {
+ struct ccp_cmd cmd;
+};
+
+#define CCP_RSA_MAXMOD (4 * 1024 / 8)
+#define CCP5_RSA_MAXMOD (16 * 1024 / 8)
+
/***** Common Context Structure *****/
struct ccp_ctx {
int (*complete)(struct crypto_async_request *req, int ret);
union {
struct ccp_aes_ctx aes;
+ struct ccp_rsa_ctx rsa;
struct ccp_sha_ctx sha;
struct ccp_des3_ctx des3;
} u;
@@ -249,5 +280,6 @@ int ccp_register_aes_xts_algs(struct list_head *head);
int ccp_register_aes_aeads(struct list_head *head);
int ccp_register_sha_algs(struct list_head *head);
int ccp_register_des3_algs(struct list_head *head);
+int ccp_register_rsa_algs(struct list_head *head);
#endif
diff --git a/drivers/crypto/ccp/ccp-debugfs.c b/drivers/crypto/ccp/ccp-debugfs.c
index 3cd6c83754e0..59d4ca4e72d8 100644
--- a/drivers/crypto/ccp/ccp-debugfs.c
+++ b/drivers/crypto/ccp/ccp-debugfs.c
@@ -305,19 +305,19 @@ void ccp5_debugfs_setup(struct ccp_device *ccp)
ccp->debugfs_instance = debugfs_create_dir(ccp->name, ccp_debugfs_dir);
if (!ccp->debugfs_instance)
- return;
+ goto err;
debugfs_info = debugfs_create_file("info", 0400,
ccp->debugfs_instance, ccp,
&ccp_debugfs_info_ops);
if (!debugfs_info)
- return;
+ goto err;
debugfs_stats = debugfs_create_file("stats", 0600,
ccp->debugfs_instance, ccp,
&ccp_debugfs_stats_ops);
if (!debugfs_stats)
- return;
+ goto err;
for (i = 0; i < ccp->cmd_q_count; i++) {
cmd_q = &ccp->cmd_q[i];
@@ -327,15 +327,20 @@ void ccp5_debugfs_setup(struct ccp_device *ccp)
debugfs_q_instance =
debugfs_create_dir(name, ccp->debugfs_instance);
if (!debugfs_q_instance)
- return;
+ goto err;
debugfs_q_stats =
debugfs_create_file("stats", 0600,
debugfs_q_instance, cmd_q,
&ccp_debugfs_queue_ops);
if (!debugfs_q_stats)
- return;
+ goto err;
}
+
+ return;
+
+err:
+ debugfs_remove_recursive(ccp->debugfs_instance);
}
void ccp5_debugfs_destroy(void)
diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
index 367c2e30656f..240bebbcb8ac 100644
--- a/drivers/crypto/ccp/ccp-dev-v3.c
+++ b/drivers/crypto/ccp/ccp-dev-v3.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
- * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
@@ -359,8 +359,7 @@ static void ccp_irq_bh(unsigned long data)
static irqreturn_t ccp_irq_handler(int irq, void *data)
{
- struct device *dev = data;
- struct ccp_device *ccp = dev_get_drvdata(dev);
+ struct ccp_device *ccp = (struct ccp_device *)data;
ccp_disable_queue_interrupts(ccp);
if (ccp->use_tasklet)
@@ -454,7 +453,7 @@ static int ccp_init(struct ccp_device *ccp)
iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG);
/* Request an irq */
- ret = ccp->get_irq(ccp);
+ ret = sp_request_ccp_irq(ccp->sp, ccp_irq_handler, ccp->name, ccp);
if (ret) {
dev_err(dev, "unable to allocate an IRQ\n");
goto e_pool;
@@ -511,7 +510,7 @@ e_kthread:
if (ccp->cmd_q[i].kthread)
kthread_stop(ccp->cmd_q[i].kthread);
- ccp->free_irq(ccp);
+ sp_free_ccp_irq(ccp->sp, ccp);
e_pool:
for (i = 0; i < ccp->cmd_q_count; i++)
@@ -550,7 +549,7 @@ static void ccp_destroy(struct ccp_device *ccp)
if (ccp->cmd_q[i].kthread)
kthread_stop(ccp->cmd_q[i].kthread);
- ccp->free_irq(ccp);
+ sp_free_ccp_irq(ccp->sp, ccp);
for (i = 0; i < ccp->cmd_q_count; i++)
dma_pool_destroy(ccp->cmd_q[i].dma_pool);
@@ -586,10 +585,17 @@ static const struct ccp_actions ccp3_actions = {
.irqhandler = ccp_irq_handler,
};
+const struct ccp_vdata ccpv3_platform = {
+ .version = CCP_VERSION(3, 0),
+ .setup = NULL,
+ .perform = &ccp3_actions,
+ .offset = 0,
+};
+
const struct ccp_vdata ccpv3 = {
.version = CCP_VERSION(3, 0),
.setup = NULL,
.perform = &ccp3_actions,
- .bar = 2,
.offset = 0x20000,
+ .rsamax = CCP_RSA_MAX_WIDTH,
};
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index b10d2d2075cb..65604fc65e8f 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
- * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ * Copyright (C) 2016,2017 Advanced Micro Devices, Inc.
*
* Author: Gary R Hook <gary.hook@amd.com>
*
@@ -145,6 +145,7 @@ union ccp_function {
#define CCP_AES_MODE(p) ((p)->aes.mode)
#define CCP_AES_TYPE(p) ((p)->aes.type)
#define CCP_XTS_SIZE(p) ((p)->aes_xts.size)
+#define CCP_XTS_TYPE(p) ((p)->aes_xts.type)
#define CCP_XTS_ENCRYPT(p) ((p)->aes_xts.encrypt)
#define CCP_DES3_SIZE(p) ((p)->des3.size)
#define CCP_DES3_ENCRYPT(p) ((p)->des3.encrypt)
@@ -344,6 +345,7 @@ static int ccp5_perform_xts_aes(struct ccp_op *op)
CCP5_CMD_PROT(&desc) = 0;
function.raw = 0;
+ CCP_XTS_TYPE(&function) = op->u.xts.type;
CCP_XTS_ENCRYPT(&function) = op->u.xts.action;
CCP_XTS_SIZE(&function) = op->u.xts.unit_size;
CCP5_CMD_FUNCTION(&desc) = function.raw;
@@ -469,7 +471,7 @@ static int ccp5_perform_rsa(struct ccp_op *op)
CCP5_CMD_PROT(&desc) = 0;
function.raw = 0;
- CCP_RSA_SIZE(&function) = op->u.rsa.mod_size >> 3;
+ CCP_RSA_SIZE(&function) = (op->u.rsa.mod_size + 7) >> 3;
CCP5_CMD_FUNCTION(&desc) = function.raw;
CCP5_CMD_LEN(&desc) = op->u.rsa.input_len;
@@ -484,10 +486,10 @@ static int ccp5_perform_rsa(struct ccp_op *op)
CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
- /* Exponent is in LSB memory */
- CCP5_CMD_KEY_LO(&desc) = op->sb_key * LSB_ITEM_SIZE;
- CCP5_CMD_KEY_HI(&desc) = 0;
- CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
+ /* Key (Exponent) is in external memory */
+ CCP5_CMD_KEY_LO(&desc) = ccp_addr_lo(&op->exp.u.dma);
+ CCP5_CMD_KEY_HI(&desc) = ccp_addr_hi(&op->exp.u.dma);
+ CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
return ccp5_do_cmd(&desc, op->cmd_q);
}
@@ -769,8 +771,7 @@ static void ccp5_irq_bh(unsigned long data)
static irqreturn_t ccp5_irq_handler(int irq, void *data)
{
- struct device *dev = data;
- struct ccp_device *ccp = dev_get_drvdata(dev);
+ struct ccp_device *ccp = (struct ccp_device *)data;
ccp5_disable_queue_interrupts(ccp);
ccp->total_interrupts++;
@@ -881,7 +882,7 @@ static int ccp5_init(struct ccp_device *ccp)
dev_dbg(dev, "Requesting an IRQ...\n");
/* Request an irq */
- ret = ccp->get_irq(ccp);
+ ret = sp_request_ccp_irq(ccp->sp, ccp5_irq_handler, ccp->name, ccp);
if (ret) {
dev_err(dev, "unable to allocate an IRQ\n");
goto e_pool;
@@ -987,7 +988,7 @@ e_kthread:
kthread_stop(ccp->cmd_q[i].kthread);
e_irq:
- ccp->free_irq(ccp);
+ sp_free_ccp_irq(ccp->sp, ccp);
e_pool:
for (i = 0; i < ccp->cmd_q_count; i++)
@@ -1037,7 +1038,7 @@ static void ccp5_destroy(struct ccp_device *ccp)
if (ccp->cmd_q[i].kthread)
kthread_stop(ccp->cmd_q[i].kthread);
- ccp->free_irq(ccp);
+ sp_free_ccp_irq(ccp->sp, ccp);
for (i = 0; i < ccp->cmd_q_count; i++) {
cmd_q = &ccp->cmd_q[i];
@@ -1106,15 +1107,14 @@ static const struct ccp_actions ccp5_actions = {
.init = ccp5_init,
.destroy = ccp5_destroy,
.get_free_slots = ccp5_get_free_slots,
- .irqhandler = ccp5_irq_handler,
};
const struct ccp_vdata ccpv5a = {
.version = CCP_VERSION(5, 0),
.setup = ccp5_config,
.perform = &ccp5_actions,
- .bar = 2,
.offset = 0x0,
+ .rsamax = CCP5_RSA_MAX_WIDTH,
};
const struct ccp_vdata ccpv5b = {
@@ -1122,6 +1122,6 @@ const struct ccp_vdata ccpv5b = {
.dma_chan_attr = DMA_PRIVATE,
.setup = ccp5other_config,
.perform = &ccp5_actions,
- .bar = 2,
.offset = 0x0,
+ .rsamax = CCP5_RSA_MAX_WIDTH,
};
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 2506b5025700..4e029b176641 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
- * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
@@ -11,7 +11,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched.h>
@@ -30,12 +29,6 @@
#include "ccp-dev.h"
-MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
-MODULE_AUTHOR("Gary R Hook <gary.hook@amd.com>");
-MODULE_LICENSE("GPL");
-MODULE_VERSION("1.1.0");
-MODULE_DESCRIPTION("AMD Cryptographic Coprocessor driver");
-
struct ccp_tasklet_data {
struct completion completion;
struct ccp_cmd *cmd;
@@ -111,13 +104,6 @@ static LIST_HEAD(ccp_units);
static DEFINE_SPINLOCK(ccp_rr_lock);
static struct ccp_device *ccp_rr;
-/* Ever-increasing value to produce unique unit numbers */
-static atomic_t ccp_unit_ordinal;
-static unsigned int ccp_increment_unit_ordinal(void)
-{
- return atomic_inc_return(&ccp_unit_ordinal);
-}
-
/**
* ccp_add_device - add a CCP device to the list
*
@@ -415,6 +401,7 @@ static void ccp_do_cmd_complete(unsigned long data)
struct ccp_cmd *cmd = tdata->cmd;
cmd->callback(cmd->data, cmd->ret);
+
complete(&tdata->completion);
}
@@ -464,14 +451,17 @@ int ccp_cmd_queue_thread(void *data)
*
* @dev: device struct of the CCP
*/
-struct ccp_device *ccp_alloc_struct(struct device *dev)
+struct ccp_device *ccp_alloc_struct(struct sp_device *sp)
{
+ struct device *dev = sp->dev;
struct ccp_device *ccp;
ccp = devm_kzalloc(dev, sizeof(*ccp), GFP_KERNEL);
if (!ccp)
return NULL;
ccp->dev = dev;
+ ccp->sp = sp;
+ ccp->axcache = sp->axcache;
INIT_LIST_HEAD(&ccp->cmd);
INIT_LIST_HEAD(&ccp->backlog);
@@ -486,9 +476,8 @@ struct ccp_device *ccp_alloc_struct(struct device *dev)
init_waitqueue_head(&ccp->sb_queue);
init_waitqueue_head(&ccp->suspend_queue);
- ccp->ord = ccp_increment_unit_ordinal();
- snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", ccp->ord);
- snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", ccp->ord);
+ snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", sp->ord);
+ snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", sp->ord);
return ccp;
}
@@ -538,55 +527,100 @@ bool ccp_queues_suspended(struct ccp_device *ccp)
return ccp->cmd_q_count == suspended;
}
-#endif
-static int __init ccp_mod_init(void)
+int ccp_dev_suspend(struct sp_device *sp, pm_message_t state)
{
-#ifdef CONFIG_X86
- int ret;
+ struct ccp_device *ccp = sp->ccp_data;
+ unsigned long flags;
+ unsigned int i;
- ret = ccp_pci_init();
- if (ret)
- return ret;
+ spin_lock_irqsave(&ccp->cmd_lock, flags);
- /* Don't leave the driver loaded if init failed */
- if (ccp_present() != 0) {
- ccp_pci_exit();
- return -ENODEV;
+ ccp->suspending = 1;
+
+ /* Wake all the queue kthreads to prepare for suspend */
+ for (i = 0; i < ccp->cmd_q_count; i++)
+ wake_up_process(ccp->cmd_q[i].kthread);
+
+ spin_unlock_irqrestore(&ccp->cmd_lock, flags);
+
+ /* Wait for all queue kthreads to say they're done */
+ while (!ccp_queues_suspended(ccp))
+ wait_event_interruptible(ccp->suspend_queue,
+ ccp_queues_suspended(ccp));
+
+ return 0;
+}
+
+int ccp_dev_resume(struct sp_device *sp)
+{
+ struct ccp_device *ccp = sp->ccp_data;
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&ccp->cmd_lock, flags);
+
+ ccp->suspending = 0;
+
+ /* Wake up all the kthreads */
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ ccp->cmd_q[i].suspended = 0;
+ wake_up_process(ccp->cmd_q[i].kthread);
}
+ spin_unlock_irqrestore(&ccp->cmd_lock, flags);
+
return 0;
+}
#endif
-#ifdef CONFIG_ARM64
+int ccp_dev_init(struct sp_device *sp)
+{
+ struct device *dev = sp->dev;
+ struct ccp_device *ccp;
int ret;
- ret = ccp_platform_init();
+ ret = -ENOMEM;
+ ccp = ccp_alloc_struct(sp);
+ if (!ccp)
+ goto e_err;
+ sp->ccp_data = ccp;
+
+ ccp->vdata = (struct ccp_vdata *)sp->dev_vdata->ccp_vdata;
+ if (!ccp->vdata || !ccp->vdata->version) {
+ ret = -ENODEV;
+ dev_err(dev, "missing driver data\n");
+ goto e_err;
+ }
+
+ ccp->use_tasklet = sp->use_tasklet;
+
+ ccp->io_regs = sp->io_map + ccp->vdata->offset;
+ if (ccp->vdata->setup)
+ ccp->vdata->setup(ccp);
+
+ ret = ccp->vdata->perform->init(ccp);
if (ret)
- return ret;
+ goto e_err;
- /* Don't leave the driver loaded if init failed */
- if (ccp_present() != 0) {
- ccp_platform_exit();
- return -ENODEV;
- }
+ dev_notice(dev, "ccp enabled\n");
return 0;
-#endif
- return -ENODEV;
+e_err:
+ sp->ccp_data = NULL;
+
+ dev_notice(dev, "ccp initialization failed\n");
+
+ return ret;
}
-static void __exit ccp_mod_exit(void)
+void ccp_dev_destroy(struct sp_device *sp)
{
-#ifdef CONFIG_X86
- ccp_pci_exit();
-#endif
+ struct ccp_device *ccp = sp->ccp_data;
-#ifdef CONFIG_ARM64
- ccp_platform_exit();
-#endif
-}
+ if (!ccp)
+ return;
-module_init(ccp_mod_init);
-module_exit(ccp_mod_exit);
+ ccp->vdata->perform->destroy(ccp);
+}
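The suspend/resume entry points above are now driven by the shared sp-dev layer rather than by the CCP module itself; a rough sketch of how the platform glue might forward a PM callback (an assumption about sp-platform.c, which is not shown in this hunk):

	static int sp_platform_suspend(struct platform_device *pdev,
				       pm_message_t state)
	{
		struct sp_device *sp = platform_get_drvdata(pdev);

		/* Forward to the CCP sub-device when it is present. */
		if (sp->ccp_data)
			return ccp_dev_suspend(sp, state);

		return 0;
	}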
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index a70154ac7405..6810b65c1939 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
- * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
@@ -27,6 +27,8 @@
#include <linux/irqreturn.h>
#include <linux/dmaengine.h>
+#include "sp-dev.h"
+
#define MAX_CCP_NAME_LEN 16
#define MAX_DMAPOOL_NAME_LEN 32
@@ -192,6 +194,7 @@
#define CCP_AES_CTX_SB_COUNT 1
#define CCP_XTS_AES_KEY_SB_COUNT 1
+#define CCP5_XTS_AES_KEY_SB_COUNT 2
#define CCP_XTS_AES_CTX_SB_COUNT 1
#define CCP_DES3_KEY_SB_COUNT 1
@@ -200,6 +203,7 @@
#define CCP_SHA_SB_COUNT 1
#define CCP_RSA_MAX_WIDTH 4096
+#define CCP5_RSA_MAX_WIDTH 16384
#define CCP_PASSTHRU_BLOCKSIZE 256
#define CCP_PASSTHRU_MASKSIZE 32
@@ -344,12 +348,11 @@ struct ccp_device {
char rngname[MAX_CCP_NAME_LEN];
struct device *dev;
+ struct sp_device *sp;
/* Bus specific device information
*/
void *dev_specific;
- int (*get_irq)(struct ccp_device *ccp);
- void (*free_irq)(struct ccp_device *ccp);
unsigned int qim;
unsigned int irq;
bool use_tasklet;
@@ -362,7 +365,6 @@ struct ccp_device {
* them.
*/
struct mutex req_mutex ____cacheline_aligned;
- void __iomem *io_map;
void __iomem *io_regs;
/* Master lists that all cmds are queued on. Because there can be
@@ -497,6 +499,7 @@ struct ccp_aes_op {
};
struct ccp_xts_aes_op {
+ enum ccp_aes_type type;
enum ccp_aes_action action;
enum ccp_xts_aes_unit_size unit_size;
};
@@ -626,18 +629,12 @@ struct ccp5_desc {
struct dword7 dw7;
};
-int ccp_pci_init(void);
-void ccp_pci_exit(void);
-
-int ccp_platform_init(void);
-void ccp_platform_exit(void);
-
void ccp_add_device(struct ccp_device *ccp);
void ccp_del_device(struct ccp_device *ccp);
extern void ccp_log_error(struct ccp_device *, int);
-struct ccp_device *ccp_alloc_struct(struct device *dev);
+struct ccp_device *ccp_alloc_struct(struct sp_device *sp);
bool ccp_queues_suspended(struct ccp_device *ccp);
int ccp_cmd_queue_thread(void *data);
int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait);
@@ -669,16 +666,7 @@ struct ccp_actions {
irqreturn_t (*irqhandler)(int, void *);
};
-/* Structure to hold CCP version-specific values */
-struct ccp_vdata {
- const unsigned int version;
- const unsigned int dma_chan_attr;
- void (*setup)(struct ccp_device *);
- const struct ccp_actions *perform;
- const unsigned int bar;
- const unsigned int offset;
-};
-
+extern const struct ccp_vdata ccpv3_platform;
extern const struct ccp_vdata ccpv3;
extern const struct ccp_vdata ccpv5a;
extern const struct ccp_vdata ccpv5b;
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
index 3f7d54f4728c..d608043c0280 100644
--- a/drivers/crypto/ccp/ccp-dmaengine.c
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
- * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ * Copyright (C) 2016,2017 Advanced Micro Devices, Inc.
*
* Author: Gary R Hook <gary.hook@amd.com>
*
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index c0dfdacbdff5..406b95329b3d 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
- * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
@@ -168,7 +168,7 @@ static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,
wa->dma.address = dma_map_single(wa->dev, wa->address, len,
dir);
- if (!wa->dma.address)
+ if (dma_mapping_error(wa->dev, wa->dma.address))
return -ENOMEM;
wa->dma.length = len;
@@ -1038,6 +1038,8 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
struct ccp_op op;
unsigned int unit_size, dm_offset;
bool in_place = false;
+ unsigned int sb_count;
+ enum ccp_aes_type aestype;
int ret;
switch (xts->unit_size) {
@@ -1061,7 +1063,11 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
return -EINVAL;
}
- if (xts->key_len != AES_KEYSIZE_128)
+ if (xts->key_len == AES_KEYSIZE_128)
+ aestype = CCP_AES_TYPE_128;
+ else if (xts->key_len == AES_KEYSIZE_256)
+ aestype = CCP_AES_TYPE_256;
+ else
return -EINVAL;
if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1)))
@@ -1083,23 +1089,44 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
op.sb_key = cmd_q->sb_key;
op.sb_ctx = cmd_q->sb_ctx;
op.init = 1;
+ op.u.xts.type = aestype;
op.u.xts.action = xts->action;
op.u.xts.unit_size = xts->unit_size;
- /* All supported key sizes fit in a single (32-byte) SB entry
- * and must be in little endian format. Use the 256-bit byte
- * swap passthru option to convert from big endian to little
- * endian.
+ /* A version 3 device only supports 128-bit keys, which fits into a
+ * single SB entry. A version 5 device uses a 512-bit vector, so two
+ * SB entries.
*/
+ if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
+ sb_count = CCP_XTS_AES_KEY_SB_COUNT;
+ else
+ sb_count = CCP5_XTS_AES_KEY_SB_COUNT;
ret = ccp_init_dm_workarea(&key, cmd_q,
- CCP_XTS_AES_KEY_SB_COUNT * CCP_SB_BYTES,
+ sb_count * CCP_SB_BYTES,
DMA_TO_DEVICE);
if (ret)
return ret;
- dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
- ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
- ccp_set_dm_area(&key, 0, xts->key, dm_offset, xts->key_len);
+ if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) {
+ /* All supported key sizes must be in little endian format.
+ * Use the 256-bit byte swap passthru option to convert from
+ * big endian to little endian.
+ */
+ dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
+ ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
+ ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len);
+ } else {
+ /* Version 5 CCPs use a 512-bit space for the key: each portion
+ * occupies 256 bits, or one entire slot, and is zero-padded.
+ */
+ unsigned int pad;
+
+ dm_offset = CCP_SB_BYTES;
+ pad = dm_offset - xts->key_len;
+ ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len);
+ ccp_set_dm_area(&key, dm_offset + pad, xts->key, xts->key_len,
+ xts->key_len);
+ }
ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
@@ -1731,42 +1758,53 @@ e_ctx:
static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
struct ccp_rsa_engine *rsa = &cmd->u.rsa;
- struct ccp_dm_workarea exp, src;
- struct ccp_data dst;
+ struct ccp_dm_workarea exp, src, dst;
struct ccp_op op;
unsigned int sb_count, i_len, o_len;
int ret;
- if (rsa->key_size > CCP_RSA_MAX_WIDTH)
+ /* Check against the maximum allowable size, in bits */
+ if (rsa->key_size > cmd_q->ccp->vdata->rsamax)
return -EINVAL;
if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst)
return -EINVAL;
+ memset(&op, 0, sizeof(op));
+ op.cmd_q = cmd_q;
+ op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
+
/* The RSA modulus must precede the message being acted upon, so
* it must be copied to a DMA area where the message and the
* modulus can be concatenated. Therefore the input buffer
* length required is twice the output buffer length (which
- * must be a multiple of 256-bits).
+ * must be a multiple of 256-bits). Compute o_len, i_len in bytes.
+ * Buffer sizes must be a multiple of 32 bytes; rounding up may be
+ * required.
*/
- o_len = ((rsa->key_size + 255) / 256) * 32;
+ o_len = 32 * ((rsa->key_size + 255) / 256);
i_len = o_len * 2;
- sb_count = o_len / CCP_SB_BYTES;
-
- memset(&op, 0, sizeof(op));
- op.cmd_q = cmd_q;
- op.jobid = ccp_gen_jobid(cmd_q->ccp);
- op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q, sb_count);
-
- if (!op.sb_key)
- return -EIO;
+ sb_count = 0;
+ if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) {
+ /* sb_count is the number of storage block slots required
+ * for the modulus.
+ */
+ sb_count = o_len / CCP_SB_BYTES;
+ op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q,
+ sb_count);
+ if (!op.sb_key)
+ return -EIO;
+ } else {
+ /* A version 5 device allows a modulus size that will not fit
+ * in the LSB, so the command will transfer it from memory.
+ * Set the sb key to the default, even though it's not used.
+ */
+ op.sb_key = cmd_q->sb_key;
+ }
- /* The RSA exponent may span multiple (32-byte) SB entries and must
- * be in little endian format. Reverse copy each 32-byte chunk
- * of the exponent (En chunk to E0 chunk, E(n-1) chunk to E1 chunk)
- * and each byte within that chunk and do not perform any byte swap
- * operations on the passthru operation.
+ /* The RSA exponent must be in little endian format. Reverse its
+ * byte order.
*/
ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE);
if (ret)
@@ -1775,11 +1813,22 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
ret = ccp_reverse_set_dm_area(&exp, 0, rsa->exp, 0, rsa->exp_len);
if (ret)
goto e_exp;
- ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key,
- CCP_PASSTHRU_BYTESWAP_NOOP);
- if (ret) {
- cmd->engine_error = cmd_q->cmd_error;
- goto e_exp;
+
+ if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) {
+ /* Copy the exponent to the local storage block, using
+ * as many 32-byte blocks as were allocated above. It's
+ * already little endian, so no further change is required.
+ */
+ ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key,
+ CCP_PASSTHRU_BYTESWAP_NOOP);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_exp;
+ }
+ } else {
+ /* The exponent can be retrieved from memory via DMA. */
+ op.exp.u.dma.address = exp.dma.address;
+ op.exp.u.dma.offset = 0;
}
/* Concatenate the modulus and the message. Both the modulus and
@@ -1798,8 +1847,7 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
goto e_src;
/* Prepare the output area for the operation */
- ret = ccp_init_data(&dst, cmd_q, rsa->dst, rsa->mod_len,
- o_len, DMA_FROM_DEVICE);
+ ret = ccp_init_dm_workarea(&dst, cmd_q, o_len, DMA_FROM_DEVICE);
if (ret)
goto e_src;
@@ -1807,7 +1855,7 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
op.src.u.dma.address = src.dma.address;
op.src.u.dma.offset = 0;
op.src.u.dma.length = i_len;
- op.dst.u.dma.address = dst.dm_wa.dma.address;
+ op.dst.u.dma.address = dst.dma.address;
op.dst.u.dma.offset = 0;
op.dst.u.dma.length = o_len;
@@ -1820,10 +1868,10 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
goto e_dst;
}
- ccp_reverse_get_dm_area(&dst.dm_wa, 0, rsa->dst, 0, rsa->mod_len);
+ ccp_reverse_get_dm_area(&dst, 0, rsa->dst, 0, rsa->mod_len);
e_dst:
- ccp_free_data(&dst, cmd_q);
+ ccp_dm_free(&dst);
e_src:
ccp_dm_free(&src);
@@ -1832,7 +1880,8 @@ e_exp:
ccp_dm_free(&exp);
e_sb:
- cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count);
+ if (sb_count)
+ cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count);
return ret;
}
@@ -1992,7 +2041,7 @@ static int ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q,
memset(&op, 0, sizeof(op));
op.cmd_q = cmd_q;
- op.jobid = ccp_gen_jobid(cmd_q->ccp);
+ op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
/* Load the mask */
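As a sanity check on the RSA buffer sizing above, a worked example (illustrative, for a 2048-bit modulus):

	/*
	 *   key_size = 2048 bits
	 *   o_len    = 32 * ((2048 + 255) / 256) = 32 * 8 = 256 bytes
	 *   i_len    = 2 * o_len                 = 512 bytes
	 *   sb_count = o_len / CCP_SB_BYTES      = 256 / 32 = 8 slots
	 *              (only allocated on pre-v5 devices; a v5 CCP DMAs the
	 *               exponent from system memory instead)
	 */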
diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c
deleted file mode 100644
index e880d4cf4ada..000000000000
--- a/drivers/crypto/ccp/ccp-pci.c
+++ /dev/null
@@ -1,356 +0,0 @@
-/*
- * AMD Cryptographic Coprocessor (CCP) driver
- *
- * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
- *
- * Author: Tom Lendacky <thomas.lendacky@amd.com>
- * Author: Gary R Hook <gary.hook@amd.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/device.h>
-#include <linux/pci.h>
-#include <linux/pci_ids.h>
-#include <linux/dma-mapping.h>
-#include <linux/kthread.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-#include <linux/ccp.h>
-
-#include "ccp-dev.h"
-
-#define MSIX_VECTORS 2
-
-struct ccp_msix {
- u32 vector;
- char name[16];
-};
-
-struct ccp_pci {
- int msix_count;
- struct ccp_msix msix[MSIX_VECTORS];
-};
-
-static int ccp_get_msix_irqs(struct ccp_device *ccp)
-{
- struct ccp_pci *ccp_pci = ccp->dev_specific;
- struct device *dev = ccp->dev;
- struct pci_dev *pdev = to_pci_dev(dev);
- struct msix_entry msix_entry[MSIX_VECTORS];
- unsigned int name_len = sizeof(ccp_pci->msix[0].name) - 1;
- int v, ret;
-
- for (v = 0; v < ARRAY_SIZE(msix_entry); v++)
- msix_entry[v].entry = v;
-
- ret = pci_enable_msix_range(pdev, msix_entry, 1, v);
- if (ret < 0)
- return ret;
-
- ccp_pci->msix_count = ret;
- for (v = 0; v < ccp_pci->msix_count; v++) {
- /* Set the interrupt names and request the irqs */
- snprintf(ccp_pci->msix[v].name, name_len, "%s-%u",
- ccp->name, v);
- ccp_pci->msix[v].vector = msix_entry[v].vector;
- ret = request_irq(ccp_pci->msix[v].vector,
- ccp->vdata->perform->irqhandler,
- 0, ccp_pci->msix[v].name, dev);
- if (ret) {
- dev_notice(dev, "unable to allocate MSI-X IRQ (%d)\n",
- ret);
- goto e_irq;
- }
- }
- ccp->use_tasklet = true;
-
- return 0;
-
-e_irq:
- while (v--)
- free_irq(ccp_pci->msix[v].vector, dev);
-
- pci_disable_msix(pdev);
-
- ccp_pci->msix_count = 0;
-
- return ret;
-}
-
-static int ccp_get_msi_irq(struct ccp_device *ccp)
-{
- struct device *dev = ccp->dev;
- struct pci_dev *pdev = to_pci_dev(dev);
- int ret;
-
- ret = pci_enable_msi(pdev);
- if (ret)
- return ret;
-
- ccp->irq = pdev->irq;
- ret = request_irq(ccp->irq, ccp->vdata->perform->irqhandler, 0,
- ccp->name, dev);
- if (ret) {
- dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret);
- goto e_msi;
- }
- ccp->use_tasklet = true;
-
- return 0;
-
-e_msi:
- pci_disable_msi(pdev);
-
- return ret;
-}
-
-static int ccp_get_irqs(struct ccp_device *ccp)
-{
- struct device *dev = ccp->dev;
- int ret;
-
- ret = ccp_get_msix_irqs(ccp);
- if (!ret)
- return 0;
-
- /* Couldn't get MSI-X vectors, try MSI */
- dev_notice(dev, "could not enable MSI-X (%d), trying MSI\n", ret);
- ret = ccp_get_msi_irq(ccp);
- if (!ret)
- return 0;
-
- /* Couldn't get MSI interrupt */
- dev_notice(dev, "could not enable MSI (%d)\n", ret);
-
- return ret;
-}
-
-static void ccp_free_irqs(struct ccp_device *ccp)
-{
- struct ccp_pci *ccp_pci = ccp->dev_specific;
- struct device *dev = ccp->dev;
- struct pci_dev *pdev = to_pci_dev(dev);
-
- if (ccp_pci->msix_count) {
- while (ccp_pci->msix_count--)
- free_irq(ccp_pci->msix[ccp_pci->msix_count].vector,
- dev);
- pci_disable_msix(pdev);
- } else if (ccp->irq) {
- free_irq(ccp->irq, dev);
- pci_disable_msi(pdev);
- }
- ccp->irq = 0;
-}
-
-static int ccp_find_mmio_area(struct ccp_device *ccp)
-{
- struct device *dev = ccp->dev;
- struct pci_dev *pdev = to_pci_dev(dev);
- resource_size_t io_len;
- unsigned long io_flags;
-
- io_flags = pci_resource_flags(pdev, ccp->vdata->bar);
- io_len = pci_resource_len(pdev, ccp->vdata->bar);
- if ((io_flags & IORESOURCE_MEM) &&
- (io_len >= (ccp->vdata->offset + 0x800)))
- return ccp->vdata->bar;
-
- return -EIO;
-}
-
-static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
-{
- struct ccp_device *ccp;
- struct ccp_pci *ccp_pci;
- struct device *dev = &pdev->dev;
- unsigned int bar;
- int ret;
-
- ret = -ENOMEM;
- ccp = ccp_alloc_struct(dev);
- if (!ccp)
- goto e_err;
-
- ccp_pci = devm_kzalloc(dev, sizeof(*ccp_pci), GFP_KERNEL);
- if (!ccp_pci)
- goto e_err;
-
- ccp->dev_specific = ccp_pci;
- ccp->vdata = (struct ccp_vdata *)id->driver_data;
- if (!ccp->vdata || !ccp->vdata->version) {
- ret = -ENODEV;
- dev_err(dev, "missing driver data\n");
- goto e_err;
- }
- ccp->get_irq = ccp_get_irqs;
- ccp->free_irq = ccp_free_irqs;
-
- ret = pci_request_regions(pdev, "ccp");
- if (ret) {
- dev_err(dev, "pci_request_regions failed (%d)\n", ret);
- goto e_err;
- }
-
- ret = pci_enable_device(pdev);
- if (ret) {
- dev_err(dev, "pci_enable_device failed (%d)\n", ret);
- goto e_regions;
- }
-
- pci_set_master(pdev);
-
- ret = ccp_find_mmio_area(ccp);
- if (ret < 0)
- goto e_device;
- bar = ret;
-
- ret = -EIO;
- ccp->io_map = pci_iomap(pdev, bar, 0);
- if (!ccp->io_map) {
- dev_err(dev, "pci_iomap failed\n");
- goto e_device;
- }
- ccp->io_regs = ccp->io_map + ccp->vdata->offset;
-
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
- if (ret) {
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
- if (ret) {
- dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n",
- ret);
- goto e_iomap;
- }
- }
-
- dev_set_drvdata(dev, ccp);
-
- if (ccp->vdata->setup)
- ccp->vdata->setup(ccp);
-
- ret = ccp->vdata->perform->init(ccp);
- if (ret)
- goto e_iomap;
-
- dev_notice(dev, "enabled\n");
-
- return 0;
-
-e_iomap:
- pci_iounmap(pdev, ccp->io_map);
-
-e_device:
- pci_disable_device(pdev);
-
-e_regions:
- pci_release_regions(pdev);
-
-e_err:
- dev_notice(dev, "initialization failed\n");
- return ret;
-}
-
-static void ccp_pci_remove(struct pci_dev *pdev)
-{
- struct device *dev = &pdev->dev;
- struct ccp_device *ccp = dev_get_drvdata(dev);
-
- if (!ccp)
- return;
-
- ccp->vdata->perform->destroy(ccp);
-
- pci_iounmap(pdev, ccp->io_map);
-
- pci_disable_device(pdev);
-
- pci_release_regions(pdev);
-
- dev_notice(dev, "disabled\n");
-}
-
-#ifdef CONFIG_PM
-static int ccp_pci_suspend(struct pci_dev *pdev, pm_message_t state)
-{
- struct device *dev = &pdev->dev;
- struct ccp_device *ccp = dev_get_drvdata(dev);
- unsigned long flags;
- unsigned int i;
-
- spin_lock_irqsave(&ccp->cmd_lock, flags);
-
- ccp->suspending = 1;
-
- /* Wake all the queue kthreads to prepare for suspend */
- for (i = 0; i < ccp->cmd_q_count; i++)
- wake_up_process(ccp->cmd_q[i].kthread);
-
- spin_unlock_irqrestore(&ccp->cmd_lock, flags);
-
- /* Wait for all queue kthreads to say they're done */
- while (!ccp_queues_suspended(ccp))
- wait_event_interruptible(ccp->suspend_queue,
- ccp_queues_suspended(ccp));
-
- return 0;
-}
-
-static int ccp_pci_resume(struct pci_dev *pdev)
-{
- struct device *dev = &pdev->dev;
- struct ccp_device *ccp = dev_get_drvdata(dev);
- unsigned long flags;
- unsigned int i;
-
- spin_lock_irqsave(&ccp->cmd_lock, flags);
-
- ccp->suspending = 0;
-
- /* Wake up all the kthreads */
- for (i = 0; i < ccp->cmd_q_count; i++) {
- ccp->cmd_q[i].suspended = 0;
- wake_up_process(ccp->cmd_q[i].kthread);
- }
-
- spin_unlock_irqrestore(&ccp->cmd_lock, flags);
-
- return 0;
-}
-#endif
-
-static const struct pci_device_id ccp_pci_table[] = {
- { PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&ccpv3 },
- { PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&ccpv5a },
- { PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&ccpv5b },
- /* Last entry must be zero */
- { 0, }
-};
-MODULE_DEVICE_TABLE(pci, ccp_pci_table);
-
-static struct pci_driver ccp_pci_driver = {
- .name = "ccp",
- .id_table = ccp_pci_table,
- .probe = ccp_pci_probe,
- .remove = ccp_pci_remove,
-#ifdef CONFIG_PM
- .suspend = ccp_pci_suspend,
- .resume = ccp_pci_resume,
-#endif
-};
-
-int ccp_pci_init(void)
-{
- return pci_register_driver(&ccp_pci_driver);
-}
-
-void ccp_pci_exit(void)
-{
- pci_unregister_driver(&ccp_pci_driver);
-}
diff --git a/drivers/crypto/ccp/ccp-platform.c b/drivers/crypto/ccp/ccp-platform.c
deleted file mode 100644
index e26969e601ad..000000000000
--- a/drivers/crypto/ccp/ccp-platform.c
+++ /dev/null
@@ -1,293 +0,0 @@
-/*
- * AMD Cryptographic Coprocessor (CCP) driver
- *
- * Copyright (C) 2014,2016 Advanced Micro Devices, Inc.
- *
- * Author: Tom Lendacky <thomas.lendacky@amd.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/ioport.h>
-#include <linux/dma-mapping.h>
-#include <linux/kthread.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-#include <linux/ccp.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/acpi.h>
-
-#include "ccp-dev.h"
-
-struct ccp_platform {
- int coherent;
-};
-
-static const struct acpi_device_id ccp_acpi_match[];
-static const struct of_device_id ccp_of_match[];
-
-static struct ccp_vdata *ccp_get_of_version(struct platform_device *pdev)
-{
-#ifdef CONFIG_OF
- const struct of_device_id *match;
-
- match = of_match_node(ccp_of_match, pdev->dev.of_node);
- if (match && match->data)
- return (struct ccp_vdata *)match->data;
-#endif
- return NULL;
-}
-
-static struct ccp_vdata *ccp_get_acpi_version(struct platform_device *pdev)
-{
-#ifdef CONFIG_ACPI
- const struct acpi_device_id *match;
-
- match = acpi_match_device(ccp_acpi_match, &pdev->dev);
- if (match && match->driver_data)
- return (struct ccp_vdata *)match->driver_data;
-#endif
- return NULL;
-}
-
-static int ccp_get_irq(struct ccp_device *ccp)
-{
- struct device *dev = ccp->dev;
- struct platform_device *pdev = to_platform_device(dev);
- int ret;
-
- ret = platform_get_irq(pdev, 0);
- if (ret < 0)
- return ret;
-
- ccp->irq = ret;
- ret = request_irq(ccp->irq, ccp->vdata->perform->irqhandler, 0,
- ccp->name, dev);
- if (ret) {
- dev_notice(dev, "unable to allocate IRQ (%d)\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-static int ccp_get_irqs(struct ccp_device *ccp)
-{
- struct device *dev = ccp->dev;
- int ret;
-
- ret = ccp_get_irq(ccp);
- if (!ret)
- return 0;
-
- /* Couldn't get an interrupt */
- dev_notice(dev, "could not enable interrupts (%d)\n", ret);
-
- return ret;
-}
-
-static void ccp_free_irqs(struct ccp_device *ccp)
-{
- struct device *dev = ccp->dev;
-
- free_irq(ccp->irq, dev);
-}
-
-static struct resource *ccp_find_mmio_area(struct ccp_device *ccp)
-{
- struct device *dev = ccp->dev;
- struct platform_device *pdev = to_platform_device(dev);
- struct resource *ior;
-
- ior = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (ior && (resource_size(ior) >= 0x800))
- return ior;
-
- return NULL;
-}
-
-static int ccp_platform_probe(struct platform_device *pdev)
-{
- struct ccp_device *ccp;
- struct ccp_platform *ccp_platform;
- struct device *dev = &pdev->dev;
- enum dev_dma_attr attr;
- struct resource *ior;
- int ret;
-
- ret = -ENOMEM;
- ccp = ccp_alloc_struct(dev);
- if (!ccp)
- goto e_err;
-
- ccp_platform = devm_kzalloc(dev, sizeof(*ccp_platform), GFP_KERNEL);
- if (!ccp_platform)
- goto e_err;
-
- ccp->dev_specific = ccp_platform;
- ccp->vdata = pdev->dev.of_node ? ccp_get_of_version(pdev)
- : ccp_get_acpi_version(pdev);
- if (!ccp->vdata || !ccp->vdata->version) {
- ret = -ENODEV;
- dev_err(dev, "missing driver data\n");
- goto e_err;
- }
- ccp->get_irq = ccp_get_irqs;
- ccp->free_irq = ccp_free_irqs;
-
- ior = ccp_find_mmio_area(ccp);
- ccp->io_map = devm_ioremap_resource(dev, ior);
- if (IS_ERR(ccp->io_map)) {
- ret = PTR_ERR(ccp->io_map);
- goto e_err;
- }
- ccp->io_regs = ccp->io_map;
-
- attr = device_get_dma_attr(dev);
- if (attr == DEV_DMA_NOT_SUPPORTED) {
- dev_err(dev, "DMA is not supported");
- goto e_err;
- }
-
- ccp_platform->coherent = (attr == DEV_DMA_COHERENT);
- if (ccp_platform->coherent)
- ccp->axcache = CACHE_WB_NO_ALLOC;
- else
- ccp->axcache = CACHE_NONE;
-
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
- if (ret) {
- dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
- goto e_err;
- }
-
- dev_set_drvdata(dev, ccp);
-
- ret = ccp->vdata->perform->init(ccp);
- if (ret)
- goto e_err;
-
- dev_notice(dev, "enabled\n");
-
- return 0;
-
-e_err:
- dev_notice(dev, "initialization failed\n");
- return ret;
-}
-
-static int ccp_platform_remove(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct ccp_device *ccp = dev_get_drvdata(dev);
-
- ccp->vdata->perform->destroy(ccp);
-
- dev_notice(dev, "disabled\n");
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int ccp_platform_suspend(struct platform_device *pdev,
- pm_message_t state)
-{
- struct device *dev = &pdev->dev;
- struct ccp_device *ccp = dev_get_drvdata(dev);
- unsigned long flags;
- unsigned int i;
-
- spin_lock_irqsave(&ccp->cmd_lock, flags);
-
- ccp->suspending = 1;
-
- /* Wake all the queue kthreads to prepare for suspend */
- for (i = 0; i < ccp->cmd_q_count; i++)
- wake_up_process(ccp->cmd_q[i].kthread);
-
- spin_unlock_irqrestore(&ccp->cmd_lock, flags);
-
- /* Wait for all queue kthreads to say they're done */
- while (!ccp_queues_suspended(ccp))
- wait_event_interruptible(ccp->suspend_queue,
- ccp_queues_suspended(ccp));
-
- return 0;
-}
-
-static int ccp_platform_resume(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct ccp_device *ccp = dev_get_drvdata(dev);
- unsigned long flags;
- unsigned int i;
-
- spin_lock_irqsave(&ccp->cmd_lock, flags);
-
- ccp->suspending = 0;
-
- /* Wake up all the kthreads */
- for (i = 0; i < ccp->cmd_q_count; i++) {
- ccp->cmd_q[i].suspended = 0;
- wake_up_process(ccp->cmd_q[i].kthread);
- }
-
- spin_unlock_irqrestore(&ccp->cmd_lock, flags);
-
- return 0;
-}
-#endif
-
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id ccp_acpi_match[] = {
- { "AMDI0C00", (kernel_ulong_t)&ccpv3 },
- { },
-};
-MODULE_DEVICE_TABLE(acpi, ccp_acpi_match);
-#endif
-
-#ifdef CONFIG_OF
-static const struct of_device_id ccp_of_match[] = {
- { .compatible = "amd,ccp-seattle-v1a",
- .data = (const void *)&ccpv3 },
- { },
-};
-MODULE_DEVICE_TABLE(of, ccp_of_match);
-#endif
-
-static struct platform_driver ccp_platform_driver = {
- .driver = {
- .name = "ccp",
-#ifdef CONFIG_ACPI
- .acpi_match_table = ccp_acpi_match,
-#endif
-#ifdef CONFIG_OF
- .of_match_table = ccp_of_match,
-#endif
- },
- .probe = ccp_platform_probe,
- .remove = ccp_platform_remove,
-#ifdef CONFIG_PM
- .suspend = ccp_platform_suspend,
- .resume = ccp_platform_resume,
-#endif
-};
-
-int ccp_platform_init(void)
-{
- return platform_driver_register(&ccp_platform_driver);
-}
-
-void ccp_platform_exit(void)
-{
- platform_driver_unregister(&ccp_platform_driver);
-}
diff --git a/drivers/crypto/ccp/sp-dev.c b/drivers/crypto/ccp/sp-dev.c
new file mode 100644
index 000000000000..bef387c8abfd
--- /dev/null
+++ b/drivers/crypto/ccp/sp-dev.c
@@ -0,0 +1,277 @@
+/*
+ * AMD Secure Processor driver
+ *
+ * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
+ * Author: Brijesh Singh <brijesh.singh@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+#include <linux/ccp.h>
+
+#include "ccp-dev.h"
+#include "sp-dev.h"
+
+MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
+MODULE_AUTHOR("Gary R Hook <gary.hook@amd.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.1.0");
+MODULE_DESCRIPTION("AMD Secure Processor driver");
+
+/* List of SPs, SP count, read-write access lock, and access functions
+ *
+ * Lock structure: get sp_unit_lock for reading whenever we need to
+ * examine the SP list.
+ */
+static DEFINE_RWLOCK(sp_unit_lock);
+static LIST_HEAD(sp_units);
+
+/* Ever-increasing value to produce unique unit numbers */
+static atomic_t sp_ordinal;
+
+static void sp_add_device(struct sp_device *sp)
+{
+ unsigned long flags;
+
+ write_lock_irqsave(&sp_unit_lock, flags);
+
+ list_add_tail(&sp->entry, &sp_units);
+
+ write_unlock_irqrestore(&sp_unit_lock, flags);
+}
+
+static void sp_del_device(struct sp_device *sp)
+{
+ unsigned long flags;
+
+ write_lock_irqsave(&sp_unit_lock, flags);
+
+ list_del(&sp->entry);
+
+ write_unlock_irqrestore(&sp_unit_lock, flags);
+}
+
+static irqreturn_t sp_irq_handler(int irq, void *data)
+{
+ struct sp_device *sp = data;
+
+ if (sp->ccp_irq_handler)
+ sp->ccp_irq_handler(irq, sp->ccp_irq_data);
+
+ if (sp->psp_irq_handler)
+ sp->psp_irq_handler(irq, sp->psp_irq_data);
+
+ return IRQ_HANDLED;
+}
+
+int sp_request_ccp_irq(struct sp_device *sp, irq_handler_t handler,
+ const char *name, void *data)
+{
+ int ret;
+
+ if ((sp->psp_irq == sp->ccp_irq) && sp->dev_vdata->psp_vdata) {
+ /* Need a common routine to manage all interrupts */
+ sp->ccp_irq_data = data;
+ sp->ccp_irq_handler = handler;
+
+ if (!sp->irq_registered) {
+ ret = request_irq(sp->ccp_irq, sp_irq_handler, 0,
+ sp->name, sp);
+ if (ret)
+ return ret;
+
+ sp->irq_registered = true;
+ }
+ } else {
+ /* Each sub-device can manage its own interrupt */
+ ret = request_irq(sp->ccp_irq, handler, 0, name, data);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int sp_request_psp_irq(struct sp_device *sp, irq_handler_t handler,
+ const char *name, void *data)
+{
+ int ret;
+
+ if ((sp->psp_irq == sp->ccp_irq) && sp->dev_vdata->ccp_vdata) {
+ /* Need a common routine to manage all interrupts */
+ sp->psp_irq_data = data;
+ sp->psp_irq_handler = handler;
+
+ if (!sp->irq_registered) {
+ ret = request_irq(sp->psp_irq, sp_irq_handler, 0,
+ sp->name, sp);
+ if (ret)
+ return ret;
+
+ sp->irq_registered = true;
+ }
+ } else {
+ /* Each sub-device can manage its own interrupt */
+ ret = request_irq(sp->psp_irq, handler, 0, name, data);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void sp_free_ccp_irq(struct sp_device *sp, void *data)
+{
+ if ((sp->psp_irq == sp->ccp_irq) && sp->dev_vdata->psp_vdata) {
+ /* Using common routine to manage all interrupts */
+ if (!sp->psp_irq_handler) {
+ /* Nothing else using it, so free it */
+ free_irq(sp->ccp_irq, sp);
+
+ sp->irq_registered = false;
+ }
+
+ sp->ccp_irq_handler = NULL;
+ sp->ccp_irq_data = NULL;
+ } else {
+ /* Each sub-device can manage its own interrupt */
+ free_irq(sp->ccp_irq, data);
+ }
+}
+
+void sp_free_psp_irq(struct sp_device *sp, void *data)
+{
+ if ((sp->psp_irq == sp->ccp_irq) && sp->dev_vdata->ccp_vdata) {
+ /* Using common routine to manage all interrupts */
+ if (!sp->ccp_irq_handler) {
+ /* Nothing else using it, so free it */
+ free_irq(sp->psp_irq, sp);
+
+ sp->irq_registered = false;
+ }
+
+ sp->psp_irq_handler = NULL;
+ sp->psp_irq_data = NULL;
+ } else {
+ /* Each sub-device can manage its own interrupt */
+ free_irq(sp->psp_irq, data);
+ }
+}
+
+/**
+ * sp_alloc_struct - allocate and initialize the sp_device struct
+ *
+ * @dev: device struct of the SP
+ */
+struct sp_device *sp_alloc_struct(struct device *dev)
+{
+ struct sp_device *sp;
+
+ sp = devm_kzalloc(dev, sizeof(*sp), GFP_KERNEL);
+ if (!sp)
+ return NULL;
+
+ sp->dev = dev;
+ sp->ord = atomic_inc_return(&sp_ordinal);
+ snprintf(sp->name, SP_MAX_NAME_LEN, "sp-%u", sp->ord);
+
+ return sp;
+}
+
+int sp_init(struct sp_device *sp)
+{
+ sp_add_device(sp);
+
+ if (sp->dev_vdata->ccp_vdata)
+ ccp_dev_init(sp);
+
+ return 0;
+}
+
+void sp_destroy(struct sp_device *sp)
+{
+ if (sp->dev_vdata->ccp_vdata)
+ ccp_dev_destroy(sp);
+
+ sp_del_device(sp);
+}
+
+#ifdef CONFIG_PM
+int sp_suspend(struct sp_device *sp, pm_message_t state)
+{
+ int ret;
+
+ if (sp->dev_vdata->ccp_vdata) {
+ ret = ccp_dev_suspend(sp, state);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int sp_resume(struct sp_device *sp)
+{
+ int ret;
+
+ if (sp->dev_vdata->ccp_vdata) {
+ ret = ccp_dev_resume(sp);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+static int __init sp_mod_init(void)
+{
+#ifdef CONFIG_X86
+ int ret;
+
+ ret = sp_pci_init();
+ if (ret)
+ return ret;
+
+ return 0;
+#endif
+
+#ifdef CONFIG_ARM64
+ int ret;
+
+ ret = sp_platform_init();
+ if (ret)
+ return ret;
+
+ return 0;
+#endif
+
+ return -ENODEV;
+}
+
+static void __exit sp_mod_exit(void)
+{
+#ifdef CONFIG_X86
+ sp_pci_exit();
+#endif
+
+#ifdef CONFIG_ARM64
+ sp_platform_exit();
+#endif
+}
+
+module_init(sp_mod_init);
+module_exit(sp_mod_exit);
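
Not part of the patch: a minimal sketch, under the assumption of a hypothetical CCP-side caller (demo_ccp_attach/demo_ccp_detach, demo_ccp_handler and the "demo-ccp" name are placeholders), of how the shared-IRQ helpers above are meant to be used. When the PSP and CCP share one line, sp_request_ccp_irq()/sp_request_psp_irq() register sp_irq_handler() once and dispatch to whichever handlers are set; otherwise each sub-device gets its own request_irq().

#include <linux/interrupt.h>

#include "ccp-dev.h"
#include "sp-dev.h"

static irqreturn_t demo_ccp_handler(int irq, void *data)
{
	struct ccp_device *ccp = data;

	/* acknowledge the CCP interrupt and schedule further processing */
	dev_dbg(ccp->dev, "CCP interrupt %d\n", irq);
	return IRQ_HANDLED;
}

static int demo_ccp_attach(struct sp_device *sp, struct ccp_device *ccp)
{
	/*
	 * If the PSP shares the line, sp-dev.c registers sp_irq_handler()
	 * once and fans the interrupt out to both sub-devices; otherwise
	 * sp->ccp_irq is requested directly for the CCP alone.
	 */
	return sp_request_ccp_irq(sp, demo_ccp_handler, "demo-ccp", ccp);
}

static void demo_ccp_detach(struct sp_device *sp, struct ccp_device *ccp)
{
	/* Teardown mirrors the request */
	sp_free_ccp_irq(sp, ccp);
}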
diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h
new file mode 100644
index 000000000000..5ab486ade1ad
--- /dev/null
+++ b/drivers/crypto/ccp/sp-dev.h
@@ -0,0 +1,133 @@
+/*
+ * AMD Secure Processor driver
+ *
+ * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
+ * Author: Brijesh Singh <brijesh.singh@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __SP_DEV_H__
+#define __SP_DEV_H__
+
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/dmapool.h>
+#include <linux/hw_random.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+
+#define SP_MAX_NAME_LEN 32
+
+#define CACHE_NONE 0x00
+#define CACHE_WB_NO_ALLOC 0xb7
+
+/* Structure to hold CCP device data */
+struct ccp_device;
+struct ccp_vdata {
+ const unsigned int version;
+ const unsigned int dma_chan_attr;
+ void (*setup)(struct ccp_device *);
+ const struct ccp_actions *perform;
+ const unsigned int offset;
+ const unsigned int rsamax;
+};
+/* Structure to hold SP device data */
+struct sp_dev_vdata {
+ const unsigned int bar;
+
+ const struct ccp_vdata *ccp_vdata;
+ void *psp_vdata;
+};
+
+struct sp_device {
+ struct list_head entry;
+
+ struct device *dev;
+
+ struct sp_dev_vdata *dev_vdata;
+ unsigned int ord;
+ char name[SP_MAX_NAME_LEN];
+
+ /* Bus specific device information */
+ void *dev_specific;
+
+ /* I/O area used for device communication. */
+ void __iomem *io_map;
+
+ /* DMA caching attribute support */
+ unsigned int axcache;
+
+ bool irq_registered;
+ bool use_tasklet;
+
+ unsigned int ccp_irq;
+ irq_handler_t ccp_irq_handler;
+ void *ccp_irq_data;
+
+ unsigned int psp_irq;
+ irq_handler_t psp_irq_handler;
+ void *psp_irq_data;
+
+ void *ccp_data;
+ void *psp_data;
+};
+
+int sp_pci_init(void);
+void sp_pci_exit(void);
+
+int sp_platform_init(void);
+void sp_platform_exit(void);
+
+struct sp_device *sp_alloc_struct(struct device *dev);
+
+int sp_init(struct sp_device *sp);
+void sp_destroy(struct sp_device *sp);
+struct sp_device *sp_get_master(void);
+
+int sp_suspend(struct sp_device *sp, pm_message_t state);
+int sp_resume(struct sp_device *sp);
+int sp_request_ccp_irq(struct sp_device *sp, irq_handler_t handler,
+ const char *name, void *data);
+void sp_free_ccp_irq(struct sp_device *sp, void *data);
+int sp_request_psp_irq(struct sp_device *sp, irq_handler_t handler,
+ const char *name, void *data);
+void sp_free_psp_irq(struct sp_device *sp, void *data);
+
+#ifdef CONFIG_CRYPTO_DEV_SP_CCP
+
+int ccp_dev_init(struct sp_device *sp);
+void ccp_dev_destroy(struct sp_device *sp);
+
+int ccp_dev_suspend(struct sp_device *sp, pm_message_t state);
+int ccp_dev_resume(struct sp_device *sp);
+
+#else /* !CONFIG_CRYPTO_DEV_SP_CCP */
+
+static inline int ccp_dev_init(struct sp_device *sp)
+{
+ return 0;
+}
+static inline void ccp_dev_destroy(struct sp_device *sp) { }
+
+static inline int ccp_dev_suspend(struct sp_device *sp, pm_message_t state)
+{
+ return 0;
+}
+static inline int ccp_dev_resume(struct sp_device *sp)
+{
+ return 0;
+}
+#endif /* CONFIG_CRYPTO_DEV_SP_CCP */
+
+#endif
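
For orientation, the structures above compose as in the sketch below; demo_vdata is illustrative and not part of the header, while ccpv3 and CONFIG_CRYPTO_DEV_SP_CCP are real symbols from this patch. Because the disabled-CCP stubs above turn ccp_dev_init()/ccp_dev_destroy() into no-ops, bus front-ends can call sp_init()/sp_destroy() without #ifdefs of their own.

/* Illustrative device-variant description (not part of the patch) */
static const struct sp_dev_vdata demo_vdata = {
	.bar = 2,			/* MMIO BAR holding the register block */
#ifdef CONFIG_CRYPTO_DEV_SP_CCP
	.ccp_vdata = &ccpv3,		/* CCP personality when built in */
#endif
	/* .psp_vdata remains NULL until PSP support exists */
};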
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
new file mode 100644
index 000000000000..9859aa683a28
--- /dev/null
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -0,0 +1,276 @@
+/*
+ * AMD Secure Processor device driver
+ *
+ * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/dma-mapping.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/ccp.h>
+
+#include "ccp-dev.h"
+
+#define MSIX_VECTORS 2
+
+struct sp_pci {
+ int msix_count;
+ struct msix_entry msix_entry[MSIX_VECTORS];
+};
+
+static int sp_get_msix_irqs(struct sp_device *sp)
+{
+ struct sp_pci *sp_pci = sp->dev_specific;
+ struct device *dev = sp->dev;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int v, ret;
+
+ for (v = 0; v < ARRAY_SIZE(sp_pci->msix_entry); v++)
+ sp_pci->msix_entry[v].entry = v;
+
+ ret = pci_enable_msix_range(pdev, sp_pci->msix_entry, 1, v);
+ if (ret < 0)
+ return ret;
+
+ sp_pci->msix_count = ret;
+ sp->use_tasklet = true;
+
+ sp->psp_irq = sp_pci->msix_entry[0].vector;
+ sp->ccp_irq = (sp_pci->msix_count > 1) ? sp_pci->msix_entry[1].vector
+ : sp_pci->msix_entry[0].vector;
+ return 0;
+}
+
+static int sp_get_msi_irq(struct sp_device *sp)
+{
+ struct device *dev = sp->dev;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ ret = pci_enable_msi(pdev);
+ if (ret)
+ return ret;
+
+ sp->ccp_irq = pdev->irq;
+ sp->psp_irq = pdev->irq;
+
+ return 0;
+}
+
+static int sp_get_irqs(struct sp_device *sp)
+{
+ struct device *dev = sp->dev;
+ int ret;
+
+ ret = sp_get_msix_irqs(sp);
+ if (!ret)
+ return 0;
+
+ /* Couldn't get MSI-X vectors, try MSI */
+ dev_notice(dev, "could not enable MSI-X (%d), trying MSI\n", ret);
+ ret = sp_get_msi_irq(sp);
+ if (!ret)
+ return 0;
+
+ /* Couldn't get MSI interrupt */
+ dev_notice(dev, "could not enable MSI (%d)\n", ret);
+
+ return ret;
+}
+
+static void sp_free_irqs(struct sp_device *sp)
+{
+ struct sp_pci *sp_pci = sp->dev_specific;
+ struct device *dev = sp->dev;
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ if (sp_pci->msix_count)
+ pci_disable_msix(pdev);
+ else if (sp->psp_irq)
+ pci_disable_msi(pdev);
+
+ sp->ccp_irq = 0;
+ sp->psp_irq = 0;
+}
+
+static int sp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct sp_device *sp;
+ struct sp_pci *sp_pci;
+ struct device *dev = &pdev->dev;
+ void __iomem * const *iomap_table;
+ int bar_mask;
+ int ret;
+
+ ret = -ENOMEM;
+ sp = sp_alloc_struct(dev);
+ if (!sp)
+ goto e_err;
+
+ sp_pci = devm_kzalloc(dev, sizeof(*sp_pci), GFP_KERNEL);
+ if (!sp_pci)
+ goto e_err;
+
+ sp->dev_specific = sp_pci;
+ sp->dev_vdata = (struct sp_dev_vdata *)id->driver_data;
+ if (!sp->dev_vdata) {
+ ret = -ENODEV;
+ dev_err(dev, "missing driver data\n");
+ goto e_err;
+ }
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ dev_err(dev, "pcim_enable_device failed (%d)\n", ret);
+ goto e_err;
+ }
+
+ bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+ ret = pcim_iomap_regions(pdev, bar_mask, "ccp");
+ if (ret) {
+ dev_err(dev, "pcim_iomap_regions failed (%d)\n", ret);
+ goto e_err;
+ }
+
+ iomap_table = pcim_iomap_table(pdev);
+ if (!iomap_table) {
+ dev_err(dev, "pcim_iomap_table failed\n");
+ ret = -ENOMEM;
+ goto e_err;
+ }
+
+ sp->io_map = iomap_table[sp->dev_vdata->bar];
+ if (!sp->io_map) {
+ dev_err(dev, "ioremap failed\n");
+ ret = -ENOMEM;
+ goto e_err;
+ }
+
+ ret = sp_get_irqs(sp);
+ if (ret)
+ goto e_err;
+
+ pci_set_master(pdev);
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (ret) {
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n",
+ ret);
+ goto e_err;
+ }
+ }
+
+ dev_set_drvdata(dev, sp);
+
+ ret = sp_init(sp);
+ if (ret)
+ goto e_err;
+
+ dev_notice(dev, "enabled\n");
+
+ return 0;
+
+e_err:
+ dev_notice(dev, "initialization failed\n");
+ return ret;
+}
+
+static void sp_pci_remove(struct pci_dev *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sp_device *sp = dev_get_drvdata(dev);
+
+ if (!sp)
+ return;
+
+ sp_destroy(sp);
+
+ sp_free_irqs(sp);
+
+ dev_notice(dev, "disabled\n");
+}
+
+#ifdef CONFIG_PM
+static int sp_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct device *dev = &pdev->dev;
+ struct sp_device *sp = dev_get_drvdata(dev);
+
+ return sp_suspend(sp, state);
+}
+
+static int sp_pci_resume(struct pci_dev *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sp_device *sp = dev_get_drvdata(dev);
+
+ return sp_resume(sp);
+}
+#endif
+
+static const struct sp_dev_vdata dev_vdata[] = {
+ {
+ .bar = 2,
+#ifdef CONFIG_CRYPTO_DEV_SP_CCP
+ .ccp_vdata = &ccpv3,
+#endif
+ },
+ {
+ .bar = 2,
+#ifdef CONFIG_CRYPTO_DEV_SP_CCP
+ .ccp_vdata = &ccpv5a,
+#endif
+ },
+ {
+ .bar = 2,
+#ifdef CONFIG_CRYPTO_DEV_SP_CCP
+ .ccp_vdata = &ccpv5b,
+#endif
+ },
+};
+static const struct pci_device_id sp_pci_table[] = {
+ { PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&dev_vdata[0] },
+ { PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&dev_vdata[1] },
+ { PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&dev_vdata[2] },
+ /* Last entry must be zero */
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, sp_pci_table);
+
+static struct pci_driver sp_pci_driver = {
+ .name = "ccp",
+ .id_table = sp_pci_table,
+ .probe = sp_pci_probe,
+ .remove = sp_pci_remove,
+#ifdef CONFIG_PM
+ .suspend = sp_pci_suspend,
+ .resume = sp_pci_resume,
+#endif
+};
+
+int sp_pci_init(void)
+{
+ return pci_register_driver(&sp_pci_driver);
+}
+
+void sp_pci_exit(void)
+{
+ pci_unregister_driver(&sp_pci_driver);
+}
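
Unlike the deleted ccp-pci.c, the probe above uses the managed (devres) PCI helpers, which is why the e_iomap/e_device/e_regions unwind labels are gone: mappings and the enabled device are released automatically on unbind or probe failure. A stand-alone sketch of that pattern, with demo_pci_probe and the "demo" region name as illustrative placeholders:

#include <linux/pci.h>

static int demo_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem * const *iomap;
	int ret;

	ret = pcim_enable_device(pdev);		/* undone automatically on unbind */
	if (ret)
		return ret;

	ret = pcim_iomap_regions(pdev, pci_select_bars(pdev, IORESOURCE_MEM),
				 "demo");	/* request + ioremap all MEM BARs */
	if (ret)
		return ret;

	iomap = pcim_iomap_table(pdev);		/* table of the mapped BARs */
	if (!iomap || !iomap[0])
		return -ENOMEM;

	pci_set_master(pdev);
	/* ... use iomap[0]; no explicit unwinding is needed on failure ... */
	return 0;
}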
diff --git a/drivers/crypto/ccp/sp-platform.c b/drivers/crypto/ccp/sp-platform.c
new file mode 100644
index 000000000000..71734f254fd1
--- /dev/null
+++ b/drivers/crypto/ccp/sp-platform.c
@@ -0,0 +1,256 @@
+/*
+ * AMD Secure Processor device driver
+ *
+ * Copyright (C) 2014,2016 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/ioport.h>
+#include <linux/dma-mapping.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/ccp.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/acpi.h>
+
+#include "ccp-dev.h"
+
+struct sp_platform {
+ int coherent;
+ unsigned int irq_count;
+};
+
+static const struct acpi_device_id sp_acpi_match[];
+static const struct of_device_id sp_of_match[];
+
+static struct sp_dev_vdata *sp_get_of_version(struct platform_device *pdev)
+{
+#ifdef CONFIG_OF
+ const struct of_device_id *match;
+
+ match = of_match_node(sp_of_match, pdev->dev.of_node);
+ if (match && match->data)
+ return (struct sp_dev_vdata *)match->data;
+#endif
+ return NULL;
+}
+
+static struct sp_dev_vdata *sp_get_acpi_version(struct platform_device *pdev)
+{
+#ifdef CONFIG_ACPI
+ const struct acpi_device_id *match;
+
+ match = acpi_match_device(sp_acpi_match, &pdev->dev);
+ if (match && match->driver_data)
+ return (struct sp_dev_vdata *)match->driver_data;
+#endif
+ return NULL;
+}
+
+static int sp_get_irqs(struct sp_device *sp)
+{
+ struct sp_platform *sp_platform = sp->dev_specific;
+ struct device *dev = sp->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ unsigned int i, count;
+ int ret;
+
+ for (i = 0, count = 0; i < pdev->num_resources; i++) {
+ struct resource *res = &pdev->resource[i];
+
+ if (resource_type(res) == IORESOURCE_IRQ)
+ count++;
+ }
+
+ sp_platform->irq_count = count;
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_notice(dev, "unable to get IRQ (%d)\n", ret);
+ return ret;
+ }
+
+ sp->psp_irq = ret;
+ if (count == 1) {
+ sp->ccp_irq = ret;
+ } else {
+ ret = platform_get_irq(pdev, 1);
+ if (ret < 0) {
+ dev_notice(dev, "unable to get IRQ (%d)\n", ret);
+ return ret;
+ }
+
+ sp->ccp_irq = ret;
+ }
+
+ return 0;
+}
+
+static int sp_platform_probe(struct platform_device *pdev)
+{
+ struct sp_device *sp;
+ struct sp_platform *sp_platform;
+ struct device *dev = &pdev->dev;
+ enum dev_dma_attr attr;
+ struct resource *ior;
+ int ret;
+
+ ret = -ENOMEM;
+ sp = sp_alloc_struct(dev);
+ if (!sp)
+ goto e_err;
+
+ sp_platform = devm_kzalloc(dev, sizeof(*sp_platform), GFP_KERNEL);
+ if (!sp_platform)
+ goto e_err;
+
+ sp->dev_specific = sp_platform;
+ sp->dev_vdata = pdev->dev.of_node ? sp_get_of_version(pdev)
+ : sp_get_acpi_version(pdev);
+ if (!sp->dev_vdata) {
+ ret = -ENODEV;
+ dev_err(dev, "missing driver data\n");
+ goto e_err;
+ }
+
+ ior = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sp->io_map = devm_ioremap_resource(dev, ior);
+ if (IS_ERR(sp->io_map)) {
+ ret = PTR_ERR(sp->io_map);
+ goto e_err;
+ }
+
+ attr = device_get_dma_attr(dev);
+ if (attr == DEV_DMA_NOT_SUPPORTED) {
+ dev_err(dev, "DMA is not supported");
+ goto e_err;
+ }
+
+ sp_platform->coherent = (attr == DEV_DMA_COHERENT);
+ if (sp_platform->coherent)
+ sp->axcache = CACHE_WB_NO_ALLOC;
+ else
+ sp->axcache = CACHE_NONE;
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (ret) {
+ dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
+ goto e_err;
+ }
+
+ ret = sp_get_irqs(sp);
+ if (ret)
+ goto e_err;
+
+ dev_set_drvdata(dev, sp);
+
+ ret = sp_init(sp);
+ if (ret)
+ goto e_err;
+
+ dev_notice(dev, "enabled\n");
+
+ return 0;
+
+e_err:
+ dev_notice(dev, "initialization failed\n");
+ return ret;
+}
+
+static int sp_platform_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sp_device *sp = dev_get_drvdata(dev);
+
+ sp_destroy(sp);
+
+ dev_notice(dev, "disabled\n");
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int sp_platform_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct device *dev = &pdev->dev;
+ struct sp_device *sp = dev_get_drvdata(dev);
+
+ return sp_suspend(sp, state);
+}
+
+static int sp_platform_resume(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sp_device *sp = dev_get_drvdata(dev);
+
+ return sp_resume(sp);
+}
+#endif
+
+static const struct sp_dev_vdata dev_vdata[] = {
+ {
+ .bar = 0,
+#ifdef CONFIG_CRYPTO_DEV_SP_CCP
+ .ccp_vdata = &ccpv3_platform,
+#endif
+ },
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id sp_acpi_match[] = {
+ { "AMDI0C00", (kernel_ulong_t)&dev_vdata[0] },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, sp_acpi_match);
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id sp_of_match[] = {
+ { .compatible = "amd,ccp-seattle-v1a",
+ .data = (const void *)&dev_vdata[0] },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sp_of_match);
+#endif
+
+static struct platform_driver sp_platform_driver = {
+ .driver = {
+ .name = "ccp",
+#ifdef CONFIG_ACPI
+ .acpi_match_table = sp_acpi_match,
+#endif
+#ifdef CONFIG_OF
+ .of_match_table = sp_of_match,
+#endif
+ },
+ .probe = sp_platform_probe,
+ .remove = sp_platform_remove,
+#ifdef CONFIG_PM
+ .suspend = sp_platform_suspend,
+ .resume = sp_platform_resume,
+#endif
+};
+
+int sp_platform_init(void)
+{
+ return platform_driver_register(&sp_platform_driver);
+}
+
+void sp_platform_exit(void)
+{
+ platform_driver_unregister(&sp_platform_driver);
+}
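
The coherency handling above follows the usual firmware-described pattern: device_get_dma_attr() reports whether the device's DMA is coherent, and the driver picks the AXI cache attribute accordingly. A condensed, illustrative helper (demo_set_axcache is a placeholder; CACHE_WB_NO_ALLOC and CACHE_NONE come from sp-dev.h in this patch):

#include <linux/errno.h>
#include <linux/property.h>

#include "sp-dev.h"

static int demo_set_axcache(struct device *dev, unsigned int *axcache)
{
	enum dev_dma_attr attr = device_get_dma_attr(dev);

	if (attr == DEV_DMA_NOT_SUPPORTED)
		return -ENODEV;

	/* Coherent masters may use write-back, no-allocate caching */
	*axcache = (attr == DEV_DMA_COHERENT) ? CACHE_WB_NO_ALLOC : CACHE_NONE;
	return 0;
}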
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index fe538e5287a5..eb2a0a73cbed 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -1,10 +1,10 @@
/* Copyright (C) 2004-2006, Advanced Micro Devices, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
#include <linux/module.h>
#include <linux/kernel.h>
@@ -30,6 +30,7 @@ static inline void
_writefield(u32 offset, void *value)
{
int i;
+
for (i = 0; i < 4; i++)
iowrite32(((u32 *) value)[i], _iobase + offset + (i * 4));
}
@@ -39,6 +40,7 @@ static inline void
_readfield(u32 offset, void *value)
{
int i;
+
for (i = 0; i < 4; i++)
((u32 *) value)[i] = ioread32(_iobase + offset + (i * 4));
}
@@ -515,6 +517,7 @@ static void geode_aes_remove(struct pci_dev *dev)
static int geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
int ret;
+
ret = pci_enable_device(dev);
if (ret)
return ret;
@@ -570,7 +573,7 @@ static int geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
}
static struct pci_device_id geode_aes_tbl[] = {
- { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), } ,
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), },
{ 0, }
};
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index 0c6a917a9ab8..b87000a0a01c 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -1054,7 +1054,7 @@ res_err:
static int img_hash_remove(struct platform_device *pdev)
{
- static struct img_hash_dev *hdev;
+ struct img_hash_dev *hdev;
hdev = platform_get_drvdata(pdev);
spin_lock(&img_hash.lock);
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index e7f87ac12685..89ba9e85c0f3 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -773,7 +773,6 @@ static int safexcel_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct resource *res;
struct safexcel_crypto_priv *priv;
- u64 dma_mask;
int i, ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -802,9 +801,7 @@ static int safexcel_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
}
- if (of_property_read_u64(dev->of_node, "dma-mask", &dma_mask))
- dma_mask = DMA_BIT_MASK(64);
- ret = dma_set_mask_and_coherent(dev, dma_mask);
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (ret)
goto err_clk;
@@ -842,9 +839,10 @@ static int safexcel_probe(struct platform_device *pdev)
snprintf(irq_name, 6, "ring%d", i);
irq = safexcel_request_ring_irq(pdev, irq_name, safexcel_irq_ring,
ring_irq);
-
- if (irq < 0)
+ if (irq < 0) {
+ ret = irq;
goto err_clk;
+ }
priv->ring[i].work_data.priv = priv;
priv->ring[i].work_data.ring = i;
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 8527a5899a2f..3f819399cd95 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -883,10 +883,7 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
if (ret)
return ret;
- memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE);
- memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE);
-
- for (i = 0; i < ARRAY_SIZE(istate.state); i++) {
+ for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
ctx->base.needs_inv = true;
@@ -894,6 +891,9 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
}
}
+ memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE);
+ memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE);
+
return 0;
}
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 427cbe012729..dadc4a808df5 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -1073,7 +1073,7 @@ static int aead_perform(struct aead_request *req, int encrypt,
req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
&crypt->icv_rev_aes);
if (unlikely(!req_ctx->hmac_virt))
- goto free_buf_src;
+ goto free_buf_dst;
if (!encrypt) {
scatterwalk_map_and_copy(req_ctx->hmac_virt,
req->src, cryptlen, authsize, 0);
@@ -1088,10 +1088,10 @@ static int aead_perform(struct aead_request *req, int encrypt,
BUG_ON(qmgr_stat_overflow(SEND_QID));
return -EINPROGRESS;
-free_buf_src:
- free_buf_chain(dev, req_ctx->src, crypt->src_buf);
free_buf_dst:
free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
+free_buf_src:
+ free_buf_chain(dev, req_ctx->src, crypt->src_buf);
crypt->ctl_flags = CTL_FLAG_UNUSED;
return -ENOMEM;
}
diff --git a/drivers/crypto/mediatek/mtk-platform.c b/drivers/crypto/mediatek/mtk-platform.c
index 000b6500a22d..b182e941b0cd 100644
--- a/drivers/crypto/mediatek/mtk-platform.c
+++ b/drivers/crypto/mediatek/mtk-platform.c
@@ -500,7 +500,7 @@ static int mtk_crypto_probe(struct platform_device *pdev)
cryp->irq[i] = platform_get_irq(pdev, i);
if (cryp->irq[i] < 0) {
dev_err(cryp->dev, "no IRQ:%d resource info\n", i);
- return -ENXIO;
+ return cryp->irq[i];
}
}
diff --git a/drivers/crypto/mxc-scc.c b/drivers/crypto/mxc-scc.c
index ee4be1b0d30b..e01c46387df8 100644
--- a/drivers/crypto/mxc-scc.c
+++ b/drivers/crypto/mxc-scc.c
@@ -708,8 +708,8 @@ static int mxc_scc_probe(struct platform_device *pdev)
for (i = 0; i < 2; i++) {
irq = platform_get_irq(pdev, i);
if (irq < 0) {
- dev_err(dev, "failed to get irq resource\n");
- ret = -EINVAL;
+ dev_err(dev, "failed to get irq resource: %d\n", irq);
+ ret = irq;
goto err_out;
}
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index 625ee50fd78b..764be3e6933c 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -908,12 +908,16 @@ static int mxs_dcp_probe(struct platform_device *pdev)
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dcp_vmi_irq = platform_get_irq(pdev, 0);
- if (dcp_vmi_irq < 0)
+ if (dcp_vmi_irq < 0) {
+ dev_err(dev, "Failed to get IRQ: (%d)!\n", dcp_vmi_irq);
return dcp_vmi_irq;
+ }
dcp_irq = platform_get_irq(pdev, 1);
- if (dcp_irq < 0)
+ if (dcp_irq < 0) {
+ dev_err(dev, "Failed to get IRQ: (%d)!\n", dcp_irq);
return dcp_irq;
+ }
sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
if (!sdcp)
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 269451375b63..a9fd8b9e86cd 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1730,8 +1730,8 @@ static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
continue;
id = mdesc_get_property(mdesc, tgt, "id", NULL);
if (table[*id] != NULL) {
- dev_err(&dev->dev, "%s: SPU cpu slot already set.\n",
- dev->dev.of_node->full_name);
+ dev_err(&dev->dev, "%pOF: SPU cpu slot already set.\n",
+ dev->dev.of_node);
return -EINVAL;
}
cpumask_set_cpu(*id, &p->sharing);
@@ -1751,8 +1751,8 @@ static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
if (!p) {
- dev_err(&dev->dev, "%s: Could not allocate SPU queue.\n",
- dev->dev.of_node->full_name);
+ dev_err(&dev->dev, "%pOF: Could not allocate SPU queue.\n",
+ dev->dev.of_node);
return -ENOMEM;
}
@@ -1981,41 +1981,39 @@ static void n2_spu_driver_version(void)
static int n2_crypto_probe(struct platform_device *dev)
{
struct mdesc_handle *mdesc;
- const char *full_name;
struct n2_crypto *np;
int err;
n2_spu_driver_version();
- full_name = dev->dev.of_node->full_name;
- pr_info("Found N2CP at %s\n", full_name);
+ pr_info("Found N2CP at %pOF\n", dev->dev.of_node);
np = alloc_n2cp();
if (!np) {
- dev_err(&dev->dev, "%s: Unable to allocate n2cp.\n",
- full_name);
+ dev_err(&dev->dev, "%pOF: Unable to allocate n2cp.\n",
+ dev->dev.of_node);
return -ENOMEM;
}
err = grab_global_resources();
if (err) {
- dev_err(&dev->dev, "%s: Unable to grab "
- "global resources.\n", full_name);
+ dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
+ dev->dev.of_node);
goto out_free_n2cp;
}
mdesc = mdesc_grab();
if (!mdesc) {
- dev_err(&dev->dev, "%s: Unable to grab MDESC.\n",
- full_name);
+ dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
+ dev->dev.of_node);
err = -ENODEV;
goto out_free_global;
}
err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp");
if (err) {
- dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n",
- full_name);
+ dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
+ dev->dev.of_node);
mdesc_release(mdesc);
goto out_free_global;
}
@@ -2026,15 +2024,15 @@ static int n2_crypto_probe(struct platform_device *dev)
mdesc_release(mdesc);
if (err) {
- dev_err(&dev->dev, "%s: CWQ MDESC scan failed.\n",
- full_name);
+ dev_err(&dev->dev, "%pOF: CWQ MDESC scan failed.\n",
+ dev->dev.of_node);
goto out_free_global;
}
err = n2_register_algs();
if (err) {
- dev_err(&dev->dev, "%s: Unable to register algorithms.\n",
- full_name);
+ dev_err(&dev->dev, "%pOF: Unable to register algorithms.\n",
+ dev->dev.of_node);
goto out_free_spu_list;
}
@@ -2092,42 +2090,40 @@ static void free_ncp(struct n2_mau *mp)
static int n2_mau_probe(struct platform_device *dev)
{
struct mdesc_handle *mdesc;
- const char *full_name;
struct n2_mau *mp;
int err;
n2_spu_driver_version();
- full_name = dev->dev.of_node->full_name;
- pr_info("Found NCP at %s\n", full_name);
+ pr_info("Found NCP at %pOF\n", dev->dev.of_node);
mp = alloc_ncp();
if (!mp) {
- dev_err(&dev->dev, "%s: Unable to allocate ncp.\n",
- full_name);
+ dev_err(&dev->dev, "%pOF: Unable to allocate ncp.\n",
+ dev->dev.of_node);
return -ENOMEM;
}
err = grab_global_resources();
if (err) {
- dev_err(&dev->dev, "%s: Unable to grab "
- "global resources.\n", full_name);
+ dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
+ dev->dev.of_node);
goto out_free_ncp;
}
mdesc = mdesc_grab();
if (!mdesc) {
- dev_err(&dev->dev, "%s: Unable to grab MDESC.\n",
- full_name);
+ dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
+ dev->dev.of_node);
err = -ENODEV;
goto out_free_global;
}
err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp");
if (err) {
- dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n",
- full_name);
+ dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
+ dev->dev.of_node);
mdesc_release(mdesc);
goto out_free_global;
}
@@ -2138,8 +2134,8 @@ static int n2_mau_probe(struct platform_device *dev)
mdesc_release(mdesc);
if (err) {
- dev_err(&dev->dev, "%s: MAU MDESC scan failed.\n",
- full_name);
+ dev_err(&dev->dev, "%pOF: MAU MDESC scan failed.\n",
+ dev->dev.of_node);
goto out_free_global;
}
diff --git a/drivers/crypto/nx/Kconfig b/drivers/crypto/nx/Kconfig
index ad7552a6998c..cd5dda9c48f4 100644
--- a/drivers/crypto/nx/Kconfig
+++ b/drivers/crypto/nx/Kconfig
@@ -38,6 +38,7 @@ config CRYPTO_DEV_NX_COMPRESS_PSERIES
config CRYPTO_DEV_NX_COMPRESS_POWERNV
tristate "Compression acceleration support on PowerNV platform"
depends on PPC_POWERNV
+ depends on PPC_VAS
default y
help
Support for PowerPC Nest (NX) compression acceleration. This
diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c
index 1710f80a09ec..874ddf5e9087 100644
--- a/drivers/crypto/nx/nx-842-powernv.c
+++ b/drivers/crypto/nx/nx-842-powernv.c
@@ -22,6 +22,8 @@
#include <asm/prom.h>
#include <asm/icswx.h>
+#include <asm/vas.h>
+#include <asm/reg.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
@@ -31,6 +33,9 @@ MODULE_ALIAS_CRYPTO("842-nx");
#define WORKMEM_ALIGN (CRB_ALIGN)
#define CSB_WAIT_MAX (5000) /* ms */
+#define VAS_RETRIES (10)
+/* # of requests allowed per RxFIFO at a time. 0 for unlimited */
+#define MAX_CREDITS_PER_RXFIFO (1024)
struct nx842_workmem {
/* Below fields must be properly aligned */
@@ -41,19 +46,34 @@ struct nx842_workmem {
ktime_t start;
+ struct vas_window *txwin; /* Used with VAS function */
char padding[WORKMEM_ALIGN]; /* unused, to allow alignment */
} __packed __aligned(WORKMEM_ALIGN);
struct nx842_coproc {
unsigned int chip_id;
unsigned int ct;
- unsigned int ci;
+ unsigned int ci; /* Coprocessor instance, used with icswx */
+ struct {
+ struct vas_window *rxwin;
+ int id;
+ } vas;
struct list_head list;
};
+/*
+ * Send the request to NX engine on the chip for the corresponding CPU
+ * where the process is executing. Use with VAS function.
+ */
+static DEFINE_PER_CPU(struct nx842_coproc *, coproc_inst);
+
/* no cpu hotplug on powernv, so this list never changes after init */
static LIST_HEAD(nx842_coprocs);
-static unsigned int nx842_ct;
+static unsigned int nx842_ct; /* used in icswx function */
+
+static int (*nx842_powernv_exec)(const unsigned char *in,
+ unsigned int inlen, unsigned char *out,
+ unsigned int *outlenp, void *workmem, int fc);
/**
* setup_indirect_dde - Setup an indirect DDE
@@ -238,6 +258,13 @@ static int wait_for_csb(struct nx842_workmem *wmem,
case CSB_CC_TEMPL_OVERFLOW:
CSB_ERR(csb, "Compressed data template shows data past end");
return -EINVAL;
+ case CSB_CC_EXCEED_BYTE_COUNT: /* P9 or later */
+ /*
+ * DDE byte count exceeds the limit specified in Maximum
+ * byte count register.
+ */
+ CSB_ERR(csb, "DDE byte count exceeds the limit");
+ return -EINVAL;
/* these should not happen */
case CSB_CC_INVALID_ALIGN:
@@ -279,9 +306,17 @@ static int wait_for_csb(struct nx842_workmem *wmem,
CSB_ERR(csb, "Too many DDEs in DDL");
return -EINVAL;
case CSB_CC_TRANSPORT:
+ case CSB_CC_INVALID_CRB: /* P9 or later */
/* shouldn't happen, we setup CRB correctly */
CSB_ERR(csb, "Invalid CRB");
return -EINVAL;
+ case CSB_CC_INVALID_DDE: /* P9 or later */
+ /*
+ * shouldn't happen, setup_direct/indirect_dde creates
+ * the DDE correctly
+ */
+ CSB_ERR(csb, "Invalid DDE");
+ return -EINVAL;
case CSB_CC_SEGMENTED_DDL:
/* shouldn't happen, setup_ddl creates DDL right */
CSB_ERR(csb, "Segmented DDL error");
@@ -325,6 +360,9 @@ static int wait_for_csb(struct nx842_workmem *wmem,
case CSB_CC_HW:
CSB_ERR(csb, "Correctable hardware error");
return -EPROTO;
+ case CSB_CC_HW_EXPIRED_TIMER: /* P9 or later */
+ CSB_ERR(csb, "Job did not finish within allowed time");
+ return -EPROTO;
default:
CSB_ERR(csb, "Invalid CC %d", csb->cc);
@@ -353,8 +391,42 @@ static int wait_for_csb(struct nx842_workmem *wmem,
return 0;
}
+static int nx842_config_crb(const unsigned char *in, unsigned int inlen,
+ unsigned char *out, unsigned int outlen,
+ struct nx842_workmem *wmem)
+{
+ struct coprocessor_request_block *crb;
+ struct coprocessor_status_block *csb;
+ u64 csb_addr;
+ int ret;
+
+ crb = &wmem->crb;
+ csb = &crb->csb;
+
+ /* Clear any previous values */
+ memset(crb, 0, sizeof(*crb));
+
+ /* set up DDLs */
+ ret = setup_ddl(&crb->source, wmem->ddl_in,
+ (unsigned char *)in, inlen, true);
+ if (ret)
+ return ret;
+
+ ret = setup_ddl(&crb->target, wmem->ddl_out,
+ out, outlen, false);
+ if (ret)
+ return ret;
+
+ /* set up CRB's CSB addr */
+ csb_addr = nx842_get_pa(csb) & CRB_CSB_ADDRESS;
+ csb_addr |= CRB_CSB_AT; /* Addrs are phys */
+ crb->csb_addr = cpu_to_be64(csb_addr);
+
+ return 0;
+}
+
/**
- * nx842_powernv_function - compress/decompress data using the 842 algorithm
+ * nx842_exec_icswx - compress/decompress data using the 842 algorithm
*
* (De)compression provided by the NX842 coprocessor on IBM PowerNV systems.
* This compresses or decompresses the provided input buffer into the provided
@@ -384,7 +456,7 @@ static int wait_for_csb(struct nx842_workmem *wmem,
* -ETIMEDOUT hardware did not complete operation in reasonable time
* -EINTR operation was aborted
*/
-static int nx842_powernv_function(const unsigned char *in, unsigned int inlen,
+static int nx842_exec_icswx(const unsigned char *in, unsigned int inlen,
unsigned char *out, unsigned int *outlenp,
void *workmem, int fc)
{
@@ -392,7 +464,6 @@ static int nx842_powernv_function(const unsigned char *in, unsigned int inlen,
struct coprocessor_status_block *csb;
struct nx842_workmem *wmem;
int ret;
- u64 csb_addr;
u32 ccw;
unsigned int outlen = *outlenp;
@@ -406,32 +477,18 @@ static int nx842_powernv_function(const unsigned char *in, unsigned int inlen,
return -ENODEV;
}
- crb = &wmem->crb;
- csb = &crb->csb;
-
- /* Clear any previous values */
- memset(crb, 0, sizeof(*crb));
-
- /* set up DDLs */
- ret = setup_ddl(&crb->source, wmem->ddl_in,
- (unsigned char *)in, inlen, true);
- if (ret)
- return ret;
- ret = setup_ddl(&crb->target, wmem->ddl_out,
- out, outlen, false);
+ ret = nx842_config_crb(in, inlen, out, outlen, wmem);
if (ret)
return ret;
+ crb = &wmem->crb;
+ csb = &crb->csb;
+
/* set up CCW */
ccw = 0;
- ccw = SET_FIELD(ccw, CCW_CT, nx842_ct);
- ccw = SET_FIELD(ccw, CCW_CI_842, 0); /* use 0 for hw auto-selection */
- ccw = SET_FIELD(ccw, CCW_FC_842, fc);
-
- /* set up CRB's CSB addr */
- csb_addr = nx842_get_pa(csb) & CRB_CSB_ADDRESS;
- csb_addr |= CRB_CSB_AT; /* Addrs are phys */
- crb->csb_addr = cpu_to_be64(csb_addr);
+ ccw = SET_FIELD(CCW_CT, ccw, nx842_ct);
+ ccw = SET_FIELD(CCW_CI_842, ccw, 0); /* use 0 for hw auto-selection */
+ ccw = SET_FIELD(CCW_FC_842, ccw, fc);
wmem->start = ktime_get();
@@ -471,6 +528,104 @@ static int nx842_powernv_function(const unsigned char *in, unsigned int inlen,
}
/**
+ * nx842_exec_vas - compress/decompress data using the 842 algorithm
+ *
+ * (De)compression provided by the NX842 coprocessor on IBM PowerNV systems.
+ * This compresses or decompresses the provided input buffer into the provided
+ * output buffer.
+ *
+ * Upon return from this function @outlen contains the length of the
+ * output data. If there is an error then @outlen will be 0 and an
+ * error will be specified by the return code from this function.
+ *
+ * The @workmem buffer should only be used by one function call at a time.
+ *
+ * @in: input buffer pointer
+ * @inlen: input buffer size
+ * @out: output buffer pointer
+ * @outlenp: output buffer size pointer
+ * @workmem: working memory buffer pointer, size determined by
+ * nx842_powernv_driver.workmem_size
+ * @fc: function code, see CCW Function Codes in nx-842.h
+ *
+ * Returns:
+ * 0 Success, output of length @outlenp stored in the buffer
+ * at @out
+ * -ENODEV Hardware unavailable
+ * -ENOSPC Output buffer is too small
+ * -EMSGSIZE Input buffer too large
+ * -EINVAL buffer constraints do not fit nx842_constraints
+ * -EPROTO hardware error during operation
+ * -ETIMEDOUT hardware did not complete operation in reasonable time
+ * -EINTR operation was aborted
+ */
+static int nx842_exec_vas(const unsigned char *in, unsigned int inlen,
+ unsigned char *out, unsigned int *outlenp,
+ void *workmem, int fc)
+{
+ struct coprocessor_request_block *crb;
+ struct coprocessor_status_block *csb;
+ struct nx842_workmem *wmem;
+ struct vas_window *txwin;
+ int ret, i = 0;
+ u32 ccw;
+ unsigned int outlen = *outlenp;
+
+ wmem = PTR_ALIGN(workmem, WORKMEM_ALIGN);
+
+ *outlenp = 0;
+
+ crb = &wmem->crb;
+ csb = &crb->csb;
+
+ ret = nx842_config_crb(in, inlen, out, outlen, wmem);
+ if (ret)
+ return ret;
+
+ ccw = 0;
+ ccw = SET_FIELD(CCW_FC_842, ccw, fc);
+ crb->ccw = cpu_to_be32(ccw);
+
+ txwin = wmem->txwin;
+ /* shouldn't happen, we don't load without a coproc */
+ if (!txwin) {
+ pr_err_ratelimited("NX-842 coprocessor is not available");
+ return -ENODEV;
+ }
+
+ do {
+ wmem->start = ktime_get();
+ preempt_disable();
+ /*
+ * vas_copy_crb() copies the CRB into the L2 cache; see <asm/vas.h>
+ * for @crb and @offset.
+ */
+ vas_copy_crb(crb, 0);
+
+ /*
+ * vas_paste_crb() pastes the previously copied CRB to NX;
+ * @txwin, @offset and @last (must be true).
+ */
+ ret = vas_paste_crb(txwin, 0, 1);
+ preempt_enable();
+ /*
+ * Retry copy/paste function for VAS failures.
+ */
+ } while (ret && (i++ < VAS_RETRIES));
+
+ if (ret) {
+ pr_err_ratelimited("VAS copy/paste failed\n");
+ return ret;
+ }
+
+ ret = wait_for_csb(wmem, csb);
+ if (!ret)
+ *outlenp = be32_to_cpu(csb->count);
+
+ return ret;
+}
+
+/**
* nx842_powernv_compress - Compress data using the 842 algorithm
*
* Compression provided by the NX842 coprocessor on IBM PowerNV systems.
@@ -488,13 +643,13 @@ static int nx842_powernv_function(const unsigned char *in, unsigned int inlen,
* @workmem: working memory buffer pointer, size determined by
* nx842_powernv_driver.workmem_size
*
- * Returns: see @nx842_powernv_function()
+ * Returns: see @nx842_powernv_exec()
*/
static int nx842_powernv_compress(const unsigned char *in, unsigned int inlen,
unsigned char *out, unsigned int *outlenp,
void *wmem)
{
- return nx842_powernv_function(in, inlen, out, outlenp,
+ return nx842_powernv_exec(in, inlen, out, outlenp,
wmem, CCW_FC_842_COMP_CRC);
}
@@ -516,16 +671,219 @@ static int nx842_powernv_compress(const unsigned char *in, unsigned int inlen,
* @workmem: working memory buffer pointer, size determined by
* nx842_powernv_driver.workmem_size
*
- * Returns: see @nx842_powernv_function()
+ * Returns: see @nx842_powernv_exec()
*/
static int nx842_powernv_decompress(const unsigned char *in, unsigned int inlen,
unsigned char *out, unsigned int *outlenp,
void *wmem)
{
- return nx842_powernv_function(in, inlen, out, outlenp,
+ return nx842_powernv_exec(in, inlen, out, outlenp,
wmem, CCW_FC_842_DECOMP_CRC);
}
+static inline void nx842_add_coprocs_list(struct nx842_coproc *coproc,
+ int chipid)
+{
+ coproc->chip_id = chipid;
+ INIT_LIST_HEAD(&coproc->list);
+ list_add(&coproc->list, &nx842_coprocs);
+}
+
+/*
+ * Identify the chip ID for each CPU and save the coprocessor address for the
+ * corresponding NX engine in percpu coproc_inst.
+ * coproc_inst is used in crypto_init to open a send window on the NX instance
+ * for the corresponding CPU / chip where the open request is executed.
+ */
+static void nx842_set_per_cpu_coproc(struct nx842_coproc *coproc)
+{
+ unsigned int i, chip_id;
+
+ for_each_possible_cpu(i) {
+ chip_id = cpu_to_chip_id(i);
+
+ if (coproc->chip_id == chip_id)
+ per_cpu(coproc_inst, i) = coproc;
+ }
+}
+
+
+static struct vas_window *nx842_alloc_txwin(struct nx842_coproc *coproc)
+{
+ struct vas_window *txwin = NULL;
+ struct vas_tx_win_attr txattr;
+
+ /*
+ * Kernel requests will be high priority. So open send
+ * windows only for high priority RxFIFO entries.
+ */
+ vas_init_tx_win_attr(&txattr, coproc->ct);
+ txattr.lpid = 0; /* lpid is 0 for kernel requests */
+ txattr.pid = 0; /* pid is 0 for kernel requests */
+
+ /*
+ * Open a VAS send window which is used to send request to NX.
+ */
+ txwin = vas_tx_win_open(coproc->vas.id, coproc->ct, &txattr);
+ if (IS_ERR(txwin)) {
+ pr_err("ibm,nx-842: Can not open TX window: %ld\n",
+ PTR_ERR(txwin));
+ return NULL;
+ }
+
+ return txwin;
+}
+
+static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id,
+ int vasid)
+{
+ struct vas_window *rxwin = NULL;
+ struct vas_rx_win_attr rxattr;
+ struct nx842_coproc *coproc;
+ u32 lpid, pid, tid, fifo_size;
+ u64 rx_fifo;
+ const char *priority;
+ int ret;
+
+ ret = of_property_read_u64(dn, "rx-fifo-address", &rx_fifo);
+ if (ret) {
+ pr_err("Missing rx-fifo-address property\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(dn, "rx-fifo-size", &fifo_size);
+ if (ret) {
+ pr_err("Missing rx-fifo-size property\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(dn, "lpid", &lpid);
+ if (ret) {
+ pr_err("Missing lpid property\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(dn, "pid", &pid);
+ if (ret) {
+ pr_err("Missing pid property\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(dn, "tid", &tid);
+ if (ret) {
+ pr_err("Missing tid property\n");
+ return ret;
+ }
+
+ ret = of_property_read_string(dn, "priority", &priority);
+ if (ret) {
+ pr_err("Missing priority property\n");
+ return ret;
+ }
+
+ coproc = kzalloc(sizeof(*coproc), GFP_KERNEL);
+ if (!coproc)
+ return -ENOMEM;
+
+ if (!strcmp(priority, "High"))
+ coproc->ct = VAS_COP_TYPE_842_HIPRI;
+ else if (!strcmp(priority, "Normal"))
+ coproc->ct = VAS_COP_TYPE_842;
+ else {
+ pr_err("Invalid RxFIFO priority value\n");
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ vas_init_rx_win_attr(&rxattr, coproc->ct);
+ rxattr.rx_fifo = (void *)rx_fifo;
+ rxattr.rx_fifo_size = fifo_size;
+ rxattr.lnotify_lpid = lpid;
+ rxattr.lnotify_pid = pid;
+ rxattr.lnotify_tid = tid;
+ rxattr.wcreds_max = MAX_CREDITS_PER_RXFIFO;
+
+ /*
+ * Open a VAS receive window which is used to configure the RxFIFO
+ * for NX.
+ */
+ rxwin = vas_rx_win_open(vasid, coproc->ct, &rxattr);
+ if (IS_ERR(rxwin)) {
+ ret = PTR_ERR(rxwin);
+ pr_err("setting RxFIFO with VAS failed: %d\n",
+ ret);
+ goto err_out;
+ }
+
+ coproc->vas.rxwin = rxwin;
+ coproc->vas.id = vasid;
+ nx842_add_coprocs_list(coproc, chip_id);
+
+ /*
+ * Kernel requests use only high priority FIFOs. So save coproc
+ * info in percpu coproc_inst which will be used to open send
+ * windows for crypto open requests later.
+ */
+ if (coproc->ct == VAS_COP_TYPE_842_HIPRI)
+ nx842_set_per_cpu_coproc(coproc);
+
+ return 0;
+
+err_out:
+ kfree(coproc);
+ return ret;
+}
+
+
+static int __init nx842_powernv_probe_vas(struct device_node *pn)
+{
+ struct device_node *dn;
+ int chip_id, vasid, ret = 0;
+ int nx_fifo_found = 0;
+
+ chip_id = of_get_ibm_chip_id(pn);
+ if (chip_id < 0) {
+ pr_err("ibm,chip-id missing\n");
+ return -EINVAL;
+ }
+
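+ /* Find the VAS instance that sits on the same chip as this NX node */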
+ for_each_compatible_node(dn, NULL, "ibm,power9-vas-x") {
+ if (of_get_ibm_chip_id(dn) == chip_id)
+ break;
+ }
+
+ if (!dn) {
+ pr_err("Missing VAS device node\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_u32(dn, "ibm,vas-id", &vasid)) {
+ pr_err("Missing ibm,vas-id device property\n");
+ of_node_put(dn);
+ return -EINVAL;
+ }
+
+ of_node_put(dn);
+
+ for_each_child_of_node(pn, dn) {
+ if (of_device_is_compatible(dn, "ibm,p9-nx-842")) {
+ ret = vas_cfg_coproc_info(dn, chip_id, vasid);
+ if (ret) {
+ of_node_put(dn);
+ return ret;
+ }
+ nx_fifo_found++;
+ }
+ }
+
+ if (!nx_fifo_found) {
+ pr_err("NX842 FIFO nodes are missing\n");
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
static int __init nx842_powernv_probe(struct device_node *dn)
{
struct nx842_coproc *coproc;
@@ -552,11 +910,9 @@ static int __init nx842_powernv_probe(struct device_node *dn)
if (!coproc)
return -ENOMEM;
- coproc->chip_id = chip_id;
coproc->ct = ct;
coproc->ci = ci;
- INIT_LIST_HEAD(&coproc->list);
- list_add(&coproc->list, &nx842_coprocs);
+ nx842_add_coprocs_list(coproc, chip_id);
pr_info("coprocessor found on chip %d, CT %d CI %d\n", chip_id, ct, ci);
@@ -569,6 +925,19 @@ static int __init nx842_powernv_probe(struct device_node *dn)
return 0;
}
+static void nx842_delete_coprocs(void)
+{
+ struct nx842_coproc *coproc, *n;
+
+ list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) {
+ if (coproc->vas.rxwin)
+ vas_win_close(coproc->vas.rxwin);
+
+ list_del(&coproc->list);
+ kfree(coproc);
+ }
+}
+
static struct nx842_constraints nx842_powernv_constraints = {
.alignment = DDE_BUFFER_ALIGN,
.multiple = DDE_BUFFER_LAST_MULT,
@@ -585,6 +954,46 @@ static struct nx842_driver nx842_powernv_driver = {
.decompress = nx842_powernv_decompress,
};
+static int nx842_powernv_crypto_init_vas(struct crypto_tfm *tfm)
+{
+ struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct nx842_workmem *wmem;
+ struct nx842_coproc *coproc;
+ int ret;
+
+ ret = nx842_crypto_init(tfm, &nx842_powernv_driver);
+
+ if (ret)
+ return ret;
+
+ wmem = PTR_ALIGN((struct nx842_workmem *)ctx->wmem, WORKMEM_ALIGN);
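+ /*
+ * coproc_inst was filled in per CPU at probe time with the
+ * high-priority NX coprocessor on the local chip, so this picks the
+ * engine on the same chip as the CPU running the init request.
+ */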
+ coproc = per_cpu(coproc_inst, smp_processor_id());
+
+ ret = -EINVAL;
+ if (coproc && coproc->vas.rxwin) {
+ wmem->txwin = nx842_alloc_txwin(coproc);
+ if (!IS_ERR(wmem->txwin))
+ return 0;
+
+ ret = PTR_ERR(wmem->txwin);
+ }
+
+ return ret;
+}
+
+static void nx842_powernv_crypto_exit_vas(struct crypto_tfm *tfm)
+{
+ struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct nx842_workmem *wmem;
+
+ wmem = PTR_ALIGN((struct nx842_workmem *)ctx->wmem, WORKMEM_ALIGN);
+
+ if (wmem && wmem->txwin)
+ vas_win_close(wmem->txwin);
+
+ nx842_crypto_exit(tfm);
+}
+
static int nx842_powernv_crypto_init(struct crypto_tfm *tfm)
{
return nx842_crypto_init(tfm, &nx842_powernv_driver);
@@ -618,21 +1027,31 @@ static __init int nx842_powernv_init(void)
BUILD_BUG_ON(DDE_BUFFER_ALIGN % DDE_BUFFER_SIZE_MULT);
BUILD_BUG_ON(DDE_BUFFER_SIZE_MULT % DDE_BUFFER_LAST_MULT);
- for_each_compatible_node(dn, NULL, "ibm,power-nx")
- nx842_powernv_probe(dn);
+ for_each_compatible_node(dn, NULL, "ibm,power9-nx") {
+ ret = nx842_powernv_probe_vas(dn);
+ if (ret) {
+ nx842_delete_coprocs();
+ return ret;
+ }
+ }
- if (!nx842_ct)
- return -ENODEV;
+ if (list_empty(&nx842_coprocs)) {
+ for_each_compatible_node(dn, NULL, "ibm,power-nx")
+ nx842_powernv_probe(dn);
- ret = crypto_register_alg(&nx842_powernv_alg);
- if (ret) {
- struct nx842_coproc *coproc, *n;
+ if (!nx842_ct)
+ return -ENODEV;
- list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) {
- list_del(&coproc->list);
- kfree(coproc);
- }
+ nx842_powernv_exec = nx842_exec_icswx;
+ } else {
+ nx842_powernv_exec = nx842_exec_vas;
+ nx842_powernv_alg.cra_init = nx842_powernv_crypto_init_vas;
+ nx842_powernv_alg.cra_exit = nx842_powernv_crypto_exit_vas;
+ }
+ ret = crypto_register_alg(&nx842_powernv_alg);
+ if (ret) {
+ nx842_delete_coprocs();
return ret;
}
@@ -642,13 +1061,8 @@ module_init(nx842_powernv_init);
static void __exit nx842_powernv_exit(void)
{
- struct nx842_coproc *coproc, *n;
-
crypto_unregister_alg(&nx842_powernv_alg);
- list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) {
- list_del(&coproc->list);
- kfree(coproc);
- }
+ nx842_delete_coprocs();
}
module_exit(nx842_powernv_exit);
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c
index d94e25df503b..da3cb8c35ec7 100644
--- a/drivers/crypto/nx/nx-842.c
+++ b/drivers/crypto/nx/nx-842.c
@@ -116,7 +116,7 @@ int nx842_crypto_init(struct crypto_tfm *tfm, struct nx842_driver *driver)
spin_lock_init(&ctx->lock);
ctx->driver = driver;
- ctx->wmem = kmalloc(driver->workmem_size, GFP_KERNEL);
+ ctx->wmem = kzalloc(driver->workmem_size, GFP_KERNEL);
ctx->sbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER);
ctx->dbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER);
if (!ctx->wmem || !ctx->sbounce || !ctx->dbounce) {
diff --git a/drivers/crypto/nx/nx-842.h b/drivers/crypto/nx/nx-842.h
index a4eee3bba937..bb2f31792683 100644
--- a/drivers/crypto/nx/nx-842.h
+++ b/drivers/crypto/nx/nx-842.h
@@ -76,9 +76,17 @@
#define CSB_CC_DECRYPT_OVERFLOW (64)
/* asym crypt codes */
#define CSB_CC_MINV_OVERFLOW (128)
+/*
+ * HW error - Job did not finish in the maximum time allowed.
+ * Job terminated.
+ */
+#define CSB_CC_HW_EXPIRED_TIMER (224)
/* These are reserved for hypervisor use */
#define CSB_CC_HYP_RESERVE_START (240)
#define CSB_CC_HYP_RESERVE_END (253)
+#define CSB_CC_HYP_RESERVE_P9_END (251)
+/* No valid interrupt server (P9 or later). */
+#define CSB_CC_HYP_RESERVE_NO_INTR_SERVER (252)
#define CSB_CC_HYP_NO_HW (254)
#define CSB_CC_HYP_HANG_ABORTED (255)
@@ -100,11 +108,6 @@ static inline unsigned long nx842_get_pa(void *addr)
return page_to_phys(vmalloc_to_page(addr)) + offset_in_page(addr);
}
-/* Get/Set bit fields */
-#define MASK_LSH(m) (__builtin_ffsl(m) - 1)
-#define GET_FIELD(v, m) (((v) & (m)) >> MASK_LSH(m))
-#define SET_FIELD(v, m, val) (((v) & ~(m)) | (((val) << MASK_LSH(m)) & (m)))
-
/**
* This provides the driver's constraints. Different nx842 implementations
* may have varying requirements. The constraints are:
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 5120a17731d0..c376a3ee7c2c 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -1095,6 +1095,7 @@ static int omap_aes_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(dev, "can't get IRQ resource\n");
+ err = irq;
goto err_irq;
}
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index 0bcab00e0ff5..d37c9506c36c 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -1023,7 +1023,8 @@ static int omap_des_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(dev, "can't get IRQ resource\n");
+ dev_err(dev, "can't get IRQ resource: %d\n", irq);
+ err = irq;
goto err_irq;
}
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 9ad9d399daf1..c40ac30ec002 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -2133,7 +2133,7 @@ data_err:
static int omap_sham_remove(struct platform_device *pdev)
{
- static struct omap_sham_dev *dd;
+ struct omap_sham_dev *dd;
int i, j;
dd = platform_get_drvdata(pdev);
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
index d3e25c37dc33..da8a2d3b5e9a 100644
--- a/drivers/crypto/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/qat/qat_common/adf_aer.c
@@ -208,7 +208,7 @@ static pci_ers_result_t adf_slot_reset(struct pci_dev *pdev)
static void adf_resume(struct pci_dev *pdev)
{
dev_info(&pdev->dev, "Acceleration driver reset completed\n");
- dev_info(&pdev->dev, "Device is up and runnig\n");
+ dev_info(&pdev->dev, "Device is up and running\n");
}
static const struct pci_error_handlers adf_err_handler = {
diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c
index d0f80c6241f9..c9d622abd90c 100644
--- a/drivers/crypto/rockchip/rk3288_crypto.c
+++ b/drivers/crypto/rockchip/rk3288_crypto.c
@@ -169,50 +169,82 @@ static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id)
{
struct rk_crypto_info *dev = platform_get_drvdata(dev_id);
u32 interrupt_status;
- int err = 0;
spin_lock(&dev->lock);
interrupt_status = CRYPTO_READ(dev, RK_CRYPTO_INTSTS);
CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, interrupt_status);
+
if (interrupt_status & 0x0a) {
dev_warn(dev->dev, "DMA Error\n");
- err = -EFAULT;
- } else if (interrupt_status & 0x05) {
- err = dev->update(dev);
+ dev->err = -EFAULT;
}
- if (err)
- dev->complete(dev, err);
+ tasklet_schedule(&dev->done_task);
+
spin_unlock(&dev->lock);
return IRQ_HANDLED;
}
-static void rk_crypto_tasklet_cb(unsigned long data)
+static int rk_crypto_enqueue(struct rk_crypto_info *dev,
+ struct crypto_async_request *async_req)
+{
+ unsigned long flags;
+ int ret;
+
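+ /*
+ * Queue the request; kick the queue tasklet only when the engine is
+ * idle, otherwise the completion path will schedule it again.
+ */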
+ spin_lock_irqsave(&dev->lock, flags);
+ ret = crypto_enqueue_request(&dev->queue, async_req);
+ if (dev->busy) {
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return ret;
+ }
+ dev->busy = true;
+ spin_unlock_irqrestore(&dev->lock, flags);
+ tasklet_schedule(&dev->queue_task);
+
+ return ret;
+}
+
+static void rk_crypto_queue_task_cb(unsigned long data)
{
struct rk_crypto_info *dev = (struct rk_crypto_info *)data;
struct crypto_async_request *async_req, *backlog;
unsigned long flags;
int err = 0;
+ dev->err = 0;
spin_lock_irqsave(&dev->lock, flags);
backlog = crypto_get_backlog(&dev->queue);
async_req = crypto_dequeue_request(&dev->queue);
- spin_unlock_irqrestore(&dev->lock, flags);
+
if (!async_req) {
- dev_err(dev->dev, "async_req is NULL !!\n");
+ dev->busy = false;
+ spin_unlock_irqrestore(&dev->lock, flags);
return;
}
+ spin_unlock_irqrestore(&dev->lock, flags);
+
if (backlog) {
backlog->complete(backlog, -EINPROGRESS);
backlog = NULL;
}
- if (crypto_tfm_alg_type(async_req->tfm) == CRYPTO_ALG_TYPE_ABLKCIPHER)
- dev->ablk_req = ablkcipher_request_cast(async_req);
- else
- dev->ahash_req = ahash_request_cast(async_req);
+ dev->async_req = async_req;
err = dev->start(dev);
if (err)
- dev->complete(dev, err);
+ dev->complete(dev->async_req, err);
+}
+
+static void rk_crypto_done_task_cb(unsigned long data)
+{
+ struct rk_crypto_info *dev = (struct rk_crypto_info *)data;
+
+ if (dev->err) {
+ dev->complete(dev->async_req, dev->err);
+ return;
+ }
+
+ dev->err = dev->update(dev);
+ if (dev->err)
+ dev->complete(dev->async_req, dev->err);
}
static struct rk_crypto_tmp *rk_cipher_algs[] = {
@@ -361,14 +393,18 @@ static int rk_crypto_probe(struct platform_device *pdev)
crypto_info->dev = &pdev->dev;
platform_set_drvdata(pdev, crypto_info);
- tasklet_init(&crypto_info->crypto_tasklet,
- rk_crypto_tasklet_cb, (unsigned long)crypto_info);
+ tasklet_init(&crypto_info->queue_task,
+ rk_crypto_queue_task_cb, (unsigned long)crypto_info);
+ tasklet_init(&crypto_info->done_task,
+ rk_crypto_done_task_cb, (unsigned long)crypto_info);
crypto_init_queue(&crypto_info->queue, 50);
crypto_info->enable_clk = rk_crypto_enable_clk;
crypto_info->disable_clk = rk_crypto_disable_clk;
crypto_info->load_data = rk_load_data;
crypto_info->unload_data = rk_unload_data;
+ crypto_info->enqueue = rk_crypto_enqueue;
+ crypto_info->busy = false;
err = rk_crypto_register(crypto_info);
if (err) {
@@ -380,7 +416,8 @@ static int rk_crypto_probe(struct platform_device *pdev)
return 0;
err_register_alg:
- tasklet_kill(&crypto_info->crypto_tasklet);
+ tasklet_kill(&crypto_info->queue_task);
+ tasklet_kill(&crypto_info->done_task);
err_crypto:
return err;
}
@@ -390,7 +427,8 @@ static int rk_crypto_remove(struct platform_device *pdev)
struct rk_crypto_info *crypto_tmp = platform_get_drvdata(pdev);
rk_crypto_unregister();
- tasklet_kill(&crypto_tmp->crypto_tasklet);
+ tasklet_kill(&crypto_tmp->done_task);
+ tasklet_kill(&crypto_tmp->queue_task);
return 0;
}
diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h
index d7b71fea320b..ab6a1b4c40f0 100644
--- a/drivers/crypto/rockchip/rk3288_crypto.h
+++ b/drivers/crypto/rockchip/rk3288_crypto.h
@@ -190,9 +190,10 @@ struct rk_crypto_info {
void __iomem *reg;
int irq;
struct crypto_queue queue;
- struct tasklet_struct crypto_tasklet;
- struct ablkcipher_request *ablk_req;
- struct ahash_request *ahash_req;
+ struct tasklet_struct queue_task;
+ struct tasklet_struct done_task;
+ struct crypto_async_request *async_req;
+ int err;
/* device lock */
spinlock_t lock;
@@ -208,18 +209,20 @@ struct rk_crypto_info {
size_t nents;
unsigned int total;
unsigned int count;
- u32 mode;
dma_addr_t addr_in;
dma_addr_t addr_out;
+ bool busy;
int (*start)(struct rk_crypto_info *dev);
int (*update)(struct rk_crypto_info *dev);
- void (*complete)(struct rk_crypto_info *dev, int err);
+ void (*complete)(struct crypto_async_request *base, int err);
int (*enable_clk)(struct rk_crypto_info *dev);
void (*disable_clk)(struct rk_crypto_info *dev);
int (*load_data)(struct rk_crypto_info *dev,
struct scatterlist *sg_src,
struct scatterlist *sg_dst);
void (*unload_data)(struct rk_crypto_info *dev);
+ int (*enqueue)(struct rk_crypto_info *dev,
+ struct crypto_async_request *async_req);
};
/* the private variable of hash */
@@ -232,12 +235,14 @@ struct rk_ahash_ctx {
/* the privete variable of hash for fallback */
struct rk_ahash_rctx {
struct ahash_request fallback_req;
+ u32 mode;
};
/* the private variable of cipher */
struct rk_cipher_ctx {
struct rk_crypto_info *dev;
unsigned int keylen;
+ u32 mode;
};
enum alg_type {
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
index b5a3afe222e4..639c15c5364b 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
@@ -15,35 +15,19 @@
#define RK_CRYPTO_DEC BIT(0)
-static void rk_crypto_complete(struct rk_crypto_info *dev, int err)
+static void rk_crypto_complete(struct crypto_async_request *base, int err)
{
- if (dev->ablk_req->base.complete)
- dev->ablk_req->base.complete(&dev->ablk_req->base, err);
+ if (base->complete)
+ base->complete(base, err);
}
static int rk_handle_req(struct rk_crypto_info *dev,
struct ablkcipher_request *req)
{
- unsigned long flags;
- int err;
-
if (!IS_ALIGNED(req->nbytes, dev->align_size))
return -EINVAL;
-
- dev->left_bytes = req->nbytes;
- dev->total = req->nbytes;
- dev->sg_src = req->src;
- dev->first = req->src;
- dev->nents = sg_nents(req->src);
- dev->sg_dst = req->dst;
- dev->aligned = 1;
- dev->ablk_req = req;
-
- spin_lock_irqsave(&dev->lock, flags);
- err = ablkcipher_enqueue_request(&dev->queue, req);
- spin_unlock_irqrestore(&dev->lock, flags);
- tasklet_schedule(&dev->crypto_tasklet);
- return err;
+ else
+ return dev->enqueue(dev, &req->base);
}
static int rk_aes_setkey(struct crypto_ablkcipher *cipher,
@@ -93,7 +77,7 @@ static int rk_aes_ecb_encrypt(struct ablkcipher_request *req)
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct rk_crypto_info *dev = ctx->dev;
- dev->mode = RK_CRYPTO_AES_ECB_MODE;
+ ctx->mode = RK_CRYPTO_AES_ECB_MODE;
return rk_handle_req(dev, req);
}
@@ -103,7 +87,7 @@ static int rk_aes_ecb_decrypt(struct ablkcipher_request *req)
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct rk_crypto_info *dev = ctx->dev;
- dev->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC;
+ ctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC;
return rk_handle_req(dev, req);
}
@@ -113,7 +97,7 @@ static int rk_aes_cbc_encrypt(struct ablkcipher_request *req)
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct rk_crypto_info *dev = ctx->dev;
- dev->mode = RK_CRYPTO_AES_CBC_MODE;
+ ctx->mode = RK_CRYPTO_AES_CBC_MODE;
return rk_handle_req(dev, req);
}
@@ -123,7 +107,7 @@ static int rk_aes_cbc_decrypt(struct ablkcipher_request *req)
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct rk_crypto_info *dev = ctx->dev;
- dev->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC;
+ ctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC;
return rk_handle_req(dev, req);
}
@@ -133,7 +117,7 @@ static int rk_des_ecb_encrypt(struct ablkcipher_request *req)
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct rk_crypto_info *dev = ctx->dev;
- dev->mode = 0;
+ ctx->mode = 0;
return rk_handle_req(dev, req);
}
@@ -143,7 +127,7 @@ static int rk_des_ecb_decrypt(struct ablkcipher_request *req)
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct rk_crypto_info *dev = ctx->dev;
- dev->mode = RK_CRYPTO_DEC;
+ ctx->mode = RK_CRYPTO_DEC;
return rk_handle_req(dev, req);
}
@@ -153,7 +137,7 @@ static int rk_des_cbc_encrypt(struct ablkcipher_request *req)
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct rk_crypto_info *dev = ctx->dev;
- dev->mode = RK_CRYPTO_TDES_CHAINMODE_CBC;
+ ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC;
return rk_handle_req(dev, req);
}
@@ -163,7 +147,7 @@ static int rk_des_cbc_decrypt(struct ablkcipher_request *req)
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct rk_crypto_info *dev = ctx->dev;
- dev->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC;
+ ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC;
return rk_handle_req(dev, req);
}
@@ -173,7 +157,7 @@ static int rk_des3_ede_ecb_encrypt(struct ablkcipher_request *req)
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct rk_crypto_info *dev = ctx->dev;
- dev->mode = RK_CRYPTO_TDES_SELECT;
+ ctx->mode = RK_CRYPTO_TDES_SELECT;
return rk_handle_req(dev, req);
}
@@ -183,7 +167,7 @@ static int rk_des3_ede_ecb_decrypt(struct ablkcipher_request *req)
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct rk_crypto_info *dev = ctx->dev;
- dev->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC;
+ ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC;
return rk_handle_req(dev, req);
}
@@ -193,7 +177,7 @@ static int rk_des3_ede_cbc_encrypt(struct ablkcipher_request *req)
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct rk_crypto_info *dev = ctx->dev;
- dev->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC;
+ ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC;
return rk_handle_req(dev, req);
}
@@ -203,15 +187,16 @@ static int rk_des3_ede_cbc_decrypt(struct ablkcipher_request *req)
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct rk_crypto_info *dev = ctx->dev;
- dev->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC |
+ ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC |
RK_CRYPTO_DEC;
return rk_handle_req(dev, req);
}
static void rk_ablk_hw_init(struct rk_crypto_info *dev)
{
- struct crypto_ablkcipher *cipher =
- crypto_ablkcipher_reqtfm(dev->ablk_req);
+ struct ablkcipher_request *req =
+ ablkcipher_request_cast(dev->async_req);
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(cipher);
u32 ivsize, block, conf_reg = 0;
@@ -220,25 +205,23 @@ static void rk_ablk_hw_init(struct rk_crypto_info *dev)
ivsize = crypto_ablkcipher_ivsize(cipher);
if (block == DES_BLOCK_SIZE) {
- dev->mode |= RK_CRYPTO_TDES_FIFO_MODE |
+ ctx->mode |= RK_CRYPTO_TDES_FIFO_MODE |
RK_CRYPTO_TDES_BYTESWAP_KEY |
RK_CRYPTO_TDES_BYTESWAP_IV;
- CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, dev->mode);
- memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0,
- dev->ablk_req->info, ivsize);
+ CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, ctx->mode);
+ memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, req->info, ivsize);
conf_reg = RK_CRYPTO_DESSEL;
} else {
- dev->mode |= RK_CRYPTO_AES_FIFO_MODE |
+ ctx->mode |= RK_CRYPTO_AES_FIFO_MODE |
RK_CRYPTO_AES_KEY_CHANGE |
RK_CRYPTO_AES_BYTESWAP_KEY |
RK_CRYPTO_AES_BYTESWAP_IV;
if (ctx->keylen == AES_KEYSIZE_192)
- dev->mode |= RK_CRYPTO_AES_192BIT_key;
+ ctx->mode |= RK_CRYPTO_AES_192BIT_key;
else if (ctx->keylen == AES_KEYSIZE_256)
- dev->mode |= RK_CRYPTO_AES_256BIT_key;
- CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, dev->mode);
- memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0,
- dev->ablk_req->info, ivsize);
+ ctx->mode |= RK_CRYPTO_AES_256BIT_key;
+ CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, ctx->mode);
+ memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, req->info, ivsize);
}
conf_reg |= RK_CRYPTO_BYTESWAP_BTFIFO |
RK_CRYPTO_BYTESWAP_BRFIFO;
@@ -268,8 +251,18 @@ static int rk_set_data_start(struct rk_crypto_info *dev)
static int rk_ablk_start(struct rk_crypto_info *dev)
{
+ struct ablkcipher_request *req =
+ ablkcipher_request_cast(dev->async_req);
unsigned long flags;
- int err;
+ int err = 0;
+
+ dev->left_bytes = req->nbytes;
+ dev->total = req->nbytes;
+ dev->sg_src = req->src;
+ dev->first = req->src;
+ dev->nents = sg_nents(req->src);
+ dev->sg_dst = req->dst;
+ dev->aligned = 1;
spin_lock_irqsave(&dev->lock, flags);
rk_ablk_hw_init(dev);
@@ -280,15 +273,16 @@ static int rk_ablk_start(struct rk_crypto_info *dev)
static void rk_iv_copyback(struct rk_crypto_info *dev)
{
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(dev->ablk_req);
+ struct ablkcipher_request *req =
+ ablkcipher_request_cast(dev->async_req);
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
u32 ivsize = crypto_ablkcipher_ivsize(tfm);
if (ivsize == DES_BLOCK_SIZE)
- memcpy_fromio(dev->ablk_req->info,
- dev->reg + RK_CRYPTO_TDES_IV_0, ivsize);
+ memcpy_fromio(req->info, dev->reg + RK_CRYPTO_TDES_IV_0,
+ ivsize);
else if (ivsize == AES_BLOCK_SIZE)
- memcpy_fromio(dev->ablk_req->info,
- dev->reg + RK_CRYPTO_AES_IV_0, ivsize);
+ memcpy_fromio(req->info, dev->reg + RK_CRYPTO_AES_IV_0, ivsize);
}
/* return:
@@ -298,10 +292,12 @@ static void rk_iv_copyback(struct rk_crypto_info *dev)
static int rk_ablk_rx(struct rk_crypto_info *dev)
{
int err = 0;
+ struct ablkcipher_request *req =
+ ablkcipher_request_cast(dev->async_req);
dev->unload_data(dev);
if (!dev->aligned) {
- if (!sg_pcopy_from_buffer(dev->ablk_req->dst, dev->nents,
+ if (!sg_pcopy_from_buffer(req->dst, dev->nents,
dev->addr_vir, dev->count,
dev->total - dev->left_bytes -
dev->count)) {
@@ -324,7 +320,8 @@ static int rk_ablk_rx(struct rk_crypto_info *dev)
} else {
rk_iv_copyback(dev);
/* here show the calculation is over without any err */
- dev->complete(dev, 0);
+ dev->complete(dev->async_req, 0);
+ tasklet_schedule(&dev->queue_task);
}
out_rx:
return err;
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
index 718588219f75..821a506b9e17 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
@@ -40,14 +40,16 @@ static int zero_message_process(struct ahash_request *req)
return 0;
}
-static void rk_ahash_crypto_complete(struct rk_crypto_info *dev, int err)
+static void rk_ahash_crypto_complete(struct crypto_async_request *base, int err)
{
- if (dev->ahash_req->base.complete)
- dev->ahash_req->base.complete(&dev->ahash_req->base, err);
+ if (base->complete)
+ base->complete(base, err);
}
static void rk_ahash_reg_init(struct rk_crypto_info *dev)
{
+ struct ahash_request *req = ahash_request_cast(dev->async_req);
+ struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
int reg_status = 0;
reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL) |
@@ -67,7 +69,7 @@ static void rk_ahash_reg_init(struct rk_crypto_info *dev)
CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, RK_CRYPTO_HRDMA_ERR_INT |
RK_CRYPTO_HRDMA_DONE_INT);
- CRYPTO_WRITE(dev, RK_CRYPTO_HASH_CTRL, dev->mode |
+ CRYPTO_WRITE(dev, RK_CRYPTO_HASH_CTRL, rctx->mode |
RK_CRYPTO_HASH_SWAP_DO);
CRYPTO_WRITE(dev, RK_CRYPTO_CONF, RK_CRYPTO_BYTESWAP_HRFIFO |
@@ -164,64 +166,13 @@ static int rk_ahash_export(struct ahash_request *req, void *out)
static int rk_ahash_digest(struct ahash_request *req)
{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct rk_ahash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
- struct rk_crypto_info *dev = NULL;
- unsigned long flags;
- int ret;
+ struct rk_crypto_info *dev = tctx->dev;
if (!req->nbytes)
return zero_message_process(req);
-
- dev = tctx->dev;
- dev->total = req->nbytes;
- dev->left_bytes = req->nbytes;
- dev->aligned = 0;
- dev->mode = 0;
- dev->align_size = 4;
- dev->sg_dst = NULL;
- dev->sg_src = req->src;
- dev->first = req->src;
- dev->nents = sg_nents(req->src);
-
- switch (crypto_ahash_digestsize(tfm)) {
- case SHA1_DIGEST_SIZE:
- dev->mode = RK_CRYPTO_HASH_SHA1;
- break;
- case SHA256_DIGEST_SIZE:
- dev->mode = RK_CRYPTO_HASH_SHA256;
- break;
- case MD5_DIGEST_SIZE:
- dev->mode = RK_CRYPTO_HASH_MD5;
- break;
- default:
- return -EINVAL;
- }
-
- rk_ahash_reg_init(dev);
-
- spin_lock_irqsave(&dev->lock, flags);
- ret = crypto_enqueue_request(&dev->queue, &req->base);
- spin_unlock_irqrestore(&dev->lock, flags);
-
- tasklet_schedule(&dev->crypto_tasklet);
-
- /*
- * it will take some time to process date after last dma transmission.
- *
- * waiting time is relative with the last date len,
- * so cannot set a fixed time here.
- * 10-50 makes system not call here frequently wasting
- * efficiency, and make it response quickly when dma
- * complete.
- */
- while (!CRYPTO_READ(dev, RK_CRYPTO_HASH_STS))
- usleep_range(10, 50);
-
- memcpy_fromio(req->result, dev->reg + RK_CRYPTO_HASH_DOUT_0,
- crypto_ahash_digestsize(tfm));
-
- return 0;
+ else
+ return dev->enqueue(dev, &req->base);
}
static void crypto_ahash_dma_start(struct rk_crypto_info *dev)
@@ -244,12 +195,45 @@ static int rk_ahash_set_data_start(struct rk_crypto_info *dev)
static int rk_ahash_start(struct rk_crypto_info *dev)
{
+ struct ahash_request *req = ahash_request_cast(dev->async_req);
+ struct crypto_ahash *tfm;
+ struct rk_ahash_rctx *rctx;
+
+ dev->total = req->nbytes;
+ dev->left_bytes = req->nbytes;
+ dev->aligned = 0;
+ dev->align_size = 4;
+ dev->sg_dst = NULL;
+ dev->sg_src = req->src;
+ dev->first = req->src;
+ dev->nents = sg_nents(req->src);
+ rctx = ahash_request_ctx(req);
+ rctx->mode = 0;
+
+ tfm = crypto_ahash_reqtfm(req);
+ switch (crypto_ahash_digestsize(tfm)) {
+ case SHA1_DIGEST_SIZE:
+ rctx->mode = RK_CRYPTO_HASH_SHA1;
+ break;
+ case SHA256_DIGEST_SIZE:
+ rctx->mode = RK_CRYPTO_HASH_SHA256;
+ break;
+ case MD5_DIGEST_SIZE:
+ rctx->mode = RK_CRYPTO_HASH_MD5;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ rk_ahash_reg_init(dev);
return rk_ahash_set_data_start(dev);
}
static int rk_ahash_crypto_rx(struct rk_crypto_info *dev)
{
int err = 0;
+ struct ahash_request *req = ahash_request_cast(dev->async_req);
+ struct crypto_ahash *tfm;
dev->unload_data(dev);
if (dev->left_bytes) {
@@ -264,7 +248,24 @@ static int rk_ahash_crypto_rx(struct rk_crypto_info *dev)
}
err = rk_ahash_set_data_start(dev);
} else {
- dev->complete(dev, 0);
+ /*
+ * It takes some time for the engine to process data after the last
+ * DMA transmission.
+ *
+ * The waiting time depends on the length of that last data, so a
+ * fixed delay cannot be used here. Polling every 10us avoids
+ * hammering the status register while still reacting quickly once
+ * the DMA completes.
+ */
+ while (!CRYPTO_READ(dev, RK_CRYPTO_HASH_STS))
+ udelay(10);
+
+ tfm = crypto_ahash_reqtfm(req);
+ memcpy_fromio(req->result, dev->reg + RK_CRYPTO_HASH_DOUT_0,
+ crypto_ahash_digestsize(tfm));
+ dev->complete(dev->async_req, 0);
+ tasklet_schedule(&dev->queue_task);
}
out_rx:
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 1d9ecd368b5b..08e7bdcaa6e3 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -202,7 +202,6 @@ struct sahara_dev {
struct completion dma_completion;
struct sahara_ctx *ctx;
- spinlock_t lock;
struct crypto_queue queue;
unsigned long flags;
@@ -543,10 +542,10 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
unmap_out:
dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
- DMA_TO_DEVICE);
+ DMA_FROM_DEVICE);
unmap_in:
dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
- DMA_FROM_DEVICE);
+ DMA_TO_DEVICE);
return -EINVAL;
}
@@ -594,9 +593,9 @@ static int sahara_aes_process(struct ablkcipher_request *req)
}
dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
- DMA_TO_DEVICE);
- dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
DMA_FROM_DEVICE);
+ dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
+ DMA_TO_DEVICE);
return 0;
}
@@ -1376,13 +1375,13 @@ static void sahara_unregister_algs(struct sahara_dev *dev)
crypto_unregister_ahash(&sha_v4_algs[i]);
}
-static struct platform_device_id sahara_platform_ids[] = {
+static const struct platform_device_id sahara_platform_ids[] = {
{ .name = "sahara-imx27" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, sahara_platform_ids);
-static struct of_device_id sahara_dt_ids[] = {
+static const struct of_device_id sahara_dt_ids[] = {
{ .compatible = "fsl,imx53-sahara" },
{ .compatible = "fsl,imx27-sahara" },
{ /* sentinel */ }
@@ -1487,7 +1486,6 @@ static int sahara_probe(struct platform_device *pdev)
crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);
- spin_lock_init(&dev->lock);
mutex_init(&dev->queue_mutex);
dev_ptr = dev;
diff --git a/drivers/crypto/stm32/Kconfig b/drivers/crypto/stm32/Kconfig
index 09b4ec87c212..602332e02729 100644
--- a/drivers/crypto/stm32/Kconfig
+++ b/drivers/crypto/stm32/Kconfig
@@ -1,7 +1,20 @@
-config CRYPTO_DEV_STM32
- tristate "Support for STM32 crypto accelerators"
+config CRC_DEV_STM32
+ tristate "Support for STM32 crc accelerators"
depends on ARCH_STM32
select CRYPTO_HASH
help
This enables support for the CRC32 hw accelerator which can be found
- on STMicroelectronis STM32 SOC.
+ on STMicroelectronics STM32 SOC.
+
+config HASH_DEV_STM32
+ tristate "Support for STM32 hash accelerators"
+ depends on ARCH_STM32
+ depends on HAS_DMA
+ select CRYPTO_HASH
+ select CRYPTO_MD5
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
+ select CRYPTO_ENGINE
+ help
+ This enables support for the HASH hw accelerator which can be found
+ on STMicroelectronics STM32 SOC.
diff --git a/drivers/crypto/stm32/Makefile b/drivers/crypto/stm32/Makefile
index 73b4c6e47f5f..73cd56cad0cc 100644
--- a/drivers/crypto/stm32/Makefile
+++ b/drivers/crypto/stm32/Makefile
@@ -1,2 +1,2 @@
-obj-$(CONFIG_CRYPTO_DEV_STM32) += stm32_cryp.o
-stm32_cryp-objs := stm32_crc32.o
+obj-$(CONFIG_CRC_DEV_STM32) += stm32_crc32.o
+obj-$(CONFIG_HASH_DEV_STM32) += stm32-hash.o
\ No newline at end of file
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
new file mode 100644
index 000000000000..b585ce54a802
--- /dev/null
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -0,0 +1,1575 @@
+/*
+ * This file is part of STM32 Crypto driver for Linux.
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author(s): Lionel DEBIEVE <lionel.debieve@st.com> for STMicroelectronics.
+ *
+ * License terms: GPL V2.0.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include <crypto/engine.h>
+#include <crypto/hash.h>
+#include <crypto/md5.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sha.h>
+#include <crypto/internal/hash.h>
+
+#define HASH_CR 0x00
+#define HASH_DIN 0x04
+#define HASH_STR 0x08
+#define HASH_IMR 0x20
+#define HASH_SR 0x24
+#define HASH_CSR(x) (0x0F8 + ((x) * 0x04))
+#define HASH_HREG(x) (0x310 + ((x) * 0x04))
+#define HASH_HWCFGR 0x3F0
+#define HASH_VER 0x3F4
+#define HASH_ID 0x3F8
+
+/* Control Register */
+#define HASH_CR_INIT BIT(2)
+#define HASH_CR_DMAE BIT(3)
+#define HASH_CR_DATATYPE_POS 4
+#define HASH_CR_MODE BIT(6)
+#define HASH_CR_MDMAT BIT(13)
+#define HASH_CR_DMAA BIT(14)
+#define HASH_CR_LKEY BIT(16)
+
+#define HASH_CR_ALGO_SHA1 0x0
+#define HASH_CR_ALGO_MD5 0x80
+#define HASH_CR_ALGO_SHA224 0x40000
+#define HASH_CR_ALGO_SHA256 0x40080
+
+/* Interrupt */
+#define HASH_DINIE BIT(0)
+#define HASH_DCIE BIT(1)
+
+/* Interrupt Mask */
+#define HASH_MASK_CALC_COMPLETION BIT(0)
+#define HASH_MASK_DATA_INPUT BIT(1)
+
+/* Context swap register */
+#define HASH_CSR_REGISTER_NUMBER 53
+
+/* Status Flags */
+#define HASH_SR_DATA_INPUT_READY BIT(0)
+#define HASH_SR_OUTPUT_READY BIT(1)
+#define HASH_SR_DMA_ACTIVE BIT(2)
+#define HASH_SR_BUSY BIT(3)
+
+/* STR Register */
+#define HASH_STR_NBLW_MASK GENMASK(4, 0)
+#define HASH_STR_DCAL BIT(8)
+
+#define HASH_FLAGS_INIT BIT(0)
+#define HASH_FLAGS_OUTPUT_READY BIT(1)
+#define HASH_FLAGS_CPU BIT(2)
+#define HASH_FLAGS_DMA_READY BIT(3)
+#define HASH_FLAGS_DMA_ACTIVE BIT(4)
+#define HASH_FLAGS_HMAC_INIT BIT(5)
+#define HASH_FLAGS_HMAC_FINAL BIT(6)
+#define HASH_FLAGS_HMAC_KEY BIT(7)
+
+#define HASH_FLAGS_FINAL BIT(15)
+#define HASH_FLAGS_FINUP BIT(16)
+#define HASH_FLAGS_ALGO_MASK GENMASK(21, 18)
+#define HASH_FLAGS_MD5 BIT(18)
+#define HASH_FLAGS_SHA1 BIT(19)
+#define HASH_FLAGS_SHA224 BIT(20)
+#define HASH_FLAGS_SHA256 BIT(21)
+#define HASH_FLAGS_ERRORS BIT(22)
+#define HASH_FLAGS_HMAC BIT(23)
+
+#define HASH_OP_UPDATE 1
+#define HASH_OP_FINAL 2
+
+enum stm32_hash_data_format {
+ HASH_DATA_32_BITS = 0x0,
+ HASH_DATA_16_BITS = 0x1,
+ HASH_DATA_8_BITS = 0x2,
+ HASH_DATA_1_BIT = 0x3
+};
+
+#define HASH_BUFLEN 256
+#define HASH_LONG_KEY 64
+#define HASH_MAX_KEY_SIZE (SHA256_BLOCK_SIZE * 8)
+#define HASH_QUEUE_LENGTH 16
+#define HASH_DMA_THRESHOLD 50
+
+struct stm32_hash_ctx {
+ struct stm32_hash_dev *hdev;
+ unsigned long flags;
+
+ u8 key[HASH_MAX_KEY_SIZE];
+ int keylen;
+};
+
+struct stm32_hash_request_ctx {
+ struct stm32_hash_dev *hdev;
+ unsigned long flags;
+ unsigned long op;
+
+ u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
+ size_t digcnt;
+ size_t bufcnt;
+ size_t buflen;
+
+ /* DMA */
+ struct scatterlist *sg;
+ unsigned int offset;
+ unsigned int total;
+ struct scatterlist sg_key;
+
+ dma_addr_t dma_addr;
+ size_t dma_ct;
+ int nents;
+
+ u8 data_type;
+
+ u8 buffer[HASH_BUFLEN] __aligned(sizeof(u32));
+
+ /* Export Context */
+ u32 *hw_context;
+};
+
+struct stm32_hash_algs_info {
+ struct ahash_alg *algs_list;
+ size_t size;
+};
+
+struct stm32_hash_pdata {
+ struct stm32_hash_algs_info *algs_info;
+ size_t algs_info_size;
+};
+
+struct stm32_hash_dev {
+ struct list_head list;
+ struct device *dev;
+ struct clk *clk;
+ struct reset_control *rst;
+ void __iomem *io_base;
+ phys_addr_t phys_base;
+ u32 dma_mode;
+ u32 dma_maxburst;
+
+ spinlock_t lock; /* lock to protect queue */
+
+ struct ahash_request *req;
+ struct crypto_engine *engine;
+
+ int err;
+ unsigned long flags;
+
+ struct dma_chan *dma_lch;
+ struct completion dma_completion;
+
+ const struct stm32_hash_pdata *pdata;
+};
+
+struct stm32_hash_drv {
+ struct list_head dev_list;
+ spinlock_t lock; /* List protection access */
+};
+
+static struct stm32_hash_drv stm32_hash = {
+ .dev_list = LIST_HEAD_INIT(stm32_hash.dev_list),
+ .lock = __SPIN_LOCK_UNLOCKED(stm32_hash.lock),
+};
+
+static void stm32_hash_dma_callback(void *param);
+
+static inline u32 stm32_hash_read(struct stm32_hash_dev *hdev, u32 offset)
+{
+ return readl_relaxed(hdev->io_base + offset);
+}
+
+static inline void stm32_hash_write(struct stm32_hash_dev *hdev,
+ u32 offset, u32 value)
+{
+ writel_relaxed(value, hdev->io_base + offset);
+}
+
+static inline int stm32_hash_wait_busy(struct stm32_hash_dev *hdev)
+{
+ u32 status;
+
+ return readl_relaxed_poll_timeout(hdev->io_base + HASH_SR, status,
+ !(status & HASH_SR_BUSY), 10, 10000);
+}
+
+static void stm32_hash_set_nblw(struct stm32_hash_dev *hdev, int length)
+{
+ u32 reg;
+
+ reg = stm32_hash_read(hdev, HASH_STR);
+ reg &= ~(HASH_STR_NBLW_MASK);
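+ /* NBLW = number of valid bits in the last 32-bit word, 8 per trailing byte */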
+ reg |= (8U * ((length) % 4U));
+ stm32_hash_write(hdev, HASH_STR, reg);
+}
+
+static int stm32_hash_write_key(struct stm32_hash_dev *hdev)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
+ struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ u32 reg;
+ int keylen = ctx->keylen;
+ void *key = ctx->key;
+
+ if (keylen) {
+ stm32_hash_set_nblw(hdev, keylen);
+
+ while (keylen > 0) {
+ stm32_hash_write(hdev, HASH_DIN, *(u32 *)key);
+ keylen -= 4;
+ key += 4;
+ }
+
+ reg = stm32_hash_read(hdev, HASH_STR);
+ reg |= HASH_STR_DCAL;
+ stm32_hash_write(hdev, HASH_STR, reg);
+
+ return -EINPROGRESS;
+ }
+
+ return 0;
+}
+
+static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev)
+{
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
+ struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ u32 reg = HASH_CR_INIT;
+
+ if (!(hdev->flags & HASH_FLAGS_INIT)) {
+ switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
+ case HASH_FLAGS_MD5:
+ reg |= HASH_CR_ALGO_MD5;
+ break;
+ case HASH_FLAGS_SHA1:
+ reg |= HASH_CR_ALGO_SHA1;
+ break;
+ case HASH_FLAGS_SHA224:
+ reg |= HASH_CR_ALGO_SHA224;
+ break;
+ case HASH_FLAGS_SHA256:
+ reg |= HASH_CR_ALGO_SHA256;
+ break;
+ default:
+ reg |= HASH_CR_ALGO_MD5;
+ }
+
+ reg |= (rctx->data_type << HASH_CR_DATATYPE_POS);
+
+ if (rctx->flags & HASH_FLAGS_HMAC) {
+ hdev->flags |= HASH_FLAGS_HMAC;
+ reg |= HASH_CR_MODE;
+ if (ctx->keylen > HASH_LONG_KEY)
+ reg |= HASH_CR_LKEY;
+ }
+
+ stm32_hash_write(hdev, HASH_IMR, HASH_DCIE);
+
+ stm32_hash_write(hdev, HASH_CR, reg);
+
+ hdev->flags |= HASH_FLAGS_INIT;
+
+ dev_dbg(hdev->dev, "Write Control %x\n", reg);
+ }
+}
+
+static void stm32_hash_append_sg(struct stm32_hash_request_ctx *rctx)
+{
+ size_t count;
+
+ while ((rctx->bufcnt < rctx->buflen) && rctx->total) {
+ count = min(rctx->sg->length - rctx->offset, rctx->total);
+ count = min(count, rctx->buflen - rctx->bufcnt);
+
+ if (count <= 0) {
+ if ((rctx->sg->length == 0) && !sg_is_last(rctx->sg)) {
+ rctx->sg = sg_next(rctx->sg);
+ continue;
+ } else {
+ break;
+ }
+ }
+
+ scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, rctx->sg,
+ rctx->offset, count, 0);
+
+ rctx->bufcnt += count;
+ rctx->offset += count;
+ rctx->total -= count;
+
+ if (rctx->offset == rctx->sg->length) {
+ rctx->sg = sg_next(rctx->sg);
+ if (rctx->sg)
+ rctx->offset = 0;
+ else
+ rctx->total = 0;
+ }
+ }
+}
+
+static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
+ const u8 *buf, size_t length, int final)
+{
+ unsigned int count, len32;
+ const u32 *buffer = (const u32 *)buf;
+ u32 reg;
+
+ if (final)
+ hdev->flags |= HASH_FLAGS_FINAL;
+
+ len32 = DIV_ROUND_UP(length, sizeof(u32));
+
+ dev_dbg(hdev->dev, "%s: length: %d, final: %x len32 %i\n",
+ __func__, length, final, len32);
+
+ hdev->flags |= HASH_FLAGS_CPU;
+
+ stm32_hash_write_ctrl(hdev);
+
+ if (stm32_hash_wait_busy(hdev))
+ return -ETIMEDOUT;
+
+ if ((hdev->flags & HASH_FLAGS_HMAC) &&
+ (hdev->flags & ~HASH_FLAGS_HMAC_KEY)) {
+ hdev->flags |= HASH_FLAGS_HMAC_KEY;
+ stm32_hash_write_key(hdev);
+ if (stm32_hash_wait_busy(hdev))
+ return -ETIMEDOUT;
+ }
+
+ for (count = 0; count < len32; count++)
+ stm32_hash_write(hdev, HASH_DIN, buffer[count]);
+
+ if (final) {
+ stm32_hash_set_nblw(hdev, length);
+ reg = stm32_hash_read(hdev, HASH_STR);
+ reg |= HASH_STR_DCAL;
+ stm32_hash_write(hdev, HASH_STR, reg);
+ if (hdev->flags & HASH_FLAGS_HMAC) {
+ if (stm32_hash_wait_busy(hdev))
+ return -ETIMEDOUT;
+ stm32_hash_write_key(hdev);
+ }
+ return -EINPROGRESS;
+ }
+
+ return 0;
+}
+
+static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev)
+{
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
+ int bufcnt, err = 0, final;
+
+ dev_dbg(hdev->dev, "%s flags %lx\n", __func__, rctx->flags);
+
+ final = (rctx->flags & HASH_FLAGS_FINUP);
+
+ while ((rctx->total >= rctx->buflen) ||
+ (rctx->bufcnt + rctx->total >= rctx->buflen)) {
+ stm32_hash_append_sg(rctx);
+ bufcnt = rctx->bufcnt;
+ rctx->bufcnt = 0;
+ err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt, 0);
+ }
+
+ stm32_hash_append_sg(rctx);
+
+ if (final) {
+ bufcnt = rctx->bufcnt;
+ rctx->bufcnt = 0;
+ err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt,
+ (rctx->flags & HASH_FLAGS_FINUP));
+ }
+
+ return err;
+}
+
+static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
+ struct scatterlist *sg, int length, int mdma)
+{
+ struct dma_async_tx_descriptor *in_desc;
+ dma_cookie_t cookie;
+ u32 reg;
+ int err;
+
+ in_desc = dmaengine_prep_slave_sg(hdev->dma_lch, sg, 1,
+ DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
+ if (!in_desc) {
+ dev_err(hdev->dev, "dmaengine_prep_slave error\n");
+ return -ENOMEM;
+ }
+
+ reinit_completion(&hdev->dma_completion);
+ in_desc->callback = stm32_hash_dma_callback;
+ in_desc->callback_param = hdev;
+
+ hdev->flags |= HASH_FLAGS_FINAL;
+ hdev->flags |= HASH_FLAGS_DMA_ACTIVE;
+
+ reg = stm32_hash_read(hdev, HASH_CR);
+
+ if (mdma)
+ reg |= HASH_CR_MDMAT;
+ else
+ reg &= ~HASH_CR_MDMAT;
+
+ reg |= HASH_CR_DMAE;
+
+ stm32_hash_write(hdev, HASH_CR, reg);
+
+ stm32_hash_set_nblw(hdev, length);
+
+ cookie = dmaengine_submit(in_desc);
+ err = dma_submit_error(cookie);
+ if (err)
+ return -ENOMEM;
+
+ dma_async_issue_pending(hdev->dma_lch);
+
+ if (!wait_for_completion_interruptible_timeout(&hdev->dma_completion,
+ msecs_to_jiffies(100)))
+ err = -ETIMEDOUT;
+
+ if (dma_async_is_tx_complete(hdev->dma_lch, cookie,
+ NULL, NULL) != DMA_COMPLETE)
+ err = -ETIMEDOUT;
+
+ if (err) {
+ dev_err(hdev->dev, "DMA Error %i\n", err);
+ dmaengine_terminate_all(hdev->dma_lch);
+ return err;
+ }
+
+ return -EINPROGRESS;
+}
+
+static void stm32_hash_dma_callback(void *param)
+{
+ struct stm32_hash_dev *hdev = param;
+
+ complete(&hdev->dma_completion);
+
+ hdev->flags |= HASH_FLAGS_DMA_READY;
+}
+
+static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev)
+{
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
+ struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ int err;
+
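+ /*
+ * Keys below the DMA threshold (or when dma_mode == 1) are written
+ * to the peripheral by the CPU; larger keys are mapped and pushed
+ * through the DMA channel instead.
+ */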
+ if (ctx->keylen < HASH_DMA_THRESHOLD || (hdev->dma_mode == 1)) {
+ err = stm32_hash_write_key(hdev);
+ if (stm32_hash_wait_busy(hdev))
+ return -ETIMEDOUT;
+ } else {
+ if (!(hdev->flags & HASH_FLAGS_HMAC_KEY))
+ sg_init_one(&rctx->sg_key, ctx->key,
+ ALIGN(ctx->keylen, sizeof(u32)));
+
+ rctx->dma_ct = dma_map_sg(hdev->dev, &rctx->sg_key, 1,
+ DMA_TO_DEVICE);
+ if (rctx->dma_ct == 0) {
+ dev_err(hdev->dev, "dma_map_sg error\n");
+ return -ENOMEM;
+ }
+
+ err = stm32_hash_xmit_dma(hdev, &rctx->sg_key, ctx->keylen, 0);
+
+ dma_unmap_sg(hdev->dev, &rctx->sg_key, 1, DMA_TO_DEVICE);
+ }
+
+ return err;
+}
+
+static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
+{
+ struct dma_slave_config dma_conf;
+ int err;
+
+ memset(&dma_conf, 0, sizeof(dma_conf));
+
+ dma_conf.direction = DMA_MEM_TO_DEV;
+ dma_conf.dst_addr = hdev->phys_base + HASH_DIN;
+ dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_conf.src_maxburst = hdev->dma_maxburst;
+ dma_conf.dst_maxburst = hdev->dma_maxburst;
+ dma_conf.device_fc = false;
+
+ hdev->dma_lch = dma_request_slave_channel(hdev->dev, "in");
+ if (!hdev->dma_lch) {
+ dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
+ return -EBUSY;
+ }
+
+ err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
+ if (err) {
+ dma_release_channel(hdev->dma_lch);
+ hdev->dma_lch = NULL;
+ dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
+ return err;
+ }
+
+ init_completion(&hdev->dma_completion);
+
+ return 0;
+}
+
+static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
+{
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
+ struct scatterlist sg[1], *tsg;
+ int err = 0, len = 0, reg, ncp;
+ unsigned int i;
+ const u32 *buffer = (const u32 *)rctx->buffer;
+
+ rctx->sg = hdev->req->src;
+ rctx->total = hdev->req->nbytes;
+
+ rctx->nents = sg_nents(rctx->sg);
+
+ if (rctx->nents < 0)
+ return -EINVAL;
+
+ stm32_hash_write_ctrl(hdev);
+
+ if (hdev->flags & HASH_FLAGS_HMAC) {
+ err = stm32_hash_hmac_dma_send(hdev);
+ if (err != -EINPROGRESS)
+ return err;
+ }
+
+ for_each_sg(rctx->sg, tsg, rctx->nents, i) {
+ sg[0] = *tsg;
+ len = sg->length;
+
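+ /*
+ * Special-case the last scatterlist entry: with dma_mode == 1 the
+ * DMA length is trimmed and the remaining tail is copied into
+ * rctx->buffer, to be written by the CPU once the DMA has finished
+ * (handled below); otherwise the length is only padded up to a
+ * 32-bit multiple.
+ */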
+ if (sg_is_last(sg)) {
+ if (hdev->dma_mode == 1) {
+ len = (ALIGN(sg->length, 16) - 16);
+
+ ncp = sg_pcopy_to_buffer(
+ rctx->sg, rctx->nents,
+ rctx->buffer, sg->length - len,
+ rctx->total - sg->length + len);
+
+ sg->length = len;
+ } else {
+ if (!(IS_ALIGNED(sg->length, sizeof(u32)))) {
+ len = sg->length;
+ sg->length = ALIGN(sg->length,
+ sizeof(u32));
+ }
+ }
+ }
+
+ rctx->dma_ct = dma_map_sg(hdev->dev, sg, 1,
+ DMA_TO_DEVICE);
+ if (rctx->dma_ct == 0) {
+ dev_err(hdev->dev, "dma_map_sg error\n");
+ return -ENOMEM;
+ }
+
+ err = stm32_hash_xmit_dma(hdev, sg, len,
+ !sg_is_last(sg));
+
+ dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
+
+ if (err == -ENOMEM)
+ return err;
+ }
+
+ if (hdev->dma_mode == 1) {
+ if (stm32_hash_wait_busy(hdev))
+ return -ETIMEDOUT;
+ reg = stm32_hash_read(hdev, HASH_CR);
+ reg &= ~HASH_CR_DMAE;
+ reg |= HASH_CR_DMAA;
+ stm32_hash_write(hdev, HASH_CR, reg);
+
+ for (i = 0; i < DIV_ROUND_UP(ncp, sizeof(u32)); i++)
+ stm32_hash_write(hdev, HASH_DIN, buffer[i]);
+
+ stm32_hash_set_nblw(hdev, ncp);
+ reg = stm32_hash_read(hdev, HASH_STR);
+ reg |= HASH_STR_DCAL;
+ stm32_hash_write(hdev, HASH_STR, reg);
+ err = -EINPROGRESS;
+ }
+
+ if (hdev->flags & HASH_FLAGS_HMAC) {
+ if (stm32_hash_wait_busy(hdev))
+ return -ETIMEDOUT;
+ err = stm32_hash_hmac_dma_send(hdev);
+ }
+
+ return err;
+}
+
+static struct stm32_hash_dev *stm32_hash_find_dev(struct stm32_hash_ctx *ctx)
+{
+ struct stm32_hash_dev *hdev = NULL, *tmp;
+
+ spin_lock_bh(&stm32_hash.lock);
+ if (!ctx->hdev) {
+ list_for_each_entry(tmp, &stm32_hash.dev_list, list) {
+ hdev = tmp;
+ break;
+ }
+ ctx->hdev = hdev;
+ } else {
+ hdev = ctx->hdev;
+ }
+
+ spin_unlock_bh(&stm32_hash.lock);
+
+ return hdev;
+}
+
+static bool stm32_hash_dma_aligned_data(struct ahash_request *req)
+{
+ struct scatterlist *sg;
+ struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+ struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
+ int i;
+
+ if (req->nbytes <= HASH_DMA_THRESHOLD)
+ return false;
+
+ if (sg_nents(req->src) > 1) {
+ if (hdev->dma_mode == 1)
+ return false;
+ for_each_sg(req->src, sg, sg_nents(req->src), i) {
+ if ((!IS_ALIGNED(sg->length, sizeof(u32))) &&
+ (!sg_is_last(sg)))
+ return false;
+ }
+ }
+
+ if (req->src->offset % 4)
+ return false;
+
+ return true;
+}
+
+static int stm32_hash_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
+
+ rctx->hdev = hdev;
+
+ rctx->flags = HASH_FLAGS_CPU;
+
+ rctx->digcnt = crypto_ahash_digestsize(tfm);
+ switch (rctx->digcnt) {
+ case MD5_DIGEST_SIZE:
+ rctx->flags |= HASH_FLAGS_MD5;
+ break;
+ case SHA1_DIGEST_SIZE:
+ rctx->flags |= HASH_FLAGS_SHA1;
+ break;
+ case SHA224_DIGEST_SIZE:
+ rctx->flags |= HASH_FLAGS_SHA224;
+ break;
+ case SHA256_DIGEST_SIZE:
+ rctx->flags |= HASH_FLAGS_SHA256;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ rctx->bufcnt = 0;
+ rctx->buflen = HASH_BUFLEN;
+ rctx->total = 0;
+ rctx->offset = 0;
+ rctx->data_type = HASH_DATA_8_BITS;
+
+ memset(rctx->buffer, 0, HASH_BUFLEN);
+
+ if (ctx->flags & HASH_FLAGS_HMAC)
+ rctx->flags |= HASH_FLAGS_HMAC;
+
+ dev_dbg(hdev->dev, "%s Flags %lx\n", __func__, rctx->flags);
+
+ return 0;
+}
+
+static int stm32_hash_update_req(struct stm32_hash_dev *hdev)
+{
+ return stm32_hash_update_cpu(hdev);
+}
+
+static int stm32_hash_final_req(struct stm32_hash_dev *hdev)
+{
+ struct ahash_request *req = hdev->req;
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+ int err;
+
+ if (!(rctx->flags & HASH_FLAGS_CPU))
+ err = stm32_hash_dma_send(hdev);
+ else
+ err = stm32_hash_xmit_cpu(hdev, rctx->buffer, rctx->bufcnt, 1);
+
+ rctx->bufcnt = 0;
+
+ return err;
+}
+
+static void stm32_hash_copy_hash(struct ahash_request *req)
+{
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+ u32 *hash = (u32 *)rctx->digest;
+ unsigned int i, hashsize;
+
+ switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
+ case HASH_FLAGS_MD5:
+ hashsize = MD5_DIGEST_SIZE;
+ break;
+ case HASH_FLAGS_SHA1:
+ hashsize = SHA1_DIGEST_SIZE;
+ break;
+ case HASH_FLAGS_SHA224:
+ hashsize = SHA224_DIGEST_SIZE;
+ break;
+ case HASH_FLAGS_SHA256:
+ hashsize = SHA256_DIGEST_SIZE;
+ break;
+ default:
+ return;
+ }
+
+ for (i = 0; i < hashsize / sizeof(u32); i++)
+ hash[i] = be32_to_cpu(stm32_hash_read(rctx->hdev,
+ HASH_HREG(i)));
+}
+
+static int stm32_hash_finish(struct ahash_request *req)
+{
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+
+ if (!req->result)
+ return -EINVAL;
+
+ memcpy(req->result, rctx->digest, rctx->digcnt);
+
+ return 0;
+}
+
+static void stm32_hash_finish_req(struct ahash_request *req, int err)
+{
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct stm32_hash_dev *hdev = rctx->hdev;
+
+ if (!err && (HASH_FLAGS_FINAL & hdev->flags)) {
+ stm32_hash_copy_hash(req);
+ err = stm32_hash_finish(req);
+ hdev->flags &= ~(HASH_FLAGS_FINAL | HASH_FLAGS_CPU |
+ HASH_FLAGS_INIT | HASH_FLAGS_DMA_READY |
+ HASH_FLAGS_OUTPUT_READY | HASH_FLAGS_HMAC |
+ HASH_FLAGS_HMAC_INIT | HASH_FLAGS_HMAC_FINAL |
+ HASH_FLAGS_HMAC_KEY);
+ } else {
+ rctx->flags |= HASH_FLAGS_ERRORS;
+ }
+
+ crypto_finalize_hash_request(hdev->engine, req, err);
+}
+
+static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
+ struct stm32_hash_request_ctx *rctx)
+{
+ if (!(HASH_FLAGS_INIT & hdev->flags)) {
+ stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT);
+ stm32_hash_write(hdev, HASH_STR, 0);
+ stm32_hash_write(hdev, HASH_DIN, 0);
+ stm32_hash_write(hdev, HASH_IMR, 0);
+ hdev->err = 0;
+ }
+
+ return 0;
+}
+
+static int stm32_hash_handle_queue(struct stm32_hash_dev *hdev,
+ struct ahash_request *req)
+{
+ return crypto_transfer_hash_request_to_engine(hdev->engine, req);
+}
+
+static int stm32_hash_prepare_req(struct crypto_engine *engine,
+ struct ahash_request *req)
+{
+ struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+ struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
+ struct stm32_hash_request_ctx *rctx;
+
+ if (!hdev)
+ return -ENODEV;
+
+ hdev->req = req;
+
+ rctx = ahash_request_ctx(req);
+
+ dev_dbg(hdev->dev, "processing new req, op: %lu, nbytes %d\n",
+ rctx->op, req->nbytes);
+
+ return stm32_hash_hw_init(hdev, rctx);
+}
+
+static int stm32_hash_one_request(struct crypto_engine *engine,
+ struct ahash_request *req)
+{
+ struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+ struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
+ struct stm32_hash_request_ctx *rctx;
+ int err = 0;
+
+ if (!hdev)
+ return -ENODEV;
+
+ hdev->req = req;
+
+ rctx = ahash_request_ctx(req);
+
+ if (rctx->op == HASH_OP_UPDATE)
+ err = stm32_hash_update_req(hdev);
+ else if (rctx->op == HASH_OP_FINAL)
+ err = stm32_hash_final_req(hdev);
+
+ if (err != -EINPROGRESS)
+ /* done task will not finish it, so do it here */
+ stm32_hash_finish_req(req, err);
+
+ return 0;
+}
+
+static int stm32_hash_enqueue(struct ahash_request *req, unsigned int op)
+{
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct stm32_hash_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct stm32_hash_dev *hdev = ctx->hdev;
+
+ rctx->op = op;
+
+ return stm32_hash_handle_queue(hdev, req);
+}
+
+static int stm32_hash_update(struct ahash_request *req)
+{
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+ int ret;
+
+ if (!req->nbytes || !(rctx->flags & HASH_FLAGS_CPU))
+ return 0;
+
+ rctx->total = req->nbytes;
+ rctx->sg = req->src;
+ rctx->offset = 0;
+
+ if ((rctx->bufcnt + rctx->total < rctx->buflen)) {
+ stm32_hash_append_sg(rctx);
+ return 0;
+ }
+
+ ret = stm32_hash_enqueue(req, HASH_OP_UPDATE);
+
+ if (rctx->flags & HASH_FLAGS_FINUP)
+ return ret;
+
+ return 0;
+}
+
+static int stm32_hash_final(struct ahash_request *req)
+{
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+
+ rctx->flags |= HASH_FLAGS_FINUP;
+
+ return stm32_hash_enqueue(req, HASH_OP_FINAL);
+}
+
+static int stm32_hash_finup(struct ahash_request *req)
+{
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+ struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
+ int err1, err2;
+
+ rctx->flags |= HASH_FLAGS_FINUP;
+
+ if (hdev->dma_lch && stm32_hash_dma_aligned_data(req))
+ rctx->flags &= ~HASH_FLAGS_CPU;
+
+ err1 = stm32_hash_update(req);
+
+ if (err1 == -EINPROGRESS || err1 == -EBUSY)
+ return err1;
+
+ /*
+ * final() has to be always called to cleanup resources
+ * even if update() failed, except EINPROGRESS
+ */
+ err2 = stm32_hash_final(req);
+
+ return err1 ?: err2;
+}
+
+static int stm32_hash_digest(struct ahash_request *req)
+{
+ return stm32_hash_init(req) ?: stm32_hash_finup(req);
+}
+
+static int stm32_hash_export(struct ahash_request *req, void *out)
+{
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+ struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
+ u32 *preg;
+ unsigned int i;
+
+ while (!(stm32_hash_read(hdev, HASH_SR) & HASH_SR_DATA_INPUT_READY))
+ cpu_relax();
+
+ rctx->hw_context = kmalloc(sizeof(u32) * (3 + HASH_CSR_REGISTER_NUMBER),
+ GFP_KERNEL);
+
+ preg = rctx->hw_context;
+
+ *preg++ = stm32_hash_read(hdev, HASH_IMR);
+ *preg++ = stm32_hash_read(hdev, HASH_STR);
+ *preg++ = stm32_hash_read(hdev, HASH_CR);
+ for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
+ *preg++ = stm32_hash_read(hdev, HASH_CSR(i));
+
+ memcpy(out, rctx, sizeof(*rctx));
+
+ return 0;
+}
+
+static int stm32_hash_import(struct ahash_request *req, const void *in)
+{
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+ struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
+ const u32 *preg = in;
+ u32 reg;
+ unsigned int i;
+
+ memcpy(rctx, in, sizeof(*rctx));
+
+ preg = rctx->hw_context;
+
+ stm32_hash_write(hdev, HASH_IMR, *preg++);
+ stm32_hash_write(hdev, HASH_STR, *preg++);
+ stm32_hash_write(hdev, HASH_CR, *preg);
+ reg = *preg++ | HASH_CR_INIT;
+ stm32_hash_write(hdev, HASH_CR, reg);
+
+ for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
+ stm32_hash_write(hdev, HASH_CSR(i), *preg++);
+
+ kfree(rctx->hw_context);
+
+ return 0;
+}
+
+static int stm32_hash_setkey(struct crypto_ahash *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ if (keylen <= HASH_MAX_KEY_SIZE) {
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+ } else {
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm,
+ const char *algs_hmac_name)
+{
+ struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct stm32_hash_request_ctx));
+
+ ctx->keylen = 0;
+
+ if (algs_hmac_name)
+ ctx->flags |= HASH_FLAGS_HMAC;
+
+ return 0;
+}
+
+static int stm32_hash_cra_init(struct crypto_tfm *tfm)
+{
+ return stm32_hash_cra_init_algs(tfm, NULL);
+}
+
+static int stm32_hash_cra_md5_init(struct crypto_tfm *tfm)
+{
+ return stm32_hash_cra_init_algs(tfm, "md5");
+}
+
+static int stm32_hash_cra_sha1_init(struct crypto_tfm *tfm)
+{
+ return stm32_hash_cra_init_algs(tfm, "sha1");
+}
+
+static int stm32_hash_cra_sha224_init(struct crypto_tfm *tfm)
+{
+ return stm32_hash_cra_init_algs(tfm, "sha224");
+}
+
+static int stm32_hash_cra_sha256_init(struct crypto_tfm *tfm)
+{
+ return stm32_hash_cra_init_algs(tfm, "sha256");
+}
+
+static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id)
+{
+ struct stm32_hash_dev *hdev = dev_id;
+ int err = 0;
+
+ if (HASH_FLAGS_CPU & hdev->flags) {
+ if (HASH_FLAGS_OUTPUT_READY & hdev->flags) {
+ hdev->flags &= ~HASH_FLAGS_OUTPUT_READY;
+ goto finish;
+ }
+ } else if (HASH_FLAGS_DMA_READY & hdev->flags) {
+ if (HASH_FLAGS_DMA_ACTIVE & hdev->flags) {
+ hdev->flags &= ~HASH_FLAGS_DMA_ACTIVE;
+ goto finish;
+ }
+ }
+
+ return IRQ_HANDLED;
+
+finish:
+ /* Finish current request */
+ stm32_hash_finish_req(hdev->req, err);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t stm32_hash_irq_handler(int irq, void *dev_id)
+{
+ struct stm32_hash_dev *hdev = dev_id;
+ u32 reg;
+
+ reg = stm32_hash_read(hdev, HASH_SR);
+ if (reg & HASH_SR_OUTPUT_READY) {
+ reg &= ~HASH_SR_OUTPUT_READY;
+ stm32_hash_write(hdev, HASH_SR, reg);
+ hdev->flags |= HASH_FLAGS_OUTPUT_READY;
+ return IRQ_WAKE_THREAD;
+ }
+
+ return IRQ_NONE;
+}
+
+static struct ahash_alg algs_md5_sha1[] = {
+ {
+ .init = stm32_hash_init,
+ .update = stm32_hash_update,
+ .final = stm32_hash_final,
+ .finup = stm32_hash_finup,
+ .digest = stm32_hash_digest,
+ .export = stm32_hash_export,
+ .import = stm32_hash_import,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .statesize = sizeof(struct stm32_hash_request_ctx),
+ .base = {
+ .cra_name = "md5",
+ .cra_driver_name = "stm32-md5",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = stm32_hash_cra_init,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .init = stm32_hash_init,
+ .update = stm32_hash_update,
+ .final = stm32_hash_final,
+ .finup = stm32_hash_finup,
+ .digest = stm32_hash_digest,
+ .export = stm32_hash_export,
+ .import = stm32_hash_import,
+ .setkey = stm32_hash_setkey,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .statesize = sizeof(struct stm32_hash_request_ctx),
+ .base = {
+ .cra_name = "hmac(md5)",
+ .cra_driver_name = "stm32-hmac-md5",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = stm32_hash_cra_md5_init,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .init = stm32_hash_init,
+ .update = stm32_hash_update,
+ .final = stm32_hash_final,
+ .finup = stm32_hash_finup,
+ .digest = stm32_hash_digest,
+ .export = stm32_hash_export,
+ .import = stm32_hash_import,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct stm32_hash_request_ctx),
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "stm32-sha1",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = stm32_hash_cra_init,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .init = stm32_hash_init,
+ .update = stm32_hash_update,
+ .final = stm32_hash_final,
+ .finup = stm32_hash_finup,
+ .digest = stm32_hash_digest,
+ .export = stm32_hash_export,
+ .import = stm32_hash_import,
+ .setkey = stm32_hash_setkey,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct stm32_hash_request_ctx),
+ .base = {
+ .cra_name = "hmac(sha1)",
+ .cra_driver_name = "stm32-hmac-sha1",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = stm32_hash_cra_sha1_init,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+};
+
+static struct ahash_alg algs_sha224_sha256[] = {
+ {
+ .init = stm32_hash_init,
+ .update = stm32_hash_update,
+ .final = stm32_hash_final,
+ .finup = stm32_hash_finup,
+ .digest = stm32_hash_digest,
+ .export = stm32_hash_export,
+ .import = stm32_hash_import,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct stm32_hash_request_ctx),
+ .base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "stm32-sha224",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = stm32_hash_cra_init,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .init = stm32_hash_init,
+ .update = stm32_hash_update,
+ .final = stm32_hash_final,
+ .finup = stm32_hash_finup,
+ .digest = stm32_hash_digest,
+ .setkey = stm32_hash_setkey,
+ .export = stm32_hash_export,
+ .import = stm32_hash_import,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct stm32_hash_request_ctx),
+ .base = {
+ .cra_name = "hmac(sha224)",
+ .cra_driver_name = "stm32-hmac-sha224",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = stm32_hash_cra_sha224_init,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .init = stm32_hash_init,
+ .update = stm32_hash_update,
+ .final = stm32_hash_final,
+ .finup = stm32_hash_finup,
+ .digest = stm32_hash_digest,
+ .export = stm32_hash_export,
+ .import = stm32_hash_import,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct stm32_hash_request_ctx),
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "stm32-sha256",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = stm32_hash_cra_init,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+ {
+ .init = stm32_hash_init,
+ .update = stm32_hash_update,
+ .final = stm32_hash_final,
+ .finup = stm32_hash_finup,
+ .digest = stm32_hash_digest,
+ .export = stm32_hash_export,
+ .import = stm32_hash_import,
+ .setkey = stm32_hash_setkey,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct stm32_hash_request_ctx),
+ .base = {
+ .cra_name = "hmac(sha256)",
+ .cra_driver_name = "stm32-hmac-sha256",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = stm32_hash_cra_sha256_init,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ },
+};
+
+static int stm32_hash_register_algs(struct stm32_hash_dev *hdev)
+{
+ unsigned int i, j;
+ int err;
+
+ for (i = 0; i < hdev->pdata->algs_info_size; i++) {
+ for (j = 0; j < hdev->pdata->algs_info[i].size; j++) {
+ err = crypto_register_ahash(
+ &hdev->pdata->algs_info[i].algs_list[j]);
+ if (err)
+ goto err_algs;
+ }
+ }
+
+ return 0;
+err_algs:
+ dev_err(hdev->dev, "Algo %d : %d failed\n", i, j);
+ for (; i--; ) {
+ for (; j--;)
+ crypto_unregister_ahash(
+ &hdev->pdata->algs_info[i].algs_list[j]);
+ }
+
+ return err;
+}
+
+static int stm32_hash_unregister_algs(struct stm32_hash_dev *hdev)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < hdev->pdata->algs_info_size; i++) {
+ for (j = 0; j < hdev->pdata->algs_info[i].size; j++)
+ crypto_unregister_ahash(
+ &hdev->pdata->algs_info[i].algs_list[j]);
+ }
+
+ return 0;
+}
+
+static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f4[] = {
+ {
+ .algs_list = algs_md5_sha1,
+ .size = ARRAY_SIZE(algs_md5_sha1),
+ },
+};
+
+static const struct stm32_hash_pdata stm32_hash_pdata_stm32f4 = {
+ .algs_info = stm32_hash_algs_info_stm32f4,
+ .algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_stm32f4),
+};
+
+static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f7[] = {
+ {
+ .algs_list = algs_md5_sha1,
+ .size = ARRAY_SIZE(algs_md5_sha1),
+ },
+ {
+ .algs_list = algs_sha224_sha256,
+ .size = ARRAY_SIZE(algs_sha224_sha256),
+ },
+};
+
+static const struct stm32_hash_pdata stm32_hash_pdata_stm32f7 = {
+ .algs_info = stm32_hash_algs_info_stm32f7,
+ .algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_stm32f7),
+};
+
+static const struct of_device_id stm32_hash_of_match[] = {
+ {
+ .compatible = "st,stm32f456-hash",
+ .data = &stm32_hash_pdata_stm32f4,
+ },
+ {
+ .compatible = "st,stm32f756-hash",
+ .data = &stm32_hash_pdata_stm32f7,
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, stm32_hash_of_match);
+
+static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev,
+ struct device *dev)
+{
+ const struct of_device_id *match;
+ int err;
+
+ match = of_match_device(stm32_hash_of_match, dev);
+ if (!match) {
+ dev_err(dev, "no compatible OF match\n");
+ return -EINVAL;
+ }
+
+ err = of_property_read_u32(dev->of_node, "dma-maxburst",
+ &hdev->dma_maxburst);
+
+ hdev->pdata = match->data;
+
+ return err;
+}
+
+static int stm32_hash_probe(struct platform_device *pdev)
+{
+ struct stm32_hash_dev *hdev;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int ret, irq;
+
+ hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
+ if (!hdev)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hdev->io_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(hdev->io_base))
+ return PTR_ERR(hdev->io_base);
+
+ hdev->phys_base = res->start;
+
+ ret = stm32_hash_get_of_match(hdev, dev);
+ if (ret)
+ return ret;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "Cannot get IRQ resource\n");
+ return irq;
+ }
+
+ ret = devm_request_threaded_irq(dev, irq, stm32_hash_irq_handler,
+ stm32_hash_irq_thread, IRQF_ONESHOT,
+ dev_name(dev), hdev);
+ if (ret) {
+ dev_err(dev, "Cannot grab IRQ\n");
+ return ret;
+ }
+
+ hdev->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(hdev->clk)) {
+ dev_err(dev, "failed to get clock for hash (%lu)\n",
+ PTR_ERR(hdev->clk));
+ return PTR_ERR(hdev->clk);
+ }
+
+ ret = clk_prepare_enable(hdev->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable hash clock (%d)\n", ret);
+ return ret;
+ }
+
+ hdev->rst = devm_reset_control_get(&pdev->dev, NULL);
+ if (!IS_ERR(hdev->rst)) {
+ reset_control_assert(hdev->rst);
+ udelay(2);
+ reset_control_deassert(hdev->rst);
+ }
+
+ hdev->dev = dev;
+
+ platform_set_drvdata(pdev, hdev);
+
+ ret = stm32_hash_dma_init(hdev);
+ if (ret)
+ dev_dbg(dev, "DMA mode not available\n");
+
+ spin_lock(&stm32_hash.lock);
+ list_add_tail(&hdev->list, &stm32_hash.dev_list);
+ spin_unlock(&stm32_hash.lock);
+
+ /* Initialize crypto engine */
+ hdev->engine = crypto_engine_alloc_init(dev, 1);
+ if (!hdev->engine) {
+ ret = -ENOMEM;
+ goto err_engine;
+ }
+
+ hdev->engine->prepare_hash_request = stm32_hash_prepare_req;
+ hdev->engine->hash_one_request = stm32_hash_one_request;
+
+ ret = crypto_engine_start(hdev->engine);
+ if (ret)
+ goto err_engine_start;
+
+ hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR);
+
+ /* Register algos */
+ ret = stm32_hash_register_algs(hdev);
+ if (ret)
+ goto err_algs;
+
+ dev_info(dev, "Init HASH done HW ver %x DMA mode %u\n",
+ stm32_hash_read(hdev, HASH_VER), hdev->dma_mode);
+
+ return 0;
+
+err_algs:
+err_engine_start:
+ crypto_engine_exit(hdev->engine);
+err_engine:
+ spin_lock(&stm32_hash.lock);
+ list_del(&hdev->list);
+ spin_unlock(&stm32_hash.lock);
+
+ if (hdev->dma_lch)
+ dma_release_channel(hdev->dma_lch);
+
+ clk_disable_unprepare(hdev->clk);
+
+ return ret;
+}
+
+static int stm32_hash_remove(struct platform_device *pdev)
+{
+ struct stm32_hash_dev *hdev;
+
+ hdev = platform_get_drvdata(pdev);
+ if (!hdev)
+ return -ENODEV;
+
+ stm32_hash_unregister_algs(hdev);
+
+ crypto_engine_exit(hdev->engine);
+
+ spin_lock(&stm32_hash.lock);
+ list_del(&hdev->list);
+ spin_unlock(&stm32_hash.lock);
+
+ if (hdev->dma_lch)
+ dma_release_channel(hdev->dma_lch);
+
+ clk_disable_unprepare(hdev->clk);
+
+ return 0;
+}
+
+static struct platform_driver stm32_hash_driver = {
+ .probe = stm32_hash_probe,
+ .remove = stm32_hash_remove,
+ .driver = {
+ .name = "stm32-hash",
+ .of_match_table = stm32_hash_of_match,
+ }
+};
+
+module_platform_driver(stm32_hash_driver);
+
+MODULE_DESCRIPTION("STM32 SHA1/224/256 & MD5 (HMAC) hw accelerator driver");
+MODULE_AUTHOR("Lionel Debieve <lionel.debieve@st.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/stm32/stm32_crc32.c b/drivers/crypto/stm32/stm32_crc32.c
index ec83b1e6bfe8..090582baecfe 100644
--- a/drivers/crypto/stm32/stm32_crc32.c
+++ b/drivers/crypto/stm32/stm32_crc32.c
@@ -107,12 +107,12 @@ static int stm32_crc_init(struct shash_desc *desc)
spin_unlock_bh(&crc_list.lock);
/* Reset, set key, poly and configure in bit reverse mode */
- writel(bitrev32(mctx->key), ctx->crc->regs + CRC_INIT);
- writel(bitrev32(mctx->poly), ctx->crc->regs + CRC_POL);
- writel(CRC_CR_RESET | CRC_CR_REVERSE, ctx->crc->regs + CRC_CR);
+ writel_relaxed(bitrev32(mctx->key), ctx->crc->regs + CRC_INIT);
+ writel_relaxed(bitrev32(mctx->poly), ctx->crc->regs + CRC_POL);
+ writel_relaxed(CRC_CR_RESET | CRC_CR_REVERSE, ctx->crc->regs + CRC_CR);
/* Store partial result */
- ctx->partial = readl(ctx->crc->regs + CRC_DR);
+ ctx->partial = readl_relaxed(ctx->crc->regs + CRC_DR);
ctx->crc->nb_pending_bytes = 0;
return 0;
@@ -135,7 +135,8 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
if (crc->nb_pending_bytes == sizeof(u32)) {
/* Process completed pending data */
- writel(*(u32 *)crc->pending_data, crc->regs + CRC_DR);
+ writel_relaxed(*(u32 *)crc->pending_data,
+ crc->regs + CRC_DR);
crc->nb_pending_bytes = 0;
}
}
@@ -143,10 +144,10 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
d32 = (u32 *)d8;
for (i = 0; i < length >> 2; i++)
/* Process 32 bits data */
- writel(*(d32++), crc->regs + CRC_DR);
+ writel_relaxed(*(d32++), crc->regs + CRC_DR);
/* Store partial result */
- ctx->partial = readl(crc->regs + CRC_DR);
+ ctx->partial = readl_relaxed(crc->regs + CRC_DR);
/* Check for pending data (non 32 bits) */
length &= 3;
@@ -295,7 +296,7 @@ static int stm32_crc_remove(struct platform_device *pdev)
list_del(&crc->list);
spin_unlock(&crc_list.lock);
- crypto_unregister_shash(algs);
+ crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
clk_disable_unprepare(crc->clk);
diff --git a/drivers/crypto/sunxi-ss/Makefile b/drivers/crypto/sunxi-ss/Makefile
index 8f4c7a273141..ccb893219079 100644
--- a/drivers/crypto/sunxi-ss/Makefile
+++ b/drivers/crypto/sunxi-ss/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sun4i-ss.o
sun4i-ss-y += sun4i-ss-core.o sun4i-ss-hash.o sun4i-ss-cipher.o
+sun4i-ss-$(CONFIG_CRYPTO_DEV_SUN4I_SS_PRNG) += sun4i-ss-prng.o
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
index 02ad8256e900..1547cbe13dc2 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-core.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
@@ -213,6 +213,23 @@ static struct sun4i_ss_alg_template ss_algs[] = {
}
}
},
+#ifdef CONFIG_CRYPTO_DEV_SUN4I_SS_PRNG
+{
+ .type = CRYPTO_ALG_TYPE_RNG,
+ .alg.rng = {
+ .base = {
+ .cra_name = "stdrng",
+ .cra_driver_name = "sun4i_ss_rng",
+ .cra_priority = 300,
+ .cra_ctxsize = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .generate = sun4i_ss_prng_generate,
+ .seed = sun4i_ss_prng_seed,
+ .seedsize = SS_SEED_LEN / BITS_PER_BYTE,
+ }
+},
+#endif
};
static int sun4i_ss_probe(struct platform_device *pdev)
@@ -355,6 +372,13 @@ static int sun4i_ss_probe(struct platform_device *pdev)
goto error_alg;
}
break;
+ case CRYPTO_ALG_TYPE_RNG:
+ err = crypto_register_rng(&ss_algs[i].alg.rng);
+ if (err) {
+ dev_err(ss->dev, "Fail to register %s\n",
+ ss_algs[i].alg.rng.base.cra_name);
+ }
+ break;
}
}
platform_set_drvdata(pdev, ss);
@@ -369,6 +393,9 @@ error_alg:
case CRYPTO_ALG_TYPE_AHASH:
crypto_unregister_ahash(&ss_algs[i].alg.hash);
break;
+ case CRYPTO_ALG_TYPE_RNG:
+ crypto_unregister_rng(&ss_algs[i].alg.rng);
+ break;
}
}
if (ss->reset)
@@ -393,6 +420,9 @@ static int sun4i_ss_remove(struct platform_device *pdev)
case CRYPTO_ALG_TYPE_AHASH:
crypto_unregister_ahash(&ss_algs[i].alg.hash);
break;
+ case CRYPTO_ALG_TYPE_RNG:
+ crypto_unregister_rng(&ss_algs[i].alg.rng);
+ break;
}
}
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-prng.c b/drivers/crypto/sunxi-ss/sun4i-ss-prng.c
new file mode 100644
index 000000000000..0d01d1624252
--- /dev/null
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-prng.c
@@ -0,0 +1,56 @@
+#include "sun4i-ss.h"
+
+int sun4i_ss_prng_seed(struct crypto_rng *tfm, const u8 *seed,
+ unsigned int slen)
+{
+ struct sun4i_ss_alg_template *algt;
+ struct rng_alg *alg = crypto_rng_alg(tfm);
+
+ algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng);
+ memcpy(algt->ss->seed, seed, slen);
+
+ return 0;
+}
+
+int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int dlen)
+{
+ struct sun4i_ss_alg_template *algt;
+ struct rng_alg *alg = crypto_rng_alg(tfm);
+ int i;
+ u32 v;
+ u32 *data = (u32 *)dst;
+ const u32 mode = SS_OP_PRNG | SS_PRNG_CONTINUE | SS_ENABLED;
+ size_t len;
+ struct sun4i_ss_ctx *ss;
+ unsigned int todo = (dlen / 4) * 4;
+
+ algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng);
+ ss = algt->ss;
+
+ spin_lock(&ss->slock);
+
+ writel(mode, ss->base + SS_CTL);
+
+ while (todo > 0) {
+ /* write the seed */
+ for (i = 0; i < SS_SEED_LEN / BITS_PER_LONG; i++)
+ writel(ss->seed[i], ss->base + SS_KEY0 + i * 4);
+
+ /* Read the random data */
+ len = min_t(size_t, SS_DATA_LEN / BITS_PER_BYTE, todo);
+ readsl(ss->base + SS_TXFIFO, data, len / 4);
+ data += len / 4;
+ todo -= len;
+
+ /* Update the seed */
+ for (i = 0; i < SS_SEED_LEN / BITS_PER_LONG; i++) {
+ v = readl(ss->base + SS_KEY0 + i * 4);
+ ss->seed[i] = v;
+ }
+ }
+
+ writel(0, ss->base + SS_CTL);
+ spin_unlock(&ss->slock);
+ return dlen;
+}
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss.h b/drivers/crypto/sunxi-ss/sun4i-ss.h
index a0e1efc1cb2a..f3ac90692ac6 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss.h
+++ b/drivers/crypto/sunxi-ss/sun4i-ss.h
@@ -32,6 +32,7 @@
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/internal/rng.h>
+#include <crypto/rng.h>
#define SS_CTL 0x00
#define SS_KEY0 0x04
@@ -127,6 +128,9 @@
#define SS_RXFIFO_EMP_INT_ENABLE (1 << 2)
#define SS_TXFIFO_AVA_INT_ENABLE (1 << 0)
+#define SS_SEED_LEN 192
+#define SS_DATA_LEN 160
+
struct sun4i_ss_ctx {
void __iomem *base;
int irq;
@@ -136,6 +140,9 @@ struct sun4i_ss_ctx {
struct device *dev;
struct resource *res;
spinlock_t slock; /* control the use of the device */
+#ifdef CONFIG_CRYPTO_DEV_SUN4I_SS_PRNG
+ u32 seed[SS_SEED_LEN / BITS_PER_LONG];
+#endif
};
struct sun4i_ss_alg_template {
@@ -144,6 +151,7 @@ struct sun4i_ss_alg_template {
union {
struct skcipher_alg crypto;
struct ahash_alg hash;
+ struct rng_alg rng;
} alg;
struct sun4i_ss_ctx *ss;
};
@@ -201,3 +209,6 @@ int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen);
int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen);
+int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int dlen);
+int sun4i_ss_prng_seed(struct crypto_rng *tfm, const u8 *seed, unsigned int slen);
diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c
index 49defda4e03d..5035b0dc1e40 100644
--- a/drivers/crypto/virtio/virtio_crypto_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_algs.c
@@ -27,12 +27,68 @@
#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"
+
+struct virtio_crypto_ablkcipher_ctx {
+ struct virtio_crypto *vcrypto;
+ struct crypto_tfm *tfm;
+
+ struct virtio_crypto_sym_session_info enc_sess_info;
+ struct virtio_crypto_sym_session_info dec_sess_info;
+};
+
+struct virtio_crypto_sym_request {
+ struct virtio_crypto_request base;
+
+ /* Cipher or aead */
+ uint32_t type;
+ struct virtio_crypto_ablkcipher_ctx *ablkcipher_ctx;
+ struct ablkcipher_request *ablkcipher_req;
+ uint8_t *iv;
+ /* Encryption? */
+ bool encrypt;
+};
+
/*
* The algs_lock protects the below global virtio_crypto_active_devs
* and crypto algorithms registion.
*/
static DEFINE_MUTEX(algs_lock);
static unsigned int virtio_crypto_active_devs;
+static void virtio_crypto_ablkcipher_finalize_req(
+ struct virtio_crypto_sym_request *vc_sym_req,
+ struct ablkcipher_request *req,
+ int err);
+
+static void virtio_crypto_dataq_sym_callback
+ (struct virtio_crypto_request *vc_req, int len)
+{
+ struct virtio_crypto_sym_request *vc_sym_req =
+ container_of(vc_req, struct virtio_crypto_sym_request, base);
+ struct ablkcipher_request *ablk_req;
+ int error;
+
+ /* Finish the encrypt or decrypt process */
+ if (vc_sym_req->type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
+ switch (vc_req->status) {
+ case VIRTIO_CRYPTO_OK:
+ error = 0;
+ break;
+ case VIRTIO_CRYPTO_INVSESS:
+ case VIRTIO_CRYPTO_ERR:
+ error = -EINVAL;
+ break;
+ case VIRTIO_CRYPTO_BADMSG:
+ error = -EBADMSG;
+ break;
+ default:
+ error = -EIO;
+ break;
+ }
+ ablk_req = vc_sym_req->ablkcipher_req;
+ virtio_crypto_ablkcipher_finalize_req(vc_sym_req,
+ ablk_req, error);
+ }
+}
static u64 virtio_crypto_alg_sg_nents_length(struct scatterlist *sg)
{
@@ -286,13 +342,14 @@ static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
}
static int
-__virtio_crypto_ablkcipher_do_req(struct virtio_crypto_request *vc_req,
+__virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
struct ablkcipher_request *req,
struct data_queue *data_vq)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+ struct virtio_crypto_ablkcipher_ctx *ctx = vc_sym_req->ablkcipher_ctx;
+ struct virtio_crypto_request *vc_req = &vc_sym_req->base;
unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
- struct virtio_crypto_ablkcipher_ctx *ctx = vc_req->ablkcipher_ctx;
struct virtio_crypto *vcrypto = ctx->vcrypto;
struct virtio_crypto_op_data_req *req_data;
int src_nents, dst_nents;
@@ -326,9 +383,9 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_request *vc_req,
}
vc_req->req_data = req_data;
- vc_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
+ vc_sym_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
/* Head of operation */
- if (vc_req->encrypt) {
+ if (vc_sym_req->encrypt) {
req_data->header.session_id =
cpu_to_le64(ctx->enc_sess_info.session_id);
req_data->header.opcode =
@@ -383,7 +440,7 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_request *vc_req,
memcpy(iv, req->info, ivsize);
sg_init_one(&iv_sg, iv, ivsize);
sgs[num_out++] = &iv_sg;
- vc_req->iv = iv;
+ vc_sym_req->iv = iv;
/* Source data */
for (i = 0; i < src_nents; i++)
@@ -421,15 +478,18 @@ static int virtio_crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
- struct virtio_crypto_request *vc_req = ablkcipher_request_ctx(req);
+ struct virtio_crypto_sym_request *vc_sym_req =
+ ablkcipher_request_ctx(req);
+ struct virtio_crypto_request *vc_req = &vc_sym_req->base;
struct virtio_crypto *vcrypto = ctx->vcrypto;
/* Use the first data virtqueue as default */
struct data_queue *data_vq = &vcrypto->data_vq[0];
- vc_req->ablkcipher_ctx = ctx;
- vc_req->ablkcipher_req = req;
- vc_req->encrypt = true;
vc_req->dataq = data_vq;
+ vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
+ vc_sym_req->ablkcipher_ctx = ctx;
+ vc_sym_req->ablkcipher_req = req;
+ vc_sym_req->encrypt = true;
return crypto_transfer_cipher_request_to_engine(data_vq->engine, req);
}
@@ -438,16 +498,18 @@ static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
- struct virtio_crypto_request *vc_req = ablkcipher_request_ctx(req);
+ struct virtio_crypto_sym_request *vc_sym_req =
+ ablkcipher_request_ctx(req);
+ struct virtio_crypto_request *vc_req = &vc_sym_req->base;
struct virtio_crypto *vcrypto = ctx->vcrypto;
/* Use the first data virtqueue as default */
struct data_queue *data_vq = &vcrypto->data_vq[0];
- vc_req->ablkcipher_ctx = ctx;
- vc_req->ablkcipher_req = req;
-
- vc_req->encrypt = false;
vc_req->dataq = data_vq;
+ vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
+ vc_sym_req->ablkcipher_ctx = ctx;
+ vc_sym_req->ablkcipher_req = req;
+ vc_sym_req->encrypt = false;
return crypto_transfer_cipher_request_to_engine(data_vq->engine, req);
}
@@ -456,7 +518,7 @@ static int virtio_crypto_ablkcipher_init(struct crypto_tfm *tfm)
{
struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
- tfm->crt_ablkcipher.reqsize = sizeof(struct virtio_crypto_request);
+ tfm->crt_ablkcipher.reqsize = sizeof(struct virtio_crypto_sym_request);
ctx->tfm = tfm;
return 0;
@@ -479,11 +541,13 @@ int virtio_crypto_ablkcipher_crypt_req(
struct crypto_engine *engine,
struct ablkcipher_request *req)
{
- struct virtio_crypto_request *vc_req = ablkcipher_request_ctx(req);
+ struct virtio_crypto_sym_request *vc_sym_req =
+ ablkcipher_request_ctx(req);
+ struct virtio_crypto_request *vc_req = &vc_sym_req->base;
struct data_queue *data_vq = vc_req->dataq;
int ret;
- ret = __virtio_crypto_ablkcipher_do_req(vc_req, req, data_vq);
+ ret = __virtio_crypto_ablkcipher_do_req(vc_sym_req, req, data_vq);
if (ret < 0)
return ret;
@@ -492,14 +556,15 @@ int virtio_crypto_ablkcipher_crypt_req(
return 0;
}
-void virtio_crypto_ablkcipher_finalize_req(
- struct virtio_crypto_request *vc_req,
+static void virtio_crypto_ablkcipher_finalize_req(
+ struct virtio_crypto_sym_request *vc_sym_req,
struct ablkcipher_request *req,
int err)
{
- crypto_finalize_cipher_request(vc_req->dataq->engine, req, err);
-
- virtcrypto_clear_request(vc_req);
+ crypto_finalize_cipher_request(vc_sym_req->base.dataq->engine,
+ req, err);
+ kzfree(vc_sym_req->iv);
+ virtcrypto_clear_request(&vc_sym_req->base);
}
static struct crypto_alg virtio_crypto_algs[] = { {
diff --git a/drivers/crypto/virtio/virtio_crypto_common.h b/drivers/crypto/virtio/virtio_crypto_common.h
index da6d8c0ea407..e976539a05d9 100644
--- a/drivers/crypto/virtio/virtio_crypto_common.h
+++ b/drivers/crypto/virtio/virtio_crypto_common.h
@@ -83,26 +83,16 @@ struct virtio_crypto_sym_session_info {
__u64 session_id;
};
-struct virtio_crypto_ablkcipher_ctx {
- struct virtio_crypto *vcrypto;
- struct crypto_tfm *tfm;
-
- struct virtio_crypto_sym_session_info enc_sess_info;
- struct virtio_crypto_sym_session_info dec_sess_info;
-};
+struct virtio_crypto_request;
+typedef void (*virtio_crypto_data_callback)
+ (struct virtio_crypto_request *vc_req, int len);
struct virtio_crypto_request {
- /* Cipher or aead */
- uint32_t type;
uint8_t status;
- struct virtio_crypto_ablkcipher_ctx *ablkcipher_ctx;
- struct ablkcipher_request *ablkcipher_req;
struct virtio_crypto_op_data_req *req_data;
struct scatterlist **sgs;
- uint8_t *iv;
- /* Encryption? */
- bool encrypt;
struct data_queue *dataq;
+ virtio_crypto_data_callback alg_cb;
};
int virtcrypto_devmgr_add_dev(struct virtio_crypto *vcrypto_dev);
@@ -119,10 +109,6 @@ void virtcrypto_dev_stop(struct virtio_crypto *vcrypto);
int virtio_crypto_ablkcipher_crypt_req(
struct crypto_engine *engine,
struct ablkcipher_request *req);
-void virtio_crypto_ablkcipher_finalize_req(
- struct virtio_crypto_request *vc_req,
- struct ablkcipher_request *req,
- int err);
void
virtcrypto_clear_request(struct virtio_crypto_request *vc_req);
diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
index a111cd72797b..ff1410a32c2b 100644
--- a/drivers/crypto/virtio/virtio_crypto_core.c
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -29,7 +29,6 @@ void
virtcrypto_clear_request(struct virtio_crypto_request *vc_req)
{
if (vc_req) {
- kzfree(vc_req->iv);
kzfree(vc_req->req_data);
kfree(vc_req->sgs);
}
@@ -41,40 +40,18 @@ static void virtcrypto_dataq_callback(struct virtqueue *vq)
struct virtio_crypto_request *vc_req;
unsigned long flags;
unsigned int len;
- struct ablkcipher_request *ablk_req;
- int error;
unsigned int qid = vq->index;
spin_lock_irqsave(&vcrypto->data_vq[qid].lock, flags);
do {
virtqueue_disable_cb(vq);
while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) {
- if (vc_req->type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
- switch (vc_req->status) {
- case VIRTIO_CRYPTO_OK:
- error = 0;
- break;
- case VIRTIO_CRYPTO_INVSESS:
- case VIRTIO_CRYPTO_ERR:
- error = -EINVAL;
- break;
- case VIRTIO_CRYPTO_BADMSG:
- error = -EBADMSG;
- break;
- default:
- error = -EIO;
- break;
- }
- ablk_req = vc_req->ablkcipher_req;
-
- spin_unlock_irqrestore(
- &vcrypto->data_vq[qid].lock, flags);
- /* Finish the encrypt or decrypt process */
- virtio_crypto_ablkcipher_finalize_req(vc_req,
- ablk_req, error);
- spin_lock_irqsave(
- &vcrypto->data_vq[qid].lock, flags);
- }
+ spin_unlock_irqrestore(
+ &vcrypto->data_vq[qid].lock, flags);
+ if (vc_req->alg_cb)
+ vc_req->alg_cb(vc_req, len);
+ spin_lock_irqsave(
+ &vcrypto->data_vq[qid].lock, flags);
}
} while (!virtqueue_enable_cb(vq));
spin_unlock_irqrestore(&vcrypto->data_vq[qid].lock, flags);
@@ -270,7 +247,7 @@ static int virtcrypto_update_status(struct virtio_crypto *vcrypto)
return -EPERM;
}
- dev_info(&vcrypto->vdev->dev, "Accelerator is ready\n");
+ dev_info(&vcrypto->vdev->dev, "Accelerator device is ready\n");
} else {
virtcrypto_dev_stop(vcrypto);
dev_info(&vcrypto->vdev->dev, "Accelerator is not ready\n");
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
index 9c26d9e8dbea..17d84217dd76 100644
--- a/drivers/crypto/vmx/aes_ctr.c
+++ b/drivers/crypto/vmx/aes_ctr.c
@@ -104,8 +104,7 @@ static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
pagefault_enable();
preempt_enable();
- crypto_xor(keystream, src, nbytes);
- memcpy(dst, keystream, nbytes);
+ crypto_xor_cpy(dst, keystream, src, nbytes);
crypto_inc(ctrblk, AES_BLOCK_SIZE);
}
diff --git a/drivers/dax/device-dax.h b/drivers/dax/device-dax.h
index fdcd9769ffde..688b051750bd 100644
--- a/drivers/dax/device-dax.h
+++ b/drivers/dax/device-dax.h
@@ -21,5 +21,5 @@ struct dax_region *alloc_dax_region(struct device *parent,
int region_id, struct resource *res, unsigned int align,
void *addr, unsigned long flags);
struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
- struct resource *res, int count);
+ int id, struct resource *res, int count);
#endif /* __DEVICE_DAX_H__ */
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index 12943d19bfc4..e9f3b3e4bbf4 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -529,7 +529,8 @@ static void dev_dax_release(struct device *dev)
struct dax_region *dax_region = dev_dax->region;
struct dax_device *dax_dev = dev_dax->dax_dev;
- ida_simple_remove(&dax_region->ida, dev_dax->id);
+ if (dev_dax->id >= 0)
+ ida_simple_remove(&dax_region->ida, dev_dax->id);
dax_region_put(dax_region);
put_dax(dax_dev);
kfree(dev_dax);
@@ -559,7 +560,7 @@ static void unregister_dev_dax(void *dev)
}
struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
- struct resource *res, int count)
+ int id, struct resource *res, int count)
{
struct device *parent = dax_region->dev;
struct dax_device *dax_dev;
@@ -567,7 +568,10 @@ struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
struct inode *inode;
struct device *dev;
struct cdev *cdev;
- int rc = 0, i;
+ int rc, i;
+
+ if (!count)
+ return ERR_PTR(-EINVAL);
dev_dax = kzalloc(sizeof(*dev_dax) + sizeof(*res) * count, GFP_KERNEL);
if (!dev_dax)
@@ -587,10 +591,16 @@ struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
if (i < count)
goto err_id;
- dev_dax->id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL);
- if (dev_dax->id < 0) {
- rc = dev_dax->id;
- goto err_id;
+ if (id < 0) {
+ id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL);
+ dev_dax->id = id;
+ if (id < 0) {
+ rc = id;
+ goto err_id;
+ }
+ } else {
+ /* region provider owns @id lifetime */
+ dev_dax->id = -1;
}
/*
@@ -598,8 +608,10 @@ struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
* device outside of mmap of the resulting character device.
*/
dax_dev = alloc_dax(dev_dax, NULL, NULL);
- if (!dax_dev)
+ if (!dax_dev) {
+ rc = -ENOMEM;
goto err_dax;
+ }
/* from here on we're committed to teardown via dax_dev_release() */
dev = &dev_dax->dev;
@@ -620,7 +632,7 @@ struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
dev->parent = parent;
dev->groups = dax_attribute_groups;
dev->release = dev_dax_release;
- dev_set_name(dev, "dax%d.%d", dax_region->id, dev_dax->id);
+ dev_set_name(dev, "dax%d.%d", dax_region->id, id);
rc = cdev_device_add(cdev, dev);
if (rc) {
@@ -636,7 +648,8 @@ struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
return dev_dax;
err_dax:
- ida_simple_remove(&dax_region->ida, dev_dax->id);
+ if (dev_dax->id >= 0)
+ ida_simple_remove(&dax_region->ida, dev_dax->id);
err_id:
kfree(dev_dax);
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
index 9f2a0b4fd801..8d8c852ba8f2 100644
--- a/drivers/dax/pmem.c
+++ b/drivers/dax/pmem.c
@@ -58,13 +58,12 @@ static void dax_pmem_percpu_kill(void *data)
static int dax_pmem_probe(struct device *dev)
{
- int rc;
void *addr;
struct resource res;
+ int rc, id, region_id;
struct nd_pfn_sb *pfn_sb;
struct dev_dax *dev_dax;
struct dax_pmem *dax_pmem;
- struct nd_region *nd_region;
struct nd_namespace_io *nsio;
struct dax_region *dax_region;
struct nd_namespace_common *ndns;
@@ -123,14 +122,17 @@ static int dax_pmem_probe(struct device *dev)
/* adjust the dax_region resource to the start of data */
res.start += le64_to_cpu(pfn_sb->dataoff);
- nd_region = to_nd_region(dev->parent);
- dax_region = alloc_dax_region(dev, nd_region->id, &res,
+ rc = sscanf(dev_name(&ndns->dev), "namespace%d.%d", &region_id, &id);
+ if (rc != 2)
+ return -EINVAL;
+
+ dax_region = alloc_dax_region(dev, region_id, &res,
le32_to_cpu(pfn_sb->align), addr, PFN_DEV|PFN_MAP);
if (!dax_region)
return -ENOMEM;
/* TODO: support for subdividing a dax region... */
- dev_dax = devm_create_dev_dax(dax_region, &res, 1);
+ dev_dax = devm_create_dev_dax(dax_region, id, &res, 1);
/* child dev_dax instances now own the lifetime of the dax_region */
dax_region_put(dax_region);
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index ce9e563e6e1d..938eb4868f7f 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -278,6 +278,12 @@ void dax_write_cache(struct dax_device *dax_dev, bool wc)
}
EXPORT_SYMBOL_GPL(dax_write_cache);
+bool dax_write_cache_enabled(struct dax_device *dax_dev)
+{
+ return test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
+}
+EXPORT_SYMBOL_GPL(dax_write_cache_enabled);
+
bool dax_alive(struct dax_device *dax_dev)
{
lockdep_assert_held(&dax_srcu);
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 41254e702f1e..6a172d338f6d 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -1,6 +1,7 @@
menuconfig PM_DEVFREQ
bool "Generic Dynamic Voltage and Frequency Scaling (DVFS) support"
select SRCU
+ select PM_OPP
help
A device may have a list of frequencies and voltages available.
devfreq, a generic DVFS framework can be registered for a device
diff --git a/drivers/devfreq/devfreq-event.c b/drivers/devfreq/devfreq-event.c
index 8648b32ebc89..d67242d87744 100644
--- a/drivers/devfreq/devfreq-event.c
+++ b/drivers/devfreq/devfreq-event.c
@@ -277,8 +277,8 @@ int devfreq_event_get_edev_count(struct device *dev)
sizeof(u32));
if (count < 0) {
dev_err(dev,
- "failed to get the count of devfreq-event in %s node\n",
- dev->of_node->full_name);
+ "failed to get the count of devfreq-event in %pOF node\n",
+ dev->of_node);
return count;
}
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index dea04871b50d..a1c4ee818614 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -564,7 +564,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
err = device_register(&devfreq->dev);
if (err) {
mutex_unlock(&devfreq->lock);
- goto err_out;
+ goto err_dev;
}
devfreq->trans_table = devm_kzalloc(&devfreq->dev,
@@ -610,6 +610,9 @@ err_init:
mutex_unlock(&devfreq_list_lock);
device_unregister(&devfreq->dev);
+err_dev:
+ if (devfreq)
+ kfree(devfreq);
err_out:
return ERR_PTR(err);
}
diff --git a/drivers/devfreq/governor.h b/drivers/devfreq/governor.h
index a4f2fa1091e4..cfc50a61a90d 100644
--- a/drivers/devfreq/governor.h
+++ b/drivers/devfreq/governor.h
@@ -69,4 +69,8 @@ extern int devfreq_remove_governor(struct devfreq_governor *governor);
extern int devfreq_update_status(struct devfreq *devfreq, unsigned long freq);
+static inline int devfreq_update_stats(struct devfreq *df)
+{
+ return df->profile->get_dev_status(df->dev.parent, &df->last_status);
+}
#endif /* _GOVERNOR_H */
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 57da14c15987..9a302799040e 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -48,7 +48,7 @@ static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(0);
*/
u64 dma_fence_context_alloc(unsigned num)
{
- BUG_ON(!num);
+ WARN_ON(!num);
return atomic64_add_return(num, &dma_fence_context_counter) - num;
}
EXPORT_SYMBOL(dma_fence_context_alloc);
@@ -75,11 +75,6 @@ int dma_fence_signal_locked(struct dma_fence *fence)
if (WARN_ON(!fence))
return -EINVAL;
- if (!ktime_to_ns(fence->timestamp)) {
- fence->timestamp = ktime_get();
- smp_mb__before_atomic();
- }
-
if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
ret = -EINVAL;
@@ -87,8 +82,11 @@ int dma_fence_signal_locked(struct dma_fence *fence)
* we might have raced with the unlocked dma_fence_signal,
* still run through all callbacks
*/
- } else
+ } else {
+ fence->timestamp = ktime_get();
+ set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
trace_dma_fence_signaled(fence);
+ }
list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
list_del_init(&cur->node);
@@ -115,14 +113,11 @@ int dma_fence_signal(struct dma_fence *fence)
if (!fence)
return -EINVAL;
- if (!ktime_to_ns(fence->timestamp)) {
- fence->timestamp = ktime_get();
- smp_mb__before_atomic();
- }
-
if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
return -EINVAL;
+ fence->timestamp = ktime_get();
+ set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
trace_dma_fence_signaled(fence);
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
@@ -177,7 +172,7 @@ void dma_fence_release(struct kref *kref)
trace_dma_fence_destroy(fence);
- BUG_ON(!list_empty(&fence->cb_list));
+ WARN_ON(!list_empty(&fence->cb_list));
if (fence->ops->release)
fence->ops->release(fence);
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index 393817e849ed..dec3a815455d 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -195,8 +195,7 @@ done:
if (old)
kfree_rcu(old, rcu);
- if (old_fence)
- dma_fence_put(old_fence);
+ dma_fence_put(old_fence);
}
/**
@@ -258,12 +257,71 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
dma_fence_put(rcu_dereference_protected(old->shared[i],
reservation_object_held(obj)));
- if (old_fence)
- dma_fence_put(old_fence);
+ dma_fence_put(old_fence);
}
EXPORT_SYMBOL(reservation_object_add_excl_fence);
/**
+ * reservation_object_copy_fences - Copy all fences from src to dst.
+ * @dst: the destination reservation object
+ * @src: the source reservation object
+ *
+ * Copy all fences from src to dst. Both src->lock as well as dst->lock must be
+ * held.
+ */
+int reservation_object_copy_fences(struct reservation_object *dst,
+ struct reservation_object *src)
+{
+ struct reservation_object_list *src_list, *dst_list;
+ struct dma_fence *old, *new;
+ size_t size;
+ unsigned i;
+
+ src_list = reservation_object_get_list(src);
+
+ if (src_list) {
+ size = offsetof(typeof(*src_list),
+ shared[src_list->shared_count]);
+ dst_list = kmalloc(size, GFP_KERNEL);
+ if (!dst_list)
+ return -ENOMEM;
+
+ dst_list->shared_count = src_list->shared_count;
+ dst_list->shared_max = src_list->shared_count;
+ for (i = 0; i < src_list->shared_count; ++i)
+ dst_list->shared[i] =
+ dma_fence_get(src_list->shared[i]);
+ } else {
+ dst_list = NULL;
+ }
+
+ kfree(dst->staged);
+ dst->staged = NULL;
+
+ src_list = reservation_object_get_list(dst);
+
+ old = reservation_object_get_excl(dst);
+ new = reservation_object_get_excl(src);
+
+ dma_fence_get(new);
+
+ preempt_disable();
+ write_seqcount_begin(&dst->seq);
+ /* write_seqcount_begin provides the necessary memory barrier */
+ RCU_INIT_POINTER(dst->fence_excl, new);
+ RCU_INIT_POINTER(dst->fence, dst_list);
+ write_seqcount_end(&dst->seq);
+ preempt_enable();
+
+ if (src_list)
+ kfree_rcu(src_list, rcu);
+ dma_fence_put(old);
+
+ return 0;
+}
+EXPORT_SYMBOL(reservation_object_copy_fences);
+
+/**
* reservation_object_get_fences_rcu - Get an object's shared and exclusive
* fences without update side lock held
* @obj: the reservation object
@@ -373,12 +431,25 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
long ret = timeout ? timeout : 1;
retry:
- fence = NULL;
shared_count = 0;
seq = read_seqcount_begin(&obj->seq);
rcu_read_lock();
- if (wait_all) {
+ fence = rcu_dereference(obj->fence_excl);
+ if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+ if (!dma_fence_get_rcu(fence))
+ goto unlock_retry;
+
+ if (dma_fence_is_signaled(fence)) {
+ dma_fence_put(fence);
+ fence = NULL;
+ }
+
+ } else {
+ fence = NULL;
+ }
+
+ if (!fence && wait_all) {
struct reservation_object_list *fobj =
rcu_dereference(obj->fence);
@@ -405,22 +476,6 @@ retry:
}
}
- if (!shared_count) {
- struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);
-
- if (fence_excl &&
- !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
- &fence_excl->flags)) {
- if (!dma_fence_get_rcu(fence_excl))
- goto unlock_retry;
-
- if (dma_fence_is_signaled(fence_excl))
- dma_fence_put(fence_excl);
- else
- fence = fence_excl;
- }
- }
-
rcu_read_unlock();
if (fence) {
if (read_seqcount_retry(&obj->seq, seq)) {
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index 69c5ff36e2f9..38cc7389a6c1 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -96,9 +96,9 @@ static struct sync_timeline *sync_timeline_create(const char *name)
obj->context = dma_fence_context_alloc(1);
strlcpy(obj->name, name, sizeof(obj->name));
- INIT_LIST_HEAD(&obj->child_list_head);
- INIT_LIST_HEAD(&obj->active_list_head);
- spin_lock_init(&obj->child_list_lock);
+ obj->pt_tree = RB_ROOT;
+ INIT_LIST_HEAD(&obj->pt_list);
+ spin_lock_init(&obj->lock);
sync_timeline_debug_add(obj);
@@ -125,68 +125,6 @@ static void sync_timeline_put(struct sync_timeline *obj)
kref_put(&obj->kref, sync_timeline_free);
}
-/**
- * sync_timeline_signal() - signal a status change on a sync_timeline
- * @obj: sync_timeline to signal
- * @inc: num to increment on timeline->value
- *
- * A sync implementation should call this any time one of it's fences
- * has signaled or has an error condition.
- */
-static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
-{
- unsigned long flags;
- struct sync_pt *pt, *next;
-
- trace_sync_timeline(obj);
-
- spin_lock_irqsave(&obj->child_list_lock, flags);
-
- obj->value += inc;
-
- list_for_each_entry_safe(pt, next, &obj->active_list_head,
- active_list) {
- if (dma_fence_is_signaled_locked(&pt->base))
- list_del_init(&pt->active_list);
- }
-
- spin_unlock_irqrestore(&obj->child_list_lock, flags);
-}
-
-/**
- * sync_pt_create() - creates a sync pt
- * @parent: fence's parent sync_timeline
- * @size: size to allocate for this pt
- * @inc: value of the fence
- *
- * Creates a new sync_pt as a child of @parent. @size bytes will be
- * allocated allowing for implementation specific data to be kept after
- * the generic sync_timeline struct. Returns the sync_pt object or
- * NULL in case of error.
- */
-static struct sync_pt *sync_pt_create(struct sync_timeline *obj, int size,
- unsigned int value)
-{
- unsigned long flags;
- struct sync_pt *pt;
-
- if (size < sizeof(*pt))
- return NULL;
-
- pt = kzalloc(size, GFP_KERNEL);
- if (!pt)
- return NULL;
-
- spin_lock_irqsave(&obj->child_list_lock, flags);
- sync_timeline_get(obj);
- dma_fence_init(&pt->base, &timeline_fence_ops, &obj->child_list_lock,
- obj->context, value);
- list_add_tail(&pt->child_list, &obj->child_list_head);
- INIT_LIST_HEAD(&pt->active_list);
- spin_unlock_irqrestore(&obj->child_list_lock, flags);
- return pt;
-}
-
static const char *timeline_fence_get_driver_name(struct dma_fence *fence)
{
return "sw_sync";
@@ -203,13 +141,17 @@ static void timeline_fence_release(struct dma_fence *fence)
{
struct sync_pt *pt = dma_fence_to_sync_pt(fence);
struct sync_timeline *parent = dma_fence_parent(fence);
- unsigned long flags;
- spin_lock_irqsave(fence->lock, flags);
- list_del(&pt->child_list);
- if (!list_empty(&pt->active_list))
- list_del(&pt->active_list);
- spin_unlock_irqrestore(fence->lock, flags);
+ if (!list_empty(&pt->link)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(fence->lock, flags);
+ if (!list_empty(&pt->link)) {
+ list_del(&pt->link);
+ rb_erase(&pt->node, &parent->pt_tree);
+ }
+ spin_unlock_irqrestore(fence->lock, flags);
+ }
sync_timeline_put(parent);
dma_fence_free(fence);
@@ -219,18 +161,11 @@ static bool timeline_fence_signaled(struct dma_fence *fence)
{
struct sync_timeline *parent = dma_fence_parent(fence);
- return (fence->seqno > parent->value) ? false : true;
+ return !__dma_fence_is_later(fence->seqno, parent->value);
}
static bool timeline_fence_enable_signaling(struct dma_fence *fence)
{
- struct sync_pt *pt = dma_fence_to_sync_pt(fence);
- struct sync_timeline *parent = dma_fence_parent(fence);
-
- if (timeline_fence_signaled(fence))
- return false;
-
- list_add_tail(&pt->active_list, &parent->active_list_head);
return true;
}
@@ -259,6 +194,107 @@ static const struct dma_fence_ops timeline_fence_ops = {
.timeline_value_str = timeline_fence_timeline_value_str,
};
+/**
+ * sync_timeline_signal() - signal a status change on a sync_timeline
+ * @obj: sync_timeline to signal
+ * @inc: number to increment on timeline->value
+ *
+ * A sync implementation should call this any time one of its fences
+ * has signaled or has an error condition.
+ */
+static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
+{
+ struct sync_pt *pt, *next;
+
+ trace_sync_timeline(obj);
+
+ spin_lock_irq(&obj->lock);
+
+ obj->value += inc;
+
+ list_for_each_entry_safe(pt, next, &obj->pt_list, link) {
+ if (!timeline_fence_signaled(&pt->base))
+ break;
+
+ list_del_init(&pt->link);
+ rb_erase(&pt->node, &obj->pt_tree);
+
+ /*
+ * A signal callback may release the last reference to this
+ * fence, causing it to be freed. That operation has to be
+ * last to avoid a use after free inside this loop, and must
+ * be after we remove the fence from the timeline in order to
+ * prevent deadlocking on timeline->lock inside
+ * timeline_fence_release().
+ */
+ dma_fence_signal_locked(&pt->base);
+ }
+
+ spin_unlock_irq(&obj->lock);
+}
+
+/**
+ * sync_pt_create() - creates a sync pt
+ * @obj: parent sync_timeline
+ * @value: value of the fence
+ *
+ * Creates a new sync_pt (fence) as a child of @obj, signaled once the
+ * timeline's value reaches or passes @value. If a point with the same
+ * value already exists on the timeline, a reference to it is returned
+ * instead. Returns the sync_pt object or NULL in case of error.
+ */
+static struct sync_pt *sync_pt_create(struct sync_timeline *obj,
+ unsigned int value)
+{
+ struct sync_pt *pt;
+
+ pt = kzalloc(sizeof(*pt), GFP_KERNEL);
+ if (!pt)
+ return NULL;
+
+ sync_timeline_get(obj);
+ dma_fence_init(&pt->base, &timeline_fence_ops, &obj->lock,
+ obj->context, value);
+ INIT_LIST_HEAD(&pt->link);
+
+ spin_lock_irq(&obj->lock);
+ if (!dma_fence_is_signaled_locked(&pt->base)) {
+ struct rb_node **p = &obj->pt_tree.rb_node;
+ struct rb_node *parent = NULL;
+
+ while (*p) {
+ struct sync_pt *other;
+ int cmp;
+
+ parent = *p;
+ other = rb_entry(parent, typeof(*pt), node);
+ cmp = value - other->base.seqno;
+ if (cmp > 0) {
+ p = &parent->rb_right;
+ } else if (cmp < 0) {
+ p = &parent->rb_left;
+ } else {
+ if (dma_fence_get_rcu(&other->base)) {
+ dma_fence_put(&pt->base);
+ pt = other;
+ goto unlock;
+ }
+ p = &parent->rb_left;
+ }
+ }
+ rb_link_node(&pt->node, parent, p);
+ rb_insert_color(&pt->node, &obj->pt_tree);
+
+ parent = rb_next(&pt->node);
+ list_add_tail(&pt->link,
+ parent ? &rb_entry(parent, typeof(*pt), node)->link : &obj->pt_list);
+ }
+unlock:
+ spin_unlock_irq(&obj->lock);
+
+ return pt;
+}
+
/*
* *WARNING*
*
@@ -309,7 +345,7 @@ static long sw_sync_ioctl_create_fence(struct sync_timeline *obj,
goto err;
}
- pt = sync_pt_create(obj, sizeof(*pt), data.value);
+ pt = sync_pt_create(obj, data.value);
if (!pt) {
err = -ENOMEM;
goto err;
@@ -345,6 +381,11 @@ static long sw_sync_ioctl_inc(struct sync_timeline *obj, unsigned long arg)
if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
return -EFAULT;
+ while (value > INT_MAX) {
+ sync_timeline_signal(obj, INT_MAX);
+ value -= INT_MAX;
+ }
+
sync_timeline_signal(obj, value);
return 0;
diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c
index 82a6e7f6d37f..c4c8ecb24aa9 100644
--- a/drivers/dma-buf/sync_debug.c
+++ b/drivers/dma-buf/sync_debug.c
@@ -84,7 +84,7 @@ static void sync_print_fence(struct seq_file *s,
show ? "_" : "",
sync_status_str(status));
- if (status) {
+ if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags)) {
struct timespec64 ts64 =
ktime_to_timespec64(fence->timestamp);
@@ -116,17 +116,15 @@ static void sync_print_fence(struct seq_file *s,
static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
{
struct list_head *pos;
- unsigned long flags;
seq_printf(s, "%s: %d\n", obj->name, obj->value);
- spin_lock_irqsave(&obj->child_list_lock, flags);
- list_for_each(pos, &obj->child_list_head) {
- struct sync_pt *pt =
- container_of(pos, struct sync_pt, child_list);
+ spin_lock_irq(&obj->lock);
+ list_for_each(pos, &obj->pt_list) {
+ struct sync_pt *pt = container_of(pos, struct sync_pt, link);
sync_print_fence(s, &pt->base, false);
}
- spin_unlock_irqrestore(&obj->child_list_lock, flags);
+ spin_unlock_irq(&obj->lock);
}
static void sync_print_sync_file(struct seq_file *s,
@@ -151,12 +149,11 @@ static void sync_print_sync_file(struct seq_file *s,
static int sync_debugfs_show(struct seq_file *s, void *unused)
{
- unsigned long flags;
struct list_head *pos;
seq_puts(s, "objs:\n--------------\n");
- spin_lock_irqsave(&sync_timeline_list_lock, flags);
+ spin_lock_irq(&sync_timeline_list_lock);
list_for_each(pos, &sync_timeline_list_head) {
struct sync_timeline *obj =
container_of(pos, struct sync_timeline,
@@ -165,11 +162,11 @@ static int sync_debugfs_show(struct seq_file *s, void *unused)
sync_print_obj(s, obj);
seq_putc(s, '\n');
}
- spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
+ spin_unlock_irq(&sync_timeline_list_lock);
seq_puts(s, "fences:\n--------------\n");
- spin_lock_irqsave(&sync_file_list_lock, flags);
+ spin_lock_irq(&sync_file_list_lock);
list_for_each(pos, &sync_file_list_head) {
struct sync_file *sync_file =
container_of(pos, struct sync_file, sync_file_list);
@@ -177,7 +174,7 @@ static int sync_debugfs_show(struct seq_file *s, void *unused)
sync_print_sync_file(s, sync_file);
seq_putc(s, '\n');
}
- spin_unlock_irqrestore(&sync_file_list_lock, flags);
+ spin_unlock_irq(&sync_file_list_lock);
return 0;
}
diff --git a/drivers/dma-buf/sync_debug.h b/drivers/dma-buf/sync_debug.h
index 26fe8b9907b3..d615a89f774c 100644
--- a/drivers/dma-buf/sync_debug.h
+++ b/drivers/dma-buf/sync_debug.h
@@ -14,6 +14,7 @@
#define _LINUX_SYNC_H
#include <linux/list.h>
+#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/dma-fence.h>
@@ -24,42 +25,41 @@
* struct sync_timeline - sync object
* @kref: reference count on fence.
* @name: name of the sync_timeline. Useful for debugging
- * @child_list_head: list of children sync_pts for this sync_timeline
- * @child_list_lock: lock protecting @child_list_head and fence.status
- * @active_list_head: list of active (unsignaled/errored) sync_pts
+ * @lock: lock protecting @pt_list and @value
+ * @pt_tree: rbtree of active (unsignaled/errored) sync_pts
+ * @pt_list: list of active (unsignaled/errored) sync_pts
* @sync_timeline_list: membership in global sync_timeline_list
*/
struct sync_timeline {
struct kref kref;
char name[32];
- /* protected by child_list_lock */
+ /* protected by lock */
u64 context;
int value;
- struct list_head child_list_head;
- spinlock_t child_list_lock;
-
- struct list_head active_list_head;
+ struct rb_root pt_tree;
+ struct list_head pt_list;
+ spinlock_t lock;
struct list_head sync_timeline_list;
};
static inline struct sync_timeline *dma_fence_parent(struct dma_fence *fence)
{
- return container_of(fence->lock, struct sync_timeline, child_list_lock);
+ return container_of(fence->lock, struct sync_timeline, lock);
}
/**
* struct sync_pt - sync_pt object
* @base: base fence object
- * @child_list: sync timeline child's list
- * @active_list: sync timeline active child's list
+ * @link: link on the sync timeline's list
+ * @node: node in the sync timeline's tree
*/
struct sync_pt {
struct dma_fence base;
- struct list_head child_list;
- struct list_head active_list;
+ struct list_head link;
+ struct rb_node node;
};
#ifdef CONFIG_SW_SYNC
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 545e2c5c4815..66fb40d0ebdb 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -304,7 +304,7 @@ static int sync_file_release(struct inode *inode, struct file *file)
{
struct sync_file *sync_file = file->private_data;
- if (test_bit(POLL_ENABLED, &sync_file->fence->flags))
+ if (test_bit(POLL_ENABLED, &sync_file->flags))
dma_fence_remove_callback(sync_file->fence, &sync_file->cb);
dma_fence_put(sync_file->fence);
kfree(sync_file);
@@ -318,7 +318,8 @@ static unsigned int sync_file_poll(struct file *file, poll_table *wait)
poll_wait(file, &sync_file->wq, wait);
- if (!test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) {
+ if (list_empty(&sync_file->cb.node) &&
+ !test_and_set_bit(POLL_ENABLED, &sync_file->flags)) {
if (dma_fence_add_callback(sync_file->fence, &sync_file->cb,
fence_check_cb_func) < 0)
wake_up_all(&sync_file->wq);
@@ -391,7 +392,13 @@ static void sync_fill_fence_info(struct dma_fence *fence,
sizeof(info->driver_name));
info->status = dma_fence_get_status(fence);
- info->timestamp_ns = ktime_to_ns(fence->timestamp);
+ while (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
+ !test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
+ cpu_relax();
+ info->timestamp_ns =
+ test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ?
+ ktime_to_ns(fence->timestamp) :
+ ktime_set(0, 0);
}
static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index b10cbaa82ff5..b26256f23d67 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -717,8 +717,8 @@ static int tegra_adma_probe(struct platform_device *pdev)
tdc->chan_addr = tdma->base_addr + ADMA_CH_REG_OFFSET(i);
tdc->irq = of_irq_get(pdev->dev.of_node, i);
- if (tdc->irq < 0) {
- ret = tdc->irq;
+ if (tdc->irq <= 0) {
+ ret = tdc->irq ?: -ENXIO;
goto irq_dispose;
}
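The tegra210-adma change reflects the of_irq_get() return convention: it returns a negative errno on failure but can also return 0 when no interrupt mapping exists, and 0 is not a valid Linux IRQ number. A minimal form of the idiom used in the hunk (np and index are placeholders):

	int irq = of_irq_get(np, index);

	if (irq <= 0)
		return irq ?: -ENXIO;	/* map "no IRQ" (0) to a real error */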
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index db75d4b614f7..346c4987b284 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -38,7 +38,6 @@
#include "edac_module.h"
#define EDAC_MOD_STR "altera_edac"
-#define EDAC_VERSION "1"
#define EDAC_DEVICE "Altera"
static const struct altr_sdram_prv_data c5_data = {
@@ -392,7 +391,6 @@ static int altr_sdram_probe(struct platform_device *pdev)
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = EDAC_MOD_STR;
- mci->mod_ver = EDAC_VERSION;
mci->ctl_name = dev_name(&pdev->dev);
mci->scrub_mode = SCRUB_SW_SRC;
mci->dev_name = dev_name(&pdev->dev);
@@ -749,8 +747,10 @@ static int altr_edac_device_probe(struct platform_device *pdev)
drvdata->edac_dev_name = ecc_name;
drvdata->base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
- if (!drvdata->base)
+ if (!drvdata->base) {
+ res = -ENOMEM;
goto fail1;
+ }
/* Get driver specific data for this EDAC device */
drvdata->data = of_match_node(altr_edac_device_of_match, np)->data;
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 3aea55698165..ac2f30295efe 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -3130,7 +3130,6 @@ static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
mci->edac_cap = determine_edac_cap(pvt);
mci->mod_name = EDAC_MOD_STR;
- mci->mod_ver = EDAC_AMD64_VERSION;
mci->ctl_name = fam->ctl_name;
mci->dev_name = pci_name(pvt->F3);
mci->ctl_page_to_phys = NULL;
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
index a7450275ad28..9c6e326b4c14 100644
--- a/drivers/edac/amd76x_edac.c
+++ b/drivers/edac/amd76x_edac.c
@@ -19,7 +19,6 @@
#include <linux/edac.h>
#include "edac_module.h"
-#define AMD76X_REVISION " Ver: 2.0.2"
#define EDAC_MOD_STR "amd76x_edac"
#define amd76x_printk(level, fmt, arg...) \
@@ -263,7 +262,6 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
mci->edac_cap = ems_mode ?
(EDAC_FLAG_EC | EDAC_FLAG_SECDED) : EDAC_FLAG_NONE;
mci->mod_name = EDAC_MOD_STR;
- mci->mod_ver = AMD76X_REVISION;
mci->ctl_name = amd76x_devs[dev_idx].ctl_name;
mci->dev_name = pci_name(pdev);
mci->edac_check = amd76x_check;
diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c
index 837b62c4993d..2c98e020df05 100644
--- a/drivers/edac/cpc925_edac.c
+++ b/drivers/edac/cpc925_edac.c
@@ -618,7 +618,7 @@ static u32 cpc925_cpu_mask_disabled(void)
}
if (reg == NULL || *reg > 2) {
- cpc925_printk(KERN_ERR, "Bad reg value at %s\n", cpunode->full_name);
+ cpc925_printk(KERN_ERR, "Bad reg value at %pOF\n", cpunode);
continue;
}
@@ -999,7 +999,6 @@ static int cpc925_probe(struct platform_device *pdev)
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = CPC925_EDAC_MOD_STR;
- mci->mod_ver = CPC925_EDAC_REVISION;
mci->ctl_name = pdev->name;
if (edac_op_state == EDAC_OPSTATE_POLL)
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index 1a352cae1f52..b5de9a13ea3f 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -26,7 +26,6 @@
#include <linux/edac.h>
#include "edac_module.h"
-#define E752X_REVISION " Ver: 2.0.2"
#define EDAC_MOD_STR "e752x_edac"
static int report_non_memory_errors;
@@ -1303,7 +1302,6 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
(EDAC_FLAG_NONE | EDAC_FLAG_SECDED | EDAC_FLAG_S4ECD4ED);
/* FIXME - what if different memory types are in different csrows? */
mci->mod_name = EDAC_MOD_STR;
- mci->mod_ver = E752X_REVISION;
mci->pdev = &pdev->dev;
edac_dbg(3, "init pvt\n");
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
index 67ef07aed923..75d7ce62b3be 100644
--- a/drivers/edac/e7xxx_edac.c
+++ b/drivers/edac/e7xxx_edac.c
@@ -32,7 +32,6 @@
#include <linux/edac.h>
#include "edac_module.h"
-#define E7XXX_REVISION " Ver: 2.0.2"
#define EDAC_MOD_STR "e7xxx_edac"
#define e7xxx_printk(level, fmt, arg...) \
@@ -458,7 +457,6 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
EDAC_FLAG_S4ECD4ED;
/* FIXME - what if different memory types are in different csrows? */
mci->mod_name = EDAC_MOD_STR;
- mci->mod_ver = E7XXX_REVISION;
mci->pdev = &pdev->dev;
edac_dbg(3, "init pvt\n");
pvt = (struct e7xxx_pvt *)mci->pvt_info;
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 445862dac273..e4fcfa84fbd3 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -287,7 +287,7 @@ static struct attribute *csrow_attrs[] = {
NULL,
};
-static struct attribute_group csrow_attr_grp = {
+static const struct attribute_group csrow_attr_grp = {
.attrs = csrow_attrs,
};
@@ -304,7 +304,7 @@ static void csrow_attr_release(struct device *dev)
kfree(csrow);
}
-static struct device_type csrow_attr_type = {
+static const struct device_type csrow_attr_type = {
.groups = csrow_attr_groups,
.release = csrow_attr_release,
};
@@ -627,7 +627,7 @@ static struct attribute *dimm_attrs[] = {
NULL,
};
-static struct attribute_group dimm_attr_grp = {
+static const struct attribute_group dimm_attr_grp = {
.attrs = dimm_attrs,
};
@@ -644,7 +644,7 @@ static void dimm_attr_release(struct device *dev)
kfree(dimm);
}
-static struct device_type dimm_attr_type = {
+static const struct device_type dimm_attr_type = {
.groups = dimm_attr_groups,
.release = dimm_attr_release,
};
@@ -902,7 +902,7 @@ static umode_t mci_attr_is_visible(struct kobject *kobj,
return mode;
}
-static struct attribute_group mci_attr_grp = {
+static const struct attribute_group mci_attr_grp = {
.attrs = mci_attrs,
.is_visible = mci_attr_is_visible,
};
@@ -920,7 +920,7 @@ static void mci_attr_release(struct device *dev)
kfree(mci);
}
-static struct device_type mci_attr_type = {
+static const struct device_type mci_attr_type = {
.groups = mci_attr_groups,
.release = mci_attr_release,
};
@@ -1074,7 +1074,7 @@ static void mc_attr_release(struct device *dev)
kfree(dev);
}
-static struct device_type mc_attr_type = {
+static const struct device_type mc_attr_type = {
.release = mc_attr_release,
};
/*
diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
index 4e61a6229dd2..6f80eb65c26c 100644
--- a/drivers/edac/ghes_edac.c
+++ b/drivers/edac/ghes_edac.c
@@ -17,8 +17,6 @@
#include "edac_module.h"
#include <ras/ras_event.h>
-#define GHES_EDAC_REVISION " Ver: 1.0.0"
-
struct ghes_edac_pvt {
struct list_head list;
struct ghes *ghes;
@@ -451,7 +449,6 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev)
mci->edac_ctl_cap = EDAC_FLAG_NONE;
mci->edac_cap = EDAC_FLAG_NONE;
mci->mod_name = "ghes_edac.c";
- mci->mod_ver = GHES_EDAC_REVISION;
mci->ctl_name = "ghes_edac";
mci->dev_name = "ghes";
diff --git a/drivers/edac/highbank_mc_edac.c b/drivers/edac/highbank_mc_edac.c
index 0e7e0a404d89..6092e61be605 100644
--- a/drivers/edac/highbank_mc_edac.c
+++ b/drivers/edac/highbank_mc_edac.c
@@ -224,7 +224,6 @@ static int highbank_mc_probe(struct platform_device *pdev)
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = pdev->dev.driver->name;
- mci->mod_ver = "1";
mci->ctl_name = id->compatible;
mci->dev_name = dev_name(&pdev->dev);
mci->scrub_mode = SCRUB_SW_SRC;
diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
index 5306240570d7..8085a32ec3bd 100644
--- a/drivers/edac/i3000_edac.c
+++ b/drivers/edac/i3000_edac.c
@@ -16,8 +16,6 @@
#include <linux/edac.h>
#include "edac_module.h"
-#define I3000_REVISION "1.1"
-
#define EDAC_MOD_STR "i3000_edac"
#define I3000_RANKS 8
@@ -375,7 +373,6 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = EDAC_MOD_STR;
- mci->mod_ver = I3000_REVISION;
mci->ctl_name = i3000_devs[dev_idx].ctl_name;
mci->dev_name = pci_name(pdev);
mci->edac_check = i3000_check;
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
index 77c58d201a30..d92d56cee101 100644
--- a/drivers/edac/i3200_edac.c
+++ b/drivers/edac/i3200_edac.c
@@ -17,8 +17,6 @@
#include <linux/io-64-nonatomic-lo-hi.h>
-#define I3200_REVISION "1.1"
-
#define EDAC_MOD_STR "i3200_edac"
#define PCI_DEVICE_ID_INTEL_3200_HB 0x29f0
@@ -375,7 +373,6 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = EDAC_MOD_STR;
- mci->mod_ver = I3200_REVISION;
mci->ctl_name = i3200_devs[dev_idx].ctl_name;
mci->dev_name = pci_name(pdev);
mci->edac_check = i3200_check;
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index 8f5a56e25bd2..53f24b18cd61 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -1430,7 +1430,6 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
mci->edac_ctl_cap = EDAC_FLAG_NONE;
mci->edac_cap = EDAC_FLAG_NONE;
mci->mod_name = "i5000_edac.c";
- mci->mod_ver = I5000_REVISION;
mci->ctl_name = i5000_devs[dev_idx].ctl_name;
mci->dev_name = pci_name(pdev);
mci->ctl_page_to_phys = NULL;
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index a8334c4acea7..b506eef6b146 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -1108,7 +1108,6 @@ static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
mci->edac_ctl_cap = EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = "i5100_edac.c";
- mci->mod_ver = "not versioned";
mci->ctl_name = "i5100";
mci->dev_name = pci_name(pdev);
mci->ctl_page_to_phys = NULL;
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index cd889edc8516..6f8bcdb9256a 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -1315,7 +1315,6 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
mci->edac_ctl_cap = EDAC_FLAG_NONE;
mci->edac_cap = EDAC_FLAG_NONE;
mci->mod_name = "i5400_edac.c";
- mci->mod_ver = I5400_REVISION;
mci->ctl_name = i5400_devs[dev_idx].ctl_name;
mci->dev_name = pci_name(pdev);
mci->ctl_page_to_phys = NULL;
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
index e391f5a716be..6b5a554ba8e4 100644
--- a/drivers/edac/i7300_edac.c
+++ b/drivers/edac/i7300_edac.c
@@ -1077,7 +1077,6 @@ static int i7300_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
mci->edac_ctl_cap = EDAC_FLAG_NONE;
mci->edac_cap = EDAC_FLAG_NONE;
mci->mod_name = "i7300_edac.c";
- mci->mod_ver = I7300_REVISION;
mci->ctl_name = i7300_devs[0].ctl_name;
mci->dev_name = pci_name(pdev);
mci->ctl_page_to_phys = NULL;
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 75ad847593b7..c16c3b931b3d 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1079,7 +1079,7 @@ static struct attribute *i7core_addrmatch_attrs[] = {
NULL
};
-static struct attribute_group addrmatch_grp = {
+static const struct attribute_group addrmatch_grp = {
.attrs = i7core_addrmatch_attrs,
};
@@ -1094,7 +1094,7 @@ static void addrmatch_release(struct device *device)
kfree(device);
}
-static struct device_type addrmatch_type = {
+static const struct device_type addrmatch_type = {
.groups = addrmatch_groups,
.release = addrmatch_release,
};
@@ -1110,7 +1110,7 @@ static struct attribute *i7core_udimm_counters_attrs[] = {
NULL
};
-static struct attribute_group all_channel_counts_grp = {
+static const struct attribute_group all_channel_counts_grp = {
.attrs = i7core_udimm_counters_attrs,
};
@@ -1125,7 +1125,7 @@ static void all_channel_counts_release(struct device *device)
kfree(device);
}
-static struct device_type all_channel_counts_type = {
+static const struct device_type all_channel_counts_type = {
.groups = all_channel_counts_groups,
.release = all_channel_counts_release,
};
@@ -2159,7 +2159,6 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
mci->edac_ctl_cap = EDAC_FLAG_NONE;
mci->edac_cap = EDAC_FLAG_NONE;
mci->mod_name = "i7core_edac.c";
- mci->mod_ver = I7CORE_REVISION;
mci->ctl_name = kasprintf(GFP_KERNEL, "i7 core #%d",
i7core_dev->socket);
mci->dev_name = pci_name(i7core_dev->pdev[0]);
diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
index cb61a5b7d080..a2ca929e2168 100644
--- a/drivers/edac/i82443bxgx_edac.c
+++ b/drivers/edac/i82443bxgx_edac.c
@@ -31,8 +31,6 @@
#include <linux/edac.h>
#include "edac_module.h"
-#define I82443_REVISION "0.1"
-
#define EDAC_MOD_STR "i82443bxgx_edac"
/* The 82443BX supports SDRAM, or EDO (EDO for mobile only), "Memory
@@ -320,7 +318,6 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
I82443BXGX_EAP_OFFSET_MBE));
mci->mod_name = EDAC_MOD_STR;
- mci->mod_ver = I82443_REVISION;
mci->ctl_name = "I82443BXGX";
mci->dev_name = pci_name(pdev);
mci->edac_check = i82443bxgx_edacmc_check;
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
index 236c813227fc..3e3a80ffb322 100644
--- a/drivers/edac/i82860_edac.c
+++ b/drivers/edac/i82860_edac.c
@@ -16,7 +16,6 @@
#include <linux/edac.h>
#include "edac_module.h"
-#define I82860_REVISION " Ver: 2.0.2"
#define EDAC_MOD_STR "i82860_edac"
#define i82860_printk(level, fmt, arg...) \
@@ -216,7 +215,6 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
/* I"m not sure about this but I think that all RDRAM is SECDED */
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = EDAC_MOD_STR;
- mci->mod_ver = I82860_REVISION;
mci->ctl_name = i82860_devs[dev_idx].ctl_name;
mci->dev_name = pci_name(pdev);
mci->edac_check = i82860_check;
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
index e286b7e74c7a..ceac925af38c 100644
--- a/drivers/edac/i82875p_edac.c
+++ b/drivers/edac/i82875p_edac.c
@@ -20,7 +20,6 @@
#include <linux/edac.h>
#include "edac_module.h"
-#define I82875P_REVISION " Ver: 2.0.2"
#define EDAC_MOD_STR "i82875p_edac"
#define i82875p_printk(level, fmt, arg...) \
@@ -423,7 +422,6 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_UNKNOWN;
mci->mod_name = EDAC_MOD_STR;
- mci->mod_ver = I82875P_REVISION;
mci->ctl_name = i82875p_devs[dev_idx].ctl_name;
mci->dev_name = pci_name(pdev);
mci->edac_check = i82875p_check;
diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
index 9dcdab28f665..892815eaa97b 100644
--- a/drivers/edac/i82975x_edac.c
+++ b/drivers/edac/i82975x_edac.c
@@ -16,7 +16,6 @@
#include <linux/edac.h>
#include "edac_module.h"
-#define I82975X_REVISION " Ver: 1.0.0"
#define EDAC_MOD_STR "i82975x_edac"
#define i82975x_printk(level, fmt, arg...) \
@@ -564,7 +563,6 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx)
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
mci->mod_name = EDAC_MOD_STR;
- mci->mod_ver = I82975X_REVISION;
mci->ctl_name = i82975x_devs[dev_idx].ctl_name;
mci->dev_name = pci_name(pdev);
mci->edac_check = i82975x_check;
diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c
index 4260579e6901..aac9b9b360b8 100644
--- a/drivers/edac/ie31200_edac.c
+++ b/drivers/edac/ie31200_edac.c
@@ -45,7 +45,6 @@
#include <linux/io-64-nonatomic-lo-hi.h>
#include "edac_module.h"
-#define IE31200_REVISION "1.0"
#define EDAC_MOD_STR "ie31200_edac"
#define ie31200_printk(level, fmt, arg...) \
@@ -420,7 +419,6 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
mci->edac_ctl_cap = EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = EDAC_MOD_STR;
- mci->mod_ver = IE31200_REVISION;
mci->ctl_name = ie31200_devs[dev_idx].ctl_name;
mci->dev_name = pci_name(pdev);
mci->edac_check = ie31200_check;
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index 9a2658a256a9..a11a671c7a38 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -1,6 +1,8 @@
#include <linux/module.h>
#include <linux/slab.h>
+#include <asm/cpu.h>
+
#include "mce_amd.h"
static struct amd_decoder_ops *fam_ops;
@@ -744,7 +746,7 @@ static void decode_mc3_mce(struct mce *m)
static void decode_mc4_mce(struct mce *m)
{
- struct cpuinfo_x86 *c = &boot_cpu_data;
+ unsigned int fam = x86_family(m->cpuid);
int node_id = amd_get_nb_id(m->extcpu);
u16 ec = EC(m->status);
u8 xec = XEC(m->status, 0x1f);
@@ -758,7 +760,7 @@ static void decode_mc4_mce(struct mce *m)
/* special handling for DRAM ECCs */
if (xec == 0x0 || xec == 0x8) {
/* no ECCs on F11h */
- if (c->x86 == 0x11)
+ if (fam == 0x11)
goto wrong_mc4_mce;
pr_cont("%s.\n", mc4_mce_desc[xec]);
@@ -779,7 +781,7 @@ static void decode_mc4_mce(struct mce *m)
return;
case 0x19:
- if (boot_cpu_data.x86 == 0x15 || boot_cpu_data.x86 == 0x16)
+ if (fam == 0x15 || fam == 0x16)
pr_cont("Compute Unit Data Error.\n");
else
goto wrong_mc4_mce;
@@ -802,11 +804,11 @@ static void decode_mc4_mce(struct mce *m)
static void decode_mc5_mce(struct mce *m)
{
- struct cpuinfo_x86 *c = &boot_cpu_data;
+ unsigned int fam = x86_family(m->cpuid);
u16 ec = EC(m->status);
u8 xec = XEC(m->status, xec_mask);
- if (c->x86 == 0xf || c->x86 == 0x11)
+ if (fam == 0xf || fam == 0x11)
goto wrong_mc5_mce;
pr_emerg(HW_ERR "MC5 Error: ");
@@ -849,7 +851,7 @@ static void decode_mc6_mce(struct mce *m)
}
/* Decode errors according to Scalable MCA specification */
-static void decode_smca_errors(struct mce *m)
+static void decode_smca_error(struct mce *m)
{
struct smca_hwid *hwid;
unsigned int bank_type;
@@ -859,7 +861,7 @@ static void decode_smca_errors(struct mce *m)
if (m->bank >= ARRAY_SIZE(smca_banks))
return;
- if (boot_cpu_data.x86 >= 0x17 && m->bank == 4)
+ if (x86_family(m->cpuid) >= 0x17 && m->bank == 4)
pr_emerg(HW_ERR "Bank 4 is reserved on Fam17h.\n");
hwid = smca_banks[m->bank].hwid;
@@ -878,12 +880,8 @@ static void decode_smca_errors(struct mce *m)
pr_cont("%s.\n", smca_mce_descs[bank_type].descs[xec]);
}
- /*
- * amd_get_nb_id() returns the last level cache id.
- * The last level cache on Fam17h is 1 level below the node.
- */
if (bank_type == SMCA_UMC && xec == 0 && decode_dram_ecc)
- decode_dram_ecc(amd_get_nb_id(m->extcpu) >> 1, m);
+ decode_dram_ecc(cpu_to_node(m->extcpu), m);
}
static inline void amd_decode_err_code(u16 ec)
@@ -915,12 +913,10 @@ static inline void amd_decode_err_code(u16 ec)
*/
static bool amd_filter_mce(struct mce *m)
{
- u8 xec = (m->status >> 16) & 0x1f;
-
/*
* NB GART TLB error reporting is disabled by default.
*/
- if (m->bank == 4 && xec == 0x5 && !report_gart_errors)
+ if (m->bank == 4 && XEC(m->status, 0x1f) == 0x5 && !report_gart_errors)
return true;
return false;
@@ -946,7 +942,7 @@ static int
amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
{
struct mce *m = (struct mce *)data;
- struct cpuinfo_x86 *c = &cpu_data(m->extcpu);
+ unsigned int fam = x86_family(m->cpuid);
int ecc;
if (amd_filter_mce(m))
@@ -956,7 +952,7 @@ amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
pr_emerg(HW_ERR "CPU:%d (%x:%x:%x) MC%d_STATUS[%s|%s|%s|%s|%s",
m->extcpu,
- c->x86, c->x86_model, c->x86_mask,
+ fam, x86_model(m->cpuid), x86_stepping(m->cpuid),
m->bank,
((m->status & MCI_STATUS_OVER) ? "Over" : "-"),
((m->status & MCI_STATUS_UC) ? "UE" :
@@ -965,11 +961,11 @@ amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
((m->status & MCI_STATUS_PCC) ? "PCC" : "-"),
((m->status & MCI_STATUS_ADDRV) ? "AddrV" : "-"));
- if (c->x86 >= 0x15) {
+ if (fam >= 0x15) {
pr_cont("|%s", (m->status & MCI_STATUS_DEFERRED ? "Deferred" : "-"));
/* F15h, bank4, bit 43 is part of McaStatSubCache. */
- if (c->x86 != 0x15 || m->bank != 4)
+ if (fam != 0x15 || m->bank != 4)
pr_cont("|%s", (m->status & MCI_STATUS_POISON ? "Poison" : "-"));
}
@@ -1002,7 +998,7 @@ amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
pr_cont("\n");
- decode_smca_errors(m);
+ decode_smca_error(m);
goto err_code;
}
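Switching the decoders from boot_cpu_data to x86_family(m->cpuid) means the family/model/stepping come from the CPUID signature recorded in the MCE record itself, so a record can be decoded against the CPU that actually reported it rather than whatever the boot CPU happens to be. Roughly what x86_family() computes from the CPUID(1).EAX signature, shown here only for illustration:

static unsigned int example_family(unsigned int sig)
{
	unsigned int fam = (sig >> 8) & 0xf;

	/* The extended family field only contributes when base family is 0xf. */
	if (fam == 0xf)
		fam += (sig >> 20) & 0xff;

	return fam;
}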
diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c
index d3650df94fe8..ec5d695bbb72 100644
--- a/drivers/edac/mv64x60_edac.c
+++ b/drivers/edac/mv64x60_edac.c
@@ -766,7 +766,6 @@ static int mv64x60_mc_err_probe(struct platform_device *pdev)
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = EDAC_MOD_STR;
- mci->mod_ver = MV64x60_REVISION;
mci->ctl_name = mv64x60_ctl_name;
if (edac_op_state == EDAC_OPSTATE_POLL)
diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
index 8e599490f6de..4395c84cdcbf 100644
--- a/drivers/edac/pnd2_edac.c
+++ b/drivers/edac/pnd2_edac.c
@@ -129,42 +129,72 @@ static struct mem_ctl_info *pnd2_mci;
#define GET_BITFIELD(v, lo, hi) (((v) & GENMASK_ULL(hi, lo)) >> (lo))
#define U64_LSHIFT(val, s) ((u64)(val) << (s))
-#ifdef CONFIG_X86_INTEL_SBI_APL
-#include "linux/platform_data/sbi_apl.h"
-static int sbi_send(int port, int off, int op, u32 *data)
+/*
+ * On Apollo Lake we access memory controller registers via a
+ * side-band mailbox style interface in a hidden PCI device
+ * configuration space.
+ */
+static struct pci_bus *p2sb_bus;
+#define P2SB_DEVFN PCI_DEVFN(0xd, 0)
+#define P2SB_ADDR_OFF 0xd0
+#define P2SB_DATA_OFF 0xd4
+#define P2SB_STAT_OFF 0xd8
+#define P2SB_ROUT_OFF 0xda
+#define P2SB_EADD_OFF 0xdc
+#define P2SB_HIDE_OFF 0xe1
+
+#define P2SB_BUSY 1
+
+#define P2SB_READ(size, off, ptr) \
+ pci_bus_read_config_##size(p2sb_bus, P2SB_DEVFN, off, ptr)
+#define P2SB_WRITE(size, off, val) \
+ pci_bus_write_config_##size(p2sb_bus, P2SB_DEVFN, off, val)
+
+static bool p2sb_is_busy(u16 *status)
{
- struct sbi_apl_message sbi_arg;
- int ret, read = 0;
+ P2SB_READ(word, P2SB_STAT_OFF, status);
- memset(&sbi_arg, 0, sizeof(sbi_arg));
+ return !!(*status & P2SB_BUSY);
+}
- if (op == 0 || op == 4 || op == 6)
- read = 1;
- else
- sbi_arg.data = *data;
+static int _apl_rd_reg(int port, int off, int op, u32 *data)
+{
+ int retries = 0xff, ret;
+ u16 status;
+ u8 hidden;
+
+ /* Unhide the P2SB device, if it's hidden */
+ P2SB_READ(byte, P2SB_HIDE_OFF, &hidden);
+ if (hidden)
+ P2SB_WRITE(byte, P2SB_HIDE_OFF, 0);
+
+ if (p2sb_is_busy(&status)) {
+ ret = -EAGAIN;
+ goto out;
+ }
- sbi_arg.opcode = op;
- sbi_arg.port_address = port;
- sbi_arg.register_offset = off;
- ret = sbi_apl_commit(&sbi_arg);
- if (ret || sbi_arg.status)
- edac_dbg(2, "sbi_send status=%d ret=%d data=%x\n",
- sbi_arg.status, ret, sbi_arg.data);
+ P2SB_WRITE(dword, P2SB_ADDR_OFF, (port << 24) | off);
+ P2SB_WRITE(dword, P2SB_DATA_OFF, 0);
+ P2SB_WRITE(dword, P2SB_EADD_OFF, 0);
+ P2SB_WRITE(word, P2SB_ROUT_OFF, 0);
+ P2SB_WRITE(word, P2SB_STAT_OFF, (op << 8) | P2SB_BUSY);
- if (ret == 0)
- ret = sbi_arg.status;
+ while (p2sb_is_busy(&status)) {
+ if (retries-- == 0) {
+ ret = -EBUSY;
+ goto out;
+ }
+ }
- if (ret == 0 && read)
- *data = sbi_arg.data;
+ P2SB_READ(dword, P2SB_DATA_OFF, data);
+ ret = (status >> 1) & 0x3;
+out:
+ /* Hide the P2SB device, if it was hidden before */
+ if (hidden)
+ P2SB_WRITE(byte, P2SB_HIDE_OFF, hidden);
return ret;
}
-#else
-static int sbi_send(int port, int off, int op, u32 *data)
-{
- return -EUNATCH;
-}
-#endif
static int apl_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
{
@@ -173,10 +203,10 @@ static int apl_rd_reg(int port, int off, int op, void *data, size_t sz, char *na
edac_dbg(2, "Read %s port=%x off=%x op=%x\n", name, port, off, op);
switch (sz) {
case 8:
- ret = sbi_send(port, off + 4, op, (u32 *)(data + 4));
+ ret = _apl_rd_reg(port, off + 4, op, (u32 *)(data + 4));
/* fall through */
case 4:
- ret |= sbi_send(port, off, op, (u32 *)data);
+ ret |= _apl_rd_reg(port, off, op, (u32 *)data);
pnd2_printk(KERN_DEBUG, "%s=%x%08x ret=%d\n", name,
sz == 8 ? *((u32 *)(data + 4)) : 0, *((u32 *)data), ret);
break;
@@ -212,11 +242,23 @@ static u64 get_sideband_reg_base_addr(void)
{
struct pci_dev *pdev;
u32 hi, lo;
+ u8 hidden;
pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x19dd, NULL);
if (pdev) {
+ /* Unhide the P2SB device, if it's hidden */
+ pci_read_config_byte(pdev, 0xe1, &hidden);
+ if (hidden)
+ pci_write_config_byte(pdev, 0xe1, 0);
+
pci_read_config_dword(pdev, 0x10, &lo);
pci_read_config_dword(pdev, 0x14, &hi);
+ lo &= 0xfffffff0;
+
+ /* Hide the P2SB device, if it was hidden before */
+ if (hidden)
+ pci_write_config_byte(pdev, 0xe1, hidden);
+
pci_dev_put(pdev);
return (U64_LSHIFT(hi, 32) | U64_LSHIFT(lo, 0));
} else {
@@ -1515,6 +1557,12 @@ static int __init pnd2_init(void)
ops = (struct dunit_ops *)id->driver_data;
+ if (ops->type == APL) {
+ p2sb_bus = pci_find_bus(0, 0);
+ if (!p2sb_bus)
+ return -ENODEV;
+ }
+
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
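The new _apl_rd_reg() replaces the config-dependent sbi_send() with a driver-local implementation of the P2SB sideband mailbox. Stripped of the unhide/hide handling and the retry bound, the register sequence it performs looks roughly like this (a condensed sketch, not a drop-in replacement; the helper name p2sb_read_one() is invented, the offsets are the P2SB_*_OFF values defined in the hunk):

static int p2sb_read_one(struct pci_bus *bus, int port, int off, int op, u32 *val)
{
	u16 status;

	/* Select the target port/register, then kick off the transaction. */
	pci_bus_write_config_dword(bus, P2SB_DEVFN, P2SB_ADDR_OFF, (port << 24) | off);
	pci_bus_write_config_word(bus, P2SB_DEVFN, P2SB_STAT_OFF, (op << 8) | P2SB_BUSY);

	/* Wait for the mailbox to go idle (the real code bounds this loop). */
	do {
		pci_bus_read_config_word(bus, P2SB_DEVFN, P2SB_STAT_OFF, &status);
	} while (status & P2SB_BUSY);

	pci_bus_read_config_dword(bus, P2SB_DEVFN, P2SB_DATA_OFF, val);

	return (status >> 1) & 0x3;	/* completion code taken from status bits 2:1 */
}

The hide/unhide dance around this sequence exists because firmware normally hides the P2SB device (offset 0xe1), and pnd2_init() now has to look up the bus with pci_find_bus(0, 0) before any of this can run.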
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index e55e92590106..fd3202c30f69 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -1063,7 +1063,6 @@ static int ppc4xx_edac_mc_init(struct mem_ctl_info *mci,
/* Initialize strings */
mci->mod_name = PPC4XX_EDAC_MODULE_NAME;
- mci->mod_ver = PPC4XX_EDAC_MODULE_REVISION;
mci->ctl_name = ppc4xx_edac_match->compatible,
mci->dev_name = np->full_name;
@@ -1267,8 +1266,8 @@ static int ppc4xx_edac_probe(struct platform_device *op)
memcheck = (mcopt1 & SDRAM_MCOPT1_MCHK_MASK);
if (memcheck == SDRAM_MCOPT1_MCHK_NON) {
- ppc4xx_edac_printk(KERN_INFO, "%s: No ECC memory detected or "
- "ECC is disabled.\n", np->full_name);
+ ppc4xx_edac_printk(KERN_INFO, "%pOF: No ECC memory detected or "
+ "ECC is disabled.\n", np);
status = -ENODEV;
goto done;
}
@@ -1287,9 +1286,9 @@ static int ppc4xx_edac_probe(struct platform_device *op)
mci = edac_mc_alloc(ppc4xx_edac_instance, ARRAY_SIZE(layers), layers,
sizeof(struct ppc4xx_edac_pdata));
if (mci == NULL) {
- ppc4xx_edac_printk(KERN_ERR, "%s: "
+ ppc4xx_edac_printk(KERN_ERR, "%pOF: "
"Failed to allocate EDAC MC instance!\n",
- np->full_name);
+ np);
status = -ENOMEM;
goto done;
}
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
index 978916625ced..851e53e122aa 100644
--- a/drivers/edac/r82600_edac.c
+++ b/drivers/edac/r82600_edac.c
@@ -22,7 +22,6 @@
#include <linux/edac.h>
#include "edac_module.h"
-#define R82600_REVISION " Ver: 2.0.2"
#define EDAC_MOD_STR "r82600_edac"
#define r82600_printk(level, fmt, arg...) \
@@ -316,7 +315,6 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
mci->edac_cap = EDAC_FLAG_NONE;
mci->mod_name = EDAC_MOD_STR;
- mci->mod_ver = R82600_REVISION;
mci->ctl_name = "R82600";
mci->dev_name = pci_name(pdev);
mci->edac_check = r82600_check;
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 80d860cb0746..dc0591654011 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -300,6 +300,12 @@ enum domain {
SOCK,
};
+enum mirroring_mode {
+ NON_MIRRORING,
+ ADDR_RANGE_MIRRORING,
+ FULL_MIRRORING,
+};
+
struct sbridge_pvt;
struct sbridge_info {
enum type type;
@@ -377,8 +383,9 @@ struct sbridge_pvt {
struct sbridge_channel channel[NUM_CHANNELS];
/* Memory type detection */
- bool is_mirrored, is_lockstep, is_close_pg;
+ bool is_cur_addr_mirrored, is_lockstep, is_close_pg;
bool is_chan_hash;
+ enum mirroring_mode mirror_mode;
/* Memory description */
u64 tolm, tohm;
@@ -1648,10 +1655,6 @@ static int get_dimm_config(struct mem_ctl_info *mci)
enum edac_type mode;
u32 reg;
- if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
- pci_read_config_dword(pvt->pci_ha, HASWELL_HASYSDEFEATURE2, &reg);
- pvt->is_chan_hash = GET_BITFIELD(reg, 21, 21);
- }
pvt->sbridge_dev->node_id = pvt->info.get_node_id(pvt);
edac_dbg(0, "mc#%d: Node ID: %d, source ID: %d\n",
pvt->sbridge_dev->mc,
@@ -1663,22 +1666,45 @@ static int get_dimm_config(struct mem_ctl_info *mci)
*/
if (pvt->info.type == KNIGHTS_LANDING) {
mode = EDAC_S4ECD4ED;
- pvt->is_mirrored = false;
+ pvt->mirror_mode = NON_MIRRORING;
+ pvt->is_cur_addr_mirrored = false;
if (knl_get_dimm_capacity(pvt, knl_mc_sizes) != 0)
return -1;
- pci_read_config_dword(pvt->pci_ta, KNL_MCMTR, &pvt->info.mcmtr);
+ if (pci_read_config_dword(pvt->pci_ta, KNL_MCMTR, &pvt->info.mcmtr)) {
+ edac_dbg(0, "Failed to read KNL_MCMTR register\n");
+ return -ENODEV;
+ }
} else {
- pci_read_config_dword(pvt->pci_ras, RASENABLES, &reg);
+ if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
+ if (pci_read_config_dword(pvt->pci_ha, HASWELL_HASYSDEFEATURE2, &reg)) {
+ edac_dbg(0, "Failed to read HASWELL_HASYSDEFEATURE2 register\n");
+ return -ENODEV;
+ }
+ pvt->is_chan_hash = GET_BITFIELD(reg, 21, 21);
+ if (GET_BITFIELD(reg, 28, 28)) {
+ pvt->mirror_mode = ADDR_RANGE_MIRRORING;
+ edac_dbg(0, "Address range partial memory mirroring is enabled\n");
+ goto next;
+ }
+ }
+ if (pci_read_config_dword(pvt->pci_ras, RASENABLES, &reg)) {
+ edac_dbg(0, "Failed to read RASENABLES register\n");
+ return -ENODEV;
+ }
if (IS_MIRROR_ENABLED(reg)) {
- edac_dbg(0, "Memory mirror is enabled\n");
- pvt->is_mirrored = true;
+ pvt->mirror_mode = FULL_MIRRORING;
+ edac_dbg(0, "Full memory mirroring is enabled\n");
} else {
- edac_dbg(0, "Memory mirror is disabled\n");
- pvt->is_mirrored = false;
+ pvt->mirror_mode = NON_MIRRORING;
+ edac_dbg(0, "Memory mirroring is disabled\n");
}
- pci_read_config_dword(pvt->pci_ta, MCMTR, &pvt->info.mcmtr);
+next:
+ if (pci_read_config_dword(pvt->pci_ta, MCMTR, &pvt->info.mcmtr)) {
+ edac_dbg(0, "Failed to read MCMTR register\n");
+ return -ENODEV;
+ }
if (IS_LOCKSTEP_ENABLED(pvt->info.mcmtr)) {
edac_dbg(0, "Lockstep is enabled\n");
mode = EDAC_S8ECD8ED;
@@ -2092,7 +2118,8 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
pci_read_config_dword(pvt->pci_tad[base_ch], tad_ch_nilv_offset[n_tads], &tad_offset);
- if (pvt->is_mirrored) {
+ if (pvt->mirror_mode == FULL_MIRRORING ||
+ (pvt->mirror_mode == ADDR_RANGE_MIRRORING && n_tads == 0)) {
*channel_mask |= 1 << ((base_ch + 2) % 4);
switch(ch_way) {
case 2:
@@ -2103,8 +2130,12 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
sprintf(msg, "Invalid mirror set. Can't decode addr");
return -EINVAL;
}
- } else
+
+ pvt->is_cur_addr_mirrored = true;
+ } else {
sck_xch = (1 << sck_way) * ch_way;
+ pvt->is_cur_addr_mirrored = false;
+ }
if (pvt->is_lockstep)
*channel_mask |= 1 << ((base_ch + 1) % 4);
@@ -2967,7 +2998,7 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
* EDAC core should be handling the channel mask, in order to point
* to the group of dimm's where the error may be happening.
*/
- if (!pvt->is_lockstep && !pvt->is_mirrored && !pvt->is_close_pg)
+ if (!pvt->is_lockstep && !pvt->is_cur_addr_mirrored && !pvt->is_close_pg)
channel = first_channel;
snprintf(msg, sizeof(msg),
@@ -3125,7 +3156,6 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
mci->edac_ctl_cap = EDAC_FLAG_NONE;
mci->edac_cap = EDAC_FLAG_NONE;
mci->mod_name = "sb_edac.c";
- mci->mod_ver = SBRIDGE_REVISION;
mci->dev_name = pci_name(pdev);
mci->ctl_page_to_phys = NULL;
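The net effect of the sb_edac changes is that mirroring becomes a three-way mode rather than a boolean, and the address decoder only treats an address as mirrored when it really is: always under full mirroring, but only for the first TAD range (n_tads == 0) under address-range mirroring. A condensed view of that decision, restating the hunk above rather than adding new behaviour:

	bool mirrored = pvt->mirror_mode == FULL_MIRRORING ||
			(pvt->mirror_mode == ADDR_RANGE_MIRRORING && n_tads == 0);

	if (mirrored)
		*channel_mask |= 1 << ((base_ch + 2) % 4);

	pvt->is_cur_addr_mirrored = mirrored;

is_cur_addr_mirrored then feeds the error-reporting path, so channel selection in sbridge_mce_output_error() follows the per-address result instead of a global flag.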
diff --git a/drivers/edac/skx_edac.c b/drivers/edac/skx_edac.c
index 64bef6c9cfb4..16dea97568a1 100644
--- a/drivers/edac/skx_edac.c
+++ b/drivers/edac/skx_edac.c
@@ -31,8 +31,6 @@
#include "edac_module.h"
-#define SKX_REVISION " Ver: 1.0 "
-
/*
* Debug macros
*/
@@ -473,7 +471,6 @@ static int skx_register_mci(struct skx_imc *imc)
mci->edac_cap = EDAC_FLAG_NONE;
mci->mod_name = "skx_edac.c";
mci->dev_name = pci_name(imc->chan[0].cdev);
- mci->mod_ver = SKX_REVISION;
mci->ctl_page_to_phys = NULL;
rc = skx_get_dimm_config(mci);
diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c
index 1c01dec78ec3..0c9c59e2b5a3 100644
--- a/drivers/edac/synopsys_edac.c
+++ b/drivers/edac/synopsys_edac.c
@@ -413,7 +413,6 @@ static int synps_edac_mc_init(struct mem_ctl_info *mci,
mci->ctl_name = "synps_ddr_controller";
mci->dev_name = SYNPS_EDAC_MOD_STRING;
mci->mod_name = SYNPS_EDAC_MOD_VER;
- mci->mod_ver = "1";
edac_op_state = EDAC_OPSTATE_POLL;
mci->edac_check = synps_edac_check;
diff --git a/drivers/edac/thunderx_edac.c b/drivers/edac/thunderx_edac.c
index 2d352b40ae1c..f35d87519a3e 100644
--- a/drivers/edac/thunderx_edac.c
+++ b/drivers/edac/thunderx_edac.c
@@ -732,7 +732,6 @@ static int thunderx_lmc_probe(struct pci_dev *pdev,
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = "thunderx-lmc";
- mci->mod_ver = "1";
mci->ctl_name = "thunderx-lmc";
mci->dev_name = dev_name(&pdev->dev);
mci->scrub_mode = SCRUB_NONE;
@@ -775,11 +774,10 @@ static int thunderx_lmc_probe(struct pci_dev *pdev,
lmc->xor_bank = lmc_control & LMC_CONTROL_XOR_BANK;
- l2c_ioaddr = ioremap(L2C_CTL | FIELD_PREP(THUNDERX_NODE, lmc->node),
- PAGE_SIZE);
-
+ l2c_ioaddr = ioremap(L2C_CTL | FIELD_PREP(THUNDERX_NODE, lmc->node), PAGE_SIZE);
if (!l2c_ioaddr) {
dev_err(&pdev->dev, "Cannot map L2C_CTL\n");
+ ret = -ENOMEM;
goto err_free;
}
diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
index 03c97a4bf590..cc779f3f9e2d 100644
--- a/drivers/edac/x38_edac.c
+++ b/drivers/edac/x38_edac.c
@@ -18,8 +18,6 @@
#include <linux/io-64-nonatomic-lo-hi.h>
#include "edac_module.h"
-#define X38_REVISION "1.1"
-
#define EDAC_MOD_STR "x38_edac"
#define PCI_DEVICE_ID_INTEL_X38_HB 0x29e0
@@ -357,7 +355,6 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx)
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = EDAC_MOD_STR;
- mci->mod_ver = X38_REVISION;
mci->ctl_name = x38_devs[dev_idx].ctl_name;
mci->dev_name = pci_name(pdev);
mci->edac_check = x38_check;
diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c
index 669246056812..e8b81d7ef61f 100644
--- a/drivers/edac/xgene_edac.c
+++ b/drivers/edac/xgene_edac.c
@@ -415,7 +415,6 @@ static int xgene_edac_mc_add(struct xgene_edac *edac, struct device_node *np)
mci->edac_ctl_cap = EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = EDAC_MOD_STR;
- mci->mod_ver = "0.1";
mci->ctl_page_to_phys = NULL;
mci->scrub_cap = SCRUB_FLAG_HW_SRC;
mci->scrub_mode = SCRUB_HW_SRC;
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index 6d50071f07d5..a7bca4207f44 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -150,4 +150,11 @@ config EXTCON_USB_GPIO
Say Y here to enable GPIO based USB cable detection extcon support.
Used typically if GPIO is used for USB ID pin detection.
+config EXTCON_USBC_CROS_EC
+ tristate "ChromeOS Embedded Controller EXTCON support"
+ depends on MFD_CROS_EC
+ help
+ Say Y here to enable USB Type-C cable detection extcon support when
+ using ChromeOS EC-based USB Type-C ports.
+
endif
diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile
index ecfa95804427..a73624e76193 100644
--- a/drivers/extcon/Makefile
+++ b/drivers/extcon/Makefile
@@ -20,3 +20,4 @@ obj-$(CONFIG_EXTCON_QCOM_SPMI_MISC) += extcon-qcom-spmi-misc.o
obj-$(CONFIG_EXTCON_RT8973A) += extcon-rt8973a.o
obj-$(CONFIG_EXTCON_SM5502) += extcon-sm5502.o
obj-$(CONFIG_EXTCON_USB_GPIO) += extcon-usb-gpio.o
+obj-$(CONFIG_EXTCON_USBC_CROS_EC) += extcon-usbc-cros-ec.o
diff --git a/drivers/extcon/devres.c b/drivers/extcon/devres.c
index 186fd735eb28..f599aeddf8e5 100644
--- a/drivers/extcon/devres.c
+++ b/drivers/extcon/devres.c
@@ -1,5 +1,5 @@
/*
- * drivers/extcon/devres.c - EXTCON device's resource management
+ * drivers/extcon/devres.c - EXTCON device's resource management
*
* Copyright (C) 2016 Samsung Electronics
* Author: Chanwoo Choi <cw00.choi@samsung.com>
@@ -59,10 +59,9 @@ static void devm_extcon_dev_notifier_all_unreg(struct device *dev, void *res)
/**
* devm_extcon_dev_allocate - Allocate managed extcon device
- * @dev: device owning the extcon device being created
- * @supported_cable: Array of supported extcon ending with EXTCON_NONE.
- * If supported_cable is NULL, cable name related APIs
- * are disabled.
+ * @dev: the device owning the extcon device being created
+ * @supported_cable: the array of the supported external connectors
+ * ending with EXTCON_NONE.
*
* This function manages automatically the memory of extcon device using device
* resource management and simplify the control of freeing the memory of extcon
@@ -97,8 +96,8 @@ EXPORT_SYMBOL_GPL(devm_extcon_dev_allocate);
/**
* devm_extcon_dev_free() - Resource-managed extcon_dev_unregister()
- * @dev: device the extcon belongs to
- * @edev: the extcon device to unregister
+ * @dev: the device owning the extcon device being created
+ * @edev: the extcon device to be freed
*
* Free the memory that is allocated with devm_extcon_dev_allocate()
* function.
@@ -112,10 +111,9 @@ EXPORT_SYMBOL_GPL(devm_extcon_dev_free);
/**
* devm_extcon_dev_register() - Resource-managed extcon_dev_register()
- * @dev: device to allocate extcon device
- * @edev: the new extcon device to register
+ * @dev: the device owning the extcon device being created
+ * @edev: the extcon device to be registered
*
- * Managed extcon_dev_register() function. If extcon device is attached with
* this function, that extcon device is automatically unregistered on driver
* detach. Internally this function calls extcon_dev_register() function.
* To get more information, refer that function.
@@ -149,8 +147,8 @@ EXPORT_SYMBOL_GPL(devm_extcon_dev_register);
/**
* devm_extcon_dev_unregister() - Resource-managed extcon_dev_unregister()
- * @dev: device the extcon belongs to
- * @edev: the extcon device to unregister
+ * @dev: the device owning the extcon device being created
+ * @edev: the extcon device to be unregistered
*
* Unregister extcon device that is registered with devm_extcon_dev_register()
* function.
@@ -164,10 +162,10 @@ EXPORT_SYMBOL_GPL(devm_extcon_dev_unregister);
/**
* devm_extcon_register_notifier() - Resource-managed extcon_register_notifier()
- * @dev: device to allocate extcon device
- * @edev: the extcon device that has the external connecotr.
- * @id: the unique id of each external connector in extcon enumeration.
- * @nb: a notifier block to be registered.
+ * @dev: the device owning the extcon device being created
+ * @edev: the extcon device
+ * @id: the unique id among the extcon enumeration
+ * @nb: a notifier block to be registered
*
* This function manages automatically the notifier of extcon device using
* device resource management and simplify the control of unregistering
@@ -208,10 +206,10 @@ EXPORT_SYMBOL(devm_extcon_register_notifier);
/**
* devm_extcon_unregister_notifier()
* - Resource-managed extcon_unregister_notifier()
- * @dev: device to allocate extcon device
- * @edev: the extcon device that has the external connecotr.
- * @id: the unique id of each external connector in extcon enumeration.
- * @nb: a notifier block to be registered.
+ * @dev: the device owning the extcon device being created
+ * @edev: the extcon device
+ * @id: the unique id among the extcon enumeration
+ * @nb: a notifier block to be registered
*/
void devm_extcon_unregister_notifier(struct device *dev,
struct extcon_dev *edev, unsigned int id,
@@ -225,9 +223,9 @@ EXPORT_SYMBOL(devm_extcon_unregister_notifier);
/**
* devm_extcon_register_notifier_all()
* - Resource-managed extcon_register_notifier_all()
- * @dev: device to allocate extcon device
- * @edev: the extcon device that has the external connecotr.
- * @nb: a notifier block to be registered.
+ * @dev: the device owning the extcon device being created
+ * @edev: the extcon device
+ * @nb: a notifier block to be registered
*
* This function manages automatically the notifier of extcon device using
* device resource management and simplify the control of unregistering
@@ -263,9 +261,9 @@ EXPORT_SYMBOL(devm_extcon_register_notifier_all);
/**
* devm_extcon_unregister_notifier_all()
* - Resource-managed extcon_unregister_notifier_all()
- * @dev: device to allocate extcon device
- * @edev: the extcon device that has the external connecotr.
- * @nb: a notifier block to be registered.
+ * @dev: the device owning the extcon device being created
+ * @edev: the extcon device
+ * @nb: a notifier block to be registered
*/
void devm_extcon_unregister_notifier_all(struct device *dev,
struct extcon_dev *edev,
diff --git a/drivers/extcon/extcon-intel-int3496.c b/drivers/extcon/extcon-intel-int3496.c
index d9f9afe45961..1a45e745717d 100644
--- a/drivers/extcon/extcon-intel-int3496.c
+++ b/drivers/extcon/extcon-intel-int3496.c
@@ -171,7 +171,7 @@ static int int3496_remove(struct platform_device *pdev)
return 0;
}
-static struct acpi_device_id int3496_acpi_match[] = {
+static const struct acpi_device_id int3496_acpi_match[] = {
{ "INT3496" },
{ }
};
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
index 62163468f205..7a5856809047 100644
--- a/drivers/extcon/extcon-max77693.c
+++ b/drivers/extcon/extcon-max77693.c
@@ -811,9 +811,8 @@ static int max77693_muic_chg_handler(struct max77693_muic_info *info)
*/
extcon_set_state_sync(info->edev, EXTCON_CHG_USB_DCP,
attached);
- if (!cable_attached)
- extcon_set_state_sync(info->edev,
- EXTCON_DISP_MHL, cable_attached);
+ extcon_set_state_sync(info->edev, EXTCON_DISP_MHL,
+ cable_attached);
break;
}
diff --git a/drivers/extcon/extcon-usbc-cros-ec.c b/drivers/extcon/extcon-usbc-cros-ec.c
new file mode 100644
index 000000000000..598956f1dcae
--- /dev/null
+++ b/drivers/extcon/extcon-usbc-cros-ec.c
@@ -0,0 +1,417 @@
+/**
+ * drivers/extcon/extcon-usbc-cros-ec - ChromeOS Embedded Controller extcon
+ *
+ * Copyright (C) 2017 Google, Inc
+ * Author: Benson Leung <bleung@chromium.org>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/extcon.h>
+#include <linux/kernel.h>
+#include <linux/mfd/cros_ec.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+
+struct cros_ec_extcon_info {
+ struct device *dev;
+ struct extcon_dev *edev;
+
+ int port_id;
+
+ struct cros_ec_device *ec;
+
+ struct notifier_block notifier;
+
+ bool dp; /* DisplayPort enabled */
+ bool mux; /* SuperSpeed (usb3) enabled */
+ unsigned int power_type;
+};
+
+static const unsigned int usb_type_c_cable[] = {
+ EXTCON_DISP_DP,
+ EXTCON_NONE,
+};
+
+/**
+ * cros_ec_pd_command() - Send a command to the EC.
+ * @info: pointer to struct cros_ec_extcon_info
+ * @command: EC command
+ * @version: EC command version
+ * @outdata: EC command output data
+ * @outsize: Size of outdata
+ * @indata: EC command input data
+ * @insize: Size of indata
+ *
+ * Return: 0 on success, <0 on failure.
+ */
+static int cros_ec_pd_command(struct cros_ec_extcon_info *info,
+ unsigned int command,
+ unsigned int version,
+ void *outdata,
+ unsigned int outsize,
+ void *indata,
+ unsigned int insize)
+{
+ struct cros_ec_command *msg;
+ int ret;
+
+ msg = kzalloc(sizeof(*msg) + max(outsize, insize), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->version = version;
+ msg->command = command;
+ msg->outsize = outsize;
+ msg->insize = insize;
+
+ if (outsize)
+ memcpy(msg->data, outdata, outsize);
+
+ ret = cros_ec_cmd_xfer_status(info->ec, msg);
+ if (ret >= 0 && insize)
+ memcpy(indata, msg->data, insize);
+
+ kfree(msg);
+ return ret;
+}
+
+/**
+ * cros_ec_usb_get_power_type() - Get power type info about PD device attached
+ * to given port.
+ * @info: pointer to struct cros_ec_extcon_info
+ *
+ * Return: power type on success, <0 on failure.
+ */
+static int cros_ec_usb_get_power_type(struct cros_ec_extcon_info *info)
+{
+ struct ec_params_usb_pd_power_info req;
+ struct ec_response_usb_pd_power_info resp;
+ int ret;
+
+ req.port = info->port_id;
+ ret = cros_ec_pd_command(info, EC_CMD_USB_PD_POWER_INFO, 0,
+ &req, sizeof(req), &resp, sizeof(resp));
+ if (ret < 0)
+ return ret;
+
+ return resp.type;
+}
+
+/**
+ * cros_ec_usb_get_pd_mux_state() - Get PD mux state for given port.
+ * @info: pointer to struct cros_ec_extcon_info
+ *
+ * Return: PD mux state on success, <0 on failure.
+ */
+static int cros_ec_usb_get_pd_mux_state(struct cros_ec_extcon_info *info)
+{
+ struct ec_params_usb_pd_mux_info req;
+ struct ec_response_usb_pd_mux_info resp;
+ int ret;
+
+ req.port = info->port_id;
+ ret = cros_ec_pd_command(info, EC_CMD_USB_PD_MUX_INFO, 0,
+ &req, sizeof(req),
+ &resp, sizeof(resp));
+ if (ret < 0)
+ return ret;
+
+ return resp.flags;
+}
+
+/**
+ * cros_ec_usb_get_role() - Get role info about possible PD device attached to a
+ * given port.
+ * @info: pointer to struct cros_ec_extcon_info
+ * @polarity: pointer to cable polarity (return value)
+ *
+ * Return: role info on success, -ENOTCONN if no cable is connected, <0 on
+ * failure.
+ */
+static int cros_ec_usb_get_role(struct cros_ec_extcon_info *info,
+ bool *polarity)
+{
+ struct ec_params_usb_pd_control pd_control;
+ struct ec_response_usb_pd_control_v1 resp;
+ int ret;
+
+ pd_control.port = info->port_id;
+ pd_control.role = USB_PD_CTRL_ROLE_NO_CHANGE;
+ pd_control.mux = USB_PD_CTRL_MUX_NO_CHANGE;
+ ret = cros_ec_pd_command(info, EC_CMD_USB_PD_CONTROL, 1,
+ &pd_control, sizeof(pd_control),
+ &resp, sizeof(resp));
+ if (ret < 0)
+ return ret;
+
+ if (!(resp.enabled & PD_CTRL_RESP_ENABLED_CONNECTED))
+ return -ENOTCONN;
+
+ *polarity = resp.polarity;
+
+ return resp.role;
+}
+
+/**
+ * cros_ec_pd_get_num_ports() - Get number of EC charge ports.
+ * @info: pointer to struct cros_ec_extcon_info
+ *
+ * Return: number of ports on success, <0 on failure.
+ */
+static int cros_ec_pd_get_num_ports(struct cros_ec_extcon_info *info)
+{
+ struct ec_response_usb_pd_ports resp;
+ int ret;
+
+ ret = cros_ec_pd_command(info, EC_CMD_USB_PD_PORTS,
+ 0, NULL, 0, &resp, sizeof(resp));
+ if (ret < 0)
+ return ret;
+
+ return resp.num_ports;
+}
+
+static int extcon_cros_ec_detect_cable(struct cros_ec_extcon_info *info,
+ bool force)
+{
+ struct device *dev = info->dev;
+ int role, power_type;
+ bool polarity = false;
+ bool dp = false;
+ bool mux = false;
+ bool hpd = false;
+
+ power_type = cros_ec_usb_get_power_type(info);
+ if (power_type < 0) {
+ dev_err(dev, "failed getting power type err = %d\n",
+ power_type);
+ return power_type;
+ }
+
+ role = cros_ec_usb_get_role(info, &polarity);
+ if (role < 0) {
+ if (role != -ENOTCONN) {
+ dev_err(dev, "failed getting role err = %d\n", role);
+ return role;
+ }
+ } else {
+ int pd_mux_state;
+
+ pd_mux_state = cros_ec_usb_get_pd_mux_state(info);
+ if (pd_mux_state < 0)
+ pd_mux_state = USB_PD_MUX_USB_ENABLED;
+
+ dp = pd_mux_state & USB_PD_MUX_DP_ENABLED;
+ mux = pd_mux_state & USB_PD_MUX_USB_ENABLED;
+ hpd = pd_mux_state & USB_PD_MUX_HPD_IRQ;
+ }
+
+ if (force || info->dp != dp || info->mux != mux ||
+ info->power_type != power_type) {
+
+ info->dp = dp;
+ info->mux = mux;
+ info->power_type = power_type;
+
+ extcon_set_state(info->edev, EXTCON_DISP_DP, dp);
+
+ extcon_set_property(info->edev, EXTCON_DISP_DP,
+ EXTCON_PROP_USB_TYPEC_POLARITY,
+ (union extcon_property_value)(int)polarity);
+ extcon_set_property(info->edev, EXTCON_DISP_DP,
+ EXTCON_PROP_USB_SS,
+ (union extcon_property_value)(int)mux);
+ extcon_set_property(info->edev, EXTCON_DISP_DP,
+ EXTCON_PROP_DISP_HPD,
+ (union extcon_property_value)(int)hpd);
+
+ extcon_sync(info->edev, EXTCON_DISP_DP);
+
+ } else if (hpd) {
+ extcon_set_property(info->edev, EXTCON_DISP_DP,
+ EXTCON_PROP_DISP_HPD,
+ (union extcon_property_value)(int)hpd);
+ extcon_sync(info->edev, EXTCON_DISP_DP);
+ }
+
+ return 0;
+}
+
+static int extcon_cros_ec_event(struct notifier_block *nb,
+ unsigned long queued_during_suspend,
+ void *_notify)
+{
+ struct cros_ec_extcon_info *info;
+ struct cros_ec_device *ec;
+ u32 host_event;
+
+ info = container_of(nb, struct cros_ec_extcon_info, notifier);
+ ec = info->ec;
+
+ host_event = cros_ec_get_host_event(ec);
+ if (host_event & (EC_HOST_EVENT_MASK(EC_HOST_EVENT_PD_MCU) |
+ EC_HOST_EVENT_MASK(EC_HOST_EVENT_USB_MUX))) {
+ extcon_cros_ec_detect_cable(info, false);
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int extcon_cros_ec_probe(struct platform_device *pdev)
+{
+ struct cros_ec_extcon_info *info;
+ struct cros_ec_device *ec = dev_get_drvdata(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ int numports, ret;
+
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->dev = dev;
+ info->ec = ec;
+
+ if (np) {
+ u32 port;
+
+ ret = of_property_read_u32(np, "google,usb-port-id", &port);
+ if (ret < 0) {
+ dev_err(dev, "Missing google,usb-port-id property\n");
+ return ret;
+ }
+ info->port_id = port;
+ } else {
+ info->port_id = pdev->id;
+ }
+
+ numports = cros_ec_pd_get_num_ports(info);
+ if (numports < 0) {
+ dev_err(dev, "failed getting number of ports! ret = %d\n",
+ numports);
+ return numports;
+ }
+
+ if (info->port_id >= numports) {
+ dev_err(dev, "This system only supports %d ports\n", numports);
+ return -ENODEV;
+ }
+
+ info->edev = devm_extcon_dev_allocate(dev, usb_type_c_cable);
+ if (IS_ERR(info->edev)) {
+ dev_err(dev, "failed to allocate extcon device\n");
+ return -ENOMEM;
+ }
+
+ ret = devm_extcon_dev_register(dev, info->edev);
+ if (ret < 0) {
+ dev_err(dev, "failed to register extcon device\n");
+ return ret;
+ }
+
+ extcon_set_property_capability(info->edev, EXTCON_DISP_DP,
+ EXTCON_PROP_USB_TYPEC_POLARITY);
+ extcon_set_property_capability(info->edev, EXTCON_DISP_DP,
+ EXTCON_PROP_USB_SS);
+ extcon_set_property_capability(info->edev, EXTCON_DISP_DP,
+ EXTCON_PROP_DISP_HPD);
+
+ platform_set_drvdata(pdev, info);
+
+ /* Get PD events from the EC */
+ info->notifier.notifier_call = extcon_cros_ec_event;
+ ret = blocking_notifier_chain_register(&info->ec->event_notifier,
+ &info->notifier);
+ if (ret < 0) {
+ dev_err(dev, "failed to register notifier\n");
+ return ret;
+ }
+
+ /* Perform initial detection */
+ ret = extcon_cros_ec_detect_cable(info, true);
+ if (ret < 0) {
+ dev_err(dev, "failed to detect initial cable state\n");
+ goto unregister_notifier;
+ }
+
+ return 0;
+
+unregister_notifier:
+ blocking_notifier_chain_unregister(&info->ec->event_notifier,
+ &info->notifier);
+ return ret;
+}
+
+static int extcon_cros_ec_remove(struct platform_device *pdev)
+{
+ struct cros_ec_extcon_info *info = platform_get_drvdata(pdev);
+
+ blocking_notifier_chain_unregister(&info->ec->event_notifier,
+ &info->notifier);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int extcon_cros_ec_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int extcon_cros_ec_resume(struct device *dev)
+{
+ int ret;
+ struct cros_ec_extcon_info *info = dev_get_drvdata(dev);
+
+ ret = extcon_cros_ec_detect_cable(info, true);
+ if (ret < 0)
+ dev_err(dev, "failed to detect cable state on resume\n");
+
+ return 0;
+}
+
+static const struct dev_pm_ops extcon_cros_ec_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(extcon_cros_ec_suspend, extcon_cros_ec_resume)
+};
+
+#define DEV_PM_OPS (&extcon_cros_ec_dev_pm_ops)
+#else
+#define DEV_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_OF
+static const struct of_device_id extcon_cros_ec_of_match[] = {
+ { .compatible = "google,extcon-usbc-cros-ec" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, extcon_cros_ec_of_match);
+#endif /* CONFIG_OF */
+
+static struct platform_driver extcon_cros_ec_driver = {
+ .driver = {
+ .name = "extcon-usbc-cros-ec",
+ .of_match_table = of_match_ptr(extcon_cros_ec_of_match),
+ .pm = DEV_PM_OPS,
+ },
+ .remove = extcon_cros_ec_remove,
+ .probe = extcon_cros_ec_probe,
+};
+
+module_platform_driver(extcon_cros_ec_driver);
+
+MODULE_DESCRIPTION("ChromeOS Embedded Controller extcon driver");
+MODULE_AUTHOR("Benson Leung <bleung@chromium.org>");
+MODULE_LICENSE("GPL");
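On the consumer side, a display driver would typically watch this extcon device for EXTCON_DISP_DP and query the properties the driver exports. A hedged usage sketch under the assumption of a hypothetical DP bridge driver (the dp_bridge_* names are invented; the extcon calls are the standard framework API):

static int dp_bridge_extcon_evt(struct notifier_block *nb,
				unsigned long event, void *data)
{
	/* Re-read the DP state/properties and update the bridge here. */
	return NOTIFY_OK;
}

static int dp_bridge_wire_up_extcon(struct device *dev, struct notifier_block *nb)
{
	union extcon_property_value hpd;
	struct extcon_dev *edev;
	int ret;

	edev = extcon_get_edev_by_phandle(dev, 0);
	if (IS_ERR(edev))
		return PTR_ERR(edev);

	nb->notifier_call = dp_bridge_extcon_evt;
	ret = devm_extcon_register_notifier(dev, edev, EXTCON_DISP_DP, nb);
	if (ret)
		return ret;

	if (extcon_get_state(edev, EXTCON_DISP_DP) > 0) {
		extcon_get_property(edev, EXTCON_DISP_DP,
				    EXTCON_PROP_DISP_HPD, &hpd);
		/* hpd.intval is non-zero when the EC last reported an HPD IRQ. */
	}

	return 0;
}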
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index 8eccf7b14937..35e9fb885486 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -1,7 +1,5 @@
/*
- * drivers/extcon/extcon.c - External Connector (extcon) framework.
- *
- * External connector (extcon) class driver
+ * drivers/extcon/extcon.c - External Connector (extcon) framework.
*
* Copyright (C) 2015 Samsung Electronics
* Author: Chanwoo Choi <cw00.choi@samsung.com>
@@ -37,7 +35,6 @@
#include "extcon.h"
#define SUPPORTED_CABLE_MAX 32
-#define CABLE_NAME_MAX 30
struct __extcon_info {
unsigned int type;
@@ -200,13 +197,13 @@ struct __extcon_info {
};
/**
- * struct extcon_cable - An internal data for each cable of extcon device.
- * @edev: The extcon device
- * @cable_index: Index of this cable in the edev
- * @attr_g: Attribute group for the cable
+ * struct extcon_cable - An internal data for an external connector.
+ * @edev: the extcon device
+ * @cable_index: the index of this cable in the edev
+ * @attr_g: the attribute group for the cable
* @attr_name: "name" sysfs entry
* @attr_state: "state" sysfs entry
- * @attrs: Array pointing to attr_name and attr_state for attr_g
+ * @attrs: the array pointing to attr_name and attr_state for attr_g
*/
struct extcon_cable {
struct extcon_dev *edev;
@@ -234,15 +231,6 @@ static struct class *extcon_class;
static LIST_HEAD(extcon_dev_list);
static DEFINE_MUTEX(extcon_dev_list_lock);
-/**
- * check_mutually_exclusive - Check if new_state violates mutually_exclusive
- * condition.
- * @edev: the extcon device
- * @new_state: new cable attach status for @edev
- *
- * Returns 0 if nothing violates. Returns the index + 1 for the first
- * violated condition.
- */
static int check_mutually_exclusive(struct extcon_dev *edev, u32 new_state)
{
int i = 0;
@@ -417,11 +405,13 @@ static ssize_t cable_state_show(struct device *dev,
}
/**
- * extcon_sync() - Synchronize the states for both the attached/detached
- * @edev: the extcon device that has the cable.
+ * extcon_sync() - Synchronize the state for an external connector.
+ * @edev: the extcon device
+ *
+ * Note that this function sends a notification in order to synchronize
+ * the state and property of an external connector.
*
- * This function send a notification to synchronize the all states of a
- * specific external connector
+ * Returns 0 if success or error number if fail.
*/
int extcon_sync(struct extcon_dev *edev, unsigned int id)
{
@@ -497,9 +487,11 @@ int extcon_sync(struct extcon_dev *edev, unsigned int id)
EXPORT_SYMBOL_GPL(extcon_sync);
/**
- * extcon_get_state() - Get the state of a external connector.
- * @edev: the extcon device that has the cable.
- * @id: the unique id of each external connector in extcon enumeration.
+ * extcon_get_state() - Get the state of an external connector.
+ * @edev: the extcon device
+ * @id: the unique id indicating an external connector
+ *
+ * Returns 0 if success or error number if fail.
*/
int extcon_get_state(struct extcon_dev *edev, const unsigned int id)
{
@@ -522,20 +514,19 @@ int extcon_get_state(struct extcon_dev *edev, const unsigned int id)
EXPORT_SYMBOL_GPL(extcon_get_state);
/**
- * extcon_set_state() - Set the state of a external connector.
- * without a notification.
- * @edev: the extcon device that has the cable.
- * @id: the unique id of each external connector
- * in extcon enumeration.
- * @state: the new cable status. The default semantics is
- * true: attached / false: detached.
+ * extcon_set_state() - Set the state of an external connector.
+ * @edev: the extcon device
+ * @id: the unique id indicating an external connector
+ * @state: the new state of an external connector.
+ * the default semantics is true: attached / false: detached.
+ *
+ * Note that this function sets the state of an external connector without
+ * a notification. To synchronize the state of an external connector,
+ * use extcon_set_state_sync() and extcon_sync().
*
- * This function only set the state of a external connector without
- * a notification. To synchronize the data of a external connector,
- * use extcon_set_state_sync() and extcon_sync().
+ * Returns 0 if success or error number if fail.
*/
-int extcon_set_state(struct extcon_dev *edev, unsigned int id,
- bool cable_state)
+int extcon_set_state(struct extcon_dev *edev, unsigned int id, bool state)
{
unsigned long flags;
int index, ret = 0;
@@ -550,11 +541,11 @@ int extcon_set_state(struct extcon_dev *edev, unsigned int id,
spin_lock_irqsave(&edev->lock, flags);
/* Check whether the external connector's state is changed. */
- if (!is_extcon_changed(edev, index, cable_state))
+ if (!is_extcon_changed(edev, index, state))
goto out;
if (check_mutually_exclusive(edev,
- (edev->state & ~BIT(index)) | (cable_state & BIT(index)))) {
+ (edev->state & ~BIT(index)) | (state & BIT(index)))) {
ret = -EPERM;
goto out;
}
@@ -563,11 +554,11 @@ int extcon_set_state(struct extcon_dev *edev, unsigned int id,
* Initialize the value of extcon property before setting
* the detached state for an external connector.
*/
- if (!cable_state)
+ if (!state)
init_property(edev, id, index);
- /* Update the state for a external connector. */
- if (cable_state)
+ /* Update the state for an external connector. */
+ if (state)
edev->state |= BIT(index);
else
edev->state &= ~(BIT(index));
@@ -579,19 +570,18 @@ out:
EXPORT_SYMBOL_GPL(extcon_set_state);
/**
- * extcon_set_state_sync() - Set the state of a external connector
- * with a notification.
- * @edev: the extcon device that has the cable.
- * @id: the unique id of each external connector
- * in extcon enumeration.
- * @state: the new cable status. The default semantics is
- * true: attached / false: detached.
+ * extcon_set_state_sync() - Set the state of an external connector with sync.
+ * @edev: the extcon device
+ * @id: the unique id indicating an external connector
+ * @state: the new state of an external connector.
+ * the default semantics is true: attached / false: detached.
+ *
+ * Note that this function sets the state of an external connector
+ * and synchronizes the state by sending a notification.
*
- * This function set the state of external connector and synchronize the data
- * by usning a notification.
+ * Returns 0 on success or an error number on failure.
*/
-int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id,
- bool cable_state)
+int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id, bool state)
{
int ret, index;
unsigned long flags;
@@ -602,12 +592,12 @@ int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id,
/* Check whether the external connector's state is changed. */
spin_lock_irqsave(&edev->lock, flags);
- ret = is_extcon_changed(edev, index, cable_state);
+ ret = is_extcon_changed(edev, index, state);
spin_unlock_irqrestore(&edev->lock, flags);
if (!ret)
return 0;
- ret = extcon_set_state(edev, id, cable_state);
+ ret = extcon_set_state(edev, id, state);
if (ret < 0)
return ret;
@@ -616,19 +606,18 @@ int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id,
EXPORT_SYMBOL_GPL(extcon_set_state_sync);
/**
- * extcon_get_property() - Get the property value of a specific cable.
- * @edev: the extcon device that has the cable.
- * @id: the unique id of each external connector
- * in extcon enumeration.
- * @prop: the property id among enum extcon_property.
- * @prop_val: the pointer which store the value of property.
+ * extcon_get_property() - Get the property value of an external connector.
+ * @edev: the extcon device
+ * @id: the unique id indicating an external connector
+ * @prop: the property id indicating an extcon property
+ * @prop_val: the pointer which store the value of extcon property
*
- * When getting the property value of external connector, the external connector
- * should be attached. If detached state, function just return 0 without
- * property value. Also, the each property should be included in the list of
- * supported properties according to the type of external connectors.
+ * Note that when getting the property value of an external connector,
+ * the external connector should be attached. If it is detached, this function
+ * just returns 0 without a property value. Also, each property should be
+ * included in the list of supported properties according to the extcon type.
*
- * Returns 0 if success or error number if fail
+ * Returns 0 on success or an error number on failure.
*/
int extcon_get_property(struct extcon_dev *edev, unsigned int id,
unsigned int prop,
@@ -698,17 +687,16 @@ int extcon_get_property(struct extcon_dev *edev, unsigned int id,
EXPORT_SYMBOL_GPL(extcon_get_property);
/**
- * extcon_set_property() - Set the property value of a specific cable.
- * @edev: the extcon device that has the cable.
- * @id: the unique id of each external connector
- * in extcon enumeration.
- * @prop: the property id among enum extcon_property.
- * @prop_val: the pointer including the new value of property.
+ * extcon_set_property() - Set the property value of an external connector.
+ * @edev: the extcon device
+ * @id: the unique id indicating an external connector
+ * @prop: the property id indicating an extcon property
+ * @prop_val: the pointer including the new value of extcon property
*
- * The each property should be included in the list of supported properties
- * according to the type of external connectors.
+ * Note that each property should be included in the list of supported
+ * properties according to the extcon type.
*
- * Returns 0 if success or error number if fail
+ * Returns 0 on success or an error number on failure.
*/
int extcon_set_property(struct extcon_dev *edev, unsigned int id,
unsigned int prop,
@@ -766,15 +754,14 @@ int extcon_set_property(struct extcon_dev *edev, unsigned int id,
EXPORT_SYMBOL_GPL(extcon_set_property);
/**
- * extcon_set_property_sync() - Set the property value of a specific cable
- with a notification.
- * @prop_val: the pointer including the new value of property.
+ * extcon_set_property_sync() - Set property of an external connector with sync.
+ * @prop_val: the pointer including the new value of extcon property
*
- * When setting the property value of external connector, the external connector
- * should be attached. The each property should be included in the list of
- * supported properties according to the type of external connectors.
+ * Note that when setting the property value of an external connector,
+ * the external connector should be attached. Each property should
+ * be included in the list of supported properties according to the extcon type.
*
- * Returns 0 if success or error number if fail
+ * Returns 0 on success or an error number on failure.
*/
int extcon_set_property_sync(struct extcon_dev *edev, unsigned int id,
unsigned int prop,
@@ -791,12 +778,11 @@ int extcon_set_property_sync(struct extcon_dev *edev, unsigned int id,
EXPORT_SYMBOL_GPL(extcon_set_property_sync);
/**
- * extcon_get_property_capability() - Get the capability of property
- * of an external connector.
- * @edev: the extcon device that has the cable.
- * @id: the unique id of each external connector
- * in extcon enumeration.
- * @prop: the property id among enum extcon_property.
+ * extcon_get_property_capability() - Get the capability of the property
+ * for an external connector.
+ * @edev: the extcon device
+ * @id: the unique id indicating an external connector
+ * @prop: the property id indicating an extcon property
*
* Returns 1 if the property is available or 0 if not available.
*/
@@ -822,18 +808,17 @@ int extcon_get_property_capability(struct extcon_dev *edev, unsigned int id,
EXPORT_SYMBOL_GPL(extcon_get_property_capability);
/**
- * extcon_set_property_capability() - Set the capability of a property
- * of an external connector.
- * @edev: the extcon device that has the cable.
- * @id: the unique id of each external connector
- * in extcon enumeration.
- * @prop: the property id among enum extcon_property.
+ * extcon_set_property_capability() - Set the capability of the property
+ * for an external connector.
+ * @edev: the extcon device
+ * @id: the unique id indicating an external connector
+ * @prop: the property id indicating an extcon property
*
- * This function set the capability of a property for an external connector
- * to mark the bit in capability bitmap which mean the available state of
- * a property.
+ * Note that this function sets the capability of the property
+ * for an external connector by marking the bit in the capability
+ * bitmap which means that the property is available.
*
- * Returns 0 if success or error number if fail
+ * Returns 0 on success or an error number on failure.
*/
int extcon_set_property_capability(struct extcon_dev *edev, unsigned int id,
unsigned int prop)
@@ -881,8 +866,10 @@ int extcon_set_property_capability(struct extcon_dev *edev, unsigned int id,
EXPORT_SYMBOL_GPL(extcon_set_property_capability);
/**
- * extcon_get_extcon_dev() - Get the extcon device instance from the name
- * @extcon_name: The extcon name provided with extcon_dev_register()
+ * extcon_get_extcon_dev() - Get the extcon device instance from the name.
+ * @extcon_name: the extcon name provided with extcon_dev_register()
+ *
+ * Returns the pointer to the extcon device on success or ERR_PTR(err) on failure.
*/
struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
{
@@ -904,15 +891,17 @@ out:
EXPORT_SYMBOL_GPL(extcon_get_extcon_dev);
/**
- * extcon_register_notifier() - Register a notifiee to get notified by
- * any attach status changes from the extcon.
- * @edev: the extcon device that has the external connecotr.
- * @id: the unique id of each external connector in extcon enumeration.
- * @nb: a notifier block to be registered.
+ * extcon_register_notifier() - Register a notifier block to get notified by
+ * any state changes from the extcon.
+ * @edev: the extcon device
+ * @id: the unique id indicating an external connector
+ * @nb: a notifier block to be registered
*
* Note that the second parameter given to the callback of nb (val) is
- * "old_state", not the current state. The current state can be retrieved
- * by looking at the third pameter (edev pointer)'s state value.
+ * the current state of an external connector and the third parameter
+ * is the pointer to the extcon device.
+ *
+ * Returns 0 on success or an error number on failure.
*/
int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb)
@@ -936,10 +925,12 @@ int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
EXPORT_SYMBOL_GPL(extcon_register_notifier);
/**
- * extcon_unregister_notifier() - Unregister a notifiee from the extcon device.
- * @edev: the extcon device that has the external connecotr.
- * @id: the unique id of each external connector in extcon enumeration.
- * @nb: a notifier block to be registered.
+ * extcon_unregister_notifier() - Unregister a notifier block from the extcon.
+ * @edev: the extcon device
+ * @id: the unique id indicating an external connector
+ * @nb: a notifier block to be unregistered
+ *
+ * Returns 0 on success or an error number on failure.
*/
int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb)
@@ -963,16 +954,16 @@ int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id,
EXPORT_SYMBOL_GPL(extcon_unregister_notifier);
/**
- * extcon_register_notifier_all() - Register a notifier block for all connectors
- * @edev: the extcon device that has the external connector.
- * @nb: a notifier block to be registered.
+ * extcon_register_notifier_all() - Register a notifier block for all connectors.
+ * @edev: the extcon device
+ * @nb: a notifier block to be registered
*
- * This function registers a notifier block in order to receive the state
- * change of all supported external connectors from extcon device.
+ * Note that this function registers a notifier block in order to receive
+ * the state change of all supported external connectors from the extcon device.
* And the second parameter given to the callback of nb (val) is
- * the current state and third parameter is the edev pointer.
+ * the current state and the third parameter is the pointer to the extcon device.
*
- * Returns 0 if success or error number if fail
+ * Returns 0 on success or an error number on failure.
*/
int extcon_register_notifier_all(struct extcon_dev *edev,
struct notifier_block *nb)
@@ -993,10 +984,10 @@ EXPORT_SYMBOL_GPL(extcon_register_notifier_all);
/**
* extcon_unregister_notifier_all() - Unregister a notifier block from extcon.
- * @edev: the extcon device that has the external connecotr.
- * @nb: a notifier block to be registered.
+ * @edev: the extcon device
+ * @nb: a notifier block to be unregistered
*
- * Returns 0 if success or error number if fail
+ * Returns 0 on success or an error number on failure.
*/
int extcon_unregister_notifier_all(struct extcon_dev *edev,
struct notifier_block *nb)
@@ -1045,15 +1036,14 @@ static void dummy_sysfs_dev_release(struct device *dev)
/*
* extcon_dev_allocate() - Allocate the memory of extcon device.
- * @supported_cable: Array of supported extcon ending with EXTCON_NONE.
- * If supported_cable is NULL, cable name related APIs
- * are disabled.
+ * @supported_cable: the array of the supported external connectors
+ * ending with EXTCON_NONE.
*
- * This function allocates the memory for extcon device without allocating
- * memory in each extcon provider driver and initialize default setting for
- * extcon device.
+ * Note that this function allocates the memory for an extcon device
+ * and initializes the default settings for it.
*
- * Return the pointer of extcon device if success or ERR_PTR(err) if fail
+ * Returns the pointer to the allocated extcon_dev on success
+ * or ERR_PTR(err) on failure.
*/
struct extcon_dev *extcon_dev_allocate(const unsigned int *supported_cable)
{
@@ -1074,7 +1064,7 @@ struct extcon_dev *extcon_dev_allocate(const unsigned int *supported_cable)
/*
* extcon_dev_free() - Free the memory of extcon device.
- * @edev: the extcon device to free
+ * @edev: the extcon device
*/
void extcon_dev_free(struct extcon_dev *edev)
{
@@ -1083,13 +1073,18 @@ void extcon_dev_free(struct extcon_dev *edev)
EXPORT_SYMBOL_GPL(extcon_dev_free);
/**
- * extcon_dev_register() - Register a new extcon device
- * @edev : the new extcon device (should be allocated before calling)
+ * extcon_dev_register() - Register a new extcon device
+ * @edev: the extcon device to be registered
*
* Among the members of edev struct, please set the "user initializing data"
- * in any case and set the "optional callbacks" if required. However, please
* do not set the values of "internal data", which are initialized by
* this function.
+ *
+ * Note that before calling this function, you have to allocate the memory
+ * of an extcon device by using extcon_dev_allocate(). And the extcon
+ * device should include the supported_cable information.
+ *
+ * Returns 0 on success or an error number on failure.
*/
int extcon_dev_register(struct extcon_dev *edev)
{
@@ -1296,7 +1291,7 @@ EXPORT_SYMBOL_GPL(extcon_dev_register);
/**
* extcon_dev_unregister() - Unregister the extcon device.
- * @edev: the extcon device instance to be unregistered.
+ * @edev: the extcon device to be unregistered.
*
* Note that this does not call kfree(edev) because edev was not allocated
* by this class.
@@ -1342,11 +1337,11 @@ EXPORT_SYMBOL_GPL(extcon_dev_unregister);
#ifdef CONFIG_OF
/*
- * extcon_get_edev_by_phandle - Get the extcon device from devicetree
- * @dev - instance to the given device
- * @index - index into list of extcon_dev
+ * extcon_get_edev_by_phandle - Get the extcon device from devicetree.
+ * @dev: the instance of the given device
+ * @index: the index into the list of extcon_dev
*
- * return the instance of extcon device
+ * Returns the pointer to the extcon device on success or ERR_PTR(err) on failure.
*/
struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index)
{
@@ -1363,8 +1358,8 @@ struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index)
node = of_parse_phandle(dev->of_node, "extcon", index);
if (!node) {
- dev_dbg(dev, "failed to get phandle in %s node\n",
- dev->of_node->full_name);
+ dev_dbg(dev, "failed to get phandle in %pOF node\n",
+ dev->of_node);
return ERR_PTR(-ENODEV);
}
@@ -1411,8 +1406,6 @@ static void __exit extcon_class_exit(void)
module_exit(extcon_class_exit);
MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
-MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
-MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
-MODULE_DESCRIPTION("External connector (extcon) class driver");
-MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("External Connector (extcon) framework");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index 2fe1a130189f..c16600f30611 100644
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/firmware/dcdbas.c
@@ -534,7 +534,7 @@ static struct attribute *dcdbas_dev_attrs[] = {
NULL
};
-static struct attribute_group dcdbas_attr_group = {
+static const struct attribute_group dcdbas_attr_group = {
.attrs = dcdbas_dev_attrs,
.bin_attrs = dcdbas_bin_attrs,
};
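This change (like the matching ones in efi.c and esrt.c further down) only adds const, which is possible because sysfs group registration takes a pointer to const. A small sketch of the pattern with invented my_* names:

#include <linux/kobject.h>
#include <linux/sysfs.h>

static ssize_t my_value_show(struct kobject *kobj, struct kobj_attribute *attr,
			     char *buf)
{
	return sprintf(buf, "%d\n", 42);
}

static struct kobj_attribute my_value_attr = __ATTR_RO(my_value);

static struct attribute *my_attrs[] = {
	&my_value_attr.attr,
	NULL,
};

/* Never written at runtime, so the group can be const and live in .rodata. */
static const struct attribute_group my_group = {
	.attrs = my_attrs,
};

/* sysfs_create_group(kobj, &my_group) accepts a const group pointer. */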
diff --git a/drivers/firmware/dmi-sysfs.c b/drivers/firmware/dmi-sysfs.c
index ef76e5eecf0b..d5de6ee8466d 100644
--- a/drivers/firmware/dmi-sysfs.c
+++ b/drivers/firmware/dmi-sysfs.c
@@ -25,6 +25,7 @@
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/io.h>
+#include <asm/dmi.h>
#define MAX_ENTRY_TYPE 255 /* Most of these aren't used, but we consider
the top entry type is only 8 bits */
@@ -380,7 +381,7 @@ static ssize_t dmi_sel_raw_read_phys32(struct dmi_sysfs_entry *entry,
u8 __iomem *mapped;
ssize_t wrote = 0;
- mapped = ioremap(sel->access_method_address, sel->area_length);
+ mapped = dmi_remap(sel->access_method_address, sel->area_length);
if (!mapped)
return -EIO;
@@ -390,7 +391,7 @@ static ssize_t dmi_sel_raw_read_phys32(struct dmi_sysfs_entry *entry,
wrote++;
}
- iounmap(mapped);
+ dmi_unmap(mapped);
return wrote;
}
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 394db40ed374..2b4c39fdfa91 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -151,6 +151,16 @@ config APPLE_PROPERTIES
If unsure, say Y if you have a Mac. Otherwise N.
+config RESET_ATTACK_MITIGATION
+ bool "Reset memory attack mitigation"
+ depends on EFI_STUB
+ help
+ Request that the firmware clear the contents of RAM after a reboot
+ using the TCG Platform Reset Attack Mitigation specification. This
+ protects against an attacker forcibly rebooting the system while it
+ still contains secrets in RAM, booting another OS and extracting the
+ secrets.
+
endmenu
config UEFI_CPER
diff --git a/drivers/firmware/efi/apple-properties.c b/drivers/firmware/efi/apple-properties.c
index c473f4c5ca34..9f6bcf173b0e 100644
--- a/drivers/firmware/efi/apple-properties.c
+++ b/drivers/firmware/efi/apple-properties.c
@@ -18,8 +18,8 @@
#define pr_fmt(fmt) "apple-properties: " fmt
#include <linux/bootmem.h>
-#include <linux/dmi.h>
#include <linux/efi.h>
+#include <linux/platform_data/x86/apple.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/ucs2_string.h>
@@ -191,8 +191,7 @@ static int __init map_properties(void)
u64 pa_data;
int ret;
- if (!dmi_match(DMI_SYS_VENDOR, "Apple Inc.") &&
- !dmi_match(DMI_SYS_VENDOR, "Apple Computer, Inc."))
+ if (!x86_apple_machine)
return 0;
pa_data = boot_params.hdr.setup_data;
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
index 1027d7b44358..80d1a885def5 100644
--- a/drivers/firmware/efi/arm-init.c
+++ b/drivers/firmware/efi/arm-init.c
@@ -145,6 +145,9 @@ static int __init uefi_init(void)
sizeof(efi_config_table_t),
arch_tables);
+ if (!retval)
+ efi.config_table = (unsigned long)efi.systab->tables;
+
early_memunmap(config_tables, table_size);
out:
early_memunmap(efi.systab, sizeof(efi_system_table_t));
@@ -159,6 +162,7 @@ static __init int is_usable_memory(efi_memory_desc_t *md)
switch (md->type) {
case EFI_LOADER_CODE:
case EFI_LOADER_DATA:
+ case EFI_ACPI_RECLAIM_MEMORY:
case EFI_BOOT_SERVICES_CODE:
case EFI_BOOT_SERVICES_DATA:
case EFI_CONVENTIONAL_MEMORY:
@@ -211,6 +215,10 @@ static __init void reserve_regions(void)
if (!is_usable_memory(md))
memblock_mark_nomap(paddr, size);
+
+ /* keep ACPI reclaim memory intact for kexec etc. */
+ if (md->type == EFI_ACPI_RECLAIM_MEMORY)
+ memblock_reserve(paddr, size);
}
}
}
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index 48a8f69da42a..bf3672a81e49 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -606,7 +606,6 @@ void cper_estatus_print(const char *pfx,
const struct acpi_hest_generic_status *estatus)
{
struct acpi_hest_generic_data *gdata;
- unsigned int data_len;
int sec_no = 0;
char newpfx[64];
__u16 severity;
@@ -617,14 +616,10 @@ void cper_estatus_print(const char *pfx,
"It has been corrected by h/w "
"and requires no further action");
printk("%s""event severity: %s\n", pfx, cper_severity_str(severity));
- data_len = estatus->data_length;
- gdata = (struct acpi_hest_generic_data *)(estatus + 1);
snprintf(newpfx, sizeof(newpfx), "%s%s", pfx, INDENT_SP);
- while (data_len >= acpi_hest_get_size(gdata)) {
+ apei_estatus_for_each_section(estatus, gdata) {
cper_estatus_print_section(newpfx, gdata, sec_no);
- data_len -= acpi_hest_get_record_size(gdata);
- gdata = acpi_hest_get_next(gdata);
sec_no++;
}
}
@@ -653,15 +648,12 @@ int cper_estatus_check(const struct acpi_hest_generic_status *estatus)
if (rc)
return rc;
data_len = estatus->data_length;
- gdata = (struct acpi_hest_generic_data *)(estatus + 1);
- while (data_len >= acpi_hest_get_size(gdata)) {
+ apei_estatus_for_each_section(estatus, gdata) {
gedata_len = acpi_hest_get_error_length(gdata);
if (gedata_len > data_len - acpi_hest_get_size(gdata))
return -EINVAL;
-
data_len -= acpi_hest_get_record_size(gdata);
- gdata = acpi_hest_get_next(gdata);
}
if (data_len)
return -EINVAL;
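The hunks above replace the open-coded walk over the generic data entries with the apei_estatus_for_each_section() iterator. A rough sketch of a walker built on it (my_count_sections() is a hypothetical helper):

static int my_count_sections(const struct acpi_hest_generic_status *estatus)
{
	struct acpi_hest_generic_data *gdata;
	int n = 0;

	/* The iterator hides the acpi_hest_get_record_size() stride. */
	apei_estatus_for_each_section(estatus, gdata)
		n++;

	return n;
}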
diff --git a/drivers/firmware/efi/efi-bgrt.c b/drivers/firmware/efi/efi-bgrt.c
index b58233e4ed71..50793fda7819 100644
--- a/drivers/firmware/efi/efi-bgrt.c
+++ b/drivers/firmware/efi/efi-bgrt.c
@@ -27,26 +27,6 @@ struct bmp_header {
u32 size;
} __packed;
-static bool efi_bgrt_addr_valid(u64 addr)
-{
- efi_memory_desc_t *md;
-
- for_each_efi_memory_desc(md) {
- u64 size;
- u64 end;
-
- if (md->type != EFI_BOOT_SERVICES_DATA)
- continue;
-
- size = md->num_pages << EFI_PAGE_SHIFT;
- end = md->phys_addr + size;
- if (addr >= md->phys_addr && addr < end)
- return true;
- }
-
- return false;
-}
-
void __init efi_bgrt_init(struct acpi_table_header *table)
{
void *image;
@@ -85,7 +65,7 @@ void __init efi_bgrt_init(struct acpi_table_header *table)
goto out;
}
- if (!efi_bgrt_addr_valid(bgrt->image_address)) {
+ if (efi_mem_type(bgrt->image_address) != EFI_BOOT_SERVICES_DATA) {
pr_notice("Ignoring BGRT: invalid image address\n");
goto out;
}
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 045d6d311bde..f70febf680c3 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -55,6 +55,25 @@ struct efi __read_mostly efi = {
};
EXPORT_SYMBOL(efi);
+static unsigned long *efi_tables[] = {
+ &efi.mps,
+ &efi.acpi,
+ &efi.acpi20,
+ &efi.smbios,
+ &efi.smbios3,
+ &efi.sal_systab,
+ &efi.boot_info,
+ &efi.hcdp,
+ &efi.uga,
+ &efi.uv_systab,
+ &efi.fw_vendor,
+ &efi.runtime,
+ &efi.config_table,
+ &efi.esrt,
+ &efi.properties_table,
+ &efi.mem_attr_table,
+};
+
static bool disable_runtime;
static int __init setup_noefi(char *arg)
{
@@ -179,7 +198,7 @@ static umode_t efi_attr_is_visible(struct kobject *kobj,
return attr->mode;
}
-static struct attribute_group efi_subsys_attr_group = {
+static const struct attribute_group efi_subsys_attr_group = {
.attrs = efi_subsys_attrs,
.is_visible = efi_attr_is_visible,
};
@@ -522,6 +541,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
if (seed != NULL) {
add_device_randomness(seed->bits, seed->size);
early_memunmap(seed, sizeof(*seed) + size);
+ pr_notice("seeding entropy pool\n");
} else {
pr_err("Could not map UEFI random seed!\n");
}
@@ -791,19 +811,19 @@ char * __init efi_md_typeattr_format(char *buf, size_t size,
}
/*
+ * IA64 has a funky EFI memory map that doesn't work the same way as
+ * other architectures.
+ */
+#ifndef CONFIG_IA64
+/*
* efi_mem_attributes - lookup memmap attributes for physical address
* @phys_addr: the physical address to lookup
*
* Search in the EFI memory map for the region covering
* @phys_addr. Returns the EFI memory attributes if the region
* was found in the memory map, 0 otherwise.
- *
- * Despite being marked __weak, most architectures should *not*
- * override this function. It is __weak solely for the benefit
- * of ia64 which has a funky EFI memory map that doesn't work
- * the same way as other architectures.
*/
-u64 __weak efi_mem_attributes(unsigned long phys_addr)
+u64 efi_mem_attributes(unsigned long phys_addr)
{
efi_memory_desc_t *md;
@@ -819,6 +839,31 @@ u64 __weak efi_mem_attributes(unsigned long phys_addr)
return 0;
}
+/*
+ * efi_mem_type - lookup memmap type for physical address
+ * @phys_addr: the physical address to lookup
+ *
+ * Search in the EFI memory map for the region covering @phys_addr.
+ * Returns the EFI memory type if the region was found in the memory
+ * map, or a negative error number otherwise.
+ */
+int efi_mem_type(unsigned long phys_addr)
+{
+ const efi_memory_desc_t *md;
+
+ if (!efi_enabled(EFI_MEMMAP))
+ return -ENOTSUPP;
+
+ for_each_efi_memory_desc(md) {
+ if ((md->phys_addr <= phys_addr) &&
+ (phys_addr < (md->phys_addr +
+ (md->num_pages << EFI_PAGE_SHIFT))))
+ return md->type;
+ }
+ return -EINVAL;
+}
+#endif
+
int efi_status_to_err(efi_status_t status)
{
int err;
@@ -855,6 +900,20 @@ int efi_status_to_err(efi_status_t status)
return err;
}
+bool efi_is_table_address(unsigned long phys_addr)
+{
+ unsigned int i;
+
+ if (phys_addr == EFI_INVALID_TABLE_ADDR)
+ return false;
+
+ for (i = 0; i < ARRAY_SIZE(efi_tables); i++)
+ if (*(efi_tables[i]) == phys_addr)
+ return true;
+
+ return false;
+}
+
#ifdef CONFIG_KEXEC
static int update_efi_random_seed(struct notifier_block *nb,
unsigned long code, void *unused)
@@ -867,7 +926,7 @@ static int update_efi_random_seed(struct notifier_block *nb,
seed = memremap(efi.rng_seed, sizeof(*seed), MEMREMAP_WB);
if (seed != NULL) {
- size = min(seed->size, 32U);
+ size = min(seed->size, EFI_RANDOM_SEED_SIZE);
memunmap(seed);
} else {
pr_err("Could not map UEFI random seed!\n");
diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
index 8554d7aec31c..bd7ed3c1148a 100644
--- a/drivers/firmware/efi/esrt.c
+++ b/drivers/firmware/efi/esrt.c
@@ -230,7 +230,7 @@ static umode_t esrt_attr_is_visible(struct kobject *kobj,
return attr->mode;
}
-static struct attribute_group esrt_attr_group = {
+static const struct attribute_group esrt_attr_group = {
.attrs = esrt_attrs,
.is_visible = esrt_attr_is_visible,
};
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index 37e24f525162..dedf9bde44db 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -10,7 +10,7 @@ cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -O2 \
-fPIC -fno-strict-aliasing -mno-red-zone \
-mno-mmx -mno-sse
-cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS))
+cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS)) -fpie
cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \
-fno-builtin -fpic -mno-single-pic-base
@@ -30,6 +30,7 @@ OBJECT_FILES_NON_STANDARD := y
KCOV_INSTRUMENT := n
lib-y := efi-stub-helper.o gop.o secureboot.o
+lib-$(CONFIG_RESET_ATTACK_MITIGATION) += tpm.o
# include the stub's generic dependencies from lib/ when building for ARM/arm64
arm-deps := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c sort.c
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index 8181ac179d14..1cb2d1c070c3 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -192,6 +192,9 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
goto fail_free_cmdline;
}
+ /* Ask the firmware to clear memory on unclean shutdown */
+ efi_enable_reset_attack_mitigation(sys_table);
+
secure_boot = efi_get_secureboot(sys_table);
/*
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index b4c2589d7c91..b9bd827caa22 100644
--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -9,9 +9,18 @@
* published by the Free Software Foundation.
*
*/
+
+/*
+ * To prevent the compiler from emitting GOT-indirected (and thus absolute)
+ * references to the section markers, override their visibility as 'hidden'
+ */
+#pragma GCC visibility push(hidden)
+#include <asm/sections.h>
+#pragma GCC visibility pop
+
#include <linux/efi.h>
#include <asm/efi.h>
-#include <asm/sections.h>
+#include <asm/memory.h>
#include <asm/sysreg.h>
#include "efistub.h"
@@ -81,9 +90,10 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg,
/*
* If CONFIG_DEBUG_ALIGN_RODATA is not set, produce a
* displacement in the interval [0, MIN_KIMG_ALIGN) that
- * is a multiple of the minimal segment alignment (SZ_64K)
+ * doesn't violate this kernel's de-facto alignment
+ * constraints.
*/
- u32 mask = (MIN_KIMG_ALIGN - 1) & ~(SZ_64K - 1);
+ u32 mask = (MIN_KIMG_ALIGN - 1) & ~(EFI_KIMG_ALIGN - 1);
u32 offset = !IS_ENABLED(CONFIG_DEBUG_ALIGN_RODATA) ?
(phys_seed >> 32) & mask : TEXT_OFFSET;
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index b0184360efc6..50a9cab5a834 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -205,7 +205,7 @@ again:
unsigned long m = (unsigned long)map;
u64 start, end;
- desc = (efi_memory_desc_t *)(m + (i * desc_size));
+ desc = efi_early_memdesc_ptr(m, desc_size, i);
if (desc->type != EFI_CONVENTIONAL_MEMORY)
continue;
@@ -298,7 +298,7 @@ efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
unsigned long m = (unsigned long)map;
u64 start, end;
- desc = (efi_memory_desc_t *)(m + (i * desc_size));
+ desc = efi_early_memdesc_ptr(m, desc_size, i);
if (desc->type != EFI_CONVENTIONAL_MEMORY)
continue;
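The new efi_early_memdesc_ptr() helper stands in for the open-coded pointer arithmetic shown in the removed lines. Its definition is plausibly just the same computation behind a name; a sketch (not necessarily the exact upstream macro) looks like:

/* Return a pointer to the n-th descriptor of an early EFI memory map. */
#define my_early_memdesc_ptr(map, desc_size, n) \
	((efi_memory_desc_t *)((unsigned long)(map) + ((n) * (desc_size))))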
diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c
index 7e72954d5860..e0e603a89aa9 100644
--- a/drivers/firmware/efi/libstub/random.c
+++ b/drivers/firmware/efi/libstub/random.c
@@ -145,8 +145,6 @@ efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
return status;
}
-#define RANDOM_SEED_SIZE 32
-
efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg)
{
efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID;
@@ -162,25 +160,25 @@ efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg)
return status;
status = efi_call_early(allocate_pool, EFI_RUNTIME_SERVICES_DATA,
- sizeof(*seed) + RANDOM_SEED_SIZE,
+ sizeof(*seed) + EFI_RANDOM_SEED_SIZE,
(void **)&seed);
if (status != EFI_SUCCESS)
return status;
- status = rng->get_rng(rng, &rng_algo_raw, RANDOM_SEED_SIZE,
+ status = rng->get_rng(rng, &rng_algo_raw, EFI_RANDOM_SEED_SIZE,
seed->bits);
if (status == EFI_UNSUPPORTED)
/*
* Use whatever algorithm we have available if the raw algorithm
* is not implemented.
*/
- status = rng->get_rng(rng, NULL, RANDOM_SEED_SIZE,
+ status = rng->get_rng(rng, NULL, EFI_RANDOM_SEED_SIZE,
seed->bits);
if (status != EFI_SUCCESS)
goto err_freepool;
- seed->size = RANDOM_SEED_SIZE;
+ seed->size = EFI_RANDOM_SEED_SIZE;
status = efi_call_early(install_configuration_table, &rng_table_guid,
seed);
if (status != EFI_SUCCESS)
diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c
new file mode 100644
index 000000000000..6224cdbc9669
--- /dev/null
+++ b/drivers/firmware/efi/libstub/tpm.c
@@ -0,0 +1,58 @@
+/*
+ * TPM handling.
+ *
+ * Copyright (C) 2016 CoreOS, Inc
+ * Copyright (C) 2017 Google, Inc.
+ * Matthew Garrett <mjg59@google.com>
+ *
+ * This file is part of the Linux kernel, and is made available under the
+ * terms of the GNU General Public License version 2.
+ */
+#include <linux/efi.h>
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+static const efi_char16_t efi_MemoryOverWriteRequest_name[] = {
+ 'M', 'e', 'm', 'o', 'r', 'y', 'O', 'v', 'e', 'r', 'w', 'r', 'i', 't',
+ 'e', 'R', 'e', 'q', 'u', 'e', 's', 't', 'C', 'o', 'n', 't', 'r', 'o',
+ 'l', 0
+};
+
+#define MEMORY_ONLY_RESET_CONTROL_GUID \
+ EFI_GUID(0xe20939be, 0x32d4, 0x41be, 0xa1, 0x50, 0x89, 0x7f, 0x85, 0xd4, 0x98, 0x29)
+
+#define get_efi_var(name, vendor, ...) \
+ efi_call_runtime(get_variable, \
+ (efi_char16_t *)(name), (efi_guid_t *)(vendor), \
+ __VA_ARGS__)
+
+#define set_efi_var(name, vendor, ...) \
+ efi_call_runtime(set_variable, \
+ (efi_char16_t *)(name), (efi_guid_t *)(vendor), \
+ __VA_ARGS__)
+
+/*
+ * Enable reboot attack mitigation. This requests that the firmware clear the
+ * RAM on next reboot before proceeding with boot, ensuring that any secrets
+ * are cleared. If userland has ensured that all secrets have been removed
+ * from RAM before reboot it can simply reset this variable.
+ */
+void efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg)
+{
+ u8 val = 1;
+ efi_guid_t var_guid = MEMORY_ONLY_RESET_CONTROL_GUID;
+ efi_status_t status;
+ unsigned long datasize = 0;
+
+ status = get_efi_var(efi_MemoryOverWriteRequest_name, &var_guid,
+ NULL, &datasize, NULL);
+
+ if (status == EFI_NOT_FOUND)
+ return;
+
+ set_efi_var(efi_MemoryOverWriteRequest_name, &var_guid,
+ EFI_VARIABLE_NON_VOLATILE |
+ EFI_VARIABLE_BOOTSERVICE_ACCESS |
+ EFI_VARIABLE_RUNTIME_ACCESS, sizeof(val), &val);
+}
diff --git a/drivers/firmware/efi/reboot.c b/drivers/firmware/efi/reboot.c
index 62ead9b9d871..22874544d301 100644
--- a/drivers/firmware/efi/reboot.c
+++ b/drivers/firmware/efi/reboot.c
@@ -5,6 +5,8 @@
#include <linux/efi.h>
#include <linux/reboot.h>
+static void (*orig_pm_power_off)(void);
+
int efi_reboot_quirk_mode = -1;
void efi_reboot(enum reboot_mode reboot_mode, const char *__unused)
@@ -51,6 +53,12 @@ bool __weak efi_poweroff_required(void)
static void efi_power_off(void)
{
efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL);
+ /*
+ * The above call should not return, if it does fall back to
+ * the original power off method (typically ACPI poweroff).
+ */
+ if (orig_pm_power_off)
+ orig_pm_power_off();
}
static int __init efi_shutdown_init(void)
@@ -58,8 +66,10 @@ static int __init efi_shutdown_init(void)
if (!efi_enabled(EFI_RUNTIME_SERVICES))
return -ENODEV;
- if (efi_poweroff_required())
+ if (efi_poweroff_required()) {
+ orig_pm_power_off = pm_power_off;
pm_power_off = efi_power_off;
+ }
return 0;
}
diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
index 78945729388e..35e553b3b190 100644
--- a/drivers/firmware/google/vpd.c
+++ b/drivers/firmware/google/vpd.c
@@ -202,7 +202,7 @@ static int vpd_section_init(const char *name, struct vpd_section *sec,
sec->raw_name = kasprintf(GFP_KERNEL, "%s_raw", name);
if (!sec->raw_name) {
err = -ENOMEM;
- goto err_iounmap;
+ goto err_memunmap;
}
sysfs_bin_attr_init(&sec->bin_attr);
@@ -233,8 +233,8 @@ err_sysfs_remove:
sysfs_remove_bin_file(vpd_kobj, &sec->bin_attr);
err_free_raw_name:
kfree(sec->raw_name);
-err_iounmap:
- iounmap(sec->baseaddr);
+err_memunmap:
+ memunmap(sec->baseaddr);
return err;
}
@@ -245,7 +245,7 @@ static int vpd_section_destroy(struct vpd_section *sec)
kobject_put(sec->kobj);
sysfs_remove_bin_file(vpd_kobj, &sec->bin_attr);
kfree(sec->raw_name);
- iounmap(sec->baseaddr);
+ memunmap(sec->baseaddr);
}
return 0;
@@ -262,7 +262,7 @@ static int vpd_sections_init(phys_addr_t physaddr)
return -ENOMEM;
memcpy_fromio(&header, temp, sizeof(struct vpd_cbmem));
- iounmap(temp);
+ memunmap(temp);
if (header.magic != VPD_CBMEM_MAGIC)
return -ENODEV;
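The vpd.c fixes above pair memremap() with memunmap() instead of iounmap(). A minimal sketch of that pairing rule, with a hypothetical my_read_blob() helper:

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/string.h>

static int my_read_blob(phys_addr_t pa, void *buf, size_t len)
{
	void *va = memremap(pa, len, MEMREMAP_WB);

	if (!va)
		return -ENOMEM;

	memcpy(buf, va, len);
	memunmap(va);	/* matches memremap(), unlike iounmap() */
	return 0;
}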
diff --git a/drivers/firmware/pcdp.c b/drivers/firmware/pcdp.c
index 75273a251603..e83d6aec0c13 100644
--- a/drivers/firmware/pcdp.c
+++ b/drivers/firmware/pcdp.c
@@ -95,7 +95,7 @@ efi_setup_pcdp_console(char *cmdline)
if (efi.hcdp == EFI_INVALID_TABLE_ADDR)
return -ENODEV;
- pcdp = early_ioremap(efi.hcdp, 4096);
+ pcdp = early_memremap(efi.hcdp, 4096);
printk(KERN_INFO "PCDP: v%d at 0x%lx\n", pcdp->rev, efi.hcdp);
if (strstr(cmdline, "console=hcdp")) {
@@ -131,6 +131,6 @@ efi_setup_pcdp_console(char *cmdline)
}
out:
- early_iounmap(pcdp, 4096);
+ early_memunmap(pcdp, 4096);
return rc;
}
diff --git a/drivers/fmc/Makefile b/drivers/fmc/Makefile
index b9452919739f..e809322e1bac 100644
--- a/drivers/fmc/Makefile
+++ b/drivers/fmc/Makefile
@@ -6,6 +6,7 @@ fmc-y += fmc-match.o
fmc-y += fmc-sdb.o
fmc-y += fru-parse.o
fmc-y += fmc-dump.o
+fmc-y += fmc-debug.o
obj-$(CONFIG_FMC_FAKEDEV) += fmc-fakedev.o
obj-$(CONFIG_FMC_TRIVIAL) += fmc-trivial.o
diff --git a/drivers/fmc/fmc-chardev.c b/drivers/fmc/fmc-chardev.c
index ace6ef24d15e..5ecf4090a610 100644
--- a/drivers/fmc/fmc-chardev.c
+++ b/drivers/fmc/fmc-chardev.c
@@ -129,8 +129,7 @@ static int fc_probe(struct fmc_device *fmc)
struct fc_instance *fc;
- if (fmc->op->validate)
- index = fmc->op->validate(fmc, &fc_drv);
+ index = fmc_validate(fmc, &fc_drv);
if (index < 0)
return -EINVAL; /* not our device: invalid */
diff --git a/drivers/fmc/fmc-core.c b/drivers/fmc/fmc-core.c
index 353fc546fb08..cec3b8db0d69 100644
--- a/drivers/fmc/fmc-core.c
+++ b/drivers/fmc/fmc-core.c
@@ -13,6 +13,9 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/fmc.h>
+#include <linux/fmc-sdb.h>
+
+#include "fmc-private.h"
static int fmc_check_version(unsigned long version, const char *name)
{
@@ -118,6 +121,61 @@ static struct bin_attribute fmc_eeprom_attr = {
.write = fmc_write_eeprom,
};
+int fmc_irq_request(struct fmc_device *fmc, irq_handler_t h,
+ char *name, int flags)
+{
+ if (fmc->op->irq_request)
+ return fmc->op->irq_request(fmc, h, name, flags);
+ return -EPERM;
+}
+EXPORT_SYMBOL(fmc_irq_request);
+
+void fmc_irq_free(struct fmc_device *fmc)
+{
+ if (fmc->op->irq_free)
+ fmc->op->irq_free(fmc);
+}
+EXPORT_SYMBOL(fmc_irq_free);
+
+void fmc_irq_ack(struct fmc_device *fmc)
+{
+ if (likely(fmc->op->irq_ack))
+ fmc->op->irq_ack(fmc);
+}
+EXPORT_SYMBOL(fmc_irq_ack);
+
+int fmc_validate(struct fmc_device *fmc, struct fmc_driver *drv)
+{
+ if (fmc->op->validate)
+ return fmc->op->validate(fmc, drv);
+ return -EPERM;
+}
+EXPORT_SYMBOL(fmc_validate);
+
+int fmc_gpio_config(struct fmc_device *fmc, struct fmc_gpio *gpio, int ngpio)
+{
+ if (fmc->op->gpio_config)
+ return fmc->op->gpio_config(fmc, gpio, ngpio);
+ return -EPERM;
+}
+EXPORT_SYMBOL(fmc_gpio_config);
+
+int fmc_read_ee(struct fmc_device *fmc, int pos, void *d, int l)
+{
+ if (fmc->op->read_ee)
+ return fmc->op->read_ee(fmc, pos, d, l);
+ return -EPERM;
+}
+EXPORT_SYMBOL(fmc_read_ee);
+
+int fmc_write_ee(struct fmc_device *fmc, int pos, const void *d, int l)
+{
+ if (fmc->op->write_ee)
+ return fmc->op->write_ee(fmc, pos, d, l);
+ return -EPERM;
+}
+EXPORT_SYMBOL(fmc_write_ee);
+
/*
* Functions for client modules follow
*/
@@ -141,7 +199,8 @@ EXPORT_SYMBOL(fmc_driver_unregister);
* When a device set is registered, all eeproms must be read
* and all FRUs must be parsed
*/
-int fmc_device_register_n(struct fmc_device **devs, int n)
+int fmc_device_register_n_gw(struct fmc_device **devs, int n,
+ struct fmc_gateware *gw)
{
struct fmc_device *fmc, **devarray;
uint32_t device_id;
@@ -221,6 +280,21 @@ int fmc_device_register_n(struct fmc_device **devs, int n)
else
dev_set_name(&fmc->dev, "%s-%04x", fmc->mezzanine_name,
device_id);
+
+ if (gw) {
+ /*
+ * The carrier already knows the bitstream to load
+ * for this set of FMC mezzanines.
+ */
+ ret = fmc->op->reprogram_raw(fmc, NULL,
+ gw->bitstream, gw->len);
+ if (ret) {
+ dev_warn(fmc->hwdev,
+ "Invalid gateware for FMC mezzanine\n");
+ goto out;
+ }
+ }
+
ret = device_add(&fmc->dev);
if (ret < 0) {
dev_err(fmc->hwdev, "Slot %i: Failed in registering "
@@ -234,18 +308,16 @@ int fmc_device_register_n(struct fmc_device **devs, int n)
}
/* This device went well, give information to the user */
fmc_dump_eeprom(fmc);
- fmc_dump_sdb(fmc);
+ fmc_debug_init(fmc);
}
return 0;
out1:
device_del(&fmc->dev);
out:
- fmc_free_id_info(fmc);
- put_device(&fmc->dev);
-
kfree(devarray);
for (i--; i >= 0; i--) {
+ fmc_debug_exit(devs[i]);
sysfs_remove_bin_file(&devs[i]->dev.kobj, &fmc_eeprom_attr);
device_del(&devs[i]->dev);
fmc_free_id_info(devs[i]);
@@ -254,8 +326,20 @@ out:
return ret;
}
+EXPORT_SYMBOL(fmc_device_register_n_gw);
+
+int fmc_device_register_n(struct fmc_device **devs, int n)
+{
+ return fmc_device_register_n_gw(devs, n, NULL);
+}
EXPORT_SYMBOL(fmc_device_register_n);
+int fmc_device_register_gw(struct fmc_device *fmc, struct fmc_gateware *gw)
+{
+ return fmc_device_register_n_gw(&fmc, 1, gw);
+}
+EXPORT_SYMBOL(fmc_device_register_gw);
+
int fmc_device_register(struct fmc_device *fmc)
{
return fmc_device_register_n(&fmc, 1);
@@ -273,6 +357,7 @@ void fmc_device_unregister_n(struct fmc_device **devs, int n)
kfree(devs[0]->devarray);
for (i = 0; i < n; i++) {
+ fmc_debug_exit(devs[i]);
sysfs_remove_bin_file(&devs[i]->dev.kobj, &fmc_eeprom_attr);
device_del(&devs[i]->dev);
fmc_free_id_info(devs[i]);
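Besides the fmc_* wrappers, the core now exports fmc_device_register_gw() so a carrier can hand over a pre-loaded bitstream at registration time. A hypothetical carrier-side sketch (my_carrier_register() and how the blob is obtained are invented):

static int my_carrier_register(struct fmc_device *fmc,
			       void *bitstream, unsigned long len)
{
	struct fmc_gateware gw = {
		.bitstream	= bitstream,
		.len		= len,
	};

	/* The core calls fmc->op->reprogram_raw() before device_add(). */
	return fmc_device_register_gw(fmc, &gw);
}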
diff --git a/drivers/fmc/fmc-debug.c b/drivers/fmc/fmc-debug.c
new file mode 100644
index 000000000000..32930722770c
--- /dev/null
+++ b/drivers/fmc/fmc-debug.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2015 CERN (www.cern.ch)
+ * Author: Federico Vaga <federico.vaga@cern.ch>
+ *
+ * Released according to the GNU GPL, version 2 or any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <asm/byteorder.h>
+
+#include <linux/fmc.h>
+#include <linux/sdb.h>
+#include <linux/fmc-sdb.h>
+
+#define FMC_DBG_SDB_DUMP "dump_sdb"
+
+static char *__strip_trailing_space(char *buf, char *str, int len)
+{
+ int i = len - 1;
+
+ memcpy(buf, str, len);
+ buf[len] = '\0';
+ while (i >= 0 && buf[i] == ' ')
+ buf[i--] = '\0';
+ return buf;
+}
+
+#define __sdb_string(buf, field) ({ \
+ BUILD_BUG_ON(sizeof(buf) < sizeof(field)); \
+ __strip_trailing_space(buf, (void *)(field), sizeof(field)); \
+ })
+
+/**
+ * We do not check seq_printf() errors because we want to see things in any case
+ */
+static void fmc_sdb_dump_recursive(struct fmc_device *fmc, struct seq_file *s,
+ const struct sdb_array *arr)
+{
+ unsigned long base = arr->baseaddr;
+ int i, j, n = arr->len, level = arr->level;
+ char tmp[64];
+
+ for (i = 0; i < n; i++) {
+ union sdb_record *r;
+ struct sdb_product *p;
+ struct sdb_component *c;
+
+ r = &arr->record[i];
+ c = &r->dev.sdb_component;
+ p = &c->product;
+
+ for (j = 0; j < level; j++)
+ seq_printf(s, " ");
+ switch (r->empty.record_type) {
+ case sdb_type_interconnect:
+ seq_printf(s, "%08llx:%08x %.19s\n",
+ __be64_to_cpu(p->vendor_id),
+ __be32_to_cpu(p->device_id),
+ p->name);
+ break;
+ case sdb_type_device:
+ seq_printf(s, "%08llx:%08x %.19s (%08llx-%08llx)\n",
+ __be64_to_cpu(p->vendor_id),
+ __be32_to_cpu(p->device_id),
+ p->name,
+ __be64_to_cpu(c->addr_first) + base,
+ __be64_to_cpu(c->addr_last) + base);
+ break;
+ case sdb_type_bridge:
+ seq_printf(s, "%08llx:%08x %.19s (bridge: %08llx)\n",
+ __be64_to_cpu(p->vendor_id),
+ __be32_to_cpu(p->device_id),
+ p->name,
+ __be64_to_cpu(c->addr_first) + base);
+ if (IS_ERR(arr->subtree[i])) {
+ seq_printf(s, "SDB: (bridge error %li)\n",
+ PTR_ERR(arr->subtree[i]));
+ break;
+ }
+ fmc_sdb_dump_recursive(fmc, s, arr->subtree[i]);
+ break;
+ case sdb_type_integration:
+ seq_printf(s, "integration\n");
+ break;
+ case sdb_type_repo_url:
+ seq_printf(s, "Synthesis repository: %s\n",
+ __sdb_string(tmp, r->repo_url.repo_url));
+ break;
+ case sdb_type_synthesis:
+ seq_printf(s, "Bitstream '%s' ",
+ __sdb_string(tmp, r->synthesis.syn_name));
+ seq_printf(s, "synthesized %08x by %s ",
+ __be32_to_cpu(r->synthesis.date),
+ __sdb_string(tmp, r->synthesis.user_name));
+ seq_printf(s, "(%s version %x), ",
+ __sdb_string(tmp, r->synthesis.tool_name),
+ __be32_to_cpu(r->synthesis.tool_version));
+ seq_printf(s, "commit %pm\n",
+ r->synthesis.commit_id);
+ break;
+ case sdb_type_empty:
+ seq_printf(s, "empty\n");
+ break;
+ default:
+ seq_printf(s, "UNKNOWN TYPE 0x%02x\n",
+ r->empty.record_type);
+ break;
+ }
+ }
+}
+
+static int fmc_sdb_dump(struct seq_file *s, void *offset)
+{
+ struct fmc_device *fmc = s->private;
+
+ if (!fmc->sdb) {
+ seq_printf(s, "no SDB information\n");
+ return 0;
+ }
+
+ seq_printf(s, "FMC: %s (%s), slot %i, device %s\n", dev_name(fmc->hwdev),
+ fmc->carrier_name, fmc->slot_id, dev_name(&fmc->dev));
+ /* Dump SDB information */
+ fmc_sdb_dump_recursive(fmc, s, fmc->sdb);
+
+ return 0;
+}
+
+
+static int fmc_sdb_dump_open(struct inode *inode, struct file *file)
+{
+ struct fmc_device *fmc = inode->i_private;
+
+ return single_open(file, fmc_sdb_dump, fmc);
+}
+
+
+const struct file_operations fmc_dbgfs_sdb_dump = {
+ .owner = THIS_MODULE,
+ .open = fmc_sdb_dump_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+int fmc_debug_init(struct fmc_device *fmc)
+{
+ fmc->dbg_dir = debugfs_create_dir(dev_name(&fmc->dev), NULL);
+ if (IS_ERR_OR_NULL(fmc->dbg_dir)) {
+ pr_err("FMC: Cannot create debugfs\n");
+ return PTR_ERR(fmc->dbg_dir);
+ }
+
+ fmc->dbg_sdb_dump = debugfs_create_file(FMC_DBG_SDB_DUMP, 0444,
+ fmc->dbg_dir, fmc,
+ &fmc_dbgfs_sdb_dump);
+ if (IS_ERR_OR_NULL(fmc->dbg_sdb_dump))
+ pr_err("FMC: Cannot create debugfs file %s\n",
+ FMC_DBG_SDB_DUMP);
+
+ return 0;
+}
+
+void fmc_debug_exit(struct fmc_device *fmc)
+{
+ if (fmc->dbg_dir)
+ debugfs_remove_recursive(fmc->dbg_dir);
+}
diff --git a/drivers/fmc/fmc-dump.c b/drivers/fmc/fmc-dump.c
index c91afd6388f6..cd1df475b254 100644
--- a/drivers/fmc/fmc-dump.c
+++ b/drivers/fmc/fmc-dump.c
@@ -15,8 +15,6 @@
static int fmc_must_dump_eeprom;
module_param_named(dump_eeprom, fmc_must_dump_eeprom, int, 0644);
-static int fmc_must_dump_sdb;
-module_param_named(dump_sdb, fmc_must_dump_sdb, int, 0644);
#define LINELEN 16
@@ -59,42 +57,3 @@ void fmc_dump_eeprom(const struct fmc_device *fmc)
for (i = 0; i < fmc->eeprom_len; i += LINELEN, line += LINELEN)
prev = dump_line(i, line, prev);
}
-
-void fmc_dump_sdb(const struct fmc_device *fmc)
-{
- const uint8_t *line, *prev;
- int i, len;
-
- if (!fmc->sdb)
- return;
- if (!fmc_must_dump_sdb)
- return;
-
- /* If the argument is not-zero, do simple dump (== show) */
- if (fmc_must_dump_sdb > 0)
- fmc_show_sdb_tree(fmc);
-
- if (fmc_must_dump_sdb == 1)
- return;
-
- /* If bigger than 1, dump it seriously, to help debugging */
-
- /*
- * Here we should really use libsdbfs (which is designed to
- * work in kernel space as well) , but it doesn't support
- * directories yet, and it requires better intergration (it
- * should be used instead of fmc-specific code).
- *
- * So, lazily, just dump the top-level array
- */
- pr_info("FMC: %s (%s), slot %i, device %s\n", dev_name(fmc->hwdev),
- fmc->carrier_name, fmc->slot_id, dev_name(&fmc->dev));
- pr_info("FMC: poor dump of sdb first level:\n");
-
- len = fmc->sdb->len * sizeof(union sdb_record);
- line = (void *)fmc->sdb->record;
- prev = NULL;
- for (i = 0; i < len; i += LINELEN, line += LINELEN)
- prev = dump_line(i, line, prev);
- return;
-}
diff --git a/drivers/fmc/fmc-match.c b/drivers/fmc/fmc-match.c
index 104a5efc2207..a0956d1f7550 100644
--- a/drivers/fmc/fmc-match.c
+++ b/drivers/fmc/fmc-match.c
@@ -63,7 +63,7 @@ int fmc_fill_id_info(struct fmc_device *fmc)
if (!fmc->eeprom)
return -ENOMEM;
allocated = 1;
- ret = fmc->op->read_ee(fmc, 0, fmc->eeprom, fmc->eeprom_len);
+ ret = fmc_read_ee(fmc, 0, fmc->eeprom, fmc->eeprom_len);
if (ret < 0)
goto out;
}
diff --git a/drivers/fmc/fmc-private.h b/drivers/fmc/fmc-private.h
new file mode 100644
index 000000000000..1e5136643bdc
--- /dev/null
+++ b/drivers/fmc/fmc-private.h
@@ -0,0 +1,9 @@
+/*
+ * Copyright (C) 2015 CERN (www.cern.ch)
+ * Author: Federico Vaga <federico.vaga@cern.ch>
+ *
+ * Released according to the GNU GPL, version 2 or any later version.
+ */
+
+extern int fmc_debug_init(struct fmc_device *fmc);
+extern void fmc_debug_exit(struct fmc_device *fmc);
diff --git a/drivers/fmc/fmc-sdb.c b/drivers/fmc/fmc-sdb.c
index 4603fdb74465..ffdc1762b580 100644
--- a/drivers/fmc/fmc-sdb.c
+++ b/drivers/fmc/fmc-sdb.c
@@ -127,12 +127,12 @@ int fmc_free_sdb_tree(struct fmc_device *fmc)
EXPORT_SYMBOL(fmc_free_sdb_tree);
/* This helper calls reprogram and inizialized sdb as well */
-int fmc_reprogram(struct fmc_device *fmc, struct fmc_driver *d, char *gw,
- int sdb_entry)
+int fmc_reprogram_raw(struct fmc_device *fmc, struct fmc_driver *d,
+ void *gw, unsigned long len, int sdb_entry)
{
int ret;
- ret = fmc->op->reprogram(fmc, d, gw);
+ ret = fmc->op->reprogram_raw(fmc, d, gw, len);
if (ret < 0)
return ret;
if (sdb_entry < 0)
@@ -145,108 +145,39 @@ int fmc_reprogram(struct fmc_device *fmc, struct fmc_driver *d, char *gw,
sdb_entry);
return -ENODEV;
}
- fmc_dump_sdb(fmc);
- return 0;
-}
-EXPORT_SYMBOL(fmc_reprogram);
-
-static char *__strip_trailing_space(char *buf, char *str, int len)
-{
- int i = len - 1;
- memcpy(buf, str, len);
- while(i >= 0 && buf[i] == ' ')
- buf[i--] = '\0';
- return buf;
+ return 0;
}
+EXPORT_SYMBOL(fmc_reprogram_raw);
-#define __sdb_string(buf, field) ({ \
- BUILD_BUG_ON(sizeof(buf) < sizeof(field)); \
- __strip_trailing_space(buf, (void *)(field), sizeof(field)); \
- })
-
-static void __fmc_show_sdb_tree(const struct fmc_device *fmc,
- const struct sdb_array *arr)
+/* This helper calls reprogram and initializes the SDB as well */
+int fmc_reprogram(struct fmc_device *fmc, struct fmc_driver *d, char *gw,
+ int sdb_entry)
{
- unsigned long base = arr->baseaddr;
- int i, j, n = arr->len, level = arr->level;
- char buf[64];
-
- for (i = 0; i < n; i++) {
- union sdb_record *r;
- struct sdb_product *p;
- struct sdb_component *c;
- r = &arr->record[i];
- c = &r->dev.sdb_component;
- p = &c->product;
+ int ret;
- dev_info(&fmc->dev, "SDB: ");
+ ret = fmc->op->reprogram(fmc, d, gw);
+ if (ret < 0)
+ return ret;
+ if (sdb_entry < 0)
+ return ret;
- for (j = 0; j < level; j++)
- printk(KERN_CONT " ");
- switch (r->empty.record_type) {
- case sdb_type_interconnect:
- printk(KERN_CONT "%08llx:%08x %.19s\n",
- __be64_to_cpu(p->vendor_id),
- __be32_to_cpu(p->device_id),
- p->name);
- break;
- case sdb_type_device:
- printk(KERN_CONT "%08llx:%08x %.19s (%08llx-%08llx)\n",
- __be64_to_cpu(p->vendor_id),
- __be32_to_cpu(p->device_id),
- p->name,
- __be64_to_cpu(c->addr_first) + base,
- __be64_to_cpu(c->addr_last) + base);
- break;
- case sdb_type_bridge:
- printk(KERN_CONT "%08llx:%08x %.19s (bridge: %08llx)\n",
- __be64_to_cpu(p->vendor_id),
- __be32_to_cpu(p->device_id),
- p->name,
- __be64_to_cpu(c->addr_first) + base);
- if (IS_ERR(arr->subtree[i])) {
- dev_info(&fmc->dev, "SDB: (bridge error %li)\n",
- PTR_ERR(arr->subtree[i]));
- break;
- }
- __fmc_show_sdb_tree(fmc, arr->subtree[i]);
- break;
- case sdb_type_integration:
- printk(KERN_CONT "integration\n");
- break;
- case sdb_type_repo_url:
- printk(KERN_CONT "Synthesis repository: %s\n",
- __sdb_string(buf, r->repo_url.repo_url));
- break;
- case sdb_type_synthesis:
- printk(KERN_CONT "Bitstream '%s' ",
- __sdb_string(buf, r->synthesis.syn_name));
- printk(KERN_CONT "synthesized %08x by %s ",
- __be32_to_cpu(r->synthesis.date),
- __sdb_string(buf, r->synthesis.user_name));
- printk(KERN_CONT "(%s version %x), ",
- __sdb_string(buf, r->synthesis.tool_name),
- __be32_to_cpu(r->synthesis.tool_version));
- printk(KERN_CONT "commit %pm\n",
- r->synthesis.commit_id);
- break;
- case sdb_type_empty:
- printk(KERN_CONT "empty\n");
- break;
- default:
- printk(KERN_CONT "UNKNOWN TYPE 0x%02x\n",
- r->empty.record_type);
- break;
- }
+ /* We are required to find SDB at a given offset */
+ ret = fmc_scan_sdb_tree(fmc, sdb_entry);
+ if (ret < 0) {
+ dev_err(&fmc->dev, "Can't find SDB at address 0x%x\n",
+ sdb_entry);
+ return -ENODEV;
}
+
+ return 0;
}
+EXPORT_SYMBOL(fmc_reprogram);
void fmc_show_sdb_tree(const struct fmc_device *fmc)
{
- if (!fmc->sdb)
- return;
- __fmc_show_sdb_tree(fmc, fmc->sdb);
+ pr_err("%s: not supported anymore, use debugfs to dump SDB\n",
+ __func__);
}
EXPORT_SYMBOL(fmc_show_sdb_tree);
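fmc_reprogram_raw() takes an in-memory blob rather than a filename, so a driver can feed it data obtained through request_firmware(). A hypothetical sketch (my_load_gateware() and the firmware name handling are invented):

#include <linux/firmware.h>

static int my_load_gateware(struct fmc_device *fmc, struct fmc_driver *drv,
			    const char *name)
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, name, &fmc->dev);
	if (ret)
		return ret;

	/* Program the blob, then scan for SDB at offset 0. */
	ret = fmc_reprogram_raw(fmc, drv, (void *)fw->data, fw->size, 0);

	release_firmware(fw);
	return ret;
}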
diff --git a/drivers/fmc/fmc-trivial.c b/drivers/fmc/fmc-trivial.c
index 6c590f54c79d..8defdee3e3a3 100644
--- a/drivers/fmc/fmc-trivial.c
+++ b/drivers/fmc/fmc-trivial.c
@@ -24,7 +24,7 @@ static irqreturn_t t_handler(int irq, void *dev_id)
{
struct fmc_device *fmc = dev_id;
- fmc->op->irq_ack(fmc);
+ fmc_irq_ack(fmc);
dev_info(&fmc->dev, "received irq %i\n", irq);
return IRQ_HANDLED;
}
@@ -46,25 +46,21 @@ static int t_probe(struct fmc_device *fmc)
int ret;
int index = 0;
- if (fmc->op->validate)
- index = fmc->op->validate(fmc, &t_drv);
+ index = fmc_validate(fmc, &t_drv);
if (index < 0)
return -EINVAL; /* not our device: invalid */
- ret = fmc->op->irq_request(fmc, t_handler, "fmc-trivial", IRQF_SHARED);
+ ret = fmc_irq_request(fmc, t_handler, "fmc-trivial", IRQF_SHARED);
if (ret < 0)
return ret;
/* ignore error code of call below, we really don't care */
- fmc->op->gpio_config(fmc, t_gpio, ARRAY_SIZE(t_gpio));
+ fmc_gpio_config(fmc, t_gpio, ARRAY_SIZE(t_gpio));
- /* Reprogram, if asked to. ESRCH == no filename specified */
- ret = -ESRCH;
- if (fmc->op->reprogram)
- ret = fmc->op->reprogram(fmc, &t_drv, "");
- if (ret == -ESRCH)
+ ret = fmc_reprogram(fmc, &t_drv, "", 0);
+ if (ret == -EPERM) /* programming not supported */
ret = 0;
if (ret < 0)
- fmc->op->irq_free(fmc);
+ fmc_irq_free(fmc);
/* FIXME: reprogram LM32 too */
return ret;
@@ -72,7 +68,7 @@ static int t_probe(struct fmc_device *fmc)
static int t_remove(struct fmc_device *fmc)
{
- fmc->op->irq_free(fmc);
+ fmc_irq_free(fmc);
return 0;
}
diff --git a/drivers/fmc/fmc-write-eeprom.c b/drivers/fmc/fmc-write-eeprom.c
index 9bb2cbd5c9f2..3eb81bb1f1fc 100644
--- a/drivers/fmc/fmc-write-eeprom.c
+++ b/drivers/fmc/fmc-write-eeprom.c
@@ -50,7 +50,7 @@ static int fwe_run_tlv(struct fmc_device *fmc, const struct firmware *fw,
if (write) {
dev_info(&fmc->dev, "write %i bytes at 0x%04x\n",
thislen, thisaddr);
- err = fmc->op->write_ee(fmc, thisaddr, p + 5, thislen);
+ err = fmc_write_ee(fmc, thisaddr, p + 5, thislen);
}
if (err < 0) {
dev_err(&fmc->dev, "write failure @0x%04x\n",
@@ -70,7 +70,7 @@ static int fwe_run_bin(struct fmc_device *fmc, const struct firmware *fw)
int ret;
dev_info(&fmc->dev, "programming %zi bytes\n", fw->size);
- ret = fmc->op->write_ee(fmc, 0, (void *)fw->data, fw->size);
+ ret = fmc_write_ee(fmc, 0, (void *)fw->data, fw->size);
if (ret < 0) {
dev_info(&fmc->dev, "write_eeprom: error %i\n", ret);
return ret;
@@ -115,8 +115,8 @@ static int fwe_probe(struct fmc_device *fmc)
KBUILD_MODNAME);
return -ENODEV;
}
- if (fmc->op->validate)
- index = fmc->op->validate(fmc, &fwe_drv);
+
+ index = fmc_validate(fmc, &fwe_drv);
if (index < 0) {
pr_err("%s: refusing device \"%s\"\n", KBUILD_MODNAME,
dev_name(dev));
diff --git a/drivers/fmc/fru-parse.c b/drivers/fmc/fru-parse.c
index cb46263c5da2..eb21480d399f 100644
--- a/drivers/fmc/fru-parse.c
+++ b/drivers/fmc/fru-parse.c
@@ -31,12 +31,11 @@ static char *__fru_alloc_get_tl(struct fru_common_header *header, int nr)
{
struct fru_type_length *tl;
char *res;
- int len;
tl = __fru_get_board_tl(header, nr);
if (!tl)
return NULL;
- len = fru_strlen(tl);
+
res = fru_alloc(fru_strlen(tl) + 1);
if (!res)
return NULL;
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index 161ba9dccede..ad5448f718b3 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -2,9 +2,7 @@
# FPGA framework configuration
#
-menu "FPGA Configuration Support"
-
-config FPGA
+menuconfig FPGA
tristate "FPGA Configuration Framework"
help
Say Y here if you want support for configuring FPGAs from the
@@ -26,6 +24,20 @@ config FPGA_MGR_ICE40_SPI
help
FPGA manager driver support for Lattice iCE40 FPGAs over SPI.
+config FPGA_MGR_ALTERA_CVP
+ tristate "Altera Arria-V/Cyclone-V/Stratix-V CvP FPGA Manager"
+ depends on PCI
+ help
+ FPGA manager driver support for Arria-V, Cyclone-V, Stratix-V
+ and Arria 10 Altera FPGAs using the CvP interface over PCIe.
+
+config FPGA_MGR_ALTERA_PS_SPI
+ tristate "Altera FPGA Passive Serial over SPI"
+ depends on SPI
+ help
+ FPGA manager driver support for Altera Arria/Cyclone/Stratix
+ using the passive serial interface over SPI.
+
config FPGA_MGR_SOCFPGA
tristate "Altera SOCFPGA FPGA Manager"
depends on ARCH_SOCFPGA || COMPILE_TEST
@@ -106,5 +118,3 @@ config XILINX_PR_DECOUPLER
being reprogrammed during partial reconfig.
endif # FPGA
-
-endmenu
diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile
index 2a4f0218145c..e09895f0525b 100644
--- a/drivers/fpga/Makefile
+++ b/drivers/fpga/Makefile
@@ -6,6 +6,8 @@
obj-$(CONFIG_FPGA) += fpga-mgr.o
# FPGA Manager Drivers
+obj-$(CONFIG_FPGA_MGR_ALTERA_CVP) += altera-cvp.o
+obj-$(CONFIG_FPGA_MGR_ALTERA_PS_SPI) += altera-ps-spi.o
obj-$(CONFIG_FPGA_MGR_ICE40_SPI) += ice40-spi.o
obj-$(CONFIG_FPGA_MGR_SOCFPGA) += socfpga.o
obj-$(CONFIG_FPGA_MGR_SOCFPGA_A10) += socfpga-a10.o
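The two new managers added below (altera-cvp and altera-ps-spi) both end up registering with the fpga-mgr core. A stripped-down, hypothetical registration sketch; the fpga_manager_ops callback signatures are assumed from the framework as of this kernel, and all my_* names are placeholders:

#include <linux/fpga/fpga-mgr.h>

static enum fpga_mgr_states my_state(struct fpga_manager *mgr)
{
	return FPGA_MGR_STATE_UNKNOWN;
}

static int my_write_init(struct fpga_manager *mgr, struct fpga_image_info *info,
			 const char *buf, size_t count)
{
	return 0;	/* prepare the device for configuration data */
}

static int my_write(struct fpga_manager *mgr, const char *buf, size_t count)
{
	return 0;	/* push configuration data to the device */
}

static int my_write_complete(struct fpga_manager *mgr,
			     struct fpga_image_info *info)
{
	return 0;	/* wait for the device to enter user mode */
}

static const struct fpga_manager_ops my_ops = {
	.state		= my_state,
	.write_init	= my_write_init,
	.write		= my_write,
	.write_complete	= my_write_complete,
};

/* In probe(): fpga_mgr_register(dev, "my fpga manager", &my_ops, priv); */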
diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c
new file mode 100644
index 000000000000..08629ee69d11
--- /dev/null
+++ b/drivers/fpga/altera-cvp.c
@@ -0,0 +1,500 @@
+/*
+ * FPGA Manager Driver for Altera Arria/Cyclone/Stratix CvP
+ *
+ * Copyright (C) 2017 DENX Software Engineering
+ *
+ * Anatolij Gustschin <agust@denx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Manage Altera FPGA firmware using PCIe CvP.
+ * Firmware must be in binary "rbf" format.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/sizes.h>
+
+#define CVP_BAR 0 /* BAR used for data transfer in memory mode */
+#define CVP_DUMMY_WR 244 /* dummy writes to clear CvP state machine */
+#define TIMEOUT_US 2000 /* CVP STATUS timeout for USERMODE polling */
+
+/* Vendor Specific Extended Capability Registers */
+#define VSE_PCIE_EXT_CAP_ID 0x200
+#define VSE_PCIE_EXT_CAP_ID_VAL 0x000b /* 16bit */
+
+#define VSE_CVP_STATUS 0x21c /* 32bit */
+#define VSE_CVP_STATUS_CFG_RDY BIT(18) /* CVP_CONFIG_READY */
+#define VSE_CVP_STATUS_CFG_ERR BIT(19) /* CVP_CONFIG_ERROR */
+#define VSE_CVP_STATUS_CVP_EN BIT(20) /* ctrl block is enabling CVP */
+#define VSE_CVP_STATUS_USERMODE BIT(21) /* USERMODE */
+#define VSE_CVP_STATUS_CFG_DONE BIT(23) /* CVP_CONFIG_DONE */
+#define VSE_CVP_STATUS_PLD_CLK_IN_USE BIT(24) /* PLD_CLK_IN_USE */
+
+#define VSE_CVP_MODE_CTRL 0x220 /* 32bit */
+#define VSE_CVP_MODE_CTRL_CVP_MODE BIT(0) /* CVP (1) or normal mode (0) */
+#define VSE_CVP_MODE_CTRL_HIP_CLK_SEL BIT(1) /* PMA (1) or fabric clock (0) */
+#define VSE_CVP_MODE_CTRL_NUMCLKS_OFF 8 /* NUMCLKS bits offset */
+#define VSE_CVP_MODE_CTRL_NUMCLKS_MASK GENMASK(15, 8)
+
+#define VSE_CVP_DATA 0x228 /* 32bit */
+#define VSE_CVP_PROG_CTRL 0x22c /* 32bit */
+#define VSE_CVP_PROG_CTRL_CONFIG BIT(0)
+#define VSE_CVP_PROG_CTRL_START_XFER BIT(1)
+
+#define VSE_UNCOR_ERR_STATUS 0x234 /* 32bit */
+#define VSE_UNCOR_ERR_CVP_CFG_ERR BIT(5) /* CVP_CONFIG_ERROR_LATCHED */
+
+#define DRV_NAME "altera-cvp"
+#define ALTERA_CVP_MGR_NAME "Altera CvP FPGA Manager"
+
+/* Optional CvP config error status check for debugging */
+static bool altera_cvp_chkcfg;
+
+struct altera_cvp_conf {
+ struct fpga_manager *mgr;
+ struct pci_dev *pci_dev;
+ void __iomem *map;
+ void (*write_data)(struct altera_cvp_conf *, u32);
+ char mgr_name[64];
+ u8 numclks;
+};
+
+static enum fpga_mgr_states altera_cvp_state(struct fpga_manager *mgr)
+{
+ struct altera_cvp_conf *conf = mgr->priv;
+ u32 status;
+
+ pci_read_config_dword(conf->pci_dev, VSE_CVP_STATUS, &status);
+
+ if (status & VSE_CVP_STATUS_CFG_DONE)
+ return FPGA_MGR_STATE_OPERATING;
+
+ if (status & VSE_CVP_STATUS_CVP_EN)
+ return FPGA_MGR_STATE_POWER_UP;
+
+ return FPGA_MGR_STATE_UNKNOWN;
+}
+
+static void altera_cvp_write_data_iomem(struct altera_cvp_conf *conf, u32 val)
+{
+ writel(val, conf->map);
+}
+
+static void altera_cvp_write_data_config(struct altera_cvp_conf *conf, u32 val)
+{
+ pci_write_config_dword(conf->pci_dev, VSE_CVP_DATA, val);
+}
+
+/* switches between CvP clock and internal clock */
+static void altera_cvp_dummy_write(struct altera_cvp_conf *conf)
+{
+ unsigned int i;
+ u32 val;
+
+ /* set 1 CVP clock cycle for every CVP Data Register Write */
+ pci_read_config_dword(conf->pci_dev, VSE_CVP_MODE_CTRL, &val);
+ val &= ~VSE_CVP_MODE_CTRL_NUMCLKS_MASK;
+ val |= 1 << VSE_CVP_MODE_CTRL_NUMCLKS_OFF;
+ pci_write_config_dword(conf->pci_dev, VSE_CVP_MODE_CTRL, val);
+
+ for (i = 0; i < CVP_DUMMY_WR; i++)
+ conf->write_data(conf, 0); /* dummy data, could be any value */
+}
+
+static int altera_cvp_wait_status(struct altera_cvp_conf *conf, u32 status_mask,
+ u32 status_val, int timeout_us)
+{
+ unsigned int retries;
+ u32 val;
+
+ retries = timeout_us / 10;
+ if (timeout_us % 10)
+ retries++;
+
+ do {
+ pci_read_config_dword(conf->pci_dev, VSE_CVP_STATUS, &val);
+ if ((val & status_mask) == status_val)
+ return 0;
+
+ /* use small usleep value to re-check and break early */
+ usleep_range(10, 11);
+ } while (--retries);
+
+ return -ETIMEDOUT;
+}
+
+static int altera_cvp_teardown(struct fpga_manager *mgr,
+ struct fpga_image_info *info)
+{
+ struct altera_cvp_conf *conf = mgr->priv;
+ struct pci_dev *pdev = conf->pci_dev;
+ int ret;
+ u32 val;
+
+ /* STEP 12 - reset START_XFER bit */
+ pci_read_config_dword(pdev, VSE_CVP_PROG_CTRL, &val);
+ val &= ~VSE_CVP_PROG_CTRL_START_XFER;
+ pci_write_config_dword(pdev, VSE_CVP_PROG_CTRL, val);
+
+ /* STEP 13 - reset CVP_CONFIG bit */
+ val &= ~VSE_CVP_PROG_CTRL_CONFIG;
+ pci_write_config_dword(pdev, VSE_CVP_PROG_CTRL, val);
+
+ /*
+ * STEP 14
+ * - set CVP_NUMCLKS to 1 and then issue CVP_DUMMY_WR dummy
+ * writes to the HIP
+ */
+ altera_cvp_dummy_write(conf); /* from CVP clock to internal clock */
+
+ /* STEP 15 - poll CVP_CONFIG_READY bit for 0 with 10us timeout */
+ ret = altera_cvp_wait_status(conf, VSE_CVP_STATUS_CFG_RDY, 0, 10);
+ if (ret)
+ dev_err(&mgr->dev, "CFG_RDY == 0 timeout\n");
+
+ return ret;
+}
+
+static int altera_cvp_write_init(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t count)
+{
+ struct altera_cvp_conf *conf = mgr->priv;
+ struct pci_dev *pdev = conf->pci_dev;
+ u32 iflags, val;
+ int ret;
+
+ iflags = info ? info->flags : 0;
+
+ if (iflags & FPGA_MGR_PARTIAL_RECONFIG) {
+ dev_err(&mgr->dev, "Partial reconfiguration not supported.\n");
+ return -EINVAL;
+ }
+
+ /* Determine allowed clock to data ratio */
+ if (iflags & FPGA_MGR_COMPRESSED_BITSTREAM)
+ conf->numclks = 8; /* ratio for all compressed images */
+ else if (iflags & FPGA_MGR_ENCRYPTED_BITSTREAM)
+ conf->numclks = 4; /* for uncompressed and encrypted images */
+ else
+ conf->numclks = 1; /* for uncompressed and unencrypted images */
+
+ /* STEP 1 - read CVP status and check CVP_EN flag */
+ pci_read_config_dword(pdev, VSE_CVP_STATUS, &val);
+ if (!(val & VSE_CVP_STATUS_CVP_EN)) {
+ dev_err(&mgr->dev, "CVP mode off: 0x%04x\n", val);
+ return -ENODEV;
+ }
+
+ if (val & VSE_CVP_STATUS_CFG_RDY) {
+ dev_warn(&mgr->dev, "CvP already started, teardown first\n");
+ ret = altera_cvp_teardown(mgr, info);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * STEP 2
+ * - set HIP_CLK_SEL and CVP_MODE (must be set in the order mentioned)
+ */
+ /* switch from fabric to PMA clock */
+ pci_read_config_dword(pdev, VSE_CVP_MODE_CTRL, &val);
+ val |= VSE_CVP_MODE_CTRL_HIP_CLK_SEL;
+ pci_write_config_dword(pdev, VSE_CVP_MODE_CTRL, val);
+
+ /* set CVP mode */
+ pci_read_config_dword(pdev, VSE_CVP_MODE_CTRL, &val);
+ val |= VSE_CVP_MODE_CTRL_CVP_MODE;
+ pci_write_config_dword(pdev, VSE_CVP_MODE_CTRL, val);
+
+ /*
+ * STEP 3
+ * - set CVP_NUMCLKS to 1 and issue CVP_DUMMY_WR dummy writes to the HIP
+ */
+ altera_cvp_dummy_write(conf);
+
+ /* STEP 4 - set CVP_CONFIG bit */
+ pci_read_config_dword(pdev, VSE_CVP_PROG_CTRL, &val);
+ /* request control block to begin transfer using CVP */
+ val |= VSE_CVP_PROG_CTRL_CONFIG;
+ pci_write_config_dword(pdev, VSE_CVP_PROG_CTRL, val);
+
+ /* STEP 5 - poll CVP_CONFIG READY for 1 with 10us timeout */
+ ret = altera_cvp_wait_status(conf, VSE_CVP_STATUS_CFG_RDY,
+ VSE_CVP_STATUS_CFG_RDY, 10);
+ if (ret) {
+ dev_warn(&mgr->dev, "CFG_RDY == 1 timeout\n");
+ return ret;
+ }
+
+ /*
+ * STEP 6
+ * - set CVP_NUMCLKS to 1 and issue CVP_DUMMY_WR dummy writes to the HIP
+ */
+ altera_cvp_dummy_write(conf);
+
+ /* STEP 7 - set START_XFER */
+ pci_read_config_dword(pdev, VSE_CVP_PROG_CTRL, &val);
+ val |= VSE_CVP_PROG_CTRL_START_XFER;
+ pci_write_config_dword(pdev, VSE_CVP_PROG_CTRL, val);
+
+ /* STEP 8 - start transfer (set CVP_NUMCLKS for bitstream) */
+ pci_read_config_dword(pdev, VSE_CVP_MODE_CTRL, &val);
+ val &= ~VSE_CVP_MODE_CTRL_NUMCLKS_MASK;
+ val |= conf->numclks << VSE_CVP_MODE_CTRL_NUMCLKS_OFF;
+ pci_write_config_dword(pdev, VSE_CVP_MODE_CTRL, val);
+
+ return 0;
+}
+
+static inline int altera_cvp_chk_error(struct fpga_manager *mgr, size_t bytes)
+{
+ struct altera_cvp_conf *conf = mgr->priv;
+ u32 val;
+
+ /* STEP 10 (optional) - check CVP_CONFIG_ERROR flag */
+ pci_read_config_dword(conf->pci_dev, VSE_CVP_STATUS, &val);
+ if (val & VSE_CVP_STATUS_CFG_ERR) {
+ dev_err(&mgr->dev, "CVP_CONFIG_ERROR after %zu bytes!\n",
+ bytes);
+ return -EPROTO;
+ }
+ return 0;
+}
+
+static int altera_cvp_write(struct fpga_manager *mgr, const char *buf,
+ size_t count)
+{
+ struct altera_cvp_conf *conf = mgr->priv;
+ const u32 *data;
+ size_t done, remaining;
+ int status = 0;
+ u32 mask;
+
+ /* STEP 9 - write 32-bit data from RBF file to CVP data register */
+ data = (u32 *)buf;
+ remaining = count;
+ done = 0;
+
+ while (remaining >= 4) {
+ conf->write_data(conf, *data++);
+ done += 4;
+ remaining -= 4;
+
+ /*
+ * STEP 10 (optional) and STEP 11
+ * - check error flag
+ * - loop until data transfer completed
+ * Config images can be huge (more than 40 MiB), so
+ * only check after a new 4k data block has been written.
+ * This reduces the number of checks and speeds up the
+ * configuration process.
+ */
+ if (altera_cvp_chkcfg && !(done % SZ_4K)) {
+ status = altera_cvp_chk_error(mgr, done);
+ if (status < 0)
+ return status;
+ }
+ }
+
+ /* write up to 3 trailing bytes, if any */
+ mask = BIT(remaining * 8) - 1;
+ if (mask)
+ conf->write_data(conf, *data & mask);
+
+ if (altera_cvp_chkcfg)
+ status = altera_cvp_chk_error(mgr, count);
+
+ return status;
+}
+
+static int altera_cvp_write_complete(struct fpga_manager *mgr,
+ struct fpga_image_info *info)
+{
+ struct altera_cvp_conf *conf = mgr->priv;
+ struct pci_dev *pdev = conf->pci_dev;
+ int ret;
+ u32 mask;
+ u32 val;
+
+ ret = altera_cvp_teardown(mgr, info);
+ if (ret)
+ return ret;
+
+ /* STEP 16 - check CVP_CONFIG_ERROR_LATCHED bit */
+ pci_read_config_dword(pdev, VSE_UNCOR_ERR_STATUS, &val);
+ if (val & VSE_UNCOR_ERR_CVP_CFG_ERR) {
+ dev_err(&mgr->dev, "detected CVP_CONFIG_ERROR_LATCHED!\n");
+ return -EPROTO;
+ }
+
+ /* STEP 17 - reset CVP_MODE and HIP_CLK_SEL bit */
+ pci_read_config_dword(pdev, VSE_CVP_MODE_CTRL, &val);
+ val &= ~VSE_CVP_MODE_CTRL_HIP_CLK_SEL;
+ val &= ~VSE_CVP_MODE_CTRL_CVP_MODE;
+ pci_write_config_dword(pdev, VSE_CVP_MODE_CTRL, val);
+
+ /* STEP 18 - poll PLD_CLK_IN_USE and USER_MODE bits */
+ mask = VSE_CVP_STATUS_PLD_CLK_IN_USE | VSE_CVP_STATUS_USERMODE;
+ ret = altera_cvp_wait_status(conf, mask, mask, TIMEOUT_US);
+ if (ret)
+ dev_err(&mgr->dev, "PLD_CLK_IN_USE|USERMODE timeout\n");
+
+ return ret;
+}
+
+static const struct fpga_manager_ops altera_cvp_ops = {
+ .state = altera_cvp_state,
+ .write_init = altera_cvp_write_init,
+ .write = altera_cvp_write,
+ .write_complete = altera_cvp_write_complete,
+};
+
+static ssize_t show_chkcfg(struct device_driver *dev, char *buf)
+{
+ return snprintf(buf, 3, "%d\n", altera_cvp_chkcfg);
+}
+
+static ssize_t store_chkcfg(struct device_driver *drv, const char *buf,
+ size_t count)
+{
+ int ret;
+
+ ret = kstrtobool(buf, &altera_cvp_chkcfg);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static DRIVER_ATTR(chkcfg, 0600, show_chkcfg, store_chkcfg);
+
+static int altera_cvp_probe(struct pci_dev *pdev,
+ const struct pci_device_id *dev_id);
+static void altera_cvp_remove(struct pci_dev *pdev);
+
+#define PCI_VENDOR_ID_ALTERA 0x1172
+
+static struct pci_device_id altera_cvp_id_tbl[] = {
+ { PCI_VDEVICE(ALTERA, PCI_ANY_ID) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, altera_cvp_id_tbl);
+
+static struct pci_driver altera_cvp_driver = {
+ .name = DRV_NAME,
+ .id_table = altera_cvp_id_tbl,
+ .probe = altera_cvp_probe,
+ .remove = altera_cvp_remove,
+};
+
+static int altera_cvp_probe(struct pci_dev *pdev,
+ const struct pci_device_id *dev_id)
+{
+ struct altera_cvp_conf *conf;
+ u16 cmd, val;
+ int ret;
+
+ /*
+ * First check if this is the expected FPGA device. PCI config
+ * space access works without enabling the PCI device, memory
+ * space access is enabled further down.
+ */
+ pci_read_config_word(pdev, VSE_PCIE_EXT_CAP_ID, &val);
+ if (val != VSE_PCIE_EXT_CAP_ID_VAL) {
+ dev_err(&pdev->dev, "Wrong EXT_CAP_ID value 0x%x\n", val);
+ return -ENODEV;
+ }
+
+ conf = devm_kzalloc(&pdev->dev, sizeof(*conf), GFP_KERNEL);
+ if (!conf)
+ return -ENOMEM;
+
+ /*
+ * Enable memory BAR access. We cannot use pci_enable_device() here
+ * because it will make the driver unusable with FPGA devices that
+ * have additional big IOMEM resources (e.g. 4GiB BARs) on 32-bit
+ * platform. Such BARs will not have an assigned address range and
+ * pci_enable_device() will fail, complaining about not claimed BAR,
+ * even if the concerned BAR is not needed for FPGA configuration
+ * at all. Thus, enable the device via PCI config space command.
+ */
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ if (!(cmd & PCI_COMMAND_MEMORY)) {
+ cmd |= PCI_COMMAND_MEMORY;
+ pci_write_config_word(pdev, PCI_COMMAND, cmd);
+ }
+
+ ret = pci_request_region(pdev, CVP_BAR, "CVP");
+ if (ret) {
+ dev_err(&pdev->dev, "Requesting CVP BAR region failed\n");
+ goto err_disable;
+ }
+
+ conf->pci_dev = pdev;
+ conf->write_data = altera_cvp_write_data_iomem;
+
+ conf->map = pci_iomap(pdev, CVP_BAR, 0);
+ if (!conf->map) {
+ dev_warn(&pdev->dev, "Mapping CVP BAR failed\n");
+ conf->write_data = altera_cvp_write_data_config;
+ }
+
+ snprintf(conf->mgr_name, sizeof(conf->mgr_name), "%s @%s",
+ ALTERA_CVP_MGR_NAME, pci_name(pdev));
+
+ ret = fpga_mgr_register(&pdev->dev, conf->mgr_name,
+ &altera_cvp_ops, conf);
+ if (ret)
+ goto err_unmap;
+
+ ret = driver_create_file(&altera_cvp_driver.driver,
+ &driver_attr_chkcfg);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't create sysfs chkcfg file\n");
+ fpga_mgr_unregister(&pdev->dev);
+ goto err_unmap;
+ }
+
+ return 0;
+
+err_unmap:
+ pci_iounmap(pdev, conf->map);
+ pci_release_region(pdev, CVP_BAR);
+err_disable:
+ cmd &= ~PCI_COMMAND_MEMORY;
+ pci_write_config_word(pdev, PCI_COMMAND, cmd);
+ return ret;
+}
+
+static void altera_cvp_remove(struct pci_dev *pdev)
+{
+ struct fpga_manager *mgr = pci_get_drvdata(pdev);
+ struct altera_cvp_conf *conf = mgr->priv;
+ u16 cmd;
+
+ driver_remove_file(&altera_cvp_driver.driver, &driver_attr_chkcfg);
+ fpga_mgr_unregister(&pdev->dev);
+ pci_iounmap(pdev, conf->map);
+ pci_release_region(pdev, CVP_BAR);
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ cmd &= ~PCI_COMMAND_MEMORY;
+ pci_write_config_word(pdev, PCI_COMMAND, cmd);
+}
+
+module_pci_driver(altera_cvp_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Anatolij Gustschin <agust@denx.de>");
+MODULE_DESCRIPTION("Module to load Altera FPGA over CvP");
diff --git a/drivers/fpga/altera-hps2fpga.c b/drivers/fpga/altera-hps2fpga.c
index 3066b805f2d0..406d2f10741f 100644
--- a/drivers/fpga/altera-hps2fpga.c
+++ b/drivers/fpga/altera-hps2fpga.c
@@ -66,7 +66,7 @@ static int alt_hps2fpga_enable_show(struct fpga_bridge *bridge)
/* The L3 REMAP register is write only, so keep a cached value. */
static unsigned int l3_remap_shadow;
-static spinlock_t l3_remap_lock;
+static DEFINE_SPINLOCK(l3_remap_lock);
static int _alt_hps2fpga_enable_set(struct altera_hps2fpga_data *priv,
bool enable)
@@ -143,9 +143,15 @@ static int alt_fpga_bridge_probe(struct platform_device *pdev)
int ret;
of_id = of_match_device(altera_fpga_of_match, dev);
+ if (!of_id) {
+ dev_err(dev, "failed to match device\n");
+ return -ENODEV;
+ }
+
priv = (struct altera_hps2fpga_data *)of_id->data;
- priv->bridge_reset = of_reset_control_get_by_index(dev->of_node, 0);
+ priv->bridge_reset = of_reset_control_get_exclusive_by_index(dev->of_node,
+ 0);
if (IS_ERR(priv->bridge_reset)) {
dev_err(dev, "Could not get %s reset control\n", priv->name);
return PTR_ERR(priv->bridge_reset);
@@ -171,8 +177,6 @@ static int alt_fpga_bridge_probe(struct platform_device *pdev)
return -EBUSY;
}
- spin_lock_init(&l3_remap_lock);
-
if (!of_property_read_u32(dev->of_node, "bridge-enable", &enable)) {
if (enable > 1) {
dev_warn(dev, "invalid bridge-enable %u > 1\n", enable);
diff --git a/drivers/fpga/altera-ps-spi.c b/drivers/fpga/altera-ps-spi.c
new file mode 100644
index 000000000000..14f14efdf0d5
--- /dev/null
+++ b/drivers/fpga/altera-ps-spi.c
@@ -0,0 +1,308 @@
+/*
+ * Altera Passive Serial SPI Driver
+ *
+ * Copyright (c) 2017 United Western Technologies, Corporation
+ *
+ * Joshua Clayton <stillcompiling@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Manage Altera FPGA firmware that is loaded over SPI using the passive
+ * serial configuration method.
+ * Firmware must be in binary "rbf" format.
+ * Works on Arria 10, Cyclone V and Stratix V. Should work on Cyclone series.
+ * May work on other Altera FPGAs.
+ */
+
+#include <linux/bitrev.h>
+#include <linux/delay.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/of_device.h>
+#include <linux/spi/spi.h>
+#include <linux/sizes.h>
+
+enum altera_ps_devtype {
+ CYCLONE5,
+ ARRIA10,
+};
+
+struct altera_ps_data {
+ enum altera_ps_devtype devtype;
+ int status_wait_min_us;
+ int status_wait_max_us;
+ int t_cfg_us;
+ int t_st2ck_us;
+};
+
+struct altera_ps_conf {
+ struct gpio_desc *config;
+ struct gpio_desc *confd;
+ struct gpio_desc *status;
+ struct spi_device *spi;
+ const struct altera_ps_data *data;
+ u32 info_flags;
+ char mgr_name[64];
+};
+
+/* | Arria 10 | Cyclone5 | Stratix5 |
+ * t_CF2ST0 | [; 600] | [; 600] | [; 600] |ns
+ * t_CFG | [2;] | [2;] | [2;] |µs
+ * t_STATUS | [268; 3000] | [268; 1506] | [268; 1506] |µs
+ * t_CF2ST1 | [; 3000] | [; 1506] | [; 1506] |µs
+ * t_CF2CK | [3010;] | [1506;] | [1506;] |µs
+ * t_ST2CK | [10;] | [2;] | [2;] |µs
+ * t_CD2UM | [175; 830] | [175; 437] | [175; 437] |µs
+ */
+static struct altera_ps_data c5_data = {
+ /* these values for Cyclone5 are compatible with Stratix5 */
+ .devtype = CYCLONE5,
+ .status_wait_min_us = 268,
+ .status_wait_max_us = 1506,
+ .t_cfg_us = 2,
+ .t_st2ck_us = 2,
+};
+
+static struct altera_ps_data a10_data = {
+ .devtype = ARRIA10,
+ .status_wait_min_us = 268, /* min(t_STATUS) */
+ .status_wait_max_us = 3000, /* max(t_CF2ST1) */
+ .t_cfg_us = 2, /* max { min(t_CFG), max(tCF2ST0) } */
+ .t_st2ck_us = 10, /* min(t_ST2CK) */
+};
+
+static const struct of_device_id of_ef_match[] = {
+ { .compatible = "altr,fpga-passive-serial", .data = &c5_data },
+ { .compatible = "altr,fpga-arria10-passive-serial", .data = &a10_data },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_ef_match);
+
+static enum fpga_mgr_states altera_ps_state(struct fpga_manager *mgr)
+{
+ struct altera_ps_conf *conf = mgr->priv;
+
+ if (gpiod_get_value_cansleep(conf->status))
+ return FPGA_MGR_STATE_RESET;
+
+ return FPGA_MGR_STATE_UNKNOWN;
+}
+
+static inline void altera_ps_delay(int delay_us)
+{
+ if (delay_us > 10)
+ usleep_range(delay_us, delay_us + 5);
+ else
+ udelay(delay_us);
+}
+
+static int altera_ps_write_init(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t count)
+{
+ struct altera_ps_conf *conf = mgr->priv;
+ int min, max, waits;
+ int i;
+
+ conf->info_flags = info->flags;
+
+ if (info->flags & FPGA_MGR_PARTIAL_RECONFIG) {
+ dev_err(&mgr->dev, "Partial reconfiguration not supported.\n");
+ return -EINVAL;
+ }
+
+ gpiod_set_value_cansleep(conf->config, 1);
+
+ /* wait min reset pulse time */
+ altera_ps_delay(conf->data->t_cfg_us);
+
+ if (!gpiod_get_value_cansleep(conf->status)) {
+ dev_err(&mgr->dev, "Status pin failed to show a reset\n");
+ return -EIO;
+ }
+
+ gpiod_set_value_cansleep(conf->config, 0);
+
+ min = conf->data->status_wait_min_us;
+ max = conf->data->status_wait_max_us;
+ waits = max / min;
+ if (max % min)
+ waits++;
+
+ /* wait for max { max(t_STATUS), max(t_CF2ST1) } */
+ for (i = 0; i < waits; i++) {
+ usleep_range(min, min + 10);
+ if (!gpiod_get_value_cansleep(conf->status)) {
+ /* wait for min(t_ST2CK)*/
+ altera_ps_delay(conf->data->t_st2ck_us);
+ return 0;
+ }
+ }
+
+ dev_err(&mgr->dev, "Status pin not ready.\n");
+ return -EIO;
+}
+
+static void rev_buf(char *buf, size_t len)
+{
+ u32 *fw32 = (u32 *)buf;
+ size_t extra_bytes = (len & 0x03);
+ const u32 *fw_end = (u32 *)(buf + len - extra_bytes);
+
+ /* set buffer to lsb first */
+ while (fw32 < fw_end) {
+ *fw32 = bitrev8x4(*fw32);
+ fw32++;
+ }
+
+ if (extra_bytes) {
+ buf = (char *)fw_end;
+ while (extra_bytes) {
+ *buf = bitrev8(*buf);
+ buf++;
+ extra_bytes--;
+ }
+ }
+}
+
+static int altera_ps_write(struct fpga_manager *mgr, const char *buf,
+ size_t count)
+{
+ struct altera_ps_conf *conf = mgr->priv;
+ const char *fw_data = buf;
+ const char *fw_data_end = fw_data + count;
+
+ while (fw_data < fw_data_end) {
+ int ret;
+ size_t stride = min_t(size_t, fw_data_end - fw_data, SZ_4K);
+
+ if (!(conf->info_flags & FPGA_MGR_BITSTREAM_LSB_FIRST))
+ rev_buf((char *)fw_data, stride);
+
+ ret = spi_write(conf->spi, fw_data, stride);
+ if (ret) {
+ dev_err(&mgr->dev, "spi error in firmware write: %d\n",
+ ret);
+ return ret;
+ }
+ fw_data += stride;
+ }
+
+ return 0;
+}
+
+static int altera_ps_write_complete(struct fpga_manager *mgr,
+ struct fpga_image_info *info)
+{
+ struct altera_ps_conf *conf = mgr->priv;
+ const char dummy[] = {0};
+ int ret;
+
+ if (gpiod_get_value_cansleep(conf->status)) {
+ dev_err(&mgr->dev, "Error during configuration.\n");
+ return -EIO;
+ }
+
+ if (!IS_ERR(conf->confd)) {
+ if (!gpiod_get_raw_value_cansleep(conf->confd)) {
+ dev_err(&mgr->dev, "CONF_DONE is inactive!\n");
+ return -EIO;
+ }
+ }
+
+ /*
+ * After CONF_DONE goes high, send two additional falling edges on DCLK
+ * to begin initialization and enter user mode
+ */
+ ret = spi_write(conf->spi, dummy, 1);
+ if (ret) {
+ dev_err(&mgr->dev, "spi error during end sequence: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct fpga_manager_ops altera_ps_ops = {
+ .state = altera_ps_state,
+ .write_init = altera_ps_write_init,
+ .write = altera_ps_write,
+ .write_complete = altera_ps_write_complete,
+};
+
+static int altera_ps_probe(struct spi_device *spi)
+{
+ struct altera_ps_conf *conf;
+ const struct of_device_id *of_id;
+
+ conf = devm_kzalloc(&spi->dev, sizeof(*conf), GFP_KERNEL);
+ if (!conf)
+ return -ENOMEM;
+
+ of_id = of_match_device(of_ef_match, &spi->dev);
+ if (!of_id)
+ return -ENODEV;
+
+ conf->data = of_id->data;
+ conf->spi = spi;
+ conf->config = devm_gpiod_get(&spi->dev, "nconfig", GPIOD_OUT_HIGH);
+ if (IS_ERR(conf->config)) {
+ dev_err(&spi->dev, "Failed to get config gpio: %ld\n",
+ PTR_ERR(conf->config));
+ return PTR_ERR(conf->config);
+ }
+
+ conf->status = devm_gpiod_get(&spi->dev, "nstat", GPIOD_IN);
+ if (IS_ERR(conf->status)) {
+ dev_err(&spi->dev, "Failed to get status gpio: %ld\n",
+ PTR_ERR(conf->status));
+ return PTR_ERR(conf->status);
+ }
+
+ conf->confd = devm_gpiod_get(&spi->dev, "confd", GPIOD_IN);
+ if (IS_ERR(conf->confd)) {
+ dev_warn(&spi->dev, "Not using confd gpio: %ld\n",
+ PTR_ERR(conf->confd));
+ }
+
+ /* Register manager with unique name */
+ snprintf(conf->mgr_name, sizeof(conf->mgr_name), "%s %s",
+ dev_driver_string(&spi->dev), dev_name(&spi->dev));
+
+ return fpga_mgr_register(&spi->dev, conf->mgr_name,
+ &altera_ps_ops, conf);
+}
+
+static int altera_ps_remove(struct spi_device *spi)
+{
+ fpga_mgr_unregister(&spi->dev);
+
+ return 0;
+}
+
+static const struct spi_device_id altera_ps_spi_ids[] = {
+ {"cyclone-ps-spi", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, altera_ps_spi_ids);
+
+static struct spi_driver altera_ps_driver = {
+ .driver = {
+ .name = "altera-ps-spi",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(of_ef_match),
+ },
+ .id_table = altera_ps_spi_ids,
+ .probe = altera_ps_probe,
+ .remove = altera_ps_remove,
+};
+
+module_spi_driver(altera_ps_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Joshua Clayton <stillcompiling@gmail.com>");
+MODULE_DESCRIPTION("Module to load Altera FPGA firmware over SPI");
diff --git a/drivers/fpga/fpga-region.c b/drivers/fpga/fpga-region.c
index 3b6b2f4182a1..d9ab7c75b14f 100644
--- a/drivers/fpga/fpga-region.c
+++ b/drivers/fpga/fpga-region.c
@@ -319,8 +319,8 @@ static int child_regions_with_firmware(struct device_node *overlay)
of_node_put(child_region);
if (ret)
- pr_err("firmware-name not allowed in child FPGA region: %s",
- child_region->full_name);
+ pr_err("firmware-name not allowed in child FPGA region: %pOF",
+ child_region);
return ret;
}
diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c
index a485864cb512..4ea63d9bd131 100644
--- a/drivers/fsi/fsi-core.c
+++ b/drivers/fsi/fsi-core.c
@@ -475,7 +475,7 @@ static ssize_t fsi_slave_sysfs_raw_write(struct file *file,
return count;
}
-static struct bin_attribute fsi_slave_raw_attr = {
+static const struct bin_attribute fsi_slave_raw_attr = {
.attr = {
.name = "raw",
.mode = 0600,
@@ -499,7 +499,7 @@ static ssize_t fsi_slave_sysfs_term_write(struct file *file,
return count;
}
-static struct bin_attribute fsi_slave_term_attr = {
+static const struct bin_attribute fsi_slave_term_attr = {
.attr = {
.name = "term",
.mode = 0200,
@@ -532,7 +532,7 @@ static inline uint32_t fsi_smode_sid(int x)
return (x & FSI_SMODE_SID_MASK) << FSI_SMODE_SID_SHIFT;
}
-static const uint32_t fsi_slave_smode(int id)
+static uint32_t fsi_slave_smode(int id)
{
return FSI_SMODE_WSC | FSI_SMODE_ECRC
| fsi_smode_sid(id)
@@ -883,17 +883,16 @@ struct bus_type fsi_bus_type = {
};
EXPORT_SYMBOL_GPL(fsi_bus_type);
-static int fsi_init(void)
+static int __init fsi_init(void)
{
return bus_register(&fsi_bus_type);
}
+postcore_initcall(fsi_init);
static void fsi_exit(void)
{
bus_unregister(&fsi_bus_type);
}
-
-module_init(fsi_init);
module_exit(fsi_exit);
module_param(discard_errors, int, 0664);
MODULE_LICENSE("GPL");
diff --git a/drivers/fsi/fsi-scom.c b/drivers/fsi/fsi-scom.c
index 98d062fd353e..e13353a2fd7c 100644
--- a/drivers/fsi/fsi-scom.c
+++ b/drivers/fsi/fsi-scom.c
@@ -57,12 +57,6 @@ static int put_scom(struct scom_device *scom_dev, uint64_t value,
int rc;
uint32_t data;
- data = cpu_to_be32(SCOM_RESET_CMD);
- rc = fsi_device_write(scom_dev->fsi_dev, SCOM_RESET_REG, &data,
- sizeof(uint32_t));
- if (rc)
- return rc;
-
data = cpu_to_be32((value >> 32) & 0xffffffff);
rc = fsi_device_write(scom_dev->fsi_dev, SCOM_DATA0_REG, &data,
sizeof(uint32_t));
@@ -186,6 +180,7 @@ static const struct file_operations scom_fops = {
static int scom_probe(struct device *dev)
{
+ uint32_t data;
struct fsi_device *fsi_dev = to_fsi_dev(dev);
struct scom_device *scom;
@@ -202,6 +197,9 @@ static int scom_probe(struct device *dev)
scom->mdev.parent = dev;
list_add(&scom->link, &scom_devices);
+ data = cpu_to_be32(SCOM_RESET_CMD);
+ fsi_device_write(fsi_dev, SCOM_RESET_REG, &data, sizeof(uint32_t));
+
return misc_register(&scom->mdev);
}
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index f235eae04c16..3388d54ba114 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -311,7 +311,7 @@ config GPIO_MOCKUP
depends on GPIOLIB && SYSFS
select GPIO_SYSFS
select GPIOLIB_IRQCHIP
- select IRQ_WORK
+ select IRQ_SIM
help
This enables GPIO Testing driver, which provides a way to test GPIO
subsystem through sysfs(or char device) and debugfs. GPIO_SYSFS
@@ -450,6 +450,15 @@ config GPIO_TS4800
help
This driver support TS-4800 FPGA GPIO controllers.
+config GPIO_THUNDERX
+ tristate "Cavium ThunderX/OCTEON-TX GPIO"
+ depends on ARCH_THUNDER || (64BIT && COMPILE_TEST)
+ depends on PCI_MSI && IRQ_DOMAIN_HIERARCHY
+ select IRQ_FASTEOI_HIERARCHY_HANDLERS
+ help
+ Say yes here to support the on-chip GPIO lines on the ThunderX
+ and OCTEON-TX families of SoCs.
+
config GPIO_TZ1090
bool "Toumaz Xenif TZ1090 GPIO support"
depends on SOC_TZ1090
@@ -504,6 +513,7 @@ config GPIO_XGENE_SB
depends on ARCH_XGENE && OF_GPIO
select GPIO_GENERIC
select GPIOLIB_IRQCHIP
+ select IRQ_DOMAIN_HIERARCHY
help
This driver supports the GPIO block within the APM X-Gene
Standby Domain. Say yes here to enable the GPIO functionality.
@@ -1064,6 +1074,21 @@ config GPIO_TPS65912
help
This driver supports TPS65912 gpio chip
+config GPIO_TPS68470
+ bool "TPS68470 GPIO"
+ depends on MFD_TPS68470
+ help
+ Select this option to enable GPIO driver for the TPS68470
+ chip family.
+ There are 7 GPIOs and a few sensor-related GPIOs supported
+ by the TPS68470. While the 7 GPIOs can be configured as
+ input or output as appropriate, the sensor-related GPIOs
+ are "output only" GPIOs.
+
+ This driver config is bool, as the GPIO functionality
+ of the TPS68470 must be available before dependent
+ drivers are loaded.
+
config GPIO_TWL4030
tristate "TWL4030, TWL5030, and TPS659x0 GPIOs"
depends on TWL4030_CORE
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index a9fda6c55113..aeb70e9de6f2 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -113,6 +113,7 @@ obj-$(CONFIG_GPIO_SYSCON) += gpio-syscon.o
obj-$(CONFIG_GPIO_TB10X) += gpio-tb10x.o
obj-$(CONFIG_GPIO_TC3589X) += gpio-tc3589x.o
obj-$(CONFIG_GPIO_TEGRA) += gpio-tegra.o
+obj-$(CONFIG_GPIO_THUNDERX) += gpio-thunderx.o
obj-$(CONFIG_GPIO_TIMBERDALE) += gpio-timberdale.o
obj-$(CONFIG_GPIO_PALMAS) += gpio-palmas.o
obj-$(CONFIG_GPIO_TPIC2810) += gpio-tpic2810.o
@@ -121,6 +122,7 @@ obj-$(CONFIG_GPIO_TPS65218) += gpio-tps65218.o
obj-$(CONFIG_GPIO_TPS6586X) += gpio-tps6586x.o
obj-$(CONFIG_GPIO_TPS65910) += gpio-tps65910.o
obj-$(CONFIG_GPIO_TPS65912) += gpio-tps65912.o
+obj-$(CONFIG_GPIO_TPS68470) += gpio-tps68470.o
obj-$(CONFIG_GPIO_TS4800) += gpio-ts4800.o
obj-$(CONFIG_GPIO_TS4900) += gpio-ts4900.o
obj-$(CONFIG_GPIO_TS5500) += gpio-ts5500.o
diff --git a/drivers/gpio/devres.c b/drivers/gpio/devres.c
index a75511d1ea5d..afbff155a0ba 100644
--- a/drivers/gpio/devres.c
+++ b/drivers/gpio/devres.c
@@ -132,6 +132,7 @@ EXPORT_SYMBOL(devm_gpiod_get_index);
* @index: index of the GPIO to obtain in the consumer
* @child: firmware node (child of @dev)
* @flags: GPIO initialization flags
+ * @label: label to attach to the requested GPIO
*
* GPIO descriptors returned from this function are automatically disposed on
* driver detach.
@@ -271,6 +272,7 @@ EXPORT_SYMBOL(devm_gpiod_get_array_optional);
/**
* devm_gpiod_put - Resource-managed gpiod_put()
+ * @dev: GPIO consumer
* @desc: GPIO descriptor to dispose of
*
* Dispose of a GPIO descriptor obtained with devm_gpiod_get() or
@@ -286,6 +288,7 @@ EXPORT_SYMBOL(devm_gpiod_put);
/**
* devm_gpiod_put_array - Resource-managed gpiod_put_array()
+ * @dev: GPIO consumer
* @descs: GPIO descriptor array to dispose of
*
* Dispose of an array of GPIO descriptors obtained with devm_gpiod_get_array().
diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c
index a6607faf2fdf..6b535ec858cc 100644
--- a/drivers/gpio/gpio-74x164.c
+++ b/drivers/gpio/gpio-74x164.c
@@ -9,6 +9,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/spi/spi.h>
@@ -31,6 +32,7 @@ struct gen_74x164_chip {
* numbering, store the bytes in reverse order.
*/
+ struct gpio_desc *gpiod_oe;
u8 buffer[0];
};
static int __gen_74x164_write_config(struct gen_74x164_chip *chip)
@@ -126,6 +128,13 @@ static int gen_74x164_probe(struct spi_device *spi)
if (!chip)
return -ENOMEM;
+ chip->gpiod_oe = devm_gpiod_get_optional(&spi->dev, "enable",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(chip->gpiod_oe))
+ return PTR_ERR(chip->gpiod_oe);
+
+ gpiod_set_value_cansleep(chip->gpiod_oe, 1);
+
spi_set_drvdata(spi, chip);
chip->gpio_chip.label = spi->modalias;
@@ -164,6 +173,7 @@ static int gen_74x164_remove(struct spi_device *spi)
{
struct gen_74x164_chip *chip = spi_get_drvdata(spi);
+ gpiod_set_value_cansleep(chip->gpiod_oe, 0);
gpiochip_remove(&chip->gpio_chip);
mutex_destroy(&chip->lock);
diff --git a/drivers/gpio/gpio-altera-a10sr.c b/drivers/gpio/gpio-altera-a10sr.c
index 16a8951b2bed..6b11f1314248 100644
--- a/drivers/gpio/gpio-altera-a10sr.c
+++ b/drivers/gpio/gpio-altera-a10sr.c
@@ -71,7 +71,7 @@ static int altr_a10sr_gpio_direction_output(struct gpio_chip *gc,
return -EINVAL;
}
-static struct gpio_chip altr_a10sr_gc = {
+static const struct gpio_chip altr_a10sr_gc = {
.label = "altr_a10sr_gpio",
.owner = THIS_MODULE,
.get = altr_a10sr_gpio_get,
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c
index 17485dc20384..ccc02ed65b3c 100644
--- a/drivers/gpio/gpio-altera.c
+++ b/drivers/gpio/gpio-altera.c
@@ -324,8 +324,8 @@ skip_irq:
return 0;
teardown:
of_mm_gpiochip_remove(&altera_gc->mmchip);
- pr_err("%s: registration failed with status %d\n",
- node->full_name, ret);
+ pr_err("%pOF: registration failed with status %d\n",
+ node, ret);
return ret;
}
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index 4ca436e66bdb..bfc53995064a 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -834,7 +834,7 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
gpio->clk = of_clk_get(pdev->dev.of_node, 0);
if (IS_ERR(gpio->clk)) {
dev_warn(&pdev->dev,
- "No HPLL clock phandle provided, debouncing disabled\n");
+ "Failed to get clock from devicetree, debouncing disabled\n");
gpio->clk = NULL;
}
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index e6489143721a..dd0308cc8bb0 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -339,6 +339,7 @@ static int brcmstb_gpio_irq_setup(struct platform_device *pdev,
struct brcmstb_gpio_priv *priv = bank->parent_priv;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
+ int err;
bank->irq_chip.name = dev_name(dev);
bank->irq_chip.irq_mask = brcmstb_gpio_irq_mask;
@@ -355,8 +356,6 @@ static int brcmstb_gpio_irq_setup(struct platform_device *pdev,
dev_warn(dev,
"Couldn't get wake IRQ - GPIOs will not be able to wake from sleep");
} else {
- int err;
-
/*
* Set wakeup capability before requesting wakeup
* interrupt, so we can process boot-time "wakeups"
@@ -383,8 +382,10 @@ static int brcmstb_gpio_irq_setup(struct platform_device *pdev,
if (priv->can_wake)
bank->irq_chip.irq_set_wake = brcmstb_gpio_irq_set_wake;
- gpiochip_irqchip_add(&bank->gc, &bank->irq_chip, 0,
- handle_simple_irq, IRQ_TYPE_NONE);
+ err = gpiochip_irqchip_add(&bank->gc, &bank->irq_chip, 0,
+ handle_simple_irq, IRQ_TYPE_NONE);
+ if (err)
+ return err;
gpiochip_set_chained_irqchip(&bank->gc, &bank->irq_chip,
priv->parent_irq, brcmstb_gpio_irq_handler);
@@ -483,7 +484,7 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
gc->of_node = np;
gc->owner = THIS_MODULE;
- gc->label = np->full_name;
+ gc->label = devm_kasprintf(dev, GFP_KERNEL, "%pOF", dev->of_node);
gc->base = gpio_base;
gc->of_gpio_n_cells = 2;
gc->of_xlate = brcmstb_gpio_of_xlate;
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index 65cb359308e3..f75d8443ecaf 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -166,7 +166,7 @@ of_err:
static int davinci_gpio_probe(struct platform_device *pdev)
{
static int ctrl_num, bank_base;
- int gpio, bank;
+ int gpio, bank, ret = 0;
unsigned ngpio, nbank;
struct davinci_gpio_controller *chips;
struct davinci_gpio_platform_data *pdata;
@@ -232,10 +232,23 @@ static int davinci_gpio_probe(struct platform_device *pdev)
for (gpio = 0, bank = 0; gpio < ngpio; gpio += 32, bank++)
chips->regs[bank] = gpio_base + offset_array[bank];
- gpiochip_add_data(&chips->chip, chips);
+ ret = devm_gpiochip_add_data(dev, &chips->chip, chips);
+ if (ret)
+ goto err;
+
platform_set_drvdata(pdev, chips);
- davinci_gpio_irq_setup(pdev);
+ ret = davinci_gpio_irq_setup(pdev);
+ if (ret)
+ goto err;
+
return 0;
+
+err:
+ /* Revert the static variable increments */
+ ctrl_num--;
+ bank_base -= ngpio;
+
+ return ret;
}
/*--------------------------------------------------------------------------*/
@@ -477,8 +490,7 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
clk = devm_clk_get(dev, "gpio");
if (IS_ERR(clk)) {
- printk(KERN_ERR "Error %ld getting gpio clock?\n",
- PTR_ERR(clk));
+ dev_err(dev, "Error %ld getting gpio clock\n", PTR_ERR(clk));
return PTR_ERR(clk);
}
ret = clk_prepare_enable(clk);
diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c
index fb8d304cfa17..0ecd2369c2ca 100644
--- a/drivers/gpio/gpio-exar.c
+++ b/drivers/gpio/gpio-exar.c
@@ -132,7 +132,7 @@ static int gpio_exar_probe(struct platform_device *pdev)
if (!p)
return -ENOMEM;
- ret = device_property_read_u32(&pdev->dev, "linux,first-pin",
+ ret = device_property_read_u32(&pdev->dev, "exar,first-pin",
&first_pin);
if (ret)
return ret;
diff --git a/drivers/gpio/gpio-ge.c b/drivers/gpio/gpio-ge.c
index 8650b2916f87..6f5a7fe9787d 100644
--- a/drivers/gpio/gpio-ge.c
+++ b/drivers/gpio/gpio-ge.c
@@ -76,8 +76,7 @@ static int __init gef_gpio_probe(struct platform_device *pdev)
}
/* Setup pointers to chip functions */
- gc->label = devm_kstrdup(&pdev->dev, pdev->dev.of_node->full_name,
- GFP_KERNEL);
+ gc->label = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOF", pdev->dev.of_node);
if (!gc->label) {
ret = -ENOMEM;
goto err0;
@@ -96,8 +95,7 @@ static int __init gef_gpio_probe(struct platform_device *pdev)
return 0;
err0:
iounmap(regs);
- pr_err("%s: GPIO chip registration failed\n",
- pdev->dev.of_node->full_name);
+ pr_err("%pOF: GPIO chip registration failed\n", pdev->dev.of_node);
return ret;
};
diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c
index 7847dd34f86f..6544a16ab02e 100644
--- a/drivers/gpio/gpio-grgpio.c
+++ b/drivers/gpio/gpio-grgpio.c
@@ -367,7 +367,7 @@ static int grgpio_probe(struct platform_device *ofdev)
gc->of_node = np;
gc->owner = THIS_MODULE;
gc->to_irq = grgpio_to_irq;
- gc->label = np->full_name;
+ gc->label = devm_kasprintf(&ofdev->dev, GFP_KERNEL, "%pOF", np);
gc->base = -1;
err = of_property_read_u32(np, "nbits", &prop);
diff --git a/drivers/gpio/gpio-it87.c b/drivers/gpio/gpio-it87.c
index 45d29e488dbb..d43d0a2cc4c5 100644
--- a/drivers/gpio/gpio-it87.c
+++ b/drivers/gpio/gpio-it87.c
@@ -2,6 +2,7 @@
* GPIO interface for IT87xx Super I/O chips
*
* Author: Diego Elio Pettenò <flameeyes@flameeyes.eu>
+ * Copyright (c) 2017 Google, Inc.
*
* Based on it87_wdt.c by Oliver Schuster
* gpio-it8761e.c by Denis Turischev
@@ -39,6 +40,7 @@
#define IT8728_ID 0x8728
#define IT8732_ID 0x8732
#define IT8761_ID 0x8761
+#define IT8772_ID 0x8772
/* IO Ports */
#define REG 0x2e
@@ -314,6 +316,7 @@ static int __init it87_gpio_init(void)
break;
case IT8728_ID:
case IT8732_ID:
+ case IT8772_ID:
gpio_ba_reg = 0x62;
it87_gpio->io_size = 8;
it87_gpio->output_base = 0xc8;
diff --git a/drivers/gpio/gpio-lp87565.c b/drivers/gpio/gpio-lp87565.c
index 6313c50bb91b..a121c8f10610 100644
--- a/drivers/gpio/gpio-lp87565.c
+++ b/drivers/gpio/gpio-lp87565.c
@@ -26,6 +26,27 @@ struct lp87565_gpio {
struct regmap *map;
};
+static int lp87565_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct lp87565_gpio *gpio = gpiochip_get_data(chip);
+ int ret, val;
+
+ ret = regmap_read(gpio->map, LP87565_REG_GPIO_IN, &val);
+ if (ret < 0)
+ return ret;
+
+ return !!(val & BIT(offset));
+}
+
+static void lp87565_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct lp87565_gpio *gpio = gpiochip_get_data(chip);
+
+ regmap_update_bits(gpio->map, LP87565_REG_GPIO_OUT,
+ BIT(offset), value ? BIT(offset) : 0);
+}
+
static int lp87565_gpio_get_direction(struct gpio_chip *chip,
unsigned int offset)
{
@@ -54,30 +75,11 @@ static int lp87565_gpio_direction_output(struct gpio_chip *chip,
{
struct lp87565_gpio *gpio = gpiochip_get_data(chip);
+ lp87565_gpio_set(chip, offset, value);
+
return regmap_update_bits(gpio->map,
LP87565_REG_GPIO_CONFIG,
- BIT(offset), !value ? BIT(offset) : 0);
-}
-
-static int lp87565_gpio_get(struct gpio_chip *chip, unsigned int offset)
-{
- struct lp87565_gpio *gpio = gpiochip_get_data(chip);
- int ret, val;
-
- ret = regmap_read(gpio->map, LP87565_REG_GPIO_IN, &val);
- if (ret < 0)
- return ret;
-
- return !!(val & BIT(offset));
-}
-
-static void lp87565_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
-{
- struct lp87565_gpio *gpio = gpiochip_get_data(chip);
-
- regmap_update_bits(gpio->map, LP87565_REG_GPIO_OUT,
- BIT(offset), value ? BIT(offset) : 0);
+ BIT(offset), BIT(offset));
}
static int lp87565_gpio_request(struct gpio_chip *gc, unsigned int offset)
diff --git a/drivers/gpio/gpio-max77620.c b/drivers/gpio/gpio-max77620.c
index 743459d9477d..538bce4b5b42 100644
--- a/drivers/gpio/gpio-max77620.c
+++ b/drivers/gpio/gpio-max77620.c
@@ -82,7 +82,7 @@ static const struct regmap_irq max77620_gpio_irqs[] = {
},
};
-static struct regmap_irq_chip max77620_gpio_irq_chip = {
+static const struct regmap_irq_chip max77620_gpio_irq_chip = {
.name = "max77620-gpio",
.irqs = max77620_gpio_irqs,
.num_irqs = ARRAY_SIZE(max77620_gpio_irqs),
diff --git a/drivers/gpio/gpio-mb86s7x.c b/drivers/gpio/gpio-mb86s7x.c
index ffb73f688ae1..94d772677ed6 100644
--- a/drivers/gpio/gpio-mb86s7x.c
+++ b/drivers/gpio/gpio-mb86s7x.c
@@ -168,7 +168,9 @@ static int mb86s70_gpio_probe(struct platform_device *pdev)
if (IS_ERR(gchip->clk))
return PTR_ERR(gchip->clk);
- clk_prepare_enable(gchip->clk);
+ ret = clk_prepare_enable(gchip->clk);
+ if (ret)
+ return ret;
spin_lock_init(&gchip->lock);
diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
index 74fdce096c26..4b80e996d976 100644
--- a/drivers/gpio/gpio-ml-ioh.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -391,9 +391,10 @@ static int ioh_gpio_alloc_generic_chip(struct ioh_gpio *chip,
{
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
+ int rv;
- gc = irq_alloc_generic_chip("ioh_gpio", 1, irq_start, chip->base,
- handle_simple_irq);
+ gc = devm_irq_alloc_generic_chip(chip->dev, "ioh_gpio", 1, irq_start,
+ chip->base, handle_simple_irq);
if (!gc)
return -ENOMEM;
@@ -406,10 +407,11 @@ static int ioh_gpio_alloc_generic_chip(struct ioh_gpio *chip,
ct->chip.irq_disable = ioh_irq_disable;
ct->chip.irq_enable = ioh_irq_enable;
- irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
- IRQ_NOREQUEST | IRQ_NOPROBE, 0);
+ rv = devm_irq_setup_generic_chip(chip->dev, gc, IRQ_MSK(num),
+ IRQ_GC_INIT_MASK_CACHE,
+ IRQ_NOREQUEST | IRQ_NOPROBE, 0);
- return 0;
+ return rv;
}
static int ioh_gpio_probe(struct pci_dev *pdev,
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index a6565e128f9e..9532d86a82f7 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -20,7 +20,7 @@
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/irq_work.h>
+#include <linux/irq_sim.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
@@ -47,18 +47,12 @@ enum {
struct gpio_mockup_line_status {
int dir;
bool value;
- bool irq_enabled;
-};
-
-struct gpio_mockup_irq_context {
- struct irq_work work;
- int irq;
};
struct gpio_mockup_chip {
struct gpio_chip gc;
struct gpio_mockup_line_status *lines;
- struct gpio_mockup_irq_context irq_ctx;
+ struct irq_sim irqsim;
struct dentry *dbg_dir;
};
@@ -144,65 +138,11 @@ static int gpio_mockup_name_lines(struct device *dev,
return 0;
}
-static int gpio_mockup_to_irq(struct gpio_chip *chip, unsigned int offset)
-{
- return chip->irq_base + offset;
-}
-
-static void gpio_mockup_irqmask(struct irq_data *data)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
- struct gpio_mockup_chip *chip = gpiochip_get_data(gc);
-
- chip->lines[data->irq - gc->irq_base].irq_enabled = false;
-}
-
-static void gpio_mockup_irqunmask(struct irq_data *data)
+static int gpio_mockup_to_irq(struct gpio_chip *gc, unsigned int offset)
{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
struct gpio_mockup_chip *chip = gpiochip_get_data(gc);
- chip->lines[data->irq - gc->irq_base].irq_enabled = true;
-}
-
-static struct irq_chip gpio_mockup_irqchip = {
- .name = GPIO_MOCKUP_NAME,
- .irq_mask = gpio_mockup_irqmask,
- .irq_unmask = gpio_mockup_irqunmask,
-};
-
-static void gpio_mockup_handle_irq(struct irq_work *work)
-{
- struct gpio_mockup_irq_context *irq_ctx;
-
- irq_ctx = container_of(work, struct gpio_mockup_irq_context, work);
- handle_simple_irq(irq_to_desc(irq_ctx->irq));
-}
-
-static int gpio_mockup_irqchip_setup(struct device *dev,
- struct gpio_mockup_chip *chip)
-{
- struct gpio_chip *gc = &chip->gc;
- int irq_base, i;
-
- irq_base = devm_irq_alloc_descs(dev, -1, 0, gc->ngpio, 0);
- if (irq_base < 0)
- return irq_base;
-
- gc->irq_base = irq_base;
- gc->irqchip = &gpio_mockup_irqchip;
-
- for (i = 0; i < gc->ngpio; i++) {
- irq_set_chip(irq_base + i, gc->irqchip);
- irq_set_chip_data(irq_base + i, gc);
- irq_set_handler(irq_base + i, &handle_simple_irq);
- irq_modify_status(irq_base + i,
- IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE);
- }
-
- init_irq_work(&chip->irq_ctx.work, gpio_mockup_handle_irq);
-
- return 0;
+ return irq_sim_irqnum(&chip->irqsim, offset);
}
static ssize_t gpio_mockup_event_write(struct file *file,
@@ -213,7 +153,6 @@ static ssize_t gpio_mockup_event_write(struct file *file,
struct gpio_mockup_chip *chip;
struct seq_file *sfile;
struct gpio_desc *desc;
- struct gpio_chip *gc;
int rv, val;
rv = kstrtoint_from_user(usr_buf, size, 0, &val);
@@ -226,13 +165,9 @@ static ssize_t gpio_mockup_event_write(struct file *file,
priv = sfile->private;
desc = priv->desc;
chip = priv->chip;
- gc = &chip->gc;
- if (chip->lines[priv->offset].irq_enabled) {
- gpiod_set_value_cansleep(desc, val);
- priv->chip->irq_ctx.irq = gc->irq_base + priv->offset;
- irq_work_queue(&priv->chip->irq_ctx.work);
- }
+ gpiod_set_value_cansleep(desc, val);
+ irq_sim_fire(&chip->irqsim, priv->offset);
return size;
}
@@ -319,7 +254,7 @@ static int gpio_mockup_add(struct device *dev,
return ret;
}
- ret = gpio_mockup_irqchip_setup(dev, chip);
+ ret = devm_irq_sim_init(dev, &chip->irqsim, gc->ngpio);
if (ret)
return ret;
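
The gpio-mockup hunk above is one of the first users of the irq_sim helpers; a condensed sketch of that pattern follows, using only the calls visible in the hunk (devm_irq_sim_init(), irq_sim_irqnum(), irq_sim_fire()); the surrounding structure and function names are assumptions.

/* Condensed irq_sim pattern as applied in gpio-mockup above (names assumed). */
#include <linux/gpio/driver.h>
#include <linux/irq_sim.h>

struct demo_chip {
	struct gpio_chip gc;
	struct irq_sim irqsim;
};

static int demo_irqsim_setup(struct device *dev, struct demo_chip *chip)
{
	/* allocate one simulated interrupt per GPIO line */
	return devm_irq_sim_init(dev, &chip->irqsim, chip->gc.ngpio);
}

static int demo_to_irq(struct gpio_chip *gc, unsigned int offset)
{
	struct demo_chip *chip = gpiochip_get_data(gc);

	/* map a line offset to the virtual irq backing it */
	return irq_sim_irqnum(&chip->irqsim, offset);
}

static void demo_line_event(struct demo_chip *chip, unsigned int offset)
{
	/* raise the simulated interrupt for this line */
	irq_sim_fire(&chip->irqsim, offset);
}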
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 793518a30afe..8c93dec498fa 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -348,8 +348,8 @@ static int mpc8xxx_probe(struct platform_device *pdev)
ret = gpiochip_add_data(gc, mpc8xxx_gc);
if (ret) {
- pr_err("%s: GPIO chip registration failed with status %d\n",
- np->full_name, ret);
+ pr_err("%pOF: GPIO chip registration failed with status %d\n",
+ np, ret);
goto err;
}
diff --git a/drivers/gpio/gpio-msic.c b/drivers/gpio/gpio-msic.c
index 1b7ce7f85886..6cb67595d15f 100644
--- a/drivers/gpio/gpio-msic.c
+++ b/drivers/gpio/gpio-msic.c
@@ -265,8 +265,8 @@ static int platform_msic_gpio_probe(struct platform_device *pdev)
int i;
if (irq < 0) {
- dev_err(dev, "no IRQ line\n");
- return -EINVAL;
+ dev_err(dev, "no IRQ line: %d\n", irq);
+ return irq;
}
if (!pdata || !pdata->gpio_base) {
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index e338c3743562..45c65f805fd6 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -557,7 +557,7 @@ static void mvebu_gpio_irq_handler(struct irq_desc *desc)
edge_cause = mvebu_gpio_read_edge_cause(mvchip);
edge_mask = mvebu_gpio_read_edge_mask(mvchip);
- cause = (data_in ^ level_mask) | (edge_cause & edge_mask);
+ cause = (data_in & level_mask) | (edge_cause & edge_mask);
for (i = 0; i < mvchip->chip.ngpio; i++) {
int irq;
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index 3abea3f0b307..5245a2fe62ae 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -66,6 +66,7 @@ struct mxc_gpio_port {
int irq_high;
struct irq_domain *domain;
struct gpio_chip gc;
+ struct device *dev;
u32 both_edges;
};
@@ -324,29 +325,31 @@ static int gpio_set_wake_irq(struct irq_data *d, u32 enable)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct mxc_gpio_port *port = gc->private;
u32 gpio_idx = d->hwirq;
+ int ret;
if (enable) {
if (port->irq_high && (gpio_idx >= 16))
- enable_irq_wake(port->irq_high);
+ ret = enable_irq_wake(port->irq_high);
else
- enable_irq_wake(port->irq);
+ ret = enable_irq_wake(port->irq);
} else {
if (port->irq_high && (gpio_idx >= 16))
- disable_irq_wake(port->irq_high);
+ ret = disable_irq_wake(port->irq_high);
else
- disable_irq_wake(port->irq);
+ ret = disable_irq_wake(port->irq);
}
- return 0;
+ return ret;
}
static int mxc_gpio_init_gc(struct mxc_gpio_port *port, int irq_base)
{
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
+ int rv;
- gc = irq_alloc_generic_chip("gpio-mxc", 1, irq_base,
- port->base, handle_level_irq);
+ gc = devm_irq_alloc_generic_chip(port->dev, "gpio-mxc", 1, irq_base,
+ port->base, handle_level_irq);
if (!gc)
return -ENOMEM;
gc->private = port;
@@ -361,10 +364,11 @@ static int mxc_gpio_init_gc(struct mxc_gpio_port *port, int irq_base)
ct->regs.ack = GPIO_ISR;
ct->regs.mask = GPIO_IMR;
- irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_NESTED_LOCK,
- IRQ_NOREQUEST, 0);
+ rv = devm_irq_setup_generic_chip(port->dev, gc, IRQ_MSK(32),
+ IRQ_GC_INIT_NESTED_LOCK,
+ IRQ_NOREQUEST, 0);
- return 0;
+ return rv;
}
static void mxc_gpio_get_hw(struct platform_device *pdev)
@@ -418,12 +422,17 @@ static int mxc_gpio_probe(struct platform_device *pdev)
if (!port)
return -ENOMEM;
+ port->dev = &pdev->dev;
+
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
port->base = devm_ioremap_resource(&pdev->dev, iores);
if (IS_ERR(port->base))
return PTR_ERR(port->base);
port->irq_high = platform_get_irq(pdev, 1);
+ if (port->irq_high < 0)
+ port->irq_high = 0;
+
port->irq = platform_get_irq(pdev, 0);
if (port->irq < 0)
return port->irq;
@@ -504,6 +513,7 @@ static struct platform_driver mxc_gpio_driver = {
.driver = {
.name = "gpio-mxc",
.of_match_table = mxc_gpio_dt_ids,
+ .suppress_bind_attrs = true,
},
.probe = mxc_gpio_probe,
.id_table = mxc_gpio_devtype,
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index 6ae583f36733..435def22445d 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -66,6 +66,7 @@ struct mxs_gpio_port {
int irq;
struct irq_domain *domain;
struct gpio_chip gc;
+ struct device *dev;
enum mxs_gpio_id devid;
u32 both_edges;
};
@@ -209,9 +210,10 @@ static int mxs_gpio_init_gc(struct mxs_gpio_port *port, int irq_base)
{
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
+ int rv;
- gc = irq_alloc_generic_chip("gpio-mxs", 2, irq_base,
- port->base, handle_level_irq);
+ gc = devm_irq_alloc_generic_chip(port->dev, "gpio-mxs", 2, irq_base,
+ port->base, handle_level_irq);
if (!gc)
return -ENOMEM;
@@ -242,10 +244,11 @@ static int mxs_gpio_init_gc(struct mxs_gpio_port *port, int irq_base)
ct->regs.disable = PINCTRL_IRQEN(port) + MXS_CLR;
ct->handler = handle_level_irq;
- irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_NESTED_LOCK,
- IRQ_NOREQUEST, 0);
+ rv = devm_irq_setup_generic_chip(port->dev, gc, IRQ_MSK(32),
+ IRQ_GC_INIT_NESTED_LOCK,
+ IRQ_NOREQUEST, 0);
- return 0;
+ return rv;
}
static int mxs_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
@@ -304,6 +307,7 @@ static int mxs_gpio_probe(struct platform_device *pdev)
if (port->id < 0)
return port->id;
port->devid = (enum mxs_gpio_id) of_id->data;
+ port->dev = &pdev->dev;
port->irq = platform_get_irq(pdev, 0);
if (port->irq < 0)
return port->irq;
@@ -379,6 +383,7 @@ static struct platform_driver mxs_gpio_driver = {
.driver = {
.name = "gpio-mxs",
.of_match_table = mxs_gpio_dt_ids,
+ .suppress_bind_attrs = true,
},
.probe = mxs_gpio_probe,
.id_table = mxs_gpio_ids,
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index f8c550de6c72..dbf869fb63ce 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -1247,6 +1247,8 @@ static int omap_gpio_probe(struct platform_device *pdev)
if (ret) {
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
+ if (bank->dbck_flag)
+ clk_unprepare(bank->dbck);
return ret;
}
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 4c9e21300a26..1b9dbf691ae7 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -187,10 +187,9 @@ static int pca953x_write_regs_8(struct pca953x_chip *chip, int reg, u8 *val)
static int pca953x_write_regs_16(struct pca953x_chip *chip, int reg, u8 *val)
{
- __le16 word = cpu_to_le16(get_unaligned((u16 *)val));
+ u16 word = get_unaligned((u16 *)val);
- return i2c_smbus_write_word_data(chip->client,
- reg << 1, (__force u16)word);
+ return i2c_smbus_write_word_data(chip->client, reg << 1, word);
}
static int pca957x_write_regs_16(struct pca953x_chip *chip, int reg, u8 *val)
@@ -241,8 +240,7 @@ static int pca953x_read_regs_16(struct pca953x_chip *chip, int reg, u8 *val)
int ret;
ret = i2c_smbus_read_word_data(chip->client, reg << 1);
- val[0] = (u16)ret & 0xFF;
- val[1] = (u16)ret >> 8;
+ put_unaligned(ret, (u16 *)val);
return ret;
}
diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c
index f6600f8ada52..68c6d0c5a6d1 100644
--- a/drivers/gpio/gpio-pch.c
+++ b/drivers/gpio/gpio-pch.c
@@ -337,9 +337,10 @@ static int pch_gpio_alloc_generic_chip(struct pch_gpio *chip,
{
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
+ int rv;
- gc = irq_alloc_generic_chip("pch_gpio", 1, irq_start, chip->base,
- handle_simple_irq);
+ gc = devm_irq_alloc_generic_chip(chip->dev, "pch_gpio", 1, irq_start,
+ chip->base, handle_simple_irq);
if (!gc)
return -ENOMEM;
@@ -351,10 +352,11 @@ static int pch_gpio_alloc_generic_chip(struct pch_gpio *chip,
ct->chip.irq_unmask = pch_irq_unmask;
ct->chip.irq_set_type = pch_irq_type;
- irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
- IRQ_NOREQUEST | IRQ_NOPROBE, 0);
+ rv = devm_irq_setup_generic_chip(chip->dev, gc, IRQ_MSK(num),
+ IRQ_GC_INIT_MASK_CACHE,
+ IRQ_NOREQUEST | IRQ_NOPROBE, 0);
- return 0;
+ return rv;
}
static int pch_gpio_probe(struct pci_dev *pdev,
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index 3d3d6b6645a7..6aaaab79c205 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -405,7 +405,7 @@ static const struct dev_pm_ops pl061_dev_pm_ops = {
};
#endif
-static struct amba_id pl061_ids[] = {
+static const struct amba_id pl061_ids[] = {
{
.id = 0x00041061,
.mask = 0x000fffff,
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 832f3e46ba9f..6029899789f3 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -451,7 +451,9 @@ static irqreturn_t pxa_gpio_demux_handler(int in_irq, void *d)
for_each_set_bit(n, &gedr, BITS_PER_LONG) {
loop = 1;
- generic_handle_irq(gpio_to_irq(gpio + n));
+ generic_handle_irq(
+ irq_find_mapping(pchip->irqdomain,
+ gpio + n));
}
}
handled += loop;
@@ -465,9 +467,9 @@ static irqreturn_t pxa_gpio_direct_handler(int in_irq, void *d)
struct pxa_gpio_chip *pchip = d;
if (in_irq == pchip->irq0) {
- generic_handle_irq(gpio_to_irq(0));
+ generic_handle_irq(irq_find_mapping(pchip->irqdomain, 0));
} else if (in_irq == pchip->irq1) {
- generic_handle_irq(gpio_to_irq(1));
+ generic_handle_irq(irq_find_mapping(pchip->irqdomain, 1));
} else {
pr_err("%s() unknown irq %d\n", __func__, in_irq);
return IRQ_NONE;
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 4a1536a050bc..1f0871553fd2 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -371,6 +371,16 @@ static const struct of_device_id gpio_rcar_of_table[] = {
/* Gen3 GPIO is identical to Gen2. */
.data = &gpio_rcar_info_gen2,
}, {
+ .compatible = "renesas,rcar-gen1-gpio",
+ .data = &gpio_rcar_info_gen1,
+ }, {
+ .compatible = "renesas,rcar-gen2-gpio",
+ .data = &gpio_rcar_info_gen2,
+ }, {
+ .compatible = "renesas,rcar-gen3-gpio",
+ /* Gen3 GPIO is identical to Gen2. */
+ .data = &gpio_rcar_info_gen2,
+ }, {
.compatible = "renesas,gpio-rcar",
.data = &gpio_rcar_info_gen1,
}, {
diff --git a/drivers/gpio/gpio-sta2x11.c b/drivers/gpio/gpio-sta2x11.c
index 9e705162da8d..407359da08f9 100644
--- a/drivers/gpio/gpio-sta2x11.c
+++ b/drivers/gpio/gpio-sta2x11.c
@@ -324,9 +324,11 @@ static int gsta_alloc_irq_chip(struct gsta_gpio *chip)
{
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
+ int rv;
- gc = irq_alloc_generic_chip(KBUILD_MODNAME, 1, chip->irq_base,
- chip->reg_base, handle_simple_irq);
+ gc = devm_irq_alloc_generic_chip(chip->dev, KBUILD_MODNAME, 1,
+ chip->irq_base,
+ chip->reg_base, handle_simple_irq);
if (!gc)
return -ENOMEM;
@@ -338,8 +340,11 @@ static int gsta_alloc_irq_chip(struct gsta_gpio *chip)
ct->chip.irq_enable = gsta_irq_enable;
/* FIXME: this makes at most 32 interrupts. Request 0 by now */
- irq_setup_generic_chip(gc, 0 /* IRQ_MSK(GSTA_GPIO_PER_BLOCK) */, 0,
- IRQ_NOREQUEST | IRQ_NOPROBE, 0);
+ rv = devm_irq_setup_generic_chip(chip->dev, gc,
+ 0 /* IRQ_MSK(GSTA_GPIO_PER_BLOCK) */,
+ 0, IRQ_NOREQUEST | IRQ_NOPROBE, 0);
+ if (rv)
+ return rv;
/* Set up all 128 interrupts: code from setup_generic_chip */
{
@@ -432,6 +437,7 @@ static int gsta_probe(struct platform_device *dev)
static struct platform_driver sta2x11_gpio_platform_driver = {
.driver = {
.name = "sta2x11-gpio",
+ .suppress_bind_attrs = true,
},
.probe = gsta_probe,
};
diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c
index 80b6959ae995..091ffaaec635 100644
--- a/drivers/gpio/gpio-tb10x.c
+++ b/drivers/gpio/gpio-tb10x.c
@@ -191,7 +191,8 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
if (IS_ERR(tb10x_gpio->base))
return PTR_ERR(tb10x_gpio->base);
- tb10x_gpio->gc.label = of_node_full_name(dn);
+ tb10x_gpio->gc.label =
+ devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOF", pdev->dev.of_node);
tb10x_gpio->gc.parent = &pdev->dev;
tb10x_gpio->gc.owner = THIS_MODULE;
tb10x_gpio->gc.direction_input = tb10x_gpio_direction_in;
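The %pOF format specifier prints a device-tree node's full path, and devm_kasprintf() keeps the resulting string device-managed so nothing has to be freed by hand. A minimal sketch, assuming a device with an OF node:

#include <linux/device.h>
#include <linux/of.h>
#include <linux/slab.h>

/* Build a device-managed label from the device's DT node path. */
static const char *demo_of_label(struct device *dev)
{
	return devm_kasprintf(dev, GFP_KERNEL, "%pOF", dev->of_node);
}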
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 88529d3c06c9..fbaf974277df 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -67,8 +67,8 @@
struct tegra_gpio_info;
struct tegra_gpio_bank {
- int bank;
- int irq;
+ unsigned int bank;
+ unsigned int irq;
spinlock_t lvl_lock[4];
spinlock_t dbc_lock[4]; /* Lock for updating debounce count register */
#ifdef CONFIG_PM_SLEEP
@@ -112,13 +112,14 @@ static inline u32 tegra_gpio_readl(struct tegra_gpio_info *tgi, u32 reg)
return __raw_readl(tgi->regs + reg);
}
-static int tegra_gpio_compose(int bank, int port, int bit)
+static unsigned int tegra_gpio_compose(unsigned int bank, unsigned int port,
+ unsigned int bit)
{
return (bank << 5) | ((port & 0x3) << 3) | (bit & 0x7);
}
static void tegra_gpio_mask_write(struct tegra_gpio_info *tgi, u32 reg,
- int gpio, int value)
+ unsigned int gpio, u32 value)
{
u32 val;
@@ -128,22 +129,22 @@ static void tegra_gpio_mask_write(struct tegra_gpio_info *tgi, u32 reg,
tegra_gpio_writel(tgi, val, reg);
}
-static void tegra_gpio_enable(struct tegra_gpio_info *tgi, int gpio)
+static void tegra_gpio_enable(struct tegra_gpio_info *tgi, unsigned int gpio)
{
tegra_gpio_mask_write(tgi, GPIO_MSK_CNF(tgi, gpio), gpio, 1);
}
-static void tegra_gpio_disable(struct tegra_gpio_info *tgi, int gpio)
+static void tegra_gpio_disable(struct tegra_gpio_info *tgi, unsigned int gpio)
{
tegra_gpio_mask_write(tgi, GPIO_MSK_CNF(tgi, gpio), gpio, 0);
}
-static int tegra_gpio_request(struct gpio_chip *chip, unsigned offset)
+static int tegra_gpio_request(struct gpio_chip *chip, unsigned int offset)
{
return pinctrl_request_gpio(offset);
}
-static void tegra_gpio_free(struct gpio_chip *chip, unsigned offset)
+static void tegra_gpio_free(struct gpio_chip *chip, unsigned int offset)
{
struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
@@ -151,17 +152,18 @@ static void tegra_gpio_free(struct gpio_chip *chip, unsigned offset)
tegra_gpio_disable(tgi, offset);
}
-static void tegra_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static void tegra_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
tegra_gpio_mask_write(tgi, GPIO_MSK_OUT(tgi, offset), offset, value);
}
-static int tegra_gpio_get(struct gpio_chip *chip, unsigned offset)
+static int tegra_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
- int bval = BIT(GPIO_BIT(offset));
+ unsigned int bval = BIT(GPIO_BIT(offset));
/* If gpio is in output mode then read from the out value */
if (tegra_gpio_readl(tgi, GPIO_OE(tgi, offset)) & bval)
@@ -170,7 +172,8 @@ static int tegra_gpio_get(struct gpio_chip *chip, unsigned offset)
return !!(tegra_gpio_readl(tgi, GPIO_IN(tgi, offset)) & bval);
}
-static int tegra_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+static int tegra_gpio_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
{
struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
@@ -179,8 +182,9 @@ static int tegra_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
return 0;
}
-static int tegra_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
- int value)
+static int tegra_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset,
+ int value)
{
struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
@@ -190,7 +194,8 @@ static int tegra_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
return 0;
}
-static int tegra_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+static int tegra_gpio_get_direction(struct gpio_chip *chip,
+ unsigned int offset)
{
struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
u32 pin_mask = BIT(GPIO_BIT(offset));
@@ -212,7 +217,7 @@ static int tegra_gpio_set_debounce(struct gpio_chip *chip, unsigned int offset,
struct tegra_gpio_bank *bank = &tgi->bank_info[GPIO_BANK(offset)];
unsigned int debounce_ms = DIV_ROUND_UP(debounce, 1000);
unsigned long flags;
- int port;
+ unsigned int port;
if (!debounce_ms) {
tegra_gpio_mask_write(tgi, GPIO_MSK_DBC_EN(tgi, offset),
@@ -250,7 +255,7 @@ static int tegra_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
return tegra_gpio_set_debounce(chip, offset, debounce);
}
-static int tegra_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+static int tegra_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
{
struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
@@ -261,7 +266,7 @@ static void tegra_gpio_irq_ack(struct irq_data *d)
{
struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
struct tegra_gpio_info *tgi = bank->tgi;
- int gpio = d->hwirq;
+ unsigned int gpio = d->hwirq;
tegra_gpio_writel(tgi, 1 << GPIO_BIT(gpio), GPIO_INT_CLR(tgi, gpio));
}
@@ -270,7 +275,7 @@ static void tegra_gpio_irq_mask(struct irq_data *d)
{
struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
struct tegra_gpio_info *tgi = bank->tgi;
- int gpio = d->hwirq;
+ unsigned int gpio = d->hwirq;
tegra_gpio_mask_write(tgi, GPIO_MSK_INT_ENB(tgi, gpio), gpio, 0);
}
@@ -279,20 +284,18 @@ static void tegra_gpio_irq_unmask(struct irq_data *d)
{
struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
struct tegra_gpio_info *tgi = bank->tgi;
- int gpio = d->hwirq;
+ unsigned int gpio = d->hwirq;
tegra_gpio_mask_write(tgi, GPIO_MSK_INT_ENB(tgi, gpio), gpio, 1);
}
static int tegra_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
- int gpio = d->hwirq;
+ unsigned int gpio = d->hwirq, port = GPIO_PORT(gpio), lvl_type;
struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
struct tegra_gpio_info *tgi = bank->tgi;
- int port = GPIO_PORT(gpio);
- int lvl_type;
- int val;
unsigned long flags;
+ u32 val;
int ret;
switch (type & IRQ_TYPE_SENSE_MASK) {
@@ -323,7 +326,7 @@ static int tegra_gpio_irq_set_type(struct irq_data *d, unsigned int type)
ret = gpiochip_lock_as_irq(&tgi->gc, gpio);
if (ret) {
dev_err(tgi->dev,
- "unable to lock Tegra GPIO %d as IRQ\n", gpio);
+ "unable to lock Tegra GPIO %u as IRQ\n", gpio);
return ret;
}
@@ -351,17 +354,15 @@ static void tegra_gpio_irq_shutdown(struct irq_data *d)
{
struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
struct tegra_gpio_info *tgi = bank->tgi;
- int gpio = d->hwirq;
+ unsigned int gpio = d->hwirq;
gpiochip_unlock_as_irq(&tgi->gc, gpio);
}
static void tegra_gpio_irq_handler(struct irq_desc *desc)
{
- int port;
- int pin;
- int unmasked = 0;
- int gpio;
+ unsigned int port, pin, gpio;
+ bool unmasked = false;
u32 lvl;
unsigned long sta;
struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -384,12 +385,13 @@ static void tegra_gpio_irq_handler(struct irq_desc *desc)
* before executing the handler so that we don't
* miss edges
*/
- if (lvl & (0x100 << pin)) {
- unmasked = 1;
+ if (!unmasked && lvl & (0x100 << pin)) {
+ unmasked = true;
chained_irq_exit(chip, desc);
}
- generic_handle_irq(gpio_to_irq(gpio + pin));
+ generic_handle_irq(irq_find_mapping(tgi->irq_domain,
+ gpio + pin));
}
}
@@ -404,8 +406,7 @@ static int tegra_gpio_resume(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct tegra_gpio_info *tgi = platform_get_drvdata(pdev);
unsigned long flags;
- int b;
- int p;
+ unsigned int b, p;
local_irq_save(flags);
@@ -413,7 +414,8 @@ static int tegra_gpio_resume(struct device *dev)
struct tegra_gpio_bank *bank = &tgi->bank_info[b];
for (p = 0; p < ARRAY_SIZE(bank->oe); p++) {
- unsigned int gpio = (b<<5) | (p<<3);
+ unsigned int gpio = (b << 5) | (p << 3);
+
tegra_gpio_writel(tgi, bank->cnf[p],
GPIO_CNF(tgi, gpio));
@@ -444,15 +446,15 @@ static int tegra_gpio_suspend(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct tegra_gpio_info *tgi = platform_get_drvdata(pdev);
unsigned long flags;
- int b;
- int p;
+ unsigned int b, p;
local_irq_save(flags);
for (b = 0; b < tgi->bank_count; b++) {
struct tegra_gpio_bank *bank = &tgi->bank_info[b];
for (p = 0; p < ARRAY_SIZE(bank->oe); p++) {
- unsigned int gpio = (b<<5) | (p<<3);
+ unsigned int gpio = (b << 5) | (p << 3);
+
bank->cnf[p] = tegra_gpio_readl(tgi,
GPIO_CNF(tgi, gpio));
bank->out[p] = tegra_gpio_readl(tgi,
@@ -483,7 +485,7 @@ static int tegra_gpio_suspend(struct device *dev)
static int tegra_gpio_irq_set_wake(struct irq_data *d, unsigned int enable)
{
struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
- int gpio = d->hwirq;
+ unsigned int gpio = d->hwirq;
u32 port, bit, mask;
port = GPIO_PORT(gpio);
@@ -507,14 +509,14 @@ static int tegra_gpio_irq_set_wake(struct irq_data *d, unsigned int enable)
static int dbg_gpio_show(struct seq_file *s, void *unused)
{
struct tegra_gpio_info *tgi = s->private;
- int i;
- int j;
+ unsigned int i, j;
for (i = 0; i < tgi->bank_count; i++) {
for (j = 0; j < 4; j++) {
- int gpio = tegra_gpio_compose(i, j, 0);
+ unsigned int gpio = tegra_gpio_compose(i, j, 0);
+
seq_printf(s,
- "%d:%d %02x %02x %02x %02x %02x %02x %06x\n",
+ "%u:%u %02x %02x %02x %02x %02x %02x %06x\n",
i, j,
tegra_gpio_readl(tgi, GPIO_CNF(tgi, gpio)),
tegra_gpio_readl(tgi, GPIO_OE(tgi, gpio)),
@@ -542,7 +544,7 @@ static const struct file_operations debug_fops = {
static void tegra_gpio_debuginit(struct tegra_gpio_info *tgi)
{
- (void) debugfs_create_file("tegra_gpio", S_IRUGO,
+ (void) debugfs_create_file("tegra_gpio", 0444,
NULL, tgi, &debug_fops);
}
@@ -566,35 +568,25 @@ static struct lock_class_key gpio_lock_class;
static int tegra_gpio_probe(struct platform_device *pdev)
{
- const struct tegra_gpio_soc_config *config;
struct tegra_gpio_info *tgi;
struct resource *res;
struct tegra_gpio_bank *bank;
+ unsigned int gpio, i, j;
int ret;
- int gpio;
- int i;
- int j;
-
- config = of_device_get_match_data(&pdev->dev);
- if (!config) {
- dev_err(&pdev->dev, "Error: No device match found\n");
- return -ENODEV;
- }
tgi = devm_kzalloc(&pdev->dev, sizeof(*tgi), GFP_KERNEL);
if (!tgi)
return -ENODEV;
- tgi->soc = config;
+ tgi->soc = of_device_get_match_data(&pdev->dev);
tgi->dev = &pdev->dev;
- for (;;) {
- res = platform_get_resource(pdev, IORESOURCE_IRQ,
- tgi->bank_count);
- if (!res)
- break;
- tgi->bank_count++;
- }
+ ret = platform_irq_count(pdev);
+ if (ret < 0)
+ return ret;
+
+ tgi->bank_count = ret;
+
if (!tgi->bank_count) {
dev_err(&pdev->dev, "Missing IRQ resource\n");
return -ENODEV;
@@ -626,13 +618,13 @@ static int tegra_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, tgi);
- if (config->debounce_supported)
+ if (tgi->soc->debounce_supported)
tgi->gc.set_config = tegra_gpio_set_config;
- tgi->bank_info = devm_kzalloc(&pdev->dev, tgi->bank_count *
+ tgi->bank_info = devm_kcalloc(&pdev->dev, tgi->bank_count,
sizeof(*tgi->bank_info), GFP_KERNEL);
if (!tgi->bank_info)
- return -ENODEV;
+ return -ENOMEM;
tgi->irq_domain = irq_domain_add_linear(pdev->dev.of_node,
tgi->gc.ngpio,
@@ -641,15 +633,15 @@ static int tegra_gpio_probe(struct platform_device *pdev)
return -ENODEV;
for (i = 0; i < tgi->bank_count; i++) {
- res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
- if (!res) {
- dev_err(&pdev->dev, "Missing IRQ resource\n");
- return -ENODEV;
+ ret = platform_get_irq(pdev, i);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Missing IRQ resource: %d\n", ret);
+ return ret;
}
bank = &tgi->bank_info[i];
bank->bank = i;
- bank->irq = res->start;
+ bank->irq = ret;
bank->tgi = tgi;
}
@@ -661,6 +653,7 @@ static int tegra_gpio_probe(struct platform_device *pdev)
for (i = 0; i < tgi->bank_count; i++) {
for (j = 0; j < 4; j++) {
int gpio = tegra_gpio_compose(i, j, 0);
+
tegra_gpio_writel(tgi, 0x00, GPIO_INT_ENB(tgi, gpio));
}
}
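A sketch of the probe-time IRQ discovery used in the tegra conversion above: platform_irq_count() replaces the open-coded walk over IORESOURCE_IRQ resources, and platform_get_irq() yields each bank's virq or a negative errno. Per-bank bookkeeping is elided and names are illustrative:

#include <linux/platform_device.h>

static int demo_count_bank_irqs(struct platform_device *pdev)
{
	int count, i, irq;

	count = platform_irq_count(pdev);
	if (count < 0)
		return count;
	if (!count)
		return -ENODEV;

	for (i = 0; i < count; i++) {
		irq = platform_get_irq(pdev, i);
		if (irq < 0)
			return irq;
		/* the real driver stores irq in its per-bank state here */
	}

	return count;
}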
diff --git a/drivers/gpio/gpio-thunderx.c b/drivers/gpio/gpio-thunderx.c
new file mode 100644
index 000000000000..57efb251f9c4
--- /dev/null
+++ b/drivers/gpio/gpio-thunderx.c
@@ -0,0 +1,639 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2016, 2017 Cavium Inc.
+ */
+
+#include <linux/bitops.h>
+#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+
+
+#define GPIO_RX_DAT 0x0
+#define GPIO_TX_SET 0x8
+#define GPIO_TX_CLR 0x10
+#define GPIO_CONST 0x90
+#define GPIO_CONST_GPIOS_MASK 0xff
+#define GPIO_BIT_CFG 0x400
+#define GPIO_BIT_CFG_TX_OE BIT(0)
+#define GPIO_BIT_CFG_PIN_XOR BIT(1)
+#define GPIO_BIT_CFG_INT_EN BIT(2)
+#define GPIO_BIT_CFG_INT_TYPE BIT(3)
+#define GPIO_BIT_CFG_FIL_MASK GENMASK(11, 4)
+#define GPIO_BIT_CFG_FIL_CNT_SHIFT 4
+#define GPIO_BIT_CFG_FIL_SEL_SHIFT 8
+#define GPIO_BIT_CFG_TX_OD BIT(12)
+#define GPIO_BIT_CFG_PIN_SEL_MASK GENMASK(25, 16)
+#define GPIO_INTR 0x800
+#define GPIO_INTR_INTR BIT(0)
+#define GPIO_INTR_INTR_W1S BIT(1)
+#define GPIO_INTR_ENA_W1C BIT(2)
+#define GPIO_INTR_ENA_W1S BIT(3)
+#define GPIO_2ND_BANK 0x1400
+
+#define GLITCH_FILTER_400NS ((4u << GPIO_BIT_CFG_FIL_SEL_SHIFT) | \
+ (9u << GPIO_BIT_CFG_FIL_CNT_SHIFT))
+
+struct thunderx_gpio;
+
+struct thunderx_line {
+ struct thunderx_gpio *txgpio;
+ unsigned int line;
+ unsigned int fil_bits;
+};
+
+struct thunderx_gpio {
+ struct gpio_chip chip;
+ u8 __iomem *register_base;
+ struct irq_domain *irqd;
+ struct msix_entry *msix_entries; /* per line MSI-X */
+ struct thunderx_line *line_entries; /* per line irq info */
+ raw_spinlock_t lock;
+ unsigned long invert_mask[2];
+ unsigned long od_mask[2];
+ int base_msi;
+};
+
+static unsigned int bit_cfg_reg(unsigned int line)
+{
+ return 8 * line + GPIO_BIT_CFG;
+}
+
+static unsigned int intr_reg(unsigned int line)
+{
+ return 8 * line + GPIO_INTR;
+}
+
+static bool thunderx_gpio_is_gpio_nowarn(struct thunderx_gpio *txgpio,
+ unsigned int line)
+{
+ u64 bit_cfg = readq(txgpio->register_base + bit_cfg_reg(line));
+
+ return (bit_cfg & GPIO_BIT_CFG_PIN_SEL_MASK) == 0;
+}
+
+/*
+ * Check (and WARN) that the pin is available for GPIO. We will not
+ * allow modification of the state of non-GPIO pins from this driver.
+ */
+static bool thunderx_gpio_is_gpio(struct thunderx_gpio *txgpio,
+ unsigned int line)
+{
+ bool rv = thunderx_gpio_is_gpio_nowarn(txgpio, line);
+
+ WARN_RATELIMIT(!rv, "Pin %d not available for GPIO\n", line);
+
+ return rv;
+}
+
+static int thunderx_gpio_request(struct gpio_chip *chip, unsigned int line)
+{
+ struct thunderx_gpio *txgpio = gpiochip_get_data(chip);
+
+ return thunderx_gpio_is_gpio(txgpio, line) ? 0 : -EIO;
+}
+
+static int thunderx_gpio_dir_in(struct gpio_chip *chip, unsigned int line)
+{
+ struct thunderx_gpio *txgpio = gpiochip_get_data(chip);
+
+ if (!thunderx_gpio_is_gpio(txgpio, line))
+ return -EIO;
+
+ raw_spin_lock(&txgpio->lock);
+ clear_bit(line, txgpio->invert_mask);
+ clear_bit(line, txgpio->od_mask);
+ writeq(txgpio->line_entries[line].fil_bits,
+ txgpio->register_base + bit_cfg_reg(line));
+ raw_spin_unlock(&txgpio->lock);
+ return 0;
+}
+
+static void thunderx_gpio_set(struct gpio_chip *chip, unsigned int line,
+ int value)
+{
+ struct thunderx_gpio *txgpio = gpiochip_get_data(chip);
+ int bank = line / 64;
+ int bank_bit = line % 64;
+
+ void __iomem *reg = txgpio->register_base +
+ (bank * GPIO_2ND_BANK) + (value ? GPIO_TX_SET : GPIO_TX_CLR);
+
+ writeq(BIT_ULL(bank_bit), reg);
+}
+
+static int thunderx_gpio_dir_out(struct gpio_chip *chip, unsigned int line,
+ int value)
+{
+ struct thunderx_gpio *txgpio = gpiochip_get_data(chip);
+ u64 bit_cfg = txgpio->line_entries[line].fil_bits | GPIO_BIT_CFG_TX_OE;
+
+ if (!thunderx_gpio_is_gpio(txgpio, line))
+ return -EIO;
+
+ raw_spin_lock(&txgpio->lock);
+
+ thunderx_gpio_set(chip, line, value);
+
+ if (test_bit(line, txgpio->invert_mask))
+ bit_cfg |= GPIO_BIT_CFG_PIN_XOR;
+
+ if (test_bit(line, txgpio->od_mask))
+ bit_cfg |= GPIO_BIT_CFG_TX_OD;
+
+ writeq(bit_cfg, txgpio->register_base + bit_cfg_reg(line));
+
+ raw_spin_unlock(&txgpio->lock);
+ return 0;
+}
+
+static int thunderx_gpio_get_direction(struct gpio_chip *chip, unsigned int line)
+{
+ struct thunderx_gpio *txgpio = gpiochip_get_data(chip);
+ u64 bit_cfg;
+
+ if (!thunderx_gpio_is_gpio_nowarn(txgpio, line))
+ /*
+ * Say it is input for now to avoid WARNing on
+ * gpiochip_add_data(). We will WARN if someone
+ * requests it or tries to use it.
+ */
+ return 1;
+
+ bit_cfg = readq(txgpio->register_base + bit_cfg_reg(line));
+
+ return !(bit_cfg & GPIO_BIT_CFG_TX_OE);
+}
+
+static int thunderx_gpio_set_config(struct gpio_chip *chip,
+ unsigned int line,
+ unsigned long cfg)
+{
+ bool orig_invert, orig_od, orig_dat, new_invert, new_od;
+ u32 arg, sel;
+ u64 bit_cfg;
+ int bank = line / 64;
+ int bank_bit = line % 64;
+ int ret = -ENOTSUPP;
+ struct thunderx_gpio *txgpio = gpiochip_get_data(chip);
+ void __iomem *reg = txgpio->register_base + (bank * GPIO_2ND_BANK) + GPIO_TX_SET;
+
+ if (!thunderx_gpio_is_gpio(txgpio, line))
+ return -EIO;
+
+ raw_spin_lock(&txgpio->lock);
+ orig_invert = test_bit(line, txgpio->invert_mask);
+ new_invert = orig_invert;
+ orig_od = test_bit(line, txgpio->od_mask);
+ new_od = orig_od;
+ orig_dat = ((readq(reg) >> bank_bit) & 1) ^ orig_invert;
+ bit_cfg = readq(txgpio->register_base + bit_cfg_reg(line));
+ switch (pinconf_to_config_param(cfg)) {
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ /*
+ * Weird, setting open-drain mode causes signal
+ * inversion. Note this so we can compensate in the
+ * dir_out function.
+ */
+ set_bit(line, txgpio->invert_mask);
+ new_invert = true;
+ set_bit(line, txgpio->od_mask);
+ new_od = true;
+ ret = 0;
+ break;
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ clear_bit(line, txgpio->invert_mask);
+ new_invert = false;
+ clear_bit(line, txgpio->od_mask);
+ new_od = false;
+ ret = 0;
+ break;
+ case PIN_CONFIG_INPUT_DEBOUNCE:
+ arg = pinconf_to_config_argument(cfg);
+ if (arg > 1228) { /* 15 * 2^15 * 2.5nS maximum */
+ ret = -EINVAL;
+ break;
+ }
+ arg *= 400; /* scale to 2.5nS clocks. */
+ sel = 0;
+ while (arg > 15) {
+ sel++;
+ arg++; /* always round up */
+ arg >>= 1;
+ }
+ txgpio->line_entries[line].fil_bits =
+ (sel << GPIO_BIT_CFG_FIL_SEL_SHIFT) |
+ (arg << GPIO_BIT_CFG_FIL_CNT_SHIFT);
+ bit_cfg &= ~GPIO_BIT_CFG_FIL_MASK;
+ bit_cfg |= txgpio->line_entries[line].fil_bits;
+ writeq(bit_cfg, txgpio->register_base + bit_cfg_reg(line));
+ ret = 0;
+ break;
+ default:
+ break;
+ }
+ raw_spin_unlock(&txgpio->lock);
+
+ /*
+ * If currently output and OPEN_DRAIN changed, install the new
+ * settings
+ */
+ if ((new_invert != orig_invert || new_od != orig_od) &&
+ (bit_cfg & GPIO_BIT_CFG_TX_OE))
+ ret = thunderx_gpio_dir_out(chip, line, orig_dat ^ new_invert);
+
+ return ret;
+}
+
+static int thunderx_gpio_get(struct gpio_chip *chip, unsigned int line)
+{
+ struct thunderx_gpio *txgpio = gpiochip_get_data(chip);
+ int bank = line / 64;
+ int bank_bit = line % 64;
+ u64 read_bits = readq(txgpio->register_base + (bank * GPIO_2ND_BANK) + GPIO_RX_DAT);
+ u64 masked_bits = read_bits & BIT_ULL(bank_bit);
+
+ if (test_bit(line, txgpio->invert_mask))
+ return masked_bits == 0;
+ else
+ return masked_bits != 0;
+}
+
+static void thunderx_gpio_set_multiple(struct gpio_chip *chip,
+ unsigned long *mask,
+ unsigned long *bits)
+{
+ int bank;
+ u64 set_bits, clear_bits;
+ struct thunderx_gpio *txgpio = gpiochip_get_data(chip);
+
+ for (bank = 0; bank <= chip->ngpio / 64; bank++) {
+ set_bits = bits[bank] & mask[bank];
+ clear_bits = ~bits[bank] & mask[bank];
+ writeq(set_bits, txgpio->register_base + (bank * GPIO_2ND_BANK) + GPIO_TX_SET);
+ writeq(clear_bits, txgpio->register_base + (bank * GPIO_2ND_BANK) + GPIO_TX_CLR);
+ }
+}
+
+static void thunderx_gpio_irq_ack(struct irq_data *data)
+{
+ struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
+
+ writeq(GPIO_INTR_INTR,
+ txline->txgpio->register_base + intr_reg(txline->line));
+}
+
+static void thunderx_gpio_irq_mask(struct irq_data *data)
+{
+ struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
+
+ writeq(GPIO_INTR_ENA_W1C,
+ txline->txgpio->register_base + intr_reg(txline->line));
+}
+
+static void thunderx_gpio_irq_mask_ack(struct irq_data *data)
+{
+ struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
+
+ writeq(GPIO_INTR_ENA_W1C | GPIO_INTR_INTR,
+ txline->txgpio->register_base + intr_reg(txline->line));
+}
+
+static void thunderx_gpio_irq_unmask(struct irq_data *data)
+{
+ struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
+
+ writeq(GPIO_INTR_ENA_W1S,
+ txline->txgpio->register_base + intr_reg(txline->line));
+}
+
+static int thunderx_gpio_irq_set_type(struct irq_data *data,
+ unsigned int flow_type)
+{
+ struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
+ struct thunderx_gpio *txgpio = txline->txgpio;
+ u64 bit_cfg;
+
+ irqd_set_trigger_type(data, flow_type);
+
+ bit_cfg = txline->fil_bits | GPIO_BIT_CFG_INT_EN;
+
+ if (flow_type & IRQ_TYPE_EDGE_BOTH) {
+ irq_set_handler_locked(data, handle_fasteoi_ack_irq);
+ bit_cfg |= GPIO_BIT_CFG_INT_TYPE;
+ } else {
+ irq_set_handler_locked(data, handle_fasteoi_mask_irq);
+ }
+
+ raw_spin_lock(&txgpio->lock);
+ if (flow_type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW)) {
+ bit_cfg |= GPIO_BIT_CFG_PIN_XOR;
+ set_bit(txline->line, txgpio->invert_mask);
+ } else {
+ clear_bit(txline->line, txgpio->invert_mask);
+ }
+ clear_bit(txline->line, txgpio->od_mask);
+ writeq(bit_cfg, txgpio->register_base + bit_cfg_reg(txline->line));
+ raw_spin_unlock(&txgpio->lock);
+
+ return IRQ_SET_MASK_OK;
+}
+
+static void thunderx_gpio_irq_enable(struct irq_data *data)
+{
+ irq_chip_enable_parent(data);
+ thunderx_gpio_irq_unmask(data);
+}
+
+static void thunderx_gpio_irq_disable(struct irq_data *data)
+{
+ thunderx_gpio_irq_mask(data);
+ irq_chip_disable_parent(data);
+}
+
+static int thunderx_gpio_irq_request_resources(struct irq_data *data)
+{
+ struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
+ struct thunderx_gpio *txgpio = txline->txgpio;
+ struct irq_data *parent_data = data->parent_data;
+ int r;
+
+ r = gpiochip_lock_as_irq(&txgpio->chip, txline->line);
+ if (r)
+ return r;
+
+ if (parent_data && parent_data->chip->irq_request_resources) {
+ r = parent_data->chip->irq_request_resources(parent_data);
+ if (r)
+ goto error;
+ }
+
+ return 0;
+error:
+ gpiochip_unlock_as_irq(&txgpio->chip, txline->line);
+ return r;
+}
+
+static void thunderx_gpio_irq_release_resources(struct irq_data *data)
+{
+ struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
+ struct thunderx_gpio *txgpio = txline->txgpio;
+ struct irq_data *parent_data = data->parent_data;
+
+ if (parent_data && parent_data->chip->irq_release_resources)
+ parent_data->chip->irq_release_resources(parent_data);
+
+ gpiochip_unlock_as_irq(&txgpio->chip, txline->line);
+}
+
+/*
+ * Interrupts are chained from underlying MSI-X vectors. We have
+ * these irq_chip functions to be able to handle level triggering
+ * semantics and other acknowledgment tasks associated with the GPIO
+ * mechanism.
+ */
+static struct irq_chip thunderx_gpio_irq_chip = {
+ .name = "GPIO",
+ .irq_enable = thunderx_gpio_irq_enable,
+ .irq_disable = thunderx_gpio_irq_disable,
+ .irq_ack = thunderx_gpio_irq_ack,
+ .irq_mask = thunderx_gpio_irq_mask,
+ .irq_mask_ack = thunderx_gpio_irq_mask_ack,
+ .irq_unmask = thunderx_gpio_irq_unmask,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_request_resources = thunderx_gpio_irq_request_resources,
+ .irq_release_resources = thunderx_gpio_irq_release_resources,
+ .irq_set_type = thunderx_gpio_irq_set_type,
+
+ .flags = IRQCHIP_SET_TYPE_MASKED
+};
+
+static int thunderx_gpio_irq_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct thunderx_gpio *txgpio = d->host_data;
+
+ if (hwirq >= txgpio->chip.ngpio)
+ return -EINVAL;
+ if (!thunderx_gpio_is_gpio_nowarn(txgpio, hwirq))
+ return -EPERM;
+ return 0;
+}
+
+static int thunderx_gpio_irq_translate(struct irq_domain *d,
+ struct irq_fwspec *fwspec,
+ irq_hw_number_t *hwirq,
+ unsigned int *type)
+{
+ struct thunderx_gpio *txgpio = d->host_data;
+
+ if (WARN_ON(fwspec->param_count < 2))
+ return -EINVAL;
+ if (fwspec->param[0] >= txgpio->chip.ngpio)
+ return -EINVAL;
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
+ return 0;
+}
+
+static int thunderx_gpio_irq_alloc(struct irq_domain *d, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct thunderx_line *txline = arg;
+
+ return irq_domain_set_hwirq_and_chip(d, virq, txline->line,
+ &thunderx_gpio_irq_chip, txline);
+}
+
+static const struct irq_domain_ops thunderx_gpio_irqd_ops = {
+ .map = thunderx_gpio_irq_map,
+ .alloc = thunderx_gpio_irq_alloc,
+ .translate = thunderx_gpio_irq_translate
+};
+
+static int thunderx_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
+{
+ struct thunderx_gpio *txgpio = gpiochip_get_data(chip);
+
+ return irq_find_mapping(txgpio->irqd, offset);
+}
+
+static int thunderx_gpio_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ void __iomem * const *tbl;
+ struct device *dev = &pdev->dev;
+ struct thunderx_gpio *txgpio;
+ struct gpio_chip *chip;
+ int ngpio, i;
+ int err = 0;
+
+ txgpio = devm_kzalloc(dev, sizeof(*txgpio), GFP_KERNEL);
+ if (!txgpio)
+ return -ENOMEM;
+
+ raw_spin_lock_init(&txgpio->lock);
+ chip = &txgpio->chip;
+
+ pci_set_drvdata(pdev, txgpio);
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device: err %d\n", err);
+ goto out;
+ }
+
+ err = pcim_iomap_regions(pdev, 1 << 0, KBUILD_MODNAME);
+ if (err) {
+ dev_err(dev, "Failed to iomap PCI device: err %d\n", err);
+ goto out;
+ }
+
+ tbl = pcim_iomap_table(pdev);
+ txgpio->register_base = tbl[0];
+ if (!txgpio->register_base) {
+ dev_err(dev, "Cannot map PCI resource\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if (pdev->subsystem_device == 0xa10a) {
+ /* CN88XX has no GPIO_CONST register */
+ ngpio = 50;
+ txgpio->base_msi = 48;
+ } else {
+ u64 c = readq(txgpio->register_base + GPIO_CONST);
+
+ ngpio = c & GPIO_CONST_GPIOS_MASK;
+ txgpio->base_msi = (c >> 8) & 0xff;
+ }
+
+ txgpio->msix_entries = devm_kzalloc(dev,
+ sizeof(struct msix_entry) * ngpio,
+ GFP_KERNEL);
+ if (!txgpio->msix_entries) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ txgpio->line_entries = devm_kzalloc(dev,
+ sizeof(struct thunderx_line) * ngpio,
+ GFP_KERNEL);
+ if (!txgpio->line_entries) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < ngpio; i++) {
+ u64 bit_cfg = readq(txgpio->register_base + bit_cfg_reg(i));
+
+ txgpio->msix_entries[i].entry = txgpio->base_msi + (2 * i);
+ txgpio->line_entries[i].line = i;
+ txgpio->line_entries[i].txgpio = txgpio;
+ /*
+ * If something has already programmed the pin, use
+ * the existing glitch filter settings, otherwise go
+ * to 400nS.
+ */
+ txgpio->line_entries[i].fil_bits = bit_cfg ?
+ (bit_cfg & GPIO_BIT_CFG_FIL_MASK) : GLITCH_FILTER_400NS;
+
+ if ((bit_cfg & GPIO_BIT_CFG_TX_OE) && (bit_cfg & GPIO_BIT_CFG_TX_OD))
+ set_bit(i, txgpio->od_mask);
+ if (bit_cfg & GPIO_BIT_CFG_PIN_XOR)
+ set_bit(i, txgpio->invert_mask);
+ }
+
+
+ /* Enable all MSI-X for interrupts on all possible lines. */
+ err = pci_enable_msix_range(pdev, txgpio->msix_entries, ngpio, ngpio);
+ if (err < 0)
+ goto out;
+
+ /*
+ * Push GPIO specific irqdomain on hierarchy created as a side
+ * effect of the pci_enable_msix()
+ */
+ txgpio->irqd = irq_domain_create_hierarchy(irq_get_irq_data(txgpio->msix_entries[0].vector)->domain,
+ 0, 0, of_node_to_fwnode(dev->of_node),
+ &thunderx_gpio_irqd_ops, txgpio);
+ if (!txgpio->irqd)
+ goto out;
+
+ /* Push on irq_data and the domain for each line. */
+ for (i = 0; i < ngpio; i++) {
+ err = irq_domain_push_irq(txgpio->irqd,
+ txgpio->msix_entries[i].vector,
+ &txgpio->line_entries[i]);
+ if (err < 0)
+ dev_err(dev, "irq_domain_push_irq: %d\n", err);
+ }
+
+ chip->label = KBUILD_MODNAME;
+ chip->parent = dev;
+ chip->owner = THIS_MODULE;
+ chip->request = thunderx_gpio_request;
+ chip->base = -1; /* System allocated */
+ chip->can_sleep = false;
+ chip->ngpio = ngpio;
+ chip->get_direction = thunderx_gpio_get_direction;
+ chip->direction_input = thunderx_gpio_dir_in;
+ chip->get = thunderx_gpio_get;
+ chip->direction_output = thunderx_gpio_dir_out;
+ chip->set = thunderx_gpio_set;
+ chip->set_multiple = thunderx_gpio_set_multiple;
+ chip->set_config = thunderx_gpio_set_config;
+ chip->to_irq = thunderx_gpio_to_irq;
+ err = devm_gpiochip_add_data(dev, chip, txgpio);
+ if (err)
+ goto out;
+
+ dev_info(dev, "ThunderX GPIO: %d lines with base %d.\n",
+ ngpio, chip->base);
+ return 0;
+out:
+ pci_set_drvdata(pdev, NULL);
+ return err;
+}
+
+static void thunderx_gpio_remove(struct pci_dev *pdev)
+{
+ int i;
+ struct thunderx_gpio *txgpio = pci_get_drvdata(pdev);
+
+ for (i = 0; i < txgpio->chip.ngpio; i++)
+ irq_domain_pop_irq(txgpio->irqd,
+ txgpio->msix_entries[i].vector);
+
+ irq_domain_remove(txgpio->irqd);
+
+ pci_set_drvdata(pdev, NULL);
+}
+
+static const struct pci_device_id thunderx_gpio_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xA00A) },
+ { 0, } /* end of table */
+};
+
+MODULE_DEVICE_TABLE(pci, thunderx_gpio_id_table);
+
+static struct pci_driver thunderx_gpio_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = thunderx_gpio_id_table,
+ .probe = thunderx_gpio_probe,
+ .remove = thunderx_gpio_remove,
+};
+
+module_pci_driver(thunderx_gpio_driver);
+
+MODULE_DESCRIPTION("Cavium Inc. ThunderX/OCTEON-TX GPIO Driver");
+MODULE_LICENSE("GPL");
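A worked sketch of the PIN_CONFIG_INPUT_DEBOUNCE scaling in thunderx_gpio_set_config() above: the requested time in microseconds is converted to 2.5 ns filter clocks, then reduced to a 4-bit count plus a power-of-two prescaler. The helper name is illustrative:

#include <linux/types.h>

/* Encode a debounce time (in microseconds) into filter count/prescaler. */
static void demo_debounce_encoding(unsigned int usec, u32 *sel, u32 *cnt)
{
	u32 arg = usec * 400;	/* 400 clocks of 2.5 ns per microsecond */
	u32 s = 0;

	while (arg > 15) {
		s++;		/* double the prescaler ... */
		arg++;		/* ... and halve the count, rounding up */
		arg >>= 1;
	}
	*sel = s;
	*cnt = arg;
}

/*
 * Example: 10 us -> arg = 4000 clocks; halving with round-up gives
 * 4000 -> 2000 -> 1000 -> 500 -> 250 -> 125 -> 63 -> 32 -> 16 -> 8,
 * so sel = 9, cnt = 8, i.e. roughly 8 * 2^9 * 2.5 ns = 10.24 us.
 * The 1228 us bound checked by the driver corresponds to the largest
 * encodable filter, 15 * 2^15 * 2.5 ns.
 */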
diff --git a/drivers/gpio/gpio-tps68470.c b/drivers/gpio/gpio-tps68470.c
new file mode 100644
index 000000000000..fa2662f8b026
--- /dev/null
+++ b/drivers/gpio/gpio-tps68470.c
@@ -0,0 +1,176 @@
+/*
+ * GPIO driver for TPS68470 PMIC
+ *
+ * Copyright (C) 2017 Intel Corporation
+ *
+ * Authors:
+ * Antti Laakso <antti.laakso@intel.com>
+ * Tianshu Qiu <tian.shu.qiu@intel.com>
+ * Jian Xu Zheng <jian.xu.zheng@intel.com>
+ * Yuning Pu <yuning.pu@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/gpio/driver.h>
+#include <linux/mfd/tps68470.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define TPS68470_N_LOGIC_OUTPUT 3
+#define TPS68470_N_REGULAR_GPIO 7
+#define TPS68470_N_GPIO (TPS68470_N_LOGIC_OUTPUT + TPS68470_N_REGULAR_GPIO)
+
+struct tps68470_gpio_data {
+ struct regmap *tps68470_regmap;
+ struct gpio_chip gc;
+};
+
+static int tps68470_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+ struct tps68470_gpio_data *tps68470_gpio = gpiochip_get_data(gc);
+ struct regmap *regmap = tps68470_gpio->tps68470_regmap;
+ unsigned int reg = TPS68470_REG_GPDO;
+ int val, ret;
+
+ if (offset >= TPS68470_N_REGULAR_GPIO) {
+ offset -= TPS68470_N_REGULAR_GPIO;
+ reg = TPS68470_REG_SGPO;
+ }
+
+ ret = regmap_read(regmap, reg, &val);
+ if (ret) {
+ dev_err(tps68470_gpio->gc.parent, "reg 0x%x read failed\n",
+ TPS68470_REG_SGPO);
+ return ret;
+ }
+ return !!(val & BIT(offset));
+}
+
+/* Return 0 if output, 1 if input */
+static int tps68470_gpio_get_direction(struct gpio_chip *gc,
+ unsigned int offset)
+{
+ struct tps68470_gpio_data *tps68470_gpio = gpiochip_get_data(gc);
+ struct regmap *regmap = tps68470_gpio->tps68470_regmap;
+ int val, ret;
+
+ /* rest are always outputs */
+ if (offset >= TPS68470_N_REGULAR_GPIO)
+ return 0;
+
+ ret = regmap_read(regmap, TPS68470_GPIO_CTL_REG_A(offset), &val);
+ if (ret) {
+ dev_err(tps68470_gpio->gc.parent, "reg 0x%x read failed\n",
+ TPS68470_GPIO_CTL_REG_A(offset));
+ return ret;
+ }
+
+ val &= TPS68470_GPIO_MODE_MASK;
+ return val >= TPS68470_GPIO_MODE_OUT_CMOS ? 0 : 1;
+}
+
+static void tps68470_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int value)
+{
+ struct tps68470_gpio_data *tps68470_gpio = gpiochip_get_data(gc);
+ struct regmap *regmap = tps68470_gpio->tps68470_regmap;
+ unsigned int reg = TPS68470_REG_GPDO;
+
+ if (offset >= TPS68470_N_REGULAR_GPIO) {
+ reg = TPS68470_REG_SGPO;
+ offset -= TPS68470_N_REGULAR_GPIO;
+ }
+
+ regmap_update_bits(regmap, reg, BIT(offset), value ? BIT(offset) : 0);
+}
+
+static int tps68470_gpio_output(struct gpio_chip *gc, unsigned int offset,
+ int value)
+{
+ struct tps68470_gpio_data *tps68470_gpio = gpiochip_get_data(gc);
+ struct regmap *regmap = tps68470_gpio->tps68470_regmap;
+
+ /* rest are always outputs */
+ if (offset >= TPS68470_N_REGULAR_GPIO)
+ return 0;
+
+ /* Set the initial value */
+ tps68470_gpio_set(gc, offset, value);
+
+ return regmap_update_bits(regmap, TPS68470_GPIO_CTL_REG_A(offset),
+ TPS68470_GPIO_MODE_MASK,
+ TPS68470_GPIO_MODE_OUT_CMOS);
+}
+
+static int tps68470_gpio_input(struct gpio_chip *gc, unsigned int offset)
+{
+ struct tps68470_gpio_data *tps68470_gpio = gpiochip_get_data(gc);
+ struct regmap *regmap = tps68470_gpio->tps68470_regmap;
+
+ /* rest are always outputs */
+ if (offset >= TPS68470_N_REGULAR_GPIO)
+ return -EINVAL;
+
+ return regmap_update_bits(regmap, TPS68470_GPIO_CTL_REG_A(offset),
+ TPS68470_GPIO_MODE_MASK, 0x00);
+}
+
+static const char *tps68470_names[TPS68470_N_GPIO] = {
+ "gpio.0", "gpio.1", "gpio.2", "gpio.3",
+ "gpio.4", "gpio.5", "gpio.6",
+ "s_enable", "s_idle", "s_resetn",
+};
+
+static int tps68470_gpio_probe(struct platform_device *pdev)
+{
+ struct tps68470_gpio_data *tps68470_gpio;
+ int ret;
+
+ tps68470_gpio = devm_kzalloc(&pdev->dev, sizeof(*tps68470_gpio),
+ GFP_KERNEL);
+ if (!tps68470_gpio)
+ return -ENOMEM;
+
+ tps68470_gpio->tps68470_regmap = dev_get_drvdata(pdev->dev.parent);
+ tps68470_gpio->gc.label = "tps68470-gpio";
+ tps68470_gpio->gc.owner = THIS_MODULE;
+ tps68470_gpio->gc.direction_input = tps68470_gpio_input;
+ tps68470_gpio->gc.direction_output = tps68470_gpio_output;
+ tps68470_gpio->gc.get = tps68470_gpio_get;
+ tps68470_gpio->gc.get_direction = tps68470_gpio_get_direction;
+ tps68470_gpio->gc.set = tps68470_gpio_set;
+ tps68470_gpio->gc.can_sleep = true;
+ tps68470_gpio->gc.names = tps68470_names;
+ tps68470_gpio->gc.ngpio = TPS68470_N_GPIO;
+ tps68470_gpio->gc.base = -1;
+ tps68470_gpio->gc.parent = &pdev->dev;
+
+ ret = devm_gpiochip_add_data(&pdev->dev, &tps68470_gpio->gc,
+ tps68470_gpio);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register gpio_chip: %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, tps68470_gpio);
+
+ return ret;
+}
+
+static struct platform_driver tps68470_gpio_driver = {
+ .driver = {
+ .name = "tps68470-gpio",
+ },
+ .probe = tps68470_gpio_probe,
+};
+
+builtin_platform_driver(tps68470_gpio_driver)
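A minimal sketch of the regmap-backed set() idiom the TPS68470 driver above follows: one bit per line in a data-output register, updated with regmap_update_bits(). The register offset used here is hypothetical:

#include <linux/bitops.h>
#include <linux/gpio/driver.h>
#include <linux/regmap.h>

static void demo_regmap_gpio_set(struct gpio_chip *gc, unsigned int offset,
				 int value)
{
	struct regmap *regmap = gpiochip_get_data(gc);

	/* Read-modify-write only the bit that belongs to this line. */
	regmap_update_bits(regmap, 0x1b /* hypothetical data-out register */,
			   BIT(offset), value ? BIT(offset) : 0);
}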
diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c
index 24f388ed46d4..9b511df5450e 100644
--- a/drivers/gpio/gpio-twl4030.c
+++ b/drivers/gpio/gpio-twl4030.c
@@ -35,7 +35,7 @@
#include <linux/of.h>
#include <linux/irqdomain.h>
-#include <linux/i2c/twl.h>
+#include <linux/mfd/twl.h>
/*
* The GPIO "subchip" supports 18 GPIOs which can be configured as
diff --git a/drivers/gpio/gpio-twl6040.c b/drivers/gpio/gpio-twl6040.c
index b780314cdfc9..dadeacf43e0c 100644
--- a/drivers/gpio/gpio-twl6040.c
+++ b/drivers/gpio/gpio-twl6040.c
@@ -32,8 +32,6 @@
#include <linux/mfd/twl6040.h>
-static struct gpio_chip twl6040gpo_chip;
-
static int twl6040gpo_get(struct gpio_chip *chip, unsigned offset)
{
struct twl6040 *twl6040 = dev_get_drvdata(chip->parent->parent);
diff --git a/drivers/gpio/gpio-tz1090.c b/drivers/gpio/gpio-tz1090.c
index ca958e0f6909..22c5be65051f 100644
--- a/drivers/gpio/gpio-tz1090.c
+++ b/drivers/gpio/gpio-tz1090.c
@@ -527,13 +527,12 @@ static void tz1090_gpio_register_banks(struct tz1090_gpio *priv)
ret = of_property_read_u32(node, "reg", &addr);
if (ret) {
- dev_err(priv->dev, "invalid reg on %s\n",
- node->full_name);
+ dev_err(priv->dev, "invalid reg on %pOF\n", node);
continue;
}
if (addr >= 3) {
- dev_err(priv->dev, "index %u in %s out of range\n",
- addr, node->full_name);
+ dev_err(priv->dev, "index %u in %pOF out of range\n",
+ addr, node);
continue;
}
@@ -543,8 +542,7 @@ static void tz1090_gpio_register_banks(struct tz1090_gpio *priv)
ret = tz1090_gpio_bank_probe(&info);
if (ret) {
- dev_err(priv->dev, "failure registering %s\n",
- node->full_name);
+ dev_err(priv->dev, "failure registering %pOF\n", node);
of_node_put(node);
continue;
}
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index 521fbe338589..cbe9e06861de 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -30,10 +30,16 @@
#define VF610_GPIO_PER_PORT 32
+struct fsl_gpio_soc_data {
+ /* SoCs have a Port Data Direction Register (PDDR) */
+ bool have_paddr;
+};
+
struct vf610_gpio_port {
struct gpio_chip gc;
void __iomem *base;
void __iomem *gpio_base;
+ const struct fsl_gpio_soc_data *sdata;
u8 irqc[VF610_GPIO_PER_PORT];
int irq;
};
@@ -43,6 +49,7 @@ struct vf610_gpio_port {
#define GPIO_PCOR 0x08
#define GPIO_PTOR 0x0c
#define GPIO_PDIR 0x10
+#define GPIO_PDDR 0x14
#define PORT_PCR(n) ((n) * 0x4)
#define PORT_PCR_IRQC_OFFSET 16
@@ -61,8 +68,13 @@ struct vf610_gpio_port {
static struct irq_chip vf610_gpio_irq_chip;
+static const struct fsl_gpio_soc_data imx_data = {
+ .have_paddr = true,
+};
+
static const struct of_device_id vf610_gpio_dt_ids[] = {
- { .compatible = "fsl,vf610-gpio" },
+ { .compatible = "fsl,vf610-gpio", .data = NULL, },
+ { .compatible = "fsl,imx7ulp-gpio", .data = &imx_data, },
{ /* sentinel */ }
};
@@ -79,8 +91,18 @@ static inline u32 vf610_gpio_readl(void __iomem *reg)
static int vf610_gpio_get(struct gpio_chip *gc, unsigned int gpio)
{
struct vf610_gpio_port *port = gpiochip_get_data(gc);
-
- return !!(vf610_gpio_readl(port->gpio_base + GPIO_PDIR) & BIT(gpio));
+ unsigned long mask = BIT(gpio);
+ void __iomem *addr;
+
+ if (port->sdata && port->sdata->have_paddr) {
+ mask &= vf610_gpio_readl(port->gpio_base + GPIO_PDDR);
+ addr = mask ? port->gpio_base + GPIO_PDOR :
+ port->gpio_base + GPIO_PDIR;
+ return !!(vf610_gpio_readl(addr) & BIT(gpio));
+ } else {
+ return !!(vf610_gpio_readl(port->gpio_base + GPIO_PDIR)
+ & BIT(gpio));
+ }
}
static void vf610_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
@@ -96,12 +118,28 @@ static void vf610_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
static int vf610_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
{
+ struct vf610_gpio_port *port = gpiochip_get_data(chip);
+ unsigned long mask = BIT(gpio);
+ u32 val;
+
+ if (port->sdata && port->sdata->have_paddr) {
+ val = vf610_gpio_readl(port->gpio_base + GPIO_PDDR);
+ val &= ~mask;
+ vf610_gpio_writel(val, port->gpio_base + GPIO_PDDR);
+ }
+
return pinctrl_gpio_direction_input(chip->base + gpio);
}
static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
int value)
{
+ struct vf610_gpio_port *port = gpiochip_get_data(chip);
+ unsigned long mask = BIT(gpio);
+
+ if (port->sdata && port->sdata->have_paddr)
+ vf610_gpio_writel(mask, port->gpio_base + GPIO_PDDR);
+
vf610_gpio_set(chip, gpio, value);
return pinctrl_gpio_direction_output(chip->base + gpio);
@@ -216,6 +254,8 @@ static struct irq_chip vf610_gpio_irq_chip = {
static int vf610_gpio_probe(struct platform_device *pdev)
{
+ const struct of_device_id *of_id = of_match_device(vf610_gpio_dt_ids,
+ &pdev->dev);
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct vf610_gpio_port *port;
@@ -227,6 +267,7 @@ static int vf610_gpio_probe(struct platform_device *pdev)
if (!port)
return -ENOMEM;
+ port->sdata = of_id->data;
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
port->base = devm_ioremap_resource(dev, iores);
if (IS_ERR(port->base))
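A sketch of the per-compatible SoC data lookup the vf610 change introduces: the matched of_device_id carries a pointer to SoC-specific flags, checked for NULL here before use. Names and data are illustrative:

#include <linux/of_device.h>
#include <linux/types.h>

struct demo_soc_data {
	bool have_paddr;	/* SoC has a Port Data Direction Register */
};

static const struct demo_soc_data demo_imx_data = { .have_paddr = true };

static const struct of_device_id demo_dt_ids[] = {
	{ .compatible = "fsl,vf610-gpio",   .data = NULL },
	{ .compatible = "fsl,imx7ulp-gpio", .data = &demo_imx_data },
	{ /* sentinel */ }
};

static const struct demo_soc_data *demo_get_soc_data(struct device *dev)
{
	const struct of_device_id *of_id = of_match_device(demo_dt_ids, dev);

	return of_id ? of_id->data : NULL;
}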
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index 14b2a62338ea..e8ec0e33a0a9 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -360,8 +360,8 @@ static int xgpio_probe(struct platform_device *pdev)
/* Call the OF gpio helper to setup and register the GPIO device */
status = of_mm_gpiochip_add_data(np, &chip->mmchip, chip);
if (status) {
- pr_err("%s: error in probe function with status %d\n",
- np->full_name, status);
+ pr_err("%pOF: error in probe function with status %d\n",
+ np, status);
return status;
}
diff --git a/drivers/gpio/gpio-zevio.c b/drivers/gpio/gpio-zevio.c
index e23ef7b9451d..3926ce9c2840 100644
--- a/drivers/gpio/gpio-zevio.c
+++ b/drivers/gpio/gpio-zevio.c
@@ -156,7 +156,7 @@ static int zevio_gpio_to_irq(struct gpio_chip *chip, unsigned pin)
return -ENXIO;
}
-static struct gpio_chip zevio_gpio_chip = {
+static const struct gpio_chip zevio_gpio_chip = {
.direction_input = zevio_gpio_direction_input,
.direction_output = zevio_gpio_direction_output,
.set = zevio_gpio_set,
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index df0851464006..b3cc948a2d8b 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -60,13 +60,13 @@
#define ZYNQ_GPIO_BANK5_PIN_MAX(str) (ZYNQ_GPIO_BANK5_PIN_MIN(str) + \
ZYNQ##str##_GPIO_BANK5_NGPIO - 1)
-
/* Register offsets for the GPIO device */
/* LSW Mask & Data -WO */
#define ZYNQ_GPIO_DATA_LSW_OFFSET(BANK) (0x000 + (8 * BANK))
/* MSW Mask & Data -WO */
#define ZYNQ_GPIO_DATA_MSW_OFFSET(BANK) (0x004 + (8 * BANK))
/* Data Register-RW */
+#define ZYNQ_GPIO_DATA_OFFSET(BANK) (0x040 + (4 * BANK))
#define ZYNQ_GPIO_DATA_RO_OFFSET(BANK) (0x060 + (4 * BANK))
/* Direction mode reg-RW */
#define ZYNQ_GPIO_DIRM_OFFSET(BANK) (0x204 + (0x40 * BANK))
@@ -98,6 +98,19 @@
/* set to differentiate zynq from zynqmp, 0=zynqmp, 1=zynq */
#define ZYNQ_GPIO_QUIRK_IS_ZYNQ BIT(0)
+#define GPIO_QUIRK_DATA_RO_BUG BIT(1)
+
+struct gpio_regs {
+ u32 datamsw[ZYNQMP_GPIO_MAX_BANK];
+ u32 datalsw[ZYNQMP_GPIO_MAX_BANK];
+ u32 dirm[ZYNQMP_GPIO_MAX_BANK];
+ u32 outen[ZYNQMP_GPIO_MAX_BANK];
+ u32 int_en[ZYNQMP_GPIO_MAX_BANK];
+ u32 int_dis[ZYNQMP_GPIO_MAX_BANK];
+ u32 int_type[ZYNQMP_GPIO_MAX_BANK];
+ u32 int_polarity[ZYNQMP_GPIO_MAX_BANK];
+ u32 int_any[ZYNQMP_GPIO_MAX_BANK];
+};
/**
* struct zynq_gpio - gpio device private data structure
@@ -106,6 +119,7 @@
* @clk: clock resource for this controller
* @irq: interrupt for the GPIO device
* @p_data: pointer to platform data
+ * @context: context registers
*/
struct zynq_gpio {
struct gpio_chip chip;
@@ -113,16 +127,18 @@ struct zynq_gpio {
struct clk *clk;
int irq;
const struct zynq_platform_data *p_data;
+ struct gpio_regs context;
};
/**
* struct zynq_platform_data - zynq gpio platform data structure
* @label: string to store in gpio->label
+ * @quirks: Flags used to identify the platform
* @ngpio: max number of gpio pins
* @max_bank: maximum number of gpio banks
* @bank_min: this array represents bank's min pin
* @bank_max: this array represents bank's max pin
-*/
+ */
struct zynq_platform_data {
const char *label;
u32 quirks;
@@ -147,6 +163,17 @@ static int zynq_gpio_is_zynq(struct zynq_gpio *gpio)
}
/**
+ * gpio_data_ro_bug - test if HW bug exists or not
+ * @gpio: Pointer to driver data struct
+ *
+ * Return: 0 if bug does not exist, 1 if bug exists.
+ */
+static int gpio_data_ro_bug(struct zynq_gpio *gpio)
+{
+ return !!(gpio->p_data->quirks & GPIO_QUIRK_DATA_RO_BUG);
+}
+
+/**
* zynq_gpio_get_bank_pin - Get the bank number and pin number within that bank
* for a given pin in the GPIO device
* @pin_num: gpio pin number within the device
@@ -154,6 +181,7 @@ static int zynq_gpio_is_zynq(struct zynq_gpio *gpio)
* pin
* @bank_pin_num: an output parameter used to return pin number within a bank
* for the given gpio pin
+ * @gpio: gpio device data structure
*
* Returns the bank number and pin offset within the bank.
*/
@@ -166,11 +194,11 @@ static inline void zynq_gpio_get_bank_pin(unsigned int pin_num,
for (bank = 0; bank < gpio->p_data->max_bank; bank++) {
if ((pin_num >= gpio->p_data->bank_min[bank]) &&
- (pin_num <= gpio->p_data->bank_max[bank])) {
- *bank_num = bank;
- *bank_pin_num = pin_num -
- gpio->p_data->bank_min[bank];
- return;
+ (pin_num <= gpio->p_data->bank_max[bank])) {
+ *bank_num = bank;
+ *bank_pin_num = pin_num -
+ gpio->p_data->bank_min[bank];
+ return;
}
}
@@ -197,9 +225,28 @@ static int zynq_gpio_get_value(struct gpio_chip *chip, unsigned int pin)
zynq_gpio_get_bank_pin(pin, &bank_num, &bank_pin_num, gpio);
- data = readl_relaxed(gpio->base_addr +
- ZYNQ_GPIO_DATA_RO_OFFSET(bank_num));
-
+ if (gpio_data_ro_bug(gpio)) {
+ if (zynq_gpio_is_zynq(gpio)) {
+ if (bank_num <= 1) {
+ data = readl_relaxed(gpio->base_addr +
+ ZYNQ_GPIO_DATA_RO_OFFSET(bank_num));
+ } else {
+ data = readl_relaxed(gpio->base_addr +
+ ZYNQ_GPIO_DATA_OFFSET(bank_num));
+ }
+ } else {
+ if (bank_num <= 2) {
+ data = readl_relaxed(gpio->base_addr +
+ ZYNQ_GPIO_DATA_RO_OFFSET(bank_num));
+ } else {
+ data = readl_relaxed(gpio->base_addr +
+ ZYNQ_GPIO_DATA_OFFSET(bank_num));
+ }
+ }
+ } else {
+ data = readl_relaxed(gpio->base_addr +
+ ZYNQ_GPIO_DATA_RO_OFFSET(bank_num));
+ }
return (data >> bank_pin_num) & 1;
}
@@ -263,7 +310,7 @@ static int zynq_gpio_dir_in(struct gpio_chip *chip, unsigned int pin)
* as inputs.
*/
if (zynq_gpio_is_zynq(gpio) && bank_num == 0 &&
- (bank_pin_num == 7 || bank_pin_num == 8))
+ (bank_pin_num == 7 || bank_pin_num == 8))
return -EINVAL;
/* clear the bit in direction mode reg to set the pin as input */
@@ -464,13 +511,14 @@ static int zynq_gpio_set_irq_type(struct irq_data *irq_data, unsigned int type)
writel_relaxed(int_any,
gpio->base_addr + ZYNQ_GPIO_INTANY_OFFSET(bank_num));
- if (type & IRQ_TYPE_LEVEL_MASK) {
+ if (type & IRQ_TYPE_LEVEL_MASK)
irq_set_chip_handler_name_locked(irq_data,
- &zynq_gpio_level_irqchip, handle_fasteoi_irq, NULL);
- } else {
+ &zynq_gpio_level_irqchip,
+ handle_fasteoi_irq, NULL);
+ else
irq_set_chip_handler_name_locked(irq_data,
- &zynq_gpio_edge_irqchip, handle_level_irq, NULL);
- }
+ &zynq_gpio_edge_irqchip,
+ handle_level_irq, NULL);
return 0;
}
@@ -530,7 +578,6 @@ static void zynq_gpio_handle_bank_irq(struct zynq_gpio *gpio,
/**
* zynq_gpio_irqhandler - IRQ handler for the gpio banks of a gpio device
- * @irq: irq number of the gpio bank where interrupt has occurred
* @desc: irq descriptor instance of the 'irq'
*
* This function reads the Interrupt Status Register of each bank to get the
@@ -560,14 +607,73 @@ static void zynq_gpio_irqhandler(struct irq_desc *desc)
chained_irq_exit(irqchip, desc);
}
+static void zynq_gpio_save_context(struct zynq_gpio *gpio)
+{
+ unsigned int bank_num;
+
+ for (bank_num = 0; bank_num < gpio->p_data->max_bank; bank_num++) {
+ gpio->context.datalsw[bank_num] =
+ readl_relaxed(gpio->base_addr +
+ ZYNQ_GPIO_DATA_LSW_OFFSET(bank_num));
+ gpio->context.datamsw[bank_num] =
+ readl_relaxed(gpio->base_addr +
+ ZYNQ_GPIO_DATA_MSW_OFFSET(bank_num));
+ gpio->context.dirm[bank_num] = readl_relaxed(gpio->base_addr +
+ ZYNQ_GPIO_DIRM_OFFSET(bank_num));
+ gpio->context.int_en[bank_num] = readl_relaxed(gpio->base_addr +
+ ZYNQ_GPIO_INTMASK_OFFSET(bank_num));
+ gpio->context.int_type[bank_num] =
+ readl_relaxed(gpio->base_addr +
+ ZYNQ_GPIO_INTTYPE_OFFSET(bank_num));
+ gpio->context.int_polarity[bank_num] =
+ readl_relaxed(gpio->base_addr +
+ ZYNQ_GPIO_INTPOL_OFFSET(bank_num));
+ gpio->context.int_any[bank_num] =
+ readl_relaxed(gpio->base_addr +
+ ZYNQ_GPIO_INTANY_OFFSET(bank_num));
+ }
+}
+
+static void zynq_gpio_restore_context(struct zynq_gpio *gpio)
+{
+ unsigned int bank_num;
+
+ for (bank_num = 0; bank_num < gpio->p_data->max_bank; bank_num++) {
+ writel_relaxed(gpio->context.datalsw[bank_num],
+ gpio->base_addr +
+ ZYNQ_GPIO_DATA_LSW_OFFSET(bank_num));
+ writel_relaxed(gpio->context.datamsw[bank_num],
+ gpio->base_addr +
+ ZYNQ_GPIO_DATA_MSW_OFFSET(bank_num));
+ writel_relaxed(gpio->context.dirm[bank_num],
+ gpio->base_addr +
+ ZYNQ_GPIO_DIRM_OFFSET(bank_num));
+ writel_relaxed(gpio->context.int_en[bank_num],
+ gpio->base_addr +
+ ZYNQ_GPIO_INTEN_OFFSET(bank_num));
+ writel_relaxed(gpio->context.int_type[bank_num],
+ gpio->base_addr +
+ ZYNQ_GPIO_INTTYPE_OFFSET(bank_num));
+ writel_relaxed(gpio->context.int_polarity[bank_num],
+ gpio->base_addr +
+ ZYNQ_GPIO_INTPOL_OFFSET(bank_num));
+ writel_relaxed(gpio->context.int_any[bank_num],
+ gpio->base_addr +
+ ZYNQ_GPIO_INTANY_OFFSET(bank_num));
+ }
+}
+
static int __maybe_unused zynq_gpio_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
int irq = platform_get_irq(pdev, 0);
struct irq_data *data = irq_get_irq_data(irq);
+ struct zynq_gpio *gpio = platform_get_drvdata(pdev);
- if (!irqd_is_wakeup_set(data))
+ if (!irqd_is_wakeup_set(data)) {
+ zynq_gpio_save_context(gpio);
return pm_runtime_force_suspend(dev);
+ }
return 0;
}
@@ -577,9 +683,14 @@ static int __maybe_unused zynq_gpio_resume(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
int irq = platform_get_irq(pdev, 0);
struct irq_data *data = irq_get_irq_data(irq);
+ struct zynq_gpio *gpio = platform_get_drvdata(pdev);
+ int ret;
- if (!irqd_is_wakeup_set(data))
- return pm_runtime_force_resume(dev);
+ if (!irqd_is_wakeup_set(data)) {
+ ret = pm_runtime_force_resume(dev);
+ zynq_gpio_restore_context(gpio);
+ return ret;
+ }
return 0;
}
@@ -602,7 +713,7 @@ static int __maybe_unused zynq_gpio_runtime_resume(struct device *dev)
return clk_prepare_enable(gpio->clk);
}
-static int zynq_gpio_request(struct gpio_chip *chip, unsigned offset)
+static int zynq_gpio_request(struct gpio_chip *chip, unsigned int offset)
{
int ret;
@@ -615,7 +726,7 @@ static int zynq_gpio_request(struct gpio_chip *chip, unsigned offset)
return ret < 0 ? ret : 0;
}
-static void zynq_gpio_free(struct gpio_chip *chip, unsigned offset)
+static void zynq_gpio_free(struct gpio_chip *chip, unsigned int offset)
{
pm_runtime_put(chip->parent);
}
@@ -623,11 +734,12 @@ static void zynq_gpio_free(struct gpio_chip *chip, unsigned offset)
static const struct dev_pm_ops zynq_gpio_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(zynq_gpio_suspend, zynq_gpio_resume)
SET_RUNTIME_PM_OPS(zynq_gpio_runtime_suspend,
- zynq_gpio_runtime_resume, NULL)
+ zynq_gpio_runtime_resume, NULL)
};
static const struct zynq_platform_data zynqmp_gpio_def = {
.label = "zynqmp_gpio",
+ .quirks = GPIO_QUIRK_DATA_RO_BUG,
.ngpio = ZYNQMP_GPIO_NR_GPIOS,
.max_bank = ZYNQMP_GPIO_MAX_BANK,
.bank_min[0] = ZYNQ_GPIO_BANK0_PIN_MIN(MP),
@@ -646,7 +758,7 @@ static const struct zynq_platform_data zynqmp_gpio_def = {
static const struct zynq_platform_data zynq_gpio_def = {
.label = "zynq_gpio",
- .quirks = ZYNQ_GPIO_QUIRK_IS_ZYNQ,
+ .quirks = ZYNQ_GPIO_QUIRK_IS_ZYNQ | GPIO_QUIRK_DATA_RO_BUG,
.ngpio = ZYNQ_GPIO_NR_GPIOS,
.max_bank = ZYNQ_GPIO_MAX_BANK,
.bank_min[0] = ZYNQ_GPIO_BANK0_PIN_MIN(),
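A minimal sketch of the suspend-side policy the zynq change adds: the register context is saved (and runtime suspend forced) only when the controller's parent IRQ is not armed as a wakeup source, since a wakeup-capable controller must stay powered and keeps its state. Names are illustrative and the actual save step is elided:

#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int demo_gpio_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct irq_data *data = irq_get_irq_data(platform_get_irq(pdev, 0));

	if (!irqd_is_wakeup_set(data)) {
		/* save per-bank register context here */
		return pm_runtime_force_suspend(dev);
	}

	return 0;
}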
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index c9b42dd12dfa..4d2113530735 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -61,7 +61,7 @@ static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
#ifdef CONFIG_PINCTRL
/**
* acpi_gpiochip_pin_to_gpio_offset() - translates ACPI GPIO to Linux GPIO
- * @chip: GPIO chip
+ * @gdev: GPIO device
* @pin: ACPI GPIO pin number from GpioIo/GpioInt resource
*
* Function takes ACPI GpioIo/GpioInt pin number as a parameter and
@@ -763,7 +763,7 @@ struct gpio_desc *acpi_node_get_gpiod(struct fwnode_handle *fwnode,
* The function is idempotent, though each time it runs it will configure GPIO
* pin direction according to the flags in GpioInt resource.
*
- * Return: Linux IRQ number (>%0) on success, negative errno on failure.
+ * Return: Linux IRQ number (> %0) on success, negative errno on failure.
*/
int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
{
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 54ce8dc58ad0..bfcd20699ec8 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -78,8 +78,8 @@ struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np,
ret = of_parse_phandle_with_args(np, propname, "#gpio-cells", index,
&gpiospec);
if (ret) {
- pr_debug("%s: can't parse '%s' property of node '%s[%d]'\n",
- __func__, propname, np->full_name, index);
+ pr_debug("%s: can't parse '%s' property of node '%pOF[%d]'\n",
+ __func__, propname, np, index);
return ERR_PTR(ret);
}
@@ -93,8 +93,8 @@ struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np,
if (IS_ERR(desc))
goto out;
- pr_debug("%s: parsed '%s' property of node '%s[%d]' - status (%d)\n",
- __func__, propname, np->full_name, index,
+ pr_debug("%s: parsed '%s' property of node '%pOF[%d]' - status (%d)\n",
+ __func__, propname, np, index,
PTR_ERR_OR_ZERO(desc));
out:
@@ -273,14 +273,13 @@ static int of_gpiochip_scan_gpios(struct gpio_chip *chip)
}
/**
- * of_gpio_simple_xlate - translate gpio_spec to the GPIO number and flags
+ * of_gpio_simple_xlate - translate gpiospec to the GPIO number and flags
* @gc: pointer to the gpio_chip structure
- * @np: device node of the GPIO chip
- * @gpio_spec: gpio specifier as found in the device tree
+ * @gpiospec: GPIO specifier as found in the device tree
* @flags: a flags pointer to fill in
*
* This is simple translation function, suitable for the most 1:1 mapped
- * gpio chips. This function performs only one sanity check: whether gpio
+ * GPIO chips. This function performs only one sanity check: whether GPIO
* is less than ngpios (that is specified in the gpio_chip).
*/
int of_gpio_simple_xlate(struct gpio_chip *gc,
@@ -337,7 +336,7 @@ int of_mm_gpiochip_add_data(struct device_node *np,
int ret = -ENOMEM;
struct gpio_chip *gc = &mm_gc->gc;
- gc->label = kstrdup(np->full_name, GFP_KERNEL);
+ gc->label = kasprintf(GFP_KERNEL, "%pOF", np);
if (!gc->label)
goto err0;
@@ -362,8 +361,7 @@ err2:
err1:
kfree(gc->label);
err0:
- pr_err("%s: GPIO chip registration failed with status %d\n",
- np->full_name, ret);
+ pr_err("%pOF: GPIO chip registration failed with status %d\n", np, ret);
return ret;
}
EXPORT_SYMBOL(of_mm_gpiochip_add_data);
@@ -418,8 +416,8 @@ static int of_gpiochip_add_pin_range(struct gpio_chip *chip)
group_names_propname,
index, &name);
if (strlen(name)) {
- pr_err("%s: Group name of numeric GPIO ranges must be the empty string.\n",
- np->full_name);
+ pr_err("%pOF: Group name of numeric GPIO ranges must be the empty string.\n",
+ np);
break;
}
}
@@ -434,14 +432,14 @@ static int of_gpiochip_add_pin_range(struct gpio_chip *chip)
} else {
/* npins == 0: special range */
if (pinspec.args[1]) {
- pr_err("%s: Illegal gpio-range format.\n",
- np->full_name);
+ pr_err("%pOF: Illegal gpio-range format.\n",
+ np);
break;
}
if (!group_names) {
- pr_err("%s: GPIO group range requested but no %s property.\n",
- np->full_name, group_names_propname);
+ pr_err("%pOF: GPIO group range requested but no %s property.\n",
+ np, group_names_propname);
break;
}
@@ -452,8 +450,8 @@ static int of_gpiochip_add_pin_range(struct gpio_chip *chip)
break;
if (!strlen(name)) {
- pr_err("%s: Group name of GPIO group range cannot be the empty string.\n",
- np->full_name);
+ pr_err("%pOF: Group name of GPIO group range cannot be the empty string.\n",
+ np);
break;
}
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index 16fe9742597b..3f454eaf2101 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -2,6 +2,7 @@
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/sysfs.h>
+#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
@@ -432,6 +433,11 @@ static struct attribute *gpiochip_attrs[] = {
};
ATTRIBUTE_GROUPS(gpiochip);
+static struct gpio_desc *gpio_to_valid_desc(int gpio)
+{
+ return gpio_is_valid(gpio) ? gpio_to_desc(gpio) : NULL;
+}
+
/*
* /sys/class/gpio/export ... write-only
* integer N ... number of GPIO to export (full access)
@@ -450,7 +456,7 @@ static ssize_t export_store(struct class *class,
if (status < 0)
goto done;
- desc = gpio_to_desc(gpio);
+ desc = gpio_to_valid_desc(gpio);
/* reject invalid GPIOs */
if (!desc) {
pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
@@ -493,7 +499,7 @@ static ssize_t unexport_store(struct class *class,
if (status < 0)
goto done;
- desc = gpio_to_desc(gpio);
+ desc = gpio_to_valid_desc(gpio);
/* reject bogus commands (gpio_unexport ignores them) */
if (!desc) {
pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
@@ -534,8 +540,8 @@ static struct class gpio_class = {
/**
* gpiod_export - export a GPIO through sysfs
- * @gpio: gpio to make available, already requested
- * @direction_may_change: true if userspace may change gpio direction
+ * @desc: GPIO to make available, already requested
+ * @direction_may_change: true if userspace may change GPIO direction
* Context: arch_initcall or later
*
* When drivers want to make a GPIO accessible to userspace after they
@@ -643,7 +649,7 @@ static int match_export(struct device *dev, const void *desc)
* gpiod_export_link - create a sysfs link to an exported GPIO node
* @dev: device under which to create symlink
* @name: name of the symlink
- * @gpio: gpio to create symlink to, already exported
+ * @desc: GPIO to create symlink to, already exported
*
* Set up a symlink from /sys/.../dev/name to /sys/class/gpio/gpioN
* node. Caller is responsible for unlinking.
@@ -674,7 +680,7 @@ EXPORT_SYMBOL_GPL(gpiod_export_link);
/**
* gpiod_unexport - reverse effect of gpiod_export()
- * @gpio: gpio to make unavailable
+ * @desc: GPIO to make unavailable
*
* This is implicit on gpiod_free().
*/
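
The gpiolib-sysfs hunks above route the number written to /sys/class/gpio/export through the new gpio_to_valid_desc(), which rejects out-of-range numbers before gpio_to_desc() ever indexes a descriptor. A minimal user-space sketch of that validate-before-lookup pattern follows; the table size and names are invented for illustration and are not part of the kernel change.

#include <stdio.h>
#include <stddef.h>

#define NDESC 32

struct desc { int id; };

static struct desc table[NDESC];

/* Return NULL instead of indexing out of bounds for bogus numbers. */
static struct desc *num_to_valid_desc(long num)
{
    if (num < 0 || num >= NDESC)
        return NULL;
    return &table[num];
}

int main(void)
{
    printf("%p\n", (void *)num_to_valid_desc(5));    /* valid entry   */
    printf("%p\n", (void *)num_to_valid_desc(9999)); /* prints (nil)  */
    return 0;
}

The point is simply that the range check happens before the array lookup, so a bogus number never produces a pointer past the table.
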
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 9568708a550b..eb80dac4e26a 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -84,7 +84,12 @@ static inline void desc_set_label(struct gpio_desc *d, const char *label)
}
/**
- * Convert a GPIO number to its descriptor
+ * gpio_to_desc - Convert a GPIO number to its descriptor
+ * @gpio: global GPIO number
+ *
+ * Returns:
+ * The GPIO descriptor associated with the given GPIO, or %NULL if no GPIO
+ * with the given number exists in the system.
*/
struct gpio_desc *gpio_to_desc(unsigned gpio)
{
@@ -111,7 +116,14 @@ struct gpio_desc *gpio_to_desc(unsigned gpio)
EXPORT_SYMBOL_GPL(gpio_to_desc);
/**
- * Get the GPIO descriptor corresponding to the given hw number for this chip.
+ * gpiochip_get_desc - get the GPIO descriptor corresponding to the given
+ * hardware number for this chip
+ * @chip: GPIO chip
+ * @hwnum: hardware number of the GPIO for this chip
+ *
+ * Returns:
+ * A pointer to the GPIO descriptor or %ERR_PTR(-EINVAL) if no GPIO exists
+ * in the given chip for the specified hardware number.
*/
struct gpio_desc *gpiochip_get_desc(struct gpio_chip *chip,
u16 hwnum)
@@ -125,9 +137,14 @@ struct gpio_desc *gpiochip_get_desc(struct gpio_chip *chip,
}
/**
- * Convert a GPIO descriptor to the integer namespace.
+ * desc_to_gpio - convert a GPIO descriptor to the integer namespace
+ * @desc: GPIO descriptor
+ *
* This should disappear in the future but is needed since we still
- * use GPIO numbers for error messages and sysfs nodes
+ * use GPIO numbers for error messages and sysfs nodes.
+ *
+ * Returns:
+ * The global GPIO number for the GPIO specified by its descriptor.
*/
int desc_to_gpio(const struct gpio_desc *desc)
{
@@ -254,7 +271,7 @@ static int gpiodev_add_to_list(struct gpio_device *gdev)
return -EBUSY;
}
-/**
+/*
* Convert a GPIO name to its descriptor
*/
static struct gpio_desc *gpio_name_to_desc(const char * const name)
@@ -704,24 +721,23 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p)
{
struct lineevent_state *le = p;
struct gpioevent_data ge;
- int ret;
+ int ret, level;
ge.timestamp = ktime_get_real_ns();
+ level = gpiod_get_value_cansleep(le->desc);
if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
&& le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
- int level = gpiod_get_value_cansleep(le->desc);
-
if (level)
/* Emit low-to-high event */
ge.id = GPIOEVENT_EVENT_RISING_EDGE;
else
/* Emit high-to-low event */
ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
- } else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE) {
+ } else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE && level) {
/* Emit low-to-high event */
ge.id = GPIOEVENT_EVENT_RISING_EDGE;
- } else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
+ } else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE && !level) {
/* Emit high-to-low event */
ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
} else {
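
The lineevent_irq_thread() hunk just above samples the line level once and then suppresses rising or falling events whose level does not match what the consumer requested, rather than forwarding every interrupt. A small sketch of that filtering decision, with invented flag names standing in for the GPIOEVENT_REQUEST_* bits:

#include <stdio.h>

enum event_id { EV_NONE, EV_RISING, EV_FALLING };

#define REQ_RISING  0x1
#define REQ_FALLING 0x2

/* Pick an event id from the requested edges and the sampled level,
 * returning EV_NONE for interrupts that do not match the request. */
static enum event_id classify(unsigned eflags, int level)
{
    if ((eflags & REQ_RISING) && (eflags & REQ_FALLING))
        return level ? EV_RISING : EV_FALLING;
    if ((eflags & REQ_RISING) && level)
        return EV_RISING;
    if ((eflags & REQ_FALLING) && !level)
        return EV_FALLING;
    return EV_NONE;
}

int main(void)
{
    printf("%d\n", classify(REQ_RISING, 0)); /* EV_NONE: spurious IRQ */
    printf("%d\n", classify(REQ_RISING, 1)); /* EV_RISING             */
    return 0;
}
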
@@ -879,7 +895,7 @@ out_free_le:
return ret;
}
-/**
+/*
* gpio_ioctl() - ioctl handler for the GPIO chardev
*/
static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
@@ -1078,11 +1094,9 @@ static void gpiochip_setup_devs(void)
/**
* gpiochip_add_data() - register a gpio_chip
* @chip: the chip to register, with chip->base initialized
- * Context: potentially before irqs will work
+ * @data: driver-private data associated with this chip
*
- * Returns a negative errno if the chip can't be registered, such as
- * because the chip->base is invalid or already associated with a
- * different chip. Otherwise it returns zero as a success code.
+ * Context: potentially before irqs will work
*
* When gpiochip_add_data() is called very early during boot, so that GPIOs
* can be freely used, the chip->parent device must be registered before
@@ -1094,6 +1108,11 @@ static void gpiochip_setup_devs(void)
*
* If chip->base is negative, this requests dynamic assignment of
* a range of valid GPIOs.
+ *
+ * Returns:
+ * A negative errno if the chip can't be registered, such as because the
+ * chip->base is invalid or already associated with a different chip.
+ * Otherwise it returns zero as a success code.
*/
int gpiochip_add_data(struct gpio_chip *chip, void *data)
{
@@ -1288,6 +1307,10 @@ EXPORT_SYMBOL_GPL(gpiochip_add_data);
/**
* gpiochip_get_data() - get per-subdriver data for the chip
+ * @chip: GPIO chip
+ *
+ * Returns:
+ * The per-subdriver data for the chip.
*/
void *gpiochip_get_data(struct gpio_chip *chip)
{
@@ -1371,13 +1394,16 @@ static int devm_gpio_chip_match(struct device *dev, void *res, void *data)
* devm_gpiochip_add_data() - Resource manager piochip_add_data()
* @dev: the device pointer on which irq_chip belongs to.
* @chip: the chip to register, with chip->base initialized
- * Context: potentially before irqs will work
+ * @data: driver-private data associated with this chip
*
- * Returns a negative errno if the chip can't be registered, such as
- * because the chip->base is invalid or already associated with a
- * different chip. Otherwise it returns zero as a success code.
+ * Context: potentially before irqs will work
*
* The gpio chip automatically be released when the device is unbound.
+ *
+ * Returns:
+ * A negative errno if the chip can't be registered, such as because the
+ * chip->base is invalid or already associated with a different chip.
+ * Otherwise it returns zero as a success code.
*/
int devm_gpiochip_add_data(struct device *dev, struct gpio_chip *chip,
void *data)
@@ -1423,7 +1449,7 @@ EXPORT_SYMBOL_GPL(devm_gpiochip_remove);
/**
* gpiochip_find() - iterator for locating a specific gpio_chip
* @data: data to pass to match function
- * @callback: Callback function to check gpio_chip
+ * @match: Callback function to check gpio_chip
*
* Similar to bus_find_device. It returns a reference to a gpio_chip as
* determined by a user supplied @match callback. The callback should return
@@ -1605,6 +1631,9 @@ static int gpiochip_irq_map(struct irq_domain *d, unsigned int irq,
{
struct gpio_chip *chip = d->host_data;
+ if (!gpiochip_irqchip_irq_valid(chip, hwirq))
+ return -ENXIO;
+
irq_set_chip_data(irq, chip);
/*
* This lock class tells lockdep that GPIO irqs are in a different
@@ -1671,7 +1700,9 @@ static void gpiochip_irq_relres(struct irq_data *d)
static int gpiochip_to_irq(struct gpio_chip *chip, unsigned offset)
{
- return irq_find_mapping(chip->irqdomain, offset);
+ if (!gpiochip_irqchip_irq_valid(chip, offset))
+ return -ENXIO;
+ return irq_create_mapping(chip->irqdomain, offset);
}
/**
@@ -1747,9 +1778,6 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
struct lock_class_key *lock_key)
{
struct device_node *of_node;
- bool irq_base_set = false;
- unsigned int offset;
- unsigned irq_base = 0;
if (!gpiochip || !irqchip)
return -EINVAL;
@@ -1775,7 +1803,7 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
* conflicting triggers. Tell the user, and reset to NONE.
*/
if (WARN(of_node && type != IRQ_TYPE_NONE,
- "%s: Ignoring %d default trigger\n", of_node->full_name, type))
+ "%pOF: Ignoring %d default trigger\n", of_node, type))
type = IRQ_TYPE_NONE;
if (has_acpi_companion(gpiochip->parent) && type != IRQ_TYPE_NONE) {
acpi_handle_warn(ACPI_HANDLE(gpiochip->parent),
@@ -1806,25 +1834,6 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
irqchip->irq_release_resources = gpiochip_irq_relres;
}
- /*
- * Prepare the mapping since the irqchip shall be orthogonal to
- * any gpiochip calls. If the first_irq was zero, this is
- * necessary to allocate descriptors for all IRQs.
- */
- for (offset = 0; offset < gpiochip->ngpio; offset++) {
- if (!gpiochip_irqchip_irq_valid(gpiochip, offset))
- continue;
- irq_base = irq_create_mapping(gpiochip->irqdomain, offset);
- if (!irq_base_set) {
- /*
- * Store the base into the gpiochip to be used when
- * unmapping the irqs.
- */
- gpiochip->irq_base = irq_base;
- irq_base_set = true;
- }
- }
-
acpi_gpiochip_request_interrupts(gpiochip);
return 0;
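
Together with the gpiochip_to_irq() change further up, the block removed here shifts the irqdomain from eagerly mapping every line at gpiochip_irqchip_add time to creating mappings on demand, and only for lines that pass the valid mask. A rough sketch of that lazy shape; the cache array and fake allocator below are made up for illustration:

#include <stdbool.h>
#include <stdio.h>

#define NGPIO 8

static bool line_valid[NGPIO] = { true, true, false, true };
static int  irq_cache[NGPIO];   /* 0 means "no mapping yet"      */
static int  next_irq = 100;     /* toy stand-in for the irq core */

/* Create the mapping only when a consumer actually asks for it. */
static int to_irq(unsigned offset)
{
    if (offset >= NGPIO || !line_valid[offset])
        return -1;                      /* -ENXIO in the kernel        */
    if (!irq_cache[offset])
        irq_cache[offset] = next_irq++; /* irq_create_mapping() here   */
    return irq_cache[offset];
}

int main(void)
{
    printf("%d %d %d\n", to_irq(0), to_irq(0), to_irq(2)); /* 100 100 -1 */
    return 0;
}
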
@@ -1931,11 +1940,14 @@ EXPORT_SYMBOL_GPL(gpiochip_add_pingroup_range);
/**
* gpiochip_add_pin_range() - add a range for GPIO <-> pin mapping
* @chip: the gpiochip to add the range for
- * @pinctrl_name: the dev_name() of the pin controller to map to
+ * @pinctl_name: the dev_name() of the pin controller to map to
* @gpio_offset: the start offset in the current gpio_chip number space
* @pin_offset: the start offset in the pin controller number space
* @npins: the number of pins from the offset of each pin space (GPIO and
* pin controller) to accumulate in this range
+ *
+ * Returns:
+ * 0 on success, or a negative error-code on failure.
*/
int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
unsigned int gpio_offset, unsigned int pin_offset,
@@ -2180,7 +2192,8 @@ EXPORT_SYMBOL_GPL(gpiochip_is_requested);
/**
* gpiochip_request_own_desc - Allow GPIO chip to request its own descriptor
- * @desc: GPIO descriptor to request
+ * @chip: GPIO chip
+ * @hwnum: hardware number of the GPIO for which to request the descriptor
* @label: label for the GPIO
*
* Function allows GPIO chip drivers to request and use their own GPIO
@@ -2188,6 +2201,10 @@ EXPORT_SYMBOL_GPL(gpiochip_is_requested);
* function will not increase reference count of the GPIO chip module. This
* allows the GPIO chip module to be unloaded as needed (we assume that the
* GPIO chip driver handles freeing the GPIOs it has requested).
+ *
+ * Returns:
+ * A pointer to the GPIO descriptor, or an ERR_PTR()-encoded negative error
+ * code on failure.
*/
struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip, u16 hwnum,
const char *label)
@@ -2369,12 +2386,13 @@ int gpiod_direction_output(struct gpio_desc *desc, int value)
EXPORT_SYMBOL_GPL(gpiod_direction_output);
/**
- * gpiod_set_debounce - sets @debounce time for a @gpio
- * @gpio: the gpio to set debounce time
- * @debounce: debounce time is microseconds
+ * gpiod_set_debounce - sets @debounce time for a GPIO
+ * @desc: descriptor of the GPIO for which to set debounce time
+ * @debounce: debounce time in microseconds
*
- * returns -ENOTSUPP if the controller does not support setting
- * debounce.
+ * Returns:
+ * 0 on success, %-ENOTSUPP if the controller doesn't support setting the
+ * debounce time.
*/
int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
{
@@ -2982,6 +3000,23 @@ void gpiod_set_raw_array_value_cansleep(unsigned int array_size,
EXPORT_SYMBOL_GPL(gpiod_set_raw_array_value_cansleep);
/**
+ * gpiod_add_lookup_tables() - register GPIO device consumers
+ * @tables: list of tables of consumers to register
+ * @n: number of tables in the list
+ */
+void gpiod_add_lookup_tables(struct gpiod_lookup_table **tables, size_t n)
+{
+ unsigned int i;
+
+ mutex_lock(&gpio_lookup_lock);
+
+ for (i = 0; i < n; i++)
+ list_add_tail(&tables[i]->list, &gpio_lookup_list);
+
+ mutex_unlock(&gpio_lookup_lock);
+}
+
+/**
* gpiod_set_array_value_cansleep() - assign values to an array of GPIOs
* @array_size: number of elements in the descriptor / value arrays
* @desc_array: array of GPIO descriptors whose values will be assigned
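
The new gpiod_add_lookup_tables() registers a whole array of lookup tables while taking gpio_lookup_lock only once, instead of callers looping over single-table registration. A reduced sketch of that batch-under-one-lock idea, using a toy list rather than the kernel's:

#include <pthread.h>
#include <stdio.h>

struct table { const char *name; struct table *next; };

static struct table *head;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Link n tables into the list while holding the lock a single time,
 * mirroring how gpiod_add_lookup_tables() batches its list_add_tail()s. */
static void add_tables(struct table **tables, size_t n)
{
    pthread_mutex_lock(&lock);
    for (size_t i = 0; i < n; i++) {
        tables[i]->next = head;
        head = tables[i];
    }
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    struct table a = { "codec", NULL }, b = { "panel", NULL };
    struct table *both[] = { &a, &b };

    add_tables(both, 2);
    for (struct table *t = head; t; t = t->next)
        printf("%s\n", t->name);
    return 0;
}
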
@@ -3323,6 +3358,7 @@ EXPORT_SYMBOL_GPL(gpiod_get_index);
* @propname: name of the firmware property representing the GPIO
* @index: index of the GPIO to obtain in the consumer
* @dflags: GPIO initialization flags
+ * @label: label to attach to the requested GPIO
*
* This function can be used for drivers that get their configuration
* from firmware.
@@ -3331,6 +3367,7 @@ EXPORT_SYMBOL_GPL(gpiod_get_index);
* underlying firmware interface and then makes sure that the GPIO
* descriptor is requested before it is returned to the caller.
*
+ * Returns:
* On successful request the GPIO pin is configured in accordance with
* provided @dflags.
*
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index a8be286eff86..d003ccb12781 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -219,7 +219,7 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
/*
* Return the GPIO number of the passed descriptor relative to its chip
*/
-static int __maybe_unused gpio_chip_hwgpio(const struct gpio_desc *desc)
+static inline int gpio_chip_hwgpio(const struct gpio_desc *desc)
{
return desc - &desc->gdev->descs[0];
}
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 24a066e1841c..a8acc197dec3 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -33,7 +33,7 @@ drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
drm_kms_helper_common.o drm_dp_dual_mode_helper.o \
drm_simple_kms_helper.o drm_modeset_helper.o \
- drm_scdc_helper.o
+ drm_scdc_helper.o drm_gem_framebuffer_helper.o
drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o
drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index faea6349228f..658bac0cdc5e 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -25,7 +25,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
- amdgpu_queue_mgr.o
+ amdgpu_queue_mgr.o amdgpu_vf_error.o
# add asic specific block
amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index ff7bf1a9f967..12e71bbfd222 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -68,13 +68,16 @@
#include "gpu_scheduler.h"
#include "amdgpu_virt.h"
+#include "amdgpu_gart.h"
/*
* Modules parameters.
*/
extern int amdgpu_modeset;
extern int amdgpu_vram_limit;
-extern int amdgpu_gart_size;
+extern int amdgpu_vis_vram_limit;
+extern unsigned amdgpu_gart_size;
+extern int amdgpu_gtt_size;
extern int amdgpu_moverate;
extern int amdgpu_benchmarking;
extern int amdgpu_testing;
@@ -93,6 +96,7 @@ extern int amdgpu_bapm;
extern int amdgpu_deep_color;
extern int amdgpu_vm_size;
extern int amdgpu_vm_block_size;
+extern int amdgpu_vm_fragment_size;
extern int amdgpu_vm_fault_stop;
extern int amdgpu_vm_debug;
extern int amdgpu_vm_update_mode;
@@ -104,6 +108,7 @@ extern unsigned amdgpu_pcie_gen_cap;
extern unsigned amdgpu_pcie_lane_cap;
extern unsigned amdgpu_cg_mask;
extern unsigned amdgpu_pg_mask;
+extern unsigned amdgpu_sdma_phase_quantum;
extern char *amdgpu_disable_cu;
extern char *amdgpu_virtual_display;
extern unsigned amdgpu_pp_feature_mask;
@@ -369,78 +374,10 @@ struct amdgpu_clock {
};
/*
- * BO.
+ * GEM.
*/
-struct amdgpu_bo_list_entry {
- struct amdgpu_bo *robj;
- struct ttm_validate_buffer tv;
- struct amdgpu_bo_va *bo_va;
- uint32_t priority;
- struct page **user_pages;
- int user_invalidated;
-};
-
-struct amdgpu_bo_va_mapping {
- struct list_head list;
- struct rb_node rb;
- uint64_t start;
- uint64_t last;
- uint64_t __subtree_last;
- uint64_t offset;
- uint64_t flags;
-};
-
-/* bo virtual addresses in a specific vm */
-struct amdgpu_bo_va {
- /* protected by bo being reserved */
- struct list_head bo_list;
- struct dma_fence *last_pt_update;
- unsigned ref_count;
-
- /* protected by vm mutex and spinlock */
- struct list_head vm_status;
-
- /* mappings for this bo_va */
- struct list_head invalids;
- struct list_head valids;
-
- /* constant after initialization */
- struct amdgpu_vm *vm;
- struct amdgpu_bo *bo;
-};
#define AMDGPU_GEM_DOMAIN_MAX 0x3
-
-struct amdgpu_bo {
- /* Protected by tbo.reserved */
- u32 prefered_domains;
- u32 allowed_domains;
- struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
- struct ttm_placement placement;
- struct ttm_buffer_object tbo;
- struct ttm_bo_kmap_obj kmap;
- u64 flags;
- unsigned pin_count;
- void *kptr;
- u64 tiling_flags;
- u64 metadata_flags;
- void *metadata;
- u32 metadata_size;
- unsigned prime_shared_count;
- /* list of all virtual address to which this bo
- * is associated to
- */
- struct list_head va;
- /* Constant after initialization */
- struct drm_gem_object gem_base;
- struct amdgpu_bo *parent;
- struct amdgpu_bo *shadow;
-
- struct ttm_bo_kmap_obj dma_buf_vmap;
- struct amdgpu_mn *mn;
- struct list_head mn_list;
- struct list_head shadow_list;
-};
#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base)
void amdgpu_gem_object_free(struct drm_gem_object *obj);
@@ -532,49 +469,6 @@ int amdgpu_fence_slab_init(void);
void amdgpu_fence_slab_fini(void);
/*
- * GART structures, functions & helpers
- */
-struct amdgpu_mc;
-
-#define AMDGPU_GPU_PAGE_SIZE 4096
-#define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1)
-#define AMDGPU_GPU_PAGE_SHIFT 12
-#define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK)
-
-struct amdgpu_gart {
- dma_addr_t table_addr;
- struct amdgpu_bo *robj;
- void *ptr;
- unsigned num_gpu_pages;
- unsigned num_cpu_pages;
- unsigned table_size;
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
- struct page **pages;
-#endif
- bool ready;
-
- /* Asic default pte flags */
- uint64_t gart_pte_flags;
-
- const struct amdgpu_gart_funcs *gart_funcs;
-};
-
-int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev);
-void amdgpu_gart_table_ram_free(struct amdgpu_device *adev);
-int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);
-void amdgpu_gart_table_vram_free(struct amdgpu_device *adev);
-int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
-void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
-int amdgpu_gart_init(struct amdgpu_device *adev);
-void amdgpu_gart_fini(struct amdgpu_device *adev);
-int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
- int pages);
-int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
- int pages, struct page **pagelist,
- dma_addr_t *dma_addr, uint64_t flags);
-int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);
-
-/*
* VMHUB structures, functions & helpers
*/
struct amdgpu_vmhub {
@@ -598,22 +492,20 @@ struct amdgpu_mc {
* about vram size near mc fb location */
u64 mc_vram_size;
u64 visible_vram_size;
- u64 gtt_size;
- u64 gtt_start;
- u64 gtt_end;
+ u64 gart_size;
+ u64 gart_start;
+ u64 gart_end;
u64 vram_start;
u64 vram_end;
unsigned vram_width;
u64 real_vram_size;
int vram_mtrr;
- u64 gtt_base_align;
u64 mc_mask;
const struct firmware *fw; /* MC firmware */
uint32_t fw_version;
struct amdgpu_irq_src vm_fault;
uint32_t vram_type;
uint32_t srbm_soft_reset;
- struct amdgpu_mode_mc_save save;
bool prt_warning;
uint64_t stolen_size;
/* apertures */
@@ -719,15 +611,15 @@ typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT
/* overlap the doorbell assignment with VCN as they are mutually exclusive
* VCE engine's doorbell is 32 bit and two VCE ring share one QWORD
*/
- AMDGPU_DOORBELL64_RING0_1 = 0xF8,
- AMDGPU_DOORBELL64_RING2_3 = 0xF9,
- AMDGPU_DOORBELL64_RING4_5 = 0xFA,
- AMDGPU_DOORBELL64_RING6_7 = 0xFB,
+ AMDGPU_DOORBELL64_UVD_RING0_1 = 0xF8,
+ AMDGPU_DOORBELL64_UVD_RING2_3 = 0xF9,
+ AMDGPU_DOORBELL64_UVD_RING4_5 = 0xFA,
+ AMDGPU_DOORBELL64_UVD_RING6_7 = 0xFB,
- AMDGPU_DOORBELL64_UVD_RING0_1 = 0xFC,
- AMDGPU_DOORBELL64_UVD_RING2_3 = 0xFD,
- AMDGPU_DOORBELL64_UVD_RING4_5 = 0xFE,
- AMDGPU_DOORBELL64_UVD_RING6_7 = 0xFF,
+ AMDGPU_DOORBELL64_VCE_RING0_1 = 0xFC,
+ AMDGPU_DOORBELL64_VCE_RING2_3 = 0xFD,
+ AMDGPU_DOORBELL64_VCE_RING4_5 = 0xFE,
+ AMDGPU_DOORBELL64_VCE_RING6_7 = 0xFF,
AMDGPU_DOORBELL64_MAX_ASSIGNMENT = 0xFF,
AMDGPU_DOORBELL64_INVALID = 0xFFFF
@@ -857,6 +749,7 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
struct amdgpu_fpriv {
struct amdgpu_vm vm;
struct amdgpu_bo_va *prt_va;
+ struct amdgpu_bo_va *csa_va;
struct mutex bo_list_lock;
struct idr bo_list_handles;
struct amdgpu_ctx_mgr ctx_mgr;
@@ -866,6 +759,14 @@ struct amdgpu_fpriv {
/*
* residency list
*/
+struct amdgpu_bo_list_entry {
+ struct amdgpu_bo *robj;
+ struct ttm_validate_buffer tv;
+ struct amdgpu_bo_va *bo_va;
+ uint32_t priority;
+ struct page **user_pages;
+ int user_invalidated;
+};
struct amdgpu_bo_list {
struct mutex lock;
@@ -1159,7 +1060,9 @@ struct amdgpu_cs_parser {
struct list_head validated;
struct dma_fence *fence;
uint64_t bytes_moved_threshold;
+ uint64_t bytes_moved_vis_threshold;
uint64_t bytes_moved;
+ uint64_t bytes_moved_vis;
struct amdgpu_bo_list_entry *evictable;
/* user fence */
@@ -1230,8 +1133,6 @@ struct amdgpu_wb {
int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb);
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb);
-int amdgpu_wb_get_64bit(struct amdgpu_device *adev, u32 *wb);
-void amdgpu_wb_free_64bit(struct amdgpu_device *adev, u32 wb);
void amdgpu_get_pcie_info(struct amdgpu_device *adev);
@@ -1525,7 +1426,7 @@ struct amdgpu_device {
bool is_atom_fw;
uint8_t *bios;
uint32_t bios_size;
- struct amdgpu_bo *stollen_vga_memory;
+ struct amdgpu_bo *stolen_vga_memory;
uint32_t bios_scratch_reg_offset;
uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];
@@ -1557,6 +1458,10 @@ struct amdgpu_device {
spinlock_t gc_cac_idx_lock;
amdgpu_rreg_t gc_cac_rreg;
amdgpu_wreg_t gc_cac_wreg;
+ /* protects concurrent se_cac register access */
+ spinlock_t se_cac_idx_lock;
+ amdgpu_rreg_t se_cac_rreg;
+ amdgpu_wreg_t se_cac_wreg;
/* protects concurrent ENDPOINT (audio) register access */
spinlock_t audio_endpt_idx_lock;
amdgpu_block_rreg_t audio_endpt_rreg;
@@ -1579,9 +1484,6 @@ struct amdgpu_device {
struct amdgpu_mman mman;
struct amdgpu_vram_scratch vram_scratch;
struct amdgpu_wb wb;
- atomic64_t vram_usage;
- atomic64_t vram_vis_usage;
- atomic64_t gtt_usage;
atomic64_t num_bytes_moved;
atomic64_t num_evictions;
atomic64_t num_vram_cpu_page_faults;
@@ -1593,6 +1495,7 @@ struct amdgpu_device {
spinlock_t lock;
s64 last_update_us;
s64 accum_us; /* accumulated microseconds */
+ s64 accum_us_vis; /* for visible VRAM */
u32 log2_max_MBps;
} mm_stats;
@@ -1687,6 +1590,8 @@ struct amdgpu_device {
bool has_hw_reset;
u8 reset_magic[AMDGPU_RESET_MAGIC_NUM];
+ /* record last mm index being written through WREG32*/
+ unsigned long last_mm_index;
};
static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
@@ -1742,6 +1647,8 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
#define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v))
#define RREG32_GC_CAC(reg) adev->gc_cac_rreg(adev, (reg))
#define WREG32_GC_CAC(reg, v) adev->gc_cac_wreg(adev, (reg), (v))
+#define RREG32_SE_CAC(reg) adev->se_cac_rreg(adev, (reg))
+#define WREG32_SE_CAC(reg, v) adev->se_cac_wreg(adev, (reg), (v))
#define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg))
#define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v))
#define WREG32_P(reg, val, mask) \
@@ -1792,50 +1699,6 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
#define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
#define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))
-/*
- * RING helpers.
- */
-static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
-{
- if (ring->count_dw <= 0)
- DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
- ring->ring[ring->wptr++ & ring->buf_mask] = v;
- ring->wptr &= ring->ptr_mask;
- ring->count_dw--;
-}
-
-static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring, void *src, int count_dw)
-{
- unsigned occupied, chunk1, chunk2;
- void *dst;
-
- if (unlikely(ring->count_dw < count_dw)) {
- DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
- return;
- }
-
- occupied = ring->wptr & ring->buf_mask;
- dst = (void *)&ring->ring[occupied];
- chunk1 = ring->buf_mask + 1 - occupied;
- chunk1 = (chunk1 >= count_dw) ? count_dw: chunk1;
- chunk2 = count_dw - chunk1;
- chunk1 <<= 2;
- chunk2 <<= 2;
-
- if (chunk1)
- memcpy(dst, src, chunk1);
-
- if (chunk2) {
- src += chunk1;
- dst = (void *)ring->ring;
- memcpy(dst, src, chunk2);
- }
-
- ring->wptr += count_dw;
- ring->wptr &= ring->ptr_mask;
- ring->count_dw -= count_dw;
-}
-
static inline struct amdgpu_sdma_instance *
amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
{
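
The ring helpers removed from amdgpu.h above (they appear to be relocated into the ring header by this series rather than dropped) implement the usual two-chunk copy into a power-of-two ring buffer. A standalone sketch of the wrap-around write, with a toy ring in place of struct amdgpu_ring:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define RING_DW 8                  /* must be a power of two */

struct ring {
    uint32_t buf[RING_DW];
    uint32_t wptr;                 /* free-running write pointer */
};

/* Copy count dwords, splitting the memcpy at the end of the buffer. */
static void ring_write_multiple(struct ring *r, const uint32_t *src,
                                unsigned count)
{
    unsigned mask = RING_DW - 1;
    unsigned occupied = r->wptr & mask;
    unsigned chunk1 = RING_DW - occupied;

    if (chunk1 > count)
        chunk1 = count;
    memcpy(&r->buf[occupied], src, chunk1 * sizeof(uint32_t));
    if (count > chunk1)
        memcpy(r->buf, src + chunk1, (count - chunk1) * sizeof(uint32_t));
    r->wptr += count;
}

int main(void)
{
    struct ring r = { .wptr = 6 };
    uint32_t payload[4] = { 1, 2, 3, 4 };

    ring_write_multiple(&r, payload, 4); /* 2 dwords at 6..7, 2 wrap to 0..1 */
    printf("%u %u %u %u\n", r.buf[6], r.buf[7], r.buf[0], r.buf[1]);
    return 0;
}
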
@@ -1898,7 +1761,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
-#define amdgpu_display_set_vga_render_state(adev, r) (adev)->mode_info.funcs->set_vga_render_state((adev), (r))
#define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc))
#define amdgpu_display_vblank_wait(adev, crtc) (adev)->mode_info.funcs->vblank_wait((adev), (crtc))
#define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l))
@@ -1911,8 +1773,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos))
#define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c))
#define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r))
-#define amdgpu_display_stop_mc_access(adev, s) (adev)->mode_info.funcs->stop_mc_access((adev), (s))
-#define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s))
#define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b))
#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
@@ -1927,7 +1787,8 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev);
bool amdgpu_need_post(struct amdgpu_device *adev);
void amdgpu_update_display_priority(struct amdgpu_device *adev);
-void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes);
+void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
+ u64 num_vis_bytes);
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
@@ -1943,7 +1804,7 @@ bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
struct ttm_mem_reg *mem);
void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
-void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
+void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
int amdgpu_ttm_init(struct amdgpu_device *adev);
void amdgpu_ttm_fini(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index 06879d1dcabd..a52795d9b458 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -285,19 +285,20 @@ static int acp_hw_init(void *handle)
return 0;
else if (r)
return r;
+ if (adev->asic_type != CHIP_STONEY) {
+ adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL);
+ if (adev->acp.acp_genpd == NULL)
+ return -ENOMEM;
- adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL);
- if (adev->acp.acp_genpd == NULL)
- return -ENOMEM;
-
- adev->acp.acp_genpd->gpd.name = "ACP_AUDIO";
- adev->acp.acp_genpd->gpd.power_off = acp_poweroff;
- adev->acp.acp_genpd->gpd.power_on = acp_poweron;
+ adev->acp.acp_genpd->gpd.name = "ACP_AUDIO";
+ adev->acp.acp_genpd->gpd.power_off = acp_poweroff;
+ adev->acp.acp_genpd->gpd.power_on = acp_poweron;
- adev->acp.acp_genpd->cgs_dev = adev->acp.cgs_device;
+ adev->acp.acp_genpd->cgs_dev = adev->acp.cgs_device;
- pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false);
+ pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false);
+ }
adev->acp.acp_cell = kzalloc(sizeof(struct mfd_cell) * ACP_DEVS,
GFP_KERNEL);
@@ -319,14 +320,29 @@ static int acp_hw_init(void *handle)
return -ENOMEM;
}
- i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
+ switch (adev->asic_type) {
+ case CHIP_STONEY:
+ i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
+ DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
+ break;
+ default:
+ i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
+ }
i2s_pdata[0].cap = DWC_I2S_PLAY;
i2s_pdata[0].snd_rates = SNDRV_PCM_RATE_8000_96000;
i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_PLAY_REG_OFFSET;
i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_PLAY_REG_OFFSET;
+ switch (adev->asic_type) {
+ case CHIP_STONEY:
+ i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
+ DW_I2S_QUIRK_COMP_PARAM1 |
+ DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
+ break;
+ default:
+ i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
+ DW_I2S_QUIRK_COMP_PARAM1;
+ }
- i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
- DW_I2S_QUIRK_COMP_PARAM1;
i2s_pdata[1].cap = DWC_I2S_RECORD;
i2s_pdata[1].snd_rates = SNDRV_PCM_RATE_8000_96000;
i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET;
@@ -373,12 +389,14 @@ static int acp_hw_init(void *handle)
if (r)
return r;
- for (i = 0; i < ACP_DEVS ; i++) {
- dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
- r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
- if (r) {
- dev_err(dev, "Failed to add dev to genpd\n");
- return r;
+ if (adev->asic_type != CHIP_STONEY) {
+ for (i = 0; i < ACP_DEVS ; i++) {
+ dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
+ r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
+ if (r) {
+ dev_err(dev, "Failed to add dev to genpd\n");
+ return r;
+ }
}
}
@@ -398,20 +416,22 @@ static int acp_hw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* return early if no ACP */
- if (!adev->acp.acp_genpd)
+ if (!adev->acp.acp_cell)
return 0;
- for (i = 0; i < ACP_DEVS ; i++) {
- dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
- ret = pm_genpd_remove_device(&adev->acp.acp_genpd->gpd, dev);
- /* If removal fails, dont giveup and try rest */
- if (ret)
- dev_err(dev, "remove dev from genpd failed\n");
+ if (adev->acp.acp_genpd) {
+ for (i = 0; i < ACP_DEVS ; i++) {
+ dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
+ ret = pm_genpd_remove_device(&adev->acp.acp_genpd->gpd, dev);
+ /* If removal fails, dont giveup and try rest */
+ if (ret)
+ dev_err(dev, "remove dev from genpd failed\n");
+ }
+ kfree(adev->acp.acp_genpd);
}
mfd_remove_devices(adev->acp.parent);
kfree(adev->acp.acp_res);
- kfree(adev->acp.acp_genpd);
kfree(adev->acp.acp_cell);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index ef79551b4cb7..57afad79f55d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -30,10 +30,10 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include "amdgpu.h"
+#include "amdgpu_pm.h"
#include "amd_acpi.h"
#include "atom.h"
-extern void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev);
/* Call the ATIF method
*/
/**
@@ -289,7 +289,7 @@ out:
* handles it.
* Returns NOTIFY code
*/
-int amdgpu_atif_handler(struct amdgpu_device *adev,
+static int amdgpu_atif_handler(struct amdgpu_device *adev,
struct acpi_bus_event *event)
{
struct amdgpu_atif *atif = &adev->atif;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 5f8ada1d872b..5432af39a674 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -27,16 +27,15 @@
#include "amdgpu_gfx.h"
#include <linux/module.h>
-const struct kfd2kgd_calls *kfd2kgd;
const struct kgd2kfd_calls *kgd2kfd;
-bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
+bool (*kgd2kfd_init_p)(unsigned int, const struct kgd2kfd_calls**);
int amdgpu_amdkfd_init(void)
{
int ret;
#if defined(CONFIG_HSA_AMD_MODULE)
- int (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
+ int (*kgd2kfd_init_p)(unsigned int, const struct kgd2kfd_calls**);
kgd2kfd_init_p = symbol_request(kgd2kfd_init);
@@ -61,8 +60,21 @@ int amdgpu_amdkfd_init(void)
return ret;
}
-bool amdgpu_amdkfd_load_interface(struct amdgpu_device *adev)
+void amdgpu_amdkfd_fini(void)
+{
+ if (kgd2kfd) {
+ kgd2kfd->exit();
+ symbol_put(kgd2kfd_init);
+ }
+}
+
+void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
{
+ const struct kfd2kgd_calls *kfd2kgd;
+
+ if (!kgd2kfd)
+ return;
+
switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_KAVERI:
@@ -73,25 +85,12 @@ bool amdgpu_amdkfd_load_interface(struct amdgpu_device *adev)
kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
break;
default:
- return false;
+ dev_info(adev->dev, "kfd not supported on this ASIC\n");
+ return;
}
- return true;
-}
-
-void amdgpu_amdkfd_fini(void)
-{
- if (kgd2kfd) {
- kgd2kfd->exit();
- symbol_put(kgd2kfd_init);
- }
-}
-
-void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
-{
- if (kgd2kfd)
- adev->kfd = kgd2kfd->probe((struct kgd_dev *)adev,
- adev->pdev, kfd2kgd);
+ adev->kfd = kgd2kfd->probe((struct kgd_dev *)adev,
+ adev->pdev, kfd2kgd);
}
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
@@ -101,7 +100,6 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
if (adev->kfd) {
struct kgd2kfd_shared_resources gpu_resources = {
.compute_vmid_bitmap = 0xFF00,
- .num_mec = adev->gfx.mec.num_mec,
.num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
.num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe
};
@@ -122,7 +120,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
/* According to linux/bitmap.h we shouldn't use bitmap_clear if
* nbits is not compile time constant */
- last_valid_bit = adev->gfx.mec.num_mec
+ last_valid_bit = 1 /* only first MEC can have compute queues */
* adev->gfx.mec.num_pipe_per_mec
* adev->gfx.mec.num_queue_per_pipe;
for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i)
@@ -185,7 +183,8 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
return -ENOMEM;
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT,
- AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, &(*mem)->bo);
+ AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, 0,
+ &(*mem)->bo);
if (r) {
dev_err(adev->dev,
"failed to allocate BO for amdkfd (%d)\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 73f83a10ae14..8d689ab7e429 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -26,6 +26,7 @@
#define AMDGPU_AMDKFD_H_INCLUDED
#include <linux/types.h>
+#include <linux/mmu_context.h>
#include <kgd_kfd_interface.h>
struct amdgpu_device;
@@ -39,8 +40,6 @@ struct kgd_mem {
int amdgpu_amdkfd_init(void);
void amdgpu_amdkfd_fini(void);
-bool amdgpu_amdkfd_load_interface(struct amdgpu_device *adev);
-
void amdgpu_amdkfd_suspend(struct amdgpu_device *adev);
int amdgpu_amdkfd_resume(struct amdgpu_device *adev);
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
@@ -62,4 +61,19 @@ uint64_t get_gpu_clock_counter(struct kgd_dev *kgd);
uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
+#define read_user_wptr(mmptr, wptr, dst) \
+ ({ \
+ bool valid = false; \
+ if ((mmptr) && (wptr)) { \
+ if ((mmptr) == current->mm) { \
+ valid = !get_user((dst), (wptr)); \
+ } else if (current->mm == NULL) { \
+ use_mm(mmptr); \
+ valid = !get_user((dst), (wptr)); \
+ unuse_mm(mmptr); \
+ } \
+ } \
+ valid; \
+ })
+
#endif /* AMDGPU_AMDKFD_H_INCLUDED */
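
read_user_wptr() above is a GNU statement expression that evaluates to true only when the user write pointer could actually be fetched, borrowing the owning mm via use_mm() when called from another process context. A reduced user-space sketch of the fetch-and-report-validity idiom; fake_get_user() merely stands in for get_user() and the mm handling is left out:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for get_user(): returns 0 on success, like the kernel helper. */
static int fake_get_user(unsigned *dst, const unsigned *ptr)
{
    if (!ptr)
        return -1;
    *dst = *ptr;
    return 0;
}

/* Statement expression: yields true and fills dst only if ptr was usable. */
#define read_wptr(ptr, dst)                                 \
    ({                                                      \
        bool valid = false;                                 \
        if (ptr)                                            \
            valid = !fake_get_user(&(dst), (ptr));          \
        valid;                                              \
    })

int main(void)
{
    unsigned value = 0, src = 42;

    if (read_wptr(&src, value))
        printf("wptr = %u\n", value);        /* prints 42 */
    if (!read_wptr((unsigned *)0, value))
        printf("skipped invalid pointer\n");
    return 0;
}

Statement expressions are a GCC/clang extension, which is fine here because the kernel relies on the same extension.
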
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 5254562fd0f9..b9dbbf9cb8b0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -39,6 +39,12 @@
#include "gmc/gmc_7_1_sh_mask.h"
#include "cik_structs.h"
+enum hqd_dequeue_request_type {
+ NO_ACTION = 0,
+ DRAIN_PIPE,
+ RESET_WAVES
+};
+
enum {
MAX_TRAPID = 8, /* 3 bits in the bitfield. */
MAX_WATCH_ADDRESSES = 4
@@ -96,12 +102,15 @@ static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
uint32_t hpd_size, uint64_t hpd_gpu_addr);
static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr);
+ uint32_t queue_id, uint32_t __user *wptr,
+ uint32_t wptr_shift, uint32_t wptr_mask,
+ struct mm_struct *mm);
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id);
-static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
+ enum kfd_preempt_type reset_type,
unsigned int utimeout, uint32_t pipe_id,
uint32_t queue_id);
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
@@ -126,6 +135,33 @@ static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
+static void set_scratch_backing_va(struct kgd_dev *kgd,
+ uint64_t va, uint32_t vmid);
+
+/* Because of REG_GET_FIELD() being used, we put this function in the
+ * asic specific file.
+ */
+static int get_tile_config(struct kgd_dev *kgd,
+ struct tile_config *config)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+ config->gb_addr_config = adev->gfx.config.gb_addr_config;
+ config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
+ MC_ARB_RAMCFG, NOOFBANK);
+ config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
+ MC_ARB_RAMCFG, NOOFRANKS);
+
+ config->tile_config_ptr = adev->gfx.config.tile_mode_array;
+ config->num_tile_configs =
+ ARRAY_SIZE(adev->gfx.config.tile_mode_array);
+ config->macro_tile_config_ptr =
+ adev->gfx.config.macrotile_mode_array;
+ config->num_macro_tile_configs =
+ ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
+
+ return 0;
+}
static const struct kfd2kgd_calls kfd2kgd = {
.init_gtt_mem_allocation = alloc_gtt_mem,
@@ -150,7 +186,9 @@ static const struct kfd2kgd_calls kfd2kgd = {
.get_atc_vmid_pasid_mapping_pasid = get_atc_vmid_pasid_mapping_pasid,
.get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid,
.write_vmid_invalidate_request = write_vmid_invalidate_request,
- .get_fw_version = get_fw_version
+ .get_fw_version = get_fw_version,
+ .set_scratch_backing_va = set_scratch_backing_va,
+ .get_tile_config = get_tile_config,
};
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
@@ -186,7 +224,7 @@ static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
- uint32_t mec = (++pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
+ uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
lock_srbm(kgd, mec, pipe, queue_id, 0);
@@ -290,20 +328,38 @@ static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
}
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr)
+ uint32_t queue_id, uint32_t __user *wptr,
+ uint32_t wptr_shift, uint32_t wptr_mask,
+ struct mm_struct *mm)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
- uint32_t wptr_shadow, is_wptr_shadow_valid;
struct cik_mqd *m;
+ uint32_t *mqd_hqd;
+ uint32_t reg, wptr_val, data;
m = get_mqd(mqd);
- is_wptr_shadow_valid = !get_user(wptr_shadow, wptr);
- if (is_wptr_shadow_valid)
- m->cp_hqd_pq_wptr = wptr_shadow;
-
acquire_queue(kgd, pipe_id, queue_id);
- gfx_v7_0_mqd_commit(adev, m);
+
+ /* HQD registers extend from CP_MQD_BASE_ADDR to CP_MQD_CONTROL. */
+ mqd_hqd = &m->cp_mqd_base_addr_lo;
+
+ for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_MQD_CONTROL; reg++)
+ WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);
+
+ /* Copy userspace write pointer value to register.
+ * Activate doorbell logic to monitor subsequent changes.
+ */
+ data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
+ CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
+ WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);
+
+ if (read_user_wptr(mm, wptr, wptr_val))
+ WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);
+
+ data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
+ WREG32(mmCP_HQD_ACTIVE, data);
+
release_queue(kgd);
return 0;
@@ -382,30 +438,99 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
return false;
}
-static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
+ enum kfd_preempt_type reset_type,
unsigned int utimeout, uint32_t pipe_id,
uint32_t queue_id)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t temp;
- int timeout = utimeout;
+ enum hqd_dequeue_request_type type;
+ unsigned long flags, end_jiffies;
+ int retry;
acquire_queue(kgd, pipe_id, queue_id);
WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
- WREG32(mmCP_HQD_DEQUEUE_REQUEST, reset_type);
+ switch (reset_type) {
+ case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
+ type = DRAIN_PIPE;
+ break;
+ case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
+ type = RESET_WAVES;
+ break;
+ default:
+ type = DRAIN_PIPE;
+ break;
+ }
+
+ /* Workaround: If IQ timer is active and the wait time is close to or
+ * equal to 0, dequeueing is not safe. Wait until either the wait time
+ * is larger or timer is cleared. Also, ensure that IQ_REQ_PEND is
+ * cleared before continuing. Also, ensure wait times are set to at
+ * least 0x3.
+ */
+ local_irq_save(flags);
+ preempt_disable();
+ retry = 5000; /* wait for 500 usecs at maximum */
+ while (true) {
+ temp = RREG32(mmCP_HQD_IQ_TIMER);
+ if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) {
+ pr_debug("HW is processing IQ\n");
+ goto loop;
+ }
+ if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) {
+ if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE)
+ == 3) /* SEM-rearm is safe */
+ break;
+ /* Wait time 3 is safe for CP, but our MMIO read/write
+ * time is close to 1 microsecond, so check for 10 to
+ * leave more buffer room
+ */
+ if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME)
+ >= 10)
+ break;
+ pr_debug("IQ timer is active\n");
+ } else
+ break;
+loop:
+ if (!retry) {
+ pr_err("CP HQD IQ timer status time out\n");
+ break;
+ }
+ ndelay(100);
+ --retry;
+ }
+ retry = 1000;
+ while (true) {
+ temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
+ if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK))
+ break;
+ pr_debug("Dequeue request is pending\n");
+ if (!retry) {
+ pr_err("CP HQD dequeue request time out\n");
+ break;
+ }
+ ndelay(100);
+ --retry;
+ }
+ local_irq_restore(flags);
+ preempt_enable();
+
+ WREG32(mmCP_HQD_DEQUEUE_REQUEST, type);
+
+ end_jiffies = (utimeout * HZ / 1000) + jiffies;
while (true) {
temp = RREG32(mmCP_HQD_ACTIVE);
- if (temp & CP_HQD_ACTIVE__ACTIVE_MASK)
+ if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
break;
- if (timeout <= 0) {
- pr_err("kfd: cp queue preemption time out.\n");
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("cp queue preemption time out\n");
release_queue(kgd);
return -ETIME;
}
- msleep(20);
- timeout -= 20;
+ usleep_range(500, 1000);
}
release_queue(kgd);
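
The kgd_hqd_destroy() rewrite above replaces a counter that was decremented by a guessed 20 ms per loop with a real deadline computed from jiffies, plus usleep_range() between polls. A toy sketch of polling against a wall-clock deadline; hardware_done() is only a stand-in for reading CP_HQD_ACTIVE:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool hardware_done(int polls) { return polls > 3; } /* stand-in */

/* Poll until done or until a deadline passes, instead of decrementing a
 * counter by a guessed per-iteration cost. */
static int wait_for_idle(unsigned timeout_ms)
{
    struct timespec start, now;
    int polls = 0;

    clock_gettime(CLOCK_MONOTONIC, &start);
    for (;;) {
        if (hardware_done(++polls))
            return 0;
        clock_gettime(CLOCK_MONOTONIC, &now);
        if ((now.tv_sec - start.tv_sec) * 1000 +
            (now.tv_nsec - start.tv_nsec) / 1000000 > timeout_ms)
            return -1;                  /* -ETIME in the kernel */
    }
}

int main(void)
{
    printf("%d\n", wait_for_idle(100));
    return 0;
}
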
@@ -556,6 +681,16 @@ static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}
+static void set_scratch_backing_va(struct kgd_dev *kgd,
+ uint64_t va, uint32_t vmid)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+ lock_srbm(kgd, 0, 0, 0, vmid);
+ WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va);
+ unlock_srbm(kgd);
+}
+
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
{
struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
@@ -566,42 +701,42 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
switch (type) {
case KGD_ENGINE_PFP:
hdr = (const union amdgpu_firmware_header *)
- adev->gfx.pfp_fw->data;
+ adev->gfx.pfp_fw->data;
break;
case KGD_ENGINE_ME:
hdr = (const union amdgpu_firmware_header *)
- adev->gfx.me_fw->data;
+ adev->gfx.me_fw->data;
break;
case KGD_ENGINE_CE:
hdr = (const union amdgpu_firmware_header *)
- adev->gfx.ce_fw->data;
+ adev->gfx.ce_fw->data;
break;
case KGD_ENGINE_MEC1:
hdr = (const union amdgpu_firmware_header *)
- adev->gfx.mec_fw->data;
+ adev->gfx.mec_fw->data;
break;
case KGD_ENGINE_MEC2:
hdr = (const union amdgpu_firmware_header *)
- adev->gfx.mec2_fw->data;
+ adev->gfx.mec2_fw->data;
break;
case KGD_ENGINE_RLC:
hdr = (const union amdgpu_firmware_header *)
- adev->gfx.rlc_fw->data;
+ adev->gfx.rlc_fw->data;
break;
case KGD_ENGINE_SDMA1:
hdr = (const union amdgpu_firmware_header *)
- adev->sdma.instance[0].fw->data;
+ adev->sdma.instance[0].fw->data;
break;
case KGD_ENGINE_SDMA2:
hdr = (const union amdgpu_firmware_header *)
- adev->sdma.instance[1].fw->data;
+ adev->sdma.instance[1].fw->data;
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 133d06671e46..fb6e5dbd5a03 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -39,6 +39,12 @@
#include "vi_structs.h"
#include "vid.h"
+enum hqd_dequeue_request_type {
+ NO_ACTION = 0,
+ DRAIN_PIPE,
+ RESET_WAVES
+};
+
struct cik_sdma_rlc_registers;
/*
@@ -55,12 +61,15 @@ static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
uint32_t hpd_size, uint64_t hpd_gpu_addr);
static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr);
+ uint32_t queue_id, uint32_t __user *wptr,
+ uint32_t wptr_shift, uint32_t wptr_mask,
+ struct mm_struct *mm);
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id);
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
-static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
+ enum kfd_preempt_type reset_type,
unsigned int utimeout, uint32_t pipe_id,
uint32_t queue_id);
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
@@ -85,6 +94,33 @@ static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
uint8_t vmid);
static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
+static void set_scratch_backing_va(struct kgd_dev *kgd,
+ uint64_t va, uint32_t vmid);
+
+/* Because of REG_GET_FIELD() being used, we put this function in the
+ * asic specific file.
+ */
+static int get_tile_config(struct kgd_dev *kgd,
+ struct tile_config *config)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+ config->gb_addr_config = adev->gfx.config.gb_addr_config;
+ config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
+ MC_ARB_RAMCFG, NOOFBANK);
+ config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
+ MC_ARB_RAMCFG, NOOFRANKS);
+
+ config->tile_config_ptr = adev->gfx.config.tile_mode_array;
+ config->num_tile_configs =
+ ARRAY_SIZE(adev->gfx.config.tile_mode_array);
+ config->macro_tile_config_ptr =
+ adev->gfx.config.macrotile_mode_array;
+ config->num_macro_tile_configs =
+ ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
+
+ return 0;
+}
static const struct kfd2kgd_calls kfd2kgd = {
.init_gtt_mem_allocation = alloc_gtt_mem,
@@ -111,12 +147,15 @@ static const struct kfd2kgd_calls kfd2kgd = {
.get_atc_vmid_pasid_mapping_valid =
get_atc_vmid_pasid_mapping_valid,
.write_vmid_invalidate_request = write_vmid_invalidate_request,
- .get_fw_version = get_fw_version
+ .get_fw_version = get_fw_version,
+ .set_scratch_backing_va = set_scratch_backing_va,
+ .get_tile_config = get_tile_config,
};
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
{
return (struct kfd2kgd_calls *)&kfd2kgd;
+ return (struct kfd2kgd_calls *)&kfd2kgd;
}
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
@@ -147,7 +186,7 @@ static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
- uint32_t mec = (++pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
+ uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
lock_srbm(kgd, mec, pipe, queue_id, 0);
@@ -216,7 +255,7 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
uint32_t mec;
uint32_t pipe;
- mec = (++pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
+ mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
lock_srbm(kgd, mec, pipe, 0, 0);
@@ -244,20 +283,67 @@ static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
}
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr)
+ uint32_t queue_id, uint32_t __user *wptr,
+ uint32_t wptr_shift, uint32_t wptr_mask,
+ struct mm_struct *mm)
{
- struct vi_mqd *m;
- uint32_t shadow_wptr, valid_wptr;
struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ struct vi_mqd *m;
+ uint32_t *mqd_hqd;
+ uint32_t reg, wptr_val, data;
m = get_mqd(mqd);
- valid_wptr = copy_from_user(&shadow_wptr, wptr, sizeof(shadow_wptr));
- if (valid_wptr == 0)
- m->cp_hqd_pq_wptr = shadow_wptr;
-
acquire_queue(kgd, pipe_id, queue_id);
- gfx_v8_0_mqd_commit(adev, mqd);
+
+ /* HIQ is set during driver init period with vmid set to 0*/
+ if (m->cp_hqd_vmid == 0) {
+ uint32_t value, mec, pipe;
+
+ mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
+ pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
+
+ pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
+ mec, pipe, queue_id);
+ value = RREG32(mmRLC_CP_SCHEDULERS);
+ value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
+ ((mec << 5) | (pipe << 3) | queue_id | 0x80));
+ WREG32(mmRLC_CP_SCHEDULERS, value);
+ }
+
+ /* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
+ mqd_hqd = &m->cp_mqd_base_addr_lo;
+
+ for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_HQD_EOP_CONTROL; reg++)
+ WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);
+
+ /* Tonga errata: EOP RPTR/WPTR should be left unmodified.
+ * This is safe since EOP RPTR==WPTR for any inactive HQD
+ * on ASICs that do not support context-save.
+ * EOP writes/reads can start anywhere in the ring.
+ */
+ if (get_amdgpu_device(kgd)->asic_type != CHIP_TONGA) {
+ WREG32(mmCP_HQD_EOP_RPTR, m->cp_hqd_eop_rptr);
+ WREG32(mmCP_HQD_EOP_WPTR, m->cp_hqd_eop_wptr);
+ WREG32(mmCP_HQD_EOP_WPTR_MEM, m->cp_hqd_eop_wptr_mem);
+ }
+
+ for (reg = mmCP_HQD_EOP_EVENTS; reg <= mmCP_HQD_ERROR; reg++)
+ WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);
+
+ /* Copy userspace write pointer value to register.
+ * Activate doorbell logic to monitor subsequent changes.
+ */
+ data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
+ CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
+ WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);
+
+ if (read_user_wptr(mm, wptr, wptr_val))
+ WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);
+
+ data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
+ WREG32(mmCP_HQD_ACTIVE, data);
+
release_queue(kgd);
return 0;
@@ -308,29 +394,102 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
return false;
}
-static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
+ enum kfd_preempt_type reset_type,
unsigned int utimeout, uint32_t pipe_id,
uint32_t queue_id)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t temp;
- int timeout = utimeout;
+ enum hqd_dequeue_request_type type;
+ unsigned long flags, end_jiffies;
+ int retry;
+ struct vi_mqd *m = get_mqd(mqd);
acquire_queue(kgd, pipe_id, queue_id);
- WREG32(mmCP_HQD_DEQUEUE_REQUEST, reset_type);
+ if (m->cp_hqd_vmid == 0)
+ WREG32_FIELD(RLC_CP_SCHEDULERS, scheduler1, 0);
+
+ switch (reset_type) {
+ case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
+ type = DRAIN_PIPE;
+ break;
+ case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
+ type = RESET_WAVES;
+ break;
+ default:
+ type = DRAIN_PIPE;
+ break;
+ }
+ /* Workaround: If IQ timer is active and the wait time is close to or
+ * equal to 0, dequeueing is not safe. Wait until either the wait time
+ * is larger or timer is cleared. Also, ensure that IQ_REQ_PEND is
+ * cleared before continuing. Also, ensure wait times are set to at
+ * least 0x3.
+ */
+ local_irq_save(flags);
+ preempt_disable();
+ retry = 5000; /* wait for 500 usecs at maximum */
+ while (true) {
+ temp = RREG32(mmCP_HQD_IQ_TIMER);
+ if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) {
+ pr_debug("HW is processing IQ\n");
+ goto loop;
+ }
+ if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) {
+ if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE)
+ == 3) /* SEM-rearm is safe */
+ break;
+ /* Wait time 3 is safe for CP, but our MMIO read/write
+ * time is close to 1 microsecond, so check for 10 to
+ * leave more buffer room
+ */
+ if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME)
+ >= 10)
+ break;
+ pr_debug("IQ timer is active\n");
+ } else
+ break;
+loop:
+ if (!retry) {
+ pr_err("CP HQD IQ timer status time out\n");
+ break;
+ }
+ ndelay(100);
+ --retry;
+ }
+ retry = 1000;
+ while (true) {
+ temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
+ if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK))
+ break;
+ pr_debug("Dequeue request is pending\n");
+
+ if (!retry) {
+ pr_err("CP HQD dequeue request time out\n");
+ break;
+ }
+ ndelay(100);
+ --retry;
+ }
+ local_irq_restore(flags);
+ preempt_enable();
+
+ WREG32(mmCP_HQD_DEQUEUE_REQUEST, type);
+
+ end_jiffies = (utimeout * HZ / 1000) + jiffies;
while (true) {
temp = RREG32(mmCP_HQD_ACTIVE);
- if (temp & CP_HQD_ACTIVE__ACTIVE_MASK)
+ if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
break;
- if (timeout <= 0) {
- pr_err("kfd: cp queue preemption time out.\n");
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("cp queue preemption time out.\n");
release_queue(kgd);
return -ETIME;
}
- msleep(20);
- timeout -= 20;
+ usleep_range(500, 1000);
}
release_queue(kgd);
@@ -444,6 +603,16 @@ static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
return 0;
}
+static void set_scratch_backing_va(struct kgd_dev *kgd,
+ uint64_t va, uint32_t vmid)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+
+ lock_srbm(kgd, 0, 0, 0, vmid);
+ WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va);
+ unlock_srbm(kgd);
+}
+
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
{
struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
@@ -454,42 +623,42 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
switch (type) {
case KGD_ENGINE_PFP:
hdr = (const union amdgpu_firmware_header *)
- adev->gfx.pfp_fw->data;
+ adev->gfx.pfp_fw->data;
break;
case KGD_ENGINE_ME:
hdr = (const union amdgpu_firmware_header *)
- adev->gfx.me_fw->data;
+ adev->gfx.me_fw->data;
break;
case KGD_ENGINE_CE:
hdr = (const union amdgpu_firmware_header *)
- adev->gfx.ce_fw->data;
+ adev->gfx.ce_fw->data;
break;
case KGD_ENGINE_MEC1:
hdr = (const union amdgpu_firmware_header *)
- adev->gfx.mec_fw->data;
+ adev->gfx.mec_fw->data;
break;
case KGD_ENGINE_MEC2:
hdr = (const union amdgpu_firmware_header *)
- adev->gfx.mec2_fw->data;
+ adev->gfx.mec2_fw->data;
break;
case KGD_ENGINE_RLC:
hdr = (const union amdgpu_firmware_header *)
- adev->gfx.rlc_fw->data;
+ adev->gfx.rlc_fw->data;
break;
case KGD_ENGINE_SDMA1:
hdr = (const union amdgpu_firmware_header *)
- adev->sdma.instance[0].fw->data;
+ adev->sdma.instance[0].fw->data;
break;
case KGD_ENGINE_SDMA2:
hdr = (const union amdgpu_firmware_header *)
- adev->sdma.instance[1].fw->data;
+ adev->sdma.instance[1].fw->data;
break;
default:
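
The hunk above replaces the old msleep()-based countdown with a jiffies deadline checked via time_after() plus usleep_range(), and bounds the new CP_HQD_IQ_TIMER / IQ_REQ_PEND waits with fixed retry counters and ndelay(100) so the dequeue request is only written once the hardware is in a safe state. Below is a minimal standalone model of that bounded-retry polling pattern, not driver code: read_reg() and REG_BUSY are invented stand-ins for RREG32() and the pending bit, and the delay call is omitted.

	/* Illustrative model of the bounded-retry register poll used above.
	 * In the driver this runs under local_irq_save() with ndelay(100)
	 * between polls and pr_err() on timeout.
	 */
	#include <stdio.h>
	#include <stdint.h>

	#define REG_BUSY 0x1u

	static unsigned reads_left = 7;	/* pretend HW deasserts after 7 reads */

	static uint32_t read_reg(void)
	{
		return reads_left-- ? REG_BUSY : 0;
	}

	int main(void)
	{
		int retry = 1000;	/* ~100 us at 100 ns per poll in the driver */

		while (read_reg() & REG_BUSY) {
			if (!--retry) {
				fprintf(stderr, "dequeue request timed out\n");
				return 1;
			}
			/* ndelay(100) would go here */
		}
		printf("register went idle after %d busy polls\n", 1000 - retry);
		return 0;
	}
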
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 1e8e1123ddf4..ce443586a0c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -1686,7 +1686,7 @@ void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock)
{
uint32_t bios_6_scratch;
- bios_6_scratch = RREG32(mmBIOS_SCRATCH_6);
+ bios_6_scratch = RREG32(adev->bios_scratch_reg_offset + 6);
if (lock) {
bios_6_scratch |= ATOM_S6_CRITICAL_STATE;
@@ -1696,15 +1696,17 @@ void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock)
bios_6_scratch |= ATOM_S6_ACC_MODE;
}
- WREG32(mmBIOS_SCRATCH_6, bios_6_scratch);
+ WREG32(adev->bios_scratch_reg_offset + 6, bios_6_scratch);
}
void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev)
{
uint32_t bios_2_scratch, bios_6_scratch;
- bios_2_scratch = RREG32(mmBIOS_SCRATCH_2);
- bios_6_scratch = RREG32(mmBIOS_SCRATCH_6);
+ adev->bios_scratch_reg_offset = mmBIOS_SCRATCH_0;
+
+ bios_2_scratch = RREG32(adev->bios_scratch_reg_offset + 2);
+ bios_6_scratch = RREG32(adev->bios_scratch_reg_offset + 6);
/* let the bios control the backlight */
bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE;
@@ -1715,8 +1717,8 @@ void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev)
/* clear the vbios dpms state */
bios_2_scratch &= ~ATOM_S2_DEVICE_DPMS_STATE;
- WREG32(mmBIOS_SCRATCH_2, bios_2_scratch);
- WREG32(mmBIOS_SCRATCH_6, bios_6_scratch);
+ WREG32(adev->bios_scratch_reg_offset + 2, bios_2_scratch);
+ WREG32(adev->bios_scratch_reg_offset + 6, bios_6_scratch);
}
void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev)
@@ -1724,7 +1726,7 @@ void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev)
int i;
for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++)
- adev->bios_scratch[i] = RREG32(mmBIOS_SCRATCH_0 + i);
+ adev->bios_scratch[i] = RREG32(adev->bios_scratch_reg_offset + i);
}
void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev)
@@ -1738,20 +1740,30 @@ void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev)
adev->bios_scratch[7] &= ~ATOM_S7_ASIC_INIT_COMPLETE_MASK;
for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++)
- WREG32(mmBIOS_SCRATCH_0 + i, adev->bios_scratch[i]);
+ WREG32(adev->bios_scratch_reg_offset + i, adev->bios_scratch[i]);
}
void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
bool hung)
{
- u32 tmp = RREG32(mmBIOS_SCRATCH_3);
+ u32 tmp = RREG32(adev->bios_scratch_reg_offset + 3);
if (hung)
tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
else
tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
- WREG32(mmBIOS_SCRATCH_3, tmp);
+ WREG32(adev->bios_scratch_reg_offset + 3, tmp);
+}
+
+bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev)
+{
+ u32 tmp = RREG32(adev->bios_scratch_reg_offset + 7);
+
+ if (tmp & ATOM_S7_ASIC_INIT_COMPLETE_MASK)
+ return false;
+ else
+ return true;
}
/* Atom needs data in little endian format
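
In the amdgpu_atombios.c hunks above, every scratch-register access now goes through adev->bios_scratch_reg_offset plus an index instead of hard-coded mmBIOS_SCRATCH_N registers, and the new amdgpu_atombios_scratch_need_asic_init() helper just tests the ASIC_INIT_COMPLETE bit in scratch register 7. The standalone sketch below models that base-plus-index scheme; the register array, base offset and bit position are illustrative values, not the real hardware layout.

	/* Toy model of the base + index scratch-register scheme. */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define INIT_COMPLETE_MASK (1u << 9)	/* illustrative bit only */

	static uint32_t regs[256];
	static unsigned scratch_base = 0x30;	/* would be mmBIOS_SCRATCH_0 */

	static uint32_t rreg(unsigned off)             { return regs[off]; }
	static void     wreg(unsigned off, uint32_t v) { regs[off] = v; }

	static bool need_asic_init(void)
	{
		/* mirrors amdgpu_atombios_scratch_need_asic_init(): init is
		 * needed until the completion bit in scratch reg 7 is set */
		return !(rreg(scratch_base + 7) & INIT_COMPLETE_MASK);
	}

	int main(void)
	{
		printf("need init: %d\n", need_asic_init());	/* 1 */
		wreg(scratch_base + 7, INIT_COMPLETE_MASK);
		printf("need init: %d\n", need_asic_init());	/* 0 */
		return 0;
	}
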
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
index 38d0fe32e5cd..b0d5d1d7fdba 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
@@ -200,6 +200,7 @@ void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev);
void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev);
void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
bool hung);
+bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev);
void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index 4bdda56fccee..f9ffe8ef0cd6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -66,41 +66,6 @@ void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev)
}
}
-void amdgpu_atomfirmware_scratch_regs_save(struct amdgpu_device *adev)
-{
- int i;
-
- for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++)
- adev->bios_scratch[i] = RREG32(adev->bios_scratch_reg_offset + i);
-}
-
-void amdgpu_atomfirmware_scratch_regs_restore(struct amdgpu_device *adev)
-{
- int i;
-
- /*
- * VBIOS will check ASIC_INIT_COMPLETE bit to decide if
- * execute ASIC_Init posting via driver
- */
- adev->bios_scratch[7] &= ~ATOM_S7_ASIC_INIT_COMPLETE_MASK;
-
- for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++)
- WREG32(adev->bios_scratch_reg_offset + i, adev->bios_scratch[i]);
-}
-
-void amdgpu_atomfirmware_scratch_regs_engine_hung(struct amdgpu_device *adev,
- bool hung)
-{
- u32 tmp = RREG32(adev->bios_scratch_reg_offset + 3);
-
- if (hung)
- tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
- else
- tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
-
- WREG32(adev->bios_scratch_reg_offset + 3, tmp);
-}
-
int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
{
struct atom_context *ctx = adev->mode_info.atom_context;
@@ -130,3 +95,129 @@ int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
ctx->scratch_size_bytes = usage_bytes;
return 0;
}
+
+union igp_info {
+ struct atom_integrated_system_info_v1_11 v11;
+};
+
+/*
+ * Return vram width from integrated system info table, if available,
+ * or 0 if not.
+ */
+int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev)
+{
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ integratedsysteminfo);
+ u16 data_offset, size;
+ union igp_info *igp_info;
+ u8 frev, crev;
+
+ /* get any igp specific overrides */
+ if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, &size,
+ &frev, &crev, &data_offset)) {
+ igp_info = (union igp_info *)
+ (mode_info->atom_context->bios + data_offset);
+ switch (crev) {
+ case 11:
+ return igp_info->v11.umachannelnumber * 64;
+ default:
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
+union firmware_info {
+ struct atom_firmware_info_v3_1 v31;
+};
+
+union smu_info {
+ struct atom_smu_info_v3_1 v31;
+};
+
+union umc_info {
+ struct atom_umc_info_v3_1 v31;
+};
+
+int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev)
+{
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ struct amdgpu_pll *spll = &adev->clock.spll;
+ struct amdgpu_pll *mpll = &adev->clock.mpll;
+ uint8_t frev, crev;
+ uint16_t data_offset;
+ int ret = -EINVAL, index;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ firmwareinfo);
+ if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ union firmware_info *firmware_info =
+ (union firmware_info *)(mode_info->atom_context->bios +
+ data_offset);
+
+ adev->clock.default_sclk =
+ le32_to_cpu(firmware_info->v31.bootup_sclk_in10khz);
+ adev->clock.default_mclk =
+ le32_to_cpu(firmware_info->v31.bootup_mclk_in10khz);
+
+ adev->pm.current_sclk = adev->clock.default_sclk;
+ adev->pm.current_mclk = adev->clock.default_mclk;
+
+ /* not technically a clock, but... */
+ adev->mode_info.firmware_flags =
+ le32_to_cpu(firmware_info->v31.firmware_capability);
+
+ ret = 0;
+ }
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ smu_info);
+ if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ union smu_info *smu_info =
+ (union smu_info *)(mode_info->atom_context->bios +
+ data_offset);
+
+ /* system clock */
+ spll->reference_freq = le32_to_cpu(smu_info->v31.core_refclk_10khz);
+
+ spll->reference_div = 0;
+ spll->min_post_div = 1;
+ spll->max_post_div = 1;
+ spll->min_ref_div = 2;
+ spll->max_ref_div = 0xff;
+ spll->min_feedback_div = 4;
+ spll->max_feedback_div = 0xff;
+ spll->best_vco = 0;
+
+ ret = 0;
+ }
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ umc_info);
+ if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ union umc_info *umc_info =
+ (union umc_info *)(mode_info->atom_context->bios +
+ data_offset);
+
+ /* memory clock */
+ mpll->reference_freq = le32_to_cpu(umc_info->v31.mem_refclk_10khz);
+
+ mpll->reference_div = 0;
+ mpll->min_post_div = 1;
+ mpll->max_post_div = 1;
+ mpll->min_ref_div = 2;
+ mpll->max_ref_div = 0xff;
+ mpll->min_feedback_div = 4;
+ mpll->max_feedback_div = 0xff;
+ mpll->best_vco = 0;
+
+ ret = 0;
+ }
+
+ return ret;
+}
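
The new atomfirmware helpers follow a common pattern: look up a data table index with get_index_into_master_table(), let amdgpu_atom_parse_data_header() return the content revision and data offset, cast the BIOS image at that offset to a versioned union, and dispatch on crev (for example umachannelnumber * 64 for the vram width, or the bootup and reference clocks in get_clock_info). A simplified standalone model of that version-dispatched parsing is sketched below; the structures, header format and revision handling are invented for illustration and do not match the real ATOM layouts.

	/* Simplified model of version-dispatched table parsing. */
	#include <stdint.h>
	#include <stdio.h>

	struct igp_info_v11 { uint8_t umachannelnumber; };

	union igp_info {
		struct igp_info_v11 v11;
	};

	/* stands in for amdgpu_atom_parse_data_header(): fake 1-byte header */
	static int parse_header(const uint8_t *bios, uint8_t *crev, uint16_t *off)
	{
		*crev = bios[0];
		*off  = 1;
		return 1;
	}

	static int get_vram_width(const uint8_t *bios)
	{
		uint16_t data_offset;
		uint8_t crev;

		if (!parse_header(bios, &crev, &data_offset))
			return 0;

		const union igp_info *info =
			(const union igp_info *)(bios + data_offset);

		switch (crev) {
		case 11:	/* known layout: channels * 64 bits */
			return info->v11.umachannelnumber * 64;
		default:	/* unknown revision: report nothing */
			return 0;
		}
	}

	int main(void)
	{
		uint8_t bios[2] = { 11, 2 };	/* crev 11, two UMA channels */

		printf("vram width: %d bits\n", get_vram_width(bios));
		return 0;
	}
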
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
index a2c3ebe22c71..288b97e54347 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
@@ -26,10 +26,8 @@
bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev);
void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev);
-void amdgpu_atomfirmware_scratch_regs_save(struct amdgpu_device *adev);
-void amdgpu_atomfirmware_scratch_regs_restore(struct amdgpu_device *adev);
-void amdgpu_atomfirmware_scratch_regs_engine_hung(struct amdgpu_device *adev,
- bool hung);
int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev);
+int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev);
+int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 1beae5b930d0..63ec1e1bb6aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -40,7 +40,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
for (i = 0; i < n; i++) {
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence,
- false);
+ false, false);
if (r)
goto exit_do_move;
r = dma_fence_wait(fence, false);
@@ -81,7 +81,7 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
n = AMDGPU_BENCHMARK_ITERATIONS;
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL,
- NULL, &sobj);
+ NULL, 0, &sobj);
if (r) {
goto out_cleanup;
}
@@ -94,7 +94,7 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
goto out_cleanup;
}
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL,
- NULL, &dobj);
+ NULL, 0, &dobj);
if (r) {
goto out_cleanup;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 365e735f6647..c21adf60a7f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -86,19 +86,6 @@ static bool check_atom_bios(uint8_t *bios, size_t size)
return false;
}
-static bool is_atom_fw(uint8_t *bios)
-{
- uint16_t bios_header_start = bios[0x48] | (bios[0x49] << 8);
- uint8_t frev = bios[bios_header_start + 2];
- uint8_t crev = bios[bios_header_start + 3];
-
- if ((frev < 3) ||
- ((frev == 3) && (crev < 3)))
- return false;
-
- return true;
-}
-
/* If you boot an IGP board with a discrete card as the primary,
* the IGP rom is not accessible via the rom bar as the IGP rom is
* part of the system bios. On boot, the system bios puts a
@@ -117,7 +104,7 @@ static bool igp_read_bios_from_vram(struct amdgpu_device *adev)
adev->bios = NULL;
vram_base = pci_resource_start(adev->pdev, 0);
- bios = ioremap(vram_base, size);
+ bios = ioremap_wc(vram_base, size);
if (!bios) {
return false;
}
@@ -455,6 +442,6 @@ bool amdgpu_get_bios(struct amdgpu_device *adev)
return false;
success:
- adev->is_atom_fw = is_atom_fw(adev->bios);
+ adev->is_atom_fw = (adev->asic_type >= CHIP_VEGA10) ? true : false;
return true;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index f621ee115c98..59089e027f4d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -83,7 +83,7 @@ static int amdgpu_bo_list_create(struct amdgpu_device *adev,
r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL);
mutex_unlock(&fpriv->bo_list_lock);
if (r < 0) {
- kfree(list);
+ amdgpu_bo_list_free(list);
return r;
}
*id = r;
@@ -136,7 +136,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
}
bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
if (usermm) {
@@ -156,11 +156,11 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
entry->tv.bo = &entry->robj->tbo;
entry->tv.shared = !entry->robj->prime_shared_count;
- if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GDS)
+ if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GDS)
gds_obj = entry->robj;
- if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GWS)
+ if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GWS)
gws_obj = entry->robj;
- if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_OA)
+ if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_OA)
oa_obj = entry->robj;
total_size += amdgpu_bo_size(entry->robj);
@@ -198,12 +198,16 @@ amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id)
result = idr_find(&fpriv->bo_list_handles, id);
if (result) {
- if (kref_get_unless_zero(&result->refcount))
+ if (kref_get_unless_zero(&result->refcount)) {
+ rcu_read_unlock();
mutex_lock(&result->lock);
- else
+ } else {
+ rcu_read_unlock();
result = NULL;
+ }
+ } else {
+ rcu_read_unlock();
}
- rcu_read_unlock();
return result;
}
@@ -266,7 +270,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
struct amdgpu_fpriv *fpriv = filp->driver_priv;
union drm_amdgpu_bo_list *args = data;
uint32_t handle = args->in.list_handle;
- const void __user *uptr = (const void*)(uintptr_t)args->in.bo_info_ptr;
+ const void __user *uptr = u64_to_user_ptr(args->in.bo_info_ptr);
struct drm_amdgpu_bo_list_entry *info;
struct amdgpu_bo_list *list;
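
The amdgpu_bo_list_get() hunk above moves rcu_read_unlock() in front of mutex_lock(): the object is pinned with kref_get_unless_zero() while still under the RCU read lock, and only after the read-side critical section ends may the code block on the mutex. The standalone sketch below models that "look up under a read lock, take a reference, drop the read lock, then sleep" ordering; pthreads primitives and a plain integer refcount stand in for RCU and kref, so it is an analogy rather than a race-free equivalent.

	/* Model of the lookup/pin/unlock ordering; not the kernel primitives. */
	#include <pthread.h>
	#include <stdio.h>

	struct obj {
		int refcount;			/* kref in the driver */
		pthread_mutex_t lock;		/* result->lock in the driver */
	};

	static pthread_rwlock_t lookup_lock = PTHREAD_RWLOCK_INITIALIZER;
	static struct obj table_entry = { 1, PTHREAD_MUTEX_INITIALIZER };

	static struct obj *obj_get(void)
	{
		struct obj *o = NULL;

		pthread_rwlock_rdlock(&lookup_lock);	/* rcu_read_lock() */
		if (table_entry.refcount > 0) {		/* kref_get_unless_zero() */
			table_entry.refcount++;		/* atomic in the real code */
			o = &table_entry;
		}
		pthread_rwlock_unlock(&lookup_lock);	/* rcu_read_unlock() first */

		if (o)
			pthread_mutex_lock(&o->lock);	/* may sleep - now legal */
		return o;
	}

	int main(void)
	{
		struct obj *o = obj_get();

		printf("got object: %s\n", o ? "yes" : "no");
		if (o)
			pthread_mutex_unlock(&o->lock);
		return 0;
	}
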
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index c0a806280257..fd435a96481c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -124,7 +124,7 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
true, domain, flags,
NULL, &placement, NULL,
- &obj);
+ 0, &obj);
if (ret) {
DRM_ERROR("(%d) bo create failed\n", ret);
return ret;
@@ -166,7 +166,7 @@ static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t h
r = amdgpu_bo_reserve(obj, true);
if (unlikely(r != 0))
return r;
- r = amdgpu_bo_pin_restricted(obj, obj->prefered_domains,
+ r = amdgpu_bo_pin_restricted(obj, obj->preferred_domains,
min_offset, max_offset, mcaddr);
amdgpu_bo_unreserve(obj);
return r;
@@ -240,6 +240,8 @@ static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device,
return RREG32_DIDT(index);
case CGS_IND_REG_GC_CAC:
return RREG32_GC_CAC(index);
+ case CGS_IND_REG_SE_CAC:
+ return RREG32_SE_CAC(index);
case CGS_IND_REG__AUDIO_ENDPT:
DRM_ERROR("audio endpt register access not implemented.\n");
return 0;
@@ -266,6 +268,8 @@ static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
return WREG32_DIDT(index, value);
case CGS_IND_REG_GC_CAC:
return WREG32_GC_CAC(index, value);
+ case CGS_IND_REG_SE_CAC:
+ return WREG32_SE_CAC(index, value);
case CGS_IND_REG__AUDIO_ENDPT:
DRM_ERROR("audio endpt register access not implemented.\n");
return;
@@ -610,6 +614,17 @@ static int amdgpu_cgs_enter_safe_mode(struct cgs_device *cgs_device,
return 0;
}
+static void amdgpu_cgs_lock_grbm_idx(struct cgs_device *cgs_device,
+ bool lock)
+{
+ CGS_FUNC_ADEV;
+
+ if (lock)
+ mutex_lock(&adev->grbm_idx_mutex);
+ else
+ mutex_unlock(&adev->grbm_idx_mutex);
+}
+
static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
enum cgs_ucode_id type,
struct cgs_firmware_info *info)
@@ -644,7 +659,7 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
if (CGS_UCODE_ID_CP_MEC == type)
- info->image_size = (header->jt_offset) << 2;
+ info->image_size = le32_to_cpu(header->jt_offset) << 2;
info->fw_version = amdgpu_get_firmware_version(cgs_device, type);
info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
@@ -719,7 +734,13 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
strcpy(fw_name, "amdgpu/polaris12_smc.bin");
break;
case CHIP_VEGA10:
- strcpy(fw_name, "amdgpu/vega10_smc.bin");
+ if ((adev->pdev->device == 0x687f) &&
+ ((adev->pdev->revision == 0xc0) ||
+ (adev->pdev->revision == 0xc1) ||
+ (adev->pdev->revision == 0xc3)))
+ strcpy(fw_name, "amdgpu/vega10_acg_smc.bin");
+ else
+ strcpy(fw_name, "amdgpu/vega10_smc.bin");
break;
default:
DRM_ERROR("SMC firmware not supported\n");
@@ -1117,6 +1138,7 @@ static const struct cgs_ops amdgpu_cgs_ops = {
.query_system_info = amdgpu_cgs_query_system_info,
.is_virtualization_enabled = amdgpu_cgs_is_virtualization_enabled,
.enter_safe_mode = amdgpu_cgs_enter_safe_mode,
+ .lock_grbm_idx = amdgpu_cgs_lock_grbm_idx,
};
static const struct cgs_os_ops amdgpu_cgs_os_ops = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 5599c01b265d..269b835571eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -54,7 +54,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
*offset = data->offset;
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) {
amdgpu_bo_unref(&p->uf_entry.robj);
@@ -90,7 +90,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
}
/* get chunks */
- chunk_array_user = (uint64_t __user *)(uintptr_t)(cs->in.chunks);
+ chunk_array_user = u64_to_user_ptr(cs->in.chunks);
if (copy_from_user(chunk_array, chunk_array_user,
sizeof(uint64_t)*cs->in.num_chunks)) {
ret = -EFAULT;
@@ -110,7 +110,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
struct drm_amdgpu_cs_chunk user_chunk;
uint32_t __user *cdata;
- chunk_ptr = (void __user *)(uintptr_t)chunk_array[i];
+ chunk_ptr = u64_to_user_ptr(chunk_array[i]);
if (copy_from_user(&user_chunk, chunk_ptr,
sizeof(struct drm_amdgpu_cs_chunk))) {
ret = -EFAULT;
@@ -121,7 +121,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
p->chunks[i].length_dw = user_chunk.length_dw;
size = p->chunks[i].length_dw;
- cdata = (void __user *)(uintptr_t)user_chunk.chunk_data;
+ cdata = u64_to_user_ptr(user_chunk.chunk_data);
p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
if (p->chunks[i].kdata == NULL) {
@@ -223,10 +223,11 @@ static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
* ticks. The accumulated microseconds (us) are converted to bytes and
* returned.
*/
-static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
+static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
+ u64 *max_bytes,
+ u64 *max_vis_bytes)
{
s64 time_us, increment_us;
- u64 max_bytes;
u64 free_vram, total_vram, used_vram;
/* Allow a maximum of 200 accumulated ms. This is basically per-IB
@@ -238,11 +239,14 @@ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
*/
const s64 us_upper_bound = 200000;
- if (!adev->mm_stats.log2_max_MBps)
- return 0;
+ if (!adev->mm_stats.log2_max_MBps) {
+ *max_bytes = 0;
+ *max_vis_bytes = 0;
+ return;
+ }
total_vram = adev->mc.real_vram_size - adev->vram_pin_size;
- used_vram = atomic64_read(&adev->vram_usage);
+ used_vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
spin_lock(&adev->mm_stats.lock);
@@ -280,23 +284,46 @@ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
}
- /* This returns 0 if the driver is in debt to disallow (optional)
+ /* This is set to 0 if the driver is in debt to disallow (optional)
* buffer moves.
*/
- max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);
+ *max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);
+
+ /* Do the same for visible VRAM if half of it is free */
+ if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
+ u64 total_vis_vram = adev->mc.visible_vram_size;
+ u64 used_vis_vram =
+ amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
+
+ if (used_vis_vram < total_vis_vram) {
+ u64 free_vis_vram = total_vis_vram - used_vis_vram;
+ adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
+ increment_us, us_upper_bound);
+
+ if (free_vis_vram >= total_vis_vram / 2)
+ adev->mm_stats.accum_us_vis =
+ max(bytes_to_us(adev, free_vis_vram / 2),
+ adev->mm_stats.accum_us_vis);
+ }
+
+ *max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
+ } else {
+ *max_vis_bytes = 0;
+ }
spin_unlock(&adev->mm_stats.lock);
- return max_bytes;
}
/* Report how many bytes have really been moved for the last command
* submission. This can result in a debt that can stop buffer migrations
* temporarily.
*/
-void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes)
+void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
+ u64 num_vis_bytes)
{
spin_lock(&adev->mm_stats.lock);
adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
+ adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
spin_unlock(&adev->mm_stats.lock);
}
@@ -304,7 +331,7 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- u64 initial_bytes_moved;
+ u64 initial_bytes_moved, bytes_moved;
uint32_t domain;
int r;
@@ -314,17 +341,35 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
/* Don't move this buffer if we have depleted our allowance
* to move it. Don't move anything if the threshold is zero.
*/
- if (p->bytes_moved < p->bytes_moved_threshold)
- domain = bo->prefered_domains;
- else
+ if (p->bytes_moved < p->bytes_moved_threshold) {
+ if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
+ (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
+ /* And don't move a CPU_ACCESS_REQUIRED BO to limited
+ * visible VRAM if we've depleted our allowance to do
+ * that.
+ */
+ if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
+ domain = bo->preferred_domains;
+ else
+ domain = bo->allowed_domains;
+ } else {
+ domain = bo->preferred_domains;
+ }
+ } else {
domain = bo->allowed_domains;
+ }
retry:
amdgpu_ttm_placement_from_domain(bo, domain);
initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
- p->bytes_moved += atomic64_read(&adev->num_bytes_moved) -
- initial_bytes_moved;
+ bytes_moved = atomic64_read(&adev->num_bytes_moved) -
+ initial_bytes_moved;
+ p->bytes_moved += bytes_moved;
+ if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
+ bo->tbo.mem.mem_type == TTM_PL_VRAM &&
+ bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT)
+ p->bytes_moved_vis += bytes_moved;
if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
domain = bo->allowed_domains;
@@ -350,7 +395,8 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
struct amdgpu_bo_list_entry *candidate = p->evictable;
struct amdgpu_bo *bo = candidate->robj;
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- u64 initial_bytes_moved;
+ u64 initial_bytes_moved, bytes_moved;
+ bool update_bytes_moved_vis;
uint32_t other;
/* If we reached our current BO we can forget it */
@@ -370,10 +416,17 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
/* Good we can try to move this BO somewhere else */
amdgpu_ttm_placement_from_domain(bo, other);
+ update_bytes_moved_vis =
+ adev->mc.visible_vram_size < adev->mc.real_vram_size &&
+ bo->tbo.mem.mem_type == TTM_PL_VRAM &&
+ bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT;
initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
- p->bytes_moved += atomic64_read(&adev->num_bytes_moved) -
+ bytes_moved = atomic64_read(&adev->num_bytes_moved) -
initial_bytes_moved;
+ p->bytes_moved += bytes_moved;
+ if (update_bytes_moved_vis)
+ p->bytes_moved_vis += bytes_moved;
if (unlikely(r))
break;
@@ -554,8 +607,10 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
list_splice(&need_pages, &p->validated);
}
- p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev);
+ amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
+ &p->bytes_moved_vis_threshold);
p->bytes_moved = 0;
+ p->bytes_moved_vis = 0;
p->evictable = list_last_entry(&p->validated,
struct amdgpu_bo_list_entry,
tv.head);
@@ -579,8 +634,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
goto error_validate;
}
- amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved);
-
+ amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
+ p->bytes_moved_vis);
fpriv->vm.last_eviction_counter =
atomic64_read(&p->adev->num_evictions);
@@ -619,10 +674,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
}
error_validate:
- if (r) {
- amdgpu_vm_move_pt_bos_in_lru(p->adev, &fpriv->vm);
+ if (r)
ttm_eu_backoff_reservation(&p->ticket, &p->validated);
- }
error_free_pages:
@@ -670,21 +723,18 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
* If error is set than unvalidate buffer, otherwise just free memory
* used by parsing context.
**/
-static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
+static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
+ bool backoff)
{
- struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
unsigned i;
- if (!error) {
- amdgpu_vm_move_pt_bos_in_lru(parser->adev, &fpriv->vm);
-
+ if (!error)
ttm_eu_fence_buffer_objects(&parser->ticket,
&parser->validated,
parser->fence);
- } else if (backoff) {
+ else if (backoff)
ttm_eu_backoff_reservation(&parser->ticket,
&parser->validated);
- }
for (i = 0; i < parser->num_post_dep_syncobjs; i++)
drm_syncobj_put(parser->post_dep_syncobjs[i]);
@@ -737,7 +787,8 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
if (amdgpu_sriov_vf(adev)) {
struct dma_fence *f;
- bo_va = vm->csa_bo_va;
+
+ bo_va = fpriv->csa_va;
BUG_ON(!bo_va);
r = amdgpu_vm_bo_update(adev, bo_va, false);
if (r)
@@ -774,7 +825,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
}
- r = amdgpu_vm_clear_invalids(adev, vm, &p->job->sync);
+ r = amdgpu_vm_clear_moved(adev, vm, &p->job->sync);
if (amdgpu_vm_debug && p->bo_list) {
/* Invalidate all BOs to test for userspace bugs */
@@ -984,7 +1035,7 @@ static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
{
int r;
struct dma_fence *fence;
- r = drm_syncobj_fence_get(p->filp, handle, &fence);
+ r = drm_syncobj_find_fence(p->filp, handle, &fence);
if (r)
return r;
@@ -1383,7 +1434,7 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
if (fences == NULL)
return -ENOMEM;
- fences_user = (void __user *)(uintptr_t)(wait->in.fences);
+ fences_user = u64_to_user_ptr(wait->in.fences);
if (copy_from_user(fences, fences_user,
sizeof(struct drm_amdgpu_fence) * fence_count)) {
r = -EFAULT;
@@ -1436,7 +1487,7 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
addr > mapping->last)
continue;
- *bo = lobj->bo_va->bo;
+ *bo = lobj->bo_va->base.bo;
return mapping;
}
@@ -1445,7 +1496,7 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
addr > mapping->last)
continue;
- *bo = lobj->bo_va->bo;
+ *bo = lobj->bo_va->base.bo;
return mapping;
}
}
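
amdgpu_cs_get_threshold_for_moves() turns the time elapsed since the last submission into a byte budget (us_to_bytes of the accumulated microseconds, capped at 200 ms), and this patch adds a second budget for CPU-visible VRAM that only grows while at least half of visible VRAM is free; amdgpu_cs_report_moved_bytes() later debits both budgets, possibly driving them negative ("debt"). The standalone model below shows just the accumulate/cap/convert/debit arithmetic; the 4 GB/s conversion factor is an assumed figure, not the driver's measured log2_max_MBps.

	/* Toy model of the byte-budget throttling used for buffer moves. */
	#include <stdint.h>
	#include <stdio.h>

	#define US_UPPER_BOUND 200000LL	/* 200 ms cap, as in the driver */
	#define BYTES_PER_US   4096LL	/* assumed ~4 GB/s, illustrative only */

	static int64_t accum_us;

	static int64_t us_to_bytes(int64_t us) { return us * BYTES_PER_US; }
	static int64_t bytes_to_us(int64_t b)  { return b / BYTES_PER_US; }

	static int64_t get_threshold(int64_t elapsed_us)
	{
		accum_us += elapsed_us;
		if (accum_us > US_UPPER_BOUND)
			accum_us = US_UPPER_BOUND;
		return us_to_bytes(accum_us);	/* 0 means: no optional moves */
	}

	static void report_moved(int64_t bytes)
	{
		accum_us -= bytes_to_us(bytes);	/* may go negative: we are in debt */
	}

	int main(void)
	{
		printf("budget: %lld bytes\n", (long long)get_threshold(10000));
		report_moved(50ll << 20);	/* moved 50 MiB */
		printf("budget: %lld bytes\n", (long long)get_threshold(10000));
		return 0;
	}
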
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 4a8fc15467cf..1a459ac63df4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -53,6 +53,9 @@
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>
+#include "amdgpu_vf_error.h"
+
+#include "amdgpu_amdkfd.h"
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
@@ -128,6 +131,10 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
{
trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
+ if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
+ adev->last_mm_index = v;
+ }
+
if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
BUG_ON(in_interrupt());
return amdgpu_virt_kiq_wreg(adev, reg, v);
@@ -143,6 +150,10 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
}
+
+ if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
+ udelay(500);
+ }
}
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
@@ -157,6 +168,9 @@ u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
+ if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
+ adev->last_mm_index = v;
+ }
if ((reg * 4) < adev->rio_mem_size)
iowrite32(v, adev->rio_mem + (reg * 4));
@@ -164,6 +178,10 @@ void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
}
+
+ if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
+ udelay(500);
+ }
}
/**
@@ -318,51 +336,16 @@ static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
{
- int r;
-
- if (adev->vram_scratch.robj == NULL) {
- r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
- PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, &adev->vram_scratch.robj);
- if (r) {
- return r;
- }
- }
-
- r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
- if (unlikely(r != 0))
- return r;
- r = amdgpu_bo_pin(adev->vram_scratch.robj,
- AMDGPU_GEM_DOMAIN_VRAM, &adev->vram_scratch.gpu_addr);
- if (r) {
- amdgpu_bo_unreserve(adev->vram_scratch.robj);
- return r;
- }
- r = amdgpu_bo_kmap(adev->vram_scratch.robj,
- (void **)&adev->vram_scratch.ptr);
- if (r)
- amdgpu_bo_unpin(adev->vram_scratch.robj);
- amdgpu_bo_unreserve(adev->vram_scratch.robj);
-
- return r;
+ return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->vram_scratch.robj,
+ &adev->vram_scratch.gpu_addr,
+ (void **)&adev->vram_scratch.ptr);
}
static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
{
- int r;
-
- if (adev->vram_scratch.robj == NULL) {
- return;
- }
- r = amdgpu_bo_reserve(adev->vram_scratch.robj, true);
- if (likely(r == 0)) {
- amdgpu_bo_kunmap(adev->vram_scratch.robj);
- amdgpu_bo_unpin(adev->vram_scratch.robj);
- amdgpu_bo_unreserve(adev->vram_scratch.robj);
- }
- amdgpu_bo_unref(&adev->vram_scratch.robj);
+ amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
}
/**
@@ -521,7 +504,8 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
int r;
if (adev->wb.wb_obj == NULL) {
- r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t),
+ /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 bytes = AMDGPU_MAX_WB 256-bit slots */
+ r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
&adev->wb.wb_obj, &adev->wb.gpu_addr,
(void **)&adev->wb.wb);
@@ -552,32 +536,10 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
{
unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
- if (offset < adev->wb.num_wb) {
- __set_bit(offset, adev->wb.used);
- *wb = offset;
- return 0;
- } else {
- return -EINVAL;
- }
-}
-/**
- * amdgpu_wb_get_64bit - Allocate a wb entry
- *
- * @adev: amdgpu_device pointer
- * @wb: wb index
- *
- * Allocate a wb slot for use by the driver (all asics).
- * Returns 0 on success or -EINVAL on failure.
- */
-int amdgpu_wb_get_64bit(struct amdgpu_device *adev, u32 *wb)
-{
- unsigned long offset = bitmap_find_next_zero_area_off(adev->wb.used,
- adev->wb.num_wb, 0, 2, 7, 0);
- if ((offset + 1) < adev->wb.num_wb) {
+ if (offset < adev->wb.num_wb) {
__set_bit(offset, adev->wb.used);
- __set_bit(offset + 1, adev->wb.used);
- *wb = offset;
+ *wb = offset * 8; /* convert to dw offset */
return 0;
} else {
return -EINVAL;
@@ -599,22 +561,6 @@ void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
}
/**
- * amdgpu_wb_free_64bit - Free a wb entry
- *
- * @adev: amdgpu_device pointer
- * @wb: wb index
- *
- * Free a wb slot allocated for use by the driver (all asics)
- */
-void amdgpu_wb_free_64bit(struct amdgpu_device *adev, u32 wb)
-{
- if ((wb + 1) < adev->wb.num_wb) {
- __clear_bit(wb, adev->wb.used);
- __clear_bit(wb + 1, adev->wb.used);
- }
-}
-
-/**
* amdgpu_vram_location - try to find VRAM location
* @adev: amdgpu device structure holding all necessary informations
* @mc: memory controller structure holding memory informations
@@ -665,7 +611,7 @@ void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64
}
/**
- * amdgpu_gtt_location - try to find GTT location
+ * amdgpu_gart_location - try to find GTT location
* @adev: amdgpu device structure holding all necessary informations
* @mc: memory controller structure holding memory informations
*
@@ -676,28 +622,28 @@ void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64
*
* FIXME: when reducing GTT size align new size on power of 2.
*/
-void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
+void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
{
u64 size_af, size_bf;
- size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
- size_bf = mc->vram_start & ~mc->gtt_base_align;
+ size_af = adev->mc.mc_mask - mc->vram_end;
+ size_bf = mc->vram_start;
if (size_bf > size_af) {
- if (mc->gtt_size > size_bf) {
+ if (mc->gart_size > size_bf) {
dev_warn(adev->dev, "limiting GTT\n");
- mc->gtt_size = size_bf;
+ mc->gart_size = size_bf;
}
- mc->gtt_start = 0;
+ mc->gart_start = 0;
} else {
- if (mc->gtt_size > size_af) {
+ if (mc->gart_size > size_af) {
dev_warn(adev->dev, "limiting GTT\n");
- mc->gtt_size = size_af;
+ mc->gart_size = size_af;
}
- mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
+ mc->gart_start = mc->vram_end + 1;
}
- mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
+ mc->gart_end = mc->gart_start + mc->gart_size - 1;
dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
- mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
+ mc->gart_size >> 20, mc->gart_start, mc->gart_end);
}
/*
@@ -720,7 +666,12 @@ bool amdgpu_need_post(struct amdgpu_device *adev)
adev->has_hw_reset = false;
return true;
}
- /* then check MEM_SIZE, in case the crtcs are off */
+
+ /* bios scratch used on CIK+ */
+ if (adev->asic_type >= CHIP_BONAIRE)
+ return amdgpu_atombios_scratch_need_asic_init(adev);
+
+ /* check MEM_SIZE for older asics */
reg = amdgpu_asic_get_config_memsize(adev);
if ((reg != 0) && (reg != 0xffffffff))
@@ -1031,19 +982,6 @@ static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
-/**
- * amdgpu_check_pot_argument - check that argument is a power of two
- *
- * @arg: value to check
- *
- * Validates that a certain argument is a power of two (all asics).
- * Returns true if argument is valid.
- */
-static bool amdgpu_check_pot_argument(int arg)
-{
- return (arg & (arg - 1)) == 0;
-}
-
static void amdgpu_check_block_size(struct amdgpu_device *adev)
{
/* defines number of bits in page table versus page directory,
@@ -1077,7 +1015,7 @@ static void amdgpu_check_vm_size(struct amdgpu_device *adev)
if (amdgpu_vm_size == -1)
return;
- if (!amdgpu_check_pot_argument(amdgpu_vm_size)) {
+ if (!is_power_of_2(amdgpu_vm_size)) {
dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
amdgpu_vm_size);
goto def_value;
@@ -1118,19 +1056,31 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev)
dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
amdgpu_sched_jobs);
amdgpu_sched_jobs = 4;
- } else if (!amdgpu_check_pot_argument(amdgpu_sched_jobs)){
+ } else if (!is_power_of_2(amdgpu_sched_jobs)){
dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
amdgpu_sched_jobs);
amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
}
- if (amdgpu_gart_size != -1) {
+ if (amdgpu_gart_size < 32) {
+ /* gart size must be greater or equal to 32M */
+ dev_warn(adev->dev, "gart size (%d) too small\n",
+ amdgpu_gart_size);
+ amdgpu_gart_size = 32;
+ }
+
+ if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
/* gtt size must be greater or equal to 32M */
- if (amdgpu_gart_size < 32) {
- dev_warn(adev->dev, "gart size (%d) too small\n",
- amdgpu_gart_size);
- amdgpu_gart_size = -1;
- }
+ dev_warn(adev->dev, "gtt size (%d) too small\n",
+ amdgpu_gtt_size);
+ amdgpu_gtt_size = -1;
+ }
+
+ /* valid range is between 4 and 9 inclusive */
+ if (amdgpu_vm_fragment_size != -1 &&
+ (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
+ dev_warn(adev->dev, "valid range is between 4 and 9\n");
+ amdgpu_vm_fragment_size = -1;
}
amdgpu_check_vm_size(adev);
@@ -1138,7 +1088,7 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev)
amdgpu_check_block_size(adev);
if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
- !amdgpu_check_pot_argument(amdgpu_vram_page_split))) {
+ !is_power_of_2(amdgpu_vram_page_split))) {
dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
amdgpu_vram_page_split);
amdgpu_vram_page_split = 1024;
@@ -1901,7 +1851,8 @@ static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
AMD_IP_BLOCK_TYPE_DCE,
AMD_IP_BLOCK_TYPE_GFX,
AMD_IP_BLOCK_TYPE_SDMA,
- AMD_IP_BLOCK_TYPE_VCE,
+ AMD_IP_BLOCK_TYPE_UVD,
+ AMD_IP_BLOCK_TYPE_VCE
};
for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
@@ -2019,7 +1970,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->flags = flags;
adev->asic_type = flags & AMD_ASIC_MASK;
adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
- adev->mc.gtt_size = 512 * 1024 * 1024;
+ adev->mc.gart_size = 512 * 1024 * 1024;
adev->accel_working = false;
adev->num_rings = 0;
adev->mman.buffer_funcs = NULL;
@@ -2068,6 +2019,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
spin_lock_init(&adev->uvd_ctx_idx_lock);
spin_lock_init(&adev->didt_idx_lock);
spin_lock_init(&adev->gc_cac_idx_lock);
+ spin_lock_init(&adev->se_cac_idx_lock);
spin_lock_init(&adev->audio_endpt_idx_lock);
spin_lock_init(&adev->mm_stats.lock);
@@ -2143,6 +2095,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_atombios_init(adev);
if (r) {
dev_err(adev->dev, "amdgpu_atombios_init failed\n");
+ amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
goto failed;
}
@@ -2153,6 +2106,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (amdgpu_vpost_needed(adev)) {
if (!adev->bios) {
dev_err(adev->dev, "no vBIOS found\n");
+ amdgpu_vf_error_put(AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
r = -EINVAL;
goto failed;
}
@@ -2160,18 +2114,28 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
if (r) {
dev_err(adev->dev, "gpu post error!\n");
+ amdgpu_vf_error_put(AMDGIM_ERROR_VF_GPU_POST_ERROR, 0, 0);
goto failed;
}
} else {
DRM_INFO("GPU post is not needed\n");
}
- if (!adev->is_atom_fw) {
+ if (adev->is_atom_fw) {
+ /* Initialize clocks */
+ r = amdgpu_atomfirmware_get_clock_info(adev);
+ if (r) {
+ dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
+ amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
+ goto failed;
+ }
+ } else {
/* Initialize clocks */
r = amdgpu_atombios_get_clock_info(adev);
if (r) {
dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
- return r;
+ amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
+ goto failed;
}
/* init i2c buses */
amdgpu_atombios_i2c_init(adev);
@@ -2181,6 +2145,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_fence_driver_init(adev);
if (r) {
dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
+ amdgpu_vf_error_put(AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
goto failed;
}
@@ -2190,6 +2155,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_init(adev);
if (r) {
dev_err(adev->dev, "amdgpu_init failed\n");
+ amdgpu_vf_error_put(AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
amdgpu_fini(adev);
goto failed;
}
@@ -2209,6 +2175,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_ib_pool_init(adev);
if (r) {
dev_err(adev->dev, "IB initialization failed (%d).\n", r);
+ amdgpu_vf_error_put(AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
goto failed;
}
@@ -2253,12 +2220,14 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_late_init(adev);
if (r) {
dev_err(adev->dev, "amdgpu_late_init failed\n");
+ amdgpu_vf_error_put(AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
goto failed;
}
return 0;
failed:
+ amdgpu_vf_error_trans_all(adev);
if (runtime)
vga_switcheroo_fini_domain_pm_ops(adev->dev);
return r;
@@ -2351,6 +2320,8 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
}
drm_modeset_unlock_all(dev);
+ amdgpu_amdkfd_suspend(adev);
+
/* unpin the front buffers and cursors */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
@@ -2392,10 +2363,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
*/
amdgpu_bo_evict_vram(adev);
- if (adev->is_atom_fw)
- amdgpu_atomfirmware_scratch_regs_save(adev);
- else
- amdgpu_atombios_scratch_regs_save(adev);
+ amdgpu_atombios_scratch_regs_save(adev);
pci_save_state(dev->pdev);
if (suspend) {
/* Shut down the device */
@@ -2444,10 +2412,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
if (r)
goto unlock;
}
- if (adev->is_atom_fw)
- amdgpu_atomfirmware_scratch_regs_restore(adev);
- else
- amdgpu_atombios_scratch_regs_restore(adev);
+ amdgpu_atombios_scratch_regs_restore(adev);
/* post card */
if (amdgpu_need_post(adev)) {
@@ -2490,6 +2455,9 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
}
}
}
+ r = amdgpu_amdkfd_resume(adev);
+ if (r)
+ return r;
/* blat the mode back in */
if (fbcon) {
@@ -2860,21 +2828,9 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
r = amdgpu_suspend(adev);
retry:
- /* Disable fb access */
- if (adev->mode_info.num_crtc) {
- struct amdgpu_mode_mc_save save;
- amdgpu_display_stop_mc_access(adev, &save);
- amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
- }
- if (adev->is_atom_fw)
- amdgpu_atomfirmware_scratch_regs_save(adev);
- else
- amdgpu_atombios_scratch_regs_save(adev);
+ amdgpu_atombios_scratch_regs_save(adev);
r = amdgpu_asic_reset(adev);
- if (adev->is_atom_fw)
- amdgpu_atomfirmware_scratch_regs_restore(adev);
- else
- amdgpu_atombios_scratch_regs_restore(adev);
+ amdgpu_atombios_scratch_regs_restore(adev);
/* post card */
amdgpu_atom_asic_init(adev->mode_info.atom_context);
@@ -2952,6 +2908,7 @@ out:
}
} else {
dev_err(adev->dev, "asic resume failed (%d).\n", r);
+ amdgpu_vf_error_put(AMDGIM_ERROR_VF_ASIC_RESUME_FAIL, 0, r);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
if (adev->rings[i] && adev->rings[i]->sched.thread) {
kthread_unpark(adev->rings[i]->sched.thread);
@@ -2962,12 +2919,16 @@ out:
drm_helper_resume_force_mode(adev->ddev);
ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
- if (r)
+ if (r) {
/* bad news, how to tell it to userspace ? */
dev_info(adev->dev, "GPU reset failed\n");
- else
+ amdgpu_vf_error_put(AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
+ }
+ else {
dev_info(adev->dev, "GPU reset successed!\n");
+ }
+ amdgpu_vf_error_trans_all(adev);
return r;
}
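
Among the amdgpu_device.c changes, the rewritten amdgpu_gart_location() drops the gtt_base_align rounding and simply places the GART aperture in whichever gap is larger - below VRAM or between the end of VRAM and the MC address mask - shrinking gart_size with a warning when it does not fit. The standalone sketch below reproduces that placement decision with made-up VRAM and address-mask values.

	/* Model of the GART placement decision in amdgpu_gart_location(). */
	#include <stdint.h>
	#include <stdio.h>

	struct mc {
		uint64_t mc_mask;
		uint64_t vram_start, vram_end;
		uint64_t gart_start, gart_end, gart_size;
	};

	static void gart_location(struct mc *mc)
	{
		uint64_t size_af = mc->mc_mask - mc->vram_end;	/* gap after VRAM  */
		uint64_t size_bf = mc->vram_start;		/* gap before VRAM */

		if (size_bf > size_af) {
			if (mc->gart_size > size_bf) {
				printf("limiting GTT\n");
				mc->gart_size = size_bf;
			}
			mc->gart_start = 0;
		} else {
			if (mc->gart_size > size_af) {
				printf("limiting GTT\n");
				mc->gart_size = size_af;
			}
			mc->gart_start = mc->vram_end + 1;
		}
		mc->gart_end = mc->gart_start + mc->gart_size - 1;
	}

	int main(void)
	{
		struct mc mc = {
			.mc_mask    = (1ULL << 40) - 1,
			.vram_start = 0,
			.vram_end   = (8ULL << 30) - 1,	/* 8 GB of VRAM at offset 0 */
			.gart_size  = 256ULL << 20,	/* 256 MB GART */
		};

		gart_location(&mc);
		printf("GART: %lluM 0x%016llX - 0x%016llX\n",
		       (unsigned long long)(mc.gart_size >> 20),
		       (unsigned long long)mc.gart_start,
		       (unsigned long long)mc.gart_end);
		return 0;
	}
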
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index cdf2ab20166a..6ad243293a78 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -482,7 +482,7 @@ static void amdgpu_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb);
- drm_gem_object_unreference_unlocked(amdgpu_fb->obj);
+ drm_gem_object_put_unlocked(amdgpu_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(amdgpu_fb);
}
@@ -542,14 +542,14 @@ amdgpu_user_framebuffer_create(struct drm_device *dev,
amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL);
if (amdgpu_fb == NULL) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ERR_PTR(-ENOMEM);
}
ret = amdgpu_framebuffer_init(dev, amdgpu_fb, mode_cmd, obj);
if (ret) {
kfree(amdgpu_fb);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index b59f37c83fa6..e39ec981b11c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -68,13 +68,16 @@
* - 3.16.0 - Add reserved vmid support
* - 3.17.0 - Add AMDGPU_NUM_VRAM_CPU_PAGE_FAULTS.
* - 3.18.0 - Export gpu always on cu bitmap
+ * - 3.19.0 - Add support for UVD MJPEG decode
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 18
+#define KMS_DRIVER_MINOR 19
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0;
-int amdgpu_gart_size = -1; /* auto */
+int amdgpu_vis_vram_limit = 0;
+unsigned amdgpu_gart_size = 256;
+int amdgpu_gtt_size = -1; /* auto */
int amdgpu_moverate = -1; /* auto */
int amdgpu_benchmarking = 0;
int amdgpu_testing = 0;
@@ -92,6 +95,7 @@ unsigned amdgpu_ip_block_mask = 0xffffffff;
int amdgpu_bapm = -1;
int amdgpu_deep_color = 0;
int amdgpu_vm_size = -1;
+int amdgpu_vm_fragment_size = -1;
int amdgpu_vm_block_size = -1;
int amdgpu_vm_fault_stop = 0;
int amdgpu_vm_debug = 0;
@@ -106,6 +110,7 @@ unsigned amdgpu_pcie_gen_cap = 0;
unsigned amdgpu_pcie_lane_cap = 0;
unsigned amdgpu_cg_mask = 0xffffffff;
unsigned amdgpu_pg_mask = 0xffffffff;
+unsigned amdgpu_sdma_phase_quantum = 32;
char *amdgpu_disable_cu = NULL;
char *amdgpu_virtual_display = NULL;
unsigned amdgpu_pp_feature_mask = 0xffffffff;
@@ -120,8 +125,14 @@ int amdgpu_lbpw = -1;
MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
-MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc., -1 = auto)");
-module_param_named(gartsize, amdgpu_gart_size, int, 0600);
+MODULE_PARM_DESC(vis_vramlimit, "Restrict visible VRAM for testing, in megabytes");
+module_param_named(vis_vramlimit, amdgpu_vis_vram_limit, int, 0444);
+
+MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc.)");
+module_param_named(gartsize, amdgpu_gart_size, uint, 0600);
+
+MODULE_PARM_DESC(gttsize, "Size of the GTT domain in megabytes (-1 = auto)");
+module_param_named(gttsize, amdgpu_gtt_size, int, 0600);
MODULE_PARM_DESC(moverate, "Maximum buffer migration rate in MB/s. (32, 64, etc., -1=auto, 0=1=disabled)");
module_param_named(moverate, amdgpu_moverate, int, 0600);
@@ -174,6 +185,9 @@ module_param_named(deep_color, amdgpu_deep_color, int, 0444);
MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 64GB)");
module_param_named(vm_size, amdgpu_vm_size, int, 0444);
+MODULE_PARM_DESC(vm_fragment_size, "VM fragment size in bits (4, 5, etc. 4 = 64K (default), Max 9 = 2M)");
+module_param_named(vm_fragment_size, amdgpu_vm_fragment_size, int, 0444);
+
MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default depending on vm_size)");
module_param_named(vm_block_size, amdgpu_vm_block_size, int, 0444);
@@ -186,7 +200,7 @@ module_param_named(vm_debug, amdgpu_vm_debug, int, 0644);
MODULE_PARM_DESC(vm_update_mode, "VM update using CPU (0 = never (default except for large BAR(LB)), 1 = Graphics only, 2 = Compute only (default for LB), 3 = Both");
module_param_named(vm_update_mode, amdgpu_vm_update_mode, int, 0444);
-MODULE_PARM_DESC(vram_page_split, "Number of pages after we split VRAM allocations (default 1024, -1 = disable)");
+MODULE_PARM_DESC(vram_page_split, "Number of pages after we split VRAM allocations (default 512, -1 = disable)");
module_param_named(vram_page_split, amdgpu_vram_page_split, int, 0444);
MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
@@ -199,7 +213,7 @@ MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default
module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default))");
-module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, int, 0444);
+module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, uint, 0444);
MODULE_PARM_DESC(no_evict, "Support pinning request from user space (1 = enable, 0 = disable (default))");
module_param_named(no_evict, amdgpu_no_evict, int, 0444);
@@ -219,6 +233,9 @@ module_param_named(cg_mask, amdgpu_cg_mask, uint, 0444);
MODULE_PARM_DESC(pg_mask, "Powergating flags mask (0 = disable power gating)");
module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444);
+MODULE_PARM_DESC(sdma_phase_quantum, "SDMA context switch phase quantum (x 1K GPU clock cycles, 0 = no change (default 32))");
+module_param_named(sdma_phase_quantum, amdgpu_sdma_phase_quantum, uint, 0444);
+
MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)");
module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444);
@@ -803,7 +820,6 @@ static struct drm_driver kms_driver = {
.open = amdgpu_driver_open_kms,
.postclose = amdgpu_driver_postclose_kms,
.lastclose = amdgpu_driver_lastclose_kms,
- .set_busid = drm_pci_set_busid,
.unload = amdgpu_driver_unload_kms,
.get_vblank_counter = amdgpu_get_vblank_counter_kms,
.enable_vblank = amdgpu_enable_vblank_kms,
@@ -823,7 +839,6 @@ static struct drm_driver kms_driver = {
.gem_close_object = amdgpu_gem_object_close,
.dumb_create = amdgpu_mode_dumb_create,
.dumb_map_offset = amdgpu_mode_dumb_mmap,
- .dumb_destroy = drm_gem_dumb_destroy,
.fops = &amdgpu_driver_kms_fops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index c0d8c6ff6380..9afa9c097e1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -118,7 +118,7 @@ static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj)
amdgpu_bo_unpin(abo);
amdgpu_bo_unreserve(abo);
}
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
}
static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
@@ -245,13 +245,12 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
- info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &amdgpufb_ops;
tmp = amdgpu_bo_gpu_offset(abo) - adev->mc.vram_start;
info->fix.smem_start = adev->mc.aper_base + tmp;
info->fix.smem_len = amdgpu_bo_size(abo);
- info->screen_base = abo->kptr;
+ info->screen_base = amdgpu_bo_kptr(abo);
info->screen_size = amdgpu_bo_size(abo);
drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
@@ -281,7 +280,7 @@ out:
}
if (fb && ret) {
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
drm_framebuffer_unregister_private(fb);
drm_framebuffer_cleanup(fb);
kfree(fb);
@@ -312,31 +311,7 @@ static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfb
return 0;
}
-/** Sets the color ramps on behalf of fbcon */
-static void amdgpu_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
- u16 blue, int regno)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-
- amdgpu_crtc->lut_r[regno] = red >> 6;
- amdgpu_crtc->lut_g[regno] = green >> 6;
- amdgpu_crtc->lut_b[regno] = blue >> 6;
-}
-
-/** Gets the color ramps on behalf of fbcon */
-static void amdgpu_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, int regno)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-
- *red = amdgpu_crtc->lut_r[regno] << 6;
- *green = amdgpu_crtc->lut_g[regno] << 6;
- *blue = amdgpu_crtc->lut_b[regno] << 6;
-}
-
static const struct drm_fb_helper_funcs amdgpu_fb_helper_funcs = {
- .gamma_set = amdgpu_crtc_fb_gamma_set,
- .gamma_get = amdgpu_crtc_fb_gamma_get,
.fb_probe = amdgpufb_create,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index a57abc1a25fb..94c1e2e8e34c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -55,6 +55,19 @@
/*
* Common GART table functions.
*/
+
+/**
+ * amdgpu_gart_set_defaults - set the default gart_size
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Set the default gart_size based on parameters and available VRAM.
+ */
+void amdgpu_gart_set_defaults(struct amdgpu_device *adev)
+{
+ adev->mc.gart_size = (uint64_t)amdgpu_gart_size << 20;
+}
+
/**
* amdgpu_gart_table_ram_alloc - allocate system ram for gart page table
*
@@ -131,7 +144,7 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, &adev->gart.robj);
+ NULL, NULL, 0, &adev->gart.robj);
if (r) {
return r;
}
@@ -263,6 +276,41 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
}
/**
+ * amdgpu_gart_map - map dma_addresses into GART entries
+ *
+ * @adev: amdgpu_device pointer
+ * @offset: offset into the GPU's gart aperture
+ * @pages: number of pages to bind
+ * @dma_addr: DMA addresses of pages
+ *
+ * Map the dma_addresses into GART entries (all asics).
+ * Returns 0 for success, -EINVAL for failure.
+ */
+int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
+ int pages, dma_addr_t *dma_addr, uint64_t flags,
+ void *dst)
+{
+ uint64_t page_base;
+ unsigned i, j, t;
+
+ if (!adev->gart.ready) {
+ WARN(1, "trying to bind memory to uninitialized GART !\n");
+ return -EINVAL;
+ }
+
+ t = offset / AMDGPU_GPU_PAGE_SIZE;
+
+ for (i = 0; i < pages; i++) {
+ page_base = dma_addr[i];
+ for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
+ amdgpu_gart_set_pte_pde(adev, dst, t, page_base, flags);
+ page_base += AMDGPU_GPU_PAGE_SIZE;
+ }
+ }
+ return 0;
+}
+
+/**
* amdgpu_gart_bind - bind pages into the gart page table
*
* @adev: amdgpu_device pointer
@@ -279,31 +327,30 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
int pages, struct page **pagelist, dma_addr_t *dma_addr,
uint64_t flags)
{
- unsigned t;
- unsigned p;
- uint64_t page_base;
- int i, j;
+#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
+ unsigned i,t,p;
+#endif
+ int r;
if (!adev->gart.ready) {
WARN(1, "trying to bind memory to uninitialized GART !\n");
return -EINVAL;
}
+#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
t = offset / AMDGPU_GPU_PAGE_SIZE;
p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
-
- for (i = 0; i < pages; i++, p++) {
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
+ for (i = 0; i < pages; i++, p++)
adev->gart.pages[p] = pagelist[i];
#endif
- if (adev->gart.ptr) {
- page_base = dma_addr[i];
- for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
- amdgpu_gart_set_pte_pde(adev, adev->gart.ptr, t, page_base, flags);
- page_base += AMDGPU_GPU_PAGE_SIZE;
- }
- }
+
+ if (adev->gart.ptr) {
+ r = amdgpu_gart_map(adev, offset, pages, dma_addr, flags,
+ adev->gart.ptr);
+ if (r)
+ return r;
}
+
mb();
amdgpu_gart_flush_gpu_tlb(adev, 0);
return 0;
@@ -333,8 +380,8 @@ int amdgpu_gart_init(struct amdgpu_device *adev)
if (r)
return r;
/* Compute table size */
- adev->gart.num_cpu_pages = adev->mc.gtt_size / PAGE_SIZE;
- adev->gart.num_gpu_pages = adev->mc.gtt_size / AMDGPU_GPU_PAGE_SIZE;
+ adev->gart.num_cpu_pages = adev->mc.gart_size / PAGE_SIZE;
+ adev->gart.num_gpu_pages = adev->mc.gart_size / AMDGPU_GPU_PAGE_SIZE;
DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);
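
amdgpu_gart_map() factors the inner loop out of amdgpu_gart_bind() so the same expansion of DMA addresses into GART entries can be pointed at any destination table: each CPU page is split into PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE GPU pages starting at entry offset / AMDGPU_GPU_PAGE_SIZE. The standalone sketch below models that index arithmetic; write_pte() stands in for amdgpu_gart_set_pte_pde() and a 64 KB CPU page size is assumed so the split is visible.

	/* Model of the CPU-page to GPU-page expansion done by amdgpu_gart_map(). */
	#include <stdint.h>
	#include <stdio.h>

	#define GPU_PAGE_SIZE 4096u
	#define CPU_PAGE_SIZE 65536u	/* pretend a 64 KB-page host */

	static void write_pte(unsigned idx, uint64_t addr)
	{
		printf("pte[%u] = 0x%llx\n", idx, (unsigned long long)addr);
	}

	static void gart_map(uint64_t offset, unsigned pages, const uint64_t *dma_addr)
	{
		unsigned t = offset / GPU_PAGE_SIZE;	/* first GART entry to fill */

		for (unsigned i = 0; i < pages; i++) {
			uint64_t page_base = dma_addr[i];

			for (unsigned j = 0; j < CPU_PAGE_SIZE / GPU_PAGE_SIZE; j++, t++) {
				write_pte(t, page_base);
				page_base += GPU_PAGE_SIZE;
			}
		}
	}

	int main(void)
	{
		uint64_t dma[1] = { 0x100000 };

		gart_map(2 * CPU_PAGE_SIZE, 1, dma);	/* one CPU page at GART offset 128 KB */
		return 0;
	}
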
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
new file mode 100644
index 000000000000..d4cce6936200
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_GART_H__
+#define __AMDGPU_GART_H__
+
+#include <linux/types.h>
+
+/*
+ * GART structures, functions & helpers
+ */
+struct amdgpu_device;
+struct amdgpu_bo;
+struct amdgpu_gart_funcs;
+
+#define AMDGPU_GPU_PAGE_SIZE 4096
+#define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1)
+#define AMDGPU_GPU_PAGE_SHIFT 12
+#define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK)
+
+struct amdgpu_gart {
+ dma_addr_t table_addr;
+ struct amdgpu_bo *robj;
+ void *ptr;
+ unsigned num_gpu_pages;
+ unsigned num_cpu_pages;
+ unsigned table_size;
+#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
+ struct page **pages;
+#endif
+ bool ready;
+
+ /* Asic default pte flags */
+ uint64_t gart_pte_flags;
+
+ const struct amdgpu_gart_funcs *gart_funcs;
+};
+
+void amdgpu_gart_set_defaults(struct amdgpu_device *adev);
+int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev);
+void amdgpu_gart_table_ram_free(struct amdgpu_device *adev);
+int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);
+void amdgpu_gart_table_vram_free(struct amdgpu_device *adev);
+int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
+void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
+int amdgpu_gart_init(struct amdgpu_device *adev);
+void amdgpu_gart_fini(struct amdgpu_device *adev);
+int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
+ int pages);
+int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
+ int pages, dma_addr_t *dma_addr, uint64_t flags,
+ void *dst);
+int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
+ int pages, struct page **pagelist,
+ dma_addr_t *dma_addr, uint64_t flags);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 621f739103a6..7171968f261e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -49,7 +49,6 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
struct drm_gem_object **obj)
{
struct amdgpu_bo *robj;
- unsigned long max_size;
int r;
*obj = NULL;
@@ -58,20 +57,9 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
alignment = PAGE_SIZE;
}
- if (!(initial_domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA))) {
- /* Maximum bo size is the unpinned gtt size since we use the gtt to
- * handle vram to system pool migrations.
- */
- max_size = adev->mc.gtt_size - adev->gart_pin_size;
- if (size > max_size) {
- DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
- size >> 20, max_size >> 20);
- return -ENOMEM;
- }
- }
retry:
r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
- flags, NULL, NULL, &robj);
+ flags, NULL, NULL, 0, &robj);
if (r) {
if (r != -ERESTARTSYS) {
if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
@@ -103,7 +91,7 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev)
spin_lock(&file->table_lock);
idr_for_each_entry(&file->object_idr, gobj, handle) {
WARN_ONCE(1, "And also active allocations!\n");
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
}
idr_destroy(&file->object_idr);
spin_unlock(&file->table_lock);
@@ -237,9 +225,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
if (args->in.domain_flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
AMDGPU_GEM_CREATE_CPU_GTT_USWC |
- AMDGPU_GEM_CREATE_VRAM_CLEARED|
- AMDGPU_GEM_CREATE_SHADOW |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS))
+ AMDGPU_GEM_CREATE_VRAM_CLEARED))
return -EINVAL;
/* reject invalid gem domains */
@@ -275,7 +261,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
r = drm_gem_handle_create(filp, gobj, &handle);
/* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
if (r)
return r;
@@ -318,7 +304,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
return r;
bo = gem_to_amdgpu_bo(gobj);
- bo->prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
+ bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
if (r)
@@ -353,7 +339,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
r = drm_gem_handle_create(filp, gobj, &handle);
/* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
if (r)
return r;
@@ -367,7 +353,7 @@ unlock_mmap_sem:
up_read(&current->mm->mmap_sem);
release_object:
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return r;
}
@@ -386,11 +372,11 @@ int amdgpu_mode_dumb_mmap(struct drm_file *filp,
robj = gem_to_amdgpu_bo(gobj);
if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
(robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return -EPERM;
}
*offset_p = amdgpu_bo_mmap_offset(robj);
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return 0;
}
@@ -460,7 +446,7 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
} else
r = ret;
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return r;
}
@@ -503,7 +489,7 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
unreserve:
amdgpu_bo_unreserve(robj);
out:
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return r;
}
@@ -635,7 +621,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
switch (args->operation) {
case AMDGPU_VA_OP_MAP:
- r = amdgpu_vm_alloc_pts(adev, bo_va->vm, args->va_address,
+ r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
args->map_size);
if (r)
goto error_backoff;
@@ -655,7 +641,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
args->map_size);
break;
case AMDGPU_VA_OP_REPLACE:
- r = amdgpu_vm_alloc_pts(adev, bo_va->vm, args->va_address,
+ r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
args->map_size);
if (r)
goto error_backoff;
@@ -676,7 +662,7 @@ error_backoff:
ttm_eu_backoff_reservation(&ticket, &list);
error_unref:
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return r;
}
@@ -701,11 +687,11 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
switch (args->op) {
case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
struct drm_amdgpu_gem_create_in info;
- void __user *out = (void __user *)(uintptr_t)args->value;
+ void __user *out = u64_to_user_ptr(args->value);
info.bo_size = robj->gem_base.size;
info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
- info.domains = robj->prefered_domains;
+ info.domains = robj->preferred_domains;
info.domain_flags = robj->flags;
amdgpu_bo_unreserve(robj);
if (copy_to_user(out, &info, sizeof(info)))
@@ -723,10 +709,10 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
amdgpu_bo_unreserve(robj);
break;
}
- robj->prefered_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
+ robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT |
AMDGPU_GEM_DOMAIN_CPU);
- robj->allowed_domains = robj->prefered_domains;
+ robj->allowed_domains = robj->preferred_domains;
if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
@@ -738,7 +724,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
}
out:
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return r;
}
@@ -766,7 +752,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
r = drm_gem_handle_create(file_priv, gobj, &handle);
/* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
if (r) {
return r;
}
@@ -784,6 +770,7 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
unsigned domain;
const char *placement;
unsigned pin_count;
+ uint64_t offset;
domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
switch (domain) {
@@ -798,9 +785,12 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
placement = " CPU";
break;
}
- seq_printf(m, "\t0x%08x: %12ld byte %s @ 0x%010Lx",
- id, amdgpu_bo_size(bo), placement,
- amdgpu_bo_gpu_offset(bo));
+ seq_printf(m, "\t0x%08x: %12ld byte %s",
+ id, amdgpu_bo_size(bo), placement);
+
+ offset = ACCESS_ONCE(bo->tbo.mem.start);
+ if (offset != AMDGPU_BO_INVALID_OFFSET)
+ seq_printf(m, " @ 0x%010Lx", offset);
pin_count = ACCESS_ONCE(bo->pin_count);
if (pin_count)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index e26108aad3fe..4f6c68fc1dd9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -125,7 +125,8 @@ void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
if (mec >= adev->gfx.mec.num_mec)
break;
- if (adev->gfx.mec.num_mec > 1) {
+ /* FIXME: spreading the queues across pipes causes perf regressions */
+ if (0) {
/* policy: amdgpu owns the first two queues of the first MEC */
if (mec == 0 && queue < 2)
set_bit(i, adev->gfx.mec.queue_bitmap);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index f7d22c44034d..9e05e257729f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -28,7 +28,7 @@
struct amdgpu_gtt_mgr {
struct drm_mm mm;
spinlock_t lock;
- uint64_t available;
+ atomic64_t available;
};
/**
@@ -42,15 +42,19 @@ struct amdgpu_gtt_mgr {
static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
unsigned long p_size)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
struct amdgpu_gtt_mgr *mgr;
+ uint64_t start, size;
mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
if (!mgr)
return -ENOMEM;
- drm_mm_init(&mgr->mm, 0, p_size);
+ start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS;
+ size = (adev->mc.gart_size >> PAGE_SHIFT) - start;
+ drm_mm_init(&mgr->mm, start, size);
spin_lock_init(&mgr->lock);
- mgr->available = p_size;
+ atomic64_set(&mgr->available, p_size);
man->priv = mgr;
return 0;
}
@@ -81,6 +85,20 @@ static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
}
/**
+ * amdgpu_gtt_mgr_is_allocated - Check if mem has address space
+ *
+ * @mem: the mem object to check
+ *
+ * Check if a mem object already has address space allocated.
+ */
+bool amdgpu_gtt_mgr_is_allocated(struct ttm_mem_reg *mem)
+{
+ struct drm_mm_node *node = mem->mm_node;
+
+ return (node->start != AMDGPU_BO_INVALID_OFFSET);
+}
+
+/**
* amdgpu_gtt_mgr_alloc - allocate new ranges
*
* @man: TTM memory type manager
@@ -95,13 +113,14 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
const struct ttm_place *place,
struct ttm_mem_reg *mem)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
struct amdgpu_gtt_mgr *mgr = man->priv;
struct drm_mm_node *node = mem->mm_node;
enum drm_mm_insert_mode mode;
unsigned long fpfn, lpfn;
int r;
- if (node->start != AMDGPU_BO_INVALID_OFFSET)
+ if (amdgpu_gtt_mgr_is_allocated(mem))
return 0;
if (place)
@@ -112,7 +131,7 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
if (place && place->lpfn)
lpfn = place->lpfn;
else
- lpfn = man->size;
+ lpfn = adev->gart.num_cpu_pages;
mode = DRM_MM_INSERT_BEST;
if (place && place->flags & TTM_PL_FLAG_TOPDOWN)
@@ -134,15 +153,6 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
return r;
}
-void amdgpu_gtt_mgr_print(struct seq_file *m, struct ttm_mem_type_manager *man)
-{
- struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
- struct amdgpu_gtt_mgr *mgr = man->priv;
-
- seq_printf(m, "man size:%llu pages, gtt available:%llu pages, usage:%lluMB\n",
- man->size, mgr->available, (u64)atomic64_read(&adev->gtt_usage) >> 20);
-
-}
/**
* amdgpu_gtt_mgr_new - allocate a new node
*
@@ -163,11 +173,11 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
int r;
spin_lock(&mgr->lock);
- if (mgr->available < mem->num_pages) {
+ if (atomic64_read(&mgr->available) < mem->num_pages) {
spin_unlock(&mgr->lock);
return 0;
}
- mgr->available -= mem->num_pages;
+ atomic64_sub(mem->num_pages, &mgr->available);
spin_unlock(&mgr->lock);
node = kzalloc(sizeof(*node), GFP_KERNEL);
@@ -194,9 +204,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
return 0;
err_out:
- spin_lock(&mgr->lock);
- mgr->available += mem->num_pages;
- spin_unlock(&mgr->lock);
+ atomic64_add(mem->num_pages, &mgr->available);
return r;
}
@@ -223,30 +231,47 @@ static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man,
spin_lock(&mgr->lock);
if (node->start != AMDGPU_BO_INVALID_OFFSET)
drm_mm_remove_node(node);
- mgr->available += mem->num_pages;
spin_unlock(&mgr->lock);
+ atomic64_add(mem->num_pages, &mgr->available);
kfree(node);
mem->mm_node = NULL;
}
/**
+ * amdgpu_gtt_mgr_usage - return usage of GTT domain
+ *
+ * @man: TTM memory type manager
+ *
+ * Return how many bytes are used in the GTT domain
+ */
+uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man)
+{
+ struct amdgpu_gtt_mgr *mgr = man->priv;
+
+ return (u64)(man->size - atomic64_read(&mgr->available)) * PAGE_SIZE;
+}
+
+/**
* amdgpu_gtt_mgr_debug - dump VRAM table
*
* @man: TTM memory type manager
- * @prefix: text prefix
+ * @printer: DRM printer to use
*
* Dump the table content using printk.
*/
static void amdgpu_gtt_mgr_debug(struct ttm_mem_type_manager *man,
- const char *prefix)
+ struct drm_printer *printer)
{
struct amdgpu_gtt_mgr *mgr = man->priv;
- struct drm_printer p = drm_debug_printer(prefix);
spin_lock(&mgr->lock);
- drm_mm_print(&mgr->mm, &p);
+ drm_mm_print(&mgr->mm, printer);
spin_unlock(&mgr->lock);
+
+ drm_printf(printer, "man size:%llu pages, gtt available:%llu pages, usage:%lluMB\n",
+ man->size, (u64)atomic64_read(&mgr->available),
+ amdgpu_gtt_mgr_usage(man) >> 20);
}
const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func = {
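
(Not part of the patch: a standalone C11 sketch of the lock-free accounting the hunks above switch to — a single atomic "available" page counter, decremented on allocation, re-added on free, with usage derived from it. In the patch the check + subtract pair still runs under mgr->lock; the atomic only lets the free path and the usage query skip the lock.)

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_BYTES 4096ull

struct gtt_mgr {
	uint64_t size;                   /* managed size in pages */
	atomic_uint_fast64_t available;  /* free pages */
};

static int gtt_reserve(struct gtt_mgr *mgr, uint64_t num_pages)
{
	if (atomic_load(&mgr->available) < num_pages)
		return -1;                        /* no space */
	atomic_fetch_sub(&mgr->available, num_pages);
	return 0;
}

static void gtt_release(struct gtt_mgr *mgr, uint64_t num_pages)
{
	atomic_fetch_add(&mgr->available, num_pages);
}

/* Same formula as amdgpu_gtt_mgr_usage(): (size - available) * PAGE_SIZE */
static uint64_t gtt_usage_bytes(struct gtt_mgr *mgr)
{
	return (mgr->size - atomic_load(&mgr->available)) * PAGE_BYTES;
}

int main(void)
{
	struct gtt_mgr mgr = { .size = 1024 };

	atomic_init(&mgr.available, 1024);
	gtt_reserve(&mgr, 256);
	printf("GTT usage: %llu bytes\n",
	       (unsigned long long)gtt_usage_bytes(&mgr));
	gtt_release(&mgr, 256);
	return 0;
}
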
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index f774b3f497d2..659997bfff30 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -130,6 +130,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
unsigned i;
int r = 0;
+ bool need_pipe_sync = false;
if (num_ibs == 0)
return -EINVAL;
@@ -165,15 +166,15 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
if (ring->funcs->emit_pipeline_sync && job &&
((tmp = amdgpu_sync_get_fence(&job->sched_sync)) ||
amdgpu_vm_need_pipeline_sync(ring, job))) {
- amdgpu_ring_emit_pipeline_sync(ring);
+ need_pipe_sync = true;
dma_fence_put(tmp);
}
if (ring->funcs->insert_start)
ring->funcs->insert_start(ring);
- if (vm) {
- r = amdgpu_vm_flush(ring, job);
+ if (job) {
+ r = amdgpu_vm_flush(ring, job, need_pipe_sync);
if (r) {
amdgpu_ring_undo(ring);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 62da6c5c6095..4bdd851f56d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -220,6 +220,10 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
int r = 0;
spin_lock_init(&adev->irq.lock);
+
+ /* Disable vblank irqs aggressively for power-saving */
+ adev->ddev->vblank_disable_immediate = true;
+
r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc);
if (r) {
return r;
@@ -263,7 +267,6 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
{
unsigned i, j;
- drm_vblank_cleanup(adev->ddev);
if (adev->irq.installed) {
drm_irq_uninstall(adev->ddev);
adev->irq.installed = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 3d641e10e6b6..4510627ae83e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -81,6 +81,8 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]);
if (r)
kfree(*job);
+ else
+ (*job)->vm_pd_addr = adev->gart.table_addr;
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index b0b23101d1c8..e16229000a98 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -158,7 +158,6 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
"Error during ACPI methods call\n");
}
- amdgpu_amdkfd_load_interface(adev);
amdgpu_amdkfd_device_probe(adev);
amdgpu_amdkfd_device_init(adev);
@@ -456,13 +455,13 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_VRAM_USAGE:
- ui64 = atomic64_read(&adev->vram_usage);
+ ui64 = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_VIS_VRAM_USAGE:
- ui64 = atomic64_read(&adev->vram_vis_usage);
+ ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_GTT_USAGE:
- ui64 = atomic64_read(&adev->gtt_usage);
+ ui64 = amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_GDS_CONFIG: {
struct drm_amdgpu_info_gds gds_info;
@@ -485,7 +484,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
vram_gtt.vram_size -= adev->vram_pin_size;
vram_gtt.vram_cpu_accessible_size = adev->mc.visible_vram_size;
vram_gtt.vram_cpu_accessible_size -= (adev->vram_pin_size - adev->invisible_pin_size);
- vram_gtt.gtt_size = adev->mc.gtt_size;
+ vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
+ vram_gtt.gtt_size *= PAGE_SIZE;
vram_gtt.gtt_size -= adev->gart_pin_size;
return copy_to_user(out, &vram_gtt,
min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
@@ -497,7 +497,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
mem.vram.total_heap_size = adev->mc.real_vram_size;
mem.vram.usable_heap_size =
adev->mc.real_vram_size - adev->vram_pin_size;
- mem.vram.heap_usage = atomic64_read(&adev->vram_usage);
+ mem.vram.heap_usage =
+ amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
mem.cpu_accessible_vram.total_heap_size =
@@ -506,14 +507,16 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
adev->mc.visible_vram_size -
(adev->vram_pin_size - adev->invisible_pin_size);
mem.cpu_accessible_vram.heap_usage =
- atomic64_read(&adev->vram_vis_usage);
+ amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
mem.cpu_accessible_vram.max_allocation =
mem.cpu_accessible_vram.usable_heap_size * 3 / 4;
- mem.gtt.total_heap_size = adev->mc.gtt_size;
- mem.gtt.usable_heap_size =
- adev->mc.gtt_size - adev->gart_pin_size;
- mem.gtt.heap_usage = atomic64_read(&adev->gtt_usage);
+ mem.gtt.total_heap_size = adev->mman.bdev.man[TTM_PL_TT].size;
+ mem.gtt.total_heap_size *= PAGE_SIZE;
+ mem.gtt.usable_heap_size = mem.gtt.total_heap_size
+ - adev->gart_pin_size;
+ mem.gtt.heap_usage =
+ amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;
return copy_to_user(out, &mem,
@@ -571,8 +574,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
} else {
- dev_info.max_engine_clock = adev->pm.default_sclk * 10;
- dev_info.max_memory_clock = adev->pm.default_mclk * 10;
+ dev_info.max_engine_clock = adev->clock.default_sclk * 10;
+ dev_info.max_memory_clock = adev->clock.default_mclk * 10;
}
dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se *
@@ -587,10 +590,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
dev_info.virtual_address_max = (uint64_t)adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
- dev_info.pte_fragment_size = (1 << AMDGPU_LOG2_PAGES_PER_FRAG) *
- AMDGPU_GPU_PAGE_SIZE;
+ dev_info.pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE;
-
dev_info.cu_active_number = adev->gfx.cu_info.number;
dev_info.cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
dev_info.ce_ram_size = adev->gfx.ce_ram_size;
@@ -839,7 +840,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
}
if (amdgpu_sriov_vf(adev)) {
- r = amdgpu_map_static_csa(adev, &fpriv->vm);
+ r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va);
if (r)
goto out_suspend;
}
@@ -892,8 +893,8 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
if (amdgpu_sriov_vf(adev)) {
/* TODO: how to handle reserve failure */
BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
- amdgpu_vm_bo_rmv(adev, fpriv->vm.csa_bo_va);
- fpriv->vm.csa_bo_va = NULL;
+ amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
+ fpriv->csa_va = NULL;
amdgpu_bo_unreserve(adev->virt.csa_obj);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 38f739fb727b..e1cde6b80027 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -147,36 +147,6 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
}
/**
- * amdgpu_mn_invalidate_page - callback to notify about mm change
- *
- * @mn: our notifier
- * @mn: the mm this callback is about
- * @address: address of invalidate page
- *
- * Invalidation of a single page. Blocks for all BOs mapping it
- * and unmap them by move them into system domain again.
- */
-static void amdgpu_mn_invalidate_page(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long address)
-{
- struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
- struct interval_tree_node *it;
-
- mutex_lock(&rmn->lock);
-
- it = interval_tree_iter_first(&rmn->objects, address, address);
- if (it) {
- struct amdgpu_mn_node *node;
-
- node = container_of(it, struct amdgpu_mn_node, it);
- amdgpu_mn_invalidate_node(node, address, address);
- }
-
- mutex_unlock(&rmn->lock);
-}
-
-/**
* amdgpu_mn_invalidate_range_start - callback to notify about mm change
*
* @mn: our notifier
@@ -215,7 +185,6 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
static const struct mmu_notifier_ops amdgpu_mn_ops = {
.release = amdgpu_mn_release,
- .invalidate_page = amdgpu_mn_invalidate_page,
.invalidate_range_start = amdgpu_mn_invalidate_range_start,
};
@@ -359,7 +328,7 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
head = bo->mn_list.next;
bo->mn = NULL;
- list_del(&bo->mn_list);
+ list_del_init(&bo->mn_list);
if (list_empty(head)) {
struct amdgpu_mn_node *node;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 43a9d3aec6c4..2af2678ddaf6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -257,15 +257,7 @@ struct amdgpu_audio {
int num_pins;
};
-struct amdgpu_mode_mc_save {
- u32 vga_render_control;
- u32 vga_hdp_control;
- bool crtc_enabled[AMDGPU_MAX_CRTCS];
-};
-
struct amdgpu_display_funcs {
- /* vga render */
- void (*set_vga_render_state)(struct amdgpu_device *adev, bool render);
/* display watermarks */
void (*bandwidth_update)(struct amdgpu_device *adev);
/* get frame count */
@@ -300,10 +292,6 @@ struct amdgpu_display_funcs {
uint16_t connector_object_id,
struct amdgpu_hpd *hpd,
struct amdgpu_router *router);
- void (*stop_mc_access)(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save);
- void (*resume_mc_access)(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save);
};
struct amdgpu_mode_info {
@@ -369,7 +357,6 @@ struct amdgpu_atom_ss {
struct amdgpu_crtc {
struct drm_crtc base;
int crtc_id;
- u16 lut_r[256], lut_g[256], lut_b[256];
bool enabled;
bool can_tile;
uint32_t crtc_offset;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 8ee69652be8c..e7e899190bef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -37,55 +37,6 @@
#include "amdgpu.h"
#include "amdgpu_trace.h"
-
-
-static u64 amdgpu_get_vis_part_size(struct amdgpu_device *adev,
- struct ttm_mem_reg *mem)
-{
- if (mem->start << PAGE_SHIFT >= adev->mc.visible_vram_size)
- return 0;
-
- return ((mem->start << PAGE_SHIFT) + mem->size) >
- adev->mc.visible_vram_size ?
- adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) :
- mem->size;
-}
-
-static void amdgpu_update_memory_usage(struct amdgpu_device *adev,
- struct ttm_mem_reg *old_mem,
- struct ttm_mem_reg *new_mem)
-{
- u64 vis_size;
- if (!adev)
- return;
-
- if (new_mem) {
- switch (new_mem->mem_type) {
- case TTM_PL_TT:
- atomic64_add(new_mem->size, &adev->gtt_usage);
- break;
- case TTM_PL_VRAM:
- atomic64_add(new_mem->size, &adev->vram_usage);
- vis_size = amdgpu_get_vis_part_size(adev, new_mem);
- atomic64_add(vis_size, &adev->vram_vis_usage);
- break;
- }
- }
-
- if (old_mem) {
- switch (old_mem->mem_type) {
- case TTM_PL_TT:
- atomic64_sub(old_mem->size, &adev->gtt_usage);
- break;
- case TTM_PL_VRAM:
- atomic64_sub(old_mem->size, &adev->vram_usage);
- vis_size = amdgpu_get_vis_part_size(adev, old_mem);
- atomic64_sub(vis_size, &adev->vram_vis_usage);
- break;
- }
- }
-}
-
static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
@@ -93,7 +44,7 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
bo = container_of(tbo, struct amdgpu_bo, tbo);
- amdgpu_update_memory_usage(adev, &bo->tbo.mem, NULL);
+ amdgpu_bo_kunmap(bo);
drm_gem_object_release(&bo->gem_base);
amdgpu_bo_unref(&bo->parent);
@@ -219,7 +170,7 @@ static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
}
/**
- * amdgpu_bo_create_kernel - create BO for kernel use
+ * amdgpu_bo_create_reserved - create reserved BO for kernel use
*
* @adev: amdgpu device object
* @size: size for the new BO
@@ -229,24 +180,30 @@ static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
* @gpu_addr: GPU addr of the pinned BO
* @cpu_addr: optional CPU address mapping
*
- * Allocates and pins a BO for kernel internal use.
+ * Allocates and pins a BO for kernel internal use, and returns it still
+ * reserved.
*
* Returns 0 on success, negative error code otherwise.
*/
-int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
- unsigned long size, int align,
- u32 domain, struct amdgpu_bo **bo_ptr,
- u64 *gpu_addr, void **cpu_addr)
+int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
+ unsigned long size, int align,
+ u32 domain, struct amdgpu_bo **bo_ptr,
+ u64 *gpu_addr, void **cpu_addr)
{
+ bool free = false;
int r;
- r = amdgpu_bo_create(adev, size, align, true, domain,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, bo_ptr);
- if (r) {
- dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", r);
- return r;
+ if (!*bo_ptr) {
+ r = amdgpu_bo_create(adev, size, align, true, domain,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
+ NULL, NULL, 0, bo_ptr);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
+ r);
+ return r;
+ }
+ free = true;
}
r = amdgpu_bo_reserve(*bo_ptr, false);
@@ -269,20 +226,52 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
}
}
- amdgpu_bo_unreserve(*bo_ptr);
-
return 0;
error_unreserve:
amdgpu_bo_unreserve(*bo_ptr);
error_free:
- amdgpu_bo_unref(bo_ptr);
+ if (free)
+ amdgpu_bo_unref(bo_ptr);
return r;
}
/**
+ * amdgpu_bo_create_kernel - create BO for kernel use
+ *
+ * @adev: amdgpu device object
+ * @size: size for the new BO
+ * @align: alignment for the new BO
+ * @domain: where to place it
+ * @bo_ptr: resulting BO
+ * @gpu_addr: GPU addr of the pinned BO
+ * @cpu_addr: optional CPU address mapping
+ *
+ * Allocates and pins a BO for kernel internal use.
+ *
+ * Returns 0 on success, negative error code otherwise.
+ */
+int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
+ unsigned long size, int align,
+ u32 domain, struct amdgpu_bo **bo_ptr,
+ u64 *gpu_addr, void **cpu_addr)
+{
+ int r;
+
+ r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr,
+ gpu_addr, cpu_addr);
+
+ if (r)
+ return r;
+
+ amdgpu_bo_unreserve(*bo_ptr);
+
+ return 0;
+}
+
+/**
* amdgpu_bo_free_kernel - free BO for kernel use
*
* @bo: amdgpu BO to free
@@ -317,12 +306,13 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
struct sg_table *sg,
struct ttm_placement *placement,
struct reservation_object *resv,
+ uint64_t init_value,
struct amdgpu_bo **bo_ptr)
{
struct amdgpu_bo *bo;
enum ttm_bo_type type;
unsigned long page_align;
- u64 initial_bytes_moved;
+ u64 initial_bytes_moved, bytes_moved;
size_t acc_size;
int r;
@@ -351,13 +341,13 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
}
INIT_LIST_HEAD(&bo->shadow_list);
INIT_LIST_HEAD(&bo->va);
- bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
+ bo->preferred_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT |
AMDGPU_GEM_DOMAIN_CPU |
AMDGPU_GEM_DOMAIN_GDS |
AMDGPU_GEM_DOMAIN_GWS |
AMDGPU_GEM_DOMAIN_OA);
- bo->allowed_domains = bo->prefered_domains;
+ bo->allowed_domains = bo->preferred_domains;
if (!kernel && bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
@@ -398,8 +388,14 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
&bo->placement, page_align, !kernel, NULL,
acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
- amdgpu_cs_report_moved_bytes(adev,
- atomic64_read(&adev->num_bytes_moved) - initial_bytes_moved);
+ bytes_moved = atomic64_read(&adev->num_bytes_moved) -
+ initial_bytes_moved;
+ if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
+ bo->tbo.mem.mem_type == TTM_PL_VRAM &&
+ bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT)
+ amdgpu_cs_report_moved_bytes(adev, bytes_moved, bytes_moved);
+ else
+ amdgpu_cs_report_moved_bytes(adev, bytes_moved, 0);
if (unlikely(r != 0))
return r;
@@ -411,7 +407,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
struct dma_fence *fence;
- r = amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
+ r = amdgpu_fill_buffer(bo, init_value, bo->tbo.resv, &fence);
if (unlikely(r))
goto fail_unreserve;
@@ -426,6 +422,10 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
trace_amdgpu_bo_create(bo);
+ /* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
+ if (type == ttm_bo_type_device)
+ bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+
return 0;
fail_unreserve:
@@ -459,6 +459,7 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
AMDGPU_GEM_CREATE_CPU_GTT_USWC,
NULL, &placement,
bo->tbo.resv,
+ 0,
&bo->shadow);
if (!r) {
bo->shadow->parent = amdgpu_bo_ref(bo);
@@ -470,11 +471,15 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
return r;
}
+/* init_value will only take effect when flags contains
+ * AMDGPU_GEM_CREATE_VRAM_CLEARED.
+ */
int amdgpu_bo_create(struct amdgpu_device *adev,
unsigned long size, int byte_align,
bool kernel, u32 domain, u64 flags,
struct sg_table *sg,
struct reservation_object *resv,
+ uint64_t init_value,
struct amdgpu_bo **bo_ptr)
{
struct ttm_placement placement = {0};
@@ -489,7 +494,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
domain, flags, sg, &placement,
- resv, bo_ptr);
+ resv, init_value, bo_ptr);
if (r)
return r;
@@ -535,7 +540,7 @@ int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr,
amdgpu_bo_size(bo), resv, fence,
- direct);
+ direct, false);
if (!r)
amdgpu_bo_fence(bo, *fence, true);
@@ -551,7 +556,7 @@ int amdgpu_bo_validate(struct amdgpu_bo *bo)
if (bo->pin_count)
return 0;
- domain = bo->prefered_domains;
+ domain = bo->preferred_domains;
retry:
amdgpu_ttm_placement_from_domain(bo, domain);
@@ -588,7 +593,7 @@ int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
r = amdgpu_copy_buffer(ring, shadow_addr, bo_addr,
amdgpu_bo_size(bo), resv, fence,
- direct);
+ direct, false);
if (!r)
amdgpu_bo_fence(bo, *fence, true);
@@ -598,16 +603,16 @@ err:
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
- bool is_iomem;
+ void *kptr;
long r;
if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
return -EPERM;
- if (bo->kptr) {
- if (ptr) {
- *ptr = bo->kptr;
- }
+ kptr = amdgpu_bo_kptr(bo);
+ if (kptr) {
+ if (ptr)
+ *ptr = kptr;
return 0;
}
@@ -620,19 +625,23 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
if (r)
return r;
- bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
if (ptr)
- *ptr = bo->kptr;
+ *ptr = amdgpu_bo_kptr(bo);
return 0;
}
+void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
+{
+ bool is_iomem;
+
+ return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
+}
+
void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
- if (bo->kptr == NULL)
- return;
- bo->kptr = NULL;
- ttm_bo_kunmap(&bo->kmap);
+ if (bo->kmap.bo)
+ ttm_bo_kunmap(&bo->kmap);
}
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
@@ -724,15 +733,16 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
dev_err(adev->dev, "%p pin failed\n", bo);
goto error;
}
- r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
- if (unlikely(r)) {
- dev_err(adev->dev, "%p bind failed\n", bo);
- goto error;
- }
bo->pin_count = 1;
- if (gpu_addr != NULL)
+ if (gpu_addr != NULL) {
+ r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
+ if (unlikely(r)) {
+ dev_err(adev->dev, "%p bind failed\n", bo);
+ goto error;
+ }
*gpu_addr = amdgpu_bo_gpu_offset(bo);
+ }
if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
adev->vram_pin_size += amdgpu_bo_size(bo);
if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
@@ -921,6 +931,8 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
abo = container_of(bo, struct amdgpu_bo, tbo);
amdgpu_vm_bo_invalidate(adev, abo);
+ amdgpu_bo_kunmap(abo);
+
/* remember the eviction */
if (evict)
atomic64_inc(&adev->num_evictions);
@@ -930,8 +942,6 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
return;
/* move_notify is called before move happens */
- amdgpu_update_memory_usage(adev, &bo->mem, new_mem);
-
trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}
@@ -939,19 +949,22 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_bo *abo;
- unsigned long offset, size, lpfn;
- int i, r;
+ unsigned long offset, size;
+ int r;
if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
return 0;
abo = container_of(bo, struct amdgpu_bo, tbo);
+
+ /* Remember that this BO was accessed by the CPU */
+ abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+
if (bo->mem.mem_type != TTM_PL_VRAM)
return 0;
size = bo->mem.num_pages << PAGE_SHIFT;
offset = bo->mem.start << PAGE_SHIFT;
- /* TODO: figure out how to map scattered VRAM to the CPU */
if ((offset + size) <= adev->mc.visible_vram_size)
return 0;
@@ -961,26 +974,21 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
/* hurrah the memory is not visible ! */
atomic64_inc(&adev->num_vram_cpu_page_faults);
- amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
- lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
- for (i = 0; i < abo->placement.num_placement; i++) {
- /* Force into visible VRAM */
- if ((abo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
- (!abo->placements[i].lpfn ||
- abo->placements[i].lpfn > lpfn))
- abo->placements[i].lpfn = lpfn;
- }
+ amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT);
+
+ /* Avoid costly evictions; only set GTT as a busy placement */
+ abo->placement.num_busy_placement = 1;
+ abo->placement.busy_placement = &abo->placements[1];
+
r = ttm_bo_validate(bo, &abo->placement, false, false);
- if (unlikely(r == -ENOMEM)) {
- amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
- return ttm_bo_validate(bo, &abo->placement, false, false);
- } else if (unlikely(r != 0)) {
+ if (unlikely(r != 0))
return r;
- }
offset = bo->mem.start << PAGE_SHIFT;
/* this should never happen */
- if ((offset + size) > adev->mc.visible_vram_size)
+ if (bo->mem.mem_type == TTM_PL_VRAM &&
+ (offset + size) > adev->mc.visible_vram_size)
return -EINVAL;
return 0;
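
(Not part of the patch: a standalone C sketch of the visible-VRAM accounting decision added to amdgpu_bo_create_restricted() above — moved bytes are only charged to the visible-VRAM budget when part of VRAM is CPU-invisible and the BO starts inside the visible window. Function and parameter names are illustrative.)

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Returns how many of the moved bytes count against the visible-VRAM budget. */
static uint64_t vis_vram_bytes(bool dst_is_vram, uint64_t dst_start_page,
			       uint64_t visible_vram, uint64_t real_vram,
			       uint64_t bytes_moved)
{
	if (visible_vram < real_vram &&
	    dst_is_vram &&
	    dst_start_page < (visible_vram >> PAGE_SHIFT))
		return bytes_moved;
	return 0;
}

int main(void)
{
	/* 256 MiB visible out of 4 GiB; BO placed at the start of VRAM. */
	uint64_t vis = vis_vram_bytes(true, 0, 256ull << 20, 4ull << 30,
				      1ull << 20);

	printf("visible-VRAM bytes charged: %llu\n", (unsigned long long)vis);
	return 0;
}
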
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 382485115b06..a288fa6d72c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -33,6 +33,61 @@
#define AMDGPU_BO_INVALID_OFFSET LONG_MAX
+/* bo virtual addresses in a vm */
+struct amdgpu_bo_va_mapping {
+ struct list_head list;
+ struct rb_node rb;
+ uint64_t start;
+ uint64_t last;
+ uint64_t __subtree_last;
+ uint64_t offset;
+ uint64_t flags;
+};
+
+/* User space allocated BO in a VM */
+struct amdgpu_bo_va {
+ struct amdgpu_vm_bo_base base;
+
+ /* protected by bo being reserved */
+ struct dma_fence *last_pt_update;
+ unsigned ref_count;
+
+ /* mappings for this bo_va */
+ struct list_head invalids;
+ struct list_head valids;
+};
+
+struct amdgpu_bo {
+ /* Protected by tbo.reserved */
+ u32 preferred_domains;
+ u32 allowed_domains;
+ struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
+ struct ttm_placement placement;
+ struct ttm_buffer_object tbo;
+ struct ttm_bo_kmap_obj kmap;
+ u64 flags;
+ unsigned pin_count;
+ u64 tiling_flags;
+ u64 metadata_flags;
+ void *metadata;
+ u32 metadata_size;
+ unsigned prime_shared_count;
+ /* list of all virtual addresses to which this bo is associated */
+ struct list_head va;
+ /* Constant after initialization */
+ struct drm_gem_object gem_base;
+ struct amdgpu_bo *parent;
+ struct amdgpu_bo *shadow;
+
+ struct ttm_bo_kmap_obj dma_buf_vmap;
+ struct amdgpu_mn *mn;
+
+ union {
+ struct list_head mn_list;
+ struct list_head shadow_list;
+ };
+};
+
/**
* amdgpu_mem_type_to_domain - return domain corresponding to mem_type
* @mem_type: ttm memory type
@@ -120,7 +175,11 @@ static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
*/
static inline bool amdgpu_bo_gpu_accessible(struct amdgpu_bo *bo)
{
- return bo->tbo.mem.mem_type != TTM_PL_SYSTEM;
+ switch (bo->tbo.mem.mem_type) {
+ case TTM_PL_TT: return amdgpu_ttm_is_bound(bo->tbo.ttm);
+ case TTM_PL_VRAM: return true;
+ default: return false;
+ }
}
int amdgpu_bo_create(struct amdgpu_device *adev,
@@ -128,6 +187,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
bool kernel, u32 domain, u64 flags,
struct sg_table *sg,
struct reservation_object *resv,
+ uint64_t init_value,
struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
unsigned long size, int byte_align,
@@ -135,7 +195,12 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
struct sg_table *sg,
struct ttm_placement *placement,
struct reservation_object *resv,
+ uint64_t init_value,
struct amdgpu_bo **bo_ptr);
+int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
+ unsigned long size, int align,
+ u32 domain, struct amdgpu_bo **bo_ptr,
+ u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
unsigned long size, int align,
u32 domain, struct amdgpu_bo **bo_ptr,
@@ -143,6 +208,7 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
void **cpu_addr);
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
+void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
void amdgpu_bo_unref(struct amdgpu_bo **bo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
index c19c4d138751..f21a7716b90e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
@@ -30,6 +30,7 @@ struct cg_flag_name
const char *name;
};
+void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev);
int amdgpu_pm_sysfs_init(struct amdgpu_device *adev);
void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev);
void amdgpu_pm_print_power_states(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index 6bdc866570ab..5b3f92891f89 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -69,7 +69,7 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
ww_mutex_lock(&resv->lock, NULL);
ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false,
- AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, &bo);
+ AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, 0, &bo);
ww_mutex_unlock(&resv->lock);
if (ret)
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 4083be61b328..8c2204c7b384 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -63,8 +63,13 @@ static int psp_sw_init(void *handle)
psp->smu_reload_quirk = psp_v3_1_smu_reload_quirk;
break;
case CHIP_RAVEN:
+#if 0
+ psp->init_microcode = psp_v10_0_init_microcode;
+#endif
psp->prep_cmd_buf = psp_v10_0_prep_cmd_buf;
psp->ring_init = psp_v10_0_ring_init;
+ psp->ring_create = psp_v10_0_ring_create;
+ psp->ring_destroy = psp_v10_0_ring_destroy;
psp->cmd_submit = psp_v10_0_cmd_submit;
psp->compare_sram_data = psp_v10_0_compare_sram_data;
break;
@@ -95,9 +100,8 @@ int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
int i;
struct amdgpu_device *adev = psp->adev;
- val = RREG32(reg_index);
-
for (i = 0; i < adev->usec_timeout; i++) {
+ val = RREG32(reg_index);
if (check_changed) {
if (val != reg_val)
return 0;
@@ -118,33 +122,18 @@ psp_cmd_submit_buf(struct psp_context *psp,
int index)
{
int ret;
- struct amdgpu_bo *cmd_buf_bo;
- uint64_t cmd_buf_mc_addr;
- struct psp_gfx_cmd_resp *cmd_buf_mem;
- struct amdgpu_device *adev = psp->adev;
-
- ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &cmd_buf_bo, &cmd_buf_mc_addr,
- (void **)&cmd_buf_mem);
- if (ret)
- return ret;
- memset(cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
+ memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
- memcpy(cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
+ memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
- ret = psp_cmd_submit(psp, ucode, cmd_buf_mc_addr,
+ ret = psp_cmd_submit(psp, ucode, psp->cmd_buf_mc_addr,
fence_mc_addr, index);
while (*((unsigned int *)psp->fence_buf) != index) {
msleep(1);
}
- amdgpu_bo_free_kernel(&cmd_buf_bo,
- &cmd_buf_mc_addr,
- (void **)&cmd_buf_mem);
-
return ret;
}
@@ -352,13 +341,20 @@ static int psp_load_fw(struct amdgpu_device *adev)
&psp->fence_buf_mc_addr,
&psp->fence_buf);
if (ret)
+ goto failed_mem2;
+
+ ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
+ (void **)&psp->cmd_buf_mem);
+ if (ret)
goto failed_mem1;
memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);
ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
if (ret)
- goto failed_mem1;
+ goto failed_mem;
ret = psp_tmr_init(psp);
if (ret)
@@ -379,9 +375,13 @@ static int psp_load_fw(struct amdgpu_device *adev)
return 0;
failed_mem:
+ amdgpu_bo_free_kernel(&psp->cmd_buf_bo,
+ &psp->cmd_buf_mc_addr,
+ (void **)&psp->cmd_buf_mem);
+failed_mem1:
amdgpu_bo_free_kernel(&psp->fence_buf_bo,
&psp->fence_buf_mc_addr, &psp->fence_buf);
-failed_mem1:
+failed_mem2:
amdgpu_bo_free_kernel(&psp->fw_pri_bo,
&psp->fw_pri_mc_addr, &psp->fw_pri_buf);
failed:
@@ -435,16 +435,15 @@ static int psp_hw_fini(void *handle)
psp_ring_destroy(psp, PSP_RING_TYPE__KM);
- if (psp->tmr_buf)
- amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
-
- if (psp->fw_pri_buf)
- amdgpu_bo_free_kernel(&psp->fw_pri_bo,
- &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
-
- if (psp->fence_buf_bo)
- amdgpu_bo_free_kernel(&psp->fence_buf_bo,
- &psp->fence_buf_mc_addr, &psp->fence_buf);
+ amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
+ amdgpu_bo_free_kernel(&psp->fw_pri_bo,
+ &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
+ amdgpu_bo_free_kernel(&psp->fence_buf_bo,
+ &psp->fence_buf_mc_addr, &psp->fence_buf);
+ amdgpu_bo_free_kernel(&psp->asd_shared_bo, &psp->asd_shared_mc_addr,
+ &psp->asd_shared_buf);
+ amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
+ (void **)&psp->cmd_buf_mem);
kfree(psp->cmd);
psp->cmd = NULL;
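
(Not part of the patch: a standalone C sketch of the psp_wait_for() fix above — the status register has to be re-read on every iteration of the wait loop; sampling it once before the loop, as the old code did, could never observe a late status change. The mask/value form and names are illustrative.)

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t fake_reg;   /* stands in for RREG32(reg_index) */

static int wait_for(uint32_t reg_val, uint32_t mask, unsigned timeout)
{
	for (unsigned i = 0; i < timeout; i++) {
		uint32_t val = fake_reg;          /* re-read each iteration */

		if ((val & mask) == reg_val)
			return 0;
		/* udelay(1) in the real code */
	}
	return -1;   /* timed out */
}

int main(void)
{
	fake_reg = 0x80000000u;
	printf("wait_for -> %d\n", wait_for(0x80000000u, 0x80000000u, 100));
	return 0;
}
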
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 1a1c8b469f93..538fa9dbfb21 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -108,6 +108,11 @@ struct psp_context
struct amdgpu_bo *fence_buf_bo;
uint64_t fence_buf_mc_addr;
void *fence_buf;
+
+ /* cmd buffer */
+ struct amdgpu_bo *cmd_buf_bo;
+ uint64_t cmd_buf_mc_addr;
+ struct psp_gfx_cmd_resp *cmd_buf_mem;
};
struct amdgpu_psp_funcs {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 75165e07b1cd..6c5646b48d1a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -184,32 +184,16 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
return r;
}
- if (ring->funcs->support_64bit_ptrs) {
- r = amdgpu_wb_get_64bit(adev, &ring->rptr_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
- return r;
- }
-
- r = amdgpu_wb_get_64bit(adev, &ring->wptr_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
- return r;
- }
-
- } else {
- r = amdgpu_wb_get(adev, &ring->rptr_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
- return r;
- }
-
- r = amdgpu_wb_get(adev, &ring->wptr_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
- return r;
- }
+ r = amdgpu_wb_get(adev, &ring->rptr_offs);
+ if (r) {
+ dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
+ return r;
+ }
+ r = amdgpu_wb_get(adev, &ring->wptr_offs);
+ if (r) {
+ dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
+ return r;
}
r = amdgpu_wb_get(adev, &ring->fence_offs);
@@ -277,18 +261,15 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
{
ring->ready = false;
- if (ring->funcs->support_64bit_ptrs) {
- amdgpu_wb_free_64bit(ring->adev, ring->cond_exe_offs);
- amdgpu_wb_free_64bit(ring->adev, ring->fence_offs);
- amdgpu_wb_free_64bit(ring->adev, ring->rptr_offs);
- amdgpu_wb_free_64bit(ring->adev, ring->wptr_offs);
- } else {
- amdgpu_wb_free(ring->adev, ring->cond_exe_offs);
- amdgpu_wb_free(ring->adev, ring->fence_offs);
- amdgpu_wb_free(ring->adev, ring->rptr_offs);
- amdgpu_wb_free(ring->adev, ring->wptr_offs);
- }
+ /* Don't finish a ring that was never initialized */
+ if (!(ring->adev) || !(ring->adev->rings[ring->idx]))
+ return;
+
+ amdgpu_wb_free(ring->adev, ring->rptr_offs);
+ amdgpu_wb_free(ring->adev, ring->wptr_offs);
+ amdgpu_wb_free(ring->adev, ring->cond_exe_offs);
+ amdgpu_wb_free(ring->adev, ring->fence_offs);
amdgpu_bo_free_kernel(&ring->ring_obj,
&ring->gpu_addr,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index bc8dec992f73..322d25299a00 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -212,4 +212,44 @@ static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
}
+static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
+{
+ if (ring->count_dw <= 0)
+ DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
+ ring->ring[ring->wptr++ & ring->buf_mask] = v;
+ ring->wptr &= ring->ptr_mask;
+ ring->count_dw--;
+}
+
+static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring,
+ void *src, int count_dw)
+{
+ unsigned occupied, chunk1, chunk2;
+ void *dst;
+
+ if (unlikely(ring->count_dw < count_dw))
+ DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
+
+ occupied = ring->wptr & ring->buf_mask;
+ dst = (void *)&ring->ring[occupied];
+ chunk1 = ring->buf_mask + 1 - occupied;
+ chunk1 = (chunk1 >= count_dw) ? count_dw: chunk1;
+ chunk2 = count_dw - chunk1;
+ chunk1 <<= 2;
+ chunk2 <<= 2;
+
+ if (chunk1)
+ memcpy(dst, src, chunk1);
+
+ if (chunk2) {
+ src += chunk1;
+ dst = (void *)ring->ring;
+ memcpy(dst, src, chunk2);
+ }
+
+ ring->wptr += count_dw;
+ ring->wptr &= ring->ptr_mask;
+ ring->count_dw -= count_dw;
+}
+
#endif
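
(Not part of the patch: a standalone C sketch of the wrap-around copy amdgpu_ring_write_multiple() above performs — a power-of-two ring of dwords, with the source split into at most two memcpy() chunks at the end of the buffer. The real code masks wptr with ptr_mask rather than buf_mask; this example conflates the two for brevity.)

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RING_DW  8u                 /* must be a power of two */
#define BUF_MASK (RING_DW - 1)

static uint32_t ring[RING_DW];
static uint32_t wptr;

static void ring_write_multiple(const uint32_t *src, unsigned count_dw)
{
	unsigned occupied = wptr & BUF_MASK;
	unsigned chunk1 = RING_DW - occupied;     /* dwords until the wrap */
	unsigned chunk2;

	if (chunk1 > count_dw)
		chunk1 = count_dw;
	chunk2 = count_dw - chunk1;

	memcpy(&ring[occupied], src, chunk1 * sizeof(*src));
	if (chunk2)
		memcpy(ring, src + chunk1, chunk2 * sizeof(*src));

	wptr = (wptr + count_dw) & BUF_MASK;
}

int main(void)
{
	uint32_t pkt[5] = { 1, 2, 3, 4, 5 };

	wptr = 6;                     /* force a wrap after two dwords */
	ring_write_multiple(pkt, 5);
	for (unsigned i = 0; i < RING_DW; i++)
		printf("ring[%u] = %u\n", i, ring[i]);
	return 0;
}
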
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 5ca75a456ad2..3144400435b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -64,7 +64,7 @@ int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
INIT_LIST_HEAD(&sa_manager->flist[i]);
r = amdgpu_bo_create(adev, size, align, true, domain,
- 0, NULL, NULL, &sa_manager->bo);
+ 0, NULL, NULL, 0, &sa_manager->bo);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index a6899180b265..c586f44312f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -244,6 +244,12 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
struct dma_fence *f = e->fence;
struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+ if (dma_fence_is_signaled(f)) {
+ hash_del(&e->node);
+ dma_fence_put(f);
+ kmem_cache_free(amdgpu_sync_slab, e);
+ continue;
+ }
if (ring && s_fence) {
/* For fences from the same ring it is sufficient
* when they are scheduled.
@@ -256,13 +262,6 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
}
}
- if (dma_fence_is_signaled(f)) {
- hash_del(&e->node);
- dma_fence_put(f);
- kmem_cache_free(amdgpu_sync_slab, e);
- continue;
- }
-
return f;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index 15510dadde01..ed8c3739015b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -33,7 +33,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
struct amdgpu_bo *vram_obj = NULL;
struct amdgpu_bo **gtt_obj = NULL;
- uint64_t gtt_addr, vram_addr;
+ uint64_t gart_addr, vram_addr;
unsigned n, size;
int i, r;
@@ -42,7 +42,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
/* Number of tests =
* (Total GTT - IB pool - writeback page - ring buffers) / test size
*/
- n = adev->mc.gtt_size - AMDGPU_IB_POOL_SIZE*64*1024;
+ n = adev->mc.gart_size - AMDGPU_IB_POOL_SIZE*64*1024;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
if (adev->rings[i])
n -= adev->rings[i]->ring_size;
@@ -61,7 +61,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM, 0,
- NULL, NULL, &vram_obj);
+ NULL, NULL, 0, &vram_obj);
if (r) {
DRM_ERROR("Failed to create VRAM object\n");
goto out_cleanup;
@@ -76,13 +76,13 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
}
for (i = 0; i < n; i++) {
void *gtt_map, *vram_map;
- void **gtt_start, **gtt_end;
+ void **gart_start, **gart_end;
void **vram_start, **vram_end;
struct dma_fence *fence = NULL;
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
- NULL, gtt_obj + i);
+ NULL, 0, gtt_obj + i);
if (r) {
DRM_ERROR("Failed to create GTT object %d\n", i);
goto out_lclean;
@@ -91,7 +91,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
r = amdgpu_bo_reserve(gtt_obj[i], false);
if (unlikely(r != 0))
goto out_lclean_unref;
- r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT, &gtt_addr);
+ r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT, &gart_addr);
if (r) {
DRM_ERROR("Failed to pin GTT object %d\n", i);
goto out_lclean_unres;
@@ -103,15 +103,15 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
goto out_lclean_unpin;
}
- for (gtt_start = gtt_map, gtt_end = gtt_map + size;
- gtt_start < gtt_end;
- gtt_start++)
- *gtt_start = gtt_start;
+ for (gart_start = gtt_map, gart_end = gtt_map + size;
+ gart_start < gart_end;
+ gart_start++)
+ *gart_start = gart_start;
amdgpu_bo_kunmap(gtt_obj[i]);
- r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr,
- size, NULL, &fence, false);
+ r = amdgpu_copy_buffer(ring, gart_addr, vram_addr,
+ size, NULL, &fence, false, false);
if (r) {
DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
@@ -132,21 +132,21 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
goto out_lclean_unpin;
}
- for (gtt_start = gtt_map, gtt_end = gtt_map + size,
+ for (gart_start = gtt_map, gart_end = gtt_map + size,
vram_start = vram_map, vram_end = vram_map + size;
vram_start < vram_end;
- gtt_start++, vram_start++) {
- if (*vram_start != gtt_start) {
+ gart_start++, vram_start++) {
+ if (*vram_start != gart_start) {
DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
"expected 0x%p (GTT/VRAM offset "
"0x%16llx/0x%16llx)\n",
- i, *vram_start, gtt_start,
+ i, *vram_start, gart_start,
(unsigned long long)
- (gtt_addr - adev->mc.gtt_start +
- (void*)gtt_start - gtt_map),
+ (gart_addr - adev->mc.gart_start +
+ (void*)gart_start - gtt_map),
(unsigned long long)
(vram_addr - adev->mc.vram_start +
- (void*)gtt_start - gtt_map));
+ (void*)gart_start - gtt_map));
amdgpu_bo_kunmap(vram_obj);
goto out_lclean_unpin;
}
@@ -155,8 +155,8 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
amdgpu_bo_kunmap(vram_obj);
- r = amdgpu_copy_buffer(ring, vram_addr, gtt_addr,
- size, NULL, &fence, false);
+ r = amdgpu_copy_buffer(ring, vram_addr, gart_addr,
+ size, NULL, &fence, false, false);
if (r) {
DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
@@ -177,20 +177,20 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
goto out_lclean_unpin;
}
- for (gtt_start = gtt_map, gtt_end = gtt_map + size,
+ for (gart_start = gtt_map, gart_end = gtt_map + size,
vram_start = vram_map, vram_end = vram_map + size;
- gtt_start < gtt_end;
- gtt_start++, vram_start++) {
- if (*gtt_start != vram_start) {
+ gart_start < gart_end;
+ gart_start++, vram_start++) {
+ if (*gart_start != vram_start) {
DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
"expected 0x%p (VRAM/GTT offset "
"0x%16llx/0x%16llx)\n",
- i, *gtt_start, vram_start,
+ i, *gart_start, vram_start,
(unsigned long long)
(vram_addr - adev->mc.vram_start +
(void*)vram_start - vram_map),
(unsigned long long)
- (gtt_addr - adev->mc.gtt_start +
+ (gart_addr - adev->mc.gart_start +
(void*)vram_start - vram_map));
amdgpu_bo_kunmap(gtt_obj[i]);
goto out_lclean_unpin;
@@ -200,7 +200,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
amdgpu_bo_kunmap(gtt_obj[i]);
DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
- gtt_addr - adev->mc.gtt_start);
+ gart_addr - adev->mc.gart_start);
continue;
out_lclean_unpin:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 8601904e670a..1c88bd5e29ad 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -14,6 +14,62 @@
#define AMDGPU_JOB_GET_TIMELINE_NAME(job) \
job->base.s_fence->finished.ops->get_timeline_name(&job->base.s_fence->finished)
+TRACE_EVENT(amdgpu_ttm_tt_populate,
+ TP_PROTO(struct amdgpu_device *adev, uint64_t dma_address, uint64_t phys_address),
+ TP_ARGS(adev, dma_address, phys_address),
+ TP_STRUCT__entry(
+ __field(uint16_t, domain)
+ __field(uint8_t, bus)
+ __field(uint8_t, slot)
+ __field(uint8_t, func)
+ __field(uint64_t, dma)
+ __field(uint64_t, phys)
+ ),
+ TP_fast_assign(
+ __entry->domain = pci_domain_nr(adev->pdev->bus);
+ __entry->bus = adev->pdev->bus->number;
+ __entry->slot = PCI_SLOT(adev->pdev->devfn);
+ __entry->func = PCI_FUNC(adev->pdev->devfn);
+ __entry->dma = dma_address;
+ __entry->phys = phys_address;
+ ),
+ TP_printk("%04x:%02x:%02x.%x: 0x%llx => 0x%llx",
+ (unsigned)__entry->domain,
+ (unsigned)__entry->bus,
+ (unsigned)__entry->slot,
+ (unsigned)__entry->func,
+ (unsigned long long)__entry->dma,
+ (unsigned long long)__entry->phys)
+);
+
+TRACE_EVENT(amdgpu_ttm_tt_unpopulate,
+ TP_PROTO(struct amdgpu_device *adev, uint64_t dma_address, uint64_t phys_address),
+ TP_ARGS(adev, dma_address, phys_address),
+ TP_STRUCT__entry(
+ __field(uint16_t, domain)
+ __field(uint8_t, bus)
+ __field(uint8_t, slot)
+ __field(uint8_t, func)
+ __field(uint64_t, dma)
+ __field(uint64_t, phys)
+ ),
+ TP_fast_assign(
+ __entry->domain = pci_domain_nr(adev->pdev->bus);
+ __entry->bus = adev->pdev->bus->number;
+ __entry->slot = PCI_SLOT(adev->pdev->devfn);
+ __entry->func = PCI_FUNC(adev->pdev->devfn);
+ __entry->dma = dma_address;
+ __entry->phys = phys_address;
+ ),
+ TP_printk("%04x:%02x:%02x.%x: 0x%llx => 0x%llx",
+ (unsigned)__entry->domain,
+ (unsigned)__entry->bus,
+ (unsigned)__entry->slot,
+ (unsigned)__entry->func,
+ (unsigned long long)__entry->dma,
+ (unsigned long long)__entry->phys)
+);
+
TRACE_EVENT(amdgpu_mm_rreg,
TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
TP_ARGS(did, reg, value),
@@ -105,12 +161,12 @@ TRACE_EVENT(amdgpu_bo_create,
__entry->bo = bo;
__entry->pages = bo->tbo.num_pages;
__entry->type = bo->tbo.mem.mem_type;
- __entry->prefer = bo->prefered_domains;
+ __entry->prefer = bo->preferred_domains;
__entry->allow = bo->allowed_domains;
__entry->visible = bo->flags;
),
- TP_printk("bo=%p, pages=%u, type=%d, prefered=%d, allowed=%d, visible=%d",
+ TP_printk("bo=%p, pages=%u, type=%d, preferred=%d, allowed=%d, visible=%d",
__entry->bo, __entry->pages, __entry->type,
__entry->prefer, __entry->allow, __entry->visible)
);
@@ -224,17 +280,17 @@ TRACE_EVENT(amdgpu_vm_bo_map,
__field(long, start)
__field(long, last)
__field(u64, offset)
- __field(u32, flags)
+ __field(u64, flags)
),
TP_fast_assign(
- __entry->bo = bo_va ? bo_va->bo : NULL;
+ __entry->bo = bo_va ? bo_va->base.bo : NULL;
__entry->start = mapping->start;
__entry->last = mapping->last;
__entry->offset = mapping->offset;
__entry->flags = mapping->flags;
),
- TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%08x",
+ TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%llx",
__entry->bo, __entry->start, __entry->last,
__entry->offset, __entry->flags)
);
@@ -248,17 +304,17 @@ TRACE_EVENT(amdgpu_vm_bo_unmap,
__field(long, start)
__field(long, last)
__field(u64, offset)
- __field(u32, flags)
+ __field(u64, flags)
),
TP_fast_assign(
- __entry->bo = bo_va->bo;
+ __entry->bo = bo_va->base.bo;
__entry->start = mapping->start;
__entry->last = mapping->last;
__entry->offset = mapping->offset;
__entry->flags = mapping->flags;
),
- TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%08x",
+ TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%llx",
__entry->bo, __entry->start, __entry->last,
__entry->offset, __entry->flags)
);
@@ -269,7 +325,7 @@ DECLARE_EVENT_CLASS(amdgpu_vm_mapping,
TP_STRUCT__entry(
__field(u64, soffset)
__field(u64, eoffset)
- __field(u32, flags)
+ __field(u64, flags)
),
TP_fast_assign(
@@ -277,7 +333,7 @@ DECLARE_EVENT_CLASS(amdgpu_vm_mapping,
__entry->eoffset = mapping->last + 1;
__entry->flags = mapping->flags;
),
- TP_printk("soffs=%010llx, eoffs=%010llx, flags=%08x",
+ TP_printk("soffs=%010llx, eoffs=%010llx, flags=%llx",
__entry->soffset, __entry->eoffset, __entry->flags)
);
@@ -293,14 +349,14 @@ DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_mapping,
TRACE_EVENT(amdgpu_vm_set_ptes,
TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags),
+ uint32_t incr, uint64_t flags),
TP_ARGS(pe, addr, count, incr, flags),
TP_STRUCT__entry(
__field(u64, pe)
__field(u64, addr)
__field(u32, count)
__field(u32, incr)
- __field(u32, flags)
+ __field(u64, flags)
),
TP_fast_assign(
@@ -310,7 +366,7 @@ TRACE_EVENT(amdgpu_vm_set_ptes,
__entry->incr = incr;
__entry->flags = flags;
),
- TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%08x, count=%u",
+ TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%llx, count=%u",
__entry->pe, __entry->addr, __entry->incr,
__entry->flags, __entry->count)
);
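Reviewer note (not part of the patch): the flags fields widen from u32 to u64, presumably because the newer PTE/PDE flag bits (including the huge-page bit used later in amdgpu_vm.c) no longer fit in 32 bits, hence the %08x to %llx format changes. The two new populate/unpopulate events fire once per page, so the callers added in amdgpu_ttm.c below gate the loop with the trace_<event>_enabled() helper that TRACE_EVENT generates. A minimal sketch of that pattern (the function name here is illustrative):

static void example_trace_populate(struct amdgpu_device *adev,
				   struct ttm_tt *ttm,
				   const dma_addr_t *dma_address)
{
	unsigned i;

	/* skip the per-page loop entirely unless the tracepoint is on */
	if (!trace_amdgpu_ttm_tt_populate_enabled())
		return;

	for (i = 0; i < ttm->num_pages; i++)
		trace_amdgpu_ttm_tt_populate(adev, dma_address[i],
					     page_to_phys(ttm->pages[i]));
}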
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index c9b131b13ef7..8b2c294f6f79 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -43,14 +43,20 @@
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include "amdgpu.h"
+#include "amdgpu_trace.h"
#include "bif/bif_4_1_d.h"
#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem, unsigned num_pages,
+ uint64_t offset, unsigned window,
+ struct amdgpu_ring *ring,
+ uint64_t *addr);
+
static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
-
/*
* Global memory.
*/
@@ -97,6 +103,8 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
goto error_bo;
}
+ mutex_init(&adev->mman.gtt_window_lock);
+
ring = adev->mman.buffer_funcs_ring;
rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
r = amd_sched_entity_init(&ring->sched, &adev->mman.entity,
@@ -123,6 +131,7 @@ static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
if (adev->mman.mem_global_referenced) {
amd_sched_entity_fini(adev->mman.entity.sched,
&adev->mman.entity);
+ mutex_destroy(&adev->mman.gtt_window_lock);
drm_global_item_unref(&adev->mman.bo_global_ref.ref);
drm_global_item_unref(&adev->mman.mem_global_ref);
adev->mman.mem_global_referenced = false;
@@ -150,7 +159,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
break;
case TTM_PL_TT:
man->func = &amdgpu_gtt_mgr_func;
- man->gpu_offset = adev->mc.gtt_start;
+ man->gpu_offset = adev->mc.gart_start;
man->available_caching = TTM_PL_MASK_CACHING;
man->default_caching = TTM_PL_FLAG_CACHED;
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
@@ -186,12 +195,11 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_bo *abo;
- static struct ttm_place placements = {
+ static const struct ttm_place placements = {
.fpfn = 0,
.lpfn = 0,
.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
};
- unsigned i;
if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
placement->placement = &placements;
@@ -207,22 +215,36 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
adev->mman.buffer_funcs_ring &&
adev->mman.buffer_funcs_ring->ready == false) {
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
+ } else if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
+ !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
+ unsigned fpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
+ struct drm_mm_node *node = bo->mem.mm_node;
+ unsigned long pages_left;
+
+ for (pages_left = bo->mem.num_pages;
+ pages_left;
+ pages_left -= node->size, node++) {
+ if (node->start < fpfn)
+ break;
+ }
+
+ if (!pages_left)
+ goto gtt;
+
+ /* Try evicting to the CPU inaccessible part of VRAM
+ * first, but only set GTT as busy placement, so this
+ * BO will be evicted to GTT rather than causing other
+ * BOs to be evicted from VRAM
+ */
+ amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT);
+ abo->placements[0].fpfn = fpfn;
+ abo->placements[0].lpfn = 0;
+ abo->placement.busy_placement = &abo->placements[1];
+ abo->placement.num_busy_placement = 1;
} else {
+gtt:
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
- for (i = 0; i < abo->placement.num_placement; ++i) {
- if (!(abo->placements[i].flags &
- TTM_PL_FLAG_TT))
- continue;
-
- if (abo->placements[i].lpfn)
- continue;
-
- /* set an upper limit to force directly
- * allocating address space for the BO.
- */
- abo->placements[i].lpfn =
- adev->mc.gtt_size >> PAGE_SHIFT;
- }
}
break;
case TTM_PL_TT:
@@ -252,29 +274,18 @@ static void amdgpu_move_null(struct ttm_buffer_object *bo,
new_mem->mm_node = NULL;
}
-static int amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
- struct drm_mm_node *mm_node,
- struct ttm_mem_reg *mem,
- uint64_t *addr)
+static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
+ struct drm_mm_node *mm_node,
+ struct ttm_mem_reg *mem)
{
- int r;
+ uint64_t addr = 0;
- switch (mem->mem_type) {
- case TTM_PL_TT:
- r = amdgpu_ttm_bind(bo, mem);
- if (r)
- return r;
-
- case TTM_PL_VRAM:
- *addr = mm_node->start << PAGE_SHIFT;
- *addr += bo->bdev->man[mem->mem_type].gpu_offset;
- break;
- default:
- DRM_ERROR("Unknown placement %d\n", mem->mem_type);
- return -EINVAL;
+ if (mem->mem_type != TTM_PL_TT ||
+ amdgpu_gtt_mgr_is_allocated(mem)) {
+ addr = mm_node->start << PAGE_SHIFT;
+ addr += bo->bdev->man[mem->mem_type].gpu_offset;
}
-
- return 0;
+ return addr;
}
static int amdgpu_move_blit(struct ttm_buffer_object *bo,
@@ -299,26 +310,40 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
}
old_mm = old_mem->mm_node;
- r = amdgpu_mm_node_addr(bo, old_mm, old_mem, &old_start);
- if (r)
- return r;
old_size = old_mm->size;
-
+ old_start = amdgpu_mm_node_addr(bo, old_mm, old_mem);
new_mm = new_mem->mm_node;
- r = amdgpu_mm_node_addr(bo, new_mm, new_mem, &new_start);
- if (r)
- return r;
new_size = new_mm->size;
+ new_start = amdgpu_mm_node_addr(bo, new_mm, new_mem);
num_pages = new_mem->num_pages;
+ mutex_lock(&adev->mman.gtt_window_lock);
while (num_pages) {
- unsigned long cur_pages = min(old_size, new_size);
+ unsigned long cur_pages = min(min(old_size, new_size),
+ (u64)AMDGPU_GTT_MAX_TRANSFER_SIZE);
+ uint64_t from = old_start, to = new_start;
struct dma_fence *next;
- r = amdgpu_copy_buffer(ring, old_start, new_start,
+ if (old_mem->mem_type == TTM_PL_TT &&
+ !amdgpu_gtt_mgr_is_allocated(old_mem)) {
+ r = amdgpu_map_buffer(bo, old_mem, cur_pages,
+ old_start, 0, ring, &from);
+ if (r)
+ goto error;
+ }
+
+ if (new_mem->mem_type == TTM_PL_TT &&
+ !amdgpu_gtt_mgr_is_allocated(new_mem)) {
+ r = amdgpu_map_buffer(bo, new_mem, cur_pages,
+ new_start, 1, ring, &to);
+ if (r)
+ goto error;
+ }
+
+ r = amdgpu_copy_buffer(ring, from, to,
cur_pages * PAGE_SIZE,
- bo->resv, &next, false);
+ bo->resv, &next, false, true);
if (r)
goto error;
@@ -331,10 +356,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
old_size -= cur_pages;
if (!old_size) {
- r = amdgpu_mm_node_addr(bo, ++old_mm, old_mem,
- &old_start);
- if (r)
- goto error;
+ old_start = amdgpu_mm_node_addr(bo, ++old_mm, old_mem);
old_size = old_mm->size;
} else {
old_start += cur_pages * PAGE_SIZE;
@@ -342,22 +364,21 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
new_size -= cur_pages;
if (!new_size) {
- r = amdgpu_mm_node_addr(bo, ++new_mm, new_mem,
- &new_start);
- if (r)
- goto error;
-
+ new_start = amdgpu_mm_node_addr(bo, ++new_mm, new_mem);
new_size = new_mm->size;
} else {
new_start += cur_pages * PAGE_SIZE;
}
}
+ mutex_unlock(&adev->mman.gtt_window_lock);
r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
dma_fence_put(fence);
return r;
error:
+ mutex_unlock(&adev->mman.gtt_window_lock);
+
if (fence)
dma_fence_wait(fence, false);
dma_fence_put(fence);
@@ -384,7 +405,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
placement.num_busy_placement = 1;
placement.busy_placement = &placements;
placements.fpfn = 0;
- placements.lpfn = adev->mc.gtt_size >> PAGE_SHIFT;
+ placements.lpfn = 0;
placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
interruptible, no_wait_gpu);
@@ -431,7 +452,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
placement.num_busy_placement = 1;
placement.busy_placement = &placements;
placements.fpfn = 0;
- placements.lpfn = adev->mc.gtt_size >> PAGE_SHIFT;
+ placements.lpfn = 0;
placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
interruptible, no_wait_gpu);
@@ -507,6 +528,15 @@ memcpy:
}
}
+ if (bo->type == ttm_bo_type_device &&
+ new_mem->mem_type == TTM_PL_VRAM &&
+ old_mem->mem_type != TTM_PL_VRAM) {
+ /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
+ * accesses the BO after it's moved.
+ */
+ abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ }
+
/* update statistics */
atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
return 0;
@@ -633,6 +663,38 @@ release_pages:
return r;
}
+static void amdgpu_trace_dma_map(struct ttm_tt *ttm)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
+ struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ unsigned i;
+
+ if (unlikely(trace_amdgpu_ttm_tt_populate_enabled())) {
+ for (i = 0; i < ttm->num_pages; i++) {
+ trace_amdgpu_ttm_tt_populate(
+ adev,
+ gtt->ttm.dma_address[i],
+ page_to_phys(ttm->pages[i]));
+ }
+ }
+}
+
+static void amdgpu_trace_dma_unmap(struct ttm_tt *ttm)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
+ struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ unsigned i;
+
+ if (unlikely(trace_amdgpu_ttm_tt_unpopulate_enabled())) {
+ for (i = 0; i < ttm->num_pages; i++) {
+ trace_amdgpu_ttm_tt_unpopulate(
+ adev,
+ gtt->ttm.dma_address[i],
+ page_to_phys(ttm->pages[i]));
+ }
+ }
+}
+
/* prepare the sg table with the user pages */
static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
{
@@ -659,6 +721,8 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
gtt->ttm.dma_address, ttm->num_pages);
+ amdgpu_trace_dma_map(ttm);
+
return 0;
release_sg:
@@ -692,14 +756,41 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
put_page(page);
}
+ amdgpu_trace_dma_unmap(ttm);
+
sg_free_table(ttm->sg);
}
+static int amdgpu_ttm_do_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
+{
+ struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ uint64_t flags;
+ int r;
+
+ spin_lock(&gtt->adev->gtt_list_lock);
+ flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, mem);
+ gtt->offset = (u64)mem->start << PAGE_SHIFT;
+ r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
+ ttm->pages, gtt->ttm.dma_address, flags);
+
+ if (r) {
+ DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
+ ttm->num_pages, gtt->offset);
+ goto error_gart_bind;
+ }
+
+ list_add_tail(&gtt->list, &gtt->adev->gtt_list);
+error_gart_bind:
+ spin_unlock(&gtt->adev->gtt_list_lock);
+ return r;
+
+}
+
static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
struct ttm_mem_reg *bo_mem)
{
struct amdgpu_ttm_tt *gtt = (void*)ttm;
- int r;
+ int r = 0;
if (gtt->userptr) {
r = amdgpu_ttm_tt_pin_userptr(ttm);
@@ -718,7 +809,10 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
bo_mem->mem_type == AMDGPU_PL_OA)
return -EINVAL;
- return 0;
+ if (amdgpu_gtt_mgr_is_allocated(bo_mem))
+ r = amdgpu_ttm_do_bind(ttm, bo_mem);
+
+ return r;
}
bool amdgpu_ttm_is_bound(struct ttm_tt *ttm)
@@ -731,8 +825,6 @@ bool amdgpu_ttm_is_bound(struct ttm_tt *ttm)
int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
{
struct ttm_tt *ttm = bo->ttm;
- struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
- uint64_t flags;
int r;
if (!ttm || amdgpu_ttm_is_bound(ttm))
@@ -745,22 +837,7 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
return r;
}
- spin_lock(&gtt->adev->gtt_list_lock);
- flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
- gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
- r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
- ttm->pages, gtt->ttm.dma_address, flags);
-
- if (r) {
- DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
- ttm->num_pages, gtt->offset);
- goto error_gart_bind;
- }
-
- list_add_tail(&gtt->list, &gtt->adev->gtt_list);
-error_gart_bind:
- spin_unlock(&gtt->adev->gtt_list_lock);
- return r;
+ return amdgpu_ttm_do_bind(ttm, bo_mem);
}
int amdgpu_ttm_recover_gart(struct amdgpu_device *adev)
@@ -852,7 +929,7 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
{
- struct amdgpu_device *adev;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
unsigned i;
int r;
@@ -875,14 +952,14 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
gtt->ttm.dma_address, ttm->num_pages);
ttm->state = tt_unbound;
- return 0;
+ r = 0;
+ goto trace_mappings;
}
- adev = amdgpu_ttm_adev(ttm->bdev);
-
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
- return ttm_dma_populate(&gtt->ttm, adev->dev);
+ r = ttm_dma_populate(&gtt->ttm, adev->dev);
+ goto trace_mappings;
}
#endif
@@ -905,7 +982,12 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
return -EFAULT;
}
}
- return 0;
+
+ r = 0;
+trace_mappings:
+ if (likely(!r))
+ amdgpu_trace_dma_map(ttm);
+ return r;
}
static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
@@ -926,6 +1008,8 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
adev = amdgpu_ttm_adev(ttm->bdev);
+ amdgpu_trace_dma_unmap(ttm);
+
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
ttm_dma_unpopulate(&gtt->ttm, adev->dev);
@@ -1075,6 +1159,67 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
return ttm_bo_eviction_valuable(bo, place);
}
+static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
+ unsigned long offset,
+ void *buf, int len, int write)
+{
+ struct amdgpu_bo *abo = container_of(bo, struct amdgpu_bo, tbo);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
+ struct drm_mm_node *nodes = abo->tbo.mem.mm_node;
+ uint32_t value = 0;
+ int ret = 0;
+ uint64_t pos;
+ unsigned long flags;
+
+ if (bo->mem.mem_type != TTM_PL_VRAM)
+ return -EIO;
+
+ while (offset >= (nodes->size << PAGE_SHIFT)) {
+ offset -= nodes->size << PAGE_SHIFT;
+ ++nodes;
+ }
+ pos = (nodes->start << PAGE_SHIFT) + offset;
+
+ while (len && pos < adev->mc.mc_vram_size) {
+ uint64_t aligned_pos = pos & ~(uint64_t)3;
+ uint32_t bytes = 4 - (pos & 3);
+ uint32_t shift = (pos & 3) * 8;
+ uint32_t mask = 0xffffffff << shift;
+
+ if (len < bytes) {
+ mask &= 0xffffffff >> (bytes - len) * 8;
+ bytes = len;
+ }
+
+ spin_lock_irqsave(&adev->mmio_idx_lock, flags);
+ WREG32(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
+ WREG32(mmMM_INDEX_HI, aligned_pos >> 31);
+ if (!write || mask != 0xffffffff)
+ value = RREG32(mmMM_DATA);
+ if (write) {
+ value &= ~mask;
+ value |= (*(uint32_t *)buf << shift) & mask;
+ WREG32(mmMM_DATA, value);
+ }
+ spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
+ if (!write) {
+ value = (value & mask) >> shift;
+ memcpy(buf, &value, bytes);
+ }
+
+ ret += bytes;
+ buf = (uint8_t *)buf + bytes;
+ pos += bytes;
+ len -= bytes;
+ if (pos >= (nodes->start + nodes->size) << PAGE_SHIFT) {
+ ++nodes;
+ pos = (nodes->start << PAGE_SHIFT);
+ }
+ }
+
+ return ret;
+}
+
static struct ttm_bo_driver amdgpu_bo_driver = {
.ttm_tt_create = &amdgpu_ttm_tt_create,
.ttm_tt_populate = &amdgpu_ttm_tt_populate,
@@ -1090,11 +1235,14 @@ static struct ttm_bo_driver amdgpu_bo_driver = {
.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
.io_mem_free = &amdgpu_ttm_io_mem_free,
.io_mem_pfn = amdgpu_ttm_io_mem_pfn,
+ .access_memory = &amdgpu_ttm_access_memory
};
int amdgpu_ttm_init(struct amdgpu_device *adev)
{
+ uint64_t gtt_size;
int r;
+ u64 vis_vram_limit;
r = amdgpu_ttm_global_init(adev);
if (r) {
@@ -1118,36 +1266,37 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
DRM_ERROR("Failed initializing VRAM heap.\n");
return r;
}
+
+ /* Reduce size of CPU-visible VRAM if requested */
+ vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024;
+ if (amdgpu_vis_vram_limit > 0 &&
+ vis_vram_limit <= adev->mc.visible_vram_size)
+ adev->mc.visible_vram_size = vis_vram_limit;
+
/* Change the size here instead of the init above so only lpfn is affected */
amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
- r = amdgpu_bo_create(adev, adev->mc.stolen_size, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, &adev->stollen_vga_memory);
- if (r) {
- return r;
- }
- r = amdgpu_bo_reserve(adev->stollen_vga_memory, false);
+ r = amdgpu_bo_create_kernel(adev, adev->mc.stolen_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->stolen_vga_memory,
+ NULL, NULL);
if (r)
return r;
- r = amdgpu_bo_pin(adev->stollen_vga_memory, AMDGPU_GEM_DOMAIN_VRAM, NULL);
- amdgpu_bo_unreserve(adev->stollen_vga_memory);
- if (r) {
- amdgpu_bo_unref(&adev->stollen_vga_memory);
- return r;
- }
DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
(unsigned) (adev->mc.real_vram_size / (1024 * 1024)));
- r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT,
- adev->mc.gtt_size >> PAGE_SHIFT);
+
+ if (amdgpu_gtt_size == -1)
+ gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
+ adev->mc.mc_vram_size);
+ else
+ gtt_size = (uint64_t)amdgpu_gtt_size << 20;
+ r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
if (r) {
DRM_ERROR("Failed initializing GTT heap.\n");
return r;
}
DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
- (unsigned)(adev->mc.gtt_size / (1024 * 1024)));
+ (unsigned)(gtt_size / (1024 * 1024)));
adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT;
adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT;
@@ -1203,13 +1352,13 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
if (!adev->mman.initialized)
return;
amdgpu_ttm_debugfs_fini(adev);
- if (adev->stollen_vga_memory) {
- r = amdgpu_bo_reserve(adev->stollen_vga_memory, true);
+ if (adev->stolen_vga_memory) {
+ r = amdgpu_bo_reserve(adev->stolen_vga_memory, true);
if (r == 0) {
- amdgpu_bo_unpin(adev->stollen_vga_memory);
- amdgpu_bo_unreserve(adev->stollen_vga_memory);
+ amdgpu_bo_unpin(adev->stolen_vga_memory);
+ amdgpu_bo_unreserve(adev->stolen_vga_memory);
}
- amdgpu_bo_unref(&adev->stollen_vga_memory);
+ amdgpu_bo_unref(&adev->stolen_vga_memory);
}
ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
@@ -1256,12 +1405,77 @@ int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
}
-int amdgpu_copy_buffer(struct amdgpu_ring *ring,
- uint64_t src_offset,
- uint64_t dst_offset,
- uint32_t byte_count,
+static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem, unsigned num_pages,
+ uint64_t offset, unsigned window,
+ struct amdgpu_ring *ring,
+ uint64_t *addr)
+{
+ struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
+ struct amdgpu_device *adev = ring->adev;
+ struct ttm_tt *ttm = bo->ttm;
+ struct amdgpu_job *job;
+ unsigned num_dw, num_bytes;
+ dma_addr_t *dma_address;
+ struct dma_fence *fence;
+ uint64_t src_addr, dst_addr;
+ uint64_t flags;
+ int r;
+
+ BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
+ AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
+
+ *addr = adev->mc.gart_start;
+ *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
+ AMDGPU_GPU_PAGE_SIZE;
+
+ num_dw = adev->mman.buffer_funcs->copy_num_dw;
+ while (num_dw & 0x7)
+ num_dw++;
+
+ num_bytes = num_pages * 8;
+
+ r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job);
+ if (r)
+ return r;
+
+ src_addr = num_dw * 4;
+ src_addr += job->ibs[0].gpu_addr;
+
+ dst_addr = adev->gart.table_addr;
+ dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
+ amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
+ dst_addr, num_bytes);
+
+ amdgpu_ring_pad_ib(ring, &job->ibs[0]);
+ WARN_ON(job->ibs[0].length_dw > num_dw);
+
+ dma_address = &gtt->ttm.dma_address[offset >> PAGE_SHIFT];
+ flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem);
+ r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
+ &job->ibs[0].ptr[num_dw]);
+ if (r)
+ goto error_free;
+
+ r = amdgpu_job_submit(job, ring, &adev->mman.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
+ if (r)
+ goto error_free;
+
+ dma_fence_put(fence);
+
+ return r;
+
+error_free:
+ amdgpu_job_free(job);
+ return r;
+}
+
+int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
+ uint64_t dst_offset, uint32_t byte_count,
struct reservation_object *resv,
- struct dma_fence **fence, bool direct_submit)
+ struct dma_fence **fence, bool direct_submit,
+ bool vm_needs_flush)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_job *job;
@@ -1283,6 +1497,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
if (r)
return r;
+ job->vm_needs_flush = vm_needs_flush;
if (resv) {
r = amdgpu_sync_resv(adev, &job->sync, resv,
AMDGPU_FENCE_OWNER_UNDEFINED);
@@ -1327,11 +1542,12 @@ error_free:
}
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
- uint32_t src_data,
+ uint64_t src_data,
struct reservation_object *resv,
struct dma_fence **fence)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ /* max_bytes applies to SDMA_OP_PTEPDE as well as SDMA_OP_CONST_FILL */

uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
@@ -1347,6 +1563,12 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
return -EINVAL;
}
+ if (bo->tbo.mem.mem_type == TTM_PL_TT) {
+ r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
+ if (r)
+ return r;
+ }
+
num_pages = bo->tbo.num_pages;
mm_node = bo->tbo.mem.mm_node;
num_loops = 0;
@@ -1357,7 +1579,9 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
num_pages -= mm_node->size;
++mm_node;
}
- num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;
+
+ /* 10 double words for each SDMA_OP_PTEPDE cmd */
+ num_dw = num_loops * 10;
/* for IB padding */
num_dw += 64;
@@ -1382,16 +1606,16 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint32_t byte_count = mm_node->size << PAGE_SHIFT;
uint64_t dst_addr;
- r = amdgpu_mm_node_addr(&bo->tbo, mm_node,
- &bo->tbo.mem, &dst_addr);
- if (r)
- return r;
+ WARN_ONCE(byte_count & 0x7, "size should be a multiple of 8");
+ dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem);
while (byte_count) {
uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
- amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
- dst_addr, cur_size_in_bytes);
+ amdgpu_vm_set_pte_pde(adev, &job->ibs[0],
+ dst_addr, 0,
+ cur_size_in_bytes >> 3, 0,
+ src_data);
dst_addr += cur_size_in_bytes;
byte_count -= cur_size_in_bytes;
@@ -1417,32 +1641,16 @@ error_free:
#if defined(CONFIG_DEBUG_FS)
-extern void amdgpu_gtt_mgr_print(struct seq_file *m, struct ttm_mem_type_manager
- *man);
static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
unsigned ttm_pl = *(int *)node->info_ent->data;
struct drm_device *dev = node->minor->dev;
struct amdgpu_device *adev = dev->dev_private;
- struct drm_mm *mm = (struct drm_mm *)adev->mman.bdev.man[ttm_pl].priv;
- struct ttm_bo_global *glob = adev->mman.bdev.glob;
+ struct ttm_mem_type_manager *man = &adev->mman.bdev.man[ttm_pl];
struct drm_printer p = drm_seq_file_printer(m);
- spin_lock(&glob->lru_lock);
- drm_mm_print(mm, &p);
- spin_unlock(&glob->lru_lock);
- switch (ttm_pl) {
- case TTM_PL_VRAM:
- seq_printf(m, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n",
- adev->mman.bdev.man[ttm_pl].size,
- (u64)atomic64_read(&adev->vram_usage) >> 20,
- (u64)atomic64_read(&adev->vram_vis_usage) >> 20);
- break;
- case TTM_PL_TT:
- amdgpu_gtt_mgr_print(m, &adev->mman.bdev.man[TTM_PL_TT]);
- break;
- }
+ man->func->debug(man, &p);
return 0;
}
@@ -1574,7 +1782,7 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
adev, &amdgpu_ttm_gtt_fops);
if (IS_ERR(ent))
return PTR_ERR(ent);
- i_size_write(ent->d_inode, adev->mc.gtt_size);
+ i_size_write(ent->d_inode, adev->mc.gart_size);
adev->mman.gtt = ent;
#endif
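Reviewer note (not part of the patch): amdgpu_map_buffer() gives a BO that has no real GTT allocation yet a temporary GART mapping inside one of the AMDGPU_GTT_NUM_TRANSFER_WINDOWS windows reserved at the start of the GART aperture; window 0 is used for the source and window 1 for the destination of a move, and gtt_window_lock serializes the users. The address it hands back is the aperture base plus the window offset, roughly:

static uint64_t example_window_addr(struct amdgpu_device *adev, unsigned window)
{
	/* mirrors the computation at the top of amdgpu_map_buffer() */
	return adev->mc.gart_start +
	       (uint64_t)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
	       AMDGPU_GPU_PAGE_SIZE;
}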
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 6bdede8ff12b..f22a4758719d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -34,6 +34,9 @@
#define AMDGPU_PL_FLAG_GWS (TTM_PL_FLAG_PRIV << 1)
#define AMDGPU_PL_FLAG_OA (TTM_PL_FLAG_PRIV << 2)
+#define AMDGPU_GTT_MAX_TRANSFER_SIZE 512
+#define AMDGPU_GTT_NUM_TRANSFER_WINDOWS 2
+
struct amdgpu_mman {
struct ttm_bo_global_ref bo_global_ref;
struct drm_global_reference mem_global_ref;
@@ -49,6 +52,8 @@ struct amdgpu_mman {
/* buffer handling */
const struct amdgpu_buffer_funcs *buffer_funcs;
struct amdgpu_ring *buffer_funcs_ring;
+
+ struct mutex gtt_window_lock;
/* Scheduler entity for buffer moves */
struct amd_sched_entity entity;
};
@@ -56,24 +61,29 @@ struct amdgpu_mman {
extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func;
extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func;
+bool amdgpu_gtt_mgr_is_allocated(struct ttm_mem_reg *mem);
int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *tbo,
const struct ttm_place *place,
struct ttm_mem_reg *mem);
+uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
+
+uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
+uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
-int amdgpu_copy_buffer(struct amdgpu_ring *ring,
- uint64_t src_offset,
- uint64_t dst_offset,
- uint32_t byte_count,
+int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
+ uint64_t dst_offset, uint32_t byte_count,
struct reservation_object *resv,
- struct dma_fence **fence, bool direct_submit);
+ struct dma_fence **fence, bool direct_submit,
+ bool vm_needs_flush);
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
- uint32_t src_data,
+ uint64_t src_data,
struct reservation_object *resv,
struct dma_fence **fence);
int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
bool amdgpu_ttm_is_bound(struct ttm_tt *ttm);
int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem);
+int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);
#endif
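Reviewer note (not part of the patch): amdgpu_copy_buffer() grows a vm_needs_flush parameter; existing callers (the GTT test above, the bounce-buffer moves) simply pass the extra flag, which is forwarded to the job. A minimal sketch of a caller updated to the new prototype:

static int example_copy(struct amdgpu_ring *ring, uint64_t src, uint64_t dst,
			uint32_t size, struct reservation_object *resv,
			struct dma_fence **fence)
{
	/* trailing arguments: direct_submit = false, vm_needs_flush = false */
	return amdgpu_copy_buffer(ring, src, dst, size, resv, fence,
				  false, false);
}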
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 4f50eeb65855..36c763310df5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -275,14 +275,10 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
else
return AMDGPU_FW_LOAD_PSP;
case CHIP_RAVEN:
-#if 0
- if (!load_type)
+ if (load_type != 2)
return AMDGPU_FW_LOAD_DIRECT;
else
return AMDGPU_FW_LOAD_PSP;
-#else
- return AMDGPU_FW_LOAD_DIRECT;
-#endif
default:
DRM_ERROR("Unknow firmware load type\n");
}
@@ -362,8 +358,6 @@ static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode,
(le32_to_cpu(header->jt_offset) * 4);
memcpy(dst_addr, src_addr, le32_to_cpu(header->jt_size) * 4);
- ucode->ucode_size += le32_to_cpu(header->jt_size) * 4;
-
return 0;
}
@@ -377,10 +371,15 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
struct amdgpu_firmware_info *ucode = NULL;
const struct common_firmware_header *header = NULL;
+ if (!adev->firmware.fw_size) {
+ dev_warn(adev->dev, "No ip firmware need to load\n");
+ return 0;
+ }
+
err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, bo);
+ NULL, NULL, 0, bo);
if (err) {
dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
goto failed;
@@ -459,6 +458,9 @@ int amdgpu_ucode_fini_bo(struct amdgpu_device *adev)
int i;
struct amdgpu_firmware_info *ucode = NULL;
+ if (!adev->firmware.fw_size)
+ return 0;
+
for (i = 0; i < adev->firmware.max_ucodes; i++) {
ucode = &adev->firmware.ucode[i];
if (ucode->fw) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 2ca09f111f08..e19928dae8e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -588,6 +588,10 @@ static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
}
break;
+ case 8: /* MJPEG */
+ min_dpb_size = 0;
+ break;
+
case 16: /* H265 */
image_size = (ALIGN(width, 16) * ALIGN(height, 16) * 3) / 2;
image_size = ALIGN(image_size, 256);
@@ -1051,7 +1055,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, &bo);
+ NULL, NULL, 0, &bo);
if (r)
return r;
@@ -1101,7 +1105,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, &bo);
+ NULL, NULL, 0, &bo);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index b692ad402252..c855366521ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -937,9 +937,9 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
unsigned i;
int r, timeout = adev->usec_timeout;
- /* workaround VCE ring test slow issue for sriov*/
+ /* skip ring test for sriov*/
if (amdgpu_sriov_vf(adev))
- timeout *= 10;
+ return 0;
r = amdgpu_ring_alloc(ring, 16);
if (r) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 09190fadd228..041e0121590c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -209,9 +209,9 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
if (fences == 0) {
if (adev->pm.dpm_enabled) {
+ /* might be used later with pg/cg
amdgpu_dpm_enable_uvd(adev, false);
- } else {
- amdgpu_asic_set_uvd_clocks(adev, 0, 0);
+ */
}
} else {
schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
@@ -223,12 +223,10 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
- if (set_clocks) {
- if (adev->pm.dpm_enabled) {
- amdgpu_dpm_enable_uvd(adev, true);
- } else {
- amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
- }
+ if (set_clocks && adev->pm.dpm_enabled) {
+ /* might be used later with pg/cg
+ amdgpu_dpm_enable_uvd(adev, true);
+ */
}
}
@@ -361,7 +359,7 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, &bo);
+ NULL, NULL, 0, &bo);
if (r)
return r;
@@ -413,7 +411,7 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, &bo);
+ NULL, NULL, 0, &bo);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.c
new file mode 100644
index 000000000000..45ac91861965
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_vf_error.h"
+#include "mxgpu_ai.h"
+
+#define AMDGPU_VF_ERROR_ENTRY_SIZE 16
+
+/* struct amdgpu_vf_error_buffer - amdgpu VF error information. */
+struct amdgpu_vf_error_buffer {
+ int read_count;
+ int write_count;
+ uint16_t code[AMDGPU_VF_ERROR_ENTRY_SIZE];
+ uint16_t flags[AMDGPU_VF_ERROR_ENTRY_SIZE];
+ uint64_t data[AMDGPU_VF_ERROR_ENTRY_SIZE];
+};
+
+struct amdgpu_vf_error_buffer admgpu_vf_errors;
+
+
+void amdgpu_vf_error_put(uint16_t sub_error_code, uint16_t error_flags, uint64_t error_data)
+{
+ int index;
+ uint16_t error_code = AMDGIM_ERROR_CODE(AMDGIM_ERROR_CATEGORY_VF, sub_error_code);
+
+ index = admgpu_vf_errors.write_count % AMDGPU_VF_ERROR_ENTRY_SIZE;
+ admgpu_vf_errors.code [index] = error_code;
+ admgpu_vf_errors.flags [index] = error_flags;
+ admgpu_vf_errors.data [index] = error_data;
+ admgpu_vf_errors.write_count ++;
+}
+
+
+void amdgpu_vf_error_trans_all(struct amdgpu_device *adev)
+{
+ /* u32 pf2vf_flags = 0; */
+ u32 data1, data2, data3;
+ int index;
+
+ if ((NULL == adev) || (!amdgpu_sriov_vf(adev)) || (!adev->virt.ops) || (!adev->virt.ops->trans_msg)) {
+ return;
+ }
+/*
+ TODO: Enable this code when pf2vf_info is merged
+ AMDGPU_FW_VRAM_PF2VF_READ (adev, feature_flags, &pf2vf_flags);
+ if (!(pf2vf_flags & AMDGIM_FEATURE_ERROR_LOG_COLLECT)) {
+ return;
+ }
+*/
+ /* The error log is a ring; if it overflowed, advance read_count so only the newest entries are sent. */
+ if (admgpu_vf_errors.write_count - admgpu_vf_errors.read_count > AMDGPU_VF_ERROR_ENTRY_SIZE) {
+ admgpu_vf_errors.read_count = admgpu_vf_errors.write_count - AMDGPU_VF_ERROR_ENTRY_SIZE;
+ }
+
+ while (admgpu_vf_errors.read_count < admgpu_vf_errors.write_count) {
+ index = admgpu_vf_errors.read_count % AMDGPU_VF_ERROR_ENTRY_SIZE;
+ data1 = AMDGIM_ERROR_CODE_FLAGS_TO_MAILBOX (admgpu_vf_errors.code[index], admgpu_vf_errors.flags[index]);
+ data2 = admgpu_vf_errors.data[index] & 0xFFFFFFFF;
+ data3 = (admgpu_vf_errors.data[index] >> 32) & 0xFFFFFFFF;
+
+ adev->virt.ops->trans_msg(adev, IDH_LOG_VF_ERROR, data1, data2, data3);
+ admgpu_vf_errors.read_count ++;
+ }
+}
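Reviewer note (not part of the patch): the VF error log is a fixed ring of AMDGPU_VF_ERROR_ENTRY_SIZE entries indexed by write_count modulo the size; if the writer laps the reader, read_count is pulled forward so only the newest entries are transmitted to the host through the new trans_msg hook. A sketch of how an error path might record and flush an entry (this exact call site is hypothetical):

static void example_report_init_failure(struct amdgpu_device *adev, int err)
{
	/* code and helpers come from the amdgpu_vf_error files added here */
	amdgpu_vf_error_put(AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, (uint64_t)err);
	amdgpu_vf_error_trans_all(adev);
}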
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.h
new file mode 100644
index 000000000000..2a3278ec76ba
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __VF_ERROR_H__
+#define __VF_ERROR_H__
+
+#define AMDGIM_ERROR_CODE_FLAGS_TO_MAILBOX(c,f) (((c & 0xFFFF) << 16) | (f & 0xFFFF))
+#define AMDGIM_ERROR_CODE(t,c) (((t&0xF)<<12)|(c&0xFFF))
+
+/* Please keep enum same as AMD GIM driver */
+enum AMDGIM_ERROR_VF {
+ AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL = 0,
+ AMDGIM_ERROR_VF_NO_VBIOS,
+ AMDGIM_ERROR_VF_GPU_POST_ERROR,
+ AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL,
+ AMDGIM_ERROR_VF_FENCE_INIT_FAIL,
+
+ AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL,
+ AMDGIM_ERROR_VF_IB_INIT_FAIL,
+ AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL,
+ AMDGIM_ERROR_VF_ASIC_RESUME_FAIL,
+ AMDGIM_ERROR_VF_GPU_RESET_FAIL,
+
+ AMDGIM_ERROR_VF_TEST,
+ AMDGIM_ERROR_VF_MAX
+};
+
+enum AMDGIM_ERROR_CATEGORY {
+ AMDGIM_ERROR_CATEGORY_NON_USED = 0,
+ AMDGIM_ERROR_CATEGORY_GIM,
+ AMDGIM_ERROR_CATEGORY_PF,
+ AMDGIM_ERROR_CATEGORY_VF,
+ AMDGIM_ERROR_CATEGORY_VBIOS,
+ AMDGIM_ERROR_CATEGORY_MONITOR,
+
+ AMDGIM_ERROR_CATEGORY_MAX
+};
+
+void amdgpu_vf_error_put(uint16_t sub_error_code, uint16_t error_flags, uint64_t error_data);
+void amdgpu_vf_error_trans_all (struct amdgpu_device *adev);
+
+#endif /* __VF_ERROR_H__ */
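Reviewer note (not part of the patch): AMDGIM_ERROR_CODE() packs a 4-bit category and a 12-bit sub-code, and AMDGIM_ERROR_CODE_FLAGS_TO_MAILBOX() puts that code in the upper 16 bits of the mailbox word with the flags below it. A worked sketch using the values defined above:

static uint32_t example_pack_reset_fail(void)
{
	/* category VF = 3, GPU_RESET_FAIL = 9, so code = 0x3009 and the
	 * mailbox word with zero flags is 0x30090000
	 */
	uint16_t code = AMDGIM_ERROR_CODE(AMDGIM_ERROR_CATEGORY_VF,
					  AMDGIM_ERROR_VF_GPU_RESET_FAIL);

	return AMDGIM_ERROR_CODE_FLAGS_TO_MAILBOX(code, 0);
}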
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 8a081e162d13..ab05121b9272 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -46,14 +46,14 @@ int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
* address within META_DATA init package to support SRIOV gfx preemption.
*/
-int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct amdgpu_bo_va **bo_va)
{
- int r;
- struct amdgpu_bo_va *bo_va;
struct ww_acquire_ctx ticket;
struct list_head list;
struct amdgpu_bo_list_entry pd;
struct ttm_validate_buffer csa_tv;
+ int r;
INIT_LIST_HEAD(&list);
INIT_LIST_HEAD(&csa_tv.head);
@@ -69,34 +69,33 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm)
return r;
}
- bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
- if (!bo_va) {
+ *bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
+ if (!*bo_va) {
ttm_eu_backoff_reservation(&ticket, &list);
DRM_ERROR("failed to create bo_va for static CSA\n");
return -ENOMEM;
}
- r = amdgpu_vm_alloc_pts(adev, bo_va->vm, AMDGPU_CSA_VADDR,
- AMDGPU_CSA_SIZE);
+ r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, AMDGPU_CSA_VADDR,
+ AMDGPU_CSA_SIZE);
if (r) {
DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
- amdgpu_vm_bo_rmv(adev, bo_va);
+ amdgpu_vm_bo_rmv(adev, *bo_va);
ttm_eu_backoff_reservation(&ticket, &list);
return r;
}
- r = amdgpu_vm_bo_map(adev, bo_va, AMDGPU_CSA_VADDR, 0,AMDGPU_CSA_SIZE,
- AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
- AMDGPU_PTE_EXECUTABLE);
+ r = amdgpu_vm_bo_map(adev, *bo_va, AMDGPU_CSA_VADDR, 0, AMDGPU_CSA_SIZE,
+ AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
+ AMDGPU_PTE_EXECUTABLE);
if (r) {
DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
- amdgpu_vm_bo_rmv(adev, bo_va);
+ amdgpu_vm_bo_rmv(adev, *bo_va);
ttm_eu_backoff_reservation(&ticket, &list);
return r;
}
- vm->csa_bo_va = bo_va;
ttm_eu_backoff_reservation(&ticket, &list);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 9e1062edb76e..afcfb8bcfb65 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -43,6 +43,7 @@ struct amdgpu_virt_ops {
int (*req_full_gpu)(struct amdgpu_device *adev, bool init);
int (*rel_full_gpu)(struct amdgpu_device *adev, bool init);
int (*reset_gpu)(struct amdgpu_device *adev);
+ void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
};
/* GPU virtualization */
@@ -89,7 +90,8 @@ static inline bool is_virtual_machine(void)
struct amdgpu_vm;
int amdgpu_allocate_static_csa(struct amdgpu_device *adev);
-int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct amdgpu_bo_va **bo_va);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
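Reviewer note (not part of the patch): amdgpu_map_static_csa() now hands the created bo_va back through an out parameter instead of stashing it in the VM, so the caller owns the reference. A sketch of an updated caller (the surrounding structure is assumed, not shown in this hunk):

static int example_open_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			    struct amdgpu_bo_va **csa_va)
{
	int r;

	/* the bo_va comes back through the new out parameter */
	r = amdgpu_map_static_csa(adev, vm, csa_va);
	if (r)
		DRM_ERROR("failed to map static CSA, err=%d\n", r);
	return r;
}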
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 5795f81369f0..6b1343e5541d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -77,8 +77,6 @@ struct amdgpu_pte_update_params {
void (*func)(struct amdgpu_pte_update_params *params, uint64_t pe,
uint64_t addr, unsigned count, uint32_t incr,
uint64_t flags);
- /* indicate update pt or its shadow */
- bool shadow;
/* The next two are used during VM update by CPU
* DMA addresses to use for mapping
* Kernel pointer of PD/PT BO that needs to be updated
@@ -161,11 +159,26 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
*/
static int amdgpu_vm_validate_level(struct amdgpu_vm_pt *parent,
int (*validate)(void *, struct amdgpu_bo *),
- void *param)
+ void *param, bool use_cpu_for_update,
+ struct ttm_bo_global *glob)
{
unsigned i;
int r;
+ if (parent->bo->shadow) {
+ struct amdgpu_bo *shadow = parent->bo->shadow;
+
+ r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
+ if (r)
+ return r;
+ }
+
+ if (use_cpu_for_update) {
+ r = amdgpu_bo_kmap(parent->bo, NULL);
+ if (r)
+ return r;
+ }
+
if (!parent->entries)
return 0;
@@ -179,11 +192,18 @@ static int amdgpu_vm_validate_level(struct amdgpu_vm_pt *parent,
if (r)
return r;
+ spin_lock(&glob->lru_lock);
+ ttm_bo_move_to_lru_tail(&entry->bo->tbo);
+ if (entry->bo->shadow)
+ ttm_bo_move_to_lru_tail(&entry->bo->shadow->tbo);
+ spin_unlock(&glob->lru_lock);
+
/*
* Recurse into the sub directory. This is harmless because we
* have only a maximum of 5 layers.
*/
- r = amdgpu_vm_validate_level(entry, validate, param);
+ r = amdgpu_vm_validate_level(entry, validate, param,
+ use_cpu_for_update, glob);
if (r)
return r;
}
@@ -214,54 +234,12 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (num_evictions == vm->last_eviction_counter)
return 0;
- return amdgpu_vm_validate_level(&vm->root, validate, param);
-}
-
-/**
- * amdgpu_vm_move_level_in_lru - move one level of PT BOs to the LRU tail
- *
- * @adev: amdgpu device instance
- * @vm: vm providing the BOs
- *
- * Move the PT BOs to the tail of the LRU.
- */
-static void amdgpu_vm_move_level_in_lru(struct amdgpu_vm_pt *parent)
-{
- unsigned i;
-
- if (!parent->entries)
- return;
-
- for (i = 0; i <= parent->last_entry_used; ++i) {
- struct amdgpu_vm_pt *entry = &parent->entries[i];
-
- if (!entry->bo)
- continue;
-
- ttm_bo_move_to_lru_tail(&entry->bo->tbo);
- amdgpu_vm_move_level_in_lru(entry);
- }
+ return amdgpu_vm_validate_level(&vm->root, validate, param,
+ vm->use_cpu_for_update,
+ adev->mman.bdev.glob);
}
/**
- * amdgpu_vm_move_pt_bos_in_lru - move the PT BOs to the LRU tail
- *
- * @adev: amdgpu device instance
- * @vm: vm providing the BOs
- *
- * Move the PT BOs to the tail of the LRU.
- */
-void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
- struct amdgpu_vm *vm)
-{
- struct ttm_bo_global *glob = adev->mman.bdev.glob;
-
- spin_lock(&glob->lru_lock);
- amdgpu_vm_move_level_in_lru(&vm->root);
- spin_unlock(&glob->lru_lock);
-}
-
- /**
* amdgpu_vm_alloc_levels - allocate the PD/PT levels
*
* @adev: amdgpu_device pointer
@@ -282,6 +260,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
unsigned pt_idx, from, to;
int r;
u64 flags;
+ uint64_t init_value = 0;
if (!parent->entries) {
unsigned num_entries = amdgpu_vm_num_entries(adev, level);
@@ -315,6 +294,12 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
AMDGPU_GEM_CREATE_SHADOW);
+ if (vm->pte_support_ats) {
+ init_value = AMDGPU_PTE_SYSTEM;
+ if (level != adev->vm_manager.num_level - 1)
+ init_value |= AMDGPU_PDE_PTE;
+ }
+
/* walk over the address space and allocate the page tables */
for (pt_idx = from; pt_idx <= to; ++pt_idx) {
struct reservation_object *resv = vm->root.bo->tbo.resv;
@@ -327,10 +312,18 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
AMDGPU_GPU_PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
flags,
- NULL, resv, &pt);
+ NULL, resv, init_value, &pt);
if (r)
return r;
+ if (vm->use_cpu_for_update) {
+ r = amdgpu_bo_kmap(pt, NULL);
+ if (r) {
+ amdgpu_bo_unref(&pt);
+ return r;
+ }
+ }
+
/* Keep a reference to the root directory to avoid
* freeing them up in the wrong order.
*/
@@ -424,7 +417,7 @@ static int amdgpu_vm_grab_reserved_vmid_locked(struct amdgpu_vm *vm,
struct dma_fence *updates = sync->last_vm_update;
int r = 0;
struct dma_fence *flushed, *tmp;
- bool needs_flush = false;
+ bool needs_flush = vm->use_cpu_for_update;
flushed = id->flushed_updates;
if ((amdgpu_vm_had_gpu_reset(adev, id)) ||
@@ -545,11 +538,11 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
}
kfree(fences);
- job->vm_needs_flush = false;
+ job->vm_needs_flush = vm->use_cpu_for_update;
/* Check if we can use a VMID already assigned to this VM */
list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) {
struct dma_fence *flushed;
- bool needs_flush = false;
+ bool needs_flush = vm->use_cpu_for_update;
/* Check all the prerequisites to using this VMID */
if (amdgpu_vm_had_gpu_reset(adev, id))
@@ -745,7 +738,7 @@ static bool amdgpu_vm_is_large_bar(struct amdgpu_device *adev)
*
* Emit a VM flush when it is necessary.
*/
-int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
+int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
{
struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->funcs->vmhub;
@@ -767,12 +760,15 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
vm_flush_needed = true;
}
- if (!vm_flush_needed && !gds_switch_needed)
+ if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
return 0;
if (ring->funcs->init_cond_exec)
patch_offset = amdgpu_ring_init_cond_exec(ring);
+ if (need_pipe_sync)
+ amdgpu_ring_emit_pipeline_sync(ring);
+
if (ring->funcs->emit_vm_flush && vm_flush_needed) {
struct dma_fence *fence;
@@ -874,8 +870,8 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
{
struct amdgpu_bo_va *bo_va;
- list_for_each_entry(bo_va, &bo->va, bo_list) {
- if (bo_va->vm == vm) {
+ list_for_each_entry(bo_va, &bo->va, base.bo_list) {
+ if (bo_va->base.vm == vm) {
return bo_va;
}
}
@@ -981,6 +977,8 @@ static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
unsigned int i;
uint64_t value;
+ trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
+
for (i = 0; i < count; i++) {
value = params->pages_addr ?
amdgpu_vm_map_gart(params->pages_addr, addr) :
@@ -989,19 +987,16 @@ static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
i, value, flags);
addr += incr;
}
-
- /* Flush HDP */
- mb();
- amdgpu_gart_flush_gpu_tlb(params->adev, 0);
}
-static int amdgpu_vm_bo_wait(struct amdgpu_device *adev, struct amdgpu_bo *bo)
+static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ void *owner)
{
struct amdgpu_sync sync;
int r;
amdgpu_sync_create(&sync);
- amdgpu_sync_resv(adev, &sync, bo->tbo.resv, AMDGPU_FENCE_OWNER_VM);
+ amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.resv, owner);
r = amdgpu_sync_wait(&sync, true);
amdgpu_sync_free(&sync);
@@ -1042,23 +1037,14 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
params.adev = adev;
shadow = parent->bo->shadow;
- WARN_ON(vm->use_cpu_for_update && shadow);
- if (vm->use_cpu_for_update && !shadow) {
- r = amdgpu_bo_kmap(parent->bo, (void **)&pd_addr);
- if (r)
- return r;
- r = amdgpu_vm_bo_wait(adev, parent->bo);
- if (unlikely(r)) {
- amdgpu_bo_kunmap(parent->bo);
+ if (vm->use_cpu_for_update) {
+ pd_addr = (unsigned long)amdgpu_bo_kptr(parent->bo);
+ r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
+ if (unlikely(r))
return r;
- }
+
params.func = amdgpu_vm_cpu_set_ptes;
} else {
- if (shadow) {
- r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
- if (r)
- return r;
- }
ring = container_of(vm->entity.sched, struct amdgpu_ring,
sched);
@@ -1094,21 +1080,14 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
if (bo == NULL)
continue;
- if (bo->shadow) {
- struct amdgpu_bo *pt_shadow = bo->shadow;
-
- r = amdgpu_ttm_bind(&pt_shadow->tbo,
- &pt_shadow->tbo.mem);
- if (r)
- return r;
- }
-
pt = amdgpu_bo_gpu_offset(bo);
pt = amdgpu_gart_get_vm_pde(adev, pt);
- if (parent->entries[pt_idx].addr == pt)
+ /* Don't update huge pages here */
+ if ((parent->entries[pt_idx].addr & AMDGPU_PDE_PTE) ||
+ parent->entries[pt_idx].addr == (pt | AMDGPU_PTE_VALID))
continue;
- parent->entries[pt_idx].addr = pt;
+ parent->entries[pt_idx].addr = pt | AMDGPU_PTE_VALID;
pde = pd_addr + pt_idx * 8;
if (((last_pde + 8 * count) != pde) ||
@@ -1146,28 +1125,29 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
count, incr, AMDGPU_PTE_VALID);
}
- if (params.func == amdgpu_vm_cpu_set_ptes)
- amdgpu_bo_kunmap(parent->bo);
- else if (params.ib->length_dw == 0) {
- amdgpu_job_free(job);
- } else {
- amdgpu_ring_pad_ib(ring, params.ib);
- amdgpu_sync_resv(adev, &job->sync, parent->bo->tbo.resv,
- AMDGPU_FENCE_OWNER_VM);
- if (shadow)
- amdgpu_sync_resv(adev, &job->sync, shadow->tbo.resv,
+ if (!vm->use_cpu_for_update) {
+ if (params.ib->length_dw == 0) {
+ amdgpu_job_free(job);
+ } else {
+ amdgpu_ring_pad_ib(ring, params.ib);
+ amdgpu_sync_resv(adev, &job->sync, parent->bo->tbo.resv,
AMDGPU_FENCE_OWNER_VM);
+ if (shadow)
+ amdgpu_sync_resv(adev, &job->sync,
+ shadow->tbo.resv,
+ AMDGPU_FENCE_OWNER_VM);
+
+ WARN_ON(params.ib->length_dw > ndw);
+ r = amdgpu_job_submit(job, ring, &vm->entity,
+ AMDGPU_FENCE_OWNER_VM, &fence);
+ if (r)
+ goto error_free;
- WARN_ON(params.ib->length_dw > ndw);
- r = amdgpu_job_submit(job, ring, &vm->entity,
- AMDGPU_FENCE_OWNER_VM, &fence);
- if (r)
- goto error_free;
-
- amdgpu_bo_fence(parent->bo, fence, true);
- dma_fence_put(vm->last_dir_update);
- vm->last_dir_update = dma_fence_get(fence);
- dma_fence_put(fence);
+ amdgpu_bo_fence(parent->bo, fence, true);
+ dma_fence_put(vm->last_dir_update);
+ vm->last_dir_update = dma_fence_get(fence);
+ dma_fence_put(fence);
+ }
}
/*
* Recurse into the subdirectories. This recursion is harmless because
@@ -1235,33 +1215,98 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
if (r)
amdgpu_vm_invalidate_level(&vm->root);
+ if (vm->use_cpu_for_update) {
+ /* Flush HDP */
+ mb();
+ amdgpu_gart_flush_gpu_tlb(adev, 0);
+ }
+
return r;
}
/**
- * amdgpu_vm_find_pt - find the page table for an address
+ * amdgpu_vm_find_entry - find the entry for an address
*
* @p: see amdgpu_pte_update_params definition
* @addr: virtual address in question
+ * @entry: resulting entry or NULL
+ * @parent: parent entry
*
- * Find the page table BO for a virtual address, return NULL when none found.
+ * Find the vm_pt entry and its parent for the given address.
*/
-static struct amdgpu_bo *amdgpu_vm_get_pt(struct amdgpu_pte_update_params *p,
- uint64_t addr)
+void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr,
+ struct amdgpu_vm_pt **entry,
+ struct amdgpu_vm_pt **parent)
{
- struct amdgpu_vm_pt *entry = &p->vm->root;
unsigned idx, level = p->adev->vm_manager.num_level;
- while (entry->entries) {
+ *parent = NULL;
+ *entry = &p->vm->root;
+ while ((*entry)->entries) {
idx = addr >> (p->adev->vm_manager.block_size * level--);
- idx %= amdgpu_bo_size(entry->bo) / 8;
- entry = &entry->entries[idx];
+ idx %= amdgpu_bo_size((*entry)->bo) / 8;
+ *parent = *entry;
+ *entry = &(*entry)->entries[idx];
}
if (level)
- return NULL;
+ *entry = NULL;
+}
+
+/**
+ * amdgpu_vm_handle_huge_pages - handle updating the PD with huge pages
+ *
+ * @p: see amdgpu_pte_update_params definition
+ * @entry: vm_pt entry to check
+ * @parent: parent entry
+ * @nptes: number of PTEs updated with this operation
+ * @dst: destination address where the PTEs should point to
+ * @flags: access flags for the PTEs
+ *
+ * Check if we can update the PD with a huge page.
+ */
+static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
+ struct amdgpu_vm_pt *entry,
+ struct amdgpu_vm_pt *parent,
+ unsigned nptes, uint64_t dst,
+ uint64_t flags)
+{
+ bool use_cpu_update = (p->func == amdgpu_vm_cpu_set_ptes);
+ uint64_t pd_addr, pde;
+
+ /* In the case of a mixed PT the PDE must point to it */
+ if (p->adev->asic_type < CHIP_VEGA10 ||
+ nptes != AMDGPU_VM_PTE_COUNT(p->adev) ||
+ p->func == amdgpu_vm_do_copy_ptes ||
+ !(flags & AMDGPU_PTE_VALID)) {
+
+ dst = amdgpu_bo_gpu_offset(entry->bo);
+ dst = amdgpu_gart_get_vm_pde(p->adev, dst);
+ flags = AMDGPU_PTE_VALID;
+ } else {
+ /* Set the huge page flag to stop scanning at this PDE */
+ flags |= AMDGPU_PDE_PTE;
+ }
+
+ if (entry->addr == (dst | flags))
+ return;
+
+ entry->addr = (dst | flags);
- return entry->bo;
+ if (use_cpu_update) {
+ pd_addr = (unsigned long)amdgpu_bo_kptr(parent->bo);
+ pde = pd_addr + (entry - parent->entries) * 8;
+ amdgpu_vm_cpu_set_ptes(p, pde, dst, 1, 0, flags);
+ } else {
+ if (parent->bo->shadow) {
+ pd_addr = amdgpu_bo_gpu_offset(parent->bo->shadow);
+ pde = pd_addr + (entry - parent->entries) * 8;
+ amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags);
+ }
+ pd_addr = amdgpu_bo_gpu_offset(parent->bo);
+ pde = pd_addr + (entry - parent->entries) * 8;
+ amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags);
+ }
}
/**
@@ -1287,49 +1332,44 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
uint64_t addr, pe_start;
struct amdgpu_bo *pt;
unsigned nptes;
- int r;
bool use_cpu_update = (params->func == amdgpu_vm_cpu_set_ptes);
-
/* walk over the address space and update the page tables */
- for (addr = start; addr < end; addr += nptes) {
- pt = amdgpu_vm_get_pt(params, addr);
- if (!pt) {
- pr_err("PT not found, aborting update_ptes\n");
- return -EINVAL;
- }
+ for (addr = start; addr < end; addr += nptes,
+ dst += nptes * AMDGPU_GPU_PAGE_SIZE) {
+ struct amdgpu_vm_pt *entry, *parent;
- if (params->shadow) {
- if (WARN_ONCE(use_cpu_update,
- "CPU VM update doesn't suuport shadow pages"))
- return 0;
-
- if (!pt->shadow)
- return 0;
- pt = pt->shadow;
- }
+ amdgpu_vm_get_entry(params, addr, &entry, &parent);
+ if (!entry)
+ return -ENOENT;
if ((addr & ~mask) == (end & ~mask))
nptes = end - addr;
else
nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);
+ amdgpu_vm_handle_huge_pages(params, entry, parent,
+ nptes, dst, flags);
+ /* We don't need to update PTEs for huge pages */
+ if (entry->addr & AMDGPU_PDE_PTE)
+ continue;
+
+ pt = entry->bo;
if (use_cpu_update) {
- r = amdgpu_bo_kmap(pt, (void *)&pe_start);
- if (r)
- return r;
- } else
+ pe_start = (unsigned long)amdgpu_bo_kptr(pt);
+ } else {
+ if (pt->shadow) {
+ pe_start = amdgpu_bo_gpu_offset(pt->shadow);
+ pe_start += (addr & mask) * 8;
+ params->func(params, pe_start, dst, nptes,
+ AMDGPU_GPU_PAGE_SIZE, flags);
+ }
pe_start = amdgpu_bo_gpu_offset(pt);
+ }
pe_start += (addr & mask) * 8;
-
params->func(params, pe_start, dst, nptes,
AMDGPU_GPU_PAGE_SIZE, flags);
-
- dst += nptes * AMDGPU_GPU_PAGE_SIZE;
-
- if (use_cpu_update)
- amdgpu_bo_kunmap(pt);
}
return 0;
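For reference, a minimal user-space sketch of how the loop above splits an address range into chunks that stay within one page table; the PTE count is an assumed example value.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Assumed example: 512 PTEs per page table. */
	const uint64_t pte_count = 512;
	const uint64_t mask = pte_count - 1;
	uint64_t start = 500, end = 1100, addr, nptes;

	/* Split [start, end) into chunks that never cross a page table
	 * boundary, mirroring the loop in amdgpu_vm_update_ptes(). */
	for (addr = start; addr < end; addr += nptes) {
		if ((addr & ~mask) == (end & ~mask))
			nptes = end - addr;
		else
			nptes = pte_count - (addr & mask);
		printf("update %llu PTEs at %llu\n",
		       (unsigned long long)nptes,
		       (unsigned long long)addr);
	}
	return 0;
}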
@@ -1370,10 +1410,9 @@ static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
* Userspace can support this by aligning virtual base address and
* allocation size to the fragment size.
*/
-
- /* SI and newer are optimized for 64KB */
- uint64_t frag_flags = AMDGPU_PTE_FRAG(AMDGPU_LOG2_PAGES_PER_FRAG);
- uint64_t frag_align = 1 << AMDGPU_LOG2_PAGES_PER_FRAG;
+ unsigned pages_per_frag = params->adev->vm_manager.fragment_size;
+ uint64_t frag_flags = AMDGPU_PTE_FRAG(pages_per_frag);
+ uint64_t frag_align = 1 << pages_per_frag;
uint64_t frag_start = ALIGN(start, frag_align);
uint64_t frag_end = end & ~(frag_align - 1);
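A small standalone example of the frag_start/frag_end computation above, assuming a hypothetical fragment size of 4 (16 pages); EX_ALIGN stands in for the kernel's ALIGN() macro.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's ALIGN() macro. */
#define EX_ALIGN(x, a)	(((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	unsigned pages_per_frag = 4;			/* assumed value */
	uint64_t frag_align = 1ULL << pages_per_frag;	/* 16 pages */
	uint64_t start = 35, end = 117;
	uint64_t frag_start = EX_ALIGN(start, frag_align);
	uint64_t frag_end = end & ~(frag_align - 1);

	printf("head [%llu, %llu): single pages\n",
	       (unsigned long long)start, (unsigned long long)frag_start);
	printf("body [%llu, %llu): %llu-page fragments\n",
	       (unsigned long long)frag_start, (unsigned long long)frag_end,
	       (unsigned long long)frag_align);
	printf("tail [%llu, %llu): single pages\n",
	       (unsigned long long)frag_end, (unsigned long long)end);
	return 0;
}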
@@ -1445,6 +1484,10 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
params.vm = vm;
params.src = src;
+ /* sync to everything on unmapping */
+ if (!(flags & AMDGPU_PTE_VALID))
+ owner = AMDGPU_FENCE_OWNER_UNDEFINED;
+
if (vm->use_cpu_for_update) {
/* params.src is used as flag to indicate system Memory */
if (pages_addr)
@@ -1453,23 +1496,18 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
/* Wait for PT BOs to be free. PTs share the same resv. object
* as the root PD BO
*/
- r = amdgpu_vm_bo_wait(adev, vm->root.bo);
+ r = amdgpu_vm_wait_pd(adev, vm, owner);
if (unlikely(r))
return r;
params.func = amdgpu_vm_cpu_set_ptes;
params.pages_addr = pages_addr;
- params.shadow = false;
return amdgpu_vm_frag_ptes(&params, start, last + 1,
addr, flags);
}
ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
- /* sync to everything on unmapping */
- if (!(flags & AMDGPU_PTE_VALID))
- owner = AMDGPU_FENCE_OWNER_UNDEFINED;
-
nptes = last - start + 1;
/*
@@ -1481,6 +1519,9 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
/* padding, etc. */
ndw = 64;
+ /* one PDE write for each huge page */
+ ndw += ((nptes >> adev->vm_manager.block_size) + 1) * 6;
+
if (src) {
/* only copy commands needed */
ndw += ncmds * 7;
@@ -1542,11 +1583,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
if (r)
goto error_free;
- params.shadow = true;
- r = amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags);
- if (r)
- goto error_free;
- params.shadow = false;
r = amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags);
if (r)
goto error_free;
@@ -1565,6 +1601,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
error_free:
amdgpu_job_free(job);
+ amdgpu_vm_invalidate_level(&vm->root);
return r;
}
@@ -1687,7 +1724,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
bool clear)
{
- struct amdgpu_vm *vm = bo_va->vm;
+ struct amdgpu_bo *bo = bo_va->base.bo;
+ struct amdgpu_vm *vm = bo_va->base.vm;
struct amdgpu_bo_va_mapping *mapping;
dma_addr_t *pages_addr = NULL;
uint64_t gtt_flags, flags;
@@ -1696,27 +1734,27 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct dma_fence *exclusive;
int r;
- if (clear || !bo_va->bo) {
+ if (clear || !bo_va->base.bo) {
mem = NULL;
nodes = NULL;
exclusive = NULL;
} else {
struct ttm_dma_tt *ttm;
- mem = &bo_va->bo->tbo.mem;
+ mem = &bo_va->base.bo->tbo.mem;
nodes = mem->mm_node;
if (mem->mem_type == TTM_PL_TT) {
- ttm = container_of(bo_va->bo->tbo.ttm, struct
- ttm_dma_tt, ttm);
+ ttm = container_of(bo_va->base.bo->tbo.ttm,
+ struct ttm_dma_tt, ttm);
pages_addr = ttm->dma_address;
}
- exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
+ exclusive = reservation_object_get_excl(bo->tbo.resv);
}
- if (bo_va->bo) {
- flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
- gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
- adev == amdgpu_ttm_adev(bo_va->bo->tbo.bdev)) ?
+ if (bo) {
+ flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
+ gtt_flags = (amdgpu_ttm_is_bound(bo->tbo.ttm) &&
+ adev == amdgpu_ttm_adev(bo->tbo.bdev)) ?
flags : 0;
} else {
flags = 0x0;
@@ -1724,7 +1762,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
}
spin_lock(&vm->status_lock);
- if (!list_empty(&bo_va->vm_status))
+ if (!list_empty(&bo_va->base.vm_status))
list_splice_init(&bo_va->valids, &bo_va->invalids);
spin_unlock(&vm->status_lock);
@@ -1747,11 +1785,17 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
spin_lock(&vm->status_lock);
list_splice_init(&bo_va->invalids, &bo_va->valids);
- list_del_init(&bo_va->vm_status);
+ list_del_init(&bo_va->base.vm_status);
if (clear)
- list_add(&bo_va->vm_status, &vm->cleared);
+ list_add(&bo_va->base.vm_status, &vm->cleared);
spin_unlock(&vm->status_lock);
+ if (vm->use_cpu_for_update) {
+ /* Flush HDP */
+ mb();
+ amdgpu_gart_flush_gpu_tlb(adev, 0);
+ }
+
return 0;
}
@@ -1905,15 +1949,19 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *mapping;
struct dma_fence *f = NULL;
int r;
+ uint64_t init_pte_value = 0;
while (!list_empty(&vm->freed)) {
mapping = list_first_entry(&vm->freed,
struct amdgpu_bo_va_mapping, list);
list_del(&mapping->list);
+ if (vm->pte_support_ats)
+ init_pte_value = AMDGPU_PTE_SYSTEM;
+
r = amdgpu_vm_bo_update_mapping(adev, NULL, 0, NULL, vm,
mapping->start, mapping->last,
- 0, 0, &f);
+ init_pte_value, 0, &f);
amdgpu_vm_free_mapping(adev, vm, mapping, f);
if (r) {
dma_fence_put(f);
@@ -1933,26 +1981,26 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
}
/**
- * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT
+ * amdgpu_vm_clear_moved - clear moved BOs in the PT
*
* @adev: amdgpu_device pointer
* @vm: requested vm
*
- * Make sure all invalidated BOs are cleared in the PT.
+ * Make sure all moved BOs are cleared in the PT.
* Returns 0 for success.
*
* PTs have to be reserved and mutex must be locked!
*/
-int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
- struct amdgpu_vm *vm, struct amdgpu_sync *sync)
+int amdgpu_vm_clear_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct amdgpu_sync *sync)
{
struct amdgpu_bo_va *bo_va = NULL;
int r = 0;
spin_lock(&vm->status_lock);
- while (!list_empty(&vm->invalidated)) {
- bo_va = list_first_entry(&vm->invalidated,
- struct amdgpu_bo_va, vm_status);
+ while (!list_empty(&vm->moved)) {
+ bo_va = list_first_entry(&vm->moved,
+ struct amdgpu_bo_va, base.vm_status);
spin_unlock(&vm->status_lock);
r = amdgpu_vm_bo_update(adev, bo_va, true);
@@ -1992,16 +2040,17 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
if (bo_va == NULL) {
return NULL;
}
- bo_va->vm = vm;
- bo_va->bo = bo;
+ bo_va->base.vm = vm;
+ bo_va->base.bo = bo;
+ INIT_LIST_HEAD(&bo_va->base.bo_list);
+ INIT_LIST_HEAD(&bo_va->base.vm_status);
+
bo_va->ref_count = 1;
- INIT_LIST_HEAD(&bo_va->bo_list);
INIT_LIST_HEAD(&bo_va->valids);
INIT_LIST_HEAD(&bo_va->invalids);
- INIT_LIST_HEAD(&bo_va->vm_status);
if (bo)
- list_add_tail(&bo_va->bo_list, &bo->va);
+ list_add_tail(&bo_va->base.bo_list, &bo->va);
return bo_va;
}
@@ -2026,7 +2075,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
uint64_t size, uint64_t flags)
{
struct amdgpu_bo_va_mapping *mapping, *tmp;
- struct amdgpu_vm *vm = bo_va->vm;
+ struct amdgpu_bo *bo = bo_va->base.bo;
+ struct amdgpu_vm *vm = bo_va->base.vm;
uint64_t eaddr;
/* validate the parameters */
@@ -2037,7 +2087,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
/* make sure object fit at this offset */
eaddr = saddr + size - 1;
if (saddr >= eaddr ||
- (bo_va->bo && offset + size > amdgpu_bo_size(bo_va->bo)))
+ (bo && offset + size > amdgpu_bo_size(bo)))
return -EINVAL;
saddr /= AMDGPU_GPU_PAGE_SIZE;
@@ -2047,7 +2097,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
if (tmp) {
/* bo and tmp overlap, invalid addr */
dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
- "0x%010Lx-0x%010Lx\n", bo_va->bo, saddr, eaddr,
+ "0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
tmp->start, tmp->last + 1);
return -EINVAL;
}
@@ -2092,7 +2142,8 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
uint64_t size, uint64_t flags)
{
struct amdgpu_bo_va_mapping *mapping;
- struct amdgpu_vm *vm = bo_va->vm;
+ struct amdgpu_bo *bo = bo_va->base.bo;
+ struct amdgpu_vm *vm = bo_va->base.vm;
uint64_t eaddr;
int r;
@@ -2104,7 +2155,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
/* make sure object fit at this offset */
eaddr = saddr + size - 1;
if (saddr >= eaddr ||
- (bo_va->bo && offset + size > amdgpu_bo_size(bo_va->bo)))
+ (bo && offset + size > amdgpu_bo_size(bo)))
return -EINVAL;
/* Allocate all the needed memory */
@@ -2112,7 +2163,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
if (!mapping)
return -ENOMEM;
- r = amdgpu_vm_bo_clear_mappings(adev, bo_va->vm, saddr, size);
+ r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
if (r) {
kfree(mapping);
return r;
@@ -2152,7 +2203,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
uint64_t saddr)
{
struct amdgpu_bo_va_mapping *mapping;
- struct amdgpu_vm *vm = bo_va->vm;
+ struct amdgpu_vm *vm = bo_va->base.vm;
bool valid = true;
saddr /= AMDGPU_GPU_PAGE_SIZE;
@@ -2300,12 +2351,12 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va)
{
struct amdgpu_bo_va_mapping *mapping, *next;
- struct amdgpu_vm *vm = bo_va->vm;
+ struct amdgpu_vm *vm = bo_va->base.vm;
- list_del(&bo_va->bo_list);
+ list_del(&bo_va->base.bo_list);
spin_lock(&vm->status_lock);
- list_del(&bo_va->vm_status);
+ list_del(&bo_va->base.vm_status);
spin_unlock(&vm->status_lock);
list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
@@ -2337,13 +2388,14 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
struct amdgpu_bo *bo)
{
- struct amdgpu_bo_va *bo_va;
+ struct amdgpu_vm_bo_base *bo_base;
- list_for_each_entry(bo_va, &bo->va, bo_list) {
- spin_lock(&bo_va->vm->status_lock);
- if (list_empty(&bo_va->vm_status))
- list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
- spin_unlock(&bo_va->vm->status_lock);
+ list_for_each_entry(bo_base, &bo->va, bo_list) {
+ spin_lock(&bo_base->vm->status_lock);
+ if (list_empty(&bo_base->vm_status))
+ list_add(&bo_base->vm_status,
+ &bo_base->vm->moved);
+ spin_unlock(&bo_base->vm->status_lock);
}
}
@@ -2361,12 +2413,26 @@ static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
}
/**
- * amdgpu_vm_adjust_size - adjust vm size and block size
+ * amdgpu_vm_set_fragment_size - adjust fragment size in PTE
+ *
+ * @adev: amdgpu_device pointer
+ * @fragment_size_default: the default fragment size if it's set auto
+ */
+void amdgpu_vm_set_fragment_size(struct amdgpu_device *adev, uint32_t fragment_size_default)
+{
+ if (amdgpu_vm_fragment_size == -1)
+ adev->vm_manager.fragment_size = fragment_size_default;
+ else
+ adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
+}
+
+/**
+ * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
*
* @adev: amdgpu_device pointer
* @vm_size: the default vm size if it's set auto
*/
-void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size)
+void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size, uint32_t fragment_size_default)
{
/* adjust vm size firstly */
if (amdgpu_vm_size == -1)
@@ -2381,8 +2447,11 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size)
else
adev->vm_manager.block_size = amdgpu_vm_block_size;
- DRM_INFO("vm size is %llu GB, block size is %u-bit\n",
- adev->vm_manager.vm_size, adev->vm_manager.block_size);
+ amdgpu_vm_set_fragment_size(adev, fragment_size_default);
+
+ DRM_INFO("vm size is %llu GB, block size is %u-bit, fragment size is %u-bit\n",
+ adev->vm_manager.vm_size, adev->vm_manager.block_size,
+ adev->vm_manager.fragment_size);
}
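A minimal sketch of the module-parameter override pattern used by amdgpu_vm_set_fragment_size() and amdgpu_vm_adjust_size() above; the parameter values and defaults here are invented for the example.

#include <stdint.h>
#include <stdio.h>

/* Invented module-parameter values; -1 means "pick the default". */
static int ex_vm_fragment_size = -1;
static int ex_vm_block_size = 12;

static uint32_t ex_resolve(int param, uint32_t def)
{
	return param == -1 ? def : (uint32_t)param;
}

int main(void)
{
	uint32_t fragment_size = ex_resolve(ex_vm_fragment_size, 9);
	uint32_t block_size = ex_resolve(ex_vm_block_size, 9);

	printf("block size is %u-bit, fragment size is %u-bit\n",
	       block_size, fragment_size);
	return 0;
}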
/**
@@ -2404,13 +2473,14 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amd_sched_rq *rq;
int r, i;
u64 flags;
+ uint64_t init_pde_value = 0;
vm->va = RB_ROOT;
vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
vm->reserved_vmid[i] = NULL;
spin_lock_init(&vm->status_lock);
- INIT_LIST_HEAD(&vm->invalidated);
+ INIT_LIST_HEAD(&vm->moved);
INIT_LIST_HEAD(&vm->cleared);
INIT_LIST_HEAD(&vm->freed);
@@ -2425,10 +2495,17 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (r)
return r;
- if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE)
+ vm->pte_support_ats = false;
+
+ if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
AMDGPU_VM_USE_CPU_FOR_COMPUTE);
- else
+
+ if (adev->asic_type == CHIP_RAVEN) {
+ vm->pte_support_ats = true;
+ init_pde_value = AMDGPU_PTE_SYSTEM | AMDGPU_PDE_PTE;
+ }
+ } else
vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
AMDGPU_VM_USE_CPU_FOR_GFX);
DRM_DEBUG_DRIVER("VM update mode is %s\n",
@@ -2448,7 +2525,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
r = amdgpu_bo_create(adev, amdgpu_vm_bo_size(adev, 0), align, true,
AMDGPU_GEM_DOMAIN_VRAM,
flags,
- NULL, NULL, &vm->root.bo);
+ NULL, NULL, init_pde_value, &vm->root.bo);
if (r)
goto error_free_sched_entity;
@@ -2457,6 +2534,13 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
goto error_free_root;
vm->last_eviction_counter = atomic64_read(&adev->num_evictions);
+
+ if (vm->use_cpu_for_update) {
+ r = amdgpu_bo_kmap(vm->root.bo, NULL);
+ if (r)
+ goto error_free_root;
+ }
+
amdgpu_bo_unreserve(vm->root.bo);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 936f158bc5ec..ba6691b58ee7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -50,9 +50,6 @@ struct amdgpu_bo_list_entry;
/* PTBs (Page Table Blocks) need to be aligned to 32K */
#define AMDGPU_VM_PTB_ALIGN_SIZE 32768
-/* LOG2 number of continuous pages for the fragment field */
-#define AMDGPU_LOG2_PAGES_PER_FRAG 4
-
#define AMDGPU_PTE_VALID (1ULL << 0)
#define AMDGPU_PTE_SYSTEM (1ULL << 1)
#define AMDGPU_PTE_SNOOPED (1ULL << 2)
@@ -68,6 +65,9 @@ struct amdgpu_bo_list_entry;
/* TILED for VEGA10, reserved for older ASICs */
#define AMDGPU_PTE_PRT (1ULL << 51)
+/* PDE is handled as PTE for VEGA10 */
+#define AMDGPU_PDE_PTE (1ULL << 54)
+
/* VEGA10 only */
#define AMDGPU_PTE_MTYPE(a) ((uint64_t)a << 57)
#define AMDGPU_PTE_MTYPE_MASK AMDGPU_PTE_MTYPE(3ULL)
@@ -94,6 +94,18 @@ struct amdgpu_bo_list_entry;
#define AMDGPU_VM_USE_CPU_FOR_GFX (1 << 0)
#define AMDGPU_VM_USE_CPU_FOR_COMPUTE (1 << 1)
+/* base structure for tracking BO usage in a VM */
+struct amdgpu_vm_bo_base {
+ /* constant after initialization */
+ struct amdgpu_vm *vm;
+ struct amdgpu_bo *bo;
+
+ /* protected by bo being reserved */
+ struct list_head bo_list;
+
+ /* protected by spinlock */
+ struct list_head vm_status;
+};
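To illustrate the new base structure: a standalone sketch of how a derived tracking structure can embed the base and be recovered from a pointer to it, in the spirit of the bo_va changes elsewhere in this patch. The struct layouts and names here are stand-ins, not the kernel definitions.

#include <stddef.h>
#include <stdio.h>

/* Minimal stand-ins for the kernel structures, illustration only. */
struct ex_vm_bo_base {
	void *vm;
	void *bo;
};

struct ex_bo_va {
	struct ex_vm_bo_base base;	/* embedded base */
	unsigned ref_count;
};

#define ex_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct ex_bo_va bo_va = { .ref_count = 1 };
	struct ex_vm_bo_base *base = &bo_va.base;

	/* Recover the derived structure from the shared base pointer, the
	 * way bo->va can now be walked as a list of base entries. */
	struct ex_bo_va *back = ex_container_of(base, struct ex_bo_va, base);

	printf("ref_count = %u\n", back->ref_count);
	return 0;
}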
struct amdgpu_vm_pt {
struct amdgpu_bo *bo;
@@ -112,7 +124,7 @@ struct amdgpu_vm {
spinlock_t status_lock;
/* BOs moved, but not yet updated in the PT */
- struct list_head invalidated;
+ struct list_head moved;
/* BOs cleared in the PT because of a move */
struct list_head cleared;
@@ -135,11 +147,12 @@ struct amdgpu_vm {
u64 client_id;
/* dedicated to vm */
struct amdgpu_vm_id *reserved_vmid[AMDGPU_MAX_VMHUBS];
- /* each VM will map on CSA */
- struct amdgpu_bo_va *csa_bo_va;
/* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
bool use_cpu_for_update;
+
+ /* Flag to indicate ATS support from PTE for GFX9 */
+ bool pte_support_ats;
};
struct amdgpu_vm_id {
@@ -182,6 +195,7 @@ struct amdgpu_vm_manager {
uint32_t num_level;
uint64_t vm_size;
uint32_t block_size;
+ uint32_t fragment_size;
/* vram base address for page table entry */
u64 vram_base_offset;
/* vm pte handling */
@@ -214,15 +228,13 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int (*callback)(void *p, struct amdgpu_bo *bo),
void *param);
-void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
- struct amdgpu_vm *vm);
int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
uint64_t saddr, uint64_t size);
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_sync *sync, struct dma_fence *fence,
struct amdgpu_job *job);
-int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
+int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
unsigned vmid);
void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev);
@@ -231,8 +243,8 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct dma_fence **fence);
-int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- struct amdgpu_sync *sync);
+int amdgpu_vm_clear_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct amdgpu_sync *sync);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
bool clear);
@@ -259,7 +271,10 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
uint64_t saddr, uint64_t size);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va);
-void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size);
+void amdgpu_vm_set_fragment_size(struct amdgpu_device *adev,
+ uint32_t fragment_size_default);
+void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size,
+ uint32_t fragment_size_default);
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
struct amdgpu_job *job);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index a2c59a08b2bd..26e900627971 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -28,6 +28,8 @@
struct amdgpu_vram_mgr {
struct drm_mm mm;
spinlock_t lock;
+ atomic64_t usage;
+ atomic64_t vis_usage;
};
/**
@@ -79,6 +81,27 @@ static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
}
/**
+ * amdgpu_vram_mgr_vis_size - Calculate visible node size
+ *
+ * @adev: amdgpu device structure
+ * @node: MM node structure
+ *
+ * Calculate how many bytes of the MM node are inside visible VRAM
+ */
+static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
+ struct drm_mm_node *node)
+{
+ uint64_t start = node->start << PAGE_SHIFT;
+ uint64_t end = (node->size + node->start) << PAGE_SHIFT;
+
+ if (start >= adev->mc.visible_vram_size)
+ return 0;
+
+ return (end > adev->mc.visible_vram_size ?
+ adev->mc.visible_vram_size : end) - start;
+}
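A standalone sketch of the visible-VRAM clamping done by amdgpu_vram_mgr_vis_size() above, with an assumed 256 MiB visible window and 4 KiB pages.

#include <stdint.h>
#include <stdio.h>

/* Assumed 256 MiB CPU-visible window and 4 KiB pages. */
#define EX_PAGE_SHIFT		12
#define EX_VISIBLE_VRAM_SIZE	(256ULL << 20)

static uint64_t ex_vis_size(uint64_t node_start, uint64_t node_pages)
{
	uint64_t start = node_start << EX_PAGE_SHIFT;
	uint64_t end = (node_start + node_pages) << EX_PAGE_SHIFT;

	if (start >= EX_VISIBLE_VRAM_SIZE)
		return 0;
	return (end > EX_VISIBLE_VRAM_SIZE ?
		EX_VISIBLE_VRAM_SIZE : end) - start;
}

int main(void)
{
	/* A 2 MiB node straddling the end of the visible window: only the
	 * first MiB counts as visible. */
	uint64_t pages = 512;
	uint64_t start = (255ULL << 20) >> EX_PAGE_SHIFT;

	printf("visible bytes: %llu\n",
	       (unsigned long long)ex_vis_size(start, pages));
	return 0;
}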
+
+/**
* amdgpu_vram_mgr_new - allocate new ranges
*
* @man: TTM memory type manager
@@ -93,11 +116,13 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
const struct ttm_place *place,
struct ttm_mem_reg *mem)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
struct amdgpu_vram_mgr *mgr = man->priv;
struct drm_mm *mm = &mgr->mm;
struct drm_mm_node *nodes;
enum drm_mm_insert_mode mode;
unsigned long lpfn, num_nodes, pages_per_node, pages_left;
+ uint64_t usage = 0, vis_usage = 0;
unsigned i;
int r;
@@ -142,6 +167,9 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
if (unlikely(r))
goto error;
+ usage += nodes[i].size << PAGE_SHIFT;
+ vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]);
+
/* Calculate a virtual BO start address to easily check if
* everything is CPU accessible.
*/
@@ -155,6 +183,9 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
}
spin_unlock(&mgr->lock);
+ atomic64_add(usage, &mgr->usage);
+ atomic64_add(vis_usage, &mgr->vis_usage);
+
mem->mm_node = nodes;
return 0;
@@ -181,8 +212,10 @@ error:
static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
struct ttm_mem_reg *mem)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
struct amdgpu_vram_mgr *mgr = man->priv;
struct drm_mm_node *nodes = mem->mm_node;
+ uint64_t usage = 0, vis_usage = 0;
unsigned pages = mem->num_pages;
if (!mem->mm_node)
@@ -192,31 +225,67 @@ static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
while (pages) {
pages -= nodes->size;
drm_mm_remove_node(nodes);
+ usage += nodes->size << PAGE_SHIFT;
+ vis_usage += amdgpu_vram_mgr_vis_size(adev, nodes);
++nodes;
}
spin_unlock(&mgr->lock);
+ atomic64_sub(usage, &mgr->usage);
+ atomic64_sub(vis_usage, &mgr->vis_usage);
+
kfree(mem->mm_node);
mem->mm_node = NULL;
}
/**
+ * amdgpu_vram_mgr_usage - how many bytes are used in this domain
+ *
+ * @man: TTM memory type manager
+ *
+ * Returns how many bytes are used in this domain.
+ */
+uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man)
+{
+ struct amdgpu_vram_mgr *mgr = man->priv;
+
+ return atomic64_read(&mgr->usage);
+}
+
+/**
+ * amdgpu_vram_mgr_vis_usage - how many bytes are used in the visible part
+ *
+ * @man: TTM memory type manager
+ *
+ * Returns how many bytes are used in the visible part of VRAM
+ */
+uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man)
+{
+ struct amdgpu_vram_mgr *mgr = man->priv;
+
+ return atomic64_read(&mgr->vis_usage);
+}
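As a rough illustration of the usage accounting added in this patch (atomic64 adds on allocation, subtracts on free, and the MiB conversion in the debug dump); this sketch uses plain integers instead of atomics.

#include <stdint.h>
#include <stdio.h>

/* Plain counters standing in for the driver's atomic64_t fields. */
static uint64_t ex_usage, ex_vis_usage;

static void ex_account(int64_t bytes, int64_t vis_bytes)
{
	ex_usage += bytes;
	ex_vis_usage += vis_bytes;
}

int main(void)
{
	ex_account(512LL << 20, 64LL << 20);		/* allocation */
	ex_account(-(128LL << 20), -(16LL << 20));	/* free */

	/* Same ">> 20" MiB conversion as the debug dump below. */
	printf("ram usage:%lluMB, vis usage:%lluMB\n",
	       (unsigned long long)(ex_usage >> 20),
	       (unsigned long long)(ex_vis_usage >> 20));
	return 0;
}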
+
+/**
* amdgpu_vram_mgr_debug - dump VRAM table
*
* @man: TTM memory type manager
- * @prefix: text prefix
+ * @printer: DRM printer to use
*
* Dump the table content using printk.
*/
static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man,
- const char *prefix)
+ struct drm_printer *printer)
{
struct amdgpu_vram_mgr *mgr = man->priv;
- struct drm_printer p = drm_debug_printer(prefix);
spin_lock(&mgr->lock);
- drm_mm_print(&mgr->mm, &p);
+ drm_mm_print(&mgr->mm, printer);
spin_unlock(&mgr->lock);
+
+ drm_printf(printer, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n",
+ man->size, amdgpu_vram_mgr_usage(man) >> 20,
+ amdgpu_vram_mgr_vis_usage(man) >> 20);
}
const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func = {
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 37a499ab30eb..567c4a5cf90c 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1824,21 +1824,14 @@ static int cik_common_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- amdgpu_amdkfd_suspend(adev);
-
return cik_common_hw_fini(adev);
}
static int cik_common_resume(void *handle)
{
- int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = cik_common_hw_init(adev);
- if (r)
- return r;
-
- return amdgpu_amdkfd_resume(adev);
+ return cik_common_hw_init(adev);
}
static bool cik_common_is_idle(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index c216e16826c9..f508f4d01e4a 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -342,6 +342,63 @@ static void cik_sdma_rlc_stop(struct amdgpu_device *adev)
}
/**
+ * cik_ctx_switch_enable - enable/disable the async dma engines context switch
+ *
+ * @adev: amdgpu_device pointer
+ * @enable: enable/disable the DMA MEs context switch.
+ *
+ * Halt or unhalt the async dma engines context switch (CIK).
+ */
+static void cik_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
+{
+ u32 f32_cntl, phase_quantum = 0;
+ int i;
+
+ if (amdgpu_sdma_phase_quantum) {
+ unsigned value = amdgpu_sdma_phase_quantum;
+ unsigned unit = 0;
+
+ while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
+ SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) {
+ value = (value + 1) >> 1;
+ unit++;
+ }
+ if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
+ SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) {
+ value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
+ SDMA0_PHASE0_QUANTUM__VALUE__SHIFT);
+ unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
+ SDMA0_PHASE0_QUANTUM__UNIT__SHIFT);
+ WARN_ONCE(1,
+ "clamping sdma_phase_quantum to %uK clock cycles\n",
+ value << unit);
+ }
+ phase_quantum =
+ value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
+ unit << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
+ }
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ f32_cntl = RREG32(mmSDMA0_CNTL + sdma_offsets[i]);
+ if (enable) {
+ f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
+ AUTO_CTXSW_ENABLE, 1);
+ if (amdgpu_sdma_phase_quantum) {
+ WREG32(mmSDMA0_PHASE0_QUANTUM + sdma_offsets[i],
+ phase_quantum);
+ WREG32(mmSDMA0_PHASE1_QUANTUM + sdma_offsets[i],
+ phase_quantum);
+ }
+ } else {
+ f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
+ AUTO_CTXSW_ENABLE, 0);
+ }
+
+ WREG32(mmSDMA0_CNTL + sdma_offsets[i], f32_cntl);
+ }
+}
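For illustration, a standalone sketch of the value/unit encoding loop in cik_ctx_switch_enable() above; the field widths are assumed example values, the real limits come from the SDMA0_PHASE0_QUANTUM register masks.

#include <stdio.h>

/* Assumed field widths for the example. */
#define EX_VALUE_MAX	0xff
#define EX_UNIT_MAX	0xf

int main(void)
{
	unsigned value = 100000;	/* example amdgpu_sdma_phase_quantum */
	unsigned unit = 0;

	/* Halve (rounding up) until the value fits its field, counting the
	 * scaling steps in 'unit', as cik_ctx_switch_enable() does. */
	while (value > EX_VALUE_MAX) {
		value = (value + 1) >> 1;
		unit++;
	}
	if (unit > EX_UNIT_MAX) {
		value = EX_VALUE_MAX;
		unit = EX_UNIT_MAX;
	}
	printf("value=%u unit=%u (~%u clock cycles)\n",
	       value, unit, value << unit);
	return 0;
}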
+
+/**
* cik_sdma_enable - stop the async dma engines
*
* @adev: amdgpu_device pointer
@@ -537,6 +594,8 @@ static int cik_sdma_start(struct amdgpu_device *adev)
/* halt the engine before programming */
cik_sdma_enable(adev, false);
+ /* enable sdma ring preemption */
+ cik_ctx_switch_enable(adev, true);
/* start the gfx rings and rlc compute queues */
r = cik_sdma_gfx_resume(adev);
@@ -984,6 +1043,7 @@ static int cik_sdma_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ cik_ctx_switch_enable(adev, false);
cik_sdma_enable(adev, false);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h b/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h
index 18fd01f3e4b2..003a131bad47 100644
--- a/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h
+++ b/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h
@@ -1,24 +1,25 @@
-
/*
-***************************************************************************************************
-*
-* Trade secret of Advanced Micro Devices, Inc.
-* Copyright (c) 2010 Advanced Micro Devices, Inc. (unpublished)
-*
-* All rights reserved. This notice is intended as a precaution against inadvertent publication and
-* does not imply publication or any waiver of confidentiality. The year included in the foregoing
-* notice is the year of creation of the work.
-*
-***************************************************************************************************
-*/
-/**
-***************************************************************************************************
-* @brief gfx9 Clearstate Definitions
-***************************************************************************************************
-*
-* Do not edit! This is a machine-generated file!
-*
-*/
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
static const unsigned int gfx9_SECT_CONTEXT_def_1[] =
{
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 9f78c03a2e31..4e519dc42916 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -484,134 +484,6 @@ static bool dce_v10_0_is_display_hung(struct amdgpu_device *adev)
return true;
}
-static void dce_v10_0_stop_mc_access(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
-{
- u32 crtc_enabled, tmp;
- int i;
-
- save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
- save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);
-
- /* disable VGA render */
- tmp = RREG32(mmVGA_RENDER_CONTROL);
- tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
- WREG32(mmVGA_RENDER_CONTROL, tmp);
-
- /* blank the display controllers */
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
- CRTC_CONTROL, CRTC_MASTER_EN);
- if (crtc_enabled) {
-#if 0
- u32 frame_count;
- int j;
-
- save->crtc_enabled[i] = true;
- tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) {
- amdgpu_display_vblank_wait(adev, i);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
- tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
- WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
- }
- /* wait for the next frame */
- frame_count = amdgpu_display_vblank_get_counter(adev, i);
- for (j = 0; j < adev->usec_timeout; j++) {
- if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
- break;
- udelay(1);
- }
- tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK) == 0) {
- tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1);
- WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
- }
- tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK) == 0) {
- tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 1);
- WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
- }
-#else
- /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
- tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
- tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
- WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
- save->crtc_enabled[i] = false;
- /* ***** */
-#endif
- } else {
- save->crtc_enabled[i] = false;
- }
- }
-}
-
-static void dce_v10_0_resume_mc_access(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
-{
- u32 tmp, frame_count;
- int i, j;
-
- /* update crtc base addresses */
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
- upper_32_bits(adev->mc.vram_start));
- WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
- upper_32_bits(adev->mc.vram_start));
- WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
- (u32)adev->mc.vram_start);
- WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
- (u32)adev->mc.vram_start);
-
- if (save->crtc_enabled[i]) {
- tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 0) {
- tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 0);
- WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp);
- }
- tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK)) {
- tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0);
- WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
- }
- tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK)) {
- tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 0);
- WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
- }
- for (j = 0; j < adev->usec_timeout; j++) {
- tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING) == 0)
- break;
- udelay(1);
- }
- tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
- tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
- WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
- /* wait for the next frame */
- frame_count = amdgpu_display_vblank_get_counter(adev, i);
- for (j = 0; j < adev->usec_timeout; j++) {
- if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
- break;
- udelay(1);
- }
- }
- }
-
- WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
- WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start));
-
- /* Unlock vga access */
- WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
- mdelay(1);
- WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
-}
-
static void dce_v10_0_set_vga_render_state(struct amdgpu_device *adev,
bool render)
{
@@ -1867,7 +1739,7 @@ static void dce_v10_0_afmt_setmode(struct drm_encoder *encoder,
dce_v10_0_audio_write_sad_regs(encoder);
dce_v10_0_audio_write_latency_fields(encoder, mode);
- err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
if (err < 0) {
DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
return;
@@ -2267,6 +2139,7 @@ static void dce_v10_0_crtc_load_lut(struct drm_crtc *crtc)
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
+ u16 *r, *g, *b;
int i;
u32 tmp;
@@ -2304,11 +2177,14 @@ static void dce_v10_0_crtc_load_lut(struct drm_crtc *crtc)
WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
+ r = crtc->gamma_store;
+ g = r + crtc->gamma_size;
+ b = g + crtc->gamma_size;
for (i = 0; i < 256; i++) {
WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
- (amdgpu_crtc->lut_r[i] << 20) |
- (amdgpu_crtc->lut_g[i] << 10) |
- (amdgpu_crtc->lut_b[i] << 0));
+ ((*r++ & 0xffc0) << 14) |
+ ((*g++ & 0xffc0) << 4) |
+ (*b++ >> 6));
}
tmp = RREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
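A small standalone example of the 10:10:10 LUT packing used in the gamma-store loop above (the same expression appears in the dce_v11_0, dce_v6_0 and dce_v8_0 changes below).

#include <stdint.h>
#include <stdio.h>

/* Pack three 16-bit gamma values into one 10:10:10 LUT word, mirroring the
 * DC_LUT_30_COLOR write above. Only the top 10 bits of each channel are
 * kept. */
static uint32_t ex_pack_lut(uint16_t r, uint16_t g, uint16_t b)
{
	return ((uint32_t)(r & 0xffc0) << 14) |	/* bits 29:20 */
	       ((uint32_t)(g & 0xffc0) << 4) |	/* bits 19:10 */
	       (b >> 6);			/* bits  9:0  */
}

int main(void)
{
	/* Full-scale white saturates all three 10-bit fields: 0x3fffffff. */
	printf("0x%08x\n", ex_pack_lut(0xffff, 0xffff, 0xffff));
	return 0;
}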
@@ -2555,7 +2431,7 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
aobj = gem_to_amdgpu_bo(obj);
ret = amdgpu_bo_reserve(aobj, false);
if (ret != 0) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -2563,7 +2439,7 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
amdgpu_bo_unreserve(aobj);
if (ret) {
DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -2597,7 +2473,7 @@ unpin:
amdgpu_bo_unpin(aobj);
amdgpu_bo_unreserve(aobj);
}
- drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
+ drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo);
}
amdgpu_crtc->cursor_bo = obj;
@@ -2624,15 +2500,6 @@ static int dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, uint32_t size,
struct drm_modeset_acquire_ctx *ctx)
{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- int i;
-
- /* userspace palettes are always correct as is */
- for (i = 0; i < size; i++) {
- amdgpu_crtc->lut_r[i] = red[i] >> 6;
- amdgpu_crtc->lut_g[i] = green[i] >> 6;
- amdgpu_crtc->lut_b[i] = blue[i] >> 6;
- }
dce_v10_0_crtc_load_lut(crtc);
return 0;
@@ -2844,14 +2711,12 @@ static const struct drm_crtc_helper_funcs dce_v10_0_crtc_helper_funcs = {
.mode_set_base_atomic = dce_v10_0_crtc_set_base_atomic,
.prepare = dce_v10_0_crtc_prepare,
.commit = dce_v10_0_crtc_commit,
- .load_lut = dce_v10_0_crtc_load_lut,
.disable = dce_v10_0_crtc_disable,
};
static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index)
{
struct amdgpu_crtc *amdgpu_crtc;
- int i;
amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
(AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
@@ -2869,12 +2734,6 @@ static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index)
adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
- for (i = 0; i < 256; i++) {
- amdgpu_crtc->lut_r[i] = i << 2;
- amdgpu_crtc->lut_g[i] = i << 2;
- amdgpu_crtc->lut_b[i] = i << 2;
- }
-
switch (amdgpu_crtc->crtc_id) {
case 0:
default:
@@ -3025,6 +2884,8 @@ static int dce_v10_0_hw_init(void *handle)
dce_v10_0_init_golden_registers(adev);
+ /* disable vga render */
+ dce_v10_0_set_vga_render_state(adev, false);
/* init dig PHYs, disp eng pll */
amdgpu_atombios_encoder_init_dig(adev);
amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
@@ -3737,7 +3598,6 @@ static void dce_v10_0_encoder_add(struct amdgpu_device *adev,
}
static const struct amdgpu_display_funcs dce_v10_0_display_funcs = {
- .set_vga_render_state = &dce_v10_0_set_vga_render_state,
.bandwidth_update = &dce_v10_0_bandwidth_update,
.vblank_get_counter = &dce_v10_0_vblank_get_counter,
.vblank_wait = &dce_v10_0_vblank_wait,
@@ -3750,8 +3610,6 @@ static const struct amdgpu_display_funcs dce_v10_0_display_funcs = {
.page_flip_get_scanoutpos = &dce_v10_0_crtc_get_scanoutpos,
.add_encoder = &dce_v10_0_encoder_add,
.add_connector = &amdgpu_connector_add,
- .stop_mc_access = &dce_v10_0_stop_mc_access,
- .resume_mc_access = &dce_v10_0_resume_mc_access,
};
static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 4bcf01dc567a..11edc75edaa9 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -499,79 +499,6 @@ static bool dce_v11_0_is_display_hung(struct amdgpu_device *adev)
return true;
}
-static void dce_v11_0_stop_mc_access(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
-{
- u32 crtc_enabled, tmp;
- int i;
-
- save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
- save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);
-
- /* disable VGA render */
- tmp = RREG32(mmVGA_RENDER_CONTROL);
- tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
- WREG32(mmVGA_RENDER_CONTROL, tmp);
-
- /* blank the display controllers */
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
- CRTC_CONTROL, CRTC_MASTER_EN);
- if (crtc_enabled) {
-#if 1
- save->crtc_enabled[i] = true;
- tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) {
- /*it is correct only for RGB ; black is 0*/
- WREG32(mmCRTC_BLANK_DATA_COLOR + crtc_offsets[i], 0);
- tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
- WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
- }
-#else
- /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
- tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
- tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
- WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
- save->crtc_enabled[i] = false;
- /* ***** */
-#endif
- } else {
- save->crtc_enabled[i] = false;
- }
- }
-}
-
-static void dce_v11_0_resume_mc_access(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
-{
- u32 tmp;
- int i;
-
- /* update crtc base addresses */
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
- upper_32_bits(adev->mc.vram_start));
- WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
- (u32)adev->mc.vram_start);
-
- if (save->crtc_enabled[i]) {
- tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
- tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0);
- WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
- }
- }
-
- WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
- WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start));
-
- /* Unlock vga access */
- WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
- mdelay(1);
- WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
-}
-
static void dce_v11_0_set_vga_render_state(struct amdgpu_device *adev,
bool render)
{
@@ -1851,7 +1778,7 @@ static void dce_v11_0_afmt_setmode(struct drm_encoder *encoder,
dce_v11_0_audio_write_sad_regs(encoder);
dce_v11_0_audio_write_latency_fields(encoder, mode);
- err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
if (err < 0) {
DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
return;
@@ -2251,6 +2178,7 @@ static void dce_v11_0_crtc_load_lut(struct drm_crtc *crtc)
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
+ u16 *r, *g, *b;
int i;
u32 tmp;
@@ -2282,11 +2210,14 @@ static void dce_v11_0_crtc_load_lut(struct drm_crtc *crtc)
WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
+ r = crtc->gamma_store;
+ g = r + crtc->gamma_size;
+ b = g + crtc->gamma_size;
for (i = 0; i < 256; i++) {
WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
- (amdgpu_crtc->lut_r[i] << 20) |
- (amdgpu_crtc->lut_g[i] << 10) |
- (amdgpu_crtc->lut_b[i] << 0));
+ ((*r++ & 0xffc0) << 14) |
+ ((*g++ & 0xffc0) << 4) |
+ (*b++ >> 6));
}
tmp = RREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
@@ -2575,7 +2506,7 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
aobj = gem_to_amdgpu_bo(obj);
ret = amdgpu_bo_reserve(aobj, false);
if (ret != 0) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -2583,7 +2514,7 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
amdgpu_bo_unreserve(aobj);
if (ret) {
DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -2617,7 +2548,7 @@ unpin:
amdgpu_bo_unpin(aobj);
amdgpu_bo_unreserve(aobj);
}
- drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
+ drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo);
}
amdgpu_crtc->cursor_bo = obj;
@@ -2644,15 +2575,6 @@ static int dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, uint32_t size,
struct drm_modeset_acquire_ctx *ctx)
{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- int i;
-
- /* userspace palettes are always correct as is */
- for (i = 0; i < size; i++) {
- amdgpu_crtc->lut_r[i] = red[i] >> 6;
- amdgpu_crtc->lut_g[i] = green[i] >> 6;
- amdgpu_crtc->lut_b[i] = blue[i] >> 6;
- }
dce_v11_0_crtc_load_lut(crtc);
return 0;
@@ -2892,14 +2814,12 @@ static const struct drm_crtc_helper_funcs dce_v11_0_crtc_helper_funcs = {
.mode_set_base_atomic = dce_v11_0_crtc_set_base_atomic,
.prepare = dce_v11_0_crtc_prepare,
.commit = dce_v11_0_crtc_commit,
- .load_lut = dce_v11_0_crtc_load_lut,
.disable = dce_v11_0_crtc_disable,
};
static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index)
{
struct amdgpu_crtc *amdgpu_crtc;
- int i;
amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
(AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
@@ -2917,12 +2837,6 @@ static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index)
adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
- for (i = 0; i < 256; i++) {
- amdgpu_crtc->lut_r[i] = i << 2;
- amdgpu_crtc->lut_g[i] = i << 2;
- amdgpu_crtc->lut_b[i] = i << 2;
- }
-
switch (amdgpu_crtc->crtc_id) {
case 0:
default:
@@ -3086,6 +3000,8 @@ static int dce_v11_0_hw_init(void *handle)
dce_v11_0_init_golden_registers(adev);
+ /* disable vga render */
+ dce_v11_0_set_vga_render_state(adev, false);
/* init dig PHYs, disp eng pll */
amdgpu_atombios_crtc_powergate_init(adev);
amdgpu_atombios_encoder_init_dig(adev);
@@ -3806,7 +3722,6 @@ static void dce_v11_0_encoder_add(struct amdgpu_device *adev,
}
static const struct amdgpu_display_funcs dce_v11_0_display_funcs = {
- .set_vga_render_state = &dce_v11_0_set_vga_render_state,
.bandwidth_update = &dce_v11_0_bandwidth_update,
.vblank_get_counter = &dce_v11_0_vblank_get_counter,
.vblank_wait = &dce_v11_0_vblank_wait,
@@ -3819,8 +3734,6 @@ static const struct amdgpu_display_funcs dce_v11_0_display_funcs = {
.page_flip_get_scanoutpos = &dce_v11_0_crtc_get_scanoutpos,
.add_encoder = &dce_v11_0_encoder_add,
.add_connector = &amdgpu_connector_add,
- .stop_mc_access = &dce_v11_0_stop_mc_access,
- .resume_mc_access = &dce_v11_0_resume_mc_access,
};
static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index fd134a4629d7..a51e35f824a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -42,6 +42,7 @@
#include "dce/dce_6_0_d.h"
#include "dce/dce_6_0_sh_mask.h"
#include "gca/gfx_7_2_enum.h"
+#include "dce_v6_0.h"
#include "si_enums.h"
static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev);
@@ -392,117 +393,6 @@ static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
return mmDC_GPIO_HPD_A;
}
-static u32 evergreen_get_vblank_counter(struct amdgpu_device* adev, int crtc)
-{
- if (crtc >= adev->mode_info.num_crtc)
- return 0;
- else
- return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
-}
-
-static void dce_v6_0_stop_mc_access(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
-{
- u32 crtc_enabled, tmp, frame_count;
- int i, j;
-
- save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
- save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);
-
- /* disable VGA render */
- WREG32(mmVGA_RENDER_CONTROL, 0);
-
- /* blank the display controllers */
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK;
- if (crtc_enabled) {
- save->crtc_enabled[i] = true;
- tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
-
- if (!(tmp & CRTC_BLANK_CONTROL__CRTC_BLANK_DATA_EN_MASK)) {
- dce_v6_0_vblank_wait(adev, i);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
- tmp |= CRTC_BLANK_CONTROL__CRTC_BLANK_DATA_EN_MASK;
- WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
- }
- /* wait for the next frame */
- frame_count = evergreen_get_vblank_counter(adev, i);
- for (j = 0; j < adev->usec_timeout; j++) {
- if (evergreen_get_vblank_counter(adev, i) != frame_count)
- break;
- udelay(1);
- }
-
- /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
- tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
- tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK;
- WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
- save->crtc_enabled[i] = false;
- /* ***** */
- } else {
- save->crtc_enabled[i] = false;
- }
- }
-}
-
-static void dce_v6_0_resume_mc_access(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
-{
- u32 tmp;
- int i, j;
-
- /* update crtc base addresses */
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
- upper_32_bits(adev->mc.vram_start));
- WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
- upper_32_bits(adev->mc.vram_start));
- WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
- (u32)adev->mc.vram_start);
- WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
- (u32)adev->mc.vram_start);
- }
-
- WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
- WREG32(mmVGA_MEMORY_BASE_ADDRESS, (u32)adev->mc.vram_start);
-
- /* unlock regs and wait for update */
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- if (save->crtc_enabled[i]) {
- tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]);
- if ((tmp & 0x7) != 0) {
- tmp &= ~0x7;
- WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp);
- }
- tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
- if (tmp & GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK) {
- tmp &= ~GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK;
- WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
- }
- tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
- if (tmp & 1) {
- tmp &= ~1;
- WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
- }
- for (j = 0; j < adev->usec_timeout; j++) {
- tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
- if ((tmp & GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING_MASK) == 0)
- break;
- udelay(1);
- }
- }
- }
-
- /* Unlock vga access */
- WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
- mdelay(1);
- WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
-
-}
-
static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
bool render)
{
@@ -1597,7 +1487,7 @@ static void dce_v6_0_audio_set_avi_infoframe(struct drm_encoder *encoder,
ssize_t err;
u32 tmp;
- err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
if (err < 0) {
DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
return;
@@ -2182,6 +2072,7 @@ static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
+ u16 *r, *g, *b;
int i;
DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
@@ -2211,11 +2102,14 @@ static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
+ r = crtc->gamma_store;
+ g = r + crtc->gamma_size;
+ b = g + crtc->gamma_size;
for (i = 0; i < 256; i++) {
WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
- (amdgpu_crtc->lut_r[i] << 20) |
- (amdgpu_crtc->lut_g[i] << 10) |
- (amdgpu_crtc->lut_b[i] << 0));
+ ((*r++ & 0xffc0) << 14) |
+ ((*g++ & 0xffc0) << 4) |
+ (*b++ >> 6));
}
WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
@@ -2428,7 +2322,7 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
aobj = gem_to_amdgpu_bo(obj);
ret = amdgpu_bo_reserve(aobj, false);
if (ret != 0) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -2436,7 +2330,7 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
amdgpu_bo_unreserve(aobj);
if (ret) {
DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -2470,7 +2364,7 @@ unpin:
amdgpu_bo_unpin(aobj);
amdgpu_bo_unreserve(aobj);
}
- drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
+ drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo);
}
amdgpu_crtc->cursor_bo = obj;
@@ -2496,15 +2390,6 @@ static int dce_v6_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, uint32_t size,
struct drm_modeset_acquire_ctx *ctx)
{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- int i;
-
- /* userspace palettes are always correct as is */
- for (i = 0; i < size; i++) {
- amdgpu_crtc->lut_r[i] = red[i] >> 6;
- amdgpu_crtc->lut_g[i] = green[i] >> 6;
- amdgpu_crtc->lut_b[i] = blue[i] >> 6;
- }
dce_v6_0_crtc_load_lut(crtc);
return 0;
@@ -2712,14 +2597,12 @@ static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = {
.mode_set_base_atomic = dce_v6_0_crtc_set_base_atomic,
.prepare = dce_v6_0_crtc_prepare,
.commit = dce_v6_0_crtc_commit,
- .load_lut = dce_v6_0_crtc_load_lut,
.disable = dce_v6_0_crtc_disable,
};
static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
{
struct amdgpu_crtc *amdgpu_crtc;
- int i;
amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
(AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
@@ -2737,12 +2620,6 @@ static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
- for (i = 0; i < 256; i++) {
- amdgpu_crtc->lut_r[i] = i << 2;
- amdgpu_crtc->lut_g[i] = i << 2;
- amdgpu_crtc->lut_b[i] = i << 2;
- }
-
amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
@@ -2873,6 +2750,8 @@ static int dce_v6_0_hw_init(void *handle)
int i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ /* disable vga render */
+ dce_v6_0_set_vga_render_state(adev, false);
/* init dig PHYs, disp eng pll */
amdgpu_atombios_encoder_init_dig(adev);
amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
@@ -3525,7 +3404,6 @@ static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
}
static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
- .set_vga_render_state = &dce_v6_0_set_vga_render_state,
.bandwidth_update = &dce_v6_0_bandwidth_update,
.vblank_get_counter = &dce_v6_0_vblank_get_counter,
.vblank_wait = &dce_v6_0_vblank_wait,
@@ -3538,8 +3416,6 @@ static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
.page_flip_get_scanoutpos = &dce_v6_0_crtc_get_scanoutpos,
.add_encoder = &dce_v6_0_encoder_add,
.add_connector = &amdgpu_connector_add,
- .stop_mc_access = &dce_v6_0_stop_mc_access,
- .resume_mc_access = &dce_v6_0_resume_mc_access,
};
static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index a9e869554627..9cf14b8b2db9 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -419,81 +419,6 @@ static bool dce_v8_0_is_display_hung(struct amdgpu_device *adev)
return true;
}
-static void dce_v8_0_stop_mc_access(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
-{
- u32 crtc_enabled, tmp;
- int i;
-
- save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
- save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);
-
- /* disable VGA render */
- tmp = RREG32(mmVGA_RENDER_CONTROL);
- tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
- WREG32(mmVGA_RENDER_CONTROL, tmp);
-
- /* blank the display controllers */
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
- CRTC_CONTROL, CRTC_MASTER_EN);
- if (crtc_enabled) {
-#if 1
- save->crtc_enabled[i] = true;
- tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) {
- /*it is correct only for RGB ; black is 0*/
- WREG32(mmCRTC_BLANK_DATA_COLOR + crtc_offsets[i], 0);
- tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
- WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
- }
- mdelay(20);
-#else
- /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
- tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
- tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
- WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
- save->crtc_enabled[i] = false;
- /* ***** */
-#endif
- } else {
- save->crtc_enabled[i] = false;
- }
- }
-}
-
-static void dce_v8_0_resume_mc_access(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
-{
- u32 tmp;
- int i;
-
- /* update crtc base addresses */
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
- upper_32_bits(adev->mc.vram_start));
- WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
- (u32)adev->mc.vram_start);
-
- if (save->crtc_enabled[i]) {
- tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
- tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0);
- WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
- }
- mdelay(20);
- }
-
- WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
- WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start));
-
- /* Unlock vga access */
- WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
- mdelay(1);
- WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
-}
-
static void dce_v8_0_set_vga_render_state(struct amdgpu_device *adev,
bool render)
{
@@ -1750,7 +1675,7 @@ static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
dce_v8_0_audio_write_sad_regs(encoder);
dce_v8_0_audio_write_latency_fields(encoder, mode);
- err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
if (err < 0) {
DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
return;
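The extra argument drm_hdmi_avi_infoframe_from_display_mode() takes in the + line above is a sink-capability flag (is_hdmi2_sink in this kernel generation); when true the helper may pick HDMI 2.0-only VICs, and passing false, as the DCE code does here, keeps the earlier HDMI 1.4 behaviour. A minimal sketch of the updated call, assuming only the three-argument signature shown in the hunk:

	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
	if (err < 0) {
		DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
		return;
	}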
@@ -2124,6 +2049,7 @@ static void dce_v8_0_crtc_load_lut(struct drm_crtc *crtc)
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
+ u16 *r, *g, *b;
int i;
DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
@@ -2153,11 +2079,14 @@ static void dce_v8_0_crtc_load_lut(struct drm_crtc *crtc)
WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
+ r = crtc->gamma_store;
+ g = r + crtc->gamma_size;
+ b = g + crtc->gamma_size;
for (i = 0; i < 256; i++) {
WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
- (amdgpu_crtc->lut_r[i] << 20) |
- (amdgpu_crtc->lut_g[i] << 10) |
- (amdgpu_crtc->lut_b[i] << 0));
+ ((*r++ & 0xffc0) << 14) |
+ ((*g++ & 0xffc0) << 4) |
+ (*b++ >> 6));
}
WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
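The rewritten load_lut no longer consults the per-CRTC lut_r/g/b caches: it walks the 16-bit-per-channel ramp in crtc->gamma_store and packs the top 10 bits of each channel into the 30-bit DC_LUT_30_COLOR word, with red in bits 29:20, green in 19:10 and blue in 9:0 (note that "(*r & 0xffc0) << 14" is just "(r >> 6) << 20" written to avoid the intermediate truncation). A small standalone illustration of that packing, not driver code:

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative only: mirrors the packing in dce_v{6,8}_0_crtc_load_lut.
	 * A 16-bit gamma entry is reduced to its top 10 bits and packed as
	 * red[29:20] | green[19:10] | blue[9:0].
	 */
	static uint32_t pack_lut_30(uint16_t r, uint16_t g, uint16_t b)
	{
		return ((uint32_t)(r & 0xffc0) << 14) |	/* == (r >> 6) << 20 */
		       ((uint32_t)(g & 0xffc0) << 4)  |	/* == (g >> 6) << 10 */
		       ((uint32_t)b >> 6);
	}

	int main(void)
	{
		/* full-scale white: 0xffff per channel -> 0x3fffffff */
		printf("0x%08x\n", pack_lut_30(0xffff, 0xffff, 0xffff));
		return 0;
	}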
@@ -2406,7 +2335,7 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
aobj = gem_to_amdgpu_bo(obj);
ret = amdgpu_bo_reserve(aobj, false);
if (ret != 0) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -2414,7 +2343,7 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
amdgpu_bo_unreserve(aobj);
if (ret) {
DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -2448,7 +2377,7 @@ unpin:
amdgpu_bo_unpin(aobj);
amdgpu_bo_unreserve(aobj);
}
- drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
+ drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo);
}
amdgpu_crtc->cursor_bo = obj;
@@ -2475,15 +2404,6 @@ static int dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, uint32_t size,
struct drm_modeset_acquire_ctx *ctx)
{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- int i;
-
- /* userspace palettes are always correct as is */
- for (i = 0; i < size; i++) {
- amdgpu_crtc->lut_r[i] = red[i] >> 6;
- amdgpu_crtc->lut_g[i] = green[i] >> 6;
- amdgpu_crtc->lut_b[i] = blue[i] >> 6;
- }
dce_v8_0_crtc_load_lut(crtc);
return 0;
@@ -2702,14 +2622,12 @@ static const struct drm_crtc_helper_funcs dce_v8_0_crtc_helper_funcs = {
.mode_set_base_atomic = dce_v8_0_crtc_set_base_atomic,
.prepare = dce_v8_0_crtc_prepare,
.commit = dce_v8_0_crtc_commit,
- .load_lut = dce_v8_0_crtc_load_lut,
.disable = dce_v8_0_crtc_disable,
};
static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index)
{
struct amdgpu_crtc *amdgpu_crtc;
- int i;
amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
(AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
@@ -2727,12 +2645,6 @@ static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index)
adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
- for (i = 0; i < 256; i++) {
- amdgpu_crtc->lut_r[i] = i << 2;
- amdgpu_crtc->lut_g[i] = i << 2;
- amdgpu_crtc->lut_b[i] = i << 2;
- }
-
amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
@@ -2870,6 +2782,8 @@ static int dce_v8_0_hw_init(void *handle)
int i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ /* disable vga render */
+ dce_v8_0_set_vga_render_state(adev, false);
/* init dig PHYs, disp eng pll */
amdgpu_atombios_encoder_init_dig(adev);
amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
@@ -3574,7 +3488,6 @@ static void dce_v8_0_encoder_add(struct amdgpu_device *adev,
}
static const struct amdgpu_display_funcs dce_v8_0_display_funcs = {
- .set_vga_render_state = &dce_v8_0_set_vga_render_state,
.bandwidth_update = &dce_v8_0_bandwidth_update,
.vblank_get_counter = &dce_v8_0_vblank_get_counter,
.vblank_wait = &dce_v8_0_vblank_wait,
@@ -3587,8 +3500,6 @@ static const struct amdgpu_display_funcs dce_v8_0_display_funcs = {
.page_flip_get_scanoutpos = &dce_v8_0_crtc_get_scanoutpos,
.add_encoder = &dce_v8_0_encoder_add,
.add_connector = &amdgpu_connector_add,
- .stop_mc_access = &dce_v8_0_stop_mc_access,
- .resume_mc_access = &dce_v8_0_resume_mc_access,
};
static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index 90bb08309a53..b9ee9073cb0d 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -95,62 +95,6 @@ static u32 dce_virtual_hpd_get_gpio_reg(struct amdgpu_device *adev)
return 0;
}
-static void dce_virtual_stop_mc_access(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
-{
- switch (adev->asic_type) {
-#ifdef CONFIG_DRM_AMDGPU_SI
- case CHIP_TAHITI:
- case CHIP_PITCAIRN:
- case CHIP_VERDE:
- case CHIP_OLAND:
- dce_v6_0_disable_dce(adev);
- break;
-#endif
-#ifdef CONFIG_DRM_AMDGPU_CIK
- case CHIP_BONAIRE:
- case CHIP_HAWAII:
- case CHIP_KAVERI:
- case CHIP_KABINI:
- case CHIP_MULLINS:
- dce_v8_0_disable_dce(adev);
- break;
-#endif
- case CHIP_FIJI:
- case CHIP_TONGA:
- dce_v10_0_disable_dce(adev);
- break;
- case CHIP_CARRIZO:
- case CHIP_STONEY:
- case CHIP_POLARIS10:
- case CHIP_POLARIS11:
- case CHIP_POLARIS12:
- dce_v11_0_disable_dce(adev);
- break;
- case CHIP_TOPAZ:
-#ifdef CONFIG_DRM_AMDGPU_SI
- case CHIP_HAINAN:
-#endif
- /* no DCE */
- return;
- default:
- DRM_ERROR("Virtual display unsupported ASIC type: 0x%X\n", adev->asic_type);
- }
-
- return;
-}
-static void dce_virtual_resume_mc_access(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
-{
- return;
-}
-
-static void dce_virtual_set_vga_render_state(struct amdgpu_device *adev,
- bool render)
-{
- return;
-}
-
/**
* dce_virtual_bandwidth_update - program display watermarks
*
@@ -168,16 +112,6 @@ static int dce_virtual_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
u16 *green, u16 *blue, uint32_t size,
struct drm_modeset_acquire_ctx *ctx)
{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- int i;
-
- /* userspace palettes are always correct as is */
- for (i = 0; i < size; i++) {
- amdgpu_crtc->lut_r[i] = red[i] >> 6;
- amdgpu_crtc->lut_g[i] = green[i] >> 6;
- amdgpu_crtc->lut_b[i] = blue[i] >> 6;
- }
-
return 0;
}
@@ -289,11 +223,6 @@ static int dce_virtual_crtc_set_base(struct drm_crtc *crtc, int x, int y,
return 0;
}
-static void dce_virtual_crtc_load_lut(struct drm_crtc *crtc)
-{
- return;
-}
-
static int dce_virtual_crtc_set_base_atomic(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y, enum mode_set_atomic state)
@@ -309,14 +238,12 @@ static const struct drm_crtc_helper_funcs dce_virtual_crtc_helper_funcs = {
.mode_set_base_atomic = dce_virtual_crtc_set_base_atomic,
.prepare = dce_virtual_crtc_prepare,
.commit = dce_virtual_crtc_commit,
- .load_lut = dce_virtual_crtc_load_lut,
.disable = dce_virtual_crtc_disable,
};
static int dce_virtual_crtc_init(struct amdgpu_device *adev, int index)
{
struct amdgpu_crtc *amdgpu_crtc;
- int i;
amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
(AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
@@ -329,12 +256,6 @@ static int dce_virtual_crtc_init(struct amdgpu_device *adev, int index)
amdgpu_crtc->crtc_id = index;
adev->mode_info.crtcs[index] = amdgpu_crtc;
- for (i = 0; i < 256; i++) {
- amdgpu_crtc->lut_r[i] = i << 2;
- amdgpu_crtc->lut_g[i] = i << 2;
- amdgpu_crtc->lut_b[i] = i << 2;
- }
-
amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
amdgpu_crtc->encoder = NULL;
amdgpu_crtc->connector = NULL;
@@ -522,6 +443,47 @@ static int dce_virtual_sw_fini(void *handle)
static int dce_virtual_hw_init(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ switch (adev->asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_SI
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ case CHIP_OLAND:
+ dce_v6_0_disable_dce(adev);
+ break;
+#endif
+#ifdef CONFIG_DRM_AMDGPU_CIK
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ case CHIP_KAVERI:
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ dce_v8_0_disable_dce(adev);
+ break;
+#endif
+ case CHIP_FIJI:
+ case CHIP_TONGA:
+ dce_v10_0_disable_dce(adev);
+ break;
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
+ dce_v11_0_disable_dce(adev);
+ break;
+ case CHIP_TOPAZ:
+#ifdef CONFIG_DRM_AMDGPU_SI
+ case CHIP_HAINAN:
+#endif
+ /* no DCE */
+ break;
+ case CHIP_VEGA10:
+ break;
+ default:
+ DRM_ERROR("Virtual display unsupported ASIC type: 0x%X\n", adev->asic_type);
+ }
return 0;
}
@@ -677,7 +639,6 @@ static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
}
static const struct amdgpu_display_funcs dce_virtual_display_funcs = {
- .set_vga_render_state = &dce_virtual_set_vga_render_state,
.bandwidth_update = &dce_virtual_bandwidth_update,
.vblank_get_counter = &dce_virtual_vblank_get_counter,
.vblank_wait = &dce_virtual_vblank_wait,
@@ -690,8 +651,6 @@ static const struct amdgpu_display_funcs dce_virtual_display_funcs = {
.page_flip_get_scanoutpos = &dce_virtual_crtc_get_scanoutpos,
.add_encoder = NULL,
.add_connector = NULL,
- .stop_mc_access = &dce_virtual_stop_mc_access,
- .resume_mc_access = &dce_virtual_resume_mc_access,
};
static void dce_virtual_set_display_funcs(struct amdgpu_device *adev)
@@ -809,7 +768,7 @@ static const struct amdgpu_irq_src_funcs dce_virtual_crtc_irq_funcs = {
static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev)
{
- adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
+ adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VBLANK6 + 1;
adev->crtc_irq.funcs = &dce_virtual_crtc_irq_funcs;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 5173ca1fd159..d228f5a99044 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -1573,7 +1573,7 @@ static void gfx_v6_0_gpu_init(struct amdgpu_device *adev)
static void gfx_v6_0_scratch_init(struct amdgpu_device *adev)
{
- adev->gfx.scratch.num_reg = 7;
+ adev->gfx.scratch.num_reg = 8;
adev->gfx.scratch.reg_base = mmSCRATCH_REG0;
adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
}
@@ -2217,40 +2217,9 @@ static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev)
{
- int r;
-
- if (adev->gfx.rlc.save_restore_obj) {
- r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, true);
- if (unlikely(r != 0))
- dev_warn(adev->dev, "(%d) reserve RLC sr bo failed\n", r);
- amdgpu_bo_unpin(adev->gfx.rlc.save_restore_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
-
- amdgpu_bo_unref(&adev->gfx.rlc.save_restore_obj);
- adev->gfx.rlc.save_restore_obj = NULL;
- }
-
- if (adev->gfx.rlc.clear_state_obj) {
- r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
- if (unlikely(r != 0))
- dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r);
- amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
-
- amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
- adev->gfx.rlc.clear_state_obj = NULL;
- }
-
- if (adev->gfx.rlc.cp_table_obj) {
- r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, true);
- if (unlikely(r != 0))
- dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
- amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
-
- amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj);
- adev->gfx.rlc.cp_table_obj = NULL;
- }
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL);
}
static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
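Across the GFX hunks below, the open-coded create/reserve/pin/kmap sequences and their error unwinding collapse into a single amdgpu_bo_create_reserved() call at init time, paired with amdgpu_bo_free_kernel() at teardown. A minimal sketch of that paired pattern, assuming only the signatures visible in these hunks (bo, gpu_addr, cpu_ptr and size are placeholder names, not fields from the driver):

	r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, &gpu_addr, (void **)&cpu_ptr);
	if (r) {
		dev_warn(adev->dev, "(%d) bo create/pin/map failed\n", r);
		return r;
	}
	/* the BO is left reserved and mapped, so the caller can fill it ... */
	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unreserve(bo);

	/* ... and the matching _fini path becomes a single call: */
	amdgpu_bo_free_kernel(&bo, NULL, NULL);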
@@ -2273,43 +2242,23 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
if (src_ptr) {
/* save restore block */
- if (adev->gfx.rlc.save_restore_obj == NULL) {
- r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, NULL,
- &adev->gfx.rlc.save_restore_obj);
-
- if (r) {
- dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
- return r;
- }
- }
-
- r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
- if (unlikely(r != 0)) {
- gfx_v6_0_rlc_fini(adev);
- return r;
- }
- r = amdgpu_bo_pin(adev->gfx.rlc.save_restore_obj, AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.save_restore_gpu_addr);
+ r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.save_restore_obj,
+ &adev->gfx.rlc.save_restore_gpu_addr,
+ (void **)&adev->gfx.rlc.sr_ptr);
if (r) {
- amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
- dev_warn(adev->dev, "(%d) pin RLC sr bo failed\n", r);
+ dev_warn(adev->dev, "(%d) create RLC sr bo failed\n",
+ r);
gfx_v6_0_rlc_fini(adev);
return r;
}
- r = amdgpu_bo_kmap(adev->gfx.rlc.save_restore_obj, (void **)&adev->gfx.rlc.sr_ptr);
- if (r) {
- dev_warn(adev->dev, "(%d) map RLC sr bo failed\n", r);
- gfx_v6_0_rlc_fini(adev);
- return r;
- }
/* write the sr buffer */
dst_ptr = adev->gfx.rlc.sr_ptr;
for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
dst_ptr[i] = cpu_to_le32(src_ptr[i]);
+
amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
}
@@ -2319,39 +2268,17 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
adev->gfx.rlc.clear_state_size = gfx_v6_0_get_csb_size(adev);
dws = adev->gfx.rlc.clear_state_size + (256 / 4);
- if (adev->gfx.rlc.clear_state_obj == NULL) {
- r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, NULL,
- &adev->gfx.rlc.clear_state_obj);
-
- if (r) {
- dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
- gfx_v6_0_rlc_fini(adev);
- return r;
- }
- }
- r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
- if (unlikely(r != 0)) {
- gfx_v6_0_rlc_fini(adev);
- return r;
- }
- r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.clear_state_gpu_addr);
+ r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.clear_state_obj,
+ &adev->gfx.rlc.clear_state_gpu_addr,
+ (void **)&adev->gfx.rlc.cs_ptr);
if (r) {
- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
- dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r);
+ dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
gfx_v6_0_rlc_fini(adev);
return r;
}
- r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr);
- if (r) {
- dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r);
- gfx_v6_0_rlc_fini(adev);
- return r;
- }
/* set up the cs buffer */
dst_ptr = adev->gfx.rlc.cs_ptr;
reg_list_mc_addr = adev->gfx.rlc.clear_state_gpu_addr + 256;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 37b45e4403d1..00868764a0dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -1823,7 +1823,7 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
}
/**
- * gmc_v7_0_init_compute_vmid - gart enable
+ * gfx_v7_0_init_compute_vmid - gart enable
*
* @adev: amdgpu_device pointer
*
@@ -1833,7 +1833,7 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
#define DEFAULT_SH_MEM_BASES (0x6000)
#define FIRST_COMPUTE_VMID (8)
#define LAST_COMPUTE_VMID (16)
-static void gmc_v7_0_init_compute_vmid(struct amdgpu_device *adev)
+static void gfx_v7_0_init_compute_vmid(struct amdgpu_device *adev)
{
int i;
uint32_t sh_mem_config;
@@ -1921,6 +1921,7 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
ELEMENT_SIZE, 1);
sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
INDEX_STRIDE, 3);
+ WREG32(mmSH_STATIC_MEM_CONFIG, sh_static_mem_cfg);
mutex_lock(&adev->srbm_mutex);
for (i = 0; i < adev->vm_manager.id_mgr[0].num_ids; i++) {
@@ -1934,12 +1935,11 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
WREG32(mmSH_MEM_APE1_BASE, 1);
WREG32(mmSH_MEM_APE1_LIMIT, 0);
WREG32(mmSH_MEM_BASES, sh_mem_base);
- WREG32(mmSH_STATIC_MEM_CONFIG, sh_static_mem_cfg);
}
cik_srbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
- gmc_v7_0_init_compute_vmid(adev);
+ gfx_v7_0_init_compute_vmid(adev);
WREG32(mmSX_DEBUG_1, 0x20);
@@ -2021,7 +2021,7 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
*/
static void gfx_v7_0_scratch_init(struct amdgpu_device *adev)
{
- adev->gfx.scratch.num_reg = 7;
+ adev->gfx.scratch.num_reg = 8;
adev->gfx.scratch.reg_base = mmSCRATCH_REG0;
adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
}
@@ -2774,39 +2774,18 @@ static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev)
*/
static void gfx_v7_0_cp_compute_fini(struct amdgpu_device *adev)
{
- int i, r;
+ int i;
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
- if (ring->mqd_obj) {
- r = amdgpu_bo_reserve(ring->mqd_obj, true);
- if (unlikely(r != 0))
- dev_warn(adev->dev, "(%d) reserve MQD bo failed\n", r);
-
- amdgpu_bo_unpin(ring->mqd_obj);
- amdgpu_bo_unreserve(ring->mqd_obj);
-
- amdgpu_bo_unref(&ring->mqd_obj);
- ring->mqd_obj = NULL;
- }
+ amdgpu_bo_free_kernel(&ring->mqd_obj, NULL, NULL);
}
}
static void gfx_v7_0_mec_fini(struct amdgpu_device *adev)
{
- int r;
-
- if (adev->gfx.mec.hpd_eop_obj) {
- r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, true);
- if (unlikely(r != 0))
- dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
- amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
- amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
-
- amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj);
- adev->gfx.mec.hpd_eop_obj = NULL;
- }
+ amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
}
static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
@@ -2823,33 +2802,14 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
/* allocate space for ALL pipes (even the ones we don't own) */
mec_hpd_size = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec
* GFX7_MEC_HPD_SIZE * 2;
- if (adev->gfx.mec.hpd_eop_obj == NULL) {
- r = amdgpu_bo_create(adev,
- mec_hpd_size,
- PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
- &adev->gfx.mec.hpd_eop_obj);
- if (r) {
- dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
- return r;
- }
- }
- r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
- if (unlikely(r != 0)) {
- gfx_v7_0_mec_fini(adev);
- return r;
- }
- r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT,
- &adev->gfx.mec.hpd_eop_gpu_addr);
- if (r) {
- dev_warn(adev->dev, "(%d) pin HDP EOP bo failed\n", r);
- gfx_v7_0_mec_fini(adev);
- return r;
- }
- r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd);
+ r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->gfx.mec.hpd_eop_obj,
+ &adev->gfx.mec.hpd_eop_gpu_addr,
+ (void **)&hpd);
if (r) {
- dev_warn(adev->dev, "(%d) map HDP EOP bo failed\n", r);
+ dev_warn(adev->dev, "(%d) create, pin or map of HDP EOP bo failed\n", r);
gfx_v7_0_mec_fini(adev);
return r;
}
@@ -3108,32 +3068,12 @@ static int gfx_v7_0_compute_queue_init(struct amdgpu_device *adev, int ring_id)
struct cik_mqd *mqd;
struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
- if (ring->mqd_obj == NULL) {
- r = amdgpu_bo_create(adev,
- sizeof(struct cik_mqd),
- PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
- &ring->mqd_obj);
- if (r) {
- dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
- return r;
- }
- }
-
- r = amdgpu_bo_reserve(ring->mqd_obj, false);
- if (unlikely(r != 0))
- goto out;
-
- r = amdgpu_bo_pin(ring->mqd_obj, AMDGPU_GEM_DOMAIN_GTT,
- &mqd_gpu_addr);
- if (r) {
- dev_warn(adev->dev, "(%d) pin MQD bo failed\n", r);
- goto out_unreserve;
- }
- r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&mqd);
+ r = amdgpu_bo_create_reserved(adev, sizeof(struct cik_mqd), PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
+ &mqd_gpu_addr, (void **)&mqd);
if (r) {
- dev_warn(adev->dev, "(%d) map MQD bo failed\n", r);
- goto out_unreserve;
+ dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
+ return r;
}
mutex_lock(&adev->srbm_mutex);
@@ -3147,9 +3087,7 @@ static int gfx_v7_0_compute_queue_init(struct amdgpu_device *adev, int ring_id)
mutex_unlock(&adev->srbm_mutex);
amdgpu_bo_kunmap(ring->mqd_obj);
-out_unreserve:
amdgpu_bo_unreserve(ring->mqd_obj);
-out:
return 0;
}
@@ -3361,43 +3299,9 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
*/
static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev)
{
- int r;
-
- /* save restore block */
- if (adev->gfx.rlc.save_restore_obj) {
- r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, true);
- if (unlikely(r != 0))
- dev_warn(adev->dev, "(%d) reserve RLC sr bo failed\n", r);
- amdgpu_bo_unpin(adev->gfx.rlc.save_restore_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
-
- amdgpu_bo_unref(&adev->gfx.rlc.save_restore_obj);
- adev->gfx.rlc.save_restore_obj = NULL;
- }
-
- /* clear state block */
- if (adev->gfx.rlc.clear_state_obj) {
- r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
- if (unlikely(r != 0))
- dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r);
- amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
-
- amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
- adev->gfx.rlc.clear_state_obj = NULL;
- }
-
- /* clear state block */
- if (adev->gfx.rlc.cp_table_obj) {
- r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, true);
- if (unlikely(r != 0))
- dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
- amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
-
- amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj);
- adev->gfx.rlc.cp_table_obj = NULL;
- }
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL);
}
static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
@@ -3432,39 +3336,17 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
if (src_ptr) {
/* save restore block */
- if (adev->gfx.rlc.save_restore_obj == NULL) {
- r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL,
- &adev->gfx.rlc.save_restore_obj);
- if (r) {
- dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
- return r;
- }
- }
-
- r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
- if (unlikely(r != 0)) {
- gfx_v7_0_rlc_fini(adev);
- return r;
- }
- r = amdgpu_bo_pin(adev->gfx.rlc.save_restore_obj, AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.save_restore_gpu_addr);
+ r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.save_restore_obj,
+ &adev->gfx.rlc.save_restore_gpu_addr,
+ (void **)&adev->gfx.rlc.sr_ptr);
if (r) {
- amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
- dev_warn(adev->dev, "(%d) pin RLC sr bo failed\n", r);
+ dev_warn(adev->dev, "(%d) create, pin or map of RLC sr bo failed\n", r);
gfx_v7_0_rlc_fini(adev);
return r;
}
- r = amdgpu_bo_kmap(adev->gfx.rlc.save_restore_obj, (void **)&adev->gfx.rlc.sr_ptr);
- if (r) {
- dev_warn(adev->dev, "(%d) map RLC sr bo failed\n", r);
- gfx_v7_0_rlc_fini(adev);
- return r;
- }
/* write the sr buffer */
dst_ptr = adev->gfx.rlc.sr_ptr;
for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
@@ -3477,39 +3359,17 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
/* clear state block */
adev->gfx.rlc.clear_state_size = dws = gfx_v7_0_get_csb_size(adev);
- if (adev->gfx.rlc.clear_state_obj == NULL) {
- r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL,
- &adev->gfx.rlc.clear_state_obj);
- if (r) {
- dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
- gfx_v7_0_rlc_fini(adev);
- return r;
- }
- }
- r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
- if (unlikely(r != 0)) {
- gfx_v7_0_rlc_fini(adev);
- return r;
- }
- r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.clear_state_gpu_addr);
+ r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.clear_state_obj,
+ &adev->gfx.rlc.clear_state_gpu_addr,
+ (void **)&adev->gfx.rlc.cs_ptr);
if (r) {
- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
- dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r);
+ dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
gfx_v7_0_rlc_fini(adev);
return r;
}
- r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr);
- if (r) {
- dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r);
- gfx_v7_0_rlc_fini(adev);
- return r;
- }
/* set up the cs buffer */
dst_ptr = adev->gfx.rlc.cs_ptr;
gfx_v7_0_get_csb_buffer(adev, dst_ptr);
@@ -3518,37 +3378,14 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
}
if (adev->gfx.rlc.cp_table_size) {
- if (adev->gfx.rlc.cp_table_obj == NULL) {
- r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL,
- &adev->gfx.rlc.cp_table_obj);
- if (r) {
- dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
- gfx_v7_0_rlc_fini(adev);
- return r;
- }
- }
- r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
- if (unlikely(r != 0)) {
- dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
- gfx_v7_0_rlc_fini(adev);
- return r;
- }
- r = amdgpu_bo_pin(adev->gfx.rlc.cp_table_obj, AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.cp_table_gpu_addr);
- if (r) {
- amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
- dev_warn(adev->dev, "(%d) pin RLC cp_table bo failed\n", r);
- gfx_v7_0_rlc_fini(adev);
- return r;
- }
- r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr);
+ r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.cp_table_obj,
+ &adev->gfx.rlc.cp_table_gpu_addr,
+ (void **)&adev->gfx.rlc.cp_table_ptr);
if (r) {
- dev_warn(adev->dev, "(%d) map RLC cp table bo failed\n", r);
+ dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
gfx_v7_0_rlc_fini(adev);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index aa5a50f5eac8..832e592fcd07 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -193,8 +193,8 @@ static const u32 tonga_golden_common_all[] =
mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
- mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
- mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF
+ mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
+ mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF
};
static const u32 tonga_mgcg_cgcg_init[] =
@@ -303,8 +303,8 @@ static const u32 polaris11_golden_common_all[] =
mmGB_ADDR_CONFIG, 0xffffffff, 0x22011002,
mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
- mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
- mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF,
+ mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
+ mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
};
static const u32 golden_settings_polaris10_a11[] =
@@ -336,8 +336,8 @@ static const u32 polaris10_golden_common_all[] =
mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
- mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
- mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF,
+ mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
+ mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
};
static const u32 fiji_golden_common_all[] =
@@ -348,8 +348,8 @@ static const u32 fiji_golden_common_all[] =
mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
- mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
- mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF,
+ mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
+ mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x00000009,
};
@@ -436,8 +436,8 @@ static const u32 iceland_golden_common_all[] =
mmGB_ADDR_CONFIG, 0xffffffff, 0x22010001,
mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
- mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
- mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF
+ mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
+ mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF
};
static const u32 iceland_mgcg_cgcg_init[] =
@@ -532,8 +532,8 @@ static const u32 cz_golden_common_all[] =
mmGB_ADDR_CONFIG, 0xffffffff, 0x22010001,
mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
- mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
- mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF
+ mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
+ mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF
};
static const u32 cz_mgcg_cgcg_init[] =
@@ -637,8 +637,8 @@ static const u32 stoney_golden_common_all[] =
mmGB_ADDR_CONFIG, 0xffffffff, 0x12010001,
mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
- mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
- mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF,
+ mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
+ mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
};
static const u32 stoney_mgcg_cgcg_init[] =
@@ -750,7 +750,7 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
static void gfx_v8_0_scratch_init(struct amdgpu_device *adev)
{
- adev->gfx.scratch.num_reg = 7;
+ adev->gfx.scratch.num_reg = 8;
adev->gfx.scratch.reg_base = mmSCRATCH_REG0;
adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
}
@@ -1238,29 +1238,8 @@ static void cz_init_cp_jump_table(struct amdgpu_device *adev)
static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev)
{
- int r;
-
- /* clear state block */
- if (adev->gfx.rlc.clear_state_obj) {
- r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
- if (unlikely(r != 0))
- dev_warn(adev->dev, "(%d) reserve RLC cbs bo failed\n", r);
- amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
- amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
- adev->gfx.rlc.clear_state_obj = NULL;
- }
-
- /* jump table block */
- if (adev->gfx.rlc.cp_table_obj) {
- r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, true);
- if (unlikely(r != 0))
- dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
- amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
- amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj);
- adev->gfx.rlc.cp_table_obj = NULL;
- }
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL);
}
static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
@@ -1278,39 +1257,17 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
/* clear state block */
adev->gfx.rlc.clear_state_size = dws = gfx_v8_0_get_csb_size(adev);
- if (adev->gfx.rlc.clear_state_obj == NULL) {
- r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL,
- &adev->gfx.rlc.clear_state_obj);
- if (r) {
- dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
- gfx_v8_0_rlc_fini(adev);
- return r;
- }
- }
- r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
- if (unlikely(r != 0)) {
- gfx_v8_0_rlc_fini(adev);
- return r;
- }
- r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.clear_state_gpu_addr);
+ r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.clear_state_obj,
+ &adev->gfx.rlc.clear_state_gpu_addr,
+ (void **)&adev->gfx.rlc.cs_ptr);
if (r) {
- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
- dev_warn(adev->dev, "(%d) pin RLC cbs bo failed\n", r);
+ dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
gfx_v8_0_rlc_fini(adev);
return r;
}
- r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr);
- if (r) {
- dev_warn(adev->dev, "(%d) map RLC cbs bo failed\n", r);
- gfx_v8_0_rlc_fini(adev);
- return r;
- }
/* set up the cs buffer */
dst_ptr = adev->gfx.rlc.cs_ptr;
gfx_v8_0_get_csb_buffer(adev, dst_ptr);
@@ -1321,34 +1278,13 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
if ((adev->asic_type == CHIP_CARRIZO) ||
(adev->asic_type == CHIP_STONEY)) {
adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
- if (adev->gfx.rlc.cp_table_obj == NULL) {
- r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL,
- &adev->gfx.rlc.cp_table_obj);
- if (r) {
- dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
- return r;
- }
- }
-
- r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
- if (unlikely(r != 0)) {
- dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
- return r;
- }
- r = amdgpu_bo_pin(adev->gfx.rlc.cp_table_obj, AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.cp_table_gpu_addr);
- if (r) {
- amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
- dev_warn(adev->dev, "(%d) pin RLC cp table bo failed\n", r);
- return r;
- }
- r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr);
+ r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.cp_table_obj,
+ &adev->gfx.rlc.cp_table_gpu_addr,
+ (void **)&adev->gfx.rlc.cp_table_ptr);
if (r) {
- dev_warn(adev->dev, "(%d) map RLC cp table bo failed\n", r);
+ dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
return r;
}
@@ -1363,17 +1299,7 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
static void gfx_v8_0_mec_fini(struct amdgpu_device *adev)
{
- int r;
-
- if (adev->gfx.mec.hpd_eop_obj) {
- r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, true);
- if (unlikely(r != 0))
- dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
- amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
- amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
- amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj);
- adev->gfx.mec.hpd_eop_obj = NULL;
- }
+ amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
}
static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
@@ -1389,34 +1315,13 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
mec_hpd_size = adev->gfx.num_compute_rings * GFX8_MEC_HPD_SIZE;
- if (adev->gfx.mec.hpd_eop_obj == NULL) {
- r = amdgpu_bo_create(adev,
- mec_hpd_size,
- PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
- &adev->gfx.mec.hpd_eop_obj);
- if (r) {
- dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
- return r;
- }
- }
-
- r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
- if (unlikely(r != 0)) {
- gfx_v8_0_mec_fini(adev);
- return r;
- }
- r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT,
- &adev->gfx.mec.hpd_eop_gpu_addr);
+ r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->gfx.mec.hpd_eop_obj,
+ &adev->gfx.mec.hpd_eop_gpu_addr,
+ (void **)&hpd);
if (r) {
- dev_warn(adev->dev, "(%d) pin HDP EOP bo failed\n", r);
- gfx_v8_0_mec_fini(adev);
- return r;
- }
- r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd);
- if (r) {
- dev_warn(adev->dev, "(%d) map HDP EOP bo failed\n", r);
- gfx_v8_0_mec_fini(adev);
+ dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
return r;
}
@@ -3802,6 +3707,8 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
ELEMENT_SIZE, 1);
sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
INDEX_STRIDE, 3);
+ WREG32(mmSH_STATIC_MEM_CONFIG, sh_static_mem_cfg);
+
mutex_lock(&adev->srbm_mutex);
for (i = 0; i < adev->vm_manager.id_mgr[0].num_ids; i++) {
vi_srbm_select(adev, 0, 0, 0, i);
@@ -3825,7 +3732,6 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
WREG32(mmSH_MEM_APE1_BASE, 1);
WREG32(mmSH_MEM_APE1_LIMIT, 0);
- WREG32(mmSH_STATIC_MEM_CONFIG, sh_static_mem_cfg);
}
vi_srbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
@@ -4564,7 +4470,7 @@ static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev)
/* This situation may be hit in the future if a new HW
* generation exposes more than 64 queues. If so, the
* definition of queue_mask needs updating */
- if (WARN_ON(i > (sizeof(queue_mask)*8))) {
+ if (WARN_ON(i >= (sizeof(queue_mask)*8))) {
DRM_ERROR("Invalid KCQ enabled: %d\n", i);
break;
}
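queue_mask is a 64-bit bitmap, so the valid bit indices are 0..63; the old "i > 64" bound let i == 64 slip through, and shifting a 64-bit value by 64 is undefined behaviour in C. Changing the comparison to ">=" rejects index 64 before the shift; the same one-character fix recurs in gfx_v9_0 further down. A standalone illustration of the off-by-one, not driver code:

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative only: why the bound must be '>=' rather than '>'. */
	int main(void)
	{
		uint64_t queue_mask = 0;
		unsigned int i = 64;

		if (i > sizeof(queue_mask) * 8)		/* old check: 64 > 64 is false */
			printf("old check rejects %u\n", i);
		if (i >= sizeof(queue_mask) * 8)	/* fixed check: 64 >= 64 rejects it */
			printf("new check rejects %u\n", i);
		/* queue_mask |= 1ULL << i;  -- would be undefined for i == 64 */
		return 0;
	}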
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 3a0b69b09ed6..69182eeca264 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -116,7 +116,9 @@ static const u32 golden_settings_gc_9_0[] =
SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_2), 0x08000000, 0x08000080,
SOC15_REG_OFFSET(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL), 0x08000000, 0x08000080,
SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_UTCL1_CNTL), 0x08000000, 0x08000080,
+ SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), 0x00001000, 0x00001000,
SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL_1), 0x0000000f, 0x01000107,
+ SOC15_REG_OFFSET(GC, 0, mmSQC_CONFIG), 0x03000000, 0x020a2000,
SOC15_REG_OFFSET(GC, 0, mmTA_CNTL_AUX), 0xfffffeef, 0x010b0000,
SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_HI), 0xffffffff, 0x4a2c0e68,
SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_LO), 0xffffffff, 0xb5d3f197,
@@ -211,7 +213,7 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
static void gfx_v9_0_scratch_init(struct amdgpu_device *adev)
{
- adev->gfx.scratch.num_reg = 7;
+ adev->gfx.scratch.num_reg = 8;
adev->gfx.scratch.reg_base = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
}
@@ -772,18 +774,16 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
if (cs_data) {
/* clear state block */
adev->gfx.rlc.clear_state_size = dws = gfx_v9_0_get_csb_size(adev);
- if (adev->gfx.rlc.clear_state_obj == NULL) {
- r = amdgpu_bo_create_kernel(adev, dws * 4, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.clear_state_obj,
- &adev->gfx.rlc.clear_state_gpu_addr,
- (void **)&adev->gfx.rlc.cs_ptr);
- if (r) {
- dev_err(adev->dev,
- "(%d) failed to create rlc csb bo\n", r);
- gfx_v9_0_rlc_fini(adev);
- return r;
- }
+ r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.clear_state_obj,
+ &adev->gfx.rlc.clear_state_gpu_addr,
+ (void **)&adev->gfx.rlc.cs_ptr);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to create rlc csb bo\n",
+ r);
+ gfx_v9_0_rlc_fini(adev);
+ return r;
}
/* set up the cs buffer */
dst_ptr = adev->gfx.rlc.cs_ptr;
@@ -795,18 +795,16 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
if (adev->asic_type == CHIP_RAVEN) {
/* TODO: double check the cp_table_size for RV */
adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
- if (adev->gfx.rlc.cp_table_obj == NULL) {
- r = amdgpu_bo_create_kernel(adev, adev->gfx.rlc.cp_table_size,
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.cp_table_obj,
- &adev->gfx.rlc.cp_table_gpu_addr,
- (void **)&adev->gfx.rlc.cp_table_ptr);
- if (r) {
- dev_err(adev->dev,
- "(%d) failed to create cp table bo\n", r);
- gfx_v9_0_rlc_fini(adev);
- return r;
- }
+ r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.cp_table_obj,
+ &adev->gfx.rlc.cp_table_gpu_addr,
+ (void **)&adev->gfx.rlc.cp_table_ptr);
+ if (r) {
+ dev_err(adev->dev,
+ "(%d) failed to create cp table bo\n", r);
+ gfx_v9_0_rlc_fini(adev);
+ return r;
}
rv_init_cp_jump_table(adev);
@@ -821,28 +819,8 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
{
- int r;
-
- if (adev->gfx.mec.hpd_eop_obj) {
- r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, true);
- if (unlikely(r != 0))
- dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
- amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
- amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
-
- amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj);
- adev->gfx.mec.hpd_eop_obj = NULL;
- }
- if (adev->gfx.mec.mec_fw_obj) {
- r = amdgpu_bo_reserve(adev->gfx.mec.mec_fw_obj, true);
- if (unlikely(r != 0))
- dev_warn(adev->dev, "(%d) reserve mec firmware bo failed\n", r);
- amdgpu_bo_unpin(adev->gfx.mec.mec_fw_obj);
- amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
-
- amdgpu_bo_unref(&adev->gfx.mec.mec_fw_obj);
- adev->gfx.mec.mec_fw_obj = NULL;
- }
+ amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
}
static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
@@ -862,33 +840,13 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
amdgpu_gfx_compute_queue_acquire(adev);
mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;
- if (adev->gfx.mec.hpd_eop_obj == NULL) {
- r = amdgpu_bo_create(adev,
- mec_hpd_size,
- PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
- &adev->gfx.mec.hpd_eop_obj);
- if (r) {
- dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
- return r;
- }
- }
-
- r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
- if (unlikely(r != 0)) {
- gfx_v9_0_mec_fini(adev);
- return r;
- }
- r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT,
- &adev->gfx.mec.hpd_eop_gpu_addr);
+ r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->gfx.mec.hpd_eop_obj,
+ &adev->gfx.mec.hpd_eop_gpu_addr,
+ (void **)&hpd);
if (r) {
- dev_warn(adev->dev, "(%d) pin HDP EOP bo failed\n", r);
- gfx_v9_0_mec_fini(adev);
- return r;
- }
- r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd);
- if (r) {
- dev_warn(adev->dev, "(%d) map HDP EOP bo failed\n", r);
+ dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
gfx_v9_0_mec_fini(adev);
return r;
}
@@ -905,42 +863,22 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;
- if (adev->gfx.mec.mec_fw_obj == NULL) {
- r = amdgpu_bo_create(adev,
- mec_hdr->header.ucode_size_bytes,
- PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
- &adev->gfx.mec.mec_fw_obj);
- if (r) {
- dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
- return r;
- }
- }
-
- r = amdgpu_bo_reserve(adev->gfx.mec.mec_fw_obj, false);
- if (unlikely(r != 0)) {
- gfx_v9_0_mec_fini(adev);
- return r;
- }
- r = amdgpu_bo_pin(adev->gfx.mec.mec_fw_obj, AMDGPU_GEM_DOMAIN_GTT,
- &adev->gfx.mec.mec_fw_gpu_addr);
+ r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
+ &adev->gfx.mec.mec_fw_obj,
+ &adev->gfx.mec.mec_fw_gpu_addr,
+ (void **)&fw);
if (r) {
- dev_warn(adev->dev, "(%d) pin mec firmware bo failed\n", r);
- gfx_v9_0_mec_fini(adev);
- return r;
- }
- r = amdgpu_bo_kmap(adev->gfx.mec.mec_fw_obj, (void **)&fw);
- if (r) {
- dev_warn(adev->dev, "(%d) map firmware bo failed\n", r);
+ dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
gfx_v9_0_mec_fini(adev);
return r;
}
+
memcpy(fw, fw_data, fw_size);
amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
-
return 0;
}
@@ -1475,21 +1413,23 @@ static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev)
static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance)
{
- u32 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
+ u32 data;
- if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) {
- data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
- data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
- } else if (se_num == 0xffffffff) {
- data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
+ if (instance == 0xffffffff)
+ data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
+ else
+ data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
+
+ if (se_num == 0xffffffff)
data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
- } else if (sh_num == 0xffffffff) {
- data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
+ else
data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
- } else {
+
+ if (sh_num == 0xffffffff)
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
+ else
data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
- data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
- }
+
WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
}
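The rework above replaces the nested if/else ladder with three independent decisions: instance, SE and SH are each either broadcast (the 0xffffffff sentinel) or pinned to a specific index, which also lets callers select a single instance instead of always forcing instance broadcast. A standalone sketch of that selection rule; the shift and bit positions below are placeholders, not the real GRBM_GFX_INDEX layout:

	#include <stdint.h>
	#include <stdio.h>

	#define BROADCAST 0xffffffffu

	/* Illustrative only: 0xffffffff means "all", anything else is an index. */
	static uint32_t select_field(uint32_t val, unsigned int index_shift,
				     unsigned int broadcast_bit)
	{
		return (val == BROADCAST) ? (1u << broadcast_bit)
					  : (val << index_shift);
	}

	int main(void)
	{
		/* e.g. SE 1, all SHs, all instances */
		uint32_t data = select_field(BROADCAST, 0, 30) |	/* instance: broadcast */
				select_field(1, 16, 31) |		/* SE index 1 */
				select_field(BROADCAST, 8, 29);		/* SH: broadcast */
		printf("0x%08x\n", data);
		return 0;
	}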
@@ -2217,7 +2157,7 @@ static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
const struct cs_section_def *sect = NULL;
const struct cs_extent_def *ext = NULL;
- int r, i;
+ int r, i, tmp;
/* init the CP */
WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
@@ -2225,7 +2165,7 @@ static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
gfx_v9_0_cp_gfx_enable(adev, true);
- r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4);
+ r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3);
if (r) {
DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
return r;
@@ -2263,6 +2203,12 @@ static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
amdgpu_ring_write(ring, 0x8000);
amdgpu_ring_write(ring, 0x8000);
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG,1));
+ tmp = (PACKET3_SET_UCONFIG_REG_INDEX_TYPE |
+ (SOC15_REG_OFFSET(GC, 0, mmVGT_INDEX_TYPE) - PACKET3_SET_UCONFIG_REG_START));
+ amdgpu_ring_write(ring, tmp);
+ amdgpu_ring_write(ring, 0);
+
amdgpu_ring_commit(ring);
return 0;
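The extra packet emitted above is what the "+ 3" added to amdgpu_ring_alloc() pays for: a PACKET3_SET_UCONFIG_REG write with a count of 1 occupies exactly three dwords. A short accounting sketch, illustrative only:

	/* header (PACKET3_SET_UCONFIG_REG, count 1)  -> 1 dword
	 * register offset (VGT_INDEX_TYPE)           -> 1 dword
	 * value (0)                                   -> 1 dword
	 */
	enum { uconfig_pkt_dwords = 1 + 1 + 1 };	/* matches csb_size + 4 + 3 */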
@@ -2425,7 +2371,7 @@ static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
/* This situation may be hit in the future if a new HW
* generation exposes more than 64 queues. If so, the
* definition of queue_mask needs updating */
- if (WARN_ON(i > (sizeof(queue_mask)*8))) {
+ if (WARN_ON(i >= (sizeof(queue_mask)*8))) {
DRM_ERROR("Invalid KCQ enabled: %d\n", i);
break;
}
@@ -4156,7 +4102,7 @@ static int gfx_v9_0_kiq_irq(struct amdgpu_device *adev,
return 0;
}
-const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
+static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
.name = "gfx_v9_0",
.early_init = gfx_v9_0_early_init,
.late_init = gfx_v9_0_late_init,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h
index 56ef652a575d..fa5a3fbaf6ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h
@@ -24,7 +24,6 @@
#ifndef __GFX_V9_0_H__
#define __GFX_V9_0_H__
-extern const struct amd_ip_funcs gfx_v9_0_ip_funcs;
extern const struct amdgpu_ip_block_version gfx_v9_0_ip_block;
void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index a42f483767e7..4f2788b61a08 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -58,14 +58,14 @@ static void gfxhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
gfxhub_v1_0_init_gart_pt_regs(adev);
WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
- (u32)(adev->mc.gtt_start >> 12));
+ (u32)(adev->mc.gart_start >> 12));
WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
- (u32)(adev->mc.gtt_start >> 44));
+ (u32)(adev->mc.gart_start >> 44));
WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
- (u32)(adev->mc.gtt_end >> 12));
+ (u32)(adev->mc.gart_end >> 12));
WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
- (u32)(adev->mc.gtt_end >> 44));
+ (u32)(adev->mc.gart_end >> 44));
}
static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
@@ -124,12 +124,12 @@ static void gfxhub_v1_0_init_tlb_regs(struct amdgpu_device *adev)
static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
{
- uint32_t tmp;
+ uint32_t tmp, field;
/* Setup L2 cache */
tmp = RREG32_SOC15(GC, 0, mmVM_L2_CNTL);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 0);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
/* XXX for emulation, Refer to closed source code.*/
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
0);
@@ -143,7 +143,10 @@ static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
WREG32_SOC15(GC, 0, mmVM_L2_CNTL2, tmp);
+ field = adev->vm_manager.fragment_size;
tmp = mmVM_L2_CNTL3_DEFAULT;
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
WREG32_SOC15(GC, 0, mmVM_L2_CNTL3, tmp);
tmp = mmVM_L2_CNTL4_DEFAULT;
@@ -206,6 +209,9 @@ static void gfxhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
PAGE_TABLE_BLOCK_SIZE,
adev->vm_manager.block_size - 9);
+ /* Send no-retry XNACK on fault to suppress VM fault storm. */
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_CNTL, i, tmp);
WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, i*2, 0);
WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, i*2, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
index d2dbb085f480..206e29cad753 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
@@ -30,7 +30,5 @@ void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
bool value);
void gfxhub_v1_0_init(struct amdgpu_device *adev);
u64 gfxhub_v1_0_get_mc_fb_offset(struct amdgpu_device *adev);
-extern const struct amd_ip_funcs gfxhub_v1_0_ip_funcs;
-extern const struct amdgpu_ip_block_version gfxhub_v1_0_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index d0214d942bfc..12b0c4cd7a5a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -66,14 +66,10 @@ static const u32 crtc_offsets[6] =
SI_CRTC5_REGISTER_OFFSET
};
-static void gmc_v6_0_mc_stop(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
+static void gmc_v6_0_mc_stop(struct amdgpu_device *adev)
{
u32 blackout;
- if (adev->mode_info.num_crtc)
- amdgpu_display_stop_mc_access(adev, save);
-
gmc_v6_0_wait_for_idle((void *)adev);
blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
@@ -90,8 +86,7 @@ static void gmc_v6_0_mc_stop(struct amdgpu_device *adev,
}
-static void gmc_v6_0_mc_resume(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
+static void gmc_v6_0_mc_resume(struct amdgpu_device *adev)
{
u32 tmp;
@@ -103,10 +98,6 @@ static void gmc_v6_0_mc_resume(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
WREG32(mmBIF_FB_EN, tmp);
-
- if (adev->mode_info.num_crtc)
- amdgpu_display_resume_mc_access(adev, save);
-
}
static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
@@ -228,20 +219,20 @@ static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
struct amdgpu_mc *mc)
{
+ u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
+ base <<= 24;
+
if (mc->mc_vram_size > 0xFFC0000000ULL) {
dev_warn(adev->dev, "limiting VRAM\n");
mc->real_vram_size = 0xFFC0000000ULL;
mc->mc_vram_size = 0xFFC0000000ULL;
}
- amdgpu_vram_location(adev, &adev->mc, 0);
- adev->mc.gtt_base_align = 0;
- amdgpu_gtt_location(adev, mc);
+ amdgpu_vram_location(adev, &adev->mc, base);
+ amdgpu_gart_location(adev, mc);
}
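Instead of pinning VRAM at offset 0, the SI GMC now recovers the frame-buffer base that was programmed into MC_VM_FB_LOCATION: the low 16 bits hold the base in 16 MB units, hence the mask and the shift by 24 (the removed mc_program code below shows the same encoding being written, with the top in bits 31:16). A small standalone illustration of the decode, using a hypothetical register value:

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative only: base = (MC_VM_FB_LOCATION & 0xFFFF) << 24. */
	int main(void)
	{
		uint32_t fb_location = 0x00ff00f0;	/* hypothetical value */
		uint64_t base = (uint64_t)(fb_location & 0xFFFF) << 24;

		printf("base = 0x%llx\n", (unsigned long long)base);	/* 0xf0000000 */
		return 0;
	}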
static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
{
- struct amdgpu_mode_mc_save save;
- u32 tmp;
int i, j;
/* Initialize HDP */
@@ -254,16 +245,23 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
}
WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
- if (adev->mode_info.num_crtc)
- amdgpu_display_set_vga_render_state(adev, false);
-
- gmc_v6_0_mc_stop(adev, &save);
-
if (gmc_v6_0_wait_for_idle((void *)adev)) {
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
}
- WREG32(mmVGA_HDP_CONTROL, VGA_HDP_CONTROL__VGA_MEMORY_DISABLE_MASK);
+ if (adev->mode_info.num_crtc) {
+ u32 tmp;
+
+ /* Lockout access through VGA aperture*/
+ tmp = RREG32(mmVGA_HDP_CONTROL);
+ tmp |= VGA_HDP_CONTROL__VGA_MEMORY_DISABLE_MASK;
+ WREG32(mmVGA_HDP_CONTROL, tmp);
+
+ /* disable VGA render */
+ tmp = RREG32(mmVGA_RENDER_CONTROL);
+ tmp &= ~VGA_VSTATUS_CNTL;
+ WREG32(mmVGA_RENDER_CONTROL, tmp);
+ }
/* Update configuration */
WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
adev->mc.vram_start >> 12);
@@ -271,13 +269,6 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
adev->mc.vram_end >> 12);
WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
adev->vram_scratch.gpu_addr >> 12);
- tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
- tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
- WREG32(mmMC_VM_FB_LOCATION, tmp);
- /* XXX double check these! */
- WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
- WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
- WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
WREG32(mmMC_VM_AGP_BASE, 0);
WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
@@ -285,7 +276,6 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
if (gmc_v6_0_wait_for_idle((void *)adev)) {
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
}
- gmc_v6_0_mc_resume(adev, &save);
}
static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
@@ -342,15 +332,7 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
adev->mc.visible_vram_size = adev->mc.aper_size;
- /* unless the user had overridden it, set the gart
- * size equal to the 1024 or vram, whichever is larger.
- */
- if (amdgpu_gart_size == -1)
- adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
- adev->mc.mc_vram_size);
- else
- adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
-
+ amdgpu_gart_set_defaults(adev);
gmc_v6_0_vram_gtt_location(adev, &adev->mc);
return 0;
@@ -479,6 +461,7 @@ static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
{
int r, i;
+ u32 field;
if (adev->gart.robj == NULL) {
dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
@@ -506,13 +489,15 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
WREG32(mmVM_L2_CNTL2,
VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS_MASK |
VM_L2_CNTL2__INVALIDATE_L2_CACHE_MASK);
+
+ field = adev->vm_manager.fragment_size;
WREG32(mmVM_L2_CNTL3,
VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
- (4UL << VM_L2_CNTL3__BANK_SELECT__SHIFT) |
- (4UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
+ (field << VM_L2_CNTL3__BANK_SELECT__SHIFT) |
+ (field << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
/* setup context0 */
- WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
- WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
+ WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gart_start >> 12);
+ WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gart_end >> 12);
WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(adev->dummy_page.addr >> 12));
@@ -559,7 +544,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
gmc_v6_0_gart_flush_gpu_tlb(adev, 0);
dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
- (unsigned)(adev->mc.gtt_size >> 20),
+ (unsigned)(adev->mc.gart_size >> 20),
(unsigned long long)adev->gart.table_addr);
adev->gart.ready = true;
return 0;
@@ -829,7 +814,7 @@ static int gmc_v6_0_sw_init(void *handle)
if (r)
return r;
- amdgpu_vm_adjust_size(adev, 64);
+ amdgpu_vm_adjust_size(adev, 64, 4);
adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
adev->mc.mc_mask = 0xffffffffffULL;
@@ -987,7 +972,6 @@ static int gmc_v6_0_wait_for_idle(void *handle)
static int gmc_v6_0_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_mode_mc_save save;
u32 srbm_soft_reset = 0;
u32 tmp = RREG32(mmSRBM_STATUS);
@@ -1003,7 +987,7 @@ static int gmc_v6_0_soft_reset(void *handle)
}
if (srbm_soft_reset) {
- gmc_v6_0_mc_stop(adev, &save);
+ gmc_v6_0_mc_stop(adev);
if (gmc_v6_0_wait_for_idle(adev)) {
dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
}
@@ -1023,7 +1007,7 @@ static int gmc_v6_0_soft_reset(void *handle)
udelay(50);
- gmc_v6_0_mc_resume(adev, &save);
+ gmc_v6_0_mc_resume(adev);
udelay(50);
}
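
For reference (not part of the patch): the new gmc_v6_0_vram_gtt_location() hunk above, like its v7/v8 counterparts below, now reads the VRAM base back from MC_VM_FB_LOCATION instead of forcing it to 0. The register packs FB_TOP in bits 31:16 and FB_BASE in bits 15:0, both in 16 MB units, which is why the removed mc_program() code wrote ((vram_end >> 24) << 16) | (vram_start >> 24). A minimal sketch of the decode, with an illustrative helper name:

static inline u64 fb_location_to_base(u32 fb_location)
{
	u64 base = fb_location & 0xFFFF;	/* FB_BASE field, in 16 MB units */

	return base << 24;			/* convert back to a byte address */
}
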
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 7e9ea53edf8b..e42c1ad3af5e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -37,6 +37,9 @@
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
+
#include "amdgpu_atombios.h"
static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev);
@@ -76,14 +79,10 @@ static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev)
}
}
-static void gmc_v7_0_mc_stop(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
+static void gmc_v7_0_mc_stop(struct amdgpu_device *adev)
{
u32 blackout;
- if (adev->mode_info.num_crtc)
- amdgpu_display_stop_mc_access(adev, save);
-
gmc_v7_0_wait_for_idle((void *)adev);
blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
@@ -99,8 +98,7 @@ static void gmc_v7_0_mc_stop(struct amdgpu_device *adev,
udelay(100);
}
-static void gmc_v7_0_mc_resume(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
+static void gmc_v7_0_mc_resume(struct amdgpu_device *adev)
{
u32 tmp;
@@ -112,9 +110,6 @@ static void gmc_v7_0_mc_resume(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
WREG32(mmBIF_FB_EN, tmp);
-
- if (adev->mode_info.num_crtc)
- amdgpu_display_resume_mc_access(adev, save);
}
/**
@@ -242,15 +237,17 @@ static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
struct amdgpu_mc *mc)
{
+ u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
+ base <<= 24;
+
if (mc->mc_vram_size > 0xFFC0000000ULL) {
/* leave room for at least 1024M GTT */
dev_warn(adev->dev, "limiting VRAM\n");
mc->real_vram_size = 0xFFC0000000ULL;
mc->mc_vram_size = 0xFFC0000000ULL;
}
- amdgpu_vram_location(adev, &adev->mc, 0);
- adev->mc.gtt_base_align = 0;
- amdgpu_gtt_location(adev, mc);
+ amdgpu_vram_location(adev, &adev->mc, base);
+ amdgpu_gart_location(adev, mc);
}
/**
@@ -263,7 +260,6 @@ static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
*/
static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
{
- struct amdgpu_mode_mc_save save;
u32 tmp;
int i, j;
@@ -277,13 +273,20 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
}
WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
- if (adev->mode_info.num_crtc)
- amdgpu_display_set_vga_render_state(adev, false);
-
- gmc_v7_0_mc_stop(adev, &save);
if (gmc_v7_0_wait_for_idle((void *)adev)) {
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
}
+ if (adev->mode_info.num_crtc) {
+ /* Lock out access through the VGA aperture */
+ tmp = RREG32(mmVGA_HDP_CONTROL);
+ tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
+ WREG32(mmVGA_HDP_CONTROL, tmp);
+
+ /* disable VGA render */
+ tmp = RREG32(mmVGA_RENDER_CONTROL);
+ tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
+ WREG32(mmVGA_RENDER_CONTROL, tmp);
+ }
/* Update configuration */
WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
adev->mc.vram_start >> 12);
@@ -291,20 +294,12 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
adev->mc.vram_end >> 12);
WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
adev->vram_scratch.gpu_addr >> 12);
- tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
- tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
- WREG32(mmMC_VM_FB_LOCATION, tmp);
- /* XXX double check these! */
- WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
- WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
- WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
WREG32(mmMC_VM_AGP_BASE, 0);
WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
if (gmc_v7_0_wait_for_idle((void *)adev)) {
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
}
- gmc_v7_0_mc_resume(adev, &save);
WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
@@ -391,15 +386,7 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
adev->mc.visible_vram_size = adev->mc.real_vram_size;
- /* unless the user had overridden it, set the gart
- * size equal to the 1024 or vram, whichever is larger.
- */
- if (amdgpu_gart_size == -1)
- adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
- adev->mc.mc_vram_size);
- else
- adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
-
+ amdgpu_gart_set_defaults(adev);
gmc_v7_0_vram_gtt_location(adev, &adev->mc);
return 0;
@@ -575,7 +562,7 @@ static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable)
static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
{
int r, i;
- u32 tmp;
+ u32 tmp, field;
if (adev->gart.robj == NULL) {
dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
@@ -605,14 +592,16 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(0, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
WREG32(mmVM_L2_CNTL2, tmp);
+
+ field = adev->vm_manager.fragment_size;
tmp = RREG32(mmVM_L2_CNTL3);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 4);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, field);
WREG32(mmVM_L2_CNTL3, tmp);
/* setup context0 */
- WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
- WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
+ WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gart_start >> 12);
+ WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gart_end >> 12);
WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(adev->dummy_page.addr >> 12));
@@ -666,7 +655,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
gmc_v7_0_gart_flush_gpu_tlb(adev, 0);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
- (unsigned)(adev->mc.gtt_size >> 20),
+ (unsigned)(adev->mc.gart_size >> 20),
(unsigned long long)adev->gart.table_addr);
adev->gart.ready = true;
return 0;
@@ -961,7 +950,7 @@ static int gmc_v7_0_sw_init(void *handle)
* Currently set to 4GB ((1 << 20) 4k pages).
* Max GPUVM size for cayman and SI is 40 bits.
*/
- amdgpu_vm_adjust_size(adev, 64);
+ amdgpu_vm_adjust_size(adev, 64, 4);
adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
/* Set the internal MC address mask
@@ -1138,7 +1127,6 @@ static int gmc_v7_0_wait_for_idle(void *handle)
static int gmc_v7_0_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_mode_mc_save save;
u32 srbm_soft_reset = 0;
u32 tmp = RREG32(mmSRBM_STATUS);
@@ -1154,7 +1142,7 @@ static int gmc_v7_0_soft_reset(void *handle)
}
if (srbm_soft_reset) {
- gmc_v7_0_mc_stop(adev, &save);
+ gmc_v7_0_mc_stop(adev);
if (gmc_v7_0_wait_for_idle((void *)adev)) {
dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
}
@@ -1175,7 +1163,7 @@ static int gmc_v7_0_soft_reset(void *handle)
/* Wait a little for things to settle down */
udelay(50);
- gmc_v7_0_mc_resume(adev, &save);
+ gmc_v7_0_mc_resume(adev);
udelay(50);
}
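
A note on the new fragment_size plumbing above (not part of the patch): amdgpu_vm_adjust_size(adev, 64, 4) now also picks a VM fragment size, and the gart_enable() hunks program that value into BANK_SELECT and L2_CACHE_BIGK_FRAGMENT_SIZE instead of the hard-coded 4. The field is a log2 count of 4 KB pages, so the default of 4 corresponds to 64 KB fragments and the 9 used for gmc_v9 below corresponds to 2 MB. A hedged one-liner showing the relation:

/* Illustrative only: fragment field value -> fragment size in bytes. */
static inline u64 fragment_field_to_bytes(unsigned int field)
{
	return 4096ULL << field;	/* 4 -> 64 KB, 9 -> 2 MB */
}
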
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index cc9f88057cd5..7ca2dae8237a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -35,6 +35,9 @@
#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
#include "vid.h"
#include "vi.h"
@@ -161,14 +164,10 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
}
}
-static void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
+static void gmc_v8_0_mc_stop(struct amdgpu_device *adev)
{
u32 blackout;
- if (adev->mode_info.num_crtc)
- amdgpu_display_stop_mc_access(adev, save);
-
gmc_v8_0_wait_for_idle(adev);
blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
@@ -184,8 +183,7 @@ static void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
udelay(100);
}
-static void gmc_v8_0_mc_resume(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
+static void gmc_v8_0_mc_resume(struct amdgpu_device *adev)
{
u32 tmp;
@@ -197,9 +195,6 @@ static void gmc_v8_0_mc_resume(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
WREG32(mmBIF_FB_EN, tmp);
-
- if (adev->mode_info.num_crtc)
- amdgpu_display_resume_mc_access(adev, save);
}
/**
@@ -404,15 +399,20 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
struct amdgpu_mc *mc)
{
+ u64 base = 0;
+
+ if (!amdgpu_sriov_vf(adev))
+ base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
+ base <<= 24;
+
if (mc->mc_vram_size > 0xFFC0000000ULL) {
/* leave room for at least 1024M GTT */
dev_warn(adev->dev, "limiting VRAM\n");
mc->real_vram_size = 0xFFC0000000ULL;
mc->mc_vram_size = 0xFFC0000000ULL;
}
- amdgpu_vram_location(adev, &adev->mc, 0);
- adev->mc.gtt_base_align = 0;
- amdgpu_gtt_location(adev, mc);
+ amdgpu_vram_location(adev, &adev->mc, base);
+ amdgpu_gart_location(adev, mc);
}
/**
@@ -425,7 +425,6 @@ static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
*/
static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
{
- struct amdgpu_mode_mc_save save;
u32 tmp;
int i, j;
@@ -439,13 +438,20 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
}
WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
- if (adev->mode_info.num_crtc)
- amdgpu_display_set_vga_render_state(adev, false);
-
- gmc_v8_0_mc_stop(adev, &save);
if (gmc_v8_0_wait_for_idle((void *)adev)) {
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
}
+ if (adev->mode_info.num_crtc) {
+ /* Lock out access through the VGA aperture */
+ tmp = RREG32(mmVGA_HDP_CONTROL);
+ tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
+ WREG32(mmVGA_HDP_CONTROL, tmp);
+
+ /* disable VGA render */
+ tmp = RREG32(mmVGA_RENDER_CONTROL);
+ tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
+ WREG32(mmVGA_RENDER_CONTROL, tmp);
+ }
/* Update configuration */
WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
adev->mc.vram_start >> 12);
@@ -453,20 +459,23 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
adev->mc.vram_end >> 12);
WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
adev->vram_scratch.gpu_addr >> 12);
- tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
- tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
- WREG32(mmMC_VM_FB_LOCATION, tmp);
- /* XXX double check these! */
- WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
- WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
- WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
+
+ if (amdgpu_sriov_vf(adev)) {
+ tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
+ tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
+ WREG32(mmMC_VM_FB_LOCATION, tmp);
+ /* XXX double check these! */
+ WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
+ WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
+ WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
+ }
+
WREG32(mmMC_VM_AGP_BASE, 0);
WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
if (gmc_v8_0_wait_for_idle((void *)adev)) {
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
}
- gmc_v8_0_mc_resume(adev, &save);
WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
@@ -553,15 +562,7 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
adev->mc.visible_vram_size = adev->mc.real_vram_size;
- /* unless the user had overridden it, set the gart
- * size equal to the 1024 or vram, whichever is larger.
- */
- if (amdgpu_gart_size == -1)
- adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
- adev->mc.mc_vram_size);
- else
- adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
-
+ amdgpu_gart_set_defaults(adev);
gmc_v8_0_vram_gtt_location(adev, &adev->mc);
return 0;
@@ -761,7 +762,7 @@ static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
{
int r, i;
- u32 tmp;
+ u32 tmp, field;
if (adev->gart.robj == NULL) {
dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
@@ -792,10 +793,12 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
WREG32(mmVM_L2_CNTL2, tmp);
+
+ field = adev->vm_manager.fragment_size;
tmp = RREG32(mmVM_L2_CNTL3);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 4);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, field);
WREG32(mmVM_L2_CNTL3, tmp);
/* XXX: set to enable PTE/PDE in system memory */
tmp = RREG32(mmVM_L2_CNTL4);
@@ -813,8 +816,8 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
WREG32(mmVM_L2_CNTL4, tmp);
/* setup context0 */
- WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
- WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
+ WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gart_start >> 12);
+ WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gart_end >> 12);
WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(adev->dummy_page.addr >> 12));
@@ -869,7 +872,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
gmc_v8_0_gart_flush_gpu_tlb(adev, 0);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
- (unsigned)(adev->mc.gtt_size >> 20),
+ (unsigned)(adev->mc.gart_size >> 20),
(unsigned long long)adev->gart.table_addr);
adev->gart.ready = true;
return 0;
@@ -1045,7 +1048,7 @@ static int gmc_v8_0_sw_init(void *handle)
* Currently set to 4GB ((1 << 20) 4k pages).
* Max GPUVM size for cayman and SI is 40 bits.
*/
- amdgpu_vm_adjust_size(adev, 64);
+ amdgpu_vm_adjust_size(adev, 64, 4);
adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
/* Set the internal MC address mask
@@ -1260,7 +1263,7 @@ static int gmc_v8_0_pre_soft_reset(void *handle)
if (!adev->mc.srbm_soft_reset)
return 0;
- gmc_v8_0_mc_stop(adev, &adev->mc.save);
+ gmc_v8_0_mc_stop(adev);
if (gmc_v8_0_wait_for_idle(adev)) {
dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
}
@@ -1306,7 +1309,7 @@ static int gmc_v8_0_post_soft_reset(void *handle)
if (!adev->mc.srbm_soft_reset)
return 0;
- gmc_v8_0_mc_resume(adev, &adev->mc.save);
+ gmc_v8_0_mc_resume(adev);
return 0;
}
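
Worked example for the max_pfn line kept unchanged in the sw_init hunks above (adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18): vm_size is expressed in GB, and one GB holds 2^30 / 2^12 = 2^18 4 KB pages, so the 64 GB default requested via amdgpu_vm_adjust_size(adev, 64, 4) yields 64 << 18 = 16,777,216 page-frame numbers. A sketch, assuming vm_size stays in GB as in the surrounding code:

/* Illustrative only: GB-sized VM space -> number of 4 KB pages. */
static inline u64 vm_size_gb_to_max_pfn(u64 vm_size_gb)
{
	return vm_size_gb << 18;	/* (1 GB / 4 KB) == 1 << 18 pages per GB */
}
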
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 175ba5f9691c..2769c2b3b56e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -23,11 +23,14 @@
#include <linux/firmware.h>
#include "amdgpu.h"
#include "gmc_v9_0.h"
+#include "amdgpu_atomfirmware.h"
#include "vega10/soc15ip.h"
#include "vega10/HDP/hdp_4_0_offset.h"
#include "vega10/HDP/hdp_4_0_sh_mask.h"
#include "vega10/GC/gc_9_0_sh_mask.h"
+#include "vega10/DC/dce_12_0_offset.h"
+#include "vega10/DC/dce_12_0_sh_mask.h"
#include "vega10/vega10_enum.h"
#include "soc15_common.h"
@@ -419,8 +422,7 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
if (!amdgpu_sriov_vf(adev))
base = mmhub_v1_0_get_fb_location(adev);
amdgpu_vram_location(adev, &adev->mc, base);
- adev->mc.gtt_base_align = 0;
- amdgpu_gtt_location(adev, mc);
+ amdgpu_gart_location(adev, mc);
/* base offset of vram pages */
if (adev->flags & AMD_IS_APU)
adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
@@ -442,43 +444,46 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
u32 tmp;
int chansize, numchan;
- /* hbm memory channel size */
- chansize = 128;
-
- tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0);
- tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK;
- tmp >>= DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
- switch (tmp) {
- case 0:
- default:
- numchan = 1;
- break;
- case 1:
- numchan = 2;
- break;
- case 2:
- numchan = 0;
- break;
- case 3:
- numchan = 4;
- break;
- case 4:
- numchan = 0;
- break;
- case 5:
- numchan = 8;
- break;
- case 6:
- numchan = 0;
- break;
- case 7:
- numchan = 16;
- break;
- case 8:
- numchan = 2;
- break;
+ adev->mc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
+ if (!adev->mc.vram_width) {
+ /* hbm memory channel size */
+ chansize = 128;
+
+ tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0);
+ tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK;
+ tmp >>= DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
+ switch (tmp) {
+ case 0:
+ default:
+ numchan = 1;
+ break;
+ case 1:
+ numchan = 2;
+ break;
+ case 2:
+ numchan = 0;
+ break;
+ case 3:
+ numchan = 4;
+ break;
+ case 4:
+ numchan = 0;
+ break;
+ case 5:
+ numchan = 8;
+ break;
+ case 6:
+ numchan = 0;
+ break;
+ case 7:
+ numchan = 16;
+ break;
+ case 8:
+ numchan = 2;
+ break;
+ }
+ adev->mc.vram_width = numchan * chansize;
}
- adev->mc.vram_width = numchan * chansize;
/* Could aper size report 0 ? */
adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
@@ -494,15 +499,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
adev->mc.visible_vram_size = adev->mc.real_vram_size;
- /* unless the user had overridden it, set the gart
- * size equal to the 1024 or vram, whichever is larger.
- */
- if (amdgpu_gart_size == -1)
- adev->mc.gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
- adev->mc.mc_vram_size);
- else
- adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
-
+ amdgpu_gart_set_defaults(adev);
gmc_v9_0_vram_gtt_location(adev, &adev->mc);
return 0;
@@ -537,10 +534,21 @@ static int gmc_v9_0_sw_init(void *handle)
spin_lock_init(&adev->mc.invalidate_lock);
- if (adev->flags & AMD_IS_APU) {
+ switch (adev->asic_type) {
+ case CHIP_RAVEN:
adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
- amdgpu_vm_adjust_size(adev, 64);
- } else {
+ if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
+ adev->vm_manager.vm_size = 1U << 18;
+ adev->vm_manager.block_size = 9;
+ adev->vm_manager.num_level = 3;
+ amdgpu_vm_set_fragment_size(adev, 9);
+ } else {
+ /* vm_size is 64GB for legacy 2-level page support */
+ amdgpu_vm_adjust_size(adev, 64, 9);
+ adev->vm_manager.num_level = 1;
+ }
+ break;
+ case CHIP_VEGA10:
/* XXX Don't know how to get VRAM type yet. */
adev->mc.vram_type = AMDGPU_VRAM_TYPE_HBM;
/*
@@ -550,11 +558,18 @@ static int gmc_v9_0_sw_init(void *handle)
*/
adev->vm_manager.vm_size = 1U << 18;
adev->vm_manager.block_size = 9;
- DRM_INFO("vm size is %llu GB, block size is %u-bit\n",
- adev->vm_manager.vm_size,
- adev->vm_manager.block_size);
+ adev->vm_manager.num_level = 3;
+ amdgpu_vm_set_fragment_size(adev, 9);
+ break;
+ default:
+ break;
}
+ DRM_INFO("vm size is %llu GB, block size is %u-bit, fragment size is %u-bit\n",
+ adev->vm_manager.vm_size,
+ adev->vm_manager.block_size,
+ adev->vm_manager.fragment_size);
+
/* This interrupt is VMC page fault.*/
r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VMC, 0,
&adev->mc.vm_fault);
@@ -619,11 +634,6 @@ static int gmc_v9_0_sw_init(void *handle)
adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
adev->vm_manager.id_mgr[AMDGPU_MMHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
- /* TODO: fix num_level for APU when updating vm size and block size */
- if (adev->flags & AMD_IS_APU)
- adev->vm_manager.num_level = 1;
- else
- adev->vm_manager.num_level = 3;
amdgpu_vm_manager_init(adev);
return 0;
@@ -731,7 +741,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
gmc_v9_0_gart_flush_gpu_tlb(adev, 0);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
- (unsigned)(adev->mc.gtt_size >> 20),
+ (unsigned)(adev->mc.gart_size >> 20),
(unsigned long long)adev->gart.table_addr);
adev->gart.ready = true;
return 0;
@@ -745,6 +755,20 @@ static int gmc_v9_0_hw_init(void *handle)
/* The sequence of these two function calls matters.*/
gmc_v9_0_init_golden_registers(adev);
+ if (adev->mode_info.num_crtc) {
+ u32 tmp;
+
+ /* Lock out access through the VGA aperture */
+ tmp = RREG32_SOC15(DCE, 0, mmVGA_HDP_CONTROL);
+ tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
+ WREG32_SOC15(DCE, 0, mmVGA_HDP_CONTROL, tmp);
+
+ /* disable VGA render */
+ tmp = RREG32_SOC15(DCE, 0, mmVGA_RENDER_CONTROL);
+ tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
+ WREG32_SOC15(DCE, 0, mmVGA_RENDER_CONTROL, tmp);
+ }
+
r = gmc_v9_0_gart_enable(adev);
return r;
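
Sketch of the fallback added in gmc_v9_0_mc_init() above (not part of the patch): when amdgpu_atomfirmware_get_vram_width() reports 0, the bus width is reconstructed from the DF IntLvNumChan encoding, assuming 128-bit HBM channels. The table mirrors the switch statement; the helper name is illustrative:

static unsigned int gmc_v9_fallback_vram_width(unsigned int intlv)
{
	/* IntLvNumChan encodings 0..8, as handled by the switch above */
	static const unsigned int numchan[] = { 1, 2, 0, 4, 0, 8, 0, 16, 2 };
	const unsigned int chansize = 128;	/* HBM channel width in bits */

	if (intlv >= ARRAY_SIZE(numchan))
		return chansize;		/* unknown: treat like encoding 0 */

	return numchan[intlv] * chansize;	/* e.g. encoding 3 -> 4 channels -> 512 bits */
}
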
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 9804318f3488..4395a4f12149 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -69,14 +69,14 @@ static void mmhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
mmhub_v1_0_init_gart_pt_regs(adev);
WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
- (u32)(adev->mc.gtt_start >> 12));
+ (u32)(adev->mc.gart_start >> 12));
WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
- (u32)(adev->mc.gtt_start >> 44));
+ (u32)(adev->mc.gart_start >> 44));
WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
- (u32)(adev->mc.gtt_end >> 12));
+ (u32)(adev->mc.gart_end >> 12));
WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
- (u32)(adev->mc.gtt_end >> 44));
+ (u32)(adev->mc.gart_end >> 44));
}
static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
@@ -138,12 +138,12 @@ static void mmhub_v1_0_init_tlb_regs(struct amdgpu_device *adev)
static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
{
- uint32_t tmp;
+ uint32_t tmp, field;
/* Setup L2 cache */
tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 0);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
/* XXX for emulation, Refer to closed source code.*/
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
0);
@@ -157,7 +157,10 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp);
+ field = adev->vm_manager.fragment_size;
tmp = mmVM_L2_CNTL3_DEFAULT;
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp);
tmp = mmVM_L2_CNTL4_DEFAULT;
@@ -222,6 +225,9 @@ static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
PAGE_TABLE_BLOCK_SIZE,
adev->vm_manager.block_size - 9);
+ /* Send no-retry XNACK on fault to suppress VM fault storm. */
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL, i, tmp);
WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, i*2, 0);
WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, i*2, 0);
@@ -245,28 +251,28 @@ static void mmhub_v1_0_program_invalidation(struct amdgpu_device *adev)
}
struct pctl_data {
- uint32_t index;
- uint32_t data;
+ uint32_t index;
+ uint32_t data;
};
-const struct pctl_data pctl0_data[] = {
- {0x0, 0x7a640},
- {0x9, 0x2a64a},
- {0xd, 0x2a680},
- {0x11, 0x6a684},
- {0x19, 0xea68e},
- {0x29, 0xa69e},
- {0x2b, 0x34a6c0},
- {0x61, 0x83a707},
- {0xe6, 0x8a7a4},
- {0xf0, 0x1a7b8},
- {0xf3, 0xfa7cc},
- {0x104, 0x17a7dd},
- {0x11d, 0xa7dc},
- {0x11f, 0x12a7f5},
- {0x133, 0xa808},
- {0x135, 0x12a810},
- {0x149, 0x7a82c}
+static const struct pctl_data pctl0_data[] = {
+ {0x0, 0x7a640},
+ {0x9, 0x2a64a},
+ {0xd, 0x2a680},
+ {0x11, 0x6a684},
+ {0x19, 0xea68e},
+ {0x29, 0xa69e},
+ {0x2b, 0x34a6c0},
+ {0x61, 0x83a707},
+ {0xe6, 0x8a7a4},
+ {0xf0, 0x1a7b8},
+ {0xf3, 0xfa7cc},
+ {0x104, 0x17a7dd},
+ {0x11d, 0xa7dc},
+ {0x11f, 0x12a7f5},
+ {0x133, 0xa808},
+ {0x135, 0x12a810},
+ {0x149, 0x7a82c}
};
#define PCTL0_DATA_LEN (sizeof(pctl0_data)/sizeof(pctl0_data[0]))
@@ -274,32 +280,39 @@ const struct pctl_data pctl0_data[] = {
#define PCTL0_STCTRL_REG_SAVE_RANGE0_BASE 0xa640
#define PCTL0_STCTRL_REG_SAVE_RANGE0_LIMIT 0xa833
-const struct pctl_data pctl1_data[] = {
- {0x0, 0x39a000},
- {0x3b, 0x44a040},
- {0x81, 0x2a08d},
- {0x85, 0x6ba094},
- {0xf2, 0x18a100},
- {0x10c, 0x4a132},
- {0x112, 0xca141},
- {0x120, 0x2fa158},
- {0x151, 0x17a1d0},
- {0x16a, 0x1a1e9},
- {0x16d, 0x13a1ec},
- {0x182, 0x7a201},
- {0x18b, 0x3a20a},
- {0x190, 0x7a580},
- {0x199, 0xa590},
- {0x19b, 0x4a594},
- {0x1a1, 0x1a59c},
- {0x1a4, 0x7a82c},
- {0x1ad, 0xfa7cc},
- {0x1be, 0x17a7dd},
- {0x1d7, 0x12a810}
+static const struct pctl_data pctl1_data[] = {
+ {0x0, 0x39a000},
+ {0x3b, 0x44a040},
+ {0x81, 0x2a08d},
+ {0x85, 0x6ba094},
+ {0xf2, 0x18a100},
+ {0x10c, 0x4a132},
+ {0x112, 0xca141},
+ {0x120, 0x2fa158},
+ {0x151, 0x17a1d0},
+ {0x16a, 0x1a1e9},
+ {0x16d, 0x13a1ec},
+ {0x182, 0x7a201},
+ {0x18b, 0x3a20a},
+ {0x190, 0x7a580},
+ {0x199, 0xa590},
+ {0x19b, 0x4a594},
+ {0x1a1, 0x1a59c},
+ {0x1a4, 0x7a82c},
+ {0x1ad, 0xfa7cc},
+ {0x1be, 0x17a7dd},
+ {0x1d7, 0x12a810},
+ {0x1eb, 0x4000a7e1},
+ {0x1ec, 0x5000a7f5},
+ {0x1ed, 0x4000a7e2},
+ {0x1ee, 0x5000a7dc},
+ {0x1ef, 0x4000a7e3},
+ {0x1f0, 0x5000a7f6},
+ {0x1f1, 0x5000a7e4}
};
#define PCTL1_DATA_LEN (sizeof(pctl1_data)/sizeof(pctl1_data[0]))
-#define PCTL1_RENG_EXEC_END_PTR 0x1ea
+#define PCTL1_RENG_EXEC_END_PTR 0x1f1
#define PCTL1_STCTRL_REG_SAVE_RANGE0_BASE 0xa000
#define PCTL1_STCTRL_REG_SAVE_RANGE0_LIMIT 0xa20d
#define PCTL1_STCTRL_REG_SAVE_RANGE1_BASE 0xa580
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
index 57bb940c0ecd..5d38229baf69 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
@@ -36,7 +36,4 @@ void mmhub_v1_0_initialize_power_gating(struct amdgpu_device *adev);
void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
bool enable);
-extern const struct amd_ip_funcs mmhub_v1_0_ip_funcs;
-extern const struct amdgpu_ip_block_version mmhub_v1_0_ip_block;
-
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index bde3ca3c21c1..2812d88a8bdd 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -72,21 +72,6 @@ static void xgpu_ai_mailbox_set_valid(struct amdgpu_device *adev, bool val)
reg);
}
-static void xgpu_ai_mailbox_trans_msg(struct amdgpu_device *adev,
- enum idh_request req)
-{
- u32 reg;
-
- reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
- mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0));
- reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0,
- MSGBUF_DATA, req);
- WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0),
- reg);
-
- xgpu_ai_mailbox_set_valid(adev, true);
-}
-
static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev,
enum idh_event event)
{
@@ -154,13 +139,25 @@ static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event)
return r;
}
-
-static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
- enum idh_request req)
-{
+static void xgpu_ai_mailbox_trans_msg(struct amdgpu_device *adev,
+ enum idh_request req, u32 data1, u32 data2, u32 data3) {
+ u32 reg;
int r;
- xgpu_ai_mailbox_trans_msg(adev, req);
+ reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
+ mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0));
+ reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0,
+ MSGBUF_DATA, req);
+ WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0),
+ reg);
+ WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW1),
+ data1);
+ WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW2),
+ data2);
+ WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW3),
+ data3);
+
+ xgpu_ai_mailbox_set_valid(adev, true);
/* start to poll ack */
r = xgpu_ai_poll_ack(adev);
@@ -168,6 +165,14 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
pr_err("Doesn't get ack from pf, continue\n");
xgpu_ai_mailbox_set_valid(adev, false);
+}
+
+static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
+ enum idh_request req)
+{
+ int r;
+
+ xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);
/* start to check msg if request is idh_req_gpu_init_access */
if (req == IDH_REQ_GPU_INIT_ACCESS ||
@@ -342,4 +347,5 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
.req_full_gpu = xgpu_ai_request_full_gpu_access,
.rel_full_gpu = xgpu_ai_release_full_gpu_access,
.reset_gpu = xgpu_ai_request_reset,
+ .trans_msg = xgpu_ai_mailbox_trans_msg,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index 9aefc44d2c34..1e91b9a1c591 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -31,7 +31,9 @@ enum idh_request {
IDH_REL_GPU_INIT_ACCESS,
IDH_REQ_GPU_FINI_ACCESS,
IDH_REL_GPU_FINI_ACCESS,
- IDH_REQ_GPU_RESET_ACCESS
+ IDH_REQ_GPU_RESET_ACCESS,
+
+ IDH_LOG_VF_ERROR = 200,
};
enum idh_event {
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
index 171a658135b5..c25a831f94ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
@@ -613,4 +613,5 @@ const struct amdgpu_virt_ops xgpu_vi_virt_ops = {
.req_full_gpu = xgpu_vi_request_full_gpu_access,
.rel_full_gpu = xgpu_vi_release_full_gpu_access,
.reset_gpu = xgpu_vi_request_reset,
+ .trans_msg = NULL, /* No need to forward VF errors to the host. */
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h
index 2db741131bc6..c791d73d2d54 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h
@@ -32,7 +32,9 @@ enum idh_request {
IDH_REL_GPU_INIT_ACCESS,
IDH_REQ_GPU_FINI_ACCESS,
IDH_REL_GPU_FINI_ACCESS,
- IDH_REQ_GPU_RESET_ACCESS
+ IDH_REQ_GPU_RESET_ACCESS,
+
+ IDH_LOG_VF_ERROR = 200,
};
/* VI mailbox messages data */
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
index 1e272f785def..045988b18bc3 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
@@ -32,6 +32,7 @@
#define smnCPM_CONTROL 0x11180460
#define smnPCIE_CNTL2 0x11180070
+#define smnPCIE_CONFIG_CNTL 0x11180044
u32 nbio_v6_1_get_rev_id(struct amdgpu_device *adev)
{
@@ -67,7 +68,7 @@ void nbio_v6_1_mc_access_enable(struct amdgpu_device *adev, bool enable)
void nbio_v6_1_hdp_flush(struct amdgpu_device *adev)
{
- WREG32_SOC15(NBIO, 0, mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL, 0);
+ WREG32_SOC15_NO_KIQ(NBIO, 0, mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL, 0);
}
u32 nbio_v6_1_get_memsize(struct amdgpu_device *adev)
@@ -256,3 +257,15 @@ void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev)
adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
}
}
+
+void nbio_v6_1_init_registers(struct amdgpu_device *adev)
+{
+ uint32_t def, data;
+
+ def = data = RREG32_PCIE(smnPCIE_CONFIG_CNTL);
+ data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL, CI_SWUS_MAX_READ_REQUEST_SIZE_MODE, 1);
+ data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL, CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV, 1);
+
+ if (def != data)
+ WREG32_PCIE(smnPCIE_CONFIG_CNTL, data);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
index f6f8bc045518..686e4b4d296a 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
@@ -50,5 +50,6 @@ void nbio_v6_1_update_medium_grain_clock_gating(struct amdgpu_device *adev, bool
void nbio_v6_1_update_medium_grain_light_sleep(struct amdgpu_device *adev, bool enable);
void nbio_v6_1_get_clockgating_state(struct amdgpu_device *adev, u32 *flags);
void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev);
+void nbio_v6_1_init_registers(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
index aa04632523fa..11b70d601922 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
@@ -65,7 +65,7 @@ void nbio_v7_0_mc_access_enable(struct amdgpu_device *adev, bool enable)
void nbio_v7_0_hdp_flush(struct amdgpu_device *adev)
{
- WREG32_SOC15(NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
+ WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
}
u32 nbio_v7_0_get_memsize(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index 2258323a3c26..f7cf994b1da2 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -86,6 +86,52 @@ psp_v10_0_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *
return 0;
}
+int psp_v10_0_init_microcode(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+ const char *chip_name;
+ char fw_name[30];
+ int err = 0;
+ const struct psp_firmware_header_v1_0 *hdr;
+
+ DRM_DEBUG("\n");
+
+ switch (adev->asic_type) {
+ case CHIP_RAVEN:
+ chip_name = "raven";
+ break;
+ default: BUG();
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
+ err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+
+ err = amdgpu_ucode_validate(adev->psp.asd_fw);
+ if (err)
+ goto out;
+
+ hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
+ adev->psp.asd_fw_version = le32_to_cpu(hdr->header.ucode_version);
+ adev->psp.asd_feature_version = le32_to_cpu(hdr->ucode_feature_version);
+ adev->psp.asd_ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
+ adev->psp.asd_start_addr = (uint8_t *)hdr +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes);
+
+ return 0;
+out:
+ if (err) {
+ dev_err(adev->dev,
+ "psp v10.0: Failed to load firmware \"%s\"\n",
+ fw_name);
+ release_firmware(adev->psp.asd_fw);
+ adev->psp.asd_fw = NULL;
+ }
+
+ return err;
+}
+
int psp_v10_0_prep_cmd_buf(struct amdgpu_firmware_info *ucode, struct psp_gfx_cmd_resp *cmd)
{
int ret;
@@ -110,7 +156,6 @@ int psp_v10_0_prep_cmd_buf(struct amdgpu_firmware_info *ucode, struct psp_gfx_cm
int psp_v10_0_ring_init(struct psp_context *psp, enum psp_ring_type ring_type)
{
int ret = 0;
- unsigned int psp_ring_reg = 0;
struct psp_ring *ring;
struct amdgpu_device *adev = psp->adev;
@@ -130,6 +175,16 @@ int psp_v10_0_ring_init(struct psp_context *psp, enum psp_ring_type ring_type)
return ret;
}
+ return 0;
+}
+
+int psp_v10_0_ring_create(struct psp_context *psp, enum psp_ring_type ring_type)
+{
+ int ret = 0;
+ unsigned int psp_ring_reg = 0;
+ struct psp_ring *ring = &psp->km_ring;
+ struct amdgpu_device *adev = psp->adev;
+
/* Write low address of the ring to C2PMSG_69 */
psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
@@ -143,13 +198,42 @@ int psp_v10_0_ring_init(struct psp_context *psp, enum psp_ring_type ring_type)
psp_ring_reg = ring_type;
psp_ring_reg = psp_ring_reg << 16;
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
+
+ /* There might be a handshake issue with the hardware which needs a delay */
+ mdelay(20);
+
/* Wait for response flag (bit 31) in C2PMSG_64 */
- psp_ring_reg = 0;
- while ((psp_ring_reg & 0x80000000) == 0) {
- psp_ring_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64);
- }
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x8000FFFF, false);
- return 0;
+ return ret;
+}
+
+int psp_v10_0_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type)
+{
+ int ret = 0;
+ struct psp_ring *ring;
+ unsigned int psp_ring_reg = 0;
+ struct amdgpu_device *adev = psp->adev;
+
+ ring = &psp->km_ring;
+
+ /* Write the ring destroy command to C2PMSG_64 */
+ psp_ring_reg = 3 << 16;
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
+
+ /* There might be a handshake issue with the hardware which needs a delay */
+ mdelay(20);
+
+ /* Wait for response flag (bit 31) in C2PMSG_64 */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x80000000, false);
+
+ amdgpu_bo_free_kernel(&adev->firmware.rbuf,
+ &ring->ring_mem_mc_addr,
+ (void **)&ring->ring_mem);
+
+ return ret;
}
int psp_v10_0_cmd_submit(struct psp_context *psp,
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.h b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.h
index 2022b7b7151e..e76cde2f01f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.h
@@ -27,10 +27,15 @@
#include "amdgpu_psp.h"
+extern int psp_v10_0_init_microcode(struct psp_context *psp);
extern int psp_v10_0_prep_cmd_buf(struct amdgpu_firmware_info *ucode,
struct psp_gfx_cmd_resp *cmd);
extern int psp_v10_0_ring_init(struct psp_context *psp,
enum psp_ring_type ring_type);
+extern int psp_v10_0_ring_create(struct psp_context *psp,
+ enum psp_ring_type ring_type);
+extern int psp_v10_0_ring_destroy(struct psp_context *psp,
+ enum psp_ring_type ring_type);
extern int psp_v10_0_cmd_submit(struct psp_context *psp,
struct amdgpu_firmware_info *ucode,
uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr,
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index c98d77d0c8f8..2a535a4b8d5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -237,11 +237,9 @@ int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
/* there might be handshake issue with hardware which needs delay */
mdelay(20);
-#if 0
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81),
RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81),
0, true);
-#endif
return ret;
}
@@ -341,10 +339,10 @@ int psp_v3_1_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type)
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
0x80000000, 0x80000000, false);
- if (ring->ring_mem)
- amdgpu_bo_free_kernel(&adev->firmware.rbuf,
- &ring->ring_mem_mc_addr,
- (void **)&ring->ring_mem);
+ amdgpu_bo_free_kernel(&adev->firmware.rbuf,
+ &ring->ring_mem_mc_addr,
+ (void **)&ring->ring_mem);
+
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 1d766ae98dc8..b1de44f22824 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -551,17 +551,53 @@ static void sdma_v3_0_rlc_stop(struct amdgpu_device *adev)
*/
static void sdma_v3_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
{
- u32 f32_cntl;
+ u32 f32_cntl, phase_quantum = 0;
int i;
+ if (amdgpu_sdma_phase_quantum) {
+ unsigned value = amdgpu_sdma_phase_quantum;
+ unsigned unit = 0;
+
+ while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
+ SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) {
+ value = (value + 1) >> 1;
+ unit++;
+ }
+ if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
+ SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) {
+ value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
+ SDMA0_PHASE0_QUANTUM__VALUE__SHIFT);
+ unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
+ SDMA0_PHASE0_QUANTUM__UNIT__SHIFT);
+ WARN_ONCE(1,
+ "clamping sdma_phase_quantum to %uK clock cycles\n",
+ value << unit);
+ }
+ phase_quantum =
+ value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
+ unit << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
+ }
+
for (i = 0; i < adev->sdma.num_instances; i++) {
f32_cntl = RREG32(mmSDMA0_CNTL + sdma_offsets[i]);
- if (enable)
+ if (enable) {
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
AUTO_CTXSW_ENABLE, 1);
- else
+ f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
+ ATC_L1_ENABLE, 1);
+ if (amdgpu_sdma_phase_quantum) {
+ WREG32(mmSDMA0_PHASE0_QUANTUM + sdma_offsets[i],
+ phase_quantum);
+ WREG32(mmSDMA0_PHASE1_QUANTUM + sdma_offsets[i],
+ phase_quantum);
+ }
+ } else {
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
AUTO_CTXSW_ENABLE, 0);
+ f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
+ ATC_L1_ENABLE, 1);
+ }
+
WREG32(mmSDMA0_CNTL + sdma_offsets[i], f32_cntl);
}
}
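
How the new phase-quantum programming above encodes the module parameter (a sketch, not part of the patch): the SDMA0_PHASE*_QUANTUM registers store the quantum as value << unit, so the value is repeatedly halved (rounding up) while the unit exponent grows until the value fits its field, and both are clamped if even that is not enough; sdma_v4_0_ctx_switch_enable() below repeats the same loop. The helper leaves the register packing to the caller so no field layout has to be assumed:

static void sdma_split_phase_quantum(unsigned int quantum,
				     unsigned int value_max,
				     unsigned int unit_max,
				     unsigned int *value, unsigned int *unit)
{
	*value = quantum;
	*unit = 0;

	while (*value > value_max) {
		*value = (*value + 1) >> 1;	/* halve, rounding up */
		(*unit)++;
	}
	if (*unit > unit_max) {			/* clamp to the largest representable quantum */
		*value = value_max;
		*unit = unit_max;
	}
	/* the effective quantum is then (*value << *unit) K clock cycles */
}

With a small parameter value (e.g. 32) and a multi-bit VALUE field, the loop never runs and the quantum is programmed unchanged.
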
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 4a65697ccc94..fd7c72aaafa6 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -291,6 +291,8 @@ static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
DRM_DEBUG("Setting write pointer\n");
if (ring->use_doorbell) {
+ u64 *wb = (u64 *)&adev->wb.wb[ring->wptr_offs];
+
DRM_DEBUG("Using doorbell -- "
"wptr_offs == 0x%08x "
"lower_32_bits(ring->wptr) << 2 == 0x%08x "
@@ -299,8 +301,7 @@ static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
lower_32_bits(ring->wptr << 2),
upper_32_bits(ring->wptr << 2));
/* XXX check if swapping is necessary on BE */
- adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr << 2);
- adev->wb.wb[ring->wptr_offs + 1] = upper_32_bits(ring->wptr << 2);
+ WRITE_ONCE(*wb, (ring->wptr << 2));
DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
ring->doorbell_index, ring->wptr << 2);
WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
@@ -493,13 +494,45 @@ static void sdma_v4_0_rlc_stop(struct amdgpu_device *adev)
*/
static void sdma_v4_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
{
- u32 f32_cntl;
+ u32 f32_cntl, phase_quantum = 0;
int i;
+ if (amdgpu_sdma_phase_quantum) {
+ unsigned value = amdgpu_sdma_phase_quantum;
+ unsigned unit = 0;
+
+ while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
+ SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) {
+ value = (value + 1) >> 1;
+ unit++;
+ }
+ if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
+ SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) {
+ value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
+ SDMA0_PHASE0_QUANTUM__VALUE__SHIFT);
+ unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
+ SDMA0_PHASE0_QUANTUM__UNIT__SHIFT);
+ WARN_ONCE(1,
+ "clamping sdma_phase_quantum to %uK clock cycles\n",
+ value << unit);
+ }
+ phase_quantum =
+ value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
+ unit << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
+ }
+
for (i = 0; i < adev->sdma.num_instances; i++) {
f32_cntl = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_CNTL));
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
AUTO_CTXSW_ENABLE, enable ? 1 : 0);
+ if (enable && amdgpu_sdma_phase_quantum) {
+ WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_PHASE0_QUANTUM),
+ phase_quantum);
+ WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_PHASE1_QUANTUM),
+ phase_quantum);
+ WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_PHASE2_QUANTUM),
+ phase_quantum);
+ }
WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_CNTL), f32_cntl);
}
@@ -541,12 +574,13 @@ static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable)
static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
- u32 rb_cntl, ib_cntl;
+ u32 rb_cntl, ib_cntl, wptr_poll_cntl;
u32 rb_bufsz;
u32 wb_offset;
u32 doorbell;
u32 doorbell_offset;
u32 temp;
+ u64 wptr_gpu_addr;
int i, r;
for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -628,6 +662,19 @@ static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_F32_CNTL), temp);
}
+ /* setup the wptr shadow polling */
+ wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+ WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
+ lower_32_bits(wptr_gpu_addr));
+ WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
+ upper_32_bits(wptr_gpu_addr));
+ wptr_poll_cntl = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
+ if (amdgpu_sriov_vf(adev))
+ wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 1);
+ else
+ wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 0);
+ WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), wptr_poll_cntl);
+
/* enable DMA RB */
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
@@ -655,6 +702,7 @@ static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
+
}
return 0;
@@ -751,15 +799,12 @@ static int sdma_v4_0_load_microcode(struct amdgpu_device *adev)
const struct sdma_firmware_header_v1_0 *hdr;
const __le32 *fw_data;
u32 fw_size;
- u32 digest_size = 0;
int i, j;
/* halt the MEs */
sdma_v4_0_enable(adev, false);
for (i = 0; i < adev->sdma.num_instances; i++) {
- uint16_t version_major;
- uint16_t version_minor;
if (!adev->sdma.instance[i].fw)
return -EINVAL;
@@ -767,23 +812,12 @@ static int sdma_v4_0_load_microcode(struct amdgpu_device *adev)
amdgpu_ucode_print_sdma_hdr(&hdr->header);
fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
- version_major = le16_to_cpu(hdr->header.header_version_major);
- version_minor = le16_to_cpu(hdr->header.header_version_minor);
-
- if (version_major == 1 && version_minor >= 1) {
- const struct sdma_firmware_header_v1_1 *sdma_v1_1_hdr = (const struct sdma_firmware_header_v1_1 *) hdr;
- digest_size = le32_to_cpu(sdma_v1_1_hdr->digest_size);
- }
-
- fw_size -= digest_size;
-
fw_data = (const __le32 *)
(adev->sdma.instance[i].fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_UCODE_ADDR), 0);
-
for (j = 0; j < fw_size; j++)
WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_UCODE_DATA), le32_to_cpup(fw_data++));
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index f45fb0f022b3..8284d5dbfc30 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -1150,6 +1150,33 @@ static bool si_read_disabled_bios(struct amdgpu_device *adev)
return r;
}
+#define mmROM_INDEX 0x2A
+#define mmROM_DATA 0x2B
+
+static bool si_read_bios_from_rom(struct amdgpu_device *adev,
+ u8 *bios, u32 length_bytes)
+{
+ u32 *dw_ptr;
+ u32 i, length_dw;
+
+ if (bios == NULL)
+ return false;
+ if (length_bytes == 0)
+ return false;
+ /* APU vbios image is part of sbios image */
+ if (adev->flags & AMD_IS_APU)
+ return false;
+
+ dw_ptr = (u32 *)bios;
+ length_dw = ALIGN(length_bytes, 4) / 4;
+ /* set rom index to 0 */
+ WREG32(mmROM_INDEX, 0);
+ for (i = 0; i < length_dw; i++)
+ dw_ptr[i] = RREG32(mmROM_DATA);
+
+ return true;
+}
+
//xxx: not implemented
static int si_asic_reset(struct amdgpu_device *adev)
{
@@ -1206,6 +1233,7 @@ static void si_detect_hw_virtualization(struct amdgpu_device *adev)
static const struct amdgpu_asic_funcs si_asic_funcs =
{
.read_disabled_bios = &si_read_disabled_bios,
+ .read_bios_from_rom = &si_read_bios_from_rom,
.read_register = &si_read_register,
.reset = &si_asic_reset,
.set_vga_state = &si_vga_set_state,
@@ -1385,6 +1413,7 @@ static void si_init_golden_registers(struct amdgpu_device *adev)
amdgpu_program_register_sequence(adev,
pitcairn_mgcg_cgcg_init,
(const u32)ARRAY_SIZE(pitcairn_mgcg_cgcg_init));
+ break;
case CHIP_VERDE:
amdgpu_program_register_sequence(adev,
verde_golden_registers,
@@ -1409,6 +1438,7 @@ static void si_init_golden_registers(struct amdgpu_device *adev)
amdgpu_program_register_sequence(adev,
oland_mgcg_cgcg_init,
(const u32)ARRAY_SIZE(oland_mgcg_cgcg_init));
+ break;
case CHIP_HAINAN:
amdgpu_program_register_sequence(adev,
hainan_golden_registers,
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index a7ad8390981c..d63873f3f574 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -2055,6 +2055,7 @@ static void si_initialize_powertune_defaults(struct amdgpu_device *adev)
case 0x682C:
si_pi->cac_weights = cac_weights_cape_verde_pro;
si_pi->dte_data = dte_data_sun_xt;
+ update_dte_from_pl2 = true;
break;
case 0x6825:
case 0x6827:
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index a7341d88a320..f2c3a49f73a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -25,7 +25,7 @@
#include <linux/module.h>
#include <drm/drmP.h>
#include "amdgpu.h"
-#include "amdgpu_atomfirmware.h"
+#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
@@ -62,8 +62,6 @@
#include "dce_virtual.h"
#include "mxgpu_ai.h"
-MODULE_FIRMWARE("amdgpu/vega10_smc.bin");
-
#define mmFabricConfigAccessControl 0x0410
#define mmFabricConfigAccessControl_BASE_IDX 0
#define mmFabricConfigAccessControl_DEFAULT 0x00000000
@@ -198,6 +196,50 @@ static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}
+static u32 soc15_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
+{
+ unsigned long flags;
+ u32 r;
+
+ spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
+ WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
+ r = RREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA);
+ spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
+ return r;
+}
+
+static void soc15_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
+ WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
+ WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA, (v));
+ spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
+}
+
+static u32 soc15_se_cac_rreg(struct amdgpu_device *adev, u32 reg)
+{
+ unsigned long flags;
+ u32 r;
+
+ spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
+ WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
+ r = RREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA);
+ spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
+ return r;
+}
+
+static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
+ WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
+ WREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA, (v));
+ spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
+}
+
static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
{
if (adev->flags & AMD_IS_APU)
@@ -392,11 +434,11 @@ static void soc15_gpu_pci_config_reset(struct amdgpu_device *adev)
static int soc15_asic_reset(struct amdgpu_device *adev)
{
- amdgpu_atomfirmware_scratch_regs_engine_hung(adev, true);
+ amdgpu_atombios_scratch_regs_engine_hung(adev, true);
soc15_gpu_pci_config_reset(adev);
- amdgpu_atomfirmware_scratch_regs_engine_hung(adev, false);
+ amdgpu_atombios_scratch_regs_engine_hung(adev, false);
return 0;
}
@@ -524,13 +566,6 @@ static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
return nbio_v6_1_get_rev_id(adev);
}
-
-int gmc_v9_0_mc_wait_for_idle(struct amdgpu_device *adev)
-{
- /* to be implemented in MC IP*/
- return 0;
-}
-
static const struct amdgpu_asic_funcs soc15_asic_funcs =
{
.read_disabled_bios = &soc15_read_disabled_bios,
@@ -557,6 +592,10 @@ static int soc15_common_early_init(void *handle)
adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
adev->didt_rreg = &soc15_didt_rreg;
adev->didt_wreg = &soc15_didt_wreg;
+ adev->gc_cac_rreg = &soc15_gc_cac_rreg;
+ adev->gc_cac_wreg = &soc15_gc_cac_wreg;
+ adev->se_cac_rreg = &soc15_se_cac_rreg;
+ adev->se_cac_wreg = &soc15_se_cac_wreg;
adev->asic_funcs = &soc15_asic_funcs;
@@ -681,6 +720,9 @@ static int soc15_common_hw_init(void *handle)
soc15_pcie_gen3_enable(adev);
/* enable aspm */
soc15_program_aspm(adev);
+ /* setup nbio registers */
+ if (!(adev->flags & AMD_IS_APU))
+ nbio_v6_1_init_registers(adev);
/* enable the doorbell aperture */
soc15_enable_doorbell_aperture(adev, true);
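
The four new GC_CAC/SE_CAC accessors above follow the same indirect index/data pattern as the existing DIDT helpers: take the dedicated spinlock, write the target register number into the *_IND_INDEX register, then read or write *_IND_DATA. A generic sketch of the read side, assuming only the plain RREG32/WREG32 MMIO helpers; the real code uses the RREG32_SOC15/WREG32_SOC15 wrappers and the per-block locks (gc_cac_idx_lock, se_cac_idx_lock):

static u32 soc15_indirect_rreg(struct amdgpu_device *adev, spinlock_t *lock,
			       u32 index_reg, u32 data_reg, u32 reg)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(lock, flags);
	WREG32(index_reg, reg);		/* select which indirect register */
	val = RREG32(data_reg);		/* read its contents back */
	spin_unlock_irqrestore(lock, flags);

	return val;
}
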
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
index e2d330eed952..7a8e4e28abb2 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
@@ -77,6 +77,13 @@ struct nbio_pcie_index_data {
(3 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG3 + reg : \
(ip##_BASE__INST##inst##_SEG4 + reg))))), value)
+#define WREG32_SOC15_NO_KIQ(ip, inst, reg, value) \
+ WREG32_NO_KIQ( (0 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG0 + reg : \
+ (1 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG1 + reg : \
+ (2 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG2 + reg : \
+ (3 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG3 + reg : \
+ (ip##_BASE__INST##inst##_SEG4 + reg))))), value)
+
#define WREG32_SOC15_OFFSET(ip, inst, reg, offset, value) \
WREG32( (0 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG0 + reg : \
(1 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG1 + reg : \
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15d.h b/drivers/gpu/drm/amd/amdgpu/soc15d.h
index e79befd80eed..7f408f85fdb6 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15d.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15d.h
@@ -250,6 +250,7 @@
#define PACKET3_SET_UCONFIG_REG 0x79
#define PACKET3_SET_UCONFIG_REG_START 0x0000c000
#define PACKET3_SET_UCONFIG_REG_END 0x0000c400
+#define PACKET3_SET_UCONFIG_REG_INDEX_TYPE (2 << 28)
#define PACKET3_SCRATCH_RAM_WRITE 0x7D
#define PACKET3_SCRATCH_RAM_READ 0x7E
#define PACKET3_LOAD_CONST_RAM 0x80
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 987b958368ac..23a85750edd6 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -165,6 +165,9 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
unsigned i;
int r;
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
r = amdgpu_ring_alloc(ring, 16);
if (r) {
DRM_ERROR("amdgpu: uvd enc failed to lock ring %d (%d).\n",
@@ -432,13 +435,19 @@ static int uvd_v7_0_sw_init(void *handle)
return r;
}
-
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
ring = &adev->uvd.ring_enc[i];
sprintf(ring->name, "uvd_enc%d", i);
if (amdgpu_sriov_vf(adev)) {
ring->use_doorbell = true;
- ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING0_1 * 2;
+
+ /* currently only use the first encoding ring for
+ * sriov, so set unused location for other unused rings.
+ */
+ if (i == 0)
+ ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING0_1 * 2;
+ else
+ ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING2_3 * 2 + 1;
}
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);
if (r)
@@ -685,6 +694,11 @@ static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
/* 4, set resp to zero */
WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0);
+ WDOORBELL32(adev->uvd.ring_enc[0].doorbell_index, 0);
+ adev->wb.wb[adev->uvd.ring_enc[0].wptr_offs] = 0;
+ adev->uvd.ring_enc[0].wptr = 0;
+ adev->uvd.ring_enc[0].wptr_old = 0;
+
/* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */
WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001);
@@ -702,7 +716,6 @@ static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data);
return -EBUSY;
}
- WDOORBELL32(adev->uvd.ring_enc[0].doorbell_index, 0);
return 0;
}
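The reordering above clears the doorbell and every cached copy of the encode ring's write pointer before the MMSCH kick-off rather than after it completes, so the firmware and the driver start from an agreed wptr of 0. A minimal sketch of that intent, using hypothetical types and helper names (this is not the driver's actual structure):

	#include <stdint.h>
	#include <stdio.h>

	/* Simplified, hypothetical ring bookkeeping used only for this sketch. */
	struct ring_state {
		uint32_t doorbell_index;
		uint32_t wptr_shadow;	/* writeback slot the hardware reads */
		uint64_t wptr;
		uint64_t wptr_old;
	};

	/* Stand-in for WDOORBELL32(); a real driver writes the doorbell BAR. */
	static void write_doorbell(uint32_t index, uint32_t value)
	{
		printf("doorbell[%u] <- %u\n", index, value);
	}

	/* Clear every software-visible write pointer before handing the ring
	 * to firmware, so driver and firmware agree on wptr == 0.
	 */
	static void ring_reset_before_handoff(struct ring_state *ring)
	{
		write_doorbell(ring->doorbell_index, 0);
		ring->wptr_shadow = 0;
		ring->wptr = 0;
		ring->wptr_old = 0;
	}

	int main(void)
	{
		struct ring_state ring = { .doorbell_index = 2 };

		ring_reset_before_handoff(&ring);
		return 0;
	}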
@@ -736,11 +749,9 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
init_table += header->uvd_table_offset;
ring = &adev->uvd.ring;
+ ring->wptr = 0;
size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
- /* disable clock gating */
- MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
- ~UVD_POWER_STATUS__UVD_PG_MODE_MASK, 0);
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS),
0xFFFFFFFF, 0x00000004);
/* mc resume*/
@@ -777,12 +788,6 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE2),
AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40));
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_ADDR_CONFIG),
- adev->gfx.config.gb_addr_config);
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG),
- adev->gfx.config.gb_addr_config);
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG),
- adev->gfx.config.gb_addr_config);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH4), adev->uvd.max_handles);
/* mc resume end*/
@@ -819,17 +824,6 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
UVD_LMI_CTRL__REQ_MODE_MASK |
0x00100000L));
- /* disable byte swapping */
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_SWAP_CNTL), 0);
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MP_SWAP_CNTL), 0);
-
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXA0), 0x40c2040);
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXA1), 0x0);
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXB0), 0x40c2040);
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXB1), 0x0);
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_ALU), 0);
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUX), 0x88);
-
/* take all subblocks out of reset, except VCPU */
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
@@ -838,15 +832,6 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL),
UVD_VCPU_CNTL__CLK_EN_MASK);
- /* enable UMC */
- MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
- ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);
-
- /* boot up the VCPU */
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0);
-
- MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0x02, 0x02);
-
/* enable master interrupt */
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
@@ -859,40 +844,31 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
/* force RBC into idle state */
size = order_base_2(ring->ring_size);
tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, size);
- tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
- tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
- tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
- tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), tmp);
- /* set the write pointer delay */
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL), 0);
-
- /* set the wb address */
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR),
- (upper_32_bits(ring->gpu_addr) >> 2));
-
- /* programm the RB_BASE for ring buffer */
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
- lower_32_bits(ring->gpu_addr));
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
- upper_32_bits(ring->gpu_addr));
-
- ring->wptr = 0;
ring = &adev->uvd.ring_enc[0];
+ ring->wptr = 0;
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_LO), ring->gpu_addr);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_SIZE), ring->ring_size / 4);
+ /* boot up the VCPU */
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0);
+
+ /* enable UMC */
+ MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
+ ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0);
+
+ MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0x02, 0x02);
+
/* add end packet */
memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
header->uvd_table_size = table_size;
- return uvd_v7_0_mmsch_start(adev, &adev->virt.mm_table);
}
- return -EINVAL; /* already initializaed ? */
+ return uvd_v7_0_mmsch_start(adev, &adev->virt.mm_table);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 1ecd6bb90c1f..11134d5f7443 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -173,6 +173,11 @@ static int vce_v4_0_mmsch_start(struct amdgpu_device *adev,
/* 4, set resp to zero */
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP), 0);
+ WDOORBELL32(adev->vce.ring[0].doorbell_index, 0);
+ adev->wb.wb[adev->vce.ring[0].wptr_offs] = 0;
+ adev->vce.ring[0].wptr = 0;
+ adev->vce.ring[0].wptr_old = 0;
+
/* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */
WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST), 0x10000001);
@@ -190,7 +195,6 @@ static int vce_v4_0_mmsch_start(struct amdgpu_device *adev,
dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data);
return -EBUSY;
}
- WDOORBELL32(adev->vce.ring[0].doorbell_index, 0);
return 0;
}
@@ -274,7 +278,8 @@ static int vce_v4_0_sriov_start(struct amdgpu_device *adev)
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL2), ~0x100, 0);
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN),
- 0xffffffff, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
+ VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
+ VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
/* end of MC_RESUME */
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS),
@@ -296,11 +301,9 @@ static int vce_v4_0_sriov_start(struct amdgpu_device *adev)
memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
header->vce_table_size = table_size;
-
- return vce_v4_0_mmsch_start(adev, &adev->virt.mm_table);
}
- return -EINVAL; /* already initializaed ? */
+ return vce_v4_0_mmsch_start(adev, &adev->virt.mm_table);
}
/**
@@ -443,12 +446,14 @@ static int vce_v4_0_sw_init(void *handle)
if (amdgpu_sriov_vf(adev)) {
/* DOORBELL only works under SRIOV */
ring->use_doorbell = true;
+
+ /* currently only use the first encoding ring for sriov,
+ * so set unused location for other unused rings.
+ */
if (i == 0)
- ring->doorbell_index = AMDGPU_DOORBELL64_RING0_1 * 2;
- else if (i == 1)
- ring->doorbell_index = AMDGPU_DOORBELL64_RING2_3 * 2;
+ ring->doorbell_index = AMDGPU_DOORBELL64_VCE_RING0_1 * 2;
else
- ring->doorbell_index = AMDGPU_DOORBELL64_RING2_3 * 2 + 1;
+ ring->doorbell_index = AMDGPU_DOORBELL64_VCE_RING2_3 * 2 + 1;
}
r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
if (r)
@@ -990,11 +995,13 @@ static int vce_v4_0_set_interrupt_state(struct amdgpu_device *adev,
{
uint32_t val = 0;
- if (state == AMDGPU_IRQ_STATE_ENABLE)
- val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;
+ if (!amdgpu_sriov_vf(adev)) {
+ if (state == AMDGPU_IRQ_STATE_ENABLE)
+ val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;
- WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN), val,
- ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
+ WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN), val,
+ ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
+ }
return 0;
}
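vce_v4_0_set_interrupt_state() now illustrates a pattern that recurs throughout the SRIOV work in this series: a virtual function does not own the interrupt-enable register, so the guest skips the MMIO read-modify-write and leaves it to the host. A generic sketch of the guard, with made-up register names and a fake register standing in for MMIO:

	#include <stdint.h>
	#include <stdio.h>

	#define INT_TRAP_EN_MASK	0x00000008u	/* hypothetical enable bit */

	static uint32_t fake_reg;	/* stands in for the MMIO register */

	static void write_masked(uint32_t val, uint32_t keep_mask)
	{
		fake_reg = (fake_reg & keep_mask) | val;
	}

	/* Program the enable bit only on bare metal; a VF leaves it to the host. */
	static int set_trap_interrupt(int is_vf, int enable)
	{
		if (!is_vf)
			write_masked(enable ? INT_TRAP_EN_MASK : 0,
				     ~INT_TRAP_EN_MASK);
		return 0;
	}

	int main(void)
	{
		set_trap_interrupt(0, 1);
		printf("reg = 0x%08x\n", fake_reg);
		return 0;
	}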
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 6cac291c96da..9ff69b90df36 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1028,8 +1028,7 @@ static int vi_common_early_init(void *handle)
/* rev0 hardware requires workarounds to support PG */
adev->pg_flags = 0;
if (adev->rev_id != 0x00 || CZ_REV_BRISTOL(adev->pdev->revision)) {
- adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
- AMD_PG_SUPPORT_GFX_SMG |
+ adev->pg_flags |= AMD_PG_SUPPORT_GFX_SMG |
AMD_PG_SUPPORT_GFX_PIPELINE |
AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_UVD |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 6316aad43a73..e4a8c2e52cb2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -142,12 +142,12 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
struct kfd_ioctl_create_queue_args *args)
{
if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
- pr_err("kfd: queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
+ pr_err("Queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
return -EINVAL;
}
if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
- pr_err("kfd: queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
+ pr_err("Queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
return -EINVAL;
}
@@ -155,26 +155,26 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
(!access_ok(VERIFY_WRITE,
(const void __user *) args->ring_base_address,
sizeof(uint64_t)))) {
- pr_err("kfd: can't access ring base address\n");
+ pr_err("Can't access ring base address\n");
return -EFAULT;
}
if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
- pr_err("kfd: ring size must be a power of 2 or 0\n");
+ pr_err("Ring size must be a power of 2 or 0\n");
return -EINVAL;
}
if (!access_ok(VERIFY_WRITE,
(const void __user *) args->read_pointer_address,
sizeof(uint32_t))) {
- pr_err("kfd: can't access read pointer\n");
+ pr_err("Can't access read pointer\n");
return -EFAULT;
}
if (!access_ok(VERIFY_WRITE,
(const void __user *) args->write_pointer_address,
sizeof(uint32_t))) {
- pr_err("kfd: can't access write pointer\n");
+ pr_err("Can't access write pointer\n");
return -EFAULT;
}
@@ -182,7 +182,7 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
!access_ok(VERIFY_WRITE,
(const void __user *) args->eop_buffer_address,
sizeof(uint32_t))) {
- pr_debug("kfd: can't access eop buffer");
+ pr_debug("Can't access eop buffer");
return -EFAULT;
}
@@ -190,7 +190,7 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
!access_ok(VERIFY_WRITE,
(const void __user *) args->ctx_save_restore_address,
sizeof(uint32_t))) {
- pr_debug("kfd: can't access ctx save restore buffer");
+ pr_debug("Can't access ctx save restore buffer");
return -EFAULT;
}
@@ -219,27 +219,27 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
else
q_properties->format = KFD_QUEUE_FORMAT_PM4;
- pr_debug("Queue Percentage (%d, %d)\n",
+ pr_debug("Queue Percentage: %d, %d\n",
q_properties->queue_percent, args->queue_percentage);
- pr_debug("Queue Priority (%d, %d)\n",
+ pr_debug("Queue Priority: %d, %d\n",
q_properties->priority, args->queue_priority);
- pr_debug("Queue Address (0x%llX, 0x%llX)\n",
+ pr_debug("Queue Address: 0x%llX, 0x%llX\n",
q_properties->queue_address, args->ring_base_address);
- pr_debug("Queue Size (0x%llX, %u)\n",
+ pr_debug("Queue Size: 0x%llX, %u\n",
q_properties->queue_size, args->ring_size);
- pr_debug("Queue r/w Pointers (0x%llX, 0x%llX)\n",
- (uint64_t) q_properties->read_ptr,
- (uint64_t) q_properties->write_ptr);
+ pr_debug("Queue r/w Pointers: %p, %p\n",
+ q_properties->read_ptr,
+ q_properties->write_ptr);
- pr_debug("Queue Format (%d)\n", q_properties->format);
+ pr_debug("Queue Format: %d\n", q_properties->format);
- pr_debug("Queue EOP (0x%llX)\n", q_properties->eop_ring_buffer_address);
+ pr_debug("Queue EOP: 0x%llX\n", q_properties->eop_ring_buffer_address);
- pr_debug("Queue CTX save arex (0x%llX)\n",
+ pr_debug("Queue CTX save area: 0x%llX\n",
q_properties->ctx_save_restore_area_address);
return 0;
@@ -257,16 +257,16 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
memset(&q_properties, 0, sizeof(struct queue_properties));
- pr_debug("kfd: creating queue ioctl\n");
+ pr_debug("Creating queue ioctl\n");
err = set_queue_properties_from_user(&q_properties, args);
if (err)
return err;
- pr_debug("kfd: looking for gpu id 0x%x\n", args->gpu_id);
+ pr_debug("Looking for gpu id 0x%x\n", args->gpu_id);
dev = kfd_device_by_id(args->gpu_id);
- if (dev == NULL) {
- pr_debug("kfd: gpu id 0x%x was not found\n", args->gpu_id);
+ if (!dev) {
+ pr_debug("Could not find gpu id 0x%x\n", args->gpu_id);
return -EINVAL;
}
@@ -278,7 +278,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
goto err_bind_process;
}
- pr_debug("kfd: creating queue for PASID %d on GPU 0x%x\n",
+ pr_debug("Creating queue for PASID %d on gpu 0x%x\n",
p->pasid,
dev->id);
@@ -296,15 +296,15 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
mutex_unlock(&p->mutex);
- pr_debug("kfd: queue id %d was created successfully\n", args->queue_id);
+ pr_debug("Queue id %d was created successfully\n", args->queue_id);
- pr_debug("ring buffer address == 0x%016llX\n",
+ pr_debug("Ring buffer address == 0x%016llX\n",
args->ring_base_address);
- pr_debug("read ptr address == 0x%016llX\n",
+ pr_debug("Read ptr address == 0x%016llX\n",
args->read_pointer_address);
- pr_debug("write ptr address == 0x%016llX\n",
+ pr_debug("Write ptr address == 0x%016llX\n",
args->write_pointer_address);
return 0;
@@ -321,7 +321,7 @@ static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
int retval;
struct kfd_ioctl_destroy_queue_args *args = data;
- pr_debug("kfd: destroying queue id %d for PASID %d\n",
+ pr_debug("Destroying queue id %d for pasid %d\n",
args->queue_id,
p->pasid);
@@ -341,12 +341,12 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
struct queue_properties properties;
if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
- pr_err("kfd: queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
+ pr_err("Queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
return -EINVAL;
}
if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
- pr_err("kfd: queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
+ pr_err("Queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
return -EINVAL;
}
@@ -354,12 +354,12 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
(!access_ok(VERIFY_WRITE,
(const void __user *) args->ring_base_address,
sizeof(uint64_t)))) {
- pr_err("kfd: can't access ring base address\n");
+ pr_err("Can't access ring base address\n");
return -EFAULT;
}
if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
- pr_err("kfd: ring size must be a power of 2 or 0\n");
+ pr_err("Ring size must be a power of 2 or 0\n");
return -EINVAL;
}
@@ -368,7 +368,7 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
properties.queue_percent = args->queue_percentage;
properties.priority = args->queue_priority;
- pr_debug("kfd: updating queue id %d for PASID %d\n",
+ pr_debug("Updating queue id %d for pasid %d\n",
args->queue_id, p->pasid);
mutex_lock(&p->mutex);
@@ -400,7 +400,7 @@ static int kfd_ioctl_set_memory_policy(struct file *filep,
}
dev = kfd_device_by_id(args->gpu_id);
- if (dev == NULL)
+ if (!dev)
return -EINVAL;
mutex_lock(&p->mutex);
@@ -443,7 +443,7 @@ static int kfd_ioctl_dbg_register(struct file *filep,
long status = 0;
dev = kfd_device_by_id(args->gpu_id);
- if (dev == NULL)
+ if (!dev)
return -EINVAL;
if (dev->device_info->asic_family == CHIP_CARRIZO) {
@@ -460,12 +460,11 @@ static int kfd_ioctl_dbg_register(struct file *filep,
*/
pdd = kfd_bind_process_to_device(dev, p);
if (IS_ERR(pdd)) {
- mutex_unlock(&p->mutex);
- mutex_unlock(kfd_get_dbgmgr_mutex());
- return PTR_ERR(pdd);
+ status = PTR_ERR(pdd);
+ goto out;
}
- if (dev->dbgmgr == NULL) {
+ if (!dev->dbgmgr) {
/* In case of a legal call, we have no dbgmgr yet */
create_ok = kfd_dbgmgr_create(&dbgmgr_ptr, dev);
if (create_ok) {
@@ -480,6 +479,7 @@ static int kfd_ioctl_dbg_register(struct file *filep,
status = -EINVAL;
}
+out:
mutex_unlock(&p->mutex);
mutex_unlock(kfd_get_dbgmgr_mutex());
@@ -494,7 +494,7 @@ static int kfd_ioctl_dbg_unregister(struct file *filep,
long status;
dev = kfd_device_by_id(args->gpu_id);
- if (dev == NULL)
+ if (!dev)
return -EINVAL;
if (dev->device_info->asic_family == CHIP_CARRIZO) {
@@ -505,7 +505,7 @@ static int kfd_ioctl_dbg_unregister(struct file *filep,
mutex_lock(kfd_get_dbgmgr_mutex());
status = kfd_dbgmgr_unregister(dev->dbgmgr, p);
- if (status == 0) {
+ if (!status) {
kfd_dbgmgr_destroy(dev->dbgmgr);
dev->dbgmgr = NULL;
}
@@ -539,7 +539,7 @@ static int kfd_ioctl_dbg_address_watch(struct file *filep,
memset((void *) &aw_info, 0, sizeof(struct dbg_address_watch_info));
dev = kfd_device_by_id(args->gpu_id);
- if (dev == NULL)
+ if (!dev)
return -EINVAL;
if (dev->device_info->asic_family == CHIP_CARRIZO) {
@@ -580,8 +580,8 @@ static int kfd_ioctl_dbg_address_watch(struct file *filep,
args_idx += sizeof(aw_info.watch_address) * aw_info.num_watch_points;
if (args_idx >= args->buf_size_in_bytes - sizeof(*args)) {
- kfree(args_buff);
- return -EINVAL;
+ status = -EINVAL;
+ goto out;
}
watch_mask_value = (uint64_t) args_buff[args_idx];
@@ -604,8 +604,8 @@ static int kfd_ioctl_dbg_address_watch(struct file *filep,
}
if (args_idx >= args->buf_size_in_bytes - sizeof(args)) {
- kfree(args_buff);
- return -EINVAL;
+ status = -EINVAL;
+ goto out;
}
/* Currently HSA Event is not supported for DBG */
@@ -617,6 +617,7 @@ static int kfd_ioctl_dbg_address_watch(struct file *filep,
mutex_unlock(kfd_get_dbgmgr_mutex());
+out:
kfree(args_buff);
return status;
@@ -646,7 +647,7 @@ static int kfd_ioctl_dbg_wave_control(struct file *filep,
sizeof(wac_info.trapId);
dev = kfd_device_by_id(args->gpu_id);
- if (dev == NULL)
+ if (!dev)
return -EINVAL;
if (dev->device_info->asic_family == CHIP_CARRIZO) {
@@ -782,8 +783,9 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
"scratch_limit %llX\n", pdd->scratch_limit);
args->num_of_nodes++;
- } while ((pdd = kfd_get_next_process_device_data(p, pdd)) != NULL &&
- (args->num_of_nodes < NUM_OF_SUPPORTED_GPUS));
+
+ pdd = kfd_get_next_process_device_data(p, pdd);
+ } while (pdd && (args->num_of_nodes < NUM_OF_SUPPORTED_GPUS));
}
mutex_unlock(&p->mutex);
@@ -846,9 +848,84 @@ static int kfd_ioctl_wait_events(struct file *filp, struct kfd_process *p,
return err;
}
+static int kfd_ioctl_set_scratch_backing_va(struct file *filep,
+ struct kfd_process *p, void *data)
+{
+ struct kfd_ioctl_set_scratch_backing_va_args *args = data;
+ struct kfd_process_device *pdd;
+ struct kfd_dev *dev;
+ long err;
+
+ dev = kfd_device_by_id(args->gpu_id);
+ if (!dev)
+ return -EINVAL;
+
+ mutex_lock(&p->mutex);
+
+ pdd = kfd_bind_process_to_device(dev, p);
+ if (IS_ERR(pdd)) {
+ err = PTR_ERR(pdd);
+ goto bind_process_to_device_fail;
+ }
+
+ pdd->qpd.sh_hidden_private_base = args->va_addr;
+
+ mutex_unlock(&p->mutex);
+
+ if (sched_policy == KFD_SCHED_POLICY_NO_HWS && pdd->qpd.vmid != 0)
+ dev->kfd2kgd->set_scratch_backing_va(
+ dev->kgd, args->va_addr, pdd->qpd.vmid);
+
+ return 0;
+
+bind_process_to_device_fail:
+ mutex_unlock(&p->mutex);
+ return err;
+}
+
+static int kfd_ioctl_get_tile_config(struct file *filep,
+ struct kfd_process *p, void *data)
+{
+ struct kfd_ioctl_get_tile_config_args *args = data;
+ struct kfd_dev *dev;
+ struct tile_config config;
+ int err = 0;
+
+ dev = kfd_device_by_id(args->gpu_id);
+ if (!dev)
+ return -EINVAL;
+
+ dev->kfd2kgd->get_tile_config(dev->kgd, &config);
+
+ args->gb_addr_config = config.gb_addr_config;
+ args->num_banks = config.num_banks;
+ args->num_ranks = config.num_ranks;
+
+ if (args->num_tile_configs > config.num_tile_configs)
+ args->num_tile_configs = config.num_tile_configs;
+ err = copy_to_user((void __user *)args->tile_config_ptr,
+ config.tile_config_ptr,
+ args->num_tile_configs * sizeof(uint32_t));
+ if (err) {
+ args->num_tile_configs = 0;
+ return -EFAULT;
+ }
+
+ if (args->num_macro_tile_configs > config.num_macro_tile_configs)
+ args->num_macro_tile_configs =
+ config.num_macro_tile_configs;
+ err = copy_to_user((void __user *)args->macro_tile_config_ptr,
+ config.macro_tile_config_ptr,
+ args->num_macro_tile_configs * sizeof(uint32_t));
+ if (err) {
+ args->num_macro_tile_configs = 0;
+ return -EFAULT;
+ }
+
+ return 0;
+}
#define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
- [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}
+ [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, \
+ .cmd_drv = 0, .name = #ioctl}
/** Ioctl table */
static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
@@ -899,6 +976,12 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_WAVE_CONTROL,
kfd_ioctl_dbg_wave_control, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_SCRATCH_BACKING_VA,
+ kfd_ioctl_set_scratch_backing_va, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_TILE_CONFIG,
+ kfd_ioctl_get_tile_config, 0)
};
#define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls)
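For context on how the two new ioctls added above might be driven from user space, here is a hypothetical sketch. The struct layouts and ioctl names are taken from the patch's uapi additions; the header path, the /dev/kfd node and all numeric values are assumptions, and the ROCm Thunk is the normal consumer of this interface rather than hand-written code like this.

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/kfd_ioctl.h>

	#define MAX_TILE_CFG		64
	#define MAX_MACRO_TILE_CFG	32

	int main(void)
	{
		struct kfd_ioctl_set_scratch_backing_va_args sva;
		struct kfd_ioctl_get_tile_config_args tc;
		uint32_t tile[MAX_TILE_CFG], macro_tile[MAX_MACRO_TILE_CFG];
		uint32_t gpu_id = 0x1002;	/* hypothetical; found via topology */
		int fd = open("/dev/kfd", O_RDWR);

		if (fd < 0)
			return 1;

		/* Tell the KFD where the scratch backing memory is mapped. */
		memset(&sva, 0, sizeof(sva));
		sva.gpu_id = gpu_id;
		sva.va_addr = 0x100000000ULL;	/* hypothetical GPUVM address */
		if (ioctl(fd, AMDKFD_IOC_SET_SCRATCH_BACKING_VA, &sva))
			perror("set_scratch_backing_va");

		/* Query tiling info: pass in array capacities, read back counts. */
		memset(&tc, 0, sizeof(tc));
		tc.gpu_id = gpu_id;
		tc.tile_config_ptr = (uint64_t)(uintptr_t)tile;
		tc.macro_tile_config_ptr = (uint64_t)(uintptr_t)macro_tile;
		tc.num_tile_configs = MAX_TILE_CFG;
		tc.num_macro_tile_configs = MAX_MACRO_TILE_CFG;
		if (ioctl(fd, AMDKFD_IOC_GET_TILE_CONFIG, &tc) == 0)
			printf("gb_addr_config 0x%x, %u tile / %u macro-tile entries\n",
			       tc.gb_addr_config, tc.num_tile_configs,
			       tc.num_macro_tile_configs);

		close(fd);
		return 0;
	}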
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
index d5e19b5fbbfb..0aa021aa0aa1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
@@ -42,8 +42,6 @@
static void dbgdev_address_watch_disable_nodiq(struct kfd_dev *dev)
{
- BUG_ON(!dev || !dev->kfd2kgd);
-
dev->kfd2kgd->address_watch_disable(dev->kgd);
}
@@ -62,7 +60,8 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
unsigned int *ib_packet_buff;
int status;
- BUG_ON(!dbgdev || !dbgdev->kq || !packet_buff || !size_in_bytes);
+ if (WARN_ON(!size_in_bytes))
+ return -EINVAL;
kq = dbgdev->kq;
@@ -77,8 +76,8 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
status = kq->ops.acquire_packet_buffer(kq,
pq_packets_size_in_bytes / sizeof(uint32_t),
&ib_packet_buff);
- if (status != 0) {
- pr_err("amdkfd: acquire_packet_buffer failed\n");
+ if (status) {
+ pr_err("acquire_packet_buffer failed\n");
return status;
}
@@ -115,8 +114,8 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
status = kfd_gtt_sa_allocate(dbgdev->dev, sizeof(uint64_t),
&mem_obj);
- if (status != 0) {
- pr_err("amdkfd: Failed to allocate GART memory\n");
+ if (status) {
+ pr_err("Failed to allocate GART memory\n");
kq->ops.rollback_packet(kq);
return status;
}
@@ -168,8 +167,6 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
static int dbgdev_register_nodiq(struct kfd_dbgdev *dbgdev)
{
- BUG_ON(!dbgdev);
-
/*
* no action is needed in this case,
* just make sure diq will not be used
@@ -187,14 +184,12 @@ static int dbgdev_register_diq(struct kfd_dbgdev *dbgdev)
struct kernel_queue *kq = NULL;
int status;
- BUG_ON(!dbgdev || !dbgdev->pqm || !dbgdev->dev);
-
status = pqm_create_queue(dbgdev->pqm, dbgdev->dev, NULL,
&properties, 0, KFD_QUEUE_TYPE_DIQ,
&qid);
if (status) {
- pr_err("amdkfd: Failed to create DIQ\n");
+ pr_err("Failed to create DIQ\n");
return status;
}
@@ -202,8 +197,8 @@ static int dbgdev_register_diq(struct kfd_dbgdev *dbgdev)
kq = pqm_get_kernel_queue(dbgdev->pqm, qid);
- if (kq == NULL) {
- pr_err("amdkfd: Error getting DIQ\n");
+ if (!kq) {
+ pr_err("Error getting DIQ\n");
pqm_destroy_queue(dbgdev->pqm, qid);
return -EFAULT;
}
@@ -215,8 +210,6 @@ static int dbgdev_register_diq(struct kfd_dbgdev *dbgdev)
static int dbgdev_unregister_nodiq(struct kfd_dbgdev *dbgdev)
{
- BUG_ON(!dbgdev || !dbgdev->dev);
-
/* disable watch address */
dbgdev_address_watch_disable_nodiq(dbgdev->dev);
return 0;
@@ -227,8 +220,6 @@ static int dbgdev_unregister_diq(struct kfd_dbgdev *dbgdev)
/* todo - disable address watch */
int status;
- BUG_ON(!dbgdev || !dbgdev->pqm || !dbgdev->kq);
-
status = pqm_destroy_queue(dbgdev->pqm,
dbgdev->kq->queue->properties.queue_id);
dbgdev->kq = NULL;
@@ -245,14 +236,12 @@ static void dbgdev_address_watch_set_registers(
{
union ULARGE_INTEGER addr;
- BUG_ON(!adw_info || !addrHi || !addrLo || !cntl);
-
addr.quad_part = 0;
addrHi->u32All = 0;
addrLo->u32All = 0;
cntl->u32All = 0;
- if (adw_info->watch_mask != NULL)
+ if (adw_info->watch_mask)
cntl->bitfields.mask =
(uint32_t) (adw_info->watch_mask[index] &
ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK);
@@ -279,7 +268,7 @@ static void dbgdev_address_watch_set_registers(
}
static int dbgdev_address_watch_nodiq(struct kfd_dbgdev *dbgdev,
- struct dbg_address_watch_info *adw_info)
+ struct dbg_address_watch_info *adw_info)
{
union TCP_WATCH_ADDR_H_BITS addrHi;
union TCP_WATCH_ADDR_L_BITS addrLo;
@@ -287,13 +276,11 @@ static int dbgdev_address_watch_nodiq(struct kfd_dbgdev *dbgdev,
struct kfd_process_device *pdd;
unsigned int i;
- BUG_ON(!dbgdev || !dbgdev->dev || !adw_info);
-
/* taking the vmid for that process on the safe way using pdd */
pdd = kfd_get_process_device_data(dbgdev->dev,
adw_info->process);
if (!pdd) {
- pr_err("amdkfd: Failed to get pdd for wave control no DIQ\n");
+ pr_err("Failed to get pdd for wave control no DIQ\n");
return -EFAULT;
}
@@ -303,17 +290,16 @@ static int dbgdev_address_watch_nodiq(struct kfd_dbgdev *dbgdev,
if ((adw_info->num_watch_points > MAX_WATCH_ADDRESSES) ||
(adw_info->num_watch_points == 0)) {
- pr_err("amdkfd: num_watch_points is invalid\n");
+ pr_err("num_watch_points is invalid\n");
return -EINVAL;
}
- if ((adw_info->watch_mode == NULL) ||
- (adw_info->watch_address == NULL)) {
- pr_err("amdkfd: adw_info fields are not valid\n");
+ if (!adw_info->watch_mode || !adw_info->watch_address) {
+ pr_err("adw_info fields are not valid\n");
return -EINVAL;
}
- for (i = 0 ; i < adw_info->num_watch_points ; i++) {
+ for (i = 0; i < adw_info->num_watch_points; i++) {
dbgdev_address_watch_set_registers(adw_info, &addrHi, &addrLo,
&cntl, i, pdd->qpd.vmid);
@@ -348,7 +334,7 @@ static int dbgdev_address_watch_nodiq(struct kfd_dbgdev *dbgdev,
}
static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev,
- struct dbg_address_watch_info *adw_info)
+ struct dbg_address_watch_info *adw_info)
{
struct pm4__set_config_reg *packets_vec;
union TCP_WATCH_ADDR_H_BITS addrHi;
@@ -363,28 +349,25 @@ static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev,
/* we do not control the vmid in DIQ mode, just a place holder */
unsigned int vmid = 0;
- BUG_ON(!dbgdev || !dbgdev->dev || !adw_info);
-
addrHi.u32All = 0;
addrLo.u32All = 0;
cntl.u32All = 0;
if ((adw_info->num_watch_points > MAX_WATCH_ADDRESSES) ||
(adw_info->num_watch_points == 0)) {
- pr_err("amdkfd: num_watch_points is invalid\n");
+ pr_err("num_watch_points is invalid\n");
return -EINVAL;
}
- if ((NULL == adw_info->watch_mode) ||
- (NULL == adw_info->watch_address)) {
- pr_err("amdkfd: adw_info fields are not valid\n");
+ if (!adw_info->watch_mode || !adw_info->watch_address) {
+ pr_err("adw_info fields are not valid\n");
return -EINVAL;
}
status = kfd_gtt_sa_allocate(dbgdev->dev, ib_size, &mem_obj);
- if (status != 0) {
- pr_err("amdkfd: Failed to allocate GART memory\n");
+ if (status) {
+ pr_err("Failed to allocate GART memory\n");
return status;
}
@@ -442,8 +425,6 @@ static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev,
i,
ADDRESS_WATCH_REG_CNTL);
- aw_reg_add_dword /= sizeof(uint32_t);
-
packets_vec[0].bitfields2.reg_offset =
aw_reg_add_dword - AMD_CONFIG_REG_BASE;
@@ -455,8 +436,6 @@ static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev,
i,
ADDRESS_WATCH_REG_ADDR_HI);
- aw_reg_add_dword /= sizeof(uint32_t);
-
packets_vec[1].bitfields2.reg_offset =
aw_reg_add_dword - AMD_CONFIG_REG_BASE;
packets_vec[1].reg_data[0] = addrHi.u32All;
@@ -467,8 +446,6 @@ static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev,
i,
ADDRESS_WATCH_REG_ADDR_LO);
- aw_reg_add_dword /= sizeof(uint32_t);
-
packets_vec[2].bitfields2.reg_offset =
aw_reg_add_dword - AMD_CONFIG_REG_BASE;
packets_vec[2].reg_data[0] = addrLo.u32All;
@@ -485,8 +462,6 @@ static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev,
i,
ADDRESS_WATCH_REG_CNTL);
- aw_reg_add_dword /= sizeof(uint32_t);
-
packets_vec[3].bitfields2.reg_offset =
aw_reg_add_dword - AMD_CONFIG_REG_BASE;
packets_vec[3].reg_data[0] = cntl.u32All;
@@ -498,8 +473,8 @@ static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev,
packet_buff_uint,
ib_size);
- if (status != 0) {
- pr_err("amdkfd: Failed to submit IB to DIQ\n");
+ if (status) {
+ pr_err("Failed to submit IB to DIQ\n");
break;
}
}
@@ -518,8 +493,6 @@ static int dbgdev_wave_control_set_registers(
union GRBM_GFX_INDEX_BITS reg_gfx_index;
struct HsaDbgWaveMsgAMDGen2 *pMsg;
- BUG_ON(!wac_info || !in_reg_sq_cmd || !in_reg_gfx_index);
-
reg_sq_cmd.u32All = 0;
reg_gfx_index.u32All = 0;
pMsg = &wac_info->dbgWave_msg.DbgWaveMsg.WaveMsgInfoGen2;
@@ -620,18 +593,16 @@ static int dbgdev_wave_control_diq(struct kfd_dbgdev *dbgdev,
struct pm4__set_config_reg *packets_vec;
size_t ib_size = sizeof(struct pm4__set_config_reg) * 3;
- BUG_ON(!dbgdev || !wac_info);
-
reg_sq_cmd.u32All = 0;
status = dbgdev_wave_control_set_registers(wac_info, &reg_sq_cmd,
&reg_gfx_index);
if (status) {
- pr_err("amdkfd: Failed to set wave control registers\n");
+ pr_err("Failed to set wave control registers\n");
return status;
}
- /* we do not control the VMID in DIQ,so reset it to a known value */
+ /* we do not control the VMID in DIQ, so reset it to a known value */
reg_sq_cmd.bits.vm_id = 0;
pr_debug("\t\t %30s\n", "* * * * * * * * * * * * * * * * * *");
@@ -667,7 +638,7 @@ static int dbgdev_wave_control_diq(struct kfd_dbgdev *dbgdev,
status = kfd_gtt_sa_allocate(dbgdev->dev, ib_size, &mem_obj);
if (status != 0) {
- pr_err("amdkfd: Failed to allocate GART memory\n");
+ pr_err("Failed to allocate GART memory\n");
return status;
}
@@ -719,8 +690,8 @@ static int dbgdev_wave_control_diq(struct kfd_dbgdev *dbgdev,
packet_buff_uint,
ib_size);
- if (status != 0)
- pr_err("amdkfd: Failed to submit IB to DIQ\n");
+ if (status)
+ pr_err("Failed to submit IB to DIQ\n");
kfd_gtt_sa_free(dbgdev->dev, mem_obj);
@@ -735,21 +706,19 @@ static int dbgdev_wave_control_nodiq(struct kfd_dbgdev *dbgdev,
union GRBM_GFX_INDEX_BITS reg_gfx_index;
struct kfd_process_device *pdd;
- BUG_ON(!dbgdev || !dbgdev->dev || !wac_info);
-
reg_sq_cmd.u32All = 0;
/* taking the VMID for that process on the safe way using PDD */
pdd = kfd_get_process_device_data(dbgdev->dev, wac_info->process);
if (!pdd) {
- pr_err("amdkfd: Failed to get pdd for wave control no DIQ\n");
+ pr_err("Failed to get pdd for wave control no DIQ\n");
return -EFAULT;
}
status = dbgdev_wave_control_set_registers(wac_info, &reg_sq_cmd,
&reg_gfx_index);
if (status) {
- pr_err("amdkfd: Failed to set wave control registers\n");
+ pr_err("Failed to set wave control registers\n");
return status;
}
@@ -818,12 +787,13 @@ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p)
/* Scan all registers in the range ATC_VMID8_PASID_MAPPING ..
* ATC_VMID15_PASID_MAPPING
- * to check which VMID the current process is mapped to. */
+ * to check which VMID the current process is mapped to.
+ */
for (vmid = first_vmid_to_scan; vmid <= last_vmid_to_scan; vmid++) {
if (dev->kfd2kgd->get_atc_vmid_pasid_mapping_valid
(dev->kgd, vmid)) {
- if (dev->kfd2kgd->get_atc_vmid_pasid_mapping_valid
+ if (dev->kfd2kgd->get_atc_vmid_pasid_mapping_pasid
(dev->kgd, vmid) == p->pasid) {
pr_debug("Killing wave fronts of vmid %d and pasid %d\n",
vmid, p->pasid);
@@ -833,7 +803,7 @@ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p)
}
if (vmid > last_vmid_to_scan) {
- pr_err("amdkfd: didn't found vmid for pasid (%d)\n", p->pasid);
+ pr_err("Didn't find vmid for pasid %d\n", p->pasid);
return -EFAULT;
}
@@ -860,8 +830,6 @@ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p)
void kfd_dbgdev_init(struct kfd_dbgdev *pdbgdev, struct kfd_dev *pdev,
enum DBGDEV_TYPE type)
{
- BUG_ON(!pdbgdev || !pdev);
-
pdbgdev->dev = pdev;
pdbgdev->kq = NULL;
pdbgdev->type = type;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
index 56d676396342..3da25f7bda6b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
@@ -44,8 +44,6 @@ struct mutex *kfd_get_dbgmgr_mutex(void)
static void kfd_dbgmgr_uninitialize(struct kfd_dbgmgr *pmgr)
{
- BUG_ON(!pmgr);
-
kfree(pmgr->dbgdev);
pmgr->dbgdev = NULL;
@@ -55,7 +53,7 @@ static void kfd_dbgmgr_uninitialize(struct kfd_dbgmgr *pmgr)
void kfd_dbgmgr_destroy(struct kfd_dbgmgr *pmgr)
{
- if (pmgr != NULL) {
+ if (pmgr) {
kfd_dbgmgr_uninitialize(pmgr);
kfree(pmgr);
}
@@ -66,12 +64,12 @@ bool kfd_dbgmgr_create(struct kfd_dbgmgr **ppmgr, struct kfd_dev *pdev)
enum DBGDEV_TYPE type = DBGDEV_TYPE_DIQ;
struct kfd_dbgmgr *new_buff;
- BUG_ON(pdev == NULL);
- BUG_ON(!pdev->init_complete);
+ if (WARN_ON(!pdev->init_complete))
+ return false;
new_buff = kfd_alloc_struct(new_buff);
if (!new_buff) {
- pr_err("amdkfd: Failed to allocate dbgmgr instance\n");
+ pr_err("Failed to allocate dbgmgr instance\n");
return false;
}
@@ -79,7 +77,7 @@ bool kfd_dbgmgr_create(struct kfd_dbgmgr **ppmgr, struct kfd_dev *pdev)
new_buff->dev = pdev;
new_buff->dbgdev = kfd_alloc_struct(new_buff->dbgdev);
if (!new_buff->dbgdev) {
- pr_err("amdkfd: Failed to allocate dbgdev instance\n");
+ pr_err("Failed to allocate dbgdev instance\n");
kfree(new_buff);
return false;
}
@@ -96,8 +94,6 @@ bool kfd_dbgmgr_create(struct kfd_dbgmgr **ppmgr, struct kfd_dev *pdev)
long kfd_dbgmgr_register(struct kfd_dbgmgr *pmgr, struct kfd_process *p)
{
- BUG_ON(!p || !pmgr || !pmgr->dbgdev);
-
if (pmgr->pasid != 0) {
pr_debug("H/W debugger is already active using pasid %d\n",
pmgr->pasid);
@@ -118,8 +114,6 @@ long kfd_dbgmgr_register(struct kfd_dbgmgr *pmgr, struct kfd_process *p)
long kfd_dbgmgr_unregister(struct kfd_dbgmgr *pmgr, struct kfd_process *p)
{
- BUG_ON(!p || !pmgr || !pmgr->dbgdev);
-
/* Is the requests coming from the already registered process? */
if (pmgr->pasid != p->pasid) {
pr_debug("H/W debugger is not registered by calling pasid %d\n",
@@ -137,8 +131,6 @@ long kfd_dbgmgr_unregister(struct kfd_dbgmgr *pmgr, struct kfd_process *p)
long kfd_dbgmgr_wave_control(struct kfd_dbgmgr *pmgr,
struct dbg_wave_control_info *wac_info)
{
- BUG_ON(!pmgr || !pmgr->dbgdev || !wac_info);
-
/* Is the requests coming from the already registered process? */
if (pmgr->pasid != wac_info->process->pasid) {
pr_debug("H/W debugger support was not registered for requester pasid %d\n",
@@ -152,9 +144,6 @@ long kfd_dbgmgr_wave_control(struct kfd_dbgmgr *pmgr,
long kfd_dbgmgr_address_watch(struct kfd_dbgmgr *pmgr,
struct dbg_address_watch_info *adw_info)
{
- BUG_ON(!pmgr || !pmgr->dbgdev || !adw_info);
-
-
/* Is the requests coming from the already registered process? */
if (pmgr->pasid != adw_info->process->pasid) {
pr_debug("H/W debugger support was not registered for requester pasid %d\n",
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.h b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.h
index 257a745ad0b5..a04a1fe1d0d9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.h
@@ -30,13 +30,11 @@
#pragma pack(push, 4)
enum HSA_DBG_WAVEOP {
- HSA_DBG_WAVEOP_HALT = 1, /* Halts a wavefront */
- HSA_DBG_WAVEOP_RESUME = 2, /* Resumes a wavefront */
- HSA_DBG_WAVEOP_KILL = 3, /* Kills a wavefront */
- HSA_DBG_WAVEOP_DEBUG = 4, /* Causes wavefront to enter
- debug mode */
- HSA_DBG_WAVEOP_TRAP = 5, /* Causes wavefront to take
- a trap */
+ HSA_DBG_WAVEOP_HALT = 1, /* Halts a wavefront */
+ HSA_DBG_WAVEOP_RESUME = 2, /* Resumes a wavefront */
+ HSA_DBG_WAVEOP_KILL = 3, /* Kills a wavefront */
+ HSA_DBG_WAVEOP_DEBUG = 4, /* Causes wavefront to enter dbg mode */
+ HSA_DBG_WAVEOP_TRAP = 5, /* Causes wavefront to take a trap */
HSA_DBG_NUM_WAVEOP = 5,
HSA_DBG_MAX_WAVEOP = 0xFFFFFFFF
};
@@ -81,15 +79,13 @@ struct HsaDbgWaveMsgAMDGen2 {
uint32_t UserData:8; /* user data */
uint32_t ShaderArray:1; /* Shader array */
uint32_t Priv:1; /* Privileged */
- uint32_t Reserved0:4; /* This field is reserved,
- should be 0 */
+ uint32_t Reserved0:4; /* Reserved, should be 0 */
uint32_t WaveId:4; /* wave id */
uint32_t SIMD:2; /* SIMD id */
uint32_t HSACU:4; /* Compute unit */
uint32_t ShaderEngine:2;/* Shader engine */
uint32_t MessageType:2; /* see HSA_DBG_WAVEMSG_TYPE */
- uint32_t Reserved1:4; /* This field is reserved,
- should be 0 */
+ uint32_t Reserved1:4; /* Reserved, should be 0 */
} ui32;
uint32_t Value;
};
@@ -121,20 +117,23 @@ struct HsaDbgWaveMessage {
* in the user mode instruction stream. The OS scheduler event is typically
* associated and signaled by an interrupt issued by the GPU, but other HSA
* system interrupt conditions from other HW (e.g. IOMMUv2) may be surfaced
- * by the KFD by this mechanism, too. */
+ * by the KFD by this mechanism, too.
+ */
/* these are the new definitions for events */
enum HSA_EVENTTYPE {
HSA_EVENTTYPE_SIGNAL = 0, /* user-mode generated GPU signal */
HSA_EVENTTYPE_NODECHANGE = 1, /* HSA node change (attach/detach) */
HSA_EVENTTYPE_DEVICESTATECHANGE = 2, /* HSA device state change
- (start/stop) */
+ * (start/stop)
+ */
HSA_EVENTTYPE_HW_EXCEPTION = 3, /* GPU shader exception event */
HSA_EVENTTYPE_SYSTEM_EVENT = 4, /* GPU SYSCALL with parameter info */
HSA_EVENTTYPE_DEBUG_EVENT = 5, /* GPU signal for debugging */
HSA_EVENTTYPE_PROFILE_EVENT = 6,/* GPU signal for profiling */
HSA_EVENTTYPE_QUEUE_EVENT = 7, /* GPU signal queue idle state
- (EOP pm4) */
+ * (EOP pm4)
+ */
/* ... */
HSA_EVENTTYPE_MAXID,
HSA_EVENTTYPE_TYPE_SIZE = 0xFFFFFFFF
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 88187bfc5ea3..61fff25b4ce7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -26,7 +26,7 @@
#include <linux/slab.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
-#include "kfd_pm4_headers.h"
+#include "kfd_pm4_headers_vi.h"
#define MQD_SIZE_ALIGNED 768
@@ -98,11 +98,14 @@ static const struct kfd_device_info *lookup_device_info(unsigned short did)
for (i = 0; i < ARRAY_SIZE(supported_devices); i++) {
if (supported_devices[i].did == did) {
- BUG_ON(supported_devices[i].device_info == NULL);
+ WARN_ON(!supported_devices[i].device_info);
return supported_devices[i].device_info;
}
}
+ dev_warn(kfd_device, "DID %04x is missing in supported_devices\n",
+ did);
+
return NULL;
}
@@ -114,8 +117,10 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
const struct kfd_device_info *device_info =
lookup_device_info(pdev->device);
- if (!device_info)
+ if (!device_info) {
+ dev_err(kfd_device, "kgd2kfd_probe failed\n");
return NULL;
+ }
kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
if (!kfd)
@@ -152,15 +157,16 @@ static bool device_iommu_pasid_init(struct kfd_dev *kfd)
}
if ((iommu_info.flags & required_iommu_flags) != required_iommu_flags) {
- dev_err(kfd_device, "error required iommu flags ats(%i), pri(%i), pasid(%i)\n",
+ dev_err(kfd_device, "error required iommu flags ats %i, pri %i, pasid %i\n",
(iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP) != 0,
(iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) != 0,
- (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP) != 0);
+ (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP)
+ != 0);
return false;
}
pasid_limit = min_t(unsigned int,
- (unsigned int)1 << kfd->device_info->max_pasid_bits,
+ (unsigned int)(1 << kfd->device_info->max_pasid_bits),
iommu_info.max_pasids);
/*
* last pasid is used for kernel queues doorbells
@@ -211,9 +217,8 @@ static int iommu_invalid_ppr_cb(struct pci_dev *pdev, int pasid,
flags);
dev = kfd_device_by_pci_dev(pdev);
- BUG_ON(dev == NULL);
-
- kfd_signal_iommu_event(dev, pasid, address,
+ if (!WARN_ON(!dev))
+ kfd_signal_iommu_event(dev, pasid, address,
flags & PPR_FAULT_WRITE, flags & PPR_FAULT_EXEC);
return AMD_IOMMU_INV_PRI_RSP_INVALID;
@@ -226,10 +231,6 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
kfd->shared_resources = *gpu_resources;
- /* We only use the first MEC */
- if (kfd->shared_resources.num_mec > 1)
- kfd->shared_resources.num_mec = 1;
-
/* calculate max size of mqds needed for queues */
size = max_num_of_queues_per_device *
kfd->device_info->mqd_size_aligned;
@@ -238,9 +239,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
* calculate max size of runlist packet.
* There can be only 2 packets at once
*/
- size += (KFD_MAX_NUM_OF_PROCESSES * sizeof(struct pm4_map_process) +
- max_num_of_queues_per_device *
- sizeof(struct pm4_map_queues) + sizeof(struct pm4_runlist)) * 2;
+ size += (KFD_MAX_NUM_OF_PROCESSES * sizeof(struct pm4_mes_map_process) +
+ max_num_of_queues_per_device * sizeof(struct pm4_mes_map_queues)
+ + sizeof(struct pm4_mes_runlist)) * 2;
/* Add size of HIQ & DIQ */
size += KFD_KERNEL_QUEUE_SIZE * 2;
@@ -251,42 +252,37 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
if (kfd->kfd2kgd->init_gtt_mem_allocation(
kfd->kgd, size, &kfd->gtt_mem,
&kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr)){
- dev_err(kfd_device,
- "Could not allocate %d bytes for device (%x:%x)\n",
- size, kfd->pdev->vendor, kfd->pdev->device);
+ dev_err(kfd_device, "Could not allocate %d bytes\n", size);
goto out;
}
- dev_info(kfd_device,
- "Allocated %d bytes on gart for device(%x:%x)\n",
- size, kfd->pdev->vendor, kfd->pdev->device);
+ dev_info(kfd_device, "Allocated %d bytes on gart\n", size);
/* Initialize GTT sa with 512 byte chunk size */
if (kfd_gtt_sa_init(kfd, size, 512) != 0) {
- dev_err(kfd_device,
- "Error initializing gtt sub-allocator\n");
+ dev_err(kfd_device, "Error initializing gtt sub-allocator\n");
goto kfd_gtt_sa_init_error;
}
- kfd_doorbell_init(kfd);
-
- if (kfd_topology_add_device(kfd) != 0) {
+ if (kfd_doorbell_init(kfd)) {
dev_err(kfd_device,
- "Error adding device (%x:%x) to topology\n",
- kfd->pdev->vendor, kfd->pdev->device);
+ "Error initializing doorbell aperture\n");
+ goto kfd_doorbell_error;
+ }
+
+ if (kfd_topology_add_device(kfd)) {
+ dev_err(kfd_device, "Error adding device to topology\n");
goto kfd_topology_add_device_error;
}
if (kfd_interrupt_init(kfd)) {
- dev_err(kfd_device,
- "Error initializing interrupts for device (%x:%x)\n",
- kfd->pdev->vendor, kfd->pdev->device);
+ dev_err(kfd_device, "Error initializing interrupts\n");
goto kfd_interrupt_error;
}
if (!device_iommu_pasid_init(kfd)) {
dev_err(kfd_device,
- "Error initializing iommuv2 for device (%x:%x)\n",
+ "Error initializing iommuv2 for device %x:%x\n",
kfd->pdev->vendor, kfd->pdev->device);
goto device_iommu_pasid_error;
}
@@ -296,15 +292,13 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
kfd->dqm = device_queue_manager_init(kfd);
if (!kfd->dqm) {
- dev_err(kfd_device,
- "Error initializing queue manager for device (%x:%x)\n",
- kfd->pdev->vendor, kfd->pdev->device);
+ dev_err(kfd_device, "Error initializing queue manager\n");
goto device_queue_manager_error;
}
- if (kfd->dqm->ops.start(kfd->dqm) != 0) {
+ if (kfd->dqm->ops.start(kfd->dqm)) {
dev_err(kfd_device,
- "Error starting queuen manager for device (%x:%x)\n",
+ "Error starting queue manager for device %x:%x\n",
kfd->pdev->vendor, kfd->pdev->device);
goto dqm_start_error;
}
@@ -312,10 +306,10 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
kfd->dbgmgr = NULL;
kfd->init_complete = true;
- dev_info(kfd_device, "added device (%x:%x)\n", kfd->pdev->vendor,
+ dev_info(kfd_device, "added device %x:%x\n", kfd->pdev->vendor,
kfd->pdev->device);
- pr_debug("kfd: Starting kfd with the following scheduling policy %d\n",
+ pr_debug("Starting kfd with the following scheduling policy %d\n",
sched_policy);
goto out;
@@ -329,11 +323,13 @@ device_iommu_pasid_error:
kfd_interrupt_error:
kfd_topology_remove_device(kfd);
kfd_topology_add_device_error:
+ kfd_doorbell_fini(kfd);
+kfd_doorbell_error:
kfd_gtt_sa_fini(kfd);
kfd_gtt_sa_init_error:
kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
dev_err(kfd_device,
- "device (%x:%x) NOT added due to errors\n",
+ "device %x:%x NOT added due to errors\n",
kfd->pdev->vendor, kfd->pdev->device);
out:
return kfd->init_complete;
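The reworked error path above (with the new kfd_doorbell_error label) follows the usual kernel unwind idiom: each successfully initialized resource gets a label so a later failure tears down only what came before it, in reverse order. A generic, self-contained sketch of the shape, with stub init/fini functions standing in for the real steps:

	#include <stdio.h>

	static int init_step(const char *name) { printf("init %s\n", name); return 0; }
	static void fini_step(const char *name) { printf("fini %s\n", name); }

	static int example_probe(void)
	{
		int err;

		err = init_step("gtt_sa");
		if (err)
			goto out;
		err = init_step("doorbell");
		if (err)
			goto doorbell_error;
		err = init_step("topology");
		if (err)
			goto topology_error;
		return 0;

	topology_error:
		fini_step("doorbell");
	doorbell_error:
		fini_step("gtt_sa");
	out:
		return err;
	}

	int main(void)
	{
		return example_probe();
	}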
@@ -346,6 +342,7 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
amd_iommu_free_device(kfd->pdev);
kfd_interrupt_exit(kfd);
kfd_topology_remove_device(kfd);
+ kfd_doorbell_fini(kfd);
kfd_gtt_sa_fini(kfd);
kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
}
@@ -355,8 +352,6 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
void kgd2kfd_suspend(struct kfd_dev *kfd)
{
- BUG_ON(kfd == NULL);
-
if (kfd->init_complete) {
kfd->dqm->ops.stop(kfd->dqm);
amd_iommu_set_invalidate_ctx_cb(kfd->pdev, NULL);
@@ -370,14 +365,15 @@ int kgd2kfd_resume(struct kfd_dev *kfd)
unsigned int pasid_limit;
int err;
- BUG_ON(kfd == NULL);
-
pasid_limit = kfd_get_pasid_limit();
if (kfd->init_complete) {
err = amd_iommu_init_device(kfd->pdev, pasid_limit);
- if (err < 0)
+ if (err < 0) {
+ dev_err(kfd_device, "failed to initialize iommu\n");
return -ENXIO;
+ }
+
amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
iommu_pasid_shutdown_callback);
amd_iommu_set_invalid_ppr_cb(kfd->pdev, iommu_invalid_ppr_cb);
@@ -406,26 +402,27 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
unsigned int chunk_size)
{
- unsigned int num_of_bits;
+ unsigned int num_of_longs;
- BUG_ON(!kfd);
- BUG_ON(!kfd->gtt_mem);
- BUG_ON(buf_size < chunk_size);
- BUG_ON(buf_size == 0);
- BUG_ON(chunk_size == 0);
+ if (WARN_ON(buf_size < chunk_size))
+ return -EINVAL;
+ if (WARN_ON(buf_size == 0))
+ return -EINVAL;
+ if (WARN_ON(chunk_size == 0))
+ return -EINVAL;
kfd->gtt_sa_chunk_size = chunk_size;
kfd->gtt_sa_num_of_chunks = buf_size / chunk_size;
- num_of_bits = kfd->gtt_sa_num_of_chunks / BITS_PER_BYTE;
- BUG_ON(num_of_bits == 0);
+ num_of_longs = (kfd->gtt_sa_num_of_chunks + BITS_PER_LONG - 1) /
+ BITS_PER_LONG;
- kfd->gtt_sa_bitmap = kzalloc(num_of_bits, GFP_KERNEL);
+ kfd->gtt_sa_bitmap = kcalloc(num_of_longs, sizeof(long), GFP_KERNEL);
if (!kfd->gtt_sa_bitmap)
return -ENOMEM;
- pr_debug("kfd: gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n",
+ pr_debug("gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n",
kfd->gtt_sa_num_of_chunks, kfd->gtt_sa_bitmap);
mutex_init(&kfd->gtt_sa_lock);
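The sizing change above is worth spelling out: the old code allocated chunks/8 bytes for the bitmap, which rounds down (zero bytes for fewer than eight chunks) and is not a whole number of unsigned longs, even though the bitmap helpers operate on long-sized words. The new code rounds up to whole longs. A small stand-alone check of the arithmetic, assuming an LP64 build:

	#include <stdio.h>

	#define BITS_PER_LONG	(8 * (unsigned int)sizeof(long))

	int main(void)
	{
		unsigned int chunks = 100;		/* example chunk count */
		unsigned int old_bytes = chunks / 8;	/* old sizing: rounds down */
		unsigned int longs = (chunks + BITS_PER_LONG - 1) / BITS_PER_LONG;
		unsigned int new_bytes = longs * (unsigned int)sizeof(long);

		printf("%u chunks: old %u bytes, new %u bytes (%u longs)\n",
		       chunks, old_bytes, new_bytes, longs);
		return 0;
	}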
@@ -459,8 +456,6 @@ int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
{
unsigned int found, start_search, cur_size;
- BUG_ON(!kfd);
-
if (size == 0)
return -EINVAL;
@@ -471,7 +466,7 @@ int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
if ((*mem_obj) == NULL)
return -ENOMEM;
- pr_debug("kfd: allocated mem_obj = %p for size = %d\n", *mem_obj, size);
+ pr_debug("Allocated mem_obj = %p for size = %d\n", *mem_obj, size);
start_search = 0;
@@ -483,7 +478,7 @@ kfd_gtt_restart_search:
kfd->gtt_sa_num_of_chunks,
start_search);
- pr_debug("kfd: found = %d\n", found);
+ pr_debug("Found = %d\n", found);
/* If there wasn't any free chunk, bail out */
if (found == kfd->gtt_sa_num_of_chunks)
@@ -501,12 +496,12 @@ kfd_gtt_restart_search:
found,
kfd->gtt_sa_chunk_size);
- pr_debug("kfd: gpu_addr = %p, cpu_addr = %p\n",
+ pr_debug("gpu_addr = %p, cpu_addr = %p\n",
(uint64_t *) (*mem_obj)->gpu_addr, (*mem_obj)->cpu_ptr);
/* If we need only one chunk, mark it as allocated and get out */
if (size <= kfd->gtt_sa_chunk_size) {
- pr_debug("kfd: single bit\n");
+ pr_debug("Single bit\n");
set_bit(found, kfd->gtt_sa_bitmap);
goto kfd_gtt_out;
}
@@ -541,7 +536,7 @@ kfd_gtt_restart_search:
} while (cur_size > 0);
- pr_debug("kfd: range_start = %d, range_end = %d\n",
+ pr_debug("range_start = %d, range_end = %d\n",
(*mem_obj)->range_start, (*mem_obj)->range_end);
/* Mark the chunks as allocated */
@@ -555,7 +550,7 @@ kfd_gtt_out:
return 0;
kfd_gtt_no_free_chunk:
- pr_debug("kfd: allocation failed with mem_obj = %p\n", mem_obj);
+ pr_debug("Allocation failed with mem_obj = %p\n", mem_obj);
mutex_unlock(&kfd->gtt_sa_lock);
kfree(mem_obj);
return -ENOMEM;
@@ -565,13 +560,11 @@ int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
{
unsigned int bit;
- BUG_ON(!kfd);
-
/* Act like kfree when trying to free a NULL object */
if (!mem_obj)
return 0;
- pr_debug("kfd: free mem_obj = %p, range_start = %d, range_end = %d\n",
+ pr_debug("Free mem_obj = %p, range_start = %d, range_end = %d\n",
mem_obj, mem_obj->range_start, mem_obj->range_end);
mutex_lock(&kfd->gtt_sa_lock);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 955aa304ff48..53a66e821624 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -77,29 +77,19 @@ static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
return false;
}
-unsigned int get_mec_num(struct device_queue_manager *dqm)
-{
- BUG_ON(!dqm || !dqm->dev);
-
- return dqm->dev->shared_resources.num_mec;
-}
-
unsigned int get_queues_num(struct device_queue_manager *dqm)
{
- BUG_ON(!dqm || !dqm->dev);
return bitmap_weight(dqm->dev->shared_resources.queue_bitmap,
KGD_MAX_QUEUES);
}
unsigned int get_queues_per_pipe(struct device_queue_manager *dqm)
{
- BUG_ON(!dqm || !dqm->dev);
return dqm->dev->shared_resources.num_queue_per_pipe;
}
unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
{
- BUG_ON(!dqm || !dqm->dev);
return dqm->dev->shared_resources.num_pipe_per_mec;
}
@@ -128,7 +118,7 @@ static int allocate_vmid(struct device_queue_manager *dqm,
/* Kaveri kfd vmid's starts from vmid 8 */
allocated_vmid = bit + KFD_VMID_START_OFFSET;
- pr_debug("kfd: vmid allocation %d\n", allocated_vmid);
+ pr_debug("vmid allocation %d\n", allocated_vmid);
qpd->vmid = allocated_vmid;
q->properties.vmid = allocated_vmid;
@@ -159,42 +149,38 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
{
int retval;
- BUG_ON(!dqm || !q || !qpd || !allocated_vmid);
-
- pr_debug("kfd: In func %s\n", __func__);
print_queue(q);
mutex_lock(&dqm->lock);
if (dqm->total_queue_count >= max_num_of_queues_per_device) {
- pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
+ pr_warn("Can't create new usermode queue because %d queues were already created\n",
dqm->total_queue_count);
- mutex_unlock(&dqm->lock);
- return -EPERM;
+ retval = -EPERM;
+ goto out_unlock;
}
if (list_empty(&qpd->queues_list)) {
retval = allocate_vmid(dqm, qpd, q);
- if (retval != 0) {
- mutex_unlock(&dqm->lock);
- return retval;
- }
+ if (retval)
+ goto out_unlock;
}
*allocated_vmid = qpd->vmid;
q->properties.vmid = qpd->vmid;
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
retval = create_compute_queue_nocpsch(dqm, q, qpd);
- if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
+ else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
retval = create_sdma_queue_nocpsch(dqm, q, qpd);
+ else
+ retval = -EINVAL;
- if (retval != 0) {
+ if (retval) {
if (list_empty(&qpd->queues_list)) {
deallocate_vmid(dqm, qpd, q);
*allocated_vmid = 0;
}
- mutex_unlock(&dqm->lock);
- return retval;
+ goto out_unlock;
}
list_add(&q->list, &qpd->queues_list);
@@ -212,8 +198,9 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
pr_debug("Total of %d queues are accountable so far\n",
dqm->total_queue_count);
+out_unlock:
mutex_unlock(&dqm->lock);
- return 0;
+ return retval;
}
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
@@ -223,7 +210,8 @@ static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
set = false;
- for (pipe = dqm->next_pipe_to_allocate, i = 0; i < get_pipes_per_mec(dqm);
+ for (pipe = dqm->next_pipe_to_allocate, i = 0;
+ i < get_pipes_per_mec(dqm);
pipe = ((pipe + 1) % get_pipes_per_mec(dqm)), ++i) {
if (!is_pipe_enabled(dqm, 0, pipe))
@@ -246,8 +234,7 @@ static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
if (!set)
return -EBUSY;
- pr_debug("kfd: DQM %s hqd slot - pipe (%d) queue(%d)\n",
- __func__, q->pipe, q->queue);
+ pr_debug("hqd slot - pipe %d, queue %d\n", q->pipe, q->queue);
/* horizontal hqd allocation */
dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_per_mec(dqm);
@@ -267,36 +254,38 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
int retval;
struct mqd_manager *mqd;
- BUG_ON(!dqm || !q || !qpd);
-
mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
- if (mqd == NULL)
+ if (!mqd)
return -ENOMEM;
retval = allocate_hqd(dqm, q);
- if (retval != 0)
+ if (retval)
return retval;
retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties);
- if (retval != 0) {
- deallocate_hqd(dqm, q);
- return retval;
- }
+ if (retval)
+ goto out_deallocate_hqd;
- pr_debug("kfd: loading mqd to hqd on pipe (%d) queue (%d)\n",
- q->pipe,
- q->queue);
+ pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
+ q->pipe, q->queue);
- retval = mqd->load_mqd(mqd, q->mqd, q->pipe,
- q->queue, (uint32_t __user *) q->properties.write_ptr);
- if (retval != 0) {
- deallocate_hqd(dqm, q);
- mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
- return retval;
- }
+ dqm->dev->kfd2kgd->set_scratch_backing_va(
+ dqm->dev->kgd, qpd->sh_hidden_private_base, qpd->vmid);
+
+ retval = mqd->load_mqd(mqd, q->mqd, q->pipe, q->queue, &q->properties,
+ q->process->mm);
+ if (retval)
+ goto out_uninit_mqd;
return 0;
+
+out_uninit_mqd:
+ mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+out_deallocate_hqd:
+ deallocate_hqd(dqm, q);
+
+ return retval;
}
static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
@@ -306,12 +295,8 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
int retval;
struct mqd_manager *mqd;
- BUG_ON(!dqm || !q || !q->mqd || !qpd);
-
retval = 0;
- pr_debug("kfd: In Func %s\n", __func__);
-
mutex_lock(&dqm->lock);
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
@@ -330,7 +315,7 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
dqm->sdma_queue_count--;
deallocate_sdma_queue(dqm, q->sdma_id);
} else {
- pr_debug("q->properties.type is invalid (%d)\n",
+ pr_debug("q->properties.type %d is invalid\n",
q->properties.type);
retval = -EINVAL;
goto out;
@@ -341,7 +326,7 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS,
q->pipe, q->queue);
- if (retval != 0)
+ if (retval)
goto out;
mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
@@ -371,14 +356,12 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
struct mqd_manager *mqd;
bool prev_active = false;
- BUG_ON(!dqm || !q || !q->mqd);
-
mutex_lock(&dqm->lock);
mqd = dqm->ops.get_mqd_manager(dqm,
get_mqd_type_from_queue_type(q->properties.type));
- if (mqd == NULL) {
- mutex_unlock(&dqm->lock);
- return -ENOMEM;
+ if (!mqd) {
+ retval = -ENOMEM;
+ goto out_unlock;
}
if (q->properties.is_active)
@@ -392,12 +375,13 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
if ((q->properties.is_active) && (!prev_active))
dqm->queue_count++;
- else if ((!q->properties.is_active) && (prev_active))
+ else if (!q->properties.is_active && prev_active)
dqm->queue_count--;
if (sched_policy != KFD_SCHED_POLICY_NO_HWS)
retval = execute_queues_cpsch(dqm, false);
+out_unlock:
mutex_unlock(&dqm->lock);
return retval;
}
@@ -407,15 +391,16 @@ static struct mqd_manager *get_mqd_manager_nocpsch(
{
struct mqd_manager *mqd;
- BUG_ON(!dqm || type >= KFD_MQD_TYPE_MAX);
+ if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
+ return NULL;
- pr_debug("kfd: In func %s mqd type %d\n", __func__, type);
+ pr_debug("mqd type %d\n", type);
mqd = dqm->mqds[type];
if (!mqd) {
mqd = mqd_manager_init(type, dqm->dev);
- if (mqd == NULL)
- pr_err("kfd: mqd manager is NULL");
+ if (!mqd)
+ pr_err("mqd manager is NULL");
dqm->mqds[type] = mqd;
}
@@ -428,11 +413,7 @@ static int register_process_nocpsch(struct device_queue_manager *dqm,
struct device_process_node *n;
int retval;
- BUG_ON(!dqm || !qpd);
-
- pr_debug("kfd: In func %s\n", __func__);
-
- n = kzalloc(sizeof(struct device_process_node), GFP_KERNEL);
+ n = kzalloc(sizeof(*n), GFP_KERNEL);
if (!n)
return -ENOMEM;
@@ -456,10 +437,6 @@ static int unregister_process_nocpsch(struct device_queue_manager *dqm,
int retval;
struct device_process_node *cur, *next;
- BUG_ON(!dqm || !qpd);
-
- pr_debug("In func %s\n", __func__);
-
pr_debug("qpd->queues_list is %s\n",
list_empty(&qpd->queues_list) ? "empty" : "not empty");
@@ -500,51 +477,39 @@ static void init_interrupts(struct device_queue_manager *dqm)
{
unsigned int i;
- BUG_ON(dqm == NULL);
-
for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++)
if (is_pipe_enabled(dqm, 0, i))
dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd, i);
}
-static int init_scheduler(struct device_queue_manager *dqm)
-{
- int retval = 0;
-
- BUG_ON(!dqm);
-
- pr_debug("kfd: In %s\n", __func__);
-
- return retval;
-}
-
static int initialize_nocpsch(struct device_queue_manager *dqm)
{
- int i;
+ int pipe, queue;
- BUG_ON(!dqm);
+ pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
- pr_debug("kfd: In func %s num of pipes: %d\n",
- __func__, get_pipes_per_mec(dqm));
+ dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
+ sizeof(unsigned int), GFP_KERNEL);
+ if (!dqm->allocated_queues)
+ return -ENOMEM;
mutex_init(&dqm->lock);
INIT_LIST_HEAD(&dqm->queues);
dqm->queue_count = dqm->next_pipe_to_allocate = 0;
dqm->sdma_queue_count = 0;
- dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
- sizeof(unsigned int), GFP_KERNEL);
- if (!dqm->allocated_queues) {
- mutex_destroy(&dqm->lock);
- return -ENOMEM;
- }
- for (i = 0; i < get_pipes_per_mec(dqm); i++)
- dqm->allocated_queues[i] = (1 << get_queues_per_pipe(dqm)) - 1;
+ for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
+ int pipe_offset = pipe * get_queues_per_pipe(dqm);
+
+ for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
+ if (test_bit(pipe_offset + queue,
+ dqm->dev->shared_resources.queue_bitmap))
+ dqm->allocated_queues[pipe] |= 1 << queue;
+ }
dqm->vmid_bitmap = (1 << VMID_PER_DEVICE) - 1;
dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;
- init_scheduler(dqm);
return 0;
}
@@ -552,9 +517,7 @@ static void uninitialize_nocpsch(struct device_queue_manager *dqm)
{
int i;
- BUG_ON(!dqm);
-
- BUG_ON(dqm->queue_count > 0 || dqm->processes_count > 0);
+ WARN_ON(dqm->queue_count > 0 || dqm->processes_count > 0);
kfree(dqm->allocated_queues);
for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
@@ -611,33 +574,34 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
return -ENOMEM;
retval = allocate_sdma_queue(dqm, &q->sdma_id);
- if (retval != 0)
+ if (retval)
return retval;
q->properties.sdma_queue_id = q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;
q->properties.sdma_engine_id = q->sdma_id / CIK_SDMA_ENGINE_NUM;
- pr_debug("kfd: sdma id is: %d\n", q->sdma_id);
- pr_debug(" sdma queue id: %d\n", q->properties.sdma_queue_id);
- pr_debug(" sdma engine id: %d\n", q->properties.sdma_engine_id);
+ pr_debug("SDMA id is: %d\n", q->sdma_id);
+ pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id);
+ pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
dqm->ops_asic_specific.init_sdma_vm(dqm, q, qpd);
retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties);
- if (retval != 0) {
- deallocate_sdma_queue(dqm, q->sdma_id);
- return retval;
- }
+ if (retval)
+ goto out_deallocate_sdma_queue;
- retval = mqd->load_mqd(mqd, q->mqd, 0,
- 0, NULL);
- if (retval != 0) {
- deallocate_sdma_queue(dqm, q->sdma_id);
- mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
- return retval;
- }
+ retval = mqd->load_mqd(mqd, q->mqd, 0, 0, &q->properties, NULL);
+ if (retval)
+ goto out_uninit_mqd;
return 0;
+
+out_uninit_mqd:
+ mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+out_deallocate_sdma_queue:
+ deallocate_sdma_queue(dqm, q->sdma_id);
+
+ return retval;
}
/*
@@ -649,10 +613,6 @@ static int set_sched_resources(struct device_queue_manager *dqm)
int i, mec;
struct scheduling_resources res;
- BUG_ON(!dqm);
-
- pr_debug("kfd: In func %s\n", __func__);
-
res.vmid_mask = (1 << VMID_PER_DEVICE) - 1;
res.vmid_mask <<= KFD_VMID_START_OFFSET;
@@ -670,8 +630,9 @@ static int set_sched_resources(struct device_queue_manager *dqm)
/* This situation may be hit in the future if a new HW
* generation exposes more than 64 queues. If so, the
- * definition of res.queue_mask needs updating */
- if (WARN_ON(i > (sizeof(res.queue_mask)*8))) {
+ * definition of res.queue_mask needs updating
+ */
+ if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) {
pr_err("Invalid queue enabled by amdgpu: %d\n", i);
break;
}
@@ -681,9 +642,9 @@ static int set_sched_resources(struct device_queue_manager *dqm)
res.gws_mask = res.oac_mask = res.gds_heap_base =
res.gds_heap_size = 0;
- pr_debug("kfd: scheduling resources:\n"
- " vmid mask: 0x%8X\n"
- " queue mask: 0x%8llX\n",
+ pr_debug("Scheduling resources:\n"
+ "vmid mask: 0x%8X\n"
+ "queue mask: 0x%8llX\n",
res.vmid_mask, res.queue_mask);
return pm_send_set_resources(&dqm->packets, &res);
@@ -693,10 +654,7 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
{
int retval;
- BUG_ON(!dqm);
-
- pr_debug("kfd: In func %s num of pipes: %d\n",
- __func__, get_pipes_per_mec(dqm));
+ pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
mutex_init(&dqm->lock);
INIT_LIST_HEAD(&dqm->queues);
@@ -704,13 +662,9 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
dqm->sdma_queue_count = 0;
dqm->active_runlist = false;
retval = dqm->ops_asic_specific.initialize(dqm);
- if (retval != 0)
- goto fail_init_pipelines;
-
- return 0;
+ if (retval)
+ mutex_destroy(&dqm->lock);
-fail_init_pipelines:
- mutex_destroy(&dqm->lock);
return retval;
}
@@ -719,25 +673,23 @@ static int start_cpsch(struct device_queue_manager *dqm)
struct device_process_node *node;
int retval;
- BUG_ON(!dqm);
-
retval = 0;
retval = pm_init(&dqm->packets, dqm);
- if (retval != 0)
+ if (retval)
goto fail_packet_manager_init;
retval = set_sched_resources(dqm);
- if (retval != 0)
+ if (retval)
goto fail_set_sched_resources;
- pr_debug("kfd: allocating fence memory\n");
+ pr_debug("Allocating fence memory\n");
/* allocate fence memory on the gart */
retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
&dqm->fence_mem);
- if (retval != 0)
+ if (retval)
goto fail_allocate_vidmem;
dqm->fence_addr = dqm->fence_mem->cpu_ptr;
@@ -765,8 +717,6 @@ static int stop_cpsch(struct device_queue_manager *dqm)
struct device_process_node *node;
struct kfd_process_device *pdd;
- BUG_ON(!dqm);
-
destroy_queues_cpsch(dqm, true, true);
list_for_each_entry(node, &dqm->queues, list) {
@@ -783,13 +733,9 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
struct kernel_queue *kq,
struct qcm_process_device *qpd)
{
- BUG_ON(!dqm || !kq || !qpd);
-
- pr_debug("kfd: In func %s\n", __func__);
-
mutex_lock(&dqm->lock);
if (dqm->total_queue_count >= max_num_of_queues_per_device) {
- pr_warn("amdkfd: Can't create new kernel queue because %d queues were already created\n",
+ pr_warn("Can't create new kernel queue because %d queues were already created\n",
dqm->total_queue_count);
mutex_unlock(&dqm->lock);
return -EPERM;
@@ -816,10 +762,6 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
struct kernel_queue *kq,
struct qcm_process_device *qpd)
{
- BUG_ON(!dqm || !kq);
-
- pr_debug("kfd: In %s\n", __func__);
-
mutex_lock(&dqm->lock);
/* here we actually preempt the DIQ */
destroy_queues_cpsch(dqm, true, false);
@@ -851,8 +793,6 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
int retval;
struct mqd_manager *mqd;
- BUG_ON(!dqm || !q || !qpd);
-
retval = 0;
if (allocate_vmid)
@@ -861,7 +801,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
mutex_lock(&dqm->lock);
if (dqm->total_queue_count >= max_num_of_queues_per_device) {
- pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
+ pr_warn("Can't create new usermode queue because %d queues were already created\n",
dqm->total_queue_count);
retval = -EPERM;
goto out;
@@ -873,15 +813,15 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
mqd = dqm->ops.get_mqd_manager(dqm,
get_mqd_type_from_queue_type(q->properties.type));
- if (mqd == NULL) {
- mutex_unlock(&dqm->lock);
- return -ENOMEM;
+ if (!mqd) {
+ retval = -ENOMEM;
+ goto out;
}
dqm->ops_asic_specific.init_sdma_vm(dqm, q, qpd);
retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties);
- if (retval != 0)
+ if (retval)
goto out;
list_add(&q->list, &qpd->queues_list);
@@ -891,7 +831,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
}
if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
- dqm->sdma_queue_count++;
+ dqm->sdma_queue_count++;
/*
* Unconditionally increment this counter, regardless of the queue's
* type or whether the queue is active.
@@ -910,12 +850,11 @@ int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
unsigned int fence_value,
unsigned long timeout)
{
- BUG_ON(!fence_addr);
timeout += jiffies;
while (*fence_addr != fence_value) {
if (time_after(jiffies, timeout)) {
- pr_err("kfd: qcm fence wait loop timeout expired\n");
+ pr_err("qcm fence wait loop timeout expired\n");
return -ETIME;
}
schedule();
@@ -939,8 +878,6 @@ static int destroy_queues_cpsch(struct device_queue_manager *dqm,
enum kfd_preempt_type_filter preempt_type;
struct kfd_process_device *pdd;
- BUG_ON(!dqm);
-
retval = 0;
if (lock)
@@ -948,7 +885,7 @@ static int destroy_queues_cpsch(struct device_queue_manager *dqm,
if (!dqm->active_runlist)
goto out;
- pr_debug("kfd: Before destroying queues, sdma queue count is : %u\n",
+ pr_debug("Before destroying queues, sdma queue count is : %u\n",
dqm->sdma_queue_count);
if (dqm->sdma_queue_count > 0) {
@@ -962,7 +899,7 @@ static int destroy_queues_cpsch(struct device_queue_manager *dqm,
retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
preempt_type, 0, false, 0);
- if (retval != 0)
+ if (retval)
goto out;
*dqm->fence_addr = KFD_FENCE_INIT;
@@ -971,7 +908,7 @@ static int destroy_queues_cpsch(struct device_queue_manager *dqm,
/* should be timed out */
retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS);
- if (retval != 0) {
+ if (retval) {
pdd = kfd_get_process_device_data(dqm->dev,
kfd_get_process(current));
pdd->reset_wavefronts = true;
@@ -990,14 +927,12 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock)
{
int retval;
- BUG_ON(!dqm);
-
if (lock)
mutex_lock(&dqm->lock);
retval = destroy_queues_cpsch(dqm, false, false);
- if (retval != 0) {
- pr_err("kfd: the cp might be in an unrecoverable state due to an unsuccessful queues preemption");
+ if (retval) {
+ pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption");
goto out;
}
@@ -1012,8 +947,8 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock)
}
retval = pm_send_runlist(&dqm->packets, &dqm->queues);
- if (retval != 0) {
- pr_err("kfd: failed to execute runlist");
+ if (retval) {
+ pr_err("failed to execute runlist");
goto out;
}
dqm->active_runlist = true;
@@ -1032,8 +967,6 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
struct mqd_manager *mqd;
bool preempt_all_queues;
- BUG_ON(!dqm || !qpd || !q);
-
preempt_all_queues = false;
retval = 0;
@@ -1105,8 +1038,6 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
{
bool retval;
- pr_debug("kfd: In func %s\n", __func__);
-
mutex_lock(&dqm->lock);
if (alternate_aperture_size == 0) {
@@ -1127,14 +1058,11 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
uint64_t base = (uintptr_t)alternate_aperture_base;
uint64_t limit = base + alternate_aperture_size - 1;
- if (limit <= base)
- goto out;
-
- if ((base & APE1_FIXED_BITS_MASK) != 0)
- goto out;
-
- if ((limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT)
+ if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
+ (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
+ retval = false;
goto out;
+ }
qpd->sh_mem_ape1_base = base >> 16;
qpd->sh_mem_ape1_limit = limit >> 16;
@@ -1151,27 +1079,22 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
if ((sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
program_sh_mem_settings(dqm, qpd);
- pr_debug("kfd: sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
+ pr_debug("sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
qpd->sh_mem_config, qpd->sh_mem_ape1_base,
qpd->sh_mem_ape1_limit);
- mutex_unlock(&dqm->lock);
- return retval;
-
out:
mutex_unlock(&dqm->lock);
- return false;
+ return retval;
}
struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
{
struct device_queue_manager *dqm;
- BUG_ON(!dev);
+ pr_debug("Loading device queue manager\n");
- pr_debug("kfd: loading device queue manager\n");
-
- dqm = kzalloc(sizeof(struct device_queue_manager), GFP_KERNEL);
+ dqm = kzalloc(sizeof(*dqm), GFP_KERNEL);
if (!dqm)
return NULL;
@@ -1209,8 +1132,8 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
break;
default:
- BUG();
- break;
+ pr_err("Invalid scheduling policy %d\n", sched_policy);
+ goto out_free;
}
switch (dev->device_info->asic_family) {
@@ -1223,18 +1146,16 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
break;
}
- if (dqm->ops.initialize(dqm) != 0) {
- kfree(dqm);
- return NULL;
- }
+ if (!dqm->ops.initialize(dqm))
+ return dqm;
- return dqm;
+out_free:
+ kfree(dqm);
+ return NULL;
}
void device_queue_manager_uninit(struct device_queue_manager *dqm)
{
- BUG_ON(!dqm);
-
dqm->ops.uninitialize(dqm);
kfree(dqm);
}
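
The error paths above are converted from repeated cleanup-and-return sequences to the kernel's usual goto-unwind layout, where each successfully acquired resource gets a label and teardown runs in reverse order of acquisition. A minimal sketch of that layout, assuming hypothetical helpers (alloc_a/init_b/start_c and their release counterparts) that return 0 on success:

struct example { void *a, *b; };

/* Hypothetical helpers for illustration only; 0 on success. */
int alloc_a(struct example *ex);
void free_a(struct example *ex);
int init_b(struct example *ex);
void uninit_b(struct example *ex);
int start_c(struct example *ex);

static int setup_example(struct example *ex)
{
	int ret;

	ret = alloc_a(ex);
	if (ret)
		return ret;		/* nothing acquired yet, plain return */

	ret = init_b(ex);
	if (ret)
		goto out_free_a;

	ret = start_c(ex);
	if (ret)
		goto out_uninit_b;

	return 0;

out_uninit_b:
	uninit_b(ex);			/* undo in reverse order of setup */
out_free_a:
	free_a(ex);
	return ret;
}

This mirrors create_compute_queue_nocpsch() and create_sdma_queue_nocpsch() after the change: a single failure exit per resource instead of duplicating the deallocate_hqd()/uninit_mqd() calls in every branch.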
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 66b9615bc3c1..faf820a06400 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -180,7 +180,6 @@ void device_queue_manager_init_cik(struct device_queue_manager_asic_ops *ops);
void device_queue_manager_init_vi(struct device_queue_manager_asic_ops *ops);
void program_sh_mem_settings(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
-unsigned int get_mec_num(struct device_queue_manager *dqm);
unsigned int get_queues_num(struct device_queue_manager *dqm);
unsigned int get_queues_per_pipe(struct device_queue_manager *dqm);
unsigned int get_pipes_per_mec(struct device_queue_manager *dqm);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
index 48dc0561b402..72c3cbabc0a7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
@@ -24,6 +24,7 @@
#include "kfd_device_queue_manager.h"
#include "cik_regs.h"
#include "oss/oss_2_4_sh_mask.h"
+#include "gca/gfx_7_2_sh_mask.h"
static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
@@ -65,7 +66,7 @@ static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
* for LDS/Scratch and GPUVM.
*/
- BUG_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
+ WARN_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
top_address_nybble == 0);
return PRIVATE_BASE(top_address_nybble << 12) |
@@ -104,8 +105,6 @@ static int register_process_cik(struct device_queue_manager *dqm,
struct kfd_process_device *pdd;
unsigned int temp;
- BUG_ON(!dqm || !qpd);
-
pdd = qpd_to_pdd(qpd);
/* check if sh_mem_config register already configured */
@@ -125,9 +124,10 @@ static int register_process_cik(struct device_queue_manager *dqm,
} else {
temp = get_sh_mem_bases_nybble_64(pdd);
qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
+ qpd->sh_mem_config |= 1 << SH_MEM_CONFIG__PRIVATE_ATC__SHIFT;
}
- pr_debug("kfd: is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
+ pr_debug("is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases);
return 0;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
index 7e9cae9d349b..40e9ddd096cd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
@@ -67,7 +67,7 @@ static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
* for LDS/Scratch and GPUVM.
*/
- BUG_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
+ WARN_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
top_address_nybble == 0);
return top_address_nybble << 12 |
@@ -110,8 +110,6 @@ static int register_process_vi(struct device_queue_manager *dqm,
struct kfd_process_device *pdd;
unsigned int temp;
- BUG_ON(!dqm || !qpd);
-
pdd = qpd_to_pdd(qpd);
/* check if sh_mem_config register already configured */
@@ -137,9 +135,11 @@ static int register_process_vi(struct device_queue_manager *dqm,
qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
qpd->sh_mem_config |= SH_MEM_ADDRESS_MODE_HSA64 <<
SH_MEM_CONFIG__ADDRESS_MODE__SHIFT;
+ qpd->sh_mem_config |= 1 <<
+ SH_MEM_CONFIG__PRIVATE_ATC__SHIFT;
}
- pr_debug("kfd: is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
+ pr_debug("is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases);
return 0;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index 453c5d66e5c3..acf4d2a977ad 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -59,7 +59,7 @@ static inline size_t doorbell_process_allocation(void)
}
/* Doorbell calculations for device init. */
-void kfd_doorbell_init(struct kfd_dev *kfd)
+int kfd_doorbell_init(struct kfd_dev *kfd)
{
size_t doorbell_start_offset;
size_t doorbell_aperture_size;
@@ -95,26 +95,35 @@ void kfd_doorbell_init(struct kfd_dev *kfd)
kfd->doorbell_kernel_ptr = ioremap(kfd->doorbell_base,
doorbell_process_allocation());
- BUG_ON(!kfd->doorbell_kernel_ptr);
+ if (!kfd->doorbell_kernel_ptr)
+ return -ENOMEM;
- pr_debug("kfd: doorbell initialization:\n");
- pr_debug("kfd: doorbell base == 0x%08lX\n",
+ pr_debug("Doorbell initialization:\n");
+ pr_debug("doorbell base == 0x%08lX\n",
(uintptr_t)kfd->doorbell_base);
- pr_debug("kfd: doorbell_id_offset == 0x%08lX\n",
+ pr_debug("doorbell_id_offset == 0x%08lX\n",
kfd->doorbell_id_offset);
- pr_debug("kfd: doorbell_process_limit == 0x%08lX\n",
+ pr_debug("doorbell_process_limit == 0x%08lX\n",
doorbell_process_limit);
- pr_debug("kfd: doorbell_kernel_offset == 0x%08lX\n",
+ pr_debug("doorbell_kernel_offset == 0x%08lX\n",
(uintptr_t)kfd->doorbell_base);
- pr_debug("kfd: doorbell aperture size == 0x%08lX\n",
+ pr_debug("doorbell aperture size == 0x%08lX\n",
kfd->shared_resources.doorbell_aperture_size);
- pr_debug("kfd: doorbell kernel address == 0x%08lX\n",
+ pr_debug("doorbell kernel address == 0x%08lX\n",
(uintptr_t)kfd->doorbell_kernel_ptr);
+
+ return 0;
+}
+
+void kfd_doorbell_fini(struct kfd_dev *kfd)
+{
+ if (kfd->doorbell_kernel_ptr)
+ iounmap(kfd->doorbell_kernel_ptr);
}
int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma)
@@ -131,7 +140,7 @@ int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma)
/* Find kfd device according to gpu id */
dev = kfd_device_by_id(vma->vm_pgoff);
- if (dev == NULL)
+ if (!dev)
return -EINVAL;
/* Calculate physical address of doorbell */
@@ -142,12 +151,11 @@ int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- pr_debug("kfd: mapping doorbell page in %s\n"
+ pr_debug("Mapping doorbell page\n"
" target user address == 0x%08llX\n"
" physical address == 0x%08llX\n"
" vm_flags == 0x%04lX\n"
" size == 0x%04lX\n",
- __func__,
(unsigned long long) vma->vm_start, address, vma->vm_flags,
doorbell_process_allocation());
@@ -166,8 +174,6 @@ u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
{
u32 inx;
- BUG_ON(!kfd || !doorbell_off);
-
mutex_lock(&kfd->doorbell_mutex);
inx = find_first_zero_bit(kfd->doorbell_available_index,
KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
@@ -185,7 +191,7 @@ u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
*doorbell_off = KERNEL_DOORBELL_PASID * (doorbell_process_allocation() /
sizeof(u32)) + inx;
- pr_debug("kfd: get kernel queue doorbell\n"
+ pr_debug("Get kernel queue doorbell\n"
" doorbell offset == 0x%08X\n"
" kernel address == 0x%08lX\n",
*doorbell_off, (uintptr_t)(kfd->doorbell_kernel_ptr + inx));
@@ -197,8 +203,6 @@ void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr)
{
unsigned int inx;
- BUG_ON(!kfd || !db_addr);
-
inx = (unsigned int)(db_addr - kfd->doorbell_kernel_ptr);
mutex_lock(&kfd->doorbell_mutex);
@@ -210,7 +214,7 @@ inline void write_kernel_doorbell(u32 __iomem *db, u32 value)
{
if (db) {
writel(value, db);
- pr_debug("writing %d to doorbell address 0x%p\n", value, db);
+ pr_debug("Writing %d to doorbell address 0x%p\n", value, db);
}
}
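
kfd_doorbell_init() now reports ioremap failure with -ENOMEM instead of BUG_ON, and kfd_doorbell_fini() provides the matching iounmap. A hedged sketch of how a caller could use the new return value -- the functions below are illustrative, not the actual kfd_device.c code:

/* Illustrative caller only; the real device-init path is not part of
 * this hunk. Assumes struct kfd_dev as declared in kfd_priv.h.
 */
static int example_device_init(struct kfd_dev *kfd)
{
	int err;

	err = kfd_doorbell_init(kfd);	/* may now fail with -ENOMEM */
	if (err) {
		pr_err("Error initializing doorbell aperture\n");
		return err;
	}

	/* ... remaining init steps ... */
	return 0;
}

static void example_device_fini(struct kfd_dev *kfd)
{
	kfd_doorbell_fini(kfd);		/* iounmaps doorbell_kernel_ptr if set */
}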
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index d1ce83d73a87..5979158c3f7b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -110,7 +110,7 @@ static bool allocate_free_slot(struct kfd_process *process,
*out_page = page;
*out_slot_index = slot;
- pr_debug("allocated event signal slot in page %p, slot %d\n",
+ pr_debug("Allocated event signal slot in page %p, slot %d\n",
page, slot);
return true;
@@ -155,9 +155,9 @@ static bool allocate_signal_page(struct file *devkfd, struct kfd_process *p)
struct signal_page,
event_pages)->page_index + 1;
- pr_debug("allocated new event signal page at %p, for process %p\n",
+ pr_debug("Allocated new event signal page at %p, for process %p\n",
page, p);
- pr_debug("page index is %d\n", page->page_index);
+ pr_debug("Page index is %d\n", page->page_index);
list_add(&page->event_pages, &p->signal_event_pages);
@@ -194,7 +194,8 @@ static void release_event_notification_slot(struct signal_page *page,
page->free_slots++;
/* We don't free signal pages, they are retained by the process
- * and reused until it exits. */
+ * and reused until it exits.
+ */
}
static struct signal_page *lookup_signal_page_by_index(struct kfd_process *p,
@@ -246,7 +247,7 @@ static u32 make_nonsignal_event_id(struct kfd_process *p)
for (id = p->next_nonsignal_event_id;
id < KFD_LAST_NONSIGNAL_EVENT_ID &&
- lookup_event_by_id(p, id) != NULL;
+ lookup_event_by_id(p, id);
id++)
;
@@ -265,7 +266,7 @@ static u32 make_nonsignal_event_id(struct kfd_process *p)
for (id = KFD_FIRST_NONSIGNAL_EVENT_ID;
id < KFD_LAST_NONSIGNAL_EVENT_ID &&
- lookup_event_by_id(p, id) != NULL;
+ lookup_event_by_id(p, id);
id++)
;
@@ -291,13 +292,13 @@ static int create_signal_event(struct file *devkfd,
struct kfd_event *ev)
{
if (p->signal_event_count == KFD_SIGNAL_EVENT_LIMIT) {
- pr_warn("amdkfd: Signal event wasn't created because limit was reached\n");
+ pr_warn("Signal event wasn't created because limit was reached\n");
return -ENOMEM;
}
if (!allocate_event_notification_slot(devkfd, p, &ev->signal_page,
&ev->signal_slot_index)) {
- pr_warn("amdkfd: Signal event wasn't created because out of kernel memory\n");
+ pr_warn("Signal event wasn't created because out of kernel memory\n");
return -ENOMEM;
}
@@ -309,11 +310,7 @@ static int create_signal_event(struct file *devkfd,
ev->event_id = make_signal_event_id(ev->signal_page,
ev->signal_slot_index);
- pr_debug("signal event number %zu created with id %d, address %p\n",
- p->signal_event_count, ev->event_id,
- ev->user_signal_address);
-
- pr_debug("signal event number %zu created with id %d, address %p\n",
+ pr_debug("Signal event number %zu created with id %d, address %p\n",
p->signal_event_count, ev->event_id,
ev->user_signal_address);
@@ -345,7 +342,7 @@ void kfd_event_init_process(struct kfd_process *p)
static void destroy_event(struct kfd_process *p, struct kfd_event *ev)
{
- if (ev->signal_page != NULL) {
+ if (ev->signal_page) {
release_event_notification_slot(ev->signal_page,
ev->signal_slot_index);
p->signal_event_count--;
@@ -584,7 +581,7 @@ void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
* search faster.
*/
struct signal_page *page;
- unsigned i;
+ unsigned int i;
list_for_each_entry(page, &p->signal_event_pages, event_pages)
for (i = 0; i < SLOTS_PER_PAGE; i++)
@@ -816,7 +813,7 @@ int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
/* check required size is logical */
if (get_order(KFD_SIGNAL_EVENT_LIMIT * 8) !=
get_order(vma->vm_end - vma->vm_start)) {
- pr_err("amdkfd: event page mmap requested illegal size\n");
+ pr_err("Event page mmap requested illegal size\n");
return -EINVAL;
}
@@ -825,7 +822,7 @@ int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
page = lookup_signal_page_by_index(p, page_index);
if (!page) {
/* Probably KFD bug, but mmap is user-accessible. */
- pr_debug("signal page could not be found for page_index %u\n",
+ pr_debug("Signal page could not be found for page_index %u\n",
page_index);
return -EINVAL;
}
@@ -836,7 +833,7 @@ int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE
| VM_DONTDUMP | VM_PFNMAP;
- pr_debug("mapping signal page\n");
+ pr_debug("Mapping signal page\n");
pr_debug(" start user address == 0x%08lx\n", vma->vm_start);
pr_debug(" end user address == 0x%08lx\n", vma->vm_end);
pr_debug(" pfn == 0x%016lX\n", pfn);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index 2b655103ba79..c59384bbbc5f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -304,7 +304,7 @@ int kfd_init_apertures(struct kfd_process *process)
id < NUM_OF_SUPPORTED_GPUS) {
pdd = kfd_create_process_device_data(dev, process);
- if (pdd == NULL) {
+ if (!pdd) {
pr_err("Failed to create process device data\n");
return -1;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
index 7f134aa9bfd3..70b3a99cffc2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
@@ -179,7 +179,7 @@ static void interrupt_wq(struct work_struct *work)
bool interrupt_is_wanted(struct kfd_dev *dev, const uint32_t *ih_ring_entry)
{
/* integer and bitwise OR so there is no boolean short-circuiting */
- unsigned wanted = 0;
+ unsigned int wanted = 0;
wanted |= dev->device_info->event_interrupt_class->interrupt_isr(dev,
ih_ring_entry);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index d135cd002a95..681b639f5133 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -41,11 +41,11 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
int retval;
union PM4_MES_TYPE_3_HEADER nop;
- BUG_ON(!kq || !dev);
- BUG_ON(type != KFD_QUEUE_TYPE_DIQ && type != KFD_QUEUE_TYPE_HIQ);
+ if (WARN_ON(type != KFD_QUEUE_TYPE_DIQ && type != KFD_QUEUE_TYPE_HIQ))
+ return false;
- pr_debug("amdkfd: In func %s initializing queue type %d size %d\n",
- __func__, KFD_QUEUE_TYPE_HIQ, queue_size);
+ pr_debug("Initializing queue type %d size %d\n", KFD_QUEUE_TYPE_HIQ,
+ queue_size);
memset(&prop, 0, sizeof(prop));
memset(&nop, 0, sizeof(nop));
@@ -63,23 +63,23 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
KFD_MQD_TYPE_HIQ);
break;
default:
- BUG();
- break;
+ pr_err("Invalid queue type %d\n", type);
+ return false;
}
- if (kq->mqd == NULL)
+ if (!kq->mqd)
return false;
prop.doorbell_ptr = kfd_get_kernel_doorbell(dev, &prop.doorbell_off);
- if (prop.doorbell_ptr == NULL) {
- pr_err("amdkfd: error init doorbell");
+ if (!prop.doorbell_ptr) {
+ pr_err("Failed to initialize doorbell");
goto err_get_kernel_doorbell;
}
retval = kfd_gtt_sa_allocate(dev, queue_size, &kq->pq);
if (retval != 0) {
- pr_err("amdkfd: error init pq queues size (%d)\n", queue_size);
+ pr_err("Failed to init pq queues size %d\n", queue_size);
goto err_pq_allocate_vidmem;
}
@@ -87,7 +87,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
kq->pq_gpu_addr = kq->pq->gpu_addr;
retval = kq->ops_asic_specific.initialize(kq, dev, type, queue_size);
- if (retval == false)
+ if (!retval)
goto err_eop_allocate_vidmem;
retval = kfd_gtt_sa_allocate(dev, sizeof(*kq->rptr_kernel),
@@ -139,11 +139,12 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
/* assign HIQ to HQD */
if (type == KFD_QUEUE_TYPE_HIQ) {
- pr_debug("assigning hiq to hqd\n");
+ pr_debug("Assigning hiq to hqd\n");
kq->queue->pipe = KFD_CIK_HIQ_PIPE;
kq->queue->queue = KFD_CIK_HIQ_QUEUE;
kq->mqd->load_mqd(kq->mqd, kq->queue->mqd, kq->queue->pipe,
- kq->queue->queue, NULL);
+ kq->queue->queue, &kq->queue->properties,
+ NULL);
} else {
/* allocate fence for DIQ */
@@ -180,8 +181,6 @@ err_get_kernel_doorbell:
static void uninitialize(struct kernel_queue *kq)
{
- BUG_ON(!kq);
-
if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ)
kq->mqd->destroy_mqd(kq->mqd,
NULL,
@@ -211,8 +210,6 @@ static int acquire_packet_buffer(struct kernel_queue *kq,
uint32_t wptr, rptr;
unsigned int *queue_address;
- BUG_ON(!kq || !buffer_ptr);
-
rptr = *kq->rptr_kernel;
wptr = *kq->wptr_kernel;
queue_address = (unsigned int *)kq->pq_kernel_addr;
@@ -252,11 +249,7 @@ static void submit_packet(struct kernel_queue *kq)
{
#ifdef DEBUG
int i;
-#endif
-
- BUG_ON(!kq);
-#ifdef DEBUG
for (i = *kq->wptr_kernel; i < kq->pending_wptr; i++) {
pr_debug("0x%2X ", kq->pq_kernel_addr[i]);
if (i % 15 == 0)
@@ -272,7 +265,6 @@ static void submit_packet(struct kernel_queue *kq)
static void rollback_packet(struct kernel_queue *kq)
{
- BUG_ON(!kq);
kq->pending_wptr = *kq->queue->properties.write_ptr;
}
@@ -281,9 +273,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
{
struct kernel_queue *kq;
- BUG_ON(!dev);
-
- kq = kzalloc(sizeof(struct kernel_queue), GFP_KERNEL);
+ kq = kzalloc(sizeof(*kq), GFP_KERNEL);
if (!kq)
return NULL;
@@ -304,7 +294,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
}
if (!kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE)) {
- pr_err("amdkfd: failed to init kernel queue\n");
+ pr_err("Failed to init kernel queue\n");
kfree(kq);
return NULL;
}
@@ -313,32 +303,37 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
void kernel_queue_uninit(struct kernel_queue *kq)
{
- BUG_ON(!kq);
-
kq->ops.uninitialize(kq);
kfree(kq);
}
+/* FIXME: Can this test be removed? */
static __attribute__((unused)) void test_kq(struct kfd_dev *dev)
{
struct kernel_queue *kq;
uint32_t *buffer, i;
int retval;
- BUG_ON(!dev);
-
- pr_err("amdkfd: starting kernel queue test\n");
+ pr_err("Starting kernel queue test\n");
kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_HIQ);
- BUG_ON(!kq);
+ if (unlikely(!kq)) {
+ pr_err(" Failed to initialize HIQ\n");
+ pr_err("Kernel queue test failed\n");
+ return;
+ }
retval = kq->ops.acquire_packet_buffer(kq, 5, &buffer);
- BUG_ON(retval != 0);
+ if (unlikely(retval != 0)) {
+ pr_err(" Failed to acquire packet buffer\n");
+ pr_err("Kernel queue test failed\n");
+ return;
+ }
for (i = 0; i < 5; i++)
buffer[i] = kq->nop_packet;
kq->ops.submit_packet(kq);
- pr_err("amdkfd: ending kernel queue test\n");
+ pr_err("Ending kernel queue test\n");
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index 850a5623661f..0d73bea22c45 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -61,7 +61,8 @@ MODULE_PARM_DESC(send_sigterm,
static int amdkfd_init_completed;
-int kgd2kfd_init(unsigned interface_version, const struct kgd2kfd_calls **g2f)
+int kgd2kfd_init(unsigned int interface_version,
+ const struct kgd2kfd_calls **g2f)
{
if (!amdkfd_init_completed)
return -EPROBE_DEFER;
@@ -90,7 +91,7 @@ static int __init kfd_module_init(void)
/* Verify module parameters */
if ((sched_policy < KFD_SCHED_POLICY_HWS) ||
(sched_policy > KFD_SCHED_POLICY_NO_HWS)) {
- pr_err("kfd: sched_policy has invalid value\n");
+ pr_err("sched_policy has invalid value\n");
return -1;
}
@@ -98,13 +99,13 @@ static int __init kfd_module_init(void)
if ((max_num_of_queues_per_device < 1) ||
(max_num_of_queues_per_device >
KFD_MAX_NUM_OF_QUEUES_PER_DEVICE)) {
- pr_err("kfd: max_num_of_queues_per_device must be between 1 to KFD_MAX_NUM_OF_QUEUES_PER_DEVICE\n");
+ pr_err("max_num_of_queues_per_device must be between 1 to KFD_MAX_NUM_OF_QUEUES_PER_DEVICE\n");
return -1;
}
err = kfd_pasid_init();
if (err < 0)
- goto err_pasid;
+ return err;
err = kfd_chardev_init();
if (err < 0)
@@ -126,7 +127,6 @@ err_topology:
kfd_chardev_exit();
err_ioctl:
kfd_pasid_exit();
-err_pasid:
return err;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
index 213a71e0b6c7..1f3a6ba7eed2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
@@ -67,7 +67,8 @@ struct mqd_manager {
int (*load_mqd)(struct mqd_manager *mm, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
- uint32_t __user *wptr);
+ struct queue_properties *p,
+ struct mm_struct *mms);
int (*update_mqd)(struct mqd_manager *mm, void *mqd,
struct queue_properties *q);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
index 6acc4313363e..44ffd23348fc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
@@ -44,10 +44,6 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
struct cik_mqd *m;
int retval;
- BUG_ON(!mm || !q || !mqd);
-
- pr_debug("kfd: In func %s\n", __func__);
-
retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd),
mqd_mem_obj);
@@ -101,7 +97,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
m->cp_hqd_iq_rptr = AQL_ENABLE;
*mqd = m;
- if (gart_addr != NULL)
+ if (gart_addr)
*gart_addr = addr;
retval = mm->update_mqd(mm, m, q);
@@ -115,8 +111,6 @@ static int init_mqd_sdma(struct mqd_manager *mm, void **mqd,
int retval;
struct cik_sdma_rlc_registers *m;
- BUG_ON(!mm || !mqd || !mqd_mem_obj);
-
retval = kfd_gtt_sa_allocate(mm->dev,
sizeof(struct cik_sdma_rlc_registers),
mqd_mem_obj);
@@ -129,7 +123,7 @@ static int init_mqd_sdma(struct mqd_manager *mm, void **mqd,
memset(m, 0, sizeof(struct cik_sdma_rlc_registers));
*mqd = m;
- if (gart_addr != NULL)
+ if (gart_addr)
*gart_addr = (*mqd_mem_obj)->gpu_addr;
retval = mm->update_mqd(mm, m, q);
@@ -140,27 +134,31 @@ static int init_mqd_sdma(struct mqd_manager *mm, void **mqd,
static void uninit_mqd(struct mqd_manager *mm, void *mqd,
struct kfd_mem_obj *mqd_mem_obj)
{
- BUG_ON(!mm || !mqd);
kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
}
static void uninit_mqd_sdma(struct mqd_manager *mm, void *mqd,
struct kfd_mem_obj *mqd_mem_obj)
{
- BUG_ON(!mm || !mqd);
kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
}
static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr)
+ uint32_t queue_id, struct queue_properties *p,
+ struct mm_struct *mms)
{
- return mm->dev->kfd2kgd->hqd_load
- (mm->dev->kgd, mqd, pipe_id, queue_id, wptr);
+ /* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */
+ uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);
+ uint32_t wptr_mask = (uint32_t)((p->queue_size / sizeof(uint32_t)) - 1);
+
+ return mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id,
+ (uint32_t __user *)p->write_ptr,
+ wptr_shift, wptr_mask, mms);
}
static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
- uint32_t pipe_id, uint32_t queue_id,
- uint32_t __user *wptr)
+ uint32_t pipe_id, uint32_t queue_id,
+ struct queue_properties *p, struct mm_struct *mms)
{
return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd);
}
@@ -170,10 +168,6 @@ static int update_mqd(struct mqd_manager *mm, void *mqd,
{
struct cik_mqd *m;
- BUG_ON(!mm || !q || !mqd);
-
- pr_debug("kfd: In func %s\n", __func__);
-
m = get_mqd(mqd);
m->cp_hqd_pq_control = DEFAULT_RPTR_BLOCK_SIZE |
DEFAULT_MIN_AVAIL_SIZE | PQ_ATC_EN;
@@ -188,21 +182,17 @@ static int update_mqd(struct mqd_manager *mm, void *mqd,
m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
- m->cp_hqd_pq_doorbell_control = DOORBELL_EN |
- DOORBELL_OFFSET(q->doorbell_off);
+ m->cp_hqd_pq_doorbell_control = DOORBELL_OFFSET(q->doorbell_off);
m->cp_hqd_vmid = q->vmid;
- if (q->format == KFD_QUEUE_FORMAT_AQL) {
+ if (q->format == KFD_QUEUE_FORMAT_AQL)
m->cp_hqd_pq_control |= NO_UPDATE_RPTR;
- }
- m->cp_hqd_active = 0;
q->is_active = false;
if (q->queue_size > 0 &&
q->queue_address != 0 &&
q->queue_percent > 0) {
- m->cp_hqd_active = 1;
q->is_active = true;
}
@@ -214,8 +204,6 @@ static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
{
struct cik_sdma_rlc_registers *m;
- BUG_ON(!mm || !mqd || !q);
-
m = get_sdma_mqd(mqd);
m->sdma_rlc_rb_cntl = ffs(q->queue_size / sizeof(unsigned int)) <<
SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
@@ -254,7 +242,7 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd,
unsigned int timeout, uint32_t pipe_id,
uint32_t queue_id)
{
- return mm->dev->kfd2kgd->hqd_destroy(mm->dev->kgd, type, timeout,
+ return mm->dev->kfd2kgd->hqd_destroy(mm->dev->kgd, mqd, type, timeout,
pipe_id, queue_id);
}
@@ -301,10 +289,6 @@ static int init_mqd_hiq(struct mqd_manager *mm, void **mqd,
struct cik_mqd *m;
int retval;
- BUG_ON(!mm || !q || !mqd || !mqd_mem_obj);
-
- pr_debug("kfd: In func %s\n", __func__);
-
retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd),
mqd_mem_obj);
@@ -359,10 +343,6 @@ static int update_mqd_hiq(struct mqd_manager *mm, void *mqd,
{
struct cik_mqd *m;
- BUG_ON(!mm || !q || !mqd);
-
- pr_debug("kfd: In func %s\n", __func__);
-
m = get_mqd(mqd);
m->cp_hqd_pq_control = DEFAULT_RPTR_BLOCK_SIZE |
DEFAULT_MIN_AVAIL_SIZE |
@@ -400,8 +380,6 @@ struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
{
struct cik_sdma_rlc_registers *m;
- BUG_ON(!mqd);
-
m = (struct cik_sdma_rlc_registers *)mqd;
return m;
@@ -412,12 +390,10 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
{
struct mqd_manager *mqd;
- BUG_ON(!dev);
- BUG_ON(type >= KFD_MQD_TYPE_MAX);
-
- pr_debug("kfd: In func %s\n", __func__);
+ if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
+ return NULL;
- mqd = kzalloc(sizeof(struct mqd_manager), GFP_KERNEL);
+ mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
if (!mqd)
return NULL;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
index a9b9882a9a77..73cbfe186dd2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
@@ -85,7 +85,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
m->cp_hqd_iq_rptr = 1;
*mqd = m;
- if (gart_addr != NULL)
+ if (gart_addr)
*gart_addr = addr;
retval = mm->update_mqd(mm, m, q);
@@ -94,10 +94,15 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
static int load_mqd(struct mqd_manager *mm, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
- uint32_t __user *wptr)
+ struct queue_properties *p, struct mm_struct *mms)
{
- return mm->dev->kfd2kgd->hqd_load
- (mm->dev->kgd, mqd, pipe_id, queue_id, wptr);
+ /* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */
+ uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);
+ uint32_t wptr_mask = (uint32_t)((p->queue_size / sizeof(uint32_t)) - 1);
+
+ return mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id,
+ (uint32_t __user *)p->write_ptr,
+ wptr_shift, wptr_mask, mms);
}
static int __update_mqd(struct mqd_manager *mm, void *mqd,
@@ -106,10 +111,6 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
{
struct vi_mqd *m;
- BUG_ON(!mm || !q || !mqd);
-
- pr_debug("kfd: In func %s\n", __func__);
-
m = get_mqd(mqd);
m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT |
@@ -117,7 +118,7 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
mtype << CP_HQD_PQ_CONTROL__MTYPE__SHIFT;
m->cp_hqd_pq_control |=
ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
- pr_debug("kfd: cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
+ pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
@@ -126,10 +127,9 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
m->cp_hqd_pq_doorbell_control =
- 1 << CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN__SHIFT |
q->doorbell_off <<
CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
- pr_debug("kfd: cp_hqd_pq_doorbell_control 0x%x\n",
+ pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
m->cp_hqd_pq_doorbell_control);
m->cp_hqd_eop_control = atc_bit << CP_HQD_EOP_CONTROL__EOP_ATC__SHIFT |
@@ -139,8 +139,15 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
3 << CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT |
mtype << CP_HQD_IB_CONTROL__MTYPE__SHIFT;
- m->cp_hqd_eop_control |=
- ffs(q->eop_ring_buffer_size / sizeof(unsigned int)) - 1 - 1;
+ /*
+ * HW does not clamp this field correctly. Maximum EOP queue size
+ * is constrained by per-SE EOP done signal count, which is 8-bit.
+ * Limit is 0xFF EOP entries (= 0x7F8 dwords). CP will not submit
+ * more than (EOP entry count - 1) so a queue size of 0x800 dwords
+ * is safe, giving a maximum field value of 0xA.
+ */
+ m->cp_hqd_eop_control |= min(0xA,
+ ffs(q->eop_ring_buffer_size / sizeof(unsigned int)) - 1 - 1);
m->cp_hqd_eop_base_addr_lo =
lower_32_bits(q->eop_ring_buffer_address >> 8);
m->cp_hqd_eop_base_addr_hi =
@@ -156,12 +163,10 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT;
}
- m->cp_hqd_active = 0;
q->is_active = false;
if (q->queue_size > 0 &&
q->queue_address != 0 &&
q->queue_percent > 0) {
- m->cp_hqd_active = 1;
q->is_active = true;
}
@@ -181,14 +186,13 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd,
uint32_t queue_id)
{
return mm->dev->kfd2kgd->hqd_destroy
- (mm->dev->kgd, type, timeout,
+ (mm->dev->kgd, mqd, type, timeout,
pipe_id, queue_id);
}
static void uninit_mqd(struct mqd_manager *mm, void *mqd,
struct kfd_mem_obj *mqd_mem_obj)
{
- BUG_ON(!mm || !mqd);
kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
}
@@ -238,12 +242,10 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
{
struct mqd_manager *mqd;
- BUG_ON(!dev);
- BUG_ON(type >= KFD_MQD_TYPE_MAX);
-
- pr_debug("kfd: In func %s\n", __func__);
+ if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
+ return NULL;
- mqd = kzalloc(sizeof(struct mqd_manager), GFP_KERNEL);
+ mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
if (!mqd)
return NULL;
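
The new cp_hqd_eop_control clamp in __update_mqd() caps the encoded EOP ring size at 0xA, matching the comment about the 8-bit per-SE EOP done-signal count. A worked sketch of the ffs() arithmetic (not driver code):

/* Sketch of the clamp above. eop_ring_buffer_size is in bytes:
 * 0x2000 bytes = 0x800 dwords, ffs(0x800) = 12, 12 - 1 - 1 = 10 = 0xA,
 * so a 0x800-dword ring already sits at the cap; 0x1000 dwords would
 * give 11 and be clamped back to 0xA.
 */
static unsigned int eop_size_field(unsigned int eop_ring_buffer_size)
{
	unsigned int v = ffs(eop_ring_buffer_size / sizeof(unsigned int)) - 1 - 1;

	return v > 0xA ? 0xA : v;
}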
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 7131998848d7..1d312603de9f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -26,7 +26,6 @@
#include "kfd_device_queue_manager.h"
#include "kfd_kernel_queue.h"
#include "kfd_priv.h"
-#include "kfd_pm4_headers.h"
#include "kfd_pm4_headers_vi.h"
#include "kfd_pm4_opcodes.h"
@@ -35,7 +34,8 @@ static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
{
unsigned int temp = *wptr + increment_bytes / sizeof(uint32_t);
- BUG_ON((temp * sizeof(uint32_t)) > buffer_size_bytes);
+ WARN((temp * sizeof(uint32_t)) > buffer_size_bytes,
+ "Runlist IB overflow");
*wptr = temp;
}
@@ -43,12 +43,12 @@ static unsigned int build_pm4_header(unsigned int opcode, size_t packet_size)
{
union PM4_MES_TYPE_3_HEADER header;
- header.u32all = 0;
+ header.u32All = 0;
header.opcode = opcode;
header.count = packet_size/sizeof(uint32_t) - 2;
header.type = PM4_TYPE_3;
- return header.u32all;
+ return header.u32All;
}
static void pm_calc_rlib_size(struct packet_manager *pm,
@@ -58,8 +58,6 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
unsigned int process_count, queue_count;
unsigned int map_queue_size;
- BUG_ON(!pm || !rlib_size || !over_subscription);
-
process_count = pm->dqm->processes_count;
queue_count = pm->dqm->queue_count;
@@ -67,15 +65,12 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
*over_subscription = false;
if ((process_count > 1) || queue_count > get_queues_num(pm->dqm)) {
*over_subscription = true;
- pr_debug("kfd: over subscribed runlist\n");
+ pr_debug("Over subscribed runlist\n");
}
- map_queue_size =
- (pm->dqm->dev->device_info->asic_family == CHIP_CARRIZO) ?
- sizeof(struct pm4_mes_map_queues) :
- sizeof(struct pm4_map_queues);
+ map_queue_size = sizeof(struct pm4_mes_map_queues);
/* calculate run list ib allocation size */
- *rlib_size = process_count * sizeof(struct pm4_map_process) +
+ *rlib_size = process_count * sizeof(struct pm4_mes_map_process) +
queue_count * map_queue_size;
/*
@@ -83,9 +78,9 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
* when over subscription
*/
if (*over_subscription)
- *rlib_size += sizeof(struct pm4_runlist);
+ *rlib_size += sizeof(struct pm4_mes_runlist);
- pr_debug("kfd: runlist ib size %d\n", *rlib_size);
+ pr_debug("runlist ib size %d\n", *rlib_size);
}
static int pm_allocate_runlist_ib(struct packet_manager *pm,
@@ -96,17 +91,16 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm,
{
int retval;
- BUG_ON(!pm);
- BUG_ON(pm->allocated);
- BUG_ON(is_over_subscription == NULL);
+ if (WARN_ON(pm->allocated))
+ return -EINVAL;
pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);
retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size,
&pm->ib_buffer_obj);
- if (retval != 0) {
- pr_err("kfd: failed to allocate runlist IB\n");
+ if (retval) {
+ pr_err("Failed to allocate runlist IB\n");
return retval;
}
@@ -121,15 +115,16 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm,
static int pm_create_runlist(struct packet_manager *pm, uint32_t *buffer,
uint64_t ib, size_t ib_size_in_dwords, bool chain)
{
- struct pm4_runlist *packet;
+ struct pm4_mes_runlist *packet;
- BUG_ON(!pm || !buffer || !ib);
+ if (WARN_ON(!ib))
+ return -EFAULT;
- packet = (struct pm4_runlist *)buffer;
+ packet = (struct pm4_mes_runlist *)buffer;
- memset(buffer, 0, sizeof(struct pm4_runlist));
- packet->header.u32all = build_pm4_header(IT_RUN_LIST,
- sizeof(struct pm4_runlist));
+ memset(buffer, 0, sizeof(struct pm4_mes_runlist));
+ packet->header.u32All = build_pm4_header(IT_RUN_LIST,
+ sizeof(struct pm4_mes_runlist));
packet->bitfields4.ib_size = ib_size_in_dwords;
packet->bitfields4.chain = chain ? 1 : 0;
@@ -144,20 +139,16 @@ static int pm_create_runlist(struct packet_manager *pm, uint32_t *buffer,
static int pm_create_map_process(struct packet_manager *pm, uint32_t *buffer,
struct qcm_process_device *qpd)
{
- struct pm4_map_process *packet;
+ struct pm4_mes_map_process *packet;
struct queue *cur;
uint32_t num_queues;
- BUG_ON(!pm || !buffer || !qpd);
-
- packet = (struct pm4_map_process *)buffer;
-
- pr_debug("kfd: In func %s\n", __func__);
+ packet = (struct pm4_mes_map_process *)buffer;
- memset(buffer, 0, sizeof(struct pm4_map_process));
+ memset(buffer, 0, sizeof(struct pm4_mes_map_process));
- packet->header.u32all = build_pm4_header(IT_MAP_PROCESS,
- sizeof(struct pm4_map_process));
+ packet->header.u32All = build_pm4_header(IT_MAP_PROCESS,
+ sizeof(struct pm4_mes_map_process));
packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
packet->bitfields2.process_quantum = 1;
packet->bitfields2.pasid = qpd->pqm->process->pasid;
@@ -175,27 +166,26 @@ static int pm_create_map_process(struct packet_manager *pm, uint32_t *buffer,
packet->sh_mem_ape1_base = qpd->sh_mem_ape1_base;
packet->sh_mem_ape1_limit = qpd->sh_mem_ape1_limit;
+ /* TODO: scratch support */
+ packet->sh_hidden_private_base_vmid = 0;
+
packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);
return 0;
}
-static int pm_create_map_queue_vi(struct packet_manager *pm, uint32_t *buffer,
+static int pm_create_map_queue(struct packet_manager *pm, uint32_t *buffer,
struct queue *q, bool is_static)
{
struct pm4_mes_map_queues *packet;
bool use_static = is_static;
- BUG_ON(!pm || !buffer || !q);
-
- pr_debug("kfd: In func %s\n", __func__);
-
packet = (struct pm4_mes_map_queues *)buffer;
- memset(buffer, 0, sizeof(struct pm4_map_queues));
+ memset(buffer, 0, sizeof(struct pm4_mes_map_queues));
- packet->header.u32all = build_pm4_header(IT_MAP_QUEUES,
- sizeof(struct pm4_map_queues));
+ packet->header.u32All = build_pm4_header(IT_MAP_QUEUES,
+ sizeof(struct pm4_mes_map_queues));
packet->bitfields2.alloc_format =
alloc_format__mes_map_queues__one_per_pipe_vi;
packet->bitfields2.num_queues = 1;
@@ -223,10 +213,8 @@ static int pm_create_map_queue_vi(struct packet_manager *pm, uint32_t *buffer,
use_static = false; /* no static queues under SDMA */
break;
default:
- pr_err("kfd: in %s queue type %d\n", __func__,
- q->properties.type);
- BUG();
- break;
+ WARN(1, "queue type %d", q->properties.type);
+ return -EINVAL;
}
packet->bitfields3.doorbell_offset =
q->properties.doorbell_off;
@@ -246,68 +234,6 @@ static int pm_create_map_queue_vi(struct packet_manager *pm, uint32_t *buffer,
return 0;
}
-static int pm_create_map_queue(struct packet_manager *pm, uint32_t *buffer,
- struct queue *q, bool is_static)
-{
- struct pm4_map_queues *packet;
- bool use_static = is_static;
-
- BUG_ON(!pm || !buffer || !q);
-
- pr_debug("kfd: In func %s\n", __func__);
-
- packet = (struct pm4_map_queues *)buffer;
- memset(buffer, 0, sizeof(struct pm4_map_queues));
-
- packet->header.u32all = build_pm4_header(IT_MAP_QUEUES,
- sizeof(struct pm4_map_queues));
- packet->bitfields2.alloc_format =
- alloc_format__mes_map_queues__one_per_pipe;
- packet->bitfields2.num_queues = 1;
- packet->bitfields2.queue_sel =
- queue_sel__mes_map_queues__map_to_hws_determined_queue_slots;
-
- packet->bitfields2.vidmem = (q->properties.is_interop) ?
- vidmem__mes_map_queues__uses_video_memory :
- vidmem__mes_map_queues__uses_no_video_memory;
-
- switch (q->properties.type) {
- case KFD_QUEUE_TYPE_COMPUTE:
- case KFD_QUEUE_TYPE_DIQ:
- packet->bitfields2.engine_sel =
- engine_sel__mes_map_queues__compute;
- break;
- case KFD_QUEUE_TYPE_SDMA:
- packet->bitfields2.engine_sel =
- engine_sel__mes_map_queues__sdma0;
- use_static = false; /* no static queues under SDMA */
- break;
- default:
- BUG();
- break;
- }
-
- packet->mes_map_queues_ordinals[0].bitfields3.doorbell_offset =
- q->properties.doorbell_off;
-
- packet->mes_map_queues_ordinals[0].bitfields3.is_static =
- (use_static) ? 1 : 0;
-
- packet->mes_map_queues_ordinals[0].mqd_addr_lo =
- lower_32_bits(q->gart_mqd_addr);
-
- packet->mes_map_queues_ordinals[0].mqd_addr_hi =
- upper_32_bits(q->gart_mqd_addr);
-
- packet->mes_map_queues_ordinals[0].wptr_addr_lo =
- lower_32_bits((uint64_t)q->properties.write_ptr);
-
- packet->mes_map_queues_ordinals[0].wptr_addr_hi =
- upper_32_bits((uint64_t)q->properties.write_ptr);
-
- return 0;
-}
-
static int pm_create_runlist_ib(struct packet_manager *pm,
struct list_head *queues,
uint64_t *rl_gpu_addr,
@@ -322,19 +248,16 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
struct kernel_queue *kq;
bool is_over_subscription;
- BUG_ON(!pm || !queues || !rl_size_bytes || !rl_gpu_addr);
-
rl_wptr = retval = proccesses_mapped = 0;
retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr,
&alloc_size_bytes, &is_over_subscription);
- if (retval != 0)
+ if (retval)
return retval;
*rl_size_bytes = alloc_size_bytes;
- pr_debug("kfd: In func %s\n", __func__);
- pr_debug("kfd: building runlist ib process count: %d queues count %d\n",
+ pr_debug("Building runlist ib process count: %d queues count %d\n",
pm->dqm->processes_count, pm->dqm->queue_count);
/* build the run list ib packet */
@@ -342,42 +265,35 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
qpd = cur->qpd;
/* build map process packet */
if (proccesses_mapped >= pm->dqm->processes_count) {
- pr_debug("kfd: not enough space left in runlist IB\n");
+ pr_debug("Not enough space left in runlist IB\n");
pm_release_ib(pm);
return -ENOMEM;
}
retval = pm_create_map_process(pm, &rl_buffer[rl_wptr], qpd);
- if (retval != 0)
+ if (retval)
return retval;
proccesses_mapped++;
- inc_wptr(&rl_wptr, sizeof(struct pm4_map_process),
+ inc_wptr(&rl_wptr, sizeof(struct pm4_mes_map_process),
alloc_size_bytes);
list_for_each_entry(kq, &qpd->priv_queue_list, list) {
if (!kq->queue->properties.is_active)
continue;
- pr_debug("kfd: static_queue, mapping kernel q %d, is debug status %d\n",
+ pr_debug("static_queue, mapping kernel q %d, is debug status %d\n",
kq->queue->queue, qpd->is_debug);
- if (pm->dqm->dev->device_info->asic_family ==
- CHIP_CARRIZO)
- retval = pm_create_map_queue_vi(pm,
- &rl_buffer[rl_wptr],
- kq->queue,
- qpd->is_debug);
- else
- retval = pm_create_map_queue(pm,
+ retval = pm_create_map_queue(pm,
&rl_buffer[rl_wptr],
kq->queue,
qpd->is_debug);
- if (retval != 0)
+ if (retval)
return retval;
inc_wptr(&rl_wptr,
- sizeof(struct pm4_map_queues),
+ sizeof(struct pm4_mes_map_queues),
alloc_size_bytes);
}
@@ -385,51 +301,44 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
if (!q->properties.is_active)
continue;
- pr_debug("kfd: static_queue, mapping user queue %d, is debug status %d\n",
+ pr_debug("static_queue, mapping user queue %d, is debug status %d\n",
q->queue, qpd->is_debug);
- if (pm->dqm->dev->device_info->asic_family ==
- CHIP_CARRIZO)
- retval = pm_create_map_queue_vi(pm,
- &rl_buffer[rl_wptr],
- q,
- qpd->is_debug);
- else
- retval = pm_create_map_queue(pm,
+ retval = pm_create_map_queue(pm,
&rl_buffer[rl_wptr],
q,
qpd->is_debug);
- if (retval != 0)
+ if (retval)
return retval;
inc_wptr(&rl_wptr,
- sizeof(struct pm4_map_queues),
+ sizeof(struct pm4_mes_map_queues),
alloc_size_bytes);
}
}
- pr_debug("kfd: finished map process and queues to runlist\n");
+ pr_debug("Finished map process and queues to runlist\n");
if (is_over_subscription)
- pm_create_runlist(pm, &rl_buffer[rl_wptr], *rl_gpu_addr,
- alloc_size_bytes / sizeof(uint32_t), true);
+ retval = pm_create_runlist(pm, &rl_buffer[rl_wptr],
+ *rl_gpu_addr,
+ alloc_size_bytes / sizeof(uint32_t),
+ true);
for (i = 0; i < alloc_size_bytes / sizeof(uint32_t); i++)
pr_debug("0x%2X ", rl_buffer[i]);
pr_debug("\n");
- return 0;
+ return retval;
}
int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
{
- BUG_ON(!dqm);
-
pm->dqm = dqm;
mutex_init(&pm->lock);
pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ);
- if (pm->priv_queue == NULL) {
+ if (!pm->priv_queue) {
mutex_destroy(&pm->lock);
return -ENOMEM;
}
@@ -440,8 +349,6 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
void pm_uninit(struct packet_manager *pm)
{
- BUG_ON(!pm);
-
mutex_destroy(&pm->lock);
kernel_queue_uninit(pm->priv_queue);
}
@@ -449,25 +356,22 @@ void pm_uninit(struct packet_manager *pm)
int pm_send_set_resources(struct packet_manager *pm,
struct scheduling_resources *res)
{
- struct pm4_set_resources *packet;
-
- BUG_ON(!pm || !res);
-
- pr_debug("kfd: In func %s\n", __func__);
+ struct pm4_mes_set_resources *packet;
+ int retval = 0;
mutex_lock(&pm->lock);
pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
sizeof(*packet) / sizeof(uint32_t),
- (unsigned int **)&packet);
- if (packet == NULL) {
- mutex_unlock(&pm->lock);
- pr_err("kfd: failed to allocate buffer on kernel queue\n");
- return -ENOMEM;
+ (unsigned int **)&packet);
+ if (!packet) {
+ pr_err("Failed to allocate buffer on kernel queue\n");
+ retval = -ENOMEM;
+ goto out;
}
- memset(packet, 0, sizeof(struct pm4_set_resources));
- packet->header.u32all = build_pm4_header(IT_SET_RESOURCES,
- sizeof(struct pm4_set_resources));
+ memset(packet, 0, sizeof(struct pm4_mes_set_resources));
+ packet->header.u32All = build_pm4_header(IT_SET_RESOURCES,
+ sizeof(struct pm4_mes_set_resources));
packet->bitfields2.queue_type =
queue_type__mes_set_resources__hsa_interface_queue_hiq;
@@ -485,9 +389,10 @@ int pm_send_set_resources(struct packet_manager *pm,
pm->priv_queue->ops.submit_packet(pm->priv_queue);
+out:
mutex_unlock(&pm->lock);
- return 0;
+ return retval;
}
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
@@ -497,26 +402,24 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
size_t rl_ib_size, packet_size_dwords;
int retval;
- BUG_ON(!pm || !dqm_queues);
-
retval = pm_create_runlist_ib(pm, dqm_queues, &rl_gpu_ib_addr,
&rl_ib_size);
- if (retval != 0)
+ if (retval)
goto fail_create_runlist_ib;
- pr_debug("kfd: runlist IB address: 0x%llX\n", rl_gpu_ib_addr);
+ pr_debug("runlist IB address: 0x%llX\n", rl_gpu_ib_addr);
- packet_size_dwords = sizeof(struct pm4_runlist) / sizeof(uint32_t);
+ packet_size_dwords = sizeof(struct pm4_mes_runlist) / sizeof(uint32_t);
mutex_lock(&pm->lock);
retval = pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
packet_size_dwords, &rl_buffer);
- if (retval != 0)
+ if (retval)
goto fail_acquire_packet_buffer;
retval = pm_create_runlist(pm, rl_buffer, rl_gpu_ib_addr,
rl_ib_size / sizeof(uint32_t), false);
- if (retval != 0)
+ if (retval)
goto fail_create_runlist;
pm->priv_queue->ops.submit_packet(pm->priv_queue);
@@ -530,8 +433,7 @@ fail_create_runlist:
fail_acquire_packet_buffer:
mutex_unlock(&pm->lock);
fail_create_runlist_ib:
- if (pm->allocated)
- pm_release_ib(pm);
+ pm_release_ib(pm);
return retval;
}
@@ -539,20 +441,21 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
uint32_t fence_value)
{
int retval;
- struct pm4_query_status *packet;
+ struct pm4_mes_query_status *packet;
- BUG_ON(!pm || !fence_address);
+ if (WARN_ON(!fence_address))
+ return -EFAULT;
mutex_lock(&pm->lock);
retval = pm->priv_queue->ops.acquire_packet_buffer(
pm->priv_queue,
- sizeof(struct pm4_query_status) / sizeof(uint32_t),
+ sizeof(struct pm4_mes_query_status) / sizeof(uint32_t),
(unsigned int **)&packet);
- if (retval != 0)
+ if (retval)
goto fail_acquire_packet_buffer;
- packet->header.u32all = build_pm4_header(IT_QUERY_STATUS,
- sizeof(struct pm4_query_status));
+ packet->header.u32All = build_pm4_header(IT_QUERY_STATUS,
+ sizeof(struct pm4_mes_query_status));
packet->bitfields2.context_id = 0;
packet->bitfields2.interrupt_sel =
@@ -566,9 +469,6 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
packet->data_lo = lower_32_bits((uint64_t)fence_value);
pm->priv_queue->ops.submit_packet(pm->priv_queue);
- mutex_unlock(&pm->lock);
-
- return 0;
fail_acquire_packet_buffer:
mutex_unlock(&pm->lock);
@@ -582,24 +482,22 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
{
int retval;
uint32_t *buffer;
- struct pm4_unmap_queues *packet;
-
- BUG_ON(!pm);
+ struct pm4_mes_unmap_queues *packet;
mutex_lock(&pm->lock);
retval = pm->priv_queue->ops.acquire_packet_buffer(
pm->priv_queue,
- sizeof(struct pm4_unmap_queues) / sizeof(uint32_t),
+ sizeof(struct pm4_mes_unmap_queues) / sizeof(uint32_t),
&buffer);
- if (retval != 0)
+ if (retval)
goto err_acquire_packet_buffer;
- packet = (struct pm4_unmap_queues *)buffer;
- memset(buffer, 0, sizeof(struct pm4_unmap_queues));
- pr_debug("kfd: static_queue: unmapping queues: mode is %d , reset is %d , type is %d\n",
+ packet = (struct pm4_mes_unmap_queues *)buffer;
+ memset(buffer, 0, sizeof(struct pm4_mes_unmap_queues));
+ pr_debug("static_queue: unmapping queues: mode is %d , reset is %d , type is %d\n",
mode, reset, type);
- packet->header.u32all = build_pm4_header(IT_UNMAP_QUEUES,
- sizeof(struct pm4_unmap_queues));
+ packet->header.u32All = build_pm4_header(IT_UNMAP_QUEUES,
+ sizeof(struct pm4_mes_unmap_queues));
switch (type) {
case KFD_QUEUE_TYPE_COMPUTE:
case KFD_QUEUE_TYPE_DIQ:
@@ -611,8 +509,9 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
engine_sel__mes_unmap_queues__sdma0 + sdma_engine;
break;
default:
- BUG();
- break;
+ WARN(1, "queue type %d", type);
+ retval = -EINVAL;
+ goto err_invalid;
}
if (reset)
@@ -636,16 +535,17 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
break;
case KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES:
packet->bitfields2.queue_sel =
- queue_sel__mes_unmap_queues__perform_request_on_all_active_queues;
+ queue_sel__mes_unmap_queues__unmap_all_queues;
break;
case KFD_PREEMPT_TYPE_FILTER_DYNAMIC_QUEUES:
/* in this case, we do not preempt static queues */
packet->bitfields2.queue_sel =
- queue_sel__mes_unmap_queues__perform_request_on_dynamic_queues_only;
+ queue_sel__mes_unmap_queues__unmap_all_non_static_queues;
break;
default:
- BUG();
- break;
+ WARN(1, "filter %d", mode);
+ retval = -EINVAL;
+ goto err_invalid;
}
pm->priv_queue->ops.submit_packet(pm->priv_queue);
@@ -653,6 +553,8 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
mutex_unlock(&pm->lock);
return 0;
+err_invalid:
+ pm->priv_queue->ops.rollback_packet(pm->priv_queue);
err_acquire_packet_buffer:
mutex_unlock(&pm->lock);
return retval;
@@ -660,8 +562,6 @@ err_acquire_packet_buffer:
void pm_release_ib(struct packet_manager *pm)
{
- BUG_ON(!pm);
-
mutex_lock(&pm->lock);
if (pm->allocated) {
kfd_gtt_sa_free(pm->dqm->dev, pm->ib_buffer_obj);
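The packet-manager rework above converges on one shape for functions that take pm->lock: acquire the packet buffer, bail to a single unlock label on any failure (rolling the buffer back first if something was already reserved), and let pm_release_ib() check pm->allocated itself so error paths can call it unconditionally. A minimal userspace model of that lock-and-goto shape; the names (acquire_buffer, submit, rollback) are illustrative, not the driver's:

#include <errno.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int ring[4];

/* Stand-ins for the driver's acquire/submit/rollback operations. */
static int acquire_buffer(unsigned int **buf)
{
    *buf = ring;
    return 0;
}
static void submit(unsigned int *buf)   { (void)buf; }
static void rollback(unsigned int *buf) { (void)buf; }

static int send_packet(unsigned int value)
{
    unsigned int *buf;
    int retval = 0;

    pthread_mutex_lock(&lock);

    retval = acquire_buffer(&buf);
    if (retval)
        goto out;                 /* nothing reserved yet, just unlock */

    if (value > 0xffff) {         /* invalid input: undo, then unlock */
        rollback(buf);
        retval = -EINVAL;
        goto out;
    }

    buf[0] = value;
    submit(buf);
out:
    pthread_mutex_unlock(&lock);
    return retval;
}

int main(void)
{
    return send_packet(0x42);
}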
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
index 6cfe7f1f18cf..1e06de0bc673 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
@@ -32,7 +32,8 @@ int kfd_pasid_init(void)
{
pasid_limit = KFD_MAX_NUM_OF_PROCESSES;
- pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL);
+ pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long),
+ GFP_KERNEL);
if (!pasid_bitmap)
return -ENOMEM;
@@ -91,6 +92,6 @@ unsigned int kfd_pasid_alloc(void)
void kfd_pasid_free(unsigned int pasid)
{
- BUG_ON(pasid == 0 || pasid >= pasid_limit);
- clear_bit(pasid, pasid_bitmap);
+ if (!WARN_ON(pasid == 0 || pasid >= pasid_limit))
+ clear_bit(pasid, pasid_bitmap);
}
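kfd_pasid.c is a thin bitmap allocator: storage comes from kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long)), allocation (not shown in this hunk) hands out the index of a clear bit, and free now range-checks the id before clearing its bit instead of crashing via BUG_ON. A self-contained userspace model of the same idea, with made-up names and a made-up limit:

#include <stdio.h>

#define ID_LIMIT       512u
#define BITS_PER_WORD  (8 * sizeof(unsigned long))
#define NWORDS         ((ID_LIMIT + BITS_PER_WORD - 1) / BITS_PER_WORD)

static unsigned long bitmap[NWORDS];    /* bit n set => id n is in use */

/* Return the first free id, or 0 when exhausted (id 0 stays reserved). */
static unsigned int id_alloc(void)
{
    unsigned int id;

    for (id = 1; id < ID_LIMIT; id++) {
        if (!(bitmap[id / BITS_PER_WORD] & (1UL << (id % BITS_PER_WORD)))) {
            bitmap[id / BITS_PER_WORD] |= 1UL << (id % BITS_PER_WORD);
            return id;
        }
    }
    return 0;
}

/* Ignore obviously bad ids instead of crashing, as the patched kfd_pasid_free() does. */
static void id_free(unsigned int id)
{
    if (id == 0 || id >= ID_LIMIT)
        return;
    bitmap[id / BITS_PER_WORD] &= ~(1UL << (id % BITS_PER_WORD));
}

int main(void)
{
    unsigned int a = id_alloc(), b = id_alloc();

    printf("allocated %u and %u\n", a, b);      /* 1 and 2 */
    id_free(a);
    printf("reallocated %u\n", id_alloc());     /* 1 again */
    id_free(b);
    return 0;
}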
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers.h
index 5b393f3e34a9..e50f73d25de6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers.h
@@ -28,112 +28,19 @@
#define PM4_MES_HEADER_DEFINED
union PM4_MES_TYPE_3_HEADER {
struct {
- uint32_t reserved1:8; /* < reserved */
- uint32_t opcode:8; /* < IT opcode */
- uint32_t count:14; /* < number of DWORDs - 1
- * in the information body.
- */
- uint32_t type:2; /* < packet identifier.
- * It should be 3 for type 3 packets
- */
+ /* reserved */
+ uint32_t reserved1:8;
+ /* IT opcode */
+ uint32_t opcode:8;
+ /* number of DWORDs - 1 in the information body */
+ uint32_t count:14;
+ /* packet identifier. It should be 3 for type 3 packets */
+ uint32_t type:2;
};
uint32_t u32all;
};
#endif /* PM4_MES_HEADER_DEFINED */
-/* --------------------MES_SET_RESOURCES-------------------- */
-
-#ifndef PM4_MES_SET_RESOURCES_DEFINED
-#define PM4_MES_SET_RESOURCES_DEFINED
-enum set_resources_queue_type_enum {
- queue_type__mes_set_resources__kernel_interface_queue_kiq = 0,
- queue_type__mes_set_resources__hsa_interface_queue_hiq = 1,
- queue_type__mes_set_resources__hsa_debug_interface_queue = 4
-};
-
-struct pm4_set_resources {
- union {
- union PM4_MES_TYPE_3_HEADER header; /* header */
- uint32_t ordinal1;
- };
-
- union {
- struct {
- uint32_t vmid_mask:16;
- uint32_t unmap_latency:8;
- uint32_t reserved1:5;
- enum set_resources_queue_type_enum queue_type:3;
- } bitfields2;
- uint32_t ordinal2;
- };
-
- uint32_t queue_mask_lo;
- uint32_t queue_mask_hi;
- uint32_t gws_mask_lo;
- uint32_t gws_mask_hi;
-
- union {
- struct {
- uint32_t oac_mask:16;
- uint32_t reserved2:16;
- } bitfields7;
- uint32_t ordinal7;
- };
-
- union {
- struct {
- uint32_t gds_heap_base:6;
- uint32_t reserved3:5;
- uint32_t gds_heap_size:6;
- uint32_t reserved4:15;
- } bitfields8;
- uint32_t ordinal8;
- };
-
-};
-#endif
-
-/*--------------------MES_RUN_LIST-------------------- */
-
-#ifndef PM4_MES_RUN_LIST_DEFINED
-#define PM4_MES_RUN_LIST_DEFINED
-
-struct pm4_runlist {
- union {
- union PM4_MES_TYPE_3_HEADER header; /* header */
- uint32_t ordinal1;
- };
-
- union {
- struct {
- uint32_t reserved1:2;
- uint32_t ib_base_lo:30;
- } bitfields2;
- uint32_t ordinal2;
- };
-
- union {
- struct {
- uint32_t ib_base_hi:16;
- uint32_t reserved2:16;
- } bitfields3;
- uint32_t ordinal3;
- };
-
- union {
- struct {
- uint32_t ib_size:20;
- uint32_t chain:1;
- uint32_t offload_polling:1;
- uint32_t reserved3:1;
- uint32_t valid:1;
- uint32_t reserved4:8;
- } bitfields4;
- uint32_t ordinal4;
- };
-
-};
-#endif
/*--------------------MES_MAP_PROCESS-------------------- */
@@ -186,217 +93,58 @@ struct pm4_map_process {
};
#endif
-/*--------------------MES_MAP_QUEUES--------------------*/
-
-#ifndef PM4_MES_MAP_QUEUES_DEFINED
-#define PM4_MES_MAP_QUEUES_DEFINED
-enum map_queues_queue_sel_enum {
- queue_sel__mes_map_queues__map_to_specified_queue_slots = 0,
- queue_sel__mes_map_queues__map_to_hws_determined_queue_slots = 1,
- queue_sel__mes_map_queues__enable_process_queues = 2
-};
+#ifndef PM4_MES_MAP_PROCESS_DEFINED_KV_SCRATCH
+#define PM4_MES_MAP_PROCESS_DEFINED_KV_SCRATCH
-enum map_queues_vidmem_enum {
- vidmem__mes_map_queues__uses_no_video_memory = 0,
- vidmem__mes_map_queues__uses_video_memory = 1
-};
-
-enum map_queues_alloc_format_enum {
- alloc_format__mes_map_queues__one_per_pipe = 0,
- alloc_format__mes_map_queues__all_on_one_pipe = 1
-};
-
-enum map_queues_engine_sel_enum {
- engine_sel__mes_map_queues__compute = 0,
- engine_sel__mes_map_queues__sdma0 = 2,
- engine_sel__mes_map_queues__sdma1 = 3
-};
-
-struct pm4_map_queues {
+struct pm4_map_process_scratch_kv {
union {
- union PM4_MES_TYPE_3_HEADER header; /* header */
- uint32_t ordinal1;
- };
-
- union {
- struct {
- uint32_t reserved1:4;
- enum map_queues_queue_sel_enum queue_sel:2;
- uint32_t reserved2:2;
- uint32_t vmid:4;
- uint32_t reserved3:4;
- enum map_queues_vidmem_enum vidmem:2;
- uint32_t reserved4:6;
- enum map_queues_alloc_format_enum alloc_format:2;
- enum map_queues_engine_sel_enum engine_sel:3;
- uint32_t num_queues:3;
- } bitfields2;
- uint32_t ordinal2;
- };
-
- struct {
- union {
- struct {
- uint32_t is_static:1;
- uint32_t reserved5:1;
- uint32_t doorbell_offset:21;
- uint32_t reserved6:3;
- uint32_t queue:6;
- } bitfields3;
- uint32_t ordinal3;
- };
-
- uint32_t mqd_addr_lo;
- uint32_t mqd_addr_hi;
- uint32_t wptr_addr_lo;
- uint32_t wptr_addr_hi;
-
- } mes_map_queues_ordinals[1]; /* 1..N of these ordinal groups */
-
-};
-#endif
-
-/*--------------------MES_QUERY_STATUS--------------------*/
-
-#ifndef PM4_MES_QUERY_STATUS_DEFINED
-#define PM4_MES_QUERY_STATUS_DEFINED
-enum query_status_interrupt_sel_enum {
- interrupt_sel__mes_query_status__completion_status = 0,
- interrupt_sel__mes_query_status__process_status = 1,
- interrupt_sel__mes_query_status__queue_status = 2
-};
-
-enum query_status_command_enum {
- command__mes_query_status__interrupt_only = 0,
- command__mes_query_status__fence_only_immediate = 1,
- command__mes_query_status__fence_only_after_write_ack = 2,
- command__mes_query_status__fence_wait_for_write_ack_send_interrupt = 3
-};
-
-enum query_status_engine_sel_enum {
- engine_sel__mes_query_status__compute = 0,
- engine_sel__mes_query_status__sdma0_queue = 2,
- engine_sel__mes_query_status__sdma1_queue = 3
-};
-
-struct pm4_query_status {
- union {
- union PM4_MES_TYPE_3_HEADER header; /* header */
- uint32_t ordinal1;
- };
-
- union {
- struct {
- uint32_t context_id:28;
- enum query_status_interrupt_sel_enum interrupt_sel:2;
- enum query_status_command_enum command:2;
- } bitfields2;
- uint32_t ordinal2;
+ union PM4_MES_TYPE_3_HEADER header; /* header */
+ uint32_t ordinal1;
};
union {
struct {
uint32_t pasid:16;
- uint32_t reserved1:16;
- } bitfields3a;
- struct {
- uint32_t reserved2:2;
- uint32_t doorbell_offset:21;
- uint32_t reserved3:3;
- enum query_status_engine_sel_enum engine_sel:3;
- uint32_t reserved4:3;
- } bitfields3b;
- uint32_t ordinal3;
- };
-
- uint32_t addr_lo;
- uint32_t addr_hi;
- uint32_t data_lo;
- uint32_t data_hi;
-};
-#endif
-
-/*--------------------MES_UNMAP_QUEUES--------------------*/
-
-#ifndef PM4_MES_UNMAP_QUEUES_DEFINED
-#define PM4_MES_UNMAP_QUEUES_DEFINED
-enum unmap_queues_action_enum {
- action__mes_unmap_queues__preempt_queues = 0,
- action__mes_unmap_queues__reset_queues = 1,
- action__mes_unmap_queues__disable_process_queues = 2
-};
-
-enum unmap_queues_queue_sel_enum {
- queue_sel__mes_unmap_queues__perform_request_on_specified_queues = 0,
- queue_sel__mes_unmap_queues__perform_request_on_pasid_queues = 1,
- queue_sel__mes_unmap_queues__perform_request_on_all_active_queues = 2,
- queue_sel__mes_unmap_queues__perform_request_on_dynamic_queues_only = 3
-};
-
-enum unmap_queues_engine_sel_enum {
- engine_sel__mes_unmap_queues__compute = 0,
- engine_sel__mes_unmap_queues__sdma0 = 2,
- engine_sel__mes_unmap_queues__sdma1 = 3
-};
-
-struct pm4_unmap_queues {
- union {
- union PM4_MES_TYPE_3_HEADER header; /* header */
- uint32_t ordinal1;
- };
-
- union {
- struct {
- enum unmap_queues_action_enum action:2;
- uint32_t reserved1:2;
- enum unmap_queues_queue_sel_enum queue_sel:2;
- uint32_t reserved2:20;
- enum unmap_queues_engine_sel_enum engine_sel:3;
- uint32_t num_queues:3;
+ uint32_t reserved1:8;
+ uint32_t diq_enable:1;
+ uint32_t process_quantum:7;
} bitfields2;
uint32_t ordinal2;
};
union {
struct {
- uint32_t pasid:16;
- uint32_t reserved3:16;
- } bitfields3a;
- struct {
- uint32_t reserved4:2;
- uint32_t doorbell_offset0:21;
- uint32_t reserved5:9;
- } bitfields3b;
+ uint32_t page_table_base:28;
+ uint32_t reserved2:4;
+ } bitfields3;
uint32_t ordinal3;
};
- union {
- struct {
- uint32_t reserved6:2;
- uint32_t doorbell_offset1:21;
- uint32_t reserved7:9;
- } bitfields4;
- uint32_t ordinal4;
- };
-
- union {
- struct {
- uint32_t reserved8:2;
- uint32_t doorbell_offset2:21;
- uint32_t reserved9:9;
- } bitfields5;
- uint32_t ordinal5;
- };
+ uint32_t reserved3;
+ uint32_t sh_mem_bases;
+ uint32_t sh_mem_config;
+ uint32_t sh_mem_ape1_base;
+ uint32_t sh_mem_ape1_limit;
+ uint32_t sh_hidden_private_base_vmid;
+ uint32_t reserved4;
+ uint32_t reserved5;
+ uint32_t gds_addr_lo;
+ uint32_t gds_addr_hi;
union {
struct {
- uint32_t reserved10:2;
- uint32_t doorbell_offset3:21;
- uint32_t reserved11:9;
- } bitfields6;
- uint32_t ordinal6;
+ uint32_t num_gws:6;
+ uint32_t reserved6:2;
+ uint32_t num_oac:4;
+ uint32_t reserved7:4;
+ uint32_t gds_size:6;
+ uint32_t num_queues:10;
+ } bitfields14;
+ uint32_t ordinal14;
};
+ uint32_t completion_signal_lo32;
+uint32_t completion_signal_hi32;
};
#endif
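What survives in kfd_pm4_headers.h is the PM4_MES_TYPE_3_HEADER union: a bitfield view (opcode, count, type) overlaid on a single u32all word so a packet header can be written as one DWORD. A standalone sketch of assembling such a header; build_pm4_header() itself is not shown in these hunks, so the count encoding below ("payload DWORDs minus one", i.e. total size minus two) is an assumption read off the field's own comment, and bitfield order is compiler-dependent:

#include <stdint.h>
#include <stdio.h>

union pm4_type3_header {
    struct {
        uint32_t reserved1:8;   /* reserved */
        uint32_t opcode:8;      /* IT opcode */
        uint32_t count:14;      /* number of DWORDs - 1 in the information body */
        uint32_t type:2;        /* 3 for type-3 packets */
    };
    uint32_t u32all;
};

/* Assumed encoding: count = total packet DWORDs - 1 (header) - 1. */
static uint32_t build_header(uint8_t opcode, unsigned int packet_size_dwords)
{
    union pm4_type3_header hdr = { .u32all = 0 };

    hdr.opcode = opcode;
    hdr.count  = packet_size_dwords - 2;
    hdr.type   = 3;
    return hdr.u32all;
}

int main(void)
{
    /* A 7-DWORD packet with a hypothetical opcode 0x21. */
    printf("header = 0x%08x\n", build_header(0x21, 7));
    return 0;
}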
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_vi.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_vi.h
index 08c721922812..7c8d9b357749 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_vi.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_vi.h
@@ -30,10 +30,12 @@ union PM4_MES_TYPE_3_HEADER {
struct {
uint32_t reserved1 : 8; /* < reserved */
uint32_t opcode : 8; /* < IT opcode */
- uint32_t count : 14;/* < number of DWORDs - 1 in the
- information body. */
- uint32_t type : 2; /* < packet identifier.
- It should be 3 for type 3 packets */
+ uint32_t count : 14;/* < Number of DWORDS - 1 in the
+ * information body
+ */
+ uint32_t type : 2; /* < packet identifier
+ * It should be 3 for type 3 packets
+ */
};
uint32_t u32All;
};
@@ -124,9 +126,10 @@ struct pm4_mes_runlist {
uint32_t ib_size:20;
uint32_t chain:1;
uint32_t offload_polling:1;
- uint32_t reserved3:1;
+ uint32_t reserved2:1;
uint32_t valid:1;
- uint32_t reserved4:8;
+ uint32_t process_cnt:4;
+ uint32_t reserved3:4;
} bitfields4;
uint32_t ordinal4;
};
@@ -141,8 +144,8 @@ struct pm4_mes_runlist {
struct pm4_mes_map_process {
union {
- union PM4_MES_TYPE_3_HEADER header; /* header */
- uint32_t ordinal1;
+ union PM4_MES_TYPE_3_HEADER header; /* header */
+ uint32_t ordinal1;
};
union {
@@ -153,36 +156,48 @@ struct pm4_mes_map_process {
uint32_t process_quantum:7;
} bitfields2;
uint32_t ordinal2;
-};
+ };
union {
struct {
uint32_t page_table_base:28;
- uint32_t reserved2:4;
+ uint32_t reserved3:4;
} bitfields3;
uint32_t ordinal3;
};
+ uint32_t reserved;
+
uint32_t sh_mem_bases;
+ uint32_t sh_mem_config;
uint32_t sh_mem_ape1_base;
uint32_t sh_mem_ape1_limit;
- uint32_t sh_mem_config;
+
+ uint32_t sh_hidden_private_base_vmid;
+
+ uint32_t reserved2;
+ uint32_t reserved3;
+
uint32_t gds_addr_lo;
uint32_t gds_addr_hi;
union {
struct {
uint32_t num_gws:6;
- uint32_t reserved3:2;
+ uint32_t reserved4:2;
uint32_t num_oac:4;
- uint32_t reserved4:4;
+ uint32_t reserved5:4;
uint32_t gds_size:6;
uint32_t num_queues:10;
} bitfields10;
uint32_t ordinal10;
};
+ uint32_t completion_signal_lo;
+ uint32_t completion_signal_hi;
+
};
+
#endif
/*--------------------MES_MAP_QUEUES--------------------*/
@@ -335,7 +350,7 @@ enum mes_unmap_queues_engine_sel_enum {
engine_sel__mes_unmap_queues__sdmal = 3
};
-struct PM4_MES_UNMAP_QUEUES {
+struct pm4_mes_unmap_queues {
union {
union PM4_MES_TYPE_3_HEADER header; /* header */
uint32_t ordinal1;
@@ -395,4 +410,101 @@ struct PM4_MES_UNMAP_QUEUES {
};
#endif
+#ifndef PM4_MEC_RELEASE_MEM_DEFINED
+#define PM4_MEC_RELEASE_MEM_DEFINED
+enum RELEASE_MEM_event_index_enum {
+ event_index___release_mem__end_of_pipe = 5,
+ event_index___release_mem__shader_done = 6
+};
+
+enum RELEASE_MEM_cache_policy_enum {
+ cache_policy___release_mem__lru = 0,
+ cache_policy___release_mem__stream = 1,
+ cache_policy___release_mem__bypass = 2
+};
+
+enum RELEASE_MEM_dst_sel_enum {
+ dst_sel___release_mem__memory_controller = 0,
+ dst_sel___release_mem__tc_l2 = 1,
+ dst_sel___release_mem__queue_write_pointer_register = 2,
+ dst_sel___release_mem__queue_write_pointer_poll_mask_bit = 3
+};
+
+enum RELEASE_MEM_int_sel_enum {
+ int_sel___release_mem__none = 0,
+ int_sel___release_mem__send_interrupt_only = 1,
+ int_sel___release_mem__send_interrupt_after_write_confirm = 2,
+ int_sel___release_mem__send_data_after_write_confirm = 3
+};
+
+enum RELEASE_MEM_data_sel_enum {
+ data_sel___release_mem__none = 0,
+ data_sel___release_mem__send_32_bit_low = 1,
+ data_sel___release_mem__send_64_bit_data = 2,
+ data_sel___release_mem__send_gpu_clock_counter = 3,
+ data_sel___release_mem__send_cp_perfcounter_hi_lo = 4,
+ data_sel___release_mem__store_gds_data_to_memory = 5
+};
+
+struct pm4_mec_release_mem {
+ union {
+ union PM4_MES_TYPE_3_HEADER header; /*header */
+ unsigned int ordinal1;
+ };
+
+ union {
+ struct {
+ unsigned int event_type:6;
+ unsigned int reserved1:2;
+ enum RELEASE_MEM_event_index_enum event_index:4;
+ unsigned int tcl1_vol_action_ena:1;
+ unsigned int tc_vol_action_ena:1;
+ unsigned int reserved2:1;
+ unsigned int tc_wb_action_ena:1;
+ unsigned int tcl1_action_ena:1;
+ unsigned int tc_action_ena:1;
+ unsigned int reserved3:6;
+ unsigned int atc:1;
+ enum RELEASE_MEM_cache_policy_enum cache_policy:2;
+ unsigned int reserved4:5;
+ } bitfields2;
+ unsigned int ordinal2;
+ };
+
+ union {
+ struct {
+ unsigned int reserved5:16;
+ enum RELEASE_MEM_dst_sel_enum dst_sel:2;
+ unsigned int reserved6:6;
+ enum RELEASE_MEM_int_sel_enum int_sel:3;
+ unsigned int reserved7:2;
+ enum RELEASE_MEM_data_sel_enum data_sel:3;
+ } bitfields3;
+ unsigned int ordinal3;
+ };
+
+ union {
+ struct {
+ unsigned int reserved8:2;
+ unsigned int address_lo_32b:30;
+ } bitfields4;
+ struct {
+ unsigned int reserved9:3;
+ unsigned int address_lo_64b:29;
+ } bitfields5;
+ unsigned int ordinal4;
+ };
+
+ unsigned int address_hi;
+
+ unsigned int data_lo;
+
+ unsigned int data_hi;
+};
+#endif
+
+enum {
+ CACHE_FLUSH_AND_INV_TS_EVENT = 0x00000014
+};
+
#endif
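The new pm4_mec_release_mem packet carries a 64-bit fence address as address_hi plus a 30-bit address_lo_32b field (the two reserved low bits force 4-byte alignment), and a 64-bit fence value as data_lo/data_hi, much like the lower_32_bits() split used in pm_send_query_status() above. A standalone sketch of that split; filling the 30-bit field with the low word shifted right by two is an assumption about how the packet would be populated, not code from this patch:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static inline uint32_t lower_32(uint64_t v) { return (uint32_t)v; }
static inline uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
    uint64_t fence_gpu_addr = 0x0000000123456788ull;   /* must be 4-byte aligned */
    uint64_t fence_value    = 0xdeadbeefcafef00dull;

    assert((fence_gpu_addr & 0x3) == 0);

    uint32_t address_lo_32b = lower_32(fence_gpu_addr) >> 2;   /* 30-bit field */
    uint32_t address_hi     = upper_32(fence_gpu_addr);
    uint32_t data_lo        = lower_32(fence_value);
    uint32_t data_hi        = upper_32(fence_value);

    printf("addr lo/hi = 0x%08x/0x%08x, data lo/hi = 0x%08x/0x%08x\n",
           address_lo_32b, address_hi, data_lo, data_hi);
    return 0;
}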
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 4750cabe4252..b397ec726400 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -239,11 +239,6 @@ enum kfd_preempt_type_filter {
KFD_PREEMPT_TYPE_FILTER_BY_PASID
};
-enum kfd_preempt_type {
- KFD_PREEMPT_TYPE_WAVEFRONT,
- KFD_PREEMPT_TYPE_WAVEFRONT_RESET
-};
-
/**
* enum kfd_queue_type
*
@@ -294,13 +289,13 @@ enum kfd_queue_format {
* @write_ptr: Defines the number of dwords written to the ring buffer.
*
* @doorbell_ptr: This field aim is to notify the H/W of new packet written to
- * the queue ring buffer. This field should be similar to write_ptr and the user
- * should update this field after he updated the write_ptr.
+ * the queue ring buffer. This field should be similar to write_ptr and the
+ * user should update this field after he updated the write_ptr.
*
* @doorbell_off: The doorbell offset in the doorbell pci-bar.
*
- * @is_interop: Defines if this is a interop queue. Interop queue means that the
- * queue can access both graphics and compute resources.
+ * @is_interop: Defines if this is a interop queue. Interop queue means that
+ * the queue can access both graphics and compute resources.
*
* @is_active: Defines if the queue is active or not.
*
@@ -352,9 +347,10 @@ struct queue_properties {
* @properties: The queue properties.
*
* @mec: Used only in no cp scheduling mode and identifies to micro engine id
- * that the queue should be execute on.
+ * that the queue should be execute on.
*
- * @pipe: Used only in no cp scheduling mode and identifies the queue's pipe id.
+ * @pipe: Used only in no cp scheduling mode and identifies the queue's pipe
+ * id.
*
* @queue: Used only in no cp scheduliong mode and identifies the queue's slot.
*
@@ -436,6 +432,7 @@ struct qcm_process_device {
uint32_t gds_size;
uint32_t num_gws;
uint32_t num_oac;
+ uint32_t sh_hidden_private_base;
};
/* Data that is per-process-per device. */
@@ -520,8 +517,8 @@ struct kfd_process {
struct mutex event_mutex;
/* All events in process hashed by ID, linked on kfd_event.events. */
DECLARE_HASHTABLE(events, 4);
- struct list_head signal_event_pages; /* struct slot_page_header.
- event_pages */
+ /* struct slot_page_header.event_pages */
+ struct list_head signal_event_pages;
u32 next_nonsignal_event_id;
size_t signal_event_count;
};
@@ -559,8 +556,10 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
struct kfd_process *p);
/* Process device data iterator */
-struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p);
-struct kfd_process_device *kfd_get_next_process_device_data(struct kfd_process *p,
+struct kfd_process_device *kfd_get_first_process_device_data(
+ struct kfd_process *p);
+struct kfd_process_device *kfd_get_next_process_device_data(
+ struct kfd_process *p,
struct kfd_process_device *pdd);
bool kfd_has_process_device_data(struct kfd_process *p);
@@ -573,7 +572,8 @@ unsigned int kfd_pasid_alloc(void);
void kfd_pasid_free(unsigned int pasid);
/* Doorbells */
-void kfd_doorbell_init(struct kfd_dev *kfd);
+int kfd_doorbell_init(struct kfd_dev *kfd);
+void kfd_doorbell_fini(struct kfd_dev *kfd);
int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma);
u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
unsigned int *doorbell_off);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 035bbc98a63d..c74cf22a1ed9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -79,9 +79,7 @@ struct kfd_process *kfd_create_process(const struct task_struct *thread)
{
struct kfd_process *process;
- BUG_ON(!kfd_process_wq);
-
- if (thread->mm == NULL)
+ if (!thread->mm)
return ERR_PTR(-EINVAL);
/* Only the pthreads threading model is supported. */
@@ -101,7 +99,7 @@ struct kfd_process *kfd_create_process(const struct task_struct *thread)
/* A prior open of /dev/kfd could have already created the process. */
process = find_process(thread);
if (process)
- pr_debug("kfd: process already found\n");
+ pr_debug("Process already found\n");
if (!process)
process = create_process(thread);
@@ -117,7 +115,7 @@ struct kfd_process *kfd_get_process(const struct task_struct *thread)
{
struct kfd_process *process;
- if (thread->mm == NULL)
+ if (!thread->mm)
return ERR_PTR(-EINVAL);
/* Only the pthreads threading model is supported. */
@@ -202,10 +200,8 @@ static void kfd_process_destroy_delayed(struct rcu_head *rcu)
struct kfd_process_release_work *work;
struct kfd_process *p;
- BUG_ON(!kfd_process_wq);
-
p = container_of(rcu, struct kfd_process, rcu);
- BUG_ON(atomic_read(&p->mm->mm_count) <= 0);
+ WARN_ON(atomic_read(&p->mm->mm_count) <= 0);
mmdrop(p->mm);
@@ -229,7 +225,8 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
* mmu_notifier srcu is read locked
*/
p = container_of(mn, struct kfd_process, mmu_notifier);
- BUG_ON(p->mm != mm);
+ if (WARN_ON(p->mm != mm))
+ return;
mutex_lock(&kfd_processes_mutex);
hash_del_rcu(&p->kfd_processes);
@@ -250,7 +247,7 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
kfd_dbgmgr_destroy(pdd->dev->dbgmgr);
if (pdd->reset_wavefronts) {
- pr_warn("amdkfd: Resetting all wave fronts\n");
+ pr_warn("Resetting all wave fronts\n");
dbgdev_wave_reset_wavefronts(pdd->dev, p);
pdd->reset_wavefronts = false;
}
@@ -407,8 +404,6 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
struct kfd_process *p;
struct kfd_process_device *pdd;
- BUG_ON(dev == NULL);
-
/*
* Look for the process that matches the pasid. If there is no such
* process, we either released it in amdkfd's own notifier, or there
@@ -449,14 +444,16 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
mutex_unlock(&p->mutex);
}
-struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p)
+struct kfd_process_device *kfd_get_first_process_device_data(
+ struct kfd_process *p)
{
return list_first_entry(&p->per_device_data,
struct kfd_process_device,
per_device_list);
}
-struct kfd_process_device *kfd_get_next_process_device_data(struct kfd_process *p,
+struct kfd_process_device *kfd_get_next_process_device_data(
+ struct kfd_process *p,
struct kfd_process_device *pdd)
{
if (list_is_last(&pdd->per_device_list, &p->per_device_data))
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 32cdf2b483db..1cae95e2b13a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -32,12 +32,9 @@ static inline struct process_queue_node *get_queue_by_qid(
{
struct process_queue_node *pqn;
- BUG_ON(!pqm);
-
list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
- if (pqn->q && pqn->q->properties.queue_id == qid)
- return pqn;
- if (pqn->kq && pqn->kq->queue->properties.queue_id == qid)
+ if ((pqn->q && pqn->q->properties.queue_id == qid) ||
+ (pqn->kq && pqn->kq->queue->properties.queue_id == qid))
return pqn;
}
@@ -49,17 +46,13 @@ static int find_available_queue_slot(struct process_queue_manager *pqm,
{
unsigned long found;
- BUG_ON(!pqm || !qid);
-
- pr_debug("kfd: in %s\n", __func__);
-
found = find_first_zero_bit(pqm->queue_slot_bitmap,
KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
- pr_debug("kfd: the new slot id %lu\n", found);
+ pr_debug("The new slot id %lu\n", found);
if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
- pr_info("amdkfd: Can not open more queues for process with pasid %d\n",
+ pr_info("Cannot open more queues for process with pasid %d\n",
pqm->process->pasid);
return -ENOMEM;
}
@@ -72,13 +65,11 @@ static int find_available_queue_slot(struct process_queue_manager *pqm,
int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
{
- BUG_ON(!pqm);
-
INIT_LIST_HEAD(&pqm->queues);
pqm->queue_slot_bitmap =
kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
BITS_PER_BYTE), GFP_KERNEL);
- if (pqm->queue_slot_bitmap == NULL)
+ if (!pqm->queue_slot_bitmap)
return -ENOMEM;
pqm->process = p;
@@ -90,10 +81,6 @@ void pqm_uninit(struct process_queue_manager *pqm)
int retval;
struct process_queue_node *pqn, *next;
- BUG_ON(!pqm);
-
- pr_debug("In func %s\n", __func__);
-
list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) {
retval = pqm_destroy_queue(
pqm,
@@ -102,7 +89,7 @@ void pqm_uninit(struct process_queue_manager *pqm)
pqn->kq->queue->properties.queue_id);
if (retval != 0) {
- pr_err("kfd: failed to destroy queue\n");
+ pr_err("failed to destroy queue\n");
return;
}
}
@@ -117,8 +104,6 @@ static int create_cp_queue(struct process_queue_manager *pqm,
{
int retval;
- retval = 0;
-
/* Doorbell initialized in user space*/
q_properties->doorbell_ptr = NULL;
@@ -131,16 +116,13 @@ static int create_cp_queue(struct process_queue_manager *pqm,
retval = init_queue(q, q_properties);
if (retval != 0)
- goto err_init_queue;
+ return retval;
(*q)->device = dev;
(*q)->process = pqm->process;
- pr_debug("kfd: PQM After init queue");
-
- return retval;
+ pr_debug("PQM After init queue");
-err_init_queue:
return retval;
}
@@ -161,8 +143,6 @@ int pqm_create_queue(struct process_queue_manager *pqm,
int num_queues = 0;
struct queue *cur;
- BUG_ON(!pqm || !dev || !properties || !qid);
-
memset(&q_properties, 0, sizeof(struct queue_properties));
memcpy(&q_properties, properties, sizeof(struct queue_properties));
q = NULL;
@@ -185,7 +165,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
list_for_each_entry(cur, &pdd->qpd.queues_list, list)
num_queues++;
if (num_queues >= dev->device_info->max_no_of_hqd/2)
- return (-ENOSPC);
+ return -ENOSPC;
}
retval = find_available_queue_slot(pqm, qid);
@@ -197,7 +177,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
dev->dqm->ops.register_process(dev->dqm, &pdd->qpd);
}
- pqn = kzalloc(sizeof(struct process_queue_node), GFP_KERNEL);
+ pqn = kzalloc(sizeof(*pqn), GFP_KERNEL);
if (!pqn) {
retval = -ENOMEM;
goto err_allocate_pqn;
@@ -210,7 +190,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
if ((sched_policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
((dev->dqm->processes_count >= VMID_PER_DEVICE) ||
(dev->dqm->queue_count >= get_queues_num(dev->dqm)))) {
- pr_err("kfd: over-subscription is not allowed in radeon_kfd.sched_policy == 1\n");
+ pr_err("Over-subscription is not allowed in radeon_kfd.sched_policy == 1\n");
retval = -EPERM;
goto err_create_queue;
}
@@ -227,7 +207,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
break;
case KFD_QUEUE_TYPE_DIQ:
kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_DIQ);
- if (kq == NULL) {
+ if (!kq) {
retval = -ENOMEM;
goto err_create_queue;
}
@@ -238,22 +218,22 @@ int pqm_create_queue(struct process_queue_manager *pqm,
kq, &pdd->qpd);
break;
default:
- BUG();
- break;
+ WARN(1, "Invalid queue type %d", type);
+ retval = -EINVAL;
}
if (retval != 0) {
- pr_debug("Error dqm create queue\n");
+ pr_err("DQM create queue failed\n");
goto err_create_queue;
}
- pr_debug("kfd: PQM After DQM create queue\n");
+ pr_debug("PQM After DQM create queue\n");
list_add(&pqn->process_queue_list, &pqm->queues);
if (q) {
*properties = q->properties;
- pr_debug("kfd: PQM done creating queue\n");
+ pr_debug("PQM done creating queue\n");
print_queue_properties(properties);
}
@@ -279,14 +259,11 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
dqm = NULL;
- BUG_ON(!pqm);
retval = 0;
- pr_debug("kfd: In Func %s\n", __func__);
-
pqn = get_queue_by_qid(pqm, qid);
- if (pqn == NULL) {
- pr_err("kfd: queue id does not match any known queue\n");
+ if (!pqn) {
+ pr_err("Queue id does not match any known queue\n");
return -EINVAL;
}
@@ -295,7 +272,8 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
dev = pqn->kq->dev;
if (pqn->q)
dev = pqn->q->device;
- BUG_ON(!dev);
+ if (WARN_ON(!dev))
+ return -ENODEV;
pdd = kfd_get_process_device_data(dev, pqm->process);
if (!pdd) {
@@ -335,12 +313,9 @@ int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
int retval;
struct process_queue_node *pqn;
- BUG_ON(!pqm);
-
pqn = get_queue_by_qid(pqm, qid);
if (!pqn) {
- pr_debug("amdkfd: No queue %d exists for update operation\n",
- qid);
+ pr_debug("No queue %d exists for update operation\n", qid);
return -EFAULT;
}
@@ -363,8 +338,6 @@ struct kernel_queue *pqm_get_kernel_queue(
{
struct process_queue_node *pqn;
- BUG_ON(!pqm);
-
pqn = get_queue_by_qid(pqm, qid);
if (pqn && pqn->kq)
return pqn->kq;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
index 0ab197077f2d..a5315d4f1c95 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
@@ -65,17 +65,15 @@ void print_queue(struct queue *q)
int init_queue(struct queue **q, const struct queue_properties *properties)
{
- struct queue *tmp;
+ struct queue *tmp_q;
- BUG_ON(!q);
-
- tmp = kzalloc(sizeof(struct queue), GFP_KERNEL);
- if (!tmp)
+ tmp_q = kzalloc(sizeof(*tmp_q), GFP_KERNEL);
+ if (!tmp_q)
return -ENOMEM;
- memcpy(&tmp->properties, properties, sizeof(struct queue_properties));
+ memcpy(&tmp_q->properties, properties, sizeof(*properties));
- *q = tmp;
+ *q = tmp_q;
return 0;
}
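init_queue() above switches to kzalloc(sizeof(*tmp_q)) and memcpy(..., sizeof(*properties)): the sizes follow the pointee rather than repeating a type name, so they stay correct if the pointer's type ever changes. The same idiom in plain C with an illustrative struct:

#include <stdlib.h>
#include <string.h>

struct queue_props { int size; int priority; };
struct queue { struct queue_props properties; };

static int init_queue(struct queue **q, const struct queue_props *properties)
{
    /* Size taken from the object being allocated, not a spelled-out type. */
    struct queue *tmp_q = calloc(1, sizeof(*tmp_q));

    if (!tmp_q)
        return -1;

    memcpy(&tmp_q->properties, properties, sizeof(*properties));
    *q = tmp_q;
    return 0;
}

int main(void)
{
    struct queue_props p = { 4096, 1 };
    struct queue *q;

    if (init_queue(&q, &p))
        return 1;
    free(q);
    return 0;
}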
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 1e5064749959..19ce59028d6b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -108,9 +108,6 @@ static int kfd_topology_get_crat_acpi(void *crat_image, size_t *size)
static void kfd_populated_cu_info_cpu(struct kfd_topology_device *dev,
struct crat_subtype_computeunit *cu)
{
- BUG_ON(!dev);
- BUG_ON(!cu);
-
dev->node_props.cpu_cores_count = cu->num_cpu_cores;
dev->node_props.cpu_core_id_base = cu->processor_id_low;
if (cu->hsa_capability & CRAT_CU_FLAGS_IOMMU_PRESENT)
@@ -123,9 +120,6 @@ static void kfd_populated_cu_info_cpu(struct kfd_topology_device *dev,
static void kfd_populated_cu_info_gpu(struct kfd_topology_device *dev,
struct crat_subtype_computeunit *cu)
{
- BUG_ON(!dev);
- BUG_ON(!cu);
-
dev->node_props.simd_id_base = cu->processor_id_low;
dev->node_props.simd_count = cu->num_simd_cores;
dev->node_props.lds_size_in_kb = cu->lds_size_in_kb;
@@ -148,8 +142,6 @@ static int kfd_parse_subtype_cu(struct crat_subtype_computeunit *cu)
struct kfd_topology_device *dev;
int i = 0;
- BUG_ON(!cu);
-
pr_info("Found CU entry in CRAT table with proximity_domain=%d caps=%x\n",
cu->proximity_domain, cu->hsa_capability);
list_for_each_entry(dev, &topology_device_list, list) {
@@ -177,8 +169,6 @@ static int kfd_parse_subtype_mem(struct crat_subtype_memory *mem)
struct kfd_topology_device *dev;
int i = 0;
- BUG_ON(!mem);
-
pr_info("Found memory entry in CRAT table with proximity_domain=%d\n",
mem->promixity_domain);
list_for_each_entry(dev, &topology_device_list, list) {
@@ -223,8 +213,6 @@ static int kfd_parse_subtype_cache(struct crat_subtype_cache *cache)
struct kfd_topology_device *dev;
uint32_t id;
- BUG_ON(!cache);
-
id = cache->processor_id_low;
pr_info("Found cache entry in CRAT table with processor_id=%d\n", id);
@@ -274,8 +262,6 @@ static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink)
uint32_t id_from;
uint32_t id_to;
- BUG_ON(!iolink);
-
id_from = iolink->proximity_domain_from;
id_to = iolink->proximity_domain_to;
@@ -323,8 +309,6 @@ static int kfd_parse_subtype(struct crat_subtype_generic *sub_type_hdr)
struct crat_subtype_iolink *iolink;
int ret = 0;
- BUG_ON(!sub_type_hdr);
-
switch (sub_type_hdr->type) {
case CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY:
cu = (struct crat_subtype_computeunit *)sub_type_hdr;
@@ -368,8 +352,6 @@ static void kfd_release_topology_device(struct kfd_topology_device *dev)
struct kfd_cache_properties *cache;
struct kfd_iolink_properties *iolink;
- BUG_ON(!dev);
-
list_del(&dev->list);
while (dev->mem_props.next != &dev->mem_props) {
@@ -416,7 +398,7 @@ static struct kfd_topology_device *kfd_create_topology_device(void)
struct kfd_topology_device *dev;
dev = kfd_alloc_struct(dev);
- if (dev == NULL) {
+ if (!dev) {
pr_err("No memory to allocate a topology device");
return NULL;
}
@@ -666,7 +648,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev->node_props.simd_count);
if (dev->mem_bank_count < dev->node_props.mem_banks_count) {
- pr_info_once("kfd: mem_banks_count truncated from %d to %d\n",
+ pr_info_once("mem_banks_count truncated from %d to %d\n",
dev->node_props.mem_banks_count,
dev->mem_bank_count);
sysfs_show_32bit_prop(buffer, "mem_banks_count",
@@ -763,8 +745,6 @@ static void kfd_remove_sysfs_node_entry(struct kfd_topology_device *dev)
struct kfd_cache_properties *cache;
struct kfd_mem_properties *mem;
- BUG_ON(!dev);
-
if (dev->kobj_iolink) {
list_for_each_entry(iolink, &dev->io_link_props, list)
if (iolink->kobj) {
@@ -819,12 +799,12 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev,
int ret;
uint32_t i;
- BUG_ON(!dev);
+ if (WARN_ON(dev->kobj_node))
+ return -EEXIST;
/*
* Creating the sysfs folders
*/
- BUG_ON(dev->kobj_node);
dev->kobj_node = kfd_alloc_struct(dev->kobj_node);
if (!dev->kobj_node)
return -ENOMEM;
@@ -957,7 +937,7 @@ static int kfd_topology_update_sysfs(void)
int ret;
pr_info("Creating topology SYSFS entries\n");
- if (sys_props.kobj_topology == NULL) {
+ if (!sys_props.kobj_topology) {
sys_props.kobj_topology =
kfd_alloc_struct(sys_props.kobj_topology);
if (!sys_props.kobj_topology)
@@ -1117,10 +1097,8 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
struct kfd_topology_device *dev;
struct kfd_topology_device *out_dev = NULL;
- BUG_ON(!gpu);
-
list_for_each_entry(dev, &topology_device_list, list)
- if (dev->gpu == NULL && dev->node_props.simd_count > 0) {
+ if (!dev->gpu && (dev->node_props.simd_count > 0)) {
dev->gpu = gpu;
out_dev = dev;
break;
@@ -1143,11 +1121,9 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
struct kfd_topology_device *dev;
int res;
- BUG_ON(!gpu);
-
gpu_id = kfd_generate_gpu_id(gpu);
- pr_debug("kfd: Adding new GPU (ID: 0x%x) to topology\n", gpu_id);
+ pr_debug("Adding new GPU (ID: 0x%x) to topology\n", gpu_id);
down_write(&topology_lock);
/*
@@ -1170,8 +1146,8 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
* GPU vBIOS
*/
- /*
- * Update the SYSFS tree, since we added another topology device
+ /* Update the SYSFS tree, since we added another topology
+ * device
*/
if (kfd_topology_update_sysfs() < 0)
kfd_topology_release_sysfs();
@@ -1190,7 +1166,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
if (dev->gpu->device_info->asic_family == CHIP_CARRIZO) {
dev->node_props.capability |= HSA_CAP_DOORBELL_PACKET_TYPE;
- pr_info("amdkfd: adding doorbell packet type capability\n");
+ pr_info("Adding doorbell packet type capability\n");
}
res = 0;
@@ -1210,8 +1186,6 @@ int kfd_topology_remove_device(struct kfd_dev *gpu)
uint32_t gpu_id;
int res = -ENODEV;
- BUG_ON(!gpu);
-
down_write(&topology_lock);
list_for_each_entry(dev, &topology_device_list, list)
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index 0021a1c63356..837296db9628 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -1233,6 +1233,69 @@ struct atom_asic_profiling_info_v4_1
uint32_t phyclk2gfxclk_c;
};
+struct atom_asic_profiling_info_v4_2 {
+ struct atom_common_table_header table_header;
+ uint32_t maxvddc;
+ uint32_t minvddc;
+ uint32_t avfs_meannsigma_acontant0;
+ uint32_t avfs_meannsigma_acontant1;
+ uint32_t avfs_meannsigma_acontant2;
+ uint16_t avfs_meannsigma_dc_tol_sigma;
+ uint16_t avfs_meannsigma_platform_mean;
+ uint16_t avfs_meannsigma_platform_sigma;
+ uint32_t gb_vdroop_table_cksoff_a0;
+ uint32_t gb_vdroop_table_cksoff_a1;
+ uint32_t gb_vdroop_table_cksoff_a2;
+ uint32_t gb_vdroop_table_ckson_a0;
+ uint32_t gb_vdroop_table_ckson_a1;
+ uint32_t gb_vdroop_table_ckson_a2;
+ uint32_t avfsgb_fuse_table_cksoff_m1;
+ uint32_t avfsgb_fuse_table_cksoff_m2;
+ uint32_t avfsgb_fuse_table_cksoff_b;
+ uint32_t avfsgb_fuse_table_ckson_m1;
+ uint32_t avfsgb_fuse_table_ckson_m2;
+ uint32_t avfsgb_fuse_table_ckson_b;
+ uint16_t max_voltage_0_25mv;
+ uint8_t enable_gb_vdroop_table_cksoff;
+ uint8_t enable_gb_vdroop_table_ckson;
+ uint8_t enable_gb_fuse_table_cksoff;
+ uint8_t enable_gb_fuse_table_ckson;
+ uint16_t psm_age_comfactor;
+ uint8_t enable_apply_avfs_cksoff_voltage;
+ uint8_t reserved;
+ uint32_t dispclk2gfxclk_a;
+ uint32_t dispclk2gfxclk_b;
+ uint32_t dispclk2gfxclk_c;
+ uint32_t pixclk2gfxclk_a;
+ uint32_t pixclk2gfxclk_b;
+ uint32_t pixclk2gfxclk_c;
+ uint32_t dcefclk2gfxclk_a;
+ uint32_t dcefclk2gfxclk_b;
+ uint32_t dcefclk2gfxclk_c;
+ uint32_t phyclk2gfxclk_a;
+ uint32_t phyclk2gfxclk_b;
+ uint32_t phyclk2gfxclk_c;
+ uint32_t acg_gb_vdroop_table_a0;
+ uint32_t acg_gb_vdroop_table_a1;
+ uint32_t acg_gb_vdroop_table_a2;
+ uint32_t acg_avfsgb_fuse_table_m1;
+ uint32_t acg_avfsgb_fuse_table_m2;
+ uint32_t acg_avfsgb_fuse_table_b;
+ uint8_t enable_acg_gb_vdroop_table;
+ uint8_t enable_acg_gb_fuse_table;
+ uint32_t acg_dispclk2gfxclk_a;
+ uint32_t acg_dispclk2gfxclk_b;
+ uint32_t acg_dispclk2gfxclk_c;
+ uint32_t acg_pixclk2gfxclk_a;
+ uint32_t acg_pixclk2gfxclk_b;
+ uint32_t acg_pixclk2gfxclk_c;
+ uint32_t acg_dcefclk2gfxclk_a;
+ uint32_t acg_dcefclk2gfxclk_b;
+ uint32_t acg_dcefclk2gfxclk_c;
+ uint32_t acg_phyclk2gfxclk_a;
+ uint32_t acg_phyclk2gfxclk_b;
+ uint32_t acg_phyclk2gfxclk_c;
+};
/*
***************************************************************************
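atom_asic_profiling_info_v4_2 extends the v4_1 layout, and the ppatomfwctrl.c hunks further down pick between the two by reading format_revision and content_revision from the common table header before casting. A compact self-contained model of that dispatch, with shortened stand-in structs (the real tables are little-endian and go through le32_to_cpu()):

#include <stdint.h>
#include <stdio.h>

struct common_header {
    uint8_t format_revision;
    uint8_t content_revision;
};

struct profiling_v4_1 { struct common_header hdr; uint32_t maxvddc; };
struct profiling_v4_2 { struct common_header hdr; uint32_t maxvddc; uint32_t acg_extra; };

static uint32_t read_maxvddc(const void *table)
{
    const struct common_header *hdr = table;

    if (hdr->format_revision == 4 && hdr->content_revision == 1)
        return ((const struct profiling_v4_1 *)table)->maxvddc;
    if (hdr->format_revision == 4 && hdr->content_revision == 2)
        return ((const struct profiling_v4_2 *)table)->maxvddc;
    return 0;   /* unknown revision: callers should treat this as an error */
}

int main(void)
{
    struct profiling_v4_2 t = { { 4, 2 }, 1150, 7 };

    printf("maxvddc = %u\n", read_maxvddc(&t));
    return 0;
}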
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index 0a94f749e3c0..0214f63f52fc 100644
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -50,6 +50,7 @@ enum cgs_ind_reg {
CGS_IND_REG__UVD_CTX,
CGS_IND_REG__DIDT,
CGS_IND_REG_GC_CAC,
+ CGS_IND_REG_SE_CAC,
CGS_IND_REG__AUDIO_ENDPT
};
@@ -406,6 +407,8 @@ typedef int (*cgs_is_virtualization_enabled_t)(void *cgs_device);
typedef int (*cgs_enter_safe_mode)(struct cgs_device *cgs_device, bool en);
+typedef void (*cgs_lock_grbm_idx)(struct cgs_device *cgs_device, bool lock);
+
struct cgs_ops {
/* memory management calls (similar to KFD interface) */
cgs_alloc_gpu_mem_t alloc_gpu_mem;
@@ -441,6 +444,7 @@ struct cgs_ops {
cgs_query_system_info query_system_info;
cgs_is_virtualization_enabled_t is_virtualization_enabled;
cgs_enter_safe_mode enter_safe_mode;
+ cgs_lock_grbm_idx lock_grbm_idx;
};
struct cgs_os_ops; /* To be define in OS-specific CGS header */
@@ -517,4 +521,6 @@ struct cgs_device
#define cgs_enter_safe_mode(cgs_device, en) \
CGS_CALL(enter_safe_mode, cgs_device, en)
+#define cgs_lock_grbm_idx(cgs_device, lock) \
+ CGS_CALL(lock_grbm_idx, cgs_device, lock)
#endif /* _CGS_COMMON_H */
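The cgs_lock_grbm_idx addition follows the existing CGS pattern: a typedef'd function pointer, a slot in struct cgs_ops, and a forwarding macro built on CGS_CALL. A self-contained model of that ops-table indirection; the device and callback names here are illustrative, and the real CGS_CALL also guards against missing callbacks:

#include <stdbool.h>
#include <stdio.h>

struct dev;

struct dev_ops {
    int  (*alloc_mem)(struct dev *d, unsigned long size);
    void (*lock_idx)(struct dev *d, bool lock);
};

struct dev {
    const struct dev_ops *ops;
};

/* Forward a call through whichever ops table the backend installed. */
#define DEV_CALL(func, d, ...) ((d)->ops->func((d), __VA_ARGS__))

static int  impl_alloc(struct dev *d, unsigned long size) { (void)d; printf("alloc %lu\n", size); return 0; }
static void impl_lock(struct dev *d, bool lock)           { (void)d; printf(lock ? "lock\n" : "unlock\n"); }

static const struct dev_ops ops = { impl_alloc, impl_lock };

int main(void)
{
    struct dev d = { &ops };

    DEV_CALL(alloc_mem, &d, 4096UL);
    DEV_CALL(lock_idx, &d, true);
    DEV_CALL(lock_idx, &d, false);
    return 0;
}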
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 91ef1484b3bb..94277cb734d2 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -41,6 +41,11 @@ struct kgd_dev;
struct kgd_mem;
+enum kfd_preempt_type {
+ KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN = 0,
+ KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
+};
+
enum kgd_memory_pool {
KGD_POOL_SYSTEM_CACHEABLE = 1,
KGD_POOL_SYSTEM_WRITECOMBINE = 2,
@@ -63,9 +68,6 @@ struct kgd2kfd_shared_resources {
/* Bit n == 1 means VMID n is available for KFD. */
unsigned int compute_vmid_bitmap;
- /* number of mec available from the hardware */
- uint32_t num_mec;
-
/* number of pipes per mec */
uint32_t num_pipe_per_mec;
@@ -85,6 +87,17 @@ struct kgd2kfd_shared_resources {
size_t doorbell_start_offset;
};
+struct tile_config {
+ uint32_t *tile_config_ptr;
+ uint32_t *macro_tile_config_ptr;
+ uint32_t num_tile_configs;
+ uint32_t num_macro_tile_configs;
+
+ uint32_t gb_addr_config;
+ uint32_t num_banks;
+ uint32_t num_ranks;
+};
+
/**
* struct kfd2kgd_calls
*
@@ -126,6 +139,11 @@ struct kgd2kfd_shared_resources {
*
* @get_fw_version: Returns FW versions from the header
*
+ * @set_scratch_backing_va: Sets VA for scratch backing memory of a VMID.
+ * Only used for no cp scheduling mode
+ *
+ * @get_tile_config: Returns GPU-specific tiling mode information
+ *
* This structure contains function pointers to services that the kgd driver
* provides to amdkfd driver.
*
@@ -156,14 +174,16 @@ struct kfd2kgd_calls {
int (*init_interrupts)(struct kgd_dev *kgd, uint32_t pipe_id);
int (*hqd_load)(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr);
+ uint32_t queue_id, uint32_t __user *wptr,
+ uint32_t wptr_shift, uint32_t wptr_mask,
+ struct mm_struct *mm);
int (*hqd_sdma_load)(struct kgd_dev *kgd, void *mqd);
bool (*hqd_is_occupied)(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id);
- int (*hqd_destroy)(struct kgd_dev *kgd, uint32_t reset_type,
+ int (*hqd_destroy)(struct kgd_dev *kgd, void *mqd, uint32_t reset_type,
unsigned int timeout, uint32_t pipe_id,
uint32_t queue_id);
@@ -195,6 +215,9 @@ struct kfd2kgd_calls {
uint16_t (*get_fw_version)(struct kgd_dev *kgd,
enum kgd_engine_type type);
+ void (*set_scratch_backing_va)(struct kgd_dev *kgd,
+ uint64_t va, uint32_t vmid);
+ int (*get_tile_config)(struct kgd_dev *kgd, struct tile_config *config);
};
/**
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index 0b74da3dca8b..bc839ff0bdd0 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -1240,13 +1240,18 @@ static int cz_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
- if (cz_hwmgr->sclk_dpm.soft_min_clk !=
- cz_hwmgr->sclk_dpm.soft_max_clk)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetSclkSoftMin,
- cz_get_sclk_level(hwmgr,
- cz_hwmgr->sclk_dpm.soft_max_clk,
- PPSMC_MSG_SetSclkSoftMin));
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetSclkSoftMin,
+ cz_get_sclk_level(hwmgr,
+ cz_hwmgr->sclk_dpm.soft_max_clk,
+ PPSMC_MSG_SetSclkSoftMin));
+
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetSclkSoftMax,
+ cz_get_sclk_level(hwmgr,
+ cz_hwmgr->sclk_dpm.soft_max_clk,
+ PPSMC_MSG_SetSclkSoftMax));
+
return 0;
}
@@ -1292,17 +1297,55 @@ static int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
- if (cz_hwmgr->sclk_dpm.soft_min_clk !=
- cz_hwmgr->sclk_dpm.soft_max_clk) {
- cz_hwmgr->sclk_dpm.soft_max_clk =
- cz_hwmgr->sclk_dpm.soft_min_clk;
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetSclkSoftMax,
+ cz_get_sclk_level(hwmgr,
+ cz_hwmgr->sclk_dpm.soft_min_clk,
+ PPSMC_MSG_SetSclkSoftMax));
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetSclkSoftMin,
+ cz_get_sclk_level(hwmgr,
+ cz_hwmgr->sclk_dpm.soft_min_clk,
+ PPSMC_MSG_SetSclkSoftMin));
+
+ return 0;
+}
+
+static int cz_phm_force_dpm_sclk(struct pp_hwmgr *hwmgr, uint32_t sclk)
+{
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetSclkSoftMin,
+ cz_get_sclk_level(hwmgr,
+ sclk,
+ PPSMC_MSG_SetSclkSoftMin));
+
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetSclkSoftMax,
cz_get_sclk_level(hwmgr,
- cz_hwmgr->sclk_dpm.soft_max_clk,
+ sclk,
PPSMC_MSG_SetSclkSoftMax));
+ return 0;
+}
+
+static int cz_get_profiling_clk(struct pp_hwmgr *hwmgr, uint32_t *sclk)
+{
+ struct phm_clock_voltage_dependency_table *table =
+ hwmgr->dyn_state.vddc_dependency_on_sclk;
+ int32_t tmp_sclk;
+ int32_t count;
+
+ tmp_sclk = table->entries[table->count-1].clk * 70 / 100;
+
+ for (count = table->count-1; count >= 0; count--) {
+ if (tmp_sclk >= table->entries[count].clk) {
+ tmp_sclk = table->entries[count].clk;
+ *sclk = tmp_sclk;
+ break;
+ }
}
+ if (count < 0)
+ *sclk = table->entries[0].clk;
return 0;
}
@@ -1310,30 +1353,70 @@ static int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
static int cz_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
enum amd_dpm_forced_level level)
{
+ uint32_t sclk = 0;
int ret = 0;
+ uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
+ AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
+
+ if (level == hwmgr->dpm_level)
+ return ret;
+
+ if (!(hwmgr->dpm_level & profile_mode_mask)) {
+ /* enter profile mode, save current level, disable gfx cg*/
+ if (level & profile_mode_mask) {
+ hwmgr->saved_dpm_level = hwmgr->dpm_level;
+ cgs_set_clockgating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_CG_STATE_UNGATE);
+ }
+ } else {
+ /* exit profile mode, restore level, enable gfx cg*/
+ if (!(level & profile_mode_mask)) {
+ if (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
+ level = hwmgr->saved_dpm_level;
+ cgs_set_clockgating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_CG_STATE_GATE);
+ }
+ }
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
ret = cz_phm_force_dpm_highest(hwmgr);
if (ret)
return ret;
+ hwmgr->dpm_level = level;
break;
case AMD_DPM_FORCED_LEVEL_LOW:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
ret = cz_phm_force_dpm_lowest(hwmgr);
if (ret)
return ret;
+ hwmgr->dpm_level = level;
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
ret = cz_phm_unforce_dpm_levels(hwmgr);
if (ret)
return ret;
+ hwmgr->dpm_level = level;
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ ret = cz_get_profiling_clk(hwmgr, &sclk);
+ if (ret)
+ return ret;
+ hwmgr->dpm_level = level;
+ cz_phm_force_dpm_sclk(hwmgr, sclk);
+ break;
+ case AMD_DPM_FORCED_LEVEL_MANUAL:
+ hwmgr->dpm_level = level;
break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
default:
break;
}
- hwmgr->dpm_level = level;
-
return ret;
}
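cz_get_profiling_clk() above targets 70% of the highest sclk in the vddc-dependency table and walks the table downward until it finds an entry at or below that target, falling back to the lowest entry if none qualifies. A self-contained model of that selection with a made-up clock table:

#include <stdint.h>
#include <stdio.h>

/* Clock levels in kHz, ascending, standing in for vddc_dependency_on_sclk. */
static const uint32_t clk_table[] = { 200000, 300000, 400000, 533000, 667000, 800000 };
#define NLEVELS (sizeof(clk_table) / sizeof(clk_table[0]))

/* Highest level at or below 70% of the top level; lowest level as a fallback. */
static uint32_t profiling_clk(void)
{
    uint32_t target = clk_table[NLEVELS - 1] * 70 / 100;   /* 560000 here */
    int i;

    for (i = (int)NLEVELS - 1; i >= 0; i--)
        if (target >= clk_table[i])
            return clk_table[i];
    return clk_table[0];
}

int main(void)
{
    printf("standard-profile sclk: %u kHz\n", profiling_clk());   /* 533000 */
    return 0;
}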
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index d025653c7823..9547f265a8bb 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -557,9 +557,8 @@ uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, u
return vddci_table->entries[i].value;
}
- PP_ASSERT_WITH_CODE(false,
- "VDDCI is larger than max VDDCI in VDDCI Voltage Table!",
- return vddci_table->entries[i-1].value);
+ pr_debug("vddci is larger than max value in vddci_table\n");
+ return vddci_table->entries[i-1].value;
}
int phm_find_boot_level(void *table,
@@ -583,26 +582,26 @@ int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
phm_ppt_v1_voltage_lookup_table *lookup_table,
uint16_t virtual_voltage_id, int32_t *sclk)
{
- uint8_t entryId;
- uint8_t voltageId;
+ uint8_t entry_id;
+ uint8_t voltage_id;
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -EINVAL);
/* search for leakage voltage ID 0xff01 ~ 0xff08 and sckl */
- for (entryId = 0; entryId < table_info->vdd_dep_on_sclk->count; entryId++) {
- voltageId = table_info->vdd_dep_on_sclk->entries[entryId].vddInd;
- if (lookup_table->entries[voltageId].us_vdd == virtual_voltage_id)
+ for (entry_id = 0; entry_id < table_info->vdd_dep_on_sclk->count; entry_id++) {
+ voltage_id = table_info->vdd_dep_on_sclk->entries[entry_id].vddInd;
+ if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id)
break;
}
- PP_ASSERT_WITH_CODE(entryId < table_info->vdd_dep_on_sclk->count,
- "Can't find requested voltage id in vdd_dep_on_sclk table!",
- return -EINVAL;
- );
+ if (entry_id >= table_info->vdd_dep_on_sclk->count) {
+ pr_debug("Can't find requested voltage id in vdd_dep_on_sclk table\n");
+ return -EINVAL;
+ }
- *sclk = table_info->vdd_dep_on_sclk->entries[entryId].clk;
+ *sclk = table_info->vdd_dep_on_sclk->entries[entry_id].clk;
return 0;
}
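phm_find_closest_vddci() now clamps an out-of-range request to the largest table entry with a debug message instead of asserting, and phm_get_sclk_for_voltage_evv() likewise turns its failed-lookup assert into a plain bounds check. A standalone model of the clamp-style lookup; the loop head of the original is not shown in this hunk, so the ">=" comparison below is an assumption, and the voltage values are made up:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Sorted ascending, standing in for the VDDCI voltage table (values in mV). */
static const uint16_t vddci_table[] = { 800, 850, 900, 950, 1000 };
#define NENTRIES (sizeof(vddci_table) / sizeof(vddci_table[0]))

/* Smallest table value >= vddci; clamp to the top entry when the request is too high. */
static uint16_t find_closest_vddci(uint16_t vddci)
{
    size_t i;

    for (i = 0; i < NENTRIES; i++)
        if (vddci_table[i] >= vddci)
            return vddci_table[i];

    printf("vddci %u is larger than max value in vddci_table\n", vddci);
    return vddci_table[NENTRIES - 1];
}

int main(void)
{
    printf("%u %u\n", find_closest_vddci(875), find_closest_vddci(1100));   /* 900 1000 */
    return 0;
}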
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
index 720d5006ff62..c062844b15f3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
@@ -142,7 +142,7 @@ int pp_atomfwctrl_get_voltage_table_v4(struct pp_hwmgr *hwmgr,
}
} else if (voltage_mode == VOLTAGE_OBJ_SVID2) {
voltage_table->psi1_enable =
- voltage_object->svid2_voltage_obj.loadline_psi1 & 0x1;
+ (voltage_object->svid2_voltage_obj.loadline_psi1 & 0x20) >> 5;
voltage_table->psi0_enable =
voltage_object->svid2_voltage_obj.psi0_enable & 0x1;
voltage_table->max_vid_step =
@@ -276,7 +276,10 @@ int pp_atomfwctrl_get_avfs_information(struct pp_hwmgr *hwmgr,
struct pp_atomfwctrl_avfs_parameters *param)
{
uint16_t idx;
+ uint8_t format_revision, content_revision;
+
struct atom_asic_profiling_info_v4_1 *profile;
+ struct atom_asic_profiling_info_v4_2 *profile_v4_2;
idx = GetIndexIntoMasterDataTable(asic_profiling_info);
profile = (struct atom_asic_profiling_info_v4_1 *)
@@ -286,76 +289,172 @@ int pp_atomfwctrl_get_avfs_information(struct pp_hwmgr *hwmgr,
if (!profile)
return -1;
- param->ulMaxVddc = le32_to_cpu(profile->maxvddc);
- param->ulMinVddc = le32_to_cpu(profile->minvddc);
- param->ulMeanNsigmaAcontant0 =
- le32_to_cpu(profile->avfs_meannsigma_acontant0);
- param->ulMeanNsigmaAcontant1 =
- le32_to_cpu(profile->avfs_meannsigma_acontant1);
- param->ulMeanNsigmaAcontant2 =
- le32_to_cpu(profile->avfs_meannsigma_acontant2);
- param->usMeanNsigmaDcTolSigma =
- le16_to_cpu(profile->avfs_meannsigma_dc_tol_sigma);
- param->usMeanNsigmaPlatformMean =
- le16_to_cpu(profile->avfs_meannsigma_platform_mean);
- param->usMeanNsigmaPlatformSigma =
- le16_to_cpu(profile->avfs_meannsigma_platform_sigma);
- param->ulGbVdroopTableCksoffA0 =
- le32_to_cpu(profile->gb_vdroop_table_cksoff_a0);
- param->ulGbVdroopTableCksoffA1 =
- le32_to_cpu(profile->gb_vdroop_table_cksoff_a1);
- param->ulGbVdroopTableCksoffA2 =
- le32_to_cpu(profile->gb_vdroop_table_cksoff_a2);
- param->ulGbVdroopTableCksonA0 =
- le32_to_cpu(profile->gb_vdroop_table_ckson_a0);
- param->ulGbVdroopTableCksonA1 =
- le32_to_cpu(profile->gb_vdroop_table_ckson_a1);
- param->ulGbVdroopTableCksonA2 =
- le32_to_cpu(profile->gb_vdroop_table_ckson_a2);
- param->ulGbFuseTableCksoffM1 =
- le32_to_cpu(profile->avfsgb_fuse_table_cksoff_m1);
- param->ulGbFuseTableCksoffM2 =
- le32_to_cpu(profile->avfsgb_fuse_table_cksoff_m2);
- param->ulGbFuseTableCksoffB =
- le32_to_cpu(profile->avfsgb_fuse_table_cksoff_b);
- param->ulGbFuseTableCksonM1 =
- le32_to_cpu(profile->avfsgb_fuse_table_ckson_m1);
- param->ulGbFuseTableCksonM2 =
- le32_to_cpu(profile->avfsgb_fuse_table_ckson_m2);
- param->ulGbFuseTableCksonB =
- le32_to_cpu(profile->avfsgb_fuse_table_ckson_b);
-
- param->ucEnableGbVdroopTableCkson =
- profile->enable_gb_vdroop_table_ckson;
- param->ucEnableGbFuseTableCkson =
- profile->enable_gb_fuse_table_ckson;
- param->usPsmAgeComfactor =
- le16_to_cpu(profile->psm_age_comfactor);
-
- param->ulDispclk2GfxclkM1 =
- le32_to_cpu(profile->dispclk2gfxclk_a);
- param->ulDispclk2GfxclkM2 =
- le32_to_cpu(profile->dispclk2gfxclk_b);
- param->ulDispclk2GfxclkB =
- le32_to_cpu(profile->dispclk2gfxclk_c);
- param->ulDcefclk2GfxclkM1 =
- le32_to_cpu(profile->dcefclk2gfxclk_a);
- param->ulDcefclk2GfxclkM2 =
- le32_to_cpu(profile->dcefclk2gfxclk_b);
- param->ulDcefclk2GfxclkB =
- le32_to_cpu(profile->dcefclk2gfxclk_c);
- param->ulPixelclk2GfxclkM1 =
- le32_to_cpu(profile->pixclk2gfxclk_a);
- param->ulPixelclk2GfxclkM2 =
- le32_to_cpu(profile->pixclk2gfxclk_b);
- param->ulPixelclk2GfxclkB =
- le32_to_cpu(profile->pixclk2gfxclk_c);
- param->ulPhyclk2GfxclkM1 =
- le32_to_cpu(profile->phyclk2gfxclk_a);
- param->ulPhyclk2GfxclkM2 =
- le32_to_cpu(profile->phyclk2gfxclk_b);
- param->ulPhyclk2GfxclkB =
- le32_to_cpu(profile->phyclk2gfxclk_c);
+ format_revision = ((struct atom_common_table_header *)profile)->format_revision;
+ content_revision = ((struct atom_common_table_header *)profile)->content_revision;
+
+ if (format_revision == 4 && content_revision == 1) {
+ param->ulMaxVddc = le32_to_cpu(profile->maxvddc);
+ param->ulMinVddc = le32_to_cpu(profile->minvddc);
+ param->ulMeanNsigmaAcontant0 =
+ le32_to_cpu(profile->avfs_meannsigma_acontant0);
+ param->ulMeanNsigmaAcontant1 =
+ le32_to_cpu(profile->avfs_meannsigma_acontant1);
+ param->ulMeanNsigmaAcontant2 =
+ le32_to_cpu(profile->avfs_meannsigma_acontant2);
+ param->usMeanNsigmaDcTolSigma =
+ le16_to_cpu(profile->avfs_meannsigma_dc_tol_sigma);
+ param->usMeanNsigmaPlatformMean =
+ le16_to_cpu(profile->avfs_meannsigma_platform_mean);
+ param->usMeanNsigmaPlatformSigma =
+ le16_to_cpu(profile->avfs_meannsigma_platform_sigma);
+ param->ulGbVdroopTableCksoffA0 =
+ le32_to_cpu(profile->gb_vdroop_table_cksoff_a0);
+ param->ulGbVdroopTableCksoffA1 =
+ le32_to_cpu(profile->gb_vdroop_table_cksoff_a1);
+ param->ulGbVdroopTableCksoffA2 =
+ le32_to_cpu(profile->gb_vdroop_table_cksoff_a2);
+ param->ulGbVdroopTableCksonA0 =
+ le32_to_cpu(profile->gb_vdroop_table_ckson_a0);
+ param->ulGbVdroopTableCksonA1 =
+ le32_to_cpu(profile->gb_vdroop_table_ckson_a1);
+ param->ulGbVdroopTableCksonA2 =
+ le32_to_cpu(profile->gb_vdroop_table_ckson_a2);
+ param->ulGbFuseTableCksoffM1 =
+ le32_to_cpu(profile->avfsgb_fuse_table_cksoff_m1);
+ param->ulGbFuseTableCksoffM2 =
+ le32_to_cpu(profile->avfsgb_fuse_table_cksoff_m2);
+ param->ulGbFuseTableCksoffB =
+ le32_to_cpu(profile->avfsgb_fuse_table_cksoff_b);
+ param->ulGbFuseTableCksonM1 =
+ le32_to_cpu(profile->avfsgb_fuse_table_ckson_m1);
+ param->ulGbFuseTableCksonM2 =
+ le32_to_cpu(profile->avfsgb_fuse_table_ckson_m2);
+ param->ulGbFuseTableCksonB =
+ le32_to_cpu(profile->avfsgb_fuse_table_ckson_b);
+
+ param->ucEnableGbVdroopTableCkson =
+ profile->enable_gb_vdroop_table_ckson;
+ param->ucEnableGbFuseTableCkson =
+ profile->enable_gb_fuse_table_ckson;
+ param->usPsmAgeComfactor =
+ le16_to_cpu(profile->psm_age_comfactor);
+
+ param->ulDispclk2GfxclkM1 =
+ le32_to_cpu(profile->dispclk2gfxclk_a);
+ param->ulDispclk2GfxclkM2 =
+ le32_to_cpu(profile->dispclk2gfxclk_b);
+ param->ulDispclk2GfxclkB =
+ le32_to_cpu(profile->dispclk2gfxclk_c);
+ param->ulDcefclk2GfxclkM1 =
+ le32_to_cpu(profile->dcefclk2gfxclk_a);
+ param->ulDcefclk2GfxclkM2 =
+ le32_to_cpu(profile->dcefclk2gfxclk_b);
+ param->ulDcefclk2GfxclkB =
+ le32_to_cpu(profile->dcefclk2gfxclk_c);
+ param->ulPixelclk2GfxclkM1 =
+ le32_to_cpu(profile->pixclk2gfxclk_a);
+ param->ulPixelclk2GfxclkM2 =
+ le32_to_cpu(profile->pixclk2gfxclk_b);
+ param->ulPixelclk2GfxclkB =
+ le32_to_cpu(profile->pixclk2gfxclk_c);
+ param->ulPhyclk2GfxclkM1 =
+ le32_to_cpu(profile->phyclk2gfxclk_a);
+ param->ulPhyclk2GfxclkM2 =
+ le32_to_cpu(profile->phyclk2gfxclk_b);
+ param->ulPhyclk2GfxclkB =
+ le32_to_cpu(profile->phyclk2gfxclk_c);
+ param->ulAcgGbVdroopTableA0 = 0;
+ param->ulAcgGbVdroopTableA1 = 0;
+ param->ulAcgGbVdroopTableA2 = 0;
+ param->ulAcgGbFuseTableM1 = 0;
+ param->ulAcgGbFuseTableM2 = 0;
+ param->ulAcgGbFuseTableB = 0;
+ param->ucAcgEnableGbVdroopTable = 0;
+ param->ucAcgEnableGbFuseTable = 0;
+ } else if (format_revision == 4 && content_revision == 2) {
+ profile_v4_2 = (struct atom_asic_profiling_info_v4_2 *)profile;
+ param->ulMaxVddc = le32_to_cpu(profile_v4_2->maxvddc);
+ param->ulMinVddc = le32_to_cpu(profile_v4_2->minvddc);
+ param->ulMeanNsigmaAcontant0 =
+ le32_to_cpu(profile_v4_2->avfs_meannsigma_acontant0);
+ param->ulMeanNsigmaAcontant1 =
+ le32_to_cpu(profile_v4_2->avfs_meannsigma_acontant1);
+ param->ulMeanNsigmaAcontant2 =
+ le32_to_cpu(profile_v4_2->avfs_meannsigma_acontant2);
+ param->usMeanNsigmaDcTolSigma =
+ le16_to_cpu(profile_v4_2->avfs_meannsigma_dc_tol_sigma);
+ param->usMeanNsigmaPlatformMean =
+ le16_to_cpu(profile_v4_2->avfs_meannsigma_platform_mean);
+ param->usMeanNsigmaPlatformSigma =
+ le16_to_cpu(profile_v4_2->avfs_meannsigma_platform_sigma);
+ param->ulGbVdroopTableCksoffA0 =
+ le32_to_cpu(profile_v4_2->gb_vdroop_table_cksoff_a0);
+ param->ulGbVdroopTableCksoffA1 =
+ le32_to_cpu(profile_v4_2->gb_vdroop_table_cksoff_a1);
+ param->ulGbVdroopTableCksoffA2 =
+ le32_to_cpu(profile_v4_2->gb_vdroop_table_cksoff_a2);
+ param->ulGbVdroopTableCksonA0 =
+ le32_to_cpu(profile_v4_2->gb_vdroop_table_ckson_a0);
+ param->ulGbVdroopTableCksonA1 =
+ le32_to_cpu(profile_v4_2->gb_vdroop_table_ckson_a1);
+ param->ulGbVdroopTableCksonA2 =
+ le32_to_cpu(profile_v4_2->gb_vdroop_table_ckson_a2);
+ param->ulGbFuseTableCksoffM1 =
+ le32_to_cpu(profile_v4_2->avfsgb_fuse_table_cksoff_m1);
+ param->ulGbFuseTableCksoffM2 =
+ le32_to_cpu(profile_v4_2->avfsgb_fuse_table_cksoff_m2);
+ param->ulGbFuseTableCksoffB =
+ le32_to_cpu(profile_v4_2->avfsgb_fuse_table_cksoff_b);
+ param->ulGbFuseTableCksonM1 =
+ le32_to_cpu(profile_v4_2->avfsgb_fuse_table_ckson_m1);
+ param->ulGbFuseTableCksonM2 =
+ le32_to_cpu(profile_v4_2->avfsgb_fuse_table_ckson_m2);
+ param->ulGbFuseTableCksonB =
+ le32_to_cpu(profile_v4_2->avfsgb_fuse_table_ckson_b);
+
+ param->ucEnableGbVdroopTableCkson =
+ profile_v4_2->enable_gb_vdroop_table_ckson;
+ param->ucEnableGbFuseTableCkson =
+ profile_v4_2->enable_gb_fuse_table_ckson;
+ param->usPsmAgeComfactor =
+ le16_to_cpu(profile_v4_2->psm_age_comfactor);
+
+ param->ulDispclk2GfxclkM1 =
+ le32_to_cpu(profile_v4_2->dispclk2gfxclk_a);
+ param->ulDispclk2GfxclkM2 =
+ le32_to_cpu(profile_v4_2->dispclk2gfxclk_b);
+ param->ulDispclk2GfxclkB =
+ le32_to_cpu(profile_v4_2->dispclk2gfxclk_c);
+ param->ulDcefclk2GfxclkM1 =
+ le32_to_cpu(profile_v4_2->dcefclk2gfxclk_a);
+ param->ulDcefclk2GfxclkM2 =
+ le32_to_cpu(profile_v4_2->dcefclk2gfxclk_b);
+ param->ulDcefclk2GfxclkB =
+ le32_to_cpu(profile_v4_2->dcefclk2gfxclk_c);
+ param->ulPixelclk2GfxclkM1 =
+ le32_to_cpu(profile_v4_2->pixclk2gfxclk_a);
+ param->ulPixelclk2GfxclkM2 =
+ le32_to_cpu(profile_v4_2->pixclk2gfxclk_b);
+ param->ulPixelclk2GfxclkB =
+ le32_to_cpu(profile_v4_2->pixclk2gfxclk_c);
+ param->ulPhyclk2GfxclkM1 =
+ le32_to_cpu(profile_v4_2->phyclk2gfxclk_a);
+ param->ulPhyclk2GfxclkM2 =
+ le32_to_cpu(profile_v4_2->phyclk2gfxclk_b);
+ param->ulPhyclk2GfxclkB =
+ le32_to_cpu(profile_v4_2->phyclk2gfxclk_c);
+ param->ulAcgGbVdroopTableA0 = le32_to_cpu(profile_v4_2->acg_gb_vdroop_table_a0);
+ param->ulAcgGbVdroopTableA1 = le32_to_cpu(profile_v4_2->acg_gb_vdroop_table_a1);
+ param->ulAcgGbVdroopTableA2 = le32_to_cpu(profile_v4_2->acg_gb_vdroop_table_a2);
+ param->ulAcgGbFuseTableM1 = le32_to_cpu(profile_v4_2->acg_avfsgb_fuse_table_m1);
+ param->ulAcgGbFuseTableM2 = le32_to_cpu(profile_v4_2->acg_avfsgb_fuse_table_m2);
+ param->ulAcgGbFuseTableB = le32_to_cpu(profile_v4_2->acg_avfsgb_fuse_table_b);
+ param->ucAcgEnableGbVdroopTable = le32_to_cpu(profile_v4_2->enable_acg_gb_vdroop_table);
+ param->ucAcgEnableGbFuseTable = le32_to_cpu(profile_v4_2->enable_acg_gb_fuse_table);
+ } else {
+ pr_info("Invalid VBIOS AVFS ProfilingInfo Revision!\n");
+ return -EINVAL;
+ }
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
index 81908b5cfd5f..8e6b1f0ddebc 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
@@ -109,6 +109,14 @@ struct pp_atomfwctrl_avfs_parameters {
uint32_t ulPhyclk2GfxclkM1;
uint32_t ulPhyclk2GfxclkM2;
uint32_t ulPhyclk2GfxclkB;
+ uint32_t ulAcgGbVdroopTableA0;
+ uint32_t ulAcgGbVdroopTableA1;
+ uint32_t ulAcgGbVdroopTableA2;
+ uint32_t ulAcgGbFuseTableM1;
+ uint32_t ulAcgGbFuseTableM2;
+ uint32_t ulAcgGbFuseTableB;
+ uint32_t ucAcgEnableGbVdroopTable;
+ uint32_t ucAcgEnableGbFuseTable;
};
struct pp_atomfwctrl_gpio_parameters {
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
index 4c7f430b36eb..edc5fb6412d9 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
@@ -265,6 +265,15 @@ static int rv_tf_set_clock_limit(struct pp_hwmgr *hwmgr, void *input,
}
} */
+ if (((hwmgr->uvd_arbiter.vclk_soft_min / 100) != rv_data->vclk_soft_min) ||
+ ((hwmgr->uvd_arbiter.dclk_soft_min / 100) != rv_data->dclk_soft_min)) {
+ rv_data->vclk_soft_min = hwmgr->uvd_arbiter.vclk_soft_min / 100;
+ rv_data->dclk_soft_min = hwmgr->uvd_arbiter.dclk_soft_min / 100;
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetSoftMinVcn,
+ (rv_data->vclk_soft_min << 16) | rv_data->vclk_soft_min);
+ }
+
if((hwmgr->gfx_arbiter.sclk_hard_min != 0) &&
((hwmgr->gfx_arbiter.sclk_hard_min / 100) != rv_data->soc_actual_hard_min_freq)) {
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h
index afb852295a15..2472b50e54cf 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h
@@ -280,6 +280,8 @@ struct rv_hwmgr {
uint32_t f_actual_hard_min_freq;
uint32_t fabric_actual_soft_min_freq;
+ uint32_t vclk_soft_min;
+ uint32_t dclk_soft_min;
uint32_t gfx_actual_soft_min_freq;
bool vcn_power_gated;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 1f01020ce3a9..c2743233ba10 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -1962,9 +1962,6 @@ static int smu7_thermal_parameter_init(struct pp_hwmgr *hwmgr)
temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1);
break;
default:
- PP_ASSERT_WITH_CODE(0,
- "Failed to setup PCC HW register! Wrong GPIO assigned for VDDC_PCC_GPIO_PINID!",
- );
break;
}
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg);
@@ -4630,6 +4627,15 @@ static int smu7_set_power_profile_state(struct pp_hwmgr *hwmgr,
static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable)
{
+ struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+
+ if (smu_data == NULL)
+ return -EINVAL;
+
+ if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
+ return 0;
+
if (enable) {
if (!PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON))
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index d6f097f44b6c..9d71a259d97d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -78,6 +78,8 @@ uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK 0x000000F0L
#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000700L
#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L
+static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
+ enum pp_clock_type type, uint32_t mask);
const ULONG PhwVega10_Magic = (ULONG)(PHM_VIslands_Magic);
@@ -146,6 +148,19 @@ static void vega10_set_default_registry_data(struct pp_hwmgr *hwmgr)
data->registry_data.vr1hot_enabled = 1;
data->registry_data.regulator_hot_gpio_support = 1;
+ data->registry_data.didt_support = 1;
+ if (data->registry_data.didt_support) {
+ data->registry_data.didt_mode = 6;
+ data->registry_data.sq_ramping_support = 1;
+ data->registry_data.db_ramping_support = 0;
+ data->registry_data.td_ramping_support = 0;
+ data->registry_data.tcp_ramping_support = 0;
+ data->registry_data.dbr_ramping_support = 0;
+ data->registry_data.edc_didt_support = 1;
+ data->registry_data.gc_didt_support = 0;
+ data->registry_data.psm_didt_support = 0;
+ }
+
data->display_voltage_mode = PPVEGA10_VEGA10DISPLAYVOLTAGEMODE_DFLT;
data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
@@ -223,6 +238,8 @@ static int vega10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerContainment);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_DiDtSupport);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SQRamping);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DBRamping);
@@ -230,6 +247,34 @@ static int vega10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
PHM_PlatformCaps_TDRamping);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TCPRamping);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_DBRRamping);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_DiDtEDCEnable);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_GCEDC);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PSM);
+
+ if (data->registry_data.didt_support) {
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtSupport);
+ if (data->registry_data.sq_ramping_support)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping);
+ if (data->registry_data.db_ramping_support)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping);
+ if (data->registry_data.td_ramping_support)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping);
+ if (data->registry_data.tcp_ramping_support)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping);
+ if (data->registry_data.dbr_ramping_support)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping);
+ if (data->registry_data.edc_didt_support)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtEDCEnable);
+ if (data->registry_data.gc_didt_support)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC);
+ if (data->registry_data.psm_didt_support)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM);
+ }
if (data->registry_data.power_containment_support)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
@@ -321,8 +366,8 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
FEATURE_LED_DISPLAY_BIT;
data->smu_features[GNLD_FAN_CONTROL].smu_feature_id =
FEATURE_FAN_CONTROL_BIT;
- data->smu_features[GNLD_VOLTAGE_CONTROLLER].smu_feature_id =
- FEATURE_VOLTAGE_CONTROLLER_BIT;
+ data->smu_features[GNLD_ACG].smu_feature_id = FEATURE_ACG_BIT;
+ data->smu_features[GNLD_DIDT].smu_feature_id = FEATURE_GFX_EDC_BIT;
if (!data->registry_data.prefetcher_dpm_key_disabled)
data->smu_features[GNLD_DPM_PREFETCHER].supported = true;
@@ -386,6 +431,15 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
if (data->registry_data.vr0hot_enabled)
data->smu_features[GNLD_VR0HOT].supported = true;
+ smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetSmuVersion);
+ vega10_read_arg_from_smc(hwmgr->smumgr, &(data->smu_version));
+ /* ACG firmware has major version 5 */
+ if ((data->smu_version & 0xff000000) == 0x5000000)
+ data->smu_features[GNLD_ACG].supported = true;
+
+ if (data->registry_data.didt_support)
+ data->smu_features[GNLD_DIDT].supported = true;
+
}
#ifdef PPLIB_VEGA10_EVV_SUPPORT
@@ -2128,15 +2182,9 @@ static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
pp_table->AvfsGbCksOff.m2_shift = 12;
pp_table->AvfsGbCksOff.b_shift = 0;
- for (i = 0; i < dep_table->count; i++) {
- if (dep_table->entries[i].sclk_offset == 0)
- pp_table->StaticVoltageOffsetVid[i] = 248;
- else
- pp_table->StaticVoltageOffsetVid[i] =
- (uint8_t)(dep_table->entries[i].sclk_offset *
- VOLTAGE_VID_OFFSET_SCALE2 /
- VOLTAGE_VID_OFFSET_SCALE1);
- }
+ for (i = 0; i < dep_table->count; i++)
+ pp_table->StaticVoltageOffsetVid[i] =
+ convert_to_vid((uint8_t)(dep_table->entries[i].sclk_offset));
if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
data->disp_clk_quad_eqn_a) &&
@@ -2228,6 +2276,21 @@ static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1_shift = 24;
pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2_shift = 12;
pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b_shift = 12;
+
+ pp_table->AcgBtcGbVdroopTable.a0 = avfs_params.ulAcgGbVdroopTableA0;
+ pp_table->AcgBtcGbVdroopTable.a0_shift = 20;
+ pp_table->AcgBtcGbVdroopTable.a1 = avfs_params.ulAcgGbVdroopTableA1;
+ pp_table->AcgBtcGbVdroopTable.a1_shift = 20;
+ pp_table->AcgBtcGbVdroopTable.a2 = avfs_params.ulAcgGbVdroopTableA2;
+ pp_table->AcgBtcGbVdroopTable.a2_shift = 20;
+
+ pp_table->AcgAvfsGb.m1 = avfs_params.ulAcgGbFuseTableM1;
+ pp_table->AcgAvfsGb.m2 = avfs_params.ulAcgGbFuseTableM2;
+ pp_table->AcgAvfsGb.b = avfs_params.ulAcgGbFuseTableB;
+ pp_table->AcgAvfsGb.m1_shift = 0;
+ pp_table->AcgAvfsGb.m2_shift = 0;
+ pp_table->AcgAvfsGb.b_shift = 0;
+
} else {
data->smu_features[GNLD_AVFS].supported = false;
}
@@ -2236,6 +2299,55 @@ static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
return 0;
}
+static int vega10_acg_enable(struct pp_hwmgr *hwmgr)
+{
+ struct vega10_hwmgr *data =
+ (struct vega10_hwmgr *)(hwmgr->backend);
+ uint32_t agc_btc_response;
+
+ if (data->smu_features[GNLD_ACG].supported) {
+ if (vega10_enable_smc_features(hwmgr->smumgr, true,
+ data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_bitmap) == 0)
+ data->smu_features[GNLD_DPM_PREFETCHER].enabled = true;
+
+ smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_InitializeAcg);
+
+ smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_RunAcgBtc);
+ vega10_read_arg_from_smc(hwmgr->smumgr, &agc_btc_response);
+
+ if (agc_btc_response == 1) {
+ if (data->acg_loop_state == 1)
+ smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_RunAcgInClosedLoop);
+ else if (data->acg_loop_state == 2)
+ smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_RunAcgInOpenLoop);
+ if (vega10_enable_smc_features(hwmgr->smumgr, true,
+ data->smu_features[GNLD_ACG].smu_feature_bitmap) == 0)
+ data->smu_features[GNLD_ACG].enabled = true;
+ } else {
+ pr_info("[ACG_Enable] ACG BTC Returned Failed Status!\n");
+ data->smu_features[GNLD_ACG].enabled = false;
+ }
+ }
+
+ return 0;
+}
+
+static int vega10_acg_disable(struct pp_hwmgr *hwmgr)
+{
+ struct vega10_hwmgr *data =
+ (struct vega10_hwmgr *)(hwmgr->backend);
+
+ if (data->smu_features[GNLD_ACG].supported) {
+ if (data->smu_features[GNLD_ACG].enabled) {
+ if (vega10_enable_smc_features(hwmgr->smumgr, false,
+ data->smu_features[GNLD_ACG].smu_feature_bitmap) == 0)
+ data->smu_features[GNLD_ACG].enabled = false;
+ }
+ }
+
+ return 0;
+}
+
static int vega10_populate_gpio_parameters(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data =
@@ -2410,6 +2522,9 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
pp_table->DisplayDpmVoltageMode =
(uint8_t)(table_info->uc_dcef_dpm_voltage_mode);
+ data->vddc_voltage_table.psi0_enable = voltage_table.psi0_enable;
+ data->vddc_voltage_table.psi1_enable = voltage_table.psi1_enable;
+
if (data->registry_data.ulv_support &&
table_info->us_ulv_voltage_offset) {
result = vega10_populate_ulv_state(hwmgr);
@@ -2506,7 +2621,7 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
result = vega10_avfs_enable(hwmgr, true);
PP_ASSERT_WITH_CODE(!result, "Attempt to enable AVFS feature Failed!",
return result);
-
+ vega10_acg_enable(hwmgr);
vega10_save_default_power_profile(hwmgr);
return 0;
@@ -2838,6 +2953,11 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!tmp_result,
"Failed to start DPM!", result = tmp_result);
+ /* enable didt, do not abort if failed didt */
+ tmp_result = vega10_enable_didt_config(hwmgr);
+ PP_ASSERT(!tmp_result,
+ "Failed to enable didt config!");
+
tmp_result = vega10_enable_power_containment(hwmgr);
PP_ASSERT_WITH_CODE(!tmp_result,
"Failed to enable power containment!",
@@ -3584,10 +3704,22 @@ static void vega10_apply_dal_minimum_voltage_request(
return;
}
+static int vega10_get_soc_index_for_max_uclk(struct pp_hwmgr *hwmgr)
+{
+ struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table_on_mclk;
+ struct phm_ppt_v2_information *table_info =
+ (struct phm_ppt_v2_information *)(hwmgr->pptable);
+
+ vdd_dep_table_on_mclk = table_info->vdd_dep_on_mclk;
+
+ return vdd_dep_table_on_mclk->entries[NUM_UCLK_DPM_LEVELS - 1].vddInd + 1;
+}
+
static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data =
(struct vega10_hwmgr *)(hwmgr->backend);
+ uint32_t socclk_idx;
vega10_apply_dal_minimum_voltage_request(hwmgr);
@@ -3608,13 +3740,22 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
if (!data->registry_data.mclk_dpm_key_disabled) {
if (data->smc_state_table.mem_boot_level !=
data->dpm_table.mem_table.dpm_state.soft_min_level) {
+ if (data->smc_state_table.mem_boot_level == NUM_UCLK_DPM_LEVELS - 1) {
+ socclk_idx = vega10_get_soc_index_for_max_uclk(hwmgr);
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
- PPSMC_MSG_SetSoftMinUclkByIndex,
- data->smc_state_table.mem_boot_level),
- "Failed to set soft min mclk index!",
- return -EINVAL);
-
+ hwmgr->smumgr,
+ PPSMC_MSG_SetSoftMinSocclkByIndex,
+ socclk_idx),
+ "Failed to set soft min uclk index!",
+ return -EINVAL);
+ } else {
+ PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
+ hwmgr->smumgr,
+ PPSMC_MSG_SetSoftMinUclkByIndex,
+ data->smc_state_table.mem_boot_level),
+ "Failed to set soft min uclk index!",
+ return -EINVAL);
+ }
data->dpm_table.mem_table.dpm_state.soft_min_level =
data->smc_state_table.mem_boot_level;
}
@@ -4021,7 +4162,7 @@ static int vega10_notify_smc_display_config_after_ps_adjustment(
pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
}
} else {
- pr_info("Cannot find requested DCEFCLK!");
+ pr_debug("Cannot find requested DCEFCLK!");
}
if (min_clocks.memoryClock != 0) {
@@ -4103,34 +4244,30 @@ static int vega10_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
return 0;
}
-static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
- enum amd_dpm_forced_level level)
+static int vega10_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
+ uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask)
{
- int ret = 0;
+ struct phm_ppt_v2_information *table_info =
+ (struct phm_ppt_v2_information *)(hwmgr->pptable);
- switch (level) {
- case AMD_DPM_FORCED_LEVEL_HIGH:
- ret = vega10_force_dpm_highest(hwmgr);
- if (ret)
- return ret;
- break;
- case AMD_DPM_FORCED_LEVEL_LOW:
- ret = vega10_force_dpm_lowest(hwmgr);
- if (ret)
- return ret;
- break;
- case AMD_DPM_FORCED_LEVEL_AUTO:
- ret = vega10_unforce_dpm_levels(hwmgr);
- if (ret)
- return ret;
- break;
- default:
- break;
+ if (table_info->vdd_dep_on_sclk->count > VEGA10_UMD_PSTATE_GFXCLK_LEVEL &&
+ table_info->vdd_dep_on_socclk->count > VEGA10_UMD_PSTATE_SOCCLK_LEVEL &&
+ table_info->vdd_dep_on_mclk->count > VEGA10_UMD_PSTATE_MCLK_LEVEL) {
+ *sclk_mask = VEGA10_UMD_PSTATE_GFXCLK_LEVEL;
+ *soc_mask = VEGA10_UMD_PSTATE_SOCCLK_LEVEL;
+ *mclk_mask = VEGA10_UMD_PSTATE_MCLK_LEVEL;
}
- hwmgr->dpm_level = level;
-
- return ret;
+ if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
+ *sclk_mask = 0;
+ } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
+ *mclk_mask = 0;
+ } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ *sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
+ *soc_mask = table_info->vdd_dep_on_socclk->count - 1;
+ *mclk_mask = table_info->vdd_dep_on_mclk->count - 1;
+ }
+ return 0;
}
static int vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
@@ -4157,6 +4294,86 @@ static int vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
return result;
}
+static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
+ enum amd_dpm_forced_level level)
+{
+ int ret = 0;
+ uint32_t sclk_mask = 0;
+ uint32_t mclk_mask = 0;
+ uint32_t soc_mask = 0;
+ uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
+ AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
+
+ if (level == hwmgr->dpm_level)
+ return ret;
+
+ if (!(hwmgr->dpm_level & profile_mode_mask)) {
+ /* enter profile mode, save current level, disable gfx cg*/
+ if (level & profile_mode_mask) {
+ hwmgr->saved_dpm_level = hwmgr->dpm_level;
+ cgs_set_clockgating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_CG_STATE_UNGATE);
+ }
+ } else {
+ /* exit profile mode, restore level, enable gfx cg*/
+ if (!(level & profile_mode_mask)) {
+ if (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
+ level = hwmgr->saved_dpm_level;
+ cgs_set_clockgating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_CG_STATE_GATE);
+ }
+ }
+
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_HIGH:
+ ret = vega10_force_dpm_highest(hwmgr);
+ if (ret)
+ return ret;
+ hwmgr->dpm_level = level;
+ break;
+ case AMD_DPM_FORCED_LEVEL_LOW:
+ ret = vega10_force_dpm_lowest(hwmgr);
+ if (ret)
+ return ret;
+ hwmgr->dpm_level = level;
+ break;
+ case AMD_DPM_FORCED_LEVEL_AUTO:
+ ret = vega10_unforce_dpm_levels(hwmgr);
+ if (ret)
+ return ret;
+ hwmgr->dpm_level = level;
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+ ret = vega10_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
+ if (ret)
+ return ret;
+ hwmgr->dpm_level = level;
+ vega10_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
+ vega10_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
+ break;
+ case AMD_DPM_FORCED_LEVEL_MANUAL:
+ hwmgr->dpm_level = level;
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
+ default:
+ break;
+ }
+
+ if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+ vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE);
+ else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+ vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO);
+
+ return 0;
+}
+
static int vega10_get_fan_control_mode(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
@@ -4402,7 +4619,9 @@ static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
int i;
- if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
+ if (hwmgr->dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
+ AMD_DPM_FORCED_LEVEL_LOW |
+ AMD_DPM_FORCED_LEVEL_HIGH))
return -EINVAL;
switch (type) {
@@ -4667,6 +4886,10 @@ static int vega10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((tmp_result == 0),
"Failed to disable power containment!", result = tmp_result);
+ tmp_result = vega10_disable_didt_config(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to disable didt config!", result = tmp_result);
+
tmp_result = vega10_avfs_enable(hwmgr, false);
PP_ASSERT_WITH_CODE((tmp_result == 0),
"Failed to disable AVFS!", result = tmp_result);
@@ -4683,6 +4906,9 @@ static int vega10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((tmp_result == 0),
"Failed to disable ulv!", result = tmp_result);
+ tmp_result = vega10_acg_disable(hwmgr);
+ PP_ASSERT_WITH_CODE((tmp_result == 0),
+ "Failed to disable acg!", result = tmp_result);
return result;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
index 6e5c5b99593b..676cd7735883 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
@@ -64,7 +64,9 @@ enum {
GNLD_FW_CTF,
GNLD_LED_DISPLAY,
GNLD_FAN_CONTROL,
- GNLD_VOLTAGE_CONTROLLER,
+ GNLD_FEATURE_FAST_PPT_BIT,
+ GNLD_DIDT,
+ GNLD_ACG,
GNLD_FEATURES_MAX
};
@@ -230,7 +232,9 @@ struct vega10_registry_data {
uint8_t cac_support;
uint8_t clock_stretcher_support;
uint8_t db_ramping_support;
+ uint8_t didt_mode;
uint8_t didt_support;
+ uint8_t edc_didt_support;
uint8_t dynamic_state_patching_support;
uint8_t enable_pkg_pwr_tracking_feature;
uint8_t enable_tdc_limit_feature;
@@ -263,6 +267,9 @@ struct vega10_registry_data {
uint8_t tcp_ramping_support;
uint8_t tdc_support;
uint8_t td_ramping_support;
+ uint8_t dbr_ramping_support;
+ uint8_t gc_didt_support;
+ uint8_t psm_didt_support;
uint8_t thermal_out_gpio_support;
uint8_t thermal_support;
uint8_t fw_ctf_enabled;
@@ -381,6 +388,8 @@ struct vega10_hwmgr {
struct vega10_smc_state_table smc_state_table;
uint32_t config_telemetry;
+ uint32_t smu_version;
+ uint32_t acg_loop_state;
};
#define VEGA10_DPM2_NEAR_TDP_DEC 10
@@ -425,6 +434,10 @@ struct vega10_hwmgr {
#define PPVEGA10_VEGA10UCLKCLKAVERAGEALPHA_DFLT 25 /* 10% * 255 = 25 */
#define PPVEGA10_VEGA10GFXACTIVITYAVERAGEALPHA_DFLT 25 /* 10% * 255 = 25 */
+#define VEGA10_UMD_PSTATE_GFXCLK_LEVEL 0x3
+#define VEGA10_UMD_PSTATE_SOCCLK_LEVEL 0x3
+#define VEGA10_UMD_PSTATE_MCLK_LEVEL 0x2
+
extern int tonga_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr);
extern int tonga_hwmgr_backend_fini(struct pp_hwmgr *hwmgr);
extern int tonga_get_mc_microcode_version (struct pp_hwmgr *hwmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
index 3f72268e99bb..e7fa67063cdc 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
@@ -26,7 +26,1298 @@
#include "vega10_powertune.h"
#include "vega10_smumgr.h"
#include "vega10_ppsmc.h"
+#include "vega10_inc.h"
#include "pp_debug.h"
+#include "pp_soc15.h"
+
+static const struct vega10_didt_config_reg SEDiDtTuningCtrlConfig_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* DIDT_SQ */
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853 },
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153 },
+
+ /* DIDT_TD */
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde },
+ { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde },
+
+ /* DIDT_TCP */
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde },
+ { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde },
+
+ /* DIDT_DB */
+ { ixDIDT_DB_TUNING_CTRL, DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde },
+ { ixDIDT_DB_TUNING_CTRL, DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_DB_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg SEDiDtCtrl3Config_vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /*DIDT_SQ_CTRL3 */
+ { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__GC_DIDT_ENABLE_MASK, DIDT_SQ_CTRL3__GC_DIDT_ENABLE__SHIFT, 0x0000 },
+ { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__THROTTLE_POLICY_MASK, DIDT_SQ_CTRL3__THROTTLE_POLICY__SHIFT, 0x0003 },
+ { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
+ { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK, DIDT_SQ_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT, 0x0000 },
+ { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0003 },
+ { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK, DIDT_SQ_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
+ { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK, DIDT_SQ_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
+ { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__QUALIFY_STALL_EN_MASK, DIDT_SQ_CTRL3__QUALIFY_STALL_EN__SHIFT, 0x0000 },
+ { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_STALL_SEL_MASK, DIDT_SQ_CTRL3__DIDT_STALL_SEL__SHIFT, 0x0000 },
+ { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_FORCE_STALL_MASK, DIDT_SQ_CTRL3__DIDT_FORCE_STALL__SHIFT, 0x0000 },
+ { ixDIDT_SQ_CTRL3, DIDT_SQ_CTRL3__DIDT_STALL_DELAY_EN_MASK, DIDT_SQ_CTRL3__DIDT_STALL_DELAY_EN__SHIFT, 0x0000 },
+
+ /*DIDT_TCP_CTRL3 */
+ { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__GC_DIDT_ENABLE_MASK, DIDT_TCP_CTRL3__GC_DIDT_ENABLE__SHIFT, 0x0000 },
+ { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__THROTTLE_POLICY_MASK, DIDT_TCP_CTRL3__THROTTLE_POLICY__SHIFT, 0x0003 },
+ { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_TCP_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
+ { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK, DIDT_TCP_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT, 0x0000 },
+ { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK, DIDT_TCP_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0003 },
+ { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK, DIDT_TCP_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
+ { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK, DIDT_TCP_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
+ { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__QUALIFY_STALL_EN_MASK, DIDT_TCP_CTRL3__QUALIFY_STALL_EN__SHIFT, 0x0000 },
+ { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_STALL_SEL_MASK, DIDT_TCP_CTRL3__DIDT_STALL_SEL__SHIFT, 0x0000 },
+ { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_FORCE_STALL_MASK, DIDT_TCP_CTRL3__DIDT_FORCE_STALL__SHIFT, 0x0000 },
+ { ixDIDT_TCP_CTRL3, DIDT_TCP_CTRL3__DIDT_STALL_DELAY_EN_MASK, DIDT_TCP_CTRL3__DIDT_STALL_DELAY_EN__SHIFT, 0x0000 },
+
+ /*DIDT_TD_CTRL3 */
+ { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__GC_DIDT_ENABLE_MASK, DIDT_TD_CTRL3__GC_DIDT_ENABLE__SHIFT, 0x0000 },
+ { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__THROTTLE_POLICY_MASK, DIDT_TD_CTRL3__THROTTLE_POLICY__SHIFT, 0x0003 },
+ { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_TD_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
+ { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK, DIDT_TD_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT, 0x0000 },
+ { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK, DIDT_TD_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0003 },
+ { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK, DIDT_TD_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
+ { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK, DIDT_TD_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
+ { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__QUALIFY_STALL_EN_MASK, DIDT_TD_CTRL3__QUALIFY_STALL_EN__SHIFT, 0x0000 },
+ { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_STALL_SEL_MASK, DIDT_TD_CTRL3__DIDT_STALL_SEL__SHIFT, 0x0000 },
+ { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_FORCE_STALL_MASK, DIDT_TD_CTRL3__DIDT_FORCE_STALL__SHIFT, 0x0000 },
+ { ixDIDT_TD_CTRL3, DIDT_TD_CTRL3__DIDT_STALL_DELAY_EN_MASK, DIDT_TD_CTRL3__DIDT_STALL_DELAY_EN__SHIFT, 0x0000 },
+
+ /*DIDT_DB_CTRL3 */
+ { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__GC_DIDT_ENABLE_MASK, DIDT_DB_CTRL3__GC_DIDT_ENABLE__SHIFT, 0x0000 },
+ { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__GC_DIDT_CLK_EN_OVERRIDE_MASK, DIDT_DB_CTRL3__GC_DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__THROTTLE_POLICY_MASK, DIDT_DB_CTRL3__THROTTLE_POLICY__SHIFT, 0x0003 },
+ { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_DB_CTRL3__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
+ { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_POWER_LEVEL_LOWBIT_MASK, DIDT_DB_CTRL3__DIDT_POWER_LEVEL_LOWBIT__SHIFT, 0x0000 },
+ { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS_MASK, DIDT_DB_CTRL3__DIDT_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0003 },
+ { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__GC_DIDT_LEVEL_COMB_EN_MASK, DIDT_DB_CTRL3__GC_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
+ { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__SE_DIDT_LEVEL_COMB_EN_MASK, DIDT_DB_CTRL3__SE_DIDT_LEVEL_COMB_EN__SHIFT, 0x0000 },
+ { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__QUALIFY_STALL_EN_MASK, DIDT_DB_CTRL3__QUALIFY_STALL_EN__SHIFT, 0x0000 },
+ { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_STALL_SEL_MASK, DIDT_DB_CTRL3__DIDT_STALL_SEL__SHIFT, 0x0000 },
+ { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_FORCE_STALL_MASK, DIDT_DB_CTRL3__DIDT_FORCE_STALL__SHIFT, 0x0000 },
+ { ixDIDT_DB_CTRL3, DIDT_DB_CTRL3__DIDT_STALL_DELAY_EN_MASK, DIDT_DB_CTRL3__DIDT_STALL_DELAY_EN__SHIFT, 0x0000 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg SEDiDtCtrl2Config_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* DIDT_SQ */
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853 },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x00c0 },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000 },
+
+ /* DIDT_TD */
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x00c0 },
+ { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0001 },
+
+ /* DIDT_TCP */
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x00c0 },
+ { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0001 },
+
+ /* DIDT_DB */
+ { ixDIDT_DB_CTRL2, DIDT_DB_CTRL2__MAX_POWER_DELTA_MASK, DIDT_DB_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde },
+ { ixDIDT_DB_CTRL2, DIDT_DB_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_DB_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x00c0 },
+ { ixDIDT_DB_CTRL2, DIDT_DB_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_DB_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0001 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg SEDiDtCtrl1Config_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* DIDT_SQ */
+ { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000 },
+ { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff },
+ /* DIDT_TD */
+ { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000 },
+ { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff },
+ /* DIDT_TCP */
+ { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000 },
+ { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff },
+ /* DIDT_DB */
+ { ixDIDT_DB_CTRL1, DIDT_DB_CTRL1__MIN_POWER_MASK, DIDT_DB_CTRL1__MIN_POWER__SHIFT, 0x0000 },
+ { ixDIDT_DB_CTRL1, DIDT_DB_CTRL1__MAX_POWER_MASK, DIDT_DB_CTRL1__MAX_POWER__SHIFT, 0xffff },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+
+static const struct vega10_didt_config_reg SEDiDtWeightConfig_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* DIDT_SQ */
+ { ixDIDT_SQ_WEIGHT0_3, 0xFFFFFFFF, 0, 0x2B363B1A },
+ { ixDIDT_SQ_WEIGHT4_7, 0xFFFFFFFF, 0, 0x270B2432 },
+ { ixDIDT_SQ_WEIGHT8_11, 0xFFFFFFFF, 0, 0x00000018 },
+
+ /* DIDT_TD */
+ { ixDIDT_TD_WEIGHT0_3, 0xFFFFFFFF, 0, 0x2B1D220F },
+ { ixDIDT_TD_WEIGHT4_7, 0xFFFFFFFF, 0, 0x00007558 },
+ { ixDIDT_TD_WEIGHT8_11, 0xFFFFFFFF, 0, 0x00000000 },
+
+ /* DIDT_TCP */
+ { ixDIDT_TCP_WEIGHT0_3, 0xFFFFFFFF, 0, 0x5ACE160D },
+ { ixDIDT_TCP_WEIGHT4_7, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_TCP_WEIGHT8_11, 0xFFFFFFFF, 0, 0x00000000 },
+
+ /* DIDT_DB */
+ { ixDIDT_DB_WEIGHT0_3, 0xFFFFFFFF, 0, 0x0E152A0F },
+ { ixDIDT_DB_WEIGHT4_7, 0xFFFFFFFF, 0, 0x09061813 },
+ { ixDIDT_DB_WEIGHT8_11, 0xFFFFFFFF, 0, 0x00000013 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg SEDiDtCtrl0Config_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* DIDT_SQ */
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0000 },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000 },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000 },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_STALL_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_STALL_CTRL_EN__SHIFT, 0x0001 },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_TUNING_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT, 0x0001 },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK, DIDT_SQ_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT, 0x0001 },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT, 0xffff },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_AUTO_MPD_EN_MASK, DIDT_SQ_CTRL0__DIDT_AUTO_MPD_EN__SHIFT, 0x0000 },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_STALL_EVENT_EN_MASK, DIDT_SQ_CTRL0__DIDT_STALL_EVENT_EN__SHIFT, 0x0000 },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK, DIDT_SQ_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT, 0x0000 },
+ /* DIDT_TD */
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0000 },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000 },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000 },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_STALL_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_STALL_CTRL_EN__SHIFT, 0x0001 },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_TUNING_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT, 0x0001 },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK, DIDT_TD_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT, 0x0001 },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT, 0xffff },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_AUTO_MPD_EN_MASK, DIDT_TD_CTRL0__DIDT_AUTO_MPD_EN__SHIFT, 0x0000 },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_STALL_EVENT_EN_MASK, DIDT_TD_CTRL0__DIDT_STALL_EVENT_EN__SHIFT, 0x0000 },
+ { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK, DIDT_TD_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT, 0x0000 },
+ /* DIDT_TCP */
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0000 },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000 },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000 },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_STALL_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_STALL_CTRL_EN__SHIFT, 0x0001 },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_TUNING_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT, 0x0001 },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK, DIDT_TCP_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT, 0x0001 },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT, 0xffff },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_AUTO_MPD_EN_MASK, DIDT_TCP_CTRL0__DIDT_AUTO_MPD_EN__SHIFT, 0x0000 },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_STALL_EVENT_EN_MASK, DIDT_TCP_CTRL0__DIDT_STALL_EVENT_EN__SHIFT, 0x0000 },
+ { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK, DIDT_TCP_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT, 0x0000 },
+ /* DIDT_DB */
+ { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK, DIDT_DB_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0000 },
+ { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__PHASE_OFFSET_MASK, DIDT_DB_CTRL0__PHASE_OFFSET__SHIFT, 0x0000 },
+ { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_CTRL_RST_MASK, DIDT_DB_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000 },
+ { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_DB_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_STALL_CTRL_EN_MASK, DIDT_DB_CTRL0__DIDT_STALL_CTRL_EN__SHIFT, 0x0001 },
+ { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_TUNING_CTRL_EN_MASK, DIDT_DB_CTRL0__DIDT_TUNING_CTRL_EN__SHIFT, 0x0001 },
+ { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_STALL_AUTO_RELEASE_EN_MASK, DIDT_DB_CTRL0__DIDT_STALL_AUTO_RELEASE_EN__SHIFT, 0x0001 },
+ { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_DB_CTRL0__DIDT_HI_POWER_THRESHOLD__SHIFT, 0xffff },
+ { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_AUTO_MPD_EN_MASK, DIDT_DB_CTRL0__DIDT_AUTO_MPD_EN__SHIFT, 0x0000 },
+ { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_STALL_EVENT_EN_MASK, DIDT_DB_CTRL0__DIDT_STALL_EVENT_EN__SHIFT, 0x0000 },
+ { ixDIDT_DB_CTRL0, DIDT_DB_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR_MASK, DIDT_DB_CTRL0__DIDT_STALL_EVENT_COUNTER_CLEAR__SHIFT, 0x0000 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+
+static const struct vega10_didt_config_reg SEDiDtStallCtrlConfig_vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* DIDT_SQ */
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0004 },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0004 },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x000a },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x000a },
+
+ /* DIDT_TD */
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001 },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001 },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x000a },
+ { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x000a },
+
+ /* DIDT_TCP */
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001 },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001 },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x000a },
+ { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x000a },
+
+ /* DIDT_DB */
+ { ixDIDT_DB_STALL_CTRL, DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0004 },
+ { ixDIDT_DB_STALL_CTRL, DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_DB_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0004 },
+ { ixDIDT_DB_STALL_CTRL, DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x000a },
+ { ixDIDT_DB_STALL_CTRL, DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_DB_STALL_CTRL__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x000a },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg SEDiDtStallPatternConfig_vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* DIDT_SQ_STALL_PATTERN_1_2 */
+ { ixDIDT_SQ_STALL_PATTERN_1_2, DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK, DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT, 0x0001 },
+ { ixDIDT_SQ_STALL_PATTERN_1_2, DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK, DIDT_SQ_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT, 0x0001 },
+
+ /* DIDT_SQ_STALL_PATTERN_3_4 */
+ { ixDIDT_SQ_STALL_PATTERN_3_4, DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK, DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT, 0x0001 },
+ { ixDIDT_SQ_STALL_PATTERN_3_4, DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK, DIDT_SQ_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT, 0x0001 },
+
+ /* DIDT_SQ_STALL_PATTERN_5_6 */
+ { ixDIDT_SQ_STALL_PATTERN_5_6, DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK, DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT, 0x0000 },
+ { ixDIDT_SQ_STALL_PATTERN_5_6, DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK, DIDT_SQ_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT, 0x0000 },
+
+ /* DIDT_SQ_STALL_PATTERN_7 */
+ { ixDIDT_SQ_STALL_PATTERN_7, DIDT_SQ_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK, DIDT_SQ_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT, 0x0000 },
+
+ /* DIDT_TCP_STALL_PATTERN_1_2 */
+ { ixDIDT_TCP_STALL_PATTERN_1_2, DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK, DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT, 0x0001 },
+ { ixDIDT_TCP_STALL_PATTERN_1_2, DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK, DIDT_TCP_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT, 0x0001 },
+
+ /* DIDT_TCP_STALL_PATTERN_3_4 */
+ { ixDIDT_TCP_STALL_PATTERN_3_4, DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK, DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT, 0x0001 },
+ { ixDIDT_TCP_STALL_PATTERN_3_4, DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK, DIDT_TCP_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT, 0x0001 },
+
+ /* DIDT_TCP_STALL_PATTERN_5_6 */
+ { ixDIDT_TCP_STALL_PATTERN_5_6, DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK, DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT, 0x0000 },
+ { ixDIDT_TCP_STALL_PATTERN_5_6, DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK, DIDT_TCP_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT, 0x0000 },
+
+ /* DIDT_TCP_STALL_PATTERN_7 */
+ { ixDIDT_TCP_STALL_PATTERN_7, DIDT_TCP_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK, DIDT_TCP_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT, 0x0000 },
+
+ /* DIDT_TD_STALL_PATTERN_1_2 */
+ { ixDIDT_TD_STALL_PATTERN_1_2, DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK, DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT, 0x0001 },
+ { ixDIDT_TD_STALL_PATTERN_1_2, DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK, DIDT_TD_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT, 0x0001 },
+
+ /* DIDT_TD_STALL_PATTERN_3_4 */
+ { ixDIDT_TD_STALL_PATTERN_3_4, DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK, DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT, 0x0001 },
+ { ixDIDT_TD_STALL_PATTERN_3_4, DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK, DIDT_TD_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT, 0x0001 },
+
+ /* DIDT_TD_STALL_PATTERN_5_6 */
+ { ixDIDT_TD_STALL_PATTERN_5_6, DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK, DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT, 0x0000 },
+ { ixDIDT_TD_STALL_PATTERN_5_6, DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK, DIDT_TD_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT, 0x0000 },
+
+ /* DIDT_TD_STALL_PATTERN_7 */
+ { ixDIDT_TD_STALL_PATTERN_7, DIDT_TD_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK, DIDT_TD_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT, 0x0000 },
+
+ /* DIDT_DB_STALL_PATTERN_1_2 */
+ { ixDIDT_DB_STALL_PATTERN_1_2, DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1_MASK, DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_1__SHIFT, 0x0001 },
+ { ixDIDT_DB_STALL_PATTERN_1_2, DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2_MASK, DIDT_DB_STALL_PATTERN_1_2__DIDT_STALL_PATTERN_2__SHIFT, 0x0001 },
+
+ /* DIDT_DB_STALL_PATTERN_3_4 */
+ { ixDIDT_DB_STALL_PATTERN_3_4, DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3_MASK, DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_3__SHIFT, 0x0001 },
+ { ixDIDT_DB_STALL_PATTERN_3_4, DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4_MASK, DIDT_DB_STALL_PATTERN_3_4__DIDT_STALL_PATTERN_4__SHIFT, 0x0001 },
+
+ /* DIDT_DB_STALL_PATTERN_5_6 */
+ { ixDIDT_DB_STALL_PATTERN_5_6, DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5_MASK, DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_5__SHIFT, 0x0000 },
+ { ixDIDT_DB_STALL_PATTERN_5_6, DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6_MASK, DIDT_DB_STALL_PATTERN_5_6__DIDT_STALL_PATTERN_6__SHIFT, 0x0000 },
+
+ /* DIDT_DB_STALL_PATTERN_7 */
+ { ixDIDT_DB_STALL_PATTERN_7, DIDT_DB_STALL_PATTERN_7__DIDT_STALL_PATTERN_7_MASK, DIDT_DB_STALL_PATTERN_7__DIDT_STALL_PATTERN_7__SHIFT, 0x0000 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg SELCacConfig_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* SQ */
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x00060021 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x00860021 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x01060021 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x01860021 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x02060021 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x02860021 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x03060021 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x03860021 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x04060021 },
+ /* TD */
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x000E0020 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x008E0020 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x010E0020 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x018E0020 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x020E0020 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x028E0020 },
+ /* TCP */
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x001c0020 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x009c0020 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x011c0020 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x019c0020 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x021c0020 },
+ /* DB */
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x00200008 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x00820008 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x01020008 },
+ { ixSE_CAC_CNTL, 0xFFFFFFFF, 0, 0x01820008 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+
+static const struct vega10_didt_config_reg SEEDCStallPatternConfig_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* SQ */
+ { ixDIDT_SQ_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00030001 },
+ { ixDIDT_SQ_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x000F0007 },
+ { ixDIDT_SQ_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x003F001F },
+ { ixDIDT_SQ_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x0000007F },
+ /* TD */
+ { ixDIDT_TD_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_TD_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_TD_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_TD_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x00000000 },
+ /* TCP */
+ { ixDIDT_TCP_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_TCP_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_TCP_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_TCP_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x00000000 },
+ /* DB */
+ { ixDIDT_DB_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_DB_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_DB_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_DB_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x00000000 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg SEEDCForceStallPatternConfig_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* SQ */
+ { ixDIDT_SQ_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00000015 },
+ { ixDIDT_SQ_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_SQ_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_SQ_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x00000000 },
+ /* TD */
+ { ixDIDT_TD_EDC_STALL_PATTERN_1_2, 0xFFFFFFFF, 0, 0x00000015 },
+ { ixDIDT_TD_EDC_STALL_PATTERN_3_4, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_TD_EDC_STALL_PATTERN_5_6, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_TD_EDC_STALL_PATTERN_7, 0xFFFFFFFF, 0, 0x00000000 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg SEEDCStallDelayConfig_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* SQ */
+ { ixDIDT_SQ_EDC_STALL_DELAY_1, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_SQ_EDC_STALL_DELAY_2, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_SQ_EDC_STALL_DELAY_3, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_SQ_EDC_STALL_DELAY_4, 0xFFFFFFFF, 0, 0x00000000 },
+ /* TD */
+ { ixDIDT_TD_EDC_STALL_DELAY_1, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_TD_EDC_STALL_DELAY_2, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_TD_EDC_STALL_DELAY_3, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_TD_EDC_STALL_DELAY_4, 0xFFFFFFFF, 0, 0x00000000 },
+ /* TCP */
+ { ixDIDT_TCP_EDC_STALL_DELAY_1, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_TCP_EDC_STALL_DELAY_2, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_TCP_EDC_STALL_DELAY_3, 0xFFFFFFFF, 0, 0x00000000 },
+ { ixDIDT_TCP_EDC_STALL_DELAY_4, 0xFFFFFFFF, 0, 0x00000000 },
+ /* DB */
+ { ixDIDT_DB_EDC_STALL_DELAY_1, 0xFFFFFFFF, 0, 0x00000000 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg SEEDCThresholdConfig_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ { ixDIDT_SQ_EDC_THRESHOLD, 0xFFFFFFFF, 0, 0x0000010E },
+ { ixDIDT_TD_EDC_THRESHOLD, 0xFFFFFFFF, 0, 0xFFFFFFFF },
+ { ixDIDT_TCP_EDC_THRESHOLD, 0xFFFFFFFF, 0, 0xFFFFFFFF },
+ { ixDIDT_DB_EDC_THRESHOLD, 0xFFFFFFFF, 0, 0xFFFFFFFF },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg SEEDCCtrlResetConfig_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* SQ */
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0001 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg SEEDCCtrlConfig_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* SQ */
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0001 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0004 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0006 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0001 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg SEEDCCtrlForceStallConfig_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* SQ */
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0001 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0001 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x000C },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0001 },
+
+ /* TD */
+ { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_EN_MASK, DIDT_TD_EDC_CTRL__EDC_EN__SHIFT, 0x0000 },
+ { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_SW_RST_MASK, DIDT_TD_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 },
+ { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_TD_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_TD_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0001 },
+ { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_TD_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0001 },
+ { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_TD_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x000E },
+ { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_TD_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
+ { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__GC_EDC_EN_MASK, DIDT_TD_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0000 },
+ { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_TD_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0000 },
+ { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_TD_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
+ { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_TD_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0001 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg GCDiDtDroopCtrlConfig_vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ { mmGC_DIDT_DROOP_CTRL, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_EN_MASK, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_EN__SHIFT, 0x0000 },
+ { mmGC_DIDT_DROOP_CTRL, GC_DIDT_DROOP_CTRL__DIDT_DROOP_THRESHOLD_MASK, GC_DIDT_DROOP_CTRL__DIDT_DROOP_THRESHOLD__SHIFT, 0x0000 },
+ { mmGC_DIDT_DROOP_CTRL, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_INDEX_MASK, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_INDEX__SHIFT, 0x0000 },
+ { mmGC_DIDT_DROOP_CTRL, GC_DIDT_DROOP_CTRL__DIDT_LEVEL_SEL_MASK, GC_DIDT_DROOP_CTRL__DIDT_LEVEL_SEL__SHIFT, 0x0000 },
+ { mmGC_DIDT_DROOP_CTRL, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_OVERFLOW_MASK, GC_DIDT_DROOP_CTRL__DIDT_DROOP_LEVEL_OVERFLOW__SHIFT, 0x0000 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg GCDiDtCtrl0Config_vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ { mmGC_DIDT_CTRL0, GC_DIDT_CTRL0__DIDT_CTRL_EN_MASK, GC_DIDT_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0000 },
+ { mmGC_DIDT_CTRL0, GC_DIDT_CTRL0__PHASE_OFFSET_MASK, GC_DIDT_CTRL0__PHASE_OFFSET__SHIFT, 0x0000 },
+ { mmGC_DIDT_CTRL0, GC_DIDT_CTRL0__DIDT_SW_RST_MASK, GC_DIDT_CTRL0__DIDT_SW_RST__SHIFT, 0x0000 },
+ { mmGC_DIDT_CTRL0, GC_DIDT_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, GC_DIDT_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { mmGC_DIDT_CTRL0, GC_DIDT_CTRL0__DIDT_TRIGGER_THROTTLE_LOWBIT_MASK, GC_DIDT_CTRL0__DIDT_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
+ { 0xFFFFFFFF } /* End of list */
+};
+
+
+static const struct vega10_didt_config_reg PSMSEEDCStallPatternConfig_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* SQ EDC STALL PATTERNs */
+ { ixDIDT_SQ_EDC_STALL_PATTERN_1_2, DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1_MASK, DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_1__SHIFT, 0x0101 },
+ { ixDIDT_SQ_EDC_STALL_PATTERN_1_2, DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2_MASK, DIDT_SQ_EDC_STALL_PATTERN_1_2__EDC_STALL_PATTERN_2__SHIFT, 0x0101 },
+ { ixDIDT_SQ_EDC_STALL_PATTERN_3_4, DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3_MASK, DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_3__SHIFT, 0x1111 },
+ { ixDIDT_SQ_EDC_STALL_PATTERN_3_4, DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4_MASK, DIDT_SQ_EDC_STALL_PATTERN_3_4__EDC_STALL_PATTERN_4__SHIFT, 0x1111 },
+
+ { ixDIDT_SQ_EDC_STALL_PATTERN_5_6, DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5_MASK, DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_5__SHIFT, 0x1515 },
+ { ixDIDT_SQ_EDC_STALL_PATTERN_5_6, DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6_MASK, DIDT_SQ_EDC_STALL_PATTERN_5_6__EDC_STALL_PATTERN_6__SHIFT, 0x1515 },
+
+ { ixDIDT_SQ_EDC_STALL_PATTERN_7, DIDT_SQ_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7_MASK, DIDT_SQ_EDC_STALL_PATTERN_7__EDC_STALL_PATTERN_7__SHIFT, 0x5555 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg PSMSEEDCStallDelayConfig_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* SQ EDC STALL DELAYs */
+ { ixDIDT_SQ_EDC_STALL_DELAY_1, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ0_MASK, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ0__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_STALL_DELAY_1, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ1_MASK, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ1__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_STALL_DELAY_1, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ2_MASK, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ2__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_STALL_DELAY_1, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ3_MASK, DIDT_SQ_EDC_STALL_DELAY_1__EDC_STALL_DELAY_SQ3__SHIFT, 0x0000 },
+
+ { ixDIDT_SQ_EDC_STALL_DELAY_2, DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ4_MASK, DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ4__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_STALL_DELAY_2, DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ5_MASK, DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ5__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_STALL_DELAY_2, DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ6_MASK, DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ6__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_STALL_DELAY_2, DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ7_MASK, DIDT_SQ_EDC_STALL_DELAY_2__EDC_STALL_DELAY_SQ7__SHIFT, 0x0000 },
+
+ { ixDIDT_SQ_EDC_STALL_DELAY_3, DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ8_MASK, DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ8__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_STALL_DELAY_3, DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ9_MASK, DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ9__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_STALL_DELAY_3, DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ10_MASK, DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ10__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_STALL_DELAY_3, DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ11_MASK, DIDT_SQ_EDC_STALL_DELAY_3__EDC_STALL_DELAY_SQ11__SHIFT, 0x0000 },
+
+ { ixDIDT_SQ_EDC_STALL_DELAY_4, DIDT_SQ_EDC_STALL_DELAY_4__EDC_STALL_DELAY_SQ12_MASK, DIDT_SQ_EDC_STALL_DELAY_4__EDC_STALL_DELAY_SQ12__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_STALL_DELAY_4, DIDT_SQ_EDC_STALL_DELAY_4__EDC_STALL_DELAY_SQ13_MASK, DIDT_SQ_EDC_STALL_DELAY_4__EDC_STALL_DELAY_SQ13__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_STALL_DELAY_4, DIDT_SQ_EDC_STALL_DELAY_4__EDC_STALL_DELAY_SQ14_MASK, DIDT_SQ_EDC_STALL_DELAY_4__EDC_STALL_DELAY_SQ14__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_STALL_DELAY_4, DIDT_SQ_EDC_STALL_DELAY_4__EDC_STALL_DELAY_SQ15_MASK, DIDT_SQ_EDC_STALL_DELAY_4__EDC_STALL_DELAY_SQ15__SHIFT, 0x0000 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg PSMSEEDCThresholdConfig_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* SQ EDC THRESHOLD */
+ { ixDIDT_SQ_EDC_THRESHOLD, DIDT_SQ_EDC_THRESHOLD__EDC_THRESHOLD_MASK, DIDT_SQ_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT, 0x0000 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg PSMSEEDCCtrlResetConfig_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* SQ EDC CTRL */
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0001 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg PSMSEEDCCtrlConfig_Vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ /* SQ EDC CTRL */
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0001 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, DIDT_SQ_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS_MASK, DIDT_SQ_EDC_CTRL__EDC_STALL_PATTERN_BIT_NUMS__SHIFT, 0x000E },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, DIDT_SQ_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_EN__SHIFT, 0x0001 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_STALL_POLICY__SHIFT, 0x0003 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__GC_EDC_LEVEL_COMB_EN__SHIFT, 0x0001 },
+ { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0000 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg PSMGCEDCThresholdConfig_vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ { mmGC_EDC_THRESHOLD, GC_EDC_THRESHOLD__EDC_THRESHOLD_MASK, GC_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT, 0x00000000 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg PSMGCEDCDroopCtrlConfig_vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ { mmGC_EDC_DROOP_CTRL, GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_EN_MASK, GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_EN__SHIFT, 0x0001 },
+ { mmGC_EDC_DROOP_CTRL, GC_EDC_DROOP_CTRL__EDC_DROOP_THRESHOLD_MASK, GC_EDC_DROOP_CTRL__EDC_DROOP_THRESHOLD__SHIFT, 0x0384 },
+ { mmGC_EDC_DROOP_CTRL, GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_INDEX_MASK, GC_EDC_DROOP_CTRL__EDC_DROOP_LEVEL_INDEX__SHIFT, 0x0001 },
+ { mmGC_EDC_DROOP_CTRL, GC_EDC_DROOP_CTRL__AVG_PSM_SEL_MASK, GC_EDC_DROOP_CTRL__AVG_PSM_SEL__SHIFT, 0x0001 },
+ { mmGC_EDC_DROOP_CTRL, GC_EDC_DROOP_CTRL__EDC_LEVEL_SEL_MASK, GC_EDC_DROOP_CTRL__EDC_LEVEL_SEL__SHIFT, 0x0001 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg PSMGCEDCCtrlResetConfig_vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_EN_MASK, GC_EDC_CTRL__EDC_EN__SHIFT, 0x0000 },
+ { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_SW_RST_MASK, GC_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0001 },
+ { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_FORCE_STALL_MASK, GC_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 },
+ { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
+ { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg PSMGCEDCCtrlConfig_vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_EN_MASK, GC_EDC_CTRL__EDC_EN__SHIFT, 0x0001 },
+ { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_SW_RST_MASK, GC_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 },
+ { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, GC_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 },
+ { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_FORCE_STALL_MASK, GC_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0000 },
+ { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT_MASK, GC_EDC_CTRL__EDC_TRIGGER_THROTTLE_LOWBIT__SHIFT, 0x0000 },
+ { mmGC_EDC_CTRL, GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA_MASK, GC_EDC_CTRL__EDC_ALLOW_WRITE_PWRDELTA__SHIFT, 0x0000 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
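+/*
+ * The AVFS PSM reset/init sequences below use raw register offsets rather
+ * than named register defines; they are applied through the direct GC
+ * register path by vega10_program_gc_didt_config_registers().
+ */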
+static const struct vega10_didt_config_reg AvfsPSMResetConfig_vega10[]=
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ { 0x16A02, 0xFFFFFFFF, 0x0, 0x0000005F },
+ { 0x16A05, 0xFFFFFFFF, 0x0, 0x00000001 },
+ { 0x16A06, 0x00000001, 0x0, 0x02000000 },
+ { 0x16A01, 0xFFFFFFFF, 0x0, 0x00003027 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
+static const struct vega10_didt_config_reg AvfsPSMInitConfig_vega10[] =
+{
+/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ * Offset Mask Shift Value
+ * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ */
+ { 0x16A05, 0xFFFFFFFF, 0x18, 0x00000001 },
+ { 0x16A05, 0xFFFFFFFF, 0x8, 0x00000003 },
+ { 0x16A05, 0xFFFFFFFF, 0xa, 0x00000006 },
+ { 0x16A05, 0xFFFFFFFF, 0x7, 0x00000000 },
+ { 0x16A06, 0xFFFFFFFF, 0x18, 0x00000001 },
+ { 0x16A06, 0xFFFFFFFF, 0x19, 0x00000001 },
+ { 0x16A01, 0xFFFFFFFF, 0x0, 0x00003027 },
+
+ { 0xFFFFFFFF } /* End of list */
+};
+
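+/*
+ * Walk a config table until the 0xFFFFFFFF sentinel entry, applying each
+ * entry as a read-modify-write (clear the field via the mask, then OR in
+ * value << shift) to the selected indirect register space (DIDT, GC CAC
+ * or SE CAC).
+ */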
+static int vega10_program_didt_config_registers(struct pp_hwmgr *hwmgr, const struct vega10_didt_config_reg *config_regs, enum vega10_didt_config_reg_type reg_type)
+{
+ uint32_t data;
+
+ PP_ASSERT_WITH_CODE((config_regs != NULL), "[vega10_program_didt_config_registers] Invalid config register table!", return -EINVAL);
+
+ while (config_regs->offset != 0xFFFFFFFF) {
+ switch (reg_type) {
+ case VEGA10_CONFIGREG_DIDT:
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset);
+ data &= ~config_regs->mask;
+ data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset, data);
+ break;
+ case VEGA10_CONFIGREG_GCCAC:
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset);
+ data &= ~config_regs->mask;
+ data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset, data);
+ break;
+ case VEGA10_CONFIGREG_SECAC:
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG_SE_CAC, config_regs->offset);
+ data &= ~config_regs->mask;
+ data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG_SE_CAC, config_regs->offset, data);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ config_regs++;
+ }
+
+ return 0;
+}
+
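+/* Same table walk as above, but over directly addressed GC registers using cgs_read_register and cgs_write_register. */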
+static int vega10_program_gc_didt_config_registers(struct pp_hwmgr *hwmgr, const struct vega10_didt_config_reg *config_regs)
+{
+ uint32_t data;
+
+ while (config_regs->offset != 0xFFFFFFFF) {
+ data = cgs_read_register(hwmgr->device, config_regs->offset);
+ data &= ~config_regs->mask;
+ data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+ cgs_write_register(hwmgr->device, config_regs->offset, data);
+ config_regs++;
+ }
+
+ return 0;
+}
+
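+/*
+ * Toggle the DIDT_CTRL_EN bit (and, when the EDC cap is set, the EDC_EN and
+ * EDC_SW_RST bits) for each block whose ramping cap is enabled, collect the
+ * per-block enable state in didt_block_info and, on enable, hand it to the
+ * SMC via PPSMC_MSG_ConfigureGfxDidt.
+ */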
+static void vega10_didt_set_mask(struct pp_hwmgr *hwmgr, const bool enable)
+{
+ uint32_t data;
+ int result;
+ uint32_t en = (enable ? 1 : 0);
+ uint32_t didt_block_info = SQ_IR_MASK | TCP_IR_MASK | TD_PCC_MASK;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping)) {
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0);
+ data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
+ data |= ((en << DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0, data);
+ didt_block_info &= ~SQ_Enable_MASK;
+ didt_block_info |= en << SQ_Enable_SHIFT;
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping)) {
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0);
+ data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
+ data |= ((en << DIDT_DB_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0, data);
+ didt_block_info &= ~DB_Enable_MASK;
+ didt_block_info |= en << DB_Enable_SHIFT;
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping)) {
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0);
+ data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
+ data |= ((en << DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0, data);
+ didt_block_info &= ~TD_Enable_MASK;
+ didt_block_info |= en << TD_Enable_SHIFT;
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0);
+ data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
+ data |= ((en << DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0, data);
+ didt_block_info &= ~TCP_Enable_MASK;
+ didt_block_info |= en << TCP_Enable_SHIFT;
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping)) {
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_CTRL0);
+ data &= ~DIDT_DBR_CTRL0__DIDT_CTRL_EN_MASK;
+ data |= ((en << DIDT_DBR_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_DBR_CTRL0__DIDT_CTRL_EN_MASK);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_CTRL0, data);
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtEDCEnable)) {
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping)) {
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_EDC_CTRL);
+ data &= ~DIDT_SQ_EDC_CTRL__EDC_EN_MASK;
+ data |= ((en << DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT) & DIDT_SQ_EDC_CTRL__EDC_EN_MASK);
+ data &= ~DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK;
+ data |= ((~en << DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_EDC_CTRL, data);
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping)) {
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_EDC_CTRL);
+ data &= ~DIDT_DB_EDC_CTRL__EDC_EN_MASK;
+ data |= ((en << DIDT_DB_EDC_CTRL__EDC_EN__SHIFT) & DIDT_DB_EDC_CTRL__EDC_EN_MASK);
+ data &= ~DIDT_DB_EDC_CTRL__EDC_SW_RST_MASK;
+ data |= ((~en << DIDT_DB_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_DB_EDC_CTRL__EDC_SW_RST_MASK);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_EDC_CTRL, data);
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping)) {
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_EDC_CTRL);
+ data &= ~DIDT_TD_EDC_CTRL__EDC_EN_MASK;
+ data |= ((en << DIDT_TD_EDC_CTRL__EDC_EN__SHIFT) & DIDT_TD_EDC_CTRL__EDC_EN_MASK);
+ data &= ~DIDT_TD_EDC_CTRL__EDC_SW_RST_MASK;
+ data |= ((~en << DIDT_TD_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_TD_EDC_CTRL__EDC_SW_RST_MASK);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_EDC_CTRL, data);
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_EDC_CTRL);
+ data &= ~DIDT_TCP_EDC_CTRL__EDC_EN_MASK;
+ data |= ((en << DIDT_TCP_EDC_CTRL__EDC_EN__SHIFT) & DIDT_TCP_EDC_CTRL__EDC_EN_MASK);
+ data &= ~DIDT_TCP_EDC_CTRL__EDC_SW_RST_MASK;
+ data |= ((~en << DIDT_TCP_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_TCP_EDC_CTRL__EDC_SW_RST_MASK);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_EDC_CTRL, data);
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping)) {
+ data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_EDC_CTRL);
+ data &= ~DIDT_DBR_EDC_CTRL__EDC_EN_MASK;
+ data |= ((en << DIDT_DBR_EDC_CTRL__EDC_EN__SHIFT) & DIDT_DBR_EDC_CTRL__EDC_EN_MASK);
+ data &= ~DIDT_DBR_EDC_CTRL__EDC_SW_RST_MASK;
+ data |= ((~en << DIDT_DBR_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_DBR_EDC_CTRL__EDC_SW_RST_MASK);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_EDC_CTRL, data);
+ }
+ }
+
+ if (enable) {
+ /* For Vega10, SMC does not support any mask yet. */
+ result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_ConfigureGfxDidt, didt_block_info);
+ PP_ASSERT((0 == result), "[EnableDiDtConfig] SMC Configure Gfx Didt Failed!");
+ }
+}
+
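+/*
+ * The enable helpers below program the per-shader-engine tables one SE at a
+ * time: GRBM_GFX_INDEX is pointed at each SE (with instance and SH broadcast
+ * writes), the config tables are written, and the index is then restored to
+ * full broadcast (0xE0000000). All of this runs inside GFX safe mode.
+ */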
+static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ uint32_t num_se = 0, count, data;
+ struct cgs_system_info sys_info = {0};
+ uint32_t reg;
+
+ sys_info.size = sizeof(struct cgs_system_info);
+ sys_info.info_id = CGS_SYSTEM_INFO_GFX_SE_INFO;
+ if (cgs_query_system_info(hwmgr->device, &sys_info) == 0)
+ num_se = sys_info.value;
+
+ cgs_enter_safe_mode(hwmgr->device, true);
+
+ cgs_lock_grbm_idx(hwmgr->device, true);
+ reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
+ for (count = 0; count < num_se; count++) {
+ data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
+ cgs_write_register(hwmgr->device, reg, data);
+
+ result = vega10_program_didt_config_registers(hwmgr, SEDiDtStallCtrlConfig_vega10, VEGA10_CONFIGREG_DIDT);
+ result |= vega10_program_didt_config_registers(hwmgr, SEDiDtStallPatternConfig_vega10, VEGA10_CONFIGREG_DIDT);
+ result |= vega10_program_didt_config_registers(hwmgr, SEDiDtWeightConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+ result |= vega10_program_didt_config_registers(hwmgr, SEDiDtCtrl1Config_Vega10, VEGA10_CONFIGREG_DIDT);
+ result |= vega10_program_didt_config_registers(hwmgr, SEDiDtCtrl2Config_Vega10, VEGA10_CONFIGREG_DIDT);
+ result |= vega10_program_didt_config_registers(hwmgr, SEDiDtCtrl3Config_vega10, VEGA10_CONFIGREG_DIDT);
+ result |= vega10_program_didt_config_registers(hwmgr, SEDiDtTuningCtrlConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+ result |= vega10_program_didt_config_registers(hwmgr, SELCacConfig_Vega10, VEGA10_CONFIGREG_SECAC);
+ result |= vega10_program_didt_config_registers(hwmgr, SEDiDtCtrl0Config_Vega10, VEGA10_CONFIGREG_DIDT);
+
+ if (0 != result)
+ break;
+ }
+ cgs_write_register(hwmgr->device, reg, 0xE0000000);
+ cgs_lock_grbm_idx(hwmgr->device, false);
+
+ vega10_didt_set_mask(hwmgr, true);
+
+ cgs_enter_safe_mode(hwmgr->device, false);
+
+ return 0;
+}
+
+static int vega10_disable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
+{
+ cgs_enter_safe_mode(hwmgr->device, true);
+
+ vega10_didt_set_mask(hwmgr, false);
+
+ cgs_enter_safe_mode(hwmgr->device, false);
+
+ return 0;
+}
+
+static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ uint32_t num_se = 0, count, data;
+ struct cgs_system_info sys_info = {0};
+ uint32_t reg;
+
+ sys_info.size = sizeof(struct cgs_system_info);
+ sys_info.info_id = CGS_SYSTEM_INFO_GFX_SE_INFO;
+ if (cgs_query_system_info(hwmgr->device, &sys_info) == 0)
+ num_se = sys_info.value;
+
+ cgs_enter_safe_mode(hwmgr->device, true);
+
+ cgs_lock_grbm_idx(hwmgr->device, true);
+ reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
+ for (count = 0; count < num_se; count++) {
+ data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
+ cgs_write_register(hwmgr->device, reg, data);
+
+ result = vega10_program_didt_config_registers(hwmgr, SEDiDtStallCtrlConfig_vega10, VEGA10_CONFIGREG_DIDT);
+ result |= vega10_program_didt_config_registers(hwmgr, SEDiDtStallPatternConfig_vega10, VEGA10_CONFIGREG_DIDT);
+ result |= vega10_program_didt_config_registers(hwmgr, SEDiDtCtrl3Config_vega10, VEGA10_CONFIGREG_DIDT);
+ result |= vega10_program_didt_config_registers(hwmgr, SEDiDtCtrl0Config_Vega10, VEGA10_CONFIGREG_DIDT);
+ if (0 != result)
+ break;
+ }
+ cgs_write_register(hwmgr->device, reg, 0xE0000000);
+ cgs_lock_grbm_idx(hwmgr->device, false);
+
+ vega10_didt_set_mask(hwmgr, true);
+
+ cgs_enter_safe_mode(hwmgr->device, false);
+
+ vega10_program_gc_didt_config_registers(hwmgr, GCDiDtDroopCtrlConfig_vega10);
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC))
+ vega10_program_gc_didt_config_registers(hwmgr, GCDiDtCtrl0Config_vega10);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM))
+ vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMInitConfig_vega10);
+
+ return 0;
+}
+
+static int vega10_disable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
+{
+ uint32_t data;
+
+ cgs_enter_safe_mode(hwmgr->device, true);
+
+ vega10_didt_set_mask(hwmgr, false);
+
+ cgs_enter_safe_mode(hwmgr->device, false);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC)) {
+ data = 0x00000000;
+ cgs_write_register(hwmgr->device, mmGC_DIDT_CTRL0, data);
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM))
+ vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega10);
+
+ return 0;
+}
+
+static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ uint32_t num_se = 0, count, data;
+ struct cgs_system_info sys_info = {0};
+ uint32_t reg;
+
+ sys_info.size = sizeof(struct cgs_system_info);
+ sys_info.info_id = CGS_SYSTEM_INFO_GFX_SE_INFO;
+ if (cgs_query_system_info(hwmgr->device, &sys_info) == 0)
+ num_se = sys_info.value;
+
+ cgs_enter_safe_mode(hwmgr->device, true);
+
+ cgs_lock_grbm_idx(hwmgr->device, true);
+ reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
+ for (count = 0; count < num_se; count++) {
+ data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
+ cgs_write_register(hwmgr->device, reg, data);
+ result = vega10_program_didt_config_registers(hwmgr, SEDiDtWeightConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+ result |= vega10_program_didt_config_registers(hwmgr, SEEDCStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+ result |= vega10_program_didt_config_registers(hwmgr, SEEDCStallDelayConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+ result |= vega10_program_didt_config_registers(hwmgr, SEEDCThresholdConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+ result |= vega10_program_didt_config_registers(hwmgr, SEEDCCtrlResetConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+ result |= vega10_program_didt_config_registers(hwmgr, SEEDCCtrlConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+
+ if (0 != result)
+ break;
+ }
+ cgs_write_register(hwmgr->device, reg, 0xE0000000);
+ cgs_lock_grbm_idx(hwmgr->device, false);
+
+ vega10_didt_set_mask(hwmgr, true);
+
+ cgs_enter_safe_mode(hwmgr->device, false);
+
+ return 0;
+}
+
+static int vega10_disable_se_edc_config(struct pp_hwmgr *hwmgr)
+{
+ cgs_enter_safe_mode(hwmgr->device, true);
+
+ vega10_didt_set_mask(hwmgr, false);
+
+ cgs_enter_safe_mode(hwmgr->device, false);
+
+ return 0;
+}
+
+static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
+{
+ int result = 0;
+ uint32_t num_se = 0;
+ uint32_t count, data;
+ struct cgs_system_info sys_info = {0};
+ uint32_t reg;
+
+ sys_info.size = sizeof(struct cgs_system_info);
+ sys_info.info_id = CGS_SYSTEM_INFO_GFX_SE_INFO;
+ if (cgs_query_system_info(hwmgr->device, &sys_info) == 0)
+ num_se = sys_info.value;
+
+ cgs_enter_safe_mode(hwmgr->device, true);
+
+ vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega10);
+
+ cgs_lock_grbm_idx(hwmgr->device, true);
+ reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
+ for (count = 0; count < num_se; count++) {
+ data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
+ cgs_write_register(hwmgr->device, reg, data);
+ result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+ result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCStallDelayConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+ result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCCtrlResetConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+ result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCCtrlConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+
+ if (0 != result)
+ break;
+ }
+ cgs_write_register(hwmgr->device, reg, 0xE0000000);
+ cgs_lock_grbm_idx(hwmgr->device, false);
+
+ vega10_didt_set_mask(hwmgr, true);
+
+ cgs_enter_safe_mode(hwmgr->device, false);
+
+ vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCDroopCtrlConfig_vega10);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC)) {
+ vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCCtrlResetConfig_vega10);
+ vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCCtrlConfig_vega10);
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM))
+ vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMInitConfig_vega10);
+
+ return 0;
+}
+
+static int vega10_disable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
+{
+ uint32_t data;
+
+ cgs_enter_safe_mode(hwmgr->device, true);
+
+ vega10_didt_set_mask(hwmgr, false);
+
+ cgs_enter_safe_mode(hwmgr->device, false);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC)) {
+ data = 0x00000000;
+ cgs_write_register(hwmgr->device, mmGC_EDC_CTRL, data);
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM))
+ vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega10);
+
+ return 0;
+}
+
+static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
+{
+ uint32_t reg;
+ int result;
+
+ cgs_enter_safe_mode(hwmgr->device, true);
+
+ cgs_lock_grbm_idx(hwmgr->device, true);
+ reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX);
+ cgs_write_register(hwmgr->device, reg, 0xE0000000);
+ cgs_lock_grbm_idx(hwmgr->device, false);
+
+ result = vega10_program_didt_config_registers(hwmgr, SEEDCForceStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+ result |= vega10_program_didt_config_registers(hwmgr, SEEDCCtrlForceStallConfig_Vega10, VEGA10_CONFIGREG_DIDT);
+ if (0 != result) {
+ cgs_enter_safe_mode(hwmgr->device, false);
+ return result;
+ }
+
+ vega10_didt_set_mask(hwmgr, false);
+
+ cgs_enter_safe_mode(hwmgr->device, false);
+
+ return 0;
+}
+
+static int vega10_disable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
+{
+ int result;
+
+ result = vega10_disable_se_edc_config(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDtConfig] Pre DIDT disable clock gating failed!", return result);
+
+ return 0;
+}
+
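+/*
+ * registry_data.didt_mode selects the configuration flavour:
+ * 0 - CAC-driving SE DiDt, 2 - PSM GC DiDt, 3 - SE EDC,
+ * 1/4/5 - PSM GC EDC, 6 - SE EDC force stall.
+ */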
+int vega10_enable_didt_config(struct pp_hwmgr *hwmgr)
+{
+ int result = 0;
+ struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
+
+ if (data->smu_features[GNLD_DIDT].supported) {
+ if (data->smu_features[GNLD_DIDT].enabled)
+ PP_DBG_LOG("[EnableDiDtConfig] Feature DiDt Already enabled!\n");
+
+ switch (data->registry_data.didt_mode) {
+ case 0:
+ result = vega10_enable_cac_driving_se_didt_config(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 0 Failed!", return result);
+ break;
+ case 2:
+ result = vega10_enable_psm_gc_didt_config(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 2 Failed!", return result);
+ break;
+ case 3:
+ result = vega10_enable_se_edc_config(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 3 Failed!", return result);
+ break;
+ case 1:
+ case 4:
+ case 5:
+ result = vega10_enable_psm_gc_edc_config(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 5 Failed!", return result);
+ break;
+ case 6:
+ result = vega10_enable_se_edc_force_stall_config(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == result), "[EnableDiDt] Attempt to enable DiDt Mode 6 Failed!", return result);
+ break;
+ default:
+ result = -EINVAL;
+ break;
+ }
+
+ if (0 == result) {
+ PP_ASSERT_WITH_CODE((!vega10_enable_smc_features(hwmgr->smumgr, true, data->smu_features[GNLD_DIDT].smu_feature_bitmap)),
+ "[EnableDiDtConfig] Attempt to Enable DiDt feature Failed!", return result);
+ data->smu_features[GNLD_DIDT].enabled = true;
+ }
+ }
+
+ return result;
+}
+
+int vega10_disable_didt_config(struct pp_hwmgr *hwmgr)
+{
+ int result = 0;
+ struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
+
+ if (data->smu_features[GNLD_DIDT].supported) {
+ if (!data->smu_features[GNLD_DIDT].enabled)
+ PP_DBG_LOG("[DisableDiDtConfig] Feature DiDt Already Disabled!\n");
+
+ switch (data->registry_data.didt_mode) {
+ case 0:
+ result = vega10_disable_cac_driving_se_didt_config(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 0 Failed!", return result);
+ break;
+ case 2:
+ result = vega10_disable_psm_gc_didt_config(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 2 Failed!", return result);
+ break;
+ case 3:
+ result = vega10_disable_se_edc_config(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 3 Failed!", return result);
+ break;
+ case 1:
+ case 4:
+ case 5:
+ result = vega10_disable_psm_gc_edc_config(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 5 Failed!", return result);
+ break;
+ case 6:
+ result = vega10_disable_se_edc_force_stall_config(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == result), "[DisableDiDt] Attempt to disable DiDt Mode 6 Failed!", return result);
+ break;
+ default:
+ result = -EINVAL;
+ break;
+ }
+
+ if (0 == result) {
+ PP_ASSERT_WITH_CODE((0 == vega10_enable_smc_features(hwmgr->smumgr, false, data->smu_features[GNLD_DIDT].smu_feature_bitmap)),
+ "[DisableDiDtConfig] Attempt to Disable DiDt feature Failed!", return result);
+ data->smu_features[GNLD_DIDT].enabled = false;
+ }
+ }
+
+ return result;
+}
void vega10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
{
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.h
index 9ecaa27c0bb5..b95771ab89cd 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.h
@@ -31,6 +31,12 @@ enum vega10_pt_config_reg_type {
VEGA10_CONFIGREG_MAX
};
+enum vega10_didt_config_reg_type {
+ VEGA10_CONFIGREG_DIDT = 0,
+ VEGA10_CONFIGREG_GCCAC,
+ VEGA10_CONFIGREG_SECAC
+};
+
/* PowerContainment Features */
#define POWERCONTAINMENT_FEATURE_DTE 0x00000001
#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002
@@ -44,6 +50,13 @@ struct vega10_pt_config_reg {
enum vega10_pt_config_reg_type type;
};
+struct vega10_didt_config_reg {
+ uint32_t offset;
+ uint32_t mask;
+ uint32_t shift;
+ uint32_t value;
+};
+
struct vega10_pt_defaults {
uint8_t SviLoadLineEn;
uint8_t SviLoadLineVddC;
@@ -62,5 +75,8 @@ int vega10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
int vega10_power_control_set_level(struct pp_hwmgr *hwmgr);
int vega10_disable_power_containment(struct pp_hwmgr *hwmgr);
+int vega10_enable_didt_config(struct pp_hwmgr *hwmgr);
+int vega10_disable_didt_config(struct pp_hwmgr *hwmgr);
+
#endif /* _VEGA10_POWERTUNE_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
index 1623644ea49a..e343df190375 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
@@ -31,6 +31,8 @@
#include "cgs_common.h"
#include "vega10_pptable.h"
+#define NUM_DSPCLK_LEVELS 8
+
static void set_hw_cap(struct pp_hwmgr *hwmgr, bool enable,
enum phm_platform_caps cap)
{
@@ -644,11 +646,11 @@ static int get_gfxclk_voltage_dependency_table(
return 0;
}
-static int get_dcefclk_voltage_dependency_table(
+static int get_pix_clk_voltage_dependency_table(
struct pp_hwmgr *hwmgr,
struct phm_ppt_v1_clock_voltage_dependency_table
**pp_vega10_clk_dep_table,
- const ATOM_Vega10_DCEFCLK_Dependency_Table *clk_dep_table)
+ const ATOM_Vega10_PIXCLK_Dependency_Table *clk_dep_table)
{
uint32_t table_size, i;
struct phm_ppt_v1_clock_voltage_dependency_table
@@ -681,6 +683,76 @@ static int get_dcefclk_voltage_dependency_table(
return 0;
}
+static int get_dcefclk_voltage_dependency_table(
+ struct pp_hwmgr *hwmgr,
+ struct phm_ppt_v1_clock_voltage_dependency_table
+ **pp_vega10_clk_dep_table,
+ const ATOM_Vega10_DCEFCLK_Dependency_Table *clk_dep_table)
+{
+ uint32_t table_size, i;
+ uint8_t num_entries;
+ struct phm_ppt_v1_clock_voltage_dependency_table
+ *clk_table;
+ struct cgs_system_info sys_info = {0};
+ uint32_t dev_id;
+ uint32_t rev_id;
+
+ PP_ASSERT_WITH_CODE((clk_dep_table->ucNumEntries != 0),
+ "Invalid PowerPlay Table!", return -1);
+
+/*
+ * Workaround: add another DPM level for pioneer cards, whose VBIOS is
+ * locked down. The extra level is needed to support 3DPM monitors @ 4K120Hz.
+ */
+ sys_info.size = sizeof(struct cgs_system_info);
+ sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
+ cgs_query_system_info(hwmgr->device, &sys_info);
+ dev_id = (uint32_t)sys_info.value;
+
+ sys_info.size = sizeof(struct cgs_system_info);
+ sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV;
+ cgs_query_system_info(hwmgr->device, &sys_info);
+ rev_id = (uint32_t)sys_info.value;
+
+ if (dev_id == 0x6863 && rev_id == 0 &&
+ clk_dep_table->entries[clk_dep_table->ucNumEntries - 1].ulClk < 90000)
+ num_entries = clk_dep_table->ucNumEntries + 1 > NUM_DSPCLK_LEVELS ?
+ NUM_DSPCLK_LEVELS : clk_dep_table->ucNumEntries + 1;
+ else
+ num_entries = clk_dep_table->ucNumEntries;
+
+
+ table_size = sizeof(uint32_t) +
+ sizeof(phm_ppt_v1_clock_voltage_dependency_record) *
+ num_entries;
+
+ clk_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)
+ kzalloc(table_size, GFP_KERNEL);
+
+ if (!clk_table)
+ return -ENOMEM;
+
+ clk_table->count = (uint32_t)num_entries;
+
+ for (i = 0; i < clk_dep_table->ucNumEntries; i++) {
+ clk_table->entries[i].vddInd =
+ clk_dep_table->entries[i].ucVddInd;
+ clk_table->entries[i].clk =
+ le32_to_cpu(clk_dep_table->entries[i].ulClk);
+ }
+
+ if (i < num_entries) {
+ clk_table->entries[i].vddInd = clk_dep_table->entries[i-1].ucVddInd;
+ clk_table->entries[i].clk = 90000;
+ }
+
+ *pp_vega10_clk_dep_table = clk_table;
+
+ return 0;
+}
+
static int get_pcie_table(struct pp_hwmgr *hwmgr,
struct phm_ppt_v1_pcie_table **vega10_pcie_table,
const Vega10_PPTable_Generic_SubTable_Header *table)
@@ -862,21 +934,21 @@ static int init_powerplay_extended_tables(
gfxclk_dep_table);
if (!result && powerplay_table->usPixclkDependencyTableOffset)
- result = get_dcefclk_voltage_dependency_table(hwmgr,
+ result = get_pix_clk_voltage_dependency_table(hwmgr,
&pp_table_info->vdd_dep_on_pixclk,
- (const ATOM_Vega10_DCEFCLK_Dependency_Table*)
+ (const ATOM_Vega10_PIXCLK_Dependency_Table*)
pixclk_dep_table);
if (!result && powerplay_table->usPhyClkDependencyTableOffset)
- result = get_dcefclk_voltage_dependency_table(hwmgr,
+ result = get_pix_clk_voltage_dependency_table(hwmgr,
&pp_table_info->vdd_dep_on_phyclk,
- (const ATOM_Vega10_DCEFCLK_Dependency_Table *)
+ (const ATOM_Vega10_PIXCLK_Dependency_Table *)
phyclk_dep_table);
if (!result && powerplay_table->usDispClkDependencyTableOffset)
- result = get_dcefclk_voltage_dependency_table(hwmgr,
+ result = get_pix_clk_voltage_dependency_table(hwmgr,
&pp_table_info->vdd_dep_on_dispclk,
- (const ATOM_Vega10_DCEFCLK_Dependency_Table *)
+ (const ATOM_Vega10_PIXCLK_Dependency_Table *)
dispclk_dep_table);
if (!result && powerplay_table->usDcefclkDependencyTableOffset)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
index e7ab8eb8a0cf..d44243441d28 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
@@ -321,10 +321,7 @@ int vega10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl)) {
- result = vega10_fan_ctrl_set_static_mode(hwmgr,
- FDO_PWM_MODE_STATIC);
- if (!result)
- result = vega10_fan_ctrl_start_smc_fan_control(hwmgr);
+ result = vega10_fan_ctrl_start_smc_fan_control(hwmgr);
} else
result = vega10_fan_ctrl_set_default_mode(hwmgr);
@@ -633,7 +630,6 @@ int tf_vega10_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl)) {
vega10_fan_ctrl_start_smc_fan_control(hwmgr);
- vega10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
}
return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
index a1ebe1014492..a4c8b09b6f14 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
@@ -164,9 +164,14 @@ enum phm_platform_caps {
PHM_PlatformCaps_EnablePlatformPowerManagement, /* indicates that Platform Power Management feature is supported */
PHM_PlatformCaps_SurpriseRemoval, /* indicates that surprise removal feature is requested */
PHM_PlatformCaps_NewCACVoltage, /* indicates new CAC voltage table support */
+ PHM_PlatformCaps_DiDtSupport, /* for dI/dT feature */
PHM_PlatformCaps_DBRamping, /* for dI/dT feature */
PHM_PlatformCaps_TDRamping, /* for dI/dT feature */
PHM_PlatformCaps_TCPRamping, /* for dI/dT feature */
+ PHM_PlatformCaps_DBRRamping, /* for dI/dT feature */
+ PHM_PlatformCaps_DiDtEDCEnable, /* for dI/dT feature */
+ PHM_PlatformCaps_GCEDC, /* for dI/dT feature */
+ PHM_PlatformCaps_PSM, /* for dI/dT feature */
PHM_PlatformCaps_EnableSMU7ThermalManagement, /* SMC will manage thermal events */
PHM_PlatformCaps_FPS, /* FPS support */
PHM_PlatformCaps_ACP, /* ACP support */
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 47e57bd2c36f..91b0105e8240 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -128,6 +128,8 @@ struct phm_uvd_arbiter {
uint32_t dclk;
uint32_t vclk_ceiling;
uint32_t dclk_ceiling;
+ uint32_t vclk_soft_min;
+ uint32_t dclk_soft_min;
};
struct phm_vce_arbiter {
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h b/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h
index f3f9ebb631a5..822cd8b5bf90 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h
@@ -42,6 +42,12 @@
} \
} while (0)
+#define PP_ASSERT(cond, msg) \
+ do { \
+ if (!(cond)) { \
+ pr_warn("%s\n", msg); \
+ } \
+ } while (0)
#define PP_DBG_LOG(fmt, ...) \
do { \
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h b/drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h
index 227d999b6bd1..a511611ec7e0 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h
@@ -41,6 +41,8 @@ inline static uint32_t soc15_get_register_offset(
reg = MP1_BASE.instance[inst].segment[segment] + offset;
else if (hw_id == DF_HWID)
reg = DF_BASE.instance[inst].segment[segment] + offset;
+ else if (hw_id == GC_HWID)
+ reg = GC_BASE.instance[inst].segment[segment] + offset;
return reg;
}
diff --git a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
index e0e106f1b23a..901c960cfe21 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
@@ -66,7 +66,12 @@
#define PPSMC_MSG_SetMinVddcrSocVoltage 0x22
#define PPSMC_MSG_SetMinVideoFclkFreq 0x23
#define PPSMC_MSG_SetMinDeepSleepDcefclk 0x24
-#define PPSMC_Message_Count 0x25
+#define PPSMC_MSG_ForcePowerDownGfx 0x25
+#define PPSMC_MSG_SetPhyclkVoltageByFreq 0x26
+#define PPSMC_MSG_SetDppclkVoltageByFreq 0x27
+#define PPSMC_MSG_SetSoftMinVcn 0x28
+#define PPSMC_Message_Count 0x29
+
typedef uint16_t PPSMC_Result;
typedef int PPSMC_Msg;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu9.h b/drivers/gpu/drm/amd/powerplay/inc/smu9.h
index 9ef2490c7c2e..550ed675027a 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu9.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu9.h
@@ -55,9 +55,9 @@
#define FEATURE_FW_CTF_BIT 23
#define FEATURE_LED_DISPLAY_BIT 24
#define FEATURE_FAN_CONTROL_BIT 25
-#define FEATURE_VOLTAGE_CONTROLLER_BIT 26
-#define FEATURE_SPARE_27_BIT 27
-#define FEATURE_SPARE_28_BIT 28
+#define FEATURE_FAST_PPT_BIT 26
+#define FEATURE_GFX_EDC_BIT 27
+#define FEATURE_ACG_BIT 28
#define FEATURE_SPARE_29_BIT 29
#define FEATURE_SPARE_30_BIT 30
#define FEATURE_SPARE_31_BIT 31
@@ -90,9 +90,10 @@
#define FFEATURE_FW_CTF_MASK (1 << FEATURE_FW_CTF_BIT )
#define FFEATURE_LED_DISPLAY_MASK (1 << FEATURE_LED_DISPLAY_BIT )
#define FFEATURE_FAN_CONTROL_MASK (1 << FEATURE_FAN_CONTROL_BIT )
-#define FFEATURE_VOLTAGE_CONTROLLER_MASK (1 << FEATURE_VOLTAGE_CONTROLLER_BIT )
-#define FFEATURE_SPARE_27_MASK (1 << FEATURE_SPARE_27_BIT )
-#define FFEATURE_SPARE_28_MASK (1 << FEATURE_SPARE_28_BIT )
+
+#define FEATURE_FAST_PPT_MASK (1 << FEATURE_FAST_PPT_BIT )
+#define FEATURE_GFX_EDC_MASK (1 << FEATURE_GFX_EDC_BIT )
+#define FEATURE_ACG_MASK (1 << FEATURE_ACG_BIT )
#define FFEATURE_SPARE_29_MASK (1 << FEATURE_SPARE_29_BIT )
#define FFEATURE_SPARE_30_MASK (1 << FEATURE_SPARE_30_BIT )
#define FFEATURE_SPARE_31_MASK (1 << FEATURE_SPARE_31_BIT )
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h
index 532186b6f941..f6d6c61f796a 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h
@@ -312,7 +312,10 @@ typedef struct {
PllSetting_t GfxBoostState;
- uint32_t Reserved[14];
+ uint8_t AcgEnable[NUM_GFXCLK_DPM_LEVELS];
+ GbVdroopTable_t AcgBtcGbVdroopTable;
+ QuadraticInt_t AcgAvfsGb;
+ uint32_t Reserved[4];
/* Padding - ignore */
uint32_t MmHubPadding[7]; /* SMU internal use */
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
index 976e942ec694..5d61cc9d4554 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
@@ -131,6 +131,7 @@ struct pp_smumgr_func {
bool (*is_dpm_running)(struct pp_hwmgr *hwmgr);
int (*populate_requested_graphic_levels)(struct pp_hwmgr *hwmgr,
struct amd_pp_profile *request);
+ bool (*is_hw_avfs_present)(struct pp_smumgr *smumgr);
};
struct pp_smumgr {
@@ -202,6 +203,8 @@ extern bool smum_is_dpm_running(struct pp_hwmgr *hwmgr);
extern int smum_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
struct amd_pp_profile *request);
+extern bool smum_is_hw_avfs_present(struct pp_smumgr *smumgr);
+
#define SMUM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define SMUM_FIELD_MASK(reg, field) reg##__##field##_MASK
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h
index b4af9e85dfa5..cb070ebc7de1 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h
@@ -124,6 +124,10 @@ typedef uint16_t PPSMC_Result;
#define PPSMC_MSG_NumOfDisplays 0x56
#define PPSMC_MSG_ReadSerialNumTop32 0x58
#define PPSMC_MSG_ReadSerialNumBottom32 0x59
+#define PPSMC_MSG_RunAcgBtc 0x5C
+#define PPSMC_MSG_RunAcgInClosedLoop 0x5D
+#define PPSMC_MSG_RunAcgInOpenLoop 0x5E
+#define PPSMC_MSG_InitializeAcg 0x5F
#define PPSMC_MSG_GetCurrPkgPwr 0x61
#define PPSMC_Message_Count 0x62
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
index 6a320b27aefd..8712f093d6d9 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
@@ -2129,6 +2129,25 @@ int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
return 0;
}
+
+int fiji_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
+{
+ int ret;
+ struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+
+ if (smu_data->avfs.avfs_btc_status != AVFS_BTC_ENABLEAVFS)
+ return 0;
+
+ ret = smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs);
+
+ if (!ret)
+ /* If this param is not changed, this function could fire unnecessarily */
+ smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY;
+
+ return ret;
+}
+
static int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h
index 0e9e1f2d7238..d9c72d992e30 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h
@@ -48,5 +48,6 @@ int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr);
bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr);
int fiji_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
struct amd_pp_profile *request);
+int fiji_thermal_avfs_enable(struct pp_hwmgr *hwmgr);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
index a1cb78552cf6..6ae948fc524f 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -161,56 +161,47 @@ static int fiji_start_smu_in_non_protection_mode(struct pp_smumgr *smumgr)
static int fiji_setup_pwr_virus(struct pp_smumgr *smumgr)
{
- int i, result = -1;
+ int i;
+ int result = -EINVAL;
uint32_t reg, data;
- const PWR_Command_Table *virus = PwrVirusTable;
- struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
- priv->avfs.AvfsBtcStatus = AVFS_LOAD_VIRUS;
- for (i = 0; (i < PWR_VIRUS_TABLE_SIZE); i++) {
- switch (virus->command) {
+ const PWR_Command_Table *pvirus = PwrVirusTable;
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+
+ for (i = 0; i < PWR_VIRUS_TABLE_SIZE; i++) {
+ switch (pvirus->command) {
case PwrCmdWrite:
- reg = virus->reg;
- data = virus->data;
+ reg = pvirus->reg;
+ data = pvirus->data;
cgs_write_register(smumgr->device, reg, data);
break;
+
case PwrCmdEnd:
- priv->avfs.AvfsBtcStatus = AVFS_BTC_VIRUS_LOADED;
result = 0;
break;
+
default:
- pr_err("Table Exit with Invalid Command!");
- priv->avfs.AvfsBtcStatus = AVFS_BTC_VIRUS_FAIL;
- result = -1;
+ pr_info("Table Exit with Invalid Command!");
+ smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL;
+ result = -EINVAL;
break;
}
- virus++;
+ pvirus++;
}
+
return result;
}
static int fiji_start_avfs_btc(struct pp_smumgr *smumgr)
{
int result = 0;
- struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
- priv->avfs.AvfsBtcStatus = AVFS_BTC_STARTED;
- if (priv->avfs.AvfsBtcParam) {
- if (!smum_send_msg_to_smc_with_parameter(smumgr,
- PPSMC_MSG_PerformBtc, priv->avfs.AvfsBtcParam)) {
- if (!smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs)) {
- priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_UNSAVED;
- result = 0;
- } else {
- pr_err("[AVFS][fiji_start_avfs_btc] Attempt"
- " to Enable AVFS Failed!");
- smum_send_msg_to_smc(smumgr, PPSMC_MSG_DisableAvfs);
- result = -1;
- }
- } else {
- pr_err("[AVFS][fiji_start_avfs_btc] "
- "PerformBTC SMU msg failed");
- result = -1;
+ if (0 != smu_data->avfs.avfs_btc_param) {
+ if (0 != smu7_send_msg_to_smc_with_parameter(smumgr,
+ PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) {
+ pr_info("[AVFS][Fiji_PerformBtc] PerformBTC SMU msg failed");
+ result = -EINVAL;
}
}
/* Soft-Reset to reset the engine before loading uCode */
@@ -224,42 +215,6 @@ static int fiji_start_avfs_btc(struct pp_smumgr *smumgr)
return result;
}
-static int fiji_setup_pm_fuse_for_avfs(struct pp_smumgr *smumgr)
-{
- int result = 0;
- uint32_t table_start;
- uint32_t charz_freq_addr, inversion_voltage_addr, charz_freq;
- uint16_t inversion_voltage;
-
- charz_freq = 0x30750000; /* In 10KHz units 0x00007530 Actual value */
- inversion_voltage = 0x1A04; /* mV Q14.2 0x41A Actual value */
-
- PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU73_Firmware_Header,
- PmFuseTable), &table_start, 0x40000),
- "[AVFS][Fiji_SetupGfxLvlStruct] SMU could not communicate "
- "starting address of PmFuse structure",
- return -1;);
-
- charz_freq_addr = table_start +
- offsetof(struct SMU73_Discrete_PmFuses, PsmCharzFreq);
- inversion_voltage_addr = table_start +
- offsetof(struct SMU73_Discrete_PmFuses, InversionVoltage);
-
- result = smu7_copy_bytes_to_smc(smumgr, charz_freq_addr,
- (uint8_t *)(&charz_freq), sizeof(charz_freq), 0x40000);
- PP_ASSERT_WITH_CODE(0 == result,
- "[AVFS][fiji_setup_pm_fuse_for_avfs] charz_freq could not "
- "be populated.", return -1;);
-
- result = smu7_copy_bytes_to_smc(smumgr, inversion_voltage_addr,
- (uint8_t *)(&inversion_voltage), sizeof(inversion_voltage), 0x40000);
- PP_ASSERT_WITH_CODE(0 == result, "[AVFS][fiji_setup_pm_fuse_for_avfs] "
- "charz_freq could not be populated.", return -1;);
-
- return result;
-}
-
static int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
{
int32_t vr_config;
@@ -298,93 +253,41 @@ static int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
return 0;
}
-/* Work in Progress */
-static int fiji_restore_vft_table(struct pp_smumgr *smumgr)
-{
- struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
-
- if (AVFS_BTC_COMPLETED_SAVED == priv->avfs.AvfsBtcStatus) {
- priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_RESTORED;
- return 0;
- } else
- return -EINVAL;
-}
-
-/* Work in Progress */
-static int fiji_save_vft_table(struct pp_smumgr *smumgr)
-{
- struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
-
- if (AVFS_BTC_COMPLETED_SAVED == priv->avfs.AvfsBtcStatus) {
- priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_RESTORED;
- return 0;
- } else
- return -EINVAL;
-}
-
static int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started)
{
- struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
- switch (priv->avfs.AvfsBtcStatus) {
- case AVFS_BTC_COMPLETED_SAVED: /*S3 State - Pre SMU Start */
- priv->avfs.AvfsBtcStatus = AVFS_BTC_RESTOREVFT_FAILED;
- PP_ASSERT_WITH_CODE(0 == fiji_restore_vft_table(smumgr),
- "[AVFS][fiji_avfs_event_mgr] Could not Copy Graphics "
- "Level table over to SMU",
- return -1;);
- priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_RESTORED;
- break;
- case AVFS_BTC_COMPLETED_RESTORED: /*S3 State - Post SMU Start*/
- priv->avfs.AvfsBtcStatus = AVFS_BTC_SMUMSG_ERROR;
- PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(smumgr,
- 0x666),
- "[AVFS][fiji_avfs_event_mgr] SMU did not respond "
- "correctly to VftTableIsValid Msg",
- return -1;);
- priv->avfs.AvfsBtcStatus = AVFS_BTC_SMUMSG_ERROR;
- PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(smumgr,
- PPSMC_MSG_EnableAvfs),
- "[AVFS][fiji_avfs_event_mgr] SMU did not respond "
- "correctly to EnableAvfs Message Msg",
- return -1;);
- priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_SAVED;
+ switch (smu_data->avfs.avfs_btc_status) {
+ case AVFS_BTC_COMPLETED_PREVIOUSLY:
break;
+
case AVFS_BTC_BOOT: /*Cold Boot State - Post SMU Start*/
if (!smu_started)
break;
- priv->avfs.AvfsBtcStatus = AVFS_BTC_FAILED;
- PP_ASSERT_WITH_CODE(0 == fiji_setup_pm_fuse_for_avfs(smumgr),
- "[AVFS][fiji_avfs_event_mgr] Failure at "
- "fiji_setup_pm_fuse_for_avfs",
- return -1;);
- priv->avfs.AvfsBtcStatus = AVFS_BTC_DPMTABLESETUP_FAILED;
+ smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED;
PP_ASSERT_WITH_CODE(0 == fiji_setup_graphics_level_structure(smumgr),
"[AVFS][fiji_avfs_event_mgr] Could not Copy Graphics Level"
" table over to SMU",
- return -1;);
- priv->avfs.AvfsBtcStatus = AVFS_BTC_VIRUS_FAIL;
+ return -EINVAL;);
+ smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL;
PP_ASSERT_WITH_CODE(0 == fiji_setup_pwr_virus(smumgr),
"[AVFS][fiji_avfs_event_mgr] Could not setup "
"Pwr Virus for AVFS ",
- return -1;);
- priv->avfs.AvfsBtcStatus = AVFS_BTC_FAILED;
+ return -EINVAL;);
+ smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED;
PP_ASSERT_WITH_CODE(0 == fiji_start_avfs_btc(smumgr),
"[AVFS][fiji_avfs_event_mgr] Failure at "
"fiji_start_avfs_btc. AVFS Disabled",
- return -1;);
- priv->avfs.AvfsBtcStatus = AVFS_BTC_SAVEVFT_FAILED;
- PP_ASSERT_WITH_CODE(0 == fiji_save_vft_table(smumgr),
- "[AVFS][fiji_avfs_event_mgr] Could not save VFT Table",
- return -1;);
- priv->avfs.AvfsBtcStatus = AVFS_BTC_COMPLETED_SAVED;
+ return -EINVAL;);
+
+ smu_data->avfs.avfs_btc_status = AVFS_BTC_ENABLEAVFS;
break;
case AVFS_BTC_DISABLED: /* Do nothing */
- break;
case AVFS_BTC_NOTSUPPORTED: /* Do nothing */
+ case AVFS_BTC_ENABLEAVFS:
break;
default:
- pr_err("[AVFS] Something is broken. See log!");
+ pr_err("AVFS failed status is %x !\n", smu_data->avfs.avfs_btc_status);
break;
}
return 0;
@@ -477,19 +380,6 @@ static int fiji_smu_init(struct pp_smumgr *smumgr)
if (smu7_init(smumgr))
return -EINVAL;
- fiji_priv->avfs.AvfsBtcStatus = AVFS_BTC_BOOT;
- if (fiji_is_hw_avfs_present(smumgr))
- /* AVFS Parameter
- * 0 - BTC DC disabled, BTC AC disabled
- * 1 - BTC DC enabled, BTC AC disabled
- * 2 - BTC DC disabled, BTC AC enabled
- * 3 - BTC DC enabled, BTC AC enabled
- * Default is 0 - BTC DC disabled, BTC AC disabled
- */
- fiji_priv->avfs.AvfsBtcParam = 0;
- else
- fiji_priv->avfs.AvfsBtcStatus = AVFS_BTC_NOTSUPPORTED;
-
for (i = 0; i < SMU73_MAX_LEVELS_GRAPHICS; i++)
fiji_priv->activity_target[i] = 30;
@@ -514,10 +404,12 @@ const struct pp_smumgr_func fiji_smu_funcs = {
.init_smc_table = fiji_init_smc_table,
.update_sclk_threshold = fiji_update_sclk_threshold,
.thermal_setup_fan_table = fiji_thermal_setup_fan_table,
+ .thermal_avfs_enable = fiji_thermal_avfs_enable,
.populate_all_graphic_levels = fiji_populate_all_graphic_levels,
.populate_all_memory_levels = fiji_populate_all_memory_levels,
.get_mac_definition = fiji_get_mac_definition,
.initialize_mc_reg_table = fiji_initialize_mc_reg_table,
.is_dpm_running = fiji_is_dpm_running,
.populate_requested_graphic_levels = fiji_populate_requested_graphic_levels,
+ .is_hw_avfs_present = fiji_is_hw_avfs_present,
};
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
index adcbdfb209be..175bf9f8ef9c 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
@@ -28,17 +28,8 @@
#include "smu7_smumgr.h"
-
-struct fiji_smu_avfs {
- enum AVFS_BTC_STATUS AvfsBtcStatus;
- uint32_t AvfsBtcParam;
-};
-
-
struct fiji_smumgr {
struct smu7_smumgr smu7_data;
-
- struct fiji_smu_avfs avfs;
struct SMU73_Discrete_DpmTable smc_state_table;
struct SMU73_Discrete_Ulv ulv_setting;
struct SMU73_Discrete_PmFuses power_tune_table;
@@ -47,7 +38,5 @@ struct fiji_smumgr {
};
-
-
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
index f68e759e8be2..99a00bd39256 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
@@ -1498,7 +1498,7 @@ static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
table_info->vdd_dep_on_sclk;
- if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
+ if (((struct smu7_smumgr *)smu_data)->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
return result;
result = atomctrl_get_avfs_information(hwmgr, &avfs_params);
@@ -1889,7 +1889,7 @@ int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
{
int ret;
struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr);
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index 9616cedc139c..75f43dadc56b 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -60,16 +60,14 @@ static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = {
static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 = {
0x100ea446, 0, 0x30750000, 0x01, 0x01, 0x01, 0x00, 0x00, 0x64, 0x00, 0x00, 0x1f00, 0x00, 0x00};
-
static int polaris10_setup_pwr_virus(struct pp_smumgr *smumgr)
{
int i;
- int result = -1;
+ int result = -EINVAL;
uint32_t reg, data;
const PWR_Command_Table *pvirus = pwr_virus_table;
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
-
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
for (i = 0; i < PWR_VIRUS_TABLE_SIZE; i++) {
switch (pvirus->command) {
@@ -86,7 +84,7 @@ static int polaris10_setup_pwr_virus(struct pp_smumgr *smumgr)
default:
pr_info("Table Exit with Invalid Command!");
smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL;
- result = -1;
+ result = -EINVAL;
break;
}
pvirus++;
@@ -98,7 +96,7 @@ static int polaris10_setup_pwr_virus(struct pp_smumgr *smumgr)
static int polaris10_perform_btc(struct pp_smumgr *smumgr)
{
int result = 0;
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
if (0 != smu_data->avfs.avfs_btc_param) {
if (0 != smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) {
@@ -172,10 +170,11 @@ static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
return 0;
}
+
static int
polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT)
{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
switch (smu_data->avfs.avfs_btc_status) {
case AVFS_BTC_COMPLETED_PREVIOUSLY:
@@ -185,30 +184,31 @@ polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT)
smu_data->avfs.avfs_btc_status = AVFS_BTC_DPMTABLESETUP_FAILED;
PP_ASSERT_WITH_CODE(0 == polaris10_setup_graphics_level_structure(smumgr),
- "[AVFS][Polaris10_AVFSEventMgr] Could not Copy Graphics Level table over to SMU",
- return -1);
+ "[AVFS][Polaris10_AVFSEventMgr] Could not Copy Graphics Level table over to SMU",
+ return -EINVAL);
if (smu_data->avfs.avfs_btc_param > 1) {
pr_info("[AVFS][Polaris10_AVFSEventMgr] AC BTC has not been successfully verified on Fiji. There may be in this setting.");
smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL;
- PP_ASSERT_WITH_CODE(-1 == polaris10_setup_pwr_virus(smumgr),
+ PP_ASSERT_WITH_CODE(0 == polaris10_setup_pwr_virus(smumgr),
"[AVFS][Polaris10_AVFSEventMgr] Could not setup Pwr Virus for AVFS ",
- return -1);
+ return -EINVAL);
}
smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED;
PP_ASSERT_WITH_CODE(0 == polaris10_perform_btc(smumgr),
"[AVFS][Polaris10_AVFSEventMgr] Failure at SmuPolaris10_PerformBTC. AVFS Disabled",
- return -1);
-
+ return -EINVAL);
+ smu_data->avfs.avfs_btc_status = AVFS_BTC_ENABLEAVFS;
break;
case AVFS_BTC_DISABLED:
+ case AVFS_BTC_ENABLEAVFS:
case AVFS_BTC_NOTSUPPORTED:
break;
default:
- pr_info("[AVFS] Something is broken. See log!");
+ pr_err("AVFS failed status is %x!\n", smu_data->avfs.avfs_btc_status);
break;
}
@@ -376,11 +376,6 @@ static int polaris10_smu_init(struct pp_smumgr *smumgr)
if (smu7_init(smumgr))
return -EINVAL;
- if (polaris10_is_hw_avfs_present(smumgr))
- smu_data->avfs.avfs_btc_status = AVFS_BTC_BOOT;
- else
- smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED;
-
for (i = 0; i < SMU74_MAX_LEVELS_GRAPHICS; i++)
smu_data->activity_target[i] = PPPOLARIS10_TARGETACTIVITY_DFLT;
@@ -410,4 +405,5 @@ const struct pp_smumgr_func polaris10_smu_funcs = {
.get_mac_definition = polaris10_get_mac_definition,
.is_dpm_running = polaris10_is_dpm_running,
.populate_requested_graphic_levels = polaris10_populate_requested_graphic_levels,
+ .is_hw_avfs_present = polaris10_is_hw_avfs_present,
};
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h
index 49ebf1d5a53c..5e19c24b0561 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h
@@ -32,11 +32,6 @@
#define SMC_RAM_END 0x40000
-struct polaris10_avfs {
- enum AVFS_BTC_STATUS avfs_btc_status;
- uint32_t avfs_btc_param;
-};
-
struct polaris10_pt_defaults {
uint8_t SviLoadLineEn;
uint8_t SviLoadLineVddC;
@@ -51,8 +46,6 @@ struct polaris10_pt_defaults {
uint16_t BAPMTI_RC[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS];
};
-
-
struct polaris10_range_table {
uint32_t trans_lower_frequency; /* in 10khz */
uint32_t trans_upper_frequency;
@@ -61,14 +54,13 @@ struct polaris10_range_table {
struct polaris10_smumgr {
struct smu7_smumgr smu7_data;
uint8_t protected_mode;
- struct polaris10_avfs avfs;
SMU74_Discrete_DpmTable smc_state_table;
struct SMU74_Discrete_Ulv ulv_setting;
struct SMU74_Discrete_PmFuses power_tune_table;
struct polaris10_range_table range_table[NUM_SCLK_RANGE];
const struct polaris10_pt_defaults *power_tune_defaults;
- uint32_t activity_target[SMU74_MAX_LEVELS_GRAPHICS];
- uint32_t bif_sclk_table[SMU74_MAX_LEVELS_LINK];
+ uint32_t activity_target[SMU74_MAX_LEVELS_GRAPHICS];
+ uint32_t bif_sclk_table[SMU74_MAX_LEVELS_LINK];
};
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
index 35ac27681415..76347ff6d655 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
@@ -540,7 +540,6 @@ int smu7_upload_smu_firmware_image(struct pp_smumgr *smumgr)
return result;
}
-
int smu7_init(struct pp_smumgr *smumgr)
{
struct smu7_smumgr *smu_data;
@@ -596,6 +595,11 @@ int smu7_init(struct pp_smumgr *smumgr)
(cgs_handle_t)smu_data->smu_buffer.handle);
return -EINVAL);
+ if (smum_is_hw_avfs_present(smumgr))
+ smu_data->avfs.avfs_btc_status = AVFS_BTC_BOOT;
+ else
+ smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED;
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
index 919be435b49c..ee5e32d2921e 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
@@ -37,6 +37,11 @@ struct smu7_buffer_entry {
unsigned long handle;
};
+struct smu7_avfs {
+ enum AVFS_BTC_STATUS avfs_btc_status;
+ uint32_t avfs_btc_param;
+};
+
struct smu7_smumgr {
uint8_t *header;
uint8_t *mec_image;
@@ -50,7 +55,8 @@ struct smu7_smumgr {
uint32_t arb_table_start;
uint32_t ulv_setting_starts;
uint8_t security_hard_key;
- uint32_t acpi_optimization;
+ uint32_t acpi_optimization;
+ struct smu7_avfs avfs;
};
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index bcc61ffd13cb..3bdf6478de7f 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -43,7 +43,8 @@ MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");
MODULE_FIRMWARE("amdgpu/polaris11_k_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_smc.bin");
-
+MODULE_FIRMWARE("amdgpu/vega10_smc.bin");
+MODULE_FIRMWARE("amdgpu/vega10_acg_smc.bin");
int smum_early_init(struct pp_instance *handle)
{
@@ -403,3 +404,11 @@ int smum_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
return 0;
}
+
+bool smum_is_hw_avfs_present(struct pp_smumgr *smumgr)
+{
+ if (smumgr->smumgr_funcs->is_hw_avfs_present)
+ return smumgr->smumgr_funcs->is_hw_avfs_present(smumgr);
+
+ return false;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
index 269678443862..408514c965a0 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
@@ -356,6 +356,9 @@ int vega10_set_tools_address(struct pp_smumgr *smumgr)
static int vega10_verify_smc_interface(struct pp_smumgr *smumgr)
{
uint32_t smc_driver_if_version;
+ struct cgs_system_info sys_info = {0};
+ uint32_t dev_id;
+ uint32_t rev_id;
PP_ASSERT_WITH_CODE(!vega10_send_msg_to_smc(smumgr,
PPSMC_MSG_GetDriverIfVersion),
@@ -363,12 +366,27 @@ static int vega10_verify_smc_interface(struct pp_smumgr *smumgr)
return -EINVAL);
vega10_read_arg_from_smc(smumgr, &smc_driver_if_version);
- if (smc_driver_if_version != SMU9_DRIVER_IF_VERSION) {
- pr_err("Your firmware(0x%x) doesn't match \
- SMU9_DRIVER_IF_VERSION(0x%x). \
- Please update your firmware!\n",
- smc_driver_if_version, SMU9_DRIVER_IF_VERSION);
- return -EINVAL;
+ sys_info.size = sizeof(struct cgs_system_info);
+ sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
+ cgs_query_system_info(smumgr->device, &sys_info);
+ dev_id = (uint32_t)sys_info.value;
+
+ sys_info.size = sizeof(struct cgs_system_info);
+ sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV;
+ cgs_query_system_info(smumgr->device, &sys_info);
+ rev_id = (uint32_t)sys_info.value;
+
+ if (!((dev_id == 0x687f) &&
+ ((rev_id == 0xc0) ||
+ (rev_id == 0xc1) ||
+ (rev_id == 0xc3)))) {
+ if (smc_driver_if_version != SMU9_DRIVER_IF_VERSION) {
+ pr_err("Your firmware(0x%x) doesn't match \
+ SMU9_DRIVER_IF_VERSION(0x%x). \
+ Please update your firmware!\n",
+ smc_driver_if_version, SMU9_DRIVER_IF_VERSION);
+ return -EINVAL;
+ }
}
return 0;
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
index dbd4fd3a810b..8bd38102b58e 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
@@ -16,16 +16,16 @@ TRACE_EVENT(amd_sched_job,
TP_ARGS(sched_job),
TP_STRUCT__entry(
__field(struct amd_sched_entity *, entity)
- __field(struct amd_sched_job *, sched_job)
__field(struct dma_fence *, fence)
__field(const char *, name)
+ __field(uint64_t, id)
__field(u32, job_count)
__field(int, hw_job_count)
),
TP_fast_assign(
__entry->entity = sched_job->s_entity;
- __entry->sched_job = sched_job;
+ __entry->id = sched_job->id;
__entry->fence = &sched_job->s_fence->finished;
__entry->name = sched_job->sched->name;
__entry->job_count = kfifo_len(
@@ -33,8 +33,9 @@ TRACE_EVENT(amd_sched_job,
__entry->hw_job_count = atomic_read(
&sched_job->sched->hw_rq_count);
),
- TP_printk("entity=%p, sched job=%p, fence=%p, ring=%s, job count:%u, hw job count:%d",
- __entry->entity, __entry->sched_job, __entry->fence, __entry->name,
+ TP_printk("entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d",
+ __entry->entity, __entry->id,
+ __entry->fence, __entry->name,
__entry->job_count, __entry->hw_job_count)
);
diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c
index ad9a95916f1f..16903dc7fe0d 100644
--- a/drivers/gpu/drm/arc/arcpgu_crtc.c
+++ b/drivers/gpu/drm/arc/arcpgu_crtc.c
@@ -64,6 +64,20 @@ static const struct drm_crtc_funcs arc_pgu_crtc_funcs = {
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};
+static enum drm_mode_status arc_pgu_crtc_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
+ long rate, clk_rate = mode->clock * 1000;
+ long diff = clk_rate / 200; /* +-0.5% allowed by HDMI spec */
+
+ rate = clk_round_rate(arcpgu->clk, clk_rate);
+ if ((max(rate, clk_rate) - min(rate, clk_rate) < diff) && (rate > 0))
+ return MODE_OK;
+
+ return MODE_NOCLOCK;
+}
+
static void arc_pgu_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
@@ -106,7 +120,8 @@ static void arc_pgu_crtc_mode_set_nofb(struct drm_crtc *crtc)
clk_set_rate(arcpgu->clk, m->crtc_clock * 1000);
}
-static void arc_pgu_crtc_enable(struct drm_crtc *crtc)
+static void arc_pgu_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
@@ -116,7 +131,8 @@ static void arc_pgu_crtc_enable(struct drm_crtc *crtc)
ARCPGU_CTRL_ENABLE_MASK);
}
-static void arc_pgu_crtc_disable(struct drm_crtc *crtc)
+static void arc_pgu_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
@@ -129,20 +145,6 @@ static void arc_pgu_crtc_disable(struct drm_crtc *crtc)
~ARCPGU_CTRL_ENABLE_MASK);
}
-static int arc_pgu_crtc_atomic_check(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
-{
- struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
- struct drm_display_mode *mode = &state->adjusted_mode;
- long rate, clk_rate = mode->clock * 1000;
-
- rate = clk_round_rate(arcpgu->clk, clk_rate);
- if (rate != clk_rate)
- return -EINVAL;
-
- return 0;
-}
-
static void arc_pgu_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
@@ -158,15 +160,13 @@ static void arc_pgu_crtc_atomic_begin(struct drm_crtc *crtc,
}
static const struct drm_crtc_helper_funcs arc_pgu_crtc_helper_funcs = {
+ .mode_valid = arc_pgu_crtc_mode_valid,
.mode_set = drm_helper_crtc_mode_set,
.mode_set_base = drm_helper_crtc_mode_set_base,
.mode_set_nofb = arc_pgu_crtc_mode_set_nofb,
- .enable = arc_pgu_crtc_enable,
- .disable = arc_pgu_crtc_disable,
- .prepare = arc_pgu_crtc_disable,
- .commit = arc_pgu_crtc_enable,
- .atomic_check = arc_pgu_crtc_atomic_check,
.atomic_begin = arc_pgu_crtc_atomic_begin,
+ .atomic_enable = arc_pgu_crtc_atomic_enable,
+ .atomic_disable = arc_pgu_crtc_atomic_disable,
};
static void arc_pgu_plane_atomic_update(struct drm_plane *plane,
@@ -218,6 +218,7 @@ static struct drm_plane *arc_pgu_plane_init(struct drm_device *drm)
ret = drm_universal_plane_init(drm, plane, 0xff, &arc_pgu_plane_funcs,
formats, ARRAY_SIZE(formats),
+ NULL,
DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret)
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c
index 3e43a5d4fb09..289eda54e5aa 100644
--- a/drivers/gpu/drm/arc/arcpgu_drv.c
+++ b/drivers/gpu/drm/arc/arcpgu_drv.c
@@ -31,7 +31,7 @@ static void arcpgu_fb_output_poll_changed(struct drm_device *dev)
drm_fbdev_cma_hotplug_event(arcpgu->fbdev);
}
-static struct drm_mode_config_funcs arcpgu_drm_modecfg_funcs = {
+static const struct drm_mode_config_funcs arcpgu_drm_modecfg_funcs = {
.fb_create = drm_fb_cma_create,
.output_poll_changed = arcpgu_fb_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
@@ -48,29 +48,7 @@ static void arcpgu_setup_mode_config(struct drm_device *drm)
drm->mode_config.funcs = &arcpgu_drm_modecfg_funcs;
}
-static int arcpgu_gem_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- int ret;
-
- ret = drm_gem_mmap(filp, vma);
- if (ret)
- return ret;
-
- vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
- return 0;
-}
-
-static const struct file_operations arcpgu_drm_ops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .compat_ioctl = drm_compat_ioctl,
- .poll = drm_poll,
- .read = drm_read,
- .llseek = no_llseek,
- .mmap = arcpgu_gem_mmap,
-};
+DEFINE_DRM_GEM_CMA_FOPS(arcpgu_drm_ops);
static void arcpgu_lastclose(struct drm_device *drm)
{
@@ -142,7 +120,7 @@ static int arcpgu_load(struct drm_device *drm)
return -ENODEV;
}
- platform_set_drvdata(pdev, arcpgu);
+ platform_set_drvdata(pdev, drm);
return 0;
}
@@ -160,11 +138,37 @@ static int arcpgu_unload(struct drm_device *drm)
return 0;
}
+#ifdef CONFIG_DEBUG_FS
+static int arcpgu_show_pxlclock(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *drm = node->minor->dev;
+ struct arcpgu_drm_private *arcpgu = drm->dev_private;
+ unsigned long clkrate = clk_get_rate(arcpgu->clk);
+ unsigned long mode_clock = arcpgu->crtc.mode.crtc_clock * 1000;
+
+ seq_printf(m, "hw : %lu\n", clkrate);
+ seq_printf(m, "mode: %lu\n", mode_clock);
+ return 0;
+}
+
+static struct drm_info_list arcpgu_debugfs_list[] = {
+ { "clocks", arcpgu_show_pxlclock, 0 },
+ { "fb", drm_fb_cma_debugfs_show, 0 },
+};
+
+static int arcpgu_debugfs_init(struct drm_minor *minor)
+{
+ return drm_debugfs_create_files(arcpgu_debugfs_list,
+ ARRAY_SIZE(arcpgu_debugfs_list), minor->debugfs_root, minor);
+}
+#endif
+
static struct drm_driver arcpgu_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
DRIVER_ATOMIC,
.lastclose = arcpgu_lastclose,
- .name = "drm-arcpgu",
+ .name = "arcpgu",
.desc = "ARC PGU Controller",
.date = "20160219",
.major = 1,
@@ -172,8 +176,6 @@ static struct drm_driver arcpgu_drm_driver = {
.patchlevel = 0,
.fops = &arcpgu_drm_ops,
.dumb_create = drm_gem_cma_dumb_create,
- .dumb_map_offset = drm_gem_cma_dumb_map_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_free_object_unlocked = drm_gem_cma_free_object,
@@ -185,6 +187,9 @@ static struct drm_driver arcpgu_drm_driver = {
.gem_prime_vmap = drm_gem_cma_prime_vmap,
.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
.gem_prime_mmap = drm_gem_cma_prime_mmap,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = arcpgu_debugfs_init,
+#endif
};
static int arcpgu_probe(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index d67b6f15e8b8..72b22b805412 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -165,7 +165,8 @@ static void hdlcd_crtc_mode_set_nofb(struct drm_crtc *crtc)
clk_set_rate(hdlcd->clk, m->crtc_clock * 1000);
}
-static void hdlcd_crtc_enable(struct drm_crtc *crtc)
+static void hdlcd_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
@@ -175,7 +176,8 @@ static void hdlcd_crtc_enable(struct drm_crtc *crtc)
drm_crtc_vblank_on(crtc);
}
-static void hdlcd_crtc_disable(struct drm_crtc *crtc)
+static void hdlcd_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
@@ -218,10 +220,10 @@ static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc,
}
static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = {
- .enable = hdlcd_crtc_enable,
- .disable = hdlcd_crtc_disable,
.atomic_check = hdlcd_crtc_atomic_check,
.atomic_begin = hdlcd_crtc_atomic_begin,
+ .atomic_enable = hdlcd_crtc_atomic_enable,
+ .atomic_disable = hdlcd_crtc_atomic_disable,
};
static int hdlcd_plane_atomic_check(struct drm_plane *plane,
@@ -313,6 +315,7 @@ static struct drm_plane *hdlcd_plane_init(struct drm_device *drm)
ret = drm_universal_plane_init(drm, plane, 0xff, &hdlcd_plane_funcs,
formats, ARRAY_SIZE(formats),
+ NULL,
DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret) {
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index d3da87fbd85a..f9bda7b0d2ec 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -253,8 +253,6 @@ static struct drm_driver hdlcd_driver = {
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
- .dumb_map_offset = drm_gem_cma_dumb_map_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
@@ -343,7 +341,6 @@ err_register:
}
err_fbdev:
drm_kms_helper_poll_fini(drm);
- drm_vblank_cleanup(drm);
err_vblank:
pm_runtime_disable(drm->dev);
err_pm_active:
@@ -375,7 +372,6 @@ static void hdlcd_drm_unbind(struct device *dev)
component_unbind_all(dev, drm);
of_node_put(hdlcd->crtc.port);
hdlcd->crtc.port = NULL;
- drm_vblank_cleanup(drm);
pm_runtime_get_sync(drm->dev);
drm_irq_uninstall(drm);
pm_runtime_put_sync(drm->dev);
diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c
index 4bb38a21efec..3615d18a7ddf 100644
--- a/drivers/gpu/drm/arm/malidp_crtc.c
+++ b/drivers/gpu/drm/arm/malidp_crtc.c
@@ -46,7 +46,8 @@ static enum drm_mode_status malidp_crtc_mode_valid(struct drm_crtc *crtc,
return MODE_OK;
}
-static void malidp_crtc_enable(struct drm_crtc *crtc)
+static void malidp_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
struct malidp_hw_device *hwdev = malidp->dev;
@@ -69,7 +70,8 @@ static void malidp_crtc_enable(struct drm_crtc *crtc)
drm_crtc_vblank_on(crtc);
}
-static void malidp_crtc_disable(struct drm_crtc *crtc)
+static void malidp_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
struct malidp_hw_device *hwdev = malidp->dev;
@@ -408,9 +410,9 @@ static int malidp_crtc_atomic_check(struct drm_crtc *crtc,
static const struct drm_crtc_helper_funcs malidp_crtc_helper_funcs = {
.mode_valid = malidp_crtc_mode_valid,
- .enable = malidp_crtc_enable,
- .disable = malidp_crtc_disable,
.atomic_check = malidp_crtc_atomic_check,
+ .atomic_enable = malidp_crtc_atomic_enable,
+ .atomic_disable = malidp_crtc_atomic_disable,
};
static struct drm_crtc_state *malidp_crtc_duplicate_state(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 01b13d219917..1a57cc28955e 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -225,7 +225,7 @@ static void malidp_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_commit_modeset_disables(drm, state);
- for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
+ for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
malidp_atomic_commit_update_gamma(crtc, old_crtc_state);
malidp_atomic_commit_update_coloradj(crtc, old_crtc_state);
malidp_atomic_commit_se_config(crtc, old_crtc_state);
@@ -331,8 +331,6 @@ static struct drm_driver malidp_driver = {
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
- .dumb_map_offset = drm_gem_cma_dumb_map_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 600fa7bd7f52..94e7e3fa3408 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -128,7 +128,6 @@ static void malidp_plane_atomic_print_state(struct drm_printer *p,
static const struct drm_plane_funcs malidp_de_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .set_property = drm_atomic_helper_plane_set_property,
.destroy = malidp_de_plane_destroy,
.reset = malidp_plane_reset,
.atomic_duplicate_state = malidp_duplicate_plane_state,
@@ -398,7 +397,7 @@ int malidp_de_planes_init(struct drm_device *drm)
DRM_PLANE_TYPE_OVERLAY;
ret = drm_universal_plane_init(drm, &plane->base, crtcs,
&malidp_de_plane_funcs, formats,
- n, plane_type, NULL);
+ n, NULL, plane_type, NULL);
if (ret < 0)
goto cleanup;
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 4fe19fde84f9..2a4d163ac76f 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -334,16 +334,6 @@ static void armada_drm_vblank_off(struct armada_crtc *dcrtc)
armada_drm_plane_work_run(dcrtc, dcrtc->crtc.primary);
}
-void armada_drm_crtc_gamma_set(struct drm_crtc *crtc, u16 r, u16 g, u16 b,
- int idx)
-{
-}
-
-void armada_drm_crtc_gamma_get(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
- int idx)
-{
-}
-
/* The mode_config.mutex will be held for this call */
static void armada_drm_crtc_dpms(struct drm_crtc *crtc, int dpms)
{
@@ -1150,13 +1140,13 @@ int armada_drm_plane_init(struct armada_plane *plane)
return 0;
}
-static struct drm_prop_enum_list armada_drm_csc_yuv_enum_list[] = {
+static const struct drm_prop_enum_list armada_drm_csc_yuv_enum_list[] = {
{ CSC_AUTO, "Auto" },
{ CSC_YUV_CCIR601, "CCIR601" },
{ CSC_YUV_CCIR709, "CCIR709" },
};
-static struct drm_prop_enum_list armada_drm_csc_rgb_enum_list[] = {
+static const struct drm_prop_enum_list armada_drm_csc_rgb_enum_list[] = {
{ CSC_AUTO, "Auto" },
{ CSC_RGB_COMPUTER, "Computer system" },
{ CSC_RGB_STUDIO, "Studio" },
@@ -1269,6 +1259,7 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
&armada_primary_plane_funcs,
armada_primary_formats,
ARRAY_SIZE(armada_primary_formats),
+ NULL,
DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret) {
kfree(primary);
@@ -1329,8 +1320,7 @@ armada_lcd_bind(struct device *dev, struct device *master, void *data)
port = of_get_child_by_name(parent, "port");
of_node_put(np);
if (!port) {
- dev_err(dev, "no port node found in %s\n",
- parent->full_name);
+ dev_err(dev, "no port node found in %pOF\n", parent);
return -ENXIO;
}
@@ -1364,7 +1354,7 @@ static int armada_lcd_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id armada_lcd_of_match[] = {
+static const struct of_device_id armada_lcd_of_match[] = {
{
.compatible = "marvell,dove-lcd",
.data = &armada510_ops,
diff --git a/drivers/gpu/drm/armada/armada_crtc.h b/drivers/gpu/drm/armada/armada_crtc.h
index 7e8906d3ae26..bab11f483575 100644
--- a/drivers/gpu/drm/armada/armada_crtc.h
+++ b/drivers/gpu/drm/armada/armada_crtc.h
@@ -102,8 +102,6 @@ struct armada_crtc {
};
#define drm_to_armada_crtc(c) container_of(c, struct armada_crtc, crtc)
-void armada_drm_crtc_gamma_set(struct drm_crtc *, u16, u16, u16, int);
-void armada_drm_crtc_gamma_get(struct drm_crtc *, u16 *, u16 *, u16 *, int);
void armada_drm_crtc_update_regs(struct armada_crtc *, struct armada_regs *);
void armada_drm_crtc_plane_disable(struct armada_crtc *dcrtc,
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index e618fab7f519..0b3227c039d7 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -232,8 +232,8 @@ static void armada_add_endpoints(struct device *dev,
of_node_put(remote);
continue;
} else if (!of_device_is_available(remote->parent)) {
- dev_warn(dev, "parent device of %s is not available\n",
- remote->full_name);
+ dev_warn(dev, "parent device of %pOF is not available\n",
+ remote);
of_node_put(remote);
continue;
}
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index 602dfead8eef..29c7d047b152 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -81,7 +81,6 @@ static int armada_fb_create(struct drm_fb_helper *fbh,
strlcpy(info->fix.id, "armada-drmfb", sizeof(info->fix.id));
info->par = fbh;
- info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &armada_fb_ops;
info->fix.smem_start = obj->phys_addr;
info->fix.smem_len = obj->obj.size;
@@ -118,8 +117,6 @@ static int armada_fb_probe(struct drm_fb_helper *fbh,
}
static const struct drm_fb_helper_funcs armada_fb_helper_funcs = {
- .gamma_set = armada_drm_crtc_gamma_set,
- .gamma_get = armada_drm_crtc_gamma_get,
.fb_probe = armada_fb_probe,
};
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index e9a29df4b443..edc44910d79f 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -388,7 +388,7 @@ static const uint32_t armada_ovl_formats[] = {
DRM_FORMAT_BGR565,
};
-static struct drm_prop_enum_list armada_drm_colorkey_enum_list[] = {
+static const struct drm_prop_enum_list armada_drm_colorkey_enum_list[] = {
{ CKMODE_DISABLE, "disabled" },
{ CKMODE_Y, "Y component" },
{ CKMODE_U, "U component" },
@@ -460,6 +460,7 @@ int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
&armada_ovl_plane_funcs,
armada_ovl_formats,
ARRAY_SIZE(armada_ovl_formats),
+ NULL,
DRM_PLANE_TYPE_OVERLAY, NULL);
if (ret) {
kfree(dplane);
diff --git a/drivers/gpu/drm/ast/ast_dp501.c b/drivers/gpu/drm/ast/ast_dp501.c
index 76f07f38b941..749646ae365f 100644
--- a/drivers/gpu/drm/ast/ast_dp501.c
+++ b/drivers/gpu/drm/ast/ast_dp501.c
@@ -4,16 +4,11 @@
#include "ast_drv.h"
MODULE_FIRMWARE("ast_dp501_fw.bin");
-int ast_load_dp501_microcode(struct drm_device *dev)
+static int ast_load_dp501_microcode(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
- static char *fw_name = "ast_dp501_fw.bin";
- int err;
- err = request_firmware(&ast->dp501_fw, fw_name, dev->dev);
- if (err)
- return err;
- return 0;
+ return request_firmware(&ast->dp501_fw, "ast_dp501_fw.bin", dev->dev);
}
static void send_ack(struct ast_private *ast)
@@ -187,7 +182,7 @@ bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size)
return false;
}
-bool ast_launch_m68k(struct drm_device *dev)
+static bool ast_launch_m68k(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
u32 i, data, len = 0;
@@ -201,7 +196,11 @@ bool ast_launch_m68k(struct drm_device *dev)
if (ast->dp501_fw_addr) {
fw_addr = ast->dp501_fw_addr;
len = 32*1024;
- } else if (ast->dp501_fw) {
+ } else {
+ if (!ast->dp501_fw &&
+ ast_load_dp501_microcode(dev) < 0)
+ return false;
+
fw_addr = (u8 *)ast->dp501_fw->data;
len = ast->dp501_fw->size;
}
@@ -432,3 +431,11 @@ void ast_init_3rdtx(struct drm_device *dev)
}
}
}
+
+void ast_release_firmware(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+
+ release_firmware(ast->dp501_fw);
+ ast->dp501_fw = NULL;
+}
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index fd7c9eec92e4..69dab82a3771 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -197,7 +197,6 @@ static struct drm_driver driver = {
.load = ast_driver_load,
.unload = ast_driver_unload,
- .set_busid = drm_pci_set_busid,
.fops = &ast_fops,
.name = DRIVER_NAME,
@@ -210,7 +209,6 @@ static struct drm_driver driver = {
.gem_free_object_unlocked = ast_gem_free_object,
.dumb_create = ast_dumb_create,
.dumb_map_offset = ast_dumb_mmap_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
};
@@ -221,11 +219,11 @@ static int __init ast_init(void)
if (ast_modeset == 0)
return -EINVAL;
- return drm_pci_init(&driver, &ast_pci_driver);
+ return pci_register_driver(&ast_pci_driver);
}
static void __exit ast_exit(void)
{
- drm_pci_exit(&driver, &ast_pci_driver);
+ pci_unregister_driver(&ast_pci_driver);
}
module_init(ast_init);
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 8880f0b62e9c..e6c4cd3dc50e 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -245,7 +245,6 @@ struct ast_connector {
struct ast_crtc {
struct drm_crtc base;
- u8 lut_r[256], lut_g[256], lut_b[256];
struct drm_gem_object *cursor_bo;
uint64_t cursor_addr;
int cursor_width, cursor_height;
@@ -401,11 +400,10 @@ void ast_post_gpu(struct drm_device *dev);
u32 ast_mindwm(struct ast_private *ast, u32 r);
void ast_moutdwm(struct ast_private *ast, u32 r, u32 v);
/* ast dp501 */
-int ast_load_dp501_microcode(struct drm_device *dev);
void ast_set_dp501_video_output(struct drm_device *dev, u8 mode);
-bool ast_launch_m68k(struct drm_device *dev);
bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size);
bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata);
u8 ast_get_dp501_max_clk(struct drm_device *dev);
void ast_init_3rdtx(struct drm_device *dev);
+void ast_release_firmware(struct drm_device *dev);
#endif
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index 4ad4acd0ccab..0cd827e11fa2 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -231,7 +231,6 @@ static int astfb_create(struct drm_fb_helper *helper,
strcpy(info->fix.id, "astdrmfb");
- info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &astfb_ops;
info->apertures->ranges[0].base = pci_resource_start(dev->pdev, 0);
@@ -255,27 +254,7 @@ out:
return ret;
}
-static void ast_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
- u16 blue, int regno)
-{
- struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
- ast_crtc->lut_r[regno] = red >> 8;
- ast_crtc->lut_g[regno] = green >> 8;
- ast_crtc->lut_b[regno] = blue >> 8;
-}
-
-static void ast_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, int regno)
-{
- struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
- *red = ast_crtc->lut_r[regno] << 8;
- *green = ast_crtc->lut_g[regno] << 8;
- *blue = ast_crtc->lut_b[regno] << 8;
-}
-
static const struct drm_fb_helper_funcs ast_fb_helper_funcs = {
- .gamma_set = ast_fb_gamma_set,
- .gamma_get = ast_fb_gamma_get,
.fb_probe = astfb_create,
};
@@ -287,7 +266,7 @@ static void ast_fbdev_destroy(struct drm_device *dev,
drm_fb_helper_unregister_fbi(&afbdev->helper);
if (afb->obj) {
- drm_gem_object_unreference_unlocked(afb->obj);
+ drm_gem_object_put_unlocked(afb->obj);
afb->obj = NULL;
}
drm_fb_helper_fini(&afbdev->helper);
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 262c2c0e43b4..dac355812adc 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -387,9 +387,9 @@ static void ast_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct ast_framebuffer *ast_fb = to_ast_framebuffer(fb);
- drm_gem_object_unreference_unlocked(ast_fb->obj);
+ drm_gem_object_put_unlocked(ast_fb->obj);
drm_framebuffer_cleanup(fb);
- kfree(fb);
+ kfree(ast_fb);
}
static const struct drm_framebuffer_funcs ast_fb_funcs = {
@@ -429,13 +429,13 @@ ast_user_framebuffer_create(struct drm_device *dev,
ast_fb = kzalloc(sizeof(*ast_fb), GFP_KERNEL);
if (!ast_fb) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ERR_PTR(-ENOMEM);
}
ret = ast_framebuffer_init(dev, ast_fb, mode_cmd, obj);
if (ret) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
kfree(ast_fb);
return ERR_PTR(ret);
}
@@ -576,6 +576,7 @@ void ast_driver_unload(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
+ ast_release_firmware(dev);
kfree(ast->dp501_fw_addr);
ast_mode_fini(dev);
ast_fbdev_fini(dev);
@@ -627,7 +628,7 @@ int ast_dumb_create(struct drm_file *file,
return ret;
ret = drm_gem_handle_create(file, gobj, &handle);
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
if (ret)
return ret;
@@ -675,7 +676,7 @@ ast_dumb_mmap_offset(struct drm_file *file,
bo = gem_to_ast_bo(obj);
*offset = ast_bo_mmap_offset(bo);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return 0;
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index aaef0a652f10..6f3849ec0c1d 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -63,15 +63,18 @@ static inline void ast_load_palette_index(struct ast_private *ast,
static void ast_crtc_load_lut(struct drm_crtc *crtc)
{
struct ast_private *ast = crtc->dev->dev_private;
- struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+ u16 *r, *g, *b;
int i;
if (!crtc->enabled)
return;
+ r = crtc->gamma_store;
+ g = r + crtc->gamma_size;
+ b = g + crtc->gamma_size;
+
for (i = 0; i < 256; i++)
- ast_load_palette_index(ast, i, ast_crtc->lut_r[i],
- ast_crtc->lut_g[i], ast_crtc->lut_b[i]);
+ ast_load_palette_index(ast, i, *r++ >> 8, *g++ >> 8, *b++ >> 8);
}
static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mode *mode,
@@ -613,7 +616,23 @@ static int ast_crtc_mode_set(struct drm_crtc *crtc,
static void ast_crtc_disable(struct drm_crtc *crtc)
{
+ int ret;
+ DRM_DEBUG_KMS("\n");
+ ast_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ if (crtc->primary->fb) {
+ struct ast_framebuffer *ast_fb = to_ast_framebuffer(crtc->primary->fb);
+ struct drm_gem_object *obj = ast_fb->obj;
+ struct ast_bo *bo = gem_to_ast_bo(obj);
+
+ ret = ast_bo_reserve(bo, false);
+ if (ret)
+ return;
+
+ ast_bo_push_sysram(bo);
+ ast_bo_unreserve(bo);
+ }
+ crtc->primary->fb = NULL;
}
static void ast_crtc_prepare(struct drm_crtc *crtc)
@@ -633,7 +652,6 @@ static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = {
.mode_set = ast_crtc_mode_set,
.mode_set_base = ast_crtc_mode_set_base,
.disable = ast_crtc_disable,
- .load_lut = ast_crtc_load_lut,
.prepare = ast_crtc_prepare,
.commit = ast_crtc_commit,
@@ -648,15 +666,6 @@ static int ast_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, uint32_t size,
struct drm_modeset_acquire_ctx *ctx)
{
- struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
- int i;
-
- /* userspace palettes are always correct as is */
- for (i = 0; i < size; i++) {
- ast_crtc->lut_r[i] = red[i] >> 8;
- ast_crtc->lut_g[i] = green[i] >> 8;
- ast_crtc->lut_b[i] = blue[i] >> 8;
- }
ast_crtc_load_lut(crtc);
return 0;
@@ -681,7 +690,6 @@ static const struct drm_crtc_funcs ast_crtc_funcs = {
static int ast_crtc_init(struct drm_device *dev)
{
struct ast_crtc *crtc;
- int i;
crtc = kzalloc(sizeof(struct ast_crtc), GFP_KERNEL);
if (!crtc)
@@ -690,12 +698,6 @@ static int ast_crtc_init(struct drm_device *dev)
drm_crtc_init(dev, &crtc->base, &ast_crtc_funcs);
drm_mode_crtc_set_gamma_size(&crtc->base, 256);
drm_crtc_helper_add(&crtc->base, &ast_crtc_helper_funcs);
-
- for (i = 0; i < 256; i++) {
- crtc->lut_r[i] = i;
- crtc->lut_g[i] = i;
- crtc->lut_b[i] = i;
- }
return 0;
}
@@ -948,7 +950,7 @@ static void ast_cursor_fini(struct drm_device *dev)
{
struct ast_private *ast = dev->dev_private;
ttm_bo_kunmap(&ast->cache_kmap);
- drm_gem_object_unreference_unlocked(ast->cursor_cache);
+ drm_gem_object_put_unlocked(ast->cursor_cache);
}
int ast_mode_init(struct drm_device *dev)
@@ -1213,10 +1215,10 @@ static int ast_cursor_set(struct drm_crtc *crtc,
ast_show_cursor(crtc);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return 0;
fail:
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 58084985e6cf..696a15dc2f3f 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -323,10 +323,8 @@ int ast_bo_create(struct drm_device *dev, int size, int align,
return -ENOMEM;
ret = drm_gem_object_init(dev, &astbo->gem, size);
- if (ret) {
- kfree(astbo);
- return ret;
- }
+ if (ret)
+ goto error;
astbo->bo.bdev = &ast->ttm.bdev;
@@ -340,10 +338,13 @@ int ast_bo_create(struct drm_device *dev, int size, int align,
align >> PAGE_SHIFT, false, NULL, acc_size,
NULL, NULL, ast_bo_ttm_destroy);
if (ret)
- return ret;
+ goto error;
*pastbo = astbo;
return 0;
+error:
+ kfree(astbo);
+ return ret;
}
static inline u64 ast_bo_gpu_offset(struct ast_bo *bo)
@@ -376,7 +377,7 @@ int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr)
int ast_bo_unpin(struct ast_bo *bo)
{
- int i, ret;
+ int i;
if (!bo->pin_count) {
DRM_ERROR("unpin bad %p\n", bo);
return 0;
@@ -387,11 +388,7 @@ int ast_bo_unpin(struct ast_bo *bo)
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
- if (ret)
- return ret;
-
- return 0;
+ return ttm_bo_validate(&bo->bo, &bo->placement, false, false);
}
int ast_bo_push_sysram(struct ast_bo *bo)
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index 53489859997b..d73281095fac 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -149,7 +149,8 @@ atmel_hlcdc_crtc_mode_valid(struct drm_crtc *c,
return atmel_hlcdc_dc_mode_valid(crtc->dc, mode);
}
-static void atmel_hlcdc_crtc_disable(struct drm_crtc *c)
+static void atmel_hlcdc_crtc_atomic_disable(struct drm_crtc *c,
+ struct drm_crtc_state *old_state)
{
struct drm_device *dev = c->dev;
struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
@@ -183,7 +184,8 @@ static void atmel_hlcdc_crtc_disable(struct drm_crtc *c)
pm_runtime_put_sync(dev->dev);
}
-static void atmel_hlcdc_crtc_enable(struct drm_crtc *c)
+static void atmel_hlcdc_crtc_atomic_enable(struct drm_crtc *c,
+ struct drm_crtc_state *old_state)
{
struct drm_device *dev = c->dev;
struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
@@ -235,7 +237,7 @@ static int atmel_hlcdc_crtc_select_output_mode(struct drm_crtc_state *state)
crtc = drm_crtc_to_atmel_hlcdc_crtc(state->crtc);
- for_each_connector_in_state(state->state, connector, cstate, i) {
+ for_each_new_connector_in_state(state->state, connector, cstate, i) {
struct drm_display_info *info = &connector->display_info;
unsigned int supported_fmts = 0;
int j;
@@ -319,11 +321,11 @@ static const struct drm_crtc_helper_funcs lcdc_crtc_helper_funcs = {
.mode_set = drm_helper_crtc_mode_set,
.mode_set_nofb = atmel_hlcdc_crtc_mode_set_nofb,
.mode_set_base = drm_helper_crtc_mode_set_base,
- .disable = atmel_hlcdc_crtc_disable,
- .enable = atmel_hlcdc_crtc_enable,
.atomic_check = atmel_hlcdc_crtc_atomic_check,
.atomic_begin = atmel_hlcdc_crtc_atomic_begin,
.atomic_flush = atmel_hlcdc_crtc_atomic_flush,
+ .atomic_enable = atmel_hlcdc_crtc_atomic_enable,
+ .atomic_disable = atmel_hlcdc_crtc_atomic_disable,
};
static void atmel_hlcdc_crtc_destroy(struct drm_crtc *c)
@@ -429,6 +431,7 @@ static const struct drm_crtc_funcs atmel_hlcdc_crtc_funcs = {
.atomic_destroy_state = atmel_hlcdc_crtc_destroy_state,
.enable_vblank = atmel_hlcdc_crtc_enable_vblank,
.disable_vblank = atmel_hlcdc_crtc_disable_vblank,
+ .gamma_set = drm_atomic_helper_legacy_gamma_set,
};
int atmel_hlcdc_crtc_create(struct drm_device *dev)
@@ -484,6 +487,10 @@ int atmel_hlcdc_crtc_create(struct drm_device *dev)
drm_crtc_helper_add(&crtc->base, &lcdc_crtc_helper_funcs);
drm_crtc_vblank_reset(&crtc->base);
+ drm_mode_crtc_set_gamma_size(&crtc->base, ATMEL_HLCDC_CLUT_SIZE);
+ drm_crtc_enable_color_mgmt(&crtc->base, 0, false,
+ ATMEL_HLCDC_CLUT_SIZE);
+
dc->crtc = &crtc->base;
return 0;
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 30dbffdb45a3..74d66e11f688 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -42,6 +42,7 @@ static const struct atmel_hlcdc_layer_desc atmel_hlcdc_at91sam9n12_layers[] = {
.default_color = 3,
.general_config = 4,
},
+ .clut_offset = 0x400,
},
};
@@ -73,6 +74,7 @@ static const struct atmel_hlcdc_layer_desc atmel_hlcdc_at91sam9x5_layers[] = {
.disc_pos = 5,
.disc_size = 6,
},
+ .clut_offset = 0x400,
},
{
.name = "overlay1",
@@ -91,6 +93,7 @@ static const struct atmel_hlcdc_layer_desc atmel_hlcdc_at91sam9x5_layers[] = {
.chroma_key_mask = 8,
.general_config = 9,
},
+ .clut_offset = 0x800,
},
{
.name = "high-end-overlay",
@@ -112,6 +115,7 @@ static const struct atmel_hlcdc_layer_desc atmel_hlcdc_at91sam9x5_layers[] = {
.scaler_config = 13,
.csc = 14,
},
+ .clut_offset = 0x1000,
},
{
.name = "cursor",
@@ -131,6 +135,7 @@ static const struct atmel_hlcdc_layer_desc atmel_hlcdc_at91sam9x5_layers[] = {
.chroma_key_mask = 8,
.general_config = 9,
},
+ .clut_offset = 0x1400,
},
};
@@ -162,6 +167,7 @@ static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d3_layers[] = {
.disc_pos = 5,
.disc_size = 6,
},
+ .clut_offset = 0x600,
},
{
.name = "overlay1",
@@ -180,6 +186,7 @@ static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d3_layers[] = {
.chroma_key_mask = 8,
.general_config = 9,
},
+ .clut_offset = 0xa00,
},
{
.name = "overlay2",
@@ -198,6 +205,7 @@ static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d3_layers[] = {
.chroma_key_mask = 8,
.general_config = 9,
},
+ .clut_offset = 0xe00,
},
{
.name = "high-end-overlay",
@@ -223,6 +231,7 @@ static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d3_layers[] = {
},
.csc = 14,
},
+ .clut_offset = 0x1200,
},
{
.name = "cursor",
@@ -244,6 +253,7 @@ static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d3_layers[] = {
.general_config = 9,
.scaler_config = 13,
},
+ .clut_offset = 0x1600,
},
};
@@ -275,6 +285,7 @@ static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d4_layers[] = {
.disc_pos = 5,
.disc_size = 6,
},
+ .clut_offset = 0x600,
},
{
.name = "overlay1",
@@ -293,6 +304,7 @@ static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d4_layers[] = {
.chroma_key_mask = 8,
.general_config = 9,
},
+ .clut_offset = 0xa00,
},
{
.name = "overlay2",
@@ -311,6 +323,7 @@ static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d4_layers[] = {
.chroma_key_mask = 8,
.general_config = 9,
},
+ .clut_offset = 0xe00,
},
{
.name = "high-end-overlay",
@@ -336,6 +349,7 @@ static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d4_layers[] = {
},
.csc = 14,
},
+ .clut_offset = 0x1200,
},
};
@@ -451,8 +465,7 @@ static void atmel_hlcdc_fb_output_poll_changed(struct drm_device *dev)
{
struct atmel_hlcdc_dc *dc = dev->dev_private;
- if (dc->fbdev)
- drm_fbdev_cma_hotplug_event(dc->fbdev);
+ drm_fbdev_cma_hotplug_event(dc->fbdev);
}
struct atmel_hlcdc_dc_commit {
@@ -526,14 +539,13 @@ static int atmel_hlcdc_dc_atomic_commit(struct drm_device *dev,
dc->commit.pending = true;
spin_unlock(&dc->commit.wait.lock);
- if (ret) {
- kfree(commit);
- goto error;
- }
+ if (ret)
+ goto err_free;
- /* Swap the state, this is the point of no return. */
- drm_atomic_helper_swap_state(state, true);
+ /* We have our own synchronization through the commit lock. */
+ BUG_ON(drm_atomic_helper_swap_state(state, false) < 0);
+ /* Swap state succeeded, this is the point of no return. */
drm_atomic_state_get(state);
if (async)
queue_work(dc->wq, &commit->work);
@@ -542,6 +554,8 @@ static int atmel_hlcdc_dc_atomic_commit(struct drm_device *dev,
return 0;
+err_free:
+ kfree(commit);
error:
drm_atomic_helper_cleanup_planes(dev, state);
return ret;
@@ -747,8 +761,6 @@ static struct drm_driver atmel_hlcdc_dc_driver = {
.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
.gem_prime_mmap = drm_gem_cma_prime_mmap,
.dumb_create = drm_gem_cma_dumb_create,
- .dumb_map_offset = drm_gem_cma_dumb_map_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
.fops = &fops,
.name = "atmel-hlcdc",
.desc = "Atmel HLCD Controller DRM",
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
index b0596a84c1b8..4237b0446721 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
@@ -88,6 +88,11 @@
#define ATMEL_HLCDC_YUV422SWP BIT(17)
#define ATMEL_HLCDC_DSCALEOPT BIT(20)
+#define ATMEL_HLCDC_C1_MODE ATMEL_HLCDC_CLUT_MODE(0)
+#define ATMEL_HLCDC_C2_MODE ATMEL_HLCDC_CLUT_MODE(1)
+#define ATMEL_HLCDC_C4_MODE ATMEL_HLCDC_CLUT_MODE(2)
+#define ATMEL_HLCDC_C8_MODE ATMEL_HLCDC_CLUT_MODE(3)
+
#define ATMEL_HLCDC_XRGB4444_MODE ATMEL_HLCDC_RGB_MODE(0)
#define ATMEL_HLCDC_ARGB4444_MODE ATMEL_HLCDC_RGB_MODE(1)
#define ATMEL_HLCDC_RGBA4444_MODE ATMEL_HLCDC_RGB_MODE(2)
@@ -142,6 +147,8 @@
#define ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE BIT(2)
#define ATMEL_HLCDC_DMA_CHANNEL_DSCR_OVERRUN BIT(3)
+#define ATMEL_HLCDC_CLUT_SIZE 256
+
#define ATMEL_HLCDC_MAX_LAYERS 6
/**
@@ -259,6 +266,7 @@ struct atmel_hlcdc_layer_desc {
int id;
int regs_offset;
int cfgs_offset;
+ int clut_offset;
struct atmel_hlcdc_formats *formats;
struct atmel_hlcdc_layer_cfg_layout layout;
int max_width;
@@ -414,6 +422,14 @@ static inline u32 atmel_hlcdc_layer_read_cfg(struct atmel_hlcdc_layer *layer,
(cfgid * sizeof(u32)));
}
+static inline void atmel_hlcdc_layer_write_clut(struct atmel_hlcdc_layer *layer,
+ unsigned int c, u32 val)
+{
+ regmap_write(layer->regmap,
+ layer->desc->clut_offset + c * sizeof(u32),
+ val);
+}
+
static inline void atmel_hlcdc_layer_init(struct atmel_hlcdc_layer *layer,
const struct atmel_hlcdc_layer_desc *desc,
struct regmap *regmap)
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index 1124200bb280..703c2d13603f 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -83,6 +83,7 @@ drm_plane_state_to_atmel_hlcdc_plane_state(struct drm_plane_state *s)
#define SUBPIXEL_MASK 0xffff
static uint32_t rgb_formats[] = {
+ DRM_FORMAT_C8,
DRM_FORMAT_XRGB4444,
DRM_FORMAT_ARGB4444,
DRM_FORMAT_RGBA4444,
@@ -100,6 +101,7 @@ struct atmel_hlcdc_formats atmel_hlcdc_plane_rgb_formats = {
};
static uint32_t rgb_and_yuv_formats[] = {
+ DRM_FORMAT_C8,
DRM_FORMAT_XRGB4444,
DRM_FORMAT_ARGB4444,
DRM_FORMAT_RGBA4444,
@@ -128,6 +130,9 @@ struct atmel_hlcdc_formats atmel_hlcdc_plane_rgb_and_yuv_formats = {
static int atmel_hlcdc_format_to_plane_mode(u32 format, u32 *mode)
{
switch (format) {
+ case DRM_FORMAT_C8:
+ *mode = ATMEL_HLCDC_C8_MODE;
+ break;
case DRM_FORMAT_XRGB4444:
*mode = ATMEL_HLCDC_XRGB4444_MODE;
break;
@@ -424,6 +429,29 @@ static void atmel_hlcdc_plane_update_format(struct atmel_hlcdc_plane *plane,
ATMEL_HLCDC_LAYER_FORMAT_CFG, cfg);
}
+static void atmel_hlcdc_plane_update_clut(struct atmel_hlcdc_plane *plane)
+{
+ struct drm_crtc *crtc = plane->base.crtc;
+ struct drm_color_lut *lut;
+ int idx;
+
+ if (!crtc || !crtc->state)
+ return;
+
+ if (!crtc->state->color_mgmt_changed || !crtc->state->gamma_lut)
+ return;
+
+ lut = (struct drm_color_lut *)crtc->state->gamma_lut->data;
+
+ for (idx = 0; idx < ATMEL_HLCDC_CLUT_SIZE; idx++, lut++) {
+ u32 val = ((lut->red << 8) & 0xff0000) |
+ (lut->green & 0xff00) |
+ (lut->blue >> 8);
+
+ atmel_hlcdc_layer_write_clut(&plane->layer, idx, val);
+ }
+}
+
static void atmel_hlcdc_plane_update_buffers(struct atmel_hlcdc_plane *plane,
struct atmel_hlcdc_plane_state *state)
{
@@ -768,6 +796,7 @@ static void atmel_hlcdc_plane_atomic_update(struct drm_plane *p,
atmel_hlcdc_plane_update_pos_and_size(plane, state);
atmel_hlcdc_plane_update_general_settings(plane, state);
atmel_hlcdc_plane_update_format(plane, state);
+ atmel_hlcdc_plane_update_clut(plane);
atmel_hlcdc_plane_update_buffers(plane, state);
atmel_hlcdc_plane_update_disc_area(plane, state);
@@ -809,7 +838,7 @@ static void atmel_hlcdc_plane_destroy(struct drm_plane *p)
struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
if (plane->base.fb)
- drm_framebuffer_unreference(plane->base.fb);
+ drm_framebuffer_put(plane->base.fb);
drm_plane_cleanup(p);
}
@@ -911,7 +940,7 @@ void atmel_hlcdc_plane_irq(struct atmel_hlcdc_plane *plane)
desc->name);
}
-static struct drm_plane_helper_funcs atmel_hlcdc_layer_plane_helper_funcs = {
+static const struct drm_plane_helper_funcs atmel_hlcdc_layer_plane_helper_funcs = {
.atomic_check = atmel_hlcdc_plane_atomic_check,
.atomic_update = atmel_hlcdc_plane_atomic_update,
.atomic_disable = atmel_hlcdc_plane_atomic_disable,
@@ -958,7 +987,7 @@ static void atmel_hlcdc_plane_reset(struct drm_plane *p)
state = drm_plane_state_to_atmel_hlcdc_plane_state(p->state);
if (state->base.fb)
- drm_framebuffer_unreference(state->base.fb);
+ drm_framebuffer_put(state->base.fb);
kfree(state);
p->state = NULL;
@@ -996,7 +1025,7 @@ atmel_hlcdc_plane_atomic_duplicate_state(struct drm_plane *p)
}
if (copy->base.fb)
- drm_framebuffer_reference(copy->base.fb);
+ drm_framebuffer_get(copy->base.fb);
return &copy->base;
}
@@ -1015,15 +1044,14 @@ static void atmel_hlcdc_plane_atomic_destroy_state(struct drm_plane *p,
}
if (s->fb)
- drm_framebuffer_unreference(s->fb);
+ drm_framebuffer_put(s->fb);
kfree(state);
}
-static struct drm_plane_funcs layer_plane_funcs = {
+static const struct drm_plane_funcs layer_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .set_property = drm_atomic_helper_plane_set_property,
.destroy = atmel_hlcdc_plane_destroy,
.reset = atmel_hlcdc_plane_reset,
.atomic_duplicate_state = atmel_hlcdc_plane_atomic_duplicate_state,
@@ -1058,7 +1086,8 @@ static int atmel_hlcdc_plane_create(struct drm_device *dev,
ret = drm_universal_plane_init(dev, &plane->base, 0,
&layer_plane_funcs,
desc->formats->formats,
- desc->formats->nformats, type, NULL);
+ desc->formats->nformats,
+ NULL, type, NULL);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index aa342515ddf4..7b20318483e4 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -84,7 +84,6 @@ static struct drm_driver bochs_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET,
.load = bochs_load,
.unload = bochs_unload,
- .set_busid = drm_pci_set_busid,
.fops = &bochs_fops,
.name = "bochs-drm",
.desc = "bochs dispi vga interface (qemu stdvga)",
@@ -94,7 +93,6 @@ static struct drm_driver bochs_driver = {
.gem_free_object_unlocked = bochs_gem_free_object,
.dumb_create = bochs_dumb_create,
.dumb_map_offset = bochs_dumb_mmap_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
};
/* ---------------------------------------------------------------------- */
@@ -224,12 +222,12 @@ static int __init bochs_init(void)
if (bochs_modeset == 0)
return -EINVAL;
- return drm_pci_init(&bochs_driver, &bochs_pci_driver);
+ return pci_register_driver(&bochs_pci_driver);
}
static void __exit bochs_exit(void)
{
- drm_pci_exit(&bochs_driver, &bochs_pci_driver);
+ pci_unregister_driver(&bochs_pci_driver);
}
module_init(bochs_init);
diff --git a/drivers/gpu/drm/bochs/bochs_fbdev.c b/drivers/gpu/drm/bochs/bochs_fbdev.c
index c38deffa14de..14eb8d0d5a00 100644
--- a/drivers/gpu/drm/bochs/bochs_fbdev.c
+++ b/drivers/gpu/drm/bochs/bochs_fbdev.c
@@ -23,9 +23,9 @@ static int bochsfb_mmap(struct fb_info *info,
static struct fb_ops bochsfb_ops = {
.owner = THIS_MODULE,
DRM_FB_HELPER_DEFAULT_OPS,
- .fb_fillrect = drm_fb_helper_sys_fillrect,
- .fb_copyarea = drm_fb_helper_sys_copyarea,
- .fb_imageblit = drm_fb_helper_sys_imageblit,
+ .fb_fillrect = drm_fb_helper_cfb_fillrect,
+ .fb_copyarea = drm_fb_helper_cfb_copyarea,
+ .fb_imageblit = drm_fb_helper_cfb_imageblit,
.fb_mmap = bochsfb_mmap,
};
@@ -118,7 +118,6 @@ static int bochsfb_create(struct drm_fb_helper *helper,
strcpy(info->fix.id, "bochsdrmfb");
- info->flags = FBINFO_DEFAULT;
info->fbops = &bochsfb_ops;
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index f75ab6278113..b2431aee7887 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -785,8 +785,7 @@ adv7511_connector_detect(struct drm_connector *connector, bool force)
return adv7511_detect(adv, connector);
}
-static struct drm_connector_funcs adv7511_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
+static const struct drm_connector_funcs adv7511_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = adv7511_connector_detect,
.destroy = drm_connector_cleanup,
@@ -857,7 +856,7 @@ static int adv7511_bridge_attach(struct drm_bridge *bridge)
return ret;
}
-static struct drm_bridge_funcs adv7511_bridge_funcs = {
+static const struct drm_bridge_funcs adv7511_bridge_funcs = {
.enable = adv7511_bridge_enable,
.disable = adv7511_bridge_disable,
.mode_set = adv7511_bridge_mode_set,
@@ -1126,11 +1125,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
adv7511->bridge.funcs = &adv7511_bridge_funcs;
adv7511->bridge.of_node = dev->of_node;
- ret = drm_bridge_add(&adv7511->bridge);
- if (ret) {
- dev_err(dev, "failed to add adv7511 bridge\n");
- goto err_unregister_cec;
- }
+ drm_bridge_add(&adv7511->bridge);
adv7511_audio_init(dev, adv7511);
diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix-anx78xx.c
index 9006578b9789..9385eb0b1ee4 100644
--- a/drivers/gpu/drm/bridge/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix-anx78xx.c
@@ -1002,7 +1002,6 @@ static enum drm_connector_status anx78xx_detect(struct drm_connector *connector,
}
static const struct drm_connector_funcs anx78xx_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = anx78xx_detect,
.destroy = drm_connector_cleanup,
@@ -1097,7 +1096,8 @@ static void anx78xx_bridge_mode_set(struct drm_bridge *bridge,
mutex_lock(&anx78xx->lock);
- err = drm_hdmi_avi_infoframe_from_display_mode(&frame, adjusted_mode);
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, adjusted_mode,
+ false);
if (err) {
DRM_ERROR("Failed to setup AVI infoframe: %d\n", err);
goto unlock;
@@ -1438,11 +1438,7 @@ static int anx78xx_i2c_probe(struct i2c_client *client,
anx78xx->bridge.funcs = &anx78xx_bridge_funcs;
- err = drm_bridge_add(&anx78xx->bridge);
- if (err < 0) {
- DRM_ERROR("Failed to add drm bridge: %d\n", err);
- goto err_poweroff;
- }
+ drm_bridge_add(&anx78xx->bridge);
/* If cable is pulled out, just poweroff and wait for HPD event */
if (!gpiod_get_value(anx78xx->pdata.gpiod_hpd))
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 4c758ed51939..5dd3f1cd074a 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1005,7 +1005,6 @@ analogix_dp_detect(struct drm_connector *connector, bool force)
}
static const struct drm_connector_funcs analogix_dp_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = analogix_dp_detect,
.destroy = drm_connector_cleanup,
diff --git a/drivers/gpu/drm/bridge/dumb-vga-dac.c b/drivers/gpu/drm/bridge/dumb-vga-dac.c
index 831a606c4706..de5e7dee7ad6 100644
--- a/drivers/gpu/drm/bridge/dumb-vga-dac.c
+++ b/drivers/gpu/drm/bridge/dumb-vga-dac.c
@@ -92,7 +92,6 @@ dumb_vga_connector_detect(struct drm_connector *connector, bool force)
}
static const struct drm_connector_funcs dumb_vga_con_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.detect = dumb_vga_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
@@ -177,7 +176,6 @@ static struct i2c_adapter *dumb_vga_retrieve_ddc(struct device *dev)
static int dumb_vga_probe(struct platform_device *pdev)
{
struct dumb_vga *vga;
- int ret;
vga = devm_kzalloc(&pdev->dev, sizeof(*vga), GFP_KERNEL);
if (!vga)
@@ -186,7 +184,7 @@ static int dumb_vga_probe(struct platform_device *pdev)
vga->vdd = devm_regulator_get_optional(&pdev->dev, "vdd");
if (IS_ERR(vga->vdd)) {
- ret = PTR_ERR(vga->vdd);
+ int ret = PTR_ERR(vga->vdd);
if (ret == -EPROBE_DEFER)
return -EPROBE_DEFER;
vga->vdd = NULL;
@@ -207,11 +205,9 @@ static int dumb_vga_probe(struct platform_device *pdev)
vga->bridge.funcs = &dumb_vga_bridge_funcs;
vga->bridge.of_node = pdev->dev.of_node;
- ret = drm_bridge_add(&vga->bridge);
- if (ret && !IS_ERR(vga->ddc))
- i2c_put_adapter(vga->ddc);
+ drm_bridge_add(&vga->bridge);
- return ret;
+ return 0;
}
static int dumb_vga_remove(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
index 11f11086a68f..7ccadba7c98c 100644
--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
@@ -193,7 +193,6 @@ static enum drm_connector_status ge_b850v3_lvds_detect(
}
static const struct drm_connector_funcs ge_b850v3_lvds_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = ge_b850v3_lvds_detect,
.destroy = drm_connector_cleanup,
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index 4f64e717e01b..d64a3283822a 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -238,7 +238,6 @@ static const struct drm_connector_helper_funcs ptn3460_connector_helper_funcs =
};
static const struct drm_connector_funcs ptn3460_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
@@ -332,11 +331,7 @@ static int ptn3460_probe(struct i2c_client *client,
ptn_bridge->bridge.funcs = &ptn3460_bridge_funcs;
ptn_bridge->bridge.of_node = dev->of_node;
- ret = drm_bridge_add(&ptn_bridge->bridge);
- if (ret) {
- DRM_ERROR("Failed to add bridge\n");
- return ret;
- }
+ drm_bridge_add(&ptn_bridge->bridge);
i2c_set_clientdata(client, ptn_bridge);
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index 67fe19e5a9c6..e0cca19b4044 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -50,7 +50,6 @@ panel_bridge_connector_helper_funcs = {
};
static const struct drm_connector_funcs panel_bridge_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.reset = drm_atomic_helper_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
@@ -158,7 +157,6 @@ struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel,
u32 connector_type)
{
struct panel_bridge *panel_bridge;
- int ret;
if (!panel)
return ERR_PTR(-EINVAL);
@@ -176,9 +174,7 @@ struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel,
panel_bridge->bridge.of_node = panel->dev->of_node;
#endif
- ret = drm_bridge_add(&panel_bridge->bridge);
- if (ret)
- return ERR_PTR(ret);
+ drm_bridge_add(&panel_bridge->bridge);
return &panel_bridge->bridge;
}
@@ -198,3 +194,33 @@ void drm_panel_bridge_remove(struct drm_bridge *bridge)
devm_kfree(panel_bridge->panel->dev, bridge);
}
EXPORT_SYMBOL(drm_panel_bridge_remove);
+
+static void devm_drm_panel_bridge_release(struct device *dev, void *res)
+{
+ struct drm_bridge **bridge = res;
+
+ drm_panel_bridge_remove(*bridge);
+}
+
+struct drm_bridge *devm_drm_panel_bridge_add(struct device *dev,
+ struct drm_panel *panel,
+ u32 connector_type)
+{
+ struct drm_bridge **ptr, *bridge;
+
+ ptr = devres_alloc(devm_drm_panel_bridge_release, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ bridge = drm_panel_bridge_add(panel, connector_type);
+ if (!IS_ERR(bridge)) {
+ *ptr = bridge;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return bridge;
+}
+EXPORT_SYMBOL(devm_drm_panel_bridge_add);
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index 6f22f9fec9bf..81198f5e9afa 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -476,7 +476,6 @@ static const struct drm_connector_helper_funcs ps8622_connector_helper_funcs = {
};
static const struct drm_connector_funcs ps8622_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
@@ -598,11 +597,7 @@ static int ps8622_probe(struct i2c_client *client,
ps8622->bridge.funcs = &ps8622_bridge_funcs;
ps8622->bridge.of_node = dev->of_node;
- ret = drm_bridge_add(&ps8622->bridge);
- if (ret) {
- DRM_ERROR("Failed to add bridge\n");
- return ret;
- }
+ drm_bridge_add(&ps8622->bridge);
i2c_set_clientdata(client, ps8622);
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index 9b87067c022c..b1ab4ab09532 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -124,7 +124,6 @@ sii902x_connector_detect(struct drm_connector *connector, bool force)
}
static const struct drm_connector_funcs sii902x_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.detect = sii902x_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
@@ -269,7 +268,7 @@ static void sii902x_bridge_mode_set(struct drm_bridge *bridge,
if (ret)
return;
- ret = drm_hdmi_avi_infoframe_from_display_mode(&frame, adj);
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&frame, adj, false);
if (ret < 0) {
DRM_ERROR("couldn't fill AVI infoframe\n");
return;
@@ -418,11 +417,7 @@ static int sii902x_probe(struct i2c_client *client,
sii902x->bridge.funcs = &sii902x_bridge_funcs;
sii902x->bridge.of_node = dev->of_node;
- ret = drm_bridge_add(&sii902x->bridge);
- if (ret) {
- dev_err(dev, "Failed to add drm_bridge\n");
- return ret;
- }
+ drm_bridge_add(&sii902x->bridge);
i2c_set_clientdata(client, sii902x);
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index 2d51a2269fc6..5131bfb94f06 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -597,9 +597,9 @@ static void sii8620_mt_read_devcap(struct sii8620 *ctx, bool xdevcap)
static void sii8620_mt_read_devcap_reg_recv(struct sii8620 *ctx,
struct sii8620_mt_msg *msg)
{
- u8 reg = msg->reg[0] & 0x7f;
+ u8 reg = msg->reg[1] & 0x7f;
- if (msg->reg[0] & 0x80)
+ if (msg->reg[1] & 0x80)
ctx->xdevcap[reg] = msg->ret;
else
ctx->devcap[reg] = msg->ret;
diff --git a/drivers/gpu/drm/bridge/synopsys/Kconfig b/drivers/gpu/drm/bridge/synopsys/Kconfig
index 53e78d092d18..3cc53b44186e 100644
--- a/drivers/gpu/drm/bridge/synopsys/Kconfig
+++ b/drivers/gpu/drm/bridge/synopsys/Kconfig
@@ -2,6 +2,7 @@ config DRM_DW_HDMI
tristate
select DRM_KMS_HELPER
select REGMAP_MMIO
+ select CEC_CORE if CEC_NOTIFIER
config DRM_DW_HDMI_AHB_AUDIO
tristate "Synopsys Designware AHB Audio interface"
@@ -22,3 +23,18 @@ config DRM_DW_HDMI_I2S_AUDIO
help
Support the I2S Audio interface which is part of the Synopsys
Designware HDMI block.
+
+config DRM_DW_HDMI_CEC
+ tristate "Synopsis Designware CEC interface"
+ depends on DRM_DW_HDMI
+ select CEC_CORE
+ select CEC_NOTIFIER
+ help
+ Support the CEC interface which is part of the Synopsys
+ Designware HDMI block.
+
+config DRM_DW_MIPI_DSI
+ tristate
+ select DRM_KMS_HELPER
+ select DRM_MIPI_DSI
+ select DRM_PANEL_BRIDGE
diff --git a/drivers/gpu/drm/bridge/synopsys/Makefile b/drivers/gpu/drm/bridge/synopsys/Makefile
index 17aa7a65b57e..5dad97d920be 100644
--- a/drivers/gpu/drm/bridge/synopsys/Makefile
+++ b/drivers/gpu/drm/bridge/synopsys/Makefile
@@ -3,3 +3,6 @@
obj-$(CONFIG_DRM_DW_HDMI) += dw-hdmi.o
obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw-hdmi-ahb-audio.o
obj-$(CONFIG_DRM_DW_HDMI_I2S_AUDIO) += dw-hdmi-i2s-audio.o
+obj-$(CONFIG_DRM_DW_HDMI_CEC) += dw-hdmi-cec.o
+
+obj-$(CONFIG_DRM_DW_MIPI_DSI) += dw-mipi-dsi.o
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
index 8f2d1379c880..cf3f0caf9c63 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
@@ -517,7 +517,7 @@ static snd_pcm_uframes_t dw_hdmi_pointer(struct snd_pcm_substream *substream)
return bytes_to_frames(runtime, dw->buf_offset);
}
-static struct snd_pcm_ops snd_dw_hdmi_ops = {
+static const struct snd_pcm_ops snd_dw_hdmi_ops = {
.open = dw_hdmi_open,
.close = dw_hdmi_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
new file mode 100644
index 000000000000..6c323510f128
--- /dev/null
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
@@ -0,0 +1,327 @@
+/*
+ * Designware HDMI CEC driver
+ *
+ * Copyright (C) 2015-2017 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include <drm/drm_edid.h>
+
+#include <media/cec.h>
+#include <media/cec-notifier.h>
+
+#include "dw-hdmi-cec.h"
+
+enum {
+ HDMI_IH_CEC_STAT0 = 0x0106,
+ HDMI_IH_MUTE_CEC_STAT0 = 0x0186,
+
+ HDMI_CEC_CTRL = 0x7d00,
+ CEC_CTRL_START = BIT(0),
+ CEC_CTRL_FRAME_TYP = 3 << 1,
+ CEC_CTRL_RETRY = 0 << 1,
+ CEC_CTRL_NORMAL = 1 << 1,
+ CEC_CTRL_IMMED = 2 << 1,
+
+ HDMI_CEC_STAT = 0x7d01,
+ CEC_STAT_DONE = BIT(0),
+ CEC_STAT_EOM = BIT(1),
+ CEC_STAT_NACK = BIT(2),
+ CEC_STAT_ARBLOST = BIT(3),
+ CEC_STAT_ERROR_INIT = BIT(4),
+ CEC_STAT_ERROR_FOLL = BIT(5),
+ CEC_STAT_WAKEUP = BIT(6),
+
+ HDMI_CEC_MASK = 0x7d02,
+ HDMI_CEC_POLARITY = 0x7d03,
+ HDMI_CEC_INT = 0x7d04,
+ HDMI_CEC_ADDR_L = 0x7d05,
+ HDMI_CEC_ADDR_H = 0x7d06,
+ HDMI_CEC_TX_CNT = 0x7d07,
+ HDMI_CEC_RX_CNT = 0x7d08,
+ HDMI_CEC_TX_DATA0 = 0x7d10,
+ HDMI_CEC_RX_DATA0 = 0x7d20,
+ HDMI_CEC_LOCK = 0x7d30,
+ HDMI_CEC_WKUPCTRL = 0x7d31,
+};
+
+struct dw_hdmi_cec {
+ struct dw_hdmi *hdmi;
+ const struct dw_hdmi_cec_ops *ops;
+ u32 addresses;
+ struct cec_adapter *adap;
+ struct cec_msg rx_msg;
+ unsigned int tx_status;
+ bool tx_done;
+ bool rx_done;
+ struct cec_notifier *notify;
+ int irq;
+};
+
+static void dw_hdmi_write(struct dw_hdmi_cec *cec, u8 val, int offset)
+{
+ cec->ops->write(cec->hdmi, val, offset);
+}
+
+static u8 dw_hdmi_read(struct dw_hdmi_cec *cec, int offset)
+{
+ return cec->ops->read(cec->hdmi, offset);
+}
+
+static int dw_hdmi_cec_log_addr(struct cec_adapter *adap, u8 logical_addr)
+{
+ struct dw_hdmi_cec *cec = cec_get_drvdata(adap);
+
+ if (logical_addr == CEC_LOG_ADDR_INVALID)
+ cec->addresses = 0;
+ else
+ cec->addresses |= BIT(logical_addr) | BIT(15);
+
+ dw_hdmi_write(cec, cec->addresses & 255, HDMI_CEC_ADDR_L);
+ dw_hdmi_write(cec, cec->addresses >> 8, HDMI_CEC_ADDR_H);
+
+ return 0;
+}
+
+static int dw_hdmi_cec_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+{
+ struct dw_hdmi_cec *cec = cec_get_drvdata(adap);
+ unsigned int i, ctrl;
+
+ switch (signal_free_time) {
+ case CEC_SIGNAL_FREE_TIME_RETRY:
+ ctrl = CEC_CTRL_RETRY;
+ break;
+ case CEC_SIGNAL_FREE_TIME_NEW_INITIATOR:
+ default:
+ ctrl = CEC_CTRL_NORMAL;
+ break;
+ case CEC_SIGNAL_FREE_TIME_NEXT_XFER:
+ ctrl = CEC_CTRL_IMMED;
+ break;
+ }
+
+ for (i = 0; i < msg->len; i++)
+ dw_hdmi_write(cec, msg->msg[i], HDMI_CEC_TX_DATA0 + i);
+
+ dw_hdmi_write(cec, msg->len, HDMI_CEC_TX_CNT);
+ dw_hdmi_write(cec, ctrl | CEC_CTRL_START, HDMI_CEC_CTRL);
+
+ return 0;
+}
+
+static irqreturn_t dw_hdmi_cec_hardirq(int irq, void *data)
+{
+ struct cec_adapter *adap = data;
+ struct dw_hdmi_cec *cec = cec_get_drvdata(adap);
+ unsigned int stat = dw_hdmi_read(cec, HDMI_IH_CEC_STAT0);
+ irqreturn_t ret = IRQ_HANDLED;
+
+ if (stat == 0)
+ return IRQ_NONE;
+
+ dw_hdmi_write(cec, stat, HDMI_IH_CEC_STAT0);
+
+ if (stat & CEC_STAT_ERROR_INIT) {
+ cec->tx_status = CEC_TX_STATUS_ERROR;
+ cec->tx_done = true;
+ ret = IRQ_WAKE_THREAD;
+ } else if (stat & CEC_STAT_DONE) {
+ cec->tx_status = CEC_TX_STATUS_OK;
+ cec->tx_done = true;
+ ret = IRQ_WAKE_THREAD;
+ } else if (stat & CEC_STAT_NACK) {
+ cec->tx_status = CEC_TX_STATUS_NACK;
+ cec->tx_done = true;
+ ret = IRQ_WAKE_THREAD;
+ }
+
+ if (stat & CEC_STAT_EOM) {
+ unsigned int len, i;
+
+ len = dw_hdmi_read(cec, HDMI_CEC_RX_CNT);
+ if (len > sizeof(cec->rx_msg.msg))
+ len = sizeof(cec->rx_msg.msg);
+
+ for (i = 0; i < len; i++)
+ cec->rx_msg.msg[i] =
+ dw_hdmi_read(cec, HDMI_CEC_RX_DATA0 + i);
+
+ dw_hdmi_write(cec, 0, HDMI_CEC_LOCK);
+
+ cec->rx_msg.len = len;
+ smp_wmb();
+ cec->rx_done = true;
+
+ ret = IRQ_WAKE_THREAD;
+ }
+
+ return ret;
+}
+
+static irqreturn_t dw_hdmi_cec_thread(int irq, void *data)
+{
+ struct cec_adapter *adap = data;
+ struct dw_hdmi_cec *cec = cec_get_drvdata(adap);
+
+ if (cec->tx_done) {
+ cec->tx_done = false;
+ cec_transmit_attempt_done(adap, cec->tx_status);
+ }
+ if (cec->rx_done) {
+ cec->rx_done = false;
+ smp_rmb();
+ cec_received_msg(adap, &cec->rx_msg);
+ }
+ return IRQ_HANDLED;
+}
+
+static int dw_hdmi_cec_enable(struct cec_adapter *adap, bool enable)
+{
+ struct dw_hdmi_cec *cec = cec_get_drvdata(adap);
+
+ if (!enable) {
+ dw_hdmi_write(cec, ~0, HDMI_CEC_MASK);
+ dw_hdmi_write(cec, ~0, HDMI_IH_MUTE_CEC_STAT0);
+ dw_hdmi_write(cec, 0, HDMI_CEC_POLARITY);
+
+ cec->ops->disable(cec->hdmi);
+ } else {
+ unsigned int irqs;
+
+ dw_hdmi_write(cec, 0, HDMI_CEC_CTRL);
+ dw_hdmi_write(cec, ~0, HDMI_IH_CEC_STAT0);
+ dw_hdmi_write(cec, 0, HDMI_CEC_LOCK);
+
+ dw_hdmi_cec_log_addr(cec->adap, CEC_LOG_ADDR_INVALID);
+
+ cec->ops->enable(cec->hdmi);
+
+ irqs = CEC_STAT_ERROR_INIT | CEC_STAT_NACK | CEC_STAT_EOM |
+ CEC_STAT_DONE;
+ dw_hdmi_write(cec, irqs, HDMI_CEC_POLARITY);
+ dw_hdmi_write(cec, ~irqs, HDMI_CEC_MASK);
+ dw_hdmi_write(cec, ~irqs, HDMI_IH_MUTE_CEC_STAT0);
+ }
+ return 0;
+}
+
+static const struct cec_adap_ops dw_hdmi_cec_ops = {
+ .adap_enable = dw_hdmi_cec_enable,
+ .adap_log_addr = dw_hdmi_cec_log_addr,
+ .adap_transmit = dw_hdmi_cec_transmit,
+};
+
+static void dw_hdmi_cec_del(void *data)
+{
+ struct dw_hdmi_cec *cec = data;
+
+ cec_delete_adapter(cec->adap);
+}
+
+static int dw_hdmi_cec_probe(struct platform_device *pdev)
+{
+ struct dw_hdmi_cec_data *data = dev_get_platdata(&pdev->dev);
+ struct dw_hdmi_cec *cec;
+ int ret;
+
+ if (!data)
+ return -ENXIO;
+
+ /*
+ * Our device is just a convenience - we want to link to the real
+ * hardware device here, so that userspace can see the association
+ * between the HDMI hardware and its associated CEC chardev.
+ */
+ cec = devm_kzalloc(&pdev->dev, sizeof(*cec), GFP_KERNEL);
+ if (!cec)
+ return -ENOMEM;
+
+ cec->irq = data->irq;
+ cec->ops = data->ops;
+ cec->hdmi = data->hdmi;
+
+ platform_set_drvdata(pdev, cec);
+
+ dw_hdmi_write(cec, 0, HDMI_CEC_TX_CNT);
+ dw_hdmi_write(cec, ~0, HDMI_CEC_MASK);
+ dw_hdmi_write(cec, ~0, HDMI_IH_MUTE_CEC_STAT0);
+ dw_hdmi_write(cec, 0, HDMI_CEC_POLARITY);
+
+ cec->adap = cec_allocate_adapter(&dw_hdmi_cec_ops, cec, "dw_hdmi",
+ CEC_CAP_LOG_ADDRS | CEC_CAP_TRANSMIT |
+ CEC_CAP_RC | CEC_CAP_PASSTHROUGH,
+ CEC_MAX_LOG_ADDRS);
+ if (IS_ERR(cec->adap))
+ return PTR_ERR(cec->adap);
+
+ /* override the module pointer */
+ cec->adap->owner = THIS_MODULE;
+
+ ret = devm_add_action(&pdev->dev, dw_hdmi_cec_del, cec);
+ if (ret) {
+ cec_delete_adapter(cec->adap);
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, cec->irq,
+ dw_hdmi_cec_hardirq,
+ dw_hdmi_cec_thread, IRQF_SHARED,
+ "dw-hdmi-cec", cec->adap);
+ if (ret < 0)
+ return ret;
+
+ cec->notify = cec_notifier_get(pdev->dev.parent);
+ if (!cec->notify)
+ return -ENOMEM;
+
+ ret = cec_register_adapter(cec->adap, pdev->dev.parent);
+ if (ret < 0) {
+ cec_notifier_put(cec->notify);
+ return ret;
+ }
+
+ /*
+ * CEC documentation says we must not call cec_delete_adapter
+ * after a successful call to cec_register_adapter().
+ */
+ devm_remove_action(&pdev->dev, dw_hdmi_cec_del, cec);
+
+ cec_register_cec_notifier(cec->adap, cec->notify);
+
+ return 0;
+}
+
+static int dw_hdmi_cec_remove(struct platform_device *pdev)
+{
+ struct dw_hdmi_cec *cec = platform_get_drvdata(pdev);
+
+ cec_unregister_adapter(cec->adap);
+ cec_notifier_put(cec->notify);
+
+ return 0;
+}
+
+static struct platform_driver dw_hdmi_cec_driver = {
+ .probe = dw_hdmi_cec_probe,
+ .remove = dw_hdmi_cec_remove,
+ .driver = {
+ .name = "dw-hdmi-cec",
+ },
+};
+module_platform_driver(dw_hdmi_cec_driver);
+
+MODULE_AUTHOR("Russell King <rmk+kernel@armlinux.org.uk>");
+MODULE_DESCRIPTION("Synopsys Designware HDMI CEC driver for i.MX");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS(PLATFORM_MODULE_PREFIX "dw-hdmi-cec");
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.h b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.h
new file mode 100644
index 000000000000..cf4dc121a2c4
--- /dev/null
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.h
@@ -0,0 +1,19 @@
+#ifndef DW_HDMI_CEC_H
+#define DW_HDMI_CEC_H
+
+struct dw_hdmi;
+
+struct dw_hdmi_cec_ops {
+ void (*write)(struct dw_hdmi *hdmi, u8 val, int offset);
+ u8 (*read)(struct dw_hdmi *hdmi, int offset);
+ void (*enable)(struct dw_hdmi *hdmi);
+ void (*disable)(struct dw_hdmi *hdmi);
+};
+
+struct dw_hdmi_cec_data {
+ struct dw_hdmi *hdmi;
+ const struct dw_hdmi_cec_ops *ops;
+ int irq;
+};
+
+#endif
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
index b2cf59f54c88..3b7e5c59a5e9 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
@@ -1,7 +1,8 @@
/*
* dw-hdmi-i2s-audio.c
*
- * Copyright (c) 2016 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ * Copyright (c) 2017 Renesas Solutions Corp.
+ * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index ead11242c4b9..bf14214fa464 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -35,8 +35,12 @@
#include "dw-hdmi.h"
#include "dw-hdmi-audio.h"
+#include "dw-hdmi-cec.h"
+
+#include <media/cec-notifier.h>
#define DDC_SEGMENT_ADDR 0x30
+
#define HDMI_EDID_LEN 512
enum hdmi_datamap {
@@ -130,6 +134,7 @@ struct dw_hdmi {
unsigned int version;
struct platform_device *audio;
+ struct platform_device *cec;
struct device *dev;
struct clk *isfr_clk;
struct clk *iahb_clk;
@@ -163,6 +168,7 @@ struct dw_hdmi {
bool bridge_is_on; /* indicates the bridge is on */
bool rxsense; /* rxsense state */
u8 phy_mask; /* desired phy int mask settings */
+ u8 mc_clkdis; /* clock disable register */
spinlock_t audio_lock;
struct mutex audio_mutex;
@@ -175,6 +181,8 @@ struct dw_hdmi {
struct regmap *regm;
void (*enable_audio)(struct dw_hdmi *hdmi);
void (*disable_audio)(struct dw_hdmi *hdmi);
+
+ struct cec_notifier *cec_notifier;
};
#define HDMI_IH_PHY_STAT0_RX_SENSE \
@@ -546,8 +554,11 @@ EXPORT_SYMBOL_GPL(dw_hdmi_set_sample_rate);
static void hdmi_enable_audio_clk(struct dw_hdmi *hdmi, bool enable)
{
- hdmi_modb(hdmi, enable ? 0 : HDMI_MC_CLKDIS_AUDCLK_DISABLE,
- HDMI_MC_CLKDIS_AUDCLK_DISABLE, HDMI_MC_CLKDIS);
+ if (enable)
+ hdmi->mc_clkdis &= ~HDMI_MC_CLKDIS_AUDCLK_DISABLE;
+ else
+ hdmi->mc_clkdis |= HDMI_MC_CLKDIS_AUDCLK_DISABLE;
+ hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS);
}
static void dw_hdmi_ahb_audio_enable(struct dw_hdmi *hdmi)
@@ -1317,7 +1328,7 @@ static void hdmi_config_AVI(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
u8 val;
/* Initialise info frame from DRM mode */
- drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+ drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
if (hdmi_bus_fmt_is_yuv444(hdmi->hdmi_data.enc_out_bus_format))
frame.colorspace = HDMI_COLORSPACE_YUV444;
@@ -1569,8 +1580,6 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
/* HDMI Initialization Step B.4 */
static void dw_hdmi_enable_video_path(struct dw_hdmi *hdmi)
{
- u8 clkdis;
-
/* control period minimum duration */
hdmi_writeb(hdmi, 12, HDMI_FC_CTRLDUR);
hdmi_writeb(hdmi, 32, HDMI_FC_EXCTRLDUR);
@@ -1582,17 +1591,21 @@ static void dw_hdmi_enable_video_path(struct dw_hdmi *hdmi)
hdmi_writeb(hdmi, 0x21, HDMI_FC_CH2PREAM);
/* Enable pixel clock and tmds data path */
- clkdis = 0x7F;
- clkdis &= ~HDMI_MC_CLKDIS_PIXELCLK_DISABLE;
- hdmi_writeb(hdmi, clkdis, HDMI_MC_CLKDIS);
+ hdmi->mc_clkdis |= HDMI_MC_CLKDIS_HDCPCLK_DISABLE |
+ HDMI_MC_CLKDIS_CSCCLK_DISABLE |
+ HDMI_MC_CLKDIS_AUDCLK_DISABLE |
+ HDMI_MC_CLKDIS_PREPCLK_DISABLE |
+ HDMI_MC_CLKDIS_TMDSCLK_DISABLE;
+ hdmi->mc_clkdis &= ~HDMI_MC_CLKDIS_PIXELCLK_DISABLE;
+ hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS);
- clkdis &= ~HDMI_MC_CLKDIS_TMDSCLK_DISABLE;
- hdmi_writeb(hdmi, clkdis, HDMI_MC_CLKDIS);
+ hdmi->mc_clkdis &= ~HDMI_MC_CLKDIS_TMDSCLK_DISABLE;
+ hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS);
/* Enable csc path */
if (is_color_space_conversion(hdmi)) {
- clkdis &= ~HDMI_MC_CLKDIS_CSCCLK_DISABLE;
- hdmi_writeb(hdmi, clkdis, HDMI_MC_CLKDIS);
+ hdmi->mc_clkdis &= ~HDMI_MC_CLKDIS_CSCCLK_DISABLE;
+ hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS);
}
/* Enable color space conversion if needed */
@@ -1783,7 +1796,6 @@ static void initialize_hdmi_ih_mutes(struct dw_hdmi *hdmi)
hdmi_writeb(hdmi, 0xff, HDMI_AUD_HBR_MASK);
hdmi_writeb(hdmi, 0xff, HDMI_GP_MASK);
hdmi_writeb(hdmi, 0xff, HDMI_A_APIINTMSK);
- hdmi_writeb(hdmi, 0xff, HDMI_CEC_MASK);
hdmi_writeb(hdmi, 0xff, HDMI_I2CM_INT);
hdmi_writeb(hdmi, 0xff, HDMI_I2CM_CTLINT);
@@ -1896,6 +1908,7 @@ static int dw_hdmi_connector_get_modes(struct drm_connector *connector)
hdmi->sink_is_hdmi = drm_detect_hdmi_monitor(edid);
hdmi->sink_has_audio = drm_detect_monitor_audio(edid);
drm_mode_connector_update_edid_property(connector, edid);
+ cec_notifier_set_phys_addr_from_edid(hdmi->cec_notifier, edid);
ret = drm_add_edid_modes(connector, edid);
/* Store the ELD */
drm_edid_to_eld(connector, edid);
@@ -1920,7 +1933,6 @@ static void dw_hdmi_connector_force(struct drm_connector *connector)
}
static const struct drm_connector_funcs dw_hdmi_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = dw_hdmi_connector_detect,
.destroy = drm_connector_cleanup,
@@ -2119,11 +2131,16 @@ static irqreturn_t dw_hdmi_irq(int irq, void *dev_id)
* ask the source to re-read the EDID.
*/
if (intr_stat &
- (HDMI_IH_PHY_STAT0_RX_SENSE | HDMI_IH_PHY_STAT0_HPD))
+ (HDMI_IH_PHY_STAT0_RX_SENSE | HDMI_IH_PHY_STAT0_HPD)) {
__dw_hdmi_setup_rx_sense(hdmi,
phy_stat & HDMI_PHY_HPD,
phy_stat & HDMI_PHY_RX_SENSE);
+ if ((phy_stat & (HDMI_PHY_RX_SENSE | HDMI_PHY_HPD)) == 0)
+ cec_notifier_set_phys_addr(hdmi->cec_notifier,
+ CEC_PHYS_ADDR_INVALID);
+ }
+
if (intr_stat & HDMI_IH_PHY_STAT0_HPD) {
dev_dbg(hdmi->dev, "EVENT=%s\n",
phy_int_pol & HDMI_PHY_HPD ? "plugin" : "plugout");
@@ -2170,6 +2187,7 @@ static const struct dw_hdmi_phy_data dw_hdmi_phys[] = {
.name = "DWC HDMI 2.0 TX PHY",
.gen = 2,
.has_svsret = true,
+ .configure = hdmi_phy_configure_dwc_hdmi_3d_tx,
}, {
.type = DW_HDMI_PHY_VENDOR_PHY,
.name = "Vendor PHY",
@@ -2219,6 +2237,29 @@ static int dw_hdmi_detect_phy(struct dw_hdmi *hdmi)
return -ENODEV;
}
+static void dw_hdmi_cec_enable(struct dw_hdmi *hdmi)
+{
+ mutex_lock(&hdmi->mutex);
+ hdmi->mc_clkdis &= ~HDMI_MC_CLKDIS_CECCLK_DISABLE;
+ hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS);
+ mutex_unlock(&hdmi->mutex);
+}
+
+static void dw_hdmi_cec_disable(struct dw_hdmi *hdmi)
+{
+ mutex_lock(&hdmi->mutex);
+ hdmi->mc_clkdis |= HDMI_MC_CLKDIS_CECCLK_DISABLE;
+ hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS);
+ mutex_unlock(&hdmi->mutex);
+}
+
+static const struct dw_hdmi_cec_ops dw_hdmi_cec_ops = {
+ .write = hdmi_writeb,
+ .read = hdmi_readb,
+ .enable = dw_hdmi_cec_enable,
+ .disable = dw_hdmi_cec_disable,
+};
+
static const struct regmap_config hdmi_regmap_8bit_config = {
.reg_bits = 32,
.val_bits = 8,
@@ -2241,6 +2282,7 @@ __dw_hdmi_probe(struct platform_device *pdev,
struct device_node *np = dev->of_node;
struct platform_device_info pdevinfo;
struct device_node *ddc_node;
+ struct dw_hdmi_cec_data cec;
struct dw_hdmi *hdmi;
struct resource *iores = NULL;
int irq;
@@ -2261,6 +2303,7 @@ __dw_hdmi_probe(struct platform_device *pdev,
hdmi->disabled = true;
hdmi->rxsense = true;
hdmi->phy_mask = (u8)~(HDMI_PHY_HPD | HDMI_PHY_RX_SENSE);
+ hdmi->mc_clkdis = 0x7f;
mutex_init(&hdmi->mutex);
mutex_init(&hdmi->audio_mutex);
@@ -2376,6 +2419,12 @@ __dw_hdmi_probe(struct platform_device *pdev,
if (ret)
goto err_iahb;
+ hdmi->cec_notifier = cec_notifier_get(dev);
+ if (!hdmi->cec_notifier) {
+ ret = -ENOMEM;
+ goto err_iahb;
+ }
+
/*
* To prevent overflows in HDMI_IH_FC_STAT2, set the clk regenerator
* N and cts values before enabling phy
@@ -2438,6 +2487,19 @@ __dw_hdmi_probe(struct platform_device *pdev,
hdmi->audio = platform_device_register_full(&pdevinfo);
}
+ if (config0 & HDMI_CONFIG0_CEC) {
+ cec.hdmi = hdmi;
+ cec.ops = &dw_hdmi_cec_ops;
+ cec.irq = irq;
+
+ pdevinfo.name = "dw-hdmi-cec";
+ pdevinfo.data = &cec;
+ pdevinfo.size_data = sizeof(cec);
+ pdevinfo.dma_mask = 0;
+
+ hdmi->cec = platform_device_register_full(&pdevinfo);
+ }
+
/* Reset HDMI DDC I2C master controller and mute I2CM interrupts */
if (hdmi->i2c)
dw_hdmi_i2c_init(hdmi);
@@ -2452,6 +2514,9 @@ err_iahb:
hdmi->ddc = NULL;
}
+ if (hdmi->cec_notifier)
+ cec_notifier_put(hdmi->cec_notifier);
+
clk_disable_unprepare(hdmi->iahb_clk);
err_isfr:
clk_disable_unprepare(hdmi->isfr_clk);
@@ -2465,10 +2530,15 @@ static void __dw_hdmi_remove(struct dw_hdmi *hdmi)
{
if (hdmi->audio && !IS_ERR(hdmi->audio))
platform_device_unregister(hdmi->audio);
+ if (!IS_ERR(hdmi->cec))
+ platform_device_unregister(hdmi->cec);
/* Disable all interrupts */
hdmi_writeb(hdmi, ~0, HDMI_IH_MUTE_PHY_STAT0);
+ if (hdmi->cec_notifier)
+ cec_notifier_put(hdmi->cec_notifier);
+
clk_disable_unprepare(hdmi->iahb_clk);
clk_disable_unprepare(hdmi->isfr_clk);
@@ -2485,17 +2555,12 @@ int dw_hdmi_probe(struct platform_device *pdev,
const struct dw_hdmi_plat_data *plat_data)
{
struct dw_hdmi *hdmi;
- int ret;
hdmi = __dw_hdmi_probe(pdev, plat_data);
if (IS_ERR(hdmi))
return PTR_ERR(hdmi);
- ret = drm_bridge_add(&hdmi->bridge);
- if (ret < 0) {
- __dw_hdmi_remove(hdmi);
- return ret;
- }
+ drm_bridge_add(&hdmi->bridge);
return 0;
}
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h
index c59f87e1483e..9d90eb9c46e5 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h
@@ -478,51 +478,6 @@
#define HDMI_A_PRESETUP 0x501A
#define HDMI_A_SRM_BASE 0x5020
-/* CEC Engine Registers */
-#define HDMI_CEC_CTRL 0x7D00
-#define HDMI_CEC_STAT 0x7D01
-#define HDMI_CEC_MASK 0x7D02
-#define HDMI_CEC_POLARITY 0x7D03
-#define HDMI_CEC_INT 0x7D04
-#define HDMI_CEC_ADDR_L 0x7D05
-#define HDMI_CEC_ADDR_H 0x7D06
-#define HDMI_CEC_TX_CNT 0x7D07
-#define HDMI_CEC_RX_CNT 0x7D08
-#define HDMI_CEC_TX_DATA0 0x7D10
-#define HDMI_CEC_TX_DATA1 0x7D11
-#define HDMI_CEC_TX_DATA2 0x7D12
-#define HDMI_CEC_TX_DATA3 0x7D13
-#define HDMI_CEC_TX_DATA4 0x7D14
-#define HDMI_CEC_TX_DATA5 0x7D15
-#define HDMI_CEC_TX_DATA6 0x7D16
-#define HDMI_CEC_TX_DATA7 0x7D17
-#define HDMI_CEC_TX_DATA8 0x7D18
-#define HDMI_CEC_TX_DATA9 0x7D19
-#define HDMI_CEC_TX_DATA10 0x7D1a
-#define HDMI_CEC_TX_DATA11 0x7D1b
-#define HDMI_CEC_TX_DATA12 0x7D1c
-#define HDMI_CEC_TX_DATA13 0x7D1d
-#define HDMI_CEC_TX_DATA14 0x7D1e
-#define HDMI_CEC_TX_DATA15 0x7D1f
-#define HDMI_CEC_RX_DATA0 0x7D20
-#define HDMI_CEC_RX_DATA1 0x7D21
-#define HDMI_CEC_RX_DATA2 0x7D22
-#define HDMI_CEC_RX_DATA3 0x7D23
-#define HDMI_CEC_RX_DATA4 0x7D24
-#define HDMI_CEC_RX_DATA5 0x7D25
-#define HDMI_CEC_RX_DATA6 0x7D26
-#define HDMI_CEC_RX_DATA7 0x7D27
-#define HDMI_CEC_RX_DATA8 0x7D28
-#define HDMI_CEC_RX_DATA9 0x7D29
-#define HDMI_CEC_RX_DATA10 0x7D2a
-#define HDMI_CEC_RX_DATA11 0x7D2b
-#define HDMI_CEC_RX_DATA12 0x7D2c
-#define HDMI_CEC_RX_DATA13 0x7D2d
-#define HDMI_CEC_RX_DATA14 0x7D2e
-#define HDMI_CEC_RX_DATA15 0x7D2f
-#define HDMI_CEC_LOCK 0x7D30
-#define HDMI_CEC_WKUPCTRL 0x7D31
-
/* I2C Master Registers (E-DDC) */
#define HDMI_I2CM_SLAVE 0x7E00
#define HDMI_I2CM_ADDRESS 0x7E01
@@ -555,6 +510,7 @@ enum {
/* CONFIG0_ID field values */
HDMI_CONFIG0_I2S = 0x10,
+ HDMI_CONFIG0_CEC = 0x02,
/* CONFIG1_ID field values */
HDMI_CONFIG1_AHB = 0x01,
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
new file mode 100644
index 000000000000..63c7a01b7053
--- /dev/null
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -0,0 +1,981 @@
+/*
+ * Copyright (c) 2016, Fuzhou Rockchip Electronics Co., Ltd
+ * Copyright (C) STMicroelectronics SA 2017
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Modified by Philippe Cornu <philippe.cornu@st.com>
+ * This generic Synopsys DesignWare MIPI DSI host driver is based on the
+ * Rockchip version from rockchip/dw-mipi-dsi.c with phy & bridge APIs.
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/bridge/dw_mipi_dsi.h>
+#include <video/mipi_display.h>
+
+#define DSI_VERSION 0x00
+#define DSI_PWR_UP 0x04
+#define RESET 0
+#define POWERUP BIT(0)
+
+#define DSI_CLKMGR_CFG 0x08
+#define TO_CLK_DIVIDSION(div) (((div) & 0xff) << 8)
+#define TX_ESC_CLK_DIVIDSION(div) (((div) & 0xff) << 0)
+
+#define DSI_DPI_VCID 0x0c
+#define DPI_VID(vid) (((vid) & 0x3) << 0)
+
+#define DSI_DPI_COLOR_CODING 0x10
+#define EN18_LOOSELY BIT(8)
+#define DPI_COLOR_CODING_16BIT_1 0x0
+#define DPI_COLOR_CODING_16BIT_2 0x1
+#define DPI_COLOR_CODING_16BIT_3 0x2
+#define DPI_COLOR_CODING_18BIT_1 0x3
+#define DPI_COLOR_CODING_18BIT_2 0x4
+#define DPI_COLOR_CODING_24BIT 0x5
+
+#define DSI_DPI_CFG_POL 0x14
+#define COLORM_ACTIVE_LOW BIT(4)
+#define SHUTD_ACTIVE_LOW BIT(3)
+#define HSYNC_ACTIVE_LOW BIT(2)
+#define VSYNC_ACTIVE_LOW BIT(1)
+#define DATAEN_ACTIVE_LOW BIT(0)
+
+#define DSI_DPI_LP_CMD_TIM 0x18
+#define OUTVACT_LPCMD_TIME(p) (((p) & 0xff) << 16)
+#define INVACT_LPCMD_TIME(p) ((p) & 0xff)
+
+#define DSI_DBI_CFG 0x20
+#define DSI_DBI_CMDSIZE 0x28
+
+#define DSI_PCKHDL_CFG 0x2c
+#define EN_CRC_RX BIT(4)
+#define EN_ECC_RX BIT(3)
+#define EN_BTA BIT(2)
+#define EN_EOTP_RX BIT(1)
+#define EN_EOTP_TX BIT(0)
+
+#define DSI_MODE_CFG 0x34
+#define ENABLE_VIDEO_MODE 0
+#define ENABLE_CMD_MODE BIT(0)
+
+#define DSI_VID_MODE_CFG 0x38
+#define FRAME_BTA_ACK BIT(14)
+#define ENABLE_LOW_POWER (0x3f << 8)
+#define ENABLE_LOW_POWER_MASK (0x3f << 8)
+#define VID_MODE_TYPE_NON_BURST_SYNC_PULSES 0x0
+#define VID_MODE_TYPE_NON_BURST_SYNC_EVENTS 0x1
+#define VID_MODE_TYPE_BURST 0x2
+#define VID_MODE_TYPE_MASK 0x3
+
+#define DSI_VID_PKT_SIZE 0x3c
+#define VID_PKT_SIZE(p) (((p) & 0x3fff) << 0)
+#define VID_PKT_MAX_SIZE 0x3fff
+
+#define DSI_VID_HSA_TIME 0x48
+#define DSI_VID_HBP_TIME 0x4c
+#define DSI_VID_HLINE_TIME 0x50
+#define DSI_VID_VSA_LINES 0x54
+#define DSI_VID_VBP_LINES 0x58
+#define DSI_VID_VFP_LINES 0x5c
+#define DSI_VID_VACTIVE_LINES 0x60
+#define DSI_CMD_MODE_CFG 0x68
+#define MAX_RD_PKT_SIZE_LP BIT(24)
+#define DCS_LW_TX_LP BIT(19)
+#define DCS_SR_0P_TX_LP BIT(18)
+#define DCS_SW_1P_TX_LP BIT(17)
+#define DCS_SW_0P_TX_LP BIT(16)
+#define GEN_LW_TX_LP BIT(14)
+#define GEN_SR_2P_TX_LP BIT(13)
+#define GEN_SR_1P_TX_LP BIT(12)
+#define GEN_SR_0P_TX_LP BIT(11)
+#define GEN_SW_2P_TX_LP BIT(10)
+#define GEN_SW_1P_TX_LP BIT(9)
+#define GEN_SW_0P_TX_LP BIT(8)
+#define EN_ACK_RQST BIT(1)
+#define EN_TEAR_FX BIT(0)
+
+#define CMD_MODE_ALL_LP (MAX_RD_PKT_SIZE_LP | \
+ DCS_LW_TX_LP | \
+ DCS_SR_0P_TX_LP | \
+ DCS_SW_1P_TX_LP | \
+ DCS_SW_0P_TX_LP | \
+ GEN_LW_TX_LP | \
+ GEN_SR_2P_TX_LP | \
+ GEN_SR_1P_TX_LP | \
+ GEN_SR_0P_TX_LP | \
+ GEN_SW_2P_TX_LP | \
+ GEN_SW_1P_TX_LP | \
+ GEN_SW_0P_TX_LP)
+
+#define DSI_GEN_HDR 0x6c
+#define GEN_HDATA(data) (((data) & 0xffff) << 8)
+#define GEN_HDATA_MASK (0xffff << 8)
+#define GEN_HTYPE(type) (((type) & 0xff) << 0)
+#define GEN_HTYPE_MASK 0xff
+
+#define DSI_GEN_PLD_DATA 0x70
+
+#define DSI_CMD_PKT_STATUS 0x74
+#define GEN_CMD_EMPTY BIT(0)
+#define GEN_CMD_FULL BIT(1)
+#define GEN_PLD_W_EMPTY BIT(2)
+#define GEN_PLD_W_FULL BIT(3)
+#define GEN_PLD_R_EMPTY BIT(4)
+#define GEN_PLD_R_FULL BIT(5)
+#define GEN_RD_CMD_BUSY BIT(6)
+
+#define DSI_TO_CNT_CFG 0x78
+#define HSTX_TO_CNT(p) (((p) & 0xffff) << 16)
+#define LPRX_TO_CNT(p) ((p) & 0xffff)
+
+#define DSI_BTA_TO_CNT 0x8c
+#define DSI_LPCLK_CTRL 0x94
+#define AUTO_CLKLANE_CTRL BIT(1)
+#define PHY_TXREQUESTCLKHS BIT(0)
+
+#define DSI_PHY_TMR_LPCLK_CFG 0x98
+#define PHY_CLKHS2LP_TIME(lbcc) (((lbcc) & 0x3ff) << 16)
+#define PHY_CLKLP2HS_TIME(lbcc) ((lbcc) & 0x3ff)
+
+#define DSI_PHY_TMR_CFG 0x9c
+#define PHY_HS2LP_TIME(lbcc) (((lbcc) & 0xff) << 24)
+#define PHY_LP2HS_TIME(lbcc) (((lbcc) & 0xff) << 16)
+#define MAX_RD_TIME(lbcc) ((lbcc) & 0x7fff)
+
+#define DSI_PHY_RSTZ 0xa0
+#define PHY_DISFORCEPLL 0
+#define PHY_ENFORCEPLL BIT(3)
+#define PHY_DISABLECLK 0
+#define PHY_ENABLECLK BIT(2)
+#define PHY_RSTZ 0
+#define PHY_UNRSTZ BIT(1)
+#define PHY_SHUTDOWNZ 0
+#define PHY_UNSHUTDOWNZ BIT(0)
+
+#define DSI_PHY_IF_CFG 0xa4
+#define N_LANES(n) ((((n) - 1) & 0x3) << 0)
+#define PHY_STOP_WAIT_TIME(cycle) (((cycle) & 0xff) << 8)
+
+#define DSI_PHY_STATUS 0xb0
+#define LOCK BIT(0)
+#define STOP_STATE_CLK_LANE BIT(2)
+
+#define DSI_PHY_TST_CTRL0 0xb4
+#define PHY_TESTCLK BIT(1)
+#define PHY_UNTESTCLK 0
+#define PHY_TESTCLR BIT(0)
+#define PHY_UNTESTCLR 0
+
+#define DSI_PHY_TST_CTRL1 0xb8
+#define PHY_TESTEN BIT(16)
+#define PHY_UNTESTEN 0
+#define PHY_TESTDOUT(n) (((n) & 0xff) << 8)
+#define PHY_TESTDIN(n) (((n) & 0xff) << 0)
+
+#define DSI_INT_ST0 0xbc
+#define DSI_INT_ST1 0xc0
+#define DSI_INT_MSK0 0xc4
+#define DSI_INT_MSK1 0xc8
+
+#define PHY_STATUS_TIMEOUT_US 10000
+#define CMD_PKT_STATUS_TIMEOUT_US 20000
+
+struct dw_mipi_dsi {
+ struct drm_bridge bridge;
+ struct mipi_dsi_host dsi_host;
+ struct drm_bridge *panel_bridge;
+ bool is_panel_bridge;
+ struct device *dev;
+ void __iomem *base;
+
+ struct clk *pclk;
+
+ unsigned int lane_mbps; /* per lane */
+ u32 channel;
+ u32 lanes;
+ u32 format;
+ unsigned long mode_flags;
+
+ const struct dw_mipi_dsi_plat_data *plat_data;
+};
+
+/*
+ * The controller should generate 2 frames before
+ * preparing the peripheral.
+ */
+static void dw_mipi_dsi_wait_for_two_frames(struct drm_display_mode *mode)
+{
+ int refresh, two_frames;
+
+ refresh = drm_mode_vrefresh(mode);
+ two_frames = DIV_ROUND_UP(MSEC_PER_SEC, refresh) * 2;
+ msleep(two_frames);
+}
+
+static inline struct dw_mipi_dsi *host_to_dsi(struct mipi_dsi_host *host)
+{
+ return container_of(host, struct dw_mipi_dsi, dsi_host);
+}
+
+static inline struct dw_mipi_dsi *bridge_to_dsi(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct dw_mipi_dsi, bridge);
+}
+
+static inline void dsi_write(struct dw_mipi_dsi *dsi, u32 reg, u32 val)
+{
+ writel(val, dsi->base + reg);
+}
+
+static inline u32 dsi_read(struct dw_mipi_dsi *dsi, u32 reg)
+{
+ return readl(dsi->base + reg);
+}
+
+static int dw_mipi_dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct dw_mipi_dsi *dsi = host_to_dsi(host);
+ struct drm_bridge *bridge;
+ struct drm_panel *panel;
+ int ret;
+
+ if (device->lanes > dsi->plat_data->max_data_lanes) {
+ dev_err(dsi->dev, "the number of data lanes(%u) is too many\n",
+ device->lanes);
+ return -EINVAL;
+ }
+
+ dsi->lanes = device->lanes;
+ dsi->channel = device->channel;
+ dsi->format = device->format;
+ dsi->mode_flags = device->mode_flags;
+
+ ret = drm_of_find_panel_or_bridge(host->dev->of_node, 1, 0,
+ &panel, &bridge);
+ if (ret)
+ return ret;
+
+ if (panel) {
+ bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(bridge))
+ return PTR_ERR(bridge);
+ dsi->is_panel_bridge = true;
+ }
+
+ dsi->panel_bridge = bridge;
+
+ drm_bridge_add(&dsi->bridge);
+
+ return 0;
+}
+
+static int dw_mipi_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct dw_mipi_dsi *dsi = host_to_dsi(host);
+
+ if (dsi->is_panel_bridge)
+ drm_panel_bridge_remove(dsi->panel_bridge);
+
+ drm_bridge_remove(&dsi->bridge);
+
+ return 0;
+}
+
+static void dw_mipi_message_config(struct dw_mipi_dsi *dsi,
+ const struct mipi_dsi_msg *msg)
+{
+ bool lpm = msg->flags & MIPI_DSI_MSG_USE_LPM;
+ u32 val = 0;
+
+ if (msg->flags & MIPI_DSI_MSG_REQ_ACK)
+ val |= EN_ACK_RQST;
+ if (lpm)
+ val |= CMD_MODE_ALL_LP;
+
+ dsi_write(dsi, DSI_LPCLK_CTRL, lpm ? 0 : PHY_TXREQUESTCLKHS);
+ dsi_write(dsi, DSI_CMD_MODE_CFG, val);
+}
+
+static int dw_mipi_dsi_gen_pkt_hdr_write(struct dw_mipi_dsi *dsi, u32 hdr_val)
+{
+ int ret;
+ u32 val, mask;
+
+ ret = readl_poll_timeout(dsi->base + DSI_CMD_PKT_STATUS,
+ val, !(val & GEN_CMD_FULL), 1000,
+ CMD_PKT_STATUS_TIMEOUT_US);
+ if (ret < 0) {
+ dev_err(dsi->dev, "failed to get available command FIFO\n");
+ return ret;
+ }
+
+ dsi_write(dsi, DSI_GEN_HDR, hdr_val);
+
+ mask = GEN_CMD_EMPTY | GEN_PLD_W_EMPTY;
+ ret = readl_poll_timeout(dsi->base + DSI_CMD_PKT_STATUS,
+ val, (val & mask) == mask,
+ 1000, CMD_PKT_STATUS_TIMEOUT_US);
+ if (ret < 0) {
+ dev_err(dsi->dev, "failed to write command FIFO\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dw_mipi_dsi_dcs_short_write(struct dw_mipi_dsi *dsi,
+ const struct mipi_dsi_msg *msg)
+{
+ const u8 *tx_buf = msg->tx_buf;
+ u16 data = 0;
+ u32 val;
+
+ if (msg->tx_len > 0)
+ data |= tx_buf[0];
+ if (msg->tx_len > 1)
+ data |= tx_buf[1] << 8;
+
+ if (msg->tx_len > 2) {
+ dev_err(dsi->dev, "too long tx buf length %zu for short write\n",
+ msg->tx_len);
+ return -EINVAL;
+ }
+
+ val = GEN_HDATA(data) | GEN_HTYPE(msg->type);
+ return dw_mipi_dsi_gen_pkt_hdr_write(dsi, val);
+}
+
+static int dw_mipi_dsi_dcs_long_write(struct dw_mipi_dsi *dsi,
+ const struct mipi_dsi_msg *msg)
+{
+ const u8 *tx_buf = msg->tx_buf;
+ int len = msg->tx_len, pld_data_bytes = sizeof(u32), ret;
+ u32 hdr_val = GEN_HDATA(msg->tx_len) | GEN_HTYPE(msg->type);
+ u32 remainder;
+ u32 val;
+
+ if (msg->tx_len < 3) {
+ dev_err(dsi->dev, "wrong tx buf length %zu for long write\n",
+ msg->tx_len);
+ return -EINVAL;
+ }
+
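+ /*
+ * Illustrative example (added for clarity, not in the original code):
+ * a 6-byte payload results in two writes to DSI_GEN_PLD_DATA, one
+ * full 32-bit word followed by a word carrying the remaining 2 bytes
+ * with the unused bytes zeroed.
+ */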
+ while (DIV_ROUND_UP(len, pld_data_bytes)) {
+ if (len < pld_data_bytes) {
+ remainder = 0;
+ memcpy(&remainder, tx_buf, len);
+ dsi_write(dsi, DSI_GEN_PLD_DATA, remainder);
+ len = 0;
+ } else {
+ memcpy(&remainder, tx_buf, pld_data_bytes);
+ dsi_write(dsi, DSI_GEN_PLD_DATA, remainder);
+ tx_buf += pld_data_bytes;
+ len -= pld_data_bytes;
+ }
+
+ ret = readl_poll_timeout(dsi->base + DSI_CMD_PKT_STATUS,
+ val, !(val & GEN_PLD_W_FULL), 1000,
+ CMD_PKT_STATUS_TIMEOUT_US);
+ if (ret < 0) {
+ dev_err(dsi->dev,
+ "failed to get available write payload FIFO\n");
+ return ret;
+ }
+ }
+
+ return dw_mipi_dsi_gen_pkt_hdr_write(dsi, hdr_val);
+}
+
+static ssize_t dw_mipi_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct dw_mipi_dsi *dsi = host_to_dsi(host);
+ int ret;
+
+ /*
+ * TODO dw drv improvements
+ * use mipi_dsi_create_packet() instead of all following
+ * functions and code (no switch cases, no
+ * dw_mipi_dsi_dcs_short_write(), only the loop in long_write...)
+ * and use packet.header...
+ */
+ dw_mipi_message_config(dsi, msg);
+
+ switch (msg->type) {
+ case MIPI_DSI_DCS_SHORT_WRITE:
+ case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
+ case MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE:
+ ret = dw_mipi_dsi_dcs_short_write(dsi, msg);
+ break;
+ case MIPI_DSI_DCS_LONG_WRITE:
+ ret = dw_mipi_dsi_dcs_long_write(dsi, msg);
+ break;
+ default:
+ dev_err(dsi->dev, "unsupported message type 0x%02x\n",
+ msg->type);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct mipi_dsi_host_ops dw_mipi_dsi_host_ops = {
+ .attach = dw_mipi_dsi_host_attach,
+ .detach = dw_mipi_dsi_host_detach,
+ .transfer = dw_mipi_dsi_host_transfer,
+};
+
+static void dw_mipi_dsi_video_mode_config(struct dw_mipi_dsi *dsi)
+{
+ u32 val;
+
+ /*
+ * TODO dw drv improvements
+ * enabling low power is panel-dependent, we should use the
+ * panel configuration here...
+ */
+ val = ENABLE_LOW_POWER;
+
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
+ val |= VID_MODE_TYPE_BURST;
+ else if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
+ val |= VID_MODE_TYPE_NON_BURST_SYNC_PULSES;
+ else
+ val |= VID_MODE_TYPE_NON_BURST_SYNC_EVENTS;
+
+ dsi_write(dsi, DSI_VID_MODE_CFG, val);
+}
+
+static void dw_mipi_dsi_set_mode(struct dw_mipi_dsi *dsi,
+ unsigned long mode_flags)
+{
+ dsi_write(dsi, DSI_PWR_UP, RESET);
+
+ if (mode_flags & MIPI_DSI_MODE_VIDEO) {
+ dsi_write(dsi, DSI_MODE_CFG, ENABLE_VIDEO_MODE);
+ dw_mipi_dsi_video_mode_config(dsi);
+ dsi_write(dsi, DSI_LPCLK_CTRL, PHY_TXREQUESTCLKHS);
+ } else {
+ dsi_write(dsi, DSI_MODE_CFG, ENABLE_CMD_MODE);
+ }
+
+ dsi_write(dsi, DSI_PWR_UP, POWERUP);
+}
+
+static void dw_mipi_dsi_disable(struct dw_mipi_dsi *dsi)
+{
+ dsi_write(dsi, DSI_PWR_UP, RESET);
+ dsi_write(dsi, DSI_PHY_RSTZ, PHY_RSTZ);
+}
+
+static void dw_mipi_dsi_init(struct dw_mipi_dsi *dsi)
+{
+ /*
+ * The maximum permitted escape clock is 20MHz and it is derived from
+ * lanebyteclk, which is running at "lane_mbps / 8". Thus we want:
+ *
+ * (lane_mbps >> 3) / esc_clk_division < 20
+ * which is:
+ * (lane_mbps >> 3) / 20 > esc_clk_division
+ */
+ u32 esc_clk_division = (dsi->lane_mbps >> 3) / 20 + 1;
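+
+ /*
+ * Worked example (illustrative, the rate below is an assumption):
+ * with lane_mbps = 500, lanebyteclk runs at 500 / 8 = 62.5 MHz, so
+ * esc_clk_division = (500 >> 3) / 20 + 1 = 4 and the escape clock
+ * ends up at about 62.5 / 4 = 15.6 MHz, below the 20 MHz limit.
+ */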
+
+ dsi_write(dsi, DSI_PWR_UP, RESET);
+
+ /*
+ * TODO dw drv improvements
+ * timeout clock division should be computed with the
+ * high speed transmission counter timeout and byte lane...
+ */
+ dsi_write(dsi, DSI_CLKMGR_CFG, TO_CLK_DIVIDSION(10) |
+ TX_ESC_CLK_DIVIDSION(esc_clk_division));
+}
+
+static void dw_mipi_dsi_dpi_config(struct dw_mipi_dsi *dsi,
+ struct drm_display_mode *mode)
+{
+ u32 val = 0, color = 0;
+
+ switch (dsi->format) {
+ case MIPI_DSI_FMT_RGB888:
+ color = DPI_COLOR_CODING_24BIT;
+ break;
+ case MIPI_DSI_FMT_RGB666:
+ color = DPI_COLOR_CODING_18BIT_2 | EN18_LOOSELY;
+ break;
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ color = DPI_COLOR_CODING_18BIT_1;
+ break;
+ case MIPI_DSI_FMT_RGB565:
+ color = DPI_COLOR_CODING_16BIT_1;
+ break;
+ }
+
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ val |= VSYNC_ACTIVE_LOW;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ val |= HSYNC_ACTIVE_LOW;
+
+ dsi_write(dsi, DSI_DPI_VCID, DPI_VID(dsi->channel));
+ dsi_write(dsi, DSI_DPI_COLOR_CODING, color);
+ dsi_write(dsi, DSI_DPI_CFG_POL, val);
+ /*
+ * TODO dw drv improvements
+ * largest packet sizes during hfp or during vsa/vbp/vfp
+ * should be computed according to byte lane, lane number and only
+ * if sending lp cmds in high speed is enabled (PHY_TXREQUESTCLKHS)
+ */
+ dsi_write(dsi, DSI_DPI_LP_CMD_TIM, OUTVACT_LPCMD_TIME(4)
+ | INVACT_LPCMD_TIME(4));
+}
+
+static void dw_mipi_dsi_packet_handler_config(struct dw_mipi_dsi *dsi)
+{
+ dsi_write(dsi, DSI_PCKHDL_CFG, EN_CRC_RX | EN_ECC_RX | EN_BTA);
+}
+
+static void dw_mipi_dsi_video_packet_config(struct dw_mipi_dsi *dsi,
+ struct drm_display_mode *mode)
+{
+ /*
+ * TODO dw drv improvements
+ * only burst mode is supported here. For non-burst video modes,
+ * we should compute DSI_VID_PKT_SIZE, DSI_VCCR.NUMC &
+ * DSI_VNPCR.NPSIZE... especially because this driver supports
+ * non-burst video modes, see dw_mipi_dsi_video_mode_config()...
+ */
+ dsi_write(dsi, DSI_VID_PKT_SIZE, VID_PKT_SIZE(mode->hdisplay));
+}
+
+static void dw_mipi_dsi_command_mode_config(struct dw_mipi_dsi *dsi)
+{
+ /*
+ * TODO dw drv improvements
+ * compute high speed transmission counter timeout according
+ * to the timeout clock division (TO_CLK_DIVIDSION) and byte lane...
+ */
+ dsi_write(dsi, DSI_TO_CNT_CFG, HSTX_TO_CNT(1000) | LPRX_TO_CNT(1000));
+ /*
+ * TODO dw drv improvements
+ * the Bus-Turn-Around Timeout Counter should be computed
+ * according to byte lane...
+ */
+ dsi_write(dsi, DSI_BTA_TO_CNT, 0xd00);
+ dsi_write(dsi, DSI_MODE_CFG, ENABLE_CMD_MODE);
+}
+
+/* Get lane byte clock cycles. */
+static u32 dw_mipi_dsi_get_hcomponent_lbcc(struct dw_mipi_dsi *dsi,
+ struct drm_display_mode *mode,
+ u32 hcomponent)
+{
+ u32 frac, lbcc;
+
+ lbcc = hcomponent * dsi->lane_mbps * MSEC_PER_SEC / 8;
+
+ frac = lbcc % mode->clock;
+ lbcc = lbcc / mode->clock;
+ if (frac)
+ lbcc++;
+
+ return lbcc;
+}
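+
+/*
+ * Worked example (illustrative, all numbers are assumptions): for
+ * hcomponent = 1104 pixels, lane_mbps = 500 and mode->clock = 65000 kHz,
+ * the intermediate value is 1104 * 500 * 1000 / 8 = 69000000, and
+ * dividing by 65000 with round-up gives 1062 lane byte clock cycles.
+ */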
+
+static void dw_mipi_dsi_line_timer_config(struct dw_mipi_dsi *dsi,
+ struct drm_display_mode *mode)
+{
+ u32 htotal, hsa, hbp, lbcc;
+
+ htotal = mode->htotal;
+ hsa = mode->hsync_end - mode->hsync_start;
+ hbp = mode->htotal - mode->hsync_end;
+
+ /*
+ * TODO dw drv improvements
+ * computations below may be improved...
+ */
+ lbcc = dw_mipi_dsi_get_hcomponent_lbcc(dsi, mode, htotal);
+ dsi_write(dsi, DSI_VID_HLINE_TIME, lbcc);
+
+ lbcc = dw_mipi_dsi_get_hcomponent_lbcc(dsi, mode, hsa);
+ dsi_write(dsi, DSI_VID_HSA_TIME, lbcc);
+
+ lbcc = dw_mipi_dsi_get_hcomponent_lbcc(dsi, mode, hbp);
+ dsi_write(dsi, DSI_VID_HBP_TIME, lbcc);
+}
+
+static void dw_mipi_dsi_vertical_timing_config(struct dw_mipi_dsi *dsi,
+ struct drm_display_mode *mode)
+{
+ u32 vactive, vsa, vfp, vbp;
+
+ vactive = mode->vdisplay;
+ vsa = mode->vsync_end - mode->vsync_start;
+ vfp = mode->vsync_start - mode->vdisplay;
+ vbp = mode->vtotal - mode->vsync_end;
+
+ dsi_write(dsi, DSI_VID_VACTIVE_LINES, vactive);
+ dsi_write(dsi, DSI_VID_VSA_LINES, vsa);
+ dsi_write(dsi, DSI_VID_VFP_LINES, vfp);
+ dsi_write(dsi, DSI_VID_VBP_LINES, vbp);
+}
+
+static void dw_mipi_dsi_dphy_timing_config(struct dw_mipi_dsi *dsi)
+{
+ /*
+ * TODO dw drv improvements
+ * data & clock lane timers should be computed according to panel
+ * blankings and to the automatic clock lane control mode...
+ * note: DSI_PHY_TMR_CFG.MAX_RD_TIME should be in line with
+ * DSI_CMD_MODE_CFG.MAX_RD_PKT_SIZE_LP (see CMD_MODE_ALL_LP)
+ */
+ dsi_write(dsi, DSI_PHY_TMR_CFG, PHY_HS2LP_TIME(0x40)
+ | PHY_LP2HS_TIME(0x40) | MAX_RD_TIME(10000));
+
+ dsi_write(dsi, DSI_PHY_TMR_LPCLK_CFG, PHY_CLKHS2LP_TIME(0x40)
+ | PHY_CLKLP2HS_TIME(0x40));
+}
+
+static void dw_mipi_dsi_dphy_interface_config(struct dw_mipi_dsi *dsi)
+{
+ /*
+ * TODO dw drv improvements
+ * stop wait time should be the maximum between host dsi
+ * and panel stop wait times
+ */
+ dsi_write(dsi, DSI_PHY_IF_CFG, PHY_STOP_WAIT_TIME(0x20) |
+ N_LANES(dsi->lanes));
+}
+
+static void dw_mipi_dsi_dphy_init(struct dw_mipi_dsi *dsi)
+{
+ /* Clear PHY state */
+ dsi_write(dsi, DSI_PHY_RSTZ, PHY_DISFORCEPLL | PHY_DISABLECLK
+ | PHY_RSTZ | PHY_SHUTDOWNZ);
+ dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_UNTESTCLR);
+ dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_TESTCLR);
+ dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_UNTESTCLR);
+}
+
+static void dw_mipi_dsi_dphy_enable(struct dw_mipi_dsi *dsi)
+{
+ u32 val;
+ int ret;
+
+ dsi_write(dsi, DSI_PHY_RSTZ, PHY_ENFORCEPLL | PHY_ENABLECLK |
+ PHY_UNRSTZ | PHY_UNSHUTDOWNZ);
+
+ ret = readl_poll_timeout(dsi->base + DSI_PHY_STATUS,
+ val, val & LOCK, 1000, PHY_STATUS_TIMEOUT_US);
+ if (ret < 0)
+ DRM_DEBUG_DRIVER("failed to wait phy lock state\n");
+
+ ret = readl_poll_timeout(dsi->base + DSI_PHY_STATUS,
+ val, val & STOP_STATE_CLK_LANE, 1000,
+ PHY_STATUS_TIMEOUT_US);
+ if (ret < 0)
+ DRM_DEBUG_DRIVER("failed to wait phy clk lane stop state\n");
+}
+
+static void dw_mipi_dsi_clear_err(struct dw_mipi_dsi *dsi)
+{
+ dsi_read(dsi, DSI_INT_ST0);
+ dsi_read(dsi, DSI_INT_ST1);
+ dsi_write(dsi, DSI_INT_MSK0, 0);
+ dsi_write(dsi, DSI_INT_MSK1, 0);
+}
+
+static void dw_mipi_dsi_bridge_post_disable(struct drm_bridge *bridge)
+{
+ struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
+
+ /*
+ * Switch to command mode before panel-bridge post_disable &
+ * panel unprepare.
+ * Note: panel-bridge disable & panel disable has been called
+ * before by the drm framework.
+ */
+ dw_mipi_dsi_set_mode(dsi, 0);
+
+ /*
+ * TODO Only way found to call panel-bridge post_disable &
+ * panel unprepare before the dsi "final" disable...
+ * This needs to be fixed in the drm_bridge framework and the API
+ * needs to be updated to manage our own call chains...
+ */
+ dsi->panel_bridge->funcs->post_disable(dsi->panel_bridge);
+
+ dw_mipi_dsi_disable(dsi);
+ clk_disable_unprepare(dsi->pclk);
+ pm_runtime_put(dsi->dev);
+}
+
+void dw_mipi_dsi_bridge_mode_set(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
+ const struct dw_mipi_dsi_phy_ops *phy_ops = dsi->plat_data->phy_ops;
+ void *priv_data = dsi->plat_data->priv_data;
+ int ret;
+
+ clk_prepare_enable(dsi->pclk);
+
+ ret = phy_ops->get_lane_mbps(priv_data, mode, dsi->mode_flags,
+ dsi->lanes, dsi->format, &dsi->lane_mbps);
+ if (ret)
+ DRM_DEBUG_DRIVER("Phy get_lane_mbps() failed\n");
+
+ pm_runtime_get_sync(dsi->dev);
+ dw_mipi_dsi_init(dsi);
+ dw_mipi_dsi_dpi_config(dsi, mode);
+ dw_mipi_dsi_packet_handler_config(dsi);
+ dw_mipi_dsi_video_mode_config(dsi);
+ dw_mipi_dsi_video_packet_config(dsi, mode);
+ dw_mipi_dsi_command_mode_config(dsi);
+ dw_mipi_dsi_line_timer_config(dsi, mode);
+ dw_mipi_dsi_vertical_timing_config(dsi, mode);
+
+ dw_mipi_dsi_dphy_init(dsi);
+ dw_mipi_dsi_dphy_timing_config(dsi);
+ dw_mipi_dsi_dphy_interface_config(dsi);
+
+ dw_mipi_dsi_clear_err(dsi);
+
+ ret = phy_ops->init(priv_data);
+ if (ret)
+ DRM_DEBUG_DRIVER("Phy init() failed\n");
+
+ dw_mipi_dsi_dphy_enable(dsi);
+
+ dw_mipi_dsi_wait_for_two_frames(mode);
+
+ /* Switch to cmd mode for panel-bridge pre_enable & panel prepare */
+ dw_mipi_dsi_set_mode(dsi, 0);
+}
+
+static void dw_mipi_dsi_bridge_enable(struct drm_bridge *bridge)
+{
+ struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
+
+ /* Switch to video mode for panel-bridge enable & panel enable */
+ dw_mipi_dsi_set_mode(dsi, MIPI_DSI_MODE_VIDEO);
+}
+
+static enum drm_mode_status
+dw_mipi_dsi_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode)
+{
+ struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
+ const struct dw_mipi_dsi_plat_data *pdata = dsi->plat_data;
+ enum drm_mode_status mode_status = MODE_OK;
+
+ if (pdata->mode_valid)
+ mode_status = pdata->mode_valid(pdata->priv_data, mode);
+
+ return mode_status;
+}
+
+static int dw_mipi_dsi_bridge_attach(struct drm_bridge *bridge)
+{
+ struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
+
+ if (!bridge->encoder) {
+ DRM_ERROR("Parent encoder object not found\n");
+ return -ENODEV;
+ }
+
+ /* Set the encoder type as caller does not know it */
+ bridge->encoder->encoder_type = DRM_MODE_ENCODER_DSI;
+
+ /* Attach the panel-bridge to the dsi bridge */
+ return drm_bridge_attach(bridge->encoder, dsi->panel_bridge, bridge);
+}
+
+static const struct drm_bridge_funcs dw_mipi_dsi_bridge_funcs = {
+ .mode_set = dw_mipi_dsi_bridge_mode_set,
+ .enable = dw_mipi_dsi_bridge_enable,
+ .post_disable = dw_mipi_dsi_bridge_post_disable,
+ .mode_valid = dw_mipi_dsi_bridge_mode_valid,
+ .attach = dw_mipi_dsi_bridge_attach,
+};
+
+static struct dw_mipi_dsi *
+__dw_mipi_dsi_probe(struct platform_device *pdev,
+ const struct dw_mipi_dsi_plat_data *plat_data)
+{
+ struct device *dev = &pdev->dev;
+ struct reset_control *apb_rst;
+ struct dw_mipi_dsi *dsi;
+ struct resource *res;
+ int ret;
+
+ dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
+ if (!dsi)
+ return ERR_PTR(-ENOMEM);
+
+ dsi->dev = dev;
+ dsi->plat_data = plat_data;
+
+ if (!plat_data->phy_ops->init || !plat_data->phy_ops->get_lane_mbps) {
+ DRM_ERROR("Phy not properly configured\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ if (!plat_data->base) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return ERR_PTR(-ENODEV);
+
+ dsi->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(dsi->base))
+ return ERR_PTR(-ENODEV);
+
+ } else {
+ dsi->base = plat_data->base;
+ }
+
+ dsi->pclk = devm_clk_get(dev, "pclk");
+ if (IS_ERR(dsi->pclk)) {
+ ret = PTR_ERR(dsi->pclk);
+ dev_err(dev, "Unable to get pclk: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ /*
+ * Note that the reset was not defined in the initial device tree, so
+ * we have to be prepared for it not being found.
+ */
+ apb_rst = devm_reset_control_get(dev, "apb");
+ if (IS_ERR(apb_rst)) {
+ ret = PTR_ERR(apb_rst);
+ if (ret == -ENOENT) {
+ apb_rst = NULL;
+ } else {
+ dev_err(dev, "Unable to get reset control: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+ }
+
+ if (apb_rst) {
+ ret = clk_prepare_enable(dsi->pclk);
+ if (ret) {
+ dev_err(dev, "%s: Failed to enable pclk\n", __func__);
+ return ERR_PTR(ret);
+ }
+
+ reset_control_assert(apb_rst);
+ usleep_range(10, 20);
+ reset_control_deassert(apb_rst);
+
+ clk_disable_unprepare(dsi->pclk);
+ }
+
+ pm_runtime_enable(dev);
+
+ dsi->dsi_host.ops = &dw_mipi_dsi_host_ops;
+ dsi->dsi_host.dev = dev;
+ ret = mipi_dsi_host_register(&dsi->dsi_host);
+ if (ret) {
+ dev_err(dev, "Failed to register MIPI host: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ dsi->bridge.driver_private = dsi;
+ dsi->bridge.funcs = &dw_mipi_dsi_bridge_funcs;
+#ifdef CONFIG_OF
+ dsi->bridge.of_node = pdev->dev.of_node;
+#endif
+
+ dev_set_drvdata(dev, dsi);
+
+ return dsi;
+}
+
+static void __dw_mipi_dsi_remove(struct dw_mipi_dsi *dsi)
+{
+ pm_runtime_disable(dsi->dev);
+}
+
+/*
+ * Probe/remove API, used from platforms based on the DRM bridge API.
+ */
+int dw_mipi_dsi_probe(struct platform_device *pdev,
+ const struct dw_mipi_dsi_plat_data *plat_data)
+{
+ struct dw_mipi_dsi *dsi;
+
+ dsi = __dw_mipi_dsi_probe(pdev, plat_data);
+ if (IS_ERR(dsi))
+ return PTR_ERR(dsi);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dw_mipi_dsi_probe);
+
+void dw_mipi_dsi_remove(struct platform_device *pdev)
+{
+ struct dw_mipi_dsi *dsi = platform_get_drvdata(pdev);
+
+ mipi_dsi_host_unregister(&dsi->dsi_host);
+
+ __dw_mipi_dsi_remove(dsi);
+}
+EXPORT_SYMBOL_GPL(dw_mipi_dsi_remove);
+
+/*
+ * Bind/unbind API, used from platforms based on the component framework.
+ */
+int dw_mipi_dsi_bind(struct platform_device *pdev, struct drm_encoder *encoder,
+ const struct dw_mipi_dsi_plat_data *plat_data)
+{
+ struct dw_mipi_dsi *dsi;
+ int ret;
+
+ dsi = __dw_mipi_dsi_probe(pdev, plat_data);
+ if (IS_ERR(dsi))
+ return PTR_ERR(dsi);
+
+ ret = drm_bridge_attach(encoder, &dsi->bridge, NULL);
+ if (ret) {
+ dw_mipi_dsi_remove(pdev);
+ DRM_ERROR("Failed to initialize bridge with drm\n");
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dw_mipi_dsi_bind);
+
+void dw_mipi_dsi_unbind(struct device *dev)
+{
+ struct dw_mipi_dsi *dsi = dev_get_drvdata(dev);
+
+ __dw_mipi_dsi_remove(dsi);
+}
+EXPORT_SYMBOL_GPL(dw_mipi_dsi_unbind);
+
+MODULE_AUTHOR("Chris Zhong <zyw@rock-chips.com>");
+MODULE_AUTHOR("Philippe Cornu <philippe.cornu@st.com>");
+MODULE_DESCRIPTION("DW MIPI DSI host controller driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:dw-mipi-dsi");
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 5c26488e7a2d..8571cfd877c5 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -1160,7 +1160,6 @@ static const struct drm_connector_helper_funcs tc_connector_helper_funcs = {
};
static const struct drm_connector_funcs tc_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
@@ -1255,7 +1254,7 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
/* port@2 is the output port */
ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &tc->panel, NULL);
- if (ret)
+ if (ret && ret != -ENODEV)
return ret;
/* Shut down GPIO is optional */
@@ -1325,11 +1324,7 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
tc->bridge.funcs = &tc_bridge_funcs;
tc->bridge.of_node = dev->of_node;
- ret = drm_bridge_add(&tc->bridge);
- if (ret) {
- dev_err(dev, "Failed to add drm_bridge: %d\n", ret);
- goto err_unregister_aux;
- }
+ drm_bridge_add(&tc->bridge);
i2c_set_clientdata(client, tc);
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
index eee4efda829e..acb857030951 100644
--- a/drivers/gpu/drm/bridge/ti-tfp410.c
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -102,7 +102,6 @@ tfp410_connector_detect(struct drm_connector *connector, bool force)
}
static const struct drm_connector_funcs tfp410_con_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.detect = tfp410_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
@@ -237,11 +236,7 @@ static int tfp410_init(struct device *dev)
}
}
- ret = drm_bridge_add(&dvi->bridge);
- if (ret) {
- dev_err(dev, "drm_bridge_add() failed: %d\n", ret);
- goto fail;
- }
+ drm_bridge_add(&dvi->bridge);
return 0;
fail:
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index d893ea21a359..69c4e352dd78 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -132,7 +132,6 @@ static struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM,
.load = cirrus_driver_load,
.unload = cirrus_driver_unload,
- .set_busid = drm_pci_set_busid,
.fops = &cirrus_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -143,7 +142,6 @@ static struct drm_driver driver = {
.gem_free_object_unlocked = cirrus_gem_free_object,
.dumb_create = cirrus_dumb_create,
.dumb_map_offset = cirrus_dumb_mmap_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
};
static const struct dev_pm_ops cirrus_pm_ops = {
@@ -166,12 +164,12 @@ static int __init cirrus_init(void)
if (cirrus_modeset == 0)
return -EINVAL;
- return drm_pci_init(&driver, &cirrus_pci_driver);
+ return pci_register_driver(&cirrus_pci_driver);
}
static void __exit cirrus_exit(void)
{
- drm_pci_exit(&driver, &cirrus_pci_driver);
+ pci_unregister_driver(&cirrus_pci_driver);
}
module_init(cirrus_init);
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index 8690352d96f7..be2d7e488062 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -96,7 +96,6 @@
struct cirrus_crtc {
struct drm_crtc base;
- u8 lut_r[256], lut_g[256], lut_b[256];
int last_dpms;
bool enabled;
};
@@ -180,13 +179,6 @@ cirrus_bo(struct ttm_buffer_object *bo)
#define to_cirrus_obj(x) container_of(x, struct cirrus_gem_object, base)
#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
- /* cirrus_mode.c */
-void cirrus_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
- u16 blue, int regno);
-void cirrus_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, int regno);
-
-
/* cirrus_main.c */
int cirrus_device_init(struct cirrus_device *cdev,
struct drm_device *ddev,
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index 7fa58eeadc9d..32fbfba2c623 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -215,7 +215,6 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
strcpy(info->fix.id, "cirrusdrmfb");
- info->flags = FBINFO_DEFAULT;
info->fbops = &cirrusfb_ops;
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
@@ -252,7 +251,7 @@ static int cirrus_fbdev_destroy(struct drm_device *dev,
drm_fb_helper_unregister_fbi(&gfbdev->helper);
if (gfb->obj) {
- drm_gem_object_unreference_unlocked(gfb->obj);
+ drm_gem_object_put_unlocked(gfb->obj);
gfb->obj = NULL;
}
@@ -265,8 +264,6 @@ static int cirrus_fbdev_destroy(struct drm_device *dev,
}
static const struct drm_fb_helper_funcs cirrus_fb_helper_funcs = {
- .gamma_set = cirrus_crtc_fb_gamma_set,
- .gamma_get = cirrus_crtc_fb_gamma_get,
.fb_probe = cirrusfb_create,
};
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index e7fc95f63dca..b5f528543956 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -18,7 +18,7 @@ static void cirrus_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct cirrus_framebuffer *cirrus_fb = to_cirrus_framebuffer(fb);
- drm_gem_object_unreference_unlocked(cirrus_fb->obj);
+ drm_gem_object_put_unlocked(cirrus_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(fb);
}
@@ -67,13 +67,13 @@ cirrus_user_framebuffer_create(struct drm_device *dev,
cirrus_fb = kzalloc(sizeof(*cirrus_fb), GFP_KERNEL);
if (!cirrus_fb) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ERR_PTR(-ENOMEM);
}
ret = cirrus_framebuffer_init(dev, cirrus_fb, mode_cmd, obj);
if (ret) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
kfree(cirrus_fb);
return ERR_PTR(ret);
}
@@ -261,7 +261,7 @@ int cirrus_dumb_create(struct drm_file *file,
return ret;
ret = drm_gem_handle_create(file, gobj, &handle);
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
if (ret)
return ret;
@@ -310,7 +310,7 @@ cirrus_dumb_mmap_offset(struct drm_file *file,
bo = gem_to_cirrus_bo(obj);
*offset = cirrus_bo_mmap_offset(bo);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return 0;
}
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index 53f6f0f84206..a4c4a465b385 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -31,25 +31,6 @@
* This file contains setup code for the CRTC.
*/
-static void cirrus_crtc_load_lut(struct drm_crtc *crtc)
-{
- struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct cirrus_device *cdev = dev->dev_private;
- int i;
-
- if (!crtc->enabled)
- return;
-
- for (i = 0; i < CIRRUS_LUT_SIZE; i++) {
- /* VGA registers */
- WREG8(PALETTE_INDEX, i);
- WREG8(PALETTE_DATA, cirrus_crtc->lut_r[i]);
- WREG8(PALETTE_DATA, cirrus_crtc->lut_g[i]);
- WREG8(PALETTE_DATA, cirrus_crtc->lut_b[i]);
- }
-}
-
/*
* The DRM core requires DPMS functions, but they make little sense in our
* case and so are just stubs
@@ -330,15 +311,25 @@ static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, uint32_t size,
struct drm_modeset_acquire_ctx *ctx)
{
- struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct cirrus_device *cdev = dev->dev_private;
+ u16 *r, *g, *b;
int i;
- for (i = 0; i < size; i++) {
- cirrus_crtc->lut_r[i] = red[i];
- cirrus_crtc->lut_g[i] = green[i];
- cirrus_crtc->lut_b[i] = blue[i];
+ if (!crtc->enabled)
+ return 0;
+
+ r = crtc->gamma_store;
+ g = r + crtc->gamma_size;
+ b = g + crtc->gamma_size;
+
+ for (i = 0; i < CIRRUS_LUT_SIZE; i++) {
+ /* VGA registers */
+ WREG8(PALETTE_INDEX, i);
+ WREG8(PALETTE_DATA, *r++ >> 8);
+ WREG8(PALETTE_DATA, *g++ >> 8);
+ WREG8(PALETTE_DATA, *b++ >> 8);
}
- cirrus_crtc_load_lut(crtc);
return 0;
}
@@ -365,7 +356,6 @@ static const struct drm_crtc_helper_funcs cirrus_helper_funcs = {
.mode_set_base = cirrus_crtc_mode_set_base,
.prepare = cirrus_crtc_prepare,
.commit = cirrus_crtc_commit,
- .load_lut = cirrus_crtc_load_lut,
};
/* CRTC setup */
@@ -373,7 +363,6 @@ static void cirrus_crtc_init(struct drm_device *dev)
{
struct cirrus_device *cdev = dev->dev_private;
struct cirrus_crtc *cirrus_crtc;
- int i;
cirrus_crtc = kzalloc(sizeof(struct cirrus_crtc) +
(CIRRUSFB_CONN_LIMIT * sizeof(struct drm_connector *)),
@@ -387,37 +376,9 @@ static void cirrus_crtc_init(struct drm_device *dev)
drm_mode_crtc_set_gamma_size(&cirrus_crtc->base, CIRRUS_LUT_SIZE);
cdev->mode_info.crtc = cirrus_crtc;
- for (i = 0; i < CIRRUS_LUT_SIZE; i++) {
- cirrus_crtc->lut_r[i] = i;
- cirrus_crtc->lut_g[i] = i;
- cirrus_crtc->lut_b[i] = i;
- }
-
drm_crtc_helper_add(&cirrus_crtc->base, &cirrus_helper_funcs);
}
-/** Sets the color ramps on behalf of fbcon */
-void cirrus_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
- u16 blue, int regno)
-{
- struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
-
- cirrus_crtc->lut_r[regno] = red;
- cirrus_crtc->lut_g[regno] = green;
- cirrus_crtc->lut_b[regno] = blue;
-}
-
-/** Gets the color ramps on behalf of fbcon */
-void cirrus_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, int regno)
-{
- struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
-
- *red = cirrus_crtc->lut_r[regno];
- *green = cirrus_crtc->lut_g[regno];
- *blue = cirrus_crtc->lut_b[regno];
-}
-
static void cirrus_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index c0f336d23f9c..2fd383d7253a 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -29,7 +29,6 @@
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_mode.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_print.h>
#include <linux/sync_file.h>
@@ -188,12 +187,15 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
}
for (i = 0; i < state->num_private_objs; i++) {
- void *obj_state = state->private_objs[i].obj_state;
+ struct drm_private_obj *obj = state->private_objs[i].ptr;
- state->private_objs[i].funcs->destroy_state(obj_state);
- state->private_objs[i].obj = NULL;
- state->private_objs[i].obj_state = NULL;
- state->private_objs[i].funcs = NULL;
+ if (!obj)
+ continue;
+
+ obj->funcs->atomic_destroy_state(obj,
+ state->private_objs[i].state);
+ state->private_objs[i].ptr = NULL;
+ state->private_objs[i].state = NULL;
}
state->num_private_objs = 0;
@@ -409,34 +411,6 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
}
EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc);
-/**
- * drm_atomic_replace_property_blob - replace a blob property
- * @blob: a pointer to the member blob to be replaced
- * @new_blob: the new blob to replace with
- * @replaced: whether the blob has been replaced
- *
- * RETURNS:
- * Zero on success, error code on failure
- */
-static void
-drm_atomic_replace_property_blob(struct drm_property_blob **blob,
- struct drm_property_blob *new_blob,
- bool *replaced)
-{
- struct drm_property_blob *old_blob = *blob;
-
- if (old_blob == new_blob)
- return;
-
- drm_property_blob_put(old_blob);
- if (new_blob)
- drm_property_blob_get(new_blob);
- *blob = new_blob;
- *replaced = true;
-
- return;
-}
-
static int
drm_atomic_replace_property_blob_from_id(struct drm_device *dev,
struct drm_property_blob **blob,
@@ -457,7 +431,7 @@ drm_atomic_replace_property_blob_from_id(struct drm_device *dev,
}
}
- drm_atomic_replace_property_blob(blob, new_blob, replaced);
+ *replaced |= drm_property_replace_blob(blob, new_blob);
drm_property_blob_put(new_blob);
return 0;
@@ -739,7 +713,7 @@ EXPORT_SYMBOL(drm_atomic_get_plane_state);
* RETURNS:
* Zero on success, error code on failure
*/
-int drm_atomic_plane_set_property(struct drm_plane *plane,
+static int drm_atomic_plane_set_property(struct drm_plane *plane,
struct drm_plane_state *state, struct drm_property *property,
uint64_t val)
{
@@ -796,7 +770,6 @@ int drm_atomic_plane_set_property(struct drm_plane *plane,
return 0;
}
-EXPORT_SYMBOL(drm_atomic_plane_set_property);
/**
* drm_atomic_plane_get_property - get property value from plane state
@@ -991,11 +964,44 @@ static void drm_atomic_plane_print_state(struct drm_printer *p,
}
/**
+ * drm_atomic_private_obj_init - initialize private object
+ * @obj: private object
+ * @state: initial private object state
+ * @funcs: pointer to the struct of function pointers that identify the object
+ * type
+ *
+ * Initialize the private object, which can be embedded into any
+ * driver private object that needs its own atomic state.
+ */
+void
+drm_atomic_private_obj_init(struct drm_private_obj *obj,
+ struct drm_private_state *state,
+ const struct drm_private_state_funcs *funcs)
+{
+ memset(obj, 0, sizeof(*obj));
+
+ obj->state = state;
+ obj->funcs = funcs;
+}
+EXPORT_SYMBOL(drm_atomic_private_obj_init);
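+
+/*
+ * Illustrative usage sketch (hypothetical driver structures, not part of
+ * this patch): a driver embeds the private object and its state, e.g.
+ *
+ *	struct foo_obj {
+ *		struct drm_private_obj base;
+ *		...
+ *	};
+ *	struct foo_state {
+ *		struct drm_private_state base;
+ *		...
+ *	};
+ *
+ * then calls drm_atomic_private_obj_init(&foo->base, &state->base, &funcs)
+ * at init time and drm_atomic_private_obj_fini(&foo->base) at teardown,
+ * with @funcs providing atomic_duplicate_state/atomic_destroy_state.
+ */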
+
+/**
+ * drm_atomic_private_obj_fini - finalize private object
+ * @obj: private object
+ *
+ * Finalize the private object.
+ */
+void
+drm_atomic_private_obj_fini(struct drm_private_obj *obj)
+{
+ obj->funcs->atomic_destroy_state(obj, obj->state);
+}
+EXPORT_SYMBOL(drm_atomic_private_obj_fini);
+
+/**
* drm_atomic_get_private_obj_state - get private object state
* @state: global atomic state
* @obj: private object to get the state for
- * @funcs: pointer to the struct of function pointers that identify the object
- * type
*
* This function returns the private object state for the given private object,
* allocating the state if needed. It does not grab any locks as the caller is
@@ -1005,18 +1011,18 @@ static void drm_atomic_plane_print_state(struct drm_printer *p,
*
* Either the allocated state or the error code encoded into a pointer.
*/
-void *
-drm_atomic_get_private_obj_state(struct drm_atomic_state *state, void *obj,
- const struct drm_private_state_funcs *funcs)
+struct drm_private_state *
+drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
+ struct drm_private_obj *obj)
{
int index, num_objs, i;
size_t size;
struct __drm_private_objs_state *arr;
+ struct drm_private_state *obj_state;
for (i = 0; i < state->num_private_objs; i++)
- if (obj == state->private_objs[i].obj &&
- state->private_objs[i].obj_state)
- return state->private_objs[i].obj_state;
+ if (obj == state->private_objs[i].ptr)
+ return state->private_objs[i].state;
num_objs = state->num_private_objs + 1;
size = sizeof(*state->private_objs) * num_objs;
@@ -1028,18 +1034,21 @@ drm_atomic_get_private_obj_state(struct drm_atomic_state *state, void *obj,
index = state->num_private_objs;
memset(&state->private_objs[index], 0, sizeof(*state->private_objs));
- state->private_objs[index].obj_state = funcs->duplicate_state(state, obj);
- if (!state->private_objs[index].obj_state)
+ obj_state = obj->funcs->atomic_duplicate_state(obj);
+ if (!obj_state)
return ERR_PTR(-ENOMEM);
- state->private_objs[index].obj = obj;
- state->private_objs[index].funcs = funcs;
+ state->private_objs[index].state = obj_state;
+ state->private_objs[index].old_state = obj->state;
+ state->private_objs[index].new_state = obj_state;
+ state->private_objs[index].ptr = obj;
+
state->num_private_objs = num_objs;
- DRM_DEBUG_ATOMIC("Added new private object state %p to %p\n",
- state->private_objs[index].obj_state, state);
+ DRM_DEBUG_ATOMIC("Added new private object %p state %p to %p\n",
+ obj, obj_state, state);
- return state->private_objs[index].obj_state;
+ return obj_state;
}
EXPORT_SYMBOL(drm_atomic_get_private_obj_state);
@@ -1135,7 +1144,7 @@ EXPORT_SYMBOL(drm_atomic_get_connector_state);
* RETURNS:
* Zero on success, error code on failure
*/
-int drm_atomic_connector_set_property(struct drm_connector *connector,
+static int drm_atomic_connector_set_property(struct drm_connector *connector,
struct drm_connector_state *state, struct drm_property *property,
uint64_t val)
{
@@ -1202,7 +1211,6 @@ int drm_atomic_connector_set_property(struct drm_connector *connector,
return 0;
}
-EXPORT_SYMBOL(drm_atomic_connector_set_property);
static void drm_atomic_connector_print_state(struct drm_printer *p,
const struct drm_connector_state *state)
@@ -1580,38 +1588,6 @@ drm_atomic_add_affected_planes(struct drm_atomic_state *state,
EXPORT_SYMBOL(drm_atomic_add_affected_planes);
/**
- * drm_atomic_legacy_backoff - locking backoff for legacy ioctls
- * @state: atomic state
- *
- * This function should be used by legacy entry points which don't understand
- * -EDEADLK semantics. For simplicity this one will grab all modeset locks after
- * the slowpath completed.
- */
-void drm_atomic_legacy_backoff(struct drm_atomic_state *state)
-{
- struct drm_device *dev = state->dev;
- int ret;
- bool global = false;
-
- if (WARN_ON(dev->mode_config.acquire_ctx == state->acquire_ctx)) {
- global = true;
-
- dev->mode_config.acquire_ctx = NULL;
- }
-
-retry:
- drm_modeset_backoff(state->acquire_ctx);
-
- ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
- if (ret)
- goto retry;
-
- if (global)
- dev->mode_config.acquire_ctx = state->acquire_ctx;
-}
-EXPORT_SYMBOL(drm_atomic_legacy_backoff);
-
-/**
* drm_atomic_check_only - check whether a given config would work
* @state: atomic configuration to check
*
@@ -1655,6 +1631,9 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
if (config->funcs->atomic_check)
ret = config->funcs->atomic_check(state->dev, state);
+ if (ret)
+ return ret;
+
if (!state->allow_modeset) {
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
if (drm_atomic_crtc_needs_modeset(crtc_state)) {
@@ -1665,7 +1644,7 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
}
}
- return ret;
+ return 0;
}
EXPORT_SYMBOL(drm_atomic_check_only);
@@ -1854,9 +1833,60 @@ static struct drm_pending_vblank_event *create_vblank_event(
return e;
}
-static int atomic_set_prop(struct drm_atomic_state *state,
- struct drm_mode_object *obj, struct drm_property *prop,
- uint64_t prop_value)
+int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state,
+ struct drm_connector *connector,
+ int mode)
+{
+ struct drm_connector *tmp_connector;
+ struct drm_connector_state *new_conn_state;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ int i, ret, old_mode = connector->dpms;
+ bool active = false;
+
+ ret = drm_modeset_lock(&state->dev->mode_config.connection_mutex,
+ state->acquire_ctx);
+ if (ret)
+ return ret;
+
+ if (mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
+ connector->dpms = mode;
+
+ crtc = connector->state->crtc;
+ if (!crtc)
+ goto out;
+ ret = drm_atomic_add_affected_connectors(state, crtc);
+ if (ret)
+ goto out;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ goto out;
+ }
+
+ for_each_new_connector_in_state(state, tmp_connector, new_conn_state, i) {
+ if (new_conn_state->crtc != crtc)
+ continue;
+ if (tmp_connector->dpms == DRM_MODE_DPMS_ON) {
+ active = true;
+ break;
+ }
+ }
+
+ crtc_state->active = active;
+ ret = drm_atomic_commit(state);
+out:
+ if (ret != 0)
+ connector->dpms = old_mode;
+ return ret;
+}
+
+int drm_atomic_set_property(struct drm_atomic_state *state,
+ struct drm_mode_object *obj,
+ struct drm_property *prop,
+ uint64_t prop_value)
{
struct drm_mode_object *ref;
int ret;
@@ -2039,7 +2069,7 @@ static int prepare_crtc_signaling(struct drm_device *dev,
{
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
- int i, ret;
+ int i, c = 0, ret;
if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
return 0;
@@ -2100,8 +2130,17 @@ static int prepare_crtc_signaling(struct drm_device *dev,
crtc_state->event->base.fence = fence;
}
+
+ c++;
}
+ /*
+ * Having this flag means user mode pends on event which will never
+ * reach due to lack of at least one CRTC for signaling
+ */
+ if (c == 0 && (arg->flags & DRM_MODE_PAGE_FLIP_EVENT))
+ return -EINVAL;
+
return 0;
}
@@ -2167,10 +2206,10 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
struct drm_atomic_state *state;
struct drm_modeset_acquire_ctx ctx;
struct drm_plane *plane;
- struct drm_out_fence_state *fence_state = NULL;
+ struct drm_out_fence_state *fence_state;
unsigned plane_mask;
int ret = 0;
- unsigned int i, j, num_fences = 0;
+ unsigned int i, j, num_fences;
/* disallow for drivers not supporting atomic: */
if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
@@ -2211,6 +2250,8 @@ retry:
plane_mask = 0;
copied_objs = 0;
copied_props = 0;
+ fence_state = NULL;
+ num_fences = 0;
for (i = 0; i < arg->count_objs; i++) {
uint32_t obj_id, count_props;
@@ -2267,7 +2308,8 @@ retry:
goto out;
}
- ret = atomic_set_prop(state, obj, prop, prop_value);
+ ret = drm_atomic_set_property(state, obj, prop,
+ prop_value);
if (ret) {
drm_mode_object_put(obj);
goto out;
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 86d3093c6c9b..4e53aae9a1fb 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -795,6 +795,9 @@ int drm_atomic_helper_check(struct drm_device *dev,
if (ret)
return ret;
+ if (state->legacy_cursor_update)
+ state->async_update = !drm_atomic_helper_async_check(dev, state);
+
return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_check);
@@ -918,16 +921,12 @@ drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
crtc = new_conn_state->crtc;
if ((!crtc && old_conn_state->crtc) ||
(crtc && drm_atomic_crtc_needs_modeset(crtc->state))) {
- struct drm_property *dpms_prop =
- dev->mode_config.dpms_property;
int mode = DRM_MODE_DPMS_OFF;
if (crtc && crtc->state->active)
mode = DRM_MODE_DPMS_ON;
connector->dpms = mode;
- drm_object_property_set_value(&connector->base,
- dpms_prop, mode);
}
}
@@ -1069,12 +1068,13 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
struct drm_atomic_state *old_state)
{
struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
struct drm_crtc_state *new_crtc_state;
struct drm_connector *connector;
struct drm_connector_state *new_conn_state;
int i;
- for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
+ for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
/* Need to filter out CRTCs where only planes change. */
@@ -1090,8 +1090,8 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
DRM_DEBUG_ATOMIC("enabling [CRTC:%d:%s]\n",
crtc->base.id, crtc->name);
- if (funcs->enable)
- funcs->enable(crtc);
+ if (funcs->atomic_enable)
+ funcs->atomic_enable(crtc, old_crtc_state);
else
funcs->commit(crtc);
}
@@ -1191,9 +1191,13 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_fences);
*
* Helper to, after atomic commit, wait for vblanks on all effected
* crtcs (ie. before cleaning up old framebuffers using
- * drm_atomic_helper_cleanup_planes()). It will only wait on crtcs where the
+ * drm_atomic_helper_cleanup_planes()). It will only wait on CRTCs where the
* framebuffers have actually changed to optimize for the legacy cursor and
* plane update use-case.
+ *
+ * Drivers using the nonblocking commit tracking support initialized by calling
+ * drm_atomic_helper_setup_commit() should look at
+ * drm_atomic_helper_wait_for_flip_done() as an alternative.
*/
void
drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
@@ -1241,27 +1245,54 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
/**
+ * drm_atomic_helper_wait_for_flip_done - wait for all page flips to be done
+ * @dev: DRM device
+ * @old_state: atomic state object with old state structures
+ *
+ * Helper to, after atomic commit, wait for page flips on all affected
+ * crtcs (ie. before cleaning up old framebuffers using
+ * drm_atomic_helper_cleanup_planes()). Compared to
+ * drm_atomic_helper_wait_for_vblanks() this waits for the completion on all
+ * CRTCs, assuming that cursor-only updates are signalling their completion
+ * immediately (or using a different path).
+ *
+ * This requires that drivers use the nonblocking commit tracking support
+ * initialized using drm_atomic_helper_setup_commit().
+ */
+void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev,
+ struct drm_atomic_state *old_state)
+{
+ struct drm_crtc_state *unused;
+ struct drm_crtc *crtc;
+ int i;
+
+ for_each_new_crtc_in_state(old_state, crtc, unused, i) {
+ struct drm_crtc_commit *commit = old_state->crtcs[i].commit;
+ int ret;
+
+ if (!commit)
+ continue;
+
+ ret = wait_for_completion_timeout(&commit->flip_done, 10 * HZ);
+ if (ret == 0)
+ DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
+ crtc->base.id, crtc->name);
+ }
+}
+EXPORT_SYMBOL(drm_atomic_helper_wait_for_flip_done);
+
+/**
* drm_atomic_helper_commit_tail - commit atomic update to hardware
* @old_state: atomic state object with old state structures
*
* This is the default implementation for the
- * &drm_mode_config_helper_funcs.atomic_commit_tail hook.
+ * &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
+ * that do not support runtime_pm or do not need the CRTC to be
+ * enabled to perform a commit. Otherwise, see
+ * drm_atomic_helper_commit_tail_rpm().
*
* Note that the default ordering of how the various stages are called is to
- * match the legacy modeset helper library closest. One peculiarity of that is
- * that it doesn't mesh well with runtime PM at all.
- *
- * For drivers supporting runtime PM the recommended sequence is instead ::
- *
- * drm_atomic_helper_commit_modeset_disables(dev, old_state);
- *
- * drm_atomic_helper_commit_modeset_enables(dev, old_state);
- *
- * drm_atomic_helper_commit_planes(dev, old_state,
- * DRM_PLANE_COMMIT_ACTIVE_ONLY);
- *
- * for committing the atomic update to hardware. See the kerneldoc entries for
- * these three functions for more details.
+ * match the legacy modeset helper library most closely.
*/
void drm_atomic_helper_commit_tail(struct drm_atomic_state *old_state)
{
@@ -1281,6 +1312,35 @@ void drm_atomic_helper_commit_tail(struct drm_atomic_state *old_state)
}
EXPORT_SYMBOL(drm_atomic_helper_commit_tail);
+/**
+ * drm_atomic_helper_commit_tail_rpm - commit atomic update to hardware
+ * @old_state: new modeset state to be committed
+ *
+ * This is an alternative implementation for the
+ * &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
+ * that support runtime_pm or need the CRTC to be enabled to perform a
+ * commit. Otherwise, one should use the default implementation
+ * drm_atomic_helper_commit_tail().
+ */
+void drm_atomic_helper_commit_tail_rpm(struct drm_atomic_state *old_state)
+{
+ struct drm_device *dev = old_state->dev;
+
+ drm_atomic_helper_commit_modeset_disables(dev, old_state);
+
+ drm_atomic_helper_commit_modeset_enables(dev, old_state);
+
+ drm_atomic_helper_commit_planes(dev, old_state,
+ DRM_PLANE_COMMIT_ACTIVE_ONLY);
+
+ drm_atomic_helper_commit_hw_done(old_state);
+
+ drm_atomic_helper_wait_for_vblanks(dev, old_state);
+
+ drm_atomic_helper_cleanup_planes(dev, old_state);
+}
+EXPORT_SYMBOL(drm_atomic_helper_commit_tail_rpm);
+
static void commit_tail(struct drm_atomic_state *old_state)
{
struct drm_device *dev = old_state->dev;
@@ -1311,6 +1371,114 @@ static void commit_work(struct work_struct *work)
}
/**
+ * drm_atomic_helper_async_check - check if state can be committed asynchronously
+ * @dev: DRM device
+ * @state: the driver state object
+ *
+ * This helper will check if it is possible to commit the state asynchronously.
+ * Async commits are not supposed to swap the states like normal sync commits
+ * but just do in-place changes on the current state.
+ *
+ * It will return 0 if the commit can happen in an asynchronous fashion or error
+ * if not. Note that error just mean it can't be commited asynchronously, if it
+ * fails the commit should be treated like a normal synchronous commit.
+ */
+int drm_atomic_helper_async_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc_commit *commit;
+ struct drm_plane *__plane, *plane = NULL;
+ struct drm_plane_state *__plane_state, *plane_state = NULL;
+ const struct drm_plane_helper_funcs *funcs;
+ int i, j, n_planes = 0;
+
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ if (drm_atomic_crtc_needs_modeset(crtc_state))
+ return -EINVAL;
+ }
+
+ for_each_new_plane_in_state(state, __plane, __plane_state, i) {
+ n_planes++;
+ plane = __plane;
+ plane_state = __plane_state;
+ }
+
+ /* FIXME: we support only single plane updates for now */
+ if (!plane || n_planes != 1)
+ return -EINVAL;
+
+ if (!plane_state->crtc)
+ return -EINVAL;
+
+ funcs = plane->helper_private;
+ if (!funcs->atomic_async_update)
+ return -EINVAL;
+
+ if (plane_state->fence)
+ return -EINVAL;
+
+ /*
+ * Don't do an async update if there is an outstanding commit modifying
+ * the plane. This prevents our async update's changes from getting
+ * overridden by a previous synchronous update's state.
+ */
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ if (plane->crtc != crtc)
+ continue;
+
+ spin_lock(&crtc->commit_lock);
+ commit = list_first_entry_or_null(&crtc->commit_list,
+ struct drm_crtc_commit,
+ commit_entry);
+ if (!commit) {
+ spin_unlock(&crtc->commit_lock);
+ continue;
+ }
+ spin_unlock(&crtc->commit_lock);
+
+ if (!crtc->state->state)
+ continue;
+
+ for_each_plane_in_state(crtc->state->state, __plane,
+ __plane_state, j) {
+ if (__plane == plane)
+ return -EINVAL;
+ }
+ }
+
+ return funcs->atomic_async_check(plane, plane_state);
+}
+EXPORT_SYMBOL(drm_atomic_helper_async_check);
+
+/**
+ * drm_atomic_helper_async_commit - commit state asynchronously
+ * @dev: DRM device
+ * @state: the driver state object
+ *
+ * This function commits a state asynchronously, i.e., not vblank
+ * synchronized. It should be used on a state only when
+ * drm_atomic_helper_async_check() succeeds. Async commits are not supposed to swap
+ * the states like normal sync commits, but just do in-place changes on the
+ * current state.
+ */
+void drm_atomic_helper_async_commit(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
+ const struct drm_plane_helper_funcs *funcs;
+ int i;
+
+ for_each_new_plane_in_state(state, plane, plane_state, i) {
+ funcs = plane->helper_private;
+ funcs->atomic_async_update(plane, plane_state);
+ }
+}
+EXPORT_SYMBOL(drm_atomic_helper_async_commit);
+
+/**
* drm_atomic_helper_commit - commit validated state object
* @dev: DRM device
* @state: the driver state object
@@ -1334,6 +1502,17 @@ int drm_atomic_helper_commit(struct drm_device *dev,
{
int ret;
+ if (state->async_update) {
+ ret = drm_atomic_helper_prepare_planes(dev, state);
+ if (ret)
+ return ret;
+
+ drm_atomic_helper_async_commit(dev, state);
+ drm_atomic_helper_cleanup_planes(dev, state);
+
+ return 0;
+ }
+
ret = drm_atomic_helper_setup_commit(state, nonblock);
if (ret)
return ret;
@@ -1346,10 +1525,8 @@ int drm_atomic_helper_commit(struct drm_device *dev,
if (!nonblock) {
ret = drm_atomic_helper_wait_for_fences(dev, state, true);
- if (ret) {
- drm_atomic_helper_cleanup_planes(dev, state);
- return ret;
- }
+ if (ret)
+ goto err;
}
/*
@@ -1358,7 +1535,9 @@ int drm_atomic_helper_commit(struct drm_device *dev,
* the software side now.
*/
- drm_atomic_helper_swap_state(state, true);
+ ret = drm_atomic_helper_swap_state(state, true);
+ if (ret)
+ goto err;
/*
* Everything below can be run asynchronously without the need to grab
@@ -1387,6 +1566,10 @@ int drm_atomic_helper_commit(struct drm_device *dev,
commit_tail(state);
return 0;
+
+err:
+ drm_atomic_helper_cleanup_planes(dev, state);
+ return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_commit);
@@ -1680,9 +1863,7 @@ void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *old_state)
/* backend must have consumed any event by now */
WARN_ON(new_crtc_state->event);
- spin_lock(&crtc->commit_lock);
complete_all(&commit->hw_done);
- spin_unlock(&crtc->commit_lock);
}
}
EXPORT_SYMBOL(drm_atomic_helper_commit_hw_done);
@@ -1711,7 +1892,6 @@ void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *old_state)
if (WARN_ON(!commit))
continue;
- spin_lock(&crtc->commit_lock);
complete_all(&commit->cleanup_done);
WARN_ON(!try_wait_for_completion(&commit->hw_done));
@@ -1721,8 +1901,6 @@ void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *old_state)
if (try_wait_for_completion(&commit->flip_done))
goto del_commit;
- spin_unlock(&crtc->commit_lock);
-
/* We must wait for the vblank event to signal our completion
* before releasing our reference, since the vblank work does
* not hold a reference of its own. */
@@ -1732,8 +1910,8 @@ void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *old_state)
DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
crtc->base.id, crtc->name);
- spin_lock(&crtc->commit_lock);
del_commit:
+ spin_lock(&crtc->commit_lock);
list_del(&commit->commit_entry);
spin_unlock(&crtc->commit_lock);
}
@@ -2069,14 +2247,14 @@ EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
/**
* drm_atomic_helper_swap_state - store atomic state into current sw state
* @state: atomic state
- * @stall: stall for proceeding commits
+ * @stall: stall for preceding commits
*
* This function stores the atomic state into the current state pointers in all
* driver objects. It should be called after all failing steps have been done
* and succeeded, but before the actual hardware state is committed.
*
* For cleanup and error recovery the current state for all changed objects will
- * be swaped into @state.
+ * be swapped into @state.
*
* With that sequence it fits perfectly into the plane prepare/cleanup sequence:
*
@@ -2095,12 +2273,16 @@ EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
* the &drm_plane.state, &drm_crtc.state or &drm_connector.state pointer. With
* the current atomic helpers this is almost always the case, since the helpers
* don't pass the right state structures to the callbacks.
+ *
+ * Returns:
+ *
+ * Returns 0 on success. Can return -ERESTARTSYS when @stall is true and the
+ * waiting for the previous commits has been interrupted.
*/
-void drm_atomic_helper_swap_state(struct drm_atomic_state *state,
+int drm_atomic_helper_swap_state(struct drm_atomic_state *state,
bool stall)
{
- int i;
- long ret;
+ int i, ret;
struct drm_connector *connector;
struct drm_connector_state *old_conn_state, *new_conn_state;
struct drm_crtc *crtc;
@@ -2108,8 +2290,8 @@ void drm_atomic_helper_swap_state(struct drm_atomic_state *state,
struct drm_plane *plane;
struct drm_plane_state *old_plane_state, *new_plane_state;
struct drm_crtc_commit *commit;
- void *obj, *obj_state;
- const struct drm_private_state_funcs *funcs;
+ struct drm_private_obj *obj;
+ struct drm_private_state *old_obj_state, *new_obj_state;
if (stall) {
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
@@ -2123,12 +2305,11 @@ void drm_atomic_helper_swap_state(struct drm_atomic_state *state,
if (!commit)
continue;
- ret = wait_for_completion_timeout(&commit->hw_done,
- 10*HZ);
- if (ret == 0)
- DRM_ERROR("[CRTC:%d:%s] hw_done timed out\n",
- crtc->base.id, crtc->name);
+ ret = wait_for_completion_interruptible(&commit->hw_done);
drm_crtc_commit_put(commit);
+
+ if (ret)
+ return ret;
}
}
@@ -2171,8 +2352,17 @@ void drm_atomic_helper_swap_state(struct drm_atomic_state *state,
plane->state = new_plane_state;
}
- __for_each_private_obj(state, obj, obj_state, i, funcs)
- funcs->swap_state(obj, &state->private_objs[i].obj_state);
+ for_each_oldnew_private_obj_in_state(state, obj, old_obj_state, new_obj_state, i) {
+ WARN_ON(obj->state != old_obj_state);
+
+ old_obj_state->state = state;
+ new_obj_state->state = NULL;
+
+ state->private_objs[i].state = old_obj_state;
+ obj->state = new_obj_state;
+ }
+
+ return 0;
}
EXPORT_SYMBOL(drm_atomic_helper_swap_state);
@@ -2526,6 +2716,7 @@ int drm_atomic_helper_disable_all(struct drm_device *dev,
struct drm_plane *plane;
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
+ unsigned plane_mask = 0;
int ret, i;
state = drm_atomic_state_alloc(dev);
@@ -2556,22 +2747,26 @@ int drm_atomic_helper_disable_all(struct drm_device *dev,
goto free;
}
- for_each_connector_in_state(state, conn, conn_state, i) {
+ for_each_new_connector_in_state(state, conn, conn_state, i) {
ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
if (ret < 0)
goto free;
}
- for_each_plane_in_state(state, plane, plane_state, i) {
+ for_each_new_plane_in_state(state, plane, plane_state, i) {
ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
if (ret < 0)
goto free;
drm_atomic_set_fb_for_plane(plane_state, NULL);
+ plane_mask |= BIT(drm_plane_index(plane));
+ plane->old_fb = plane->fb;
}
ret = drm_atomic_commit(state);
free:
+ if (plane_mask)
+ drm_atomic_clean_old_fb(dev, plane_mask, ret);
drm_atomic_state_put(state);
return ret;
}
@@ -2702,11 +2897,16 @@ int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
struct drm_connector_state *new_conn_state;
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state;
+ unsigned plane_mask = 0;
+ struct drm_device *dev = state->dev;
+ int ret;
state->acquire_ctx = ctx;
- for_each_new_plane_in_state(state, plane, new_plane_state, i)
+ for_each_new_plane_in_state(state, plane, new_plane_state, i) {
+ plane_mask |= BIT(drm_plane_index(plane));
state->planes[i].old_state = plane->state;
+ }
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
state->crtcs[i].old_state = crtc->state;
@@ -2714,7 +2914,11 @@ int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
for_each_new_connector_in_state(state, connector, new_conn_state, i)
state->connectors[i].old_state = connector->state;
- return drm_atomic_commit(state);
+ ret = drm_atomic_commit(state);
+ if (plane_mask)
+ drm_atomic_clean_old_fb(dev, plane_mask, ret);
+
+ return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_commit_duplicated_state);
@@ -2763,177 +2967,11 @@ out:
}
EXPORT_SYMBOL(drm_atomic_helper_resume);
-/**
- * drm_atomic_helper_crtc_set_property - helper for crtc properties
- * @crtc: DRM crtc
- * @property: DRM property
- * @val: value of property
- *
- * Provides a default crtc set_property handler using the atomic driver
- * interface.
- *
- * RETURNS:
- * Zero on success, error code on failure
- */
-int
-drm_atomic_helper_crtc_set_property(struct drm_crtc *crtc,
- struct drm_property *property,
- uint64_t val)
-{
- struct drm_atomic_state *state;
- struct drm_crtc_state *crtc_state;
- int ret = 0;
-
- state = drm_atomic_state_alloc(crtc->dev);
- if (!state)
- return -ENOMEM;
-
- /* ->set_property is always called with all locks held. */
- state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
-retry:
- crtc_state = drm_atomic_get_crtc_state(state, crtc);
- if (IS_ERR(crtc_state)) {
- ret = PTR_ERR(crtc_state);
- goto fail;
- }
-
- ret = drm_atomic_crtc_set_property(crtc, crtc_state,
- property, val);
- if (ret)
- goto fail;
-
- ret = drm_atomic_commit(state);
-fail:
- if (ret == -EDEADLK)
- goto backoff;
-
- drm_atomic_state_put(state);
- return ret;
-
-backoff:
- drm_atomic_state_clear(state);
- drm_atomic_legacy_backoff(state);
-
- goto retry;
-}
-EXPORT_SYMBOL(drm_atomic_helper_crtc_set_property);
-
-/**
- * drm_atomic_helper_plane_set_property - helper for plane properties
- * @plane: DRM plane
- * @property: DRM property
- * @val: value of property
- *
- * Provides a default plane set_property handler using the atomic driver
- * interface.
- *
- * RETURNS:
- * Zero on success, error code on failure
- */
-int
-drm_atomic_helper_plane_set_property(struct drm_plane *plane,
- struct drm_property *property,
- uint64_t val)
-{
- struct drm_atomic_state *state;
- struct drm_plane_state *plane_state;
- int ret = 0;
-
- state = drm_atomic_state_alloc(plane->dev);
- if (!state)
- return -ENOMEM;
-
- /* ->set_property is always called with all locks held. */
- state->acquire_ctx = plane->dev->mode_config.acquire_ctx;
-retry:
- plane_state = drm_atomic_get_plane_state(state, plane);
- if (IS_ERR(plane_state)) {
- ret = PTR_ERR(plane_state);
- goto fail;
- }
-
- ret = drm_atomic_plane_set_property(plane, plane_state,
- property, val);
- if (ret)
- goto fail;
-
- ret = drm_atomic_commit(state);
-fail:
- if (ret == -EDEADLK)
- goto backoff;
-
- drm_atomic_state_put(state);
- return ret;
-
-backoff:
- drm_atomic_state_clear(state);
- drm_atomic_legacy_backoff(state);
-
- goto retry;
-}
-EXPORT_SYMBOL(drm_atomic_helper_plane_set_property);
-
-/**
- * drm_atomic_helper_connector_set_property - helper for connector properties
- * @connector: DRM connector
- * @property: DRM property
- * @val: value of property
- *
- * Provides a default connector set_property handler using the atomic driver
- * interface.
- *
- * RETURNS:
- * Zero on success, error code on failure
- */
-int
-drm_atomic_helper_connector_set_property(struct drm_connector *connector,
- struct drm_property *property,
- uint64_t val)
-{
- struct drm_atomic_state *state;
- struct drm_connector_state *connector_state;
- int ret = 0;
-
- state = drm_atomic_state_alloc(connector->dev);
- if (!state)
- return -ENOMEM;
-
- /* ->set_property is always called with all locks held. */
- state->acquire_ctx = connector->dev->mode_config.acquire_ctx;
-retry:
- connector_state = drm_atomic_get_connector_state(state, connector);
- if (IS_ERR(connector_state)) {
- ret = PTR_ERR(connector_state);
- goto fail;
- }
-
- ret = drm_atomic_connector_set_property(connector, connector_state,
- property, val);
- if (ret)
- goto fail;
-
- ret = drm_atomic_commit(state);
-fail:
- if (ret == -EDEADLK)
- goto backoff;
-
- drm_atomic_state_put(state);
- return ret;
-
-backoff:
- drm_atomic_state_clear(state);
- drm_atomic_legacy_backoff(state);
-
- goto retry;
-}
-EXPORT_SYMBOL(drm_atomic_helper_connector_set_property);
-
-static int page_flip_common(
- struct drm_atomic_state *state,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t flags)
+static int page_flip_common(struct drm_atomic_state *state,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t flags)
{
struct drm_plane *plane = crtc->primary;
struct drm_plane_state *plane_state;
@@ -3027,13 +3065,12 @@ EXPORT_SYMBOL(drm_atomic_helper_page_flip);
* Returns:
* Returns 0 on success, negative errno numbers on failure.
*/
-int drm_atomic_helper_page_flip_target(
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t flags,
- uint32_t target,
- struct drm_modeset_acquire_ctx *ctx)
+int drm_atomic_helper_page_flip_target(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t flags,
+ uint32_t target,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct drm_plane *plane = crtc->primary;
struct drm_atomic_state *state;
@@ -3065,85 +3102,6 @@ fail:
EXPORT_SYMBOL(drm_atomic_helper_page_flip_target);
/**
- * drm_atomic_helper_connector_dpms() - connector dpms helper implementation
- * @connector: affected connector
- * @mode: DPMS mode
- *
- * This is the main helper function provided by the atomic helper framework for
- * implementing the legacy DPMS connector interface. It computes the new desired
- * &drm_crtc_state.active state for the corresponding CRTC (if the connector is
- * enabled) and updates it.
- *
- * Returns:
- * Returns 0 on success, negative errno numbers on failure.
- */
-int drm_atomic_helper_connector_dpms(struct drm_connector *connector,
- int mode)
-{
- struct drm_mode_config *config = &connector->dev->mode_config;
- struct drm_atomic_state *state;
- struct drm_crtc_state *crtc_state;
- struct drm_crtc *crtc;
- struct drm_connector *tmp_connector;
- struct drm_connector_list_iter conn_iter;
- int ret;
- bool active = false;
- int old_mode = connector->dpms;
-
- if (mode != DRM_MODE_DPMS_ON)
- mode = DRM_MODE_DPMS_OFF;
-
- connector->dpms = mode;
- crtc = connector->state->crtc;
-
- if (!crtc)
- return 0;
-
- state = drm_atomic_state_alloc(connector->dev);
- if (!state)
- return -ENOMEM;
-
- state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
-retry:
- crtc_state = drm_atomic_get_crtc_state(state, crtc);
- if (IS_ERR(crtc_state)) {
- ret = PTR_ERR(crtc_state);
- goto fail;
- }
-
- WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
-
- drm_connector_list_iter_begin(connector->dev, &conn_iter);
- drm_for_each_connector_iter(tmp_connector, &conn_iter) {
- if (tmp_connector->state->crtc != crtc)
- continue;
-
- if (tmp_connector->dpms == DRM_MODE_DPMS_ON) {
- active = true;
- break;
- }
- }
- drm_connector_list_iter_end(&conn_iter);
- crtc_state->active = active;
-
- ret = drm_atomic_commit(state);
-fail:
- if (ret == -EDEADLK)
- goto backoff;
- if (ret != 0)
- connector->dpms = old_mode;
- drm_atomic_state_put(state);
- return ret;
-
-backoff:
- drm_atomic_state_clear(state);
- drm_atomic_legacy_backoff(state);
-
- goto retry;
-}
-EXPORT_SYMBOL(drm_atomic_helper_connector_dpms);
-
-/**
* drm_atomic_helper_best_encoder - Helper for
* &drm_connector_helper_funcs.best_encoder callback
* @connector: Connector control structure
@@ -3612,12 +3570,12 @@ int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_device *dev = crtc->dev;
- struct drm_mode_config *config = &dev->mode_config;
struct drm_atomic_state *state;
struct drm_crtc_state *crtc_state;
struct drm_property_blob *blob = NULL;
struct drm_color_lut *blob_data;
int i, ret = 0;
+ bool replaced;
state = drm_atomic_state_alloc(crtc->dev);
if (!state)
@@ -3648,20 +3606,10 @@ int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
}
/* Reset DEGAMMA_LUT and CTM properties. */
- ret = drm_atomic_crtc_set_property(crtc, crtc_state,
- config->degamma_lut_property, 0);
- if (ret)
- goto fail;
-
- ret = drm_atomic_crtc_set_property(crtc, crtc_state,
- config->ctm_property, 0);
- if (ret)
- goto fail;
-
- ret = drm_atomic_crtc_set_property(crtc, crtc_state,
- config->gamma_lut_property, blob->base.id);
- if (ret)
- goto fail;
+ replaced = drm_property_replace_blob(&crtc_state->degamma_lut, NULL);
+ replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL);
+ replaced |= drm_property_replace_blob(&crtc_state->gamma_lut, blob);
+ crtc_state->color_mgmt_changed |= replaced;
ret = drm_atomic_commit(state);
@@ -3671,3 +3619,18 @@ fail:
return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_legacy_gamma_set);
+
+/**
+ * __drm_atomic_helper_private_obj_duplicate_state - copy atomic private state
+ * @obj: private object
+ * @state: new private object state
+ *
+ * Copies atomic state from a private object's current state and resets inferred values.
+ * This is useful for drivers that subclass the private state.
+ */
+void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
+{
+ memcpy(state, obj->state, sizeof(*state));
+}
+EXPORT_SYMBOL(__drm_atomic_helper_private_obj_duplicate_state);
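For context, a minimal sketch of how a driver could build on the new private-object helper above, mirroring the MST conversion later in this series; the foo_bus_* names and the bandwidth member are purely illustrative and not part of this patch.

/* Illustrative only: a driver-defined private object built on the helper above. */
struct foo_bus_state {
	struct drm_private_state base;
	unsigned int bandwidth;		/* driver-specific member */
};

static struct drm_private_state *
foo_bus_duplicate_state(struct drm_private_obj *obj)
{
	struct foo_bus_state *state;

	/* obj->state points at the base of a foo_bus_state, so copying
	 * sizeof(*state) duplicates the driver members as well.
	 */
	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void foo_bus_destroy_state(struct drm_private_obj *obj,
				  struct drm_private_state *state)
{
	kfree(container_of(state, struct foo_bus_state, base));
}

static const struct drm_private_state_funcs foo_bus_state_funcs = {
	.atomic_duplicate_state = foo_bus_duplicate_state,
	.atomic_destroy_state = foo_bus_destroy_state,
};

/* At init time the driver would then register the object with
 * drm_atomic_private_obj_init(&foo->obj, &initial_state->base,
 *			       &foo_bus_state_funcs);
 */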
diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c
index 3eda500fc005..fe0982708e95 100644
--- a/drivers/gpu/drm/drm_color_mgmt.c
+++ b/drivers/gpu/drm/drm_color_mgmt.c
@@ -128,6 +128,9 @@ EXPORT_SYMBOL(drm_color_lut_extract);
* optional. The gamma and degamma properties are only attached if
* their size is not 0 and ctm_property is only attached if has_ctm is
* true.
+ *
+ * Drivers should use drm_atomic_helper_legacy_gamma_set() to implement the
+ * legacy &drm_crtc_funcs.gamma_set callback.
*/
void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
uint degamma_lut_size,
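A minimal sketch of the wiring the added documentation describes, assuming an otherwise standard atomic driver; foo_crtc_funcs is an illustrative name, the exact callback set and LUT sizes differ per driver.

/* Illustrative wiring only; foo_crtc_funcs is not part of this patch. */
static const struct drm_crtc_funcs foo_crtc_funcs = {
	.set_config		= drm_atomic_helper_set_config,
	.page_flip		= drm_atomic_helper_page_flip,
	.gamma_set		= drm_atomic_helper_legacy_gamma_set,
	.reset			= drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state	= drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_crtc_destroy_state,
};

/* ...and at CRTC init time, assuming 256-entry LUTs:
 * drm_mode_crtc_set_gamma_size(crtc, 256);
 * drm_crtc_enable_color_mgmt(crtc, 256, true, 256);
 */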
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 8072e6e4c62c..ba9f36cef68c 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -717,9 +717,9 @@ DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
* drivers, it remaps to controlling the "ACTIVE" property on the CRTC the
* connector is linked to. Drivers should never set this property directly,
* it is handled by the DRM core by calling the &drm_connector_funcs.dpms
- * callback. Atomic drivers should implement this hook using
- * drm_atomic_helper_connector_dpms(). This is the only property standard
- * connector property that userspace can change.
+ * callback. For atomic drivers the remapping to the "ACTIVE" property is
+ * implemented in the DRM core. This is the only standard connector
+ * property that userspace can change.
* PATH:
* Connector path property to identify how this sink is physically
* connected. Used by DP MST. This should be set by calling
@@ -1225,7 +1225,6 @@ int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
} else if (connector->funcs->set_property)
ret = connector->funcs->set_property(connector, property, value);
- /* store the property value if successful */
if (!ret)
drm_object_property_set_value(&connector->base, property, value);
return ret;
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 4afdf7902eda..eab36a460638 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -863,8 +863,7 @@ static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
* provided by the driver.
*
* This function is deprecated. New drivers must implement atomic modeset
- * support, for which this function is unsuitable. Instead drivers should use
- * drm_atomic_helper_connector_dpms().
+ * support, where DPMS is handled in the DRM core.
*
* Returns:
* Always returns 0.
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index d077c5490041..a43582076b20 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -178,6 +178,13 @@ struct drm_minor;
int drm_atomic_debugfs_init(struct drm_minor *minor);
#endif
+int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state,
+ struct drm_connector *connector,
+ int mode);
+int drm_atomic_set_property(struct drm_atomic_state *state,
+ struct drm_mode_object *obj,
+ struct drm_property *prop,
+ uint64_t prop_value);
int drm_atomic_get_property(struct drm_mode_object *obj,
struct drm_property *property, uint64_t *val);
int drm_mode_atomic_ioctl(struct drm_device *dev,
diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c
index 1722d8f21449..f9e26dda56d6 100644
--- a/drivers/gpu/drm/drm_debugfs_crc.c
+++ b/drivers/gpu/drm/drm_debugfs_crc.c
@@ -136,21 +136,51 @@ static int crtc_crc_data_count(struct drm_crtc_crc *crc)
return CIRC_CNT(crc->head, crc->tail, DRM_CRC_ENTRIES_NR);
}
+static void crtc_crc_cleanup(struct drm_crtc_crc *crc)
+{
+ kfree(crc->entries);
+ crc->entries = NULL;
+ crc->head = 0;
+ crc->tail = 0;
+ crc->values_cnt = 0;
+ crc->opened = false;
+}
+
static int crtc_crc_open(struct inode *inode, struct file *filep)
{
struct drm_crtc *crtc = inode->i_private;
struct drm_crtc_crc *crc = &crtc->crc;
struct drm_crtc_crc_entry *entries = NULL;
size_t values_cnt;
- int ret;
+ int ret = 0;
- if (crc->opened)
- return -EBUSY;
+ if (drm_drv_uses_atomic_modeset(crtc->dev)) {
+ ret = drm_modeset_lock_interruptible(&crtc->mutex, NULL);
+ if (ret)
+ return ret;
+
+ if (!crtc->state->active)
+ ret = -EIO;
+ drm_modeset_unlock(&crtc->mutex);
+
+ if (ret)
+ return ret;
+ }
+
+ spin_lock_irq(&crc->lock);
+ if (!crc->opened)
+ crc->opened = true;
+ else
+ ret = -EBUSY;
+ spin_unlock_irq(&crc->lock);
- ret = crtc->funcs->set_crc_source(crtc, crc->source, &values_cnt);
if (ret)
return ret;
+ ret = crtc->funcs->set_crc_source(crtc, crc->source, &values_cnt);
+ if (ret)
+ goto err;
+
if (WARN_ON(values_cnt > DRM_MAX_CRC_NR)) {
ret = -EINVAL;
goto err_disable;
@@ -170,7 +200,6 @@ static int crtc_crc_open(struct inode *inode, struct file *filep)
spin_lock_irq(&crc->lock);
crc->entries = entries;
crc->values_cnt = values_cnt;
- crc->opened = true;
/*
* Only return once we got a first frame, so userspace doesn't have to
@@ -182,12 +211,17 @@ static int crtc_crc_open(struct inode *inode, struct file *filep)
crc->lock);
spin_unlock_irq(&crc->lock);
- WARN_ON(ret);
+ if (ret)
+ goto err_disable;
return 0;
err_disable:
crtc->funcs->set_crc_source(crtc, NULL, &values_cnt);
+err:
+ spin_lock_irq(&crc->lock);
+ crtc_crc_cleanup(crc);
+ spin_unlock_irq(&crc->lock);
return ret;
}
@@ -197,17 +231,12 @@ static int crtc_crc_release(struct inode *inode, struct file *filep)
struct drm_crtc_crc *crc = &crtc->crc;
size_t values_cnt;
+ crtc->funcs->set_crc_source(crtc, NULL, &values_cnt);
+
spin_lock_irq(&crc->lock);
- kfree(crc->entries);
- crc->entries = NULL;
- crc->head = 0;
- crc->tail = 0;
- crc->values_cnt = 0;
- crc->opened = false;
+ crtc_crc_cleanup(crc);
spin_unlock_irq(&crc->lock);
- crtc->funcs->set_crc_source(crtc, NULL, &values_cnt);
-
return 0;
}
@@ -334,7 +363,7 @@ int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame,
spin_lock(&crc->lock);
/* Caller may not have noticed yet that userspace has stopped reading */
- if (!crc->opened) {
+ if (!crc->entries) {
spin_unlock(&crc->lock);
return -EINVAL;
}
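For reference, a rough userspace sketch of the debugfs interface these changes harden, assuming the usual dri/<minor>/crtc-<n>/crc layout and the "auto" CRC source; the paths and error handling are illustrative only.

/* Illustrative userspace reader for the CRC debugfs interface. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *ctl = "/sys/kernel/debug/dri/0/crtc-0/crc/control";
	const char *dat = "/sys/kernel/debug/dri/0/crtc-0/crc/data";
	char line[128];
	FILE *f;
	int fd;

	fd = open(ctl, O_WRONLY);
	if (fd < 0)
		return 1;
	if (write(fd, "auto", 4) != 4) {	/* select the driver's default CRC source */
		close(fd);
		return 1;
	}
	close(fd);

	f = fopen(dat, "r");	/* with this patch, opening fails while the CRTC is inactive */
	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))	/* one "<frame> <crc...>" entry per line */
		fputs(line, stdout);
	fclose(f);
	return 0;
}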
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 213fb837e1c4..08af8d6b844b 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -544,7 +544,7 @@ void drm_dp_downstream_debug(struct seq_file *m,
DP_DETAILED_CAP_INFO_AVAILABLE;
int clk;
int bpc;
- char id[6];
+ char id[7];
int len;
uint8_t rev[2];
int type = port_cap[0] & DP_DS_PORT_TYPE_MASK;
@@ -583,6 +583,7 @@ void drm_dp_downstream_debug(struct seq_file *m,
seq_puts(m, "\t\tType: N/A\n");
}
+ memset(id, 0, sizeof(id));
drm_dp_downstream_id(aux, id);
seq_printf(m, "\t\tID: %s\n", id);
@@ -591,7 +592,7 @@ void drm_dp_downstream_debug(struct seq_file *m,
seq_printf(m, "\t\tHW: %d.%d\n",
(rev[0] & 0xf0) >> 4, rev[0] & 0xf);
- len = drm_dp_dpcd_read(aux, DP_BRANCH_SW_REV, &rev, 2);
+ len = drm_dp_dpcd_read(aux, DP_BRANCH_SW_REV, rev, 2);
if (len > 0)
seq_printf(m, "\t\tSW: %d.%d\n", rev[0], rev[1]);
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index bfd237c15e76..41b492f99955 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -31,6 +31,8 @@
#include <drm/drmP.h>
#include <drm/drm_fixed.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
/**
* DOC: dp mst helper
@@ -330,6 +332,13 @@ static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
return false;
}
+ /*
+ * ignore out-of-order messages or messages that are part of a
+ * failed transaction
+ */
+ if (!recv_hdr.somt && !msg->have_somt)
+ return false;
+
/* get length contained in this portion */
msg->curchunk_len = recv_hdr.msg_len;
msg->curchunk_hdrlen = hdrlen;
@@ -1335,15 +1344,17 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work)
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
u8 *guid)
{
- static u8 zero_guid[16];
+ u64 salt;
- if (!memcmp(guid, zero_guid, 16)) {
- u64 salt = get_jiffies_64();
- memcpy(&guid[0], &salt, sizeof(u64));
- memcpy(&guid[8], &salt, sizeof(u64));
- return false;
- }
- return true;
+ if (memchr_inv(guid, 0, 16))
+ return true;
+
+ salt = get_jiffies_64();
+
+ memcpy(&guid[0], &salt, sizeof(u64));
+ memcpy(&guid[8], &salt, sizeof(u64));
+
+ return false;
}
#if 0
@@ -2164,7 +2175,7 @@ out_unlock:
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
-static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
+static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
{
int len;
u8 replyblock[32];
@@ -2179,12 +2190,12 @@ static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
replyblock, len);
if (ret != len) {
DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
- return;
+ return false;
}
ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
if (!ret) {
DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
- return;
+ return false;
}
replylen = msg->curchunk_len + msg->curchunk_hdrlen;
@@ -2196,21 +2207,32 @@ static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
replyblock, len);
if (ret != len) {
- DRM_DEBUG_KMS("failed to read a chunk\n");
+ DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
+ len, ret);
+ return false;
}
+
ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
- if (ret == false)
+ if (!ret) {
DRM_DEBUG_KMS("failed to build sideband msg\n");
+ return false;
+ }
+
curreply += len;
replylen -= len;
}
+ return true;
}
static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
{
int ret = 0;
- drm_dp_get_one_sb_msg(mgr, false);
+ if (!drm_dp_get_one_sb_msg(mgr, false)) {
+ memset(&mgr->down_rep_recv, 0,
+ sizeof(struct drm_dp_sideband_msg_rx));
+ return 0;
+ }
if (mgr->down_rep_recv.have_eomt) {
struct drm_dp_sideband_msg_tx *txmsg;
@@ -2266,7 +2288,12 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
{
int ret = 0;
- drm_dp_get_one_sb_msg(mgr, true);
+
+ if (!drm_dp_get_one_sb_msg(mgr, true)) {
+ memset(&mgr->up_req_recv, 0,
+ sizeof(struct drm_dp_sideband_msg_rx));
+ return 0;
+ }
if (mgr->up_req_recv.have_eomt) {
struct drm_dp_sideband_msg_req_body msg;
@@ -2318,7 +2345,9 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
}
- drm_dp_put_mst_branch_device(mstb);
+ if (mstb)
+ drm_dp_put_mst_branch_device(mstb);
+
memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
}
return ret;
@@ -2515,8 +2544,8 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
int req_slots;
topology_state = drm_atomic_get_mst_topology_state(state, mgr);
- if (topology_state == NULL)
- return -ENOMEM;
+ if (IS_ERR(topology_state))
+ return PTR_ERR(topology_state);
port = drm_dp_get_validated_port_ref(mgr, port);
if (port == NULL)
@@ -2555,8 +2584,8 @@ int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
struct drm_dp_mst_topology_state *topology_state;
topology_state = drm_atomic_get_mst_topology_state(state, mgr);
- if (topology_state == NULL)
- return -ENOMEM;
+ if (IS_ERR(topology_state))
+ return PTR_ERR(topology_state);
/* We cannot rely on port->vcpi.num_slots to update
* topology_state->avail_slots as the port may not exist if the parent
@@ -2992,41 +3021,32 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
(*mgr->cbs->hotplug)(mgr);
}
-void *drm_dp_mst_duplicate_state(struct drm_atomic_state *state, void *obj)
+static struct drm_private_state *
+drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
{
- struct drm_dp_mst_topology_mgr *mgr = obj;
- struct drm_dp_mst_topology_state *new_mst_state;
+ struct drm_dp_mst_topology_state *state;
- if (WARN_ON(!mgr->state))
+ state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
+ if (!state)
return NULL;
- new_mst_state = kmemdup(mgr->state, sizeof(*new_mst_state), GFP_KERNEL);
- if (new_mst_state)
- new_mst_state->state = state;
- return new_mst_state;
-}
-
-void drm_dp_mst_swap_state(void *obj, void **obj_state_ptr)
-{
- struct drm_dp_mst_topology_mgr *mgr = obj;
- struct drm_dp_mst_topology_state **topology_state_ptr;
-
- topology_state_ptr = (struct drm_dp_mst_topology_state **)obj_state_ptr;
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
- mgr->state->state = (*topology_state_ptr)->state;
- swap(*topology_state_ptr, mgr->state);
- mgr->state->state = NULL;
+ return &state->base;
}
-void drm_dp_mst_destroy_state(void *obj_state)
+static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
{
- kfree(obj_state);
+ struct drm_dp_mst_topology_state *mst_state =
+ to_dp_mst_topology_state(state);
+
+ kfree(mst_state);
}
static const struct drm_private_state_funcs mst_state_funcs = {
- .duplicate_state = drm_dp_mst_duplicate_state,
- .swap_state = drm_dp_mst_swap_state,
- .destroy_state = drm_dp_mst_destroy_state,
+ .atomic_duplicate_state = drm_dp_mst_duplicate_state,
+ .atomic_destroy_state = drm_dp_mst_destroy_state,
};
/**
@@ -3050,8 +3070,7 @@ struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_a
struct drm_device *dev = mgr->dev;
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
- return drm_atomic_get_private_obj_state(state, mgr,
- &mst_state_funcs);
+ return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
}
EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
@@ -3071,6 +3090,8 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
int max_dpcd_transaction_bytes,
int max_payloads, int conn_base_id)
{
+ struct drm_dp_mst_topology_state *mst_state;
+
mutex_init(&mgr->lock);
mutex_init(&mgr->qlock);
mutex_init(&mgr->payload_lock);
@@ -3099,14 +3120,18 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
if (test_calc_pbn_mode() < 0)
DRM_ERROR("MST PBN self-test failed\n");
- mgr->state = kzalloc(sizeof(*mgr->state), GFP_KERNEL);
- if (mgr->state == NULL)
+ mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
+ if (mst_state == NULL)
return -ENOMEM;
- mgr->state->mgr = mgr;
+
+ mst_state->mgr = mgr;
/* max. time slots - one slot for MTP header */
- mgr->state->avail_slots = 63;
- mgr->funcs = &mst_state_funcs;
+ mst_state->avail_slots = 63;
+
+ drm_atomic_private_obj_init(&mgr->base,
+ &mst_state->base,
+ &mst_state_funcs);
return 0;
}
@@ -3128,8 +3153,7 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
mutex_unlock(&mgr->payload_lock);
mgr->dev = NULL;
mgr->aux = NULL;
- kfree(mgr->state);
- mgr->state = NULL;
+ drm_atomic_private_obj_fini(&mgr->base);
mgr->funcs = NULL;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
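A hedged sketch of what a caller looks like after the switch from NULL to ERR_PTR() above; foo_connector and its MST fields are placeholders for driver-specific state.

/* Illustrative only: the error now propagated comes from the topology
 * state lookup returning ERR_PTR() instead of NULL.
 */
static int foo_encoder_atomic_check(struct drm_encoder *encoder,
				    struct drm_crtc_state *crtc_state,
				    struct drm_connector_state *conn_state)
{
	struct foo_connector *conn = to_foo_connector(conn_state->connector);
	int slots;

	slots = drm_dp_atomic_find_vcpi_slots(crtc_state->state,
					      &conn->mst_mgr, conn->mst_port,
					      conn->pbn);
	if (slots < 0)
		return slots;	/* propagates PTR_ERR() from the topology state */

	return 0;
}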
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 37b8ad3e30d8..be38ac7050d4 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -63,6 +63,15 @@ module_param_named(debug, drm_debug, int, 0600);
static DEFINE_SPINLOCK(drm_minor_lock);
static struct idr drm_minors_idr;
+/*
+ * If the drm core fails to init for whatever reason,
+ * we should prevent any drivers from registering with it.
+ * It's best to check this at drm_dev_init(), as some drivers
+ * prefer to embed struct drm_device into their own device
+ * structure and call drm_dev_init() themselves.
+ */
+static bool drm_core_init_complete = false;
+
static struct dentry *drm_debugfs_root;
#define DRM_PRINTK_FMT "[" DRM_NAME ":%s]%s %pV"
@@ -282,7 +291,7 @@ struct drm_minor *drm_minor_acquire(unsigned int minor_id)
if (!minor) {
return ERR_PTR(-ENODEV);
- } else if (drm_device_is_unplugged(minor->dev)) {
+ } else if (drm_dev_is_unplugged(minor->dev)) {
drm_dev_unref(minor->dev);
return ERR_PTR(-ENODEV);
}
@@ -355,26 +364,32 @@ void drm_put_dev(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_put_dev);
-void drm_unplug_dev(struct drm_device *dev)
+static void drm_device_set_unplugged(struct drm_device *dev)
{
- /* for a USB device */
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- drm_modeset_unregister_all(dev);
+ smp_wmb();
+ atomic_set(&dev->unplugged, 1);
+}
- drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
- drm_minor_unregister(dev, DRM_MINOR_RENDER);
- drm_minor_unregister(dev, DRM_MINOR_CONTROL);
+/**
+ * drm_dev_unplug - unplug a DRM device
+ * @dev: DRM device
+ *
+ * This unplugs a hotpluggable DRM device, which makes it inaccessible to
+ * userspace operations. Entry-points can use drm_dev_is_unplugged(). This
+ * essentially unregisters the device like drm_dev_unregister(), but can be
+ * called while there are still open users of @dev.
+ */
+void drm_dev_unplug(struct drm_device *dev)
+{
+ drm_dev_unregister(dev);
mutex_lock(&drm_global_mutex);
-
drm_device_set_unplugged(dev);
-
- if (dev->open_count == 0) {
- drm_put_dev(dev);
- }
+ if (dev->open_count == 0)
+ drm_dev_unref(dev);
mutex_unlock(&drm_global_mutex);
}
-EXPORT_SYMBOL(drm_unplug_dev);
+EXPORT_SYMBOL(drm_dev_unplug);
/*
* DRM internal mount
@@ -484,6 +499,11 @@ int drm_dev_init(struct drm_device *dev,
{
int ret;
+ if (!drm_core_init_complete) {
+ DRM_ERROR("DRM core is not initialized\n");
+ return -ENODEV;
+ }
+
kref_init(&dev->ref);
dev->dev = parent;
dev->driver = driver;
@@ -821,6 +841,9 @@ EXPORT_SYMBOL(drm_dev_register);
* drm_dev_register() but does not deallocate the device. The caller must call
* drm_dev_unref() to drop their final reference.
*
+ * A special form of unregistering for hotpluggable devices is drm_dev_unplug(),
+ * which can be called while there are still open users of @dev.
+ *
* This should be called first in the device teardown code to make sure
* userspace can't access the device instance any more.
*/
@@ -828,7 +851,8 @@ void drm_dev_unregister(struct drm_device *dev)
{
struct drm_map_list *r_list, *list_temp;
- drm_lastclose(dev);
+ if (drm_core_check_feature(dev, DRIVER_LEGACY))
+ drm_lastclose(dev);
dev->registered = false;
@@ -966,6 +990,8 @@ static int __init drm_core_init(void)
if (ret < 0)
goto error;
+ drm_core_init_complete = true;
+
DRM_DEBUG("Initialized\n");
return 0;
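A minimal sketch of how a hotpluggable (e.g. USB) driver would adopt the renamed entry points above; the foo_* functions are illustrative and real drivers typically do more teardown in disconnect.

/* Illustrative only: foo_* functions are placeholders. */
static void foo_usb_disconnect(struct usb_interface *interface)
{
	struct drm_device *dev = usb_get_intfdata(interface);

	drm_kms_helper_poll_disable(dev);
	drm_dev_unplug(dev);			/* was drm_unplug_dev() */
}

static int foo_check_unplugged(struct drm_device *dev)
{
	if (drm_dev_is_unplugged(dev))		/* was drm_device_is_unplugged() */
		return -ENODEV;

	return 0;
}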
diff --git a/drivers/gpu/drm/drm_dumb_buffers.c b/drivers/gpu/drm/drm_dumb_buffers.c
index 10307cc16d75..39ac15ce4702 100644
--- a/drivers/gpu/drm/drm_dumb_buffers.c
+++ b/drivers/gpu/drm/drm_dumb_buffers.c
@@ -24,6 +24,7 @@
*/
#include <drm/drmP.h>
+#include <drm/drm_gem.h>
#include "drm_crtc_internal.h"
@@ -42,9 +43,10 @@
* create dumb buffers suitable for scanout, which can then be used to create
* KMS frame buffers.
*
- * To support dumb objects drivers must implement the &drm_driver.dumb_create,
- * &drm_driver.dumb_destroy and &drm_driver.dumb_map_offset operations. See
- * there for further details.
+ * To support dumb objects drivers must implement the &drm_driver.dumb_create
+ * operation. &drm_driver.dumb_destroy defaults to drm_gem_dumb_destroy() if
+ * not set and &drm_driver.dumb_map_offset defaults to
+ * drm_gem_dumb_map_offset(). See the callbacks for further details.
*
* Note that dumb objects may not be used for gpu acceleration, as has been
* attempted on some ARM embedded platforms. Such drivers really must have
@@ -108,11 +110,16 @@ int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
{
struct drm_mode_map_dumb *args = data;
- /* call driver ioctl to get mmap offset */
- if (!dev->driver->dumb_map_offset)
+ if (!dev->driver->dumb_create)
return -ENOSYS;
- return dev->driver->dumb_map_offset(file_priv, dev, args->handle, &args->offset);
+ if (dev->driver->dumb_map_offset)
+ return dev->driver->dumb_map_offset(file_priv, dev,
+ args->handle,
+ &args->offset);
+ else
+ return drm_gem_dumb_map_offset(file_priv, dev, args->handle,
+ &args->offset);
}
int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
@@ -120,9 +127,12 @@ int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
{
struct drm_mode_destroy_dumb *args = data;
- if (!dev->driver->dumb_destroy)
+ if (!dev->driver->dumb_create)
return -ENOSYS;
- return dev->driver->dumb_destroy(file_priv, dev, args->handle);
+ if (dev->driver->dumb_destroy)
+ return dev->driver->dumb_destroy(file_priv, dev, args->handle);
+ else
+ return drm_gem_dumb_destroy(file_priv, dev, args->handle);
}
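A small sketch of what the new fallbacks allow, assuming a GEM-based driver; only .dumb_create has to be provided, and the foo_* names are placeholders.

/* Illustrative only: foo_dumb_create and the driver name are placeholders. */
static struct drm_driver foo_driver = {
	.driver_features	= DRIVER_GEM | DRIVER_MODESET,
	.dumb_create		= foo_dumb_create,
	/* .dumb_map_offset and .dumb_destroy left unset on purpose:
	 * drm_gem_dumb_map_offset() and drm_gem_dumb_destroy() are now
	 * used as fallbacks by the ioctls above.
	 */
	.name			= "foo",
};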
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 2e55599816aa..6bb6337be920 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1006,6 +1006,221 @@ static const struct drm_display_mode edid_cea_modes[] = {
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 65 - 1280x720@24Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
+ 3080, 3300, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 66 - 1280x720@25Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
+ 3740, 3960, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 67 - 1280x720@30Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
+ 3080, 3300, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 68 - 1280x720@50Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
+ 1760, 1980, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 69 - 1280x720@60Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
+ 1430, 1650, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 70 - 1280x720@100Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
+ 1760, 1980, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 71 - 1280x720@120Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
+ 1430, 1650, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 72 - 1920x1080@24Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 73 - 1920x1080@25Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 74 - 1920x1080@30Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 75 - 1920x1080@50Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 76 - 1920x1080@60Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 77 - 1920x1080@100Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 78 - 1920x1080@120Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 79 - 1680x720@24Hz */
+ { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 3040,
+ 3080, 3300, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 80 - 1680x720@25Hz */
+ { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 2908,
+ 2948, 3168, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 81 - 1680x720@30Hz */
+ { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 2380,
+ 2420, 2640, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 82 - 1680x720@50Hz */
+ { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 82500, 1680, 1940,
+ 1980, 2200, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 83 - 1680x720@60Hz */
+ { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 99000, 1680, 1940,
+ 1980, 2200, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 84 - 1680x720@100Hz */
+ { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 165000, 1680, 1740,
+ 1780, 2000, 0, 720, 725, 730, 825, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 85 - 1680x720@120Hz */
+ { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 198000, 1680, 1740,
+ 1780, 2000, 0, 720, 725, 730, 825, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 86 - 2560x1080@24Hz */
+ { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 99000, 2560, 3558,
+ 3602, 3750, 0, 1080, 1084, 1089, 1100, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 87 - 2560x1080@25Hz */
+ { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 90000, 2560, 3008,
+ 3052, 3200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 88 - 2560x1080@30Hz */
+ { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 118800, 2560, 3328,
+ 3372, 3520, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 89 - 2560x1080@50Hz */
+ { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 185625, 2560, 3108,
+ 3152, 3300, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 90 - 2560x1080@60Hz */
+ { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 198000, 2560, 2808,
+ 2852, 3000, 0, 1080, 1084, 1089, 1100, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 91 - 2560x1080@100Hz */
+ { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 371250, 2560, 2778,
+ 2822, 2970, 0, 1080, 1084, 1089, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 92 - 2560x1080@120Hz */
+ { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 495000, 2560, 3108,
+ 3152, 3300, 0, 1080, 1084, 1089, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 93 - 3840x2160p@24Hz 16:9 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 94 - 3840x2160p@25Hz 16:9 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4896,
+ 4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 95 - 3840x2160p@30Hz 16:9 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016,
+ 4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 96 - 3840x2160p@50Hz 16:9 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896,
+ 4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 97 - 3840x2160p@60Hz 16:9 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016,
+ 4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 98 - 4096x2160p@24Hz 256:135 */
+ { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
+ /* 99 - 4096x2160p@25Hz 256:135 */
+ { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5064,
+ 5152, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
+ /* 100 - 4096x2160p@30Hz 256:135 */
+ { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 4184,
+ 4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
+ /* 101 - 4096x2160p@50Hz 256:135 */
+ { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 5064,
+ 5152, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
+ /* 102 - 4096x2160p@60Hz 256:135 */
+ { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 4184,
+ 4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
+ /* 103 - 3840x2160p@24Hz 64:27 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 104 - 3840x2160p@25Hz 64:27 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4896,
+ 4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 105 - 3840x2160p@30Hz 64:27 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016,
+ 4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 106 - 3840x2160p@50Hz 64:27 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896,
+ 4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 107 - 3840x2160p@60Hz 64:27 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016,
+ 4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
};
/*
@@ -2566,7 +2781,10 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
#define VIDEO_BLOCK 0x02
#define VENDOR_BLOCK 0x03
#define SPEAKER_BLOCK 0x04
-#define VIDEO_CAPABILITY_BLOCK 0x07
+#define USE_EXTENDED_TAG 0x07
+#define EXT_VIDEO_CAPABILITY_BLOCK 0x00
+#define EXT_VIDEO_DATA_BLOCK_420 0x0E
+#define EXT_VIDEO_CAP_BLOCK_Y420CMDB 0x0F
#define EDID_BASIC_AUDIO (1 << 6)
#define EDID_CEA_YCRCB444 (1 << 5)
#define EDID_CEA_YCRCB422 (1 << 4)
@@ -2902,6 +3120,15 @@ add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
return modes;
}
+static u8 svd_to_vic(u8 svd)
+{
+	/* Bits 0-6: VIC, bit 7: native mode indicator */
+ if ((svd >= 1 && svd <= 64) || (svd >= 129 && svd <= 192))
+ return svd & 127;
+
+ return svd;
+}
+
static struct drm_display_mode *
drm_display_mode_from_vic_index(struct drm_connector *connector,
const u8 *video_db, u8 video_len,
@@ -2915,7 +3142,7 @@ drm_display_mode_from_vic_index(struct drm_connector *connector,
return NULL;
/* CEA modes are numbered 1..127 */
- vic = (video_db[video_index] & 127);
+ vic = svd_to_vic(video_db[video_index]);
if (!drm_valid_cea_vic(vic))
return NULL;
@@ -2928,15 +3155,85 @@ drm_display_mode_from_vic_index(struct drm_connector *connector,
return newmode;
}
+/*
+ * do_y420vdb_modes - Parse YCBCR 420 only modes
+ * @connector: connector corresponding to the HDMI sink
+ * @svds: start of the data block of CEA YCBCR 420 VDB
+ * @svds_len: length of the CEA YCBCR 420 VDB
+ *
+ * Parse the CEA-861-F YCBCR 420 Video Data Block (Y420VDB)
+ * which contains modes which can be supported in YCBCR 420
+ * output format only.
+ */
+static int do_y420vdb_modes(struct drm_connector *connector,
+ const u8 *svds, u8 svds_len)
+{
+ int modes = 0, i;
+ struct drm_device *dev = connector->dev;
+ struct drm_display_info *info = &connector->display_info;
+ struct drm_hdmi_info *hdmi = &info->hdmi;
+
+ for (i = 0; i < svds_len; i++) {
+ u8 vic = svd_to_vic(svds[i]);
+ struct drm_display_mode *newmode;
+
+ if (!drm_valid_cea_vic(vic))
+ continue;
+
+ newmode = drm_mode_duplicate(dev, &edid_cea_modes[vic]);
+ if (!newmode)
+ break;
+ bitmap_set(hdmi->y420_vdb_modes, vic, 1);
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+
+ if (modes > 0)
+ info->color_formats |= DRM_COLOR_FORMAT_YCRCB420;
+ return modes;
+}
+
+/*
+ * drm_add_cmdb_modes - Add a YCBCR 420 mode into bitmap
+ * @connector: connector corresponding to the HDMI sink
+ * @svd: CEA short video descriptor of the mode to be added to the map
+ *
+ * Makes an entry for a videomode in the YCBCR 420 bitmap
+ */
+static void
+drm_add_cmdb_modes(struct drm_connector *connector, u8 svd)
+{
+ u8 vic = svd_to_vic(svd);
+ struct drm_hdmi_info *hdmi = &connector->display_info.hdmi;
+
+ if (!drm_valid_cea_vic(vic))
+ return;
+
+ bitmap_set(hdmi->y420_cmdb_modes, vic, 1);
+}
+
static int
do_cea_modes(struct drm_connector *connector, const u8 *db, u8 len)
{
int i, modes = 0;
+ struct drm_hdmi_info *hdmi = &connector->display_info.hdmi;
for (i = 0; i < len; i++) {
struct drm_display_mode *mode;
mode = drm_display_mode_from_vic_index(connector, db, len, i);
if (mode) {
+ /*
+ * The YCBCR420 capability block contains a bitmap that
+ * indicates which of the modes in the CEA VDB can also
+ * support YCBCR 420 sampling output (apart from
+ * RGB/YCBCR444 etc).
+ * For example, if bit 0 in the bitmap is set, the
+ * first mode in the VDB can support YCBCR420 output too.
+ * Add YCBCR420 modes only if sink is HDMI 2.0 capable.
+ */
+ if (i < 64 && hdmi->y420_cmdb_map & (1ULL << i))
+ drm_add_cmdb_modes(connector, db[i]);
+
drm_mode_probed_add(connector, mode);
modes++;
}
@@ -3218,6 +3515,12 @@ cea_db_payload_len(const u8 *db)
}
static int
+cea_db_extended_tag(const u8 *db)
+{
+ return db[1];
+}
+
+static int
cea_db_tag(const u8 *db)
{
return db[0] >> 5;
@@ -3272,9 +3575,77 @@ static bool cea_db_is_hdmi_forum_vsdb(const u8 *db)
return oui == HDMI_FORUM_IEEE_OUI;
}
+static bool cea_db_is_y420cmdb(const u8 *db)
+{
+ if (cea_db_tag(db) != USE_EXTENDED_TAG)
+ return false;
+
+ if (!cea_db_payload_len(db))
+ return false;
+
+ if (cea_db_extended_tag(db) != EXT_VIDEO_CAP_BLOCK_Y420CMDB)
+ return false;
+
+ return true;
+}
+
+static bool cea_db_is_y420vdb(const u8 *db)
+{
+ if (cea_db_tag(db) != USE_EXTENDED_TAG)
+ return false;
+
+ if (!cea_db_payload_len(db))
+ return false;
+
+ if (cea_db_extended_tag(db) != EXT_VIDEO_DATA_BLOCK_420)
+ return false;
+
+ return true;
+}
+
#define for_each_cea_db(cea, i, start, end) \
for ((i) = (start); (i) < (end) && (i) + cea_db_payload_len(&(cea)[(i)]) < (end); (i) += cea_db_payload_len(&(cea)[(i)]) + 1)
+static void drm_parse_y420cmdb_bitmap(struct drm_connector *connector,
+ const u8 *db)
+{
+ struct drm_display_info *info = &connector->display_info;
+ struct drm_hdmi_info *hdmi = &info->hdmi;
+ u8 map_len = cea_db_payload_len(db) - 1;
+ u8 count;
+ u64 map = 0;
+
+ if (map_len == 0) {
+ /* All CEA modes support YCBCR420 sampling too. */
+ hdmi->y420_cmdb_map = U64_MAX;
+ info->color_formats |= DRM_COLOR_FORMAT_YCRCB420;
+ return;
+ }
+
+ /*
+ * This map indicates which of the existing CEA block modes
+ * from the VDB can support YCBCR420 output too. So if bit 0 is
+ * set, the first mode from the VDB can support YCBCR420 output too.
+ * We parse and keep this map before parsing the VDB itself,
+ * to avoid going through the same block again and again.
+ *
+ * The spec is not clear about the maximum possible size of this
+ * block. Clamp the bitmap to 8 bytes: every byte can address
+ * 8 CEA modes, so the map covers the first 8*8 = 64 SVDs.
+ */
+ if (WARN_ON_ONCE(map_len > 8))
+ map_len = 8;
+
+ for (count = 0; count < map_len; count++)
+ map |= (u64)db[2 + count] << (8 * count);
+
+ if (map)
+ info->color_formats |= DRM_COLOR_FORMAT_YCRCB420;
+
+ hdmi->y420_cmdb_map = map;
+}
+
static int
add_cea_modes(struct drm_connector *connector, struct edid *edid)
{
@@ -3297,10 +3668,16 @@ add_cea_modes(struct drm_connector *connector, struct edid *edid)
video = db + 1;
video_len = dbl;
modes += do_cea_modes(connector, video, dbl);
- }
- else if (cea_db_is_hdmi_vsdb(db)) {
+ } else if (cea_db_is_hdmi_vsdb(db)) {
hdmi = db;
hdmi_len = dbl;
+ } else if (cea_db_is_y420vdb(db)) {
+ const u8 *vdb420 = &db[2];
+
+ /* Add 4:2:0(only) modes present in EDID */
+ modes += do_y420vdb_modes(connector,
+ vdb420,
+ dbl - 1);
}
}
}
@@ -3793,8 +4170,10 @@ bool drm_rgb_quant_range_selectable(struct edid *edid)
return false;
for_each_cea_db(edid_ext, i, start, end) {
- if (cea_db_tag(&edid_ext[i]) == VIDEO_CAPABILITY_BLOCK &&
- cea_db_payload_len(&edid_ext[i]) == 2) {
+ if (cea_db_tag(&edid_ext[i]) == USE_EXTENDED_TAG &&
+ cea_db_payload_len(&edid_ext[i]) == 2 &&
+ cea_db_extended_tag(&edid_ext[i]) ==
+ EXT_VIDEO_CAPABILITY_BLOCK) {
DRM_DEBUG_KMS("CEA VCDB 0x%02x\n", edid_ext[i + 2]);
return edid_ext[i + 2] & EDID_CEA_VCDB_QS;
}
@@ -3823,6 +4202,16 @@ drm_default_rgb_quant_range(const struct drm_display_mode *mode)
}
EXPORT_SYMBOL(drm_default_rgb_quant_range);
+static void drm_parse_ycbcr420_deep_color_info(struct drm_connector *connector,
+ const u8 *db)
+{
+ u8 dc_mask;
+ struct drm_hdmi_info *hdmi = &connector->display_info.hdmi;
+
+ dc_mask = db[7] & DRM_EDID_YCBCR420_DC_MASK;
+ hdmi->y420_dc_modes |= dc_mask;
+}
+
static void drm_parse_hdmi_forum_vsdb(struct drm_connector *connector,
const u8 *hf_vsdb)
{
@@ -3863,6 +4252,8 @@ static void drm_parse_hdmi_forum_vsdb(struct drm_connector *connector,
scdc->scrambling.low_rates = true;
}
}
+
+ drm_parse_ycbcr420_deep_color_info(connector, hf_vsdb);
}
static void drm_parse_hdmi_deep_color_info(struct drm_connector *connector,
@@ -3981,6 +4372,8 @@ static void drm_parse_cea_ext(struct drm_connector *connector,
drm_parse_hdmi_vsdb_video(connector, db);
if (cea_db_is_hdmi_forum_vsdb(db))
drm_parse_hdmi_forum_vsdb(connector, db);
+ if (cea_db_is_y420cmdb(db))
+ drm_parse_y420cmdb_bitmap(connector, db);
}
}
@@ -4215,6 +4608,13 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
quirks = edid_get_quirks(edid);
/*
+ * CEA-861-F adds a YCBCR capability map block for HDMI 2.0 sinks.
+ * To avoid parsing the same block multiple times, parse that map
+ * from the sink info before parsing the CEA modes.
+ */
+ drm_add_display_info(connector, edid);
+
+ /*
* EDID spec says modes should be preferred in this order:
* - preferred detailed mode
* - other detailed modes from base block
@@ -4241,8 +4641,6 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
edid_fixup_preferred(connector, quirks);
- drm_add_display_info(connector, edid);
-
if (quirks & EDID_QUIRK_FORCE_6BPC)
connector->display_info.bpc = 6;
@@ -4334,12 +4732,14 @@ EXPORT_SYMBOL(drm_set_preferred_mode);
* data from a DRM display mode
* @frame: HDMI AVI infoframe
* @mode: DRM display mode
+ * @is_hdmi2_sink: Sink is HDMI 2.0 compliant
*
* Return: 0 on success or a negative error code on failure.
*/
int
drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
- const struct drm_display_mode *mode)
+ const struct drm_display_mode *mode,
+ bool is_hdmi2_sink)
{
int err;
@@ -4355,6 +4755,28 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
frame->video_code = drm_match_cea_mode(mode);
+ /*
+ * HDMI 1.4 VIC range: 1 <= VIC <= 64 (CEA-861-D) but
+ * HDMI 2.0 VIC range: 1 <= VIC <= 107 (CEA-861-F). So we
+ * have to make sure we don't break HDMI 1.4 sinks.
+ */
+ if (!is_hdmi2_sink && frame->video_code > 64)
+ frame->video_code = 0;
+
+ /*
+ * HDMI spec says if a mode is found in HDMI 1.4b 4K modes
+ * we should send its VIC in vendor infoframes, else send the
+ * VIC in AVI infoframes. Let's check if this mode is present in
+ * the HDMI 1.4b 4K modes.
+ */
+ if (frame->video_code) {
+ u8 vendor_if_vic = drm_match_hdmi_mode(mode);
+ bool is_s3d = mode->flags & DRM_MODE_FLAG_3D_MASK;
+
+ if (drm_valid_hdmi_vic(vendor_if_vic) && !is_s3d)
+ frame->video_code = 0;
+ }
+
frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
/*
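A hedged sketch of a caller updated for the new is_hdmi2_sink argument; deriving the flag from the parsed SCDC support bit is an assumption, drivers may track HDMI 2.0 capability differently.

/* Illustrative only: foo_setup_avi_infoframe is a placeholder. */
static int foo_setup_avi_infoframe(struct drm_connector *connector,
				   const struct drm_display_mode *mode)
{
	struct hdmi_avi_infoframe frame;
	bool is_hdmi2_sink = connector->display_info.hdmi.scdc.supported;
	int ret;

	ret = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode,
						       is_hdmi2_sink);
	if (ret)
		return ret;

	/* ... pack and write the infoframe to hardware ... */
	return 0;
}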
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 53f9bdf470d7..f2ee88363015 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -18,27 +18,17 @@
*/
#include <drm/drmP.h>
-#include <drm/drm_atomic.h>
-#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fb_cma_helper.h>
-#include <linux/dma-buf.h>
-#include <linux/dma-mapping.h>
#include <linux/module.h>
-#include <linux/reservation.h>
#define DEFAULT_FBDEFIO_DELAY_MS 50
-struct drm_fb_cma {
- struct drm_framebuffer fb;
- struct drm_gem_cma_object *obj[4];
-};
-
struct drm_fbdev_cma {
struct drm_fb_helper fb_helper;
- struct drm_fb_cma *fb;
const struct drm_framebuffer_funcs *fb_funcs;
};
@@ -90,69 +80,19 @@ static inline struct drm_fbdev_cma *to_fbdev_cma(struct drm_fb_helper *helper)
return container_of(helper, struct drm_fbdev_cma, fb_helper);
}
-static inline struct drm_fb_cma *to_fb_cma(struct drm_framebuffer *fb)
-{
- return container_of(fb, struct drm_fb_cma, fb);
-}
-
void drm_fb_cma_destroy(struct drm_framebuffer *fb)
{
- struct drm_fb_cma *fb_cma = to_fb_cma(fb);
- int i;
-
- for (i = 0; i < 4; i++) {
- if (fb_cma->obj[i])
- drm_gem_object_put_unlocked(&fb_cma->obj[i]->base);
- }
-
- drm_framebuffer_cleanup(fb);
- kfree(fb_cma);
+ drm_gem_fb_destroy(fb);
}
EXPORT_SYMBOL(drm_fb_cma_destroy);
int drm_fb_cma_create_handle(struct drm_framebuffer *fb,
struct drm_file *file_priv, unsigned int *handle)
{
- struct drm_fb_cma *fb_cma = to_fb_cma(fb);
-
- return drm_gem_handle_create(file_priv,
- &fb_cma->obj[0]->base, handle);
+ return drm_gem_fb_create_handle(fb, file_priv, handle);
}
EXPORT_SYMBOL(drm_fb_cma_create_handle);
-static struct drm_framebuffer_funcs drm_fb_cma_funcs = {
- .destroy = drm_fb_cma_destroy,
- .create_handle = drm_fb_cma_create_handle,
-};
-
-static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_cma_object **obj,
- unsigned int num_planes, const struct drm_framebuffer_funcs *funcs)
-{
- struct drm_fb_cma *fb_cma;
- int ret;
- int i;
-
- fb_cma = kzalloc(sizeof(*fb_cma), GFP_KERNEL);
- if (!fb_cma)
- return ERR_PTR(-ENOMEM);
-
- drm_helper_mode_fill_fb_struct(dev, &fb_cma->fb, mode_cmd);
-
- for (i = 0; i < num_planes; i++)
- fb_cma->obj[i] = obj[i];
-
- ret = drm_framebuffer_init(dev, &fb_cma->fb, funcs);
- if (ret) {
- dev_err(dev->dev, "Failed to initialize framebuffer: %d\n", ret);
- kfree(fb_cma);
- return ERR_PTR(ret);
- }
-
- return fb_cma;
-}
-
/**
* drm_fb_cma_create_with_funcs() - helper function for the
* &drm_mode_config_funcs.fb_create
@@ -170,53 +110,7 @@ struct drm_framebuffer *drm_fb_cma_create_with_funcs(struct drm_device *dev,
struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd,
const struct drm_framebuffer_funcs *funcs)
{
- const struct drm_format_info *info;
- struct drm_fb_cma *fb_cma;
- struct drm_gem_cma_object *objs[4];
- struct drm_gem_object *obj;
- int ret;
- int i;
-
- info = drm_get_format_info(dev, mode_cmd);
- if (!info)
- return ERR_PTR(-EINVAL);
-
- for (i = 0; i < info->num_planes; i++) {
- unsigned int width = mode_cmd->width / (i ? info->hsub : 1);
- unsigned int height = mode_cmd->height / (i ? info->vsub : 1);
- unsigned int min_size;
-
- obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
- if (!obj) {
- dev_err(dev->dev, "Failed to lookup GEM object\n");
- ret = -ENOENT;
- goto err_gem_object_put;
- }
-
- min_size = (height - 1) * mode_cmd->pitches[i]
- + width * info->cpp[i]
- + mode_cmd->offsets[i];
-
- if (obj->size < min_size) {
- drm_gem_object_put_unlocked(obj);
- ret = -EINVAL;
- goto err_gem_object_put;
- }
- objs[i] = to_drm_gem_cma_obj(obj);
- }
-
- fb_cma = drm_fb_cma_alloc(dev, mode_cmd, objs, i, funcs);
- if (IS_ERR(fb_cma)) {
- ret = PTR_ERR(fb_cma);
- goto err_gem_object_put;
- }
-
- return &fb_cma->fb;
-
-err_gem_object_put:
- for (i--; i >= 0; i--)
- drm_gem_object_put_unlocked(&objs[i]->base);
- return ERR_PTR(ret);
+ return drm_gem_fb_create_with_funcs(dev, file_priv, mode_cmd, funcs);
}
EXPORT_SYMBOL_GPL(drm_fb_cma_create_with_funcs);
@@ -233,8 +127,7 @@ EXPORT_SYMBOL_GPL(drm_fb_cma_create_with_funcs);
struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev,
struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd)
{
- return drm_fb_cma_create_with_funcs(dev, file_priv, mode_cmd,
- &drm_fb_cma_funcs);
+ return drm_gem_fb_create(dev, file_priv, mode_cmd);
}
EXPORT_SYMBOL_GPL(drm_fb_cma_create);
@@ -250,12 +143,13 @@ EXPORT_SYMBOL_GPL(drm_fb_cma_create);
struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
unsigned int plane)
{
- struct drm_fb_cma *fb_cma = to_fb_cma(fb);
+ struct drm_gem_object *gem;
- if (plane >= 4)
+ gem = drm_gem_fb_get_obj(fb, plane);
+ if (!gem)
return NULL;
- return fb_cma->obj[plane];
+ return to_drm_gem_cma_obj(gem);
}
EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
@@ -272,13 +166,14 @@ dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb,
struct drm_plane_state *state,
unsigned int plane)
{
- struct drm_fb_cma *fb_cma = to_fb_cma(fb);
+ struct drm_gem_cma_object *obj;
dma_addr_t paddr;
- if (plane >= 4)
+ obj = drm_fb_cma_get_gem_obj(fb, plane);
+ if (!obj)
return 0;
- paddr = fb_cma->obj[plane]->paddr + fb->offsets[plane];
+ paddr = obj->paddr + fb->offsets[plane];
paddr += fb->format->cpp[plane] * (state->src_x >> 16);
paddr += fb->pitches[plane] * (state->src_y >> 16);
@@ -302,26 +197,13 @@ EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_addr);
int drm_fb_cma_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *state)
{
- struct dma_buf *dma_buf;
- struct dma_fence *fence;
-
- if ((plane->state->fb == state->fb) || !state->fb)
- return 0;
-
- dma_buf = drm_fb_cma_get_gem_obj(state->fb, 0)->base.dma_buf;
- if (dma_buf) {
- fence = reservation_object_get_excl_rcu(dma_buf->resv);
- drm_atomic_set_fence_for_plane(state, fence);
- }
-
- return 0;
+ return drm_gem_fb_prepare_fb(plane, state);
}
EXPORT_SYMBOL_GPL(drm_fb_cma_prepare_fb);
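/*
 * Minimal usage sketch (hypothetical foo_* names, not from this patch): a
 * CMA-based driver typically combines drm_fb_cma_prepare_fb() with
 * drm_fb_cma_get_gem_addr() in its plane helpers, roughly like this.
 */
static void foo_plane_atomic_update(struct drm_plane *plane,
				    struct drm_plane_state *old_state)
{
	struct drm_plane_state *state = plane->state;
	dma_addr_t paddr;

	if (!state->fb)
		return;

	/* DMA address of plane 0, adjusted for fb offsets and src_x/src_y */
	paddr = drm_fb_cma_get_gem_addr(state->fb, state, 0);
	/* ... program the scanout hardware with paddr ... */
}

static const struct drm_plane_helper_funcs foo_plane_helper_funcs = {
	.prepare_fb    = drm_fb_cma_prepare_fb,
	.atomic_update = foo_plane_atomic_update,
};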
#ifdef CONFIG_DEBUG_FS
static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
{
- struct drm_fb_cma *fb_cma = to_fb_cma(fb);
int i;
seq_printf(m, "fb: %dx%d@%4.4s\n", fb->width, fb->height,
@@ -330,7 +212,7 @@ static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
for (i = 0; i < fb->format->num_planes; i++) {
seq_printf(m, " %d: offset=%d pitch=%d, obj: ",
i, fb->offsets[i], fb->pitches[i]);
- drm_gem_cma_describe(fb_cma->obj[i], m);
+ drm_gem_cma_describe(drm_fb_cma_get_gem_obj(fb, i), m);
}
}
@@ -431,7 +313,6 @@ drm_fbdev_cma_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_fbdev_cma *fbdev_cma = to_fbdev_cma(helper);
- struct drm_mode_fb_cmd2 mode_cmd = { 0 };
struct drm_device *dev = helper->dev;
struct drm_gem_cma_object *obj;
struct drm_framebuffer *fb;
@@ -446,14 +327,7 @@ drm_fbdev_cma_create(struct drm_fb_helper *helper,
sizes->surface_bpp);
bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
-
- mode_cmd.width = sizes->surface_width;
- mode_cmd.height = sizes->surface_height;
- mode_cmd.pitches[0] = sizes->surface_width * bytes_per_pixel;
- mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
- sizes->surface_depth);
-
- size = mode_cmd.pitches[0] * mode_cmd.height;
+ size = sizes->surface_width * sizes->surface_height * bytes_per_pixel;
obj = drm_gem_cma_create(dev, size);
if (IS_ERR(obj))
return -ENOMEM;
@@ -464,15 +338,14 @@ drm_fbdev_cma_create(struct drm_fb_helper *helper,
goto err_gem_free_object;
}
- fbdev_cma->fb = drm_fb_cma_alloc(dev, &mode_cmd, &obj, 1,
- fbdev_cma->fb_funcs);
- if (IS_ERR(fbdev_cma->fb)) {
+ fb = drm_gem_fbdev_fb_create(dev, sizes, 0, &obj->base,
+ fbdev_cma->fb_funcs);
+ if (IS_ERR(fb)) {
dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
- ret = PTR_ERR(fbdev_cma->fb);
+ ret = PTR_ERR(fb);
goto err_fb_info_destroy;
}
- fb = &fbdev_cma->fb->fb;
helper->fb = fb;
fbi->par = helper;
@@ -500,7 +373,7 @@ drm_fbdev_cma_create(struct drm_fb_helper *helper,
return 0;
err_cma_destroy:
- drm_framebuffer_remove(&fbdev_cma->fb->fb);
+ drm_framebuffer_remove(fb);
err_fb_info_destroy:
drm_fb_helper_fini(helper);
err_gem_free_object:
@@ -570,6 +443,11 @@ err_free:
}
EXPORT_SYMBOL_GPL(drm_fbdev_cma_init_with_funcs);
+static const struct drm_framebuffer_funcs drm_fb_cma_funcs = {
+ .destroy = drm_gem_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
+};
+
/**
* drm_fbdev_cma_init() - Allocate and initializes a drm_fbdev_cma struct
* @dev: DRM device
@@ -597,8 +475,8 @@ void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma)
if (fbdev_cma->fb_helper.fbdev)
drm_fbdev_cma_defio_fini(fbdev_cma->fb_helper.fbdev);
- if (fbdev_cma->fb)
- drm_framebuffer_remove(&fbdev_cma->fb->fb);
+ if (fbdev_cma->fb_helper.fb)
+ drm_framebuffer_remove(fbdev_cma->fb_helper.fb);
drm_fb_helper_fini(&fbdev_cma->fb_helper);
kfree(fbdev_cma);
@@ -640,7 +518,7 @@ EXPORT_SYMBOL_GPL(drm_fbdev_cma_hotplug_event);
* Calls drm_fb_helper_set_suspend, which is a wrapper around
* fb_set_suspend implemented by fbdev core.
*/
-void drm_fbdev_cma_set_suspend(struct drm_fbdev_cma *fbdev_cma, int state)
+void drm_fbdev_cma_set_suspend(struct drm_fbdev_cma *fbdev_cma, bool state)
{
if (fbdev_cma)
drm_fb_helper_set_suspend(&fbdev_cma->fb_helper, state);
@@ -657,7 +535,7 @@ EXPORT_SYMBOL(drm_fbdev_cma_set_suspend);
* fb_set_suspend implemented by fbdev core.
*/
void drm_fbdev_cma_set_suspend_unlocked(struct drm_fbdev_cma *fbdev_cma,
- int state)
+ bool state)
{
if (fbdev_cma)
drm_fb_helper_set_suspend_unlocked(&fbdev_cma->fb_helper,
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 574af01d3ce9..1b8f013ffa65 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -106,11 +106,11 @@ static DEFINE_MUTEX(kernel_fb_helper_lock);
*/
#define drm_fb_helper_for_each_connector(fbh, i__) \
- for (({ lockdep_assert_held(&(fbh)->dev->mode_config.mutex); }), \
+ for (({ lockdep_assert_held(&(fbh)->lock); }), \
i__ = 0; i__ < (fbh)->connector_count; i__++)
-int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper,
- struct drm_connector *connector)
+static int __drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper,
+ struct drm_connector *connector)
{
struct drm_fb_helper_connector *fb_conn;
struct drm_fb_helper_connector **temp;
@@ -119,7 +119,7 @@ int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper,
if (!drm_fbdev_emulation)
return 0;
- WARN_ON(!mutex_is_locked(&fb_helper->dev->mode_config.mutex));
+ lockdep_assert_held(&fb_helper->lock);
count = fb_helper->connector_count + 1;
@@ -141,8 +141,21 @@ int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper,
drm_connector_get(connector);
fb_conn->connector = connector;
fb_helper->connector_info[fb_helper->connector_count++] = fb_conn;
+
return 0;
}
+
+int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper,
+ struct drm_connector *connector)
+{
+ int err;
+
+ mutex_lock(&fb_helper->lock);
+ err = __drm_fb_helper_add_one_connector(fb_helper, connector);
+ mutex_unlock(&fb_helper->lock);
+
+ return err;
+}
EXPORT_SYMBOL(drm_fb_helper_add_one_connector);
/**
@@ -169,11 +182,10 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
if (!drm_fbdev_emulation)
return 0;
- mutex_lock(&dev->mode_config.mutex);
+ mutex_lock(&fb_helper->lock);
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
- ret = drm_fb_helper_add_one_connector(fb_helper, connector);
-
+ ret = __drm_fb_helper_add_one_connector(fb_helper, connector);
if (ret)
goto fail;
}
@@ -192,14 +204,14 @@ fail:
fb_helper->connector_count = 0;
out:
drm_connector_list_iter_end(&conn_iter);
- mutex_unlock(&dev->mode_config.mutex);
+ mutex_unlock(&fb_helper->lock);
return ret;
}
EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors);
-int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
- struct drm_connector *connector)
+static int __drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
+ struct drm_connector *connector)
{
struct drm_fb_helper_connector *fb_helper_connector;
int i, j;
@@ -207,9 +219,9 @@ int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
if (!drm_fbdev_emulation)
return 0;
- WARN_ON(!mutex_is_locked(&fb_helper->dev->mode_config.mutex));
+ lockdep_assert_held(&fb_helper->lock);
- for (i = 0; i < fb_helper->connector_count; i++) {
+ drm_fb_helper_for_each_connector(fb_helper, i) {
if (fb_helper->connector_info[i]->connector == connector)
break;
}
@@ -227,23 +239,19 @@ int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
return 0;
}
-EXPORT_SYMBOL(drm_fb_helper_remove_one_connector);
-static void drm_fb_helper_save_lut_atomic(struct drm_crtc *crtc, struct drm_fb_helper *helper)
+int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
+ struct drm_connector *connector)
{
- uint16_t *r_base, *g_base, *b_base;
- int i;
-
- if (helper->funcs->gamma_get == NULL)
- return;
+ int err;
- r_base = crtc->gamma_store;
- g_base = r_base + crtc->gamma_size;
- b_base = g_base + crtc->gamma_size;
+ mutex_lock(&fb_helper->lock);
+ err = __drm_fb_helper_remove_one_connector(fb_helper, connector);
+ mutex_unlock(&fb_helper->lock);
- for (i = 0; i < crtc->gamma_size; i++)
- helper->funcs->gamma_get(crtc, &r_base[i], &g_base[i], &b_base[i], i);
+ return err;
}
+EXPORT_SYMBOL(drm_fb_helper_remove_one_connector);
static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
{
@@ -285,7 +293,6 @@ int drm_fb_helper_debug_enter(struct fb_info *info)
if (drm_drv_uses_atomic_modeset(mode_set->crtc->dev))
continue;
- drm_fb_helper_save_lut_atomic(mode_set->crtc, helper);
funcs->mode_set_base_atomic(mode_set->crtc,
mode_set->fb,
mode_set->x,
@@ -298,20 +305,6 @@ int drm_fb_helper_debug_enter(struct fb_info *info)
}
EXPORT_SYMBOL(drm_fb_helper_debug_enter);
-/* Find the real fb for a given fb helper CRTC */
-static struct drm_framebuffer *drm_mode_config_fb(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_crtc *c;
-
- drm_for_each_crtc(c, dev) {
- if (crtc->base.id == c->base.id)
- return c->primary->fb;
- }
-
- return NULL;
-}
-
/**
* drm_fb_helper_debug_leave - implementation for &fb_ops.fb_debug_leave
* @info: fbdev registered by the helper
@@ -328,8 +321,11 @@ int drm_fb_helper_debug_leave(struct fb_info *info)
struct drm_mode_set *mode_set = &helper->crtc_info[i].mode_set;
crtc = mode_set->crtc;
+ if (drm_drv_uses_atomic_modeset(crtc->dev))
+ continue;
+
funcs = crtc->helper_private;
- fb = drm_mode_config_fb(crtc);
+ fb = crtc->primary->fb;
if (!crtc->enabled)
continue;
@@ -342,9 +338,6 @@ int drm_fb_helper_debug_leave(struct fb_info *info)
if (funcs->mode_set_base_atomic == NULL)
continue;
- if (drm_drv_uses_atomic_modeset(crtc->dev))
- continue;
-
drm_fb_helper_restore_lut_atomic(mode_set->crtc);
funcs->mode_set_base_atomic(mode_set->crtc, fb, crtc->x,
crtc->y, LEAVE_ATOMIC_MODE_SET);
@@ -354,19 +347,24 @@ int drm_fb_helper_debug_leave(struct fb_info *info)
}
EXPORT_SYMBOL(drm_fb_helper_debug_leave);
-static int restore_fbdev_mode_atomic(struct drm_fb_helper *fb_helper)
+static int restore_fbdev_mode_atomic(struct drm_fb_helper *fb_helper, bool active)
{
struct drm_device *dev = fb_helper->dev;
struct drm_plane *plane;
struct drm_atomic_state *state;
int i, ret;
unsigned int plane_mask;
+ struct drm_modeset_acquire_ctx ctx;
+
+ drm_modeset_acquire_init(&ctx, 0);
state = drm_atomic_state_alloc(dev);
- if (!state)
- return -ENOMEM;
+ if (!state) {
+ ret = -ENOMEM;
+ goto out_ctx;
+ }
- state->acquire_ctx = dev->mode_config.acquire_ctx;
+ state->acquire_ctx = &ctx;
retry:
plane_mask = 0;
drm_for_each_plane(plane, dev) {
@@ -375,7 +373,7 @@ retry:
plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state)) {
ret = PTR_ERR(plane_state);
- goto fail;
+ goto out_state;
}
plane_state->rotation = DRM_MODE_ROTATE_0;
@@ -389,7 +387,7 @@ retry:
ret = __drm_atomic_helper_disable_plane(plane, plane_state);
if (ret != 0)
- goto fail;
+ goto out_state;
}
for (i = 0; i < fb_helper->crtc_count; i++) {
@@ -397,23 +395,38 @@ retry:
ret = __drm_atomic_helper_set_config(mode_set, state);
if (ret != 0)
- goto fail;
+ goto out_state;
+
+ /*
+ * __drm_atomic_helper_set_config() sets active when a
+ * mode is set, unconditionally clear it if we force DPMS off
+ */
+ if (!active) {
+ struct drm_crtc *crtc = mode_set->crtc;
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+ crtc_state->active = false;
+ }
}
ret = drm_atomic_commit(state);
-fail:
+out_state:
drm_atomic_clean_old_fb(dev, plane_mask, ret);
if (ret == -EDEADLK)
goto backoff;
drm_atomic_state_put(state);
+out_ctx:
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
return ret;
backoff:
drm_atomic_state_clear(state);
- drm_atomic_legacy_backoff(state);
+ drm_modeset_backoff(&ctx);
goto retry;
}
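/*
 * Sketch of the acquire-context pattern used above (hypothetical foo_ name,
 * error handling trimmed to the essentials): take modeset locks through a
 * drm_modeset_acquire_ctx and, on -EDEADLK, clear the state, back off and
 * retry instead of relying on the removed drm_atomic_legacy_backoff().
 */
static int foo_commit_with_backoff(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);

	state = drm_atomic_state_alloc(dev);
	if (!state) {
		ret = -ENOMEM;
		goto out_ctx;
	}
	state->acquire_ctx = &ctx;
retry:
	/* ... add CRTC/plane state here; lock acquisition may fail ... */
	ret = drm_atomic_commit(state);
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);
out_ctx:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}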
@@ -422,8 +435,9 @@ static int restore_fbdev_mode_legacy(struct drm_fb_helper *fb_helper)
{
struct drm_device *dev = fb_helper->dev;
struct drm_plane *plane;
- int i;
+ int i, ret = 0;
+ drm_modeset_lock_all(fb_helper->dev);
drm_for_each_plane(plane, dev) {
if (plane->type != DRM_PLANE_TYPE_PRIMARY)
drm_plane_force_disable(plane);
@@ -437,34 +451,33 @@ static int restore_fbdev_mode_legacy(struct drm_fb_helper *fb_helper)
for (i = 0; i < fb_helper->crtc_count; i++) {
struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set;
struct drm_crtc *crtc = mode_set->crtc;
- int ret;
if (crtc->funcs->cursor_set2) {
ret = crtc->funcs->cursor_set2(crtc, NULL, 0, 0, 0, 0, 0);
if (ret)
- return ret;
+ goto out;
} else if (crtc->funcs->cursor_set) {
ret = crtc->funcs->cursor_set(crtc, NULL, 0, 0, 0);
if (ret)
- return ret;
+ goto out;
}
ret = drm_mode_set_config_internal(mode_set);
if (ret)
- return ret;
+ goto out;
}
+out:
+ drm_modeset_unlock_all(fb_helper->dev);
- return 0;
+ return ret;
}
static int restore_fbdev_mode(struct drm_fb_helper *fb_helper)
{
struct drm_device *dev = fb_helper->dev;
- drm_warn_on_modeset_not_all_locked(dev);
-
if (drm_drv_uses_atomic_modeset(dev))
- return restore_fbdev_mode_atomic(fb_helper);
+ return restore_fbdev_mode_atomic(fb_helper, true);
else
return restore_fbdev_mode_legacy(fb_helper);
}
@@ -482,23 +495,26 @@ static int restore_fbdev_mode(struct drm_fb_helper *fb_helper)
*/
int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
{
- struct drm_device *dev = fb_helper->dev;
bool do_delayed;
int ret;
if (!drm_fbdev_emulation)
return -ENODEV;
- drm_modeset_lock_all(dev);
+ if (READ_ONCE(fb_helper->deferred_setup))
+ return 0;
+
+ mutex_lock(&fb_helper->lock);
ret = restore_fbdev_mode(fb_helper);
do_delayed = fb_helper->delayed_hotplug;
if (do_delayed)
fb_helper->delayed_hotplug = false;
- drm_modeset_unlock_all(dev);
+ mutex_unlock(&fb_helper->lock);
if (do_delayed)
drm_fb_helper_hotplug_event(fb_helper);
+
return ret;
}
EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked);
@@ -517,10 +533,12 @@ static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
return false;
drm_for_each_crtc(crtc, dev) {
+ drm_modeset_lock(&crtc->mutex, NULL);
if (crtc->primary->fb)
crtcs_bound++;
if (crtc->primary->fb == fb_helper->fb)
bound++;
+ drm_modeset_unlock(&crtc->mutex);
}
if (bound < crtcs_bound)
@@ -548,11 +566,11 @@ static bool drm_fb_helper_force_kernel_mode(void)
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
continue;
- drm_modeset_lock_all(dev);
+ mutex_lock(&helper->lock);
ret = restore_fbdev_mode(helper);
if (ret)
error = true;
- drm_modeset_unlock_all(dev);
+ mutex_unlock(&helper->lock);
}
return error;
}
@@ -581,23 +599,14 @@ static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = {
static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { };
#endif
-static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
+static void dpms_legacy(struct drm_fb_helper *fb_helper, int dpms_mode)
{
- struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
struct drm_crtc *crtc;
struct drm_connector *connector;
int i, j;
- /*
- * For each CRTC in this fb, turn the connectors on/off.
- */
drm_modeset_lock_all(dev);
- if (!drm_fb_helper_is_bound(fb_helper)) {
- drm_modeset_unlock_all(dev);
- return;
- }
-
for (i = 0; i < fb_helper->crtc_count; i++) {
crtc = fb_helper->crtc_info[i].mode_set.crtc;
@@ -615,6 +624,26 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
drm_modeset_unlock_all(dev);
}
+static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+
+ /*
+ * For each CRTC in this fb, turn the connectors on/off.
+ */
+ mutex_lock(&fb_helper->lock);
+ if (!drm_fb_helper_is_bound(fb_helper)) {
+ mutex_unlock(&fb_helper->lock);
+ return;
+ }
+
+ if (drm_drv_uses_atomic_modeset(fb_helper->dev))
+ restore_fbdev_mode_atomic(fb_helper, dpms_mode == DRM_MODE_DPMS_ON);
+ else
+ dpms_legacy(fb_helper, dpms_mode);
+ mutex_unlock(&fb_helper->lock);
+}
+
/**
* drm_fb_helper_blank - implementation for &fb_ops.fb_blank
* @blank: desired blanking state
@@ -734,6 +763,7 @@ void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
INIT_WORK(&helper->resume_work, drm_fb_helper_resume_worker);
INIT_WORK(&helper->dirty_work, drm_fb_helper_dirty_work);
helper->dirty_clip.x1 = helper->dirty_clip.y1 = ~0;
+ mutex_init(&helper->lock);
helper->funcs = funcs;
helper->dev = dev;
}
@@ -899,6 +929,7 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
}
mutex_unlock(&kernel_fb_helper_lock);
+ mutex_destroy(&fb_helper->lock);
drm_fb_helper_crtc_free(fb_helper);
}
@@ -1167,22 +1198,23 @@ void drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper,
}
EXPORT_SYMBOL(drm_fb_helper_set_suspend_unlocked);
-static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
- u16 blue, u16 regno, struct fb_info *info)
+static int setcmap_pseudo_palette(struct fb_cmap *cmap, struct fb_info *info)
{
- struct drm_fb_helper *fb_helper = info->par;
- struct drm_framebuffer *fb = fb_helper->fb;
+ u32 *palette = (u32 *)info->pseudo_palette;
+ int i;
- if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
- u32 *palette;
+ if (cmap->start + cmap->len > 16)
+ return -EINVAL;
+
+ for (i = 0; i < cmap->len; ++i) {
+ u16 red = cmap->red[i];
+ u16 green = cmap->green[i];
+ u16 blue = cmap->blue[i];
u32 value;
- /* place color in psuedopalette */
- if (regno > 16)
- return -EINVAL;
- palette = (u32 *)info->pseudo_palette;
- red >>= (16 - info->var.red.length);
- green >>= (16 - info->var.green.length);
- blue >>= (16 - info->var.blue.length);
+
+ red >>= 16 - info->var.red.length;
+ green >>= 16 - info->var.green.length;
+ blue >>= 16 - info->var.blue.length;
value = (red << info->var.red.offset) |
(green << info->var.green.offset) |
(blue << info->var.blue.offset);
@@ -1192,23 +1224,169 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
mask <<= info->var.transp.offset;
value |= mask;
}
- palette[regno] = value;
- return 0;
+ palette[cmap->start + i] = value;
}
- /*
- * The driver really shouldn't advertise pseudo/directcolor
- * visuals if it can't deal with the palette.
- */
- if (WARN_ON(!fb_helper->funcs->gamma_set ||
- !fb_helper->funcs->gamma_get))
- return -EINVAL;
+ return 0;
+}
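/*
 * Worked example of the pseudo-palette encoding above, assuming an RGB565
 * visual (red.length = 5/offset = 11, green.length = 6/offset = 5,
 * blue.length = 5/offset = 0): a cmap entry of red = 0xffff, green = 0x0000,
 * blue = 0xffff is shifted down to 0x1f, 0x00 and 0x1f, and packed as
 * (0x1f << 11) | (0x00 << 5) | 0x1f = 0xf81f.
 */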
+
+static int setcmap_legacy(struct fb_cmap *cmap, struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_crtc *crtc;
+ u16 *r, *g, *b;
+ int i, ret = 0;
- WARN_ON(fb->format->cpp[0] != 1);
+ drm_modeset_lock_all(fb_helper->dev);
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ crtc = fb_helper->crtc_info[i].mode_set.crtc;
+ if (!crtc->funcs->gamma_set || !crtc->gamma_size)
+ return -EINVAL;
- fb_helper->funcs->gamma_set(crtc, red, green, blue, regno);
+ if (cmap->start + cmap->len > crtc->gamma_size)
+ return -EINVAL;
- return 0;
+ r = crtc->gamma_store;
+ g = r + crtc->gamma_size;
+ b = g + crtc->gamma_size;
+
+ memcpy(r + cmap->start, cmap->red, cmap->len * sizeof(*r));
+ memcpy(g + cmap->start, cmap->green, cmap->len * sizeof(*g));
+ memcpy(b + cmap->start, cmap->blue, cmap->len * sizeof(*b));
+
+ ret = crtc->funcs->gamma_set(crtc, r, g, b,
+ crtc->gamma_size, NULL);
+ if (ret)
+ return ret;
+ }
+ drm_modeset_unlock_all(fb_helper->dev);
+
+ return ret;
+}
+
+static struct drm_property_blob *setcmap_new_gamma_lut(struct drm_crtc *crtc,
+ struct fb_cmap *cmap)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_property_blob *gamma_lut;
+ struct drm_color_lut *lut;
+ int size = crtc->gamma_size;
+ int i;
+
+ if (!size || cmap->start + cmap->len > size)
+ return ERR_PTR(-EINVAL);
+
+ gamma_lut = drm_property_create_blob(dev, sizeof(*lut) * size, NULL);
+ if (IS_ERR(gamma_lut))
+ return gamma_lut;
+
+ lut = (struct drm_color_lut *)gamma_lut->data;
+ if (cmap->start || cmap->len != size) {
+ u16 *r = crtc->gamma_store;
+ u16 *g = r + crtc->gamma_size;
+ u16 *b = g + crtc->gamma_size;
+
+ for (i = 0; i < cmap->start; i++) {
+ lut[i].red = r[i];
+ lut[i].green = g[i];
+ lut[i].blue = b[i];
+ }
+ for (i = cmap->start + cmap->len; i < size; i++) {
+ lut[i].red = r[i];
+ lut[i].green = g[i];
+ lut[i].blue = b[i];
+ }
+ }
+
+ for (i = 0; i < cmap->len; i++) {
+ lut[cmap->start + i].red = cmap->red[i];
+ lut[cmap->start + i].green = cmap->green[i];
+ lut[cmap->start + i].blue = cmap->blue[i];
+ }
+
+ return gamma_lut;
+}
+
+static int setcmap_atomic(struct fb_cmap *cmap, struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_property_blob *gamma_lut = NULL;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_crtc_state *crtc_state;
+ struct drm_atomic_state *state;
+ struct drm_crtc *crtc;
+ u16 *r, *g, *b;
+ int i, ret = 0;
+ bool replaced;
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+ state = drm_atomic_state_alloc(dev);
+ if (!state) {
+ ret = -ENOMEM;
+ goto out_ctx;
+ }
+
+ state->acquire_ctx = &ctx;
+retry:
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ crtc = fb_helper->crtc_info[i].mode_set.crtc;
+
+ if (!gamma_lut)
+ gamma_lut = setcmap_new_gamma_lut(crtc, cmap);
+ if (IS_ERR(gamma_lut)) {
+ ret = PTR_ERR(gamma_lut);
+ gamma_lut = NULL;
+ goto out_state;
+ }
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ goto out_state;
+ }
+
+ replaced = drm_property_replace_blob(&crtc_state->degamma_lut,
+ NULL);
+ replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL);
+ replaced |= drm_property_replace_blob(&crtc_state->gamma_lut,
+ gamma_lut);
+ crtc_state->color_mgmt_changed |= replaced;
+ }
+
+ ret = drm_atomic_commit(state);
+ if (ret)
+ goto out_state;
+
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ crtc = fb_helper->crtc_info[i].mode_set.crtc;
+
+ r = crtc->gamma_store;
+ g = r + crtc->gamma_size;
+ b = g + crtc->gamma_size;
+
+ memcpy(r + cmap->start, cmap->red, cmap->len * sizeof(*r));
+ memcpy(g + cmap->start, cmap->green, cmap->len * sizeof(*g));
+ memcpy(b + cmap->start, cmap->blue, cmap->len * sizeof(*b));
+ }
+
+out_state:
+ if (ret == -EDEADLK)
+ goto backoff;
+
+ drm_property_blob_put(gamma_lut);
+ drm_atomic_state_put(state);
+out_ctx:
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ return ret;
+
+backoff:
+ drm_atomic_state_clear(state);
+ drm_modeset_backoff(&ctx);
+ goto retry;
}
/**
@@ -1219,52 +1397,29 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
- struct drm_device *dev = fb_helper->dev;
- const struct drm_crtc_helper_funcs *crtc_funcs;
- u16 *red, *green, *blue, *transp;
- struct drm_crtc *crtc;
- int i, j, rc = 0;
- int start;
+ int ret;
if (oops_in_progress)
return -EBUSY;
- drm_modeset_lock_all(dev);
+ mutex_lock(&fb_helper->lock);
+
if (!drm_fb_helper_is_bound(fb_helper)) {
- drm_modeset_unlock_all(dev);
- return -EBUSY;
+ ret = -EBUSY;
+ goto out;
}
- for (i = 0; i < fb_helper->crtc_count; i++) {
- crtc = fb_helper->crtc_info[i].mode_set.crtc;
- crtc_funcs = crtc->helper_private;
-
- red = cmap->red;
- green = cmap->green;
- blue = cmap->blue;
- transp = cmap->transp;
- start = cmap->start;
-
- for (j = 0; j < cmap->len; j++) {
- u16 hred, hgreen, hblue, htransp = 0xffff;
-
- hred = *red++;
- hgreen = *green++;
- hblue = *blue++;
+ if (info->fix.visual == FB_VISUAL_TRUECOLOR)
+ ret = setcmap_pseudo_palette(cmap, info);
+ else if (drm_drv_uses_atomic_modeset(fb_helper->dev))
+ ret = setcmap_atomic(cmap, info);
+ else
+ ret = setcmap_legacy(cmap, info);
- if (transp)
- htransp = *transp++;
+out:
+ mutex_unlock(&fb_helper->lock);
- rc = setcolreg(crtc, hred, hgreen, hblue, start++, info);
- if (rc)
- goto out;
- }
- if (crtc_funcs->load_lut)
- crtc_funcs->load_lut(crtc);
- }
- out:
- drm_modeset_unlock_all(dev);
- return rc;
+ return ret;
}
EXPORT_SYMBOL(drm_fb_helper_setcmap);
@@ -1281,12 +1436,11 @@ int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd,
unsigned long arg)
{
struct drm_fb_helper *fb_helper = info->par;
- struct drm_device *dev = fb_helper->dev;
struct drm_mode_set *mode_set;
struct drm_crtc *crtc;
int ret = 0;
- mutex_lock(&dev->mode_config.mutex);
+ mutex_lock(&fb_helper->lock);
if (!drm_fb_helper_is_bound(fb_helper)) {
ret = -EBUSY;
goto unlock;
@@ -1331,7 +1485,7 @@ int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd,
}
unlock:
- mutex_unlock(&dev->mode_config.mutex);
+ mutex_unlock(&fb_helper->lock);
return ret;
}
EXPORT_SYMBOL(drm_fb_helper_ioctl);
@@ -1463,61 +1617,36 @@ int drm_fb_helper_set_par(struct fb_info *info)
}
EXPORT_SYMBOL(drm_fb_helper_set_par);
-static int pan_display_atomic(struct fb_var_screeninfo *var,
- struct fb_info *info)
+static void pan_set(struct drm_fb_helper *fb_helper, int x, int y)
{
- struct drm_fb_helper *fb_helper = info->par;
- struct drm_device *dev = fb_helper->dev;
- struct drm_atomic_state *state;
- struct drm_plane *plane;
- int i, ret;
- unsigned int plane_mask;
-
- state = drm_atomic_state_alloc(dev);
- if (!state)
- return -ENOMEM;
+ int i;
- state->acquire_ctx = dev->mode_config.acquire_ctx;
-retry:
- plane_mask = 0;
for (i = 0; i < fb_helper->crtc_count; i++) {
struct drm_mode_set *mode_set;
mode_set = &fb_helper->crtc_info[i].mode_set;
- mode_set->x = var->xoffset;
- mode_set->y = var->yoffset;
-
- ret = __drm_atomic_helper_set_config(mode_set, state);
- if (ret != 0)
- goto fail;
-
- plane = mode_set->crtc->primary;
- plane_mask |= (1 << drm_plane_index(plane));
- plane->old_fb = plane->fb;
+ mode_set->x = x;
+ mode_set->y = y;
}
+}
- ret = drm_atomic_commit(state);
- if (ret != 0)
- goto fail;
+static int pan_display_atomic(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ int ret;
- info->var.xoffset = var->xoffset;
- info->var.yoffset = var->yoffset;
+ pan_set(fb_helper, var->xoffset, var->yoffset);
-fail:
- drm_atomic_clean_old_fb(dev, plane_mask, ret);
+ ret = restore_fbdev_mode_atomic(fb_helper, true);
+ if (!ret) {
+ info->var.xoffset = var->xoffset;
+ info->var.yoffset = var->yoffset;
+ } else
+ pan_set(fb_helper, info->var.xoffset, info->var.yoffset);
- if (ret == -EDEADLK)
- goto backoff;
-
- drm_atomic_state_put(state);
return ret;
-
-backoff:
- drm_atomic_state_clear(state);
- drm_atomic_legacy_backoff(state);
-
- goto retry;
}
static int pan_display_legacy(struct fb_var_screeninfo *var,
@@ -1528,6 +1657,7 @@ static int pan_display_legacy(struct fb_var_screeninfo *var,
int ret = 0;
int i;
+ drm_modeset_lock_all(fb_helper->dev);
for (i = 0; i < fb_helper->crtc_count; i++) {
modeset = &fb_helper->crtc_info[i].mode_set;
@@ -1542,6 +1672,7 @@ static int pan_display_legacy(struct fb_var_screeninfo *var,
}
}
}
+ drm_modeset_unlock_all(fb_helper->dev);
return ret;
}
@@ -1561,9 +1692,9 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
if (oops_in_progress)
return -EBUSY;
- drm_modeset_lock_all(dev);
+ mutex_lock(&fb_helper->lock);
if (!drm_fb_helper_is_bound(fb_helper)) {
- drm_modeset_unlock_all(dev);
+ mutex_unlock(&fb_helper->lock);
return -EBUSY;
}
@@ -1571,7 +1702,7 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
ret = pan_display_atomic(var, info);
else
ret = pan_display_legacy(var, info);
- drm_modeset_unlock_all(dev);
+ mutex_unlock(&fb_helper->lock);
return ret;
}
@@ -1579,8 +1710,7 @@ EXPORT_SYMBOL(drm_fb_helper_pan_display);
/*
* Allocates the backing storage and sets up the fbdev info structure through
- * the ->fb_probe callback and then registers the fbdev and sets up the panic
- * notifier.
+ * the ->fb_probe callback.
*/
static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
int preferred_bpp)
@@ -1678,13 +1808,8 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
}
if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) {
- /*
- * hmm everyone went away - assume VGA cable just fell out
- * and will come back later.
- */
- DRM_INFO("Cannot find any crtc or sizes - going 1024x768\n");
- sizes.fb_width = sizes.surface_width = 1024;
- sizes.fb_height = sizes.surface_height = 768;
+ DRM_INFO("Cannot find any crtc or sizes\n");
+ return -EAGAIN;
}
/* Handle our overallocation */
@@ -1696,17 +1821,6 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
if (ret < 0)
return ret;
- /*
- * Set the fb pointer - usually drm_setup_crtcs does this for hotplug
- * events, but at init time drm_setup_crtcs needs to be called before
- * the fb is allocated (since we need to figure out the desired size of
- * the fb before we can allocate it ...). Hence we need to fix things up
- * here again.
- */
- for (i = 0; i < fb_helper->crtc_count; i++)
- if (fb_helper->crtc_info[i].mode_set.num_connectors)
- fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
-
return 0;
}
@@ -1768,8 +1882,6 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helpe
info->var.xoffset = 0;
info->var.yoffset = 0;
info->var.activate = FB_ACTIVATE_NOW;
- info->var.height = -1;
- info->var.width = -1;
switch (fb->format->depth) {
case 8:
@@ -1831,12 +1943,11 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helpe
EXPORT_SYMBOL(drm_fb_helper_fill_var);
static int drm_fb_helper_probe_connector_modes(struct drm_fb_helper *fb_helper,
- uint32_t maxX,
- uint32_t maxY)
+ uint32_t maxX,
+ uint32_t maxY)
{
struct drm_connector *connector;
- int count = 0;
- int i;
+ int i, count = 0;
drm_fb_helper_for_each_connector(fb_helper, i) {
connector = fb_helper->connector_info[i]->connector;
@@ -2234,11 +2345,8 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper,
int i;
DRM_DEBUG_KMS("\n");
- if (drm_fb_helper_probe_connector_modes(fb_helper, width, height) == 0)
- DRM_DEBUG_KMS("No connectors reported connected with modes\n");
-
/* prevent concurrent modification of connector_count by hotplug */
- lockdep_assert_held(&fb_helper->dev->mode_config.mutex);
+ lockdep_assert_held(&fb_helper->lock);
crtcs = kcalloc(fb_helper->connector_count,
sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
@@ -2253,6 +2361,9 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper,
goto out;
}
+ mutex_lock(&fb_helper->dev->mode_config.mutex);
+ if (drm_fb_helper_probe_connector_modes(fb_helper, width, height) == 0)
+ DRM_DEBUG_KMS("No connectors reported connected with modes\n");
drm_enable_connectors(fb_helper, enabled);
if (!(fb_helper->funcs->initial_config &&
@@ -2274,6 +2385,7 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper,
drm_pick_crtcs(fb_helper, crtcs, modes, 0, width, height);
}
+ mutex_unlock(&fb_helper->dev->mode_config.mutex);
/* need to set the modesets up here for use later */
/* fill out the connector<->crtc mappings into the modesets */
@@ -2285,9 +2397,9 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper,
struct drm_display_mode *mode = modes[i];
struct drm_fb_helper_crtc *fb_crtc = crtcs[i];
struct drm_fb_offset *offset = &offsets[i];
- struct drm_mode_set *modeset = &fb_crtc->mode_set;
if (mode && fb_crtc) {
+ struct drm_mode_set *modeset = &fb_crtc->mode_set;
struct drm_connector *connector =
fb_helper->connector_info[i]->connector;
@@ -2301,7 +2413,6 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper,
fb_crtc->desired_mode);
drm_connector_get(connector);
modeset->connectors[modeset->num_connectors++] = connector;
- modeset->fb = fb_helper->fb;
modeset->x = offset->x;
modeset->y = offset->y;
}
@@ -2313,6 +2424,91 @@ out:
kfree(enabled);
}
+/*
+ * This is a continuation of drm_setup_crtcs() that sets up anything related
+ * to the framebuffer. During initialization, drm_setup_crtcs() is called before
+ * the framebuffer has been allocated (fb_helper->fb and fb_helper->fbdev).
+ * So, any setup that touches those fields needs to be done here instead of in
+ * drm_setup_crtcs().
+ */
+static void drm_setup_crtcs_fb(struct drm_fb_helper *fb_helper)
+{
+ struct fb_info *info = fb_helper->fbdev;
+ int i;
+
+ for (i = 0; i < fb_helper->crtc_count; i++)
+ if (fb_helper->crtc_info[i].mode_set.num_connectors)
+ fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
+
+ mutex_lock(&fb_helper->dev->mode_config.mutex);
+ drm_fb_helper_for_each_connector(fb_helper, i) {
+ struct drm_connector *connector =
+ fb_helper->connector_info[i]->connector;
+
+ /* use first connected connector for the physical dimensions */
+ if (connector->status == connector_status_connected) {
+ info->var.width = connector->display_info.width_mm;
+ info->var.height = connector->display_info.height_mm;
+ break;
+ }
+ }
+ mutex_unlock(&fb_helper->dev->mode_config.mutex);
+}
+
+/* Note: Drops fb_helper->lock before returning. */
+static int
+__drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper,
+ int bpp_sel)
+{
+ struct drm_device *dev = fb_helper->dev;
+ struct fb_info *info;
+ unsigned int width, height;
+ int ret;
+
+ width = dev->mode_config.max_width;
+ height = dev->mode_config.max_height;
+
+ drm_setup_crtcs(fb_helper, width, height);
+ ret = drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
+ if (ret < 0) {
+ if (ret == -EAGAIN) {
+ fb_helper->preferred_bpp = bpp_sel;
+ fb_helper->deferred_setup = true;
+ ret = 0;
+ }
+ mutex_unlock(&fb_helper->lock);
+
+ return ret;
+ }
+ drm_setup_crtcs_fb(fb_helper);
+
+ fb_helper->deferred_setup = false;
+
+ info = fb_helper->fbdev;
+ info->var.pixclock = 0;
+
+ /* Need to drop locks to avoid recursive deadlock in
+ * register_framebuffer. This is ok because the only thing left to do is
+ * register the fbdev emulation instance in kernel_fb_helper_list. */
+ mutex_unlock(&fb_helper->lock);
+
+ ret = register_framebuffer(info);
+ if (ret < 0)
+ return ret;
+
+ dev_info(dev->dev, "fb%d: %s frame buffer device\n",
+ info->node, info->fix.id);
+
+ mutex_lock(&kernel_fb_helper_lock);
+ if (list_empty(&kernel_fb_helper_list))
+ register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
+
+ list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
+ mutex_unlock(&kernel_fb_helper_lock);
+
+ return 0;
+}
+
/**
* drm_fb_helper_initial_config - setup a sane initial connector configuration
* @fb_helper: fb_helper device struct
@@ -2357,39 +2553,15 @@ out:
*/
int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
{
- struct drm_device *dev = fb_helper->dev;
- struct fb_info *info;
int ret;
if (!drm_fbdev_emulation)
return 0;
- mutex_lock(&dev->mode_config.mutex);
- drm_setup_crtcs(fb_helper,
- dev->mode_config.max_width,
- dev->mode_config.max_height);
- ret = drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
- mutex_unlock(&dev->mode_config.mutex);
- if (ret)
- return ret;
+ mutex_lock(&fb_helper->lock);
+ ret = __drm_fb_helper_initial_config_and_unlock(fb_helper, bpp_sel);
- info = fb_helper->fbdev;
- info->var.pixclock = 0;
- ret = register_framebuffer(info);
- if (ret < 0)
- return ret;
-
- dev_info(dev->dev, "fb%d: %s frame buffer device\n",
- info->node, info->fix.id);
-
- mutex_lock(&kernel_fb_helper_lock);
- if (list_empty(&kernel_fb_helper_list))
- register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
-
- list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
- mutex_unlock(&kernel_fb_helper_lock);
-
- return 0;
+ return ret;
}
EXPORT_SYMBOL(drm_fb_helper_initial_config);
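/*
 * Typical init-sequence sketch (hypothetical foo_ names; signatures as used
 * by this kernel version). With the deferred-setup change above,
 * drm_fb_helper_initial_config() returns 0 even when no connectors or sizes
 * are found yet, and setup is completed from the next
 * drm_fb_helper_hotplug_event().
 */
static int foo_fbdev_init(struct drm_device *dev, struct drm_fb_helper *helper,
			  const struct drm_fb_helper_funcs *funcs)
{
	int ret;

	drm_fb_helper_prepare(dev, helper, funcs);

	ret = drm_fb_helper_init(dev, helper, dev->mode_config.num_connector);
	if (ret)
		return ret;

	ret = drm_fb_helper_single_add_all_connectors(helper);
	if (ret)
		goto err_fini;

	ret = drm_fb_helper_initial_config(helper, 32);
	if (ret)
		goto err_fini;

	return 0;

err_fini:
	drm_fb_helper_fini(helper);
	return ret;
}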
@@ -2416,22 +2588,29 @@ EXPORT_SYMBOL(drm_fb_helper_initial_config);
*/
int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
{
- struct drm_device *dev = fb_helper->dev;
+ int err = 0;
if (!drm_fbdev_emulation)
return 0;
- mutex_lock(&dev->mode_config.mutex);
+ mutex_lock(&fb_helper->lock);
+ if (fb_helper->deferred_setup) {
+ err = __drm_fb_helper_initial_config_and_unlock(fb_helper,
+ fb_helper->preferred_bpp);
+ return err;
+ }
+
if (!fb_helper->fb || !drm_fb_helper_is_bound(fb_helper)) {
fb_helper->delayed_hotplug = true;
- mutex_unlock(&dev->mode_config.mutex);
- return 0;
+ mutex_unlock(&fb_helper->lock);
+ return err;
}
+
DRM_DEBUG_KMS("\n");
drm_setup_crtcs(fb_helper, fb_helper->fb->width, fb_helper->fb->height);
-
- mutex_unlock(&dev->mode_config.mutex);
+ drm_setup_crtcs_fb(fb_helper);
+ mutex_unlock(&fb_helper->lock);
drm_fb_helper_set_par(fb_helper->fbdev);
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 84f3a242cc39..b3c6e997ccdb 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -75,7 +75,7 @@ DEFINE_MUTEX(drm_global_mutex);
* for drivers which use the CMA GEM helpers it's drm_gem_cma_mmap().
*
* No other file operations are supported by the DRM userspace API. Overall the
- * following is an example #file_operations structure::
+ * following is an example &file_operations structure::
*
* static const example_drm_fops = {
* .owner = THIS_MODULE,
@@ -92,6 +92,11 @@ DEFINE_MUTEX(drm_global_mutex);
* For plain GEM based drivers there is the DEFINE_DRM_GEM_FOPS() macro, and for
* CMA based drivers there is the DEFINE_DRM_GEM_CMA_FOPS() macro to make this
* simpler.
+ *
+ * The driver's &file_operations must be stored in &drm_driver.fops.
+ *
+ * For driver-private IOCTL handling see the more detailed discussion in
+ * :ref:`IOCTL support in the userland interfaces chapter<drm_driver_ioctl>`.
*/
static int drm_open_helper(struct file *filp, struct drm_minor *minor);
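/*
 * Usage sketch for the documentation above (hypothetical foo_ names): a
 * CMA-based driver can generate the whole &file_operations table with the
 * DEFINE_DRM_GEM_CMA_FOPS() macro and point &drm_driver.fops at it.
 */
DEFINE_DRM_GEM_CMA_FOPS(foo_fops);

static struct drm_driver foo_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	.fops		 = &foo_fops,
	/* ... */
};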
@@ -431,7 +436,7 @@ int drm_release(struct inode *inode, struct file *filp)
if (!--dev->open_count) {
drm_lastclose(dev);
- if (drm_device_is_unplugged(dev))
+ if (drm_dev_is_unplugged(dev))
drm_put_dev(dev);
}
mutex_unlock(&drm_global_mutex);
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index b3ef4f1c2630..af279844d7ce 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -817,7 +817,7 @@ retry:
plane->old_fb = plane->fb;
}
- for_each_connector_in_state(state, conn, conn_state, i) {
+ for_each_new_connector_in_state(state, conn, conn_state, i) {
ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
if (ret)
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 8dc11064253d..c55f338e380b 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -36,6 +36,7 @@
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
+#include <linux/mem_encrypt.h>
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/drm_gem.h>
@@ -255,13 +256,13 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
struct drm_gem_object *obj = ptr;
struct drm_device *dev = obj->dev;
+ if (dev->driver->gem_close_object)
+ dev->driver->gem_close_object(obj, file_priv);
+
if (drm_core_check_feature(dev, DRIVER_PRIME))
drm_gem_remove_prime_handles(obj, file_priv);
drm_vma_node_revoke(&obj->vma_node, file_priv);
- if (dev->driver->gem_close_object)
- dev->driver->gem_close_object(obj, file_priv);
-
drm_gem_object_handle_put_unlocked(obj);
return 0;
@@ -311,6 +312,41 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
EXPORT_SYMBOL(drm_gem_handle_delete);
/**
+ * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
+ * @file: drm file-private structure containing the gem object
+ * @dev: corresponding drm_device
+ * @handle: gem object handle
+ * @offset: return location for the fake mmap offset
+ *
+ * This implements the &drm_driver.dumb_map_offset kms driver callback for
+ * drivers which use gem to manage their backing storage.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
+ u32 handle, u64 *offset)
+{
+ struct drm_gem_object *obj;
+ int ret;
+
+ obj = drm_gem_object_lookup(file, handle);
+ if (!obj)
+ return -ENOENT;
+
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret)
+ goto out;
+
+ *offset = drm_vma_node_offset_addr(&obj->vma_node);
+out:
+ drm_gem_object_put_unlocked(obj);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);
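/*
 * Usage sketch (hypothetical foo_driver; a CMA-backed driver is assumed):
 * the generic helper replaces per-driver dumb_map_offset implementations
 * such as the drm_gem_cma_dumb_map_offset() removed later in this patch.
 */
static struct drm_driver foo_driver = {
	.dumb_create	 = drm_gem_cma_dumb_create,
	.dumb_map_offset = drm_gem_dumb_map_offset,
	.dumb_destroy	 = drm_gem_dumb_destroy,
	/* ... remaining driver callbacks ... */
};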
+
+/**
* drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
* @file: drm file-private structure to remove the dumb handle from
* @dev: corresponding drm_device
@@ -826,13 +862,15 @@ drm_gem_object_put_unlocked(struct drm_gem_object *obj)
return;
dev = obj->dev;
- might_lock(&dev->struct_mutex);
- if (dev->driver->gem_free_object_unlocked)
+ if (dev->driver->gem_free_object_unlocked) {
kref_put(&obj->refcount, drm_gem_object_free);
- else if (kref_put_mutex(&obj->refcount, drm_gem_object_free,
+ } else {
+ might_lock(&dev->struct_mutex);
+ if (kref_put_mutex(&obj->refcount, drm_gem_object_free,
&dev->struct_mutex))
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->struct_mutex);
+ }
}
EXPORT_SYMBOL(drm_gem_object_put_unlocked);
@@ -928,6 +966,7 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
vma->vm_ops = dev->driver->gem_vm_ops;
vma->vm_private_data = obj;
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
/* Take a ref for this mapping of the object, so that the fault
* handler can dereference the mmap offset's pointer to the object.
@@ -964,7 +1003,7 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
struct drm_vma_offset_node *node;
int ret;
- if (drm_device_is_unplugged(dev))
+ if (drm_dev_is_unplugged(dev))
return -ENODEV;
drm_vma_offset_lock_lookup(dev->vma_offset_manager);
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index bc28e7575254..373e33f22be4 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -177,7 +177,7 @@ drm_gem_cma_create_with_handle(struct drm_file *file_priv,
* This function frees the backing memory of the CMA GEM object, cleans up the
* GEM object state and frees the memory used to store the object itself.
* Drivers using the CMA helpers should set this as their
- * &drm_driver.gem_free_object callback.
+ * &drm_driver.gem_free_object_unlocked callback.
*/
void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
{
@@ -264,41 +264,6 @@ int drm_gem_cma_dumb_create(struct drm_file *file_priv,
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);
-/**
- * drm_gem_cma_dumb_map_offset - return the fake mmap offset for a CMA GEM
- * object
- * @file_priv: DRM file-private structure containing the GEM object
- * @drm: DRM device
- * @handle: GEM object handle
- * @offset: return location for the fake mmap offset
- *
- * This function look up an object by its handle and returns the fake mmap
- * offset associated with it. Drivers using the CMA helpers should set this
- * as their &drm_driver.dumb_map_offset callback.
- *
- * Returns:
- * 0 on success or a negative error code on failure.
- */
-int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
- struct drm_device *drm, u32 handle,
- u64 *offset)
-{
- struct drm_gem_object *gem_obj;
-
- gem_obj = drm_gem_object_lookup(file_priv, handle);
- if (!gem_obj) {
- dev_err(drm->dev, "failed to lookup GEM object\n");
- return -EINVAL;
- }
-
- *offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
-
- drm_gem_object_put_unlocked(gem_obj);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_map_offset);
-
const struct vm_operations_struct drm_gem_cma_vm_ops = {
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
@@ -390,7 +355,7 @@ unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
struct drm_device *dev = priv->minor->dev;
struct drm_vma_offset_node *node;
- if (drm_device_is_unplugged(dev))
+ if (drm_dev_is_unplugged(dev))
return -ENODEV;
drm_vma_offset_lock_lookup(dev->vma_offset_manager);
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
new file mode 100644
index 000000000000..d54a083dc5dd
--- /dev/null
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -0,0 +1,283 @@
+/*
+ * drm gem framebuffer helper functions
+ *
+ * Copyright (C) 2017 Noralf Trønnes
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/dma-fence.h>
+#include <linux/reservation.h>
+#include <linux/slab.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_modeset_helper.h>
+
+/**
+ * DOC: overview
+ *
+ * This library provides helpers for drivers that don't subclass
+ * &drm_framebuffer and use &drm_gem_object for their backing storage.
+ *
+ * Drivers without additional needs to validate framebuffers can simply use
+ * drm_gem_fb_create() and everything is wired up automatically. But all
+ * parts can be used individually.
+ */
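/*
 * Wiring sketch for the simple case described above (hypothetical foo_
 * names; assumes the atomic helpers are used for check/commit):
 */
static const struct drm_mode_config_funcs foo_mode_config_funcs = {
	.fb_create     = drm_gem_fb_create,
	.atomic_check  = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};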
+
+/**
+ * drm_gem_fb_get_obj() - Get GEM object for framebuffer
+ * @fb: The framebuffer
+ * @plane: Which plane
+ *
+ * Returns the GEM object for the given framebuffer.
+ */
+struct drm_gem_object *drm_gem_fb_get_obj(struct drm_framebuffer *fb,
+ unsigned int plane)
+{
+ if (plane >= 4)
+ return NULL;
+
+ return fb->obj[plane];
+}
+EXPORT_SYMBOL_GPL(drm_gem_fb_get_obj);
+
+static struct drm_framebuffer *
+drm_gem_fb_alloc(struct drm_device *dev,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object **obj, unsigned int num_planes,
+ const struct drm_framebuffer_funcs *funcs)
+{
+ struct drm_framebuffer *fb;
+ int ret, i;
+
+ fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+ if (!fb)
+ return ERR_PTR(-ENOMEM);
+
+ drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
+
+ for (i = 0; i < num_planes; i++)
+ fb->obj[i] = obj[i];
+
+ ret = drm_framebuffer_init(dev, fb, funcs);
+ if (ret) {
+ DRM_DEV_ERROR(dev->dev, "Failed to init framebuffer: %d\n",
+ ret);
+ kfree(fb);
+ return ERR_PTR(ret);
+ }
+
+ return fb;
+}
+
+/**
+ * drm_gem_fb_destroy - Free GEM backed framebuffer
+ * @fb: DRM framebuffer
+ *
+ * Frees a GEM backed framebuffer with its backing buffer(s) and the structure
+ * itself. Drivers can use this as their &drm_framebuffer_funcs->destroy
+ * callback.
+ */
+void drm_gem_fb_destroy(struct drm_framebuffer *fb)
+{
+ int i;
+
+ for (i = 0; i < 4; i++)
+ drm_gem_object_put_unlocked(fb->obj[i]);
+
+ drm_framebuffer_cleanup(fb);
+ kfree(fb);
+}
+EXPORT_SYMBOL(drm_gem_fb_destroy);
+
+/**
+ * drm_gem_fb_create_handle - Create handle for GEM backed framebuffer
+ * @fb: DRM framebuffer
+ * @file: drm file
+ * @handle: handle created
+ *
+ * Drivers can use this as their &drm_framebuffer_funcs->create_handle
+ * callback.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_fb_create_handle(struct drm_framebuffer *fb, struct drm_file *file,
+ unsigned int *handle)
+{
+ return drm_gem_handle_create(file, fb->obj[0], handle);
+}
+EXPORT_SYMBOL(drm_gem_fb_create_handle);
+
+/**
+ * drm_gem_fb_create_with_funcs() - helper function for the
+ * &drm_mode_config_funcs.fb_create
+ * callback
+ * @dev: DRM device
+ * @file: drm file for the ioctl call
+ * @mode_cmd: metadata from the userspace fb creation request
+ * @funcs: vtable to be used for the new framebuffer object
+ *
+ * This can be used to set &drm_framebuffer_funcs for drivers that need the
+ * &drm_framebuffer_funcs.dirty callback. Use drm_gem_fb_create() if you don't
+ * need to change &drm_framebuffer_funcs.
+ * The function does buffer size validation.
+ */
+struct drm_framebuffer *
+drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ const struct drm_framebuffer_funcs *funcs)
+{
+ const struct drm_format_info *info;
+ struct drm_gem_object *objs[4];
+ struct drm_framebuffer *fb;
+ int ret, i;
+
+ info = drm_get_format_info(dev, mode_cmd);
+ if (!info)
+ return ERR_PTR(-EINVAL);
+
+ for (i = 0; i < info->num_planes; i++) {
+ unsigned int width = mode_cmd->width / (i ? info->hsub : 1);
+ unsigned int height = mode_cmd->height / (i ? info->vsub : 1);
+ unsigned int min_size;
+
+ objs[i] = drm_gem_object_lookup(file, mode_cmd->handles[i]);
+ if (!objs[i]) {
+ DRM_DEV_ERROR(dev->dev, "Failed to lookup GEM\n");
+ ret = -ENOENT;
+ goto err_gem_object_put;
+ }
+
+ min_size = (height - 1) * mode_cmd->pitches[i]
+ + width * info->cpp[i]
+ + mode_cmd->offsets[i];
+
+ if (objs[i]->size < min_size) {
+ drm_gem_object_put_unlocked(objs[i]);
+ ret = -EINVAL;
+ goto err_gem_object_put;
+ }
+ }
+
+ fb = drm_gem_fb_alloc(dev, mode_cmd, objs, i, funcs);
+ if (IS_ERR(fb)) {
+ ret = PTR_ERR(fb);
+ goto err_gem_object_put;
+ }
+
+ return fb;
+
+err_gem_object_put:
+ for (i--; i >= 0; i--)
+ drm_gem_object_put_unlocked(objs[i]);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gem_fb_create_with_funcs);
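/*
 * Worked example of the min_size check above: for a single-plane XRGB8888
 * framebuffer of 1920x1080 with pitches[0] = 7680 and offsets[0] = 0,
 * min_size = (1080 - 1) * 7680 + 1920 * 4 + 0 = 8294400 bytes, so the
 * backing GEM object must be at least that large.
 */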
+
+static const struct drm_framebuffer_funcs drm_gem_fb_funcs = {
+ .destroy = drm_gem_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
+};
+
+/**
+ * drm_gem_fb_create() - &drm_mode_config_funcs.fb_create callback function
+ * @dev: DRM device
+ * @file: drm file for the ioctl call
+ * @mode_cmd: metadata from the userspace fb creation request
+ *
+ * If your hardware has special alignment or pitch requirements, these should be
+ * checked before calling this function. The function does buffer size
+ * validation. Use drm_gem_fb_create_with_funcs() if you need to set
+ * &drm_framebuffer_funcs.dirty.
+ */
+struct drm_framebuffer *
+drm_gem_fb_create(struct drm_device *dev, struct drm_file *file,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ return drm_gem_fb_create_with_funcs(dev, file, mode_cmd,
+ &drm_gem_fb_funcs);
+}
+EXPORT_SYMBOL_GPL(drm_gem_fb_create);
+
+/**
+ * drm_gem_fb_prepare_fb() - Prepare gem framebuffer
+ * @plane: Which plane
+ * @state: Plane state to attach the fence to
+ *
+ * This can be used as the &drm_plane_helper_funcs.prepare_fb hook.
+ *
+ * This function checks if the plane FB has a dma-buf attached, extracts
+ * the exclusive fence and attaches it to plane state for the atomic helper
+ * to wait on.
+ *
+ * There is no need for &drm_plane_helper_funcs.cleanup_fb hook for simple
+ * gem based framebuffer drivers which have their buffers always pinned in
+ * memory.
+ */
+int drm_gem_fb_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct dma_buf *dma_buf;
+ struct dma_fence *fence;
+
+ if ((plane->state->fb == state->fb) || !state->fb)
+ return 0;
+
+ dma_buf = drm_gem_fb_get_obj(state->fb, 0)->dma_buf;
+ if (dma_buf) {
+ fence = reservation_object_get_excl_rcu(dma_buf->resv);
+ drm_atomic_set_fence_for_plane(state, fence);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(drm_gem_fb_prepare_fb);
+
+/**
+ * drm_gem_fbdev_fb_create - Create a drm_framebuffer for fbdev emulation
+ * @dev: DRM device
+ * @sizes: fbdev size description
+ * @pitch_align: optional pitch alignment
+ * @obj: GEM object backing the framebuffer
+ * @funcs: vtable to be used for the new framebuffer object
+ *
+ * This function creates a framebuffer for use with fbdev emulation.
+ *
+ * Returns:
+ * Pointer to a drm_framebuffer on success or an error pointer on failure.
+ */
+struct drm_framebuffer *
+drm_gem_fbdev_fb_create(struct drm_device *dev,
+ struct drm_fb_helper_surface_size *sizes,
+ unsigned int pitch_align, struct drm_gem_object *obj,
+ const struct drm_framebuffer_funcs *funcs)
+{
+ struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ mode_cmd.pitches[0] = sizes->surface_width *
+ DIV_ROUND_UP(sizes->surface_bpp, 8);
+ if (pitch_align)
+ mode_cmd.pitches[0] = roundup(mode_cmd.pitches[0],
+ pitch_align);
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+ if (obj->size < mode_cmd.pitches[0] * mode_cmd.height)
+ return ERR_PTR(-EINVAL);
+
+ return drm_gem_fb_alloc(dev, &mode_cmd, &obj, 1, funcs);
+}
+EXPORT_SYMBOL(drm_gem_fbdev_fb_create);
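/*
 * Usage sketch (hypothetical foo_ names), mirroring the drm_fbdev_cma_create()
 * conversion earlier in this patch: inside a &drm_fb_helper_funcs.fb_probe
 * implementation, wrap an already-allocated GEM object in a framebuffer.
 */
static const struct drm_framebuffer_funcs foo_fb_funcs = {
	.destroy	= drm_gem_fb_destroy,
	.create_handle	= drm_gem_fb_create_handle,
};

static int foo_fbdev_probe(struct drm_fb_helper *helper,
			   struct drm_fb_helper_surface_size *sizes)
{
	struct drm_device *dev = helper->dev;
	struct drm_gem_cma_object *obj;
	struct drm_framebuffer *fb;
	size_t size;

	size = sizes->surface_width * sizes->surface_height *
	       DIV_ROUND_UP(sizes->surface_bpp, 8);

	obj = drm_gem_cma_create(dev, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	fb = drm_gem_fbdev_fb_create(dev, sizes, 0, &obj->base, &foo_fb_funcs);
	if (IS_ERR(fb)) {
		drm_gem_object_put_unlocked(&obj->base);
		return PTR_ERR(fb);
	}
	helper->fb = fb;

	/* ... allocate the fb_info and fill in fix/var as usual ... */
	return 0;
}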
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 5edc24bd10fa..fbc3f308fa19 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -32,6 +32,7 @@ void drm_lastclose(struct drm_device *dev);
int drm_irq_by_busid(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void drm_pci_agp_destroy(struct drm_device *dev);
+int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master);
/* drm_prime.c */
int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
@@ -56,14 +57,19 @@ int drm_gem_name_info(struct seq_file *m, void *data);
/* drm_vblank.c */
extern unsigned int drm_timestamp_monotonic;
void drm_vblank_disable_and_save(struct drm_device *dev, unsigned int pipe);
+void drm_vblank_cleanup(struct drm_device *dev);
+
+/* IOCTLS */
+int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+int drm_legacy_modeset_ctl_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+/* drm_irq.c */
/* IOCTLS */
-int drm_wait_vblank(struct drm_device *dev, void *data,
- struct drm_file *filp);
int drm_legacy_irq_control(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int drm_legacy_modeset_ctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
/* drm_auth.c */
int drm_getmagic(struct drm_device *dev, void *data,
@@ -161,3 +167,9 @@ int drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private);
int drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private);
+int drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private);
+int drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private);
+int drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private);
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index d1f202852028..f8e96e648acf 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -842,7 +842,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
req.request.type = req32.request.type;
req.request.sequence = req32.request.sequence;
req.request.signal = req32.request.signal;
- err = drm_ioctl_kernel(file, drm_wait_vblank, &req, DRM_UNLOCKED);
+ err = drm_ioctl_kernel(file, drm_wait_vblank_ioctl, &req, DRM_UNLOCKED);
if (err)
return err;
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index f1eb326524cf..a9ae6dd2d593 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -143,8 +143,8 @@ static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
if (master->unique != NULL)
drm_unset_busid(dev, master);
- if (dev->driver->set_busid) {
- ret = dev->driver->set_busid(dev, master);
+ if (dev->dev && dev_is_pci(dev->dev)) {
+ ret = drm_pci_set_busid(dev, master);
if (ret) {
drm_unset_busid(dev, master);
return ret;
@@ -603,9 +603,9 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_legacy_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_legacy_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_legacy_modeset_ctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_legacy_modeset_ctl_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -657,6 +657,12 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, drm_syncobj_fd_to_handle_ioctl,
DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_WAIT, drm_syncobj_wait_ioctl,
+ DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_RESET, drm_syncobj_reset_ioctl,
+ DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_SIGNAL, drm_syncobj_signal_ioctl,
+ DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
@@ -695,7 +701,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
*
* DRM driver private IOCTL must be in the range from DRM_COMMAND_BASE to
* DRM_COMMAND_END. Finally you need an array of &struct drm_ioctl_desc to wire
- * up the handlers and set the access rights:
+ * up the handlers and set the access rights::
*
* static const struct drm_ioctl_desc my_driver_ioctls[] = {
* DRM_IOCTL_DEF_DRV(MY_DRIVER_OPERATION, my_driver_operation,
@@ -704,6 +710,9 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
*
* And then assign this to the &drm_driver.ioctls field in your driver
* structure.
+ *
+ * See the separate chapter on :ref:`file operations<drm_driver_fops>` for how
+ * the driver-specific IOCTLs are wired up.
*/
long drm_ioctl_kernel(struct file *file, drm_ioctl_t *func, void *kdata,
@@ -713,7 +722,7 @@ long drm_ioctl_kernel(struct file *file, drm_ioctl_t *func, void *kdata,
struct drm_device *dev = file_priv->minor->dev;
int retcode;
- if (drm_device_is_unplugged(dev))
+ if (drm_dev_is_unplugged(dev))
return -ENODEV;
retcode = drm_ioctl_permit(flags, file_priv);
@@ -762,7 +771,7 @@ long drm_ioctl(struct file *filp,
dev = file_priv->minor->dev;
- if (drm_device_is_unplugged(dev))
+ if (drm_dev_is_unplugged(dev))
return -ENODEV;
is_driver_ioctl = nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END;
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 1160a579e0dc..4b47226b90d4 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -165,14 +165,14 @@ of_mipi_dsi_device_add(struct mipi_dsi_host *host, struct device_node *node)
u32 reg;
if (of_modalias_node(node, info.type, sizeof(info.type)) < 0) {
- dev_err(dev, "modalias failure on %s\n", node->full_name);
+ dev_err(dev, "modalias failure on %pOF\n", node);
return ERR_PTR(-EINVAL);
}
ret = of_property_read_u32(node, "reg", &reg);
if (ret) {
- dev_err(dev, "device node %s has no valid reg property: %d\n",
- node->full_name, ret);
+ dev_err(dev, "device node %pOF has no valid reg property: %d\n",
+ node, ret);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index d9862259a2a7..74f6ff5df656 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -337,6 +337,13 @@ static int drm_mode_create_standard_properties(struct drm_device *dev)
return -ENOMEM;
dev->mode_config.gamma_lut_size_property = prop;
+ prop = drm_property_create(dev,
+ DRM_MODE_PROP_IMMUTABLE | DRM_MODE_PROP_BLOB,
+ "IN_FORMATS", 0);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.modifiers_property = prop;
+
return 0;
}
diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c
index da9a9adbcc98..1055533792f3 100644
--- a/drivers/gpu/drm/drm_mode_object.c
+++ b/drivers/gpu/drm/drm_mode_object.c
@@ -233,6 +233,9 @@ int drm_object_property_set_value(struct drm_mode_object *obj,
{
int i;
+ WARN_ON(drm_drv_uses_atomic_modeset(property->dev) &&
+ !(property->flags & DRM_MODE_PROP_IMMUTABLE));
+
for (i = 0; i < obj->properties->count; i++) {
if (obj->properties->properties[i] == property) {
obj->properties->values[i] = val;
@@ -244,24 +247,7 @@ int drm_object_property_set_value(struct drm_mode_object *obj,
}
EXPORT_SYMBOL(drm_object_property_set_value);
-/**
- * drm_object_property_get_value - retrieve the value of a property
- * @obj: drm mode object to get property value from
- * @property: property to retrieve
- * @val: storage for the property value
- *
- * This function retrieves the softare state of the given property for the given
- * property. Since there is no driver callback to retrieve the current property
- * value this might be out of sync with the hardware, depending upon the driver
- * and property.
- *
- * Atomic drivers should never call this function directly, the core will read
- * out property values through the various ->atomic_get_property callbacks.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_object_property_get_value(struct drm_mode_object *obj,
+int __drm_object_property_get_value(struct drm_mode_object *obj,
struct drm_property *property, uint64_t *val)
{
int i;
@@ -284,6 +270,31 @@ int drm_object_property_get_value(struct drm_mode_object *obj,
return -EINVAL;
}
+
+/**
+ * drm_object_property_get_value - retrieve the value of a property
+ * @obj: drm mode object to get property value from
+ * @property: property to retrieve
+ * @val: storage for the property value
+ *
+ * This function retrieves the software state of the given property for the
+ * given object. Since there is no driver callback to retrieve the current
+ * property value this might be out of sync with the hardware, depending upon
+ * the driver and property.
+ *
+ * Atomic drivers should never call this function directly, the core will read
+ * out property values through the various ->atomic_get_property callbacks.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_object_property_get_value(struct drm_mode_object *obj,
+ struct drm_property *property, uint64_t *val)
+{
+ WARN_ON(drm_drv_uses_atomic_modeset(property->dev));
+
+ return __drm_object_property_get_value(obj, property, val);
+}
EXPORT_SYMBOL(drm_object_property_get_value);
/* helper for getconnector and getproperties ioctls */
@@ -302,7 +313,7 @@ int drm_mode_object_get_properties(struct drm_mode_object *obj, bool atomic,
continue;
if (*arg_count_props > count) {
- ret = drm_object_property_get_value(obj, prop, &val);
+ ret = __drm_object_property_get_value(obj, prop, &val);
if (ret)
return ret;
@@ -381,6 +392,83 @@ struct drm_property *drm_mode_obj_find_prop_id(struct drm_mode_object *obj,
return NULL;
}
+static int set_property_legacy(struct drm_mode_object *obj,
+ struct drm_property *prop,
+ uint64_t prop_value)
+{
+ struct drm_device *dev = prop->dev;
+ struct drm_mode_object *ref;
+ int ret = -EINVAL;
+
+ if (!drm_property_change_valid_get(prop, prop_value, &ref))
+ return -EINVAL;
+
+ drm_modeset_lock_all(dev);
+ switch (obj->type) {
+ case DRM_MODE_OBJECT_CONNECTOR:
+ ret = drm_mode_connector_set_obj_prop(obj, prop,
+ prop_value);
+ break;
+ case DRM_MODE_OBJECT_CRTC:
+ ret = drm_mode_crtc_set_obj_prop(obj, prop, prop_value);
+ break;
+ case DRM_MODE_OBJECT_PLANE:
+ ret = drm_mode_plane_set_obj_prop(obj_to_plane(obj),
+ prop, prop_value);
+ break;
+ }
+ drm_property_change_valid_put(prop, ref);
+ drm_modeset_unlock_all(dev);
+
+ return ret;
+}
+
+static int set_property_atomic(struct drm_mode_object *obj,
+ struct drm_property *prop,
+ uint64_t prop_value)
+{
+ struct drm_device *dev = prop->dev;
+ struct drm_atomic_state *state;
+ struct drm_modeset_acquire_ctx ctx;
+ int ret;
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+ state = drm_atomic_state_alloc(dev);
+ if (!state)
+ return -ENOMEM;
+ state->acquire_ctx = &ctx;
+retry:
+ if (prop == state->dev->mode_config.dpms_property) {
+ if (obj->type != DRM_MODE_OBJECT_CONNECTOR) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = drm_atomic_connector_commit_dpms(state,
+ obj_to_connector(obj),
+ prop_value);
+ } else {
+ ret = drm_atomic_set_property(state, obj, prop, prop_value);
+ if (ret)
+ goto out;
+ ret = drm_atomic_commit(state);
+ }
+out:
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
+
+ drm_atomic_state_put(state);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ return ret;
+}
+
int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -388,18 +476,13 @@ int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
struct drm_mode_object *arg_obj;
struct drm_property *property;
int ret = -EINVAL;
- struct drm_mode_object *ref;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- drm_modeset_lock_all(dev);
-
arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
- if (!arg_obj) {
- ret = -ENOENT;
- goto out;
- }
+ if (!arg_obj)
+ return -ENOENT;
if (!arg_obj->properties)
goto out_unref;
@@ -408,28 +491,12 @@ int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
if (!property)
goto out_unref;
- if (!drm_property_change_valid_get(property, arg->value, &ref))
- goto out_unref;
-
- switch (arg_obj->type) {
- case DRM_MODE_OBJECT_CONNECTOR:
- ret = drm_mode_connector_set_obj_prop(arg_obj, property,
- arg->value);
- break;
- case DRM_MODE_OBJECT_CRTC:
- ret = drm_mode_crtc_set_obj_prop(arg_obj, property, arg->value);
- break;
- case DRM_MODE_OBJECT_PLANE:
- ret = drm_mode_plane_set_obj_prop(obj_to_plane(arg_obj),
- property, arg->value);
- break;
- }
-
- drm_property_change_valid_put(property, ref);
+ if (drm_drv_uses_atomic_modeset(property->dev))
+ ret = set_property_atomic(arg_obj, property, arg->value);
+ else
+ ret = set_property_legacy(arg_obj, property, arg->value);
out_unref:
drm_mode_object_put(arg_obj);
-out:
- drm_modeset_unlock_all(dev);
return ret;
}
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index f2493b9b82e6..4a3f68a33844 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -709,8 +709,8 @@ int of_get_drm_display_mode(struct device_node *np,
if (bus_flags)
drm_bus_flags_from_videomode(&vm, bus_flags);
- pr_debug("%s: got %dx%d display mode from %s\n",
- of_node_full_name(np), vm.hactive, vm.vactive, np->name);
+ pr_debug("%pOF: got %dx%d display mode from %s\n",
+ np, vm.hactive, vm.vactive, np->name);
drm_mode_debug_printmodeline(dmode);
return 0;
@@ -1083,6 +1083,34 @@ drm_mode_validate_size(const struct drm_display_mode *mode,
}
EXPORT_SYMBOL(drm_mode_validate_size);
+/**
+ * drm_mode_validate_ycbcr420 - filter out 'ycbcr420-only' modes when not allowed
+ * @mode: mode to check
+ * @connector: drm connector under action
+ *
+ * This function is a helper which can be used to filter out any YCBCR420
+ * only mode, when the source doesn't support it.
+ *
+ * Returns:
+ * The mode status
+ */
+enum drm_mode_status
+drm_mode_validate_ycbcr420(const struct drm_display_mode *mode,
+ struct drm_connector *connector)
+{
+ u8 vic = drm_match_cea_mode(mode);
+ enum drm_mode_status status = MODE_OK;
+ struct drm_hdmi_info *hdmi = &connector->display_info.hdmi;
+
+ if (test_bit(vic, hdmi->y420_vdb_modes)) {
+ if (!connector->ycbcr_420_allowed)
+ status = MODE_NO_420;
+ }
+
+ return status;
+}
+EXPORT_SYMBOL(drm_mode_validate_ycbcr420);
+
#define MODE_STATUS(status) [MODE_ ## status + 3] = #status
static const char * const drm_mode_status_names[] = {
@@ -1122,6 +1150,7 @@ static const char * const drm_mode_status_names[] = {
MODE_STATUS(ONE_SIZE),
MODE_STATUS(NO_REDUCED),
MODE_STATUS(NO_STEREO),
+ MODE_STATUS(NO_420),
MODE_STATUS(STALE),
MODE_STATUS(BAD),
MODE_STATUS(ERROR),
@@ -1576,3 +1605,61 @@ int drm_mode_convert_umode(struct drm_display_mode *out,
out:
return ret;
}
+
+/**
+ * drm_mode_is_420_only - if a given videomode can only be supported in YCBCR420
+ * output format
+ *
+ * @display: display under action
+ * @mode: video mode to be tested.
+ *
+ * Returns:
+ * true if the mode can be supported in YCBCR420 format
+ * false if not.
+ */
+bool drm_mode_is_420_only(const struct drm_display_info *display,
+ const struct drm_display_mode *mode)
+{
+ u8 vic = drm_match_cea_mode(mode);
+
+ return test_bit(vic, display->hdmi.y420_vdb_modes);
+}
+EXPORT_SYMBOL(drm_mode_is_420_only);
+
+/**
+ * drm_mode_is_420_also - if a given videomode can be supported in YCBCR420
+ * output format also (along with RGB/YCBCR444/422)
+ *
+ * @display: display under action.
+ * @mode: video mode to be tested.
+ *
+ * Returns:
+ * true if the mode can also be supported in YCBCR420 format
+ * false if not.
+ */
+bool drm_mode_is_420_also(const struct drm_display_info *display,
+ const struct drm_display_mode *mode)
+{
+ u8 vic = drm_match_cea_mode(mode);
+
+ return test_bit(vic, display->hdmi.y420_cmdb_modes);
+}
+EXPORT_SYMBOL(drm_mode_is_420_also);
+/**
+ * drm_mode_is_420 - if a given videomode can be supported in YCBCR420
+ * output format
+ *
+ * @display: display under action.
+ * @mode: video mode to be tested.
+ *
+ * Returns:
+ * true if the mode can be supported in YCBCR420 format
+ * false if not.
+ */
+bool drm_mode_is_420(const struct drm_display_info *display,
+ const struct drm_display_mode *mode)
+{
+ return drm_mode_is_420_only(display, mode) ||
+ drm_mode_is_420_also(display, mode);
+}
+EXPORT_SYMBOL(drm_mode_is_420);
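
[Editor's sketch] A hedged example of how a driver might consume these helpers when choosing an output format; the policy and the 340 MHz threshold are illustrative assumptions, only the helper calls and mode->clock (kHz) come from the code above:

    static bool my_driver_use_ycbcr420(struct drm_connector *connector,
                                       const struct drm_display_mode *mode)
    {
        const struct drm_display_info *info = &connector->display_info;

        /* Modes advertised as 4:2:0-only leave no other choice. */
        if (drm_mode_is_420_only(info, mode))
            return true;

        /* For "4:2:0 also" modes the driver may still pick 4:2:0, e.g. to
         * stay below a TMDS clock limit; this threshold is hypothetical. */
        return drm_mode_is_420_also(info, mode) && mode->clock > 340000;
    }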
diff --git a/drivers/gpu/drm/drm_modeset_helper.c b/drivers/gpu/drm/drm_modeset_helper.c
index 2b33825f2f93..9cb1eede0b4d 100644
--- a/drivers/gpu/drm/drm_modeset_helper.c
+++ b/drivers/gpu/drm/drm_modeset_helper.c
@@ -124,6 +124,7 @@ static struct drm_plane *create_primary_plane(struct drm_device *dev)
&drm_primary_helper_funcs,
safe_modeset_formats,
ARRAY_SIZE(safe_modeset_formats),
+ NULL,
DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret) {
kfree(primary);
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index 64ef09a6cccb..af4e906c630d 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -52,7 +52,12 @@
* drm_modeset_drop_locks(&ctx);
* drm_modeset_acquire_fini(&ctx);
*
- * On top of of these per-object locks using &ww_mutex there's also an overall
+ * If all that is needed is a single modeset lock, then the &struct
+ * drm_modeset_acquire_ctx is not needed and the locking can be simplified
+ * by passing NULL as the ctx argument in the drm_modeset_lock()
+ * call and, when done, by calling drm_modeset_unlock().
+ *
+ * On top of these per-object locks using &ww_mutex there's also an overall
* &drm_mode_config.mutex, for protecting everything else. Mostly this means
* probe state of connectors, and preventing hotplug add/removal of connectors.
*
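
[Editor's sketch] A hedged illustration of the simplified single-lock usage just introduced above; the particular lock taken here is only an example:

    /* Take one modeset lock without the deadlock-avoidance bookkeeping.
     * With a NULL ctx the call behaves like a plain mutex_lock() and will
     * not return -EDEADLK. */
    drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);

    /* ... inspect or update connector registration state here ... */

    drm_modeset_unlock(&dev->mode_config.connection_mutex);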
@@ -313,11 +318,14 @@ EXPORT_SYMBOL(drm_modeset_lock_init);
* @lock: lock to take
* @ctx: acquire ctx
*
- * If ctx is not NULL, then its ww acquire context is used and the
+ * If @ctx is not NULL, then its ww acquire context is used and the
* lock will be tracked by the context and can be released by calling
* drm_modeset_drop_locks(). If -EDEADLK is returned, this means a
* deadlock scenario has been detected and it is an error to attempt
* to take any more locks without first calling drm_modeset_backoff().
+ *
+ * If @ctx is NULL then the function call behaves like a normal,
+ * non-nesting mutex_lock() call.
*/
int drm_modeset_lock(struct drm_modeset_lock *lock,
struct drm_modeset_acquire_ctx *ctx)
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index 2120f33bdf4a..8dafbdfcd2ea 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -160,8 +160,8 @@ int drm_of_component_probe(struct device *dev,
of_node_put(remote);
continue;
} else if (!of_device_is_available(remote->parent)) {
- dev_warn(dev, "parent device of %s is not available\n",
- remote->full_name);
+ dev_warn(dev, "parent device of %pOF is not available\n",
+ remote);
of_node_put(remote);
continue;
}
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 1eb4fc3eee20..1235c9877d6f 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -149,7 +149,6 @@ int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
master->unique_len = strlen(master->unique);
return 0;
}
-EXPORT_SYMBOL(drm_pci_set_busid);
static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
{
@@ -281,20 +280,15 @@ err_free:
EXPORT_SYMBOL(drm_get_pci_dev);
/**
- * drm_pci_init - Register matching PCI devices with the DRM subsystem
+ * drm_legacy_pci_init - shadow-attach a legacy DRM PCI driver
* @driver: DRM device driver
* @pdriver: PCI device driver
*
- * Initializes a drm_device structures, registering the stubs and initializing
- * the AGP device.
- *
- * NOTE: This function is deprecated. Modern modesetting drm drivers should use
- * pci_register_driver() directly, this function only provides shadow-binding
- * support for old legacy drivers on top of that core pci function.
+ * This is deprecated and only used by legacy dri1 drivers.
*
* Return: 0 on success or a negative error code on failure.
*/
-int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
+int drm_legacy_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
{
struct pci_dev *pdev = NULL;
const struct pci_device_id *pid;
@@ -302,8 +296,8 @@ int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
DRM_DEBUG("\n");
- if (!(driver->driver_features & DRIVER_LEGACY))
- return pci_register_driver(pdriver);
+ if (WARN_ON(!(driver->driver_features & DRIVER_LEGACY)))
+ return -EINVAL;
/* If not using KMS, fall back to stealth mode manual scanning. */
INIT_LIST_HEAD(&driver->legacy_dev_list);
@@ -330,6 +324,7 @@ int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
}
return 0;
}
+EXPORT_SYMBOL(drm_legacy_pci_init);
int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
{
@@ -391,11 +386,6 @@ EXPORT_SYMBOL(drm_pcie_get_max_link_width);
#else
-int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
-{
- return -1;
-}
-
void drm_pci_agp_destroy(struct drm_device *dev) {}
int drm_irq_by_busid(struct drm_device *dev, void *data,
@@ -405,27 +395,21 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
}
#endif
-EXPORT_SYMBOL(drm_pci_init);
-
/**
- * drm_pci_exit - Unregister matching PCI devices from the DRM subsystem
+ * drm_legacy_pci_exit - unregister shadow-attach legacy DRM driver
* @driver: DRM device driver
* @pdriver: PCI device driver
*
- * Unregisters one or more devices matched by a PCI driver from the DRM
- * subsystem.
- *
- * NOTE: This function is deprecated. Modern modesetting drm drivers should use
- * pci_unregister_driver() directly, this function only provides shadow-binding
- * support for old legacy drivers on top of that core pci function.
+ * Unregister a DRM driver shadow-attached through drm_legacy_pci_init(). This
+ * is deprecated and only used by dri1 drivers.
*/
-void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
+void drm_legacy_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
{
struct drm_device *dev, *tmp;
DRM_DEBUG("\n");
if (!(driver->driver_features & DRIVER_LEGACY)) {
- pci_unregister_driver(pdriver);
+ WARN_ON(1);
} else {
list_for_each_entry_safe(dev, tmp, &driver->legacy_dev_list,
legacy_dev_list) {
@@ -435,4 +419,4 @@ void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
}
DRM_INFO("Module unloaded\n");
}
-EXPORT_SYMBOL(drm_pci_exit);
+EXPORT_SYMBOL(drm_legacy_pci_exit);
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 5dc8c4350602..7a00351d5b5d 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -62,6 +62,87 @@ static unsigned int drm_num_planes(struct drm_device *dev)
return num;
}
+static inline u32 *
+formats_ptr(struct drm_format_modifier_blob *blob)
+{
+ return (u32 *)(((char *)blob) + blob->formats_offset);
+}
+
+static inline struct drm_format_modifier *
+modifiers_ptr(struct drm_format_modifier_blob *blob)
+{
+ return (struct drm_format_modifier *)(((char *)blob) + blob->modifiers_offset);
+}
+
+static int create_in_format_blob(struct drm_device *dev, struct drm_plane *plane)
+{
+ const struct drm_mode_config *config = &dev->mode_config;
+ struct drm_property_blob *blob;
+ struct drm_format_modifier *mod;
+ size_t blob_size, formats_size, modifiers_size;
+ struct drm_format_modifier_blob *blob_data;
+ unsigned int i, j;
+
+ formats_size = sizeof(__u32) * plane->format_count;
+ if (WARN_ON(!formats_size)) {
+ /* 0 formats are never expected */
+ return 0;
+ }
+
+ modifiers_size =
+ sizeof(struct drm_format_modifier) * plane->modifier_count;
+
+ blob_size = sizeof(struct drm_format_modifier_blob);
+ /* Modifiers offset is a pointer to a struct with a 64 bit field so it
+ * should be naturally aligned to 8B.
+ */
+ BUILD_BUG_ON(sizeof(struct drm_format_modifier_blob) % 8);
+ blob_size += ALIGN(formats_size, 8);
+ blob_size += modifiers_size;
+
+ blob = drm_property_create_blob(dev, blob_size, NULL);
+ if (IS_ERR(blob))
+ return -1;
+
+ blob_data = (struct drm_format_modifier_blob *)blob->data;
+ blob_data->version = FORMAT_BLOB_CURRENT;
+ blob_data->count_formats = plane->format_count;
+ blob_data->formats_offset = sizeof(struct drm_format_modifier_blob);
+ blob_data->count_modifiers = plane->modifier_count;
+
+ blob_data->modifiers_offset =
+ ALIGN(blob_data->formats_offset + formats_size, 8);
+
+ memcpy(formats_ptr(blob_data), plane->format_types, formats_size);
+
+ /* If we can't determine support, just bail */
+ if (!plane->funcs->format_mod_supported)
+ goto done;
+
+ mod = modifiers_ptr(blob_data);
+ for (i = 0; i < plane->modifier_count; i++) {
+ for (j = 0; j < plane->format_count; j++) {
+ if (plane->funcs->format_mod_supported(plane,
+ plane->format_types[j],
+ plane->modifiers[i])) {
+
+ mod->formats |= 1ULL << j;
+ }
+ }
+
+ mod->modifier = plane->modifiers[i];
+ mod->offset = 0;
+ mod->pad = 0;
+ mod++;
+ }
+
+done:
+ drm_object_attach_property(&plane->base, config->modifiers_property,
+ blob->base.id);
+
+ return 0;
+}
+
/**
* drm_universal_plane_init - Initialize a new universal plane object
* @dev: DRM device
@@ -70,6 +151,8 @@ static unsigned int drm_num_planes(struct drm_device *dev)
* @funcs: callbacks for the new plane
* @formats: array of supported formats (DRM_FORMAT\_\*)
* @format_count: number of elements in @formats
+ * @format_modifiers: array of format modifiers (u64), terminated by
+ * DRM_FORMAT_MOD_INVALID
* @type: type of plane (overlay, primary, cursor)
* @name: printf style format string for the plane name, or NULL for default name
*
@@ -82,10 +165,12 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
uint32_t possible_crtcs,
const struct drm_plane_funcs *funcs,
const uint32_t *formats, unsigned int format_count,
+ const uint64_t *format_modifiers,
enum drm_plane_type type,
const char *name, ...)
{
struct drm_mode_config *config = &dev->mode_config;
+ unsigned int format_modifier_count = 0;
int ret;
ret = drm_mode_object_add(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
@@ -105,6 +190,31 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
return -ENOMEM;
}
+ /*
+ * First driver to need more than 64 formats needs to fix this. Each
+ * format is encoded as a bit and the current code only supports a u64.
+ */
+ if (WARN_ON(format_count > 64))
+ return -EINVAL;
+
+ if (format_modifiers) {
+ const uint64_t *temp_modifiers = format_modifiers;
+ while (*temp_modifiers++ != DRM_FORMAT_MOD_INVALID)
+ format_modifier_count++;
+ }
+
+ plane->modifier_count = format_modifier_count;
+ plane->modifiers = kmalloc_array(format_modifier_count,
+ sizeof(format_modifiers[0]),
+ GFP_KERNEL);
+
+ if (format_modifier_count && !plane->modifiers) {
+ DRM_DEBUG_KMS("out of memory when allocating plane\n");
+ kfree(plane->format_types);
+ drm_mode_object_unregister(dev, &plane->base);
+ return -ENOMEM;
+ }
+
if (name) {
va_list ap;
@@ -117,12 +227,15 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
}
if (!plane->name) {
kfree(plane->format_types);
+ kfree(plane->modifiers);
drm_mode_object_unregister(dev, &plane->base);
return -ENOMEM;
}
memcpy(plane->format_types, formats, format_count * sizeof(uint32_t));
plane->format_count = format_count;
+ memcpy(plane->modifiers, format_modifiers,
+ format_modifier_count * sizeof(format_modifiers[0]));
plane->possible_crtcs = possible_crtcs;
plane->type = type;
@@ -149,6 +262,9 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
drm_object_attach_property(&plane->base, config->prop_src_h, 0);
}
+ if (config->allow_fb_modifiers)
+ create_in_format_blob(dev, plane);
+
return 0;
}
EXPORT_SYMBOL(drm_universal_plane_init);
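
[Editor's sketch] A hedged example of a driver passing a modifier list through the extended drm_universal_plane_init() signature; struct my_plane, my_plane_funcs and the possible_crtcs mask are placeholders:

    static int my_plane_create(struct drm_device *dev, struct my_plane *p)
    {
        static const uint32_t my_formats[] = {
            DRM_FORMAT_XRGB8888,
            DRM_FORMAT_ARGB8888,
        };
        /* Must end with DRM_FORMAT_MOD_INVALID so the core can count the
         * entries when building the IN_FORMATS blob. */
        static const uint64_t my_modifiers[] = {
            DRM_FORMAT_MOD_LINEAR,
            DRM_FORMAT_MOD_INVALID,
        };

        return drm_universal_plane_init(dev, &p->base, BIT(0),
                                        &my_plane_funcs,
                                        my_formats, ARRAY_SIZE(my_formats),
                                        my_modifiers,
                                        DRM_PLANE_TYPE_PRIMARY, "primary");
    }

Note that the IN_FORMATS blob is only created when dev->mode_config.allow_fb_modifiers is set, and per-format modifier support is reported through the (optional) &drm_plane_funcs.format_mod_supported hook.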
@@ -205,7 +321,8 @@ int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
type = is_primary ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
return drm_universal_plane_init(dev, plane, possible_crtcs, funcs,
- formats, format_count, type, NULL);
+ formats, format_count,
+ NULL, type, NULL);
}
EXPORT_SYMBOL(drm_plane_init);
@@ -224,6 +341,7 @@ void drm_plane_cleanup(struct drm_plane *plane)
drm_modeset_lock_fini(&plane->mutex);
kfree(plane->format_types);
+ kfree(plane->modifiers);
drm_mode_object_unregister(dev, &plane->base);
BUG_ON(list_empty(&plane->head));
@@ -601,6 +719,7 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
crtc = drm_crtc_find(dev, plane_req->crtc_id);
if (!crtc) {
+ drm_framebuffer_put(fb);
DRM_DEBUG_KMS("Unknown crtc ID %d\n",
plane_req->crtc_id);
return -ENOENT;
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 00e6832a8c1a..904966cde32b 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -528,6 +528,10 @@ retry:
if (mode->status == MODE_OK)
mode->status = drm_mode_validate_pipeline(mode,
connector);
+
+ if (mode->status == MODE_OK)
+ mode->status = drm_mode_validate_ycbcr420(mode,
+ connector);
}
prune:
diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c
index 3e88fa24eab3..bc5128203056 100644
--- a/drivers/gpu/drm/drm_property.c
+++ b/drivers/gpu/drm/drm_property.c
@@ -709,6 +709,29 @@ err_created:
}
EXPORT_SYMBOL(drm_property_replace_global_blob);
+/**
+ * drm_property_replace_blob - replace a blob property
+ * @blob: a pointer to the member blob to be replaced
+ * @new_blob: the new blob to replace with
+ *
+ * Return: true if the blob was in fact replaced.
+ */
+bool drm_property_replace_blob(struct drm_property_blob **blob,
+ struct drm_property_blob *new_blob)
+{
+ struct drm_property_blob *old_blob = *blob;
+
+ if (old_blob == new_blob)
+ return false;
+
+ drm_property_blob_put(old_blob);
+ if (new_blob)
+ drm_property_blob_get(new_blob);
+ *blob = new_blob;
+ return true;
+}
+EXPORT_SYMBOL(drm_property_replace_blob);
+
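
[Editor's sketch] Atomic drivers typically use this helper when swapping a blob held in duplicated state; the gamma LUT below is only an example target, not something this patch adds:

    /* Replace the gamma LUT blob on a duplicated CRTC state. The old
     * reference is dropped and a new one taken only when the blobs differ;
     * the return value reports whether anything actually changed. */
    bool replaced;

    replaced = drm_property_replace_blob(&crtc_state->gamma_lut, new_lut_blob);
    if (replaced)
        crtc_state->color_mgmt_changed = true;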
int drm_mode_getblob_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
diff --git a/drivers/gpu/drm/drm_scdc_helper.c b/drivers/gpu/drm/drm_scdc_helper.c
index 3cd96a95736d..7d1b0f011d33 100644
--- a/drivers/gpu/drm/drm_scdc_helper.c
+++ b/drivers/gpu/drm/drm_scdc_helper.c
@@ -194,19 +194,26 @@ EXPORT_SYMBOL(drm_scdc_set_scrambling);
* @adapter: I2C adapter for DDC channel
* @set: set or reset the high clock ratio
*
- * TMDS clock ratio calculations go like this:
- * TMDS character = 10 bit TMDS encoded value
- * TMDS character rate = The rate at which TMDS characters are transmitted(Mcsc)
- * TMDS bit rate = 10x TMDS character rate
- * As per the spec:
- * TMDS clock rate for pixel clock < 340 MHz = 1x the character rate
- * = 1/10 pixel clock rate
- * TMDS clock rate for pixel clock > 340 MHz = 0.25x the character rate
- * = 1/40 pixel clock rate
- *
- * Writes to the TMDS config register over SCDC channel, and:
- * sets TMDS clock ratio to 1/40 when set = 1
- * sets TMDS clock ratio to 1/10 when set = 0
+ *
+ * TMDS clock ratio calculations go like this:
+ * TMDS character = 10 bit TMDS encoded value
+ *
+ * TMDS character rate = The rate at which TMDS characters are
+ * transmitted (Mcsc)
+ *
+ * TMDS bit rate = 10x TMDS character rate
+ *
+ * As per the spec:
+ * TMDS clock rate for pixel clock < 340 MHz = 1x the character
+ * rate = 1/10 pixel clock rate
+ *
+ * TMDS clock rate for pixel clock > 340 MHz = 0.25x the character
+ * rate = 1/40 pixel clock rate
+ *
+ * Writes to the TMDS config register over SCDC channel, and:
+ * sets TMDS clock ratio to 1/40 when set = 1
+ *
+ * sets TMDS clock ratio to 1/10 when set = 0
*
* Returns:
* True if write is successful, false otherwise.
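
[Editor's sketch] Reading the numbers above back as code, a hedged fragment of how a driver might decide when to set the high clock ratio; mode and adapter are assumed to come from the caller, and pairing the call with scrambling is the driver's responsibility:

    /* mode->clock is in kHz; above 340 MHz the spec requires the 1/40 TMDS
     * clock ratio (and scrambling). */
    bool high_ratio = mode->clock > 340000;

    drm_scdc_set_high_tmds_clock_ratio(adapter, high_ratio);
    drm_scdc_set_scrambling(adapter, high_ratio);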
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
index e084f9f8ca66..dc9fd109de14 100644
--- a/drivers/gpu/drm/drm_simple_kms_helper.c
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -37,10 +37,18 @@ static const struct drm_encoder_funcs drm_simple_kms_encoder_funcs = {
static int drm_simple_kms_crtc_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
+ bool has_primary = state->plane_mask &
+ BIT(drm_plane_index(crtc->primary));
+
+ /* We always want to have an active plane with an active CRTC */
+ if (has_primary != state->enable)
+ return -EINVAL;
+
return drm_atomic_add_affected_planes(state->state, crtc);
}
-static void drm_simple_kms_crtc_enable(struct drm_crtc *crtc)
+static void drm_simple_kms_crtc_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct drm_simple_display_pipe *pipe;
@@ -51,7 +59,8 @@ static void drm_simple_kms_crtc_enable(struct drm_crtc *crtc)
pipe->funcs->enable(pipe, crtc->state);
}
-static void drm_simple_kms_crtc_disable(struct drm_crtc *crtc)
+static void drm_simple_kms_crtc_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct drm_simple_display_pipe *pipe;
@@ -64,8 +73,8 @@ static void drm_simple_kms_crtc_disable(struct drm_crtc *crtc)
static const struct drm_crtc_helper_funcs drm_simple_kms_crtc_helper_funcs = {
.atomic_check = drm_simple_kms_crtc_check,
- .disable = drm_simple_kms_crtc_disable,
- .enable = drm_simple_kms_crtc_enable,
+ .atomic_enable = drm_simple_kms_crtc_enable,
+ .atomic_disable = drm_simple_kms_crtc_disable,
};
static const struct drm_crtc_funcs drm_simple_kms_crtc_funcs = {
@@ -88,9 +97,6 @@ static int drm_simple_kms_plane_atomic_check(struct drm_plane *plane,
pipe = container_of(plane, struct drm_simple_display_pipe, plane);
crtc_state = drm_atomic_get_new_crtc_state(plane_state->state,
&pipe->crtc);
- if (crtc_state->enable != !!plane_state->crtc)
- return -EINVAL; /* plane must match crtc enable state */
-
if (!crtc_state->enable)
return 0; /* nothing to check when disabling or disabled */
@@ -193,6 +199,7 @@ EXPORT_SYMBOL(drm_simple_display_pipe_attach_bridge);
* @funcs: callbacks for the display pipe (optional)
* @formats: array of supported formats (DRM_FORMAT\_\*)
* @format_count: number of elements in @formats
+ * @format_modifiers: array of format modifiers
* @connector: connector to attach and register (optional)
*
* Sets up a display pipeline which consists of a really simple
@@ -213,6 +220,7 @@ int drm_simple_display_pipe_init(struct drm_device *dev,
struct drm_simple_display_pipe *pipe,
const struct drm_simple_display_pipe_funcs *funcs,
const uint32_t *formats, unsigned int format_count,
+ const uint64_t *format_modifiers,
struct drm_connector *connector)
{
struct drm_encoder *encoder = &pipe->encoder;
@@ -227,6 +235,7 @@ int drm_simple_display_pipe_init(struct drm_device *dev,
ret = drm_universal_plane_init(dev, plane, 0,
&drm_simple_kms_plane_funcs,
formats, format_count,
+ format_modifiers,
DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 789ba0b37f7b..0422b8c2c2e7 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -1,5 +1,7 @@
/*
* Copyright 2017 Red Hat
+ * Parts ported from amdgpu (fence wait code).
+ * Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -31,6 +33,9 @@
* that contain an optional fence. The fence can be updated with a new
* fence, or be NULL.
*
+ * Syncobjs can be waited upon, in which case the wait is performed on the
+ * underlying fence.
+ *
* syncobj's can be exported to fd's and back, these fd's are opaque and
* have no other use case, except passing the syncobj between processes.
*
@@ -46,6 +51,7 @@
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/sync_file.h>
+#include <linux/sched/signal.h>
#include "drm_internal.h"
#include <drm/drm_syncobj.h>
@@ -75,6 +81,75 @@ struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
}
EXPORT_SYMBOL(drm_syncobj_find);
+static void drm_syncobj_add_callback_locked(struct drm_syncobj *syncobj,
+ struct drm_syncobj_cb *cb,
+ drm_syncobj_func_t func)
+{
+ cb->func = func;
+ list_add_tail(&cb->node, &syncobj->cb_list);
+}
+
+static int drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj,
+ struct dma_fence **fence,
+ struct drm_syncobj_cb *cb,
+ drm_syncobj_func_t func)
+{
+ int ret;
+
+ *fence = drm_syncobj_fence_get(syncobj);
+ if (*fence)
+ return 1;
+
+ spin_lock(&syncobj->lock);
+ /* We've already tried once to get a fence and failed. Now that we
+ * have the lock, try one more time just to be sure we don't add a
+ * callback when a fence has already been set.
+ */
+ if (syncobj->fence) {
+ *fence = dma_fence_get(syncobj->fence);
+ ret = 1;
+ } else {
+ *fence = NULL;
+ drm_syncobj_add_callback_locked(syncobj, cb, func);
+ ret = 0;
+ }
+ spin_unlock(&syncobj->lock);
+
+ return ret;
+}
+
+/**
+ * drm_syncobj_add_callback - adds a callback to syncobj::cb_list
+ * @syncobj: Sync object to which to add the callback
+ * @cb: Callback to add
+ * @func: Func to use when initializing the drm_syncobj_cb struct
+ *
+ * This adds a callback to be called next time the fence is replaced
+ */
+void drm_syncobj_add_callback(struct drm_syncobj *syncobj,
+ struct drm_syncobj_cb *cb,
+ drm_syncobj_func_t func)
+{
+ spin_lock(&syncobj->lock);
+ drm_syncobj_add_callback_locked(syncobj, cb, func);
+ spin_unlock(&syncobj->lock);
+}
+EXPORT_SYMBOL(drm_syncobj_add_callback);
+
+/**
+ * drm_syncobj_remove_callback - removes a callback from syncobj::cb_list
+ * @syncobj: Sync object from which to remove the callback
+ * @cb: Callback to remove
+ */
+void drm_syncobj_remove_callback(struct drm_syncobj *syncobj,
+ struct drm_syncobj_cb *cb)
+{
+ spin_lock(&syncobj->lock);
+ list_del_init(&cb->node);
+ spin_unlock(&syncobj->lock);
+}
+EXPORT_SYMBOL(drm_syncobj_remove_callback);
+
/**
* drm_syncobj_replace_fence - replace fence in a sync object.
* @syncobj: Sync object to replace fence in
@@ -86,18 +161,75 @@ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
struct dma_fence *fence)
{
struct dma_fence *old_fence;
+ struct drm_syncobj_cb *cur, *tmp;
if (fence)
dma_fence_get(fence);
- old_fence = xchg(&syncobj->fence, fence);
+
+ spin_lock(&syncobj->lock);
+
+ old_fence = syncobj->fence;
+ syncobj->fence = fence;
+
+ if (fence != old_fence) {
+ list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) {
+ list_del_init(&cur->node);
+ cur->func(syncobj, cur);
+ }
+ }
+
+ spin_unlock(&syncobj->lock);
dma_fence_put(old_fence);
}
EXPORT_SYMBOL(drm_syncobj_replace_fence);
-int drm_syncobj_fence_get(struct drm_file *file_private,
- u32 handle,
- struct dma_fence **fence)
+struct drm_syncobj_null_fence {
+ struct dma_fence base;
+ spinlock_t lock;
+};
+
+static const char *drm_syncobj_null_fence_get_name(struct dma_fence *fence)
+{
+ return "syncobjnull";
+}
+
+static bool drm_syncobj_null_fence_enable_signaling(struct dma_fence *fence)
+{
+ dma_fence_enable_sw_signaling(fence);
+ return !dma_fence_is_signaled(fence);
+}
+
+static const struct dma_fence_ops drm_syncobj_null_fence_ops = {
+ .get_driver_name = drm_syncobj_null_fence_get_name,
+ .get_timeline_name = drm_syncobj_null_fence_get_name,
+ .enable_signaling = drm_syncobj_null_fence_enable_signaling,
+ .wait = dma_fence_default_wait,
+ .release = NULL,
+};
+
+static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
+{
+ struct drm_syncobj_null_fence *fence;
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (fence == NULL)
+ return -ENOMEM;
+
+ spin_lock_init(&fence->lock);
+ dma_fence_init(&fence->base, &drm_syncobj_null_fence_ops,
+ &fence->lock, 0, 0);
+ dma_fence_signal(&fence->base);
+
+ drm_syncobj_replace_fence(syncobj, &fence->base);
+
+ dma_fence_put(&fence->base);
+
+ return 0;
+}
+
+int drm_syncobj_find_fence(struct drm_file *file_private,
+ u32 handle,
+ struct dma_fence **fence)
{
struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
int ret = 0;
@@ -105,14 +237,14 @@ int drm_syncobj_fence_get(struct drm_file *file_private,
if (!syncobj)
return -ENOENT;
- *fence = dma_fence_get(syncobj->fence);
+ *fence = drm_syncobj_fence_get(syncobj);
if (!*fence) {
ret = -EINVAL;
}
drm_syncobj_put(syncobj);
return ret;
}
-EXPORT_SYMBOL(drm_syncobj_fence_get);
+EXPORT_SYMBOL(drm_syncobj_find_fence);
/**
* drm_syncobj_free - free a sync object.
@@ -125,13 +257,13 @@ void drm_syncobj_free(struct kref *kref)
struct drm_syncobj *syncobj = container_of(kref,
struct drm_syncobj,
refcount);
- dma_fence_put(syncobj->fence);
+ drm_syncobj_replace_fence(syncobj, NULL);
kfree(syncobj);
}
EXPORT_SYMBOL(drm_syncobj_free);
static int drm_syncobj_create(struct drm_file *file_private,
- u32 *handle)
+ u32 *handle, uint32_t flags)
{
int ret;
struct drm_syncobj *syncobj;
@@ -141,6 +273,16 @@ static int drm_syncobj_create(struct drm_file *file_private,
return -ENOMEM;
kref_init(&syncobj->refcount);
+ INIT_LIST_HEAD(&syncobj->cb_list);
+ spin_lock_init(&syncobj->lock);
+
+ if (flags & DRM_SYNCOBJ_CREATE_SIGNALED) {
+ ret = drm_syncobj_assign_null_handle(syncobj);
+ if (ret < 0) {
+ drm_syncobj_put(syncobj);
+ return ret;
+ }
+ }
idr_preload(GFP_KERNEL);
spin_lock(&file_private->syncobj_table_lock);
@@ -307,7 +449,7 @@ int drm_syncobj_export_sync_file(struct drm_file *file_private,
if (fd < 0)
return fd;
- ret = drm_syncobj_fence_get(file_private, handle, &fence);
+ ret = drm_syncobj_find_fence(file_private, handle, &fence);
if (ret)
goto err_put_fd;
@@ -330,7 +472,6 @@ err_put_fd:
}
/**
* drm_syncobj_open - initializes syncobj file-private structures at devnode open time
- * @dev: drm_device which is being opened by userspace
* @file_private: drm file-private structure to set up
*
* Called at device open time, sets up the structure for handling refcounting
@@ -354,7 +495,6 @@ drm_syncobj_release_handle(int id, void *ptr, void *data)
/**
* drm_syncobj_release - release file-private sync object resources
- * @dev: drm_device which is being closed by userspace
* @file_private: drm file-private structure to clean up
*
* Called at close time when the filp is going away.
@@ -379,11 +519,11 @@ drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
return -ENODEV;
/* no valid flags yet */
- if (args->flags)
+ if (args->flags & ~DRM_SYNCOBJ_CREATE_SIGNALED)
return -EINVAL;
return drm_syncobj_create(file_private,
- &args->handle);
+ &args->handle, args->flags);
}
int
@@ -449,3 +589,368 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
return drm_syncobj_fd_to_handle(file_private, args->fd,
&args->handle);
}
+
+struct syncobj_wait_entry {
+ struct task_struct *task;
+ struct dma_fence *fence;
+ struct dma_fence_cb fence_cb;
+ struct drm_syncobj_cb syncobj_cb;
+};
+
+static void syncobj_wait_fence_func(struct dma_fence *fence,
+ struct dma_fence_cb *cb)
+{
+ struct syncobj_wait_entry *wait =
+ container_of(cb, struct syncobj_wait_entry, fence_cb);
+
+ wake_up_process(wait->task);
+}
+
+static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
+ struct drm_syncobj_cb *cb)
+{
+ struct syncobj_wait_entry *wait =
+ container_of(cb, struct syncobj_wait_entry, syncobj_cb);
+
+ /* This happens inside the syncobj lock */
+ wait->fence = dma_fence_get(syncobj->fence);
+ wake_up_process(wait->task);
+}
+
+static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
+ uint32_t count,
+ uint32_t flags,
+ signed long timeout,
+ uint32_t *idx)
+{
+ struct syncobj_wait_entry *entries;
+ struct dma_fence *fence;
+ signed long ret;
+ uint32_t signaled_count, i;
+
+ entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
+ if (!entries)
+ return -ENOMEM;
+
+ /* Walk the list of sync objects and initialize entries. We do
+ * this up-front so that we can properly return -EINVAL if there is
+ * a syncobj with a missing fence and then never have the chance of
+ * returning -EINVAL again.
+ */
+ signaled_count = 0;
+ for (i = 0; i < count; ++i) {
+ entries[i].task = current;
+ entries[i].fence = drm_syncobj_fence_get(syncobjs[i]);
+ if (!entries[i].fence) {
+ if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
+ continue;
+ } else {
+ ret = -EINVAL;
+ goto cleanup_entries;
+ }
+ }
+
+ if (dma_fence_is_signaled(entries[i].fence)) {
+ if (signaled_count == 0 && idx)
+ *idx = i;
+ signaled_count++;
+ }
+ }
+
+ /* Initialize ret to the max of timeout and 1. That way, the
+ * default return value indicates a successful wait and not a
+ * timeout.
+ */
+ ret = max_t(signed long, timeout, 1);
+
+ if (signaled_count == count ||
+ (signaled_count > 0 &&
+ !(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)))
+ goto cleanup_entries;
+
+ /* There's a very annoying laxness in the dma_fence API here, in
+ * that backends are not required to automatically report when a
+ * fence is signaled prior to fence->ops->enable_signaling() being
+ * called. So here if we fail to match signaled_count, we need to
+ * fall through and try a 0 timeout wait!
+ */
+
+ if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
+ for (i = 0; i < count; ++i) {
+ drm_syncobj_fence_get_or_add_callback(syncobjs[i],
+ &entries[i].fence,
+ &entries[i].syncobj_cb,
+ syncobj_wait_syncobj_func);
+ }
+ }
+
+ do {
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ signaled_count = 0;
+ for (i = 0; i < count; ++i) {
+ fence = entries[i].fence;
+ if (!fence)
+ continue;
+
+ if (dma_fence_is_signaled(fence) ||
+ (!entries[i].fence_cb.func &&
+ dma_fence_add_callback(fence,
+ &entries[i].fence_cb,
+ syncobj_wait_fence_func))) {
+ /* The fence has been signaled */
+ if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL) {
+ signaled_count++;
+ } else {
+ if (idx)
+ *idx = i;
+ goto done_waiting;
+ }
+ }
+ }
+
+ if (signaled_count == count)
+ goto done_waiting;
+
+ if (timeout == 0) {
+ /* If we are doing a 0 timeout wait and we got
+ * here, then we just timed out.
+ */
+ ret = 0;
+ goto done_waiting;
+ }
+
+ ret = schedule_timeout(ret);
+
+ if (ret > 0 && signal_pending(current))
+ ret = -ERESTARTSYS;
+ } while (ret > 0);
+
+done_waiting:
+ __set_current_state(TASK_RUNNING);
+
+cleanup_entries:
+ for (i = 0; i < count; ++i) {
+ if (entries[i].syncobj_cb.func)
+ drm_syncobj_remove_callback(syncobjs[i],
+ &entries[i].syncobj_cb);
+ if (entries[i].fence_cb.func)
+ dma_fence_remove_callback(entries[i].fence,
+ &entries[i].fence_cb);
+ dma_fence_put(entries[i].fence);
+ }
+ kfree(entries);
+
+ return ret;
+}
+
+/**
+ * drm_timeout_abs_to_jiffies - calculate jiffies timeout from absolute value
+ *
+ * @timeout_nsec: timeout nsec component in ns, 0 for poll
+ *
+ * Calculate the timeout in jiffies from an absolute time in sec/nsec.
+ */
+static signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
+{
+ ktime_t abs_timeout, now;
+ u64 timeout_ns, timeout_jiffies64;
+
+ /* A 0 timeout means poll - absolute 0 doesn't seem valid */
+ if (timeout_nsec == 0)
+ return 0;
+
+ abs_timeout = ns_to_ktime(timeout_nsec);
+ now = ktime_get();
+
+ if (!ktime_after(abs_timeout, now))
+ return 0;
+
+ timeout_ns = ktime_to_ns(ktime_sub(abs_timeout, now));
+
+ timeout_jiffies64 = nsecs_to_jiffies64(timeout_ns);
+ /* clamp timeout to avoid infinite timeout */
+ if (timeout_jiffies64 >= MAX_SCHEDULE_TIMEOUT - 1)
+ return MAX_SCHEDULE_TIMEOUT - 1;
+
+ return timeout_jiffies64 + 1;
+}
+
+static int drm_syncobj_array_wait(struct drm_device *dev,
+ struct drm_file *file_private,
+ struct drm_syncobj_wait *wait,
+ struct drm_syncobj **syncobjs)
+{
+ signed long timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
+ signed long ret = 0;
+ uint32_t first = ~0;
+
+ ret = drm_syncobj_array_wait_timeout(syncobjs,
+ wait->count_handles,
+ wait->flags,
+ timeout, &first);
+ if (ret < 0)
+ return ret;
+
+ wait->first_signaled = first;
+ if (ret == 0)
+ return -ETIME;
+ return 0;
+}
+
+static int drm_syncobj_array_find(struct drm_file *file_private,
+ void *user_handles, uint32_t count_handles,
+ struct drm_syncobj ***syncobjs_out)
+{
+ uint32_t i, *handles;
+ struct drm_syncobj **syncobjs;
+ int ret;
+
+ handles = kmalloc_array(count_handles, sizeof(*handles), GFP_KERNEL);
+ if (handles == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(handles, user_handles,
+ sizeof(uint32_t) * count_handles)) {
+ ret = -EFAULT;
+ goto err_free_handles;
+ }
+
+ syncobjs = kmalloc_array(count_handles, sizeof(*syncobjs), GFP_KERNEL);
+ if (syncobjs == NULL) {
+ ret = -ENOMEM;
+ goto err_free_handles;
+ }
+
+ for (i = 0; i < count_handles; i++) {
+ syncobjs[i] = drm_syncobj_find(file_private, handles[i]);
+ if (!syncobjs[i]) {
+ ret = -ENOENT;
+ goto err_put_syncobjs;
+ }
+ }
+
+ kfree(handles);
+ *syncobjs_out = syncobjs;
+ return 0;
+
+err_put_syncobjs:
+ while (i-- > 0)
+ drm_syncobj_put(syncobjs[i]);
+ kfree(syncobjs);
+err_free_handles:
+ kfree(handles);
+
+ return ret;
+}
+
+static void drm_syncobj_array_free(struct drm_syncobj **syncobjs,
+ uint32_t count)
+{
+ uint32_t i;
+ for (i = 0; i < count; i++)
+ drm_syncobj_put(syncobjs[i]);
+ kfree(syncobjs);
+}
+
+int
+drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private)
+{
+ struct drm_syncobj_wait *args = data;
+ struct drm_syncobj **syncobjs;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+ return -ENODEV;
+
+ if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
+ return -EINVAL;
+
+ if (args->count_handles == 0)
+ return -EINVAL;
+
+ ret = drm_syncobj_array_find(file_private,
+ u64_to_user_ptr(args->handles),
+ args->count_handles,
+ &syncobjs);
+ if (ret < 0)
+ return ret;
+
+ ret = drm_syncobj_array_wait(dev, file_private,
+ args, syncobjs);
+
+ drm_syncobj_array_free(syncobjs, args->count_handles);
+
+ return ret;
+}
+
+int
+drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private)
+{
+ struct drm_syncobj_array *args = data;
+ struct drm_syncobj **syncobjs;
+ uint32_t i;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+ return -ENODEV;
+
+ if (args->pad != 0)
+ return -EINVAL;
+
+ if (args->count_handles == 0)
+ return -EINVAL;
+
+ ret = drm_syncobj_array_find(file_private,
+ u64_to_user_ptr(args->handles),
+ args->count_handles,
+ &syncobjs);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < args->count_handles; i++)
+ drm_syncobj_replace_fence(syncobjs[i], NULL);
+
+ drm_syncobj_array_free(syncobjs, args->count_handles);
+
+ return 0;
+}
+
+int
+drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private)
+{
+ struct drm_syncobj_array *args = data;
+ struct drm_syncobj **syncobjs;
+ uint32_t i;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+ return -ENODEV;
+
+ if (args->pad != 0)
+ return -EINVAL;
+
+ if (args->count_handles == 0)
+ return -EINVAL;
+
+ ret = drm_syncobj_array_find(file_private,
+ u64_to_user_ptr(args->handles),
+ args->count_handles,
+ &syncobjs);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < args->count_handles; i++) {
+ ret = drm_syncobj_assign_null_handle(syncobjs[i]);
+ if (ret < 0)
+ break;
+ }
+
+ drm_syncobj_array_free(syncobjs, args->count_handles);
+
+ return ret;
+}
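
[Editor's sketch] From the userspace side, a hedged example of driving the new wait ioctl directly; a real application would normally go through libdrm, and the header path may differ per distribution:

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <drm/drm.h>

    /* Wait for any one of two syncobjs, with an absolute CLOCK_MONOTONIC
     * timeout, also waiting for a fence to be attached (WAIT_FOR_SUBMIT). */
    static int wait_syncobjs(int drm_fd, uint32_t handles[2], int64_t abs_ns)
    {
        struct drm_syncobj_wait wait;

        memset(&wait, 0, sizeof(wait));
        wait.handles = (uintptr_t)handles;
        wait.count_handles = 2;
        wait.timeout_nsec = abs_ns;
        wait.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT;

        if (ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait))
            return -1; /* a timeout is reported as ETIME via errno */

        /* first_signaled holds the index of the syncobj that woke us. */
        return wait.first_signaled;
    }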
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index e9f33cd805dd..70f2b9593edc 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -31,6 +31,41 @@
#include "drm_trace.h"
#include "drm_internal.h"
+/**
+ * DOC: vblank handling
+ *
+ * Vertical blanking plays a major role in graphics rendering. To achieve
+ * tear-free display, users must synchronize page flips and/or rendering to
+ * vertical blanking. The DRM API offers ioctls to perform page flips
+ * synchronized to vertical blanking and wait for vertical blanking.
+ *
+ * The DRM core handles most of the vertical blanking management logic, which
+ * involves filtering out spurious interrupts, keeping race-free blanking
+ * counters, coping with counter wrap-around and resets and keeping use counts.
+ * It relies on the driver to generate vertical blanking interrupts and
+ * optionally provide a hardware vertical blanking counter.
+ *
+ * Drivers must initialize the vertical blanking handling core with a call to
+ * drm_vblank_init(). Minimally, a driver needs to implement
+ * &drm_crtc_funcs.enable_vblank and &drm_crtc_funcs.disable_vblank plus call
+ * drm_crtc_handle_vblank() in its vblank interrupt handler for working vblank
+ * support.
+ *
+ * Vertical blanking interrupts can be enabled by the DRM core or by drivers
+ * themselves (for instance to handle page flipping operations). The DRM core
+ * maintains a vertical blanking use count to ensure that the interrupts are not
+ * disabled while a user still needs them. To increment the use count, drivers
+ * call drm_crtc_vblank_get() and release the vblank reference again with
+ * drm_crtc_vblank_put(). In between these two calls vblank interrupts are
+ * guaranteed to be enabled.
+ *
+ * On many hardware platforms the vblank interrupt cannot be disabled in a
+ * race-free manner, see &drm_driver.vblank_disable_immediate and
+ * &drm_driver.max_vblank_count. In that case the vblank core only disables the
+ * vblanks after a timer has expired, which can be configured through the
+ * ``vblankoffdelay`` module parameter.
+ */
+
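
[Editor's sketch] A hedged example of the minimal driver wiring named above; the my_* device, register access and CRTC embedding are made up, only the DRM calls and hook signatures are real:

    static irqreturn_t my_irq_handler(int irq, void *arg)
    {
        struct my_device *mydev = arg;

        /* Ack the hardware, then let the core do the vblank accounting. */
        if (my_hw_ack_vblank(mydev)) {
            drm_crtc_handle_vblank(&mydev->crtc);
            return IRQ_HANDLED;
        }
        return IRQ_NONE;
    }

    static int my_enable_vblank(struct drm_crtc *crtc)
    {
        /* Unmask the vblank interrupt in hardware. */
        return 0;
    }

    static void my_disable_vblank(struct drm_crtc *crtc)
    {
        /* Mask the vblank interrupt in hardware. */
    }

    static const struct drm_crtc_funcs my_crtc_funcs = {
        /* ... .destroy, .set_config, .page_flip, etc. ... */
        .enable_vblank = my_enable_vblank,
        .disable_vblank = my_disable_vblank,
    };

    /* During driver load, once the number of CRTCs is known: */
    ret = drm_vblank_init(ddev, 1);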
/* Retry timestamp calculation up to 3 times to satisfy
* drm_timestamp_precision before giving up.
*/
@@ -259,16 +294,17 @@ static u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe)
}
/**
- * drm_accurate_vblank_count - retrieve the master vblank counter
+ * drm_crtc_accurate_vblank_count - retrieve the master vblank counter
* @crtc: which counter to retrieve
*
- * This function is similar to @drm_crtc_vblank_count but this
- * function interpolates to handle a race with vblank irq's.
+ * This function is similar to drm_crtc_vblank_count() but this function
+ * interpolates to handle a race with vblank interrupts using the high precision
+ * timestamping support.
*
- * This is mostly useful for hardware that can obtain the scanout
- * position, but doesn't have a frame counter.
+ * This is mostly useful for hardware that can obtain the scanout position, but
+ * doesn't have a hardware frame counter.
*/
-u32 drm_accurate_vblank_count(struct drm_crtc *crtc)
+u32 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
unsigned int pipe = drm_crtc_index(crtc);
@@ -287,7 +323,7 @@ u32 drm_accurate_vblank_count(struct drm_crtc *crtc)
return vblank;
}
-EXPORT_SYMBOL(drm_accurate_vblank_count);
+EXPORT_SYMBOL(drm_crtc_accurate_vblank_count);
static void __disable_vblank(struct drm_device *dev, unsigned int pipe)
{
@@ -358,15 +394,6 @@ static void vblank_disable_fn(unsigned long arg)
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
-/**
- * drm_vblank_cleanup - cleanup vblank support
- * @dev: DRM device
- *
- * This function cleans up any resources allocated in drm_vblank_init.
- *
- * Drivers which don't use drm_irq_install() need to set &drm_device.irq_enabled
- * themselves, to signal to the DRM core that vblank interrupts are enabled.
- */
void drm_vblank_cleanup(struct drm_device *dev)
{
unsigned int pipe;
@@ -388,7 +415,6 @@ void drm_vblank_cleanup(struct drm_device *dev)
dev->num_crtcs = 0;
}
-EXPORT_SYMBOL(drm_vblank_cleanup);
/**
* drm_vblank_init - initialize vblank support
@@ -396,6 +422,8 @@ EXPORT_SYMBOL(drm_vblank_cleanup);
* @num_crtcs: number of CRTCs supported by @dev
*
* This function initializes vblank support for @num_crtcs display pipelines.
+ * Cleanup is handled by the DRM core, or through calling drm_dev_fini() for
+ * drivers with a &drm_driver.release callback.
*
* Returns:
* Zero on success or a negative error code on failure.
@@ -468,11 +496,11 @@ EXPORT_SYMBOL(drm_crtc_vblank_waitqueue);
* @crtc: drm_crtc whose timestamp constants should be updated.
* @mode: display mode containing the scanout timings
*
- * Calculate and store various constants which are later
- * needed by vblank and swap-completion timestamping, e.g,
- * by drm_calc_vbltimestamp_from_scanoutpos(). They are
- * derived from CRTC's true scanout timing, so they take
- * things like panel scaling or other adjustments into account.
+ * Calculate and store various constants which are later needed by vblank and
+ * swap-completion timestamping, e.g., by
+ * drm_calc_vbltimestamp_from_scanoutpos(). They are derived from CRTC's true
+ * scanout timing, so they take things like panel scaling or other adjustments
+ * into account.
*/
void drm_calc_timestamping_constants(struct drm_crtc *crtc,
const struct drm_display_mode *mode)
@@ -535,25 +563,14 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants);
* if flag is set.
*
* Implements calculation of exact vblank timestamps from given drm_display_mode
- * timings and current video scanout position of a CRTC. This can be called from
- * within get_vblank_timestamp() implementation of a kms driver to implement the
- * actual timestamping.
- *
- * Should return timestamps conforming to the OML_sync_control OpenML
- * extension specification. The timestamp corresponds to the end of
- * the vblank interval, aka start of scanout of topmost-leftmost display
- * pixel in the following video frame.
+ * timings and current video scanout position of a CRTC. This can be directly
+ * used as the &drm_driver.get_vblank_timestamp implementation of a kms driver
+ * if &drm_driver.get_scanout_position is implemented.
*
- * Requires support for optional dev->driver->get_scanout_position()
- * in kms driver, plus a bit of setup code to provide a drm_display_mode
- * that corresponds to the true scanout timing.
- *
- * The current implementation only handles standard video modes. It
- * returns as no operation if a doublescan or interlaced video mode is
- * active. Higher level code is expected to handle this.
- *
- * This function can be used to implement the &drm_driver.get_vblank_timestamp
- * directly, if the driver implements the &drm_driver.get_scanout_position hook.
+ * The current implementation only handles standard video modes. For double scan
+ * and interlaced modes the driver is supposed to adjust the hardware mode
+ * (taken from &drm_crtc_state.adjusted_mode for atomic modeset drivers) to
+ * match the scanout position reported.
*
* Note that atomic drivers must call drm_calc_timestamping_constants() before
* enabling a CRTC. The atomic helpers already take care of that in
@@ -738,7 +755,9 @@ drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
*
* Fetches the "cooked" vblank count value that represents the number of
* vblank events since the system was booted, including lost events due to
- * modesetting activity.
+ * modesetting activity. Note that this counter isn't correct against a racing
+ * vblank interrupt (since it only reports the software vblank counter), see
+ * drm_crtc_accurate_vblank_count() for such use-cases.
*
* Returns:
* The software vblank counter.
@@ -749,20 +768,6 @@ u32 drm_crtc_vblank_count(struct drm_crtc *crtc)
}
EXPORT_SYMBOL(drm_crtc_vblank_count);
-/**
- * drm_vblank_count_and_time - retrieve "cooked" vblank counter value and the
- * system timestamp corresponding to that vblank counter value.
- * @dev: DRM device
- * @pipe: index of CRTC whose counter to retrieve
- * @vblanktime: Pointer to struct timeval to receive the vblank timestamp.
- *
- * Fetches the "cooked" vblank count value that represents the number of
- * vblank events since the system was booted, including lost events due to
- * modesetting activity. Returns corresponding system timestamp of the time
- * of the vblank interval that corresponds to the current vblank counter value.
- *
- * This is the legacy version of drm_crtc_vblank_count_and_time().
- */
static u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
struct timeval *vblanktime)
{
@@ -831,7 +836,7 @@ static void send_vblank_event(struct drm_device *dev,
* NOTE: Drivers using this to send out the &drm_crtc_state.event as part of an
* atomic commit must ensure that the next vblank happens at exactly the same
* time as the atomic commit is committed to the hardware. This function itself
- * does **not** protect again the next vblank interrupt racing with either this
+ * does **not** protect against the next vblank interrupt racing with either this
* function call or the atomic commit operation. A possible sequence could be:
*
* 1. Driver commits new hardware state into vblank-synchronized registers.
@@ -852,8 +857,8 @@ static void send_vblank_event(struct drm_device *dev,
* handler by calling drm_crtc_send_vblank_event() and make sure that there's no
* possible race with the hardware committing the atomic update.
*
- * Caller must hold event lock. Caller must also hold a vblank reference for
- * the event @e, which will be dropped when the next vblank arrives.
+ * Caller must hold a vblank reference for the event @e, which will be dropped
+ * when the next vblank arrives.
*/
void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
struct drm_pending_vblank_event *e)
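
[Editor's sketch] The sequence above is usually implemented with a pattern like the following in a driver's vblank-synchronized commit path; the locking shown is the common convention, not something this hunk mandates:

    struct drm_device *dev = crtc->dev;

    /* With the new state already latched into double-buffered registers: */
    spin_lock_irq(&dev->event_lock);
    if (crtc->state->event) {
        if (drm_crtc_vblank_get(crtc) == 0)
            /* Hand the vblank reference to the core; the event is sent
             * out from the next vblank interrupt. */
            drm_crtc_arm_vblank_event(crtc, crtc->state->event);
        else
            /* No vblank available (e.g. CRTC off): complete it now. */
            drm_crtc_send_vblank_event(crtc, crtc->state->event);
        crtc->state->event = NULL;
    }
    spin_unlock_irq(&dev->event_lock);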
@@ -913,14 +918,6 @@ static int __enable_vblank(struct drm_device *dev, unsigned int pipe)
return dev->driver->enable_vblank(dev, pipe);
}
-/**
- * drm_vblank_enable - enable the vblank interrupt on a CRTC
- * @dev: DRM device
- * @pipe: CRTC index
- *
- * Returns:
- * Zero on success or a negative error code on failure.
- */
static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
@@ -958,19 +955,6 @@ static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe)
return ret;
}
-/**
- * drm_vblank_get - get a reference count on vblank events
- * @dev: DRM device
- * @pipe: index of CRTC to own
- *
- * Acquire a reference count on vblank events to avoid having them disabled
- * while in use.
- *
- * This is the legacy version of drm_crtc_vblank_get().
- *
- * Returns:
- * Zero on success or a negative error code on failure.
- */
static int drm_vblank_get(struct drm_device *dev, unsigned int pipe)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
@@ -1014,16 +998,6 @@ int drm_crtc_vblank_get(struct drm_crtc *crtc)
}
EXPORT_SYMBOL(drm_crtc_vblank_get);
-/**
- * drm_vblank_put - release ownership of vblank events
- * @dev: DRM device
- * @pipe: index of CRTC to release
- *
- * Release ownership of a given vblank counter, turning off interrupts
- * if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
- *
- * This is the legacy version of drm_crtc_vblank_put().
- */
static void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
@@ -1067,6 +1041,8 @@ EXPORT_SYMBOL(drm_crtc_vblank_put);
* This waits for one vblank to pass on @pipe, using the irq driver interfaces.
* It is a failure to call this when the vblank irq for @pipe is disabled, e.g.
* due to lack of driver support or because the crtc is off.
+ *
+ * This is the legacy version of drm_crtc_wait_one_vblank().
*/
void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe)
{
@@ -1116,7 +1092,7 @@ EXPORT_SYMBOL(drm_crtc_wait_one_vblank);
* stored so that drm_vblank_on can restore it again.
*
* Drivers must use this function when the hardware vblank counter can get
- * reset, e.g. when suspending.
+ * reset, e.g. when suspending or disabling the @crtc in general.
*/
void drm_crtc_vblank_off(struct drm_crtc *crtc)
{
@@ -1184,6 +1160,8 @@ EXPORT_SYMBOL(drm_crtc_vblank_off);
* drm_crtc_vblank_on() functions. The difference compared to
* drm_crtc_vblank_off() is that this function doesn't save the vblank counter
* and hence doesn't need to call any driver hooks.
+ *
+ * This is useful for recovering driver state e.g. on driver load, or on resume.
*/
void drm_crtc_vblank_reset(struct drm_crtc *crtc)
{
@@ -1212,9 +1190,10 @@ EXPORT_SYMBOL(drm_crtc_vblank_reset);
* @crtc: CRTC in question
*
* This functions restores the vblank interrupt state captured with
- * drm_crtc_vblank_off() again. Note that calls to drm_crtc_vblank_on() and
- * drm_crtc_vblank_off() can be unbalanced and so can also be unconditionally called
- * in driver load code to reflect the current hardware state of the crtc.
+ * drm_crtc_vblank_off() again and is generally called when enabling @crtc. Note
+ * that calls to drm_crtc_vblank_on() and drm_crtc_vblank_off() can be
+ * unbalanced and so can also be unconditionally called in driver load code to
+ * reflect the current hardware state of the crtc.
*/
void drm_crtc_vblank_on(struct drm_crtc *crtc)
{
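A hedged sketch of the off/on pairing described above, with the foo_hw_* helpers standing in for hypothetical driver code:

	static void foo_crtc_atomic_enable(struct drm_crtc *crtc,
					   struct drm_crtc_state *old_state)
	{
		foo_hw_power_on(crtc);		/* hypothetical hardware enable */
		drm_crtc_vblank_on(crtc);	/* restore state saved by _off() */
	}

	static void foo_crtc_atomic_disable(struct drm_crtc *crtc,
					    struct drm_crtc_state *old_state)
	{
		drm_crtc_vblank_off(crtc);	/* also completes pending events */
		foo_hw_power_off(crtc);		/* hypothetical hardware disable */
	}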
@@ -1299,8 +1278,8 @@ static void drm_legacy_vblank_post_modeset(struct drm_device *dev,
}
}
-int drm_legacy_modeset_ctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int drm_legacy_modeset_ctl_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_modeset_ctl *modeset = data;
unsigned int pipe;
@@ -1419,22 +1398,8 @@ static bool drm_wait_vblank_is_query(union drm_wait_vblank *vblwait)
_DRM_VBLANK_NEXTONMISS));
}
-/*
- * Wait for VBLANK.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param data user argument, pointing to a drm_wait_vblank structure.
- * \return zero on success or a negative number on failure.
- *
- * This function enables the vblank interrupt on the pipe requested, then
- * sleeps waiting for the requested sequence number to occur, and drops
- * the vblank interrupt refcount afterwards. (vblank IRQ disable follows that
- * after a timeout with no further vblank waits scheduled).
- */
-int drm_wait_vblank(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_vblank_crtc *vblank;
union drm_wait_vblank *vblwait = data;
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 1170b3209a12..2660543ad86a 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -40,6 +40,7 @@
#include <linux/efi.h>
#include <linux/slab.h>
#endif
+#include <linux/mem_encrypt.h>
#include <asm/pgtable.h>
#include "drm_internal.h"
#include "drm_legacy.h"
@@ -58,6 +59,9 @@ static pgprot_t drm_io_prot(struct drm_local_map *map,
{
pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
+ /* We don't want graphics memory to be mapped encrypted */
+ tmp = pgprot_decrypted(tmp);
+
#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__)
if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
tmp = pgprot_noncached(tmp);
@@ -631,7 +635,7 @@ int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma)
struct drm_device *dev = priv->minor->dev;
int ret;
- if (drm_device_is_unplugged(dev))
+ if (drm_dev_is_unplugged(dev))
return -ENODEV;
mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/etnaviv/Kconfig b/drivers/gpu/drm/etnaviv/Kconfig
index 71cee4e9fefb..38b477b5fbf9 100644
--- a/drivers/gpu/drm/etnaviv/Kconfig
+++ b/drivers/gpu/drm/etnaviv/Kconfig
@@ -10,6 +10,8 @@ config DRM_ETNAVIV
select IOMMU_API
select IOMMU_SUPPORT
select WANT_DEV_COREDUMP
+ select CMA if HAVE_DMA_CONTIGUOUS
+ select DMA_CMA if HAVE_DMA_CONTIGUOUS
help
DRM driver for Vivante GPUs.
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 91e17aeee1da..2cb4773823c2 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -316,7 +316,7 @@ static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
ret = etnaviv_gem_cpu_prep(obj, args->op, &TS(args->timeout));
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -337,7 +337,7 @@ static int etnaviv_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
ret = etnaviv_gem_cpu_fini(obj);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -357,7 +357,7 @@ static int etnaviv_ioctl_gem_info(struct drm_device *dev, void *data,
return -ENOENT;
ret = etnaviv_gem_mmap_offset(obj, &args->offset);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -446,7 +446,7 @@ static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data,
ret = etnaviv_gem_wait_bo(gpu, obj, timeout);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 9a3bea738330..5a634594a6ce 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -68,7 +68,7 @@ static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
struct page **p = drm_gem_get_pages(&etnaviv_obj->base);
if (IS_ERR(p)) {
- dev_err(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
+ dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
return PTR_ERR(p);
}
@@ -265,7 +265,7 @@ void etnaviv_gem_mapping_reference(struct etnaviv_vram_mapping *mapping)
{
struct etnaviv_gem_object *etnaviv_obj = mapping->object;
- drm_gem_object_reference(&etnaviv_obj->base);
+ drm_gem_object_get(&etnaviv_obj->base);
mutex_lock(&etnaviv_obj->lock);
WARN_ON(mapping->use == 0);
@@ -282,7 +282,7 @@ void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
mapping->use -= 1;
mutex_unlock(&etnaviv_obj->lock);
- drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
+ drm_gem_object_put_unlocked(&etnaviv_obj->base);
}
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
@@ -358,7 +358,7 @@ out:
return ERR_PTR(ret);
/* Take a reference on the object */
- drm_gem_object_reference(obj);
+ drm_gem_object_get(obj);
return mapping;
}
@@ -413,6 +413,16 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
bool write = !!(op & ETNA_PREP_WRITE);
int ret;
+ if (!etnaviv_obj->sgt) {
+ void *ret;
+
+ mutex_lock(&etnaviv_obj->lock);
+ ret = etnaviv_gem_get_pages(etnaviv_obj);
+ mutex_unlock(&etnaviv_obj->lock);
+ if (IS_ERR(ret))
+ return PTR_ERR(ret);
+ }
+
if (op & ETNA_PREP_NOSYNC) {
if (!reservation_object_test_signaled_rcu(etnaviv_obj->resv,
write))
@@ -427,16 +437,6 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
}
if (etnaviv_obj->flags & ETNA_BO_CACHED) {
- if (!etnaviv_obj->sgt) {
- void *ret;
-
- mutex_lock(&etnaviv_obj->lock);
- ret = etnaviv_gem_get_pages(etnaviv_obj);
- mutex_unlock(&etnaviv_obj->lock);
- if (IS_ERR(ret))
- return PTR_ERR(ret);
- }
-
dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
etnaviv_obj->sgt->nents,
etnaviv_op_to_dma_dir(op));
@@ -662,7 +662,8 @@ static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
* going to pin these pages.
*/
mapping = obj->filp->f_mapping;
- mapping_set_gfp_mask(mapping, GFP_HIGHUSER);
+ mapping_set_gfp_mask(mapping, GFP_HIGHUSER |
+ __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
}
if (ret)
@@ -671,7 +672,7 @@ static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
return obj;
fail:
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ERR_PTR(ret);
}
@@ -688,14 +689,14 @@ int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
ret = etnaviv_gem_obj_add(dev, obj);
if (ret < 0) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
ret = drm_gem_handle_create(file, obj, handle);
/* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -712,7 +713,7 @@ struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev,
ret = etnaviv_gem_obj_add(dev, obj);
if (ret < 0) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ERR_PTR(ret);
}
@@ -800,7 +801,7 @@ static void __etnaviv_gem_userptr_get_pages(struct work_struct *_work)
}
mutex_unlock(&etnaviv_obj->lock);
- drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
+ drm_gem_object_put_unlocked(&etnaviv_obj->base);
mmput(work->mm);
put_task_struct(work->task);
@@ -858,7 +859,7 @@ static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
}
get_task_struct(current);
- drm_gem_object_reference(&etnaviv_obj->base);
+ drm_gem_object_get(&etnaviv_obj->base);
work->mm = mm;
work->task = current;
@@ -924,6 +925,6 @@ int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);
unreference:
/* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
+ drm_gem_object_put_unlocked(&etnaviv_obj->base);
return ret;
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index e5da4f2300ba..ae884723e9b1 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -146,7 +146,7 @@ struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
return &etnaviv_obj->base;
fail:
- drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
+ drm_gem_object_put_unlocked(&etnaviv_obj->base);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 5bd93169dac2..a7ff2e4c00d2 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -88,7 +88,7 @@ static int submit_lookup_objects(struct etnaviv_gem_submit *submit,
* Take a refcount on the object. The file table lock
* prevents the object_idr's refcount on this being dropped.
*/
- drm_gem_object_reference(obj);
+ drm_gem_object_get(obj);
submit->bos[i].obj = to_etnaviv_bo(obj);
}
@@ -270,8 +270,8 @@ static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
if (ret)
return ret;
- if (r->reloc_offset >= bo->obj->base.size - sizeof(*ptr)) {
- DRM_ERROR("relocation %u outside object", i);
+ if (r->reloc_offset > bo->obj->base.size - sizeof(*ptr)) {
+ DRM_ERROR("relocation %u outside object\n", i);
return -EINVAL;
}
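To make the boundary concrete (numbers assumed for illustration): with a 4096-byte object and 4-byte relocation slots, the last valid reloc_offset is 4096 - 4 = 4092. The old ">=" comparison rejected exactly that offset, while the corrected ">" accepts 4092 and still rejects 4093 and beyond.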
@@ -291,7 +291,7 @@ static void submit_cleanup(struct etnaviv_gem_submit *submit)
struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
submit_unlock_object(submit, i);
- drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
+ drm_gem_object_put_unlocked(&etnaviv_obj->base);
}
ww_acquire_fini(&submit->ticket);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index ada45fdd0eae..fc9a6a83dfc7 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -1622,10 +1622,12 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
int ret;
- gpu->cooling = thermal_of_cooling_device_register(dev->of_node,
+ if (IS_ENABLED(CONFIG_THERMAL)) {
+ gpu->cooling = thermal_of_cooling_device_register(dev->of_node,
(char *)dev_name(dev), gpu, &cooling_ops);
- if (IS_ERR(gpu->cooling))
- return PTR_ERR(gpu->cooling);
+ if (IS_ERR(gpu->cooling))
+ return PTR_ERR(gpu->cooling);
+ }
#ifdef CONFIG_PM
ret = pm_runtime_get_sync(gpu->dev);
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 1d185347c64c..305dc3d4ff77 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -75,6 +75,7 @@ config DRM_EXYNOS_DP
config DRM_EXYNOS_HDMI
bool "HDMI"
depends on DRM_EXYNOS_MIXER || DRM_EXYNOS5433_DECON
+ select CEC_CORE if CEC_NOTIFIER
help
Choose this option if you want to use Exynos HDMI for DRM.
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 5792ca88ab7a..730b8d9db187 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -13,6 +13,7 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/iopoll.h>
#include <linux/mfd/syscon.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
@@ -33,9 +34,8 @@
#define WINDOWS_NR 3
#define MIN_FB_WIDTH_FOR_16WORD_BURST 128
-#define IFTYPE_I80 (1 << 0)
-#define I80_HW_TRG (1 << 1)
-#define IFTYPE_HDMI (1 << 2)
+#define I80_HW_TRG (1 << 0)
+#define IFTYPE_HDMI (1 << 1)
static const char * const decon_clks_name[] = {
"pclk",
@@ -57,6 +57,8 @@ struct decon_context {
struct regmap *sysreg;
struct clk *clks[ARRAY_SIZE(decon_clks_name)];
unsigned int irq;
+ unsigned int irq_vsync;
+ unsigned int irq_lcd_sys;
unsigned int te_irq;
unsigned long out_type;
int first_win;
@@ -90,7 +92,7 @@ static int decon_enable_vblank(struct exynos_drm_crtc *crtc)
u32 val;
val = VIDINTCON0_INTEN;
- if (ctx->out_type & IFTYPE_I80)
+ if (crtc->i80_mode)
val |= VIDINTCON0_FRAMEDONE;
else
val |= VIDINTCON0_INTFRMEN | VIDINTCON0_FRAMESEL_FP;
@@ -139,7 +141,7 @@ static u32 decon_get_frame_count(struct decon_context *ctx, bool end)
switch (status & (VIDCON1_VSTATUS_MASK | VIDCON1_I80_ACTIVE)) {
case VIDCON1_VSTATUS_VS:
- if (!(ctx->out_type & IFTYPE_I80))
+ if (!(ctx->crtc->i80_mode))
--frm;
break;
case VIDCON1_VSTATUS_BP:
@@ -166,7 +168,7 @@ static u32 decon_get_vblank_counter(struct exynos_drm_crtc *crtc)
static void decon_setup_trigger(struct decon_context *ctx)
{
- if (!(ctx->out_type & (IFTYPE_I80 | I80_HW_TRG)))
+ if (!ctx->crtc->i80_mode && !(ctx->out_type & I80_HW_TRG))
return;
if (!(ctx->out_type & I80_HW_TRG)) {
@@ -206,7 +208,7 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
val = VIDOUT_LCD_ON;
if (interlaced)
val |= VIDOUT_INTERLACE_EN_F;
- if (ctx->out_type & IFTYPE_I80) {
+ if (crtc->i80_mode) {
val |= VIDOUT_COMMAND_IF;
} else {
val |= VIDOUT_RGB_IF;
@@ -222,7 +224,7 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
VIDTCON2_HOZVAL(m->hdisplay - 1);
writel(val, ctx->addr + DECON_VIDTCON2);
- if (!(ctx->out_type & IFTYPE_I80)) {
+ if (!crtc->i80_mode) {
int vbp = m->crtc_vtotal - m->crtc_vsync_end;
int vfp = m->crtc_vsync_start - m->crtc_vdisplay;
@@ -277,16 +279,14 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
val |= WINCONx_BURSTLEN_16WORD;
break;
case DRM_FORMAT_ARGB8888:
+ default:
val |= WINCONx_BPPMODE_32BPP_A8888;
val |= WINCONx_WSWP_F | WINCONx_BLD_PIX_F | WINCONx_ALPHA_SEL_F;
val |= WINCONx_BURSTLEN_16WORD;
break;
- default:
- DRM_ERROR("Proper pixel format is not set\n");
- return;
}
- DRM_DEBUG_KMS("bpp = %u\n", fb->format->cpp[0] * 8);
+ DRM_DEBUG_KMS("cpp = %u\n", fb->format->cpp[0]);
/*
* In case of exynos, setting dma-burst to 16Word causes permanent
@@ -329,7 +329,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
struct decon_context *ctx = crtc->ctx;
struct drm_framebuffer *fb = state->base.fb;
unsigned int win = plane->index;
- unsigned int bpp = fb->format->cpp[0];
+ unsigned int cpp = fb->format->cpp[0];
unsigned int pitch = fb->pitches[0];
dma_addr_t dma_addr = exynos_drm_fb_dma_addr(fb, 0);
u32 val;
@@ -365,11 +365,11 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
writel(val, ctx->addr + DECON_VIDW0xADD1B0(win));
if (!(ctx->out_type & IFTYPE_HDMI))
- val = BIT_VAL(pitch - state->crtc.w * bpp, 27, 14)
- | BIT_VAL(state->crtc.w * bpp, 13, 0);
+ val = BIT_VAL(pitch - state->crtc.w * cpp, 27, 14)
+ | BIT_VAL(state->crtc.w * cpp, 13, 0);
else
- val = BIT_VAL(pitch - state->crtc.w * bpp, 29, 15)
- | BIT_VAL(state->crtc.w * bpp, 14, 0);
+ val = BIT_VAL(pitch - state->crtc.w * cpp, 29, 15)
+ | BIT_VAL(state->crtc.w * cpp, 14, 0);
writel(val, ctx->addr + DECON_VIDW0xADD2(win));
decon_win_set_pixfmt(ctx, win, fb);
@@ -407,24 +407,19 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
static void decon_swreset(struct decon_context *ctx)
{
- unsigned int tries;
unsigned long flags;
+ u32 val;
+ int ret;
writel(0, ctx->addr + DECON_VIDCON0);
- for (tries = 2000; tries; --tries) {
- if (~readl(ctx->addr + DECON_VIDCON0) & VIDCON0_STOP_STATUS)
- break;
- udelay(10);
- }
+ readl_poll_timeout(ctx->addr + DECON_VIDCON0, val,
+ ~val & VIDCON0_STOP_STATUS, 12, 20000);
writel(VIDCON0_SWRESET, ctx->addr + DECON_VIDCON0);
- for (tries = 2000; tries; --tries) {
- if (~readl(ctx->addr + DECON_VIDCON0) & VIDCON0_SWRESET)
- break;
- udelay(10);
- }
+ ret = readl_poll_timeout(ctx->addr + DECON_VIDCON0, val,
+ ~val & VIDCON0_SWRESET, 12, 20000);
- WARN(tries == 0, "failed to software reset DECON\n");
+ WARN(ret < 0, "failed to software reset DECON\n");
spin_lock_irqsave(&ctx->vblank_lock, flags);
ctx->frame_id = 0;
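As background on the helper used above (sketch only; the FOO_* names are made up): readl_poll_timeout(addr, val, cond, sleep_us, timeout_us) from <linux/iopoll.h> re-reads the register into val until cond becomes true, sleeping between reads, and returns 0 on success or -ETIMEDOUT:

	u32 val;
	int ret;

	ret = readl_poll_timeout(base + FOO_STATUS, val,
				 val & FOO_READY, 10, 10000);
	if (ret)
		dev_warn(dev, "FOO block did not become ready\n");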
@@ -515,6 +510,22 @@ err:
clk_disable_unprepare(ctx->clks[i]);
}
+static enum drm_mode_status decon_mode_valid(struct exynos_drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ struct decon_context *ctx = crtc->ctx;
+
+ ctx->irq = crtc->i80_mode ? ctx->irq_lcd_sys : ctx->irq_vsync;
+
+ if (ctx->irq)
+ return MODE_OK;
+
+ dev_info(ctx->dev, "Sink requires %s mode, but appropriate interrupt is not provided.\n",
+ crtc->i80_mode ? "command" : "video");
+
+ return MODE_BAD;
+}
+
static const struct exynos_drm_crtc_ops decon_crtc_ops = {
.enable = decon_enable,
.disable = decon_disable,
@@ -524,6 +535,7 @@ static const struct exynos_drm_crtc_ops decon_crtc_ops = {
.atomic_begin = decon_atomic_begin,
.update_plane = decon_update_plane,
.disable_plane = decon_disable_plane,
+ .mode_valid = decon_mode_valid,
.atomic_flush = decon_atomic_flush,
};
@@ -674,19 +686,22 @@ static const struct of_device_id exynos5433_decon_driver_dt_match[] = {
MODULE_DEVICE_TABLE(of, exynos5433_decon_driver_dt_match);
static int decon_conf_irq(struct decon_context *ctx, const char *name,
- irq_handler_t handler, unsigned long int flags, bool required)
+ irq_handler_t handler, unsigned long int flags)
{
struct platform_device *pdev = to_platform_device(ctx->dev);
int ret, irq = platform_get_irq_byname(pdev, name);
if (irq < 0) {
- if (irq == -EPROBE_DEFER)
+ switch (irq) {
+ case -EPROBE_DEFER:
return irq;
- if (required)
- dev_err(ctx->dev, "cannot get %s IRQ\n", name);
- else
- irq = 0;
- return irq;
+ case -ENODATA:
+ case -ENXIO:
+ return 0;
+ default:
+ dev_err(ctx->dev, "IRQ %s get failed, %d\n", name, irq);
+ return irq;
+ }
}
irq_set_status_flags(irq, IRQ_NOAUTOEN);
ret = devm_request_irq(ctx->dev, irq, handler, flags, "drm_decon", ctx);
@@ -714,11 +729,8 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
ctx->out_type = (unsigned long)of_device_get_match_data(dev);
spin_lock_init(&ctx->vblank_lock);
- if (ctx->out_type & IFTYPE_HDMI) {
+ if (ctx->out_type & IFTYPE_HDMI)
ctx->first_win = 1;
- } else if (of_get_child_by_name(dev->of_node, "i80-if-timings")) {
- ctx->out_type |= IFTYPE_I80;
- }
for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++) {
struct clk *clk;
@@ -742,25 +754,23 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
return PTR_ERR(ctx->addr);
}
- if (ctx->out_type & IFTYPE_I80) {
- ret = decon_conf_irq(ctx, "lcd_sys", decon_irq_handler, 0, true);
- if (ret < 0)
- return ret;
- ctx->irq = ret;
+ ret = decon_conf_irq(ctx, "vsync", decon_irq_handler, 0);
+ if (ret < 0)
+ return ret;
+ ctx->irq_vsync = ret;
- ret = decon_conf_irq(ctx, "te", decon_te_irq_handler,
- IRQF_TRIGGER_RISING, false);
- if (ret < 0)
- return ret;
- if (ret) {
- ctx->te_irq = ret;
- ctx->out_type &= ~I80_HW_TRG;
- }
- } else {
- ret = decon_conf_irq(ctx, "vsync", decon_irq_handler, 0, true);
- if (ret < 0)
+ ret = decon_conf_irq(ctx, "lcd_sys", decon_irq_handler, 0);
+ if (ret < 0)
+ return ret;
+ ctx->irq_lcd_sys = ret;
+
+ ret = decon_conf_irq(ctx, "te", decon_te_irq_handler,
+ IRQF_TRIGGER_RISING);
+ if (ret < 0)
return ret;
- ctx->irq = ret;
+ if (ret) {
+ ctx->te_irq = ret;
+ ctx->out_type &= ~I80_HW_TRG;
}
if (ctx->out_type & I80_HW_TRG) {
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 3e88269fdc2e..615efcf7782a 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -309,19 +309,14 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
val |= WINCONx_BURSTLEN_16WORD;
break;
case DRM_FORMAT_BGRA8888:
+ default:
val |= WINCONx_BPPMODE_32BPP_BGRA | WINCONx_BLD_PIX |
WINCONx_ALPHA_SEL;
val |= WINCONx_BURSTLEN_16WORD;
break;
- default:
- DRM_DEBUG_KMS("invalid pixel size so using unpacked 24bpp.\n");
-
- val |= WINCONx_BPPMODE_24BPP_xRGB;
- val |= WINCONx_BURSTLEN_16WORD;
- break;
}
- DRM_DEBUG_KMS("bpp = %d\n", fb->format->cpp[0] * 8);
+ DRM_DEBUG_KMS("cpp = %d\n", fb->format->cpp[0]);
/*
* In case of exynos, setting dma-burst to 16Word causes permanent
@@ -398,7 +393,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
unsigned int last_x;
unsigned int last_y;
unsigned int win = plane->index;
- unsigned int bpp = fb->format->cpp[0];
+ unsigned int cpp = fb->format->cpp[0];
unsigned int pitch = fb->pitches[0];
if (ctx->suspended)
@@ -418,7 +413,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
val = (unsigned long)exynos_drm_fb_dma_addr(fb, 0);
writel(val, ctx->regs + VIDW_BUF_START(win));
- padding = (pitch / bpp) - fb->width;
+ padding = (pitch / cpp) - fb->width;
/* buffer size */
writel(fb->width + padding, ctx->regs + VIDW_WHOLE_X(win));
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index 385537b726a6..39629e7a80b9 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -155,7 +155,7 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
struct exynos_dp_device *dp = dev_get_drvdata(dev);
struct drm_encoder *encoder = &dp->encoder;
struct drm_device *drm_dev = data;
- int pipe, ret;
+ int ret;
/*
* Just like the probe function said, we don't need the
@@ -179,20 +179,15 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
return ret;
}
- pipe = exynos_drm_crtc_get_pipe_from_type(drm_dev,
- EXYNOS_DISPLAY_TYPE_LCD);
- if (pipe < 0)
- return pipe;
-
- encoder->possible_crtcs = 1 << pipe;
-
- DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
-
drm_encoder_init(drm_dev, encoder, &exynos_dp_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(encoder, &exynos_dp_encoder_helper_funcs);
+ ret = exynos_drm_set_possible_crtcs(encoder, EXYNOS_DISPLAY_TYPE_LCD);
+ if (ret < 0)
+ return ret;
+
dp->plat_data.encoder = encoder;
return analogix_dp_bind(dev, dp->drm_dev, &dp->plat_data);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
index edbd98ff293e..b0c0621fcdf7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -13,6 +13,7 @@
*/
#include <drm/drmP.h>
+
#include "exynos_drm_drv.h"
#include "exynos_drm_crtc.h"
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index d72777f6411a..6ce0821590df 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -16,12 +16,14 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_encoder.h>
#include "exynos_drm_crtc.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_plane.h"
-static void exynos_drm_crtc_enable(struct drm_crtc *crtc)
+static void exynos_drm_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
@@ -31,7 +33,8 @@ static void exynos_drm_crtc_enable(struct drm_crtc *crtc)
drm_crtc_vblank_on(crtc);
}
-static void exynos_drm_crtc_disable(struct drm_crtc *crtc)
+static void exynos_drm_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
@@ -81,12 +84,24 @@ static void exynos_crtc_atomic_flush(struct drm_crtc *crtc,
exynos_crtc->ops->atomic_flush(exynos_crtc);
}
+static enum drm_mode_status exynos_crtc_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+
+ if (exynos_crtc->ops->mode_valid)
+ return exynos_crtc->ops->mode_valid(exynos_crtc, mode);
+
+ return MODE_OK;
+}
+
static const struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
- .enable = exynos_drm_crtc_enable,
- .disable = exynos_drm_crtc_disable,
+ .mode_valid = exynos_crtc_mode_valid,
.atomic_check = exynos_crtc_atomic_check,
.atomic_begin = exynos_crtc_atomic_begin,
.atomic_flush = exynos_crtc_atomic_flush,
+ .atomic_enable = exynos_drm_crtc_atomic_enable,
+ .atomic_disable = exynos_drm_crtc_atomic_disable,
};
void exynos_crtc_handle_event(struct exynos_drm_crtc *exynos_crtc)
@@ -189,16 +204,30 @@ err_crtc:
return ERR_PTR(ret);
}
-int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
+struct exynos_drm_crtc *exynos_drm_crtc_get_by_type(struct drm_device *drm_dev,
enum exynos_drm_output_type out_type)
{
struct drm_crtc *crtc;
drm_for_each_crtc(crtc, drm_dev)
if (to_exynos_crtc(crtc)->type == out_type)
- return drm_crtc_index(crtc);
+ return to_exynos_crtc(crtc);
- return -EPERM;
+ return ERR_PTR(-EPERM);
+}
+
+int exynos_drm_set_possible_crtcs(struct drm_encoder *encoder,
+ enum exynos_drm_output_type out_type)
+{
+ struct exynos_drm_crtc *crtc = exynos_drm_crtc_get_by_type(encoder->dev,
+ out_type);
+
+ if (IS_ERR(crtc))
+ return PTR_ERR(crtc);
+
+ encoder->possible_crtcs = drm_crtc_mask(&crtc->base);
+
+ return 0;
}
void exynos_drm_crtc_te_handler(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index ef58b64e3d2d..dec446109e6c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -15,21 +15,25 @@
#ifndef _EXYNOS_DRM_CRTC_H_
#define _EXYNOS_DRM_CRTC_H_
+
#include "exynos_drm_drv.h"
struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
struct drm_plane *plane,
- enum exynos_drm_output_type type,
+ enum exynos_drm_output_type out_type,
const struct exynos_drm_crtc_ops *ops,
void *context);
void exynos_drm_crtc_wait_pending_update(struct exynos_drm_crtc *exynos_crtc);
void exynos_drm_crtc_finish_update(struct exynos_drm_crtc *exynos_crtc,
struct exynos_drm_plane *exynos_plane);
-/* This function gets pipe value to crtc device matched with out_type. */
-int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
+/* This function returns the crtc device matched with out_type. */
+struct exynos_drm_crtc *exynos_drm_crtc_get_by_type(struct drm_device *drm_dev,
enum exynos_drm_output_type out_type);
+int exynos_drm_set_possible_crtcs(struct drm_encoder *encoder,
+ enum exynos_drm_output_type out_type);
+
/*
* This function calls the crtc device(manager)'s te_handler() callback
* to trigger to transfer video image at the tearing effect synchronization
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 63abcd280fa0..66945e0dc57f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -59,7 +59,6 @@ static void exynos_dpi_connector_destroy(struct drm_connector *connector)
}
static const struct drm_connector_funcs exynos_dpi_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.detect = exynos_dpi_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = exynos_dpi_connector_destroy,
@@ -203,19 +202,15 @@ int exynos_dpi_bind(struct drm_device *dev, struct drm_encoder *encoder)
{
int ret;
- ret = exynos_drm_crtc_get_pipe_from_type(dev, EXYNOS_DISPLAY_TYPE_LCD);
- if (ret < 0)
- return ret;
-
- encoder->possible_crtcs = 1 << ret;
-
- DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
-
drm_encoder_init(dev, encoder, &exynos_dpi_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(encoder, &exynos_dpi_encoder_helper_funcs);
+ ret = exynos_drm_set_possible_crtcs(encoder, EXYNOS_DISPLAY_TYPE_LCD);
+ if (ret < 0)
+ return ret;
+
ret = exynos_dpi_create_connector(encoder);
if (ret) {
DRM_ERROR("failed to create connector ret = %d\n", ret);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 35a8dfc93836..b1f7299600f0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -145,8 +145,6 @@ static struct drm_driver exynos_drm_driver = {
.gem_free_object_unlocked = exynos_drm_gem_free_object,
.gem_vm_ops = &exynos_drm_gem_vm_ops,
.dumb_create = exynos_drm_gem_dumb_create,
- .dumb_map_offset = exynos_drm_gem_dumb_map_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
@@ -395,8 +393,9 @@ static int exynos_drm_bind(struct device *dev)
/* init kms poll for handling hpd */
drm_kms_helper_poll_init(drm);
- /* force connectors detection */
- drm_helper_hpd_irq_event(drm);
+ ret = exynos_drm_fbdev_init(drm);
+ if (ret)
+ goto err_cleanup_poll;
/* register the DRM device */
ret = drm_dev_register(drm, 0);
@@ -407,6 +406,7 @@ static int exynos_drm_bind(struct device *dev)
err_cleanup_fbdev:
exynos_drm_fbdev_fini(drm);
+err_cleanup_poll:
drm_kms_helper_poll_fini(drm);
exynos_drm_device_subdrv_remove(drm);
err_unbind_all:
@@ -453,7 +453,6 @@ static int exynos_drm_platform_probe(struct platform_device *pdev)
struct component_match *match;
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
- exynos_drm_driver.num_ioctls = ARRAY_SIZE(exynos_ioctls);
match = exynos_drm_match_add(&pdev->dev);
if (IS_ERR(match))
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index a93de321706b..cf131c2aa23e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -91,6 +91,7 @@ struct exynos_drm_plane {
#define EXYNOS_DRM_PLANE_CAP_DOUBLE (1 << 0)
#define EXYNOS_DRM_PLANE_CAP_SCALE (1 << 1)
#define EXYNOS_DRM_PLANE_CAP_ZPOS (1 << 2)
+#define EXYNOS_DRM_PLANE_CAP_TILE (1 << 3)
/*
* Exynos DRM plane configuration structure.
@@ -117,6 +118,7 @@ struct exynos_drm_plane_config {
* @disable: disable the device
* @enable_vblank: specific driver callback for enabling vblank interrupt.
* @disable_vblank: specific driver callback for disabling vblank interrupt.
+ * @mode_valid: specific driver callback for mode validation
* @atomic_check: validate state
* @atomic_begin: prepare device to receive an update
* @atomic_flush: mark the end of device update
@@ -132,6 +134,8 @@ struct exynos_drm_crtc_ops {
int (*enable_vblank)(struct exynos_drm_crtc *crtc);
void (*disable_vblank)(struct exynos_drm_crtc *crtc);
u32 (*get_vblank_counter)(struct exynos_drm_crtc *crtc);
+ enum drm_mode_status (*mode_valid)(struct exynos_drm_crtc *crtc,
+ const struct drm_display_mode *mode);
int (*atomic_check)(struct exynos_drm_crtc *crtc,
struct drm_crtc_state *state);
void (*atomic_begin)(struct exynos_drm_crtc *crtc);
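A hypothetical implementation of the new hook, for illustration only (the 150 MHz limit is an assumed hardware constraint, not taken from any of these drivers):

	static enum drm_mode_status foo_mode_valid(struct exynos_drm_crtc *crtc,
						   const struct drm_display_mode *mode)
	{
		if (mode->clock > 150000)	/* mode->clock is in kHz */
			return MODE_CLOCK_HIGH;

		return MODE_OK;
	}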
@@ -162,6 +166,7 @@ struct exynos_drm_crtc {
const struct exynos_drm_crtc_ops *ops;
void *ctx;
struct exynos_drm_clk *pipe_clk;
+ bool i80_mode : 1;
};
static inline void exynos_drm_pipe_clk_enable(struct exynos_drm_crtc *crtc,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index a11b79596e2f..7904ffa9abfb 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -254,7 +254,6 @@ struct exynos_dsi {
struct drm_encoder encoder;
struct mipi_dsi_host dsi_host;
struct drm_connector connector;
- struct device_node *panel_node;
struct drm_panel *panel;
struct device *dev;
@@ -1329,12 +1328,13 @@ static int exynos_dsi_init(struct exynos_dsi *dsi)
return 0;
}
-static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi)
+static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi,
+ struct device *panel)
{
int ret;
int te_gpio_irq;
- dsi->te_gpio = of_get_named_gpio(dsi->panel_node, "te-gpios", 0);
+ dsi->te_gpio = of_get_named_gpio(panel->of_node, "te-gpios", 0);
if (dsi->te_gpio == -ENOENT)
return 0;
@@ -1374,85 +1374,6 @@ static void exynos_dsi_unregister_te_irq(struct exynos_dsi *dsi)
}
}
-static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
- struct mipi_dsi_device *device)
-{
- struct exynos_dsi *dsi = host_to_dsi(host);
-
- dsi->lanes = device->lanes;
- dsi->format = device->format;
- dsi->mode_flags = device->mode_flags;
- dsi->panel_node = device->dev.of_node;
-
- /*
- * This is a temporary solution and should be made by more generic way.
- *
- * If attached panel device is for command mode one, dsi should register
- * TE interrupt handler.
- */
- if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO)) {
- int ret = exynos_dsi_register_te_irq(dsi);
-
- if (ret)
- return ret;
- }
-
- if (dsi->connector.dev)
- drm_helper_hpd_irq_event(dsi->connector.dev);
-
- return 0;
-}
-
-static int exynos_dsi_host_detach(struct mipi_dsi_host *host,
- struct mipi_dsi_device *device)
-{
- struct exynos_dsi *dsi = host_to_dsi(host);
-
- exynos_dsi_unregister_te_irq(dsi);
-
- dsi->panel_node = NULL;
-
- if (dsi->connector.dev)
- drm_helper_hpd_irq_event(dsi->connector.dev);
-
- return 0;
-}
-
-static ssize_t exynos_dsi_host_transfer(struct mipi_dsi_host *host,
- const struct mipi_dsi_msg *msg)
-{
- struct exynos_dsi *dsi = host_to_dsi(host);
- struct exynos_dsi_transfer xfer;
- int ret;
-
- if (!(dsi->state & DSIM_STATE_ENABLED))
- return -EINVAL;
-
- if (!(dsi->state & DSIM_STATE_INITIALIZED)) {
- ret = exynos_dsi_init(dsi);
- if (ret)
- return ret;
- dsi->state |= DSIM_STATE_INITIALIZED;
- }
-
- ret = mipi_dsi_create_packet(&xfer.packet, msg);
- if (ret < 0)
- return ret;
-
- xfer.rx_len = msg->rx_len;
- xfer.rx_payload = msg->rx_buf;
- xfer.flags = msg->flags;
-
- ret = exynos_dsi_transfer(dsi, &xfer);
- return (ret < 0) ? ret : xfer.rx_done;
-}
-
-static const struct mipi_dsi_host_ops exynos_dsi_ops = {
- .attach = exynos_dsi_host_attach,
- .detach = exynos_dsi_host_detach,
- .transfer = exynos_dsi_host_transfer,
-};
-
static void exynos_dsi_enable(struct drm_encoder *encoder)
{
struct exynos_dsi *dsi = encoder_to_dsi(encoder);
@@ -1508,25 +1429,7 @@ static void exynos_dsi_disable(struct drm_encoder *encoder)
static enum drm_connector_status
exynos_dsi_detect(struct drm_connector *connector, bool force)
{
- struct exynos_dsi *dsi = connector_to_dsi(connector);
-
- if (!dsi->panel) {
- dsi->panel = of_drm_find_panel(dsi->panel_node);
- if (dsi->panel)
- drm_panel_attach(dsi->panel, &dsi->connector);
- } else if (!dsi->panel_node) {
- struct drm_encoder *encoder;
-
- encoder = platform_get_drvdata(to_platform_device(dsi->dev));
- exynos_dsi_disable(encoder);
- drm_panel_detach(dsi->panel);
- dsi->panel = NULL;
- }
-
- if (dsi->panel)
- return connector_status_connected;
-
- return connector_status_disconnected;
+ return connector->status;
}
static void exynos_dsi_connector_destroy(struct drm_connector *connector)
@@ -1537,7 +1440,6 @@ static void exynos_dsi_connector_destroy(struct drm_connector *connector)
}
static const struct drm_connector_funcs exynos_dsi_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.detect = exynos_dsi_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = exynos_dsi_connector_destroy,
@@ -1576,6 +1478,7 @@ static int exynos_dsi_create_connector(struct drm_encoder *encoder)
return ret;
}
+ connector->status = connector_status_disconnected;
drm_connector_helper_add(connector, &exynos_dsi_connector_helper_funcs);
drm_mode_connector_attach_encoder(connector, encoder);
@@ -1612,14 +1515,112 @@ static const struct drm_encoder_funcs exynos_dsi_encoder_funcs = {
MODULE_DEVICE_TABLE(of, exynos_dsi_of_match);
+static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct exynos_dsi *dsi = host_to_dsi(host);
+ struct drm_device *drm = dsi->connector.dev;
+
+ /*
+ * This is a temporary solution and should be replaced by a more generic way.
+ *
+ * If the attached panel device is a command mode one, the dsi host should
+ * register the TE interrupt handler.
+ */
+ if (!(device->mode_flags & MIPI_DSI_MODE_VIDEO)) {
+ int ret = exynos_dsi_register_te_irq(dsi, &device->dev);
+ if (ret)
+ return ret;
+ }
+
+ mutex_lock(&drm->mode_config.mutex);
+
+ dsi->lanes = device->lanes;
+ dsi->format = device->format;
+ dsi->mode_flags = device->mode_flags;
+ dsi->panel = of_drm_find_panel(device->dev.of_node);
+ if (dsi->panel) {
+ drm_panel_attach(dsi->panel, &dsi->connector);
+ dsi->connector.status = connector_status_connected;
+ }
+ exynos_drm_crtc_get_by_type(drm, EXYNOS_DISPLAY_TYPE_LCD)->i80_mode =
+ !(dsi->mode_flags & MIPI_DSI_MODE_VIDEO);
+
+ mutex_unlock(&drm->mode_config.mutex);
+
+ if (drm->mode_config.poll_enabled)
+ drm_kms_helper_hotplug_event(drm);
+
+ return 0;
+}
+
+static int exynos_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct exynos_dsi *dsi = host_to_dsi(host);
+ struct drm_device *drm = dsi->connector.dev;
+
+ mutex_lock(&drm->mode_config.mutex);
+
+ if (dsi->panel) {
+ exynos_dsi_disable(&dsi->encoder);
+ drm_panel_detach(dsi->panel);
+ dsi->panel = NULL;
+ dsi->connector.status = connector_status_disconnected;
+ }
+
+ mutex_unlock(&drm->mode_config.mutex);
+
+ if (drm->mode_config.poll_enabled)
+ drm_kms_helper_hotplug_event(drm);
+
+ exynos_dsi_unregister_te_irq(dsi);
+
+ return 0;
+}
+
+static ssize_t exynos_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct exynos_dsi *dsi = host_to_dsi(host);
+ struct exynos_dsi_transfer xfer;
+ int ret;
+
+ if (!(dsi->state & DSIM_STATE_ENABLED))
+ return -EINVAL;
+
+ if (!(dsi->state & DSIM_STATE_INITIALIZED)) {
+ ret = exynos_dsi_init(dsi);
+ if (ret)
+ return ret;
+ dsi->state |= DSIM_STATE_INITIALIZED;
+ }
+
+ ret = mipi_dsi_create_packet(&xfer.packet, msg);
+ if (ret < 0)
+ return ret;
+
+ xfer.rx_len = msg->rx_len;
+ xfer.rx_payload = msg->rx_buf;
+ xfer.flags = msg->flags;
+
+ ret = exynos_dsi_transfer(dsi, &xfer);
+ return (ret < 0) ? ret : xfer.rx_done;
+}
+
+static const struct mipi_dsi_host_ops exynos_dsi_ops = {
+ .attach = exynos_dsi_host_attach,
+ .detach = exynos_dsi_host_detach,
+ .transfer = exynos_dsi_host_transfer,
+};
+
static int exynos_dsi_of_read_u32(const struct device_node *np,
const char *propname, u32 *out_value)
{
int ret = of_property_read_u32(np, propname, out_value);
if (ret < 0)
- pr_err("%s: failed to get '%s' property\n", np->full_name,
- propname);
+ pr_err("%pOF: failed to get '%s' property\n", np, propname);
return ret;
}
@@ -1651,8 +1652,6 @@ static int exynos_dsi_parse_dt(struct exynos_dsi *dsi)
return ret;
dsi->bridge_node = of_graph_get_remote_node(node, DSI_PORT_IN, 0);
- if (!dsi->bridge_node)
- return -EINVAL;
return 0;
}
@@ -1666,20 +1665,15 @@ static int exynos_dsi_bind(struct device *dev, struct device *master,
struct drm_bridge *bridge;
int ret;
- ret = exynos_drm_crtc_get_pipe_from_type(drm_dev,
- EXYNOS_DISPLAY_TYPE_LCD);
- if (ret < 0)
- return ret;
-
- encoder->possible_crtcs = 1 << ret;
-
- DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
-
drm_encoder_init(drm_dev, encoder, &exynos_dsi_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(encoder, &exynos_dsi_encoder_helper_funcs);
+ ret = exynos_drm_set_possible_crtcs(encoder, EXYNOS_DISPLAY_TYPE_LCD);
+ if (ret < 0)
+ return ret;
+
ret = exynos_dsi_create_connector(encoder);
if (ret) {
DRM_ERROR("failed to create connector ret = %d\n", ret);
@@ -1687,9 +1681,11 @@ static int exynos_dsi_bind(struct device *dev, struct device *master,
return ret;
}
- bridge = of_drm_find_bridge(dsi->bridge_node);
- if (bridge)
- drm_bridge_attach(encoder, bridge, NULL);
+ if (dsi->bridge_node) {
+ bridge = of_drm_find_bridge(dsi->bridge_node);
+ if (bridge)
+ drm_bridge_attach(encoder, bridge, NULL);
+ }
return mipi_dsi_host_register(&dsi->dsi_host);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index d48fd7c918f8..8208df56a88f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -145,13 +145,19 @@ static struct drm_framebuffer *
exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
+ const struct drm_format_info *info = drm_get_format_info(dev, mode_cmd);
struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER];
struct drm_gem_object *obj;
struct drm_framebuffer *fb;
int i;
int ret;
- for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) {
+ for (i = 0; i < info->num_planes; i++) {
+ unsigned int height = (i == 0) ? mode_cmd->height :
+ DIV_ROUND_UP(mode_cmd->height, info->vsub);
+ unsigned long size = height * mode_cmd->pitches[i] +
+ mode_cmd->offsets[i];
+
obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
if (!obj) {
DRM_ERROR("failed to lookup gem object\n");
@@ -160,6 +166,12 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
}
exynos_gem[i] = to_exynos_gem(obj);
+
+ if (size > exynos_gem[i]->size) {
+ i++;
+ ret = -EINVAL;
+ goto err;
+ }
}
fb = exynos_drm_framebuffer_init(dev, mode_cmd, exynos_gem, i);
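Worked example with assumed numbers: for the CbCr plane of a 1920x1080 NV12 framebuffer (vsub = 2, pitches[1] = 1920, offsets[1] = 0), the minimum size is DIV_ROUND_UP(1080, 2) * 1920 = 1036800 bytes, so a GEM buffer smaller than that now fails with -EINVAL instead of being scanned out past its end.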
@@ -187,33 +199,8 @@ dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index)
return exynos_fb->dma_addr[index];
}
-static void exynos_drm_atomic_commit_tail(struct drm_atomic_state *state)
-{
- struct drm_device *dev = state->dev;
-
- drm_atomic_helper_commit_modeset_disables(dev, state);
-
- drm_atomic_helper_commit_modeset_enables(dev, state);
-
- /*
- * Exynos can't update planes with CRTCs and encoders disabled,
- * its updates routines, specially for FIMD, requires the clocks
- * to be enabled. So it is necessary to handle the modeset operations
- * *before* the commit_planes() step, this way it will always
- * have the relevant clocks enabled to perform the update.
- */
- drm_atomic_helper_commit_planes(dev, state,
- DRM_PLANE_COMMIT_ACTIVE_ONLY);
-
- drm_atomic_helper_commit_hw_done(state);
-
- drm_atomic_helper_wait_for_vblanks(dev, state);
-
- drm_atomic_helper_cleanup_planes(dev, state);
-}
-
static struct drm_mode_config_helper_funcs exynos_drm_mode_config_helpers = {
- .atomic_commit_tail = exynos_drm_atomic_commit_tail,
+ .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
};
static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
@@ -238,4 +225,6 @@ void exynos_drm_mode_config_init(struct drm_device *dev)
dev->mode_config.funcs = &exynos_drm_mode_config_funcs;
dev->mode_config.helper_private = &exynos_drm_mode_config_helpers;
+
+ dev->mode_config.allow_fb_modifiers = true;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 641531243e04..c3a068409b48 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -183,24 +183,6 @@ static const struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = {
.fb_probe = exynos_drm_fbdev_create,
};
-static bool exynos_drm_fbdev_is_anything_connected(struct drm_device *dev)
-{
- struct drm_connector *connector;
- bool ret = false;
-
- mutex_lock(&dev->mode_config.mutex);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector->status != connector_status_connected)
- continue;
-
- ret = true;
- break;
- }
- mutex_unlock(&dev->mode_config.mutex);
-
- return ret;
-}
-
int exynos_drm_fbdev_init(struct drm_device *dev)
{
struct exynos_drm_fbdev *fbdev;
@@ -211,9 +193,6 @@ int exynos_drm_fbdev_init(struct drm_device *dev)
if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector)
return 0;
- if (!exynos_drm_fbdev_is_anything_connected(dev))
- return 0;
-
fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
if (!fbdev)
return -ENOMEM;
@@ -304,8 +283,5 @@ void exynos_drm_output_poll_changed(struct drm_device *dev)
struct exynos_drm_private *private = dev->dev_private;
struct drm_fb_helper *fb_helper = private->fb_helper;
- if (fb_helper)
- drm_fb_helper_hotplug_event(fb_helper);
- else
- exynos_drm_fbdev_init(dev);
+ drm_fb_helper_hotplug_event(fb_helper);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 60f93cad6643..d42ae2bc3e56 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -583,18 +583,12 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win,
val |= WINCONx_BURSTLEN_16WORD;
break;
case DRM_FORMAT_ARGB8888:
+ default:
val |= WINCON1_BPPMODE_25BPP_A1888
| WINCON1_BLD_PIX | WINCON1_ALPHA_SEL;
val |= WINCONx_WSWP;
val |= WINCONx_BURSTLEN_16WORD;
break;
- default:
- DRM_DEBUG_KMS("invalid pixel size so using unpacked 24bpp.\n");
-
- val |= WINCON0_BPPMODE_24BPP_888;
- val |= WINCONx_WSWP;
- val |= WINCONx_BURSTLEN_16WORD;
- break;
}
/*
@@ -718,13 +712,13 @@ static void fimd_update_plane(struct exynos_drm_crtc *crtc,
unsigned long val, size, offset;
unsigned int last_x, last_y, buf_offsize, line_size;
unsigned int win = plane->index;
- unsigned int bpp = fb->format->cpp[0];
+ unsigned int cpp = fb->format->cpp[0];
unsigned int pitch = fb->pitches[0];
if (ctx->suspended)
return;
- offset = state->src.x * bpp;
+ offset = state->src.x * cpp;
offset += state->src.y * pitch;
/* buffer start address */
@@ -743,8 +737,8 @@ static void fimd_update_plane(struct exynos_drm_crtc *crtc,
state->crtc.w, state->crtc.h);
/* buffer size */
- buf_offsize = pitch - (state->crtc.w * bpp);
- line_size = state->crtc.w * bpp;
+ buf_offsize = pitch - (state->crtc.w * cpp);
+ line_size = state->crtc.w * cpp;
val = VIDW_BUF_SIZE_OFFSET(buf_offsize) |
VIDW_BUF_SIZE_PAGEWIDTH(line_size) |
VIDW_BUF_SIZE_OFFSET_E(buf_offsize) |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index c23479be4850..077de014d610 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -286,8 +286,8 @@ int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data,
{
struct drm_exynos_gem_map *args = data;
- return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
- &args->offset);
+ return drm_gem_dumb_map_offset(file_priv, dev, args->handle,
+ &args->offset);
}
dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
@@ -422,32 +422,6 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
return 0;
}
-int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
- struct drm_device *dev, uint32_t handle,
- uint64_t *offset)
-{
- struct drm_gem_object *obj;
- int ret = 0;
-
- /*
- * get offset of memory allocated for drm framebuffer.
- * - this callback would be called by user application
- * with DRM_IOCTL_MODE_MAP_DUMB command.
- */
-
- obj = drm_gem_object_lookup(file_priv, handle);
- if (!obj) {
- DRM_ERROR("failed to lookup gem object.\n");
- return -EINVAL;
- }
-
- *offset = drm_vma_node_offset_addr(&obj->vma_node);
- DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
-
- drm_gem_object_unreference_unlocked(obj);
- return ret;
-}
-
int exynos_drm_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 85457255fcd1..e86d1a9518c3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -110,11 +110,6 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-/* map memory region for drm framebuffer to user space. */
-int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
- struct drm_device *dev, uint32_t handle,
- uint64_t *offset);
-
/* page fault handler and mmap fault address(virtual) to physical memory. */
int exynos_drm_gem_fault(struct vm_fault *vmf);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index e45720543a45..ba4a32b132ba 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -21,9 +21,12 @@
#include <linux/component.h>
#include <linux/pm_runtime.h>
#include <drm/drmP.h>
+#include <drm/drm_encoder.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
+#include "exynos_drm_drv.h"
+
/* Sysreg registers for MIC */
#define DSD_CFG_MUX 0x1004
#define MIC0_RGB_MUX (1 << 0)
@@ -85,12 +88,6 @@
#define MIC_BS_SIZE_2D(x) ((x) & 0x3fff)
-enum {
- ENDPOINT_DECON_NODE,
- ENDPOINT_DSI_NODE,
- NUM_ENDPOINTS
-};
-
static char *clk_names[] = { "pclk_mic0", "sclk_rgb_vclk_to_mic0" };
#define NUM_CLKS ARRAY_SIZE(clk_names)
static DEFINE_MUTEX(mic_mutex);
@@ -229,36 +226,6 @@ static void mic_set_reg_on(struct exynos_mic *mic, bool enable)
writel(reg, mic->reg + MIC_OP);
}
-static int parse_dt(struct exynos_mic *mic)
-{
- int ret = 0, i, j;
- struct device_node *remote_node;
- struct device_node *nodes[3];
-
- /*
- * The order of endpoints does matter.
- * The first node must be for decon and the second one must be for dsi.
- */
- for (i = 0, j = 0; i < NUM_ENDPOINTS; i++) {
- remote_node = of_graph_get_remote_node(mic->dev->of_node, i, 0);
- if (!remote_node) {
- ret = -EPIPE;
- goto exit;
- }
- nodes[j++] = remote_node;
-
- if (i == ENDPOINT_DECON_NODE &&
- of_get_child_by_name(remote_node, "i80-if-timings"))
- mic->i80_mode = 1;
- }
-
-exit:
- while (--j > -1)
- of_node_put(nodes[j]);
-
- return ret;
-}
-
static void mic_disable(struct drm_bridge *bridge) { }
static void mic_post_disable(struct drm_bridge *bridge)
@@ -286,6 +253,7 @@ static void mic_mode_set(struct drm_bridge *bridge,
mutex_lock(&mic_mutex);
drm_display_mode_to_videomode(mode, &mic->vm);
+ mic->i80_mode = to_exynos_crtc(bridge->encoder->crtc)->i80_mode;
mutex_unlock(&mic_mutex);
}
@@ -340,16 +308,10 @@ static int exynos_mic_bind(struct device *dev, struct device *master,
void *data)
{
struct exynos_mic *mic = dev_get_drvdata(dev);
- int ret;
- mic->bridge.funcs = &mic_bridge_funcs;
- mic->bridge.of_node = dev->of_node;
mic->bridge.driver_private = mic;
- ret = drm_bridge_add(&mic->bridge);
- if (ret)
- DRM_ERROR("mic: Failed to add MIC to the global bridge list\n");
- return ret;
+ return 0;
}
static void exynos_mic_unbind(struct device *dev, struct device *master,
@@ -365,8 +327,6 @@ static void exynos_mic_unbind(struct device *dev, struct device *master,
already_disabled:
mutex_unlock(&mic_mutex);
-
- drm_bridge_remove(&mic->bridge);
}
static const struct component_ops exynos_mic_component_ops = {
@@ -425,10 +385,6 @@ static int exynos_mic_probe(struct platform_device *pdev)
mic->dev = dev;
- ret = parse_dt(mic);
- if (ret)
- goto err;
-
ret = of_address_to_resource(dev->of_node, 0, &res);
if (ret) {
DRM_ERROR("mic: Failed to get mem region for MIC\n");
@@ -461,6 +417,15 @@ static int exynos_mic_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mic);
+ mic->bridge.funcs = &mic_bridge_funcs;
+ mic->bridge.of_node = dev->of_node;
+
+ ret = drm_bridge_add(&mic->bridge);
+ if (ret) {
+ DRM_ERROR("mic: Failed to add MIC to the global bridge list\n");
+ return ret;
+ }
+
pm_runtime_enable(dev);
ret = component_add(dev, &exynos_mic_component_ops);
@@ -479,8 +444,13 @@ err:
static int exynos_mic_remove(struct platform_device *pdev)
{
+ struct exynos_mic *mic = platform_get_drvdata(pdev);
+
component_del(&pdev->dev, &exynos_mic_component_ops);
pm_runtime_disable(&pdev->dev);
+
+ drm_bridge_remove(&mic->bridge);
+
return 0;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 611b6fd65433..d2a90dae5c71 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -173,13 +173,35 @@ static struct drm_plane_funcs exynos_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
- .set_property = drm_atomic_helper_plane_set_property,
.reset = exynos_drm_plane_reset,
.atomic_duplicate_state = exynos_drm_plane_duplicate_state,
.atomic_destroy_state = exynos_drm_plane_destroy_state,
};
static int
+exynos_drm_plane_check_format(const struct exynos_drm_plane_config *config,
+ struct exynos_drm_plane_state *state)
+{
+ struct drm_framebuffer *fb = state->base.fb;
+
+ switch (fb->modifier) {
+ case DRM_FORMAT_MOD_SAMSUNG_64_32_TILE:
+ if (!(config->capabilities & EXYNOS_DRM_PLANE_CAP_TILE))
+ return -ENOTSUPP;
+ break;
+
+ case DRM_FORMAT_MOD_LINEAR:
+ break;
+
+ default:
+ DRM_ERROR("unsupported pixel format modifier");
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
exynos_drm_plane_check_size(const struct exynos_drm_plane_config *config,
struct exynos_drm_plane_state *state)
{
@@ -223,6 +245,10 @@ static int exynos_plane_atomic_check(struct drm_plane *plane,
/* translate state into exynos_state */
exynos_plane_mode_set(exynos_state);
+ ret = exynos_drm_plane_check_format(exynos_plane->config, exynos_state);
+ if (ret)
+ return ret;
+
ret = exynos_drm_plane_check_size(exynos_plane->config, exynos_state);
return ret;
}
@@ -283,7 +309,7 @@ int exynos_plane_init(struct drm_device *dev,
&exynos_plane_funcs,
config->pixel_formats,
config->num_pixel_formats,
- config->type, NULL);
+ NULL, config->type, NULL);
if (err) {
DRM_ERROR("failed to initialize plane\n");
return err;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index cb8a72842537..53e03f8af3d5 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -289,7 +289,6 @@ static void vidi_connector_destroy(struct drm_connector *connector)
}
static const struct drm_connector_funcs vidi_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = vidi_detect,
.destroy = vidi_connector_destroy,
@@ -382,7 +381,7 @@ static int vidi_bind(struct device *dev, struct device *master, void *data)
struct exynos_drm_plane *exynos_plane;
struct exynos_drm_plane_config plane_config = { 0 };
unsigned int i;
- int pipe, ret;
+ int ret;
ctx->drm_dev = drm_dev;
@@ -407,20 +406,15 @@ static int vidi_bind(struct device *dev, struct device *master, void *data)
return PTR_ERR(ctx->crtc);
}
- pipe = exynos_drm_crtc_get_pipe_from_type(drm_dev,
- EXYNOS_DISPLAY_TYPE_VIDI);
- if (pipe < 0)
- return pipe;
-
- encoder->possible_crtcs = 1 << pipe;
-
- DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
-
drm_encoder_init(drm_dev, encoder, &exynos_vidi_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(encoder, &exynos_vidi_encoder_helper_funcs);
+ ret = exynos_drm_set_possible_crtcs(encoder, EXYNOS_DISPLAY_TYPE_VIDI);
+ if (ret < 0)
+ return ret;
+
ret = vidi_create_connector(encoder);
if (ret) {
DRM_ERROR("failed to create connector ret = %d\n", ret);
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 06bfbe400cf1..214fa5e51963 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -784,7 +784,7 @@ static void hdmi_reg_infoframes(struct hdmi_context *hdata)
}
ret = drm_hdmi_avi_infoframe_from_display_mode(&frm.avi,
- &hdata->current_mode);
+ &hdata->current_mode, false);
if (!ret)
ret = hdmi_avi_infoframe_pack(&frm.avi, buf, sizeof(buf));
if (ret > 0) {
@@ -835,7 +835,6 @@ static void hdmi_connector_destroy(struct drm_connector *connector)
}
static const struct drm_connector_funcs hdmi_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = hdmi_detect,
.destroy = hdmi_connector_destroy,
@@ -1501,8 +1500,6 @@ static void hdmi_disable(struct drm_encoder *encoder)
*/
cancel_delayed_work(&hdata->hotplug_work);
cec_notifier_set_phys_addr(hdata->notifier, CEC_PHYS_ADDR_INVALID);
-
- hdmiphy_disable(hdata);
}
static const struct drm_encoder_helper_funcs exynos_hdmi_encoder_helper_funcs = {
@@ -1676,7 +1673,7 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
return hdmi_bridge_init(hdata);
}
-static struct of_device_id hdmi_match_types[] = {
+static const struct of_device_id hdmi_match_types[] = {
{
.compatible = "samsung,exynos4210-hdmi",
.data = &exynos4210_hdmi_driver_data,
@@ -1700,32 +1697,25 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
struct drm_device *drm_dev = data;
struct hdmi_context *hdata = dev_get_drvdata(dev);
struct drm_encoder *encoder = &hdata->encoder;
- struct exynos_drm_crtc *exynos_crtc;
- struct drm_crtc *crtc;
- int ret, pipe;
+ struct exynos_drm_crtc *crtc;
+ int ret;
hdata->drm_dev = drm_dev;
- pipe = exynos_drm_crtc_get_pipe_from_type(drm_dev,
- EXYNOS_DISPLAY_TYPE_HDMI);
- if (pipe < 0)
- return pipe;
-
hdata->phy_clk.enable = hdmiphy_clk_enable;
- crtc = drm_crtc_from_index(drm_dev, pipe);
- exynos_crtc = to_exynos_crtc(crtc);
- exynos_crtc->pipe_clk = &hdata->phy_clk;
-
- encoder->possible_crtcs = 1 << pipe;
-
- DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
-
drm_encoder_init(drm_dev, encoder, &exynos_hdmi_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
drm_encoder_helper_add(encoder, &exynos_hdmi_encoder_helper_funcs);
+ ret = exynos_drm_set_possible_crtcs(encoder, EXYNOS_DISPLAY_TYPE_HDMI);
+ if (ret < 0)
+ return ret;
+
+ crtc = exynos_drm_crtc_get_by_type(drm_dev, EXYNOS_DISPLAY_TYPE_HDMI);
+ crtc->pipe_clk = &hdata->phy_clk;
+
ret = hdmi_create_connector(encoder);
if (ret) {
DRM_ERROR("failed to create connector ret = %d\n", ret);
@@ -1934,8 +1924,7 @@ static int hdmi_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int exynos_hdmi_suspend(struct device *dev)
+static int __maybe_unused exynos_hdmi_suspend(struct device *dev)
{
struct hdmi_context *hdata = dev_get_drvdata(dev);
@@ -1944,7 +1933,7 @@ static int exynos_hdmi_suspend(struct device *dev)
return 0;
}
-static int exynos_hdmi_resume(struct device *dev)
+static int __maybe_unused exynos_hdmi_resume(struct device *dev)
{
struct hdmi_context *hdata = dev_get_drvdata(dev);
int ret;
@@ -1955,7 +1944,6 @@ static int exynos_hdmi_resume(struct device *dev)
return 0;
}
-#endif
static const struct dev_pm_ops exynos_hdmi_pm_ops = {
SET_RUNTIME_PM_OPS(exynos_hdmi_suspend, exynos_hdmi_resume, NULL)
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 6bed4f3ffcd6..002755415e00 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -148,7 +148,8 @@ static const struct exynos_drm_plane_config plane_configs[MIXER_WIN_NR] = {
.pixel_formats = vp_formats,
.num_pixel_formats = ARRAY_SIZE(vp_formats),
.capabilities = EXYNOS_DRM_PLANE_CAP_SCALE |
- EXYNOS_DRM_PLANE_CAP_ZPOS,
+ EXYNOS_DRM_PLANE_CAP_ZPOS |
+ EXYNOS_DRM_PLANE_CAP_TILE,
},
};
@@ -483,29 +484,18 @@ static void vp_video_buffer(struct mixer_context *ctx,
unsigned int priority = state->base.normalized_zpos + 1;
unsigned long flags;
dma_addr_t luma_addr[2], chroma_addr[2];
- bool tiled_mode = false;
- bool crcb_mode = false;
+ bool is_tiled, is_nv21;
u32 val;
- switch (fb->format->format) {
- case DRM_FORMAT_NV12:
- crcb_mode = false;
- break;
- case DRM_FORMAT_NV21:
- crcb_mode = true;
- break;
- default:
- DRM_ERROR("pixel format for vp is wrong [%d].\n",
- fb->format->format);
- return;
- }
+ is_nv21 = (fb->format->format == DRM_FORMAT_NV21);
+ is_tiled = (fb->modifier == DRM_FORMAT_MOD_SAMSUNG_64_32_TILE);
luma_addr[0] = exynos_drm_fb_dma_addr(fb, 0);
chroma_addr[0] = exynos_drm_fb_dma_addr(fb, 1);
if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
__set_bit(MXR_BIT_INTERLACE, &ctx->flags);
- if (tiled_mode) {
+ if (is_tiled) {
luma_addr[1] = luma_addr[0] + 0x40;
chroma_addr[1] = chroma_addr[0] + 0x40;
} else {
@@ -525,14 +515,14 @@ static void vp_video_buffer(struct mixer_context *ctx,
vp_reg_writemask(res, VP_MODE, val, VP_MODE_LINE_SKIP);
/* setup format */
- val = (crcb_mode ? VP_MODE_NV21 : VP_MODE_NV12);
- val |= (tiled_mode ? VP_MODE_MEM_TILED : VP_MODE_MEM_LINEAR);
+ val = (is_nv21 ? VP_MODE_NV21 : VP_MODE_NV12);
+ val |= (is_tiled ? VP_MODE_MEM_TILED : VP_MODE_MEM_LINEAR);
vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK);
/* setting size of input image */
vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(fb->pitches[0]) |
VP_IMG_VSIZE(fb->height));
- /* chroma height has to reduced by 2 to avoid chroma distorions */
+ /* chroma plane for NV12/NV21 is half the height of the luma plane */
vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(fb->pitches[0]) |
VP_IMG_VSIZE(fb->height / 2));
@@ -594,7 +584,7 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
unsigned long flags;
unsigned int win = plane->index;
unsigned int x_ratio = 0, y_ratio = 0;
- unsigned int src_x_offset, src_y_offset, dst_x_offset, dst_y_offset;
+ unsigned int dst_x_offset, dst_y_offset;
dma_addr_t dma_addr;
unsigned int fmt;
u32 val;
@@ -616,12 +606,9 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
+ default:
fmt = MXR_FORMAT_ARGB8888;
break;
-
- default:
- DRM_DEBUG_KMS("pixelformat unsupported by mixer\n");
- return;
}
/* ratio is already checked by common plane code */
@@ -631,12 +618,10 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
dst_x_offset = state->crtc.x;
dst_y_offset = state->crtc.y;
- /* converting dma address base and source offset */
+ /* translate dma address base so that the source image offset is zero */
dma_addr = exynos_drm_fb_dma_addr(fb, 0)
+ (state->src.x * fb->format->cpp[0])
+ (state->src.y * fb->pitches[0]);
- src_x_offset = 0;
- src_y_offset = 0;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
__set_bit(MXR_BIT_INTERLACE, &ctx->flags);
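Folding the source offset into the DMA base address lets the mixer always scan from (0,0). A quick worked example with assumed numbers, not taken from the patch (XRGB8888, so cpp[0] = 4):

	/* Assumed values for illustration: src.x = 10, src.y = 20,
	 * pitches[0] = 4096 bytes, cpp[0] = 4.
	 *
	 *   dma_addr = base + 10 * 4 + 20 * 4096
	 *            = base + 40 + 81920
	 *            = base + 81960
	 *
	 * With the offset baked into the address, the MXR_GRAPHIC_SXY
	 * source-offset register can stay at zero, which mixer_win_reset()
	 * now programs once instead of on every frame (see the hunk below).
	 */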
@@ -667,11 +652,6 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
val |= MXR_GRP_WH_V_SCALE(y_ratio);
mixer_reg_write(res, MXR_GRAPHIC_WH(win), val);
- /* setup offsets in source image */
- val = MXR_GRP_SXY_SX(src_x_offset);
- val |= MXR_GRP_SXY_SY(src_y_offset);
- mixer_reg_write(res, MXR_GRAPHIC_SXY(win), val);
-
/* setup offsets in display image */
val = MXR_GRP_DXY_DX(dst_x_offset);
val |= MXR_GRP_DXY_DY(dst_y_offset);
@@ -748,6 +728,10 @@ static void mixer_win_reset(struct mixer_context *ctx)
if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags))
mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_VP_ENABLE);
+ /* set all source image offsets to zero */
+ mixer_reg_write(res, MXR_GRAPHIC_SXY(0), 0);
+ mixer_reg_write(res, MXR_GRAPHIC_SXY(1), 0);
+
spin_unlock_irqrestore(&res->reg_slock, flags);
}
@@ -1094,28 +1078,28 @@ static const struct exynos_drm_crtc_ops mixer_crtc_ops = {
.atomic_check = mixer_atomic_check,
};
-static struct mixer_drv_data exynos5420_mxr_drv_data = {
+static const struct mixer_drv_data exynos5420_mxr_drv_data = {
.version = MXR_VER_128_0_0_184,
.is_vp_enabled = 0,
};
-static struct mixer_drv_data exynos5250_mxr_drv_data = {
+static const struct mixer_drv_data exynos5250_mxr_drv_data = {
.version = MXR_VER_16_0_33_0,
.is_vp_enabled = 0,
};
-static struct mixer_drv_data exynos4212_mxr_drv_data = {
+static const struct mixer_drv_data exynos4212_mxr_drv_data = {
.version = MXR_VER_0_0_0_16,
.is_vp_enabled = 1,
};
-static struct mixer_drv_data exynos4210_mxr_drv_data = {
+static const struct mixer_drv_data exynos4210_mxr_drv_data = {
.version = MXR_VER_0_0_0_16,
.is_vp_enabled = 1,
.has_sclk = 1,
};
-static struct of_device_id mixer_match_types[] = {
+static const struct of_device_id mixer_match_types[] = {
{
.compatible = "samsung,exynos4210-mixer",
.data = &exynos4210_mxr_drv_data,
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
index cc4e944a1d3c..0e3752437e44 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
@@ -63,7 +63,8 @@ static void fsl_dcu_drm_crtc_atomic_disable(struct drm_crtc *crtc,
clk_disable_unprepare(fsl_dev->pix_clk);
}
-static void fsl_dcu_drm_crtc_enable(struct drm_crtc *crtc)
+static void fsl_dcu_drm_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct drm_device *dev = crtc->dev;
struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
@@ -133,7 +134,7 @@ static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
static const struct drm_crtc_helper_funcs fsl_dcu_drm_crtc_helper_funcs = {
.atomic_disable = fsl_dcu_drm_crtc_atomic_disable,
.atomic_flush = fsl_dcu_drm_crtc_atomic_flush,
- .enable = fsl_dcu_drm_crtc_enable,
+ .atomic_enable = fsl_dcu_drm_crtc_atomic_enable,
.mode_set_nofb = fsl_dcu_drm_crtc_mode_set_nofb,
};
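The same conversion recurs for hibmc and kirin further down: the CRTC helper enable/disable hooks move to .atomic_enable/.atomic_disable and gain the old CRTC state as a second argument. Sketched generically (the foo_* names are placeholders, not from this patch):

	static void foo_crtc_atomic_enable(struct drm_crtc *crtc,
					   struct drm_crtc_state *old_state)
	{
		/* same body as the old foo_crtc_enable(crtc) */
	}

	static void foo_crtc_atomic_disable(struct drm_crtc *crtc,
					    struct drm_crtc_state *old_state)
	{
		/* same body as the old foo_crtc_disable(crtc) */
	}

	static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
		.atomic_enable	= foo_crtc_atomic_enable,	/* was .enable */
		.atomic_disable	= foo_crtc_atomic_disable,	/* was .disable */
		/* remaining hooks unchanged */
	};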
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index 5cbde196895a..58e9e0601a61 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -176,8 +176,6 @@ static struct drm_driver fsl_dcu_drm_driver = {
.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
.gem_prime_mmap = drm_gem_cma_prime_mmap,
.dumb_create = drm_gem_cma_dumb_create,
- .dumb_map_offset = drm_gem_cma_dumb_map_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
.fops = &fsl_dcu_drm_fops,
.name = "fsl-dcu-drm",
.desc = "Freescale DCU DRM",
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
index 0a20723aa6e1..9554b245746e 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
@@ -224,7 +224,7 @@ struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev)
&fsl_dcu_drm_plane_funcs,
fsl_dcu_drm_plane_formats,
ARRAY_SIZE(fsl_dcu_drm_plane_formats),
- DRM_PLANE_TYPE_PRIMARY, NULL);
+ NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret) {
kfree(primary);
primary = NULL;
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index dcbf3c06e1d8..edd7d8127d19 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -63,7 +63,6 @@ static const struct drm_connector_funcs fsl_dcu_drm_connector_funcs = {
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.destroy = fsl_dcu_drm_connector_destroy,
- .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.reset = drm_atomic_helper_connector_reset,
};
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 7da70b6c83f0..2570c7f647a6 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -479,26 +479,6 @@ static struct drm_framebuffer *psb_user_framebuffer_create
return psb_framebuffer_create(dev, cmd, r);
}
-static void psbfb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
- u16 blue, int regno)
-{
- struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
-
- gma_crtc->lut_r[regno] = red >> 8;
- gma_crtc->lut_g[regno] = green >> 8;
- gma_crtc->lut_b[regno] = blue >> 8;
-}
-
-static void psbfb_gamma_get(struct drm_crtc *crtc, u16 *red,
- u16 *green, u16 *blue, int regno)
-{
- struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
-
- *red = gma_crtc->lut_r[regno] << 8;
- *green = gma_crtc->lut_g[regno] << 8;
- *blue = gma_crtc->lut_b[regno] << 8;
-}
-
static int psbfb_probe(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
@@ -525,8 +505,6 @@ static int psbfb_probe(struct drm_fb_helper *helper,
}
static const struct drm_fb_helper_funcs psb_fb_helper_funcs = {
- .gamma_set = psbfb_gamma_set,
- .gamma_get = psbfb_gamma_get,
.fb_probe = psbfb_probe,
};
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index 7da061aab729..131239759a75 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -48,36 +48,6 @@ int psb_gem_get_aperture(struct drm_device *dev, void *data,
}
/**
- * psb_gem_dumb_map_gtt - buffer mapping for dumb interface
- * @file: our drm client file
- * @dev: drm device
- * @handle: GEM handle to the object (from dumb_create)
- *
- * Do the necessary setup to allow the mapping of the frame buffer
- * into user memory. We don't have to do much here at the moment.
- */
-int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
- uint32_t handle, uint64_t *offset)
-{
- int ret = 0;
- struct drm_gem_object *obj;
-
- /* GEM does all our handle to object mapping */
- obj = drm_gem_object_lookup(file, handle);
- if (obj == NULL)
- return -ENOENT;
-
- /* Make it mmapable */
- ret = drm_gem_create_mmap_offset(obj);
- if (ret)
- goto out;
- *offset = drm_vma_node_offset_addr(&obj->vma_node);
-out:
- drm_gem_object_unreference_unlocked(obj);
- return ret;
-}
-
-/**
* psb_gem_create - create a mappable object
* @file: the DRM file of the client
* @dev: our device
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index e7fd356acf2e..f3c48a2be71b 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -144,33 +144,32 @@ void gma_crtc_load_lut(struct drm_crtc *crtc)
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
int palreg = map->palette;
+ u16 *r, *g, *b;
int i;
/* The clocks have to be on to load the palette. */
if (!crtc->enabled)
return;
+ r = crtc->gamma_store;
+ g = r + crtc->gamma_size;
+ b = g + crtc->gamma_size;
+
if (gma_power_begin(dev, false)) {
for (i = 0; i < 256; i++) {
REG_WRITE(palreg + 4 * i,
- ((gma_crtc->lut_r[i] +
- gma_crtc->lut_adj[i]) << 16) |
- ((gma_crtc->lut_g[i] +
- gma_crtc->lut_adj[i]) << 8) |
- (gma_crtc->lut_b[i] +
- gma_crtc->lut_adj[i]));
+ (((*r++ >> 8) + gma_crtc->lut_adj[i]) << 16) |
+ (((*g++ >> 8) + gma_crtc->lut_adj[i]) << 8) |
+ ((*b++ >> 8) + gma_crtc->lut_adj[i]));
}
gma_power_end(dev);
} else {
for (i = 0; i < 256; i++) {
/* FIXME: Why pipe[0] and not pipe[..._crtc->pipe]? */
dev_priv->regs.pipe[0].palette[i] =
- ((gma_crtc->lut_r[i] +
- gma_crtc->lut_adj[i]) << 16) |
- ((gma_crtc->lut_g[i] +
- gma_crtc->lut_adj[i]) << 8) |
- (gma_crtc->lut_b[i] +
- gma_crtc->lut_adj[i]);
+ (((*r++ >> 8) + gma_crtc->lut_adj[i]) << 16) |
+ (((*g++ >> 8) + gma_crtc->lut_adj[i]) << 8) |
+ ((*b++ >> 8) + gma_crtc->lut_adj[i]);
}
}
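The rewrite above reads the palette straight from crtc->gamma_store, which the DRM core fills from the gamma ioctls: one contiguous array of 3 * gamma_size u16 entries, red first, then green, then blue. A small sketch of that layout under the usual gamma_size == 256 assumption:

	/* Layout assumed above (gamma_size == 256):
	 *
	 *   gamma_store: [ R0 .. R255 | G0 .. G255 | B0 .. B255 ]  (u16 each)
	 *
	 * so the three channel pointers are simple offsets:
	 */
	u16 *r = crtc->gamma_store;
	u16 *g = r + crtc->gamma_size;
	u16 *b = g + crtc->gamma_size;
	/* the hardware LUT only wants 8 bits per channel, hence the >> 8 */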
@@ -180,15 +179,6 @@ int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue,
u32 size,
struct drm_modeset_acquire_ctx *ctx)
{
- struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
- int i;
-
- for (i = 0; i < size; i++) {
- gma_crtc->lut_r[i] = red[i] >> 8;
- gma_crtc->lut_g[i] = green[i] >> 8;
- gma_crtc->lut_b[i] = blue[i] >> 8;
- }
-
gma_crtc_load_lut(crtc);
return 0;
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
index 1616af209bfc..c50534c923df 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
@@ -520,7 +520,7 @@ static int __read_panel_data(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
u8 *data, u16 len, u32 *data_out, u16 len_out, bool hs)
{
unsigned long flags;
- struct drm_device *dev = sender->dev;
+ struct drm_device *dev;
int i;
u32 gen_data_reg;
int retry = MDFLD_DSI_READ_MAX_COUNT;
@@ -530,6 +530,8 @@ static int __read_panel_data(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
return -EINVAL;
}
+ dev = sender->dev;
+
/**
* do reading.
* 0) send out generic read request
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
index 63c6e08600ae..531e4450c000 100644
--- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -737,11 +737,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
sizeof(struct drm_display_mode));
list_for_each_entry(connector, &mode_config->connector_list, head) {
- if (!connector)
- continue;
-
encoder = connector->encoder;
-
if (!encoder)
continue;
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 1f9b35afefee..37a3be71acd9 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -480,7 +480,6 @@ static struct drm_driver driver = {
.load = psb_driver_load,
.unload = psb_driver_unload,
.lastclose = psb_driver_lastclose,
- .set_busid = drm_pci_set_busid,
.num_ioctls = ARRAY_SIZE(psb_ioctls),
.irq_preinstall = psb_irq_preinstall,
@@ -495,8 +494,6 @@ static struct drm_driver driver = {
.gem_vm_ops = &psb_gem_vm_ops,
.dumb_create = psb_gem_dumb_create,
- .dumb_map_offset = psb_gem_dumb_map_gtt,
- .dumb_destroy = drm_gem_dumb_destroy,
.ioctls = psb_ioctls,
.fops = &psb_gem_fops,
.name = DRIVER_NAME,
@@ -517,12 +514,12 @@ static struct pci_driver psb_pci_driver = {
static int __init psb_init(void)
{
- return drm_pci_init(&driver, &psb_pci_driver);
+ return pci_register_driver(&psb_pci_driver);
}
static void __exit psb_exit(void)
{
- drm_pci_exit(&driver, &psb_pci_driver);
+ pci_unregister_driver(&psb_pci_driver);
}
late_initcall(psb_init);
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 83667087d6e5..821497dbd3fc 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -750,8 +750,6 @@ extern int psb_gem_get_aperture(struct drm_device *dev, void *data,
struct drm_file *file);
extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
-extern int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
- uint32_t handle, uint64_t *offset);
extern int psb_gem_fault(struct vm_fault *vmf);
/* psb_device.c */
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 7b6c84925098..8762efaef283 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -518,13 +518,8 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe,
gma_crtc->pipe = pipe;
gma_crtc->plane = pipe;
- for (i = 0; i < 256; i++) {
- gma_crtc->lut_r[i] = i;
- gma_crtc->lut_g[i] = i;
- gma_crtc->lut_b[i] = i;
-
+ for (i = 0; i < 256; i++)
gma_crtc->lut_adj[i] = 0;
- }
gma_crtc->mode_dev = mode_dev;
gma_crtc->cursor_addr = 0;
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index 6a10215fc42d..e8e4ea14b12b 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -172,7 +172,6 @@ struct gma_crtc {
int plane;
uint32_t cursor_addr;
struct gtt_range *cursor_gt;
- u8 lut_r[256], lut_g[256], lut_b[256];
u8 lut_adj[256];
struct psb_intel_framebuffer *fbdev_fb;
/* a mode_set for fbdev users on this crtc */
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
index 59542bddc980..a956545774a3 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
@@ -150,7 +150,6 @@ static const u32 channel_formats1[] = {
static struct drm_plane_funcs hibmc_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .set_property = drm_atomic_helper_plane_set_property,
.destroy = drm_plane_cleanup,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
@@ -181,6 +180,7 @@ static struct drm_plane *hibmc_plane_init(struct hibmc_drm_private *priv)
ret = drm_universal_plane_init(dev, plane, 1, &hibmc_plane_funcs,
channel_formats1,
ARRAY_SIZE(channel_formats1),
+ NULL,
DRM_PLANE_TYPE_PRIMARY,
NULL);
if (ret) {
@@ -192,7 +192,8 @@ static struct drm_plane *hibmc_plane_init(struct hibmc_drm_private *priv)
return plane;
}
-static void hibmc_crtc_enable(struct drm_crtc *crtc)
+static void hibmc_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
unsigned int reg;
struct hibmc_drm_private *priv = crtc->dev->dev_private;
@@ -209,7 +210,8 @@ static void hibmc_crtc_enable(struct drm_crtc *crtc)
drm_crtc_vblank_on(crtc);
}
-static void hibmc_crtc_disable(struct drm_crtc *crtc)
+static void hibmc_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
unsigned int reg;
struct hibmc_drm_private *priv = crtc->dev->dev_private;
@@ -453,11 +455,11 @@ static const struct drm_crtc_funcs hibmc_crtc_funcs = {
};
static const struct drm_crtc_helper_funcs hibmc_crtc_helper_funcs = {
- .enable = hibmc_crtc_enable,
- .disable = hibmc_crtc_disable,
.mode_set_nofb = hibmc_crtc_mode_set_nofb,
.atomic_begin = hibmc_crtc_atomic_begin,
.atomic_flush = hibmc_crtc_atomic_flush,
+ .atomic_enable = hibmc_crtc_atomic_enable,
+ .atomic_disable = hibmc_crtc_atomic_disable,
};
int hibmc_de_init(struct hibmc_drm_private *priv)
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index 2ffdbf9801bd..d4f6f1f9df5b 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -67,7 +67,6 @@ static struct drm_driver hibmc_driver = {
.gem_free_object_unlocked = hibmc_gem_free_object,
.dumb_create = hibmc_dumb_create,
.dumb_map_offset = hibmc_dumb_mmap_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
.irq_handler = hibmc_drm_interrupt,
};
@@ -276,11 +275,12 @@ static int hibmc_unload(struct drm_device *dev)
hibmc_fbdev_fini(priv);
+ drm_atomic_helper_shutdown(dev);
+
if (dev->irq_enabled)
drm_irq_uninstall(dev);
if (priv->msi_enabled)
pci_disable_msi(dev->pdev);
- drm_vblank_cleanup(dev);
hibmc_kms_fini(priv);
hibmc_mm_fini(priv);
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
index f5ac80daeef2..b92595c477ef 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
@@ -131,7 +131,6 @@ static int hibmc_drm_fb_create(struct drm_fb_helper *helper,
strcpy(info->fix.id, "hibmcdrmfb");
- info->flags = FBINFO_DEFAULT;
info->fbops = &hibmc_drm_fb_ops;
drm_fb_helper_fill_fix(info, hi_fbdev->fb->fb.pitches[0],
@@ -158,7 +157,7 @@ out_unpin_bo:
out_unreserve_ttm_bo:
ttm_bo_unreserve(&bo->bo);
out_unref_gem:
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return ret;
}
@@ -173,7 +172,7 @@ static void hibmc_fbdev_destroy(struct hibmc_fbdev *fbdev)
drm_fb_helper_fini(fbh);
if (gfb)
- drm_framebuffer_unreference(&gfb->fb);
+ drm_framebuffer_put(&gfb->fb);
}
static const struct drm_fb_helper_funcs hibmc_fbdev_helper_funcs = {
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
index 12a18557c5fd..ec4dd9df9150 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
@@ -47,7 +47,6 @@ static const struct drm_connector_helper_funcs
};
static const struct drm_connector_funcs hibmc_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
index ac457c779caa..3518167a7dc4 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
@@ -444,7 +444,7 @@ int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
}
ret = drm_gem_handle_create(file, gobj, &handle);
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
if (ret) {
DRM_ERROR("failed to unreference GEM object: %d\n", ret);
return ret;
@@ -479,7 +479,7 @@ int hibmc_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
bo = gem_to_hibmc_bo(obj);
*offset = hibmc_bo_mmap_offset(bo);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return 0;
}
@@ -487,7 +487,7 @@ static void hibmc_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct hibmc_framebuffer *hibmc_fb = to_hibmc_framebuffer(fb);
- drm_gem_object_unreference_unlocked(hibmc_fb->obj);
+ drm_gem_object_put_unlocked(hibmc_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(hibmc_fb);
}
@@ -543,7 +543,7 @@ hibmc_user_framebuffer_create(struct drm_device *dev,
hibmc_fb = hibmc_framebuffer_init(dev, mode_cmd, obj);
if (IS_ERR(hibmc_fb)) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ERR_PTR((long)hibmc_fb);
}
return &hibmc_fb->fb;
diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
index f77dcfaade6c..b4c7af3ab6ae 100644
--- a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
+++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
@@ -603,6 +603,72 @@ static void dsi_encoder_enable(struct drm_encoder *encoder)
dsi->enable = true;
}
+static enum drm_mode_status dsi_encoder_phy_mode_valid(
+ struct drm_encoder *encoder,
+ const struct drm_display_mode *mode)
+{
+ struct dw_dsi *dsi = encoder_to_dsi(encoder);
+ struct mipi_phy_params phy;
+ u32 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
+ u32 req_kHz, act_kHz, lane_byte_clk_kHz;
+
+ /* Calculate the lane byte clk using the adjusted mode clk */
+ memset(&phy, 0, sizeof(phy));
+ req_kHz = mode->clock * bpp / dsi->lanes;
+ act_kHz = dsi_calc_phy_rate(req_kHz, &phy);
+ lane_byte_clk_kHz = act_kHz / 8;
+
+ DRM_DEBUG_DRIVER("Checking mode %ix%i-%i@%i clock: %i...",
+ mode->hdisplay, mode->vdisplay, bpp,
+ drm_mode_vrefresh(mode), mode->clock);
+
+ /*
+ * Make sure the adjusted mode clock and the lane byte clk
+ * have a common denominator base frequency
+ */
+ if (mode->clock/dsi->lanes == lane_byte_clk_kHz/3) {
+ DRM_DEBUG_DRIVER("OK!\n");
+ return MODE_OK;
+ }
+
+ DRM_DEBUG_DRIVER("BAD!\n");
+ return MODE_BAD;
+}
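To make the clock check concrete, a worked example with assumed numbers, not taken from the patch: 1080p60 with a 148500 kHz pixel clock, RGB888 (bpp = 24), 4 lanes.

	/* Illustrative arithmetic only:
	 *
	 *   req_kHz           = 148500 * 24 / 4 = 891000
	 *   act_kHz           = 891000            (assume the PHY PLL hits it exactly)
	 *   lane_byte_clk_kHz = 891000 / 8        = 111375
	 *
	 *   mode->clock / dsi->lanes = 148500 / 4 = 37125
	 *   lane_byte_clk_kHz / 3    = 111375 / 3 = 37125   -> MODE_OK
	 *
	 * Had the PLL only reached 900000 kHz, 900000 / 8 / 3 = 37500 != 37125
	 * and the mode would be rejected as MODE_BAD.
	 */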
+
+static enum drm_mode_status dsi_encoder_mode_valid(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode)
+
+{
+ const struct drm_crtc_helper_funcs *crtc_funcs = NULL;
+ struct drm_crtc *crtc = NULL;
+ struct drm_display_mode adj_mode;
+ enum drm_mode_status ret;
+
+ /*
+ * The crtc might adjust the mode, so go through the
+ * possible crtcs (technically just one) and call
+ * mode_fixup to figure out the adjusted mode before we
+ * validate it.
+ */
+ drm_for_each_crtc(crtc, encoder->dev) {
+ /*
+ * reset adj_mode to the mode value each time,
+ * so we don't adjust the mode twice
+ */
+ drm_mode_copy(&adj_mode, mode);
+
+ crtc_funcs = crtc->helper_private;
+ if (crtc_funcs && crtc_funcs->mode_fixup)
+ if (!crtc_funcs->mode_fixup(crtc, mode, &adj_mode))
+ return MODE_BAD;
+
+ ret = dsi_encoder_phy_mode_valid(encoder, &adj_mode);
+ if (ret != MODE_OK)
+ return ret;
+ }
+ return MODE_OK;
+}
+
static void dsi_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
@@ -622,6 +688,7 @@ static int dsi_encoder_atomic_check(struct drm_encoder *encoder,
static const struct drm_encoder_helper_funcs dw_encoder_helper_funcs = {
.atomic_check = dsi_encoder_atomic_check,
+ .mode_valid = dsi_encoder_mode_valid,
.mode_set = dsi_encoder_mode_set,
.enable = dsi_encoder_enable,
.disable = dsi_encoder_disable
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index c96c228a9898..9823477b1855 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -178,6 +178,19 @@ static void ade_init(struct ade_hw_ctx *ctx)
FRM_END_START_MASK, REG_EFFECTIVE_IN_ADEEN_FRMEND);
}
+static bool ade_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct ade_crtc *acrtc = to_ade_crtc(crtc);
+ struct ade_hw_ctx *ctx = acrtc->ctx;
+
+ adjusted_mode->clock =
+ clk_round_rate(ctx->ade_pix_clk, mode->clock * 1000) / 1000;
+ return true;
+}
+
+
static void ade_set_pix_clk(struct ade_hw_ctx *ctx,
struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
@@ -467,7 +480,8 @@ static void ade_dump_regs(void __iomem *base)
static void ade_dump_regs(void __iomem *base) { }
#endif
-static void ade_crtc_enable(struct drm_crtc *crtc)
+static void ade_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct ade_crtc *acrtc = to_ade_crtc(crtc);
struct ade_hw_ctx *ctx = acrtc->ctx;
@@ -489,7 +503,8 @@ static void ade_crtc_enable(struct drm_crtc *crtc)
acrtc->enable = true;
}
-static void ade_crtc_disable(struct drm_crtc *crtc)
+static void ade_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct ade_crtc *acrtc = to_ade_crtc(crtc);
struct ade_hw_ctx *ctx = acrtc->ctx;
@@ -553,11 +568,12 @@ static void ade_crtc_atomic_flush(struct drm_crtc *crtc,
}
static const struct drm_crtc_helper_funcs ade_crtc_helper_funcs = {
- .enable = ade_crtc_enable,
- .disable = ade_crtc_disable,
+ .mode_fixup = ade_crtc_mode_fixup,
.mode_set_nofb = ade_crtc_mode_set_nofb,
.atomic_begin = ade_crtc_atomic_begin,
.atomic_flush = ade_crtc_atomic_flush,
+ .atomic_enable = ade_crtc_atomic_enable,
+ .atomic_disable = ade_crtc_atomic_disable,
};
static const struct drm_crtc_funcs ade_crtc_funcs = {
@@ -565,7 +581,6 @@ static const struct drm_crtc_funcs ade_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.reset = drm_atomic_helper_crtc_reset,
- .set_property = drm_atomic_helper_crtc_set_property,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.enable_vblank = ade_crtc_enable_vblank,
@@ -583,8 +598,7 @@ static int ade_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
*/
port = of_get_child_by_name(dev->dev->of_node, "port");
if (!port) {
- DRM_ERROR("no port node found in %s\n",
- dev->dev->of_node->full_name);
+ DRM_ERROR("no port node found in %pOF\n", dev->dev->of_node);
return -EINVAL;
}
of_node_put(port);
@@ -889,7 +903,6 @@ static const struct drm_plane_helper_funcs ade_plane_helper_funcs = {
static struct drm_plane_funcs ade_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .set_property = drm_atomic_helper_plane_set_property,
.destroy = drm_plane_cleanup,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
@@ -909,7 +922,7 @@ static int ade_plane_init(struct drm_device *dev, struct ade_plane *aplane,
return ret;
ret = drm_universal_plane_init(dev, &aplane->base, 1, &ade_plane_funcs,
- fmts, fmts_cnt, type, NULL);
+ fmts, fmts_cnt, NULL, type, NULL);
if (ret) {
DRM_ERROR("fail to init plane, ch=%d\n", aplane->ch);
return ret;
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
index 9c903672f582..e27352ca26c4 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
@@ -34,14 +34,12 @@ static int kirin_drm_kms_cleanup(struct drm_device *dev)
{
struct kirin_drm_private *priv = dev->dev_private;
-#ifdef CONFIG_DRM_FBDEV_EMULATION
if (priv->fbdev) {
drm_fbdev_cma_fini(priv->fbdev);
priv->fbdev = NULL;
}
-#endif
+
drm_kms_helper_poll_fini(dev);
- drm_vblank_cleanup(dev);
dc_ops->cleanup(to_platform_device(dev->dev));
drm_mode_config_cleanup(dev);
devm_kfree(dev->dev, priv);
@@ -50,27 +48,16 @@ static int kirin_drm_kms_cleanup(struct drm_device *dev)
return 0;
}
-#ifdef CONFIG_DRM_FBDEV_EMULATION
static void kirin_fbdev_output_poll_changed(struct drm_device *dev)
{
struct kirin_drm_private *priv = dev->dev_private;
- if (priv->fbdev) {
- drm_fbdev_cma_hotplug_event(priv->fbdev);
- } else {
- priv->fbdev = drm_fbdev_cma_init(dev, 32,
- dev->mode_config.num_connector);
- if (IS_ERR(priv->fbdev))
- priv->fbdev = NULL;
- }
+ drm_fbdev_cma_hotplug_event(priv->fbdev);
}
-#endif
static const struct drm_mode_config_funcs kirin_drm_mode_config_funcs = {
.fb_create = drm_fb_cma_create,
-#ifdef CONFIG_DRM_FBDEV_EMULATION
.output_poll_changed = kirin_fbdev_output_poll_changed,
-#endif
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
@@ -129,11 +116,18 @@ static int kirin_drm_kms_init(struct drm_device *dev)
/* init kms poll for handling hpd */
drm_kms_helper_poll_init(dev);
- /* force detection after connectors init */
- (void)drm_helper_hpd_irq_event(dev);
+ priv->fbdev = drm_fbdev_cma_init(dev, 32,
+ dev->mode_config.num_connector);
+ if (IS_ERR(priv->fbdev)) {
+ DRM_ERROR("failed to initialize fbdev.\n");
+ ret = PTR_ERR(priv->fbdev);
+ goto err_cleanup_poll;
+ }
return 0;
+err_cleanup_poll:
+ drm_kms_helper_poll_fini(dev);
err_unbind_all:
component_unbind_all(dev->dev, dev);
err_dc_cleanup:
@@ -163,8 +157,6 @@ static struct drm_driver kirin_drm_driver = {
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = kirin_gem_cma_dumb_create,
- .dumb_map_offset = drm_gem_cma_dumb_map_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h
index 7f60c64915d9..56cb62df065c 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h
@@ -20,9 +20,7 @@ struct kirin_dc_ops {
};
struct kirin_drm_private {
-#ifdef CONFIG_DRM_FBDEV_EMULATION
struct drm_fbdev_cma *fbdev;
-#endif
};
extern const struct kirin_dc_ops ade_dc_ops;
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 86f47e190309..54e3255dde13 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -712,7 +712,7 @@ tda998x_write_avi(struct tda998x_priv *priv, struct drm_display_mode *mode)
{
union hdmi_infoframe frame;
- drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode);
+ drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode, false);
frame.avi.quantization_range = HDMI_QUANTIZATION_RANGE_FULL;
tda998x_write_if(priv, DIP_IF_FLAGS_IF2, REG_IF2_HB0, &frame);
@@ -969,14 +969,6 @@ static int tda998x_audio_codec_init(struct tda998x_priv *priv,
/* DRM connector functions */
-static int tda998x_connector_dpms(struct drm_connector *connector, int mode)
-{
- if (drm_core_check_feature(connector->dev, DRIVER_ATOMIC))
- return drm_atomic_helper_connector_dpms(connector, mode);
- else
- return drm_helper_connector_dpms(connector, mode);
-}
-
static int tda998x_connector_fill_modes(struct drm_connector *connector,
uint32_t maxX, uint32_t maxY)
{
@@ -1014,7 +1006,7 @@ static void tda998x_connector_destroy(struct drm_connector *connector)
}
static const struct drm_connector_funcs tda998x_connector_funcs = {
- .dpms = tda998x_connector_dpms,
+ .dpms = drm_helper_connector_dpms,
.reset = drm_atomic_helper_connector_reset,
.fill_modes = tda998x_connector_fill_modes,
.detect = tda998x_connector_detect,
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index 37fd0906f807..c69d5c487f51 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -59,7 +59,6 @@ static struct drm_driver driver = {
.load = i810_driver_load,
.lastclose = i810_driver_lastclose,
.preclose = i810_driver_preclose,
- .set_busid = drm_pci_set_busid,
.dma_quiescent = i810_driver_dma_quiescent,
.ioctls = i810_ioctls,
.fops = &i810_driver_fops,
@@ -83,12 +82,12 @@ static int __init i810_init(void)
return -EINVAL;
}
driver.num_ioctls = i810_max_ioctl;
- return drm_pci_init(&driver, &i810_pci_driver);
+ return drm_legacy_pci_init(&driver, &i810_pci_driver);
}
static void __exit i810_exit(void)
{
- drm_pci_exit(&driver, &i810_pci_driver);
+ drm_legacy_pci_exit(&driver, &i810_pci_driver);
}
module_init(i810_init);
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index a5cd5dacf055..e9e64e8e9765 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -21,6 +21,7 @@ config DRM_I915
select ACPI_BUTTON if ACPI
select SYNC_FILE
select IOSF_MBI
+ select CRC32
help
Choose this option if you have a system that has "Intel Graphics
Media Accelerator" or "HD Graphics" integrated graphics,
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 78c5c049a347..aed7d207ea84 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -25,6 +25,7 @@ config DRM_I915_DEBUG
select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
select DRM_DEBUG_MM if DRM=y
select DRM_DEBUG_MM_SELFTEST
+ select SW_SYNC # signaling validation framework (igt/syncobj*)
select DRM_I915_SW_FENCE_DEBUG_OBJECTS
select DRM_I915_SELFTEST
default n
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index f8227318dcaf..892f52b53060 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -39,6 +39,7 @@ i915-y += i915_cmd_parser.o \
i915_gem_gtt.o \
i915_gem_internal.o \
i915_gem.o \
+ i915_gem_object.o \
i915_gem_render_state.o \
i915_gem_request.o \
i915_gem_shrinker.o \
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 325618d969fe..ca3d1925beda 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -285,8 +285,8 @@ static int alloc_resource(struct intel_vgpu *vgpu,
return 0;
no_enough_resource:
- gvt_vgpu_err("fail to allocate resource %s\n", item);
- gvt_vgpu_err("request %luMB avail %luMB max %luMB taken %luMB\n",
+ gvt_err("fail to allocate resource %s\n", item);
+ gvt_err("request %luMB avail %luMB max %luMB taken %luMB\n",
BYTES_TO_MB(request), BYTES_TO_MB(avail),
BYTES_TO_MB(max), BYTES_TO_MB(taken));
return -ENOSPC;
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 713848c36349..21c36e256884 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1382,13 +1382,13 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
ret = -EINVAL;
goto err;
}
- } else if ((!vgpu_gmadr_is_valid(s->vgpu, guest_gma)) ||
- (!vgpu_gmadr_is_valid(s->vgpu,
- guest_gma + op_size - 1))) {
+ } else if (!intel_gvt_ggtt_validate_range(vgpu, guest_gma, op_size)) {
ret = -EINVAL;
goto err;
}
+
return 0;
+
err:
gvt_vgpu_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
s->info->name, guest_gma, op_size);
@@ -2647,7 +2647,7 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
return 0;
}
-int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
+int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload)
{
int ret;
struct intel_vgpu *vgpu = workload->vgpu;
@@ -2714,7 +2714,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
unmap_src:
i915_gem_object_unpin_map(obj);
put_obj:
- i915_gem_object_put(wa_ctx->indirect_ctx.obj);
+ i915_gem_object_put(obj);
return ret;
}
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.h b/drivers/gpu/drm/i915/gvt/cmd_parser.h
index bed33514103c..286703643002 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.h
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.h
@@ -42,7 +42,7 @@ void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt);
int intel_gvt_init_cmd_parser(struct intel_gvt *gvt);
-int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
+int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload);
int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 2deb05f618fb..3c318439a659 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -178,9 +178,9 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
SDE_PORTE_HOTPLUG_SPT);
vgpu_vreg(vgpu, SKL_FUSE_STATUS) |=
SKL_FUSE_DOWNLOAD_STATUS |
- SKL_FUSE_PG0_DIST_STATUS |
- SKL_FUSE_PG1_DIST_STATUS |
- SKL_FUSE_PG2_DIST_STATUS;
+ SKL_FUSE_PG_DIST_STATUS(SKL_PG0) |
+ SKL_FUSE_PG_DIST_STATUS(SKL_PG1) |
+ SKL_FUSE_PG_DIST_STATUS(SKL_PG2);
vgpu_vreg(vgpu, LCPLL1_CTL) |=
LCPLL_PLL_ENABLE |
LCPLL_PLL_LOCK;
@@ -323,27 +323,27 @@ void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt)
{
struct intel_gvt_irq *irq = &gvt->irq;
struct intel_vgpu *vgpu;
- bool have_enabled_pipe = false;
int pipe, id;
if (WARN_ON(!mutex_is_locked(&gvt->lock)))
return;
- hrtimer_cancel(&irq->vblank_timer.timer);
-
for_each_active_vgpu(gvt, vgpu, id) {
for (pipe = 0; pipe < I915_MAX_PIPES; pipe++) {
- have_enabled_pipe =
- pipe_is_enabled(vgpu, pipe);
- if (have_enabled_pipe)
- break;
+ if (pipe_is_enabled(vgpu, pipe))
+ goto out;
}
}
- if (have_enabled_pipe)
- hrtimer_start(&irq->vblank_timer.timer,
- ktime_add_ns(ktime_get(), irq->vblank_timer.period),
- HRTIMER_MODE_ABS);
+ /* all the pipes are disabled */
+ hrtimer_cancel(&irq->vblank_timer.timer);
+ return;
+
+out:
+ hrtimer_start(&irq->vblank_timer.timer,
+ ktime_add_ns(ktime_get(), irq->vblank_timer.period),
+ HRTIMER_MODE_ABS);
+
}
static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 700050556242..91b4300f3b39 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -46,6 +46,8 @@
#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
((a)->lrca == (b)->lrca))
+static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask);
+
static int context_switch_events[] = {
[RCS] = RCS_AS_CONTEXT_SWITCH,
[BCS] = BCS_AS_CONTEXT_SWITCH,
@@ -499,10 +501,10 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
static int complete_execlist_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
- struct intel_vgpu_execlist *execlist =
- &vgpu->execlist[workload->ring_id];
+ int ring_id = workload->ring_id;
+ struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
struct intel_vgpu_workload *next_workload;
- struct list_head *next = workload_q_head(vgpu, workload->ring_id)->next;
+ struct list_head *next = workload_q_head(vgpu, ring_id)->next;
bool lite_restore = false;
int ret;
@@ -512,10 +514,25 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
release_shadow_batch_buffer(workload);
release_shadow_wa_ctx(&workload->wa_ctx);
- if (workload->status || vgpu->resetting)
+ if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
+ /* If workload->status is not successful, the HW GPU has hit a
+ * hang or something went wrong in i915/GVT, and GVT won't inject
+ * a context switch interrupt into the guest. So this error is
+ * effectively a vGPU hang for the guest, and we should emulate a
+ * vGPU hang: if there are pending workloads already submitted by
+ * the guest, clean them up the way the HW GPU would.
+ *
+ * If we are in the middle of an engine reset, the pending
+ * workloads won't be submitted to the HW GPU and will be cleaned
+ * up later during the reset, so doing the workload clean up here
+ * has no impact.
+ */
+ clean_workloads(vgpu, ENGINE_MASK(ring_id));
goto out;
+ }
- if (!list_empty(workload_q_head(vgpu, workload->ring_id))) {
+ if (!list_empty(workload_q_head(vgpu, ring_id))) {
struct execlist_ctx_descriptor_format *this_desc, *next_desc;
next_workload = container_of(next,
@@ -605,6 +622,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
struct list_head *q = workload_q_head(vgpu, ring_id);
struct intel_vgpu_workload *last_workload = get_last_workload(q);
struct intel_vgpu_workload *workload = NULL;
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
u64 ring_context_gpa;
u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
int ret;
@@ -668,6 +686,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
workload->complete = complete_execlist_workload;
workload->status = -EINPROGRESS;
workload->emulate_schedule_in = emulate_schedule_in;
+ workload->shadowed = false;
if (ring_id == RCS) {
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
@@ -701,6 +720,17 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
return ret;
}
+ /* Only scan and shadow the first workload in the queue
+ * as there is only one pre-allocated buf-obj for shadow.
+ */
+ if (list_empty(workload_q_head(vgpu, ring_id))) {
+ intel_runtime_pm_get(dev_priv);
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ intel_gvt_scan_and_shadow_workload(workload);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ intel_runtime_pm_put(dev_priv);
+ }
+
queue_workload(workload);
return 0;
}
@@ -783,6 +813,8 @@ static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
list_del_init(&pos->list);
free_workload(pos);
}
+
+ clear_bit(engine->id, vgpu->shadow_ctx_desc_updated);
}
}
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index 5dad9298b2d5..a26c1705430e 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -72,11 +72,13 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
struct intel_gvt_device_info *info = &gvt->device_info;
struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
struct intel_gvt_mmio_info *e;
+ struct gvt_mmio_block *block = gvt->mmio.mmio_block;
+ int num = gvt->mmio.num_mmio_block;
struct gvt_firmware_header *h;
void *firmware;
void *p;
unsigned long size, crc32_start;
- int i;
+ int i, j;
int ret;
size = sizeof(*h) + info->mmio_size + info->cfg_space_size;
@@ -105,6 +107,13 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
hash_for_each(gvt->mmio.mmio_info_table, i, e, node)
*(u32 *)(p + e->offset) = I915_READ_NOTRACE(_MMIO(e->offset));
+ for (i = 0; i < num; i++, block++) {
+ for (j = 0; j < block->size; j += 4)
+ *(u32 *)(p + INTEL_GVT_MMIO_OFFSET(block->offset) + j) =
+ I915_READ_NOTRACE(_MMIO(INTEL_GVT_MMIO_OFFSET(
+ block->offset) + j));
+ }
+
memcpy(gvt->firmware.mmio, p, info->mmio_size);
crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 6166e34d892b..e6dfc3331f4b 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -259,7 +259,7 @@ static void write_pte64(struct drm_i915_private *dev_priv,
writeq(pte, addr);
}
-static inline struct intel_gvt_gtt_entry *gtt_get_entry64(void *pt,
+static inline int gtt_get_entry64(void *pt,
struct intel_gvt_gtt_entry *e,
unsigned long index, bool hypervisor_access, unsigned long gpa,
struct intel_vgpu *vgpu)
@@ -268,22 +268,23 @@ static inline struct intel_gvt_gtt_entry *gtt_get_entry64(void *pt,
int ret;
if (WARN_ON(info->gtt_entry_size != 8))
- return e;
+ return -EINVAL;
if (hypervisor_access) {
ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa +
(index << info->gtt_entry_size_shift),
&e->val64, 8);
- WARN_ON(ret);
+ if (WARN_ON(ret))
+ return ret;
} else if (!pt) {
e->val64 = read_pte64(vgpu->gvt->dev_priv, index);
} else {
e->val64 = *((u64 *)pt + index);
}
- return e;
+ return 0;
}
-static inline struct intel_gvt_gtt_entry *gtt_set_entry64(void *pt,
+static inline int gtt_set_entry64(void *pt,
struct intel_gvt_gtt_entry *e,
unsigned long index, bool hypervisor_access, unsigned long gpa,
struct intel_vgpu *vgpu)
@@ -292,19 +293,20 @@ static inline struct intel_gvt_gtt_entry *gtt_set_entry64(void *pt,
int ret;
if (WARN_ON(info->gtt_entry_size != 8))
- return e;
+ return -EINVAL;
if (hypervisor_access) {
ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa +
(index << info->gtt_entry_size_shift),
&e->val64, 8);
- WARN_ON(ret);
+ if (WARN_ON(ret))
+ return ret;
} else if (!pt) {
write_pte64(vgpu->gvt->dev_priv, index, e->val64);
} else {
*((u64 *)pt + index) = e->val64;
}
- return e;
+ return 0;
}
#define GTT_HAW 46
@@ -445,21 +447,25 @@ static int gtt_entry_p2m(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *p,
/*
* MM helpers.
*/
-struct intel_gvt_gtt_entry *intel_vgpu_mm_get_entry(struct intel_vgpu_mm *mm,
+int intel_vgpu_mm_get_entry(struct intel_vgpu_mm *mm,
void *page_table, struct intel_gvt_gtt_entry *e,
unsigned long index)
{
struct intel_gvt *gvt = mm->vgpu->gvt;
struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
+ int ret;
e->type = mm->page_table_entry_type;
- ops->get_entry(page_table, e, index, false, 0, mm->vgpu);
+ ret = ops->get_entry(page_table, e, index, false, 0, mm->vgpu);
+ if (ret)
+ return ret;
+
ops->test_pse(e);
- return e;
+ return 0;
}
-struct intel_gvt_gtt_entry *intel_vgpu_mm_set_entry(struct intel_vgpu_mm *mm,
+int intel_vgpu_mm_set_entry(struct intel_vgpu_mm *mm,
void *page_table, struct intel_gvt_gtt_entry *e,
unsigned long index)
{
@@ -472,7 +478,7 @@ struct intel_gvt_gtt_entry *intel_vgpu_mm_set_entry(struct intel_vgpu_mm *mm,
/*
* PPGTT shadow page table helpers.
*/
-static inline struct intel_gvt_gtt_entry *ppgtt_spt_get_entry(
+static inline int ppgtt_spt_get_entry(
struct intel_vgpu_ppgtt_spt *spt,
void *page_table, int type,
struct intel_gvt_gtt_entry *e, unsigned long index,
@@ -480,20 +486,24 @@ static inline struct intel_gvt_gtt_entry *ppgtt_spt_get_entry(
{
struct intel_gvt *gvt = spt->vgpu->gvt;
struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
+ int ret;
e->type = get_entry_type(type);
if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
- return e;
+ return -EINVAL;
- ops->get_entry(page_table, e, index, guest,
+ ret = ops->get_entry(page_table, e, index, guest,
spt->guest_page.gfn << GTT_PAGE_SHIFT,
spt->vgpu);
+ if (ret)
+ return ret;
+
ops->test_pse(e);
- return e;
+ return 0;
}
-static inline struct intel_gvt_gtt_entry *ppgtt_spt_set_entry(
+static inline int ppgtt_spt_set_entry(
struct intel_vgpu_ppgtt_spt *spt,
void *page_table, int type,
struct intel_gvt_gtt_entry *e, unsigned long index,
@@ -503,7 +513,7 @@ static inline struct intel_gvt_gtt_entry *ppgtt_spt_set_entry(
struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
- return e;
+ return -EINVAL;
return ops->set_entry(page_table, e, index, guest,
spt->guest_page.gfn << GTT_PAGE_SHIFT,
@@ -792,13 +802,13 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page(
#define for_each_present_guest_entry(spt, e, i) \
for (i = 0; i < pt_entries(spt); i++) \
- if (spt->vgpu->gvt->gtt.pte_ops->test_present( \
- ppgtt_get_guest_entry(spt, e, i)))
+ if (!ppgtt_get_guest_entry(spt, e, i) && \
+ spt->vgpu->gvt->gtt.pte_ops->test_present(e))
#define for_each_present_shadow_entry(spt, e, i) \
for (i = 0; i < pt_entries(spt); i++) \
- if (spt->vgpu->gvt->gtt.pte_ops->test_present( \
- ppgtt_get_shadow_entry(spt, e, i)))
+ if (!ppgtt_get_shadow_entry(spt, e, i) && \
+ spt->vgpu->gvt->gtt.pte_ops->test_present(e))
static void ppgtt_get_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
@@ -979,29 +989,26 @@ fail:
}
static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
- unsigned long index)
+ struct intel_gvt_gtt_entry *se, unsigned long index)
{
struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
struct intel_vgpu *vgpu = spt->vgpu;
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
- struct intel_gvt_gtt_entry e;
int ret;
- ppgtt_get_shadow_entry(spt, &e, index);
-
- trace_gpt_change(spt->vgpu->id, "remove", spt, sp->type, e.val64,
+ trace_gpt_change(spt->vgpu->id, "remove", spt, sp->type, se->val64,
index);
- if (!ops->test_present(&e))
+ if (!ops->test_present(se))
return 0;
- if (ops->get_pfn(&e) == vgpu->gtt.scratch_pt[sp->type].page_mfn)
+ if (ops->get_pfn(se) == vgpu->gtt.scratch_pt[sp->type].page_mfn)
return 0;
- if (gtt_type_is_pt(get_next_pt_type(e.type))) {
+ if (gtt_type_is_pt(get_next_pt_type(se->type))) {
struct intel_vgpu_ppgtt_spt *s =
- ppgtt_find_shadow_page(vgpu, ops->get_pfn(&e));
+ ppgtt_find_shadow_page(vgpu, ops->get_pfn(se));
if (!s) {
gvt_vgpu_err("fail to find guest page\n");
ret = -ENXIO;
@@ -1011,12 +1018,10 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
if (ret)
goto fail;
}
- ops->set_pfn(&e, vgpu->gtt.scratch_pt[sp->type].page_mfn);
- ppgtt_set_shadow_entry(spt, &e, index);
return 0;
fail:
gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
- spt, e.val64, e.type);
+ spt, se->val64, se->type);
return ret;
}
@@ -1236,22 +1241,37 @@ static int ppgtt_handle_guest_write_page_table(
{
struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
struct intel_vgpu *vgpu = spt->vgpu;
+ int type = spt->shadow_page.type;
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ struct intel_gvt_gtt_entry se;
int ret;
int new_present;
new_present = ops->test_present(we);
- ret = ppgtt_handle_guest_entry_removal(gpt, index);
- if (ret)
- goto fail;
+ /*
+ * Add the new entry first and then remove the old one, so that the
+ * ppgtt table stays valid during the window between the addition
+ * and the removal.
+ */
+ ppgtt_get_shadow_entry(spt, &se, index);
if (new_present) {
ret = ppgtt_handle_guest_entry_add(gpt, we, index);
if (ret)
goto fail;
}
+
+ ret = ppgtt_handle_guest_entry_removal(gpt, &se, index);
+ if (ret)
+ goto fail;
+
+ if (!new_present) {
+ ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
+ ppgtt_set_shadow_entry(spt, &se, index);
+ }
+
return 0;
fail:
gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n",
@@ -1323,7 +1343,7 @@ static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
struct intel_vgpu *vgpu = spt->vgpu;
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
- struct intel_gvt_gtt_entry we;
+ struct intel_gvt_gtt_entry we, se;
unsigned long index;
int ret;
@@ -1339,7 +1359,8 @@ static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
return ret;
} else {
if (!test_bit(index, spt->post_shadow_bitmap)) {
- ret = ppgtt_handle_guest_entry_removal(gpt, index);
+ ppgtt_get_shadow_entry(spt, &se, index);
+ ret = ppgtt_handle_guest_entry_removal(gpt, &se, index);
if (ret)
return ret;
}
@@ -1713,8 +1734,10 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
if (!vgpu_gmadr_is_valid(vgpu, gma))
goto err;
- ggtt_get_guest_entry(mm, &e,
- gma_ops->gma_to_ggtt_pte_index(gma));
+ ret = ggtt_get_guest_entry(mm, &e,
+ gma_ops->gma_to_ggtt_pte_index(gma));
+ if (ret)
+ goto err;
gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
+ (gma & ~GTT_PAGE_MASK);
@@ -1724,7 +1747,9 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
switch (mm->page_table_level) {
case 4:
- ppgtt_get_shadow_root_entry(mm, &e, 0);
+ ret = ppgtt_get_shadow_root_entry(mm, &e, 0);
+ if (ret)
+ goto err;
gma_index[0] = gma_ops->gma_to_pml4_index(gma);
gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
gma_index[2] = gma_ops->gma_to_pde_index(gma);
@@ -1732,15 +1757,19 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
index = 4;
break;
case 3:
- ppgtt_get_shadow_root_entry(mm, &e,
+ ret = ppgtt_get_shadow_root_entry(mm, &e,
gma_ops->gma_to_l3_pdp_index(gma));
+ if (ret)
+ goto err;
gma_index[0] = gma_ops->gma_to_pde_index(gma);
gma_index[1] = gma_ops->gma_to_pte_index(gma);
index = 2;
break;
case 2:
- ppgtt_get_shadow_root_entry(mm, &e,
+ ret = ppgtt_get_shadow_root_entry(mm, &e,
gma_ops->gma_to_pde_index(gma));
+ if (ret)
+ goto err;
gma_index[0] = gma_ops->gma_to_pte_index(gma);
index = 1;
break;
@@ -1755,6 +1784,11 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
(i == index - 1));
if (ret)
goto err;
+
+ if (!pte_ops->test_present(&e)) {
+ gvt_dbg_core("GMA 0x%lx is not present\n", gma);
+ goto err;
+ }
}
gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
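For orientation, the walk above uses the standard 4-level, 4 KiB-page index split (this is the usual x86-64-style paging layout, not something defined by this patch):

	/* Index split assumed by the 4-level walk (4 KiB pages):
	 *
	 *   pml4 index : gma bits 47..39
	 *   pdp  index : gma bits 38..30
	 *   pde  index : gma bits 29..21
	 *   pte  index : gma bits 20..12
	 *
	 *   gpa = (pfn_from_final_pte << 12) | (gma & 0xfff)
	 */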
@@ -2329,13 +2363,12 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
/**
* intel_vgpu_reset_gtt - reset the all GTT related status
* @vgpu: a vGPU
- * @dmlr: true for vGPU Device Model Level Reset, false for GT Reset
*
* This function is called from vfio core to reset all
* GTT related status, including GGTT, PPGTT, scratch page.
*
*/
-void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr)
+void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
{
int i;
@@ -2347,9 +2380,6 @@ void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr)
*/
intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT);
- if (!dmlr)
- return;
-
intel_vgpu_reset_ggtt(vgpu);
/* clear scratch page for security */
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index f88eb5e89bea..30a4c8d16026 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -49,14 +49,18 @@ struct intel_gvt_gtt_entry {
};
struct intel_gvt_gtt_pte_ops {
- struct intel_gvt_gtt_entry *(*get_entry)(void *pt,
- struct intel_gvt_gtt_entry *e,
- unsigned long index, bool hypervisor_access, unsigned long gpa,
- struct intel_vgpu *vgpu);
- struct intel_gvt_gtt_entry *(*set_entry)(void *pt,
- struct intel_gvt_gtt_entry *e,
- unsigned long index, bool hypervisor_access, unsigned long gpa,
- struct intel_vgpu *vgpu);
+ int (*get_entry)(void *pt,
+ struct intel_gvt_gtt_entry *e,
+ unsigned long index,
+ bool hypervisor_access,
+ unsigned long gpa,
+ struct intel_vgpu *vgpu);
+ int (*set_entry)(void *pt,
+ struct intel_gvt_gtt_entry *e,
+ unsigned long index,
+ bool hypervisor_access,
+ unsigned long gpa,
+ struct intel_vgpu *vgpu);
bool (*test_present)(struct intel_gvt_gtt_entry *e);
void (*clear_present)(struct intel_gvt_gtt_entry *e);
bool (*test_pse)(struct intel_gvt_gtt_entry *e);
@@ -143,12 +147,12 @@ struct intel_vgpu_mm {
struct intel_vgpu *vgpu;
};
-extern struct intel_gvt_gtt_entry *intel_vgpu_mm_get_entry(
+extern int intel_vgpu_mm_get_entry(
struct intel_vgpu_mm *mm,
void *page_table, struct intel_gvt_gtt_entry *e,
unsigned long index);
-extern struct intel_gvt_gtt_entry *intel_vgpu_mm_set_entry(
+extern int intel_vgpu_mm_set_entry(
struct intel_vgpu_mm *mm,
void *page_table, struct intel_gvt_gtt_entry *e,
unsigned long index);
@@ -208,7 +212,7 @@ extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu);
extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
-extern void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr);
+void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu);
extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);
extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 3a74e79eac2f..44b719eda8c4 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -149,7 +149,7 @@ struct intel_vgpu {
bool active;
bool pv_notified;
bool failsafe;
- bool resetting;
+ unsigned int resetting_eng;
void *sched_data;
struct vgpu_sched_ctl sched_ctl;
@@ -167,6 +167,7 @@ struct intel_vgpu {
atomic_t running_workload_num;
DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
struct i915_gem_context *shadow_ctx;
+ DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
struct {
@@ -195,6 +196,15 @@ struct intel_gvt_fence {
unsigned long vgpu_allocated_fence_num;
};
+/* Special MMIO blocks. */
+struct gvt_mmio_block {
+ unsigned int device;
+ i915_reg_t offset;
+ unsigned int size;
+ gvt_mmio_func read;
+ gvt_mmio_func write;
+};
+
#define INTEL_GVT_MMIO_HASH_BITS 11
struct intel_gvt_mmio {
@@ -214,6 +224,9 @@ struct intel_gvt_mmio {
/* This reg could be accessed by unaligned address */
#define F_UNALIGN (1 << 6)
+ struct gvt_mmio_block *mmio_block;
+ unsigned int num_mmio_block;
+
DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
unsigned int num_tracked_mmio;
};
@@ -470,6 +483,8 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);
int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
void populate_pvinfo_page(struct intel_vgpu *vgpu);
+int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
+
struct intel_gvt_ops {
int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *,
unsigned int);
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 17febe830ff6..3502a59166ff 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -113,9 +113,17 @@ static int new_mmio_info(struct intel_gvt *gvt,
info->offset = i;
p = find_mmio_info(gvt, info->offset);
- if (p)
- gvt_err("dup mmio definition offset %x\n",
+ if (p) {
+ WARN(1, "dup mmio definition offset %x\n",
info->offset);
+ kfree(info);
+
+ /* Return -EEXIST here to make GVT-g load fail, so that
+ * duplicated MMIO definitions are caught as early as
+ * possible.
+ */
+ return -EEXIST;
+ }
info->ro_mask = ro_mask;
info->device = device;
@@ -1222,10 +1230,12 @@ static int power_well_ctl_mmio_write(struct intel_vgpu *vgpu,
{
write_vreg(vgpu, offset, p_data, bytes);
- if (vgpu_vreg(vgpu, offset) & HSW_PWR_WELL_ENABLE_REQUEST)
- vgpu_vreg(vgpu, offset) |= HSW_PWR_WELL_STATE_ENABLED;
+ if (vgpu_vreg(vgpu, offset) & HSW_PWR_WELL_CTL_REQ(HSW_DISP_PW_GLOBAL))
+ vgpu_vreg(vgpu, offset) |=
+ HSW_PWR_WELL_CTL_STATE(HSW_DISP_PW_GLOBAL);
else
- vgpu_vreg(vgpu, offset) &= ~HSW_PWR_WELL_STATE_ENABLED;
+ vgpu_vreg(vgpu, offset) &=
+ ~HSW_PWR_WELL_CTL_STATE(HSW_DISP_PW_GLOBAL);
return 0;
}
@@ -2242,10 +2252,17 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(GEN6_RC6p_THRESHOLD, D_ALL);
MMIO_D(GEN6_RC6pp_THRESHOLD, D_ALL);
MMIO_D(GEN6_PMINTRMSK, D_ALL);
- MMIO_DH(HSW_PWR_WELL_BIOS, D_BDW, NULL, power_well_ctl_mmio_write);
- MMIO_DH(HSW_PWR_WELL_DRIVER, D_BDW, NULL, power_well_ctl_mmio_write);
- MMIO_DH(HSW_PWR_WELL_KVMR, D_BDW, NULL, power_well_ctl_mmio_write);
- MMIO_DH(HSW_PWR_WELL_DEBUG, D_BDW, NULL, power_well_ctl_mmio_write);
+ /*
+ * Use an arbitrary power well controlled by the PWR_WELL_CTL
+ * register.
+ */
+ MMIO_DH(HSW_PWR_WELL_CTL_BIOS(HSW_DISP_PW_GLOBAL), D_BDW, NULL,
+ power_well_ctl_mmio_write);
+ MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(HSW_DISP_PW_GLOBAL), D_BDW, NULL,
+ power_well_ctl_mmio_write);
+ MMIO_DH(HSW_PWR_WELL_CTL_KVMR, D_BDW, NULL, power_well_ctl_mmio_write);
+ MMIO_DH(HSW_PWR_WELL_CTL_DEBUG(HSW_DISP_PW_GLOBAL), D_BDW, NULL,
+ power_well_ctl_mmio_write);
MMIO_DH(HSW_PWR_WELL_CTL5, D_BDW, NULL, power_well_ctl_mmio_write);
MMIO_DH(HSW_PWR_WELL_CTL6, D_BDW, NULL, power_well_ctl_mmio_write);
@@ -2581,7 +2598,6 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_F(0x24d0, 48, F_CMD_ACCESS, 0, 0, D_BDW_PLUS,
NULL, force_nonpriv_write);
- MMIO_D(0x22040, D_BDW_PLUS);
MMIO_D(0x44484, D_BDW_PLUS);
MMIO_D(0x4448c, D_BDW_PLUS);
@@ -2636,9 +2652,13 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_F(_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
dp_aux_ch_ctl_mmio_write);
- MMIO_D(HSW_PWR_WELL_BIOS, D_SKL_PLUS);
- MMIO_DH(HSW_PWR_WELL_DRIVER, D_SKL_PLUS, NULL,
- skl_power_well_ctl_write);
+ /*
+ * Use an arbitrary power well controlled by the PWR_WELL_CTL
+ * register.
+ */
+ MMIO_D(HSW_PWR_WELL_CTL_BIOS(SKL_DISP_PW_MISC_IO), D_SKL_PLUS);
+ MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(SKL_DISP_PW_MISC_IO), D_SKL_PLUS, NULL,
+ skl_power_well_ctl_write);
MMIO_DH(GEN6_PCODE_MAILBOX, D_SKL_PLUS, NULL, mailbox_write);
MMIO_D(0xa210, D_SKL_PLUS);
@@ -2831,7 +2851,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(0x320f0, D_SKL | D_KBL);
MMIO_DFH(_REG_VCS2_EXCC, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_REG_VECS_EXCC, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_D(0x70034, D_SKL_PLUS);
MMIO_D(0x71034, D_SKL_PLUS);
MMIO_D(0x72034, D_SKL_PLUS);
@@ -2849,39 +2868,20 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
NULL, NULL);
MMIO_D(0x4ab8, D_KBL);
- MMIO_D(0x940c, D_SKL_PLUS);
MMIO_D(0x2248, D_SKL_PLUS | D_KBL);
- MMIO_D(0x4ab0, D_SKL | D_KBL);
- MMIO_D(0x20d4, D_SKL | D_KBL);
return 0;
}
-/* Special MMIO blocks. */
-static struct gvt_mmio_block {
- unsigned int device;
- i915_reg_t offset;
- unsigned int size;
- gvt_mmio_func read;
- gvt_mmio_func write;
-} gvt_mmio_blocks[] = {
- {D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
- {D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
- {D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
- pvinfo_mmio_read, pvinfo_mmio_write},
- {D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
- {D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
- {D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
-};
-
static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,
unsigned int offset)
{
unsigned long device = intel_gvt_get_device_type(gvt);
- struct gvt_mmio_block *block = gvt_mmio_blocks;
+ struct gvt_mmio_block *block = gvt->mmio.mmio_block;
+ int num = gvt->mmio.num_mmio_block;
int i;
- for (i = 0; i < ARRAY_SIZE(gvt_mmio_blocks); i++, block++) {
+ for (i = 0; i < num; i++, block++) {
if (!(device & block->device))
continue;
if (offset >= INTEL_GVT_MMIO_OFFSET(block->offset) &&
@@ -2912,6 +2912,17 @@ void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
gvt->mmio.mmio_attribute = NULL;
}
+/* Special MMIO blocks. */
+static struct gvt_mmio_block mmio_blocks[] = {
+ {D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
+ {D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
+ {D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
+ pvinfo_mmio_read, pvinfo_mmio_write},
+ {D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
+ {D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
+ {D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
+};
+
/**
* intel_gvt_setup_mmio_info - setup MMIO information table for GVT device
* @gvt: GVT device
@@ -2951,6 +2962,9 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
goto err;
}
+ gvt->mmio.mmio_block = mmio_blocks;
+ gvt->mmio.num_mmio_block = ARRAY_SIZE(mmio_blocks);
+
gvt_dbg_mmio("traced %u virtual mmio registers\n",
gvt->mmio.num_tracked_mmio);
return 0;
@@ -3030,7 +3044,7 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
gvt_mmio_func func;
int ret;
- if (WARN_ON(bytes > 4))
+ if (WARN_ON(bytes > 8))
return -EINVAL;
/*
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index fd0c85f9ef3c..83e88c70272a 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -1170,10 +1170,27 @@ vgpu_id_show(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "\n");
}
+static ssize_t
+hw_id_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct mdev_device *mdev = mdev_from_dev(dev);
+
+ if (mdev) {
+ struct intel_vgpu *vgpu = (struct intel_vgpu *)
+ mdev_get_drvdata(mdev);
+ return sprintf(buf, "%u\n",
+ vgpu->shadow_ctx->hw_id);
+ }
+ return sprintf(buf, "\n");
+}
+
static DEVICE_ATTR_RO(vgpu_id);
+static DEVICE_ATTR_RO(hw_id);
static struct attribute *intel_vgpu_attrs[] = {
&dev_attr_vgpu_id.attr,
+ &dev_attr_hw_id.attr,
NULL
};
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
index 504e57c3bc23..2ea542257f03 100644
--- a/drivers/gpu/drm/i915/gvt/render.c
+++ b/drivers/gpu/drm/i915/gvt/render.c
@@ -207,18 +207,16 @@ static void load_mocs(struct intel_vgpu *vgpu, int ring_id)
offset.reg = regs[ring_id];
for (i = 0; i < 64; i++) {
- gen9_render_mocs[ring_id][i] = I915_READ(offset);
+ gen9_render_mocs[ring_id][i] = I915_READ_FW(offset);
I915_WRITE(offset, vgpu_vreg(vgpu, offset));
- POSTING_READ(offset);
offset.reg += 4;
}
if (ring_id == RCS) {
l3_offset.reg = 0xb020;
for (i = 0; i < 32; i++) {
- gen9_render_mocs_L3[i] = I915_READ(l3_offset);
- I915_WRITE(l3_offset, vgpu_vreg(vgpu, l3_offset));
- POSTING_READ(l3_offset);
+ gen9_render_mocs_L3[i] = I915_READ_FW(l3_offset);
+ I915_WRITE_FW(l3_offset, vgpu_vreg(vgpu, l3_offset));
l3_offset.reg += 4;
}
}
@@ -242,18 +240,16 @@ static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
offset.reg = regs[ring_id];
for (i = 0; i < 64; i++) {
- vgpu_vreg(vgpu, offset) = I915_READ(offset);
- I915_WRITE(offset, gen9_render_mocs[ring_id][i]);
- POSTING_READ(offset);
+ vgpu_vreg(vgpu, offset) = I915_READ_FW(offset);
+ I915_WRITE_FW(offset, gen9_render_mocs[ring_id][i]);
offset.reg += 4;
}
if (ring_id == RCS) {
l3_offset.reg = 0xb020;
for (i = 0; i < 32; i++) {
- vgpu_vreg(vgpu, l3_offset) = I915_READ(l3_offset);
- I915_WRITE(l3_offset, gen9_render_mocs_L3[i]);
- POSTING_READ(l3_offset);
+ vgpu_vreg(vgpu, l3_offset) = I915_READ_FW(l3_offset);
+ I915_WRITE_FW(l3_offset, gen9_render_mocs_L3[i]);
l3_offset.reg += 4;
}
}
@@ -272,6 +268,7 @@ static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
u32 ctx_ctrl = reg_state[CTX_CONTEXT_CONTROL_VAL];
u32 inhibit_mask =
_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
+ i915_reg_t last_reg = _MMIO(0);
if (IS_SKYLAKE(vgpu->gvt->dev_priv)
|| IS_KABYLAKE(vgpu->gvt->dev_priv)) {
@@ -287,7 +284,7 @@ static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
if (mmio->ring_id != ring_id)
continue;
- mmio->value = I915_READ(mmio->reg);
+ mmio->value = I915_READ_FW(mmio->reg);
/*
* if it is an inhibit context, load in_context mmio
@@ -304,13 +301,18 @@ static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
else
v = vgpu_vreg(vgpu, mmio->reg);
- I915_WRITE(mmio->reg, v);
- POSTING_READ(mmio->reg);
+ I915_WRITE_FW(mmio->reg, v);
+ last_reg = mmio->reg;
trace_render_mmio(vgpu->id, "load",
i915_mmio_reg_offset(mmio->reg),
mmio->value, v);
}
+
+ /* Make sure the switched MMIOs have taken effect. */
+ if (likely(INTEL_GVT_MMIO_OFFSET(last_reg)))
+ I915_READ_FW(last_reg);
+
handle_tlb_pending_event(vgpu, ring_id);
}
@@ -319,6 +321,7 @@ static void switch_mmio_to_host(struct intel_vgpu *vgpu, int ring_id)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct render_mmio *mmio;
+ i915_reg_t last_reg = _MMIO(0);
u32 v;
int i, array_size;
@@ -335,7 +338,7 @@ static void switch_mmio_to_host(struct intel_vgpu *vgpu, int ring_id)
if (mmio->ring_id != ring_id)
continue;
- vgpu_vreg(vgpu, mmio->reg) = I915_READ(mmio->reg);
+ vgpu_vreg(vgpu, mmio->reg) = I915_READ_FW(mmio->reg);
if (mmio->mask) {
vgpu_vreg(vgpu, mmio->reg) &= ~(mmio->mask << 16);
@@ -346,13 +349,17 @@ static void switch_mmio_to_host(struct intel_vgpu *vgpu, int ring_id)
if (mmio->in_context)
continue;
- I915_WRITE(mmio->reg, v);
- POSTING_READ(mmio->reg);
+ I915_WRITE_FW(mmio->reg, v);
+ last_reg = mmio->reg;
trace_render_mmio(vgpu->id, "restore",
i915_mmio_reg_offset(mmio->reg),
mmio->value, v);
}
+
+ /* Make sure the switched MMIOs have taken effect. */
+ if (likely(INTEL_GVT_MMIO_OFFSET(last_reg)))
+ I915_READ_FW(last_reg);
}
/**
@@ -367,12 +374,23 @@ static void switch_mmio_to_host(struct intel_vgpu *vgpu, int ring_id)
void intel_gvt_switch_mmio(struct intel_vgpu *pre,
struct intel_vgpu *next, int ring_id)
{
+ struct drm_i915_private *dev_priv;
+
if (WARN_ON(!pre && !next))
return;
gvt_dbg_render("switch ring %d from %s to %s\n", ring_id,
pre ? "vGPU" : "host", next ? "vGPU" : "HOST");
+ dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
+
+ /**
+ * We are using the raw mmio access wrappers to improve
+ * performance for batched mmio read/write, so we need to
+ * handle forcewake manually.
+ */
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
/**
* TODO: Optimize for vGPU to vGPU switch by merging
* switch_mmio_to_host() and switch_mmio_to_vgpu().
@@ -382,4 +400,6 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
if (next)
switch_mmio_to_vgpu(next, ring_id);
+
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
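/*
 * Editor's sketch (not part of this patch): the batched raw-MMIO pattern
 * used above, assuming the caller owns forcewake whenever the _FW
 * accessors are used. example_batched_mmio() is a hypothetical helper;
 * every other identifier appears in the code above.
 */
static void example_batched_mmio(struct drm_i915_private *dev_priv,
				 const i915_reg_t *regs, const u32 *values,
				 int count)
{
	i915_reg_t last_reg = _MMIO(0);
	int i;

	/* Raw accessors skip the per-access forcewake dance, so take it once. */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	for (i = 0; i < count; i++) {
		I915_WRITE_FW(regs[i], values[i]);
		last_reg = regs[i];
	}

	/* One posting read at the end instead of one POSTING_READ per write. */
	if (INTEL_GVT_MMIO_OFFSET(last_reg))
		I915_READ_FW(last_reg);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}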
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 4f7057d62d88..391800d2067b 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -184,41 +184,52 @@ static int shadow_context_status_change(struct notifier_block *nb,
return NOTIFY_OK;
}
-static int dispatch_workload(struct intel_vgpu_workload *workload)
+static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ struct intel_context *ce = &ctx->engine[engine->id];
+ u64 desc = 0;
+
+ desc = ce->lrc_desc;
+
+ /* Update bits 0-11 of the context descriptor, which include flags
+ * like GEN8_CTX_* cached in desc_template.
+ */
+ desc &= U64_MAX << 12;
+ desc |= ctx->desc_template & ((1ULL << 12) - 1);
+
+ ce->lrc_desc = desc;
+}
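/*
 * Editor's note (not part of this patch): a concrete example of the bit
 * splice above. U64_MAX << 12 is ~0xFFFULL and (1ULL << 12) - 1 is 0xFFF,
 * so if ce->lrc_desc is 0x0000000012345678 and ctx->desc_template is
 * 0x0000000000000ABC, the result keeps bits 63:12 of lrc_desc and takes
 * bits 11:0 from desc_template, giving 0x0000000012345ABC.
 */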
+
+/**
+ * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
+ * shadowing it, including the ring buffer, wa_ctx and ctx.
+ * @workload: an abstract entity for each execlist submission.
+ *
+ * This function is called before the workload is submitted to i915, to make
+ * sure the content of the workload is valid.
+ */
+int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
{
int ring_id = workload->ring_id;
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
- struct intel_engine_cs *engine = dev_priv->engine[ring_id];
struct drm_i915_gem_request *rq;
struct intel_vgpu *vgpu = workload->vgpu;
- struct intel_ring *ring;
int ret;
- gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
- ring_id, workload);
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+ if (workload->shadowed)
+ return 0;
shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
GEN8_CTX_ADDRESSING_MODE_SHIFT;
- mutex_lock(&dev_priv->drm.struct_mutex);
-
- /* pin shadow context by gvt even the shadow context will be pinned
- * when i915 alloc request. That is because gvt will update the guest
- * context from shadow context when workload is completed, and at that
- * moment, i915 may already unpined the shadow context to make the
- * shadow_ctx pages invalid. So gvt need to pin itself. After update
- * the guest context, gvt can unpin the shadow_ctx safely.
- */
- ring = engine->context_pin(engine, shadow_ctx);
- if (IS_ERR(ring)) {
- ret = PTR_ERR(ring);
- gvt_vgpu_err("fail to pin shadow context\n");
- workload->status = ret;
- mutex_unlock(&dev_priv->drm.struct_mutex);
- return ret;
- }
+ if (!test_and_set_bit(ring_id, vgpu->shadow_ctx_desc_updated))
+ shadow_context_descriptor_update(shadow_ctx,
+ dev_priv->engine[ring_id]);
rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
if (IS_ERR(rq)) {
@@ -231,7 +242,7 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
workload->req = i915_gem_request_get(rq);
- ret = intel_gvt_scan_and_shadow_workload(workload);
+ ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
if (ret)
goto out;
@@ -246,25 +257,61 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
if (ret)
goto out;
+ workload->shadowed = true;
+
+out:
+ return ret;
+}
+
+static int dispatch_workload(struct intel_vgpu_workload *workload)
+{
+ int ring_id = workload->ring_id;
+ struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
+ struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+ struct intel_engine_cs *engine = dev_priv->engine[ring_id];
+ struct intel_vgpu *vgpu = workload->vgpu;
+ struct intel_ring *ring;
+ int ret = 0;
+
+ gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
+ ring_id, workload);
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+
+ ret = intel_gvt_scan_and_shadow_workload(workload);
+ if (ret)
+ goto out;
+
if (workload->prepare) {
ret = workload->prepare(workload);
if (ret)
goto out;
}
- gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
- ring_id, workload->req);
+ /* Pin the shadow context by gvt even though the shadow context will be
+ * pinned when i915 allocates the request. That is because gvt will
+ * update the guest context from the shadow context when the workload is
+ * completed, and at that moment i915 may already have unpinned the
+ * shadow context, making the shadow_ctx pages invalid. So gvt needs to
+ * pin it itself. After updating the guest context, gvt can unpin the
+ * shadow_ctx safely.
+ */
+ ring = engine->context_pin(engine, shadow_ctx);
+ if (IS_ERR(ring)) {
+ ret = PTR_ERR(ring);
+ gvt_vgpu_err("fail to pin shadow context\n");
+ goto out;
+ }
- ret = 0;
- workload->dispatched = true;
out:
if (ret)
workload->status = ret;
- if (!IS_ERR_OR_NULL(rq))
- i915_add_request(rq);
- else
- engine->context_unpin(engine, shadow_ctx);
+ if (!IS_ERR_OR_NULL(workload->req)) {
+ gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
+ ring_id, workload->req);
+ i915_add_request(workload->req);
+ workload->dispatched = true;
+ }
mutex_unlock(&dev_priv->drm.struct_mutex);
return ret;
@@ -432,7 +479,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
i915_gem_request_put(fetch_and_zero(&workload->req));
- if (!workload->status && !vgpu->resetting) {
+ if (!workload->status && !(vgpu->resetting_eng &
+ ENGINE_MASK(ring_id))) {
update_guest_context(workload);
for_each_set_bit(event, workload->pending_events,
@@ -616,7 +664,7 @@ err:
void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
{
- i915_gem_context_put_unlocked(vgpu->shadow_ctx);
+ i915_gem_context_put(vgpu->shadow_ctx);
}
int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
@@ -630,5 +678,7 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
vgpu->shadow_ctx->engine[RCS].initialised = true;
+ bitmap_zero(vgpu->shadow_ctx_desc_updated, I915_NUM_ENGINES);
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 9b6bf51e9b9b..0d431a968a32 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -82,6 +82,7 @@ struct intel_vgpu_workload {
struct drm_i915_gem_request *req;
/* if this workload has been dispatched to i915? */
bool dispatched;
+ bool shadowed;
int status;
struct intel_vgpu_mm *shadow_mm;
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 90c14e6e3ea0..02c61a1ad56a 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -43,6 +43,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
vgpu_vreg(vgpu, vgtif_reg(version_minor)) = 0;
vgpu_vreg(vgpu, vgtif_reg(display_ready)) = 0;
vgpu_vreg(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
+ vgpu_vreg(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT;
vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
vgpu_aperture_gmadr_base(vgpu);
vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
@@ -480,11 +481,13 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
{
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+ unsigned int resetting_eng = dmlr ? ALL_ENGINES : engine_mask;
gvt_dbg_core("------------------------------------------\n");
gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n",
vgpu->id, dmlr, engine_mask);
- vgpu->resetting = true;
+
+ vgpu->resetting_eng = resetting_eng;
intel_vgpu_stop_schedule(vgpu);
/*
@@ -497,16 +500,16 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
mutex_lock(&gvt->lock);
}
- intel_vgpu_reset_execlist(vgpu, dmlr ? ALL_ENGINES : engine_mask);
+ intel_vgpu_reset_execlist(vgpu, resetting_eng);
/* full GPU reset or device model level reset */
if (engine_mask == ALL_ENGINES || dmlr) {
- intel_vgpu_reset_gtt(vgpu, dmlr);
-
/*fence will not be reset during virtual reset */
- if (dmlr)
+ if (dmlr) {
+ intel_vgpu_reset_gtt(vgpu);
intel_vgpu_reset_resource(vgpu);
+ }
intel_vgpu_reset_mmio(vgpu, dmlr);
populate_pvinfo_page(vgpu);
@@ -520,7 +523,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
}
}
- vgpu->resetting = false;
+ vgpu->resetting_eng = 0;
gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
gvt_dbg_core("------------------------------------------\n");
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 00d8967c8512..e4d4b6b41e26 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -28,6 +28,7 @@
#include <linux/debugfs.h>
#include <linux/sort.h>
+#include <linux/sched/mm.h>
#include "intel_drv.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
@@ -543,75 +544,6 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
return 0;
}
-static int i915_gem_pageflip_info(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct intel_crtc *crtc;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- for_each_intel_crtc(dev, crtc) {
- const char pipe = pipe_name(crtc->pipe);
- const char plane = plane_name(crtc->plane);
- struct intel_flip_work *work;
-
- spin_lock_irq(&dev->event_lock);
- work = crtc->flip_work;
- if (work == NULL) {
- seq_printf(m, "No flip due on pipe %c (plane %c)\n",
- pipe, plane);
- } else {
- u32 pending;
- u32 addr;
-
- pending = atomic_read(&work->pending);
- if (pending) {
- seq_printf(m, "Flip ioctl preparing on pipe %c (plane %c)\n",
- pipe, plane);
- } else {
- seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
- pipe, plane);
- }
- if (work->flip_queued_req) {
- struct intel_engine_cs *engine = work->flip_queued_req->engine;
-
- seq_printf(m, "Flip queued on %s at seqno %x, last submitted seqno %x [current breadcrumb %x], completed? %d\n",
- engine->name,
- work->flip_queued_req->global_seqno,
- intel_engine_last_submit(engine),
- intel_engine_get_seqno(engine),
- i915_gem_request_completed(work->flip_queued_req));
- } else
- seq_printf(m, "Flip not associated with any ring\n");
- seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
- work->flip_queued_vblank,
- work->flip_ready_vblank,
- intel_crtc_get_vblank_counter(crtc));
- seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
-
- if (INTEL_GEN(dev_priv) >= 4)
- addr = I915_HI_DISPBASE(I915_READ(DSPSURF(crtc->plane)));
- else
- addr = I915_READ(DSPADDR(crtc->plane));
- seq_printf(m, "Current scanout address 0x%08x\n", addr);
-
- if (work->pending_flip_obj) {
- seq_printf(m, "New framebuffer address 0x%08lx\n", (long)work->gtt_offset);
- seq_printf(m, "MMIO update completed? %d\n", addr == work->gtt_offset);
- }
- }
- spin_unlock_irq(&dev->event_lock);
- }
-
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -1159,7 +1091,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
reqf = I915_READ(GEN6_RPNSWREQ);
- if (IS_GEN9(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 9)
reqf >>= 23;
else {
reqf &= ~GEN6_TURBO_DISABLE;
@@ -1181,7 +1113,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
- if (IS_GEN9(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 9)
cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
@@ -1210,7 +1142,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
dev_priv->rps.pm_intrmsk_mbz);
seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
seq_printf(m, "Render p-state ratio: %d\n",
- (gt_perf_status & (IS_GEN9(dev_priv) ? 0x1ff00 : 0xff00)) >> 8);
+ (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
seq_printf(m, "Render p-state VID: %d\n",
gt_perf_status & 0xff);
seq_printf(m, "Render p-state limit: %d\n",
@@ -1241,18 +1173,21 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
rp_state_cap >> 16) & 0xff;
- max_freq *= (IS_GEN9_BC(dev_priv) ? GEN9_FREQ_SCALER : 1);
+ max_freq *= (IS_GEN9_BC(dev_priv) ||
+ IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
max_freq = (rp_state_cap & 0xff00) >> 8;
- max_freq *= (IS_GEN9_BC(dev_priv) ? GEN9_FREQ_SCALER : 1);
+ max_freq *= (IS_GEN9_BC(dev_priv) ||
+ IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
rp_state_cap >> 0) & 0xff;
- max_freq *= (IS_GEN9_BC(dev_priv) ? GEN9_FREQ_SCALER : 1);
+ max_freq *= (IS_GEN9_BC(dev_priv) ||
+ IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
seq_printf(m, "Max overclocked frequency: %dMHz\n",
@@ -1407,6 +1342,23 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
return 0;
}
+static int i915_reset_info(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct i915_gpu_error *error = &dev_priv->gpu_error;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
+
+ for_each_engine(engine, dev_priv, id) {
+ seq_printf(m, "%s = %u\n", engine->name,
+ i915_reset_engine_count(error, engine));
+ }
+
+ return 0;
+}
+
static int ironlake_drpc_info(struct seq_file *m)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -1838,7 +1790,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
if (ret)
goto out;
- if (IS_GEN9_BC(dev_priv)) {
+ if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
/* Convert GT frequency to 50 HZ units */
min_gpu_freq =
dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
@@ -1858,7 +1810,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
&ia_freq);
seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
intel_gpu_freq(dev_priv, (gpu_freq *
- (IS_GEN9_BC(dev_priv) ?
+ (IS_GEN9_BC(dev_priv) ||
+ IS_CANNONLAKE(dev_priv) ?
GEN9_FREQ_SCALER : 1))),
((ia_freq >> 0) & 0xff) * 100,
((ia_freq >> 8) & 0xff) * 100);
@@ -1914,7 +1867,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
return ret;
#ifdef CONFIG_DRM_FBDEV_EMULATION
- if (dev_priv->fbdev) {
+ if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
@@ -1970,7 +1923,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
if (ret)
return ret;
- list_for_each_entry(ctx, &dev_priv->context_list, link) {
+ list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
seq_printf(m, "HW context %u ", ctx->hw_id);
if (ctx->pid) {
struct task_struct *task;
@@ -2002,12 +1955,6 @@ static int i915_context_status(struct seq_file *m, void *unused)
seq_putc(m, '\n');
}
- seq_printf(m,
- "\tvma hashtable size=%u (actual %lu), count=%u\n",
- ctx->vma_lut.ht_size,
- BIT(ctx->vma_lut.ht_bits),
- ctx->vma_lut.ht_count);
-
seq_putc(m, '\n');
}
@@ -2076,7 +2023,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
if (ret)
return ret;
- list_for_each_entry(ctx, &dev_priv->context_list, link)
+ list_for_each_entry(ctx, &dev_priv->contexts.list, link)
for_each_engine(engine, dev_priv, id)
i915_dump_lrc_obj(m, ctx, engine);
@@ -2310,6 +2257,8 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
seq_printf(m, "GPU busy? %s [%d requests]\n",
yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
+ seq_printf(m, "Boosts outstanding? %d\n",
+ atomic_read(&dev_priv->rps.num_waiters));
seq_printf(m, "Frequency requested %d\n",
intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
@@ -2323,22 +2272,20 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq));
mutex_lock(&dev->filelist_mutex);
- spin_lock(&dev_priv->rps.client_lock);
list_for_each_entry_reverse(file, &dev->filelist, lhead) {
struct drm_i915_file_private *file_priv = file->driver_priv;
struct task_struct *task;
rcu_read_lock();
task = pid_task(file->pid, PIDTYPE_PID);
- seq_printf(m, "%s [%d]: %d boosts%s\n",
+ seq_printf(m, "%s [%d]: %d boosts\n",
task ? task->comm : "<unknown>",
task ? task->pid : -1,
- file_priv->rps.boosts,
- list_empty(&file_priv->rps.link) ? "" : ", active");
+ atomic_read(&file_priv->rps.boosts));
rcu_read_unlock();
}
- seq_printf(m, "Kernel (anonymous) boosts: %d\n", dev_priv->rps.boosts);
- spin_unlock(&dev_priv->rps.client_lock);
+ seq_printf(m, "Kernel (anonymous) boosts: %d\n",
+ atomic_read(&dev_priv->rps.boosts));
mutex_unlock(&dev->filelist_mutex);
if (INTEL_GEN(dev_priv) >= 6 &&
@@ -2831,7 +2778,7 @@ out:
static int i915_energy_uJ(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- u64 power;
+ unsigned long long power;
u32 units;
if (INTEL_GEN(dev_priv) < 6)
@@ -2839,15 +2786,18 @@ static int i915_energy_uJ(struct seq_file *m, void *data)
intel_runtime_pm_get(dev_priv);
- rdmsrl(MSR_RAPL_POWER_UNIT, power);
- power = (power & 0x1f00) >> 8;
- units = 1000000 / (1 << power); /* convert to uJ */
+ if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) {
+ intel_runtime_pm_put(dev_priv);
+ return -ENODEV;
+ }
+
+ units = (power & 0x1f00) >> 8;
power = I915_READ(MCH_SECP_NRG_STTS);
- power *= units;
+ power = (1000000 * power) >> units; /* convert to uJ */
intel_runtime_pm_put(dev_priv);
- seq_printf(m, "%llu", (long long unsigned)power);
+ seq_printf(m, "%llu", power);
return 0;
}
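/*
 * Editor's note (not part of this patch): with this change the RAPL
 * energy-status-units field (bits 12:8 of MSR_RAPL_POWER_UNIT) is used
 * directly as a shift: each counter tick is 1/2^units joules, so
 * microjoules = counter * 1000000 >> units. For a typical units value
 * of 14, one tick is about 61 uJ.
 */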
@@ -3289,6 +3239,7 @@ static int i915_display_info(struct seq_file *m, void *unused)
static int i915_engine_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct i915_gpu_error *error = &dev_priv->gpu_error;
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -3312,6 +3263,8 @@ static int i915_engine_info(struct seq_file *m, void *unused)
engine->hangcheck.seqno,
jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
engine->timeline->inflight_seqnos);
+ seq_printf(m, "\tReset count: %d\n",
+ i915_reset_engine_count(error, engine));
rcu_read_lock();
@@ -3370,8 +3323,10 @@ static int i915_engine_info(struct seq_file *m, void *unused)
ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
read = GEN8_CSB_READ_PTR(ptr);
write = GEN8_CSB_WRITE_PTR(ptr);
- seq_printf(m, "\tExeclist CSB read %d, write %d\n",
- read, write);
+ seq_printf(m, "\tExeclist CSB read %d, write %d, interrupt posted? %s\n",
+ read, write,
+ yesno(test_bit(ENGINE_IRQ_EXECLIST,
+ &engine->irq_posted)));
if (read >= GEN8_CSB_ENTRIES)
read = 0;
if (write >= GEN8_CSB_ENTRIES)
@@ -3758,13 +3713,18 @@ static ssize_t i915_displayport_test_active_write(struct file *file,
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
+ struct intel_encoder *encoder;
+
if (connector->connector_type !=
DRM_MODE_CONNECTOR_DisplayPort)
continue;
- if (connector->status == connector_status_connected &&
- connector->encoder != NULL) {
- intel_dp = enc_to_intel_dp(connector->encoder);
+ encoder = to_intel_encoder(connector->encoder);
+ if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
+ if (encoder && connector->status == connector_status_connected) {
+ intel_dp = enc_to_intel_dp(&encoder->base);
status = kstrtoint(input_buffer, 10, &val);
if (status < 0)
break;
@@ -3796,13 +3756,18 @@ static int i915_displayport_test_active_show(struct seq_file *m, void *data)
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
+ struct intel_encoder *encoder;
+
if (connector->connector_type !=
DRM_MODE_CONNECTOR_DisplayPort)
continue;
- if (connector->status == connector_status_connected &&
- connector->encoder != NULL) {
- intel_dp = enc_to_intel_dp(connector->encoder);
+ encoder = to_intel_encoder(connector->encoder);
+ if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
+ if (encoder && connector->status == connector_status_connected) {
+ intel_dp = enc_to_intel_dp(&encoder->base);
if (intel_dp->compliance.test_active)
seq_puts(m, "1");
else
@@ -3842,13 +3807,18 @@ static int i915_displayport_test_data_show(struct seq_file *m, void *data)
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
+ struct intel_encoder *encoder;
+
if (connector->connector_type !=
DRM_MODE_CONNECTOR_DisplayPort)
continue;
- if (connector->status == connector_status_connected &&
- connector->encoder != NULL) {
- intel_dp = enc_to_intel_dp(connector->encoder);
+ encoder = to_intel_encoder(connector->encoder);
+ if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
+ if (encoder && connector->status == connector_status_connected) {
+ intel_dp = enc_to_intel_dp(&encoder->base);
if (intel_dp->compliance.test_type ==
DP_TEST_LINK_EDID_READ)
seq_printf(m, "%lx",
@@ -3895,13 +3865,18 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data)
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
+ struct intel_encoder *encoder;
+
if (connector->connector_type !=
DRM_MODE_CONNECTOR_DisplayPort)
continue;
- if (connector->status == connector_status_connected &&
- connector->encoder != NULL) {
- intel_dp = enc_to_intel_dp(connector->encoder);
+ encoder = to_intel_encoder(connector->encoder);
+ if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
+ if (encoder && connector->status == connector_status_connected) {
+ intel_dp = enc_to_intel_dp(&encoder->base);
seq_printf(m, "%02lx", intel_dp->compliance.test_type);
} else
seq_puts(m, "0");
@@ -4331,16 +4306,16 @@ i915_drop_caches_set(void *data, u64 val)
mutex_unlock(&dev->struct_mutex);
}
- lockdep_set_current_reclaim_state(GFP_KERNEL);
+ fs_reclaim_acquire(GFP_KERNEL);
if (val & DROP_BOUND)
- i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);
+ i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_BOUND);
if (val & DROP_UNBOUND)
- i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_UNBOUND);
+ i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
if (val & DROP_SHRINK_ALL)
i915_gem_shrink_all(dev_priv);
- lockdep_clear_current_reclaim_state();
+ fs_reclaim_release(GFP_KERNEL);
if (val & DROP_FREED) {
synchronize_rcu();
@@ -4580,7 +4555,7 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
sseu->slice_mask |= BIT(s);
- if (IS_GEN9_BC(dev_priv))
+ if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv))
sseu->subslice_mask =
INTEL_INFO(dev_priv)->sseu.subslice_mask;
@@ -4810,7 +4785,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_gtt", i915_gem_gtt_info, 0},
{"i915_gem_pin_display", i915_gem_gtt_info, 0, (void *)1},
{"i915_gem_stolen", i915_gem_stolen_list_info },
- {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
{"i915_gem_request", i915_gem_request_info, 0},
{"i915_gem_seqno", i915_gem_seqno_info, 0},
{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
@@ -4824,6 +4798,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_huc_load_status", i915_huc_load_status_info, 0},
{"i915_frequency_info", i915_frequency_info, 0},
{"i915_hangcheck_info", i915_hangcheck_info, 0},
+ {"i915_reset_info", i915_reset_info, 0},
{"i915_drpc_info", i915_drpc_info, 0},
{"i915_emon_status", i915_emon_status, 0},
{"i915_ring_freq_table", i915_ring_freq_table, 0},
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index fc307e03943c..43100229613c 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -132,9 +132,13 @@ static enum intel_pch intel_virt_detect_pch(struct drm_i915_private *dev_priv)
DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
} else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
ret = PCH_CPT;
- DRM_DEBUG_KMS("Assuming CouarPoint PCH\n");
+ DRM_DEBUG_KMS("Assuming CougarPoint PCH\n");
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
ret = PCH_LPT;
+ if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
+ dev_priv->pch_id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
+ else
+ dev_priv->pch_id = INTEL_PCH_LPT_DEVICE_ID_TYPE;
DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
ret = PCH_SPT;
@@ -173,29 +177,25 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
if (pch->vendor == PCI_VENDOR_ID_INTEL) {
unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
- unsigned short id_ext = pch->device &
- INTEL_PCH_DEVICE_ID_MASK_EXT;
+
+ dev_priv->pch_id = id;
if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
- dev_priv->pch_id = id;
dev_priv->pch_type = PCH_IBX;
DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
WARN_ON(!IS_GEN5(dev_priv));
} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
- dev_priv->pch_id = id;
dev_priv->pch_type = PCH_CPT;
DRM_DEBUG_KMS("Found CougarPoint PCH\n");
- WARN_ON(!(IS_GEN6(dev_priv) ||
- IS_IVYBRIDGE(dev_priv)));
+ WARN_ON(!IS_GEN6(dev_priv) &&
+ !IS_IVYBRIDGE(dev_priv));
} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
/* PantherPoint is CPT compatible */
- dev_priv->pch_id = id;
dev_priv->pch_type = PCH_CPT;
DRM_DEBUG_KMS("Found PantherPoint PCH\n");
- WARN_ON(!(IS_GEN6(dev_priv) ||
- IS_IVYBRIDGE(dev_priv)));
+ WARN_ON(!IS_GEN6(dev_priv) &&
+ !IS_IVYBRIDGE(dev_priv));
} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
- dev_priv->pch_id = id;
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint PCH\n");
WARN_ON(!IS_HASWELL(dev_priv) &&
@@ -203,51 +203,60 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
WARN_ON(IS_HSW_ULT(dev_priv) ||
IS_BDW_ULT(dev_priv));
} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
- dev_priv->pch_id = id;
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
WARN_ON(!IS_HASWELL(dev_priv) &&
!IS_BROADWELL(dev_priv));
WARN_ON(!IS_HSW_ULT(dev_priv) &&
!IS_BDW_ULT(dev_priv));
+ } else if (id == INTEL_PCH_WPT_DEVICE_ID_TYPE) {
+ /* WildcatPoint is LPT compatible */
+ dev_priv->pch_type = PCH_LPT;
+ DRM_DEBUG_KMS("Found WildcatPoint PCH\n");
+ WARN_ON(!IS_HASWELL(dev_priv) &&
+ !IS_BROADWELL(dev_priv));
+ WARN_ON(IS_HSW_ULT(dev_priv) ||
+ IS_BDW_ULT(dev_priv));
+ } else if (id == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE) {
+ /* WildcatPoint is LPT compatible */
+ dev_priv->pch_type = PCH_LPT;
+ DRM_DEBUG_KMS("Found WildcatPoint LP PCH\n");
+ WARN_ON(!IS_HASWELL(dev_priv) &&
+ !IS_BROADWELL(dev_priv));
+ WARN_ON(!IS_HSW_ULT(dev_priv) &&
+ !IS_BDW_ULT(dev_priv));
} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
- dev_priv->pch_id = id;
dev_priv->pch_type = PCH_SPT;
DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
WARN_ON(!IS_SKYLAKE(dev_priv) &&
!IS_KABYLAKE(dev_priv));
- } else if (id_ext == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
- dev_priv->pch_id = id_ext;
+ } else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_SPT;
DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
WARN_ON(!IS_SKYLAKE(dev_priv) &&
!IS_KABYLAKE(dev_priv));
} else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
- dev_priv->pch_id = id;
dev_priv->pch_type = PCH_KBP;
- DRM_DEBUG_KMS("Found KabyPoint PCH\n");
+ DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n");
WARN_ON(!IS_SKYLAKE(dev_priv) &&
!IS_KABYLAKE(dev_priv));
} else if (id == INTEL_PCH_CNP_DEVICE_ID_TYPE) {
- dev_priv->pch_id = id;
dev_priv->pch_type = PCH_CNP;
- DRM_DEBUG_KMS("Found CannonPoint PCH\n");
+ DRM_DEBUG_KMS("Found Cannon Lake PCH (CNP)\n");
WARN_ON(!IS_CANNONLAKE(dev_priv) &&
!IS_COFFEELAKE(dev_priv));
- } else if (id_ext == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE) {
- dev_priv->pch_id = id_ext;
+ } else if (id == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_CNP;
- DRM_DEBUG_KMS("Found CannonPoint LP PCH\n");
+ DRM_DEBUG_KMS("Found Cannon Lake LP PCH (CNP-LP)\n");
WARN_ON(!IS_CANNONLAKE(dev_priv) &&
!IS_COFFEELAKE(dev_priv));
- } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
- (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
- ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
+ } else if (id == INTEL_PCH_P2X_DEVICE_ID_TYPE ||
+ id == INTEL_PCH_P3X_DEVICE_ID_TYPE ||
+ (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE &&
pch->subsystem_vendor ==
PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
pch->subsystem_device ==
PCI_SUBDEVICE_ID_QEMU)) {
- dev_priv->pch_id = id;
dev_priv->pch_type =
intel_virt_detect_pch(dev_priv);
} else
@@ -331,6 +340,8 @@ static int i915_getparam(struct drm_device *dev, void *data,
break;
case I915_PARAM_HAS_GPU_RESET:
value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv);
+ if (value && intel_has_reset_engine(dev_priv))
+ value = 2;
break;
case I915_PARAM_HAS_RESOURCE_STREAMER:
value = HAS_RESOURCE_STREAMER(dev_priv);
@@ -377,6 +388,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_EXEC_FENCE:
case I915_PARAM_HAS_EXEC_CAPTURE:
case I915_PARAM_HAS_EXEC_BATCH_FIRST:
+ case I915_PARAM_HAS_EXEC_FENCE_ARRAY:
/* For the time being all of these are always true;
* if some supported hardware does not have one of these
* features this value needs to be provided from
@@ -585,16 +597,19 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
static void i915_gem_fini(struct drm_i915_private *dev_priv)
{
+ /* Flush any outstanding unpin_work. */
+ i915_gem_drain_workqueue(dev_priv);
+
mutex_lock(&dev_priv->drm.struct_mutex);
intel_uc_fini_hw(dev_priv);
i915_gem_cleanup_engines(dev_priv);
- i915_gem_context_fini(dev_priv);
+ i915_gem_contexts_fini(dev_priv);
i915_gem_cleanup_userptr(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
i915_gem_drain_freed_objects(dev_priv);
- WARN_ON(!list_empty(&dev_priv->context_list));
+ WARN_ON(!list_empty(&dev_priv->contexts.list));
}
static int i915_load_modeset_init(struct drm_device *dev)
@@ -862,7 +877,6 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
spin_lock_init(&dev_priv->uncore.lock);
spin_lock_init(&dev_priv->mm.object_stat_lock);
- spin_lock_init(&dev_priv->mmio_flip_lock);
mutex_init(&dev_priv->sb_lock);
mutex_init(&dev_priv->modeset_restore_lock);
mutex_init(&dev_priv->av_mutex);
@@ -1227,6 +1241,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
*/
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
+ intel_fbdev_unregister(dev_priv);
intel_audio_deinit(dev_priv);
intel_gpu_ips_teardown();
@@ -1319,7 +1334,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
ret = i915_load_modeset_init(&dev_priv->drm);
if (ret < 0)
- goto out_cleanup_vblank;
+ goto out_cleanup_hw;
i915_driver_register(dev_priv);
@@ -1336,8 +1351,6 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
-out_cleanup_vblank:
- drm_vblank_cleanup(&dev_priv->drm);
out_cleanup_hw:
i915_driver_cleanup_hw(dev_priv);
out_cleanup_mmio:
@@ -1360,7 +1373,7 @@ void i915_driver_unload(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
- intel_fbdev_fini(dev);
+ i915_driver_unregister(dev_priv);
if (i915_gem_suspend(dev_priv))
DRM_ERROR("failed to idle hardware; continuing to unload!\n");
@@ -1371,10 +1384,6 @@ void i915_driver_unload(struct drm_device *dev)
intel_gvt_cleanup(dev_priv);
- i915_driver_unregister(dev_priv);
-
- drm_vblank_cleanup(dev);
-
intel_modeset_cleanup(dev);
/*
@@ -1400,9 +1409,6 @@ void i915_driver_unload(struct drm_device *dev)
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
i915_reset_error_state(dev_priv);
- /* Flush any outstanding unpin_work. */
- drain_workqueue(dev_priv->wq);
-
i915_gem_fini(dev_priv);
intel_uc_fini_fw(dev_priv);
intel_fbc_cleanup_cfb(dev_priv);
@@ -1427,9 +1433,10 @@ static void i915_driver_release(struct drm_device *dev)
static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
+ struct drm_i915_private *i915 = to_i915(dev);
int ret;
- ret = i915_gem_open(dev, file);
+ ret = i915_gem_open(i915, file);
if (ret)
return ret;
@@ -1459,7 +1466,7 @@ static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
struct drm_i915_file_private *file_priv = file->driver_priv;
mutex_lock(&dev->struct_mutex);
- i915_gem_context_close(dev, file);
+ i915_gem_context_close(file);
i915_gem_release(dev, file);
mutex_unlock(&dev->struct_mutex);
@@ -1825,7 +1832,8 @@ static int i915_resume_switcheroo(struct drm_device *dev)
/**
* i915_reset - reset chip after a hang
- * @dev_priv: device private to reset
+ * @i915: #drm_i915_private to reset
+ * @flags: reset options, e.g. I915_RESET_QUIET
*
* Reset the chip. Useful if a hang is detected. Marks the device as wedged
* on failure.
@@ -1840,33 +1848,34 @@ static int i915_resume_switcheroo(struct drm_device *dev)
* - re-init interrupt state
* - re-init display
*/
-void i915_reset(struct drm_i915_private *dev_priv)
+void i915_reset(struct drm_i915_private *i915, unsigned int flags)
{
- struct i915_gpu_error *error = &dev_priv->gpu_error;
+ struct i915_gpu_error *error = &i915->gpu_error;
int ret;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ lockdep_assert_held(&i915->drm.struct_mutex);
GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));
if (!test_bit(I915_RESET_HANDOFF, &error->flags))
return;
/* Clear any previous failed attempts at recovery. Time to try again. */
- if (!i915_gem_unset_wedged(dev_priv))
+ if (!i915_gem_unset_wedged(i915))
goto wakeup;
+ if (!(flags & I915_RESET_QUIET))
+ dev_notice(i915->drm.dev, "Resetting chip after gpu hang\n");
error->reset_count++;
- pr_notice("drm/i915: Resetting chip after gpu hang\n");
- disable_irq(dev_priv->drm.irq);
- ret = i915_gem_reset_prepare(dev_priv);
+ disable_irq(i915->drm.irq);
+ ret = i915_gem_reset_prepare(i915);
if (ret) {
DRM_ERROR("GPU recovery failed\n");
- intel_gpu_reset(dev_priv, ALL_ENGINES);
+ intel_gpu_reset(i915, ALL_ENGINES);
goto error;
}
- ret = intel_gpu_reset(dev_priv, ALL_ENGINES);
+ ret = intel_gpu_reset(i915, ALL_ENGINES);
if (ret) {
if (ret != -ENODEV)
DRM_ERROR("Failed to reset chip: %i\n", ret);
@@ -1875,8 +1884,8 @@ void i915_reset(struct drm_i915_private *dev_priv)
goto error;
}
- i915_gem_reset(dev_priv);
- intel_overlay_reset(dev_priv);
+ i915_gem_reset(i915);
+ intel_overlay_reset(i915);
/* Ok, now get things going again... */
@@ -1892,17 +1901,17 @@ void i915_reset(struct drm_i915_private *dev_priv)
* was running at the time of the reset (i.e. we weren't VT
* switched away).
*/
- ret = i915_gem_init_hw(dev_priv);
+ ret = i915_gem_init_hw(i915);
if (ret) {
DRM_ERROR("Failed hw init on reset %d\n", ret);
goto error;
}
- i915_queue_hangcheck(dev_priv);
+ i915_queue_hangcheck(i915);
finish:
- i915_gem_reset_finish(dev_priv);
- enable_irq(dev_priv->drm.irq);
+ i915_gem_reset_finish(i915);
+ enable_irq(i915->drm.irq);
wakeup:
clear_bit(I915_RESET_HANDOFF, &error->flags);
@@ -1910,10 +1919,74 @@ wakeup:
return;
error:
- i915_gem_set_wedged(dev_priv);
+ i915_gem_set_wedged(i915);
+ i915_gem_retire_requests(i915);
goto finish;
}
+/**
+ * i915_reset_engine - reset GPU engine to recover from a hang
+ * @engine: engine to reset
+ * @flags: options
+ *
+ * Reset a specific GPU engine. Useful if a hang is detected.
+ * Returns zero on successful reset or otherwise an error code.
+ *
+ * Procedure is:
+ * - identify the request that caused the hang and drop it
+ * - reset engine (which will force the engine to idle)
+ * - re-init/configure engine
+ */
+int i915_reset_engine(struct intel_engine_cs *engine, unsigned int flags)
+{
+ struct i915_gpu_error *error = &engine->i915->gpu_error;
+ struct drm_i915_gem_request *active_request;
+ int ret;
+
+ GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
+
+ if (!(flags & I915_RESET_QUIET)) {
+ dev_notice(engine->i915->drm.dev,
+ "Resetting %s after gpu hang\n", engine->name);
+ }
+ error->reset_engine_count[engine->id]++;
+
+ active_request = i915_gem_reset_prepare_engine(engine);
+ if (IS_ERR(active_request)) {
+ DRM_DEBUG_DRIVER("Previous reset failed, promote to full reset\n");
+ ret = PTR_ERR(active_request);
+ goto out;
+ }
+
+ ret = intel_gpu_reset(engine->i915, intel_engine_flag(engine));
+ if (ret) {
+ /* If we fail here, we expect to fall back to a global reset */
+ DRM_DEBUG_DRIVER("Failed to reset %s, ret=%d\n",
+ engine->name, ret);
+ goto out;
+ }
+
+ /*
+ * The request that caused the hang is stuck on the ELSP; we know the
+ * active request and can drop it, and adjust the head to skip the
+ * offending request so that the remaining requests in the queue resume
+ * executing.
+ */
+ i915_gem_reset_engine(engine, active_request);
+
+ /*
+ * The engine and its registers (and workarounds in case of render)
+ * have been reset to their default values. Follow the init_ring
+ * process to program RING_MODE, HWSP and re-enable submission.
+ */
+ ret = engine->init_hw(engine);
+ if (ret)
+ goto out;
+
+out:
+ i915_gem_reset_finish_engine(engine);
+ return ret;
+}
+
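/*
 * Editor's sketch (not part of this patch): with this series,
 * I915_PARAM_HAS_GPU_RESET reports 2 when per-engine reset is available
 * (see the i915_getparam() hunk earlier in this diff), so userspace can
 * probe for it roughly as below. example_has_engine_reset() is a
 * hypothetical helper using the libdrm ioctl wrapper; include paths
 * assume libdrm's pkg-config flags.
 */
#include <xf86drm.h>
#include <i915_drm.h>

static int example_has_engine_reset(int drm_fd)
{
	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_HAS_GPU_RESET,
		.value = &value,
	};

	if (drmIoctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return 0;

	return value >= 2; /* 1: full GPU reset only, 2: plus engine reset */
}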
static int i915_pm_suspend(struct device *kdev)
{
struct pci_dev *pdev = to_pci_dev(kdev);
@@ -2657,6 +2730,8 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
static struct drm_driver driver = {
@@ -2665,12 +2740,11 @@ static struct drm_driver driver = {
*/
.driver_features =
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
- DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC,
+ DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ,
.release = i915_driver_release,
.open = i915_driver_open,
.lastclose = i915_driver_lastclose,
.postclose = i915_driver_postclose,
- .set_busid = drm_pci_set_busid,
.gem_close_object = i915_gem_close_object,
.gem_free_object_unlocked = i915_gem_free_object,
@@ -2683,7 +2757,6 @@ static struct drm_driver driver = {
.dumb_create = i915_gem_dumb_create,
.dumb_map_offset = i915_gem_mmap_gtt,
- .dumb_destroy = drm_gem_dumb_destroy,
.ioctls = i915_ioctls,
.num_ioctls = ARRAY_SIZE(i915_ioctls),
.fops = &i915_driver_fops,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index e1f7c97a338a..bd74641ab7f6 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -80,8 +80,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20170619"
-#define DRIVER_TIMESTAMP 1497857498
+#define DRIVER_DATE "20170818"
+#define DRIVER_TIMESTAMP 1503088845
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -122,7 +122,7 @@ static inline bool is_fixed16_zero(uint_fixed_16_16_t val)
return false;
}
-static inline uint_fixed_16_16_t u32_to_fixed_16_16(uint32_t val)
+static inline uint_fixed_16_16_t u32_to_fixed16(uint32_t val)
{
uint_fixed_16_16_t fp;
@@ -132,17 +132,17 @@ static inline uint_fixed_16_16_t u32_to_fixed_16_16(uint32_t val)
return fp;
}
-static inline uint32_t fixed_16_16_to_u32_round_up(uint_fixed_16_16_t fp)
+static inline uint32_t fixed16_to_u32_round_up(uint_fixed_16_16_t fp)
{
return DIV_ROUND_UP(fp.val, 1 << 16);
}
-static inline uint32_t fixed_16_16_to_u32(uint_fixed_16_16_t fp)
+static inline uint32_t fixed16_to_u32(uint_fixed_16_16_t fp)
{
return fp.val >> 16;
}
-static inline uint_fixed_16_16_t min_fixed_16_16(uint_fixed_16_16_t min1,
+static inline uint_fixed_16_16_t min_fixed16(uint_fixed_16_16_t min1,
uint_fixed_16_16_t min2)
{
uint_fixed_16_16_t min;
@@ -151,7 +151,7 @@ static inline uint_fixed_16_16_t min_fixed_16_16(uint_fixed_16_16_t min1,
return min;
}
-static inline uint_fixed_16_16_t max_fixed_16_16(uint_fixed_16_16_t max1,
+static inline uint_fixed_16_16_t max_fixed16(uint_fixed_16_16_t max1,
uint_fixed_16_16_t max2)
{
uint_fixed_16_16_t max;
@@ -160,6 +160,14 @@ static inline uint_fixed_16_16_t max_fixed_16_16(uint_fixed_16_16_t max1,
return max;
}
+static inline uint_fixed_16_16_t clamp_u64_to_fixed16(uint64_t val)
+{
+ uint_fixed_16_16_t fp;
+ WARN_ON(val >> 32);
+ fp.val = clamp_t(uint32_t, val, 0, ~0);
+ return fp;
+}
+
static inline uint32_t div_round_up_fixed16(uint_fixed_16_16_t val,
uint_fixed_16_16_t d)
{
@@ -170,48 +178,30 @@ static inline uint32_t mul_round_up_u32_fixed16(uint32_t val,
uint_fixed_16_16_t mul)
{
uint64_t intermediate_val;
- uint32_t result;
intermediate_val = (uint64_t) val * mul.val;
intermediate_val = DIV_ROUND_UP_ULL(intermediate_val, 1 << 16);
WARN_ON(intermediate_val >> 32);
- result = clamp_t(uint32_t, intermediate_val, 0, ~0);
- return result;
+ return clamp_t(uint32_t, intermediate_val, 0, ~0);
}
static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val,
uint_fixed_16_16_t mul)
{
uint64_t intermediate_val;
- uint_fixed_16_16_t fp;
intermediate_val = (uint64_t) val.val * mul.val;
intermediate_val = intermediate_val >> 16;
- WARN_ON(intermediate_val >> 32);
- fp.val = clamp_t(uint32_t, intermediate_val, 0, ~0);
- return fp;
+ return clamp_u64_to_fixed16(intermediate_val);
}
-static inline uint_fixed_16_16_t fixed_16_16_div(uint32_t val, uint32_t d)
+static inline uint_fixed_16_16_t div_fixed16(uint32_t val, uint32_t d)
{
- uint_fixed_16_16_t fp, res;
-
- fp = u32_to_fixed_16_16(val);
- res.val = DIV_ROUND_UP(fp.val, d);
- return res;
-}
-
-static inline uint_fixed_16_16_t fixed_16_16_div_u64(uint32_t val, uint32_t d)
-{
- uint_fixed_16_16_t res;
uint64_t interm_val;
interm_val = (uint64_t)val << 16;
interm_val = DIV_ROUND_UP_ULL(interm_val, d);
- WARN_ON(interm_val >> 32);
- res.val = (uint32_t) interm_val;
-
- return res;
+ return clamp_u64_to_fixed16(interm_val);
}
static inline uint32_t div_round_up_u32_fixed16(uint32_t val,
@@ -225,16 +215,32 @@ static inline uint32_t div_round_up_u32_fixed16(uint32_t val,
return clamp_t(uint32_t, interm_val, 0, ~0);
}
-static inline uint_fixed_16_16_t mul_u32_fixed_16_16(uint32_t val,
+static inline uint_fixed_16_16_t mul_u32_fixed16(uint32_t val,
uint_fixed_16_16_t mul)
{
uint64_t intermediate_val;
- uint_fixed_16_16_t fp;
intermediate_val = (uint64_t) val * mul.val;
- WARN_ON(intermediate_val >> 32);
- fp.val = (uint32_t) intermediate_val;
- return fp;
+ return clamp_u64_to_fixed16(intermediate_val);
+}
+
+static inline uint_fixed_16_16_t add_fixed16(uint_fixed_16_16_t add1,
+ uint_fixed_16_16_t add2)
+{
+ uint64_t interm_sum;
+
+ interm_sum = (uint64_t) add1.val + add2.val;
+ return clamp_u64_to_fixed16(interm_sum);
+}
+
+static inline uint_fixed_16_16_t add_fixed16_u32(uint_fixed_16_16_t add1,
+ uint32_t add2)
+{
+ uint64_t interm_sum;
+ uint_fixed_16_16_t interm_add2 = u32_to_fixed16(add2);
+
+ interm_sum = (uint64_t) add1.val + interm_add2.val;
+ return clamp_u64_to_fixed16(interm_sum);
}
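
The fixed-point helpers being renamed and consolidated above all operate on the same 16.16 layout: the upper 16 bits of a u32 hold the integer part, the lower 16 bits the fraction, and arithmetic uses 64-bit intermediates clamped back into 32 bits. A minimal standalone sketch of that layout in plain C (userspace types, not the driver's uint_fixed_16_16_t wrapper) for reference:

    #include <stdint.h>
    #include <stdio.h>

    /* 16.16 fixed point stored in a u32: value = fp / 65536.0 */
    static uint32_t u32_to_fp16(uint32_t v)  { return v << 16; } /* clamp omitted for brevity */
    static uint32_t fp16_to_u32(uint32_t fp) { return fp >> 16; }

    /* Multiply with a 64-bit intermediate, then clamp back into 32 bits. */
    static uint32_t fp16_mul(uint32_t a, uint32_t b)
    {
            uint64_t tmp = ((uint64_t)a * b) >> 16;
            return tmp > UINT32_MAX ? UINT32_MAX : (uint32_t)tmp;
    }

    int main(void)
    {
            uint32_t a = u32_to_fp16(3) + (1u << 15); /* 3.5 in 16.16 */
            uint32_t b = u32_to_fp16(2);              /* 2.0 */

            printf("3.5 * 2.0 = %u\n", fp16_to_u32(fp16_mul(a, b))); /* prints 7 */
            return 0;
    }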
static inline const char *yesno(bool v)
@@ -584,8 +590,7 @@ struct drm_i915_file_private {
struct idr context_idr;
struct intel_rps_client {
- struct list_head link;
- unsigned boosts;
+ atomic_t boosts;
} rps;
unsigned int bsd_engine;
@@ -597,7 +602,7 @@ struct drm_i915_file_private {
* to limit the badly behaving clients access to gpu.
*/
#define I915_MAX_CLIENT_CONTEXT_BANS 3
- int context_bans;
+ atomic_t context_bans;
};
/* Used by dp and fdi links */
@@ -641,6 +646,7 @@ struct intel_opregion {
u32 swsci_sbcb_sub_functions;
struct opregion_asle *asle;
void *rvda;
+ void *vbt_firmware;
const void *vbt;
u32 vbt_size;
u32 *lid_state;
@@ -710,11 +716,6 @@ struct drm_i915_display_funcs {
void (*fdi_link_train)(struct intel_crtc *crtc,
const struct intel_crtc_state *crtc_state);
void (*init_clock_gating)(struct drm_i915_private *dev_priv);
- int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_request *req,
- uint32_t flags);
void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
/* clock updates for mode set */
/* cursor updates */
@@ -753,6 +754,7 @@ struct intel_csr {
func(has_csr); \
func(has_ddi); \
func(has_dp_mst); \
+ func(has_reset_engine); \
func(has_fbc); \
func(has_fpga_dbg); \
func(has_full_ppgtt); \
@@ -917,6 +919,7 @@ struct i915_gpu_state {
enum intel_engine_hangcheck_action hangcheck_action;
struct i915_address_space *vm;
int num_requests;
+ u32 reset_count;
/* position of active request inside the ring */
u32 rq_head, rq_post, rq_tail;
@@ -1056,6 +1059,11 @@ struct intel_fbc {
bool underrun_detected;
struct work_struct underrun_work;
+ /*
+ * Due to the atomic rules we can't access some structures without the
+ * appropriate locking, so we cache information here in order to avoid
+ * these problems.
+ */
struct intel_fbc_state_cache {
struct i915_vma *vma;
@@ -1077,6 +1085,13 @@ struct intel_fbc {
} fb;
} state_cache;
+ /*
+ * This structure contains everything that's relevant to program the
+ * hardware registers. When we want to figure out if we need to disable
+ * and re-enable FBC for a new configuration, we just check if there's
+ * something different in the struct. The genx_fbc_activate functions
+ * are supposed to read from it in order to program the registers.
+ */
struct intel_fbc_reg_params {
struct i915_vma *vma;
@@ -1149,11 +1164,11 @@ struct i915_psr {
enum intel_pch {
PCH_NONE = 0, /* No PCH present */
PCH_IBX, /* Ibexpeak PCH */
- PCH_CPT, /* Cougarpoint PCH */
- PCH_LPT, /* Lynxpoint PCH */
+ PCH_CPT, /* Cougarpoint/Pantherpoint PCH */
+ PCH_LPT, /* Lynxpoint/Wildcatpoint PCH */
PCH_SPT, /* Sunrisepoint PCH */
- PCH_KBP, /* Kabypoint PCH */
- PCH_CNP, /* Cannonpoint PCH */
+ PCH_KBP, /* Kaby Lake PCH */
+ PCH_CNP, /* Cannon Lake PCH */
PCH_NOP,
};
@@ -1166,6 +1181,7 @@ enum intel_sbi_destination {
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
+#define QUIRK_INCREASE_T12_DELAY (1<<6)
struct intel_fbdev;
struct intel_fbc_work;
@@ -1301,13 +1317,10 @@ struct intel_gen6_power_mgmt {
int last_adj;
enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
- spinlock_t client_lock;
- struct list_head clients;
- bool client_boost;
-
bool enabled;
struct delayed_work autoenable_work;
- unsigned boosts;
+ atomic_t num_waiters;
+ atomic_t boosts;
/* manual wa residency calculations */
struct intel_rps_ei ei;
@@ -1383,12 +1396,23 @@ struct i915_power_well {
bool hw_enabled;
u64 domains;
/* unique identifier for this power well */
- unsigned long id;
+ enum i915_power_well_id id;
/*
 * Arbitrary data associated with this power well. Platform and power
* well specific.
*/
- unsigned long data;
+ union {
+ struct {
+ enum dpio_phy phy;
+ } bxt;
+ struct {
+ /* Mask of pipes whose IRQ logic is backed by the pw */
+ u8 irq_pipe_mask;
+ /* The pw is backing the VGA functionality */
+ bool has_vga:1;
+ bool has_fuses:1;
+ } hsw;
+ };
const struct i915_power_well_ops *ops;
};
@@ -1505,6 +1529,8 @@ struct i915_gpu_error {
/* Protected by the above dev->gpu_error.lock. */
struct i915_gpu_state *first_error;
+ atomic_t pending_fb_pin;
+
unsigned long missed_irq_rings;
/**
@@ -1550,6 +1576,12 @@ struct i915_gpu_error {
* inspect the bit and do the reset directly, otherwise the worker
* waits for the struct_mutex.
*
+ * #I915_RESET_ENGINE[num_engines] - Since the driver doesn't need to
+ * acquire the struct_mutex to reset an engine, we need an explicit
+ * flag to prevent two concurrent reset attempts in the same engine.
+ * As the number of engines continues to grow, allocate the flags from
+ * the most significant bits.
+ *
* #I915_WEDGED - If reset fails and we can no longer use the GPU,
* we set the #I915_WEDGED bit. Prior to command submission, e.g.
* i915_gem_request_alloc(), this bit is checked and the sequence
@@ -1558,7 +1590,12 @@ struct i915_gpu_error {
unsigned long flags;
#define I915_RESET_BACKOFF 0
#define I915_RESET_HANDOFF 1
+#define I915_RESET_MODESET 2
#define I915_WEDGED (BITS_PER_LONG - 1)
+#define I915_RESET_ENGINE (I915_WEDGED - I915_NUM_ENGINES)
+
+ /** Number of times an engine has been reset */
+ u32 reset_engine_count[I915_NUM_ENGINES];
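
The I915_RESET_ENGINE base added above reserves one flag bit per engine, counted down from I915_WEDGED, so that per-engine resets can be serialized without taking struct_mutex. As a hedged illustration only (this helper is invented and is not the driver's actual reset path), claiming such a bit could look like:

    /* Illustrative only: derive this engine's reset bit and try to claim it. */
    static bool example_try_claim_engine_reset(struct drm_i915_private *i915,
                                               unsigned int engine_id)
    {
            unsigned int bit = I915_RESET_ENGINE + engine_id;

            /* Give up if the whole GPU is already marked unusable. */
            if (test_bit(I915_WEDGED, &i915->gpu_error.flags))
                    return false;

            /* Returns false if another reset of this engine is in flight. */
            return !test_and_set_bit(bit, &i915->gpu_error.flags);
    }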
/**
* Waitqueue to signal when a hang is detected. Used to for waiters
@@ -1869,6 +1906,7 @@ struct i915_workarounds {
struct i915_virtual_gpu {
bool active;
+ u32 caps;
};
/* used in computing the new watermarks state */
@@ -1888,6 +1926,24 @@ struct i915_oa_reg {
u32 value;
};
+struct i915_oa_config {
+ char uuid[UUID_STRING_LEN + 1];
+ int id;
+
+ const struct i915_oa_reg *mux_regs;
+ u32 mux_regs_len;
+ const struct i915_oa_reg *b_counter_regs;
+ u32 b_counter_regs_len;
+ const struct i915_oa_reg *flex_regs;
+ u32 flex_regs_len;
+
+ struct attribute_group sysfs_metric;
+ struct attribute *attrs[2];
+ struct device_attribute sysfs_metric_id;
+
+ atomic_t ref_count;
+};
+
struct i915_perf_stream;
/**
@@ -2000,6 +2056,11 @@ struct i915_perf_stream {
* type of configured stream.
*/
const struct i915_perf_stream_ops *ops;
+
+ /**
+ * @oa_config: The OA configuration used by the stream.
+ */
+ struct i915_oa_config *oa_config;
};
/**
@@ -2007,6 +2068,25 @@ struct i915_perf_stream {
*/
struct i915_oa_ops {
/**
+ * @is_valid_b_counter_reg: Validates register's address for
+ * programming boolean counters for a particular platform.
+ */
+ bool (*is_valid_b_counter_reg)(struct drm_i915_private *dev_priv,
+ u32 addr);
+
+ /**
+ * @is_valid_mux_reg: Validates register's address for programming mux
+ * for a particular platform.
+ */
+ bool (*is_valid_mux_reg)(struct drm_i915_private *dev_priv, u32 addr);
+
+ /**
+ * @is_valid_flex_reg: Validates register's address for programming
+ * flex EU filtering for a particular platform.
+ */
+ bool (*is_valid_flex_reg)(struct drm_i915_private *dev_priv, u32 addr);
+
+ /**
* @init_oa_buffer: Resets the head and tail pointers of the
* circular buffer for periodic OA reports.
*
@@ -2024,20 +2104,13 @@ struct i915_oa_ops {
void (*init_oa_buffer)(struct drm_i915_private *dev_priv);
/**
- * @select_metric_set: The auto generated code that checks whether a
- * requested OA config is applicable to the system and if so sets up
- * the mux, oa and flex eu register config pointers according to the
- * current dev_priv->perf.oa.metrics_set.
- */
- int (*select_metric_set)(struct drm_i915_private *dev_priv);
-
- /**
* @enable_metric_set: Selects and applies any MUX configuration to set
* up the Boolean and Custom (B/C) counters that are part of the
* counter reports being sampled. May apply system constraints such as
* disabling EU clock gating as required.
*/
- int (*enable_metric_set)(struct drm_i915_private *dev_priv);
+ int (*enable_metric_set)(struct drm_i915_private *dev_priv,
+ const struct i915_oa_config *oa_config);
/**
* @disable_metric_set: Remove system constraints associated with using
@@ -2083,6 +2156,7 @@ struct drm_i915_private {
struct kmem_cache *objects;
struct kmem_cache *vmas;
+ struct kmem_cache *luts;
struct kmem_cache *requests;
struct kmem_cache *dependencies;
struct kmem_cache *priorities;
@@ -2133,9 +2207,6 @@ struct drm_i915_private {
/* protects the irq masks */
spinlock_t irq_lock;
- /* protects the mmio flip data */
- spinlock_t mmio_flip_lock;
-
bool display_irqs_enabled;
/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
@@ -2236,18 +2307,10 @@ struct drm_i915_private {
DECLARE_HASHTABLE(mm_structs, 7);
struct mutex mm_lock;
- /* The hw wants to have a stable context identifier for the lifetime
- * of the context (for OA, PASID, faults, etc). This is limited
- * in execlists to 21 bits.
- */
- struct ida context_hw_ida;
-#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
-
/* Kernel Modesetting */
struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
struct intel_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
- wait_queue_head_t pending_flip_queue;
#ifdef CONFIG_DEBUG_FS
struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
@@ -2303,11 +2366,9 @@ struct drm_i915_private {
struct drm_i915_gem_object *vlv_pctx;
-#ifdef CONFIG_DRM_FBDEV_EMULATION
/* list of fbdev register on this device */
struct intel_fbdev *fbdev;
struct work_struct fbdev_suspend_work;
-#endif
struct drm_property *broadcast_rgb_property;
struct drm_property *force_audio_property;
@@ -2321,7 +2382,18 @@ struct drm_i915_private {
*/
struct mutex av_mutex;
- struct list_head context_list;
+ struct {
+ struct list_head list;
+ struct llist_head free_list;
+ struct work_struct free_work;
+
+ /* The hw wants to have a stable context identifier for the
+ * lifetime of the context (for OA, PASID, faults, etc).
+ * This is limited in execlists to 21 bits.
+ */
+ struct ida hw_ida;
+#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
+ } contexts;
u32 fdi_rx_config;
@@ -2399,10 +2471,32 @@ struct drm_i915_private {
struct kobject *metrics_kobj;
struct ctl_table_header *sysctl_header;
+ /*
+ * Lock associated with adding/modifying/removing OA configs
+ * in dev_priv->perf.metrics_idr.
+ */
+ struct mutex metrics_lock;
+
+ /*
+ * List of dynamic configurations; you need to hold
+ * dev_priv->perf.metrics_lock to access it.
+ */
+ struct idr metrics_idr;
+
+ /*
+ * Lock associated with anything below within this structure
+ * except exclusive_stream.
+ */
struct mutex lock;
struct list_head streams;
struct {
+ /*
+ * The stream currently using the OA unit. If accessed
+ * outside a syscall associated with its file
+ * descriptor, you need to hold
+ * dev_priv->drm.struct_mutex.
+ */
struct i915_perf_stream *exclusive_stream;
u32 specific_ctx_id;
@@ -2421,16 +2515,7 @@ struct drm_i915_private {
int period_exponent;
int timestamp_frequency;
- int metrics_set;
-
- const struct i915_oa_reg *mux_regs[6];
- int mux_regs_lens[6];
- int n_mux_configs;
-
- const struct i915_oa_reg *b_counter_regs;
- int b_counter_regs_len;
- const struct i915_oa_reg *flex_regs;
- int flex_regs_len;
+ struct i915_oa_config test_config;
struct {
struct i915_vma *vma;
@@ -2517,7 +2602,6 @@ struct drm_i915_private {
struct i915_oa_ops ops;
const struct i915_oa_format *oa_formats;
- int n_builtin_sets;
} oa;
} perf;
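
The metrics_lock/metrics_idr pair introduced above stores user-registered OA configurations; insertions and lookups go through the IDR while holding the mutex. A minimal sketch of registering a configuration under that lock (the helper name, start id and error handling are assumptions, not the body of i915_perf_add_config_ioctl()):

    /* Sketch only: publish an OA config in the metrics IDR under metrics_lock. */
    static int example_register_oa_config(struct drm_i915_private *dev_priv,
                                          struct i915_oa_config *oa_config)
    {
            int id;

            mutex_lock(&dev_priv->perf.metrics_lock);

            /* Start ids at 1 here purely for illustration. */
            id = idr_alloc(&dev_priv->perf.metrics_idr, oa_config, 1, 0, GFP_KERNEL);

            mutex_unlock(&dev_priv->perf.metrics_lock);

            return id; /* negative errno on failure, otherwise the new config id */
    }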
@@ -2996,16 +3080,17 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_POOLED_EU(dev_priv) ((dev_priv)->info.has_pooled_eu)
-#define INTEL_PCH_DEVICE_ID_MASK 0xff00
-#define INTEL_PCH_DEVICE_ID_MASK_EXT 0xff80
+#define INTEL_PCH_DEVICE_ID_MASK 0xff80
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
+#define INTEL_PCH_WPT_DEVICE_ID_TYPE 0x8c80
+#define INTEL_PCH_WPT_LP_DEVICE_ID_TYPE 0x9c80
#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
-#define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA200
+#define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA280
#define INTEL_PCH_CNP_DEVICE_ID_TYPE 0xA300
#define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE 0x9D80
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
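
Widening INTEL_PCH_DEVICE_ID_MASK from 0xff00 to 0xff80 above lets PCH detection distinguish the new Wildcatpoint ids (0x8c80/0x9c80) from Lynxpoint (0x8c00/0x9c00), which the old 8-bit mask collapsed together. A quick standalone check (the device ids below are made up for illustration):

    #include <stdio.h>

    int main(void)
    {
            unsigned int lpt = 0x8c02, wpt = 0x8c82; /* hypothetical PCH PCI ids */

            printf("old mask: %#x vs %#x\n", lpt & 0xff00, wpt & 0xff00); /* both 0x8c00 */
            printf("new mask: %#x vs %#x\n", lpt & 0xff80, wpt & 0xff80); /* 0x8c00 vs 0x8c80 */
            return 0;
    }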
@@ -3020,9 +3105,11 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
#define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT)
#define HAS_PCH_LPT_LP(dev_priv) \
- ((dev_priv)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
+ ((dev_priv)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE || \
+ (dev_priv)->pch_id == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE)
#define HAS_PCH_LPT_H(dev_priv) \
- ((dev_priv)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE)
+ ((dev_priv)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE || \
+ (dev_priv)->pch_id == INTEL_PCH_WPT_DEVICE_ID_TYPE)
#define HAS_PCH_CPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CPT)
#define HAS_PCH_IBX(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_IBX)
#define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
@@ -3088,7 +3175,13 @@ extern int i915_driver_load(struct pci_dev *pdev,
extern void i915_driver_unload(struct drm_device *dev);
extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
-extern void i915_reset(struct drm_i915_private *dev_priv);
+
+#define I915_RESET_QUIET BIT(0)
+extern void i915_reset(struct drm_i915_private *i915, unsigned int flags);
+extern int i915_reset_engine(struct intel_engine_cs *engine,
+ unsigned int flags);
+
+extern bool intel_has_reset_engine(struct drm_i915_private *dev_priv);
extern int intel_guc_reset(struct drm_i915_private *dev_priv);
extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
extern void intel_hangcheck_init(struct drm_i915_private *dev_priv);
@@ -3107,7 +3200,8 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
void intel_hpd_init(struct drm_i915_private *dev_priv);
void intel_hpd_init_work(struct drm_i915_private *dev_priv);
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
-bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port);
+enum port intel_hpd_pin_to_port(enum hpd_pin pin);
+enum hpd_pin intel_hpd_pin(enum port port);
bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
@@ -3276,6 +3370,26 @@ static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
} while (flush_work(&i915->mm.free_work));
}
+static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
+{
+ /*
+ * Similar to objects above (see i915_gem_drain_freed_objects()): in
+ * general we have workers that are armed by RCU and then rearm
+ * themselves in their callbacks. To be paranoid, we need to
+ * drain the workqueue a second time after waiting for the RCU
+ * grace period so that we catch work queued via RCU from the first
+ * pass. As neither drain_workqueue() nor flush_workqueue() reports
+ * a result, we assume that no more than two passes are needed to
+ * catch all recursive RCU-delayed work.
+ */
+ int pass = 2;
+ do {
+ rcu_barrier();
+ drain_workqueue(i915->wq);
+ } while (--pass);
+}
+
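
The two-pass drain in i915_gem_drain_workqueue() above exists because work can be queued from inside RCU callbacks, so it only becomes visible to drain_workqueue() after a grace period. A hypothetical self-rearming release path of that shape (types and names invented purely for illustration):

    /* Hypothetical object whose release is bounced through RCU onto a
     * workqueue; this is the shape of worker the two-pass drain guards against. */
    struct example_obj {
            struct rcu_head rcu;
            struct work_struct release_work;
    };

    static void example_free_cb(struct rcu_head *rcu)
    {
            struct example_obj *obj = container_of(rcu, struct example_obj, rcu);

            /* Work queued from an RCU callback: invisible to the first drain pass. */
            queue_work(system_wq, &obj->release_work);
    }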
struct i915_vma * __must_check
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view,
@@ -3461,11 +3575,22 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
return READ_ONCE(error->reset_count);
}
+static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
+ struct intel_engine_cs *engine)
+{
+ return READ_ONCE(error->reset_engine_count[engine->id]);
+}
+
+struct drm_i915_gem_request *
+i915_gem_reset_prepare_engine(struct intel_engine_cs *engine);
int i915_gem_reset_prepare(struct drm_i915_private *dev_priv);
void i915_gem_reset(struct drm_i915_private *dev_priv);
+void i915_gem_reset_finish_engine(struct intel_engine_cs *engine);
void i915_gem_reset_finish(struct drm_i915_private *dev_priv);
void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv);
+void i915_gem_reset_engine(struct intel_engine_cs *engine,
+ struct drm_i915_gem_request *request);
void i915_gem_init_mmio(struct drm_i915_private *i915);
int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
@@ -3499,7 +3624,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
int align);
-int i915_gem_open(struct drm_device *dev, struct drm_file *file);
+int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
@@ -3531,38 +3656,23 @@ void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
struct sg_table *pages);
static inline struct i915_gem_context *
-i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
+__i915_gem_context_lookup_rcu(struct drm_i915_file_private *file_priv, u32 id)
{
- struct i915_gem_context *ctx;
-
- lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex);
-
- ctx = idr_find(&file_priv->context_idr, id);
- if (!ctx)
- return ERR_PTR(-ENOENT);
-
- return ctx;
+ return idr_find(&file_priv->context_idr, id);
}
static inline struct i915_gem_context *
-i915_gem_context_get(struct i915_gem_context *ctx)
-{
- kref_get(&ctx->ref);
- return ctx;
-}
-
-static inline void i915_gem_context_put(struct i915_gem_context *ctx)
+i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
{
- lockdep_assert_held(&ctx->i915->drm.struct_mutex);
- kref_put(&ctx->ref, i915_gem_context_free);
-}
+ struct i915_gem_context *ctx;
-static inline void i915_gem_context_put_unlocked(struct i915_gem_context *ctx)
-{
- struct mutex *lock = &ctx->i915->drm.struct_mutex;
+ rcu_read_lock();
+ ctx = __i915_gem_context_lookup_rcu(file_priv, id);
+ if (ctx && !kref_get_unless_zero(&ctx->ref))
+ ctx = NULL;
+ rcu_read_unlock();
- if (kref_put_mutex(&ctx->ref, i915_gem_context_free, lock))
- mutex_unlock(lock);
+ return ctx;
}
static inline struct intel_timeline *
@@ -3577,6 +3687,10 @@ i915_gem_context_lookup_timeline(struct i915_gem_context *ctx,
int i915_perf_open_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
+int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
void i915_oa_init_reg_state(struct intel_engine_cs *engine,
struct i915_gem_context *ctx,
uint32_t *reg_state);
@@ -3628,6 +3742,7 @@ i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
/* i915_gem_shrinker.c */
unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
unsigned long target,
+ unsigned long *nr_scanned,
unsigned flags);
#define I915_SHRINK_PURGEABLE 0x1
#define I915_SHRINK_UNBOUND 0x2
@@ -4064,6 +4179,11 @@ static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
{
+ /* nsecs_to_jiffies64() does not guard against overflow */
+ if (NSEC_PER_SEC % HZ &&
+ div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
+ return MAX_JIFFY_OFFSET;
+
return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
}
@@ -4210,10 +4330,11 @@ int remap_io_mapping(struct vm_area_struct *vma,
unsigned long addr, unsigned long pfn, unsigned long size,
struct io_mapping *iomap);
-static inline bool i915_gem_object_is_coherent(struct drm_i915_gem_object *obj)
+static inline bool
+intel_engine_can_store_dword(struct intel_engine_cs *engine)
{
- return (obj->cache_level != I915_CACHE_NONE ||
- HAS_LLC(to_i915(obj->base.dev)));
+ return __intel_engine_can_store_dword(INTEL_GEN(engine->i915),
+ engine->class);
}
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 969bac8404f1..287c6ead95b3 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -52,7 +52,7 @@ static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
if (obj->cache_dirty)
return false;
- if (!obj->cache_coherent)
+ if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
return true;
return obj->pin_display;
@@ -253,7 +253,7 @@ __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
if (needs_clflush &&
(obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
- !obj->cache_coherent)
+ !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
drm_clflush_sg(pages);
__start_cpu_write(obj);
@@ -388,7 +388,7 @@ i915_gem_object_wait_fence(struct dma_fence *fence,
*/
if (rps) {
if (INTEL_GEN(rq->i915) >= 6)
- gen6_rps_boost(rq->i915, rps, rq->emitted_jiffies);
+ gen6_rps_boost(rq, rps);
else
rps = NULL;
}
@@ -399,22 +399,6 @@ out:
if (flags & I915_WAIT_LOCKED && i915_gem_request_completed(rq))
i915_gem_request_retire_upto(rq);
- if (rps && i915_gem_request_global_seqno(rq) == intel_engine_last_submit(rq->engine)) {
- /* The GPU is now idle and this client has stalled.
- * Since no other client has submitted a request in the
- * meantime, assume that this client is the only one
- * supplying work to the GPU but is unable to keep that
- * work supplied because it is waiting. Since the GPU is
- * then never kept fully busy, RPS autoclocking will
- * keep the clocks relatively low, causing further delays.
- * Compensate by giving the synchronous client credit for
- * a waitboost next time.
- */
- spin_lock(&rq->i915->rps.client_lock);
- list_del_init(&rps->link);
- spin_unlock(&rq->i915->rps.client_lock);
- }
-
return timeout;
}
@@ -577,46 +561,6 @@ static struct intel_rps_client *to_rps_client(struct drm_file *file)
return &fpriv->rps;
}
-int
-i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
- int align)
-{
- int ret;
-
- if (align > obj->base.size)
- return -EINVAL;
-
- if (obj->ops == &i915_gem_phys_ops)
- return 0;
-
- if (obj->mm.madv != I915_MADV_WILLNEED)
- return -EFAULT;
-
- if (obj->base.filp == NULL)
- return -EINVAL;
-
- ret = i915_gem_object_unbind(obj);
- if (ret)
- return ret;
-
- __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
- if (obj->mm.pages)
- return -EBUSY;
-
- GEM_BUG_ON(obj->ops != &i915_gem_object_ops);
- obj->ops = &i915_gem_phys_ops;
-
- ret = i915_gem_object_pin_pages(obj);
- if (ret)
- goto err_xfer;
-
- return 0;
-
-err_xfer:
- obj->ops = &i915_gem_object_ops;
- return ret;
-}
-
static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
@@ -856,7 +800,8 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
if (ret)
return ret;
- if (obj->cache_coherent || !static_cpu_has(X86_FEATURE_CLFLUSH)) {
+ if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ ||
+ !static_cpu_has(X86_FEATURE_CLFLUSH)) {
ret = i915_gem_object_set_to_cpu_domain(obj, false);
if (ret)
goto err_unpin;
@@ -908,7 +853,8 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
if (ret)
return ret;
- if (obj->cache_coherent || !static_cpu_has(X86_FEATURE_CLFLUSH)) {
+ if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE ||
+ !static_cpu_has(X86_FEATURE_CLFLUSH)) {
ret = i915_gem_object_set_to_cpu_domain(obj, true);
if (ret)
goto err_unpin;
@@ -2408,7 +2354,7 @@ rebuild_st:
goto err_sg;
}
- i915_gem_shrink(dev_priv, 2 * page_count, *s++);
+ i915_gem_shrink(dev_priv, 2 * page_count, NULL, *s++);
cond_resched();
/* We've tried hard to allocate the memory by reaping
@@ -2756,34 +2702,38 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
return 0;
}
-static bool ban_context(const struct i915_gem_context *ctx)
+static bool ban_context(const struct i915_gem_context *ctx,
+ unsigned int score)
{
return (i915_gem_context_is_bannable(ctx) &&
- ctx->ban_score >= CONTEXT_SCORE_BAN_THRESHOLD);
+ score >= CONTEXT_SCORE_BAN_THRESHOLD);
}
static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
{
- ctx->guilty_count++;
- ctx->ban_score += CONTEXT_SCORE_GUILTY;
- if (ban_context(ctx))
- i915_gem_context_set_banned(ctx);
+ unsigned int score;
+ bool banned;
- DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? %s\n",
- ctx->name, ctx->ban_score,
- yesno(i915_gem_context_is_banned(ctx)));
+ atomic_inc(&ctx->guilty_count);
- if (!i915_gem_context_is_banned(ctx) || IS_ERR_OR_NULL(ctx->file_priv))
+ score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score);
+ banned = ban_context(ctx, score);
+ DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? %s\n",
+ ctx->name, score, yesno(banned));
+ if (!banned)
return;
- ctx->file_priv->context_bans++;
- DRM_DEBUG_DRIVER("client %s has had %d context banned\n",
- ctx->name, ctx->file_priv->context_bans);
+ i915_gem_context_set_banned(ctx);
+ if (!IS_ERR_OR_NULL(ctx->file_priv)) {
+ atomic_inc(&ctx->file_priv->context_bans);
+ DRM_DEBUG_DRIVER("client %s has had %d context banned\n",
+ ctx->name, atomic_read(&ctx->file_priv->context_bans));
+ }
}
static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx)
{
- ctx->active_count++;
+ atomic_inc(&ctx->active_count);
}
struct drm_i915_gem_request *
@@ -2832,46 +2782,62 @@ static bool engine_stalled(struct intel_engine_cs *engine)
return true;
}
+/*
+ * Ensure the irq handler finishes, and does not run again.
+ * Also return the active request so that we only search for it once.
+ */
+struct drm_i915_gem_request *
+i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
+{
+ struct drm_i915_gem_request *request = NULL;
+
+ /* Prevent the signaler thread from updating the request
+ * state (by calling dma_fence_signal) as we are processing
+ * the reset. The write from the GPU of the seqno is
+ * asynchronous and the signaler thread may see a different
+ * value from us and declare the request complete, even though
+ * the reset routine has picked that request as the active
+ * (incomplete) request. This conflict is not handled
+ * gracefully!
+ */
+ kthread_park(engine->breadcrumbs.signaler);
+
+ /* Prevent request submission to the hardware until we have
+ * completed the reset in i915_gem_reset_finish(). If a request
+ * is completed by one engine, it may then queue a request
+ * to a second via its engine->irq_tasklet *just* as we are
+ * calling engine->init_hw() and also writing the ELSP.
+ * Turning off the engine->irq_tasklet until the reset is over
+ * prevents the race.
+ */
+ tasklet_kill(&engine->irq_tasklet);
+ tasklet_disable(&engine->irq_tasklet);
+
+ if (engine->irq_seqno_barrier)
+ engine->irq_seqno_barrier(engine);
+
+ request = i915_gem_find_active_request(engine);
+ if (request && request->fence.error == -EIO)
+ request = ERR_PTR(-EIO); /* Previous reset failed! */
+
+ return request;
+}
+
int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
+ struct drm_i915_gem_request *request;
enum intel_engine_id id;
int err = 0;
- /* Ensure irq handler finishes, and not run again. */
for_each_engine(engine, dev_priv, id) {
- struct drm_i915_gem_request *request;
-
- /* Prevent the signaler thread from updating the request
- * state (by calling dma_fence_signal) as we are processing
- * the reset. The write from the GPU of the seqno is
- * asynchronous and the signaler thread may see a different
- * value to us and declare the request complete, even though
- * the reset routine have picked that request as the active
- * (incomplete) request. This conflict is not handled
- * gracefully!
- */
- kthread_park(engine->breadcrumbs.signaler);
-
- /* Prevent request submission to the hardware until we have
- * completed the reset in i915_gem_reset_finish(). If a request
- * is completed by one engine, it may then queue a request
- * to a second via its engine->irq_tasklet *just* as we are
- * calling engine->init_hw() and also writing the ELSP.
- * Turning off the engine->irq_tasklet until the reset is over
- * prevents the race.
- */
- tasklet_kill(&engine->irq_tasklet);
- tasklet_disable(&engine->irq_tasklet);
-
- if (engine->irq_seqno_barrier)
- engine->irq_seqno_barrier(engine);
-
- if (engine_stalled(engine)) {
- request = i915_gem_find_active_request(engine);
- if (request && request->fence.error == -EIO)
- err = -EIO; /* Previous reset failed! */
+ request = i915_gem_reset_prepare_engine(engine);
+ if (IS_ERR(request)) {
+ err = PTR_ERR(request);
+ continue;
}
+
+ engine->hangcheck.active_request = request;
}
i915_gem_revoke_fences(dev_priv);
@@ -2921,12 +2887,11 @@ static void engine_skip_context(struct drm_i915_gem_request *request)
spin_unlock_irqrestore(&engine->timeline->lock, flags);
}
-/* Returns true if the request was guilty of hang */
-static bool i915_gem_reset_request(struct drm_i915_gem_request *request)
+/* Returns the request if it was guilty of the hang */
+static struct drm_i915_gem_request *
+i915_gem_reset_request(struct intel_engine_cs *engine,
+ struct drm_i915_gem_request *request)
{
- /* Read once and return the resolution */
- const bool guilty = engine_stalled(request->engine);
-
/* The guilty request will get skipped on a hung engine.
*
* Users of client default contexts do not rely on logical
@@ -2948,29 +2913,47 @@ static bool i915_gem_reset_request(struct drm_i915_gem_request *request)
* subsequent hangs.
*/
- if (guilty) {
+ if (engine_stalled(engine)) {
i915_gem_context_mark_guilty(request->ctx);
skip_request(request);
+
+ /* If this context is now banned, skip all pending requests. */
+ if (i915_gem_context_is_banned(request->ctx))
+ engine_skip_context(request);
} else {
- i915_gem_context_mark_innocent(request->ctx);
- dma_fence_set_error(&request->fence, -EAGAIN);
+ /*
+ * Since this is not the hung engine, it may have advanced
+ * since the hang declaration. Double check by refinding
+ * the active request at the time of the reset.
+ */
+ request = i915_gem_find_active_request(engine);
+ if (request) {
+ i915_gem_context_mark_innocent(request->ctx);
+ dma_fence_set_error(&request->fence, -EAGAIN);
+
+ /* Rewind the engine to replay the incomplete rq */
+ spin_lock_irq(&engine->timeline->lock);
+ request = list_prev_entry(request, link);
+ if (&request->link == &engine->timeline->requests)
+ request = NULL;
+ spin_unlock_irq(&engine->timeline->lock);
+ }
}
- return guilty;
+ return request;
}
-static void i915_gem_reset_engine(struct intel_engine_cs *engine)
+void i915_gem_reset_engine(struct intel_engine_cs *engine,
+ struct drm_i915_gem_request *request)
{
- struct drm_i915_gem_request *request;
+ engine->irq_posted = 0;
- request = i915_gem_find_active_request(engine);
- if (request && i915_gem_reset_request(request)) {
+ if (request)
+ request = i915_gem_reset_request(engine, request);
+
+ if (request) {
DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
engine->name, request->global_seqno);
-
- /* If this context is now banned, skip all pending requests. */
- if (i915_gem_context_is_banned(request->ctx))
- engine_skip_context(request);
}
/* Setup the CS to resume from the breadcrumb of the hung request */
@@ -2989,7 +2972,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
for_each_engine(engine, dev_priv, id) {
struct i915_gem_context *ctx;
- i915_gem_reset_engine(engine);
+ i915_gem_reset_engine(engine, engine->hangcheck.active_request);
ctx = fetch_and_zero(&engine->last_retired_context);
if (ctx)
engine->context_unpin(engine, ctx);
@@ -3005,6 +2988,12 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
}
}
+void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
+{
+ tasklet_enable(&engine->irq_tasklet);
+ kthread_unpark(engine->breadcrumbs.signaler);
+}
+
void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
@@ -3013,13 +3002,14 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
lockdep_assert_held(&dev_priv->drm.struct_mutex);
for_each_engine(engine, dev_priv, id) {
- tasklet_enable(&engine->irq_tasklet);
- kthread_unpark(engine->breadcrumbs.signaler);
+ engine->hangcheck.active_request = NULL;
+ i915_gem_reset_finish_engine(engine);
}
}
static void nop_submit_request(struct drm_i915_gem_request *request)
{
+ GEM_BUG_ON(!i915_terminally_wedged(&request->i915->gpu_error));
dma_fence_set_error(&request->fence, -EIO);
i915_gem_request_submit(request);
intel_engine_init_global_seqno(request->engine, request->global_seqno);
@@ -3041,16 +3031,10 @@ static void engine_set_wedged(struct intel_engine_cs *engine)
/* Mark all executing requests as skipped */
spin_lock_irqsave(&engine->timeline->lock, flags);
list_for_each_entry(request, &engine->timeline->requests, link)
- dma_fence_set_error(&request->fence, -EIO);
+ if (!i915_gem_request_completed(request))
+ dma_fence_set_error(&request->fence, -EIO);
spin_unlock_irqrestore(&engine->timeline->lock, flags);
- /* Mark all pending requests as complete so that any concurrent
- * (lockless) lookup doesn't try and wait upon the request as we
- * reset it.
- */
- intel_engine_init_global_seqno(engine,
- intel_engine_last_submit(engine));
-
/*
* Clear the execlists queue up before freeing the requests, as those
* are the ones that keep the context and ringbuffer backing objects
@@ -3071,7 +3055,21 @@ static void engine_set_wedged(struct intel_engine_cs *engine)
engine->execlist_first = NULL;
spin_unlock_irqrestore(&engine->timeline->lock, flags);
+
+ /* The port is checked prior to scheduling a tasklet, but
+ * just in case we have suspended the tasklet to do the
+ * wedging make sure that when it wakes, it decides there
+ * is no work to do by clearing the irq_posted bit.
+ */
+ clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
}
+
+ /* Mark all pending requests as complete so that any concurrent
+ * (lockless) lookup doesn't try and wait upon the request as we
+ * reset it.
+ */
+ intel_engine_init_global_seqno(engine,
+ intel_engine_last_submit(engine));
}
static int __i915_gem_set_wedged_BKL(void *data)
@@ -3083,25 +3081,15 @@ static int __i915_gem_set_wedged_BKL(void *data)
for_each_engine(engine, i915, id)
engine_set_wedged(engine);
+ set_bit(I915_WEDGED, &i915->gpu_error.flags);
+ wake_up_all(&i915->gpu_error.reset_queue);
+
return 0;
}
void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
{
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
- set_bit(I915_WEDGED, &dev_priv->gpu_error.flags);
-
- /* Retire completed requests first so the list of inflight/incomplete
- * requests is accurate and we don't try and mark successful requests
- * as in error during __i915_gem_set_wedged_BKL().
- */
- i915_gem_retire_requests(dev_priv);
-
stop_machine(__i915_gem_set_wedged_BKL, dev_priv, NULL);
-
- i915_gem_context_lost(dev_priv);
-
- mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
}
bool i915_gem_unset_wedged(struct drm_i915_private *i915)
@@ -3156,6 +3144,7 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
* context and do not require stop_machine().
*/
intel_engines_reset_default_submission(i915);
+ i915_gem_contexts_lost(i915);
smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
clear_bit(I915_WEDGED, &i915->gpu_error.flags);
@@ -3253,25 +3242,33 @@ out_rearm:
void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
{
+ struct drm_i915_private *i915 = to_i915(gem->dev);
struct drm_i915_gem_object *obj = to_intel_bo(gem);
struct drm_i915_file_private *fpriv = file->driver_priv;
- struct i915_vma *vma, *vn;
+ struct i915_lut_handle *lut, *ln;
- mutex_lock(&obj->base.dev->struct_mutex);
- list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link)
- if (vma->vm->file == fpriv)
+ mutex_lock(&i915->drm.struct_mutex);
+
+ list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
+ struct i915_gem_context *ctx = lut->ctx;
+ struct i915_vma *vma;
+
+ if (ctx->file_priv != fpriv)
+ continue;
+
+ vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
+
+ if (!i915_vma_is_ggtt(vma))
i915_vma_close(vma);
- vma = obj->vma_hashed;
- if (vma && vma->ctx->file_priv == fpriv)
- i915_vma_unlink_ctx(vma);
+ list_del(&lut->obj_link);
+ list_del(&lut->ctx_link);
- if (i915_gem_object_is_active(obj) &&
- !i915_gem_object_has_active_reference(obj)) {
- i915_gem_object_set_active_reference(obj);
- i915_gem_object_get(obj);
+ kmem_cache_free(i915->luts, lut);
+ __i915_gem_object_release_unless_active(obj);
}
- mutex_unlock(&obj->base.dev->struct_mutex);
+
+ mutex_unlock(&i915->drm.struct_mutex);
}
static unsigned long to_wait_timeout(s64 timeout_ns)
@@ -3297,7 +3294,7 @@ static unsigned long to_wait_timeout(s64 timeout_ns)
* -ERESTARTSYS: signal interrupted the wait
* -ENONENT: object doesn't exist
* Also possible, but rare:
- * -EAGAIN: GPU wedged
+ * -EAGAIN: incomplete, restart syscall
* -ENOMEM: damn
* -ENODEV: Internal IRQ fail
* -E?: The add request failed
@@ -3345,6 +3342,10 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
*/
if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
args->timeout_ns = 0;
+
+ /* Asked to wait beyond the jiffie/scheduler precision? */
+ if (ret == -ETIME && args->timeout_ns)
+ ret = -EAGAIN;
}
i915_gem_object_put(obj);
@@ -3686,8 +3687,7 @@ restart:
list_for_each_entry(vma, &obj->vma_list, obj_link)
vma->node.color = cache_level;
- obj->cache_level = cache_level;
- obj->cache_coherent = i915_gem_object_is_coherent(obj);
+ i915_gem_object_set_cache_coherency(obj, cache_level);
obj->cache_dirty = true; /* Always invalidate stale cachelines */
return 0;
@@ -4260,6 +4260,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
INIT_LIST_HEAD(&obj->global_link);
INIT_LIST_HEAD(&obj->userfault_link);
INIT_LIST_HEAD(&obj->vma_list);
+ INIT_LIST_HEAD(&obj->lut_list);
INIT_LIST_HEAD(&obj->batch_pool_link);
obj->ops = ops;
@@ -4292,6 +4293,7 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
{
struct drm_i915_gem_object *obj;
struct address_space *mapping;
+ unsigned int cache_level;
gfp_t mask;
int ret;
@@ -4330,7 +4332,7 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
- if (HAS_LLC(dev_priv)) {
+ if (HAS_LLC(dev_priv))
/* On some devices, we can have the GPU use the LLC (the CPU
* cache) for about a 10% performance improvement
* compared to uncached. Graphics requests other than
@@ -4343,12 +4345,11 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
* However, we maintain the display planes as UC, and so
* need to rebind when first used as such.
*/
- obj->cache_level = I915_CACHE_LLC;
- } else
- obj->cache_level = I915_CACHE_NONE;
+ cache_level = I915_CACHE_LLC;
+ else
+ cache_level = I915_CACHE_NONE;
- obj->cache_coherent = i915_gem_object_is_coherent(obj);
- obj->cache_dirty = !obj->cache_coherent;
+ i915_gem_object_set_cache_coherency(obj, cache_level);
trace_i915_gem_object_create(obj);
@@ -4503,8 +4504,8 @@ void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
{
lockdep_assert_held(&obj->base.dev->struct_mutex);
- GEM_BUG_ON(i915_gem_object_has_active_reference(obj));
- if (i915_gem_object_is_active(obj))
+ if (!i915_gem_object_has_active_reference(obj) &&
+ i915_gem_object_is_active(obj))
i915_gem_object_set_active_reference(obj);
else
i915_gem_object_put(obj);
@@ -4565,7 +4566,7 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
goto err_unlock;
assert_kernel_context_is_current(dev_priv);
- i915_gem_context_lost(dev_priv);
+ i915_gem_contexts_lost(dev_priv);
mutex_unlock(&dev->struct_mutex);
intel_guc_suspend(dev_priv);
@@ -4579,8 +4580,6 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
while (flush_delayed_work(&dev_priv->gt.idle_work))
;
- i915_gem_drain_freed_objects(dev_priv);
-
/* Assert that we successfully flushed all the work and
* reset the GPU back to its idle, low power state.
*/
@@ -4812,7 +4811,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
if (ret)
goto out_unlock;
- ret = i915_gem_context_init(dev_priv);
+ ret = i915_gem_contexts_init(dev_priv);
if (ret)
goto out_unlock;
@@ -4898,12 +4897,16 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
if (!dev_priv->vmas)
goto err_objects;
+ dev_priv->luts = KMEM_CACHE(i915_lut_handle, 0);
+ if (!dev_priv->luts)
+ goto err_vmas;
+
dev_priv->requests = KMEM_CACHE(drm_i915_gem_request,
SLAB_HWCACHE_ALIGN |
SLAB_RECLAIM_ACCOUNT |
SLAB_TYPESAFE_BY_RCU);
if (!dev_priv->requests)
- goto err_vmas;
+ goto err_luts;
dev_priv->dependencies = KMEM_CACHE(i915_dependency,
SLAB_HWCACHE_ALIGN |
@@ -4922,7 +4925,6 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
if (err)
goto err_priorities;
- INIT_LIST_HEAD(&dev_priv->context_list);
INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work);
init_llist_head(&dev_priv->mm.free_list);
INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
@@ -4936,8 +4938,6 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
- init_waitqueue_head(&dev_priv->pending_flip_queue);
-
atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
spin_lock_init(&dev_priv->fb_tracking.lock);
@@ -4950,6 +4950,8 @@ err_dependencies:
kmem_cache_destroy(dev_priv->dependencies);
err_requests:
kmem_cache_destroy(dev_priv->requests);
+err_luts:
+ kmem_cache_destroy(dev_priv->luts);
err_vmas:
kmem_cache_destroy(dev_priv->vmas);
err_objects:
@@ -4972,6 +4974,7 @@ void i915_gem_load_cleanup(struct drm_i915_private *dev_priv)
kmem_cache_destroy(dev_priv->priorities);
kmem_cache_destroy(dev_priv->dependencies);
kmem_cache_destroy(dev_priv->requests);
+ kmem_cache_destroy(dev_priv->luts);
kmem_cache_destroy(dev_priv->vmas);
kmem_cache_destroy(dev_priv->objects);
@@ -5012,7 +5015,7 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
* the objects as well, see i915_gem_freeze()
*/
- i915_gem_shrink(dev_priv, -1UL, I915_SHRINK_UNBOUND);
+ i915_gem_shrink(dev_priv, -1UL, NULL, I915_SHRINK_UNBOUND);
i915_gem_drain_freed_objects(dev_priv);
mutex_lock(&dev_priv->drm.struct_mutex);
@@ -5038,15 +5041,9 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
list_for_each_entry(request, &file_priv->mm.request_list, client_link)
request->file_priv = NULL;
spin_unlock(&file_priv->mm.lock);
-
- if (!list_empty(&file_priv->rps.link)) {
- spin_lock(&to_i915(dev)->rps.client_lock);
- list_del(&file_priv->rps.link);
- spin_unlock(&to_i915(dev)->rps.client_lock);
- }
}
-int i915_gem_open(struct drm_device *dev, struct drm_file *file)
+int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
{
struct drm_i915_file_private *file_priv;
int ret;
@@ -5058,16 +5055,15 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
return -ENOMEM;
file->driver_priv = file_priv;
- file_priv->dev_priv = to_i915(dev);
+ file_priv->dev_priv = i915;
file_priv->file = file;
- INIT_LIST_HEAD(&file_priv->rps.link);
spin_lock_init(&file_priv->mm.lock);
INIT_LIST_HEAD(&file_priv->mm.request_list);
file_priv->bsd_engine = -1;
- ret = i915_gem_context_open(dev, file);
+ ret = i915_gem_context_open(i915, file);
if (ret)
kfree(file_priv);
@@ -5311,6 +5307,64 @@ i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}
+int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
+{
+ struct sg_table *pages;
+ int err;
+
+ if (align > obj->base.size)
+ return -EINVAL;
+
+ if (obj->ops == &i915_gem_phys_ops)
+ return 0;
+
+ if (obj->ops != &i915_gem_object_ops)
+ return -EINVAL;
+
+ err = i915_gem_object_unbind(obj);
+ if (err)
+ return err;
+
+ mutex_lock(&obj->mm.lock);
+
+ if (obj->mm.madv != I915_MADV_WILLNEED) {
+ err = -EFAULT;
+ goto err_unlock;
+ }
+
+ if (obj->mm.quirked) {
+ err = -EFAULT;
+ goto err_unlock;
+ }
+
+ if (obj->mm.mapping) {
+ err = -EBUSY;
+ goto err_unlock;
+ }
+
+ pages = obj->mm.pages;
+ obj->ops = &i915_gem_phys_ops;
+
+ err = ____i915_gem_object_get_pages(obj);
+ if (err)
+ goto err_xfer;
+
+ /* Perma-pin (until release) the physical set of pages */
+ __i915_gem_object_pin_pages(obj);
+
+ if (!IS_ERR_OR_NULL(pages))
+ i915_gem_object_ops.put_pages(obj, pages);
+ mutex_unlock(&obj->mm.lock);
+ return 0;
+
+err_xfer:
+ obj->ops = &i915_gem_object_ops;
+ obj->mm.pages = pages;
+err_unlock:
+ mutex_unlock(&obj->mm.lock);
+ return err;
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/scatterlist.c"
#include "selftests/mock_gem_device.c"
diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.c b/drivers/gpu/drm/i915/i915_gem_clflush.c
index 152f16c11878..8a04d33055be 100644
--- a/drivers/gpu/drm/i915/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/i915_gem_clflush.c
@@ -114,7 +114,7 @@ i915_clflush_notify(struct i915_sw_fence *fence,
return NOTIFY_DONE;
}
-void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
+bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
unsigned int flags)
{
struct clflush *clflush;
@@ -128,7 +128,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
*/
if (!i915_gem_object_has_struct_page(obj)) {
obj->cache_dirty = false;
- return;
+ return false;
}
/* If the GPU is snooping the contents of the CPU cache,
@@ -139,8 +139,9 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
* snooping behaviour occurs naturally as the result of our domain
* tracking.
*/
- if (!(flags & I915_CLFLUSH_FORCE) && obj->cache_coherent)
- return;
+ if (!(flags & I915_CLFLUSH_FORCE) &&
+ obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
+ return false;
trace_i915_gem_object_clflush(obj);
@@ -179,4 +180,5 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
}
obj->cache_dirty = false;
+ return true;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.h b/drivers/gpu/drm/i915/i915_gem_clflush.h
index 2455a7820937..f390247561b3 100644
--- a/drivers/gpu/drm/i915/i915_gem_clflush.h
+++ b/drivers/gpu/drm/i915/i915_gem_clflush.h
@@ -28,7 +28,7 @@
struct drm_i915_private;
struct drm_i915_gem_object;
-void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
+bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
unsigned int flags);
#define I915_CLFLUSH_FORCE BIT(0)
#define I915_CLFLUSH_SYNC BIT(1)
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 39ed58a21fc1..58a2a44f88bd 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -93,81 +93,37 @@
#define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1
-/* Initial size (as log2) to preallocate the handle->object hashtable */
-#define VMA_HT_BITS 2u /* 4 x 2 pointers, 64 bytes minimum */
-
-static void resize_vma_ht(struct work_struct *work)
+static void lut_close(struct i915_gem_context *ctx)
{
- struct i915_gem_context_vma_lut *lut =
- container_of(work, typeof(*lut), resize);
- unsigned int bits, new_bits, size, i;
- struct hlist_head *new_ht;
-
- GEM_BUG_ON(!(lut->ht_size & I915_CTX_RESIZE_IN_PROGRESS));
-
- bits = 1 + ilog2(4*lut->ht_count/3 + 1);
- new_bits = min_t(unsigned int,
- max(bits, VMA_HT_BITS),
- sizeof(unsigned int) * BITS_PER_BYTE - 1);
- if (new_bits == lut->ht_bits)
- goto out;
-
- new_ht = kzalloc(sizeof(*new_ht)<<new_bits, GFP_KERNEL | __GFP_NOWARN);
- if (!new_ht)
- new_ht = vzalloc(sizeof(*new_ht)<<new_bits);
- if (!new_ht)
- /* Pretend resize succeeded and stop calling us for a bit! */
- goto out;
+ struct i915_lut_handle *lut, *ln;
+ struct radix_tree_iter iter;
+ void __rcu **slot;
- size = BIT(lut->ht_bits);
- for (i = 0; i < size; i++) {
- struct i915_vma *vma;
- struct hlist_node *tmp;
-
- hlist_for_each_entry_safe(vma, tmp, &lut->ht[i], ctx_node)
- hlist_add_head(&vma->ctx_node,
- &new_ht[hash_32(vma->ctx_handle,
- new_bits)]);
+ list_for_each_entry_safe(lut, ln, &ctx->handles_list, ctx_link) {
+ list_del(&lut->obj_link);
+ kmem_cache_free(ctx->i915->luts, lut);
}
- kvfree(lut->ht);
- lut->ht = new_ht;
- lut->ht_bits = new_bits;
-out:
- smp_store_release(&lut->ht_size, BIT(bits));
- GEM_BUG_ON(lut->ht_size & I915_CTX_RESIZE_IN_PROGRESS);
-}
-static void vma_lut_free(struct i915_gem_context *ctx)
-{
- struct i915_gem_context_vma_lut *lut = &ctx->vma_lut;
- unsigned int i, size;
+ radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
+ struct i915_vma *vma = rcu_dereference_raw(*slot);
+ struct drm_i915_gem_object *obj = vma->obj;
- if (lut->ht_size & I915_CTX_RESIZE_IN_PROGRESS)
- cancel_work_sync(&lut->resize);
+ radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
- size = BIT(lut->ht_bits);
- for (i = 0; i < size; i++) {
- struct i915_vma *vma;
+ if (!i915_vma_is_ggtt(vma))
+ i915_vma_close(vma);
- hlist_for_each_entry(vma, &lut->ht[i], ctx_node) {
- vma->obj->vma_hashed = NULL;
- vma->ctx = NULL;
- i915_vma_put(vma);
- }
+ __i915_gem_object_release_unless_active(obj);
}
- kvfree(lut->ht);
}
-void i915_gem_context_free(struct kref *ctx_ref)
+static void i915_gem_context_free(struct i915_gem_context *ctx)
{
- struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
int i;
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
- trace_i915_context_free(ctx);
GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
- vma_lut_free(ctx);
i915_ppgtt_put(ctx->ppgtt);
for (i = 0; i < I915_NUM_ENGINES; i++) {
@@ -188,15 +144,64 @@ void i915_gem_context_free(struct kref *ctx_ref)
list_del(&ctx->link);
- ida_simple_remove(&ctx->i915->context_hw_ida, ctx->hw_id);
- kfree(ctx);
+ ida_simple_remove(&ctx->i915->contexts.hw_ida, ctx->hw_id);
+ kfree_rcu(ctx, rcu);
+}
+
+static void contexts_free(struct drm_i915_private *i915)
+{
+ struct llist_node *freed = llist_del_all(&i915->contexts.free_list);
+ struct i915_gem_context *ctx, *cn;
+
+ lockdep_assert_held(&i915->drm.struct_mutex);
+
+ llist_for_each_entry_safe(ctx, cn, freed, free_link)
+ i915_gem_context_free(ctx);
+}
+
+static void contexts_free_first(struct drm_i915_private *i915)
+{
+ struct i915_gem_context *ctx;
+ struct llist_node *freed;
+
+ lockdep_assert_held(&i915->drm.struct_mutex);
+
+ freed = llist_del_first(&i915->contexts.free_list);
+ if (!freed)
+ return;
+
+ ctx = container_of(freed, typeof(*ctx), free_link);
+ i915_gem_context_free(ctx);
+}
+
+static void contexts_free_worker(struct work_struct *work)
+{
+ struct drm_i915_private *i915 =
+ container_of(work, typeof(*i915), contexts.free_work);
+
+ mutex_lock(&i915->drm.struct_mutex);
+ contexts_free(i915);
+ mutex_unlock(&i915->drm.struct_mutex);
+}
+
+void i915_gem_context_release(struct kref *ref)
+{
+ struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
+ struct drm_i915_private *i915 = ctx->i915;
+
+ trace_i915_context_free(ctx);
+ if (llist_add(&ctx->free_link, &i915->contexts.free_list))
+ queue_work(i915->wq, &i915->contexts.free_work);
}
static void context_close(struct i915_gem_context *ctx)
{
i915_gem_context_set_closed(ctx);
+
+ lut_close(ctx);
if (ctx->ppgtt)
i915_ppgtt_close(&ctx->ppgtt->base);
+
ctx->file_priv = ERR_PTR(-EBADF);
i915_gem_context_put(ctx);
}
@@ -205,7 +210,7 @@ static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
{
int ret;
- ret = ida_simple_get(&dev_priv->context_hw_ida,
+ ret = ida_simple_get(&dev_priv->contexts.hw_ida,
0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
if (ret < 0) {
/* Contexts are only released when no longer active.
@@ -213,7 +218,7 @@ static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
* stale contexts and try again.
*/
i915_gem_retire_requests(dev_priv);
- ret = ida_simple_get(&dev_priv->context_hw_ida,
+ ret = ida_simple_get(&dev_priv->contexts.hw_ida,
0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
if (ret < 0)
return ret;
@@ -265,20 +270,12 @@ __create_hw_context(struct drm_i915_private *dev_priv,
}
kref_init(&ctx->ref);
- list_add_tail(&ctx->link, &dev_priv->context_list);
+ list_add_tail(&ctx->link, &dev_priv->contexts.list);
ctx->i915 = dev_priv;
ctx->priority = I915_PRIORITY_NORMAL;
- ctx->vma_lut.ht_bits = VMA_HT_BITS;
- ctx->vma_lut.ht_size = BIT(VMA_HT_BITS);
- BUILD_BUG_ON(BIT(VMA_HT_BITS) == I915_CTX_RESIZE_IN_PROGRESS);
- ctx->vma_lut.ht = kcalloc(ctx->vma_lut.ht_size,
- sizeof(*ctx->vma_lut.ht),
- GFP_KERNEL);
- if (!ctx->vma_lut.ht)
- goto err_out;
-
- INIT_WORK(&ctx->vma_lut.resize, resize_vma_ht);
+ INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
+ INIT_LIST_HEAD(&ctx->handles_list);
/* Default context will never have a file_priv */
ret = DEFAULT_CONTEXT_HANDLE;
@@ -328,8 +325,6 @@ err_pid:
put_pid(ctx->pid);
idr_remove(&file_priv->context_idr, ctx->user_handle);
err_lut:
- kvfree(ctx->vma_lut.ht);
-err_out:
context_close(ctx);
return ERR_PTR(ret);
}
@@ -354,6 +349,9 @@ i915_gem_create_context(struct drm_i915_private *dev_priv,
lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ /* Reap the most stale context */
+ contexts_free_first(dev_priv);
+
ctx = __create_hw_context(dev_priv, file_priv);
if (IS_ERR(ctx))
return ctx;
@@ -418,7 +416,7 @@ out:
return ctx;
}
-int i915_gem_context_init(struct drm_i915_private *dev_priv)
+int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
{
struct i915_gem_context *ctx;
@@ -427,6 +425,10 @@ int i915_gem_context_init(struct drm_i915_private *dev_priv)
if (WARN_ON(dev_priv->kernel_context))
return 0;
+ INIT_LIST_HEAD(&dev_priv->contexts.list);
+ INIT_WORK(&dev_priv->contexts.free_work, contexts_free_worker);
+ init_llist_head(&dev_priv->contexts.free_list);
+
if (intel_vgpu_active(dev_priv) &&
HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
if (!i915.enable_execlists) {
@@ -437,7 +439,7 @@ int i915_gem_context_init(struct drm_i915_private *dev_priv)
/* Using the simple ida interface, the max is limited by sizeof(int) */
BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
- ida_init(&dev_priv->context_hw_ida);
+ ida_init(&dev_priv->contexts.hw_ida);
ctx = i915_gem_create_context(dev_priv, NULL);
if (IS_ERR(ctx)) {
@@ -463,7 +465,7 @@ int i915_gem_context_init(struct drm_i915_private *dev_priv)
return 0;
}
-void i915_gem_context_lost(struct drm_i915_private *dev_priv)
+void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -484,7 +486,7 @@ void i915_gem_context_lost(struct drm_i915_private *dev_priv)
if (!i915.enable_execlists) {
struct i915_gem_context *ctx;
- list_for_each_entry(ctx, &dev_priv->context_list, link) {
+ list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
if (!i915_gem_context_is_default(ctx))
continue;
@@ -503,18 +505,20 @@ void i915_gem_context_lost(struct drm_i915_private *dev_priv)
}
}
-void i915_gem_context_fini(struct drm_i915_private *dev_priv)
+void i915_gem_contexts_fini(struct drm_i915_private *i915)
{
- struct i915_gem_context *dctx = dev_priv->kernel_context;
-
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ struct i915_gem_context *ctx;
- GEM_BUG_ON(!i915_gem_context_is_kernel(dctx));
+ lockdep_assert_held(&i915->drm.struct_mutex);
- context_close(dctx);
- dev_priv->kernel_context = NULL;
+ /* Keep the context so that we can free it immediately ourselves */
+ ctx = i915_gem_context_get(fetch_and_zero(&i915->kernel_context));
+ GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
+ context_close(ctx);
+ i915_gem_context_free(ctx);
- ida_destroy(&dev_priv->context_hw_ida);
+ /* Must free all deferred contexts (via flush_workqueue) first */
+ ida_destroy(&i915->contexts.hw_ida);
}
static int context_idr_cleanup(int id, void *p, void *data)
@@ -525,32 +529,32 @@ static int context_idr_cleanup(int id, void *p, void *data)
return 0;
}
-int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
+int i915_gem_context_open(struct drm_i915_private *i915,
+ struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
struct i915_gem_context *ctx;
idr_init(&file_priv->context_idr);
- mutex_lock(&dev->struct_mutex);
- ctx = i915_gem_create_context(to_i915(dev), file_priv);
- mutex_unlock(&dev->struct_mutex);
-
- GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
-
+ mutex_lock(&i915->drm.struct_mutex);
+ ctx = i915_gem_create_context(i915, file_priv);
+ mutex_unlock(&i915->drm.struct_mutex);
if (IS_ERR(ctx)) {
idr_destroy(&file_priv->context_idr);
return PTR_ERR(ctx);
}
+ GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
+
return 0;
}
-void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
+void i915_gem_context_close(struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
- lockdep_assert_held(&dev->struct_mutex);
+ lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex);
idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
idr_destroy(&file_priv->context_idr);
@@ -688,19 +692,19 @@ static inline bool skip_rcs_switch(struct i915_hw_ppgtt *ppgtt,
}
static bool
-needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt,
- struct intel_engine_cs *engine,
- struct i915_gem_context *to)
+needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt, struct intel_engine_cs *engine)
{
+ struct i915_gem_context *from = engine->legacy_active_context;
+
if (!ppgtt)
return false;
/* Always load the ppgtt on first use */
- if (!engine->legacy_active_context)
+ if (!from)
return true;
/* Same context without new entries, skip */
- if (engine->legacy_active_context == to &&
+ if ((!from->ppgtt || from->ppgtt == ppgtt) &&
!(intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
return false;
@@ -744,7 +748,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
if (skip_rcs_switch(ppgtt, engine, to))
return 0;
- if (needs_pd_load_pre(ppgtt, engine, to)) {
+ if (needs_pd_load_pre(ppgtt, engine)) {
/* Older GENs and non render rings still want the load first,
* "PP_DCLV followed by PP_DIR_BASE register through Load
* Register Immediate commands in Ring Buffer before submitting
@@ -841,7 +845,7 @@ int i915_switch_context(struct drm_i915_gem_request *req)
struct i915_hw_ppgtt *ppgtt =
to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
- if (needs_pd_load_pre(ppgtt, engine, to)) {
+ if (needs_pd_load_pre(ppgtt, engine)) {
int ret;
trace_switch_mm(engine, to);
@@ -852,6 +856,7 @@ int i915_switch_context(struct drm_i915_gem_request *req)
ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
}
+ engine->legacy_active_context = to;
return 0;
}
@@ -924,7 +929,7 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
static bool client_is_banned(struct drm_i915_file_private *file_priv)
{
- return file_priv->context_bans > I915_MAX_CLIENT_CONTEXT_BANS;
+ return atomic_read(&file_priv->context_bans) > I915_MAX_CLIENT_CONTEXT_BANS;
}
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
@@ -981,20 +986,19 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
return -ENOENT;
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
-
ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
- if (IS_ERR(ctx)) {
- mutex_unlock(&dev->struct_mutex);
- return PTR_ERR(ctx);
- }
+ if (!ctx)
+ return -ENOENT;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ goto out;
__destroy_hw_context(ctx, file_priv);
mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG("HW context %d destroyed\n", args->ctx_id);
+out:
+ i915_gem_context_put(ctx);
return 0;
}
@@ -1004,17 +1008,11 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_i915_file_private *file_priv = file->driver_priv;
struct drm_i915_gem_context_param *args = data;
struct i915_gem_context *ctx;
- int ret;
-
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
+ int ret = 0;
ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
- if (IS_ERR(ctx)) {
- mutex_unlock(&dev->struct_mutex);
- return PTR_ERR(ctx);
- }
+ if (!ctx)
+ return -ENOENT;
args->size = 0;
switch (args->param) {
@@ -1042,8 +1040,8 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
ret = -EINVAL;
break;
}
- mutex_unlock(&dev->struct_mutex);
+ i915_gem_context_put(ctx);
return ret;
}
@@ -1055,15 +1053,13 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
struct i915_gem_context *ctx;
int ret;
+ ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
+ if (!ctx)
+ return -ENOENT;
+
ret = i915_mutex_lock_interruptible(dev);
if (ret)
- return ret;
-
- ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
- if (IS_ERR(ctx)) {
- mutex_unlock(&dev->struct_mutex);
- return PTR_ERR(ctx);
- }
+ goto out;
switch (args->param) {
case I915_CONTEXT_PARAM_BAN_PERIOD:
@@ -1101,6 +1097,8 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
}
mutex_unlock(&dev->struct_mutex);
+out:
+ i915_gem_context_put(ctx);
return ret;
}
@@ -1115,27 +1113,31 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
if (args->flags || args->pad)
return -EINVAL;
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
+ ret = -ENOENT;
+ rcu_read_lock();
+ ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id);
+ if (!ctx)
+ goto out;
- ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id);
- if (IS_ERR(ctx)) {
- mutex_unlock(&dev->struct_mutex);
- return PTR_ERR(ctx);
- }
+ /*
+ * We opt for unserialised reads here. This may result in tearing
+ * in the extremely unlikely event of a GPU hang on this context
+ * as we are querying its hangstats. If we need that extra layer of protection,
+ * we should wrap the hangstats with a seqlock.
+ */
if (capable(CAP_SYS_ADMIN))
args->reset_count = i915_reset_count(&dev_priv->gpu_error);
else
args->reset_count = 0;
- args->batch_active = ctx->guilty_count;
- args->batch_pending = ctx->active_count;
-
- mutex_unlock(&dev->struct_mutex);
+ args->batch_active = atomic_read(&ctx->guilty_count);
+ args->batch_pending = atomic_read(&ctx->active_count);
- return 0;
+ ret = 0;
+out:
+ rcu_read_unlock();
+ return ret;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
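The ioctls above stop taking struct_mutex around the whole context lookup and instead pin the context with a reference (and, for reset_stats, an RCU-protected lookup), taking the lock only for the work that really needs it. Below is a minimal, self-contained sketch of that pattern: look up under RCU, pin with a kref, and release when done. The obj/obj_lookup/obj_release names are illustrative placeholders, not driver code; the real helpers are __i915_gem_context_lookup_rcu(), i915_gem_context_get() and i915_gem_context_put().

#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/types.h>

struct obj {
	struct kref ref;
	struct rcu_head rcu;
};

static void obj_release(struct kref *ref)
{
	struct obj *o = container_of(ref, struct obj, ref);

	kfree_rcu(o, rcu);	/* defer the free past any RCU readers */
}

static struct obj *obj_lookup(struct idr *idr, u32 id)
{
	struct obj *o;

	rcu_read_lock();
	o = idr_find(idr, id);
	if (o && !kref_get_unless_zero(&o->ref))
		o = NULL;	/* raced with the final put */
	rcu_read_unlock();

	return o;	/* caller releases with kref_put(&o->ref, obj_release) */
}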
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index 82c99ba92ad3..44688e22a5c2 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -27,6 +27,7 @@
#include <linux/bitops.h>
#include <linux/list.h>
+#include <linux/radix-tree.h>
struct pid;
@@ -86,6 +87,7 @@ struct i915_gem_context {
/** link: place with &drm_i915_private.context_list */
struct list_head link;
+ struct llist_node free_link;
/**
* @ref: reference count
@@ -99,6 +101,11 @@ struct i915_gem_context {
struct kref ref;
/**
+ * @rcu: rcu_head for deferred freeing.
+ */
+ struct rcu_head rcu;
+
+ /**
* @flags: small set of booleans
*/
unsigned long flags;
@@ -143,32 +150,6 @@ struct i915_gem_context {
/** ggtt_offset_bias: placement restriction for context objects */
u32 ggtt_offset_bias;
- struct i915_gem_context_vma_lut {
- /** ht_size: last request size to allocate the hashtable for. */
- unsigned int ht_size;
-#define I915_CTX_RESIZE_IN_PROGRESS BIT(0)
- /** ht_bits: real log2(size) of hashtable. */
- unsigned int ht_bits;
- /** ht_count: current number of entries inside the hashtable */
- unsigned int ht_count;
-
- /** ht: the array of buckets comprising the simple hashtable */
- struct hlist_head *ht;
-
- /**
- * resize: After an execbuf completes, we check the load factor
- * of the hashtable. If the hashtable is too full, or too empty,
- * we schedule a task to resize the hashtable. During the
- * resize, the entries are moved between different buckets and
- * so we cannot simultaneously read the hashtable as it is
- * being resized (unlike rhashtable). Therefore we treat the
- * active work as a strong barrier, pausing a subsequent
- * execbuf to wait for the resize worker to complete, if
- * required.
- */
- struct work_struct resize;
- } vma_lut;
-
/** engine: per-engine logical HW state */
struct intel_context {
struct i915_vma *state;
@@ -185,20 +166,32 @@ struct i915_gem_context {
u32 desc_template;
/** guilty_count: How many times this context has caused a GPU hang. */
- unsigned int guilty_count;
+ atomic_t guilty_count;
/**
* @active_count: How many times this context was active during a GPU
* hang, but did not cause it.
*/
- unsigned int active_count;
+ atomic_t active_count;
#define CONTEXT_SCORE_GUILTY 10
#define CONTEXT_SCORE_BAN_THRESHOLD 40
/** ban_score: Accumulated score of all hangs caused by this context. */
- int ban_score;
+ atomic_t ban_score;
/** remap_slice: Bitmask of cache lines that need remapping */
u8 remap_slice;
+
+ /** handles_vma: radix tree to look up our context specific obj/vma for
+ * the user handle. (user handles are per fd, but the binding is
+ * per vm, which may be one per context or shared with the global GTT)
+ */
+ struct radix_tree_root handles_vma;
+
+ /** handles_list: reverse list of all the radix tree entries in use for
+ * this context, which allows us to free all the allocations on
+ * context close.
+ */
+ struct list_head handles_list;
};
static inline bool i915_gem_context_is_closed(const struct i915_gem_context *ctx)
@@ -273,14 +266,18 @@ static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
}
/* i915_gem_context.c */
-int __must_check i915_gem_context_init(struct drm_i915_private *dev_priv);
-void i915_gem_context_lost(struct drm_i915_private *dev_priv);
-void i915_gem_context_fini(struct drm_i915_private *dev_priv);
-int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
-void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
+int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv);
+void i915_gem_contexts_lost(struct drm_i915_private *dev_priv);
+void i915_gem_contexts_fini(struct drm_i915_private *dev_priv);
+
+int i915_gem_context_open(struct drm_i915_private *i915,
+ struct drm_file *file);
+void i915_gem_context_close(struct drm_file *file);
+
int i915_switch_context(struct drm_i915_gem_request *req);
int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv);
-void i915_gem_context_free(struct kref *ctx_ref);
+
+void i915_gem_context_release(struct kref *ctx_ref);
struct i915_gem_context *
i915_gem_context_create_gvt(struct drm_device *dev);
@@ -295,4 +292,16 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
+static inline struct i915_gem_context *
+i915_gem_context_get(struct i915_gem_context *ctx)
+{
+ kref_get(&ctx->ref);
+ return ctx;
+}
+
+static inline void i915_gem_context_put(struct i915_gem_context *ctx)
+{
+ kref_put(&ctx->ref, i915_gem_context_release);
+}
+
#endif /* !__I915_GEM_CONTEXT_H__ */
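The header pairs the radix tree (handles_vma, fast handle-to-vma lookup) with a plain list (handles_list) that records every entry, so context close can walk the list and release everything without iterating the tree. A hedged sketch of that pairing follows; lut_entry and lut_add are placeholder names standing in for the driver's i915_lut_handle bookkeeping, not its actual API.

#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Illustrative stand-in for i915_lut_handle; field names are made up. */
struct lut_entry {
	struct list_head link;	/* chained on the owner's reverse list */
	u32 handle;		/* key used in the radix tree */
	void *payload;		/* whatever the handle resolves to */
};

static int lut_add(struct radix_tree_root *tree, struct list_head *all,
		   u32 handle, void *payload)
{
	struct lut_entry *e = kmalloc(sizeof(*e), GFP_KERNEL);
	int err;

	if (!e)
		return -ENOMEM;

	err = radix_tree_insert(tree, handle, payload);
	if (err) {
		kfree(e);
		return err;
	}

	e->handle = handle;
	e->payload = payload;
	list_add(&e->link, all);	/* close() walks this to erase every key */

	return 0;
}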
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index a193f1b36c67..4df039ef2ce3 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -318,8 +318,8 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
/* Overlap of objects in the same batch? */
if (i915_vma_is_pinned(vma)) {
ret = -ENOSPC;
- if (vma->exec_entry &&
- vma->exec_entry->flags & EXEC_OBJECT_PINNED)
+ if (vma->exec_flags &&
+ *vma->exec_flags & EXEC_OBJECT_PINNED)
ret = -EINVAL;
break;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 054b2e54cdaf..4c2016237d61 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -32,6 +32,7 @@
#include <linux/uaccess.h>
#include <drm/drmP.h>
+#include <drm/drm_syncobj.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
@@ -191,6 +192,8 @@ struct i915_execbuffer {
struct drm_file *file; /** per-file lookup tables and limits */
struct drm_i915_gem_execbuffer2 *args; /** ioctl parameters */
struct drm_i915_gem_exec_object2 *exec; /** ioctl execobj[] */
+ struct i915_vma **vma;
+ unsigned int *flags;
struct intel_engine_cs *engine; /** engine to queue the request to */
struct i915_gem_context *ctx; /** context for building the request */
@@ -244,13 +247,7 @@ struct i915_execbuffer {
struct hlist_head *buckets; /** ht for relocation handles */
};
-/*
- * As an alternative to creating a hashtable of handle-to-vma for a batch,
- * we used the last available reserved field in the execobject[] and stash
- * a link from the execobj to its vma.
- */
-#define __exec_to_vma(ee) (ee)->rsvd2
-#define exec_to_vma(ee) u64_to_ptr(struct i915_vma, __exec_to_vma(ee))
+#define exec_entry(EB, VMA) (&(EB)->exec[(VMA)->exec_flags - (EB)->flags])
/*
* Used to convert any address to canonical form.
@@ -319,85 +316,82 @@ static int eb_create(struct i915_execbuffer *eb)
static bool
eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
- const struct i915_vma *vma)
+ const struct i915_vma *vma,
+ unsigned int flags)
{
- if (!(entry->flags & __EXEC_OBJECT_HAS_PIN))
- return true;
-
if (vma->node.size < entry->pad_to_size)
return true;
if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
return true;
- if (entry->flags & EXEC_OBJECT_PINNED &&
+ if (flags & EXEC_OBJECT_PINNED &&
vma->node.start != entry->offset)
return true;
- if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
+ if (flags & __EXEC_OBJECT_NEEDS_BIAS &&
vma->node.start < BATCH_OFFSET_BIAS)
return true;
- if (!(entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) &&
+ if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) &&
(vma->node.start + vma->node.size - 1) >> 32)
return true;
return false;
}
-static inline void
+static inline bool
eb_pin_vma(struct i915_execbuffer *eb,
- struct drm_i915_gem_exec_object2 *entry,
+ const struct drm_i915_gem_exec_object2 *entry,
struct i915_vma *vma)
{
- u64 flags;
+ unsigned int exec_flags = *vma->exec_flags;
+ u64 pin_flags;
if (vma->node.size)
- flags = vma->node.start;
+ pin_flags = vma->node.start;
else
- flags = entry->offset & PIN_OFFSET_MASK;
+ pin_flags = entry->offset & PIN_OFFSET_MASK;
- flags |= PIN_USER | PIN_NOEVICT | PIN_OFFSET_FIXED;
- if (unlikely(entry->flags & EXEC_OBJECT_NEEDS_GTT))
- flags |= PIN_GLOBAL;
+ pin_flags |= PIN_USER | PIN_NOEVICT | PIN_OFFSET_FIXED;
+ if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_GTT))
+ pin_flags |= PIN_GLOBAL;
- if (unlikely(i915_vma_pin(vma, 0, 0, flags)))
- return;
+ if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags)))
+ return false;
- if (unlikely(entry->flags & EXEC_OBJECT_NEEDS_FENCE)) {
+ if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
if (unlikely(i915_vma_get_fence(vma))) {
i915_vma_unpin(vma);
- return;
+ return false;
}
if (i915_vma_pin_fence(vma))
- entry->flags |= __EXEC_OBJECT_HAS_FENCE;
+ exec_flags |= __EXEC_OBJECT_HAS_FENCE;
}
- entry->flags |= __EXEC_OBJECT_HAS_PIN;
+ *vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
+ return !eb_vma_misplaced(entry, vma, exec_flags);
}
-static inline void
-__eb_unreserve_vma(struct i915_vma *vma,
- const struct drm_i915_gem_exec_object2 *entry)
+static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags)
{
- GEM_BUG_ON(!(entry->flags & __EXEC_OBJECT_HAS_PIN));
+ GEM_BUG_ON(!(flags & __EXEC_OBJECT_HAS_PIN));
- if (unlikely(entry->flags & __EXEC_OBJECT_HAS_FENCE))
+ if (unlikely(flags & __EXEC_OBJECT_HAS_FENCE))
i915_vma_unpin_fence(vma);
__i915_vma_unpin(vma);
}
static inline void
-eb_unreserve_vma(struct i915_vma *vma,
- struct drm_i915_gem_exec_object2 *entry)
+eb_unreserve_vma(struct i915_vma *vma, unsigned int *flags)
{
- if (!(entry->flags & __EXEC_OBJECT_HAS_PIN))
+ if (!(*flags & __EXEC_OBJECT_HAS_PIN))
return;
- __eb_unreserve_vma(vma, entry);
- entry->flags &= ~__EXEC_OBJECT_RESERVED;
+ __eb_unreserve_vma(vma, *flags);
+ *flags &= ~__EXEC_OBJECT_RESERVED;
}
static int
@@ -427,7 +421,7 @@ eb_validate_vma(struct i915_execbuffer *eb,
entry->pad_to_size = 0;
}
- if (unlikely(vma->exec_entry)) {
+ if (unlikely(vma->exec_flags)) {
DRM_DEBUG("Object [handle %d, index %d] appears more than once in object list\n",
entry->handle, (int)(entry - eb->exec));
return -EINVAL;
@@ -440,14 +434,25 @@ eb_validate_vma(struct i915_execbuffer *eb,
*/
entry->offset = gen8_noncanonical_addr(entry->offset);
+ if (!eb->reloc_cache.has_fence) {
+ entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
+ } else {
+ if ((entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
+ eb->reloc_cache.needs_unfenced) &&
+ i915_gem_object_is_tiled(vma->obj))
+ entry->flags |= EXEC_OBJECT_NEEDS_GTT | __EXEC_OBJECT_NEEDS_MAP;
+ }
+
+ if (!(entry->flags & EXEC_OBJECT_PINNED))
+ entry->flags |= eb->context_flags;
+
return 0;
}
static int
-eb_add_vma(struct i915_execbuffer *eb,
- struct drm_i915_gem_exec_object2 *entry,
- struct i915_vma *vma)
+eb_add_vma(struct i915_execbuffer *eb, unsigned int i, struct i915_vma *vma)
{
+ struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
int err;
GEM_BUG_ON(i915_vma_is_closed(vma));
@@ -468,40 +473,28 @@ eb_add_vma(struct i915_execbuffer *eb,
if (entry->relocation_count)
list_add_tail(&vma->reloc_link, &eb->relocs);
- if (!eb->reloc_cache.has_fence) {
- entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
- } else {
- if ((entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
- eb->reloc_cache.needs_unfenced) &&
- i915_gem_object_is_tiled(vma->obj))
- entry->flags |= EXEC_OBJECT_NEEDS_GTT | __EXEC_OBJECT_NEEDS_MAP;
- }
-
- if (!(entry->flags & EXEC_OBJECT_PINNED))
- entry->flags |= eb->context_flags;
-
/*
* Stash a pointer from the vma to execobj, so we can query its flags,
* size, alignment etc as provided by the user. Also we stash a pointer
* to the vma inside the execobj so that we can use a direct lookup
* to find the right target VMA when doing relocations.
*/
- vma->exec_entry = entry;
- __exec_to_vma(entry) = (uintptr_t)vma;
+ eb->vma[i] = vma;
+ eb->flags[i] = entry->flags;
+ vma->exec_flags = &eb->flags[i];
err = 0;
- eb_pin_vma(eb, entry, vma);
- if (eb_vma_misplaced(entry, vma)) {
- eb_unreserve_vma(vma, entry);
-
- list_add_tail(&vma->exec_link, &eb->unbound);
- if (drm_mm_node_allocated(&vma->node))
- err = i915_vma_unbind(vma);
- } else {
+ if (eb_pin_vma(eb, entry, vma)) {
if (entry->offset != vma->node.start) {
entry->offset = vma->node.start | UPDATE;
eb->args->flags |= __EXEC_HAS_RELOC;
}
+ } else {
+ eb_unreserve_vma(vma, vma->exec_flags);
+
+ list_add_tail(&vma->exec_link, &eb->unbound);
+ if (drm_mm_node_allocated(&vma->node))
+ err = i915_vma_unbind(vma);
}
return err;
}
@@ -526,32 +519,35 @@ static inline int use_cpu_reloc(const struct reloc_cache *cache,
static int eb_reserve_vma(const struct i915_execbuffer *eb,
struct i915_vma *vma)
{
- struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
- u64 flags;
+ struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
+ unsigned int exec_flags = *vma->exec_flags;
+ u64 pin_flags;
int err;
- flags = PIN_USER | PIN_NONBLOCK;
- if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
- flags |= PIN_GLOBAL;
+ pin_flags = PIN_USER | PIN_NONBLOCK;
+ if (exec_flags & EXEC_OBJECT_NEEDS_GTT)
+ pin_flags |= PIN_GLOBAL;
/*
* Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
* limit address to the first 4GBs for unflagged objects.
*/
- if (!(entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
- flags |= PIN_ZONE_4G;
+ if (!(exec_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
+ pin_flags |= PIN_ZONE_4G;
- if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
- flags |= PIN_MAPPABLE;
+ if (exec_flags & __EXEC_OBJECT_NEEDS_MAP)
+ pin_flags |= PIN_MAPPABLE;
- if (entry->flags & EXEC_OBJECT_PINNED) {
- flags |= entry->offset | PIN_OFFSET_FIXED;
- flags &= ~PIN_NONBLOCK; /* force overlapping PINNED checks */
- } else if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS) {
- flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
+ if (exec_flags & EXEC_OBJECT_PINNED) {
+ pin_flags |= entry->offset | PIN_OFFSET_FIXED;
+ pin_flags &= ~PIN_NONBLOCK; /* force overlapping checks */
+ } else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS) {
+ pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
}
- err = i915_vma_pin(vma, entry->pad_to_size, entry->alignment, flags);
+ err = i915_vma_pin(vma,
+ entry->pad_to_size, entry->alignment,
+ pin_flags);
if (err)
return err;
@@ -560,10 +556,7 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
eb->args->flags |= __EXEC_HAS_RELOC;
}
- entry->flags |= __EXEC_OBJECT_HAS_PIN;
- GEM_BUG_ON(eb_vma_misplaced(entry, vma));
-
- if (unlikely(entry->flags & EXEC_OBJECT_NEEDS_FENCE)) {
+ if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
err = i915_vma_get_fence(vma);
if (unlikely(err)) {
i915_vma_unpin(vma);
@@ -571,9 +564,12 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
}
if (i915_vma_pin_fence(vma))
- entry->flags |= __EXEC_OBJECT_HAS_FENCE;
+ exec_flags |= __EXEC_OBJECT_HAS_FENCE;
}
+ *vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
+ GEM_BUG_ON(eb_vma_misplaced(entry, vma, exec_flags));
+
return 0;
}
@@ -614,18 +610,18 @@ static int eb_reserve(struct i915_execbuffer *eb)
INIT_LIST_HEAD(&eb->unbound);
INIT_LIST_HEAD(&last);
for (i = 0; i < count; i++) {
- struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
+ unsigned int flags = eb->flags[i];
+ struct i915_vma *vma = eb->vma[i];
- if (entry->flags & EXEC_OBJECT_PINNED &&
- entry->flags & __EXEC_OBJECT_HAS_PIN)
+ if (flags & EXEC_OBJECT_PINNED &&
+ flags & __EXEC_OBJECT_HAS_PIN)
continue;
- vma = exec_to_vma(entry);
- eb_unreserve_vma(vma, entry);
+ eb_unreserve_vma(vma, &eb->flags[i]);
- if (entry->flags & EXEC_OBJECT_PINNED)
+ if (flags & EXEC_OBJECT_PINNED)
list_add(&vma->exec_link, &eb->unbound);
- else if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
+ else if (flags & __EXEC_OBJECT_NEEDS_MAP)
list_add_tail(&vma->exec_link, &eb->unbound);
else
list_add_tail(&vma->exec_link, &last);
@@ -649,19 +645,6 @@ static int eb_reserve(struct i915_execbuffer *eb)
} while (1);
}
-static inline struct hlist_head *
-ht_head(const struct i915_gem_context_vma_lut *lut, u32 handle)
-{
- return &lut->ht[hash_32(handle, lut->ht_bits)];
-}
-
-static inline bool
-ht_needs_resize(const struct i915_gem_context_vma_lut *lut)
-{
- return (4*lut->ht_count > 3*lut->ht_size ||
- 4*lut->ht_count + 1 < lut->ht_size);
-}
-
static unsigned int eb_batch_index(const struct i915_execbuffer *eb)
{
if (eb->args->flags & I915_EXEC_BATCH_FIRST)
@@ -675,16 +658,10 @@ static int eb_select_context(struct i915_execbuffer *eb)
struct i915_gem_context *ctx;
ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1);
- if (unlikely(IS_ERR(ctx)))
- return PTR_ERR(ctx);
-
- if (unlikely(i915_gem_context_is_banned(ctx))) {
- DRM_DEBUG("Context %u tried to submit while banned\n",
- ctx->user_handle);
- return -EIO;
- }
+ if (unlikely(!ctx))
+ return -ENOENT;
- eb->ctx = i915_gem_context_get(ctx);
+ eb->ctx = ctx;
eb->vm = ctx->ppgtt ? &ctx->ppgtt->base : &eb->i915->ggtt.base;
eb->context_flags = 0;
@@ -696,132 +673,74 @@ static int eb_select_context(struct i915_execbuffer *eb)
static int eb_lookup_vmas(struct i915_execbuffer *eb)
{
-#define INTERMEDIATE BIT(0)
- const unsigned int count = eb->buffer_count;
- struct i915_gem_context_vma_lut *lut = &eb->ctx->vma_lut;
- struct i915_vma *vma;
- struct idr *idr;
+ struct radix_tree_root *handles_vma = &eb->ctx->handles_vma;
+ struct drm_i915_gem_object *uninitialized_var(obj);
unsigned int i;
- int slow_pass = -1;
int err;
- INIT_LIST_HEAD(&eb->relocs);
- INIT_LIST_HEAD(&eb->unbound);
-
- if (unlikely(lut->ht_size & I915_CTX_RESIZE_IN_PROGRESS))
- flush_work(&lut->resize);
- GEM_BUG_ON(lut->ht_size & I915_CTX_RESIZE_IN_PROGRESS);
-
- for (i = 0; i < count; i++) {
- __exec_to_vma(&eb->exec[i]) = 0;
-
- hlist_for_each_entry(vma,
- ht_head(lut, eb->exec[i].handle),
- ctx_node) {
- if (vma->ctx_handle != eb->exec[i].handle)
- continue;
-
- err = eb_add_vma(eb, &eb->exec[i], vma);
- if (unlikely(err))
- return err;
-
- goto next_vma;
- }
+ if (unlikely(i915_gem_context_is_closed(eb->ctx)))
+ return -ENOENT;
- if (slow_pass < 0)
- slow_pass = i;
-next_vma: ;
- }
+ if (unlikely(i915_gem_context_is_banned(eb->ctx)))
+ return -EIO;
- if (slow_pass < 0)
- goto out;
+ INIT_LIST_HEAD(&eb->relocs);
+ INIT_LIST_HEAD(&eb->unbound);
- spin_lock(&eb->file->table_lock);
- /*
- * Grab a reference to the object and release the lock so we can lookup
- * or create the VMA without using GFP_ATOMIC
- */
- idr = &eb->file->object_idr;
- for (i = slow_pass; i < count; i++) {
- struct drm_i915_gem_object *obj;
+ for (i = 0; i < eb->buffer_count; i++) {
+ u32 handle = eb->exec[i].handle;
+ struct i915_lut_handle *lut;
+ struct i915_vma *vma;
- if (__exec_to_vma(&eb->exec[i]))
- continue;
+ vma = radix_tree_lookup(handles_vma, handle);
+ if (likely(vma))
+ goto add_vma;
- obj = to_intel_bo(idr_find(idr, eb->exec[i].handle));
+ obj = i915_gem_object_lookup(eb->file, handle);
if (unlikely(!obj)) {
- spin_unlock(&eb->file->table_lock);
- DRM_DEBUG("Invalid object handle %d at index %d\n",
- eb->exec[i].handle, i);
err = -ENOENT;
- goto err;
+ goto err_vma;
}
- __exec_to_vma(&eb->exec[i]) = INTERMEDIATE | (uintptr_t)obj;
- }
- spin_unlock(&eb->file->table_lock);
-
- for (i = slow_pass; i < count; i++) {
- struct drm_i915_gem_object *obj;
-
- if (!(__exec_to_vma(&eb->exec[i]) & INTERMEDIATE))
- continue;
-
- /*
- * NOTE: We can leak any vmas created here when something fails
- * later on. But that's no issue since vma_unbind can deal with
- * vmas which are not actually bound. And since only
- * lookup_or_create exists as an interface to get at the vma
- * from the (obj, vm) we don't run the risk of creating
- * duplicated vmas for the same vm.
- */
- obj = u64_to_ptr(typeof(*obj),
- __exec_to_vma(&eb->exec[i]) & ~INTERMEDIATE);
vma = i915_vma_instance(obj, eb->vm, NULL);
if (unlikely(IS_ERR(vma))) {
- DRM_DEBUG("Failed to lookup VMA\n");
err = PTR_ERR(vma);
- goto err;
+ goto err_obj;
}
- /* First come, first served */
- if (!vma->ctx) {
- vma->ctx = eb->ctx;
- vma->ctx_handle = eb->exec[i].handle;
- hlist_add_head(&vma->ctx_node,
- ht_head(lut, eb->exec[i].handle));
- lut->ht_count++;
- lut->ht_size |= I915_CTX_RESIZE_IN_PROGRESS;
- if (i915_vma_is_ggtt(vma)) {
- GEM_BUG_ON(obj->vma_hashed);
- obj->vma_hashed = vma;
- }
+ lut = kmem_cache_alloc(eb->i915->luts, GFP_KERNEL);
+ if (unlikely(!lut)) {
+ err = -ENOMEM;
+ goto err_obj;
+ }
- i915_vma_get(vma);
+ err = radix_tree_insert(handles_vma, handle, vma);
+ if (unlikely(err)) {
+ kfree(lut);
+ goto err_obj;
}
- err = eb_add_vma(eb, &eb->exec[i], vma);
- if (unlikely(err))
- goto err;
+ list_add(&lut->obj_link, &obj->lut_list);
+ list_add(&lut->ctx_link, &eb->ctx->handles_list);
+ lut->ctx = eb->ctx;
+ lut->handle = handle;
- /* Only after we validated the user didn't use our bits */
- if (vma->ctx != eb->ctx) {
- i915_vma_get(vma);
- eb->exec[i].flags |= __EXEC_OBJECT_HAS_REF;
- }
- }
+ /* transfer ref to ctx */
+ obj = NULL;
- if (lut->ht_size & I915_CTX_RESIZE_IN_PROGRESS) {
- if (ht_needs_resize(lut))
- queue_work(system_highpri_wq, &lut->resize);
- else
- lut->ht_size &= ~I915_CTX_RESIZE_IN_PROGRESS;
+add_vma:
+ err = eb_add_vma(eb, i, vma);
+ if (unlikely(err))
+ goto err_obj;
+
+ GEM_BUG_ON(vma != eb->vma[i]);
+ GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
}
-out:
/* take note of the batch buffer before we might reorder the lists */
i = eb_batch_index(eb);
- eb->batch = exec_to_vma(&eb->exec[i]);
+ eb->batch = eb->vma[i];
+ GEM_BUG_ON(eb->batch->exec_flags != &eb->flags[i]);
/*
* SNA is doing fancy tricks with compressing batch buffers, which leads
@@ -832,22 +751,20 @@ out:
* Note that actual hangs have only been observed on gen7, but for
* paranoia do it everywhere.
*/
- if (!(eb->exec[i].flags & EXEC_OBJECT_PINNED))
- eb->exec[i].flags |= __EXEC_OBJECT_NEEDS_BIAS;
+ if (!(eb->flags[i] & EXEC_OBJECT_PINNED))
+ eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
if (eb->reloc_cache.has_fence)
- eb->exec[i].flags |= EXEC_OBJECT_NEEDS_FENCE;
+ eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;
eb->args->flags |= __EXEC_VALIDATED;
return eb_reserve(eb);
-err:
- for (i = slow_pass; i < count; i++) {
- if (__exec_to_vma(&eb->exec[i]) & INTERMEDIATE)
- __exec_to_vma(&eb->exec[i]) = 0;
- }
- lut->ht_size &= ~I915_CTX_RESIZE_IN_PROGRESS;
+err_obj:
+ if (obj)
+ i915_gem_object_put(obj);
+err_vma:
+ eb->vma[i] = NULL;
return err;
-#undef INTERMEDIATE
}
static struct i915_vma *
@@ -856,7 +773,7 @@ eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
if (eb->lut_size < 0) {
if (handle >= -eb->lut_size)
return NULL;
- return exec_to_vma(&eb->exec[handle]);
+ return eb->vma[handle];
} else {
struct hlist_head *head;
struct i915_vma *vma;
@@ -876,24 +793,21 @@ static void eb_release_vmas(const struct i915_execbuffer *eb)
unsigned int i;
for (i = 0; i < count; i++) {
- struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
- struct i915_vma *vma = exec_to_vma(entry);
+ struct i915_vma *vma = eb->vma[i];
+ unsigned int flags = eb->flags[i];
if (!vma)
- continue;
+ break;
- GEM_BUG_ON(vma->exec_entry != entry);
- vma->exec_entry = NULL;
- __exec_to_vma(entry) = 0;
+ GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
+ vma->exec_flags = NULL;
+ eb->vma[i] = NULL;
- if (entry->flags & __EXEC_OBJECT_HAS_PIN)
- __eb_unreserve_vma(vma, entry);
+ if (flags & __EXEC_OBJECT_HAS_PIN)
+ __eb_unreserve_vma(vma, flags);
- if (entry->flags & __EXEC_OBJECT_HAS_REF)
+ if (flags & __EXEC_OBJECT_HAS_REF)
i915_vma_put(vma);
-
- entry->flags &=
- ~(__EXEC_OBJECT_RESERVED | __EXEC_OBJECT_HAS_REF);
}
}
@@ -1266,7 +1180,9 @@ relocate_entry(struct i915_vma *vma,
if (!eb->reloc_cache.vaddr &&
(DBG_FORCE_RELOC == FORCE_GPU_RELOC ||
- !reservation_object_test_signaled_rcu(vma->resv, true))) {
+ !reservation_object_test_signaled_rcu(vma->resv, true)) &&
+ __intel_engine_can_store_dword(eb->reloc_cache.gen,
+ eb->engine->class)) {
const unsigned int gen = eb->reloc_cache.gen;
unsigned int len;
u32 *batch;
@@ -1276,10 +1192,8 @@ relocate_entry(struct i915_vma *vma,
len = offset & 7 ? 8 : 5;
else if (gen >= 4)
len = 4;
- else if (gen >= 3)
+ else
len = 3;
- else /* On gen2 MI_STORE_DWORD_IMM uses a physical address */
- goto repeat;
batch = reloc_gpu(eb, vma, len);
if (IS_ERR(batch))
@@ -1382,7 +1296,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
}
if (reloc->write_domain) {
- target->exec_entry->flags |= EXEC_OBJECT_WRITE;
+ *target->exec_flags |= EXEC_OBJECT_WRITE;
/*
* Sandybridge PPGTT errata: We need a global gtt mapping
@@ -1432,9 +1346,9 @@ eb_relocate_entry(struct i915_execbuffer *eb,
* patching using the GPU (though that should be serialised by the
* timeline). To be completely sure, and since we are required to
* do relocations we are already stalling, disable the user's opt
- * of our synchronisation.
+ * out of our synchronisation.
*/
- vma->exec_entry->flags &= ~EXEC_OBJECT_ASYNC;
+ *vma->exec_flags &= ~EXEC_OBJECT_ASYNC;
/* and update the user's relocation entry */
return relocate_entry(vma, reloc, eb, target);
@@ -1445,7 +1359,7 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
struct drm_i915_gem_relocation_entry stack[N_RELOC(512)];
struct drm_i915_gem_relocation_entry __user *urelocs;
- const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
+ const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
unsigned int remain;
urelocs = u64_to_user_ptr(entry->relocs_ptr);
@@ -1458,7 +1372,7 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
* to read. However, if the array is not writable the user loses
* the updated relocation values.
*/
- if (unlikely(!access_ok(VERIFY_READ, urelocs, remain*sizeof(urelocs))))
+ if (unlikely(!access_ok(VERIFY_READ, urelocs, remain*sizeof(*urelocs))))
return -EFAULT;
do {
@@ -1528,7 +1442,7 @@ out:
static int
eb_relocate_vma_slow(struct i915_execbuffer *eb, struct i915_vma *vma)
{
- const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
+ const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
struct drm_i915_gem_relocation_entry *relocs =
u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
unsigned int i;
@@ -1732,6 +1646,8 @@ repeat:
if (err)
goto err;
+ GEM_BUG_ON(!eb->batch);
+
list_for_each_entry(vma, &eb->relocs, reloc_link) {
if (!have_copy) {
pagefault_disable();
@@ -1775,7 +1691,7 @@ out:
}
}
- return err ?: have_copy;
+ return err;
}
static int eb_relocate(struct i915_execbuffer *eb)
@@ -1825,11 +1741,11 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
int err;
for (i = 0; i < count; i++) {
- const struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
- struct i915_vma *vma = exec_to_vma(entry);
+ unsigned int flags = eb->flags[i];
+ struct i915_vma *vma = eb->vma[i];
struct drm_i915_gem_object *obj = vma->obj;
- if (entry->flags & EXEC_OBJECT_CAPTURE) {
+ if (flags & EXEC_OBJECT_CAPTURE) {
struct i915_gem_capture_list *capture;
capture = kmalloc(sizeof(*capture), GFP_KERNEL);
@@ -1837,33 +1753,47 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
return -ENOMEM;
capture->next = eb->request->capture_list;
- capture->vma = vma;
+ capture->vma = eb->vma[i];
eb->request->capture_list = capture;
}
- if (entry->flags & EXEC_OBJECT_ASYNC)
- goto skip_flushes;
+ /*
+ * If the GPU is not _reading_ through the CPU cache, we need
+ * to make sure that any writes (both previous GPU writes from
+ * before a change in snooping levels and normal CPU writes)
+ * caught in that cache are flushed to main memory.
+ *
+ * We want to say
+ * obj->cache_dirty &&
+ * !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
+ * but gcc's optimiser doesn't handle that as well and emits
+ * two jumps instead of one. Maybe one day...
+ */
+ if (unlikely(obj->cache_dirty & ~obj->cache_coherent)) {
+ if (i915_gem_clflush_object(obj, 0))
+ flags &= ~EXEC_OBJECT_ASYNC;
+ }
- if (unlikely(obj->cache_dirty && !obj->cache_coherent))
- i915_gem_clflush_object(obj, 0);
+ if (flags & EXEC_OBJECT_ASYNC)
+ continue;
err = i915_gem_request_await_object
- (eb->request, obj, entry->flags & EXEC_OBJECT_WRITE);
+ (eb->request, obj, flags & EXEC_OBJECT_WRITE);
if (err)
return err;
-
-skip_flushes:
- i915_vma_move_to_active(vma, eb->request, entry->flags);
- __eb_unreserve_vma(vma, entry);
- vma->exec_entry = NULL;
}
for (i = 0; i < count; i++) {
- const struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
- struct i915_vma *vma = exec_to_vma(entry);
+ unsigned int flags = eb->flags[i];
+ struct i915_vma *vma = eb->vma[i];
+
+ i915_vma_move_to_active(vma, eb->request, flags);
+ eb_export_fence(vma, eb->request, flags);
- eb_export_fence(vma, eb->request, entry->flags);
- if (unlikely(entry->flags & __EXEC_OBJECT_HAS_REF))
+ __eb_unreserve_vma(vma, flags);
+ vma->exec_flags = NULL;
+
+ if (unlikely(flags & __EXEC_OBJECT_HAS_REF))
i915_vma_put(vma);
}
eb->exec = NULL;
@@ -1881,8 +1811,10 @@ static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
return false;
/* Kernel clipping was a DRI1 misfeature */
- if (exec->num_cliprects || exec->cliprects_ptr)
- return false;
+ if (!(exec->flags & I915_EXEC_FENCE_ARRAY)) {
+ if (exec->num_cliprects || exec->cliprects_ptr)
+ return false;
+ }
if (exec->DR4 == 0xffffffff) {
DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
@@ -1990,11 +1922,11 @@ static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
if (IS_ERR(vma))
goto out;
- vma->exec_entry =
- memset(&eb->exec[eb->buffer_count++],
- 0, sizeof(*vma->exec_entry));
- vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF;
- __exec_to_vma(vma->exec_entry) = (uintptr_t)i915_vma_get(vma);
+ eb->vma[eb->buffer_count] = i915_vma_get(vma);
+ eb->flags[eb->buffer_count] =
+ __EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF;
+ vma->exec_flags = &eb->flags[eb->buffer_count];
+ eb->buffer_count++;
out:
i915_gem_object_unpin_pages(shadow_batch_obj);
@@ -2113,11 +2045,129 @@ eb_select_engine(struct drm_i915_private *dev_priv,
return engine;
}
+static void
+__free_fence_array(struct drm_syncobj **fences, unsigned int n)
+{
+ while (n--)
+ drm_syncobj_put(ptr_mask_bits(fences[n], 2));
+ kvfree(fences);
+}
+
+static struct drm_syncobj **
+get_fence_array(struct drm_i915_gem_execbuffer2 *args,
+ struct drm_file *file)
+{
+ const unsigned int nfences = args->num_cliprects;
+ struct drm_i915_gem_exec_fence __user *user;
+ struct drm_syncobj **fences;
+ unsigned int n;
+ int err;
+
+ if (!(args->flags & I915_EXEC_FENCE_ARRAY))
+ return NULL;
+
+ if (nfences > SIZE_MAX / sizeof(*fences))
+ return ERR_PTR(-EINVAL);
+
+ user = u64_to_user_ptr(args->cliprects_ptr);
+ if (!access_ok(VERIFY_READ, user, nfences * 2 * sizeof(u32)))
+ return ERR_PTR(-EFAULT);
+
+ fences = kvmalloc_array(args->num_cliprects, sizeof(*fences),
+ __GFP_NOWARN | GFP_TEMPORARY);
+ if (!fences)
+ return ERR_PTR(-ENOMEM);
+
+ for (n = 0; n < nfences; n++) {
+ struct drm_i915_gem_exec_fence fence;
+ struct drm_syncobj *syncobj;
+
+ if (__copy_from_user(&fence, user++, sizeof(fence))) {
+ err = -EFAULT;
+ goto err;
+ }
+
+ syncobj = drm_syncobj_find(file, fence.handle);
+ if (!syncobj) {
+ DRM_DEBUG("Invalid syncobj handle provided\n");
+ err = -ENOENT;
+ goto err;
+ }
+
+ fences[n] = ptr_pack_bits(syncobj, fence.flags, 2);
+ }
+
+ return fences;
+
+err:
+ __free_fence_array(fences, n);
+ return ERR_PTR(err);
+}
+
+static void
+put_fence_array(struct drm_i915_gem_execbuffer2 *args,
+ struct drm_syncobj **fences)
+{
+ if (fences)
+ __free_fence_array(fences, args->num_cliprects);
+}
+
+static int
+await_fence_array(struct i915_execbuffer *eb,
+ struct drm_syncobj **fences)
+{
+ const unsigned int nfences = eb->args->num_cliprects;
+ unsigned int n;
+ int err;
+
+ for (n = 0; n < nfences; n++) {
+ struct drm_syncobj *syncobj;
+ struct dma_fence *fence;
+ unsigned int flags;
+
+ syncobj = ptr_unpack_bits(fences[n], &flags, 2);
+ if (!(flags & I915_EXEC_FENCE_WAIT))
+ continue;
+
+ fence = drm_syncobj_fence_get(syncobj);
+ if (!fence)
+ return -EINVAL;
+
+ err = i915_gem_request_await_dma_fence(eb->request, fence);
+ dma_fence_put(fence);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static void
+signal_fence_array(struct i915_execbuffer *eb,
+ struct drm_syncobj **fences)
+{
+ const unsigned int nfences = eb->args->num_cliprects;
+ struct dma_fence * const fence = &eb->request->fence;
+ unsigned int n;
+
+ for (n = 0; n < nfences; n++) {
+ struct drm_syncobj *syncobj;
+ unsigned int flags;
+
+ syncobj = ptr_unpack_bits(fences[n], &flags, 2);
+ if (!(flags & I915_EXEC_FENCE_SIGNAL))
+ continue;
+
+ drm_syncobj_replace_fence(syncobj, fence);
+ }
+}
+
static int
i915_gem_do_execbuffer(struct drm_device *dev,
struct drm_file *file,
struct drm_i915_gem_execbuffer2 *args,
- struct drm_i915_gem_exec_object2 *exec)
+ struct drm_i915_gem_exec_object2 *exec,
+ struct drm_syncobj **fences)
{
struct i915_execbuffer eb;
struct dma_fence *in_fence = NULL;
@@ -2133,8 +2183,12 @@ i915_gem_do_execbuffer(struct drm_device *dev,
eb.args = args;
if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC))
args->flags |= __EXEC_HAS_RELOC;
+
eb.exec = exec;
- eb.ctx = NULL;
+ eb.vma = (struct i915_vma **)(exec + args->buffer_count + 1);
+ eb.vma[0] = NULL;
+ eb.flags = (unsigned int *)(eb.vma + args->buffer_count + 1);
+
eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
if (USES_FULL_PPGTT(eb.i915))
eb.invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
@@ -2192,6 +2246,10 @@ i915_gem_do_execbuffer(struct drm_device *dev,
GEM_BUG_ON(!eb.lut_size);
+ err = eb_select_context(&eb);
+ if (unlikely(err))
+ goto err_destroy;
+
/*
* Take a local wakeref for preparing to dispatch the execbuf as
* we expect to access the hardware fairly frequently in the
@@ -2200,16 +2258,13 @@ i915_gem_do_execbuffer(struct drm_device *dev,
* 100ms.
*/
intel_runtime_pm_get(eb.i915);
+
err = i915_mutex_lock_interruptible(dev);
if (err)
goto err_rpm;
- err = eb_select_context(&eb);
- if (unlikely(err))
- goto err_unlock;
-
err = eb_relocate(&eb);
- if (err)
+ if (err) {
/*
* If the user expects the execobject.offset and
* reloc.presumed_offset to be an exact match,
@@ -2218,10 +2273,10 @@ i915_gem_do_execbuffer(struct drm_device *dev,
* relocation.
*/
args->flags &= ~__EXEC_HAS_RELOC;
- if (err < 0)
goto err_vma;
+ }
- if (unlikely(eb.batch->exec_entry->flags & EXEC_OBJECT_WRITE)) {
+ if (unlikely(*eb.batch->exec_flags & EXEC_OBJECT_WRITE)) {
DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
err = -EINVAL;
goto err_vma;
@@ -2303,6 +2358,12 @@ i915_gem_do_execbuffer(struct drm_device *dev,
goto err_request;
}
+ if (fences) {
+ err = await_fence_array(&eb, fences);
+ if (err)
+ goto err_request;
+ }
+
if (out_fence_fd != -1) {
out_fence = sync_file_create(&eb.request->fence);
if (!out_fence) {
@@ -2326,6 +2387,9 @@ err_request:
__i915_add_request(eb.request, err == 0);
add_to_client(eb.request, file);
+ if (fences)
+ signal_fence_array(&eb, fences);
+
if (out_fence) {
if (err == 0) {
fd_install(out_fence_fd, out_fence->file);
@@ -2343,11 +2407,11 @@ err_batch_unpin:
err_vma:
if (eb.exec)
eb_release_vmas(&eb);
- i915_gem_context_put(eb.ctx);
-err_unlock:
mutex_unlock(&dev->struct_mutex);
err_rpm:
intel_runtime_pm_put(eb.i915);
+ i915_gem_context_put(eb.ctx);
+err_destroy:
eb_destroy(&eb);
err_out_fence:
if (out_fence_fd != -1)
@@ -2365,7 +2429,9 @@ int
i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file)
{
- const size_t sz = sizeof(struct drm_i915_gem_exec_object2);
+ const size_t sz = (sizeof(struct drm_i915_gem_exec_object2) +
+ sizeof(struct i915_vma *) +
+ sizeof(unsigned int));
struct drm_i915_gem_execbuffer *args = data;
struct drm_i915_gem_execbuffer2 exec2;
struct drm_i915_gem_exec_object *exec_list = NULL;
@@ -2427,7 +2493,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
exec2_list[i].flags = 0;
}
- err = i915_gem_do_execbuffer(dev, file, &exec2, exec2_list);
+ err = i915_gem_do_execbuffer(dev, file, &exec2, exec2_list, NULL);
if (exec2.flags & __EXEC_HAS_RELOC) {
struct drm_i915_gem_exec_object __user *user_exec_list =
u64_to_user_ptr(args->buffers_ptr);
@@ -2456,9 +2522,12 @@ int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
struct drm_file *file)
{
- const size_t sz = sizeof(struct drm_i915_gem_exec_object2);
+ const size_t sz = (sizeof(struct drm_i915_gem_exec_object2) +
+ sizeof(struct i915_vma *) +
+ sizeof(unsigned int));
struct drm_i915_gem_execbuffer2 *args = data;
struct drm_i915_gem_exec_object2 *exec2_list;
+ struct drm_syncobj **fences = NULL;
int err;
if (args->buffer_count < 1 || args->buffer_count > SIZE_MAX / sz - 1) {
@@ -2485,7 +2554,15 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
return -EFAULT;
}
- err = i915_gem_do_execbuffer(dev, file, args, exec2_list);
+ if (args->flags & I915_EXEC_FENCE_ARRAY) {
+ fences = get_fence_array(args, file);
+ if (IS_ERR(fences)) {
+ kvfree(exec2_list);
+ return PTR_ERR(fences);
+ }
+ }
+
+ err = i915_gem_do_execbuffer(dev, file, args, exec2_list, fences);
/*
* Now that we have begun execution of the batchbuffer, we ignore
@@ -2515,6 +2592,7 @@ end_user:
}
args->flags &= ~__I915_EXEC_UNKNOWN_FLAGS;
+ put_fence_array(args, fences);
kvfree(exec2_list);
return err;
}
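For completeness, here is a sketch of how userspace could drive the I915_EXEC_FENCE_ARRAY path wired up above: the otherwise unused cliprects fields carry an array of drm_i915_gem_exec_fence entries naming drm_syncobj handles to wait on and/or signal. The struct layout and flag names are assumed from the uapi this series adds (they are not part of this hunk), and submit_with_fences is a made-up helper, not libdrm API.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int submit_with_fences(int fd, struct drm_i915_gem_execbuffer2 *execbuf,
			      uint32_t wait_syncobj, uint32_t signal_syncobj)
{
	struct drm_i915_gem_exec_fence fences[2];

	memset(fences, 0, sizeof(fences));
	fences[0].handle = wait_syncobj;
	fences[0].flags = I915_EXEC_FENCE_WAIT;	/* block until this syncobj fires */
	fences[1].handle = signal_syncobj;
	fences[1].flags = I915_EXEC_FENCE_SIGNAL;	/* attach the request fence */

	execbuf->flags |= I915_EXEC_FENCE_ARRAY;
	execbuf->cliprects_ptr = (uintptr_t)fences;
	execbuf->num_cliprects = 2;	/* number of fence entries, not cliprects */

	return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, execbuf);
}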
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 61fc7e90a7da..6c6b8e8592aa 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -144,9 +144,9 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
has_full_48bit_ppgtt = dev_priv->info.has_full_48bit_ppgtt;
if (intel_vgpu_active(dev_priv)) {
- /* emulation is too hard */
+ /* GVT-g has no support for 32bit ppgtt */
has_full_ppgtt = false;
- has_full_48bit_ppgtt = false;
+ has_full_48bit_ppgtt = intel_vgpu_has_full_48bit_ppgtt(dev_priv);
}
if (!has_aliasing_ppgtt)
@@ -180,10 +180,15 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
return 0;
}
- if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists && has_full_ppgtt)
- return has_full_48bit_ppgtt ? 3 : 2;
- else
- return has_aliasing_ppgtt ? 1 : 0;
+ if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists) {
+ if (has_full_48bit_ppgtt)
+ return 3;
+
+ if (has_full_ppgtt)
+ return 2;
+ }
+
+ return has_aliasing_ppgtt ? 1 : 0;
}
static int ppgtt_bind_vma(struct i915_vma *vma,
@@ -207,8 +212,7 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
if (vma->obj->gt_ro)
pte_flags |= PTE_READ_ONLY;
- vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
- cache_level, pte_flags);
+ vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
return 0;
}
@@ -907,37 +911,35 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
}
static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
- struct sg_table *pages,
- u64 start,
+ struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 unused)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct sgt_dma iter = {
- .sg = pages->sgl,
+ .sg = vma->pages->sgl,
.dma = sg_dma_address(iter.sg),
.max = iter.dma + iter.sg->length,
};
- struct gen8_insert_pte idx = gen8_insert_pte(start);
+ struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx,
cache_level);
}
static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
- struct sg_table *pages,
- u64 start,
+ struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 unused)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct sgt_dma iter = {
- .sg = pages->sgl,
+ .sg = vma->pages->sgl,
.dma = sg_dma_address(iter.sg),
.max = iter.dma + iter.sg->length,
};
struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;
- struct gen8_insert_pte idx = gen8_insert_pte(start);
+ struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++], &iter,
&idx, cache_level))
@@ -1621,13 +1623,12 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
}
static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
- struct sg_table *pages,
- u64 start,
+ struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- unsigned first_entry = start >> PAGE_SHIFT;
+ unsigned first_entry = vma->node.start >> PAGE_SHIFT;
unsigned act_pt = first_entry / GEN6_PTES;
unsigned act_pte = first_entry % GEN6_PTES;
const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
@@ -1635,7 +1636,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
gen6_pte_t *vaddr;
vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]);
- iter.sg = pages->sgl;
+ iter.sg = vma->pages->sgl;
iter.dma = sg_dma_address(iter.sg);
iter.max = iter.dma + iter.sg->length;
do {
@@ -2061,7 +2062,7 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
*/
GEM_BUG_ON(obj->mm.pages == pages);
} while (i915_gem_shrink(to_i915(obj->base.dev),
- obj->base.size >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT, NULL,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
I915_SHRINK_ACTIVE));
@@ -2090,8 +2091,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
}
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
- struct sg_table *st,
- u64 start,
+ struct i915_vma *vma,
enum i915_cache_level level,
u32 unused)
{
@@ -2102,8 +2102,8 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
dma_addr_t addr;
gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
- gtt_entries += start >> PAGE_SHIFT;
- for_each_sgt_dma(addr, sgt_iter, st)
+ gtt_entries += vma->node.start >> PAGE_SHIFT;
+ for_each_sgt_dma(addr, sgt_iter, vma->pages)
gen8_set_pte(gtt_entries++, pte_encode | addr);
wmb();
@@ -2137,17 +2137,16 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm,
* mapped BAR (dev_priv->mm.gtt->gtt).
*/
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
- struct sg_table *st,
- u64 start,
+ struct i915_vma *vma,
enum i915_cache_level level,
u32 flags)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
- unsigned int i = start >> PAGE_SHIFT;
+ unsigned int i = vma->node.start >> PAGE_SHIFT;
struct sgt_iter iter;
dma_addr_t addr;
- for_each_sgt_dma(addr, iter, st)
+ for_each_sgt_dma(addr, iter, vma->pages)
iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
wmb();
@@ -2229,8 +2228,7 @@ static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
struct insert_entries {
struct i915_address_space *vm;
- struct sg_table *st;
- u64 start;
+ struct i915_vma *vma;
enum i915_cache_level level;
};
@@ -2238,19 +2236,18 @@ static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
{
struct insert_entries *arg = _arg;
- gen8_ggtt_insert_entries(arg->vm, arg->st, arg->start, arg->level, 0);
+ gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, 0);
bxt_vtd_ggtt_wa(arg->vm);
return 0;
}
static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
- struct sg_table *st,
- u64 start,
+ struct i915_vma *vma,
enum i915_cache_level level,
u32 unused)
{
- struct insert_entries arg = { vm, st, start, level };
+ struct insert_entries arg = { vm, vma, level };
stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
}
@@ -2316,15 +2313,15 @@ static void i915_ggtt_insert_page(struct i915_address_space *vm,
}
static void i915_ggtt_insert_entries(struct i915_address_space *vm,
- struct sg_table *pages,
- u64 start,
+ struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 unused)
{
unsigned int flags = (cache_level == I915_CACHE_NONE) ?
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
- intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags);
+ intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT,
+ flags);
}
static void i915_ggtt_clear_range(struct i915_address_space *vm,
@@ -2353,8 +2350,7 @@ static int ggtt_bind_vma(struct i915_vma *vma,
pte_flags |= PTE_READ_ONLY;
intel_runtime_pm_get(i915);
- vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
- cache_level, pte_flags);
+ vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
intel_runtime_pm_put(i915);
/*
@@ -2407,16 +2403,13 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
goto err_pages;
}
- appgtt->base.insert_entries(&appgtt->base,
- vma->pages, vma->node.start,
- cache_level, pte_flags);
+ appgtt->base.insert_entries(&appgtt->base, vma, cache_level,
+ pte_flags);
}
if (flags & I915_VMA_GLOBAL_BIND) {
intel_runtime_pm_get(i915);
- vma->vm->insert_entries(vma->vm,
- vma->pages, vma->node.start,
- cache_level, pte_flags);
+ vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
intel_runtime_pm_put(i915);
}
@@ -2749,6 +2742,24 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
return 0;
}
+static void cnl_setup_private_ppat(struct drm_i915_private *dev_priv)
+{
+ /* XXX: spec is unclear if this is still needed for CNL+ */
+ if (!USES_PPGTT(dev_priv)) {
+ I915_WRITE(GEN10_PAT_INDEX(0), GEN8_PPAT_UC);
+ return;
+ }
+
+ I915_WRITE(GEN10_PAT_INDEX(0), GEN8_PPAT_WB | GEN8_PPAT_LLC);
+ I915_WRITE(GEN10_PAT_INDEX(1), GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
+ I915_WRITE(GEN10_PAT_INDEX(2), GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
+ I915_WRITE(GEN10_PAT_INDEX(3), GEN8_PPAT_UC);
+ I915_WRITE(GEN10_PAT_INDEX(4), GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
+ I915_WRITE(GEN10_PAT_INDEX(5), GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
+ I915_WRITE(GEN10_PAT_INDEX(6), GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
+ I915_WRITE(GEN10_PAT_INDEX(7), GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
+}
+
/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
* bits. When using advanced contexts each context stores its own PAT, but
* writing this data shouldn't be harmful even in those cases. */
@@ -2863,7 +2874,9 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
- if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 10)
+ cnl_setup_private_ppat(dev_priv);
+ else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
chv_setup_private_ppat(dev_priv);
else
bdw_setup_private_ppat(dev_priv);
@@ -3145,7 +3158,9 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
ggtt->base.closed = false;
if (INTEL_GEN(dev_priv) >= 8) {
- if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 10)
+ cnl_setup_private_ppat(dev_priv);
+ else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
chv_setup_private_ppat(dev_priv);
else
bdw_setup_private_ppat(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 1b2a56c3e5d3..b4e3aa7c0ce1 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -313,8 +313,7 @@ struct i915_address_space {
enum i915_cache_level cache_level,
u32 flags);
void (*insert_entries)(struct i915_address_space *vm,
- struct sg_table *st,
- u64 start,
+ struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags);
void (*cleanup)(struct i915_address_space *vm);
diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c
index 568bf83af1f5..c1f64ddaf8aa 100644
--- a/drivers/gpu/drm/i915/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/i915_gem_internal.c
@@ -174,6 +174,7 @@ i915_gem_object_create_internal(struct drm_i915_private *i915,
phys_addr_t size)
{
struct drm_i915_gem_object *obj;
+ unsigned int cache_level;
GEM_BUG_ON(!size);
GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
@@ -190,9 +191,9 @@ i915_gem_object_create_internal(struct drm_i915_private *i915,
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
- obj->cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
- obj->cache_coherent = i915_gem_object_is_coherent(obj);
- obj->cache_dirty = !obj->cache_coherent;
+
+ cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
+ i915_gem_object_set_cache_coherency(obj, cache_level);
return obj;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_object.c b/drivers/gpu/drm/i915/i915_gem_object.c
new file mode 100644
index 000000000000..aab8cdd80e6d
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_object.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "i915_drv.h"
+#include "i915_gem_object.h"
+
+/**
+ * i915_gem_object_set_cache_coherency - mark up the object's coherency levels for a given cache_level
+ * @obj: #drm_i915_gem_object
+ * @cache_level: cache level
+ */
+void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
+ unsigned int cache_level)
+{
+ obj->cache_level = cache_level;
+
+ if (cache_level != I915_CACHE_NONE)
+ obj->cache_coherent = (I915_BO_CACHE_COHERENT_FOR_READ |
+ I915_BO_CACHE_COHERENT_FOR_WRITE);
+ else if (HAS_LLC(to_i915(obj->base.dev)))
+ obj->cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ;
+ else
+ obj->cache_coherent = 0;
+
+ obj->cache_dirty =
+ !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
+}
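
The new helper boils down to a small decision table: any cached level is treated as coherent for both CPU reads and writes, an uncached object on an LLC platform is coherent for reads only, anything else is not coherent at all, and cache_dirty is set whenever writes are not coherent. A minimal standalone sketch of that logic follows; the struct and enum here are simplified stand-ins, not the driver's real types.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the driver's cache levels and flag bits. */
enum cache_level { CACHE_NONE, CACHE_LLC, CACHE_WT };
#define COHERENT_FOR_READ  (1u << 0)
#define COHERENT_FOR_WRITE (1u << 1)

struct gem_object_model {
	unsigned int cache_level;
	unsigned int cache_coherent;
	bool cache_dirty;
};

/* Mirrors the flag logic of i915_gem_object_set_cache_coherency() above. */
static void set_cache_coherency(struct gem_object_model *obj,
				unsigned int cache_level, bool has_llc)
{
	obj->cache_level = cache_level;

	if (cache_level != CACHE_NONE)
		obj->cache_coherent = COHERENT_FOR_READ | COHERENT_FOR_WRITE;
	else if (has_llc)
		obj->cache_coherent = COHERENT_FOR_READ;
	else
		obj->cache_coherent = 0;

	/* Writes that bypass the cache can leave stale lines behind. */
	obj->cache_dirty = !(obj->cache_coherent & COHERENT_FOR_WRITE);
}

int main(void)
{
	struct gem_object_model obj;

	set_cache_coherency(&obj, CACHE_NONE, true);
	printf("uncached on LLC: coherent=%#x dirty=%d\n",
	       obj.cache_coherent, obj.cache_dirty);

	set_cache_coherency(&obj, CACHE_LLC, true);
	printf("LLC cached:      coherent=%#x dirty=%d\n",
	       obj.cache_coherent, obj.cache_dirty);
	return 0;
}
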
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index 5b19a4916a4d..c30d8f808185 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -33,8 +33,24 @@
#include <drm/i915_drm.h>
+#include "i915_gem_request.h"
#include "i915_selftest.h"
+struct drm_i915_gem_object;
+
+/*
+ * struct i915_lut_handle tracks the fast lookups from handle to vma used
+ * for execbuf. Although we use a radix tree for that mapping, the entries
+ * must also be removed when the object or context is closed, so we keep a
+ * secondary list and a translation entry (i915_lut_handle).
+ */
+struct i915_lut_handle {
+ struct list_head obj_link;
+ struct list_head ctx_link;
+ struct i915_gem_context *ctx;
+ u32 handle;
+};
+
struct drm_i915_gem_object_ops {
unsigned int flags;
#define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0)
@@ -86,7 +102,15 @@ struct drm_i915_gem_object {
* They are also added to @vma_list for easy iteration.
*/
struct rb_root vma_tree;
- struct i915_vma *vma_hashed;
+
+ /**
+ * @lut_list: List of vma lookup entries in use for this object.
+ *
+ * If this object is closed, we need to remove all of its VMA from
+ * the fast lookup index in associated contexts; @lut_list provides
+ * this translation from object to context->handles_vma.
+ */
+ struct list_head lut_list;
/** Stolen memory for this object, instead of being backed by shmem. */
struct drm_mm_node *stolen;
@@ -118,8 +142,10 @@ struct drm_i915_gem_object {
*/
unsigned long gt_ro:1;
unsigned int cache_level:3;
+ unsigned int cache_coherent:2;
+#define I915_BO_CACHE_COHERENT_FOR_READ BIT(0)
+#define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1)
unsigned int cache_dirty:1;
- unsigned int cache_coherent:1;
atomic_t frontbuffer_bits;
unsigned int frontbuffer_ggtt_origin; /* write once */
@@ -391,6 +417,8 @@ i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
return engine;
}
+void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
+ unsigned int cache_level);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
#endif
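
The lut_list comment above describes a dual-index scheme: execbuf resolves handles through a per-context radix tree, while each object keeps a list of i915_lut_handle entries so the fast index can be scrubbed when the object (or context) is closed. Below is a simplified standalone sketch of that bookkeeping, assuming a single context and a flat array in place of the radix tree; the real entry also carries a ctx pointer and a ctx_link so teardown works from the context side too.

#include <stdio.h>
#include <stdlib.h>

#define MAX_HANDLES 8

struct object;

/* Stand-in for i915_lut_handle: links one handle slot back to its object. */
struct lut_handle {
	struct lut_handle *next;   /* obj_link: chained off the object */
	struct object *obj;
	unsigned int handle;       /* index into the per-context table */
};

/* Stand-in for the per-context radix tree: a tiny handle -> object table. */
struct context {
	struct object *handles[MAX_HANDLES];
};

struct object {
	struct lut_handle *luts;   /* lut_list: every context slot pointing here */
};

static void lut_insert(struct context *ctx, struct object *obj, unsigned int handle)
{
	struct lut_handle *lut = malloc(sizeof(*lut));

	if (!lut)
		return;
	lut->obj = obj;
	lut->handle = handle;
	lut->next = obj->luts;
	obj->luts = lut;
	ctx->handles[handle] = obj;    /* the fast lookup used by execbuf */
}

/* Closing the object walks lut_list to scrub every context's fast index. */
static void object_close(struct context *ctx, struct object *obj)
{
	while (obj->luts) {
		struct lut_handle *lut = obj->luts;

		obj->luts = lut->next;
		ctx->handles[lut->handle] = NULL;
		free(lut);
	}
}

int main(void)
{
	struct context ctx = { 0 };
	struct object obj = { 0 };

	lut_insert(&ctx, &obj, 3);
	printf("handle 3 -> %p\n", (void *)ctx.handles[3]);
	object_close(&ctx, &obj);
	printf("handle 3 -> %p after close\n", (void *)ctx.handles[3]);
	return 0;
}
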
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 7032c542a9b1..4dd4c2159a92 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -242,6 +242,10 @@ int i915_gem_render_state_emit(struct drm_i915_gem_request *req)
goto err_unpin;
}
+ ret = req->engine->emit_flush(req, EMIT_INVALIDATE);
+ if (ret)
+ goto err_unpin;
+
ret = req->engine->emit_bb_start(req,
so->batch_offset, so->batch_size,
I915_DISPATCH_SECURE);
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 8c59c79cbd8b..813a3b546d6e 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -213,6 +213,10 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
cond_resched();
}
+ /* Check we are idle before we fiddle with hw state! */
+ GEM_BUG_ON(!intel_engine_is_idle(engine));
+ GEM_BUG_ON(i915_gem_active_isset(&engine->timeline->last_request));
+
/* Finally reset hw state */
intel_engine_init_global_seqno(engine, seqno);
tl->seqno = seqno;
@@ -240,27 +244,60 @@ int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
return reset_all_global_seqno(dev_priv, seqno - 1);
}
-static int reserve_seqno(struct intel_engine_cs *engine)
+static void mark_busy(struct drm_i915_private *i915)
{
+ if (i915->gt.awake)
+ return;
+
+ GEM_BUG_ON(!i915->gt.active_requests);
+
+ intel_runtime_pm_get_noresume(i915);
+ i915->gt.awake = true;
+
+ intel_enable_gt_powersave(i915);
+ i915_update_gfx_val(i915);
+ if (INTEL_GEN(i915) >= 6)
+ gen6_rps_busy(i915);
+
+ queue_delayed_work(i915->wq,
+ &i915->gt.retire_work,
+ round_jiffies_up_relative(HZ));
+}
+
+static int reserve_engine(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = engine->i915;
u32 active = ++engine->timeline->inflight_seqnos;
u32 seqno = engine->timeline->seqno;
int ret;
/* Reservation is fine until we need to wrap around */
- if (likely(!add_overflows(seqno, active)))
- return 0;
-
- ret = reset_all_global_seqno(engine->i915, 0);
- if (ret) {
- engine->timeline->inflight_seqnos--;
- return ret;
+ if (unlikely(add_overflows(seqno, active))) {
+ ret = reset_all_global_seqno(i915, 0);
+ if (ret) {
+ engine->timeline->inflight_seqnos--;
+ return ret;
+ }
}
+ if (!i915->gt.active_requests++)
+ mark_busy(i915);
+
return 0;
}
-static void unreserve_seqno(struct intel_engine_cs *engine)
+static void unreserve_engine(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *i915 = engine->i915;
+
+ if (!--i915->gt.active_requests) {
+ /* Cancel the mark_busy() from our reserve_engine() */
+ GEM_BUG_ON(!i915->gt.awake);
+ mod_delayed_work(i915->wq,
+ &i915->gt.idle_work,
+ msecs_to_jiffies(100));
+ }
+
GEM_BUG_ON(!engine->timeline->inflight_seqnos);
engine->timeline->inflight_seqnos--;
}
@@ -329,13 +366,7 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
list_del_init(&request->link);
spin_unlock_irq(&engine->timeline->lock);
- if (!--request->i915->gt.active_requests) {
- GEM_BUG_ON(!request->i915->gt.awake);
- mod_delayed_work(request->i915->wq,
- &request->i915->gt.idle_work,
- msecs_to_jiffies(100));
- }
- unreserve_seqno(request->engine);
+ unreserve_engine(request->engine);
advance_ring(request);
free_capture_list(request);
@@ -370,8 +401,7 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
i915_gem_request_remove_from_client(request);
/* Retirement decays the ban score as it is a sign of ctx progress */
- if (request->ctx->ban_score > 0)
- request->ctx->ban_score--;
+ atomic_dec_if_positive(&request->ctx->ban_score);
/* The backing object for the context is done after switching to the
* *next* context. Therefore we cannot retire the previous context until
@@ -384,7 +414,11 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
engine->context_unpin(engine, engine->last_retired_context);
engine->last_retired_context = request->ctx;
- dma_fence_signal(&request->fence);
+ spin_lock_irq(&request->lock);
+ if (request->waitboost)
+ atomic_dec(&request->i915->rps.num_waiters);
+ dma_fence_signal_locked(&request->fence);
+ spin_unlock_irq(&request->lock);
i915_priotree_fini(request->i915, &request->priotree);
i915_gem_request_put(request);
@@ -568,7 +602,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
return ERR_CAST(ring);
GEM_BUG_ON(!ring);
- ret = reserve_seqno(engine);
+ ret = reserve_engine(engine);
if (ret)
goto err_unpin;
@@ -639,6 +673,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
req->file_priv = NULL;
req->batch = NULL;
req->capture_list = NULL;
+ req->waitboost = false;
/*
* Reserve space in the ring buffer for all the commands required to
@@ -673,7 +708,7 @@ err_ctx:
kmem_cache_free(dev_priv->requests, req);
err_unreserve:
- unreserve_seqno(engine);
+ unreserve_engine(engine);
err_unpin:
engine->context_unpin(engine, ctx);
return ERR_PTR(ret);
@@ -855,28 +890,6 @@ i915_gem_request_await_object(struct drm_i915_gem_request *to,
return ret;
}
-static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
-
- if (dev_priv->gt.awake)
- return;
-
- GEM_BUG_ON(!dev_priv->gt.active_requests);
-
- intel_runtime_pm_get_noresume(dev_priv);
- dev_priv->gt.awake = true;
-
- intel_enable_gt_powersave(dev_priv);
- i915_update_gfx_val(dev_priv);
- if (INTEL_GEN(dev_priv) >= 6)
- gen6_rps_busy(dev_priv);
-
- queue_delayed_work(dev_priv->wq,
- &dev_priv->gt.retire_work,
- round_jiffies_up_relative(HZ));
-}
-
/*
* NB: This function is not allowed to fail. Doing so would mean the
* request is not being tracked for completion but the work itself is
@@ -958,9 +971,6 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
list_add_tail(&request->ring_link, &ring->request_list);
request->emitted_jiffies = jiffies;
- if (!request->i915->gt.active_requests++)
- i915_gem_mark_busy(engine);
-
/* Let the backend know a new request has arrived that may need
* to adjust the existing execution schedule due to a high priority
* request - i.e. we may want to preempt the current request in order
@@ -1063,7 +1073,7 @@ static bool __i915_wait_request_check_and_reset(struct drm_i915_gem_request *req
return false;
__set_current_state(TASK_RUNNING);
- i915_reset(request->i915);
+ i915_reset(request->i915, 0);
return true;
}
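
reserve_engine()/unreserve_engine() above fold the device-level busy tracking into the request reservation path: the first outstanding request marks the GT busy and queues the retire worker, and retiring the last one schedules the idle worker. A small standalone model of that counter transition follows, with prints standing in for the real delayed work and power management calls.

#include <stdio.h>

static unsigned int active_requests;
static int gt_awake;

/* Stand-ins for the retire/idle delayed work used by the driver. */
static void queue_retire_work(void) { printf("queue retire work\n"); }
static void schedule_idle_work(void) { printf("schedule idle work\n"); }

/* Mirrors the 0 -> 1 transition handled by mark_busy()/reserve_engine(). */
static void reserve_engine_model(void)
{
	if (!active_requests++) {
		gt_awake = 1;
		queue_retire_work();
	}
}

/* Mirrors the 1 -> 0 transition handled by unreserve_engine(). */
static void unreserve_engine_model(void)
{
	if (!--active_requests)
		schedule_idle_work();
}

int main(void)
{
	reserve_engine_model();   /* first request: GT marked busy */
	reserve_engine_model();   /* second request: no extra work queued */
	unreserve_engine_model();
	unreserve_engine_model(); /* last request retired: idle work scheduled */
	printf("awake=%d active=%u\n", gt_awake, active_requests);
	return 0;
}
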
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index 7579b9702c22..49a4c8994ff0 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -184,6 +184,8 @@ struct drm_i915_gem_request {
/** Time at which this request was emitted, in jiffies. */
unsigned long emitted_jiffies;
+ bool waitboost;
+
/** engine->request_list entry for this request */
struct list_head link;
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 1032f98add11..74002b2d1b6f 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -43,16 +43,21 @@ static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock)
return true;
case MUTEX_TRYLOCK_FAILED:
+ *unlock = false;
+ preempt_disable();
do {
cpu_relax();
if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
- case MUTEX_TRYLOCK_SUCCESS:
*unlock = true;
- return true;
+ break;
}
} while (!need_resched());
+ preempt_enable();
+ return *unlock;
- return false;
+ case MUTEX_TRYLOCK_SUCCESS:
+ *unlock = true;
+ return true;
}
BUG();
@@ -131,6 +136,7 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
* i915_gem_shrink - Shrink buffer object caches
* @dev_priv: i915 device
* @target: amount of memory to make available, in pages
+ * @nr_scanned: optional output for number of pages scanned (incremental)
* @flags: control flags for selecting cache types
*
* This function is the main interface to the shrinker. It will try to release
@@ -153,7 +159,9 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
*/
unsigned long
i915_gem_shrink(struct drm_i915_private *dev_priv,
- unsigned long target, unsigned flags)
+ unsigned long target,
+ unsigned long *nr_scanned,
+ unsigned flags)
{
const struct {
struct list_head *list;
@@ -164,6 +172,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
{ NULL, 0 },
}, *phase;
unsigned long count = 0;
+ unsigned long scanned = 0;
bool unlock;
if (!shrinker_lock(dev_priv, &unlock))
@@ -244,6 +253,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
count += obj->base.size >> PAGE_SHIFT;
}
mutex_unlock(&obj->mm.lock);
+ scanned += obj->base.size >> PAGE_SHIFT;
}
}
list_splice_tail(&still_in_list, phase->list);
@@ -256,6 +266,8 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
shrinker_unlock(dev_priv, unlock);
+ if (nr_scanned)
+ *nr_scanned += scanned;
return count;
}
@@ -278,7 +290,7 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
unsigned long freed;
intel_runtime_pm_get(dev_priv);
- freed = i915_gem_shrink(dev_priv, -1UL,
+ freed = i915_gem_shrink(dev_priv, -1UL, NULL,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
I915_SHRINK_ACTIVE);
@@ -324,23 +336,28 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
unsigned long freed;
bool unlock;
+ sc->nr_scanned = 0;
+
if (!shrinker_lock(dev_priv, &unlock))
return SHRINK_STOP;
freed = i915_gem_shrink(dev_priv,
sc->nr_to_scan,
+ &sc->nr_scanned,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
I915_SHRINK_PURGEABLE);
if (freed < sc->nr_to_scan)
freed += i915_gem_shrink(dev_priv,
- sc->nr_to_scan - freed,
+ sc->nr_to_scan - sc->nr_scanned,
+ &sc->nr_scanned,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND);
if (freed < sc->nr_to_scan && current_is_kswapd()) {
intel_runtime_pm_get(dev_priv);
freed += i915_gem_shrink(dev_priv,
- sc->nr_to_scan - freed,
+ sc->nr_to_scan - sc->nr_scanned,
+ &sc->nr_scanned,
I915_SHRINK_ACTIVE |
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND);
@@ -349,7 +366,7 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
shrinker_unlock(dev_priv, unlock);
- return freed;
+ return sc->nr_scanned ? freed : SHRINK_STOP;
}
static bool
@@ -448,7 +465,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
goto out;
intel_runtime_pm_get(dev_priv);
- freed_pages += i915_gem_shrink(dev_priv, -1UL,
+ freed_pages += i915_gem_shrink(dev_priv, -1UL, NULL,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
I915_SHRINK_ACTIVE |
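
The shrinker change separates pages scanned from pages actually freed, so sc->nr_scanned is reported to the core VM even when nothing could be released, and SHRINK_STOP is only returned when no scanning progress was possible at all. A simplified standalone sketch of that accounting follows, with a toy object list in place of the real bound/unbound lists and no locking.

#include <stdio.h>

#define SHRINK_STOP (~0UL)

/* Toy object list: size in pages, and whether the pages can be dropped. */
static const struct { unsigned long pages; int can_free; } objects[] = {
	{ 4, 1 }, { 8, 0 }, { 16, 1 }, { 8, 0 }, { 8, 1 },
};

static unsigned long shrink_model(unsigned long target, unsigned long *nr_scanned)
{
	unsigned long freed = 0, scanned = 0;
	unsigned int i;

	for (i = 0; i < sizeof(objects) / sizeof(objects[0]); i++) {
		if (freed >= target)
			break;
		scanned += objects[i].pages; /* counted even if the object is kept */
		if (objects[i].can_free)
			freed += objects[i].pages;
	}

	if (nr_scanned)
		*nr_scanned += scanned;
	return freed;
}

int main(void)
{
	unsigned long nr_scanned = 0;
	unsigned long freed = shrink_model(32, &nr_scanned);

	/* As in i915_gem_shrinker_scan(): no scan progress at all means STOP. */
	printf("freed=%lu scanned=%lu -> %s\n", freed, nr_scanned,
	       nr_scanned ? "report freed" : "SHRINK_STOP");
	return 0;
}
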
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index a817b3e0b17e..507c9f0d8df1 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -254,9 +254,10 @@ static dma_addr_t i915_stolen_to_dma(struct drm_i915_private *dev_priv)
* This is a BIOS w/a: Some BIOS wrap stolen in the root
* PCI bus, but have an off-by-one error. Hence retry the
* reservation starting from 1 instead of 0.
+ * Some BIOSes also have an off-by-one error on the other end.
*/
r = devm_request_mem_region(dev_priv->drm.dev, base + 1,
- ggtt->stolen_size - 1,
+ ggtt->stolen_size - 2,
"Graphics Stolen Memory");
/*
* GEN3 firmware likes to smash pci bridges into the stolen
@@ -579,6 +580,7 @@ _i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
struct drm_mm_node *stolen)
{
struct drm_i915_gem_object *obj;
+ unsigned int cache_level;
obj = i915_gem_object_alloc(dev_priv);
if (obj == NULL)
@@ -589,8 +591,8 @@ _i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
obj->stolen = stolen;
obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
- obj->cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE;
- obj->cache_coherent = true; /* assumptions! more like cache_oblivious */
+ cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE;
+ i915_gem_object_set_cache_coherency(obj, cache_level);
if (i915_gem_object_pin_pages(obj))
goto cleanup;
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index ccd09e8419f5..f152a38d7079 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -804,9 +804,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
i915_gem_object_init(obj, &i915_gem_userptr_ops);
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
- obj->cache_level = I915_CACHE_LLC;
- obj->cache_coherent = i915_gem_object_is_coherent(obj);
- obj->cache_dirty = !obj->cache_coherent;
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
obj->userptr.ptr = args->user_ptr;
obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index e18f350bc364..ed5a1eb839ad 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -463,6 +463,7 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
err_printf(m, " hangcheck action timestamp: %lu, %u ms ago\n",
ee->hangcheck_timestamp,
jiffies_to_msecs(jiffies - ee->hangcheck_timestamp));
+ err_printf(m, " engine reset count: %u\n", ee->reset_count);
error_print_request(m, " ELSP[0]: ", &ee->execlist[0]);
error_print_request(m, " ELSP[1]: ", &ee->execlist[1]);
@@ -1236,6 +1237,8 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
ee->hangcheck_timestamp = engine->hangcheck.action_timestamp;
ee->hangcheck_action = engine->hangcheck.action;
ee->hangcheck_stalled = engine->hangcheck.stalled;
+ ee->reset_count = i915_reset_engine_count(&dev_priv->gpu_error,
+ engine);
if (USES_PPGTT(dev_priv)) {
int i;
@@ -1263,7 +1266,7 @@ static void record_request(struct drm_i915_gem_request *request,
struct drm_i915_error_request *erq)
{
erq->context = request->ctx->hw_id;
- erq->ban_score = request->ctx->ban_score;
+ erq->ban_score = atomic_read(&request->ctx->ban_score);
erq->seqno = request->global_seqno;
erq->jiffies = request->emitted_jiffies;
erq->head = request->head;
@@ -1354,9 +1357,9 @@ static void record_context(struct drm_i915_error_context *e,
e->handle = ctx->user_handle;
e->hw_id = ctx->hw_id;
- e->ban_score = ctx->ban_score;
- e->guilty = ctx->guilty_count;
- e->active = ctx->active_count;
+ e->ban_score = atomic_read(&ctx->ban_score);
+ e->guilty = atomic_read(&ctx->guilty_count);
+ e->active = atomic_read(&ctx->active_count);
}
static void request_record_user_bo(struct drm_i915_gem_request *request,
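
Several context counters (ban_score, guilty_count, active_count) become atomic_t in this series, and the retirement decay now uses atomic_dec_if_positive() rather than a read-test-write sequence. Outside the kernel the same primitive can be modelled with a compare-and-swap loop; a hedged sketch in C11 atomics follows, returning, as the kernel helper does, the decremented value whether or not the store happened.

#include <stdatomic.h>
#include <stdio.h>

/*
 * Decrement *v unless it is already <= 0; the store only happens when the
 * result stays non-negative, and the would-be result (old - 1) is returned.
 */
static int dec_if_positive(atomic_int *v)
{
	int old = atomic_load(v);

	while (old > 0) {
		if (atomic_compare_exchange_weak(v, &old, old - 1))
			return old - 1;
		/* old was reloaded by the failed CAS; retry */
	}
	return old - 1;
}

int main(void)
{
	atomic_int ban_score = 2;

	/* Each retired request decays the ban score, but never below zero. */
	printf("%d\n", dec_if_positive(&ban_score)); /* 1 */
	printf("%d\n", dec_if_positive(&ban_score)); /* 0 */
	printf("%d\n", dec_if_positive(&ban_score)); /* -1, score stays 0 */
	return 0;
}
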
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 4cd9ee1ba332..e21ce9c18b6e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -275,17 +275,17 @@ void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
{
- return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
+ return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}
static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
{
- return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
+ return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}
static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
{
- return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
+ return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}
/**
@@ -1091,18 +1091,6 @@ static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
return events;
}
-static bool any_waiters(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- for_each_engine(engine, dev_priv, id)
- if (intel_engine_has_waiter(engine))
- return true;
-
- return false;
-}
-
static void gen6_pm_rps_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
@@ -1114,7 +1102,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->rps.interrupts_enabled) {
pm_iir = fetch_and_zero(&dev_priv->rps.pm_iir);
- client_boost = fetch_and_zero(&dev_priv->rps.client_boost);
+ client_boost = atomic_read(&dev_priv->rps.num_waiters);
}
spin_unlock_irq(&dev_priv->irq_lock);
@@ -1131,7 +1119,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
new_delay = dev_priv->rps.cur_freq;
min = dev_priv->rps.min_freq_softlimit;
max = dev_priv->rps.max_freq_softlimit;
- if (client_boost || any_waiters(dev_priv))
+ if (client_boost)
max = dev_priv->rps.max_freq;
if (client_boost && new_delay < dev_priv->rps.boost_freq) {
new_delay = dev_priv->rps.boost_freq;
@@ -1144,7 +1132,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
if (new_delay >= dev_priv->rps.max_freq_softlimit)
adj = 0;
- } else if (client_boost || any_waiters(dev_priv)) {
+ } else if (client_boost) {
adj = 0;
} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
@@ -1513,7 +1501,8 @@ static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
*pin_mask |= BIT(i);
- if (!intel_hpd_pin_to_port(i, &port))
+ port = intel_hpd_pin_to_port(i);
+ if (port == PORT_NONE)
continue;
if (long_pulse_detect(port, dig_hotplug_reg))
@@ -1603,7 +1592,7 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
crcs[3] = crc3;
crcs[4] = crc4;
drm_crtc_add_crc_entry(&crtc->base, true,
- drm_accurate_vblank_count(&crtc->base),
+ drm_crtc_accurate_vblank_count(&crtc->base),
crcs);
}
}
@@ -1673,7 +1662,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
spin_unlock(&dev_priv->irq_lock);
}
- if (INTEL_INFO(dev_priv)->gen >= 8)
+ if (INTEL_GEN(dev_priv) >= 8)
return;
if (HAS_VEBOX(dev_priv)) {
@@ -1720,18 +1709,6 @@ static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
}
}
-static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- bool ret;
-
- ret = drm_handle_vblank(&dev_priv->drm, pipe);
- if (ret)
- intel_finish_page_flip_mmio(dev_priv, pipe);
-
- return ret;
-}
-
static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv,
u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
@@ -1796,12 +1773,8 @@ static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
enum pipe pipe;
for_each_pipe(dev_priv, pipe) {
- if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
- intel_pipe_handle_vblank(dev_priv, pipe))
- intel_check_page_flip(dev_priv, pipe);
-
- if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
- intel_finish_page_flip_cs(dev_priv, pipe);
+ if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
+ drm_handle_vblank(&dev_priv->drm, pipe);
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev_priv, pipe);
@@ -2098,10 +2071,10 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
if (pch_iir & SDE_TRANSA_FIFO_UNDER)
- intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
+ intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
if (pch_iir & SDE_TRANSB_FIFO_UNDER)
- intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
+ intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
}
static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
@@ -2135,13 +2108,13 @@ static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
DRM_ERROR("PCH poison interrupt\n");
if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
- intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
+ intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
- intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
+ intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
- intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
+ intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_C);
I915_WRITE(SERR_INT, serr_int);
}
@@ -2253,19 +2226,14 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
DRM_ERROR("Poison interrupt\n");
for_each_pipe(dev_priv, pipe) {
- if (de_iir & DE_PIPE_VBLANK(pipe) &&
- intel_pipe_handle_vblank(dev_priv, pipe))
- intel_check_page_flip(dev_priv, pipe);
+ if (de_iir & DE_PIPE_VBLANK(pipe))
+ drm_handle_vblank(&dev_priv->drm, pipe);
if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
if (de_iir & DE_PIPE_CRC_DONE(pipe))
i9xx_pipe_crc_irq_handler(dev_priv, pipe);
-
- /* plane/pipes map 1:1 on ilk+ */
- if (de_iir & DE_PLANE_FLIP_DONE(pipe))
- intel_finish_page_flip_cs(dev_priv, pipe);
}
/* check event from PCH */
@@ -2304,13 +2272,8 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
intel_opregion_asle_intr(dev_priv);
for_each_pipe(dev_priv, pipe) {
- if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
- intel_pipe_handle_vblank(dev_priv, pipe))
- intel_check_page_flip(dev_priv, pipe);
-
- /* plane/pipes map 1:1 on ilk+ */
- if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
- intel_finish_page_flip_cs(dev_priv, pipe);
+ if (de_iir & DE_PIPE_VBLANK_IVB(pipe))
+ drm_handle_vblank(&dev_priv->drm, pipe);
}
/* check event from PCH */
@@ -2452,7 +2415,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
ret = IRQ_HANDLED;
tmp_mask = GEN8_AUX_CHANNEL_A;
- if (INTEL_INFO(dev_priv)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
tmp_mask |= GEN9_AUX_CHANNEL_B |
GEN9_AUX_CHANNEL_C |
GEN9_AUX_CHANNEL_D;
@@ -2491,7 +2454,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
}
for_each_pipe(dev_priv, pipe) {
- u32 flip_done, fault_errors;
+ u32 fault_errors;
if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
continue;
@@ -2505,18 +2468,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
ret = IRQ_HANDLED;
I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
- if (iir & GEN8_PIPE_VBLANK &&
- intel_pipe_handle_vblank(dev_priv, pipe))
- intel_check_page_flip(dev_priv, pipe);
-
- flip_done = iir;
- if (INTEL_INFO(dev_priv)->gen >= 9)
- flip_done &= GEN9_PIPE_PLANE1_FLIP_DONE;
- else
- flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE;
-
- if (flip_done)
- intel_finish_page_flip_cs(dev_priv, pipe);
+ if (iir & GEN8_PIPE_VBLANK)
+ drm_handle_vblank(&dev_priv->drm, pipe);
if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
hsw_pipe_crc_irq_handler(dev_priv, pipe);
@@ -2525,7 +2478,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
fault_errors = iir;
- if (INTEL_INFO(dev_priv)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
else
fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
@@ -2599,86 +2552,93 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
return ret;
}
+struct wedge_me {
+ struct delayed_work work;
+ struct drm_i915_private *i915;
+ const char *name;
+};
+
+static void wedge_me(struct work_struct *work)
+{
+ struct wedge_me *w = container_of(work, typeof(*w), work.work);
+
+ dev_err(w->i915->drm.dev,
+ "%s timed out, cancelling all in-flight rendering.\n",
+ w->name);
+ i915_gem_set_wedged(w->i915);
+}
+
+static void __init_wedge(struct wedge_me *w,
+ struct drm_i915_private *i915,
+ long timeout,
+ const char *name)
+{
+ w->i915 = i915;
+ w->name = name;
+
+ INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me);
+ schedule_delayed_work(&w->work, timeout);
+}
+
+static void __fini_wedge(struct wedge_me *w)
+{
+ cancel_delayed_work_sync(&w->work);
+ destroy_delayed_work_on_stack(&w->work);
+ w->i915 = NULL;
+}
+
+#define i915_wedge_on_timeout(W, DEV, TIMEOUT) \
+ for (__init_wedge((W), (DEV), (TIMEOUT), __func__); \
+ (W)->i915; \
+ __fini_wedge((W)))
+
/**
- * i915_reset_and_wakeup - do process context error handling work
+ * i915_reset_device - do process context error handling work
* @dev_priv: i915 device private
*
* Fire an error uevent so userspace can see that a hang or error
* was detected.
*/
-static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
+static void i915_reset_device(struct drm_i915_private *dev_priv)
{
struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
+ struct wedge_me w;
kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
DRM_DEBUG_DRIVER("resetting chip\n");
kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
- intel_prepare_reset(dev_priv);
+ /* Use a watchdog to ensure that our reset completes */
+ i915_wedge_on_timeout(&w, dev_priv, 5*HZ) {
+ intel_prepare_reset(dev_priv);
- set_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags);
- wake_up_all(&dev_priv->gpu_error.wait_queue);
+ /* Signal that locked waiters should reset the GPU */
+ set_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags);
+ wake_up_all(&dev_priv->gpu_error.wait_queue);
- do {
- /*
- * All state reset _must_ be completed before we update the
- * reset counter, for otherwise waiters might miss the reset
- * pending state and not properly drop locks, resulting in
- * deadlocks with the reset work.
+ /* Wait for anyone holding the lock to wake up, without
+ * blocking indefinitely on struct_mutex.
*/
- if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
- i915_reset(dev_priv);
- mutex_unlock(&dev_priv->drm.struct_mutex);
- }
-
- /* We need to wait for anyone holding the lock to wakeup */
- } while (wait_on_bit_timeout(&dev_priv->gpu_error.flags,
- I915_RESET_HANDOFF,
- TASK_UNINTERRUPTIBLE,
- HZ));
+ do {
+ if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
+ i915_reset(dev_priv, 0);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ }
+ } while (wait_on_bit_timeout(&dev_priv->gpu_error.flags,
+ I915_RESET_HANDOFF,
+ TASK_UNINTERRUPTIBLE,
+ 1));
- intel_finish_reset(dev_priv);
+ intel_finish_reset(dev_priv);
+ }
if (!test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
kobject_uevent_env(kobj,
KOBJ_CHANGE, reset_done_event);
-
- /*
- * Note: The wake_up also serves as a memory barrier so that
- * waiters see the updated value of the dev_priv->gpu_error.
- */
- clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags);
- wake_up_all(&dev_priv->gpu_error.reset_queue);
-}
-
-static inline void
-i915_err_print_instdone(struct drm_i915_private *dev_priv,
- struct intel_instdone *instdone)
-{
- int slice;
- int subslice;
-
- pr_err(" INSTDONE: 0x%08x\n", instdone->instdone);
-
- if (INTEL_GEN(dev_priv) <= 3)
- return;
-
- pr_err(" SC_INSTDONE: 0x%08x\n", instdone->slice_common);
-
- if (INTEL_GEN(dev_priv) <= 6)
- return;
-
- for_each_instdone_slice_subslice(dev_priv, slice, subslice)
- pr_err(" SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
- slice, subslice, instdone->sampler[slice][subslice]);
-
- for_each_instdone_slice_subslice(dev_priv, slice, subslice)
- pr_err(" ROW_INSTDONE[%d][%d]: 0x%08x\n",
- slice, subslice, instdone->row[slice][subslice]);
}
static void i915_clear_error_registers(struct drm_i915_private *dev_priv)
@@ -2722,6 +2682,8 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
u32 engine_mask,
const char *fmt, ...)
{
+ struct intel_engine_cs *engine;
+ unsigned int tmp;
va_list args;
char error_msg[80];
@@ -2741,14 +2703,56 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
i915_capture_error_state(dev_priv, engine_mask, error_msg);
i915_clear_error_registers(dev_priv);
+ /*
+ * Try an engine reset first when available. We fall back to a full
+ * device reset if the engine reset fails.
+ */
+ if (intel_has_reset_engine(dev_priv)) {
+ for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+ BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
+ if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
+ &dev_priv->gpu_error.flags))
+ continue;
+
+ if (i915_reset_engine(engine, 0) == 0)
+ engine_mask &= ~intel_engine_flag(engine);
+
+ clear_bit(I915_RESET_ENGINE + engine->id,
+ &dev_priv->gpu_error.flags);
+ wake_up_bit(&dev_priv->gpu_error.flags,
+ I915_RESET_ENGINE + engine->id);
+ }
+ }
+
if (!engine_mask)
goto out;
- if (test_and_set_bit(I915_RESET_BACKOFF,
- &dev_priv->gpu_error.flags))
+ /* Full reset needs the mutex; stop any other user from attempting one. */
+ if (test_and_set_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags)) {
+ wait_event(dev_priv->gpu_error.reset_queue,
+ !test_bit(I915_RESET_BACKOFF,
+ &dev_priv->gpu_error.flags));
goto out;
+ }
+
+ /* Prevent any other reset-engine attempt. */
+ for_each_engine(engine, dev_priv, tmp) {
+ while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
+ &dev_priv->gpu_error.flags))
+ wait_on_bit(&dev_priv->gpu_error.flags,
+ I915_RESET_ENGINE + engine->id,
+ TASK_UNINTERRUPTIBLE);
+ }
+
+ i915_reset_device(dev_priv);
- i915_reset_and_wakeup(dev_priv);
+ for_each_engine(engine, dev_priv, tmp) {
+ clear_bit(I915_RESET_ENGINE + engine->id,
+ &dev_priv->gpu_error.flags);
+ }
+
+ clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags);
+ wake_up_all(&dev_priv->gpu_error.reset_queue);
out:
intel_runtime_pm_put(dev_priv);
@@ -3009,7 +3013,7 @@ static void gen8_irq_reset(struct drm_device *dev)
}
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
- unsigned int pipe_mask)
+ u8 pipe_mask)
{
uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
enum pipe pipe;
@@ -3023,7 +3027,7 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
}
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
- unsigned int pipe_mask)
+ u8 pipe_mask)
{
enum pipe pipe;
@@ -3427,7 +3431,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
u32 de_misc_masked = GEN8_DE_MISC_GSE;
enum pipe pipe;
- if (INTEL_INFO(dev_priv)->gen >= 9) {
+ if (INTEL_GEN(dev_priv) >= 9) {
de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
@@ -3610,34 +3614,6 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
/*
* Returns true when a page flip has completed.
*/
-static bool i8xx_handle_vblank(struct drm_i915_private *dev_priv,
- int plane, int pipe, u32 iir)
-{
- u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
-
- if (!intel_pipe_handle_vblank(dev_priv, pipe))
- return false;
-
- if ((iir & flip_pending) == 0)
- goto check_page_flip;
-
- /* We detect FlipDone by looking for the change in PendingFlip from '1'
- * to '0' on the following vblank, i.e. IIR has the Pendingflip
- * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
- * the flip is completed (no longer pending). Since this doesn't raise
- * an interrupt per se, we watch for the change at vblank.
- */
- if (I915_READ16(ISR) & flip_pending)
- goto check_page_flip;
-
- intel_finish_page_flip_cs(dev_priv, pipe);
- return true;
-
-check_page_flip:
- intel_check_page_flip(dev_priv, pipe);
- return false;
-}
-
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
@@ -3645,9 +3621,6 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
u16 iir, new_iir;
u32 pipe_stats[2];
int pipe;
- u16 flip_mask =
- I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
- I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
irqreturn_t ret;
if (!intel_irqs_enabled(dev_priv))
@@ -3661,7 +3634,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
if (iir == 0)
goto out;
- while (iir & ~flip_mask) {
+ while (iir) {
/* Can't rely on pipestat interrupt bit in iir as it might
* have been cleared after the pipestat interrupt was received.
* It doesn't set the bit in iir again, but it still produces
@@ -3683,7 +3656,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
}
spin_unlock(&dev_priv->irq_lock);
- I915_WRITE16(IIR, iir & ~flip_mask);
+ I915_WRITE16(IIR, iir);
new_iir = I915_READ16(IIR); /* Flush posted writes */
if (iir & I915_USER_INTERRUPT)
@@ -3694,9 +3667,8 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
if (HAS_FBC(dev_priv))
plane = !plane;
- if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
- i8xx_handle_vblank(dev_priv, plane, pipe, iir))
- flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
+ if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
+ drm_handle_vblank(&dev_priv->drm, pipe);
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev_priv, pipe);
@@ -3796,45 +3768,11 @@ static int i915_irq_postinstall(struct drm_device *dev)
return 0;
}
-/*
- * Returns true when a page flip has completed.
- */
-static bool i915_handle_vblank(struct drm_i915_private *dev_priv,
- int plane, int pipe, u32 iir)
-{
- u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
-
- if (!intel_pipe_handle_vblank(dev_priv, pipe))
- return false;
-
- if ((iir & flip_pending) == 0)
- goto check_page_flip;
-
- /* We detect FlipDone by looking for the change in PendingFlip from '1'
- * to '0' on the following vblank, i.e. IIR has the Pendingflip
- * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
- * the flip is completed (no longer pending). Since this doesn't raise
- * an interrupt per se, we watch for the change at vblank.
- */
- if (I915_READ(ISR) & flip_pending)
- goto check_page_flip;
-
- intel_finish_page_flip_cs(dev_priv, pipe);
- return true;
-
-check_page_flip:
- intel_check_page_flip(dev_priv, pipe);
- return false;
-}
-
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
struct drm_i915_private *dev_priv = to_i915(dev);
u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
- u32 flip_mask =
- I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
- I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
int pipe, ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@@ -3845,7 +3783,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
iir = I915_READ(IIR);
do {
- bool irq_received = (iir & ~flip_mask) != 0;
+ bool irq_received = iir != 0;
bool blc_event = false;
/* Can't rely on pipestat interrupt bit in iir as it might
@@ -3880,7 +3818,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
i9xx_hpd_irq_handler(dev_priv, hotplug_status);
}
- I915_WRITE(IIR, iir & ~flip_mask);
+ I915_WRITE(IIR, iir);
new_iir = I915_READ(IIR); /* Flush posted writes */
if (iir & I915_USER_INTERRUPT)
@@ -3891,9 +3829,8 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
if (HAS_FBC(dev_priv))
plane = !plane;
- if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
- i915_handle_vblank(dev_priv, plane, pipe, iir))
- flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
+ if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
+ drm_handle_vblank(&dev_priv->drm, pipe);
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
@@ -3926,7 +3863,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
*/
ret = IRQ_HANDLED;
iir = new_iir;
- } while (iir & ~flip_mask);
+ } while (iir);
enable_rpm_wakeref_asserts(dev_priv);
@@ -4061,9 +3998,6 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
u32 iir, new_iir;
u32 pipe_stats[I915_MAX_PIPES];
int ret = IRQ_NONE, pipe;
- u32 flip_mask =
- I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
- I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE;
@@ -4074,7 +4008,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
iir = I915_READ(IIR);
for (;;) {
- bool irq_received = (iir & ~flip_mask) != 0;
+ bool irq_received = iir != 0;
bool blc_event = false;
/* Can't rely on pipestat interrupt bit in iir as it might
@@ -4112,7 +4046,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
i9xx_hpd_irq_handler(dev_priv, hotplug_status);
}
- I915_WRITE(IIR, iir & ~flip_mask);
+ I915_WRITE(IIR, iir);
new_iir = I915_READ(IIR); /* Flush posted writes */
if (iir & I915_USER_INTERRUPT)
@@ -4121,9 +4055,8 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
notify_ring(dev_priv->engine[VCS]);
for_each_pipe(dev_priv, pipe) {
- if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
- i915_handle_vblank(dev_priv, pipe, pipe, iir))
- flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
+ if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
+ drm_handle_vblank(&dev_priv->drm, pipe);
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
@@ -4225,16 +4158,16 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
*
* TODO: verify if this can be reproduced on VLV, CHV.
*/
- if (INTEL_INFO(dev_priv)->gen <= 7)
+ if (INTEL_GEN(dev_priv) <= 7)
dev_priv->rps.pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
- if (INTEL_INFO(dev_priv)->gen >= 8)
+ if (INTEL_GEN(dev_priv) >= 8)
dev_priv->rps.pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
if (IS_GEN2(dev_priv)) {
/* Gen2 doesn't have a hardware frame counter */
dev->max_vblank_count = 0;
- } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
+ } else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
dev->driver->get_vblank_counter = g4x_get_vblank_counter;
} else {
@@ -4281,7 +4214,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->enable_vblank = i965_enable_vblank;
dev->driver->disable_vblank = i965_disable_vblank;
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
- } else if (INTEL_INFO(dev_priv)->gen >= 8) {
+ } else if (INTEL_GEN(dev_priv) >= 8) {
dev->driver->irq_handler = gen8_irq_handler;
dev->driver->irq_preinstall = gen8_irq_reset;
dev->driver->irq_postinstall = gen8_irq_postinstall;
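
i915_wedge_on_timeout(), introduced earlier in this file's diff, uses the for-loop scope-guard idiom: __init_wedge() arms the watchdog as the loop initializer, the body runs exactly once while w->i915 is non-NULL, and __fini_wedge() cancels the work and clears the pointer so the loop terminates. The standalone sketch below reproduces that control flow with prints in place of the delayed work; it only illustrates the macro structure, not the actual reset path.

#include <stdio.h>

struct guard {
	const char *name;
	int armed; /* non-zero while the guarded section is running */
};

static void guard_init(struct guard *g, const char *name)
{
	g->name = name;
	g->armed = 1;
	printf("arm watchdog for %s\n", name);    /* schedule_delayed_work() */
}

static void guard_fini(struct guard *g)
{
	printf("cancel watchdog for %s\n", g->name); /* cancel_delayed_work_sync() */
	g->armed = 0; /* terminates the for loop after one pass */
}

/* Same shape as i915_wedge_on_timeout(): init; run body once; fini. */
#define on_timeout(g, name) \
	for (guard_init((g), (name)); (g)->armed; guard_fini(g))

int main(void)
{
	struct guard g;

	on_timeout(&g, "gpu-reset") {
		printf("guarded work runs exactly once\n");
	}
	return 0;
}
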
diff --git a/drivers/gpu/drm/i915/i915_oa_bdw.c b/drivers/gpu/drm/i915/i915_oa_bdw.c
index d4462c2aaaee..abdf4d0abcce 100644
--- a/drivers/gpu/drm/i915/i915_oa_bdw.c
+++ b/drivers/gpu/drm/i915/i915_oa_bdw.c
@@ -31,3981 +31,6 @@
#include "i915_drv.h"
#include "i915_oa_bdw.h"
-enum metric_set_id {
- METRIC_SET_ID_RENDER_BASIC = 1,
- METRIC_SET_ID_COMPUTE_BASIC,
- METRIC_SET_ID_RENDER_PIPE_PROFILE,
- METRIC_SET_ID_MEMORY_READS,
- METRIC_SET_ID_MEMORY_WRITES,
- METRIC_SET_ID_COMPUTE_EXTENDED,
- METRIC_SET_ID_COMPUTE_L3_CACHE,
- METRIC_SET_ID_DATA_PORT_READS_COALESCING,
- METRIC_SET_ID_DATA_PORT_WRITES_COALESCING,
- METRIC_SET_ID_HDC_AND_SF,
- METRIC_SET_ID_L3_1,
- METRIC_SET_ID_L3_2,
- METRIC_SET_ID_L3_3,
- METRIC_SET_ID_L3_4,
- METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND,
- METRIC_SET_ID_SAMPLER_1,
- METRIC_SET_ID_SAMPLER_2,
- METRIC_SET_ID_TDL_1,
- METRIC_SET_ID_TDL_2,
- METRIC_SET_ID_COMPUTE_EXTRA,
- METRIC_SET_ID_VME_PIPE,
- METRIC_SET_ID_TEST_OA,
-};
-
-int i915_oa_n_builtin_metric_sets_bdw = 22;
-
-static const struct i915_oa_reg b_counter_config_render_basic[] = {
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x00800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2740), 0x00000000 },
-};
-
-static const struct i915_oa_reg flex_eu_config_render_basic[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_render_basic_0_slices_0x01[] = {
- { _MMIO(0x9888), 0x143f000f },
- { _MMIO(0x9888), 0x14110014 },
- { _MMIO(0x9888), 0x14310014 },
- { _MMIO(0x9888), 0x14bf000f },
- { _MMIO(0x9888), 0x118a0317 },
- { _MMIO(0x9888), 0x13837be0 },
- { _MMIO(0x9888), 0x3b800060 },
- { _MMIO(0x9888), 0x3d800005 },
- { _MMIO(0x9888), 0x005c4000 },
- { _MMIO(0x9888), 0x065c8000 },
- { _MMIO(0x9888), 0x085cc000 },
- { _MMIO(0x9888), 0x003d8000 },
- { _MMIO(0x9888), 0x183d0800 },
- { _MMIO(0x9888), 0x0a3f0023 },
- { _MMIO(0x9888), 0x103f0000 },
- { _MMIO(0x9888), 0x00584000 },
- { _MMIO(0x9888), 0x08584000 },
- { _MMIO(0x9888), 0x0a5a4000 },
- { _MMIO(0x9888), 0x005b4000 },
- { _MMIO(0x9888), 0x0e5b8000 },
- { _MMIO(0x9888), 0x185b2400 },
- { _MMIO(0x9888), 0x0a1d4000 },
- { _MMIO(0x9888), 0x0c1f0800 },
- { _MMIO(0x9888), 0x0e1faa00 },
- { _MMIO(0x9888), 0x00384000 },
- { _MMIO(0x9888), 0x0e384000 },
- { _MMIO(0x9888), 0x16384000 },
- { _MMIO(0x9888), 0x18380001 },
- { _MMIO(0x9888), 0x00392000 },
- { _MMIO(0x9888), 0x06398000 },
- { _MMIO(0x9888), 0x0839a000 },
- { _MMIO(0x9888), 0x0a391000 },
- { _MMIO(0x9888), 0x00104000 },
- { _MMIO(0x9888), 0x08104000 },
- { _MMIO(0x9888), 0x00110030 },
- { _MMIO(0x9888), 0x08110031 },
- { _MMIO(0x9888), 0x10110000 },
- { _MMIO(0x9888), 0x00134000 },
- { _MMIO(0x9888), 0x16130020 },
- { _MMIO(0x9888), 0x06308000 },
- { _MMIO(0x9888), 0x08308000 },
- { _MMIO(0x9888), 0x06311800 },
- { _MMIO(0x9888), 0x08311880 },
- { _MMIO(0x9888), 0x10310000 },
- { _MMIO(0x9888), 0x0e334000 },
- { _MMIO(0x9888), 0x16330080 },
- { _MMIO(0x9888), 0x0abf1180 },
- { _MMIO(0x9888), 0x10bf0000 },
- { _MMIO(0x9888), 0x0ada8000 },
- { _MMIO(0x9888), 0x0a9d8000 },
- { _MMIO(0x9888), 0x109f0002 },
- { _MMIO(0x9888), 0x0ab94000 },
- { _MMIO(0x9888), 0x0d888000 },
- { _MMIO(0x9888), 0x038a0380 },
- { _MMIO(0x9888), 0x058a000e },
- { _MMIO(0x9888), 0x018a8000 },
- { _MMIO(0x9888), 0x0f8a8000 },
- { _MMIO(0x9888), 0x198a8000 },
- { _MMIO(0x9888), 0x1b8a00a0 },
- { _MMIO(0x9888), 0x078a0000 },
- { _MMIO(0x9888), 0x098a0000 },
- { _MMIO(0x9888), 0x238b2820 },
- { _MMIO(0x9888), 0x258b2550 },
- { _MMIO(0x9888), 0x198c1000 },
- { _MMIO(0x9888), 0x0b8d8000 },
- { _MMIO(0x9888), 0x1f85aa80 },
- { _MMIO(0x9888), 0x2185aaa0 },
- { _MMIO(0x9888), 0x2385002a },
- { _MMIO(0x9888), 0x0d831021 },
- { _MMIO(0x9888), 0x0f83572f },
- { _MMIO(0x9888), 0x01835680 },
- { _MMIO(0x9888), 0x0383002c },
- { _MMIO(0x9888), 0x11830000 },
- { _MMIO(0x9888), 0x19835400 },
- { _MMIO(0x9888), 0x1b830001 },
- { _MMIO(0x9888), 0x05830000 },
- { _MMIO(0x9888), 0x07834000 },
- { _MMIO(0x9888), 0x09834000 },
- { _MMIO(0x9888), 0x0184c000 },
- { _MMIO(0x9888), 0x07848000 },
- { _MMIO(0x9888), 0x0984c000 },
- { _MMIO(0x9888), 0x0b84c000 },
- { _MMIO(0x9888), 0x0d84c000 },
- { _MMIO(0x9888), 0x0f84c000 },
- { _MMIO(0x9888), 0x0384c000 },
- { _MMIO(0x9888), 0x05844000 },
- { _MMIO(0x9888), 0x1b80c137 },
- { _MMIO(0x9888), 0x1d80c147 },
- { _MMIO(0x9888), 0x21800000 },
- { _MMIO(0x9888), 0x1180c000 },
- { _MMIO(0x9888), 0x17808000 },
- { _MMIO(0x9888), 0x1980c000 },
- { _MMIO(0x9888), 0x1f80c000 },
- { _MMIO(0x9888), 0x1380c000 },
- { _MMIO(0x9888), 0x15804000 },
- { _MMIO(0x9888), 0x4d801110 },
- { _MMIO(0x9888), 0x4f800331 },
- { _MMIO(0x9888), 0x43800802 },
- { _MMIO(0x9888), 0x51800000 },
- { _MMIO(0x9888), 0x45801465 },
- { _MMIO(0x9888), 0x53801111 },
- { _MMIO(0x9888), 0x478014a5 },
- { _MMIO(0x9888), 0x31800000 },
- { _MMIO(0x9888), 0x3f800ca5 },
- { _MMIO(0x9888), 0x41800003 },
-};
-
-static const struct i915_oa_reg mux_config_render_basic_1_slices_0x02[] = {
- { _MMIO(0x9888), 0x143f000f },
- { _MMIO(0x9888), 0x14bf000f },
- { _MMIO(0x9888), 0x14910014 },
- { _MMIO(0x9888), 0x14b10014 },
- { _MMIO(0x9888), 0x118a0317 },
- { _MMIO(0x9888), 0x13837be0 },
- { _MMIO(0x9888), 0x3b800060 },
- { _MMIO(0x9888), 0x3d800005 },
- { _MMIO(0x9888), 0x0a3f0023 },
- { _MMIO(0x9888), 0x103f0000 },
- { _MMIO(0x9888), 0x0a5a4000 },
- { _MMIO(0x9888), 0x0a1d4000 },
- { _MMIO(0x9888), 0x0e1f8000 },
- { _MMIO(0x9888), 0x0a391000 },
- { _MMIO(0x9888), 0x00dc4000 },
- { _MMIO(0x9888), 0x06dc8000 },
- { _MMIO(0x9888), 0x08dcc000 },
- { _MMIO(0x9888), 0x00bd8000 },
- { _MMIO(0x9888), 0x18bd0800 },
- { _MMIO(0x9888), 0x0abf1180 },
- { _MMIO(0x9888), 0x10bf0000 },
- { _MMIO(0x9888), 0x00d84000 },
- { _MMIO(0x9888), 0x08d84000 },
- { _MMIO(0x9888), 0x0ada8000 },
- { _MMIO(0x9888), 0x00db4000 },
- { _MMIO(0x9888), 0x0edb8000 },
- { _MMIO(0x9888), 0x18db2400 },
- { _MMIO(0x9888), 0x0a9d8000 },
- { _MMIO(0x9888), 0x0c9f0800 },
- { _MMIO(0x9888), 0x0e9f2a00 },
- { _MMIO(0x9888), 0x109f0002 },
- { _MMIO(0x9888), 0x00b84000 },
- { _MMIO(0x9888), 0x0eb84000 },
- { _MMIO(0x9888), 0x16b84000 },
- { _MMIO(0x9888), 0x18b80001 },
- { _MMIO(0x9888), 0x00b92000 },
- { _MMIO(0x9888), 0x06b98000 },
- { _MMIO(0x9888), 0x08b9a000 },
- { _MMIO(0x9888), 0x0ab94000 },
- { _MMIO(0x9888), 0x00904000 },
- { _MMIO(0x9888), 0x08904000 },
- { _MMIO(0x9888), 0x00910030 },
- { _MMIO(0x9888), 0x08910031 },
- { _MMIO(0x9888), 0x10910000 },
- { _MMIO(0x9888), 0x00934000 },
- { _MMIO(0x9888), 0x16930020 },
- { _MMIO(0x9888), 0x06b08000 },
- { _MMIO(0x9888), 0x08b08000 },
- { _MMIO(0x9888), 0x06b11800 },
- { _MMIO(0x9888), 0x08b11880 },
- { _MMIO(0x9888), 0x10b10000 },
- { _MMIO(0x9888), 0x0eb34000 },
- { _MMIO(0x9888), 0x16b30080 },
- { _MMIO(0x9888), 0x01888000 },
- { _MMIO(0x9888), 0x0d88b800 },
- { _MMIO(0x9888), 0x038a0380 },
- { _MMIO(0x9888), 0x058a000e },
- { _MMIO(0x9888), 0x1b8a0080 },
- { _MMIO(0x9888), 0x078a0000 },
- { _MMIO(0x9888), 0x098a0000 },
- { _MMIO(0x9888), 0x238b2840 },
- { _MMIO(0x9888), 0x258b26a0 },
- { _MMIO(0x9888), 0x018c4000 },
- { _MMIO(0x9888), 0x0f8c4000 },
- { _MMIO(0x9888), 0x178c2000 },
- { _MMIO(0x9888), 0x198c1100 },
- { _MMIO(0x9888), 0x018d2000 },
- { _MMIO(0x9888), 0x078d8000 },
- { _MMIO(0x9888), 0x098da000 },
- { _MMIO(0x9888), 0x0b8d8000 },
- { _MMIO(0x9888), 0x1f85aa80 },
- { _MMIO(0x9888), 0x2185aaa0 },
- { _MMIO(0x9888), 0x2385002a },
- { _MMIO(0x9888), 0x0d831021 },
- { _MMIO(0x9888), 0x0f83572f },
- { _MMIO(0x9888), 0x01835680 },
- { _MMIO(0x9888), 0x0383002c },
- { _MMIO(0x9888), 0x11830000 },
- { _MMIO(0x9888), 0x19835400 },
- { _MMIO(0x9888), 0x1b830001 },
- { _MMIO(0x9888), 0x05830000 },
- { _MMIO(0x9888), 0x07834000 },
- { _MMIO(0x9888), 0x09834000 },
- { _MMIO(0x9888), 0x0184c000 },
- { _MMIO(0x9888), 0x07848000 },
- { _MMIO(0x9888), 0x0984c000 },
- { _MMIO(0x9888), 0x0b84c000 },
- { _MMIO(0x9888), 0x0d84c000 },
- { _MMIO(0x9888), 0x0f84c000 },
- { _MMIO(0x9888), 0x0384c000 },
- { _MMIO(0x9888), 0x05844000 },
- { _MMIO(0x9888), 0x1b80c137 },
- { _MMIO(0x9888), 0x1d80c147 },
- { _MMIO(0x9888), 0x21800000 },
- { _MMIO(0x9888), 0x1180c000 },
- { _MMIO(0x9888), 0x17808000 },
- { _MMIO(0x9888), 0x1980c000 },
- { _MMIO(0x9888), 0x1f80c000 },
- { _MMIO(0x9888), 0x1380c000 },
- { _MMIO(0x9888), 0x15804000 },
- { _MMIO(0x9888), 0x4d801550 },
- { _MMIO(0x9888), 0x4f800331 },
- { _MMIO(0x9888), 0x43800802 },
- { _MMIO(0x9888), 0x51800400 },
- { _MMIO(0x9888), 0x458004a1 },
- { _MMIO(0x9888), 0x53805555 },
- { _MMIO(0x9888), 0x47800421 },
- { _MMIO(0x9888), 0x31800000 },
- { _MMIO(0x9888), 0x3f801421 },
- { _MMIO(0x9888), 0x41800845 },
-};
-
-static int
-get_render_basic_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 2);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 2);
-
- if (INTEL_INFO(dev_priv)->sseu.slice_mask & 0x01) {
- regs[n] = mux_config_render_basic_0_slices_0x01;
- lens[n] = ARRAY_SIZE(mux_config_render_basic_0_slices_0x01);
- n++;
- }
- if (INTEL_INFO(dev_priv)->sseu.slice_mask & 0x02) {
- regs[n] = mux_config_render_basic_1_slices_0x02;
- lens[n] = ARRAY_SIZE(mux_config_render_basic_1_slices_0x02);
- n++;
- }
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_compute_basic[] = {
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x00800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2740), 0x00000000 },
-};
-
-static const struct i915_oa_reg flex_eu_config_compute_basic[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00000003 },
- { _MMIO(0xe658), 0x00002001 },
- { _MMIO(0xe758), 0x00778008 },
- { _MMIO(0xe45c), 0x00088078 },
- { _MMIO(0xe55c), 0x00808708 },
- { _MMIO(0xe65c), 0x00a08908 },
-};
-
-static const struct i915_oa_reg mux_config_compute_basic_0_slices_0x01[] = {
- { _MMIO(0x9888), 0x105c00e0 },
- { _MMIO(0x9888), 0x105800e0 },
- { _MMIO(0x9888), 0x103800e0 },
- { _MMIO(0x9888), 0x3580001a },
- { _MMIO(0x9888), 0x3b800060 },
- { _MMIO(0x9888), 0x3d800005 },
- { _MMIO(0x9888), 0x065c2100 },
- { _MMIO(0x9888), 0x0a5c0041 },
- { _MMIO(0x9888), 0x0c5c6600 },
- { _MMIO(0x9888), 0x005c6580 },
- { _MMIO(0x9888), 0x085c8000 },
- { _MMIO(0x9888), 0x0e5c8000 },
- { _MMIO(0x9888), 0x00580042 },
- { _MMIO(0x9888), 0x08582080 },
- { _MMIO(0x9888), 0x0c58004c },
- { _MMIO(0x9888), 0x0e582580 },
- { _MMIO(0x9888), 0x005b4000 },
- { _MMIO(0x9888), 0x185b1000 },
- { _MMIO(0x9888), 0x1a5b0104 },
- { _MMIO(0x9888), 0x0c1fa800 },
- { _MMIO(0x9888), 0x0e1faa00 },
- { _MMIO(0x9888), 0x101f02aa },
- { _MMIO(0x9888), 0x08380042 },
- { _MMIO(0x9888), 0x0a382080 },
- { _MMIO(0x9888), 0x0e38404c },
- { _MMIO(0x9888), 0x0238404b },
- { _MMIO(0x9888), 0x00384000 },
- { _MMIO(0x9888), 0x16380000 },
- { _MMIO(0x9888), 0x18381145 },
- { _MMIO(0x9888), 0x04380000 },
- { _MMIO(0x9888), 0x0039a000 },
- { _MMIO(0x9888), 0x06398000 },
- { _MMIO(0x9888), 0x0839a000 },
- { _MMIO(0x9888), 0x0a39a000 },
- { _MMIO(0x9888), 0x0c39a000 },
- { _MMIO(0x9888), 0x0e39a000 },
- { _MMIO(0x9888), 0x02392000 },
- { _MMIO(0x9888), 0x018a8000 },
- { _MMIO(0x9888), 0x0f8a8000 },
- { _MMIO(0x9888), 0x198a8000 },
- { _MMIO(0x9888), 0x1b8aaaa0 },
- { _MMIO(0x9888), 0x1d8a0002 },
- { _MMIO(0x9888), 0x038a8000 },
- { _MMIO(0x9888), 0x058a8000 },
- { _MMIO(0x9888), 0x238b02a0 },
- { _MMIO(0x9888), 0x258b5550 },
- { _MMIO(0x9888), 0x278b0015 },
- { _MMIO(0x9888), 0x1f850a80 },
- { _MMIO(0x9888), 0x2185aaa0 },
- { _MMIO(0x9888), 0x2385002a },
- { _MMIO(0x9888), 0x01834000 },
- { _MMIO(0x9888), 0x0f834000 },
- { _MMIO(0x9888), 0x19835400 },
- { _MMIO(0x9888), 0x1b830155 },
- { _MMIO(0x9888), 0x03834000 },
- { _MMIO(0x9888), 0x05834000 },
- { _MMIO(0x9888), 0x0184c000 },
- { _MMIO(0x9888), 0x07848000 },
- { _MMIO(0x9888), 0x0984c000 },
- { _MMIO(0x9888), 0x0b84c000 },
- { _MMIO(0x9888), 0x0d84c000 },
- { _MMIO(0x9888), 0x0f84c000 },
- { _MMIO(0x9888), 0x03844000 },
- { _MMIO(0x9888), 0x17808137 },
- { _MMIO(0x9888), 0x1980c147 },
- { _MMIO(0x9888), 0x1b80c0e5 },
- { _MMIO(0x9888), 0x1d80c0e3 },
- { _MMIO(0x9888), 0x21800000 },
- { _MMIO(0x9888), 0x1180c000 },
- { _MMIO(0x9888), 0x1f80c000 },
- { _MMIO(0x9888), 0x13804000 },
- { _MMIO(0x9888), 0x15800000 },
- { _MMIO(0xd24), 0x00000000 },
- { _MMIO(0x9888), 0x4d801000 },
- { _MMIO(0x9888), 0x4f800111 },
- { _MMIO(0x9888), 0x43800062 },
- { _MMIO(0x9888), 0x51800000 },
- { _MMIO(0x9888), 0x45800062 },
- { _MMIO(0x9888), 0x53800000 },
- { _MMIO(0x9888), 0x47800062 },
- { _MMIO(0x9888), 0x31800000 },
- { _MMIO(0x9888), 0x3f801062 },
- { _MMIO(0x9888), 0x41801084 },
-};
-
-static const struct i915_oa_reg mux_config_compute_basic_2_slices_0x02[] = {
- { _MMIO(0x9888), 0x10dc00e0 },
- { _MMIO(0x9888), 0x10d800e0 },
- { _MMIO(0x9888), 0x10b800e0 },
- { _MMIO(0x9888), 0x3580001a },
- { _MMIO(0x9888), 0x3b800060 },
- { _MMIO(0x9888), 0x3d800005 },
- { _MMIO(0x9888), 0x06dc2100 },
- { _MMIO(0x9888), 0x0adc0041 },
- { _MMIO(0x9888), 0x0cdc6600 },
- { _MMIO(0x9888), 0x00dc6580 },
- { _MMIO(0x9888), 0x08dc8000 },
- { _MMIO(0x9888), 0x0edc8000 },
- { _MMIO(0x9888), 0x00d80042 },
- { _MMIO(0x9888), 0x08d82080 },
- { _MMIO(0x9888), 0x0cd8004c },
- { _MMIO(0x9888), 0x0ed82580 },
- { _MMIO(0x9888), 0x00db4000 },
- { _MMIO(0x9888), 0x18db1000 },
- { _MMIO(0x9888), 0x1adb0104 },
- { _MMIO(0x9888), 0x0c9fa800 },
- { _MMIO(0x9888), 0x0e9faa00 },
- { _MMIO(0x9888), 0x109f02aa },
- { _MMIO(0x9888), 0x08b80042 },
- { _MMIO(0x9888), 0x0ab82080 },
- { _MMIO(0x9888), 0x0eb8404c },
- { _MMIO(0x9888), 0x02b8404b },
- { _MMIO(0x9888), 0x00b84000 },
- { _MMIO(0x9888), 0x16b80000 },
- { _MMIO(0x9888), 0x18b81145 },
- { _MMIO(0x9888), 0x04b80000 },
- { _MMIO(0x9888), 0x00b9a000 },
- { _MMIO(0x9888), 0x06b98000 },
- { _MMIO(0x9888), 0x08b9a000 },
- { _MMIO(0x9888), 0x0ab9a000 },
- { _MMIO(0x9888), 0x0cb9a000 },
- { _MMIO(0x9888), 0x0eb9a000 },
- { _MMIO(0x9888), 0x02b92000 },
- { _MMIO(0x9888), 0x01888000 },
- { _MMIO(0x9888), 0x0d88f800 },
- { _MMIO(0x9888), 0x0f88000f },
- { _MMIO(0x9888), 0x03888000 },
- { _MMIO(0x9888), 0x05888000 },
- { _MMIO(0x9888), 0x238b0540 },
- { _MMIO(0x9888), 0x258baaa0 },
- { _MMIO(0x9888), 0x278b002a },
- { _MMIO(0x9888), 0x018c4000 },
- { _MMIO(0x9888), 0x0f8c4000 },
- { _MMIO(0x9888), 0x178c2000 },
- { _MMIO(0x9888), 0x198c5500 },
- { _MMIO(0x9888), 0x1b8c0015 },
- { _MMIO(0x9888), 0x038c4000 },
- { _MMIO(0x9888), 0x058c4000 },
- { _MMIO(0x9888), 0x018da000 },
- { _MMIO(0x9888), 0x078d8000 },
- { _MMIO(0x9888), 0x098da000 },
- { _MMIO(0x9888), 0x0b8da000 },
- { _MMIO(0x9888), 0x0d8da000 },
- { _MMIO(0x9888), 0x0f8da000 },
- { _MMIO(0x9888), 0x038d2000 },
- { _MMIO(0x9888), 0x1f850a80 },
- { _MMIO(0x9888), 0x2185aaa0 },
- { _MMIO(0x9888), 0x2385002a },
- { _MMIO(0x9888), 0x01834000 },
- { _MMIO(0x9888), 0x0f834000 },
- { _MMIO(0x9888), 0x19835400 },
- { _MMIO(0x9888), 0x1b830155 },
- { _MMIO(0x9888), 0x03834000 },
- { _MMIO(0x9888), 0x05834000 },
- { _MMIO(0x9888), 0x0184c000 },
- { _MMIO(0x9888), 0x07848000 },
- { _MMIO(0x9888), 0x0984c000 },
- { _MMIO(0x9888), 0x0b84c000 },
- { _MMIO(0x9888), 0x0d84c000 },
- { _MMIO(0x9888), 0x0f84c000 },
- { _MMIO(0x9888), 0x03844000 },
- { _MMIO(0x9888), 0x17808137 },
- { _MMIO(0x9888), 0x1980c147 },
- { _MMIO(0x9888), 0x1b80c0e5 },
- { _MMIO(0x9888), 0x1d80c0e3 },
- { _MMIO(0x9888), 0x21800000 },
- { _MMIO(0x9888), 0x1180c000 },
- { _MMIO(0x9888), 0x1f80c000 },
- { _MMIO(0x9888), 0x13804000 },
- { _MMIO(0x9888), 0x15800000 },
- { _MMIO(0xd24), 0x00000000 },
- { _MMIO(0x9888), 0x4d805000 },
- { _MMIO(0x9888), 0x4f800555 },
- { _MMIO(0x9888), 0x43800062 },
- { _MMIO(0x9888), 0x51800000 },
- { _MMIO(0x9888), 0x45800062 },
- { _MMIO(0x9888), 0x53800000 },
- { _MMIO(0x9888), 0x47800062 },
- { _MMIO(0x9888), 0x31800000 },
- { _MMIO(0x9888), 0x3f800062 },
- { _MMIO(0x9888), 0x41800000 },
-};
-
-static int
-get_compute_basic_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 2);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 2);
-
- if (INTEL_INFO(dev_priv)->sseu.slice_mask & 0x01) {
- regs[n] = mux_config_compute_basic_0_slices_0x01;
- lens[n] = ARRAY_SIZE(mux_config_compute_basic_0_slices_0x01);
- n++;
- }
- if (INTEL_INFO(dev_priv)->sseu.slice_mask & 0x02) {
- regs[n] = mux_config_compute_basic_2_slices_0x02;
- lens[n] = ARRAY_SIZE(mux_config_compute_basic_2_slices_0x02);
- n++;
- }
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_render_pipe_profile[] = {
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2770), 0x0007ffea },
- { _MMIO(0x2774), 0x00007ffc },
- { _MMIO(0x2778), 0x0007affa },
- { _MMIO(0x277c), 0x0000f5fd },
- { _MMIO(0x2780), 0x00079ffa },
- { _MMIO(0x2784), 0x0000f3fb },
- { _MMIO(0x2788), 0x0007bf7a },
- { _MMIO(0x278c), 0x0000f7e7 },
- { _MMIO(0x2790), 0x0007fefa },
- { _MMIO(0x2794), 0x0000f7cf },
- { _MMIO(0x2798), 0x00077ffa },
- { _MMIO(0x279c), 0x0000efdf },
- { _MMIO(0x27a0), 0x0006fffa },
- { _MMIO(0x27a4), 0x0000cfbf },
- { _MMIO(0x27a8), 0x0003fffa },
- { _MMIO(0x27ac), 0x00005f7f },
-};
-
-static const struct i915_oa_reg flex_eu_config_render_pipe_profile[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00015014 },
- { _MMIO(0xe658), 0x00025024 },
- { _MMIO(0xe758), 0x00035034 },
- { _MMIO(0xe45c), 0x00045044 },
- { _MMIO(0xe55c), 0x00055054 },
- { _MMIO(0xe65c), 0x00065064 },
-};
-
-static const struct i915_oa_reg mux_config_render_pipe_profile[] = {
- { _MMIO(0x9888), 0x0a1e0000 },
- { _MMIO(0x9888), 0x0c1f000f },
- { _MMIO(0x9888), 0x10176800 },
- { _MMIO(0x9888), 0x1191001f },
- { _MMIO(0x9888), 0x0b880320 },
- { _MMIO(0x9888), 0x01890c40 },
- { _MMIO(0x9888), 0x118a1c00 },
- { _MMIO(0x9888), 0x118d7c00 },
- { _MMIO(0x9888), 0x118e0020 },
- { _MMIO(0x9888), 0x118f4c00 },
- { _MMIO(0x9888), 0x11900000 },
- { _MMIO(0x9888), 0x13900001 },
- { _MMIO(0x9888), 0x065c4000 },
- { _MMIO(0x9888), 0x0c3d8000 },
- { _MMIO(0x9888), 0x06584000 },
- { _MMIO(0x9888), 0x0c5b4000 },
- { _MMIO(0x9888), 0x081e0040 },
- { _MMIO(0x9888), 0x0e1e0000 },
- { _MMIO(0x9888), 0x021f5400 },
- { _MMIO(0x9888), 0x001f0000 },
- { _MMIO(0x9888), 0x101f0010 },
- { _MMIO(0x9888), 0x0e1f0080 },
- { _MMIO(0x9888), 0x0c384000 },
- { _MMIO(0x9888), 0x06392000 },
- { _MMIO(0x9888), 0x0c13c000 },
- { _MMIO(0x9888), 0x06164000 },
- { _MMIO(0x9888), 0x06170012 },
- { _MMIO(0x9888), 0x00170000 },
- { _MMIO(0x9888), 0x01910005 },
- { _MMIO(0x9888), 0x07880002 },
- { _MMIO(0x9888), 0x01880c00 },
- { _MMIO(0x9888), 0x0f880000 },
- { _MMIO(0x9888), 0x0d880000 },
- { _MMIO(0x9888), 0x05880000 },
- { _MMIO(0x9888), 0x09890032 },
- { _MMIO(0x9888), 0x078a0800 },
- { _MMIO(0x9888), 0x0f8a0a00 },
- { _MMIO(0x9888), 0x198a4000 },
- { _MMIO(0x9888), 0x1b8a2000 },
- { _MMIO(0x9888), 0x1d8a0000 },
- { _MMIO(0x9888), 0x038a4000 },
- { _MMIO(0x9888), 0x0b8a8000 },
- { _MMIO(0x9888), 0x0d8a8000 },
- { _MMIO(0x9888), 0x238b54c0 },
- { _MMIO(0x9888), 0x258baa55 },
- { _MMIO(0x9888), 0x278b0019 },
- { _MMIO(0x9888), 0x198c0100 },
- { _MMIO(0x9888), 0x058c4000 },
- { _MMIO(0x9888), 0x0f8d0015 },
- { _MMIO(0x9888), 0x018d1000 },
- { _MMIO(0x9888), 0x098d8000 },
- { _MMIO(0x9888), 0x0b8df000 },
- { _MMIO(0x9888), 0x0d8d3000 },
- { _MMIO(0x9888), 0x038de000 },
- { _MMIO(0x9888), 0x058d3000 },
- { _MMIO(0x9888), 0x0d8e0004 },
- { _MMIO(0x9888), 0x058e000c },
- { _MMIO(0x9888), 0x098e0000 },
- { _MMIO(0x9888), 0x078e0000 },
- { _MMIO(0x9888), 0x038e0000 },
- { _MMIO(0x9888), 0x0b8f0020 },
- { _MMIO(0x9888), 0x198f0c00 },
- { _MMIO(0x9888), 0x078f8000 },
- { _MMIO(0x9888), 0x098f4000 },
- { _MMIO(0x9888), 0x0b900980 },
- { _MMIO(0x9888), 0x03900d80 },
- { _MMIO(0x9888), 0x01900000 },
- { _MMIO(0x9888), 0x1f85aa80 },
- { _MMIO(0x9888), 0x2185aaaa },
- { _MMIO(0x9888), 0x2385002a },
- { _MMIO(0x9888), 0x01834000 },
- { _MMIO(0x9888), 0x0f834000 },
- { _MMIO(0x9888), 0x19835400 },
- { _MMIO(0x9888), 0x1b830155 },
- { _MMIO(0x9888), 0x03834000 },
- { _MMIO(0x9888), 0x05834000 },
- { _MMIO(0x9888), 0x07834000 },
- { _MMIO(0x9888), 0x09834000 },
- { _MMIO(0x9888), 0x0b834000 },
- { _MMIO(0x9888), 0x0d834000 },
- { _MMIO(0x9888), 0x0184c000 },
- { _MMIO(0x9888), 0x0784c000 },
- { _MMIO(0x9888), 0x0984c000 },
- { _MMIO(0x9888), 0x0b84c000 },
- { _MMIO(0x9888), 0x0d84c000 },
- { _MMIO(0x9888), 0x0f84c000 },
- { _MMIO(0x9888), 0x0384c000 },
- { _MMIO(0x9888), 0x0584c000 },
- { _MMIO(0x9888), 0x1180c000 },
- { _MMIO(0x9888), 0x1780c000 },
- { _MMIO(0x9888), 0x1980c000 },
- { _MMIO(0x9888), 0x1b80c000 },
- { _MMIO(0x9888), 0x1d80c000 },
- { _MMIO(0x9888), 0x1f80c000 },
- { _MMIO(0x9888), 0x1380c000 },
- { _MMIO(0x9888), 0x1580c000 },
- { _MMIO(0xd24), 0x00000000 },
- { _MMIO(0x9888), 0x4d801111 },
- { _MMIO(0x9888), 0x3d800800 },
- { _MMIO(0x9888), 0x4f801011 },
- { _MMIO(0x9888), 0x43800443 },
- { _MMIO(0x9888), 0x51801111 },
- { _MMIO(0x9888), 0x45800422 },
- { _MMIO(0x9888), 0x53801111 },
- { _MMIO(0x9888), 0x47800c60 },
- { _MMIO(0x9888), 0x21800000 },
- { _MMIO(0x9888), 0x31800000 },
- { _MMIO(0x9888), 0x3f800422 },
- { _MMIO(0x9888), 0x41800021 },
-};
-
-static int
-get_render_pipe_profile_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_render_pipe_profile;
- lens[n] = ARRAY_SIZE(mux_config_render_pipe_profile);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_memory_reads[] = {
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x274c), 0x86543210 },
- { _MMIO(0x2748), 0x86543210 },
- { _MMIO(0x2744), 0x00006667 },
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x275c), 0x86543210 },
- { _MMIO(0x2758), 0x86543210 },
- { _MMIO(0x2754), 0x00006465 },
- { _MMIO(0x2750), 0x00000000 },
- { _MMIO(0x2770), 0x0007f81a },
- { _MMIO(0x2774), 0x0000fe00 },
- { _MMIO(0x2778), 0x0007f82a },
- { _MMIO(0x277c), 0x0000fe00 },
- { _MMIO(0x2780), 0x0007f872 },
- { _MMIO(0x2784), 0x0000fe00 },
- { _MMIO(0x2788), 0x0007f8ba },
- { _MMIO(0x278c), 0x0000fe00 },
- { _MMIO(0x2790), 0x0007f87a },
- { _MMIO(0x2794), 0x0000fe00 },
- { _MMIO(0x2798), 0x0007f8ea },
- { _MMIO(0x279c), 0x0000fe00 },
- { _MMIO(0x27a0), 0x0007f8e2 },
- { _MMIO(0x27a4), 0x0000fe00 },
- { _MMIO(0x27a8), 0x0007f8f2 },
- { _MMIO(0x27ac), 0x0000fe00 },
-};
-
-static const struct i915_oa_reg flex_eu_config_memory_reads[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00015014 },
- { _MMIO(0xe658), 0x00025024 },
- { _MMIO(0xe758), 0x00035034 },
- { _MMIO(0xe45c), 0x00045044 },
- { _MMIO(0xe55c), 0x00055054 },
- { _MMIO(0xe65c), 0x00065064 },
-};
-
-static const struct i915_oa_reg mux_config_memory_reads[] = {
- { _MMIO(0x9888), 0x198b0343 },
- { _MMIO(0x9888), 0x13845800 },
- { _MMIO(0x9888), 0x15840018 },
- { _MMIO(0x9888), 0x3580001a },
- { _MMIO(0x9888), 0x038b6300 },
- { _MMIO(0x9888), 0x058b6b62 },
- { _MMIO(0x9888), 0x078b006a },
- { _MMIO(0x9888), 0x118b0000 },
- { _MMIO(0x9888), 0x238b0000 },
- { _MMIO(0x9888), 0x258b0000 },
- { _MMIO(0x9888), 0x1f85a080 },
- { _MMIO(0x9888), 0x2185aaaa },
- { _MMIO(0x9888), 0x2385000a },
- { _MMIO(0x9888), 0x07834000 },
- { _MMIO(0x9888), 0x09834000 },
- { _MMIO(0x9888), 0x0b834000 },
- { _MMIO(0x9888), 0x0d834000 },
- { _MMIO(0x9888), 0x01840018 },
- { _MMIO(0x9888), 0x07844c80 },
- { _MMIO(0x9888), 0x09840d9a },
- { _MMIO(0x9888), 0x0b840e9c },
- { _MMIO(0x9888), 0x0d840f9e },
- { _MMIO(0x9888), 0x0f840010 },
- { _MMIO(0x9888), 0x11840000 },
- { _MMIO(0x9888), 0x03848000 },
- { _MMIO(0x9888), 0x0584c000 },
- { _MMIO(0x9888), 0x2f8000e5 },
- { _MMIO(0x9888), 0x138080e3 },
- { _MMIO(0x9888), 0x1580c0e1 },
- { _MMIO(0x9888), 0x21800000 },
- { _MMIO(0x9888), 0x11804000 },
- { _MMIO(0x9888), 0x1780c000 },
- { _MMIO(0x9888), 0x1980c000 },
- { _MMIO(0x9888), 0x1b80c000 },
- { _MMIO(0x9888), 0x1d80c000 },
- { _MMIO(0x9888), 0x1f804000 },
- { _MMIO(0xd24), 0x00000000 },
- { _MMIO(0x9888), 0x4d800000 },
- { _MMIO(0x9888), 0x3d800800 },
- { _MMIO(0x9888), 0x4f800000 },
- { _MMIO(0x9888), 0x43800842 },
- { _MMIO(0x9888), 0x51800000 },
- { _MMIO(0x9888), 0x45800842 },
- { _MMIO(0x9888), 0x53800000 },
- { _MMIO(0x9888), 0x47801042 },
- { _MMIO(0x9888), 0x31800000 },
- { _MMIO(0x9888), 0x3f800084 },
- { _MMIO(0x9888), 0x41800000 },
-};
-
-static int
-get_memory_reads_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_memory_reads;
- lens[n] = ARRAY_SIZE(mux_config_memory_reads);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_memory_writes[] = {
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x274c), 0x86543210 },
- { _MMIO(0x2748), 0x86543210 },
- { _MMIO(0x2744), 0x00006667 },
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x275c), 0x86543210 },
- { _MMIO(0x2758), 0x86543210 },
- { _MMIO(0x2754), 0x00006465 },
- { _MMIO(0x2750), 0x00000000 },
- { _MMIO(0x2770), 0x0007f81a },
- { _MMIO(0x2774), 0x0000fe00 },
- { _MMIO(0x2778), 0x0007f82a },
- { _MMIO(0x277c), 0x0000fe00 },
- { _MMIO(0x2780), 0x0007f822 },
- { _MMIO(0x2784), 0x0000fe00 },
- { _MMIO(0x2788), 0x0007f8ba },
- { _MMIO(0x278c), 0x0000fe00 },
- { _MMIO(0x2790), 0x0007f87a },
- { _MMIO(0x2794), 0x0000fe00 },
- { _MMIO(0x2798), 0x0007f8ea },
- { _MMIO(0x279c), 0x0000fe00 },
- { _MMIO(0x27a0), 0x0007f8e2 },
- { _MMIO(0x27a4), 0x0000fe00 },
- { _MMIO(0x27a8), 0x0007f8f2 },
- { _MMIO(0x27ac), 0x0000fe00 },
-};
-
-static const struct i915_oa_reg flex_eu_config_memory_writes[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00015014 },
- { _MMIO(0xe658), 0x00025024 },
- { _MMIO(0xe758), 0x00035034 },
- { _MMIO(0xe45c), 0x00045044 },
- { _MMIO(0xe55c), 0x00055054 },
- { _MMIO(0xe65c), 0x00065064 },
-};
-
-static const struct i915_oa_reg mux_config_memory_writes[] = {
- { _MMIO(0x9888), 0x198b0343 },
- { _MMIO(0x9888), 0x13845400 },
- { _MMIO(0x9888), 0x3580001a },
- { _MMIO(0x9888), 0x3d800805 },
- { _MMIO(0x9888), 0x038b6300 },
- { _MMIO(0x9888), 0x058b6b62 },
- { _MMIO(0x9888), 0x078b006a },
- { _MMIO(0x9888), 0x118b0000 },
- { _MMIO(0x9888), 0x238b0000 },
- { _MMIO(0x9888), 0x258b0000 },
- { _MMIO(0x9888), 0x1f85a080 },
- { _MMIO(0x9888), 0x2185aaaa },
- { _MMIO(0x9888), 0x23850002 },
- { _MMIO(0x9888), 0x07834000 },
- { _MMIO(0x9888), 0x09834000 },
- { _MMIO(0x9888), 0x0b834000 },
- { _MMIO(0x9888), 0x0d834000 },
- { _MMIO(0x9888), 0x01840010 },
- { _MMIO(0x9888), 0x07844880 },
- { _MMIO(0x9888), 0x09840992 },
- { _MMIO(0x9888), 0x0b840a94 },
- { _MMIO(0x9888), 0x0d840b96 },
- { _MMIO(0x9888), 0x11840000 },
- { _MMIO(0x9888), 0x03848000 },
- { _MMIO(0x9888), 0x0584c000 },
- { _MMIO(0x9888), 0x2d800147 },
- { _MMIO(0x9888), 0x2f8000e5 },
- { _MMIO(0x9888), 0x138080e3 },
- { _MMIO(0x9888), 0x1580c0e1 },
- { _MMIO(0x9888), 0x21800000 },
- { _MMIO(0x9888), 0x11804000 },
- { _MMIO(0x9888), 0x1780c000 },
- { _MMIO(0x9888), 0x1980c000 },
- { _MMIO(0x9888), 0x1b80c000 },
- { _MMIO(0x9888), 0x1d80c000 },
- { _MMIO(0x9888), 0x1f800000 },
- { _MMIO(0xd24), 0x00000000 },
- { _MMIO(0x9888), 0x4d800000 },
- { _MMIO(0x9888), 0x4f800000 },
- { _MMIO(0x9888), 0x43800842 },
- { _MMIO(0x9888), 0x51800000 },
- { _MMIO(0x9888), 0x45800842 },
- { _MMIO(0x9888), 0x53800000 },
- { _MMIO(0x9888), 0x47801082 },
- { _MMIO(0x9888), 0x31800000 },
- { _MMIO(0x9888), 0x3f800084 },
- { _MMIO(0x9888), 0x41800000 },
-};
-
-static int
-get_memory_writes_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_memory_writes;
- lens[n] = ARRAY_SIZE(mux_config_memory_writes);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_compute_extended[] = {
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2770), 0x0007fc2a },
- { _MMIO(0x2774), 0x0000bf00 },
- { _MMIO(0x2778), 0x0007fc6a },
- { _MMIO(0x277c), 0x0000bf00 },
- { _MMIO(0x2780), 0x0007fc92 },
- { _MMIO(0x2784), 0x0000bf00 },
- { _MMIO(0x2788), 0x0007fca2 },
- { _MMIO(0x278c), 0x0000bf00 },
- { _MMIO(0x2790), 0x0007fc32 },
- { _MMIO(0x2794), 0x0000bf00 },
- { _MMIO(0x2798), 0x0007fc9a },
- { _MMIO(0x279c), 0x0000bf00 },
- { _MMIO(0x27a0), 0x0007fe6a },
- { _MMIO(0x27a4), 0x0000bf00 },
- { _MMIO(0x27a8), 0x0007fe7a },
- { _MMIO(0x27ac), 0x0000bf00 },
-};
-
-static const struct i915_oa_reg flex_eu_config_compute_extended[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00000003 },
- { _MMIO(0xe658), 0x00002001 },
- { _MMIO(0xe758), 0x00778008 },
- { _MMIO(0xe45c), 0x00088078 },
- { _MMIO(0xe55c), 0x00808708 },
- { _MMIO(0xe65c), 0x00a08908 },
-};
-
-static const struct i915_oa_reg mux_config_compute_extended_0_subslices_0x01[] = {
- { _MMIO(0x9888), 0x143d0160 },
- { _MMIO(0x9888), 0x163d2800 },
- { _MMIO(0x9888), 0x183d0120 },
- { _MMIO(0x9888), 0x105800e0 },
- { _MMIO(0x9888), 0x005cc000 },
- { _MMIO(0x9888), 0x065c8000 },
- { _MMIO(0x9888), 0x085cc000 },
- { _MMIO(0x9888), 0x0a5cc000 },
- { _MMIO(0x9888), 0x0c5cc000 },
- { _MMIO(0x9888), 0x0e5cc000 },
- { _MMIO(0x9888), 0x025cc000 },
- { _MMIO(0x9888), 0x045cc000 },
- { _MMIO(0x9888), 0x003d0011 },
- { _MMIO(0x9888), 0x063d0900 },
- { _MMIO(0x9888), 0x083d0a13 },
- { _MMIO(0x9888), 0x0a3d0b15 },
- { _MMIO(0x9888), 0x0c3d2317 },
- { _MMIO(0x9888), 0x043d21b7 },
- { _MMIO(0x9888), 0x103d0000 },
- { _MMIO(0x9888), 0x0e3d0000 },
- { _MMIO(0x9888), 0x1a3d0000 },
- { _MMIO(0x9888), 0x0e5825c1 },
- { _MMIO(0x9888), 0x00586100 },
- { _MMIO(0x9888), 0x0258204c },
- { _MMIO(0x9888), 0x06588000 },
- { _MMIO(0x9888), 0x0858c000 },
- { _MMIO(0x9888), 0x0a58c000 },
- { _MMIO(0x9888), 0x0c58c000 },
- { _MMIO(0x9888), 0x0458c000 },
- { _MMIO(0x9888), 0x005b4000 },
- { _MMIO(0x9888), 0x0e5b4000 },
- { _MMIO(0x9888), 0x185b5400 },
- { _MMIO(0x9888), 0x1a5b0155 },
- { _MMIO(0x9888), 0x025b4000 },
- { _MMIO(0x9888), 0x045b4000 },
- { _MMIO(0x9888), 0x065b4000 },
- { _MMIO(0x9888), 0x085b4000 },
- { _MMIO(0x9888), 0x0a5b4000 },
- { _MMIO(0x9888), 0x0c1fa800 },
- { _MMIO(0x9888), 0x0e1faa2a },
- { _MMIO(0x9888), 0x101f02aa },
- { _MMIO(0x9888), 0x00384000 },
- { _MMIO(0x9888), 0x0e384000 },
- { _MMIO(0x9888), 0x16384000 },
- { _MMIO(0x9888), 0x18381555 },
- { _MMIO(0x9888), 0x02384000 },
- { _MMIO(0x9888), 0x04384000 },
- { _MMIO(0x9888), 0x06384000 },
- { _MMIO(0x9888), 0x08384000 },
- { _MMIO(0x9888), 0x0a384000 },
- { _MMIO(0x9888), 0x0039a000 },
- { _MMIO(0x9888), 0x06398000 },
- { _MMIO(0x9888), 0x0839a000 },
- { _MMIO(0x9888), 0x0a39a000 },
- { _MMIO(0x9888), 0x0c39a000 },
- { _MMIO(0x9888), 0x0e39a000 },
- { _MMIO(0x9888), 0x0239a000 },
- { _MMIO(0x9888), 0x0439a000 },
- { _MMIO(0x9888), 0x018a8000 },
- { _MMIO(0x9888), 0x0f8a8000 },
- { _MMIO(0x9888), 0x198a8000 },
- { _MMIO(0x9888), 0x1b8aaaa0 },
- { _MMIO(0x9888), 0x1d8a0002 },
- { _MMIO(0x9888), 0x038a8000 },
- { _MMIO(0x9888), 0x058a8000 },
- { _MMIO(0x9888), 0x078a8000 },
- { _MMIO(0x9888), 0x098a8000 },
- { _MMIO(0x9888), 0x0b8a8000 },
- { _MMIO(0x9888), 0x238b2aa0 },
- { _MMIO(0x9888), 0x258b5551 },
- { _MMIO(0x9888), 0x278b0015 },
- { _MMIO(0x9888), 0x1f85aa80 },
- { _MMIO(0x9888), 0x2185aaa2 },
- { _MMIO(0x9888), 0x2385002a },
- { _MMIO(0x9888), 0x01834000 },
- { _MMIO(0x9888), 0x0f834000 },
- { _MMIO(0x9888), 0x19835400 },
- { _MMIO(0x9888), 0x1b830155 },
- { _MMIO(0x9888), 0x03834000 },
- { _MMIO(0x9888), 0x05834000 },
- { _MMIO(0x9888), 0x07834000 },
- { _MMIO(0x9888), 0x09834000 },
- { _MMIO(0x9888), 0x0b834000 },
- { _MMIO(0x9888), 0x0184c000 },
- { _MMIO(0x9888), 0x07848000 },
- { _MMIO(0x9888), 0x0984c000 },
- { _MMIO(0x9888), 0x0b84c000 },
- { _MMIO(0x9888), 0x0d84c000 },
- { _MMIO(0x9888), 0x0f84c000 },
- { _MMIO(0x9888), 0x0384c000 },
- { _MMIO(0x9888), 0x0584c000 },
- { _MMIO(0x9888), 0x1180c000 },
- { _MMIO(0x9888), 0x17808000 },
- { _MMIO(0x9888), 0x1980c000 },
- { _MMIO(0x9888), 0x1b80c000 },
- { _MMIO(0x9888), 0x1d80c000 },
- { _MMIO(0x9888), 0x1f80c000 },
- { _MMIO(0x9888), 0x1380c000 },
- { _MMIO(0x9888), 0x1580c000 },
- { _MMIO(0xd24), 0x00000000 },
- { _MMIO(0x9888), 0x4d800000 },
- { _MMIO(0x9888), 0x3d800000 },
- { _MMIO(0x9888), 0x4f800000 },
- { _MMIO(0x9888), 0x43800000 },
- { _MMIO(0x9888), 0x51800000 },
- { _MMIO(0x9888), 0x45800000 },
- { _MMIO(0x9888), 0x53800000 },
- { _MMIO(0x9888), 0x47800420 },
- { _MMIO(0x9888), 0x21800000 },
- { _MMIO(0x9888), 0x31800000 },
- { _MMIO(0x9888), 0x3f800421 },
- { _MMIO(0x9888), 0x41800000 },
-};
-
-static const struct i915_oa_reg mux_config_compute_extended_2_subslices_0x02[] = {
- { _MMIO(0x9888), 0x105c00e0 },
- { _MMIO(0x9888), 0x145b0160 },
- { _MMIO(0x9888), 0x165b2800 },
- { _MMIO(0x9888), 0x185b0120 },
- { _MMIO(0x9888), 0x0e5c25c1 },
- { _MMIO(0x9888), 0x005c6100 },
- { _MMIO(0x9888), 0x025c204c },
- { _MMIO(0x9888), 0x065c8000 },
- { _MMIO(0x9888), 0x085cc000 },
- { _MMIO(0x9888), 0x0a5cc000 },
- { _MMIO(0x9888), 0x0c5cc000 },
- { _MMIO(0x9888), 0x045cc000 },
- { _MMIO(0x9888), 0x005b0011 },
- { _MMIO(0x9888), 0x065b0900 },
- { _MMIO(0x9888), 0x085b0a13 },
- { _MMIO(0x9888), 0x0a5b0b15 },
- { _MMIO(0x9888), 0x0c5b2317 },
- { _MMIO(0x9888), 0x045b21b7 },
- { _MMIO(0x9888), 0x105b0000 },
- { _MMIO(0x9888), 0x0e5b0000 },
- { _MMIO(0x9888), 0x1a5b0000 },
- { _MMIO(0x9888), 0x0c1fa800 },
- { _MMIO(0x9888), 0x0e1faa2a },
- { _MMIO(0x9888), 0x101f02aa },
- { _MMIO(0x9888), 0x00384000 },
- { _MMIO(0x9888), 0x0e384000 },
- { _MMIO(0x9888), 0x16384000 },
- { _MMIO(0x9888), 0x18381555 },
- { _MMIO(0x9888), 0x02384000 },
- { _MMIO(0x9888), 0x04384000 },
- { _MMIO(0x9888), 0x06384000 },
- { _MMIO(0x9888), 0x08384000 },
- { _MMIO(0x9888), 0x0a384000 },
- { _MMIO(0x9888), 0x0039a000 },
- { _MMIO(0x9888), 0x06398000 },
- { _MMIO(0x9888), 0x0839a000 },
- { _MMIO(0x9888), 0x0a39a000 },
- { _MMIO(0x9888), 0x0c39a000 },
- { _MMIO(0x9888), 0x0e39a000 },
- { _MMIO(0x9888), 0x0239a000 },
- { _MMIO(0x9888), 0x0439a000 },
- { _MMIO(0x9888), 0x018a8000 },
- { _MMIO(0x9888), 0x0f8a8000 },
- { _MMIO(0x9888), 0x198a8000 },
- { _MMIO(0x9888), 0x1b8aaaa0 },
- { _MMIO(0x9888), 0x1d8a0002 },
- { _MMIO(0x9888), 0x038a8000 },
- { _MMIO(0x9888), 0x058a8000 },
- { _MMIO(0x9888), 0x078a8000 },
- { _MMIO(0x9888), 0x098a8000 },
- { _MMIO(0x9888), 0x0b8a8000 },
- { _MMIO(0x9888), 0x238b2aa0 },
- { _MMIO(0x9888), 0x258b5551 },
- { _MMIO(0x9888), 0x278b0015 },
- { _MMIO(0x9888), 0x1f85aa80 },
- { _MMIO(0x9888), 0x2185aaa2 },
- { _MMIO(0x9888), 0x2385002a },
- { _MMIO(0x9888), 0x01834000 },
- { _MMIO(0x9888), 0x0f834000 },
- { _MMIO(0x9888), 0x19835400 },
- { _MMIO(0x9888), 0x1b830155 },
- { _MMIO(0x9888), 0x03834000 },
- { _MMIO(0x9888), 0x05834000 },
- { _MMIO(0x9888), 0x07834000 },
- { _MMIO(0x9888), 0x09834000 },
- { _MMIO(0x9888), 0x0b834000 },
- { _MMIO(0x9888), 0x0184c000 },
- { _MMIO(0x9888), 0x07848000 },
- { _MMIO(0x9888), 0x0984c000 },
- { _MMIO(0x9888), 0x0b84c000 },
- { _MMIO(0x9888), 0x0d84c000 },
- { _MMIO(0x9888), 0x0f84c000 },
- { _MMIO(0x9888), 0x0384c000 },
- { _MMIO(0x9888), 0x0584c000 },
- { _MMIO(0x9888), 0x1180c000 },
- { _MMIO(0x9888), 0x17808000 },
- { _MMIO(0x9888), 0x1980c000 },
- { _MMIO(0x9888), 0x1b80c000 },
- { _MMIO(0x9888), 0x1d80c000 },
- { _MMIO(0x9888), 0x1f80c000 },
- { _MMIO(0x9888), 0x1380c000 },
- { _MMIO(0x9888), 0x1580c000 },
- { _MMIO(0xd24), 0x00000000 },
- { _MMIO(0x9888), 0x4d800000 },
- { _MMIO(0x9888), 0x3d800000 },
- { _MMIO(0x9888), 0x4f800000 },
- { _MMIO(0x9888), 0x43800000 },
- { _MMIO(0x9888), 0x51800000 },
- { _MMIO(0x9888), 0x45800000 },
- { _MMIO(0x9888), 0x53800000 },
- { _MMIO(0x9888), 0x47800420 },
- { _MMIO(0x9888), 0x21800000 },
- { _MMIO(0x9888), 0x31800000 },
- { _MMIO(0x9888), 0x3f800421 },
- { _MMIO(0x9888), 0x41800000 },
-};
-
-static const struct i915_oa_reg mux_config_compute_extended_4_subslices_0x04[] = {
- { _MMIO(0x9888), 0x103800e0 },
- { _MMIO(0x9888), 0x143a0160 },
- { _MMIO(0x9888), 0x163a2800 },
- { _MMIO(0x9888), 0x183a0120 },
- { _MMIO(0x9888), 0x0c1fa800 },
- { _MMIO(0x9888), 0x0e1faa2a },
- { _MMIO(0x9888), 0x101f02aa },
- { _MMIO(0x9888), 0x0e38a5c1 },
- { _MMIO(0x9888), 0x0038a100 },
- { _MMIO(0x9888), 0x0238204c },
- { _MMIO(0x9888), 0x16388000 },
- { _MMIO(0x9888), 0x183802aa },
- { _MMIO(0x9888), 0x04380000 },
- { _MMIO(0x9888), 0x06380000 },
- { _MMIO(0x9888), 0x08388000 },
- { _MMIO(0x9888), 0x0a388000 },
- { _MMIO(0x9888), 0x0039a000 },
- { _MMIO(0x9888), 0x06398000 },
- { _MMIO(0x9888), 0x0839a000 },
- { _MMIO(0x9888), 0x0a39a000 },
- { _MMIO(0x9888), 0x0c39a000 },
- { _MMIO(0x9888), 0x0e39a000 },
- { _MMIO(0x9888), 0x0239a000 },
- { _MMIO(0x9888), 0x0439a000 },
- { _MMIO(0x9888), 0x003a0011 },
- { _MMIO(0x9888), 0x063a0900 },
- { _MMIO(0x9888), 0x083a0a13 },
- { _MMIO(0x9888), 0x0a3a0b15 },
- { _MMIO(0x9888), 0x0c3a2317 },
- { _MMIO(0x9888), 0x043a21b7 },
- { _MMIO(0x9888), 0x103a0000 },
- { _MMIO(0x9888), 0x0e3a0000 },
- { _MMIO(0x9888), 0x1a3a0000 },
- { _MMIO(0x9888), 0x018a8000 },
- { _MMIO(0x9888), 0x0f8a8000 },
- { _MMIO(0x9888), 0x198a8000 },
- { _MMIO(0x9888), 0x1b8aaaa0 },
- { _MMIO(0x9888), 0x1d8a0002 },
- { _MMIO(0x9888), 0x038a8000 },
- { _MMIO(0x9888), 0x058a8000 },
- { _MMIO(0x9888), 0x078a8000 },
- { _MMIO(0x9888), 0x098a8000 },
- { _MMIO(0x9888), 0x0b8a8000 },
- { _MMIO(0x9888), 0x238b2aa0 },
- { _MMIO(0x9888), 0x258b5551 },
- { _MMIO(0x9888), 0x278b0015 },
- { _MMIO(0x9888), 0x1f85aa80 },
- { _MMIO(0x9888), 0x2185aaa2 },
- { _MMIO(0x9888), 0x2385002a },
- { _MMIO(0x9888), 0x01834000 },
- { _MMIO(0x9888), 0x0f834000 },
- { _MMIO(0x9888), 0x19835400 },
- { _MMIO(0x9888), 0x1b830155 },
- { _MMIO(0x9888), 0x03834000 },
- { _MMIO(0x9888), 0x05834000 },
- { _MMIO(0x9888), 0x07834000 },
- { _MMIO(0x9888), 0x09834000 },
- { _MMIO(0x9888), 0x0b834000 },
- { _MMIO(0x9888), 0x0184c000 },
- { _MMIO(0x9888), 0x07848000 },
- { _MMIO(0x9888), 0x0984c000 },
- { _MMIO(0x9888), 0x0b84c000 },
- { _MMIO(0x9888), 0x0d84c000 },
- { _MMIO(0x9888), 0x0f84c000 },
- { _MMIO(0x9888), 0x0384c000 },
- { _MMIO(0x9888), 0x0584c000 },
- { _MMIO(0x9888), 0x1180c000 },
- { _MMIO(0x9888), 0x17808000 },
- { _MMIO(0x9888), 0x1980c000 },
- { _MMIO(0x9888), 0x1b80c000 },
- { _MMIO(0x9888), 0x1d80c000 },
- { _MMIO(0x9888), 0x1f80c000 },
- { _MMIO(0x9888), 0x1380c000 },
- { _MMIO(0x9888), 0x1580c000 },
- { _MMIO(0xd24), 0x00000000 },
- { _MMIO(0x9888), 0x4d800000 },
- { _MMIO(0x9888), 0x3d800000 },
- { _MMIO(0x9888), 0x4f800000 },
- { _MMIO(0x9888), 0x43800000 },
- { _MMIO(0x9888), 0x51800000 },
- { _MMIO(0x9888), 0x45800000 },
- { _MMIO(0x9888), 0x53800000 },
- { _MMIO(0x9888), 0x47800420 },
- { _MMIO(0x9888), 0x21800000 },
- { _MMIO(0x9888), 0x31800000 },
- { _MMIO(0x9888), 0x3f800421 },
- { _MMIO(0x9888), 0x41800000 },
-};
-
-static const struct i915_oa_reg mux_config_compute_extended_1_subslices_0x08[] = {
- { _MMIO(0x9888), 0x14bd0160 },
- { _MMIO(0x9888), 0x16bd2800 },
- { _MMIO(0x9888), 0x18bd0120 },
- { _MMIO(0x9888), 0x10d800e0 },
- { _MMIO(0x9888), 0x00dcc000 },
- { _MMIO(0x9888), 0x06dc8000 },
- { _MMIO(0x9888), 0x08dcc000 },
- { _MMIO(0x9888), 0x0adcc000 },
- { _MMIO(0x9888), 0x0cdcc000 },
- { _MMIO(0x9888), 0x0edcc000 },
- { _MMIO(0x9888), 0x02dcc000 },
- { _MMIO(0x9888), 0x04dcc000 },
- { _MMIO(0x9888), 0x00bd0011 },
- { _MMIO(0x9888), 0x06bd0900 },
- { _MMIO(0x9888), 0x08bd0a13 },
- { _MMIO(0x9888), 0x0abd0b15 },
- { _MMIO(0x9888), 0x0cbd2317 },
- { _MMIO(0x9888), 0x04bd21b7 },
- { _MMIO(0x9888), 0x10bd0000 },
- { _MMIO(0x9888), 0x0ebd0000 },
- { _MMIO(0x9888), 0x1abd0000 },
- { _MMIO(0x9888), 0x0ed825c1 },
- { _MMIO(0x9888), 0x00d86100 },
- { _MMIO(0x9888), 0x02d8204c },
- { _MMIO(0x9888), 0x06d88000 },
- { _MMIO(0x9888), 0x08d8c000 },
- { _MMIO(0x9888), 0x0ad8c000 },
- { _MMIO(0x9888), 0x0cd8c000 },
- { _MMIO(0x9888), 0x04d8c000 },
- { _MMIO(0x9888), 0x00db4000 },
- { _MMIO(0x9888), 0x0edb4000 },
- { _MMIO(0x9888), 0x18db5400 },
- { _MMIO(0x9888), 0x1adb0155 },
- { _MMIO(0x9888), 0x02db4000 },
- { _MMIO(0x9888), 0x04db4000 },
- { _MMIO(0x9888), 0x06db4000 },
- { _MMIO(0x9888), 0x08db4000 },
- { _MMIO(0x9888), 0x0adb4000 },
- { _MMIO(0x9888), 0x0c9fa800 },
- { _MMIO(0x9888), 0x0e9faa2a },
- { _MMIO(0x9888), 0x109f02aa },
- { _MMIO(0x9888), 0x00b84000 },
- { _MMIO(0x9888), 0x0eb84000 },
- { _MMIO(0x9888), 0x16b84000 },
- { _MMIO(0x9888), 0x18b81555 },
- { _MMIO(0x9888), 0x02b84000 },
- { _MMIO(0x9888), 0x04b84000 },
- { _MMIO(0x9888), 0x06b84000 },
- { _MMIO(0x9888), 0x08b84000 },
- { _MMIO(0x9888), 0x0ab84000 },
- { _MMIO(0x9888), 0x00b9a000 },
- { _MMIO(0x9888), 0x06b98000 },
- { _MMIO(0x9888), 0x08b9a000 },
- { _MMIO(0x9888), 0x0ab9a000 },
- { _MMIO(0x9888), 0x0cb9a000 },
- { _MMIO(0x9888), 0x0eb9a000 },
- { _MMIO(0x9888), 0x02b9a000 },
- { _MMIO(0x9888), 0x04b9a000 },
- { _MMIO(0x9888), 0x01888000 },
- { _MMIO(0x9888), 0x0d88f800 },
- { _MMIO(0x9888), 0x0f88000f },
- { _MMIO(0x9888), 0x03888000 },
- { _MMIO(0x9888), 0x05888000 },
- { _MMIO(0x9888), 0x07888000 },
- { _MMIO(0x9888), 0x09888000 },
- { _MMIO(0x9888), 0x0b888000 },
- { _MMIO(0x9888), 0x238b5540 },
- { _MMIO(0x9888), 0x258baaa2 },
- { _MMIO(0x9888), 0x278b002a },
- { _MMIO(0x9888), 0x018c4000 },
- { _MMIO(0x9888), 0x0f8c4000 },
- { _MMIO(0x9888), 0x178c2000 },
- { _MMIO(0x9888), 0x198c5500 },
- { _MMIO(0x9888), 0x1b8c0015 },
- { _MMIO(0x9888), 0x038c4000 },
- { _MMIO(0x9888), 0x058c4000 },
- { _MMIO(0x9888), 0x078c4000 },
- { _MMIO(0x9888), 0x098c4000 },
- { _MMIO(0x9888), 0x0b8c4000 },
- { _MMIO(0x9888), 0x018da000 },
- { _MMIO(0x9888), 0x078d8000 },
- { _MMIO(0x9888), 0x098da000 },
- { _MMIO(0x9888), 0x0b8da000 },
- { _MMIO(0x9888), 0x0d8da000 },
- { _MMIO(0x9888), 0x0f8da000 },
- { _MMIO(0x9888), 0x038da000 },
- { _MMIO(0x9888), 0x058da000 },
- { _MMIO(0x9888), 0x1f85aa80 },
- { _MMIO(0x9888), 0x2185aaa2 },
- { _MMIO(0x9888), 0x2385002a },
- { _MMIO(0x9888), 0x01834000 },
- { _MMIO(0x9888), 0x0f834000 },
- { _MMIO(0x9888), 0x19835400 },
- { _MMIO(0x9888), 0x1b830155 },
- { _MMIO(0x9888), 0x03834000 },
- { _MMIO(0x9888), 0x05834000 },
- { _MMIO(0x9888), 0x07834000 },
- { _MMIO(0x9888), 0x09834000 },
- { _MMIO(0x9888), 0x0b834000 },
- { _MMIO(0x9888), 0x0184c000 },
- { _MMIO(0x9888), 0x07848000 },
- { _MMIO(0x9888), 0x0984c000 },
- { _MMIO(0x9888), 0x0b84c000 },
- { _MMIO(0x9888), 0x0d84c000 },
- { _MMIO(0x9888), 0x0f84c000 },
- { _MMIO(0x9888), 0x0384c000 },
- { _MMIO(0x9888), 0x0584c000 },
- { _MMIO(0x9888), 0x1180c000 },
- { _MMIO(0x9888), 0x17808000 },
- { _MMIO(0x9888), 0x1980c000 },
- { _MMIO(0x9888), 0x1b80c000 },
- { _MMIO(0x9888), 0x1d80c000 },
- { _MMIO(0x9888), 0x1f80c000 },
- { _MMIO(0x9888), 0x1380c000 },
- { _MMIO(0x9888), 0x1580c000 },
- { _MMIO(0xd24), 0x00000000 },
- { _MMIO(0x9888), 0x4d800000 },
- { _MMIO(0x9888), 0x3d800000 },
- { _MMIO(0x9888), 0x4f800000 },
- { _MMIO(0x9888), 0x43800000 },
- { _MMIO(0x9888), 0x51800000 },
- { _MMIO(0x9888), 0x45800000 },
- { _MMIO(0x9888), 0x53800000 },
- { _MMIO(0x9888), 0x47800420 },
- { _MMIO(0x9888), 0x21800000 },
- { _MMIO(0x9888), 0x31800000 },
- { _MMIO(0x9888), 0x3f800421 },
- { _MMIO(0x9888), 0x41800000 },
-};
-
-static const struct i915_oa_reg mux_config_compute_extended_3_subslices_0x10[] = {
- { _MMIO(0x9888), 0x10dc00e0 },
- { _MMIO(0x9888), 0x14db0160 },
- { _MMIO(0x9888), 0x16db2800 },
- { _MMIO(0x9888), 0x18db0120 },
- { _MMIO(0x9888), 0x0edc25c1 },
- { _MMIO(0x9888), 0x00dc6100 },
- { _MMIO(0x9888), 0x02dc204c },
- { _MMIO(0x9888), 0x06dc8000 },
- { _MMIO(0x9888), 0x08dcc000 },
- { _MMIO(0x9888), 0x0adcc000 },
- { _MMIO(0x9888), 0x0cdcc000 },
- { _MMIO(0x9888), 0x04dcc000 },
- { _MMIO(0x9888), 0x00db0011 },
- { _MMIO(0x9888), 0x06db0900 },
- { _MMIO(0x9888), 0x08db0a13 },
- { _MMIO(0x9888), 0x0adb0b15 },
- { _MMIO(0x9888), 0x0cdb2317 },
- { _MMIO(0x9888), 0x04db21b7 },
- { _MMIO(0x9888), 0x10db0000 },
- { _MMIO(0x9888), 0x0edb0000 },
- { _MMIO(0x9888), 0x1adb0000 },
- { _MMIO(0x9888), 0x0c9fa800 },
- { _MMIO(0x9888), 0x0e9faa2a },
- { _MMIO(0x9888), 0x109f02aa },
- { _MMIO(0x9888), 0x00b84000 },
- { _MMIO(0x9888), 0x0eb84000 },
- { _MMIO(0x9888), 0x16b84000 },
- { _MMIO(0x9888), 0x18b81555 },
- { _MMIO(0x9888), 0x02b84000 },
- { _MMIO(0x9888), 0x04b84000 },
- { _MMIO(0x9888), 0x06b84000 },
- { _MMIO(0x9888), 0x08b84000 },
- { _MMIO(0x9888), 0x0ab84000 },
- { _MMIO(0x9888), 0x00b9a000 },
- { _MMIO(0x9888), 0x06b98000 },
- { _MMIO(0x9888), 0x08b9a000 },
- { _MMIO(0x9888), 0x0ab9a000 },
- { _MMIO(0x9888), 0x0cb9a000 },
- { _MMIO(0x9888), 0x0eb9a000 },
- { _MMIO(0x9888), 0x02b9a000 },
- { _MMIO(0x9888), 0x04b9a000 },
- { _MMIO(0x9888), 0x01888000 },
- { _MMIO(0x9888), 0x0d88f800 },
- { _MMIO(0x9888), 0x0f88000f },
- { _MMIO(0x9888), 0x03888000 },
- { _MMIO(0x9888), 0x05888000 },
- { _MMIO(0x9888), 0x07888000 },
- { _MMIO(0x9888), 0x09888000 },
- { _MMIO(0x9888), 0x0b888000 },
- { _MMIO(0x9888), 0x238b5540 },
- { _MMIO(0x9888), 0x258baaa2 },
- { _MMIO(0x9888), 0x278b002a },
- { _MMIO(0x9888), 0x018c4000 },
- { _MMIO(0x9888), 0x0f8c4000 },
- { _MMIO(0x9888), 0x178c2000 },
- { _MMIO(0x9888), 0x198c5500 },
- { _MMIO(0x9888), 0x1b8c0015 },
- { _MMIO(0x9888), 0x038c4000 },
- { _MMIO(0x9888), 0x058c4000 },
- { _MMIO(0x9888), 0x078c4000 },
- { _MMIO(0x9888), 0x098c4000 },
- { _MMIO(0x9888), 0x0b8c4000 },
- { _MMIO(0x9888), 0x018da000 },
- { _MMIO(0x9888), 0x078d8000 },
- { _MMIO(0x9888), 0x098da000 },
- { _MMIO(0x9888), 0x0b8da000 },
- { _MMIO(0x9888), 0x0d8da000 },
- { _MMIO(0x9888), 0x0f8da000 },
- { _MMIO(0x9888), 0x038da000 },
- { _MMIO(0x9888), 0x058da000 },
- { _MMIO(0x9888), 0x1f85aa80 },
- { _MMIO(0x9888), 0x2185aaa2 },
- { _MMIO(0x9888), 0x2385002a },
- { _MMIO(0x9888), 0x01834000 },
- { _MMIO(0x9888), 0x0f834000 },
- { _MMIO(0x9888), 0x19835400 },
- { _MMIO(0x9888), 0x1b830155 },
- { _MMIO(0x9888), 0x03834000 },
- { _MMIO(0x9888), 0x05834000 },
- { _MMIO(0x9888), 0x07834000 },
- { _MMIO(0x9888), 0x09834000 },
- { _MMIO(0x9888), 0x0b834000 },
- { _MMIO(0x9888), 0x0184c000 },
- { _MMIO(0x9888), 0x07848000 },
- { _MMIO(0x9888), 0x0984c000 },
- { _MMIO(0x9888), 0x0b84c000 },
- { _MMIO(0x9888), 0x0d84c000 },
- { _MMIO(0x9888), 0x0f84c000 },
- { _MMIO(0x9888), 0x0384c000 },
- { _MMIO(0x9888), 0x0584c000 },
- { _MMIO(0x9888), 0x1180c000 },
- { _MMIO(0x9888), 0x17808000 },
- { _MMIO(0x9888), 0x1980c000 },
- { _MMIO(0x9888), 0x1b80c000 },
- { _MMIO(0x9888), 0x1d80c000 },
- { _MMIO(0x9888), 0x1f80c000 },
- { _MMIO(0x9888), 0x1380c000 },
- { _MMIO(0x9888), 0x1580c000 },
- { _MMIO(0xd24), 0x00000000 },
- { _MMIO(0x9888), 0x4d800000 },
- { _MMIO(0x9888), 0x3d800000 },
- { _MMIO(0x9888), 0x4f800000 },
- { _MMIO(0x9888), 0x43800000 },
- { _MMIO(0x9888), 0x51800000 },
- { _MMIO(0x9888), 0x45800000 },
- { _MMIO(0x9888), 0x53800000 },
- { _MMIO(0x9888), 0x47800420 },
- { _MMIO(0x9888), 0x21800000 },
- { _MMIO(0x9888), 0x31800000 },
- { _MMIO(0x9888), 0x3f800421 },
- { _MMIO(0x9888), 0x41800000 },
-};
-
-static const struct i915_oa_reg mux_config_compute_extended_5_subslices_0x20[] = {
- { _MMIO(0x9888), 0x10b800e0 },
- { _MMIO(0x9888), 0x14ba0160 },
- { _MMIO(0x9888), 0x16ba2800 },
- { _MMIO(0x9888), 0x18ba0120 },
- { _MMIO(0x9888), 0x0c9fa800 },
- { _MMIO(0x9888), 0x0e9faa2a },
- { _MMIO(0x9888), 0x109f02aa },
- { _MMIO(0x9888), 0x0eb8a5c1 },
- { _MMIO(0x9888), 0x00b8a100 },
- { _MMIO(0x9888), 0x02b8204c },
- { _MMIO(0x9888), 0x16b88000 },
- { _MMIO(0x9888), 0x18b802aa },
- { _MMIO(0x9888), 0x04b80000 },
- { _MMIO(0x9888), 0x06b80000 },
- { _MMIO(0x9888), 0x08b88000 },
- { _MMIO(0x9888), 0x0ab88000 },
- { _MMIO(0x9888), 0x00b9a000 },
- { _MMIO(0x9888), 0x06b98000 },
- { _MMIO(0x9888), 0x08b9a000 },
- { _MMIO(0x9888), 0x0ab9a000 },
- { _MMIO(0x9888), 0x0cb9a000 },
- { _MMIO(0x9888), 0x0eb9a000 },
- { _MMIO(0x9888), 0x02b9a000 },
- { _MMIO(0x9888), 0x04b9a000 },
- { _MMIO(0x9888), 0x00ba0011 },
- { _MMIO(0x9888), 0x06ba0900 },
- { _MMIO(0x9888), 0x08ba0a13 },
- { _MMIO(0x9888), 0x0aba0b15 },
- { _MMIO(0x9888), 0x0cba2317 },
- { _MMIO(0x9888), 0x04ba21b7 },
- { _MMIO(0x9888), 0x10ba0000 },
- { _MMIO(0x9888), 0x0eba0000 },
- { _MMIO(0x9888), 0x1aba0000 },
- { _MMIO(0x9888), 0x01888000 },
- { _MMIO(0x9888), 0x0d88f800 },
- { _MMIO(0x9888), 0x0f88000f },
- { _MMIO(0x9888), 0x03888000 },
- { _MMIO(0x9888), 0x05888000 },
- { _MMIO(0x9888), 0x07888000 },
- { _MMIO(0x9888), 0x09888000 },
- { _MMIO(0x9888), 0x0b888000 },
- { _MMIO(0x9888), 0x238b5540 },
- { _MMIO(0x9888), 0x258baaa2 },
- { _MMIO(0x9888), 0x278b002a },
- { _MMIO(0x9888), 0x018c4000 },
- { _MMIO(0x9888), 0x0f8c4000 },
- { _MMIO(0x9888), 0x178c2000 },
- { _MMIO(0x9888), 0x198c5500 },
- { _MMIO(0x9888), 0x1b8c0015 },
- { _MMIO(0x9888), 0x038c4000 },
- { _MMIO(0x9888), 0x058c4000 },
- { _MMIO(0x9888), 0x078c4000 },
- { _MMIO(0x9888), 0x098c4000 },
- { _MMIO(0x9888), 0x0b8c4000 },
- { _MMIO(0x9888), 0x018da000 },
- { _MMIO(0x9888), 0x078d8000 },
- { _MMIO(0x9888), 0x098da000 },
- { _MMIO(0x9888), 0x0b8da000 },
- { _MMIO(0x9888), 0x0d8da000 },
- { _MMIO(0x9888), 0x0f8da000 },
- { _MMIO(0x9888), 0x038da000 },
- { _MMIO(0x9888), 0x058da000 },
- { _MMIO(0x9888), 0x1f85aa80 },
- { _MMIO(0x9888), 0x2185aaa2 },
- { _MMIO(0x9888), 0x2385002a },
- { _MMIO(0x9888), 0x01834000 },
- { _MMIO(0x9888), 0x0f834000 },
- { _MMIO(0x9888), 0x19835400 },
- { _MMIO(0x9888), 0x1b830155 },
- { _MMIO(0x9888), 0x03834000 },
- { _MMIO(0x9888), 0x05834000 },
- { _MMIO(0x9888), 0x07834000 },
- { _MMIO(0x9888), 0x09834000 },
- { _MMIO(0x9888), 0x0b834000 },
- { _MMIO(0x9888), 0x0184c000 },
- { _MMIO(0x9888), 0x07848000 },
- { _MMIO(0x9888), 0x0984c000 },
- { _MMIO(0x9888), 0x0b84c000 },
- { _MMIO(0x9888), 0x0d84c000 },
- { _MMIO(0x9888), 0x0f84c000 },
- { _MMIO(0x9888), 0x0384c000 },
- { _MMIO(0x9888), 0x0584c000 },
- { _MMIO(0x9888), 0x1180c000 },
- { _MMIO(0x9888), 0x17808000 },
- { _MMIO(0x9888), 0x1980c000 },
- { _MMIO(0x9888), 0x1b80c000 },
- { _MMIO(0x9888), 0x1d80c000 },
- { _MMIO(0x9888), 0x1f80c000 },
- { _MMIO(0x9888), 0x1380c000 },
- { _MMIO(0x9888), 0x1580c000 },
- { _MMIO(0xd24), 0x00000000 },
- { _MMIO(0x9888), 0x4d800000 },
- { _MMIO(0x9888), 0x3d800000 },
- { _MMIO(0x9888), 0x4f800000 },
- { _MMIO(0x9888), 0x43800000 },
- { _MMIO(0x9888), 0x51800000 },
- { _MMIO(0x9888), 0x45800000 },
- { _MMIO(0x9888), 0x53800000 },
- { _MMIO(0x9888), 0x47800420 },
- { _MMIO(0x9888), 0x21800000 },
- { _MMIO(0x9888), 0x31800000 },
- { _MMIO(0x9888), 0x3f800421 },
- { _MMIO(0x9888), 0x41800000 },
-};
-
-static int
-get_compute_extended_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 6);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 6);
-
- if (INTEL_INFO(dev_priv)->sseu.subslice_mask & 0x01) {
- regs[n] = mux_config_compute_extended_0_subslices_0x01;
- lens[n] = ARRAY_SIZE(mux_config_compute_extended_0_subslices_0x01);
- n++;
- }
- if (INTEL_INFO(dev_priv)->sseu.subslice_mask & 0x08) {
- regs[n] = mux_config_compute_extended_1_subslices_0x08;
- lens[n] = ARRAY_SIZE(mux_config_compute_extended_1_subslices_0x08);
- n++;
- }
- if (INTEL_INFO(dev_priv)->sseu.subslice_mask & 0x02) {
- regs[n] = mux_config_compute_extended_2_subslices_0x02;
- lens[n] = ARRAY_SIZE(mux_config_compute_extended_2_subslices_0x02);
- n++;
- }
- if (INTEL_INFO(dev_priv)->sseu.subslice_mask & 0x10) {
- regs[n] = mux_config_compute_extended_3_subslices_0x10;
- lens[n] = ARRAY_SIZE(mux_config_compute_extended_3_subslices_0x10);
- n++;
- }
- if (INTEL_INFO(dev_priv)->sseu.subslice_mask & 0x04) {
- regs[n] = mux_config_compute_extended_4_subslices_0x04;
- lens[n] = ARRAY_SIZE(mux_config_compute_extended_4_subslices_0x04);
- n++;
- }
- if (INTEL_INFO(dev_priv)->sseu.subslice_mask & 0x20) {
- regs[n] = mux_config_compute_extended_5_subslices_0x20;
- lens[n] = ARRAY_SIZE(mux_config_compute_extended_5_subslices_0x20);
- n++;
- }
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_compute_l3_cache[] = {
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x30800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x30800000 },
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2770), 0x0007fffa },
- { _MMIO(0x2774), 0x0000fefe },
- { _MMIO(0x2778), 0x0007fffa },
- { _MMIO(0x277c), 0x0000fefd },
- { _MMIO(0x2790), 0x0007fffa },
- { _MMIO(0x2794), 0x0000fbef },
- { _MMIO(0x2798), 0x0007fffa },
- { _MMIO(0x279c), 0x0000fbdf },
-};
-
-static const struct i915_oa_reg flex_eu_config_compute_l3_cache[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00000003 },
- { _MMIO(0xe658), 0x00002001 },
- { _MMIO(0xe758), 0x00101100 },
- { _MMIO(0xe45c), 0x00201200 },
- { _MMIO(0xe55c), 0x00301300 },
- { _MMIO(0xe65c), 0x00401400 },
-};
-
-static const struct i915_oa_reg mux_config_compute_l3_cache[] = {
- { _MMIO(0x9888), 0x143f00b3 },
- { _MMIO(0x9888), 0x14bf00b3 },
- { _MMIO(0x9888), 0x138303c0 },
- { _MMIO(0x9888), 0x3b800060 },
- { _MMIO(0x9888), 0x3d800805 },
- { _MMIO(0x9888), 0x003f0029 },
- { _MMIO(0x9888), 0x063f1400 },
- { _MMIO(0x9888), 0x083f1225 },
- { _MMIO(0x9888), 0x0e3f1327 },
- { _MMIO(0x9888), 0x103f0000 },
- { _MMIO(0x9888), 0x005a4000 },
- { _MMIO(0x9888), 0x065a8000 },
- { _MMIO(0x9888), 0x085ac000 },
- { _MMIO(0x9888), 0x0e5ac000 },
- { _MMIO(0x9888), 0x001d4000 },
- { _MMIO(0x9888), 0x061d8000 },
- { _MMIO(0x9888), 0x081dc000 },
- { _MMIO(0x9888), 0x0e1dc000 },
- { _MMIO(0x9888), 0x0c1f0800 },
- { _MMIO(0x9888), 0x0e1f2a00 },
- { _MMIO(0x9888), 0x101f0280 },
- { _MMIO(0x9888), 0x00391000 },
- { _MMIO(0x9888), 0x06394000 },
- { _MMIO(0x9888), 0x08395000 },
- { _MMIO(0x9888), 0x0e395000 },
- { _MMIO(0x9888), 0x0abf1429 },
- { _MMIO(0x9888), 0x0cbf1225 },
- { _MMIO(0x9888), 0x00bf1380 },
- { _MMIO(0x9888), 0x02bf0026 },
- { _MMIO(0x9888), 0x10bf0000 },
- { _MMIO(0x9888), 0x0adac000 },
- { _MMIO(0x9888), 0x0cdac000 },
- { _MMIO(0x9888), 0x00da8000 },
- { _MMIO(0x9888), 0x02da4000 },
- { _MMIO(0x9888), 0x0a9dc000 },
- { _MMIO(0x9888), 0x0c9dc000 },
- { _MMIO(0x9888), 0x009d8000 },
- { _MMIO(0x9888), 0x029d4000 },
- { _MMIO(0x9888), 0x0e9f8000 },
- { _MMIO(0x9888), 0x109f002a },
- { _MMIO(0x9888), 0x0c9fa000 },
- { _MMIO(0x9888), 0x0ab95000 },
- { _MMIO(0x9888), 0x0cb95000 },
- { _MMIO(0x9888), 0x00b94000 },
- { _MMIO(0x9888), 0x02b91000 },
- { _MMIO(0x9888), 0x0d88c000 },
- { _MMIO(0x9888), 0x0f880003 },
- { _MMIO(0x9888), 0x03888000 },
- { _MMIO(0x9888), 0x05888000 },
- { _MMIO(0x9888), 0x018a8000 },
- { _MMIO(0x9888), 0x0f8a8000 },
- { _MMIO(0x9888), 0x198a8000 },
- { _MMIO(0x9888), 0x1b8a8020 },
- { _MMIO(0x9888), 0x1d8a0002 },
- { _MMIO(0x9888), 0x238b0520 },
- { _MMIO(0x9888), 0x258ba950 },
- { _MMIO(0x9888), 0x278b0016 },
- { _MMIO(0x9888), 0x198c5400 },
- { _MMIO(0x9888), 0x1b8c0001 },
- { _MMIO(0x9888), 0x038c4000 },
- { _MMIO(0x9888), 0x058c4000 },
- { _MMIO(0x9888), 0x0b8da000 },
- { _MMIO(0x9888), 0x0d8da000 },
- { _MMIO(0x9888), 0x018d8000 },
- { _MMIO(0x9888), 0x038d2000 },
- { _MMIO(0x9888), 0x1f85aa80 },
- { _MMIO(0x9888), 0x2185aaa0 },
- { _MMIO(0x9888), 0x2385002a },
- { _MMIO(0x9888), 0x03835180 },
- { _MMIO(0x9888), 0x05834022 },
- { _MMIO(0x9888), 0x11830000 },
- { _MMIO(0x9888), 0x01834000 },
- { _MMIO(0x9888), 0x0f834000 },
- { _MMIO(0x9888), 0x19835400 },
- { _MMIO(0x9888), 0x1b830155 },
- { _MMIO(0x9888), 0x07830000 },
- { _MMIO(0x9888), 0x09830000 },
- { _MMIO(0x9888), 0x0184c000 },
- { _MMIO(0x9888), 0x07848000 },
- { _MMIO(0x9888), 0x0984c000 },
- { _MMIO(0x9888), 0x0b84c000 },
- { _MMIO(0x9888), 0x0d84c000 },
- { _MMIO(0x9888), 0x0f84c000 },
- { _MMIO(0x9888), 0x0384c000 },
- { _MMIO(0x9888), 0x05844000 },
- { _MMIO(0x9888), 0x1b80c137 },
- { _MMIO(0x9888), 0x1d80c147 },
- { _MMIO(0x9888), 0x21800000 },
- { _MMIO(0x9888), 0x1180c000 },
- { _MMIO(0x9888), 0x17808000 },
- { _MMIO(0x9888), 0x1980c000 },
- { _MMIO(0x9888), 0x1f80c000 },
- { _MMIO(0x9888), 0x1380c000 },
- { _MMIO(0x9888), 0x15804000 },
- { _MMIO(0xd24), 0x00000000 },
- { _MMIO(0x9888), 0x4d801000 },
- { _MMIO(0x9888), 0x4f800111 },
- { _MMIO(0x9888), 0x43800842 },
- { _MMIO(0x9888), 0x51800000 },
- { _MMIO(0x9888), 0x45800000 },
- { _MMIO(0x9888), 0x53800000 },
- { _MMIO(0x9888), 0x47800840 },
- { _MMIO(0x9888), 0x31800000 },
- { _MMIO(0x9888), 0x3f800800 },
- { _MMIO(0x9888), 0x418014a2 },
-};
-
-static int
-get_compute_l3_cache_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_compute_l3_cache;
- lens[n] = ARRAY_SIZE(mux_config_compute_l3_cache);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_data_port_reads_coalescing[] = {
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x274c), 0xba98ba98 },
- { _MMIO(0x2748), 0xba98ba98 },
- { _MMIO(0x2744), 0x00003377 },
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2770), 0x0007fff2 },
- { _MMIO(0x2774), 0x00007ff0 },
- { _MMIO(0x2778), 0x0007ffe2 },
- { _MMIO(0x277c), 0x00007ff0 },
- { _MMIO(0x2780), 0x0007ffc2 },
- { _MMIO(0x2784), 0x00007ff0 },
- { _MMIO(0x2788), 0x0007ff82 },
- { _MMIO(0x278c), 0x00007ff0 },
- { _MMIO(0x2790), 0x0007fffa },
- { _MMIO(0x2794), 0x0000bfef },
- { _MMIO(0x2798), 0x0007fffa },
- { _MMIO(0x279c), 0x0000bfdf },
- { _MMIO(0x27a0), 0x0007fffa },
- { _MMIO(0x27a4), 0x0000bfbf },
- { _MMIO(0x27a8), 0x0007fffa },
- { _MMIO(0x27ac), 0x0000bf7f },
-};
-
-static const struct i915_oa_reg flex_eu_config_data_port_reads_coalescing[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00000003 },
- { _MMIO(0xe658), 0x00002001 },
- { _MMIO(0xe758), 0x00778008 },
- { _MMIO(0xe45c), 0x00088078 },
- { _MMIO(0xe55c), 0x00808708 },
- { _MMIO(0xe65c), 0x00a08908 },
-};
-
-static const struct i915_oa_reg mux_config_data_port_reads_coalescing_0_subslices_0x01[] = {
- { _MMIO(0x9888), 0x103d0005 },
- { _MMIO(0x9888), 0x163d240b },
- { _MMIO(0x9888), 0x1058022f },
- { _MMIO(0x9888), 0x185b5520 },
- { _MMIO(0x9888), 0x198b0003 },
- { _MMIO(0x9888), 0x005cc000 },
- { _MMIO(0x9888), 0x065cc000 },
- { _MMIO(0x9888), 0x085cc000 },
- { _MMIO(0x9888), 0x0a5cc000 },
- { _MMIO(0x9888), 0x0c5cc000 },
- { _MMIO(0x9888), 0x0e5cc000 },
- { _MMIO(0x9888), 0x025c4000 },
- { _MMIO(0x9888), 0x045c8000 },
- { _MMIO(0x9888), 0x003d0000 },
- { _MMIO(0x9888), 0x063d00b0 },
- { _MMIO(0x9888), 0x083d0182 },
- { _MMIO(0x9888), 0x0a3d10a0 },
- { _MMIO(0x9888), 0x0c3d11a2 },
- { _MMIO(0x9888), 0x0e3d0000 },
- { _MMIO(0x9888), 0x183d0000 },
- { _MMIO(0x9888), 0x1a3d0000 },
- { _MMIO(0x9888), 0x0e582242 },
- { _MMIO(0x9888), 0x00586700 },
- { _MMIO(0x9888), 0x0258004f },
- { _MMIO(0x9888), 0x0658c000 },
- { _MMIO(0x9888), 0x0858c000 },
- { _MMIO(0x9888), 0x0a58c000 },
- { _MMIO(0x9888), 0x0c58c000 },
- { _MMIO(0x9888), 0x045b6300 },
- { _MMIO(0x9888), 0x105b0000 },
- { _MMIO(0x9888), 0x005b4000 },
- { _MMIO(0x9888), 0x0e5b4000 },
- { _MMIO(0x9888), 0x1a5b0155 },
- { _MMIO(0x9888), 0x025b4000 },
- { _MMIO(0x9888), 0x0a5b0000 },
- { _MMIO(0x9888), 0x0c5b4000 },
- { _MMIO(0x9888), 0x0c1fa800 },
- { _MMIO(0x9888), 0x0e1faaa0 },
- { _MMIO(0x9888), 0x101f02aa },
- { _MMIO(0x9888), 0x00384000 },
- { _MMIO(0x9888), 0x0e384000 },
- { _MMIO(0x9888), 0x16384000 },
- { _MMIO(0x9888), 0x18381555 },
- { _MMIO(0x9888), 0x02384000 },
- { _MMIO(0x9888), 0x04384000 },
- { _MMIO(0x9888), 0x0a384000 },
- { _MMIO(0x9888), 0x0c384000 },
- { _MMIO(0x9888), 0x0039a000 },
- { _MMIO(0x9888), 0x0639a000 },
- { _MMIO(0x9888), 0x0839a000 },
- { _MMIO(0x9888), 0x0a39a000 },
- { _MMIO(0x9888), 0x0c39a000 },
- { _MMIO(0x9888), 0x0e39a000 },
- { _MMIO(0x9888), 0x02392000 },
- { _MMIO(0x9888), 0x04398000 },
- { _MMIO(0x9888), 0x018a8000 },
- { _MMIO(0x9888), 0x0f8a8000 },
- { _MMIO(0x9888), 0x198a8000 },
- { _MMIO(0x9888), 0x1b8aaaa0 },
- { _MMIO(0x9888), 0x1d8a0002 },
- { _MMIO(0x9888), 0x038a8000 },
- { _MMIO(0x9888), 0x058a8000 },
- { _MMIO(0x9888), 0x0b8a8000 },
- { _MMIO(0x9888), 0x0d8a8000 },
- { _MMIO(0x9888), 0x038b6300 },
- { _MMIO(0x9888), 0x058b0062 },
- { _MMIO(0x9888), 0x118b0000 },
- { _MMIO(0x9888), 0x238b02a0 },
- { _MMIO(0x9888), 0x258b5555 },
- { _MMIO(0x9888), 0x278b0015 },
- { _MMIO(0x9888), 0x1f85aa80 },
- { _MMIO(0x9888), 0x2185aaaa },
- { _MMIO(0x9888), 0x2385002a },
- { _MMIO(0x9888), 0x01834000 },
- { _MMIO(0x9888), 0x0f834000 },
- { _MMIO(0x9888), 0x19835400 },
- { _MMIO(0x9888), 0x1b830155 },
- { _MMIO(0x9888), 0x03834000 },
- { _MMIO(0x9888), 0x05834000 },
- { _MMIO(0x9888), 0x07834000 },
- { _MMIO(0x9888), 0x09834000 },
- { _MMIO(0x9888), 0x0b834000 },
- { _MMIO(0x9888), 0x0d834000 },
- { _MMIO(0x9888), 0x0184c000 },
- { _MMIO(0x9888), 0x0784c000 },
- { _MMIO(0x9888), 0x0984c000 },
- { _MMIO(0x9888), 0x0b84c000 },
- { _MMIO(0x9888), 0x0d84c000 },
- { _MMIO(0x9888), 0x0f84c000 },
- { _MMIO(0x9888), 0x0384c000 },
- { _MMIO(0x9888), 0x0584c000 },
- { _MMIO(0x9888), 0x1180c000 },
- { _MMIO(0x9888), 0x1780c000 },
- { _MMIO(0x9888), 0x1980c000 },
- { _MMIO(0x9888), 0x1b80c000 },
- { _MMIO(0x9888), 0x1d80c000 },
- { _MMIO(0x9888), 0x1f80c000 },
- { _MMIO(0x9888), 0x1380c000 },
- { _MMIO(0x9888), 0x1580c000 },
- { _MMIO(0xd24), 0x00000000 },
- { _MMIO(0x9888), 0x4d801000 },
- { _MMIO(0x9888), 0x3d800000 },
- { _MMIO(0x9888), 0x4f800001 },
- { _MMIO(0x9888), 0x43800000 },
- { _MMIO(0x9888), 0x51800000 },
- { _MMIO(0x9888), 0x45800000 },
- { _MMIO(0x9888), 0x53800000 },
- { _MMIO(0x9888), 0x47800420 },
- { _MMIO(0x9888), 0x21800000 },
- { _MMIO(0x9888), 0x31800000 },
- { _MMIO(0x9888), 0x3f800421 },
- { _MMIO(0x9888), 0x41800041 },
-};
-
-static int
-get_data_port_reads_coalescing_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- if (INTEL_INFO(dev_priv)->sseu.subslice_mask & 0x01) {
- regs[n] = mux_config_data_port_reads_coalescing_0_subslices_0x01;
- lens[n] = ARRAY_SIZE(mux_config_data_port_reads_coalescing_0_subslices_0x01);
- n++;
- }
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_data_port_writes_coalescing[] = {
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x274c), 0xba98ba98 },
- { _MMIO(0x2748), 0xba98ba98 },
- { _MMIO(0x2744), 0x00003377 },
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2770), 0x0007ff72 },
- { _MMIO(0x2774), 0x0000bfd0 },
- { _MMIO(0x2778), 0x0007ff62 },
- { _MMIO(0x277c), 0x0000bfd0 },
- { _MMIO(0x2780), 0x0007ff42 },
- { _MMIO(0x2784), 0x0000bfd0 },
- { _MMIO(0x2788), 0x0007ff02 },
- { _MMIO(0x278c), 0x0000bfd0 },
- { _MMIO(0x2790), 0x0005fff2 },
- { _MMIO(0x2794), 0x0000bfd0 },
- { _MMIO(0x2798), 0x0005ffe2 },
- { _MMIO(0x279c), 0x0000bfd0 },
- { _MMIO(0x27a0), 0x0005ffc2 },
- { _MMIO(0x27a4), 0x0000bfd0 },
- { _MMIO(0x27a8), 0x0005ff82 },
- { _MMIO(0x27ac), 0x0000bfd0 },
-};
-
-static const struct i915_oa_reg flex_eu_config_data_port_writes_coalescing[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00000003 },
- { _MMIO(0xe658), 0x00002001 },
- { _MMIO(0xe758), 0x00778008 },
- { _MMIO(0xe45c), 0x00088078 },
- { _MMIO(0xe55c), 0x00808708 },
- { _MMIO(0xe65c), 0x00a08908 },
-};
-
-static const struct i915_oa_reg mux_config_data_port_writes_coalescing_0_subslices_0x01[] = {
- { _MMIO(0x9888), 0x103d0005 },
- { _MMIO(0x9888), 0x143d0120 },
- { _MMIO(0x9888), 0x163d2400 },
- { _MMIO(0x9888), 0x1058022f },
- { _MMIO(0x9888), 0x105b0000 },
- { _MMIO(0x9888), 0x198b0003 },
- { _MMIO(0x9888), 0x005cc000 },
- { _MMIO(0x9888), 0x065cc000 },
- { _MMIO(0x9888), 0x085cc000 },
- { _MMIO(0x9888), 0x0a5cc000 },
- { _MMIO(0x9888), 0x0e5cc000 },
- { _MMIO(0x9888), 0x025c4000 },
- { _MMIO(0x9888), 0x045c8000 },
- { _MMIO(0x9888), 0x003d0000 },
- { _MMIO(0x9888), 0x063d0094 },
- { _MMIO(0x9888), 0x083d0182 },
- { _MMIO(0x9888), 0x0a3d1814 },
- { _MMIO(0x9888), 0x0e3d0000 },
- { _MMIO(0x9888), 0x183d0000 },
- { _MMIO(0x9888), 0x1a3d0000 },
- { _MMIO(0x9888), 0x0c3d0000 },
- { _MMIO(0x9888), 0x0e582242 },
- { _MMIO(0x9888), 0x00586700 },
- { _MMIO(0x9888), 0x0258004f },
- { _MMIO(0x9888), 0x0658c000 },
- { _MMIO(0x9888), 0x0858c000 },
- { _MMIO(0x9888), 0x0a58c000 },
- { _MMIO(0x9888), 0x045b6a80 },
- { _MMIO(0x9888), 0x005b4000 },
- { _MMIO(0x9888), 0x0e5b4000 },
- { _MMIO(0x9888), 0x185b5400 },
- { _MMIO(0x9888), 0x1a5b0141 },
- { _MMIO(0x9888), 0x025b4000 },
- { _MMIO(0x9888), 0x0a5b0000 },
- { _MMIO(0x9888), 0x0c5b4000 },
- { _MMIO(0x9888), 0x0c1fa800 },
- { _MMIO(0x9888), 0x0e1faaa0 },
- { _MMIO(0x9888), 0x101f0282 },
- { _MMIO(0x9888), 0x00384000 },
- { _MMIO(0x9888), 0x0e384000 },
- { _MMIO(0x9888), 0x16384000 },
- { _MMIO(0x9888), 0x18381415 },
- { _MMIO(0x9888), 0x02384000 },
- { _MMIO(0x9888), 0x04384000 },
- { _MMIO(0x9888), 0x0a384000 },
- { _MMIO(0x9888), 0x0c384000 },
- { _MMIO(0x9888), 0x0039a000 },
- { _MMIO(0x9888), 0x0639a000 },
- { _MMIO(0x9888), 0x0839a000 },
- { _MMIO(0x9888), 0x0a39a000 },
- { _MMIO(0x9888), 0x0e39a000 },
- { _MMIO(0x9888), 0x02392000 },
- { _MMIO(0x9888), 0x04398000 },
- { _MMIO(0x9888), 0x018a8000 },
- { _MMIO(0x9888), 0x0f8a8000 },
- { _MMIO(0x9888), 0x198a8000 },
- { _MMIO(0x9888), 0x1b8a82a0 },
- { _MMIO(0x9888), 0x1d8a0002 },
- { _MMIO(0x9888), 0x038a8000 },
- { _MMIO(0x9888), 0x058a8000 },
- { _MMIO(0x9888), 0x0b8a8000 },
- { _MMIO(0x9888), 0x0d8a8000 },
- { _MMIO(0x9888), 0x038b6300 },
- { _MMIO(0x9888), 0x058b0062 },
- { _MMIO(0x9888), 0x118b0000 },
- { _MMIO(0x9888), 0x238b02a0 },
- { _MMIO(0x9888), 0x258b1555 },
- { _MMIO(0x9888), 0x278b0014 },
- { _MMIO(0x9888), 0x1f85aa80 },
- { _MMIO(0x9888), 0x21852aaa },
- { _MMIO(0x9888), 0x23850028 },
- { _MMIO(0x9888), 0x01834000 },
- { _MMIO(0x9888), 0x0f834000 },
- { _MMIO(0x9888), 0x19835400 },
- { _MMIO(0x9888), 0x1b830141 },
- { _MMIO(0x9888), 0x03834000 },
- { _MMIO(0x9888), 0x05834000 },
- { _MMIO(0x9888), 0x07834000 },
- { _MMIO(0x9888), 0x09834000 },
- { _MMIO(0x9888), 0x0b834000 },
- { _MMIO(0x9888), 0x0d834000 },
- { _MMIO(0x9888), 0x0184c000 },
- { _MMIO(0x9888), 0x0784c000 },
- { _MMIO(0x9888), 0x0984c000 },
- { _MMIO(0x9888), 0x0b84c000 },
- { _MMIO(0x9888), 0x0f84c000 },
- { _MMIO(0x9888), 0x0384c000 },
- { _MMIO(0x9888), 0x0584c000 },
- { _MMIO(0x9888), 0x1180c000 },
- { _MMIO(0x9888), 0x1780c000 },
- { _MMIO(0x9888), 0x1980c000 },
- { _MMIO(0x9888), 0x1b80c000 },
- { _MMIO(0x9888), 0x1f80c000 },
- { _MMIO(0x9888), 0x1380c000 },
- { _MMIO(0x9888), 0x1580c000 },
- { _MMIO(0xd24), 0x00000000 },
- { _MMIO(0x9888), 0x4d801000 },
- { _MMIO(0x9888), 0x3d800000 },
- { _MMIO(0x9888), 0x4f800001 },
- { _MMIO(0x9888), 0x43800000 },
- { _MMIO(0x9888), 0x51800000 },
- { _MMIO(0x9888), 0x45800000 },
- { _MMIO(0x9888), 0x21800000 },
- { _MMIO(0x9888), 0x31800000 },
- { _MMIO(0x9888), 0x53800000 },
- { _MMIO(0x9888), 0x47800420 },
- { _MMIO(0x9888), 0x3f800421 },
- { _MMIO(0x9888), 0x41800041 },
-};
-
-static int
-get_data_port_writes_coalescing_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- if (INTEL_INFO(dev_priv)->sseu.subslice_mask & 0x01) {
- regs[n] = mux_config_data_port_writes_coalescing_0_subslices_0x01;
- lens[n] = ARRAY_SIZE(mux_config_data_port_writes_coalescing_0_subslices_0x01);
- n++;
- }
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_hdc_and_sf[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x10800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2770), 0x00000002 },
- { _MMIO(0x2774), 0x0000fff7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_hdc_and_sf[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_hdc_and_sf[] = {
- { _MMIO(0x9888), 0x105c0232 },
- { _MMIO(0x9888), 0x10580232 },
- { _MMIO(0x9888), 0x10380232 },
- { _MMIO(0x9888), 0x10dc0232 },
- { _MMIO(0x9888), 0x10d80232 },
- { _MMIO(0x9888), 0x10b80232 },
- { _MMIO(0x9888), 0x118e4400 },
- { _MMIO(0x9888), 0x025c6080 },
- { _MMIO(0x9888), 0x045c004b },
- { _MMIO(0x9888), 0x005c8000 },
- { _MMIO(0x9888), 0x00582080 },
- { _MMIO(0x9888), 0x0258004b },
- { _MMIO(0x9888), 0x025b4000 },
- { _MMIO(0x9888), 0x045b4000 },
- { _MMIO(0x9888), 0x0c1fa000 },
- { _MMIO(0x9888), 0x0e1f00aa },
- { _MMIO(0x9888), 0x04386080 },
- { _MMIO(0x9888), 0x0638404b },
- { _MMIO(0x9888), 0x02384000 },
- { _MMIO(0x9888), 0x08384000 },
- { _MMIO(0x9888), 0x0a380000 },
- { _MMIO(0x9888), 0x0c380000 },
- { _MMIO(0x9888), 0x00398000 },
- { _MMIO(0x9888), 0x0239a000 },
- { _MMIO(0x9888), 0x0439a000 },
- { _MMIO(0x9888), 0x06392000 },
- { _MMIO(0x9888), 0x0cdc25c1 },
- { _MMIO(0x9888), 0x0adcc000 },
- { _MMIO(0x9888), 0x0ad825c1 },
- { _MMIO(0x9888), 0x18db4000 },
- { _MMIO(0x9888), 0x1adb0001 },
- { _MMIO(0x9888), 0x0e9f8000 },
- { _MMIO(0x9888), 0x109f02aa },
- { _MMIO(0x9888), 0x0eb825c1 },
- { _MMIO(0x9888), 0x18b80154 },
- { _MMIO(0x9888), 0x0ab9a000 },
- { _MMIO(0x9888), 0x0cb9a000 },
- { _MMIO(0x9888), 0x0eb9a000 },
- { _MMIO(0x9888), 0x0d88c000 },
- { _MMIO(0x9888), 0x0f88000f },
- { _MMIO(0x9888), 0x038a8000 },
- { _MMIO(0x9888), 0x058a8000 },
- { _MMIO(0x9888), 0x078a8000 },
- { _MMIO(0x9888), 0x098a8000 },
- { _MMIO(0x9888), 0x0b8a8000 },
- { _MMIO(0x9888), 0x0d8a8000 },
- { _MMIO(0x9888), 0x258baa05 },
- { _MMIO(0x9888), 0x278b002a },
- { _MMIO(0x9888), 0x238b2a80 },
- { _MMIO(0x9888), 0x198c5400 },
- { _MMIO(0x9888), 0x1b8c0015 },
- { _MMIO(0x9888), 0x098dc000 },
- { _MMIO(0x9888), 0x0b8da000 },
- { _MMIO(0x9888), 0x0d8da000 },
- { _MMIO(0x9888), 0x0f8da000 },
- { _MMIO(0x9888), 0x098e05c0 },
- { _MMIO(0x9888), 0x058e0000 },
- { _MMIO(0x9888), 0x198f0020 },
- { _MMIO(0x9888), 0x2185aa0a },
- { _MMIO(0x9888), 0x2385002a },
- { _MMIO(0x9888), 0x1f85aa00 },
- { _MMIO(0x9888), 0x19835000 },
- { _MMIO(0x9888), 0x1b830155 },
- { _MMIO(0x9888), 0x03834000 },
- { _MMIO(0x9888), 0x05834000 },
- { _MMIO(0x9888), 0x07834000 },
- { _MMIO(0x9888), 0x09834000 },
- { _MMIO(0x9888), 0x0b834000 },
- { _MMIO(0x9888), 0x0d834000 },
- { _MMIO(0x9888), 0x09848000 },
- { _MMIO(0x9888), 0x0b84c000 },
- { _MMIO(0x9888), 0x0d84c000 },
- { _MMIO(0x9888), 0x0f84c000 },
- { _MMIO(0x9888), 0x01848000 },
- { _MMIO(0x9888), 0x0384c000 },
- { _MMIO(0x9888), 0x0584c000 },
- { _MMIO(0x9888), 0x07844000 },
- { _MMIO(0x9888), 0x19808000 },
- { _MMIO(0x9888), 0x1b80c000 },
- { _MMIO(0x9888), 0x1d80c000 },
- { _MMIO(0x9888), 0x1f80c000 },
- { _MMIO(0x9888), 0x11808000 },
- { _MMIO(0x9888), 0x1380c000 },
- { _MMIO(0x9888), 0x1580c000 },
- { _MMIO(0x9888), 0x17804000 },
- { _MMIO(0x9888), 0x51800040 },
- { _MMIO(0x9888), 0x43800400 },
- { _MMIO(0x9888), 0x45800800 },
- { _MMIO(0x9888), 0x53800000 },
- { _MMIO(0x9888), 0x47800c62 },
- { _MMIO(0x9888), 0x21800000 },
- { _MMIO(0x9888), 0x31800000 },
- { _MMIO(0x9888), 0x4d800000 },
- { _MMIO(0x9888), 0x3f801042 },
- { _MMIO(0x9888), 0x4f800000 },
- { _MMIO(0x9888), 0x418014a4 },
-};
-
-static int
-get_hdc_and_sf_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_hdc_and_sf;
- lens[n] = ARRAY_SIZE(mux_config_hdc_and_sf);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_l3_1[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2770), 0x00100070 },
- { _MMIO(0x2774), 0x0000fff1 },
- { _MMIO(0x2778), 0x00014002 },
- { _MMIO(0x277c), 0x0000c3ff },
- { _MMIO(0x2780), 0x00010002 },
- { _MMIO(0x2784), 0x0000c7ff },
- { _MMIO(0x2788), 0x00004002 },
- { _MMIO(0x278c), 0x0000d3ff },
- { _MMIO(0x2790), 0x00100700 },
- { _MMIO(0x2794), 0x0000ff1f },
- { _MMIO(0x2798), 0x00001402 },
- { _MMIO(0x279c), 0x0000fc3f },
- { _MMIO(0x27a0), 0x00001002 },
- { _MMIO(0x27a4), 0x0000fc7f },
- { _MMIO(0x27a8), 0x00000402 },
- { _MMIO(0x27ac), 0x0000fd3f },
-};
-
-static const struct i915_oa_reg flex_eu_config_l3_1[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_l3_1[] = {
- { _MMIO(0x9888), 0x10bf03da },
- { _MMIO(0x9888), 0x14bf0001 },
- { _MMIO(0x9888), 0x12980340 },
- { _MMIO(0x9888), 0x12990340 },
- { _MMIO(0x9888), 0x0cbf1187 },
- { _MMIO(0x9888), 0x0ebf1205 },
- { _MMIO(0x9888), 0x00bf0500 },
- { _MMIO(0x9888), 0x02bf042b },
- { _MMIO(0x9888), 0x04bf002c },
- { _MMIO(0x9888), 0x0cdac000 },
- { _MMIO(0x9888), 0x0edac000 },
- { _MMIO(0x9888), 0x00da8000 },
- { _MMIO(0x9888), 0x02dac000 },
- { _MMIO(0x9888), 0x04da4000 },
- { _MMIO(0x9888), 0x04983400 },
- { _MMIO(0x9888), 0x10980000 },
- { _MMIO(0x9888), 0x06990034 },
- { _MMIO(0x9888), 0x10990000 },
- { _MMIO(0x9888), 0x0c9dc000 },
- { _MMIO(0x9888), 0x0e9dc000 },
- { _MMIO(0x9888), 0x009d8000 },
- { _MMIO(0x9888), 0x029dc000 },
- { _MMIO(0x9888), 0x049d4000 },
- { _MMIO(0x9888), 0x109f02a8 },
- { _MMIO(0x9888), 0x0c9fa000 },
- { _MMIO(0x9888), 0x0e9f00ba },
- { _MMIO(0x9888), 0x0cb88000 },
- { _MMIO(0x9888), 0x0cb95000 },
- { _MMIO(0x9888), 0x0eb95000 },
- { _MMIO(0x9888), 0x00b94000 },
- { _MMIO(0x9888), 0x02b95000 },
- { _MMIO(0x9888), 0x04b91000 },
- { _MMIO(0x9888), 0x06b92000 },
- { _MMIO(0x9888), 0x0cba4000 },
- { _MMIO(0x9888), 0x0f88000f },
- { _MMIO(0x9888), 0x03888000 },
- { _MMIO(0x9888), 0x05888000 },
- { _MMIO(0x9888), 0x07888000 },
- { _MMIO(0x9888), 0x09888000 },
- { _MMIO(0x9888), 0x0b888000 },
- { _MMIO(0x9888), 0x0d880400 },
- { _MMIO(0x9888), 0x258b800a },
- { _MMIO(0x9888), 0x278b002a },
- { _MMIO(0x9888), 0x238b5500 },
- { _MMIO(0x9888), 0x198c4000 },
- { _MMIO(0x9888), 0x1b8c0015 },
- { _MMIO(0x9888), 0x038c4000 },
- { _MMIO(0x9888), 0x058c4000 },
- { _MMIO(0x9888), 0x078c4000 },
- { _MMIO(0x9888), 0x098c4000 },
- { _MMIO(0x9888), 0x0b8c4000 },
- { _MMIO(0x9888), 0x0d8c4000 },
- { _MMIO(0x9888), 0x0d8da000 },
- { _MMIO(0x9888), 0x0f8da000 },
- { _MMIO(0x9888), 0x018d8000 },
- { _MMIO(0x9888), 0x038da000 },
- { _MMIO(0x9888), 0x058da000 },
- { _MMIO(0x9888), 0x078d2000 },
- { _MMIO(0x9888), 0x2185800a },
- { _MMIO(0x9888), 0x2385002a },
- { _MMIO(0x9888), 0x1f85aa00 },
- { _MMIO(0x9888), 0x1b830154 },
- { _MMIO(0x9888), 0x03834000 },
- { _MMIO(0x9888), 0x05834000 },
- { _MMIO(0x9888), 0x07834000 },
- { _MMIO(0x9888), 0x09834000 },
- { _MMIO(0x9888), 0x0b834000 },
- { _MMIO(0x9888), 0x0d834000 },
- { _MMIO(0x9888), 0x0d84c000 },
- { _MMIO(0x9888), 0x0f84c000 },
- { _MMIO(0x9888), 0x01848000 },
- { _MMIO(0x9888), 0x0384c000 },
- { _MMIO(0x9888), 0x0584c000 },
- { _MMIO(0x9888), 0x07844000 },
- { _MMIO(0x9888), 0x1d80c000 },
- { _MMIO(0x9888), 0x1f80c000 },
- { _MMIO(0x9888), 0x11808000 },
- { _MMIO(0x9888), 0x1380c000 },
- { _MMIO(0x9888), 0x1580c000 },
- { _MMIO(0x9888), 0x17804000 },
- { _MMIO(0x9888), 0x53800000 },
- { _MMIO(0x9888), 0x45800000 },
- { _MMIO(0x9888), 0x47800000 },
- { _MMIO(0x9888), 0x21800000 },
- { _MMIO(0x9888), 0x31800000 },
- { _MMIO(0x9888), 0x4d800000 },
- { _MMIO(0x9888), 0x3f800000 },
- { _MMIO(0x9888), 0x4f800000 },
- { _MMIO(0x9888), 0x41800060 },
-};
-
-static int
-get_l3_1_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_l3_1;
- lens[n] = ARRAY_SIZE(mux_config_l3_1);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_l3_2[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2770), 0x00100070 },
- { _MMIO(0x2774), 0x0000fff1 },
- { _MMIO(0x2778), 0x00014002 },
- { _MMIO(0x277c), 0x0000c3ff },
- { _MMIO(0x2780), 0x00010002 },
- { _MMIO(0x2784), 0x0000c7ff },
- { _MMIO(0x2788), 0x00004002 },
- { _MMIO(0x278c), 0x0000d3ff },
- { _MMIO(0x2790), 0x00100700 },
- { _MMIO(0x2794), 0x0000ff1f },
- { _MMIO(0x2798), 0x00001402 },
- { _MMIO(0x279c), 0x0000fc3f },
- { _MMIO(0x27a0), 0x00001002 },
- { _MMIO(0x27a4), 0x0000fc7f },
- { _MMIO(0x27a8), 0x00000402 },
- { _MMIO(0x27ac), 0x0000fd3f },
-};
-
-static const struct i915_oa_reg flex_eu_config_l3_2[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_l3_2[] = {
- { _MMIO(0x9888), 0x103f03da },
- { _MMIO(0x9888), 0x143f0001 },
- { _MMIO(0x9888), 0x12180340 },
- { _MMIO(0x9888), 0x12190340 },
- { _MMIO(0x9888), 0x0c3f1187 },
- { _MMIO(0x9888), 0x0e3f1205 },
- { _MMIO(0x9888), 0x003f0500 },
- { _MMIO(0x9888), 0x023f042b },
- { _MMIO(0x9888), 0x043f002c },
- { _MMIO(0x9888), 0x0c5ac000 },
- { _MMIO(0x9888), 0x0e5ac000 },
- { _MMIO(0x9888), 0x005a8000 },
- { _MMIO(0x9888), 0x025ac000 },
- { _MMIO(0x9888), 0x045a4000 },
- { _MMIO(0x9888), 0x04183400 },
- { _MMIO(0x9888), 0x10180000 },
- { _MMIO(0x9888), 0x06190034 },
- { _MMIO(0x9888), 0x10190000 },
- { _MMIO(0x9888), 0x0c1dc000 },
- { _MMIO(0x9888), 0x0e1dc000 },
- { _MMIO(0x9888), 0x001d8000 },
- { _MMIO(0x9888), 0x021dc000 },
- { _MMIO(0x9888), 0x041d4000 },
- { _MMIO(0x9888), 0x101f02a8 },
- { _MMIO(0x9888), 0x0c1fa000 },
- { _MMIO(0x9888), 0x0e1f00ba },
- { _MMIO(0x9888), 0x0c388000 },
- { _MMIO(0x9888), 0x0c395000 },
- { _MMIO(0x9888), 0x0e395000 },
- { _MMIO(0x9888), 0x00394000 },
- { _MMIO(0x9888), 0x02395000 },
- { _MMIO(0x9888), 0x04391000 },
- { _MMIO(0x9888), 0x06392000 },
- { _MMIO(0x9888), 0x0c3a4000 },
- { _MMIO(0x9888), 0x1b8aa800 },
- { _MMIO(0x9888), 0x1d8a0002 },
- { _MMIO(0x9888), 0x038a8000 },
- { _MMIO(0x9888), 0x058a8000 },
- { _MMIO(0x9888), 0x078a8000 },
- { _MMIO(0x9888), 0x098a8000 },
- { _MMIO(0x9888), 0x0b8a8000 },
- { _MMIO(0x9888), 0x0d8a8000 },
- { _MMIO(0x9888), 0x258b4005 },
- { _MMIO(0x9888), 0x278b0015 },
- { _MMIO(0x9888), 0x238b2a80 },
- { _MMIO(0x9888), 0x2185800a },
- { _MMIO(0x9888), 0x2385002a },
- { _MMIO(0x9888), 0x1f85aa00 },
- { _MMIO(0x9888), 0x1b830154 },
- { _MMIO(0x9888), 0x03834000 },
- { _MMIO(0x9888), 0x05834000 },
- { _MMIO(0x9888), 0x07834000 },
- { _MMIO(0x9888), 0x09834000 },
- { _MMIO(0x9888), 0x0b834000 },
- { _MMIO(0x9888), 0x0d834000 },
- { _MMIO(0x9888), 0x0d84c000 },
- { _MMIO(0x9888), 0x0f84c000 },
- { _MMIO(0x9888), 0x01848000 },
- { _MMIO(0x9888), 0x0384c000 },
- { _MMIO(0x9888), 0x0584c000 },
- { _MMIO(0x9888), 0x07844000 },
- { _MMIO(0x9888), 0x1d80c000 },
- { _MMIO(0x9888), 0x1f80c000 },
- { _MMIO(0x9888), 0x11808000 },
- { _MMIO(0x9888), 0x1380c000 },
- { _MMIO(0x9888), 0x1580c000 },
- { _MMIO(0x9888), 0x17804000 },
- { _MMIO(0x9888), 0x53800000 },
- { _MMIO(0x9888), 0x45800000 },
- { _MMIO(0x9888), 0x47800000 },
- { _MMIO(0x9888), 0x21800000 },
- { _MMIO(0x9888), 0x31800000 },
- { _MMIO(0x9888), 0x4d800000 },
- { _MMIO(0x9888), 0x3f800000 },
- { _MMIO(0x9888), 0x4f800000 },
- { _MMIO(0x9888), 0x41800060 },
-};
-
-static int
-get_l3_2_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_l3_2;
- lens[n] = ARRAY_SIZE(mux_config_l3_2);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_l3_3[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2770), 0x00100070 },
- { _MMIO(0x2774), 0x0000fff1 },
- { _MMIO(0x2778), 0x00014002 },
- { _MMIO(0x277c), 0x0000c3ff },
- { _MMIO(0x2780), 0x00010002 },
- { _MMIO(0x2784), 0x0000c7ff },
- { _MMIO(0x2788), 0x00004002 },
- { _MMIO(0x278c), 0x0000d3ff },
- { _MMIO(0x2790), 0x00100700 },
- { _MMIO(0x2794), 0x0000ff1f },
- { _MMIO(0x2798), 0x00001402 },
- { _MMIO(0x279c), 0x0000fc3f },
- { _MMIO(0x27a0), 0x00001002 },
- { _MMIO(0x27a4), 0x0000fc7f },
- { _MMIO(0x27a8), 0x00000402 },
- { _MMIO(0x27ac), 0x0000fd3f },
-};
-
-static const struct i915_oa_reg flex_eu_config_l3_3[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_l3_3[] = {
- { _MMIO(0x9888), 0x121b0340 },
- { _MMIO(0x9888), 0x103f0274 },
- { _MMIO(0x9888), 0x123f0000 },
- { _MMIO(0x9888), 0x129b0340 },
- { _MMIO(0x9888), 0x10bf0274 },
- { _MMIO(0x9888), 0x12bf0000 },
- { _MMIO(0x9888), 0x041b3400 },
- { _MMIO(0x9888), 0x101b0000 },
- { _MMIO(0x9888), 0x045c8000 },
- { _MMIO(0x9888), 0x0a3d4000 },
- { _MMIO(0x9888), 0x003f0080 },
- { _MMIO(0x9888), 0x023f0793 },
- { _MMIO(0x9888), 0x043f0014 },
- { _MMIO(0x9888), 0x04588000 },
- { _MMIO(0x9888), 0x005a8000 },
- { _MMIO(0x9888), 0x025ac000 },
- { _MMIO(0x9888), 0x045a4000 },
- { _MMIO(0x9888), 0x0a5b4000 },
- { _MMIO(0x9888), 0x001d8000 },
- { _MMIO(0x9888), 0x021dc000 },
- { _MMIO(0x9888), 0x041d4000 },
- { _MMIO(0x9888), 0x0c1fa000 },
- { _MMIO(0x9888), 0x0e1f002a },
- { _MMIO(0x9888), 0x0a384000 },
- { _MMIO(0x9888), 0x00394000 },
- { _MMIO(0x9888), 0x02395000 },
- { _MMIO(0x9888), 0x04399000 },
- { _MMIO(0x9888), 0x069b0034 },
- { _MMIO(0x9888), 0x109b0000 },
- { _MMIO(0x9888), 0x06dc4000 },
- { _MMIO(0x9888), 0x0cbd4000 },
- { _MMIO(0x9888), 0x0cbf0981 },
- { _MMIO(0x9888), 0x0ebf0a0f },
- { _MMIO(0x9888), 0x06d84000 },
- { _MMIO(0x9888), 0x0cdac000 },
- { _MMIO(0x9888), 0x0edac000 },
- { _MMIO(0x9888), 0x0cdb4000 },
- { _MMIO(0x9888), 0x0c9dc000 },
- { _MMIO(0x9888), 0x0e9dc000 },
- { _MMIO(0x9888), 0x109f02a8 },
- { _MMIO(0x9888), 0x0e9f0080 },
- { _MMIO(0x9888), 0x0cb84000 },
- { _MMIO(0x9888), 0x0cb95000 },
- { _MMIO(0x9888), 0x0eb95000 },
- { _MMIO(0x9888), 0x06b92000 },
- { _MMIO(0x9888), 0x0f88000f },
- { _MMIO(0x9888), 0x0d880400 },
- { _MMIO(0x9888), 0x038a8000 },
- { _MMIO(0x9888), 0x058a8000 },
- { _MMIO(0x9888), 0x078a8000 },
- { _MMIO(0x9888), 0x098a8000 },
- { _MMIO(0x9888), 0x0b8a8000 },
- { _MMIO(0x9888), 0x258b8009 },
- { _MMIO(0x9888), 0x278b002a },
- { _MMIO(0x9888), 0x238b2a80 },
- { _MMIO(0x9888), 0x198c4000 },
- { _MMIO(0x9888), 0x1b8c0015 },
- { _MMIO(0x9888), 0x0d8c4000 },
- { _MMIO(0x9888), 0x0d8da000 },
- { _MMIO(0x9888), 0x0f8da000 },
- { _MMIO(0x9888), 0x078d2000 },
- { _MMIO(0x9888), 0x2185800a },
- { _MMIO(0x9888), 0x2385002a },
- { _MMIO(0x9888), 0x1f85aa00 },
- { _MMIO(0x9888), 0x1b830154 },
- { _MMIO(0x9888), 0x03834000 },
- { _MMIO(0x9888), 0x05834000 },
- { _MMIO(0x9888), 0x07834000 },
- { _MMIO(0x9888), 0x09834000 },
- { _MMIO(0x9888), 0x0b834000 },
- { _MMIO(0x9888), 0x0d834000 },
- { _MMIO(0x9888), 0x0d84c000 },
- { _MMIO(0x9888), 0x0f84c000 },
- { _MMIO(0x9888), 0x01848000 },
- { _MMIO(0x9888), 0x0384c000 },
- { _MMIO(0x9888), 0x0584c000 },
- { _MMIO(0x9888), 0x07844000 },
- { _MMIO(0x9888), 0x1d80c000 },
- { _MMIO(0x9888), 0x1f80c000 },
- { _MMIO(0x9888), 0x11808000 },
- { _MMIO(0x9888), 0x1380c000 },
- { _MMIO(0x9888), 0x1580c000 },
- { _MMIO(0x9888), 0x17804000 },
- { _MMIO(0x9888), 0x53800000 },
- { _MMIO(0x9888), 0x45800c00 },
- { _MMIO(0x9888), 0x47800c63 },
- { _MMIO(0x9888), 0x21800000 },
- { _MMIO(0x9888), 0x31800000 },
- { _MMIO(0x9888), 0x4d800000 },
- { _MMIO(0x9888), 0x3f8014a5 },
- { _MMIO(0x9888), 0x4f800000 },
- { _MMIO(0x9888), 0x41800045 },
-};
-
-static int
-get_l3_3_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_l3_3;
- lens[n] = ARRAY_SIZE(mux_config_l3_3);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_l3_4[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2770), 0x00100070 },
- { _MMIO(0x2774), 0x0000fff1 },
- { _MMIO(0x2778), 0x00014002 },
- { _MMIO(0x277c), 0x0000c3ff },
- { _MMIO(0x2780), 0x00010002 },
- { _MMIO(0x2784), 0x0000c7ff },
- { _MMIO(0x2788), 0x00004002 },
- { _MMIO(0x278c), 0x0000d3ff },
- { _MMIO(0x2790), 0x00100700 },
- { _MMIO(0x2794), 0x0000ff1f },
- { _MMIO(0x2798), 0x00001402 },
- { _MMIO(0x279c), 0x0000fc3f },
- { _MMIO(0x27a0), 0x00001002 },
- { _MMIO(0x27a4), 0x0000fc7f },
- { _MMIO(0x27a8), 0x00000402 },
- { _MMIO(0x27ac), 0x0000fd3f },
-};
-
-static const struct i915_oa_reg flex_eu_config_l3_4[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_l3_4[] = {
- { _MMIO(0x9888), 0x121a0340 },
- { _MMIO(0x9888), 0x103f0017 },
- { _MMIO(0x9888), 0x123f0020 },
- { _MMIO(0x9888), 0x129a0340 },
- { _MMIO(0x9888), 0x10bf0017 },
- { _MMIO(0x9888), 0x12bf0020 },
- { _MMIO(0x9888), 0x041a3400 },
- { _MMIO(0x9888), 0x101a0000 },
- { _MMIO(0x9888), 0x043b8000 },
- { _MMIO(0x9888), 0x0a3e0010 },
- { _MMIO(0x9888), 0x003f0200 },
- { _MMIO(0x9888), 0x023f0113 },
- { _MMIO(0x9888), 0x043f0014 },
- { _MMIO(0x9888), 0x02592000 },
- { _MMIO(0x9888), 0x005a8000 },
- { _MMIO(0x9888), 0x025ac000 },
- { _MMIO(0x9888), 0x045a4000 },
- { _MMIO(0x9888), 0x0a1c8000 },
- { _MMIO(0x9888), 0x001d8000 },
- { _MMIO(0x9888), 0x021dc000 },
- { _MMIO(0x9888), 0x041d4000 },
- { _MMIO(0x9888), 0x0a1e8000 },
- { _MMIO(0x9888), 0x0c1fa000 },
- { _MMIO(0x9888), 0x0e1f001a },
- { _MMIO(0x9888), 0x00394000 },
- { _MMIO(0x9888), 0x02395000 },
- { _MMIO(0x9888), 0x04391000 },
- { _MMIO(0x9888), 0x069a0034 },
- { _MMIO(0x9888), 0x109a0000 },
- { _MMIO(0x9888), 0x06bb4000 },
- { _MMIO(0x9888), 0x0abe0040 },
- { _MMIO(0x9888), 0x0cbf0984 },
- { _MMIO(0x9888), 0x0ebf0a02 },
- { _MMIO(0x9888), 0x02d94000 },
- { _MMIO(0x9888), 0x0cdac000 },
- { _MMIO(0x9888), 0x0edac000 },
- { _MMIO(0x9888), 0x0c9c0400 },
- { _MMIO(0x9888), 0x0c9dc000 },
- { _MMIO(0x9888), 0x0e9dc000 },
- { _MMIO(0x9888), 0x0c9e0400 },
- { _MMIO(0x9888), 0x109f02a8 },
- { _MMIO(0x9888), 0x0e9f0040 },
- { _MMIO(0x9888), 0x0cb95000 },
- { _MMIO(0x9888), 0x0eb95000 },
- { _MMIO(0x9888), 0x0f88000f },
- { _MMIO(0x9888), 0x0d880400 },
- { _MMIO(0x9888), 0x038a8000 },
- { _MMIO(0x9888), 0x058a8000 },
- { _MMIO(0x9888), 0x078a8000 },
- { _MMIO(0x9888), 0x098a8000 },
- { _MMIO(0x9888), 0x0b8a8000 },
- { _MMIO(0x9888), 0x258b8009 },
- { _MMIO(0x9888), 0x278b002a },
- { _MMIO(0x9888), 0x238b2a80 },
- { _MMIO(0x9888), 0x198c4000 },
- { _MMIO(0x9888), 0x1b8c0015 },
- { _MMIO(0x9888), 0x0d8c4000 },
- { _MMIO(0x9888), 0x0d8da000 },
- { _MMIO(0x9888), 0x0f8da000 },
- { _MMIO(0x9888), 0x078d2000 },
- { _MMIO(0x9888), 0x2185800a },
- { _MMIO(0x9888), 0x2385002a },
- { _MMIO(0x9888), 0x1f85aa00 },
- { _MMIO(0x9888), 0x1b830154 },
- { _MMIO(0x9888), 0x03834000 },
- { _MMIO(0x9888), 0x05834000 },
- { _MMIO(0x9888), 0x07834000 },
- { _MMIO(0x9888), 0x09834000 },
- { _MMIO(0x9888), 0x0b834000 },
- { _MMIO(0x9888), 0x0d834000 },
- { _MMIO(0x9888), 0x0d84c000 },
- { _MMIO(0x9888), 0x0f84c000 },
- { _MMIO(0x9888), 0x01848000 },
- { _MMIO(0x9888), 0x0384c000 },
- { _MMIO(0x9888), 0x0584c000 },
- { _MMIO(0x9888), 0x07844000 },
- { _MMIO(0x9888), 0x1d80c000 },
- { _MMIO(0x9888), 0x1f80c000 },
- { _MMIO(0x9888), 0x11808000 },
- { _MMIO(0x9888), 0x1380c000 },
- { _MMIO(0x9888), 0x1580c000 },
- { _MMIO(0x9888), 0x17804000 },
- { _MMIO(0x9888), 0x53800000 },
- { _MMIO(0x9888), 0x45800800 },
- { _MMIO(0x9888), 0x47800842 },
- { _MMIO(0x9888), 0x21800000 },
- { _MMIO(0x9888), 0x31800000 },
- { _MMIO(0x9888), 0x4d800000 },
- { _MMIO(0x9888), 0x3f801084 },
- { _MMIO(0x9888), 0x4f800000 },
- { _MMIO(0x9888), 0x41800044 },
-};
-
-static int
-get_l3_4_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_l3_4;
- lens[n] = ARRAY_SIZE(mux_config_l3_4);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_rasterizer_and_pixel_backend[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x30800000 },
- { _MMIO(0x2770), 0x00006000 },
- { _MMIO(0x2774), 0x0000f3ff },
- { _MMIO(0x2778), 0x00001800 },
- { _MMIO(0x277c), 0x0000fcff },
- { _MMIO(0x2780), 0x00000600 },
- { _MMIO(0x2784), 0x0000ff3f },
- { _MMIO(0x2788), 0x00000180 },
- { _MMIO(0x278c), 0x0000ffcf },
- { _MMIO(0x2790), 0x00000060 },
- { _MMIO(0x2794), 0x0000fff3 },
- { _MMIO(0x2798), 0x00000018 },
- { _MMIO(0x279c), 0x0000fffc },
-};
-
-static const struct i915_oa_reg flex_eu_config_rasterizer_and_pixel_backend[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_rasterizer_and_pixel_backend[] = {
- { _MMIO(0x9888), 0x143b000e },
- { _MMIO(0x9888), 0x043c55c0 },
- { _MMIO(0x9888), 0x0a1e0280 },
- { _MMIO(0x9888), 0x0c1e0408 },
- { _MMIO(0x9888), 0x10390000 },
- { _MMIO(0x9888), 0x12397a1f },
- { _MMIO(0x9888), 0x14bb000e },
- { _MMIO(0x9888), 0x04bc5000 },
- { _MMIO(0x9888), 0x0a9e0296 },
- { _MMIO(0x9888), 0x0c9e0008 },
- { _MMIO(0x9888), 0x10b90000 },
- { _MMIO(0x9888), 0x12b97a1f },
- { _MMIO(0x9888), 0x063b0042 },
- { _MMIO(0x9888), 0x103b0000 },
- { _MMIO(0x9888), 0x083c0000 },
- { _MMIO(0x9888), 0x0a3e0040 },
- { _MMIO(0x9888), 0x043f8000 },
- { _MMIO(0x9888), 0x02594000 },
- { _MMIO(0x9888), 0x045a8000 },
- { _MMIO(0x9888), 0x0c1c0400 },
- { _MMIO(0x9888), 0x041d8000 },
- { _MMIO(0x9888), 0x081e02c0 },
- { _MMIO(0x9888), 0x0e1e0000 },
- { _MMIO(0x9888), 0x0c1fa800 },
- { _MMIO(0x9888), 0x0e1f0260 },
- { _MMIO(0x9888), 0x101f0014 },
- { _MMIO(0x9888), 0x003905e0 },
- { _MMIO(0x9888), 0x06390bc0 },
- { _MMIO(0x9888), 0x02390018 },
- { _MMIO(0x9888), 0x04394000 },
- { _MMIO(0x9888), 0x04bb0042 },
- { _MMIO(0x9888), 0x10bb0000 },
- { _MMIO(0x9888), 0x02bc05c0 },
- { _MMIO(0x9888), 0x08bc0000 },
- { _MMIO(0x9888), 0x0abe0004 },
- { _MMIO(0x9888), 0x02bf8000 },
- { _MMIO(0x9888), 0x02d91000 },
- { _MMIO(0x9888), 0x02da8000 },
- { _MMIO(0x9888), 0x089c8000 },
- { _MMIO(0x9888), 0x029d8000 },
- { _MMIO(0x9888), 0x089e8000 },
- { _MMIO(0x9888), 0x0e9e0000 },
- { _MMIO(0x9888), 0x0e9fa806 },
- { _MMIO(0x9888), 0x109f0142 },
- { _MMIO(0x9888), 0x08b90617 },
- { _MMIO(0x9888), 0x0ab90be0 },
- { _MMIO(0x9888), 0x02b94000 },
- { _MMIO(0x9888), 0x0d88f000 },
- { _MMIO(0x9888), 0x0f88000c },
- { _MMIO(0x9888), 0x07888000 },
- { _MMIO(0x9888), 0x09888000 },
- { _MMIO(0x9888), 0x018a8000 },
- { _MMIO(0x9888), 0x0f8a8000 },
- { _MMIO(0x9888), 0x1b8a2800 },
- { _MMIO(0x9888), 0x038a8000 },
- { _MMIO(0x9888), 0x058a8000 },
- { _MMIO(0x9888), 0x0b8a8000 },
- { _MMIO(0x9888), 0x0d8a8000 },
- { _MMIO(0x9888), 0x238b52a0 },
- { _MMIO(0x9888), 0x258b6a95 },
- { _MMIO(0x9888), 0x278b0029 },
- { _MMIO(0x9888), 0x178c2000 },
- { _MMIO(0x9888), 0x198c1500 },
- { _MMIO(0x9888), 0x1b8c0014 },
- { _MMIO(0x9888), 0x078c4000 },
- { _MMIO(0x9888), 0x098c4000 },
- { _MMIO(0x9888), 0x098da000 },
- { _MMIO(0x9888), 0x0b8da000 },
- { _MMIO(0x9888), 0x0f8da000 },
- { _MMIO(0x9888), 0x038d8000 },
- { _MMIO(0x9888), 0x058d2000 },
- { _MMIO(0x9888), 0x1f85aa80 },
- { _MMIO(0x9888), 0x2185aaaa },
- { _MMIO(0x9888), 0x2385002a },
- { _MMIO(0x9888), 0x01834000 },
- { _MMIO(0x9888), 0x0f834000 },
- { _MMIO(0x9888), 0x19835400 },
- { _MMIO(0x9888), 0x1b830155 },
- { _MMIO(0x9888), 0x03834000 },
- { _MMIO(0x9888), 0x05834000 },
- { _MMIO(0x9888), 0x07834000 },
- { _MMIO(0x9888), 0x09834000 },
- { _MMIO(0x9888), 0x0b834000 },
- { _MMIO(0x9888), 0x0d834000 },
- { _MMIO(0x9888), 0x0184c000 },
- { _MMIO(0x9888), 0x0784c000 },
- { _MMIO(0x9888), 0x0984c000 },
- { _MMIO(0x9888), 0x0b84c000 },
- { _MMIO(0x9888), 0x0d84c000 },
- { _MMIO(0x9888), 0x0f84c000 },
- { _MMIO(0x9888), 0x0384c000 },
- { _MMIO(0x9888), 0x0584c000 },
- { _MMIO(0x9888), 0x1180c000 },
- { _MMIO(0x9888), 0x1780c000 },
- { _MMIO(0x9888), 0x1980c000 },
- { _MMIO(0x9888), 0x1b80c000 },
- { _MMIO(0x9888), 0x1d80c000 },
- { _MMIO(0x9888), 0x1f80c000 },
- { _MMIO(0x9888), 0x1380c000 },
- { _MMIO(0x9888), 0x1580c000 },
- { _MMIO(0x9888), 0x4d800444 },
- { _MMIO(0x9888), 0x3d800000 },
- { _MMIO(0x9888), 0x4f804000 },
- { _MMIO(0x9888), 0x43801080 },
- { _MMIO(0x9888), 0x51800000 },
- { _MMIO(0x9888), 0x45800084 },
- { _MMIO(0x9888), 0x53800044 },
- { _MMIO(0x9888), 0x47801080 },
- { _MMIO(0x9888), 0x21800000 },
- { _MMIO(0x9888), 0x31800000 },
- { _MMIO(0x9888), 0x3f800000 },
- { _MMIO(0x9888), 0x41800840 },
-};
-
-static int
-get_rasterizer_and_pixel_backend_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_rasterizer_and_pixel_backend;
- lens[n] = ARRAY_SIZE(mux_config_rasterizer_and_pixel_backend);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_sampler_1[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x70800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2770), 0x0000c000 },
- { _MMIO(0x2774), 0x0000e7ff },
- { _MMIO(0x2778), 0x00003000 },
- { _MMIO(0x277c), 0x0000f9ff },
- { _MMIO(0x2780), 0x00000c00 },
- { _MMIO(0x2784), 0x0000fe7f },
-};
-
-static const struct i915_oa_reg flex_eu_config_sampler_1[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_sampler_1[] = {
- { _MMIO(0x9888), 0x18921400 },
- { _MMIO(0x9888), 0x149500ab },
- { _MMIO(0x9888), 0x18b21400 },
- { _MMIO(0x9888), 0x14b500ab },
- { _MMIO(0x9888), 0x18d21400 },
- { _MMIO(0x9888), 0x14d500ab },
- { _MMIO(0x9888), 0x0cdc8000 },
- { _MMIO(0x9888), 0x0edc4000 },
- { _MMIO(0x9888), 0x02dcc000 },
- { _MMIO(0x9888), 0x04dcc000 },
- { _MMIO(0x9888), 0x1abd00a0 },
- { _MMIO(0x9888), 0x0abd8000 },
- { _MMIO(0x9888), 0x0cd88000 },
- { _MMIO(0x9888), 0x0ed84000 },
- { _MMIO(0x9888), 0x04d88000 },
- { _MMIO(0x9888), 0x1adb0050 },
- { _MMIO(0x9888), 0x04db8000 },
- { _MMIO(0x9888), 0x06db8000 },
- { _MMIO(0x9888), 0x08db8000 },
- { _MMIO(0x9888), 0x0adb4000 },
- { _MMIO(0x9888), 0x109f02a0 },
- { _MMIO(0x9888), 0x0c9fa000 },
- { _MMIO(0x9888), 0x0e9f00aa },
- { _MMIO(0x9888), 0x18b82500 },
- { _MMIO(0x9888), 0x02b88000 },
- { _MMIO(0x9888), 0x04b84000 },
- { _MMIO(0x9888), 0x06b84000 },
- { _MMIO(0x9888), 0x08b84000 },
- { _MMIO(0x9888), 0x0ab84000 },
- { _MMIO(0x9888), 0x0cb88000 },
- { _MMIO(0x9888), 0x0cb98000 },
- { _MMIO(0x9888), 0x0eb9a000 },
- { _MMIO(0x9888), 0x00b98000 },
- { _MMIO(0x9888), 0x02b9a000 },
- { _MMIO(0x9888), 0x04b9a000 },
- { _MMIO(0x9888), 0x06b92000 },
- { _MMIO(0x9888), 0x1aba0200 },
- { _MMIO(0x9888), 0x02ba8000 },
- { _MMIO(0x9888), 0x0cba8000 },
- { _MMIO(0x9888), 0x04908000 },
- { _MMIO(0x9888), 0x04918000 },
- { _MMIO(0x9888), 0x04927300 },
- { _MMIO(0x9888), 0x10920000 },
- { _MMIO(0x9888), 0x1893000a },
- { _MMIO(0x9888), 0x0a934000 },
- { _MMIO(0x9888), 0x0a946000 },
- { _MMIO(0x9888), 0x0c959000 },
- { _MMIO(0x9888), 0x0e950098 },
- { _MMIO(0x9888), 0x10950000 },
- { _MMIO(0x9888), 0x04b04000 },
- { _MMIO(0x9888), 0x04b14000 },
- { _MMIO(0x9888), 0x04b20073 },
- { _MMIO(0x9888), 0x10b20000 },
- { _MMIO(0x9888), 0x04b38000 },
- { _MMIO(0x9888), 0x06b38000 },
- { _MMIO(0x9888), 0x08b34000 },
- { _MMIO(0x9888), 0x04b4c000 },
- { _MMIO(0x9888), 0x02b59890 },
- { _MMIO(0x9888), 0x10b50000 },
- { _MMIO(0x9888), 0x06d04000 },
- { _MMIO(0x9888), 0x06d14000 },
- { _MMIO(0x9888), 0x06d20073 },
- { _MMIO(0x9888), 0x10d20000 },
- { _MMIO(0x9888), 0x18d30020 },
- { _MMIO(0x9888), 0x02d38000 },
- { _MMIO(0x9888), 0x0cd34000 },
- { _MMIO(0x9888), 0x0ad48000 },
- { _MMIO(0x9888), 0x04d42000 },
- { _MMIO(0x9888), 0x0ed59000 },
- { _MMIO(0x9888), 0x00d59800 },
- { _MMIO(0x9888), 0x10d50000 },
- { _MMIO(0x9888), 0x0f88000e },
- { _MMIO(0x9888), 0x03888000 },
- { _MMIO(0x9888), 0x05888000 },
- { _MMIO(0x9888), 0x07888000 },
- { _MMIO(0x9888), 0x09888000 },
- { _MMIO(0x9888), 0x0b888000 },
- { _MMIO(0x9888), 0x0d880400 },
- { _MMIO(0x9888), 0x278b002a },
- { _MMIO(0x9888), 0x238b5500 },
- { _MMIO(0x9888), 0x258b000a },
- { _MMIO(0x9888), 0x1b8c0015 },
- { _MMIO(0x9888), 0x038c4000 },
- { _MMIO(0x9888), 0x058c4000 },
- { _MMIO(0x9888), 0x078c4000 },
- { _MMIO(0x9888), 0x098c4000 },
- { _MMIO(0x9888), 0x0b8c4000 },
- { _MMIO(0x9888), 0x0d8c4000 },
- { _MMIO(0x9888), 0x0d8d8000 },
- { _MMIO(0x9888), 0x0f8da000 },
- { _MMIO(0x9888), 0x018d8000 },
- { _MMIO(0x9888), 0x038da000 },
- { _MMIO(0x9888), 0x058da000 },
- { _MMIO(0x9888), 0x078d2000 },
- { _MMIO(0x9888), 0x2385002a },
- { _MMIO(0x9888), 0x1f85aa00 },
- { _MMIO(0x9888), 0x2185000a },
- { _MMIO(0x9888), 0x1b830150 },
- { _MMIO(0x9888), 0x03834000 },
- { _MMIO(0x9888), 0x05834000 },
- { _MMIO(0x9888), 0x07834000 },
- { _MMIO(0x9888), 0x09834000 },
- { _MMIO(0x9888), 0x0b834000 },
- { _MMIO(0x9888), 0x0d834000 },
- { _MMIO(0x9888), 0x0d848000 },
- { _MMIO(0x9888), 0x0f84c000 },
- { _MMIO(0x9888), 0x01848000 },
- { _MMIO(0x9888), 0x0384c000 },
- { _MMIO(0x9888), 0x0584c000 },
- { _MMIO(0x9888), 0x07844000 },
- { _MMIO(0x9888), 0x1d808000 },
- { _MMIO(0x9888), 0x1f80c000 },
- { _MMIO(0x9888), 0x11808000 },
- { _MMIO(0x9888), 0x1380c000 },
- { _MMIO(0x9888), 0x1580c000 },
- { _MMIO(0x9888), 0x17804000 },
- { _MMIO(0x9888), 0x53800000 },
- { _MMIO(0x9888), 0x47801021 },
- { _MMIO(0x9888), 0x21800000 },
- { _MMIO(0x9888), 0x31800000 },
- { _MMIO(0x9888), 0x4d800000 },
- { _MMIO(0x9888), 0x3f800c64 },
- { _MMIO(0x9888), 0x4f800000 },
- { _MMIO(0x9888), 0x41800c02 },
-};
-
-static int
-get_sampler_1_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_sampler_1;
- lens[n] = ARRAY_SIZE(mux_config_sampler_1);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_sampler_2[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x70800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2770), 0x0000c000 },
- { _MMIO(0x2774), 0x0000e7ff },
- { _MMIO(0x2778), 0x00003000 },
- { _MMIO(0x277c), 0x0000f9ff },
- { _MMIO(0x2780), 0x00000c00 },
- { _MMIO(0x2784), 0x0000fe7f },
-};
-
-static const struct i915_oa_reg flex_eu_config_sampler_2[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_sampler_2[] = {
- { _MMIO(0x9888), 0x18121400 },
- { _MMIO(0x9888), 0x141500ab },
- { _MMIO(0x9888), 0x18321400 },
- { _MMIO(0x9888), 0x143500ab },
- { _MMIO(0x9888), 0x18521400 },
- { _MMIO(0x9888), 0x145500ab },
- { _MMIO(0x9888), 0x0c5c8000 },
- { _MMIO(0x9888), 0x0e5c4000 },
- { _MMIO(0x9888), 0x025cc000 },
- { _MMIO(0x9888), 0x045cc000 },
- { _MMIO(0x9888), 0x1a3d00a0 },
- { _MMIO(0x9888), 0x0a3d8000 },
- { _MMIO(0x9888), 0x0c588000 },
- { _MMIO(0x9888), 0x0e584000 },
- { _MMIO(0x9888), 0x04588000 },
- { _MMIO(0x9888), 0x1a5b0050 },
- { _MMIO(0x9888), 0x045b8000 },
- { _MMIO(0x9888), 0x065b8000 },
- { _MMIO(0x9888), 0x085b8000 },
- { _MMIO(0x9888), 0x0a5b4000 },
- { _MMIO(0x9888), 0x101f02a0 },
- { _MMIO(0x9888), 0x0c1fa000 },
- { _MMIO(0x9888), 0x0e1f00aa },
- { _MMIO(0x9888), 0x18382500 },
- { _MMIO(0x9888), 0x02388000 },
- { _MMIO(0x9888), 0x04384000 },
- { _MMIO(0x9888), 0x06384000 },
- { _MMIO(0x9888), 0x08384000 },
- { _MMIO(0x9888), 0x0a384000 },
- { _MMIO(0x9888), 0x0c388000 },
- { _MMIO(0x9888), 0x0c398000 },
- { _MMIO(0x9888), 0x0e39a000 },
- { _MMIO(0x9888), 0x00398000 },
- { _MMIO(0x9888), 0x0239a000 },
- { _MMIO(0x9888), 0x0439a000 },
- { _MMIO(0x9888), 0x06392000 },
- { _MMIO(0x9888), 0x1a3a0200 },
- { _MMIO(0x9888), 0x023a8000 },
- { _MMIO(0x9888), 0x0c3a8000 },
- { _MMIO(0x9888), 0x04108000 },
- { _MMIO(0x9888), 0x04118000 },
- { _MMIO(0x9888), 0x04127300 },
- { _MMIO(0x9888), 0x10120000 },
- { _MMIO(0x9888), 0x1813000a },
- { _MMIO(0x9888), 0x0a134000 },
- { _MMIO(0x9888), 0x0a146000 },
- { _MMIO(0x9888), 0x0c159000 },
- { _MMIO(0x9888), 0x0e150098 },
- { _MMIO(0x9888), 0x10150000 },
- { _MMIO(0x9888), 0x04304000 },
- { _MMIO(0x9888), 0x04314000 },
- { _MMIO(0x9888), 0x04320073 },
- { _MMIO(0x9888), 0x10320000 },
- { _MMIO(0x9888), 0x04338000 },
- { _MMIO(0x9888), 0x06338000 },
- { _MMIO(0x9888), 0x08334000 },
- { _MMIO(0x9888), 0x0434c000 },
- { _MMIO(0x9888), 0x02359890 },
- { _MMIO(0x9888), 0x10350000 },
- { _MMIO(0x9888), 0x06504000 },
- { _MMIO(0x9888), 0x06514000 },
- { _MMIO(0x9888), 0x06520073 },
- { _MMIO(0x9888), 0x10520000 },
- { _MMIO(0x9888), 0x18530020 },
- { _MMIO(0x9888), 0x02538000 },
- { _MMIO(0x9888), 0x0c534000 },
- { _MMIO(0x9888), 0x0a548000 },
- { _MMIO(0x9888), 0x04542000 },
- { _MMIO(0x9888), 0x0e559000 },
- { _MMIO(0x9888), 0x00559800 },
- { _MMIO(0x9888), 0x10550000 },
- { _MMIO(0x9888), 0x1b8aa000 },
- { _MMIO(0x9888), 0x1d8a0002 },
- { _MMIO(0x9888), 0x038a8000 },
- { _MMIO(0x9888), 0x058a8000 },
- { _MMIO(0x9888), 0x078a8000 },
- { _MMIO(0x9888), 0x098a8000 },
- { _MMIO(0x9888), 0x0b8a8000 },
- { _MMIO(0x9888), 0x0d8a8000 },
- { _MMIO(0x9888), 0x278b0015 },
- { _MMIO(0x9888), 0x238b2a80 },
- { _MMIO(0x9888), 0x258b0005 },
- { _MMIO(0x9888), 0x2385002a },
- { _MMIO(0x9888), 0x1f85aa00 },
- { _MMIO(0x9888), 0x2185000a },
- { _MMIO(0x9888), 0x1b830150 },
- { _MMIO(0x9888), 0x03834000 },
- { _MMIO(0x9888), 0x05834000 },
- { _MMIO(0x9888), 0x07834000 },
- { _MMIO(0x9888), 0x09834000 },
- { _MMIO(0x9888), 0x0b834000 },
- { _MMIO(0x9888), 0x0d834000 },
- { _MMIO(0x9888), 0x0d848000 },
- { _MMIO(0x9888), 0x0f84c000 },
- { _MMIO(0x9888), 0x01848000 },
- { _MMIO(0x9888), 0x0384c000 },
- { _MMIO(0x9888), 0x0584c000 },
- { _MMIO(0x9888), 0x07844000 },
- { _MMIO(0x9888), 0x1d808000 },
- { _MMIO(0x9888), 0x1f80c000 },
- { _MMIO(0x9888), 0x11808000 },
- { _MMIO(0x9888), 0x1380c000 },
- { _MMIO(0x9888), 0x1580c000 },
- { _MMIO(0x9888), 0x17804000 },
- { _MMIO(0x9888), 0x53800000 },
- { _MMIO(0x9888), 0x47801021 },
- { _MMIO(0x9888), 0x21800000 },
- { _MMIO(0x9888), 0x31800000 },
- { _MMIO(0x9888), 0x4d800000 },
- { _MMIO(0x9888), 0x3f800c64 },
- { _MMIO(0x9888), 0x4f800000 },
- { _MMIO(0x9888), 0x41800c02 },
-};
-
-static int
-get_sampler_2_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_sampler_2;
- lens[n] = ARRAY_SIZE(mux_config_sampler_2);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_tdl_1[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x30800000 },
- { _MMIO(0x2770), 0x00000002 },
- { _MMIO(0x2774), 0x0000fdff },
- { _MMIO(0x2778), 0x00000000 },
- { _MMIO(0x277c), 0x0000fe7f },
- { _MMIO(0x2780), 0x00000002 },
- { _MMIO(0x2784), 0x0000ffbf },
- { _MMIO(0x2788), 0x00000000 },
- { _MMIO(0x278c), 0x0000ffcf },
- { _MMIO(0x2790), 0x00000002 },
- { _MMIO(0x2794), 0x0000fff7 },
- { _MMIO(0x2798), 0x00000000 },
- { _MMIO(0x279c), 0x0000fff9 },
-};
-
-static const struct i915_oa_reg flex_eu_config_tdl_1[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_tdl_1[] = {
- { _MMIO(0x9888), 0x16154d60 },
- { _MMIO(0x9888), 0x16352e60 },
- { _MMIO(0x9888), 0x16554d60 },
- { _MMIO(0x9888), 0x16950000 },
- { _MMIO(0x9888), 0x16b50000 },
- { _MMIO(0x9888), 0x16d50000 },
- { _MMIO(0x9888), 0x005c8000 },
- { _MMIO(0x9888), 0x045cc000 },
- { _MMIO(0x9888), 0x065c4000 },
- { _MMIO(0x9888), 0x083d8000 },
- { _MMIO(0x9888), 0x0a3d8000 },
- { _MMIO(0x9888), 0x0458c000 },
- { _MMIO(0x9888), 0x025b8000 },
- { _MMIO(0x9888), 0x085b4000 },
- { _MMIO(0x9888), 0x0a5b4000 },
- { _MMIO(0x9888), 0x0c5b8000 },
- { _MMIO(0x9888), 0x0c1fa000 },
- { _MMIO(0x9888), 0x0e1f00aa },
- { _MMIO(0x9888), 0x02384000 },
- { _MMIO(0x9888), 0x04388000 },
- { _MMIO(0x9888), 0x06388000 },
- { _MMIO(0x9888), 0x08384000 },
- { _MMIO(0x9888), 0x0a384000 },
- { _MMIO(0x9888), 0x0c384000 },
- { _MMIO(0x9888), 0x00398000 },
- { _MMIO(0x9888), 0x0239a000 },
- { _MMIO(0x9888), 0x0439a000 },
- { _MMIO(0x9888), 0x06392000 },
- { _MMIO(0x9888), 0x043a8000 },
- { _MMIO(0x9888), 0x063a8000 },
- { _MMIO(0x9888), 0x08138000 },
- { _MMIO(0x9888), 0x0a138000 },
- { _MMIO(0x9888), 0x06143000 },
- { _MMIO(0x9888), 0x0415cfc7 },
- { _MMIO(0x9888), 0x10150000 },
- { _MMIO(0x9888), 0x02338000 },
- { _MMIO(0x9888), 0x0c338000 },
- { _MMIO(0x9888), 0x04342000 },
- { _MMIO(0x9888), 0x06344000 },
- { _MMIO(0x9888), 0x0035c700 },
- { _MMIO(0x9888), 0x063500cf },
- { _MMIO(0x9888), 0x10350000 },
- { _MMIO(0x9888), 0x04538000 },
- { _MMIO(0x9888), 0x06538000 },
- { _MMIO(0x9888), 0x0454c000 },
- { _MMIO(0x9888), 0x0255cfc7 },
- { _MMIO(0x9888), 0x10550000 },
- { _MMIO(0x9888), 0x06dc8000 },
- { _MMIO(0x9888), 0x08dc4000 },
- { _MMIO(0x9888), 0x0cdcc000 },
- { _MMIO(0x9888), 0x0edcc000 },
- { _MMIO(0x9888), 0x1abd00a8 },
- { _MMIO(0x9888), 0x0cd8c000 },
- { _MMIO(0x9888), 0x0ed84000 },
- { _MMIO(0x9888), 0x0edb8000 },
- { _MMIO(0x9888), 0x18db0800 },
- { _MMIO(0x9888), 0x1adb0254 },
- { _MMIO(0x9888), 0x0e9faa00 },
- { _MMIO(0x9888), 0x109f02aa },
- { _MMIO(0x9888), 0x0eb84000 },
- { _MMIO(0x9888), 0x16b84000 },
- { _MMIO(0x9888), 0x18b8156a },
- { _MMIO(0x9888), 0x06b98000 },
- { _MMIO(0x9888), 0x08b9a000 },
- { _MMIO(0x9888), 0x0ab9a000 },
- { _MMIO(0x9888), 0x0cb9a000 },
- { _MMIO(0x9888), 0x0eb9a000 },
- { _MMIO(0x9888), 0x18baa000 },
- { _MMIO(0x9888), 0x1aba0002 },
- { _MMIO(0x9888), 0x16934000 },
- { _MMIO(0x9888), 0x1893000a },
- { _MMIO(0x9888), 0x0a947000 },
- { _MMIO(0x9888), 0x0c95c5c1 },
- { _MMIO(0x9888), 0x0e9500c3 },
- { _MMIO(0x9888), 0x10950000 },
- { _MMIO(0x9888), 0x0eb38000 },
- { _MMIO(0x9888), 0x16b30040 },
- { _MMIO(0x9888), 0x18b30020 },
- { _MMIO(0x9888), 0x06b48000 },
- { _MMIO(0x9888), 0x08b41000 },
- { _MMIO(0x9888), 0x0ab48000 },
- { _MMIO(0x9888), 0x06b5c500 },
- { _MMIO(0x9888), 0x08b500c3 },
- { _MMIO(0x9888), 0x0eb5c100 },
- { _MMIO(0x9888), 0x10b50000 },
- { _MMIO(0x9888), 0x16d31500 },
- { _MMIO(0x9888), 0x08d4e000 },
- { _MMIO(0x9888), 0x08d5c100 },
- { _MMIO(0x9888), 0x0ad5c3c5 },
- { _MMIO(0x9888), 0x10d50000 },
- { _MMIO(0x9888), 0x0d88f800 },
- { _MMIO(0x9888), 0x0f88000f },
- { _MMIO(0x9888), 0x038a8000 },
- { _MMIO(0x9888), 0x058a8000 },
- { _MMIO(0x9888), 0x078a8000 },
- { _MMIO(0x9888), 0x098a8000 },
- { _MMIO(0x9888), 0x0b8a8000 },
- { _MMIO(0x9888), 0x0d8a8000 },
- { _MMIO(0x9888), 0x258baaa5 },
- { _MMIO(0x9888), 0x278b002a },
- { _MMIO(0x9888), 0x238b2a80 },
- { _MMIO(0x9888), 0x0f8c4000 },
- { _MMIO(0x9888), 0x178c2000 },
- { _MMIO(0x9888), 0x198c5500 },
- { _MMIO(0x9888), 0x1b8c0015 },
- { _MMIO(0x9888), 0x078d8000 },
- { _MMIO(0x9888), 0x098da000 },
- { _MMIO(0x9888), 0x0b8da000 },
- { _MMIO(0x9888), 0x0d8da000 },
- { _MMIO(0x9888), 0x0f8da000 },
- { _MMIO(0x9888), 0x2185aaaa },
- { _MMIO(0x9888), 0x2385002a },
- { _MMIO(0x9888), 0x1f85aa00 },
- { _MMIO(0x9888), 0x0f834000 },
- { _MMIO(0x9888), 0x19835400 },
- { _MMIO(0x9888), 0x1b830155 },
- { _MMIO(0x9888), 0x03834000 },
- { _MMIO(0x9888), 0x05834000 },
- { _MMIO(0x9888), 0x07834000 },
- { _MMIO(0x9888), 0x09834000 },
- { _MMIO(0x9888), 0x0b834000 },
- { _MMIO(0x9888), 0x0d834000 },
- { _MMIO(0x9888), 0x0784c000 },
- { _MMIO(0x9888), 0x0984c000 },
- { _MMIO(0x9888), 0x0b84c000 },
- { _MMIO(0x9888), 0x0d84c000 },
- { _MMIO(0x9888), 0x0f84c000 },
- { _MMIO(0x9888), 0x01848000 },
- { _MMIO(0x9888), 0x0384c000 },
- { _MMIO(0x9888), 0x0584c000 },
- { _MMIO(0x9888), 0x1780c000 },
- { _MMIO(0x9888), 0x1980c000 },
- { _MMIO(0x9888), 0x1b80c000 },
- { _MMIO(0x9888), 0x1d80c000 },
- { _MMIO(0x9888), 0x1f80c000 },
- { _MMIO(0x9888), 0x11808000 },
- { _MMIO(0x9888), 0x1380c000 },
- { _MMIO(0x9888), 0x1580c000 },
- { _MMIO(0x9888), 0x4f800000 },
- { _MMIO(0x9888), 0x43800c42 },
- { _MMIO(0x9888), 0x51800000 },
- { _MMIO(0x9888), 0x45800063 },
- { _MMIO(0x9888), 0x53800000 },
- { _MMIO(0x9888), 0x47800800 },
- { _MMIO(0x9888), 0x21800000 },
- { _MMIO(0x9888), 0x31800000 },
- { _MMIO(0x9888), 0x4d800000 },
- { _MMIO(0x9888), 0x3f8014a4 },
- { _MMIO(0x9888), 0x41801042 },
-};
-
-static int
-get_tdl_1_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_tdl_1;
- lens[n] = ARRAY_SIZE(mux_config_tdl_1);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_tdl_2[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x30800000 },
- { _MMIO(0x2770), 0x00000002 },
- { _MMIO(0x2774), 0x0000fdff },
- { _MMIO(0x2778), 0x00000000 },
- { _MMIO(0x277c), 0x0000fe7f },
- { _MMIO(0x2780), 0x00000000 },
- { _MMIO(0x2784), 0x0000ff9f },
- { _MMIO(0x2788), 0x00000000 },
- { _MMIO(0x278c), 0x0000ffe7 },
- { _MMIO(0x2790), 0x00000002 },
- { _MMIO(0x2794), 0x0000fffb },
- { _MMIO(0x2798), 0x00000002 },
- { _MMIO(0x279c), 0x0000fffd },
-};
-
-static const struct i915_oa_reg flex_eu_config_tdl_2[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_tdl_2[] = {
- { _MMIO(0x9888), 0x16150000 },
- { _MMIO(0x9888), 0x16350000 },
- { _MMIO(0x9888), 0x16550000 },
- { _MMIO(0x9888), 0x16952e60 },
- { _MMIO(0x9888), 0x16b54d60 },
- { _MMIO(0x9888), 0x16d52e60 },
- { _MMIO(0x9888), 0x065c8000 },
- { _MMIO(0x9888), 0x085cc000 },
- { _MMIO(0x9888), 0x0a5cc000 },
- { _MMIO(0x9888), 0x0c5c4000 },
- { _MMIO(0x9888), 0x0e3d8000 },
- { _MMIO(0x9888), 0x183da000 },
- { _MMIO(0x9888), 0x06588000 },
- { _MMIO(0x9888), 0x08588000 },
- { _MMIO(0x9888), 0x0a584000 },
- { _MMIO(0x9888), 0x0e5b4000 },
- { _MMIO(0x9888), 0x185b5800 },
- { _MMIO(0x9888), 0x1a5b000a },
- { _MMIO(0x9888), 0x0e1faa00 },
- { _MMIO(0x9888), 0x101f02aa },
- { _MMIO(0x9888), 0x0e384000 },
- { _MMIO(0x9888), 0x16384000 },
- { _MMIO(0x9888), 0x18382a55 },
- { _MMIO(0x9888), 0x06398000 },
- { _MMIO(0x9888), 0x0839a000 },
- { _MMIO(0x9888), 0x0a39a000 },
- { _MMIO(0x9888), 0x0c39a000 },
- { _MMIO(0x9888), 0x0e39a000 },
- { _MMIO(0x9888), 0x1a3a02a0 },
- { _MMIO(0x9888), 0x0e138000 },
- { _MMIO(0x9888), 0x16130500 },
- { _MMIO(0x9888), 0x06148000 },
- { _MMIO(0x9888), 0x08146000 },
- { _MMIO(0x9888), 0x0615c100 },
- { _MMIO(0x9888), 0x0815c500 },
- { _MMIO(0x9888), 0x0a1500c3 },
- { _MMIO(0x9888), 0x10150000 },
- { _MMIO(0x9888), 0x16335040 },
- { _MMIO(0x9888), 0x08349000 },
- { _MMIO(0x9888), 0x0a341000 },
- { _MMIO(0x9888), 0x083500c1 },
- { _MMIO(0x9888), 0x0a35c500 },
- { _MMIO(0x9888), 0x0c3500c3 },
- { _MMIO(0x9888), 0x10350000 },
- { _MMIO(0x9888), 0x1853002a },
- { _MMIO(0x9888), 0x0a54e000 },
- { _MMIO(0x9888), 0x0c55c500 },
- { _MMIO(0x9888), 0x0e55c1c3 },
- { _MMIO(0x9888), 0x10550000 },
- { _MMIO(0x9888), 0x00dc8000 },
- { _MMIO(0x9888), 0x02dcc000 },
- { _MMIO(0x9888), 0x04dc4000 },
- { _MMIO(0x9888), 0x04bd8000 },
- { _MMIO(0x9888), 0x06bd8000 },
- { _MMIO(0x9888), 0x02d8c000 },
- { _MMIO(0x9888), 0x02db8000 },
- { _MMIO(0x9888), 0x04db4000 },
- { _MMIO(0x9888), 0x06db4000 },
- { _MMIO(0x9888), 0x08db8000 },
- { _MMIO(0x9888), 0x0c9fa000 },
- { _MMIO(0x9888), 0x0e9f00aa },
- { _MMIO(0x9888), 0x02b84000 },
- { _MMIO(0x9888), 0x04b84000 },
- { _MMIO(0x9888), 0x06b84000 },
- { _MMIO(0x9888), 0x08b84000 },
- { _MMIO(0x9888), 0x0ab88000 },
- { _MMIO(0x9888), 0x0cb88000 },
- { _MMIO(0x9888), 0x00b98000 },
- { _MMIO(0x9888), 0x02b9a000 },
- { _MMIO(0x9888), 0x04b9a000 },
- { _MMIO(0x9888), 0x06b92000 },
- { _MMIO(0x9888), 0x0aba8000 },
- { _MMIO(0x9888), 0x0cba8000 },
- { _MMIO(0x9888), 0x04938000 },
- { _MMIO(0x9888), 0x06938000 },
- { _MMIO(0x9888), 0x0494c000 },
- { _MMIO(0x9888), 0x0295cfc7 },
- { _MMIO(0x9888), 0x10950000 },
- { _MMIO(0x9888), 0x02b38000 },
- { _MMIO(0x9888), 0x08b38000 },
- { _MMIO(0x9888), 0x04b42000 },
- { _MMIO(0x9888), 0x06b41000 },
- { _MMIO(0x9888), 0x00b5c700 },
- { _MMIO(0x9888), 0x04b500cf },
- { _MMIO(0x9888), 0x10b50000 },
- { _MMIO(0x9888), 0x0ad38000 },
- { _MMIO(0x9888), 0x0cd38000 },
- { _MMIO(0x9888), 0x06d46000 },
- { _MMIO(0x9888), 0x04d5c700 },
- { _MMIO(0x9888), 0x06d500cf },
- { _MMIO(0x9888), 0x10d50000 },
- { _MMIO(0x9888), 0x03888000 },
- { _MMIO(0x9888), 0x05888000 },
- { _MMIO(0x9888), 0x07888000 },
- { _MMIO(0x9888), 0x09888000 },
- { _MMIO(0x9888), 0x0b888000 },
- { _MMIO(0x9888), 0x0d880400 },
- { _MMIO(0x9888), 0x0f8a8000 },
- { _MMIO(0x9888), 0x198a8000 },
- { _MMIO(0x9888), 0x1b8aaaa0 },
- { _MMIO(0x9888), 0x1d8a0002 },
- { _MMIO(0x9888), 0x258b555a },
- { _MMIO(0x9888), 0x278b0015 },
- { _MMIO(0x9888), 0x238b5500 },
- { _MMIO(0x9888), 0x038c4000 },
- { _MMIO(0x9888), 0x058c4000 },
- { _MMIO(0x9888), 0x078c4000 },
- { _MMIO(0x9888), 0x098c4000 },
- { _MMIO(0x9888), 0x0b8c4000 },
- { _MMIO(0x9888), 0x0d8c4000 },
- { _MMIO(0x9888), 0x018d8000 },
- { _MMIO(0x9888), 0x038da000 },
- { _MMIO(0x9888), 0x058da000 },
- { _MMIO(0x9888), 0x078d2000 },
- { _MMIO(0x9888), 0x2185aaaa },
- { _MMIO(0x9888), 0x2385002a },
- { _MMIO(0x9888), 0x1f85aa00 },
- { _MMIO(0x9888), 0x0f834000 },
- { _MMIO(0x9888), 0x19835400 },
- { _MMIO(0x9888), 0x1b830155 },
- { _MMIO(0x9888), 0x03834000 },
- { _MMIO(0x9888), 0x05834000 },
- { _MMIO(0x9888), 0x07834000 },
- { _MMIO(0x9888), 0x09834000 },
- { _MMIO(0x9888), 0x0b834000 },
- { _MMIO(0x9888), 0x0d834000 },
- { _MMIO(0x9888), 0x0784c000 },
- { _MMIO(0x9888), 0x0984c000 },
- { _MMIO(0x9888), 0x0b84c000 },
- { _MMIO(0x9888), 0x0d84c000 },
- { _MMIO(0x9888), 0x0f84c000 },
- { _MMIO(0x9888), 0x01848000 },
- { _MMIO(0x9888), 0x0384c000 },
- { _MMIO(0x9888), 0x0584c000 },
- { _MMIO(0x9888), 0x1780c000 },
- { _MMIO(0x9888), 0x1980c000 },
- { _MMIO(0x9888), 0x1b80c000 },
- { _MMIO(0x9888), 0x1d80c000 },
- { _MMIO(0x9888), 0x1f80c000 },
- { _MMIO(0x9888), 0x11808000 },
- { _MMIO(0x9888), 0x1380c000 },
- { _MMIO(0x9888), 0x1580c000 },
- { _MMIO(0x9888), 0x4f800000 },
- { _MMIO(0x9888), 0x43800882 },
- { _MMIO(0x9888), 0x51800000 },
- { _MMIO(0x9888), 0x45801082 },
- { _MMIO(0x9888), 0x53800000 },
- { _MMIO(0x9888), 0x478014a5 },
- { _MMIO(0x9888), 0x21800000 },
- { _MMIO(0x9888), 0x31800000 },
- { _MMIO(0x9888), 0x4d800000 },
- { _MMIO(0x9888), 0x3f800002 },
- { _MMIO(0x9888), 0x41800c62 },
-};
-
-static int
-get_tdl_2_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_tdl_2;
- lens[n] = ARRAY_SIZE(mux_config_tdl_2);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_compute_extra[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x00800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
-};
-
-static const struct i915_oa_reg flex_eu_config_compute_extra[] = {
- { _MMIO(0xe458), 0x00001000 },
- { _MMIO(0xe558), 0x00003002 },
- { _MMIO(0xe658), 0x00005004 },
- { _MMIO(0xe758), 0x00011010 },
- { _MMIO(0xe45c), 0x00050012 },
- { _MMIO(0xe55c), 0x00052051 },
- { _MMIO(0xe65c), 0x00000008 },
-};
-
-static const struct i915_oa_reg mux_config_compute_extra[] = {
- { _MMIO(0x9888), 0x161503e0 },
- { _MMIO(0x9888), 0x163503e0 },
- { _MMIO(0x9888), 0x165503e0 },
- { _MMIO(0x9888), 0x169503e0 },
- { _MMIO(0x9888), 0x16b503e0 },
- { _MMIO(0x9888), 0x16d503e0 },
- { _MMIO(0x9888), 0x045cc000 },
- { _MMIO(0x9888), 0x083d8000 },
- { _MMIO(0x9888), 0x04584000 },
- { _MMIO(0x9888), 0x085b4000 },
- { _MMIO(0x9888), 0x0a5b8000 },
- { _MMIO(0x9888), 0x0e1f00a8 },
- { _MMIO(0x9888), 0x08384000 },
- { _MMIO(0x9888), 0x0a384000 },
- { _MMIO(0x9888), 0x0c388000 },
- { _MMIO(0x9888), 0x0439a000 },
- { _MMIO(0x9888), 0x06392000 },
- { _MMIO(0x9888), 0x0c3a8000 },
- { _MMIO(0x9888), 0x08138000 },
- { _MMIO(0x9888), 0x06141000 },
- { _MMIO(0x9888), 0x041500c3 },
- { _MMIO(0x9888), 0x10150000 },
- { _MMIO(0x9888), 0x0a338000 },
- { _MMIO(0x9888), 0x06342000 },
- { _MMIO(0x9888), 0x0435c300 },
- { _MMIO(0x9888), 0x10350000 },
- { _MMIO(0x9888), 0x0c538000 },
- { _MMIO(0x9888), 0x06544000 },
- { _MMIO(0x9888), 0x065500c3 },
- { _MMIO(0x9888), 0x10550000 },
- { _MMIO(0x9888), 0x00dc8000 },
- { _MMIO(0x9888), 0x02dc4000 },
- { _MMIO(0x9888), 0x02bd8000 },
- { _MMIO(0x9888), 0x00d88000 },
- { _MMIO(0x9888), 0x02db4000 },
- { _MMIO(0x9888), 0x04db8000 },
- { _MMIO(0x9888), 0x0c9fa000 },
- { _MMIO(0x9888), 0x0e9f0002 },
- { _MMIO(0x9888), 0x02b84000 },
- { _MMIO(0x9888), 0x04b84000 },
- { _MMIO(0x9888), 0x06b88000 },
- { _MMIO(0x9888), 0x00b98000 },
- { _MMIO(0x9888), 0x02b9a000 },
- { _MMIO(0x9888), 0x06ba8000 },
- { _MMIO(0x9888), 0x02938000 },
- { _MMIO(0x9888), 0x04942000 },
- { _MMIO(0x9888), 0x0095c300 },
- { _MMIO(0x9888), 0x10950000 },
- { _MMIO(0x9888), 0x04b38000 },
- { _MMIO(0x9888), 0x04b44000 },
- { _MMIO(0x9888), 0x02b500c3 },
- { _MMIO(0x9888), 0x10b50000 },
- { _MMIO(0x9888), 0x06d38000 },
- { _MMIO(0x9888), 0x04d48000 },
- { _MMIO(0x9888), 0x02d5c300 },
- { _MMIO(0x9888), 0x10d50000 },
- { _MMIO(0x9888), 0x03888000 },
- { _MMIO(0x9888), 0x05888000 },
- { _MMIO(0x9888), 0x07888000 },
- { _MMIO(0x9888), 0x098a8000 },
- { _MMIO(0x9888), 0x0b8a8000 },
- { _MMIO(0x9888), 0x0d8a8000 },
- { _MMIO(0x9888), 0x238b3500 },
- { _MMIO(0x9888), 0x258b0005 },
- { _MMIO(0x9888), 0x038c4000 },
- { _MMIO(0x9888), 0x058c4000 },
- { _MMIO(0x9888), 0x078c4000 },
- { _MMIO(0x9888), 0x018d8000 },
- { _MMIO(0x9888), 0x038da000 },
- { _MMIO(0x9888), 0x1f85aa00 },
- { _MMIO(0x9888), 0x2185000a },
- { _MMIO(0x9888), 0x03834000 },
- { _MMIO(0x9888), 0x05834000 },
- { _MMIO(0x9888), 0x07834000 },
- { _MMIO(0x9888), 0x09834000 },
- { _MMIO(0x9888), 0x0b834000 },
- { _MMIO(0x9888), 0x0d834000 },
- { _MMIO(0x9888), 0x01848000 },
- { _MMIO(0x9888), 0x0384c000 },
- { _MMIO(0x9888), 0x0584c000 },
- { _MMIO(0x9888), 0x07844000 },
- { _MMIO(0x9888), 0x11808000 },
- { _MMIO(0x9888), 0x1380c000 },
- { _MMIO(0x9888), 0x1580c000 },
- { _MMIO(0x9888), 0x17804000 },
- { _MMIO(0x9888), 0x21800000 },
- { _MMIO(0x9888), 0x4d800000 },
- { _MMIO(0x9888), 0x3f800c40 },
- { _MMIO(0x9888), 0x4f800000 },
- { _MMIO(0x9888), 0x41801482 },
- { _MMIO(0x9888), 0x31800000 },
-};
-
-static int
-get_compute_extra_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_compute_extra;
- lens[n] = ARRAY_SIZE(mux_config_compute_extra);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_vme_pipe[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x30800000 },
- { _MMIO(0x2770), 0x00100030 },
- { _MMIO(0x2774), 0x0000fff9 },
- { _MMIO(0x2778), 0x00000002 },
- { _MMIO(0x277c), 0x0000fffc },
- { _MMIO(0x2780), 0x00000002 },
- { _MMIO(0x2784), 0x0000fff3 },
- { _MMIO(0x2788), 0x00100180 },
- { _MMIO(0x278c), 0x0000ffcf },
- { _MMIO(0x2790), 0x00000002 },
- { _MMIO(0x2794), 0x0000ffcf },
- { _MMIO(0x2798), 0x00000002 },
- { _MMIO(0x279c), 0x0000ff3f },
-};
-
-static const struct i915_oa_reg flex_eu_config_vme_pipe[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00008003 },
-};
-
-static const struct i915_oa_reg mux_config_vme_pipe[] = {
- { _MMIO(0x9888), 0x14100812 },
- { _MMIO(0x9888), 0x14125800 },
- { _MMIO(0x9888), 0x161200c0 },
- { _MMIO(0x9888), 0x14300812 },
- { _MMIO(0x9888), 0x14325800 },
- { _MMIO(0x9888), 0x163200c0 },
- { _MMIO(0x9888), 0x005c4000 },
- { _MMIO(0x9888), 0x065c8000 },
- { _MMIO(0x9888), 0x085cc000 },
- { _MMIO(0x9888), 0x0a5cc000 },
- { _MMIO(0x9888), 0x0c5cc000 },
- { _MMIO(0x9888), 0x003d8000 },
- { _MMIO(0x9888), 0x0e3d8000 },
- { _MMIO(0x9888), 0x183d2800 },
- { _MMIO(0x9888), 0x00584000 },
- { _MMIO(0x9888), 0x06588000 },
- { _MMIO(0x9888), 0x0858c000 },
- { _MMIO(0x9888), 0x005b4000 },
- { _MMIO(0x9888), 0x0e5b4000 },
- { _MMIO(0x9888), 0x185b9400 },
- { _MMIO(0x9888), 0x1a5b002a },
- { _MMIO(0x9888), 0x0c1f0800 },
- { _MMIO(0x9888), 0x0e1faa00 },
- { _MMIO(0x9888), 0x101f002a },
- { _MMIO(0x9888), 0x00384000 },
- { _MMIO(0x9888), 0x0e384000 },
- { _MMIO(0x9888), 0x16384000 },
- { _MMIO(0x9888), 0x18380155 },
- { _MMIO(0x9888), 0x00392000 },
- { _MMIO(0x9888), 0x06398000 },
- { _MMIO(0x9888), 0x0839a000 },
- { _MMIO(0x9888), 0x0a39a000 },
- { _MMIO(0x9888), 0x0c39a000 },
- { _MMIO(0x9888), 0x00100047 },
- { _MMIO(0x9888), 0x06101a80 },
- { _MMIO(0x9888), 0x10100000 },
- { _MMIO(0x9888), 0x0810c000 },
- { _MMIO(0x9888), 0x0811c000 },
- { _MMIO(0x9888), 0x08126151 },
- { _MMIO(0x9888), 0x10120000 },
- { _MMIO(0x9888), 0x00134000 },
- { _MMIO(0x9888), 0x0e134000 },
- { _MMIO(0x9888), 0x161300a0 },
- { _MMIO(0x9888), 0x0a301ac7 },
- { _MMIO(0x9888), 0x10300000 },
- { _MMIO(0x9888), 0x0c30c000 },
- { _MMIO(0x9888), 0x0c31c000 },
- { _MMIO(0x9888), 0x0c326151 },
- { _MMIO(0x9888), 0x10320000 },
- { _MMIO(0x9888), 0x16332a00 },
- { _MMIO(0x9888), 0x18330001 },
- { _MMIO(0x9888), 0x018a8000 },
- { _MMIO(0x9888), 0x0f8a8000 },
- { _MMIO(0x9888), 0x198a8000 },
- { _MMIO(0x9888), 0x1b8a2aa0 },
- { _MMIO(0x9888), 0x238b0020 },
- { _MMIO(0x9888), 0x258b5550 },
- { _MMIO(0x9888), 0x278b0001 },
- { _MMIO(0x9888), 0x1f850080 },
- { _MMIO(0x9888), 0x2185aaa0 },
- { _MMIO(0x9888), 0x23850002 },
- { _MMIO(0x9888), 0x01834000 },
- { _MMIO(0x9888), 0x0f834000 },
- { _MMIO(0x9888), 0x19835400 },
- { _MMIO(0x9888), 0x1b830015 },
- { _MMIO(0x9888), 0x01844000 },
- { _MMIO(0x9888), 0x07848000 },
- { _MMIO(0x9888), 0x0984c000 },
- { _MMIO(0x9888), 0x0b84c000 },
- { _MMIO(0x9888), 0x0d84c000 },
- { _MMIO(0x9888), 0x11804000 },
- { _MMIO(0x9888), 0x17808000 },
- { _MMIO(0x9888), 0x1980c000 },
- { _MMIO(0x9888), 0x1b80c000 },
- { _MMIO(0x9888), 0x1d80c000 },
- { _MMIO(0x9888), 0x4d800000 },
- { _MMIO(0x9888), 0x3d800800 },
- { _MMIO(0x9888), 0x4f800000 },
- { _MMIO(0x9888), 0x43800002 },
- { _MMIO(0x9888), 0x51800000 },
- { _MMIO(0x9888), 0x45800884 },
- { _MMIO(0x9888), 0x53800000 },
- { _MMIO(0x9888), 0x47800002 },
- { _MMIO(0x9888), 0x21800000 },
- { _MMIO(0x9888), 0x31800000 },
-};
-
-static int
-get_vme_pipe_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_vme_pipe;
- lens[n] = ARRAY_SIZE(mux_config_vme_pipe);
- n++;
-
- return n;
-}
-
static const struct i915_oa_reg b_counter_config_test_oa[] = {
{ _MMIO(0x2740), 0x00000000 },
{ _MMIO(0x2744), 0x00800000 },
@@ -4035,6 +60,7 @@ static const struct i915_oa_reg flex_eu_config_test_oa[] = {
};
static const struct i915_oa_reg mux_config_test_oa[] = {
+ { _MMIO(0x9840), 0x000000a0 },
{ _MMIO(0x9888), 0x198b0000 },
{ _MMIO(0x9888), 0x078b0066 },
{ _MMIO(0x9888), 0x118b0000 },
@@ -4047,1330 +73,38 @@ static const struct i915_oa_reg mux_config_test_oa[] = {
{ _MMIO(0x9888), 0x4f800000 },
{ _MMIO(0x9888), 0x41800000 },
{ _MMIO(0x9888), 0x31800000 },
-};
-
-static int
-get_test_oa_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_test_oa;
- lens[n] = ARRAY_SIZE(mux_config_test_oa);
- n++;
-
- return n;
-}
-
-int i915_oa_select_metric_set_bdw(struct drm_i915_private *dev_priv)
-{
- dev_priv->perf.oa.n_mux_configs = 0;
- dev_priv->perf.oa.b_counter_regs = NULL;
- dev_priv->perf.oa.b_counter_regs_len = 0;
- dev_priv->perf.oa.flex_regs = NULL;
- dev_priv->perf.oa.flex_regs_len = 0;
-
- switch (dev_priv->perf.oa.metrics_set) {
- case METRIC_SET_ID_RENDER_BASIC:
- dev_priv->perf.oa.n_mux_configs =
- get_render_basic_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_BASIC\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_render_basic;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_render_basic);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_render_basic;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_render_basic);
-
- return 0;
- case METRIC_SET_ID_COMPUTE_BASIC:
- dev_priv->perf.oa.n_mux_configs =
- get_compute_basic_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_BASIC\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_compute_basic;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_compute_basic);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_compute_basic;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_compute_basic);
-
- return 0;
- case METRIC_SET_ID_RENDER_PIPE_PROFILE:
- dev_priv->perf.oa.n_mux_configs =
- get_render_pipe_profile_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_PIPE_PROFILE\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_render_pipe_profile;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_render_pipe_profile);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_render_pipe_profile;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_render_pipe_profile);
-
- return 0;
- case METRIC_SET_ID_MEMORY_READS:
- dev_priv->perf.oa.n_mux_configs =
- get_memory_reads_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_READS\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_memory_reads;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_memory_reads);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_memory_reads;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_memory_reads);
-
- return 0;
- case METRIC_SET_ID_MEMORY_WRITES:
- dev_priv->perf.oa.n_mux_configs =
- get_memory_writes_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_WRITES\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_memory_writes;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_memory_writes);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_memory_writes;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_memory_writes);
-
- return 0;
- case METRIC_SET_ID_COMPUTE_EXTENDED:
- dev_priv->perf.oa.n_mux_configs =
- get_compute_extended_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTENDED\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_compute_extended;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_compute_extended);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_compute_extended;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_compute_extended);
-
- return 0;
- case METRIC_SET_ID_COMPUTE_L3_CACHE:
- dev_priv->perf.oa.n_mux_configs =
- get_compute_l3_cache_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_L3_CACHE\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_compute_l3_cache;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_compute_l3_cache);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_compute_l3_cache;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_compute_l3_cache);
-
- return 0;
- case METRIC_SET_ID_DATA_PORT_READS_COALESCING:
- dev_priv->perf.oa.n_mux_configs =
- get_data_port_reads_coalescing_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"DATA_PORT_READS_COALESCING\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_data_port_reads_coalescing;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_data_port_reads_coalescing);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_data_port_reads_coalescing;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_data_port_reads_coalescing);
-
- return 0;
- case METRIC_SET_ID_DATA_PORT_WRITES_COALESCING:
- dev_priv->perf.oa.n_mux_configs =
- get_data_port_writes_coalescing_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"DATA_PORT_WRITES_COALESCING\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_data_port_writes_coalescing;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_data_port_writes_coalescing);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_data_port_writes_coalescing;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_data_port_writes_coalescing);
-
- return 0;
- case METRIC_SET_ID_HDC_AND_SF:
- dev_priv->perf.oa.n_mux_configs =
- get_hdc_and_sf_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"HDC_AND_SF\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_hdc_and_sf;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_hdc_and_sf);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_hdc_and_sf;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_hdc_and_sf);
-
- return 0;
- case METRIC_SET_ID_L3_1:
- dev_priv->perf.oa.n_mux_configs =
- get_l3_1_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_1\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_l3_1;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_l3_1);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_l3_1;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_l3_1);
-
- return 0;
- case METRIC_SET_ID_L3_2:
- dev_priv->perf.oa.n_mux_configs =
- get_l3_2_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_2\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_l3_2;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_l3_2);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_l3_2;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_l3_2);
-
- return 0;
- case METRIC_SET_ID_L3_3:
- dev_priv->perf.oa.n_mux_configs =
- get_l3_3_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_3\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_l3_3;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_l3_3);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_l3_3;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_l3_3);
-
- return 0;
- case METRIC_SET_ID_L3_4:
- dev_priv->perf.oa.n_mux_configs =
- get_l3_4_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_4\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_l3_4;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_l3_4);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_l3_4;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_l3_4);
-
- return 0;
- case METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND:
- dev_priv->perf.oa.n_mux_configs =
- get_rasterizer_and_pixel_backend_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"RASTERIZER_AND_PIXEL_BACKEND\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_rasterizer_and_pixel_backend;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_rasterizer_and_pixel_backend);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_rasterizer_and_pixel_backend;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_rasterizer_and_pixel_backend);
-
- return 0;
- case METRIC_SET_ID_SAMPLER_1:
- dev_priv->perf.oa.n_mux_configs =
- get_sampler_1_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"SAMPLER_1\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_sampler_1;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_sampler_1);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_sampler_1;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_sampler_1);
-
- return 0;
- case METRIC_SET_ID_SAMPLER_2:
- dev_priv->perf.oa.n_mux_configs =
- get_sampler_2_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"SAMPLER_2\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_sampler_2;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_sampler_2);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_sampler_2;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_sampler_2);
-
- return 0;
- case METRIC_SET_ID_TDL_1:
- dev_priv->perf.oa.n_mux_configs =
- get_tdl_1_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"TDL_1\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_tdl_1;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_tdl_1);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_tdl_1;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_tdl_1);
-
- return 0;
- case METRIC_SET_ID_TDL_2:
- dev_priv->perf.oa.n_mux_configs =
- get_tdl_2_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"TDL_2\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_tdl_2;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_tdl_2);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_tdl_2;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_tdl_2);
-
- return 0;
- case METRIC_SET_ID_COMPUTE_EXTRA:
- dev_priv->perf.oa.n_mux_configs =
- get_compute_extra_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTRA\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_compute_extra;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_compute_extra);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_compute_extra;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_compute_extra);
-
- return 0;
- case METRIC_SET_ID_VME_PIPE:
- dev_priv->perf.oa.n_mux_configs =
- get_vme_pipe_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"VME_PIPE\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_vme_pipe;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_vme_pipe);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_vme_pipe;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_vme_pipe);
-
- return 0;
- case METRIC_SET_ID_TEST_OA:
- dev_priv->perf.oa.n_mux_configs =
- get_test_oa_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"TEST_OA\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_test_oa;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_test_oa;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_test_oa);
-
- return 0;
- default:
- return -ENODEV;
- }
-}
-
-static ssize_t
-show_render_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_BASIC);
-}
-
-static struct device_attribute dev_attr_render_basic_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_render_basic_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_render_basic[] = {
- &dev_attr_render_basic_id.attr,
- NULL,
-};
-
-static struct attribute_group group_render_basic = {
- .name = "b541bd57-0e0f-4154-b4c0-5858010a2bf7",
- .attrs = attrs_render_basic,
-};
-
-static ssize_t
-show_compute_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_BASIC);
-}
-
-static struct device_attribute dev_attr_compute_basic_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_compute_basic_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_compute_basic[] = {
- &dev_attr_compute_basic_id.attr,
- NULL,
-};
-
-static struct attribute_group group_compute_basic = {
- .name = "35fbc9b2-a891-40a6-a38d-022bb7057552",
- .attrs = attrs_compute_basic,
-};
-
-static ssize_t
-show_render_pipe_profile_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_PIPE_PROFILE);
-}
-
-static struct device_attribute dev_attr_render_pipe_profile_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_render_pipe_profile_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_render_pipe_profile[] = {
- &dev_attr_render_pipe_profile_id.attr,
- NULL,
-};
-
-static struct attribute_group group_render_pipe_profile = {
- .name = "233d0544-fff7-4281-8291-e02f222aff72",
- .attrs = attrs_render_pipe_profile,
-};
-
-static ssize_t
-show_memory_reads_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_READS);
-}
-
-static struct device_attribute dev_attr_memory_reads_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_memory_reads_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_memory_reads[] = {
- &dev_attr_memory_reads_id.attr,
- NULL,
-};
-
-static struct attribute_group group_memory_reads = {
- .name = "2b255d48-2117-4fef-a8f7-f151e1d25a2c",
- .attrs = attrs_memory_reads,
-};
-
-static ssize_t
-show_memory_writes_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_WRITES);
-}
-
-static struct device_attribute dev_attr_memory_writes_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_memory_writes_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_memory_writes[] = {
- &dev_attr_memory_writes_id.attr,
- NULL,
-};
-
-static struct attribute_group group_memory_writes = {
- .name = "f7fd3220-b466-4a4d-9f98-b0caf3f2394c",
- .attrs = attrs_memory_writes,
-};
-
-static ssize_t
-show_compute_extended_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_EXTENDED);
-}
-
-static struct device_attribute dev_attr_compute_extended_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_compute_extended_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_compute_extended[] = {
- &dev_attr_compute_extended_id.attr,
- NULL,
-};
-
-static struct attribute_group group_compute_extended = {
- .name = "e99ccaca-821c-4df9-97a7-96bdb7204e43",
- .attrs = attrs_compute_extended,
-};
-
-static ssize_t
-show_compute_l3_cache_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_L3_CACHE);
-}
-
-static struct device_attribute dev_attr_compute_l3_cache_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_compute_l3_cache_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_compute_l3_cache[] = {
- &dev_attr_compute_l3_cache_id.attr,
- NULL,
-};
-
-static struct attribute_group group_compute_l3_cache = {
- .name = "27a364dc-8225-4ecb-b607-d6f1925598d9",
- .attrs = attrs_compute_l3_cache,
-};
-
-static ssize_t
-show_data_port_reads_coalescing_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_DATA_PORT_READS_COALESCING);
-}
-
-static struct device_attribute dev_attr_data_port_reads_coalescing_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_data_port_reads_coalescing_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_data_port_reads_coalescing[] = {
- &dev_attr_data_port_reads_coalescing_id.attr,
- NULL,
-};
-
-static struct attribute_group group_data_port_reads_coalescing = {
- .name = "857fc630-2f09-4804-85f1-084adfadd5ab",
- .attrs = attrs_data_port_reads_coalescing,
-};
-
-static ssize_t
-show_data_port_writes_coalescing_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_DATA_PORT_WRITES_COALESCING);
-}
-
-static struct device_attribute dev_attr_data_port_writes_coalescing_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_data_port_writes_coalescing_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_data_port_writes_coalescing[] = {
- &dev_attr_data_port_writes_coalescing_id.attr,
- NULL,
-};
-
-static struct attribute_group group_data_port_writes_coalescing = {
- .name = "343ebc99-4a55-414c-8c17-d8e259cf5e20",
- .attrs = attrs_data_port_writes_coalescing,
-};
-
-static ssize_t
-show_hdc_and_sf_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_HDC_AND_SF);
-}
-
-static struct device_attribute dev_attr_hdc_and_sf_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_hdc_and_sf_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_hdc_and_sf[] = {
- &dev_attr_hdc_and_sf_id.attr,
- NULL,
-};
-
-static struct attribute_group group_hdc_and_sf = {
- .name = "7bdafd88-a4fa-4ed5-bc09-1a977aa5be3e",
- .attrs = attrs_hdc_and_sf,
-};
-
-static ssize_t
-show_l3_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_L3_1);
-}
-
-static struct device_attribute dev_attr_l3_1_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_l3_1_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_l3_1[] = {
- &dev_attr_l3_1_id.attr,
- NULL,
-};
-
-static struct attribute_group group_l3_1 = {
- .name = "9385ebb2-f34f-4aa5-aec5-7e9cbbea0f0b",
- .attrs = attrs_l3_1,
-};
-
-static ssize_t
-show_l3_2_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_L3_2);
-}
-
-static struct device_attribute dev_attr_l3_2_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_l3_2_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_l3_2[] = {
- &dev_attr_l3_2_id.attr,
- NULL,
-};
-
-static struct attribute_group group_l3_2 = {
- .name = "446ae59b-ff2e-41c9-b49e-0184a54bf00a",
- .attrs = attrs_l3_2,
-};
-
-static ssize_t
-show_l3_3_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_L3_3);
-}
-
-static struct device_attribute dev_attr_l3_3_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_l3_3_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_l3_3[] = {
- &dev_attr_l3_3_id.attr,
- NULL,
-};
-
-static struct attribute_group group_l3_3 = {
- .name = "84a7956f-1ea4-4d0d-837f-e39a0376e38c",
- .attrs = attrs_l3_3,
-};
-
-static ssize_t
-show_l3_4_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_L3_4);
-}
-
-static struct device_attribute dev_attr_l3_4_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_l3_4_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_l3_4[] = {
- &dev_attr_l3_4_id.attr,
- NULL,
-};
-
-static struct attribute_group group_l3_4 = {
- .name = "92b493d9-df18-4bed-be06-5cac6f2a6f5f",
- .attrs = attrs_l3_4,
-};
-
-static ssize_t
-show_rasterizer_and_pixel_backend_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND);
-}
-
-static struct device_attribute dev_attr_rasterizer_and_pixel_backend_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_rasterizer_and_pixel_backend_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_rasterizer_and_pixel_backend[] = {
- &dev_attr_rasterizer_and_pixel_backend_id.attr,
- NULL,
-};
-
-static struct attribute_group group_rasterizer_and_pixel_backend = {
- .name = "14345c35-cc46-40d0-bb04-6ed1fbb43679",
- .attrs = attrs_rasterizer_and_pixel_backend,
-};
-
-static ssize_t
-show_sampler_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_SAMPLER_1);
-}
-
-static struct device_attribute dev_attr_sampler_1_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_sampler_1_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_sampler_1[] = {
- &dev_attr_sampler_1_id.attr,
- NULL,
-};
-
-static struct attribute_group group_sampler_1 = {
- .name = "f0c6ba37-d3d3-4211-91b5-226730312a54",
- .attrs = attrs_sampler_1,
-};
-
-static ssize_t
-show_sampler_2_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_SAMPLER_2);
-}
-
-static struct device_attribute dev_attr_sampler_2_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_sampler_2_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_sampler_2[] = {
- &dev_attr_sampler_2_id.attr,
- NULL,
-};
-
-static struct attribute_group group_sampler_2 = {
- .name = "30bf3702-48cf-4bca-b412-7cf50bb2f564",
- .attrs = attrs_sampler_2,
-};
-
-static ssize_t
-show_tdl_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_TDL_1);
-}
-
-static struct device_attribute dev_attr_tdl_1_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_tdl_1_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_tdl_1[] = {
- &dev_attr_tdl_1_id.attr,
- NULL,
-};
-
-static struct attribute_group group_tdl_1 = {
- .name = "238bec85-df05-44f3-b905-d166712f2451",
- .attrs = attrs_tdl_1,
-};
-
-static ssize_t
-show_tdl_2_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_TDL_2);
-}
-
-static struct device_attribute dev_attr_tdl_2_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_tdl_2_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_tdl_2[] = {
- &dev_attr_tdl_2_id.attr,
- NULL,
-};
-
-static struct attribute_group group_tdl_2 = {
- .name = "24bf02cd-8693-4583-981c-c4165b33da01",
- .attrs = attrs_tdl_2,
-};
-
-static ssize_t
-show_compute_extra_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_EXTRA);
-}
-
-static struct device_attribute dev_attr_compute_extra_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_compute_extra_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_compute_extra[] = {
- &dev_attr_compute_extra_id.attr,
- NULL,
-};
-
-static struct attribute_group group_compute_extra = {
- .name = "8fb61ba2-2fbb-454c-a136-2dec5a8a595e",
- .attrs = attrs_compute_extra,
-};
-
-static ssize_t
-show_vme_pipe_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_VME_PIPE);
-}
-
-static struct device_attribute dev_attr_vme_pipe_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_vme_pipe_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_vme_pipe[] = {
- &dev_attr_vme_pipe_id.attr,
- NULL,
-};
-
-static struct attribute_group group_vme_pipe = {
- .name = "e1743ca0-7fc8-410b-a066-de7bbb9280b7",
- .attrs = attrs_vme_pipe,
+ { _MMIO(0x9840), 0x00000080 },
};
static ssize_t
show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", METRIC_SET_ID_TEST_OA);
+ return sprintf(buf, "1\n");
}
-static struct device_attribute dev_attr_test_oa_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_test_oa_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_test_oa[] = {
- &dev_attr_test_oa_id.attr,
- NULL,
-};
-
-static struct attribute_group group_test_oa = {
- .name = "d6de6f55-e526-4f79-a6a6-d7315c09044e",
- .attrs = attrs_test_oa,
-};
-
-int
-i915_perf_register_sysfs_bdw(struct drm_i915_private *dev_priv)
+void
+i915_perf_load_test_config_bdw(struct drm_i915_private *dev_priv)
{
- const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
- int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];
- int ret = 0;
+ strncpy(dev_priv->perf.oa.test_config.uuid,
+ "d6de6f55-e526-4f79-a6a6-d7315c09044e",
+ UUID_STRING_LEN);
+ dev_priv->perf.oa.test_config.id = 1;
- if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_basic);
- if (ret)
- goto error_render_basic;
- }
- if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
- if (ret)
- goto error_compute_basic;
- }
- if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
- if (ret)
- goto error_render_pipe_profile;
- }
- if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
- if (ret)
- goto error_memory_reads;
- }
- if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
- if (ret)
- goto error_memory_writes;
- }
- if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
- if (ret)
- goto error_compute_extended;
- }
- if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
- if (ret)
- goto error_compute_l3_cache;
- }
- if (get_data_port_reads_coalescing_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_data_port_reads_coalescing);
- if (ret)
- goto error_data_port_reads_coalescing;
- }
- if (get_data_port_writes_coalescing_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_data_port_writes_coalescing);
- if (ret)
- goto error_data_port_writes_coalescing;
- }
- if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
- if (ret)
- goto error_hdc_and_sf;
- }
- if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_1);
- if (ret)
- goto error_l3_1;
- }
- if (get_l3_2_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_2);
- if (ret)
- goto error_l3_2;
- }
- if (get_l3_3_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_3);
- if (ret)
- goto error_l3_3;
- }
- if (get_l3_4_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_4);
- if (ret)
- goto error_l3_4;
- }
- if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
- if (ret)
- goto error_rasterizer_and_pixel_backend;
- }
- if (get_sampler_1_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_sampler_1);
- if (ret)
- goto error_sampler_1;
- }
- if (get_sampler_2_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_sampler_2);
- if (ret)
- goto error_sampler_2;
- }
- if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
- if (ret)
- goto error_tdl_1;
- }
- if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
- if (ret)
- goto error_tdl_2;
- }
- if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
- if (ret)
- goto error_compute_extra;
- }
- if (get_vme_pipe_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_vme_pipe);
- if (ret)
- goto error_vme_pipe;
- }
- if (get_test_oa_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_test_oa);
- if (ret)
- goto error_test_oa;
- }
+ dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- return 0;
+ dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-error_test_oa:
- if (get_vme_pipe_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_vme_pipe);
-error_vme_pipe:
- if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
-error_compute_extra:
- if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
-error_tdl_2:
- if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
-error_tdl_1:
- if (get_sampler_2_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler_2);
-error_sampler_2:
- if (get_sampler_1_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler_1);
-error_sampler_1:
- if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
-error_rasterizer_and_pixel_backend:
- if (get_l3_4_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_4);
-error_l3_4:
- if (get_l3_3_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_3);
-error_l3_3:
- if (get_l3_2_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_2);
-error_l3_2:
- if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_1);
-error_l3_1:
- if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
-error_hdc_and_sf:
- if (get_data_port_writes_coalescing_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_data_port_writes_coalescing);
-error_data_port_writes_coalescing:
- if (get_data_port_reads_coalescing_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_data_port_reads_coalescing);
-error_data_port_reads_coalescing:
- if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
-error_compute_l3_cache:
- if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
-error_compute_extended:
- if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
-error_memory_writes:
- if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
-error_memory_reads:
- if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
-error_render_pipe_profile:
- if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
-error_compute_basic:
- if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
-error_render_basic:
- return ret;
-}
+ dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-void
-i915_perf_unregister_sysfs_bdw(struct drm_i915_private *dev_priv)
-{
- const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
- int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];
+ dev_priv->perf.oa.test_config.sysfs_metric.name = "d6de6f55-e526-4f79-a6a6-d7315c09044e";
+ dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+
+ dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
- if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
- if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
- if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
- if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
- if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
- if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
- if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
- if (get_data_port_reads_coalescing_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_data_port_reads_coalescing);
- if (get_data_port_writes_coalescing_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_data_port_writes_coalescing);
- if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
- if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_1);
- if (get_l3_2_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_2);
- if (get_l3_3_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_3);
- if (get_l3_4_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_4);
- if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
- if (get_sampler_1_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler_1);
- if (get_sampler_2_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler_2);
- if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
- if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
- if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
- if (get_vme_pipe_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_vme_pipe);
- if (get_test_oa_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_test_oa);
+ dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
}
diff --git a/drivers/gpu/drm/i915/i915_oa_bdw.h b/drivers/gpu/drm/i915/i915_oa_bdw.h
index 6363ff9f64c0..b812d16162ac 100644
--- a/drivers/gpu/drm/i915/i915_oa_bdw.h
+++ b/drivers/gpu/drm/i915/i915_oa_bdw.h
@@ -29,12 +29,6 @@
#ifndef __I915_OA_BDW_H__
#define __I915_OA_BDW_H__
-extern int i915_oa_n_builtin_metric_sets_bdw;
-
-extern int i915_oa_select_metric_set_bdw(struct drm_i915_private *dev_priv);
-
-extern int i915_perf_register_sysfs_bdw(struct drm_i915_private *dev_priv);
-
-extern void i915_perf_unregister_sysfs_bdw(struct drm_i915_private *dev_priv);
+extern void i915_perf_load_test_config_bdw(struct drm_i915_private *dev_priv);
#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_bxt.c b/drivers/gpu/drm/i915/i915_oa_bxt.c
index 93864d8f32dd..b69b900de0fe 100644
--- a/drivers/gpu/drm/i915/i915_oa_bxt.c
+++ b/drivers/gpu/drm/i915/i915_oa_bxt.c
@@ -31,1702 +31,6 @@
#include "i915_drv.h"
#include "i915_oa_bxt.h"
-enum metric_set_id {
- METRIC_SET_ID_RENDER_BASIC = 1,
- METRIC_SET_ID_COMPUTE_BASIC,
- METRIC_SET_ID_RENDER_PIPE_PROFILE,
- METRIC_SET_ID_MEMORY_READS,
- METRIC_SET_ID_MEMORY_WRITES,
- METRIC_SET_ID_COMPUTE_EXTENDED,
- METRIC_SET_ID_COMPUTE_L3_CACHE,
- METRIC_SET_ID_HDC_AND_SF,
- METRIC_SET_ID_L3_1,
- METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND,
- METRIC_SET_ID_SAMPLER,
- METRIC_SET_ID_TDL_1,
- METRIC_SET_ID_TDL_2,
- METRIC_SET_ID_COMPUTE_EXTRA,
- METRIC_SET_ID_TEST_OA,
-};
-
-int i915_oa_n_builtin_metric_sets_bxt = 15;
-
-static const struct i915_oa_reg b_counter_config_render_basic[] = {
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x00800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2740), 0x00000000 },
-};
-
-static const struct i915_oa_reg flex_eu_config_render_basic[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_render_basic_0_sku_gte_0x03[] = {
- { _MMIO(0x9888), 0x166c00f0 },
- { _MMIO(0x9888), 0x12120280 },
- { _MMIO(0x9888), 0x12320280 },
- { _MMIO(0x9888), 0x11930317 },
- { _MMIO(0x9888), 0x159303df },
- { _MMIO(0x9888), 0x3f900c00 },
- { _MMIO(0x9888), 0x419000a0 },
- { _MMIO(0x9888), 0x002d1000 },
- { _MMIO(0x9888), 0x062d4000 },
- { _MMIO(0x9888), 0x082d5000 },
- { _MMIO(0x9888), 0x0a2d1000 },
- { _MMIO(0x9888), 0x0c2e0800 },
- { _MMIO(0x9888), 0x0e2e5900 },
- { _MMIO(0x9888), 0x0a4c8000 },
- { _MMIO(0x9888), 0x0c4c8000 },
- { _MMIO(0x9888), 0x0e4c4000 },
- { _MMIO(0x9888), 0x064e8000 },
- { _MMIO(0x9888), 0x084e8000 },
- { _MMIO(0x9888), 0x0a4e2000 },
- { _MMIO(0x9888), 0x1c4f0010 },
- { _MMIO(0x9888), 0x0a6c0053 },
- { _MMIO(0x9888), 0x106c0000 },
- { _MMIO(0x9888), 0x1c6c0000 },
- { _MMIO(0x9888), 0x1a0fcc00 },
- { _MMIO(0x9888), 0x1c0f0002 },
- { _MMIO(0x9888), 0x1c2c0040 },
- { _MMIO(0x9888), 0x00101000 },
- { _MMIO(0x9888), 0x04101000 },
- { _MMIO(0x9888), 0x00114000 },
- { _MMIO(0x9888), 0x08114000 },
- { _MMIO(0x9888), 0x00120020 },
- { _MMIO(0x9888), 0x08120021 },
- { _MMIO(0x9888), 0x00141000 },
- { _MMIO(0x9888), 0x08141000 },
- { _MMIO(0x9888), 0x02308000 },
- { _MMIO(0x9888), 0x04302000 },
- { _MMIO(0x9888), 0x06318000 },
- { _MMIO(0x9888), 0x08318000 },
- { _MMIO(0x9888), 0x06320800 },
- { _MMIO(0x9888), 0x08320840 },
- { _MMIO(0x9888), 0x00320000 },
- { _MMIO(0x9888), 0x06344000 },
- { _MMIO(0x9888), 0x08344000 },
- { _MMIO(0x9888), 0x0d931831 },
- { _MMIO(0x9888), 0x0f939f3f },
- { _MMIO(0x9888), 0x01939e80 },
- { _MMIO(0x9888), 0x039303bc },
- { _MMIO(0x9888), 0x0593000e },
- { _MMIO(0x9888), 0x1993002a },
- { _MMIO(0x9888), 0x07930000 },
- { _MMIO(0x9888), 0x09930000 },
- { _MMIO(0x9888), 0x1d900177 },
- { _MMIO(0x9888), 0x1f900187 },
- { _MMIO(0x9888), 0x35900000 },
- { _MMIO(0x9888), 0x13904000 },
- { _MMIO(0x9888), 0x21904000 },
- { _MMIO(0x9888), 0x23904000 },
- { _MMIO(0x9888), 0x25904000 },
- { _MMIO(0x9888), 0x27904000 },
- { _MMIO(0x9888), 0x2b904000 },
- { _MMIO(0x9888), 0x2d904000 },
- { _MMIO(0x9888), 0x2f904000 },
- { _MMIO(0x9888), 0x31904000 },
- { _MMIO(0x9888), 0x15904000 },
- { _MMIO(0x9888), 0x17904000 },
- { _MMIO(0x9888), 0x19904000 },
- { _MMIO(0x9888), 0x1b904000 },
- { _MMIO(0x9888), 0x53901110 },
- { _MMIO(0x9888), 0x43900423 },
- { _MMIO(0x9888), 0x55900111 },
- { _MMIO(0x9888), 0x47900c02 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900020 },
- { _MMIO(0x9888), 0x59901111 },
- { _MMIO(0x9888), 0x4b900421 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4d900001 },
- { _MMIO(0x9888), 0x45900821 },
-};
-
-static int
-get_render_basic_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- if (dev_priv->drm.pdev->revision >= 0x03) {
- regs[n] = mux_config_render_basic_0_sku_gte_0x03;
- lens[n] = ARRAY_SIZE(mux_config_render_basic_0_sku_gte_0x03);
- n++;
- }
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_compute_basic[] = {
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x00800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2740), 0x00000000 },
-};
-
-static const struct i915_oa_reg flex_eu_config_compute_basic[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00000003 },
- { _MMIO(0xe658), 0x00002001 },
- { _MMIO(0xe758), 0x00778008 },
- { _MMIO(0xe45c), 0x00088078 },
- { _MMIO(0xe55c), 0x00808708 },
- { _MMIO(0xe65c), 0x00a08908 },
-};
-
-static const struct i915_oa_reg mux_config_compute_basic[] = {
- { _MMIO(0x9888), 0x104f00e0 },
- { _MMIO(0x9888), 0x124f1c00 },
- { _MMIO(0x9888), 0x39900340 },
- { _MMIO(0x9888), 0x3f900c00 },
- { _MMIO(0x9888), 0x41900000 },
- { _MMIO(0x9888), 0x002d5000 },
- { _MMIO(0x9888), 0x062d4000 },
- { _MMIO(0x9888), 0x082d4000 },
- { _MMIO(0x9888), 0x0a2d1000 },
- { _MMIO(0x9888), 0x0c2d5000 },
- { _MMIO(0x9888), 0x0e2d4000 },
- { _MMIO(0x9888), 0x0c2e1400 },
- { _MMIO(0x9888), 0x0e2e5100 },
- { _MMIO(0x9888), 0x102e0114 },
- { _MMIO(0x9888), 0x044cc000 },
- { _MMIO(0x9888), 0x0a4c8000 },
- { _MMIO(0x9888), 0x0c4c8000 },
- { _MMIO(0x9888), 0x0e4c4000 },
- { _MMIO(0x9888), 0x104c8000 },
- { _MMIO(0x9888), 0x124c8000 },
- { _MMIO(0x9888), 0x164c2000 },
- { _MMIO(0x9888), 0x004ea000 },
- { _MMIO(0x9888), 0x064e8000 },
- { _MMIO(0x9888), 0x084e8000 },
- { _MMIO(0x9888), 0x0a4e2000 },
- { _MMIO(0x9888), 0x0c4ea000 },
- { _MMIO(0x9888), 0x0e4e8000 },
- { _MMIO(0x9888), 0x004f6b42 },
- { _MMIO(0x9888), 0x064f6200 },
- { _MMIO(0x9888), 0x084f4100 },
- { _MMIO(0x9888), 0x0a4f0061 },
- { _MMIO(0x9888), 0x0c4f6c4c },
- { _MMIO(0x9888), 0x0e4f4b00 },
- { _MMIO(0x9888), 0x1a4f0000 },
- { _MMIO(0x9888), 0x1c4f0000 },
- { _MMIO(0x9888), 0x180f5000 },
- { _MMIO(0x9888), 0x1a0f8800 },
- { _MMIO(0x9888), 0x1c0f08a2 },
- { _MMIO(0x9888), 0x182c4000 },
- { _MMIO(0x9888), 0x1c2c1451 },
- { _MMIO(0x9888), 0x1e2c0001 },
- { _MMIO(0x9888), 0x1a2c0010 },
- { _MMIO(0x9888), 0x01938000 },
- { _MMIO(0x9888), 0x0f938000 },
- { _MMIO(0x9888), 0x19938a28 },
- { _MMIO(0x9888), 0x03938000 },
- { _MMIO(0x9888), 0x19900177 },
- { _MMIO(0x9888), 0x1b900178 },
- { _MMIO(0x9888), 0x1d900125 },
- { _MMIO(0x9888), 0x1f900123 },
- { _MMIO(0x9888), 0x35900000 },
- { _MMIO(0x9888), 0x13904000 },
- { _MMIO(0x9888), 0x21904000 },
- { _MMIO(0x9888), 0x25904000 },
- { _MMIO(0x9888), 0x27904000 },
- { _MMIO(0x9888), 0x2b904000 },
- { _MMIO(0x9888), 0x2d904000 },
- { _MMIO(0x9888), 0x31904000 },
- { _MMIO(0x9888), 0x15904000 },
- { _MMIO(0x9888), 0x53901000 },
- { _MMIO(0x9888), 0x43900000 },
- { _MMIO(0x9888), 0x55900111 },
- { _MMIO(0x9888), 0x47900000 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900000 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x4b900000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4d900000 },
- { _MMIO(0x9888), 0x45900000 },
-};
-
-static int
-get_compute_basic_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_compute_basic;
- lens[n] = ARRAY_SIZE(mux_config_compute_basic);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_render_pipe_profile[] = {
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2770), 0x0007ffea },
- { _MMIO(0x2774), 0x00007ffc },
- { _MMIO(0x2778), 0x0007affa },
- { _MMIO(0x277c), 0x0000f5fd },
- { _MMIO(0x2780), 0x00079ffa },
- { _MMIO(0x2784), 0x0000f3fb },
- { _MMIO(0x2788), 0x0007bf7a },
- { _MMIO(0x278c), 0x0000f7e7 },
- { _MMIO(0x2790), 0x0007fefa },
- { _MMIO(0x2794), 0x0000f7cf },
- { _MMIO(0x2798), 0x00077ffa },
- { _MMIO(0x279c), 0x0000efdf },
- { _MMIO(0x27a0), 0x0006fffa },
- { _MMIO(0x27a4), 0x0000cfbf },
- { _MMIO(0x27a8), 0x0003fffa },
- { _MMIO(0x27ac), 0x00005f7f },
-};
-
-static const struct i915_oa_reg flex_eu_config_render_pipe_profile[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00015014 },
- { _MMIO(0xe658), 0x00025024 },
- { _MMIO(0xe758), 0x00035034 },
- { _MMIO(0xe45c), 0x00045044 },
- { _MMIO(0xe55c), 0x00055054 },
- { _MMIO(0xe65c), 0x00065064 },
-};
-
-static const struct i915_oa_reg mux_config_render_pipe_profile[] = {
- { _MMIO(0x9888), 0x0c2e001f },
- { _MMIO(0x9888), 0x0a2f0000 },
- { _MMIO(0x9888), 0x10186800 },
- { _MMIO(0x9888), 0x11810019 },
- { _MMIO(0x9888), 0x15810013 },
- { _MMIO(0x9888), 0x13820020 },
- { _MMIO(0x9888), 0x11830020 },
- { _MMIO(0x9888), 0x17840000 },
- { _MMIO(0x9888), 0x11860007 },
- { _MMIO(0x9888), 0x21860000 },
- { _MMIO(0x9888), 0x178703e0 },
- { _MMIO(0x9888), 0x0c2d8000 },
- { _MMIO(0x9888), 0x042d4000 },
- { _MMIO(0x9888), 0x062d1000 },
- { _MMIO(0x9888), 0x022e5400 },
- { _MMIO(0x9888), 0x002e0000 },
- { _MMIO(0x9888), 0x0e2e0080 },
- { _MMIO(0x9888), 0x082f0040 },
- { _MMIO(0x9888), 0x002f0000 },
- { _MMIO(0x9888), 0x06143000 },
- { _MMIO(0x9888), 0x06174000 },
- { _MMIO(0x9888), 0x06180012 },
- { _MMIO(0x9888), 0x00180000 },
- { _MMIO(0x9888), 0x0d804000 },
- { _MMIO(0x9888), 0x0f804000 },
- { _MMIO(0x9888), 0x05804000 },
- { _MMIO(0x9888), 0x09810200 },
- { _MMIO(0x9888), 0x0b810030 },
- { _MMIO(0x9888), 0x03810003 },
- { _MMIO(0x9888), 0x21819140 },
- { _MMIO(0x9888), 0x23819050 },
- { _MMIO(0x9888), 0x25810018 },
- { _MMIO(0x9888), 0x0b820980 },
- { _MMIO(0x9888), 0x03820d80 },
- { _MMIO(0x9888), 0x11820000 },
- { _MMIO(0x9888), 0x0182c000 },
- { _MMIO(0x9888), 0x07828000 },
- { _MMIO(0x9888), 0x09824000 },
- { _MMIO(0x9888), 0x0f828000 },
- { _MMIO(0x9888), 0x0d830004 },
- { _MMIO(0x9888), 0x0583000c },
- { _MMIO(0x9888), 0x0f831000 },
- { _MMIO(0x9888), 0x01848072 },
- { _MMIO(0x9888), 0x11840000 },
- { _MMIO(0x9888), 0x07848000 },
- { _MMIO(0x9888), 0x09844000 },
- { _MMIO(0x9888), 0x0f848000 },
- { _MMIO(0x9888), 0x07860000 },
- { _MMIO(0x9888), 0x09860092 },
- { _MMIO(0x9888), 0x0f860400 },
- { _MMIO(0x9888), 0x01869100 },
- { _MMIO(0x9888), 0x0f870065 },
- { _MMIO(0x9888), 0x01870000 },
- { _MMIO(0x9888), 0x19930800 },
- { _MMIO(0x9888), 0x0b938000 },
- { _MMIO(0x9888), 0x0d938000 },
- { _MMIO(0x9888), 0x1b952000 },
- { _MMIO(0x9888), 0x1d955055 },
- { _MMIO(0x9888), 0x1f951455 },
- { _MMIO(0x9888), 0x0992a000 },
- { _MMIO(0x9888), 0x0f928000 },
- { _MMIO(0x9888), 0x1192a800 },
- { _MMIO(0x9888), 0x1392028a },
- { _MMIO(0x9888), 0x0b92a000 },
- { _MMIO(0x9888), 0x0d922000 },
- { _MMIO(0x9888), 0x13908000 },
- { _MMIO(0x9888), 0x21908000 },
- { _MMIO(0x9888), 0x23908000 },
- { _MMIO(0x9888), 0x25908000 },
- { _MMIO(0x9888), 0x27908000 },
- { _MMIO(0x9888), 0x29908000 },
- { _MMIO(0x9888), 0x2b908000 },
- { _MMIO(0x9888), 0x2d904000 },
- { _MMIO(0x9888), 0x2f908000 },
- { _MMIO(0x9888), 0x31908000 },
- { _MMIO(0x9888), 0x15908000 },
- { _MMIO(0x9888), 0x17908000 },
- { _MMIO(0x9888), 0x19908000 },
- { _MMIO(0x9888), 0x1b908000 },
- { _MMIO(0x9888), 0x1d904000 },
- { _MMIO(0x9888), 0x1f904000 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x43900c01 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x47900000 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900863 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x4b900061 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4d900000 },
- { _MMIO(0x9888), 0x45900c22 },
-};
-
-static int
-get_render_pipe_profile_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_render_pipe_profile;
- lens[n] = ARRAY_SIZE(mux_config_render_pipe_profile);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_memory_reads[] = {
- { _MMIO(0x272c), 0xffffffff },
- { _MMIO(0x2728), 0xffffffff },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x271c), 0xffffffff },
- { _MMIO(0x2718), 0xffffffff },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x274c), 0x86543210 },
- { _MMIO(0x2748), 0x86543210 },
- { _MMIO(0x2744), 0x00006667 },
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x275c), 0x86543210 },
- { _MMIO(0x2758), 0x86543210 },
- { _MMIO(0x2754), 0x00006465 },
- { _MMIO(0x2750), 0x00000000 },
- { _MMIO(0x2770), 0x0007f81a },
- { _MMIO(0x2774), 0x0000fe00 },
- { _MMIO(0x2778), 0x0007f82a },
- { _MMIO(0x277c), 0x0000fe00 },
- { _MMIO(0x2780), 0x0007f872 },
- { _MMIO(0x2784), 0x0000fe00 },
- { _MMIO(0x2788), 0x0007f8ba },
- { _MMIO(0x278c), 0x0000fe00 },
- { _MMIO(0x2790), 0x0007f87a },
- { _MMIO(0x2794), 0x0000fe00 },
- { _MMIO(0x2798), 0x0007f8ea },
- { _MMIO(0x279c), 0x0000fe00 },
- { _MMIO(0x27a0), 0x0007f8e2 },
- { _MMIO(0x27a4), 0x0000fe00 },
- { _MMIO(0x27a8), 0x0007f8f2 },
- { _MMIO(0x27ac), 0x0000fe00 },
-};
-
-static const struct i915_oa_reg flex_eu_config_memory_reads[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00015014 },
- { _MMIO(0xe658), 0x00025024 },
- { _MMIO(0xe758), 0x00035034 },
- { _MMIO(0xe45c), 0x00045044 },
- { _MMIO(0xe55c), 0x00055054 },
- { _MMIO(0xe65c), 0x00065064 },
-};
-
-static const struct i915_oa_reg mux_config_memory_reads[] = {
- { _MMIO(0x9888), 0x19800343 },
- { _MMIO(0x9888), 0x39900340 },
- { _MMIO(0x9888), 0x3f901000 },
- { _MMIO(0x9888), 0x41900003 },
- { _MMIO(0x9888), 0x03803180 },
- { _MMIO(0x9888), 0x058035e2 },
- { _MMIO(0x9888), 0x0780006a },
- { _MMIO(0x9888), 0x11800000 },
- { _MMIO(0x9888), 0x2181a000 },
- { _MMIO(0x9888), 0x2381000a },
- { _MMIO(0x9888), 0x1d950550 },
- { _MMIO(0x9888), 0x0b928000 },
- { _MMIO(0x9888), 0x0d92a000 },
- { _MMIO(0x9888), 0x0f922000 },
- { _MMIO(0x9888), 0x13900170 },
- { _MMIO(0x9888), 0x21900171 },
- { _MMIO(0x9888), 0x23900172 },
- { _MMIO(0x9888), 0x25900173 },
- { _MMIO(0x9888), 0x27900174 },
- { _MMIO(0x9888), 0x29900175 },
- { _MMIO(0x9888), 0x2b900176 },
- { _MMIO(0x9888), 0x2d900177 },
- { _MMIO(0x9888), 0x2f90017f },
- { _MMIO(0x9888), 0x31900125 },
- { _MMIO(0x9888), 0x15900123 },
- { _MMIO(0x9888), 0x17900121 },
- { _MMIO(0x9888), 0x35900000 },
- { _MMIO(0x9888), 0x19908000 },
- { _MMIO(0x9888), 0x1b908000 },
- { _MMIO(0x9888), 0x1d908000 },
- { _MMIO(0x9888), 0x1f908000 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x43901084 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x47901080 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49901084 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x4b901084 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4d900004 },
- { _MMIO(0x9888), 0x45900000 },
-};
-
-static int
-get_memory_reads_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_memory_reads;
- lens[n] = ARRAY_SIZE(mux_config_memory_reads);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_memory_writes[] = {
- { _MMIO(0x272c), 0xffffffff },
- { _MMIO(0x2728), 0xffffffff },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x271c), 0xffffffff },
- { _MMIO(0x2718), 0xffffffff },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x274c), 0x86543210 },
- { _MMIO(0x2748), 0x86543210 },
- { _MMIO(0x2744), 0x00006667 },
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x275c), 0x86543210 },
- { _MMIO(0x2758), 0x86543210 },
- { _MMIO(0x2754), 0x00006465 },
- { _MMIO(0x2750), 0x00000000 },
- { _MMIO(0x2770), 0x0007f81a },
- { _MMIO(0x2774), 0x0000fe00 },
- { _MMIO(0x2778), 0x0007f82a },
- { _MMIO(0x277c), 0x0000fe00 },
- { _MMIO(0x2780), 0x0007f822 },
- { _MMIO(0x2784), 0x0000fe00 },
- { _MMIO(0x2788), 0x0007f8ba },
- { _MMIO(0x278c), 0x0000fe00 },
- { _MMIO(0x2790), 0x0007f87a },
- { _MMIO(0x2794), 0x0000fe00 },
- { _MMIO(0x2798), 0x0007f8ea },
- { _MMIO(0x279c), 0x0000fe00 },
- { _MMIO(0x27a0), 0x0007f8e2 },
- { _MMIO(0x27a4), 0x0000fe00 },
- { _MMIO(0x27a8), 0x0007f8f2 },
- { _MMIO(0x27ac), 0x0000fe00 },
-};
-
-static const struct i915_oa_reg flex_eu_config_memory_writes[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00015014 },
- { _MMIO(0xe658), 0x00025024 },
- { _MMIO(0xe758), 0x00035034 },
- { _MMIO(0xe45c), 0x00045044 },
- { _MMIO(0xe55c), 0x00055054 },
- { _MMIO(0xe65c), 0x00065064 },
-};
-
-static const struct i915_oa_reg mux_config_memory_writes[] = {
- { _MMIO(0x9888), 0x19800343 },
- { _MMIO(0x9888), 0x39900340 },
- { _MMIO(0x9888), 0x3f900000 },
- { _MMIO(0x9888), 0x41900080 },
- { _MMIO(0x9888), 0x03803180 },
- { _MMIO(0x9888), 0x058035e2 },
- { _MMIO(0x9888), 0x0780006a },
- { _MMIO(0x9888), 0x11800000 },
- { _MMIO(0x9888), 0x2181a000 },
- { _MMIO(0x9888), 0x2381000a },
- { _MMIO(0x9888), 0x1d950550 },
- { _MMIO(0x9888), 0x0b928000 },
- { _MMIO(0x9888), 0x0d92a000 },
- { _MMIO(0x9888), 0x0f922000 },
- { _MMIO(0x9888), 0x13900180 },
- { _MMIO(0x9888), 0x21900181 },
- { _MMIO(0x9888), 0x23900182 },
- { _MMIO(0x9888), 0x25900183 },
- { _MMIO(0x9888), 0x27900184 },
- { _MMIO(0x9888), 0x29900185 },
- { _MMIO(0x9888), 0x2b900186 },
- { _MMIO(0x9888), 0x2d900187 },
- { _MMIO(0x9888), 0x2f900170 },
- { _MMIO(0x9888), 0x31900125 },
- { _MMIO(0x9888), 0x15900123 },
- { _MMIO(0x9888), 0x17900121 },
- { _MMIO(0x9888), 0x35900000 },
- { _MMIO(0x9888), 0x19908000 },
- { _MMIO(0x9888), 0x1b908000 },
- { _MMIO(0x9888), 0x1d908000 },
- { _MMIO(0x9888), 0x1f908000 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x43901084 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x47901080 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49901084 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x4b901084 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4d900004 },
- { _MMIO(0x9888), 0x45900000 },
-};
-
-static int
-get_memory_writes_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_memory_writes;
- lens[n] = ARRAY_SIZE(mux_config_memory_writes);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_compute_extended[] = {
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2770), 0x0007fc2a },
- { _MMIO(0x2774), 0x0000bf00 },
- { _MMIO(0x2778), 0x0007fc6a },
- { _MMIO(0x277c), 0x0000bf00 },
- { _MMIO(0x2780), 0x0007fc92 },
- { _MMIO(0x2784), 0x0000bf00 },
- { _MMIO(0x2788), 0x0007fca2 },
- { _MMIO(0x278c), 0x0000bf00 },
- { _MMIO(0x2790), 0x0007fc32 },
- { _MMIO(0x2794), 0x0000bf00 },
- { _MMIO(0x2798), 0x0007fc9a },
- { _MMIO(0x279c), 0x0000bf00 },
- { _MMIO(0x27a0), 0x0007fe6a },
- { _MMIO(0x27a4), 0x0000bf00 },
- { _MMIO(0x27a8), 0x0007fe7a },
- { _MMIO(0x27ac), 0x0000bf00 },
-};
-
-static const struct i915_oa_reg flex_eu_config_compute_extended[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00000003 },
- { _MMIO(0xe658), 0x00002001 },
- { _MMIO(0xe758), 0x00778008 },
- { _MMIO(0xe45c), 0x00088078 },
- { _MMIO(0xe55c), 0x00808708 },
- { _MMIO(0xe65c), 0x00a08908 },
-};
-
-static const struct i915_oa_reg mux_config_compute_extended[] = {
- { _MMIO(0x9888), 0x104f00e0 },
- { _MMIO(0x9888), 0x141c0160 },
- { _MMIO(0x9888), 0x161c0015 },
- { _MMIO(0x9888), 0x181c0120 },
- { _MMIO(0x9888), 0x002d5000 },
- { _MMIO(0x9888), 0x062d4000 },
- { _MMIO(0x9888), 0x082d5000 },
- { _MMIO(0x9888), 0x0a2d5000 },
- { _MMIO(0x9888), 0x0c2d5000 },
- { _MMIO(0x9888), 0x0e2d5000 },
- { _MMIO(0x9888), 0x022d5000 },
- { _MMIO(0x9888), 0x042d5000 },
- { _MMIO(0x9888), 0x0c2e5400 },
- { _MMIO(0x9888), 0x0e2e5515 },
- { _MMIO(0x9888), 0x102e0155 },
- { _MMIO(0x9888), 0x044cc000 },
- { _MMIO(0x9888), 0x0a4c8000 },
- { _MMIO(0x9888), 0x0c4cc000 },
- { _MMIO(0x9888), 0x0e4cc000 },
- { _MMIO(0x9888), 0x104c8000 },
- { _MMIO(0x9888), 0x124c8000 },
- { _MMIO(0x9888), 0x144c8000 },
- { _MMIO(0x9888), 0x164c2000 },
- { _MMIO(0x9888), 0x064cc000 },
- { _MMIO(0x9888), 0x084cc000 },
- { _MMIO(0x9888), 0x004ea000 },
- { _MMIO(0x9888), 0x064e8000 },
- { _MMIO(0x9888), 0x084ea000 },
- { _MMIO(0x9888), 0x0a4ea000 },
- { _MMIO(0x9888), 0x0c4ea000 },
- { _MMIO(0x9888), 0x0e4ea000 },
- { _MMIO(0x9888), 0x024ea000 },
- { _MMIO(0x9888), 0x044ea000 },
- { _MMIO(0x9888), 0x0e4f4b41 },
- { _MMIO(0x9888), 0x004f4200 },
- { _MMIO(0x9888), 0x024f404c },
- { _MMIO(0x9888), 0x1c4f0000 },
- { _MMIO(0x9888), 0x1a4f0000 },
- { _MMIO(0x9888), 0x001b4000 },
- { _MMIO(0x9888), 0x061b8000 },
- { _MMIO(0x9888), 0x081bc000 },
- { _MMIO(0x9888), 0x0a1bc000 },
- { _MMIO(0x9888), 0x0c1bc000 },
- { _MMIO(0x9888), 0x041bc000 },
- { _MMIO(0x9888), 0x001c0031 },
- { _MMIO(0x9888), 0x061c1900 },
- { _MMIO(0x9888), 0x081c1a33 },
- { _MMIO(0x9888), 0x0a1c1b35 },
- { _MMIO(0x9888), 0x0c1c3337 },
- { _MMIO(0x9888), 0x041c31c7 },
- { _MMIO(0x9888), 0x180f5000 },
- { _MMIO(0x9888), 0x1a0fa8aa },
- { _MMIO(0x9888), 0x1c0f0aaa },
- { _MMIO(0x9888), 0x182c8000 },
- { _MMIO(0x9888), 0x1c2c6aaa },
- { _MMIO(0x9888), 0x1e2c0001 },
- { _MMIO(0x9888), 0x1a2c2950 },
- { _MMIO(0x9888), 0x01938000 },
- { _MMIO(0x9888), 0x0f938000 },
- { _MMIO(0x9888), 0x1993aaaa },
- { _MMIO(0x9888), 0x03938000 },
- { _MMIO(0x9888), 0x05938000 },
- { _MMIO(0x9888), 0x07938000 },
- { _MMIO(0x9888), 0x09938000 },
- { _MMIO(0x9888), 0x0b938000 },
- { _MMIO(0x9888), 0x13904000 },
- { _MMIO(0x9888), 0x21904000 },
- { _MMIO(0x9888), 0x23904000 },
- { _MMIO(0x9888), 0x25904000 },
- { _MMIO(0x9888), 0x27904000 },
- { _MMIO(0x9888), 0x29904000 },
- { _MMIO(0x9888), 0x2b904000 },
- { _MMIO(0x9888), 0x2d904000 },
- { _MMIO(0x9888), 0x2f904000 },
- { _MMIO(0x9888), 0x31904000 },
- { _MMIO(0x9888), 0x15904000 },
- { _MMIO(0x9888), 0x17904000 },
- { _MMIO(0x9888), 0x19904000 },
- { _MMIO(0x9888), 0x1b904000 },
- { _MMIO(0x9888), 0x1d904000 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x43900420 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x47900000 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900000 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x4b900400 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4d900001 },
- { _MMIO(0x9888), 0x45900001 },
-};
-
-static int
-get_compute_extended_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_compute_extended;
- lens[n] = ARRAY_SIZE(mux_config_compute_extended);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_compute_l3_cache[] = {
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x30800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x30800000 },
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2770), 0x0007fffa },
- { _MMIO(0x2774), 0x0000fefe },
- { _MMIO(0x2778), 0x0007fffa },
- { _MMIO(0x277c), 0x0000fefd },
- { _MMIO(0x2790), 0x0007fffa },
- { _MMIO(0x2794), 0x0000fbef },
- { _MMIO(0x2798), 0x0007fffa },
- { _MMIO(0x279c), 0x0000fbdf },
-};
-
-static const struct i915_oa_reg flex_eu_config_compute_l3_cache[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00000003 },
- { _MMIO(0xe658), 0x00002001 },
- { _MMIO(0xe758), 0x00101100 },
- { _MMIO(0xe45c), 0x00201200 },
- { _MMIO(0xe55c), 0x00301300 },
- { _MMIO(0xe65c), 0x00401400 },
-};
-
-static const struct i915_oa_reg mux_config_compute_l3_cache[] = {
- { _MMIO(0x9888), 0x166c03b0 },
- { _MMIO(0x9888), 0x1593001e },
- { _MMIO(0x9888), 0x3f900c00 },
- { _MMIO(0x9888), 0x41900000 },
- { _MMIO(0x9888), 0x002d1000 },
- { _MMIO(0x9888), 0x062d4000 },
- { _MMIO(0x9888), 0x082d5000 },
- { _MMIO(0x9888), 0x0e2d5000 },
- { _MMIO(0x9888), 0x0c2e0400 },
- { _MMIO(0x9888), 0x0e2e1500 },
- { _MMIO(0x9888), 0x102e0140 },
- { _MMIO(0x9888), 0x044c4000 },
- { _MMIO(0x9888), 0x0a4c8000 },
- { _MMIO(0x9888), 0x0c4cc000 },
- { _MMIO(0x9888), 0x144c8000 },
- { _MMIO(0x9888), 0x164c2000 },
- { _MMIO(0x9888), 0x004e2000 },
- { _MMIO(0x9888), 0x064e8000 },
- { _MMIO(0x9888), 0x084ea000 },
- { _MMIO(0x9888), 0x0e4ea000 },
- { _MMIO(0x9888), 0x1a4f4001 },
- { _MMIO(0x9888), 0x1c4f5005 },
- { _MMIO(0x9888), 0x006c0051 },
- { _MMIO(0x9888), 0x066c5000 },
- { _MMIO(0x9888), 0x086c5c5d },
- { _MMIO(0x9888), 0x0e6c5e5f },
- { _MMIO(0x9888), 0x106c0000 },
- { _MMIO(0x9888), 0x146c0000 },
- { _MMIO(0x9888), 0x1a6c0000 },
- { _MMIO(0x9888), 0x1c6c0000 },
- { _MMIO(0x9888), 0x180f1000 },
- { _MMIO(0x9888), 0x1a0fa800 },
- { _MMIO(0x9888), 0x1c0f0a00 },
- { _MMIO(0x9888), 0x182c4000 },
- { _MMIO(0x9888), 0x1c2c4015 },
- { _MMIO(0x9888), 0x1e2c0001 },
- { _MMIO(0x9888), 0x03931980 },
- { _MMIO(0x9888), 0x05930032 },
- { _MMIO(0x9888), 0x11930000 },
- { _MMIO(0x9888), 0x01938000 },
- { _MMIO(0x9888), 0x0f938000 },
- { _MMIO(0x9888), 0x1993a00a },
- { _MMIO(0x9888), 0x07930000 },
- { _MMIO(0x9888), 0x09930000 },
- { _MMIO(0x9888), 0x1d900177 },
- { _MMIO(0x9888), 0x1f900178 },
- { _MMIO(0x9888), 0x35900000 },
- { _MMIO(0x9888), 0x13904000 },
- { _MMIO(0x9888), 0x21904000 },
- { _MMIO(0x9888), 0x23904000 },
- { _MMIO(0x9888), 0x25904000 },
- { _MMIO(0x9888), 0x2f904000 },
- { _MMIO(0x9888), 0x31904000 },
- { _MMIO(0x9888), 0x19904000 },
- { _MMIO(0x9888), 0x1b904000 },
- { _MMIO(0x9888), 0x53901000 },
- { _MMIO(0x9888), 0x43900000 },
- { _MMIO(0x9888), 0x55900111 },
- { _MMIO(0x9888), 0x47900001 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x4b900000 },
- { _MMIO(0x9888), 0x4d900000 },
- { _MMIO(0x9888), 0x45900400 },
-};
-
-static int
-get_compute_l3_cache_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_compute_l3_cache;
- lens[n] = ARRAY_SIZE(mux_config_compute_l3_cache);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_hdc_and_sf[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x10800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2770), 0x00000002 },
- { _MMIO(0x2774), 0x0000fdff },
-};
-
-static const struct i915_oa_reg flex_eu_config_hdc_and_sf[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_hdc_and_sf[] = {
- { _MMIO(0x9888), 0x104f0232 },
- { _MMIO(0x9888), 0x124f4640 },
- { _MMIO(0x9888), 0x11834400 },
- { _MMIO(0x9888), 0x022d4000 },
- { _MMIO(0x9888), 0x042d5000 },
- { _MMIO(0x9888), 0x062d1000 },
- { _MMIO(0x9888), 0x0e2e0055 },
- { _MMIO(0x9888), 0x064c8000 },
- { _MMIO(0x9888), 0x084cc000 },
- { _MMIO(0x9888), 0x0a4c4000 },
- { _MMIO(0x9888), 0x024e8000 },
- { _MMIO(0x9888), 0x044ea000 },
- { _MMIO(0x9888), 0x064e2000 },
- { _MMIO(0x9888), 0x024f6100 },
- { _MMIO(0x9888), 0x044f416b },
- { _MMIO(0x9888), 0x064f004b },
- { _MMIO(0x9888), 0x1a4f0000 },
- { _MMIO(0x9888), 0x1a0f02a8 },
- { _MMIO(0x9888), 0x1a2c5500 },
- { _MMIO(0x9888), 0x0f808000 },
- { _MMIO(0x9888), 0x25810020 },
- { _MMIO(0x9888), 0x0f8305c0 },
- { _MMIO(0x9888), 0x07938000 },
- { _MMIO(0x9888), 0x09938000 },
- { _MMIO(0x9888), 0x0b938000 },
- { _MMIO(0x9888), 0x0d938000 },
- { _MMIO(0x9888), 0x1f951000 },
- { _MMIO(0x9888), 0x13920200 },
- { _MMIO(0x9888), 0x31908000 },
- { _MMIO(0x9888), 0x19904000 },
- { _MMIO(0x9888), 0x1b904000 },
- { _MMIO(0x9888), 0x1d904000 },
- { _MMIO(0x9888), 0x1f904000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x4d900003 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900000 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x47900000 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static int
-get_hdc_and_sf_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_hdc_and_sf;
- lens[n] = ARRAY_SIZE(mux_config_hdc_and_sf);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_l3_1[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2770), 0x00100070 },
- { _MMIO(0x2774), 0x0000fff1 },
- { _MMIO(0x2778), 0x00014002 },
- { _MMIO(0x277c), 0x0000c3ff },
- { _MMIO(0x2780), 0x00010002 },
- { _MMIO(0x2784), 0x0000c7ff },
- { _MMIO(0x2788), 0x00004002 },
- { _MMIO(0x278c), 0x0000d3ff },
- { _MMIO(0x2790), 0x00100700 },
- { _MMIO(0x2794), 0x0000ff1f },
- { _MMIO(0x2798), 0x00001402 },
- { _MMIO(0x279c), 0x0000fc3f },
- { _MMIO(0x27a0), 0x00001002 },
- { _MMIO(0x27a4), 0x0000fc7f },
- { _MMIO(0x27a8), 0x00000402 },
- { _MMIO(0x27ac), 0x0000fd3f },
-};
-
-static const struct i915_oa_reg flex_eu_config_l3_1[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_l3_1_0_sku_gte_0x03[] = {
- { _MMIO(0x9888), 0x12643400 },
- { _MMIO(0x9888), 0x12653400 },
- { _MMIO(0x9888), 0x106c6800 },
- { _MMIO(0x9888), 0x126c001e },
- { _MMIO(0x9888), 0x166c0010 },
- { _MMIO(0x9888), 0x0c2d5000 },
- { _MMIO(0x9888), 0x0e2d5000 },
- { _MMIO(0x9888), 0x002d4000 },
- { _MMIO(0x9888), 0x022d5000 },
- { _MMIO(0x9888), 0x042d5000 },
- { _MMIO(0x9888), 0x062d1000 },
- { _MMIO(0x9888), 0x102e0154 },
- { _MMIO(0x9888), 0x0c2e5000 },
- { _MMIO(0x9888), 0x0e2e0055 },
- { _MMIO(0x9888), 0x104c8000 },
- { _MMIO(0x9888), 0x124c8000 },
- { _MMIO(0x9888), 0x144c8000 },
- { _MMIO(0x9888), 0x164c2000 },
- { _MMIO(0x9888), 0x044c8000 },
- { _MMIO(0x9888), 0x064cc000 },
- { _MMIO(0x9888), 0x084cc000 },
- { _MMIO(0x9888), 0x0a4c4000 },
- { _MMIO(0x9888), 0x0c4ea000 },
- { _MMIO(0x9888), 0x0e4ea000 },
- { _MMIO(0x9888), 0x004e8000 },
- { _MMIO(0x9888), 0x024ea000 },
- { _MMIO(0x9888), 0x044ea000 },
- { _MMIO(0x9888), 0x064e2000 },
- { _MMIO(0x9888), 0x1c4f5500 },
- { _MMIO(0x9888), 0x1a4f1554 },
- { _MMIO(0x9888), 0x0a640024 },
- { _MMIO(0x9888), 0x10640000 },
- { _MMIO(0x9888), 0x04640000 },
- { _MMIO(0x9888), 0x0c650024 },
- { _MMIO(0x9888), 0x10650000 },
- { _MMIO(0x9888), 0x06650000 },
- { _MMIO(0x9888), 0x0c6c5327 },
- { _MMIO(0x9888), 0x0e6c5425 },
- { _MMIO(0x9888), 0x006c2a00 },
- { _MMIO(0x9888), 0x026c285b },
- { _MMIO(0x9888), 0x046c005c },
- { _MMIO(0x9888), 0x1c6c0000 },
- { _MMIO(0x9888), 0x1a6c0900 },
- { _MMIO(0x9888), 0x1c0f0aa0 },
- { _MMIO(0x9888), 0x180f4000 },
- { _MMIO(0x9888), 0x1a0f02aa },
- { _MMIO(0x9888), 0x1c2c5400 },
- { _MMIO(0x9888), 0x1e2c0001 },
- { _MMIO(0x9888), 0x1a2c5550 },
- { _MMIO(0x9888), 0x1993aa00 },
- { _MMIO(0x9888), 0x03938000 },
- { _MMIO(0x9888), 0x05938000 },
- { _MMIO(0x9888), 0x07938000 },
- { _MMIO(0x9888), 0x09938000 },
- { _MMIO(0x9888), 0x0b938000 },
- { _MMIO(0x9888), 0x0d938000 },
- { _MMIO(0x9888), 0x2b904000 },
- { _MMIO(0x9888), 0x2d904000 },
- { _MMIO(0x9888), 0x2f904000 },
- { _MMIO(0x9888), 0x31904000 },
- { _MMIO(0x9888), 0x15904000 },
- { _MMIO(0x9888), 0x17904000 },
- { _MMIO(0x9888), 0x19904000 },
- { _MMIO(0x9888), 0x1b904000 },
- { _MMIO(0x9888), 0x1d904000 },
- { _MMIO(0x9888), 0x1f904000 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x4b900421 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4d900001 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x43900420 },
- { _MMIO(0x9888), 0x45900021 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x47900000 },
-};
-
-static const struct i915_oa_reg mux_config_l3_1_0_sku_lt_0x03[] = {
- { _MMIO(0x9888), 0x14640340 },
- { _MMIO(0x9888), 0x14650340 },
- { _MMIO(0x9888), 0x106c6800 },
- { _MMIO(0x9888), 0x126c001e },
- { _MMIO(0x9888), 0x166c0010 },
- { _MMIO(0x9888), 0x0c2d5000 },
- { _MMIO(0x9888), 0x0e2d5000 },
- { _MMIO(0x9888), 0x002d4000 },
- { _MMIO(0x9888), 0x022d5000 },
- { _MMIO(0x9888), 0x042d5000 },
- { _MMIO(0x9888), 0x062d1000 },
- { _MMIO(0x9888), 0x102e0154 },
- { _MMIO(0x9888), 0x0c2e5000 },
- { _MMIO(0x9888), 0x0e2e0055 },
- { _MMIO(0x9888), 0x104c8000 },
- { _MMIO(0x9888), 0x124c8000 },
- { _MMIO(0x9888), 0x144c8000 },
- { _MMIO(0x9888), 0x164c2000 },
- { _MMIO(0x9888), 0x044c8000 },
- { _MMIO(0x9888), 0x064cc000 },
- { _MMIO(0x9888), 0x084cc000 },
- { _MMIO(0x9888), 0x0a4c4000 },
- { _MMIO(0x9888), 0x0c4ea000 },
- { _MMIO(0x9888), 0x0e4ea000 },
- { _MMIO(0x9888), 0x004e8000 },
- { _MMIO(0x9888), 0x024ea000 },
- { _MMIO(0x9888), 0x044ea000 },
- { _MMIO(0x9888), 0x064e2000 },
- { _MMIO(0x9888), 0x1c4f5500 },
- { _MMIO(0x9888), 0x1a4f1554 },
- { _MMIO(0x9888), 0x04642400 },
- { _MMIO(0x9888), 0x22640000 },
- { _MMIO(0x9888), 0x1a640000 },
- { _MMIO(0x9888), 0x06650024 },
- { _MMIO(0x9888), 0x22650000 },
- { _MMIO(0x9888), 0x1c650000 },
- { _MMIO(0x9888), 0x0c6c5327 },
- { _MMIO(0x9888), 0x0e6c5425 },
- { _MMIO(0x9888), 0x006c2a00 },
- { _MMIO(0x9888), 0x026c285b },
- { _MMIO(0x9888), 0x046c005c },
- { _MMIO(0x9888), 0x1c6c0000 },
- { _MMIO(0x9888), 0x1a6c0900 },
- { _MMIO(0x9888), 0x1c0f0aa0 },
- { _MMIO(0x9888), 0x180f4000 },
- { _MMIO(0x9888), 0x1a0f02aa },
- { _MMIO(0x9888), 0x1c2c5400 },
- { _MMIO(0x9888), 0x1e2c0001 },
- { _MMIO(0x9888), 0x1a2c5550 },
- { _MMIO(0x9888), 0x1993aa00 },
- { _MMIO(0x9888), 0x03938000 },
- { _MMIO(0x9888), 0x05938000 },
- { _MMIO(0x9888), 0x07938000 },
- { _MMIO(0x9888), 0x09938000 },
- { _MMIO(0x9888), 0x0b938000 },
- { _MMIO(0x9888), 0x0d938000 },
- { _MMIO(0x9888), 0x2b904000 },
- { _MMIO(0x9888), 0x2d904000 },
- { _MMIO(0x9888), 0x2f904000 },
- { _MMIO(0x9888), 0x31904000 },
- { _MMIO(0x9888), 0x15904000 },
- { _MMIO(0x9888), 0x17904000 },
- { _MMIO(0x9888), 0x19904000 },
- { _MMIO(0x9888), 0x1b904000 },
- { _MMIO(0x9888), 0x1d904000 },
- { _MMIO(0x9888), 0x1f904000 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x4b900421 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4d900001 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x43900420 },
- { _MMIO(0x9888), 0x45900021 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x47900000 },
-};
-
-static int
-get_l3_1_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 2);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 2);
-
- if (dev_priv->drm.pdev->revision >= 0x03) {
- regs[n] = mux_config_l3_1_0_sku_gte_0x03;
- lens[n] = ARRAY_SIZE(mux_config_l3_1_0_sku_gte_0x03);
- n++;
- }
- if (dev_priv->drm.pdev->revision < 0x03) {
- regs[n] = mux_config_l3_1_0_sku_lt_0x03;
- lens[n] = ARRAY_SIZE(mux_config_l3_1_0_sku_lt_0x03);
- n++;
- }
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_rasterizer_and_pixel_backend[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x30800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2770), 0x00000002 },
- { _MMIO(0x2774), 0x0000efff },
- { _MMIO(0x2778), 0x00006000 },
- { _MMIO(0x277c), 0x0000f3ff },
-};
-
-static const struct i915_oa_reg flex_eu_config_rasterizer_and_pixel_backend[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_rasterizer_and_pixel_backend[] = {
- { _MMIO(0x9888), 0x102d7800 },
- { _MMIO(0x9888), 0x122d79e0 },
- { _MMIO(0x9888), 0x0c2f0004 },
- { _MMIO(0x9888), 0x100e3800 },
- { _MMIO(0x9888), 0x180f0005 },
- { _MMIO(0x9888), 0x002d0940 },
- { _MMIO(0x9888), 0x022d802f },
- { _MMIO(0x9888), 0x042d4013 },
- { _MMIO(0x9888), 0x062d1000 },
- { _MMIO(0x9888), 0x0e2e0050 },
- { _MMIO(0x9888), 0x022f0010 },
- { _MMIO(0x9888), 0x002f0000 },
- { _MMIO(0x9888), 0x084c8000 },
- { _MMIO(0x9888), 0x0a4c4000 },
- { _MMIO(0x9888), 0x044e8000 },
- { _MMIO(0x9888), 0x064e2000 },
- { _MMIO(0x9888), 0x040e0480 },
- { _MMIO(0x9888), 0x000e0000 },
- { _MMIO(0x9888), 0x060f0027 },
- { _MMIO(0x9888), 0x100f0000 },
- { _MMIO(0x9888), 0x1a0f0040 },
- { _MMIO(0x9888), 0x03938000 },
- { _MMIO(0x9888), 0x05938000 },
- { _MMIO(0x9888), 0x07938000 },
- { _MMIO(0x9888), 0x09938000 },
- { _MMIO(0x9888), 0x0b938000 },
- { _MMIO(0x9888), 0x0d938000 },
- { _MMIO(0x9888), 0x15904000 },
- { _MMIO(0x9888), 0x17904000 },
- { _MMIO(0x9888), 0x19904000 },
- { _MMIO(0x9888), 0x1b904000 },
- { _MMIO(0x9888), 0x1d904000 },
- { _MMIO(0x9888), 0x1f904000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x439014a0 },
- { _MMIO(0x9888), 0x459000a4 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x47900001 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static int
-get_rasterizer_and_pixel_backend_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_rasterizer_and_pixel_backend;
- lens[n] = ARRAY_SIZE(mux_config_rasterizer_and_pixel_backend);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_sampler[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x70800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2770), 0x0000c000 },
- { _MMIO(0x2774), 0x0000e7ff },
- { _MMIO(0x2778), 0x00003000 },
- { _MMIO(0x277c), 0x0000f9ff },
- { _MMIO(0x2780), 0x00000c00 },
- { _MMIO(0x2784), 0x0000fe7f },
-};
-
-static const struct i915_oa_reg flex_eu_config_sampler[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_sampler[] = {
- { _MMIO(0x9888), 0x121300a0 },
- { _MMIO(0x9888), 0x141600ab },
- { _MMIO(0x9888), 0x123300a0 },
- { _MMIO(0x9888), 0x143600ab },
- { _MMIO(0x9888), 0x125300a0 },
- { _MMIO(0x9888), 0x145600ab },
- { _MMIO(0x9888), 0x0c2d4000 },
- { _MMIO(0x9888), 0x0e2d5000 },
- { _MMIO(0x9888), 0x002d4000 },
- { _MMIO(0x9888), 0x022d5000 },
- { _MMIO(0x9888), 0x042d5000 },
- { _MMIO(0x9888), 0x062d1000 },
- { _MMIO(0x9888), 0x102e01a0 },
- { _MMIO(0x9888), 0x0c2e5000 },
- { _MMIO(0x9888), 0x0e2e0065 },
- { _MMIO(0x9888), 0x164c2000 },
- { _MMIO(0x9888), 0x044c8000 },
- { _MMIO(0x9888), 0x064cc000 },
- { _MMIO(0x9888), 0x084c4000 },
- { _MMIO(0x9888), 0x0a4c4000 },
- { _MMIO(0x9888), 0x0e4e8000 },
- { _MMIO(0x9888), 0x004e8000 },
- { _MMIO(0x9888), 0x024ea000 },
- { _MMIO(0x9888), 0x044e2000 },
- { _MMIO(0x9888), 0x064e2000 },
- { _MMIO(0x9888), 0x1c0f0800 },
- { _MMIO(0x9888), 0x180f4000 },
- { _MMIO(0x9888), 0x1a0f023f },
- { _MMIO(0x9888), 0x1e2c0003 },
- { _MMIO(0x9888), 0x1a2cc030 },
- { _MMIO(0x9888), 0x04132180 },
- { _MMIO(0x9888), 0x02130000 },
- { _MMIO(0x9888), 0x0c148000 },
- { _MMIO(0x9888), 0x0e142000 },
- { _MMIO(0x9888), 0x04148000 },
- { _MMIO(0x9888), 0x1e150140 },
- { _MMIO(0x9888), 0x1c150040 },
- { _MMIO(0x9888), 0x0c163000 },
- { _MMIO(0x9888), 0x0e160068 },
- { _MMIO(0x9888), 0x10160000 },
- { _MMIO(0x9888), 0x18160000 },
- { _MMIO(0x9888), 0x0a164000 },
- { _MMIO(0x9888), 0x04330043 },
- { _MMIO(0x9888), 0x02330000 },
- { _MMIO(0x9888), 0x0234a000 },
- { _MMIO(0x9888), 0x04342000 },
- { _MMIO(0x9888), 0x1c350015 },
- { _MMIO(0x9888), 0x02363460 },
- { _MMIO(0x9888), 0x10360000 },
- { _MMIO(0x9888), 0x04360000 },
- { _MMIO(0x9888), 0x06360000 },
- { _MMIO(0x9888), 0x08364000 },
- { _MMIO(0x9888), 0x06530043 },
- { _MMIO(0x9888), 0x02530000 },
- { _MMIO(0x9888), 0x0e548000 },
- { _MMIO(0x9888), 0x00548000 },
- { _MMIO(0x9888), 0x06542000 },
- { _MMIO(0x9888), 0x1e550400 },
- { _MMIO(0x9888), 0x1a552000 },
- { _MMIO(0x9888), 0x1c550100 },
- { _MMIO(0x9888), 0x0e563000 },
- { _MMIO(0x9888), 0x00563400 },
- { _MMIO(0x9888), 0x10560000 },
- { _MMIO(0x9888), 0x18560000 },
- { _MMIO(0x9888), 0x02560000 },
- { _MMIO(0x9888), 0x0c564000 },
- { _MMIO(0x9888), 0x1993a800 },
- { _MMIO(0x9888), 0x03938000 },
- { _MMIO(0x9888), 0x05938000 },
- { _MMIO(0x9888), 0x07938000 },
- { _MMIO(0x9888), 0x09938000 },
- { _MMIO(0x9888), 0x0b938000 },
- { _MMIO(0x9888), 0x0d938000 },
- { _MMIO(0x9888), 0x2d904000 },
- { _MMIO(0x9888), 0x2f904000 },
- { _MMIO(0x9888), 0x31904000 },
- { _MMIO(0x9888), 0x15904000 },
- { _MMIO(0x9888), 0x17904000 },
- { _MMIO(0x9888), 0x19904000 },
- { _MMIO(0x9888), 0x1b904000 },
- { _MMIO(0x9888), 0x1d904000 },
- { _MMIO(0x9888), 0x1f904000 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x4b9014a0 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4d900001 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x43900820 },
- { _MMIO(0x9888), 0x45901022 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x47900000 },
-};
-
-static int
-get_sampler_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_sampler;
- lens[n] = ARRAY_SIZE(mux_config_sampler);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_tdl_1[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x30800000 },
- { _MMIO(0x2770), 0x00000002 },
- { _MMIO(0x2774), 0x00007fff },
- { _MMIO(0x2778), 0x00000000 },
- { _MMIO(0x277c), 0x00009fff },
- { _MMIO(0x2780), 0x00000002 },
- { _MMIO(0x2784), 0x0000efff },
- { _MMIO(0x2788), 0x00000000 },
- { _MMIO(0x278c), 0x0000f3ff },
- { _MMIO(0x2790), 0x00000002 },
- { _MMIO(0x2794), 0x0000fdff },
- { _MMIO(0x2798), 0x00000000 },
- { _MMIO(0x279c), 0x0000fe7f },
-};
-
-static const struct i915_oa_reg flex_eu_config_tdl_1[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_tdl_1[] = {
- { _MMIO(0x9888), 0x141a0000 },
- { _MMIO(0x9888), 0x143a0000 },
- { _MMIO(0x9888), 0x145a0000 },
- { _MMIO(0x9888), 0x0c2d4000 },
- { _MMIO(0x9888), 0x0e2d5000 },
- { _MMIO(0x9888), 0x002d4000 },
- { _MMIO(0x9888), 0x022d5000 },
- { _MMIO(0x9888), 0x042d5000 },
- { _MMIO(0x9888), 0x062d1000 },
- { _MMIO(0x9888), 0x102e0150 },
- { _MMIO(0x9888), 0x0c2e5000 },
- { _MMIO(0x9888), 0x0e2e006a },
- { _MMIO(0x9888), 0x124c8000 },
- { _MMIO(0x9888), 0x144c8000 },
- { _MMIO(0x9888), 0x164c2000 },
- { _MMIO(0x9888), 0x044c8000 },
- { _MMIO(0x9888), 0x064c4000 },
- { _MMIO(0x9888), 0x0a4c4000 },
- { _MMIO(0x9888), 0x0c4e8000 },
- { _MMIO(0x9888), 0x0e4ea000 },
- { _MMIO(0x9888), 0x004e8000 },
- { _MMIO(0x9888), 0x024e2000 },
- { _MMIO(0x9888), 0x064e2000 },
- { _MMIO(0x9888), 0x1c0f0bc0 },
- { _MMIO(0x9888), 0x180f4000 },
- { _MMIO(0x9888), 0x1a0f0302 },
- { _MMIO(0x9888), 0x1e2c0003 },
- { _MMIO(0x9888), 0x1a2c00f0 },
- { _MMIO(0x9888), 0x021a3080 },
- { _MMIO(0x9888), 0x041a31e5 },
- { _MMIO(0x9888), 0x02148000 },
- { _MMIO(0x9888), 0x0414a000 },
- { _MMIO(0x9888), 0x1c150054 },
- { _MMIO(0x9888), 0x06168000 },
- { _MMIO(0x9888), 0x08168000 },
- { _MMIO(0x9888), 0x0a168000 },
- { _MMIO(0x9888), 0x0c3a3280 },
- { _MMIO(0x9888), 0x0e3a0063 },
- { _MMIO(0x9888), 0x063a0061 },
- { _MMIO(0x9888), 0x023a0000 },
- { _MMIO(0x9888), 0x0c348000 },
- { _MMIO(0x9888), 0x0e342000 },
- { _MMIO(0x9888), 0x06342000 },
- { _MMIO(0x9888), 0x1e350140 },
- { _MMIO(0x9888), 0x1c350100 },
- { _MMIO(0x9888), 0x18360028 },
- { _MMIO(0x9888), 0x0c368000 },
- { _MMIO(0x9888), 0x0e5a3080 },
- { _MMIO(0x9888), 0x005a3280 },
- { _MMIO(0x9888), 0x025a0063 },
- { _MMIO(0x9888), 0x0e548000 },
- { _MMIO(0x9888), 0x00548000 },
- { _MMIO(0x9888), 0x02542000 },
- { _MMIO(0x9888), 0x1e550400 },
- { _MMIO(0x9888), 0x1a552000 },
- { _MMIO(0x9888), 0x1c550001 },
- { _MMIO(0x9888), 0x18560080 },
- { _MMIO(0x9888), 0x02568000 },
- { _MMIO(0x9888), 0x04568000 },
- { _MMIO(0x9888), 0x1993a800 },
- { _MMIO(0x9888), 0x03938000 },
- { _MMIO(0x9888), 0x05938000 },
- { _MMIO(0x9888), 0x07938000 },
- { _MMIO(0x9888), 0x09938000 },
- { _MMIO(0x9888), 0x0b938000 },
- { _MMIO(0x9888), 0x0d938000 },
- { _MMIO(0x9888), 0x2d904000 },
- { _MMIO(0x9888), 0x2f904000 },
- { _MMIO(0x9888), 0x31904000 },
- { _MMIO(0x9888), 0x15904000 },
- { _MMIO(0x9888), 0x17904000 },
- { _MMIO(0x9888), 0x19904000 },
- { _MMIO(0x9888), 0x1b904000 },
- { _MMIO(0x9888), 0x1d904000 },
- { _MMIO(0x9888), 0x1f904000 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x4b900420 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4d900000 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x43900000 },
- { _MMIO(0x9888), 0x45901084 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x47900001 },
-};
-
-static int
-get_tdl_1_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_tdl_1;
- lens[n] = ARRAY_SIZE(mux_config_tdl_1);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_tdl_2[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x00800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
-};
-
-static const struct i915_oa_reg flex_eu_config_tdl_2[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_tdl_2[] = {
- { _MMIO(0x9888), 0x141a026b },
- { _MMIO(0x9888), 0x143a0173 },
- { _MMIO(0x9888), 0x145a026b },
- { _MMIO(0x9888), 0x002d4000 },
- { _MMIO(0x9888), 0x022d5000 },
- { _MMIO(0x9888), 0x042d5000 },
- { _MMIO(0x9888), 0x062d1000 },
- { _MMIO(0x9888), 0x0c2e5000 },
- { _MMIO(0x9888), 0x0e2e0069 },
- { _MMIO(0x9888), 0x044c8000 },
- { _MMIO(0x9888), 0x064cc000 },
- { _MMIO(0x9888), 0x0a4c4000 },
- { _MMIO(0x9888), 0x004e8000 },
- { _MMIO(0x9888), 0x024ea000 },
- { _MMIO(0x9888), 0x064e2000 },
- { _MMIO(0x9888), 0x180f6000 },
- { _MMIO(0x9888), 0x1a0f030a },
- { _MMIO(0x9888), 0x1a2c03c0 },
- { _MMIO(0x9888), 0x041a37e7 },
- { _MMIO(0x9888), 0x021a0000 },
- { _MMIO(0x9888), 0x0414a000 },
- { _MMIO(0x9888), 0x1c150050 },
- { _MMIO(0x9888), 0x08168000 },
- { _MMIO(0x9888), 0x0a168000 },
- { _MMIO(0x9888), 0x003a3380 },
- { _MMIO(0x9888), 0x063a006f },
- { _MMIO(0x9888), 0x023a0000 },
- { _MMIO(0x9888), 0x00348000 },
- { _MMIO(0x9888), 0x06342000 },
- { _MMIO(0x9888), 0x1a352000 },
- { _MMIO(0x9888), 0x1c350100 },
- { _MMIO(0x9888), 0x02368000 },
- { _MMIO(0x9888), 0x0c368000 },
- { _MMIO(0x9888), 0x025a37e7 },
- { _MMIO(0x9888), 0x0254a000 },
- { _MMIO(0x9888), 0x1c550005 },
- { _MMIO(0x9888), 0x04568000 },
- { _MMIO(0x9888), 0x06568000 },
- { _MMIO(0x9888), 0x03938000 },
- { _MMIO(0x9888), 0x05938000 },
- { _MMIO(0x9888), 0x07938000 },
- { _MMIO(0x9888), 0x09938000 },
- { _MMIO(0x9888), 0x0b938000 },
- { _MMIO(0x9888), 0x0d938000 },
- { _MMIO(0x9888), 0x15904000 },
- { _MMIO(0x9888), 0x17904000 },
- { _MMIO(0x9888), 0x19904000 },
- { _MMIO(0x9888), 0x1b904000 },
- { _MMIO(0x9888), 0x1d904000 },
- { _MMIO(0x9888), 0x1f904000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x43900020 },
- { _MMIO(0x9888), 0x45901080 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x47900001 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static int
-get_tdl_2_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_tdl_2;
- lens[n] = ARRAY_SIZE(mux_config_tdl_2);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_compute_extra[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x00800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
-};
-
-static const struct i915_oa_reg flex_eu_config_compute_extra[] = {
- { _MMIO(0xe458), 0x00001000 },
- { _MMIO(0xe558), 0x00003002 },
- { _MMIO(0xe658), 0x00005004 },
- { _MMIO(0xe758), 0x00011010 },
- { _MMIO(0xe45c), 0x00050012 },
- { _MMIO(0xe55c), 0x00052051 },
- { _MMIO(0xe65c), 0x00000008 },
-};
-
-static const struct i915_oa_reg mux_config_compute_extra[] = {
- { _MMIO(0x9888), 0x141a001f },
- { _MMIO(0x9888), 0x143a001f },
- { _MMIO(0x9888), 0x145a001f },
- { _MMIO(0x9888), 0x042d5000 },
- { _MMIO(0x9888), 0x062d1000 },
- { _MMIO(0x9888), 0x0e2e0094 },
- { _MMIO(0x9888), 0x084cc000 },
- { _MMIO(0x9888), 0x044ea000 },
- { _MMIO(0x9888), 0x1a0f00e0 },
- { _MMIO(0x9888), 0x1a2c0c00 },
- { _MMIO(0x9888), 0x061a0063 },
- { _MMIO(0x9888), 0x021a0000 },
- { _MMIO(0x9888), 0x06142000 },
- { _MMIO(0x9888), 0x1c150100 },
- { _MMIO(0x9888), 0x0c168000 },
- { _MMIO(0x9888), 0x043a3180 },
- { _MMIO(0x9888), 0x023a0000 },
- { _MMIO(0x9888), 0x04348000 },
- { _MMIO(0x9888), 0x1c350040 },
- { _MMIO(0x9888), 0x0a368000 },
- { _MMIO(0x9888), 0x045a0063 },
- { _MMIO(0x9888), 0x025a0000 },
- { _MMIO(0x9888), 0x04542000 },
- { _MMIO(0x9888), 0x1c550010 },
- { _MMIO(0x9888), 0x08568000 },
- { _MMIO(0x9888), 0x09938000 },
- { _MMIO(0x9888), 0x0b938000 },
- { _MMIO(0x9888), 0x0d938000 },
- { _MMIO(0x9888), 0x1b904000 },
- { _MMIO(0x9888), 0x1d904000 },
- { _MMIO(0x9888), 0x1f904000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x45900400 },
- { _MMIO(0x9888), 0x47900004 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static int
-get_compute_extra_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_compute_extra;
- lens[n] = ARRAY_SIZE(mux_config_compute_extra);
- n++;
-
- return n;
-}
-
static const struct i915_oa_reg b_counter_config_test_oa[] = {
{ _MMIO(0x2740), 0x00000000 },
{ _MMIO(0x2744), 0x00800000 },
@@ -1756,6 +60,7 @@ static const struct i915_oa_reg flex_eu_config_test_oa[] = {
};
static const struct i915_oa_reg mux_config_test_oa[] = {
+ { _MMIO(0x9840), 0x00000080 },
{ _MMIO(0x9888), 0x19800000 },
{ _MMIO(0x9888), 0x07800063 },
{ _MMIO(0x9888), 0x11800000 },
@@ -1769,922 +74,35 @@ static const struct i915_oa_reg mux_config_test_oa[] = {
{ _MMIO(0x9888), 0x33900000 },
};
-static int
-get_test_oa_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_test_oa;
- lens[n] = ARRAY_SIZE(mux_config_test_oa);
- n++;
-
- return n;
-}
-
-int i915_oa_select_metric_set_bxt(struct drm_i915_private *dev_priv)
-{
- dev_priv->perf.oa.n_mux_configs = 0;
- dev_priv->perf.oa.b_counter_regs = NULL;
- dev_priv->perf.oa.b_counter_regs_len = 0;
- dev_priv->perf.oa.flex_regs = NULL;
- dev_priv->perf.oa.flex_regs_len = 0;
-
- switch (dev_priv->perf.oa.metrics_set) {
- case METRIC_SET_ID_RENDER_BASIC:
- dev_priv->perf.oa.n_mux_configs =
- get_render_basic_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_BASIC\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_render_basic;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_render_basic);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_render_basic;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_render_basic);
-
- return 0;
- case METRIC_SET_ID_COMPUTE_BASIC:
- dev_priv->perf.oa.n_mux_configs =
- get_compute_basic_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_BASIC\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_compute_basic;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_compute_basic);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_compute_basic;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_compute_basic);
-
- return 0;
- case METRIC_SET_ID_RENDER_PIPE_PROFILE:
- dev_priv->perf.oa.n_mux_configs =
- get_render_pipe_profile_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_PIPE_PROFILE\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_render_pipe_profile;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_render_pipe_profile);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_render_pipe_profile;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_render_pipe_profile);
-
- return 0;
- case METRIC_SET_ID_MEMORY_READS:
- dev_priv->perf.oa.n_mux_configs =
- get_memory_reads_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_READS\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_memory_reads;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_memory_reads);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_memory_reads;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_memory_reads);
-
- return 0;
- case METRIC_SET_ID_MEMORY_WRITES:
- dev_priv->perf.oa.n_mux_configs =
- get_memory_writes_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_WRITES\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_memory_writes;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_memory_writes);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_memory_writes;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_memory_writes);
-
- return 0;
- case METRIC_SET_ID_COMPUTE_EXTENDED:
- dev_priv->perf.oa.n_mux_configs =
- get_compute_extended_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTENDED\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_compute_extended;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_compute_extended);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_compute_extended;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_compute_extended);
-
- return 0;
- case METRIC_SET_ID_COMPUTE_L3_CACHE:
- dev_priv->perf.oa.n_mux_configs =
- get_compute_l3_cache_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_L3_CACHE\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_compute_l3_cache;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_compute_l3_cache);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_compute_l3_cache;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_compute_l3_cache);
-
- return 0;
- case METRIC_SET_ID_HDC_AND_SF:
- dev_priv->perf.oa.n_mux_configs =
- get_hdc_and_sf_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"HDC_AND_SF\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_hdc_and_sf;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_hdc_and_sf);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_hdc_and_sf;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_hdc_and_sf);
-
- return 0;
- case METRIC_SET_ID_L3_1:
- dev_priv->perf.oa.n_mux_configs =
- get_l3_1_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_1\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_l3_1;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_l3_1);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_l3_1;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_l3_1);
-
- return 0;
- case METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND:
- dev_priv->perf.oa.n_mux_configs =
- get_rasterizer_and_pixel_backend_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"RASTERIZER_AND_PIXEL_BACKEND\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_rasterizer_and_pixel_backend;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_rasterizer_and_pixel_backend);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_rasterizer_and_pixel_backend;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_rasterizer_and_pixel_backend);
-
- return 0;
- case METRIC_SET_ID_SAMPLER:
- dev_priv->perf.oa.n_mux_configs =
- get_sampler_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"SAMPLER\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_sampler;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_sampler);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_sampler;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_sampler);
-
- return 0;
- case METRIC_SET_ID_TDL_1:
- dev_priv->perf.oa.n_mux_configs =
- get_tdl_1_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"TDL_1\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_tdl_1;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_tdl_1);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_tdl_1;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_tdl_1);
-
- return 0;
- case METRIC_SET_ID_TDL_2:
- dev_priv->perf.oa.n_mux_configs =
- get_tdl_2_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"TDL_2\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_tdl_2;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_tdl_2);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_tdl_2;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_tdl_2);
-
- return 0;
- case METRIC_SET_ID_COMPUTE_EXTRA:
- dev_priv->perf.oa.n_mux_configs =
- get_compute_extra_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTRA\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_compute_extra;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_compute_extra);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_compute_extra;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_compute_extra);
-
- return 0;
- case METRIC_SET_ID_TEST_OA:
- dev_priv->perf.oa.n_mux_configs =
- get_test_oa_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"TEST_OA\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_test_oa;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_test_oa;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_test_oa);
-
- return 0;
- default:
- return -ENODEV;
- }
-}
-
-static ssize_t
-show_render_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_BASIC);
-}
-
-static struct device_attribute dev_attr_render_basic_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_render_basic_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_render_basic[] = {
- &dev_attr_render_basic_id.attr,
- NULL,
-};
-
-static struct attribute_group group_render_basic = {
- .name = "22b9519a-e9ba-4c41-8b54-f4f8ca14fa0a",
- .attrs = attrs_render_basic,
-};
-
-static ssize_t
-show_compute_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_BASIC);
-}
-
-static struct device_attribute dev_attr_compute_basic_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_compute_basic_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_compute_basic[] = {
- &dev_attr_compute_basic_id.attr,
- NULL,
-};
-
-static struct attribute_group group_compute_basic = {
- .name = "012d72cf-82a9-4d25-8ddf-74076fd30797",
- .attrs = attrs_compute_basic,
-};
-
-static ssize_t
-show_render_pipe_profile_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_PIPE_PROFILE);
-}
-
-static struct device_attribute dev_attr_render_pipe_profile_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_render_pipe_profile_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_render_pipe_profile[] = {
- &dev_attr_render_pipe_profile_id.attr,
- NULL,
-};
-
-static struct attribute_group group_render_pipe_profile = {
- .name = "ce416533-e49e-4211-80af-ec513590a914",
- .attrs = attrs_render_pipe_profile,
-};
-
-static ssize_t
-show_memory_reads_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_READS);
-}
-
-static struct device_attribute dev_attr_memory_reads_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_memory_reads_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_memory_reads[] = {
- &dev_attr_memory_reads_id.attr,
- NULL,
-};
-
-static struct attribute_group group_memory_reads = {
- .name = "398e2452-18d7-42d0-b241-e4d0a9148ada",
- .attrs = attrs_memory_reads,
-};
-
-static ssize_t
-show_memory_writes_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_WRITES);
-}
-
-static struct device_attribute dev_attr_memory_writes_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_memory_writes_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_memory_writes[] = {
- &dev_attr_memory_writes_id.attr,
- NULL,
-};
-
-static struct attribute_group group_memory_writes = {
- .name = "d324a0d6-7269-4847-a5c2-6f71ddc7fed5",
- .attrs = attrs_memory_writes,
-};
-
-static ssize_t
-show_compute_extended_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_EXTENDED);
-}
-
-static struct device_attribute dev_attr_compute_extended_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_compute_extended_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_compute_extended[] = {
- &dev_attr_compute_extended_id.attr,
- NULL,
-};
-
-static struct attribute_group group_compute_extended = {
- .name = "caf3596a-7bb1-4dec-b3b3-2a080d283b49",
- .attrs = attrs_compute_extended,
-};
-
-static ssize_t
-show_compute_l3_cache_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_L3_CACHE);
-}
-
-static struct device_attribute dev_attr_compute_l3_cache_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_compute_l3_cache_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_compute_l3_cache[] = {
- &dev_attr_compute_l3_cache_id.attr,
- NULL,
-};
-
-static struct attribute_group group_compute_l3_cache = {
- .name = "49b956e2-d5b9-47e0-9d8a-cee5e8cec527",
- .attrs = attrs_compute_l3_cache,
-};
-
-static ssize_t
-show_hdc_and_sf_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_HDC_AND_SF);
-}
-
-static struct device_attribute dev_attr_hdc_and_sf_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_hdc_and_sf_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_hdc_and_sf[] = {
- &dev_attr_hdc_and_sf_id.attr,
- NULL,
-};
-
-static struct attribute_group group_hdc_and_sf = {
- .name = "f64ef50a-bdba-4b35-8f09-203c13d8ee5a",
- .attrs = attrs_hdc_and_sf,
-};
-
-static ssize_t
-show_l3_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_L3_1);
-}
-
-static struct device_attribute dev_attr_l3_1_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_l3_1_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_l3_1[] = {
- &dev_attr_l3_1_id.attr,
- NULL,
-};
-
-static struct attribute_group group_l3_1 = {
- .name = "00ad5a41-7eab-4f7a-9103-49d411c67219",
- .attrs = attrs_l3_1,
-};
-
-static ssize_t
-show_rasterizer_and_pixel_backend_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND);
-}
-
-static struct device_attribute dev_attr_rasterizer_and_pixel_backend_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_rasterizer_and_pixel_backend_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_rasterizer_and_pixel_backend[] = {
- &dev_attr_rasterizer_and_pixel_backend_id.attr,
- NULL,
-};
-
-static struct attribute_group group_rasterizer_and_pixel_backend = {
- .name = "46dc44ca-491c-4cc1-a951-e7b3e62bf02b",
- .attrs = attrs_rasterizer_and_pixel_backend,
-};
-
-static ssize_t
-show_sampler_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_SAMPLER);
-}
-
-static struct device_attribute dev_attr_sampler_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_sampler_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_sampler[] = {
- &dev_attr_sampler_id.attr,
- NULL,
-};
-
-static struct attribute_group group_sampler = {
- .name = "8364e2a8-af63-40af-b0d5-42969a255654",
- .attrs = attrs_sampler,
-};
-
-static ssize_t
-show_tdl_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_TDL_1);
-}
-
-static struct device_attribute dev_attr_tdl_1_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_tdl_1_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_tdl_1[] = {
- &dev_attr_tdl_1_id.attr,
- NULL,
-};
-
-static struct attribute_group group_tdl_1 = {
- .name = "175c8092-cb25-4d1e-8dc7-b4fdd39e2d92",
- .attrs = attrs_tdl_1,
-};
-
-static ssize_t
-show_tdl_2_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_TDL_2);
-}
-
-static struct device_attribute dev_attr_tdl_2_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_tdl_2_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_tdl_2[] = {
- &dev_attr_tdl_2_id.attr,
- NULL,
-};
-
-static struct attribute_group group_tdl_2 = {
- .name = "d260f03f-b34d-4b49-a44e-436819117332",
- .attrs = attrs_tdl_2,
-};
-
-static ssize_t
-show_compute_extra_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_EXTRA);
-}
-
-static struct device_attribute dev_attr_compute_extra_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_compute_extra_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_compute_extra[] = {
- &dev_attr_compute_extra_id.attr,
- NULL,
-};
-
-static struct attribute_group group_compute_extra = {
- .name = "fa6ecf21-2cb8-4d0b-9308-6e4a7b4ca87a",
- .attrs = attrs_compute_extra,
-};
-
static ssize_t
show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", METRIC_SET_ID_TEST_OA);
+ return sprintf(buf, "1\n");
}
-static struct device_attribute dev_attr_test_oa_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_test_oa_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_test_oa[] = {
- &dev_attr_test_oa_id.attr,
- NULL,
-};
-
-static struct attribute_group group_test_oa = {
- .name = "5ee72f5c-092f-421e-8b70-225f7c3e9612",
- .attrs = attrs_test_oa,
-};
-
-int
-i915_perf_register_sysfs_bxt(struct drm_i915_private *dev_priv)
+void
+i915_perf_load_test_config_bxt(struct drm_i915_private *dev_priv)
{
- const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
- int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];
- int ret = 0;
+ strncpy(dev_priv->perf.oa.test_config.uuid,
+ "5ee72f5c-092f-421e-8b70-225f7c3e9612",
+ UUID_STRING_LEN);
+ dev_priv->perf.oa.test_config.id = 1;

- if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_basic);
- if (ret)
- goto error_render_basic;
- }
- if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
- if (ret)
- goto error_compute_basic;
- }
- if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
- if (ret)
- goto error_render_pipe_profile;
- }
- if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
- if (ret)
- goto error_memory_reads;
- }
- if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
- if (ret)
- goto error_memory_writes;
- }
- if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
- if (ret)
- goto error_compute_extended;
- }
- if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
- if (ret)
- goto error_compute_l3_cache;
- }
- if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
- if (ret)
- goto error_hdc_and_sf;
- }
- if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_1);
- if (ret)
- goto error_l3_1;
- }
- if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
- if (ret)
- goto error_rasterizer_and_pixel_backend;
- }
- if (get_sampler_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_sampler);
- if (ret)
- goto error_sampler;
- }
- if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
- if (ret)
- goto error_tdl_1;
- }
- if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
- if (ret)
- goto error_tdl_2;
- }
- if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
- if (ret)
- goto error_compute_extra;
- }
- if (get_test_oa_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_test_oa);
- if (ret)
- goto error_test_oa;
- }
+ dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);

- return 0;
+ dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);

-error_test_oa:
- if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
-error_compute_extra:
- if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
-error_tdl_2:
- if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
-error_tdl_1:
- if (get_sampler_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler);
-error_sampler:
- if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
-error_rasterizer_and_pixel_backend:
- if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_1);
-error_l3_1:
- if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
-error_hdc_and_sf:
- if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
-error_compute_l3_cache:
- if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
-error_compute_extended:
- if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
-error_memory_writes:
- if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
-error_memory_reads:
- if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
-error_render_pipe_profile:
- if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
-error_compute_basic:
- if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
-error_render_basic:
- return ret;
-}
+ dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);

-void
-i915_perf_unregister_sysfs_bxt(struct drm_i915_private *dev_priv)
-{
- const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
- int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];
+ dev_priv->perf.oa.test_config.sysfs_metric.name = "5ee72f5c-092f-421e-8b70-225f7c3e9612";
+ dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+
+ dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;

- if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
- if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
- if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
- if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
- if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
- if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
- if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
- if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
- if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_1);
- if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
- if (get_sampler_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler);
- if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
- if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
- if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
- if (get_test_oa_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_test_oa);
+ dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
}
diff --git a/drivers/gpu/drm/i915/i915_oa_bxt.h b/drivers/gpu/drm/i915/i915_oa_bxt.h
index 6cf7ba746e7e..690b963a2383 100644
--- a/drivers/gpu/drm/i915/i915_oa_bxt.h
+++ b/drivers/gpu/drm/i915/i915_oa_bxt.h
@@ -29,12 +29,6 @@
#ifndef __I915_OA_BXT_H__
#define __I915_OA_BXT_H__

-extern int i915_oa_n_builtin_metric_sets_bxt;
-
-extern int i915_oa_select_metric_set_bxt(struct drm_i915_private *dev_priv);
-
-extern int i915_perf_register_sysfs_bxt(struct drm_i915_private *dev_priv);
-
-extern void i915_perf_unregister_sysfs_bxt(struct drm_i915_private *dev_priv);
+extern void i915_perf_load_test_config_bxt(struct drm_i915_private *dev_priv);

#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_chv.c b/drivers/gpu/drm/i915/i915_oa_chv.c
index aa6bece7e75f..322a3f94cd16 100644
--- a/drivers/gpu/drm/i915/i915_oa_chv.c
+++ b/drivers/gpu/drm/i915/i915_oa_chv.c
@@ -31,1943 +31,6 @@
#include "i915_drv.h"
#include "i915_oa_chv.h"
-enum metric_set_id {
- METRIC_SET_ID_RENDER_BASIC = 1,
- METRIC_SET_ID_COMPUTE_BASIC,
- METRIC_SET_ID_RENDER_PIPE_PROFILE,
- METRIC_SET_ID_HDC_AND_SF,
- METRIC_SET_ID_L3_1,
- METRIC_SET_ID_L3_2,
- METRIC_SET_ID_L3_3,
- METRIC_SET_ID_L3_4,
- METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND,
- METRIC_SET_ID_SAMPLER_1,
- METRIC_SET_ID_SAMPLER_2,
- METRIC_SET_ID_TDL_1,
- METRIC_SET_ID_TDL_2,
- METRIC_SET_ID_TEST_OA,
-};
-
-int i915_oa_n_builtin_metric_sets_chv = 14;
-
-static const struct i915_oa_reg b_counter_config_render_basic[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x00800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
-};
-
-static const struct i915_oa_reg flex_eu_config_render_basic[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_render_basic[] = {
- { _MMIO(0x9888), 0x59800000 },
- { _MMIO(0x9888), 0x59800001 },
- { _MMIO(0x9888), 0x285a0006 },
- { _MMIO(0x9888), 0x2c110014 },
- { _MMIO(0x9888), 0x2e110000 },
- { _MMIO(0x9888), 0x2c310014 },
- { _MMIO(0x9888), 0x2e310000 },
- { _MMIO(0x9888), 0x2b8303df },
- { _MMIO(0x9888), 0x3580024f },
- { _MMIO(0x9888), 0x00580888 },
- { _MMIO(0x9888), 0x1e5a0015 },
- { _MMIO(0x9888), 0x205a0014 },
- { _MMIO(0x9888), 0x045a0000 },
- { _MMIO(0x9888), 0x025a0000 },
- { _MMIO(0x9888), 0x02180500 },
- { _MMIO(0x9888), 0x00190555 },
- { _MMIO(0x9888), 0x021d0500 },
- { _MMIO(0x9888), 0x021f0a00 },
- { _MMIO(0x9888), 0x00380444 },
- { _MMIO(0x9888), 0x02390500 },
- { _MMIO(0x9888), 0x003a0666 },
- { _MMIO(0x9888), 0x00100111 },
- { _MMIO(0x9888), 0x06110030 },
- { _MMIO(0x9888), 0x0a110031 },
- { _MMIO(0x9888), 0x0e110046 },
- { _MMIO(0x9888), 0x04110000 },
- { _MMIO(0x9888), 0x00110000 },
- { _MMIO(0x9888), 0x00130111 },
- { _MMIO(0x9888), 0x00300444 },
- { _MMIO(0x9888), 0x08310030 },
- { _MMIO(0x9888), 0x0c310031 },
- { _MMIO(0x9888), 0x10310046 },
- { _MMIO(0x9888), 0x04310000 },
- { _MMIO(0x9888), 0x00310000 },
- { _MMIO(0x9888), 0x00330444 },
- { _MMIO(0x9888), 0x038a0a00 },
- { _MMIO(0x9888), 0x018b0fff },
- { _MMIO(0x9888), 0x038b0a00 },
- { _MMIO(0x9888), 0x01855000 },
- { _MMIO(0x9888), 0x03850055 },
- { _MMIO(0x9888), 0x13830021 },
- { _MMIO(0x9888), 0x15830020 },
- { _MMIO(0x9888), 0x1783002f },
- { _MMIO(0x9888), 0x1983002e },
- { _MMIO(0x9888), 0x1b83002d },
- { _MMIO(0x9888), 0x1d83002c },
- { _MMIO(0x9888), 0x05830000 },
- { _MMIO(0x9888), 0x01840555 },
- { _MMIO(0x9888), 0x03840500 },
- { _MMIO(0x9888), 0x23800074 },
- { _MMIO(0x9888), 0x2580007d },
- { _MMIO(0x9888), 0x05800000 },
- { _MMIO(0x9888), 0x01805000 },
- { _MMIO(0x9888), 0x03800055 },
- { _MMIO(0x9888), 0x01865000 },
- { _MMIO(0x9888), 0x03860055 },
- { _MMIO(0x9888), 0x01875000 },
- { _MMIO(0x9888), 0x03870055 },
- { _MMIO(0x9888), 0x418000aa },
- { _MMIO(0x9888), 0x4380000a },
- { _MMIO(0x9888), 0x45800000 },
- { _MMIO(0x9888), 0x4780000a },
- { _MMIO(0x9888), 0x49800000 },
- { _MMIO(0x9888), 0x4b800000 },
- { _MMIO(0x9888), 0x4d800000 },
- { _MMIO(0x9888), 0x4f800000 },
- { _MMIO(0x9888), 0x51800000 },
- { _MMIO(0x9888), 0x53800000 },
- { _MMIO(0x9888), 0x55800000 },
- { _MMIO(0x9888), 0x57800000 },
- { _MMIO(0x9888), 0x59800000 },
-};
-
-static int
-get_render_basic_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_render_basic;
- lens[n] = ARRAY_SIZE(mux_config_render_basic);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_compute_basic[] = {
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x00800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
-};
-
-static const struct i915_oa_reg flex_eu_config_compute_basic[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00000003 },
- { _MMIO(0xe658), 0x00002001 },
- { _MMIO(0xe758), 0x00778008 },
- { _MMIO(0xe45c), 0x00088078 },
- { _MMIO(0xe55c), 0x00808708 },
- { _MMIO(0xe65c), 0x00a08908 },
-};
-
-static const struct i915_oa_reg mux_config_compute_basic[] = {
- { _MMIO(0x9888), 0x59800000 },
- { _MMIO(0x9888), 0x59800001 },
- { _MMIO(0x9888), 0x2e5800e0 },
- { _MMIO(0x9888), 0x2e3800e0 },
- { _MMIO(0x9888), 0x3580024f },
- { _MMIO(0x9888), 0x3d800140 },
- { _MMIO(0x9888), 0x08580042 },
- { _MMIO(0x9888), 0x0c580040 },
- { _MMIO(0x9888), 0x1058004c },
- { _MMIO(0x9888), 0x1458004b },
- { _MMIO(0x9888), 0x04580000 },
- { _MMIO(0x9888), 0x00580000 },
- { _MMIO(0x9888), 0x00195555 },
- { _MMIO(0x9888), 0x06380042 },
- { _MMIO(0x9888), 0x0a380040 },
- { _MMIO(0x9888), 0x0e38004c },
- { _MMIO(0x9888), 0x1238004b },
- { _MMIO(0x9888), 0x04380000 },
- { _MMIO(0x9888), 0x00384444 },
- { _MMIO(0x9888), 0x003a5555 },
- { _MMIO(0x9888), 0x018bffff },
- { _MMIO(0x9888), 0x01845555 },
- { _MMIO(0x9888), 0x17800074 },
- { _MMIO(0x9888), 0x1980007d },
- { _MMIO(0x9888), 0x1b80007c },
- { _MMIO(0x9888), 0x1d8000b6 },
- { _MMIO(0x9888), 0x1f8000b7 },
- { _MMIO(0x9888), 0x05800000 },
- { _MMIO(0x9888), 0x03800000 },
- { _MMIO(0x9888), 0x418000aa },
- { _MMIO(0x9888), 0x438000aa },
- { _MMIO(0x9888), 0x45800000 },
- { _MMIO(0x9888), 0x47800000 },
- { _MMIO(0x9888), 0x4980012a },
- { _MMIO(0x9888), 0x4b80012a },
- { _MMIO(0x9888), 0x4d80012a },
- { _MMIO(0x9888), 0x4f80012a },
- { _MMIO(0x9888), 0x518001ce },
- { _MMIO(0x9888), 0x538001ce },
- { _MMIO(0x9888), 0x5580000e },
- { _MMIO(0x9888), 0x59800000 },
-};
-
-static int
-get_compute_basic_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_compute_basic;
- lens[n] = ARRAY_SIZE(mux_config_compute_basic);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_render_pipe_profile[] = {
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2770), 0x0007ffea },
- { _MMIO(0x2774), 0x00007ffc },
- { _MMIO(0x2778), 0x0007affa },
- { _MMIO(0x277c), 0x0000f5fd },
- { _MMIO(0x2780), 0x00079ffa },
- { _MMIO(0x2784), 0x0000f3fb },
- { _MMIO(0x2788), 0x0007bf7a },
- { _MMIO(0x278c), 0x0000f7e7 },
- { _MMIO(0x2790), 0x0007fefa },
- { _MMIO(0x2794), 0x0000f7cf },
- { _MMIO(0x2798), 0x00077ffa },
- { _MMIO(0x279c), 0x0000efdf },
- { _MMIO(0x27a0), 0x0006fffa },
- { _MMIO(0x27a4), 0x0000cfbf },
- { _MMIO(0x27a8), 0x0003fffa },
- { _MMIO(0x27ac), 0x00005f7f },
-};
-
-static const struct i915_oa_reg flex_eu_config_render_pipe_profile[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00015014 },
- { _MMIO(0xe658), 0x00025024 },
- { _MMIO(0xe758), 0x00035034 },
- { _MMIO(0xe45c), 0x00045044 },
- { _MMIO(0xe55c), 0x00055054 },
- { _MMIO(0xe65c), 0x00065064 },
-};
-
-static const struct i915_oa_reg mux_config_render_pipe_profile[] = {
- { _MMIO(0x9888), 0x59800000 },
- { _MMIO(0x9888), 0x59800001 },
- { _MMIO(0x9888), 0x261e0000 },
- { _MMIO(0x9888), 0x281f000f },
- { _MMIO(0x9888), 0x2817001a },
- { _MMIO(0x9888), 0x2791001f },
- { _MMIO(0x9888), 0x27880019 },
- { _MMIO(0x9888), 0x2d890000 },
- { _MMIO(0x9888), 0x278a0007 },
- { _MMIO(0x9888), 0x298d001f },
- { _MMIO(0x9888), 0x278e0020 },
- { _MMIO(0x9888), 0x2b8f0012 },
- { _MMIO(0x9888), 0x29900000 },
- { _MMIO(0x9888), 0x00184000 },
- { _MMIO(0x9888), 0x02181000 },
- { _MMIO(0x9888), 0x02194000 },
- { _MMIO(0x9888), 0x141e0002 },
- { _MMIO(0x9888), 0x041e0000 },
- { _MMIO(0x9888), 0x001e0000 },
- { _MMIO(0x9888), 0x221f0015 },
- { _MMIO(0x9888), 0x041f0000 },
- { _MMIO(0x9888), 0x001f4000 },
- { _MMIO(0x9888), 0x021f0000 },
- { _MMIO(0x9888), 0x023a8000 },
- { _MMIO(0x9888), 0x0213c000 },
- { _MMIO(0x9888), 0x02164000 },
- { _MMIO(0x9888), 0x24170012 },
- { _MMIO(0x9888), 0x04170000 },
- { _MMIO(0x9888), 0x07910005 },
- { _MMIO(0x9888), 0x05910000 },
- { _MMIO(0x9888), 0x01911500 },
- { _MMIO(0x9888), 0x03910501 },
- { _MMIO(0x9888), 0x0d880002 },
- { _MMIO(0x9888), 0x1d880003 },
- { _MMIO(0x9888), 0x05880000 },
- { _MMIO(0x9888), 0x0b890032 },
- { _MMIO(0x9888), 0x1b890031 },
- { _MMIO(0x9888), 0x05890000 },
- { _MMIO(0x9888), 0x01890040 },
- { _MMIO(0x9888), 0x03890040 },
- { _MMIO(0x9888), 0x098a0000 },
- { _MMIO(0x9888), 0x198a0004 },
- { _MMIO(0x9888), 0x058a0000 },
- { _MMIO(0x9888), 0x018a8050 },
- { _MMIO(0x9888), 0x038a2050 },
- { _MMIO(0x9888), 0x018b95a9 },
- { _MMIO(0x9888), 0x038be5a9 },
- { _MMIO(0x9888), 0x018c1500 },
- { _MMIO(0x9888), 0x038c0501 },
- { _MMIO(0x9888), 0x178d0015 },
- { _MMIO(0x9888), 0x058d0000 },
- { _MMIO(0x9888), 0x138e0004 },
- { _MMIO(0x9888), 0x218e000c },
- { _MMIO(0x9888), 0x058e0000 },
- { _MMIO(0x9888), 0x018e0500 },
- { _MMIO(0x9888), 0x038e0101 },
- { _MMIO(0x9888), 0x0f8f0027 },
- { _MMIO(0x9888), 0x058f0000 },
- { _MMIO(0x9888), 0x018f0000 },
- { _MMIO(0x9888), 0x038f0001 },
- { _MMIO(0x9888), 0x11900013 },
- { _MMIO(0x9888), 0x1f900017 },
- { _MMIO(0x9888), 0x05900000 },
- { _MMIO(0x9888), 0x01900100 },
- { _MMIO(0x9888), 0x03900001 },
- { _MMIO(0x9888), 0x01845555 },
- { _MMIO(0x9888), 0x03845555 },
- { _MMIO(0x9888), 0x418000aa },
- { _MMIO(0x9888), 0x438000aa },
- { _MMIO(0x9888), 0x458000aa },
- { _MMIO(0x9888), 0x478000aa },
- { _MMIO(0x9888), 0x4980018c },
- { _MMIO(0x9888), 0x4b80014b },
- { _MMIO(0x9888), 0x4d800128 },
- { _MMIO(0x9888), 0x4f80012a },
- { _MMIO(0x9888), 0x51800187 },
- { _MMIO(0x9888), 0x5380014b },
- { _MMIO(0x9888), 0x55800149 },
- { _MMIO(0x9888), 0x5780010a },
- { _MMIO(0x9888), 0x59800000 },
-};
-
-static int
-get_render_pipe_profile_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_render_pipe_profile;
- lens[n] = ARRAY_SIZE(mux_config_render_pipe_profile);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_hdc_and_sf[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x10800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2770), 0x00000002 },
- { _MMIO(0x2774), 0x0000fff7 },
-};
-
-static const struct i915_oa_reg flex_eu_config_hdc_and_sf[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_hdc_and_sf[] = {
- { _MMIO(0x9888), 0x105c0232 },
- { _MMIO(0x9888), 0x10580232 },
- { _MMIO(0x9888), 0x10380232 },
- { _MMIO(0x9888), 0x10dc0232 },
- { _MMIO(0x9888), 0x10d80232 },
- { _MMIO(0x9888), 0x10b80232 },
- { _MMIO(0x9888), 0x118e4400 },
- { _MMIO(0x9888), 0x025c6080 },
- { _MMIO(0x9888), 0x045c004b },
- { _MMIO(0x9888), 0x005c8000 },
- { _MMIO(0x9888), 0x00582080 },
- { _MMIO(0x9888), 0x0258004b },
- { _MMIO(0x9888), 0x025b4000 },
- { _MMIO(0x9888), 0x045b4000 },
- { _MMIO(0x9888), 0x0c1fa000 },
- { _MMIO(0x9888), 0x0e1f00aa },
- { _MMIO(0x9888), 0x04386080 },
- { _MMIO(0x9888), 0x0638404b },
- { _MMIO(0x9888), 0x02384000 },
- { _MMIO(0x9888), 0x08384000 },
- { _MMIO(0x9888), 0x0a380000 },
- { _MMIO(0x9888), 0x0c380000 },
- { _MMIO(0x9888), 0x00398000 },
- { _MMIO(0x9888), 0x0239a000 },
- { _MMIO(0x9888), 0x0439a000 },
- { _MMIO(0x9888), 0x06392000 },
- { _MMIO(0x9888), 0x0cdc25c1 },
- { _MMIO(0x9888), 0x0adcc000 },
- { _MMIO(0x9888), 0x0ad825c1 },
- { _MMIO(0x9888), 0x18db4000 },
- { _MMIO(0x9888), 0x1adb0001 },
- { _MMIO(0x9888), 0x0e9f8000 },
- { _MMIO(0x9888), 0x109f02aa },
- { _MMIO(0x9888), 0x0eb825c1 },
- { _MMIO(0x9888), 0x18b80154 },
- { _MMIO(0x9888), 0x0ab9a000 },
- { _MMIO(0x9888), 0x0cb9a000 },
- { _MMIO(0x9888), 0x0eb9a000 },
- { _MMIO(0x9888), 0x0d88c000 },
- { _MMIO(0x9888), 0x0f88000f },
- { _MMIO(0x9888), 0x038a8000 },
- { _MMIO(0x9888), 0x058a8000 },
- { _MMIO(0x9888), 0x078a8000 },
- { _MMIO(0x9888), 0x098a8000 },
- { _MMIO(0x9888), 0x0b8a8000 },
- { _MMIO(0x9888), 0x0d8a8000 },
- { _MMIO(0x9888), 0x258baa05 },
- { _MMIO(0x9888), 0x278b002a },
- { _MMIO(0x9888), 0x238b2a80 },
- { _MMIO(0x9888), 0x198c5400 },
- { _MMIO(0x9888), 0x1b8c0015 },
- { _MMIO(0x9888), 0x098dc000 },
- { _MMIO(0x9888), 0x0b8da000 },
- { _MMIO(0x9888), 0x0d8da000 },
- { _MMIO(0x9888), 0x0f8da000 },
- { _MMIO(0x9888), 0x098e05c0 },
- { _MMIO(0x9888), 0x058e0000 },
- { _MMIO(0x9888), 0x198f0020 },
- { _MMIO(0x9888), 0x2185aa0a },
- { _MMIO(0x9888), 0x2385002a },
- { _MMIO(0x9888), 0x1f85aa00 },
- { _MMIO(0x9888), 0x19835000 },
- { _MMIO(0x9888), 0x1b830155 },
- { _MMIO(0x9888), 0x03834000 },
- { _MMIO(0x9888), 0x05834000 },
- { _MMIO(0x9888), 0x07834000 },
- { _MMIO(0x9888), 0x09834000 },
- { _MMIO(0x9888), 0x0b834000 },
- { _MMIO(0x9888), 0x0d834000 },
- { _MMIO(0x9888), 0x09848000 },
- { _MMIO(0x9888), 0x0b84c000 },
- { _MMIO(0x9888), 0x0d84c000 },
- { _MMIO(0x9888), 0x0f84c000 },
- { _MMIO(0x9888), 0x01848000 },
- { _MMIO(0x9888), 0x0384c000 },
- { _MMIO(0x9888), 0x0584c000 },
- { _MMIO(0x9888), 0x07844000 },
- { _MMIO(0x9888), 0x19808000 },
- { _MMIO(0x9888), 0x1b80c000 },
- { _MMIO(0x9888), 0x1d80c000 },
- { _MMIO(0x9888), 0x1f80c000 },
- { _MMIO(0x9888), 0x11808000 },
- { _MMIO(0x9888), 0x1380c000 },
- { _MMIO(0x9888), 0x1580c000 },
- { _MMIO(0x9888), 0x17804000 },
- { _MMIO(0x9888), 0x51800040 },
- { _MMIO(0x9888), 0x43800400 },
- { _MMIO(0x9888), 0x45800800 },
- { _MMIO(0x9888), 0x53800000 },
- { _MMIO(0x9888), 0x47800c62 },
- { _MMIO(0x9888), 0x21800000 },
- { _MMIO(0x9888), 0x31800000 },
- { _MMIO(0x9888), 0x4d800000 },
- { _MMIO(0x9888), 0x3f801042 },
- { _MMIO(0x9888), 0x4f800000 },
- { _MMIO(0x9888), 0x418014a4 },
-};
-
-static int
-get_hdc_and_sf_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_hdc_and_sf;
- lens[n] = ARRAY_SIZE(mux_config_hdc_and_sf);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_l3_1[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2770), 0x00100070 },
- { _MMIO(0x2774), 0x0000fff1 },
- { _MMIO(0x2778), 0x00014002 },
- { _MMIO(0x277c), 0x0000c3ff },
- { _MMIO(0x2780), 0x00010002 },
- { _MMIO(0x2784), 0x0000c7ff },
- { _MMIO(0x2788), 0x00004002 },
- { _MMIO(0x278c), 0x0000d3ff },
- { _MMIO(0x2790), 0x00100700 },
- { _MMIO(0x2794), 0x0000ff1f },
- { _MMIO(0x2798), 0x00001402 },
- { _MMIO(0x279c), 0x0000fc3f },
- { _MMIO(0x27a0), 0x00001002 },
- { _MMIO(0x27a4), 0x0000fc7f },
- { _MMIO(0x27a8), 0x00000402 },
- { _MMIO(0x27ac), 0x0000fd3f },
-};
-
-static const struct i915_oa_reg flex_eu_config_l3_1[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_l3_1[] = {
- { _MMIO(0x9888), 0x10bf03da },
- { _MMIO(0x9888), 0x14bf0001 },
- { _MMIO(0x9888), 0x12980340 },
- { _MMIO(0x9888), 0x12990340 },
- { _MMIO(0x9888), 0x0cbf1187 },
- { _MMIO(0x9888), 0x0ebf1205 },
- { _MMIO(0x9888), 0x00bf0500 },
- { _MMIO(0x9888), 0x02bf042b },
- { _MMIO(0x9888), 0x04bf002c },
- { _MMIO(0x9888), 0x0cdac000 },
- { _MMIO(0x9888), 0x0edac000 },
- { _MMIO(0x9888), 0x00da8000 },
- { _MMIO(0x9888), 0x02dac000 },
- { _MMIO(0x9888), 0x04da4000 },
- { _MMIO(0x9888), 0x04983400 },
- { _MMIO(0x9888), 0x10980000 },
- { _MMIO(0x9888), 0x06990034 },
- { _MMIO(0x9888), 0x10990000 },
- { _MMIO(0x9888), 0x0c9dc000 },
- { _MMIO(0x9888), 0x0e9dc000 },
- { _MMIO(0x9888), 0x009d8000 },
- { _MMIO(0x9888), 0x029dc000 },
- { _MMIO(0x9888), 0x049d4000 },
- { _MMIO(0x9888), 0x109f02a8 },
- { _MMIO(0x9888), 0x0c9fa000 },
- { _MMIO(0x9888), 0x0e9f00ba },
- { _MMIO(0x9888), 0x0cb88000 },
- { _MMIO(0x9888), 0x0cb95000 },
- { _MMIO(0x9888), 0x0eb95000 },
- { _MMIO(0x9888), 0x00b94000 },
- { _MMIO(0x9888), 0x02b95000 },
- { _MMIO(0x9888), 0x04b91000 },
- { _MMIO(0x9888), 0x06b92000 },
- { _MMIO(0x9888), 0x0cba4000 },
- { _MMIO(0x9888), 0x0f88000f },
- { _MMIO(0x9888), 0x03888000 },
- { _MMIO(0x9888), 0x05888000 },
- { _MMIO(0x9888), 0x07888000 },
- { _MMIO(0x9888), 0x09888000 },
- { _MMIO(0x9888), 0x0b888000 },
- { _MMIO(0x9888), 0x0d880400 },
- { _MMIO(0x9888), 0x258b800a },
- { _MMIO(0x9888), 0x278b002a },
- { _MMIO(0x9888), 0x238b5500 },
- { _MMIO(0x9888), 0x198c4000 },
- { _MMIO(0x9888), 0x1b8c0015 },
- { _MMIO(0x9888), 0x038c4000 },
- { _MMIO(0x9888), 0x058c4000 },
- { _MMIO(0x9888), 0x078c4000 },
- { _MMIO(0x9888), 0x098c4000 },
- { _MMIO(0x9888), 0x0b8c4000 },
- { _MMIO(0x9888), 0x0d8c4000 },
- { _MMIO(0x9888), 0x0d8da000 },
- { _MMIO(0x9888), 0x0f8da000 },
- { _MMIO(0x9888), 0x018d8000 },
- { _MMIO(0x9888), 0x038da000 },
- { _MMIO(0x9888), 0x058da000 },
- { _MMIO(0x9888), 0x078d2000 },
- { _MMIO(0x9888), 0x2185800a },
- { _MMIO(0x9888), 0x2385002a },
- { _MMIO(0x9888), 0x1f85aa00 },
- { _MMIO(0x9888), 0x1b830154 },
- { _MMIO(0x9888), 0x03834000 },
- { _MMIO(0x9888), 0x05834000 },
- { _MMIO(0x9888), 0x07834000 },
- { _MMIO(0x9888), 0x09834000 },
- { _MMIO(0x9888), 0x0b834000 },
- { _MMIO(0x9888), 0x0d834000 },
- { _MMIO(0x9888), 0x0d84c000 },
- { _MMIO(0x9888), 0x0f84c000 },
- { _MMIO(0x9888), 0x01848000 },
- { _MMIO(0x9888), 0x0384c000 },
- { _MMIO(0x9888), 0x0584c000 },
- { _MMIO(0x9888), 0x07844000 },
- { _MMIO(0x9888), 0x1d80c000 },
- { _MMIO(0x9888), 0x1f80c000 },
- { _MMIO(0x9888), 0x11808000 },
- { _MMIO(0x9888), 0x1380c000 },
- { _MMIO(0x9888), 0x1580c000 },
- { _MMIO(0x9888), 0x17804000 },
- { _MMIO(0x9888), 0x53800000 },
- { _MMIO(0x9888), 0x45800000 },
- { _MMIO(0x9888), 0x47800000 },
- { _MMIO(0x9888), 0x21800000 },
- { _MMIO(0x9888), 0x31800000 },
- { _MMIO(0x9888), 0x4d800000 },
- { _MMIO(0x9888), 0x3f800000 },
- { _MMIO(0x9888), 0x4f800000 },
- { _MMIO(0x9888), 0x41800060 },
-};
-
-static int
-get_l3_1_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_l3_1;
- lens[n] = ARRAY_SIZE(mux_config_l3_1);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_l3_2[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2770), 0x00100070 },
- { _MMIO(0x2774), 0x0000fff1 },
- { _MMIO(0x2778), 0x00014002 },
- { _MMIO(0x277c), 0x0000c3ff },
- { _MMIO(0x2780), 0x00010002 },
- { _MMIO(0x2784), 0x0000c7ff },
- { _MMIO(0x2788), 0x00004002 },
- { _MMIO(0x278c), 0x0000d3ff },
- { _MMIO(0x2790), 0x00100700 },
- { _MMIO(0x2794), 0x0000ff1f },
- { _MMIO(0x2798), 0x00001402 },
- { _MMIO(0x279c), 0x0000fc3f },
- { _MMIO(0x27a0), 0x00001002 },
- { _MMIO(0x27a4), 0x0000fc7f },
- { _MMIO(0x27a8), 0x00000402 },
- { _MMIO(0x27ac), 0x0000fd3f },
-};
-
-static const struct i915_oa_reg flex_eu_config_l3_2[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_l3_2[] = {
- { _MMIO(0x9888), 0x103f03da },
- { _MMIO(0x9888), 0x143f0001 },
- { _MMIO(0x9888), 0x12180340 },
- { _MMIO(0x9888), 0x12190340 },
- { _MMIO(0x9888), 0x0c3f1187 },
- { _MMIO(0x9888), 0x0e3f1205 },
- { _MMIO(0x9888), 0x003f0500 },
- { _MMIO(0x9888), 0x023f042b },
- { _MMIO(0x9888), 0x043f002c },
- { _MMIO(0x9888), 0x0c5ac000 },
- { _MMIO(0x9888), 0x0e5ac000 },
- { _MMIO(0x9888), 0x005a8000 },
- { _MMIO(0x9888), 0x025ac000 },
- { _MMIO(0x9888), 0x045a4000 },
- { _MMIO(0x9888), 0x04183400 },
- { _MMIO(0x9888), 0x10180000 },
- { _MMIO(0x9888), 0x06190034 },
- { _MMIO(0x9888), 0x10190000 },
- { _MMIO(0x9888), 0x0c1dc000 },
- { _MMIO(0x9888), 0x0e1dc000 },
- { _MMIO(0x9888), 0x001d8000 },
- { _MMIO(0x9888), 0x021dc000 },
- { _MMIO(0x9888), 0x041d4000 },
- { _MMIO(0x9888), 0x101f02a8 },
- { _MMIO(0x9888), 0x0c1fa000 },
- { _MMIO(0x9888), 0x0e1f00ba },
- { _MMIO(0x9888), 0x0c388000 },
- { _MMIO(0x9888), 0x0c395000 },
- { _MMIO(0x9888), 0x0e395000 },
- { _MMIO(0x9888), 0x00394000 },
- { _MMIO(0x9888), 0x02395000 },
- { _MMIO(0x9888), 0x04391000 },
- { _MMIO(0x9888), 0x06392000 },
- { _MMIO(0x9888), 0x0c3a4000 },
- { _MMIO(0x9888), 0x1b8aa800 },
- { _MMIO(0x9888), 0x1d8a0002 },
- { _MMIO(0x9888), 0x038a8000 },
- { _MMIO(0x9888), 0x058a8000 },
- { _MMIO(0x9888), 0x078a8000 },
- { _MMIO(0x9888), 0x098a8000 },
- { _MMIO(0x9888), 0x0b8a8000 },
- { _MMIO(0x9888), 0x0d8a8000 },
- { _MMIO(0x9888), 0x258b4005 },
- { _MMIO(0x9888), 0x278b0015 },
- { _MMIO(0x9888), 0x238b2a80 },
- { _MMIO(0x9888), 0x2185800a },
- { _MMIO(0x9888), 0x2385002a },
- { _MMIO(0x9888), 0x1f85aa00 },
- { _MMIO(0x9888), 0x1b830154 },
- { _MMIO(0x9888), 0x03834000 },
- { _MMIO(0x9888), 0x05834000 },
- { _MMIO(0x9888), 0x07834000 },
- { _MMIO(0x9888), 0x09834000 },
- { _MMIO(0x9888), 0x0b834000 },
- { _MMIO(0x9888), 0x0d834000 },
- { _MMIO(0x9888), 0x0d84c000 },
- { _MMIO(0x9888), 0x0f84c000 },
- { _MMIO(0x9888), 0x01848000 },
- { _MMIO(0x9888), 0x0384c000 },
- { _MMIO(0x9888), 0x0584c000 },
- { _MMIO(0x9888), 0x07844000 },
- { _MMIO(0x9888), 0x1d80c000 },
- { _MMIO(0x9888), 0x1f80c000 },
- { _MMIO(0x9888), 0x11808000 },
- { _MMIO(0x9888), 0x1380c000 },
- { _MMIO(0x9888), 0x1580c000 },
- { _MMIO(0x9888), 0x17804000 },
- { _MMIO(0x9888), 0x53800000 },
- { _MMIO(0x9888), 0x45800000 },
- { _MMIO(0x9888), 0x47800000 },
- { _MMIO(0x9888), 0x21800000 },
- { _MMIO(0x9888), 0x31800000 },
- { _MMIO(0x9888), 0x4d800000 },
- { _MMIO(0x9888), 0x3f800000 },
- { _MMIO(0x9888), 0x4f800000 },
- { _MMIO(0x9888), 0x41800060 },
-};
-
-static int
-get_l3_2_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_l3_2;
- lens[n] = ARRAY_SIZE(mux_config_l3_2);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_l3_3[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2770), 0x00100070 },
- { _MMIO(0x2774), 0x0000fff1 },
- { _MMIO(0x2778), 0x00014002 },
- { _MMIO(0x277c), 0x0000c3ff },
- { _MMIO(0x2780), 0x00010002 },
- { _MMIO(0x2784), 0x0000c7ff },
- { _MMIO(0x2788), 0x00004002 },
- { _MMIO(0x278c), 0x0000d3ff },
- { _MMIO(0x2790), 0x00100700 },
- { _MMIO(0x2794), 0x0000ff1f },
- { _MMIO(0x2798), 0x00001402 },
- { _MMIO(0x279c), 0x0000fc3f },
- { _MMIO(0x27a0), 0x00001002 },
- { _MMIO(0x27a4), 0x0000fc7f },
- { _MMIO(0x27a8), 0x00000402 },
- { _MMIO(0x27ac), 0x0000fd3f },
-};
-
-static const struct i915_oa_reg flex_eu_config_l3_3[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_l3_3[] = {
- { _MMIO(0x9888), 0x121b0340 },
- { _MMIO(0x9888), 0x103f0274 },
- { _MMIO(0x9888), 0x123f0000 },
- { _MMIO(0x9888), 0x129b0340 },
- { _MMIO(0x9888), 0x10bf0274 },
- { _MMIO(0x9888), 0x12bf0000 },
- { _MMIO(0x9888), 0x041b3400 },
- { _MMIO(0x9888), 0x101b0000 },
- { _MMIO(0x9888), 0x045c8000 },
- { _MMIO(0x9888), 0x0a3d4000 },
- { _MMIO(0x9888), 0x003f0080 },
- { _MMIO(0x9888), 0x023f0793 },
- { _MMIO(0x9888), 0x043f0014 },
- { _MMIO(0x9888), 0x04588000 },
- { _MMIO(0x9888), 0x005a8000 },
- { _MMIO(0x9888), 0x025ac000 },
- { _MMIO(0x9888), 0x045a4000 },
- { _MMIO(0x9888), 0x0a5b4000 },
- { _MMIO(0x9888), 0x001d8000 },
- { _MMIO(0x9888), 0x021dc000 },
- { _MMIO(0x9888), 0x041d4000 },
- { _MMIO(0x9888), 0x0c1fa000 },
- { _MMIO(0x9888), 0x0e1f002a },
- { _MMIO(0x9888), 0x0a384000 },
- { _MMIO(0x9888), 0x00394000 },
- { _MMIO(0x9888), 0x02395000 },
- { _MMIO(0x9888), 0x04399000 },
- { _MMIO(0x9888), 0x069b0034 },
- { _MMIO(0x9888), 0x109b0000 },
- { _MMIO(0x9888), 0x06dc4000 },
- { _MMIO(0x9888), 0x0cbd4000 },
- { _MMIO(0x9888), 0x0cbf0981 },
- { _MMIO(0x9888), 0x0ebf0a0f },
- { _MMIO(0x9888), 0x06d84000 },
- { _MMIO(0x9888), 0x0cdac000 },
- { _MMIO(0x9888), 0x0edac000 },
- { _MMIO(0x9888), 0x0cdb4000 },
- { _MMIO(0x9888), 0x0c9dc000 },
- { _MMIO(0x9888), 0x0e9dc000 },
- { _MMIO(0x9888), 0x109f02a8 },
- { _MMIO(0x9888), 0x0e9f0080 },
- { _MMIO(0x9888), 0x0cb84000 },
- { _MMIO(0x9888), 0x0cb95000 },
- { _MMIO(0x9888), 0x0eb95000 },
- { _MMIO(0x9888), 0x06b92000 },
- { _MMIO(0x9888), 0x0f88000f },
- { _MMIO(0x9888), 0x0d880400 },
- { _MMIO(0x9888), 0x038a8000 },
- { _MMIO(0x9888), 0x058a8000 },
- { _MMIO(0x9888), 0x078a8000 },
- { _MMIO(0x9888), 0x098a8000 },
- { _MMIO(0x9888), 0x0b8a8000 },
- { _MMIO(0x9888), 0x258b8009 },
- { _MMIO(0x9888), 0x278b002a },
- { _MMIO(0x9888), 0x238b2a80 },
- { _MMIO(0x9888), 0x198c4000 },
- { _MMIO(0x9888), 0x1b8c0015 },
- { _MMIO(0x9888), 0x0d8c4000 },
- { _MMIO(0x9888), 0x0d8da000 },
- { _MMIO(0x9888), 0x0f8da000 },
- { _MMIO(0x9888), 0x078d2000 },
- { _MMIO(0x9888), 0x2185800a },
- { _MMIO(0x9888), 0x2385002a },
- { _MMIO(0x9888), 0x1f85aa00 },
- { _MMIO(0x9888), 0x1b830154 },
- { _MMIO(0x9888), 0x03834000 },
- { _MMIO(0x9888), 0x05834000 },
- { _MMIO(0x9888), 0x07834000 },
- { _MMIO(0x9888), 0x09834000 },
- { _MMIO(0x9888), 0x0b834000 },
- { _MMIO(0x9888), 0x0d834000 },
- { _MMIO(0x9888), 0x0d84c000 },
- { _MMIO(0x9888), 0x0f84c000 },
- { _MMIO(0x9888), 0x01848000 },
- { _MMIO(0x9888), 0x0384c000 },
- { _MMIO(0x9888), 0x0584c000 },
- { _MMIO(0x9888), 0x07844000 },
- { _MMIO(0x9888), 0x1d80c000 },
- { _MMIO(0x9888), 0x1f80c000 },
- { _MMIO(0x9888), 0x11808000 },
- { _MMIO(0x9888), 0x1380c000 },
- { _MMIO(0x9888), 0x1580c000 },
- { _MMIO(0x9888), 0x17804000 },
- { _MMIO(0x9888), 0x53800000 },
- { _MMIO(0x9888), 0x45800c00 },
- { _MMIO(0x9888), 0x47800c63 },
- { _MMIO(0x9888), 0x21800000 },
- { _MMIO(0x9888), 0x31800000 },
- { _MMIO(0x9888), 0x4d800000 },
- { _MMIO(0x9888), 0x3f8014a5 },
- { _MMIO(0x9888), 0x4f800000 },
- { _MMIO(0x9888), 0x41800045 },
-};
-
-static int
-get_l3_3_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_l3_3;
- lens[n] = ARRAY_SIZE(mux_config_l3_3);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_l3_4[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2770), 0x00100070 },
- { _MMIO(0x2774), 0x0000fff1 },
- { _MMIO(0x2778), 0x00014002 },
- { _MMIO(0x277c), 0x0000c3ff },
- { _MMIO(0x2780), 0x00010002 },
- { _MMIO(0x2784), 0x0000c7ff },
- { _MMIO(0x2788), 0x00004002 },
- { _MMIO(0x278c), 0x0000d3ff },
- { _MMIO(0x2790), 0x00100700 },
- { _MMIO(0x2794), 0x0000ff1f },
- { _MMIO(0x2798), 0x00001402 },
- { _MMIO(0x279c), 0x0000fc3f },
- { _MMIO(0x27a0), 0x00001002 },
- { _MMIO(0x27a4), 0x0000fc7f },
- { _MMIO(0x27a8), 0x00000402 },
- { _MMIO(0x27ac), 0x0000fd3f },
-};
-
-static const struct i915_oa_reg flex_eu_config_l3_4[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_l3_4[] = {
- { _MMIO(0x9888), 0x121a0340 },
- { _MMIO(0x9888), 0x103f0017 },
- { _MMIO(0x9888), 0x123f0020 },
- { _MMIO(0x9888), 0x129a0340 },
- { _MMIO(0x9888), 0x10bf0017 },
- { _MMIO(0x9888), 0x12bf0020 },
- { _MMIO(0x9888), 0x041a3400 },
- { _MMIO(0x9888), 0x101a0000 },
- { _MMIO(0x9888), 0x043b8000 },
- { _MMIO(0x9888), 0x0a3e0010 },
- { _MMIO(0x9888), 0x003f0200 },
- { _MMIO(0x9888), 0x023f0113 },
- { _MMIO(0x9888), 0x043f0014 },
- { _MMIO(0x9888), 0x02592000 },
- { _MMIO(0x9888), 0x005a8000 },
- { _MMIO(0x9888), 0x025ac000 },
- { _MMIO(0x9888), 0x045a4000 },
- { _MMIO(0x9888), 0x0a1c8000 },
- { _MMIO(0x9888), 0x001d8000 },
- { _MMIO(0x9888), 0x021dc000 },
- { _MMIO(0x9888), 0x041d4000 },
- { _MMIO(0x9888), 0x0a1e8000 },
- { _MMIO(0x9888), 0x0c1fa000 },
- { _MMIO(0x9888), 0x0e1f001a },
- { _MMIO(0x9888), 0x00394000 },
- { _MMIO(0x9888), 0x02395000 },
- { _MMIO(0x9888), 0x04391000 },
- { _MMIO(0x9888), 0x069a0034 },
- { _MMIO(0x9888), 0x109a0000 },
- { _MMIO(0x9888), 0x06bb4000 },
- { _MMIO(0x9888), 0x0abe0040 },
- { _MMIO(0x9888), 0x0cbf0984 },
- { _MMIO(0x9888), 0x0ebf0a02 },
- { _MMIO(0x9888), 0x02d94000 },
- { _MMIO(0x9888), 0x0cdac000 },
- { _MMIO(0x9888), 0x0edac000 },
- { _MMIO(0x9888), 0x0c9c0400 },
- { _MMIO(0x9888), 0x0c9dc000 },
- { _MMIO(0x9888), 0x0e9dc000 },
- { _MMIO(0x9888), 0x0c9e0400 },
- { _MMIO(0x9888), 0x109f02a8 },
- { _MMIO(0x9888), 0x0e9f0040 },
- { _MMIO(0x9888), 0x0cb95000 },
- { _MMIO(0x9888), 0x0eb95000 },
- { _MMIO(0x9888), 0x0f88000f },
- { _MMIO(0x9888), 0x0d880400 },
- { _MMIO(0x9888), 0x038a8000 },
- { _MMIO(0x9888), 0x058a8000 },
- { _MMIO(0x9888), 0x078a8000 },
- { _MMIO(0x9888), 0x098a8000 },
- { _MMIO(0x9888), 0x0b8a8000 },
- { _MMIO(0x9888), 0x258b8009 },
- { _MMIO(0x9888), 0x278b002a },
- { _MMIO(0x9888), 0x238b2a80 },
- { _MMIO(0x9888), 0x198c4000 },
- { _MMIO(0x9888), 0x1b8c0015 },
- { _MMIO(0x9888), 0x0d8c4000 },
- { _MMIO(0x9888), 0x0d8da000 },
- { _MMIO(0x9888), 0x0f8da000 },
- { _MMIO(0x9888), 0x078d2000 },
- { _MMIO(0x9888), 0x2185800a },
- { _MMIO(0x9888), 0x2385002a },
- { _MMIO(0x9888), 0x1f85aa00 },
- { _MMIO(0x9888), 0x1b830154 },
- { _MMIO(0x9888), 0x03834000 },
- { _MMIO(0x9888), 0x05834000 },
- { _MMIO(0x9888), 0x07834000 },
- { _MMIO(0x9888), 0x09834000 },
- { _MMIO(0x9888), 0x0b834000 },
- { _MMIO(0x9888), 0x0d834000 },
- { _MMIO(0x9888), 0x0d84c000 },
- { _MMIO(0x9888), 0x0f84c000 },
- { _MMIO(0x9888), 0x01848000 },
- { _MMIO(0x9888), 0x0384c000 },
- { _MMIO(0x9888), 0x0584c000 },
- { _MMIO(0x9888), 0x07844000 },
- { _MMIO(0x9888), 0x1d80c000 },
- { _MMIO(0x9888), 0x1f80c000 },
- { _MMIO(0x9888), 0x11808000 },
- { _MMIO(0x9888), 0x1380c000 },
- { _MMIO(0x9888), 0x1580c000 },
- { _MMIO(0x9888), 0x17804000 },
- { _MMIO(0x9888), 0x53800000 },
- { _MMIO(0x9888), 0x45800800 },
- { _MMIO(0x9888), 0x47800842 },
- { _MMIO(0x9888), 0x21800000 },
- { _MMIO(0x9888), 0x31800000 },
- { _MMIO(0x9888), 0x4d800000 },
- { _MMIO(0x9888), 0x3f801084 },
- { _MMIO(0x9888), 0x4f800000 },
- { _MMIO(0x9888), 0x41800044 },
-};
-
-static int
-get_l3_4_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_l3_4;
- lens[n] = ARRAY_SIZE(mux_config_l3_4);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_rasterizer_and_pixel_backend[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x30800000 },
- { _MMIO(0x2770), 0x00006000 },
- { _MMIO(0x2774), 0x0000f3ff },
- { _MMIO(0x2778), 0x00001800 },
- { _MMIO(0x277c), 0x0000fcff },
- { _MMIO(0x2780), 0x00000600 },
- { _MMIO(0x2784), 0x0000ff3f },
- { _MMIO(0x2788), 0x00000180 },
- { _MMIO(0x278c), 0x0000ffcf },
- { _MMIO(0x2790), 0x00000060 },
- { _MMIO(0x2794), 0x0000fff3 },
- { _MMIO(0x2798), 0x00000018 },
- { _MMIO(0x279c), 0x0000fffc },
-};
-
-static const struct i915_oa_reg flex_eu_config_rasterizer_and_pixel_backend[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_rasterizer_and_pixel_backend[] = {
- { _MMIO(0x9888), 0x143b000e },
- { _MMIO(0x9888), 0x043c55c0 },
- { _MMIO(0x9888), 0x0a1e0280 },
- { _MMIO(0x9888), 0x0c1e0408 },
- { _MMIO(0x9888), 0x10390000 },
- { _MMIO(0x9888), 0x12397a1f },
- { _MMIO(0x9888), 0x14bb000e },
- { _MMIO(0x9888), 0x04bc5000 },
- { _MMIO(0x9888), 0x0a9e0296 },
- { _MMIO(0x9888), 0x0c9e0008 },
- { _MMIO(0x9888), 0x10b90000 },
- { _MMIO(0x9888), 0x12b97a1f },
- { _MMIO(0x9888), 0x063b0042 },
- { _MMIO(0x9888), 0x103b0000 },
- { _MMIO(0x9888), 0x083c0000 },
- { _MMIO(0x9888), 0x0a3e0040 },
- { _MMIO(0x9888), 0x043f8000 },
- { _MMIO(0x9888), 0x02594000 },
- { _MMIO(0x9888), 0x045a8000 },
- { _MMIO(0x9888), 0x0c1c0400 },
- { _MMIO(0x9888), 0x041d8000 },
- { _MMIO(0x9888), 0x081e02c0 },
- { _MMIO(0x9888), 0x0e1e0000 },
- { _MMIO(0x9888), 0x0c1fa800 },
- { _MMIO(0x9888), 0x0e1f0260 },
- { _MMIO(0x9888), 0x101f0014 },
- { _MMIO(0x9888), 0x003905e0 },
- { _MMIO(0x9888), 0x06390bc0 },
- { _MMIO(0x9888), 0x02390018 },
- { _MMIO(0x9888), 0x04394000 },
- { _MMIO(0x9888), 0x04bb0042 },
- { _MMIO(0x9888), 0x10bb0000 },
- { _MMIO(0x9888), 0x02bc05c0 },
- { _MMIO(0x9888), 0x08bc0000 },
- { _MMIO(0x9888), 0x0abe0004 },
- { _MMIO(0x9888), 0x02bf8000 },
- { _MMIO(0x9888), 0x02d91000 },
- { _MMIO(0x9888), 0x02da8000 },
- { _MMIO(0x9888), 0x089c8000 },
- { _MMIO(0x9888), 0x029d8000 },
- { _MMIO(0x9888), 0x089e8000 },
- { _MMIO(0x9888), 0x0e9e0000 },
- { _MMIO(0x9888), 0x0e9fa806 },
- { _MMIO(0x9888), 0x109f0142 },
- { _MMIO(0x9888), 0x08b90617 },
- { _MMIO(0x9888), 0x0ab90be0 },
- { _MMIO(0x9888), 0x02b94000 },
- { _MMIO(0x9888), 0x0d88f000 },
- { _MMIO(0x9888), 0x0f88000c },
- { _MMIO(0x9888), 0x07888000 },
- { _MMIO(0x9888), 0x09888000 },
- { _MMIO(0x9888), 0x018a8000 },
- { _MMIO(0x9888), 0x0f8a8000 },
- { _MMIO(0x9888), 0x1b8a2800 },
- { _MMIO(0x9888), 0x038a8000 },
- { _MMIO(0x9888), 0x058a8000 },
- { _MMIO(0x9888), 0x0b8a8000 },
- { _MMIO(0x9888), 0x0d8a8000 },
- { _MMIO(0x9888), 0x238b52a0 },
- { _MMIO(0x9888), 0x258b6a95 },
- { _MMIO(0x9888), 0x278b0029 },
- { _MMIO(0x9888), 0x178c2000 },
- { _MMIO(0x9888), 0x198c1500 },
- { _MMIO(0x9888), 0x1b8c0014 },
- { _MMIO(0x9888), 0x078c4000 },
- { _MMIO(0x9888), 0x098c4000 },
- { _MMIO(0x9888), 0x098da000 },
- { _MMIO(0x9888), 0x0b8da000 },
- { _MMIO(0x9888), 0x0f8da000 },
- { _MMIO(0x9888), 0x038d8000 },
- { _MMIO(0x9888), 0x058d2000 },
- { _MMIO(0x9888), 0x1f85aa80 },
- { _MMIO(0x9888), 0x2185aaaa },
- { _MMIO(0x9888), 0x2385002a },
- { _MMIO(0x9888), 0x01834000 },
- { _MMIO(0x9888), 0x0f834000 },
- { _MMIO(0x9888), 0x19835400 },
- { _MMIO(0x9888), 0x1b830155 },
- { _MMIO(0x9888), 0x03834000 },
- { _MMIO(0x9888), 0x05834000 },
- { _MMIO(0x9888), 0x07834000 },
- { _MMIO(0x9888), 0x09834000 },
- { _MMIO(0x9888), 0x0b834000 },
- { _MMIO(0x9888), 0x0d834000 },
- { _MMIO(0x9888), 0x0184c000 },
- { _MMIO(0x9888), 0x0784c000 },
- { _MMIO(0x9888), 0x0984c000 },
- { _MMIO(0x9888), 0x0b84c000 },
- { _MMIO(0x9888), 0x0d84c000 },
- { _MMIO(0x9888), 0x0f84c000 },
- { _MMIO(0x9888), 0x0384c000 },
- { _MMIO(0x9888), 0x0584c000 },
- { _MMIO(0x9888), 0x1180c000 },
- { _MMIO(0x9888), 0x1780c000 },
- { _MMIO(0x9888), 0x1980c000 },
- { _MMIO(0x9888), 0x1b80c000 },
- { _MMIO(0x9888), 0x1d80c000 },
- { _MMIO(0x9888), 0x1f80c000 },
- { _MMIO(0x9888), 0x1380c000 },
- { _MMIO(0x9888), 0x1580c000 },
- { _MMIO(0x9888), 0x4d800444 },
- { _MMIO(0x9888), 0x3d800000 },
- { _MMIO(0x9888), 0x4f804000 },
- { _MMIO(0x9888), 0x43801080 },
- { _MMIO(0x9888), 0x51800000 },
- { _MMIO(0x9888), 0x45800084 },
- { _MMIO(0x9888), 0x53800044 },
- { _MMIO(0x9888), 0x47801080 },
- { _MMIO(0x9888), 0x21800000 },
- { _MMIO(0x9888), 0x31800000 },
- { _MMIO(0x9888), 0x3f800000 },
- { _MMIO(0x9888), 0x41800840 },
-};
-
-static int
-get_rasterizer_and_pixel_backend_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_rasterizer_and_pixel_backend;
- lens[n] = ARRAY_SIZE(mux_config_rasterizer_and_pixel_backend);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_sampler_1[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x70800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2770), 0x0000c000 },
- { _MMIO(0x2774), 0x0000e7ff },
- { _MMIO(0x2778), 0x00003000 },
- { _MMIO(0x277c), 0x0000f9ff },
- { _MMIO(0x2780), 0x00000c00 },
- { _MMIO(0x2784), 0x0000fe7f },
-};
-
-static const struct i915_oa_reg flex_eu_config_sampler_1[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_sampler_1[] = {
- { _MMIO(0x9888), 0x18921400 },
- { _MMIO(0x9888), 0x149500ab },
- { _MMIO(0x9888), 0x18b21400 },
- { _MMIO(0x9888), 0x14b500ab },
- { _MMIO(0x9888), 0x18d21400 },
- { _MMIO(0x9888), 0x14d500ab },
- { _MMIO(0x9888), 0x0cdc8000 },
- { _MMIO(0x9888), 0x0edc4000 },
- { _MMIO(0x9888), 0x02dcc000 },
- { _MMIO(0x9888), 0x04dcc000 },
- { _MMIO(0x9888), 0x1abd00a0 },
- { _MMIO(0x9888), 0x0abd8000 },
- { _MMIO(0x9888), 0x0cd88000 },
- { _MMIO(0x9888), 0x0ed84000 },
- { _MMIO(0x9888), 0x04d88000 },
- { _MMIO(0x9888), 0x1adb0050 },
- { _MMIO(0x9888), 0x04db8000 },
- { _MMIO(0x9888), 0x06db8000 },
- { _MMIO(0x9888), 0x08db8000 },
- { _MMIO(0x9888), 0x0adb4000 },
- { _MMIO(0x9888), 0x109f02a0 },
- { _MMIO(0x9888), 0x0c9fa000 },
- { _MMIO(0x9888), 0x0e9f00aa },
- { _MMIO(0x9888), 0x18b82500 },
- { _MMIO(0x9888), 0x02b88000 },
- { _MMIO(0x9888), 0x04b84000 },
- { _MMIO(0x9888), 0x06b84000 },
- { _MMIO(0x9888), 0x08b84000 },
- { _MMIO(0x9888), 0x0ab84000 },
- { _MMIO(0x9888), 0x0cb88000 },
- { _MMIO(0x9888), 0x0cb98000 },
- { _MMIO(0x9888), 0x0eb9a000 },
- { _MMIO(0x9888), 0x00b98000 },
- { _MMIO(0x9888), 0x02b9a000 },
- { _MMIO(0x9888), 0x04b9a000 },
- { _MMIO(0x9888), 0x06b92000 },
- { _MMIO(0x9888), 0x1aba0200 },
- { _MMIO(0x9888), 0x02ba8000 },
- { _MMIO(0x9888), 0x0cba8000 },
- { _MMIO(0x9888), 0x04908000 },
- { _MMIO(0x9888), 0x04918000 },
- { _MMIO(0x9888), 0x04927300 },
- { _MMIO(0x9888), 0x10920000 },
- { _MMIO(0x9888), 0x1893000a },
- { _MMIO(0x9888), 0x0a934000 },
- { _MMIO(0x9888), 0x0a946000 },
- { _MMIO(0x9888), 0x0c959000 },
- { _MMIO(0x9888), 0x0e950098 },
- { _MMIO(0x9888), 0x10950000 },
- { _MMIO(0x9888), 0x04b04000 },
- { _MMIO(0x9888), 0x04b14000 },
- { _MMIO(0x9888), 0x04b20073 },
- { _MMIO(0x9888), 0x10b20000 },
- { _MMIO(0x9888), 0x04b38000 },
- { _MMIO(0x9888), 0x06b38000 },
- { _MMIO(0x9888), 0x08b34000 },
- { _MMIO(0x9888), 0x04b4c000 },
- { _MMIO(0x9888), 0x02b59890 },
- { _MMIO(0x9888), 0x10b50000 },
- { _MMIO(0x9888), 0x06d04000 },
- { _MMIO(0x9888), 0x06d14000 },
- { _MMIO(0x9888), 0x06d20073 },
- { _MMIO(0x9888), 0x10d20000 },
- { _MMIO(0x9888), 0x18d30020 },
- { _MMIO(0x9888), 0x02d38000 },
- { _MMIO(0x9888), 0x0cd34000 },
- { _MMIO(0x9888), 0x0ad48000 },
- { _MMIO(0x9888), 0x04d42000 },
- { _MMIO(0x9888), 0x0ed59000 },
- { _MMIO(0x9888), 0x00d59800 },
- { _MMIO(0x9888), 0x10d50000 },
- { _MMIO(0x9888), 0x0f88000e },
- { _MMIO(0x9888), 0x03888000 },
- { _MMIO(0x9888), 0x05888000 },
- { _MMIO(0x9888), 0x07888000 },
- { _MMIO(0x9888), 0x09888000 },
- { _MMIO(0x9888), 0x0b888000 },
- { _MMIO(0x9888), 0x0d880400 },
- { _MMIO(0x9888), 0x278b002a },
- { _MMIO(0x9888), 0x238b5500 },
- { _MMIO(0x9888), 0x258b000a },
- { _MMIO(0x9888), 0x1b8c0015 },
- { _MMIO(0x9888), 0x038c4000 },
- { _MMIO(0x9888), 0x058c4000 },
- { _MMIO(0x9888), 0x078c4000 },
- { _MMIO(0x9888), 0x098c4000 },
- { _MMIO(0x9888), 0x0b8c4000 },
- { _MMIO(0x9888), 0x0d8c4000 },
- { _MMIO(0x9888), 0x0d8d8000 },
- { _MMIO(0x9888), 0x0f8da000 },
- { _MMIO(0x9888), 0x018d8000 },
- { _MMIO(0x9888), 0x038da000 },
- { _MMIO(0x9888), 0x058da000 },
- { _MMIO(0x9888), 0x078d2000 },
- { _MMIO(0x9888), 0x2385002a },
- { _MMIO(0x9888), 0x1f85aa00 },
- { _MMIO(0x9888), 0x2185000a },
- { _MMIO(0x9888), 0x1b830150 },
- { _MMIO(0x9888), 0x03834000 },
- { _MMIO(0x9888), 0x05834000 },
- { _MMIO(0x9888), 0x07834000 },
- { _MMIO(0x9888), 0x09834000 },
- { _MMIO(0x9888), 0x0b834000 },
- { _MMIO(0x9888), 0x0d834000 },
- { _MMIO(0x9888), 0x0d848000 },
- { _MMIO(0x9888), 0x0f84c000 },
- { _MMIO(0x9888), 0x01848000 },
- { _MMIO(0x9888), 0x0384c000 },
- { _MMIO(0x9888), 0x0584c000 },
- { _MMIO(0x9888), 0x07844000 },
- { _MMIO(0x9888), 0x1d808000 },
- { _MMIO(0x9888), 0x1f80c000 },
- { _MMIO(0x9888), 0x11808000 },
- { _MMIO(0x9888), 0x1380c000 },
- { _MMIO(0x9888), 0x1580c000 },
- { _MMIO(0x9888), 0x17804000 },
- { _MMIO(0x9888), 0x53800000 },
- { _MMIO(0x9888), 0x47801021 },
- { _MMIO(0x9888), 0x21800000 },
- { _MMIO(0x9888), 0x31800000 },
- { _MMIO(0x9888), 0x4d800000 },
- { _MMIO(0x9888), 0x3f800c64 },
- { _MMIO(0x9888), 0x4f800000 },
- { _MMIO(0x9888), 0x41800c02 },
-};
-
-static int
-get_sampler_1_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_sampler_1;
- lens[n] = ARRAY_SIZE(mux_config_sampler_1);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_sampler_2[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x70800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2770), 0x0000c000 },
- { _MMIO(0x2774), 0x0000e7ff },
- { _MMIO(0x2778), 0x00003000 },
- { _MMIO(0x277c), 0x0000f9ff },
- { _MMIO(0x2780), 0x00000c00 },
- { _MMIO(0x2784), 0x0000fe7f },
-};
-
-static const struct i915_oa_reg flex_eu_config_sampler_2[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_sampler_2[] = {
- { _MMIO(0x9888), 0x18121400 },
- { _MMIO(0x9888), 0x141500ab },
- { _MMIO(0x9888), 0x18321400 },
- { _MMIO(0x9888), 0x143500ab },
- { _MMIO(0x9888), 0x18521400 },
- { _MMIO(0x9888), 0x145500ab },
- { _MMIO(0x9888), 0x0c5c8000 },
- { _MMIO(0x9888), 0x0e5c4000 },
- { _MMIO(0x9888), 0x025cc000 },
- { _MMIO(0x9888), 0x045cc000 },
- { _MMIO(0x9888), 0x1a3d00a0 },
- { _MMIO(0x9888), 0x0a3d8000 },
- { _MMIO(0x9888), 0x0c588000 },
- { _MMIO(0x9888), 0x0e584000 },
- { _MMIO(0x9888), 0x04588000 },
- { _MMIO(0x9888), 0x1a5b0050 },
- { _MMIO(0x9888), 0x045b8000 },
- { _MMIO(0x9888), 0x065b8000 },
- { _MMIO(0x9888), 0x085b8000 },
- { _MMIO(0x9888), 0x0a5b4000 },
- { _MMIO(0x9888), 0x101f02a0 },
- { _MMIO(0x9888), 0x0c1fa000 },
- { _MMIO(0x9888), 0x0e1f00aa },
- { _MMIO(0x9888), 0x18382500 },
- { _MMIO(0x9888), 0x02388000 },
- { _MMIO(0x9888), 0x04384000 },
- { _MMIO(0x9888), 0x06384000 },
- { _MMIO(0x9888), 0x08384000 },
- { _MMIO(0x9888), 0x0a384000 },
- { _MMIO(0x9888), 0x0c388000 },
- { _MMIO(0x9888), 0x0c398000 },
- { _MMIO(0x9888), 0x0e39a000 },
- { _MMIO(0x9888), 0x00398000 },
- { _MMIO(0x9888), 0x0239a000 },
- { _MMIO(0x9888), 0x0439a000 },
- { _MMIO(0x9888), 0x06392000 },
- { _MMIO(0x9888), 0x1a3a0200 },
- { _MMIO(0x9888), 0x023a8000 },
- { _MMIO(0x9888), 0x0c3a8000 },
- { _MMIO(0x9888), 0x04108000 },
- { _MMIO(0x9888), 0x04118000 },
- { _MMIO(0x9888), 0x04127300 },
- { _MMIO(0x9888), 0x10120000 },
- { _MMIO(0x9888), 0x1813000a },
- { _MMIO(0x9888), 0x0a134000 },
- { _MMIO(0x9888), 0x0a146000 },
- { _MMIO(0x9888), 0x0c159000 },
- { _MMIO(0x9888), 0x0e150098 },
- { _MMIO(0x9888), 0x10150000 },
- { _MMIO(0x9888), 0x04304000 },
- { _MMIO(0x9888), 0x04314000 },
- { _MMIO(0x9888), 0x04320073 },
- { _MMIO(0x9888), 0x10320000 },
- { _MMIO(0x9888), 0x04338000 },
- { _MMIO(0x9888), 0x06338000 },
- { _MMIO(0x9888), 0x08334000 },
- { _MMIO(0x9888), 0x0434c000 },
- { _MMIO(0x9888), 0x02359890 },
- { _MMIO(0x9888), 0x10350000 },
- { _MMIO(0x9888), 0x06504000 },
- { _MMIO(0x9888), 0x06514000 },
- { _MMIO(0x9888), 0x06520073 },
- { _MMIO(0x9888), 0x10520000 },
- { _MMIO(0x9888), 0x18530020 },
- { _MMIO(0x9888), 0x02538000 },
- { _MMIO(0x9888), 0x0c534000 },
- { _MMIO(0x9888), 0x0a548000 },
- { _MMIO(0x9888), 0x04542000 },
- { _MMIO(0x9888), 0x0e559000 },
- { _MMIO(0x9888), 0x00559800 },
- { _MMIO(0x9888), 0x10550000 },
- { _MMIO(0x9888), 0x1b8aa000 },
- { _MMIO(0x9888), 0x1d8a0002 },
- { _MMIO(0x9888), 0x038a8000 },
- { _MMIO(0x9888), 0x058a8000 },
- { _MMIO(0x9888), 0x078a8000 },
- { _MMIO(0x9888), 0x098a8000 },
- { _MMIO(0x9888), 0x0b8a8000 },
- { _MMIO(0x9888), 0x0d8a8000 },
- { _MMIO(0x9888), 0x278b0015 },
- { _MMIO(0x9888), 0x238b2a80 },
- { _MMIO(0x9888), 0x258b0005 },
- { _MMIO(0x9888), 0x2385002a },
- { _MMIO(0x9888), 0x1f85aa00 },
- { _MMIO(0x9888), 0x2185000a },
- { _MMIO(0x9888), 0x1b830150 },
- { _MMIO(0x9888), 0x03834000 },
- { _MMIO(0x9888), 0x05834000 },
- { _MMIO(0x9888), 0x07834000 },
- { _MMIO(0x9888), 0x09834000 },
- { _MMIO(0x9888), 0x0b834000 },
- { _MMIO(0x9888), 0x0d834000 },
- { _MMIO(0x9888), 0x0d848000 },
- { _MMIO(0x9888), 0x0f84c000 },
- { _MMIO(0x9888), 0x01848000 },
- { _MMIO(0x9888), 0x0384c000 },
- { _MMIO(0x9888), 0x0584c000 },
- { _MMIO(0x9888), 0x07844000 },
- { _MMIO(0x9888), 0x1d808000 },
- { _MMIO(0x9888), 0x1f80c000 },
- { _MMIO(0x9888), 0x11808000 },
- { _MMIO(0x9888), 0x1380c000 },
- { _MMIO(0x9888), 0x1580c000 },
- { _MMIO(0x9888), 0x17804000 },
- { _MMIO(0x9888), 0x53800000 },
- { _MMIO(0x9888), 0x47801021 },
- { _MMIO(0x9888), 0x21800000 },
- { _MMIO(0x9888), 0x31800000 },
- { _MMIO(0x9888), 0x4d800000 },
- { _MMIO(0x9888), 0x3f800c64 },
- { _MMIO(0x9888), 0x4f800000 },
- { _MMIO(0x9888), 0x41800c02 },
-};
-
-static int
-get_sampler_2_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_sampler_2;
- lens[n] = ARRAY_SIZE(mux_config_sampler_2);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_tdl_1[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x30800000 },
- { _MMIO(0x2770), 0x00000002 },
- { _MMIO(0x2774), 0x0000fdff },
- { _MMIO(0x2778), 0x00000000 },
- { _MMIO(0x277c), 0x0000fe7f },
- { _MMIO(0x2780), 0x00000002 },
- { _MMIO(0x2784), 0x0000ffbf },
- { _MMIO(0x2788), 0x00000000 },
- { _MMIO(0x278c), 0x0000ffcf },
- { _MMIO(0x2790), 0x00000002 },
- { _MMIO(0x2794), 0x0000fff7 },
- { _MMIO(0x2798), 0x00000000 },
- { _MMIO(0x279c), 0x0000fff9 },
-};
-
-static const struct i915_oa_reg flex_eu_config_tdl_1[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_tdl_1[] = {
- { _MMIO(0x9888), 0x16154d60 },
- { _MMIO(0x9888), 0x16352e60 },
- { _MMIO(0x9888), 0x16554d60 },
- { _MMIO(0x9888), 0x16950000 },
- { _MMIO(0x9888), 0x16b50000 },
- { _MMIO(0x9888), 0x16d50000 },
- { _MMIO(0x9888), 0x005c8000 },
- { _MMIO(0x9888), 0x045cc000 },
- { _MMIO(0x9888), 0x065c4000 },
- { _MMIO(0x9888), 0x083d8000 },
- { _MMIO(0x9888), 0x0a3d8000 },
- { _MMIO(0x9888), 0x0458c000 },
- { _MMIO(0x9888), 0x025b8000 },
- { _MMIO(0x9888), 0x085b4000 },
- { _MMIO(0x9888), 0x0a5b4000 },
- { _MMIO(0x9888), 0x0c5b8000 },
- { _MMIO(0x9888), 0x0c1fa000 },
- { _MMIO(0x9888), 0x0e1f00aa },
- { _MMIO(0x9888), 0x02384000 },
- { _MMIO(0x9888), 0x04388000 },
- { _MMIO(0x9888), 0x06388000 },
- { _MMIO(0x9888), 0x08384000 },
- { _MMIO(0x9888), 0x0a384000 },
- { _MMIO(0x9888), 0x0c384000 },
- { _MMIO(0x9888), 0x00398000 },
- { _MMIO(0x9888), 0x0239a000 },
- { _MMIO(0x9888), 0x0439a000 },
- { _MMIO(0x9888), 0x06392000 },
- { _MMIO(0x9888), 0x043a8000 },
- { _MMIO(0x9888), 0x063a8000 },
- { _MMIO(0x9888), 0x08138000 },
- { _MMIO(0x9888), 0x0a138000 },
- { _MMIO(0x9888), 0x06143000 },
- { _MMIO(0x9888), 0x0415cfc7 },
- { _MMIO(0x9888), 0x10150000 },
- { _MMIO(0x9888), 0x02338000 },
- { _MMIO(0x9888), 0x0c338000 },
- { _MMIO(0x9888), 0x04342000 },
- { _MMIO(0x9888), 0x06344000 },
- { _MMIO(0x9888), 0x0035c700 },
- { _MMIO(0x9888), 0x063500cf },
- { _MMIO(0x9888), 0x10350000 },
- { _MMIO(0x9888), 0x04538000 },
- { _MMIO(0x9888), 0x06538000 },
- { _MMIO(0x9888), 0x0454c000 },
- { _MMIO(0x9888), 0x0255cfc7 },
- { _MMIO(0x9888), 0x10550000 },
- { _MMIO(0x9888), 0x06dc8000 },
- { _MMIO(0x9888), 0x08dc4000 },
- { _MMIO(0x9888), 0x0cdcc000 },
- { _MMIO(0x9888), 0x0edcc000 },
- { _MMIO(0x9888), 0x1abd00a8 },
- { _MMIO(0x9888), 0x0cd8c000 },
- { _MMIO(0x9888), 0x0ed84000 },
- { _MMIO(0x9888), 0x0edb8000 },
- { _MMIO(0x9888), 0x18db0800 },
- { _MMIO(0x9888), 0x1adb0254 },
- { _MMIO(0x9888), 0x0e9faa00 },
- { _MMIO(0x9888), 0x109f02aa },
- { _MMIO(0x9888), 0x0eb84000 },
- { _MMIO(0x9888), 0x16b84000 },
- { _MMIO(0x9888), 0x18b8156a },
- { _MMIO(0x9888), 0x06b98000 },
- { _MMIO(0x9888), 0x08b9a000 },
- { _MMIO(0x9888), 0x0ab9a000 },
- { _MMIO(0x9888), 0x0cb9a000 },
- { _MMIO(0x9888), 0x0eb9a000 },
- { _MMIO(0x9888), 0x18baa000 },
- { _MMIO(0x9888), 0x1aba0002 },
- { _MMIO(0x9888), 0x16934000 },
- { _MMIO(0x9888), 0x1893000a },
- { _MMIO(0x9888), 0x0a947000 },
- { _MMIO(0x9888), 0x0c95c5c1 },
- { _MMIO(0x9888), 0x0e9500c3 },
- { _MMIO(0x9888), 0x10950000 },
- { _MMIO(0x9888), 0x0eb38000 },
- { _MMIO(0x9888), 0x16b30040 },
- { _MMIO(0x9888), 0x18b30020 },
- { _MMIO(0x9888), 0x06b48000 },
- { _MMIO(0x9888), 0x08b41000 },
- { _MMIO(0x9888), 0x0ab48000 },
- { _MMIO(0x9888), 0x06b5c500 },
- { _MMIO(0x9888), 0x08b500c3 },
- { _MMIO(0x9888), 0x0eb5c100 },
- { _MMIO(0x9888), 0x10b50000 },
- { _MMIO(0x9888), 0x16d31500 },
- { _MMIO(0x9888), 0x08d4e000 },
- { _MMIO(0x9888), 0x08d5c100 },
- { _MMIO(0x9888), 0x0ad5c3c5 },
- { _MMIO(0x9888), 0x10d50000 },
- { _MMIO(0x9888), 0x0d88f800 },
- { _MMIO(0x9888), 0x0f88000f },
- { _MMIO(0x9888), 0x038a8000 },
- { _MMIO(0x9888), 0x058a8000 },
- { _MMIO(0x9888), 0x078a8000 },
- { _MMIO(0x9888), 0x098a8000 },
- { _MMIO(0x9888), 0x0b8a8000 },
- { _MMIO(0x9888), 0x0d8a8000 },
- { _MMIO(0x9888), 0x258baaa5 },
- { _MMIO(0x9888), 0x278b002a },
- { _MMIO(0x9888), 0x238b2a80 },
- { _MMIO(0x9888), 0x0f8c4000 },
- { _MMIO(0x9888), 0x178c2000 },
- { _MMIO(0x9888), 0x198c5500 },
- { _MMIO(0x9888), 0x1b8c0015 },
- { _MMIO(0x9888), 0x078d8000 },
- { _MMIO(0x9888), 0x098da000 },
- { _MMIO(0x9888), 0x0b8da000 },
- { _MMIO(0x9888), 0x0d8da000 },
- { _MMIO(0x9888), 0x0f8da000 },
- { _MMIO(0x9888), 0x2185aaaa },
- { _MMIO(0x9888), 0x2385002a },
- { _MMIO(0x9888), 0x1f85aa00 },
- { _MMIO(0x9888), 0x0f834000 },
- { _MMIO(0x9888), 0x19835400 },
- { _MMIO(0x9888), 0x1b830155 },
- { _MMIO(0x9888), 0x03834000 },
- { _MMIO(0x9888), 0x05834000 },
- { _MMIO(0x9888), 0x07834000 },
- { _MMIO(0x9888), 0x09834000 },
- { _MMIO(0x9888), 0x0b834000 },
- { _MMIO(0x9888), 0x0d834000 },
- { _MMIO(0x9888), 0x0784c000 },
- { _MMIO(0x9888), 0x0984c000 },
- { _MMIO(0x9888), 0x0b84c000 },
- { _MMIO(0x9888), 0x0d84c000 },
- { _MMIO(0x9888), 0x0f84c000 },
- { _MMIO(0x9888), 0x01848000 },
- { _MMIO(0x9888), 0x0384c000 },
- { _MMIO(0x9888), 0x0584c000 },
- { _MMIO(0x9888), 0x1780c000 },
- { _MMIO(0x9888), 0x1980c000 },
- { _MMIO(0x9888), 0x1b80c000 },
- { _MMIO(0x9888), 0x1d80c000 },
- { _MMIO(0x9888), 0x1f80c000 },
- { _MMIO(0x9888), 0x11808000 },
- { _MMIO(0x9888), 0x1380c000 },
- { _MMIO(0x9888), 0x1580c000 },
- { _MMIO(0x9888), 0x4f800000 },
- { _MMIO(0x9888), 0x43800c42 },
- { _MMIO(0x9888), 0x51800000 },
- { _MMIO(0x9888), 0x45800063 },
- { _MMIO(0x9888), 0x53800000 },
- { _MMIO(0x9888), 0x47800800 },
- { _MMIO(0x9888), 0x21800000 },
- { _MMIO(0x9888), 0x31800000 },
- { _MMIO(0x9888), 0x4d800000 },
- { _MMIO(0x9888), 0x3f8014a4 },
- { _MMIO(0x9888), 0x41801042 },
-};
-
-static int
-get_tdl_1_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_tdl_1;
- lens[n] = ARRAY_SIZE(mux_config_tdl_1);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_tdl_2[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x30800000 },
- { _MMIO(0x2770), 0x00000002 },
- { _MMIO(0x2774), 0x0000fdff },
- { _MMIO(0x2778), 0x00000000 },
- { _MMIO(0x277c), 0x0000fe7f },
- { _MMIO(0x2780), 0x00000000 },
- { _MMIO(0x2784), 0x0000ff9f },
- { _MMIO(0x2788), 0x00000000 },
- { _MMIO(0x278c), 0x0000ffe7 },
- { _MMIO(0x2790), 0x00000002 },
- { _MMIO(0x2794), 0x0000fffb },
- { _MMIO(0x2798), 0x00000002 },
- { _MMIO(0x279c), 0x0000fffd },
-};
-
-static const struct i915_oa_reg flex_eu_config_tdl_2[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_tdl_2[] = {
- { _MMIO(0x9888), 0x16150000 },
- { _MMIO(0x9888), 0x16350000 },
- { _MMIO(0x9888), 0x16550000 },
- { _MMIO(0x9888), 0x16952e60 },
- { _MMIO(0x9888), 0x16b54d60 },
- { _MMIO(0x9888), 0x16d52e60 },
- { _MMIO(0x9888), 0x065c8000 },
- { _MMIO(0x9888), 0x085cc000 },
- { _MMIO(0x9888), 0x0a5cc000 },
- { _MMIO(0x9888), 0x0c5c4000 },
- { _MMIO(0x9888), 0x0e3d8000 },
- { _MMIO(0x9888), 0x183da000 },
- { _MMIO(0x9888), 0x06588000 },
- { _MMIO(0x9888), 0x08588000 },
- { _MMIO(0x9888), 0x0a584000 },
- { _MMIO(0x9888), 0x0e5b4000 },
- { _MMIO(0x9888), 0x185b5800 },
- { _MMIO(0x9888), 0x1a5b000a },
- { _MMIO(0x9888), 0x0e1faa00 },
- { _MMIO(0x9888), 0x101f02aa },
- { _MMIO(0x9888), 0x0e384000 },
- { _MMIO(0x9888), 0x16384000 },
- { _MMIO(0x9888), 0x18382a55 },
- { _MMIO(0x9888), 0x06398000 },
- { _MMIO(0x9888), 0x0839a000 },
- { _MMIO(0x9888), 0x0a39a000 },
- { _MMIO(0x9888), 0x0c39a000 },
- { _MMIO(0x9888), 0x0e39a000 },
- { _MMIO(0x9888), 0x1a3a02a0 },
- { _MMIO(0x9888), 0x0e138000 },
- { _MMIO(0x9888), 0x16130500 },
- { _MMIO(0x9888), 0x06148000 },
- { _MMIO(0x9888), 0x08146000 },
- { _MMIO(0x9888), 0x0615c100 },
- { _MMIO(0x9888), 0x0815c500 },
- { _MMIO(0x9888), 0x0a1500c3 },
- { _MMIO(0x9888), 0x10150000 },
- { _MMIO(0x9888), 0x16335040 },
- { _MMIO(0x9888), 0x08349000 },
- { _MMIO(0x9888), 0x0a341000 },
- { _MMIO(0x9888), 0x083500c1 },
- { _MMIO(0x9888), 0x0a35c500 },
- { _MMIO(0x9888), 0x0c3500c3 },
- { _MMIO(0x9888), 0x10350000 },
- { _MMIO(0x9888), 0x1853002a },
- { _MMIO(0x9888), 0x0a54e000 },
- { _MMIO(0x9888), 0x0c55c500 },
- { _MMIO(0x9888), 0x0e55c1c3 },
- { _MMIO(0x9888), 0x10550000 },
- { _MMIO(0x9888), 0x00dc8000 },
- { _MMIO(0x9888), 0x02dcc000 },
- { _MMIO(0x9888), 0x04dc4000 },
- { _MMIO(0x9888), 0x04bd8000 },
- { _MMIO(0x9888), 0x06bd8000 },
- { _MMIO(0x9888), 0x02d8c000 },
- { _MMIO(0x9888), 0x02db8000 },
- { _MMIO(0x9888), 0x04db4000 },
- { _MMIO(0x9888), 0x06db4000 },
- { _MMIO(0x9888), 0x08db8000 },
- { _MMIO(0x9888), 0x0c9fa000 },
- { _MMIO(0x9888), 0x0e9f00aa },
- { _MMIO(0x9888), 0x02b84000 },
- { _MMIO(0x9888), 0x04b84000 },
- { _MMIO(0x9888), 0x06b84000 },
- { _MMIO(0x9888), 0x08b84000 },
- { _MMIO(0x9888), 0x0ab88000 },
- { _MMIO(0x9888), 0x0cb88000 },
- { _MMIO(0x9888), 0x00b98000 },
- { _MMIO(0x9888), 0x02b9a000 },
- { _MMIO(0x9888), 0x04b9a000 },
- { _MMIO(0x9888), 0x06b92000 },
- { _MMIO(0x9888), 0x0aba8000 },
- { _MMIO(0x9888), 0x0cba8000 },
- { _MMIO(0x9888), 0x04938000 },
- { _MMIO(0x9888), 0x06938000 },
- { _MMIO(0x9888), 0x0494c000 },
- { _MMIO(0x9888), 0x0295cfc7 },
- { _MMIO(0x9888), 0x10950000 },
- { _MMIO(0x9888), 0x02b38000 },
- { _MMIO(0x9888), 0x08b38000 },
- { _MMIO(0x9888), 0x04b42000 },
- { _MMIO(0x9888), 0x06b41000 },
- { _MMIO(0x9888), 0x00b5c700 },
- { _MMIO(0x9888), 0x04b500cf },
- { _MMIO(0x9888), 0x10b50000 },
- { _MMIO(0x9888), 0x0ad38000 },
- { _MMIO(0x9888), 0x0cd38000 },
- { _MMIO(0x9888), 0x06d46000 },
- { _MMIO(0x9888), 0x04d5c700 },
- { _MMIO(0x9888), 0x06d500cf },
- { _MMIO(0x9888), 0x10d50000 },
- { _MMIO(0x9888), 0x03888000 },
- { _MMIO(0x9888), 0x05888000 },
- { _MMIO(0x9888), 0x07888000 },
- { _MMIO(0x9888), 0x09888000 },
- { _MMIO(0x9888), 0x0b888000 },
- { _MMIO(0x9888), 0x0d880400 },
- { _MMIO(0x9888), 0x0f8a8000 },
- { _MMIO(0x9888), 0x198a8000 },
- { _MMIO(0x9888), 0x1b8aaaa0 },
- { _MMIO(0x9888), 0x1d8a0002 },
- { _MMIO(0x9888), 0x258b555a },
- { _MMIO(0x9888), 0x278b0015 },
- { _MMIO(0x9888), 0x238b5500 },
- { _MMIO(0x9888), 0x038c4000 },
- { _MMIO(0x9888), 0x058c4000 },
- { _MMIO(0x9888), 0x078c4000 },
- { _MMIO(0x9888), 0x098c4000 },
- { _MMIO(0x9888), 0x0b8c4000 },
- { _MMIO(0x9888), 0x0d8c4000 },
- { _MMIO(0x9888), 0x018d8000 },
- { _MMIO(0x9888), 0x038da000 },
- { _MMIO(0x9888), 0x058da000 },
- { _MMIO(0x9888), 0x078d2000 },
- { _MMIO(0x9888), 0x2185aaaa },
- { _MMIO(0x9888), 0x2385002a },
- { _MMIO(0x9888), 0x1f85aa00 },
- { _MMIO(0x9888), 0x0f834000 },
- { _MMIO(0x9888), 0x19835400 },
- { _MMIO(0x9888), 0x1b830155 },
- { _MMIO(0x9888), 0x03834000 },
- { _MMIO(0x9888), 0x05834000 },
- { _MMIO(0x9888), 0x07834000 },
- { _MMIO(0x9888), 0x09834000 },
- { _MMIO(0x9888), 0x0b834000 },
- { _MMIO(0x9888), 0x0d834000 },
- { _MMIO(0x9888), 0x0784c000 },
- { _MMIO(0x9888), 0x0984c000 },
- { _MMIO(0x9888), 0x0b84c000 },
- { _MMIO(0x9888), 0x0d84c000 },
- { _MMIO(0x9888), 0x0f84c000 },
- { _MMIO(0x9888), 0x01848000 },
- { _MMIO(0x9888), 0x0384c000 },
- { _MMIO(0x9888), 0x0584c000 },
- { _MMIO(0x9888), 0x1780c000 },
- { _MMIO(0x9888), 0x1980c000 },
- { _MMIO(0x9888), 0x1b80c000 },
- { _MMIO(0x9888), 0x1d80c000 },
- { _MMIO(0x9888), 0x1f80c000 },
- { _MMIO(0x9888), 0x11808000 },
- { _MMIO(0x9888), 0x1380c000 },
- { _MMIO(0x9888), 0x1580c000 },
- { _MMIO(0x9888), 0x4f800000 },
- { _MMIO(0x9888), 0x43800882 },
- { _MMIO(0x9888), 0x51800000 },
- { _MMIO(0x9888), 0x45801082 },
- { _MMIO(0x9888), 0x53800000 },
- { _MMIO(0x9888), 0x478014a5 },
- { _MMIO(0x9888), 0x21800000 },
- { _MMIO(0x9888), 0x31800000 },
- { _MMIO(0x9888), 0x4d800000 },
- { _MMIO(0x9888), 0x3f800002 },
- { _MMIO(0x9888), 0x41800c62 },
-};
-
-static int
-get_tdl_2_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_tdl_2;
- lens[n] = ARRAY_SIZE(mux_config_tdl_2);
- n++;
-
- return n;
-}
-
static const struct i915_oa_reg b_counter_config_test_oa[] = {
{ _MMIO(0x2740), 0x00000000 },
{ _MMIO(0x2744), 0x00800000 },
@@ -1997,6 +60,7 @@ static const struct i915_oa_reg flex_eu_config_test_oa[] = {
};
static const struct i915_oa_reg mux_config_test_oa[] = {
+ { _MMIO(0x9840), 0x000000a0 },
{ _MMIO(0x9888), 0x59800000 },
{ _MMIO(0x9888), 0x59800001 },
{ _MMIO(0x9888), 0x338b0000 },
@@ -2008,866 +72,38 @@ static const struct i915_oa_reg mux_config_test_oa[] = {
{ _MMIO(0x9888), 0x57800000 },
{ _MMIO(0x1823a4), 0x00000000 },
{ _MMIO(0x9888), 0x59800000 },
-};
-
-static int
-get_test_oa_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_test_oa;
- lens[n] = ARRAY_SIZE(mux_config_test_oa);
- n++;
-
- return n;
-}
-
-int i915_oa_select_metric_set_chv(struct drm_i915_private *dev_priv)
-{
- dev_priv->perf.oa.n_mux_configs = 0;
- dev_priv->perf.oa.b_counter_regs = NULL;
- dev_priv->perf.oa.b_counter_regs_len = 0;
- dev_priv->perf.oa.flex_regs = NULL;
- dev_priv->perf.oa.flex_regs_len = 0;
-
- switch (dev_priv->perf.oa.metrics_set) {
- case METRIC_SET_ID_RENDER_BASIC:
- dev_priv->perf.oa.n_mux_configs =
- get_render_basic_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_BASIC\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_render_basic;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_render_basic);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_render_basic;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_render_basic);
-
- return 0;
- case METRIC_SET_ID_COMPUTE_BASIC:
- dev_priv->perf.oa.n_mux_configs =
- get_compute_basic_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_BASIC\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_compute_basic;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_compute_basic);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_compute_basic;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_compute_basic);
-
- return 0;
- case METRIC_SET_ID_RENDER_PIPE_PROFILE:
- dev_priv->perf.oa.n_mux_configs =
- get_render_pipe_profile_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_PIPE_PROFILE\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_render_pipe_profile;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_render_pipe_profile);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_render_pipe_profile;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_render_pipe_profile);
-
- return 0;
- case METRIC_SET_ID_HDC_AND_SF:
- dev_priv->perf.oa.n_mux_configs =
- get_hdc_and_sf_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"HDC_AND_SF\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_hdc_and_sf;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_hdc_and_sf);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_hdc_and_sf;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_hdc_and_sf);
-
- return 0;
- case METRIC_SET_ID_L3_1:
- dev_priv->perf.oa.n_mux_configs =
- get_l3_1_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_1\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_l3_1;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_l3_1);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_l3_1;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_l3_1);
-
- return 0;
- case METRIC_SET_ID_L3_2:
- dev_priv->perf.oa.n_mux_configs =
- get_l3_2_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_2\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_l3_2;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_l3_2);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_l3_2;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_l3_2);
-
- return 0;
- case METRIC_SET_ID_L3_3:
- dev_priv->perf.oa.n_mux_configs =
- get_l3_3_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_3\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_l3_3;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_l3_3);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_l3_3;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_l3_3);
-
- return 0;
- case METRIC_SET_ID_L3_4:
- dev_priv->perf.oa.n_mux_configs =
- get_l3_4_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_4\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_l3_4;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_l3_4);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_l3_4;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_l3_4);
-
- return 0;
- case METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND:
- dev_priv->perf.oa.n_mux_configs =
- get_rasterizer_and_pixel_backend_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"RASTERIZER_AND_PIXEL_BACKEND\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_rasterizer_and_pixel_backend;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_rasterizer_and_pixel_backend);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_rasterizer_and_pixel_backend;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_rasterizer_and_pixel_backend);
-
- return 0;
- case METRIC_SET_ID_SAMPLER_1:
- dev_priv->perf.oa.n_mux_configs =
- get_sampler_1_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"SAMPLER_1\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_sampler_1;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_sampler_1);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_sampler_1;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_sampler_1);
-
- return 0;
- case METRIC_SET_ID_SAMPLER_2:
- dev_priv->perf.oa.n_mux_configs =
- get_sampler_2_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"SAMPLER_2\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_sampler_2;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_sampler_2);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_sampler_2;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_sampler_2);
-
- return 0;
- case METRIC_SET_ID_TDL_1:
- dev_priv->perf.oa.n_mux_configs =
- get_tdl_1_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"TDL_1\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_tdl_1;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_tdl_1);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_tdl_1;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_tdl_1);
-
- return 0;
- case METRIC_SET_ID_TDL_2:
- dev_priv->perf.oa.n_mux_configs =
- get_tdl_2_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"TDL_2\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_tdl_2;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_tdl_2);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_tdl_2;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_tdl_2);
-
- return 0;
- case METRIC_SET_ID_TEST_OA:
- dev_priv->perf.oa.n_mux_configs =
- get_test_oa_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"TEST_OA\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_test_oa;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_test_oa;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_test_oa);
-
- return 0;
- default:
- return -ENODEV;
- }
-}
-
-static ssize_t
-show_render_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_BASIC);
-}
-
-static struct device_attribute dev_attr_render_basic_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_render_basic_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_render_basic[] = {
- &dev_attr_render_basic_id.attr,
- NULL,
-};
-
-static struct attribute_group group_render_basic = {
- .name = "9d8a3af5-c02c-4a4a-b947-f1672469e0fb",
- .attrs = attrs_render_basic,
-};
-
-static ssize_t
-show_compute_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_BASIC);
-}
-
-static struct device_attribute dev_attr_compute_basic_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_compute_basic_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_compute_basic[] = {
- &dev_attr_compute_basic_id.attr,
- NULL,
-};
-
-static struct attribute_group group_compute_basic = {
- .name = "f522a89c-ecd1-4522-8331-3383c54af5f5",
- .attrs = attrs_compute_basic,
-};
-
-static ssize_t
-show_render_pipe_profile_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_PIPE_PROFILE);
-}
-
-static struct device_attribute dev_attr_render_pipe_profile_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_render_pipe_profile_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_render_pipe_profile[] = {
- &dev_attr_render_pipe_profile_id.attr,
- NULL,
-};
-
-static struct attribute_group group_render_pipe_profile = {
- .name = "a9ccc03d-a943-4e6b-9cd6-13e063075927",
- .attrs = attrs_render_pipe_profile,
-};
-
-static ssize_t
-show_hdc_and_sf_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_HDC_AND_SF);
-}
-
-static struct device_attribute dev_attr_hdc_and_sf_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_hdc_and_sf_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_hdc_and_sf[] = {
- &dev_attr_hdc_and_sf_id.attr,
- NULL,
-};
-
-static struct attribute_group group_hdc_and_sf = {
- .name = "2cf0c064-68df-4fac-9b3f-57f51ca8a069",
- .attrs = attrs_hdc_and_sf,
-};
-
-static ssize_t
-show_l3_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_L3_1);
-}
-
-static struct device_attribute dev_attr_l3_1_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_l3_1_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_l3_1[] = {
- &dev_attr_l3_1_id.attr,
- NULL,
-};
-
-static struct attribute_group group_l3_1 = {
- .name = "78a87ff9-543a-49ce-95ea-26d86071ea93",
- .attrs = attrs_l3_1,
-};
-
-static ssize_t
-show_l3_2_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_L3_2);
-}
-
-static struct device_attribute dev_attr_l3_2_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_l3_2_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_l3_2[] = {
- &dev_attr_l3_2_id.attr,
- NULL,
-};
-
-static struct attribute_group group_l3_2 = {
- .name = "9f2cece5-7bfe-4320-ad66-8c7cc526bec5",
- .attrs = attrs_l3_2,
-};
-
-static ssize_t
-show_l3_3_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_L3_3);
-}
-
-static struct device_attribute dev_attr_l3_3_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_l3_3_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_l3_3[] = {
- &dev_attr_l3_3_id.attr,
- NULL,
-};
-
-static struct attribute_group group_l3_3 = {
- .name = "d890ef38-d309-47e4-b8b5-aa779bb19ab0",
- .attrs = attrs_l3_3,
-};
-
-static ssize_t
-show_l3_4_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_L3_4);
-}
-
-static struct device_attribute dev_attr_l3_4_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_l3_4_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_l3_4[] = {
- &dev_attr_l3_4_id.attr,
- NULL,
-};
-
-static struct attribute_group group_l3_4 = {
- .name = "5fdff4a6-9dc8-45e1-bfda-ef54869fbdd4",
- .attrs = attrs_l3_4,
-};
-
-static ssize_t
-show_rasterizer_and_pixel_backend_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND);
-}
-
-static struct device_attribute dev_attr_rasterizer_and_pixel_backend_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_rasterizer_and_pixel_backend_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_rasterizer_and_pixel_backend[] = {
- &dev_attr_rasterizer_and_pixel_backend_id.attr,
- NULL,
-};
-
-static struct attribute_group group_rasterizer_and_pixel_backend = {
- .name = "2c0e45e1-7e2c-4a14-ae00-0b7ec868b8aa",
- .attrs = attrs_rasterizer_and_pixel_backend,
-};
-
-static ssize_t
-show_sampler_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_SAMPLER_1);
-}
-
-static struct device_attribute dev_attr_sampler_1_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_sampler_1_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_sampler_1[] = {
- &dev_attr_sampler_1_id.attr,
- NULL,
-};
-
-static struct attribute_group group_sampler_1 = {
- .name = "71148d78-baf5-474f-878a-e23158d0265d",
- .attrs = attrs_sampler_1,
-};
-
-static ssize_t
-show_sampler_2_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_SAMPLER_2);
-}
-
-static struct device_attribute dev_attr_sampler_2_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_sampler_2_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_sampler_2[] = {
- &dev_attr_sampler_2_id.attr,
- NULL,
-};
-
-static struct attribute_group group_sampler_2 = {
- .name = "b996a2b7-c59c-492d-877a-8cd54fd6df84",
- .attrs = attrs_sampler_2,
-};
-
-static ssize_t
-show_tdl_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_TDL_1);
-}
-
-static struct device_attribute dev_attr_tdl_1_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_tdl_1_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_tdl_1[] = {
- &dev_attr_tdl_1_id.attr,
- NULL,
-};
-
-static struct attribute_group group_tdl_1 = {
- .name = "eb2fecba-b431-42e7-8261-fe9429a6e67a",
- .attrs = attrs_tdl_1,
-};
-
-static ssize_t
-show_tdl_2_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_TDL_2);
-}
-
-static struct device_attribute dev_attr_tdl_2_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_tdl_2_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_tdl_2[] = {
- &dev_attr_tdl_2_id.attr,
- NULL,
-};
-
-static struct attribute_group group_tdl_2 = {
- .name = "60749470-a648-4a4b-9f10-dbfe1e36e44d",
- .attrs = attrs_tdl_2,
+ { _MMIO(0x9840), 0x00000080 },
};
static ssize_t
show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", METRIC_SET_ID_TEST_OA);
+ return sprintf(buf, "1\n");
}
-static struct device_attribute dev_attr_test_oa_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_test_oa_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_test_oa[] = {
- &dev_attr_test_oa_id.attr,
- NULL,
-};
-
-static struct attribute_group group_test_oa = {
- .name = "4a534b07-cba3-414d-8d60-874830e883aa",
- .attrs = attrs_test_oa,
-};
-
-int
-i915_perf_register_sysfs_chv(struct drm_i915_private *dev_priv)
+void
+i915_perf_load_test_config_chv(struct drm_i915_private *dev_priv)
{
- const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
- int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];
- int ret = 0;
+ strncpy(dev_priv->perf.oa.test_config.uuid,
+ "4a534b07-cba3-414d-8d60-874830e883aa",
+ UUID_STRING_LEN);
+ dev_priv->perf.oa.test_config.id = 1;
- if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_basic);
- if (ret)
- goto error_render_basic;
- }
- if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
- if (ret)
- goto error_compute_basic;
- }
- if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
- if (ret)
- goto error_render_pipe_profile;
- }
- if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
- if (ret)
- goto error_hdc_and_sf;
- }
- if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_1);
- if (ret)
- goto error_l3_1;
- }
- if (get_l3_2_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_2);
- if (ret)
- goto error_l3_2;
- }
- if (get_l3_3_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_3);
- if (ret)
- goto error_l3_3;
- }
- if (get_l3_4_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_4);
- if (ret)
- goto error_l3_4;
- }
- if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
- if (ret)
- goto error_rasterizer_and_pixel_backend;
- }
- if (get_sampler_1_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_sampler_1);
- if (ret)
- goto error_sampler_1;
- }
- if (get_sampler_2_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_sampler_2);
- if (ret)
- goto error_sampler_2;
- }
- if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
- if (ret)
- goto error_tdl_1;
- }
- if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
- if (ret)
- goto error_tdl_2;
- }
- if (get_test_oa_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_test_oa);
- if (ret)
- goto error_test_oa;
- }
+ dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- return 0;
+ dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-error_test_oa:
- if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
-error_tdl_2:
- if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
-error_tdl_1:
- if (get_sampler_2_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler_2);
-error_sampler_2:
- if (get_sampler_1_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler_1);
-error_sampler_1:
- if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
-error_rasterizer_and_pixel_backend:
- if (get_l3_4_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_4);
-error_l3_4:
- if (get_l3_3_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_3);
-error_l3_3:
- if (get_l3_2_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_2);
-error_l3_2:
- if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_1);
-error_l3_1:
- if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
-error_hdc_and_sf:
- if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
-error_render_pipe_profile:
- if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
-error_compute_basic:
- if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
-error_render_basic:
- return ret;
-}
+ dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-void
-i915_perf_unregister_sysfs_chv(struct drm_i915_private *dev_priv)
-{
- const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
- int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];
+ dev_priv->perf.oa.test_config.sysfs_metric.name = "4a534b07-cba3-414d-8d60-874830e883aa";
+ dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+
+ dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
- if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
- if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
- if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
- if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
- if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_1);
- if (get_l3_2_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_2);
- if (get_l3_3_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_3);
- if (get_l3_4_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_4);
- if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
- if (get_sampler_1_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler_1);
- if (get_sampler_2_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler_2);
- if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
- if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
- if (get_test_oa_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_test_oa);
+ dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
}
diff --git a/drivers/gpu/drm/i915/i915_oa_chv.h b/drivers/gpu/drm/i915/i915_oa_chv.h
index 8b8bdc26d726..b9622496979e 100644
--- a/drivers/gpu/drm/i915/i915_oa_chv.h
+++ b/drivers/gpu/drm/i915/i915_oa_chv.h
@@ -29,12 +29,6 @@
#ifndef __I915_OA_CHV_H__
#define __I915_OA_CHV_H__
-extern int i915_oa_n_builtin_metric_sets_chv;
-
-extern int i915_oa_select_metric_set_chv(struct drm_i915_private *dev_priv);
-
-extern int i915_perf_register_sysfs_chv(struct drm_i915_private *dev_priv);
-
-extern void i915_perf_unregister_sysfs_chv(struct drm_i915_private *dev_priv);
+extern void i915_perf_load_test_config_chv(struct drm_i915_private *dev_priv);
#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_glk.c b/drivers/gpu/drm/i915/i915_oa_glk.c
index 2f356d51bff8..4ee527e4c926 100644
--- a/drivers/gpu/drm/i915/i915_oa_glk.c
+++ b/drivers/gpu/drm/i915/i915_oa_glk.c
@@ -31,1614 +31,6 @@
#include "i915_drv.h"
#include "i915_oa_glk.h"
-enum metric_set_id {
- METRIC_SET_ID_RENDER_BASIC = 1,
- METRIC_SET_ID_COMPUTE_BASIC,
- METRIC_SET_ID_RENDER_PIPE_PROFILE,
- METRIC_SET_ID_MEMORY_READS,
- METRIC_SET_ID_MEMORY_WRITES,
- METRIC_SET_ID_COMPUTE_EXTENDED,
- METRIC_SET_ID_COMPUTE_L3_CACHE,
- METRIC_SET_ID_HDC_AND_SF,
- METRIC_SET_ID_L3_1,
- METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND,
- METRIC_SET_ID_SAMPLER,
- METRIC_SET_ID_TDL_1,
- METRIC_SET_ID_TDL_2,
- METRIC_SET_ID_COMPUTE_EXTRA,
- METRIC_SET_ID_TEST_OA,
-};
-
-int i915_oa_n_builtin_metric_sets_glk = 15;
-
-static const struct i915_oa_reg b_counter_config_render_basic[] = {
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x00800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2740), 0x00000000 },
-};
-
-static const struct i915_oa_reg flex_eu_config_render_basic[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_render_basic[] = {
- { _MMIO(0x9888), 0x166c00f0 },
- { _MMIO(0x9888), 0x12120280 },
- { _MMIO(0x9888), 0x12320280 },
- { _MMIO(0x9888), 0x11930317 },
- { _MMIO(0x9888), 0x159303df },
- { _MMIO(0x9888), 0x3f900c00 },
- { _MMIO(0x9888), 0x419000a0 },
- { _MMIO(0x9888), 0x002d1000 },
- { _MMIO(0x9888), 0x062d4000 },
- { _MMIO(0x9888), 0x082d5000 },
- { _MMIO(0x9888), 0x0a2d1000 },
- { _MMIO(0x9888), 0x0c2e0800 },
- { _MMIO(0x9888), 0x0e2e5900 },
- { _MMIO(0x9888), 0x0a4c8000 },
- { _MMIO(0x9888), 0x0c4c8000 },
- { _MMIO(0x9888), 0x0e4c4000 },
- { _MMIO(0x9888), 0x064e8000 },
- { _MMIO(0x9888), 0x084e8000 },
- { _MMIO(0x9888), 0x0a4e2000 },
- { _MMIO(0x9888), 0x1c4f0010 },
- { _MMIO(0x9888), 0x0a6c0053 },
- { _MMIO(0x9888), 0x106c0000 },
- { _MMIO(0x9888), 0x1c6c0000 },
- { _MMIO(0x9888), 0x1a0fcc00 },
- { _MMIO(0x9888), 0x1c0f0002 },
- { _MMIO(0x9888), 0x1c2c0040 },
- { _MMIO(0x9888), 0x00101000 },
- { _MMIO(0x9888), 0x04101000 },
- { _MMIO(0x9888), 0x00114000 },
- { _MMIO(0x9888), 0x08114000 },
- { _MMIO(0x9888), 0x00120020 },
- { _MMIO(0x9888), 0x08120021 },
- { _MMIO(0x9888), 0x00141000 },
- { _MMIO(0x9888), 0x08141000 },
- { _MMIO(0x9888), 0x02308000 },
- { _MMIO(0x9888), 0x04302000 },
- { _MMIO(0x9888), 0x06318000 },
- { _MMIO(0x9888), 0x08318000 },
- { _MMIO(0x9888), 0x06320800 },
- { _MMIO(0x9888), 0x08320840 },
- { _MMIO(0x9888), 0x00320000 },
- { _MMIO(0x9888), 0x06344000 },
- { _MMIO(0x9888), 0x08344000 },
- { _MMIO(0x9888), 0x0d931831 },
- { _MMIO(0x9888), 0x0f939f3f },
- { _MMIO(0x9888), 0x01939e80 },
- { _MMIO(0x9888), 0x039303bc },
- { _MMIO(0x9888), 0x0593000e },
- { _MMIO(0x9888), 0x1993002a },
- { _MMIO(0x9888), 0x07930000 },
- { _MMIO(0x9888), 0x09930000 },
- { _MMIO(0x9888), 0x1d900177 },
- { _MMIO(0x9888), 0x1f900187 },
- { _MMIO(0x9888), 0x35900000 },
- { _MMIO(0x9888), 0x13904000 },
- { _MMIO(0x9888), 0x21904000 },
- { _MMIO(0x9888), 0x23904000 },
- { _MMIO(0x9888), 0x25904000 },
- { _MMIO(0x9888), 0x27904000 },
- { _MMIO(0x9888), 0x2b904000 },
- { _MMIO(0x9888), 0x2d904000 },
- { _MMIO(0x9888), 0x2f904000 },
- { _MMIO(0x9888), 0x31904000 },
- { _MMIO(0x9888), 0x15904000 },
- { _MMIO(0x9888), 0x17904000 },
- { _MMIO(0x9888), 0x19904000 },
- { _MMIO(0x9888), 0x1b904000 },
- { _MMIO(0x9888), 0x53901110 },
- { _MMIO(0x9888), 0x43900423 },
- { _MMIO(0x9888), 0x55900111 },
- { _MMIO(0x9888), 0x47900c02 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900020 },
- { _MMIO(0x9888), 0x59901111 },
- { _MMIO(0x9888), 0x4b900421 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4d900001 },
- { _MMIO(0x9888), 0x45900821 },
-};
-
-static int
-get_render_basic_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_render_basic;
- lens[n] = ARRAY_SIZE(mux_config_render_basic);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_compute_basic[] = {
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x00800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2740), 0x00000000 },
-};
-
-static const struct i915_oa_reg flex_eu_config_compute_basic[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00000003 },
- { _MMIO(0xe658), 0x00002001 },
- { _MMIO(0xe758), 0x00778008 },
- { _MMIO(0xe45c), 0x00088078 },
- { _MMIO(0xe55c), 0x00808708 },
- { _MMIO(0xe65c), 0x00a08908 },
-};
-
-static const struct i915_oa_reg mux_config_compute_basic[] = {
- { _MMIO(0x9888), 0x104f00e0 },
- { _MMIO(0x9888), 0x124f1c00 },
- { _MMIO(0x9888), 0x39900340 },
- { _MMIO(0x9888), 0x3f900c00 },
- { _MMIO(0x9888), 0x41900000 },
- { _MMIO(0x9888), 0x002d5000 },
- { _MMIO(0x9888), 0x062d4000 },
- { _MMIO(0x9888), 0x082d4000 },
- { _MMIO(0x9888), 0x0a2d1000 },
- { _MMIO(0x9888), 0x0c2d5000 },
- { _MMIO(0x9888), 0x0e2d4000 },
- { _MMIO(0x9888), 0x0c2e1400 },
- { _MMIO(0x9888), 0x0e2e5100 },
- { _MMIO(0x9888), 0x102e0114 },
- { _MMIO(0x9888), 0x044cc000 },
- { _MMIO(0x9888), 0x0a4c8000 },
- { _MMIO(0x9888), 0x0c4c8000 },
- { _MMIO(0x9888), 0x0e4c4000 },
- { _MMIO(0x9888), 0x104c8000 },
- { _MMIO(0x9888), 0x124c8000 },
- { _MMIO(0x9888), 0x164c2000 },
- { _MMIO(0x9888), 0x004ea000 },
- { _MMIO(0x9888), 0x064e8000 },
- { _MMIO(0x9888), 0x084e8000 },
- { _MMIO(0x9888), 0x0a4e2000 },
- { _MMIO(0x9888), 0x0c4ea000 },
- { _MMIO(0x9888), 0x0e4e8000 },
- { _MMIO(0x9888), 0x004f6b42 },
- { _MMIO(0x9888), 0x064f6200 },
- { _MMIO(0x9888), 0x084f4100 },
- { _MMIO(0x9888), 0x0a4f0061 },
- { _MMIO(0x9888), 0x0c4f6c4c },
- { _MMIO(0x9888), 0x0e4f4b00 },
- { _MMIO(0x9888), 0x1a4f0000 },
- { _MMIO(0x9888), 0x1c4f0000 },
- { _MMIO(0x9888), 0x180f5000 },
- { _MMIO(0x9888), 0x1a0f8800 },
- { _MMIO(0x9888), 0x1c0f08a2 },
- { _MMIO(0x9888), 0x182c4000 },
- { _MMIO(0x9888), 0x1c2c1451 },
- { _MMIO(0x9888), 0x1e2c0001 },
- { _MMIO(0x9888), 0x1a2c0010 },
- { _MMIO(0x9888), 0x01938000 },
- { _MMIO(0x9888), 0x0f938000 },
- { _MMIO(0x9888), 0x19938a28 },
- { _MMIO(0x9888), 0x03938000 },
- { _MMIO(0x9888), 0x19900177 },
- { _MMIO(0x9888), 0x1b900178 },
- { _MMIO(0x9888), 0x1d900125 },
- { _MMIO(0x9888), 0x1f900123 },
- { _MMIO(0x9888), 0x35900000 },
- { _MMIO(0x9888), 0x13904000 },
- { _MMIO(0x9888), 0x21904000 },
- { _MMIO(0x9888), 0x25904000 },
- { _MMIO(0x9888), 0x27904000 },
- { _MMIO(0x9888), 0x2b904000 },
- { _MMIO(0x9888), 0x2d904000 },
- { _MMIO(0x9888), 0x31904000 },
- { _MMIO(0x9888), 0x15904000 },
- { _MMIO(0x9888), 0x53901000 },
- { _MMIO(0x9888), 0x43900000 },
- { _MMIO(0x9888), 0x55900111 },
- { _MMIO(0x9888), 0x47900000 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900000 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x4b900000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4d900000 },
- { _MMIO(0x9888), 0x45900000 },
-};
-
-static int
-get_compute_basic_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_compute_basic;
- lens[n] = ARRAY_SIZE(mux_config_compute_basic);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_render_pipe_profile[] = {
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2770), 0x0007ffea },
- { _MMIO(0x2774), 0x00007ffc },
- { _MMIO(0x2778), 0x0007affa },
- { _MMIO(0x277c), 0x0000f5fd },
- { _MMIO(0x2780), 0x00079ffa },
- { _MMIO(0x2784), 0x0000f3fb },
- { _MMIO(0x2788), 0x0007bf7a },
- { _MMIO(0x278c), 0x0000f7e7 },
- { _MMIO(0x2790), 0x0007fefa },
- { _MMIO(0x2794), 0x0000f7cf },
- { _MMIO(0x2798), 0x00077ffa },
- { _MMIO(0x279c), 0x0000efdf },
- { _MMIO(0x27a0), 0x0006fffa },
- { _MMIO(0x27a4), 0x0000cfbf },
- { _MMIO(0x27a8), 0x0003fffa },
- { _MMIO(0x27ac), 0x00005f7f },
-};
-
-static const struct i915_oa_reg flex_eu_config_render_pipe_profile[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00015014 },
- { _MMIO(0xe658), 0x00025024 },
- { _MMIO(0xe758), 0x00035034 },
- { _MMIO(0xe45c), 0x00045044 },
- { _MMIO(0xe55c), 0x00055054 },
- { _MMIO(0xe65c), 0x00065064 },
-};
-
-static const struct i915_oa_reg mux_config_render_pipe_profile[] = {
- { _MMIO(0x9888), 0x0c2e001f },
- { _MMIO(0x9888), 0x0a2f0000 },
- { _MMIO(0x9888), 0x10186800 },
- { _MMIO(0x9888), 0x11810019 },
- { _MMIO(0x9888), 0x15810013 },
- { _MMIO(0x9888), 0x13820020 },
- { _MMIO(0x9888), 0x11830020 },
- { _MMIO(0x9888), 0x17840000 },
- { _MMIO(0x9888), 0x11860007 },
- { _MMIO(0x9888), 0x21860000 },
- { _MMIO(0x9888), 0x178703e0 },
- { _MMIO(0x9888), 0x0c2d8000 },
- { _MMIO(0x9888), 0x042d4000 },
- { _MMIO(0x9888), 0x062d1000 },
- { _MMIO(0x9888), 0x022e5400 },
- { _MMIO(0x9888), 0x002e0000 },
- { _MMIO(0x9888), 0x0e2e0080 },
- { _MMIO(0x9888), 0x082f0040 },
- { _MMIO(0x9888), 0x002f0000 },
- { _MMIO(0x9888), 0x06143000 },
- { _MMIO(0x9888), 0x06174000 },
- { _MMIO(0x9888), 0x06180012 },
- { _MMIO(0x9888), 0x00180000 },
- { _MMIO(0x9888), 0x0d804000 },
- { _MMIO(0x9888), 0x0f804000 },
- { _MMIO(0x9888), 0x05804000 },
- { _MMIO(0x9888), 0x09810200 },
- { _MMIO(0x9888), 0x0b810030 },
- { _MMIO(0x9888), 0x03810003 },
- { _MMIO(0x9888), 0x21819140 },
- { _MMIO(0x9888), 0x23819050 },
- { _MMIO(0x9888), 0x25810018 },
- { _MMIO(0x9888), 0x0b820980 },
- { _MMIO(0x9888), 0x03820d80 },
- { _MMIO(0x9888), 0x11820000 },
- { _MMIO(0x9888), 0x0182c000 },
- { _MMIO(0x9888), 0x07828000 },
- { _MMIO(0x9888), 0x09824000 },
- { _MMIO(0x9888), 0x0f828000 },
- { _MMIO(0x9888), 0x0d830004 },
- { _MMIO(0x9888), 0x0583000c },
- { _MMIO(0x9888), 0x0f831000 },
- { _MMIO(0x9888), 0x01848072 },
- { _MMIO(0x9888), 0x11840000 },
- { _MMIO(0x9888), 0x07848000 },
- { _MMIO(0x9888), 0x09844000 },
- { _MMIO(0x9888), 0x0f848000 },
- { _MMIO(0x9888), 0x07860000 },
- { _MMIO(0x9888), 0x09860092 },
- { _MMIO(0x9888), 0x0f860400 },
- { _MMIO(0x9888), 0x01869100 },
- { _MMIO(0x9888), 0x0f870065 },
- { _MMIO(0x9888), 0x01870000 },
- { _MMIO(0x9888), 0x19930800 },
- { _MMIO(0x9888), 0x0b938000 },
- { _MMIO(0x9888), 0x0d938000 },
- { _MMIO(0x9888), 0x1b952000 },
- { _MMIO(0x9888), 0x1d955055 },
- { _MMIO(0x9888), 0x1f951455 },
- { _MMIO(0x9888), 0x0992a000 },
- { _MMIO(0x9888), 0x0f928000 },
- { _MMIO(0x9888), 0x1192a800 },
- { _MMIO(0x9888), 0x1392028a },
- { _MMIO(0x9888), 0x0b92a000 },
- { _MMIO(0x9888), 0x0d922000 },
- { _MMIO(0x9888), 0x13908000 },
- { _MMIO(0x9888), 0x21908000 },
- { _MMIO(0x9888), 0x23908000 },
- { _MMIO(0x9888), 0x25908000 },
- { _MMIO(0x9888), 0x27908000 },
- { _MMIO(0x9888), 0x29908000 },
- { _MMIO(0x9888), 0x2b908000 },
- { _MMIO(0x9888), 0x2d904000 },
- { _MMIO(0x9888), 0x2f908000 },
- { _MMIO(0x9888), 0x31908000 },
- { _MMIO(0x9888), 0x15908000 },
- { _MMIO(0x9888), 0x17908000 },
- { _MMIO(0x9888), 0x19908000 },
- { _MMIO(0x9888), 0x1b908000 },
- { _MMIO(0x9888), 0x1d904000 },
- { _MMIO(0x9888), 0x1f904000 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x43900c01 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x47900000 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900863 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x4b900061 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4d900000 },
- { _MMIO(0x9888), 0x45900c22 },
-};
-
-static int
-get_render_pipe_profile_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_render_pipe_profile;
- lens[n] = ARRAY_SIZE(mux_config_render_pipe_profile);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_memory_reads[] = {
- { _MMIO(0x272c), 0xffffffff },
- { _MMIO(0x2728), 0xffffffff },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x271c), 0xffffffff },
- { _MMIO(0x2718), 0xffffffff },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x274c), 0x86543210 },
- { _MMIO(0x2748), 0x86543210 },
- { _MMIO(0x2744), 0x00006667 },
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x275c), 0x86543210 },
- { _MMIO(0x2758), 0x86543210 },
- { _MMIO(0x2754), 0x00006465 },
- { _MMIO(0x2750), 0x00000000 },
- { _MMIO(0x2770), 0x0007f81a },
- { _MMIO(0x2774), 0x0000fe00 },
- { _MMIO(0x2778), 0x0007f82a },
- { _MMIO(0x277c), 0x0000fe00 },
- { _MMIO(0x2780), 0x0007f872 },
- { _MMIO(0x2784), 0x0000fe00 },
- { _MMIO(0x2788), 0x0007f8ba },
- { _MMIO(0x278c), 0x0000fe00 },
- { _MMIO(0x2790), 0x0007f87a },
- { _MMIO(0x2794), 0x0000fe00 },
- { _MMIO(0x2798), 0x0007f8ea },
- { _MMIO(0x279c), 0x0000fe00 },
- { _MMIO(0x27a0), 0x0007f8e2 },
- { _MMIO(0x27a4), 0x0000fe00 },
- { _MMIO(0x27a8), 0x0007f8f2 },
- { _MMIO(0x27ac), 0x0000fe00 },
-};
-
-static const struct i915_oa_reg flex_eu_config_memory_reads[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00015014 },
- { _MMIO(0xe658), 0x00025024 },
- { _MMIO(0xe758), 0x00035034 },
- { _MMIO(0xe45c), 0x00045044 },
- { _MMIO(0xe55c), 0x00055054 },
- { _MMIO(0xe65c), 0x00065064 },
-};
-
-static const struct i915_oa_reg mux_config_memory_reads[] = {
- { _MMIO(0x9888), 0x19800343 },
- { _MMIO(0x9888), 0x39900340 },
- { _MMIO(0x9888), 0x3f901000 },
- { _MMIO(0x9888), 0x41900003 },
- { _MMIO(0x9888), 0x03803180 },
- { _MMIO(0x9888), 0x058035e2 },
- { _MMIO(0x9888), 0x0780006a },
- { _MMIO(0x9888), 0x11800000 },
- { _MMIO(0x9888), 0x2181a000 },
- { _MMIO(0x9888), 0x2381000a },
- { _MMIO(0x9888), 0x1d950550 },
- { _MMIO(0x9888), 0x0b928000 },
- { _MMIO(0x9888), 0x0d92a000 },
- { _MMIO(0x9888), 0x0f922000 },
- { _MMIO(0x9888), 0x13900170 },
- { _MMIO(0x9888), 0x21900171 },
- { _MMIO(0x9888), 0x23900172 },
- { _MMIO(0x9888), 0x25900173 },
- { _MMIO(0x9888), 0x27900174 },
- { _MMIO(0x9888), 0x29900175 },
- { _MMIO(0x9888), 0x2b900176 },
- { _MMIO(0x9888), 0x2d900177 },
- { _MMIO(0x9888), 0x2f90017f },
- { _MMIO(0x9888), 0x31900125 },
- { _MMIO(0x9888), 0x15900123 },
- { _MMIO(0x9888), 0x17900121 },
- { _MMIO(0x9888), 0x35900000 },
- { _MMIO(0x9888), 0x19908000 },
- { _MMIO(0x9888), 0x1b908000 },
- { _MMIO(0x9888), 0x1d908000 },
- { _MMIO(0x9888), 0x1f908000 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x43901084 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x47901080 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49901084 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x4b901084 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4d900004 },
- { _MMIO(0x9888), 0x45900000 },
-};
-
-static int
-get_memory_reads_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_memory_reads;
- lens[n] = ARRAY_SIZE(mux_config_memory_reads);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_memory_writes[] = {
- { _MMIO(0x272c), 0xffffffff },
- { _MMIO(0x2728), 0xffffffff },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x271c), 0xffffffff },
- { _MMIO(0x2718), 0xffffffff },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x274c), 0x86543210 },
- { _MMIO(0x2748), 0x86543210 },
- { _MMIO(0x2744), 0x00006667 },
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x275c), 0x86543210 },
- { _MMIO(0x2758), 0x86543210 },
- { _MMIO(0x2754), 0x00006465 },
- { _MMIO(0x2750), 0x00000000 },
- { _MMIO(0x2770), 0x0007f81a },
- { _MMIO(0x2774), 0x0000fe00 },
- { _MMIO(0x2778), 0x0007f82a },
- { _MMIO(0x277c), 0x0000fe00 },
- { _MMIO(0x2780), 0x0007f822 },
- { _MMIO(0x2784), 0x0000fe00 },
- { _MMIO(0x2788), 0x0007f8ba },
- { _MMIO(0x278c), 0x0000fe00 },
- { _MMIO(0x2790), 0x0007f87a },
- { _MMIO(0x2794), 0x0000fe00 },
- { _MMIO(0x2798), 0x0007f8ea },
- { _MMIO(0x279c), 0x0000fe00 },
- { _MMIO(0x27a0), 0x0007f8e2 },
- { _MMIO(0x27a4), 0x0000fe00 },
- { _MMIO(0x27a8), 0x0007f8f2 },
- { _MMIO(0x27ac), 0x0000fe00 },
-};
-
-static const struct i915_oa_reg flex_eu_config_memory_writes[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00015014 },
- { _MMIO(0xe658), 0x00025024 },
- { _MMIO(0xe758), 0x00035034 },
- { _MMIO(0xe45c), 0x00045044 },
- { _MMIO(0xe55c), 0x00055054 },
- { _MMIO(0xe65c), 0x00065064 },
-};
-
-static const struct i915_oa_reg mux_config_memory_writes[] = {
- { _MMIO(0x9888), 0x19800343 },
- { _MMIO(0x9888), 0x39900340 },
- { _MMIO(0x9888), 0x3f900000 },
- { _MMIO(0x9888), 0x41900080 },
- { _MMIO(0x9888), 0x03803180 },
- { _MMIO(0x9888), 0x058035e2 },
- { _MMIO(0x9888), 0x0780006a },
- { _MMIO(0x9888), 0x11800000 },
- { _MMIO(0x9888), 0x2181a000 },
- { _MMIO(0x9888), 0x2381000a },
- { _MMIO(0x9888), 0x1d950550 },
- { _MMIO(0x9888), 0x0b928000 },
- { _MMIO(0x9888), 0x0d92a000 },
- { _MMIO(0x9888), 0x0f922000 },
- { _MMIO(0x9888), 0x13900180 },
- { _MMIO(0x9888), 0x21900181 },
- { _MMIO(0x9888), 0x23900182 },
- { _MMIO(0x9888), 0x25900183 },
- { _MMIO(0x9888), 0x27900184 },
- { _MMIO(0x9888), 0x29900185 },
- { _MMIO(0x9888), 0x2b900186 },
- { _MMIO(0x9888), 0x2d900187 },
- { _MMIO(0x9888), 0x2f900170 },
- { _MMIO(0x9888), 0x31900125 },
- { _MMIO(0x9888), 0x15900123 },
- { _MMIO(0x9888), 0x17900121 },
- { _MMIO(0x9888), 0x35900000 },
- { _MMIO(0x9888), 0x19908000 },
- { _MMIO(0x9888), 0x1b908000 },
- { _MMIO(0x9888), 0x1d908000 },
- { _MMIO(0x9888), 0x1f908000 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x43901084 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x47901080 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49901084 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x4b901084 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4d900004 },
- { _MMIO(0x9888), 0x45900000 },
-};
-
-static int
-get_memory_writes_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_memory_writes;
- lens[n] = ARRAY_SIZE(mux_config_memory_writes);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_compute_extended[] = {
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2770), 0x0007fc2a },
- { _MMIO(0x2774), 0x0000bf00 },
- { _MMIO(0x2778), 0x0007fc6a },
- { _MMIO(0x277c), 0x0000bf00 },
- { _MMIO(0x2780), 0x0007fc92 },
- { _MMIO(0x2784), 0x0000bf00 },
- { _MMIO(0x2788), 0x0007fca2 },
- { _MMIO(0x278c), 0x0000bf00 },
- { _MMIO(0x2790), 0x0007fc32 },
- { _MMIO(0x2794), 0x0000bf00 },
- { _MMIO(0x2798), 0x0007fc9a },
- { _MMIO(0x279c), 0x0000bf00 },
- { _MMIO(0x27a0), 0x0007fe6a },
- { _MMIO(0x27a4), 0x0000bf00 },
- { _MMIO(0x27a8), 0x0007fe7a },
- { _MMIO(0x27ac), 0x0000bf00 },
-};
-
-static const struct i915_oa_reg flex_eu_config_compute_extended[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00000003 },
- { _MMIO(0xe658), 0x00002001 },
- { _MMIO(0xe758), 0x00778008 },
- { _MMIO(0xe45c), 0x00088078 },
- { _MMIO(0xe55c), 0x00808708 },
- { _MMIO(0xe65c), 0x00a08908 },
-};
-
-static const struct i915_oa_reg mux_config_compute_extended[] = {
- { _MMIO(0x9888), 0x104f00e0 },
- { _MMIO(0x9888), 0x141c0160 },
- { _MMIO(0x9888), 0x161c0015 },
- { _MMIO(0x9888), 0x181c0120 },
- { _MMIO(0x9888), 0x002d5000 },
- { _MMIO(0x9888), 0x062d4000 },
- { _MMIO(0x9888), 0x082d5000 },
- { _MMIO(0x9888), 0x0a2d5000 },
- { _MMIO(0x9888), 0x0c2d5000 },
- { _MMIO(0x9888), 0x0e2d5000 },
- { _MMIO(0x9888), 0x022d5000 },
- { _MMIO(0x9888), 0x042d5000 },
- { _MMIO(0x9888), 0x0c2e5400 },
- { _MMIO(0x9888), 0x0e2e5515 },
- { _MMIO(0x9888), 0x102e0155 },
- { _MMIO(0x9888), 0x044cc000 },
- { _MMIO(0x9888), 0x0a4c8000 },
- { _MMIO(0x9888), 0x0c4cc000 },
- { _MMIO(0x9888), 0x0e4cc000 },
- { _MMIO(0x9888), 0x104c8000 },
- { _MMIO(0x9888), 0x124c8000 },
- { _MMIO(0x9888), 0x144c8000 },
- { _MMIO(0x9888), 0x164c2000 },
- { _MMIO(0x9888), 0x064cc000 },
- { _MMIO(0x9888), 0x084cc000 },
- { _MMIO(0x9888), 0x004ea000 },
- { _MMIO(0x9888), 0x064e8000 },
- { _MMIO(0x9888), 0x084ea000 },
- { _MMIO(0x9888), 0x0a4ea000 },
- { _MMIO(0x9888), 0x0c4ea000 },
- { _MMIO(0x9888), 0x0e4ea000 },
- { _MMIO(0x9888), 0x024ea000 },
- { _MMIO(0x9888), 0x044ea000 },
- { _MMIO(0x9888), 0x0e4f4b41 },
- { _MMIO(0x9888), 0x004f4200 },
- { _MMIO(0x9888), 0x024f404c },
- { _MMIO(0x9888), 0x1c4f0000 },
- { _MMIO(0x9888), 0x1a4f0000 },
- { _MMIO(0x9888), 0x001b4000 },
- { _MMIO(0x9888), 0x061b8000 },
- { _MMIO(0x9888), 0x081bc000 },
- { _MMIO(0x9888), 0x0a1bc000 },
- { _MMIO(0x9888), 0x0c1bc000 },
- { _MMIO(0x9888), 0x041bc000 },
- { _MMIO(0x9888), 0x001c0031 },
- { _MMIO(0x9888), 0x061c1900 },
- { _MMIO(0x9888), 0x081c1a33 },
- { _MMIO(0x9888), 0x0a1c1b35 },
- { _MMIO(0x9888), 0x0c1c3337 },
- { _MMIO(0x9888), 0x041c31c7 },
- { _MMIO(0x9888), 0x180f5000 },
- { _MMIO(0x9888), 0x1a0fa8aa },
- { _MMIO(0x9888), 0x1c0f0aaa },
- { _MMIO(0x9888), 0x182c8000 },
- { _MMIO(0x9888), 0x1c2c6aaa },
- { _MMIO(0x9888), 0x1e2c0001 },
- { _MMIO(0x9888), 0x1a2c2950 },
- { _MMIO(0x9888), 0x01938000 },
- { _MMIO(0x9888), 0x0f938000 },
- { _MMIO(0x9888), 0x1993aaaa },
- { _MMIO(0x9888), 0x03938000 },
- { _MMIO(0x9888), 0x05938000 },
- { _MMIO(0x9888), 0x07938000 },
- { _MMIO(0x9888), 0x09938000 },
- { _MMIO(0x9888), 0x0b938000 },
- { _MMIO(0x9888), 0x13904000 },
- { _MMIO(0x9888), 0x21904000 },
- { _MMIO(0x9888), 0x23904000 },
- { _MMIO(0x9888), 0x25904000 },
- { _MMIO(0x9888), 0x27904000 },
- { _MMIO(0x9888), 0x29904000 },
- { _MMIO(0x9888), 0x2b904000 },
- { _MMIO(0x9888), 0x2d904000 },
- { _MMIO(0x9888), 0x2f904000 },
- { _MMIO(0x9888), 0x31904000 },
- { _MMIO(0x9888), 0x15904000 },
- { _MMIO(0x9888), 0x17904000 },
- { _MMIO(0x9888), 0x19904000 },
- { _MMIO(0x9888), 0x1b904000 },
- { _MMIO(0x9888), 0x1d904000 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x43900420 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x47900000 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900000 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x4b900400 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4d900001 },
- { _MMIO(0x9888), 0x45900001 },
-};
-
-static int
-get_compute_extended_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_compute_extended;
- lens[n] = ARRAY_SIZE(mux_config_compute_extended);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_compute_l3_cache[] = {
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x30800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x30800000 },
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2770), 0x0007fffa },
- { _MMIO(0x2774), 0x0000fefe },
- { _MMIO(0x2778), 0x0007fffa },
- { _MMIO(0x277c), 0x0000fefd },
- { _MMIO(0x2790), 0x0007fffa },
- { _MMIO(0x2794), 0x0000fbef },
- { _MMIO(0x2798), 0x0007fffa },
- { _MMIO(0x279c), 0x0000fbdf },
-};
-
-static const struct i915_oa_reg flex_eu_config_compute_l3_cache[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00000003 },
- { _MMIO(0xe658), 0x00002001 },
- { _MMIO(0xe758), 0x00101100 },
- { _MMIO(0xe45c), 0x00201200 },
- { _MMIO(0xe55c), 0x00301300 },
- { _MMIO(0xe65c), 0x00401400 },
-};
-
-static const struct i915_oa_reg mux_config_compute_l3_cache[] = {
- { _MMIO(0x9888), 0x166c03b0 },
- { _MMIO(0x9888), 0x1593001e },
- { _MMIO(0x9888), 0x3f900c00 },
- { _MMIO(0x9888), 0x41900000 },
- { _MMIO(0x9888), 0x002d1000 },
- { _MMIO(0x9888), 0x062d4000 },
- { _MMIO(0x9888), 0x082d5000 },
- { _MMIO(0x9888), 0x0e2d5000 },
- { _MMIO(0x9888), 0x0c2e0400 },
- { _MMIO(0x9888), 0x0e2e1500 },
- { _MMIO(0x9888), 0x102e0140 },
- { _MMIO(0x9888), 0x044c4000 },
- { _MMIO(0x9888), 0x0a4c8000 },
- { _MMIO(0x9888), 0x0c4cc000 },
- { _MMIO(0x9888), 0x144c8000 },
- { _MMIO(0x9888), 0x164c2000 },
- { _MMIO(0x9888), 0x004e2000 },
- { _MMIO(0x9888), 0x064e8000 },
- { _MMIO(0x9888), 0x084ea000 },
- { _MMIO(0x9888), 0x0e4ea000 },
- { _MMIO(0x9888), 0x1a4f4001 },
- { _MMIO(0x9888), 0x1c4f5005 },
- { _MMIO(0x9888), 0x006c0051 },
- { _MMIO(0x9888), 0x066c5000 },
- { _MMIO(0x9888), 0x086c5c5d },
- { _MMIO(0x9888), 0x0e6c5e5f },
- { _MMIO(0x9888), 0x106c0000 },
- { _MMIO(0x9888), 0x146c0000 },
- { _MMIO(0x9888), 0x1a6c0000 },
- { _MMIO(0x9888), 0x1c6c0000 },
- { _MMIO(0x9888), 0x180f1000 },
- { _MMIO(0x9888), 0x1a0fa800 },
- { _MMIO(0x9888), 0x1c0f0a00 },
- { _MMIO(0x9888), 0x182c4000 },
- { _MMIO(0x9888), 0x1c2c4015 },
- { _MMIO(0x9888), 0x1e2c0001 },
- { _MMIO(0x9888), 0x03931980 },
- { _MMIO(0x9888), 0x05930032 },
- { _MMIO(0x9888), 0x11930000 },
- { _MMIO(0x9888), 0x01938000 },
- { _MMIO(0x9888), 0x0f938000 },
- { _MMIO(0x9888), 0x1993a00a },
- { _MMIO(0x9888), 0x07930000 },
- { _MMIO(0x9888), 0x09930000 },
- { _MMIO(0x9888), 0x1d900177 },
- { _MMIO(0x9888), 0x1f900178 },
- { _MMIO(0x9888), 0x35900000 },
- { _MMIO(0x9888), 0x13904000 },
- { _MMIO(0x9888), 0x21904000 },
- { _MMIO(0x9888), 0x23904000 },
- { _MMIO(0x9888), 0x25904000 },
- { _MMIO(0x9888), 0x2f904000 },
- { _MMIO(0x9888), 0x31904000 },
- { _MMIO(0x9888), 0x19904000 },
- { _MMIO(0x9888), 0x1b904000 },
- { _MMIO(0x9888), 0x53901000 },
- { _MMIO(0x9888), 0x43900000 },
- { _MMIO(0x9888), 0x55900111 },
- { _MMIO(0x9888), 0x47900001 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x4b900000 },
- { _MMIO(0x9888), 0x4d900000 },
- { _MMIO(0x9888), 0x45900400 },
-};
-
-static int
-get_compute_l3_cache_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_compute_l3_cache;
- lens[n] = ARRAY_SIZE(mux_config_compute_l3_cache);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_hdc_and_sf[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x10800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2770), 0x00000002 },
- { _MMIO(0x2774), 0x0000fdff },
-};
-
-static const struct i915_oa_reg flex_eu_config_hdc_and_sf[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_hdc_and_sf[] = {
- { _MMIO(0x9888), 0x104f0232 },
- { _MMIO(0x9888), 0x124f4640 },
- { _MMIO(0x9888), 0x11834400 },
- { _MMIO(0x9888), 0x022d4000 },
- { _MMIO(0x9888), 0x042d5000 },
- { _MMIO(0x9888), 0x062d1000 },
- { _MMIO(0x9888), 0x0e2e0055 },
- { _MMIO(0x9888), 0x064c8000 },
- { _MMIO(0x9888), 0x084cc000 },
- { _MMIO(0x9888), 0x0a4c4000 },
- { _MMIO(0x9888), 0x024e8000 },
- { _MMIO(0x9888), 0x044ea000 },
- { _MMIO(0x9888), 0x064e2000 },
- { _MMIO(0x9888), 0x024f6100 },
- { _MMIO(0x9888), 0x044f416b },
- { _MMIO(0x9888), 0x064f004b },
- { _MMIO(0x9888), 0x1a4f0000 },
- { _MMIO(0x9888), 0x1a0f02a8 },
- { _MMIO(0x9888), 0x1a2c5500 },
- { _MMIO(0x9888), 0x0f808000 },
- { _MMIO(0x9888), 0x25810020 },
- { _MMIO(0x9888), 0x0f8305c0 },
- { _MMIO(0x9888), 0x07938000 },
- { _MMIO(0x9888), 0x09938000 },
- { _MMIO(0x9888), 0x0b938000 },
- { _MMIO(0x9888), 0x0d938000 },
- { _MMIO(0x9888), 0x1f951000 },
- { _MMIO(0x9888), 0x13920200 },
- { _MMIO(0x9888), 0x31908000 },
- { _MMIO(0x9888), 0x19904000 },
- { _MMIO(0x9888), 0x1b904000 },
- { _MMIO(0x9888), 0x1d904000 },
- { _MMIO(0x9888), 0x1f904000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x4d900003 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900000 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x47900000 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static int
-get_hdc_and_sf_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_hdc_and_sf;
- lens[n] = ARRAY_SIZE(mux_config_hdc_and_sf);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_l3_1[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2770), 0x00100070 },
- { _MMIO(0x2774), 0x0000fff1 },
- { _MMIO(0x2778), 0x00014002 },
- { _MMIO(0x277c), 0x0000c3ff },
- { _MMIO(0x2780), 0x00010002 },
- { _MMIO(0x2784), 0x0000c7ff },
- { _MMIO(0x2788), 0x00004002 },
- { _MMIO(0x278c), 0x0000d3ff },
- { _MMIO(0x2790), 0x00100700 },
- { _MMIO(0x2794), 0x0000ff1f },
- { _MMIO(0x2798), 0x00001402 },
- { _MMIO(0x279c), 0x0000fc3f },
- { _MMIO(0x27a0), 0x00001002 },
- { _MMIO(0x27a4), 0x0000fc7f },
- { _MMIO(0x27a8), 0x00000402 },
- { _MMIO(0x27ac), 0x0000fd3f },
-};
-
-static const struct i915_oa_reg flex_eu_config_l3_1[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_l3_1[] = {
- { _MMIO(0x9888), 0x12643400 },
- { _MMIO(0x9888), 0x12653400 },
- { _MMIO(0x9888), 0x106c6800 },
- { _MMIO(0x9888), 0x126c001e },
- { _MMIO(0x9888), 0x166c0010 },
- { _MMIO(0x9888), 0x0c2d5000 },
- { _MMIO(0x9888), 0x0e2d5000 },
- { _MMIO(0x9888), 0x002d4000 },
- { _MMIO(0x9888), 0x022d5000 },
- { _MMIO(0x9888), 0x042d5000 },
- { _MMIO(0x9888), 0x062d1000 },
- { _MMIO(0x9888), 0x102e0154 },
- { _MMIO(0x9888), 0x0c2e5000 },
- { _MMIO(0x9888), 0x0e2e0055 },
- { _MMIO(0x9888), 0x104c8000 },
- { _MMIO(0x9888), 0x124c8000 },
- { _MMIO(0x9888), 0x144c8000 },
- { _MMIO(0x9888), 0x164c2000 },
- { _MMIO(0x9888), 0x044c8000 },
- { _MMIO(0x9888), 0x064cc000 },
- { _MMIO(0x9888), 0x084cc000 },
- { _MMIO(0x9888), 0x0a4c4000 },
- { _MMIO(0x9888), 0x0c4ea000 },
- { _MMIO(0x9888), 0x0e4ea000 },
- { _MMIO(0x9888), 0x004e8000 },
- { _MMIO(0x9888), 0x024ea000 },
- { _MMIO(0x9888), 0x044ea000 },
- { _MMIO(0x9888), 0x064e2000 },
- { _MMIO(0x9888), 0x1c4f5500 },
- { _MMIO(0x9888), 0x1a4f1554 },
- { _MMIO(0x9888), 0x0a640024 },
- { _MMIO(0x9888), 0x10640000 },
- { _MMIO(0x9888), 0x04640000 },
- { _MMIO(0x9888), 0x0c650024 },
- { _MMIO(0x9888), 0x10650000 },
- { _MMIO(0x9888), 0x06650000 },
- { _MMIO(0x9888), 0x0c6c5327 },
- { _MMIO(0x9888), 0x0e6c5425 },
- { _MMIO(0x9888), 0x006c2a00 },
- { _MMIO(0x9888), 0x026c285b },
- { _MMIO(0x9888), 0x046c005c },
- { _MMIO(0x9888), 0x1c6c0000 },
- { _MMIO(0x9888), 0x1a6c0900 },
- { _MMIO(0x9888), 0x1c0f0aa0 },
- { _MMIO(0x9888), 0x180f4000 },
- { _MMIO(0x9888), 0x1a0f02aa },
- { _MMIO(0x9888), 0x1c2c5400 },
- { _MMIO(0x9888), 0x1e2c0001 },
- { _MMIO(0x9888), 0x1a2c5550 },
- { _MMIO(0x9888), 0x1993aa00 },
- { _MMIO(0x9888), 0x03938000 },
- { _MMIO(0x9888), 0x05938000 },
- { _MMIO(0x9888), 0x07938000 },
- { _MMIO(0x9888), 0x09938000 },
- { _MMIO(0x9888), 0x0b938000 },
- { _MMIO(0x9888), 0x0d938000 },
- { _MMIO(0x9888), 0x2b904000 },
- { _MMIO(0x9888), 0x2d904000 },
- { _MMIO(0x9888), 0x2f904000 },
- { _MMIO(0x9888), 0x31904000 },
- { _MMIO(0x9888), 0x15904000 },
- { _MMIO(0x9888), 0x17904000 },
- { _MMIO(0x9888), 0x19904000 },
- { _MMIO(0x9888), 0x1b904000 },
- { _MMIO(0x9888), 0x1d904000 },
- { _MMIO(0x9888), 0x1f904000 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x4b900421 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4d900001 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x43900420 },
- { _MMIO(0x9888), 0x45900021 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x47900000 },
-};
-
-static int
-get_l3_1_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_l3_1;
- lens[n] = ARRAY_SIZE(mux_config_l3_1);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_rasterizer_and_pixel_backend[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x30800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2770), 0x00000002 },
- { _MMIO(0x2774), 0x0000efff },
- { _MMIO(0x2778), 0x00006000 },
- { _MMIO(0x277c), 0x0000f3ff },
-};
-
-static const struct i915_oa_reg flex_eu_config_rasterizer_and_pixel_backend[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_rasterizer_and_pixel_backend[] = {
- { _MMIO(0x9888), 0x102d7800 },
- { _MMIO(0x9888), 0x122d79e0 },
- { _MMIO(0x9888), 0x0c2f0004 },
- { _MMIO(0x9888), 0x100e3800 },
- { _MMIO(0x9888), 0x180f0005 },
- { _MMIO(0x9888), 0x002d0940 },
- { _MMIO(0x9888), 0x022d802f },
- { _MMIO(0x9888), 0x042d4013 },
- { _MMIO(0x9888), 0x062d1000 },
- { _MMIO(0x9888), 0x0e2e0050 },
- { _MMIO(0x9888), 0x022f0010 },
- { _MMIO(0x9888), 0x002f0000 },
- { _MMIO(0x9888), 0x084c8000 },
- { _MMIO(0x9888), 0x0a4c4000 },
- { _MMIO(0x9888), 0x044e8000 },
- { _MMIO(0x9888), 0x064e2000 },
- { _MMIO(0x9888), 0x040e0480 },
- { _MMIO(0x9888), 0x000e0000 },
- { _MMIO(0x9888), 0x060f0027 },
- { _MMIO(0x9888), 0x100f0000 },
- { _MMIO(0x9888), 0x1a0f0040 },
- { _MMIO(0x9888), 0x03938000 },
- { _MMIO(0x9888), 0x05938000 },
- { _MMIO(0x9888), 0x07938000 },
- { _MMIO(0x9888), 0x09938000 },
- { _MMIO(0x9888), 0x0b938000 },
- { _MMIO(0x9888), 0x0d938000 },
- { _MMIO(0x9888), 0x15904000 },
- { _MMIO(0x9888), 0x17904000 },
- { _MMIO(0x9888), 0x19904000 },
- { _MMIO(0x9888), 0x1b904000 },
- { _MMIO(0x9888), 0x1d904000 },
- { _MMIO(0x9888), 0x1f904000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x439014a0 },
- { _MMIO(0x9888), 0x459000a4 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x47900001 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static int
-get_rasterizer_and_pixel_backend_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_rasterizer_and_pixel_backend;
- lens[n] = ARRAY_SIZE(mux_config_rasterizer_and_pixel_backend);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_sampler[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x70800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2770), 0x0000c000 },
- { _MMIO(0x2774), 0x0000e7ff },
- { _MMIO(0x2778), 0x00003000 },
- { _MMIO(0x277c), 0x0000f9ff },
- { _MMIO(0x2780), 0x00000c00 },
- { _MMIO(0x2784), 0x0000fe7f },
-};
-
-static const struct i915_oa_reg flex_eu_config_sampler[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_sampler[] = {
- { _MMIO(0x9888), 0x121300a0 },
- { _MMIO(0x9888), 0x141600ab },
- { _MMIO(0x9888), 0x123300a0 },
- { _MMIO(0x9888), 0x143600ab },
- { _MMIO(0x9888), 0x125300a0 },
- { _MMIO(0x9888), 0x145600ab },
- { _MMIO(0x9888), 0x0c2d4000 },
- { _MMIO(0x9888), 0x0e2d5000 },
- { _MMIO(0x9888), 0x002d4000 },
- { _MMIO(0x9888), 0x022d5000 },
- { _MMIO(0x9888), 0x042d5000 },
- { _MMIO(0x9888), 0x062d1000 },
- { _MMIO(0x9888), 0x102e01a0 },
- { _MMIO(0x9888), 0x0c2e5000 },
- { _MMIO(0x9888), 0x0e2e0065 },
- { _MMIO(0x9888), 0x164c2000 },
- { _MMIO(0x9888), 0x044c8000 },
- { _MMIO(0x9888), 0x064cc000 },
- { _MMIO(0x9888), 0x084c4000 },
- { _MMIO(0x9888), 0x0a4c4000 },
- { _MMIO(0x9888), 0x0e4e8000 },
- { _MMIO(0x9888), 0x004e8000 },
- { _MMIO(0x9888), 0x024ea000 },
- { _MMIO(0x9888), 0x044e2000 },
- { _MMIO(0x9888), 0x064e2000 },
- { _MMIO(0x9888), 0x1c0f0800 },
- { _MMIO(0x9888), 0x180f4000 },
- { _MMIO(0x9888), 0x1a0f023f },
- { _MMIO(0x9888), 0x1e2c0003 },
- { _MMIO(0x9888), 0x1a2cc030 },
- { _MMIO(0x9888), 0x04132180 },
- { _MMIO(0x9888), 0x02130000 },
- { _MMIO(0x9888), 0x0c148000 },
- { _MMIO(0x9888), 0x0e142000 },
- { _MMIO(0x9888), 0x04148000 },
- { _MMIO(0x9888), 0x1e150140 },
- { _MMIO(0x9888), 0x1c150040 },
- { _MMIO(0x9888), 0x0c163000 },
- { _MMIO(0x9888), 0x0e160068 },
- { _MMIO(0x9888), 0x10160000 },
- { _MMIO(0x9888), 0x18160000 },
- { _MMIO(0x9888), 0x0a164000 },
- { _MMIO(0x9888), 0x04330043 },
- { _MMIO(0x9888), 0x02330000 },
- { _MMIO(0x9888), 0x0234a000 },
- { _MMIO(0x9888), 0x04342000 },
- { _MMIO(0x9888), 0x1c350015 },
- { _MMIO(0x9888), 0x02363460 },
- { _MMIO(0x9888), 0x10360000 },
- { _MMIO(0x9888), 0x04360000 },
- { _MMIO(0x9888), 0x06360000 },
- { _MMIO(0x9888), 0x08364000 },
- { _MMIO(0x9888), 0x06530043 },
- { _MMIO(0x9888), 0x02530000 },
- { _MMIO(0x9888), 0x0e548000 },
- { _MMIO(0x9888), 0x00548000 },
- { _MMIO(0x9888), 0x06542000 },
- { _MMIO(0x9888), 0x1e550400 },
- { _MMIO(0x9888), 0x1a552000 },
- { _MMIO(0x9888), 0x1c550100 },
- { _MMIO(0x9888), 0x0e563000 },
- { _MMIO(0x9888), 0x00563400 },
- { _MMIO(0x9888), 0x10560000 },
- { _MMIO(0x9888), 0x18560000 },
- { _MMIO(0x9888), 0x02560000 },
- { _MMIO(0x9888), 0x0c564000 },
- { _MMIO(0x9888), 0x1993a800 },
- { _MMIO(0x9888), 0x03938000 },
- { _MMIO(0x9888), 0x05938000 },
- { _MMIO(0x9888), 0x07938000 },
- { _MMIO(0x9888), 0x09938000 },
- { _MMIO(0x9888), 0x0b938000 },
- { _MMIO(0x9888), 0x0d938000 },
- { _MMIO(0x9888), 0x2d904000 },
- { _MMIO(0x9888), 0x2f904000 },
- { _MMIO(0x9888), 0x31904000 },
- { _MMIO(0x9888), 0x15904000 },
- { _MMIO(0x9888), 0x17904000 },
- { _MMIO(0x9888), 0x19904000 },
- { _MMIO(0x9888), 0x1b904000 },
- { _MMIO(0x9888), 0x1d904000 },
- { _MMIO(0x9888), 0x1f904000 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x4b9014a0 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4d900001 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x43900820 },
- { _MMIO(0x9888), 0x45901022 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x47900000 },
-};
-
-static int
-get_sampler_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_sampler;
- lens[n] = ARRAY_SIZE(mux_config_sampler);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_tdl_1[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x30800000 },
- { _MMIO(0x2770), 0x00000002 },
- { _MMIO(0x2774), 0x00007fff },
- { _MMIO(0x2778), 0x00000000 },
- { _MMIO(0x277c), 0x00009fff },
- { _MMIO(0x2780), 0x00000002 },
- { _MMIO(0x2784), 0x0000efff },
- { _MMIO(0x2788), 0x00000000 },
- { _MMIO(0x278c), 0x0000f3ff },
- { _MMIO(0x2790), 0x00000002 },
- { _MMIO(0x2794), 0x0000fdff },
- { _MMIO(0x2798), 0x00000000 },
- { _MMIO(0x279c), 0x0000fe7f },
-};
-
-static const struct i915_oa_reg flex_eu_config_tdl_1[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_tdl_1[] = {
- { _MMIO(0x9888), 0x141a0000 },
- { _MMIO(0x9888), 0x143a0000 },
- { _MMIO(0x9888), 0x145a0000 },
- { _MMIO(0x9888), 0x0c2d4000 },
- { _MMIO(0x9888), 0x0e2d5000 },
- { _MMIO(0x9888), 0x002d4000 },
- { _MMIO(0x9888), 0x022d5000 },
- { _MMIO(0x9888), 0x042d5000 },
- { _MMIO(0x9888), 0x062d1000 },
- { _MMIO(0x9888), 0x102e0150 },
- { _MMIO(0x9888), 0x0c2e5000 },
- { _MMIO(0x9888), 0x0e2e006a },
- { _MMIO(0x9888), 0x124c8000 },
- { _MMIO(0x9888), 0x144c8000 },
- { _MMIO(0x9888), 0x164c2000 },
- { _MMIO(0x9888), 0x044c8000 },
- { _MMIO(0x9888), 0x064c4000 },
- { _MMIO(0x9888), 0x0a4c4000 },
- { _MMIO(0x9888), 0x0c4e8000 },
- { _MMIO(0x9888), 0x0e4ea000 },
- { _MMIO(0x9888), 0x004e8000 },
- { _MMIO(0x9888), 0x024e2000 },
- { _MMIO(0x9888), 0x064e2000 },
- { _MMIO(0x9888), 0x1c0f0bc0 },
- { _MMIO(0x9888), 0x180f4000 },
- { _MMIO(0x9888), 0x1a0f0302 },
- { _MMIO(0x9888), 0x1e2c0003 },
- { _MMIO(0x9888), 0x1a2c00f0 },
- { _MMIO(0x9888), 0x021a3080 },
- { _MMIO(0x9888), 0x041a31e5 },
- { _MMIO(0x9888), 0x02148000 },
- { _MMIO(0x9888), 0x0414a000 },
- { _MMIO(0x9888), 0x1c150054 },
- { _MMIO(0x9888), 0x06168000 },
- { _MMIO(0x9888), 0x08168000 },
- { _MMIO(0x9888), 0x0a168000 },
- { _MMIO(0x9888), 0x0c3a3280 },
- { _MMIO(0x9888), 0x0e3a0063 },
- { _MMIO(0x9888), 0x063a0061 },
- { _MMIO(0x9888), 0x023a0000 },
- { _MMIO(0x9888), 0x0c348000 },
- { _MMIO(0x9888), 0x0e342000 },
- { _MMIO(0x9888), 0x06342000 },
- { _MMIO(0x9888), 0x1e350140 },
- { _MMIO(0x9888), 0x1c350100 },
- { _MMIO(0x9888), 0x18360028 },
- { _MMIO(0x9888), 0x0c368000 },
- { _MMIO(0x9888), 0x0e5a3080 },
- { _MMIO(0x9888), 0x005a3280 },
- { _MMIO(0x9888), 0x025a0063 },
- { _MMIO(0x9888), 0x0e548000 },
- { _MMIO(0x9888), 0x00548000 },
- { _MMIO(0x9888), 0x02542000 },
- { _MMIO(0x9888), 0x1e550400 },
- { _MMIO(0x9888), 0x1a552000 },
- { _MMIO(0x9888), 0x1c550001 },
- { _MMIO(0x9888), 0x18560080 },
- { _MMIO(0x9888), 0x02568000 },
- { _MMIO(0x9888), 0x04568000 },
- { _MMIO(0x9888), 0x1993a800 },
- { _MMIO(0x9888), 0x03938000 },
- { _MMIO(0x9888), 0x05938000 },
- { _MMIO(0x9888), 0x07938000 },
- { _MMIO(0x9888), 0x09938000 },
- { _MMIO(0x9888), 0x0b938000 },
- { _MMIO(0x9888), 0x0d938000 },
- { _MMIO(0x9888), 0x2d904000 },
- { _MMIO(0x9888), 0x2f904000 },
- { _MMIO(0x9888), 0x31904000 },
- { _MMIO(0x9888), 0x15904000 },
- { _MMIO(0x9888), 0x17904000 },
- { _MMIO(0x9888), 0x19904000 },
- { _MMIO(0x9888), 0x1b904000 },
- { _MMIO(0x9888), 0x1d904000 },
- { _MMIO(0x9888), 0x1f904000 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x4b900420 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4d900000 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x43900000 },
- { _MMIO(0x9888), 0x45901084 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x47900001 },
-};
-
-static int
-get_tdl_1_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_tdl_1;
- lens[n] = ARRAY_SIZE(mux_config_tdl_1);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_tdl_2[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x00800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
-};
-
-static const struct i915_oa_reg flex_eu_config_tdl_2[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_tdl_2[] = {
- { _MMIO(0x9888), 0x141a026b },
- { _MMIO(0x9888), 0x143a0173 },
- { _MMIO(0x9888), 0x145a026b },
- { _MMIO(0x9888), 0x002d4000 },
- { _MMIO(0x9888), 0x022d5000 },
- { _MMIO(0x9888), 0x042d5000 },
- { _MMIO(0x9888), 0x062d1000 },
- { _MMIO(0x9888), 0x0c2e5000 },
- { _MMIO(0x9888), 0x0e2e0069 },
- { _MMIO(0x9888), 0x044c8000 },
- { _MMIO(0x9888), 0x064cc000 },
- { _MMIO(0x9888), 0x0a4c4000 },
- { _MMIO(0x9888), 0x004e8000 },
- { _MMIO(0x9888), 0x024ea000 },
- { _MMIO(0x9888), 0x064e2000 },
- { _MMIO(0x9888), 0x180f6000 },
- { _MMIO(0x9888), 0x1a0f030a },
- { _MMIO(0x9888), 0x1a2c03c0 },
- { _MMIO(0x9888), 0x041a37e7 },
- { _MMIO(0x9888), 0x021a0000 },
- { _MMIO(0x9888), 0x0414a000 },
- { _MMIO(0x9888), 0x1c150050 },
- { _MMIO(0x9888), 0x08168000 },
- { _MMIO(0x9888), 0x0a168000 },
- { _MMIO(0x9888), 0x003a3380 },
- { _MMIO(0x9888), 0x063a006f },
- { _MMIO(0x9888), 0x023a0000 },
- { _MMIO(0x9888), 0x00348000 },
- { _MMIO(0x9888), 0x06342000 },
- { _MMIO(0x9888), 0x1a352000 },
- { _MMIO(0x9888), 0x1c350100 },
- { _MMIO(0x9888), 0x02368000 },
- { _MMIO(0x9888), 0x0c368000 },
- { _MMIO(0x9888), 0x025a37e7 },
- { _MMIO(0x9888), 0x0254a000 },
- { _MMIO(0x9888), 0x1c550005 },
- { _MMIO(0x9888), 0x04568000 },
- { _MMIO(0x9888), 0x06568000 },
- { _MMIO(0x9888), 0x03938000 },
- { _MMIO(0x9888), 0x05938000 },
- { _MMIO(0x9888), 0x07938000 },
- { _MMIO(0x9888), 0x09938000 },
- { _MMIO(0x9888), 0x0b938000 },
- { _MMIO(0x9888), 0x0d938000 },
- { _MMIO(0x9888), 0x15904000 },
- { _MMIO(0x9888), 0x17904000 },
- { _MMIO(0x9888), 0x19904000 },
- { _MMIO(0x9888), 0x1b904000 },
- { _MMIO(0x9888), 0x1d904000 },
- { _MMIO(0x9888), 0x1f904000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x43900020 },
- { _MMIO(0x9888), 0x45901080 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x47900001 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static int
-get_tdl_2_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_tdl_2;
- lens[n] = ARRAY_SIZE(mux_config_tdl_2);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_compute_extra[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x00800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
-};
-
-static const struct i915_oa_reg flex_eu_config_compute_extra[] = {
- { _MMIO(0xe458), 0x00001000 },
- { _MMIO(0xe558), 0x00003002 },
- { _MMIO(0xe658), 0x00005004 },
- { _MMIO(0xe758), 0x00011010 },
- { _MMIO(0xe45c), 0x00050012 },
- { _MMIO(0xe55c), 0x00052051 },
- { _MMIO(0xe65c), 0x00000008 },
-};
-
-static const struct i915_oa_reg mux_config_compute_extra[] = {
- { _MMIO(0x9888), 0x141a001f },
- { _MMIO(0x9888), 0x143a001f },
- { _MMIO(0x9888), 0x145a001f },
- { _MMIO(0x9888), 0x042d5000 },
- { _MMIO(0x9888), 0x062d1000 },
- { _MMIO(0x9888), 0x0e2e0094 },
- { _MMIO(0x9888), 0x084cc000 },
- { _MMIO(0x9888), 0x044ea000 },
- { _MMIO(0x9888), 0x1a0f00e0 },
- { _MMIO(0x9888), 0x1a2c0c00 },
- { _MMIO(0x9888), 0x061a0063 },
- { _MMIO(0x9888), 0x021a0000 },
- { _MMIO(0x9888), 0x06142000 },
- { _MMIO(0x9888), 0x1c150100 },
- { _MMIO(0x9888), 0x0c168000 },
- { _MMIO(0x9888), 0x043a3180 },
- { _MMIO(0x9888), 0x023a0000 },
- { _MMIO(0x9888), 0x04348000 },
- { _MMIO(0x9888), 0x1c350040 },
- { _MMIO(0x9888), 0x0a368000 },
- { _MMIO(0x9888), 0x045a0063 },
- { _MMIO(0x9888), 0x025a0000 },
- { _MMIO(0x9888), 0x04542000 },
- { _MMIO(0x9888), 0x1c550010 },
- { _MMIO(0x9888), 0x08568000 },
- { _MMIO(0x9888), 0x09938000 },
- { _MMIO(0x9888), 0x0b938000 },
- { _MMIO(0x9888), 0x0d938000 },
- { _MMIO(0x9888), 0x1b904000 },
- { _MMIO(0x9888), 0x1d904000 },
- { _MMIO(0x9888), 0x1f904000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x45900400 },
- { _MMIO(0x9888), 0x47900004 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static int
-get_compute_extra_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_compute_extra;
- lens[n] = ARRAY_SIZE(mux_config_compute_extra);
- n++;
-
- return n;
-}
-
static const struct i915_oa_reg b_counter_config_test_oa[] = {
{ _MMIO(0x2740), 0x00000000 },
{ _MMIO(0x2744), 0x00800000 },
@@ -1668,6 +60,7 @@ static const struct i915_oa_reg flex_eu_config_test_oa[] = {
};
static const struct i915_oa_reg mux_config_test_oa[] = {
+ { _MMIO(0x9840), 0x00000080 },
{ _MMIO(0x9888), 0x19800000 },
{ _MMIO(0x9888), 0x07800063 },
{ _MMIO(0x9888), 0x11800000 },
@@ -1681,922 +74,35 @@ static const struct i915_oa_reg mux_config_test_oa[] = {
{ _MMIO(0x9888), 0x33900000 },
};
-static int
-get_test_oa_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_test_oa;
- lens[n] = ARRAY_SIZE(mux_config_test_oa);
- n++;
-
- return n;
-}
-
-int i915_oa_select_metric_set_glk(struct drm_i915_private *dev_priv)
-{
- dev_priv->perf.oa.n_mux_configs = 0;
- dev_priv->perf.oa.b_counter_regs = NULL;
- dev_priv->perf.oa.b_counter_regs_len = 0;
- dev_priv->perf.oa.flex_regs = NULL;
- dev_priv->perf.oa.flex_regs_len = 0;
-
- switch (dev_priv->perf.oa.metrics_set) {
- case METRIC_SET_ID_RENDER_BASIC:
- dev_priv->perf.oa.n_mux_configs =
- get_render_basic_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_BASIC\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_render_basic;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_render_basic);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_render_basic;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_render_basic);
-
- return 0;
- case METRIC_SET_ID_COMPUTE_BASIC:
- dev_priv->perf.oa.n_mux_configs =
- get_compute_basic_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_BASIC\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_compute_basic;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_compute_basic);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_compute_basic;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_compute_basic);
-
- return 0;
- case METRIC_SET_ID_RENDER_PIPE_PROFILE:
- dev_priv->perf.oa.n_mux_configs =
- get_render_pipe_profile_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_PIPE_PROFILE\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_render_pipe_profile;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_render_pipe_profile);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_render_pipe_profile;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_render_pipe_profile);
-
- return 0;
- case METRIC_SET_ID_MEMORY_READS:
- dev_priv->perf.oa.n_mux_configs =
- get_memory_reads_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_READS\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_memory_reads;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_memory_reads);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_memory_reads;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_memory_reads);
-
- return 0;
- case METRIC_SET_ID_MEMORY_WRITES:
- dev_priv->perf.oa.n_mux_configs =
- get_memory_writes_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_WRITES\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_memory_writes;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_memory_writes);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_memory_writes;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_memory_writes);
-
- return 0;
- case METRIC_SET_ID_COMPUTE_EXTENDED:
- dev_priv->perf.oa.n_mux_configs =
- get_compute_extended_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTENDED\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_compute_extended;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_compute_extended);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_compute_extended;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_compute_extended);
-
- return 0;
- case METRIC_SET_ID_COMPUTE_L3_CACHE:
- dev_priv->perf.oa.n_mux_configs =
- get_compute_l3_cache_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_L3_CACHE\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_compute_l3_cache;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_compute_l3_cache);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_compute_l3_cache;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_compute_l3_cache);
-
- return 0;
- case METRIC_SET_ID_HDC_AND_SF:
- dev_priv->perf.oa.n_mux_configs =
- get_hdc_and_sf_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"HDC_AND_SF\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_hdc_and_sf;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_hdc_and_sf);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_hdc_and_sf;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_hdc_and_sf);
-
- return 0;
- case METRIC_SET_ID_L3_1:
- dev_priv->perf.oa.n_mux_configs =
- get_l3_1_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_1\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_l3_1;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_l3_1);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_l3_1;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_l3_1);
-
- return 0;
- case METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND:
- dev_priv->perf.oa.n_mux_configs =
- get_rasterizer_and_pixel_backend_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"RASTERIZER_AND_PIXEL_BACKEND\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_rasterizer_and_pixel_backend;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_rasterizer_and_pixel_backend);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_rasterizer_and_pixel_backend;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_rasterizer_and_pixel_backend);
-
- return 0;
- case METRIC_SET_ID_SAMPLER:
- dev_priv->perf.oa.n_mux_configs =
- get_sampler_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"SAMPLER\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_sampler;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_sampler);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_sampler;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_sampler);
-
- return 0;
- case METRIC_SET_ID_TDL_1:
- dev_priv->perf.oa.n_mux_configs =
- get_tdl_1_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"TDL_1\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_tdl_1;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_tdl_1);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_tdl_1;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_tdl_1);
-
- return 0;
- case METRIC_SET_ID_TDL_2:
- dev_priv->perf.oa.n_mux_configs =
- get_tdl_2_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"TDL_2\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_tdl_2;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_tdl_2);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_tdl_2;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_tdl_2);
-
- return 0;
- case METRIC_SET_ID_COMPUTE_EXTRA:
- dev_priv->perf.oa.n_mux_configs =
- get_compute_extra_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTRA\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_compute_extra;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_compute_extra);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_compute_extra;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_compute_extra);
-
- return 0;
- case METRIC_SET_ID_TEST_OA:
- dev_priv->perf.oa.n_mux_configs =
- get_test_oa_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"TEST_OA\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_test_oa;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_test_oa;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_test_oa);
-
- return 0;
- default:
- return -ENODEV;
- }
-}
-
-static ssize_t
-show_render_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_BASIC);
-}
-
-static struct device_attribute dev_attr_render_basic_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_render_basic_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_render_basic[] = {
- &dev_attr_render_basic_id.attr,
- NULL,
-};
-
-static struct attribute_group group_render_basic = {
- .name = "d72df5c7-5b4a-4274-a43f-00b0fd51fc68",
- .attrs = attrs_render_basic,
-};
-
-static ssize_t
-show_compute_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_BASIC);
-}
-
-static struct device_attribute dev_attr_compute_basic_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_compute_basic_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_compute_basic[] = {
- &dev_attr_compute_basic_id.attr,
- NULL,
-};
-
-static struct attribute_group group_compute_basic = {
- .name = "814285f6-354d-41d2-ba49-e24e622714a0",
- .attrs = attrs_compute_basic,
-};
-
-static ssize_t
-show_render_pipe_profile_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_PIPE_PROFILE);
-}
-
-static struct device_attribute dev_attr_render_pipe_profile_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_render_pipe_profile_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_render_pipe_profile[] = {
- &dev_attr_render_pipe_profile_id.attr,
- NULL,
-};
-
-static struct attribute_group group_render_pipe_profile = {
- .name = "07d397a6-b3e6-49f6-9433-a4f293d55978",
- .attrs = attrs_render_pipe_profile,
-};
-
-static ssize_t
-show_memory_reads_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_READS);
-}
-
-static struct device_attribute dev_attr_memory_reads_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_memory_reads_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_memory_reads[] = {
- &dev_attr_memory_reads_id.attr,
- NULL,
-};
-
-static struct attribute_group group_memory_reads = {
- .name = "1a356946-5428-450b-a2f0-89f8783a302d",
- .attrs = attrs_memory_reads,
-};
-
-static ssize_t
-show_memory_writes_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_WRITES);
-}
-
-static struct device_attribute dev_attr_memory_writes_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_memory_writes_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_memory_writes[] = {
- &dev_attr_memory_writes_id.attr,
- NULL,
-};
-
-static struct attribute_group group_memory_writes = {
- .name = "5299be9d-7a61-4c99-9f81-f87e6c5aaca9",
- .attrs = attrs_memory_writes,
-};
-
-static ssize_t
-show_compute_extended_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_EXTENDED);
-}
-
-static struct device_attribute dev_attr_compute_extended_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_compute_extended_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_compute_extended[] = {
- &dev_attr_compute_extended_id.attr,
- NULL,
-};
-
-static struct attribute_group group_compute_extended = {
- .name = "bc9bcff2-459a-4cbc-986d-a84b077153f3",
- .attrs = attrs_compute_extended,
-};
-
-static ssize_t
-show_compute_l3_cache_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_L3_CACHE);
-}
-
-static struct device_attribute dev_attr_compute_l3_cache_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_compute_l3_cache_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_compute_l3_cache[] = {
- &dev_attr_compute_l3_cache_id.attr,
- NULL,
-};
-
-static struct attribute_group group_compute_l3_cache = {
- .name = "88ec931f-5b4a-453a-9db6-a61232b6143d",
- .attrs = attrs_compute_l3_cache,
-};
-
-static ssize_t
-show_hdc_and_sf_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_HDC_AND_SF);
-}
-
-static struct device_attribute dev_attr_hdc_and_sf_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_hdc_and_sf_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_hdc_and_sf[] = {
- &dev_attr_hdc_and_sf_id.attr,
- NULL,
-};
-
-static struct attribute_group group_hdc_and_sf = {
- .name = "530d176d-2a18-4014-adf8-1500c6c60835",
- .attrs = attrs_hdc_and_sf,
-};
-
-static ssize_t
-show_l3_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_L3_1);
-}
-
-static struct device_attribute dev_attr_l3_1_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_l3_1_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_l3_1[] = {
- &dev_attr_l3_1_id.attr,
- NULL,
-};
-
-static struct attribute_group group_l3_1 = {
- .name = "fdee5a5a-f23c-43d1-aa73-f6257c71671d",
- .attrs = attrs_l3_1,
-};
-
-static ssize_t
-show_rasterizer_and_pixel_backend_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND);
-}
-
-static struct device_attribute dev_attr_rasterizer_and_pixel_backend_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_rasterizer_and_pixel_backend_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_rasterizer_and_pixel_backend[] = {
- &dev_attr_rasterizer_and_pixel_backend_id.attr,
- NULL,
-};
-
-static struct attribute_group group_rasterizer_and_pixel_backend = {
- .name = "6617623e-ca73-4791-b2b7-ddedd0846a0c",
- .attrs = attrs_rasterizer_and_pixel_backend,
-};
-
-static ssize_t
-show_sampler_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_SAMPLER);
-}
-
-static struct device_attribute dev_attr_sampler_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_sampler_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_sampler[] = {
- &dev_attr_sampler_id.attr,
- NULL,
-};
-
-static struct attribute_group group_sampler = {
- .name = "f3b2ea63-e82e-4234-b418-44dd20dd34d0",
- .attrs = attrs_sampler,
-};
-
-static ssize_t
-show_tdl_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_TDL_1);
-}
-
-static struct device_attribute dev_attr_tdl_1_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_tdl_1_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_tdl_1[] = {
- &dev_attr_tdl_1_id.attr,
- NULL,
-};
-
-static struct attribute_group group_tdl_1 = {
- .name = "14411d35-cbf6-4f5e-b68b-190faf9a1a83",
- .attrs = attrs_tdl_1,
-};
-
-static ssize_t
-show_tdl_2_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_TDL_2);
-}
-
-static struct device_attribute dev_attr_tdl_2_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_tdl_2_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_tdl_2[] = {
- &dev_attr_tdl_2_id.attr,
- NULL,
-};
-
-static struct attribute_group group_tdl_2 = {
- .name = "ffa3f263-0478-4724-8c9f-c911c5ec0f1d",
- .attrs = attrs_tdl_2,
-};
-
-static ssize_t
-show_compute_extra_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_EXTRA);
-}
-
-static struct device_attribute dev_attr_compute_extra_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_compute_extra_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_compute_extra[] = {
- &dev_attr_compute_extra_id.attr,
- NULL,
-};
-
-static struct attribute_group group_compute_extra = {
- .name = "15274c82-27d2-4819-876a-7cb1a2c59ba4",
- .attrs = attrs_compute_extra,
-};
-
static ssize_t
show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", METRIC_SET_ID_TEST_OA);
+ return sprintf(buf, "1\n");
}
-static struct device_attribute dev_attr_test_oa_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_test_oa_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_test_oa[] = {
- &dev_attr_test_oa_id.attr,
- NULL,
-};
-
-static struct attribute_group group_test_oa = {
- .name = "dd3fd789-e783-4204-8cd0-b671bbccb0cf",
- .attrs = attrs_test_oa,
-};
-
-int
-i915_perf_register_sysfs_glk(struct drm_i915_private *dev_priv)
+void
+i915_perf_load_test_config_glk(struct drm_i915_private *dev_priv)
{
- const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
- int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];
- int ret = 0;
+ strncpy(dev_priv->perf.oa.test_config.uuid,
+ "dd3fd789-e783-4204-8cd0-b671bbccb0cf",
+ UUID_STRING_LEN);
+ dev_priv->perf.oa.test_config.id = 1;
- if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_basic);
- if (ret)
- goto error_render_basic;
- }
- if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
- if (ret)
- goto error_compute_basic;
- }
- if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
- if (ret)
- goto error_render_pipe_profile;
- }
- if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
- if (ret)
- goto error_memory_reads;
- }
- if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
- if (ret)
- goto error_memory_writes;
- }
- if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
- if (ret)
- goto error_compute_extended;
- }
- if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
- if (ret)
- goto error_compute_l3_cache;
- }
- if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
- if (ret)
- goto error_hdc_and_sf;
- }
- if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_1);
- if (ret)
- goto error_l3_1;
- }
- if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
- if (ret)
- goto error_rasterizer_and_pixel_backend;
- }
- if (get_sampler_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_sampler);
- if (ret)
- goto error_sampler;
- }
- if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
- if (ret)
- goto error_tdl_1;
- }
- if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
- if (ret)
- goto error_tdl_2;
- }
- if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
- if (ret)
- goto error_compute_extra;
- }
- if (get_test_oa_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_test_oa);
- if (ret)
- goto error_test_oa;
- }
+ dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- return 0;
+ dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-error_test_oa:
- if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
-error_compute_extra:
- if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
-error_tdl_2:
- if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
-error_tdl_1:
- if (get_sampler_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler);
-error_sampler:
- if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
-error_rasterizer_and_pixel_backend:
- if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_1);
-error_l3_1:
- if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
-error_hdc_and_sf:
- if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
-error_compute_l3_cache:
- if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
-error_compute_extended:
- if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
-error_memory_writes:
- if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
-error_memory_reads:
- if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
-error_render_pipe_profile:
- if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
-error_compute_basic:
- if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
-error_render_basic:
- return ret;
-}
+ dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-void
-i915_perf_unregister_sysfs_glk(struct drm_i915_private *dev_priv)
-{
- const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
- int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];
+ dev_priv->perf.oa.test_config.sysfs_metric.name = "dd3fd789-e783-4204-8cd0-b671bbccb0cf";
+ dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+
+ dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
- if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
- if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
- if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
- if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
- if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
- if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
- if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
- if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
- if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_1);
- if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
- if (get_sampler_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler);
- if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
- if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
- if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
- if (get_test_oa_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_test_oa);
+ dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
}
diff --git a/drivers/gpu/drm/i915/i915_oa_glk.h b/drivers/gpu/drm/i915/i915_oa_glk.h
index 5511bb1cecf7..63bd113f4bc9 100644
--- a/drivers/gpu/drm/i915/i915_oa_glk.h
+++ b/drivers/gpu/drm/i915/i915_oa_glk.h
@@ -29,12 +29,6 @@
#ifndef __I915_OA_GLK_H__
#define __I915_OA_GLK_H__
-extern int i915_oa_n_builtin_metric_sets_glk;
-
-extern int i915_oa_select_metric_set_glk(struct drm_i915_private *dev_priv);
-
-extern int i915_perf_register_sysfs_glk(struct drm_i915_private *dev_priv);
-
-extern void i915_perf_unregister_sysfs_glk(struct drm_i915_private *dev_priv);
+extern void i915_perf_load_test_config_glk(struct drm_i915_private *dev_priv);
#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_hsw.c b/drivers/gpu/drm/i915/i915_oa_hsw.c
index 10f169f683b7..56b03773bb9d 100644
--- a/drivers/gpu/drm/i915/i915_oa_hsw.c
+++ b/drivers/gpu/drm/i915/i915_oa_hsw.c
@@ -31,17 +31,6 @@
#include "i915_drv.h"
#include "i915_oa_hsw.h"
-enum metric_set_id {
- METRIC_SET_ID_RENDER_BASIC = 1,
- METRIC_SET_ID_COMPUTE_BASIC,
- METRIC_SET_ID_COMPUTE_EXTENDED,
- METRIC_SET_ID_MEMORY_READS,
- METRIC_SET_ID_MEMORY_WRITES,
- METRIC_SET_ID_SAMPLER_BALANCE,
-};
-
-int i915_oa_n_builtin_metric_sets_hsw = 6;
-
static const struct i915_oa_reg b_counter_config_render_basic[] = {
{ _MMIO(0x2724), 0x00800000 },
{ _MMIO(0x2720), 0x00000000 },
@@ -53,6 +42,7 @@ static const struct i915_oa_reg flex_eu_config_render_basic[] = {
};
static const struct i915_oa_reg mux_config_render_basic[] = {
+ { _MMIO(0x9840), 0x00000080 },
{ _MMIO(0x253a4), 0x01600000 },
{ _MMIO(0x25440), 0x00100000 },
{ _MMIO(0x25128), 0x00000000 },
@@ -114,750 +104,35 @@ static const struct i915_oa_reg mux_config_render_basic[] = {
{ _MMIO(0x25428), 0x00042049 },
};
-static int
-get_render_basic_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_render_basic;
- lens[n] = ARRAY_SIZE(mux_config_render_basic);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_compute_basic[] = {
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x00800000 },
- { _MMIO(0x2718), 0xaaaaaaaa },
- { _MMIO(0x271c), 0xaaaaaaaa },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2728), 0xaaaaaaaa },
- { _MMIO(0x272c), 0xaaaaaaaa },
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00000000 },
- { _MMIO(0x2748), 0x00000000 },
- { _MMIO(0x274c), 0x00000000 },
- { _MMIO(0x2750), 0x00000000 },
- { _MMIO(0x2754), 0x00000000 },
- { _MMIO(0x2758), 0x00000000 },
- { _MMIO(0x275c), 0x00000000 },
- { _MMIO(0x236c), 0x00000000 },
-};
-
-static const struct i915_oa_reg flex_eu_config_compute_basic[] = {
-};
-
-static const struct i915_oa_reg mux_config_compute_basic[] = {
- { _MMIO(0x253a4), 0x00000000 },
- { _MMIO(0x2681c), 0x01f00800 },
- { _MMIO(0x26820), 0x00001000 },
- { _MMIO(0x2781c), 0x01f00800 },
- { _MMIO(0x26520), 0x00000007 },
- { _MMIO(0x265a0), 0x00000007 },
- { _MMIO(0x25380), 0x00000010 },
- { _MMIO(0x2538c), 0x00300000 },
- { _MMIO(0x25384), 0xaa8aaaaa },
- { _MMIO(0x25404), 0xffffffff },
- { _MMIO(0x26800), 0x00004202 },
- { _MMIO(0x26808), 0x00605817 },
- { _MMIO(0x2680c), 0x10001005 },
- { _MMIO(0x26804), 0x00000000 },
- { _MMIO(0x27800), 0x00000102 },
- { _MMIO(0x27808), 0x0c0701e0 },
- { _MMIO(0x2780c), 0x000200a0 },
- { _MMIO(0x27804), 0x00000000 },
- { _MMIO(0x26484), 0x44000000 },
- { _MMIO(0x26704), 0x44000000 },
- { _MMIO(0x26500), 0x00000006 },
- { _MMIO(0x26510), 0x00000001 },
- { _MMIO(0x26504), 0x88000000 },
- { _MMIO(0x26580), 0x00000006 },
- { _MMIO(0x26590), 0x00000020 },
- { _MMIO(0x26584), 0x00000000 },
- { _MMIO(0x26104), 0x55822222 },
- { _MMIO(0x26184), 0xaa866666 },
- { _MMIO(0x25420), 0x08320c83 },
- { _MMIO(0x25424), 0x06820c83 },
- { _MMIO(0x2541c), 0x00000000 },
- { _MMIO(0x25428), 0x00000c03 },
-};
-
-static int
-get_compute_basic_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_compute_basic;
- lens[n] = ARRAY_SIZE(mux_config_compute_basic);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_compute_extended[] = {
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2770), 0x0007fe2a },
- { _MMIO(0x2774), 0x0000ff00 },
- { _MMIO(0x2778), 0x0007fe6a },
- { _MMIO(0x277c), 0x0000ff00 },
- { _MMIO(0x2780), 0x0007fe92 },
- { _MMIO(0x2784), 0x0000ff00 },
- { _MMIO(0x2788), 0x0007fea2 },
- { _MMIO(0x278c), 0x0000ff00 },
- { _MMIO(0x2790), 0x0007fe32 },
- { _MMIO(0x2794), 0x0000ff00 },
- { _MMIO(0x2798), 0x0007fe9a },
- { _MMIO(0x279c), 0x0000ff00 },
- { _MMIO(0x27a0), 0x0007ff23 },
- { _MMIO(0x27a4), 0x0000ff00 },
- { _MMIO(0x27a8), 0x0007fff3 },
- { _MMIO(0x27ac), 0x0000fffe },
-};
-
-static const struct i915_oa_reg flex_eu_config_compute_extended[] = {
-};
-
-static const struct i915_oa_reg mux_config_compute_extended[] = {
- { _MMIO(0x2681c), 0x3eb00800 },
- { _MMIO(0x26820), 0x00900000 },
- { _MMIO(0x25384), 0x02aaaaaa },
- { _MMIO(0x25404), 0x03ffffff },
- { _MMIO(0x26800), 0x00142284 },
- { _MMIO(0x26808), 0x0e629062 },
- { _MMIO(0x2680c), 0x3f6f55cb },
- { _MMIO(0x26810), 0x00000014 },
- { _MMIO(0x26804), 0x00000000 },
- { _MMIO(0x26104), 0x02aaaaaa },
- { _MMIO(0x26184), 0x02aaaaaa },
- { _MMIO(0x25420), 0x00000000 },
- { _MMIO(0x25424), 0x00000000 },
- { _MMIO(0x2541c), 0x00000000 },
- { _MMIO(0x25428), 0x00000000 },
-};
-
-static int
-get_compute_extended_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_compute_extended;
- lens[n] = ARRAY_SIZE(mux_config_compute_extended);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_memory_reads[] = {
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x274c), 0x76543298 },
- { _MMIO(0x2748), 0x98989898 },
- { _MMIO(0x2744), 0x000000e4 },
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x275c), 0x98a98a98 },
- { _MMIO(0x2758), 0x88888888 },
- { _MMIO(0x2754), 0x000c5500 },
- { _MMIO(0x2750), 0x00000000 },
- { _MMIO(0x2770), 0x0007f81a },
- { _MMIO(0x2774), 0x0000fc00 },
- { _MMIO(0x2778), 0x0007f82a },
- { _MMIO(0x277c), 0x0000fc00 },
- { _MMIO(0x2780), 0x0007f872 },
- { _MMIO(0x2784), 0x0000fc00 },
- { _MMIO(0x2788), 0x0007f8ba },
- { _MMIO(0x278c), 0x0000fc00 },
- { _MMIO(0x2790), 0x0007f87a },
- { _MMIO(0x2794), 0x0000fc00 },
- { _MMIO(0x2798), 0x0007f8ea },
- { _MMIO(0x279c), 0x0000fc00 },
- { _MMIO(0x27a0), 0x0007f8e2 },
- { _MMIO(0x27a4), 0x0000fc00 },
- { _MMIO(0x27a8), 0x0007f8f2 },
- { _MMIO(0x27ac), 0x0000fc00 },
-};
-
-static const struct i915_oa_reg flex_eu_config_memory_reads[] = {
-};
-
-static const struct i915_oa_reg mux_config_memory_reads[] = {
- { _MMIO(0x253a4), 0x34300000 },
- { _MMIO(0x25440), 0x2d800000 },
- { _MMIO(0x25444), 0x00000008 },
- { _MMIO(0x25128), 0x0e600000 },
- { _MMIO(0x25380), 0x00000450 },
- { _MMIO(0x25390), 0x00052c43 },
- { _MMIO(0x25384), 0x00000000 },
- { _MMIO(0x25400), 0x00006144 },
- { _MMIO(0x25408), 0x0a418820 },
- { _MMIO(0x2540c), 0x000820e6 },
- { _MMIO(0x25404), 0xff500000 },
- { _MMIO(0x25100), 0x000005d6 },
- { _MMIO(0x2510c), 0x0ef00000 },
- { _MMIO(0x25104), 0x00000000 },
- { _MMIO(0x25420), 0x02108421 },
- { _MMIO(0x25424), 0x00008421 },
- { _MMIO(0x2541c), 0x00000000 },
- { _MMIO(0x25428), 0x00000000 },
-};
-
-static int
-get_memory_reads_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_memory_reads;
- lens[n] = ARRAY_SIZE(mux_config_memory_reads);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_memory_writes[] = {
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x274c), 0x76543298 },
- { _MMIO(0x2748), 0x98989898 },
- { _MMIO(0x2744), 0x000000e4 },
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x275c), 0xbabababa },
- { _MMIO(0x2758), 0x88888888 },
- { _MMIO(0x2754), 0x000c5500 },
- { _MMIO(0x2750), 0x00000000 },
- { _MMIO(0x2770), 0x0007f81a },
- { _MMIO(0x2774), 0x0000fc00 },
- { _MMIO(0x2778), 0x0007f82a },
- { _MMIO(0x277c), 0x0000fc00 },
- { _MMIO(0x2780), 0x0007f822 },
- { _MMIO(0x2784), 0x0000fc00 },
- { _MMIO(0x2788), 0x0007f8ba },
- { _MMIO(0x278c), 0x0000fc00 },
- { _MMIO(0x2790), 0x0007f87a },
- { _MMIO(0x2794), 0x0000fc00 },
- { _MMIO(0x2798), 0x0007f8ea },
- { _MMIO(0x279c), 0x0000fc00 },
- { _MMIO(0x27a0), 0x0007f8e2 },
- { _MMIO(0x27a4), 0x0000fc00 },
- { _MMIO(0x27a8), 0x0007f8f2 },
- { _MMIO(0x27ac), 0x0000fc00 },
-};
-
-static const struct i915_oa_reg flex_eu_config_memory_writes[] = {
-};
-
-static const struct i915_oa_reg mux_config_memory_writes[] = {
- { _MMIO(0x253a4), 0x34300000 },
- { _MMIO(0x25440), 0x01500000 },
- { _MMIO(0x25444), 0x00000120 },
- { _MMIO(0x25128), 0x0c200000 },
- { _MMIO(0x25380), 0x00000450 },
- { _MMIO(0x25390), 0x00052c43 },
- { _MMIO(0x25384), 0x00000000 },
- { _MMIO(0x25400), 0x00007184 },
- { _MMIO(0x25408), 0x0a418820 },
- { _MMIO(0x2540c), 0x000820e6 },
- { _MMIO(0x25404), 0xff500000 },
- { _MMIO(0x25100), 0x000005d6 },
- { _MMIO(0x2510c), 0x1e700000 },
- { _MMIO(0x25104), 0x00000000 },
- { _MMIO(0x25420), 0x02108421 },
- { _MMIO(0x25424), 0x00008421 },
- { _MMIO(0x2541c), 0x00000000 },
- { _MMIO(0x25428), 0x00000000 },
-};
-
-static int
-get_memory_writes_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_memory_writes;
- lens[n] = ARRAY_SIZE(mux_config_memory_writes);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_sampler_balance[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x00800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
-};
-
-static const struct i915_oa_reg flex_eu_config_sampler_balance[] = {
-};
-
-static const struct i915_oa_reg mux_config_sampler_balance[] = {
- { _MMIO(0x2eb9c), 0x01906400 },
- { _MMIO(0x2fb9c), 0x01906400 },
- { _MMIO(0x253a4), 0x00000000 },
- { _MMIO(0x26b9c), 0x01906400 },
- { _MMIO(0x27b9c), 0x01906400 },
- { _MMIO(0x27104), 0x00a00000 },
- { _MMIO(0x27184), 0x00a50000 },
- { _MMIO(0x2e804), 0x00500000 },
- { _MMIO(0x2e984), 0x00500000 },
- { _MMIO(0x2eb04), 0x00500000 },
- { _MMIO(0x2eb80), 0x00000084 },
- { _MMIO(0x2eb8c), 0x14200000 },
- { _MMIO(0x2eb84), 0x00000000 },
- { _MMIO(0x2f804), 0x00050000 },
- { _MMIO(0x2f984), 0x00050000 },
- { _MMIO(0x2fb04), 0x00050000 },
- { _MMIO(0x2fb80), 0x00000084 },
- { _MMIO(0x2fb8c), 0x00050800 },
- { _MMIO(0x2fb84), 0x00000000 },
- { _MMIO(0x25380), 0x00000010 },
- { _MMIO(0x2538c), 0x000000c0 },
- { _MMIO(0x25384), 0xaa550000 },
- { _MMIO(0x25404), 0xffffc000 },
- { _MMIO(0x26804), 0x50000000 },
- { _MMIO(0x26984), 0x50000000 },
- { _MMIO(0x26b04), 0x50000000 },
- { _MMIO(0x26b80), 0x00000084 },
- { _MMIO(0x26b90), 0x00050800 },
- { _MMIO(0x26b84), 0x00000000 },
- { _MMIO(0x27804), 0x05000000 },
- { _MMIO(0x27984), 0x05000000 },
- { _MMIO(0x27b04), 0x05000000 },
- { _MMIO(0x27b80), 0x00000084 },
- { _MMIO(0x27b90), 0x00000142 },
- { _MMIO(0x27b84), 0x00000000 },
- { _MMIO(0x26104), 0xa0000000 },
- { _MMIO(0x26184), 0xa5000000 },
- { _MMIO(0x25424), 0x00008620 },
- { _MMIO(0x2541c), 0x00000000 },
- { _MMIO(0x25428), 0x0004a54a },
-};
-
-static int
-get_sampler_balance_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_sampler_balance;
- lens[n] = ARRAY_SIZE(mux_config_sampler_balance);
- n++;
-
- return n;
-}
-
-int i915_oa_select_metric_set_hsw(struct drm_i915_private *dev_priv)
-{
- dev_priv->perf.oa.n_mux_configs = 0;
- dev_priv->perf.oa.b_counter_regs = NULL;
- dev_priv->perf.oa.b_counter_regs_len = 0;
-
- switch (dev_priv->perf.oa.metrics_set) {
- case METRIC_SET_ID_RENDER_BASIC:
- dev_priv->perf.oa.n_mux_configs =
- get_render_basic_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_BASIC\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_render_basic;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_render_basic);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_render_basic;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_render_basic);
-
- return 0;
- case METRIC_SET_ID_COMPUTE_BASIC:
- dev_priv->perf.oa.n_mux_configs =
- get_compute_basic_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_BASIC\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_compute_basic;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_compute_basic);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_compute_basic;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_compute_basic);
-
- return 0;
- case METRIC_SET_ID_COMPUTE_EXTENDED:
- dev_priv->perf.oa.n_mux_configs =
- get_compute_extended_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTENDED\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_compute_extended;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_compute_extended);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_compute_extended;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_compute_extended);
-
- return 0;
- case METRIC_SET_ID_MEMORY_READS:
- dev_priv->perf.oa.n_mux_configs =
- get_memory_reads_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_READS\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_memory_reads;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_memory_reads);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_memory_reads;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_memory_reads);
-
- return 0;
- case METRIC_SET_ID_MEMORY_WRITES:
- dev_priv->perf.oa.n_mux_configs =
- get_memory_writes_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_WRITES\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_memory_writes;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_memory_writes);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_memory_writes;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_memory_writes);
-
- return 0;
- case METRIC_SET_ID_SAMPLER_BALANCE:
- dev_priv->perf.oa.n_mux_configs =
- get_sampler_balance_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"SAMPLER_BALANCE\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_sampler_balance;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_sampler_balance);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_sampler_balance;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_sampler_balance);
-
- return 0;
- default:
- return -ENODEV;
- }
-}
-
static ssize_t
show_render_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_BASIC);
-}
-
-static struct device_attribute dev_attr_render_basic_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_render_basic_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_render_basic[] = {
- &dev_attr_render_basic_id.attr,
- NULL,
-};
-
-static struct attribute_group group_render_basic = {
- .name = "403d8832-1a27-4aa6-a64e-f5389ce7b212",
- .attrs = attrs_render_basic,
-};
-
-static ssize_t
-show_compute_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_BASIC);
-}
-
-static struct device_attribute dev_attr_compute_basic_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_compute_basic_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_compute_basic[] = {
- &dev_attr_compute_basic_id.attr,
- NULL,
-};
-
-static struct attribute_group group_compute_basic = {
- .name = "39ad14bc-2380-45c4-91eb-fbcb3aa7ae7b",
- .attrs = attrs_compute_basic,
-};
-
-static ssize_t
-show_compute_extended_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_EXTENDED);
+ return sprintf(buf, "1\n");
}
-static struct device_attribute dev_attr_compute_extended_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_compute_extended_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_compute_extended[] = {
- &dev_attr_compute_extended_id.attr,
- NULL,
-};
-
-static struct attribute_group group_compute_extended = {
- .name = "3865be28-6982-49fe-9494-e4d1b4795413",
- .attrs = attrs_compute_extended,
-};
-
-static ssize_t
-show_memory_reads_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_READS);
-}
-
-static struct device_attribute dev_attr_memory_reads_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_memory_reads_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_memory_reads[] = {
- &dev_attr_memory_reads_id.attr,
- NULL,
-};
-
-static struct attribute_group group_memory_reads = {
- .name = "bb5ed49b-2497-4095-94f6-26ba294db88a",
- .attrs = attrs_memory_reads,
-};
-
-static ssize_t
-show_memory_writes_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_WRITES);
-}
-
-static struct device_attribute dev_attr_memory_writes_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_memory_writes_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_memory_writes[] = {
- &dev_attr_memory_writes_id.attr,
- NULL,
-};
-
-static struct attribute_group group_memory_writes = {
- .name = "3358d639-9b5f-45ab-976d-9b08cbfc6240",
- .attrs = attrs_memory_writes,
-};
-
-static ssize_t
-show_sampler_balance_id(struct device *kdev, struct device_attribute *attr, char *buf)
+void
+i915_perf_load_test_config_hsw(struct drm_i915_private *dev_priv)
{
- return sprintf(buf, "%d\n", METRIC_SET_ID_SAMPLER_BALANCE);
-}
-
-static struct device_attribute dev_attr_sampler_balance_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_sampler_balance_id,
- .store = NULL,
-};
+ strncpy(dev_priv->perf.oa.test_config.uuid,
+ "403d8832-1a27-4aa6-a64e-f5389ce7b212",
+ UUID_STRING_LEN);
+ dev_priv->perf.oa.test_config.id = 1;
-static struct attribute *attrs_sampler_balance[] = {
- &dev_attr_sampler_balance_id.attr,
- NULL,
-};
+ dev_priv->perf.oa.test_config.mux_regs = mux_config_render_basic;
+ dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_render_basic);
-static struct attribute_group group_sampler_balance = {
- .name = "bc274488-b4b6-40c7-90da-b77d7ad16189",
- .attrs = attrs_sampler_balance,
-};
+ dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_render_basic;
+ dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_render_basic);
-int
-i915_perf_register_sysfs_hsw(struct drm_i915_private *dev_priv)
-{
- const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
- int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];
- int ret = 0;
+ dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_render_basic;
+ dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_render_basic);
- if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_basic);
- if (ret)
- goto error_render_basic;
- }
- if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
- if (ret)
- goto error_compute_basic;
- }
- if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
- if (ret)
- goto error_compute_extended;
- }
- if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
- if (ret)
- goto error_memory_reads;
- }
- if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
- if (ret)
- goto error_memory_writes;
- }
- if (get_sampler_balance_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_sampler_balance);
- if (ret)
- goto error_sampler_balance;
- }
+ dev_priv->perf.oa.test_config.sysfs_metric.name = "403d8832-1a27-4aa6-a64e-f5389ce7b212";
+ dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
- return 0;
-
-error_sampler_balance:
- if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
-error_memory_writes:
- if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
-error_memory_reads:
- if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
-error_compute_extended:
- if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
-error_compute_basic:
- if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
-error_render_basic:
- return ret;
-}
-
-void
-i915_perf_unregister_sysfs_hsw(struct drm_i915_private *dev_priv)
-{
- const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
- int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];
+ dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
- if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
- if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
- if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
- if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
- if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
- if (get_sampler_balance_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler_balance);
+ dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_render_basic_id;
}
diff --git a/drivers/gpu/drm/i915/i915_oa_hsw.h b/drivers/gpu/drm/i915/i915_oa_hsw.h
index 6fe7e0690ef3..74d03439c157 100644
--- a/drivers/gpu/drm/i915/i915_oa_hsw.h
+++ b/drivers/gpu/drm/i915/i915_oa_hsw.h
@@ -29,12 +29,6 @@
#ifndef __I915_OA_HSW_H__
#define __I915_OA_HSW_H__
-extern int i915_oa_n_builtin_metric_sets_hsw;
-
-extern int i915_oa_select_metric_set_hsw(struct drm_i915_private *dev_priv);
-
-extern int i915_perf_register_sysfs_hsw(struct drm_i915_private *dev_priv);
-
-extern void i915_perf_unregister_sysfs_hsw(struct drm_i915_private *dev_priv);
+extern void i915_perf_load_test_config_hsw(struct drm_i915_private *dev_priv);
#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt2.c b/drivers/gpu/drm/i915/i915_oa_kblgt2.c
index 87dbd0a0b076..b6e7cc774136 100644
--- a/drivers/gpu/drm/i915/i915_oa_kblgt2.c
+++ b/drivers/gpu/drm/i915/i915_oa_kblgt2.c
@@ -31,1828 +31,6 @@
#include "i915_drv.h"
#include "i915_oa_kblgt2.h"
-enum metric_set_id {
- METRIC_SET_ID_RENDER_BASIC = 1,
- METRIC_SET_ID_COMPUTE_BASIC,
- METRIC_SET_ID_RENDER_PIPE_PROFILE,
- METRIC_SET_ID_MEMORY_READS,
- METRIC_SET_ID_MEMORY_WRITES,
- METRIC_SET_ID_COMPUTE_EXTENDED,
- METRIC_SET_ID_COMPUTE_L3_CACHE,
- METRIC_SET_ID_HDC_AND_SF,
- METRIC_SET_ID_L3_1,
- METRIC_SET_ID_L3_2,
- METRIC_SET_ID_L3_3,
- METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND,
- METRIC_SET_ID_SAMPLER,
- METRIC_SET_ID_TDL_1,
- METRIC_SET_ID_TDL_2,
- METRIC_SET_ID_COMPUTE_EXTRA,
- METRIC_SET_ID_VME_PIPE,
- METRIC_SET_ID_TEST_OA,
-};
-
-int i915_oa_n_builtin_metric_sets_kblgt2 = 18;
-
-static const struct i915_oa_reg b_counter_config_render_basic[] = {
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x00800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2740), 0x00000000 },
-};
-
-static const struct i915_oa_reg flex_eu_config_render_basic[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_render_basic[] = {
- { _MMIO(0x9888), 0x166c01e0 },
- { _MMIO(0x9888), 0x12170280 },
- { _MMIO(0x9888), 0x12370280 },
- { _MMIO(0x9888), 0x11930317 },
- { _MMIO(0x9888), 0x159303df },
- { _MMIO(0x9888), 0x3f900003 },
- { _MMIO(0x9888), 0x1a4e0080 },
- { _MMIO(0x9888), 0x0a6c0053 },
- { _MMIO(0x9888), 0x106c0000 },
- { _MMIO(0x9888), 0x1c6c0000 },
- { _MMIO(0x9888), 0x0a1b4000 },
- { _MMIO(0x9888), 0x1c1c0001 },
- { _MMIO(0x9888), 0x002f1000 },
- { _MMIO(0x9888), 0x042f1000 },
- { _MMIO(0x9888), 0x004c4000 },
- { _MMIO(0x9888), 0x0a4c8400 },
- { _MMIO(0x9888), 0x000d2000 },
- { _MMIO(0x9888), 0x060d8000 },
- { _MMIO(0x9888), 0x080da000 },
- { _MMIO(0x9888), 0x0a0d2000 },
- { _MMIO(0x9888), 0x0c0f0400 },
- { _MMIO(0x9888), 0x0e0f6600 },
- { _MMIO(0x9888), 0x002c8000 },
- { _MMIO(0x9888), 0x162c2200 },
- { _MMIO(0x9888), 0x062d8000 },
- { _MMIO(0x9888), 0x082d8000 },
- { _MMIO(0x9888), 0x00133000 },
- { _MMIO(0x9888), 0x08133000 },
- { _MMIO(0x9888), 0x00170020 },
- { _MMIO(0x9888), 0x08170021 },
- { _MMIO(0x9888), 0x10170000 },
- { _MMIO(0x9888), 0x0633c000 },
- { _MMIO(0x9888), 0x0833c000 },
- { _MMIO(0x9888), 0x06370800 },
- { _MMIO(0x9888), 0x08370840 },
- { _MMIO(0x9888), 0x10370000 },
- { _MMIO(0x9888), 0x0d933031 },
- { _MMIO(0x9888), 0x0f933e3f },
- { _MMIO(0x9888), 0x01933d00 },
- { _MMIO(0x9888), 0x0393073c },
- { _MMIO(0x9888), 0x0593000e },
- { _MMIO(0x9888), 0x1d930000 },
- { _MMIO(0x9888), 0x19930000 },
- { _MMIO(0x9888), 0x1b930000 },
- { _MMIO(0x9888), 0x1d900157 },
- { _MMIO(0x9888), 0x1f900158 },
- { _MMIO(0x9888), 0x35900000 },
- { _MMIO(0x9888), 0x2b908000 },
- { _MMIO(0x9888), 0x2d908000 },
- { _MMIO(0x9888), 0x2f908000 },
- { _MMIO(0x9888), 0x31908000 },
- { _MMIO(0x9888), 0x15908000 },
- { _MMIO(0x9888), 0x17908000 },
- { _MMIO(0x9888), 0x19908000 },
- { _MMIO(0x9888), 0x1b908000 },
- { _MMIO(0x9888), 0x1190001f },
- { _MMIO(0x9888), 0x51904400 },
- { _MMIO(0x9888), 0x41900020 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x45900c21 },
- { _MMIO(0x9888), 0x47900061 },
- { _MMIO(0x9888), 0x57904440 },
- { _MMIO(0x9888), 0x49900000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900000 },
- { _MMIO(0x9888), 0x59900004 },
- { _MMIO(0x9888), 0x43900000 },
- { _MMIO(0x9888), 0x53904444 },
-};
-
-static int
-get_render_basic_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_render_basic;
- lens[n] = ARRAY_SIZE(mux_config_render_basic);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_compute_basic[] = {
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x00800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2740), 0x00000000 },
-};
-
-static const struct i915_oa_reg flex_eu_config_compute_basic[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00000003 },
- { _MMIO(0xe658), 0x00002001 },
- { _MMIO(0xe758), 0x00778008 },
- { _MMIO(0xe45c), 0x00088078 },
- { _MMIO(0xe55c), 0x00808708 },
- { _MMIO(0xe65c), 0x00a08908 },
-};
-
-static const struct i915_oa_reg mux_config_compute_basic[] = {
- { _MMIO(0x9888), 0x104f00e0 },
- { _MMIO(0x9888), 0x124f1c00 },
- { _MMIO(0x9888), 0x106c00e0 },
- { _MMIO(0x9888), 0x37906800 },
- { _MMIO(0x9888), 0x3f900003 },
- { _MMIO(0x9888), 0x004e8000 },
- { _MMIO(0x9888), 0x1a4e0820 },
- { _MMIO(0x9888), 0x1c4e0002 },
- { _MMIO(0x9888), 0x064f0900 },
- { _MMIO(0x9888), 0x084f0032 },
- { _MMIO(0x9888), 0x0a4f1891 },
- { _MMIO(0x9888), 0x0c4f0e00 },
- { _MMIO(0x9888), 0x0e4f003c },
- { _MMIO(0x9888), 0x004f0d80 },
- { _MMIO(0x9888), 0x024f003b },
- { _MMIO(0x9888), 0x006c0002 },
- { _MMIO(0x9888), 0x086c0100 },
- { _MMIO(0x9888), 0x0c6c000c },
- { _MMIO(0x9888), 0x0e6c0b00 },
- { _MMIO(0x9888), 0x186c0000 },
- { _MMIO(0x9888), 0x1c6c0000 },
- { _MMIO(0x9888), 0x1e6c0000 },
- { _MMIO(0x9888), 0x001b4000 },
- { _MMIO(0x9888), 0x081b8000 },
- { _MMIO(0x9888), 0x0c1b4000 },
- { _MMIO(0x9888), 0x0e1b8000 },
- { _MMIO(0x9888), 0x101c8000 },
- { _MMIO(0x9888), 0x1a1c8000 },
- { _MMIO(0x9888), 0x1c1c0024 },
- { _MMIO(0x9888), 0x065b8000 },
- { _MMIO(0x9888), 0x085b4000 },
- { _MMIO(0x9888), 0x0a5bc000 },
- { _MMIO(0x9888), 0x0c5b8000 },
- { _MMIO(0x9888), 0x0e5b4000 },
- { _MMIO(0x9888), 0x005b8000 },
- { _MMIO(0x9888), 0x025b4000 },
- { _MMIO(0x9888), 0x1a5c6000 },
- { _MMIO(0x9888), 0x1c5c001b },
- { _MMIO(0x9888), 0x125c8000 },
- { _MMIO(0x9888), 0x145c8000 },
- { _MMIO(0x9888), 0x004c8000 },
- { _MMIO(0x9888), 0x0a4c2000 },
- { _MMIO(0x9888), 0x0c4c0208 },
- { _MMIO(0x9888), 0x000da000 },
- { _MMIO(0x9888), 0x060d8000 },
- { _MMIO(0x9888), 0x080da000 },
- { _MMIO(0x9888), 0x0a0da000 },
- { _MMIO(0x9888), 0x0c0da000 },
- { _MMIO(0x9888), 0x0e0da000 },
- { _MMIO(0x9888), 0x020d2000 },
- { _MMIO(0x9888), 0x0c0f5400 },
- { _MMIO(0x9888), 0x0e0f5500 },
- { _MMIO(0x9888), 0x100f0155 },
- { _MMIO(0x9888), 0x002c8000 },
- { _MMIO(0x9888), 0x0e2cc000 },
- { _MMIO(0x9888), 0x162cfb00 },
- { _MMIO(0x9888), 0x182c00be },
- { _MMIO(0x9888), 0x022cc000 },
- { _MMIO(0x9888), 0x042cc000 },
- { _MMIO(0x9888), 0x19900157 },
- { _MMIO(0x9888), 0x1b900158 },
- { _MMIO(0x9888), 0x1d900105 },
- { _MMIO(0x9888), 0x1f900103 },
- { _MMIO(0x9888), 0x35900000 },
- { _MMIO(0x9888), 0x11900fff },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900800 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x45900821 },
- { _MMIO(0x9888), 0x47900802 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900802 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900002 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x43900422 },
- { _MMIO(0x9888), 0x53904444 },
-};
-
-static int
-get_compute_basic_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_compute_basic;
- lens[n] = ARRAY_SIZE(mux_config_compute_basic);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_render_pipe_profile[] = {
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2770), 0x0007ffea },
- { _MMIO(0x2774), 0x00007ffc },
- { _MMIO(0x2778), 0x0007affa },
- { _MMIO(0x277c), 0x0000f5fd },
- { _MMIO(0x2780), 0x00079ffa },
- { _MMIO(0x2784), 0x0000f3fb },
- { _MMIO(0x2788), 0x0007bf7a },
- { _MMIO(0x278c), 0x0000f7e7 },
- { _MMIO(0x2790), 0x0007fefa },
- { _MMIO(0x2794), 0x0000f7cf },
- { _MMIO(0x2798), 0x00077ffa },
- { _MMIO(0x279c), 0x0000efdf },
- { _MMIO(0x27a0), 0x0006fffa },
- { _MMIO(0x27a4), 0x0000cfbf },
- { _MMIO(0x27a8), 0x0003fffa },
- { _MMIO(0x27ac), 0x00005f7f },
-};
-
-static const struct i915_oa_reg flex_eu_config_render_pipe_profile[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00015014 },
- { _MMIO(0xe658), 0x00025024 },
- { _MMIO(0xe758), 0x00035034 },
- { _MMIO(0xe45c), 0x00045044 },
- { _MMIO(0xe55c), 0x00055054 },
- { _MMIO(0xe65c), 0x00065064 },
-};
-
-static const struct i915_oa_reg mux_config_render_pipe_profile[] = {
- { _MMIO(0x9888), 0x0c0e001f },
- { _MMIO(0x9888), 0x0a0f0000 },
- { _MMIO(0x9888), 0x10116800 },
- { _MMIO(0x9888), 0x178a03e0 },
- { _MMIO(0x9888), 0x11824c00 },
- { _MMIO(0x9888), 0x11830020 },
- { _MMIO(0x9888), 0x13840020 },
- { _MMIO(0x9888), 0x11850019 },
- { _MMIO(0x9888), 0x11860007 },
- { _MMIO(0x9888), 0x01870c40 },
- { _MMIO(0x9888), 0x17880000 },
- { _MMIO(0x9888), 0x022f4000 },
- { _MMIO(0x9888), 0x0a4c0040 },
- { _MMIO(0x9888), 0x0c0d8000 },
- { _MMIO(0x9888), 0x040d4000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x020e5400 },
- { _MMIO(0x9888), 0x000e0000 },
- { _MMIO(0x9888), 0x080f0040 },
- { _MMIO(0x9888), 0x000f0000 },
- { _MMIO(0x9888), 0x100f0000 },
- { _MMIO(0x9888), 0x0e0f0040 },
- { _MMIO(0x9888), 0x0c2c8000 },
- { _MMIO(0x9888), 0x06104000 },
- { _MMIO(0x9888), 0x06110012 },
- { _MMIO(0x9888), 0x06131000 },
- { _MMIO(0x9888), 0x01898000 },
- { _MMIO(0x9888), 0x0d890100 },
- { _MMIO(0x9888), 0x03898000 },
- { _MMIO(0x9888), 0x09808000 },
- { _MMIO(0x9888), 0x0b808000 },
- { _MMIO(0x9888), 0x0380c000 },
- { _MMIO(0x9888), 0x0f8a0075 },
- { _MMIO(0x9888), 0x1d8a0000 },
- { _MMIO(0x9888), 0x118a8000 },
- { _MMIO(0x9888), 0x1b8a4000 },
- { _MMIO(0x9888), 0x138a8000 },
- { _MMIO(0x9888), 0x1d81a000 },
- { _MMIO(0x9888), 0x15818000 },
- { _MMIO(0x9888), 0x17818000 },
- { _MMIO(0x9888), 0x0b820030 },
- { _MMIO(0x9888), 0x07828000 },
- { _MMIO(0x9888), 0x0d824000 },
- { _MMIO(0x9888), 0x0f828000 },
- { _MMIO(0x9888), 0x05824000 },
- { _MMIO(0x9888), 0x0d830003 },
- { _MMIO(0x9888), 0x0583000c },
- { _MMIO(0x9888), 0x09830000 },
- { _MMIO(0x9888), 0x03838000 },
- { _MMIO(0x9888), 0x07838000 },
- { _MMIO(0x9888), 0x0b840980 },
- { _MMIO(0x9888), 0x03844d80 },
- { _MMIO(0x9888), 0x11840000 },
- { _MMIO(0x9888), 0x09848000 },
- { _MMIO(0x9888), 0x09850080 },
- { _MMIO(0x9888), 0x03850003 },
- { _MMIO(0x9888), 0x01850000 },
- { _MMIO(0x9888), 0x07860000 },
- { _MMIO(0x9888), 0x0f860400 },
- { _MMIO(0x9888), 0x09870032 },
- { _MMIO(0x9888), 0x01888052 },
- { _MMIO(0x9888), 0x11880000 },
- { _MMIO(0x9888), 0x09884000 },
- { _MMIO(0x9888), 0x1b931001 },
- { _MMIO(0x9888), 0x1d930001 },
- { _MMIO(0x9888), 0x19934000 },
- { _MMIO(0x9888), 0x1b958000 },
- { _MMIO(0x9888), 0x1d950094 },
- { _MMIO(0x9888), 0x19958000 },
- { _MMIO(0x9888), 0x09e58000 },
- { _MMIO(0x9888), 0x0be58000 },
- { _MMIO(0x9888), 0x03e5c000 },
- { _MMIO(0x9888), 0x0592c000 },
- { _MMIO(0x9888), 0x0b928000 },
- { _MMIO(0x9888), 0x0d924000 },
- { _MMIO(0x9888), 0x0f924000 },
- { _MMIO(0x9888), 0x11928000 },
- { _MMIO(0x9888), 0x1392c000 },
- { _MMIO(0x9888), 0x09924000 },
- { _MMIO(0x9888), 0x01985000 },
- { _MMIO(0x9888), 0x07988000 },
- { _MMIO(0x9888), 0x09981000 },
- { _MMIO(0x9888), 0x0b982000 },
- { _MMIO(0x9888), 0x0d982000 },
- { _MMIO(0x9888), 0x0f989000 },
- { _MMIO(0x9888), 0x05982000 },
- { _MMIO(0x9888), 0x13904000 },
- { _MMIO(0x9888), 0x21904000 },
- { _MMIO(0x9888), 0x23904000 },
- { _MMIO(0x9888), 0x25908000 },
- { _MMIO(0x9888), 0x27904000 },
- { _MMIO(0x9888), 0x29908000 },
- { _MMIO(0x9888), 0x2b904000 },
- { _MMIO(0x9888), 0x2f904000 },
- { _MMIO(0x9888), 0x31904000 },
- { _MMIO(0x9888), 0x15904000 },
- { _MMIO(0x9888), 0x17908000 },
- { _MMIO(0x9888), 0x19908000 },
- { _MMIO(0x9888), 0x1b904000 },
- { _MMIO(0x9888), 0x1190c080 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900440 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x45900400 },
- { _MMIO(0x9888), 0x47900c21 },
- { _MMIO(0x9888), 0x57900400 },
- { _MMIO(0x9888), 0x49900042 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900024 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x43900841 },
- { _MMIO(0x9888), 0x53900400 },
-};
-
-static int
-get_render_pipe_profile_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_render_pipe_profile;
- lens[n] = ARRAY_SIZE(mux_config_render_pipe_profile);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_memory_reads[] = {
- { _MMIO(0x272c), 0xffffffff },
- { _MMIO(0x2728), 0xffffffff },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x271c), 0xffffffff },
- { _MMIO(0x2718), 0xffffffff },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x274c), 0x86543210 },
- { _MMIO(0x2748), 0x86543210 },
- { _MMIO(0x2744), 0x00006667 },
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x275c), 0x86543210 },
- { _MMIO(0x2758), 0x86543210 },
- { _MMIO(0x2754), 0x00006465 },
- { _MMIO(0x2750), 0x00000000 },
- { _MMIO(0x2770), 0x0007f81a },
- { _MMIO(0x2774), 0x0000fe00 },
- { _MMIO(0x2778), 0x0007f82a },
- { _MMIO(0x277c), 0x0000fe00 },
- { _MMIO(0x2780), 0x0007f872 },
- { _MMIO(0x2784), 0x0000fe00 },
- { _MMIO(0x2788), 0x0007f8ba },
- { _MMIO(0x278c), 0x0000fe00 },
- { _MMIO(0x2790), 0x0007f87a },
- { _MMIO(0x2794), 0x0000fe00 },
- { _MMIO(0x2798), 0x0007f8ea },
- { _MMIO(0x279c), 0x0000fe00 },
- { _MMIO(0x27a0), 0x0007f8e2 },
- { _MMIO(0x27a4), 0x0000fe00 },
- { _MMIO(0x27a8), 0x0007f8f2 },
- { _MMIO(0x27ac), 0x0000fe00 },
-};
-
-static const struct i915_oa_reg flex_eu_config_memory_reads[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00015014 },
- { _MMIO(0xe658), 0x00025024 },
- { _MMIO(0xe758), 0x00035034 },
- { _MMIO(0xe45c), 0x00045044 },
- { _MMIO(0xe55c), 0x00055054 },
- { _MMIO(0xe65c), 0x00065064 },
-};
-
-static const struct i915_oa_reg mux_config_memory_reads[] = {
- { _MMIO(0x9888), 0x11810c00 },
- { _MMIO(0x9888), 0x1381001a },
- { _MMIO(0x9888), 0x37906800 },
- { _MMIO(0x9888), 0x3f900064 },
- { _MMIO(0x9888), 0x03811300 },
- { _MMIO(0x9888), 0x05811b12 },
- { _MMIO(0x9888), 0x0781001a },
- { _MMIO(0x9888), 0x1f810000 },
- { _MMIO(0x9888), 0x17810000 },
- { _MMIO(0x9888), 0x19810000 },
- { _MMIO(0x9888), 0x1b810000 },
- { _MMIO(0x9888), 0x1d810000 },
- { _MMIO(0x9888), 0x1b930055 },
- { _MMIO(0x9888), 0x03e58000 },
- { _MMIO(0x9888), 0x05e5c000 },
- { _MMIO(0x9888), 0x07e54000 },
- { _MMIO(0x9888), 0x13900150 },
- { _MMIO(0x9888), 0x21900151 },
- { _MMIO(0x9888), 0x23900152 },
- { _MMIO(0x9888), 0x25900153 },
- { _MMIO(0x9888), 0x27900154 },
- { _MMIO(0x9888), 0x29900155 },
- { _MMIO(0x9888), 0x2b900156 },
- { _MMIO(0x9888), 0x2d900157 },
- { _MMIO(0x9888), 0x2f90015f },
- { _MMIO(0x9888), 0x31900105 },
- { _MMIO(0x9888), 0x15900103 },
- { _MMIO(0x9888), 0x17900101 },
- { _MMIO(0x9888), 0x35900000 },
- { _MMIO(0x9888), 0x19908000 },
- { _MMIO(0x9888), 0x1b908000 },
- { _MMIO(0x9888), 0x1d908000 },
- { _MMIO(0x9888), 0x1f908000 },
- { _MMIO(0x9888), 0x11900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900c60 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x45900c00 },
- { _MMIO(0x9888), 0x47900c63 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900c63 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900063 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x43900003 },
- { _MMIO(0x9888), 0x53900000 },
-};
-
-static int
-get_memory_reads_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_memory_reads;
- lens[n] = ARRAY_SIZE(mux_config_memory_reads);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_memory_writes[] = {
- { _MMIO(0x272c), 0xffffffff },
- { _MMIO(0x2728), 0xffffffff },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x271c), 0xffffffff },
- { _MMIO(0x2718), 0xffffffff },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x274c), 0x86543210 },
- { _MMIO(0x2748), 0x86543210 },
- { _MMIO(0x2744), 0x00006667 },
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x275c), 0x86543210 },
- { _MMIO(0x2758), 0x86543210 },
- { _MMIO(0x2754), 0x00006465 },
- { _MMIO(0x2750), 0x00000000 },
- { _MMIO(0x2770), 0x0007f81a },
- { _MMIO(0x2774), 0x0000fe00 },
- { _MMIO(0x2778), 0x0007f82a },
- { _MMIO(0x277c), 0x0000fe00 },
- { _MMIO(0x2780), 0x0007f822 },
- { _MMIO(0x2784), 0x0000fe00 },
- { _MMIO(0x2788), 0x0007f8ba },
- { _MMIO(0x278c), 0x0000fe00 },
- { _MMIO(0x2790), 0x0007f87a },
- { _MMIO(0x2794), 0x0000fe00 },
- { _MMIO(0x2798), 0x0007f8ea },
- { _MMIO(0x279c), 0x0000fe00 },
- { _MMIO(0x27a0), 0x0007f8e2 },
- { _MMIO(0x27a4), 0x0000fe00 },
- { _MMIO(0x27a8), 0x0007f8f2 },
- { _MMIO(0x27ac), 0x0000fe00 },
-};
-
-static const struct i915_oa_reg flex_eu_config_memory_writes[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00015014 },
- { _MMIO(0xe658), 0x00025024 },
- { _MMIO(0xe758), 0x00035034 },
- { _MMIO(0xe45c), 0x00045044 },
- { _MMIO(0xe55c), 0x00055054 },
- { _MMIO(0xe65c), 0x00065064 },
-};
-
-static const struct i915_oa_reg mux_config_memory_writes[] = {
- { _MMIO(0x9888), 0x11810c00 },
- { _MMIO(0x9888), 0x1381001a },
- { _MMIO(0x9888), 0x37906800 },
- { _MMIO(0x9888), 0x3f901000 },
- { _MMIO(0x9888), 0x03811300 },
- { _MMIO(0x9888), 0x05811b12 },
- { _MMIO(0x9888), 0x0781001a },
- { _MMIO(0x9888), 0x1f810000 },
- { _MMIO(0x9888), 0x17810000 },
- { _MMIO(0x9888), 0x19810000 },
- { _MMIO(0x9888), 0x1b810000 },
- { _MMIO(0x9888), 0x1d810000 },
- { _MMIO(0x9888), 0x1b930055 },
- { _MMIO(0x9888), 0x03e58000 },
- { _MMIO(0x9888), 0x05e5c000 },
- { _MMIO(0x9888), 0x07e54000 },
- { _MMIO(0x9888), 0x13900160 },
- { _MMIO(0x9888), 0x21900161 },
- { _MMIO(0x9888), 0x23900162 },
- { _MMIO(0x9888), 0x25900163 },
- { _MMIO(0x9888), 0x27900164 },
- { _MMIO(0x9888), 0x29900165 },
- { _MMIO(0x9888), 0x2b900166 },
- { _MMIO(0x9888), 0x2d900167 },
- { _MMIO(0x9888), 0x2f900150 },
- { _MMIO(0x9888), 0x31900105 },
- { _MMIO(0x9888), 0x15900103 },
- { _MMIO(0x9888), 0x17900101 },
- { _MMIO(0x9888), 0x35900000 },
- { _MMIO(0x9888), 0x19908000 },
- { _MMIO(0x9888), 0x1b908000 },
- { _MMIO(0x9888), 0x1d908000 },
- { _MMIO(0x9888), 0x1f908000 },
- { _MMIO(0x9888), 0x11900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900c60 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x45900c00 },
- { _MMIO(0x9888), 0x47900c63 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900c63 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900063 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x43900003 },
- { _MMIO(0x9888), 0x53900000 },
-};
-
-static int
-get_memory_writes_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_memory_writes;
- lens[n] = ARRAY_SIZE(mux_config_memory_writes);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_compute_extended[] = {
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2770), 0x0007fc2a },
- { _MMIO(0x2774), 0x0000bf00 },
- { _MMIO(0x2778), 0x0007fc6a },
- { _MMIO(0x277c), 0x0000bf00 },
- { _MMIO(0x2780), 0x0007fc92 },
- { _MMIO(0x2784), 0x0000bf00 },
- { _MMIO(0x2788), 0x0007fca2 },
- { _MMIO(0x278c), 0x0000bf00 },
- { _MMIO(0x2790), 0x0007fc32 },
- { _MMIO(0x2794), 0x0000bf00 },
- { _MMIO(0x2798), 0x0007fc9a },
- { _MMIO(0x279c), 0x0000bf00 },
- { _MMIO(0x27a0), 0x0007fe6a },
- { _MMIO(0x27a4), 0x0000bf00 },
- { _MMIO(0x27a8), 0x0007fe7a },
- { _MMIO(0x27ac), 0x0000bf00 },
-};
-
-static const struct i915_oa_reg flex_eu_config_compute_extended[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00000003 },
- { _MMIO(0xe658), 0x00002001 },
- { _MMIO(0xe758), 0x00778008 },
- { _MMIO(0xe45c), 0x00088078 },
- { _MMIO(0xe55c), 0x00808708 },
- { _MMIO(0xe65c), 0x00a08908 },
-};
-
-static const struct i915_oa_reg mux_config_compute_extended[] = {
- { _MMIO(0x9888), 0x106c00e0 },
- { _MMIO(0x9888), 0x141c8160 },
- { _MMIO(0x9888), 0x161c8015 },
- { _MMIO(0x9888), 0x181c0120 },
- { _MMIO(0x9888), 0x004e8000 },
- { _MMIO(0x9888), 0x0e4e8000 },
- { _MMIO(0x9888), 0x184e8000 },
- { _MMIO(0x9888), 0x1a4eaaa0 },
- { _MMIO(0x9888), 0x1c4e0002 },
- { _MMIO(0x9888), 0x024e8000 },
- { _MMIO(0x9888), 0x044e8000 },
- { _MMIO(0x9888), 0x064e8000 },
- { _MMIO(0x9888), 0x084e8000 },
- { _MMIO(0x9888), 0x0a4e8000 },
- { _MMIO(0x9888), 0x0e6c0b01 },
- { _MMIO(0x9888), 0x006c0200 },
- { _MMIO(0x9888), 0x026c000c },
- { _MMIO(0x9888), 0x1c6c0000 },
- { _MMIO(0x9888), 0x1e6c0000 },
- { _MMIO(0x9888), 0x1a6c0000 },
- { _MMIO(0x9888), 0x0e1bc000 },
- { _MMIO(0x9888), 0x001b8000 },
- { _MMIO(0x9888), 0x021bc000 },
- { _MMIO(0x9888), 0x001c0041 },
- { _MMIO(0x9888), 0x061c4200 },
- { _MMIO(0x9888), 0x081c4443 },
- { _MMIO(0x9888), 0x0a1c4645 },
- { _MMIO(0x9888), 0x0c1c7647 },
- { _MMIO(0x9888), 0x041c7357 },
- { _MMIO(0x9888), 0x1c1c0030 },
- { _MMIO(0x9888), 0x101c0000 },
- { _MMIO(0x9888), 0x1a1c0000 },
- { _MMIO(0x9888), 0x121c8000 },
- { _MMIO(0x9888), 0x004c8000 },
- { _MMIO(0x9888), 0x0a4caa2a },
- { _MMIO(0x9888), 0x0c4c02aa },
- { _MMIO(0x9888), 0x084ca000 },
- { _MMIO(0x9888), 0x000da000 },
- { _MMIO(0x9888), 0x060d8000 },
- { _MMIO(0x9888), 0x080da000 },
- { _MMIO(0x9888), 0x0a0da000 },
- { _MMIO(0x9888), 0x0c0da000 },
- { _MMIO(0x9888), 0x0e0da000 },
- { _MMIO(0x9888), 0x020da000 },
- { _MMIO(0x9888), 0x040da000 },
- { _MMIO(0x9888), 0x0c0f5400 },
- { _MMIO(0x9888), 0x0e0f5515 },
- { _MMIO(0x9888), 0x100f0155 },
- { _MMIO(0x9888), 0x002c8000 },
- { _MMIO(0x9888), 0x0e2c8000 },
- { _MMIO(0x9888), 0x162caa00 },
- { _MMIO(0x9888), 0x182c00aa },
- { _MMIO(0x9888), 0x022c8000 },
- { _MMIO(0x9888), 0x042c8000 },
- { _MMIO(0x9888), 0x062c8000 },
- { _MMIO(0x9888), 0x082c8000 },
- { _MMIO(0x9888), 0x0a2c8000 },
- { _MMIO(0x9888), 0x11907fff },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900040 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x45900802 },
- { _MMIO(0x9888), 0x47900842 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900842 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900000 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x43900800 },
- { _MMIO(0x9888), 0x53900000 },
-};
-
-static int
-get_compute_extended_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_compute_extended;
- lens[n] = ARRAY_SIZE(mux_config_compute_extended);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_compute_l3_cache[] = {
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x30800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x30800000 },
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2770), 0x0007fffa },
- { _MMIO(0x2774), 0x0000fefe },
- { _MMIO(0x2778), 0x0007fffa },
- { _MMIO(0x277c), 0x0000fefd },
- { _MMIO(0x2790), 0x0007fffa },
- { _MMIO(0x2794), 0x0000fbef },
- { _MMIO(0x2798), 0x0007fffa },
- { _MMIO(0x279c), 0x0000fbdf },
-};
-
-static const struct i915_oa_reg flex_eu_config_compute_l3_cache[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00000003 },
- { _MMIO(0xe658), 0x00002001 },
- { _MMIO(0xe758), 0x00101100 },
- { _MMIO(0xe45c), 0x00201200 },
- { _MMIO(0xe55c), 0x00301300 },
- { _MMIO(0xe65c), 0x00401400 },
-};
-
-static const struct i915_oa_reg mux_config_compute_l3_cache[] = {
- { _MMIO(0x9888), 0x166c0760 },
- { _MMIO(0x9888), 0x1593001e },
- { _MMIO(0x9888), 0x3f900003 },
- { _MMIO(0x9888), 0x004e8000 },
- { _MMIO(0x9888), 0x0e4e8000 },
- { _MMIO(0x9888), 0x184e8000 },
- { _MMIO(0x9888), 0x1a4e8020 },
- { _MMIO(0x9888), 0x1c4e0002 },
- { _MMIO(0x9888), 0x006c0051 },
- { _MMIO(0x9888), 0x066c5000 },
- { _MMIO(0x9888), 0x086c5c5d },
- { _MMIO(0x9888), 0x0e6c5e5f },
- { _MMIO(0x9888), 0x106c0000 },
- { _MMIO(0x9888), 0x186c0000 },
- { _MMIO(0x9888), 0x1c6c0000 },
- { _MMIO(0x9888), 0x1e6c0000 },
- { _MMIO(0x9888), 0x001b4000 },
- { _MMIO(0x9888), 0x061b8000 },
- { _MMIO(0x9888), 0x081bc000 },
- { _MMIO(0x9888), 0x0e1bc000 },
- { _MMIO(0x9888), 0x101c8000 },
- { _MMIO(0x9888), 0x1a1ce000 },
- { _MMIO(0x9888), 0x1c1c0030 },
- { _MMIO(0x9888), 0x004c8000 },
- { _MMIO(0x9888), 0x0a4c2a00 },
- { _MMIO(0x9888), 0x0c4c0280 },
- { _MMIO(0x9888), 0x000d2000 },
- { _MMIO(0x9888), 0x060d8000 },
- { _MMIO(0x9888), 0x080da000 },
- { _MMIO(0x9888), 0x0e0da000 },
- { _MMIO(0x9888), 0x0c0f0400 },
- { _MMIO(0x9888), 0x0e0f1500 },
- { _MMIO(0x9888), 0x100f0140 },
- { _MMIO(0x9888), 0x002c8000 },
- { _MMIO(0x9888), 0x0e2c8000 },
- { _MMIO(0x9888), 0x162c0a00 },
- { _MMIO(0x9888), 0x182c00a0 },
- { _MMIO(0x9888), 0x03933300 },
- { _MMIO(0x9888), 0x05930032 },
- { _MMIO(0x9888), 0x11930000 },
- { _MMIO(0x9888), 0x1b930000 },
- { _MMIO(0x9888), 0x1d900157 },
- { _MMIO(0x9888), 0x1f900158 },
- { _MMIO(0x9888), 0x35900000 },
- { _MMIO(0x9888), 0x19908000 },
- { _MMIO(0x9888), 0x1b908000 },
- { _MMIO(0x9888), 0x1190030f },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900000 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x45900021 },
- { _MMIO(0x9888), 0x47900000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x4b900000 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x53904444 },
- { _MMIO(0x9888), 0x43900000 },
-};
-
-static int
-get_compute_l3_cache_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_compute_l3_cache;
- lens[n] = ARRAY_SIZE(mux_config_compute_l3_cache);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_hdc_and_sf[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x10800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2770), 0x00000002 },
- { _MMIO(0x2774), 0x0000fdff },
-};
-
-static const struct i915_oa_reg flex_eu_config_hdc_and_sf[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_hdc_and_sf[] = {
- { _MMIO(0x9888), 0x104f0232 },
- { _MMIO(0x9888), 0x124f4640 },
- { _MMIO(0x9888), 0x106c0232 },
- { _MMIO(0x9888), 0x11834400 },
- { _MMIO(0x9888), 0x0a4e8000 },
- { _MMIO(0x9888), 0x0c4e8000 },
- { _MMIO(0x9888), 0x004f1880 },
- { _MMIO(0x9888), 0x024f08bb },
- { _MMIO(0x9888), 0x044f001b },
- { _MMIO(0x9888), 0x046c0100 },
- { _MMIO(0x9888), 0x066c000b },
- { _MMIO(0x9888), 0x1a6c0000 },
- { _MMIO(0x9888), 0x041b8000 },
- { _MMIO(0x9888), 0x061b4000 },
- { _MMIO(0x9888), 0x1a1c1800 },
- { _MMIO(0x9888), 0x005b8000 },
- { _MMIO(0x9888), 0x025bc000 },
- { _MMIO(0x9888), 0x045b4000 },
- { _MMIO(0x9888), 0x125c8000 },
- { _MMIO(0x9888), 0x145c8000 },
- { _MMIO(0x9888), 0x165c8000 },
- { _MMIO(0x9888), 0x185c8000 },
- { _MMIO(0x9888), 0x0a4c00a0 },
- { _MMIO(0x9888), 0x000d8000 },
- { _MMIO(0x9888), 0x020da000 },
- { _MMIO(0x9888), 0x040da000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x0c0f5000 },
- { _MMIO(0x9888), 0x0e0f0055 },
- { _MMIO(0x9888), 0x022cc000 },
- { _MMIO(0x9888), 0x042cc000 },
- { _MMIO(0x9888), 0x062cc000 },
- { _MMIO(0x9888), 0x082cc000 },
- { _MMIO(0x9888), 0x0a2c8000 },
- { _MMIO(0x9888), 0x0c2c8000 },
- { _MMIO(0x9888), 0x0f828000 },
- { _MMIO(0x9888), 0x0f8305c0 },
- { _MMIO(0x9888), 0x09830000 },
- { _MMIO(0x9888), 0x07830000 },
- { _MMIO(0x9888), 0x1d950080 },
- { _MMIO(0x9888), 0x13928000 },
- { _MMIO(0x9888), 0x0f988000 },
- { _MMIO(0x9888), 0x31904000 },
- { _MMIO(0x9888), 0x1190fc00 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x4b900040 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900800 },
- { _MMIO(0x9888), 0x43900842 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900000 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static int
-get_hdc_and_sf_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_hdc_and_sf;
- lens[n] = ARRAY_SIZE(mux_config_hdc_and_sf);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_l3_1[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2770), 0x00100070 },
- { _MMIO(0x2774), 0x0000fff1 },
- { _MMIO(0x2778), 0x00014002 },
- { _MMIO(0x277c), 0x0000c3ff },
- { _MMIO(0x2780), 0x00010002 },
- { _MMIO(0x2784), 0x0000c7ff },
- { _MMIO(0x2788), 0x00004002 },
- { _MMIO(0x278c), 0x0000d3ff },
- { _MMIO(0x2790), 0x00100700 },
- { _MMIO(0x2794), 0x0000ff1f },
- { _MMIO(0x2798), 0x00001402 },
- { _MMIO(0x279c), 0x0000fc3f },
- { _MMIO(0x27a0), 0x00001002 },
- { _MMIO(0x27a4), 0x0000fc7f },
- { _MMIO(0x27a8), 0x00000402 },
- { _MMIO(0x27ac), 0x0000fd3f },
-};
-
-static const struct i915_oa_reg flex_eu_config_l3_1[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_l3_1[] = {
- { _MMIO(0x9888), 0x126c7b40 },
- { _MMIO(0x9888), 0x166c0020 },
- { _MMIO(0x9888), 0x0a603444 },
- { _MMIO(0x9888), 0x0a613400 },
- { _MMIO(0x9888), 0x1a4ea800 },
- { _MMIO(0x9888), 0x1c4e0002 },
- { _MMIO(0x9888), 0x024e8000 },
- { _MMIO(0x9888), 0x044e8000 },
- { _MMIO(0x9888), 0x064e8000 },
- { _MMIO(0x9888), 0x084e8000 },
- { _MMIO(0x9888), 0x0a4e8000 },
- { _MMIO(0x9888), 0x064f4000 },
- { _MMIO(0x9888), 0x0c6c5327 },
- { _MMIO(0x9888), 0x0e6c5425 },
- { _MMIO(0x9888), 0x006c2a00 },
- { _MMIO(0x9888), 0x026c285b },
- { _MMIO(0x9888), 0x046c005c },
- { _MMIO(0x9888), 0x106c0000 },
- { _MMIO(0x9888), 0x1c6c0000 },
- { _MMIO(0x9888), 0x1e6c0000 },
- { _MMIO(0x9888), 0x1a6c0800 },
- { _MMIO(0x9888), 0x0c1bc000 },
- { _MMIO(0x9888), 0x0e1bc000 },
- { _MMIO(0x9888), 0x001b8000 },
- { _MMIO(0x9888), 0x021bc000 },
- { _MMIO(0x9888), 0x041bc000 },
- { _MMIO(0x9888), 0x1c1c003c },
- { _MMIO(0x9888), 0x121c8000 },
- { _MMIO(0x9888), 0x141c8000 },
- { _MMIO(0x9888), 0x161c8000 },
- { _MMIO(0x9888), 0x181c8000 },
- { _MMIO(0x9888), 0x1a1c0800 },
- { _MMIO(0x9888), 0x065b4000 },
- { _MMIO(0x9888), 0x1a5c1000 },
- { _MMIO(0x9888), 0x10600000 },
- { _MMIO(0x9888), 0x04600000 },
- { _MMIO(0x9888), 0x0c610044 },
- { _MMIO(0x9888), 0x10610000 },
- { _MMIO(0x9888), 0x06610000 },
- { _MMIO(0x9888), 0x0c4c02a8 },
- { _MMIO(0x9888), 0x084ca000 },
- { _MMIO(0x9888), 0x0a4c002a },
- { _MMIO(0x9888), 0x0c0da000 },
- { _MMIO(0x9888), 0x0e0da000 },
- { _MMIO(0x9888), 0x000d8000 },
- { _MMIO(0x9888), 0x020da000 },
- { _MMIO(0x9888), 0x040da000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x100f0154 },
- { _MMIO(0x9888), 0x0c0f5000 },
- { _MMIO(0x9888), 0x0e0f0055 },
- { _MMIO(0x9888), 0x182c00aa },
- { _MMIO(0x9888), 0x022c8000 },
- { _MMIO(0x9888), 0x042c8000 },
- { _MMIO(0x9888), 0x062c8000 },
- { _MMIO(0x9888), 0x082c8000 },
- { _MMIO(0x9888), 0x0a2c8000 },
- { _MMIO(0x9888), 0x0c2cc000 },
- { _MMIO(0x9888), 0x1190ffc0 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900420 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900021 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900400 },
- { _MMIO(0x9888), 0x43900421 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900040 },
-};
-
-static int
-get_l3_1_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_l3_1;
- lens[n] = ARRAY_SIZE(mux_config_l3_1);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_l3_2[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2770), 0x00100070 },
- { _MMIO(0x2774), 0x0000fff1 },
- { _MMIO(0x2778), 0x00028002 },
- { _MMIO(0x277c), 0x000087ff },
- { _MMIO(0x2780), 0x00020002 },
- { _MMIO(0x2784), 0x00008fff },
- { _MMIO(0x2788), 0x00008002 },
- { _MMIO(0x278c), 0x0000a7ff },
-};
-
-static const struct i915_oa_reg flex_eu_config_l3_2[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_l3_2[] = {
- { _MMIO(0x9888), 0x126c02e0 },
- { _MMIO(0x9888), 0x146c0001 },
- { _MMIO(0x9888), 0x0a623400 },
- { _MMIO(0x9888), 0x044e8000 },
- { _MMIO(0x9888), 0x064e8000 },
- { _MMIO(0x9888), 0x084e8000 },
- { _MMIO(0x9888), 0x0a4e8000 },
- { _MMIO(0x9888), 0x064f4000 },
- { _MMIO(0x9888), 0x026c3324 },
- { _MMIO(0x9888), 0x046c3422 },
- { _MMIO(0x9888), 0x106c0000 },
- { _MMIO(0x9888), 0x1a6c0000 },
- { _MMIO(0x9888), 0x021bc000 },
- { _MMIO(0x9888), 0x041bc000 },
- { _MMIO(0x9888), 0x141c8000 },
- { _MMIO(0x9888), 0x161c8000 },
- { _MMIO(0x9888), 0x181c8000 },
- { _MMIO(0x9888), 0x1a1c0800 },
- { _MMIO(0x9888), 0x065b4000 },
- { _MMIO(0x9888), 0x1a5c1000 },
- { _MMIO(0x9888), 0x06614000 },
- { _MMIO(0x9888), 0x0c620044 },
- { _MMIO(0x9888), 0x10620000 },
- { _MMIO(0x9888), 0x06620000 },
- { _MMIO(0x9888), 0x084c8000 },
- { _MMIO(0x9888), 0x0a4c002a },
- { _MMIO(0x9888), 0x020da000 },
- { _MMIO(0x9888), 0x040da000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x0c0f4000 },
- { _MMIO(0x9888), 0x0e0f0055 },
- { _MMIO(0x9888), 0x042c8000 },
- { _MMIO(0x9888), 0x062c8000 },
- { _MMIO(0x9888), 0x082c8000 },
- { _MMIO(0x9888), 0x0a2c8000 },
- { _MMIO(0x9888), 0x0c2cc000 },
- { _MMIO(0x9888), 0x1190f800 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x43900000 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900000 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static int
-get_l3_2_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_l3_2;
- lens[n] = ARRAY_SIZE(mux_config_l3_2);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_l3_3[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2770), 0x00100070 },
- { _MMIO(0x2774), 0x0000fff1 },
- { _MMIO(0x2778), 0x00028002 },
- { _MMIO(0x277c), 0x000087ff },
- { _MMIO(0x2780), 0x00020002 },
- { _MMIO(0x2784), 0x00008fff },
- { _MMIO(0x2788), 0x00008002 },
- { _MMIO(0x278c), 0x0000a7ff },
-};
-
-static const struct i915_oa_reg flex_eu_config_l3_3[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_l3_3[] = {
- { _MMIO(0x9888), 0x126c4e80 },
- { _MMIO(0x9888), 0x146c0000 },
- { _MMIO(0x9888), 0x0a633400 },
- { _MMIO(0x9888), 0x044e8000 },
- { _MMIO(0x9888), 0x064e8000 },
- { _MMIO(0x9888), 0x084e8000 },
- { _MMIO(0x9888), 0x0a4e8000 },
- { _MMIO(0x9888), 0x0c4e8000 },
- { _MMIO(0x9888), 0x026c3321 },
- { _MMIO(0x9888), 0x046c342f },
- { _MMIO(0x9888), 0x106c0000 },
- { _MMIO(0x9888), 0x1a6c2000 },
- { _MMIO(0x9888), 0x021bc000 },
- { _MMIO(0x9888), 0x041bc000 },
- { _MMIO(0x9888), 0x061b4000 },
- { _MMIO(0x9888), 0x141c8000 },
- { _MMIO(0x9888), 0x161c8000 },
- { _MMIO(0x9888), 0x181c8000 },
- { _MMIO(0x9888), 0x1a1c1800 },
- { _MMIO(0x9888), 0x06604000 },
- { _MMIO(0x9888), 0x0c630044 },
- { _MMIO(0x9888), 0x10630000 },
- { _MMIO(0x9888), 0x06630000 },
- { _MMIO(0x9888), 0x084c8000 },
- { _MMIO(0x9888), 0x0a4c00aa },
- { _MMIO(0x9888), 0x020da000 },
- { _MMIO(0x9888), 0x040da000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x0c0f4000 },
- { _MMIO(0x9888), 0x0e0f0055 },
- { _MMIO(0x9888), 0x042c8000 },
- { _MMIO(0x9888), 0x062c8000 },
- { _MMIO(0x9888), 0x082c8000 },
- { _MMIO(0x9888), 0x0a2c8000 },
- { _MMIO(0x9888), 0x0c2c8000 },
- { _MMIO(0x9888), 0x1190f800 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x43900842 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900002 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static int
-get_l3_3_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_l3_3;
- lens[n] = ARRAY_SIZE(mux_config_l3_3);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_rasterizer_and_pixel_backend[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x30800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2770), 0x00000002 },
- { _MMIO(0x2774), 0x0000efff },
- { _MMIO(0x2778), 0x00006000 },
- { _MMIO(0x277c), 0x0000f3ff },
-};
-
-static const struct i915_oa_reg flex_eu_config_rasterizer_and_pixel_backend[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_rasterizer_and_pixel_backend[] = {
- { _MMIO(0x9888), 0x102f3800 },
- { _MMIO(0x9888), 0x144d0500 },
- { _MMIO(0x9888), 0x120d03c0 },
- { _MMIO(0x9888), 0x140d03cf },
- { _MMIO(0x9888), 0x0c0f0004 },
- { _MMIO(0x9888), 0x0c4e4000 },
- { _MMIO(0x9888), 0x042f0480 },
- { _MMIO(0x9888), 0x082f0000 },
- { _MMIO(0x9888), 0x022f0000 },
- { _MMIO(0x9888), 0x0a4c0090 },
- { _MMIO(0x9888), 0x064d0027 },
- { _MMIO(0x9888), 0x004d0000 },
- { _MMIO(0x9888), 0x000d0d40 },
- { _MMIO(0x9888), 0x020d803f },
- { _MMIO(0x9888), 0x040d8023 },
- { _MMIO(0x9888), 0x100d0000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x020f0010 },
- { _MMIO(0x9888), 0x000f0000 },
- { _MMIO(0x9888), 0x0e0f0050 },
- { _MMIO(0x9888), 0x0a2c8000 },
- { _MMIO(0x9888), 0x0c2c8000 },
- { _MMIO(0x9888), 0x1190fc00 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41901400 },
- { _MMIO(0x9888), 0x43901485 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900001 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static int
-get_rasterizer_and_pixel_backend_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_rasterizer_and_pixel_backend;
- lens[n] = ARRAY_SIZE(mux_config_rasterizer_and_pixel_backend);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_sampler[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x70800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2770), 0x0000c000 },
- { _MMIO(0x2774), 0x0000e7ff },
- { _MMIO(0x2778), 0x00003000 },
- { _MMIO(0x277c), 0x0000f9ff },
- { _MMIO(0x2780), 0x00000c00 },
- { _MMIO(0x2784), 0x0000fe7f },
-};
-
-static const struct i915_oa_reg flex_eu_config_sampler[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_sampler[] = {
- { _MMIO(0x9888), 0x14152c00 },
- { _MMIO(0x9888), 0x16150005 },
- { _MMIO(0x9888), 0x121600a0 },
- { _MMIO(0x9888), 0x14352c00 },
- { _MMIO(0x9888), 0x16350005 },
- { _MMIO(0x9888), 0x123600a0 },
- { _MMIO(0x9888), 0x14552c00 },
- { _MMIO(0x9888), 0x16550005 },
- { _MMIO(0x9888), 0x125600a0 },
- { _MMIO(0x9888), 0x062f6000 },
- { _MMIO(0x9888), 0x022f2000 },
- { _MMIO(0x9888), 0x0c4c0050 },
- { _MMIO(0x9888), 0x0a4c0010 },
- { _MMIO(0x9888), 0x0c0d8000 },
- { _MMIO(0x9888), 0x0e0da000 },
- { _MMIO(0x9888), 0x000d8000 },
- { _MMIO(0x9888), 0x020da000 },
- { _MMIO(0x9888), 0x040da000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x100f0350 },
- { _MMIO(0x9888), 0x0c0fb000 },
- { _MMIO(0x9888), 0x0e0f00da },
- { _MMIO(0x9888), 0x182c0028 },
- { _MMIO(0x9888), 0x0a2c8000 },
- { _MMIO(0x9888), 0x022dc000 },
- { _MMIO(0x9888), 0x042d4000 },
- { _MMIO(0x9888), 0x0c138000 },
- { _MMIO(0x9888), 0x0e132000 },
- { _MMIO(0x9888), 0x0413c000 },
- { _MMIO(0x9888), 0x1c140018 },
- { _MMIO(0x9888), 0x0c157000 },
- { _MMIO(0x9888), 0x0e150078 },
- { _MMIO(0x9888), 0x10150000 },
- { _MMIO(0x9888), 0x04162180 },
- { _MMIO(0x9888), 0x02160000 },
- { _MMIO(0x9888), 0x04174000 },
- { _MMIO(0x9888), 0x0233a000 },
- { _MMIO(0x9888), 0x04333000 },
- { _MMIO(0x9888), 0x14348000 },
- { _MMIO(0x9888), 0x16348000 },
- { _MMIO(0x9888), 0x02357870 },
- { _MMIO(0x9888), 0x10350000 },
- { _MMIO(0x9888), 0x04360043 },
- { _MMIO(0x9888), 0x02360000 },
- { _MMIO(0x9888), 0x04371000 },
- { _MMIO(0x9888), 0x0e538000 },
- { _MMIO(0x9888), 0x00538000 },
- { _MMIO(0x9888), 0x06533000 },
- { _MMIO(0x9888), 0x1c540020 },
- { _MMIO(0x9888), 0x12548000 },
- { _MMIO(0x9888), 0x0e557000 },
- { _MMIO(0x9888), 0x00557800 },
- { _MMIO(0x9888), 0x10550000 },
- { _MMIO(0x9888), 0x06560043 },
- { _MMIO(0x9888), 0x02560000 },
- { _MMIO(0x9888), 0x06571000 },
- { _MMIO(0x9888), 0x1190ff80 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900060 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900c00 },
- { _MMIO(0x9888), 0x43900842 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900060 },
-};
-
-static int
-get_sampler_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_sampler;
- lens[n] = ARRAY_SIZE(mux_config_sampler);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_tdl_1[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x30800000 },
- { _MMIO(0x2770), 0x00000002 },
- { _MMIO(0x2774), 0x00007fff },
- { _MMIO(0x2778), 0x00000000 },
- { _MMIO(0x277c), 0x00009fff },
- { _MMIO(0x2780), 0x00000002 },
- { _MMIO(0x2784), 0x0000efff },
- { _MMIO(0x2788), 0x00000000 },
- { _MMIO(0x278c), 0x0000f3ff },
- { _MMIO(0x2790), 0x00000002 },
- { _MMIO(0x2794), 0x0000fdff },
- { _MMIO(0x2798), 0x00000000 },
- { _MMIO(0x279c), 0x0000fe7f },
-};
-
-static const struct i915_oa_reg flex_eu_config_tdl_1[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_tdl_1[] = {
- { _MMIO(0x9888), 0x12120000 },
- { _MMIO(0x9888), 0x12320000 },
- { _MMIO(0x9888), 0x12520000 },
- { _MMIO(0x9888), 0x002f8000 },
- { _MMIO(0x9888), 0x022f3000 },
- { _MMIO(0x9888), 0x0a4c0015 },
- { _MMIO(0x9888), 0x0c0d8000 },
- { _MMIO(0x9888), 0x0e0da000 },
- { _MMIO(0x9888), 0x000d8000 },
- { _MMIO(0x9888), 0x020da000 },
- { _MMIO(0x9888), 0x040da000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x100f03a0 },
- { _MMIO(0x9888), 0x0c0ff000 },
- { _MMIO(0x9888), 0x0e0f0095 },
- { _MMIO(0x9888), 0x062c8000 },
- { _MMIO(0x9888), 0x082c8000 },
- { _MMIO(0x9888), 0x0a2c8000 },
- { _MMIO(0x9888), 0x0c2d8000 },
- { _MMIO(0x9888), 0x0e2d4000 },
- { _MMIO(0x9888), 0x062d4000 },
- { _MMIO(0x9888), 0x02108000 },
- { _MMIO(0x9888), 0x0410c000 },
- { _MMIO(0x9888), 0x02118000 },
- { _MMIO(0x9888), 0x0411c000 },
- { _MMIO(0x9888), 0x02121880 },
- { _MMIO(0x9888), 0x041219b5 },
- { _MMIO(0x9888), 0x00120000 },
- { _MMIO(0x9888), 0x02134000 },
- { _MMIO(0x9888), 0x04135000 },
- { _MMIO(0x9888), 0x0c308000 },
- { _MMIO(0x9888), 0x0e304000 },
- { _MMIO(0x9888), 0x06304000 },
- { _MMIO(0x9888), 0x0c318000 },
- { _MMIO(0x9888), 0x0e314000 },
- { _MMIO(0x9888), 0x06314000 },
- { _MMIO(0x9888), 0x0c321a80 },
- { _MMIO(0x9888), 0x0e320033 },
- { _MMIO(0x9888), 0x06320031 },
- { _MMIO(0x9888), 0x00320000 },
- { _MMIO(0x9888), 0x0c334000 },
- { _MMIO(0x9888), 0x0e331000 },
- { _MMIO(0x9888), 0x06331000 },
- { _MMIO(0x9888), 0x0e508000 },
- { _MMIO(0x9888), 0x00508000 },
- { _MMIO(0x9888), 0x02504000 },
- { _MMIO(0x9888), 0x0e518000 },
- { _MMIO(0x9888), 0x00518000 },
- { _MMIO(0x9888), 0x02514000 },
- { _MMIO(0x9888), 0x0e521880 },
- { _MMIO(0x9888), 0x00521a80 },
- { _MMIO(0x9888), 0x02520033 },
- { _MMIO(0x9888), 0x0e534000 },
- { _MMIO(0x9888), 0x00534000 },
- { _MMIO(0x9888), 0x02531000 },
- { _MMIO(0x9888), 0x1190ff80 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900800 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900062 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900c00 },
- { _MMIO(0x9888), 0x43900003 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900040 },
-};
-
-static int
-get_tdl_1_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_tdl_1;
- lens[n] = ARRAY_SIZE(mux_config_tdl_1);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_tdl_2[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x00800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
-};
-
-static const struct i915_oa_reg flex_eu_config_tdl_2[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_tdl_2[] = {
- { _MMIO(0x9888), 0x12124d60 },
- { _MMIO(0x9888), 0x12322e60 },
- { _MMIO(0x9888), 0x12524d60 },
- { _MMIO(0x9888), 0x022f3000 },
- { _MMIO(0x9888), 0x0a4c0014 },
- { _MMIO(0x9888), 0x000d8000 },
- { _MMIO(0x9888), 0x020da000 },
- { _MMIO(0x9888), 0x040da000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x0c0fe000 },
- { _MMIO(0x9888), 0x0e0f0097 },
- { _MMIO(0x9888), 0x082c8000 },
- { _MMIO(0x9888), 0x0a2c8000 },
- { _MMIO(0x9888), 0x002d8000 },
- { _MMIO(0x9888), 0x062d4000 },
- { _MMIO(0x9888), 0x0410c000 },
- { _MMIO(0x9888), 0x0411c000 },
- { _MMIO(0x9888), 0x04121fb7 },
- { _MMIO(0x9888), 0x00120000 },
- { _MMIO(0x9888), 0x04135000 },
- { _MMIO(0x9888), 0x00308000 },
- { _MMIO(0x9888), 0x06304000 },
- { _MMIO(0x9888), 0x00318000 },
- { _MMIO(0x9888), 0x06314000 },
- { _MMIO(0x9888), 0x00321b80 },
- { _MMIO(0x9888), 0x0632003f },
- { _MMIO(0x9888), 0x00334000 },
- { _MMIO(0x9888), 0x06331000 },
- { _MMIO(0x9888), 0x0250c000 },
- { _MMIO(0x9888), 0x0251c000 },
- { _MMIO(0x9888), 0x02521fb7 },
- { _MMIO(0x9888), 0x00520000 },
- { _MMIO(0x9888), 0x02535000 },
- { _MMIO(0x9888), 0x1190fc00 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900800 },
- { _MMIO(0x9888), 0x43900063 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900040 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static int
-get_tdl_2_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_tdl_2;
- lens[n] = ARRAY_SIZE(mux_config_tdl_2);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_compute_extra[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x00800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
-};
-
-static const struct i915_oa_reg flex_eu_config_compute_extra[] = {
- { _MMIO(0xe458), 0x00001000 },
- { _MMIO(0xe558), 0x00003002 },
- { _MMIO(0xe658), 0x00005004 },
- { _MMIO(0xe758), 0x00011010 },
- { _MMIO(0xe45c), 0x00050012 },
- { _MMIO(0xe55c), 0x00052051 },
- { _MMIO(0xe65c), 0x00000008 },
-};
-
-static const struct i915_oa_reg mux_config_compute_extra[] = {
- { _MMIO(0x9888), 0x121203e0 },
- { _MMIO(0x9888), 0x123203e0 },
- { _MMIO(0x9888), 0x125203e0 },
- { _MMIO(0x9888), 0x022f4000 },
- { _MMIO(0x9888), 0x0a4c0040 },
- { _MMIO(0x9888), 0x040da000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x0e0f006c },
- { _MMIO(0x9888), 0x0c2c8000 },
- { _MMIO(0x9888), 0x042d8000 },
- { _MMIO(0x9888), 0x06104000 },
- { _MMIO(0x9888), 0x06114000 },
- { _MMIO(0x9888), 0x06120033 },
- { _MMIO(0x9888), 0x00120000 },
- { _MMIO(0x9888), 0x06131000 },
- { _MMIO(0x9888), 0x04308000 },
- { _MMIO(0x9888), 0x04318000 },
- { _MMIO(0x9888), 0x04321980 },
- { _MMIO(0x9888), 0x00320000 },
- { _MMIO(0x9888), 0x04334000 },
- { _MMIO(0x9888), 0x04504000 },
- { _MMIO(0x9888), 0x04514000 },
- { _MMIO(0x9888), 0x04520033 },
- { _MMIO(0x9888), 0x00520000 },
- { _MMIO(0x9888), 0x04531000 },
- { _MMIO(0x9888), 0x1190e000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x43900c00 },
- { _MMIO(0x9888), 0x45900002 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static int
-get_compute_extra_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_compute_extra;
- lens[n] = ARRAY_SIZE(mux_config_compute_extra);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_vme_pipe[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x30800000 },
- { _MMIO(0x2770), 0x00100030 },
- { _MMIO(0x2774), 0x0000fff9 },
- { _MMIO(0x2778), 0x00000002 },
- { _MMIO(0x277c), 0x0000fffc },
- { _MMIO(0x2780), 0x00000002 },
- { _MMIO(0x2784), 0x0000fff3 },
- { _MMIO(0x2788), 0x00100180 },
- { _MMIO(0x278c), 0x0000ffcf },
- { _MMIO(0x2790), 0x00000002 },
- { _MMIO(0x2794), 0x0000ffcf },
- { _MMIO(0x2798), 0x00000002 },
- { _MMIO(0x279c), 0x0000ff3f },
-};
-
-static const struct i915_oa_reg flex_eu_config_vme_pipe[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00008003 },
-};
-
-static const struct i915_oa_reg mux_config_vme_pipe[] = {
- { _MMIO(0x9888), 0x141a5800 },
- { _MMIO(0x9888), 0x161a00c0 },
- { _MMIO(0x9888), 0x12180240 },
- { _MMIO(0x9888), 0x14180002 },
- { _MMIO(0x9888), 0x143a5800 },
- { _MMIO(0x9888), 0x163a00c0 },
- { _MMIO(0x9888), 0x12380240 },
- { _MMIO(0x9888), 0x14380002 },
- { _MMIO(0x9888), 0x002f1000 },
- { _MMIO(0x9888), 0x022f8000 },
- { _MMIO(0x9888), 0x042f3000 },
- { _MMIO(0x9888), 0x004c4000 },
- { _MMIO(0x9888), 0x0a4c1500 },
- { _MMIO(0x9888), 0x000d2000 },
- { _MMIO(0x9888), 0x060d8000 },
- { _MMIO(0x9888), 0x080da000 },
- { _MMIO(0x9888), 0x0a0da000 },
- { _MMIO(0x9888), 0x0c0da000 },
- { _MMIO(0x9888), 0x0c0f0400 },
- { _MMIO(0x9888), 0x0e0f9500 },
- { _MMIO(0x9888), 0x100f002a },
- { _MMIO(0x9888), 0x002c8000 },
- { _MMIO(0x9888), 0x0e2c8000 },
- { _MMIO(0x9888), 0x162c0a00 },
- { _MMIO(0x9888), 0x0a2dc000 },
- { _MMIO(0x9888), 0x0c2dc000 },
- { _MMIO(0x9888), 0x04193000 },
- { _MMIO(0x9888), 0x081a28c1 },
- { _MMIO(0x9888), 0x001a0000 },
- { _MMIO(0x9888), 0x00133000 },
- { _MMIO(0x9888), 0x0613c000 },
- { _MMIO(0x9888), 0x0813f000 },
- { _MMIO(0x9888), 0x00172000 },
- { _MMIO(0x9888), 0x06178000 },
- { _MMIO(0x9888), 0x0817a000 },
- { _MMIO(0x9888), 0x00180037 },
- { _MMIO(0x9888), 0x06180940 },
- { _MMIO(0x9888), 0x08180000 },
- { _MMIO(0x9888), 0x02180000 },
- { _MMIO(0x9888), 0x04183000 },
- { _MMIO(0x9888), 0x06393000 },
- { _MMIO(0x9888), 0x0c3a28c1 },
- { _MMIO(0x9888), 0x003a0000 },
- { _MMIO(0x9888), 0x0a33f000 },
- { _MMIO(0x9888), 0x0c33f000 },
- { _MMIO(0x9888), 0x0a37a000 },
- { _MMIO(0x9888), 0x0c37a000 },
- { _MMIO(0x9888), 0x0a380977 },
- { _MMIO(0x9888), 0x08380000 },
- { _MMIO(0x9888), 0x04380000 },
- { _MMIO(0x9888), 0x06383000 },
- { _MMIO(0x9888), 0x119000ff },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900040 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x45900800 },
- { _MMIO(0x9888), 0x47901000 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900844 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static int
-get_vme_pipe_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_vme_pipe;
- lens[n] = ARRAY_SIZE(mux_config_vme_pipe);
- n++;
-
- return n;
-}
-
static const struct i915_oa_reg b_counter_config_test_oa[] = {
{ _MMIO(0x2740), 0x00000000 },
{ _MMIO(0x2744), 0x00800000 },
@@ -1882,6 +60,7 @@ static const struct i915_oa_reg flex_eu_config_test_oa[] = {
 };
 
 static const struct i915_oa_reg mux_config_test_oa[] = {
+ { _MMIO(0x9840), 0x00000080 },
{ _MMIO(0x9888), 0x11810000 },
{ _MMIO(0x9888), 0x07810013 },
{ _MMIO(0x9888), 0x1f810000 },
@@ -1896,1096 +75,35 @@ static const struct i915_oa_reg mux_config_test_oa[] = {
{ _MMIO(0x9888), 0x33900000 },
};
-static int
-get_test_oa_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_test_oa;
- lens[n] = ARRAY_SIZE(mux_config_test_oa);
- n++;
-
- return n;
-}
-
-int i915_oa_select_metric_set_kblgt2(struct drm_i915_private *dev_priv)
-{
- dev_priv->perf.oa.n_mux_configs = 0;
- dev_priv->perf.oa.b_counter_regs = NULL;
- dev_priv->perf.oa.b_counter_regs_len = 0;
- dev_priv->perf.oa.flex_regs = NULL;
- dev_priv->perf.oa.flex_regs_len = 0;
-
- switch (dev_priv->perf.oa.metrics_set) {
- case METRIC_SET_ID_RENDER_BASIC:
- dev_priv->perf.oa.n_mux_configs =
- get_render_basic_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_BASIC\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_render_basic;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_render_basic);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_render_basic;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_render_basic);
-
- return 0;
- case METRIC_SET_ID_COMPUTE_BASIC:
- dev_priv->perf.oa.n_mux_configs =
- get_compute_basic_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_BASIC\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_compute_basic;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_compute_basic);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_compute_basic;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_compute_basic);
-
- return 0;
- case METRIC_SET_ID_RENDER_PIPE_PROFILE:
- dev_priv->perf.oa.n_mux_configs =
- get_render_pipe_profile_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_PIPE_PROFILE\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_render_pipe_profile;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_render_pipe_profile);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_render_pipe_profile;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_render_pipe_profile);
-
- return 0;
- case METRIC_SET_ID_MEMORY_READS:
- dev_priv->perf.oa.n_mux_configs =
- get_memory_reads_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_READS\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_memory_reads;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_memory_reads);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_memory_reads;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_memory_reads);
-
- return 0;
- case METRIC_SET_ID_MEMORY_WRITES:
- dev_priv->perf.oa.n_mux_configs =
- get_memory_writes_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_WRITES\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_memory_writes;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_memory_writes);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_memory_writes;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_memory_writes);
-
- return 0;
- case METRIC_SET_ID_COMPUTE_EXTENDED:
- dev_priv->perf.oa.n_mux_configs =
- get_compute_extended_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTENDED\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_compute_extended;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_compute_extended);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_compute_extended;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_compute_extended);
-
- return 0;
- case METRIC_SET_ID_COMPUTE_L3_CACHE:
- dev_priv->perf.oa.n_mux_configs =
- get_compute_l3_cache_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_L3_CACHE\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_compute_l3_cache;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_compute_l3_cache);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_compute_l3_cache;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_compute_l3_cache);
-
- return 0;
- case METRIC_SET_ID_HDC_AND_SF:
- dev_priv->perf.oa.n_mux_configs =
- get_hdc_and_sf_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"HDC_AND_SF\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_hdc_and_sf;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_hdc_and_sf);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_hdc_and_sf;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_hdc_and_sf);
-
- return 0;
- case METRIC_SET_ID_L3_1:
- dev_priv->perf.oa.n_mux_configs =
- get_l3_1_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_1\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_l3_1;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_l3_1);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_l3_1;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_l3_1);
-
- return 0;
- case METRIC_SET_ID_L3_2:
- dev_priv->perf.oa.n_mux_configs =
- get_l3_2_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_2\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_l3_2;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_l3_2);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_l3_2;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_l3_2);
-
- return 0;
- case METRIC_SET_ID_L3_3:
- dev_priv->perf.oa.n_mux_configs =
- get_l3_3_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_3\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_l3_3;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_l3_3);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_l3_3;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_l3_3);
-
- return 0;
- case METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND:
- dev_priv->perf.oa.n_mux_configs =
- get_rasterizer_and_pixel_backend_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"RASTERIZER_AND_PIXEL_BACKEND\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_rasterizer_and_pixel_backend;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_rasterizer_and_pixel_backend);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_rasterizer_and_pixel_backend;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_rasterizer_and_pixel_backend);
-
- return 0;
- case METRIC_SET_ID_SAMPLER:
- dev_priv->perf.oa.n_mux_configs =
- get_sampler_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"SAMPLER\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_sampler;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_sampler);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_sampler;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_sampler);
-
- return 0;
- case METRIC_SET_ID_TDL_1:
- dev_priv->perf.oa.n_mux_configs =
- get_tdl_1_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"TDL_1\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_tdl_1;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_tdl_1);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_tdl_1;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_tdl_1);
-
- return 0;
- case METRIC_SET_ID_TDL_2:
- dev_priv->perf.oa.n_mux_configs =
- get_tdl_2_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"TDL_2\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_tdl_2;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_tdl_2);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_tdl_2;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_tdl_2);
-
- return 0;
- case METRIC_SET_ID_COMPUTE_EXTRA:
- dev_priv->perf.oa.n_mux_configs =
- get_compute_extra_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTRA\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_compute_extra;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_compute_extra);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_compute_extra;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_compute_extra);
-
- return 0;
- case METRIC_SET_ID_VME_PIPE:
- dev_priv->perf.oa.n_mux_configs =
- get_vme_pipe_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"VME_PIPE\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_vme_pipe;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_vme_pipe);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_vme_pipe;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_vme_pipe);
-
- return 0;
- case METRIC_SET_ID_TEST_OA:
- dev_priv->perf.oa.n_mux_configs =
- get_test_oa_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"TEST_OA\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_test_oa;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_test_oa;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_test_oa);
-
- return 0;
- default:
- return -ENODEV;
- }
-}
-
-static ssize_t
-show_render_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_BASIC);
-}
-
-static struct device_attribute dev_attr_render_basic_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_render_basic_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_render_basic[] = {
- &dev_attr_render_basic_id.attr,
- NULL,
-};
-
-static struct attribute_group group_render_basic = {
- .name = "f8d677e9-ff6f-4df1-9310-0334c6efacce",
- .attrs = attrs_render_basic,
-};
-
-static ssize_t
-show_compute_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_BASIC);
-}
-
-static struct device_attribute dev_attr_compute_basic_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_compute_basic_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_compute_basic[] = {
- &dev_attr_compute_basic_id.attr,
- NULL,
-};
-
-static struct attribute_group group_compute_basic = {
- .name = "e17fc42a-e614-41b6-90c4-1074841a6c77",
- .attrs = attrs_compute_basic,
-};
-
-static ssize_t
-show_render_pipe_profile_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_PIPE_PROFILE);
-}
-
-static struct device_attribute dev_attr_render_pipe_profile_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_render_pipe_profile_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_render_pipe_profile[] = {
- &dev_attr_render_pipe_profile_id.attr,
- NULL,
-};
-
-static struct attribute_group group_render_pipe_profile = {
- .name = "d7a17a3a-ca71-40d2-a919-ace80d50633f",
- .attrs = attrs_render_pipe_profile,
-};
-
-static ssize_t
-show_memory_reads_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_READS);
-}
-
-static struct device_attribute dev_attr_memory_reads_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_memory_reads_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_memory_reads[] = {
- &dev_attr_memory_reads_id.attr,
- NULL,
-};
-
-static struct attribute_group group_memory_reads = {
- .name = "57b59202-172b-477a-87de-33f85572c589",
- .attrs = attrs_memory_reads,
-};
-
-static ssize_t
-show_memory_writes_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_WRITES);
-}
-
-static struct device_attribute dev_attr_memory_writes_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_memory_writes_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_memory_writes[] = {
- &dev_attr_memory_writes_id.attr,
- NULL,
-};
-
-static struct attribute_group group_memory_writes = {
- .name = "3addf8ef-8e9b-40f5-a448-3dbb5d5128b0",
- .attrs = attrs_memory_writes,
-};
-
-static ssize_t
-show_compute_extended_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_EXTENDED);
-}
-
-static struct device_attribute dev_attr_compute_extended_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_compute_extended_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_compute_extended[] = {
- &dev_attr_compute_extended_id.attr,
- NULL,
-};
-
-static struct attribute_group group_compute_extended = {
- .name = "4af0400a-81c3-47db-a6b6-deddbd75680e",
- .attrs = attrs_compute_extended,
-};
-
-static ssize_t
-show_compute_l3_cache_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_L3_CACHE);
-}
-
-static struct device_attribute dev_attr_compute_l3_cache_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_compute_l3_cache_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_compute_l3_cache[] = {
- &dev_attr_compute_l3_cache_id.attr,
- NULL,
-};
-
-static struct attribute_group group_compute_l3_cache = {
- .name = "0e22f995-79ca-4f67-83ab-e9d9772488d8",
- .attrs = attrs_compute_l3_cache,
-};
-
-static ssize_t
-show_hdc_and_sf_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_HDC_AND_SF);
-}
-
-static struct device_attribute dev_attr_hdc_and_sf_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_hdc_and_sf_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_hdc_and_sf[] = {
- &dev_attr_hdc_and_sf_id.attr,
- NULL,
-};
-
-static struct attribute_group group_hdc_and_sf = {
- .name = "bc2a00f7-cb8a-4ff2-8ad0-e241dad16937",
- .attrs = attrs_hdc_and_sf,
-};
-
-static ssize_t
-show_l3_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_L3_1);
-}
-
-static struct device_attribute dev_attr_l3_1_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_l3_1_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_l3_1[] = {
- &dev_attr_l3_1_id.attr,
- NULL,
-};
-
-static struct attribute_group group_l3_1 = {
- .name = "d2bbe790-f058-42d9-81c6-cdedcf655bc2",
- .attrs = attrs_l3_1,
-};
-
-static ssize_t
-show_l3_2_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_L3_2);
-}
-
-static struct device_attribute dev_attr_l3_2_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_l3_2_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_l3_2[] = {
- &dev_attr_l3_2_id.attr,
- NULL,
-};
-
-static struct attribute_group group_l3_2 = {
- .name = "2f8e32e4-5956-46e2-af31-c8ea95887332",
- .attrs = attrs_l3_2,
-};
-
-static ssize_t
-show_l3_3_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_L3_3);
-}
-
-static struct device_attribute dev_attr_l3_3_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_l3_3_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_l3_3[] = {
- &dev_attr_l3_3_id.attr,
- NULL,
-};
-
-static struct attribute_group group_l3_3 = {
- .name = "ca046aad-b5fb-4101-adce-6473ee6e5b14",
- .attrs = attrs_l3_3,
-};
-
-static ssize_t
-show_rasterizer_and_pixel_backend_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND);
-}
-
-static struct device_attribute dev_attr_rasterizer_and_pixel_backend_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_rasterizer_and_pixel_backend_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_rasterizer_and_pixel_backend[] = {
- &dev_attr_rasterizer_and_pixel_backend_id.attr,
- NULL,
-};
-
-static struct attribute_group group_rasterizer_and_pixel_backend = {
- .name = "605f388f-24bb-455c-88e3-8d57ae0d7e9f",
- .attrs = attrs_rasterizer_and_pixel_backend,
-};
-
-static ssize_t
-show_sampler_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_SAMPLER);
-}
-
-static struct device_attribute dev_attr_sampler_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_sampler_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_sampler[] = {
- &dev_attr_sampler_id.attr,
- NULL,
-};
-
-static struct attribute_group group_sampler = {
- .name = "31dd157c-bf4e-4bab-bf2b-f5c8174af1af",
- .attrs = attrs_sampler,
-};
-
-static ssize_t
-show_tdl_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_TDL_1);
-}
-
-static struct device_attribute dev_attr_tdl_1_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_tdl_1_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_tdl_1[] = {
- &dev_attr_tdl_1_id.attr,
- NULL,
-};
-
-static struct attribute_group group_tdl_1 = {
- .name = "105db928-5542-466b-9128-e1f3c91426cb",
- .attrs = attrs_tdl_1,
-};
-
-static ssize_t
-show_tdl_2_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_TDL_2);
-}
-
-static struct device_attribute dev_attr_tdl_2_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_tdl_2_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_tdl_2[] = {
- &dev_attr_tdl_2_id.attr,
- NULL,
-};
-
-static struct attribute_group group_tdl_2 = {
- .name = "03db94d2-b37f-4c58-a791-0d2067b013bb",
- .attrs = attrs_tdl_2,
-};
-
-static ssize_t
-show_compute_extra_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_EXTRA);
-}
-
-static struct device_attribute dev_attr_compute_extra_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_compute_extra_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_compute_extra[] = {
- &dev_attr_compute_extra_id.attr,
- NULL,
-};
-
-static struct attribute_group group_compute_extra = {
- .name = "aa7a3fb9-22fb-43ff-a32d-0ab6c13bbd16",
- .attrs = attrs_compute_extra,
-};
-
-static ssize_t
-show_vme_pipe_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_VME_PIPE);
-}
-
-static struct device_attribute dev_attr_vme_pipe_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_vme_pipe_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_vme_pipe[] = {
- &dev_attr_vme_pipe_id.attr,
- NULL,
-};
-
-static struct attribute_group group_vme_pipe = {
- .name = "398a4268-ef6f-4ffc-b55f-3c7b5363ce61",
- .attrs = attrs_vme_pipe,
-};
-
static ssize_t
show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", METRIC_SET_ID_TEST_OA);
+ return sprintf(buf, "1\n");
}

-static struct device_attribute dev_attr_test_oa_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_test_oa_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_test_oa[] = {
- &dev_attr_test_oa_id.attr,
- NULL,
-};
-
-static struct attribute_group group_test_oa = {
- .name = "baa3c7e4-52b6-4b85-801e-465a94b746dd",
- .attrs = attrs_test_oa,
-};
-
-int
-i915_perf_register_sysfs_kblgt2(struct drm_i915_private *dev_priv)
+void
+i915_perf_load_test_config_kblgt2(struct drm_i915_private *dev_priv)
{
- const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
- int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];
- int ret = 0;
+ strncpy(dev_priv->perf.oa.test_config.uuid,
+ "baa3c7e4-52b6-4b85-801e-465a94b746dd",
+ UUID_STRING_LEN);
+ dev_priv->perf.oa.test_config.id = 1;

- if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_basic);
- if (ret)
- goto error_render_basic;
- }
- if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
- if (ret)
- goto error_compute_basic;
- }
- if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
- if (ret)
- goto error_render_pipe_profile;
- }
- if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
- if (ret)
- goto error_memory_reads;
- }
- if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
- if (ret)
- goto error_memory_writes;
- }
- if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
- if (ret)
- goto error_compute_extended;
- }
- if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
- if (ret)
- goto error_compute_l3_cache;
- }
- if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
- if (ret)
- goto error_hdc_and_sf;
- }
- if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_1);
- if (ret)
- goto error_l3_1;
- }
- if (get_l3_2_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_2);
- if (ret)
- goto error_l3_2;
- }
- if (get_l3_3_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_3);
- if (ret)
- goto error_l3_3;
- }
- if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
- if (ret)
- goto error_rasterizer_and_pixel_backend;
- }
- if (get_sampler_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_sampler);
- if (ret)
- goto error_sampler;
- }
- if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
- if (ret)
- goto error_tdl_1;
- }
- if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
- if (ret)
- goto error_tdl_2;
- }
- if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
- if (ret)
- goto error_compute_extra;
- }
- if (get_vme_pipe_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_vme_pipe);
- if (ret)
- goto error_vme_pipe;
- }
- if (get_test_oa_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_test_oa);
- if (ret)
- goto error_test_oa;
- }
+ dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);

- return 0;
+ dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);

-error_test_oa:
- if (get_vme_pipe_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_vme_pipe);
-error_vme_pipe:
- if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
-error_compute_extra:
- if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
-error_tdl_2:
- if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
-error_tdl_1:
- if (get_sampler_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler);
-error_sampler:
- if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
-error_rasterizer_and_pixel_backend:
- if (get_l3_3_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_3);
-error_l3_3:
- if (get_l3_2_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_2);
-error_l3_2:
- if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_1);
-error_l3_1:
- if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
-error_hdc_and_sf:
- if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
-error_compute_l3_cache:
- if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
-error_compute_extended:
- if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
-error_memory_writes:
- if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
-error_memory_reads:
- if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
-error_render_pipe_profile:
- if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
-error_compute_basic:
- if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
-error_render_basic:
- return ret;
-}
+ dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);

-void
-i915_perf_unregister_sysfs_kblgt2(struct drm_i915_private *dev_priv)
-{
- const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
- int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];
+ dev_priv->perf.oa.test_config.sysfs_metric.name = "baa3c7e4-52b6-4b85-801e-465a94b746dd";
+ dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+
+ dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;

- if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
- if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
- if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
- if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
- if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
- if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
- if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
- if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
- if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_1);
- if (get_l3_2_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_2);
- if (get_l3_3_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_3);
- if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
- if (get_sampler_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler);
- if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
- if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
- if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
- if (get_vme_pipe_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_vme_pipe);
- if (get_test_oa_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_test_oa);
+ dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
}
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt2.h b/drivers/gpu/drm/i915/i915_oa_kblgt2.h
index 7e61bfc4f9f5..25b803546dc1 100644
--- a/drivers/gpu/drm/i915/i915_oa_kblgt2.h
+++ b/drivers/gpu/drm/i915/i915_oa_kblgt2.h
@@ -29,12 +29,6 @@
#ifndef __I915_OA_KBLGT2_H__
#define __I915_OA_KBLGT2_H__

-extern int i915_oa_n_builtin_metric_sets_kblgt2;
-
-extern int i915_oa_select_metric_set_kblgt2(struct drm_i915_private *dev_priv);
-
-extern int i915_perf_register_sysfs_kblgt2(struct drm_i915_private *dev_priv);
-
-extern void i915_perf_unregister_sysfs_kblgt2(struct drm_i915_private *dev_priv);
+extern void i915_perf_load_test_config_kblgt2(struct drm_i915_private *dev_priv);

#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt3.c b/drivers/gpu/drm/i915/i915_oa_kblgt3.c
index 6ed092566a32..5576afdd9a7e 100644
--- a/drivers/gpu/drm/i915/i915_oa_kblgt3.c
+++ b/drivers/gpu/drm/i915/i915_oa_kblgt3.c
@@ -31,1877 +31,6 @@
#include "i915_drv.h"
#include "i915_oa_kblgt3.h"
-enum metric_set_id {
- METRIC_SET_ID_RENDER_BASIC = 1,
- METRIC_SET_ID_COMPUTE_BASIC,
- METRIC_SET_ID_RENDER_PIPE_PROFILE,
- METRIC_SET_ID_MEMORY_READS,
- METRIC_SET_ID_MEMORY_WRITES,
- METRIC_SET_ID_COMPUTE_EXTENDED,
- METRIC_SET_ID_COMPUTE_L3_CACHE,
- METRIC_SET_ID_HDC_AND_SF,
- METRIC_SET_ID_L3_1,
- METRIC_SET_ID_L3_2,
- METRIC_SET_ID_L3_3,
- METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND,
- METRIC_SET_ID_SAMPLER,
- METRIC_SET_ID_TDL_1,
- METRIC_SET_ID_TDL_2,
- METRIC_SET_ID_COMPUTE_EXTRA,
- METRIC_SET_ID_VME_PIPE,
- METRIC_SET_ID_TEST_OA,
-};
-
-int i915_oa_n_builtin_metric_sets_kblgt3 = 18;
-
-static const struct i915_oa_reg b_counter_config_render_basic[] = {
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x00800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2740), 0x00000000 },
-};
-
-static const struct i915_oa_reg flex_eu_config_render_basic[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_render_basic[] = {
- { _MMIO(0x9888), 0x166c01e0 },
- { _MMIO(0x9888), 0x12170280 },
- { _MMIO(0x9888), 0x12370280 },
- { _MMIO(0x9888), 0x16ec01e0 },
- { _MMIO(0x9888), 0x11930317 },
- { _MMIO(0x9888), 0x159303df },
- { _MMIO(0x9888), 0x3f900003 },
- { _MMIO(0x9888), 0x1a4e0380 },
- { _MMIO(0x9888), 0x0a6c0053 },
- { _MMIO(0x9888), 0x106c0000 },
- { _MMIO(0x9888), 0x1c6c0000 },
- { _MMIO(0x9888), 0x0a1b4000 },
- { _MMIO(0x9888), 0x1c1c0001 },
- { _MMIO(0x9888), 0x002f1000 },
- { _MMIO(0x9888), 0x042f1000 },
- { _MMIO(0x9888), 0x004c4000 },
- { _MMIO(0x9888), 0x0a4c8400 },
- { _MMIO(0x9888), 0x0c4c0002 },
- { _MMIO(0x9888), 0x000d2000 },
- { _MMIO(0x9888), 0x060d8000 },
- { _MMIO(0x9888), 0x080da000 },
- { _MMIO(0x9888), 0x0a0da000 },
- { _MMIO(0x9888), 0x0c0f0400 },
- { _MMIO(0x9888), 0x0e0f6600 },
- { _MMIO(0x9888), 0x100f0001 },
- { _MMIO(0x9888), 0x002c8000 },
- { _MMIO(0x9888), 0x162ca200 },
- { _MMIO(0x9888), 0x062d8000 },
- { _MMIO(0x9888), 0x082d8000 },
- { _MMIO(0x9888), 0x00133000 },
- { _MMIO(0x9888), 0x08133000 },
- { _MMIO(0x9888), 0x00170020 },
- { _MMIO(0x9888), 0x08170021 },
- { _MMIO(0x9888), 0x10170000 },
- { _MMIO(0x9888), 0x0633c000 },
- { _MMIO(0x9888), 0x0833c000 },
- { _MMIO(0x9888), 0x06370800 },
- { _MMIO(0x9888), 0x08370840 },
- { _MMIO(0x9888), 0x10370000 },
- { _MMIO(0x9888), 0x1ace0200 },
- { _MMIO(0x9888), 0x0aec5300 },
- { _MMIO(0x9888), 0x10ec0000 },
- { _MMIO(0x9888), 0x1cec0000 },
- { _MMIO(0x9888), 0x0a9b8000 },
- { _MMIO(0x9888), 0x1c9c0002 },
- { _MMIO(0x9888), 0x0ccc0002 },
- { _MMIO(0x9888), 0x0a8d8000 },
- { _MMIO(0x9888), 0x108f0001 },
- { _MMIO(0x9888), 0x16ac8000 },
- { _MMIO(0x9888), 0x0d933031 },
- { _MMIO(0x9888), 0x0f933e3f },
- { _MMIO(0x9888), 0x01933d00 },
- { _MMIO(0x9888), 0x0393073c },
- { _MMIO(0x9888), 0x0593000e },
- { _MMIO(0x9888), 0x1d930000 },
- { _MMIO(0x9888), 0x19930000 },
- { _MMIO(0x9888), 0x1b930000 },
- { _MMIO(0x9888), 0x1d900157 },
- { _MMIO(0x9888), 0x1f900158 },
- { _MMIO(0x9888), 0x35900000 },
- { _MMIO(0x9888), 0x2b908000 },
- { _MMIO(0x9888), 0x2d908000 },
- { _MMIO(0x9888), 0x2f908000 },
- { _MMIO(0x9888), 0x31908000 },
- { _MMIO(0x9888), 0x15908000 },
- { _MMIO(0x9888), 0x17908000 },
- { _MMIO(0x9888), 0x19908000 },
- { _MMIO(0x9888), 0x1b908000 },
- { _MMIO(0x9888), 0x1190003f },
- { _MMIO(0x9888), 0x51902240 },
- { _MMIO(0x9888), 0x41900c00 },
- { _MMIO(0x9888), 0x55900242 },
- { _MMIO(0x9888), 0x45900084 },
- { _MMIO(0x9888), 0x47901400 },
- { _MMIO(0x9888), 0x57902220 },
- { _MMIO(0x9888), 0x49900c60 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900063 },
- { _MMIO(0x9888), 0x59900002 },
- { _MMIO(0x9888), 0x43900c63 },
- { _MMIO(0x9888), 0x53902222 },
-};
-
-static int
-get_render_basic_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_render_basic;
- lens[n] = ARRAY_SIZE(mux_config_render_basic);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_compute_basic[] = {
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x00800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2740), 0x00000000 },
-};
-
-static const struct i915_oa_reg flex_eu_config_compute_basic[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00000003 },
- { _MMIO(0xe658), 0x00002001 },
- { _MMIO(0xe758), 0x00778008 },
- { _MMIO(0xe45c), 0x00088078 },
- { _MMIO(0xe55c), 0x00808708 },
- { _MMIO(0xe65c), 0x00a08908 },
-};
-
-static const struct i915_oa_reg mux_config_compute_basic[] = {
- { _MMIO(0x9888), 0x104f00e0 },
- { _MMIO(0x9888), 0x124f1c00 },
- { _MMIO(0x9888), 0x106c00e0 },
- { _MMIO(0x9888), 0x37906800 },
- { _MMIO(0x9888), 0x3f900003 },
- { _MMIO(0x9888), 0x004e8000 },
- { _MMIO(0x9888), 0x1a4e0820 },
- { _MMIO(0x9888), 0x1c4e0002 },
- { _MMIO(0x9888), 0x064f0900 },
- { _MMIO(0x9888), 0x084f0032 },
- { _MMIO(0x9888), 0x0a4f1891 },
- { _MMIO(0x9888), 0x0c4f0e00 },
- { _MMIO(0x9888), 0x0e4f003c },
- { _MMIO(0x9888), 0x004f0d80 },
- { _MMIO(0x9888), 0x024f003b },
- { _MMIO(0x9888), 0x006c0002 },
- { _MMIO(0x9888), 0x086c0100 },
- { _MMIO(0x9888), 0x0c6c000c },
- { _MMIO(0x9888), 0x0e6c0b00 },
- { _MMIO(0x9888), 0x186c0000 },
- { _MMIO(0x9888), 0x1c6c0000 },
- { _MMIO(0x9888), 0x1e6c0000 },
- { _MMIO(0x9888), 0x001b4000 },
- { _MMIO(0x9888), 0x081b8000 },
- { _MMIO(0x9888), 0x0c1b4000 },
- { _MMIO(0x9888), 0x0e1b8000 },
- { _MMIO(0x9888), 0x101c8000 },
- { _MMIO(0x9888), 0x1a1c8000 },
- { _MMIO(0x9888), 0x1c1c0024 },
- { _MMIO(0x9888), 0x065b8000 },
- { _MMIO(0x9888), 0x085b4000 },
- { _MMIO(0x9888), 0x0a5bc000 },
- { _MMIO(0x9888), 0x0c5b8000 },
- { _MMIO(0x9888), 0x0e5b4000 },
- { _MMIO(0x9888), 0x005b8000 },
- { _MMIO(0x9888), 0x025b4000 },
- { _MMIO(0x9888), 0x1a5c6000 },
- { _MMIO(0x9888), 0x1c5c001b },
- { _MMIO(0x9888), 0x125c8000 },
- { _MMIO(0x9888), 0x145c8000 },
- { _MMIO(0x9888), 0x004c8000 },
- { _MMIO(0x9888), 0x0a4c2000 },
- { _MMIO(0x9888), 0x0c4c0208 },
- { _MMIO(0x9888), 0x000da000 },
- { _MMIO(0x9888), 0x060d8000 },
- { _MMIO(0x9888), 0x080da000 },
- { _MMIO(0x9888), 0x0a0da000 },
- { _MMIO(0x9888), 0x0c0da000 },
- { _MMIO(0x9888), 0x0e0da000 },
- { _MMIO(0x9888), 0x020d2000 },
- { _MMIO(0x9888), 0x0c0f5400 },
- { _MMIO(0x9888), 0x0e0f5500 },
- { _MMIO(0x9888), 0x100f0155 },
- { _MMIO(0x9888), 0x002c8000 },
- { _MMIO(0x9888), 0x0e2cc000 },
- { _MMIO(0x9888), 0x162cfb00 },
- { _MMIO(0x9888), 0x182c00be },
- { _MMIO(0x9888), 0x022cc000 },
- { _MMIO(0x9888), 0x042cc000 },
- { _MMIO(0x9888), 0x19900157 },
- { _MMIO(0x9888), 0x1b900158 },
- { _MMIO(0x9888), 0x1d900105 },
- { _MMIO(0x9888), 0x1f900103 },
- { _MMIO(0x9888), 0x35900000 },
- { _MMIO(0x9888), 0x11900fff },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900800 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x45900821 },
- { _MMIO(0x9888), 0x47900802 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900802 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900002 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x43900422 },
- { _MMIO(0x9888), 0x53904444 },
-};
-
-static int
-get_compute_basic_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_compute_basic;
- lens[n] = ARRAY_SIZE(mux_config_compute_basic);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_render_pipe_profile[] = {
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2770), 0x0007ffea },
- { _MMIO(0x2774), 0x00007ffc },
- { _MMIO(0x2778), 0x0007affa },
- { _MMIO(0x277c), 0x0000f5fd },
- { _MMIO(0x2780), 0x00079ffa },
- { _MMIO(0x2784), 0x0000f3fb },
- { _MMIO(0x2788), 0x0007bf7a },
- { _MMIO(0x278c), 0x0000f7e7 },
- { _MMIO(0x2790), 0x0007fefa },
- { _MMIO(0x2794), 0x0000f7cf },
- { _MMIO(0x2798), 0x00077ffa },
- { _MMIO(0x279c), 0x0000efdf },
- { _MMIO(0x27a0), 0x0006fffa },
- { _MMIO(0x27a4), 0x0000cfbf },
- { _MMIO(0x27a8), 0x0003fffa },
- { _MMIO(0x27ac), 0x00005f7f },
-};
-
-static const struct i915_oa_reg flex_eu_config_render_pipe_profile[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00015014 },
- { _MMIO(0xe658), 0x00025024 },
- { _MMIO(0xe758), 0x00035034 },
- { _MMIO(0xe45c), 0x00045044 },
- { _MMIO(0xe55c), 0x00055054 },
- { _MMIO(0xe65c), 0x00065064 },
-};
-
-static const struct i915_oa_reg mux_config_render_pipe_profile[] = {
- { _MMIO(0x9888), 0x0c0e001f },
- { _MMIO(0x9888), 0x0a0f0000 },
- { _MMIO(0x9888), 0x10116800 },
- { _MMIO(0x9888), 0x178a03e0 },
- { _MMIO(0x9888), 0x11824c00 },
- { _MMIO(0x9888), 0x11830020 },
- { _MMIO(0x9888), 0x13840020 },
- { _MMIO(0x9888), 0x11850019 },
- { _MMIO(0x9888), 0x11860007 },
- { _MMIO(0x9888), 0x01870c40 },
- { _MMIO(0x9888), 0x17880000 },
- { _MMIO(0x9888), 0x022f4000 },
- { _MMIO(0x9888), 0x0a4c0040 },
- { _MMIO(0x9888), 0x0c0d8000 },
- { _MMIO(0x9888), 0x040d4000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x020e5400 },
- { _MMIO(0x9888), 0x000e0000 },
- { _MMIO(0x9888), 0x080f0040 },
- { _MMIO(0x9888), 0x000f0000 },
- { _MMIO(0x9888), 0x100f0000 },
- { _MMIO(0x9888), 0x0e0f0040 },
- { _MMIO(0x9888), 0x0c2c8000 },
- { _MMIO(0x9888), 0x06104000 },
- { _MMIO(0x9888), 0x06110012 },
- { _MMIO(0x9888), 0x06131000 },
- { _MMIO(0x9888), 0x01898000 },
- { _MMIO(0x9888), 0x0d890100 },
- { _MMIO(0x9888), 0x03898000 },
- { _MMIO(0x9888), 0x09808000 },
- { _MMIO(0x9888), 0x0b808000 },
- { _MMIO(0x9888), 0x0380c000 },
- { _MMIO(0x9888), 0x0f8a0075 },
- { _MMIO(0x9888), 0x1d8a0000 },
- { _MMIO(0x9888), 0x118a8000 },
- { _MMIO(0x9888), 0x1b8a4000 },
- { _MMIO(0x9888), 0x138a8000 },
- { _MMIO(0x9888), 0x1d81a000 },
- { _MMIO(0x9888), 0x15818000 },
- { _MMIO(0x9888), 0x17818000 },
- { _MMIO(0x9888), 0x0b820030 },
- { _MMIO(0x9888), 0x07828000 },
- { _MMIO(0x9888), 0x0d824000 },
- { _MMIO(0x9888), 0x0f828000 },
- { _MMIO(0x9888), 0x05824000 },
- { _MMIO(0x9888), 0x0d830003 },
- { _MMIO(0x9888), 0x0583000c },
- { _MMIO(0x9888), 0x09830000 },
- { _MMIO(0x9888), 0x03838000 },
- { _MMIO(0x9888), 0x07838000 },
- { _MMIO(0x9888), 0x0b840980 },
- { _MMIO(0x9888), 0x03844d80 },
- { _MMIO(0x9888), 0x11840000 },
- { _MMIO(0x9888), 0x09848000 },
- { _MMIO(0x9888), 0x09850080 },
- { _MMIO(0x9888), 0x03850003 },
- { _MMIO(0x9888), 0x01850000 },
- { _MMIO(0x9888), 0x07860000 },
- { _MMIO(0x9888), 0x0f860400 },
- { _MMIO(0x9888), 0x09870032 },
- { _MMIO(0x9888), 0x01888052 },
- { _MMIO(0x9888), 0x11880000 },
- { _MMIO(0x9888), 0x09884000 },
- { _MMIO(0x9888), 0x1b931001 },
- { _MMIO(0x9888), 0x1d930001 },
- { _MMIO(0x9888), 0x19934000 },
- { _MMIO(0x9888), 0x1b958000 },
- { _MMIO(0x9888), 0x1d950094 },
- { _MMIO(0x9888), 0x19958000 },
- { _MMIO(0x9888), 0x09e58000 },
- { _MMIO(0x9888), 0x0be58000 },
- { _MMIO(0x9888), 0x03e5c000 },
- { _MMIO(0x9888), 0x0592c000 },
- { _MMIO(0x9888), 0x0b928000 },
- { _MMIO(0x9888), 0x0d924000 },
- { _MMIO(0x9888), 0x0f924000 },
- { _MMIO(0x9888), 0x11928000 },
- { _MMIO(0x9888), 0x1392c000 },
- { _MMIO(0x9888), 0x09924000 },
- { _MMIO(0x9888), 0x01985000 },
- { _MMIO(0x9888), 0x07988000 },
- { _MMIO(0x9888), 0x09981000 },
- { _MMIO(0x9888), 0x0b982000 },
- { _MMIO(0x9888), 0x0d982000 },
- { _MMIO(0x9888), 0x0f989000 },
- { _MMIO(0x9888), 0x05982000 },
- { _MMIO(0x9888), 0x13904000 },
- { _MMIO(0x9888), 0x21904000 },
- { _MMIO(0x9888), 0x23904000 },
- { _MMIO(0x9888), 0x25908000 },
- { _MMIO(0x9888), 0x27904000 },
- { _MMIO(0x9888), 0x29908000 },
- { _MMIO(0x9888), 0x2b904000 },
- { _MMIO(0x9888), 0x2f904000 },
- { _MMIO(0x9888), 0x31904000 },
- { _MMIO(0x9888), 0x15904000 },
- { _MMIO(0x9888), 0x17908000 },
- { _MMIO(0x9888), 0x19908000 },
- { _MMIO(0x9888), 0x1b904000 },
- { _MMIO(0x9888), 0x1190c080 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900440 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x45900400 },
- { _MMIO(0x9888), 0x47900c21 },
- { _MMIO(0x9888), 0x57900400 },
- { _MMIO(0x9888), 0x49900042 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900024 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x43900841 },
- { _MMIO(0x9888), 0x53900400 },
-};
-
-static int
-get_render_pipe_profile_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_render_pipe_profile;
- lens[n] = ARRAY_SIZE(mux_config_render_pipe_profile);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_memory_reads[] = {
- { _MMIO(0x272c), 0xffffffff },
- { _MMIO(0x2728), 0xffffffff },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x271c), 0xffffffff },
- { _MMIO(0x2718), 0xffffffff },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x274c), 0x86543210 },
- { _MMIO(0x2748), 0x86543210 },
- { _MMIO(0x2744), 0x00006667 },
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x275c), 0x86543210 },
- { _MMIO(0x2758), 0x86543210 },
- { _MMIO(0x2754), 0x00006465 },
- { _MMIO(0x2750), 0x00000000 },
- { _MMIO(0x2770), 0x0007f81a },
- { _MMIO(0x2774), 0x0000fe00 },
- { _MMIO(0x2778), 0x0007f82a },
- { _MMIO(0x277c), 0x0000fe00 },
- { _MMIO(0x2780), 0x0007f872 },
- { _MMIO(0x2784), 0x0000fe00 },
- { _MMIO(0x2788), 0x0007f8ba },
- { _MMIO(0x278c), 0x0000fe00 },
- { _MMIO(0x2790), 0x0007f87a },
- { _MMIO(0x2794), 0x0000fe00 },
- { _MMIO(0x2798), 0x0007f8ea },
- { _MMIO(0x279c), 0x0000fe00 },
- { _MMIO(0x27a0), 0x0007f8e2 },
- { _MMIO(0x27a4), 0x0000fe00 },
- { _MMIO(0x27a8), 0x0007f8f2 },
- { _MMIO(0x27ac), 0x0000fe00 },
-};
-
-static const struct i915_oa_reg flex_eu_config_memory_reads[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00015014 },
- { _MMIO(0xe658), 0x00025024 },
- { _MMIO(0xe758), 0x00035034 },
- { _MMIO(0xe45c), 0x00045044 },
- { _MMIO(0xe55c), 0x00055054 },
- { _MMIO(0xe65c), 0x00065064 },
-};
-
-static const struct i915_oa_reg mux_config_memory_reads[] = {
- { _MMIO(0x9888), 0x11810c00 },
- { _MMIO(0x9888), 0x1381001a },
- { _MMIO(0x9888), 0x37906800 },
- { _MMIO(0x9888), 0x3f900064 },
- { _MMIO(0x9888), 0x03811300 },
- { _MMIO(0x9888), 0x05811b12 },
- { _MMIO(0x9888), 0x0781001a },
- { _MMIO(0x9888), 0x1f810000 },
- { _MMIO(0x9888), 0x17810000 },
- { _MMIO(0x9888), 0x19810000 },
- { _MMIO(0x9888), 0x1b810000 },
- { _MMIO(0x9888), 0x1d810000 },
- { _MMIO(0x9888), 0x1b930055 },
- { _MMIO(0x9888), 0x03e58000 },
- { _MMIO(0x9888), 0x05e5c000 },
- { _MMIO(0x9888), 0x07e54000 },
- { _MMIO(0x9888), 0x13900150 },
- { _MMIO(0x9888), 0x21900151 },
- { _MMIO(0x9888), 0x23900152 },
- { _MMIO(0x9888), 0x25900153 },
- { _MMIO(0x9888), 0x27900154 },
- { _MMIO(0x9888), 0x29900155 },
- { _MMIO(0x9888), 0x2b900156 },
- { _MMIO(0x9888), 0x2d900157 },
- { _MMIO(0x9888), 0x2f90015f },
- { _MMIO(0x9888), 0x31900105 },
- { _MMIO(0x9888), 0x15900103 },
- { _MMIO(0x9888), 0x17900101 },
- { _MMIO(0x9888), 0x35900000 },
- { _MMIO(0x9888), 0x19908000 },
- { _MMIO(0x9888), 0x1b908000 },
- { _MMIO(0x9888), 0x1d908000 },
- { _MMIO(0x9888), 0x1f908000 },
- { _MMIO(0x9888), 0x11900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900c60 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x45900c00 },
- { _MMIO(0x9888), 0x47900c63 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900c63 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900063 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x43900003 },
- { _MMIO(0x9888), 0x53900000 },
-};
-
-static int
-get_memory_reads_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_memory_reads;
- lens[n] = ARRAY_SIZE(mux_config_memory_reads);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_memory_writes[] = {
- { _MMIO(0x272c), 0xffffffff },
- { _MMIO(0x2728), 0xffffffff },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x271c), 0xffffffff },
- { _MMIO(0x2718), 0xffffffff },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x274c), 0x86543210 },
- { _MMIO(0x2748), 0x86543210 },
- { _MMIO(0x2744), 0x00006667 },
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x275c), 0x86543210 },
- { _MMIO(0x2758), 0x86543210 },
- { _MMIO(0x2754), 0x00006465 },
- { _MMIO(0x2750), 0x00000000 },
- { _MMIO(0x2770), 0x0007f81a },
- { _MMIO(0x2774), 0x0000fe00 },
- { _MMIO(0x2778), 0x0007f82a },
- { _MMIO(0x277c), 0x0000fe00 },
- { _MMIO(0x2780), 0x0007f822 },
- { _MMIO(0x2784), 0x0000fe00 },
- { _MMIO(0x2788), 0x0007f8ba },
- { _MMIO(0x278c), 0x0000fe00 },
- { _MMIO(0x2790), 0x0007f87a },
- { _MMIO(0x2794), 0x0000fe00 },
- { _MMIO(0x2798), 0x0007f8ea },
- { _MMIO(0x279c), 0x0000fe00 },
- { _MMIO(0x27a0), 0x0007f8e2 },
- { _MMIO(0x27a4), 0x0000fe00 },
- { _MMIO(0x27a8), 0x0007f8f2 },
- { _MMIO(0x27ac), 0x0000fe00 },
-};
-
-static const struct i915_oa_reg flex_eu_config_memory_writes[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00015014 },
- { _MMIO(0xe658), 0x00025024 },
- { _MMIO(0xe758), 0x00035034 },
- { _MMIO(0xe45c), 0x00045044 },
- { _MMIO(0xe55c), 0x00055054 },
- { _MMIO(0xe65c), 0x00065064 },
-};
-
-static const struct i915_oa_reg mux_config_memory_writes[] = {
- { _MMIO(0x9888), 0x11810c00 },
- { _MMIO(0x9888), 0x1381001a },
- { _MMIO(0x9888), 0x37906800 },
- { _MMIO(0x9888), 0x3f901000 },
- { _MMIO(0x9888), 0x03811300 },
- { _MMIO(0x9888), 0x05811b12 },
- { _MMIO(0x9888), 0x0781001a },
- { _MMIO(0x9888), 0x1f810000 },
- { _MMIO(0x9888), 0x17810000 },
- { _MMIO(0x9888), 0x19810000 },
- { _MMIO(0x9888), 0x1b810000 },
- { _MMIO(0x9888), 0x1d810000 },
- { _MMIO(0x9888), 0x1b930055 },
- { _MMIO(0x9888), 0x03e58000 },
- { _MMIO(0x9888), 0x05e5c000 },
- { _MMIO(0x9888), 0x07e54000 },
- { _MMIO(0x9888), 0x13900160 },
- { _MMIO(0x9888), 0x21900161 },
- { _MMIO(0x9888), 0x23900162 },
- { _MMIO(0x9888), 0x25900163 },
- { _MMIO(0x9888), 0x27900164 },
- { _MMIO(0x9888), 0x29900165 },
- { _MMIO(0x9888), 0x2b900166 },
- { _MMIO(0x9888), 0x2d900167 },
- { _MMIO(0x9888), 0x2f900150 },
- { _MMIO(0x9888), 0x31900105 },
- { _MMIO(0x9888), 0x15900103 },
- { _MMIO(0x9888), 0x17900101 },
- { _MMIO(0x9888), 0x35900000 },
- { _MMIO(0x9888), 0x19908000 },
- { _MMIO(0x9888), 0x1b908000 },
- { _MMIO(0x9888), 0x1d908000 },
- { _MMIO(0x9888), 0x1f908000 },
- { _MMIO(0x9888), 0x11900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900c60 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x45900c00 },
- { _MMIO(0x9888), 0x47900c63 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900c63 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900063 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x43900003 },
- { _MMIO(0x9888), 0x53900000 },
-};
-
-static int
-get_memory_writes_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_memory_writes;
- lens[n] = ARRAY_SIZE(mux_config_memory_writes);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_compute_extended[] = {
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2770), 0x0007fc2a },
- { _MMIO(0x2774), 0x0000bf00 },
- { _MMIO(0x2778), 0x0007fc6a },
- { _MMIO(0x277c), 0x0000bf00 },
- { _MMIO(0x2780), 0x0007fc92 },
- { _MMIO(0x2784), 0x0000bf00 },
- { _MMIO(0x2788), 0x0007fca2 },
- { _MMIO(0x278c), 0x0000bf00 },
- { _MMIO(0x2790), 0x0007fc32 },
- { _MMIO(0x2794), 0x0000bf00 },
- { _MMIO(0x2798), 0x0007fc9a },
- { _MMIO(0x279c), 0x0000bf00 },
- { _MMIO(0x27a0), 0x0007fe6a },
- { _MMIO(0x27a4), 0x0000bf00 },
- { _MMIO(0x27a8), 0x0007fe7a },
- { _MMIO(0x27ac), 0x0000bf00 },
-};
-
-static const struct i915_oa_reg flex_eu_config_compute_extended[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00000003 },
- { _MMIO(0xe658), 0x00002001 },
- { _MMIO(0xe758), 0x00778008 },
- { _MMIO(0xe45c), 0x00088078 },
- { _MMIO(0xe55c), 0x00808708 },
- { _MMIO(0xe65c), 0x00a08908 },
-};
-
-static const struct i915_oa_reg mux_config_compute_extended[] = {
- { _MMIO(0x9888), 0x106c00e0 },
- { _MMIO(0x9888), 0x141c8160 },
- { _MMIO(0x9888), 0x161c8015 },
- { _MMIO(0x9888), 0x181c0120 },
- { _MMIO(0x9888), 0x004e8000 },
- { _MMIO(0x9888), 0x0e4e8000 },
- { _MMIO(0x9888), 0x184e8000 },
- { _MMIO(0x9888), 0x1a4eaaa0 },
- { _MMIO(0x9888), 0x1c4e0002 },
- { _MMIO(0x9888), 0x024e8000 },
- { _MMIO(0x9888), 0x044e8000 },
- { _MMIO(0x9888), 0x064e8000 },
- { _MMIO(0x9888), 0x084e8000 },
- { _MMIO(0x9888), 0x0a4e8000 },
- { _MMIO(0x9888), 0x0e6c0b01 },
- { _MMIO(0x9888), 0x006c0200 },
- { _MMIO(0x9888), 0x026c000c },
- { _MMIO(0x9888), 0x1c6c0000 },
- { _MMIO(0x9888), 0x1e6c0000 },
- { _MMIO(0x9888), 0x1a6c0000 },
- { _MMIO(0x9888), 0x0e1bc000 },
- { _MMIO(0x9888), 0x001b8000 },
- { _MMIO(0x9888), 0x021bc000 },
- { _MMIO(0x9888), 0x001c0041 },
- { _MMIO(0x9888), 0x061c4200 },
- { _MMIO(0x9888), 0x081c4443 },
- { _MMIO(0x9888), 0x0a1c4645 },
- { _MMIO(0x9888), 0x0c1c7647 },
- { _MMIO(0x9888), 0x041c7357 },
- { _MMIO(0x9888), 0x1c1c0030 },
- { _MMIO(0x9888), 0x101c0000 },
- { _MMIO(0x9888), 0x1a1c0000 },
- { _MMIO(0x9888), 0x121c8000 },
- { _MMIO(0x9888), 0x004c8000 },
- { _MMIO(0x9888), 0x0a4caa2a },
- { _MMIO(0x9888), 0x0c4c02aa },
- { _MMIO(0x9888), 0x084ca000 },
- { _MMIO(0x9888), 0x000da000 },
- { _MMIO(0x9888), 0x060d8000 },
- { _MMIO(0x9888), 0x080da000 },
- { _MMIO(0x9888), 0x0a0da000 },
- { _MMIO(0x9888), 0x0c0da000 },
- { _MMIO(0x9888), 0x0e0da000 },
- { _MMIO(0x9888), 0x020da000 },
- { _MMIO(0x9888), 0x040da000 },
- { _MMIO(0x9888), 0x0c0f5400 },
- { _MMIO(0x9888), 0x0e0f5515 },
- { _MMIO(0x9888), 0x100f0155 },
- { _MMIO(0x9888), 0x002c8000 },
- { _MMIO(0x9888), 0x0e2c8000 },
- { _MMIO(0x9888), 0x162caa00 },
- { _MMIO(0x9888), 0x182c00aa },
- { _MMIO(0x9888), 0x022c8000 },
- { _MMIO(0x9888), 0x042c8000 },
- { _MMIO(0x9888), 0x062c8000 },
- { _MMIO(0x9888), 0x082c8000 },
- { _MMIO(0x9888), 0x0a2c8000 },
- { _MMIO(0x9888), 0x11907fff },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900040 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x45900802 },
- { _MMIO(0x9888), 0x47900842 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900842 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900000 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x43900800 },
- { _MMIO(0x9888), 0x53900000 },
-};
-
-static int
-get_compute_extended_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_compute_extended;
- lens[n] = ARRAY_SIZE(mux_config_compute_extended);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_compute_l3_cache[] = {
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x30800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x30800000 },
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2770), 0x0007fffa },
- { _MMIO(0x2774), 0x0000fefe },
- { _MMIO(0x2778), 0x0007fffa },
- { _MMIO(0x277c), 0x0000fefd },
- { _MMIO(0x2790), 0x0007fffa },
- { _MMIO(0x2794), 0x0000fbef },
- { _MMIO(0x2798), 0x0007fffa },
- { _MMIO(0x279c), 0x0000fbdf },
-};
-
-static const struct i915_oa_reg flex_eu_config_compute_l3_cache[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00000003 },
- { _MMIO(0xe658), 0x00002001 },
- { _MMIO(0xe758), 0x00101100 },
- { _MMIO(0xe45c), 0x00201200 },
- { _MMIO(0xe55c), 0x00301300 },
- { _MMIO(0xe65c), 0x00401400 },
-};
-
-static const struct i915_oa_reg mux_config_compute_l3_cache[] = {
- { _MMIO(0x9888), 0x166c0760 },
- { _MMIO(0x9888), 0x1593001e },
- { _MMIO(0x9888), 0x3f900003 },
- { _MMIO(0x9888), 0x004e8000 },
- { _MMIO(0x9888), 0x0e4e8000 },
- { _MMIO(0x9888), 0x184e8000 },
- { _MMIO(0x9888), 0x1a4e8020 },
- { _MMIO(0x9888), 0x1c4e0002 },
- { _MMIO(0x9888), 0x006c0051 },
- { _MMIO(0x9888), 0x066c5000 },
- { _MMIO(0x9888), 0x086c5c5d },
- { _MMIO(0x9888), 0x0e6c5e5f },
- { _MMIO(0x9888), 0x106c0000 },
- { _MMIO(0x9888), 0x186c0000 },
- { _MMIO(0x9888), 0x1c6c0000 },
- { _MMIO(0x9888), 0x1e6c0000 },
- { _MMIO(0x9888), 0x001b4000 },
- { _MMIO(0x9888), 0x061b8000 },
- { _MMIO(0x9888), 0x081bc000 },
- { _MMIO(0x9888), 0x0e1bc000 },
- { _MMIO(0x9888), 0x101c8000 },
- { _MMIO(0x9888), 0x1a1ce000 },
- { _MMIO(0x9888), 0x1c1c0030 },
- { _MMIO(0x9888), 0x004c8000 },
- { _MMIO(0x9888), 0x0a4c2a00 },
- { _MMIO(0x9888), 0x0c4c0280 },
- { _MMIO(0x9888), 0x000d2000 },
- { _MMIO(0x9888), 0x060d8000 },
- { _MMIO(0x9888), 0x080da000 },
- { _MMIO(0x9888), 0x0e0da000 },
- { _MMIO(0x9888), 0x0c0f0400 },
- { _MMIO(0x9888), 0x0e0f1500 },
- { _MMIO(0x9888), 0x100f0140 },
- { _MMIO(0x9888), 0x002c8000 },
- { _MMIO(0x9888), 0x0e2c8000 },
- { _MMIO(0x9888), 0x162c0a00 },
- { _MMIO(0x9888), 0x182c00a0 },
- { _MMIO(0x9888), 0x03933300 },
- { _MMIO(0x9888), 0x05930032 },
- { _MMIO(0x9888), 0x11930000 },
- { _MMIO(0x9888), 0x1b930000 },
- { _MMIO(0x9888), 0x1d900157 },
- { _MMIO(0x9888), 0x1f900158 },
- { _MMIO(0x9888), 0x35900000 },
- { _MMIO(0x9888), 0x19908000 },
- { _MMIO(0x9888), 0x1b908000 },
- { _MMIO(0x9888), 0x1190030f },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900000 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x45900021 },
- { _MMIO(0x9888), 0x47900000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x4b900000 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x53904444 },
- { _MMIO(0x9888), 0x43900000 },
-};
-
-static int
-get_compute_l3_cache_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_compute_l3_cache;
- lens[n] = ARRAY_SIZE(mux_config_compute_l3_cache);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_hdc_and_sf[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x10800000 },
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2770), 0x00000002 },
- { _MMIO(0x2774), 0x0000fdff },
-};
-
-static const struct i915_oa_reg flex_eu_config_hdc_and_sf[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_hdc_and_sf[] = {
- { _MMIO(0x9888), 0x104f0232 },
- { _MMIO(0x9888), 0x124f4640 },
- { _MMIO(0x9888), 0x106c0232 },
- { _MMIO(0x9888), 0x11834400 },
- { _MMIO(0x9888), 0x0a4e8000 },
- { _MMIO(0x9888), 0x0c4e8000 },
- { _MMIO(0x9888), 0x004f1880 },
- { _MMIO(0x9888), 0x024f08bb },
- { _MMIO(0x9888), 0x044f001b },
- { _MMIO(0x9888), 0x046c0100 },
- { _MMIO(0x9888), 0x066c000b },
- { _MMIO(0x9888), 0x1a6c0000 },
- { _MMIO(0x9888), 0x041b8000 },
- { _MMIO(0x9888), 0x061b4000 },
- { _MMIO(0x9888), 0x1a1c1800 },
- { _MMIO(0x9888), 0x005b8000 },
- { _MMIO(0x9888), 0x025bc000 },
- { _MMIO(0x9888), 0x045b4000 },
- { _MMIO(0x9888), 0x125c8000 },
- { _MMIO(0x9888), 0x145c8000 },
- { _MMIO(0x9888), 0x165c8000 },
- { _MMIO(0x9888), 0x185c8000 },
- { _MMIO(0x9888), 0x0a4c00a0 },
- { _MMIO(0x9888), 0x000d8000 },
- { _MMIO(0x9888), 0x020da000 },
- { _MMIO(0x9888), 0x040da000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x0c0f5000 },
- { _MMIO(0x9888), 0x0e0f0055 },
- { _MMIO(0x9888), 0x022cc000 },
- { _MMIO(0x9888), 0x042cc000 },
- { _MMIO(0x9888), 0x062cc000 },
- { _MMIO(0x9888), 0x082cc000 },
- { _MMIO(0x9888), 0x0a2c8000 },
- { _MMIO(0x9888), 0x0c2c8000 },
- { _MMIO(0x9888), 0x0f828000 },
- { _MMIO(0x9888), 0x0f8305c0 },
- { _MMIO(0x9888), 0x09830000 },
- { _MMIO(0x9888), 0x07830000 },
- { _MMIO(0x9888), 0x1d950080 },
- { _MMIO(0x9888), 0x13928000 },
- { _MMIO(0x9888), 0x0f988000 },
- { _MMIO(0x9888), 0x31904000 },
- { _MMIO(0x9888), 0x1190fc00 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x4b900040 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900800 },
- { _MMIO(0x9888), 0x43900842 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900000 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static int
-get_hdc_and_sf_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_hdc_and_sf;
- lens[n] = ARRAY_SIZE(mux_config_hdc_and_sf);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_l3_1[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2770), 0x00100070 },
- { _MMIO(0x2774), 0x0000fff1 },
- { _MMIO(0x2778), 0x00014002 },
- { _MMIO(0x277c), 0x0000c3ff },
- { _MMIO(0x2780), 0x00010002 },
- { _MMIO(0x2784), 0x0000c7ff },
- { _MMIO(0x2788), 0x00004002 },
- { _MMIO(0x278c), 0x0000d3ff },
- { _MMIO(0x2790), 0x00100700 },
- { _MMIO(0x2794), 0x0000ff1f },
- { _MMIO(0x2798), 0x00001402 },
- { _MMIO(0x279c), 0x0000fc3f },
- { _MMIO(0x27a0), 0x00001002 },
- { _MMIO(0x27a4), 0x0000fc7f },
- { _MMIO(0x27a8), 0x00000402 },
- { _MMIO(0x27ac), 0x0000fd3f },
-};
-
-static const struct i915_oa_reg flex_eu_config_l3_1[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_l3_1[] = {
- { _MMIO(0x9888), 0x126c7b40 },
- { _MMIO(0x9888), 0x166c0020 },
- { _MMIO(0x9888), 0x0a603444 },
- { _MMIO(0x9888), 0x0a613400 },
- { _MMIO(0x9888), 0x1a4ea800 },
- { _MMIO(0x9888), 0x1c4e0002 },
- { _MMIO(0x9888), 0x024e8000 },
- { _MMIO(0x9888), 0x044e8000 },
- { _MMIO(0x9888), 0x064e8000 },
- { _MMIO(0x9888), 0x084e8000 },
- { _MMIO(0x9888), 0x0a4e8000 },
- { _MMIO(0x9888), 0x064f4000 },
- { _MMIO(0x9888), 0x0c6c5327 },
- { _MMIO(0x9888), 0x0e6c5425 },
- { _MMIO(0x9888), 0x006c2a00 },
- { _MMIO(0x9888), 0x026c285b },
- { _MMIO(0x9888), 0x046c005c },
- { _MMIO(0x9888), 0x106c0000 },
- { _MMIO(0x9888), 0x1c6c0000 },
- { _MMIO(0x9888), 0x1e6c0000 },
- { _MMIO(0x9888), 0x1a6c0800 },
- { _MMIO(0x9888), 0x0c1bc000 },
- { _MMIO(0x9888), 0x0e1bc000 },
- { _MMIO(0x9888), 0x001b8000 },
- { _MMIO(0x9888), 0x021bc000 },
- { _MMIO(0x9888), 0x041bc000 },
- { _MMIO(0x9888), 0x1c1c003c },
- { _MMIO(0x9888), 0x121c8000 },
- { _MMIO(0x9888), 0x141c8000 },
- { _MMIO(0x9888), 0x161c8000 },
- { _MMIO(0x9888), 0x181c8000 },
- { _MMIO(0x9888), 0x1a1c0800 },
- { _MMIO(0x9888), 0x065b4000 },
- { _MMIO(0x9888), 0x1a5c1000 },
- { _MMIO(0x9888), 0x10600000 },
- { _MMIO(0x9888), 0x04600000 },
- { _MMIO(0x9888), 0x0c610044 },
- { _MMIO(0x9888), 0x10610000 },
- { _MMIO(0x9888), 0x06610000 },
- { _MMIO(0x9888), 0x0c4c02a8 },
- { _MMIO(0x9888), 0x084ca000 },
- { _MMIO(0x9888), 0x0a4c002a },
- { _MMIO(0x9888), 0x0c0da000 },
- { _MMIO(0x9888), 0x0e0da000 },
- { _MMIO(0x9888), 0x000d8000 },
- { _MMIO(0x9888), 0x020da000 },
- { _MMIO(0x9888), 0x040da000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x100f0154 },
- { _MMIO(0x9888), 0x0c0f5000 },
- { _MMIO(0x9888), 0x0e0f0055 },
- { _MMIO(0x9888), 0x182c00aa },
- { _MMIO(0x9888), 0x022c8000 },
- { _MMIO(0x9888), 0x042c8000 },
- { _MMIO(0x9888), 0x062c8000 },
- { _MMIO(0x9888), 0x082c8000 },
- { _MMIO(0x9888), 0x0a2c8000 },
- { _MMIO(0x9888), 0x0c2cc000 },
- { _MMIO(0x9888), 0x1190ffc0 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900420 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900021 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900400 },
- { _MMIO(0x9888), 0x43900421 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900040 },
-};
-
-static int
-get_l3_1_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_l3_1;
- lens[n] = ARRAY_SIZE(mux_config_l3_1);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_l3_2[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2770), 0x00100070 },
- { _MMIO(0x2774), 0x0000fff1 },
- { _MMIO(0x2778), 0x00028002 },
- { _MMIO(0x277c), 0x000087ff },
- { _MMIO(0x2780), 0x00020002 },
- { _MMIO(0x2784), 0x00008fff },
- { _MMIO(0x2788), 0x00008002 },
- { _MMIO(0x278c), 0x0000a7ff },
-};
-
-static const struct i915_oa_reg flex_eu_config_l3_2[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_l3_2[] = {
- { _MMIO(0x9888), 0x126c02e0 },
- { _MMIO(0x9888), 0x146c0001 },
- { _MMIO(0x9888), 0x0a623400 },
- { _MMIO(0x9888), 0x044e8000 },
- { _MMIO(0x9888), 0x064e8000 },
- { _MMIO(0x9888), 0x084e8000 },
- { _MMIO(0x9888), 0x0a4e8000 },
- { _MMIO(0x9888), 0x064f4000 },
- { _MMIO(0x9888), 0x026c3324 },
- { _MMIO(0x9888), 0x046c3422 },
- { _MMIO(0x9888), 0x106c0000 },
- { _MMIO(0x9888), 0x1a6c0000 },
- { _MMIO(0x9888), 0x021bc000 },
- { _MMIO(0x9888), 0x041bc000 },
- { _MMIO(0x9888), 0x141c8000 },
- { _MMIO(0x9888), 0x161c8000 },
- { _MMIO(0x9888), 0x181c8000 },
- { _MMIO(0x9888), 0x1a1c0800 },
- { _MMIO(0x9888), 0x065b4000 },
- { _MMIO(0x9888), 0x1a5c1000 },
- { _MMIO(0x9888), 0x06614000 },
- { _MMIO(0x9888), 0x0c620044 },
- { _MMIO(0x9888), 0x10620000 },
- { _MMIO(0x9888), 0x06620000 },
- { _MMIO(0x9888), 0x084c8000 },
- { _MMIO(0x9888), 0x0a4c002a },
- { _MMIO(0x9888), 0x020da000 },
- { _MMIO(0x9888), 0x040da000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x0c0f4000 },
- { _MMIO(0x9888), 0x0e0f0055 },
- { _MMIO(0x9888), 0x042c8000 },
- { _MMIO(0x9888), 0x062c8000 },
- { _MMIO(0x9888), 0x082c8000 },
- { _MMIO(0x9888), 0x0a2c8000 },
- { _MMIO(0x9888), 0x0c2cc000 },
- { _MMIO(0x9888), 0x1190f800 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x43900000 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900000 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static int
-get_l3_2_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_l3_2;
- lens[n] = ARRAY_SIZE(mux_config_l3_2);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_l3_3[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2770), 0x00100070 },
- { _MMIO(0x2774), 0x0000fff1 },
- { _MMIO(0x2778), 0x00028002 },
- { _MMIO(0x277c), 0x000087ff },
- { _MMIO(0x2780), 0x00020002 },
- { _MMIO(0x2784), 0x00008fff },
- { _MMIO(0x2788), 0x00008002 },
- { _MMIO(0x278c), 0x0000a7ff },
-};
-
-static const struct i915_oa_reg flex_eu_config_l3_3[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_l3_3[] = {
- { _MMIO(0x9888), 0x126c4e80 },
- { _MMIO(0x9888), 0x146c0000 },
- { _MMIO(0x9888), 0x0a633400 },
- { _MMIO(0x9888), 0x044e8000 },
- { _MMIO(0x9888), 0x064e8000 },
- { _MMIO(0x9888), 0x084e8000 },
- { _MMIO(0x9888), 0x0a4e8000 },
- { _MMIO(0x9888), 0x0c4e8000 },
- { _MMIO(0x9888), 0x026c3321 },
- { _MMIO(0x9888), 0x046c342f },
- { _MMIO(0x9888), 0x106c0000 },
- { _MMIO(0x9888), 0x1a6c2000 },
- { _MMIO(0x9888), 0x021bc000 },
- { _MMIO(0x9888), 0x041bc000 },
- { _MMIO(0x9888), 0x061b4000 },
- { _MMIO(0x9888), 0x141c8000 },
- { _MMIO(0x9888), 0x161c8000 },
- { _MMIO(0x9888), 0x181c8000 },
- { _MMIO(0x9888), 0x1a1c1800 },
- { _MMIO(0x9888), 0x06604000 },
- { _MMIO(0x9888), 0x0c630044 },
- { _MMIO(0x9888), 0x10630000 },
- { _MMIO(0x9888), 0x06630000 },
- { _MMIO(0x9888), 0x084c8000 },
- { _MMIO(0x9888), 0x0a4c00aa },
- { _MMIO(0x9888), 0x020da000 },
- { _MMIO(0x9888), 0x040da000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x0c0f4000 },
- { _MMIO(0x9888), 0x0e0f0055 },
- { _MMIO(0x9888), 0x042c8000 },
- { _MMIO(0x9888), 0x062c8000 },
- { _MMIO(0x9888), 0x082c8000 },
- { _MMIO(0x9888), 0x0a2c8000 },
- { _MMIO(0x9888), 0x0c2c8000 },
- { _MMIO(0x9888), 0x1190f800 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x43900842 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900002 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static int
-get_l3_3_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_l3_3;
- lens[n] = ARRAY_SIZE(mux_config_l3_3);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_rasterizer_and_pixel_backend[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x30800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2770), 0x00000002 },
- { _MMIO(0x2774), 0x0000efff },
- { _MMIO(0x2778), 0x00006000 },
- { _MMIO(0x277c), 0x0000f3ff },
-};
-
-static const struct i915_oa_reg flex_eu_config_rasterizer_and_pixel_backend[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_rasterizer_and_pixel_backend[] = {
- { _MMIO(0x9888), 0x102f3800 },
- { _MMIO(0x9888), 0x144d0500 },
- { _MMIO(0x9888), 0x120d03c0 },
- { _MMIO(0x9888), 0x140d03cf },
- { _MMIO(0x9888), 0x0c0f0004 },
- { _MMIO(0x9888), 0x0c4e4000 },
- { _MMIO(0x9888), 0x042f0480 },
- { _MMIO(0x9888), 0x082f0000 },
- { _MMIO(0x9888), 0x022f0000 },
- { _MMIO(0x9888), 0x0a4c0090 },
- { _MMIO(0x9888), 0x064d0027 },
- { _MMIO(0x9888), 0x004d0000 },
- { _MMIO(0x9888), 0x000d0d40 },
- { _MMIO(0x9888), 0x020d803f },
- { _MMIO(0x9888), 0x040d8023 },
- { _MMIO(0x9888), 0x100d0000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x020f0010 },
- { _MMIO(0x9888), 0x000f0000 },
- { _MMIO(0x9888), 0x0e0f0050 },
- { _MMIO(0x9888), 0x0a2c8000 },
- { _MMIO(0x9888), 0x0c2c8000 },
- { _MMIO(0x9888), 0x1190fc00 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41901400 },
- { _MMIO(0x9888), 0x43901485 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900001 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static int
-get_rasterizer_and_pixel_backend_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_rasterizer_and_pixel_backend;
- lens[n] = ARRAY_SIZE(mux_config_rasterizer_and_pixel_backend);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_sampler[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x70800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2770), 0x0000c000 },
- { _MMIO(0x2774), 0x0000e7ff },
- { _MMIO(0x2778), 0x00003000 },
- { _MMIO(0x277c), 0x0000f9ff },
- { _MMIO(0x2780), 0x00000c00 },
- { _MMIO(0x2784), 0x0000fe7f },
-};
-
-static const struct i915_oa_reg flex_eu_config_sampler[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_sampler[] = {
- { _MMIO(0x9888), 0x14152c00 },
- { _MMIO(0x9888), 0x16150005 },
- { _MMIO(0x9888), 0x121600a0 },
- { _MMIO(0x9888), 0x14352c00 },
- { _MMIO(0x9888), 0x16350005 },
- { _MMIO(0x9888), 0x123600a0 },
- { _MMIO(0x9888), 0x14552c00 },
- { _MMIO(0x9888), 0x16550005 },
- { _MMIO(0x9888), 0x125600a0 },
- { _MMIO(0x9888), 0x062f6000 },
- { _MMIO(0x9888), 0x022f2000 },
- { _MMIO(0x9888), 0x0c4c0050 },
- { _MMIO(0x9888), 0x0a4c0010 },
- { _MMIO(0x9888), 0x0c0d8000 },
- { _MMIO(0x9888), 0x0e0da000 },
- { _MMIO(0x9888), 0x000d8000 },
- { _MMIO(0x9888), 0x020da000 },
- { _MMIO(0x9888), 0x040da000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x100f0350 },
- { _MMIO(0x9888), 0x0c0fb000 },
- { _MMIO(0x9888), 0x0e0f00da },
- { _MMIO(0x9888), 0x182c0028 },
- { _MMIO(0x9888), 0x0a2c8000 },
- { _MMIO(0x9888), 0x022dc000 },
- { _MMIO(0x9888), 0x042d4000 },
- { _MMIO(0x9888), 0x0c138000 },
- { _MMIO(0x9888), 0x0e132000 },
- { _MMIO(0x9888), 0x0413c000 },
- { _MMIO(0x9888), 0x1c140018 },
- { _MMIO(0x9888), 0x0c157000 },
- { _MMIO(0x9888), 0x0e150078 },
- { _MMIO(0x9888), 0x10150000 },
- { _MMIO(0x9888), 0x04162180 },
- { _MMIO(0x9888), 0x02160000 },
- { _MMIO(0x9888), 0x04174000 },
- { _MMIO(0x9888), 0x0233a000 },
- { _MMIO(0x9888), 0x04333000 },
- { _MMIO(0x9888), 0x14348000 },
- { _MMIO(0x9888), 0x16348000 },
- { _MMIO(0x9888), 0x02357870 },
- { _MMIO(0x9888), 0x10350000 },
- { _MMIO(0x9888), 0x04360043 },
- { _MMIO(0x9888), 0x02360000 },
- { _MMIO(0x9888), 0x04371000 },
- { _MMIO(0x9888), 0x0e538000 },
- { _MMIO(0x9888), 0x00538000 },
- { _MMIO(0x9888), 0x06533000 },
- { _MMIO(0x9888), 0x1c540020 },
- { _MMIO(0x9888), 0x12548000 },
- { _MMIO(0x9888), 0x0e557000 },
- { _MMIO(0x9888), 0x00557800 },
- { _MMIO(0x9888), 0x10550000 },
- { _MMIO(0x9888), 0x06560043 },
- { _MMIO(0x9888), 0x02560000 },
- { _MMIO(0x9888), 0x06571000 },
- { _MMIO(0x9888), 0x1190ff80 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900060 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900c00 },
- { _MMIO(0x9888), 0x43900842 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900060 },
-};
-
-static int
-get_sampler_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_sampler;
- lens[n] = ARRAY_SIZE(mux_config_sampler);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_tdl_1[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x30800000 },
- { _MMIO(0x2770), 0x00000002 },
- { _MMIO(0x2774), 0x00007fff },
- { _MMIO(0x2778), 0x00000000 },
- { _MMIO(0x277c), 0x00009fff },
- { _MMIO(0x2780), 0x00000002 },
- { _MMIO(0x2784), 0x0000efff },
- { _MMIO(0x2788), 0x00000000 },
- { _MMIO(0x278c), 0x0000f3ff },
- { _MMIO(0x2790), 0x00000002 },
- { _MMIO(0x2794), 0x0000fdff },
- { _MMIO(0x2798), 0x00000000 },
- { _MMIO(0x279c), 0x0000fe7f },
-};
-
-static const struct i915_oa_reg flex_eu_config_tdl_1[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_tdl_1[] = {
- { _MMIO(0x9888), 0x12120000 },
- { _MMIO(0x9888), 0x12320000 },
- { _MMIO(0x9888), 0x12520000 },
- { _MMIO(0x9888), 0x002f8000 },
- { _MMIO(0x9888), 0x022f3000 },
- { _MMIO(0x9888), 0x0a4c0015 },
- { _MMIO(0x9888), 0x0c0d8000 },
- { _MMIO(0x9888), 0x0e0da000 },
- { _MMIO(0x9888), 0x000d8000 },
- { _MMIO(0x9888), 0x020da000 },
- { _MMIO(0x9888), 0x040da000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x100f03a0 },
- { _MMIO(0x9888), 0x0c0ff000 },
- { _MMIO(0x9888), 0x0e0f0095 },
- { _MMIO(0x9888), 0x062c8000 },
- { _MMIO(0x9888), 0x082c8000 },
- { _MMIO(0x9888), 0x0a2c8000 },
- { _MMIO(0x9888), 0x0c2d8000 },
- { _MMIO(0x9888), 0x0e2d4000 },
- { _MMIO(0x9888), 0x062d4000 },
- { _MMIO(0x9888), 0x02108000 },
- { _MMIO(0x9888), 0x0410c000 },
- { _MMIO(0x9888), 0x02118000 },
- { _MMIO(0x9888), 0x0411c000 },
- { _MMIO(0x9888), 0x02121880 },
- { _MMIO(0x9888), 0x041219b5 },
- { _MMIO(0x9888), 0x00120000 },
- { _MMIO(0x9888), 0x02134000 },
- { _MMIO(0x9888), 0x04135000 },
- { _MMIO(0x9888), 0x0c308000 },
- { _MMIO(0x9888), 0x0e304000 },
- { _MMIO(0x9888), 0x06304000 },
- { _MMIO(0x9888), 0x0c318000 },
- { _MMIO(0x9888), 0x0e314000 },
- { _MMIO(0x9888), 0x06314000 },
- { _MMIO(0x9888), 0x0c321a80 },
- { _MMIO(0x9888), 0x0e320033 },
- { _MMIO(0x9888), 0x06320031 },
- { _MMIO(0x9888), 0x00320000 },
- { _MMIO(0x9888), 0x0c334000 },
- { _MMIO(0x9888), 0x0e331000 },
- { _MMIO(0x9888), 0x06331000 },
- { _MMIO(0x9888), 0x0e508000 },
- { _MMIO(0x9888), 0x00508000 },
- { _MMIO(0x9888), 0x02504000 },
- { _MMIO(0x9888), 0x0e518000 },
- { _MMIO(0x9888), 0x00518000 },
- { _MMIO(0x9888), 0x02514000 },
- { _MMIO(0x9888), 0x0e521880 },
- { _MMIO(0x9888), 0x00521a80 },
- { _MMIO(0x9888), 0x02520033 },
- { _MMIO(0x9888), 0x0e534000 },
- { _MMIO(0x9888), 0x00534000 },
- { _MMIO(0x9888), 0x02531000 },
- { _MMIO(0x9888), 0x1190ff80 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900800 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900062 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900c00 },
- { _MMIO(0x9888), 0x43900003 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900040 },
-};
-
-static int
-get_tdl_1_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_tdl_1;
- lens[n] = ARRAY_SIZE(mux_config_tdl_1);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_tdl_2[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x00800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
-};
-
-static const struct i915_oa_reg flex_eu_config_tdl_2[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_tdl_2[] = {
- { _MMIO(0x9888), 0x12124d60 },
- { _MMIO(0x9888), 0x12322e60 },
- { _MMIO(0x9888), 0x12524d60 },
- { _MMIO(0x9888), 0x022f3000 },
- { _MMIO(0x9888), 0x0a4c0014 },
- { _MMIO(0x9888), 0x000d8000 },
- { _MMIO(0x9888), 0x020da000 },
- { _MMIO(0x9888), 0x040da000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x0c0fe000 },
- { _MMIO(0x9888), 0x0e0f0097 },
- { _MMIO(0x9888), 0x082c8000 },
- { _MMIO(0x9888), 0x0a2c8000 },
- { _MMIO(0x9888), 0x002d8000 },
- { _MMIO(0x9888), 0x062d4000 },
- { _MMIO(0x9888), 0x0410c000 },
- { _MMIO(0x9888), 0x0411c000 },
- { _MMIO(0x9888), 0x04121fb7 },
- { _MMIO(0x9888), 0x00120000 },
- { _MMIO(0x9888), 0x04135000 },
- { _MMIO(0x9888), 0x00308000 },
- { _MMIO(0x9888), 0x06304000 },
- { _MMIO(0x9888), 0x00318000 },
- { _MMIO(0x9888), 0x06314000 },
- { _MMIO(0x9888), 0x00321b80 },
- { _MMIO(0x9888), 0x0632003f },
- { _MMIO(0x9888), 0x00334000 },
- { _MMIO(0x9888), 0x06331000 },
- { _MMIO(0x9888), 0x0250c000 },
- { _MMIO(0x9888), 0x0251c000 },
- { _MMIO(0x9888), 0x02521fb7 },
- { _MMIO(0x9888), 0x00520000 },
- { _MMIO(0x9888), 0x02535000 },
- { _MMIO(0x9888), 0x1190fc00 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900800 },
- { _MMIO(0x9888), 0x43900063 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900040 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static int
-get_tdl_2_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_tdl_2;
- lens[n] = ARRAY_SIZE(mux_config_tdl_2);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_compute_extra[] = {
-};
-
-static const struct i915_oa_reg flex_eu_config_compute_extra[] = {
-};
-
-static const struct i915_oa_reg mux_config_compute_extra[] = {
- { _MMIO(0x9888), 0x121203e0 },
- { _MMIO(0x9888), 0x123203e0 },
- { _MMIO(0x9888), 0x125203e0 },
- { _MMIO(0x9888), 0x129203e0 },
- { _MMIO(0x9888), 0x12b203e0 },
- { _MMIO(0x9888), 0x12d203e0 },
- { _MMIO(0x9888), 0x024ec000 },
- { _MMIO(0x9888), 0x044ec000 },
- { _MMIO(0x9888), 0x064ec000 },
- { _MMIO(0x9888), 0x022f4000 },
- { _MMIO(0x9888), 0x084ca000 },
- { _MMIO(0x9888), 0x0a4c0042 },
- { _MMIO(0x9888), 0x000d8000 },
- { _MMIO(0x9888), 0x020da000 },
- { _MMIO(0x9888), 0x040da000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x0c0f5000 },
- { _MMIO(0x9888), 0x0e0f006d },
- { _MMIO(0x9888), 0x022c8000 },
- { _MMIO(0x9888), 0x042c8000 },
- { _MMIO(0x9888), 0x062c8000 },
- { _MMIO(0x9888), 0x0c2c8000 },
- { _MMIO(0x9888), 0x042d8000 },
- { _MMIO(0x9888), 0x06104000 },
- { _MMIO(0x9888), 0x06114000 },
- { _MMIO(0x9888), 0x06120033 },
- { _MMIO(0x9888), 0x00120000 },
- { _MMIO(0x9888), 0x06131000 },
- { _MMIO(0x9888), 0x04308000 },
- { _MMIO(0x9888), 0x04318000 },
- { _MMIO(0x9888), 0x04321980 },
- { _MMIO(0x9888), 0x00320000 },
- { _MMIO(0x9888), 0x04334000 },
- { _MMIO(0x9888), 0x04504000 },
- { _MMIO(0x9888), 0x04514000 },
- { _MMIO(0x9888), 0x04520033 },
- { _MMIO(0x9888), 0x00520000 },
- { _MMIO(0x9888), 0x04531000 },
- { _MMIO(0x9888), 0x00af8000 },
- { _MMIO(0x9888), 0x0acc0001 },
- { _MMIO(0x9888), 0x008d8000 },
- { _MMIO(0x9888), 0x028da000 },
- { _MMIO(0x9888), 0x0c8fb000 },
- { _MMIO(0x9888), 0x0e8f0001 },
- { _MMIO(0x9888), 0x06ac8000 },
- { _MMIO(0x9888), 0x02ad4000 },
- { _MMIO(0x9888), 0x02908000 },
- { _MMIO(0x9888), 0x02918000 },
- { _MMIO(0x9888), 0x02921980 },
- { _MMIO(0x9888), 0x00920000 },
- { _MMIO(0x9888), 0x02934000 },
- { _MMIO(0x9888), 0x02b04000 },
- { _MMIO(0x9888), 0x02b14000 },
- { _MMIO(0x9888), 0x02b20033 },
- { _MMIO(0x9888), 0x00b20000 },
- { _MMIO(0x9888), 0x02b31000 },
- { _MMIO(0x9888), 0x00d08000 },
- { _MMIO(0x9888), 0x00d18000 },
- { _MMIO(0x9888), 0x00d21980 },
- { _MMIO(0x9888), 0x00d34000 },
- { _MMIO(0x9888), 0x1190fc00 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900c00 },
- { _MMIO(0x9888), 0x43900002 },
- { _MMIO(0x9888), 0x53900420 },
- { _MMIO(0x9888), 0x459000a1 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static int
-get_compute_extra_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_compute_extra;
- lens[n] = ARRAY_SIZE(mux_config_compute_extra);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_vme_pipe[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x30800000 },
- { _MMIO(0x2770), 0x00100030 },
- { _MMIO(0x2774), 0x0000fff9 },
- { _MMIO(0x2778), 0x00000002 },
- { _MMIO(0x277c), 0x0000fffc },
- { _MMIO(0x2780), 0x00000002 },
- { _MMIO(0x2784), 0x0000fff3 },
- { _MMIO(0x2788), 0x00100180 },
- { _MMIO(0x278c), 0x0000ffcf },
- { _MMIO(0x2790), 0x00000002 },
- { _MMIO(0x2794), 0x0000ffcf },
- { _MMIO(0x2798), 0x00000002 },
- { _MMIO(0x279c), 0x0000ff3f },
-};
-
-static const struct i915_oa_reg flex_eu_config_vme_pipe[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00008003 },
-};
-
-static const struct i915_oa_reg mux_config_vme_pipe[] = {
- { _MMIO(0x9888), 0x141a5800 },
- { _MMIO(0x9888), 0x161a00c0 },
- { _MMIO(0x9888), 0x12180240 },
- { _MMIO(0x9888), 0x14180002 },
- { _MMIO(0x9888), 0x149a5800 },
- { _MMIO(0x9888), 0x169a00c0 },
- { _MMIO(0x9888), 0x12980240 },
- { _MMIO(0x9888), 0x14980002 },
- { _MMIO(0x9888), 0x1a4e3fc0 },
- { _MMIO(0x9888), 0x002f1000 },
- { _MMIO(0x9888), 0x022f8000 },
- { _MMIO(0x9888), 0x042f3000 },
- { _MMIO(0x9888), 0x004c4000 },
- { _MMIO(0x9888), 0x0a4c9500 },
- { _MMIO(0x9888), 0x0c4c002a },
- { _MMIO(0x9888), 0x000d2000 },
- { _MMIO(0x9888), 0x060d8000 },
- { _MMIO(0x9888), 0x080da000 },
- { _MMIO(0x9888), 0x0a0da000 },
- { _MMIO(0x9888), 0x0c0da000 },
- { _MMIO(0x9888), 0x0c0f0400 },
- { _MMIO(0x9888), 0x0e0f5500 },
- { _MMIO(0x9888), 0x100f0015 },
- { _MMIO(0x9888), 0x002c8000 },
- { _MMIO(0x9888), 0x0e2c8000 },
- { _MMIO(0x9888), 0x162caa00 },
- { _MMIO(0x9888), 0x182c000a },
- { _MMIO(0x9888), 0x04193000 },
- { _MMIO(0x9888), 0x081a28c1 },
- { _MMIO(0x9888), 0x001a0000 },
- { _MMIO(0x9888), 0x00133000 },
- { _MMIO(0x9888), 0x0613c000 },
- { _MMIO(0x9888), 0x0813f000 },
- { _MMIO(0x9888), 0x00172000 },
- { _MMIO(0x9888), 0x06178000 },
- { _MMIO(0x9888), 0x0817a000 },
- { _MMIO(0x9888), 0x00180037 },
- { _MMIO(0x9888), 0x06180940 },
- { _MMIO(0x9888), 0x08180000 },
- { _MMIO(0x9888), 0x02180000 },
- { _MMIO(0x9888), 0x04183000 },
- { _MMIO(0x9888), 0x04afc000 },
- { _MMIO(0x9888), 0x06af3000 },
- { _MMIO(0x9888), 0x0acc4000 },
- { _MMIO(0x9888), 0x0ccc0015 },
- { _MMIO(0x9888), 0x0a8da000 },
- { _MMIO(0x9888), 0x0c8da000 },
- { _MMIO(0x9888), 0x0e8f4000 },
- { _MMIO(0x9888), 0x108f0015 },
- { _MMIO(0x9888), 0x16aca000 },
- { _MMIO(0x9888), 0x18ac000a },
- { _MMIO(0x9888), 0x06993000 },
- { _MMIO(0x9888), 0x0c9a28c1 },
- { _MMIO(0x9888), 0x009a0000 },
- { _MMIO(0x9888), 0x0a93f000 },
- { _MMIO(0x9888), 0x0c93f000 },
- { _MMIO(0x9888), 0x0a97a000 },
- { _MMIO(0x9888), 0x0c97a000 },
- { _MMIO(0x9888), 0x0a980977 },
- { _MMIO(0x9888), 0x08980000 },
- { _MMIO(0x9888), 0x04980000 },
- { _MMIO(0x9888), 0x06983000 },
- { _MMIO(0x9888), 0x119000ff },
- { _MMIO(0x9888), 0x51900040 },
- { _MMIO(0x9888), 0x41900020 },
- { _MMIO(0x9888), 0x55900004 },
- { _MMIO(0x9888), 0x45900400 },
- { _MMIO(0x9888), 0x479008a5 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900002 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static int
-get_vme_pipe_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_vme_pipe;
- lens[n] = ARRAY_SIZE(mux_config_vme_pipe);
- n++;
-
- return n;
-}
-
static const struct i915_oa_reg b_counter_config_test_oa[] = {
{ _MMIO(0x2740), 0x00000000 },
{ _MMIO(0x2744), 0x00800000 },
@@ -1931,6 +60,7 @@ static const struct i915_oa_reg flex_eu_config_test_oa[] = {
};
static const struct i915_oa_reg mux_config_test_oa[] = {
+ { _MMIO(0x9840), 0x00000080 },
{ _MMIO(0x9888), 0x11810000 },
{ _MMIO(0x9888), 0x07810013 },
{ _MMIO(0x9888), 0x1f810000 },
@@ -1945,1096 +75,35 @@ static const struct i915_oa_reg mux_config_test_oa[] = {
{ _MMIO(0x9888), 0x33900000 },
};
-static int
-get_test_oa_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_test_oa;
- lens[n] = ARRAY_SIZE(mux_config_test_oa);
- n++;
-
- return n;
-}
-
-int i915_oa_select_metric_set_kblgt3(struct drm_i915_private *dev_priv)
-{
- dev_priv->perf.oa.n_mux_configs = 0;
- dev_priv->perf.oa.b_counter_regs = NULL;
- dev_priv->perf.oa.b_counter_regs_len = 0;
- dev_priv->perf.oa.flex_regs = NULL;
- dev_priv->perf.oa.flex_regs_len = 0;
-
- switch (dev_priv->perf.oa.metrics_set) {
- case METRIC_SET_ID_RENDER_BASIC:
- dev_priv->perf.oa.n_mux_configs =
- get_render_basic_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_BASIC\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_render_basic;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_render_basic);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_render_basic;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_render_basic);
-
- return 0;
- case METRIC_SET_ID_COMPUTE_BASIC:
- dev_priv->perf.oa.n_mux_configs =
- get_compute_basic_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_BASIC\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_compute_basic;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_compute_basic);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_compute_basic;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_compute_basic);
-
- return 0;
- case METRIC_SET_ID_RENDER_PIPE_PROFILE:
- dev_priv->perf.oa.n_mux_configs =
- get_render_pipe_profile_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_PIPE_PROFILE\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_render_pipe_profile;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_render_pipe_profile);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_render_pipe_profile;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_render_pipe_profile);
-
- return 0;
- case METRIC_SET_ID_MEMORY_READS:
- dev_priv->perf.oa.n_mux_configs =
- get_memory_reads_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_READS\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_memory_reads;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_memory_reads);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_memory_reads;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_memory_reads);
-
- return 0;
- case METRIC_SET_ID_MEMORY_WRITES:
- dev_priv->perf.oa.n_mux_configs =
- get_memory_writes_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_WRITES\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_memory_writes;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_memory_writes);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_memory_writes;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_memory_writes);
-
- return 0;
- case METRIC_SET_ID_COMPUTE_EXTENDED:
- dev_priv->perf.oa.n_mux_configs =
- get_compute_extended_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTENDED\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_compute_extended;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_compute_extended);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_compute_extended;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_compute_extended);
-
- return 0;
- case METRIC_SET_ID_COMPUTE_L3_CACHE:
- dev_priv->perf.oa.n_mux_configs =
- get_compute_l3_cache_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_L3_CACHE\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_compute_l3_cache;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_compute_l3_cache);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_compute_l3_cache;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_compute_l3_cache);
-
- return 0;
- case METRIC_SET_ID_HDC_AND_SF:
- dev_priv->perf.oa.n_mux_configs =
- get_hdc_and_sf_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"HDC_AND_SF\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_hdc_and_sf;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_hdc_and_sf);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_hdc_and_sf;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_hdc_and_sf);
-
- return 0;
- case METRIC_SET_ID_L3_1:
- dev_priv->perf.oa.n_mux_configs =
- get_l3_1_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_1\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_l3_1;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_l3_1);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_l3_1;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_l3_1);
-
- return 0;
- case METRIC_SET_ID_L3_2:
- dev_priv->perf.oa.n_mux_configs =
- get_l3_2_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_2\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_l3_2;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_l3_2);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_l3_2;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_l3_2);
-
- return 0;
- case METRIC_SET_ID_L3_3:
- dev_priv->perf.oa.n_mux_configs =
- get_l3_3_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_3\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_l3_3;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_l3_3);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_l3_3;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_l3_3);
-
- return 0;
- case METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND:
- dev_priv->perf.oa.n_mux_configs =
- get_rasterizer_and_pixel_backend_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"RASTERIZER_AND_PIXEL_BACKEND\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_rasterizer_and_pixel_backend;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_rasterizer_and_pixel_backend);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_rasterizer_and_pixel_backend;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_rasterizer_and_pixel_backend);
-
- return 0;
- case METRIC_SET_ID_SAMPLER:
- dev_priv->perf.oa.n_mux_configs =
- get_sampler_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"SAMPLER\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_sampler;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_sampler);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_sampler;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_sampler);
-
- return 0;
- case METRIC_SET_ID_TDL_1:
- dev_priv->perf.oa.n_mux_configs =
- get_tdl_1_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"TDL_1\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_tdl_1;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_tdl_1);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_tdl_1;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_tdl_1);
-
- return 0;
- case METRIC_SET_ID_TDL_2:
- dev_priv->perf.oa.n_mux_configs =
- get_tdl_2_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"TDL_2\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_tdl_2;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_tdl_2);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_tdl_2;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_tdl_2);
-
- return 0;
- case METRIC_SET_ID_COMPUTE_EXTRA:
- dev_priv->perf.oa.n_mux_configs =
- get_compute_extra_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTRA\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_compute_extra;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_compute_extra);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_compute_extra;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_compute_extra);
-
- return 0;
- case METRIC_SET_ID_VME_PIPE:
- dev_priv->perf.oa.n_mux_configs =
- get_vme_pipe_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"VME_PIPE\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_vme_pipe;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_vme_pipe);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_vme_pipe;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_vme_pipe);
-
- return 0;
- case METRIC_SET_ID_TEST_OA:
- dev_priv->perf.oa.n_mux_configs =
- get_test_oa_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"TEST_OA\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_test_oa;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_test_oa;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_test_oa);
-
- return 0;
- default:
- return -ENODEV;
- }
-}
-
-static ssize_t
-show_render_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_BASIC);
-}
-
-static struct device_attribute dev_attr_render_basic_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_render_basic_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_render_basic[] = {
- &dev_attr_render_basic_id.attr,
- NULL,
-};
-
-static struct attribute_group group_render_basic = {
- .name = "0286c920-2f6d-493b-b22d-7a5280df43de",
- .attrs = attrs_render_basic,
-};
-
-static ssize_t
-show_compute_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_BASIC);
-}
-
-static struct device_attribute dev_attr_compute_basic_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_compute_basic_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_compute_basic[] = {
- &dev_attr_compute_basic_id.attr,
- NULL,
-};
-
-static struct attribute_group group_compute_basic = {
- .name = "9823aaa1-b06f-40ce-884b-cd798c79f0c2",
- .attrs = attrs_compute_basic,
-};
-
-static ssize_t
-show_render_pipe_profile_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_PIPE_PROFILE);
-}
-
-static struct device_attribute dev_attr_render_pipe_profile_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_render_pipe_profile_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_render_pipe_profile[] = {
- &dev_attr_render_pipe_profile_id.attr,
- NULL,
-};
-
-static struct attribute_group group_render_pipe_profile = {
- .name = "c7c735f3-ce58-45cf-aa04-30b183f1faff",
- .attrs = attrs_render_pipe_profile,
-};
-
-static ssize_t
-show_memory_reads_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_READS);
-}
-
-static struct device_attribute dev_attr_memory_reads_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_memory_reads_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_memory_reads[] = {
- &dev_attr_memory_reads_id.attr,
- NULL,
-};
-
-static struct attribute_group group_memory_reads = {
- .name = "96ec2219-040b-428a-856a-6bc03363a057",
- .attrs = attrs_memory_reads,
-};
-
-static ssize_t
-show_memory_writes_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_WRITES);
-}
-
-static struct device_attribute dev_attr_memory_writes_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_memory_writes_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_memory_writes[] = {
- &dev_attr_memory_writes_id.attr,
- NULL,
-};
-
-static struct attribute_group group_memory_writes = {
- .name = "03372b64-4996-4d3b-aa18-790e75eeb9c2",
- .attrs = attrs_memory_writes,
-};
-
-static ssize_t
-show_compute_extended_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_EXTENDED);
-}
-
-static struct device_attribute dev_attr_compute_extended_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_compute_extended_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_compute_extended[] = {
- &dev_attr_compute_extended_id.attr,
- NULL,
-};
-
-static struct attribute_group group_compute_extended = {
- .name = "31b4ce5a-bd61-4c1f-bb5d-f2e731412150",
- .attrs = attrs_compute_extended,
-};
-
-static ssize_t
-show_compute_l3_cache_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_L3_CACHE);
-}
-
-static struct device_attribute dev_attr_compute_l3_cache_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_compute_l3_cache_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_compute_l3_cache[] = {
- &dev_attr_compute_l3_cache_id.attr,
- NULL,
-};
-
-static struct attribute_group group_compute_l3_cache = {
- .name = "2ce0911a-27fc-4887-96f0-11084fa807c3",
- .attrs = attrs_compute_l3_cache,
-};
-
-static ssize_t
-show_hdc_and_sf_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_HDC_AND_SF);
-}
-
-static struct device_attribute dev_attr_hdc_and_sf_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_hdc_and_sf_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_hdc_and_sf[] = {
- &dev_attr_hdc_and_sf_id.attr,
- NULL,
-};
-
-static struct attribute_group group_hdc_and_sf = {
- .name = "546c4c1d-99b8-42fb-a107-5aaabb5314a8",
- .attrs = attrs_hdc_and_sf,
-};
-
-static ssize_t
-show_l3_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_L3_1);
-}
-
-static struct device_attribute dev_attr_l3_1_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_l3_1_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_l3_1[] = {
- &dev_attr_l3_1_id.attr,
- NULL,
-};
-
-static struct attribute_group group_l3_1 = {
- .name = "4e93d156-9b39-4268-8544-a8e0480806d7",
- .attrs = attrs_l3_1,
-};
-
-static ssize_t
-show_l3_2_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_L3_2);
-}
-
-static struct device_attribute dev_attr_l3_2_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_l3_2_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_l3_2[] = {
- &dev_attr_l3_2_id.attr,
- NULL,
-};
-
-static struct attribute_group group_l3_2 = {
- .name = "de1bec86-ca92-4b43-89fa-147653221cc0",
- .attrs = attrs_l3_2,
-};
-
-static ssize_t
-show_l3_3_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_L3_3);
-}
-
-static struct device_attribute dev_attr_l3_3_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_l3_3_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_l3_3[] = {
- &dev_attr_l3_3_id.attr,
- NULL,
-};
-
-static struct attribute_group group_l3_3 = {
- .name = "e63537bb-10be-4d4a-92c4-c6b0c65e02ef",
- .attrs = attrs_l3_3,
-};
-
-static ssize_t
-show_rasterizer_and_pixel_backend_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND);
-}
-
-static struct device_attribute dev_attr_rasterizer_and_pixel_backend_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_rasterizer_and_pixel_backend_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_rasterizer_and_pixel_backend[] = {
- &dev_attr_rasterizer_and_pixel_backend_id.attr,
- NULL,
-};
-
-static struct attribute_group group_rasterizer_and_pixel_backend = {
- .name = "7a03a9f8-ec5e-46bb-8b67-1f0ff1476281",
- .attrs = attrs_rasterizer_and_pixel_backend,
-};
-
-static ssize_t
-show_sampler_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_SAMPLER);
-}
-
-static struct device_attribute dev_attr_sampler_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_sampler_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_sampler[] = {
- &dev_attr_sampler_id.attr,
- NULL,
-};
-
-static struct attribute_group group_sampler = {
- .name = "b25d2ebf-a6e0-4b29-96be-a9b010edeeda",
- .attrs = attrs_sampler,
-};
-
-static ssize_t
-show_tdl_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_TDL_1);
-}
-
-static struct device_attribute dev_attr_tdl_1_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_tdl_1_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_tdl_1[] = {
- &dev_attr_tdl_1_id.attr,
- NULL,
-};
-
-static struct attribute_group group_tdl_1 = {
- .name = "469a05e5-e299-46f7-9598-7b05f3c34991",
- .attrs = attrs_tdl_1,
-};
-
-static ssize_t
-show_tdl_2_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_TDL_2);
-}
-
-static struct device_attribute dev_attr_tdl_2_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_tdl_2_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_tdl_2[] = {
- &dev_attr_tdl_2_id.attr,
- NULL,
-};
-
-static struct attribute_group group_tdl_2 = {
- .name = "52f925c6-786a-4ec6-86ce-cba85c83453a",
- .attrs = attrs_tdl_2,
-};
-
-static ssize_t
-show_compute_extra_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_EXTRA);
-}
-
-static struct device_attribute dev_attr_compute_extra_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_compute_extra_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_compute_extra[] = {
- &dev_attr_compute_extra_id.attr,
- NULL,
-};
-
-static struct attribute_group group_compute_extra = {
- .name = "efc497ac-884e-4ee4-a4a8-15fba22aaf21",
- .attrs = attrs_compute_extra,
-};
-
-static ssize_t
-show_vme_pipe_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_VME_PIPE);
-}
-
-static struct device_attribute dev_attr_vme_pipe_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_vme_pipe_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_vme_pipe[] = {
- &dev_attr_vme_pipe_id.attr,
- NULL,
-};
-
-static struct attribute_group group_vme_pipe = {
- .name = "bfd9764d-2c5b-4c16-bfc1-89de3ca10917",
- .attrs = attrs_vme_pipe,
-};
-
static ssize_t
show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", METRIC_SET_ID_TEST_OA);
+ return sprintf(buf, "1\n");
}
-static struct device_attribute dev_attr_test_oa_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_test_oa_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_test_oa[] = {
- &dev_attr_test_oa_id.attr,
- NULL,
-};
-
-static struct attribute_group group_test_oa = {
- .name = "f1792f32-6db2-4b50-b4b2-557128f1688d",
- .attrs = attrs_test_oa,
-};
-
-int
-i915_perf_register_sysfs_kblgt3(struct drm_i915_private *dev_priv)
+void
+i915_perf_load_test_config_kblgt3(struct drm_i915_private *dev_priv)
{
- const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
- int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];
- int ret = 0;
+ strncpy(dev_priv->perf.oa.test_config.uuid,
+ "f1792f32-6db2-4b50-b4b2-557128f1688d",
+ UUID_STRING_LEN);
+ dev_priv->perf.oa.test_config.id = 1;
- if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_basic);
- if (ret)
- goto error_render_basic;
- }
- if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
- if (ret)
- goto error_compute_basic;
- }
- if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
- if (ret)
- goto error_render_pipe_profile;
- }
- if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
- if (ret)
- goto error_memory_reads;
- }
- if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
- if (ret)
- goto error_memory_writes;
- }
- if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
- if (ret)
- goto error_compute_extended;
- }
- if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
- if (ret)
- goto error_compute_l3_cache;
- }
- if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
- if (ret)
- goto error_hdc_and_sf;
- }
- if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_1);
- if (ret)
- goto error_l3_1;
- }
- if (get_l3_2_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_2);
- if (ret)
- goto error_l3_2;
- }
- if (get_l3_3_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_3);
- if (ret)
- goto error_l3_3;
- }
- if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
- if (ret)
- goto error_rasterizer_and_pixel_backend;
- }
- if (get_sampler_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_sampler);
- if (ret)
- goto error_sampler;
- }
- if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
- if (ret)
- goto error_tdl_1;
- }
- if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
- if (ret)
- goto error_tdl_2;
- }
- if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
- if (ret)
- goto error_compute_extra;
- }
- if (get_vme_pipe_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_vme_pipe);
- if (ret)
- goto error_vme_pipe;
- }
- if (get_test_oa_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_test_oa);
- if (ret)
- goto error_test_oa;
- }
+ dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- return 0;
+ dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-error_test_oa:
- if (get_vme_pipe_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_vme_pipe);
-error_vme_pipe:
- if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
-error_compute_extra:
- if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
-error_tdl_2:
- if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
-error_tdl_1:
- if (get_sampler_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler);
-error_sampler:
- if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
-error_rasterizer_and_pixel_backend:
- if (get_l3_3_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_3);
-error_l3_3:
- if (get_l3_2_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_2);
-error_l3_2:
- if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_1);
-error_l3_1:
- if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
-error_hdc_and_sf:
- if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
-error_compute_l3_cache:
- if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
-error_compute_extended:
- if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
-error_memory_writes:
- if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
-error_memory_reads:
- if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
-error_render_pipe_profile:
- if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
-error_compute_basic:
- if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
-error_render_basic:
- return ret;
-}
+ dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-void
-i915_perf_unregister_sysfs_kblgt3(struct drm_i915_private *dev_priv)
-{
- const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
- int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];
+ dev_priv->perf.oa.test_config.sysfs_metric.name = "f1792f32-6db2-4b50-b4b2-557128f1688d";
+ dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+
+ dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
- if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
- if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
- if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
- if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
- if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
- if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
- if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
- if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
- if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_1);
- if (get_l3_2_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_2);
- if (get_l3_3_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_3);
- if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
- if (get_sampler_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler);
- if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
- if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
- if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
- if (get_vme_pipe_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_vme_pipe);
- if (get_test_oa_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_test_oa);
+ dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
}
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt3.h b/drivers/gpu/drm/i915/i915_oa_kblgt3.h
index b0ca7f3114d3..d5b5b5c1923e 100644
--- a/drivers/gpu/drm/i915/i915_oa_kblgt3.h
+++ b/drivers/gpu/drm/i915/i915_oa_kblgt3.h
@@ -29,12 +29,6 @@
#ifndef __I915_OA_KBLGT3_H__
#define __I915_OA_KBLGT3_H__
-extern int i915_oa_n_builtin_metric_sets_kblgt3;
-
-extern int i915_oa_select_metric_set_kblgt3(struct drm_i915_private *dev_priv);
-
-extern int i915_perf_register_sysfs_kblgt3(struct drm_i915_private *dev_priv);
-
-extern void i915_perf_unregister_sysfs_kblgt3(struct drm_i915_private *dev_priv);
+extern void i915_perf_load_test_config_kblgt3(struct drm_i915_private *dev_priv);
#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt2.c b/drivers/gpu/drm/i915/i915_oa_sklgt2.c
index 1268beda212c..890d55879946 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt2.c
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt2.c
@@ -31,2317 +31,6 @@
#include "i915_drv.h"
#include "i915_oa_sklgt2.h"
-enum metric_set_id {
- METRIC_SET_ID_RENDER_BASIC = 1,
- METRIC_SET_ID_COMPUTE_BASIC,
- METRIC_SET_ID_RENDER_PIPE_PROFILE,
- METRIC_SET_ID_MEMORY_READS,
- METRIC_SET_ID_MEMORY_WRITES,
- METRIC_SET_ID_COMPUTE_EXTENDED,
- METRIC_SET_ID_COMPUTE_L3_CACHE,
- METRIC_SET_ID_HDC_AND_SF,
- METRIC_SET_ID_L3_1,
- METRIC_SET_ID_L3_2,
- METRIC_SET_ID_L3_3,
- METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND,
- METRIC_SET_ID_SAMPLER,
- METRIC_SET_ID_TDL_1,
- METRIC_SET_ID_TDL_2,
- METRIC_SET_ID_COMPUTE_EXTRA,
- METRIC_SET_ID_VME_PIPE,
- METRIC_SET_ID_TEST_OA,
-};
-
-int i915_oa_n_builtin_metric_sets_sklgt2 = 18;
-
-static const struct i915_oa_reg b_counter_config_render_basic[] = {
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x00800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2740), 0x00000000 },
-};
-
-static const struct i915_oa_reg flex_eu_config_render_basic[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_render_basic_1_sku_gte_0x02[] = {
- { _MMIO(0x9888), 0x166c01e0 },
- { _MMIO(0x9888), 0x12170280 },
- { _MMIO(0x9888), 0x12370280 },
- { _MMIO(0x9888), 0x11930317 },
- { _MMIO(0x9888), 0x159303df },
- { _MMIO(0x9888), 0x3f900003 },
- { _MMIO(0x9888), 0x1a4e0080 },
- { _MMIO(0x9888), 0x0a6c0053 },
- { _MMIO(0x9888), 0x106c0000 },
- { _MMIO(0x9888), 0x1c6c0000 },
- { _MMIO(0x9888), 0x0a1b4000 },
- { _MMIO(0x9888), 0x1c1c0001 },
- { _MMIO(0x9888), 0x002f1000 },
- { _MMIO(0x9888), 0x042f1000 },
- { _MMIO(0x9888), 0x004c4000 },
- { _MMIO(0x9888), 0x0a4c8400 },
- { _MMIO(0x9888), 0x000d2000 },
- { _MMIO(0x9888), 0x060d8000 },
- { _MMIO(0x9888), 0x080da000 },
- { _MMIO(0x9888), 0x0a0d2000 },
- { _MMIO(0x9888), 0x0c0f0400 },
- { _MMIO(0x9888), 0x0e0f6600 },
- { _MMIO(0x9888), 0x002c8000 },
- { _MMIO(0x9888), 0x162c2200 },
- { _MMIO(0x9888), 0x062d8000 },
- { _MMIO(0x9888), 0x082d8000 },
- { _MMIO(0x9888), 0x00133000 },
- { _MMIO(0x9888), 0x08133000 },
- { _MMIO(0x9888), 0x00170020 },
- { _MMIO(0x9888), 0x08170021 },
- { _MMIO(0x9888), 0x10170000 },
- { _MMIO(0x9888), 0x0633c000 },
- { _MMIO(0x9888), 0x0833c000 },
- { _MMIO(0x9888), 0x06370800 },
- { _MMIO(0x9888), 0x08370840 },
- { _MMIO(0x9888), 0x10370000 },
- { _MMIO(0x9888), 0x0d933031 },
- { _MMIO(0x9888), 0x0f933e3f },
- { _MMIO(0x9888), 0x01933d00 },
- { _MMIO(0x9888), 0x0393073c },
- { _MMIO(0x9888), 0x0593000e },
- { _MMIO(0x9888), 0x1d930000 },
- { _MMIO(0x9888), 0x19930000 },
- { _MMIO(0x9888), 0x1b930000 },
- { _MMIO(0x9888), 0x1d900157 },
- { _MMIO(0x9888), 0x1f900158 },
- { _MMIO(0x9888), 0x35900000 },
- { _MMIO(0x9888), 0x2b908000 },
- { _MMIO(0x9888), 0x2d908000 },
- { _MMIO(0x9888), 0x2f908000 },
- { _MMIO(0x9888), 0x31908000 },
- { _MMIO(0x9888), 0x15908000 },
- { _MMIO(0x9888), 0x17908000 },
- { _MMIO(0x9888), 0x19908000 },
- { _MMIO(0x9888), 0x1b908000 },
- { _MMIO(0x9888), 0x1190001f },
- { _MMIO(0x9888), 0x51904400 },
- { _MMIO(0x9888), 0x41900020 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x45900c21 },
- { _MMIO(0x9888), 0x47900061 },
- { _MMIO(0x9888), 0x57904440 },
- { _MMIO(0x9888), 0x49900000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900000 },
- { _MMIO(0x9888), 0x59900004 },
- { _MMIO(0x9888), 0x43900000 },
- { _MMIO(0x9888), 0x53904444 },
-};
-
-static int
-get_render_basic_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- if (dev_priv->drm.pdev->revision >= 0x02) {
- regs[n] = mux_config_render_basic_1_sku_gte_0x02;
- lens[n] = ARRAY_SIZE(mux_config_render_basic_1_sku_gte_0x02);
- n++;
- }
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_compute_basic[] = {
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x00800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2740), 0x00000000 },
-};
-
-static const struct i915_oa_reg flex_eu_config_compute_basic[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00000003 },
- { _MMIO(0xe658), 0x00002001 },
- { _MMIO(0xe758), 0x00778008 },
- { _MMIO(0xe45c), 0x00088078 },
- { _MMIO(0xe55c), 0x00808708 },
- { _MMIO(0xe65c), 0x00a08908 },
-};
-
-static const struct i915_oa_reg mux_config_compute_basic_0_slices_0x01_and_sku_lt_0x02[] = {
- { _MMIO(0x9888), 0x104f00e0 },
- { _MMIO(0x9888), 0x124f1c00 },
- { _MMIO(0x9888), 0x106c00e0 },
- { _MMIO(0x9888), 0x37906800 },
- { _MMIO(0x9888), 0x3f901403 },
- { _MMIO(0x9888), 0x184e8000 },
- { _MMIO(0x9888), 0x1a4e8200 },
- { _MMIO(0x9888), 0x044e8000 },
- { _MMIO(0x9888), 0x004f0db2 },
- { _MMIO(0x9888), 0x064f0900 },
- { _MMIO(0x9888), 0x084f1880 },
- { _MMIO(0x9888), 0x0a4f0011 },
- { _MMIO(0x9888), 0x0c4f0e3c },
- { _MMIO(0x9888), 0x0e4f1d80 },
- { _MMIO(0x9888), 0x086c0002 },
- { _MMIO(0x9888), 0x0a6c0100 },
- { _MMIO(0x9888), 0x0e6c000c },
- { _MMIO(0x9888), 0x026c000b },
- { _MMIO(0x9888), 0x1c6c0000 },
- { _MMIO(0x9888), 0x1a6c0000 },
- { _MMIO(0x9888), 0x081b4000 },
- { _MMIO(0x9888), 0x0a1b8000 },
- { _MMIO(0x9888), 0x0e1b4000 },
- { _MMIO(0x9888), 0x021b4000 },
- { _MMIO(0x9888), 0x1a1c4000 },
- { _MMIO(0x9888), 0x1c1c0012 },
- { _MMIO(0x9888), 0x141c8000 },
- { _MMIO(0x9888), 0x005bc000 },
- { _MMIO(0x9888), 0x065b8000 },
- { _MMIO(0x9888), 0x085b8000 },
- { _MMIO(0x9888), 0x0a5b4000 },
- { _MMIO(0x9888), 0x0c5bc000 },
- { _MMIO(0x9888), 0x0e5b8000 },
- { _MMIO(0x9888), 0x105c8000 },
- { _MMIO(0x9888), 0x1a5ca000 },
- { _MMIO(0x9888), 0x1c5c002d },
- { _MMIO(0x9888), 0x125c8000 },
- { _MMIO(0x9888), 0x0a4c0800 },
- { _MMIO(0x9888), 0x0c4c0082 },
- { _MMIO(0x9888), 0x084c8000 },
- { _MMIO(0x9888), 0x000da000 },
- { _MMIO(0x9888), 0x060d8000 },
- { _MMIO(0x9888), 0x080da000 },
- { _MMIO(0x9888), 0x0a0da000 },
- { _MMIO(0x9888), 0x0c0da000 },
- { _MMIO(0x9888), 0x0e0da000 },
- { _MMIO(0x9888), 0x020d2000 },
- { _MMIO(0x9888), 0x0c0f5400 },
- { _MMIO(0x9888), 0x0e0f5500 },
- { _MMIO(0x9888), 0x100f0155 },
- { _MMIO(0x9888), 0x002cc000 },
- { _MMIO(0x9888), 0x0e2cc000 },
- { _MMIO(0x9888), 0x162cbe00 },
- { _MMIO(0x9888), 0x182c00ef },
- { _MMIO(0x9888), 0x022cc000 },
- { _MMIO(0x9888), 0x042c8000 },
- { _MMIO(0x9888), 0x19900157 },
- { _MMIO(0x9888), 0x1b900167 },
- { _MMIO(0x9888), 0x1d900105 },
- { _MMIO(0x9888), 0x1f900103 },
- { _MMIO(0x9888), 0x35900000 },
- { _MMIO(0xd28), 0x00000000 },
- { _MMIO(0x9888), 0x11900fff },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900840 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x45900842 },
- { _MMIO(0x9888), 0x47900840 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900840 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900040 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x43900840 },
- { _MMIO(0x9888), 0x53901111 },
-};
-
-static const struct i915_oa_reg mux_config_compute_basic_0_slices_0x01_and_sku_gte_0x02[] = {
- { _MMIO(0x9888), 0x104f00e0 },
- { _MMIO(0x9888), 0x124f1c00 },
- { _MMIO(0x9888), 0x106c00e0 },
- { _MMIO(0x9888), 0x37906800 },
- { _MMIO(0x9888), 0x3f901403 },
- { _MMIO(0x9888), 0x004e8000 },
- { _MMIO(0x9888), 0x1a4e0820 },
- { _MMIO(0x9888), 0x1c4e0002 },
- { _MMIO(0x9888), 0x064f0900 },
- { _MMIO(0x9888), 0x084f0032 },
- { _MMIO(0x9888), 0x0a4f1810 },
- { _MMIO(0x9888), 0x0c4f0e00 },
- { _MMIO(0x9888), 0x0e4f003c },
- { _MMIO(0x9888), 0x004f0d80 },
- { _MMIO(0x9888), 0x024f003b },
- { _MMIO(0x9888), 0x006c0002 },
- { _MMIO(0x9888), 0x086c0000 },
- { _MMIO(0x9888), 0x0c6c000c },
- { _MMIO(0x9888), 0x0e6c0b00 },
- { _MMIO(0x9888), 0x186c0000 },
- { _MMIO(0x9888), 0x1c6c0000 },
- { _MMIO(0x9888), 0x1e6c0000 },
- { _MMIO(0x9888), 0x001b4000 },
- { _MMIO(0x9888), 0x081b8000 },
- { _MMIO(0x9888), 0x0c1b4000 },
- { _MMIO(0x9888), 0x0e1b8000 },
- { _MMIO(0x9888), 0x101c8000 },
- { _MMIO(0x9888), 0x1a1c8000 },
- { _MMIO(0x9888), 0x1c1c0024 },
- { _MMIO(0x9888), 0x065b8000 },
- { _MMIO(0x9888), 0x085b4000 },
- { _MMIO(0x9888), 0x0a5bc000 },
- { _MMIO(0x9888), 0x0c5b8000 },
- { _MMIO(0x9888), 0x0e5b4000 },
- { _MMIO(0x9888), 0x005b8000 },
- { _MMIO(0x9888), 0x025b4000 },
- { _MMIO(0x9888), 0x1a5c6000 },
- { _MMIO(0x9888), 0x1c5c001b },
- { _MMIO(0x9888), 0x125c8000 },
- { _MMIO(0x9888), 0x145c8000 },
- { _MMIO(0x9888), 0x004c8000 },
- { _MMIO(0x9888), 0x0a4c2000 },
- { _MMIO(0x9888), 0x0c4c0208 },
- { _MMIO(0x9888), 0x000da000 },
- { _MMIO(0x9888), 0x060d8000 },
- { _MMIO(0x9888), 0x080da000 },
- { _MMIO(0x9888), 0x0a0da000 },
- { _MMIO(0x9888), 0x0c0da000 },
- { _MMIO(0x9888), 0x0e0da000 },
- { _MMIO(0x9888), 0x020d2000 },
- { _MMIO(0x9888), 0x0c0f5400 },
- { _MMIO(0x9888), 0x0e0f5500 },
- { _MMIO(0x9888), 0x100f0155 },
- { _MMIO(0x9888), 0x002c8000 },
- { _MMIO(0x9888), 0x0e2cc000 },
- { _MMIO(0x9888), 0x162cfb00 },
- { _MMIO(0x9888), 0x182c00be },
- { _MMIO(0x9888), 0x022cc000 },
- { _MMIO(0x9888), 0x042cc000 },
- { _MMIO(0x9888), 0x19900157 },
- { _MMIO(0x9888), 0x1b900167 },
- { _MMIO(0x9888), 0x1d900105 },
- { _MMIO(0x9888), 0x1f900103 },
- { _MMIO(0x9888), 0x35900000 },
- { _MMIO(0x9888), 0x11900fff },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900800 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x45900842 },
- { _MMIO(0x9888), 0x47900802 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900802 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900002 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x43900842 },
- { _MMIO(0x9888), 0x53901111 },
-};
-
-static int
-get_compute_basic_mux_config(struct drm_i915_private *dev_priv,
-                             const struct i915_oa_reg **regs,
-                             int *lens)
-{
-        int n = 0;
-
-        BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 2);
-        BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 2);
-
-        if ((INTEL_INFO(dev_priv)->sseu.slice_mask & 0x01) &&
-            (dev_priv->drm.pdev->revision < 0x02)) {
-                regs[n] = mux_config_compute_basic_0_slices_0x01_and_sku_lt_0x02;
-                lens[n] = ARRAY_SIZE(mux_config_compute_basic_0_slices_0x01_and_sku_lt_0x02);
-                n++;
-        }
-        if ((INTEL_INFO(dev_priv)->sseu.slice_mask & 0x01) &&
-            (dev_priv->drm.pdev->revision >= 0x02)) {
-                regs[n] = mux_config_compute_basic_0_slices_0x01_and_sku_gte_0x02;
-                lens[n] = ARRAY_SIZE(mux_config_compute_basic_0_slices_0x01_and_sku_gte_0x02);
-                n++;
-        }
-
-        return n;
-}
-
-static const struct i915_oa_reg b_counter_config_render_pipe_profile[] = {
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2770), 0x0007ffea },
- { _MMIO(0x2774), 0x00007ffc },
- { _MMIO(0x2778), 0x0007affa },
- { _MMIO(0x277c), 0x0000f5fd },
- { _MMIO(0x2780), 0x00079ffa },
- { _MMIO(0x2784), 0x0000f3fb },
- { _MMIO(0x2788), 0x0007bf7a },
- { _MMIO(0x278c), 0x0000f7e7 },
- { _MMIO(0x2790), 0x0007fefa },
- { _MMIO(0x2794), 0x0000f7cf },
- { _MMIO(0x2798), 0x00077ffa },
- { _MMIO(0x279c), 0x0000efdf },
- { _MMIO(0x27a0), 0x0006fffa },
- { _MMIO(0x27a4), 0x0000cfbf },
- { _MMIO(0x27a8), 0x0003fffa },
- { _MMIO(0x27ac), 0x00005f7f },
-};
-
-static const struct i915_oa_reg flex_eu_config_render_pipe_profile[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00015014 },
- { _MMIO(0xe658), 0x00025024 },
- { _MMIO(0xe758), 0x00035034 },
- { _MMIO(0xe45c), 0x00045044 },
- { _MMIO(0xe55c), 0x00055054 },
- { _MMIO(0xe65c), 0x00065064 },
-};
-
-static const struct i915_oa_reg mux_config_render_pipe_profile_0_sku_lt_0x02[] = {
- { _MMIO(0x9888), 0x0c0e001f },
- { _MMIO(0x9888), 0x0a0f0000 },
- { _MMIO(0x9888), 0x10116800 },
- { _MMIO(0x9888), 0x178a03e0 },
- { _MMIO(0x9888), 0x11824c00 },
- { _MMIO(0x9888), 0x11830020 },
- { _MMIO(0x9888), 0x13840020 },
- { _MMIO(0x9888), 0x11850019 },
- { _MMIO(0x9888), 0x11860007 },
- { _MMIO(0x9888), 0x01870c40 },
- { _MMIO(0x9888), 0x17880000 },
- { _MMIO(0x9888), 0x022f4000 },
- { _MMIO(0x9888), 0x0a4c0040 },
- { _MMIO(0x9888), 0x0c0d8000 },
- { _MMIO(0x9888), 0x040d4000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x020e5400 },
- { _MMIO(0x9888), 0x000e0000 },
- { _MMIO(0x9888), 0x080f0040 },
- { _MMIO(0x9888), 0x000f0000 },
- { _MMIO(0x9888), 0x100f0000 },
- { _MMIO(0x9888), 0x0e0f0040 },
- { _MMIO(0x9888), 0x0c2c8000 },
- { _MMIO(0x9888), 0x06104000 },
- { _MMIO(0x9888), 0x06110012 },
- { _MMIO(0x9888), 0x06131000 },
- { _MMIO(0x9888), 0x01898000 },
- { _MMIO(0x9888), 0x0d890100 },
- { _MMIO(0x9888), 0x03898000 },
- { _MMIO(0x9888), 0x09808000 },
- { _MMIO(0x9888), 0x0b808000 },
- { _MMIO(0x9888), 0x0380c000 },
- { _MMIO(0x9888), 0x0f8a0075 },
- { _MMIO(0x9888), 0x1d8a0000 },
- { _MMIO(0x9888), 0x118a8000 },
- { _MMIO(0x9888), 0x1b8a4000 },
- { _MMIO(0x9888), 0x138a8000 },
- { _MMIO(0x9888), 0x1d81a000 },
- { _MMIO(0x9888), 0x15818000 },
- { _MMIO(0x9888), 0x17818000 },
- { _MMIO(0x9888), 0x0b820030 },
- { _MMIO(0x9888), 0x07828000 },
- { _MMIO(0x9888), 0x0d824000 },
- { _MMIO(0x9888), 0x0f828000 },
- { _MMIO(0x9888), 0x05824000 },
- { _MMIO(0x9888), 0x0d830003 },
- { _MMIO(0x9888), 0x0583000c },
- { _MMIO(0x9888), 0x09830000 },
- { _MMIO(0x9888), 0x03838000 },
- { _MMIO(0x9888), 0x07838000 },
- { _MMIO(0x9888), 0x0b840980 },
- { _MMIO(0x9888), 0x03844d80 },
- { _MMIO(0x9888), 0x11840000 },
- { _MMIO(0x9888), 0x09848000 },
- { _MMIO(0x9888), 0x09850080 },
- { _MMIO(0x9888), 0x03850003 },
- { _MMIO(0x9888), 0x01850000 },
- { _MMIO(0x9888), 0x07860000 },
- { _MMIO(0x9888), 0x0f860400 },
- { _MMIO(0x9888), 0x09870032 },
- { _MMIO(0x9888), 0x01888052 },
- { _MMIO(0x9888), 0x11880000 },
- { _MMIO(0x9888), 0x09884000 },
- { _MMIO(0x9888), 0x15968000 },
- { _MMIO(0x9888), 0x17968000 },
- { _MMIO(0x9888), 0x0f96c000 },
- { _MMIO(0x9888), 0x1f950011 },
- { _MMIO(0x9888), 0x1d950014 },
- { _MMIO(0x9888), 0x0592c000 },
- { _MMIO(0x9888), 0x0b928000 },
- { _MMIO(0x9888), 0x0d924000 },
- { _MMIO(0x9888), 0x0f924000 },
- { _MMIO(0x9888), 0x11928000 },
- { _MMIO(0x9888), 0x1392c000 },
- { _MMIO(0x9888), 0x09924000 },
- { _MMIO(0x9888), 0x01985000 },
- { _MMIO(0x9888), 0x07988000 },
- { _MMIO(0x9888), 0x09981000 },
- { _MMIO(0x9888), 0x0b982000 },
- { _MMIO(0x9888), 0x0d982000 },
- { _MMIO(0x9888), 0x0f989000 },
- { _MMIO(0x9888), 0x05982000 },
- { _MMIO(0x9888), 0x13904000 },
- { _MMIO(0x9888), 0x21904000 },
- { _MMIO(0x9888), 0x23904000 },
- { _MMIO(0x9888), 0x25908000 },
- { _MMIO(0x9888), 0x27904000 },
- { _MMIO(0x9888), 0x29908000 },
- { _MMIO(0x9888), 0x2b904000 },
- { _MMIO(0x9888), 0x2f904000 },
- { _MMIO(0x9888), 0x31904000 },
- { _MMIO(0x9888), 0x15904000 },
- { _MMIO(0x9888), 0x17908000 },
- { _MMIO(0x9888), 0x19908000 },
- { _MMIO(0x9888), 0x1b904000 },
- { _MMIO(0x9888), 0x0b978000 },
- { _MMIO(0x9888), 0x0f974000 },
- { _MMIO(0x9888), 0x11974000 },
- { _MMIO(0x9888), 0x13978000 },
- { _MMIO(0x9888), 0x09974000 },
- { _MMIO(0xd28), 0x00000000 },
- { _MMIO(0x9888), 0x1190c080 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x419010a0 },
- { _MMIO(0x9888), 0x55904000 },
- { _MMIO(0x9888), 0x45901000 },
- { _MMIO(0x9888), 0x47900084 },
- { _MMIO(0x9888), 0x57904400 },
- { _MMIO(0x9888), 0x499000a5 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900081 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x439014a4 },
- { _MMIO(0x9888), 0x53900400 },
-};
-
-static const struct i915_oa_reg mux_config_render_pipe_profile_0_sku_gte_0x02[] = {
- { _MMIO(0x9888), 0x0c0e001f },
- { _MMIO(0x9888), 0x0a0f0000 },
- { _MMIO(0x9888), 0x10116800 },
- { _MMIO(0x9888), 0x178a03e0 },
- { _MMIO(0x9888), 0x11824c00 },
- { _MMIO(0x9888), 0x11830020 },
- { _MMIO(0x9888), 0x13840020 },
- { _MMIO(0x9888), 0x11850019 },
- { _MMIO(0x9888), 0x11860007 },
- { _MMIO(0x9888), 0x01870c40 },
- { _MMIO(0x9888), 0x17880000 },
- { _MMIO(0x9888), 0x022f4000 },
- { _MMIO(0x9888), 0x0a4c0040 },
- { _MMIO(0x9888), 0x0c0d8000 },
- { _MMIO(0x9888), 0x040d4000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x020e5400 },
- { _MMIO(0x9888), 0x000e0000 },
- { _MMIO(0x9888), 0x080f0040 },
- { _MMIO(0x9888), 0x000f0000 },
- { _MMIO(0x9888), 0x100f0000 },
- { _MMIO(0x9888), 0x0e0f0040 },
- { _MMIO(0x9888), 0x0c2c8000 },
- { _MMIO(0x9888), 0x06104000 },
- { _MMIO(0x9888), 0x06110012 },
- { _MMIO(0x9888), 0x06131000 },
- { _MMIO(0x9888), 0x01898000 },
- { _MMIO(0x9888), 0x0d890100 },
- { _MMIO(0x9888), 0x03898000 },
- { _MMIO(0x9888), 0x09808000 },
- { _MMIO(0x9888), 0x0b808000 },
- { _MMIO(0x9888), 0x0380c000 },
- { _MMIO(0x9888), 0x0f8a0075 },
- { _MMIO(0x9888), 0x1d8a0000 },
- { _MMIO(0x9888), 0x118a8000 },
- { _MMIO(0x9888), 0x1b8a4000 },
- { _MMIO(0x9888), 0x138a8000 },
- { _MMIO(0x9888), 0x1d81a000 },
- { _MMIO(0x9888), 0x15818000 },
- { _MMIO(0x9888), 0x17818000 },
- { _MMIO(0x9888), 0x0b820030 },
- { _MMIO(0x9888), 0x07828000 },
- { _MMIO(0x9888), 0x0d824000 },
- { _MMIO(0x9888), 0x0f828000 },
- { _MMIO(0x9888), 0x05824000 },
- { _MMIO(0x9888), 0x0d830003 },
- { _MMIO(0x9888), 0x0583000c },
- { _MMIO(0x9888), 0x09830000 },
- { _MMIO(0x9888), 0x03838000 },
- { _MMIO(0x9888), 0x07838000 },
- { _MMIO(0x9888), 0x0b840980 },
- { _MMIO(0x9888), 0x03844d80 },
- { _MMIO(0x9888), 0x11840000 },
- { _MMIO(0x9888), 0x09848000 },
- { _MMIO(0x9888), 0x09850080 },
- { _MMIO(0x9888), 0x03850003 },
- { _MMIO(0x9888), 0x01850000 },
- { _MMIO(0x9888), 0x07860000 },
- { _MMIO(0x9888), 0x0f860400 },
- { _MMIO(0x9888), 0x09870032 },
- { _MMIO(0x9888), 0x01888052 },
- { _MMIO(0x9888), 0x11880000 },
- { _MMIO(0x9888), 0x09884000 },
- { _MMIO(0x9888), 0x1b931001 },
- { _MMIO(0x9888), 0x1d930001 },
- { _MMIO(0x9888), 0x19934000 },
- { _MMIO(0x9888), 0x1b958000 },
- { _MMIO(0x9888), 0x1d950094 },
- { _MMIO(0x9888), 0x19958000 },
- { _MMIO(0x9888), 0x05e5a000 },
- { _MMIO(0x9888), 0x01e5c000 },
- { _MMIO(0x9888), 0x0592c000 },
- { _MMIO(0x9888), 0x0b928000 },
- { _MMIO(0x9888), 0x0d924000 },
- { _MMIO(0x9888), 0x0f924000 },
- { _MMIO(0x9888), 0x11928000 },
- { _MMIO(0x9888), 0x1392c000 },
- { _MMIO(0x9888), 0x09924000 },
- { _MMIO(0x9888), 0x01985000 },
- { _MMIO(0x9888), 0x07988000 },
- { _MMIO(0x9888), 0x09981000 },
- { _MMIO(0x9888), 0x0b982000 },
- { _MMIO(0x9888), 0x0d982000 },
- { _MMIO(0x9888), 0x0f989000 },
- { _MMIO(0x9888), 0x05982000 },
- { _MMIO(0x9888), 0x13904000 },
- { _MMIO(0x9888), 0x21904000 },
- { _MMIO(0x9888), 0x23904000 },
- { _MMIO(0x9888), 0x25908000 },
- { _MMIO(0x9888), 0x27904000 },
- { _MMIO(0x9888), 0x29908000 },
- { _MMIO(0x9888), 0x2b904000 },
- { _MMIO(0x9888), 0x2f904000 },
- { _MMIO(0x9888), 0x31904000 },
- { _MMIO(0x9888), 0x15904000 },
- { _MMIO(0x9888), 0x17908000 },
- { _MMIO(0x9888), 0x19908000 },
- { _MMIO(0x9888), 0x1b904000 },
- { _MMIO(0x9888), 0x1190c080 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x419010a0 },
- { _MMIO(0x9888), 0x55904000 },
- { _MMIO(0x9888), 0x45901000 },
- { _MMIO(0x9888), 0x47900084 },
- { _MMIO(0x9888), 0x57904400 },
- { _MMIO(0x9888), 0x499000a5 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900081 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x439014a4 },
- { _MMIO(0x9888), 0x53900400 },
-};
-
-static int
-get_render_pipe_profile_mux_config(struct drm_i915_private *dev_priv,
-                                   const struct i915_oa_reg **regs,
-                                   int *lens)
-{
-        int n = 0;
-
-        BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 2);
-        BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 2);
-
-        if (dev_priv->drm.pdev->revision < 0x02) {
-                regs[n] = mux_config_render_pipe_profile_0_sku_lt_0x02;
-                lens[n] = ARRAY_SIZE(mux_config_render_pipe_profile_0_sku_lt_0x02);
-                n++;
-        }
-        if (dev_priv->drm.pdev->revision >= 0x02) {
-                regs[n] = mux_config_render_pipe_profile_0_sku_gte_0x02;
-                lens[n] = ARRAY_SIZE(mux_config_render_pipe_profile_0_sku_gte_0x02);
-                n++;
-        }
-
-        return n;
-}
-
-static const struct i915_oa_reg b_counter_config_memory_reads[] = {
- { _MMIO(0x272c), 0xffffffff },
- { _MMIO(0x2728), 0xffffffff },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x271c), 0xffffffff },
- { _MMIO(0x2718), 0xffffffff },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x274c), 0x86543210 },
- { _MMIO(0x2748), 0x86543210 },
- { _MMIO(0x2744), 0x00006667 },
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x275c), 0x86543210 },
- { _MMIO(0x2758), 0x86543210 },
- { _MMIO(0x2754), 0x00006465 },
- { _MMIO(0x2750), 0x00000000 },
- { _MMIO(0x2770), 0x0007f81a },
- { _MMIO(0x2774), 0x0000fe00 },
- { _MMIO(0x2778), 0x0007f82a },
- { _MMIO(0x277c), 0x0000fe00 },
- { _MMIO(0x2780), 0x0007f872 },
- { _MMIO(0x2784), 0x0000fe00 },
- { _MMIO(0x2788), 0x0007f8ba },
- { _MMIO(0x278c), 0x0000fe00 },
- { _MMIO(0x2790), 0x0007f87a },
- { _MMIO(0x2794), 0x0000fe00 },
- { _MMIO(0x2798), 0x0007f8ea },
- { _MMIO(0x279c), 0x0000fe00 },
- { _MMIO(0x27a0), 0x0007f8e2 },
- { _MMIO(0x27a4), 0x0000fe00 },
- { _MMIO(0x27a8), 0x0007f8f2 },
- { _MMIO(0x27ac), 0x0000fe00 },
-};
-
-static const struct i915_oa_reg flex_eu_config_memory_reads[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00015014 },
- { _MMIO(0xe658), 0x00025024 },
- { _MMIO(0xe758), 0x00035034 },
- { _MMIO(0xe45c), 0x00045044 },
- { _MMIO(0xe55c), 0x00055054 },
- { _MMIO(0xe65c), 0x00065064 },
-};
-
-static const struct i915_oa_reg mux_config_memory_reads_0_slices_0x01_and_sku_lt_0x02[] = {
- { _MMIO(0x9888), 0x11810c00 },
- { _MMIO(0x9888), 0x1381001a },
- { _MMIO(0x9888), 0x13946000 },
- { _MMIO(0x9888), 0x37906800 },
- { _MMIO(0x9888), 0x3f900003 },
- { _MMIO(0x9888), 0x03811300 },
- { _MMIO(0x9888), 0x05811b12 },
- { _MMIO(0x9888), 0x0781001a },
- { _MMIO(0x9888), 0x1f810000 },
- { _MMIO(0x9888), 0x17810000 },
- { _MMIO(0x9888), 0x19810000 },
- { _MMIO(0x9888), 0x1b810000 },
- { _MMIO(0x9888), 0x1d810000 },
- { _MMIO(0x9888), 0x0f968000 },
- { _MMIO(0x9888), 0x1196c000 },
- { _MMIO(0x9888), 0x13964000 },
- { _MMIO(0x9888), 0x11938000 },
- { _MMIO(0x9888), 0x1b93fe00 },
- { _MMIO(0x9888), 0x01940010 },
- { _MMIO(0x9888), 0x07941100 },
- { _MMIO(0x9888), 0x09941312 },
- { _MMIO(0x9888), 0x0b941514 },
- { _MMIO(0x9888), 0x0d941716 },
- { _MMIO(0x9888), 0x11940000 },
- { _MMIO(0x9888), 0x19940000 },
- { _MMIO(0x9888), 0x1b940000 },
- { _MMIO(0x9888), 0x1d940000 },
- { _MMIO(0x9888), 0x1b954000 },
- { _MMIO(0x9888), 0x1d95a550 },
- { _MMIO(0x9888), 0x1f9502aa },
- { _MMIO(0x9888), 0x2f900157 },
- { _MMIO(0x9888), 0x31900105 },
- { _MMIO(0x9888), 0x15900103 },
- { _MMIO(0x9888), 0x17900101 },
- { _MMIO(0x9888), 0x35900000 },
- { _MMIO(0x9888), 0x13908000 },
- { _MMIO(0x9888), 0x21908000 },
- { _MMIO(0x9888), 0x23908000 },
- { _MMIO(0x9888), 0x25908000 },
- { _MMIO(0x9888), 0x27908000 },
- { _MMIO(0x9888), 0x29908000 },
- { _MMIO(0x9888), 0x2b908000 },
- { _MMIO(0x9888), 0x2d908000 },
- { _MMIO(0x9888), 0x19908000 },
- { _MMIO(0x9888), 0x1b908000 },
- { _MMIO(0x9888), 0x1d908000 },
- { _MMIO(0x9888), 0x1f908000 },
- { _MMIO(0xd28), 0x00000000 },
- { _MMIO(0x9888), 0x11900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900c00 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x45900000 },
- { _MMIO(0x9888), 0x47900000 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900063 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x43900003 },
- { _MMIO(0x9888), 0x53900000 },
-};
-
-static const struct i915_oa_reg mux_config_memory_reads_0_sku_lt_0x05_and_sku_gte_0x02[] = {
- { _MMIO(0x9888), 0x11810c00 },
- { _MMIO(0x9888), 0x1381001a },
- { _MMIO(0x9888), 0x13946000 },
- { _MMIO(0x9888), 0x15940016 },
- { _MMIO(0x9888), 0x37906800 },
- { _MMIO(0x9888), 0x03811300 },
- { _MMIO(0x9888), 0x05811b12 },
- { _MMIO(0x9888), 0x0781001a },
- { _MMIO(0x9888), 0x1f810000 },
- { _MMIO(0x9888), 0x17810000 },
- { _MMIO(0x9888), 0x19810000 },
- { _MMIO(0x9888), 0x1b810000 },
- { _MMIO(0x9888), 0x1d810000 },
- { _MMIO(0x9888), 0x19930800 },
- { _MMIO(0x9888), 0x1b93aa55 },
- { _MMIO(0x9888), 0x1d9300aa },
- { _MMIO(0x9888), 0x01940010 },
- { _MMIO(0x9888), 0x07941100 },
- { _MMIO(0x9888), 0x09941312 },
- { _MMIO(0x9888), 0x0b941514 },
- { _MMIO(0x9888), 0x0d941716 },
- { _MMIO(0x9888), 0x0f940018 },
- { _MMIO(0x9888), 0x1b940000 },
- { _MMIO(0x9888), 0x11940000 },
- { _MMIO(0x9888), 0x01e58000 },
- { _MMIO(0x9888), 0x03e57000 },
- { _MMIO(0x9888), 0x31900105 },
- { _MMIO(0x9888), 0x15900103 },
- { _MMIO(0x9888), 0x17900101 },
- { _MMIO(0x9888), 0x35900000 },
- { _MMIO(0x9888), 0x13908000 },
- { _MMIO(0x9888), 0x21908000 },
- { _MMIO(0x9888), 0x23908000 },
- { _MMIO(0x9888), 0x25908000 },
- { _MMIO(0x9888), 0x27908000 },
- { _MMIO(0x9888), 0x29908000 },
- { _MMIO(0x9888), 0x2b908000 },
- { _MMIO(0x9888), 0x2d908000 },
- { _MMIO(0x9888), 0x2f908000 },
- { _MMIO(0x9888), 0x19908000 },
- { _MMIO(0x9888), 0x1b908000 },
- { _MMIO(0x9888), 0x1d908000 },
- { _MMIO(0x9888), 0x1f908000 },
- { _MMIO(0x9888), 0x11900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900c20 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x45900400 },
- { _MMIO(0x9888), 0x47900421 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900421 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900061 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x43900003 },
- { _MMIO(0x9888), 0x53900000 },
-};
-
-static const struct i915_oa_reg mux_config_memory_reads_0_sku_gte_0x05[] = {
- { _MMIO(0x9888), 0x11810c00 },
- { _MMIO(0x9888), 0x1381001a },
- { _MMIO(0x9888), 0x37906800 },
- { _MMIO(0x9888), 0x3f900064 },
- { _MMIO(0x9888), 0x03811300 },
- { _MMIO(0x9888), 0x05811b12 },
- { _MMIO(0x9888), 0x0781001a },
- { _MMIO(0x9888), 0x1f810000 },
- { _MMIO(0x9888), 0x17810000 },
- { _MMIO(0x9888), 0x19810000 },
- { _MMIO(0x9888), 0x1b810000 },
- { _MMIO(0x9888), 0x1d810000 },
- { _MMIO(0x9888), 0x1b930055 },
- { _MMIO(0x9888), 0x03e58000 },
- { _MMIO(0x9888), 0x05e5c000 },
- { _MMIO(0x9888), 0x07e54000 },
- { _MMIO(0x9888), 0x13900150 },
- { _MMIO(0x9888), 0x21900151 },
- { _MMIO(0x9888), 0x23900152 },
- { _MMIO(0x9888), 0x25900153 },
- { _MMIO(0x9888), 0x27900154 },
- { _MMIO(0x9888), 0x29900155 },
- { _MMIO(0x9888), 0x2b900156 },
- { _MMIO(0x9888), 0x2d900157 },
- { _MMIO(0x9888), 0x2f90015f },
- { _MMIO(0x9888), 0x31900105 },
- { _MMIO(0x9888), 0x15900103 },
- { _MMIO(0x9888), 0x17900101 },
- { _MMIO(0x9888), 0x35900000 },
- { _MMIO(0x9888), 0x19908000 },
- { _MMIO(0x9888), 0x1b908000 },
- { _MMIO(0x9888), 0x1d908000 },
- { _MMIO(0x9888), 0x1f908000 },
- { _MMIO(0x9888), 0x11900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900c60 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x45900c00 },
- { _MMIO(0x9888), 0x47900c63 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900c63 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900063 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x43900003 },
- { _MMIO(0x9888), 0x53900000 },
-};
-
-static int
-get_memory_reads_mux_config(struct drm_i915_private *dev_priv,
-                            const struct i915_oa_reg **regs,
-                            int *lens)
-{
-        int n = 0;
-
-        BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 3);
-        BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 3);
-
-        if ((INTEL_INFO(dev_priv)->sseu.slice_mask & 0x01) &&
-            (dev_priv->drm.pdev->revision < 0x02)) {
-                regs[n] = mux_config_memory_reads_0_slices_0x01_and_sku_lt_0x02;
-                lens[n] = ARRAY_SIZE(mux_config_memory_reads_0_slices_0x01_and_sku_lt_0x02);
-                n++;
-        }
-        if ((dev_priv->drm.pdev->revision < 0x05) &&
-            (dev_priv->drm.pdev->revision >= 0x02)) {
-                regs[n] = mux_config_memory_reads_0_sku_lt_0x05_and_sku_gte_0x02;
-                lens[n] = ARRAY_SIZE(mux_config_memory_reads_0_sku_lt_0x05_and_sku_gte_0x02);
-                n++;
-        }
-        if (dev_priv->drm.pdev->revision >= 0x05) {
-                regs[n] = mux_config_memory_reads_0_sku_gte_0x05;
-                lens[n] = ARRAY_SIZE(mux_config_memory_reads_0_sku_gte_0x05);
-                n++;
-        }
-
-        return n;
-}
-
-static const struct i915_oa_reg b_counter_config_memory_writes[] = {
- { _MMIO(0x272c), 0xffffffff },
- { _MMIO(0x2728), 0xffffffff },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x271c), 0xffffffff },
- { _MMIO(0x2718), 0xffffffff },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x274c), 0x86543210 },
- { _MMIO(0x2748), 0x86543210 },
- { _MMIO(0x2744), 0x00006667 },
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x275c), 0x86543210 },
- { _MMIO(0x2758), 0x86543210 },
- { _MMIO(0x2754), 0x00006465 },
- { _MMIO(0x2750), 0x00000000 },
- { _MMIO(0x2770), 0x0007f81a },
- { _MMIO(0x2774), 0x0000fe00 },
- { _MMIO(0x2778), 0x0007f82a },
- { _MMIO(0x277c), 0x0000fe00 },
- { _MMIO(0x2780), 0x0007f822 },
- { _MMIO(0x2784), 0x0000fe00 },
- { _MMIO(0x2788), 0x0007f8ba },
- { _MMIO(0x278c), 0x0000fe00 },
- { _MMIO(0x2790), 0x0007f87a },
- { _MMIO(0x2794), 0x0000fe00 },
- { _MMIO(0x2798), 0x0007f8ea },
- { _MMIO(0x279c), 0x0000fe00 },
- { _MMIO(0x27a0), 0x0007f8e2 },
- { _MMIO(0x27a4), 0x0000fe00 },
- { _MMIO(0x27a8), 0x0007f8f2 },
- { _MMIO(0x27ac), 0x0000fe00 },
-};
-
-static const struct i915_oa_reg flex_eu_config_memory_writes[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00015014 },
- { _MMIO(0xe658), 0x00025024 },
- { _MMIO(0xe758), 0x00035034 },
- { _MMIO(0xe45c), 0x00045044 },
- { _MMIO(0xe55c), 0x00055054 },
- { _MMIO(0xe65c), 0x00065064 },
-};
-
-static const struct i915_oa_reg mux_config_memory_writes_0_slices_0x01_and_sku_lt_0x02[] = {
- { _MMIO(0x9888), 0x11810c00 },
- { _MMIO(0x9888), 0x1381001a },
- { _MMIO(0x9888), 0x13945400 },
- { _MMIO(0x9888), 0x37906800 },
- { _MMIO(0x9888), 0x3f901400 },
- { _MMIO(0x9888), 0x03811300 },
- { _MMIO(0x9888), 0x05811b12 },
- { _MMIO(0x9888), 0x0781001a },
- { _MMIO(0x9888), 0x1f810000 },
- { _MMIO(0x9888), 0x17810000 },
- { _MMIO(0x9888), 0x19810000 },
- { _MMIO(0x9888), 0x1b810000 },
- { _MMIO(0x9888), 0x1d810000 },
- { _MMIO(0x9888), 0x0f968000 },
- { _MMIO(0x9888), 0x1196c000 },
- { _MMIO(0x9888), 0x13964000 },
- { _MMIO(0x9888), 0x11938000 },
- { _MMIO(0x9888), 0x1b93fe00 },
- { _MMIO(0x9888), 0x01940010 },
- { _MMIO(0x9888), 0x07941100 },
- { _MMIO(0x9888), 0x09941312 },
- { _MMIO(0x9888), 0x0b941514 },
- { _MMIO(0x9888), 0x0d941716 },
- { _MMIO(0x9888), 0x11940000 },
- { _MMIO(0x9888), 0x19940000 },
- { _MMIO(0x9888), 0x1b940000 },
- { _MMIO(0x9888), 0x1d940000 },
- { _MMIO(0x9888), 0x1b954000 },
- { _MMIO(0x9888), 0x1d95a550 },
- { _MMIO(0x9888), 0x1f9502aa },
- { _MMIO(0x9888), 0x2f900167 },
- { _MMIO(0x9888), 0x31900105 },
- { _MMIO(0x9888), 0x15900103 },
- { _MMIO(0x9888), 0x17900101 },
- { _MMIO(0x9888), 0x35900000 },
- { _MMIO(0x9888), 0x13908000 },
- { _MMIO(0x9888), 0x21908000 },
- { _MMIO(0x9888), 0x23908000 },
- { _MMIO(0x9888), 0x25908000 },
- { _MMIO(0x9888), 0x27908000 },
- { _MMIO(0x9888), 0x29908000 },
- { _MMIO(0x9888), 0x2b908000 },
- { _MMIO(0x9888), 0x2d908000 },
- { _MMIO(0x9888), 0x19908000 },
- { _MMIO(0x9888), 0x1b908000 },
- { _MMIO(0x9888), 0x1d908000 },
- { _MMIO(0x9888), 0x1f908000 },
- { _MMIO(0xd28), 0x00000000 },
- { _MMIO(0x9888), 0x11900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900c00 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x45900000 },
- { _MMIO(0x9888), 0x47900000 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900063 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x43900003 },
- { _MMIO(0x9888), 0x53900000 },
-};
-
-static const struct i915_oa_reg mux_config_memory_writes_0_sku_lt_0x05_and_sku_gte_0x02[] = {
- { _MMIO(0x9888), 0x11810c00 },
- { _MMIO(0x9888), 0x1381001a },
- { _MMIO(0x9888), 0x13945400 },
- { _MMIO(0x9888), 0x37906800 },
- { _MMIO(0x9888), 0x3f901400 },
- { _MMIO(0x9888), 0x03811300 },
- { _MMIO(0x9888), 0x05811b12 },
- { _MMIO(0x9888), 0x0781001a },
- { _MMIO(0x9888), 0x1f810000 },
- { _MMIO(0x9888), 0x17810000 },
- { _MMIO(0x9888), 0x19810000 },
- { _MMIO(0x9888), 0x1b810000 },
- { _MMIO(0x9888), 0x1d810000 },
- { _MMIO(0x9888), 0x19930800 },
- { _MMIO(0x9888), 0x1b93aa55 },
- { _MMIO(0x9888), 0x1d93002a },
- { _MMIO(0x9888), 0x01940010 },
- { _MMIO(0x9888), 0x07941100 },
- { _MMIO(0x9888), 0x09941312 },
- { _MMIO(0x9888), 0x0b941514 },
- { _MMIO(0x9888), 0x0d941716 },
- { _MMIO(0x9888), 0x1b940000 },
- { _MMIO(0x9888), 0x11940000 },
- { _MMIO(0x9888), 0x01e58000 },
- { _MMIO(0x9888), 0x03e57000 },
- { _MMIO(0x9888), 0x2f900167 },
- { _MMIO(0x9888), 0x31900105 },
- { _MMIO(0x9888), 0x15900103 },
- { _MMIO(0x9888), 0x17900101 },
- { _MMIO(0x9888), 0x35900000 },
- { _MMIO(0x9888), 0x13908000 },
- { _MMIO(0x9888), 0x21908000 },
- { _MMIO(0x9888), 0x23908000 },
- { _MMIO(0x9888), 0x25908000 },
- { _MMIO(0x9888), 0x27908000 },
- { _MMIO(0x9888), 0x29908000 },
- { _MMIO(0x9888), 0x2b908000 },
- { _MMIO(0x9888), 0x2d908000 },
- { _MMIO(0x9888), 0x19908000 },
- { _MMIO(0x9888), 0x1b908000 },
- { _MMIO(0x9888), 0x1d908000 },
- { _MMIO(0x9888), 0x1f908000 },
- { _MMIO(0x9888), 0x11900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900c20 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x45900400 },
- { _MMIO(0x9888), 0x47900421 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900421 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900063 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x43900003 },
- { _MMIO(0x9888), 0x53900000 },
-};
-
-static const struct i915_oa_reg mux_config_memory_writes_0_sku_gte_0x05[] = {
- { _MMIO(0x9888), 0x11810c00 },
- { _MMIO(0x9888), 0x1381001a },
- { _MMIO(0x9888), 0x37906800 },
- { _MMIO(0x9888), 0x3f901000 },
- { _MMIO(0x9888), 0x03811300 },
- { _MMIO(0x9888), 0x05811b12 },
- { _MMIO(0x9888), 0x0781001a },
- { _MMIO(0x9888), 0x1f810000 },
- { _MMIO(0x9888), 0x17810000 },
- { _MMIO(0x9888), 0x19810000 },
- { _MMIO(0x9888), 0x1b810000 },
- { _MMIO(0x9888), 0x1d810000 },
- { _MMIO(0x9888), 0x1b930055 },
- { _MMIO(0x9888), 0x03e58000 },
- { _MMIO(0x9888), 0x05e5c000 },
- { _MMIO(0x9888), 0x07e54000 },
- { _MMIO(0x9888), 0x13900160 },
- { _MMIO(0x9888), 0x21900161 },
- { _MMIO(0x9888), 0x23900162 },
- { _MMIO(0x9888), 0x25900163 },
- { _MMIO(0x9888), 0x27900164 },
- { _MMIO(0x9888), 0x29900165 },
- { _MMIO(0x9888), 0x2b900166 },
- { _MMIO(0x9888), 0x2d900167 },
- { _MMIO(0x9888), 0x2f900150 },
- { _MMIO(0x9888), 0x31900105 },
- { _MMIO(0x9888), 0x15900103 },
- { _MMIO(0x9888), 0x17900101 },
- { _MMIO(0x9888), 0x35900000 },
- { _MMIO(0x9888), 0x19908000 },
- { _MMIO(0x9888), 0x1b908000 },
- { _MMIO(0x9888), 0x1d908000 },
- { _MMIO(0x9888), 0x1f908000 },
- { _MMIO(0x9888), 0x11900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900c60 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x45900c00 },
- { _MMIO(0x9888), 0x47900c63 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900c63 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900063 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x43900003 },
- { _MMIO(0x9888), 0x53900000 },
-};
-
-static int
-get_memory_writes_mux_config(struct drm_i915_private *dev_priv,
-                             const struct i915_oa_reg **regs,
-                             int *lens)
-{
-        int n = 0;
-
-        BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 3);
-        BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 3);
-
-        if ((INTEL_INFO(dev_priv)->sseu.slice_mask & 0x01) &&
-            (dev_priv->drm.pdev->revision < 0x02)) {
-                regs[n] = mux_config_memory_writes_0_slices_0x01_and_sku_lt_0x02;
-                lens[n] = ARRAY_SIZE(mux_config_memory_writes_0_slices_0x01_and_sku_lt_0x02);
-                n++;
-        }
-        if ((dev_priv->drm.pdev->revision < 0x05) &&
-            (dev_priv->drm.pdev->revision >= 0x02)) {
-                regs[n] = mux_config_memory_writes_0_sku_lt_0x05_and_sku_gte_0x02;
-                lens[n] = ARRAY_SIZE(mux_config_memory_writes_0_sku_lt_0x05_and_sku_gte_0x02);
-                n++;
-        }
-        if (dev_priv->drm.pdev->revision >= 0x05) {
-                regs[n] = mux_config_memory_writes_0_sku_gte_0x05;
-                lens[n] = ARRAY_SIZE(mux_config_memory_writes_0_sku_gte_0x05);
-                n++;
-        }
-
-        return n;
-}
-
-static const struct i915_oa_reg b_counter_config_compute_extended[] = {
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2770), 0x0007fc2a },
- { _MMIO(0x2774), 0x0000bf00 },
- { _MMIO(0x2778), 0x0007fc6a },
- { _MMIO(0x277c), 0x0000bf00 },
- { _MMIO(0x2780), 0x0007fc92 },
- { _MMIO(0x2784), 0x0000bf00 },
- { _MMIO(0x2788), 0x0007fca2 },
- { _MMIO(0x278c), 0x0000bf00 },
- { _MMIO(0x2790), 0x0007fc32 },
- { _MMIO(0x2794), 0x0000bf00 },
- { _MMIO(0x2798), 0x0007fc9a },
- { _MMIO(0x279c), 0x0000bf00 },
- { _MMIO(0x27a0), 0x0007fe6a },
- { _MMIO(0x27a4), 0x0000bf00 },
- { _MMIO(0x27a8), 0x0007fe7a },
- { _MMIO(0x27ac), 0x0000bf00 },
-};
-
-static const struct i915_oa_reg flex_eu_config_compute_extended[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00000003 },
- { _MMIO(0xe658), 0x00002001 },
- { _MMIO(0xe758), 0x00778008 },
- { _MMIO(0xe45c), 0x00088078 },
- { _MMIO(0xe55c), 0x00808708 },
- { _MMIO(0xe65c), 0x00a08908 },
-};
-
-static const struct i915_oa_reg mux_config_compute_extended_0_subslices_0x01[] = {
- { _MMIO(0x9888), 0x106c00e0 },
- { _MMIO(0x9888), 0x141c8160 },
- { _MMIO(0x9888), 0x161c8015 },
- { _MMIO(0x9888), 0x181c0120 },
- { _MMIO(0x9888), 0x004e8000 },
- { _MMIO(0x9888), 0x0e4e8000 },
- { _MMIO(0x9888), 0x184e8000 },
- { _MMIO(0x9888), 0x1a4eaaa0 },
- { _MMIO(0x9888), 0x1c4e0002 },
- { _MMIO(0x9888), 0x024e8000 },
- { _MMIO(0x9888), 0x044e8000 },
- { _MMIO(0x9888), 0x064e8000 },
- { _MMIO(0x9888), 0x084e8000 },
- { _MMIO(0x9888), 0x0a4e8000 },
- { _MMIO(0x9888), 0x0e6c0b01 },
- { _MMIO(0x9888), 0x006c0200 },
- { _MMIO(0x9888), 0x026c000c },
- { _MMIO(0x9888), 0x1c6c0000 },
- { _MMIO(0x9888), 0x1e6c0000 },
- { _MMIO(0x9888), 0x1a6c0000 },
- { _MMIO(0x9888), 0x0e1bc000 },
- { _MMIO(0x9888), 0x001b8000 },
- { _MMIO(0x9888), 0x021bc000 },
- { _MMIO(0x9888), 0x001c0041 },
- { _MMIO(0x9888), 0x061c4200 },
- { _MMIO(0x9888), 0x081c4443 },
- { _MMIO(0x9888), 0x0a1c4645 },
- { _MMIO(0x9888), 0x0c1c7647 },
- { _MMIO(0x9888), 0x041c7357 },
- { _MMIO(0x9888), 0x1c1c0030 },
- { _MMIO(0x9888), 0x101c0000 },
- { _MMIO(0x9888), 0x1a1c0000 },
- { _MMIO(0x9888), 0x121c8000 },
- { _MMIO(0x9888), 0x004c8000 },
- { _MMIO(0x9888), 0x0a4caa2a },
- { _MMIO(0x9888), 0x0c4c02aa },
- { _MMIO(0x9888), 0x084ca000 },
- { _MMIO(0x9888), 0x000da000 },
- { _MMIO(0x9888), 0x060d8000 },
- { _MMIO(0x9888), 0x080da000 },
- { _MMIO(0x9888), 0x0a0da000 },
- { _MMIO(0x9888), 0x0c0da000 },
- { _MMIO(0x9888), 0x0e0da000 },
- { _MMIO(0x9888), 0x020da000 },
- { _MMIO(0x9888), 0x040da000 },
- { _MMIO(0x9888), 0x0c0f5400 },
- { _MMIO(0x9888), 0x0e0f5515 },
- { _MMIO(0x9888), 0x100f0155 },
- { _MMIO(0x9888), 0x002c8000 },
- { _MMIO(0x9888), 0x0e2c8000 },
- { _MMIO(0x9888), 0x162caa00 },
- { _MMIO(0x9888), 0x182c00aa },
- { _MMIO(0x9888), 0x022c8000 },
- { _MMIO(0x9888), 0x042c8000 },
- { _MMIO(0x9888), 0x062c8000 },
- { _MMIO(0x9888), 0x082c8000 },
- { _MMIO(0x9888), 0x0a2c8000 },
- { _MMIO(0xd28), 0x00000000 },
- { _MMIO(0x9888), 0x11907fff },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900040 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x45900802 },
- { _MMIO(0x9888), 0x47900842 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900842 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900000 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x43900800 },
- { _MMIO(0x9888), 0x53900000 },
-};
-
-static int
-get_compute_extended_mux_config(struct drm_i915_private *dev_priv,
-                                const struct i915_oa_reg **regs,
-                                int *lens)
-{
-        int n = 0;
-
-        BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
-        BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
-        if (INTEL_INFO(dev_priv)->sseu.subslice_mask & 0x01) {
-                regs[n] = mux_config_compute_extended_0_subslices_0x01;
-                lens[n] = ARRAY_SIZE(mux_config_compute_extended_0_subslices_0x01);
-                n++;
-        }
-
-        return n;
-}
-
-static const struct i915_oa_reg b_counter_config_compute_l3_cache[] = {
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x30800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x30800000 },
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2770), 0x0007fffa },
- { _MMIO(0x2774), 0x0000fefe },
- { _MMIO(0x2778), 0x0007fffa },
- { _MMIO(0x277c), 0x0000fefd },
- { _MMIO(0x2790), 0x0007fffa },
- { _MMIO(0x2794), 0x0000fbef },
- { _MMIO(0x2798), 0x0007fffa },
- { _MMIO(0x279c), 0x0000fbdf },
-};
-
-static const struct i915_oa_reg flex_eu_config_compute_l3_cache[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00000003 },
- { _MMIO(0xe658), 0x00002001 },
- { _MMIO(0xe758), 0x00101100 },
- { _MMIO(0xe45c), 0x00201200 },
- { _MMIO(0xe55c), 0x00301300 },
- { _MMIO(0xe65c), 0x00401400 },
-};
-
-static const struct i915_oa_reg mux_config_compute_l3_cache[] = {
- { _MMIO(0x9888), 0x166c0760 },
- { _MMIO(0x9888), 0x1593001e },
- { _MMIO(0x9888), 0x3f901403 },
- { _MMIO(0x9888), 0x004e8000 },
- { _MMIO(0x9888), 0x0e4e8000 },
- { _MMIO(0x9888), 0x184e8000 },
- { _MMIO(0x9888), 0x1a4e8020 },
- { _MMIO(0x9888), 0x1c4e0002 },
- { _MMIO(0x9888), 0x006c0051 },
- { _MMIO(0x9888), 0x066c5000 },
- { _MMIO(0x9888), 0x086c5c5d },
- { _MMIO(0x9888), 0x0e6c5e5f },
- { _MMIO(0x9888), 0x106c0000 },
- { _MMIO(0x9888), 0x186c0000 },
- { _MMIO(0x9888), 0x1c6c0000 },
- { _MMIO(0x9888), 0x1e6c0000 },
- { _MMIO(0x9888), 0x001b4000 },
- { _MMIO(0x9888), 0x061b8000 },
- { _MMIO(0x9888), 0x081bc000 },
- { _MMIO(0x9888), 0x0e1bc000 },
- { _MMIO(0x9888), 0x101c8000 },
- { _MMIO(0x9888), 0x1a1ce000 },
- { _MMIO(0x9888), 0x1c1c0030 },
- { _MMIO(0x9888), 0x004c8000 },
- { _MMIO(0x9888), 0x0a4c2a00 },
- { _MMIO(0x9888), 0x0c4c0280 },
- { _MMIO(0x9888), 0x000d2000 },
- { _MMIO(0x9888), 0x060d8000 },
- { _MMIO(0x9888), 0x080da000 },
- { _MMIO(0x9888), 0x0e0da000 },
- { _MMIO(0x9888), 0x0c0f0400 },
- { _MMIO(0x9888), 0x0e0f1500 },
- { _MMIO(0x9888), 0x100f0140 },
- { _MMIO(0x9888), 0x002c8000 },
- { _MMIO(0x9888), 0x0e2c8000 },
- { _MMIO(0x9888), 0x162c0a00 },
- { _MMIO(0x9888), 0x182c00a0 },
- { _MMIO(0x9888), 0x03933300 },
- { _MMIO(0x9888), 0x05930032 },
- { _MMIO(0x9888), 0x11930000 },
- { _MMIO(0x9888), 0x1b930000 },
- { _MMIO(0x9888), 0x1d900157 },
- { _MMIO(0x9888), 0x1f900167 },
- { _MMIO(0x9888), 0x35900000 },
- { _MMIO(0x9888), 0x19908000 },
- { _MMIO(0x9888), 0x1b908000 },
- { _MMIO(0x9888), 0x1190030f },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900000 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x45900042 },
- { _MMIO(0x9888), 0x47900000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x4b900000 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x53901111 },
- { _MMIO(0x9888), 0x43900420 },
-};
-
-static int
-get_compute_l3_cache_mux_config(struct drm_i915_private *dev_priv,
-                                const struct i915_oa_reg **regs,
-                                int *lens)
-{
-        int n = 0;
-
-        BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
-        BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
-        regs[n] = mux_config_compute_l3_cache;
-        lens[n] = ARRAY_SIZE(mux_config_compute_l3_cache);
-        n++;
-
-        return n;
-}
-
-static const struct i915_oa_reg b_counter_config_hdc_and_sf[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x10800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2770), 0x00000002 },
- { _MMIO(0x2774), 0x0000fdff },
-};
-
-static const struct i915_oa_reg flex_eu_config_hdc_and_sf[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_hdc_and_sf[] = {
- { _MMIO(0x9888), 0x104f0232 },
- { _MMIO(0x9888), 0x124f4640 },
- { _MMIO(0x9888), 0x106c0232 },
- { _MMIO(0x9888), 0x11834400 },
- { _MMIO(0x9888), 0x0a4e8000 },
- { _MMIO(0x9888), 0x0c4e8000 },
- { _MMIO(0x9888), 0x004f1880 },
- { _MMIO(0x9888), 0x024f08bb },
- { _MMIO(0x9888), 0x044f001b },
- { _MMIO(0x9888), 0x046c0100 },
- { _MMIO(0x9888), 0x066c000b },
- { _MMIO(0x9888), 0x1a6c0000 },
- { _MMIO(0x9888), 0x041b8000 },
- { _MMIO(0x9888), 0x061b4000 },
- { _MMIO(0x9888), 0x1a1c1800 },
- { _MMIO(0x9888), 0x005b8000 },
- { _MMIO(0x9888), 0x025bc000 },
- { _MMIO(0x9888), 0x045b4000 },
- { _MMIO(0x9888), 0x125c8000 },
- { _MMIO(0x9888), 0x145c8000 },
- { _MMIO(0x9888), 0x165c8000 },
- { _MMIO(0x9888), 0x185c8000 },
- { _MMIO(0x9888), 0x0a4c00a0 },
- { _MMIO(0x9888), 0x000d8000 },
- { _MMIO(0x9888), 0x020da000 },
- { _MMIO(0x9888), 0x040da000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x0c0f5000 },
- { _MMIO(0x9888), 0x0e0f0055 },
- { _MMIO(0x9888), 0x022cc000 },
- { _MMIO(0x9888), 0x042cc000 },
- { _MMIO(0x9888), 0x062cc000 },
- { _MMIO(0x9888), 0x082cc000 },
- { _MMIO(0x9888), 0x0a2c8000 },
- { _MMIO(0x9888), 0x0c2c8000 },
- { _MMIO(0x9888), 0x0f828000 },
- { _MMIO(0x9888), 0x0f8305c0 },
- { _MMIO(0x9888), 0x09830000 },
- { _MMIO(0x9888), 0x07830000 },
- { _MMIO(0x9888), 0x1d950080 },
- { _MMIO(0x9888), 0x13928000 },
- { _MMIO(0x9888), 0x0f988000 },
- { _MMIO(0x9888), 0x31904000 },
- { _MMIO(0x9888), 0x1190fc00 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x4b9000a0 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900800 },
- { _MMIO(0x9888), 0x43900842 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900000 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static int
-get_hdc_and_sf_mux_config(struct drm_i915_private *dev_priv,
-                          const struct i915_oa_reg **regs,
-                          int *lens)
-{
-        int n = 0;
-
-        BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
-        BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
-        regs[n] = mux_config_hdc_and_sf;
-        lens[n] = ARRAY_SIZE(mux_config_hdc_and_sf);
-        n++;
-
-        return n;
-}
-
-static const struct i915_oa_reg b_counter_config_l3_1[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2770), 0x00100070 },
- { _MMIO(0x2774), 0x0000fff1 },
- { _MMIO(0x2778), 0x00014002 },
- { _MMIO(0x277c), 0x0000c3ff },
- { _MMIO(0x2780), 0x00010002 },
- { _MMIO(0x2784), 0x0000c7ff },
- { _MMIO(0x2788), 0x00004002 },
- { _MMIO(0x278c), 0x0000d3ff },
- { _MMIO(0x2790), 0x00100700 },
- { _MMIO(0x2794), 0x0000ff1f },
- { _MMIO(0x2798), 0x00001402 },
- { _MMIO(0x279c), 0x0000fc3f },
- { _MMIO(0x27a0), 0x00001002 },
- { _MMIO(0x27a4), 0x0000fc7f },
- { _MMIO(0x27a8), 0x00000402 },
- { _MMIO(0x27ac), 0x0000fd3f },
-};
-
-static const struct i915_oa_reg flex_eu_config_l3_1[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_l3_1[] = {
- { _MMIO(0x9888), 0x126c7b40 },
- { _MMIO(0x9888), 0x166c0020 },
- { _MMIO(0x9888), 0x0a603444 },
- { _MMIO(0x9888), 0x0a613400 },
- { _MMIO(0x9888), 0x1a4ea800 },
- { _MMIO(0x9888), 0x1c4e0002 },
- { _MMIO(0x9888), 0x024e8000 },
- { _MMIO(0x9888), 0x044e8000 },
- { _MMIO(0x9888), 0x064e8000 },
- { _MMIO(0x9888), 0x084e8000 },
- { _MMIO(0x9888), 0x0a4e8000 },
- { _MMIO(0x9888), 0x064f4000 },
- { _MMIO(0x9888), 0x0c6c5327 },
- { _MMIO(0x9888), 0x0e6c5425 },
- { _MMIO(0x9888), 0x006c2a00 },
- { _MMIO(0x9888), 0x026c285b },
- { _MMIO(0x9888), 0x046c005c },
- { _MMIO(0x9888), 0x106c0000 },
- { _MMIO(0x9888), 0x1c6c0000 },
- { _MMIO(0x9888), 0x1e6c0000 },
- { _MMIO(0x9888), 0x1a6c0800 },
- { _MMIO(0x9888), 0x0c1bc000 },
- { _MMIO(0x9888), 0x0e1bc000 },
- { _MMIO(0x9888), 0x001b8000 },
- { _MMIO(0x9888), 0x021bc000 },
- { _MMIO(0x9888), 0x041bc000 },
- { _MMIO(0x9888), 0x1c1c003c },
- { _MMIO(0x9888), 0x121c8000 },
- { _MMIO(0x9888), 0x141c8000 },
- { _MMIO(0x9888), 0x161c8000 },
- { _MMIO(0x9888), 0x181c8000 },
- { _MMIO(0x9888), 0x1a1c0800 },
- { _MMIO(0x9888), 0x065b4000 },
- { _MMIO(0x9888), 0x1a5c1000 },
- { _MMIO(0x9888), 0x10600000 },
- { _MMIO(0x9888), 0x04600000 },
- { _MMIO(0x9888), 0x0c610044 },
- { _MMIO(0x9888), 0x10610000 },
- { _MMIO(0x9888), 0x06610000 },
- { _MMIO(0x9888), 0x0c4c02a8 },
- { _MMIO(0x9888), 0x084ca000 },
- { _MMIO(0x9888), 0x0a4c002a },
- { _MMIO(0x9888), 0x0c0da000 },
- { _MMIO(0x9888), 0x0e0da000 },
- { _MMIO(0x9888), 0x000d8000 },
- { _MMIO(0x9888), 0x020da000 },
- { _MMIO(0x9888), 0x040da000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x100f0154 },
- { _MMIO(0x9888), 0x0c0f5000 },
- { _MMIO(0x9888), 0x0e0f0055 },
- { _MMIO(0x9888), 0x182c00aa },
- { _MMIO(0x9888), 0x022c8000 },
- { _MMIO(0x9888), 0x042c8000 },
- { _MMIO(0x9888), 0x062c8000 },
- { _MMIO(0x9888), 0x082c8000 },
- { _MMIO(0x9888), 0x0a2c8000 },
- { _MMIO(0x9888), 0x0c2cc000 },
- { _MMIO(0x9888), 0x1190ffc0 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900420 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900021 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900400 },
- { _MMIO(0x9888), 0x43900421 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900040 },
-};
-
-static int
-get_l3_1_mux_config(struct drm_i915_private *dev_priv,
-                    const struct i915_oa_reg **regs,
-                    int *lens)
-{
-        int n = 0;
-
-        BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
-        BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
-        regs[n] = mux_config_l3_1;
-        lens[n] = ARRAY_SIZE(mux_config_l3_1);
-        n++;
-
-        return n;
-}
-
-static const struct i915_oa_reg b_counter_config_l3_2[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2770), 0x00100070 },
- { _MMIO(0x2774), 0x0000fff1 },
- { _MMIO(0x2778), 0x00028002 },
- { _MMIO(0x277c), 0x000087ff },
- { _MMIO(0x2780), 0x00020002 },
- { _MMIO(0x2784), 0x00008fff },
- { _MMIO(0x2788), 0x00008002 },
- { _MMIO(0x278c), 0x0000a7ff },
-};
-
-static const struct i915_oa_reg flex_eu_config_l3_2[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_l3_2[] = {
- { _MMIO(0x9888), 0x126c02e0 },
- { _MMIO(0x9888), 0x146c0001 },
- { _MMIO(0x9888), 0x0a623400 },
- { _MMIO(0x9888), 0x044e8000 },
- { _MMIO(0x9888), 0x064e8000 },
- { _MMIO(0x9888), 0x084e8000 },
- { _MMIO(0x9888), 0x0a4e8000 },
- { _MMIO(0x9888), 0x064f4000 },
- { _MMIO(0x9888), 0x026c3324 },
- { _MMIO(0x9888), 0x046c3422 },
- { _MMIO(0x9888), 0x106c0000 },
- { _MMIO(0x9888), 0x1a6c0000 },
- { _MMIO(0x9888), 0x021bc000 },
- { _MMIO(0x9888), 0x041bc000 },
- { _MMIO(0x9888), 0x141c8000 },
- { _MMIO(0x9888), 0x161c8000 },
- { _MMIO(0x9888), 0x181c8000 },
- { _MMIO(0x9888), 0x1a1c0800 },
- { _MMIO(0x9888), 0x065b4000 },
- { _MMIO(0x9888), 0x1a5c1000 },
- { _MMIO(0x9888), 0x06614000 },
- { _MMIO(0x9888), 0x0c620044 },
- { _MMIO(0x9888), 0x10620000 },
- { _MMIO(0x9888), 0x06620000 },
- { _MMIO(0x9888), 0x084c8000 },
- { _MMIO(0x9888), 0x0a4c002a },
- { _MMIO(0x9888), 0x020da000 },
- { _MMIO(0x9888), 0x040da000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x0c0f4000 },
- { _MMIO(0x9888), 0x0e0f0055 },
- { _MMIO(0x9888), 0x042c8000 },
- { _MMIO(0x9888), 0x062c8000 },
- { _MMIO(0x9888), 0x082c8000 },
- { _MMIO(0x9888), 0x0a2c8000 },
- { _MMIO(0x9888), 0x0c2cc000 },
- { _MMIO(0x9888), 0x1190f800 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x43900000 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900000 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static int
-get_l3_2_mux_config(struct drm_i915_private *dev_priv,
-                    const struct i915_oa_reg **regs,
-                    int *lens)
-{
-        int n = 0;
-
-        BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
-        BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
-        regs[n] = mux_config_l3_2;
-        lens[n] = ARRAY_SIZE(mux_config_l3_2);
-        n++;
-
-        return n;
-}
-
-static const struct i915_oa_reg b_counter_config_l3_3[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2770), 0x00100070 },
- { _MMIO(0x2774), 0x0000fff1 },
- { _MMIO(0x2778), 0x00028002 },
- { _MMIO(0x277c), 0x000087ff },
- { _MMIO(0x2780), 0x00020002 },
- { _MMIO(0x2784), 0x00008fff },
- { _MMIO(0x2788), 0x00008002 },
- { _MMIO(0x278c), 0x0000a7ff },
-};
-
-static const struct i915_oa_reg flex_eu_config_l3_3[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_l3_3[] = {
- { _MMIO(0x9888), 0x126c4e80 },
- { _MMIO(0x9888), 0x146c0000 },
- { _MMIO(0x9888), 0x0a633400 },
- { _MMIO(0x9888), 0x044e8000 },
- { _MMIO(0x9888), 0x064e8000 },
- { _MMIO(0x9888), 0x084e8000 },
- { _MMIO(0x9888), 0x0a4e8000 },
- { _MMIO(0x9888), 0x0c4e8000 },
- { _MMIO(0x9888), 0x026c3321 },
- { _MMIO(0x9888), 0x046c342f },
- { _MMIO(0x9888), 0x106c0000 },
- { _MMIO(0x9888), 0x1a6c2000 },
- { _MMIO(0x9888), 0x021bc000 },
- { _MMIO(0x9888), 0x041bc000 },
- { _MMIO(0x9888), 0x061b4000 },
- { _MMIO(0x9888), 0x141c8000 },
- { _MMIO(0x9888), 0x161c8000 },
- { _MMIO(0x9888), 0x181c8000 },
- { _MMIO(0x9888), 0x1a1c1800 },
- { _MMIO(0x9888), 0x06604000 },
- { _MMIO(0x9888), 0x0c630044 },
- { _MMIO(0x9888), 0x10630000 },
- { _MMIO(0x9888), 0x06630000 },
- { _MMIO(0x9888), 0x084c8000 },
- { _MMIO(0x9888), 0x0a4c00aa },
- { _MMIO(0x9888), 0x020da000 },
- { _MMIO(0x9888), 0x040da000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x0c0f4000 },
- { _MMIO(0x9888), 0x0e0f0055 },
- { _MMIO(0x9888), 0x042c8000 },
- { _MMIO(0x9888), 0x062c8000 },
- { _MMIO(0x9888), 0x082c8000 },
- { _MMIO(0x9888), 0x0a2c8000 },
- { _MMIO(0x9888), 0x0c2c8000 },
- { _MMIO(0x9888), 0x1190f800 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x43900842 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900002 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static int
-get_l3_3_mux_config(struct drm_i915_private *dev_priv,
-                    const struct i915_oa_reg **regs,
-                    int *lens)
-{
-        int n = 0;
-
-        BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
-        BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
-        regs[n] = mux_config_l3_3;
-        lens[n] = ARRAY_SIZE(mux_config_l3_3);
-        n++;
-
-        return n;
-}
-
-static const struct i915_oa_reg b_counter_config_rasterizer_and_pixel_backend[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x30800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2770), 0x00000002 },
- { _MMIO(0x2774), 0x0000efff },
- { _MMIO(0x2778), 0x00006000 },
- { _MMIO(0x277c), 0x0000f3ff },
-};
-
-static const struct i915_oa_reg flex_eu_config_rasterizer_and_pixel_backend[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_rasterizer_and_pixel_backend[] = {
- { _MMIO(0x9888), 0x102f3800 },
- { _MMIO(0x9888), 0x144d0500 },
- { _MMIO(0x9888), 0x120d03c0 },
- { _MMIO(0x9888), 0x140d03cf },
- { _MMIO(0x9888), 0x0c0f0004 },
- { _MMIO(0x9888), 0x0c4e4000 },
- { _MMIO(0x9888), 0x042f0480 },
- { _MMIO(0x9888), 0x082f0000 },
- { _MMIO(0x9888), 0x022f0000 },
- { _MMIO(0x9888), 0x0a4c0090 },
- { _MMIO(0x9888), 0x064d0027 },
- { _MMIO(0x9888), 0x004d0000 },
- { _MMIO(0x9888), 0x000d0d40 },
- { _MMIO(0x9888), 0x020d803f },
- { _MMIO(0x9888), 0x040d8023 },
- { _MMIO(0x9888), 0x100d0000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x020f0010 },
- { _MMIO(0x9888), 0x000f0000 },
- { _MMIO(0x9888), 0x0e0f0050 },
- { _MMIO(0x9888), 0x0a2c8000 },
- { _MMIO(0x9888), 0x0c2c8000 },
- { _MMIO(0x9888), 0x1190fc00 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41901400 },
- { _MMIO(0x9888), 0x43901485 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900001 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static int
-get_rasterizer_and_pixel_backend_mux_config(struct drm_i915_private *dev_priv,
-                                            const struct i915_oa_reg **regs,
-                                            int *lens)
-{
-        int n = 0;
-
-        BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
-        BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
-        regs[n] = mux_config_rasterizer_and_pixel_backend;
-        lens[n] = ARRAY_SIZE(mux_config_rasterizer_and_pixel_backend);
-        n++;
-
-        return n;
-}
-
-static const struct i915_oa_reg b_counter_config_sampler[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x70800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2770), 0x0000c000 },
- { _MMIO(0x2774), 0x0000e7ff },
- { _MMIO(0x2778), 0x00003000 },
- { _MMIO(0x277c), 0x0000f9ff },
- { _MMIO(0x2780), 0x00000c00 },
- { _MMIO(0x2784), 0x0000fe7f },
-};
-
-static const struct i915_oa_reg flex_eu_config_sampler[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_sampler[] = {
- { _MMIO(0x9888), 0x14152c00 },
- { _MMIO(0x9888), 0x16150005 },
- { _MMIO(0x9888), 0x121600a0 },
- { _MMIO(0x9888), 0x14352c00 },
- { _MMIO(0x9888), 0x16350005 },
- { _MMIO(0x9888), 0x123600a0 },
- { _MMIO(0x9888), 0x14552c00 },
- { _MMIO(0x9888), 0x16550005 },
- { _MMIO(0x9888), 0x125600a0 },
- { _MMIO(0x9888), 0x062f6000 },
- { _MMIO(0x9888), 0x022f2000 },
- { _MMIO(0x9888), 0x0c4c0050 },
- { _MMIO(0x9888), 0x0a4c0010 },
- { _MMIO(0x9888), 0x0c0d8000 },
- { _MMIO(0x9888), 0x0e0da000 },
- { _MMIO(0x9888), 0x000d8000 },
- { _MMIO(0x9888), 0x020da000 },
- { _MMIO(0x9888), 0x040da000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x100f0350 },
- { _MMIO(0x9888), 0x0c0fb000 },
- { _MMIO(0x9888), 0x0e0f00da },
- { _MMIO(0x9888), 0x182c0028 },
- { _MMIO(0x9888), 0x0a2c8000 },
- { _MMIO(0x9888), 0x022dc000 },
- { _MMIO(0x9888), 0x042d4000 },
- { _MMIO(0x9888), 0x0c138000 },
- { _MMIO(0x9888), 0x0e132000 },
- { _MMIO(0x9888), 0x0413c000 },
- { _MMIO(0x9888), 0x1c140018 },
- { _MMIO(0x9888), 0x0c157000 },
- { _MMIO(0x9888), 0x0e150078 },
- { _MMIO(0x9888), 0x10150000 },
- { _MMIO(0x9888), 0x04162180 },
- { _MMIO(0x9888), 0x02160000 },
- { _MMIO(0x9888), 0x04174000 },
- { _MMIO(0x9888), 0x0233a000 },
- { _MMIO(0x9888), 0x04333000 },
- { _MMIO(0x9888), 0x14348000 },
- { _MMIO(0x9888), 0x16348000 },
- { _MMIO(0x9888), 0x02357870 },
- { _MMIO(0x9888), 0x10350000 },
- { _MMIO(0x9888), 0x04360043 },
- { _MMIO(0x9888), 0x02360000 },
- { _MMIO(0x9888), 0x04371000 },
- { _MMIO(0x9888), 0x0e538000 },
- { _MMIO(0x9888), 0x00538000 },
- { _MMIO(0x9888), 0x06533000 },
- { _MMIO(0x9888), 0x1c540020 },
- { _MMIO(0x9888), 0x12548000 },
- { _MMIO(0x9888), 0x0e557000 },
- { _MMIO(0x9888), 0x00557800 },
- { _MMIO(0x9888), 0x10550000 },
- { _MMIO(0x9888), 0x06560043 },
- { _MMIO(0x9888), 0x02560000 },
- { _MMIO(0x9888), 0x06571000 },
- { _MMIO(0x9888), 0x1190ff80 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900060 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900c00 },
- { _MMIO(0x9888), 0x43900842 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900060 },
-};
-
-static int
-get_sampler_mux_config(struct drm_i915_private *dev_priv,
-                       const struct i915_oa_reg **regs,
-                       int *lens)
-{
-        int n = 0;
-
-        BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
-        BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
-        regs[n] = mux_config_sampler;
-        lens[n] = ARRAY_SIZE(mux_config_sampler);
-        n++;
-
-        return n;
-}
-
-static const struct i915_oa_reg b_counter_config_tdl_1[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x30800000 },
- { _MMIO(0x2770), 0x00000002 },
- { _MMIO(0x2774), 0x00007fff },
- { _MMIO(0x2778), 0x00000000 },
- { _MMIO(0x277c), 0x00009fff },
- { _MMIO(0x2780), 0x00000002 },
- { _MMIO(0x2784), 0x0000efff },
- { _MMIO(0x2788), 0x00000000 },
- { _MMIO(0x278c), 0x0000f3ff },
- { _MMIO(0x2790), 0x00000002 },
- { _MMIO(0x2794), 0x0000fdff },
- { _MMIO(0x2798), 0x00000000 },
- { _MMIO(0x279c), 0x0000fe7f },
-};
-
-static const struct i915_oa_reg flex_eu_config_tdl_1[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_tdl_1[] = {
- { _MMIO(0x9888), 0x12120000 },
- { _MMIO(0x9888), 0x12320000 },
- { _MMIO(0x9888), 0x12520000 },
- { _MMIO(0x9888), 0x002f8000 },
- { _MMIO(0x9888), 0x022f3000 },
- { _MMIO(0x9888), 0x0a4c0015 },
- { _MMIO(0x9888), 0x0c0d8000 },
- { _MMIO(0x9888), 0x0e0da000 },
- { _MMIO(0x9888), 0x000d8000 },
- { _MMIO(0x9888), 0x020da000 },
- { _MMIO(0x9888), 0x040da000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x100f03a0 },
- { _MMIO(0x9888), 0x0c0ff000 },
- { _MMIO(0x9888), 0x0e0f0095 },
- { _MMIO(0x9888), 0x062c8000 },
- { _MMIO(0x9888), 0x082c8000 },
- { _MMIO(0x9888), 0x0a2c8000 },
- { _MMIO(0x9888), 0x0c2d8000 },
- { _MMIO(0x9888), 0x0e2d4000 },
- { _MMIO(0x9888), 0x062d4000 },
- { _MMIO(0x9888), 0x02108000 },
- { _MMIO(0x9888), 0x0410c000 },
- { _MMIO(0x9888), 0x02118000 },
- { _MMIO(0x9888), 0x0411c000 },
- { _MMIO(0x9888), 0x02121880 },
- { _MMIO(0x9888), 0x041219b5 },
- { _MMIO(0x9888), 0x00120000 },
- { _MMIO(0x9888), 0x02134000 },
- { _MMIO(0x9888), 0x04135000 },
- { _MMIO(0x9888), 0x0c308000 },
- { _MMIO(0x9888), 0x0e304000 },
- { _MMIO(0x9888), 0x06304000 },
- { _MMIO(0x9888), 0x0c318000 },
- { _MMIO(0x9888), 0x0e314000 },
- { _MMIO(0x9888), 0x06314000 },
- { _MMIO(0x9888), 0x0c321a80 },
- { _MMIO(0x9888), 0x0e320033 },
- { _MMIO(0x9888), 0x06320031 },
- { _MMIO(0x9888), 0x00320000 },
- { _MMIO(0x9888), 0x0c334000 },
- { _MMIO(0x9888), 0x0e331000 },
- { _MMIO(0x9888), 0x06331000 },
- { _MMIO(0x9888), 0x0e508000 },
- { _MMIO(0x9888), 0x00508000 },
- { _MMIO(0x9888), 0x02504000 },
- { _MMIO(0x9888), 0x0e518000 },
- { _MMIO(0x9888), 0x00518000 },
- { _MMIO(0x9888), 0x02514000 },
- { _MMIO(0x9888), 0x0e521880 },
- { _MMIO(0x9888), 0x00521a80 },
- { _MMIO(0x9888), 0x02520033 },
- { _MMIO(0x9888), 0x0e534000 },
- { _MMIO(0x9888), 0x00534000 },
- { _MMIO(0x9888), 0x02531000 },
- { _MMIO(0x9888), 0x1190ff80 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900800 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900062 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900c00 },
- { _MMIO(0x9888), 0x43900003 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900040 },
-};
-
-static int
-get_tdl_1_mux_config(struct drm_i915_private *dev_priv,
-                     const struct i915_oa_reg **regs,
-                     int *lens)
-{
-        int n = 0;
-
-        BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
-        BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
-        regs[n] = mux_config_tdl_1;
-        lens[n] = ARRAY_SIZE(mux_config_tdl_1);
-        n++;
-
-        return n;
-}
-
-static const struct i915_oa_reg b_counter_config_tdl_2[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x00800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
-};
-
-static const struct i915_oa_reg flex_eu_config_tdl_2[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_tdl_2[] = {
- { _MMIO(0x9888), 0x12124d60 },
- { _MMIO(0x9888), 0x12322e60 },
- { _MMIO(0x9888), 0x12524d60 },
- { _MMIO(0x9888), 0x022f3000 },
- { _MMIO(0x9888), 0x0a4c0014 },
- { _MMIO(0x9888), 0x000d8000 },
- { _MMIO(0x9888), 0x020da000 },
- { _MMIO(0x9888), 0x040da000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x0c0fe000 },
- { _MMIO(0x9888), 0x0e0f0097 },
- { _MMIO(0x9888), 0x082c8000 },
- { _MMIO(0x9888), 0x0a2c8000 },
- { _MMIO(0x9888), 0x002d8000 },
- { _MMIO(0x9888), 0x062d4000 },
- { _MMIO(0x9888), 0x0410c000 },
- { _MMIO(0x9888), 0x0411c000 },
- { _MMIO(0x9888), 0x04121fb7 },
- { _MMIO(0x9888), 0x00120000 },
- { _MMIO(0x9888), 0x04135000 },
- { _MMIO(0x9888), 0x00308000 },
- { _MMIO(0x9888), 0x06304000 },
- { _MMIO(0x9888), 0x00318000 },
- { _MMIO(0x9888), 0x06314000 },
- { _MMIO(0x9888), 0x00321b80 },
- { _MMIO(0x9888), 0x0632003f },
- { _MMIO(0x9888), 0x00334000 },
- { _MMIO(0x9888), 0x06331000 },
- { _MMIO(0x9888), 0x0250c000 },
- { _MMIO(0x9888), 0x0251c000 },
- { _MMIO(0x9888), 0x02521fb7 },
- { _MMIO(0x9888), 0x00520000 },
- { _MMIO(0x9888), 0x02535000 },
- { _MMIO(0x9888), 0x1190fc00 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900800 },
- { _MMIO(0x9888), 0x43900063 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900040 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static int
-get_tdl_2_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_tdl_2;
- lens[n] = ARRAY_SIZE(mux_config_tdl_2);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_compute_extra[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x00800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
-};
-
-static const struct i915_oa_reg flex_eu_config_compute_extra[] = {
- { _MMIO(0xe458), 0x00001000 },
- { _MMIO(0xe558), 0x00003002 },
- { _MMIO(0xe658), 0x00005004 },
- { _MMIO(0xe758), 0x00011010 },
- { _MMIO(0xe45c), 0x00050012 },
- { _MMIO(0xe55c), 0x00052051 },
- { _MMIO(0xe65c), 0x00000008 },
-};
-
-static const struct i915_oa_reg mux_config_compute_extra[] = {
- { _MMIO(0x9888), 0x121203e0 },
- { _MMIO(0x9888), 0x123203e0 },
- { _MMIO(0x9888), 0x125203e0 },
- { _MMIO(0x9888), 0x022f4000 },
- { _MMIO(0x9888), 0x0a4c0040 },
- { _MMIO(0x9888), 0x040da000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x0e0f006c },
- { _MMIO(0x9888), 0x0c2c8000 },
- { _MMIO(0x9888), 0x042d8000 },
- { _MMIO(0x9888), 0x06104000 },
- { _MMIO(0x9888), 0x06114000 },
- { _MMIO(0x9888), 0x06120033 },
- { _MMIO(0x9888), 0x00120000 },
- { _MMIO(0x9888), 0x06131000 },
- { _MMIO(0x9888), 0x04308000 },
- { _MMIO(0x9888), 0x04318000 },
- { _MMIO(0x9888), 0x04321980 },
- { _MMIO(0x9888), 0x00320000 },
- { _MMIO(0x9888), 0x04334000 },
- { _MMIO(0x9888), 0x04504000 },
- { _MMIO(0x9888), 0x04514000 },
- { _MMIO(0x9888), 0x04520033 },
- { _MMIO(0x9888), 0x00520000 },
- { _MMIO(0x9888), 0x04531000 },
- { _MMIO(0x9888), 0x1190e000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x43900c00 },
- { _MMIO(0x9888), 0x45900002 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static int
-get_compute_extra_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_compute_extra;
- lens[n] = ARRAY_SIZE(mux_config_compute_extra);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_vme_pipe[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x30800000 },
- { _MMIO(0x2770), 0x00100030 },
- { _MMIO(0x2774), 0x0000fff9 },
- { _MMIO(0x2778), 0x00000002 },
- { _MMIO(0x277c), 0x0000fffc },
- { _MMIO(0x2780), 0x00000002 },
- { _MMIO(0x2784), 0x0000fff3 },
- { _MMIO(0x2788), 0x00100180 },
- { _MMIO(0x278c), 0x0000ffcf },
- { _MMIO(0x2790), 0x00000002 },
- { _MMIO(0x2794), 0x0000ffcf },
- { _MMIO(0x2798), 0x00000002 },
- { _MMIO(0x279c), 0x0000ff3f },
-};
-
-static const struct i915_oa_reg flex_eu_config_vme_pipe[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00008003 },
-};
-
-static const struct i915_oa_reg mux_config_vme_pipe[] = {
- { _MMIO(0x9888), 0x141a5800 },
- { _MMIO(0x9888), 0x161a00c0 },
- { _MMIO(0x9888), 0x12180240 },
- { _MMIO(0x9888), 0x14180002 },
- { _MMIO(0x9888), 0x143a5800 },
- { _MMIO(0x9888), 0x163a00c0 },
- { _MMIO(0x9888), 0x12380240 },
- { _MMIO(0x9888), 0x14380002 },
- { _MMIO(0x9888), 0x002f1000 },
- { _MMIO(0x9888), 0x022f8000 },
- { _MMIO(0x9888), 0x042f3000 },
- { _MMIO(0x9888), 0x004c4000 },
- { _MMIO(0x9888), 0x0a4c1500 },
- { _MMIO(0x9888), 0x000d2000 },
- { _MMIO(0x9888), 0x060d8000 },
- { _MMIO(0x9888), 0x080da000 },
- { _MMIO(0x9888), 0x0a0da000 },
- { _MMIO(0x9888), 0x0c0da000 },
- { _MMIO(0x9888), 0x0c0f0400 },
- { _MMIO(0x9888), 0x0e0f9500 },
- { _MMIO(0x9888), 0x100f002a },
- { _MMIO(0x9888), 0x002c8000 },
- { _MMIO(0x9888), 0x0e2c8000 },
- { _MMIO(0x9888), 0x162c0a00 },
- { _MMIO(0x9888), 0x0a2dc000 },
- { _MMIO(0x9888), 0x0c2dc000 },
- { _MMIO(0x9888), 0x04193000 },
- { _MMIO(0x9888), 0x081a28c1 },
- { _MMIO(0x9888), 0x001a0000 },
- { _MMIO(0x9888), 0x00133000 },
- { _MMIO(0x9888), 0x0613c000 },
- { _MMIO(0x9888), 0x0813f000 },
- { _MMIO(0x9888), 0x00172000 },
- { _MMIO(0x9888), 0x06178000 },
- { _MMIO(0x9888), 0x0817a000 },
- { _MMIO(0x9888), 0x00180037 },
- { _MMIO(0x9888), 0x06180940 },
- { _MMIO(0x9888), 0x08180000 },
- { _MMIO(0x9888), 0x02180000 },
- { _MMIO(0x9888), 0x04183000 },
- { _MMIO(0x9888), 0x06393000 },
- { _MMIO(0x9888), 0x0c3a28c1 },
- { _MMIO(0x9888), 0x003a0000 },
- { _MMIO(0x9888), 0x0a33f000 },
- { _MMIO(0x9888), 0x0c33f000 },
- { _MMIO(0x9888), 0x0a37a000 },
- { _MMIO(0x9888), 0x0c37a000 },
- { _MMIO(0x9888), 0x0a380977 },
- { _MMIO(0x9888), 0x08380000 },
- { _MMIO(0x9888), 0x04380000 },
- { _MMIO(0x9888), 0x06383000 },
- { _MMIO(0x9888), 0x119000ff },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900040 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x45900800 },
- { _MMIO(0x9888), 0x47901000 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900844 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static int
-get_vme_pipe_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_vme_pipe;
- lens[n] = ARRAY_SIZE(mux_config_vme_pipe);
- n++;
-
- return n;
-}
-
static const struct i915_oa_reg b_counter_config_test_oa[] = {
{ _MMIO(0x2740), 0x00000000 },
{ _MMIO(0x2714), 0xf0800000 },
@@ -2370,6 +59,7 @@ static const struct i915_oa_reg flex_eu_config_test_oa[] = {
};
static const struct i915_oa_reg mux_config_test_oa[] = {
+ { _MMIO(0x9840), 0x00000080 },
{ _MMIO(0x9888), 0x11810000 },
{ _MMIO(0x9888), 0x07810016 },
{ _MMIO(0x9888), 0x1f810000 },
@@ -2384,1096 +74,35 @@ static const struct i915_oa_reg mux_config_test_oa[] = {
{ _MMIO(0x9888), 0x33900000 },
};
-static int
-get_test_oa_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_test_oa;
- lens[n] = ARRAY_SIZE(mux_config_test_oa);
- n++;
-
- return n;
-}
-
-int i915_oa_select_metric_set_sklgt2(struct drm_i915_private *dev_priv)
-{
- dev_priv->perf.oa.n_mux_configs = 0;
- dev_priv->perf.oa.b_counter_regs = NULL;
- dev_priv->perf.oa.b_counter_regs_len = 0;
- dev_priv->perf.oa.flex_regs = NULL;
- dev_priv->perf.oa.flex_regs_len = 0;
-
- switch (dev_priv->perf.oa.metrics_set) {
- case METRIC_SET_ID_RENDER_BASIC:
- dev_priv->perf.oa.n_mux_configs =
- get_render_basic_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_BASIC\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_render_basic;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_render_basic);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_render_basic;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_render_basic);
-
- return 0;
- case METRIC_SET_ID_COMPUTE_BASIC:
- dev_priv->perf.oa.n_mux_configs =
- get_compute_basic_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_BASIC\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_compute_basic;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_compute_basic);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_compute_basic;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_compute_basic);
-
- return 0;
- case METRIC_SET_ID_RENDER_PIPE_PROFILE:
- dev_priv->perf.oa.n_mux_configs =
- get_render_pipe_profile_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_PIPE_PROFILE\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_render_pipe_profile;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_render_pipe_profile);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_render_pipe_profile;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_render_pipe_profile);
-
- return 0;
- case METRIC_SET_ID_MEMORY_READS:
- dev_priv->perf.oa.n_mux_configs =
- get_memory_reads_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_READS\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_memory_reads;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_memory_reads);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_memory_reads;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_memory_reads);
-
- return 0;
- case METRIC_SET_ID_MEMORY_WRITES:
- dev_priv->perf.oa.n_mux_configs =
- get_memory_writes_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_WRITES\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_memory_writes;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_memory_writes);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_memory_writes;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_memory_writes);
-
- return 0;
- case METRIC_SET_ID_COMPUTE_EXTENDED:
- dev_priv->perf.oa.n_mux_configs =
- get_compute_extended_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTENDED\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_compute_extended;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_compute_extended);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_compute_extended;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_compute_extended);
-
- return 0;
- case METRIC_SET_ID_COMPUTE_L3_CACHE:
- dev_priv->perf.oa.n_mux_configs =
- get_compute_l3_cache_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_L3_CACHE\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_compute_l3_cache;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_compute_l3_cache);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_compute_l3_cache;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_compute_l3_cache);
-
- return 0;
- case METRIC_SET_ID_HDC_AND_SF:
- dev_priv->perf.oa.n_mux_configs =
- get_hdc_and_sf_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"HDC_AND_SF\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_hdc_and_sf;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_hdc_and_sf);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_hdc_and_sf;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_hdc_and_sf);
-
- return 0;
- case METRIC_SET_ID_L3_1:
- dev_priv->perf.oa.n_mux_configs =
- get_l3_1_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_1\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_l3_1;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_l3_1);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_l3_1;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_l3_1);
-
- return 0;
- case METRIC_SET_ID_L3_2:
- dev_priv->perf.oa.n_mux_configs =
- get_l3_2_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_2\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_l3_2;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_l3_2);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_l3_2;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_l3_2);
-
- return 0;
- case METRIC_SET_ID_L3_3:
- dev_priv->perf.oa.n_mux_configs =
- get_l3_3_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_3\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_l3_3;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_l3_3);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_l3_3;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_l3_3);
-
- return 0;
- case METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND:
- dev_priv->perf.oa.n_mux_configs =
- get_rasterizer_and_pixel_backend_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"RASTERIZER_AND_PIXEL_BACKEND\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_rasterizer_and_pixel_backend;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_rasterizer_and_pixel_backend);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_rasterizer_and_pixel_backend;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_rasterizer_and_pixel_backend);
-
- return 0;
- case METRIC_SET_ID_SAMPLER:
- dev_priv->perf.oa.n_mux_configs =
- get_sampler_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"SAMPLER\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_sampler;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_sampler);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_sampler;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_sampler);
-
- return 0;
- case METRIC_SET_ID_TDL_1:
- dev_priv->perf.oa.n_mux_configs =
- get_tdl_1_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"TDL_1\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_tdl_1;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_tdl_1);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_tdl_1;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_tdl_1);
-
- return 0;
- case METRIC_SET_ID_TDL_2:
- dev_priv->perf.oa.n_mux_configs =
- get_tdl_2_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"TDL_2\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_tdl_2;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_tdl_2);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_tdl_2;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_tdl_2);
-
- return 0;
- case METRIC_SET_ID_COMPUTE_EXTRA:
- dev_priv->perf.oa.n_mux_configs =
- get_compute_extra_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTRA\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_compute_extra;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_compute_extra);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_compute_extra;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_compute_extra);
-
- return 0;
- case METRIC_SET_ID_VME_PIPE:
- dev_priv->perf.oa.n_mux_configs =
- get_vme_pipe_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"VME_PIPE\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_vme_pipe;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_vme_pipe);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_vme_pipe;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_vme_pipe);
-
- return 0;
- case METRIC_SET_ID_TEST_OA:
- dev_priv->perf.oa.n_mux_configs =
- get_test_oa_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"TEST_OA\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_test_oa;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_test_oa;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_test_oa);
-
- return 0;
- default:
- return -ENODEV;
- }
-}
-
-static ssize_t
-show_render_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_BASIC);
-}
-
-static struct device_attribute dev_attr_render_basic_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_render_basic_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_render_basic[] = {
- &dev_attr_render_basic_id.attr,
- NULL,
-};
-
-static struct attribute_group group_render_basic = {
- .name = "f519e481-24d2-4d42-87c9-3fdd12c00202",
- .attrs = attrs_render_basic,
-};
-
-static ssize_t
-show_compute_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_BASIC);
-}
-
-static struct device_attribute dev_attr_compute_basic_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_compute_basic_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_compute_basic[] = {
- &dev_attr_compute_basic_id.attr,
- NULL,
-};
-
-static struct attribute_group group_compute_basic = {
- .name = "fe47b29d-ae51-423e-bff4-27d965a95b60",
- .attrs = attrs_compute_basic,
-};
-
-static ssize_t
-show_render_pipe_profile_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_PIPE_PROFILE);
-}
-
-static struct device_attribute dev_attr_render_pipe_profile_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_render_pipe_profile_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_render_pipe_profile[] = {
- &dev_attr_render_pipe_profile_id.attr,
- NULL,
-};
-
-static struct attribute_group group_render_pipe_profile = {
- .name = "e0ad5ae0-84ba-4f29-a723-1906c12cb774",
- .attrs = attrs_render_pipe_profile,
-};
-
-static ssize_t
-show_memory_reads_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_READS);
-}
-
-static struct device_attribute dev_attr_memory_reads_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_memory_reads_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_memory_reads[] = {
- &dev_attr_memory_reads_id.attr,
- NULL,
-};
-
-static struct attribute_group group_memory_reads = {
- .name = "9bc436dd-6130-4add-affc-283eb6eaa864",
- .attrs = attrs_memory_reads,
-};
-
-static ssize_t
-show_memory_writes_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_WRITES);
-}
-
-static struct device_attribute dev_attr_memory_writes_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_memory_writes_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_memory_writes[] = {
- &dev_attr_memory_writes_id.attr,
- NULL,
-};
-
-static struct attribute_group group_memory_writes = {
- .name = "2ea0da8f-3527-4669-9d9d-13099a7435bf",
- .attrs = attrs_memory_writes,
-};
-
-static ssize_t
-show_compute_extended_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_EXTENDED);
-}
-
-static struct device_attribute dev_attr_compute_extended_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_compute_extended_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_compute_extended[] = {
- &dev_attr_compute_extended_id.attr,
- NULL,
-};
-
-static struct attribute_group group_compute_extended = {
- .name = "d97d16af-028b-4cd1-a672-6210cb5513dd",
- .attrs = attrs_compute_extended,
-};
-
-static ssize_t
-show_compute_l3_cache_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_L3_CACHE);
-}
-
-static struct device_attribute dev_attr_compute_l3_cache_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_compute_l3_cache_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_compute_l3_cache[] = {
- &dev_attr_compute_l3_cache_id.attr,
- NULL,
-};
-
-static struct attribute_group group_compute_l3_cache = {
- .name = "9fb22842-e708-43f7-9752-e0e41670c39e",
- .attrs = attrs_compute_l3_cache,
-};
-
-static ssize_t
-show_hdc_and_sf_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_HDC_AND_SF);
-}
-
-static struct device_attribute dev_attr_hdc_and_sf_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_hdc_and_sf_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_hdc_and_sf[] = {
- &dev_attr_hdc_and_sf_id.attr,
- NULL,
-};
-
-static struct attribute_group group_hdc_and_sf = {
- .name = "5378e2a1-4248-4188-a4ae-da25a794c603",
- .attrs = attrs_hdc_and_sf,
-};
-
-static ssize_t
-show_l3_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_L3_1);
-}
-
-static struct device_attribute dev_attr_l3_1_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_l3_1_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_l3_1[] = {
- &dev_attr_l3_1_id.attr,
- NULL,
-};
-
-static struct attribute_group group_l3_1 = {
- .name = "f42cdd6a-b000-42cb-870f-5eb423a7f514",
- .attrs = attrs_l3_1,
-};
-
-static ssize_t
-show_l3_2_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_L3_2);
-}
-
-static struct device_attribute dev_attr_l3_2_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_l3_2_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_l3_2[] = {
- &dev_attr_l3_2_id.attr,
- NULL,
-};
-
-static struct attribute_group group_l3_2 = {
- .name = "b9bf2423-d88c-4a7b-a051-627611d00dcc",
- .attrs = attrs_l3_2,
-};
-
-static ssize_t
-show_l3_3_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_L3_3);
-}
-
-static struct device_attribute dev_attr_l3_3_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_l3_3_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_l3_3[] = {
- &dev_attr_l3_3_id.attr,
- NULL,
-};
-
-static struct attribute_group group_l3_3 = {
- .name = "2414a93d-d84f-406e-99c0-472161194b40",
- .attrs = attrs_l3_3,
-};
-
-static ssize_t
-show_rasterizer_and_pixel_backend_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND);
-}
-
-static struct device_attribute dev_attr_rasterizer_and_pixel_backend_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_rasterizer_and_pixel_backend_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_rasterizer_and_pixel_backend[] = {
- &dev_attr_rasterizer_and_pixel_backend_id.attr,
- NULL,
-};
-
-static struct attribute_group group_rasterizer_and_pixel_backend = {
- .name = "53a45d2d-170b-4cf5-b7bb-585120c8e2f5",
- .attrs = attrs_rasterizer_and_pixel_backend,
-};
-
-static ssize_t
-show_sampler_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_SAMPLER);
-}
-
-static struct device_attribute dev_attr_sampler_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_sampler_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_sampler[] = {
- &dev_attr_sampler_id.attr,
- NULL,
-};
-
-static struct attribute_group group_sampler = {
- .name = "b4cff514-a91e-4798-a0b3-426ca13fc9c1",
- .attrs = attrs_sampler,
-};
-
-static ssize_t
-show_tdl_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_TDL_1);
-}
-
-static struct device_attribute dev_attr_tdl_1_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_tdl_1_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_tdl_1[] = {
- &dev_attr_tdl_1_id.attr,
- NULL,
-};
-
-static struct attribute_group group_tdl_1 = {
- .name = "7821d13b-9b8b-4405-9618-78cd56b62cce",
- .attrs = attrs_tdl_1,
-};
-
-static ssize_t
-show_tdl_2_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_TDL_2);
-}
-
-static struct device_attribute dev_attr_tdl_2_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_tdl_2_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_tdl_2[] = {
- &dev_attr_tdl_2_id.attr,
- NULL,
-};
-
-static struct attribute_group group_tdl_2 = {
- .name = "893f1a4d-919d-4388-8cb7-746d73ea7259",
- .attrs = attrs_tdl_2,
-};
-
-static ssize_t
-show_compute_extra_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_EXTRA);
-}
-
-static struct device_attribute dev_attr_compute_extra_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_compute_extra_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_compute_extra[] = {
- &dev_attr_compute_extra_id.attr,
- NULL,
-};
-
-static struct attribute_group group_compute_extra = {
- .name = "41a24047-7484-4ead-ae37-de907e5ff2b2",
- .attrs = attrs_compute_extra,
-};
-
-static ssize_t
-show_vme_pipe_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_VME_PIPE);
-}
-
-static struct device_attribute dev_attr_vme_pipe_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_vme_pipe_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_vme_pipe[] = {
- &dev_attr_vme_pipe_id.attr,
- NULL,
-};
-
-static struct attribute_group group_vme_pipe = {
- .name = "95910492-943f-44bd-9461-390240f243fd",
- .attrs = attrs_vme_pipe,
-};
-
static ssize_t
show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", METRIC_SET_ID_TEST_OA);
+ return sprintf(buf, "1\n");
}
-static struct device_attribute dev_attr_test_oa_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_test_oa_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_test_oa[] = {
- &dev_attr_test_oa_id.attr,
- NULL,
-};
-
-static struct attribute_group group_test_oa = {
- .name = "1651949f-0ac0-4cb1-a06f-dafd74a407d1",
- .attrs = attrs_test_oa,
-};
-
-int
-i915_perf_register_sysfs_sklgt2(struct drm_i915_private *dev_priv)
+void
+i915_perf_load_test_config_sklgt2(struct drm_i915_private *dev_priv)
{
- const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
- int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];
- int ret = 0;
+ strncpy(dev_priv->perf.oa.test_config.uuid,
+ "1651949f-0ac0-4cb1-a06f-dafd74a407d1",
+ UUID_STRING_LEN);
+ dev_priv->perf.oa.test_config.id = 1;
- if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_basic);
- if (ret)
- goto error_render_basic;
- }
- if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
- if (ret)
- goto error_compute_basic;
- }
- if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
- if (ret)
- goto error_render_pipe_profile;
- }
- if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
- if (ret)
- goto error_memory_reads;
- }
- if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
- if (ret)
- goto error_memory_writes;
- }
- if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
- if (ret)
- goto error_compute_extended;
- }
- if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
- if (ret)
- goto error_compute_l3_cache;
- }
- if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
- if (ret)
- goto error_hdc_and_sf;
- }
- if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_1);
- if (ret)
- goto error_l3_1;
- }
- if (get_l3_2_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_2);
- if (ret)
- goto error_l3_2;
- }
- if (get_l3_3_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_3);
- if (ret)
- goto error_l3_3;
- }
- if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
- if (ret)
- goto error_rasterizer_and_pixel_backend;
- }
- if (get_sampler_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_sampler);
- if (ret)
- goto error_sampler;
- }
- if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
- if (ret)
- goto error_tdl_1;
- }
- if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
- if (ret)
- goto error_tdl_2;
- }
- if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
- if (ret)
- goto error_compute_extra;
- }
- if (get_vme_pipe_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_vme_pipe);
- if (ret)
- goto error_vme_pipe;
- }
- if (get_test_oa_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_test_oa);
- if (ret)
- goto error_test_oa;
- }
+ dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- return 0;
+ dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-error_test_oa:
- if (get_vme_pipe_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_vme_pipe);
-error_vme_pipe:
- if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
-error_compute_extra:
- if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
-error_tdl_2:
- if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
-error_tdl_1:
- if (get_sampler_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler);
-error_sampler:
- if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
-error_rasterizer_and_pixel_backend:
- if (get_l3_3_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_3);
-error_l3_3:
- if (get_l3_2_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_2);
-error_l3_2:
- if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_1);
-error_l3_1:
- if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
-error_hdc_and_sf:
- if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
-error_compute_l3_cache:
- if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
-error_compute_extended:
- if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
-error_memory_writes:
- if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
-error_memory_reads:
- if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
-error_render_pipe_profile:
- if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
-error_compute_basic:
- if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
-error_render_basic:
- return ret;
-}
+ dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-void
-i915_perf_unregister_sysfs_sklgt2(struct drm_i915_private *dev_priv)
-{
- const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
- int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];
+ dev_priv->perf.oa.test_config.sysfs_metric.name = "1651949f-0ac0-4cb1-a06f-dafd74a407d1";
+ dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+
+ dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
- if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
- if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
- if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
- if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
- if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
- if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
- if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
- if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
- if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_1);
- if (get_l3_2_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_2);
- if (get_l3_3_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_3);
- if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
- if (get_sampler_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler);
- if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
- if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
- if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
- if (get_vme_pipe_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_vme_pipe);
- if (get_test_oa_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_test_oa);
+ dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
}
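
For context on how the structure populated above gets used: the per-metric-set sysfs groups removed throughout this patch are replaced by the single test_config filled in by i915_perf_load_test_config_sklgt2(). Below is a minimal sketch of how the shared perf code could register that group, assuming only what this hunk shows, namely that perf.metrics_kobj still exists and that test_config.sysfs_metric is an ordinary struct attribute_group; the helper name is illustrative and not part of the patch:

	/* Hypothetical helper: expose the test config loaded above via sysfs.
	 * sysfs_metric.name and .attrs are filled in by the loader, so a plain
	 * sysfs_create_group() on the existing metrics kobject is enough.
	 */
	static int i915_perf_register_test_config(struct drm_i915_private *dev_priv)
	{
		if (!dev_priv->perf.metrics_kobj)
			return -EINVAL;

		return sysfs_create_group(dev_priv->perf.metrics_kobj,
					  &dev_priv->perf.oa.test_config.sysfs_metric);
	}
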
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt2.h b/drivers/gpu/drm/i915/i915_oa_sklgt2.h
index f4397baf3328..fe1aa2c03958 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt2.h
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt2.h
@@ -29,12 +29,6 @@
#ifndef __I915_OA_SKLGT2_H__
#define __I915_OA_SKLGT2_H__
-extern int i915_oa_n_builtin_metric_sets_sklgt2;
-
-extern int i915_oa_select_metric_set_sklgt2(struct drm_i915_private *dev_priv);
-
-extern int i915_perf_register_sysfs_sklgt2(struct drm_i915_private *dev_priv);
-
-extern void i915_perf_unregister_sysfs_sklgt2(struct drm_i915_private *dev_priv);
+extern void i915_perf_load_test_config_sklgt2(struct drm_i915_private *dev_priv);
#endif
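
With the sklgt2 header now exporting only a test-config loader, choosing the right per-platform loader presumably happens once during shared perf setup rather than through the removed per-set sysfs register/unregister pairs. A hedged sketch of such dispatch follows; the IS_SKL_GT2()/IS_SKL_GT3() checks and the sklgt3 counterpart function are assumptions based on the naming pattern, not something shown in this diff:

	/* Hypothetical call site: pick the per-GT loader during perf init. */
	if (IS_SKYLAKE(dev_priv)) {
		if (IS_SKL_GT2(dev_priv))
			i915_perf_load_test_config_sklgt2(dev_priv);
		else if (IS_SKL_GT3(dev_priv))
			i915_perf_load_test_config_sklgt3(dev_priv); /* assumed analogue */
	}
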
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt3.c b/drivers/gpu/drm/i915/i915_oa_sklgt3.c
index 7765e22dfa17..85e51addf86a 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt3.c
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt3.c
@@ -31,1876 +31,6 @@
#include "i915_drv.h"
#include "i915_oa_sklgt3.h"
-enum metric_set_id {
- METRIC_SET_ID_RENDER_BASIC = 1,
- METRIC_SET_ID_COMPUTE_BASIC,
- METRIC_SET_ID_RENDER_PIPE_PROFILE,
- METRIC_SET_ID_MEMORY_READS,
- METRIC_SET_ID_MEMORY_WRITES,
- METRIC_SET_ID_COMPUTE_EXTENDED,
- METRIC_SET_ID_COMPUTE_L3_CACHE,
- METRIC_SET_ID_HDC_AND_SF,
- METRIC_SET_ID_L3_1,
- METRIC_SET_ID_L3_2,
- METRIC_SET_ID_L3_3,
- METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND,
- METRIC_SET_ID_SAMPLER,
- METRIC_SET_ID_TDL_1,
- METRIC_SET_ID_TDL_2,
- METRIC_SET_ID_COMPUTE_EXTRA,
- METRIC_SET_ID_VME_PIPE,
- METRIC_SET_ID_TEST_OA,
-};
-
-int i915_oa_n_builtin_metric_sets_sklgt3 = 18;
-
-static const struct i915_oa_reg b_counter_config_render_basic[] = {
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x00800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2740), 0x00000000 },
-};
-
-static const struct i915_oa_reg flex_eu_config_render_basic[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_render_basic[] = {
- { _MMIO(0x9888), 0x166c01e0 },
- { _MMIO(0x9888), 0x12170280 },
- { _MMIO(0x9888), 0x12370280 },
- { _MMIO(0x9888), 0x16ec01e0 },
- { _MMIO(0x9888), 0x11930317 },
- { _MMIO(0x9888), 0x159303df },
- { _MMIO(0x9888), 0x3f900003 },
- { _MMIO(0x9888), 0x1a4e0380 },
- { _MMIO(0x9888), 0x0a6c0053 },
- { _MMIO(0x9888), 0x106c0000 },
- { _MMIO(0x9888), 0x1c6c0000 },
- { _MMIO(0x9888), 0x0a1b4000 },
- { _MMIO(0x9888), 0x1c1c0001 },
- { _MMIO(0x9888), 0x002f1000 },
- { _MMIO(0x9888), 0x042f1000 },
- { _MMIO(0x9888), 0x004c4000 },
- { _MMIO(0x9888), 0x0a4c8400 },
- { _MMIO(0x9888), 0x0c4c0002 },
- { _MMIO(0x9888), 0x000d2000 },
- { _MMIO(0x9888), 0x060d8000 },
- { _MMIO(0x9888), 0x080da000 },
- { _MMIO(0x9888), 0x0a0da000 },
- { _MMIO(0x9888), 0x0c0f0400 },
- { _MMIO(0x9888), 0x0e0f6600 },
- { _MMIO(0x9888), 0x100f0001 },
- { _MMIO(0x9888), 0x002c8000 },
- { _MMIO(0x9888), 0x162ca200 },
- { _MMIO(0x9888), 0x062d8000 },
- { _MMIO(0x9888), 0x082d8000 },
- { _MMIO(0x9888), 0x00133000 },
- { _MMIO(0x9888), 0x08133000 },
- { _MMIO(0x9888), 0x00170020 },
- { _MMIO(0x9888), 0x08170021 },
- { _MMIO(0x9888), 0x10170000 },
- { _MMIO(0x9888), 0x0633c000 },
- { _MMIO(0x9888), 0x0833c000 },
- { _MMIO(0x9888), 0x06370800 },
- { _MMIO(0x9888), 0x08370840 },
- { _MMIO(0x9888), 0x10370000 },
- { _MMIO(0x9888), 0x1ace0200 },
- { _MMIO(0x9888), 0x0aec5300 },
- { _MMIO(0x9888), 0x10ec0000 },
- { _MMIO(0x9888), 0x1cec0000 },
- { _MMIO(0x9888), 0x0a9b8000 },
- { _MMIO(0x9888), 0x1c9c0002 },
- { _MMIO(0x9888), 0x0ccc0002 },
- { _MMIO(0x9888), 0x0a8d8000 },
- { _MMIO(0x9888), 0x108f0001 },
- { _MMIO(0x9888), 0x16ac8000 },
- { _MMIO(0x9888), 0x0d933031 },
- { _MMIO(0x9888), 0x0f933e3f },
- { _MMIO(0x9888), 0x01933d00 },
- { _MMIO(0x9888), 0x0393073c },
- { _MMIO(0x9888), 0x0593000e },
- { _MMIO(0x9888), 0x1d930000 },
- { _MMIO(0x9888), 0x19930000 },
- { _MMIO(0x9888), 0x1b930000 },
- { _MMIO(0x9888), 0x1d900157 },
- { _MMIO(0x9888), 0x1f900158 },
- { _MMIO(0x9888), 0x35900000 },
- { _MMIO(0x9888), 0x2b908000 },
- { _MMIO(0x9888), 0x2d908000 },
- { _MMIO(0x9888), 0x2f908000 },
- { _MMIO(0x9888), 0x31908000 },
- { _MMIO(0x9888), 0x15908000 },
- { _MMIO(0x9888), 0x17908000 },
- { _MMIO(0x9888), 0x19908000 },
- { _MMIO(0x9888), 0x1b908000 },
- { _MMIO(0x9888), 0x1190003f },
- { _MMIO(0x9888), 0x51907710 },
- { _MMIO(0x9888), 0x419020a0 },
- { _MMIO(0x9888), 0x55901515 },
- { _MMIO(0x9888), 0x45900529 },
- { _MMIO(0x9888), 0x47901025 },
- { _MMIO(0x9888), 0x57907770 },
- { _MMIO(0x9888), 0x49902100 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900108 },
- { _MMIO(0x9888), 0x59900007 },
- { _MMIO(0x9888), 0x43902108 },
- { _MMIO(0x9888), 0x53907777 },
-};
-
-static int
-get_render_basic_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_render_basic;
- lens[n] = ARRAY_SIZE(mux_config_render_basic);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_compute_basic[] = {
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x00800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2740), 0x00000000 },
-};
-
-static const struct i915_oa_reg flex_eu_config_compute_basic[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00000003 },
- { _MMIO(0xe658), 0x00002001 },
- { _MMIO(0xe758), 0x00778008 },
- { _MMIO(0xe45c), 0x00088078 },
- { _MMIO(0xe55c), 0x00808708 },
- { _MMIO(0xe65c), 0x00a08908 },
-};
-
-static const struct i915_oa_reg mux_config_compute_basic[] = {
- { _MMIO(0x9888), 0x104f00e0 },
- { _MMIO(0x9888), 0x124f1c00 },
- { _MMIO(0x9888), 0x106c00e0 },
- { _MMIO(0x9888), 0x37906800 },
- { _MMIO(0x9888), 0x3f900003 },
- { _MMIO(0x9888), 0x004e8000 },
- { _MMIO(0x9888), 0x1a4e0820 },
- { _MMIO(0x9888), 0x1c4e0002 },
- { _MMIO(0x9888), 0x064f0900 },
- { _MMIO(0x9888), 0x084f0032 },
- { _MMIO(0x9888), 0x0a4f1891 },
- { _MMIO(0x9888), 0x0c4f0e00 },
- { _MMIO(0x9888), 0x0e4f003c },
- { _MMIO(0x9888), 0x004f0d80 },
- { _MMIO(0x9888), 0x024f003b },
- { _MMIO(0x9888), 0x006c0002 },
- { _MMIO(0x9888), 0x086c0100 },
- { _MMIO(0x9888), 0x0c6c000c },
- { _MMIO(0x9888), 0x0e6c0b00 },
- { _MMIO(0x9888), 0x186c0000 },
- { _MMIO(0x9888), 0x1c6c0000 },
- { _MMIO(0x9888), 0x1e6c0000 },
- { _MMIO(0x9888), 0x001b4000 },
- { _MMIO(0x9888), 0x081b8000 },
- { _MMIO(0x9888), 0x0c1b4000 },
- { _MMIO(0x9888), 0x0e1b8000 },
- { _MMIO(0x9888), 0x101c8000 },
- { _MMIO(0x9888), 0x1a1c8000 },
- { _MMIO(0x9888), 0x1c1c0024 },
- { _MMIO(0x9888), 0x065b8000 },
- { _MMIO(0x9888), 0x085b4000 },
- { _MMIO(0x9888), 0x0a5bc000 },
- { _MMIO(0x9888), 0x0c5b8000 },
- { _MMIO(0x9888), 0x0e5b4000 },
- { _MMIO(0x9888), 0x005b8000 },
- { _MMIO(0x9888), 0x025b4000 },
- { _MMIO(0x9888), 0x1a5c6000 },
- { _MMIO(0x9888), 0x1c5c001b },
- { _MMIO(0x9888), 0x125c8000 },
- { _MMIO(0x9888), 0x145c8000 },
- { _MMIO(0x9888), 0x004c8000 },
- { _MMIO(0x9888), 0x0a4c2000 },
- { _MMIO(0x9888), 0x0c4c0208 },
- { _MMIO(0x9888), 0x000da000 },
- { _MMIO(0x9888), 0x060d8000 },
- { _MMIO(0x9888), 0x080da000 },
- { _MMIO(0x9888), 0x0a0da000 },
- { _MMIO(0x9888), 0x0c0da000 },
- { _MMIO(0x9888), 0x0e0da000 },
- { _MMIO(0x9888), 0x020d2000 },
- { _MMIO(0x9888), 0x0c0f5400 },
- { _MMIO(0x9888), 0x0e0f5500 },
- { _MMIO(0x9888), 0x100f0155 },
- { _MMIO(0x9888), 0x002c8000 },
- { _MMIO(0x9888), 0x0e2cc000 },
- { _MMIO(0x9888), 0x162cfb00 },
- { _MMIO(0x9888), 0x182c00be },
- { _MMIO(0x9888), 0x022cc000 },
- { _MMIO(0x9888), 0x042cc000 },
- { _MMIO(0x9888), 0x19900157 },
- { _MMIO(0x9888), 0x1b900158 },
- { _MMIO(0x9888), 0x1d900105 },
- { _MMIO(0x9888), 0x1f900103 },
- { _MMIO(0x9888), 0x35900000 },
- { _MMIO(0x9888), 0x11900fff },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900800 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x45900863 },
- { _MMIO(0x9888), 0x47900802 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900802 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900002 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x43900c62 },
- { _MMIO(0x9888), 0x53903333 },
-};
-
-static int
-get_compute_basic_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_compute_basic;
- lens[n] = ARRAY_SIZE(mux_config_compute_basic);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_render_pipe_profile[] = {
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2770), 0x0007ffea },
- { _MMIO(0x2774), 0x00007ffc },
- { _MMIO(0x2778), 0x0007affa },
- { _MMIO(0x277c), 0x0000f5fd },
- { _MMIO(0x2780), 0x00079ffa },
- { _MMIO(0x2784), 0x0000f3fb },
- { _MMIO(0x2788), 0x0007bf7a },
- { _MMIO(0x278c), 0x0000f7e7 },
- { _MMIO(0x2790), 0x0007fefa },
- { _MMIO(0x2794), 0x0000f7cf },
- { _MMIO(0x2798), 0x00077ffa },
- { _MMIO(0x279c), 0x0000efdf },
- { _MMIO(0x27a0), 0x0006fffa },
- { _MMIO(0x27a4), 0x0000cfbf },
- { _MMIO(0x27a8), 0x0003fffa },
- { _MMIO(0x27ac), 0x00005f7f },
-};
-
-static const struct i915_oa_reg flex_eu_config_render_pipe_profile[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00015014 },
- { _MMIO(0xe658), 0x00025024 },
- { _MMIO(0xe758), 0x00035034 },
- { _MMIO(0xe45c), 0x00045044 },
- { _MMIO(0xe55c), 0x00055054 },
- { _MMIO(0xe65c), 0x00065064 },
-};
-
-static const struct i915_oa_reg mux_config_render_pipe_profile[] = {
- { _MMIO(0x9888), 0x0c0e001f },
- { _MMIO(0x9888), 0x0a0f0000 },
- { _MMIO(0x9888), 0x10116800 },
- { _MMIO(0x9888), 0x178a03e0 },
- { _MMIO(0x9888), 0x11824c00 },
- { _MMIO(0x9888), 0x11830020 },
- { _MMIO(0x9888), 0x13840020 },
- { _MMIO(0x9888), 0x11850019 },
- { _MMIO(0x9888), 0x11860007 },
- { _MMIO(0x9888), 0x01870c40 },
- { _MMIO(0x9888), 0x17880000 },
- { _MMIO(0x9888), 0x022f4000 },
- { _MMIO(0x9888), 0x0a4c0040 },
- { _MMIO(0x9888), 0x0c0d8000 },
- { _MMIO(0x9888), 0x040d4000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x020e5400 },
- { _MMIO(0x9888), 0x000e0000 },
- { _MMIO(0x9888), 0x080f0040 },
- { _MMIO(0x9888), 0x000f0000 },
- { _MMIO(0x9888), 0x100f0000 },
- { _MMIO(0x9888), 0x0e0f0040 },
- { _MMIO(0x9888), 0x0c2c8000 },
- { _MMIO(0x9888), 0x06104000 },
- { _MMIO(0x9888), 0x06110012 },
- { _MMIO(0x9888), 0x06131000 },
- { _MMIO(0x9888), 0x01898000 },
- { _MMIO(0x9888), 0x0d890100 },
- { _MMIO(0x9888), 0x03898000 },
- { _MMIO(0x9888), 0x09808000 },
- { _MMIO(0x9888), 0x0b808000 },
- { _MMIO(0x9888), 0x0380c000 },
- { _MMIO(0x9888), 0x0f8a0075 },
- { _MMIO(0x9888), 0x1d8a0000 },
- { _MMIO(0x9888), 0x118a8000 },
- { _MMIO(0x9888), 0x1b8a4000 },
- { _MMIO(0x9888), 0x138a8000 },
- { _MMIO(0x9888), 0x1d81a000 },
- { _MMIO(0x9888), 0x15818000 },
- { _MMIO(0x9888), 0x17818000 },
- { _MMIO(0x9888), 0x0b820030 },
- { _MMIO(0x9888), 0x07828000 },
- { _MMIO(0x9888), 0x0d824000 },
- { _MMIO(0x9888), 0x0f828000 },
- { _MMIO(0x9888), 0x05824000 },
- { _MMIO(0x9888), 0x0d830003 },
- { _MMIO(0x9888), 0x0583000c },
- { _MMIO(0x9888), 0x09830000 },
- { _MMIO(0x9888), 0x03838000 },
- { _MMIO(0x9888), 0x07838000 },
- { _MMIO(0x9888), 0x0b840980 },
- { _MMIO(0x9888), 0x03844d80 },
- { _MMIO(0x9888), 0x11840000 },
- { _MMIO(0x9888), 0x09848000 },
- { _MMIO(0x9888), 0x09850080 },
- { _MMIO(0x9888), 0x03850003 },
- { _MMIO(0x9888), 0x01850000 },
- { _MMIO(0x9888), 0x07860000 },
- { _MMIO(0x9888), 0x0f860400 },
- { _MMIO(0x9888), 0x09870032 },
- { _MMIO(0x9888), 0x01888052 },
- { _MMIO(0x9888), 0x11880000 },
- { _MMIO(0x9888), 0x09884000 },
- { _MMIO(0x9888), 0x1b931001 },
- { _MMIO(0x9888), 0x1d930001 },
- { _MMIO(0x9888), 0x19934000 },
- { _MMIO(0x9888), 0x1b958000 },
- { _MMIO(0x9888), 0x1d950094 },
- { _MMIO(0x9888), 0x19958000 },
- { _MMIO(0x9888), 0x09e58000 },
- { _MMIO(0x9888), 0x0be58000 },
- { _MMIO(0x9888), 0x03e5c000 },
- { _MMIO(0x9888), 0x0592c000 },
- { _MMIO(0x9888), 0x0b928000 },
- { _MMIO(0x9888), 0x0d924000 },
- { _MMIO(0x9888), 0x0f924000 },
- { _MMIO(0x9888), 0x11928000 },
- { _MMIO(0x9888), 0x1392c000 },
- { _MMIO(0x9888), 0x09924000 },
- { _MMIO(0x9888), 0x01985000 },
- { _MMIO(0x9888), 0x07988000 },
- { _MMIO(0x9888), 0x09981000 },
- { _MMIO(0x9888), 0x0b982000 },
- { _MMIO(0x9888), 0x0d982000 },
- { _MMIO(0x9888), 0x0f989000 },
- { _MMIO(0x9888), 0x05982000 },
- { _MMIO(0x9888), 0x13904000 },
- { _MMIO(0x9888), 0x21904000 },
- { _MMIO(0x9888), 0x23904000 },
- { _MMIO(0x9888), 0x25908000 },
- { _MMIO(0x9888), 0x27904000 },
- { _MMIO(0x9888), 0x29908000 },
- { _MMIO(0x9888), 0x2b904000 },
- { _MMIO(0x9888), 0x2f904000 },
- { _MMIO(0x9888), 0x31904000 },
- { _MMIO(0x9888), 0x15904000 },
- { _MMIO(0x9888), 0x17908000 },
- { _MMIO(0x9888), 0x19908000 },
- { _MMIO(0x9888), 0x1b904000 },
- { _MMIO(0x9888), 0x1190c080 },
- { _MMIO(0x9888), 0x51901150 },
- { _MMIO(0x9888), 0x41901400 },
- { _MMIO(0x9888), 0x55905111 },
- { _MMIO(0x9888), 0x45901400 },
- { _MMIO(0x9888), 0x479004a5 },
- { _MMIO(0x9888), 0x57903455 },
- { _MMIO(0x9888), 0x49900000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b9000a0 },
- { _MMIO(0x9888), 0x59900001 },
- { _MMIO(0x9888), 0x43900005 },
- { _MMIO(0x9888), 0x53900455 },
-};
-
-static int
-get_render_pipe_profile_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_render_pipe_profile;
- lens[n] = ARRAY_SIZE(mux_config_render_pipe_profile);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_memory_reads[] = {
- { _MMIO(0x272c), 0xffffffff },
- { _MMIO(0x2728), 0xffffffff },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x271c), 0xffffffff },
- { _MMIO(0x2718), 0xffffffff },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x274c), 0x86543210 },
- { _MMIO(0x2748), 0x86543210 },
- { _MMIO(0x2744), 0x00006667 },
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x275c), 0x86543210 },
- { _MMIO(0x2758), 0x86543210 },
- { _MMIO(0x2754), 0x00006465 },
- { _MMIO(0x2750), 0x00000000 },
- { _MMIO(0x2770), 0x0007f81a },
- { _MMIO(0x2774), 0x0000fe00 },
- { _MMIO(0x2778), 0x0007f82a },
- { _MMIO(0x277c), 0x0000fe00 },
- { _MMIO(0x2780), 0x0007f872 },
- { _MMIO(0x2784), 0x0000fe00 },
- { _MMIO(0x2788), 0x0007f8ba },
- { _MMIO(0x278c), 0x0000fe00 },
- { _MMIO(0x2790), 0x0007f87a },
- { _MMIO(0x2794), 0x0000fe00 },
- { _MMIO(0x2798), 0x0007f8ea },
- { _MMIO(0x279c), 0x0000fe00 },
- { _MMIO(0x27a0), 0x0007f8e2 },
- { _MMIO(0x27a4), 0x0000fe00 },
- { _MMIO(0x27a8), 0x0007f8f2 },
- { _MMIO(0x27ac), 0x0000fe00 },
-};
-
-static const struct i915_oa_reg flex_eu_config_memory_reads[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00015014 },
- { _MMIO(0xe658), 0x00025024 },
- { _MMIO(0xe758), 0x00035034 },
- { _MMIO(0xe45c), 0x00045044 },
- { _MMIO(0xe55c), 0x00055054 },
- { _MMIO(0xe65c), 0x00065064 },
-};
-
-static const struct i915_oa_reg mux_config_memory_reads[] = {
- { _MMIO(0x9888), 0x11810c00 },
- { _MMIO(0x9888), 0x1381001a },
- { _MMIO(0x9888), 0x37906800 },
- { _MMIO(0x9888), 0x3f900064 },
- { _MMIO(0x9888), 0x03811300 },
- { _MMIO(0x9888), 0x05811b12 },
- { _MMIO(0x9888), 0x0781001a },
- { _MMIO(0x9888), 0x1f810000 },
- { _MMIO(0x9888), 0x17810000 },
- { _MMIO(0x9888), 0x19810000 },
- { _MMIO(0x9888), 0x1b810000 },
- { _MMIO(0x9888), 0x1d810000 },
- { _MMIO(0x9888), 0x1b930055 },
- { _MMIO(0x9888), 0x03e58000 },
- { _MMIO(0x9888), 0x05e5c000 },
- { _MMIO(0x9888), 0x07e54000 },
- { _MMIO(0x9888), 0x13900150 },
- { _MMIO(0x9888), 0x21900151 },
- { _MMIO(0x9888), 0x23900152 },
- { _MMIO(0x9888), 0x25900153 },
- { _MMIO(0x9888), 0x27900154 },
- { _MMIO(0x9888), 0x29900155 },
- { _MMIO(0x9888), 0x2b900156 },
- { _MMIO(0x9888), 0x2d900157 },
- { _MMIO(0x9888), 0x2f90015f },
- { _MMIO(0x9888), 0x31900105 },
- { _MMIO(0x9888), 0x15900103 },
- { _MMIO(0x9888), 0x17900101 },
- { _MMIO(0x9888), 0x35900000 },
- { _MMIO(0x9888), 0x19908000 },
- { _MMIO(0x9888), 0x1b908000 },
- { _MMIO(0x9888), 0x1d908000 },
- { _MMIO(0x9888), 0x1f908000 },
- { _MMIO(0x9888), 0x11900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900c60 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x45900c00 },
- { _MMIO(0x9888), 0x47900c63 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900c63 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900063 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x43900003 },
- { _MMIO(0x9888), 0x53900000 },
-};
-
-static int
-get_memory_reads_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_memory_reads;
- lens[n] = ARRAY_SIZE(mux_config_memory_reads);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_memory_writes[] = {
- { _MMIO(0x272c), 0xffffffff },
- { _MMIO(0x2728), 0xffffffff },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x271c), 0xffffffff },
- { _MMIO(0x2718), 0xffffffff },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x274c), 0x86543210 },
- { _MMIO(0x2748), 0x86543210 },
- { _MMIO(0x2744), 0x00006667 },
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x275c), 0x86543210 },
- { _MMIO(0x2758), 0x86543210 },
- { _MMIO(0x2754), 0x00006465 },
- { _MMIO(0x2750), 0x00000000 },
- { _MMIO(0x2770), 0x0007f81a },
- { _MMIO(0x2774), 0x0000fe00 },
- { _MMIO(0x2778), 0x0007f82a },
- { _MMIO(0x277c), 0x0000fe00 },
- { _MMIO(0x2780), 0x0007f822 },
- { _MMIO(0x2784), 0x0000fe00 },
- { _MMIO(0x2788), 0x0007f8ba },
- { _MMIO(0x278c), 0x0000fe00 },
- { _MMIO(0x2790), 0x0007f87a },
- { _MMIO(0x2794), 0x0000fe00 },
- { _MMIO(0x2798), 0x0007f8ea },
- { _MMIO(0x279c), 0x0000fe00 },
- { _MMIO(0x27a0), 0x0007f8e2 },
- { _MMIO(0x27a4), 0x0000fe00 },
- { _MMIO(0x27a8), 0x0007f8f2 },
- { _MMIO(0x27ac), 0x0000fe00 },
-};
-
-static const struct i915_oa_reg flex_eu_config_memory_writes[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00015014 },
- { _MMIO(0xe658), 0x00025024 },
- { _MMIO(0xe758), 0x00035034 },
- { _MMIO(0xe45c), 0x00045044 },
- { _MMIO(0xe55c), 0x00055054 },
- { _MMIO(0xe65c), 0x00065064 },
-};
-
-static const struct i915_oa_reg mux_config_memory_writes[] = {
- { _MMIO(0x9888), 0x11810c00 },
- { _MMIO(0x9888), 0x1381001a },
- { _MMIO(0x9888), 0x37906800 },
- { _MMIO(0x9888), 0x3f901000 },
- { _MMIO(0x9888), 0x03811300 },
- { _MMIO(0x9888), 0x05811b12 },
- { _MMIO(0x9888), 0x0781001a },
- { _MMIO(0x9888), 0x1f810000 },
- { _MMIO(0x9888), 0x17810000 },
- { _MMIO(0x9888), 0x19810000 },
- { _MMIO(0x9888), 0x1b810000 },
- { _MMIO(0x9888), 0x1d810000 },
- { _MMIO(0x9888), 0x1b930055 },
- { _MMIO(0x9888), 0x03e58000 },
- { _MMIO(0x9888), 0x05e5c000 },
- { _MMIO(0x9888), 0x07e54000 },
- { _MMIO(0x9888), 0x13900160 },
- { _MMIO(0x9888), 0x21900161 },
- { _MMIO(0x9888), 0x23900162 },
- { _MMIO(0x9888), 0x25900163 },
- { _MMIO(0x9888), 0x27900164 },
- { _MMIO(0x9888), 0x29900165 },
- { _MMIO(0x9888), 0x2b900166 },
- { _MMIO(0x9888), 0x2d900167 },
- { _MMIO(0x9888), 0x2f900150 },
- { _MMIO(0x9888), 0x31900105 },
- { _MMIO(0x9888), 0x15900103 },
- { _MMIO(0x9888), 0x17900101 },
- { _MMIO(0x9888), 0x35900000 },
- { _MMIO(0x9888), 0x19908000 },
- { _MMIO(0x9888), 0x1b908000 },
- { _MMIO(0x9888), 0x1d908000 },
- { _MMIO(0x9888), 0x1f908000 },
- { _MMIO(0x9888), 0x11900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900c60 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x45900c00 },
- { _MMIO(0x9888), 0x47900c63 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900c63 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900063 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x43900003 },
- { _MMIO(0x9888), 0x53900000 },
-};
-
-static int
-get_memory_writes_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_memory_writes;
- lens[n] = ARRAY_SIZE(mux_config_memory_writes);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_compute_extended[] = {
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2770), 0x0007fc2a },
- { _MMIO(0x2774), 0x0000bf00 },
- { _MMIO(0x2778), 0x0007fc6a },
- { _MMIO(0x277c), 0x0000bf00 },
- { _MMIO(0x2780), 0x0007fc92 },
- { _MMIO(0x2784), 0x0000bf00 },
- { _MMIO(0x2788), 0x0007fca2 },
- { _MMIO(0x278c), 0x0000bf00 },
- { _MMIO(0x2790), 0x0007fc32 },
- { _MMIO(0x2794), 0x0000bf00 },
- { _MMIO(0x2798), 0x0007fc9a },
- { _MMIO(0x279c), 0x0000bf00 },
- { _MMIO(0x27a0), 0x0007fe6a },
- { _MMIO(0x27a4), 0x0000bf00 },
- { _MMIO(0x27a8), 0x0007fe7a },
- { _MMIO(0x27ac), 0x0000bf00 },
-};
-
-static const struct i915_oa_reg flex_eu_config_compute_extended[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00000003 },
- { _MMIO(0xe658), 0x00002001 },
- { _MMIO(0xe758), 0x00778008 },
- { _MMIO(0xe45c), 0x00088078 },
- { _MMIO(0xe55c), 0x00808708 },
- { _MMIO(0xe65c), 0x00a08908 },
-};
-
-static const struct i915_oa_reg mux_config_compute_extended[] = {
- { _MMIO(0x9888), 0x106c00e0 },
- { _MMIO(0x9888), 0x141c8160 },
- { _MMIO(0x9888), 0x161c8015 },
- { _MMIO(0x9888), 0x181c0120 },
- { _MMIO(0x9888), 0x004e8000 },
- { _MMIO(0x9888), 0x0e4e8000 },
- { _MMIO(0x9888), 0x184e8000 },
- { _MMIO(0x9888), 0x1a4eaaa0 },
- { _MMIO(0x9888), 0x1c4e0002 },
- { _MMIO(0x9888), 0x024e8000 },
- { _MMIO(0x9888), 0x044e8000 },
- { _MMIO(0x9888), 0x064e8000 },
- { _MMIO(0x9888), 0x084e8000 },
- { _MMIO(0x9888), 0x0a4e8000 },
- { _MMIO(0x9888), 0x0e6c0b01 },
- { _MMIO(0x9888), 0x006c0200 },
- { _MMIO(0x9888), 0x026c000c },
- { _MMIO(0x9888), 0x1c6c0000 },
- { _MMIO(0x9888), 0x1e6c0000 },
- { _MMIO(0x9888), 0x1a6c0000 },
- { _MMIO(0x9888), 0x0e1bc000 },
- { _MMIO(0x9888), 0x001b8000 },
- { _MMIO(0x9888), 0x021bc000 },
- { _MMIO(0x9888), 0x001c0041 },
- { _MMIO(0x9888), 0x061c4200 },
- { _MMIO(0x9888), 0x081c4443 },
- { _MMIO(0x9888), 0x0a1c4645 },
- { _MMIO(0x9888), 0x0c1c7647 },
- { _MMIO(0x9888), 0x041c7357 },
- { _MMIO(0x9888), 0x1c1c0030 },
- { _MMIO(0x9888), 0x101c0000 },
- { _MMIO(0x9888), 0x1a1c0000 },
- { _MMIO(0x9888), 0x121c8000 },
- { _MMIO(0x9888), 0x004c8000 },
- { _MMIO(0x9888), 0x0a4caa2a },
- { _MMIO(0x9888), 0x0c4c02aa },
- { _MMIO(0x9888), 0x084ca000 },
- { _MMIO(0x9888), 0x000da000 },
- { _MMIO(0x9888), 0x060d8000 },
- { _MMIO(0x9888), 0x080da000 },
- { _MMIO(0x9888), 0x0a0da000 },
- { _MMIO(0x9888), 0x0c0da000 },
- { _MMIO(0x9888), 0x0e0da000 },
- { _MMIO(0x9888), 0x020da000 },
- { _MMIO(0x9888), 0x040da000 },
- { _MMIO(0x9888), 0x0c0f5400 },
- { _MMIO(0x9888), 0x0e0f5515 },
- { _MMIO(0x9888), 0x100f0155 },
- { _MMIO(0x9888), 0x002c8000 },
- { _MMIO(0x9888), 0x0e2c8000 },
- { _MMIO(0x9888), 0x162caa00 },
- { _MMIO(0x9888), 0x182c00aa },
- { _MMIO(0x9888), 0x022c8000 },
- { _MMIO(0x9888), 0x042c8000 },
- { _MMIO(0x9888), 0x062c8000 },
- { _MMIO(0x9888), 0x082c8000 },
- { _MMIO(0x9888), 0x0a2c8000 },
- { _MMIO(0x9888), 0x11907fff },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900040 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x45900802 },
- { _MMIO(0x9888), 0x47900842 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900842 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900000 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x43900800 },
- { _MMIO(0x9888), 0x53900000 },
-};
-
-static int
-get_compute_extended_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_compute_extended;
- lens[n] = ARRAY_SIZE(mux_config_compute_extended);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_compute_l3_cache[] = {
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x30800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x30800000 },
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2770), 0x0007fffa },
- { _MMIO(0x2774), 0x0000fefe },
- { _MMIO(0x2778), 0x0007fffa },
- { _MMIO(0x277c), 0x0000fefd },
- { _MMIO(0x2790), 0x0007fffa },
- { _MMIO(0x2794), 0x0000fbef },
- { _MMIO(0x2798), 0x0007fffa },
- { _MMIO(0x279c), 0x0000fbdf },
-};
-
-static const struct i915_oa_reg flex_eu_config_compute_l3_cache[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00000003 },
- { _MMIO(0xe658), 0x00002001 },
- { _MMIO(0xe758), 0x00101100 },
- { _MMIO(0xe45c), 0x00201200 },
- { _MMIO(0xe55c), 0x00301300 },
- { _MMIO(0xe65c), 0x00401400 },
-};
-
-static const struct i915_oa_reg mux_config_compute_l3_cache[] = {
- { _MMIO(0x9888), 0x166c0760 },
- { _MMIO(0x9888), 0x1593001e },
- { _MMIO(0x9888), 0x3f900003 },
- { _MMIO(0x9888), 0x004e8000 },
- { _MMIO(0x9888), 0x0e4e8000 },
- { _MMIO(0x9888), 0x184e8000 },
- { _MMIO(0x9888), 0x1a4e8020 },
- { _MMIO(0x9888), 0x1c4e0002 },
- { _MMIO(0x9888), 0x006c0051 },
- { _MMIO(0x9888), 0x066c5000 },
- { _MMIO(0x9888), 0x086c5c5d },
- { _MMIO(0x9888), 0x0e6c5e5f },
- { _MMIO(0x9888), 0x106c0000 },
- { _MMIO(0x9888), 0x186c0000 },
- { _MMIO(0x9888), 0x1c6c0000 },
- { _MMIO(0x9888), 0x1e6c0000 },
- { _MMIO(0x9888), 0x001b4000 },
- { _MMIO(0x9888), 0x061b8000 },
- { _MMIO(0x9888), 0x081bc000 },
- { _MMIO(0x9888), 0x0e1bc000 },
- { _MMIO(0x9888), 0x101c8000 },
- { _MMIO(0x9888), 0x1a1ce000 },
- { _MMIO(0x9888), 0x1c1c0030 },
- { _MMIO(0x9888), 0x004c8000 },
- { _MMIO(0x9888), 0x0a4c2a00 },
- { _MMIO(0x9888), 0x0c4c0280 },
- { _MMIO(0x9888), 0x000d2000 },
- { _MMIO(0x9888), 0x060d8000 },
- { _MMIO(0x9888), 0x080da000 },
- { _MMIO(0x9888), 0x0e0da000 },
- { _MMIO(0x9888), 0x0c0f0400 },
- { _MMIO(0x9888), 0x0e0f1500 },
- { _MMIO(0x9888), 0x100f0140 },
- { _MMIO(0x9888), 0x002c8000 },
- { _MMIO(0x9888), 0x0e2c8000 },
- { _MMIO(0x9888), 0x162c0a00 },
- { _MMIO(0x9888), 0x182c00a0 },
- { _MMIO(0x9888), 0x03933300 },
- { _MMIO(0x9888), 0x05930032 },
- { _MMIO(0x9888), 0x11930000 },
- { _MMIO(0x9888), 0x1b930000 },
- { _MMIO(0x9888), 0x1d900157 },
- { _MMIO(0x9888), 0x1f900158 },
- { _MMIO(0x9888), 0x35900000 },
- { _MMIO(0x9888), 0x19908000 },
- { _MMIO(0x9888), 0x1b908000 },
- { _MMIO(0x9888), 0x1190030f },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900000 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x45900063 },
- { _MMIO(0x9888), 0x47900000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x4b900000 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x53903333 },
- { _MMIO(0x9888), 0x43900840 },
-};
-
-static int
-get_compute_l3_cache_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_compute_l3_cache;
- lens[n] = ARRAY_SIZE(mux_config_compute_l3_cache);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_hdc_and_sf[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x10800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2770), 0x00000002 },
- { _MMIO(0x2774), 0x0000fdff },
-};
-
-static const struct i915_oa_reg flex_eu_config_hdc_and_sf[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_hdc_and_sf[] = {
- { _MMIO(0x9888), 0x104f0232 },
- { _MMIO(0x9888), 0x124f4640 },
- { _MMIO(0x9888), 0x106c0232 },
- { _MMIO(0x9888), 0x11834400 },
- { _MMIO(0x9888), 0x0a4e8000 },
- { _MMIO(0x9888), 0x0c4e8000 },
- { _MMIO(0x9888), 0x004f1880 },
- { _MMIO(0x9888), 0x024f08bb },
- { _MMIO(0x9888), 0x044f001b },
- { _MMIO(0x9888), 0x046c0100 },
- { _MMIO(0x9888), 0x066c000b },
- { _MMIO(0x9888), 0x1a6c0000 },
- { _MMIO(0x9888), 0x041b8000 },
- { _MMIO(0x9888), 0x061b4000 },
- { _MMIO(0x9888), 0x1a1c1800 },
- { _MMIO(0x9888), 0x005b8000 },
- { _MMIO(0x9888), 0x025bc000 },
- { _MMIO(0x9888), 0x045b4000 },
- { _MMIO(0x9888), 0x125c8000 },
- { _MMIO(0x9888), 0x145c8000 },
- { _MMIO(0x9888), 0x165c8000 },
- { _MMIO(0x9888), 0x185c8000 },
- { _MMIO(0x9888), 0x0a4c00a0 },
- { _MMIO(0x9888), 0x000d8000 },
- { _MMIO(0x9888), 0x020da000 },
- { _MMIO(0x9888), 0x040da000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x0c0f5000 },
- { _MMIO(0x9888), 0x0e0f0055 },
- { _MMIO(0x9888), 0x022cc000 },
- { _MMIO(0x9888), 0x042cc000 },
- { _MMIO(0x9888), 0x062cc000 },
- { _MMIO(0x9888), 0x082cc000 },
- { _MMIO(0x9888), 0x0a2c8000 },
- { _MMIO(0x9888), 0x0c2c8000 },
- { _MMIO(0x9888), 0x0f828000 },
- { _MMIO(0x9888), 0x0f8305c0 },
- { _MMIO(0x9888), 0x09830000 },
- { _MMIO(0x9888), 0x07830000 },
- { _MMIO(0x9888), 0x1d950080 },
- { _MMIO(0x9888), 0x13928000 },
- { _MMIO(0x9888), 0x0f988000 },
- { _MMIO(0x9888), 0x31904000 },
- { _MMIO(0x9888), 0x1190fc00 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x59900005 },
- { _MMIO(0x9888), 0x4b900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900800 },
- { _MMIO(0x9888), 0x43900842 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900000 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static int
-get_hdc_and_sf_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_hdc_and_sf;
- lens[n] = ARRAY_SIZE(mux_config_hdc_and_sf);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_l3_1[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2770), 0x00100070 },
- { _MMIO(0x2774), 0x0000fff1 },
- { _MMIO(0x2778), 0x00014002 },
- { _MMIO(0x277c), 0x0000c3ff },
- { _MMIO(0x2780), 0x00010002 },
- { _MMIO(0x2784), 0x0000c7ff },
- { _MMIO(0x2788), 0x00004002 },
- { _MMIO(0x278c), 0x0000d3ff },
- { _MMIO(0x2790), 0x00100700 },
- { _MMIO(0x2794), 0x0000ff1f },
- { _MMIO(0x2798), 0x00001402 },
- { _MMIO(0x279c), 0x0000fc3f },
- { _MMIO(0x27a0), 0x00001002 },
- { _MMIO(0x27a4), 0x0000fc7f },
- { _MMIO(0x27a8), 0x00000402 },
- { _MMIO(0x27ac), 0x0000fd3f },
-};
-
-static const struct i915_oa_reg flex_eu_config_l3_1[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_l3_1[] = {
- { _MMIO(0x9888), 0x126c7b40 },
- { _MMIO(0x9888), 0x166c0020 },
- { _MMIO(0x9888), 0x0a603444 },
- { _MMIO(0x9888), 0x0a613400 },
- { _MMIO(0x9888), 0x1a4ea800 },
- { _MMIO(0x9888), 0x1c4e0002 },
- { _MMIO(0x9888), 0x024e8000 },
- { _MMIO(0x9888), 0x044e8000 },
- { _MMIO(0x9888), 0x064e8000 },
- { _MMIO(0x9888), 0x084e8000 },
- { _MMIO(0x9888), 0x0a4e8000 },
- { _MMIO(0x9888), 0x064f4000 },
- { _MMIO(0x9888), 0x0c6c5327 },
- { _MMIO(0x9888), 0x0e6c5425 },
- { _MMIO(0x9888), 0x006c2a00 },
- { _MMIO(0x9888), 0x026c285b },
- { _MMIO(0x9888), 0x046c005c },
- { _MMIO(0x9888), 0x106c0000 },
- { _MMIO(0x9888), 0x1c6c0000 },
- { _MMIO(0x9888), 0x1e6c0000 },
- { _MMIO(0x9888), 0x1a6c0800 },
- { _MMIO(0x9888), 0x0c1bc000 },
- { _MMIO(0x9888), 0x0e1bc000 },
- { _MMIO(0x9888), 0x001b8000 },
- { _MMIO(0x9888), 0x021bc000 },
- { _MMIO(0x9888), 0x041bc000 },
- { _MMIO(0x9888), 0x1c1c003c },
- { _MMIO(0x9888), 0x121c8000 },
- { _MMIO(0x9888), 0x141c8000 },
- { _MMIO(0x9888), 0x161c8000 },
- { _MMIO(0x9888), 0x181c8000 },
- { _MMIO(0x9888), 0x1a1c0800 },
- { _MMIO(0x9888), 0x065b4000 },
- { _MMIO(0x9888), 0x1a5c1000 },
- { _MMIO(0x9888), 0x10600000 },
- { _MMIO(0x9888), 0x04600000 },
- { _MMIO(0x9888), 0x0c610044 },
- { _MMIO(0x9888), 0x10610000 },
- { _MMIO(0x9888), 0x06610000 },
- { _MMIO(0x9888), 0x0c4c02a8 },
- { _MMIO(0x9888), 0x084ca000 },
- { _MMIO(0x9888), 0x0a4c002a },
- { _MMIO(0x9888), 0x0c0da000 },
- { _MMIO(0x9888), 0x0e0da000 },
- { _MMIO(0x9888), 0x000d8000 },
- { _MMIO(0x9888), 0x020da000 },
- { _MMIO(0x9888), 0x040da000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x100f0154 },
- { _MMIO(0x9888), 0x0c0f5000 },
- { _MMIO(0x9888), 0x0e0f0055 },
- { _MMIO(0x9888), 0x182c00aa },
- { _MMIO(0x9888), 0x022c8000 },
- { _MMIO(0x9888), 0x042c8000 },
- { _MMIO(0x9888), 0x062c8000 },
- { _MMIO(0x9888), 0x082c8000 },
- { _MMIO(0x9888), 0x0a2c8000 },
- { _MMIO(0x9888), 0x0c2cc000 },
- { _MMIO(0x9888), 0x1190ffc0 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900420 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900021 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900400 },
- { _MMIO(0x9888), 0x43900421 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900040 },
-};
-
-static int
-get_l3_1_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_l3_1;
- lens[n] = ARRAY_SIZE(mux_config_l3_1);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_l3_2[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2770), 0x00100070 },
- { _MMIO(0x2774), 0x0000fff1 },
- { _MMIO(0x2778), 0x00028002 },
- { _MMIO(0x277c), 0x000087ff },
- { _MMIO(0x2780), 0x00020002 },
- { _MMIO(0x2784), 0x00008fff },
- { _MMIO(0x2788), 0x00008002 },
- { _MMIO(0x278c), 0x0000a7ff },
-};
-
-static const struct i915_oa_reg flex_eu_config_l3_2[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_l3_2[] = {
- { _MMIO(0x9888), 0x126c02e0 },
- { _MMIO(0x9888), 0x146c0001 },
- { _MMIO(0x9888), 0x0a623400 },
- { _MMIO(0x9888), 0x044e8000 },
- { _MMIO(0x9888), 0x064e8000 },
- { _MMIO(0x9888), 0x084e8000 },
- { _MMIO(0x9888), 0x0a4e8000 },
- { _MMIO(0x9888), 0x064f4000 },
- { _MMIO(0x9888), 0x026c3324 },
- { _MMIO(0x9888), 0x046c3422 },
- { _MMIO(0x9888), 0x106c0000 },
- { _MMIO(0x9888), 0x1a6c0000 },
- { _MMIO(0x9888), 0x021bc000 },
- { _MMIO(0x9888), 0x041bc000 },
- { _MMIO(0x9888), 0x141c8000 },
- { _MMIO(0x9888), 0x161c8000 },
- { _MMIO(0x9888), 0x181c8000 },
- { _MMIO(0x9888), 0x1a1c0800 },
- { _MMIO(0x9888), 0x065b4000 },
- { _MMIO(0x9888), 0x1a5c1000 },
- { _MMIO(0x9888), 0x06614000 },
- { _MMIO(0x9888), 0x0c620044 },
- { _MMIO(0x9888), 0x10620000 },
- { _MMIO(0x9888), 0x06620000 },
- { _MMIO(0x9888), 0x084c8000 },
- { _MMIO(0x9888), 0x0a4c002a },
- { _MMIO(0x9888), 0x020da000 },
- { _MMIO(0x9888), 0x040da000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x0c0f4000 },
- { _MMIO(0x9888), 0x0e0f0055 },
- { _MMIO(0x9888), 0x042c8000 },
- { _MMIO(0x9888), 0x062c8000 },
- { _MMIO(0x9888), 0x082c8000 },
- { _MMIO(0x9888), 0x0a2c8000 },
- { _MMIO(0x9888), 0x0c2cc000 },
- { _MMIO(0x9888), 0x1190f800 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x43900000 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900000 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static int
-get_l3_2_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_l3_2;
- lens[n] = ARRAY_SIZE(mux_config_l3_2);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_l3_3[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2770), 0x00100070 },
- { _MMIO(0x2774), 0x0000fff1 },
- { _MMIO(0x2778), 0x00028002 },
- { _MMIO(0x277c), 0x000087ff },
- { _MMIO(0x2780), 0x00020002 },
- { _MMIO(0x2784), 0x00008fff },
- { _MMIO(0x2788), 0x00008002 },
- { _MMIO(0x278c), 0x0000a7ff },
-};
-
-static const struct i915_oa_reg flex_eu_config_l3_3[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_l3_3[] = {
- { _MMIO(0x9888), 0x126c4e80 },
- { _MMIO(0x9888), 0x146c0000 },
- { _MMIO(0x9888), 0x0a633400 },
- { _MMIO(0x9888), 0x044e8000 },
- { _MMIO(0x9888), 0x064e8000 },
- { _MMIO(0x9888), 0x084e8000 },
- { _MMIO(0x9888), 0x0a4e8000 },
- { _MMIO(0x9888), 0x0c4e8000 },
- { _MMIO(0x9888), 0x026c3321 },
- { _MMIO(0x9888), 0x046c342f },
- { _MMIO(0x9888), 0x106c0000 },
- { _MMIO(0x9888), 0x1a6c2000 },
- { _MMIO(0x9888), 0x021bc000 },
- { _MMIO(0x9888), 0x041bc000 },
- { _MMIO(0x9888), 0x061b4000 },
- { _MMIO(0x9888), 0x141c8000 },
- { _MMIO(0x9888), 0x161c8000 },
- { _MMIO(0x9888), 0x181c8000 },
- { _MMIO(0x9888), 0x1a1c1800 },
- { _MMIO(0x9888), 0x06604000 },
- { _MMIO(0x9888), 0x0c630044 },
- { _MMIO(0x9888), 0x10630000 },
- { _MMIO(0x9888), 0x06630000 },
- { _MMIO(0x9888), 0x084c8000 },
- { _MMIO(0x9888), 0x0a4c00aa },
- { _MMIO(0x9888), 0x020da000 },
- { _MMIO(0x9888), 0x040da000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x0c0f4000 },
- { _MMIO(0x9888), 0x0e0f0055 },
- { _MMIO(0x9888), 0x042c8000 },
- { _MMIO(0x9888), 0x062c8000 },
- { _MMIO(0x9888), 0x082c8000 },
- { _MMIO(0x9888), 0x0a2c8000 },
- { _MMIO(0x9888), 0x0c2c8000 },
- { _MMIO(0x9888), 0x1190f800 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x43900842 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900002 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static int
-get_l3_3_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_l3_3;
- lens[n] = ARRAY_SIZE(mux_config_l3_3);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_rasterizer_and_pixel_backend[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x30800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2770), 0x00000002 },
- { _MMIO(0x2774), 0x0000efff },
- { _MMIO(0x2778), 0x00006000 },
- { _MMIO(0x277c), 0x0000f3ff },
-};
-
-static const struct i915_oa_reg flex_eu_config_rasterizer_and_pixel_backend[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_rasterizer_and_pixel_backend[] = {
- { _MMIO(0x9888), 0x102f3800 },
- { _MMIO(0x9888), 0x144d0500 },
- { _MMIO(0x9888), 0x120d03c0 },
- { _MMIO(0x9888), 0x140d03cf },
- { _MMIO(0x9888), 0x0c0f0004 },
- { _MMIO(0x9888), 0x0c4e4000 },
- { _MMIO(0x9888), 0x042f0480 },
- { _MMIO(0x9888), 0x082f0000 },
- { _MMIO(0x9888), 0x022f0000 },
- { _MMIO(0x9888), 0x0a4c0090 },
- { _MMIO(0x9888), 0x064d0027 },
- { _MMIO(0x9888), 0x004d0000 },
- { _MMIO(0x9888), 0x000d0d40 },
- { _MMIO(0x9888), 0x020d803f },
- { _MMIO(0x9888), 0x040d8023 },
- { _MMIO(0x9888), 0x100d0000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x020f0010 },
- { _MMIO(0x9888), 0x000f0000 },
- { _MMIO(0x9888), 0x0e0f0050 },
- { _MMIO(0x9888), 0x0a2c8000 },
- { _MMIO(0x9888), 0x0c2c8000 },
- { _MMIO(0x9888), 0x1190fc00 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41901400 },
- { _MMIO(0x9888), 0x43901485 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900001 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static int
-get_rasterizer_and_pixel_backend_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_rasterizer_and_pixel_backend;
- lens[n] = ARRAY_SIZE(mux_config_rasterizer_and_pixel_backend);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_sampler[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x70800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2770), 0x0000c000 },
- { _MMIO(0x2774), 0x0000e7ff },
- { _MMIO(0x2778), 0x00003000 },
- { _MMIO(0x277c), 0x0000f9ff },
- { _MMIO(0x2780), 0x00000c00 },
- { _MMIO(0x2784), 0x0000fe7f },
-};
-
-static const struct i915_oa_reg flex_eu_config_sampler[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_sampler[] = {
- { _MMIO(0x9888), 0x14152c00 },
- { _MMIO(0x9888), 0x16150005 },
- { _MMIO(0x9888), 0x121600a0 },
- { _MMIO(0x9888), 0x14352c00 },
- { _MMIO(0x9888), 0x16350005 },
- { _MMIO(0x9888), 0x123600a0 },
- { _MMIO(0x9888), 0x14552c00 },
- { _MMIO(0x9888), 0x16550005 },
- { _MMIO(0x9888), 0x125600a0 },
- { _MMIO(0x9888), 0x062f6000 },
- { _MMIO(0x9888), 0x022f2000 },
- { _MMIO(0x9888), 0x0c4c0050 },
- { _MMIO(0x9888), 0x0a4c0010 },
- { _MMIO(0x9888), 0x0c0d8000 },
- { _MMIO(0x9888), 0x0e0da000 },
- { _MMIO(0x9888), 0x000d8000 },
- { _MMIO(0x9888), 0x020da000 },
- { _MMIO(0x9888), 0x040da000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x100f0350 },
- { _MMIO(0x9888), 0x0c0fb000 },
- { _MMIO(0x9888), 0x0e0f00da },
- { _MMIO(0x9888), 0x182c0028 },
- { _MMIO(0x9888), 0x0a2c8000 },
- { _MMIO(0x9888), 0x022dc000 },
- { _MMIO(0x9888), 0x042d4000 },
- { _MMIO(0x9888), 0x0c138000 },
- { _MMIO(0x9888), 0x0e132000 },
- { _MMIO(0x9888), 0x0413c000 },
- { _MMIO(0x9888), 0x1c140018 },
- { _MMIO(0x9888), 0x0c157000 },
- { _MMIO(0x9888), 0x0e150078 },
- { _MMIO(0x9888), 0x10150000 },
- { _MMIO(0x9888), 0x04162180 },
- { _MMIO(0x9888), 0x02160000 },
- { _MMIO(0x9888), 0x04174000 },
- { _MMIO(0x9888), 0x0233a000 },
- { _MMIO(0x9888), 0x04333000 },
- { _MMIO(0x9888), 0x14348000 },
- { _MMIO(0x9888), 0x16348000 },
- { _MMIO(0x9888), 0x02357870 },
- { _MMIO(0x9888), 0x10350000 },
- { _MMIO(0x9888), 0x04360043 },
- { _MMIO(0x9888), 0x02360000 },
- { _MMIO(0x9888), 0x04371000 },
- { _MMIO(0x9888), 0x0e538000 },
- { _MMIO(0x9888), 0x00538000 },
- { _MMIO(0x9888), 0x06533000 },
- { _MMIO(0x9888), 0x1c540020 },
- { _MMIO(0x9888), 0x12548000 },
- { _MMIO(0x9888), 0x0e557000 },
- { _MMIO(0x9888), 0x00557800 },
- { _MMIO(0x9888), 0x10550000 },
- { _MMIO(0x9888), 0x06560043 },
- { _MMIO(0x9888), 0x02560000 },
- { _MMIO(0x9888), 0x06571000 },
- { _MMIO(0x9888), 0x1190ff80 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900060 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900c00 },
- { _MMIO(0x9888), 0x43900842 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900060 },
-};
-
-static int
-get_sampler_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_sampler;
- lens[n] = ARRAY_SIZE(mux_config_sampler);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_tdl_1[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x30800000 },
- { _MMIO(0x2770), 0x00000002 },
- { _MMIO(0x2774), 0x00007fff },
- { _MMIO(0x2778), 0x00000000 },
- { _MMIO(0x277c), 0x00009fff },
- { _MMIO(0x2780), 0x00000002 },
- { _MMIO(0x2784), 0x0000efff },
- { _MMIO(0x2788), 0x00000000 },
- { _MMIO(0x278c), 0x0000f3ff },
- { _MMIO(0x2790), 0x00000002 },
- { _MMIO(0x2794), 0x0000fdff },
- { _MMIO(0x2798), 0x00000000 },
- { _MMIO(0x279c), 0x0000fe7f },
-};
-
-static const struct i915_oa_reg flex_eu_config_tdl_1[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_tdl_1[] = {
- { _MMIO(0x9888), 0x12120000 },
- { _MMIO(0x9888), 0x12320000 },
- { _MMIO(0x9888), 0x12520000 },
- { _MMIO(0x9888), 0x002f8000 },
- { _MMIO(0x9888), 0x022f3000 },
- { _MMIO(0x9888), 0x0a4c0015 },
- { _MMIO(0x9888), 0x0c0d8000 },
- { _MMIO(0x9888), 0x0e0da000 },
- { _MMIO(0x9888), 0x000d8000 },
- { _MMIO(0x9888), 0x020da000 },
- { _MMIO(0x9888), 0x040da000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x100f03a0 },
- { _MMIO(0x9888), 0x0c0ff000 },
- { _MMIO(0x9888), 0x0e0f0095 },
- { _MMIO(0x9888), 0x062c8000 },
- { _MMIO(0x9888), 0x082c8000 },
- { _MMIO(0x9888), 0x0a2c8000 },
- { _MMIO(0x9888), 0x0c2d8000 },
- { _MMIO(0x9888), 0x0e2d4000 },
- { _MMIO(0x9888), 0x062d4000 },
- { _MMIO(0x9888), 0x02108000 },
- { _MMIO(0x9888), 0x0410c000 },
- { _MMIO(0x9888), 0x02118000 },
- { _MMIO(0x9888), 0x0411c000 },
- { _MMIO(0x9888), 0x02121880 },
- { _MMIO(0x9888), 0x041219b5 },
- { _MMIO(0x9888), 0x00120000 },
- { _MMIO(0x9888), 0x02134000 },
- { _MMIO(0x9888), 0x04135000 },
- { _MMIO(0x9888), 0x0c308000 },
- { _MMIO(0x9888), 0x0e304000 },
- { _MMIO(0x9888), 0x06304000 },
- { _MMIO(0x9888), 0x0c318000 },
- { _MMIO(0x9888), 0x0e314000 },
- { _MMIO(0x9888), 0x06314000 },
- { _MMIO(0x9888), 0x0c321a80 },
- { _MMIO(0x9888), 0x0e320033 },
- { _MMIO(0x9888), 0x06320031 },
- { _MMIO(0x9888), 0x00320000 },
- { _MMIO(0x9888), 0x0c334000 },
- { _MMIO(0x9888), 0x0e331000 },
- { _MMIO(0x9888), 0x06331000 },
- { _MMIO(0x9888), 0x0e508000 },
- { _MMIO(0x9888), 0x00508000 },
- { _MMIO(0x9888), 0x02504000 },
- { _MMIO(0x9888), 0x0e518000 },
- { _MMIO(0x9888), 0x00518000 },
- { _MMIO(0x9888), 0x02514000 },
- { _MMIO(0x9888), 0x0e521880 },
- { _MMIO(0x9888), 0x00521a80 },
- { _MMIO(0x9888), 0x02520033 },
- { _MMIO(0x9888), 0x0e534000 },
- { _MMIO(0x9888), 0x00534000 },
- { _MMIO(0x9888), 0x02531000 },
- { _MMIO(0x9888), 0x1190ff80 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900800 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900062 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900c00 },
- { _MMIO(0x9888), 0x43900003 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900040 },
-};
-
-static int
-get_tdl_1_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_tdl_1;
- lens[n] = ARRAY_SIZE(mux_config_tdl_1);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_tdl_2[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x00800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
-};
-
-static const struct i915_oa_reg flex_eu_config_tdl_2[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_tdl_2[] = {
- { _MMIO(0x9888), 0x12124d60 },
- { _MMIO(0x9888), 0x12322e60 },
- { _MMIO(0x9888), 0x12524d60 },
- { _MMIO(0x9888), 0x022f3000 },
- { _MMIO(0x9888), 0x0a4c0014 },
- { _MMIO(0x9888), 0x000d8000 },
- { _MMIO(0x9888), 0x020da000 },
- { _MMIO(0x9888), 0x040da000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x0c0fe000 },
- { _MMIO(0x9888), 0x0e0f0097 },
- { _MMIO(0x9888), 0x082c8000 },
- { _MMIO(0x9888), 0x0a2c8000 },
- { _MMIO(0x9888), 0x002d8000 },
- { _MMIO(0x9888), 0x062d4000 },
- { _MMIO(0x9888), 0x0410c000 },
- { _MMIO(0x9888), 0x0411c000 },
- { _MMIO(0x9888), 0x04121fb7 },
- { _MMIO(0x9888), 0x00120000 },
- { _MMIO(0x9888), 0x04135000 },
- { _MMIO(0x9888), 0x00308000 },
- { _MMIO(0x9888), 0x06304000 },
- { _MMIO(0x9888), 0x00318000 },
- { _MMIO(0x9888), 0x06314000 },
- { _MMIO(0x9888), 0x00321b80 },
- { _MMIO(0x9888), 0x0632003f },
- { _MMIO(0x9888), 0x00334000 },
- { _MMIO(0x9888), 0x06331000 },
- { _MMIO(0x9888), 0x0250c000 },
- { _MMIO(0x9888), 0x0251c000 },
- { _MMIO(0x9888), 0x02521fb7 },
- { _MMIO(0x9888), 0x00520000 },
- { _MMIO(0x9888), 0x02535000 },
- { _MMIO(0x9888), 0x1190fc00 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900800 },
- { _MMIO(0x9888), 0x43900063 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900040 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static int
-get_tdl_2_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_tdl_2;
- lens[n] = ARRAY_SIZE(mux_config_tdl_2);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_compute_extra[] = {
-};
-
-static const struct i915_oa_reg flex_eu_config_compute_extra[] = {
-};
-
-static const struct i915_oa_reg mux_config_compute_extra[] = {
- { _MMIO(0x9888), 0x121203e0 },
- { _MMIO(0x9888), 0x123203e0 },
- { _MMIO(0x9888), 0x125203e0 },
- { _MMIO(0x9888), 0x129203e0 },
- { _MMIO(0x9888), 0x12b203e0 },
- { _MMIO(0x9888), 0x12d203e0 },
- { _MMIO(0x9888), 0x024ec000 },
- { _MMIO(0x9888), 0x044ec000 },
- { _MMIO(0x9888), 0x064ec000 },
- { _MMIO(0x9888), 0x022f4000 },
- { _MMIO(0x9888), 0x084ca000 },
- { _MMIO(0x9888), 0x0a4c0042 },
- { _MMIO(0x9888), 0x000d8000 },
- { _MMIO(0x9888), 0x020da000 },
- { _MMIO(0x9888), 0x040da000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x0c0f5000 },
- { _MMIO(0x9888), 0x0e0f006d },
- { _MMIO(0x9888), 0x022c8000 },
- { _MMIO(0x9888), 0x042c8000 },
- { _MMIO(0x9888), 0x062c8000 },
- { _MMIO(0x9888), 0x0c2c8000 },
- { _MMIO(0x9888), 0x042d8000 },
- { _MMIO(0x9888), 0x06104000 },
- { _MMIO(0x9888), 0x06114000 },
- { _MMIO(0x9888), 0x06120033 },
- { _MMIO(0x9888), 0x00120000 },
- { _MMIO(0x9888), 0x06131000 },
- { _MMIO(0x9888), 0x04308000 },
- { _MMIO(0x9888), 0x04318000 },
- { _MMIO(0x9888), 0x04321980 },
- { _MMIO(0x9888), 0x00320000 },
- { _MMIO(0x9888), 0x04334000 },
- { _MMIO(0x9888), 0x04504000 },
- { _MMIO(0x9888), 0x04514000 },
- { _MMIO(0x9888), 0x04520033 },
- { _MMIO(0x9888), 0x00520000 },
- { _MMIO(0x9888), 0x04531000 },
- { _MMIO(0x9888), 0x00af8000 },
- { _MMIO(0x9888), 0x0acc0001 },
- { _MMIO(0x9888), 0x008d8000 },
- { _MMIO(0x9888), 0x028da000 },
- { _MMIO(0x9888), 0x0c8fb000 },
- { _MMIO(0x9888), 0x0e8f0001 },
- { _MMIO(0x9888), 0x06ac8000 },
- { _MMIO(0x9888), 0x02ad4000 },
- { _MMIO(0x9888), 0x02908000 },
- { _MMIO(0x9888), 0x02918000 },
- { _MMIO(0x9888), 0x02921980 },
- { _MMIO(0x9888), 0x00920000 },
- { _MMIO(0x9888), 0x02934000 },
- { _MMIO(0x9888), 0x02b04000 },
- { _MMIO(0x9888), 0x02b14000 },
- { _MMIO(0x9888), 0x02b20033 },
- { _MMIO(0x9888), 0x00b20000 },
- { _MMIO(0x9888), 0x02b31000 },
- { _MMIO(0x9888), 0x00d08000 },
- { _MMIO(0x9888), 0x00d18000 },
- { _MMIO(0x9888), 0x00d21980 },
- { _MMIO(0x9888), 0x00d34000 },
- { _MMIO(0x9888), 0x1190fc00 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900c00 },
- { _MMIO(0x9888), 0x43900402 },
- { _MMIO(0x9888), 0x53901550 },
- { _MMIO(0x9888), 0x45900080 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static int
-get_compute_extra_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_compute_extra;
- lens[n] = ARRAY_SIZE(mux_config_compute_extra);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_vme_pipe[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x30800000 },
- { _MMIO(0x2770), 0x00100030 },
- { _MMIO(0x2774), 0x0000fff9 },
- { _MMIO(0x2778), 0x00000002 },
- { _MMIO(0x277c), 0x0000fffc },
- { _MMIO(0x2780), 0x00000002 },
- { _MMIO(0x2784), 0x0000fff3 },
- { _MMIO(0x2788), 0x00100180 },
- { _MMIO(0x278c), 0x0000ffcf },
- { _MMIO(0x2790), 0x00000002 },
- { _MMIO(0x2794), 0x0000ffcf },
- { _MMIO(0x2798), 0x00000002 },
- { _MMIO(0x279c), 0x0000ff3f },
-};
-
-static const struct i915_oa_reg flex_eu_config_vme_pipe[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00008003 },
-};
-
-static const struct i915_oa_reg mux_config_vme_pipe[] = {
- { _MMIO(0x9888), 0x141a5800 },
- { _MMIO(0x9888), 0x161a00c0 },
- { _MMIO(0x9888), 0x12180240 },
- { _MMIO(0x9888), 0x14180002 },
- { _MMIO(0x9888), 0x149a5800 },
- { _MMIO(0x9888), 0x169a00c0 },
- { _MMIO(0x9888), 0x12980240 },
- { _MMIO(0x9888), 0x14980002 },
- { _MMIO(0x9888), 0x1a4e3fc0 },
- { _MMIO(0x9888), 0x002f1000 },
- { _MMIO(0x9888), 0x022f8000 },
- { _MMIO(0x9888), 0x042f3000 },
- { _MMIO(0x9888), 0x004c4000 },
- { _MMIO(0x9888), 0x0a4c9500 },
- { _MMIO(0x9888), 0x0c4c002a },
- { _MMIO(0x9888), 0x000d2000 },
- { _MMIO(0x9888), 0x060d8000 },
- { _MMIO(0x9888), 0x080da000 },
- { _MMIO(0x9888), 0x0a0da000 },
- { _MMIO(0x9888), 0x0c0da000 },
- { _MMIO(0x9888), 0x0c0f0400 },
- { _MMIO(0x9888), 0x0e0f5500 },
- { _MMIO(0x9888), 0x100f0015 },
- { _MMIO(0x9888), 0x002c8000 },
- { _MMIO(0x9888), 0x0e2c8000 },
- { _MMIO(0x9888), 0x162caa00 },
- { _MMIO(0x9888), 0x182c000a },
- { _MMIO(0x9888), 0x04193000 },
- { _MMIO(0x9888), 0x081a28c1 },
- { _MMIO(0x9888), 0x001a0000 },
- { _MMIO(0x9888), 0x00133000 },
- { _MMIO(0x9888), 0x0613c000 },
- { _MMIO(0x9888), 0x0813f000 },
- { _MMIO(0x9888), 0x00172000 },
- { _MMIO(0x9888), 0x06178000 },
- { _MMIO(0x9888), 0x0817a000 },
- { _MMIO(0x9888), 0x00180037 },
- { _MMIO(0x9888), 0x06180940 },
- { _MMIO(0x9888), 0x08180000 },
- { _MMIO(0x9888), 0x02180000 },
- { _MMIO(0x9888), 0x04183000 },
- { _MMIO(0x9888), 0x04afc000 },
- { _MMIO(0x9888), 0x06af3000 },
- { _MMIO(0x9888), 0x0acc4000 },
- { _MMIO(0x9888), 0x0ccc0015 },
- { _MMIO(0x9888), 0x0a8da000 },
- { _MMIO(0x9888), 0x0c8da000 },
- { _MMIO(0x9888), 0x0e8f4000 },
- { _MMIO(0x9888), 0x108f0015 },
- { _MMIO(0x9888), 0x16aca000 },
- { _MMIO(0x9888), 0x18ac000a },
- { _MMIO(0x9888), 0x06993000 },
- { _MMIO(0x9888), 0x0c9a28c1 },
- { _MMIO(0x9888), 0x009a0000 },
- { _MMIO(0x9888), 0x0a93f000 },
- { _MMIO(0x9888), 0x0c93f000 },
- { _MMIO(0x9888), 0x0a97a000 },
- { _MMIO(0x9888), 0x0c97a000 },
- { _MMIO(0x9888), 0x0a980977 },
- { _MMIO(0x9888), 0x08980000 },
- { _MMIO(0x9888), 0x04980000 },
- { _MMIO(0x9888), 0x06983000 },
- { _MMIO(0x9888), 0x119000ff },
- { _MMIO(0x9888), 0x51900050 },
- { _MMIO(0x9888), 0x41900000 },
- { _MMIO(0x9888), 0x55900115 },
- { _MMIO(0x9888), 0x45900000 },
- { _MMIO(0x9888), 0x47900884 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900002 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static int
-get_vme_pipe_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_vme_pipe;
- lens[n] = ARRAY_SIZE(mux_config_vme_pipe);
- n++;
-
- return n;
-}
-
static const struct i915_oa_reg b_counter_config_test_oa[] = {
{ _MMIO(0x2740), 0x00000000 },
{ _MMIO(0x2744), 0x00800000 },
@@ -1930,6 +60,7 @@ static const struct i915_oa_reg flex_eu_config_test_oa[] = {
};
static const struct i915_oa_reg mux_config_test_oa[] = {
+ { _MMIO(0x9840), 0x00000080 },
{ _MMIO(0x9888), 0x11810000 },
{ _MMIO(0x9888), 0x07810013 },
{ _MMIO(0x9888), 0x1f810000 },
@@ -1944,1096 +75,35 @@ static const struct i915_oa_reg mux_config_test_oa[] = {
{ _MMIO(0x9888), 0x33900000 },
};
-static int
-get_test_oa_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_test_oa;
- lens[n] = ARRAY_SIZE(mux_config_test_oa);
- n++;
-
- return n;
-}
-
-int i915_oa_select_metric_set_sklgt3(struct drm_i915_private *dev_priv)
-{
- dev_priv->perf.oa.n_mux_configs = 0;
- dev_priv->perf.oa.b_counter_regs = NULL;
- dev_priv->perf.oa.b_counter_regs_len = 0;
- dev_priv->perf.oa.flex_regs = NULL;
- dev_priv->perf.oa.flex_regs_len = 0;
-
- switch (dev_priv->perf.oa.metrics_set) {
- case METRIC_SET_ID_RENDER_BASIC:
- dev_priv->perf.oa.n_mux_configs =
- get_render_basic_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_BASIC\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_render_basic;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_render_basic);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_render_basic;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_render_basic);
-
- return 0;
- case METRIC_SET_ID_COMPUTE_BASIC:
- dev_priv->perf.oa.n_mux_configs =
- get_compute_basic_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_BASIC\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_compute_basic;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_compute_basic);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_compute_basic;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_compute_basic);
-
- return 0;
- case METRIC_SET_ID_RENDER_PIPE_PROFILE:
- dev_priv->perf.oa.n_mux_configs =
- get_render_pipe_profile_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_PIPE_PROFILE\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_render_pipe_profile;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_render_pipe_profile);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_render_pipe_profile;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_render_pipe_profile);
-
- return 0;
- case METRIC_SET_ID_MEMORY_READS:
- dev_priv->perf.oa.n_mux_configs =
- get_memory_reads_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_READS\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_memory_reads;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_memory_reads);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_memory_reads;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_memory_reads);
-
- return 0;
- case METRIC_SET_ID_MEMORY_WRITES:
- dev_priv->perf.oa.n_mux_configs =
- get_memory_writes_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_WRITES\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_memory_writes;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_memory_writes);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_memory_writes;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_memory_writes);
-
- return 0;
- case METRIC_SET_ID_COMPUTE_EXTENDED:
- dev_priv->perf.oa.n_mux_configs =
- get_compute_extended_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTENDED\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_compute_extended;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_compute_extended);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_compute_extended;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_compute_extended);
-
- return 0;
- case METRIC_SET_ID_COMPUTE_L3_CACHE:
- dev_priv->perf.oa.n_mux_configs =
- get_compute_l3_cache_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_L3_CACHE\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_compute_l3_cache;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_compute_l3_cache);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_compute_l3_cache;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_compute_l3_cache);
-
- return 0;
- case METRIC_SET_ID_HDC_AND_SF:
- dev_priv->perf.oa.n_mux_configs =
- get_hdc_and_sf_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"HDC_AND_SF\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_hdc_and_sf;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_hdc_and_sf);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_hdc_and_sf;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_hdc_and_sf);
-
- return 0;
- case METRIC_SET_ID_L3_1:
- dev_priv->perf.oa.n_mux_configs =
- get_l3_1_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_1\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_l3_1;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_l3_1);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_l3_1;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_l3_1);
-
- return 0;
- case METRIC_SET_ID_L3_2:
- dev_priv->perf.oa.n_mux_configs =
- get_l3_2_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_2\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_l3_2;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_l3_2);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_l3_2;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_l3_2);
-
- return 0;
- case METRIC_SET_ID_L3_3:
- dev_priv->perf.oa.n_mux_configs =
- get_l3_3_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_3\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_l3_3;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_l3_3);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_l3_3;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_l3_3);
-
- return 0;
- case METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND:
- dev_priv->perf.oa.n_mux_configs =
- get_rasterizer_and_pixel_backend_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"RASTERIZER_AND_PIXEL_BACKEND\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_rasterizer_and_pixel_backend;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_rasterizer_and_pixel_backend);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_rasterizer_and_pixel_backend;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_rasterizer_and_pixel_backend);
-
- return 0;
- case METRIC_SET_ID_SAMPLER:
- dev_priv->perf.oa.n_mux_configs =
- get_sampler_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"SAMPLER\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_sampler;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_sampler);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_sampler;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_sampler);
-
- return 0;
- case METRIC_SET_ID_TDL_1:
- dev_priv->perf.oa.n_mux_configs =
- get_tdl_1_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"TDL_1\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_tdl_1;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_tdl_1);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_tdl_1;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_tdl_1);
-
- return 0;
- case METRIC_SET_ID_TDL_2:
- dev_priv->perf.oa.n_mux_configs =
- get_tdl_2_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"TDL_2\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_tdl_2;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_tdl_2);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_tdl_2;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_tdl_2);
-
- return 0;
- case METRIC_SET_ID_COMPUTE_EXTRA:
- dev_priv->perf.oa.n_mux_configs =
- get_compute_extra_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTRA\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_compute_extra;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_compute_extra);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_compute_extra;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_compute_extra);
-
- return 0;
- case METRIC_SET_ID_VME_PIPE:
- dev_priv->perf.oa.n_mux_configs =
- get_vme_pipe_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"VME_PIPE\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_vme_pipe;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_vme_pipe);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_vme_pipe;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_vme_pipe);
-
- return 0;
- case METRIC_SET_ID_TEST_OA:
- dev_priv->perf.oa.n_mux_configs =
- get_test_oa_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"TEST_OA\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_test_oa;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_test_oa;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_test_oa);
-
- return 0;
- default:
- return -ENODEV;
- }
-}
-
-static ssize_t
-show_render_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_BASIC);
-}
-
-static struct device_attribute dev_attr_render_basic_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_render_basic_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_render_basic[] = {
- &dev_attr_render_basic_id.attr,
- NULL,
-};
-
-static struct attribute_group group_render_basic = {
- .name = "4616d450-2393-4836-8146-53c5ed84d359",
- .attrs = attrs_render_basic,
-};
-
-static ssize_t
-show_compute_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_BASIC);
-}
-
-static struct device_attribute dev_attr_compute_basic_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_compute_basic_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_compute_basic[] = {
- &dev_attr_compute_basic_id.attr,
- NULL,
-};
-
-static struct attribute_group group_compute_basic = {
- .name = "4320492b-fd03-42ac-922f-dbe1ef3b7b58",
- .attrs = attrs_compute_basic,
-};
-
-static ssize_t
-show_render_pipe_profile_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_PIPE_PROFILE);
-}
-
-static struct device_attribute dev_attr_render_pipe_profile_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_render_pipe_profile_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_render_pipe_profile[] = {
- &dev_attr_render_pipe_profile_id.attr,
- NULL,
-};
-
-static struct attribute_group group_render_pipe_profile = {
- .name = "bd2d9cae-b9ec-4f5b-9d2f-934bed398a2d",
- .attrs = attrs_render_pipe_profile,
-};
-
-static ssize_t
-show_memory_reads_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_READS);
-}
-
-static struct device_attribute dev_attr_memory_reads_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_memory_reads_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_memory_reads[] = {
- &dev_attr_memory_reads_id.attr,
- NULL,
-};
-
-static struct attribute_group group_memory_reads = {
- .name = "4ca0f3fe-7fd3-4924-98cb-1807d9879767",
- .attrs = attrs_memory_reads,
-};
-
-static ssize_t
-show_memory_writes_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_WRITES);
-}
-
-static struct device_attribute dev_attr_memory_writes_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_memory_writes_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_memory_writes[] = {
- &dev_attr_memory_writes_id.attr,
- NULL,
-};
-
-static struct attribute_group group_memory_writes = {
- .name = "a0c0172c-ee13-403d-99ff-2bdf6936cf14",
- .attrs = attrs_memory_writes,
-};
-
-static ssize_t
-show_compute_extended_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_EXTENDED);
-}
-
-static struct device_attribute dev_attr_compute_extended_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_compute_extended_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_compute_extended[] = {
- &dev_attr_compute_extended_id.attr,
- NULL,
-};
-
-static struct attribute_group group_compute_extended = {
- .name = "52435e0b-f188-42ea-8680-21a56ee20dee",
- .attrs = attrs_compute_extended,
-};
-
-static ssize_t
-show_compute_l3_cache_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_L3_CACHE);
-}
-
-static struct device_attribute dev_attr_compute_l3_cache_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_compute_l3_cache_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_compute_l3_cache[] = {
- &dev_attr_compute_l3_cache_id.attr,
- NULL,
-};
-
-static struct attribute_group group_compute_l3_cache = {
- .name = "27076eeb-49f3-4fed-8423-c66506005c63",
- .attrs = attrs_compute_l3_cache,
-};
-
-static ssize_t
-show_hdc_and_sf_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_HDC_AND_SF);
-}
-
-static struct device_attribute dev_attr_hdc_and_sf_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_hdc_and_sf_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_hdc_and_sf[] = {
- &dev_attr_hdc_and_sf_id.attr,
- NULL,
-};
-
-static struct attribute_group group_hdc_and_sf = {
- .name = "8071b409-c39a-4674-94d7-32962ecfb512",
- .attrs = attrs_hdc_and_sf,
-};
-
-static ssize_t
-show_l3_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_L3_1);
-}
-
-static struct device_attribute dev_attr_l3_1_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_l3_1_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_l3_1[] = {
- &dev_attr_l3_1_id.attr,
- NULL,
-};
-
-static struct attribute_group group_l3_1 = {
- .name = "5e0b391e-9ea8-4901-b2ff-b64ff616c7ed",
- .attrs = attrs_l3_1,
-};
-
-static ssize_t
-show_l3_2_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_L3_2);
-}
-
-static struct device_attribute dev_attr_l3_2_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_l3_2_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_l3_2[] = {
- &dev_attr_l3_2_id.attr,
- NULL,
-};
-
-static struct attribute_group group_l3_2 = {
- .name = "25dc828e-1d2d-426e-9546-a1d4233cdf16",
- .attrs = attrs_l3_2,
-};
-
-static ssize_t
-show_l3_3_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_L3_3);
-}
-
-static struct device_attribute dev_attr_l3_3_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_l3_3_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_l3_3[] = {
- &dev_attr_l3_3_id.attr,
- NULL,
-};
-
-static struct attribute_group group_l3_3 = {
- .name = "3dba9405-2d7e-4d70-8199-e734e82fd6bf",
- .attrs = attrs_l3_3,
-};
-
-static ssize_t
-show_rasterizer_and_pixel_backend_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND);
-}
-
-static struct device_attribute dev_attr_rasterizer_and_pixel_backend_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_rasterizer_and_pixel_backend_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_rasterizer_and_pixel_backend[] = {
- &dev_attr_rasterizer_and_pixel_backend_id.attr,
- NULL,
-};
-
-static struct attribute_group group_rasterizer_and_pixel_backend = {
- .name = "76935d7b-09c9-46bf-87f1-c18b4a86ebe5",
- .attrs = attrs_rasterizer_and_pixel_backend,
-};
-
-static ssize_t
-show_sampler_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_SAMPLER);
-}
-
-static struct device_attribute dev_attr_sampler_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_sampler_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_sampler[] = {
- &dev_attr_sampler_id.attr,
- NULL,
-};
-
-static struct attribute_group group_sampler = {
- .name = "1b34c0d6-4f4c-4d7b-833f-4aaf236d87a6",
- .attrs = attrs_sampler,
-};
-
-static ssize_t
-show_tdl_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_TDL_1);
-}
-
-static struct device_attribute dev_attr_tdl_1_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_tdl_1_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_tdl_1[] = {
- &dev_attr_tdl_1_id.attr,
- NULL,
-};
-
-static struct attribute_group group_tdl_1 = {
- .name = "b375c985-9953-455b-bda2-b03f7594e9db",
- .attrs = attrs_tdl_1,
-};
-
-static ssize_t
-show_tdl_2_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_TDL_2);
-}
-
-static struct device_attribute dev_attr_tdl_2_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_tdl_2_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_tdl_2[] = {
- &dev_attr_tdl_2_id.attr,
- NULL,
-};
-
-static struct attribute_group group_tdl_2 = {
- .name = "3e2be2bb-884a-49bb-82c5-2358e6bd5f2d",
- .attrs = attrs_tdl_2,
-};
-
-static ssize_t
-show_compute_extra_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_EXTRA);
-}
-
-static struct device_attribute dev_attr_compute_extra_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_compute_extra_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_compute_extra[] = {
- &dev_attr_compute_extra_id.attr,
- NULL,
-};
-
-static struct attribute_group group_compute_extra = {
- .name = "2d80a648-7b5a-4e92-bbe7-3b5c76f2e221",
- .attrs = attrs_compute_extra,
-};
-
-static ssize_t
-show_vme_pipe_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_VME_PIPE);
-}
-
-static struct device_attribute dev_attr_vme_pipe_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_vme_pipe_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_vme_pipe[] = {
- &dev_attr_vme_pipe_id.attr,
- NULL,
-};
-
-static struct attribute_group group_vme_pipe = {
- .name = "cfae9232-6ffc-42cc-a703-9790016925f0",
- .attrs = attrs_vme_pipe,
-};
-
static ssize_t
show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", METRIC_SET_ID_TEST_OA);
+ return sprintf(buf, "1\n");
}
-static struct device_attribute dev_attr_test_oa_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_test_oa_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_test_oa[] = {
- &dev_attr_test_oa_id.attr,
- NULL,
-};
-
-static struct attribute_group group_test_oa = {
- .name = "2b985803-d3c9-4629-8a4f-634bfecba0e8",
- .attrs = attrs_test_oa,
-};
-
-int
-i915_perf_register_sysfs_sklgt3(struct drm_i915_private *dev_priv)
+void
+i915_perf_load_test_config_sklgt3(struct drm_i915_private *dev_priv)
{
- const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
- int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];
- int ret = 0;
+ strncpy(dev_priv->perf.oa.test_config.uuid,
+ "2b985803-d3c9-4629-8a4f-634bfecba0e8",
+ UUID_STRING_LEN);
+ dev_priv->perf.oa.test_config.id = 1;
- if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_basic);
- if (ret)
- goto error_render_basic;
- }
- if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
- if (ret)
- goto error_compute_basic;
- }
- if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
- if (ret)
- goto error_render_pipe_profile;
- }
- if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
- if (ret)
- goto error_memory_reads;
- }
- if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
- if (ret)
- goto error_memory_writes;
- }
- if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
- if (ret)
- goto error_compute_extended;
- }
- if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
- if (ret)
- goto error_compute_l3_cache;
- }
- if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
- if (ret)
- goto error_hdc_and_sf;
- }
- if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_1);
- if (ret)
- goto error_l3_1;
- }
- if (get_l3_2_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_2);
- if (ret)
- goto error_l3_2;
- }
- if (get_l3_3_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_3);
- if (ret)
- goto error_l3_3;
- }
- if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
- if (ret)
- goto error_rasterizer_and_pixel_backend;
- }
- if (get_sampler_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_sampler);
- if (ret)
- goto error_sampler;
- }
- if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
- if (ret)
- goto error_tdl_1;
- }
- if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
- if (ret)
- goto error_tdl_2;
- }
- if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
- if (ret)
- goto error_compute_extra;
- }
- if (get_vme_pipe_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_vme_pipe);
- if (ret)
- goto error_vme_pipe;
- }
- if (get_test_oa_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_test_oa);
- if (ret)
- goto error_test_oa;
- }
+ dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- return 0;
+ dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-error_test_oa:
- if (get_vme_pipe_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_vme_pipe);
-error_vme_pipe:
- if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
-error_compute_extra:
- if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
-error_tdl_2:
- if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
-error_tdl_1:
- if (get_sampler_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler);
-error_sampler:
- if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
-error_rasterizer_and_pixel_backend:
- if (get_l3_3_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_3);
-error_l3_3:
- if (get_l3_2_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_2);
-error_l3_2:
- if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_1);
-error_l3_1:
- if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
-error_hdc_and_sf:
- if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
-error_compute_l3_cache:
- if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
-error_compute_extended:
- if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
-error_memory_writes:
- if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
-error_memory_reads:
- if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
-error_render_pipe_profile:
- if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
-error_compute_basic:
- if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
-error_render_basic:
- return ret;
-}
+ dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-void
-i915_perf_unregister_sysfs_sklgt3(struct drm_i915_private *dev_priv)
-{
- const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
- int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];
+ dev_priv->perf.oa.test_config.sysfs_metric.name = "2b985803-d3c9-4629-8a4f-634bfecba0e8";
+ dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+
+ dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
- if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
- if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
- if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
- if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
- if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
- if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
- if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
- if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
- if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_1);
- if (get_l3_2_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_2);
- if (get_l3_3_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_3);
- if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
- if (get_sampler_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler);
- if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
- if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
- if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
- if (get_vme_pipe_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_vme_pipe);
- if (get_test_oa_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_test_oa);
+ dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
}
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt3.h b/drivers/gpu/drm/i915/i915_oa_sklgt3.h
index c0accb1f9b74..06746b2616c8 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt3.h
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt3.h
@@ -29,12 +29,6 @@
#ifndef __I915_OA_SKLGT3_H__
#define __I915_OA_SKLGT3_H__
-extern int i915_oa_n_builtin_metric_sets_sklgt3;
-
-extern int i915_oa_select_metric_set_sklgt3(struct drm_i915_private *dev_priv);
-
-extern int i915_perf_register_sysfs_sklgt3(struct drm_i915_private *dev_priv);
-
-extern void i915_perf_unregister_sysfs_sklgt3(struct drm_i915_private *dev_priv);
+extern void i915_perf_load_test_config_sklgt3(struct drm_i915_private *dev_priv);
#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt4.c b/drivers/gpu/drm/i915/i915_oa_sklgt4.c
index 9ddab43a2176..bce031ee4445 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt4.c
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt4.c
@@ -31,1930 +31,6 @@
#include "i915_drv.h"
#include "i915_oa_sklgt4.h"
-enum metric_set_id {
- METRIC_SET_ID_RENDER_BASIC = 1,
- METRIC_SET_ID_COMPUTE_BASIC,
- METRIC_SET_ID_RENDER_PIPE_PROFILE,
- METRIC_SET_ID_MEMORY_READS,
- METRIC_SET_ID_MEMORY_WRITES,
- METRIC_SET_ID_COMPUTE_EXTENDED,
- METRIC_SET_ID_COMPUTE_L3_CACHE,
- METRIC_SET_ID_HDC_AND_SF,
- METRIC_SET_ID_L3_1,
- METRIC_SET_ID_L3_2,
- METRIC_SET_ID_L3_3,
- METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND,
- METRIC_SET_ID_SAMPLER,
- METRIC_SET_ID_TDL_1,
- METRIC_SET_ID_TDL_2,
- METRIC_SET_ID_COMPUTE_EXTRA,
- METRIC_SET_ID_VME_PIPE,
- METRIC_SET_ID_TEST_OA,
-};
-
-int i915_oa_n_builtin_metric_sets_sklgt4 = 18;
-
-static const struct i915_oa_reg b_counter_config_render_basic[] = {
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x00800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2740), 0x00000000 },
-};
-
-static const struct i915_oa_reg flex_eu_config_render_basic[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_render_basic[] = {
- { _MMIO(0x9888), 0x166c01e0 },
- { _MMIO(0x9888), 0x12170280 },
- { _MMIO(0x9888), 0x12370280 },
- { _MMIO(0x9888), 0x16ec01e0 },
- { _MMIO(0x9888), 0x176c01e0 },
- { _MMIO(0x9888), 0x11930317 },
- { _MMIO(0x9888), 0x159303df },
- { _MMIO(0x9888), 0x3f900003 },
- { _MMIO(0x9888), 0x1a4e03b0 },
- { _MMIO(0x9888), 0x0a6c0053 },
- { _MMIO(0x9888), 0x106c0000 },
- { _MMIO(0x9888), 0x1c6c0000 },
- { _MMIO(0x9888), 0x0a1b4000 },
- { _MMIO(0x9888), 0x1c1c0001 },
- { _MMIO(0x9888), 0x002f1000 },
- { _MMIO(0x9888), 0x042f1000 },
- { _MMIO(0x9888), 0x004c4000 },
- { _MMIO(0x9888), 0x0a4ca400 },
- { _MMIO(0x9888), 0x0c4c0002 },
- { _MMIO(0x9888), 0x000d2000 },
- { _MMIO(0x9888), 0x060d8000 },
- { _MMIO(0x9888), 0x080da000 },
- { _MMIO(0x9888), 0x0a0da000 },
- { _MMIO(0x9888), 0x0c0f0400 },
- { _MMIO(0x9888), 0x0e0f5600 },
- { _MMIO(0x9888), 0x100f0001 },
- { _MMIO(0x9888), 0x002c8000 },
- { _MMIO(0x9888), 0x162caa00 },
- { _MMIO(0x9888), 0x062d8000 },
- { _MMIO(0x9888), 0x00133000 },
- { _MMIO(0x9888), 0x08133000 },
- { _MMIO(0x9888), 0x00170020 },
- { _MMIO(0x9888), 0x08170021 },
- { _MMIO(0x9888), 0x10170000 },
- { _MMIO(0x9888), 0x0633c000 },
- { _MMIO(0x9888), 0x06370800 },
- { _MMIO(0x9888), 0x10370000 },
- { _MMIO(0x9888), 0x1ace0230 },
- { _MMIO(0x9888), 0x0aec5300 },
- { _MMIO(0x9888), 0x10ec0000 },
- { _MMIO(0x9888), 0x1cec0000 },
- { _MMIO(0x9888), 0x0a9b8000 },
- { _MMIO(0x9888), 0x1c9c0002 },
- { _MMIO(0x9888), 0x0acc2000 },
- { _MMIO(0x9888), 0x0ccc0002 },
- { _MMIO(0x9888), 0x088d8000 },
- { _MMIO(0x9888), 0x0a8d8000 },
- { _MMIO(0x9888), 0x0e8f1000 },
- { _MMIO(0x9888), 0x108f0001 },
- { _MMIO(0x9888), 0x16ac8800 },
- { _MMIO(0x9888), 0x1b4e0020 },
- { _MMIO(0x9888), 0x096c5300 },
- { _MMIO(0x9888), 0x116c0000 },
- { _MMIO(0x9888), 0x1d6c0000 },
- { _MMIO(0x9888), 0x091b8000 },
- { _MMIO(0x9888), 0x1b1c8000 },
- { _MMIO(0x9888), 0x0b4c2000 },
- { _MMIO(0x9888), 0x090d8000 },
- { _MMIO(0x9888), 0x0f0f1000 },
- { _MMIO(0x9888), 0x172c0800 },
- { _MMIO(0x9888), 0x0d933031 },
- { _MMIO(0x9888), 0x0f933e3f },
- { _MMIO(0x9888), 0x01933d00 },
- { _MMIO(0x9888), 0x0393073c },
- { _MMIO(0x9888), 0x0593000e },
- { _MMIO(0x9888), 0x1d930000 },
- { _MMIO(0x9888), 0x19930000 },
- { _MMIO(0x9888), 0x1b930000 },
- { _MMIO(0x9888), 0x1d900157 },
- { _MMIO(0x9888), 0x1f900158 },
- { _MMIO(0x9888), 0x35900000 },
- { _MMIO(0x9888), 0x2b908000 },
- { _MMIO(0x9888), 0x2d908000 },
- { _MMIO(0x9888), 0x2f908000 },
- { _MMIO(0x9888), 0x31908000 },
- { _MMIO(0x9888), 0x15908000 },
- { _MMIO(0x9888), 0x17908000 },
- { _MMIO(0x9888), 0x19908000 },
- { _MMIO(0x9888), 0x1b908000 },
- { _MMIO(0x9888), 0x1190003f },
- { _MMIO(0x9888), 0x5190ff30 },
- { _MMIO(0x9888), 0x41900060 },
- { _MMIO(0x9888), 0x55903033 },
- { _MMIO(0x9888), 0x45901421 },
- { _MMIO(0x9888), 0x47900803 },
- { _MMIO(0x9888), 0x5790fff1 },
- { _MMIO(0x9888), 0x49900001 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900000 },
- { _MMIO(0x9888), 0x5990000f },
- { _MMIO(0x9888), 0x43900000 },
- { _MMIO(0x9888), 0x5390ffff },
-};
-
-static int
-get_render_basic_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_render_basic;
- lens[n] = ARRAY_SIZE(mux_config_render_basic);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_compute_basic[] = {
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x00800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2740), 0x00000000 },
-};
-
-static const struct i915_oa_reg flex_eu_config_compute_basic[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00000003 },
- { _MMIO(0xe658), 0x00002001 },
- { _MMIO(0xe758), 0x00778008 },
- { _MMIO(0xe45c), 0x00088078 },
- { _MMIO(0xe55c), 0x00808708 },
- { _MMIO(0xe65c), 0x00a08908 },
-};
-
-static const struct i915_oa_reg mux_config_compute_basic[] = {
- { _MMIO(0x9888), 0x104f00e0 },
- { _MMIO(0x9888), 0x124f1c00 },
- { _MMIO(0x9888), 0x106c00e0 },
- { _MMIO(0x9888), 0x37906800 },
- { _MMIO(0x9888), 0x3f900003 },
- { _MMIO(0x9888), 0x004e8000 },
- { _MMIO(0x9888), 0x1a4e0820 },
- { _MMIO(0x9888), 0x1c4e0002 },
- { _MMIO(0x9888), 0x064f0900 },
- { _MMIO(0x9888), 0x084f0032 },
- { _MMIO(0x9888), 0x0a4f1891 },
- { _MMIO(0x9888), 0x0c4f0e00 },
- { _MMIO(0x9888), 0x0e4f003c },
- { _MMIO(0x9888), 0x004f0d80 },
- { _MMIO(0x9888), 0x024f003b },
- { _MMIO(0x9888), 0x006c0002 },
- { _MMIO(0x9888), 0x086c0100 },
- { _MMIO(0x9888), 0x0c6c000c },
- { _MMIO(0x9888), 0x0e6c0b00 },
- { _MMIO(0x9888), 0x186c0000 },
- { _MMIO(0x9888), 0x1c6c0000 },
- { _MMIO(0x9888), 0x1e6c0000 },
- { _MMIO(0x9888), 0x001b4000 },
- { _MMIO(0x9888), 0x081b8000 },
- { _MMIO(0x9888), 0x0c1b4000 },
- { _MMIO(0x9888), 0x0e1b8000 },
- { _MMIO(0x9888), 0x101c8000 },
- { _MMIO(0x9888), 0x1a1c8000 },
- { _MMIO(0x9888), 0x1c1c0024 },
- { _MMIO(0x9888), 0x065b8000 },
- { _MMIO(0x9888), 0x085b4000 },
- { _MMIO(0x9888), 0x0a5bc000 },
- { _MMIO(0x9888), 0x0c5b8000 },
- { _MMIO(0x9888), 0x0e5b4000 },
- { _MMIO(0x9888), 0x005b8000 },
- { _MMIO(0x9888), 0x025b4000 },
- { _MMIO(0x9888), 0x1a5c6000 },
- { _MMIO(0x9888), 0x1c5c001b },
- { _MMIO(0x9888), 0x125c8000 },
- { _MMIO(0x9888), 0x145c8000 },
- { _MMIO(0x9888), 0x004c8000 },
- { _MMIO(0x9888), 0x0a4c2000 },
- { _MMIO(0x9888), 0x0c4c0208 },
- { _MMIO(0x9888), 0x000da000 },
- { _MMIO(0x9888), 0x060d8000 },
- { _MMIO(0x9888), 0x080da000 },
- { _MMIO(0x9888), 0x0a0da000 },
- { _MMIO(0x9888), 0x0c0da000 },
- { _MMIO(0x9888), 0x0e0da000 },
- { _MMIO(0x9888), 0x020d2000 },
- { _MMIO(0x9888), 0x0c0f5400 },
- { _MMIO(0x9888), 0x0e0f5500 },
- { _MMIO(0x9888), 0x100f0155 },
- { _MMIO(0x9888), 0x002c8000 },
- { _MMIO(0x9888), 0x0e2cc000 },
- { _MMIO(0x9888), 0x162cfb00 },
- { _MMIO(0x9888), 0x182c00be },
- { _MMIO(0x9888), 0x022cc000 },
- { _MMIO(0x9888), 0x042cc000 },
- { _MMIO(0x9888), 0x19900157 },
- { _MMIO(0x9888), 0x1b900158 },
- { _MMIO(0x9888), 0x1d900105 },
- { _MMIO(0x9888), 0x1f900103 },
- { _MMIO(0x9888), 0x35900000 },
- { _MMIO(0x9888), 0x11900fff },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900800 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x45900821 },
- { _MMIO(0x9888), 0x47900802 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900802 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900002 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x43900422 },
- { _MMIO(0x9888), 0x53905555 },
-};
-
-static int
-get_compute_basic_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_compute_basic;
- lens[n] = ARRAY_SIZE(mux_config_compute_basic);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_render_pipe_profile[] = {
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2770), 0x0007ffea },
- { _MMIO(0x2774), 0x00007ffc },
- { _MMIO(0x2778), 0x0007affa },
- { _MMIO(0x277c), 0x0000f5fd },
- { _MMIO(0x2780), 0x00079ffa },
- { _MMIO(0x2784), 0x0000f3fb },
- { _MMIO(0x2788), 0x0007bf7a },
- { _MMIO(0x278c), 0x0000f7e7 },
- { _MMIO(0x2790), 0x0007fefa },
- { _MMIO(0x2794), 0x0000f7cf },
- { _MMIO(0x2798), 0x00077ffa },
- { _MMIO(0x279c), 0x0000efdf },
- { _MMIO(0x27a0), 0x0006fffa },
- { _MMIO(0x27a4), 0x0000cfbf },
- { _MMIO(0x27a8), 0x0003fffa },
- { _MMIO(0x27ac), 0x00005f7f },
-};
-
-static const struct i915_oa_reg flex_eu_config_render_pipe_profile[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00015014 },
- { _MMIO(0xe658), 0x00025024 },
- { _MMIO(0xe758), 0x00035034 },
- { _MMIO(0xe45c), 0x00045044 },
- { _MMIO(0xe55c), 0x00055054 },
- { _MMIO(0xe65c), 0x00065064 },
-};
-
-static const struct i915_oa_reg mux_config_render_pipe_profile[] = {
- { _MMIO(0x9888), 0x0c0e001f },
- { _MMIO(0x9888), 0x0a0f0000 },
- { _MMIO(0x9888), 0x10116800 },
- { _MMIO(0x9888), 0x178a03e0 },
- { _MMIO(0x9888), 0x11824c00 },
- { _MMIO(0x9888), 0x11830020 },
- { _MMIO(0x9888), 0x13840020 },
- { _MMIO(0x9888), 0x11850019 },
- { _MMIO(0x9888), 0x11860007 },
- { _MMIO(0x9888), 0x01870c40 },
- { _MMIO(0x9888), 0x17880000 },
- { _MMIO(0x9888), 0x022f4000 },
- { _MMIO(0x9888), 0x0a4c0040 },
- { _MMIO(0x9888), 0x0c0d8000 },
- { _MMIO(0x9888), 0x040d4000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x020e5400 },
- { _MMIO(0x9888), 0x000e0000 },
- { _MMIO(0x9888), 0x080f0040 },
- { _MMIO(0x9888), 0x000f0000 },
- { _MMIO(0x9888), 0x100f0000 },
- { _MMIO(0x9888), 0x0e0f0040 },
- { _MMIO(0x9888), 0x0c2c8000 },
- { _MMIO(0x9888), 0x06104000 },
- { _MMIO(0x9888), 0x06110012 },
- { _MMIO(0x9888), 0x06131000 },
- { _MMIO(0x9888), 0x01898000 },
- { _MMIO(0x9888), 0x0d890100 },
- { _MMIO(0x9888), 0x03898000 },
- { _MMIO(0x9888), 0x09808000 },
- { _MMIO(0x9888), 0x0b808000 },
- { _MMIO(0x9888), 0x0380c000 },
- { _MMIO(0x9888), 0x0f8a0075 },
- { _MMIO(0x9888), 0x1d8a0000 },
- { _MMIO(0x9888), 0x118a8000 },
- { _MMIO(0x9888), 0x1b8a4000 },
- { _MMIO(0x9888), 0x138a8000 },
- { _MMIO(0x9888), 0x1d81a000 },
- { _MMIO(0x9888), 0x15818000 },
- { _MMIO(0x9888), 0x17818000 },
- { _MMIO(0x9888), 0x0b820030 },
- { _MMIO(0x9888), 0x07828000 },
- { _MMIO(0x9888), 0x0d824000 },
- { _MMIO(0x9888), 0x0f828000 },
- { _MMIO(0x9888), 0x05824000 },
- { _MMIO(0x9888), 0x0d830003 },
- { _MMIO(0x9888), 0x0583000c },
- { _MMIO(0x9888), 0x09830000 },
- { _MMIO(0x9888), 0x03838000 },
- { _MMIO(0x9888), 0x07838000 },
- { _MMIO(0x9888), 0x0b840980 },
- { _MMIO(0x9888), 0x03844d80 },
- { _MMIO(0x9888), 0x11840000 },
- { _MMIO(0x9888), 0x09848000 },
- { _MMIO(0x9888), 0x09850080 },
- { _MMIO(0x9888), 0x03850003 },
- { _MMIO(0x9888), 0x01850000 },
- { _MMIO(0x9888), 0x07860000 },
- { _MMIO(0x9888), 0x0f860400 },
- { _MMIO(0x9888), 0x09870032 },
- { _MMIO(0x9888), 0x01888052 },
- { _MMIO(0x9888), 0x11880000 },
- { _MMIO(0x9888), 0x09884000 },
- { _MMIO(0x9888), 0x1b931001 },
- { _MMIO(0x9888), 0x1d930001 },
- { _MMIO(0x9888), 0x19934000 },
- { _MMIO(0x9888), 0x1b958000 },
- { _MMIO(0x9888), 0x1d950094 },
- { _MMIO(0x9888), 0x19958000 },
- { _MMIO(0x9888), 0x09e58000 },
- { _MMIO(0x9888), 0x0be58000 },
- { _MMIO(0x9888), 0x03e5c000 },
- { _MMIO(0x9888), 0x0592c000 },
- { _MMIO(0x9888), 0x0b928000 },
- { _MMIO(0x9888), 0x0d924000 },
- { _MMIO(0x9888), 0x0f924000 },
- { _MMIO(0x9888), 0x11928000 },
- { _MMIO(0x9888), 0x1392c000 },
- { _MMIO(0x9888), 0x09924000 },
- { _MMIO(0x9888), 0x01985000 },
- { _MMIO(0x9888), 0x07988000 },
- { _MMIO(0x9888), 0x09981000 },
- { _MMIO(0x9888), 0x0b982000 },
- { _MMIO(0x9888), 0x0d982000 },
- { _MMIO(0x9888), 0x0f989000 },
- { _MMIO(0x9888), 0x05982000 },
- { _MMIO(0x9888), 0x13904000 },
- { _MMIO(0x9888), 0x21904000 },
- { _MMIO(0x9888), 0x23904000 },
- { _MMIO(0x9888), 0x25908000 },
- { _MMIO(0x9888), 0x27904000 },
- { _MMIO(0x9888), 0x29908000 },
- { _MMIO(0x9888), 0x2b904000 },
- { _MMIO(0x9888), 0x2f904000 },
- { _MMIO(0x9888), 0x31904000 },
- { _MMIO(0x9888), 0x15904000 },
- { _MMIO(0x9888), 0x17908000 },
- { _MMIO(0x9888), 0x19908000 },
- { _MMIO(0x9888), 0x1b904000 },
- { _MMIO(0x9888), 0x1190c080 },
- { _MMIO(0x9888), 0x51901110 },
- { _MMIO(0x9888), 0x41900440 },
- { _MMIO(0x9888), 0x55901111 },
- { _MMIO(0x9888), 0x45900400 },
- { _MMIO(0x9888), 0x47900c21 },
- { _MMIO(0x9888), 0x57901411 },
- { _MMIO(0x9888), 0x49900042 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900024 },
- { _MMIO(0x9888), 0x59900001 },
- { _MMIO(0x9888), 0x43900841 },
- { _MMIO(0x9888), 0x53900411 },
-};
-
-static int
-get_render_pipe_profile_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_render_pipe_profile;
- lens[n] = ARRAY_SIZE(mux_config_render_pipe_profile);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_memory_reads[] = {
- { _MMIO(0x272c), 0xffffffff },
- { _MMIO(0x2728), 0xffffffff },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x271c), 0xffffffff },
- { _MMIO(0x2718), 0xffffffff },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x274c), 0x86543210 },
- { _MMIO(0x2748), 0x86543210 },
- { _MMIO(0x2744), 0x00006667 },
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x275c), 0x86543210 },
- { _MMIO(0x2758), 0x86543210 },
- { _MMIO(0x2754), 0x00006465 },
- { _MMIO(0x2750), 0x00000000 },
- { _MMIO(0x2770), 0x0007f81a },
- { _MMIO(0x2774), 0x0000fe00 },
- { _MMIO(0x2778), 0x0007f82a },
- { _MMIO(0x277c), 0x0000fe00 },
- { _MMIO(0x2780), 0x0007f872 },
- { _MMIO(0x2784), 0x0000fe00 },
- { _MMIO(0x2788), 0x0007f8ba },
- { _MMIO(0x278c), 0x0000fe00 },
- { _MMIO(0x2790), 0x0007f87a },
- { _MMIO(0x2794), 0x0000fe00 },
- { _MMIO(0x2798), 0x0007f8ea },
- { _MMIO(0x279c), 0x0000fe00 },
- { _MMIO(0x27a0), 0x0007f8e2 },
- { _MMIO(0x27a4), 0x0000fe00 },
- { _MMIO(0x27a8), 0x0007f8f2 },
- { _MMIO(0x27ac), 0x0000fe00 },
-};
-
-static const struct i915_oa_reg flex_eu_config_memory_reads[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00015014 },
- { _MMIO(0xe658), 0x00025024 },
- { _MMIO(0xe758), 0x00035034 },
- { _MMIO(0xe45c), 0x00045044 },
- { _MMIO(0xe55c), 0x00055054 },
- { _MMIO(0xe65c), 0x00065064 },
-};
-
-static const struct i915_oa_reg mux_config_memory_reads[] = {
- { _MMIO(0x9888), 0x11810c00 },
- { _MMIO(0x9888), 0x1381001a },
- { _MMIO(0x9888), 0x37906800 },
- { _MMIO(0x9888), 0x3f900064 },
- { _MMIO(0x9888), 0x03811300 },
- { _MMIO(0x9888), 0x05811b12 },
- { _MMIO(0x9888), 0x0781001a },
- { _MMIO(0x9888), 0x1f810000 },
- { _MMIO(0x9888), 0x17810000 },
- { _MMIO(0x9888), 0x19810000 },
- { _MMIO(0x9888), 0x1b810000 },
- { _MMIO(0x9888), 0x1d810000 },
- { _MMIO(0x9888), 0x1b930055 },
- { _MMIO(0x9888), 0x03e58000 },
- { _MMIO(0x9888), 0x05e5c000 },
- { _MMIO(0x9888), 0x07e54000 },
- { _MMIO(0x9888), 0x13900150 },
- { _MMIO(0x9888), 0x21900151 },
- { _MMIO(0x9888), 0x23900152 },
- { _MMIO(0x9888), 0x25900153 },
- { _MMIO(0x9888), 0x27900154 },
- { _MMIO(0x9888), 0x29900155 },
- { _MMIO(0x9888), 0x2b900156 },
- { _MMIO(0x9888), 0x2d900157 },
- { _MMIO(0x9888), 0x2f90015f },
- { _MMIO(0x9888), 0x31900105 },
- { _MMIO(0x9888), 0x15900103 },
- { _MMIO(0x9888), 0x17900101 },
- { _MMIO(0x9888), 0x35900000 },
- { _MMIO(0x9888), 0x19908000 },
- { _MMIO(0x9888), 0x1b908000 },
- { _MMIO(0x9888), 0x1d908000 },
- { _MMIO(0x9888), 0x1f908000 },
- { _MMIO(0x9888), 0x11900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900c60 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x45900c00 },
- { _MMIO(0x9888), 0x47900c63 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900c63 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900063 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x43900003 },
- { _MMIO(0x9888), 0x53900000 },
-};
-
-static int
-get_memory_reads_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_memory_reads;
- lens[n] = ARRAY_SIZE(mux_config_memory_reads);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_memory_writes[] = {
- { _MMIO(0x272c), 0xffffffff },
- { _MMIO(0x2728), 0xffffffff },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x271c), 0xffffffff },
- { _MMIO(0x2718), 0xffffffff },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x274c), 0x86543210 },
- { _MMIO(0x2748), 0x86543210 },
- { _MMIO(0x2744), 0x00006667 },
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x275c), 0x86543210 },
- { _MMIO(0x2758), 0x86543210 },
- { _MMIO(0x2754), 0x00006465 },
- { _MMIO(0x2750), 0x00000000 },
- { _MMIO(0x2770), 0x0007f81a },
- { _MMIO(0x2774), 0x0000fe00 },
- { _MMIO(0x2778), 0x0007f82a },
- { _MMIO(0x277c), 0x0000fe00 },
- { _MMIO(0x2780), 0x0007f822 },
- { _MMIO(0x2784), 0x0000fe00 },
- { _MMIO(0x2788), 0x0007f8ba },
- { _MMIO(0x278c), 0x0000fe00 },
- { _MMIO(0x2790), 0x0007f87a },
- { _MMIO(0x2794), 0x0000fe00 },
- { _MMIO(0x2798), 0x0007f8ea },
- { _MMIO(0x279c), 0x0000fe00 },
- { _MMIO(0x27a0), 0x0007f8e2 },
- { _MMIO(0x27a4), 0x0000fe00 },
- { _MMIO(0x27a8), 0x0007f8f2 },
- { _MMIO(0x27ac), 0x0000fe00 },
-};
-
-static const struct i915_oa_reg flex_eu_config_memory_writes[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00015014 },
- { _MMIO(0xe658), 0x00025024 },
- { _MMIO(0xe758), 0x00035034 },
- { _MMIO(0xe45c), 0x00045044 },
- { _MMIO(0xe55c), 0x00055054 },
- { _MMIO(0xe65c), 0x00065064 },
-};
-
-static const struct i915_oa_reg mux_config_memory_writes[] = {
- { _MMIO(0x9888), 0x11810c00 },
- { _MMIO(0x9888), 0x1381001a },
- { _MMIO(0x9888), 0x37906800 },
- { _MMIO(0x9888), 0x3f901000 },
- { _MMIO(0x9888), 0x03811300 },
- { _MMIO(0x9888), 0x05811b12 },
- { _MMIO(0x9888), 0x0781001a },
- { _MMIO(0x9888), 0x1f810000 },
- { _MMIO(0x9888), 0x17810000 },
- { _MMIO(0x9888), 0x19810000 },
- { _MMIO(0x9888), 0x1b810000 },
- { _MMIO(0x9888), 0x1d810000 },
- { _MMIO(0x9888), 0x1b930055 },
- { _MMIO(0x9888), 0x03e58000 },
- { _MMIO(0x9888), 0x05e5c000 },
- { _MMIO(0x9888), 0x07e54000 },
- { _MMIO(0x9888), 0x13900160 },
- { _MMIO(0x9888), 0x21900161 },
- { _MMIO(0x9888), 0x23900162 },
- { _MMIO(0x9888), 0x25900163 },
- { _MMIO(0x9888), 0x27900164 },
- { _MMIO(0x9888), 0x29900165 },
- { _MMIO(0x9888), 0x2b900166 },
- { _MMIO(0x9888), 0x2d900167 },
- { _MMIO(0x9888), 0x2f900150 },
- { _MMIO(0x9888), 0x31900105 },
- { _MMIO(0x9888), 0x15900103 },
- { _MMIO(0x9888), 0x17900101 },
- { _MMIO(0x9888), 0x35900000 },
- { _MMIO(0x9888), 0x19908000 },
- { _MMIO(0x9888), 0x1b908000 },
- { _MMIO(0x9888), 0x1d908000 },
- { _MMIO(0x9888), 0x1f908000 },
- { _MMIO(0x9888), 0x11900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900c60 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x45900c00 },
- { _MMIO(0x9888), 0x47900c63 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900c63 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900063 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x43900003 },
- { _MMIO(0x9888), 0x53900000 },
-};
-
-static int
-get_memory_writes_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_memory_writes;
- lens[n] = ARRAY_SIZE(mux_config_memory_writes);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_compute_extended[] = {
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2770), 0x0007fc2a },
- { _MMIO(0x2774), 0x0000bf00 },
- { _MMIO(0x2778), 0x0007fc6a },
- { _MMIO(0x277c), 0x0000bf00 },
- { _MMIO(0x2780), 0x0007fc92 },
- { _MMIO(0x2784), 0x0000bf00 },
- { _MMIO(0x2788), 0x0007fca2 },
- { _MMIO(0x278c), 0x0000bf00 },
- { _MMIO(0x2790), 0x0007fc32 },
- { _MMIO(0x2794), 0x0000bf00 },
- { _MMIO(0x2798), 0x0007fc9a },
- { _MMIO(0x279c), 0x0000bf00 },
- { _MMIO(0x27a0), 0x0007fe6a },
- { _MMIO(0x27a4), 0x0000bf00 },
- { _MMIO(0x27a8), 0x0007fe7a },
- { _MMIO(0x27ac), 0x0000bf00 },
-};
-
-static const struct i915_oa_reg flex_eu_config_compute_extended[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00000003 },
- { _MMIO(0xe658), 0x00002001 },
- { _MMIO(0xe758), 0x00778008 },
- { _MMIO(0xe45c), 0x00088078 },
- { _MMIO(0xe55c), 0x00808708 },
- { _MMIO(0xe65c), 0x00a08908 },
-};
-
-static const struct i915_oa_reg mux_config_compute_extended[] = {
- { _MMIO(0x9888), 0x106c00e0 },
- { _MMIO(0x9888), 0x141c8160 },
- { _MMIO(0x9888), 0x161c8015 },
- { _MMIO(0x9888), 0x181c0120 },
- { _MMIO(0x9888), 0x004e8000 },
- { _MMIO(0x9888), 0x0e4e8000 },
- { _MMIO(0x9888), 0x184e8000 },
- { _MMIO(0x9888), 0x1a4eaaa0 },
- { _MMIO(0x9888), 0x1c4e0002 },
- { _MMIO(0x9888), 0x024e8000 },
- { _MMIO(0x9888), 0x044e8000 },
- { _MMIO(0x9888), 0x064e8000 },
- { _MMIO(0x9888), 0x084e8000 },
- { _MMIO(0x9888), 0x0a4e8000 },
- { _MMIO(0x9888), 0x0e6c0b01 },
- { _MMIO(0x9888), 0x006c0200 },
- { _MMIO(0x9888), 0x026c000c },
- { _MMIO(0x9888), 0x1c6c0000 },
- { _MMIO(0x9888), 0x1e6c0000 },
- { _MMIO(0x9888), 0x1a6c0000 },
- { _MMIO(0x9888), 0x0e1bc000 },
- { _MMIO(0x9888), 0x001b8000 },
- { _MMIO(0x9888), 0x021bc000 },
- { _MMIO(0x9888), 0x001c0041 },
- { _MMIO(0x9888), 0x061c4200 },
- { _MMIO(0x9888), 0x081c4443 },
- { _MMIO(0x9888), 0x0a1c4645 },
- { _MMIO(0x9888), 0x0c1c7647 },
- { _MMIO(0x9888), 0x041c7357 },
- { _MMIO(0x9888), 0x1c1c0030 },
- { _MMIO(0x9888), 0x101c0000 },
- { _MMIO(0x9888), 0x1a1c0000 },
- { _MMIO(0x9888), 0x121c8000 },
- { _MMIO(0x9888), 0x004c8000 },
- { _MMIO(0x9888), 0x0a4caa2a },
- { _MMIO(0x9888), 0x0c4c02aa },
- { _MMIO(0x9888), 0x084ca000 },
- { _MMIO(0x9888), 0x000da000 },
- { _MMIO(0x9888), 0x060d8000 },
- { _MMIO(0x9888), 0x080da000 },
- { _MMIO(0x9888), 0x0a0da000 },
- { _MMIO(0x9888), 0x0c0da000 },
- { _MMIO(0x9888), 0x0e0da000 },
- { _MMIO(0x9888), 0x020da000 },
- { _MMIO(0x9888), 0x040da000 },
- { _MMIO(0x9888), 0x0c0f5400 },
- { _MMIO(0x9888), 0x0e0f5515 },
- { _MMIO(0x9888), 0x100f0155 },
- { _MMIO(0x9888), 0x002c8000 },
- { _MMIO(0x9888), 0x0e2c8000 },
- { _MMIO(0x9888), 0x162caa00 },
- { _MMIO(0x9888), 0x182c00aa },
- { _MMIO(0x9888), 0x022c8000 },
- { _MMIO(0x9888), 0x042c8000 },
- { _MMIO(0x9888), 0x062c8000 },
- { _MMIO(0x9888), 0x082c8000 },
- { _MMIO(0x9888), 0x0a2c8000 },
- { _MMIO(0x9888), 0x11907fff },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900040 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x45900802 },
- { _MMIO(0x9888), 0x47900842 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900842 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900000 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x43900800 },
- { _MMIO(0x9888), 0x53900000 },
-};
-
-static int
-get_compute_extended_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_compute_extended;
- lens[n] = ARRAY_SIZE(mux_config_compute_extended);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_compute_l3_cache[] = {
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x30800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x30800000 },
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2770), 0x0007fffa },
- { _MMIO(0x2774), 0x0000fefe },
- { _MMIO(0x2778), 0x0007fffa },
- { _MMIO(0x277c), 0x0000fefd },
- { _MMIO(0x2790), 0x0007fffa },
- { _MMIO(0x2794), 0x0000fbef },
- { _MMIO(0x2798), 0x0007fffa },
- { _MMIO(0x279c), 0x0000fbdf },
-};
-
-static const struct i915_oa_reg flex_eu_config_compute_l3_cache[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00000003 },
- { _MMIO(0xe658), 0x00002001 },
- { _MMIO(0xe758), 0x00101100 },
- { _MMIO(0xe45c), 0x00201200 },
- { _MMIO(0xe55c), 0x00301300 },
- { _MMIO(0xe65c), 0x00401400 },
-};
-
-static const struct i915_oa_reg mux_config_compute_l3_cache[] = {
- { _MMIO(0x9888), 0x166c0760 },
- { _MMIO(0x9888), 0x1593001e },
- { _MMIO(0x9888), 0x3f900003 },
- { _MMIO(0x9888), 0x004e8000 },
- { _MMIO(0x9888), 0x0e4e8000 },
- { _MMIO(0x9888), 0x184e8000 },
- { _MMIO(0x9888), 0x1a4e8020 },
- { _MMIO(0x9888), 0x1c4e0002 },
- { _MMIO(0x9888), 0x006c0051 },
- { _MMIO(0x9888), 0x066c5000 },
- { _MMIO(0x9888), 0x086c5c5d },
- { _MMIO(0x9888), 0x0e6c5e5f },
- { _MMIO(0x9888), 0x106c0000 },
- { _MMIO(0x9888), 0x186c0000 },
- { _MMIO(0x9888), 0x1c6c0000 },
- { _MMIO(0x9888), 0x1e6c0000 },
- { _MMIO(0x9888), 0x001b4000 },
- { _MMIO(0x9888), 0x061b8000 },
- { _MMIO(0x9888), 0x081bc000 },
- { _MMIO(0x9888), 0x0e1bc000 },
- { _MMIO(0x9888), 0x101c8000 },
- { _MMIO(0x9888), 0x1a1ce000 },
- { _MMIO(0x9888), 0x1c1c0030 },
- { _MMIO(0x9888), 0x004c8000 },
- { _MMIO(0x9888), 0x0a4c2a00 },
- { _MMIO(0x9888), 0x0c4c0280 },
- { _MMIO(0x9888), 0x000d2000 },
- { _MMIO(0x9888), 0x060d8000 },
- { _MMIO(0x9888), 0x080da000 },
- { _MMIO(0x9888), 0x0e0da000 },
- { _MMIO(0x9888), 0x0c0f0400 },
- { _MMIO(0x9888), 0x0e0f1500 },
- { _MMIO(0x9888), 0x100f0140 },
- { _MMIO(0x9888), 0x002c8000 },
- { _MMIO(0x9888), 0x0e2c8000 },
- { _MMIO(0x9888), 0x162c0a00 },
- { _MMIO(0x9888), 0x182c00a0 },
- { _MMIO(0x9888), 0x03933300 },
- { _MMIO(0x9888), 0x05930032 },
- { _MMIO(0x9888), 0x11930000 },
- { _MMIO(0x9888), 0x1b930000 },
- { _MMIO(0x9888), 0x1d900157 },
- { _MMIO(0x9888), 0x1f900158 },
- { _MMIO(0x9888), 0x35900000 },
- { _MMIO(0x9888), 0x19908000 },
- { _MMIO(0x9888), 0x1b908000 },
- { _MMIO(0x9888), 0x1190030f },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900000 },
- { _MMIO(0x9888), 0x55900000 },
- { _MMIO(0x9888), 0x45900021 },
- { _MMIO(0x9888), 0x47900000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x4b900000 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x53905555 },
- { _MMIO(0x9888), 0x43900000 },
-};
-
-static int
-get_compute_l3_cache_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_compute_l3_cache;
- lens[n] = ARRAY_SIZE(mux_config_compute_l3_cache);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_hdc_and_sf[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x10800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2770), 0x00000002 },
- { _MMIO(0x2774), 0x0000fdff },
-};
-
-static const struct i915_oa_reg flex_eu_config_hdc_and_sf[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_hdc_and_sf[] = {
- { _MMIO(0x9888), 0x104f0232 },
- { _MMIO(0x9888), 0x124f4640 },
- { _MMIO(0x9888), 0x106c0232 },
- { _MMIO(0x9888), 0x11834400 },
- { _MMIO(0x9888), 0x0a4e8000 },
- { _MMIO(0x9888), 0x0c4e8000 },
- { _MMIO(0x9888), 0x004f1880 },
- { _MMIO(0x9888), 0x024f08bb },
- { _MMIO(0x9888), 0x044f001b },
- { _MMIO(0x9888), 0x046c0100 },
- { _MMIO(0x9888), 0x066c000b },
- { _MMIO(0x9888), 0x1a6c0000 },
- { _MMIO(0x9888), 0x041b8000 },
- { _MMIO(0x9888), 0x061b4000 },
- { _MMIO(0x9888), 0x1a1c1800 },
- { _MMIO(0x9888), 0x005b8000 },
- { _MMIO(0x9888), 0x025bc000 },
- { _MMIO(0x9888), 0x045b4000 },
- { _MMIO(0x9888), 0x125c8000 },
- { _MMIO(0x9888), 0x145c8000 },
- { _MMIO(0x9888), 0x165c8000 },
- { _MMIO(0x9888), 0x185c8000 },
- { _MMIO(0x9888), 0x0a4c00a0 },
- { _MMIO(0x9888), 0x000d8000 },
- { _MMIO(0x9888), 0x020da000 },
- { _MMIO(0x9888), 0x040da000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x0c0f5000 },
- { _MMIO(0x9888), 0x0e0f0055 },
- { _MMIO(0x9888), 0x022cc000 },
- { _MMIO(0x9888), 0x042cc000 },
- { _MMIO(0x9888), 0x062cc000 },
- { _MMIO(0x9888), 0x082cc000 },
- { _MMIO(0x9888), 0x0a2c8000 },
- { _MMIO(0x9888), 0x0c2c8000 },
- { _MMIO(0x9888), 0x0f828000 },
- { _MMIO(0x9888), 0x0f8305c0 },
- { _MMIO(0x9888), 0x09830000 },
- { _MMIO(0x9888), 0x07830000 },
- { _MMIO(0x9888), 0x1d950080 },
- { _MMIO(0x9888), 0x13928000 },
- { _MMIO(0x9888), 0x0f988000 },
- { _MMIO(0x9888), 0x31904000 },
- { _MMIO(0x9888), 0x1190fc00 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x59900001 },
- { _MMIO(0x9888), 0x4b900040 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900800 },
- { _MMIO(0x9888), 0x43900842 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900000 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static int
-get_hdc_and_sf_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_hdc_and_sf;
- lens[n] = ARRAY_SIZE(mux_config_hdc_and_sf);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_l3_1[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0xf0800000 },
- { _MMIO(0x2770), 0x00100070 },
- { _MMIO(0x2774), 0x0000fff1 },
- { _MMIO(0x2778), 0x00014002 },
- { _MMIO(0x277c), 0x0000c3ff },
- { _MMIO(0x2780), 0x00010002 },
- { _MMIO(0x2784), 0x0000c7ff },
- { _MMIO(0x2788), 0x00004002 },
- { _MMIO(0x278c), 0x0000d3ff },
- { _MMIO(0x2790), 0x00100700 },
- { _MMIO(0x2794), 0x0000ff1f },
- { _MMIO(0x2798), 0x00001402 },
- { _MMIO(0x279c), 0x0000fc3f },
- { _MMIO(0x27a0), 0x00001002 },
- { _MMIO(0x27a4), 0x0000fc7f },
- { _MMIO(0x27a8), 0x00000402 },
- { _MMIO(0x27ac), 0x0000fd3f },
-};
-
-static const struct i915_oa_reg flex_eu_config_l3_1[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_l3_1[] = {
- { _MMIO(0x9888), 0x126c7b40 },
- { _MMIO(0x9888), 0x166c0020 },
- { _MMIO(0x9888), 0x0a603444 },
- { _MMIO(0x9888), 0x0a613400 },
- { _MMIO(0x9888), 0x1a4ea800 },
- { _MMIO(0x9888), 0x1c4e0002 },
- { _MMIO(0x9888), 0x024e8000 },
- { _MMIO(0x9888), 0x044e8000 },
- { _MMIO(0x9888), 0x064e8000 },
- { _MMIO(0x9888), 0x084e8000 },
- { _MMIO(0x9888), 0x0a4e8000 },
- { _MMIO(0x9888), 0x064f4000 },
- { _MMIO(0x9888), 0x0c6c5327 },
- { _MMIO(0x9888), 0x0e6c5425 },
- { _MMIO(0x9888), 0x006c2a00 },
- { _MMIO(0x9888), 0x026c285b },
- { _MMIO(0x9888), 0x046c005c },
- { _MMIO(0x9888), 0x106c0000 },
- { _MMIO(0x9888), 0x1c6c0000 },
- { _MMIO(0x9888), 0x1e6c0000 },
- { _MMIO(0x9888), 0x1a6c0800 },
- { _MMIO(0x9888), 0x0c1bc000 },
- { _MMIO(0x9888), 0x0e1bc000 },
- { _MMIO(0x9888), 0x001b8000 },
- { _MMIO(0x9888), 0x021bc000 },
- { _MMIO(0x9888), 0x041bc000 },
- { _MMIO(0x9888), 0x1c1c003c },
- { _MMIO(0x9888), 0x121c8000 },
- { _MMIO(0x9888), 0x141c8000 },
- { _MMIO(0x9888), 0x161c8000 },
- { _MMIO(0x9888), 0x181c8000 },
- { _MMIO(0x9888), 0x1a1c0800 },
- { _MMIO(0x9888), 0x065b4000 },
- { _MMIO(0x9888), 0x1a5c1000 },
- { _MMIO(0x9888), 0x10600000 },
- { _MMIO(0x9888), 0x04600000 },
- { _MMIO(0x9888), 0x0c610044 },
- { _MMIO(0x9888), 0x10610000 },
- { _MMIO(0x9888), 0x06610000 },
- { _MMIO(0x9888), 0x0c4c02a8 },
- { _MMIO(0x9888), 0x084ca000 },
- { _MMIO(0x9888), 0x0a4c002a },
- { _MMIO(0x9888), 0x0c0da000 },
- { _MMIO(0x9888), 0x0e0da000 },
- { _MMIO(0x9888), 0x000d8000 },
- { _MMIO(0x9888), 0x020da000 },
- { _MMIO(0x9888), 0x040da000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x100f0154 },
- { _MMIO(0x9888), 0x0c0f5000 },
- { _MMIO(0x9888), 0x0e0f0055 },
- { _MMIO(0x9888), 0x182c00aa },
- { _MMIO(0x9888), 0x022c8000 },
- { _MMIO(0x9888), 0x042c8000 },
- { _MMIO(0x9888), 0x062c8000 },
- { _MMIO(0x9888), 0x082c8000 },
- { _MMIO(0x9888), 0x0a2c8000 },
- { _MMIO(0x9888), 0x0c2cc000 },
- { _MMIO(0x9888), 0x1190ffc0 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900420 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900021 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900400 },
- { _MMIO(0x9888), 0x43900421 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900040 },
-};
-
-static int
-get_l3_1_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_l3_1;
- lens[n] = ARRAY_SIZE(mux_config_l3_1);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_l3_2[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2770), 0x00100070 },
- { _MMIO(0x2774), 0x0000fff1 },
- { _MMIO(0x2778), 0x00028002 },
- { _MMIO(0x277c), 0x000087ff },
- { _MMIO(0x2780), 0x00020002 },
- { _MMIO(0x2784), 0x00008fff },
- { _MMIO(0x2788), 0x00008002 },
- { _MMIO(0x278c), 0x0000a7ff },
-};
-
-static const struct i915_oa_reg flex_eu_config_l3_2[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_l3_2[] = {
- { _MMIO(0x9888), 0x126c02e0 },
- { _MMIO(0x9888), 0x146c0001 },
- { _MMIO(0x9888), 0x0a623400 },
- { _MMIO(0x9888), 0x044e8000 },
- { _MMIO(0x9888), 0x064e8000 },
- { _MMIO(0x9888), 0x084e8000 },
- { _MMIO(0x9888), 0x0a4e8000 },
- { _MMIO(0x9888), 0x064f4000 },
- { _MMIO(0x9888), 0x026c3324 },
- { _MMIO(0x9888), 0x046c3422 },
- { _MMIO(0x9888), 0x106c0000 },
- { _MMIO(0x9888), 0x1a6c0000 },
- { _MMIO(0x9888), 0x021bc000 },
- { _MMIO(0x9888), 0x041bc000 },
- { _MMIO(0x9888), 0x141c8000 },
- { _MMIO(0x9888), 0x161c8000 },
- { _MMIO(0x9888), 0x181c8000 },
- { _MMIO(0x9888), 0x1a1c0800 },
- { _MMIO(0x9888), 0x065b4000 },
- { _MMIO(0x9888), 0x1a5c1000 },
- { _MMIO(0x9888), 0x06614000 },
- { _MMIO(0x9888), 0x0c620044 },
- { _MMIO(0x9888), 0x10620000 },
- { _MMIO(0x9888), 0x06620000 },
- { _MMIO(0x9888), 0x084c8000 },
- { _MMIO(0x9888), 0x0a4c002a },
- { _MMIO(0x9888), 0x020da000 },
- { _MMIO(0x9888), 0x040da000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x0c0f4000 },
- { _MMIO(0x9888), 0x0e0f0055 },
- { _MMIO(0x9888), 0x042c8000 },
- { _MMIO(0x9888), 0x062c8000 },
- { _MMIO(0x9888), 0x082c8000 },
- { _MMIO(0x9888), 0x0a2c8000 },
- { _MMIO(0x9888), 0x0c2cc000 },
- { _MMIO(0x9888), 0x1190f800 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x43900000 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900000 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static int
-get_l3_2_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_l3_2;
- lens[n] = ARRAY_SIZE(mux_config_l3_2);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_l3_3[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2770), 0x00100070 },
- { _MMIO(0x2774), 0x0000fff1 },
- { _MMIO(0x2778), 0x00028002 },
- { _MMIO(0x277c), 0x000087ff },
- { _MMIO(0x2780), 0x00020002 },
- { _MMIO(0x2784), 0x00008fff },
- { _MMIO(0x2788), 0x00008002 },
- { _MMIO(0x278c), 0x0000a7ff },
-};
-
-static const struct i915_oa_reg flex_eu_config_l3_3[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_l3_3[] = {
- { _MMIO(0x9888), 0x126c4e80 },
- { _MMIO(0x9888), 0x146c0000 },
- { _MMIO(0x9888), 0x0a633400 },
- { _MMIO(0x9888), 0x044e8000 },
- { _MMIO(0x9888), 0x064e8000 },
- { _MMIO(0x9888), 0x084e8000 },
- { _MMIO(0x9888), 0x0a4e8000 },
- { _MMIO(0x9888), 0x0c4e8000 },
- { _MMIO(0x9888), 0x026c3321 },
- { _MMIO(0x9888), 0x046c342f },
- { _MMIO(0x9888), 0x106c0000 },
- { _MMIO(0x9888), 0x1a6c2000 },
- { _MMIO(0x9888), 0x021bc000 },
- { _MMIO(0x9888), 0x041bc000 },
- { _MMIO(0x9888), 0x061b4000 },
- { _MMIO(0x9888), 0x141c8000 },
- { _MMIO(0x9888), 0x161c8000 },
- { _MMIO(0x9888), 0x181c8000 },
- { _MMIO(0x9888), 0x1a1c1800 },
- { _MMIO(0x9888), 0x06604000 },
- { _MMIO(0x9888), 0x0c630044 },
- { _MMIO(0x9888), 0x10630000 },
- { _MMIO(0x9888), 0x06630000 },
- { _MMIO(0x9888), 0x084c8000 },
- { _MMIO(0x9888), 0x0a4c00aa },
- { _MMIO(0x9888), 0x020da000 },
- { _MMIO(0x9888), 0x040da000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x0c0f4000 },
- { _MMIO(0x9888), 0x0e0f0055 },
- { _MMIO(0x9888), 0x042c8000 },
- { _MMIO(0x9888), 0x062c8000 },
- { _MMIO(0x9888), 0x082c8000 },
- { _MMIO(0x9888), 0x0a2c8000 },
- { _MMIO(0x9888), 0x0c2c8000 },
- { _MMIO(0x9888), 0x1190f800 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x43900842 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900002 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static int
-get_l3_3_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_l3_3;
- lens[n] = ARRAY_SIZE(mux_config_l3_3);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_rasterizer_and_pixel_backend[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x30800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2770), 0x00000002 },
- { _MMIO(0x2774), 0x0000efff },
- { _MMIO(0x2778), 0x00006000 },
- { _MMIO(0x277c), 0x0000f3ff },
-};
-
-static const struct i915_oa_reg flex_eu_config_rasterizer_and_pixel_backend[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_rasterizer_and_pixel_backend[] = {
- { _MMIO(0x9888), 0x102f3800 },
- { _MMIO(0x9888), 0x144d0500 },
- { _MMIO(0x9888), 0x120d03c0 },
- { _MMIO(0x9888), 0x140d03cf },
- { _MMIO(0x9888), 0x0c0f0004 },
- { _MMIO(0x9888), 0x0c4e4000 },
- { _MMIO(0x9888), 0x042f0480 },
- { _MMIO(0x9888), 0x082f0000 },
- { _MMIO(0x9888), 0x022f0000 },
- { _MMIO(0x9888), 0x0a4c0090 },
- { _MMIO(0x9888), 0x064d0027 },
- { _MMIO(0x9888), 0x004d0000 },
- { _MMIO(0x9888), 0x000d0d40 },
- { _MMIO(0x9888), 0x020d803f },
- { _MMIO(0x9888), 0x040d8023 },
- { _MMIO(0x9888), 0x100d0000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x020f0010 },
- { _MMIO(0x9888), 0x000f0000 },
- { _MMIO(0x9888), 0x0e0f0050 },
- { _MMIO(0x9888), 0x0a2c8000 },
- { _MMIO(0x9888), 0x0c2c8000 },
- { _MMIO(0x9888), 0x1190fc00 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41901400 },
- { _MMIO(0x9888), 0x43901485 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900001 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static int
-get_rasterizer_and_pixel_backend_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_rasterizer_and_pixel_backend;
- lens[n] = ARRAY_SIZE(mux_config_rasterizer_and_pixel_backend);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_sampler[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x70800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
- { _MMIO(0x2770), 0x0000c000 },
- { _MMIO(0x2774), 0x0000e7ff },
- { _MMIO(0x2778), 0x00003000 },
- { _MMIO(0x277c), 0x0000f9ff },
- { _MMIO(0x2780), 0x00000c00 },
- { _MMIO(0x2784), 0x0000fe7f },
-};
-
-static const struct i915_oa_reg flex_eu_config_sampler[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_sampler[] = {
- { _MMIO(0x9888), 0x14152c00 },
- { _MMIO(0x9888), 0x16150005 },
- { _MMIO(0x9888), 0x121600a0 },
- { _MMIO(0x9888), 0x14352c00 },
- { _MMIO(0x9888), 0x16350005 },
- { _MMIO(0x9888), 0x123600a0 },
- { _MMIO(0x9888), 0x14552c00 },
- { _MMIO(0x9888), 0x16550005 },
- { _MMIO(0x9888), 0x125600a0 },
- { _MMIO(0x9888), 0x062f6000 },
- { _MMIO(0x9888), 0x022f2000 },
- { _MMIO(0x9888), 0x0c4c0050 },
- { _MMIO(0x9888), 0x0a4c0010 },
- { _MMIO(0x9888), 0x0c0d8000 },
- { _MMIO(0x9888), 0x0e0da000 },
- { _MMIO(0x9888), 0x000d8000 },
- { _MMIO(0x9888), 0x020da000 },
- { _MMIO(0x9888), 0x040da000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x100f0350 },
- { _MMIO(0x9888), 0x0c0fb000 },
- { _MMIO(0x9888), 0x0e0f00da },
- { _MMIO(0x9888), 0x182c0028 },
- { _MMIO(0x9888), 0x0a2c8000 },
- { _MMIO(0x9888), 0x022dc000 },
- { _MMIO(0x9888), 0x042d4000 },
- { _MMIO(0x9888), 0x0c138000 },
- { _MMIO(0x9888), 0x0e132000 },
- { _MMIO(0x9888), 0x0413c000 },
- { _MMIO(0x9888), 0x1c140018 },
- { _MMIO(0x9888), 0x0c157000 },
- { _MMIO(0x9888), 0x0e150078 },
- { _MMIO(0x9888), 0x10150000 },
- { _MMIO(0x9888), 0x04162180 },
- { _MMIO(0x9888), 0x02160000 },
- { _MMIO(0x9888), 0x04174000 },
- { _MMIO(0x9888), 0x0233a000 },
- { _MMIO(0x9888), 0x04333000 },
- { _MMIO(0x9888), 0x14348000 },
- { _MMIO(0x9888), 0x16348000 },
- { _MMIO(0x9888), 0x02357870 },
- { _MMIO(0x9888), 0x10350000 },
- { _MMIO(0x9888), 0x04360043 },
- { _MMIO(0x9888), 0x02360000 },
- { _MMIO(0x9888), 0x04371000 },
- { _MMIO(0x9888), 0x0e538000 },
- { _MMIO(0x9888), 0x00538000 },
- { _MMIO(0x9888), 0x06533000 },
- { _MMIO(0x9888), 0x1c540020 },
- { _MMIO(0x9888), 0x12548000 },
- { _MMIO(0x9888), 0x0e557000 },
- { _MMIO(0x9888), 0x00557800 },
- { _MMIO(0x9888), 0x10550000 },
- { _MMIO(0x9888), 0x06560043 },
- { _MMIO(0x9888), 0x02560000 },
- { _MMIO(0x9888), 0x06571000 },
- { _MMIO(0x9888), 0x1190ff80 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900000 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900060 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900c00 },
- { _MMIO(0x9888), 0x43900842 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900060 },
-};
-
-static int
-get_sampler_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_sampler;
- lens[n] = ARRAY_SIZE(mux_config_sampler);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_tdl_1[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x30800000 },
- { _MMIO(0x2770), 0x00000002 },
- { _MMIO(0x2774), 0x00007fff },
- { _MMIO(0x2778), 0x00000000 },
- { _MMIO(0x277c), 0x00009fff },
- { _MMIO(0x2780), 0x00000002 },
- { _MMIO(0x2784), 0x0000efff },
- { _MMIO(0x2788), 0x00000000 },
- { _MMIO(0x278c), 0x0000f3ff },
- { _MMIO(0x2790), 0x00000002 },
- { _MMIO(0x2794), 0x0000fdff },
- { _MMIO(0x2798), 0x00000000 },
- { _MMIO(0x279c), 0x0000fe7f },
-};
-
-static const struct i915_oa_reg flex_eu_config_tdl_1[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_tdl_1[] = {
- { _MMIO(0x9888), 0x12120000 },
- { _MMIO(0x9888), 0x12320000 },
- { _MMIO(0x9888), 0x12520000 },
- { _MMIO(0x9888), 0x002f8000 },
- { _MMIO(0x9888), 0x022f3000 },
- { _MMIO(0x9888), 0x0a4c0015 },
- { _MMIO(0x9888), 0x0c0d8000 },
- { _MMIO(0x9888), 0x0e0da000 },
- { _MMIO(0x9888), 0x000d8000 },
- { _MMIO(0x9888), 0x020da000 },
- { _MMIO(0x9888), 0x040da000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x100f03a0 },
- { _MMIO(0x9888), 0x0c0ff000 },
- { _MMIO(0x9888), 0x0e0f0095 },
- { _MMIO(0x9888), 0x062c8000 },
- { _MMIO(0x9888), 0x082c8000 },
- { _MMIO(0x9888), 0x0a2c8000 },
- { _MMIO(0x9888), 0x0c2d8000 },
- { _MMIO(0x9888), 0x0e2d4000 },
- { _MMIO(0x9888), 0x062d4000 },
- { _MMIO(0x9888), 0x02108000 },
- { _MMIO(0x9888), 0x0410c000 },
- { _MMIO(0x9888), 0x02118000 },
- { _MMIO(0x9888), 0x0411c000 },
- { _MMIO(0x9888), 0x02121880 },
- { _MMIO(0x9888), 0x041219b5 },
- { _MMIO(0x9888), 0x00120000 },
- { _MMIO(0x9888), 0x02134000 },
- { _MMIO(0x9888), 0x04135000 },
- { _MMIO(0x9888), 0x0c308000 },
- { _MMIO(0x9888), 0x0e304000 },
- { _MMIO(0x9888), 0x06304000 },
- { _MMIO(0x9888), 0x0c318000 },
- { _MMIO(0x9888), 0x0e314000 },
- { _MMIO(0x9888), 0x06314000 },
- { _MMIO(0x9888), 0x0c321a80 },
- { _MMIO(0x9888), 0x0e320033 },
- { _MMIO(0x9888), 0x06320031 },
- { _MMIO(0x9888), 0x00320000 },
- { _MMIO(0x9888), 0x0c334000 },
- { _MMIO(0x9888), 0x0e331000 },
- { _MMIO(0x9888), 0x06331000 },
- { _MMIO(0x9888), 0x0e508000 },
- { _MMIO(0x9888), 0x00508000 },
- { _MMIO(0x9888), 0x02504000 },
- { _MMIO(0x9888), 0x0e518000 },
- { _MMIO(0x9888), 0x00518000 },
- { _MMIO(0x9888), 0x02514000 },
- { _MMIO(0x9888), 0x0e521880 },
- { _MMIO(0x9888), 0x00521a80 },
- { _MMIO(0x9888), 0x02520033 },
- { _MMIO(0x9888), 0x0e534000 },
- { _MMIO(0x9888), 0x00534000 },
- { _MMIO(0x9888), 0x02531000 },
- { _MMIO(0x9888), 0x1190ff80 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900800 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900062 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900c00 },
- { _MMIO(0x9888), 0x43900003 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900040 },
-};
-
-static int
-get_tdl_1_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_tdl_1;
- lens[n] = ARRAY_SIZE(mux_config_tdl_1);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_tdl_2[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2744), 0x00800000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0x00800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x00800000 },
-};
-
-static const struct i915_oa_reg flex_eu_config_tdl_2[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00010003 },
- { _MMIO(0xe658), 0x00012011 },
- { _MMIO(0xe758), 0x00015014 },
- { _MMIO(0xe45c), 0x00051050 },
- { _MMIO(0xe55c), 0x00053052 },
- { _MMIO(0xe65c), 0x00055054 },
-};
-
-static const struct i915_oa_reg mux_config_tdl_2[] = {
- { _MMIO(0x9888), 0x12124d60 },
- { _MMIO(0x9888), 0x12322e60 },
- { _MMIO(0x9888), 0x12524d60 },
- { _MMIO(0x9888), 0x022f3000 },
- { _MMIO(0x9888), 0x0a4c0014 },
- { _MMIO(0x9888), 0x000d8000 },
- { _MMIO(0x9888), 0x020da000 },
- { _MMIO(0x9888), 0x040da000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x0c0fe000 },
- { _MMIO(0x9888), 0x0e0f0097 },
- { _MMIO(0x9888), 0x082c8000 },
- { _MMIO(0x9888), 0x0a2c8000 },
- { _MMIO(0x9888), 0x002d8000 },
- { _MMIO(0x9888), 0x062d4000 },
- { _MMIO(0x9888), 0x0410c000 },
- { _MMIO(0x9888), 0x0411c000 },
- { _MMIO(0x9888), 0x04121fb7 },
- { _MMIO(0x9888), 0x00120000 },
- { _MMIO(0x9888), 0x04135000 },
- { _MMIO(0x9888), 0x00308000 },
- { _MMIO(0x9888), 0x06304000 },
- { _MMIO(0x9888), 0x00318000 },
- { _MMIO(0x9888), 0x06314000 },
- { _MMIO(0x9888), 0x00321b80 },
- { _MMIO(0x9888), 0x0632003f },
- { _MMIO(0x9888), 0x00334000 },
- { _MMIO(0x9888), 0x06331000 },
- { _MMIO(0x9888), 0x0250c000 },
- { _MMIO(0x9888), 0x0251c000 },
- { _MMIO(0x9888), 0x02521fb7 },
- { _MMIO(0x9888), 0x00520000 },
- { _MMIO(0x9888), 0x02535000 },
- { _MMIO(0x9888), 0x1190fc00 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x51900000 },
- { _MMIO(0x9888), 0x41900800 },
- { _MMIO(0x9888), 0x43900063 },
- { _MMIO(0x9888), 0x53900000 },
- { _MMIO(0x9888), 0x45900040 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static int
-get_tdl_2_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_tdl_2;
- lens[n] = ARRAY_SIZE(mux_config_tdl_2);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_compute_extra[] = {
-};
-
-static const struct i915_oa_reg flex_eu_config_compute_extra[] = {
-};
-
-static const struct i915_oa_reg mux_config_compute_extra[] = {
- { _MMIO(0x9888), 0x121203e0 },
- { _MMIO(0x9888), 0x123203e0 },
- { _MMIO(0x9888), 0x125203e0 },
- { _MMIO(0x9888), 0x129203e0 },
- { _MMIO(0x9888), 0x12b203e0 },
- { _MMIO(0x9888), 0x12d203e0 },
- { _MMIO(0x9888), 0x131203e0 },
- { _MMIO(0x9888), 0x133203e0 },
- { _MMIO(0x9888), 0x135203e0 },
- { _MMIO(0x9888), 0x1a4ef000 },
- { _MMIO(0x9888), 0x1c4e0003 },
- { _MMIO(0x9888), 0x024ec000 },
- { _MMIO(0x9888), 0x044ec000 },
- { _MMIO(0x9888), 0x064ec000 },
- { _MMIO(0x9888), 0x022f4000 },
- { _MMIO(0x9888), 0x0c4c02a0 },
- { _MMIO(0x9888), 0x084ca000 },
- { _MMIO(0x9888), 0x0a4c0042 },
- { _MMIO(0x9888), 0x0c0d8000 },
- { _MMIO(0x9888), 0x0e0da000 },
- { _MMIO(0x9888), 0x000d8000 },
- { _MMIO(0x9888), 0x020da000 },
- { _MMIO(0x9888), 0x040da000 },
- { _MMIO(0x9888), 0x060d2000 },
- { _MMIO(0x9888), 0x100f0150 },
- { _MMIO(0x9888), 0x0c0f5000 },
- { _MMIO(0x9888), 0x0e0f006d },
- { _MMIO(0x9888), 0x182c00a8 },
- { _MMIO(0x9888), 0x022c8000 },
- { _MMIO(0x9888), 0x042c8000 },
- { _MMIO(0x9888), 0x062c8000 },
- { _MMIO(0x9888), 0x0c2c8000 },
- { _MMIO(0x9888), 0x042d8000 },
- { _MMIO(0x9888), 0x06104000 },
- { _MMIO(0x9888), 0x06114000 },
- { _MMIO(0x9888), 0x06120033 },
- { _MMIO(0x9888), 0x00120000 },
- { _MMIO(0x9888), 0x06131000 },
- { _MMIO(0x9888), 0x04308000 },
- { _MMIO(0x9888), 0x04318000 },
- { _MMIO(0x9888), 0x04321980 },
- { _MMIO(0x9888), 0x00320000 },
- { _MMIO(0x9888), 0x04334000 },
- { _MMIO(0x9888), 0x04504000 },
- { _MMIO(0x9888), 0x04514000 },
- { _MMIO(0x9888), 0x04520033 },
- { _MMIO(0x9888), 0x00520000 },
- { _MMIO(0x9888), 0x04531000 },
- { _MMIO(0x9888), 0x1acef000 },
- { _MMIO(0x9888), 0x1cce0003 },
- { _MMIO(0x9888), 0x00af8000 },
- { _MMIO(0x9888), 0x0ccc02a0 },
- { _MMIO(0x9888), 0x0acc0001 },
- { _MMIO(0x9888), 0x0c8d8000 },
- { _MMIO(0x9888), 0x0e8da000 },
- { _MMIO(0x9888), 0x008d8000 },
- { _MMIO(0x9888), 0x028da000 },
- { _MMIO(0x9888), 0x108f0150 },
- { _MMIO(0x9888), 0x0c8fb000 },
- { _MMIO(0x9888), 0x0e8f0001 },
- { _MMIO(0x9888), 0x18ac00a8 },
- { _MMIO(0x9888), 0x06ac8000 },
- { _MMIO(0x9888), 0x02ad4000 },
- { _MMIO(0x9888), 0x02908000 },
- { _MMIO(0x9888), 0x02918000 },
- { _MMIO(0x9888), 0x02921980 },
- { _MMIO(0x9888), 0x00920000 },
- { _MMIO(0x9888), 0x02934000 },
- { _MMIO(0x9888), 0x02b04000 },
- { _MMIO(0x9888), 0x02b14000 },
- { _MMIO(0x9888), 0x02b20033 },
- { _MMIO(0x9888), 0x00b20000 },
- { _MMIO(0x9888), 0x02b31000 },
- { _MMIO(0x9888), 0x00d08000 },
- { _MMIO(0x9888), 0x00d18000 },
- { _MMIO(0x9888), 0x00d21980 },
- { _MMIO(0x9888), 0x00d34000 },
- { _MMIO(0x9888), 0x072f8000 },
- { _MMIO(0x9888), 0x0d4c0100 },
- { _MMIO(0x9888), 0x0d0d8000 },
- { _MMIO(0x9888), 0x0f0da000 },
- { _MMIO(0x9888), 0x110f01b0 },
- { _MMIO(0x9888), 0x192c0080 },
- { _MMIO(0x9888), 0x0f2d4000 },
- { _MMIO(0x9888), 0x0f108000 },
- { _MMIO(0x9888), 0x0f118000 },
- { _MMIO(0x9888), 0x0f121980 },
- { _MMIO(0x9888), 0x01120000 },
- { _MMIO(0x9888), 0x0f134000 },
- { _MMIO(0x9888), 0x0f304000 },
- { _MMIO(0x9888), 0x0f314000 },
- { _MMIO(0x9888), 0x0f320033 },
- { _MMIO(0x9888), 0x01320000 },
- { _MMIO(0x9888), 0x0f331000 },
- { _MMIO(0x9888), 0x0d508000 },
- { _MMIO(0x9888), 0x0d518000 },
- { _MMIO(0x9888), 0x0d521980 },
- { _MMIO(0x9888), 0x01520000 },
- { _MMIO(0x9888), 0x0d534000 },
- { _MMIO(0x9888), 0x1190ff80 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900c00 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
- { _MMIO(0x9888), 0x4b900002 },
- { _MMIO(0x9888), 0x59900000 },
- { _MMIO(0x9888), 0x51901100 },
- { _MMIO(0x9888), 0x41901000 },
- { _MMIO(0x9888), 0x43901423 },
- { _MMIO(0x9888), 0x53903331 },
- { _MMIO(0x9888), 0x45900044 },
-};
-
-static int
-get_compute_extra_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_compute_extra;
- lens[n] = ARRAY_SIZE(mux_config_compute_extra);
- n++;
-
- return n;
-}
-
-static const struct i915_oa_reg b_counter_config_vme_pipe[] = {
- { _MMIO(0x2740), 0x00000000 },
- { _MMIO(0x2710), 0x00000000 },
- { _MMIO(0x2714), 0xf0800000 },
- { _MMIO(0x2720), 0x00000000 },
- { _MMIO(0x2724), 0x30800000 },
- { _MMIO(0x2770), 0x00100030 },
- { _MMIO(0x2774), 0x0000fff9 },
- { _MMIO(0x2778), 0x00000002 },
- { _MMIO(0x277c), 0x0000fffc },
- { _MMIO(0x2780), 0x00000002 },
- { _MMIO(0x2784), 0x0000fff3 },
- { _MMIO(0x2788), 0x00100180 },
- { _MMIO(0x278c), 0x0000ffcf },
- { _MMIO(0x2790), 0x00000002 },
- { _MMIO(0x2794), 0x0000ffcf },
- { _MMIO(0x2798), 0x00000002 },
- { _MMIO(0x279c), 0x0000ff3f },
-};
-
-static const struct i915_oa_reg flex_eu_config_vme_pipe[] = {
- { _MMIO(0xe458), 0x00005004 },
- { _MMIO(0xe558), 0x00008003 },
-};
-
-static const struct i915_oa_reg mux_config_vme_pipe[] = {
- { _MMIO(0x9888), 0x141a5800 },
- { _MMIO(0x9888), 0x161a00c0 },
- { _MMIO(0x9888), 0x12180240 },
- { _MMIO(0x9888), 0x14180002 },
- { _MMIO(0x9888), 0x149a5800 },
- { _MMIO(0x9888), 0x169a00c0 },
- { _MMIO(0x9888), 0x12980240 },
- { _MMIO(0x9888), 0x14980002 },
- { _MMIO(0x9888), 0x1a4e3fc0 },
- { _MMIO(0x9888), 0x002f1000 },
- { _MMIO(0x9888), 0x022f8000 },
- { _MMIO(0x9888), 0x042f3000 },
- { _MMIO(0x9888), 0x004c4000 },
- { _MMIO(0x9888), 0x0a4c9500 },
- { _MMIO(0x9888), 0x0c4c002a },
- { _MMIO(0x9888), 0x000d2000 },
- { _MMIO(0x9888), 0x060d8000 },
- { _MMIO(0x9888), 0x080da000 },
- { _MMIO(0x9888), 0x0a0da000 },
- { _MMIO(0x9888), 0x0c0da000 },
- { _MMIO(0x9888), 0x0c0f0400 },
- { _MMIO(0x9888), 0x0e0f5500 },
- { _MMIO(0x9888), 0x100f0015 },
- { _MMIO(0x9888), 0x002c8000 },
- { _MMIO(0x9888), 0x0e2c8000 },
- { _MMIO(0x9888), 0x162caa00 },
- { _MMIO(0x9888), 0x182c000a },
- { _MMIO(0x9888), 0x04193000 },
- { _MMIO(0x9888), 0x081a28c1 },
- { _MMIO(0x9888), 0x001a0000 },
- { _MMIO(0x9888), 0x00133000 },
- { _MMIO(0x9888), 0x0613c000 },
- { _MMIO(0x9888), 0x0813f000 },
- { _MMIO(0x9888), 0x00172000 },
- { _MMIO(0x9888), 0x06178000 },
- { _MMIO(0x9888), 0x0817a000 },
- { _MMIO(0x9888), 0x00180037 },
- { _MMIO(0x9888), 0x06180940 },
- { _MMIO(0x9888), 0x08180000 },
- { _MMIO(0x9888), 0x02180000 },
- { _MMIO(0x9888), 0x04183000 },
- { _MMIO(0x9888), 0x04afc000 },
- { _MMIO(0x9888), 0x06af3000 },
- { _MMIO(0x9888), 0x0acc4000 },
- { _MMIO(0x9888), 0x0ccc0015 },
- { _MMIO(0x9888), 0x0a8da000 },
- { _MMIO(0x9888), 0x0c8da000 },
- { _MMIO(0x9888), 0x0e8f4000 },
- { _MMIO(0x9888), 0x108f0015 },
- { _MMIO(0x9888), 0x16aca000 },
- { _MMIO(0x9888), 0x18ac000a },
- { _MMIO(0x9888), 0x06993000 },
- { _MMIO(0x9888), 0x0c9a28c1 },
- { _MMIO(0x9888), 0x009a0000 },
- { _MMIO(0x9888), 0x0a93f000 },
- { _MMIO(0x9888), 0x0c93f000 },
- { _MMIO(0x9888), 0x0a97a000 },
- { _MMIO(0x9888), 0x0c97a000 },
- { _MMIO(0x9888), 0x0a980977 },
- { _MMIO(0x9888), 0x08980000 },
- { _MMIO(0x9888), 0x04980000 },
- { _MMIO(0x9888), 0x06983000 },
- { _MMIO(0x9888), 0x119000ff },
- { _MMIO(0x9888), 0x51900010 },
- { _MMIO(0x9888), 0x41900060 },
- { _MMIO(0x9888), 0x55900111 },
- { _MMIO(0x9888), 0x45900c00 },
- { _MMIO(0x9888), 0x47900821 },
- { _MMIO(0x9888), 0x57900000 },
- { _MMIO(0x9888), 0x49900002 },
- { _MMIO(0x9888), 0x37900000 },
- { _MMIO(0x9888), 0x33900000 },
-};
-
-static int
-get_vme_pipe_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_vme_pipe;
- lens[n] = ARRAY_SIZE(mux_config_vme_pipe);
- n++;
-
- return n;
-}
-
static const struct i915_oa_reg b_counter_config_test_oa[] = {
{ _MMIO(0x2740), 0x00000000 },
{ _MMIO(0x2744), 0x00800000 },
@@ -1984,6 +60,7 @@ static const struct i915_oa_reg flex_eu_config_test_oa[] = {
};
static const struct i915_oa_reg mux_config_test_oa[] = {
+ { _MMIO(0x9840), 0x00000080 },
{ _MMIO(0x9888), 0x11810000 },
{ _MMIO(0x9888), 0x07810013 },
{ _MMIO(0x9888), 0x1f810000 },
@@ -1998,1096 +75,35 @@ static const struct i915_oa_reg mux_config_test_oa[] = {
{ _MMIO(0x9888), 0x33900000 },
};
-static int
-get_test_oa_mux_config(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg **regs,
- int *lens)
-{
- int n = 0;
-
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs) < 1);
- BUILD_BUG_ON(ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens) < 1);
-
- regs[n] = mux_config_test_oa;
- lens[n] = ARRAY_SIZE(mux_config_test_oa);
- n++;
-
- return n;
-}
-
-int i915_oa_select_metric_set_sklgt4(struct drm_i915_private *dev_priv)
-{
- dev_priv->perf.oa.n_mux_configs = 0;
- dev_priv->perf.oa.b_counter_regs = NULL;
- dev_priv->perf.oa.b_counter_regs_len = 0;
- dev_priv->perf.oa.flex_regs = NULL;
- dev_priv->perf.oa.flex_regs_len = 0;
-
- switch (dev_priv->perf.oa.metrics_set) {
- case METRIC_SET_ID_RENDER_BASIC:
- dev_priv->perf.oa.n_mux_configs =
- get_render_basic_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_BASIC\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_render_basic;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_render_basic);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_render_basic;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_render_basic);
-
- return 0;
- case METRIC_SET_ID_COMPUTE_BASIC:
- dev_priv->perf.oa.n_mux_configs =
- get_compute_basic_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_BASIC\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_compute_basic;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_compute_basic);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_compute_basic;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_compute_basic);
-
- return 0;
- case METRIC_SET_ID_RENDER_PIPE_PROFILE:
- dev_priv->perf.oa.n_mux_configs =
- get_render_pipe_profile_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_PIPE_PROFILE\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_render_pipe_profile;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_render_pipe_profile);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_render_pipe_profile;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_render_pipe_profile);
-
- return 0;
- case METRIC_SET_ID_MEMORY_READS:
- dev_priv->perf.oa.n_mux_configs =
- get_memory_reads_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_READS\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_memory_reads;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_memory_reads);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_memory_reads;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_memory_reads);
-
- return 0;
- case METRIC_SET_ID_MEMORY_WRITES:
- dev_priv->perf.oa.n_mux_configs =
- get_memory_writes_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_WRITES\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_memory_writes;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_memory_writes);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_memory_writes;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_memory_writes);
-
- return 0;
- case METRIC_SET_ID_COMPUTE_EXTENDED:
- dev_priv->perf.oa.n_mux_configs =
- get_compute_extended_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTENDED\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_compute_extended;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_compute_extended);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_compute_extended;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_compute_extended);
-
- return 0;
- case METRIC_SET_ID_COMPUTE_L3_CACHE:
- dev_priv->perf.oa.n_mux_configs =
- get_compute_l3_cache_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_L3_CACHE\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_compute_l3_cache;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_compute_l3_cache);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_compute_l3_cache;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_compute_l3_cache);
-
- return 0;
- case METRIC_SET_ID_HDC_AND_SF:
- dev_priv->perf.oa.n_mux_configs =
- get_hdc_and_sf_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"HDC_AND_SF\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_hdc_and_sf;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_hdc_and_sf);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_hdc_and_sf;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_hdc_and_sf);
-
- return 0;
- case METRIC_SET_ID_L3_1:
- dev_priv->perf.oa.n_mux_configs =
- get_l3_1_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_1\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_l3_1;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_l3_1);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_l3_1;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_l3_1);
-
- return 0;
- case METRIC_SET_ID_L3_2:
- dev_priv->perf.oa.n_mux_configs =
- get_l3_2_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_2\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_l3_2;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_l3_2);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_l3_2;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_l3_2);
-
- return 0;
- case METRIC_SET_ID_L3_3:
- dev_priv->perf.oa.n_mux_configs =
- get_l3_3_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"L3_3\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_l3_3;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_l3_3);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_l3_3;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_l3_3);
-
- return 0;
- case METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND:
- dev_priv->perf.oa.n_mux_configs =
- get_rasterizer_and_pixel_backend_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"RASTERIZER_AND_PIXEL_BACKEND\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_rasterizer_and_pixel_backend;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_rasterizer_and_pixel_backend);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_rasterizer_and_pixel_backend;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_rasterizer_and_pixel_backend);
-
- return 0;
- case METRIC_SET_ID_SAMPLER:
- dev_priv->perf.oa.n_mux_configs =
- get_sampler_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"SAMPLER\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_sampler;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_sampler);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_sampler;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_sampler);
-
- return 0;
- case METRIC_SET_ID_TDL_1:
- dev_priv->perf.oa.n_mux_configs =
- get_tdl_1_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"TDL_1\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_tdl_1;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_tdl_1);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_tdl_1;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_tdl_1);
-
- return 0;
- case METRIC_SET_ID_TDL_2:
- dev_priv->perf.oa.n_mux_configs =
- get_tdl_2_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"TDL_2\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_tdl_2;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_tdl_2);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_tdl_2;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_tdl_2);
-
- return 0;
- case METRIC_SET_ID_COMPUTE_EXTRA:
- dev_priv->perf.oa.n_mux_configs =
- get_compute_extra_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTRA\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_compute_extra;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_compute_extra);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_compute_extra;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_compute_extra);
-
- return 0;
- case METRIC_SET_ID_VME_PIPE:
- dev_priv->perf.oa.n_mux_configs =
- get_vme_pipe_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"VME_PIPE\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_vme_pipe;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_vme_pipe);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_vme_pipe;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_vme_pipe);
-
- return 0;
- case METRIC_SET_ID_TEST_OA:
- dev_priv->perf.oa.n_mux_configs =
- get_test_oa_mux_config(dev_priv,
- dev_priv->perf.oa.mux_regs,
- dev_priv->perf.oa.mux_regs_lens);
- if (dev_priv->perf.oa.n_mux_configs == 0) {
- DRM_DEBUG_DRIVER("No suitable MUX config for \"TEST_OA\" metric set\n");
-
- /* EINVAL because *_register_sysfs already checked this
- * and so it wouldn't have been advertised to userspace and
- * so shouldn't have been requested
- */
- return -EINVAL;
- }
-
- dev_priv->perf.oa.b_counter_regs =
- b_counter_config_test_oa;
- dev_priv->perf.oa.b_counter_regs_len =
- ARRAY_SIZE(b_counter_config_test_oa);
-
- dev_priv->perf.oa.flex_regs =
- flex_eu_config_test_oa;
- dev_priv->perf.oa.flex_regs_len =
- ARRAY_SIZE(flex_eu_config_test_oa);
-
- return 0;
- default:
- return -ENODEV;
- }
-}
-
-static ssize_t
-show_render_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_BASIC);
-}
-
-static struct device_attribute dev_attr_render_basic_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_render_basic_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_render_basic[] = {
- &dev_attr_render_basic_id.attr,
- NULL,
-};
-
-static struct attribute_group group_render_basic = {
- .name = "bad77c24-cc64-480d-99bf-e7b740713800",
- .attrs = attrs_render_basic,
-};
-
-static ssize_t
-show_compute_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_BASIC);
-}
-
-static struct device_attribute dev_attr_compute_basic_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_compute_basic_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_compute_basic[] = {
- &dev_attr_compute_basic_id.attr,
- NULL,
-};
-
-static struct attribute_group group_compute_basic = {
- .name = "7277228f-e7f3-4743-945a-6a2049d11377",
- .attrs = attrs_compute_basic,
-};
-
-static ssize_t
-show_render_pipe_profile_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_PIPE_PROFILE);
-}
-
-static struct device_attribute dev_attr_render_pipe_profile_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_render_pipe_profile_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_render_pipe_profile[] = {
- &dev_attr_render_pipe_profile_id.attr,
- NULL,
-};
-
-static struct attribute_group group_render_pipe_profile = {
- .name = "463c668c-3f60-49b6-8f85-d995b635b3b2",
- .attrs = attrs_render_pipe_profile,
-};
-
-static ssize_t
-show_memory_reads_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_READS);
-}
-
-static struct device_attribute dev_attr_memory_reads_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_memory_reads_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_memory_reads[] = {
- &dev_attr_memory_reads_id.attr,
- NULL,
-};
-
-static struct attribute_group group_memory_reads = {
- .name = "3ae6e74c-72c3-4040-9bd0-7961430b8cc8",
- .attrs = attrs_memory_reads,
-};
-
-static ssize_t
-show_memory_writes_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_WRITES);
-}
-
-static struct device_attribute dev_attr_memory_writes_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_memory_writes_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_memory_writes[] = {
- &dev_attr_memory_writes_id.attr,
- NULL,
-};
-
-static struct attribute_group group_memory_writes = {
- .name = "055f256d-4052-467c-8dec-6064a4806433",
- .attrs = attrs_memory_writes,
-};
-
-static ssize_t
-show_compute_extended_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_EXTENDED);
-}
-
-static struct device_attribute dev_attr_compute_extended_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_compute_extended_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_compute_extended[] = {
- &dev_attr_compute_extended_id.attr,
- NULL,
-};
-
-static struct attribute_group group_compute_extended = {
- .name = "753972d4-87cd-4460-824d-754463ac5054",
- .attrs = attrs_compute_extended,
-};
-
-static ssize_t
-show_compute_l3_cache_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_L3_CACHE);
-}
-
-static struct device_attribute dev_attr_compute_l3_cache_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_compute_l3_cache_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_compute_l3_cache[] = {
- &dev_attr_compute_l3_cache_id.attr,
- NULL,
-};
-
-static struct attribute_group group_compute_l3_cache = {
- .name = "4e4392e9-8f73-457b-ab44-b49f7a0c733b",
- .attrs = attrs_compute_l3_cache,
-};
-
-static ssize_t
-show_hdc_and_sf_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_HDC_AND_SF);
-}
-
-static struct device_attribute dev_attr_hdc_and_sf_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_hdc_and_sf_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_hdc_and_sf[] = {
- &dev_attr_hdc_and_sf_id.attr,
- NULL,
-};
-
-static struct attribute_group group_hdc_and_sf = {
- .name = "730d95dd-7da8-4e1c-ab8d-c0eb1e4c1805",
- .attrs = attrs_hdc_and_sf,
-};
-
-static ssize_t
-show_l3_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_L3_1);
-}
-
-static struct device_attribute dev_attr_l3_1_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_l3_1_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_l3_1[] = {
- &dev_attr_l3_1_id.attr,
- NULL,
-};
-
-static struct attribute_group group_l3_1 = {
- .name = "d9e86d70-462b-462a-851e-fd63e8c13d63",
- .attrs = attrs_l3_1,
-};
-
-static ssize_t
-show_l3_2_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_L3_2);
-}
-
-static struct device_attribute dev_attr_l3_2_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_l3_2_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_l3_2[] = {
- &dev_attr_l3_2_id.attr,
- NULL,
-};
-
-static struct attribute_group group_l3_2 = {
- .name = "52200424-6ee9-48b3-b7fa-0afcf1975e4d",
- .attrs = attrs_l3_2,
-};
-
-static ssize_t
-show_l3_3_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_L3_3);
-}
-
-static struct device_attribute dev_attr_l3_3_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_l3_3_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_l3_3[] = {
- &dev_attr_l3_3_id.attr,
- NULL,
-};
-
-static struct attribute_group group_l3_3 = {
- .name = "1988315f-0a26-44df-acb0-df7ec86b1456",
- .attrs = attrs_l3_3,
-};
-
-static ssize_t
-show_rasterizer_and_pixel_backend_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_RASTERIZER_AND_PIXEL_BACKEND);
-}
-
-static struct device_attribute dev_attr_rasterizer_and_pixel_backend_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_rasterizer_and_pixel_backend_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_rasterizer_and_pixel_backend[] = {
- &dev_attr_rasterizer_and_pixel_backend_id.attr,
- NULL,
-};
-
-static struct attribute_group group_rasterizer_and_pixel_backend = {
- .name = "f1f17ca7-286e-4ae5-9d15-9fccad6c665d",
- .attrs = attrs_rasterizer_and_pixel_backend,
-};
-
-static ssize_t
-show_sampler_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_SAMPLER);
-}
-
-static struct device_attribute dev_attr_sampler_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_sampler_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_sampler[] = {
- &dev_attr_sampler_id.attr,
- NULL,
-};
-
-static struct attribute_group group_sampler = {
- .name = "00a9e0fb-3d2e-4405-852c-dce6334ffb3b",
- .attrs = attrs_sampler,
-};
-
-static ssize_t
-show_tdl_1_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_TDL_1);
-}
-
-static struct device_attribute dev_attr_tdl_1_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_tdl_1_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_tdl_1[] = {
- &dev_attr_tdl_1_id.attr,
- NULL,
-};
-
-static struct attribute_group group_tdl_1 = {
- .name = "13dcc50a-7ec0-409b-99d6-a3f932cedcb3",
- .attrs = attrs_tdl_1,
-};
-
-static ssize_t
-show_tdl_2_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_TDL_2);
-}
-
-static struct device_attribute dev_attr_tdl_2_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_tdl_2_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_tdl_2[] = {
- &dev_attr_tdl_2_id.attr,
- NULL,
-};
-
-static struct attribute_group group_tdl_2 = {
- .name = "97875e21-6624-4aee-9191-682feb3eae21",
- .attrs = attrs_tdl_2,
-};
-
-static ssize_t
-show_compute_extra_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_EXTRA);
-}
-
-static struct device_attribute dev_attr_compute_extra_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_compute_extra_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_compute_extra[] = {
- &dev_attr_compute_extra_id.attr,
- NULL,
-};
-
-static struct attribute_group group_compute_extra = {
- .name = "a5aa857d-e8f0-4dfa-8981-ce340fa748fd",
- .attrs = attrs_compute_extra,
-};
-
-static ssize_t
-show_vme_pipe_id(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", METRIC_SET_ID_VME_PIPE);
-}
-
-static struct device_attribute dev_attr_vme_pipe_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_vme_pipe_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_vme_pipe[] = {
- &dev_attr_vme_pipe_id.attr,
- NULL,
-};
-
-static struct attribute_group group_vme_pipe = {
- .name = "0e8d8b86-4ee7-4cdd-aaaa-58adc92cb29e",
- .attrs = attrs_vme_pipe,
-};
-
static ssize_t
show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", METRIC_SET_ID_TEST_OA);
+ return sprintf(buf, "1\n");
}
-static struct device_attribute dev_attr_test_oa_id = {
- .attr = { .name = "id", .mode = 0444 },
- .show = show_test_oa_id,
- .store = NULL,
-};
-
-static struct attribute *attrs_test_oa[] = {
- &dev_attr_test_oa_id.attr,
- NULL,
-};
-
-static struct attribute_group group_test_oa = {
- .name = "882fa433-1f4a-4a67-a962-c741888fe5f5",
- .attrs = attrs_test_oa,
-};
-
-int
-i915_perf_register_sysfs_sklgt4(struct drm_i915_private *dev_priv)
+void
+i915_perf_load_test_config_sklgt4(struct drm_i915_private *dev_priv)
{
- const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
- int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];
- int ret = 0;
+ strncpy(dev_priv->perf.oa.test_config.uuid,
+ "882fa433-1f4a-4a67-a962-c741888fe5f5",
+ UUID_STRING_LEN);
+ dev_priv->perf.oa.test_config.id = 1;
- if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_basic);
- if (ret)
- goto error_render_basic;
- }
- if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
- if (ret)
- goto error_compute_basic;
- }
- if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
- if (ret)
- goto error_render_pipe_profile;
- }
- if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
- if (ret)
- goto error_memory_reads;
- }
- if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
- if (ret)
- goto error_memory_writes;
- }
- if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
- if (ret)
- goto error_compute_extended;
- }
- if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
- if (ret)
- goto error_compute_l3_cache;
- }
- if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
- if (ret)
- goto error_hdc_and_sf;
- }
- if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_1);
- if (ret)
- goto error_l3_1;
- }
- if (get_l3_2_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_2);
- if (ret)
- goto error_l3_2;
- }
- if (get_l3_3_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_l3_3);
- if (ret)
- goto error_l3_3;
- }
- if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
- if (ret)
- goto error_rasterizer_and_pixel_backend;
- }
- if (get_sampler_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_sampler);
- if (ret)
- goto error_sampler;
- }
- if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
- if (ret)
- goto error_tdl_1;
- }
- if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
- if (ret)
- goto error_tdl_2;
- }
- if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
- if (ret)
- goto error_compute_extra;
- }
- if (get_vme_pipe_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_vme_pipe);
- if (ret)
- goto error_vme_pipe;
- }
- if (get_test_oa_mux_config(dev_priv, mux_regs, mux_lens)) {
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_test_oa);
- if (ret)
- goto error_test_oa;
- }
+ dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
+ dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
- return 0;
+ dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
+ dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
-error_test_oa:
- if (get_vme_pipe_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_vme_pipe);
-error_vme_pipe:
- if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
-error_compute_extra:
- if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
-error_tdl_2:
- if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
-error_tdl_1:
- if (get_sampler_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler);
-error_sampler:
- if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
-error_rasterizer_and_pixel_backend:
- if (get_l3_3_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_3);
-error_l3_3:
- if (get_l3_2_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_2);
-error_l3_2:
- if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_1);
-error_l3_1:
- if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
-error_hdc_and_sf:
- if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
-error_compute_l3_cache:
- if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
-error_compute_extended:
- if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
-error_memory_writes:
- if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
-error_memory_reads:
- if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
-error_render_pipe_profile:
- if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
-error_compute_basic:
- if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
-error_render_basic:
- return ret;
-}
+ dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
+ dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
-void
-i915_perf_unregister_sysfs_sklgt4(struct drm_i915_private *dev_priv)
-{
- const struct i915_oa_reg *mux_regs[ARRAY_SIZE(dev_priv->perf.oa.mux_regs)];
- int mux_lens[ARRAY_SIZE(dev_priv->perf.oa.mux_regs_lens)];
+ dev_priv->perf.oa.test_config.sysfs_metric.name = "882fa433-1f4a-4a67-a962-c741888fe5f5";
+ dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
+
+ dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
- if (get_render_basic_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
- if (get_compute_basic_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
- if (get_render_pipe_profile_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_pipe_profile);
- if (get_memory_reads_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
- if (get_memory_writes_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
- if (get_compute_extended_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
- if (get_compute_l3_cache_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_l3_cache);
- if (get_hdc_and_sf_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_hdc_and_sf);
- if (get_l3_1_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_1);
- if (get_l3_2_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_2);
- if (get_l3_3_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_l3_3);
- if (get_rasterizer_and_pixel_backend_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_rasterizer_and_pixel_backend);
- if (get_sampler_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler);
- if (get_tdl_1_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_1);
- if (get_tdl_2_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_tdl_2);
- if (get_compute_extra_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extra);
- if (get_vme_pipe_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_vme_pipe);
- if (get_test_oa_mux_config(dev_priv, mux_regs, mux_lens))
- sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_test_oa);
+ dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
+ dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
+ dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
}
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt4.h b/drivers/gpu/drm/i915/i915_oa_sklgt4.h
index 1b718f15f62e..944fd525c8b1 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt4.h
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt4.h
@@ -29,12 +29,6 @@
#ifndef __I915_OA_SKLGT4_H__
#define __I915_OA_SKLGT4_H__
-extern int i915_oa_n_builtin_metric_sets_sklgt4;
-
-extern int i915_oa_select_metric_set_sklgt4(struct drm_i915_private *dev_priv);
-
-extern int i915_perf_register_sysfs_sklgt4(struct drm_i915_private *dev_priv);
-
-extern void i915_perf_unregister_sysfs_sklgt4(struct drm_i915_private *dev_priv);
+extern void i915_perf_load_test_config_sklgt4(struct drm_i915_private *dev_priv);
#endif
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index b6a7e363d076..8ab003dca113 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -46,7 +46,7 @@ struct i915_params i915 __read_mostly = {
.prefault_disable = 0,
.load_detect_test = 0,
.force_reset_modeset_test = 0,
- .reset = true,
+ .reset = 2,
.error_capture = true,
.invert_brightness = 0,
.disable_display = 0,
@@ -115,8 +115,12 @@ MODULE_PARM_DESC(vbt_sdvo_panel_type,
"Override/Ignore selection of SDVO panel mode in the VBT "
"(-2=ignore, -1=auto [default], index in VBT BIOS table)");
-module_param_named_unsafe(reset, i915.reset, bool, 0600);
-MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
+module_param_named_unsafe(reset, i915.reset, int, 0600);
+MODULE_PARM_DESC(reset, "Attempt GPU resets (0=disabled, 1=full gpu reset, 2=engine reset [default])");
+
+module_param_named_unsafe(vbt_firmware, i915.vbt_firmware, charp, 0400);
+MODULE_PARM_DESC(vbt_firmware,
+ "Load VBT from specified file under /lib/firmware");
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
module_param_named(error_capture, i915.error_capture, bool, 0600);
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 34148cc8637c..ac844709c97e 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -28,6 +28,7 @@
#include <linux/cache.h> /* for __read_mostly */
#define I915_PARAMS_FOR_EACH(func) \
+ func(char *, vbt_firmware); \
func(int, modeset); \
func(int, panel_ignore_lid); \
func(int, semaphores); \
@@ -51,6 +52,7 @@
func(int, use_mmio_flip); \
func(int, mmio_debug); \
func(int, edp_vswing); \
+ func(int, reset); \
func(unsigned int, inject_load_failure); \
/* leave bools at the end to not create holes */ \
func(bool, alpha_support); \
@@ -60,7 +62,6 @@
func(bool, prefault_disable); \
func(bool, load_detect_test); \
func(bool, force_reset_modeset_test); \
- func(bool, reset); \
func(bool, error_capture); \
func(bool, disable_display); \
func(bool, verbose_state_checks); \
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 506ec32b9e53..09d97e0990b7 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -310,7 +310,8 @@ static const struct intel_device_info intel_haswell_info = {
BDW_COLORS, \
.has_logical_ring_contexts = 1, \
.has_full_48bit_ppgtt = 1, \
- .has_64bit_reloc = 1
+ .has_64bit_reloc = 1, \
+ .has_reset_engine = 1
#define BDW_PLATFORM \
BDW_FEATURES, \
@@ -342,6 +343,7 @@ static const struct intel_device_info intel_cherryview_info = {
.has_gmch_display = 1,
.has_aliasing_ppgtt = 1,
.has_full_ppgtt = 1,
+ .has_reset_engine = 1,
.display_mmio_offset = VLV_DISPLAY_BASE,
GEN_CHV_PIPEOFFSETS,
CURSOR_OFFSETS,
@@ -387,6 +389,7 @@ static const struct intel_device_info intel_skylake_gt3_info = {
.has_aliasing_ppgtt = 1, \
.has_full_ppgtt = 1, \
.has_full_48bit_ppgtt = 1, \
+ .has_reset_engine = 1, \
GEN_DEFAULT_PIPEOFFSETS, \
IVB_CURSOR_OFFSETS, \
BDW_COLORS
@@ -395,6 +398,7 @@ static const struct intel_device_info intel_broxton_info = {
GEN9_LP_FEATURES,
.platform = INTEL_BROXTON,
.ddb_size = 512,
+ .has_reset_engine = false,
};
static const struct intel_device_info intel_geminilake_info = {
@@ -446,6 +450,7 @@ static const struct intel_device_info intel_cannonlake_info = {
.gen = 10,
.ddb_size = 1024,
.has_csr = 1,
+ .color = { .degamma_lut_size = 0, .gamma_lut_size = 1024 }
};
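
The two changes above work together: i915.reset becomes an integer knob (0=disabled, 1=full GPU reset, 2=engine reset) and the new has_reset_engine device-info flag marks which platforms support per-engine resets. A minimal sketch of how the driver could combine them follows; the helper name is illustrative only and not taken from this patch, and the real gating logic may involve further conditions (e.g. GuC submission).

/* Illustrative only: gate per-engine reset on both the tri-state modparam
 * and the per-platform capability flag added above. */
static inline bool intel_engine_reset_allowed(struct drm_i915_private *dev_priv)
{
        return INTEL_INFO(dev_priv)->has_reset_engine && i915.reset >= 2;
}
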
/*
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 9cd22f83b0cf..94185d610673 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -193,6 +193,7 @@
#include <linux/anon_inodes.h>
#include <linux/sizes.h>
+#include <linux/uuid.h>
#include "i915_drv.h"
#include "i915_oa_hsw.h"
@@ -357,6 +358,54 @@ struct perf_open_properties {
int oa_period_exponent;
};
+static void free_oa_config(struct drm_i915_private *dev_priv,
+ struct i915_oa_config *oa_config)
+{
+ if (!IS_ERR(oa_config->flex_regs))
+ kfree(oa_config->flex_regs);
+ if (!IS_ERR(oa_config->b_counter_regs))
+ kfree(oa_config->b_counter_regs);
+ if (!IS_ERR(oa_config->mux_regs))
+ kfree(oa_config->mux_regs);
+ kfree(oa_config);
+}
+
+static void put_oa_config(struct drm_i915_private *dev_priv,
+ struct i915_oa_config *oa_config)
+{
+ if (!atomic_dec_and_test(&oa_config->ref_count))
+ return;
+
+ free_oa_config(dev_priv, oa_config);
+}
+
+static int get_oa_config(struct drm_i915_private *dev_priv,
+ int metrics_set,
+ struct i915_oa_config **out_config)
+{
+ int ret;
+
+ if (metrics_set == 1) {
+ *out_config = &dev_priv->perf.oa.test_config;
+ atomic_inc(&dev_priv->perf.oa.test_config.ref_count);
+ return 0;
+ }
+
+ ret = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
+ if (ret)
+ return ret;
+
+ *out_config = idr_find(&dev_priv->perf.metrics_idr, metrics_set);
+ if (!*out_config)
+ ret = -EINVAL;
+ else
+ atomic_inc(&(*out_config)->ref_count);
+
+ mutex_unlock(&dev_priv->perf.metrics_lock);
+
+ return ret;
+}
+
static u32 gen8_oa_hw_tail_read(struct drm_i915_private *dev_priv)
{
return I915_READ(GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
@@ -1246,10 +1295,12 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
BUG_ON(stream != dev_priv->perf.oa.exclusive_stream);
/*
- * Unset exclusive_stream first, it might be checked while
- * disabling the metric set on gen8+.
+ * Unset exclusive_stream first, it will be checked while disabling
+ * the metric set on gen8+.
*/
+ mutex_lock(&dev_priv->drm.struct_mutex);
dev_priv->perf.oa.exclusive_stream = NULL;
+ mutex_unlock(&dev_priv->drm.struct_mutex);
dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
@@ -1261,6 +1312,8 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
if (stream->ctx)
oa_put_render_ctx_id(stream);
+ put_oa_config(dev_priv, stream->oa_config);
+
if (dev_priv->perf.oa.spurious_report_rs.missed) {
DRM_NOTE("%d spurious OA report notices suppressed due to ratelimiting\n",
dev_priv->perf.oa.spurious_report_rs.missed);
@@ -1440,9 +1493,9 @@ unlock:
static void config_oa_regs(struct drm_i915_private *dev_priv,
const struct i915_oa_reg *regs,
- int n_regs)
+ u32 n_regs)
{
- int i;
+ u32 i;
for (i = 0; i < n_regs; i++) {
const struct i915_oa_reg *reg = regs + i;
@@ -1451,17 +1504,9 @@ static void config_oa_regs(struct drm_i915_private *dev_priv,
}
}
-static int hsw_enable_metric_set(struct drm_i915_private *dev_priv)
+static int hsw_enable_metric_set(struct drm_i915_private *dev_priv,
+ const struct i915_oa_config *oa_config)
{
- int ret = i915_oa_select_metric_set_hsw(dev_priv);
- int i;
-
- if (ret)
- return ret;
-
- I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) |
- GT_NOA_ENABLE));
-
/* PRM:
*
* OA unit is using “crclk” for its functionality. When trunk
@@ -1476,10 +1521,7 @@ static int hsw_enable_metric_set(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) |
GEN6_CSUNIT_CLOCK_GATE_DISABLE));
- for (i = 0; i < dev_priv->perf.oa.n_mux_configs; i++) {
- config_oa_regs(dev_priv, dev_priv->perf.oa.mux_regs[i],
- dev_priv->perf.oa.mux_regs_lens[i]);
- }
+ config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);
/* It apparently takes a fairly long time for a new MUX
* configuration to be applied after these register writes.
@@ -1504,8 +1546,8 @@ static int hsw_enable_metric_set(struct drm_i915_private *dev_priv)
*/
usleep_range(15000, 20000);
- config_oa_regs(dev_priv, dev_priv->perf.oa.b_counter_regs,
- dev_priv->perf.oa.b_counter_regs_len);
+ config_oa_regs(dev_priv, oa_config->b_counter_regs,
+ oa_config->b_counter_regs_len);
return 0;
}
@@ -1529,11 +1571,10 @@ static void hsw_disable_metric_set(struct drm_i915_private *dev_priv)
* in the case that the OA unit has been disabled.
*/
static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
- u32 *reg_state)
+ u32 *reg_state,
+ const struct i915_oa_config *oa_config)
{
struct drm_i915_private *dev_priv = ctx->i915;
- const struct i915_oa_reg *flex_regs = dev_priv->perf.oa.flex_regs;
- int n_flex_regs = dev_priv->perf.oa.flex_regs_len;
u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset;
u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset;
/* The MMIO offsets for Flex EU registers aren't contiguous */
@@ -1565,12 +1606,15 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
* will be an explicit 'No Event' we can select, but not yet...
*/
u32 value = 0;
- int j;
- for (j = 0; j < n_flex_regs; j++) {
- if (i915_mmio_reg_offset(flex_regs[j].addr) == mmio) {
- value = flex_regs[j].value;
- break;
+ if (oa_config) {
+ u32 j;
+
+ for (j = 0; j < oa_config->flex_regs_len; j++) {
+ if (i915_mmio_reg_offset(oa_config->flex_regs[j].addr) == mmio) {
+ value = oa_config->flex_regs[j].value;
+ break;
+ }
}
}
@@ -1583,11 +1627,10 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
* Same as gen8_update_reg_state_unlocked only through the batchbuffer. This
* is only used by the kernel context.
*/
-static int gen8_emit_oa_config(struct drm_i915_gem_request *req)
+static int gen8_emit_oa_config(struct drm_i915_gem_request *req,
+ const struct i915_oa_config *oa_config)
{
struct drm_i915_private *dev_priv = req->i915;
- const struct i915_oa_reg *flex_regs = dev_priv->perf.oa.flex_regs;
- int n_flex_regs = dev_priv->perf.oa.flex_regs_len;
/* The MMIO offsets for Flex EU registers aren't contiguous */
u32 flex_mmio[] = {
i915_mmio_reg_offset(EU_PERF_CNTL0),
@@ -1601,11 +1644,11 @@ static int gen8_emit_oa_config(struct drm_i915_gem_request *req)
u32 *cs;
int i;
- cs = intel_ring_begin(req, n_flex_regs * 2 + 4);
+ cs = intel_ring_begin(req, ARRAY_SIZE(flex_mmio) * 2 + 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
- *cs++ = MI_LOAD_REGISTER_IMM(n_flex_regs + 1);
+ *cs++ = MI_LOAD_REGISTER_IMM(ARRAY_SIZE(flex_mmio) + 1);
*cs++ = i915_mmio_reg_offset(GEN8_OACTXCONTROL);
*cs++ = (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
@@ -1622,12 +1665,15 @@ static int gen8_emit_oa_config(struct drm_i915_gem_request *req)
* yet...
*/
u32 value = 0;
- int j;
- for (j = 0; j < n_flex_regs; j++) {
- if (i915_mmio_reg_offset(flex_regs[j].addr) == mmio) {
- value = flex_regs[j].value;
- break;
+ if (oa_config) {
+ u32 j;
+
+ for (j = 0; j < oa_config->flex_regs_len; j++) {
+ if (i915_mmio_reg_offset(oa_config->flex_regs[j].addr) == mmio) {
+ value = oa_config->flex_regs[j].value;
+ break;
+ }
}
}
@@ -1641,7 +1687,8 @@ static int gen8_emit_oa_config(struct drm_i915_gem_request *req)
return 0;
}
-static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_priv)
+static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_priv,
+ const struct i915_oa_config *oa_config)
{
struct intel_engine_cs *engine = dev_priv->engine[RCS];
struct i915_gem_timeline *timeline;
@@ -1656,7 +1703,7 @@ static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_pr
if (IS_ERR(req))
return PTR_ERR(req);
- ret = gen8_emit_oa_config(req);
+ ret = gen8_emit_oa_config(req, oa_config);
if (ret) {
i915_add_request(req);
return ret;
@@ -1707,6 +1754,7 @@ static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_pr
* Note: it's only the RCS/Render context that has any OA state.
*/
static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
+ const struct i915_oa_config *oa_config,
bool interruptible)
{
struct i915_gem_context *ctx;
@@ -1724,7 +1772,7 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
}
/* Switch away from any user context. */
- ret = gen8_switch_to_updated_kernel_context(dev_priv);
+ ret = gen8_switch_to_updated_kernel_context(dev_priv, oa_config);
if (ret)
goto out;
@@ -1746,7 +1794,7 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
goto out;
/* Update all contexts now that we've stalled the submission. */
- list_for_each_entry(ctx, &dev_priv->context_list, link) {
+ list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
struct intel_context *ce = &ctx->engine[RCS];
u32 *regs;
@@ -1763,7 +1811,7 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
ce->state->obj->mm.dirty = true;
regs += LRC_STATE_PN * PAGE_SIZE / sizeof(*regs);
- gen8_update_reg_state_unlocked(ctx, regs);
+ gen8_update_reg_state_unlocked(ctx, regs, oa_config);
i915_gem_object_unpin_map(ce->state->obj);
}
@@ -1774,13 +1822,10 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
return ret;
}
-static int gen8_enable_metric_set(struct drm_i915_private *dev_priv)
+static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
+ const struct i915_oa_config *oa_config)
{
- int ret = dev_priv->perf.oa.ops.select_metric_set(dev_priv);
- int i;
-
- if (ret)
- return ret;
+ int ret;
/*
* We disable slice/unslice clock ratio change reports on SKL since
@@ -1817,19 +1862,14 @@ static int gen8_enable_metric_set(struct drm_i915_private *dev_priv)
* to make sure all slices/subslices are ON before writing to NOA
* registers.
*/
- ret = gen8_configure_all_contexts(dev_priv, true);
+ ret = gen8_configure_all_contexts(dev_priv, oa_config, true);
if (ret)
return ret;
- I915_WRITE(GDT_CHICKEN_BITS, 0xA0);
- for (i = 0; i < dev_priv->perf.oa.n_mux_configs; i++) {
- config_oa_regs(dev_priv, dev_priv->perf.oa.mux_regs[i],
- dev_priv->perf.oa.mux_regs_lens[i]);
- }
- I915_WRITE(GDT_CHICKEN_BITS, 0x80);
+ config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);
- config_oa_regs(dev_priv, dev_priv->perf.oa.b_counter_regs,
- dev_priv->perf.oa.b_counter_regs_len);
+ config_oa_regs(dev_priv, oa_config->b_counter_regs,
+ oa_config->b_counter_regs_len);
return 0;
}
@@ -1837,7 +1877,11 @@ static int gen8_enable_metric_set(struct drm_i915_private *dev_priv)
static void gen8_disable_metric_set(struct drm_i915_private *dev_priv)
{
/* Reset all contexts' slices/subslices configurations. */
- gen8_configure_all_contexts(dev_priv, false);
+ gen8_configure_all_contexts(dev_priv, NULL, false);
+
+ I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
+ ~GT_NOA_ENABLE));
+
}
static void gen7_oa_enable(struct drm_i915_private *dev_priv)
@@ -2011,11 +2055,6 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
return -EBUSY;
}
- if (!props->metrics_set) {
- DRM_DEBUG("OA metric set not specified\n");
- return -EINVAL;
- }
-
if (!props->oa_format) {
DRM_DEBUG("OA report format not specified\n");
return -EINVAL;
@@ -2055,8 +2094,6 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
dev_priv->perf.oa.oa_buffer.format =
dev_priv->perf.oa.oa_formats[props->oa_format].format;
- dev_priv->perf.oa.metrics_set = props->metrics_set;
-
dev_priv->perf.oa.periodic = props->oa_periodic;
if (dev_priv->perf.oa.periodic)
dev_priv->perf.oa.period_exponent = props->oa_period_exponent;
@@ -2067,6 +2104,10 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
return ret;
}
+ ret = get_oa_config(dev_priv, props->metrics_set, &stream->oa_config);
+ if (ret)
+ goto err_config;
+
/* PRM - observability performance counters:
*
* OACONTROL, performance counter enable, note:
@@ -2086,22 +2127,39 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
if (ret)
goto err_oa_buf_alloc;
- ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv);
+ ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv,
+ stream->oa_config);
if (ret)
goto err_enable;
stream->ops = &i915_oa_stream_ops;
+ /* Lock device for exclusive_stream access late because
+ * enable_metric_set() might lock as well on gen8+.
+ */
+ ret = i915_mutex_lock_interruptible(&dev_priv->drm);
+ if (ret)
+ goto err_lock;
+
dev_priv->perf.oa.exclusive_stream = stream;
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
return 0;
+err_lock:
+ dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
+
err_enable:
free_oa_buffer(dev_priv);
err_oa_buf_alloc:
+ put_oa_config(dev_priv, stream->oa_config);
+
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
intel_runtime_pm_put(dev_priv);
+
+err_config:
if (stream->ctx)
oa_put_render_ctx_id(stream);
@@ -2112,15 +2170,14 @@ void i915_oa_init_reg_state(struct intel_engine_cs *engine,
struct i915_gem_context *ctx,
u32 *reg_state)
{
- struct drm_i915_private *dev_priv = engine->i915;
+ struct i915_perf_stream *stream;
if (engine->id != RCS)
return;
- if (!dev_priv->perf.initialized)
- return;
-
- gen8_update_reg_state_unlocked(ctx, reg_state);
+ stream = engine->i915->perf.oa.exclusive_stream;
+ if (stream)
+ gen8_update_reg_state_unlocked(ctx, reg_state, stream->oa_config);
}
/**
@@ -2444,7 +2501,7 @@ static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
list_del(&stream->link);
if (stream->ctx)
- i915_gem_context_put_unlocked(stream->ctx);
+ i915_gem_context_put(stream->ctx);
kfree(stream);
}
@@ -2483,27 +2540,6 @@ static const struct file_operations fops = {
};
-static struct i915_gem_context *
-lookup_context(struct drm_i915_private *dev_priv,
- struct drm_i915_file_private *file_priv,
- u32 ctx_user_handle)
-{
- struct i915_gem_context *ctx;
- int ret;
-
- ret = i915_mutex_lock_interruptible(&dev_priv->drm);
- if (ret)
- return ERR_PTR(ret);
-
- ctx = i915_gem_context_lookup(file_priv, ctx_user_handle);
- if (!IS_ERR(ctx))
- i915_gem_context_get(ctx);
-
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
- return ctx;
-}
-
/**
* i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD
* @dev_priv: i915 device instance
@@ -2545,12 +2581,11 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
u32 ctx_handle = props->ctx_handle;
struct drm_i915_file_private *file_priv = file->driver_priv;
- specific_ctx = lookup_context(dev_priv, file_priv, ctx_handle);
- if (IS_ERR(specific_ctx)) {
- ret = PTR_ERR(specific_ctx);
- if (ret != -EINTR)
- DRM_DEBUG("Failed to look up context with ID %u for opening perf stream\n",
- ctx_handle);
+ specific_ctx = i915_gem_context_lookup(file_priv, ctx_handle);
+ if (!specific_ctx) {
+ DRM_DEBUG("Failed to look up context with ID %u for opening perf stream\n",
+ ctx_handle);
+ ret = -ENOENT;
goto err;
}
}
@@ -2633,7 +2668,7 @@ err_alloc:
kfree(stream);
err_ctx:
if (specific_ctx)
- i915_gem_context_put_unlocked(specific_ctx);
+ i915_gem_context_put(specific_ctx);
err:
return ret;
}
@@ -2665,7 +2700,7 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
struct perf_open_properties *props)
{
u64 __user *uprop = uprops;
- int i;
+ u32 i;
memset(props, 0, sizeof(struct perf_open_properties));
@@ -2712,8 +2747,7 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
props->sample_flags |= SAMPLE_OA_REPORT;
break;
case DRM_I915_PERF_PROP_OA_METRICS_SET:
- if (value == 0 ||
- value > dev_priv->perf.oa.n_builtin_sets) {
+ if (value == 0) {
DRM_DEBUG("Unknown OA metric set ID\n");
return -EINVAL;
}
@@ -2852,6 +2886,8 @@ int i915_perf_open_ioctl(struct drm_device *dev, void *data,
*/
void i915_perf_register(struct drm_i915_private *dev_priv)
{
+ int ret;
+
if (!dev_priv->perf.initialized)
return;
@@ -2867,44 +2903,42 @@ void i915_perf_register(struct drm_i915_private *dev_priv)
if (!dev_priv->perf.metrics_kobj)
goto exit;
+ sysfs_attr_init(&dev_priv->perf.oa.test_config.sysfs_metric_id.attr);
+
if (IS_HASWELL(dev_priv)) {
- if (i915_perf_register_sysfs_hsw(dev_priv))
- goto sysfs_error;
+ i915_perf_load_test_config_hsw(dev_priv);
} else if (IS_BROADWELL(dev_priv)) {
- if (i915_perf_register_sysfs_bdw(dev_priv))
- goto sysfs_error;
+ i915_perf_load_test_config_bdw(dev_priv);
} else if (IS_CHERRYVIEW(dev_priv)) {
- if (i915_perf_register_sysfs_chv(dev_priv))
- goto sysfs_error;
+ i915_perf_load_test_config_chv(dev_priv);
} else if (IS_SKYLAKE(dev_priv)) {
- if (IS_SKL_GT2(dev_priv)) {
- if (i915_perf_register_sysfs_sklgt2(dev_priv))
- goto sysfs_error;
- } else if (IS_SKL_GT3(dev_priv)) {
- if (i915_perf_register_sysfs_sklgt3(dev_priv))
- goto sysfs_error;
- } else if (IS_SKL_GT4(dev_priv)) {
- if (i915_perf_register_sysfs_sklgt4(dev_priv))
- goto sysfs_error;
- } else
- goto sysfs_error;
+ if (IS_SKL_GT2(dev_priv))
+ i915_perf_load_test_config_sklgt2(dev_priv);
+ else if (IS_SKL_GT3(dev_priv))
+ i915_perf_load_test_config_sklgt3(dev_priv);
+ else if (IS_SKL_GT4(dev_priv))
+ i915_perf_load_test_config_sklgt4(dev_priv);
} else if (IS_BROXTON(dev_priv)) {
- if (i915_perf_register_sysfs_bxt(dev_priv))
- goto sysfs_error;
+ i915_perf_load_test_config_bxt(dev_priv);
} else if (IS_KABYLAKE(dev_priv)) {
- if (IS_KBL_GT2(dev_priv)) {
- if (i915_perf_register_sysfs_kblgt2(dev_priv))
- goto sysfs_error;
- } else if (IS_KBL_GT3(dev_priv)) {
- if (i915_perf_register_sysfs_kblgt3(dev_priv))
- goto sysfs_error;
- } else
- goto sysfs_error;
+ if (IS_KBL_GT2(dev_priv))
+ i915_perf_load_test_config_kblgt2(dev_priv);
+ else if (IS_KBL_GT3(dev_priv))
+ i915_perf_load_test_config_kblgt3(dev_priv);
} else if (IS_GEMINILAKE(dev_priv)) {
- if (i915_perf_register_sysfs_glk(dev_priv))
- goto sysfs_error;
+ i915_perf_load_test_config_glk(dev_priv);
}
+ if (dev_priv->perf.oa.test_config.id == 0)
+ goto sysfs_error;
+
+ ret = sysfs_create_group(dev_priv->perf.metrics_kobj,
+ &dev_priv->perf.oa.test_config.sysfs_metric);
+ if (ret)
+ goto sysfs_error;
+
+ atomic_set(&dev_priv->perf.oa.test_config.ref_count, 1);
+
goto exit;
sysfs_error:
@@ -2929,34 +2963,375 @@ void i915_perf_unregister(struct drm_i915_private *dev_priv)
if (!dev_priv->perf.metrics_kobj)
return;
- if (IS_HASWELL(dev_priv))
- i915_perf_unregister_sysfs_hsw(dev_priv);
- else if (IS_BROADWELL(dev_priv))
- i915_perf_unregister_sysfs_bdw(dev_priv);
- else if (IS_CHERRYVIEW(dev_priv))
- i915_perf_unregister_sysfs_chv(dev_priv);
- else if (IS_SKYLAKE(dev_priv)) {
- if (IS_SKL_GT2(dev_priv))
- i915_perf_unregister_sysfs_sklgt2(dev_priv);
- else if (IS_SKL_GT3(dev_priv))
- i915_perf_unregister_sysfs_sklgt3(dev_priv);
- else if (IS_SKL_GT4(dev_priv))
- i915_perf_unregister_sysfs_sklgt4(dev_priv);
- } else if (IS_BROXTON(dev_priv))
- i915_perf_unregister_sysfs_bxt(dev_priv);
- else if (IS_KABYLAKE(dev_priv)) {
- if (IS_KBL_GT2(dev_priv))
- i915_perf_unregister_sysfs_kblgt2(dev_priv);
- else if (IS_KBL_GT3(dev_priv))
- i915_perf_unregister_sysfs_kblgt3(dev_priv);
- } else if (IS_GEMINILAKE(dev_priv))
- i915_perf_unregister_sysfs_glk(dev_priv);
-
+ sysfs_remove_group(dev_priv->perf.metrics_kobj,
+ &dev_priv->perf.oa.test_config.sysfs_metric);
kobject_put(dev_priv->perf.metrics_kobj);
dev_priv->perf.metrics_kobj = NULL;
}
+static bool gen8_is_valid_flex_addr(struct drm_i915_private *dev_priv, u32 addr)
+{
+ static const i915_reg_t flex_eu_regs[] = {
+ EU_PERF_CNTL0,
+ EU_PERF_CNTL1,
+ EU_PERF_CNTL2,
+ EU_PERF_CNTL3,
+ EU_PERF_CNTL4,
+ EU_PERF_CNTL5,
+ EU_PERF_CNTL6,
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(flex_eu_regs); i++) {
+ if (flex_eu_regs[i].reg == addr)
+ return true;
+ }
+ return false;
+}
+
+static bool gen7_is_valid_b_counter_addr(struct drm_i915_private *dev_priv, u32 addr)
+{
+ return (addr >= OASTARTTRIG1.reg && addr <= OASTARTTRIG8.reg) ||
+ (addr >= OAREPORTTRIG1.reg && addr <= OAREPORTTRIG8.reg) ||
+ (addr >= OACEC0_0.reg && addr <= OACEC7_1.reg);
+}
+
+static bool gen7_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
+{
+ return addr == HALF_SLICE_CHICKEN2.reg ||
+ (addr >= MICRO_BP0_0.reg && addr <= NOA_WRITE.reg) ||
+ (addr >= OA_PERFCNT1_LO.reg && addr <= OA_PERFCNT2_HI.reg) ||
+ (addr >= OA_PERFMATRIX_LO.reg && addr <= OA_PERFMATRIX_HI.reg);
+}
+
+static bool gen8_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
+{
+ return gen7_is_valid_mux_addr(dev_priv, addr) ||
+ addr == WAIT_FOR_RC6_EXIT.reg ||
+ (addr >= RPM_CONFIG0.reg && addr <= NOA_CONFIG(8).reg);
+}
+
+static bool hsw_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
+{
+ return gen7_is_valid_mux_addr(dev_priv, addr) ||
+ (addr >= 0x25100 && addr <= 0x2FF90) ||
+ addr == 0x9ec0;
+}
+
+static bool chv_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
+{
+ return gen7_is_valid_mux_addr(dev_priv, addr) ||
+ (addr >= 0x182300 && addr <= 0x1823A4);
+}
+
+static uint32_t mask_reg_value(u32 reg, u32 val)
+{
+ /* HALF_SLICE_CHICKEN2 is programmed with the
+ * WaDisableSTUnitPowerOptimization workaround. Make sure the value
+ * programmed by userspace doesn't change this.
+ */
+ if (HALF_SLICE_CHICKEN2.reg == reg)
+ val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE);
+
+ /* WAIT_FOR_RC6_EXIT has only one bit fulfilling the function
+ * indicated by its name and a bunch of selection fields used by OA
+ * configs.
+ */
+ if (WAIT_FOR_RC6_EXIT.reg == reg)
+ val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE);
+
+ return val;
+}
+
+static struct i915_oa_reg *alloc_oa_regs(struct drm_i915_private *dev_priv,
+ bool (*is_valid)(struct drm_i915_private *dev_priv, u32 addr),
+ u32 __user *regs,
+ u32 n_regs)
+{
+ struct i915_oa_reg *oa_regs;
+ int err;
+ u32 i;
+
+ if (!n_regs)
+ return NULL;
+
+ if (!access_ok(VERIFY_READ, regs, n_regs * sizeof(u32) * 2))
+ return ERR_PTR(-EFAULT);
+
+ /* No is_valid function means we're not allowing any register to be programmed. */
+ GEM_BUG_ON(!is_valid);
+ if (!is_valid)
+ return ERR_PTR(-EINVAL);
+
+ oa_regs = kmalloc_array(n_regs, sizeof(*oa_regs), GFP_KERNEL);
+ if (!oa_regs)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < n_regs; i++) {
+ u32 addr, value;
+
+ err = get_user(addr, regs);
+ if (err)
+ goto addr_err;
+
+ if (!is_valid(dev_priv, addr)) {
+ DRM_DEBUG("Invalid oa_reg address: %X\n", addr);
+ err = -EINVAL;
+ goto addr_err;
+ }
+
+ err = get_user(value, regs + 1);
+ if (err)
+ goto addr_err;
+
+ oa_regs[i].addr = _MMIO(addr);
+ oa_regs[i].value = mask_reg_value(addr, value);
+
+ regs += 2;
+ }
+
+ return oa_regs;
+
+addr_err:
+ kfree(oa_regs);
+ return ERR_PTR(err);
+}
+
+static ssize_t show_dynamic_id(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i915_oa_config *oa_config =
+ container_of(attr, typeof(*oa_config), sysfs_metric_id);
+
+ return sprintf(buf, "%d\n", oa_config->id);
+}
+
+static int create_dynamic_oa_sysfs_entry(struct drm_i915_private *dev_priv,
+ struct i915_oa_config *oa_config)
+{
+ sysfs_attr_init(&oa_config->sysfs_metric_id.attr);
+ oa_config->sysfs_metric_id.attr.name = "id";
+ oa_config->sysfs_metric_id.attr.mode = S_IRUGO;
+ oa_config->sysfs_metric_id.show = show_dynamic_id;
+ oa_config->sysfs_metric_id.store = NULL;
+
+ oa_config->attrs[0] = &oa_config->sysfs_metric_id.attr;
+ oa_config->attrs[1] = NULL;
+
+ oa_config->sysfs_metric.name = oa_config->uuid;
+ oa_config->sysfs_metric.attrs = oa_config->attrs;
+
+ return sysfs_create_group(dev_priv->perf.metrics_kobj,
+ &oa_config->sysfs_metric);
+}
+
+/**
+ * i915_perf_add_config_ioctl - DRM ioctl() for userspace to add a new OA config
+ * @dev: drm device
+ * @data: ioctl data (pointer to struct drm_i915_perf_oa_config) copied from
+ * userspace (unvalidated)
+ * @file: drm file
+ *
+ * Validates the submitted OA registers to be saved into a new OA config that
+ * can then be used for programming the OA unit and its NOA network.
+ *
+ * Returns: A new allocated config number to be used with the perf open ioctl
+ * or a negative error code on failure.
+ */
+int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_perf_oa_config *args = data;
+ struct i915_oa_config *oa_config, *tmp;
+ int err, id;
+
+ if (!dev_priv->perf.initialized) {
+ DRM_DEBUG("i915 perf interface not available for this system\n");
+ return -ENOTSUPP;
+ }
+
+ if (!dev_priv->perf.metrics_kobj) {
+ DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
+ return -EINVAL;
+ }
+
+ if (i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
+ DRM_DEBUG("Insufficient privileges to add i915 OA config\n");
+ return -EACCES;
+ }
+
+ if ((!args->mux_regs_ptr || !args->n_mux_regs) &&
+ (!args->boolean_regs_ptr || !args->n_boolean_regs) &&
+ (!args->flex_regs_ptr || !args->n_flex_regs)) {
+ DRM_DEBUG("No OA registers given\n");
+ return -EINVAL;
+ }
+
+ oa_config = kzalloc(sizeof(*oa_config), GFP_KERNEL);
+ if (!oa_config) {
+ DRM_DEBUG("Failed to allocate memory for the OA config\n");
+ return -ENOMEM;
+ }
+
+ atomic_set(&oa_config->ref_count, 1);
+
+ if (!uuid_is_valid(args->uuid)) {
+ DRM_DEBUG("Invalid uuid format for OA config\n");
+ err = -EINVAL;
+ goto reg_err;
+ }
+
+ /* Last character in oa_config->uuid will be 0 because oa_config was
+ * allocated with kzalloc().
+ */
+ memcpy(oa_config->uuid, args->uuid, sizeof(args->uuid));
+
+ oa_config->mux_regs_len = args->n_mux_regs;
+ oa_config->mux_regs =
+ alloc_oa_regs(dev_priv,
+ dev_priv->perf.oa.ops.is_valid_mux_reg,
+ u64_to_user_ptr(args->mux_regs_ptr),
+ args->n_mux_regs);
+
+ if (IS_ERR(oa_config->mux_regs)) {
+ DRM_DEBUG("Failed to create OA config for mux_regs\n");
+ err = PTR_ERR(oa_config->mux_regs);
+ goto reg_err;
+ }
+
+ oa_config->b_counter_regs_len = args->n_boolean_regs;
+ oa_config->b_counter_regs =
+ alloc_oa_regs(dev_priv,
+ dev_priv->perf.oa.ops.is_valid_b_counter_reg,
+ u64_to_user_ptr(args->boolean_regs_ptr),
+ args->n_boolean_regs);
+
+ if (IS_ERR(oa_config->b_counter_regs)) {
+ DRM_DEBUG("Failed to create OA config for b_counter_regs\n");
+ err = PTR_ERR(oa_config->b_counter_regs);
+ goto reg_err;
+ }
+
+ if (INTEL_GEN(dev_priv) < 8) {
+ if (args->n_flex_regs != 0) {
+ err = -EINVAL;
+ goto reg_err;
+ }
+ } else {
+ oa_config->flex_regs_len = args->n_flex_regs;
+ oa_config->flex_regs =
+ alloc_oa_regs(dev_priv,
+ dev_priv->perf.oa.ops.is_valid_flex_reg,
+ u64_to_user_ptr(args->flex_regs_ptr),
+ args->n_flex_regs);
+
+ if (IS_ERR(oa_config->flex_regs)) {
+ DRM_DEBUG("Failed to create OA config for flex_regs\n");
+ err = PTR_ERR(oa_config->flex_regs);
+ goto reg_err;
+ }
+ }
+
+ err = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
+ if (err)
+ goto reg_err;
+
+ /* We shouldn't have too many configs, so this iteration shouldn't be
+ * too costly.
+ */
+ idr_for_each_entry(&dev_priv->perf.metrics_idr, tmp, id) {
+ if (!strcmp(tmp->uuid, oa_config->uuid)) {
+ DRM_DEBUG("OA config already exists with this uuid\n");
+ err = -EADDRINUSE;
+ goto sysfs_err;
+ }
+ }
+
+ err = create_dynamic_oa_sysfs_entry(dev_priv, oa_config);
+ if (err) {
+ DRM_DEBUG("Failed to create sysfs entry for OA config\n");
+ goto sysfs_err;
+ }
+
+ /* Config id 0 is invalid; id 1 is reserved for the kernel's stored test config. */
+ oa_config->id = idr_alloc(&dev_priv->perf.metrics_idr,
+ oa_config, 2,
+ 0, GFP_KERNEL);
+ if (oa_config->id < 0) {
+ DRM_DEBUG("Failed to create sysfs entry for OA config\n");
+ err = oa_config->id;
+ goto sysfs_err;
+ }
+
+ mutex_unlock(&dev_priv->perf.metrics_lock);
+
+ return oa_config->id;
+
+sysfs_err:
+ mutex_unlock(&dev_priv->perf.metrics_lock);
+reg_err:
+ put_oa_config(dev_priv, oa_config);
+ DRM_DEBUG("Failed to add new OA config\n");
+ return err;
+}
+
+/**
+ * i915_perf_remove_config_ioctl - DRM ioctl() for userspace to remove an OA config
+ * @dev: drm device
+ * @data: ioctl data (pointer to u64 integer) copied from userspace
+ * @file: drm file
+ *
+ * Configs can be removed while being used; they will stop appearing in sysfs
+ * and their content will be freed when the stream using the config is closed.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u64 *arg = data;
+ struct i915_oa_config *oa_config;
+ int ret;
+
+ if (!dev_priv->perf.initialized) {
+ DRM_DEBUG("i915 perf interface not available for this system\n");
+ return -ENOTSUPP;
+ }
+
+ if (i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
+ DRM_DEBUG("Insufficient privileges to remove i915 OA config\n");
+ return -EACCES;
+ }
+
+ ret = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
+ if (ret)
+ goto lock_err;
+
+ oa_config = idr_find(&dev_priv->perf.metrics_idr, *arg);
+ if (!oa_config) {
+ DRM_DEBUG("Failed to remove unknown OA config\n");
+ ret = -ENOENT;
+ goto config_err;
+ }
+
+ GEM_BUG_ON(*arg != oa_config->id);
+
+ sysfs_remove_group(dev_priv->perf.metrics_kobj,
+ &oa_config->sysfs_metric);
+
+ idr_remove(&dev_priv->perf.metrics_idr, *arg);
+ put_oa_config(dev_priv, oa_config);
+
+config_err:
+ mutex_unlock(&dev_priv->perf.metrics_lock);
+lock_err:
+ return ret;
+}
+
static struct ctl_table oa_table[] = {
{
.procname = "perf_stream_paranoid",
@@ -3010,9 +3385,14 @@ static struct ctl_table dev_root[] = {
*/
void i915_perf_init(struct drm_i915_private *dev_priv)
{
- dev_priv->perf.oa.n_builtin_sets = 0;
+ dev_priv->perf.oa.timestamp_frequency = 0;
if (IS_HASWELL(dev_priv)) {
+ dev_priv->perf.oa.ops.is_valid_b_counter_reg =
+ gen7_is_valid_b_counter_addr;
+ dev_priv->perf.oa.ops.is_valid_mux_reg =
+ hsw_is_valid_mux_addr;
+ dev_priv->perf.oa.ops.is_valid_flex_reg = NULL;
dev_priv->perf.oa.ops.init_oa_buffer = gen7_init_oa_buffer;
dev_priv->perf.oa.ops.enable_metric_set = hsw_enable_metric_set;
dev_priv->perf.oa.ops.disable_metric_set = hsw_disable_metric_set;
@@ -3025,9 +3405,6 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
dev_priv->perf.oa.timestamp_frequency = 12500000;
dev_priv->perf.oa.oa_formats = hsw_oa_formats;
-
- dev_priv->perf.oa.n_builtin_sets =
- i915_oa_n_builtin_metric_sets_hsw;
} else if (i915.enable_execlists) {
/* Note: that although we could theoretically also support the
* legacy ringbuffer mode on BDW (and earlier iterations of
@@ -3035,6 +3412,22 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
* worth the complexity to maintain now that BDW+ enable
* execlist mode by default.
*/
+ dev_priv->perf.oa.ops.is_valid_b_counter_reg =
+ gen7_is_valid_b_counter_addr;
+ dev_priv->perf.oa.ops.is_valid_mux_reg =
+ gen8_is_valid_mux_addr;
+ dev_priv->perf.oa.ops.is_valid_flex_reg =
+ gen8_is_valid_flex_addr;
+
+ dev_priv->perf.oa.ops.init_oa_buffer = gen8_init_oa_buffer;
+ dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
+ dev_priv->perf.oa.ops.disable_metric_set = gen8_disable_metric_set;
+ dev_priv->perf.oa.ops.oa_enable = gen8_oa_enable;
+ dev_priv->perf.oa.ops.oa_disable = gen8_oa_disable;
+ dev_priv->perf.oa.ops.read = gen8_oa_read;
+ dev_priv->perf.oa.ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
+
+ dev_priv->perf.oa.oa_formats = gen8_plus_oa_formats;
if (IS_GEN8(dev_priv)) {
dev_priv->perf.oa.ctx_oactxctrl_offset = 0x120;
@@ -3043,85 +3436,35 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
dev_priv->perf.oa.timestamp_frequency = 12500000;
dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<25);
-
- if (IS_BROADWELL(dev_priv)) {
- dev_priv->perf.oa.n_builtin_sets =
- i915_oa_n_builtin_metric_sets_bdw;
- dev_priv->perf.oa.ops.select_metric_set =
- i915_oa_select_metric_set_bdw;
- } else if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->perf.oa.n_builtin_sets =
- i915_oa_n_builtin_metric_sets_chv;
- dev_priv->perf.oa.ops.select_metric_set =
- i915_oa_select_metric_set_chv;
+ if (IS_CHERRYVIEW(dev_priv)) {
+ dev_priv->perf.oa.ops.is_valid_mux_reg =
+ chv_is_valid_mux_addr;
}
} else if (IS_GEN9(dev_priv)) {
dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128;
dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;
- dev_priv->perf.oa.timestamp_frequency = 12000000;
-
dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);
- if (IS_SKL_GT2(dev_priv)) {
- dev_priv->perf.oa.n_builtin_sets =
- i915_oa_n_builtin_metric_sets_sklgt2;
- dev_priv->perf.oa.ops.select_metric_set =
- i915_oa_select_metric_set_sklgt2;
- } else if (IS_SKL_GT3(dev_priv)) {
- dev_priv->perf.oa.n_builtin_sets =
- i915_oa_n_builtin_metric_sets_sklgt3;
- dev_priv->perf.oa.ops.select_metric_set =
- i915_oa_select_metric_set_sklgt3;
- } else if (IS_SKL_GT4(dev_priv)) {
- dev_priv->perf.oa.n_builtin_sets =
- i915_oa_n_builtin_metric_sets_sklgt4;
- dev_priv->perf.oa.ops.select_metric_set =
- i915_oa_select_metric_set_sklgt4;
- } else if (IS_BROXTON(dev_priv)) {
+ switch (dev_priv->info.platform) {
+ case INTEL_BROXTON:
+ case INTEL_GEMINILAKE:
dev_priv->perf.oa.timestamp_frequency = 19200000;
-
- dev_priv->perf.oa.n_builtin_sets =
- i915_oa_n_builtin_metric_sets_bxt;
- dev_priv->perf.oa.ops.select_metric_set =
- i915_oa_select_metric_set_bxt;
- } else if (IS_KBL_GT2(dev_priv)) {
- dev_priv->perf.oa.n_builtin_sets =
- i915_oa_n_builtin_metric_sets_kblgt2;
- dev_priv->perf.oa.ops.select_metric_set =
- i915_oa_select_metric_set_kblgt2;
- } else if (IS_KBL_GT3(dev_priv)) {
- dev_priv->perf.oa.n_builtin_sets =
- i915_oa_n_builtin_metric_sets_kblgt3;
- dev_priv->perf.oa.ops.select_metric_set =
- i915_oa_select_metric_set_kblgt3;
- } else if (IS_GEMINILAKE(dev_priv)) {
- dev_priv->perf.oa.timestamp_frequency = 19200000;
-
- dev_priv->perf.oa.n_builtin_sets =
- i915_oa_n_builtin_metric_sets_glk;
- dev_priv->perf.oa.ops.select_metric_set =
- i915_oa_select_metric_set_glk;
+ break;
+ case INTEL_SKYLAKE:
+ case INTEL_KABYLAKE:
+ dev_priv->perf.oa.timestamp_frequency = 12000000;
+ break;
+ default:
+ /* Leave timestamp_frequency at 0 so we can
+ * detect unsupported platforms.
+ */
+ break;
}
}
-
- if (dev_priv->perf.oa.n_builtin_sets) {
- dev_priv->perf.oa.ops.init_oa_buffer = gen8_init_oa_buffer;
- dev_priv->perf.oa.ops.enable_metric_set =
- gen8_enable_metric_set;
- dev_priv->perf.oa.ops.disable_metric_set =
- gen8_disable_metric_set;
- dev_priv->perf.oa.ops.oa_enable = gen8_oa_enable;
- dev_priv->perf.oa.ops.oa_disable = gen8_oa_disable;
- dev_priv->perf.oa.ops.read = gen8_oa_read;
- dev_priv->perf.oa.ops.oa_hw_tail_read =
- gen8_oa_hw_tail_read;
-
- dev_priv->perf.oa.oa_formats = gen8_plus_oa_formats;
- }
}
- if (dev_priv->perf.oa.n_builtin_sets) {
+ if (dev_priv->perf.oa.timestamp_frequency) {
hrtimer_init(&dev_priv->perf.oa.poll_check_timer,
CLOCK_MONOTONIC, HRTIMER_MODE_REL);
dev_priv->perf.oa.poll_check_timer.function = oa_poll_check_timer_cb;
@@ -3135,10 +3478,23 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
dev_priv->perf.oa.timestamp_frequency / 2;
dev_priv->perf.sysctl_header = register_sysctl_table(dev_root);
+ mutex_init(&dev_priv->perf.metrics_lock);
+ idr_init(&dev_priv->perf.metrics_idr);
+
dev_priv->perf.initialized = true;
}
}
+static int destroy_config(int id, void *p, void *data)
+{
+ struct drm_i915_private *dev_priv = data;
+ struct i915_oa_config *oa_config = p;
+
+ put_oa_config(dev_priv, oa_config);
+
+ return 0;
+}
+
/**
* i915_perf_fini - Counter part to i915_perf_init()
* @dev_priv: i915 device instance
@@ -3148,6 +3504,9 @@ void i915_perf_fini(struct drm_i915_private *dev_priv)
if (!dev_priv->perf.initialized)
return;
+ idr_for_each(&dev_priv->perf.metrics_idr, destroy_config, dev_priv);
+ idr_destroy(&dev_priv->perf.metrics_idr);
+
unregister_sysctl_table(dev_priv->perf.sysctl_header);
memset(&dev_priv->perf.oa.ops, 0, sizeof(dev_priv->perf.oa.ops));
diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h
index 2cfe96d3e5d1..0679a58cdbae 100644
--- a/drivers/gpu/drm/i915/i915_pvinfo.h
+++ b/drivers/gpu/drm/i915/i915_pvinfo.h
@@ -49,12 +49,18 @@ enum vgt_g2v_type {
VGT_G2V_MAX,
};
+/*
+ * VGT capabilities type
+ */
+#define VGT_CAPS_FULL_48BIT_PPGTT BIT(2)
+
struct vgt_if {
u64 magic; /* VGT_MAGIC */
u16 version_major;
u16 version_minor;
u32 vgt_id; /* ID of vGT instance */
- u32 rsv1[12]; /* pad to offset 0x40 */
+ u32 vgt_caps; /* VGT capabilities */
+ u32 rsv1[11]; /* pad to offset 0x40 */
/*
* Data structure to describe the ballooning info of resources.
* Each VM can only have one portion of continuous area for now.
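
A small sketch (not part of the patch) of how the guest side could consume the new capability word; vgt_caps and VGT_CAPS_FULL_48BIT_PPGTT come from the hunk above, while the helper name is hypothetical and the real driver may read the field through the PVINFO MMIO window rather than a direct pointer.

/* Hypothetical helper: check whether the host vGPU advertises full 48bit
 * ppgtt support via the new vgt_caps field. */
static inline bool vgpu_has_full_48bit_ppgtt(const struct vgt_if *vgt)
{
        return vgt->vgt_caps & VGT_CAPS_FULL_48BIT_PPGTT;
}
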
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 64cc674b652a..ed7cd9ee2c2a 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -25,6 +25,97 @@
#ifndef _I915_REG_H_
#define _I915_REG_H_
+/**
+ * DOC: The i915 register macro definition style guide
+ *
+ * Follow the style described here for new macros, and while changing existing
+ * macros. Do **not** mass change existing definitions just to update the style.
+ *
+ * Layout
+ * ''''''
+ *
+ * Keep helper macros near the top. For example, _PIPE() and friends.
+ *
+ * Prefix macros that generally should not be used outside of this file with
+ * underscore '_'. For example, _PIPE() and friends, single instances of
+ * registers that are defined solely for the use by function-like macros.
+ *
+ * Avoid using the underscore prefixed macros outside of this file. There are
+ * exceptions, but keep them to a minimum.
+ *
+ * There are two basic types of register definitions: Single registers and
+ * register groups. Register groups are registers which have two or more
+ * instances, for example one per pipe, port, transcoder, etc. Register groups
+ * should be defined using function-like macros.
+ *
+ * For single registers, define the register offset first, followed by register
+ * contents.
+ *
+ * For register groups, define the register instance offsets first, prefixed
+ * with underscore, followed by a function-like macro choosing the right
+ * instance based on the parameter, followed by register contents.
+ *
+ * Define the register contents (i.e. bit and bit field macros) from most
+ * significant to least significant bit. Indent the register content macros
+ * using two extra spaces between ``#define`` and the macro name.
+ *
+ * For bit fields, define a ``_MASK`` and a ``_SHIFT`` macro. Define bit field
+ * contents so that they are already shifted in place, and can be directly
+ * OR'd. For convenience, function-like macros may be used to define bit fields,
+ * but do note that the macros may be needed to read as well as write the
+ * register contents.
+ *
+ * Define bits using ``(1 << N)`` instead of ``BIT(N)``. We may change this in
+ * the future, but this is the prevailing style. Do **not** add ``_BIT`` suffix
+ * to the name.
+ *
+ * Group the register and its contents together without blank lines, separate
+ * from other registers and their contents with one blank line.
+ *
+ * Indent macro values from macro names using TABs. Align values vertically. Use
+ * braces in macro values as needed to avoid unintended precedence after macro
+ * substitution. Use spaces in macro values according to kernel coding
+ * style. Use lower case in hexadecimal values.
+ *
+ * Naming
+ * ''''''
+ *
+ * Try to name registers according to the specs. If the register name changes in
+ * the specs from one platform to another, stick to the original name.
+ *
+ * Try to re-use existing register macro definitions. Only add new macros for
+ * new register offsets, or when the register contents have changed enough to
+ * warrant a full redefinition.
+ *
+ * When a register macro changes for a new platform, prefix the new macro using
+ * the platform acronym or generation. For example, ``SKL_`` or ``GEN8_``. The
+ * prefix signifies the start platform/generation using the register.
+ *
+ * When a bit (field) macro changes or gets added for a new platform, while
+ * retaining the existing register macro, add a platform acronym or generation
+ * suffix to the name. For example, ``_SKL`` or ``_GEN8``.
+ *
+ * Examples
+ * ''''''''
+ *
+ * (Note that the values in the example are indented using spaces instead of
+ * TABs to avoid misalignment in generated documentation. Use TABs in the
+ * definitions.)::
+ *
+ * #define _FOO_A 0xf000
+ * #define _FOO_B 0xf001
+ * #define FOO(pipe) _MMIO_PIPE(pipe, _FOO_A, _FOO_B)
+ * #define FOO_ENABLE (1 << 31)
+ * #define FOO_MODE_MASK (0xf << 16)
+ * #define FOO_MODE_SHIFT 16
+ * #define FOO_MODE_BAR (0 << 16)
+ * #define FOO_MODE_BAZ (1 << 16)
+ * #define FOO_MODE_QUX_SNB (2 << 16)
+ *
+ * #define BAR _MMIO(0xb000)
+ * #define GEN8_BAR _MMIO(0xb888)
+ */
+
typedef struct {
uint32_t reg;
} i915_reg_t;
@@ -229,6 +320,28 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN8_RPCS_EU_MIN_SHIFT 0
#define GEN8_RPCS_EU_MIN_MASK (0xf << GEN8_RPCS_EU_MIN_SHIFT)
+#define WAIT_FOR_RC6_EXIT _MMIO(0x20CC)
+/* HSW only */
+#define HSW_SELECTIVE_READ_ADDRESSING_SHIFT 2
+#define HSW_SELECTIVE_READ_ADDRESSING_MASK (0x3 << HSW_SELECTIVE_READ_ADDRESSING_SHIFT)
+#define HSW_SELECTIVE_WRITE_ADDRESS_SHIFT 4
+#define HSW_SELECTIVE_WRITE_ADDRESS_MASK (0x7 << HSW_SELECTIVE_WRITE_ADDRESS_SHIFT)
+/* HSW+ */
+#define HSW_WAIT_FOR_RC6_EXIT_ENABLE (1 << 0)
+#define HSW_RCS_CONTEXT_ENABLE (1 << 7)
+#define HSW_RCS_INHIBIT (1 << 8)
+/* Gen8 */
+#define GEN8_SELECTIVE_WRITE_ADDRESS_SHIFT 4
+#define GEN8_SELECTIVE_WRITE_ADDRESS_MASK (0x3 << GEN8_SELECTIVE_WRITE_ADDRESS_SHIFT)
+#define GEN8_SELECTIVE_WRITE_ADDRESSING_ENABLE (1 << 6)
+#define GEN8_SELECTIVE_READ_SUBSLICE_SELECT_SHIFT 9
+#define GEN8_SELECTIVE_READ_SUBSLICE_SELECT_MASK (0x3 << GEN8_SELECTIVE_READ_SUBSLICE_SELECT_SHIFT)
+#define GEN8_SELECTIVE_READ_SLICE_SELECT_SHIFT 11
+#define GEN8_SELECTIVE_READ_SLICE_SELECT_MASK (0x3 << GEN8_SELECTIVE_READ_SLICE_SELECT_SHIFT)
+#define GEN8_SELECTIVE_READ_ADDRESSING_ENABLE (1 << 13)
+
#define GAM_ECOCHK _MMIO(0x4090)
#define BDW_DISABLE_HDC_INVALIDATION (1<<25)
#define ECOCHK_SNB_BIT (1<<10)
@@ -729,119 +842,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define EU_PERF_CNTL5 _MMIO(0xe55c)
#define EU_PERF_CNTL6 _MMIO(0xe65c)
-#define GDT_CHICKEN_BITS _MMIO(0x9840)
-#define GT_NOA_ENABLE 0x00000080
-
/*
* OA Boolean state
*/
-#define OAREPORTTRIG1 _MMIO(0x2740)
-#define OAREPORTTRIG1_THRESHOLD_MASK 0xffff
-#define OAREPORTTRIG1_EDGE_LEVEL_TRIGER_SELECT_MASK 0xffff0000 /* 0=level */
-
-#define OAREPORTTRIG2 _MMIO(0x2744)
-#define OAREPORTTRIG2_INVERT_A_0 (1<<0)
-#define OAREPORTTRIG2_INVERT_A_1 (1<<1)
-#define OAREPORTTRIG2_INVERT_A_2 (1<<2)
-#define OAREPORTTRIG2_INVERT_A_3 (1<<3)
-#define OAREPORTTRIG2_INVERT_A_4 (1<<4)
-#define OAREPORTTRIG2_INVERT_A_5 (1<<5)
-#define OAREPORTTRIG2_INVERT_A_6 (1<<6)
-#define OAREPORTTRIG2_INVERT_A_7 (1<<7)
-#define OAREPORTTRIG2_INVERT_A_8 (1<<8)
-#define OAREPORTTRIG2_INVERT_A_9 (1<<9)
-#define OAREPORTTRIG2_INVERT_A_10 (1<<10)
-#define OAREPORTTRIG2_INVERT_A_11 (1<<11)
-#define OAREPORTTRIG2_INVERT_A_12 (1<<12)
-#define OAREPORTTRIG2_INVERT_A_13 (1<<13)
-#define OAREPORTTRIG2_INVERT_A_14 (1<<14)
-#define OAREPORTTRIG2_INVERT_A_15 (1<<15)
-#define OAREPORTTRIG2_INVERT_B_0 (1<<16)
-#define OAREPORTTRIG2_INVERT_B_1 (1<<17)
-#define OAREPORTTRIG2_INVERT_B_2 (1<<18)
-#define OAREPORTTRIG2_INVERT_B_3 (1<<19)
-#define OAREPORTTRIG2_INVERT_C_0 (1<<20)
-#define OAREPORTTRIG2_INVERT_C_1 (1<<21)
-#define OAREPORTTRIG2_INVERT_D_0 (1<<22)
-#define OAREPORTTRIG2_THRESHOLD_ENABLE (1<<23)
-#define OAREPORTTRIG2_REPORT_TRIGGER_ENABLE (1<<31)
-
-#define OAREPORTTRIG3 _MMIO(0x2748)
-#define OAREPORTTRIG3_NOA_SELECT_MASK 0xf
-#define OAREPORTTRIG3_NOA_SELECT_8_SHIFT 0
-#define OAREPORTTRIG3_NOA_SELECT_9_SHIFT 4
-#define OAREPORTTRIG3_NOA_SELECT_10_SHIFT 8
-#define OAREPORTTRIG3_NOA_SELECT_11_SHIFT 12
-#define OAREPORTTRIG3_NOA_SELECT_12_SHIFT 16
-#define OAREPORTTRIG3_NOA_SELECT_13_SHIFT 20
-#define OAREPORTTRIG3_NOA_SELECT_14_SHIFT 24
-#define OAREPORTTRIG3_NOA_SELECT_15_SHIFT 28
-
-#define OAREPORTTRIG4 _MMIO(0x274c)
-#define OAREPORTTRIG4_NOA_SELECT_MASK 0xf
-#define OAREPORTTRIG4_NOA_SELECT_0_SHIFT 0
-#define OAREPORTTRIG4_NOA_SELECT_1_SHIFT 4
-#define OAREPORTTRIG4_NOA_SELECT_2_SHIFT 8
-#define OAREPORTTRIG4_NOA_SELECT_3_SHIFT 12
-#define OAREPORTTRIG4_NOA_SELECT_4_SHIFT 16
-#define OAREPORTTRIG4_NOA_SELECT_5_SHIFT 20
-#define OAREPORTTRIG4_NOA_SELECT_6_SHIFT 24
-#define OAREPORTTRIG4_NOA_SELECT_7_SHIFT 28
-
-#define OAREPORTTRIG5 _MMIO(0x2750)
-#define OAREPORTTRIG5_THRESHOLD_MASK 0xffff
-#define OAREPORTTRIG5_EDGE_LEVEL_TRIGER_SELECT_MASK 0xffff0000 /* 0=level */
-
-#define OAREPORTTRIG6 _MMIO(0x2754)
-#define OAREPORTTRIG6_INVERT_A_0 (1<<0)
-#define OAREPORTTRIG6_INVERT_A_1 (1<<1)
-#define OAREPORTTRIG6_INVERT_A_2 (1<<2)
-#define OAREPORTTRIG6_INVERT_A_3 (1<<3)
-#define OAREPORTTRIG6_INVERT_A_4 (1<<4)
-#define OAREPORTTRIG6_INVERT_A_5 (1<<5)
-#define OAREPORTTRIG6_INVERT_A_6 (1<<6)
-#define OAREPORTTRIG6_INVERT_A_7 (1<<7)
-#define OAREPORTTRIG6_INVERT_A_8 (1<<8)
-#define OAREPORTTRIG6_INVERT_A_9 (1<<9)
-#define OAREPORTTRIG6_INVERT_A_10 (1<<10)
-#define OAREPORTTRIG6_INVERT_A_11 (1<<11)
-#define OAREPORTTRIG6_INVERT_A_12 (1<<12)
-#define OAREPORTTRIG6_INVERT_A_13 (1<<13)
-#define OAREPORTTRIG6_INVERT_A_14 (1<<14)
-#define OAREPORTTRIG6_INVERT_A_15 (1<<15)
-#define OAREPORTTRIG6_INVERT_B_0 (1<<16)
-#define OAREPORTTRIG6_INVERT_B_1 (1<<17)
-#define OAREPORTTRIG6_INVERT_B_2 (1<<18)
-#define OAREPORTTRIG6_INVERT_B_3 (1<<19)
-#define OAREPORTTRIG6_INVERT_C_0 (1<<20)
-#define OAREPORTTRIG6_INVERT_C_1 (1<<21)
-#define OAREPORTTRIG6_INVERT_D_0 (1<<22)
-#define OAREPORTTRIG6_THRESHOLD_ENABLE (1<<23)
-#define OAREPORTTRIG6_REPORT_TRIGGER_ENABLE (1<<31)
-
-#define OAREPORTTRIG7 _MMIO(0x2758)
-#define OAREPORTTRIG7_NOA_SELECT_MASK 0xf
-#define OAREPORTTRIG7_NOA_SELECT_8_SHIFT 0
-#define OAREPORTTRIG7_NOA_SELECT_9_SHIFT 4
-#define OAREPORTTRIG7_NOA_SELECT_10_SHIFT 8
-#define OAREPORTTRIG7_NOA_SELECT_11_SHIFT 12
-#define OAREPORTTRIG7_NOA_SELECT_12_SHIFT 16
-#define OAREPORTTRIG7_NOA_SELECT_13_SHIFT 20
-#define OAREPORTTRIG7_NOA_SELECT_14_SHIFT 24
-#define OAREPORTTRIG7_NOA_SELECT_15_SHIFT 28
-
-#define OAREPORTTRIG8 _MMIO(0x275c)
-#define OAREPORTTRIG8_NOA_SELECT_MASK 0xf
-#define OAREPORTTRIG8_NOA_SELECT_0_SHIFT 0
-#define OAREPORTTRIG8_NOA_SELECT_1_SHIFT 4
-#define OAREPORTTRIG8_NOA_SELECT_2_SHIFT 8
-#define OAREPORTTRIG8_NOA_SELECT_3_SHIFT 12
-#define OAREPORTTRIG8_NOA_SELECT_4_SHIFT 16
-#define OAREPORTTRIG8_NOA_SELECT_5_SHIFT 20
-#define OAREPORTTRIG8_NOA_SELECT_6_SHIFT 24
-#define OAREPORTTRIG8_NOA_SELECT_7_SHIFT 28
-
#define OASTARTTRIG1 _MMIO(0x2710)
#define OASTARTTRIG1_THRESHOLD_COUNT_MASK_MBZ 0xffff0000
#define OASTARTTRIG1_THRESHOLD_MASK 0xffff
@@ -956,6 +960,112 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OASTARTTRIG8_NOA_SELECT_6_SHIFT 24
#define OASTARTTRIG8_NOA_SELECT_7_SHIFT 28
+#define OAREPORTTRIG1 _MMIO(0x2740)
+#define OAREPORTTRIG1_THRESHOLD_MASK 0xffff
+#define OAREPORTTRIG1_EDGE_LEVEL_TRIGER_SELECT_MASK 0xffff0000 /* 0=level */
+
+#define OAREPORTTRIG2 _MMIO(0x2744)
+#define OAREPORTTRIG2_INVERT_A_0 (1<<0)
+#define OAREPORTTRIG2_INVERT_A_1 (1<<1)
+#define OAREPORTTRIG2_INVERT_A_2 (1<<2)
+#define OAREPORTTRIG2_INVERT_A_3 (1<<3)
+#define OAREPORTTRIG2_INVERT_A_4 (1<<4)
+#define OAREPORTTRIG2_INVERT_A_5 (1<<5)
+#define OAREPORTTRIG2_INVERT_A_6 (1<<6)
+#define OAREPORTTRIG2_INVERT_A_7 (1<<7)
+#define OAREPORTTRIG2_INVERT_A_8 (1<<8)
+#define OAREPORTTRIG2_INVERT_A_9 (1<<9)
+#define OAREPORTTRIG2_INVERT_A_10 (1<<10)
+#define OAREPORTTRIG2_INVERT_A_11 (1<<11)
+#define OAREPORTTRIG2_INVERT_A_12 (1<<12)
+#define OAREPORTTRIG2_INVERT_A_13 (1<<13)
+#define OAREPORTTRIG2_INVERT_A_14 (1<<14)
+#define OAREPORTTRIG2_INVERT_A_15 (1<<15)
+#define OAREPORTTRIG2_INVERT_B_0 (1<<16)
+#define OAREPORTTRIG2_INVERT_B_1 (1<<17)
+#define OAREPORTTRIG2_INVERT_B_2 (1<<18)
+#define OAREPORTTRIG2_INVERT_B_3 (1<<19)
+#define OAREPORTTRIG2_INVERT_C_0 (1<<20)
+#define OAREPORTTRIG2_INVERT_C_1 (1<<21)
+#define OAREPORTTRIG2_INVERT_D_0 (1<<22)
+#define OAREPORTTRIG2_THRESHOLD_ENABLE (1<<23)
+#define OAREPORTTRIG2_REPORT_TRIGGER_ENABLE (1<<31)
+
+#define OAREPORTTRIG3 _MMIO(0x2748)
+#define OAREPORTTRIG3_NOA_SELECT_MASK 0xf
+#define OAREPORTTRIG3_NOA_SELECT_8_SHIFT 0
+#define OAREPORTTRIG3_NOA_SELECT_9_SHIFT 4
+#define OAREPORTTRIG3_NOA_SELECT_10_SHIFT 8
+#define OAREPORTTRIG3_NOA_SELECT_11_SHIFT 12
+#define OAREPORTTRIG3_NOA_SELECT_12_SHIFT 16
+#define OAREPORTTRIG3_NOA_SELECT_13_SHIFT 20
+#define OAREPORTTRIG3_NOA_SELECT_14_SHIFT 24
+#define OAREPORTTRIG3_NOA_SELECT_15_SHIFT 28
+
+#define OAREPORTTRIG4 _MMIO(0x274c)
+#define OAREPORTTRIG4_NOA_SELECT_MASK 0xf
+#define OAREPORTTRIG4_NOA_SELECT_0_SHIFT 0
+#define OAREPORTTRIG4_NOA_SELECT_1_SHIFT 4
+#define OAREPORTTRIG4_NOA_SELECT_2_SHIFT 8
+#define OAREPORTTRIG4_NOA_SELECT_3_SHIFT 12
+#define OAREPORTTRIG4_NOA_SELECT_4_SHIFT 16
+#define OAREPORTTRIG4_NOA_SELECT_5_SHIFT 20
+#define OAREPORTTRIG4_NOA_SELECT_6_SHIFT 24
+#define OAREPORTTRIG4_NOA_SELECT_7_SHIFT 28
+
+#define OAREPORTTRIG5 _MMIO(0x2750)
+#define OAREPORTTRIG5_THRESHOLD_MASK 0xffff
+#define OAREPORTTRIG5_EDGE_LEVEL_TRIGER_SELECT_MASK 0xffff0000 /* 0=level */
+
+#define OAREPORTTRIG6 _MMIO(0x2754)
+#define OAREPORTTRIG6_INVERT_A_0 (1<<0)
+#define OAREPORTTRIG6_INVERT_A_1 (1<<1)
+#define OAREPORTTRIG6_INVERT_A_2 (1<<2)
+#define OAREPORTTRIG6_INVERT_A_3 (1<<3)
+#define OAREPORTTRIG6_INVERT_A_4 (1<<4)
+#define OAREPORTTRIG6_INVERT_A_5 (1<<5)
+#define OAREPORTTRIG6_INVERT_A_6 (1<<6)
+#define OAREPORTTRIG6_INVERT_A_7 (1<<7)
+#define OAREPORTTRIG6_INVERT_A_8 (1<<8)
+#define OAREPORTTRIG6_INVERT_A_9 (1<<9)
+#define OAREPORTTRIG6_INVERT_A_10 (1<<10)
+#define OAREPORTTRIG6_INVERT_A_11 (1<<11)
+#define OAREPORTTRIG6_INVERT_A_12 (1<<12)
+#define OAREPORTTRIG6_INVERT_A_13 (1<<13)
+#define OAREPORTTRIG6_INVERT_A_14 (1<<14)
+#define OAREPORTTRIG6_INVERT_A_15 (1<<15)
+#define OAREPORTTRIG6_INVERT_B_0 (1<<16)
+#define OAREPORTTRIG6_INVERT_B_1 (1<<17)
+#define OAREPORTTRIG6_INVERT_B_2 (1<<18)
+#define OAREPORTTRIG6_INVERT_B_3 (1<<19)
+#define OAREPORTTRIG6_INVERT_C_0 (1<<20)
+#define OAREPORTTRIG6_INVERT_C_1 (1<<21)
+#define OAREPORTTRIG6_INVERT_D_0 (1<<22)
+#define OAREPORTTRIG6_THRESHOLD_ENABLE (1<<23)
+#define OAREPORTTRIG6_REPORT_TRIGGER_ENABLE (1<<31)
+
+#define OAREPORTTRIG7 _MMIO(0x2758)
+#define OAREPORTTRIG7_NOA_SELECT_MASK 0xf
+#define OAREPORTTRIG7_NOA_SELECT_8_SHIFT 0
+#define OAREPORTTRIG7_NOA_SELECT_9_SHIFT 4
+#define OAREPORTTRIG7_NOA_SELECT_10_SHIFT 8
+#define OAREPORTTRIG7_NOA_SELECT_11_SHIFT 12
+#define OAREPORTTRIG7_NOA_SELECT_12_SHIFT 16
+#define OAREPORTTRIG7_NOA_SELECT_13_SHIFT 20
+#define OAREPORTTRIG7_NOA_SELECT_14_SHIFT 24
+#define OAREPORTTRIG7_NOA_SELECT_15_SHIFT 28
+
+#define OAREPORTTRIG8 _MMIO(0x275c)
+#define OAREPORTTRIG8_NOA_SELECT_MASK 0xf
+#define OAREPORTTRIG8_NOA_SELECT_0_SHIFT 0
+#define OAREPORTTRIG8_NOA_SELECT_1_SHIFT 4
+#define OAREPORTTRIG8_NOA_SELECT_2_SHIFT 8
+#define OAREPORTTRIG8_NOA_SELECT_3_SHIFT 12
+#define OAREPORTTRIG8_NOA_SELECT_4_SHIFT 16
+#define OAREPORTTRIG8_NOA_SELECT_5_SHIFT 20
+#define OAREPORTTRIG8_NOA_SELECT_6_SHIFT 24
+#define OAREPORTTRIG8_NOA_SELECT_7_SHIFT 28
+
/* CECX_0 */
#define OACEC_COMPARE_LESS_OR_EQUAL 6
#define OACEC_COMPARE_NOT_EQUAL 5
@@ -994,6 +1104,51 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define OACEC7_0 _MMIO(0x27a8)
#define OACEC7_1 _MMIO(0x27ac)
+/* OA perf counters */
+#define OA_PERFCNT1_LO _MMIO(0x91B8)
+#define OA_PERFCNT1_HI _MMIO(0x91BC)
+#define OA_PERFCNT2_LO _MMIO(0x91C0)
+#define OA_PERFCNT2_HI _MMIO(0x91C4)
+
+#define OA_PERFMATRIX_LO _MMIO(0x91C8)
+#define OA_PERFMATRIX_HI _MMIO(0x91CC)
+
+/* RPM unit config (Gen8+) */
+#define RPM_CONFIG0 _MMIO(0x0D00)
+#define RPM_CONFIG1 _MMIO(0x0D04)
+
+/* RPC unit config (Gen8+) */
+#define RPM_CONFIG _MMIO(0x0D08)
+
+/* NOA (Gen8+) */
+#define NOA_CONFIG(i) _MMIO(0x0D0C + (i) * 4)
+
+#define MICRO_BP0_0 _MMIO(0x9800)
+#define MICRO_BP0_2 _MMIO(0x9804)
+#define MICRO_BP0_1 _MMIO(0x9808)
+
+#define MICRO_BP1_0 _MMIO(0x980C)
+#define MICRO_BP1_2 _MMIO(0x9810)
+#define MICRO_BP1_1 _MMIO(0x9814)
+
+#define MICRO_BP2_0 _MMIO(0x9818)
+#define MICRO_BP2_2 _MMIO(0x981C)
+#define MICRO_BP2_1 _MMIO(0x9820)
+
+#define MICRO_BP3_0 _MMIO(0x9824)
+#define MICRO_BP3_2 _MMIO(0x9828)
+#define MICRO_BP3_1 _MMIO(0x982C)
+
+#define MICRO_BP_TRIGGER _MMIO(0x9830)
+#define MICRO_BP3_COUNT_STATUS01 _MMIO(0x9834)
+#define MICRO_BP3_COUNT_STATUS23 _MMIO(0x9838)
+#define MICRO_BP_FIRED_ARMED _MMIO(0x983C)
+
+#define GDT_CHICKEN_BITS _MMIO(0x9840)
+#define GT_NOA_ENABLE 0x00000080
+
+#define NOA_DATA _MMIO(0x986C)
+#define NOA_WRITE _MMIO(0x9888)
#define _GEN7_PIPEA_DE_LOAD_SL 0x70068
#define _GEN7_PIPEB_DE_LOAD_SL 0x71068
@@ -1063,9 +1218,26 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define DP_SSS_RESET(pipe) _DP_SSS(0x2, (pipe))
#define DP_SSS_PWR_GATE(pipe) _DP_SSS(0x3, (pipe))
-/* See the PUNIT HAS v0.8 for the below bits */
-enum punit_power_well {
- /* These numbers are fixed and must match the position of the pw bits */
+/*
+ * i915_power_well_id:
+ *
+ * Platform-specific IDs used to look up power wells and - except for custom
+ * power wells - to define request/status register flag bit positions. As such,
+ * the set of IDs on a given platform must be unique and, except for custom
+ * power wells, their value must stay fixed.
+ */
+enum i915_power_well_id {
+ /*
+ * I830
+ * - custom power well
+ */
+ I830_DISP_PW_PIPES = 0,
+
+ /*
+ * VLV/CHV
+ * - PUNIT_REG_PWRGT_CTRL (bit: id*2),
+ * PUNIT_REG_PWRGT_STATUS (bit: id*2) (PUNIT HAS v0.8)
+ */
PUNIT_POWER_WELL_RENDER = 0,
PUNIT_POWER_WELL_MEDIA = 1,
PUNIT_POWER_WELL_DISP2D = 3,
@@ -1077,14 +1249,20 @@ enum punit_power_well {
PUNIT_POWER_WELL_DPIO_RX0 = 10,
PUNIT_POWER_WELL_DPIO_RX1 = 11,
PUNIT_POWER_WELL_DPIO_CMN_D = 12,
-
- /* Not actual bit groups. Used as IDs for lookup_power_well() */
- PUNIT_POWER_WELL_ALWAYS_ON,
-};
-
-enum skl_disp_power_wells {
- /* These numbers are fixed and must match the position of the pw bits */
- SKL_DISP_PW_MISC_IO,
+ /* - custom power well */
+ CHV_DISP_PW_PIPE_A, /* 13 */
+
+ /*
+ * HSW/BDW
+ * - HSW_PWR_WELL_CTL_DRIVER(0) (status bit: id*2, req bit: id*2+1)
+ */
+ HSW_DISP_PW_GLOBAL = 15,
+
+ /*
+ * GEN9+
+ * - HSW_PWR_WELL_CTL_DRIVER(0) (status bit: id*2, req bit: id*2+1)
+ */
+ SKL_DISP_PW_MISC_IO = 0,
SKL_DISP_PW_DDI_A_E,
GLK_DISP_PW_DDI_A = SKL_DISP_PW_DDI_A_E,
CNL_DISP_PW_DDI_A = SKL_DISP_PW_DDI_A_E,
@@ -1103,18 +1281,20 @@ enum skl_disp_power_wells {
SKL_DISP_PW_1 = 14,
SKL_DISP_PW_2,
- /* Not actual bit groups. Used as IDs for lookup_power_well() */
- SKL_DISP_PW_ALWAYS_ON,
+ /* - custom power wells */
SKL_DISP_PW_DC_OFF,
-
BXT_DPIO_CMN_A,
BXT_DPIO_CMN_BC,
- GLK_DPIO_CMN_C,
+ GLK_DPIO_CMN_C, /* 19 */
+
+ /*
+ * Multiple platforms.
+	 * Must start after the highest ID of any platform.
+ * - custom power wells
+ */
+ I915_DISP_PW_ALWAYS_ON = 20,
};
-#define SKL_POWER_WELL_STATE(pw) (1 << ((pw) * 2))
-#define SKL_POWER_WELL_REQ(pw) (1 << (((pw) * 2) + 1))
-
#define PUNIT_REG_PWRGT_CTRL 0x60
#define PUNIT_REG_PWRGT_STATUS 0x61
#define PUNIT_PWRGT_MASK(power_well) (3 << ((power_well) * 2))
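A minimal sketch of how the mask above is meant to be consumed, using only names from this patch plus standard kernel types; the example_* helper is an editorial illustration, not part of the commit:

static inline u32 example_punit_pwrgt_mask(enum i915_power_well_id id)
{
	/* Each VLV/CHV well owns a two-bit field at bit position id * 2,
	 * e.g. PUNIT_POWER_WELL_DISP2D (3) -> bits 7:6, mask 0x000000c0.
	 */
	return PUNIT_PWRGT_MASK(id);
}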
@@ -2156,6 +2336,7 @@ enum skl_disp_power_wells {
#define DONE_REG _MMIO(0x40b0)
#define GEN8_PRIVATE_PAT_LO _MMIO(0x40e0)
#define GEN8_PRIVATE_PAT_HI _MMIO(0x40e0 + 4)
+#define GEN10_PAT_INDEX(index)	_MMIO(0x40e0 + (index) * 4)
#define BSD_HWS_PGA_GEN7 _MMIO(0x04180)
#define BLT_HWS_PGA_GEN7 _MMIO(0x04280)
#define VEBOX_HWS_PGA_GEN7 _MMIO(0x04380)
@@ -3522,7 +3703,7 @@ enum skl_disp_power_wells {
#define INTERVAL_1_28_US(us) roundup(((us) * 100) >> 7, 25)
#define INTERVAL_1_33_US(us) (((us) * 3) >> 2)
#define INTERVAL_0_833_US(us) (((us) * 6) / 5)
-#define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \
+#define GT_INTERVAL_FROM_US(dev_priv, us) (INTEL_GEN(dev_priv) >= 9 ? \
(IS_GEN9_LP(dev_priv) ? \
INTERVAL_0_833_US(us) : \
INTERVAL_1_33_US(us)) : \
@@ -3531,7 +3712,7 @@ enum skl_disp_power_wells {
#define INTERVAL_1_28_TO_US(interval) (((interval) << 7) / 100)
#define INTERVAL_1_33_TO_US(interval) (((interval) << 2) / 3)
#define INTERVAL_0_833_TO_US(interval) (((interval) * 5) / 6)
-#define GT_PM_INTERVAL_TO_US(dev_priv, interval) (IS_GEN9(dev_priv) ? \
+#define GT_PM_INTERVAL_TO_US(dev_priv, interval) (INTEL_GEN(dev_priv) >= 9 ? \
(IS_GEN9_LP(dev_priv) ? \
INTERVAL_0_833_TO_US(interval) : \
INTERVAL_1_33_TO_US(interval)) : \
@@ -3783,6 +3964,7 @@ enum {
#define EDP_PSR_CTL _MMIO(dev_priv->psr_mmio_base + 0)
#define EDP_PSR_ENABLE (1<<31)
#define BDW_PSR_SINGLE_FRAME (1<<30)
+#define EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK (1<<29) /* SW can't modify */
#define EDP_PSR_LINK_STANDBY (1<<27)
#define EDP_PSR_MIN_LINK_ENTRY_TIME_MASK (3<<25)
#define EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES (0<<25)
@@ -5227,6 +5409,9 @@ enum {
#define _PIPE_MISC_A 0x70030
#define _PIPE_MISC_B 0x71030
+#define PIPEMISC_YUV420_ENABLE (1<<27)
+#define PIPEMISC_YUV420_MODE_FULL_BLEND (1<<26)
+#define PIPEMISC_OUTPUT_COLORSPACE_YUV (1<<11)
#define PIPEMISC_DITHER_BPC_MASK (7<<5)
#define PIPEMISC_DITHER_8_BPC (0<<5)
#define PIPEMISC_DITHER_10_BPC (1<<5)
@@ -6106,6 +6291,10 @@ enum {
#define _PLANE_KEYMSK_2_A 0x70298
#define _PLANE_KEYMAX_1_A 0x701a0
#define _PLANE_KEYMAX_2_A 0x702a0
+#define _PLANE_AUX_DIST_1_A 0x701c0
+#define _PLANE_AUX_DIST_2_A 0x702c0
+#define _PLANE_AUX_OFFSET_1_A 0x701c4
+#define _PLANE_AUX_OFFSET_2_A 0x702c4
#define _PLANE_COLOR_CTL_1_A 0x701CC /* GLK+ */
#define _PLANE_COLOR_CTL_2_A 0x702CC /* GLK+ */
#define _PLANE_COLOR_CTL_3_A 0x703CC /* GLK+ */
@@ -6212,6 +6401,24 @@ enum {
#define PLANE_NV12_BUF_CFG(pipe, plane) \
_MMIO_PLANE(plane, _PLANE_NV12_BUF_CFG_1(pipe), _PLANE_NV12_BUF_CFG_2(pipe))
+#define _PLANE_AUX_DIST_1_B 0x711c0
+#define _PLANE_AUX_DIST_2_B 0x712c0
+#define _PLANE_AUX_DIST_1(pipe) \
+ _PIPE(pipe, _PLANE_AUX_DIST_1_A, _PLANE_AUX_DIST_1_B)
+#define _PLANE_AUX_DIST_2(pipe) \
+ _PIPE(pipe, _PLANE_AUX_DIST_2_A, _PLANE_AUX_DIST_2_B)
+#define PLANE_AUX_DIST(pipe, plane) \
+ _MMIO_PLANE(plane, _PLANE_AUX_DIST_1(pipe), _PLANE_AUX_DIST_2(pipe))
+
+#define _PLANE_AUX_OFFSET_1_B 0x711c4
+#define _PLANE_AUX_OFFSET_2_B 0x712c4
+#define _PLANE_AUX_OFFSET_1(pipe) \
+ _PIPE(pipe, _PLANE_AUX_OFFSET_1_A, _PLANE_AUX_OFFSET_1_B)
+#define _PLANE_AUX_OFFSET_2(pipe) \
+ _PIPE(pipe, _PLANE_AUX_OFFSET_2_A, _PLANE_AUX_OFFSET_2_B)
+#define PLANE_AUX_OFFSET(pipe, plane) \
+ _MMIO_PLANE(plane, _PLANE_AUX_OFFSET_1(pipe), _PLANE_AUX_OFFSET_2(pipe))
+
#define _PLANE_COLOR_CTL_1_B 0x711CC
#define _PLANE_COLOR_CTL_2_B 0x712CC
#define _PLANE_COLOR_CTL_3_B 0x713CC
@@ -6695,6 +6902,7 @@ enum {
# define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2)
#define CHICKEN_PAR1_1 _MMIO(0x42080)
+#define SKL_RC_HASH_OUTSIDE (1 << 15)
#define DPA_MASK_VBLANK_SRD (1 << 15)
#define FORCE_ARB_IDLE_PLANES (1 << 14)
#define SKL_EDP_PSR_FIX_RDWRAP (1 << 3)
@@ -6703,12 +6911,10 @@ enum {
#define KVM_CONFIG_CHANGE_NOTIFICATION_SELECT (1 << 14)
#define CHICKEN_MISC_2 _MMIO(0x42084)
-#define GLK_CL0_PWR_DOWN (1 << 10)
-#define GLK_CL1_PWR_DOWN (1 << 11)
+#define CNL_COMP_PWR_DOWN (1 << 23)
#define GLK_CL2_PWR_DOWN (1 << 12)
-
-#define CHICKEN_MISC_2 _MMIO(0x42084)
-#define COMP_PWR_DOWN (1 << 23)
+#define GLK_CL1_PWR_DOWN (1 << 11)
+#define GLK_CL0_PWR_DOWN (1 << 10)
#define _CHICKEN_PIPESL_1_A 0x420b0
#define _CHICKEN_PIPESL_1_B 0x420b4
@@ -7984,12 +8190,31 @@ enum {
#define SKL_AUD_CODEC_WAKE_SIGNAL (1 << 15)
/* HSW Power Wells */
-#define HSW_PWR_WELL_BIOS _MMIO(0x45400) /* CTL1 */
-#define HSW_PWR_WELL_DRIVER _MMIO(0x45404) /* CTL2 */
-#define HSW_PWR_WELL_KVMR _MMIO(0x45408) /* CTL3 */
-#define HSW_PWR_WELL_DEBUG _MMIO(0x4540C) /* CTL4 */
-#define HSW_PWR_WELL_ENABLE_REQUEST (1<<31)
-#define HSW_PWR_WELL_STATE_ENABLED (1<<30)
+#define _HSW_PWR_WELL_CTL1 0x45400
+#define _HSW_PWR_WELL_CTL2 0x45404
+#define _HSW_PWR_WELL_CTL3 0x45408
+#define _HSW_PWR_WELL_CTL4 0x4540C
+
+/*
+ * Each power well control register contains up to 16 (request, status) HW
+ * flag tuples. The register index and HW flag shift are determined by the
+ * power well ID (see i915_power_well_id). There are 4 possible sources of
+ * power well requests, each source having its own set of control registers:
+ * BIOS, DRIVER, KVMR, DEBUG.
+ */
+#define _HSW_PW_REG_IDX(pw) ((pw) >> 4)
+#define _HSW_PW_SHIFT(pw) (((pw) & 0xf) * 2)
+/* TODO: Add all PWR_WELL_CTL registers below for new platforms */
+#define HSW_PWR_WELL_CTL_BIOS(pw) _MMIO(_PICK(_HSW_PW_REG_IDX(pw), \
+ _HSW_PWR_WELL_CTL1))
+#define HSW_PWR_WELL_CTL_DRIVER(pw) _MMIO(_PICK(_HSW_PW_REG_IDX(pw), \
+ _HSW_PWR_WELL_CTL2))
+#define HSW_PWR_WELL_CTL_KVMR _MMIO(_HSW_PWR_WELL_CTL3)
+#define HSW_PWR_WELL_CTL_DEBUG(pw) _MMIO(_PICK(_HSW_PW_REG_IDX(pw), \
+ _HSW_PWR_WELL_CTL4))
+
+#define HSW_PWR_WELL_CTL_REQ(pw) (1 << (_HSW_PW_SHIFT(pw) + 1))
+#define HSW_PWR_WELL_CTL_STATE(pw) (1 << _HSW_PW_SHIFT(pw))
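As a hedged illustration of the ID-based scheme above (the example_* helper is hypothetical, not part of the commit): _HSW_PW_REG_IDX(HSW_DISP_PW_GLOBAL) evaluates to 0 and _HSW_PW_SHIFT(HSW_DISP_PW_GLOBAL) to 30, so the old fixed bits fall out of the new macros.

static inline bool example_hsw_power_well_enabled(struct drm_i915_private *dev_priv,
						  enum i915_power_well_id id)
{
	/* For HSW_DISP_PW_GLOBAL (15) this reads the DRIVER control register
	 * and tests bit 30, formerly HSW_PWR_WELL_STATE_ENABLED; the matching
	 * request bit is bit 31, formerly HSW_PWR_WELL_ENABLE_REQUEST.
	 */
	return I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) & HSW_PWR_WELL_CTL_STATE(id);
}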
#define HSW_PWR_WELL_CTL5 _MMIO(0x45410)
#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1<<31)
#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1<<20)
@@ -7997,11 +8222,17 @@ enum {
#define HSW_PWR_WELL_CTL6 _MMIO(0x45414)
/* SKL Fuse Status */
+enum skl_power_gate {
+ SKL_PG0,
+ SKL_PG1,
+ SKL_PG2,
+};
+
#define SKL_FUSE_STATUS _MMIO(0x42000)
-#define SKL_FUSE_DOWNLOAD_STATUS (1<<31)
-#define SKL_FUSE_PG0_DIST_STATUS (1<<27)
-#define SKL_FUSE_PG1_DIST_STATUS (1<<26)
-#define SKL_FUSE_PG2_DIST_STATUS (1<<25)
+#define SKL_FUSE_DOWNLOAD_STATUS (1<<31)
+/* PG0 (HW controlled, no power well ID), PG1..PG2 (SKL_DISP_PW_1..SKL_DISP_PW_2) */
+#define SKL_PW_TO_PG(pw) ((pw) - SKL_DISP_PW_1 + SKL_PG1)
+#define SKL_FUSE_PG_DIST_STATUS(pg) (1 << (27 - (pg)))
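A minimal sketch of the fuse-status lookup these helpers enable (the example_* function is hypothetical, not part of the commit): SKL_PW_TO_PG(SKL_DISP_PW_1) evaluates to SKL_PG1, and SKL_FUSE_PG_DIST_STATUS(SKL_PG1) to (1 << 26), the bit formerly named SKL_FUSE_PG1_DIST_STATUS.

static inline bool example_skl_pg_distributed(struct drm_i915_private *dev_priv,
					      enum i915_power_well_id id)
{
	enum skl_power_gate pg = SKL_PW_TO_PG(id);

	/* Check the fuse distribution status bit for the power gate that
	 * backs this well, e.g. bit 26 for SKL_DISP_PW_1.
	 */
	return I915_READ(SKL_FUSE_STATUS) & SKL_FUSE_PG_DIST_STATUS(pg);
}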
/* Per-pipe DDI Function Control */
#define _TRANS_DDI_FUNC_CTL_A 0x60400
@@ -8343,6 +8574,7 @@ enum {
#define DPLL_CFGCR0_LINK_RATE_3240 (6 << 25)
#define DPLL_CFGCR0_LINK_RATE_4050 (7 << 25)
#define DPLL_CFGCR0_DCO_FRACTION_MASK (0x7fff << 10)
+#define DPLL_CFGCR0_DCO_FRAC_SHIFT (10)
#define DPLL_CFGCR0_DCO_FRACTION(x) ((x) << 10)
#define DPLL_CFGCR0_DCO_INTEGER_MASK (0x3ff)
#define CNL_DPLL_CFGCR0(pll) _MMIO_PLL(pll, _CNL_DPLL0_CFGCR0, _CNL_DPLL1_CFGCR0)
@@ -8350,6 +8582,7 @@ enum {
#define _CNL_DPLL0_CFGCR1 0x6C004
#define _CNL_DPLL1_CFGCR1 0x6C084
#define DPLL_CFGCR1_QDIV_RATIO_MASK (0xff << 10)
+#define DPLL_CFGCR1_QDIV_RATIO_SHIFT (10)
#define DPLL_CFGCR1_QDIV_RATIO(x) ((x) << 10)
#define DPLL_CFGCR1_QDIV_MODE(x) ((x) << 9)
#define DPLL_CFGCR1_KDIV_MASK (7 << 6)
diff --git a/drivers/gpu/drm/i915/i915_selftest.h b/drivers/gpu/drm/i915/i915_selftest.h
index 9d7d86f1733d..78e1a1b168ff 100644
--- a/drivers/gpu/drm/i915/i915_selftest.h
+++ b/drivers/gpu/drm/i915/i915_selftest.h
@@ -101,6 +101,4 @@ bool __igt_timeout(unsigned long timeout, const char *fmt, ...);
#define igt_timeout(t, fmt, ...) \
__igt_timeout((t), KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
-#define igt_can_mi_store_dword_imm(D) (INTEL_GEN(D) > 2)
-
#endif /* !__I915_SELFTEST_H__ */
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 1eef3fae4db3..d61c8727f756 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -96,7 +96,7 @@ static struct attribute *rc6_attrs[] = {
NULL
};
-static struct attribute_group rc6_attr_group = {
+static const struct attribute_group rc6_attr_group = {
.name = power_group_name,
.attrs = rc6_attrs
};
@@ -107,7 +107,7 @@ static struct attribute *rc6p_attrs[] = {
NULL
};
-static struct attribute_group rc6p_attr_group = {
+static const struct attribute_group rc6p_attr_group = {
.name = power_group_name,
.attrs = rc6p_attrs
};
@@ -117,7 +117,7 @@ static struct attribute *media_rc6_attrs[] = {
NULL
};
-static struct attribute_group media_rc6_attr_group = {
+static const struct attribute_group media_rc6_attr_group = {
.name = power_group_name,
.attrs = media_rc6_attrs
};
@@ -209,7 +209,7 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
memcpy(*remap_info + (offset/4), buf, count);
/* NB: We defer the remapping until we switch to the context */
- list_for_each_entry(ctx, &dev_priv->context_list, link)
+ list_for_each_entry(ctx, &dev_priv->contexts.list, link)
ctx->remap_slice |= (1<<slice);
ret = count;
@@ -220,7 +220,7 @@ out:
return ret;
}
-static struct bin_attribute dpf_attrs = {
+static const struct bin_attribute dpf_attrs = {
.attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
.size = GEN7_L3LOG_SIZE,
.read = i915_l3_read,
@@ -229,7 +229,7 @@ static struct bin_attribute dpf_attrs = {
.private = (void *)0
};
-static struct bin_attribute dpf_attrs_1 = {
+static const struct bin_attribute dpf_attrs_1 = {
.attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
.size = GEN7_L3LOG_SIZE,
.read = i915_l3_read,
@@ -253,7 +253,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff);
} else {
u32 rpstat = I915_READ(GEN6_RPSTAT1);
- if (IS_GEN9(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 9)
ret = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
ret = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
@@ -532,7 +532,7 @@ static ssize_t error_state_write(struct file *file, struct kobject *kobj,
return count;
}
-static struct bin_attribute error_state_attr = {
+static const struct bin_attribute error_state_attr = {
.attr.name = "error",
.attr.mode = S_IRUSR | S_IWUSR,
.size = 0,
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index cf7a958e4d3c..5fe9f3f39467 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -75,10 +75,17 @@ void i915_check_vgpu(struct drm_i915_private *dev_priv)
return;
}
+ dev_priv->vgpu.caps = __raw_i915_read32(dev_priv, vgtif_reg(vgt_caps));
+
dev_priv->vgpu.active = true;
DRM_INFO("Virtual GPU for Intel GVT-g detected.\n");
}
+bool intel_vgpu_has_full_48bit_ppgtt(struct drm_i915_private *dev_priv)
+{
+ return dev_priv->vgpu.caps & VGT_CAPS_FULL_48BIT_PPGTT;
+}
+
struct _balloon_info_ {
/*
* There are up to 2 regions per mappable/unmappable graphic
diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h
index 3c3b2d24e830..b72bd2956b70 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.h
+++ b/drivers/gpu/drm/i915/i915_vgpu.h
@@ -27,6 +27,9 @@
#include "i915_pvinfo.h"
void i915_check_vgpu(struct drm_i915_private *dev_priv);
+
+bool intel_vgpu_has_full_48bit_ppgtt(struct drm_i915_private *dev_priv);
+
int intel_vgt_balloon(struct drm_i915_private *dev_priv);
void intel_vgt_deballoon(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 1cfe137cdc32..02d1a5eacb00 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -579,11 +579,17 @@ err_unpin:
static void i915_vma_destroy(struct i915_vma *vma)
{
+ int i;
+
GEM_BUG_ON(vma->node.allocated);
GEM_BUG_ON(i915_vma_is_active(vma));
GEM_BUG_ON(!i915_vma_is_closed(vma));
GEM_BUG_ON(vma->fence);
+ for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
+ GEM_BUG_ON(i915_gem_active_isset(&vma->last_read[i]));
+ GEM_BUG_ON(i915_gem_active_isset(&vma->last_fence));
+
list_del(&vma->vm_link);
if (!i915_vma_is_ggtt(vma))
i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
@@ -591,33 +597,11 @@ static void i915_vma_destroy(struct i915_vma *vma)
kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
}
-void i915_vma_unlink_ctx(struct i915_vma *vma)
-{
- struct i915_gem_context *ctx = vma->ctx;
-
- if (ctx->vma_lut.ht_size & I915_CTX_RESIZE_IN_PROGRESS) {
- cancel_work_sync(&ctx->vma_lut.resize);
- ctx->vma_lut.ht_size &= ~I915_CTX_RESIZE_IN_PROGRESS;
- }
-
- __hlist_del(&vma->ctx_node);
- ctx->vma_lut.ht_count--;
-
- if (i915_vma_is_ggtt(vma))
- vma->obj->vma_hashed = NULL;
- vma->ctx = NULL;
-
- i915_vma_put(vma);
-}
-
void i915_vma_close(struct i915_vma *vma)
{
GEM_BUG_ON(i915_vma_is_closed(vma));
vma->flags |= I915_VMA_CLOSED;
- if (vma->ctx)
- i915_vma_unlink_ctx(vma);
-
list_del(&vma->obj_link);
rb_erase(&vma->obj_node, &vma->obj->vma_tree);
@@ -680,9 +664,8 @@ int i915_vma_unbind(struct i915_vma *vma)
__i915_vma_unpin(vma);
if (ret)
return ret;
-
- GEM_BUG_ON(i915_vma_is_active(vma));
}
+ GEM_BUG_ON(i915_vma_is_active(vma));
if (i915_vma_is_pinned(vma))
return -EBUSY;
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 4a673fc1a432..1fd61e88cfd0 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -112,13 +112,9 @@ struct i915_vma {
/**
* Used for performing relocations during execbuffer insertion.
*/
- struct drm_i915_gem_exec_object2 *exec_entry;
+ unsigned int *exec_flags;
struct hlist_node exec_node;
u32 exec_handle;
-
- struct i915_gem_context *ctx;
- struct hlist_node ctx_node;
- u32 ctx_handle;
};
struct i915_vma *
@@ -284,12 +280,12 @@ static inline void __i915_vma_pin(struct i915_vma *vma)
static inline void __i915_vma_unpin(struct i915_vma *vma)
{
- GEM_BUG_ON(!i915_vma_is_pinned(vma));
vma->flags--;
}
static inline void i915_vma_unpin(struct i915_vma *vma)
{
+ GEM_BUG_ON(!i915_vma_is_pinned(vma));
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
__i915_vma_unpin(vma);
}
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index 4325cb0a04f5..ee76fab7bb6f 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -114,6 +114,8 @@ int intel_plane_atomic_check_with_state(struct intel_crtc_state *crtc_state,
struct drm_i915_private *dev_priv = to_i915(plane->dev);
struct drm_plane_state *state = &intel_state->base;
struct intel_plane *intel_plane = to_intel_plane(plane);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->base.adjusted_mode;
int ret;
/*
@@ -173,6 +175,19 @@ int intel_plane_atomic_check_with_state(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
+ /*
+ * Y-tiling is not supported in IF-ID Interlace mode in
+ * GEN9 and above.
+ */
+ if (state->fb && INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
+ adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ if (state->fb->modifier == I915_FORMAT_MOD_Y_TILED ||
+ state->fb->modifier == I915_FORMAT_MOD_Yf_TILED) {
+ DRM_DEBUG_KMS("Y/Yf tiling not supported in IF-ID mode\n");
+ return -EINVAL;
+ }
+ }
+
/* FIXME pre-g4x don't work like this */
if (intel_state->base.visible)
crtc_state->active_planes |= BIT(intel_plane->id);
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 639d45c1dd2e..183e87e8ea31 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1120,8 +1120,8 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
uint8_t aux_channel, ddc_pin;
/* Each DDI port can have more than one value on the "DVO Port" field,
- * so look for all the possible values for each port and abort if more
- * than one is found. */
+ * so look for all the possible values for each port.
+ */
int dvo_ports[][3] = {
{DVO_PORT_HDMIA, DVO_PORT_DPA, -1},
{DVO_PORT_HDMIB, DVO_PORT_DPB, -1},
@@ -1130,7 +1130,10 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
{DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE},
};
- /* Find the child device to use, abort if more than one found. */
+ /*
+	 * Find the first child device to reference the port, and report if more
+	 * than one is found.
+ */
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
it = dev_priv->vbt.child_dev + i;
@@ -1140,11 +1143,11 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
if (it->common.dvo_port == dvo_ports[port][j]) {
if (child) {
- DRM_DEBUG_KMS("More than one child device for port %c in VBT.\n",
+ DRM_DEBUG_KMS("More than one child device for port %c in VBT, using the first.\n",
port_name(port));
- return;
+ } else {
+ child = it;
}
- child = it;
}
}
}
@@ -1187,6 +1190,15 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
if (is_dvi) {
info->alternate_ddc_pin = ddc_pin;
+ /*
+		 * All VBTs that we have seen so far for B Stepping have this
+		 * information wrong for Port D. So, let's just ignore it for now.
+ */
+ if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0) &&
+ port == PORT_D) {
+ info->alternate_ddc_pin = 0;
+ }
+
sanitize_ddc_pin(dev_priv, port);
}
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index 306c6b06b330..8e4e829682b9 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -41,6 +41,22 @@
#define LEGACY_LUT_LENGTH (sizeof(struct drm_color_lut) * 256)
+/* Post offset values for RGB->YCBCR conversion */
+#define POSTOFF_RGB_TO_YUV_HI 0x800
+#define POSTOFF_RGB_TO_YUV_ME 0x100
+#define POSTOFF_RGB_TO_YUV_LO 0x800
+
+/*
+ * These are the direct register values specified in the Bspec
+ * for the RGB->YUV conversion matrix (colorspace BT709).
+ */
+#define CSC_RGB_TO_YUV_RU_GU 0x2ba809d8
+#define CSC_RGB_TO_YUV_BU 0x37e80000
+#define CSC_RGB_TO_YUV_RY_GY 0x1e089cc0
+#define CSC_RGB_TO_YUV_BY 0xb5280000
+#define CSC_RGB_TO_YUV_RV_GV 0xbce89ad8
+#define CSC_RGB_TO_YUV_BV 0x1e080000
+
/*
* Extract the CSC coefficient from a CTM coefficient (in U32.32 fixed point
* format). This macro takes the coefficient we want transformed and the
@@ -91,6 +107,30 @@ static void ctm_mult_by_limited(uint64_t *result, int64_t *input)
}
}
+void i9xx_load_ycbcr_conversion_matrix(struct intel_crtc *intel_crtc)
+{
+ int pipe = intel_crtc->pipe;
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+
+ I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
+ I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
+ I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
+
+ I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), CSC_RGB_TO_YUV_RU_GU);
+ I915_WRITE(PIPE_CSC_COEFF_BU(pipe), CSC_RGB_TO_YUV_BU);
+
+ I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), CSC_RGB_TO_YUV_RY_GY);
+ I915_WRITE(PIPE_CSC_COEFF_BY(pipe), CSC_RGB_TO_YUV_BY);
+
+ I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), CSC_RGB_TO_YUV_RV_GV);
+ I915_WRITE(PIPE_CSC_COEFF_BV(pipe), CSC_RGB_TO_YUV_BV);
+
+ I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), POSTOFF_RGB_TO_YUV_HI);
+ I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), POSTOFF_RGB_TO_YUV_ME);
+ I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), POSTOFF_RGB_TO_YUV_LO);
+ I915_WRITE(PIPE_CSC_MODE(pipe), 0);
+}
+
/* Set up the pipe CSC unit. */
static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
{
@@ -101,7 +141,10 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
uint16_t coeffs[9] = { 0, };
struct intel_crtc_state *intel_crtc_state = to_intel_crtc_state(crtc_state);
- if (crtc_state->ctm) {
+ if (intel_crtc_state->ycbcr420) {
+ i9xx_load_ycbcr_conversion_matrix(intel_crtc);
+ return;
+ } else if (crtc_state->ctm) {
struct drm_color_ctm *ctm =
(struct drm_color_ctm *)crtc_state->ctm->data;
uint64_t input[9] = { 0, };
@@ -398,6 +441,7 @@ static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset)
}
/* Program the max register to clamp values > 1.0. */
+ i = lut_size - 1;
I915_WRITE(PREC_PAL_GC_MAX(pipe, 0),
drm_color_lut_extract(lut[i].red, 16));
I915_WRITE(PREC_PAL_GC_MAX(pipe, 1),
@@ -615,7 +659,7 @@ void intel_color_init(struct drm_crtc *crtc)
IS_BROXTON(dev_priv)) {
dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix;
dev_priv->display.load_luts = broadwell_load_luts;
- } else if (IS_GEMINILAKE(dev_priv)) {
+ } else if (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) {
dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix;
dev_priv->display.load_luts = glk_load_luts;
} else {
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 84a1f5e85153..70e0ff41070c 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -802,12 +802,10 @@ void intel_crt_reset(struct drm_encoder *encoder)
*/
static const struct drm_connector_funcs intel_crt_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.late_register = intel_connector_register,
.early_unregister = intel_connector_unregister,
.destroy = intel_crt_destroy,
- .set_property = drm_atomic_helper_connector_set_property,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 80e96f1f49d2..4b4fd1f8110b 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1103,6 +1103,62 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
return dco_freq / (p0 * p1 * p2 * 5);
}
+static int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
+ uint32_t pll_id)
+{
+ uint32_t cfgcr0, cfgcr1;
+ uint32_t p0, p1, p2, dco_freq, ref_clock;
+
+ cfgcr0 = I915_READ(CNL_DPLL_CFGCR0(pll_id));
+ cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(pll_id));
+
+ p0 = cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
+ p2 = cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
+
+ if (cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
+ p1 = (cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
+ DPLL_CFGCR1_QDIV_RATIO_SHIFT;
+ else
+ p1 = 1;
+
+
+ switch (p0) {
+ case DPLL_CFGCR1_PDIV_2:
+ p0 = 2;
+ break;
+ case DPLL_CFGCR1_PDIV_3:
+ p0 = 3;
+ break;
+ case DPLL_CFGCR1_PDIV_5:
+ p0 = 5;
+ break;
+ case DPLL_CFGCR1_PDIV_7:
+ p0 = 7;
+ break;
+ }
+
+ switch (p2) {
+ case DPLL_CFGCR1_KDIV_1:
+ p2 = 1;
+ break;
+ case DPLL_CFGCR1_KDIV_2:
+ p2 = 2;
+ break;
+ case DPLL_CFGCR1_KDIV_4:
+ p2 = 4;
+ break;
+ }
+
+ ref_clock = dev_priv->cdclk.hw.ref;
+
+ dco_freq = (cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) * ref_clock;
+
+ dco_freq += (((cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
+ DPLL_CFGCR0_DCO_FRAC_SHIFT) * ref_clock) / 0x8000;
+
+ return dco_freq / (p0 * p1 * p2 * 5);
+}
+
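A worked example of the arithmetic above, with illustrative (not hardware-verified) register values: a 24000 kHz reference clock, DCO integer 337 (0x151), DCO fraction 0x4000, and p0 = 2, p1 = 1, p2 = 1.

	dco_freq = 337 * 24000 + (0x4000 * 24000) / 0x8000
	         = 8088000 + 12000
	         = 8100000 kHz
	link     = 8100000 / (2 * 1 * 1 * 5) = 810000 kHz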
static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
{
int dotclock;
@@ -1118,12 +1174,68 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
else
dotclock = pipe_config->port_clock;
+ if (pipe_config->ycbcr420)
+ dotclock *= 2;
+
if (pipe_config->pixel_multiplier)
dotclock /= pipe_config->pixel_multiplier;
pipe_config->base.adjusted_mode.crtc_clock = dotclock;
}
+static void cnl_ddi_clock_get(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ int link_clock = 0;
+ uint32_t cfgcr0, pll_id;
+
+ pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
+
+ cfgcr0 = I915_READ(CNL_DPLL_CFGCR0(pll_id));
+
+ if (cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
+ link_clock = cnl_calc_wrpll_link(dev_priv, pll_id);
+ } else {
+ link_clock = cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK;
+
+ switch (link_clock) {
+ case DPLL_CFGCR0_LINK_RATE_810:
+ link_clock = 81000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_1080:
+ link_clock = 108000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_1350:
+ link_clock = 135000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_1620:
+ link_clock = 162000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_2160:
+ link_clock = 216000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_2700:
+ link_clock = 270000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_3240:
+ link_clock = 324000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_4050:
+ link_clock = 405000;
+ break;
+ default:
+ WARN(1, "Unsupported link rate\n");
+ break;
+ }
+ link_clock *= 2;
+ }
+
+ pipe_config->port_clock = link_clock;
+
+ ddi_dotclock_get(pipe_config);
+}
+
static void skl_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
@@ -1267,6 +1379,8 @@ void intel_ddi_clock_get(struct intel_encoder *encoder,
skl_ddi_clock_get(encoder, pipe_config);
else if (IS_GEN9_LP(dev_priv))
bxt_ddi_clock_get(encoder, pipe_config);
+ else if (IS_CANNONLAKE(dev_priv))
+ cnl_ddi_clock_get(encoder, pipe_config);
}
void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
@@ -1762,7 +1876,7 @@ cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv,
if (dev_priv->vbt.edp.low_vswing) {
if (voltage == VOLTAGE_INFO_0_85V) {
*n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_85V);
- return cnl_ddi_translations_dp_0_85V;
+ return cnl_ddi_translations_edp_0_85V;
} else if (voltage == VOLTAGE_INFO_0_95V) {
*n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_95V);
return cnl_ddi_translations_edp_0_95V;
@@ -1868,9 +1982,12 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, u32 level)
if ((intel_dp) && (type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP)) {
width = intel_dp->lane_count;
rate = intel_dp->link_rate;
- } else {
+ } else if (type == INTEL_OUTPUT_HDMI) {
width = 4;
/* Rate is always < than 6GHz for HDMI */
+ } else {
+ MISSING_CASE(type);
+ return;
}
/*
@@ -1896,8 +2013,8 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, u32 level)
val = I915_READ(CNL_PORT_TX_DW4_LN(port, ln));
val &= ~LOADGEN_SELECT;
- if (((rate < 600000) && (width == 4) && (ln >= 1)) ||
- ((rate < 600000) && (width < 4) && ((ln == 1) || (ln == 2)))) {
+ if ((rate <= 600000 && width == 4 && ln >= 1) ||
+ (rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) {
val |= LOADGEN_SELECT;
}
I915_WRITE(CNL_PORT_TX_DW4_LN(port, ln), val);
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 77d3214e1a77..5f91ddc78c7a 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -363,7 +363,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
*/
if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
- (dev_priv->pch_type == PCH_CPT &&
+ (HAS_PCH_CPT(dev_priv) &&
!(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
DRM_INFO("Display fused off, disabling\n");
info->num_pipes = 0;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index dec9e58545a1..0e93ec201fe3 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -49,11 +49,6 @@
#include <linux/dma_remapping.h>
#include <linux/reservation.h>
-static bool is_mmio_work(struct intel_flip_work *work)
-{
- return work->mmio_work.func;
-}
-
/* Primary plane formats for gen <= 3 */
static const uint32_t i8xx_primary_formats[] = {
DRM_FORMAT_C8,
@@ -72,6 +67,12 @@ static const uint32_t i965_primary_formats[] = {
DRM_FORMAT_XBGR2101010,
};
+static const uint64_t i9xx_format_modifiers[] = {
+ I915_FORMAT_MOD_X_TILED,
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
static const uint32_t skl_primary_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
@@ -87,11 +88,34 @@ static const uint32_t skl_primary_formats[] = {
DRM_FORMAT_VYUY,
};
+static const uint64_t skl_format_modifiers_noccs[] = {
+ I915_FORMAT_MOD_Yf_TILED,
+ I915_FORMAT_MOD_Y_TILED,
+ I915_FORMAT_MOD_X_TILED,
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static const uint64_t skl_format_modifiers_ccs[] = {
+ I915_FORMAT_MOD_Yf_TILED_CCS,
+ I915_FORMAT_MOD_Y_TILED_CCS,
+ I915_FORMAT_MOD_Yf_TILED,
+ I915_FORMAT_MOD_Y_TILED,
+ I915_FORMAT_MOD_X_TILED,
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
/* Cursor formats */
static const uint32_t intel_cursor_formats[] = {
DRM_FORMAT_ARGB8888,
};
+static const uint64_t cursor_format_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
@@ -1777,7 +1801,7 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
/* FDI must be feeding us bits for PCH ports */
assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
- assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
+ assert_fdi_rx_enabled(dev_priv, PIPE_A);
/* Workaround: set timing override bit. */
val = I915_READ(TRANS_CHICKEN2(PIPE_A));
@@ -1853,16 +1877,16 @@ void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}
-enum transcoder intel_crtc_pch_transcoder(struct intel_crtc *crtc)
+enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
WARN_ON(!crtc->config->has_pch_encoder);
if (HAS_PCH_LPT(dev_priv))
- return TRANSCODER_A;
+ return PIPE_A;
else
- return (enum transcoder) crtc->pipe;
+ return crtc->pipe;
}
/**
@@ -1901,7 +1925,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
if (crtc->config->has_pch_encoder) {
/* if driving the PCH, we need FDI enabled */
assert_fdi_rx_pll_enabled(dev_priv,
- (enum pipe) intel_crtc_pch_transcoder(crtc));
+ intel_crtc_pch_transcoder(crtc));
assert_fdi_tx_pll_enabled(dev_priv,
(enum pipe) cpu_transcoder);
}
@@ -1999,11 +2023,19 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int plane)
return 128;
else
return 512;
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ if (plane == 1)
+ return 128;
+ /* fall through */
case I915_FORMAT_MOD_Y_TILED:
if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv))
return 128;
else
return 512;
+ case I915_FORMAT_MOD_Yf_TILED_CCS:
+ if (plane == 1)
+ return 128;
+ /* fall through */
case I915_FORMAT_MOD_Yf_TILED:
switch (cpp) {
case 1:
@@ -2110,7 +2142,7 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
struct drm_i915_private *dev_priv = to_i915(fb->dev);
/* AUX_DIST needs only 4K alignment */
- if (fb->format->format == DRM_FORMAT_NV12 && plane == 1)
+ if (plane == 1)
return 4096;
switch (fb->modifier) {
@@ -2120,6 +2152,8 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
if (INTEL_GEN(dev_priv) >= 9)
return 256 * 1024;
return 0;
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ case I915_FORMAT_MOD_Yf_TILED_CCS:
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_Yf_TILED:
return 1 * 1024 * 1024;
@@ -2162,6 +2196,8 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
*/
intel_runtime_pm_get(dev_priv);
+ atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
+
vma = i915_gem_object_pin_to_display_plane(obj, alignment, &view);
if (IS_ERR(vma))
goto err;
@@ -2189,6 +2225,8 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
i915_vma_get(vma);
err:
+ atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
+
intel_runtime_pm_put(dev_priv);
return vma;
}
@@ -2427,12 +2465,48 @@ static unsigned int intel_fb_modifier_to_tiling(uint64_t fb_modifier)
case I915_FORMAT_MOD_X_TILED:
return I915_TILING_X;
case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Y_TILED_CCS:
return I915_TILING_Y;
default:
return I915_TILING_NONE;
}
}
+static const struct drm_format_info ccs_formats[] = {
+ { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
+ { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
+ { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
+ { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
+};
+
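As a rough, hedged illustration of the hsub/vsub/cpp values above (the example_* helper is hypothetical, not part of the commit): the CCS aux plane is 1/8 the width and 1/16 the height of the main surface at one byte per element, so a 3840x2160 XRGB8888 framebuffer carries about 480 x 135 bytes of CCS data before stride alignment.

static inline void example_ccs_plane_size(const struct drm_framebuffer *fb,
					  int *w, int *h)
{
	/* Aux (CCS) plane dimensions implied by the format info above. */
	*w = DIV_ROUND_UP(fb->width, fb->format->hsub);   /* e.g. 3840 -> 480 */
	*h = DIV_ROUND_UP(fb->height, fb->format->vsub);  /* e.g. 2160 -> 135 */
}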
+static const struct drm_format_info *
+lookup_format_info(const struct drm_format_info formats[],
+ int num_formats, u32 format)
+{
+ int i;
+
+ for (i = 0; i < num_formats; i++) {
+ if (formats[i].format == format)
+ return &formats[i];
+ }
+
+ return NULL;
+}
+
+static const struct drm_format_info *
+intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
+{
+ switch (cmd->modifier[0]) {
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ case I915_FORMAT_MOD_Yf_TILED_CCS:
+ return lookup_format_info(ccs_formats,
+ ARRAY_SIZE(ccs_formats),
+ cmd->pixel_format);
+ default:
+ return NULL;
+ }
+}
+
static int
intel_fill_fb_info(struct drm_i915_private *dev_priv,
struct drm_framebuffer *fb)
@@ -2456,6 +2530,36 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
intel_fb_offset_to_xy(&x, &y, fb, i);
+ if ((fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) && i == 1) {
+ int hsub = fb->format->hsub;
+ int vsub = fb->format->vsub;
+ int tile_width, tile_height;
+ int main_x, main_y;
+ int ccs_x, ccs_y;
+
+ intel_tile_dims(fb, i, &tile_width, &tile_height);
+
+ ccs_x = (x * hsub) % (tile_width * hsub);
+ ccs_y = (y * vsub) % (tile_height * vsub);
+ main_x = intel_fb->normal[0].x % (tile_width * hsub);
+ main_y = intel_fb->normal[0].y % (tile_height * vsub);
+
+ /*
+ * CCS doesn't have its own x/y offset register, so the intra CCS tile
+ * x/y offsets must match between CCS and the main surface.
+ */
+ if (main_x != ccs_x || main_y != ccs_y) {
+ DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
+ main_x, main_y,
+ ccs_x, ccs_y,
+ intel_fb->normal[0].x,
+ intel_fb->normal[0].y,
+ x, y);
+ return -EINVAL;
+ }
+ }
+
/*
* The fence (if used) is aligned to the start of the object
* so having the framebuffer wrap around across the edge of the
@@ -2664,20 +2768,6 @@ out_unref_obj:
return false;
}
-/* Update plane->state->fb to match plane->fb after driver-internal updates */
-static void
-update_state_fb(struct drm_plane *plane)
-{
- if (plane->fb == plane->state->fb)
- return;
-
- if (plane->state->fb)
- drm_framebuffer_unreference(plane->state->fb);
- plane->state->fb = plane->fb;
- if (plane->state->fb)
- drm_framebuffer_reference(plane->state->fb);
-}
-
static void
intel_set_plane_visible(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state,
@@ -2830,6 +2920,9 @@ static int skl_max_plane_width(const struct drm_framebuffer *fb, int plane,
break;
}
break;
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ case I915_FORMAT_MOD_Yf_TILED_CCS:
+ /* FIXME AUX plane? */
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_Yf_TILED:
switch (cpp) {
@@ -2852,6 +2945,44 @@ static int skl_max_plane_width(const struct drm_framebuffer *fb, int plane,
return 2048;
}
+static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
+ int main_x, int main_y, u32 main_offset)
+{
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ int hsub = fb->format->hsub;
+ int vsub = fb->format->vsub;
+ int aux_x = plane_state->aux.x;
+ int aux_y = plane_state->aux.y;
+ u32 aux_offset = plane_state->aux.offset;
+ u32 alignment = intel_surf_alignment(fb, 1);
+
+ while (aux_offset >= main_offset && aux_y <= main_y) {
+ int x, y;
+
+ if (aux_x == main_x && aux_y == main_y)
+ break;
+
+ if (aux_offset == 0)
+ break;
+
+ x = aux_x / hsub;
+ y = aux_y / vsub;
+ aux_offset = intel_adjust_tile_offset(&x, &y, plane_state, 1,
+ aux_offset, aux_offset - alignment);
+ aux_x = x * hsub + aux_x % hsub;
+ aux_y = y * vsub + aux_y % vsub;
+ }
+
+ if (aux_x != main_x || aux_y != main_y)
+ return false;
+
+ plane_state->aux.offset = aux_offset;
+ plane_state->aux.x = aux_x;
+ plane_state->aux.y = aux_y;
+
+ return true;
+}
+
static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
const struct drm_framebuffer *fb = plane_state->base.fb;
@@ -2894,7 +3025,7 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
while ((x + w) * cpp > fb->pitches[0]) {
if (offset == 0) {
- DRM_DEBUG_KMS("Unable to find suitable display surface offset\n");
+ DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
return -EINVAL;
}
@@ -2903,6 +3034,26 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
}
}
+ /*
+	 * The CCS AUX surface doesn't have its own x/y offsets, so we must make
+	 * sure they match the main surface x/y offsets.
+ */
+ if (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) {
+ while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) {
+ if (offset == 0)
+ break;
+
+ offset = intel_adjust_tile_offset(&x, &y, plane_state, 0,
+ offset, offset - alignment);
+ }
+
+ if (x != plane_state->aux.x || y != plane_state->aux.y) {
+ DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
+ return -EINVAL;
+ }
+ }
+
plane_state->main.offset = offset;
plane_state->main.x = x;
plane_state->main.y = y;
@@ -2939,6 +3090,49 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
return 0;
}
+static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct intel_crtc *crtc = to_intel_crtc(plane_state->base.crtc);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ int src_x = plane_state->base.src.x1 >> 16;
+ int src_y = plane_state->base.src.y1 >> 16;
+ int hsub = fb->format->hsub;
+ int vsub = fb->format->vsub;
+ int x = src_x / hsub;
+ int y = src_y / vsub;
+ u32 offset;
+
+ switch (plane->id) {
+ case PLANE_PRIMARY:
+ case PLANE_SPRITE0:
+ break;
+ default:
+ DRM_DEBUG_KMS("RC support only on plane 1 and 2\n");
+ return -EINVAL;
+ }
+
+ if (crtc->pipe == PIPE_C) {
+ DRM_DEBUG_KMS("No RC support on pipe C\n");
+ return -EINVAL;
+ }
+
+ if (plane_state->base.rotation & ~(DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180)) {
+ DRM_DEBUG_KMS("RC support only with 0/180 degree rotation %x\n",
+ plane_state->base.rotation);
+ return -EINVAL;
+ }
+
+ intel_add_fb_offsets(&x, &y, plane_state, 1);
+ offset = intel_compute_tile_offset(&x, &y, plane_state, 1);
+
+ plane_state->aux.offset = offset;
+ plane_state->aux.x = x * hsub + src_x % hsub;
+ plane_state->aux.y = y * vsub + src_y % vsub;
+
+ return 0;
+}
+
int skl_check_plane_surface(struct intel_plane_state *plane_state)
{
const struct drm_framebuffer *fb = plane_state->base.fb;
@@ -2962,6 +3156,11 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
ret = skl_check_nv12_aux_surface(plane_state);
if (ret)
return ret;
+ } else if (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) {
+ ret = skl_check_ccs_aux_surface(plane_state);
+ if (ret)
+ return ret;
} else {
plane_state->aux.offset = ~0xfff;
plane_state->aux.x = 0;
@@ -3268,8 +3467,12 @@ static u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
return PLANE_CTL_TILED_X;
case I915_FORMAT_MOD_Y_TILED:
return PLANE_CTL_TILED_Y;
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ return PLANE_CTL_TILED_Y | PLANE_CTL_DECOMPRESSION_ENABLE;
case I915_FORMAT_MOD_Yf_TILED:
return PLANE_CTL_TILED_YF;
+ case I915_FORMAT_MOD_Yf_TILED_CCS:
+ return PLANE_CTL_TILED_YF | PLANE_CTL_DECOMPRESSION_ENABLE;
default:
MISSING_CASE(fb_modifier);
}
@@ -3311,7 +3514,7 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
plane_ctl = PLANE_CTL_ENABLE;
- if (!IS_GEMINILAKE(dev_priv)) {
+ if (!IS_GEMINILAKE(dev_priv) && !IS_CANNONLAKE(dev_priv)) {
plane_ctl |=
PLANE_CTL_PIPE_GAMMA_ENABLE |
PLANE_CTL_PIPE_CSC_ENABLE |
@@ -3342,6 +3545,7 @@ static void skylake_update_primary_plane(struct intel_plane *plane,
u32 plane_ctl = plane_state->ctl;
unsigned int rotation = plane_state->base.rotation;
u32 stride = skl_plane_stride(fb, 0, rotation);
+ u32 aux_stride = skl_plane_stride(fb, 1, rotation);
u32 surf_addr = plane_state->main.offset;
int scaler_id = plane_state->scaler_id;
int src_x = plane_state->main.x;
@@ -3367,7 +3571,7 @@ static void skylake_update_primary_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- if (IS_GEMINILAKE(dev_priv)) {
+ if (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) {
I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id),
PLANE_COLOR_PIPE_GAMMA_ENABLE |
PLANE_COLOR_PIPE_CSC_ENABLE |
@@ -3378,6 +3582,10 @@ static void skylake_update_primary_plane(struct intel_plane *plane,
I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (src_y << 16) | src_x);
I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride);
I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w);
+ I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id),
+ (plane_state->aux.offset - surf_addr) | aux_stride);
+ I915_WRITE_FW(PLANE_AUX_OFFSET(pipe, plane_id),
+ (plane_state->aux.y << 16) | plane_state->aux.x);
if (scaler_id >= 0) {
uint32_t ps_ctrl = 0;
@@ -3419,34 +3627,6 @@ static void skylake_disable_primary_plane(struct intel_plane *primary,
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-static void intel_complete_page_flips(struct drm_i915_private *dev_priv)
-{
- struct intel_crtc *crtc;
-
- for_each_intel_crtc(&dev_priv->drm, crtc)
- intel_finish_page_flip_cs(dev_priv, crtc->pipe);
-}
-
-static void intel_update_primary_planes(struct drm_device *dev)
-{
- struct drm_crtc *crtc;
-
- for_each_crtc(dev, crtc) {
- struct intel_plane *plane = to_intel_plane(crtc->primary);
- struct intel_plane_state *plane_state =
- to_intel_plane_state(plane->base.state);
-
- if (plane_state->base.visible) {
- trace_intel_update_plane(&plane->base,
- to_intel_crtc(crtc));
-
- plane->update_plane(plane,
- to_intel_crtc_state(crtc->state),
- plane_state);
- }
- }
-}
-
static int
__intel_display_resume(struct drm_device *dev,
struct drm_atomic_state *state,
@@ -3499,6 +3679,21 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
struct drm_atomic_state *state;
int ret;
+
+ /* reset doesn't touch the display */
+ if (!i915.force_reset_modeset_test &&
+ !gpu_reset_clobbers_display(dev_priv))
+ return;
+
+ /* We have a modeset vs reset deadlock, defensively unbreak it. */
+ set_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
+ wake_up_all(&dev_priv->gpu_error.wait_queue);
+
+ if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
+ DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
+ i915_gem_set_wedged(dev_priv);
+ }
+
/*
* Need mode_config.mutex so that we don't
* trample ongoing ->detect() and whatnot.
@@ -3512,12 +3707,6 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
drm_modeset_backoff(ctx);
}
-
- /* reset doesn't touch the display, but flips might get nuked anyway, */
- if (!i915.force_reset_modeset_test &&
- !gpu_reset_clobbers_display(dev_priv))
- return;
-
/*
* Disabling the crtcs gracefully seems nicer. Also the
* g33 docs say we should at least disable all the planes.
@@ -3547,33 +3736,22 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
struct drm_atomic_state *state = dev_priv->modeset_restore_state;
int ret;
- /*
- * Flips in the rings will be nuked by the reset,
- * so complete all pending flips so that user space
- * will get its events and not get stuck.
- */
- intel_complete_page_flips(dev_priv);
+ /* reset doesn't touch the display */
+ if (!i915.force_reset_modeset_test &&
+ !gpu_reset_clobbers_display(dev_priv))
+ return;
+
+ if (!state)
+ goto unlock;
dev_priv->modeset_restore_state = NULL;
/* reset doesn't touch the display */
if (!gpu_reset_clobbers_display(dev_priv)) {
- if (!state) {
- /*
- * Flips in the rings have been nuked by the reset,
- * so update the base address of all primary
- * planes to the the last fb to make sure we're
- * showing the correct fb after a reset.
- *
- * FIXME: Atomic will make this obsolete since we won't schedule
- * CS-based flips (which might get lost in gpu resets) any more.
- */
- intel_update_primary_planes(dev);
- } else {
- ret = __intel_display_resume(dev, state, ctx);
+ /* for testing only restore the display */
+ ret = __intel_display_resume(dev, state, ctx);
if (ret)
DRM_ERROR("Restoring old state failed with %i\n", ret);
- }
} else {
/*
* The display has been reset as well,
@@ -3597,40 +3775,13 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
intel_hpd_init(dev_priv);
}
- if (state)
- drm_atomic_state_put(state);
+ drm_atomic_state_put(state);
+unlock:
drm_modeset_drop_locks(ctx);
drm_modeset_acquire_fini(ctx);
mutex_unlock(&dev->mode_config.mutex);
-}
-static bool abort_flip_on_reset(struct intel_crtc *crtc)
-{
- struct i915_gpu_error *error = &to_i915(crtc->base.dev)->gpu_error;
-
- if (i915_reset_backoff(error))
- return true;
-
- if (crtc->reset_count != i915_reset_count(error))
- return true;
-
- return false;
-}
-
-static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- bool pending;
-
- if (abort_flip_on_reset(intel_crtc))
- return false;
-
- spin_lock_irq(&dev->event_lock);
- pending = to_intel_crtc(crtc)->flip_work != NULL;
- spin_unlock_irq(&dev->event_lock);
-
- return pending;
+ clear_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
}
static void intel_update_pipe_config(struct intel_crtc *crtc,
@@ -4187,21 +4338,22 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
{
- struct intel_crtc *crtc;
-
- /* Note that we don't need to be called with mode_config.lock here
- * as our list of CRTC objects is static for the lifetime of the
- * device and so cannot disappear as we iterate. Similarly, we can
- * happily treat the predicates as racy, atomic checks as userspace
- * cannot claim and pin a new fb without at least acquring the
- * struct_mutex and so serialising with us.
- */
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- if (atomic_read(&crtc->unpin_work_count) == 0)
+ struct drm_crtc *crtc;
+ bool cleanup_done;
+
+ drm_for_each_crtc(crtc, &dev_priv->drm) {
+ struct drm_crtc_commit *commit;
+ spin_lock(&crtc->commit_lock);
+ commit = list_first_entry_or_null(&crtc->commit_list,
+ struct drm_crtc_commit, commit_entry);
+ cleanup_done = commit ?
+ try_wait_for_completion(&commit->cleanup_done) : true;
+ spin_unlock(&crtc->commit_lock);
+
+ if (cleanup_done)
continue;
- if (crtc->flip_work)
- intel_wait_for_vblank(dev_priv, crtc->pipe);
+ drm_crtc_wait_one_vblank(crtc);
return true;
}
@@ -4209,57 +4361,6 @@ bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
return false;
}
-static void page_flip_completed(struct intel_crtc *intel_crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
- struct intel_flip_work *work = intel_crtc->flip_work;
-
- intel_crtc->flip_work = NULL;
-
- if (work->event)
- drm_crtc_send_vblank_event(&intel_crtc->base, work->event);
-
- drm_crtc_vblank_put(&intel_crtc->base);
-
- wake_up_all(&dev_priv->pending_flip_queue);
- trace_i915_flip_complete(intel_crtc->plane,
- work->pending_flip_obj);
-
- queue_work(dev_priv->wq, &work->unpin_work);
-}
-
-static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- long ret;
-
- WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
-
- ret = wait_event_interruptible_timeout(
- dev_priv->pending_flip_queue,
- !intel_crtc_has_pending_flip(crtc),
- 60*HZ);
-
- if (ret < 0)
- return ret;
-
- if (ret == 0) {
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_flip_work *work;
-
- spin_lock_irq(&dev->event_lock);
- work = intel_crtc->flip_work;
- if (work && !is_mmio_work(work)) {
- WARN_ONCE(1, "Removing stuck page flip\n");
- page_flip_completed(intel_crtc);
- }
- spin_unlock_irq(&dev->event_lock);
- }
-
- return 0;
-}
-
void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
u32 temp;
@@ -4579,7 +4680,7 @@ static void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);
+ assert_pch_transcoder_disabled(dev_priv, PIPE_A);
lpt_program_iclkip(crtc);
@@ -4612,6 +4713,9 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
&crtc_state->scaler_state;
struct intel_crtc *intel_crtc =
to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->base.adjusted_mode;
int need_scaling;
/*
@@ -4621,6 +4725,21 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
*/
need_scaling = src_w != dst_w || src_h != dst_h;
+ if (crtc_state->ycbcr420 && scaler_user == SKL_CRTC_INDEX)
+ need_scaling = true;
+
+ /*
+	 * Scaling/fitting is not supported in IF-ID mode on GEN9+.
+ * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
+ * Once NV12 is enabled, handle it here while allocating scaler
+ * for NV12.
+ */
+ if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
+ need_scaling && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
+ return -EINVAL;
+ }
+
/*
* if plane is being disabled or scaler is no more required or force detach
* - free scaler binded to this plane/crtc
@@ -5332,8 +5451,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
return;
if (intel_crtc->config->has_pch_encoder)
- intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
- false);
+ intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
@@ -5418,8 +5536,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
intel_wait_for_vblank(dev_priv, pipe);
intel_wait_for_vblank(dev_priv, pipe);
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
- intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
- true);
+ intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
/* If we change the relative order between pipe/planes enabling, we need
@@ -5516,8 +5633,7 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
if (intel_crtc->config->has_pch_encoder)
- intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
- false);
+ intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
intel_encoders_disable(crtc, old_crtc_state, old_state);
@@ -5545,8 +5661,7 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
intel_encoders_post_disable(crtc, old_crtc_state, old_state);
if (old_crtc_state->has_pch_encoder)
- intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
- true);
+ intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
static void i9xx_pfit_enable(struct intel_crtc *crtc)
@@ -5855,8 +5970,6 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
return;
if (crtc->primary->state->visible) {
- WARN_ON(intel_crtc->flip_work);
-
intel_pre_disable_primary_noatomic(crtc);
intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
@@ -6265,6 +6378,16 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
return -EINVAL;
}
+ if (pipe_config->ycbcr420 && pipe_config->base.ctm) {
+ /*
+ * There is only one pipe CSC unit per pipe, and we need that
+ * for output conversion from RGB->YCBCR. So if CTM is already
+ * applied we can't support YCBCR420 output.
+ */
+ DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n");
+ return -EINVAL;
+ }
+
/*
* Pipe horizontal size must be even in:
* - DVO ganged mode
@@ -8058,6 +8181,7 @@ static void haswell_set_pipemisc(struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc_state *config = intel_crtc->config;
if (IS_BROADWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 9) {
u32 val = 0;
@@ -8083,6 +8207,12 @@ static void haswell_set_pipemisc(struct drm_crtc *crtc)
if (intel_crtc->config->dither)
val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
+ if (config->ycbcr420) {
+ val |= PIPEMISC_OUTPUT_COLORSPACE_YUV |
+ PIPEMISC_YUV420_ENABLE |
+ PIPEMISC_YUV420_MODE_FULL_BLEND;
+ }
+
I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
}
}
@@ -8410,10 +8540,16 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
fb->modifier = I915_FORMAT_MOD_X_TILED;
break;
case PLANE_CTL_TILED_Y:
- fb->modifier = I915_FORMAT_MOD_Y_TILED;
+ if (val & PLANE_CTL_DECOMPRESSION_ENABLE)
+ fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
+ else
+ fb->modifier = I915_FORMAT_MOD_Y_TILED;
break;
case PLANE_CTL_TILED_YF:
- fb->modifier = I915_FORMAT_MOD_Yf_TILED;
+ if (val & PLANE_CTL_DECOMPRESSION_ENABLE)
+ fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
+ else
+ fb->modifier = I915_FORMAT_MOD_Yf_TILED;
break;
default:
MISSING_CASE(tiling);
@@ -8647,7 +8783,8 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
pipe_name(crtc->pipe));
- I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
+ I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL_DRIVER(HSW_DISP_PW_GLOBAL)),
+ "Display power well on\n");
I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
@@ -9117,6 +9254,8 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
u64 power_domain_mask;
bool active;
+ intel_crtc_init_scalers(crtc, pipe_config);
+
power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
@@ -9145,11 +9284,21 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
pipe_config->gamma_mode =
I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
- if (INTEL_GEN(dev_priv) >= 9) {
- intel_crtc_init_scalers(crtc, pipe_config);
+ if (IS_BROADWELL(dev_priv) || dev_priv->info.gen >= 9) {
+ u32 tmp = I915_READ(PIPEMISC(crtc->pipe));
+ bool clrspace_yuv = tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV;
+
+ if (IS_GEMINILAKE(dev_priv) || dev_priv->info.gen >= 10) {
+ bool blend_mode_420 = tmp &
+ PIPEMISC_YUV420_MODE_FULL_BLEND;
- pipe_config->scaler_state.scaler_id = -1;
- pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
+ pipe_config->ycbcr420 = tmp & PIPEMISC_YUV420_ENABLE;
+ if (pipe_config->ycbcr420 != clrspace_yuv ||
+ pipe_config->ycbcr420 != blend_mode_420)
+ DRM_DEBUG_KMS("Bad 4:2:0 mode (%08x)\n", tmp);
+ } else if (clrspace_yuv) {
+ DRM_DEBUG_KMS("YCbCr 4:2:0 Unsupported\n");
+ }
}
power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
@@ -9540,7 +9689,16 @@ static void i9xx_update_cursor(struct intel_plane *plane,
* On some platforms writing CURCNTR first will also
* cause CURPOS to be armed by the CURBASE write.
* Without the CURCNTR write the CURPOS write would
- * arm itself.
+ * arm itself. Thus we always start the full update
+ * with a CURCNTR write.
+ *
+ * On other platforms CURPOS always requires the
+ * CURBASE write to arm the update. Additionally
+ * a write to any of the cursor registers will cancel
+ * an already armed cursor update. Thus leaving out
+ * the CURBASE write after CURPOS could lead to a
+ * cursor that doesn't appear to move, or even change
+ * shape. Thus we always write CURBASE.
*
* CURCNTR and CUR_FBC_CTL are always
* armed by the CURBASE write only.
@@ -9559,6 +9717,7 @@ static void i9xx_update_cursor(struct intel_plane *plane,
plane->cursor.cntl = cntl;
} else {
I915_WRITE_FW(CURPOS(pipe), pos);
+ I915_WRITE_FW(CURBASE(pipe), base);
}
POSTING_READ_FW(CURBASE(pipe));
@@ -10119,849 +10278,11 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct intel_flip_work *work;
-
- spin_lock_irq(&dev->event_lock);
- work = intel_crtc->flip_work;
- intel_crtc->flip_work = NULL;
- spin_unlock_irq(&dev->event_lock);
-
- if (work) {
- cancel_work_sync(&work->mmio_work);
- cancel_work_sync(&work->unpin_work);
- kfree(work);
- }
drm_crtc_cleanup(crtc);
-
kfree(intel_crtc);
}
-static void intel_unpin_work_fn(struct work_struct *__work)
-{
- struct intel_flip_work *work =
- container_of(__work, struct intel_flip_work, unpin_work);
- struct intel_crtc *crtc = to_intel_crtc(work->crtc);
- struct drm_device *dev = crtc->base.dev;
- struct drm_plane *primary = crtc->base.primary;
-
- if (is_mmio_work(work))
- flush_work(&work->mmio_work);
-
- mutex_lock(&dev->struct_mutex);
- intel_unpin_fb_vma(work->old_vma);
- i915_gem_object_put(work->pending_flip_obj);
- mutex_unlock(&dev->struct_mutex);
-
- i915_gem_request_put(work->flip_queued_req);
-
- intel_frontbuffer_flip_complete(to_i915(dev),
- to_intel_plane(primary)->frontbuffer_bit);
- intel_fbc_post_update(crtc);
- drm_framebuffer_unreference(work->old_fb);
-
- BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
- atomic_dec(&crtc->unpin_work_count);
-
- kfree(work);
-}
-
-/* Is 'a' after or equal to 'b'? */
-static bool g4x_flip_count_after_eq(u32 a, u32 b)
-{
- return !((a - b) & 0x80000000);
-}
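/*
 * Illustration with hypothetical counter values: the subtraction-and-sign
 * test above treats the 32-bit flip count as modular arithmetic, so it
 * survives wraparound. With b = 0xfffffffe and a = 0x00000003 (counter
 * wrapped), a - b = 5 and bit 31 is clear, so 'a' is reported as being
 * at or after 'b'; with the operands swapped, a - b = 0xfffffffb has
 * bit 31 set and the helper returns false.
 */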
-
-static bool __pageflip_finished_cs(struct intel_crtc *crtc,
- struct intel_flip_work *work)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- if (abort_flip_on_reset(crtc))
- return true;
-
- /*
- * The relevant registers don't exist on pre-ctg.
- * As the flip done interrupt doesn't trigger for mmio
- * flips on gmch platforms, a flip count check isn't
- * really needed there. But since ctg has the registers,
- * include it in the check anyway.
- */
- if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
- return true;
-
- /*
- * BDW signals flip done immediately if the plane
- * is disabled, even if the plane enable is already
- * armed to occur at the next vblank :(
- */
-
- /*
- * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
- * used the same base address. In that case the mmio flip might
- * have completed, but the CS hasn't even executed the flip yet.
- *
- * A flip count check isn't enough as the CS might have updated
- * the base address just after start of vblank, but before we
- * managed to process the interrupt. This means we'd complete the
- * CS flip too soon.
- *
- * Combining both checks should get us a good enough result. It may
- * still happen that the CS flip has been executed, but has not
- * yet actually completed. But in case the base address is the same
- * anyway, we don't really care.
- */
- return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
- crtc->flip_work->gtt_offset &&
- g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)),
- crtc->flip_work->flip_count);
-}
-
-static bool
-__pageflip_finished_mmio(struct intel_crtc *crtc,
- struct intel_flip_work *work)
-{
- /*
- * MMIO work completes when vblank is different from
- * flip_queued_vblank.
- *
- * Reset counter value doesn't matter, this is handled by
- * i915_wait_request finishing early, so no need to handle
- * reset here.
- */
- return intel_crtc_get_vblank_counter(crtc) != work->flip_queued_vblank;
-}
-
-
-static bool pageflip_finished(struct intel_crtc *crtc,
- struct intel_flip_work *work)
-{
- if (!atomic_read(&work->pending))
- return false;
-
- smp_rmb();
-
- if (is_mmio_work(work))
- return __pageflip_finished_mmio(crtc, work);
- else
- return __pageflip_finished_cs(crtc, work);
-}
-
-void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
-{
- struct drm_device *dev = &dev_priv->drm;
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- struct intel_flip_work *work;
- unsigned long flags;
-
- /* Ignore early vblank irqs */
- if (!crtc)
- return;
-
- /*
- * This is called both by irq handlers and the reset code (to complete
- * lost pageflips) so needs the full irqsave spinlocks.
- */
- spin_lock_irqsave(&dev->event_lock, flags);
- work = crtc->flip_work;
-
- if (work != NULL &&
- !is_mmio_work(work) &&
- pageflip_finished(crtc, work))
- page_flip_completed(crtc);
-
- spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
-void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe)
-{
- struct drm_device *dev = &dev_priv->drm;
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- struct intel_flip_work *work;
- unsigned long flags;
-
- /* Ignore early vblank irqs */
- if (!crtc)
- return;
-
- /*
- * This is called both by irq handlers and the reset code (to complete
- * lost pageflips) so needs the full irqsave spinlocks.
- */
- spin_lock_irqsave(&dev->event_lock, flags);
- work = crtc->flip_work;
-
- if (work != NULL &&
- is_mmio_work(work) &&
- pageflip_finished(crtc, work))
- page_flip_completed(crtc);
-
- spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
-static inline void intel_mark_page_flip_active(struct intel_crtc *crtc,
- struct intel_flip_work *work)
-{
- work->flip_queued_vblank = intel_crtc_get_vblank_counter(crtc);
-
- /* Ensure that the work item is consistent when activating it ... */
- smp_mb__before_atomic();
- atomic_set(&work->pending, 1);
-}
-
-static int intel_gen2_queue_flip(struct drm_device *dev,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_request *req,
- uint32_t flags)
-{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- u32 flip_mask, *cs;
-
- cs = intel_ring_begin(req, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- /* Can't queue multiple flips, so wait for the previous
- * one to finish before executing the next.
- */
- if (intel_crtc->plane)
- flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
- else
- flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
- *cs++ = MI_WAIT_FOR_EVENT | flip_mask;
- *cs++ = MI_NOOP;
- *cs++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
- *cs++ = fb->pitches[0];
- *cs++ = intel_crtc->flip_work->gtt_offset;
- *cs++ = 0; /* aux display base address, unused */
-
- return 0;
-}
-
-static int intel_gen3_queue_flip(struct drm_device *dev,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_request *req,
- uint32_t flags)
-{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- u32 flip_mask, *cs;
-
- cs = intel_ring_begin(req, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- if (intel_crtc->plane)
- flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
- else
- flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
- *cs++ = MI_WAIT_FOR_EVENT | flip_mask;
- *cs++ = MI_NOOP;
- *cs++ = MI_DISPLAY_FLIP_I915 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
- *cs++ = fb->pitches[0];
- *cs++ = intel_crtc->flip_work->gtt_offset;
- *cs++ = MI_NOOP;
-
- return 0;
-}
-
-static int intel_gen4_queue_flip(struct drm_device *dev,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_request *req,
- uint32_t flags)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- u32 pf, pipesrc, *cs;
-
- cs = intel_ring_begin(req, 4);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- /* i965+ uses the linear or tiled offsets from the
- * Display Registers (which do not change across a page-flip)
- * so we need only reprogram the base address.
- */
- *cs++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
- *cs++ = fb->pitches[0];
- *cs++ = intel_crtc->flip_work->gtt_offset |
- intel_fb_modifier_to_tiling(fb->modifier);
-
- /* XXX Enabling the panel-fitter across page-flip is so far
- * untested on non-native modes, so ignore it for now.
- * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
- */
- pf = 0;
- pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
- *cs++ = pf | pipesrc;
-
- return 0;
-}
-
-static int intel_gen6_queue_flip(struct drm_device *dev,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_request *req,
- uint32_t flags)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- u32 pf, pipesrc, *cs;
-
- cs = intel_ring_begin(req, 4);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
- *cs++ = fb->pitches[0] | intel_fb_modifier_to_tiling(fb->modifier);
- *cs++ = intel_crtc->flip_work->gtt_offset;
-
- /* Contrary to the suggestions in the documentation,
- * "Enable Panel Fitter" does not seem to be required when page
- * flipping with a non-native mode, and worse causes a normal
- * modeset to fail.
- * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
- */
- pf = 0;
- pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
- *cs++ = pf | pipesrc;
-
- return 0;
-}
-
-static int intel_gen7_queue_flip(struct drm_device *dev,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_request *req,
- uint32_t flags)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- u32 *cs, plane_bit = 0;
- int len, ret;
-
- switch (intel_crtc->plane) {
- case PLANE_A:
- plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
- break;
- case PLANE_B:
- plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
- break;
- case PLANE_C:
- plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
- break;
- default:
- WARN_ONCE(1, "unknown plane in flip command\n");
- return -ENODEV;
- }
-
- len = 4;
- if (req->engine->id == RCS) {
- len += 6;
- /*
- * On Gen 8, SRM is now taking an extra dword to accommodate
- * 48-bit addresses, and we need a NOOP for the batch size to
- * stay even.
- */
- if (IS_GEN8(dev_priv))
- len += 2;
- }
-
- /*
- * BSpec MI_DISPLAY_FLIP for IVB:
- * "The full packet must be contained within the same cache line."
- *
- * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
- * cacheline, if we ever start emitting more commands before
- * the MI_DISPLAY_FLIP we may need to first emit everything else,
- * then do the cacheline alignment, and finally emit the
- * MI_DISPLAY_FLIP.
- */
- ret = intel_ring_cacheline_align(req);
- if (ret)
- return ret;
-
- cs = intel_ring_begin(req, len);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- /* Unmask the flip-done completion message. Note that the bspec says that
- * we should do this for both the BCS and RCS, and that we must not unmask
- * more than one flip event at any time (or ensure that one flip message
- * can be sent by waiting for flip-done prior to queueing new flips).
- * Experimentation says that BCS works despite DERRMR masking all
- * flip-done completion events and that unmasking all planes at once
- * for the RCS also doesn't appear to drop events. Setting the DERRMR
- * to zero does lead to lockups within MI_DISPLAY_FLIP.
- */
- if (req->engine->id == RCS) {
- *cs++ = MI_LOAD_REGISTER_IMM(1);
- *cs++ = i915_mmio_reg_offset(DERRMR);
- *cs++ = ~(DERRMR_PIPEA_PRI_FLIP_DONE |
- DERRMR_PIPEB_PRI_FLIP_DONE |
- DERRMR_PIPEC_PRI_FLIP_DONE);
- if (IS_GEN8(dev_priv))
- *cs++ = MI_STORE_REGISTER_MEM_GEN8 |
- MI_SRM_LRM_GLOBAL_GTT;
- else
- *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
- *cs++ = i915_mmio_reg_offset(DERRMR);
- *cs++ = i915_ggtt_offset(req->engine->scratch) + 256;
- if (IS_GEN8(dev_priv)) {
- *cs++ = 0;
- *cs++ = MI_NOOP;
- }
- }
-
- *cs++ = MI_DISPLAY_FLIP_I915 | plane_bit;
- *cs++ = fb->pitches[0] | intel_fb_modifier_to_tiling(fb->modifier);
- *cs++ = intel_crtc->flip_work->gtt_offset;
- *cs++ = MI_NOOP;
-
- return 0;
-}
-
-static bool use_mmio_flip(struct intel_engine_cs *engine,
- struct drm_i915_gem_object *obj)
-{
- /*
- * This is not being used for older platforms, because
- * non-availability of flip done interrupt forces us to use
- * CS flips. Older platforms derive flip done using some clever
- * tricks involving the flip_pending status bits and vblank irqs.
- * So using MMIO flips there would disrupt this mechanism.
- */
-
- if (engine == NULL)
- return true;
-
- if (INTEL_GEN(engine->i915) < 5)
- return false;
-
- if (i915.use_mmio_flip < 0)
- return false;
- else if (i915.use_mmio_flip > 0)
- return true;
- else if (i915.enable_execlists)
- return true;
-
- return engine != i915_gem_object_last_write_engine(obj);
-}
-
-static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
- unsigned int rotation,
- struct intel_flip_work *work)
-{
- struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
- const enum pipe pipe = intel_crtc->pipe;
- u32 ctl, stride = skl_plane_stride(fb, 0, rotation);
-
- ctl = I915_READ(PLANE_CTL(pipe, 0));
- ctl &= ~PLANE_CTL_TILED_MASK;
- switch (fb->modifier) {
- case DRM_FORMAT_MOD_LINEAR:
- break;
- case I915_FORMAT_MOD_X_TILED:
- ctl |= PLANE_CTL_TILED_X;
- break;
- case I915_FORMAT_MOD_Y_TILED:
- ctl |= PLANE_CTL_TILED_Y;
- break;
- case I915_FORMAT_MOD_Yf_TILED:
- ctl |= PLANE_CTL_TILED_YF;
- break;
- default:
- MISSING_CASE(fb->modifier);
- }
-
- /*
- * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
- * PLANE_SURF updates, the update is then guaranteed to be atomic.
- */
- I915_WRITE(PLANE_CTL(pipe, 0), ctl);
- I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
-
- I915_WRITE(PLANE_SURF(pipe, 0), work->gtt_offset);
- POSTING_READ(PLANE_SURF(pipe, 0));
-}
-
-static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
- struct intel_flip_work *work)
-{
- struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
- i915_reg_t reg = DSPCNTR(intel_crtc->plane);
- u32 dspcntr;
-
- dspcntr = I915_READ(reg);
-
- if (fb->modifier == I915_FORMAT_MOD_X_TILED)
- dspcntr |= DISPPLANE_TILED;
- else
- dspcntr &= ~DISPPLANE_TILED;
-
- I915_WRITE(reg, dspcntr);
-
- I915_WRITE(DSPSURF(intel_crtc->plane), work->gtt_offset);
- POSTING_READ(DSPSURF(intel_crtc->plane));
-}
-
-static void intel_mmio_flip_work_func(struct work_struct *w)
-{
- struct intel_flip_work *work =
- container_of(w, struct intel_flip_work, mmio_work);
- struct intel_crtc *crtc = to_intel_crtc(work->crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_framebuffer *intel_fb =
- to_intel_framebuffer(crtc->base.primary->fb);
- struct drm_i915_gem_object *obj = intel_fb->obj;
-
- WARN_ON(i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT, NULL) < 0);
-
- intel_pipe_update_start(crtc);
-
- if (INTEL_GEN(dev_priv) >= 9)
- skl_do_mmio_flip(crtc, work->rotation, work);
- else
- /* use_mmio_flip() restricts MMIO flips to ilk+ */
- ilk_do_mmio_flip(crtc, work);
-
- intel_pipe_update_end(crtc, work);
-}
-
-static int intel_default_queue_flip(struct drm_device *dev,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_request *req,
- uint32_t flags)
-{
- return -ENODEV;
-}
-
-static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv,
- struct intel_crtc *intel_crtc,
- struct intel_flip_work *work)
-{
- u32 addr, vblank;
-
- if (!atomic_read(&work->pending))
- return false;
-
- smp_rmb();
-
- vblank = intel_crtc_get_vblank_counter(intel_crtc);
- if (work->flip_ready_vblank == 0) {
- if (work->flip_queued_req &&
- !i915_gem_request_completed(work->flip_queued_req))
- return false;
-
- work->flip_ready_vblank = vblank;
- }
-
- if (vblank - work->flip_ready_vblank < 3)
- return false;
-
- /* Potential stall - if we see that the flip has happened,
- * assume a missed interrupt. */
- if (INTEL_GEN(dev_priv) >= 4)
- addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
- else
- addr = I915_READ(DSPADDR(intel_crtc->plane));
-
- /* There is a potential issue here with a false positive after a flip
- * to the same address. We could address this by checking for a
- * non-incrementing frame counter.
- */
- return addr == work->gtt_offset;
-}
-
-void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
-{
- struct drm_device *dev = &dev_priv->drm;
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- struct intel_flip_work *work;
-
- WARN_ON(!in_interrupt());
-
- if (crtc == NULL)
- return;
-
- spin_lock(&dev->event_lock);
- work = crtc->flip_work;
-
- if (work != NULL && !is_mmio_work(work) &&
- __pageflip_stall_check_cs(dev_priv, crtc, work)) {
- WARN_ONCE(1,
- "Kicking stuck page flip: queued at %d, now %d\n",
- work->flip_queued_vblank, intel_crtc_get_vblank_counter(crtc));
- page_flip_completed(crtc);
- work = NULL;
- }
-
- if (work != NULL && !is_mmio_work(work) &&
- intel_crtc_get_vblank_counter(crtc) - work->flip_queued_vblank > 1)
- intel_queue_rps_boost_for_request(work->flip_queued_req);
- spin_unlock(&dev->event_lock);
-}
-
-__maybe_unused
-static int intel_crtc_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_framebuffer *old_fb = crtc->primary->fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_plane *primary = crtc->primary;
- enum pipe pipe = intel_crtc->pipe;
- struct intel_flip_work *work;
- struct intel_engine_cs *engine;
- bool mmio_flip;
- struct drm_i915_gem_request *request;
- struct i915_vma *vma;
- int ret;
-
- /*
- * drm_mode_page_flip_ioctl() should already catch this, but double
- * check to be safe. In the future we may enable pageflipping from
- * a disabled primary plane.
- */
- if (WARN_ON(intel_fb_obj(old_fb) == NULL))
- return -EBUSY;
-
- /* Can't change pixel format via MI display flips. */
- if (fb->format != crtc->primary->fb->format)
- return -EINVAL;
-
- /*
- * TILEOFF/LINOFF registers can't be changed via MI display flips.
- * Note that pitch changes could also affect these registers.
- */
- if (INTEL_GEN(dev_priv) > 3 &&
- (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
- fb->pitches[0] != crtc->primary->fb->pitches[0]))
- return -EINVAL;
-
- if (i915_terminally_wedged(&dev_priv->gpu_error))
- goto out_hang;
-
- work = kzalloc(sizeof(*work), GFP_KERNEL);
- if (work == NULL)
- return -ENOMEM;
-
- work->event = event;
- work->crtc = crtc;
- work->old_fb = old_fb;
- INIT_WORK(&work->unpin_work, intel_unpin_work_fn);
-
- ret = drm_crtc_vblank_get(crtc);
- if (ret)
- goto free_work;
-
- /* We borrow the event spin lock for protecting flip_work */
- spin_lock_irq(&dev->event_lock);
- if (intel_crtc->flip_work) {
- /* Before declaring the flip queue wedged, check if
- * the hardware completed the operation behind our backs.
- */
- if (pageflip_finished(intel_crtc, intel_crtc->flip_work)) {
- DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
- page_flip_completed(intel_crtc);
- } else {
- DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
- spin_unlock_irq(&dev->event_lock);
-
- drm_crtc_vblank_put(crtc);
- kfree(work);
- return -EBUSY;
- }
- }
- intel_crtc->flip_work = work;
- spin_unlock_irq(&dev->event_lock);
-
- if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
- flush_workqueue(dev_priv->wq);
-
- /* Reference the objects for the scheduled work. */
- drm_framebuffer_reference(work->old_fb);
-
- crtc->primary->fb = fb;
- update_state_fb(crtc->primary);
-
- work->pending_flip_obj = i915_gem_object_get(obj);
-
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- goto cleanup;
-
- intel_crtc->reset_count = i915_reset_count(&dev_priv->gpu_error);
- if (i915_reset_backoff_or_wedged(&dev_priv->gpu_error)) {
- ret = -EIO;
- goto unlock;
- }
-
- atomic_inc(&intel_crtc->unpin_work_count);
-
- if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
- work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- engine = dev_priv->engine[BCS];
- if (fb->modifier != old_fb->modifier)
- /* vlv: DISPLAY_FLIP fails to change tiling */
- engine = NULL;
- } else if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
- engine = dev_priv->engine[BCS];
- } else if (INTEL_GEN(dev_priv) >= 7) {
- engine = i915_gem_object_last_write_engine(obj);
- if (engine == NULL || engine->id != RCS)
- engine = dev_priv->engine[BCS];
- } else {
- engine = dev_priv->engine[RCS];
- }
-
- mmio_flip = use_mmio_flip(engine, obj);
-
- vma = intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto cleanup_pending;
- }
-
- work->old_vma = to_intel_plane_state(primary->state)->vma;
- to_intel_plane_state(primary->state)->vma = vma;
-
- work->gtt_offset = i915_ggtt_offset(vma) + intel_crtc->dspaddr_offset;
- work->rotation = crtc->primary->state->rotation;
-
- /*
- * There's the potential that the next frame will not be compatible with
- * FBC, so we want to call pre_update() before the actual page flip.
- * The problem is that pre_update() caches some information about the fb
- * object, so we want to do this only after the object is pinned. Let's
- * be on the safe side and do this immediately before scheduling the
- * flip.
- */
- intel_fbc_pre_update(intel_crtc, intel_crtc->config,
- to_intel_plane_state(primary->state));
-
- if (mmio_flip) {
- INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
- queue_work(system_unbound_wq, &work->mmio_work);
- } else {
- request = i915_gem_request_alloc(engine,
- dev_priv->kernel_context);
- if (IS_ERR(request)) {
- ret = PTR_ERR(request);
- goto cleanup_unpin;
- }
-
- ret = i915_gem_request_await_object(request, obj, false);
- if (ret)
- goto cleanup_request;
-
- ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
- page_flip_flags);
- if (ret)
- goto cleanup_request;
-
- intel_mark_page_flip_active(intel_crtc, work);
-
- work->flip_queued_req = i915_gem_request_get(request);
- i915_add_request(request);
- }
-
- i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
- i915_gem_track_fb(intel_fb_obj(old_fb), obj,
- to_intel_plane(primary)->frontbuffer_bit);
- mutex_unlock(&dev->struct_mutex);
-
- intel_frontbuffer_flip_prepare(to_i915(dev),
- to_intel_plane(primary)->frontbuffer_bit);
-
- trace_i915_flip_request(intel_crtc->plane, obj);
-
- return 0;
-
-cleanup_request:
- i915_add_request(request);
-cleanup_unpin:
- to_intel_plane_state(primary->state)->vma = work->old_vma;
- intel_unpin_fb_vma(vma);
-cleanup_pending:
- atomic_dec(&intel_crtc->unpin_work_count);
-unlock:
- mutex_unlock(&dev->struct_mutex);
-cleanup:
- crtc->primary->fb = old_fb;
- update_state_fb(crtc->primary);
-
- i915_gem_object_put(obj);
- drm_framebuffer_unreference(work->old_fb);
-
- spin_lock_irq(&dev->event_lock);
- intel_crtc->flip_work = NULL;
- spin_unlock_irq(&dev->event_lock);
-
- drm_crtc_vblank_put(crtc);
-free_work:
- kfree(work);
-
- if (ret == -EIO) {
- struct drm_atomic_state *state;
- struct drm_plane_state *plane_state;
-
-out_hang:
- state = drm_atomic_state_alloc(dev);
- if (!state)
- return -ENOMEM;
- state->acquire_ctx = dev->mode_config.acquire_ctx;
-
-retry:
- plane_state = drm_atomic_get_plane_state(state, primary);
- ret = PTR_ERR_OR_ZERO(plane_state);
- if (!ret) {
- drm_atomic_set_fb_for_plane(plane_state, fb);
-
- ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
- if (!ret)
- ret = drm_atomic_commit(state);
- }
-
- if (ret == -EDEADLK) {
- drm_modeset_backoff(state->acquire_ctx);
- drm_atomic_state_clear(state);
- goto retry;
- }
-
- drm_atomic_state_put(state);
-
- if (ret == 0 && event) {
- spin_lock_irq(&dev->event_lock);
- drm_crtc_send_vblank_event(crtc, event);
- spin_unlock_irq(&dev->event_lock);
- }
- }
- return ret;
-}
-
-
/**
* intel_wm_need_update - Check whether watermarks need updating
* @plane: drm plane
@@ -11359,6 +10680,9 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
pipe_config->fdi_lanes,
&pipe_config->fdi_m_n);
+ if (pipe_config->ycbcr420)
+ DRM_DEBUG_KMS("YCbCr 4:2:0 output enabled\n");
+
if (intel_crtc_has_dp_encoder(pipe_config)) {
intel_dump_m_n_config(pipe_config, "dp m_n",
pipe_config->lane_count, &pipe_config->dp_m_n);
@@ -11930,6 +11254,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
PIPE_CONF_CHECK_I(hdmi_scrambling);
PIPE_CONF_CHECK_I(hdmi_high_tmds_clock_ratio);
PIPE_CONF_CHECK_I(has_infoframe);
+ PIPE_CONF_CHECK_I(ycbcr420);
PIPE_CONF_CHECK_I(has_audio);
@@ -12771,31 +12096,7 @@ static int intel_atomic_check(struct drm_device *dev,
static int intel_atomic_prepare_commit(struct drm_device *dev,
struct drm_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc_state *crtc_state;
- struct drm_crtc *crtc;
- int i, ret;
-
- for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
- if (state->legacy_cursor_update)
- continue;
-
- ret = intel_crtc_wait_for_pending_flips(crtc);
- if (ret)
- return ret;
-
- if (atomic_read(&to_intel_crtc(crtc)->unpin_work_count) >= 2)
- flush_workqueue(dev_priv->wq);
- }
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- ret = drm_atomic_helper_prepare_planes(dev, state);
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
+ return drm_atomic_helper_prepare_planes(dev, state);
}
u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
@@ -12803,7 +12104,7 @@ u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
struct drm_device *dev = crtc->base.dev;
if (!dev->max_vblank_count)
- return drm_accurate_vblank_count(&crtc->base);
+ return drm_crtc_accurate_vblank_count(&crtc->base);
return dev->driver->get_vblank_counter(dev, crtc->pipe);
}
@@ -13006,6 +12307,30 @@ static void intel_atomic_helper_free_state_worker(struct work_struct *work)
intel_atomic_helper_free_state(dev_priv);
}
+static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
+{
+ struct wait_queue_entry wait_fence, wait_reset;
+ struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
+
+ init_wait_entry(&wait_fence, 0);
+ init_wait_entry(&wait_reset, 0);
+ for (;;) {
+ prepare_to_wait(&intel_state->commit_ready.wait,
+ &wait_fence, TASK_UNINTERRUPTIBLE);
+ prepare_to_wait(&dev_priv->gpu_error.wait_queue,
+ &wait_reset, TASK_UNINTERRUPTIBLE);
+
+ if (i915_sw_fence_done(&intel_state->commit_ready)
+ || test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
+ break;
+
+ schedule();
+ }
+ finish_wait(&intel_state->commit_ready.wait, &wait_fence);
+ finish_wait(&dev_priv->gpu_error.wait_queue, &wait_reset);
+}
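/*
 * The loop above is the standard sleep-on-two-conditions pattern:
 * prepare_to_wait() puts the task on both wait queues and marks it
 * TASK_UNINTERRUPTIBLE *before* the conditions are tested, so a wake-up
 * from either the fence signalling or a GPU reset that arrives between
 * the test and schedule() simply turns schedule() into a no-op instead
 * of being lost.
 */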
+
static void intel_atomic_commit_tail(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
@@ -13019,6 +12344,8 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
unsigned crtc_vblank_mask = 0;
int i;
+ intel_atomic_commit_fence_wait(intel_state);
+
drm_atomic_helper_wait_for_dependencies(state);
if (intel_state->modeset)
@@ -13158,9 +12485,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
}
- mutex_lock(&dev->struct_mutex);
drm_atomic_helper_cleanup_planes(dev, state);
- mutex_unlock(&dev->struct_mutex);
drm_atomic_helper_commit_cleanup_done(state);
@@ -13186,10 +12511,8 @@ intel_atomic_commit_ready(struct i915_sw_fence *fence,
switch (notify) {
case FENCE_COMPLETE:
- if (state->base.commit_work.func)
- queue_work(system_unbound_wq, &state->base.commit_work);
+ /* we do blocking waits in the worker, nothing to do here */
break;
-
case FENCE_FREE:
{
struct intel_atomic_helper *helper =
@@ -13271,7 +12594,13 @@ static int intel_atomic_commit(struct drm_device *dev,
if (INTEL_GEN(dev_priv) < 9)
state->legacy_cursor_update = false;
- drm_atomic_helper_swap_state(state, true);
+ ret = drm_atomic_helper_swap_state(state, true);
+ if (ret) {
+ i915_sw_fence_commit(&intel_state->commit_ready);
+
+ drm_atomic_helper_cleanup_planes(dev, state);
+ return ret;
+ }
dev_priv->wm.distrust_bios_wm = false;
intel_shared_dpll_swap_state(state);
intel_atomic_track_fbs(state);
@@ -13285,14 +12614,14 @@ static int intel_atomic_commit(struct drm_device *dev,
}
drm_atomic_state_get(state);
- INIT_WORK(&state->commit_work,
- nonblock ? intel_atomic_commit_work : NULL);
+ INIT_WORK(&state->commit_work, intel_atomic_commit_work);
i915_sw_fence_commit(&intel_state->commit_ready);
- if (!nonblock) {
- i915_sw_fence_wait(&intel_state->commit_ready);
+ if (nonblock)
+ queue_work(system_unbound_wq, &state->commit_work);
+ else
intel_atomic_commit_tail(state);
- }
+
return 0;
}
@@ -13300,7 +12629,6 @@ static int intel_atomic_commit(struct drm_device *dev,
static const struct drm_crtc_funcs intel_crtc_funcs = {
.gamma_set = drm_atomic_helper_legacy_gamma_set,
.set_config = drm_atomic_helper_set_config,
- .set_property = drm_atomic_helper_crtc_set_property,
.destroy = intel_crtc_destroy,
.page_flip = drm_atomic_helper_page_flip,
.atomic_duplicate_state = intel_crtc_duplicate_state,
@@ -13334,32 +12662,6 @@ intel_prepare_plane_fb(struct drm_plane *plane,
struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
int ret;
- if (obj) {
- if (plane->type == DRM_PLANE_TYPE_CURSOR &&
- INTEL_INFO(dev_priv)->cursor_needs_physical) {
- const int align = intel_cursor_alignment(dev_priv);
-
- ret = i915_gem_object_attach_phys(obj, align);
- if (ret) {
- DRM_DEBUG_KMS("failed to attach phys object\n");
- return ret;
- }
- } else {
- struct i915_vma *vma;
-
- vma = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
- if (IS_ERR(vma)) {
- DRM_DEBUG_KMS("failed to pin object\n");
- return PTR_ERR(vma);
- }
-
- to_intel_plane_state(new_state)->vma = vma;
- }
- }
-
- if (!obj && !old_obj)
- return 0;
-
if (old_obj) {
struct drm_crtc_state *crtc_state =
drm_atomic_get_existing_crtc_state(new_state->state,
@@ -13398,6 +12700,38 @@ intel_prepare_plane_fb(struct drm_plane *plane,
if (!obj)
return 0;
+ ret = i915_gem_object_pin_pages(obj);
+ if (ret)
+ return ret;
+
+ ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
+ if (ret) {
+ i915_gem_object_unpin_pages(obj);
+ return ret;
+ }
+
+ if (plane->type == DRM_PLANE_TYPE_CURSOR &&
+ INTEL_INFO(dev_priv)->cursor_needs_physical) {
+ const int align = intel_cursor_alignment(dev_priv);
+
+ ret = i915_gem_object_attach_phys(obj, align);
+ } else {
+ struct i915_vma *vma;
+
+ vma = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
+ if (!IS_ERR(vma))
+ to_intel_plane_state(new_state)->vma = vma;
+ else
+ ret = PTR_ERR(vma);
+ }
+
+ i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
+
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ i915_gem_object_unpin_pages(obj);
+ if (ret)
+ return ret;
+
if (!new_state->fence) { /* implicit fencing */
ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
obj->resv, NULL,
@@ -13405,8 +12739,6 @@ intel_prepare_plane_fb(struct drm_plane *plane,
GFP_KERNEL);
if (ret < 0)
return ret;
-
- i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
}
return 0;
@@ -13429,8 +12761,11 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
/* Should only be called after a successful intel_prepare_plane_fb()! */
vma = fetch_and_zero(&to_intel_plane_state(old_state)->vma);
- if (vma)
+ if (vma) {
+ mutex_lock(&plane->dev->struct_mutex);
intel_unpin_fb_vma(vma);
+ mutex_unlock(&plane->dev->struct_mutex);
+ }
}
int
@@ -13557,7 +12892,7 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc,
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- intel_pipe_update_end(intel_crtc, NULL);
+ intel_pipe_update_end(intel_crtc);
}
/**
@@ -13573,15 +12908,110 @@ void intel_plane_destroy(struct drm_plane *plane)
kfree(to_intel_plane(plane));
}
-const struct drm_plane_funcs intel_plane_funcs = {
+static bool i8xx_mod_supported(uint32_t format, uint64_t modifier)
+{
+ switch (format) {
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_XRGB8888:
+ return modifier == DRM_FORMAT_MOD_LINEAR ||
+ modifier == I915_FORMAT_MOD_X_TILED;
+ default:
+ return false;
+ }
+}
+
+static bool i965_mod_supported(uint32_t format, uint64_t modifier)
+{
+ switch (format) {
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ return modifier == DRM_FORMAT_MOD_LINEAR ||
+ modifier == I915_FORMAT_MOD_X_TILED;
+ default:
+ return false;
+ }
+}
+
+static bool skl_mod_supported(uint32_t format, uint64_t modifier)
+{
+ switch (format) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ if (modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
+ modifier == I915_FORMAT_MOD_Y_TILED_CCS)
+ return true;
+ /* fall through */
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ if (modifier == I915_FORMAT_MOD_Yf_TILED)
+ return true;
+ /* fall through */
+ case DRM_FORMAT_C8:
+ if (modifier == DRM_FORMAT_MOD_LINEAR ||
+ modifier == I915_FORMAT_MOD_X_TILED ||
+ modifier == I915_FORMAT_MOD_Y_TILED)
+ return true;
+ /* fall through */
+ default:
+ return false;
+ }
+}
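/*
 * The fall-throughs above form a capability cascade: the 8888 formats
 * accept the CCS modifiers plus everything below them, the
 * 565/2101010/YUV class accepts Yf tiling plus everything below, and C8
 * only linear/X/Y tiling. For example,
 * (DRM_FORMAT_RGB565, I915_FORMAT_MOD_Y_TILED_CCS) falls all the way to
 * the default case and is rejected, while
 * (DRM_FORMAT_RGB565, I915_FORMAT_MOD_X_TILED) matches the final tiling
 * check and is accepted.
 */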
+
+static bool intel_primary_plane_format_mod_supported(struct drm_plane *plane,
+ uint32_t format,
+ uint64_t modifier)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->dev);
+
+ if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID))
+ return false;
+
+ if ((modifier >> 56) != DRM_FORMAT_MOD_VENDOR_INTEL &&
+ modifier != DRM_FORMAT_MOD_LINEAR)
+ return false;
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ return skl_mod_supported(format, modifier);
+ else if (INTEL_GEN(dev_priv) >= 4)
+ return i965_mod_supported(format, modifier);
+ else
+ return i8xx_mod_supported(format, modifier);
+
+ unreachable();
+}
+
+static bool intel_cursor_plane_format_mod_supported(struct drm_plane *plane,
+ uint32_t format,
+ uint64_t modifier)
+{
+ if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID))
+ return false;
+
+ return modifier == DRM_FORMAT_MOD_LINEAR && format == DRM_FORMAT_ARGB8888;
+}
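/*
 * Usage sketch (the core call site is assumed, it is not shown in this
 * patch): when the DRM core builds the advertised format/modifier list
 * for a plane it queries the driver per pair, roughly
 *
 *	if (!plane->funcs->format_mod_supported(plane, format, modifier))
 *		continue;
 *
 * so with the hook above the cursor plane ends up advertising only
 * ARGB8888 with DRM_FORMAT_MOD_LINEAR.
 */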
+
+static struct drm_plane_funcs intel_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = intel_plane_destroy,
- .set_property = drm_atomic_helper_plane_set_property,
.atomic_get_property = intel_plane_atomic_get_property,
.atomic_set_property = intel_plane_atomic_set_property,
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = intel_primary_plane_format_mod_supported,
};
static int
@@ -13600,7 +13030,7 @@ intel_legacy_cursor_update(struct drm_plane *plane,
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_framebuffer *old_fb;
struct drm_crtc_state *crtc_state = crtc->state;
- struct i915_vma *old_vma;
+ struct i915_vma *old_vma, *vma;
/*
* When crtc is inactive or there is a modeset pending,
@@ -13658,8 +13088,6 @@ intel_legacy_cursor_update(struct drm_plane *plane,
goto out_unlock;
}
} else {
- struct i915_vma *vma;
-
vma = intel_pin_and_fence_fb_obj(fb, new_plane_state->rotation);
if (IS_ERR(vma)) {
DRM_DEBUG_KMS("failed to pin object\n");
@@ -13682,7 +13110,7 @@ intel_legacy_cursor_update(struct drm_plane *plane,
*to_intel_plane_state(old_plane_state) = *to_intel_plane_state(new_plane_state);
new_plane_state->fence = NULL;
new_plane_state->fb = old_fb;
- to_intel_plane_state(new_plane_state)->vma = old_vma;
+ to_intel_plane_state(new_plane_state)->vma = NULL;
if (plane->state->visible) {
trace_intel_update_plane(plane, to_intel_crtc(crtc));
@@ -13694,7 +13122,8 @@ intel_legacy_cursor_update(struct drm_plane *plane,
intel_plane->disable_plane(intel_plane, to_intel_crtc(crtc));
}
- intel_cleanup_plane_fb(plane, new_plane_state);
+ if (old_vma)
+ intel_unpin_fb_vma(old_vma);
out_unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -13712,11 +13141,11 @@ static const struct drm_plane_funcs intel_cursor_plane_funcs = {
.update_plane = intel_legacy_cursor_update,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = intel_plane_destroy,
- .set_property = drm_atomic_helper_plane_set_property,
.atomic_get_property = intel_plane_atomic_get_property,
.atomic_set_property = intel_plane_atomic_set_property,
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = intel_cursor_plane_format_mod_supported,
};
static struct intel_plane *
@@ -13727,6 +13156,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
const uint32_t *intel_primary_formats;
unsigned int supported_rotations;
unsigned int num_formats;
+ const uint64_t *modifiers;
int ret;
primary = kzalloc(sizeof(*primary), GFP_KERNEL);
@@ -13762,21 +13192,34 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
primary->check_plane = intel_check_primary_plane;
- if (INTEL_GEN(dev_priv) >= 9) {
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
+ intel_primary_formats = skl_primary_formats;
+ num_formats = ARRAY_SIZE(skl_primary_formats);
+ modifiers = skl_format_modifiers_ccs;
+
+ primary->update_plane = skylake_update_primary_plane;
+ primary->disable_plane = skylake_disable_primary_plane;
+ } else if (INTEL_GEN(dev_priv) >= 9) {
intel_primary_formats = skl_primary_formats;
num_formats = ARRAY_SIZE(skl_primary_formats);
+ if (pipe < PIPE_C)
+ modifiers = skl_format_modifiers_ccs;
+ else
+ modifiers = skl_format_modifiers_noccs;
primary->update_plane = skylake_update_primary_plane;
primary->disable_plane = skylake_disable_primary_plane;
} else if (INTEL_GEN(dev_priv) >= 4) {
intel_primary_formats = i965_primary_formats;
num_formats = ARRAY_SIZE(i965_primary_formats);
+ modifiers = i9xx_format_modifiers;
primary->update_plane = i9xx_update_primary_plane;
primary->disable_plane = i9xx_disable_primary_plane;
} else {
intel_primary_formats = i8xx_primary_formats;
num_formats = ARRAY_SIZE(i8xx_primary_formats);
+ modifiers = i9xx_format_modifiers;
primary->update_plane = i9xx_update_primary_plane;
primary->disable_plane = i9xx_disable_primary_plane;
@@ -13786,18 +13229,21 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
0, &intel_plane_funcs,
intel_primary_formats, num_formats,
+ modifiers,
DRM_PLANE_TYPE_PRIMARY,
"plane 1%c", pipe_name(pipe));
else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
0, &intel_plane_funcs,
intel_primary_formats, num_formats,
+ modifiers,
DRM_PLANE_TYPE_PRIMARY,
"primary %c", pipe_name(pipe));
else
ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
0, &intel_plane_funcs,
intel_primary_formats, num_formats,
+ modifiers,
DRM_PLANE_TYPE_PRIMARY,
"plane %c", plane_name(primary->plane));
if (ret)
@@ -13883,6 +13329,7 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
0, &intel_cursor_plane_funcs,
intel_cursor_formats,
ARRAY_SIZE(intel_cursor_formats),
+ cursor_format_modifiers,
DRM_PLANE_TYPE_CURSOR,
"cursor %c", pipe_name(pipe));
if (ret)
@@ -14405,10 +13852,12 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct drm_framebuffer *fb = &intel_fb->base;
struct drm_format_name_buf format_name;
- u32 pitch_limit, stride_alignment;
+ u32 pitch_limit;
unsigned int tiling, stride;
int ret = -EINVAL;
+ int i;
i915_gem_object_lock(obj);
obj->framebuffer_references++;
@@ -14437,6 +13886,19 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
/* Passed in modifier sanity checking. */
switch (mode_cmd->modifier[0]) {
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ case I915_FORMAT_MOD_Yf_TILED_CCS:
+ switch (mode_cmd->pixel_format) {
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ break;
+ default:
+ DRM_DEBUG_KMS("RC supported only with RGB8888 formats\n");
+ goto err;
+ }
+ /* fall through */
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_Yf_TILED:
if (INTEL_GEN(dev_priv) < 9) {
@@ -14541,25 +14003,46 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
if (mode_cmd->offsets[0] != 0)
goto err;
- drm_helper_mode_fill_fb_struct(&dev_priv->drm,
- &intel_fb->base, mode_cmd);
+ drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
- stride_alignment = intel_fb_stride_alignment(&intel_fb->base, 0);
- if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
- DRM_DEBUG_KMS("pitch (%d) must be at least %u byte aligned\n",
- mode_cmd->pitches[0], stride_alignment);
- goto err;
+ for (i = 0; i < fb->format->num_planes; i++) {
+ u32 stride_alignment;
+
+ if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
+ DRM_DEBUG_KMS("bad plane %d handle\n", i);
+ return -EINVAL;
+ }
+
+ stride_alignment = intel_fb_stride_alignment(fb, i);
+
+ /*
+ * Display WA #0531: skl,bxt,kbl,glk
+ *
+ * Render decompression and plane width > 3840
+ * combined with horizontal panning requires the
+ * plane stride to be a multiple of 4. We'll just
+ * require the entire fb to accommodate that to avoid
+ * potential runtime errors at plane configuration time.
+ */
+ if (IS_GEN9(dev_priv) && i == 0 && fb->width > 3840 &&
+ (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS))
+ stride_alignment *= 4;
+
+ if (fb->pitches[i] & (stride_alignment - 1)) {
+ DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
+ i, fb->pitches[i], stride_alignment);
+ goto err;
+ }
}
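/*
 * Worked example for the WA above, with an assumed base alignment: if
 * intel_fb_stride_alignment() returned 64 bytes for plane 0 of a
 * 4096-pixel-wide Y-tiled CCS framebuffer on a gen9 part, the WA bumps
 * the requirement to 4 * 64 = 256 bytes, so a pitch of 16448 bytes
 * (a multiple of 64 but not of 256) would now be rejected with -EINVAL
 * instead of being accepted.
 */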
intel_fb->obj = obj;
- ret = intel_fill_fb_info(dev_priv, &intel_fb->base);
+ ret = intel_fill_fb_info(dev_priv, fb);
if (ret)
goto err;
- ret = drm_framebuffer_init(obj->base.dev,
- &intel_fb->base,
- &intel_fb_funcs);
+ ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
if (ret) {
DRM_ERROR("framebuffer init failed %d\n", ret);
goto err;
@@ -14607,6 +14090,7 @@ static void intel_atomic_state_free(struct drm_atomic_state *state)
static const struct drm_mode_config_funcs intel_mode_funcs = {
.fb_create = intel_user_framebuffer_create,
+ .get_format_info = intel_get_format_info,
.output_poll_changed = intel_fbdev_output_poll_changed,
.atomic_check = intel_atomic_check,
.atomic_commit = intel_atomic_commit,
@@ -14706,34 +14190,6 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.update_crtcs = skl_update_crtcs;
else
dev_priv->display.update_crtcs = intel_update_crtcs;
-
- switch (INTEL_INFO(dev_priv)->gen) {
- case 2:
- dev_priv->display.queue_flip = intel_gen2_queue_flip;
- break;
-
- case 3:
- dev_priv->display.queue_flip = intel_gen3_queue_flip;
- break;
-
- case 4:
- case 5:
- dev_priv->display.queue_flip = intel_gen4_queue_flip;
- break;
-
- case 6:
- dev_priv->display.queue_flip = intel_gen6_queue_flip;
- break;
- case 7:
- case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
- dev_priv->display.queue_flip = intel_gen7_queue_flip;
- break;
- case 9:
- /* Drop through - unsupported since execlist only. */
- default:
- /* Default just returns -ENODEV to indicate unsupported */
- dev_priv->display.queue_flip = intel_default_queue_flip;
- }
}
/*
@@ -14765,6 +14221,17 @@ static void quirk_backlight_present(struct drm_device *dev)
DRM_INFO("applying backlight present quirk\n");
}
+/* Toshiba Satellite P50-C-18C requires T12 delay to be min 800ms
+ * which is 300 ms greater than eDP spec T12 min.
+ */
+static void quirk_increase_t12_delay(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ dev_priv->quirks |= QUIRK_INCREASE_T12_DELAY;
+ DRM_INFO("Applying T12 delay quirk\n");
+}
+
struct intel_quirk {
int device;
int subsystem_vendor;
@@ -14848,6 +14315,9 @@ static struct intel_quirk intel_quirks[] = {
/* Dell Chromebook 11 (2015 version) */
{ 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
+
+ /* Toshiba Satellite P50-C-18C */
+ { 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay },
};
static void intel_init_quirks(struct drm_device *dev)
@@ -15650,7 +15120,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
vlv_wm_get_hw_state(dev);
vlv_wm_sanitize(dev_priv);
- } else if (IS_GEN9(dev_priv)) {
+ } else if (INTEL_GEN(dev_priv) >= 9) {
skl_wm_get_hw_state(dev);
} else if (HAS_PCH_SPLIT(dev_priv)) {
ilk_wm_get_hw_state(dev);
@@ -15757,6 +15227,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
*/
drm_kms_helper_poll_fini(dev);
+ /* poll work can call into fbdev, hence clean that up afterwards */
+ intel_fbdev_fini(dev_priv);
+
intel_unregister_dsm_handler();
intel_fbc_global_disable(dev_priv);
@@ -15876,7 +15349,8 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
return NULL;
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
+ error->power_well_driver =
+ I915_READ(HSW_PWR_WELL_CTL_DRIVER(HSW_DISP_PW_GLOBAL));
for_each_pipe(dev_priv, i) {
error->pipe[i].power_domain_on =
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 64fa774c855b..4fd4853b2250 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -97,6 +97,9 @@ static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
324000, 432000, 540000 };
+static const int cnl_rates[] = { 162000, 216000, 270000,
+ 324000, 432000, 540000,
+ 648000, 810000 };
static const int default_rates[] = { 162000, 270000, 540000 };
/**
@@ -229,8 +232,10 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ enum port port = dig_port->port;
const int *source_rates;
int size;
+ u32 voltage;
/* This should only be done once */
WARN_ON(intel_dp->source_rates || intel_dp->num_source_rates);
@@ -238,6 +243,13 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
if (IS_GEN9_LP(dev_priv)) {
source_rates = bxt_rates;
size = ARRAY_SIZE(bxt_rates);
+ } else if (IS_CANNONLAKE(dev_priv)) {
+ source_rates = cnl_rates;
+ size = ARRAY_SIZE(cnl_rates);
+ voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
+ if (port == PORT_A || port == PORT_D ||
+ voltage == VOLTAGE_INFO_0_85V)
+ size -= 2;
} else if (IS_GEN9_BC(dev_priv)) {
source_rates = skl_rates;
size = ARRAY_SIZE(skl_rates);
@@ -322,19 +334,20 @@ static int intel_dp_common_len_rate_limit(struct intel_dp *intel_dp,
return 0;
}
-static bool intel_dp_link_params_valid(struct intel_dp *intel_dp)
+static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
+ uint8_t lane_count)
{
/*
* FIXME: we need to synchronize the current link parameters with
* hardware readout. Currently fast link training doesn't work on
* boot-up.
*/
- if (intel_dp->link_rate == 0 ||
- intel_dp->link_rate > intel_dp->max_link_rate)
+ if (link_rate == 0 ||
+ link_rate > intel_dp->max_link_rate)
return false;
- if (intel_dp->lane_count == 0 ||
- intel_dp->lane_count > intel_dp_max_lane_count(intel_dp))
+ if (lane_count == 0 ||
+ lane_count > intel_dp_max_lane_count(intel_dp))
return false;
return true;
@@ -1606,6 +1619,23 @@ static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
return bpp;
}
+static bool intel_edp_compare_alt_mode(struct drm_display_mode *m1,
+ struct drm_display_mode *m2)
+{
+ bool bres = false;
+
+ if (m1 && m2)
+ bres = (m1->hdisplay == m2->hdisplay &&
+ m1->hsync_start == m2->hsync_start &&
+ m1->hsync_end == m2->hsync_end &&
+ m1->htotal == m2->htotal &&
+ m1->vdisplay == m2->vdisplay &&
+ m1->vsync_start == m2->vsync_start &&
+ m1->vsync_end == m2->vsync_end &&
+ m1->vtotal == m2->vtotal);
+ return bres;
+}
+
bool
intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
@@ -1652,8 +1682,16 @@ intel_dp_compute_config(struct intel_encoder *encoder,
pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;
if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
- intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
- adjusted_mode);
+ struct drm_display_mode *panel_mode =
+ intel_connector->panel.alt_fixed_mode;
+ struct drm_display_mode *req_mode = &pipe_config->base.mode;
+
+ if (!intel_edp_compare_alt_mode(req_mode, panel_mode))
+ panel_mode = intel_connector->panel.fixed_mode;
+
+ drm_mode_debug_printmodeline(panel_mode);
+
+ intel_fixed_panel_mode(panel_mode, adjusted_mode);
if (INTEL_GEN(dev_priv) >= 9) {
int ret;
@@ -1677,12 +1715,18 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
int index;
- index = intel_dp_rate_index(intel_dp->common_rates,
- intel_dp->num_common_rates,
- intel_dp->compliance.test_link_rate);
- if (index >= 0)
- min_clock = max_clock = index;
- min_lane_count = max_lane_count = intel_dp->compliance.test_lane_count;
+ /* Validate the compliance test data since max values
+ * might have changed due to link train fallback.
+ */
+ if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
+ intel_dp->compliance.test_lane_count)) {
+ index = intel_dp_rate_index(intel_dp->common_rates,
+ intel_dp->num_common_rates,
+ intel_dp->compliance.test_link_rate);
+ if (index >= 0)
+ min_clock = max_clock = index;
+ min_lane_count = max_lane_count = intel_dp->compliance.test_lane_count;
+ }
}
DRM_DEBUG_KMS("DP link computation with max lane count %i "
"max bw %d pixel clock %iKHz\n",
@@ -3963,8 +4007,7 @@ intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
int status = 0;
- int min_lane_count = 1;
- int link_rate_index, test_link_rate;
+ int test_link_rate;
uint8_t test_lane_count, test_link_bw;
/* (DP CTS 1.2)
* 4.3.1.11
@@ -3978,10 +4021,6 @@ static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
return DP_TEST_NAK;
}
test_lane_count &= DP_MAX_LANE_COUNT_MASK;
- /* Validate the requested lane count */
- if (test_lane_count < min_lane_count ||
- test_lane_count > intel_dp->max_link_lane_count)
- return DP_TEST_NAK;
status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
&test_link_bw);
@@ -3989,12 +4028,11 @@ static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("Link Rate read failed\n");
return DP_TEST_NAK;
}
- /* Validate the requested link rate */
test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);
- link_rate_index = intel_dp_rate_index(intel_dp->common_rates,
- intel_dp->num_common_rates,
- test_link_rate);
- if (link_rate_index < 0)
+
+ /* Validate the requested link rate and lane count */
+ if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
+ test_lane_count))
return DP_TEST_NAK;
intel_dp->compliance.test_lane_count = test_lane_count;
@@ -4263,7 +4301,8 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
* Validate the cached values of intel_dp->link_rate and
* intel_dp->lane_count before attempting to retrain.
*/
- if (!intel_dp_link_params_valid(intel_dp))
+ if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
+ intel_dp->lane_count))
return;
/* Retrain if Channel EQ or CR not ok */
@@ -4418,8 +4457,6 @@ static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
u32 bit;
switch (port->port) {
- case PORT_A:
- return true;
case PORT_B:
bit = SDE_PORTB_HOTPLUG;
break;
@@ -4443,8 +4480,6 @@ static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
u32 bit;
switch (port->port) {
- case PORT_A:
- return true;
case PORT_B:
bit = SDE_PORTB_HOTPLUG_CPT;
break;
@@ -4454,12 +4489,28 @@ static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
case PORT_D:
bit = SDE_PORTD_HOTPLUG_CPT;
break;
+ default:
+ MISSING_CASE(port->port);
+ return false;
+ }
+
+ return I915_READ(SDEISR) & bit;
+}
+
+static bool spt_digital_port_connected(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *port)
+{
+ u32 bit;
+
+ switch (port->port) {
+ case PORT_A:
+ bit = SDE_PORTA_HOTPLUG_SPT;
+ break;
case PORT_E:
bit = SDE_PORTE_HOTPLUG_SPT;
break;
default:
- MISSING_CASE(port->port);
- return false;
+ return cpt_digital_port_connected(dev_priv, port);
}
return I915_READ(SDEISR) & bit;
@@ -4511,6 +4562,42 @@ static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
return I915_READ(PORT_HOTPLUG_STAT) & bit;
}
+static bool ilk_digital_port_connected(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *port)
+{
+ if (port->port == PORT_A)
+ return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
+ else
+ return ibx_digital_port_connected(dev_priv, port);
+}
+
+static bool snb_digital_port_connected(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *port)
+{
+ if (port->port == PORT_A)
+ return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
+ else
+ return cpt_digital_port_connected(dev_priv, port);
+}
+
+static bool ivb_digital_port_connected(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *port)
+{
+ if (port->port == PORT_A)
+ return I915_READ(DEISR) & DE_DP_A_HOTPLUG_IVB;
+ else
+ return cpt_digital_port_connected(dev_priv, port);
+}
+
+static bool bdw_digital_port_connected(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *port)
+{
+ if (port->port == PORT_A)
+ return I915_READ(GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG;
+ else
+ return cpt_digital_port_connected(dev_priv, port);
+}
+
static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *intel_dig_port)
{
@@ -4518,7 +4605,7 @@ static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
enum port port;
u32 bit;
- intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
+ port = intel_hpd_pin_to_port(intel_encoder->hpd_pin);
switch (port) {
case PORT_A:
bit = BXT_DE_PORT_HP_DDIA;
@@ -4547,16 +4634,25 @@ static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *port)
{
- if (HAS_PCH_IBX(dev_priv))
- return ibx_digital_port_connected(dev_priv, port);
- else if (HAS_PCH_SPLIT(dev_priv))
- return cpt_digital_port_connected(dev_priv, port);
+ if (HAS_GMCH_DISPLAY(dev_priv)) {
+ if (IS_GM45(dev_priv))
+ return gm45_digital_port_connected(dev_priv, port);
+ else
+ return g4x_digital_port_connected(dev_priv, port);
+ }
+
+ if (IS_GEN5(dev_priv))
+ return ilk_digital_port_connected(dev_priv, port);
+ else if (IS_GEN6(dev_priv))
+ return snb_digital_port_connected(dev_priv, port);
+ else if (IS_GEN7(dev_priv))
+ return ivb_digital_port_connected(dev_priv, port);
+ else if (IS_GEN8(dev_priv))
+ return bdw_digital_port_connected(dev_priv, port);
else if (IS_GEN9_LP(dev_priv))
return bxt_digital_port_connected(dev_priv, port);
- else if (IS_GM45(dev_priv))
- return gm45_digital_port_connected(dev_priv, port);
else
- return g4x_digital_port_connected(dev_priv, port);
+ return spt_digital_port_connected(dev_priv, port);
}
static struct edid *
@@ -4950,10 +5046,8 @@ void intel_dp_encoder_reset(struct drm_encoder *encoder)
}
static const struct drm_connector_funcs intel_dp_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.force = intel_dp_force,
.fill_modes = drm_helper_probe_single_connector_modes,
- .set_property = drm_atomic_helper_connector_set_property,
.atomic_get_property = intel_digital_connector_atomic_get_property,
.atomic_set_property = intel_digital_connector_atomic_set_property,
.late_register = intel_dp_connector_register,
@@ -5121,12 +5215,8 @@ intel_pps_readout_hw_state(struct drm_i915_private *dev_priv,
PANEL_POWER_DOWN_DELAY_SHIFT;
if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv)) {
- u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
- BXT_POWER_CYCLE_DELAY_SHIFT;
- if (tmp > 0)
- seq->t11_t12 = (tmp - 1) * 1000;
- else
- seq->t11_t12 = 0;
+ seq->t11_t12 = ((pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
+ BXT_POWER_CYCLE_DELAY_SHIFT) * 1000;
} else {
seq->t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
@@ -5177,6 +5267,21 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
intel_pps_dump_state("cur", &cur);
vbt = dev_priv->vbt.edp.pps;
+ /* On the Toshiba Satellite P50-C-18C system the VBT T12 delay
+ * of 500ms appears to be too short. Occasionally the panel
+ * just fails to power back on. Increasing the delay to 800ms
+ * seems sufficient to avoid this problem.
+ */
+ if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
+ vbt.t11_t12 = max_t(u16, vbt.t11_t12, 800 * 10);
+ DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n",
+ vbt.t11_t12);
+ }
+ /* T11_T12 delay is special and actually in units of 100ms, but zero
+ * based in the hw (so we need to add 100 ms). But the sw vbt
+ * table multiplies it by 1000 to put it in units of 100usec,
+ * too. */
+ vbt.t11_t12 += 100 * 10;
/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
* our hw here, which are all in 100usec. */
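A rough user-space sketch (not part of the patch, illustrative values only) of how the T11_T12 units above line up: a hypothetical VBT field of 5 means 500 ms, the driver keeps the value in 100 us units, the new code adds 100 ms for the zero-based hardware field, and the register write below converts back to the hardware's 100 ms units with DIV_ROUND_UP.

    #include <stdio.h>

    int main(void)
    {
            unsigned int vbt_raw = 5;               /* hypothetical VBT T11_T12 value: 5 x 100 ms */
            unsigned int t11_t12 = vbt_raw * 1000;  /* parsed into driver units of 100 us -> 5000 */

            t11_t12 += 100 * 10;                    /* add 100 ms for the zero-based hw field */

            printf("t11_t12 = %u x 100us (= %u ms)\n", t11_t12, t11_t12 / 10);
            printf("power cycle delay field = %u\n", (t11_t12 + 999) / 1000); /* DIV_ROUND_UP(x, 1000) */
            return 0;
    }

With these numbers the field programmed under BXT_POWER_CYCLE_DELAY_MASK ends up as 6, i.e. 600 ms.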
@@ -5280,7 +5385,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv)) {
pp_div = I915_READ(regs.pp_ctrl);
pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
- pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
+ pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
<< BXT_POWER_CYCLE_DELAY_SHIFT);
} else {
pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
@@ -5714,6 +5819,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_display_mode *fixed_mode = NULL;
+ struct drm_display_mode *alt_fixed_mode = NULL;
struct drm_display_mode *downclock_mode = NULL;
bool has_dpcd;
struct drm_display_mode *scan;
@@ -5769,13 +5875,14 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
}
intel_connector->edid = edid;
- /* prefer fixed mode from EDID if available */
+ /* prefer fixed mode from EDID if available, save an alt mode also */
list_for_each_entry(scan, &connector->probed_modes, head) {
if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
fixed_mode = drm_mode_duplicate(dev, scan);
downclock_mode = intel_dp_drrs_init(
intel_connector, fixed_mode);
- break;
+ } else if (!alt_fixed_mode) {
+ alt_fixed_mode = drm_mode_duplicate(dev, scan);
}
}
@@ -5812,7 +5919,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
pipe_name(pipe));
}
- intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
+ intel_panel_init(&intel_connector->panel, fixed_mode, alt_fixed_mode,
+ downclock_mode);
intel_connector->panel.backlight.power = intel_edp_backlight_power;
intel_panel_setup_backlight(connector, pipe);
@@ -5838,26 +5946,22 @@ intel_dp_init_connector_port_info(struct intel_digital_port *intel_dig_port)
struct intel_encoder *encoder = &intel_dig_port->base;
struct intel_dp *intel_dp = &intel_dig_port->dp;
+ encoder->hpd_pin = intel_hpd_pin(intel_dig_port->port);
+
switch (intel_dig_port->port) {
case PORT_A:
- encoder->hpd_pin = HPD_PORT_A;
intel_dp->aux_power_domain = POWER_DOMAIN_AUX_A;
break;
case PORT_B:
- encoder->hpd_pin = HPD_PORT_B;
intel_dp->aux_power_domain = POWER_DOMAIN_AUX_B;
break;
case PORT_C:
- encoder->hpd_pin = HPD_PORT_C;
intel_dp->aux_power_domain = POWER_DOMAIN_AUX_C;
break;
case PORT_D:
- encoder->hpd_pin = HPD_PORT_D;
intel_dp->aux_power_domain = POWER_DOMAIN_AUX_D;
break;
case PORT_E:
- encoder->hpd_pin = HPD_PORT_E;
-
/* FIXME: Check VBT for actual wiring of PORT E */
intel_dp->aux_power_domain = POWER_DOMAIN_AUX_D;
break;
diff --git a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
index 228ca06d9f0b..d2830ba3162e 100644
--- a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
@@ -98,13 +98,87 @@ intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 lev
}
}
+/*
+ * Set PWM Frequency divider to match desired frequency in vbt.
+ * The PWM frequency is calculated as 27 MHz / (F x P).
+ * - Where F = PWM Frequency Pre-Divider value programmed by field 7:0 of the
+ * EDP_BACKLIGHT_FREQ_SET register (DPCD Address 00728h)
+ * - Where P = 2^Pn, where Pn is the value programmed by field 4:0 of the
+ * EDP_PWMGEN_BIT_COUNT register (DPCD Address 00724h)
+ */
+static bool intel_dp_aux_set_pwm_freq(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+ int freq, fxp, fxp_min, fxp_max, fxp_actual, f = 1;
+ u8 pn, pn_min, pn_max;
+
+ /* Find desired value of (F x P)
+ * Note that, if F x P is out of the supported range, the maximum or
+ * minimum value will be applied automatically, so no need to check for that.
+ */
+ freq = dev_priv->vbt.backlight.pwm_freq_hz;
+ DRM_DEBUG_KMS("VBT defined backlight frequency %u Hz\n", freq);
+ if (!freq) {
+ DRM_DEBUG_KMS("Use panel default backlight frequency\n");
+ return false;
+ }
+
+ fxp = DIV_ROUND_CLOSEST(KHz(DP_EDP_BACKLIGHT_FREQ_BASE_KHZ), freq);
+
+ /* Use the highest possible value of Pn for finer granularity of brightness
+ * adjustment while satisfying the conditions below.
+ * - Pn is in the range Pn_min..Pn_max
+ * - F is in the range 1..255
+ * - F x P is within 25% of the desired value.
+ * Note: 25% is an arbitrary value and may need some tweaking.
+ */
+ if (drm_dp_dpcd_readb(&intel_dp->aux,
+ DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min) != 1) {
+ DRM_DEBUG_KMS("Failed to read pwmgen bit count cap min\n");
+ return false;
+ }
+ if (drm_dp_dpcd_readb(&intel_dp->aux,
+ DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max) != 1) {
+ DRM_DEBUG_KMS("Failed to read pwmgen bit count cap max\n");
+ return false;
+ }
+ pn_min &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
+ pn_max &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
+
+ fxp_min = DIV_ROUND_CLOSEST(fxp * 3, 4);
+ fxp_max = DIV_ROUND_CLOSEST(fxp * 5, 4);
+ if (fxp_min < (1 << pn_min) || (255 << pn_max) < fxp_max) {
+ DRM_DEBUG_KMS("VBT defined backlight frequency out of range\n");
+ return false;
+ }
+
+ for (pn = pn_max; pn >= pn_min; pn--) {
+ f = clamp(DIV_ROUND_CLOSEST(fxp, 1 << pn), 1, 255);
+ fxp_actual = f << pn;
+ if (fxp_min <= fxp_actual && fxp_actual <= fxp_max)
+ break;
+ }
+
+ if (drm_dp_dpcd_writeb(&intel_dp->aux,
+ DP_EDP_PWMGEN_BIT_COUNT, pn) < 0) {
+ DRM_DEBUG_KMS("Failed to write aux pwmgen bit count\n");
+ return false;
+ }
+ if (drm_dp_dpcd_writeb(&intel_dp->aux,
+ DP_EDP_BACKLIGHT_FREQ_SET, (u8) f) < 0) {
+ DRM_DEBUG_KMS("Failed to write aux backlight freq\n");
+ return false;
+ }
+ return true;
+}
+
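A rough user-space sketch (not part of the patch) of the F and P selection that intel_dp_aux_set_pwm_freq() performs; the 200 Hz target and the pn_min/pn_max caps of 4 and 16 are made-up values standing in for what the VBT and the panel's DPCD would report.

    #include <stdio.h>

    int main(void)
    {
            const unsigned int base_hz = 27000000;          /* 27 MHz backlight PWM base clock */
            unsigned int freq = 200;                        /* hypothetical VBT backlight frequency (Hz) */
            unsigned int fxp = (base_hz + freq / 2) / freq; /* DIV_ROUND_CLOSEST -> 135000 */
            int pn_min = 4, pn_max = 16, pn;                /* hypothetical PWMGEN bit count caps */
            unsigned int f = 1;

            for (pn = pn_max; pn >= pn_min; pn--) {
                    unsigned int div = 1u << pn;

                    f = (fxp + div / 2) / div;              /* closest F for this Pn */
                    if (f < 1)
                            f = 1;
                    if (f > 255)
                            f = 255;
                    if (f * div >= fxp - fxp / 4 && f * div <= fxp + fxp / 4)
                            break;                          /* F x P within 25% of the target */
            }

            printf("F = %u, Pn = %d, actual PWM = %.2f Hz\n",
                   f, pn, (double)base_hz / (f * (1u << pn)));
            return 0;
    }

With these numbers the loop settles on F = 2 and Pn = 16, giving an actual PWM frequency of roughly 206 Hz, comfortably inside the 25% tolerance of the 200 Hz target.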
static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
- uint8_t dpcd_buf = 0;
- uint8_t edp_backlight_mode = 0;
+ uint8_t dpcd_buf, new_dpcd_buf, edp_backlight_mode;
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) != 1) {
@@ -113,18 +187,15 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
return;
}
+ new_dpcd_buf = dpcd_buf;
edp_backlight_mode = dpcd_buf & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK;
switch (edp_backlight_mode) {
case DP_EDP_BACKLIGHT_CONTROL_MODE_PWM:
case DP_EDP_BACKLIGHT_CONTROL_MODE_PRESET:
case DP_EDP_BACKLIGHT_CONTROL_MODE_PRODUCT:
- dpcd_buf &= ~DP_EDP_BACKLIGHT_CONTROL_MODE_MASK;
- dpcd_buf |= DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD;
- if (drm_dp_dpcd_writeb(&intel_dp->aux,
- DP_EDP_BACKLIGHT_MODE_SET_REGISTER, dpcd_buf) < 0) {
- DRM_DEBUG_KMS("Failed to write aux backlight mode\n");
- }
+ new_dpcd_buf &= ~DP_EDP_BACKLIGHT_CONTROL_MODE_MASK;
+ new_dpcd_buf |= DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD;
break;
/* Do nothing when it is already DPCD mode */
@@ -133,6 +204,17 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
break;
}
+ if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_FREQ_AUX_SET_CAP)
+ if (intel_dp_aux_set_pwm_freq(connector))
+ new_dpcd_buf |= DP_EDP_BACKLIGHT_FREQ_AUX_SET_ENABLE;
+
+ if (new_dpcd_buf != dpcd_buf) {
+ if (drm_dp_dpcd_writeb(&intel_dp->aux,
+ DP_EDP_BACKLIGHT_MODE_SET_REGISTER, new_dpcd_buf) < 0) {
+ DRM_DEBUG_KMS("Failed to write aux backlight mode\n");
+ }
+ }
+
set_aux_backlight_enable(intel_dp, true);
intel_dp_aux_set_backlight(conn_state, connector->panel.backlight.level);
}
diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c
index b79c1c0e404c..05907fa8a553 100644
--- a/drivers/gpu/drm/i915/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/intel_dp_link_training.c
@@ -321,12 +321,16 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
if (!intel_dp_link_training_channel_equalization(intel_dp))
goto failure_handling;
- DRM_DEBUG_KMS("Link Training Passed at Link Rate = %d, Lane count = %d",
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Link Training Passed at Link Rate = %d, Lane count = %d",
+ intel_connector->base.base.id,
+ intel_connector->base.name,
intel_dp->link_rate, intel_dp->lane_count);
return;
failure_handling:
- DRM_DEBUG_KMS("Link Training failed at link rate = %d, lane count = %d",
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d",
+ intel_connector->base.base.id,
+ intel_connector->base.name,
intel_dp->link_rate, intel_dp->lane_count);
if (!intel_dp_get_link_train_fallback_values(intel_dp,
intel_dp->link_rate,
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 2cf046beae0f..93fc8ab9bb31 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -346,10 +346,8 @@ intel_dp_mst_connector_destroy(struct drm_connector *connector)
}
static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.detect = intel_dp_mst_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
- .set_property = drm_atomic_helper_connector_set_property,
.late_register = intel_connector_register,
.early_unregister = intel_connector_unregister,
.destroy = intel_dp_mst_connector_destroy,
@@ -372,6 +370,9 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
int bpp = 24; /* MST uses fixed bpp */
int max_rate, mode_rate, max_lanes, max_link_clock;
+ if (!intel_dp)
+ return MODE_ERROR;
+
max_link_clock = intel_dp_max_link_rate(intel_dp);
max_lanes = intel_dp_max_lane_count(intel_dp);
@@ -443,28 +444,6 @@ static bool intel_dp_mst_get_hw_state(struct intel_connector *connector)
return false;
}
-static void intel_connector_add_to_fbdev(struct intel_connector *connector)
-{
-#ifdef CONFIG_DRM_FBDEV_EMULATION
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-
- if (dev_priv->fbdev)
- drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper,
- &connector->base);
-#endif
-}
-
-static void intel_connector_remove_from_fbdev(struct intel_connector *connector)
-{
-#ifdef CONFIG_DRM_FBDEV_EMULATION
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-
- if (dev_priv->fbdev)
- drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper,
- &connector->base);
-#endif
-}
-
static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *pathprop)
{
struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
@@ -500,31 +479,32 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
static void intel_dp_register_mst_connector(struct drm_connector *connector)
{
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
- drm_modeset_lock_all(dev);
- intel_connector_add_to_fbdev(intel_connector);
- drm_modeset_unlock_all(dev);
+ if (dev_priv->fbdev)
+ drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper,
+ connector);
- drm_connector_register(&intel_connector->base);
+ drm_connector_register(connector);
}
static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
- struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
drm_connector_unregister(connector);
- /* need to nuke the connector */
- drm_modeset_lock_all(dev);
- intel_connector_remove_from_fbdev(intel_connector);
+ if (dev_priv->fbdev)
+ drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper,
+ connector);
+ /* prevent race with the check in ->detect */
+ drm_modeset_lock(&connector->dev->mode_config.connection_mutex, NULL);
intel_connector->mst_port = NULL;
- drm_modeset_unlock_all(dev);
+ drm_modeset_unlock(&connector->dev->mode_config.connection_mutex);
- drm_connector_unreference(&intel_connector->base);
+ drm_connector_unreference(connector);
DRM_DEBUG_KMS("\n");
}
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index 2f7b0e64f628..a2a3d93d67bd 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -2379,6 +2379,15 @@ cnl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
return pll;
}
+static void cnl_dump_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_dpll_hw_state *hw_state)
+{
+ DRM_DEBUG_KMS("dpll_hw_state: "
+ "cfgcr0: 0x%x, cfgcr1: 0x%x\n",
+ hw_state->cfgcr0,
+ hw_state->cfgcr1);
+}
+
static const struct intel_shared_dpll_funcs cnl_ddi_pll_funcs = {
.enable = cnl_ddi_pll_enable,
.disable = cnl_ddi_pll_disable,
@@ -2395,7 +2404,7 @@ static const struct dpll_info cnl_plls[] = {
static const struct intel_dpll_mgr cnl_pll_mgr = {
.dpll_info = cnl_plls,
.get_dpll = cnl_get_dpll,
- .dump_hw_state = skl_dump_hw_state,
+ .dump_hw_state = cnl_dump_hw_state,
};
/**
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index d93efb49a2e2..fa47285918f4 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -265,6 +265,7 @@ struct intel_encoder {
struct intel_panel {
struct drm_display_mode *fixed_mode;
+ struct drm_display_mode *alt_fixed_mode;
struct drm_display_mode *downclock_mode;
/* backlight */
@@ -780,13 +781,15 @@ struct intel_crtc_state {
/* HDMI High TMDS char rate ratio */
bool hdmi_high_tmds_clock_ratio;
+
+ /* output format is YCBCR 4:2:0 */
+ bool ycbcr420;
};
struct intel_crtc {
struct drm_crtc base;
enum pipe pipe;
enum plane plane;
- u8 lut_r[256], lut_g[256], lut_b[256];
/*
* Whether the crtc and the connected output pipeline is active. Implies
* that crtc->enabled is set, i.e. the current mode configuration has
@@ -797,9 +800,6 @@ struct intel_crtc {
u8 plane_ids_mask;
unsigned long long enabled_power_domains;
struct intel_overlay *overlay;
- struct intel_flip_work *flip_work;
-
- atomic_t unpin_work_count;
/* Display surface base address adjustment for pageflips. Note that on
* gen4+ this only adjusts up to a tile, offsets within a tile are
@@ -1132,24 +1132,6 @@ intel_get_crtc_for_plane(struct drm_i915_private *dev_priv, enum plane plane)
return dev_priv->plane_to_crtc_mapping[plane];
}
-struct intel_flip_work {
- struct work_struct unpin_work;
- struct work_struct mmio_work;
-
- struct drm_crtc *crtc;
- struct i915_vma *old_vma;
- struct drm_framebuffer *old_fb;
- struct drm_i915_gem_object *pending_flip_obj;
- struct drm_pending_vblank_event *event;
- atomic_t pending;
- u32 flip_count;
- u32 gtt_offset;
- struct drm_i915_gem_request *flip_queued_req;
- u32 flip_queued_vblank;
- u32 flip_ready_vblank;
- unsigned int rotation;
-};
-
struct intel_load_detect_pipe {
struct drm_atomic_state *restore_state;
};
@@ -1211,12 +1193,12 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
enum pipe pipe, bool enable);
bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
- enum transcoder pch_transcoder,
+ enum pipe pch_transcoder,
bool enable);
void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
enum pipe pipe);
void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
- enum transcoder pch_transcoder);
+ enum pipe pch_transcoder);
void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv);
void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv);
@@ -1251,9 +1233,9 @@ static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
int intel_get_crtc_scanline(struct intel_crtc *crtc);
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
- unsigned int pipe_mask);
+ u8 pipe_mask);
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
- unsigned int pipe_mask);
+ u8 pipe_mask);
void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv);
void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv);
void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv);
@@ -1326,7 +1308,7 @@ void intel_set_cdclk(struct drm_i915_private *dev_priv,
/* intel_display.c */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
-enum transcoder intel_crtc_pch_transcoder(struct intel_crtc *crtc);
+enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc);
void intel_update_rawclk(struct drm_i915_private *dev_priv);
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv);
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
@@ -1335,7 +1317,6 @@ int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
const char *name, u32 reg);
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv);
void lpt_disable_iclkip(struct drm_i915_private *dev_priv);
-extern const struct drm_plane_funcs intel_plane_funcs;
void intel_init_display_hooks(struct drm_i915_private *dev_priv);
unsigned int intel_fb_xy_to_linear(int x, int y,
const struct intel_plane_state *state,
@@ -1408,9 +1389,6 @@ void intel_unpin_fb_vma(struct i915_vma *vma);
struct drm_framebuffer *
intel_framebuffer_create(struct drm_i915_gem_object *obj,
struct drm_mode_fb_cmd2 *mode_cmd);
-void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe);
-void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe);
-void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe);
int intel_prepare_plane_fb(struct drm_plane *plane,
struct drm_plane_state *new_state);
void intel_cleanup_plane_fb(struct drm_plane *plane,
@@ -1597,7 +1575,8 @@ void intel_hpd_poll_init(struct drm_i915_private *dev_priv);
#ifdef CONFIG_DRM_FBDEV_EMULATION
extern int intel_fbdev_init(struct drm_device *dev);
extern void intel_fbdev_initial_config_async(struct drm_device *dev);
-extern void intel_fbdev_fini(struct drm_device *dev);
+extern void intel_fbdev_unregister(struct drm_i915_private *dev_priv);
+extern void intel_fbdev_fini(struct drm_i915_private *dev_priv);
extern void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous);
extern void intel_fbdev_output_poll_changed(struct drm_device *dev);
extern void intel_fbdev_restore_mode(struct drm_device *dev);
@@ -1611,7 +1590,11 @@ static inline void intel_fbdev_initial_config_async(struct drm_device *dev)
{
}
-static inline void intel_fbdev_fini(struct drm_device *dev)
+static inline void intel_fbdev_unregister(struct drm_i915_private *dev_priv)
+{
+}
+
+static inline void intel_fbdev_fini(struct drm_i915_private *dev_priv)
{
}
@@ -1696,6 +1679,7 @@ void intel_overlay_reset(struct drm_i915_private *dev_priv);
/* intel_panel.c */
int intel_panel_init(struct intel_panel *panel,
struct drm_display_mode *fixed_mode,
+ struct drm_display_mode *alt_fixed_mode,
struct drm_display_mode *downclock_mode);
void intel_panel_fini(struct intel_panel *panel);
void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
@@ -1858,9 +1842,8 @@ void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv);
void gen6_rps_busy(struct drm_i915_private *dev_priv);
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
void gen6_rps_idle(struct drm_i915_private *dev_priv);
-void gen6_rps_boost(struct drm_i915_private *dev_priv,
- struct intel_rps_client *rps,
- unsigned long submitted);
+void gen6_rps_boost(struct drm_i915_gem_request *rq,
+ struct intel_rps_client *rps);
void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req);
void g4x_wm_get_hw_state(struct drm_device *dev);
void vlv_wm_get_hw_state(struct drm_device *dev);
@@ -1902,7 +1885,7 @@ struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,
int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void intel_pipe_update_start(struct intel_crtc *crtc);
-void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work);
+void intel_pipe_update_end(struct intel_crtc *crtc);
/* intel_tv.c */
void intel_tv_init(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 50ec836da8b1..f0c11aec5ea5 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -1653,12 +1653,10 @@ static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs
};
static const struct drm_connector_funcs intel_dsi_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.late_register = intel_connector_register,
.early_unregister = intel_connector_unregister,
.destroy = intel_dsi_connector_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
- .set_property = drm_atomic_helper_connector_set_property,
.atomic_get_property = intel_digital_connector_atomic_get_property,
.atomic_set_property = intel_digital_connector_atomic_set_property,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
@@ -1851,7 +1849,7 @@ void intel_dsi_init(struct drm_i915_private *dev_priv)
connector->display_info.width_mm = fixed_mode->width_mm;
connector->display_info.height_mm = fixed_mode->height_mm;
- intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
+ intel_panel_init(&intel_connector->panel, fixed_mode, NULL, NULL);
intel_panel_setup_backlight(connector, INVALID_PIPE);
intel_dsi_add_properties(intel_connector);
diff --git a/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c
index 6e09ceb71500..150a156f3b1e 100644
--- a/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c
+++ b/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c
@@ -46,7 +46,7 @@ static u32 dcs_get_backlight(struct intel_connector *connector)
struct intel_encoder *encoder = connector->encoder;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
struct mipi_dsi_device *dsi_device;
- u8 data;
+ u8 data = 0;
enum port port;
/* FIXME: Need to take care of 16 bit brightness level */
diff --git a/drivers/gpu/drm/i915/intel_dsi_vbt.c b/drivers/gpu/drm/i915/intel_dsi_vbt.c
index 7158c7ce9c09..91c07b0c8db9 100644
--- a/drivers/gpu/drm/i915/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_vbt.c
@@ -306,7 +306,7 @@ static void bxt_exec_gpio(struct drm_i915_private *dev_priv,
if (!gpio_desc) {
gpio_desc = devm_gpiod_get_index(dev_priv->drm.dev,
- "panel", gpio_index,
+ NULL, gpio_index,
value ? GPIOD_OUT_LOW :
GPIOD_OUT_HIGH);
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index c1544a53095d..c0a027274c06 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -344,13 +344,11 @@ static void intel_dvo_destroy(struct drm_connector *connector)
}
static const struct drm_connector_funcs intel_dvo_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.detect = intel_dvo_detect,
.late_register = intel_connector_register,
.early_unregister = intel_connector_unregister,
.destroy = intel_dvo_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
- .set_property = drm_atomic_helper_connector_set_property,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
@@ -554,7 +552,7 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
*/
intel_panel_init(&intel_connector->panel,
intel_dvo_get_current_mode(connector),
- NULL);
+ NULL, NULL);
intel_dvo->panel_wants_dither = true;
}
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 5b4de719bec3..9ab596941372 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -149,6 +149,7 @@ __intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
switch (INTEL_GEN(dev_priv)) {
default:
MISSING_CASE(INTEL_GEN(dev_priv));
+ case 10:
case 9:
return GEN9_LR_CONTEXT_RENDER_SIZE;
case 8:
@@ -291,11 +292,9 @@ cleanup:
*/
int intel_engines_init(struct drm_i915_private *dev_priv)
{
- struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
struct intel_engine_cs *engine;
enum intel_engine_id id, err_id;
- unsigned int mask = 0;
- int err = 0;
+ int err;
for_each_engine(engine, dev_priv, id) {
const struct engine_class_info *class_info =
@@ -306,40 +305,30 @@ int intel_engines_init(struct drm_i915_private *dev_priv)
init = class_info->init_execlists;
else
init = class_info->init_legacy;
- if (!init) {
- kfree(engine);
- dev_priv->engine[id] = NULL;
- continue;
- }
+
+ err = -EINVAL;
+ err_id = id;
+
+ if (GEM_WARN_ON(!init))
+ goto cleanup;
err = init(engine);
- if (err) {
- err_id = id;
+ if (err)
goto cleanup;
- }
GEM_BUG_ON(!engine->submit_request);
- mask |= ENGINE_MASK(id);
}
- /*
- * Catch failures to update intel_engines table when the new engines
- * are added to the driver by a warning and disabling the forgotten
- * engines.
- */
- if (WARN_ON(mask != INTEL_INFO(dev_priv)->ring_mask))
- device_info->ring_mask = mask;
-
- device_info->num_rings = hweight32(mask);
-
return 0;
cleanup:
for_each_engine(engine, dev_priv, id) {
- if (id >= err_id)
+ if (id >= err_id) {
kfree(engine);
- else
+ dev_priv->engine[id] = NULL;
+ } else {
dev_priv->gt.cleanup_engine(engine);
+ }
}
return err;
}
@@ -348,9 +337,6 @@ void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
{
struct drm_i915_private *dev_priv = engine->i915;
- GEM_BUG_ON(!intel_engine_is_idle(engine));
- GEM_BUG_ON(i915_gem_active_isset(&engine->timeline->last_request));
-
/* Our semaphore implementation is strictly monotonic (i.e. we proceed
* so long as the semaphore value in the register/page is greater
* than the sync value), so whenever we reset the seqno,
@@ -1294,6 +1280,10 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
if (port_request(&engine->execlist_port[0]))
return false;
+ /* ELSP is empty, but there are ready requests? */
+ if (READ_ONCE(engine->execlist_first))
+ return false;
+
/* Ring stopped? */
if (!ring_is_idle(engine))
return false;
@@ -1340,6 +1330,7 @@ void intel_engines_mark_idle(struct drm_i915_private *i915)
for_each_engine(engine, i915, id) {
intel_engine_disarm_breadcrumbs(engine);
i915_gem_batch_pool_fini(&engine->batch_pool);
+ tasklet_kill(&engine->irq_tasklet);
engine->no_priolist = false;
}
}
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 860b8c26d29b..3fca9fa39a8e 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -461,6 +461,8 @@ static void intel_fbc_schedule_activation(struct intel_crtc *crtc)
struct intel_fbc_work *work = &fbc->work;
WARN_ON(!mutex_is_locked(&fbc->lock));
+ if (WARN_ON(!fbc->enabled))
+ return;
if (drm_crtc_vblank_get(&crtc->base)) {
DRM_ERROR("vblank not available for FBC on pipe %c\n",
@@ -1216,7 +1218,7 @@ static void intel_fbc_underrun_work_fn(struct work_struct *work)
mutex_lock(&fbc->lock);
/* Maybe we were scheduled twice. */
- if (fbc->underrun_detected)
+ if (fbc->underrun_detected || !fbc->enabled)
goto out;
DRM_DEBUG_KMS("Disabling FBC due to FIFO underrun.\n");
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 0c4cde6b2e6f..262e75c00dd2 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -232,7 +232,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
strcpy(info->fix.id, "inteldrmfb");
- info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &intelfb_ops;
/* setup aperture base/size for vesafb takeover */
@@ -281,27 +280,6 @@ out_unlock:
return ret;
}
-/** Sets the color ramps on behalf of RandR */
-static void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
- u16 blue, int regno)
-{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- intel_crtc->lut_r[regno] = red >> 8;
- intel_crtc->lut_g[regno] = green >> 8;
- intel_crtc->lut_b[regno] = blue >> 8;
-}
-
-static void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, int regno)
-{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- *red = intel_crtc->lut_r[regno] << 8;
- *green = intel_crtc->lut_g[regno] << 8;
- *blue = intel_crtc->lut_b[regno] << 8;
-}
-
static struct drm_fb_helper_crtc *
intel_fb_helper_crtc(struct drm_fb_helper *fb_helper, struct drm_crtc *crtc)
{
@@ -352,14 +330,20 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
int i, j;
bool *save_enabled;
- bool fallback = true;
+ bool fallback = true, ret = true;
int num_connectors_enabled = 0;
int num_connectors_detected = 0;
+ struct drm_modeset_acquire_ctx ctx;
save_enabled = kcalloc(count, sizeof(bool), GFP_KERNEL);
if (!save_enabled)
return false;
+ drm_modeset_acquire_init(&ctx, 0);
+
+ while (drm_modeset_lock_all_ctx(fb_helper->dev, &ctx) != 0)
+ drm_modeset_backoff(&ctx);
+
memcpy(save_enabled, enabled, count);
mask = GENMASK(count - 1, 0);
conn_configured = 0;
@@ -370,7 +354,6 @@ retry:
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_fb_helper_crtc *new_crtc;
- struct intel_crtc *intel_crtc;
fb_conn = fb_helper->connector_info[i];
connector = fb_conn->connector;
@@ -412,13 +395,6 @@ retry:
num_connectors_enabled++;
- intel_crtc = to_intel_crtc(connector->state->crtc);
- for (j = 0; j < 256; j++) {
- intel_crtc->lut_r[j] = j;
- intel_crtc->lut_g[j] = j;
- intel_crtc->lut_b[j] = j;
- }
-
new_crtc = intel_fb_helper_crtc(fb_helper,
connector->state->crtc);
@@ -509,18 +485,18 @@ retry:
bail:
DRM_DEBUG_KMS("Not using firmware configuration\n");
memcpy(enabled, save_enabled, count);
- kfree(save_enabled);
- return false;
+ ret = false;
}
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
kfree(save_enabled);
- return true;
+ return ret;
}
static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
.initial_config = intel_fb_initial_config,
- .gamma_set = intel_crtc_fb_gamma_set,
- .gamma_get = intel_crtc_fb_gamma_get,
.fb_probe = intelfb_create,
};
@@ -531,8 +507,6 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
* trying to rectify all the possible error paths leading here.
*/
- drm_fb_helper_unregister_fbi(&ifbdev->helper);
-
drm_fb_helper_fini(&ifbdev->helper);
if (ifbdev->vma) {
@@ -720,8 +694,10 @@ static void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
/* Due to peculiar init order wrt to hpd handling this is separate. */
if (drm_fb_helper_initial_config(&ifbdev->helper,
- ifbdev->preferred_bpp))
- intel_fbdev_fini(ifbdev->helper.dev);
+ ifbdev->preferred_bpp)) {
+ intel_fbdev_unregister(to_i915(ifbdev->helper.dev));
+ intel_fbdev_fini(to_i915(ifbdev->helper.dev));
+ }
}
void intel_fbdev_initial_config_async(struct drm_device *dev)
@@ -744,9 +720,8 @@ static void intel_fbdev_sync(struct intel_fbdev *ifbdev)
ifbdev->cookie = 0;
}
-void intel_fbdev_fini(struct drm_device *dev)
+void intel_fbdev_unregister(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_fbdev *ifbdev = dev_priv->fbdev;
if (!ifbdev)
@@ -756,8 +731,17 @@ void intel_fbdev_fini(struct drm_device *dev)
if (!current_is_async())
intel_fbdev_sync(ifbdev);
+ drm_fb_helper_unregister_fbi(&ifbdev->helper);
+}
+
+void intel_fbdev_fini(struct drm_i915_private *dev_priv)
+{
+ struct intel_fbdev *ifbdev = fetch_and_zero(&dev_priv->fbdev);
+
+ if (!ifbdev)
+ return;
+
intel_fbdev_destroy(ifbdev);
- dev_priv->fbdev = NULL;
}
void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
@@ -813,7 +797,7 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
- if (ifbdev && ifbdev->vma)
+ if (ifbdev)
drm_fb_helper_hotplug_event(&ifbdev->helper);
}
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
index d484862cc7df..5a7cca32c0fa 100644
--- a/drivers/gpu/drm/i915/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -313,11 +313,11 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
* Returns the previous state of underrun reporting.
*/
bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
- enum transcoder pch_transcoder,
+ enum pipe pch_transcoder,
bool enable)
{
struct intel_crtc *crtc =
- intel_get_crtc_for_pipe(dev_priv, (enum pipe) pch_transcoder);
+ intel_get_crtc_for_pipe(dev_priv, pch_transcoder);
unsigned long flags;
bool old;
@@ -390,7 +390,7 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
* interrupt to avoid an irq storm.
*/
void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
- enum transcoder pch_transcoder)
+ enum pipe pch_transcoder)
{
if (intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder,
false)) {
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index 52d5b82790d9..c17ed0e62b67 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -45,7 +45,7 @@ static bool is_supported_device(struct drm_i915_private *dev_priv)
return true;
if (IS_SKYLAKE(dev_priv))
return true;
- if (IS_KABYLAKE(dev_priv) && INTEL_DEVID(dev_priv) == 0x591D)
+ if (IS_KABYLAKE(dev_priv))
return true;
return false;
}
diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c
index 9b0ece427bdc..d9d87d96fb69 100644
--- a/drivers/gpu/drm/i915/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/intel_hangcheck.c
@@ -324,7 +324,7 @@ hangcheck_get_action(struct intel_engine_cs *engine,
if (engine->hangcheck.seqno != hc->seqno)
return ENGINE_ACTIVE_SEQNO;
- if (i915_seqno_passed(hc->seqno, intel_engine_last_submit(engine)))
+ if (intel_engine_is_idle(engine))
return ENGINE_IDLE;
return engine_stuck(engine, hc->acthd);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index ec0779a52d53..e8abea7594ec 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -459,22 +459,31 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
const struct drm_display_mode *adjusted_mode =
&crtc_state->base.adjusted_mode;
+ struct drm_connector *connector = &intel_hdmi->attached_connector->base;
+ bool is_hdmi2_sink = connector->display_info.hdmi.scdc.supported;
union hdmi_infoframe frame;
int ret;
ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
- adjusted_mode);
+ adjusted_mode,
+ is_hdmi2_sink);
if (ret < 0) {
DRM_ERROR("couldn't fill AVI infoframe\n");
return;
}
+ if (crtc_state->ycbcr420)
+ frame.avi.colorspace = HDMI_COLORSPACE_YUV420;
+ else
+ frame.avi.colorspace = HDMI_COLORSPACE_RGB;
+
drm_hdmi_avi_infoframe_quant_range(&frame.avi, adjusted_mode,
crtc_state->limited_color_range ?
HDMI_QUANTIZATION_RANGE_LIMITED :
HDMI_QUANTIZATION_RANGE_FULL,
intel_hdmi->rgb_quant_range_selectable);
+ /* TODO: handle pixel repetition for YCBCR420 outputs */
intel_write_infoframe(encoder, crtc_state, &frame);
}
@@ -1292,6 +1301,9 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
clock *= 2;
+ if (drm_mode_is_420_only(&connector->display_info, mode))
+ clock /= 2;
+
/* check if we can do 8bpc */
status = hdmi_port_clock_valid(hdmi, clock, true, force_dvi);
@@ -1321,14 +1333,21 @@ static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
if (crtc_state->output_types != 1 << INTEL_OUTPUT_HDMI)
return false;
- for_each_connector_in_state(state, connector, connector_state, i) {
+ for_each_new_connector_in_state(state, connector, connector_state, i) {
const struct drm_display_info *info = &connector->display_info;
if (connector_state->crtc != crtc_state->base.crtc)
continue;
- if ((info->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_36) == 0)
- return false;
+ if (crtc_state->ycbcr420) {
+ const struct drm_hdmi_info *hdmi = &info->hdmi;
+
+ if (!(hdmi->y420_dc_modes & DRM_EDID_YCBCR420_DC_36))
+ return false;
+ } else {
+ if (!(info->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_36))
+ return false;
+ }
}
/* Display Wa #1139 */
@@ -1339,6 +1358,36 @@ static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
return true;
}
+static bool
+intel_hdmi_ycbcr420_config(struct drm_connector *connector,
+ struct intel_crtc_state *config,
+ int *clock_12bpc, int *clock_8bpc)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(config->base.crtc);
+
+ if (!connector->ycbcr_420_allowed) {
+ DRM_ERROR("Platform doesn't support YCBCR420 output\n");
+ return false;
+ }
+
+ /* YCBCR420 TMDS rate requirement is half the pixel clock */
+ config->port_clock /= 2;
+ *clock_12bpc /= 2;
+ *clock_8bpc /= 2;
+ config->ycbcr420 = true;
+
+ /* YCBCR 420 output conversion needs a scaler */
+ if (skl_update_scaler_crtc(config)) {
+ DRM_DEBUG_KMS("Scaler allocation for output failed\n");
+ return false;
+ }
+
+ intel_pch_panel_fitting(intel_crtc, config,
+ DRM_MODE_SCALE_FULLSCREEN);
+
+ return true;
+}
+
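A quick worked number (not from the patch) for the halving done above: with a hypothetical 4K@60 mode the clocks drop as follows once the output is sent as YCBCR 4:2:0, which is why port_clock and both dot-clock limits are divided by two.

    #include <stdio.h>

    int main(void)
    {
            int clock_8bpc = 594000;                /* hypothetical 4K@60 pixel clock, kHz */
            int clock_12bpc = clock_8bpc * 3 / 2;   /* 12 bpc needs 1.5x the 8 bpc rate */
            int port_clock = clock_8bpc;

            /* 4:2:0 needs only half the TMDS rate of 4:4:4, so everything halves */
            port_clock /= 2;
            clock_8bpc /= 2;
            clock_12bpc /= 2;

            printf("port %d kHz, 8bpc %d kHz, 12bpc %d kHz\n",
                   port_clock, clock_8bpc, clock_12bpc);
            return 0;
    }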
bool intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
@@ -1346,7 +1395,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
- struct drm_scdc *scdc = &conn_state->connector->display_info.hdmi.scdc;
+ struct drm_connector *connector = conn_state->connector;
+ struct drm_scdc *scdc = &connector->display_info.hdmi.scdc;
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
int clock_8bpc = pipe_config->base.adjusted_mode.crtc_clock;
@@ -1376,6 +1426,14 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
clock_12bpc *= 2;
}
+ if (drm_mode_is_420_only(&connector->display_info, adjusted_mode)) {
+ if (!intel_hdmi_ycbcr420_config(connector, pipe_config,
+ &clock_12bpc, &clock_8bpc)) {
+ DRM_ERROR("Can't support YCBCR420 output\n");
+ return false;
+ }
+ }
+
if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv))
pipe_config->has_pch_encoder = true;
@@ -1703,11 +1761,9 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
}
static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.detect = intel_hdmi_detect,
.force = intel_hdmi_force,
.fill_modes = drm_helper_probe_single_connector_modes,
- .set_property = drm_atomic_helper_connector_set_property,
.atomic_get_property = intel_digital_connector_atomic_get_property,
.atomic_set_property = intel_digital_connector_atomic_set_property,
.late_register = intel_connector_register,
@@ -1787,45 +1843,114 @@ void intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
DRM_DEBUG_KMS("sink scrambling handled\n");
}
-static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
- enum port port)
+static u8 chv_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
{
- const struct ddi_vbt_port_info *info =
- &dev_priv->vbt.ddi_port_info[port];
u8 ddc_pin;
- if (info->alternate_ddc_pin) {
- DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (VBT)\n",
- info->alternate_ddc_pin, port_name(port));
- return info->alternate_ddc_pin;
+ switch (port) {
+ case PORT_B:
+ ddc_pin = GMBUS_PIN_DPB;
+ break;
+ case PORT_C:
+ ddc_pin = GMBUS_PIN_DPC;
+ break;
+ case PORT_D:
+ ddc_pin = GMBUS_PIN_DPD_CHV;
+ break;
+ default:
+ MISSING_CASE(port);
+ ddc_pin = GMBUS_PIN_DPB;
+ break;
}
+ return ddc_pin;
+}
+
+static u8 bxt_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
+{
+ u8 ddc_pin;
switch (port) {
case PORT_B:
- if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv))
- ddc_pin = GMBUS_PIN_1_BXT;
- else
- ddc_pin = GMBUS_PIN_DPB;
+ ddc_pin = GMBUS_PIN_1_BXT;
break;
case PORT_C:
- if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv))
- ddc_pin = GMBUS_PIN_2_BXT;
- else
- ddc_pin = GMBUS_PIN_DPC;
+ ddc_pin = GMBUS_PIN_2_BXT;
+ break;
+ default:
+ MISSING_CASE(port);
+ ddc_pin = GMBUS_PIN_1_BXT;
+ break;
+ }
+ return ddc_pin;
+}
+
+static u8 cnp_port_to_ddc_pin(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ u8 ddc_pin;
+
+ switch (port) {
+ case PORT_B:
+ ddc_pin = GMBUS_PIN_1_BXT;
+ break;
+ case PORT_C:
+ ddc_pin = GMBUS_PIN_2_BXT;
break;
case PORT_D:
- if (HAS_PCH_CNP(dev_priv))
- ddc_pin = GMBUS_PIN_4_CNP;
- else if (IS_CHERRYVIEW(dev_priv))
- ddc_pin = GMBUS_PIN_DPD_CHV;
- else
- ddc_pin = GMBUS_PIN_DPD;
+ ddc_pin = GMBUS_PIN_4_CNP;
+ break;
+ default:
+ MISSING_CASE(port);
+ ddc_pin = GMBUS_PIN_1_BXT;
+ break;
+ }
+ return ddc_pin;
+}
+
+static u8 g4x_port_to_ddc_pin(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ u8 ddc_pin;
+
+ switch (port) {
+ case PORT_B:
+ ddc_pin = GMBUS_PIN_DPB;
+ break;
+ case PORT_C:
+ ddc_pin = GMBUS_PIN_DPC;
+ break;
+ case PORT_D:
+ ddc_pin = GMBUS_PIN_DPD;
break;
default:
MISSING_CASE(port);
ddc_pin = GMBUS_PIN_DPB;
break;
}
+ return ddc_pin;
+}
+
+static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ const struct ddi_vbt_port_info *info =
+ &dev_priv->vbt.ddi_port_info[port];
+ u8 ddc_pin;
+
+ if (info->alternate_ddc_pin) {
+ DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (VBT)\n",
+ info->alternate_ddc_pin, port_name(port));
+ return info->alternate_ddc_pin;
+ }
+
+ if (IS_CHERRYVIEW(dev_priv))
+ ddc_pin = chv_port_to_ddc_pin(dev_priv, port);
+ else if (IS_GEN9_LP(dev_priv))
+ ddc_pin = bxt_port_to_ddc_pin(dev_priv, port);
+ else if (HAS_PCH_CNP(dev_priv))
+ ddc_pin = cnp_port_to_ddc_pin(dev_priv, port);
+ else
+ ddc_pin = g4x_port_to_ddc_pin(dev_priv, port);
DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (platform default)\n",
ddc_pin, port_name(port));
@@ -1859,25 +1984,14 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
connector->doublescan_allowed = 0;
connector->stereo_allowed = 1;
+ if (IS_GEMINILAKE(dev_priv))
+ connector->ycbcr_420_allowed = true;
+
intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(dev_priv, port);
- switch (port) {
- case PORT_B:
- intel_encoder->hpd_pin = HPD_PORT_B;
- break;
- case PORT_C:
- intel_encoder->hpd_pin = HPD_PORT_C;
- break;
- case PORT_D:
- intel_encoder->hpd_pin = HPD_PORT_D;
- break;
- case PORT_E:
- intel_encoder->hpd_pin = HPD_PORT_E;
- break;
- default:
- MISSING_CASE(port);
+ if (WARN_ON(port == PORT_A))
return;
- }
+ intel_encoder->hpd_pin = intel_hpd_pin(port);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
intel_hdmi->write_infoframe = vlv_write_infoframe;
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index f1200272a699..875d5d218d5c 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -76,26 +76,54 @@
* it will use i915_hotplug_work_func where this logic is handled.
*/
-bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port)
+/**
+ * intel_hpd_pin_to_port - return the port hard associated with a certain pin.
+ * @pin: the hpd pin for which to get the associated port
+ *
+ * Return the port that is associated with @pin, or PORT_NONE if no port is
+ * hard associated with that @pin.
+ */
+enum port intel_hpd_pin_to_port(enum hpd_pin pin)
{
switch (pin) {
case HPD_PORT_A:
- *port = PORT_A;
- return true;
+ return PORT_A;
case HPD_PORT_B:
- *port = PORT_B;
- return true;
+ return PORT_B;
case HPD_PORT_C:
- *port = PORT_C;
- return true;
+ return PORT_C;
case HPD_PORT_D:
- *port = PORT_D;
- return true;
+ return PORT_D;
case HPD_PORT_E:
- *port = PORT_E;
- return true;
+ return PORT_E;
+ default:
+ return PORT_NONE; /* no port for this pin */
+ }
+}
+
+/**
+ * intel_hpd_pin - return the pin hard associated with a certain port.
+ * @port: the port for which to get the associated hpd pin
+ *
+ * Return the hpd pin that is associated with @port, or HPD_NONE if no pin is
+ * hard associated with that @port.
+ */
+enum hpd_pin intel_hpd_pin(enum port port)
+{
+ switch (port) {
+ case PORT_A:
+ return HPD_PORT_A;
+ case PORT_B:
+ return HPD_PORT_B;
+ case PORT_C:
+ return HPD_PORT_C;
+ case PORT_D:
+ return HPD_PORT_D;
+ case PORT_E:
+ return HPD_PORT_E;
default:
- return false; /* no hpd */
+ MISSING_CASE(port);
+ return HPD_NONE;
}
}
@@ -389,8 +417,9 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
if (!(BIT(i) & pin_mask))
continue;
- is_dig_port = intel_hpd_pin_to_port(i, &port) &&
- dev_priv->hotplug.irq_port[port];
+ port = intel_hpd_pin_to_port(i);
+ is_dig_port = port != PORT_NONE &&
+ dev_priv->hotplug.irq_port[port];
if (is_dig_port) {
bool long_hpd = long_mask & BIT(i);
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 3c9e00d4ba5a..6698826954e1 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -592,7 +592,6 @@ gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
int ret;
intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
- mutex_lock(&dev_priv->gmbus_mutex);
if (bus->force_bit) {
ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
@@ -604,7 +603,6 @@ gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
bus->force_bit |= GMBUS_FORCE_BIT_RETRY;
}
- mutex_unlock(&dev_priv->gmbus_mutex);
intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
return ret;
@@ -624,6 +622,39 @@ static const struct i2c_algorithm gmbus_algorithm = {
.functionality = gmbus_func
};
+static void gmbus_lock_bus(struct i2c_adapter *adapter,
+ unsigned int flags)
+{
+ struct intel_gmbus *bus = to_intel_gmbus(adapter);
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+
+ mutex_lock(&dev_priv->gmbus_mutex);
+}
+
+static int gmbus_trylock_bus(struct i2c_adapter *adapter,
+ unsigned int flags)
+{
+ struct intel_gmbus *bus = to_intel_gmbus(adapter);
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+
+ return mutex_trylock(&dev_priv->gmbus_mutex);
+}
+
+static void gmbus_unlock_bus(struct i2c_adapter *adapter,
+ unsigned int flags)
+{
+ struct intel_gmbus *bus = to_intel_gmbus(adapter);
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+
+ mutex_unlock(&dev_priv->gmbus_mutex);
+}
+
+const struct i2c_lock_operations gmbus_lock_ops = {
+ .lock_bus = gmbus_lock_bus,
+ .trylock_bus = gmbus_trylock_bus,
+ .unlock_bus = gmbus_unlock_bus,
+};
+
/**
* intel_gmbus_setup - instantiate all Intel i2c GMBuses
* @dev_priv: i915 device private
@@ -665,6 +696,7 @@ int intel_setup_gmbus(struct drm_i915_private *dev_priv)
bus->dev_priv = dev_priv;
bus->adapter.algo = &gmbus_algorithm;
+ bus->adapter.lock_ops = &gmbus_lock_ops;
/*
* We wish to retry with bit banging
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 7404cf2aac28..6f972e6ec663 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1221,6 +1221,14 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
return ret;
}
+static u8 gtiir[] = {
+ [RCS] = 0,
+ [BCS] = 0,
+ [VCS] = 1,
+ [VCS2] = 1,
+ [VECS] = 3,
+};
+
static int gen8_init_common_ring(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
@@ -1245,9 +1253,22 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name);
- /* After a GPU reset, we may have requests to replay */
+ GEM_BUG_ON(engine->id >= ARRAY_SIZE(gtiir));
+
+ /*
+ * Clear any pending interrupt state.
+ *
+ * We do it twice out of paranoia that some of the IIR are double
+ * buffered, and if we only reset it once there may still be
+ * an interrupt pending.
+ */
+ I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]),
+ GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift);
+ I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]),
+ GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift);
clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
+ /* After a GPU reset, we may have requests to replay */
submit = false;
for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) {
if (!port_isset(&port[n]))
@@ -1306,6 +1327,31 @@ static void reset_common_ring(struct intel_engine_cs *engine,
{
struct execlist_port *port = engine->execlist_port;
struct intel_context *ce;
+ unsigned int n;
+
+ /*
+ * Catch up with any missed context-switch interrupts.
+ *
+ * Ideally we would just read the remaining CSB entries now that we
+ * know the gpu is idle. However, the CSB registers are sometimes^W
+ * often trashed across a GPU reset! Instead we have to rely on
+ * guessing the missed context-switch events by looking at what
+ * requests were completed.
+ */
+ if (!request) {
+ for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++)
+ i915_gem_request_put(port_request(&port[n]));
+ memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
+ return;
+ }
+
+ if (request->ctx != port_request(port)->ctx) {
+ i915_gem_request_put(port_request(port));
+ port[0] = port[1];
+ memset(&port[1], 0, sizeof(port[1]));
+ }
+
+ GEM_BUG_ON(request->ctx != port_request(port)->ctx);
/* If the request was innocent, we leave the request in the ELSP
* and will try to replay it on restarting. The context image may
@@ -1317,7 +1363,7 @@ static void reset_common_ring(struct intel_engine_cs *engine,
* and have to at least restore the RING register in the context
* image back to the expected values to skip over the guilty request.
*/
- if (!request || request->fence.error != -EIO)
+ if (request->fence.error != -EIO)
return;
/* We want a simple context + ring to execute the breadcrumb update.
@@ -1339,15 +1385,6 @@ static void reset_common_ring(struct intel_engine_cs *engine,
request->ring->head = request->postfix;
intel_ring_update_space(request->ring);
- /* Catch up with any missed context-switch interrupts */
- if (request->ctx != port_request(port)->ctx) {
- i915_gem_request_put(port_request(port));
- port[0] = port[1];
- memset(&port[1], 0, sizeof(port[1]));
- }
-
- GEM_BUG_ON(request->ctx != port_request(port)->ctx);
-
/* Reset WaIdleLiteRestore:bdw,skl as well */
request->tail =
intel_ring_wrap(request->ring,
@@ -2071,7 +2108,7 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv)
* So to avoid that we reset the context images upon resume. For
* simplicity, we just zero everything out.
*/
- list_for_each_entry(ctx, &dev_priv->context_list, link) {
+ list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
for_each_engine(engine, dev_priv, id) {
struct intel_context *ce = &ctx->engine[engine->id];
u32 *reg;
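
A note for readers following the reset path above: the catch-up of missed context-switch events now happens early in reset_common_ring() instead of after the ring state is rewound. Below is a minimal userspace sketch of the two-slot port shift, using made-up simplified types rather than the driver's real structures.

#include <stdio.h>
#include <string.h>

struct fake_ctx { const char *name; };
struct fake_request { struct fake_ctx *ctx; };
struct fake_port { struct fake_request *request; };

static void catch_up(struct fake_port port[2], struct fake_request *rq)
{
	/* Mirrors: if (request->ctx != port_request(port)->ctx) { ... } */
	if (rq && port[0].request && rq->ctx != port[0].request->ctx) {
		port[0] = port[1];			/* promote the second entry */
		memset(&port[1], 0, sizeof(port[1]));	/* and clear its old slot */
	}
}

int main(void)
{
	struct fake_ctx a = { "ctx-A" }, b = { "ctx-B" };
	struct fake_request ra = { &a }, rb = { &b };
	struct fake_port port[2] = { { &ra }, { &rb } };

	catch_up(port, &rb);	/* the surviving request belongs to ctx-B */
	printf("port[0]=%s port[1]=%s\n",
	       port[0].request ? port[0].request->ctx->name : "empty",
	       port[1].request ? port[1].request->ctx->name : "empty");
	return 0;
}
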
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 52b3a1fd4059..57ef5833c427 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -63,7 +63,6 @@ enum {
};
/* Logical Rings */
-void intel_logical_ring_stop(struct intel_engine_cs *engine);
void intel_logical_ring_cleanup(struct intel_engine_cs *engine);
int logical_render_ring_init(struct intel_engine_cs *engine);
int logical_xcs_ring_init(struct intel_engine_cs *engine);
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c
index 5abef482eacf..beb9baaf2f2e 100644
--- a/drivers/gpu/drm/i915/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/intel_lspcon.c
@@ -210,8 +210,8 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port)
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- if (!IS_GEN9(dev_priv)) {
- DRM_ERROR("LSPCON is supported on GEN9 only\n");
+ if (!HAS_LSPCON(dev_priv)) {
+ DRM_ERROR("LSPCON is not supported on this platform\n");
return false;
}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 6fe5d7c3bc23..8e215777c7f4 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -595,10 +595,8 @@ static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs
};
static const struct drm_connector_funcs intel_lvds_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.detect = intel_lvds_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
- .set_property = drm_atomic_helper_connector_set_property,
.atomic_get_property = intel_digital_connector_atomic_get_property,
.atomic_set_property = intel_digital_connector_atomic_set_property,
.late_register = intel_connector_register,
@@ -1140,7 +1138,8 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
out:
mutex_unlock(&dev->mode_config.mutex);
- intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
+ intel_panel_init(&intel_connector->panel, fixed_mode, NULL,
+ downclock_mode);
intel_panel_setup_backlight(connector, INVALID_PIPE);
lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 2bd03001cc70..98154efcb2f4 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -27,6 +27,7 @@
#include <linux/acpi.h>
#include <linux/dmi.h>
+#include <linux/firmware.h>
#include <acpi/video.h>
#include <drm/drmP.h>
@@ -829,6 +830,10 @@ void intel_opregion_unregister(struct drm_i915_private *dev_priv)
memunmap(opregion->rvda);
opregion->rvda = NULL;
}
+ if (opregion->vbt_firmware) {
+ kfree(opregion->vbt_firmware);
+ opregion->vbt_firmware = NULL;
+ }
opregion->header = NULL;
opregion->acpi = NULL;
opregion->swsci = NULL;
@@ -912,6 +917,43 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = {
{ }
};
+static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
+{
+ struct intel_opregion *opregion = &dev_priv->opregion;
+ const struct firmware *fw = NULL;
+ const char *name = i915.vbt_firmware;
+ int ret;
+
+ if (!name || !*name)
+ return -ENOENT;
+
+ ret = request_firmware(&fw, name, &dev_priv->drm.pdev->dev);
+ if (ret) {
+ DRM_ERROR("Requesting VBT firmware \"%s\" failed (%d)\n",
+ name, ret);
+ return ret;
+ }
+
+ if (intel_bios_is_valid_vbt(fw->data, fw->size)) {
+ opregion->vbt_firmware = kmemdup(fw->data, fw->size, GFP_KERNEL);
+ if (opregion->vbt_firmware) {
+ DRM_DEBUG_KMS("Found valid VBT firmware \"%s\"\n", name);
+ opregion->vbt = opregion->vbt_firmware;
+ opregion->vbt_size = fw->size;
+ ret = 0;
+ } else {
+ ret = -ENOMEM;
+ }
+ } else {
+ DRM_DEBUG_KMS("Invalid VBT firmware \"%s\"\n", name);
+ ret = -EINVAL;
+ }
+
+ release_firmware(fw);
+
+ return ret;
+}
+
int intel_opregion_setup(struct drm_i915_private *dev_priv)
{
struct intel_opregion *opregion = &dev_priv->opregion;
@@ -974,6 +1016,9 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
if (mboxes & MBOX_ASLE_EXT)
DRM_DEBUG_DRIVER("ASLE extension supported\n");
+ if (intel_load_vbt_firmware(dev_priv) == 0)
+ goto out;
+
if (dmi_check_system(intel_no_opregion_vbt))
goto out;
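
The new intel_load_vbt_firmware() follows a load/validate/copy/release pattern. A hedged userspace analogue is sketched below, with file I/O and a stand-in validator instead of request_firmware() and intel_bios_is_valid_vbt(); all names and the signature check here are illustrative only.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in validator; the driver calls intel_bios_is_valid_vbt() instead. */
static int blob_is_valid(const unsigned char *data, size_t size)
{
	return size >= 4 && memcmp(data, "$VBT", 4) == 0;	/* made-up check */
}

/* Load an override blob, validate it, and keep a private copy of it. */
static unsigned char *load_override(const char *name, size_t *out_size)
{
	FILE *f = fopen(name, "rb");
	unsigned char *buf = NULL, *copy = NULL;
	long size;

	if (!f)
		return NULL;
	if (fseek(f, 0, SEEK_END) == 0 && (size = ftell(f)) > 0 &&
	    fseek(f, 0, SEEK_SET) == 0 && (buf = malloc(size)) &&
	    fread(buf, 1, size, f) == (size_t)size &&
	    blob_is_valid(buf, size)) {
		copy = malloc(size);
		if (copy) {
			memcpy(copy, buf, size);
			*out_size = size;
		}
	}
	free(buf);	/* mirrors release_firmware(): the temp buffer is dropped */
	fclose(f);
	return copy;
}

int main(int argc, char **argv)
{
	size_t size = 0;
	unsigned char *vbt = argc > 1 ? load_override(argv[1], &size) : NULL;

	if (vbt)
		printf("using override (%zu bytes)\n", size);
	else
		printf("no valid override\n");
	free(vbt);
	return 0;
}
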
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index b96aed941b97..aace22e7ccac 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -799,9 +799,13 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
if (ret != 0)
return ret;
+ atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
+
vma = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto out_pin_section;
+ }
ret = i915_vma_put_fence(vma);
if (ret)
@@ -886,6 +890,9 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
out_unpin:
i915_gem_object_unpin_from_display_plane(vma);
+out_pin_section:
+ atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
+
return ret;
}
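
The overlay change brackets the display-plane pin with a pending_fb_pin counter that is dropped on both the success and the error path. A small standalone C sketch of that goto-based pairing follows; the pin/unpin functions are stand-ins, not the driver's API.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int pending_pin;

/* Hypothetical stand-ins for the pin step that can fail. */
static int pin_to_display(int should_fail) { return should_fail ? -12 : 0; }
static void unpin_from_display(void) { }

static int do_put_image(int should_fail)
{
	int ret;

	atomic_fetch_add(&pending_pin, 1);	/* announce the pin attempt */

	ret = pin_to_display(should_fail);
	if (ret)
		goto out_pin_section;	/* error path still drops the counter */

	/* ... work with the pinned buffer ... */
	unpin_from_display();
	ret = 0;

out_pin_section:
	atomic_fetch_sub(&pending_pin, 1);
	return ret;
}

int main(void)
{
	do_put_image(1);	/* failing pin */
	do_put_image(0);	/* successful pin */
	printf("pending_pin=%d\n", atomic_load(&pending_pin)); /* prints 0 */
	return 0;
}
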
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 96c2cbd81869..a17b1de7d7e0 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -110,7 +110,8 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
/* Native modes don't need fitting */
if (adjusted_mode->crtc_hdisplay == pipe_config->pipe_src_w &&
- adjusted_mode->crtc_vdisplay == pipe_config->pipe_src_h)
+ adjusted_mode->crtc_vdisplay == pipe_config->pipe_src_h &&
+ !pipe_config->ycbcr420)
goto done;
switch (fitting_mode) {
@@ -469,7 +470,7 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector,
if (i915.invert_brightness > 0 ||
dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
- return panel->backlight.max - val;
+ return panel->backlight.max - val + panel->backlight.min;
}
return val;
@@ -1919,11 +1920,13 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
int intel_panel_init(struct intel_panel *panel,
struct drm_display_mode *fixed_mode,
+ struct drm_display_mode *alt_fixed_mode,
struct drm_display_mode *downclock_mode)
{
intel_panel_init_backlight_funcs(panel);
panel->fixed_mode = fixed_mode;
+ panel->alt_fixed_mode = alt_fixed_mode;
panel->downclock_mode = downclock_mode;
return 0;
@@ -1937,6 +1940,10 @@ void intel_panel_fini(struct intel_panel *panel)
if (panel->fixed_mode)
drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode);
+ if (panel->alt_fixed_mode)
+ drm_mode_destroy(intel_connector->base.dev,
+ panel->alt_fixed_mode);
+
if (panel->downclock_mode)
drm_mode_destroy(intel_connector->base.dev,
panel->downclock_mode);
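
The backlight change remaps an inverted brightness as max - val + min, so the result stays within the panel's [min, max] range, whereas the old max - val could fall below the minimum. A tiny self-contained check of that arithmetic:

#include <assert.h>
#include <stdio.h>

static unsigned int invert(unsigned int val, unsigned int min, unsigned int max)
{
	/* Remap as in the hunk above: keeps the output inside [min, max]. */
	return max - val + min;
}

int main(void)
{
	unsigned int min = 10, max = 100;

	assert(invert(min, min, max) == max);
	assert(invert(max, min, max) == min);
	printf("invert(40) = %u\n", invert(40, min, max)); /* prints 70 */
	return 0;
}
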
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 48ea0fca1f72..ed662937ec3c 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -62,6 +62,20 @@ static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
I915_WRITE(CHICKEN_PAR1_1,
I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
+ /*
+ * Display WA#0390: skl,bxt,kbl,glk
+ *
+ * Must match Sampler, Pixel Back End, and Media
+ * (0xE194 bit 8, 0x7014 bit 13, 0x4DDC bits 27 and 31).
+ *
+ * Including bits outside the page in the hash would
+ * require 2 (or 4?) MiB alignment of resources. Just
+ * assume the default hashing mode which only uses bits
+ * within the page.
+ */
+ I915_WRITE(CHICKEN_PAR1_1,
+ I915_READ(CHICKEN_PAR1_1) & ~SKL_RC_HASH_OUTSIDE);
+
I915_WRITE(GEN8_CONFIG0,
I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES);
@@ -78,6 +92,12 @@ static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
/* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl,cfl */
I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
ILK_DPFC_DISABLE_DUMMY0);
+
+ if (IS_SKYLAKE(dev_priv)) {
+ /* WaDisableDopClockGating */
+ I915_WRITE(GEN7_MISCCPCTL, I915_READ(GEN7_MISCCPCTL)
+ & ~GEN7_DOP_CLOCK_GATE_ENABLE);
+ }
}
static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -2758,7 +2778,7 @@ hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
uint16_t wm[8])
{
- if (IS_GEN9(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 9) {
uint32_t val;
int ret, i;
int level, max_level = ilk_wm_max_level(dev_priv);
@@ -2818,7 +2838,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
}
/*
- * WaWmMemoryReadLatency:skl,glk
+ * WaWmMemoryReadLatency:skl+,glk
*
* punit doesn't take into account the read latency so we need
* to add 2us to the various latency levels we retrieve from the
@@ -2857,6 +2877,8 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
wm[0] = 7;
wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
+ } else {
+ MISSING_CASE(INTEL_DEVID(dev_priv));
}
}
@@ -2912,7 +2934,7 @@ static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
* - latencies are in us on gen9.
* - before then, WM1+ latency values are in 0.5us units
*/
- if (IS_GEN9(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 9)
latency *= 10;
else if (level > 0)
latency *= 5;
@@ -3530,8 +3552,6 @@ bool ilk_disable_lp_wm(struct drm_device *dev)
return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
}
-#define SKL_SAGV_BLOCK_TIME 30 /* µs */
-
/*
 * FIXME: We still don't have the proper code to detect if we need to apply the WA,
* so assume we'll always need it in order to avoid underruns.
@@ -3549,7 +3569,8 @@ static bool skl_needs_memory_bw_wa(struct intel_atomic_state *state)
static bool
intel_has_sagv(struct drm_i915_private *dev_priv)
{
- if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
+ if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) ||
+ IS_CANNONLAKE(dev_priv))
return true;
if (IS_SKYLAKE(dev_priv) &&
@@ -3655,12 +3676,13 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
struct intel_crtc_state *cstate;
enum pipe pipe;
int level, latency;
+ int sagv_block_time_us = IS_GEN9(dev_priv) ? 30 : 20;
if (!intel_has_sagv(dev_priv))
return false;
/*
- * SKL workaround: bspec recommends we disable the SAGV when we have
+ * SKL+ workaround: bspec recommends we disable the SAGV when we have
 * more than one pipe enabled
*
* If there are no active CRTCs, no additional checks need be performed
@@ -3699,11 +3721,11 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
latency += 15;
/*
- * If any of the planes on this pipe don't enable wm levels
- * that incur memory latencies higher then 30µs we can't enable
- * the SAGV
+ * If any of the planes on this pipe don't enable wm levels that
+ * incur memory latencies higher than sagv_block_time_us we
+ * can't enable the SAGV.
*/
- if (latency < SKL_SAGV_BLOCK_TIME)
+ if (latency < sagv_block_time_us)
return false;
}
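
The SAGV change replaces the fixed 30 µs block time with a per-generation value (30 µs on gen9, 20 µs afterwards) compared against the latency of the enabled watermark level. A compact sketch of that decision, with the values taken from the hunk above:

#include <stdbool.h>
#include <stdio.h>

/* Per-generation SAGV block time, as selected in the code above. */
static int sagv_block_time_us(int gen)
{
	return gen == 9 ? 30 : 20;
}

/* SAGV may stay enabled only if the wm level's latency hides the block time. */
static bool can_enable_sagv(int gen, int level_latency_us)
{
	return level_latency_us >= sagv_block_time_us(gen);
}

int main(void)
{
	printf("gen9,  25us: %d\n", can_enable_sagv(9, 25));  /* 0 */
	printf("gen10, 25us: %d\n", can_enable_sagv(10, 25)); /* 1 */
	return 0;
}
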
@@ -3837,7 +3859,7 @@ skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
uint_fixed_16_16_t downscale_h, downscale_w;
if (WARN_ON(!intel_wm_plane_visible(cstate, pstate)))
- return u32_to_fixed_16_16(0);
+ return u32_to_fixed16(0);
/* n.b., src is 16.16 fixed point, dst is whole integer */
if (plane->id == PLANE_CURSOR) {
@@ -3861,10 +3883,10 @@ skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
dst_h = drm_rect_height(&pstate->base.dst);
}
- fp_w_ratio = fixed_16_16_div(src_w, dst_w);
- fp_h_ratio = fixed_16_16_div(src_h, dst_h);
- downscale_w = max_fixed_16_16(fp_w_ratio, u32_to_fixed_16_16(1));
- downscale_h = max_fixed_16_16(fp_h_ratio, u32_to_fixed_16_16(1));
+ fp_w_ratio = div_fixed16(src_w, dst_w);
+ fp_h_ratio = div_fixed16(src_h, dst_h);
+ downscale_w = max_fixed16(fp_w_ratio, u32_to_fixed16(1));
+ downscale_h = max_fixed16(fp_h_ratio, u32_to_fixed16(1));
return mul_fixed16(downscale_w, downscale_h);
}
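
The watermark code switches to the shorter fixed-point helper names (u32_to_fixed16, div_fixed16, mul_fixed16, max_fixed16). Below is a self-contained 16.16 implementation of the downscale-amount computation for illustration; the driver's own helpers, defined elsewhere in i915, additionally guard against overflow.

#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t val; } fixed16_t;	/* 16.16 fixed point */

static fixed16_t u32_to_fixed16(uint32_t v)
{
	return (fixed16_t){ v << 16 };			/* assumes v < 65536 */
}

static fixed16_t div_fixed16(uint32_t a, uint32_t b)
{
	return (fixed16_t){ (uint32_t)(((uint64_t)a << 16) / b) };
}

static fixed16_t mul_fixed16(fixed16_t a, fixed16_t b)
{
	return (fixed16_t){ (uint32_t)(((uint64_t)a.val * b.val) >> 16) };
}

static fixed16_t max_fixed16(fixed16_t a, fixed16_t b)
{
	return a.val > b.val ? a : b;
}

/* Downscale amount = max(src/dst, 1) per dimension, multiplied together. */
static fixed16_t downscale_amount(uint32_t src_w, uint32_t src_h,
				  uint32_t dst_w, uint32_t dst_h)
{
	fixed16_t w = max_fixed16(div_fixed16(src_w, dst_w), u32_to_fixed16(1));
	fixed16_t h = max_fixed16(div_fixed16(src_h, dst_h), u32_to_fixed16(1));

	return mul_fixed16(w, h);
}

int main(void)
{
	fixed16_t d = downscale_amount(3840, 2160, 1920, 1080);

	printf("downscale = %u.%04u\n", d.val >> 16,
	       (unsigned)(((uint64_t)(d.val & 0xffff) * 10000) >> 16)); /* 4.0000 */
	return 0;
}
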
@@ -3872,7 +3894,7 @@ skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
static uint_fixed_16_16_t
skl_pipe_downscale_amount(const struct intel_crtc_state *crtc_state)
{
- uint_fixed_16_16_t pipe_downscale = u32_to_fixed_16_16(1);
+ uint_fixed_16_16_t pipe_downscale = u32_to_fixed16(1);
if (!crtc_state->base.enable)
return pipe_downscale;
@@ -3891,10 +3913,10 @@ skl_pipe_downscale_amount(const struct intel_crtc_state *crtc_state)
if (!dst_w || !dst_h)
return pipe_downscale;
- fp_w_ratio = fixed_16_16_div(src_w, dst_w);
- fp_h_ratio = fixed_16_16_div(src_h, dst_h);
- downscale_w = max_fixed_16_16(fp_w_ratio, u32_to_fixed_16_16(1));
- downscale_h = max_fixed_16_16(fp_h_ratio, u32_to_fixed_16_16(1));
+ fp_w_ratio = div_fixed16(src_w, dst_w);
+ fp_h_ratio = div_fixed16(src_h, dst_h);
+ downscale_w = max_fixed16(fp_w_ratio, u32_to_fixed16(1));
+ downscale_h = max_fixed16(fp_h_ratio, u32_to_fixed16(1));
pipe_downscale = mul_fixed16(downscale_w, downscale_h);
}
@@ -3913,14 +3935,14 @@ int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
int crtc_clock, dotclk;
uint32_t pipe_max_pixel_rate;
uint_fixed_16_16_t pipe_downscale;
- uint_fixed_16_16_t max_downscale = u32_to_fixed_16_16(1);
+ uint_fixed_16_16_t max_downscale = u32_to_fixed16(1);
if (!cstate->base.enable)
return 0;
drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
uint_fixed_16_16_t plane_downscale;
- uint_fixed_16_16_t fp_9_div_8 = fixed_16_16_div(9, 8);
+ uint_fixed_16_16_t fp_9_div_8 = div_fixed16(9, 8);
int bpp;
if (!intel_wm_plane_visible(cstate,
@@ -3938,7 +3960,7 @@ int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
plane_downscale = mul_fixed16(plane_downscale,
fp_9_div_8);
- max_downscale = max_fixed_16_16(plane_downscale, max_downscale);
+ max_downscale = max_fixed16(plane_downscale, max_downscale);
}
pipe_downscale = skl_pipe_downscale_amount(cstate);
@@ -4071,7 +4093,9 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate,
/* For Non Y-tile return 8-blocks */
if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
- fb->modifier != I915_FORMAT_MOD_Yf_TILED)
+ fb->modifier != I915_FORMAT_MOD_Yf_TILED &&
+ fb->modifier != I915_FORMAT_MOD_Y_TILED_CCS &&
+ fb->modifier != I915_FORMAT_MOD_Yf_TILED_CCS)
return 8;
/*
@@ -4266,8 +4290,9 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
* should allow pixel_rate up to ~2 GHz which seems sufficient since max
* 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
*/
-static uint_fixed_16_16_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp,
- uint32_t latency)
+static uint_fixed_16_16_t
+skl_wm_method1(const struct drm_i915_private *dev_priv, uint32_t pixel_rate,
+ uint8_t cpp, uint32_t latency)
{
uint32_t wm_intermediate_val;
uint_fixed_16_16_t ret;
@@ -4276,7 +4301,11 @@ static uint_fixed_16_16_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp,
return FP_16_16_MAX;
wm_intermediate_val = latency * pixel_rate * cpp;
- ret = fixed_16_16_div_u64(wm_intermediate_val, 1000 * 512);
+ ret = div_fixed16(wm_intermediate_val, 1000 * 512);
+
+ if (INTEL_GEN(dev_priv) >= 10)
+ ret = add_fixed16_u32(ret, 1);
+
return ret;
}
@@ -4294,7 +4323,7 @@ static uint_fixed_16_16_t skl_wm_method2(uint32_t pixel_rate,
wm_intermediate_val = latency * pixel_rate;
wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val,
pipe_htotal * 1000);
- ret = mul_u32_fixed_16_16(wm_intermediate_val, plane_blocks_per_line);
+ ret = mul_u32_fixed16(wm_intermediate_val, plane_blocks_per_line);
return ret;
}
@@ -4306,15 +4335,15 @@ intel_get_linetime_us(struct intel_crtc_state *cstate)
uint_fixed_16_16_t linetime_us;
if (!cstate->base.active)
- return u32_to_fixed_16_16(0);
+ return u32_to_fixed16(0);
pixel_rate = cstate->pixel_rate;
if (WARN_ON(pixel_rate == 0))
- return u32_to_fixed_16_16(0);
+ return u32_to_fixed16(0);
crtc_htotal = cstate->base.adjusted_mode.crtc_htotal;
- linetime_us = fixed_16_16_div_u64(crtc_htotal * 1000, pixel_rate);
+ linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate);
return linetime_us;
}
@@ -4361,7 +4390,7 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
uint32_t plane_bytes_per_line;
uint32_t res_blocks, res_lines;
uint8_t cpp;
- uint32_t width = 0, height = 0;
+ uint32_t width = 0;
uint32_t plane_pixel_rate;
uint_fixed_16_16_t y_tile_minimum;
uint32_t y_min_scanlines;
@@ -4377,7 +4406,9 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
}
y_tiled = fb->modifier == I915_FORMAT_MOD_Y_TILED ||
- fb->modifier == I915_FORMAT_MOD_Yf_TILED;
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED ||
+ fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
x_tiled = fb->modifier == I915_FORMAT_MOD_X_TILED;
/* Display WA #1141: kbl,cfl */
@@ -4390,7 +4421,6 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
if (plane->id == PLANE_CURSOR) {
width = intel_pstate->base.crtc_w;
- height = intel_pstate->base.crtc_h;
} else {
/*
* Src coordinates are already rotated by 270 degrees for
@@ -4398,16 +4428,13 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
* GTT mapping), hence no need to account for rotation here.
*/
width = drm_rect_width(&intel_pstate->base.src) >> 16;
- height = drm_rect_height(&intel_pstate->base.src) >> 16;
}
- cpp = fb->format->cpp[0];
+ cpp = (fb->format->format == DRM_FORMAT_NV12) ? fb->format->cpp[1] :
+ fb->format->cpp[0];
plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);
if (drm_rotation_90_or_270(pstate->rotation)) {
- int cpp = (fb->format->format == DRM_FORMAT_NV12) ?
- fb->format->cpp[1] :
- fb->format->cpp[0];
switch (cpp) {
case 1:
@@ -4434,51 +4461,62 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
if (y_tiled) {
interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line *
y_min_scanlines, 512);
- plane_blocks_per_line = fixed_16_16_div(interm_pbpl,
+
+ if (INTEL_GEN(dev_priv) >= 10)
+ interm_pbpl++;
+
+ plane_blocks_per_line = div_fixed16(interm_pbpl,
y_min_scanlines);
- } else if (x_tiled) {
+ } else if (x_tiled && INTEL_GEN(dev_priv) == 9) {
interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line, 512);
- plane_blocks_per_line = u32_to_fixed_16_16(interm_pbpl);
+ plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
} else {
interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line, 512) + 1;
- plane_blocks_per_line = u32_to_fixed_16_16(interm_pbpl);
+ plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
}
- method1 = skl_wm_method1(plane_pixel_rate, cpp, latency);
+ method1 = skl_wm_method1(dev_priv, plane_pixel_rate, cpp, latency);
method2 = skl_wm_method2(plane_pixel_rate,
cstate->base.adjusted_mode.crtc_htotal,
latency,
plane_blocks_per_line);
- y_tile_minimum = mul_u32_fixed_16_16(y_min_scanlines,
- plane_blocks_per_line);
+ y_tile_minimum = mul_u32_fixed16(y_min_scanlines,
+ plane_blocks_per_line);
if (y_tiled) {
- selected_result = max_fixed_16_16(method2, y_tile_minimum);
+ selected_result = max_fixed16(method2, y_tile_minimum);
} else {
uint32_t linetime_us;
- linetime_us = fixed_16_16_to_u32_round_up(
+ linetime_us = fixed16_to_u32_round_up(
intel_get_linetime_us(cstate));
if ((cpp * cstate->base.adjusted_mode.crtc_htotal / 512 < 1) &&
(plane_bytes_per_line / 512 < 1))
selected_result = method2;
- else if ((ddb_allocation && ddb_allocation /
- fixed_16_16_to_u32_round_up(plane_blocks_per_line)) >= 1)
- selected_result = min_fixed_16_16(method1, method2);
+ else if (ddb_allocation >=
+ fixed16_to_u32_round_up(plane_blocks_per_line))
+ selected_result = min_fixed16(method1, method2);
else if (latency >= linetime_us)
- selected_result = min_fixed_16_16(method1, method2);
+ selected_result = min_fixed16(method1, method2);
else
selected_result = method1;
}
- res_blocks = fixed_16_16_to_u32_round_up(selected_result) + 1;
+ res_blocks = fixed16_to_u32_round_up(selected_result) + 1;
res_lines = div_round_up_fixed16(selected_result,
plane_blocks_per_line);
+ /* Display WA #1125: skl,bxt,kbl,glk */
+ if (level == 0 &&
+ (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS))
+ res_blocks += fixed16_to_u32_round_up(y_tile_minimum);
+
+ /* Display WA #1126: skl,bxt,kbl,glk */
if (level >= 1 && level <= 7) {
if (y_tiled) {
- res_blocks += fixed_16_16_to_u32_round_up(y_tile_minimum);
+ res_blocks += fixed16_to_u32_round_up(y_tile_minimum);
res_lines += y_min_scanlines;
} else {
res_blocks++;
@@ -4563,8 +4601,7 @@ skl_compute_linetime_wm(struct intel_crtc_state *cstate)
if (is_fixed16_zero(linetime_us))
return 0;
- linetime_wm = fixed_16_16_to_u32_round_up(mul_u32_fixed_16_16(8,
- linetime_us));
+ linetime_wm = fixed16_to_u32_round_up(mul_u32_fixed16(8, linetime_us));
/* Display WA #1135: bxt. */
if (IS_BROXTON(dev_priv) && dev_priv->ipc_enabled)
@@ -5852,7 +5889,7 @@ static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
* the hw runs at the minimal clock before selecting the desired
* frequency, if the down threshold expires in that window we will not
* receive a down interrupt. */
- if (IS_GEN9(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 9) {
limits = (dev_priv->rps.max_freq_softlimit) << 23;
if (val <= dev_priv->rps.min_freq_softlimit)
limits |= (dev_priv->rps.min_freq_softlimit) << 14;
@@ -5994,7 +6031,7 @@ static int gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
if (val != dev_priv->rps.cur_freq) {
gen6_set_rps_thresholds(dev_priv, val);
- if (IS_GEN9(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 9)
I915_WRITE(GEN6_RPNSWREQ,
GEN9_FREQUENCY(val));
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
@@ -6126,47 +6163,35 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
gen6_sanitize_rps_pm_mask(dev_priv, ~0));
}
mutex_unlock(&dev_priv->rps.hw_lock);
-
- spin_lock(&dev_priv->rps.client_lock);
- while (!list_empty(&dev_priv->rps.clients))
- list_del_init(dev_priv->rps.clients.next);
- spin_unlock(&dev_priv->rps.client_lock);
}
-void gen6_rps_boost(struct drm_i915_private *dev_priv,
- struct intel_rps_client *rps,
- unsigned long submitted)
+void gen6_rps_boost(struct drm_i915_gem_request *rq,
+ struct intel_rps_client *rps)
{
+ struct drm_i915_private *i915 = rq->i915;
+ bool boost;
+
/* This is intentionally racy! We peek at the state here, then
* validate inside the RPS worker.
*/
- if (!(dev_priv->gt.awake &&
- dev_priv->rps.enabled &&
- dev_priv->rps.cur_freq < dev_priv->rps.boost_freq))
+ if (!i915->rps.enabled)
return;
- /* Force a RPS boost (and don't count it against the client) if
- * the GPU is severely congested.
- */
- if (rps && time_after(jiffies, submitted + DRM_I915_THROTTLE_JIFFIES))
- rps = NULL;
-
- spin_lock(&dev_priv->rps.client_lock);
- if (rps == NULL || list_empty(&rps->link)) {
- spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->rps.interrupts_enabled) {
- dev_priv->rps.client_boost = true;
- schedule_work(&dev_priv->rps.work);
- }
- spin_unlock_irq(&dev_priv->irq_lock);
-
- if (rps != NULL) {
- list_add(&rps->link, &dev_priv->rps.clients);
- rps->boosts++;
- } else
- dev_priv->rps.boosts++;
+ boost = false;
+ spin_lock_irq(&rq->lock);
+ if (!rq->waitboost && !i915_gem_request_completed(rq)) {
+ atomic_inc(&i915->rps.num_waiters);
+ rq->waitboost = true;
+ boost = true;
}
- spin_unlock(&dev_priv->rps.client_lock);
+ spin_unlock_irq(&rq->lock);
+ if (!boost)
+ return;
+
+ if (READ_ONCE(i915->rps.cur_freq) < i915->rps.boost_freq)
+ schedule_work(&i915->rps.work);
+
+ atomic_inc(rps ? &rps->boosts : &i915->rps.boosts);
}
int intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
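
gen6_rps_boost() now boosts at most once per request: the waitboost flag is set under the request lock, a waiter counter is bumped, and the RPS worker is only kicked when needed. A standalone sketch of that once-only pattern, using a pthread mutex and a C11 atomic as stand-ins for the driver's locks and counters:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_request {
	pthread_mutex_t lock;
	bool completed;
	bool waitboost;
};

static atomic_int num_waiters;

static void rps_boost(struct fake_request *rq)
{
	bool boost = false;

	pthread_mutex_lock(&rq->lock);
	if (!rq->waitboost && !rq->completed) {
		atomic_fetch_add(&num_waiters, 1);
		rq->waitboost = true;	/* each request boosts at most once */
		boost = true;
	}
	pthread_mutex_unlock(&rq->lock);

	if (boost)
		printf("kick RPS worker (waiters=%d)\n",
		       atomic_load(&num_waiters));
}

int main(void)
{
	struct fake_request rq = { PTHREAD_MUTEX_INITIALIZER, false, false };

	rps_boost(&rq);	/* boosts and bumps the counter */
	rps_boost(&rq);	/* no-op: waitboost is already set */
	return 0;
}
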
@@ -6365,7 +6390,7 @@ static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
- IS_GEN9_BC(dev_priv)) {
+ IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
u32 ddcc_status = 0;
if (sandybridge_pcode_read(dev_priv,
@@ -6378,7 +6403,7 @@ static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
dev_priv->rps.max_freq);
}
- if (IS_GEN9_BC(dev_priv)) {
+ if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
/* Store the frequency values in 16.66 MHZ units, which is
* the natural hardware unit for SKL
*/
@@ -6684,7 +6709,7 @@ static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
/* convert DDR frequency from units of 266.6MHz to bandwidth */
min_ring_freq = mult_frac(min_ring_freq, 8, 3);
- if (IS_GEN9_BC(dev_priv)) {
+ if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
/* Convert GT frequency to 50 HZ units */
min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
@@ -6702,7 +6727,7 @@ static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
int diff = max_gpu_freq - gpu_freq;
unsigned int ia_freq = 0, ring_freq = 0;
- if (IS_GEN9_BC(dev_priv)) {
+ if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
/*
* ring_freq = 2 * GT. ring_freq is in 100MHz units
* No floor required for ring frequency on SKL.
@@ -7833,7 +7858,7 @@ void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
} else if (INTEL_GEN(dev_priv) >= 9) {
gen9_enable_rc6(dev_priv);
gen9_enable_rps(dev_priv);
- if (IS_GEN9_BC(dev_priv))
+ if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv))
gen6_update_ring_freq(dev_priv);
} else if (IS_BROADWELL(dev_priv)) {
gen8_enable_rps(dev_priv);
@@ -8848,6 +8873,7 @@ static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
case GEN6_PCODE_SUCCESS:
return 0;
case GEN6_PCODE_UNIMPLEMENTED_CMD:
+ return -ENODEV;
case GEN6_PCODE_ILLEGAL_CMD:
return -ENXIO;
case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
@@ -8895,7 +8921,8 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val
*/
if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
- DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
+ DRM_DEBUG_DRIVER("warning: pcode (read from mbox %x) mailbox access failed for %ps\n",
+ mbox, __builtin_return_address(0));
return -EAGAIN;
}
@@ -8906,7 +8933,8 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val
if (__intel_wait_for_register_fw(dev_priv,
GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
500, 0, NULL)) {
- DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
+ DRM_ERROR("timeout waiting for pcode read (from mbox %x) to finish for %ps\n",
+ mbox, __builtin_return_address(0));
return -ETIMEDOUT;
}
@@ -8919,8 +8947,8 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val
status = gen6_check_mailbox_status(dev_priv);
if (status) {
- DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed: %d\n",
- status);
+ DRM_DEBUG_DRIVER("warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n",
+ mbox, __builtin_return_address(0), status);
return status;
}
@@ -8940,7 +8968,8 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
*/
if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
- DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
+ DRM_DEBUG_DRIVER("warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps\n",
+ val, mbox, __builtin_return_address(0));
return -EAGAIN;
}
@@ -8951,7 +8980,8 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
if (__intel_wait_for_register_fw(dev_priv,
GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
500, 0, NULL)) {
- DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
+ DRM_ERROR("timeout waiting for pcode write of 0x%08x to mbox %x to finish for %ps\n",
+ val, mbox, __builtin_return_address(0));
return -ETIMEDOUT;
}
@@ -8963,8 +8993,8 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
status = gen6_check_mailbox_status(dev_priv);
if (status) {
- DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed: %d\n",
- status);
+ DRM_DEBUG_DRIVER("warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n",
+ val, mbox, __builtin_return_address(0), status);
return status;
}
@@ -9078,7 +9108,7 @@ static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
- if (IS_GEN9(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 9)
return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
GEN9_FREQ_SCALER);
else if (IS_CHERRYVIEW(dev_priv))
@@ -9091,7 +9121,7 @@ int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
- if (IS_GEN9(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 9)
return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
GT_FREQUENCY_MULTIPLIER);
else if (IS_CHERRYVIEW(dev_priv))
@@ -9113,7 +9143,7 @@ static void __intel_rps_boost_work(struct work_struct *work)
struct drm_i915_gem_request *req = boost->req;
if (!i915_gem_request_completed(req))
- gen6_rps_boost(req->i915, NULL, req->emitted_jiffies);
+ gen6_rps_boost(req, NULL);
i915_gem_request_put(req);
kfree(boost);
@@ -9142,11 +9172,10 @@ void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req)
void intel_pm_setup(struct drm_i915_private *dev_priv)
{
mutex_init(&dev_priv->rps.hw_lock);
- spin_lock_init(&dev_priv->rps.client_lock);
INIT_DELAYED_WORK(&dev_priv->rps.autoenable_work,
__intel_autoenable_gt_powersave);
- INIT_LIST_HEAD(&dev_priv->rps.clients);
+ atomic_set(&dev_priv->rps.num_waiters, 0);
dev_priv->pm.suspended = false;
atomic_set(&dev_priv->pm.wakeref_count, 0);
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 559f1ab42bfc..1b31ab002dae 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -315,6 +315,7 @@ static void intel_enable_source_psr1(struct intel_dp *intel_dp)
else
val |= EDP_PSR_TP1_TP2_SEL;
+ val |= I915_READ(EDP_PSR_CTL) & EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK;
I915_WRITE(EDP_PSR_CTL, val);
}
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen9.c b/drivers/gpu/drm/i915/intel_renderstate_gen9.c
index 16a7ec273bd9..7d3ac02f0177 100644
--- a/drivers/gpu/drm/i915/intel_renderstate_gen9.c
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen9.c
@@ -20,7 +20,7 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
- * Generated by: intel-gpu-tools-1.8-220-g01153e7
+ * Generated by: intel-gpu-tools-1.19-177-g68e2eab2
*/
#include "intel_renderstate.h"
@@ -873,7 +873,7 @@ static const u32 gen9_null_state_batch[] = {
0x00000000,
0x00000000,
0x78550003,
- 0x00000000,
+ 0x0000000f,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index acd1da9b62a3..cdf084ef5aae 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1712,6 +1712,9 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req,
unsigned int total_bytes;
u32 *cs;
+ /* Packets must be qword aligned. */
+ GEM_BUG_ON(num_dwords & 1);
+
total_bytes = bytes + req->reserved_space;
GEM_BUG_ON(total_bytes > ring->effective_size);
@@ -2140,7 +2143,7 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
engine->emit_breadcrumb = gen6_sema_emit_breadcrumb;
- num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1;
+ num_rings = INTEL_INFO(dev_priv)->num_rings - 1;
if (INTEL_GEN(dev_priv) >= 8) {
engine->emit_breadcrumb_sz += num_rings * 6;
} else {
@@ -2184,8 +2187,7 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
engine->semaphore.signal = gen8_rcs_signal;
- num_rings =
- hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1;
+ num_rings = INTEL_INFO(dev_priv)->num_rings - 1;
engine->emit_breadcrumb_sz += num_rings * 8;
}
} else if (INTEL_GEN(dev_priv) >= 6) {
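
The ring init code caches the engine count as num_rings instead of recomputing hweight32(ring_mask) - 1 each time, and intel_ring_begin() now asserts that dword counts are even because packets are qword aligned. A small illustration of both points, assuming a made-up ring mask:

#include <assert.h>
#include <stdio.h>

/* hweight32() is a population count; caching its result avoids rework. */
static unsigned int hweight32(unsigned int w)
{
	return (unsigned int)__builtin_popcount(w);
}

int main(void)
{
	unsigned int ring_mask = 0x1b;	/* hypothetical 4-engine mask */
	unsigned int num_rings = hweight32(ring_mask);
	unsigned int num_dwords = 6;

	assert(num_rings == 4);
	assert((num_dwords & 1) == 0);	/* qword-aligned emission */
	printf("num_rings=%u\n", num_rings);
	return 0;
}
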
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 6aa20ac8cde3..02d8974bf9ab 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -121,6 +121,7 @@ struct intel_engine_hangcheck {
unsigned long action_timestamp;
int deadlock;
struct intel_instdone instdone;
+ struct drm_i915_gem_request *active_request;
bool stalled;
};
@@ -734,4 +735,16 @@ bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
void intel_engines_mark_idle(struct drm_i915_private *i915);
void intel_engines_reset_default_submission(struct drm_i915_private *i915);
+static inline bool
+__intel_engine_can_store_dword(unsigned int gen, unsigned int class)
+{
+ if (gen <= 2)
+ return false; /* uses physical not virtual addresses */
+
+ if (gen == 6 && class == VIDEO_DECODE_CLASS)
+ return false; /* b0rked */
+
+ return true;
+}
+
#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index efe80ed5fd4d..b66d8e136aa3 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -50,10 +50,11 @@
*/
bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
- int power_well_id);
+ enum i915_power_well_id power_well_id);
static struct i915_power_well *
-lookup_power_well(struct drm_i915_private *dev_priv, int power_well_id);
+lookup_power_well(struct drm_i915_private *dev_priv,
+ enum i915_power_well_id power_well_id);
const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
@@ -168,18 +169,6 @@ static void intel_power_well_put(struct drm_i915_private *dev_priv,
intel_power_well_disable(dev_priv, power_well);
}
-/*
- * We should only use the power well if we explicitly asked the hardware to
- * enable it, so check if it's enabled and also check if we've requested it to
- * be enabled.
- */
-static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- return I915_READ(HSW_PWR_WELL_DRIVER) ==
- (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
-}
-
/**
* __intel_display_power_is_enabled - unlocked check for a power domain
* @dev_priv: i915 device instance
@@ -278,7 +267,8 @@ void intel_display_set_init_power(struct drm_i915_private *dev_priv,
* to be enabled, and it will only be disabled if none of the registers is
* requesting it to be enabled.
*/
-static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
+static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
+ u8 irq_pipe_mask, bool has_vga)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
@@ -292,264 +282,158 @@ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
* sure vgacon can keep working normally without triggering interrupts
* and error messages.
*/
- vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
- outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
- vga_put(pdev, VGA_RSRC_LEGACY_IO);
+ if (has_vga) {
+ vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
+ outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
+ vga_put(pdev, VGA_RSRC_LEGACY_IO);
+ }
- if (IS_BROADWELL(dev_priv))
- gen8_irq_power_well_post_enable(dev_priv,
- 1 << PIPE_C | 1 << PIPE_B);
+ if (irq_pipe_mask)
+ gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
}
-static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv)
+static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
+ u8 irq_pipe_mask)
{
- if (IS_BROADWELL(dev_priv))
- gen8_irq_power_well_pre_disable(dev_priv,
- 1 << PIPE_C | 1 << PIPE_B);
+ if (irq_pipe_mask)
+ gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
}
-static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
+
+static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ enum i915_power_well_id id = power_well->id;
+
+ /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
+ WARN_ON(intel_wait_for_register(dev_priv,
+ HSW_PWR_WELL_CTL_DRIVER(id),
+ HSW_PWR_WELL_CTL_STATE(id),
+ HSW_PWR_WELL_CTL_STATE(id),
+ 1));
+}
+
+static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
+ enum i915_power_well_id id)
+{
+ u32 req_mask = HSW_PWR_WELL_CTL_REQ(id);
+ u32 ret;
+
+ ret = I915_READ(HSW_PWR_WELL_CTL_BIOS(id)) & req_mask ? 1 : 0;
+ ret |= I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) & req_mask ? 2 : 0;
+ ret |= I915_READ(HSW_PWR_WELL_CTL_KVMR) & req_mask ? 4 : 0;
+ ret |= I915_READ(HSW_PWR_WELL_CTL_DEBUG(id)) & req_mask ? 8 : 0;
+
+ return ret;
+}
+
+static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum i915_power_well_id id = power_well->id;
+ bool disabled;
+ u32 reqs;
/*
- * After we re-enable the power well, if we touch VGA register 0x3d5
- * we'll get unclaimed register interrupts. This stops after we write
- * anything to the VGA MSR register. The vgacon module uses this
- * register all the time, so if we unbind our driver and, as a
- * consequence, bind vgacon, we'll get stuck in an infinite loop at
- * console_unlock(). So make here we touch the VGA MSR register, making
- * sure vgacon can keep working normally without triggering interrupts
- * and error messages.
+ * Bspec doesn't require waiting for PWs to get disabled, but still do
+ * this for paranoia. The known cases where a PW will be forced on:
+ * - a KVMR request on any power well via the KVMR request register
+ * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
+ * DEBUG request registers
+ * Skip the wait in case any of the request bits are set and print a
+ * diagnostic message.
*/
- if (power_well->id == SKL_DISP_PW_2) {
- vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
- outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
- vga_put(pdev, VGA_RSRC_LEGACY_IO);
+ wait_for((disabled = !(I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) &
+ HSW_PWR_WELL_CTL_STATE(id))) ||
+ (reqs = hsw_power_well_requesters(dev_priv, id)), 1);
+ if (disabled)
+ return;
- gen8_irq_power_well_post_enable(dev_priv,
- 1 << PIPE_C | 1 << PIPE_B);
- }
+ DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
+ power_well->name,
+ !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
}
-static void skl_power_well_pre_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
+static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
+ enum skl_power_gate pg)
{
- if (power_well->id == SKL_DISP_PW_2)
- gen8_irq_power_well_pre_disable(dev_priv,
- 1 << PIPE_C | 1 << PIPE_B);
+ /* Timeout 5us for PG#0, for other PGs 1us */
+ WARN_ON(intel_wait_for_register(dev_priv, SKL_FUSE_STATUS,
+ SKL_FUSE_PG_DIST_STATUS(pg),
+ SKL_FUSE_PG_DIST_STATUS(pg), 1));
}
-static void hsw_set_power_well(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well, bool enable)
+static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
{
- bool is_enabled, enable_requested;
- uint32_t tmp;
-
- tmp = I915_READ(HSW_PWR_WELL_DRIVER);
- is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
- enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
-
- if (enable) {
- if (!enable_requested)
- I915_WRITE(HSW_PWR_WELL_DRIVER,
- HSW_PWR_WELL_ENABLE_REQUEST);
-
- if (!is_enabled) {
- DRM_DEBUG_KMS("Enabling power well\n");
- if (intel_wait_for_register(dev_priv,
- HSW_PWR_WELL_DRIVER,
- HSW_PWR_WELL_STATE_ENABLED,
- HSW_PWR_WELL_STATE_ENABLED,
- 20))
- DRM_ERROR("Timeout enabling power well\n");
- hsw_power_well_post_enable(dev_priv);
- }
+ enum i915_power_well_id id = power_well->id;
+ bool wait_fuses = power_well->hsw.has_fuses;
+ enum skl_power_gate pg;
+ u32 val;
- } else {
- if (enable_requested) {
- hsw_power_well_pre_disable(dev_priv);
- I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
- POSTING_READ(HSW_PWR_WELL_DRIVER);
- DRM_DEBUG_KMS("Requesting to disable the power well\n");
- }
+ if (wait_fuses) {
+ pg = SKL_PW_TO_PG(id);
+ /*
+ * For PW1 we have to wait both for the PW0/PG0 fuse state
+ * before enabling the power well and PW1/PG1's own fuse
+ * state after the enabling. For all other power wells with
+ * fuses we only have to wait for that PW/PG's fuse state
+ * after the enabling.
+ */
+ if (pg == SKL_PG1)
+ gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
}
+
+ val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id));
+ I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id), val | HSW_PWR_WELL_CTL_REQ(id));
+ hsw_wait_for_power_well_enable(dev_priv, power_well);
+
+ if (wait_fuses)
+ gen9_wait_for_power_well_fuses(dev_priv, pg);
+
+ hsw_power_well_post_enable(dev_priv, power_well->hsw.irq_pipe_mask,
+ power_well->hsw.has_vga);
}
-#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_AUX_D) | \
- BIT_ULL(POWER_DOMAIN_AUDIO) | \
- BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
- SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_MODESET) | \
- BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_INIT))
+static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum i915_power_well_id id = power_well->id;
+ u32 val;
-#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_AUDIO) | \
- BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_GMBUS) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \
- BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_MODESET) | \
- BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define BXT_DPIO_CMN_A_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_INIT))
+ hsw_power_well_pre_disable(dev_priv, power_well->hsw.irq_pipe_mask);
-#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_AUDIO) | \
- BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
-#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
-#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
-#define GLK_DPIO_CMN_A_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define GLK_DPIO_CMN_B_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define GLK_DPIO_CMN_C_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define GLK_DISPLAY_AUX_C_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS ( \
- GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_MODESET) | \
- BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_INIT))
+ val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id));
+ I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id),
+ val & ~HSW_PWR_WELL_CTL_REQ(id));
+ hsw_wait_for_power_well_disable(dev_priv, power_well);
+}
-#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_AUX_D) | \
- BIT_ULL(POWER_DOMAIN_AUDIO) | \
- BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_AUX_A_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_AUX_B_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_AUX_C_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_AUX_D_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_D) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
- CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_MODESET) | \
- BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_INIT))
+/*
+ * We should only use the power well if we explicitly asked the hardware to
+ * enable it, so check if it's enabled and also check if we've requested it to
+ * be enabled.
+ */
+static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum i915_power_well_id id = power_well->id;
+ u32 mask = HSW_PWR_WELL_CTL_REQ(id) | HSW_PWR_WELL_CTL_STATE(id);
+
+ return (I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) & mask) == mask;
+}
static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
+ enum i915_power_well_id id = SKL_DISP_PW_2;
+
WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
"DC9 already programmed to be enabled.\n");
WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
"DC5 still not disabled to enable DC9.\n");
- WARN_ONCE(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n");
+ WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)) &
+ HSW_PWR_WELL_CTL_REQ(id),
+ "Power well 2 on.\n");
WARN_ONCE(intel_irqs_enabled(dev_priv),
"Interrupts not disabled yet.\n");
@@ -744,223 +628,39 @@ void skl_disable_dc6(struct drm_i915_private *dev_priv)
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
}
-static void
-gen9_sanitize_power_well_requests(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- enum skl_disp_power_wells power_well_id = power_well->id;
- u32 val;
- u32 mask;
-
- mask = SKL_POWER_WELL_REQ(power_well_id);
-
- val = I915_READ(HSW_PWR_WELL_KVMR);
- if (WARN_ONCE(val & mask, "Clearing unexpected KVMR request for %s\n",
- power_well->name))
- I915_WRITE(HSW_PWR_WELL_KVMR, val & ~mask);
-
- val = I915_READ(HSW_PWR_WELL_BIOS);
- val |= I915_READ(HSW_PWR_WELL_DEBUG);
-
- if (!(val & mask))
- return;
-
- /*
- * DMC is known to force on the request bits for power well 1 on SKL
- * and BXT and the misc IO power well on SKL but we don't expect any
- * other request bits to be set, so WARN for those.
- */
- if (power_well_id == SKL_DISP_PW_1 ||
- (IS_GEN9_BC(dev_priv) &&
- power_well_id == SKL_DISP_PW_MISC_IO))
- DRM_DEBUG_DRIVER("Clearing auxiliary requests for %s forced on "
- "by DMC\n", power_well->name);
- else
- WARN_ONCE(1, "Clearing unexpected auxiliary requests for %s\n",
- power_well->name);
-
- I915_WRITE(HSW_PWR_WELL_BIOS, val & ~mask);
- I915_WRITE(HSW_PWR_WELL_DEBUG, val & ~mask);
-}
-
-static void skl_set_power_well(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well, bool enable)
-{
- uint32_t tmp, fuse_status;
- uint32_t req_mask, state_mask;
- bool is_enabled, enable_requested, check_fuse_status = false;
-
- tmp = I915_READ(HSW_PWR_WELL_DRIVER);
- fuse_status = I915_READ(SKL_FUSE_STATUS);
-
- switch (power_well->id) {
- case SKL_DISP_PW_1:
- if (intel_wait_for_register(dev_priv,
- SKL_FUSE_STATUS,
- SKL_FUSE_PG0_DIST_STATUS,
- SKL_FUSE_PG0_DIST_STATUS,
- 1)) {
- DRM_ERROR("PG0 not enabled\n");
- return;
- }
- break;
- case SKL_DISP_PW_2:
- if (!(fuse_status & SKL_FUSE_PG1_DIST_STATUS)) {
- DRM_ERROR("PG1 in disabled state\n");
- return;
- }
- break;
- case SKL_DISP_PW_MISC_IO:
- case SKL_DISP_PW_DDI_A_E: /* GLK_DISP_PW_DDI_A, CNL_DISP_PW_DDI_A */
- case SKL_DISP_PW_DDI_B:
- case SKL_DISP_PW_DDI_C:
- case SKL_DISP_PW_DDI_D:
- case GLK_DISP_PW_AUX_A: /* CNL_DISP_PW_AUX_A */
- case GLK_DISP_PW_AUX_B: /* CNL_DISP_PW_AUX_B */
- case GLK_DISP_PW_AUX_C: /* CNL_DISP_PW_AUX_C */
- case CNL_DISP_PW_AUX_D:
- break;
- default:
- WARN(1, "Unknown power well %lu\n", power_well->id);
- return;
- }
-
- req_mask = SKL_POWER_WELL_REQ(power_well->id);
- enable_requested = tmp & req_mask;
- state_mask = SKL_POWER_WELL_STATE(power_well->id);
- is_enabled = tmp & state_mask;
-
- if (!enable && enable_requested)
- skl_power_well_pre_disable(dev_priv, power_well);
-
- if (enable) {
- if (!enable_requested) {
- WARN((tmp & state_mask) &&
- !I915_READ(HSW_PWR_WELL_BIOS),
- "Invalid for power well status to be enabled, unless done by the BIOS, \
- when request is to disable!\n");
- I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
- }
-
- if (!is_enabled) {
- DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
- check_fuse_status = true;
- }
- } else {
- if (enable_requested) {
- I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
- POSTING_READ(HSW_PWR_WELL_DRIVER);
- DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
- }
-
- gen9_sanitize_power_well_requests(dev_priv, power_well);
- }
-
- if (wait_for(!!(I915_READ(HSW_PWR_WELL_DRIVER) & state_mask) == enable,
- 1))
- DRM_ERROR("%s %s timeout\n",
- power_well->name, enable ? "enable" : "disable");
-
- if (check_fuse_status) {
- if (power_well->id == SKL_DISP_PW_1) {
- if (intel_wait_for_register(dev_priv,
- SKL_FUSE_STATUS,
- SKL_FUSE_PG1_DIST_STATUS,
- SKL_FUSE_PG1_DIST_STATUS,
- 1))
- DRM_ERROR("PG1 distributing status timeout\n");
- } else if (power_well->id == SKL_DISP_PW_2) {
- if (intel_wait_for_register(dev_priv,
- SKL_FUSE_STATUS,
- SKL_FUSE_PG2_DIST_STATUS,
- SKL_FUSE_PG2_DIST_STATUS,
- 1))
- DRM_ERROR("PG2 distributing status timeout\n");
- }
- }
-
- if (enable && !is_enabled)
- skl_power_well_post_enable(dev_priv, power_well);
-}
-
static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- /* Take over the request bit if set by BIOS. */
- if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST) {
- if (!(I915_READ(HSW_PWR_WELL_DRIVER) &
- HSW_PWR_WELL_ENABLE_REQUEST))
- I915_WRITE(HSW_PWR_WELL_DRIVER,
- HSW_PWR_WELL_ENABLE_REQUEST);
- I915_WRITE(HSW_PWR_WELL_BIOS, 0);
- }
-}
-
-static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- hsw_set_power_well(dev_priv, power_well, true);
-}
-
-static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- hsw_set_power_well(dev_priv, power_well, false);
-}
-
-static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- uint32_t mask = SKL_POWER_WELL_REQ(power_well->id) |
- SKL_POWER_WELL_STATE(power_well->id);
-
- return (I915_READ(HSW_PWR_WELL_DRIVER) & mask) == mask;
-}
-
-static void skl_power_well_sync_hw(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- uint32_t mask = SKL_POWER_WELL_REQ(power_well->id);
- uint32_t bios_req = I915_READ(HSW_PWR_WELL_BIOS);
+ enum i915_power_well_id id = power_well->id;
+ u32 mask = HSW_PWR_WELL_CTL_REQ(id);
+ u32 bios_req = I915_READ(HSW_PWR_WELL_CTL_BIOS(id));
/* Take over the request bit if set by BIOS. */
if (bios_req & mask) {
- uint32_t drv_req = I915_READ(HSW_PWR_WELL_DRIVER);
+ u32 drv_req = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id));
if (!(drv_req & mask))
- I915_WRITE(HSW_PWR_WELL_DRIVER, drv_req | mask);
- I915_WRITE(HSW_PWR_WELL_BIOS, bios_req & ~mask);
+ I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id), drv_req | mask);
+ I915_WRITE(HSW_PWR_WELL_CTL_BIOS(id), bios_req & ~mask);
}
}
-static void skl_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- skl_set_power_well(dev_priv, power_well, true);
-}
-
-static void skl_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- skl_set_power_well(dev_priv, power_well, false);
-}
-
static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- bxt_ddi_phy_init(dev_priv, power_well->data);
+ bxt_ddi_phy_init(dev_priv, power_well->bxt.phy);
}
static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- bxt_ddi_phy_uninit(dev_priv, power_well->data);
+ bxt_ddi_phy_uninit(dev_priv, power_well->bxt.phy);
}
static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- return bxt_ddi_phy_is_enabled(dev_priv, power_well->data);
+ return bxt_ddi_phy_is_enabled(dev_priv, power_well->bxt.phy);
}
static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
@@ -969,16 +669,16 @@ static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
if (power_well->count > 0)
- bxt_ddi_phy_verify_state(dev_priv, power_well->data);
+ bxt_ddi_phy_verify_state(dev_priv, power_well->bxt.phy);
power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC);
if (power_well->count > 0)
- bxt_ddi_phy_verify_state(dev_priv, power_well->data);
+ bxt_ddi_phy_verify_state(dev_priv, power_well->bxt.phy);
if (IS_GEMINILAKE(dev_priv)) {
power_well = lookup_power_well(dev_priv, GLK_DPIO_CMN_C);
if (power_well->count > 0)
- bxt_ddi_phy_verify_state(dev_priv, power_well->data);
+ bxt_ddi_phy_verify_state(dev_priv, power_well->bxt.phy);
}
}
@@ -1076,7 +776,7 @@ static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
static void vlv_set_power_well(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well, bool enable)
{
- enum punit_power_well power_well_id = power_well->id;
+ enum i915_power_well_id power_well_id = power_well->id;
u32 mask;
u32 state;
u32 ctrl;
@@ -1124,7 +824,7 @@ static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- int power_well_id = power_well->id;
+ enum i915_power_well_id power_well_id = power_well->id;
bool enabled = false;
u32 mask;
u32 state;
@@ -1311,8 +1011,9 @@ static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
-static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
- int power_well_id)
+static struct i915_power_well *
+lookup_power_well(struct drm_i915_private *dev_priv,
+ enum i915_power_well_id power_well_id)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
int i;
@@ -1659,7 +1360,7 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder,
static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- enum pipe pipe = power_well->id;
+ enum pipe pipe = PIPE_A;
bool enabled;
u32 state, ctrl;
@@ -1689,7 +1390,7 @@ static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well,
bool enable)
{
- enum pipe pipe = power_well->id;
+ enum pipe pipe = PIPE_A;
u32 state;
u32 ctrl;
@@ -1722,7 +1423,7 @@ out:
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- WARN_ON_ONCE(power_well->id != PIPE_A);
+ WARN_ON_ONCE(power_well->id != CHV_DISP_PW_PIPE_A);
chv_set_pipe_power_well(dev_priv, power_well, true);
@@ -1732,7 +1433,7 @@ static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- WARN_ON_ONCE(power_well->id != PIPE_A);
+ WARN_ON_ONCE(power_well->id != CHV_DISP_PW_PIPE_A);
vlv_display_power_well_deinit(dev_priv);
@@ -1848,37 +1549,13 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
intel_runtime_pm_put(dev_priv);
}
-#define HSW_DISPLAY_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C) | \
- BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
- BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_AUDIO) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define BDW_DISPLAY_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
- BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_AUDIO) | \
+#define I830_PIPES_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PIPE_A) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define VLV_DISPLAY_POWER_DOMAINS ( \
@@ -1961,13 +1638,201 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_AUX_D) | \
BIT_ULL(POWER_DOMAIN_INIT))
-#define I830_PIPES_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PIPE_A) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
+#define HSW_DISPLAY_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PIPE_B) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
+ BIT_ULL(POWER_DOMAIN_VGA) | \
+ BIT_ULL(POWER_DOMAIN_AUDIO) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define BDW_DISPLAY_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PIPE_B) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
+ BIT_ULL(POWER_DOMAIN_VGA) | \
+ BIT_ULL(POWER_DOMAIN_AUDIO) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_AUX_D) | \
+ BIT_ULL(POWER_DOMAIN_AUDIO) | \
+ BIT_ULL(POWER_DOMAIN_VGA) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
+ SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_MODESET) | \
+ BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_AUDIO) | \
+ BIT_ULL(POWER_DOMAIN_VGA) | \
+ BIT_ULL(POWER_DOMAIN_GMBUS) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \
+ BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_MODESET) | \
+ BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define BXT_DPIO_CMN_A_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_AUDIO) | \
+ BIT_ULL(POWER_DOMAIN_VGA) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
+#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
+#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
+#define GLK_DPIO_CMN_A_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define GLK_DPIO_CMN_B_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define GLK_DPIO_CMN_C_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define GLK_DISPLAY_AUX_C_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS ( \
+ GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_MODESET) | \
+ BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_AUX_D) | \
+ BIT_ULL(POWER_DOMAIN_AUDIO) | \
+ BIT_ULL(POWER_DOMAIN_VGA) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define CNL_DISPLAY_AUX_A_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define CNL_DISPLAY_AUX_B_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define CNL_DISPLAY_AUX_C_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define CNL_DISPLAY_AUX_D_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_D) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
+ CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_MODESET) | \
+ BIT_ULL(POWER_DOMAIN_AUX_A) | \
BIT_ULL(POWER_DOMAIN_INIT))
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
@@ -1997,6 +1862,7 @@ static struct i915_power_well i9xx_always_on_power_well[] = {
.always_on = 1,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
+ .id = I915_DISP_PW_ALWAYS_ON,
},
};
@@ -2013,11 +1879,13 @@ static struct i915_power_well i830_power_wells[] = {
.always_on = 1,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
+ .id = I915_DISP_PW_ALWAYS_ON,
},
{
.name = "pipes",
.domains = I830_PIPES_POWER_DOMAINS,
.ops = &i830_pipes_power_well_ops,
+ .id = I830_DISP_PW_PIPES,
},
};
@@ -2028,13 +1896,6 @@ static const struct i915_power_well_ops hsw_power_well_ops = {
.is_enabled = hsw_power_well_enabled,
};
-static const struct i915_power_well_ops skl_power_well_ops = {
- .sync_hw = skl_power_well_sync_hw,
- .enable = skl_power_well_enable,
- .disable = skl_power_well_disable,
- .is_enabled = skl_power_well_enabled,
-};
-
static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
.sync_hw = i9xx_power_well_sync_hw_noop,
.enable = gen9_dc_off_power_well_enable,
@@ -2055,11 +1916,16 @@ static struct i915_power_well hsw_power_wells[] = {
.always_on = 1,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
+ .id = I915_DISP_PW_ALWAYS_ON,
},
{
.name = "display",
.domains = HSW_DISPLAY_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
+ .id = HSW_DISP_PW_GLOBAL,
+ {
+ .hsw.has_vga = true,
+ },
},
};
@@ -2069,11 +1935,17 @@ static struct i915_power_well bdw_power_wells[] = {
.always_on = 1,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
+ .id = I915_DISP_PW_ALWAYS_ON,
},
{
.name = "display",
.domains = BDW_DISPLAY_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
+ .id = HSW_DISP_PW_GLOBAL,
+ {
+ .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
+ .hsw.has_vga = true,
+ },
},
};
@@ -2104,7 +1976,7 @@ static struct i915_power_well vlv_power_wells[] = {
.always_on = 1,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
- .id = PUNIT_POWER_WELL_ALWAYS_ON,
+ .id = I915_DISP_PW_ALWAYS_ON,
},
{
.name = "display",
@@ -2162,6 +2034,7 @@ static struct i915_power_well chv_power_wells[] = {
.always_on = 1,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
+ .id = I915_DISP_PW_ALWAYS_ON,
},
{
.name = "display",
@@ -2171,7 +2044,7 @@ static struct i915_power_well chv_power_wells[] = {
* required for any pipe to work.
*/
.domains = CHV_DISPLAY_POWER_DOMAINS,
- .id = PIPE_A,
+ .id = CHV_DISP_PW_PIPE_A,
.ops = &chv_pipe_power_well_ops,
},
{
@@ -2189,7 +2062,7 @@ static struct i915_power_well chv_power_wells[] = {
};
bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
- int power_well_id)
+ enum i915_power_well_id power_well_id)
{
struct i915_power_well *power_well;
bool ret;
@@ -2206,20 +2079,23 @@ static struct i915_power_well skl_power_wells[] = {
.always_on = 1,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
- .id = SKL_DISP_PW_ALWAYS_ON,
+ .id = I915_DISP_PW_ALWAYS_ON,
},
{
.name = "power well 1",
/* Handled by the DMC firmware */
.domains = 0,
- .ops = &skl_power_well_ops,
+ .ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_1,
+ {
+ .hsw.has_fuses = true,
+ },
},
{
.name = "MISC IO power well",
/* Handled by the DMC firmware */
.domains = 0,
- .ops = &skl_power_well_ops,
+ .ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_MISC_IO,
},
{
@@ -2231,31 +2107,36 @@ static struct i915_power_well skl_power_wells[] = {
{
.name = "power well 2",
.domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
- .ops = &skl_power_well_ops,
+ .ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_2,
+ {
+ .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
+ .hsw.has_vga = true,
+ .hsw.has_fuses = true,
+ },
},
{
.name = "DDI A/E IO power well",
.domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
- .ops = &skl_power_well_ops,
+ .ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_DDI_A_E,
},
{
.name = "DDI B IO power well",
.domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
- .ops = &skl_power_well_ops,
+ .ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_DDI_B,
},
{
.name = "DDI C IO power well",
.domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
- .ops = &skl_power_well_ops,
+ .ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_DDI_C,
},
{
.name = "DDI D IO power well",
.domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
- .ops = &skl_power_well_ops,
+ .ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_DDI_D,
},
};
@@ -2266,12 +2147,16 @@ static struct i915_power_well bxt_power_wells[] = {
.always_on = 1,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
+ .id = I915_DISP_PW_ALWAYS_ON,
},
{
.name = "power well 1",
.domains = 0,
- .ops = &skl_power_well_ops,
+ .ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_1,
+ {
+ .hsw.has_fuses = true,
+ },
},
{
.name = "DC off",
@@ -2282,22 +2167,31 @@ static struct i915_power_well bxt_power_wells[] = {
{
.name = "power well 2",
.domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
- .ops = &skl_power_well_ops,
+ .ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_2,
+ {
+ .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
+ .hsw.has_vga = true,
+ .hsw.has_fuses = true,
+ },
},
{
.name = "dpio-common-a",
.domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
.ops = &bxt_dpio_cmn_power_well_ops,
.id = BXT_DPIO_CMN_A,
- .data = DPIO_PHY1,
+ {
+ .bxt.phy = DPIO_PHY1,
+ },
},
{
.name = "dpio-common-bc",
.domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
.ops = &bxt_dpio_cmn_power_well_ops,
.id = BXT_DPIO_CMN_BC,
- .data = DPIO_PHY0,
+ {
+ .bxt.phy = DPIO_PHY0,
+ },
},
};
@@ -2307,13 +2201,17 @@ static struct i915_power_well glk_power_wells[] = {
.always_on = 1,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
+ .id = I915_DISP_PW_ALWAYS_ON,
},
{
.name = "power well 1",
/* Handled by the DMC firmware */
.domains = 0,
- .ops = &skl_power_well_ops,
+ .ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_1,
+ {
+ .hsw.has_fuses = true,
+ },
},
{
.name = "DC off",
@@ -2324,64 +2222,75 @@ static struct i915_power_well glk_power_wells[] = {
{
.name = "power well 2",
.domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
- .ops = &skl_power_well_ops,
+ .ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_2,
+ {
+ .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
+ .hsw.has_vga = true,
+ .hsw.has_fuses = true,
+ },
},
{
.name = "dpio-common-a",
.domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
.ops = &bxt_dpio_cmn_power_well_ops,
.id = BXT_DPIO_CMN_A,
- .data = DPIO_PHY1,
+ {
+ .bxt.phy = DPIO_PHY1,
+ },
},
{
.name = "dpio-common-b",
.domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
.ops = &bxt_dpio_cmn_power_well_ops,
.id = BXT_DPIO_CMN_BC,
- .data = DPIO_PHY0,
+ {
+ .bxt.phy = DPIO_PHY0,
+ },
},
{
.name = "dpio-common-c",
.domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
.ops = &bxt_dpio_cmn_power_well_ops,
.id = GLK_DPIO_CMN_C,
- .data = DPIO_PHY2,
+ {
+ .bxt.phy = DPIO_PHY2,
+ },
},
{
.name = "AUX A",
.domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
- .ops = &skl_power_well_ops,
+ .ops = &hsw_power_well_ops,
.id = GLK_DISP_PW_AUX_A,
},
{
.name = "AUX B",
.domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
- .ops = &skl_power_well_ops,
+ .ops = &hsw_power_well_ops,
.id = GLK_DISP_PW_AUX_B,
},
{
.name = "AUX C",
.domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
- .ops = &skl_power_well_ops,
+ .ops = &hsw_power_well_ops,
.id = GLK_DISP_PW_AUX_C,
},
{
.name = "DDI A IO power well",
.domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
- .ops = &skl_power_well_ops,
+ .ops = &hsw_power_well_ops,
.id = GLK_DISP_PW_DDI_A,
},
{
.name = "DDI B IO power well",
.domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
- .ops = &skl_power_well_ops,
+ .ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_DDI_B,
},
{
.name = "DDI C IO power well",
.domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
- .ops = &skl_power_well_ops,
+ .ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_DDI_C,
},
};
@@ -2392,36 +2301,40 @@ static struct i915_power_well cnl_power_wells[] = {
.always_on = 1,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
+ .id = I915_DISP_PW_ALWAYS_ON,
},
{
.name = "power well 1",
/* Handled by the DMC firmware */
.domains = 0,
- .ops = &skl_power_well_ops,
+ .ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_1,
+ {
+ .hsw.has_fuses = true,
+ },
},
{
.name = "AUX A",
.domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
- .ops = &skl_power_well_ops,
+ .ops = &hsw_power_well_ops,
.id = CNL_DISP_PW_AUX_A,
},
{
.name = "AUX B",
.domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
- .ops = &skl_power_well_ops,
+ .ops = &hsw_power_well_ops,
.id = CNL_DISP_PW_AUX_B,
},
{
.name = "AUX C",
.domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
- .ops = &skl_power_well_ops,
+ .ops = &hsw_power_well_ops,
.id = CNL_DISP_PW_AUX_C,
},
{
.name = "AUX D",
.domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
- .ops = &skl_power_well_ops,
+ .ops = &hsw_power_well_ops,
.id = CNL_DISP_PW_AUX_D,
},
{
@@ -2433,31 +2346,36 @@ static struct i915_power_well cnl_power_wells[] = {
{
.name = "power well 2",
.domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
- .ops = &skl_power_well_ops,
+ .ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_2,
+ {
+ .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
+ .hsw.has_vga = true,
+ .hsw.has_fuses = true,
+ },
},
{
.name = "DDI A IO power well",
.domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
- .ops = &skl_power_well_ops,
+ .ops = &hsw_power_well_ops,
.id = CNL_DISP_PW_DDI_A,
},
{
.name = "DDI B IO power well",
.domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
- .ops = &skl_power_well_ops,
+ .ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_DDI_B,
},
{
.name = "DDI C IO power well",
.domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
- .ops = &skl_power_well_ops,
+ .ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_DDI_C,
},
{
.name = "DDI D IO power well",
.domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
- .ops = &skl_power_well_ops,
+ .ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_DDI_D,
},
};
@@ -2479,7 +2397,7 @@ static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
int requested_dc;
int max_dc;
- if (IS_GEN9_BC(dev_priv)) {
+ if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
max_dc = 2;
mask = 0;
} else if (IS_GEN9_LP(dev_priv)) {
@@ -2521,6 +2439,22 @@ static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
return mask;
}
+static void assert_power_well_ids_unique(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ u64 power_well_ids;
+ int i;
+
+ power_well_ids = 0;
+ for (i = 0; i < power_domains->power_well_count; i++) {
+ enum i915_power_well_id id = power_domains->power_wells[i].id;
+
+ WARN_ON(id >= sizeof(power_well_ids) * 8);
+ WARN_ON(power_well_ids & BIT_ULL(id));
+ power_well_ids |= BIT_ULL(id);
+ }
+}
+
#define set_power_wells(power_domains, __power_wells) ({ \
(power_domains)->power_wells = (__power_wells); \
(power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
@@ -2572,6 +2506,8 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
set_power_wells(power_domains, i9xx_always_on_power_well);
}
+ assert_power_well_ids_unique(dev_priv);
+
return 0;
}
@@ -2694,13 +2630,18 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
mutex_lock(&power_domains->lock);
- well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
- intel_power_well_disable(dev_priv, well);
-
+ /*
+ * BSpec says to keep the MISC IO power well enabled here, and to only
+ * remove our request for power well 1.
+ * Note that even though the driver's request is removed, power well 1
+ * may stay enabled after this due to DMC's own request on it.
+ */
well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
intel_power_well_disable(dev_priv, well);
mutex_unlock(&power_domains->lock);
+
+ usleep_range(10, 30); /* 10 us delay per Bspec */
}
void bxt_display_core_init(struct drm_i915_private *dev_priv,
@@ -2751,13 +2692,19 @@ void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
/* The spec doesn't call for removing the reset handshake flag */
- /* Disable PG1 */
+ /*
+ * Disable PW1 (PG1).
+ * Note that even though the driver's request is removed, power well 1
+ * may stay enabled after this due to DMC's own request on it.
+ */
mutex_lock(&power_domains->lock);
well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
intel_power_well_disable(dev_priv, well);
mutex_unlock(&power_domains->lock);
+
+ usleep_range(10, 30); /* 10 us delay per Bspec */
}
#define CNL_PROCMON_IDX(val) \
@@ -2796,7 +2743,7 @@ static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume
/* 2. Enable Comp */
val = I915_READ(CHICKEN_MISC_2);
- val &= ~COMP_PWR_DOWN;
+ val &= ~CNL_COMP_PWR_DOWN;
I915_WRITE(CHICKEN_MISC_2, val);
val = I915_READ(CNL_PORT_COMP_DW3);
@@ -2821,7 +2768,10 @@ static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume
val |= CL_POWER_DOWN_ENABLE;
I915_WRITE(CNL_PORT_CL1CM_DW5, val);
- /* 4. Enable Power Well 1 (PG1) and Aux IO Power */
+ /*
+ * 4. Enable Power Well 1 (PG1).
+ * The AUX IO power wells will be enabled on demand.
+ */
mutex_lock(&power_domains->lock);
well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
intel_power_well_enable(dev_priv, well);
@@ -2853,15 +2803,21 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
/* 3. Disable CD clock */
cnl_uninit_cdclk(dev_priv);
- /* 4. Disable Power Well 1 (PG1) and Aux IO Power */
+ /*
+ * 4. Disable Power Well 1 (PG1).
+ * The AUX IO power wells are toggled on demand, so they are already
+ * disabled at this point.
+ */
mutex_lock(&power_domains->lock);
well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
intel_power_well_disable(dev_priv, well);
mutex_unlock(&power_domains->lock);
+ usleep_range(10, 30); /* 10 us delay per Bspec */
+
/* 5. Disable Comp */
val = I915_READ(CHICKEN_MISC_2);
- val |= COMP_PWR_DOWN;
+ val |= CNL_COMP_PWR_DOWN;
I915_WRITE(CHICKEN_MISC_2, val);
}
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 3f8f30b412cd..3dc38c2ef4c3 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -451,23 +451,24 @@ static const char * const cmd_status_names[] = {
"Scaling not supported"
};
-static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
- const void *args, int args_len)
+static bool __intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
+ const void *args, int args_len,
+ bool unlocked)
{
u8 *buf, status;
struct i2c_msg *msgs;
int i, ret = true;
- /* Would be simpler to allocate both in one go ? */
+ /* Would be simpler to allocate both in one go ? */
buf = kzalloc(args_len * 2 + 2, GFP_KERNEL);
if (!buf)
return false;
msgs = kcalloc(args_len + 3, sizeof(*msgs), GFP_KERNEL);
if (!msgs) {
- kfree(buf);
+ kfree(buf);
return false;
- }
+ }
intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
@@ -498,7 +499,10 @@ static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
msgs[i+2].len = 1;
msgs[i+2].buf = &status;
- ret = i2c_transfer(intel_sdvo->i2c, msgs, i+3);
+ if (unlocked)
+ ret = i2c_transfer(intel_sdvo->i2c, msgs, i+3);
+ else
+ ret = __i2c_transfer(intel_sdvo->i2c, msgs, i+3);
if (ret < 0) {
DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
ret = false;
@@ -516,6 +520,12 @@ out:
return ret;
}
+static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
+ const void *args, int args_len)
+{
+ return __intel_sdvo_write_cmd(intel_sdvo, cmd, args, args_len, true);
+}
+
static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
void *response, int response_len)
{
@@ -602,13 +612,13 @@ static int intel_sdvo_get_pixel_multiplier(const struct drm_display_mode *adjust
return 4;
}
-static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
- u8 ddc_bus)
+static bool __intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
+ u8 ddc_bus)
{
/* This must be the immediately preceding write before the i2c xfer */
- return intel_sdvo_write_cmd(intel_sdvo,
- SDVO_CMD_SET_CONTROL_BUS_SWITCH,
- &ddc_bus, 1);
+ return __intel_sdvo_write_cmd(intel_sdvo,
+ SDVO_CMD_SET_CONTROL_BUS_SWITCH,
+ &ddc_bus, 1, false);
}
static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len)
@@ -996,7 +1006,8 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
ssize_t len;
ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
- &pipe_config->base.adjusted_mode);
+ &pipe_config->base.adjusted_mode,
+ false);
if (ret < 0) {
DRM_ERROR("couldn't fill AVI infoframe\n");
return false;
@@ -1343,13 +1354,15 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
}
- if (INTEL_PCH_TYPE(dev_priv) >= PCH_CPT)
+ if (HAS_PCH_CPT(dev_priv))
sdvox |= SDVO_PIPE_SEL_CPT(crtc->pipe);
else
sdvox |= SDVO_PIPE_SEL(crtc->pipe);
- if (crtc_state->has_audio)
+ if (crtc_state->has_audio) {
+ WARN_ON_ONCE(INTEL_GEN(dev_priv) < 4);
sdvox |= SDVO_AUDIO_ENABLE;
+ }
if (INTEL_GEN(dev_priv) >= 4) {
/* done in crtc_mode_set as the dpll_md reg must be written early */
@@ -1479,6 +1492,9 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
if (sdvox & HDMI_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;
+ if (sdvox & SDVO_AUDIO_ENABLE)
+ pipe_config->has_audio = true;
+
if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE,
&val, 1)) {
if (val == SDVO_ENCODE_HDMI)
@@ -2192,10 +2208,8 @@ intel_sdvo_connector_duplicate_state(struct drm_connector *connector)
}
static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.detect = intel_sdvo_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
- .set_property = drm_atomic_helper_connector_set_property,
.atomic_get_property = intel_sdvo_connector_atomic_get_property,
.atomic_set_property = intel_sdvo_connector_atomic_set_property,
.late_register = intel_sdvo_connector_register,
@@ -2454,6 +2468,7 @@ static bool
intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
{
struct drm_encoder *encoder = &intel_sdvo->base.base;
+ struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct drm_connector *connector;
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
struct intel_connector *intel_connector;
@@ -2489,7 +2504,9 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
connector->connector_type = DRM_MODE_CONNECTOR_DVID;
- if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) {
+ /* gen3 doesn't do the hdmi bits in the SDVO register */
+ if (INTEL_GEN(dev_priv) >= 4 &&
+ intel_sdvo_is_hdmi_connector(intel_sdvo, device)) {
connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
intel_sdvo->is_hdmi = true;
}
@@ -2925,7 +2942,7 @@ static int intel_sdvo_ddc_proxy_xfer(struct i2c_adapter *adapter,
{
struct intel_sdvo *sdvo = adapter->algo_data;
- if (!intel_sdvo_set_control_bus_switch(sdvo, sdvo->ddc_bus))
+ if (!__intel_sdvo_set_control_bus_switch(sdvo, sdvo->ddc_bus))
return -EIO;
return sdvo->i2c->algo->master_xfer(sdvo->i2c, msgs, num);
@@ -2942,6 +2959,33 @@ static const struct i2c_algorithm intel_sdvo_ddc_proxy = {
.functionality = intel_sdvo_ddc_proxy_func
};
+static void proxy_lock_bus(struct i2c_adapter *adapter,
+ unsigned int flags)
+{
+ struct intel_sdvo *sdvo = adapter->algo_data;
+ sdvo->i2c->lock_ops->lock_bus(sdvo->i2c, flags);
+}
+
+static int proxy_trylock_bus(struct i2c_adapter *adapter,
+ unsigned int flags)
+{
+ struct intel_sdvo *sdvo = adapter->algo_data;
+ return sdvo->i2c->lock_ops->trylock_bus(sdvo->i2c, flags);
+}
+
+static void proxy_unlock_bus(struct i2c_adapter *adapter,
+ unsigned int flags)
+{
+ struct intel_sdvo *sdvo = adapter->algo_data;
+ sdvo->i2c->lock_ops->unlock_bus(sdvo->i2c, flags);
+}
+
+const struct i2c_lock_operations proxy_lock_ops = {
+ .lock_bus = proxy_lock_bus,
+ .trylock_bus = proxy_trylock_bus,
+ .unlock_bus = proxy_unlock_bus,
+};
+
static bool
intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo,
struct drm_i915_private *dev_priv)
@@ -2954,6 +2998,7 @@ intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo,
sdvo->ddc.dev.parent = &pdev->dev;
sdvo->ddc.algo_data = sdvo;
sdvo->ddc.algo = &intel_sdvo_ddc_proxy;
+ sdvo->ddc.lock_ops = &proxy_lock_ops;
return i2c_add_adapter(&sdvo->ddc) == 0;
}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 0c650c2cbca8..524933b01483 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -30,6 +30,7 @@
* support.
*/
#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_rect.h>
@@ -176,7 +177,7 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
* re-enables interrupts and verifies the update was actually completed
* before a vblank using the value of @start_vbl_count.
*/
-void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work)
+void intel_pipe_update_end(struct intel_crtc *crtc)
{
enum pipe pipe = crtc->pipe;
int scanline_end = intel_get_crtc_scanline(crtc);
@@ -184,12 +185,6 @@ void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work
ktime_t end_vbl_time = ktime_get();
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- if (work) {
- work->flip_queued_vblank = end_vbl_count;
- smp_mb__before_atomic();
- atomic_set(&work->pending, 1);
- }
-
trace_i915_pipe_update_end(crtc, end_vbl_count, scanline_end);
/* We're still in the vblank-evade critical section, this can't race.
@@ -244,6 +239,7 @@ skl_update_plane(struct intel_plane *plane,
u32 surf_addr = plane_state->main.offset;
unsigned int rotation = plane_state->base.rotation;
u32 stride = skl_plane_stride(fb, 0, rotation);
+ u32 aux_stride = skl_plane_stride(fb, 1, rotation);
int crtc_x = plane_state->base.dst.x1;
int crtc_y = plane_state->base.dst.y1;
uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
@@ -262,7 +258,7 @@ skl_update_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- if (IS_GEMINILAKE(dev_priv)) {
+ if (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) {
I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id),
PLANE_COLOR_PIPE_GAMMA_ENABLE |
PLANE_COLOR_PIPE_CSC_ENABLE |
@@ -278,6 +274,10 @@ skl_update_plane(struct intel_plane *plane,
I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (y << 16) | x);
I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride);
I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w);
+ I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id),
+ (plane_state->aux.offset - surf_addr) | aux_stride);
+ I915_WRITE_FW(PLANE_AUX_OFFSET(pipe, plane_id),
+ (plane_state->aux.y << 16) | plane_state->aux.x);
/* program plane scaler */
if (plane_state->scaler_id >= 0) {
@@ -1038,6 +1038,12 @@ static const uint32_t g4x_plane_formats[] = {
DRM_FORMAT_VYUY,
};
+static const uint64_t i9xx_plane_format_modifiers[] = {
+ I915_FORMAT_MOD_X_TILED,
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
static const uint32_t snb_plane_formats[] = {
DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888,
@@ -1073,6 +1079,122 @@ static uint32_t skl_plane_formats[] = {
DRM_FORMAT_VYUY,
};
+static const uint64_t skl_plane_format_modifiers[] = {
+ I915_FORMAT_MOD_X_TILED,
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static bool g4x_sprite_plane_format_mod_supported(struct drm_plane *plane,
+ uint32_t format,
+ uint64_t modifier)
+{
+ switch (format) {
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ if (modifier == DRM_FORMAT_MOD_LINEAR ||
+ modifier == I915_FORMAT_MOD_X_TILED)
+ return true;
+ /* fall through */
+ default:
+ return false;
+ }
+}
+
+static bool vlv_sprite_plane_format_mod_supported(struct drm_plane *plane,
+ uint32_t format,
+ uint64_t modifier)
+{
+ switch (format) {
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ if (modifier == DRM_FORMAT_MOD_LINEAR ||
+ modifier == I915_FORMAT_MOD_X_TILED)
+ return true;
+ /* fall through */
+ default:
+ return false;
+ }
+}
+
+static bool skl_sprite_plane_format_mod_supported(struct drm_plane *plane,
+ uint32_t format,
+ uint64_t modifier)
+{
+ /* This is the same as primary plane since SKL has universal planes */
+ switch (format) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ if (modifier == I915_FORMAT_MOD_Yf_TILED)
+ return true;
+ /* fall through */
+ case DRM_FORMAT_C8:
+ if (modifier == DRM_FORMAT_MOD_LINEAR ||
+ modifier == I915_FORMAT_MOD_X_TILED ||
+ modifier == I915_FORMAT_MOD_Y_TILED)
+ return true;
+ /* fall through */
+ default:
+ return false;
+ }
+}
+
+static bool intel_sprite_plane_format_mod_supported(struct drm_plane *plane,
+ uint32_t format,
+ uint64_t modifier)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->dev);
+
+ if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID))
+ return false;
+
+ if ((modifier >> 56) != DRM_FORMAT_MOD_VENDOR_INTEL &&
+ modifier != DRM_FORMAT_MOD_LINEAR)
+ return false;
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ return skl_sprite_plane_format_mod_supported(plane, format, modifier);
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ return vlv_sprite_plane_format_mod_supported(plane, format, modifier);
+ else
+ return g4x_sprite_plane_format_mod_supported(plane, format, modifier);
+
+ unreachable();
+}
+
+static const struct drm_plane_funcs intel_sprite_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .atomic_get_property = intel_plane_atomic_get_property,
+ .atomic_set_property = intel_plane_atomic_set_property,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+ .format_mod_supported = intel_sprite_plane_format_mod_supported,
+};
+
struct intel_plane *
intel_sprite_plane_create(struct drm_i915_private *dev_priv,
enum pipe pipe, int plane)
@@ -1081,6 +1203,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
struct intel_plane_state *state = NULL;
unsigned long possible_crtcs;
const uint32_t *plane_formats;
+ const uint64_t *modifiers;
unsigned int supported_rotations;
int num_plane_formats;
int ret;
@@ -1098,7 +1221,17 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
}
intel_plane->base.state = &state->base;
- if (INTEL_GEN(dev_priv) >= 9) {
+ if (INTEL_GEN(dev_priv) >= 10) {
+ intel_plane->can_scale = true;
+ state->scaler_id = -1;
+
+ intel_plane->update_plane = skl_update_plane;
+ intel_plane->disable_plane = skl_disable_plane;
+
+ plane_formats = skl_plane_formats;
+ num_plane_formats = ARRAY_SIZE(skl_plane_formats);
+ modifiers = skl_plane_format_modifiers;
+ } else if (INTEL_GEN(dev_priv) >= 9) {
intel_plane->can_scale = true;
state->scaler_id = -1;
@@ -1107,6 +1240,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane_formats = skl_plane_formats;
num_plane_formats = ARRAY_SIZE(skl_plane_formats);
+ modifiers = skl_plane_format_modifiers;
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
intel_plane->can_scale = false;
intel_plane->max_downscale = 1;
@@ -1116,6 +1250,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane_formats = vlv_plane_formats;
num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
+ modifiers = i9xx_plane_format_modifiers;
} else if (INTEL_GEN(dev_priv) >= 7) {
if (IS_IVYBRIDGE(dev_priv)) {
intel_plane->can_scale = true;
@@ -1130,6 +1265,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane_formats = snb_plane_formats;
num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+ modifiers = i9xx_plane_format_modifiers;
} else {
intel_plane->can_scale = true;
intel_plane->max_downscale = 16;
@@ -1137,6 +1273,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
intel_plane->update_plane = g4x_update_plane;
intel_plane->disable_plane = g4x_disable_plane;
+ modifiers = i9xx_plane_format_modifiers;
if (IS_GEN6(dev_priv)) {
plane_formats = snb_plane_formats;
num_plane_formats = ARRAY_SIZE(snb_plane_formats);
@@ -1169,14 +1306,16 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
if (INTEL_GEN(dev_priv) >= 9)
ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base,
- possible_crtcs, &intel_plane_funcs,
+ possible_crtcs, &intel_sprite_plane_funcs,
plane_formats, num_plane_formats,
+ modifiers,
DRM_PLANE_TYPE_OVERLAY,
"plane %d%c", plane + 2, pipe_name(pipe));
else
ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base,
- possible_crtcs, &intel_plane_funcs,
+ possible_crtcs, &intel_sprite_plane_funcs,
plane_formats, num_plane_formats,
+ modifiers,
DRM_PLANE_TYPE_OVERLAY,
"sprite %c", sprite_name(pipe, plane));
if (ret)
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 784df024e230..906893c006d8 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1407,11 +1407,9 @@ intel_tv_destroy(struct drm_connector *connector)
}
static const struct drm_connector_funcs intel_tv_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.late_register = intel_connector_register,
.early_unregister = intel_connector_unregister,
.destroy = intel_tv_destroy,
- .set_property = drm_atomic_helper_connector_set_property,
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index 27e072cc96eb..0178ba42a0e5 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -94,7 +94,7 @@ void intel_uc_sanitize_options(struct drm_i915_private *dev_priv)
i915.enable_guc_submission = HAS_GUC_SCHED(dev_priv);
}
-static void guc_write_irq_trigger(struct intel_guc *guc)
+static void gen8_guc_raise_irq(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -109,7 +109,7 @@ void intel_uc_init_early(struct drm_i915_private *dev_priv)
mutex_init(&guc->send_mutex);
guc->send = intel_guc_send_nop;
- guc->notify = guc_write_irq_trigger;
+ guc->notify = gen8_guc_raise_irq;
}
static void fetch_uc_fw(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 9882724bc2b6..1d7b879cc68c 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -643,7 +643,7 @@ find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
{ .start = (s), .end = (e), .domains = (d) }
#define HAS_FWTABLE(dev_priv) \
- (IS_GEN9(dev_priv) || \
+ (INTEL_GEN(dev_priv) >= 9 || \
IS_CHERRYVIEW(dev_priv) || \
IS_VALLEYVIEW(dev_priv))
@@ -1072,7 +1072,7 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
dev_priv->uncore.fw_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
}
- if (IS_GEN9(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 9) {
dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
@@ -1497,7 +1497,6 @@ static int gen6_reset_engines(struct drm_i915_private *dev_priv,
[VECS] = GEN6_GRDOM_VECS,
};
u32 hw_mask;
- int ret;
if (engine_mask == ALL_ENGINES) {
hw_mask = GEN6_GRDOM_FULL;
@@ -1509,11 +1508,7 @@ static int gen6_reset_engines(struct drm_i915_private *dev_priv,
hw_mask |= hw_engine_mask[engine->id];
}
- ret = gen6_hw_domain_reset(dev_priv, hw_mask);
-
- intel_uncore_forcewake_reset(dev_priv, true);
-
- return ret;
+ return gen6_hw_domain_reset(dev_priv, hw_mask);
}
/**
@@ -1719,6 +1714,17 @@ bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)
return intel_get_gpu_reset(dev_priv) != NULL;
}
+/*
+ * When GuC submission is enabled, GuC manages ELSP and can initiate the
+ * engine reset too. For now, fall back to full GPU reset if it is enabled.
+ */
+bool intel_has_reset_engine(struct drm_i915_private *dev_priv)
+{
+ return (dev_priv->info.has_reset_engine &&
+ !dev_priv->guc.execbuf_client &&
+ i915.reset >= 2);
+}
+
int intel_guc_reset(struct drm_i915_private *dev_priv)
{
int ret;
diff --git a/drivers/gpu/drm/i915/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/selftests/huge_gem_object.c
index caf76af36aba..c5c7e8efbdd3 100644
--- a/drivers/gpu/drm/i915/selftests/huge_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/huge_gem_object.c
@@ -111,6 +111,7 @@ huge_gem_object(struct drm_i915_private *i915,
dma_addr_t dma_size)
{
struct drm_i915_gem_object *obj;
+ unsigned int cache_level;
GEM_BUG_ON(!phys_size || phys_size > dma_size);
GEM_BUG_ON(!IS_ALIGNED(phys_size, PAGE_SIZE));
@@ -128,9 +129,8 @@ huge_gem_object(struct drm_i915_private *i915,
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
- obj->cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
- obj->cache_coherent = i915_gem_object_is_coherent(obj);
- obj->cache_dirty = !obj->cache_coherent;
+ cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
+ i915_gem_object_set_cache_coherency(obj, cache_level);
obj->scratch = phys_size;
return obj;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
index 95d4aebc0181..35d778d70626 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
@@ -241,7 +241,7 @@ static bool always_valid(struct drm_i915_private *i915)
static bool needs_mi_store_dword(struct drm_i915_private *i915)
{
- return igt_can_mi_store_dword_imm(i915);
+ return intel_engine_can_store_dword(i915->engine[RCS]);
}
static const struct igt_coherency_mode {
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index 12b85b3278cd..fb0a58fc8348 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -38,8 +38,6 @@ gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
u32 *cmd;
int err;
- GEM_BUG_ON(!igt_can_mi_store_dword_imm(vma->vm->i915));
-
size = (4 * count + 1) * sizeof(u32);
size = round_up(size, PAGE_SIZE);
obj = i915_gem_object_create_internal(vma->vm->i915, size);
@@ -123,6 +121,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
int err;
GEM_BUG_ON(obj->base.size > vm->total);
+ GEM_BUG_ON(!intel_engine_can_store_dword(engine));
vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma))
@@ -359,6 +358,9 @@ static int igt_ctx_exec(void *arg)
}
for_each_engine(engine, i915, id) {
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
if (!obj) {
obj = create_test_object(ctx, file, &objects);
if (IS_ERR(obj)) {
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 50710e3f1caa..6b132caffa18 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -197,6 +197,9 @@ static int lowlevel_hole(struct drm_i915_private *i915,
{
I915_RND_STATE(seed_prng);
unsigned int size;
+ struct i915_vma mock_vma;
+
+ memset(&mock_vma, 0, sizeof(struct i915_vma));
/* Keep creating larger objects until one cannot fit into the hole */
for (size = 12; (hole_end - hole_start) >> size; size++) {
@@ -255,8 +258,11 @@ static int lowlevel_hole(struct drm_i915_private *i915,
vm->allocate_va_range(vm, addr, BIT_ULL(size)))
break;
- vm->insert_entries(vm, obj->mm.pages, addr,
- I915_CACHE_NONE, 0);
+ mock_vma.pages = obj->mm.pages;
+ mock_vma.node.size = BIT_ULL(size);
+ mock_vma.node.start = addr;
+
+ vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0);
}
count = n;
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index fb9072d5877f..2e86ec136b35 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -186,16 +186,20 @@ static int igt_vma_create(void *arg)
goto end;
}
- list_for_each_entry_safe(ctx, cn, &contexts, link)
+ list_for_each_entry_safe(ctx, cn, &contexts, link) {
+ list_del_init(&ctx->link);
mock_context_close(ctx);
+ }
}
end:
/* Final pass to lookup all created contexts */
err = create_vmas(i915, &objects, &contexts);
out:
- list_for_each_entry_safe(ctx, cn, &contexts, link)
+ list_for_each_entry_safe(ctx, cn, &contexts, link) {
+ list_del_init(&ctx->link);
mock_context_close(ctx);
+ }
list_for_each_entry_safe(obj, on, &objects, st_link)
i915_gem_object_put(obj);
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index aa31d6c0cdfb..02e52a146ed8 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -22,8 +22,13 @@
*
*/
+#include <linux/kthread.h>
+
#include "../i915_selftest.h"
+#include "mock_context.h"
+#include "mock_drm.h"
+
struct hang {
struct drm_i915_private *i915;
struct drm_i915_gem_object *hws;
@@ -248,9 +253,6 @@ static int igt_hang_sanitycheck(void *arg)
/* Basic check that we can execute our hanging batch */
- if (!igt_can_mi_store_dword_imm(i915))
- return 0;
-
mutex_lock(&i915->drm.struct_mutex);
err = hang_init(&h, i915);
if (err)
@@ -259,6 +261,9 @@ static int igt_hang_sanitycheck(void *arg)
for_each_engine(engine, i915, id) {
long timeout;
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
rq = hang_create_request(&h, engine, i915->kernel_context);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
@@ -292,6 +297,37 @@ unlock:
return err;
}
+static void global_reset_lock(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ while (test_and_set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags))
+ wait_event(i915->gpu_error.reset_queue,
+ !test_bit(I915_RESET_BACKOFF,
+ &i915->gpu_error.flags));
+
+ for_each_engine(engine, i915, id) {
+ while (test_and_set_bit(I915_RESET_ENGINE + id,
+ &i915->gpu_error.flags))
+ wait_on_bit(&i915->gpu_error.flags,
+ I915_RESET_ENGINE + id,
+ TASK_UNINTERRUPTIBLE);
+ }
+}
+
+static void global_reset_unlock(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, i915, id)
+ clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+
+ clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
+ wake_up_all(&i915->gpu_error.reset_queue);
+}
+
static int igt_global_reset(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -300,13 +336,13 @@ static int igt_global_reset(void *arg)
/* Check that we can issue a global GPU reset */
- set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
+ global_reset_lock(i915);
set_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags);
mutex_lock(&i915->drm.struct_mutex);
reset_count = i915_reset_count(&i915->gpu_error);
- i915_reset(i915);
+ i915_reset(i915, I915_RESET_QUIET);
if (i915_reset_count(&i915->gpu_error) == reset_count) {
pr_err("No GPU reset recorded!\n");
@@ -315,7 +351,214 @@ static int igt_global_reset(void *arg)
mutex_unlock(&i915->drm.struct_mutex);
GEM_BUG_ON(test_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags));
- clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
+ global_reset_unlock(i915);
+
+ if (i915_terminally_wedged(&i915->gpu_error))
+ err = -EIO;
+
+ return err;
+}
+
+static int igt_reset_engine(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ unsigned int reset_count, reset_engine_count;
+ int err = 0;
+
+ /* Check that we can issue a global GPU and engine reset */
+
+ if (!intel_has_reset_engine(i915))
+ return 0;
+
+ for_each_engine(engine, i915, id) {
+ set_bit(I915_RESET_ENGINE + engine->id, &i915->gpu_error.flags);
+ reset_count = i915_reset_count(&i915->gpu_error);
+ reset_engine_count = i915_reset_engine_count(&i915->gpu_error,
+ engine);
+
+ err = i915_reset_engine(engine, I915_RESET_QUIET);
+ if (err) {
+ pr_err("i915_reset_engine failed\n");
+ break;
+ }
+
+ if (i915_reset_count(&i915->gpu_error) != reset_count) {
+ pr_err("Full GPU reset recorded! (engine reset expected)\n");
+ err = -EINVAL;
+ break;
+ }
+
+ if (i915_reset_engine_count(&i915->gpu_error, engine) ==
+ reset_engine_count) {
+ pr_err("No %s engine reset recorded!\n", engine->name);
+ err = -EINVAL;
+ break;
+ }
+
+ clear_bit(I915_RESET_ENGINE + engine->id,
+ &i915->gpu_error.flags);
+ }
+
+ if (i915_terminally_wedged(&i915->gpu_error))
+ err = -EIO;
+
+ return err;
+}
+
+static int active_engine(void *data)
+{
+ struct intel_engine_cs *engine = data;
+ struct drm_i915_gem_request *rq[2] = {};
+ struct i915_gem_context *ctx[2];
+ struct drm_file *file;
+ unsigned long count = 0;
+ int err = 0;
+
+ file = mock_file(engine->i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ mutex_lock(&engine->i915->drm.struct_mutex);
+ ctx[0] = live_context(engine->i915, file);
+ mutex_unlock(&engine->i915->drm.struct_mutex);
+ if (IS_ERR(ctx[0])) {
+ err = PTR_ERR(ctx[0]);
+ goto err_file;
+ }
+
+ mutex_lock(&engine->i915->drm.struct_mutex);
+ ctx[1] = live_context(engine->i915, file);
+ mutex_unlock(&engine->i915->drm.struct_mutex);
+ if (IS_ERR(ctx[1])) {
+ err = PTR_ERR(ctx[1]);
+ i915_gem_context_put(ctx[0]);
+ goto err_file;
+ }
+
+ while (!kthread_should_stop()) {
+ unsigned int idx = count++ & 1;
+ struct drm_i915_gem_request *old = rq[idx];
+ struct drm_i915_gem_request *new;
+
+ mutex_lock(&engine->i915->drm.struct_mutex);
+ new = i915_gem_request_alloc(engine, ctx[idx]);
+ if (IS_ERR(new)) {
+ mutex_unlock(&engine->i915->drm.struct_mutex);
+ err = PTR_ERR(new);
+ break;
+ }
+
+ rq[idx] = i915_gem_request_get(new);
+ i915_add_request(new);
+ mutex_unlock(&engine->i915->drm.struct_mutex);
+
+ if (old) {
+ i915_wait_request(old, 0, MAX_SCHEDULE_TIMEOUT);
+ i915_gem_request_put(old);
+ }
+ }
+
+ for (count = 0; count < ARRAY_SIZE(rq); count++)
+ i915_gem_request_put(rq[count]);
+
+err_file:
+ mock_file_free(engine->i915, file);
+ return err;
+}
+
+static int igt_reset_active_engines(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine, *active;
+ enum intel_engine_id id, tmp;
+ int err = 0;
+
+ /* Check that issuing a reset on one engine does not interfere
+ * with any other engine.
+ */
+
+ if (!intel_has_reset_engine(i915))
+ return 0;
+
+ for_each_engine(engine, i915, id) {
+ struct task_struct *threads[I915_NUM_ENGINES];
+ unsigned long resets[I915_NUM_ENGINES];
+ unsigned long global = i915_reset_count(&i915->gpu_error);
+ IGT_TIMEOUT(end_time);
+
+ memset(threads, 0, sizeof(threads));
+ for_each_engine(active, i915, tmp) {
+ struct task_struct *tsk;
+
+ if (active == engine)
+ continue;
+
+ resets[tmp] = i915_reset_engine_count(&i915->gpu_error,
+ active);
+
+ tsk = kthread_run(active_engine, active,
+ "igt/%s", active->name);
+ if (IS_ERR(tsk)) {
+ err = PTR_ERR(tsk);
+ goto unwind;
+ }
+
+ threads[tmp] = tsk;
+ get_task_struct(tsk);
+ }
+
+ set_bit(I915_RESET_ENGINE + engine->id, &i915->gpu_error.flags);
+ do {
+ err = i915_reset_engine(engine, I915_RESET_QUIET);
+ if (err) {
+ pr_err("i915_reset_engine(%s) failed, err=%d\n",
+ engine->name, err);
+ break;
+ }
+ } while (time_before(jiffies, end_time));
+ clear_bit(I915_RESET_ENGINE + engine->id,
+ &i915->gpu_error.flags);
+
+unwind:
+ for_each_engine(active, i915, tmp) {
+ int ret;
+
+ if (!threads[tmp])
+ continue;
+
+ ret = kthread_stop(threads[tmp]);
+ if (ret) {
+ pr_err("kthread for active engine %s failed, err=%d\n",
+ active->name, ret);
+ if (!err)
+ err = ret;
+ }
+ put_task_struct(threads[tmp]);
+
+ if (resets[tmp] != i915_reset_engine_count(&i915->gpu_error,
+ active)) {
+ pr_err("Innocent engine %s was reset (count=%ld)\n",
+ active->name,
+ i915_reset_engine_count(&i915->gpu_error,
+ active) - resets[tmp]);
+ err = -EIO;
+ }
+ }
+
+ if (global != i915_reset_count(&i915->gpu_error)) {
+ pr_err("Global reset (count=%ld)!\n",
+ i915_reset_count(&i915->gpu_error) - global);
+ err = -EIO;
+ }
+
+ if (err)
+ break;
+
+ cond_resched();
+ }
+
if (i915_terminally_wedged(&i915->gpu_error))
err = -EIO;
@@ -356,9 +599,12 @@ static int igt_wait_reset(void *arg)
long timeout;
int err;
+ if (!intel_engine_can_store_dword(i915->engine[RCS]))
+ return 0;
+
/* Check that we detect a stuck waiter and issue a reset */
- set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
+ global_reset_lock(i915);
mutex_lock(&i915->drm.struct_mutex);
err = hang_init(&h, i915);
@@ -403,7 +649,7 @@ fini:
hang_fini(&h);
unlock:
mutex_unlock(&i915->drm.struct_mutex);
- clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
+ global_reset_unlock(i915);
if (i915_terminally_wedged(&i915->gpu_error))
return -EIO;
@@ -421,10 +667,8 @@ static int igt_reset_queue(void *arg)
/* Check that we replay pending requests following a hang */
- if (!igt_can_mi_store_dword_imm(i915))
- return 0;
+ global_reset_lock(i915);
- set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
mutex_lock(&i915->drm.struct_mutex);
err = hang_init(&h, i915);
if (err)
@@ -435,6 +679,9 @@ static int igt_reset_queue(void *arg)
IGT_TIMEOUT(end_time);
unsigned int count;
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
prev = hang_create_request(&h, engine, i915->kernel_context);
if (IS_ERR(prev)) {
err = PTR_ERR(prev);
@@ -471,7 +718,7 @@ static int igt_reset_queue(void *arg)
reset_count = fake_hangcheck(prev);
- i915_reset(i915);
+ i915_reset(i915, I915_RESET_QUIET);
GEM_BUG_ON(test_bit(I915_RESET_HANDOFF,
&i915->gpu_error.flags));
@@ -518,7 +765,7 @@ fini:
hang_fini(&h);
unlock:
mutex_unlock(&i915->drm.struct_mutex);
- clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
+ global_reset_unlock(i915);
if (i915_terminally_wedged(&i915->gpu_error))
return -EIO;
@@ -526,13 +773,83 @@ unlock:
return err;
}
+static int igt_handle_error(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine = i915->engine[RCS];
+ struct hang h;
+ struct drm_i915_gem_request *rq;
+ struct i915_gpu_state *error;
+ int err;
+
+ /* Check that we can issue a global GPU and engine reset */
+
+ if (!intel_has_reset_engine(i915))
+ return 0;
+
+ if (!intel_engine_can_store_dword(i915->engine[RCS]))
+ return 0;
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ err = hang_init(&h, i915);
+ if (err)
+ goto err_unlock;
+
+ rq = hang_create_request(&h, engine, i915->kernel_context);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_fini;
+ }
+
+ i915_gem_request_get(rq);
+ __i915_add_request(rq, true);
+
+ if (!wait_for_hang(&h, rq)) {
+ pr_err("Failed to start request %x\n", rq->fence.seqno);
+ err = -EIO;
+ goto err_request;
+ }
+
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ /* Temporarily disable error capture */
+ error = xchg(&i915->gpu_error.first_error, (void *)-1);
+
+ engine->hangcheck.stalled = true;
+ engine->hangcheck.seqno = intel_engine_get_seqno(engine);
+
+ i915_handle_error(i915, intel_engine_flag(engine), "%s", __func__);
+
+ xchg(&i915->gpu_error.first_error, error);
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ if (rq->fence.error != -EIO) {
+ pr_err("Guilty request not identified!\n");
+ err = -EINVAL;
+ goto err_request;
+ }
+
+err_request:
+ i915_gem_request_put(rq);
+err_fini:
+ hang_fini(&h);
+err_unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+}
+
int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_hang_sanitycheck),
SUBTEST(igt_global_reset),
+ SUBTEST(igt_reset_engine),
+ SUBTEST(igt_reset_active_engines),
SUBTEST(igt_wait_reset),
SUBTEST(igt_reset_queue),
+ SUBTEST(igt_handle_error),
};
if (!intel_has_gpu_reset(i915))
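The igt_reset_active_engines test added above follows a common isolation pattern: keep background submissions running on every other engine, hammer per-engine resets on one target, then check that neither the innocent engines' reset counters nor the global reset counter moved. The following is an editorial sketch of that control flow only, reduced to plain pthreads in userspace; all names here are hypothetical stand-ins, not the i915 API.

/* Sketch (not part of the patch): shape of the per-engine reset isolation test. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NUM_ENGINES 4

static atomic_ulong reset_count[NUM_ENGINES];	/* per-engine reset counters */
static atomic_bool stop;

struct engine_arg { int id; };

/* Background load standing in for active_engine(): keep the engine busy. */
static void *active_engine(void *p)
{
	struct engine_arg *arg = p;

	while (!atomic_load(&stop))
		(void)arg->id;	/* the real test submits work here */
	return NULL;
}

static void reset_engine(int id)
{
	atomic_fetch_add(&reset_count[id], 1);	/* only the target may move */
}

int main(void)
{
	for (int target = 0; target < NUM_ENGINES; target++) {
		pthread_t threads[NUM_ENGINES] = {0};
		unsigned long before[NUM_ENGINES];
		struct engine_arg args[NUM_ENGINES];

		atomic_store(&stop, false);
		for (int i = 0; i < NUM_ENGINES; i++) {
			if (i == target)
				continue;
			before[i] = atomic_load(&reset_count[i]);
			args[i].id = i;
			pthread_create(&threads[i], NULL, active_engine, &args[i]);
		}

		for (int n = 0; n < 1000; n++)	/* hammer the target engine */
			reset_engine(target);

		atomic_store(&stop, true);
		for (int i = 0; i < NUM_ENGINES; i++) {
			if (i == target)
				continue;
			pthread_join(threads[i], NULL);
			if (atomic_load(&reset_count[i]) != before[i])
				printf("innocent engine %d was reset!\n", i);
		}
	}
	return 0;
}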
diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/selftests/mock_context.c
index f8b9cc212b02..098ce643ad07 100644
--- a/drivers/gpu/drm/i915/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/selftests/mock_context.c
@@ -40,18 +40,13 @@ mock_context(struct drm_i915_private *i915,
INIT_LIST_HEAD(&ctx->link);
ctx->i915 = i915;
- ctx->vma_lut.ht_bits = VMA_HT_BITS;
- ctx->vma_lut.ht_size = BIT(VMA_HT_BITS);
- ctx->vma_lut.ht = kcalloc(ctx->vma_lut.ht_size,
- sizeof(*ctx->vma_lut.ht),
- GFP_KERNEL);
- if (!ctx->vma_lut.ht)
- goto err_free;
-
- ret = ida_simple_get(&i915->context_hw_ida,
+ INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
+ INIT_LIST_HEAD(&ctx->handles_list);
+
+ ret = ida_simple_get(&i915->contexts.hw_ida,
0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
if (ret < 0)
- goto err_vma_ht;
+ goto err_handles;
ctx->hw_id = ret;
if (name) {
@@ -66,9 +61,7 @@ mock_context(struct drm_i915_private *i915,
return ctx;
-err_vma_ht:
- kvfree(ctx->vma_lut.ht);
-err_free:
+err_handles:
kfree(ctx);
return NULL;
@@ -86,3 +79,20 @@ void mock_context_close(struct i915_gem_context *ctx)
i915_gem_context_put(ctx);
}
+
+void mock_init_contexts(struct drm_i915_private *i915)
+{
+ INIT_LIST_HEAD(&i915->contexts.list);
+ ida_init(&i915->contexts.hw_ida);
+
+ INIT_WORK(&i915->contexts.free_work, contexts_free_worker);
+ init_llist_head(&i915->contexts.free_list);
+}
+
+struct i915_gem_context *
+live_context(struct drm_i915_private *i915, struct drm_file *file)
+{
+ lockdep_assert_held(&i915->drm.struct_mutex);
+
+ return i915_gem_create_context(i915, file->driver_priv);
+}
diff --git a/drivers/gpu/drm/i915/selftests/mock_context.h b/drivers/gpu/drm/i915/selftests/mock_context.h
index 2427e5c0916a..2f432c03d413 100644
--- a/drivers/gpu/drm/i915/selftests/mock_context.h
+++ b/drivers/gpu/drm/i915/selftests/mock_context.h
@@ -25,10 +25,15 @@
#ifndef __MOCK_CONTEXT_H
#define __MOCK_CONTEXT_H
+void mock_init_contexts(struct drm_i915_private *i915);
+
struct i915_gem_context *
mock_context(struct drm_i915_private *i915,
const char *name);
void mock_context_close(struct i915_gem_context *ctx);
+struct i915_gem_context *
+live_context(struct drm_i915_private *i915, struct drm_file *file);
+
#endif /* !__MOCK_CONTEXT_H */
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index 5b18a2dc19a8..fc0fd7498689 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -123,10 +123,12 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
}
struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
- const char *name)
+ const char *name,
+ int id)
{
struct mock_engine *engine;
- static int id;
+
+ GEM_BUG_ON(id >= I915_NUM_ENGINES);
engine = kzalloc(sizeof(*engine) + PAGE_SIZE, GFP_KERNEL);
if (!engine)
@@ -141,7 +143,7 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
/* minimal engine setup for requests */
engine->base.i915 = i915;
snprintf(engine->base.name, sizeof(engine->base.name), "%s", name);
- engine->base.id = id++;
+ engine->base.id = id;
engine->base.status_page.page_addr = (void *)(engine + 1);
engine->base.context_pin = mock_context_pin;
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.h b/drivers/gpu/drm/i915/selftests/mock_engine.h
index e5e240216ba3..133d0c21790d 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.h
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.h
@@ -40,7 +40,8 @@ struct mock_engine {
};
struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
- const char *name);
+ const char *name,
+ int id);
void mock_engine_flush(struct intel_engine_cs *engine);
void mock_engine_reset(struct intel_engine_cs *engine);
void mock_engine_free(struct intel_engine_cs *engine);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 627e2aa09766..678723430d78 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -22,6 +22,7 @@
*
*/
+#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include "mock_engine.h"
@@ -53,15 +54,17 @@ static void mock_device_release(struct drm_device *dev)
mutex_lock(&i915->drm.struct_mutex);
mock_device_flush(i915);
+ i915_gem_contexts_lost(i915);
mutex_unlock(&i915->drm.struct_mutex);
cancel_delayed_work_sync(&i915->gt.retire_work);
cancel_delayed_work_sync(&i915->gt.idle_work);
+ i915_gem_drain_workqueue(i915);
mutex_lock(&i915->drm.struct_mutex);
for_each_engine(engine, i915, id)
mock_engine_free(engine);
- i915_gem_context_fini(i915);
+ i915_gem_contexts_fini(i915);
mutex_unlock(&i915->drm.struct_mutex);
drain_workqueue(i915->wq);
@@ -108,6 +111,23 @@ static void mock_idle_work_handler(struct work_struct *work)
{
}
+static int pm_domain_resume(struct device *dev)
+{
+ return pm_generic_runtime_resume(dev);
+}
+
+static int pm_domain_suspend(struct device *dev)
+{
+ return pm_generic_runtime_suspend(dev);
+}
+
+static struct dev_pm_domain pm_domain = {
+ .ops = {
+ .runtime_suspend = pm_domain_suspend,
+ .runtime_resume = pm_domain_resume,
+ },
+};
+
struct drm_i915_private *mock_gem_device(void)
{
struct drm_i915_private *i915;
@@ -126,8 +146,10 @@ struct drm_i915_private *mock_gem_device(void)
dev_set_name(&pdev->dev, "mock");
dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ dev_pm_domain_set(&pdev->dev, &pm_domain);
+ pm_runtime_enable(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
- pm_runtime_get_sync(&pdev->dev);
+ WARN_ON(pm_runtime_get_sync(&pdev->dev));
i915 = (struct drm_i915_private *)(pdev + 1);
pci_set_drvdata(pdev, i915);
@@ -160,7 +182,7 @@ struct drm_i915_private *mock_gem_device(void)
INIT_LIST_HEAD(&i915->mm.unbound_list);
INIT_LIST_HEAD(&i915->mm.bound_list);
- ida_init(&i915->context_hw_ida);
+ mock_init_contexts(i915);
INIT_DELAYED_WORK(&i915->gt.retire_work, mock_retire_work_handler);
INIT_DELAYED_WORK(&i915->gt.idle_work, mock_idle_work_handler);
@@ -204,9 +226,9 @@ struct drm_i915_private *mock_gem_device(void)
mutex_unlock(&i915->drm.struct_mutex);
mkwrite_device_info(i915)->ring_mask = BIT(0);
- i915->engine[RCS] = mock_engine(i915, "mock");
+ i915->engine[RCS] = mock_engine(i915, "mock", RCS);
if (!i915->engine[RCS])
- goto err_dependencies;
+ goto err_priorities;
i915->kernel_context = mock_context(i915, NULL);
if (!i915->kernel_context)
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index a61309c7cb3e..f2118cf535a0 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -33,8 +33,7 @@ static void mock_insert_page(struct i915_address_space *vm,
}
static void mock_insert_entries(struct i915_address_space *vm,
- struct sg_table *st,
- u64 start,
+ struct i915_vma *vma,
enum i915_cache_level level, u32 flags)
{
}
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 95e2181963d9..f91cb72d0830 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -115,7 +115,7 @@ static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct drm_plane *plane;
- struct drm_plane_state *old_plane_state;
+ struct drm_plane_state *old_plane_state, *new_plane_state;
bool plane_disabling = false;
int i;
@@ -127,15 +127,15 @@ static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_commit_modeset_enables(dev, state);
- for_each_plane_in_state(state, plane, old_plane_state, i) {
- if (drm_atomic_plane_disabling(old_plane_state, plane->state))
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
+ if (drm_atomic_plane_disabling(old_plane_state, new_plane_state))
plane_disabling = true;
}
if (plane_disabling) {
drm_atomic_helper_wait_for_vblanks(dev, state);
- for_each_plane_in_state(state, plane, old_plane_state, i)
+ for_each_old_plane_in_state(state, plane, old_plane_state, i)
ipu_plane_disable_deferred(plane);
}
@@ -182,8 +182,6 @@ static struct drm_driver imx_drm_driver = {
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
- .dumb_map_offset = drm_gem_cma_dumb_map_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 8b05ecb8fdef..56dd7a9a8e25 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -389,7 +389,6 @@ static int imx_ldb_encoder_atomic_check(struct drm_encoder *encoder,
static const struct drm_connector_funcs imx_ldb_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = imx_drm_connector_destroy,
.reset = drm_atomic_helper_connector_reset,
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index 4826bb781723..bc27c2699464 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -341,7 +341,6 @@ static int imx_tve_atomic_check(struct drm_encoder *encoder,
}
static const struct drm_connector_funcs imx_tve_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = imx_drm_connector_destroy,
.reset = drm_atomic_helper_connector_reset,
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 5456c15d962c..53e0b24beda6 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -50,7 +50,8 @@ static inline struct ipu_crtc *to_ipu_crtc(struct drm_crtc *crtc)
return container_of(crtc, struct ipu_crtc, base);
}
-static void ipu_crtc_enable(struct drm_crtc *crtc)
+static void ipu_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
@@ -293,7 +294,7 @@ static const struct drm_crtc_helper_funcs ipu_helper_funcs = {
.atomic_check = ipu_crtc_atomic_check,
.atomic_begin = ipu_crtc_atomic_begin,
.atomic_disable = ipu_crtc_atomic_disable,
- .enable = ipu_crtc_enable,
+ .atomic_enable = ipu_crtc_atomic_enable,
};
static void ipu_put_resources(struct ipu_crtc *ipu_crtc)
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 49546222c6d3..cf98596c7ce1 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -54,7 +54,7 @@ static const uint32_t ipu_plane_formats[] = {
DRM_FORMAT_RGBA8888,
DRM_FORMAT_RGBX8888,
DRM_FORMAT_BGRA8888,
- DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_BGRX8888,
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
DRM_FORMAT_YUYV,
@@ -496,6 +496,27 @@ static int ipu_chan_assign_axi_id(int ipu_chan)
}
}
+static void ipu_calculate_bursts(u32 width, u32 cpp, u32 stride,
+ u8 *burstsize, u8 *num_bursts)
+{
+ const unsigned int width_bytes = width * cpp;
+ unsigned int npb, bursts;
+
+ /* Maximum number of pixels per burst without overshooting stride */
+ for (npb = 64 / cpp; npb > 0; --npb) {
+ if (round_up(width_bytes, npb * cpp) <= stride)
+ break;
+ }
+ *burstsize = npb;
+
+ /* Maximum number of consecutive bursts without overshooting stride */
+ for (bursts = 8; bursts > 1; bursts /= 2) {
+ if (round_up(width_bytes, npb * cpp * bursts) <= stride)
+ break;
+ }
+ *num_bursts = bursts;
+}
+
static void ipu_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
@@ -509,6 +530,9 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
unsigned long alpha_eba = 0;
enum ipu_color_space ics;
unsigned int axi_id = 0;
+ const struct drm_format_info *info;
+ u8 burstsize, num_bursts;
+ u32 width, height;
int active;
if (ipu_plane->dp_flow == IPU_DP_FLOW_SYNC_FG)
@@ -525,8 +549,8 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
ipu_prg_channel_configure(ipu_plane->ipu_ch, axi_id,
drm_rect_width(&state->src) >> 16,
drm_rect_height(&state->src) >> 16,
- state->fb->pitches[0],
- state->fb->format->format, &eba);
+ fb->pitches[0],
+ fb->format->format, &eba);
}
if (old_state->fb && !drm_atomic_crtc_needs_modeset(crtc_state)) {
@@ -545,19 +569,17 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
return;
}
+ ics = ipu_drm_fourcc_to_colorspace(fb->format->format);
switch (ipu_plane->dp_flow) {
case IPU_DP_FLOW_SYNC_BG:
- ipu_dp_setup_channel(ipu_plane->dp,
- IPUV3_COLORSPACE_RGB,
- IPUV3_COLORSPACE_RGB);
+ ipu_dp_setup_channel(ipu_plane->dp, ics, IPUV3_COLORSPACE_RGB);
ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true);
break;
case IPU_DP_FLOW_SYNC_FG:
- ics = ipu_drm_fourcc_to_colorspace(state->fb->format->format);
ipu_dp_setup_channel(ipu_plane->dp, ics,
IPUV3_COLORSPACE_UNKNOWN);
/* Enable local alpha on partial plane */
- switch (state->fb->format->format) {
+ switch (fb->format->format) {
case DRM_FORMAT_ARGB1555:
case DRM_FORMAT_ABGR1555:
case DRM_FORMAT_RGBA5551:
@@ -583,15 +605,21 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
ipu_dmfc_config_wait4eot(ipu_plane->dmfc, drm_rect_width(dst));
+ width = drm_rect_width(&state->src) >> 16;
+ height = drm_rect_height(&state->src) >> 16;
+ info = drm_format_info(fb->format->format);
+ ipu_calculate_bursts(width, info->cpp[0], fb->pitches[0],
+ &burstsize, &num_bursts);
+
ipu_cpmem_zero(ipu_plane->ipu_ch);
- ipu_cpmem_set_resolution(ipu_plane->ipu_ch,
- drm_rect_width(&state->src) >> 16,
- drm_rect_height(&state->src) >> 16);
- ipu_cpmem_set_fmt(ipu_plane->ipu_ch, state->fb->format->format);
+ ipu_cpmem_set_resolution(ipu_plane->ipu_ch, width, height);
+ ipu_cpmem_set_fmt(ipu_plane->ipu_ch, fb->format->format);
+ ipu_cpmem_set_burstsize(ipu_plane->ipu_ch, burstsize);
ipu_cpmem_set_high_priority(ipu_plane->ipu_ch);
ipu_idmac_set_double_buffer(ipu_plane->ipu_ch, 1);
- ipu_cpmem_set_stride(ipu_plane->ipu_ch, state->fb->pitches[0]);
+ ipu_cpmem_set_stride(ipu_plane->ipu_ch, fb->pitches[0]);
ipu_cpmem_set_axi_id(ipu_plane->ipu_ch, axi_id);
+
switch (fb->format->format) {
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YVU420:
@@ -631,6 +659,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
case DRM_FORMAT_RGBX8888_A8:
case DRM_FORMAT_BGRX8888_A8:
alpha_eba = drm_plane_state_to_eba(state, 1);
+ num_bursts = 0;
dev_dbg(ipu_plane->base.dev->dev, "phys = %lu %lu, x = %d, y = %d",
eba, alpha_eba, state->src.x1 >> 16, state->src.y1 >> 16);
@@ -644,8 +673,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
ipu_cpmem_set_format_passthrough(ipu_plane->alpha_ch, 8);
ipu_cpmem_set_high_priority(ipu_plane->alpha_ch);
ipu_idmac_set_double_buffer(ipu_plane->alpha_ch, 1);
- ipu_cpmem_set_stride(ipu_plane->alpha_ch,
- state->fb->pitches[1]);
+ ipu_cpmem_set_stride(ipu_plane->alpha_ch, fb->pitches[1]);
ipu_cpmem_set_burstsize(ipu_plane->alpha_ch, 16);
ipu_cpmem_set_buffer(ipu_plane->alpha_ch, 0, alpha_eba);
ipu_cpmem_set_buffer(ipu_plane->alpha_ch, 1, alpha_eba);
@@ -657,6 +685,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
}
ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 0, eba);
ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 1, eba);
+ ipu_idmac_lock_enable(ipu_plane->ipu_ch, num_bursts);
ipu_plane_enable(ipu_plane);
}
@@ -675,7 +704,7 @@ int ipu_planes_assign_pre(struct drm_device *dev,
int available_pres = ipu_prg_max_active_channels();
int i;
- for_each_plane_in_state(state, plane, plane_state, i) {
+ for_each_new_plane_in_state(state, plane, plane_state, i) {
struct ipu_plane_state *ipu_state =
to_ipu_plane_state(plane_state);
struct ipu_plane *ipu_plane = to_ipu_plane(plane);
@@ -718,8 +747,8 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
ret = drm_universal_plane_init(dev, &ipu_plane->base, possible_crtcs,
&ipu_plane_funcs, ipu_plane_formats,
- ARRAY_SIZE(ipu_plane_formats), type,
- NULL);
+ ARRAY_SIZE(ipu_plane_formats),
+ NULL, type, NULL);
if (ret) {
DRM_ERROR("failed to initialize plane\n");
kfree(ipu_plane);
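The ipuv3-plane.c hunks above introduce ipu_calculate_bursts(), which picks the largest burst size (in pixels) and the largest power-of-two run of consecutive bursts that still fit within the framebuffer stride. Below is an editorial sketch of the same arithmetic as a standalone program so it can be exercised with arbitrary width/cpp/stride values; the helper mirrors the patch, while main() and the sample numbers are illustrative only.

/* Sketch (not part of the patch): the burst arithmetic from ipu_calculate_bursts(). */
#include <stdint.h>
#include <stdio.h>

#define round_up(x, y) ((((x) + (y) - 1) / (y)) * (y))

static void calculate_bursts(uint32_t width, uint32_t cpp, uint32_t stride,
			     uint8_t *burstsize, uint8_t *num_bursts)
{
	const unsigned int width_bytes = width * cpp;
	unsigned int npb, bursts;

	/* Largest number of pixels per burst that does not overshoot the stride */
	for (npb = 64 / cpp; npb > 0; --npb) {
		if (round_up(width_bytes, npb * cpp) <= stride)
			break;
	}
	*burstsize = npb;

	/* Largest number of consecutive bursts that does not overshoot the stride */
	for (bursts = 8; bursts > 1; bursts /= 2) {
		if (round_up(width_bytes, npb * cpp * bursts) <= stride)
			break;
	}
	*num_bursts = bursts;
}

int main(void)
{
	uint8_t burstsize, num_bursts;

	/* e.g. a 1920-pixel-wide 32bpp plane with an 8 KiB stride */
	calculate_bursts(1920, 4, 8192, &burstsize, &num_bursts);
	printf("burstsize=%u pixels, num_bursts=%u\n", burstsize, num_bursts);
	return 0;
}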
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 636031a30e17..8def97d75030 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -135,7 +135,6 @@ static int imx_pd_encoder_atomic_check(struct drm_encoder *encoder,
}
static const struct drm_connector_funcs imx_pd_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = imx_drm_connector_destroy,
.reset = drm_atomic_helper_connector_reset,
@@ -237,7 +236,7 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
/* port@1 is the output port */
ret = drm_of_find_panel_or_bridge(np, 1, 0, &imxpd->panel, &imxpd->bridge);
- if (ret)
+ if (ret && ret != -ENODEV)
return ret;
imxpd->dev = dev;
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_color.c b/drivers/gpu/drm/mediatek/mtk_disp_color.c
index ef79a6d55646..f609b62b8be6 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_color.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_color.c
@@ -84,8 +84,8 @@ static int mtk_disp_color_bind(struct device *dev, struct device *master,
ret = mtk_ddp_comp_register(drm_dev, &priv->ddp_comp);
if (ret < 0) {
- dev_err(dev, "Failed to register component %s: %d\n",
- dev->of_node->full_name, ret);
+ dev_err(dev, "Failed to register component %pOF: %d\n",
+ dev->of_node, ret);
return ret;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index 35bc5babdbf7..978782a77629 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -235,8 +235,8 @@ static int mtk_disp_ovl_bind(struct device *dev, struct device *master,
ret = mtk_ddp_comp_register(drm_dev, &priv->ddp_comp);
if (ret < 0) {
- dev_err(dev, "Failed to register component %s: %d\n",
- dev->of_node->full_name, ret);
+ dev_err(dev, "Failed to register component %pOF: %d\n",
+ dev->of_node, ret);
return ret;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
index b68a51376f83..585943c81e1f 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
@@ -155,8 +155,8 @@ static int mtk_disp_rdma_bind(struct device *dev, struct device *master,
ret = mtk_ddp_comp_register(drm_dev, &priv->ddp_comp);
if (ret < 0) {
- dev_err(dev, "Failed to register component %s: %d\n",
- dev->of_node->full_name, ret);
+ dev_err(dev, "Failed to register component %pOF: %d\n",
+ dev->of_node, ret);
return ret;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index 32ca351ecd09..e80a603e5fb0 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -605,8 +605,8 @@ static int mtk_dpi_bind(struct device *dev, struct device *master, void *data)
ret = mtk_ddp_comp_register(drm_dev, &dpi->ddp_comp);
if (ret < 0) {
- dev_err(dev, "Failed to register component %s: %d\n",
- dev->of_node->full_name, ret);
+ dev_err(dev, "Failed to register component %pOF: %d\n",
+ dev->of_node, ret);
return ret;
}
@@ -710,7 +710,7 @@ static int mtk_dpi_probe(struct platform_device *pdev)
if (!bridge_node)
return -ENODEV;
- dev_info(dev, "Found bridge node: %s\n", bridge_node->full_name);
+ dev_info(dev, "Found bridge node: %pOF\n", bridge_node);
dpi->bridge = of_drm_find_bridge(bridge_node);
of_node_put(bridge_node);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index cb32c9369f3a..658b8dd45b83 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -366,7 +366,8 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
}
}
-static void mtk_drm_crtc_enable(struct drm_crtc *crtc)
+static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
@@ -390,7 +391,8 @@ static void mtk_drm_crtc_enable(struct drm_crtc *crtc)
mtk_crtc->enabled = true;
}
-static void mtk_drm_crtc_disable(struct drm_crtc *crtc)
+static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
@@ -487,10 +489,10 @@ static const struct drm_crtc_funcs mtk_crtc_funcs = {
static const struct drm_crtc_helper_funcs mtk_crtc_helper_funcs = {
.mode_fixup = mtk_drm_crtc_mode_fixup,
.mode_set_nofb = mtk_drm_crtc_mode_set_nofb,
- .enable = mtk_drm_crtc_enable,
- .disable = mtk_drm_crtc_disable,
.atomic_begin = mtk_drm_crtc_atomic_begin,
.atomic_flush = mtk_drm_crtc_atomic_flush,
+ .atomic_enable = mtk_drm_crtc_atomic_enable,
+ .atomic_disable = mtk_drm_crtc_atomic_disable,
};
static int mtk_drm_crtc_init(struct drm_device *drm,
@@ -577,8 +579,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
node = priv->comp_node[comp_id];
comp = priv->ddp_comp[comp_id];
if (!comp) {
- dev_err(dev, "Component %s not initialized\n",
- node->full_name);
+ dev_err(dev, "Component %pOF not initialized\n", node);
ret = -ENODEV;
goto unprepare;
}
@@ -586,8 +587,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
ret = clk_prepare(comp->clk);
if (ret) {
dev_err(dev,
- "Failed to prepare clock for component %s: %d\n",
- node->full_name, ret);
+ "Failed to prepare clock for component %pOF: %d\n",
+ node, ret);
goto unprepare;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index 07d7ea2268ef..4672317e3ad1 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -295,15 +295,13 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node,
larb_node = of_parse_phandle(node, "mediatek,larb", 0);
if (!larb_node) {
dev_err(dev,
- "Missing mediadek,larb phandle in %s node\n",
- node->full_name);
+ "Missing mediadek,larb phandle in %pOF node\n", node);
return -EINVAL;
}
larb_pdev = of_find_device_by_node(larb_node);
if (!larb_pdev) {
- dev_warn(dev, "Waiting for larb device %s\n",
- larb_node->full_name);
+ dev_warn(dev, "Waiting for larb device %pOF\n", larb_node);
of_node_put(larb_node);
return -EPROBE_DEFER;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 41d2cffe953e..a2ca90fc403c 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -48,11 +48,11 @@ static void mtk_atomic_schedule(struct mtk_drm_private *private,
static void mtk_atomic_wait_for_fences(struct drm_atomic_state *state)
{
struct drm_plane *plane;
- struct drm_plane_state *plane_state;
+ struct drm_plane_state *new_plane_state;
int i;
- for_each_plane_in_state(state, plane, plane_state, i)
- mtk_fb_wait(plane->state->fb);
+ for_each_new_plane_in_state(state, plane, new_plane_state, i)
+ mtk_fb_wait(new_plane_state->fb);
}
static void mtk_atomic_complete(struct mtk_drm_private *private,
@@ -109,7 +109,12 @@ static int mtk_atomic_commit(struct drm_device *drm,
mutex_lock(&private->commit.lock);
flush_work(&private->commit.work);
- drm_atomic_helper_swap_state(state, true);
+ ret = drm_atomic_helper_swap_state(state, true);
+ if (ret) {
+ mutex_unlock(&private->commit.lock);
+ drm_atomic_helper_cleanup_planes(drm, state);
+ return ret;
+ }
drm_atomic_state_get(state);
if (async)
@@ -187,8 +192,8 @@ static int mtk_drm_kms_init(struct drm_device *drm)
pdev = of_find_device_by_node(private->mutex_node);
if (!pdev) {
- dev_err(drm->dev, "Waiting for disp-mutex device %s\n",
- private->mutex_node->full_name);
+ dev_err(drm->dev, "Waiting for disp-mutex device %pOF\n",
+ private->mutex_node);
of_node_put(private->mutex_node);
return -EPROBE_DEFER;
}
@@ -266,7 +271,6 @@ static void mtk_drm_kms_deinit(struct drm_device *drm)
{
drm_kms_helper_poll_fini(drm);
- drm_vblank_cleanup(drm);
component_unbind_all(drm->dev, drm);
drm_mode_config_cleanup(drm);
}
@@ -289,8 +293,6 @@ static struct drm_driver mtk_drm_driver = {
.gem_free_object_unlocked = mtk_drm_gem_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = mtk_drm_gem_dumb_create,
- .dumb_map_offset = mtk_drm_gem_dumb_map_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
@@ -417,8 +419,8 @@ static int mtk_drm_probe(struct platform_device *pdev)
continue;
if (!of_device_is_available(node)) {
- dev_dbg(dev, "Skipping disabled component %s\n",
- node->full_name);
+ dev_dbg(dev, "Skipping disabled component %pOF\n",
+ node);
continue;
}
@@ -431,8 +433,8 @@ static int mtk_drm_probe(struct platform_device *pdev)
comp_id = mtk_ddp_comp_get_id(node, comp_type);
if (comp_id < 0) {
- dev_warn(dev, "Skipping unknown component %s\n",
- node->full_name);
+ dev_warn(dev, "Skipping unknown component %pOF\n",
+ node);
continue;
}
@@ -448,8 +450,8 @@ static int mtk_drm_probe(struct platform_device *pdev)
comp_type == MTK_DISP_RDMA ||
comp_type == MTK_DSI ||
comp_type == MTK_DPI) {
- dev_info(dev, "Adding component match for %s\n",
- node->full_name);
+ dev_info(dev, "Adding component match for %pOF\n",
+ node);
drm_of_component_match_add(dev, &match, compare_of,
node);
} else {
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_fb.c b/drivers/gpu/drm/mediatek/mtk_drm_fb.c
index d4246c9dceae..0d8d506695f9 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_fb.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_fb.c
@@ -58,7 +58,7 @@ static void mtk_drm_fb_destroy(struct drm_framebuffer *fb)
drm_framebuffer_cleanup(fb);
- drm_gem_object_unreference_unlocked(mtk_fb->gem_obj);
+ drm_gem_object_put_unlocked(mtk_fb->gem_obj);
kfree(mtk_fb);
}
@@ -160,6 +160,6 @@ struct drm_framebuffer *mtk_drm_mode_fb_create(struct drm_device *dev,
return &mtk_fb->base;
unreference:
- drm_gem_object_unreference_unlocked(gem);
+ drm_gem_object_put_unlocked(gem);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
index 7abc550ebc00..f595ac816b55 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
@@ -122,7 +122,7 @@ int mtk_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
goto err_handle_create;
/* drop reference from allocate - handle holds it now. */
- drm_gem_object_unreference_unlocked(&mtk_gem->base);
+ drm_gem_object_put_unlocked(&mtk_gem->base);
return 0;
@@ -131,31 +131,6 @@ err_handle_create:
return ret;
}
-int mtk_drm_gem_dumb_map_offset(struct drm_file *file_priv,
- struct drm_device *dev, uint32_t handle,
- uint64_t *offset)
-{
- struct drm_gem_object *obj;
- int ret;
-
- obj = drm_gem_object_lookup(file_priv, handle);
- if (!obj) {
- DRM_ERROR("failed to lookup gem object.\n");
- return -EINVAL;
- }
-
- ret = drm_gem_create_mmap_offset(obj);
- if (ret)
- goto out;
-
- *offset = drm_vma_node_offset_addr(&obj->vma_node);
- DRM_DEBUG_KMS("offset = 0x%llx\n", *offset);
-
-out:
- drm_gem_object_unreference_unlocked(obj);
- return ret;
-}
-
static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj,
struct vm_area_struct *vma)
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.h b/drivers/gpu/drm/mediatek/mtk_drm_gem.h
index 2752718fa5b2..534639b43a1c 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.h
@@ -46,9 +46,6 @@ struct mtk_drm_gem_obj *mtk_drm_gem_create(struct drm_device *dev, size_t size,
bool alloc_kmap);
int mtk_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
struct drm_mode_create_dumb *args);
-int mtk_drm_gem_dumb_map_offset(struct drm_file *file_priv,
- struct drm_device *dev, uint32_t handle,
- uint64_t *offset);
int mtk_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
int mtk_drm_gem_mmap_buf(struct drm_gem_object *obj,
struct vm_area_struct *vma);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
index 1a59b9ab4aa8..6f121891430f 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
@@ -175,7 +175,7 @@ int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
err = drm_universal_plane_init(dev, plane, possible_crtcs,
&mtk_plane_funcs, formats,
- ARRAY_SIZE(formats), type, NULL);
+ ARRAY_SIZE(formats), NULL, type, NULL);
if (err) {
DRM_ERROR("failed to initialize plane\n");
return err;
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 97253c8f813b..7e5e24c2152a 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -766,7 +766,6 @@ static const struct drm_encoder_helper_funcs mtk_dsi_encoder_helper_funcs = {
};
static const struct drm_connector_funcs mtk_dsi_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
@@ -1048,8 +1047,8 @@ static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
ret = mtk_ddp_comp_register(drm, &dsi->ddp_comp);
if (ret < 0) {
- dev_err(dev, "Failed to register component %s: %d\n",
- dev->of_node->full_name, ret);
+ dev_err(dev, "Failed to register component %pOF: %d\n",
+ dev->of_node, ret);
return ret;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 71eb4fbbfc85..690c67507cbc 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -975,7 +975,7 @@ static int mtk_hdmi_setup_avi_infoframe(struct mtk_hdmi *hdmi,
u8 buffer[17];
ssize_t err;
- err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
if (err < 0) {
dev_err(hdmi->dev,
"Failed to get AVI infoframe from mode: %zd\n", err);
@@ -1261,7 +1261,6 @@ static struct drm_encoder *mtk_hdmi_conn_best_enc(struct drm_connector *conn)
}
static const struct drm_connector_funcs mtk_hdmi_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.detect = hdmi_conn_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = hdmi_conn_destroy,
@@ -1456,8 +1455,8 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
cec_pdev = of_find_device_by_node(cec_np);
if (!cec_pdev) {
- dev_err(hdmi->dev, "Waiting for CEC device %s\n",
- cec_np->full_name);
+ dev_err(hdmi->dev, "Waiting for CEC device %pOF\n",
+ cec_np);
return -EPROBE_DEFER;
}
hdmi->cec_dev = &cec_pdev->dev;
@@ -1501,8 +1500,8 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
i2c_np = of_parse_phandle(remote, "ddc-i2c-bus", 0);
if (!i2c_np) {
- dev_err(dev, "Failed to find ddc-i2c-bus node in %s\n",
- remote->full_name);
+ dev_err(dev, "Failed to find ddc-i2c-bus node in %pOF\n",
+ remote);
of_node_put(remote);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c
index c986eb03b9d9..5155f0179b61 100644
--- a/drivers/gpu/drm/meson/meson_crtc.c
+++ b/drivers/gpu/drm/meson/meson_crtc.c
@@ -79,7 +79,8 @@ static const struct drm_crtc_funcs meson_crtc_funcs = {
};
-static void meson_crtc_enable(struct drm_crtc *crtc)
+static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
struct drm_crtc_state *crtc_state = crtc->state;
@@ -102,7 +103,8 @@ static void meson_crtc_enable(struct drm_crtc *crtc)
priv->viu.osd1_enabled = true;
}
-static void meson_crtc_disable(struct drm_crtc *crtc)
+static void meson_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
struct meson_drm *priv = meson_crtc->priv;
@@ -149,10 +151,10 @@ static void meson_crtc_atomic_flush(struct drm_crtc *crtc,
}
static const struct drm_crtc_helper_funcs meson_crtc_helper_funcs = {
- .enable = meson_crtc_enable,
- .disable = meson_crtc_disable,
.atomic_begin = meson_crtc_atomic_begin,
.atomic_flush = meson_crtc_atomic_flush,
+ .atomic_enable = meson_crtc_atomic_enable,
+ .atomic_disable = meson_crtc_atomic_disable,
};
void meson_crtc_irq(struct meson_drm *priv)
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 4d98fac92795..7742c7d81ed8 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -116,8 +116,6 @@ static struct drm_driver meson_driver = {
/* GEM Ops */
.dumb_create = drm_gem_cma_dumb_create,
- .dumb_destroy = drm_gem_dumb_destroy,
- .dumb_map_offset = drm_gem_cma_dumb_map_offset,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
@@ -303,9 +301,8 @@ static const struct component_master_ops meson_drv_master_ops = {
static int compare_of(struct device *dev, void *data)
{
- DRM_DEBUG_DRIVER("Comparing of node %s with %s\n",
- of_node_full_name(dev->of_node),
- of_node_full_name(data));
+ DRM_DEBUG_DRIVER("Comparing of node %pOF with %pOF\n",
+ dev->of_node, data);
return dev->of_node == data;
}
diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
index a32d3b6e2e12..17e96fa47868 100644
--- a/drivers/gpu/drm/meson/meson_plane.c
+++ b/drivers/gpu/drm/meson/meson_plane.c
@@ -223,6 +223,7 @@ int meson_plane_create(struct meson_drm *priv)
&meson_plane_funcs,
supported_drm_formats,
ARRAY_SIZE(supported_drm_formats),
+ NULL,
DRM_PLANE_TYPE_PRIMARY, "meson_primary_plane");
drm_plane_helper_add(plane, &meson_plane_helper_funcs);
diff --git a/drivers/gpu/drm/meson/meson_venc_cvbs.c b/drivers/gpu/drm/meson/meson_venc_cvbs.c
index 00775b397dba..79d95ca8a0c0 100644
--- a/drivers/gpu/drm/meson/meson_venc_cvbs.c
+++ b/drivers/gpu/drm/meson/meson_venc_cvbs.c
@@ -118,7 +118,6 @@ static int meson_cvbs_connector_mode_valid(struct drm_connector *connector,
}
static const struct drm_connector_funcs meson_cvbs_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.detect = meson_cvbs_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = meson_cvbs_connector_destroy,
diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
index 63ba0699d107..1aad27813c23 100644
--- a/drivers/gpu/drm/mga/mga_drv.c
+++ b/drivers/gpu/drm/mga/mga_drv.c
@@ -62,7 +62,6 @@ static struct drm_driver driver = {
.load = mga_driver_load,
.unload = mga_driver_unload,
.lastclose = mga_driver_lastclose,
- .set_busid = drm_pci_set_busid,
.dma_quiescent = mga_driver_dma_quiescent,
.get_vblank_counter = mga_get_vblank_counter,
.enable_vblank = mga_enable_vblank,
@@ -90,12 +89,12 @@ static struct pci_driver mga_pci_driver = {
static int __init mga_init(void)
{
driver.num_ioctls = mga_max_ioctl;
- return drm_pci_init(&driver, &mga_pci_driver);
+ return drm_legacy_pci_init(&driver, &mga_pci_driver);
}
static void __exit mga_exit(void)
{
- drm_pci_exit(&driver, &mga_pci_driver);
+ drm_legacy_pci_exit(&driver, &mga_pci_driver);
}
module_init(mga_init);
diff --git a/drivers/gpu/drm/mgag200/mgag200_cursor.c b/drivers/gpu/drm/mgag200/mgag200_cursor.c
index 2ac3fcbfea7b..968e20379d54 100644
--- a/drivers/gpu/drm/mgag200/mgag200_cursor.c
+++ b/drivers/gpu/drm/mgag200/mgag200_cursor.c
@@ -248,7 +248,7 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
out_unreserve1:
mgag200_bo_unreserve(pixels_2);
out_unref:
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 9ac007880328..74cdde2ee474 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -91,7 +91,6 @@ static struct drm_driver driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET,
.load = mgag200_driver_load,
.unload = mgag200_driver_unload,
- .set_busid = drm_pci_set_busid,
.fops = &mgag200_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -103,7 +102,6 @@ static struct drm_driver driver = {
.gem_free_object_unlocked = mgag200_gem_free_object,
.dumb_create = mgag200_dumb_create,
.dumb_map_offset = mgag200_dumb_mmap_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
};
static struct pci_driver mgag200_pci_driver = {
@@ -120,12 +118,13 @@ static int __init mgag200_init(void)
if (mgag200_modeset == 0)
return -EINVAL;
- return drm_pci_init(&driver, &mgag200_pci_driver);
+
+ return pci_register_driver(&mgag200_pci_driver);
}
static void __exit mgag200_exit(void)
{
- drm_pci_exit(&driver, &mgag200_pci_driver);
+ pci_unregister_driver(&mgag200_pci_driver);
}
module_init(mgag200_init);
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index c88b6ec88dd2..04f1dfba12e5 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -237,11 +237,6 @@ mgag200_bo(struct ttm_buffer_object *bo)
{
return container_of(bo, struct mgag200_bo, bo);
}
- /* mgag200_crtc.c */
-void mga_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
- u16 blue, int regno);
-void mga_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, int regno);
/* mgag200_mode.c */
int mgag200_modeset_init(struct mga_device *mdev);
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index 5d3b1fac906f..30726c9fe28c 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -210,7 +210,6 @@ static int mgag200fb_create(struct drm_fb_helper *helper,
strcpy(info->fix.id, "mgadrmfb");
- info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &mgag200fb_ops;
/* setup aperture base/size for vesafb takeover */
@@ -233,7 +232,7 @@ static int mgag200fb_create(struct drm_fb_helper *helper,
err_alloc_fbi:
vfree(sysram);
err_sysram:
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return ret;
}
@@ -246,7 +245,7 @@ static int mga_fbdev_destroy(struct drm_device *dev,
drm_fb_helper_unregister_fbi(&mfbdev->helper);
if (mfb->obj) {
- drm_gem_object_unreference_unlocked(mfb->obj);
+ drm_gem_object_put_unlocked(mfb->obj);
mfb->obj = NULL;
}
drm_fb_helper_fini(&mfbdev->helper);
@@ -258,8 +257,6 @@ static int mga_fbdev_destroy(struct drm_device *dev,
}
static const struct drm_fb_helper_funcs mga_fb_helper_funcs = {
- .gamma_set = mga_crtc_fb_gamma_set,
- .gamma_get = mga_crtc_fb_gamma_get,
.fb_probe = mgag200fb_create,
};
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index dce8a3eb5a10..780f983b0294 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -18,7 +18,7 @@ static void mga_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct mga_framebuffer *mga_fb = to_mga_framebuffer(fb);
- drm_gem_object_unreference_unlocked(mga_fb->obj);
+ drm_gem_object_put_unlocked(mga_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(fb);
}
@@ -59,13 +59,13 @@ mgag200_user_framebuffer_create(struct drm_device *dev,
mga_fb = kzalloc(sizeof(*mga_fb), GFP_KERNEL);
if (!mga_fb) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ERR_PTR(-ENOMEM);
}
ret = mgag200_framebuffer_init(dev, mga_fb, mode_cmd, obj);
if (ret) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
kfree(mga_fb);
return ERR_PTR(ret);
}
@@ -317,7 +317,7 @@ int mgag200_dumb_create(struct drm_file *file,
return ret;
ret = drm_gem_handle_create(file, gobj, &handle);
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
if (ret)
return ret;
@@ -366,6 +366,6 @@ mgag200_dumb_mmap_offset(struct drm_file *file,
bo = gem_to_mga_bo(obj);
*offset = mgag200_bo_mmap_offset(bo);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return 0;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index f4b53588e071..5e9cd4c0e8b6 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -27,15 +27,19 @@
static void mga_crtc_load_lut(struct drm_crtc *crtc)
{
- struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct mga_device *mdev = dev->dev_private;
struct drm_framebuffer *fb = crtc->primary->fb;
+ u16 *r_ptr, *g_ptr, *b_ptr;
int i;
if (!crtc->enabled)
return;
+ r_ptr = crtc->gamma_store;
+ g_ptr = r_ptr + crtc->gamma_size;
+ b_ptr = g_ptr + crtc->gamma_size;
+
WREG8(DAC_INDEX + MGA1064_INDEX, 0);
if (fb && fb->format->cpp[0] * 8 == 16) {
@@ -46,25 +50,27 @@ static void mga_crtc_load_lut(struct drm_crtc *crtc)
if (i > (MGAG200_LUT_SIZE >> 1)) {
r = b = 0;
} else {
- r = mga_crtc->lut_r[i << 1];
- b = mga_crtc->lut_b[i << 1];
+ r = *r_ptr++ >> 8;
+ b = *b_ptr++ >> 8;
+ r_ptr++;
+ b_ptr++;
}
} else {
- r = mga_crtc->lut_r[i];
- b = mga_crtc->lut_b[i];
+ r = *r_ptr++ >> 8;
+ b = *b_ptr++ >> 8;
}
/* VGA registers */
WREG8(DAC_INDEX + MGA1064_COL_PAL, r);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_g[i]);
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, *g_ptr++ >> 8);
WREG8(DAC_INDEX + MGA1064_COL_PAL, b);
}
return;
}
for (i = 0; i < MGAG200_LUT_SIZE; i++) {
/* VGA registers */
- WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_r[i]);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_g[i]);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_b[i]);
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, *r_ptr++ >> 8);
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, *g_ptr++ >> 8);
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, *b_ptr++ >> 8);
}
}
@@ -1399,14 +1405,6 @@ static int mga_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, uint32_t size,
struct drm_modeset_acquire_ctx *ctx)
{
- struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
- int i;
-
- for (i = 0; i < size; i++) {
- mga_crtc->lut_r[i] = red[i] >> 8;
- mga_crtc->lut_g[i] = green[i] >> 8;
- mga_crtc->lut_b[i] = blue[i] >> 8;
- }
mga_crtc_load_lut(crtc);
return 0;
@@ -1455,14 +1453,12 @@ static const struct drm_crtc_helper_funcs mga_helper_funcs = {
.mode_set_base = mga_crtc_mode_set_base,
.prepare = mga_crtc_prepare,
.commit = mga_crtc_commit,
- .load_lut = mga_crtc_load_lut,
};
/* CRTC setup */
static void mga_crtc_init(struct mga_device *mdev)
{
struct mga_crtc *mga_crtc;
- int i;
mga_crtc = kzalloc(sizeof(struct mga_crtc) +
(MGAG200FB_CONN_LIMIT * sizeof(struct drm_connector *)),
@@ -1476,37 +1472,9 @@ static void mga_crtc_init(struct mga_device *mdev)
drm_mode_crtc_set_gamma_size(&mga_crtc->base, MGAG200_LUT_SIZE);
mdev->mode_info.crtc = mga_crtc;
- for (i = 0; i < MGAG200_LUT_SIZE; i++) {
- mga_crtc->lut_r[i] = i;
- mga_crtc->lut_g[i] = i;
- mga_crtc->lut_b[i] = i;
- }
-
drm_crtc_helper_add(&mga_crtc->base, &mga_helper_funcs);
}
-/** Sets the color ramps on behalf of fbcon */
-void mga_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
- u16 blue, int regno)
-{
- struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
-
- mga_crtc->lut_r[regno] = red >> 8;
- mga_crtc->lut_g[regno] = green >> 8;
- mga_crtc->lut_b[regno] = blue >> 8;
-}
-
-/** Gets the color ramps on behalf of fbcon */
-void mga_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, int regno)
-{
- struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
-
- *red = (u16)mga_crtc->lut_r[regno] << 8;
- *green = (u16)mga_crtc->lut_g[regno] << 8;
- *blue = (u16)mga_crtc->lut_b[regno] << 8;
-}
-
/*
* The encoder comes after the CRTC in the output pipeline, but before
* the connector. It's responsible for ensuring that the digital
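The mgag200_mode.c hunks above drop the driver's private lut_r/g/b byte arrays and read the core's crtc->gamma_store instead, which holds gamma_size 16-bit entries for red, green and blue laid out back to back; the DAC still takes 8-bit values, and in 16 bpp modes only every other stored entry is programmed for red and blue. The following editorial sketch shows that conversion in userspace; write_pal() and the table size are stand-ins for the real register I/O.

/* Sketch (not part of the patch): mapping a [R..][G..][B..] 16-bit gamma_store
 * onto 8-bit palette writes, as in mga_crtc_load_lut() above. */
#include <stdint.h>
#include <stdio.h>

#define LUT_SIZE 256

static void write_pal(uint8_t v)	/* stands in for WREG8(DAC_INDEX ...) */
{
	printf("%02x ", v);
}

static void load_lut(const uint16_t *gamma_store, int gamma_size, int bpp)
{
	const uint16_t *r = gamma_store;
	const uint16_t *g = r + gamma_size;
	const uint16_t *b = g + gamma_size;

	for (int i = 0; i < LUT_SIZE; i++) {
		uint8_t red, blue;

		if (bpp == 16) {
			/* 565: red/blue use every other entry and are zeroed
			 * above the halfway point; green keeps full resolution. */
			if (i > (LUT_SIZE >> 1)) {
				red = blue = 0;
			} else {
				red  = r[i << 1] >> 8;
				blue = b[i << 1] >> 8;
			}
		} else {
			red  = r[i] >> 8;
			blue = b[i] >> 8;
		}
		write_pal(red);
		write_pal(g[i] >> 8);
		write_pal(blue);
	}
	printf("\n");
}

int main(void)
{
	uint16_t store[3 * LUT_SIZE];

	for (int i = 0; i < LUT_SIZE; i++)	/* identity ramp */
		store[i] = store[i + LUT_SIZE] = store[i + 2 * LUT_SIZE] = i << 8;
	load_lut(store, LUT_SIZE, 32);
	return 0;
}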
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index b638d192ce5e..99d39b2aefa6 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -5,7 +5,7 @@ config DRM_MSM
depends on ARCH_QCOM || (ARM && COMPILE_TEST)
depends on OF && COMMON_CLK
depends on MMU
- select QCOM_MDT_LOADER
+ select QCOM_MDT_LOADER if ARCH_QCOM
select REGULATOR
select DRM_KMS_HELPER
select DRM_PANEL
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 0e3828ed1e46..7791313405b5 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -486,8 +486,6 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
adreno_gpu = &a3xx_gpu->base;
gpu = &adreno_gpu->base;
- a3xx_gpu->pdev = pdev;
-
gpu->perfcntrs = perfcntrs;
gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs);
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
index 85ff66cbddd6..ab60dc9e344e 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
@@ -28,7 +28,6 @@
struct a3xx_gpu {
struct adreno_gpu base;
- struct platform_device *pdev;
/* if OCMEM is used for GMEM: */
uint32_t ocmem_base;
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index 19abf229b08d..58341ef6f15b 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -568,8 +568,6 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
adreno_gpu = &a4xx_gpu->base;
gpu = &adreno_gpu->base;
- a4xx_gpu->pdev = pdev;
-
gpu->perfcntrs = NULL;
gpu->num_perfcntrs = 0;
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.h b/drivers/gpu/drm/msm/adreno/a4xx_gpu.h
index 01247204ac92..f757184328a3 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.h
@@ -23,7 +23,6 @@
struct a4xx_gpu {
struct adreno_gpu base;
- struct platform_device *pdev;
/* if OCMEM is used for GMEM: */
uint32_t ocmem_base;
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index b4b54f1c24bc..17c59d839e6f 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -15,7 +15,7 @@
#include <linux/cpumask.h>
#include <linux/qcom_scm.h>
#include <linux/dma-mapping.h>
-#include <linux/of_reserved_mem.h>
+#include <linux/of_address.h>
#include <linux/soc/qcom/mdt_loader.h>
#include "msm_gem.h"
#include "msm_mmu.h"
@@ -26,16 +26,34 @@ static void a5xx_dump(struct msm_gpu *gpu);
#define GPU_PAS_ID 13
-#if IS_ENABLED(CONFIG_QCOM_MDT_LOADER)
-
static int zap_shader_load_mdt(struct device *dev, const char *fwname)
{
const struct firmware *fw;
+ struct device_node *np;
+ struct resource r;
phys_addr_t mem_phys;
ssize_t mem_size;
void *mem_region = NULL;
int ret;
+ if (!IS_ENABLED(CONFIG_ARCH_QCOM))
+ return -EINVAL;
+
+ np = of_get_child_by_name(dev->of_node, "zap-shader");
+ if (!np)
+ return -ENODEV;
+
+ np = of_parse_phandle(np, "memory-region", 0);
+ if (!np)
+ return -EINVAL;
+
+ ret = of_address_to_resource(np, 0, &r);
+ if (ret)
+ return ret;
+
+ mem_phys = r.start;
+ mem_size = resource_size(&r);
+
/* Request the MDT file for the firmware */
ret = request_firmware(&fw, fwname, dev);
if (ret) {
@@ -51,7 +69,7 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname)
}
/* Allocate memory for the firmware image */
- mem_region = dmam_alloc_coherent(dev, mem_size, &mem_phys, GFP_KERNEL);
+ mem_region = memremap(mem_phys, mem_size, MEMREMAP_WC);
if (!mem_region) {
ret = -ENOMEM;
goto out;
@@ -69,16 +87,13 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname)
DRM_DEV_ERROR(dev, "Unable to authorize the image\n");
out:
+ if (mem_region)
+ memunmap(mem_region);
+
release_firmware(fw);
return ret;
}
-#else
-static int zap_shader_load_mdt(struct device *dev, const char *fwname)
-{
- return -ENODEV;
-}
-#endif
static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx)
@@ -117,12 +132,10 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
gpu->funcs->flush(gpu);
}
-struct a5xx_hwcg {
+static const struct {
u32 offset;
u32 value;
-};
-
-static const struct a5xx_hwcg a530_hwcg[] = {
+} a5xx_hwcg[] = {
{REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
{REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
{REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222},
@@ -217,38 +230,16 @@ static const struct a5xx_hwcg a530_hwcg[] = {
{REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
};
-static const struct {
- int (*test)(struct adreno_gpu *gpu);
- const struct a5xx_hwcg *regs;
- unsigned int count;
-} a5xx_hwcg_regs[] = {
- { adreno_is_a530, a530_hwcg, ARRAY_SIZE(a530_hwcg), },
-};
-
-static void _a5xx_enable_hwcg(struct msm_gpu *gpu,
- const struct a5xx_hwcg *regs, unsigned int count)
+void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
{
unsigned int i;
- for (i = 0; i < count; i++)
- gpu_write(gpu, regs[i].offset, regs[i].value);
+ for (i = 0; i < ARRAY_SIZE(a5xx_hwcg); i++)
+ gpu_write(gpu, a5xx_hwcg[i].offset,
+ state ? a5xx_hwcg[i].value : 0);
- gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0xAAA8AA00);
- gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, 0x182);
-}
-
-static void a5xx_enable_hwcg(struct msm_gpu *gpu)
-{
- struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(a5xx_hwcg_regs); i++) {
- if (a5xx_hwcg_regs[i].test(adreno_gpu)) {
- _a5xx_enable_hwcg(gpu, a5xx_hwcg_regs[i].regs,
- a5xx_hwcg_regs[i].count);
- return;
- }
- }
+ gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, state ? 0xAAA8AA00 : 0);
+ gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, state ? 0x182 : 0x180);
}
static int a5xx_me_init(struct msm_gpu *gpu)
@@ -293,28 +284,14 @@ static int a5xx_me_init(struct msm_gpu *gpu)
static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
const struct firmware *fw, u64 *iova)
{
- struct drm_device *drm = gpu->dev;
struct drm_gem_object *bo;
void *ptr;
- bo = msm_gem_new_locked(drm, fw->size - 4, MSM_BO_UNCACHED);
- if (IS_ERR(bo))
- return bo;
-
- ptr = msm_gem_get_vaddr(bo);
- if (!ptr) {
- drm_gem_object_unreference(bo);
- return ERR_PTR(-ENOMEM);
- }
-
- if (iova) {
- int ret = msm_gem_get_iova(bo, gpu->aspace, iova);
+ ptr = msm_gem_kernel_new_locked(gpu->dev, fw->size - 4,
+ MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova);
- if (ret) {
- drm_gem_object_unreference(bo);
- return ERR_PTR(ret);
- }
- }
+ if (IS_ERR(ptr))
+ return ERR_CAST(ptr);
memcpy(ptr, &fw->data[4], fw->size - 4);
@@ -377,51 +354,11 @@ static int a5xx_zap_shader_resume(struct msm_gpu *gpu)
return ret;
}
-/* Set up a child device to "own" the zap shader */
-static int a5xx_zap_shader_dev_init(struct device *parent, struct device *dev)
-{
- struct device_node *node;
- int ret;
-
- if (dev->parent)
- return 0;
-
- /* Find the sub-node for the zap shader */
- node = of_get_child_by_name(parent->of_node, "zap-shader");
- if (!node) {
- DRM_DEV_ERROR(parent, "zap-shader not found in device tree\n");
- return -ENODEV;
- }
-
- dev->parent = parent;
- dev->of_node = node;
- dev_set_name(dev, "adreno_zap_shader");
-
- ret = device_register(dev);
- if (ret) {
- DRM_DEV_ERROR(parent, "Couldn't register zap shader device\n");
- goto out;
- }
-
- ret = of_reserved_mem_device_init(dev);
- if (ret) {
- DRM_DEV_ERROR(parent, "Unable to set up the reserved memory\n");
- device_unregister(dev);
- }
-
-out:
- if (ret)
- dev->parent = NULL;
-
- return ret;
-}
-
static int a5xx_zap_shader_init(struct msm_gpu *gpu)
{
static bool loaded;
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
- struct platform_device *pdev = a5xx_gpu->pdev;
+ struct platform_device *pdev = gpu->pdev;
int ret;
/*
@@ -444,11 +381,7 @@ static int a5xx_zap_shader_init(struct msm_gpu *gpu)
return -ENODEV;
}
- ret = a5xx_zap_shader_dev_init(&pdev->dev, &a5xx_gpu->zap_dev);
-
- if (!ret)
- ret = zap_shader_load_mdt(&a5xx_gpu->zap_dev,
- adreno_gpu->info->zapfw);
+ ret = zap_shader_load_mdt(&pdev->dev, adreno_gpu->info->zapfw);
loaded = !ret;
@@ -462,6 +395,7 @@ static int a5xx_zap_shader_init(struct msm_gpu *gpu)
A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW | \
A5XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
+ A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT | \
A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
@@ -545,7 +479,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF);
/* Enable HWCG */
- a5xx_enable_hwcg(gpu);
+ a5xx_set_hwcg(gpu, true);
gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
@@ -691,9 +625,6 @@ static void a5xx_destroy(struct msm_gpu *gpu)
DBG("%s", gpu->name);
- if (a5xx_gpu->zap_dev.parent)
- device_unregister(&a5xx_gpu->zap_dev);
-
if (a5xx_gpu->pm4_bo) {
if (a5xx_gpu->pm4_iova)
msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
@@ -867,6 +798,27 @@ static void a5xx_gpmu_err_irq(struct msm_gpu *gpu)
dev_err_ratelimited(gpu->dev->dev, "GPMU | voltage droop\n");
}
+static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
+{
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+
+ dev_err(dev->dev, "gpu fault fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
+ gpu->funcs->last_fence(gpu),
+ gpu_read(gpu, REG_A5XX_RBBM_STATUS),
+ gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
+ gpu_read(gpu, REG_A5XX_CP_RB_WPTR),
+ gpu_read64(gpu, REG_A5XX_CP_IB1_BASE, REG_A5XX_CP_IB1_BASE_HI),
+ gpu_read(gpu, REG_A5XX_CP_IB1_BUFSZ),
+ gpu_read64(gpu, REG_A5XX_CP_IB2_BASE, REG_A5XX_CP_IB2_BASE_HI),
+ gpu_read(gpu, REG_A5XX_CP_IB2_BUFSZ));
+
+ /* Turn off the hangcheck timer to keep it from bothering us */
+ del_timer(&gpu->hangcheck_timer);
+
+ queue_work(priv->wq, &gpu->recover_work);
+}
+
#define RBBM_ERROR_MASK \
(A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
@@ -893,6 +845,9 @@ static irqreturn_t a5xx_irq(struct msm_gpu *gpu)
if (status & A5XX_RBBM_INT_0_MASK_CP_HW_ERROR)
a5xx_cp_err_irq(gpu);
+ if (status & A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT)
+ a5xx_fault_detect_irq(gpu);
+
if (status & A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS)
a5xx_uche_err_irq(gpu);
@@ -920,31 +875,30 @@ static const u32 a5xx_registers[] = {
0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B,
0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095,
0x0097, 0x00BB, 0x03A0, 0x0464, 0x0469, 0x046F, 0x04D2, 0x04D3,
- 0x04E0, 0x0533, 0x0540, 0x0555, 0xF400, 0xF400, 0xF800, 0xF807,
- 0x0800, 0x081A, 0x081F, 0x0841, 0x0860, 0x0860, 0x0880, 0x08A0,
- 0x0B00, 0x0B12, 0x0B15, 0x0B28, 0x0B78, 0x0B7F, 0x0BB0, 0x0BBD,
- 0x0BC0, 0x0BC6, 0x0BD0, 0x0C53, 0x0C60, 0x0C61, 0x0C80, 0x0C82,
- 0x0C84, 0x0C85, 0x0C90, 0x0C98, 0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2,
- 0x2180, 0x2185, 0x2580, 0x2585, 0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7,
- 0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8, 0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8,
- 0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E, 0x2100, 0x211E, 0x2140, 0x2145,
- 0x2500, 0x251E, 0x2540, 0x2545, 0x0D10, 0x0D17, 0x0D20, 0x0D23,
- 0x0D30, 0x0D30, 0x20C0, 0x20C0, 0x24C0, 0x24C0, 0x0E40, 0x0E43,
- 0x0E4A, 0x0E4A, 0x0E50, 0x0E57, 0x0E60, 0x0E7C, 0x0E80, 0x0E8E,
- 0x0E90, 0x0E96, 0x0EA0, 0x0EA8, 0x0EB0, 0x0EB2, 0xE140, 0xE147,
- 0xE150, 0xE187, 0xE1A0, 0xE1A9, 0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7,
- 0xE1D0, 0xE1D1, 0xE200, 0xE201, 0xE210, 0xE21C, 0xE240, 0xE268,
- 0xE000, 0xE006, 0xE010, 0xE09A, 0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB,
- 0xE100, 0xE105, 0xE380, 0xE38F, 0xE3B0, 0xE3B0, 0xE400, 0xE405,
- 0xE408, 0xE4E9, 0xE4F0, 0xE4F0, 0xE280, 0xE280, 0xE282, 0xE2A3,
- 0xE2A5, 0xE2C2, 0xE940, 0xE947, 0xE950, 0xE987, 0xE9A0, 0xE9A9,
- 0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7, 0xE9D0, 0xE9D1, 0xEA00, 0xEA01,
- 0xEA10, 0xEA1C, 0xEA40, 0xEA68, 0xE800, 0xE806, 0xE810, 0xE89A,
- 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB, 0xE900, 0xE905, 0xEB80, 0xEB8F,
- 0xEBB0, 0xEBB0, 0xEC00, 0xEC05, 0xEC08, 0xECE9, 0xECF0, 0xECF0,
- 0xEA80, 0xEA80, 0xEA82, 0xEAA3, 0xEAA5, 0xEAC2, 0xA800, 0xA8FF,
- 0xAC60, 0xAC60, 0xB000, 0xB97F, 0xB9A0, 0xB9BF,
- ~0
+ 0x04E0, 0x0533, 0x0540, 0x0555, 0x0800, 0x081A, 0x081F, 0x0841,
+ 0x0860, 0x0860, 0x0880, 0x08A0, 0x0B00, 0x0B12, 0x0B15, 0x0B28,
+ 0x0B78, 0x0B7F, 0x0BB0, 0x0BBD, 0x0BC0, 0x0BC6, 0x0BD0, 0x0C53,
+ 0x0C60, 0x0C61, 0x0C80, 0x0C82, 0x0C84, 0x0C85, 0x0C90, 0x0C98,
+ 0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2, 0x2180, 0x2185, 0x2580, 0x2585,
+ 0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7, 0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8,
+ 0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8, 0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E,
+ 0x2100, 0x211E, 0x2140, 0x2145, 0x2500, 0x251E, 0x2540, 0x2545,
+ 0x0D10, 0x0D17, 0x0D20, 0x0D23, 0x0D30, 0x0D30, 0x20C0, 0x20C0,
+ 0x24C0, 0x24C0, 0x0E40, 0x0E43, 0x0E4A, 0x0E4A, 0x0E50, 0x0E57,
+ 0x0E60, 0x0E7C, 0x0E80, 0x0E8E, 0x0E90, 0x0E96, 0x0EA0, 0x0EA8,
+ 0x0EB0, 0x0EB2, 0xE140, 0xE147, 0xE150, 0xE187, 0xE1A0, 0xE1A9,
+ 0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7, 0xE1D0, 0xE1D1, 0xE200, 0xE201,
+ 0xE210, 0xE21C, 0xE240, 0xE268, 0xE000, 0xE006, 0xE010, 0xE09A,
+ 0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB, 0xE100, 0xE105, 0xE380, 0xE38F,
+ 0xE3B0, 0xE3B0, 0xE400, 0xE405, 0xE408, 0xE4E9, 0xE4F0, 0xE4F0,
+ 0xE280, 0xE280, 0xE282, 0xE2A3, 0xE2A5, 0xE2C2, 0xE940, 0xE947,
+ 0xE950, 0xE987, 0xE9A0, 0xE9A9, 0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7,
+ 0xE9D0, 0xE9D1, 0xEA00, 0xEA01, 0xEA10, 0xEA1C, 0xEA40, 0xEA68,
+ 0xE800, 0xE806, 0xE810, 0xE89A, 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB,
+ 0xE900, 0xE905, 0xEB80, 0xEB8F, 0xEBB0, 0xEBB0, 0xEC00, 0xEC05,
+ 0xEC08, 0xECE9, 0xECF0, 0xECF0, 0xEA80, 0xEA80, 0xEA82, 0xEAA3,
+ 0xEAA5, 0xEAC2, 0xA800, 0xA8FF, 0xAC60, 0xAC60, 0xB000, 0xB97F,
+ 0xB9A0, 0xB9BF, ~0
};
static void a5xx_dump(struct msm_gpu *gpu)
@@ -1020,7 +974,14 @@ static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
{
seq_printf(m, "status: %08x\n",
gpu_read(gpu, REG_A5XX_RBBM_STATUS));
+
+ /*
+ * Temporarily disable hardware clock gating before going into
+ * adreno_show to avoid issues while reading the registers
+ */
+ a5xx_set_hwcg(gpu, false);
adreno_show(gpu, m);
+ a5xx_set_hwcg(gpu, true);
}
#endif
@@ -1064,7 +1025,6 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
adreno_gpu = &a5xx_gpu->base;
gpu = &adreno_gpu->base;
- a5xx_gpu->pdev = pdev;
adreno_gpu->registers = a5xx_registers;
adreno_gpu->reg_offsets = a5xx_register_offsets;
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
index 6638bc85645d..e94451685bf8 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
@@ -23,7 +23,6 @@
struct a5xx_gpu {
struct adreno_gpu base;
- struct platform_device *pdev;
struct drm_gem_object *pm4_bo;
uint64_t pm4_iova;
@@ -36,8 +35,6 @@ struct a5xx_gpu {
uint32_t gpmu_dwords;
uint32_t lm_leakage;
-
- struct device zap_dev;
};
#define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)
@@ -59,5 +56,6 @@ static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs,
}
bool a5xx_idle(struct msm_gpu *gpu);
+void a5xx_set_hwcg(struct msm_gpu *gpu, bool state);
#endif /* __A5XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
index 87af6eea0483..04aab1dcae2b 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_power.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -294,16 +294,10 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
*/
bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;
- a5xx_gpu->gpmu_bo = msm_gem_new_locked(drm, bosize, MSM_BO_UNCACHED);
- if (IS_ERR(a5xx_gpu->gpmu_bo))
- goto err;
-
- if (msm_gem_get_iova(a5xx_gpu->gpmu_bo, gpu->aspace,
- &a5xx_gpu->gpmu_iova))
- goto err;
-
- ptr = msm_gem_get_vaddr(a5xx_gpu->gpmu_bo);
- if (!ptr)
+ ptr = msm_gem_kernel_new_locked(drm, bosize,
+ MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace,
+ &a5xx_gpu->gpmu_bo, &a5xx_gpu->gpmu_iova);
+ if (IS_ERR(ptr))
goto err;
while (cmds_size > 0) {
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index f1ab2703674a..c8b4ac254bb5 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -48,8 +48,15 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
*value = adreno_gpu->base.fast_rate;
return 0;
case MSM_PARAM_TIMESTAMP:
- if (adreno_gpu->funcs->get_timestamp)
- return adreno_gpu->funcs->get_timestamp(gpu, value);
+ if (adreno_gpu->funcs->get_timestamp) {
+ int ret;
+
+ pm_runtime_get_sync(&gpu->pdev->dev);
+ ret = adreno_gpu->funcs->get_timestamp(gpu, value);
+ pm_runtime_put_autosuspend(&gpu->pdev->dev);
+
+ return ret;
+ }
return -EINVAL;
default:
DBG("%s: invalid param: %u", gpu->name, param);
@@ -330,11 +337,6 @@ void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords)
DRM_ERROR("%s: timeout waiting for ringbuffer space\n", gpu->name);
}
-static const char *iommu_ports[] = {
- "gfx3d_user", "gfx3d_priv",
- "gfx3d1_user", "gfx3d1_priv",
-};
-
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct adreno_gpu *adreno_gpu, const struct adreno_gpu_funcs *funcs)
{
@@ -366,15 +368,15 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
adreno_gpu_config.ringsz = RB_SIZE;
+ pm_runtime_set_autosuspend_delay(&pdev->dev, DRM_MSM_INACTIVE_PERIOD);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
adreno_gpu->info->name, &adreno_gpu_config);
if (ret)
return ret;
- pm_runtime_set_autosuspend_delay(&pdev->dev, DRM_MSM_INACTIVE_PERIOD);
- pm_runtime_use_autosuspend(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
-
ret = request_firmware(&adreno_gpu->pm4, adreno_gpu->info->pm4fw, drm->dev);
if (ret) {
dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n",
@@ -389,37 +391,17 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
return ret;
}
- if (gpu->aspace && gpu->aspace->mmu) {
- struct msm_mmu *mmu = gpu->aspace->mmu;
- ret = mmu->funcs->attach(mmu, iommu_ports,
- ARRAY_SIZE(iommu_ports));
- if (ret)
- return ret;
- }
+ adreno_gpu->memptrs = msm_gem_kernel_new(drm,
+ sizeof(*adreno_gpu->memptrs), MSM_BO_UNCACHED, gpu->aspace,
+ &adreno_gpu->memptrs_bo, &adreno_gpu->memptrs_iova);
- adreno_gpu->memptrs_bo = msm_gem_new(drm, sizeof(*adreno_gpu->memptrs),
- MSM_BO_UNCACHED);
- if (IS_ERR(adreno_gpu->memptrs_bo)) {
- ret = PTR_ERR(adreno_gpu->memptrs_bo);
- adreno_gpu->memptrs_bo = NULL;
- dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
- return ret;
- }
-
- adreno_gpu->memptrs = msm_gem_get_vaddr(adreno_gpu->memptrs_bo);
if (IS_ERR(adreno_gpu->memptrs)) {
- dev_err(drm->dev, "could not vmap memptrs\n");
- return -ENOMEM;
- }
-
- ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->aspace,
- &adreno_gpu->memptrs_iova);
- if (ret) {
- dev_err(drm->dev, "could not map memptrs: %d\n", ret);
- return ret;
+ ret = PTR_ERR(adreno_gpu->memptrs);
+ adreno_gpu->memptrs = NULL;
+ dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
}
- return 0;
+ return ret;
}
void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
@@ -439,10 +421,4 @@ void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
release_firmware(adreno_gpu->pfp);
msm_gpu_cleanup(gpu);
-
- if (gpu->aspace) {
- gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
- iommu_ports, ARRAY_SIZE(iommu_ports));
- msm_gem_address_space_put(gpu->aspace);
- }
}
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index 311c1c1e7d6c..98742d7af6dc 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -161,12 +161,17 @@ static const struct of_device_id dt_match[] = {
{}
};
+static const struct dev_pm_ops dsi_pm_ops = {
+ SET_RUNTIME_PM_OPS(msm_dsi_runtime_suspend, msm_dsi_runtime_resume, NULL)
+};
+
static struct platform_driver dsi_driver = {
.probe = dsi_dev_probe,
.remove = dsi_dev_remove,
.driver = {
.name = "msm_dsi",
.of_match_table = dt_match,
+ .pm = &dsi_pm_ops,
},
};
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 9e6017387efb..2302046197a8 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -179,6 +179,8 @@ void msm_dsi_host_destroy(struct mipi_dsi_host *host);
int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
struct drm_device *dev);
int msm_dsi_host_init(struct msm_dsi *msm_dsi);
+int msm_dsi_runtime_suspend(struct device *dev);
+int msm_dsi_runtime_resume(struct device *dev);
/* dsi phy */
struct msm_dsi_phy;
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 9e9c5696bc03..dbb31a014419 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -135,7 +135,6 @@ struct msm_dsi_host {
struct completion video_comp;
struct mutex dev_mutex;
struct mutex cmd_mutex;
- struct mutex clk_mutex;
spinlock_t intr_lock; /* Protect interrupt ctrl register */
u32 err_work_state;
@@ -221,6 +220,8 @@ static const struct msm_dsi_cfg_handler *dsi_get_config(
goto put_gdsc;
}
+ pm_runtime_get_sync(dev);
+
ret = regulator_enable(gdsc_reg);
if (ret) {
pr_err("%s: unable to enable gdsc\n", __func__);
@@ -247,6 +248,7 @@ disable_clks:
clk_disable_unprepare(ahb_clk);
disable_gdsc:
regulator_disable(gdsc_reg);
+ pm_runtime_put_autosuspend(dev);
put_clk:
clk_put(ahb_clk);
put_gdsc:
@@ -455,6 +457,34 @@ static void dsi_bus_clk_disable(struct msm_dsi_host *msm_host)
clk_disable_unprepare(msm_host->bus_clks[i]);
}
+int msm_dsi_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_dsi *msm_dsi = platform_get_drvdata(pdev);
+ struct mipi_dsi_host *host = msm_dsi->host;
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ if (!msm_host->cfg_hnd)
+ return 0;
+
+ dsi_bus_clk_disable(msm_host);
+
+ return 0;
+}
+
+int msm_dsi_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_dsi *msm_dsi = platform_get_drvdata(pdev);
+ struct mipi_dsi_host *host = msm_dsi->host;
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ if (!msm_host->cfg_hnd)
+ return 0;
+
+ return dsi_bus_clk_enable(msm_host);
+}
+
static int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
{
int ret;
@@ -596,35 +626,6 @@ static void dsi_link_clk_disable(struct msm_dsi_host *msm_host)
}
}
-static int dsi_clk_ctrl(struct msm_dsi_host *msm_host, bool enable)
-{
- int ret = 0;
-
- mutex_lock(&msm_host->clk_mutex);
- if (enable) {
- ret = dsi_bus_clk_enable(msm_host);
- if (ret) {
- pr_err("%s: Can not enable bus clk, %d\n",
- __func__, ret);
- goto unlock_ret;
- }
- ret = dsi_link_clk_enable(msm_host);
- if (ret) {
- pr_err("%s: Can not enable link clk, %d\n",
- __func__, ret);
- dsi_bus_clk_disable(msm_host);
- goto unlock_ret;
- }
- } else {
- dsi_link_clk_disable(msm_host);
- dsi_bus_clk_disable(msm_host);
- }
-
-unlock_ret:
- mutex_unlock(&msm_host->clk_mutex);
- return ret;
-}
-
static int dsi_calc_clk_rate(struct msm_dsi_host *msm_host)
{
struct drm_display_mode *mode = msm_host->mode;
@@ -1699,6 +1700,7 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
}
msm_host->pdev = pdev;
+ msm_dsi->host = &msm_host->base;
ret = dsi_host_parse_dt(msm_host);
if (ret) {
@@ -1713,6 +1715,8 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
goto fail;
}
+ pm_runtime_enable(&pdev->dev);
+
msm_host->cfg_hnd = dsi_get_config(msm_host);
if (!msm_host->cfg_hnd) {
ret = -EINVAL;
@@ -1753,7 +1757,6 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
init_completion(&msm_host->video_comp);
mutex_init(&msm_host->dev_mutex);
mutex_init(&msm_host->cmd_mutex);
- mutex_init(&msm_host->clk_mutex);
spin_lock_init(&msm_host->intr_lock);
/* setup workqueue */
@@ -1761,7 +1764,6 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
INIT_WORK(&msm_host->err_work, dsi_err_worker);
INIT_WORK(&msm_host->hpd_work, dsi_hpd_worker);
- msm_dsi->host = &msm_host->base;
msm_dsi->id = msm_host->id;
DBG("Dsi Host %d initialized", msm_host->id);
@@ -1783,9 +1785,10 @@ void msm_dsi_host_destroy(struct mipi_dsi_host *host)
msm_host->workqueue = NULL;
}
- mutex_destroy(&msm_host->clk_mutex);
mutex_destroy(&msm_host->cmd_mutex);
mutex_destroy(&msm_host->dev_mutex);
+
+ pm_runtime_disable(&msm_host->pdev->dev);
}
int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
@@ -1881,7 +1884,8 @@ int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
* mdss interrupt is generated in mdp core clock domain
* mdp clock need to be enabled to receive dsi interrupt
*/
- dsi_clk_ctrl(msm_host, 1);
+ pm_runtime_get_sync(&msm_host->pdev->dev);
+ dsi_link_clk_enable(msm_host);
/* TODO: vote for bus bandwidth */
@@ -1911,7 +1915,8 @@ void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
/* TODO: unvote for bus bandwidth */
- dsi_clk_ctrl(msm_host, 0);
+ dsi_link_clk_disable(msm_host);
+ pm_runtime_put_autosuspend(&msm_host->pdev->dev);
}
int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
@@ -2137,6 +2142,13 @@ void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
struct msm_dsi_phy_clk_request *clk_req)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ int ret;
+
+ ret = dsi_calc_clk_rate(msm_host);
+ if (ret) {
+ pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
+ return;
+ }
clk_req->bitclk_rate = msm_host->byte_clk_rate * 8;
clk_req->escclk_rate = msm_host->esc_clk_rate;
@@ -2153,8 +2165,11 @@ int msm_dsi_host_enable(struct mipi_dsi_host *host)
* and only turned on before MDP START.
* This part of code should be enabled once mdp driver support it.
*/
- /* if (msm_panel->mode == MSM_DSI_CMD_MODE)
- dsi_clk_ctrl(msm_host, 0); */
+ /* if (msm_panel->mode == MSM_DSI_CMD_MODE) {
+ * dsi_link_clk_disable(msm_host);
+ * pm_runtime_put_autosuspend(&msm_host->pdev->dev);
+ * }
+ */
return 0;
}
@@ -2210,9 +2225,11 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host,
goto unlock_ret;
}
- ret = dsi_clk_ctrl(msm_host, 1);
+ pm_runtime_get_sync(&msm_host->pdev->dev);
+ ret = dsi_link_clk_enable(msm_host);
if (ret) {
- pr_err("%s: failed to enable clocks. ret=%d\n", __func__, ret);
+ pr_err("%s: failed to enable link clocks. ret=%d\n",
+ __func__, ret);
goto fail_disable_reg;
}
@@ -2236,7 +2253,8 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host,
return 0;
fail_disable_clk:
- dsi_clk_ctrl(msm_host, 0);
+ dsi_link_clk_disable(msm_host);
+ pm_runtime_put_autosuspend(&msm_host->pdev->dev);
fail_disable_reg:
dsi_host_regulator_disable(msm_host);
unlock_ret:
@@ -2261,7 +2279,8 @@ int msm_dsi_host_power_off(struct mipi_dsi_host *host)
pinctrl_pm_select_sleep_state(&msm_host->pdev->dev);
- dsi_clk_ctrl(msm_host, 0);
+ dsi_link_clk_disable(msm_host);
+ pm_runtime_put_autosuspend(&msm_host->pdev->dev);
dsi_host_regulator_disable(msm_host);
@@ -2280,7 +2299,6 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
struct drm_display_mode *mode)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
- int ret;
if (msm_host->mode) {
drm_mode_destroy(msm_host->dev, msm_host->mode);
@@ -2293,12 +2311,6 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
return -ENOMEM;
}
- ret = dsi_calc_clk_rate(msm_host);
- if (ret) {
- pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
- return ret;
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index a879ffa534b4..855248132b2b 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -626,7 +626,6 @@ static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge,
}
static const struct drm_connector_funcs dsi_mgr_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.detect = dsi_mgr_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = dsi_mgr_connector_destroy,
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index 0c2eb9c9a1fc..7c9bf91bc22b 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -373,7 +373,7 @@ static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
static void dsi_phy_disable_resource(struct msm_dsi_phy *phy)
{
clk_disable_unprepare(phy->ahb_clk);
- pm_runtime_put_sync(&phy->pdev->dev);
+ pm_runtime_put_autosuspend(&phy->pdev->dev);
}
static const struct of_device_id dsi_phy_dt_match[] = {
diff --git a/drivers/gpu/drm/msm/edp/edp_connector.c b/drivers/gpu/drm/msm/edp/edp_connector.c
index 5960628ceb93..6f3fc6b0f0a3 100644
--- a/drivers/gpu/drm/msm/edp/edp_connector.c
+++ b/drivers/gpu/drm/msm/edp/edp_connector.c
@@ -92,7 +92,6 @@ static int edp_connector_mode_valid(struct drm_connector *connector,
}
static const struct drm_connector_funcs edp_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.detect = edp_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = edp_connector_destroy,
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index a968cad509c2..17e069a133a4 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -239,6 +239,8 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
hdmi->pwr_clks[i] = clk;
}
+ pm_runtime_enable(&pdev->dev);
+
hdmi->workq = alloc_ordered_workqueue("msm_hdmi", 0);
hdmi->i2c = msm_hdmi_i2c_init(hdmi);
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index ae40e7179d4f..7e357077ed26 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -35,6 +35,8 @@ static void msm_hdmi_power_on(struct drm_bridge *bridge)
const struct hdmi_platform_config *config = hdmi->config;
int i, ret;
+ pm_runtime_get_sync(&hdmi->pdev->dev);
+
for (i = 0; i < config->pwr_reg_cnt; i++) {
ret = regulator_enable(hdmi->pwr_regs[i]);
if (ret) {
@@ -84,6 +86,8 @@ static void power_off(struct drm_bridge *bridge)
config->pwr_reg_names[i], ret);
}
}
+
+ pm_runtime_put_autosuspend(&hdmi->pdev->dev);
}
#define AVI_IFRAME_LINE_NUMBER 1
@@ -97,7 +101,7 @@ static void msm_hdmi_config_avi_infoframe(struct hdmi *hdmi)
u32 val;
int len;
- drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode);
+ drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode, false);
len = hdmi_infoframe_pack(&frame, buffer, sizeof(buffer));
if (len < 0) {
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index a2515b466ce5..c0848dfedd50 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -137,6 +137,36 @@ err:
return ret;
}
+static void enable_hpd_clocks(struct hdmi *hdmi, bool enable)
+{
+ const struct hdmi_platform_config *config = hdmi->config;
+ struct device *dev = &hdmi->pdev->dev;
+ int i, ret;
+
+ if (enable) {
+ for (i = 0; i < config->hpd_clk_cnt; i++) {
+ if (config->hpd_freq && config->hpd_freq[i]) {
+ ret = clk_set_rate(hdmi->hpd_clks[i],
+ config->hpd_freq[i]);
+ if (ret)
+ dev_warn(dev,
+ "failed to set clk %s (%d)\n",
+ config->hpd_clk_names[i], ret);
+ }
+
+ ret = clk_prepare_enable(hdmi->hpd_clks[i]);
+ if (ret) {
+ dev_err(dev,
+ "failed to enable hpd clk: %s (%d)\n",
+ config->hpd_clk_names[i], ret);
+ }
+ }
+ } else {
+ for (i = config->hpd_clk_cnt - 1; i >= 0; i--)
+ clk_disable_unprepare(hdmi->hpd_clks[i]);
+ }
+}
+
static int hpd_enable(struct hdmi_connector *hdmi_connector)
{
struct hdmi *hdmi = hdmi_connector->hdmi;
@@ -167,22 +197,8 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
goto fail;
}
- for (i = 0; i < config->hpd_clk_cnt; i++) {
- if (config->hpd_freq && config->hpd_freq[i]) {
- ret = clk_set_rate(hdmi->hpd_clks[i],
- config->hpd_freq[i]);
- if (ret)
- dev_warn(dev, "failed to set clk %s (%d)\n",
- config->hpd_clk_names[i], ret);
- }
-
- ret = clk_prepare_enable(hdmi->hpd_clks[i]);
- if (ret) {
- dev_err(dev, "failed to enable hpd clk: %s (%d)\n",
- config->hpd_clk_names[i], ret);
- goto fail;
- }
- }
+ pm_runtime_get_sync(dev);
+ enable_hpd_clocks(hdmi, true);
msm_hdmi_set_mode(hdmi, false);
msm_hdmi_phy_reset(hdmi);
@@ -225,8 +241,8 @@ static void hdp_disable(struct hdmi_connector *hdmi_connector)
msm_hdmi_set_mode(hdmi, false);
- for (i = 0; i < config->hpd_clk_cnt; i++)
- clk_disable_unprepare(hdmi->hpd_clks[i]);
+ enable_hpd_clocks(hdmi, false);
+ pm_runtime_put_autosuspend(dev);
ret = gpio_config(hdmi, false);
if (ret)
@@ -285,7 +301,16 @@ void msm_hdmi_connector_irq(struct drm_connector *connector)
static enum drm_connector_status detect_reg(struct hdmi *hdmi)
{
- uint32_t hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
+ uint32_t hpd_int_status;
+
+ pm_runtime_get_sync(&hdmi->pdev->dev);
+ enable_hpd_clocks(hdmi, true);
+
+ hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
+
+ enable_hpd_clocks(hdmi, false);
+ pm_runtime_put_autosuspend(&hdmi->pdev->dev);
+
return (hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED) ?
connector_status_connected : connector_status_disconnected;
}
@@ -407,7 +432,6 @@ static int msm_hdmi_connector_mode_valid(struct drm_connector *connector,
}
static const struct drm_connector_funcs hdmi_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.detect = hdmi_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = hdmi_connector_destroy,
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 615e1def64d9..47fa2aba1983 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -279,7 +279,8 @@ static void mdp4_crtc_mode_set_nofb(struct drm_crtc *crtc)
}
}
-static void mdp4_crtc_disable(struct drm_crtc *crtc)
+static void mdp4_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct mdp4_kms *mdp4_kms = get_kms(crtc);
@@ -295,7 +296,8 @@ static void mdp4_crtc_disable(struct drm_crtc *crtc)
mdp4_crtc->enabled = false;
}
-static void mdp4_crtc_enable(struct drm_crtc *crtc)
+static void mdp4_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct mdp4_kms *mdp4_kms = get_kms(crtc);
@@ -482,7 +484,6 @@ static const struct drm_crtc_funcs mdp4_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.destroy = mdp4_crtc_destroy,
.page_flip = drm_atomic_helper_page_flip,
- .set_property = drm_atomic_helper_crtc_set_property,
.cursor_set = mdp4_crtc_cursor_set,
.cursor_move = mdp4_crtc_cursor_move,
.reset = drm_atomic_helper_crtc_reset,
@@ -492,11 +493,11 @@ static const struct drm_crtc_funcs mdp4_crtc_funcs = {
static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
.mode_set_nofb = mdp4_crtc_mode_set_nofb,
- .disable = mdp4_crtc_disable,
- .enable = mdp4_crtc_enable,
.atomic_check = mdp4_crtc_atomic_check,
.atomic_begin = mdp4_crtc_atomic_begin,
.atomic_flush = mdp4_crtc_atomic_flush,
+ .atomic_enable = mdp4_crtc_atomic_enable,
+ .atomic_disable = mdp4_crtc_atomic_disable,
};
static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index bcd1f5cac72c..f7f087419ed8 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -114,7 +114,7 @@ static void mdp4_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *st
mdp4_enable(mdp4_kms);
/* see 119ecb7fd */
- for_each_crtc_in_state(state, crtc, crtc_state, i)
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i)
drm_crtc_vblank_get(crtc);
}
@@ -126,7 +126,7 @@ static void mdp4_complete_commit(struct msm_kms *kms, struct drm_atomic_state *s
struct drm_crtc_state *crtc_state;
/* see 119ecb7fd */
- for_each_crtc_in_state(state, crtc, crtc_state, i)
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i)
drm_crtc_vblank_put(crtc);
mdp4_disable(mdp4_kms);
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
index 353429b05733..e3b1c86b7aae 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
@@ -91,7 +91,6 @@ static int mdp4_lvds_connector_mode_valid(struct drm_connector *connector,
}
static const struct drm_connector_funcs mdp4_lvds_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.detect = mdp4_lvds_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = mdp4_lvds_connector_destroy,
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index a20e3d644523..7a1ad3af08e3 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -401,7 +401,7 @@ struct drm_plane *mdp4_plane_init(struct drm_device *dev,
type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
ret = drm_universal_plane_init(dev, plane, 0xff, &mdp4_plane_funcs,
mdp4_plane->formats, mdp4_plane->nformats,
- type, NULL);
+ NULL, type, NULL);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
index aa7402e03f67..60790df91bfa 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
@@ -192,6 +192,7 @@ int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
{
struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder);
struct mdp5_kms *mdp5_kms;
+ struct device *dev;
int intf_num;
u32 data = 0;
@@ -214,14 +215,16 @@ int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
/* Smart Panel, Sync mode */
data |= MDP5_SPLIT_DPL_UPPER_SMART_PANEL;
+ dev = &mdp5_kms->pdev->dev;
+
/* Make sure clocks are on when connectors call this function. */
- mdp5_enable(mdp5_kms);
+ pm_runtime_get_sync(dev);
mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, data);
mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER,
MDP5_SPLIT_DPL_LOWER_SMART_PANEL);
mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1);
- mdp5_disable(mdp5_kms);
+ pm_runtime_put_autosuspend(dev);
return 0;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index cb5415d6c04b..6fcb58ab718c 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -221,8 +221,8 @@ static void blend_setup(struct drm_crtc *crtc)
struct mdp5_ctl *ctl = mdp5_cstate->ctl;
uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
unsigned long flags;
- enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE };
- enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE };
+ enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
+ enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
int i, plane_cnt = 0;
bool bg_alpha_enabled = false;
u32 mixer_op_mode = 0;
@@ -409,11 +409,13 @@ static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}
-static void mdp5_crtc_disable(struct drm_crtc *crtc)
+static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ struct device *dev = &mdp5_kms->pdev->dev;
DBG("%s", crtc->name);
@@ -424,23 +426,28 @@ static void mdp5_crtc_disable(struct drm_crtc *crtc)
mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->pp_done);
mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
- mdp5_disable(mdp5_kms);
+ pm_runtime_put_autosuspend(dev);
mdp5_crtc->enabled = false;
}
-static void mdp5_crtc_enable(struct drm_crtc *crtc)
+static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ struct device *dev = &mdp5_kms->pdev->dev;
DBG("%s", crtc->name);
if (WARN_ON(mdp5_crtc->enabled))
return;
- mdp5_enable(mdp5_kms);
+ pm_runtime_get_sync(dev);
+
+ mdp5_crtc_mode_set_nofb(crtc);
+
mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
if (mdp5_cstate->cmd_mode)
@@ -531,7 +538,7 @@ static bool is_fullscreen(struct drm_crtc_state *cstate,
((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay);
}
-enum mdp_mixer_stage_id get_start_stage(struct drm_crtc *crtc,
+static enum mdp_mixer_stage_id get_start_stage(struct drm_crtc *crtc,
struct drm_crtc_state *new_crtc_state,
struct drm_plane_state *bpstate)
{
@@ -725,6 +732,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
struct drm_device *dev = crtc->dev;
struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ struct platform_device *pdev = mdp5_kms->pdev;
struct msm_kms *kms = &mdp5_kms->base.base;
struct drm_gem_object *cursor_bo, *old_bo = NULL;
uint32_t blendcfg, stride;
@@ -753,6 +761,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
if (!handle) {
DBG("Cursor off");
cursor_enable = false;
+ pm_runtime_get_sync(&pdev->dev);
goto set_cursor;
}
@@ -767,6 +776,8 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
lm = mdp5_cstate->pipeline.mixer->lm;
stride = width * drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0);
+ pm_runtime_get_sync(&pdev->dev);
+
spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
old_bo = mdp5_crtc->cursor.scanout_bo;
@@ -793,6 +804,8 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
+ pm_runtime_put_autosuspend(&pdev->dev);
+
set_cursor:
ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
if (ret) {
@@ -804,6 +817,7 @@ set_cursor:
crtc_flush(crtc, flush_mask);
end:
+ pm_runtime_put_autosuspend(&pdev->dev);
if (old_bo) {
drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
/* enable vblank to complete cursor work: */
@@ -836,6 +850,8 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
get_roi(crtc, &roi_w, &roi_h);
+ pm_runtime_get_sync(&mdp5_kms->pdev->dev);
+
spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
@@ -847,6 +863,8 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
crtc_flush(crtc, flush_mask);
+ pm_runtime_put_autosuspend(&mdp5_kms->pdev->dev);
+
return 0;
}
@@ -917,7 +935,6 @@ static const struct drm_crtc_funcs mdp5_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.destroy = mdp5_crtc_destroy,
.page_flip = drm_atomic_helper_page_flip,
- .set_property = drm_atomic_helper_crtc_set_property,
.reset = mdp5_crtc_reset,
.atomic_duplicate_state = mdp5_crtc_duplicate_state,
.atomic_destroy_state = mdp5_crtc_destroy_state,
@@ -930,7 +947,6 @@ static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = {
.set_config = drm_atomic_helper_set_config,
.destroy = mdp5_crtc_destroy,
.page_flip = drm_atomic_helper_page_flip,
- .set_property = drm_atomic_helper_crtc_set_property,
.reset = mdp5_crtc_reset,
.atomic_duplicate_state = mdp5_crtc_duplicate_state,
.atomic_destroy_state = mdp5_crtc_destroy_state,
@@ -939,11 +955,11 @@ static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = {
static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
.mode_set_nofb = mdp5_crtc_mode_set_nofb,
- .disable = mdp5_crtc_disable,
- .enable = mdp5_crtc_enable,
.atomic_check = mdp5_crtc_atomic_check,
.atomic_begin = mdp5_crtc_atomic_begin,
.atomic_flush = mdp5_crtc_atomic_flush,
+ .atomic_enable = mdp5_crtc_atomic_enable,
+ .atomic_disable = mdp5_crtc_atomic_disable,
};
static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index 97f3294fbfc6..5b851380d3f2 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -297,9 +297,13 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_interface *intf = mdp5_encoder->intf;
+ /* this isn't right I think */
+ struct drm_crtc_state *cstate = encoder->crtc->state;
+
+ mdp5_encoder_mode_set(encoder, &cstate->mode, &cstate->adjusted_mode);
if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
- mdp5_cmd_encoder_disable(encoder);
+ mdp5_cmd_encoder_enable(encoder);
else
mdp5_vid_encoder_enable(encoder);
}
@@ -320,7 +324,6 @@ static int mdp5_encoder_atomic_check(struct drm_encoder *encoder,
}
static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = {
- .mode_set = mdp5_encoder_mode_set,
.disable = mdp5_encoder_disable,
.enable = mdp5_encoder_enable,
.atomic_check = mdp5_encoder_atomic_check,
@@ -350,6 +353,7 @@ int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder,
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_encoder *mdp5_slave_enc = to_mdp5_encoder(slave_encoder);
struct mdp5_kms *mdp5_kms;
+ struct device *dev;
int intf_num;
u32 data = 0;
@@ -369,8 +373,10 @@ int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder,
else
return -EINVAL;
+ dev = &mdp5_kms->pdev->dev;
/* Make sure clocks are on when connectors call this function. */
- mdp5_enable(mdp5_kms);
+ pm_runtime_get_sync(dev);
+
/* Dumb Panel, Sync mode */
mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, 0);
mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER, data);
@@ -378,7 +384,7 @@ int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder,
mdp5_ctl_pair(mdp5_encoder->ctl, mdp5_slave_enc->ctl, true);
- mdp5_disable(mdp5_kms);
+ pm_runtime_put_autosuspend(dev);
return 0;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
index 3ce8b9dec9c1..bb5deb00c899 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -49,16 +49,19 @@ static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
void mdp5_irq_preinstall(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
- mdp5_enable(mdp5_kms);
+ struct device *dev = &mdp5_kms->pdev->dev;
+
+ pm_runtime_get_sync(dev);
mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff);
mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
- mdp5_disable(mdp5_kms);
+ pm_runtime_put_autosuspend(dev);
}
int mdp5_irq_postinstall(struct msm_kms *kms)
{
struct mdp_kms *mdp_kms = to_mdp_kms(kms);
struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
+ struct device *dev = &mdp5_kms->pdev->dev;
struct mdp_irq *error_handler = &mdp5_kms->error_handler;
error_handler->irq = mdp5_irq_error_handler;
@@ -67,9 +70,9 @@ int mdp5_irq_postinstall(struct msm_kms *kms)
MDP5_IRQ_INTF2_UNDER_RUN |
MDP5_IRQ_INTF3_UNDER_RUN;
- mdp5_enable(mdp5_kms);
+ pm_runtime_get_sync(dev);
mdp_irq_register(mdp_kms, error_handler);
- mdp5_disable(mdp5_kms);
+ pm_runtime_put_autosuspend(dev);
return 0;
}
@@ -77,9 +80,11 @@ int mdp5_irq_postinstall(struct msm_kms *kms)
void mdp5_irq_uninstall(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
- mdp5_enable(mdp5_kms);
+ struct device *dev = &mdp5_kms->pdev->dev;
+
+ pm_runtime_get_sync(dev);
mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
- mdp5_disable(mdp5_kms);
+ pm_runtime_put_autosuspend(dev);
}
irqreturn_t mdp5_irq(struct msm_kms *kms)
@@ -109,11 +114,12 @@ irqreturn_t mdp5_irq(struct msm_kms *kms)
int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ struct device *dev = &mdp5_kms->pdev->dev;
- mdp5_enable(mdp5_kms);
+ pm_runtime_get_sync(dev);
mdp_update_vblank_mask(to_mdp_kms(kms),
mdp5_crtc_vblank(crtc), true);
- mdp5_disable(mdp5_kms);
+ pm_runtime_put_autosuspend(dev);
return 0;
}
@@ -121,9 +127,10 @@ int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ struct device *dev = &mdp5_kms->pdev->dev;
- mdp5_enable(mdp5_kms);
+ pm_runtime_get_sync(dev);
mdp_update_vblank_mask(to_mdp_kms(kms),
mdp5_crtc_vblank(crtc), false);
- mdp5_disable(mdp5_kms);
+ pm_runtime_put_autosuspend(dev);
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 5d13fa5381ee..f7c0698fec40 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -30,11 +30,10 @@ static const char *iommu_ports[] = {
static int mdp5_hw_init(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
- struct platform_device *pdev = mdp5_kms->pdev;
+ struct device *dev = &mdp5_kms->pdev->dev;
unsigned long flags;
- pm_runtime_get_sync(&pdev->dev);
- mdp5_enable(mdp5_kms);
+ pm_runtime_get_sync(dev);
/* Magic unknown register writes:
*
@@ -66,8 +65,7 @@ static int mdp5_hw_init(struct msm_kms *kms)
mdp5_ctlm_hw_reset(mdp5_kms->ctlm);
- mdp5_disable(mdp5_kms);
- pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_put_sync(dev);
return 0;
}
@@ -111,8 +109,9 @@ static void mdp5_swap_state(struct msm_kms *kms, struct drm_atomic_state *state)
static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ struct device *dev = &mdp5_kms->pdev->dev;
- mdp5_enable(mdp5_kms);
+ pm_runtime_get_sync(dev);
if (mdp5_kms->smp)
mdp5_smp_prepare_commit(mdp5_kms->smp, &mdp5_kms->state->smp);
@@ -121,11 +120,12 @@ static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *st
static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ struct device *dev = &mdp5_kms->pdev->dev;
if (mdp5_kms->smp)
mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp);
- mdp5_disable(mdp5_kms);
+ pm_runtime_put_autosuspend(dev);
}
static void mdp5_wait_for_crtc_commit_done(struct msm_kms *kms,
@@ -249,6 +249,9 @@ int mdp5_disable(struct mdp5_kms *mdp5_kms)
{
DBG("");
+ mdp5_kms->enable_count--;
+ WARN_ON(mdp5_kms->enable_count < 0);
+
clk_disable_unprepare(mdp5_kms->ahb_clk);
clk_disable_unprepare(mdp5_kms->axi_clk);
clk_disable_unprepare(mdp5_kms->core_clk);
@@ -262,6 +265,8 @@ int mdp5_enable(struct mdp5_kms *mdp5_kms)
{
DBG("");
+ mdp5_kms->enable_count++;
+
clk_prepare_enable(mdp5_kms->ahb_clk);
clk_prepare_enable(mdp5_kms->axi_clk);
clk_prepare_enable(mdp5_kms->core_clk);
@@ -486,11 +491,12 @@ fail:
static void read_mdp_hw_revision(struct mdp5_kms *mdp5_kms,
u32 *major, u32 *minor)
{
+ struct device *dev = &mdp5_kms->pdev->dev;
u32 version;
- mdp5_enable(mdp5_kms);
+ pm_runtime_get_sync(dev);
version = mdp5_read(mdp5_kms, REG_MDP5_HW_VERSION);
- mdp5_disable(mdp5_kms);
+ pm_runtime_put_autosuspend(dev);
*major = FIELD(version, MDP5_HW_VERSION_MAJOR);
*minor = FIELD(version, MDP5_HW_VERSION_MINOR);
@@ -502,7 +508,7 @@ static int get_clk(struct platform_device *pdev, struct clk **clkp,
const char *name, bool mandatory)
{
struct device *dev = &pdev->dev;
- struct clk *clk = devm_clk_get(dev, name);
+ struct clk *clk = msm_clk_get(pdev, name);
if (IS_ERR(clk) && mandatory) {
dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
return PTR_ERR(clk);
@@ -643,7 +649,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
* have left things on, in which case we'll start getting faults if
* we don't disable):
*/
- mdp5_enable(mdp5_kms);
+ pm_runtime_get_sync(&pdev->dev);
for (i = 0; i < MDP5_INTF_NUM_MAX; i++) {
if (mdp5_cfg_intf_is_virtual(config->hw->intf.connect[i]) ||
!config->hw->intf.base[i])
@@ -652,7 +658,6 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(i), 0x3);
}
- mdp5_disable(mdp5_kms);
mdelay(16);
if (config->platform.iommu) {
@@ -678,6 +683,8 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
aspace = NULL;
}
+ pm_runtime_put_autosuspend(&pdev->dev);
+
ret = modeset_init(mdp5_kms);
if (ret) {
dev_err(&pdev->dev, "modeset_init failed: %d\n", ret);
@@ -887,21 +894,21 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
}
/* mandatory clocks: */
- ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus_clk", true);
+ ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus", true);
if (ret)
goto fail;
- ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk", true);
+ ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface", true);
if (ret)
goto fail;
- ret = get_clk(pdev, &mdp5_kms->core_clk, "core_clk", true);
+ ret = get_clk(pdev, &mdp5_kms->core_clk, "core", true);
if (ret)
goto fail;
- ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk", true);
+ ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync", true);
if (ret)
goto fail;
/* optional clocks: */
- get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk", false);
+ get_clk(pdev, &mdp5_kms->lut_clk, "lut", false);
/* we need to set a default rate before enabling. Set a safe
* rate first, then figure out hw revision, and then set a
@@ -1005,6 +1012,30 @@ static int mdp5_dev_remove(struct platform_device *pdev)
return 0;
}
+static __maybe_unused int mdp5_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev);
+
+ DBG("");
+
+ return mdp5_disable(mdp5_kms);
+}
+
+static __maybe_unused int mdp5_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev);
+
+ DBG("");
+
+ return mdp5_enable(mdp5_kms);
+}
+
+static const struct dev_pm_ops mdp5_pm_ops = {
+ SET_RUNTIME_PM_OPS(mdp5_runtime_suspend, mdp5_runtime_resume, NULL)
+};
+
static const struct of_device_id mdp5_dt_match[] = {
{ .compatible = "qcom,mdp5", },
/* to support downstream DT files */
@@ -1019,6 +1050,7 @@ static struct platform_driver mdp5_driver = {
.driver = {
.name = "msm_mdp",
.of_match_table = mdp5_dt_match,
+ .pm = &mdp5_pm_ops,
},
};
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 17caa0e8c8ae..9b3fe01089d1 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -76,6 +76,8 @@ struct mdp5_kms {
bool rpm_enabled;
struct mdp_irq error_handler;
+
+ int enable_count;
};
#define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base)
@@ -167,11 +169,13 @@ struct mdp5_encoder {
static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data)
{
+ WARN_ON(mdp5_kms->enable_count <= 0);
msm_writel(data, mdp5_kms->mmio + reg);
}
static inline u32 mdp5_read(struct mdp5_kms *mdp5_kms, u32 reg)
{
+ WARN_ON(mdp5_kms->enable_count <= 0);
return msm_readl(mdp5_kms->mmio + reg);
}
@@ -255,9 +259,6 @@ static inline uint32_t lm2ppdone(struct mdp5_hw_mixer *mixer)
return MDP5_IRQ_PING_PONG_0_DONE << mixer->pp;
}
-int mdp5_disable(struct mdp5_kms *mdp5_kms);
-int mdp5_enable(struct mdp5_kms *mdp5_kms);
-
void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
uint32_t old_irqmask);
void mdp5_irq_preinstall(struct msm_kms *kms);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c
index 9c34d7824988..f2a0db7a8a03 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c
@@ -31,6 +31,10 @@ struct msm_mdss {
struct regulator *vdd;
+ struct clk *ahb_clk;
+ struct clk *axi_clk;
+ struct clk *vsync_clk;
+
struct {
volatile unsigned long enabled_mask;
struct irq_domain *domain;
@@ -140,6 +144,51 @@ static int mdss_irq_domain_init(struct msm_mdss *mdss)
return 0;
}
+int msm_mdss_enable(struct msm_mdss *mdss)
+{
+ DBG("");
+
+ clk_prepare_enable(mdss->ahb_clk);
+ if (mdss->axi_clk)
+ clk_prepare_enable(mdss->axi_clk);
+ if (mdss->vsync_clk)
+ clk_prepare_enable(mdss->vsync_clk);
+
+ return 0;
+}
+
+int msm_mdss_disable(struct msm_mdss *mdss)
+{
+ DBG("");
+
+ if (mdss->vsync_clk)
+ clk_disable_unprepare(mdss->vsync_clk);
+ if (mdss->axi_clk)
+ clk_disable_unprepare(mdss->axi_clk);
+ clk_disable_unprepare(mdss->ahb_clk);
+
+ return 0;
+}
+
+static int msm_mdss_get_clocks(struct msm_mdss *mdss)
+{
+ struct platform_device *pdev = to_platform_device(mdss->dev->dev);
+
+ mdss->ahb_clk = msm_clk_get(pdev, "iface");
+ if (IS_ERR(mdss->ahb_clk))
+ mdss->ahb_clk = NULL;
+
+ mdss->axi_clk = msm_clk_get(pdev, "bus");
+ if (IS_ERR(mdss->axi_clk))
+ mdss->axi_clk = NULL;
+
+ mdss->vsync_clk = msm_clk_get(pdev, "vsync");
+ if (IS_ERR(mdss->vsync_clk))
+ mdss->vsync_clk = NULL;
+
+ return 0;
+}
+
void msm_mdss_destroy(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
@@ -153,8 +202,6 @@ void msm_mdss_destroy(struct drm_device *dev)
regulator_disable(mdss->vdd);
- pm_runtime_put_sync(dev->dev);
-
pm_runtime_disable(dev->dev);
}
@@ -190,6 +237,12 @@ int msm_mdss_init(struct drm_device *dev)
goto fail;
}
+ ret = msm_mdss_get_clocks(mdss);
+ if (ret) {
+ dev_err(dev->dev, "failed to get clocks: %d\n", ret);
+ goto fail;
+ }
+
/* Regulator to enable GDSCs in downstream kernels */
mdss->vdd = devm_regulator_get(dev->dev, "vdd");
if (IS_ERR(mdss->vdd)) {
@@ -221,12 +274,6 @@ int msm_mdss_init(struct drm_device *dev)
pm_runtime_enable(dev->dev);
- /*
- * TODO: This is needed as the MDSS GDSC is only tied to MDSS's power
- * domain. Remove this once runtime PM is adapted for all the devices.
- */
- pm_runtime_get_sync(dev->dev);
-
return 0;
fail_irq:
regulator_disable(mdss->vdd);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index fe3a4de1a433..4b22ac3413a1 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -246,7 +246,6 @@ static const struct drm_plane_funcs mdp5_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = mdp5_plane_destroy,
- .set_property = drm_atomic_helper_plane_set_property,
.atomic_set_property = mdp5_plane_atomic_set_property,
.atomic_get_property = mdp5_plane_atomic_get_property,
.reset = mdp5_plane_reset,
@@ -259,7 +258,6 @@ static const struct drm_plane_funcs mdp5_cursor_plane_funcs = {
.update_plane = mdp5_update_cursor_plane_legacy,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = mdp5_plane_destroy,
- .set_property = drm_atomic_helper_plane_set_property,
.atomic_set_property = mdp5_plane_atomic_set_property,
.atomic_get_property = mdp5_plane_atomic_get_property,
.reset = mdp5_plane_reset,
@@ -890,8 +888,8 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
struct mdp5_hw_pipe *right_hwpipe;
const struct mdp_format *format;
uint32_t nplanes, config = 0;
- struct phase_step step = { 0 };
- struct pixel_ext pe = { 0 };
+ struct phase_step step = { { 0 } };
+ struct pixel_ext pe = { { 0 } };
uint32_t hdecm = 0, vdecm = 0;
uint32_t pix_format;
unsigned int rotation;
@@ -1139,12 +1137,12 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev,
ret = drm_universal_plane_init(dev, plane, 0xff,
&mdp5_cursor_plane_funcs,
mdp5_plane->formats, mdp5_plane->nformats,
- type, NULL);
+ NULL, type, NULL);
else
ret = drm_universal_plane_init(dev, plane, 0xff,
&mdp5_plane_funcs,
mdp5_plane->formats, mdp5_plane->nformats,
- type, NULL);
+ NULL, type, NULL);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
index 58f712d37e7f..ae4983d9d0a5 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
@@ -28,6 +28,13 @@ struct mdp5_smp {
int blk_cnt;
int blk_size;
+
+ /* register cache */
+ u32 alloc_w[22];
+ u32 alloc_r[22];
+ u32 pipe_reqprio_fifo_wm0[SSPP_MAX];
+ u32 pipe_reqprio_fifo_wm1[SSPP_MAX];
+ u32 pipe_reqprio_fifo_wm2[SSPP_MAX];
};
static inline
@@ -98,16 +105,15 @@ static int smp_request_block(struct mdp5_smp *smp,
static void set_fifo_thresholds(struct mdp5_smp *smp,
enum mdp5_pipe pipe, int nblks)
{
- struct mdp5_kms *mdp5_kms = get_kms(smp);
u32 smp_entries_per_blk = smp->blk_size / (128 / BITS_PER_BYTE);
u32 val;
/* 1/4 of SMP pool that is being fetched */
val = (nblks * smp_entries_per_blk) / 4;
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), val * 1);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), val * 2);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), val * 3);
+ smp->pipe_reqprio_fifo_wm0[pipe] = val * 1;
+ smp->pipe_reqprio_fifo_wm1[pipe] = val * 2;
+ smp->pipe_reqprio_fifo_wm2[pipe] = val * 3;
}
/*
@@ -222,7 +228,6 @@ void mdp5_smp_release(struct mdp5_smp *smp, struct mdp5_smp_state *state,
static unsigned update_smp_state(struct mdp5_smp *smp,
u32 cid, mdp5_smp_state_t *assigned)
{
- struct mdp5_kms *mdp5_kms = get_kms(smp);
int cnt = smp->blk_cnt;
unsigned nblks = 0;
u32 blk, val;
@@ -231,7 +236,7 @@ static unsigned update_smp_state(struct mdp5_smp *smp,
int idx = blk / 3;
int fld = blk % 3;
- val = mdp5_read(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx));
+ val = smp->alloc_w[idx];
switch (fld) {
case 0:
@@ -248,8 +253,8 @@ static unsigned update_smp_state(struct mdp5_smp *smp,
break;
}
- mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx), val);
- mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(idx), val);
+ smp->alloc_w[idx] = val;
+ smp->alloc_r[idx] = val;
nblks++;
}
@@ -257,6 +262,39 @@ static unsigned update_smp_state(struct mdp5_smp *smp,
return nblks;
}
+static void write_smp_alloc_regs(struct mdp5_smp *smp)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(smp);
+ int i, num_regs;
+
+ num_regs = smp->blk_cnt / 3 + 1;
+
+ for (i = 0; i < num_regs; i++) {
+ mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(i),
+ smp->alloc_w[i]);
+ mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(i),
+ smp->alloc_r[i]);
+ }
+}
+
+static void write_smp_fifo_regs(struct mdp5_smp *smp)
+{
+ struct mdp5_kms *mdp5_kms = get_kms(smp);
+ int i;
+
+ for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
+ struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i];
+ enum mdp5_pipe pipe = hwpipe->pipe;
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe),
+ smp->pipe_reqprio_fifo_wm0[pipe]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe),
+ smp->pipe_reqprio_fifo_wm1[pipe]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe),
+ smp->pipe_reqprio_fifo_wm2[pipe]);
+ }
+}
+
void mdp5_smp_prepare_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state)
{
enum mdp5_pipe pipe;
@@ -277,6 +315,9 @@ void mdp5_smp_prepare_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state)
set_fifo_thresholds(smp, pipe, nblks);
}
+ write_smp_alloc_regs(smp);
+ write_smp_fifo_regs(smp);
+
state->assigned = 0;
}
@@ -289,6 +330,8 @@ void mdp5_smp_complete_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state
set_fifo_thresholds(smp, pipe, 0);
}
+ write_smp_fifo_regs(smp);
+
state->released = 0;
}
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 9633a68b14d7..025d454163b0 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -84,13 +84,13 @@ static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
struct drm_atomic_state *old_state)
{
struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
+ struct drm_crtc_state *new_crtc_state;
struct msm_drm_private *priv = old_state->dev->dev_private;
struct msm_kms *kms = priv->kms;
int i;
- for_each_crtc_in_state(old_state, crtc, crtc_state, i) {
- if (!crtc->state->enable)
+ for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
+ if (!new_crtc_state->active)
continue;
kms->funcs->wait_for_crtc_commit_done(kms, crtc);
@@ -195,7 +195,7 @@ int msm_atomic_commit(struct drm_device *dev,
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
struct drm_plane *plane;
- struct drm_plane_state *plane_state;
+ struct drm_plane_state *old_plane_state, *new_plane_state;
int i, ret;
ret = drm_atomic_helper_prepare_planes(dev, state);
@@ -211,19 +211,19 @@ int msm_atomic_commit(struct drm_device *dev,
/*
* Figure out what crtcs we have:
*/
- for_each_crtc_in_state(state, crtc, crtc_state, i)
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i)
c->crtc_mask |= drm_crtc_mask(crtc);
/*
* Figure out what fence to wait for:
*/
- for_each_plane_in_state(state, plane, plane_state, i) {
- if ((plane->state->fb != plane_state->fb) && plane_state->fb) {
- struct drm_gem_object *obj = msm_framebuffer_bo(plane_state->fb, 0);
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
+ if ((new_plane_state->fb != old_plane_state->fb) && new_plane_state->fb) {
+ struct drm_gem_object *obj = msm_framebuffer_bo(new_plane_state->fb, 0);
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct dma_fence *fence = reservation_object_get_excl_rcu(msm_obj->resv);
- drm_atomic_set_fence_for_plane(plane_state, fence);
+ drm_atomic_set_fence_for_plane(new_plane_state, fence);
}
}
@@ -232,20 +232,18 @@ int msm_atomic_commit(struct drm_device *dev,
* mark our set of crtc's as busy:
*/
ret = start_atomic(dev->dev_private, c->crtc_mask);
- if (ret) {
- kfree(c);
- goto error;
- }
+ if (ret)
+ goto err_free;
+
+ BUG_ON(drm_atomic_helper_swap_state(state, false) < 0);
/*
* This is the point of no return - everything below never fails except
* when the hw goes bonghits. Which means we can commit the new state on
* the software side now.
+ *
+ * swap driver private state while still holding state_lock
*/
-
- drm_atomic_helper_swap_state(state, true);
-
- /* swap driver private state while still holding state_lock */
if (to_kms_state(state)->state)
priv->kms->funcs->swap_state(priv->kms, state);
@@ -275,6 +273,8 @@ int msm_atomic_commit(struct drm_device *dev,
return 0;
+err_free:
+ kfree(c);
error:
drm_atomic_helper_cleanup_planes(dev, state);
return ret;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index f49f6ac5585c..606df7bea97b 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -73,6 +73,10 @@ bool dumpstate = false;
MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors");
module_param(dumpstate, bool, 0600);
+static bool modeset = true;
+MODULE_PARM_DESC(modeset, "Use kernel modesetting [KMS] (1=on (default), 0=disable)");
+module_param(modeset, bool, 0600);
+
/*
* Util/helpers:
*/
@@ -832,7 +836,6 @@ static struct drm_driver msm_driver = {
.gem_vm_ops = &vm_ops,
.dumb_create = msm_gem_dumb_create,
.dumb_map_offset = msm_gem_dumb_map_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
@@ -879,8 +882,37 @@ static int msm_pm_resume(struct device *dev)
}
#endif
+#ifdef CONFIG_PM
+static int msm_runtime_suspend(struct device *dev)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct msm_drm_private *priv = ddev->dev_private;
+
+ DBG("");
+
+ if (priv->mdss)
+ return msm_mdss_disable(priv->mdss);
+
+ return 0;
+}
+
+static int msm_runtime_resume(struct device *dev)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct msm_drm_private *priv = ddev->dev_private;
+
+ DBG("");
+
+ if (priv->mdss)
+ return msm_mdss_enable(priv->mdss);
+
+ return 0;
+}
+#endif
+
static const struct dev_pm_ops msm_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
+ SET_RUNTIME_PM_OPS(msm_runtime_suspend, msm_runtime_resume, NULL)
};
/*
@@ -1104,6 +1136,9 @@ static struct platform_driver msm_platform_driver = {
static int __init msm_drm_register(void)
{
+ if (!modeset)
+ return -EINVAL;
+
DBG("init");
msm_mdp_register();
msm_dsi_register();
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index fc8d24f7c084..5e8109c07560 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -55,8 +55,6 @@ struct msm_fence_cb;
struct msm_gem_address_space;
struct msm_gem_vma;
-#define NUM_DOMAINS 2 /* one for KMS, then one per gpu core (?) */
-
struct msm_file_private {
/* currently we don't do anything useful with this.. but when
* per-context address spaces are supported we'd keep track of
@@ -237,6 +235,12 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
uint32_t size, uint32_t flags);
struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
uint32_t size, uint32_t flags);
+void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
+ uint32_t flags, struct msm_gem_address_space *aspace,
+ struct drm_gem_object **bo, uint64_t *iova);
+void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
+ uint32_t flags, struct msm_gem_address_space *aspace,
+ struct drm_gem_object **bo, uint64_t *iova);
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
struct dma_buf *dmabuf, struct sg_table *sgt);
@@ -248,10 +252,10 @@ uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace, int plane);
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
-struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
- const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd);
+struct drm_framebuffer * msm_alloc_stolen_fb(struct drm_device *dev,
+ int w, int h, int p, uint32_t format);
struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev);
void msm_fbdev_free(struct drm_device *dev);
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 6ecb7b170316..fc175e724ad6 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -20,6 +20,7 @@
#include "msm_drv.h"
#include "msm_kms.h"
+#include "msm_gem.h"
struct msm_framebuffer {
struct drm_framebuffer base;
@@ -28,6 +29,8 @@ struct msm_framebuffer {
};
#define to_msm_framebuffer(x) container_of(x, struct msm_framebuffer, base)
+static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
+ const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
static int msm_framebuffer_create_handle(struct drm_framebuffer *fb,
struct drm_file *file_priv,
@@ -161,7 +164,7 @@ out_unref:
return ERR_PTR(ret);
}
-struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
+static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos)
{
struct msm_drm_private *priv = dev->dev_private;
@@ -237,3 +240,43 @@ fail:
return ERR_PTR(ret);
}
+
+struct drm_framebuffer *
+msm_alloc_stolen_fb(struct drm_device *dev, int w, int h, int p, uint32_t format)
+{
+ struct drm_mode_fb_cmd2 mode_cmd = {
+ .pixel_format = format,
+ .width = w,
+ .height = h,
+ .pitches = { p },
+ };
+ struct drm_gem_object *bo;
+ struct drm_framebuffer *fb;
+ int size;
+
+ /* allocate backing bo */
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+ DBG("allocating %d bytes for fb %d", size, dev->primary->index);
+ bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | MSM_BO_WC | MSM_BO_STOLEN);
+ if (IS_ERR(bo)) {
+ dev_warn(dev->dev, "could not allocate stolen bo\n");
+ /* try regular bo: */
+ bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | MSM_BO_WC);
+ }
+ if (IS_ERR(bo)) {
+ dev_err(dev->dev, "failed to allocate buffer object\n");
+ return ERR_CAST(bo);
+ }
+
+ fb = msm_framebuffer_init(dev, &mode_cmd, &bo);
+ if (IS_ERR(fb)) {
+ dev_err(dev->dev, "failed to allocate fb\n");
+ /* note: if fb creation failed, we can't rely on fb destroy
+ * to unref the bo:
+ */
+ drm_gem_object_unreference_unlocked(bo);
+ return ERR_CAST(fb);
+ }
+
+ return fb;
+}
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 5ecf4ff9a059..c178563fcd4d 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -19,7 +19,6 @@
#include <drm/drm_fb_helper.h>
#include "msm_drv.h"
-#include "msm_gem.h"
#include "msm_kms.h"
extern int msm_gem_mmap_obj(struct drm_gem_object *obj,
@@ -35,7 +34,6 @@ static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma);
struct msm_fbdev {
struct drm_fb_helper base;
struct drm_framebuffer *fb;
- struct drm_gem_object *bo;
};
static struct fb_ops msm_fb_ops = {
@@ -57,16 +55,16 @@ static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
struct drm_fb_helper *helper = (struct drm_fb_helper *)info->par;
struct msm_fbdev *fbdev = to_msm_fbdev(helper);
- struct drm_gem_object *drm_obj = fbdev->bo;
+ struct drm_gem_object *bo = msm_framebuffer_bo(fbdev->fb, 0);
int ret = 0;
- ret = drm_gem_mmap_obj(drm_obj, drm_obj->size, vma);
+ ret = drm_gem_mmap_obj(bo, bo->size, vma);
if (ret) {
pr_err("%s:drm_gem_mmap_obj fail\n", __func__);
return ret;
}
- return msm_gem_mmap_obj(drm_obj, vma);
+ return msm_gem_mmap_obj(bo, vma);
}
static int msm_fbdev_create(struct drm_fb_helper *helper,
@@ -76,47 +74,30 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
struct drm_device *dev = helper->dev;
struct msm_drm_private *priv = dev->dev_private;
struct drm_framebuffer *fb = NULL;
+ struct drm_gem_object *bo;
struct fb_info *fbi = NULL;
- struct drm_mode_fb_cmd2 mode_cmd = {0};
uint64_t paddr;
- int ret, size;
+ uint32_t format;
+ int ret, pitch;
+
+ format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
sizes->surface_height, sizes->surface_bpp,
sizes->fb_width, sizes->fb_height);
- mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
- sizes->surface_depth);
-
- mode_cmd.width = sizes->surface_width;
- mode_cmd.height = sizes->surface_height;
-
- mode_cmd.pitches[0] = align_pitch(
- mode_cmd.width, sizes->surface_bpp);
+ pitch = align_pitch(sizes->surface_width, sizes->surface_bpp);
+ fb = msm_alloc_stolen_fb(dev, sizes->surface_width,
+ sizes->surface_height, pitch, format);
- /* allocate backing bo */
- size = mode_cmd.pitches[0] * mode_cmd.height;
- DBG("allocating %d bytes for fb %d", size, dev->primary->index);
- fbdev->bo = msm_gem_new(dev, size, MSM_BO_SCANOUT |
- MSM_BO_WC | MSM_BO_STOLEN);
- if (IS_ERR(fbdev->bo)) {
- ret = PTR_ERR(fbdev->bo);
- fbdev->bo = NULL;
- dev_err(dev->dev, "failed to allocate buffer object: %d\n", ret);
- goto fail;
- }
-
- fb = msm_framebuffer_init(dev, &mode_cmd, &fbdev->bo);
if (IS_ERR(fb)) {
dev_err(dev->dev, "failed to allocate fb\n");
- /* note: if fb creation failed, we can't rely on fb destroy
- * to unref the bo:
- */
- drm_gem_object_unreference_unlocked(fbdev->bo);
ret = PTR_ERR(fb);
goto fail;
}
+ bo = msm_framebuffer_bo(fb, 0);
+
mutex_lock(&dev->struct_mutex);
/*
@@ -124,7 +105,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
* in panic (ie. lock-safe, etc) we could avoid pinning the
* buffer now:
*/
- ret = msm_gem_get_iova(fbdev->bo, priv->kms->aspace, &paddr);
+ ret = msm_gem_get_iova(bo, priv->kms->aspace, &paddr);
if (ret) {
dev_err(dev->dev, "failed to get buffer obj iova: %d\n", ret);
goto fail_unlock;
@@ -143,7 +124,6 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
helper->fb = fb;
fbi->par = helper;
- fbi->flags = FBINFO_DEFAULT;
fbi->fbops = &msm_fb_ops;
strcpy(fbi->fix.id, "msm");
@@ -153,14 +133,14 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
dev->mode_config.fb_base = paddr;
- fbi->screen_base = msm_gem_get_vaddr(fbdev->bo);
+ fbi->screen_base = msm_gem_get_vaddr(bo);
if (IS_ERR(fbi->screen_base)) {
ret = PTR_ERR(fbi->screen_base);
goto fail_unlock;
}
- fbi->screen_size = fbdev->bo->size;
+ fbi->screen_size = bo->size;
fbi->fix.smem_start = paddr;
- fbi->fix.smem_len = fbdev->bo->size;
+ fbi->fix.smem_len = bo->size;
DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height);
@@ -242,7 +222,9 @@ void msm_fbdev_free(struct drm_device *dev)
/* this will free the backing object */
if (fbdev->fb) {
- msm_gem_put_vaddr(fbdev->bo);
+ struct drm_gem_object *bo =
+ msm_framebuffer_bo(fbdev->fb, 0);
+ msm_gem_put_vaddr(bo);
drm_framebuffer_remove(fbdev->fb);
}
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 65f35544c1ec..f15821a0d900 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -383,8 +383,10 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
struct page **pages;
vma = add_vma(obj, aspace);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto unlock;
+ }
pages = get_pages(obj);
if (IS_ERR(pages)) {
@@ -405,7 +407,7 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
fail:
del_vma(vma);
-
+unlock:
mutex_unlock(&msm_obj->lock);
return ret;
}
@@ -928,8 +930,12 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
if (use_vram) {
struct msm_gem_vma *vma;
struct page **pages;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ mutex_lock(&msm_obj->lock);
vma = add_vma(obj, NULL);
+ mutex_unlock(&msm_obj->lock);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto fail;
@@ -1018,3 +1024,49 @@ fail:
drm_gem_object_unreference_unlocked(obj);
return ERR_PTR(ret);
}
+
+static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
+ uint32_t flags, struct msm_gem_address_space *aspace,
+ struct drm_gem_object **bo, uint64_t *iova, bool locked)
+{
+ void *vaddr;
+ struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
+ int ret;
+
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ if (iova) {
+ ret = msm_gem_get_iova(obj, aspace, iova);
+ if (ret) {
+ drm_gem_object_unreference(obj);
+ return ERR_PTR(ret);
+ }
+ }
+
+ vaddr = msm_gem_get_vaddr(obj);
+ if (!vaddr) {
+ msm_gem_put_iova(obj, aspace);
+ drm_gem_object_unreference(obj);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (bo)
+ *bo = obj;
+
+ return vaddr;
+}
+
+void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
+ uint32_t flags, struct msm_gem_address_space *aspace,
+ struct drm_gem_object **bo, uint64_t *iova)
+{
+ return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
+}
+
+void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
+ uint32_t flags, struct msm_gem_address_space *aspace,
+ struct drm_gem_object **bo, uint64_t *iova)
+{
+ return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
+}
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 6bfca7470141..8a75c0bd8a78 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -34,8 +34,8 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
struct msm_gpu *gpu, uint32_t nr_bos, uint32_t nr_cmds)
{
struct msm_gem_submit *submit;
- uint64_t sz = sizeof(*submit) + (nr_bos * sizeof(submit->bos[0])) +
- (nr_cmds * sizeof(submit->cmd[0]));
+ uint64_t sz = sizeof(*submit) + ((u64)nr_bos * sizeof(submit->bos[0])) +
+ ((u64)nr_cmds * sizeof(submit->cmd[0]));
if (sz > SIZE_MAX)
return NULL;
@@ -451,7 +451,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (ret)
goto out;
- if (!(args->fence & MSM_SUBMIT_NO_IMPLICIT)) {
+ if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) {
ret = submit_fence_sync(submit);
if (ret)
goto out;
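
The submit_create() change above is an integer-overflow guard: nr_bos and nr_cmds come from userspace as u32, and on a 32-bit kernel size_t is 32 bits wide, so nr_bos * sizeof(submit->bos[0]) could wrap before the SIZE_MAX comparison ever saw a large value. Forcing the multiplication into u64 keeps the intermediate exact. A standalone sketch of the same idea, with illustrative names that are not from the driver:

#include <stdint.h>
#include <stdio.h>

struct elem { uint64_t a, b, c, d; };	/* stand-in for a per-bo entry */

/* Compute header + nr * sizeof(entry) without letting the multiply
 * wrap: the cast forces 64-bit arithmetic, and the SIZE_MAX check
 * then rejects anything a 32-bit size_t allocation could not hold.
 */
static int checked_alloc_size(uint32_t nr, uint64_t *out)
{
	uint64_t sz = 64 + (uint64_t)nr * sizeof(struct elem);

	if (sz > SIZE_MAX)
		return -1;

	*out = sz;
	return 0;
}

int main(void)
{
	uint64_t sz;

	/* On a 32-bit build 0x20000000 * 32 bytes wraps to a small value;
	 * in u64 it stays exact, so the check above can reject it there.
	 */
	if (checked_alloc_size(0x20000000u, &sz) == 0)
		printf("accepted: %llu bytes\n", (unsigned long long)sz);
	else
		printf("rejected oversized request\n");
	return 0;
}
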
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index c36321bc8714..d34e331554f3 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -42,7 +42,7 @@ void
msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, struct sg_table *sgt)
{
- if (!vma->iova)
+ if (!aspace || !vma->iova)
return;
if (aspace->mmu) {
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 9f3dbc236ab3..ffbff27600e0 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -562,11 +562,49 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
return 0;
}
+static struct msm_gem_address_space *
+msm_gpu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev,
+ uint64_t va_start, uint64_t va_end)
+{
+ struct iommu_domain *iommu;
+ struct msm_gem_address_space *aspace;
+ int ret;
+
+ /*
+ * Setup IOMMU.. eventually we will (I think) do this once per context
+ * and have separate page tables per context. For now, to keep things
+ * simple and to get something working, just use a single address space:
+ */
+ iommu = iommu_domain_alloc(&platform_bus_type);
+ if (!iommu)
+ return NULL;
+
+ iommu->geometry.aperture_start = va_start;
+ iommu->geometry.aperture_end = va_end;
+
+ dev_info(gpu->dev->dev, "%s: using IOMMU\n", gpu->name);
+
+ aspace = msm_gem_address_space_create(&pdev->dev, iommu, "gpu");
+ if (IS_ERR(aspace)) {
+ dev_err(gpu->dev->dev, "failed to init iommu: %ld\n",
+ PTR_ERR(aspace));
+ iommu_domain_free(iommu);
+ return ERR_CAST(aspace);
+ }
+
+ ret = aspace->mmu->funcs->attach(aspace->mmu, NULL, 0);
+ if (ret) {
+ msm_gem_address_space_put(aspace);
+ return ERR_PTR(ret);
+ }
+
+ return aspace;
+}
+
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
const char *name, struct msm_gpu_config *config)
{
- struct iommu_domain *iommu;
int ret;
if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
@@ -636,28 +674,19 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
if (IS_ERR(gpu->gpu_cx))
gpu->gpu_cx = NULL;
- /* Setup IOMMU.. eventually we will (I think) do this once per context
- * and have separate page tables per context. For now, to keep things
- * simple and to get something working, just use a single address space:
- */
- iommu = iommu_domain_alloc(&platform_bus_type);
- if (iommu) {
- iommu->geometry.aperture_start = config->va_start;
- iommu->geometry.aperture_end = config->va_end;
-
- dev_info(drm->dev, "%s: using IOMMU\n", name);
- gpu->aspace = msm_gem_address_space_create(&pdev->dev,
- iommu, "gpu");
- if (IS_ERR(gpu->aspace)) {
- ret = PTR_ERR(gpu->aspace);
- dev_err(drm->dev, "failed to init iommu: %d\n", ret);
- gpu->aspace = NULL;
- iommu_domain_free(iommu);
- goto fail;
- }
+ gpu->pdev = pdev;
+ platform_set_drvdata(pdev, gpu);
+
+ bs_init(gpu);
- } else {
+ gpu->aspace = msm_gpu_create_address_space(gpu, pdev,
+ config->va_start, config->va_end);
+
+ if (gpu->aspace == NULL)
dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
+ else if (IS_ERR(gpu->aspace)) {
+ ret = PTR_ERR(gpu->aspace);
+ goto fail;
}
/* Create ringbuffer: */
@@ -669,14 +698,10 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
goto fail;
}
- gpu->pdev = pdev;
- platform_set_drvdata(pdev, gpu);
-
- bs_init(gpu);
-
return 0;
fail:
+ platform_set_drvdata(pdev, NULL);
return ret;
}
@@ -693,7 +718,9 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
msm_gem_put_iova(gpu->rb->bo, gpu->aspace);
msm_ringbuffer_destroy(gpu->rb);
}
-
- if (gpu->fctx)
- msm_fence_context_free(gpu->fctx);
+ if (gpu->aspace) {
+ gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
+ NULL, 0);
+ msm_gem_address_space_put(gpu->aspace);
+ }
}
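
The msm_gpu_init() rework above concentrates the IOMMU setup in msm_gpu_create_address_space(), which follows the common tri-state return convention: a usable pointer on success, NULL for the benign no-IOMMU case (fall back to the VRAM carveout), and an ERR_PTR()-encoded value for real failures, exactly as the caller distinguishes them in the hunk. A generic sketch of consuming such a return, assuming only the linux/err.h helpers:

#include <linux/err.h>

/* Sketch of handling a tri-state pointer like gpu->aspace above:
 * NULL means the optional feature is absent, an ERR_PTR-encoded
 * value is a hard failure, anything else is usable.
 */
static int example_consume(void *ptr)
{
	if (!ptr)
		return 0;		/* feature absent: fall back and continue */

	if (IS_ERR(ptr))
		return PTR_ERR(ptr);	/* hard failure: propagate */

	/* ... use ptr ... */
	return 0;
}
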
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index a8f2ba5e5f07..17d5824417ad 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -99,5 +99,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev);
struct msm_kms *mdp5_kms_init(struct drm_device *dev);
int msm_mdss_init(struct drm_device *dev);
void msm_mdss_destroy(struct drm_device *dev);
+int msm_mdss_enable(struct msm_mdss *mdss);
+int msm_mdss_disable(struct msm_mdss *mdss);
#endif /* __MSM_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 791bca3c6a9c..bf065a540130 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -33,16 +33,14 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
}
ring->gpu = gpu;
- ring->bo = msm_gem_new(gpu->dev, size, MSM_BO_WC);
- if (IS_ERR(ring->bo)) {
- ret = PTR_ERR(ring->bo);
- ring->bo = NULL;
- goto fail;
- }
- ring->start = msm_gem_get_vaddr(ring->bo);
+ /* Pass NULL for the iova pointer - we will map it later */
+ ring->start = msm_gem_kernel_new(gpu->dev, size, MSM_BO_WC,
+ gpu->aspace, &ring->bo, NULL);
+
if (IS_ERR(ring->start)) {
ret = PTR_ERR(ring->start);
+ ring->start = 0;
goto fail;
}
ring->end = ring->start + (size / 4);
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index d1b9c34c7c00..7fbad9cb656e 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -190,7 +190,7 @@ static int mxsfb_load(struct drm_device *drm, unsigned long flags)
}
ret = drm_simple_display_pipe_init(drm, &mxsfb->pipe, &mxsfb_funcs,
- mxsfb_formats, ARRAY_SIZE(mxsfb_formats),
+ mxsfb_formats, ARRAY_SIZE(mxsfb_formats), NULL,
&mxsfb->connector);
if (ret < 0) {
dev_err(drm->dev, "Cannot setup simple display pipe\n");
@@ -256,7 +256,6 @@ static void mxsfb_unload(struct drm_device *drm)
drm_kms_helper_poll_fini(drm);
drm_mode_config_cleanup(drm);
- drm_vblank_cleanup(drm);
pm_runtime_get_sync(drm->dev);
drm_irq_uninstall(drm);
@@ -335,11 +334,9 @@ static struct drm_driver mxsfb_driver = {
.irq_uninstall = mxsfb_irq_preinstall,
.enable_vblank = mxsfb_enable_vblank,
.disable_vblank = mxsfb_disable_vblank,
- .gem_free_object = drm_gem_cma_free_object,
+ .gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
- .dumb_map_offset = drm_gem_cma_dumb_map_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_out.c b/drivers/gpu/drm/mxsfb/mxsfb_out.c
index f7d729aa09bd..e5edf016a439 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_out.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_out.c
@@ -74,7 +74,6 @@ static void mxsfb_panel_connector_destroy(struct drm_connector *connector)
}
static const struct drm_connector_funcs mxsfb_panel_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.detect = mxsfb_panel_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = mxsfb_panel_connector_destroy,
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 4b4b0b496262..6aa6ee16dcbd 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -764,13 +764,18 @@ nv_crtc_gamma_load(struct drm_crtc *crtc)
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct drm_device *dev = nv_crtc->base.dev;
struct rgb { uint8_t r, g, b; } __attribute__((packed)) *rgbs;
+ u16 *r, *g, *b;
int i;
rgbs = (struct rgb *)nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index].DAC;
+ r = crtc->gamma_store;
+ g = r + crtc->gamma_size;
+ b = g + crtc->gamma_size;
+
for (i = 0; i < 256; i++) {
- rgbs[i].r = nv_crtc->lut.r[i] >> 8;
- rgbs[i].g = nv_crtc->lut.g[i] >> 8;
- rgbs[i].b = nv_crtc->lut.b[i] >> 8;
+ rgbs[i].r = *r++ >> 8;
+ rgbs[i].g = *g++ >> 8;
+ rgbs[i].b = *b++ >> 8;
}
nouveau_hw_load_state_palette(dev, nv_crtc->index, &nv04_display(dev)->mode_reg);
@@ -792,13 +797,6 @@ nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
struct drm_modeset_acquire_ctx *ctx)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- int i;
-
- for (i = 0; i < size; i++) {
- nv_crtc->lut.r[i] = r[i];
- nv_crtc->lut.g[i] = g[i];
- nv_crtc->lut.b[i] = b[i];
- }
/* We need to know the depth before we upload, but it's possible to
* get called before a framebuffer is bound. If this is the case,
@@ -1095,25 +1093,51 @@ static const struct drm_crtc_helper_funcs nv04_crtc_helper_funcs = {
.mode_set = nv_crtc_mode_set,
.mode_set_base = nv04_crtc_mode_set_base,
.mode_set_base_atomic = nv04_crtc_mode_set_base_atomic,
- .load_lut = nv_crtc_gamma_load,
.disable = nv_crtc_disable,
};
+static const uint32_t modeset_formats[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB1555,
+};
+
+static struct drm_plane *
+create_primary_plane(struct drm_device *dev)
+{
+ struct drm_plane *primary;
+ int ret;
+
+ primary = kzalloc(sizeof(*primary), GFP_KERNEL);
+ if (primary == NULL) {
+ DRM_DEBUG_KMS("Failed to allocate primary plane\n");
+ return NULL;
+ }
+
+ /* possible_crtc's will be filled in later by crtc_init */
+ ret = drm_universal_plane_init(dev, primary, 0,
+ &drm_primary_helper_funcs,
+ modeset_formats,
+ ARRAY_SIZE(modeset_formats), NULL,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret) {
+ kfree(primary);
+ primary = NULL;
+ }
+
+ return primary;
+}
+
int
nv04_crtc_create(struct drm_device *dev, int crtc_num)
{
struct nouveau_crtc *nv_crtc;
- int ret, i;
+ int ret;
nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
if (!nv_crtc)
return -ENOMEM;
- for (i = 0; i < 256; i++) {
- nv_crtc->lut.r[i] = i << 8;
- nv_crtc->lut.g[i] = i << 8;
- nv_crtc->lut.b[i] = i << 8;
- }
nv_crtc->lut.depth = 0;
nv_crtc->index = crtc_num;
@@ -1122,7 +1146,9 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
nv_crtc->save = nv_crtc_save;
nv_crtc->restore = nv_crtc_restore;
- drm_crtc_init(dev, &nv_crtc->base, &nv04_crtc_funcs);
+ drm_crtc_init_with_planes(dev, &nv_crtc->base,
+ create_primary_plane(dev), NULL,
+ &nv04_crtc_funcs, NULL);
drm_crtc_helper_add(&nv_crtc->base, &nv04_crtc_helper_funcs);
drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
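
The LUT hunks above drop the private r/g/b copies and read the ramp straight from crtc->gamma_store, which DRM keeps as one contiguous array of 3 * gamma_size u16 entries: all red values first, then green, then blue. A short plain-C sketch of walking that layout and truncating to the 8-bit values the legacy DAC wants, as the >> 8 in the hunk does:

#include <stdint.h>
#include <stddef.h>

/* gamma_store holds 3 * size 16-bit entries: the red table, then the
 * green table, then the blue table.  Emit 8-bit-per-channel triples.
 */
static void load_palette(const uint16_t *gamma_store, size_t size,
			 uint8_t *out_rgb /* size * 3 bytes */)
{
	const uint16_t *r = gamma_store;
	const uint16_t *g = r + size;
	const uint16_t *b = g + size;
	size_t i;

	for (i = 0; i < size; i++) {
		out_rgb[i * 3 + 0] = r[i] >> 8;
		out_rgb[i * 3 + 1] = g[i] >> 8;
		out_rgb[i * 3 + 2] = b[i] >> 8;
	}
}
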
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
index e54944d23268..c8c2333f24ee 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -63,6 +63,7 @@ static uint32_t formats[] = {
DRM_FORMAT_YUYV,
DRM_FORMAT_UYVY,
DRM_FORMAT_NV12,
+ DRM_FORMAT_NV21,
};
/* Sine can be approximated with
@@ -90,6 +91,26 @@ cos_mul(int degrees, int factor)
}
static int
+verify_scaling(const struct drm_framebuffer *fb, uint8_t shift,
+ uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h,
+ uint32_t crtc_w, uint32_t crtc_h)
+{
+ if (crtc_w < (src_w >> shift) || crtc_h < (src_h >> shift)) {
+ DRM_DEBUG_KMS("Unsuitable framebuffer scaling: %dx%d -> %dx%d\n",
+ src_w, src_h, crtc_w, crtc_h);
+ return -ERANGE;
+ }
+
+ if (src_x != 0 || src_y != 0) {
+ DRM_DEBUG_KMS("Unsuitable framebuffer offset: %d,%d\n",
+ src_x, src_y);
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+static int
nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
@@ -107,7 +128,9 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
bool flip = nv_plane->flip;
int soff = NV_PCRTC0_SIZE * nv_crtc->index;
int soff2 = NV_PCRTC0_SIZE * !nv_crtc->index;
- int format, ret;
+ unsigned shift = drm->client.device.info.chipset >= 0x30 ? 1 : 3;
+ unsigned format = 0;
+ int ret;
/* Source parameters given in 16.16 fixed point, ignore fractional. */
src_x >>= 16;
@@ -115,18 +138,9 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
src_w >>= 16;
src_h >>= 16;
- format = ALIGN(src_w * 4, 0x100);
-
- if (format > 0xffff)
- return -ERANGE;
-
- if (drm->client.device.info.chipset >= 0x30) {
- if (crtc_w < (src_w >> 1) || crtc_h < (src_h >> 1))
- return -ERANGE;
- } else {
- if (crtc_w < (src_w >> 3) || crtc_h < (src_h >> 3))
- return -ERANGE;
- }
+ ret = verify_scaling(fb, shift, 0, 0, src_w, src_h, crtc_w, crtc_h);
+ if (ret)
+ return ret;
ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM, false);
if (ret)
@@ -146,21 +160,23 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
nvif_wr32(dev, NV_PVIDEO_POINT_OUT(flip), crtc_y << 16 | crtc_x);
nvif_wr32(dev, NV_PVIDEO_SIZE_OUT(flip), crtc_h << 16 | crtc_w);
- if (fb->format->format != DRM_FORMAT_UYVY)
+ if (fb->format->format == DRM_FORMAT_YUYV ||
+ fb->format->format == DRM_FORMAT_NV12)
format |= NV_PVIDEO_FORMAT_COLOR_LE_CR8YB8CB8YA8;
- if (fb->format->format == DRM_FORMAT_NV12)
+ if (fb->format->format == DRM_FORMAT_NV12 ||
+ fb->format->format == DRM_FORMAT_NV21)
format |= NV_PVIDEO_FORMAT_PLANAR;
if (nv_plane->iturbt_709)
format |= NV_PVIDEO_FORMAT_MATRIX_ITURBT709;
if (nv_plane->colorkey & (1 << 24))
format |= NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY;
- if (fb->format->format == DRM_FORMAT_NV12) {
+ if (format & NV_PVIDEO_FORMAT_PLANAR) {
nvif_wr32(dev, NV_PVIDEO_UVPLANE_BASE(flip), 0);
nvif_wr32(dev, NV_PVIDEO_UVPLANE_OFFSET_BUFF(flip),
nv_fb->nvbo->bo.offset + fb->offsets[1]);
}
- nvif_wr32(dev, NV_PVIDEO_FORMAT(flip), format);
+ nvif_wr32(dev, NV_PVIDEO_FORMAT(flip), format | fb->pitches[0]);
nvif_wr32(dev, NV_PVIDEO_STOP, 0);
/* TODO: wait for vblank? */
nvif_wr32(dev, NV_PVIDEO_BUFFER, flip ? 0x10 : 0x1);
@@ -357,7 +373,7 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct nouveau_bo *cur = nv_plane->cur;
uint32_t overlay = 1;
int brightness = (nv_plane->brightness - 512) * 62 / 512;
- int pitch, ret, i;
+ int ret, i;
/* Source parameters given in 16.16 fixed point, ignore fractional. */
src_x >>= 16;
@@ -365,17 +381,9 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
src_w >>= 16;
src_h >>= 16;
- pitch = ALIGN(src_w * 4, 0x100);
-
- if (pitch > 0xffff)
- return -ERANGE;
-
- /* TODO: Compute an offset? Not sure how to do this for YUYV. */
- if (src_x != 0 || src_y != 0)
- return -ERANGE;
-
- if (crtc_w < src_w || crtc_h < src_h)
- return -ERANGE;
+ ret = verify_scaling(fb, 0, src_x, src_y, src_w, src_h, crtc_w, crtc_h);
+ if (ret)
+ return ret;
ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM, false);
if (ret)
@@ -389,8 +397,9 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
for (i = 0; i < 2; i++) {
nvif_wr32(dev, NV_PVIDEO_BUFF0_START_ADDRESS + 4 * i,
- nv_fb->nvbo->bo.offset);
- nvif_wr32(dev, NV_PVIDEO_BUFF0_PITCH_LENGTH + 4 * i, pitch);
+ nv_fb->nvbo->bo.offset);
+ nvif_wr32(dev, NV_PVIDEO_BUFF0_PITCH_LENGTH + 4 * i,
+ fb->pitches[0]);
nvif_wr32(dev, NV_PVIDEO_BUFF0_OFFSET + 4 * i, 0);
}
nvif_wr32(dev, NV_PVIDEO_WINDOW_START, crtc_y << 16 | crtc_x);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
index e8e77ee24776..deb477282dde 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
@@ -18,6 +18,7 @@ enum dcb_connector_type {
DCB_CONNECTOR_HDMI_C = 0x63,
DCB_CONNECTOR_DMS59_DP0 = 0x64,
DCB_CONNECTOR_DMS59_DP1 = 0x65,
+ DCB_CONNECTOR_WFD = 0x70,
DCB_CONNECTOR_NONE = 0xff
};
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dcb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dcb.h
index 4892a65ddd48..903d117603d8 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dcb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dcb.h
@@ -6,6 +6,7 @@ enum dcb_output_type {
DCB_OUTPUT_TMDS = 0x2,
DCB_OUTPUT_LVDS = 0x3,
DCB_OUTPUT_DP = 0x6,
+ DCB_OUTPUT_WFD = 0x8,
DCB_OUTPUT_EOL = 0xe,
DCB_OUTPUT_UNUSED = 0xf,
DCB_OUTPUT_ANY = -1,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
index b268b96faece..1bfd93b85575 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
@@ -96,4 +96,5 @@ int g84_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
int gt215_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
int gf119_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
int gm107_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
+int gm200_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index b998c33af18a..dd6fba55ad5d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -351,11 +351,8 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
struct lvdstableheader lth;
if (bios->fp.fptablepointer == 0x0) {
- /* Apple cards don't have the fp table; the laptops use DDC */
- /* The table is also missing on some x86 IGPs */
-#ifndef __powerpc__
- NV_ERROR(drm, "Pointer to flat panel table invalid\n");
-#endif
+ /* Most laptop cards lack an fp table. They use DDC. */
+ NV_DEBUG(drm, "Pointer to flat panel table invalid\n");
bios->digital_min_front_porch = 0x4b;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 147b22163f9f..70d8e0d69ad5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -770,9 +770,6 @@ nouveau_connector_set_property(struct drm_connector *connector,
struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
int ret;
- if (drm_drv_uses_atomic_modeset(connector->dev))
- return drm_atomic_helper_connector_set_property(connector, property, value);
-
ret = connector->funcs->atomic_set_property(&nv_connector->base,
&asyc->state,
property, value);
@@ -1075,17 +1072,9 @@ nouveau_connector_helper_funcs = {
.best_encoder = nouveau_connector_best_encoder,
};
-static int
-nouveau_connector_dpms(struct drm_connector *connector, int mode)
-{
- if (drm_drv_uses_atomic_modeset(connector->dev))
- return drm_atomic_helper_connector_dpms(connector, mode);
- return drm_helper_connector_dpms(connector, mode);
-}
-
static const struct drm_connector_funcs
nouveau_connector_funcs = {
- .dpms = nouveau_connector_dpms,
+ .dpms = drm_helper_connector_dpms,
.reset = nouveau_conn_reset,
.detect = nouveau_connector_detect,
.force = nouveau_connector_force,
@@ -1100,7 +1089,7 @@ nouveau_connector_funcs = {
static const struct drm_connector_funcs
nouveau_connector_funcs_lvds = {
- .dpms = nouveau_connector_dpms,
+ .dpms = drm_helper_connector_dpms,
.reset = nouveau_conn_reset,
.detect = nouveau_connector_detect_lvds,
.force = nouveau_connector_force,
@@ -1158,8 +1147,6 @@ nouveau_connector_aux_xfer(struct drm_dp_aux *obj, struct drm_dp_aux_msg *msg)
return -ENODEV;
if (WARN_ON(msg->size > 16))
return -E2BIG;
- if (msg->size == 0)
- return msg->size;
ret = nvkm_i2c_aux_acquire(aux);
if (ret)
@@ -1197,6 +1184,7 @@ drm_conntype_from_dcb(enum dcb_connector_type dcb)
case DCB_CONNECTOR_HDMI_0 :
case DCB_CONNECTOR_HDMI_1 :
case DCB_CONNECTOR_HDMI_C : return DRM_MODE_CONNECTOR_HDMIA;
+ case DCB_CONNECTOR_WFD : return DRM_MODE_CONNECTOR_VIRTUAL;
default:
break;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h
index 050fcf30a0d2..b7a18fbee6dc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_crtc.h
+++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h
@@ -61,9 +61,6 @@ struct nouveau_crtc {
struct {
struct nouveau_bo *nvbo;
- uint16_t r[256];
- uint16_t g[256];
- uint16_t b[256];
int depth;
} lut;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 8d1df5678eaa..2e7785f49e6d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -159,8 +159,6 @@ nouveau_display_vblank_fini(struct drm_device *dev)
{
struct drm_crtc *crtc;
- drm_vblank_cleanup(dev);
-
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
nvif_notify_fini(&nv_crtc->vblank);
@@ -233,9 +231,30 @@ nouveau_framebuffer_new(struct drm_device *dev,
struct nouveau_bo *nvbo,
struct nouveau_framebuffer **pfb)
{
+ struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_framebuffer *fb;
int ret;
+ /* YUV overlays have special requirements pre-NV50 */
+ if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA &&
+
+ (mode_cmd->pixel_format == DRM_FORMAT_YUYV ||
+ mode_cmd->pixel_format == DRM_FORMAT_UYVY ||
+ mode_cmd->pixel_format == DRM_FORMAT_NV12 ||
+ mode_cmd->pixel_format == DRM_FORMAT_NV21) &&
+ (mode_cmd->pitches[0] & 0x3f || /* align 64 */
+ mode_cmd->pitches[0] >= 0x10000 || /* at most 64k pitch */
+ (mode_cmd->pitches[1] && /* pitches for planes must match */
+ mode_cmd->pitches[0] != mode_cmd->pitches[1]))) {
+ struct drm_format_name_buf format_name;
+ DRM_DEBUG_KMS("Unsuitable framebuffer: format: %s; pitches: 0x%x\n 0x%x\n",
+ drm_get_format_name(mode_cmd->pixel_format,
+ &format_name),
+ mode_cmd->pitches[0],
+ mode_cmd->pitches[1]);
+ return -EINVAL;
+ }
+
if (!(fb = *pfb = kzalloc(sizeof(*fb), GFP_KERNEL)))
return -ENOMEM;
@@ -409,7 +428,6 @@ nouveau_display_fini(struct drm_device *dev, bool suspend)
struct nouveau_display *disp = nouveau_display(dev);
struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_connector *connector;
- struct drm_crtc *crtc;
if (!suspend) {
if (drm_drv_uses_atomic_modeset(dev))
@@ -418,10 +436,6 @@ nouveau_display_fini(struct drm_device *dev, bool suspend)
drm_crtc_force_disable_all(dev);
}
- /* Make sure that drm and hw vblank irqs get properly disabled. */
- drm_for_each_crtc(crtc, dev)
- drm_crtc_vblank_off(crtc);
-
/* disable flip completion events */
nvif_notify_put(&drm->flip);
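
The nouveau_framebuffer_new() hunk above rejects unusable pre-NV50 YUV framebuffers at creation time instead of at plane-update time: the pitch must be 64-byte aligned (pitches[0] & 0x3f), must stay below 0x10000 (matching the 0xffff limit the old per-update check enforced), and for NV12/NV21 both planes must share a pitch. A tiny standalone sketch of the same checks, with illustrative names:

#include <stdint.h>
#include <stdbool.h>

/* Mirror of the pitch constraints the hunk above enforces for
 * pre-NV50 YUV overlay framebuffers.
 */
static bool yuv_pitches_ok(uint32_t pitch0, uint32_t pitch1, bool planar)
{
	if (pitch0 & 0x3f)		/* must be a multiple of 64 bytes */
		return false;
	if (pitch0 >= 0x10000)		/* same cap as the old 0xffff test */
		return false;
	if (planar && pitch1 && pitch1 != pitch0)
		return false;		/* both planes must match */
	return true;
}
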
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 90757af9bc73..595630d1fb9e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -585,18 +585,18 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
nouveau_led_suspend(dev);
if (dev->mode_config.num_crtc) {
- NV_INFO(drm, "suspending console...\n");
+ NV_DEBUG(drm, "suspending console...\n");
nouveau_fbcon_set_suspend(dev, 1);
- NV_INFO(drm, "suspending display...\n");
+ NV_DEBUG(drm, "suspending display...\n");
ret = nouveau_display_suspend(dev, runtime);
if (ret)
return ret;
}
- NV_INFO(drm, "evicting buffers...\n");
+ NV_DEBUG(drm, "evicting buffers...\n");
ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM);
- NV_INFO(drm, "waiting for kernel channels to go idle...\n");
+ NV_DEBUG(drm, "waiting for kernel channels to go idle...\n");
if (drm->cechan) {
ret = nouveau_channel_idle(drm->cechan);
if (ret)
@@ -609,7 +609,7 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
goto fail_display;
}
- NV_INFO(drm, "suspending fence...\n");
+ NV_DEBUG(drm, "suspending fence...\n");
if (drm->fence && nouveau_fence(drm)->suspend) {
if (!nouveau_fence(drm)->suspend(drm)) {
ret = -ENOMEM;
@@ -617,7 +617,7 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
}
}
- NV_INFO(drm, "suspending object tree...\n");
+ NV_DEBUG(drm, "suspending object tree...\n");
ret = nvif_client_suspend(&drm->client.base);
if (ret)
goto fail_client;
@@ -630,7 +630,7 @@ fail_client:
fail_display:
if (dev->mode_config.num_crtc) {
- NV_INFO(drm, "resuming display...\n");
+ NV_DEBUG(drm, "resuming display...\n");
nouveau_display_resume(dev, runtime);
}
return ret;
@@ -641,19 +641,19 @@ nouveau_do_resume(struct drm_device *dev, bool runtime)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- NV_INFO(drm, "resuming object tree...\n");
+ NV_DEBUG(drm, "resuming object tree...\n");
nvif_client_resume(&drm->client.base);
- NV_INFO(drm, "resuming fence...\n");
+ NV_DEBUG(drm, "resuming fence...\n");
if (drm->fence && nouveau_fence(drm)->resume)
nouveau_fence(drm)->resume(drm);
nouveau_run_vbios_init(dev);
if (dev->mode_config.num_crtc) {
- NV_INFO(drm, "resuming display...\n");
+ NV_DEBUG(drm, "resuming display...\n");
nouveau_display_resume(dev, runtime);
- NV_INFO(drm, "resuming console...\n");
+ NV_DEBUG(drm, "resuming console...\n");
nouveau_fbcon_set_suspend(dev, 0);
}
@@ -998,7 +998,6 @@ driver_stub = {
.dumb_create = nouveau_display_dumb_create,
.dumb_map_offset = nouveau_display_dumb_map_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -1098,7 +1097,6 @@ static int __init
nouveau_drm_init(void)
{
driver_pci = driver_stub;
- driver_pci.set_busid = drm_pci_set_busid;
driver_platform = driver_stub;
nouveau_display_options();
@@ -1117,7 +1115,12 @@ nouveau_drm_init(void)
nouveau_register_dsm_handler();
nouveau_backlight_ctor();
- return drm_pci_init(&driver_pci, &nouveau_drm_pci_driver);
+
+#ifdef CONFIG_PCI
+ return pci_register_driver(&nouveau_drm_pci_driver);
+#else
+ return 0;
+#endif
}
static void __exit
@@ -1126,7 +1129,9 @@ nouveau_drm_exit(void)
if (!nouveau_modeset)
return;
- drm_pci_exit(&driver_pci, &nouveau_drm_pci_driver);
+#ifdef CONFIG_PCI
+ pci_unregister_driver(&nouveau_drm_pci_driver);
+#endif
nouveau_backlight_dtor();
nouveau_unregister_dsm_handler();
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 2665a078b6da..f7707849bb53 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -278,26 +278,6 @@ nouveau_fbcon_accel_init(struct drm_device *dev)
info->fbops = &nouveau_fbcon_ops;
}
-static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
- u16 blue, int regno)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-
- nv_crtc->lut.r[regno] = red;
- nv_crtc->lut.g[regno] = green;
- nv_crtc->lut.b[regno] = blue;
-}
-
-static void nouveau_fbcon_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, int regno)
-{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-
- *red = nv_crtc->lut.r[regno];
- *green = nv_crtc->lut.g[regno];
- *blue = nv_crtc->lut.b[regno];
-}
-
static void
nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *fbcon)
{
@@ -467,8 +447,6 @@ void nouveau_fbcon_gpu_lockup(struct fb_info *info)
}
static const struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
- .gamma_set = nouveau_fbcon_gamma_set,
- .gamma_get = nouveau_fbcon_gamma_get,
.fb_probe = nouveau_fbcon_create,
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 999c35a25498..b0ad7fcefcf5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -179,7 +179,8 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
}
static void
-nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
+nouveau_gart_manager_debug(struct ttm_mem_type_manager *man,
+ struct drm_printer *printer)
{
}
@@ -252,7 +253,8 @@ nv04_gart_manager_new(struct ttm_mem_type_manager *man,
}
static void
-nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
+nv04_gart_manager_debug(struct ttm_mem_type_manager *man,
+ struct drm_printer *printer)
{
}
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index e3132a2ce34d..2dbf62a2ac41 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -1055,7 +1055,6 @@ nv50_wndw = {
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = nv50_wndw_destroy,
.reset = nv50_wndw_reset,
- .set_property = drm_atomic_helper_plane_set_property,
.atomic_duplicate_state = nv50_wndw_atomic_duplicate_state,
.atomic_destroy_state = nv50_wndw_atomic_destroy_state,
};
@@ -1083,8 +1082,9 @@ nv50_wndw_ctor(const struct nv50_wndw_func *func, struct drm_device *dev,
wndw->func = func;
wndw->dmac = dmac;
- ret = drm_universal_plane_init(dev, &wndw->plane, 0, &nv50_wndw, format,
- nformat, type, "%s-%d", name, index);
+ ret = drm_universal_plane_init(dev, &wndw->plane, 0, &nv50_wndw,
+ format, nformat, NULL,
+ type, "%s-%d", name, index);
if (ret)
return ret;
@@ -2103,7 +2103,7 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
NV_ATOMIC(drm, "%s atomic_check %d\n", crtc->name, asyh->state.active);
if (asyh->state.active) {
- for_each_connector_in_state(asyh->state.state, conn, conns, i) {
+ for_each_new_connector_in_state(asyh->state.state, conn, conns, i) {
if (conns->crtc == crtc) {
asyc = nouveau_conn_atom(conns);
break;
@@ -2204,28 +2204,29 @@ nv50_head_lut_load(struct drm_crtc *crtc)
struct nv50_disp *disp = nv50_disp(crtc->dev);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
+ u16 *r, *g, *b;
int i;
- for (i = 0; i < 256; i++) {
- u16 r = nv_crtc->lut.r[i] >> 2;
- u16 g = nv_crtc->lut.g[i] >> 2;
- u16 b = nv_crtc->lut.b[i] >> 2;
+ r = crtc->gamma_store;
+ g = r + crtc->gamma_size;
+ b = g + crtc->gamma_size;
+ for (i = 0; i < 256; i++) {
if (disp->disp->oclass < GF110_DISP) {
- writew(r + 0x0000, lut + (i * 0x08) + 0);
- writew(g + 0x0000, lut + (i * 0x08) + 2);
- writew(b + 0x0000, lut + (i * 0x08) + 4);
+ writew((*r++ >> 2) + 0x0000, lut + (i * 0x08) + 0);
+ writew((*g++ >> 2) + 0x0000, lut + (i * 0x08) + 2);
+ writew((*b++ >> 2) + 0x0000, lut + (i * 0x08) + 4);
} else {
- writew(r + 0x6000, lut + (i * 0x20) + 0);
- writew(g + 0x6000, lut + (i * 0x20) + 2);
- writew(b + 0x6000, lut + (i * 0x20) + 4);
+ /* 0x6000 interferes with the 14-bit color??? */
+ writew((*r++ >> 2) + 0x6000, lut + (i * 0x20) + 0);
+ writew((*g++ >> 2) + 0x6000, lut + (i * 0x20) + 2);
+ writew((*b++ >> 2) + 0x6000, lut + (i * 0x20) + 4);
}
}
}
static const struct drm_crtc_helper_funcs
nv50_head_help = {
- .load_lut = nv50_head_lut_load,
.atomic_check = nv50_head_atomic_check,
};
@@ -2234,15 +2235,6 @@ nv50_head_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
uint32_t size,
struct drm_modeset_acquire_ctx *ctx)
{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- u32 i;
-
- for (i = 0; i < size; i++) {
- nv_crtc->lut.r[i] = r[i];
- nv_crtc->lut.g[i] = g[i];
- nv_crtc->lut.b[i] = b[i];
- }
-
nv50_head_lut_load(crtc);
return 0;
}
@@ -2325,7 +2317,6 @@ nv50_head_func = {
.destroy = nv50_head_destroy,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
- .set_property = drm_atomic_helper_crtc_set_property,
.atomic_duplicate_state = nv50_head_atomic_duplicate_state,
.atomic_destroy_state = nv50_head_atomic_destroy_state,
};
@@ -2340,19 +2331,13 @@ nv50_head_create(struct drm_device *dev, int index)
struct nv50_base *base;
struct nv50_curs *curs;
struct drm_crtc *crtc;
- int ret, i;
+ int ret;
head = kzalloc(sizeof(*head), GFP_KERNEL);
if (!head)
return -ENOMEM;
head->base.index = index;
- for (i = 0; i < 256; i++) {
- head->base.lut.r[i] = i << 8;
- head->base.lut.g[i] = i << 8;
- head->base.lut.b[i] = i << 8;
- }
-
ret = nv50_base_new(drm, head, &base);
if (ret == 0)
ret = nv50_curs_new(drm, head, &curs);
@@ -2762,7 +2747,8 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
if (!drm_detect_hdmi_monitor(nv_connector->edid))
return;
- ret = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame.avi, mode);
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame.avi, mode,
+ false);
if (!ret) {
/* We have an AVI InfoFrame, populate it to the display */
args.pwr.avi_infoframe_length
@@ -3119,11 +3105,9 @@ nv50_mstc_destroy(struct drm_connector *connector)
static const struct drm_connector_funcs
nv50_mstc = {
- .dpms = drm_atomic_helper_connector_dpms,
.reset = nouveau_conn_reset,
.detect = nv50_mstc_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
- .set_property = drm_atomic_helper_connector_set_property,
.destroy = nv50_mstc_destroy,
.atomic_duplicate_state = nouveau_conn_atomic_duplicate_state,
.atomic_destroy_state = nouveau_conn_atomic_destroy_state,
@@ -3157,7 +3141,7 @@ nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port,
mstc->connector.funcs->reset(&mstc->connector);
nouveau_conn_attach_properties(&mstc->connector);
- for (i = 0; i < ARRAY_SIZE(mstm->msto) && mstm->msto; i++)
+ for (i = 0; i < ARRAY_SIZE(mstm->msto) && mstm->msto[i]; i++)
drm_mode_connector_attach_encoder(&mstc->connector, &mstm->msto[i]->encoder);
drm_object_attach_property(&mstc->connector.base, dev->mode_config.path_property, 0);
@@ -3674,15 +3658,24 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
drm_mode_connector_attach_encoder(connector, encoder);
if (dcbe->type == DCB_OUTPUT_DP) {
+ struct nv50_disp *disp = nv50_disp(encoder->dev);
struct nvkm_i2c_aux *aux =
nvkm_i2c_aux_find(i2c, dcbe->i2c_index);
if (aux) {
- nv_encoder->i2c = &nv_connector->aux.ddc;
+ if (disp->disp->oclass < GF110_DISP) {
+ /* HW has no support for address-only
+ * transactions, so we're required to
+ * use custom I2C-over-AUX code.
+ */
+ nv_encoder->i2c = &aux->i2c;
+ } else {
+ nv_encoder->i2c = &nv_connector->aux.ddc;
+ }
nv_encoder->aux = aux;
}
/*TODO: Use DP Info Table to check for support. */
- if (nv50_disp(encoder->dev)->disp->oclass >= GF110_DISP) {
+ if (disp->disp->oclass >= GF110_DISP) {
ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16,
nv_connector->base.base.id,
&nv_encoder->dp.mstm);
@@ -3904,9 +3897,9 @@ static void
nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
- struct drm_crtc_state *crtc_state;
+ struct drm_crtc_state *new_crtc_state, *old_crtc_state;
struct drm_crtc *crtc;
- struct drm_plane_state *plane_state;
+ struct drm_plane_state *new_plane_state;
struct drm_plane *plane;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nv50_disp *disp = nv50_disp(dev);
@@ -3925,12 +3918,14 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
mutex_lock(&disp->mutex);
/* Disable head(s). */
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- struct nv50_head_atom *asyh = nv50_head_atom(crtc->state);
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
struct nv50_head *head = nv50_head(crtc);
NV_ATOMIC(drm, "%s: clr %04x (set %04x)\n", crtc->name,
asyh->clr.mask, asyh->set.mask);
+ if (old_crtc_state->active && !new_crtc_state->active)
+ drm_crtc_vblank_off(crtc);
if (asyh->clr.mask) {
nv50_head_flush_clr(head, asyh, atom->flush_disable);
@@ -3939,8 +3934,8 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
}
/* Disable plane(s). */
- for_each_plane_in_state(state, plane, plane_state, i) {
- struct nv50_wndw_atom *asyw = nv50_wndw_atom(plane->state);
+ for_each_new_plane_in_state(state, plane, new_plane_state, i) {
+ struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
struct nv50_wndw *wndw = nv50_wndw(plane);
NV_ATOMIC(drm, "%s: clr %02x (set %02x)\n", plane->name,
@@ -4005,8 +4000,8 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
}
/* Update head(s). */
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- struct nv50_head_atom *asyh = nv50_head_atom(crtc->state);
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
struct nv50_head *head = nv50_head(crtc);
NV_ATOMIC(drm, "%s: set %04x (clr %04x)\n", crtc->name,
@@ -4016,16 +4011,18 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
nv50_head_flush_set(head, asyh);
interlock_core = 1;
}
- }
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- if (crtc->state->event)
- drm_crtc_vblank_get(crtc);
+ if (new_crtc_state->active) {
+ if (!old_crtc_state->active)
+ drm_crtc_vblank_on(crtc);
+ if (new_crtc_state->event)
+ drm_crtc_vblank_get(crtc);
+ }
}
/* Update plane(s). */
- for_each_plane_in_state(state, plane, plane_state, i) {
- struct nv50_wndw_atom *asyw = nv50_wndw_atom(plane->state);
+ for_each_new_plane_in_state(state, plane, new_plane_state, i) {
+ struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
struct nv50_wndw *wndw = nv50_wndw(plane);
NV_ATOMIC(drm, "%s: set %02x (clr %02x)\n", plane->name,
@@ -4055,24 +4052,27 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
mutex_unlock(&disp->mutex);
/* Wait for HW to signal completion. */
- for_each_plane_in_state(state, plane, plane_state, i) {
- struct nv50_wndw_atom *asyw = nv50_wndw_atom(plane->state);
+ for_each_new_plane_in_state(state, plane, new_plane_state, i) {
+ struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
struct nv50_wndw *wndw = nv50_wndw(plane);
int ret = nv50_wndw_wait_armed(wndw, asyw);
if (ret)
NV_ERROR(drm, "%s: timeout\n", plane->name);
}
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- if (crtc->state->event) {
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (new_crtc_state->event) {
unsigned long flags;
/* Get correct count/ts if racing with vblank irq */
- drm_accurate_vblank_count(crtc);
+ if (new_crtc_state->active)
+ drm_crtc_accurate_vblank_count(crtc);
spin_lock_irqsave(&crtc->dev->event_lock, flags);
- drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
- crtc->state->event = NULL;
- drm_crtc_vblank_put(crtc);
+
+ new_crtc_state->event = NULL;
+ if (new_crtc_state->active)
+ drm_crtc_vblank_put(crtc);
}
}
@@ -4096,7 +4096,7 @@ nv50_disp_atomic_commit(struct drm_device *dev,
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nv50_disp *disp = nv50_disp(dev);
- struct drm_plane_state *plane_state;
+ struct drm_plane_state *old_plane_state;
struct drm_plane *plane;
struct drm_crtc *crtc;
bool active = false;
@@ -4119,12 +4119,17 @@ nv50_disp_atomic_commit(struct drm_device *dev,
if (!nonblock) {
ret = drm_atomic_helper_wait_for_fences(dev, state, true);
if (ret)
- goto done;
+ goto err_cleanup;
}
- for_each_plane_in_state(state, plane, plane_state, i) {
- struct nv50_wndw_atom *asyw = nv50_wndw_atom(plane_state);
+ ret = drm_atomic_helper_swap_state(state, true);
+ if (ret)
+ goto err_cleanup;
+
+ for_each_old_plane_in_state(state, plane, old_plane_state, i) {
+ struct nv50_wndw_atom *asyw = nv50_wndw_atom(old_plane_state);
struct nv50_wndw *wndw = nv50_wndw(plane);
+
if (asyw->set.image) {
asyw->ntfy.handle = wndw->dmac->sync.handle;
asyw->ntfy.offset = wndw->ntfy;
@@ -4135,7 +4140,6 @@ nv50_disp_atomic_commit(struct drm_device *dev,
}
}
- drm_atomic_helper_swap_state(state, true);
drm_atomic_state_get(state);
if (nonblock)
@@ -4147,7 +4151,7 @@ nv50_disp_atomic_commit(struct drm_device *dev,
if (crtc->state->enable) {
if (!drm->have_disp_power_ref) {
drm->have_disp_power_ref = true;
- return ret;
+ return 0;
}
active = true;
break;
@@ -4159,6 +4163,9 @@ nv50_disp_atomic_commit(struct drm_device *dev,
drm->have_disp_power_ref = false;
}
+err_cleanup:
+ if (ret)
+ drm_atomic_helper_cleanup_planes(dev, state);
done:
pm_runtime_put_autosuspend(dev->dev);
return ret;
@@ -4185,18 +4192,19 @@ nv50_disp_outp_atomic_add(struct nv50_atom *atom, struct drm_encoder *encoder)
static int
nv50_disp_outp_atomic_check_clr(struct nv50_atom *atom,
- struct drm_connector *connector)
+ struct drm_connector_state *old_connector_state)
{
- struct drm_encoder *encoder = connector->state->best_encoder;
- struct drm_crtc_state *crtc_state;
+ struct drm_encoder *encoder = old_connector_state->best_encoder;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct drm_crtc *crtc;
struct nv50_outp_atom *outp;
- if (!(crtc = connector->state->crtc))
+ if (!(crtc = old_connector_state->crtc))
return 0;
- crtc_state = drm_atomic_get_existing_crtc_state(&atom->state, crtc);
- if (crtc->state->active && drm_atomic_crtc_needs_modeset(crtc_state)) {
+ old_crtc_state = drm_atomic_get_old_crtc_state(&atom->state, crtc);
+ new_crtc_state = drm_atomic_get_new_crtc_state(&atom->state, crtc);
+ if (old_crtc_state->active && drm_atomic_crtc_needs_modeset(new_crtc_state)) {
outp = nv50_disp_outp_atomic_add(atom, encoder);
if (IS_ERR(outp))
return PTR_ERR(outp);
@@ -4217,15 +4225,15 @@ nv50_disp_outp_atomic_check_set(struct nv50_atom *atom,
struct drm_connector_state *connector_state)
{
struct drm_encoder *encoder = connector_state->best_encoder;
- struct drm_crtc_state *crtc_state;
+ struct drm_crtc_state *new_crtc_state;
struct drm_crtc *crtc;
struct nv50_outp_atom *outp;
if (!(crtc = connector_state->crtc))
return 0;
- crtc_state = drm_atomic_get_existing_crtc_state(&atom->state, crtc);
- if (crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state)) {
+ new_crtc_state = drm_atomic_get_new_crtc_state(&atom->state, crtc);
+ if (new_crtc_state->active && drm_atomic_crtc_needs_modeset(new_crtc_state)) {
outp = nv50_disp_outp_atomic_add(atom, encoder);
if (IS_ERR(outp))
return PTR_ERR(outp);
@@ -4241,7 +4249,7 @@ static int
nv50_disp_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
struct nv50_atom *atom = nv50_atom(state);
- struct drm_connector_state *connector_state;
+ struct drm_connector_state *old_connector_state, *new_connector_state;
struct drm_connector *connector;
int ret, i;
@@ -4249,12 +4257,12 @@ nv50_disp_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
if (ret)
return ret;
- for_each_connector_in_state(state, connector, connector_state, i) {
- ret = nv50_disp_outp_atomic_check_clr(atom, connector);
+ for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
+ ret = nv50_disp_outp_atomic_check_clr(atom, old_connector_state);
if (ret)
return ret;
- ret = nv50_disp_outp_atomic_check_set(atom, connector_state);
+ ret = nv50_disp_outp_atomic_check_set(atom, new_connector_state);
if (ret)
return ret;
}
@@ -4443,11 +4451,13 @@ nv50_display_create(struct drm_device *dev)
/* create crtc objects to represent the hw heads */
if (disp->disp->oclass >= GF110_DISP)
- crtcs = nvif_rd32(&device->object, 0x022448);
+ crtcs = nvif_rd32(&device->object, 0x612004) & 0xf;
else
- crtcs = 2;
+ crtcs = 0x3;
- for (i = 0; i < crtcs; i++) {
+ for (i = 0; i < fls(crtcs); i++) {
+ if (!(crtcs & (1 << i)))
+ continue;
ret = nv50_head_create(dev, i);
if (ret)
goto out;
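
The head-creation hunk above stops assuming heads 0..n-1 all exist and instead reads a heads-present bitmask (register 0x612004 on GF110+ according to the hunk), then iterates up to the highest set bit and skips cleared ones. A standalone sketch of that bit-walking idiom, using a portable stand-in for the kernel's fls():

#include <stdio.h>

/* Portable stand-in for the kernel's fls(): index of the highest set
 * bit, 1-based, 0 for an empty mask.
 */
static int fls32(unsigned int x)
{
	int n = 0;

	while (x) {
		x >>= 1;
		n++;
	}
	return n;
}

int main(void)
{
	unsigned int heads = 0x5;	/* heads 0 and 2 present, head 1 missing */
	int i;

	for (i = 0; i < fls32(heads); i++) {
		if (!(heads & (1u << i)))
			continue;	/* skip absent heads */
		printf("create head %d\n", i);
	}
	return 0;
}
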
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 7bdc7a5ae723..e096a5d9c292 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2043,6 +2043,7 @@ nv120_chipset = {
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gm107_pmu_new,
+ .therm = gm200_therm_new,
.secboot = gm200_secboot_new,
.timer = gk20a_timer_new,
.top = gk104_top_new,
@@ -2077,6 +2078,7 @@ nv124_chipset = {
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gm107_pmu_new,
+ .therm = gm200_therm_new,
.secboot = gm200_secboot_new,
.timer = gk20a_timer_new,
.top = gk104_top_new,
@@ -2111,6 +2113,7 @@ nv126_chipset = {
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gm107_pmu_new,
+ .therm = gm200_therm_new,
.secboot = gm200_secboot_new,
.timer = gk20a_timer_new,
.top = gk104_top_new,
@@ -2321,6 +2324,35 @@ nv137_chipset = {
};
static const struct nvkm_device_chip
+nv138_chipset = {
+ .name = "GP108",
+ .bar = gf100_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = gf100_bus_new,
+ .devinit = gm200_devinit_new,
+ .fb = gp102_fb_new,
+ .fuse = gm107_fuse_new,
+ .gpio = gk104_gpio_new,
+ .i2c = gm200_i2c_new,
+ .ibus = gm200_ibus_new,
+ .imem = nv50_instmem_new,
+ .ltc = gp100_ltc_new,
+ .mc = gp100_mc_new,
+ .mmu = gf100_mmu_new,
+ .pci = gp100_pci_new,
+ .pmu = gp102_pmu_new,
+ .timer = gk20a_timer_new,
+ .top = gk104_top_new,
+ .ce[0] = gp102_ce_new,
+ .ce[1] = gp102_ce_new,
+ .ce[2] = gp102_ce_new,
+ .ce[3] = gp102_ce_new,
+ .disp = gp102_disp_new,
+ .dma = gf119_dma_new,
+ .fifo = gp100_fifo_new,
+};
+
+static const struct nvkm_device_chip
nv13b_chipset = {
.name = "GP10B",
.bar = gk20a_bar_new,
@@ -2782,6 +2814,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x134: device->chip = &nv134_chipset; break;
case 0x136: device->chip = &nv136_chipset; break;
case 0x137: device->chip = &nv137_chipset; break;
+ case 0x138: device->chip = &nv138_chipset; break;
case 0x13b: device->chip = &nv13b_chipset; break;
default:
nvdev_error(device, "unknown chipset (%08x)\n", boot0);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
index c7c84d34d97e..93a75e5b2791 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
@@ -267,6 +267,8 @@ nvkm_disp_oneinit(struct nvkm_engine *engine)
/* Create output path objects for each VBIOS display path. */
i = -1;
while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) {
+ if (ver < 0x40) /* No support for chipsets prior to NV50. */
+ break;
if (dcbE.type == DCB_OUTPUT_UNUSED)
continue;
if (dcbE.type == DCB_OUTPUT_EOL)
@@ -283,6 +285,10 @@ nvkm_disp_oneinit(struct nvkm_engine *engine)
case DCB_OUTPUT_DP:
ret = nvkm_dp_new(disp, i, &dcbE, &outp);
break;
+ case DCB_OUTPUT_WFD:
+ /* No support for WFD yet. */
+ ret = -ENODEV;
+ continue;
default:
nvkm_warn(subdev, "dcb %d type %d unknown\n",
i, dcbE.type);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/headgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/headgf119.c
index b33552757647..9fd7ae331308 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/headgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/headgf119.c
@@ -92,5 +92,8 @@ gf119_head = {
int
gf119_head_new(struct nvkm_disp *disp, int id)
{
+ struct nvkm_device *device = disp->engine.subdev.device;
+ if (!(nvkm_rd32(device, 0x612004) & (0x00000001 << id)))
+ return 0;
return nvkm_head_new_(&gf119_head, disp, id);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
index a24312fb0228..a1e8bf48b778 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
@@ -22,6 +22,7 @@ struct nvkm_ior {
unsigned proto_evo:4;
enum nvkm_ior_proto {
CRT,
+ TV,
TMDS,
LVDS,
DP,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
index 19c635663399..6ea19466f436 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
@@ -22,7 +22,7 @@ struct nv50_disp {
u8 type[3];
} pior;
- struct nv50_disp_chan *chan[17];
+ struct nv50_disp_chan *chan[21];
};
void nv50_disp_super_1(struct nv50_disp *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
index 85aff85394ac..be9e7f8c3b23 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
@@ -62,6 +62,7 @@ nvkm_outp_xlat(struct nvkm_outp *outp, enum nvkm_ior_type *type)
case 0:
switch (outp->info.type) {
case DCB_OUTPUT_ANALOG: *type = DAC; return CRT;
+ case DCB_OUTPUT_TV : *type = DAC; return TV;
case DCB_OUTPUT_TMDS : *type = SOR; return TMDS;
case DCB_OUTPUT_LVDS : *type = SOR; return LVDS;
case DCB_OUTPUT_DP : *type = SOR; return DP;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
index 8a8895246d26..7fea7d45202f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
@@ -124,6 +124,8 @@ nv31_mpeg_tile(struct nvkm_engine *engine, int i, struct nvkm_fb_tile *tile)
static bool
nv31_mpeg_mthd_dma(struct nvkm_device *device, u32 mthd, u32 data)
{
+ struct nv31_mpeg *mpeg = nv31_mpeg(device->mpeg);
+ struct nvkm_subdev *subdev = &mpeg->engine.subdev;
u32 inst = data << 4;
u32 dma0 = nvkm_rd32(device, 0x700000 + inst);
u32 dma1 = nvkm_rd32(device, 0x700004 + inst);
@@ -132,8 +134,11 @@ nv31_mpeg_mthd_dma(struct nvkm_device *device, u32 mthd, u32 data)
u32 size = dma1 + 1;
/* only allow linear DMA objects */
- if (!(dma0 & 0x00002000))
+ if (!(dma0 & 0x00002000)) {
+ nvkm_error(subdev, "inst %08x dma0 %08x dma1 %08x dma2 %08x\n",
+ inst, dma0, dma1, dma2);
return false;
+ }
if (mthd == 0x0190) {
/* DMA_CMD */
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c
index 16de5bd94b14..b5ec7c504dc6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv40.c
@@ -31,6 +31,8 @@ bool
nv40_mpeg_mthd_dma(struct nvkm_device *device, u32 mthd, u32 data)
{
struct nvkm_instmem *imem = device->imem;
+ struct nv31_mpeg *mpeg = nv31_mpeg(device->mpeg);
+ struct nvkm_subdev *subdev = &mpeg->engine.subdev;
u32 inst = data << 4;
u32 dma0 = nvkm_instmem_rd32(imem, inst + 0);
u32 dma1 = nvkm_instmem_rd32(imem, inst + 4);
@@ -39,8 +41,11 @@ nv40_mpeg_mthd_dma(struct nvkm_device *device, u32 mthd, u32 data)
u32 size = dma1 + 1;
/* only allow linear DMA objects */
- if (!(dma0 & 0x00002000))
+ if (!(dma0 & 0x00002000)) {
+ nvkm_error(subdev, "inst %08x dma0 %08x dma1 %08x dma2 %08x\n",
+ inst, dma0, dma1, dma2);
return false;
+ }
if (mthd == 0x0190) {
/* DMA_CMD */
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
index d45d7947a964..77273b53672c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
@@ -251,7 +251,7 @@ cmd_write(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_hdr *cmd,
struct nvkm_msgqueue_queue *queue)
{
const struct nvkm_subdev *subdev = priv->falcon->owner;
- static unsigned long timeout = ~0;
+ static unsigned timeout = 2000;
unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout);
int ret = -EAGAIN;
bool commit = true;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
index c794b2c2d21e..676c167c95b9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
@@ -24,6 +24,7 @@
#include "gf100.h"
#include <core/gpuobj.h>
+#include <core/option.h>
#include <subdev/fb.h>
#include <subdev/mmu.h>
@@ -59,6 +60,8 @@ gf100_bar_ctor_vm(struct gf100_bar *bar, struct gf100_bar_vm *bar_vm,
return ret;
bar_len = device->func->resource_size(device, bar_nr);
+ if (bar_nr == 3 && bar->bar2_halve)
+ bar_len >>= 1;
ret = nvkm_vm_new(device, 0, bar_len, 0, key, &vm);
if (ret)
@@ -129,7 +132,9 @@ gf100_bar_init(struct nvkm_bar *base)
if (bar->bar[0].mem) {
addr = nvkm_memory_addr(bar->bar[0].mem) >> 12;
- nvkm_wr32(device, 0x001714, 0xc0000000 | addr);
+ if (bar->bar2_halve)
+ addr |= 0x40000000;
+ nvkm_wr32(device, 0x001714, 0x80000000 | addr);
}
return 0;
@@ -161,6 +166,7 @@ gf100_bar_new_(const struct nvkm_bar_func *func, struct nvkm_device *device,
if (!(bar = kzalloc(sizeof(*bar), GFP_KERNEL)))
return -ENOMEM;
nvkm_bar_ctor(func, device, index, &bar->base);
+ bar->bar2_halve = nvkm_boolopt(device->cfgopt, "NvBar2Halve", false);
*pbar = &bar->base;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h
index f7dea69640d8..20a5255362ba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h
@@ -11,6 +11,7 @@ struct gf100_bar_vm {
struct gf100_bar {
struct nvkm_bar base;
+ bool bar2_halve;
struct gf100_bar_vm bar[2];
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
index 3841ad6be99e..a239e73562c8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
@@ -60,12 +60,12 @@ gf100_fb_oneinit(struct nvkm_fb *base)
size = min(size, 0x1000);
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000,
- false, &fb->base.mmu_rd);
+ true, &fb->base.mmu_rd);
if (ret)
return ret;
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000,
- false, &fb->base.mmu_wr);
+ true, &fb->base.mmu_wr);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
index 48f01e40b8fc..b768e66a472b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
@@ -25,6 +25,7 @@ nvkm-y += nvkm/subdev/i2c/bit.o
nvkm-y += nvkm/subdev/i2c/aux.o
nvkm-y += nvkm/subdev/i2c/auxg94.o
+nvkm-y += nvkm/subdev/i2c/auxgf119.o
nvkm-y += nvkm/subdev/i2c/auxgm200.o
nvkm-y += nvkm/subdev/i2c/anx9805.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
index d172e42dd228..4c1f547da463 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
@@ -117,6 +117,10 @@ int
nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *aux, bool retry, u8 type,
u32 addr, u8 *data, u8 *size)
{
+ if (!*size && !aux->func->address_only) {
+ AUX_ERR(aux, "address-only transaction dropped");
+ return -ENOSYS;
+ }
return aux->func->xfer(aux, retry, type, addr, data, size);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
index 27a4a39c87f0..9587ab456d9e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
@@ -3,6 +3,7 @@
#include "pad.h"
struct nvkm_i2c_aux_func {
+ bool address_only;
int (*xfer)(struct nvkm_i2c_aux *, bool retry, u8 type,
u32 addr, u8 *data, u8 *size);
int (*lnk_ctl)(struct nvkm_i2c_aux *, int link_nr, int link_bw,
@@ -17,7 +18,12 @@ void nvkm_i2c_aux_del(struct nvkm_i2c_aux **);
int nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *, bool retry, u8 type,
u32 addr, u8 *data, u8 *size);
+int g94_i2c_aux_new_(const struct nvkm_i2c_aux_func *, struct nvkm_i2c_pad *,
+ int, u8, struct nvkm_i2c_aux **);
+
int g94_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **);
+int g94_i2c_aux_xfer(struct nvkm_i2c_aux *, bool, u8, u32, u8 *, u8 *);
+int gf119_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **);
int gm200_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **);
#define AUX_MSG(b,l,f,a...) do { \
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
index ab8cb196c34e..c8ab1b5741a3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
@@ -72,7 +72,7 @@ g94_i2c_aux_init(struct g94_i2c_aux *aux)
return 0;
}
-static int
+int
g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
u8 type, u32 addr, u8 *data, u8 *size)
{
@@ -105,9 +105,9 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
}
ctrl = nvkm_rd32(device, 0x00e4e4 + base);
- ctrl &= ~0x0001f0ff;
+ ctrl &= ~0x0001f1ff;
ctrl |= type << 12;
- ctrl |= *size - 1;
+ ctrl |= (*size ? (*size - 1) : 0x00000100);
nvkm_wr32(device, 0x00e4e0 + base, addr);
/* (maybe) retry transaction a number of times on failure... */
@@ -160,14 +160,10 @@ out:
return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
}
-static const struct nvkm_i2c_aux_func
-g94_i2c_aux_func = {
- .xfer = g94_i2c_aux_xfer,
-};
-
int
-g94_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive,
- struct nvkm_i2c_aux **paux)
+g94_i2c_aux_new_(const struct nvkm_i2c_aux_func *func,
+ struct nvkm_i2c_pad *pad, int index, u8 drive,
+ struct nvkm_i2c_aux **paux)
{
struct g94_i2c_aux *aux;
@@ -175,8 +171,20 @@ g94_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive,
return -ENOMEM;
*paux = &aux->base;
- nvkm_i2c_aux_ctor(&g94_i2c_aux_func, pad, index, &aux->base);
+ nvkm_i2c_aux_ctor(func, pad, index, &aux->base);
aux->ch = drive;
aux->base.intr = 1 << aux->ch;
return 0;
}
+
+static const struct nvkm_i2c_aux_func
+g94_i2c_aux = {
+ .xfer = g94_i2c_aux_xfer,
+};
+
+int
+g94_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive,
+ struct nvkm_i2c_aux **paux)
+{
+ return g94_i2c_aux_new_(&g94_i2c_aux, pad, index, drive, paux);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c
new file mode 100644
index 000000000000..dab40cd8fe3a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "aux.h"
+
+static const struct nvkm_i2c_aux_func
+gf119_i2c_aux = {
+ .address_only = true,
+ .xfer = g94_i2c_aux_xfer,
+};
+
+int
+gf119_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive,
+ struct nvkm_i2c_aux **paux)
+{
+ return g94_i2c_aux_new_(&gf119_i2c_aux, pad, index, drive, paux);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
index ee091fa79628..7ef60895f43a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
@@ -105,9 +105,9 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
}
ctrl = nvkm_rd32(device, 0x00d954 + base);
- ctrl &= ~0x0001f0ff;
+ ctrl &= ~0x0001f1ff;
ctrl |= type << 12;
- ctrl |= *size - 1;
+ ctrl |= (*size ? (*size - 1) : 0x00000100);
nvkm_wr32(device, 0x00d950 + base, addr);
/* (maybe) retry transaction a number of times on failure... */
@@ -162,6 +162,7 @@ out:
static const struct nvkm_i2c_aux_func
gm200_i2c_aux_func = {
+ .address_only = true,
.xfer = gm200_i2c_aux_xfer,
};
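
Editorial aside (not part of the patch): both AUX xfer hunks above change the control-word setup so that a zero-byte request becomes an address-only transaction. The sketch below shows that encoding under the assumption, suggested by the masks above, that bit 8 flags address-only and the low byte carries size - 1; everything else about the register is out of scope here.

#include <stdint.h>
#include <stdio.h>

static uint32_t aux_ctrl_size(uint32_t ctrl, uint8_t type, uint8_t size)
{
	ctrl &= ~0x0001f1ff;				/* clear type + size fields */
	ctrl |= (uint32_t)type << 12;			/* transaction type */
	ctrl |= size ? (uint32_t)(size - 1) : 0x00000100; /* 0 bytes -> address-only */
	return ctrl;
}

int main(void)
{
	printf("%08x\n", aux_ctrl_size(0, 1, 0));	/* address-only transaction */
	printf("%08x\n", aux_ctrl_size(0, 1, 16));	/* 16-byte transfer */
	return 0;
}
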
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c
index d53212f1aa52..3bc4d0310076 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c
@@ -28,7 +28,7 @@
static const struct nvkm_i2c_pad_func
gf119_i2c_pad_s_func = {
.bus_new_4 = gf119_i2c_bus_new,
- .aux_new_6 = g94_i2c_aux_new,
+ .aux_new_6 = gf119_i2c_aux_new,
.mode = g94_i2c_pad_mode,
};
@@ -41,7 +41,7 @@ gf119_i2c_pad_s_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
static const struct nvkm_i2c_pad_func
gf119_i2c_pad_x_func = {
.bus_new_4 = gf119_i2c_bus_new,
- .aux_new_6 = g94_i2c_aux_new,
+ .aux_new_6 = gf119_i2c_aux_new,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
index d2c4d6033abb..f93766418056 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
@@ -27,6 +27,7 @@ static const struct nvkm_mc_map
gf100_mc_reset[] = {
{ 0x00020000, NVKM_ENGINE_MSPDEC },
{ 0x00008000, NVKM_ENGINE_MSVLD },
+ { 0x00002000, NVKM_SUBDEV_PMU, true },
{ 0x00001000, NVKM_ENGINE_GR },
{ 0x00000100, NVKM_ENGINE_FIFO },
{ 0x00000080, NVKM_ENGINE_CE1 },
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
index eb9b278198b2..a4cb82495cee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
@@ -192,6 +192,10 @@ nvkm_pci_new_(const struct nvkm_pci_func *func, struct nvkm_device *device,
}
}
+#ifdef __BIG_ENDIAN
+ pci->msi = false;
+#endif
+
pci->msi = nvkm_boolopt(device->cfgopt, "NvMSI", pci->msi);
if (pci->msi && func->msi_rearm) {
pci->msi = pci_enable_msi(pci->pdev) == 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
index 3306f9fe7140..ce70a193caa7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
@@ -75,7 +75,7 @@ nvkm_pmu_reset(struct nvkm_pmu *pmu)
{
struct nvkm_device *device = pmu->subdev.device;
- if (!(nvkm_rd32(device, 0x000200) & 0x00002000))
+ if (!pmu->func->enabled(pmu))
return 0;
/* Inhibit interrupts, and wait for idle. */
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
index 0e36d4cb7201..0b458656e870 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c
@@ -24,13 +24,30 @@
#include "priv.h"
#include "fuc/gf100.fuc3.h"
+#include <subdev/mc.h>
+
+void
+gf100_pmu_reset(struct nvkm_pmu *pmu)
+{
+ struct nvkm_device *device = pmu->subdev.device;
+ nvkm_mc_disable(device, NVKM_SUBDEV_PMU);
+ nvkm_mc_enable(device, NVKM_SUBDEV_PMU);
+}
+
+bool
+gf100_pmu_enabled(struct nvkm_pmu *pmu)
+{
+ return nvkm_mc_enabled(pmu->subdev.device, NVKM_SUBDEV_PMU);
+}
+
static const struct nvkm_pmu_func
gf100_pmu = {
.code.data = gf100_pmu_code,
.code.size = sizeof(gf100_pmu_code),
.data.data = gf100_pmu_data,
.data.size = sizeof(gf100_pmu_data),
- .reset = gt215_pmu_reset,
+ .enabled = gf100_pmu_enabled,
+ .reset = gf100_pmu_reset,
.init = gt215_pmu_init,
.fini = gt215_pmu_fini,
.intr = gt215_pmu_intr,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c
index 0e4ba4248b15..3dfa79d4fb13 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf119.c
@@ -30,7 +30,8 @@ gf119_pmu = {
.code.size = sizeof(gf119_pmu_code),
.data.data = gf119_pmu_data,
.data.size = sizeof(gf119_pmu_data),
- .reset = gt215_pmu_reset,
+ .enabled = gf100_pmu_enabled,
+ .reset = gf100_pmu_reset,
.init = gt215_pmu_init,
.fini = gt215_pmu_fini,
.intr = gt215_pmu_intr,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
index 2ad858d825ac..8f7ec10fd2a4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
@@ -109,7 +109,8 @@ gk104_pmu = {
.code.size = sizeof(gk104_pmu_code),
.data.data = gk104_pmu_data,
.data.size = sizeof(gk104_pmu_data),
- .reset = gt215_pmu_reset,
+ .enabled = gf100_pmu_enabled,
+ .reset = gf100_pmu_reset,
.init = gt215_pmu_init,
.fini = gt215_pmu_fini,
.intr = gt215_pmu_intr,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
index fc4b8ecfdaeb..345741d55a56 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c
@@ -88,7 +88,8 @@ gk110_pmu = {
.code.size = sizeof(gk110_pmu_code),
.data.data = gk110_pmu_data,
.data.size = sizeof(gk110_pmu_data),
- .reset = gt215_pmu_reset,
+ .enabled = gf100_pmu_enabled,
+ .reset = gf100_pmu_reset,
.init = gt215_pmu_init,
.fini = gt215_pmu_fini,
.intr = gt215_pmu_intr,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
index e9a91277683a..e4acf7876ea1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk208.c
@@ -30,7 +30,8 @@ gk208_pmu = {
.code.size = sizeof(gk208_pmu_code),
.data.data = gk208_pmu_data,
.data.size = sizeof(gk208_pmu_data),
- .reset = gt215_pmu_reset,
+ .enabled = gf100_pmu_enabled,
+ .reset = gf100_pmu_reset,
.init = gt215_pmu_init,
.fini = gt215_pmu_fini,
.intr = gt215_pmu_intr,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
index 978aae3c1001..05e81855c367 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
@@ -196,9 +196,10 @@ gk20a_dvfs_data= {
static const struct nvkm_pmu_func
gk20a_pmu = {
+ .enabled = gf100_pmu_enabled,
.init = gk20a_pmu_init,
.fini = gk20a_pmu_fini,
- .reset = gt215_pmu_reset,
+ .reset = gf100_pmu_reset,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c
index 9a248ed75f09..459df1ef9e70 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm107.c
@@ -32,7 +32,8 @@ gm107_pmu = {
.code.size = sizeof(gm107_pmu_code),
.data.data = gm107_pmu_data,
.data.size = sizeof(gm107_pmu_data),
- .reset = gt215_pmu_reset,
+ .enabled = gf100_pmu_enabled,
+ .reset = gf100_pmu_reset,
.init = gt215_pmu_init,
.fini = gt215_pmu_fini,
.intr = gt215_pmu_intr,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
index 44bef22bce52..31c843145c7a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
@@ -38,6 +38,7 @@ gm20b_pmu_recv(struct nvkm_pmu *pmu)
static const struct nvkm_pmu_func
gm20b_pmu = {
+ .enabled = gf100_pmu_enabled,
.intr = gt215_pmu_intr,
.recv = gm20b_pmu_recv,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c
index 6c41c20c85a7..e210cd6af816 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp100.c
@@ -25,7 +25,8 @@
static const struct nvkm_pmu_func
gp100_pmu = {
- .reset = gt215_pmu_reset,
+ .enabled = gf100_pmu_enabled,
+ .reset = gf100_pmu_reset,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
index f017352206c9..98c7a2a8afc4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
@@ -31,8 +31,15 @@ gp102_pmu_reset(struct nvkm_pmu *pmu)
nvkm_mask(device, 0x10a3c0, 0x00000001, 0x00000000);
}
+static bool
+gp102_pmu_enabled(struct nvkm_pmu *pmu)
+{
+ return !(nvkm_rd32(pmu->subdev.device, 0x10a3c0) & 0x00000001);
+}
+
static const struct nvkm_pmu_func
gp102_pmu = {
+ .enabled = gp102_pmu_enabled,
.reset = gp102_pmu_reset,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
index 90d428b3be97..e04216daea58 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c
@@ -180,13 +180,19 @@ gt215_pmu_fini(struct nvkm_pmu *pmu)
nvkm_wr32(pmu->subdev.device, 0x10a014, 0x00000060);
}
-void
+static void
gt215_pmu_reset(struct nvkm_pmu *pmu)
{
struct nvkm_device *device = pmu->subdev.device;
- nvkm_mask(device, 0x000200, 0x00002000, 0x00000000);
- nvkm_mask(device, 0x000200, 0x00002000, 0x00002000);
- nvkm_rd32(device, 0x000200);
+ nvkm_mask(device, 0x022210, 0x00000001, 0x00000000);
+ nvkm_mask(device, 0x022210, 0x00000001, 0x00000001);
+ nvkm_rd32(device, 0x022210);
+}
+
+static bool
+gt215_pmu_enabled(struct nvkm_pmu *pmu)
+{
+ return nvkm_rd32(pmu->subdev.device, 0x022210) & 0x00000001;
}
int
@@ -241,6 +247,7 @@ gt215_pmu = {
.code.size = sizeof(gt215_pmu_code),
.data.data = gt215_pmu_data,
.data.size = sizeof(gt215_pmu_data),
+ .enabled = gt215_pmu_enabled,
.reset = gt215_pmu_reset,
.init = gt215_pmu_init,
.fini = gt215_pmu_fini,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
index 096cba069f72..a4c48a10cd47 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
@@ -20,6 +20,7 @@ struct nvkm_pmu_func {
u32 size;
} data;
+ bool (*enabled)(struct nvkm_pmu *);
void (*reset)(struct nvkm_pmu *);
int (*init)(struct nvkm_pmu *);
void (*fini)(struct nvkm_pmu *);
@@ -30,12 +31,14 @@ struct nvkm_pmu_func {
void (*pgob)(struct nvkm_pmu *, bool);
};
-void gt215_pmu_reset(struct nvkm_pmu *);
int gt215_pmu_init(struct nvkm_pmu *);
void gt215_pmu_fini(struct nvkm_pmu *);
void gt215_pmu_intr(struct nvkm_pmu *);
void gt215_pmu_recv(struct nvkm_pmu *);
int gt215_pmu_send(struct nvkm_pmu *, u32[2], u32, u32, u32, u32);
+bool gf100_pmu_enabled(struct nvkm_pmu *);
+void gf100_pmu_reset(struct nvkm_pmu *);
+
void gk110_pmu_pgob(struct nvkm_pmu *, bool);
#endif
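
Editorial aside (not part of the patch): the PMU hunks replace a hard-coded register test in nvkm_pmu_reset() with a per-chipset enabled() hook sitting next to reset(). A reduced sketch of that dispatch shape follows; the structure and function names are hypothetical stand-ins, and only the call pattern mirrors nvkm_pmu_func.

#include <stdbool.h>
#include <stdio.h>

struct pmu;

struct pmu_func {
	bool (*enabled)(struct pmu *);
	void (*reset)(struct pmu *);
};

struct pmu {
	const struct pmu_func *func;
};

static bool demo_enabled(struct pmu *pmu) { (void)pmu; return true; }
static void demo_reset(struct pmu *pmu)   { (void)pmu; puts("reset"); }

static const struct pmu_func demo_func = {
	.enabled = demo_enabled,
	.reset   = demo_reset,
};

/* Mirrors nvkm_pmu_reset(): skip the reset entirely if the unit is off. */
static void pmu_reset(struct pmu *pmu)
{
	if (!pmu->func->enabled(pmu))
		return;
	pmu->func->reset(pmu);
}

int main(void)
{
	struct pmu pmu = { .func = &demo_func };
	pmu_reset(&pmu);
	return 0;
}
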
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild
index 135758ba3e28..2bafcc1d1818 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild
@@ -11,3 +11,4 @@ nvkm-y += nvkm/subdev/therm/g84.o
nvkm-y += nvkm/subdev/therm/gt215.o
nvkm-y += nvkm/subdev/therm/gf119.o
nvkm-y += nvkm/subdev/therm/gm107.o
+nvkm-y += nvkm/subdev/therm/gm200.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c
index 86e81930d8ee..96f8da40ac82 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/g84.c
@@ -203,7 +203,7 @@ g84_therm_fini(struct nvkm_therm *therm)
nvkm_wr32(device, 0x1100, 0x10000); /* PBUS */
}
-static void
+void
g84_therm_init(struct nvkm_therm *therm)
{
g84_sensor_setup(therm);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm200.c
new file mode 100644
index 000000000000..73dc78093d5d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gm200.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2017 Karol Herbst
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Karol Herbst
+ */
+#include "priv.h"
+
+static const struct nvkm_therm_func
+gm200_therm = {
+ .init = g84_therm_init,
+ .fini = g84_therm_fini,
+ .temp_get = g84_temp_get,
+ .program_alarms = nvkm_therm_program_alarms_polling,
+};
+
+int
+gm200_therm_new(struct nvkm_device *device, int index,
+ struct nvkm_therm **ptherm)
+{
+ return nvkm_therm_new_(&gm200_therm, device, index, ptherm);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h
index 235a5d8daff6..1f46e371d7c4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h
@@ -111,6 +111,7 @@ void g84_therm_fini(struct nvkm_therm *);
int gt215_therm_fan_sense(struct nvkm_therm *);
+void g84_therm_init(struct nvkm_therm *);
void gf119_therm_init(struct nvkm_therm *);
int nvkm_fanpwm_create(struct nvkm_therm *, struct dcb_gpio_func *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
index e93b2410c38b..ddb2b2c600ca 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
@@ -83,7 +83,7 @@ nvkm_therm_sensor_event(struct nvkm_therm *therm, enum nvkm_therm_thrs thrs,
{
struct nvkm_subdev *subdev = &therm->subdev;
bool active;
- const char *thresolds[] = {
+ static const char * const thresholds[] = {
"fanboost", "downclock", "critical", "shutdown"
};
int temperature = therm->func->temp_get(therm);
@@ -94,10 +94,10 @@ nvkm_therm_sensor_event(struct nvkm_therm *therm, enum nvkm_therm_thrs thrs,
if (dir == NVKM_THERM_THRS_FALLING)
nvkm_info(subdev,
"temperature (%i C) went below the '%s' threshold\n",
- temperature, thresolds[thrs]);
+ temperature, thresholds[thrs]);
else
nvkm_info(subdev, "temperature (%i C) hit the '%s' threshold\n",
- temperature, thresolds[thrs]);
+ temperature, thresholds[thrs]);
active = (dir == NVKM_THERM_THRS_RISING);
switch (thrs) {
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
index e1fa143a5625..542a76503fbd 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
@@ -198,6 +198,9 @@ static int tvc_probe(struct platform_device *pdev)
struct omap_dss_device *dssdev;
int r;
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
if (!ddata)
return -ENOMEM;
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
index 79cb69f1acf5..d9d25df6fc1b 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
@@ -15,6 +15,7 @@
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
+#include <linux/mutex.h>
#include <drm/drm_edid.h>
@@ -37,6 +38,10 @@ static const struct videomode hdmic_default_vm = {
struct panel_drv_data {
struct omap_dss_device dssdev;
struct omap_dss_device *in;
+ void (*hpd_cb)(void *cb_data, enum drm_connector_status status);
+ void *hpd_cb_data;
+ bool hpd_enabled;
+ struct mutex hpd_lock;
struct device *dev;
@@ -167,6 +172,70 @@ static bool hdmic_detect(struct omap_dss_device *dssdev)
return in->ops.hdmi->detect(in);
}
+static int hdmic_register_hpd_cb(struct omap_dss_device *dssdev,
+ void (*cb)(void *cb_data,
+ enum drm_connector_status status),
+ void *cb_data)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ if (gpio_is_valid(ddata->hpd_gpio)) {
+ mutex_lock(&ddata->hpd_lock);
+ ddata->hpd_cb = cb;
+ ddata->hpd_cb_data = cb_data;
+ mutex_unlock(&ddata->hpd_lock);
+ return 0;
+ } else if (in->ops.hdmi->register_hpd_cb) {
+ return in->ops.hdmi->register_hpd_cb(in, cb, cb_data);
+ }
+
+ return -ENOTSUPP;
+}
+
+static void hdmic_unregister_hpd_cb(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ if (gpio_is_valid(ddata->hpd_gpio)) {
+ mutex_lock(&ddata->hpd_lock);
+ ddata->hpd_cb = NULL;
+ ddata->hpd_cb_data = NULL;
+ mutex_unlock(&ddata->hpd_lock);
+ } else if (in->ops.hdmi->unregister_hpd_cb) {
+ in->ops.hdmi->unregister_hpd_cb(in);
+ }
+}
+
+static void hdmic_enable_hpd(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ if (gpio_is_valid(ddata->hpd_gpio)) {
+ mutex_lock(&ddata->hpd_lock);
+ ddata->hpd_enabled = true;
+ mutex_unlock(&ddata->hpd_lock);
+ } else if (in->ops.hdmi->enable_hpd) {
+ in->ops.hdmi->enable_hpd(in);
+ }
+}
+
+static void hdmic_disable_hpd(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ if (gpio_is_valid(ddata->hpd_gpio)) {
+ mutex_lock(&ddata->hpd_lock);
+ ddata->hpd_enabled = false;
+ mutex_unlock(&ddata->hpd_lock);
+ } else if (in->ops.hdmi->disable_hpd) {
+ in->ops.hdmi->disable_hpd(in);
+ }
+}
+
static int hdmic_set_hdmi_mode(struct omap_dss_device *dssdev, bool hdmi_mode)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
@@ -197,10 +266,34 @@ static struct omap_dss_driver hdmic_driver = {
.read_edid = hdmic_read_edid,
.detect = hdmic_detect,
+ .register_hpd_cb = hdmic_register_hpd_cb,
+ .unregister_hpd_cb = hdmic_unregister_hpd_cb,
+ .enable_hpd = hdmic_enable_hpd,
+ .disable_hpd = hdmic_disable_hpd,
.set_hdmi_mode = hdmic_set_hdmi_mode,
.set_hdmi_infoframe = hdmic_set_infoframe,
};
+static irqreturn_t hdmic_hpd_isr(int irq, void *data)
+{
+ struct panel_drv_data *ddata = data;
+
+ mutex_lock(&ddata->hpd_lock);
+ if (ddata->hpd_enabled && ddata->hpd_cb) {
+ enum drm_connector_status status;
+
+ if (hdmic_detect(&ddata->dssdev))
+ status = connector_status_connected;
+ else
+ status = connector_status_disconnected;
+
+ ddata->hpd_cb(ddata->hpd_cb_data, status);
+ }
+ mutex_unlock(&ddata->hpd_lock);
+
+ return IRQ_HANDLED;
+}
+
static int hdmic_probe_of(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
@@ -246,11 +339,22 @@ static int hdmic_probe(struct platform_device *pdev)
if (r)
return r;
+ mutex_init(&ddata->hpd_lock);
+
if (gpio_is_valid(ddata->hpd_gpio)) {
r = devm_gpio_request_one(&pdev->dev, ddata->hpd_gpio,
GPIOF_DIR_IN, "hdmi_hpd");
if (r)
goto err_reg;
+
+ r = devm_request_threaded_irq(&pdev->dev,
+ gpio_to_irq(ddata->hpd_gpio),
+ NULL, hdmic_hpd_isr,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ "hdmic hpd", ddata);
+ if (r)
+ goto err_reg;
}
ddata->vm = hdmic_default_vm;
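
Editorial aside (not part of the patch): connector-hdmi.c and encoder-tpd12s015.c gain the same hot-plug plumbing -- a threaded IRQ handler that, under a mutex, checks whether notifications are enabled and forwards the detect() result to a registered callback. A userspace-flavoured sketch of that shape follows; the detect() stub and the pthread locking are illustrative assumptions.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct hpd_state {
	pthread_mutex_t lock;
	bool enabled;
	void (*cb)(void *data, bool connected);
	void *cb_data;
};

static bool detect(void) { return true; }	/* stand-in for hdmic_detect() */

/* Mirrors hdmic_hpd_isr(): only notify while enabled and a callback is set. */
static void hpd_isr(struct hpd_state *st)
{
	pthread_mutex_lock(&st->lock);
	if (st->enabled && st->cb)
		st->cb(st->cb_data, detect());
	pthread_mutex_unlock(&st->lock);
}

static void print_status(void *data, bool connected)
{
	(void)data;
	printf("connector %s\n", connected ? "connected" : "disconnected");
}

int main(void)
{
	struct hpd_state st = { PTHREAD_MUTEX_INITIALIZER, true, print_status, NULL };
	hpd_isr(&st);
	return 0;
}
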
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
index 58276a48112e..a9e9d667c55e 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
@@ -15,12 +15,17 @@
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/gpio/consumer.h>
+#include <linux/mutex.h>
#include "../dss/omapdss.h"
struct panel_drv_data {
struct omap_dss_device dssdev;
struct omap_dss_device *in;
+ void (*hpd_cb)(void *cb_data, enum drm_connector_status status);
+ void *hpd_cb_data;
+ bool hpd_enabled;
+ struct mutex hpd_lock;
struct gpio_desc *ct_cp_hpd_gpio;
struct gpio_desc *ls_oe_gpio;
@@ -162,6 +167,49 @@ static bool tpd_detect(struct omap_dss_device *dssdev)
return gpiod_get_value_cansleep(ddata->hpd_gpio);
}
+static int tpd_register_hpd_cb(struct omap_dss_device *dssdev,
+ void (*cb)(void *cb_data,
+ enum drm_connector_status status),
+ void *cb_data)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+
+ mutex_lock(&ddata->hpd_lock);
+ ddata->hpd_cb = cb;
+ ddata->hpd_cb_data = cb_data;
+ mutex_unlock(&ddata->hpd_lock);
+
+ return 0;
+}
+
+static void tpd_unregister_hpd_cb(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+
+ mutex_lock(&ddata->hpd_lock);
+ ddata->hpd_cb = NULL;
+ ddata->hpd_cb_data = NULL;
+ mutex_unlock(&ddata->hpd_lock);
+}
+
+static void tpd_enable_hpd(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+
+ mutex_lock(&ddata->hpd_lock);
+ ddata->hpd_enabled = true;
+ mutex_unlock(&ddata->hpd_lock);
+}
+
+static void tpd_disable_hpd(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+
+ mutex_lock(&ddata->hpd_lock);
+ ddata->hpd_enabled = false;
+ mutex_unlock(&ddata->hpd_lock);
+}
+
static int tpd_set_infoframe(struct omap_dss_device *dssdev,
const struct hdmi_avi_infoframe *avi)
{
@@ -193,10 +241,34 @@ static const struct omapdss_hdmi_ops tpd_hdmi_ops = {
.read_edid = tpd_read_edid,
.detect = tpd_detect,
+ .register_hpd_cb = tpd_register_hpd_cb,
+ .unregister_hpd_cb = tpd_unregister_hpd_cb,
+ .enable_hpd = tpd_enable_hpd,
+ .disable_hpd = tpd_disable_hpd,
.set_infoframe = tpd_set_infoframe,
.set_hdmi_mode = tpd_set_hdmi_mode,
};
+static irqreturn_t tpd_hpd_isr(int irq, void *data)
+{
+ struct panel_drv_data *ddata = data;
+
+ mutex_lock(&ddata->hpd_lock);
+ if (ddata->hpd_enabled && ddata->hpd_cb) {
+ enum drm_connector_status status;
+
+ if (tpd_detect(&ddata->dssdev))
+ status = connector_status_connected;
+ else
+ status = connector_status_disconnected;
+
+ ddata->hpd_cb(ddata->hpd_cb_data, status);
+ }
+ mutex_unlock(&ddata->hpd_lock);
+
+ return IRQ_HANDLED;
+}
+
static int tpd_probe_of(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
@@ -261,6 +333,15 @@ static int tpd_probe(struct platform_device *pdev)
ddata->hpd_gpio = gpio;
+ mutex_init(&ddata->hpd_lock);
+
+ r = devm_request_threaded_irq(&pdev->dev, gpiod_to_irq(ddata->hpd_gpio),
+ NULL, tpd_hpd_isr,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "tpd12s015 hpd", ddata);
+ if (r)
+ goto err_gpio;
+
dssdev = &ddata->dssdev;
dssdev->ops.hdmi = &tpd_hdmi_ops;
dssdev->dev = &pdev->dev;
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
index 6468a765f3d1..e065f7e10cca 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
@@ -231,6 +231,9 @@ static int panel_dpi_probe(struct platform_device *pdev)
struct omap_dss_device *dssdev;
int r;
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
if (ddata == NULL)
return -ENOMEM;
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
index 76787a75a4dc..92c556ac22c7 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
@@ -554,7 +554,7 @@ static struct attribute *dsicm_attrs[] = {
NULL,
};
-static struct attribute_group dsicm_attr_group = {
+static const struct attribute_group dsicm_attr_group = {
.attrs = dsicm_attrs,
};
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
index c90474afaebd..74d13969b9ca 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
@@ -19,7 +19,7 @@
#include "../dss/omapdss.h"
-static struct videomode lb035q02_vm = {
+static const struct videomode lb035q02_vm = {
.hactive = 320,
.vactive = 240,
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
index 346aefdb015f..8e5bff4e5226 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
@@ -503,7 +503,7 @@ static struct attribute *bldev_attrs[] = {
NULL,
};
-static struct attribute_group bldev_attr_group = {
+static const struct attribute_group bldev_attr_group = {
.attrs = bldev_attrs,
};
@@ -720,6 +720,9 @@ static int acx565akm_probe(struct spi_device *spi)
dev_dbg(&spi->dev, "%s\n", __func__);
+ if (!spi->dev.of_node)
+ return -ENODEV;
+
spi->mode = SPI_MODE_3;
ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL);
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
index cbf4c67c4933..0a38a0e8c925 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
@@ -40,7 +40,7 @@ struct panel_drv_data {
struct spi_device *spi_dev;
};
-static struct videomode td028ttec1_panel_vm = {
+static const struct videomode td028ttec1_panel_vm = {
.hactive = 480,
.vactive = 640,
.pixelclock = 22153000,
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
index 20c6d8fe215a..ac4a6d4d134c 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
@@ -282,7 +282,7 @@ static struct attribute *tpo_td043_attrs[] = {
NULL,
};
-static struct attribute_group tpo_td043_attr_group = {
+static const struct attribute_group tpo_td043_attr_group = {
.attrs = tpo_td043_attrs,
};
diff --git a/drivers/gpu/drm/omapdrm/dss/Makefile b/drivers/gpu/drm/omapdrm/dss/Makefile
index 688195e448c5..142ce5a02542 100644
--- a/drivers/gpu/drm/omapdrm/dss/Makefile
+++ b/drivers/gpu/drm/omapdrm/dss/Makefile
@@ -5,7 +5,7 @@ omapdss-base-y := base.o display.o dss-of.o output.o
obj-$(CONFIG_OMAP2_DSS) += omapdss.o
# Core DSS files
-omapdss-y := core.o dss.o dss_features.o dispc.o dispc_coefs.o \
+omapdss-y := core.o dss.o dispc.o dispc_coefs.o \
pll.o video-pll.o
omapdss-$(CONFIG_OMAP2_DSS_DPI) += dpi.o
omapdss-$(CONFIG_OMAP2_DSS_VENC) += venc.o
diff --git a/drivers/gpu/drm/omapdrm/dss/core.c b/drivers/gpu/drm/omapdrm/dss/core.c
index bdce4bfdf6e0..197ddbc1512b 100644
--- a/drivers/gpu/drm/omapdrm/dss/core.c
+++ b/drivers/gpu/drm/omapdrm/dss/core.c
@@ -24,182 +24,10 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/clk.h>
-#include <linux/err.h>
#include <linux/platform_device.h>
-#include <linux/seq_file.h>
-#include <linux/debugfs.h>
-#include <linux/io.h>
-#include <linux/device.h>
-#include <linux/regulator/consumer.h>
-#include <linux/suspend.h>
-#include <linux/slab.h>
#include "omapdss.h"
#include "dss.h"
-#include "dss_features.h"
-
-static struct {
- struct platform_device *pdev;
-} core;
-
-enum omapdss_version omapdss_get_version(void)
-{
- struct omap_dss_board_info *pdata = core.pdev->dev.platform_data;
- return pdata->version;
-}
-EXPORT_SYMBOL(omapdss_get_version);
-
-int dss_dsi_enable_pads(int dsi_id, unsigned lane_mask)
-{
- struct omap_dss_board_info *board_data = core.pdev->dev.platform_data;
-
- if (!board_data->dsi_enable_pads)
- return -ENOENT;
-
- return board_data->dsi_enable_pads(dsi_id, lane_mask);
-}
-
-void dss_dsi_disable_pads(int dsi_id, unsigned lane_mask)
-{
- struct omap_dss_board_info *board_data = core.pdev->dev.platform_data;
-
- if (!board_data->dsi_disable_pads)
- return;
-
- return board_data->dsi_disable_pads(dsi_id, lane_mask);
-}
-
-int dss_set_min_bus_tput(struct device *dev, unsigned long tput)
-{
- struct omap_dss_board_info *pdata = core.pdev->dev.platform_data;
-
- if (pdata->set_min_bus_tput)
- return pdata->set_min_bus_tput(dev, tput);
- else
- return 0;
-}
-
-#if defined(CONFIG_OMAP2_DSS_DEBUGFS)
-static int dss_debug_show(struct seq_file *s, void *unused)
-{
- void (*func)(struct seq_file *) = s->private;
- func(s);
- return 0;
-}
-
-static int dss_debug_open(struct inode *inode, struct file *file)
-{
- return single_open(file, dss_debug_show, inode->i_private);
-}
-
-static const struct file_operations dss_debug_fops = {
- .open = dss_debug_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static struct dentry *dss_debugfs_dir;
-
-static int dss_initialize_debugfs(void)
-{
- dss_debugfs_dir = debugfs_create_dir("omapdss", NULL);
- if (IS_ERR(dss_debugfs_dir)) {
- int err = PTR_ERR(dss_debugfs_dir);
- dss_debugfs_dir = NULL;
- return err;
- }
-
- debugfs_create_file("clk", S_IRUGO, dss_debugfs_dir,
- &dss_debug_dump_clocks, &dss_debug_fops);
-
- return 0;
-}
-
-static void dss_uninitialize_debugfs(void)
-{
- if (dss_debugfs_dir)
- debugfs_remove_recursive(dss_debugfs_dir);
-}
-
-int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *))
-{
- struct dentry *d;
-
- d = debugfs_create_file(name, S_IRUGO, dss_debugfs_dir,
- write, &dss_debug_fops);
-
- return PTR_ERR_OR_ZERO(d);
-}
-#else /* CONFIG_OMAP2_DSS_DEBUGFS */
-static inline int dss_initialize_debugfs(void)
-{
- return 0;
-}
-static inline void dss_uninitialize_debugfs(void)
-{
-}
-int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *))
-{
- return 0;
-}
-#endif /* CONFIG_OMAP2_DSS_DEBUGFS */
-
-/* PLATFORM DEVICE */
-
-static void dss_disable_all_devices(void)
-{
- struct omap_dss_device *dssdev = NULL;
-
- for_each_dss_dev(dssdev) {
- if (!dssdev->driver)
- continue;
-
- if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
- dssdev->driver->disable(dssdev);
- }
-}
-
-static int __init omap_dss_probe(struct platform_device *pdev)
-{
- int r;
-
- core.pdev = pdev;
-
- dss_features_init(omapdss_get_version());
-
- r = dss_initialize_debugfs();
- if (r)
- goto err_debugfs;
-
- return 0;
-
-err_debugfs:
-
- return r;
-}
-
-static int omap_dss_remove(struct platform_device *pdev)
-{
- dss_uninitialize_debugfs();
-
- return 0;
-}
-
-static void omap_dss_shutdown(struct platform_device *pdev)
-{
- DSSDBG("shutdown\n");
- dss_disable_all_devices();
-}
-
-static struct platform_driver omap_dss_driver = {
- .remove = omap_dss_remove,
- .shutdown = omap_dss_shutdown,
- .driver = {
- .name = "omapdss",
- },
-};
/* INIT */
static int (*dss_output_drv_reg_funcs[])(void) __initdata = {
@@ -236,21 +64,25 @@ static void (*dss_output_drv_unreg_funcs[])(void) = {
dss_uninit_platform_driver,
};
+static struct platform_device *omap_drm_device;
+
static int __init omap_dss_init(void)
{
int r;
int i;
- r = platform_driver_probe(&omap_dss_driver, omap_dss_probe);
- if (r)
- return r;
-
for (i = 0; i < ARRAY_SIZE(dss_output_drv_reg_funcs); ++i) {
r = dss_output_drv_reg_funcs[i]();
if (r)
goto err_reg;
}
+ omap_drm_device = platform_device_register_simple("omapdrm", 0, NULL, 0);
+ if (IS_ERR(omap_drm_device)) {
+ r = PTR_ERR(omap_drm_device);
+ goto err_reg;
+ }
+
return 0;
err_reg:
@@ -259,8 +91,6 @@ err_reg:
++i)
dss_output_drv_unreg_funcs[i]();
- platform_driver_unregister(&omap_dss_driver);
-
return r;
}
@@ -268,10 +98,10 @@ static void __exit omap_dss_exit(void)
{
int i;
+ platform_device_unregister(omap_drm_device);
+
for (i = 0; i < ARRAY_SIZE(dss_output_drv_unreg_funcs); ++i)
dss_output_drv_unreg_funcs[i]();
-
- platform_driver_unregister(&omap_dss_driver);
}
module_init(omap_dss_init);
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index fd7504b37e3b..0f4fdb221498 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -39,13 +39,14 @@
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/component.h>
+#include <linux/sys_soc.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_blend.h>
#include "omapdss.h"
#include "dss.h"
-#include "dss_features.h"
#include "dispc.h"
/* DISPC */
@@ -63,6 +64,33 @@ enum omap_burst_size {
#define REG_FLD_MOD(idx, val, start, end) \
dispc_write_reg(idx, FLD_MOD(dispc_read_reg(idx), val, start, end))
+/* DISPC has feature id */
+enum dispc_feature_id {
+ FEAT_LCDENABLEPOL,
+ FEAT_LCDENABLESIGNAL,
+ FEAT_PCKFREEENABLE,
+ FEAT_FUNCGATED,
+ FEAT_MGR_LCD2,
+ FEAT_MGR_LCD3,
+ FEAT_LINEBUFFERSPLIT,
+ FEAT_ROWREPEATENABLE,
+ FEAT_RESIZECONF,
+ /* Independent core clk divider */
+ FEAT_CORE_CLK_DIV,
+ FEAT_HANDLE_UV_SEPARATE,
+ FEAT_ATTR2,
+ FEAT_CPR,
+ FEAT_PRELOAD,
+ FEAT_FIR_COEF_V,
+ FEAT_ALPHA_FIXED_ZORDER,
+ FEAT_ALPHA_FREE_ZORDER,
+ FEAT_FIFO_MERGE,
+ /* An unknown HW bug causing the normal FIFO thresholds not to work */
+ FEAT_OMAP3_DSI_FIFO_BUG,
+ FEAT_BURST_2D,
+ FEAT_MFLAG,
+};
+
struct dispc_features {
u8 sw_start;
u8 fp_start;
@@ -76,6 +104,9 @@ struct dispc_features {
u16 mgr_height_max;
unsigned long max_lcd_pclk;
unsigned long max_tv_pclk;
+ unsigned int max_downscale;
+ unsigned int max_line_width;
+ unsigned int min_pcd;
int (*calc_scaling) (unsigned long pclk, unsigned long lclk,
const struct videomode *vm,
u16 width, u16 height, u16 out_width, u16 out_height,
@@ -86,6 +117,16 @@ struct dispc_features {
u16 width, u16 height, u16 out_width, u16 out_height,
bool mem_to_mem);
u8 num_fifos;
+ const enum dispc_feature_id *features;
+ unsigned int num_features;
+ const struct dss_reg_field *reg_fields;
+ const unsigned int num_reg_fields;
+ const enum omap_overlay_caps *overlay_caps;
+ const u32 **supported_color_modes;
+ unsigned int num_mgrs;
+ unsigned int num_ovls;
+ unsigned int buffer_size_unit;
+ unsigned int burst_size_unit;
/* swap GFX & WB fifos */
bool gfx_fifo_workaround:1;
@@ -180,6 +221,17 @@ enum mgr_reg_fields {
DISPC_MGR_FLD_NUM,
};
+/* DISPC register field id */
+enum dispc_feat_reg_field {
+ FEAT_REG_FIRHINC,
+ FEAT_REG_FIRVINC,
+ FEAT_REG_FIFOHIGHTHRESHOLD,
+ FEAT_REG_FIFOLOWTHRESHOLD,
+ FEAT_REG_FIFOSIZE,
+ FEAT_REG_HORIZONTALACCU,
+ FEAT_REG_VERTICALACCU,
+};
+
struct dispc_reg_field {
u16 reg;
u8 high;
@@ -343,6 +395,38 @@ static void mgr_fld_write(enum omap_channel channel,
spin_unlock_irqrestore(&dispc.control_lock, flags);
}
+static int dispc_get_num_ovls(void)
+{
+ return dispc.feat->num_ovls;
+}
+
+static int dispc_get_num_mgrs(void)
+{
+ return dispc.feat->num_mgrs;
+}
+
+static void dispc_get_reg_field(enum dispc_feat_reg_field id,
+ u8 *start, u8 *end)
+{
+ if (id >= dispc.feat->num_reg_fields)
+ BUG();
+
+ *start = dispc.feat->reg_fields[id].start;
+ *end = dispc.feat->reg_fields[id].end;
+}
+
+static bool dispc_has_feature(enum dispc_feature_id id)
+{
+ unsigned int i;
+
+ for (i = 0; i < dispc.feat->num_features; i++) {
+ if (dispc.feat->features[i] == id)
+ return true;
+ }
+
+ return false;
+}
+
#define SR(reg) \
dispc.ctx[DISPC_##reg / sizeof(u32)] = dispc_read_reg(DISPC_##reg)
#define RR(reg) \
@@ -358,19 +442,19 @@ static void dispc_save_context(void)
SR(CONTROL);
SR(CONFIG);
SR(LINE_NUMBER);
- if (dss_has_feature(FEAT_ALPHA_FIXED_ZORDER) ||
- dss_has_feature(FEAT_ALPHA_FREE_ZORDER))
+ if (dispc_has_feature(FEAT_ALPHA_FIXED_ZORDER) ||
+ dispc_has_feature(FEAT_ALPHA_FREE_ZORDER))
SR(GLOBAL_ALPHA);
- if (dss_has_feature(FEAT_MGR_LCD2)) {
+ if (dispc_has_feature(FEAT_MGR_LCD2)) {
SR(CONTROL2);
SR(CONFIG2);
}
- if (dss_has_feature(FEAT_MGR_LCD3)) {
+ if (dispc_has_feature(FEAT_MGR_LCD3)) {
SR(CONTROL3);
SR(CONFIG3);
}
- for (i = 0; i < dss_feat_get_num_mgrs(); i++) {
+ for (i = 0; i < dispc_get_num_mgrs(); i++) {
SR(DEFAULT_COLOR(i));
SR(TRANS_COLOR(i));
SR(SIZE_MGR(i));
@@ -385,14 +469,14 @@ static void dispc_save_context(void)
SR(DATA_CYCLE2(i));
SR(DATA_CYCLE3(i));
- if (dss_has_feature(FEAT_CPR)) {
+ if (dispc_has_feature(FEAT_CPR)) {
SR(CPR_COEF_R(i));
SR(CPR_COEF_G(i));
SR(CPR_COEF_B(i));
}
}
- for (i = 0; i < dss_feat_get_num_ovls(); i++) {
+ for (i = 0; i < dispc_get_num_ovls(); i++) {
SR(OVL_BA0(i));
SR(OVL_BA1(i));
SR(OVL_POSITION(i));
@@ -401,7 +485,7 @@ static void dispc_save_context(void)
SR(OVL_FIFO_THRESHOLD(i));
SR(OVL_ROW_INC(i));
SR(OVL_PIXEL_INC(i));
- if (dss_has_feature(FEAT_PRELOAD))
+ if (dispc_has_feature(FEAT_PRELOAD))
SR(OVL_PRELOAD(i));
if (i == OMAP_DSS_GFX) {
SR(OVL_WINDOW_SKIP(i));
@@ -422,12 +506,12 @@ static void dispc_save_context(void)
for (j = 0; j < 5; j++)
SR(OVL_CONV_COEF(i, j));
- if (dss_has_feature(FEAT_FIR_COEF_V)) {
+ if (dispc_has_feature(FEAT_FIR_COEF_V)) {
for (j = 0; j < 8; j++)
SR(OVL_FIR_COEF_V(i, j));
}
- if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
+ if (dispc_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
SR(OVL_BA0_UV(i));
SR(OVL_BA1_UV(i));
SR(OVL_FIR2(i));
@@ -443,11 +527,11 @@ static void dispc_save_context(void)
for (j = 0; j < 8; j++)
SR(OVL_FIR_COEF_V2(i, j));
}
- if (dss_has_feature(FEAT_ATTR2))
+ if (dispc_has_feature(FEAT_ATTR2))
SR(OVL_ATTRIBUTES2(i));
}
- if (dss_has_feature(FEAT_CORE_CLK_DIV))
+ if (dispc_has_feature(FEAT_CORE_CLK_DIV))
SR(DIVISOR);
dispc.ctx_valid = true;
@@ -468,15 +552,15 @@ static void dispc_restore_context(void)
/*RR(CONTROL);*/
RR(CONFIG);
RR(LINE_NUMBER);
- if (dss_has_feature(FEAT_ALPHA_FIXED_ZORDER) ||
- dss_has_feature(FEAT_ALPHA_FREE_ZORDER))
+ if (dispc_has_feature(FEAT_ALPHA_FIXED_ZORDER) ||
+ dispc_has_feature(FEAT_ALPHA_FREE_ZORDER))
RR(GLOBAL_ALPHA);
- if (dss_has_feature(FEAT_MGR_LCD2))
+ if (dispc_has_feature(FEAT_MGR_LCD2))
RR(CONFIG2);
- if (dss_has_feature(FEAT_MGR_LCD3))
+ if (dispc_has_feature(FEAT_MGR_LCD3))
RR(CONFIG3);
- for (i = 0; i < dss_feat_get_num_mgrs(); i++) {
+ for (i = 0; i < dispc_get_num_mgrs(); i++) {
RR(DEFAULT_COLOR(i));
RR(TRANS_COLOR(i));
RR(SIZE_MGR(i));
@@ -491,14 +575,14 @@ static void dispc_restore_context(void)
RR(DATA_CYCLE2(i));
RR(DATA_CYCLE3(i));
- if (dss_has_feature(FEAT_CPR)) {
+ if (dispc_has_feature(FEAT_CPR)) {
RR(CPR_COEF_R(i));
RR(CPR_COEF_G(i));
RR(CPR_COEF_B(i));
}
}
- for (i = 0; i < dss_feat_get_num_ovls(); i++) {
+ for (i = 0; i < dispc_get_num_ovls(); i++) {
RR(OVL_BA0(i));
RR(OVL_BA1(i));
RR(OVL_POSITION(i));
@@ -507,7 +591,7 @@ static void dispc_restore_context(void)
RR(OVL_FIFO_THRESHOLD(i));
RR(OVL_ROW_INC(i));
RR(OVL_PIXEL_INC(i));
- if (dss_has_feature(FEAT_PRELOAD))
+ if (dispc_has_feature(FEAT_PRELOAD))
RR(OVL_PRELOAD(i));
if (i == OMAP_DSS_GFX) {
RR(OVL_WINDOW_SKIP(i));
@@ -528,12 +612,12 @@ static void dispc_restore_context(void)
for (j = 0; j < 5; j++)
RR(OVL_CONV_COEF(i, j));
- if (dss_has_feature(FEAT_FIR_COEF_V)) {
+ if (dispc_has_feature(FEAT_FIR_COEF_V)) {
for (j = 0; j < 8; j++)
RR(OVL_FIR_COEF_V(i, j));
}
- if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
+ if (dispc_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
RR(OVL_BA0_UV(i));
RR(OVL_BA1_UV(i));
RR(OVL_FIR2(i));
@@ -549,18 +633,18 @@ static void dispc_restore_context(void)
for (j = 0; j < 8; j++)
RR(OVL_FIR_COEF_V2(i, j));
}
- if (dss_has_feature(FEAT_ATTR2))
+ if (dispc_has_feature(FEAT_ATTR2))
RR(OVL_ATTRIBUTES2(i));
}
- if (dss_has_feature(FEAT_CORE_CLK_DIV))
+ if (dispc_has_feature(FEAT_CORE_CLK_DIV))
RR(DIVISOR);
/* enable last, because LCD & DIGIT enable are here */
RR(CONTROL);
- if (dss_has_feature(FEAT_MGR_LCD2))
+ if (dispc_has_feature(FEAT_MGR_LCD2))
RR(CONTROL2);
- if (dss_has_feature(FEAT_MGR_LCD3))
+ if (dispc_has_feature(FEAT_MGR_LCD3))
RR(CONTROL3);
/* clear spurious SYNC_LOST_DIGIT interrupts */
dispc_clear_irqstatus(DISPC_IRQ_SYNC_LOST_DIGIT);
@@ -779,7 +863,7 @@ static void dispc_ovl_write_color_conv_coef(enum omap_plane_id plane,
static void dispc_setup_color_conv_coef(void)
{
int i;
- int num_ovl = dss_feat_get_num_ovls();
+ int num_ovl = dispc_get_num_ovls();
const struct color_conv_coef ctbl_bt601_5_ovl = {
/* YUV -> RGB */
298, 409, 0, 298, -208, -100, 298, 0, 517, 0,
@@ -868,10 +952,10 @@ static void dispc_ovl_enable_zorder_planes(void)
{
int i;
- if (!dss_has_feature(FEAT_ALPHA_FREE_ZORDER))
+ if (!dispc_has_feature(FEAT_ALPHA_FREE_ZORDER))
return;
- for (i = 0; i < dss_feat_get_num_ovls(); i++)
+ for (i = 0; i < dispc_get_num_ovls(); i++)
REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(i), 1, 25, 25);
}
@@ -994,7 +1078,7 @@ static bool format_is_yuv(u32 fourcc)
static void dispc_ovl_configure_burst_type(enum omap_plane_id plane,
enum omap_dss_rotation_type rotation_type)
{
- if (dss_has_feature(FEAT_BURST_2D) == 0)
+ if (dispc_has_feature(FEAT_BURST_2D) == 0)
return;
if (rotation_type == OMAP_DSS_ROT_TILER)
@@ -1025,7 +1109,7 @@ static void dispc_ovl_set_channel_out(enum omap_plane_id plane,
}
val = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane));
- if (dss_has_feature(FEAT_MGR_LCD2)) {
+ if (dispc_has_feature(FEAT_MGR_LCD2)) {
switch (channel) {
case OMAP_DSS_CHANNEL_LCD:
chan = 0;
@@ -1040,7 +1124,7 @@ static void dispc_ovl_set_channel_out(enum omap_plane_id plane,
chan2 = 1;
break;
case OMAP_DSS_CHANNEL_LCD3:
- if (dss_has_feature(FEAT_MGR_LCD3)) {
+ if (dispc_has_feature(FEAT_MGR_LCD3)) {
chan = 0;
chan2 = 2;
} else {
@@ -1089,7 +1173,7 @@ static enum omap_channel dispc_ovl_get_channel_out(enum omap_plane_id plane)
if (FLD_GET(val, shift, shift) == 1)
return OMAP_DSS_CHANNEL_DIGIT;
- if (!dss_has_feature(FEAT_MGR_LCD2))
+ if (!dispc_has_feature(FEAT_MGR_LCD2))
return OMAP_DSS_CHANNEL_LCD;
switch (FLD_GET(val, 31, 30)) {
@@ -1128,7 +1212,7 @@ static void dispc_configure_burst_sizes(void)
const int burst_size = BURST_SIZE_X8;
/* Configure burst size always to maximum size */
- for (i = 0; i < dss_feat_get_num_ovls(); ++i)
+ for (i = 0; i < dispc_get_num_ovls(); ++i)
dispc_ovl_set_burst_size(i, burst_size);
if (dispc.feat->has_writeback)
dispc_ovl_set_burst_size(OMAP_DSS_WB, burst_size);
@@ -1136,19 +1220,28 @@ static void dispc_configure_burst_sizes(void)
static u32 dispc_ovl_get_burst_size(enum omap_plane_id plane)
{
- unsigned unit = dss_feat_get_burst_size_unit();
/* burst multiplier is always x8 (see dispc_configure_burst_sizes()) */
- return unit * 8;
+ return dispc.feat->burst_size_unit * 8;
}
-static const u32 *dispc_ovl_get_color_modes(enum omap_plane_id plane)
+static bool dispc_ovl_color_mode_supported(enum omap_plane_id plane, u32 fourcc)
{
- return dss_feat_get_supported_color_modes(plane);
+ const u32 *modes;
+ unsigned int i;
+
+ modes = dispc.feat->supported_color_modes[plane];
+
+ for (i = 0; modes[i]; ++i) {
+ if (modes[i] == fourcc)
+ return true;
+ }
+
+ return false;
}
-static int dispc_get_num_ovls(void)
+static const u32 *dispc_ovl_get_color_modes(enum omap_plane_id plane)
{
- return dss_feat_get_num_ovls();
+ return dispc.feat->supported_color_modes[plane];
}
static void dispc_mgr_enable_cpr(enum omap_channel channel, bool enable)
@@ -1223,9 +1316,9 @@ static void dispc_init_fifos(void)
u32 unit;
int i;
- unit = dss_feat_get_buffer_size_unit();
+ unit = dispc.feat->buffer_size_unit;
- dss_feat_get_reg_field(FEAT_REG_FIFOSIZE, &start, &end);
+ dispc_get_reg_field(FEAT_REG_FIFOSIZE, &start, &end);
for (fifo = 0; fifo < dispc.feat->num_fifos; ++fifo) {
size = REG_GET(DISPC_OVL_FIFO_SIZE_STATUS(fifo), start, end);
@@ -1265,7 +1358,7 @@ static void dispc_init_fifos(void)
/*
* Setup default fifo thresholds.
*/
- for (i = 0; i < dss_feat_get_num_ovls(); ++i) {
+ for (i = 0; i < dispc_get_num_ovls(); ++i) {
u32 low, high;
const bool use_fifomerge = false;
const bool manual_update = false;
@@ -1307,7 +1400,7 @@ void dispc_ovl_set_fifo_threshold(enum omap_plane_id plane, u32 low,
u8 hi_start, hi_end, lo_start, lo_end;
u32 unit;
- unit = dss_feat_get_buffer_size_unit();
+ unit = dispc.feat->buffer_size_unit;
WARN_ON(low % unit != 0);
WARN_ON(high % unit != 0);
@@ -1315,8 +1408,8 @@ void dispc_ovl_set_fifo_threshold(enum omap_plane_id plane, u32 low,
low /= unit;
high /= unit;
- dss_feat_get_reg_field(FEAT_REG_FIFOHIGHTHRESHOLD, &hi_start, &hi_end);
- dss_feat_get_reg_field(FEAT_REG_FIFOLOWTHRESHOLD, &lo_start, &lo_end);
+ dispc_get_reg_field(FEAT_REG_FIFOHIGHTHRESHOLD, &hi_start, &hi_end);
+ dispc_get_reg_field(FEAT_REG_FIFOLOWTHRESHOLD, &lo_start, &lo_end);
DSSDBG("fifo(%d) threshold (bytes), old %u/%u, new %u/%u\n",
plane,
@@ -1335,14 +1428,14 @@ void dispc_ovl_set_fifo_threshold(enum omap_plane_id plane, u32 low,
* large for the preload field, set the threshold to the maximum value
* that can be held by the preload register
*/
- if (dss_has_feature(FEAT_PRELOAD) && dispc.feat->set_max_preload &&
+ if (dispc_has_feature(FEAT_PRELOAD) && dispc.feat->set_max_preload &&
plane != OMAP_DSS_WB)
dispc_write_reg(DISPC_OVL_PRELOAD(plane), min(high, 0xfffu));
}
void dispc_enable_fifomerge(bool enable)
{
- if (!dss_has_feature(FEAT_FIFO_MERGE)) {
+ if (!dispc_has_feature(FEAT_FIFO_MERGE)) {
WARN_ON(enable);
return;
}
@@ -1360,7 +1453,7 @@ void dispc_ovl_compute_fifo_thresholds(enum omap_plane_id plane,
* buffer_units, and the fifo thresholds must be buffer_unit aligned.
*/
- unsigned buf_unit = dss_feat_get_buffer_size_unit();
+ unsigned buf_unit = dispc.feat->buffer_size_unit;
unsigned ovl_fifo_size, total_fifo_size, burst_size;
int i;
@@ -1369,7 +1462,7 @@ void dispc_ovl_compute_fifo_thresholds(enum omap_plane_id plane,
if (use_fifomerge) {
total_fifo_size = 0;
- for (i = 0; i < dss_feat_get_num_ovls(); ++i)
+ for (i = 0; i < dispc_get_num_ovls(); ++i)
total_fifo_size += dispc_ovl_get_fifo_size(i);
} else {
total_fifo_size = ovl_fifo_size;
@@ -1381,7 +1474,7 @@ void dispc_ovl_compute_fifo_thresholds(enum omap_plane_id plane,
* combined fifo size
*/
- if (manual_update && dss_has_feature(FEAT_OMAP3_DSI_FIFO_BUG)) {
+ if (manual_update && dispc_has_feature(FEAT_OMAP3_DSI_FIFO_BUG)) {
*fifo_low = ovl_fifo_size - burst_size * 2;
*fifo_high = total_fifo_size - burst_size;
} else if (plane == OMAP_DSS_WB) {
@@ -1435,9 +1528,9 @@ static void dispc_init_mflag(void)
(1 << 0) | /* MFLAG_CTRL = force always on */
(0 << 2)); /* MFLAG_START = disable */
- for (i = 0; i < dss_feat_get_num_ovls(); ++i) {
+ for (i = 0; i < dispc_get_num_ovls(); ++i) {
u32 size = dispc_ovl_get_fifo_size(i);
- u32 unit = dss_feat_get_buffer_size_unit();
+ u32 unit = dispc.feat->buffer_size_unit;
u32 low, high;
dispc_ovl_set_mflag(i, true);
@@ -1456,7 +1549,7 @@ static void dispc_init_mflag(void)
if (dispc.feat->has_writeback) {
u32 size = dispc_ovl_get_fifo_size(OMAP_DSS_WB);
- u32 unit = dss_feat_get_buffer_size_unit();
+ u32 unit = dispc.feat->buffer_size_unit;
u32 low, high;
dispc_ovl_set_mflag(OMAP_DSS_WB, true);
@@ -1483,10 +1576,8 @@ static void dispc_ovl_set_fir(enum omap_plane_id plane,
if (color_comp == DISPC_COLOR_COMPONENT_RGB_Y) {
u8 hinc_start, hinc_end, vinc_start, vinc_end;
- dss_feat_get_reg_field(FEAT_REG_FIRHINC,
- &hinc_start, &hinc_end);
- dss_feat_get_reg_field(FEAT_REG_FIRVINC,
- &vinc_start, &vinc_end);
+ dispc_get_reg_field(FEAT_REG_FIRHINC, &hinc_start, &hinc_end);
+ dispc_get_reg_field(FEAT_REG_FIRVINC, &vinc_start, &vinc_end);
val = FLD_VAL(vinc, vinc_start, vinc_end) |
FLD_VAL(hinc, hinc_start, hinc_end);
@@ -1503,8 +1594,8 @@ static void dispc_ovl_set_vid_accu0(enum omap_plane_id plane, int haccu,
u32 val;
u8 hor_start, hor_end, vert_start, vert_end;
- dss_feat_get_reg_field(FEAT_REG_HORIZONTALACCU, &hor_start, &hor_end);
- dss_feat_get_reg_field(FEAT_REG_VERTICALACCU, &vert_start, &vert_end);
+ dispc_get_reg_field(FEAT_REG_HORIZONTALACCU, &hor_start, &hor_end);
+ dispc_get_reg_field(FEAT_REG_VERTICALACCU, &vert_start, &vert_end);
val = FLD_VAL(vaccu, vert_start, vert_end) |
FLD_VAL(haccu, hor_start, hor_end);
@@ -1518,8 +1609,8 @@ static void dispc_ovl_set_vid_accu1(enum omap_plane_id plane, int haccu,
u32 val;
u8 hor_start, hor_end, vert_start, vert_end;
- dss_feat_get_reg_field(FEAT_REG_HORIZONTALACCU, &hor_start, &hor_end);
- dss_feat_get_reg_field(FEAT_REG_VERTICALACCU, &vert_start, &vert_end);
+ dispc_get_reg_field(FEAT_REG_HORIZONTALACCU, &hor_start, &hor_end);
+ dispc_get_reg_field(FEAT_REG_VERTICALACCU, &vert_start, &vert_end);
val = FLD_VAL(vaccu, vert_start, vert_end) |
FLD_VAL(haccu, hor_start, hor_end);
@@ -1671,14 +1762,14 @@ static void dispc_ovl_set_scaling_common(enum omap_plane_id plane,
l |= five_taps ? (1 << 21) : 0;
/* VRESIZECONF and HRESIZECONF */
- if (dss_has_feature(FEAT_RESIZECONF)) {
+ if (dispc_has_feature(FEAT_RESIZECONF)) {
l &= ~(0x3 << 7);
l |= (orig_width <= out_width) ? 0 : (1 << 7);
l |= (orig_height <= out_height) ? 0 : (1 << 8);
}
/* LINEBUFFERSPLIT */
- if (dss_has_feature(FEAT_LINEBUFFERSPLIT)) {
+ if (dispc_has_feature(FEAT_LINEBUFFERSPLIT)) {
l &= ~(0x1 << 22);
l |= five_taps ? (1 << 22) : 0;
}
@@ -1713,7 +1804,7 @@ static void dispc_ovl_set_scaling_uv(enum omap_plane_id plane,
int scale_y = out_height != orig_height;
bool chroma_upscale = plane != OMAP_DSS_WB;
- if (!dss_has_feature(FEAT_HANDLE_UV_SEPARATE))
+ if (!dispc_has_feature(FEAT_HANDLE_UV_SEPARATE))
return;
if (!format_is_yuv(fourcc)) {
@@ -1860,11 +1951,11 @@ static void dispc_ovl_set_rotation_attrs(enum omap_plane_id plane, u8 rotation,
vidrot = 1;
REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), vidrot, 13, 12);
- if (dss_has_feature(FEAT_ROWREPEATENABLE))
+ if (dispc_has_feature(FEAT_ROWREPEATENABLE))
REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane),
row_repeat ? 1 : 0, 18, 18);
- if (dss_feat_color_mode_supported(plane, DRM_FORMAT_NV12)) {
+ if (dispc_ovl_color_mode_supported(plane, DRM_FORMAT_NV12)) {
bool doublestride =
fourcc == DRM_FORMAT_NV12 &&
rotation_type == OMAP_DSS_ROT_TILER &&
@@ -2118,8 +2209,7 @@ static int dispc_ovl_calc_scaling_24xx(unsigned long pclk, unsigned long lclk,
int error;
u16 in_width, in_height;
int min_factor = min(*decim_x, *decim_y);
- const int maxsinglelinewidth =
- dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH);
+ const int maxsinglelinewidth = dispc.feat->max_line_width;
*five_taps = false;
@@ -2163,8 +2253,7 @@ static int dispc_ovl_calc_scaling_34xx(unsigned long pclk, unsigned long lclk,
{
int error;
u16 in_width, in_height;
- const int maxsinglelinewidth =
- dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH);
+ const int maxsinglelinewidth = dispc.feat->max_line_width;
do {
in_height = height / *decim_y;
@@ -2249,9 +2338,8 @@ static int dispc_ovl_calc_scaling_44xx(unsigned long pclk, unsigned long lclk,
u16 in_width, in_width_max;
int decim_x_min = *decim_x;
u16 in_height = height / *decim_y;
- const int maxsinglelinewidth =
- dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH);
- const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE);
+ const int maxsinglelinewidth = dispc.feat->max_line_width;
+ const int maxdownscale = dispc.feat->max_downscale;
if (mem_to_mem) {
in_width_max = out_width * maxdownscale;
@@ -2311,7 +2399,7 @@ static int dispc_ovl_calc_scaling(unsigned long pclk, unsigned long lclk,
int *x_predecim, int *y_predecim, u16 pos_x,
enum omap_dss_rotation_type rotation_type, bool mem_to_mem)
{
- const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE);
+ const int maxdownscale = dispc.feat->max_downscale;
const int max_decim_limit = 16;
unsigned long core_clk = 0;
int decim_x, decim_y, ret;
@@ -2332,7 +2420,7 @@ static int dispc_ovl_calc_scaling(unsigned long pclk, unsigned long lclk,
} else {
*x_predecim = max_decim_limit;
*y_predecim = (rotation_type == OMAP_DSS_ROT_TILER &&
- dss_has_feature(FEAT_BURST_2D)) ?
+ dispc_has_feature(FEAT_BURST_2D)) ?
2 : max_decim_limit;
}
@@ -2428,7 +2516,7 @@ static int dispc_ovl_setup_common(enum omap_plane_id plane,
out_height);
}
- if (!dss_feat_color_mode_supported(plane, fourcc))
+ if (!dispc_ovl_color_mode_supported(plane, fourcc))
return -EINVAL;
r = dispc_ovl_calc_scaling(pclk, lclk, caps, vm, in_width,
@@ -2549,7 +2637,7 @@ static int dispc_ovl_setup(enum omap_plane_id plane,
enum omap_channel channel)
{
int r;
- enum omap_overlay_caps caps = dss_feat_get_overlay_caps(plane);
+ enum omap_overlay_caps caps = dispc.feat->overlay_caps[plane];
const bool replication = true;
DSSDBG("dispc_ovl_setup %d, pa %pad, pa_uv %pad, sw %d, %d,%d, %dx%d ->"
@@ -2647,12 +2735,12 @@ static int dispc_ovl_enable(enum omap_plane_id plane, bool enable)
static enum omap_dss_output_id dispc_mgr_get_supported_outputs(enum omap_channel channel)
{
- return dss_feat_get_supported_outputs(channel);
+ return dss_get_supported_outputs(channel);
}
static void dispc_lcd_enable_signal_polarity(bool act_high)
{
- if (!dss_has_feature(FEAT_LCDENABLEPOL))
+ if (!dispc_has_feature(FEAT_LCDENABLEPOL))
return;
REG_FLD_MOD(DISPC_CONTROL, act_high ? 1 : 0, 29, 29);
@@ -2660,7 +2748,7 @@ static void dispc_lcd_enable_signal_polarity(bool act_high)
void dispc_lcd_enable_signal(bool enable)
{
- if (!dss_has_feature(FEAT_LCDENABLESIGNAL))
+ if (!dispc_has_feature(FEAT_LCDENABLESIGNAL))
return;
REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 28, 28);
@@ -2668,17 +2756,12 @@ void dispc_lcd_enable_signal(bool enable)
void dispc_pck_free_enable(bool enable)
{
- if (!dss_has_feature(FEAT_PCKFREEENABLE))
+ if (!dispc_has_feature(FEAT_PCKFREEENABLE))
return;
REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 27, 27);
}
-static int dispc_get_num_mgrs(void)
-{
- return dss_feat_get_num_mgrs();
-}
-
static void dispc_mgr_enable_fifohandcheck(enum omap_channel channel, bool enable)
{
mgr_fld_write(channel, DISPC_MGR_FLD_FIFOHANDCHECK, enable);
@@ -2718,7 +2801,7 @@ static void dispc_mgr_enable_trans_key(enum omap_channel ch, bool enable)
static void dispc_mgr_enable_alpha_fixed_zorder(enum omap_channel ch,
bool enable)
{
- if (!dss_has_feature(FEAT_ALPHA_FIXED_ZORDER))
+ if (!dispc_has_feature(FEAT_ALPHA_FIXED_ZORDER))
return;
if (ch == OMAP_DSS_CHANNEL_LCD)
@@ -2735,7 +2818,7 @@ static void dispc_mgr_setup(enum omap_channel channel,
dispc_mgr_enable_trans_key(channel, info->trans_enabled);
dispc_mgr_enable_alpha_fixed_zorder(channel,
info->partial_alpha_enabled);
- if (dss_has_feature(FEAT_CPR)) {
+ if (dispc_has_feature(FEAT_CPR)) {
dispc_mgr_enable_cpr(channel, info->cpr_enable);
dispc_mgr_set_cpr_coef(channel, &info->cpr_coefs);
}
@@ -3013,7 +3096,7 @@ static void dispc_mgr_set_lcd_divisor(enum omap_channel channel, u16 lck_div,
dispc_write_reg(DISPC_DIVISORo(channel),
FLD_VAL(lck_div, 23, 16) | FLD_VAL(pck_div, 7, 0));
- if (!dss_has_feature(FEAT_CORE_CLK_DIV) &&
+ if (!dispc_has_feature(FEAT_CORE_CLK_DIV) &&
channel == OMAP_DSS_CHANNEL_LCD)
dispc.core_clk_rate = dispc_fclk_rate() / lck_div;
}
@@ -3168,7 +3251,7 @@ void dispc_dump_clocks(struct seq_file *s)
seq_printf(s, "fck\t\t%-16lu\n", dispc_fclk_rate());
- if (dss_has_feature(FEAT_CORE_CLK_DIV)) {
+ if (dispc_has_feature(FEAT_CORE_CLK_DIV)) {
seq_printf(s, "- DISPC-CORE-CLK -\n");
l = dispc_read_reg(DISPC_DIVISOR);
lcd = FLD_GET(l, 23, 16);
@@ -3179,9 +3262,9 @@ void dispc_dump_clocks(struct seq_file *s)
dispc_dump_clocks_channel(s, OMAP_DSS_CHANNEL_LCD);
- if (dss_has_feature(FEAT_MGR_LCD2))
+ if (dispc_has_feature(FEAT_MGR_LCD2))
dispc_dump_clocks_channel(s, OMAP_DSS_CHANNEL_LCD2);
- if (dss_has_feature(FEAT_MGR_LCD3))
+ if (dispc_has_feature(FEAT_MGR_LCD3))
dispc_dump_clocks_channel(s, OMAP_DSS_CHANNEL_LCD3);
dispc_runtime_put();
@@ -3221,18 +3304,18 @@ static void dispc_dump_regs(struct seq_file *s)
DUMPREG(DISPC_CAPABLE);
DUMPREG(DISPC_LINE_STATUS);
DUMPREG(DISPC_LINE_NUMBER);
- if (dss_has_feature(FEAT_ALPHA_FIXED_ZORDER) ||
- dss_has_feature(FEAT_ALPHA_FREE_ZORDER))
+ if (dispc_has_feature(FEAT_ALPHA_FIXED_ZORDER) ||
+ dispc_has_feature(FEAT_ALPHA_FREE_ZORDER))
DUMPREG(DISPC_GLOBAL_ALPHA);
- if (dss_has_feature(FEAT_MGR_LCD2)) {
+ if (dispc_has_feature(FEAT_MGR_LCD2)) {
DUMPREG(DISPC_CONTROL2);
DUMPREG(DISPC_CONFIG2);
}
- if (dss_has_feature(FEAT_MGR_LCD3)) {
+ if (dispc_has_feature(FEAT_MGR_LCD3)) {
DUMPREG(DISPC_CONTROL3);
DUMPREG(DISPC_CONFIG3);
}
- if (dss_has_feature(FEAT_MFLAG))
+ if (dispc_has_feature(FEAT_MFLAG))
DUMPREG(DISPC_GLOBAL_MFLAG_ATTRIBUTE);
#undef DUMPREG
@@ -3245,7 +3328,7 @@ static void dispc_dump_regs(struct seq_file *s)
p_names = mgr_names;
/* DISPC channel specific registers */
- for (i = 0; i < dss_feat_get_num_mgrs(); i++) {
+ for (i = 0; i < dispc_get_num_mgrs(); i++) {
DUMPREG(i, DISPC_DEFAULT_COLOR);
DUMPREG(i, DISPC_TRANS_COLOR);
DUMPREG(i, DISPC_SIZE_MGR);
@@ -3262,7 +3345,7 @@ static void dispc_dump_regs(struct seq_file *s)
DUMPREG(i, DISPC_DATA_CYCLE2);
DUMPREG(i, DISPC_DATA_CYCLE3);
- if (dss_has_feature(FEAT_CPR)) {
+ if (dispc_has_feature(FEAT_CPR)) {
DUMPREG(i, DISPC_CPR_COEF_R);
DUMPREG(i, DISPC_CPR_COEF_G);
DUMPREG(i, DISPC_CPR_COEF_B);
@@ -3271,7 +3354,7 @@ static void dispc_dump_regs(struct seq_file *s)
p_names = ovl_names;
- for (i = 0; i < dss_feat_get_num_ovls(); i++) {
+ for (i = 0; i < dispc_get_num_ovls(); i++) {
DUMPREG(i, DISPC_OVL_BA0);
DUMPREG(i, DISPC_OVL_BA1);
DUMPREG(i, DISPC_OVL_POSITION);
@@ -3282,9 +3365,9 @@ static void dispc_dump_regs(struct seq_file *s)
DUMPREG(i, DISPC_OVL_ROW_INC);
DUMPREG(i, DISPC_OVL_PIXEL_INC);
- if (dss_has_feature(FEAT_PRELOAD))
+ if (dispc_has_feature(FEAT_PRELOAD))
DUMPREG(i, DISPC_OVL_PRELOAD);
- if (dss_has_feature(FEAT_MFLAG))
+ if (dispc_has_feature(FEAT_MFLAG))
DUMPREG(i, DISPC_OVL_MFLAG_THRESHOLD);
if (i == OMAP_DSS_GFX) {
@@ -3297,14 +3380,14 @@ static void dispc_dump_regs(struct seq_file *s)
DUMPREG(i, DISPC_OVL_PICTURE_SIZE);
DUMPREG(i, DISPC_OVL_ACCU0);
DUMPREG(i, DISPC_OVL_ACCU1);
- if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
+ if (dispc_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
DUMPREG(i, DISPC_OVL_BA0_UV);
DUMPREG(i, DISPC_OVL_BA1_UV);
DUMPREG(i, DISPC_OVL_FIR2);
DUMPREG(i, DISPC_OVL_ACCU2_0);
DUMPREG(i, DISPC_OVL_ACCU2_1);
}
- if (dss_has_feature(FEAT_ATTR2))
+ if (dispc_has_feature(FEAT_ATTR2))
DUMPREG(i, DISPC_OVL_ATTRIBUTES2);
}
@@ -3319,21 +3402,21 @@ static void dispc_dump_regs(struct seq_file *s)
DUMPREG(i, DISPC_OVL_ROW_INC);
DUMPREG(i, DISPC_OVL_PIXEL_INC);
- if (dss_has_feature(FEAT_MFLAG))
+ if (dispc_has_feature(FEAT_MFLAG))
DUMPREG(i, DISPC_OVL_MFLAG_THRESHOLD);
DUMPREG(i, DISPC_OVL_FIR);
DUMPREG(i, DISPC_OVL_PICTURE_SIZE);
DUMPREG(i, DISPC_OVL_ACCU0);
DUMPREG(i, DISPC_OVL_ACCU1);
- if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
+ if (dispc_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
DUMPREG(i, DISPC_OVL_BA0_UV);
DUMPREG(i, DISPC_OVL_BA1_UV);
DUMPREG(i, DISPC_OVL_FIR2);
DUMPREG(i, DISPC_OVL_ACCU2_0);
DUMPREG(i, DISPC_OVL_ACCU2_1);
}
- if (dss_has_feature(FEAT_ATTR2))
+ if (dispc_has_feature(FEAT_ATTR2))
DUMPREG(i, DISPC_OVL_ATTRIBUTES2);
}
@@ -3349,7 +3432,7 @@ static void dispc_dump_regs(struct seq_file *s)
/* Video pipeline coefficient registers */
/* start from OMAP_DSS_VIDEO1 */
- for (i = 1; i < dss_feat_get_num_ovls(); i++) {
+ for (i = 1; i < dispc_get_num_ovls(); i++) {
for (j = 0; j < 8; j++)
DUMPREG(i, DISPC_OVL_FIR_COEF_H, j);
@@ -3359,12 +3442,12 @@ static void dispc_dump_regs(struct seq_file *s)
for (j = 0; j < 5; j++)
DUMPREG(i, DISPC_OVL_CONV_COEF, j);
- if (dss_has_feature(FEAT_FIR_COEF_V)) {
+ if (dispc_has_feature(FEAT_FIR_COEF_V)) {
for (j = 0; j < 8; j++)
DUMPREG(i, DISPC_OVL_FIR_COEF_V, j);
}
- if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
+ if (dispc_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
for (j = 0; j < 8; j++)
DUMPREG(i, DISPC_OVL_FIR_COEF_H2, j);
@@ -3397,7 +3480,7 @@ int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
return 0;
}
-bool dispc_div_calc(unsigned long dispc,
+bool dispc_div_calc(unsigned long dispc_freq,
unsigned long pck_min, unsigned long pck_max,
dispc_div_calc_func func, void *data)
{
@@ -3415,19 +3498,19 @@ bool dispc_div_calc(unsigned long dispc,
min_fck_per_pck = 0;
#endif
- pckd_hw_min = dss_feat_get_param_min(FEAT_PARAM_DSS_PCD);
- pckd_hw_max = dss_feat_get_param_max(FEAT_PARAM_DSS_PCD);
+ pckd_hw_min = dispc.feat->min_pcd;
+ pckd_hw_max = 255;
- lck_max = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
+ lck_max = dss_get_max_fck_rate();
pck_min = pck_min ? pck_min : 1;
pck_max = pck_max ? pck_max : ULONG_MAX;
- lckd_start = max(DIV_ROUND_UP(dispc, lck_max), 1ul);
- lckd_stop = min(dispc / pck_min, 255ul);
+ lckd_start = max(DIV_ROUND_UP(dispc_freq, lck_max), 1ul);
+ lckd_stop = min(dispc_freq / pck_min, 255ul);
for (lckd = lckd_start; lckd <= lckd_stop; ++lckd) {
- lck = dispc / lckd;
+ lck = dispc_freq / lckd;
pckd_start = max(DIV_ROUND_UP(lck, pck_max), pckd_hw_min);
pckd_stop = min(lck / pck_min, pckd_hw_max);
@@ -3441,7 +3524,7 @@ bool dispc_div_calc(unsigned long dispc,
* also. Thus we need to use the calculated lck. For
* OMAP4+ the DISPC fclk is a separate clock.
*/
- if (dss_has_feature(FEAT_CORE_CLK_DIV))
+ if (dispc_has_feature(FEAT_CORE_CLK_DIV))
fck = dispc_core_clk_rate();
else
fck = lck;
@@ -3556,10 +3639,10 @@ static void dispc_restore_gamma_tables(void)
dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_DIGIT);
- if (dss_has_feature(FEAT_MGR_LCD2))
+ if (dispc_has_feature(FEAT_MGR_LCD2))
dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_LCD2);
- if (dss_has_feature(FEAT_MGR_LCD3))
+ if (dispc_has_feature(FEAT_MGR_LCD3))
dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_LCD3);
}
@@ -3627,11 +3710,11 @@ static int dispc_init_gamma_tables(void)
u32 *gt;
if (channel == OMAP_DSS_CHANNEL_LCD2 &&
- !dss_has_feature(FEAT_MGR_LCD2))
+ !dispc_has_feature(FEAT_MGR_LCD2))
continue;
if (channel == OMAP_DSS_CHANNEL_LCD3 &&
- !dss_has_feature(FEAT_MGR_LCD3))
+ !dispc_has_feature(FEAT_MGR_LCD3))
continue;
gt = devm_kmalloc_array(&dispc.pdev->dev, gdesc->len,
@@ -3651,7 +3734,7 @@ static void _omap_dispc_initial_config(void)
u32 l;
/* Exclusively enable DISPC_CORE_CLK and set divider to 1 */
- if (dss_has_feature(FEAT_CORE_CLK_DIV)) {
+ if (dispc_has_feature(FEAT_CORE_CLK_DIV)) {
l = dispc_read_reg(DISPC_DIVISOR);
/* Use DISPC_DIVISOR.LCD, instead of DISPC_DIVISOR1.LCD */
l = FLD_MOD(l, 1, 0, 0);
@@ -3669,7 +3752,7 @@ static void _omap_dispc_initial_config(void)
* func-clock auto-gating. For newer versions
* (dispc.feat->has_gamma_table) this enables tv-out gamma tables.
*/
- if (dss_has_feature(FEAT_FUNCGATED) || dispc.feat->has_gamma_table)
+ if (dispc_has_feature(FEAT_FUNCGATED) || dispc.feat->has_gamma_table)
REG_FLD_MOD(DISPC_CONFIG, 1, 9, 9);
dispc_setup_color_conv_coef();
@@ -3685,10 +3768,272 @@ static void _omap_dispc_initial_config(void)
if (dispc.feat->mstandby_workaround)
REG_FLD_MOD(DISPC_MSTANDBY_CTRL, 1, 0, 0);
- if (dss_has_feature(FEAT_MFLAG))
+ if (dispc_has_feature(FEAT_MFLAG))
dispc_init_mflag();
}
+static const enum dispc_feature_id omap2_dispc_features_list[] = {
+ FEAT_LCDENABLEPOL,
+ FEAT_LCDENABLESIGNAL,
+ FEAT_PCKFREEENABLE,
+ FEAT_FUNCGATED,
+ FEAT_ROWREPEATENABLE,
+ FEAT_RESIZECONF,
+};
+
+static const enum dispc_feature_id omap3_dispc_features_list[] = {
+ FEAT_LCDENABLEPOL,
+ FEAT_LCDENABLESIGNAL,
+ FEAT_PCKFREEENABLE,
+ FEAT_FUNCGATED,
+ FEAT_LINEBUFFERSPLIT,
+ FEAT_ROWREPEATENABLE,
+ FEAT_RESIZECONF,
+ FEAT_CPR,
+ FEAT_PRELOAD,
+ FEAT_FIR_COEF_V,
+ FEAT_ALPHA_FIXED_ZORDER,
+ FEAT_FIFO_MERGE,
+ FEAT_OMAP3_DSI_FIFO_BUG,
+};
+
+static const enum dispc_feature_id am43xx_dispc_features_list[] = {
+ FEAT_LCDENABLEPOL,
+ FEAT_LCDENABLESIGNAL,
+ FEAT_PCKFREEENABLE,
+ FEAT_FUNCGATED,
+ FEAT_LINEBUFFERSPLIT,
+ FEAT_ROWREPEATENABLE,
+ FEAT_RESIZECONF,
+ FEAT_CPR,
+ FEAT_PRELOAD,
+ FEAT_FIR_COEF_V,
+ FEAT_ALPHA_FIXED_ZORDER,
+ FEAT_FIFO_MERGE,
+};
+
+static const enum dispc_feature_id omap4_dispc_features_list[] = {
+ FEAT_MGR_LCD2,
+ FEAT_CORE_CLK_DIV,
+ FEAT_HANDLE_UV_SEPARATE,
+ FEAT_ATTR2,
+ FEAT_CPR,
+ FEAT_PRELOAD,
+ FEAT_FIR_COEF_V,
+ FEAT_ALPHA_FREE_ZORDER,
+ FEAT_FIFO_MERGE,
+ FEAT_BURST_2D,
+};
+
+static const enum dispc_feature_id omap5_dispc_features_list[] = {
+ FEAT_MGR_LCD2,
+ FEAT_MGR_LCD3,
+ FEAT_CORE_CLK_DIV,
+ FEAT_HANDLE_UV_SEPARATE,
+ FEAT_ATTR2,
+ FEAT_CPR,
+ FEAT_PRELOAD,
+ FEAT_FIR_COEF_V,
+ FEAT_ALPHA_FREE_ZORDER,
+ FEAT_FIFO_MERGE,
+ FEAT_BURST_2D,
+ FEAT_MFLAG,
+};
+
+static const struct dss_reg_field omap2_dispc_reg_fields[] = {
+ [FEAT_REG_FIRHINC] = { 11, 0 },
+ [FEAT_REG_FIRVINC] = { 27, 16 },
+ [FEAT_REG_FIFOLOWTHRESHOLD] = { 8, 0 },
+ [FEAT_REG_FIFOHIGHTHRESHOLD] = { 24, 16 },
+ [FEAT_REG_FIFOSIZE] = { 8, 0 },
+ [FEAT_REG_HORIZONTALACCU] = { 9, 0 },
+ [FEAT_REG_VERTICALACCU] = { 25, 16 },
+};
+
+static const struct dss_reg_field omap3_dispc_reg_fields[] = {
+ [FEAT_REG_FIRHINC] = { 12, 0 },
+ [FEAT_REG_FIRVINC] = { 28, 16 },
+ [FEAT_REG_FIFOLOWTHRESHOLD] = { 11, 0 },
+ [FEAT_REG_FIFOHIGHTHRESHOLD] = { 27, 16 },
+ [FEAT_REG_FIFOSIZE] = { 10, 0 },
+ [FEAT_REG_HORIZONTALACCU] = { 9, 0 },
+ [FEAT_REG_VERTICALACCU] = { 25, 16 },
+};
+
+static const struct dss_reg_field omap4_dispc_reg_fields[] = {
+ [FEAT_REG_FIRHINC] = { 12, 0 },
+ [FEAT_REG_FIRVINC] = { 28, 16 },
+ [FEAT_REG_FIFOLOWTHRESHOLD] = { 15, 0 },
+ [FEAT_REG_FIFOHIGHTHRESHOLD] = { 31, 16 },
+ [FEAT_REG_FIFOSIZE] = { 15, 0 },
+ [FEAT_REG_HORIZONTALACCU] = { 10, 0 },
+ [FEAT_REG_VERTICALACCU] = { 26, 16 },
+};
+
+static const enum omap_overlay_caps omap2_dispc_overlay_caps[] = {
+ /* OMAP_DSS_GFX */
+ OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION,
+
+ /* OMAP_DSS_VIDEO1 */
+ OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_POS |
+ OMAP_DSS_OVL_CAP_REPLICATION,
+
+ /* OMAP_DSS_VIDEO2 */
+ OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_POS |
+ OMAP_DSS_OVL_CAP_REPLICATION,
+};
+
+static const enum omap_overlay_caps omap3430_dispc_overlay_caps[] = {
+ /* OMAP_DSS_GFX */
+ OMAP_DSS_OVL_CAP_GLOBAL_ALPHA | OMAP_DSS_OVL_CAP_POS |
+ OMAP_DSS_OVL_CAP_REPLICATION,
+
+ /* OMAP_DSS_VIDEO1 */
+ OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_POS |
+ OMAP_DSS_OVL_CAP_REPLICATION,
+
+ /* OMAP_DSS_VIDEO2 */
+ OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_GLOBAL_ALPHA |
+ OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION,
+};
+
+static const enum omap_overlay_caps omap3630_dispc_overlay_caps[] = {
+ /* OMAP_DSS_GFX */
+ OMAP_DSS_OVL_CAP_GLOBAL_ALPHA | OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA |
+ OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION,
+
+ /* OMAP_DSS_VIDEO1 */
+ OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_POS |
+ OMAP_DSS_OVL_CAP_REPLICATION,
+
+ /* OMAP_DSS_VIDEO2 */
+ OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_GLOBAL_ALPHA |
+ OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA | OMAP_DSS_OVL_CAP_POS |
+ OMAP_DSS_OVL_CAP_REPLICATION,
+};
+
+static const enum omap_overlay_caps omap4_dispc_overlay_caps[] = {
+ /* OMAP_DSS_GFX */
+ OMAP_DSS_OVL_CAP_GLOBAL_ALPHA | OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA |
+ OMAP_DSS_OVL_CAP_ZORDER | OMAP_DSS_OVL_CAP_POS |
+ OMAP_DSS_OVL_CAP_REPLICATION,
+
+ /* OMAP_DSS_VIDEO1 */
+ OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_GLOBAL_ALPHA |
+ OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA | OMAP_DSS_OVL_CAP_ZORDER |
+ OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION,
+
+ /* OMAP_DSS_VIDEO2 */
+ OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_GLOBAL_ALPHA |
+ OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA | OMAP_DSS_OVL_CAP_ZORDER |
+ OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION,
+
+ /* OMAP_DSS_VIDEO3 */
+ OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_GLOBAL_ALPHA |
+ OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA | OMAP_DSS_OVL_CAP_ZORDER |
+ OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION,
+};
+
+#define COLOR_ARRAY(arr...) (const u32[]) { arr, 0 }
+
+static const u32 *omap2_dispc_supported_color_modes[] = {
+
+ /* OMAP_DSS_GFX */
+ COLOR_ARRAY(
+ DRM_FORMAT_RGBX4444, DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888, DRM_FORMAT_RGB888),
+
+ /* OMAP_DSS_VIDEO1 */
+ COLOR_ARRAY(
+ DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB888, DRM_FORMAT_YUYV,
+ DRM_FORMAT_UYVY),
+
+ /* OMAP_DSS_VIDEO2 */
+ COLOR_ARRAY(
+ DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB888, DRM_FORMAT_YUYV,
+ DRM_FORMAT_UYVY),
+};
+
+static const u32 *omap3_dispc_supported_color_modes[] = {
+ /* OMAP_DSS_GFX */
+ COLOR_ARRAY(
+ DRM_FORMAT_RGBX4444, DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB888, DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGBA8888, DRM_FORMAT_RGBX8888),
+
+ /* OMAP_DSS_VIDEO1 */
+ COLOR_ARRAY(
+ DRM_FORMAT_XRGB8888, DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGBX4444, DRM_FORMAT_RGB565,
+ DRM_FORMAT_YUYV, DRM_FORMAT_UYVY),
+
+ /* OMAP_DSS_VIDEO2 */
+ COLOR_ARRAY(
+ DRM_FORMAT_RGBX4444, DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB888, DRM_FORMAT_YUYV,
+ DRM_FORMAT_UYVY, DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGBA8888, DRM_FORMAT_RGBX8888),
+};
+
+static const u32 *omap4_dispc_supported_color_modes[] = {
+ /* OMAP_DSS_GFX */
+ COLOR_ARRAY(
+ DRM_FORMAT_RGBX4444, DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB888, DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGBA8888, DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_ARGB1555, DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_RGBA4444, DRM_FORMAT_XRGB1555),
+
+ /* OMAP_DSS_VIDEO1 */
+ COLOR_ARRAY(
+ DRM_FORMAT_RGB565, DRM_FORMAT_RGBX4444,
+ DRM_FORMAT_YUYV, DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_RGBA8888, DRM_FORMAT_NV12,
+ DRM_FORMAT_RGBA4444, DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB888, DRM_FORMAT_UYVY,
+ DRM_FORMAT_ARGB4444, DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_RGBX8888),
+
+ /* OMAP_DSS_VIDEO2 */
+ COLOR_ARRAY(
+ DRM_FORMAT_RGB565, DRM_FORMAT_RGBX4444,
+ DRM_FORMAT_YUYV, DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_RGBA8888, DRM_FORMAT_NV12,
+ DRM_FORMAT_RGBA4444, DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB888, DRM_FORMAT_UYVY,
+ DRM_FORMAT_ARGB4444, DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_RGBX8888),
+
+ /* OMAP_DSS_VIDEO3 */
+ COLOR_ARRAY(
+ DRM_FORMAT_RGB565, DRM_FORMAT_RGBX4444,
+ DRM_FORMAT_YUYV, DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_RGBA8888, DRM_FORMAT_NV12,
+ DRM_FORMAT_RGBA4444, DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB888, DRM_FORMAT_UYVY,
+ DRM_FORMAT_ARGB4444, DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_RGBX8888),
+
+ /* OMAP_DSS_WB */
+ COLOR_ARRAY(
+ DRM_FORMAT_RGB565, DRM_FORMAT_RGBX4444,
+ DRM_FORMAT_YUYV, DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_RGBA8888, DRM_FORMAT_NV12,
+ DRM_FORMAT_RGBA4444, DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB888, DRM_FORMAT_UYVY,
+ DRM_FORMAT_ARGB4444, DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_RGBX8888),
+};
+
static const struct dispc_features omap24xx_dispc_feats = {
.sw_start = 5,
.fp_start = 15,
@@ -3701,9 +4046,26 @@ static const struct dispc_features omap24xx_dispc_feats = {
.mgr_width_max = 2048,
.mgr_height_max = 2048,
.max_lcd_pclk = 66500000,
+ .max_downscale = 2,
+ /*
+ * Assume the line width buffer to be 768 pixels as OMAP2 DISPC scaler
+ * cannot scale an image width larger than 768.
+ */
+ .max_line_width = 768,
+ .min_pcd = 2,
.calc_scaling = dispc_ovl_calc_scaling_24xx,
.calc_core_clk = calc_core_clk_24xx,
.num_fifos = 3,
+ .features = omap2_dispc_features_list,
+ .num_features = ARRAY_SIZE(omap2_dispc_features_list),
+ .reg_fields = omap2_dispc_reg_fields,
+ .num_reg_fields = ARRAY_SIZE(omap2_dispc_reg_fields),
+ .overlay_caps = omap2_dispc_overlay_caps,
+ .supported_color_modes = omap2_dispc_supported_color_modes,
+ .num_mgrs = 2,
+ .num_ovls = 3,
+ .buffer_size_unit = 1,
+ .burst_size_unit = 8,
.no_framedone_tv = true,
.set_max_preload = false,
.last_pixel_inc_missing = true,
@@ -3722,9 +4084,22 @@ static const struct dispc_features omap34xx_rev1_0_dispc_feats = {
.mgr_height_max = 2048,
.max_lcd_pclk = 173000000,
.max_tv_pclk = 59000000,
+ .max_downscale = 4,
+ .max_line_width = 1024,
+ .min_pcd = 1,
.calc_scaling = dispc_ovl_calc_scaling_34xx,
.calc_core_clk = calc_core_clk_34xx,
.num_fifos = 3,
+ .features = omap3_dispc_features_list,
+ .num_features = ARRAY_SIZE(omap3_dispc_features_list),
+ .reg_fields = omap3_dispc_reg_fields,
+ .num_reg_fields = ARRAY_SIZE(omap3_dispc_reg_fields),
+ .overlay_caps = omap3430_dispc_overlay_caps,
+ .supported_color_modes = omap3_dispc_supported_color_modes,
+ .num_mgrs = 2,
+ .num_ovls = 3,
+ .buffer_size_unit = 1,
+ .burst_size_unit = 8,
.no_framedone_tv = true,
.set_max_preload = false,
.last_pixel_inc_missing = true,
@@ -3743,9 +4118,90 @@ static const struct dispc_features omap34xx_rev3_0_dispc_feats = {
.mgr_height_max = 2048,
.max_lcd_pclk = 173000000,
.max_tv_pclk = 59000000,
+ .max_downscale = 4,
+ .max_line_width = 1024,
+ .min_pcd = 1,
+ .calc_scaling = dispc_ovl_calc_scaling_34xx,
+ .calc_core_clk = calc_core_clk_34xx,
+ .num_fifos = 3,
+ .features = omap3_dispc_features_list,
+ .num_features = ARRAY_SIZE(omap3_dispc_features_list),
+ .reg_fields = omap3_dispc_reg_fields,
+ .num_reg_fields = ARRAY_SIZE(omap3_dispc_reg_fields),
+ .overlay_caps = omap3430_dispc_overlay_caps,
+ .supported_color_modes = omap3_dispc_supported_color_modes,
+ .num_mgrs = 2,
+ .num_ovls = 3,
+ .buffer_size_unit = 1,
+ .burst_size_unit = 8,
+ .no_framedone_tv = true,
+ .set_max_preload = false,
+ .last_pixel_inc_missing = true,
+};
+
+static const struct dispc_features omap36xx_dispc_feats = {
+ .sw_start = 7,
+ .fp_start = 19,
+ .bp_start = 31,
+ .sw_max = 256,
+ .vp_max = 4095,
+ .hp_max = 4096,
+ .mgr_width_start = 10,
+ .mgr_height_start = 26,
+ .mgr_width_max = 2048,
+ .mgr_height_max = 2048,
+ .max_lcd_pclk = 173000000,
+ .max_tv_pclk = 59000000,
+ .max_downscale = 4,
+ .max_line_width = 1024,
+ .min_pcd = 1,
.calc_scaling = dispc_ovl_calc_scaling_34xx,
.calc_core_clk = calc_core_clk_34xx,
.num_fifos = 3,
+ .features = omap3_dispc_features_list,
+ .num_features = ARRAY_SIZE(omap3_dispc_features_list),
+ .reg_fields = omap3_dispc_reg_fields,
+ .num_reg_fields = ARRAY_SIZE(omap3_dispc_reg_fields),
+ .overlay_caps = omap3630_dispc_overlay_caps,
+ .supported_color_modes = omap3_dispc_supported_color_modes,
+ .num_mgrs = 2,
+ .num_ovls = 3,
+ .buffer_size_unit = 1,
+ .burst_size_unit = 8,
+ .no_framedone_tv = true,
+ .set_max_preload = false,
+ .last_pixel_inc_missing = true,
+};
+
+static const struct dispc_features am43xx_dispc_feats = {
+ .sw_start = 7,
+ .fp_start = 19,
+ .bp_start = 31,
+ .sw_max = 256,
+ .vp_max = 4095,
+ .hp_max = 4096,
+ .mgr_width_start = 10,
+ .mgr_height_start = 26,
+ .mgr_width_max = 2048,
+ .mgr_height_max = 2048,
+ .max_lcd_pclk = 173000000,
+ .max_tv_pclk = 59000000,
+ .max_downscale = 4,
+ .max_line_width = 1024,
+ .min_pcd = 1,
+ .calc_scaling = dispc_ovl_calc_scaling_34xx,
+ .calc_core_clk = calc_core_clk_34xx,
+ .num_fifos = 3,
+ .features = am43xx_dispc_features_list,
+ .num_features = ARRAY_SIZE(am43xx_dispc_features_list),
+ .reg_fields = omap3_dispc_reg_fields,
+ .num_reg_fields = ARRAY_SIZE(omap3_dispc_reg_fields),
+ .overlay_caps = omap3430_dispc_overlay_caps,
+ .supported_color_modes = omap3_dispc_supported_color_modes,
+ .num_mgrs = 1,
+ .num_ovls = 3,
+ .buffer_size_unit = 1,
+ .burst_size_unit = 8,
.no_framedone_tv = true,
.set_max_preload = false,
.last_pixel_inc_missing = true,
@@ -3764,9 +4220,22 @@ static const struct dispc_features omap44xx_dispc_feats = {
.mgr_height_max = 2048,
.max_lcd_pclk = 170000000,
.max_tv_pclk = 185625000,
+ .max_downscale = 4,
+ .max_line_width = 2048,
+ .min_pcd = 1,
.calc_scaling = dispc_ovl_calc_scaling_44xx,
.calc_core_clk = calc_core_clk_44xx,
.num_fifos = 5,
+ .features = omap4_dispc_features_list,
+ .num_features = ARRAY_SIZE(omap4_dispc_features_list),
+ .reg_fields = omap4_dispc_reg_fields,
+ .num_reg_fields = ARRAY_SIZE(omap4_dispc_reg_fields),
+ .overlay_caps = omap4_dispc_overlay_caps,
+ .supported_color_modes = omap4_dispc_supported_color_modes,
+ .num_mgrs = 3,
+ .num_ovls = 4,
+ .buffer_size_unit = 16,
+ .burst_size_unit = 16,
.gfx_fifo_workaround = true,
.set_max_preload = true,
.supports_sync_align = true,
@@ -3790,9 +4259,22 @@ static const struct dispc_features omap54xx_dispc_feats = {
.mgr_height_max = 4096,
.max_lcd_pclk = 170000000,
.max_tv_pclk = 186000000,
+ .max_downscale = 4,
+ .max_line_width = 2048,
+ .min_pcd = 1,
.calc_scaling = dispc_ovl_calc_scaling_44xx,
.calc_core_clk = calc_core_clk_44xx,
.num_fifos = 5,
+ .features = omap5_dispc_features_list,
+ .num_features = ARRAY_SIZE(omap5_dispc_features_list),
+ .reg_fields = omap4_dispc_reg_fields,
+ .num_reg_fields = ARRAY_SIZE(omap4_dispc_reg_fields),
+ .overlay_caps = omap4_dispc_overlay_caps,
+ .supported_color_modes = omap4_dispc_supported_color_modes,
+ .num_mgrs = 4,
+ .num_ovls = 4,
+ .buffer_size_unit = 16,
+ .burst_size_unit = 16,
.gfx_fifo_workaround = true,
.mstandby_workaround = true,
.set_max_preload = true,
@@ -3804,54 +4286,6 @@ static const struct dispc_features omap54xx_dispc_feats = {
.has_gamma_i734_bug = true,
};
-static int dispc_init_features(struct platform_device *pdev)
-{
- const struct dispc_features *src;
- struct dispc_features *dst;
-
- dst = devm_kzalloc(&pdev->dev, sizeof(*dst), GFP_KERNEL);
- if (!dst) {
- dev_err(&pdev->dev, "Failed to allocate DISPC Features\n");
- return -ENOMEM;
- }
-
- switch (omapdss_get_version()) {
- case OMAPDSS_VER_OMAP24xx:
- src = &omap24xx_dispc_feats;
- break;
-
- case OMAPDSS_VER_OMAP34xx_ES1:
- src = &omap34xx_rev1_0_dispc_feats;
- break;
-
- case OMAPDSS_VER_OMAP34xx_ES3:
- case OMAPDSS_VER_OMAP3630:
- case OMAPDSS_VER_AM35xx:
- case OMAPDSS_VER_AM43xx:
- src = &omap34xx_rev3_0_dispc_feats;
- break;
-
- case OMAPDSS_VER_OMAP4430_ES1:
- case OMAPDSS_VER_OMAP4430_ES2:
- case OMAPDSS_VER_OMAP4:
- src = &omap44xx_dispc_feats;
- break;
-
- case OMAPDSS_VER_OMAP5:
- case OMAPDSS_VER_DRA7xx:
- src = &omap54xx_dispc_feats;
- break;
-
- default:
- return -ENODEV;
- }
-
- memcpy(dst, src, sizeof(*dst));
- dispc.feat = dst;
-
- return 0;
-}
-
static irqreturn_t dispc_irq_handler(int irq, void *arg)
{
if (!dispc.is_enabled)
@@ -4083,9 +4517,28 @@ static const struct dispc_ops dispc_ops = {
};
/* DISPC HW IP initialisation */
+static const struct of_device_id dispc_of_match[] = {
+ { .compatible = "ti,omap2-dispc", .data = &omap24xx_dispc_feats },
+ { .compatible = "ti,omap3-dispc", .data = &omap36xx_dispc_feats },
+ { .compatible = "ti,omap4-dispc", .data = &omap44xx_dispc_feats },
+ { .compatible = "ti,omap5-dispc", .data = &omap54xx_dispc_feats },
+ { .compatible = "ti,dra7-dispc", .data = &omap54xx_dispc_feats },
+ {},
+};
+
+static const struct soc_device_attribute dispc_soc_devices[] = {
+ { .machine = "OMAP3[45]*",
+ .revision = "ES[12].?", .data = &omap34xx_rev1_0_dispc_feats },
+ { .machine = "OMAP3[45]*", .data = &omap34xx_rev3_0_dispc_feats },
+ { .machine = "AM35*", .data = &omap34xx_rev3_0_dispc_feats },
+ { .machine = "AM43*", .data = &am43xx_dispc_feats },
+ { /* sentinel */ }
+};
+
static int dispc_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
+ const struct soc_device_attribute *soc;
u32 rev;
int r = 0;
struct resource *dispc_mem;
@@ -4095,9 +4548,15 @@ static int dispc_bind(struct device *dev, struct device *master, void *data)
spin_lock_init(&dispc.control_lock);
- r = dispc_init_features(dispc.pdev);
- if (r)
- return r;
+ /*
+ * The OMAP3-based models can't be told apart using the compatible
+ * string, so use SoC device matching instead.
+ */
+ soc = soc_device_match(dispc_soc_devices);
+ if (soc)
+ dispc.feat = soc->data;
+ else
+ dispc.feat = of_match_device(dispc_of_match, &pdev->dev)->data;
r = dispc_errata_i734_wa_init();
if (r)
@@ -4226,15 +4685,6 @@ static const struct dev_pm_ops dispc_pm_ops = {
.runtime_resume = dispc_runtime_resume,
};
-static const struct of_device_id dispc_of_match[] = {
- { .compatible = "ti,omap2-dispc", },
- { .compatible = "ti,omap3-dispc", },
- { .compatible = "ti,omap4-dispc", },
- { .compatible = "ti,omap5-dispc", },
- { .compatible = "ti,dra7-dispc", },
- {},
-};
-
static struct platform_driver omap_dispchw_driver = {
.probe = dispc_probe,
.remove = dispc_remove,
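Note: dispc_has_feature(), dispc_get_num_ovls() and the other small accessors used
throughout the hunks above are (re)added by this patch in earlier dispc.c hunks that
are not shown in this excerpt. As a minimal sketch only, assuming the features,
num_features and num_ovls members populated in the omap*_dispc_feats initializers
above, the lookups presumably amount to:

static bool dispc_has_feature(enum dispc_feature_id id)
{
        unsigned int i;

        /* linear scan of the per-SoC feature-id list */
        for (i = 0; i < dispc.feat->num_features; i++) {
                if (dispc.feat->features[i] == id)
                        return true;
        }

        return false;
}

static int dispc_get_num_ovls(void)
{
        return dispc.feat->num_ovls;
}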
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index 86dbb65a6c28..daf286fc8a40 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -32,13 +32,14 @@
#include <linux/string.h>
#include <linux/of.h>
#include <linux/clk.h>
+#include <linux/sys_soc.h>
#include "omapdss.h"
#include "dss.h"
-#include "dss_features.h"
struct dpi_data {
struct platform_device *pdev;
+ enum dss_model dss_model;
struct regulator *vdds_dsi_reg;
enum dss_clk_source clk_src;
@@ -99,25 +100,21 @@ static enum dss_clk_source dpi_get_clk_src_dra7xx(enum omap_channel channel)
return DSS_CLK_SRC_FCK;
}
-static enum dss_clk_source dpi_get_clk_src(enum omap_channel channel)
+static enum dss_clk_source dpi_get_clk_src(struct dpi_data *dpi)
{
+ enum omap_channel channel = dpi->output.dispc_channel;
+
/*
* XXX we can't currently use DSI PLL for DPI with OMAP3, as the DSI PLL
* would also be used for DISPC fclk. Meaning, when the DPI output is
* disabled, DISPC clock will be disabled, and TV out will stop.
*/
- switch (omapdss_get_version()) {
- case OMAPDSS_VER_OMAP24xx:
- case OMAPDSS_VER_OMAP34xx_ES1:
- case OMAPDSS_VER_OMAP34xx_ES3:
- case OMAPDSS_VER_OMAP3630:
- case OMAPDSS_VER_AM35xx:
- case OMAPDSS_VER_AM43xx:
+ switch (dpi->dss_model) {
+ case DSS_MODEL_OMAP2:
+ case DSS_MODEL_OMAP3:
return DSS_CLK_SRC_FCK;
- case OMAPDSS_VER_OMAP4430_ES1:
- case OMAPDSS_VER_OMAP4430_ES2:
- case OMAPDSS_VER_OMAP4:
+ case DSS_MODEL_OMAP4:
switch (channel) {
case OMAP_DSS_CHANNEL_LCD:
return DSS_CLK_SRC_PLL1_1;
@@ -127,7 +124,7 @@ static enum dss_clk_source dpi_get_clk_src(enum omap_channel channel)
return DSS_CLK_SRC_FCK;
}
- case OMAPDSS_VER_OMAP5:
+ case DSS_MODEL_OMAP5:
switch (channel) {
case OMAP_DSS_CHANNEL_LCD:
return DSS_CLK_SRC_PLL1_1;
@@ -138,7 +135,7 @@ static enum dss_clk_source dpi_get_clk_src(enum omap_channel channel)
return DSS_CLK_SRC_FCK;
}
- case OMAPDSS_VER_DRA7xx:
+ case DSS_MODEL_DRA7:
return dpi_get_clk_src_dra7xx(channel);
default:
@@ -213,7 +210,7 @@ static bool dpi_calc_pll_cb(int n, int m, unsigned long fint,
ctx->pll_cinfo.clkdco = clkdco;
return dss_pll_hsdiv_calc_a(ctx->pll, clkdco,
- ctx->pck_min, dss_feat_get_param_max(FEAT_PARAM_DSS_FCK),
+ ctx->pck_min, dss_get_max_fck_rate(),
dpi_calc_hsdiv_cb, ctx);
}
@@ -403,19 +400,13 @@ static int dpi_display_enable(struct omap_dss_device *dssdev)
mutex_lock(&dpi->lock);
- if (dss_has_feature(FEAT_DPI_USES_VDDS_DSI) && !dpi->vdds_dsi_reg) {
- DSSERR("no VDSS_DSI regulator\n");
- r = -ENODEV;
- goto err_no_reg;
- }
-
if (!out->dispc_channel_connected) {
DSSERR("failed to enable display: no output/manager\n");
r = -ENODEV;
goto err_no_out_mgr;
}
- if (dss_has_feature(FEAT_DPI_USES_VDDS_DSI)) {
+ if (dpi->vdds_dsi_reg) {
r = regulator_enable(dpi->vdds_dsi_reg);
if (r)
goto err_reg_enable;
@@ -459,11 +450,10 @@ err_pll_init:
err_src_sel:
dispc_runtime_put();
err_get_dispc:
- if (dss_has_feature(FEAT_DPI_USES_VDDS_DSI))
+ if (dpi->vdds_dsi_reg)
regulator_disable(dpi->vdds_dsi_reg);
err_reg_enable:
err_no_out_mgr:
-err_no_reg:
mutex_unlock(&dpi->lock);
return r;
}
@@ -484,7 +474,7 @@ static void dpi_display_disable(struct omap_dss_device *dssdev)
dispc_runtime_put();
- if (dss_has_feature(FEAT_DPI_USES_VDDS_DSI))
+ if (dpi->vdds_dsi_reg)
regulator_disable(dpi->vdds_dsi_reg);
mutex_unlock(&dpi->lock);
@@ -575,11 +565,21 @@ static int dpi_verify_pll(struct dss_pll *pll)
return 0;
}
+static const struct soc_device_attribute dpi_soc_devices[] = {
+ { .family = "OMAP3[456]*" },
+ { .family = "[AD]M37*" },
+ { /* sentinel */ }
+};
+
static int dpi_init_regulator(struct dpi_data *dpi)
{
struct regulator *vdds_dsi;
- if (!dss_has_feature(FEAT_DPI_USES_VDDS_DSI))
+ /*
+ * The DPI uses the DSI VDDS on OMAP34xx, OMAP35xx, OMAP36xx, AM37xx and
+ * DM37xx only.
+ */
+ if (!soc_device_match(dpi_soc_devices))
return 0;
if (dpi->vdds_dsi_reg)
@@ -604,7 +604,7 @@ static void dpi_init_pll(struct dpi_data *dpi)
if (dpi->pll)
return;
- dpi->clk_src = dpi_get_clk_src(dpi->output.dispc_channel);
+ dpi->clk_src = dpi_get_clk_src(dpi);
pll = dss_pll_find_by_src(dpi->clk_src);
if (!pll)
@@ -624,18 +624,14 @@ static void dpi_init_pll(struct dpi_data *dpi)
* the channel in some more dynamic manner, or get the channel as a user
* parameter.
*/
-static enum omap_channel dpi_get_channel(int port_num)
+static enum omap_channel dpi_get_channel(struct dpi_data *dpi, int port_num)
{
- switch (omapdss_get_version()) {
- case OMAPDSS_VER_OMAP24xx:
- case OMAPDSS_VER_OMAP34xx_ES1:
- case OMAPDSS_VER_OMAP34xx_ES3:
- case OMAPDSS_VER_OMAP3630:
- case OMAPDSS_VER_AM35xx:
- case OMAPDSS_VER_AM43xx:
+ switch (dpi->dss_model) {
+ case DSS_MODEL_OMAP2:
+ case DSS_MODEL_OMAP3:
return OMAP_DSS_CHANNEL_LCD;
- case OMAPDSS_VER_DRA7xx:
+ case DSS_MODEL_DRA7:
switch (port_num) {
case 2:
return OMAP_DSS_CHANNEL_LCD3;
@@ -646,12 +642,10 @@ static enum omap_channel dpi_get_channel(int port_num)
return OMAP_DSS_CHANNEL_LCD;
}
- case OMAPDSS_VER_OMAP4430_ES1:
- case OMAPDSS_VER_OMAP4430_ES2:
- case OMAPDSS_VER_OMAP4:
+ case DSS_MODEL_OMAP4:
return OMAP_DSS_CHANNEL_LCD2;
- case OMAPDSS_VER_OMAP5:
+ case DSS_MODEL_OMAP5:
return OMAP_DSS_CHANNEL_LCD3;
default:
@@ -716,10 +710,8 @@ static const struct omapdss_dpi_ops dpi_ops = {
.get_timings = dpi_get_timings,
};
-static void dpi_init_output_port(struct platform_device *pdev,
- struct device_node *port)
+static void dpi_init_output_port(struct dpi_data *dpi, struct device_node *port)
{
- struct dpi_data *dpi = port->data;
struct omap_dss_device *out = &dpi->output;
int r;
u32 port_num;
@@ -741,10 +733,10 @@ static void dpi_init_output_port(struct platform_device *pdev,
break;
}
- out->dev = &pdev->dev;
+ out->dev = &dpi->pdev->dev;
out->id = OMAP_DSS_OUTPUT_DPI;
out->output_type = OMAP_DISPLAY_TYPE_DPI;
- out->dispc_channel = dpi_get_channel(port_num);
+ out->dispc_channel = dpi_get_channel(dpi, port_num);
out->port_num = port_num;
out->ops.dpi = &dpi_ops;
out->owner = THIS_MODULE;
@@ -760,7 +752,8 @@ static void dpi_uninit_output_port(struct device_node *port)
omapdss_unregister_output(out);
}
-int dpi_init_port(struct platform_device *pdev, struct device_node *port)
+int dpi_init_port(struct platform_device *pdev, struct device_node *port,
+ enum dss_model dss_model)
{
struct dpi_data *dpi;
struct device_node *ep;
@@ -786,11 +779,12 @@ int dpi_init_port(struct platform_device *pdev, struct device_node *port)
of_node_put(ep);
dpi->pdev = pdev;
+ dpi->dss_model = dss_model;
port->data = dpi;
mutex_init(&dpi->lock);
- dpi_init_output_port(pdev, port);
+ dpi_init_output_port(dpi, port);
dpi->port_initialized = true;
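The dpi_soc_devices[] table above uses soc_device_match() purely as a gate: glob
patterns against the SoC family decide whether the vdds_dsi regulator is needed at
all, while dispc_bind() above uses the same helper to pick per-SoC data and falls
back to the OF match table. A short illustrative sketch of both patterns (the table,
names and data below are made up for illustration, not taken from the patch):

#include <linux/device.h>
#include <linux/of_device.h>
#include <linux/sys_soc.h>

static const int example_quirk_data = 1;

static const struct soc_device_attribute example_soc_devices[] = {
        { .family = "OMAP3[456]*", .data = &example_quirk_data },
        { /* sentinel */ }
};

static bool example_soc_needs_quirk(void)
{
        /* soc_device_match() returns NULL when no entry matches the SoC */
        return soc_device_match(example_soc_devices) != NULL;
}

static const void *example_get_driver_data(struct device *dev)
{
        const struct soc_device_attribute *soc;

        soc = soc_device_match(example_soc_devices);
        if (soc)
                return soc->data;

        /* like dispc_bind(): assume the OF compatible match always succeeds */
        return of_match_device(dev->driver->of_match_table, dev)->data;
}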
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 835f49004bc3..b56a05730314 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -20,6 +20,8 @@
#define DSS_SUBSYS_NAME "DSI"
#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/device.h>
@@ -42,12 +44,12 @@
#include <linux/of_graph.h>
#include <linux/of_platform.h>
#include <linux/component.h>
+#include <linux/sys_soc.h>
#include <video/mipi_display.h>
#include "omapdss.h"
#include "dss.h"
-#include "dss_features.h"
#define DSI_CATCH_MISSING_TE
@@ -228,6 +230,12 @@ static int dsi_vc_send_null(struct omap_dss_device *dssdev, int channel);
#define DSI_MAX_NR_ISRS 2
#define DSI_MAX_NR_LANES 5
+enum dsi_model {
+ DSI_MODEL_OMAP3,
+ DSI_MODEL_OMAP4,
+ DSI_MODEL_OMAP5,
+};
+
enum dsi_lane_function {
DSI_LANE_UNUSED = 0,
DSI_LANE_CLK,
@@ -299,12 +307,36 @@ struct dsi_lp_clock_info {
u16 lp_clk_div;
};
+struct dsi_module_id_data {
+ u32 address;
+ int id;
+};
+
+enum dsi_quirks {
+ DSI_QUIRK_PLL_PWR_BUG = (1 << 0), /* DSI-PLL power command 0x3 is not working */
+ DSI_QUIRK_DCS_CMD_CONFIG_VC = (1 << 1),
+ DSI_QUIRK_VC_OCP_WIDTH = (1 << 2),
+ DSI_QUIRK_REVERSE_TXCLKESC = (1 << 3),
+ DSI_QUIRK_GNQ = (1 << 4),
+ DSI_QUIRK_PHY_DCC = (1 << 5),
+};
+
+struct dsi_of_data {
+ enum dsi_model model;
+ const struct dss_pll_hw *pll_hw;
+ const struct dsi_module_id_data *modules;
+ unsigned int max_fck_freq;
+ unsigned int max_pll_lpdiv;
+ enum dsi_quirks quirks;
+};
+
struct dsi_data {
struct platform_device *pdev;
void __iomem *proto_base;
void __iomem *phy_base;
void __iomem *pll_base;
+ const struct dsi_of_data *data;
int module_id;
int irq;
@@ -312,6 +344,7 @@ struct dsi_data {
bool is_enabled;
struct clk *dss_clk;
+ struct regmap *syscon;
struct dispc_clock_info user_dispc_cinfo;
struct dss_pll_clock_info user_dsi_cinfo;
@@ -397,13 +430,6 @@ struct dsi_packet_sent_handler_data {
struct completion *completion;
};
-struct dsi_module_id_data {
- u32 address;
- int id;
-};
-
-static const struct of_device_id dsi_of_match[];
-
#ifdef DSI_PERF_MEASURE
static bool dsi_perf;
module_param(dsi_perf, bool, 0644);
@@ -1186,6 +1212,7 @@ static int dsi_regulator_init(struct platform_device *dsidev)
static void _dsi_print_reset_status(struct platform_device *dsidev)
{
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
u32 l;
int b0, b1, b2;
@@ -1194,7 +1221,7 @@ static void _dsi_print_reset_status(struct platform_device *dsidev)
* I/O. */
l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
- if (dss_has_feature(FEAT_DSI_REVERSE_TXCLKESC)) {
+ if (dsi->data->quirks & DSI_QUIRK_REVERSE_TXCLKESC) {
b0 = 28;
b1 = 27;
b2 = 26;
@@ -1297,7 +1324,7 @@ static int dsi_set_lp_clk_divisor(struct platform_device *dsidev)
unsigned long dsi_fclk;
unsigned lp_clk_div;
unsigned long lp_clk;
- unsigned lpdiv_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_LPDIV);
+ unsigned lpdiv_max = dsi->data->max_pll_lpdiv;
lp_clk_div = dsi->user_lp_cinfo.lp_clk_div;
@@ -1349,11 +1376,12 @@ enum dsi_pll_power_state {
static int dsi_pll_power(struct platform_device *dsidev,
enum dsi_pll_power_state state)
{
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int t = 0;
/* DSI-PLL power command 0x3 is not working */
- if (dss_has_feature(FEAT_DSI_PLL_PWR_BUG) &&
- state == DSI_PLL_POWER_ON_DIV)
+ if ((dsi->data->quirks & DSI_QUIRK_PLL_PWR_BUG) &&
+ state == DSI_PLL_POWER_ON_DIV)
state = DSI_PLL_POWER_ON_ALL;
/* PLL_PWR_CMD */
@@ -1373,11 +1401,12 @@ static int dsi_pll_power(struct platform_device *dsidev,
}
-static void dsi_pll_calc_dsi_fck(struct dss_pll_clock_info *cinfo)
+static void dsi_pll_calc_dsi_fck(struct dsi_data *dsi,
+ struct dss_pll_clock_info *cinfo)
{
unsigned long max_dsi_fck;
- max_dsi_fck = dss_feat_get_param_max(FEAT_PARAM_DSI_FCK);
+ max_dsi_fck = dsi->data->max_fck_freq;
cinfo->mX[HSDIV_DSI] = DIV_ROUND_UP(cinfo->clkdco, max_dsi_fck);
cinfo->clkout[HSDIV_DSI] = cinfo->clkdco / cinfo->mX[HSDIV_DSI];
@@ -1773,13 +1802,14 @@ static int dsi_cio_power(struct platform_device *dsidev,
static unsigned dsi_get_line_buf_size(struct platform_device *dsidev)
{
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int val;
/* line buffer on OMAP3 is 1024 x 24bits */
/* XXX: for some reason using full buffer size causes
* considerable TX slowdown with update sizes that fill the
* whole buffer */
- if (!dss_has_feature(FEAT_DSI_GNQ))
+ if (!(dsi->data->quirks & DSI_QUIRK_GNQ))
return 1023 * 3;
val = REG_GET(dsidev, DSI_GNQ, 14, 12); /* VP1_LINE_BUFFER_SIZE */
@@ -1872,6 +1902,7 @@ static inline unsigned ddr2ns(struct platform_device *dsidev, unsigned ddr)
static void dsi_cio_timings(struct platform_device *dsidev)
{
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
u32 r;
u32 ths_prepare, ths_prepare_ths_zero, ths_trail, ths_exit;
u32 tlpx_half, tclk_trail, tclk_zero;
@@ -1934,7 +1965,7 @@ static void dsi_cio_timings(struct platform_device *dsidev)
r = FLD_MOD(r, tclk_trail, 15, 8);
r = FLD_MOD(r, tclk_zero, 7, 0);
- if (dss_has_feature(FEAT_DSI_PHY_DCC)) {
+ if (dsi->data->quirks & DSI_QUIRK_PHY_DCC) {
r = FLD_MOD(r, 0, 21, 21); /* DCCEN = disable */
r = FLD_MOD(r, 1, 22, 22); /* CLKINP_DIVBY2EN = enable */
r = FLD_MOD(r, 1, 23, 23); /* CLKINP_SEL = enable */
@@ -2006,7 +2037,7 @@ static int dsi_cio_wait_tx_clk_esc_reset(struct platform_device *dsidev)
static const u8 offsets_new[] = { 24, 25, 26, 27, 28 };
const u8 *offsets;
- if (dss_has_feature(FEAT_DSI_REVERSE_TXCLKESC))
+ if (dsi->data->quirks & DSI_QUIRK_REVERSE_TXCLKESC)
offsets = offsets_old;
else
offsets = offsets_new;
@@ -2060,6 +2091,83 @@ static unsigned dsi_get_lane_mask(struct platform_device *dsidev)
return mask;
}
+/* OMAP4 CONTROL_DSIPHY */
+#define OMAP4_DSIPHY_SYSCON_OFFSET 0x78
+
+#define OMAP4_DSI2_LANEENABLE_SHIFT 29
+#define OMAP4_DSI2_LANEENABLE_MASK (0x7 << 29)
+#define OMAP4_DSI1_LANEENABLE_SHIFT 24
+#define OMAP4_DSI1_LANEENABLE_MASK (0x1f << 24)
+#define OMAP4_DSI1_PIPD_SHIFT 19
+#define OMAP4_DSI1_PIPD_MASK (0x1f << 19)
+#define OMAP4_DSI2_PIPD_SHIFT 14
+#define OMAP4_DSI2_PIPD_MASK (0x1f << 14)
+
+static int dsi_omap4_mux_pads(struct dsi_data *dsi, unsigned int lanes)
+{
+ u32 enable_mask, enable_shift;
+ u32 pipd_mask, pipd_shift;
+
+ if (dsi->module_id == 0) {
+ enable_mask = OMAP4_DSI1_LANEENABLE_MASK;
+ enable_shift = OMAP4_DSI1_LANEENABLE_SHIFT;
+ pipd_mask = OMAP4_DSI1_PIPD_MASK;
+ pipd_shift = OMAP4_DSI1_PIPD_SHIFT;
+ } else if (dsi->module_id == 1) {
+ enable_mask = OMAP4_DSI2_LANEENABLE_MASK;
+ enable_shift = OMAP4_DSI2_LANEENABLE_SHIFT;
+ pipd_mask = OMAP4_DSI2_PIPD_MASK;
+ pipd_shift = OMAP4_DSI2_PIPD_SHIFT;
+ } else {
+ return -ENODEV;
+ }
+
+ return regmap_update_bits(dsi->syscon, OMAP4_DSIPHY_SYSCON_OFFSET,
+ enable_mask | pipd_mask,
+ (lanes << enable_shift) | (lanes << pipd_shift));
+}
+
+/* OMAP5 CONTROL_DSIPHY */
+
+#define OMAP5_DSIPHY_SYSCON_OFFSET 0x74
+
+#define OMAP5_DSI1_LANEENABLE_SHIFT 24
+#define OMAP5_DSI2_LANEENABLE_SHIFT 19
+#define OMAP5_DSI_LANEENABLE_MASK 0x1f
+
+static int dsi_omap5_mux_pads(struct dsi_data *dsi, unsigned int lanes)
+{
+ u32 enable_shift;
+
+ if (dsi->module_id == 0)
+ enable_shift = OMAP5_DSI1_LANEENABLE_SHIFT;
+ else if (dsi->module_id == 1)
+ enable_shift = OMAP5_DSI2_LANEENABLE_SHIFT;
+ else
+ return -ENODEV;
+
+ return regmap_update_bits(dsi->syscon, OMAP5_DSIPHY_SYSCON_OFFSET,
+ OMAP5_DSI_LANEENABLE_MASK << enable_shift,
+ lanes << enable_shift);
+}
+
+static int dsi_enable_pads(struct dsi_data *dsi, unsigned int lane_mask)
+{
+ if (dsi->data->model == DSI_MODEL_OMAP4)
+ return dsi_omap4_mux_pads(dsi, lane_mask);
+ if (dsi->data->model == DSI_MODEL_OMAP5)
+ return dsi_omap5_mux_pads(dsi, lane_mask);
+ return 0;
+}
+
+static void dsi_disable_pads(struct dsi_data *dsi)
+{
+ if (dsi->data->model == DSI_MODEL_OMAP4)
+ dsi_omap4_mux_pads(dsi, 0);
+ else if (dsi->data->model == DSI_MODEL_OMAP5)
+ dsi_omap5_mux_pads(dsi, 0);
+}
+
static int dsi_cio_init(struct platform_device *dsidev)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
@@ -2068,7 +2176,7 @@ static int dsi_cio_init(struct platform_device *dsidev)
DSSDBG("DSI CIO init starts");
- r = dss_dsi_enable_pads(dsi->module_id, dsi_get_lane_mask(dsidev));
+ r = dsi_enable_pads(dsi, dsi_get_lane_mask(dsidev));
if (r)
return r;
@@ -2178,7 +2286,7 @@ err_cio_pwr:
dsi_cio_disable_lane_override(dsidev);
err_scp_clk_dom:
dsi_disable_scp_clk(dsidev);
- dss_dsi_disable_pads(dsi->module_id, dsi_get_lane_mask(dsidev));
+ dsi_disable_pads(dsi);
return r;
}
@@ -2191,7 +2299,7 @@ static void dsi_cio_uninit(struct platform_device *dsidev)
dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF);
dsi_disable_scp_clk(dsidev);
- dss_dsi_disable_pads(dsi->module_id, dsi_get_lane_mask(dsidev));
+ dsi_disable_pads(dsi);
}
static void dsi_config_tx_fifo(struct platform_device *dsidev,
@@ -2439,7 +2547,7 @@ static void dsi_vc_initial_config(struct platform_device *dsidev, int channel)
r = FLD_MOD(r, 1, 7, 7); /* CS_TX_EN */
r = FLD_MOD(r, 1, 8, 8); /* ECC_TX_EN */
r = FLD_MOD(r, 0, 9, 9); /* MODE_SPEED, high speed on/off */
- if (dss_has_feature(FEAT_DSI_VC_OCP_WIDTH))
+ if (dsi->data->quirks & DSI_QUIRK_VC_OCP_WIDTH)
r = FLD_MOD(r, 3, 11, 10); /* OCP_WIDTH = 32 bit */
r = FLD_MOD(r, 4, 29, 27); /* DMA_RX_REQ_NB = no dma */
@@ -2474,7 +2582,7 @@ static int dsi_vc_config_source(struct platform_device *dsidev, int channel,
REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), source, 1, 1);
/* DCS_CMD_ENABLE */
- if (dss_has_feature(FEAT_DSI_DCS_CMD_CONFIG_VC)) {
+ if (dsi->data->quirks & DSI_QUIRK_DCS_CMD_CONFIG_VC) {
bool enable = source == DSI_VC_SOURCE_VP;
REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 30, 30);
}
@@ -3607,7 +3715,7 @@ static int dsi_proto_config(struct platform_device *dsidev)
r = FLD_MOD(r, 0, 8, 8); /* VP_CLK_POL */
r = FLD_MOD(r, 1, 14, 14); /* TRIGGER_RESET_MODE */
r = FLD_MOD(r, 1, 19, 19); /* EOT_ENABLE */
- if (!dss_has_feature(FEAT_DSI_DCS_CMD_CONFIG_VC)) {
+ if (!(dsi->data->quirks & DSI_QUIRK_DCS_CMD_CONFIG_VC)) {
r = FLD_MOD(r, 1, 24, 24); /* DCS_CMD_ENABLE */
/* DCS_CMD_CODE, 1=start, 0=continue */
r = FLD_MOD(r, 0, 25, 25);
@@ -4450,6 +4558,7 @@ static bool dsi_cm_calc_pll_cb(int n, int m, unsigned long fint,
unsigned long clkdco, void *data)
{
struct dsi_clk_calc_ctx *ctx = data;
+ struct dsi_data *dsi = dsi_get_dsidrv_data(ctx->dsidev);
ctx->dsi_cinfo.n = n;
ctx->dsi_cinfo.m = m;
@@ -4457,7 +4566,7 @@ static bool dsi_cm_calc_pll_cb(int n, int m, unsigned long fint,
ctx->dsi_cinfo.clkdco = clkdco;
return dss_pll_hsdiv_calc_a(ctx->pll, clkdco, ctx->req_pck_min,
- dss_feat_get_param_max(FEAT_PARAM_DSS_FCK),
+ dsi->data->max_fck_freq,
dsi_cm_calc_hsdiv_cb, ctx);
}
@@ -4749,6 +4858,7 @@ static bool dsi_vm_calc_pll_cb(int n, int m, unsigned long fint,
unsigned long clkdco, void *data)
{
struct dsi_clk_calc_ctx *ctx = data;
+ struct dsi_data *dsi = dsi_get_dsidrv_data(ctx->dsidev);
ctx->dsi_cinfo.n = n;
ctx->dsi_cinfo.m = m;
@@ -4756,7 +4866,7 @@ static bool dsi_vm_calc_pll_cb(int n, int m, unsigned long fint,
ctx->dsi_cinfo.clkdco = clkdco;
return dss_pll_hsdiv_calc_a(ctx->pll, clkdco, ctx->req_pck_min,
- dss_feat_get_param_max(FEAT_PARAM_DSS_FCK),
+ dsi->data->max_fck_freq,
dsi_vm_calc_hsdiv_cb, ctx);
}
@@ -4827,7 +4937,7 @@ static int dsi_set_config(struct omap_dss_device *dssdev,
goto err;
}
- dsi_pll_calc_dsi_fck(&ctx.dsi_cinfo);
+ dsi_pll_calc_dsi_fck(dsi, &ctx.dsi_cinfo);
r = dsi_lp_clock_calc(ctx.dsi_cinfo.clkout[HSDIV_DSI],
config->lp_clk_min, config->lp_clk_max, &dsi->user_lp_cinfo);
@@ -4857,24 +4967,14 @@ err:
* the channel in some more dynamic manner, or get the channel as a user
* parameter.
*/
-static enum omap_channel dsi_get_channel(int module_id)
+static enum omap_channel dsi_get_channel(struct dsi_data *dsi)
{
- switch (omapdss_get_version()) {
- case OMAPDSS_VER_OMAP24xx:
- case OMAPDSS_VER_AM43xx:
- DSSWARN("DSI not supported\n");
+ switch (dsi->data->model) {
+ case DSI_MODEL_OMAP3:
return OMAP_DSS_CHANNEL_LCD;
- case OMAPDSS_VER_OMAP34xx_ES1:
- case OMAPDSS_VER_OMAP34xx_ES3:
- case OMAPDSS_VER_OMAP3630:
- case OMAPDSS_VER_AM35xx:
- return OMAP_DSS_CHANNEL_LCD;
-
- case OMAPDSS_VER_OMAP4430_ES1:
- case OMAPDSS_VER_OMAP4430_ES2:
- case OMAPDSS_VER_OMAP4:
- switch (module_id) {
+ case DSI_MODEL_OMAP4:
+ switch (dsi->module_id) {
case 0:
return OMAP_DSS_CHANNEL_LCD;
case 1:
@@ -4884,8 +4984,8 @@ static enum omap_channel dsi_get_channel(int module_id)
return OMAP_DSS_CHANNEL_LCD;
}
- case OMAPDSS_VER_OMAP5:
- switch (module_id) {
+ case DSI_MODEL_OMAP5:
+ switch (dsi->module_id) {
case 0:
return OMAP_DSS_CHANNEL_LCD;
case 1:
@@ -5065,7 +5165,7 @@ static void dsi_init_output(struct platform_device *dsidev)
out->output_type = OMAP_DISPLAY_TYPE_DSI;
out->name = dsi->module_id == 0 ? "dsi.0" : "dsi.1";
- out->dispc_channel = dsi_get_channel(dsi->module_id);
+ out->dispc_channel = dsi_get_channel(dsi);
out->ops.dsi = &dsi_ops;
out->owner = THIS_MODULE;
@@ -5240,29 +5340,7 @@ static int dsi_init_pll_data(struct platform_device *dsidev)
pll->id = dsi->module_id == 0 ? DSS_PLL_DSI1 : DSS_PLL_DSI2;
pll->clkin = clk;
pll->base = dsi->pll_base;
-
- switch (omapdss_get_version()) {
- case OMAPDSS_VER_OMAP34xx_ES1:
- case OMAPDSS_VER_OMAP34xx_ES3:
- case OMAPDSS_VER_OMAP3630:
- case OMAPDSS_VER_AM35xx:
- pll->hw = &dss_omap3_dsi_pll_hw;
- break;
-
- case OMAPDSS_VER_OMAP4430_ES1:
- case OMAPDSS_VER_OMAP4430_ES2:
- case OMAPDSS_VER_OMAP4:
- pll->hw = &dss_omap4_dsi_pll_hw;
- break;
-
- case OMAPDSS_VER_OMAP5:
- pll->hw = &dss_omap5_dsi_pll_hw;
- break;
-
- default:
- return -ENODEV;
- }
-
+ pll->hw = dsi->data->pll_hw;
pll->ops = &dsi_pll_ops;
r = dss_pll_register(pll);
@@ -5273,9 +5351,74 @@ static int dsi_init_pll_data(struct platform_device *dsidev)
}
/* DSI1 HW IP initialisation */
+static const struct dsi_of_data dsi_of_data_omap34xx = {
+ .model = DSI_MODEL_OMAP3,
+ .pll_hw = &dss_omap3_dsi_pll_hw,
+ .modules = (const struct dsi_module_id_data[]) {
+ { .address = 0x4804fc00, .id = 0, },
+ { },
+ },
+ .max_fck_freq = 173000000,
+ .max_pll_lpdiv = (1 << 13) - 1,
+ .quirks = DSI_QUIRK_REVERSE_TXCLKESC,
+};
+
+static const struct dsi_of_data dsi_of_data_omap36xx = {
+ .model = DSI_MODEL_OMAP3,
+ .pll_hw = &dss_omap3_dsi_pll_hw,
+ .modules = (const struct dsi_module_id_data[]) {
+ { .address = 0x4804fc00, .id = 0, },
+ { },
+ },
+ .max_fck_freq = 173000000,
+ .max_pll_lpdiv = (1 << 13) - 1,
+ .quirks = DSI_QUIRK_PLL_PWR_BUG,
+};
+
+static const struct dsi_of_data dsi_of_data_omap4 = {
+ .model = DSI_MODEL_OMAP4,
+ .pll_hw = &dss_omap4_dsi_pll_hw,
+ .modules = (const struct dsi_module_id_data[]) {
+ { .address = 0x58004000, .id = 0, },
+ { .address = 0x58005000, .id = 1, },
+ { },
+ },
+ .max_fck_freq = 170000000,
+ .max_pll_lpdiv = (1 << 13) - 1,
+ .quirks = DSI_QUIRK_DCS_CMD_CONFIG_VC | DSI_QUIRK_VC_OCP_WIDTH
+ | DSI_QUIRK_GNQ,
+};
+
+static const struct dsi_of_data dsi_of_data_omap5 = {
+ .model = DSI_MODEL_OMAP5,
+ .pll_hw = &dss_omap5_dsi_pll_hw,
+ .modules = (const struct dsi_module_id_data[]) {
+ { .address = 0x58004000, .id = 0, },
+ { .address = 0x58009000, .id = 1, },
+ { },
+ },
+ .max_fck_freq = 209250000,
+ .max_pll_lpdiv = (1 << 13) - 1,
+ .quirks = DSI_QUIRK_DCS_CMD_CONFIG_VC | DSI_QUIRK_VC_OCP_WIDTH
+ | DSI_QUIRK_GNQ | DSI_QUIRK_PHY_DCC,
+};
+
+static const struct of_device_id dsi_of_match[] = {
+ { .compatible = "ti,omap3-dsi", .data = &dsi_of_data_omap36xx, },
+ { .compatible = "ti,omap4-dsi", .data = &dsi_of_data_omap4, },
+ { .compatible = "ti,omap5-dsi", .data = &dsi_of_data_omap5, },
+ {},
+};
+
+static const struct soc_device_attribute dsi_soc_devices[] = {
+ { .machine = "OMAP3[45]*", .data = &dsi_of_data_omap34xx },
+ { .machine = "AM35*", .data = &dsi_of_data_omap34xx },
+ { /* sentinel */ }
+};
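
Each dsi_of_data above terminates its modules array with an all-zero sentinel; dsi_bind() below walks that array to translate the instance's MMIO base address into a module ID. A minimal standalone version of that lookup (addresses copied from the OMAP4 table, error handling simplified to a -1 return):

#include <stdio.h>

struct dsi_module_id_data {
	unsigned long address;
	int id;
};

/* Same shape as the .modules tables above; a zero address terminates. */
static const struct dsi_module_id_data omap4_modules[] = {
	{ .address = 0x58004000, .id = 0 },
	{ .address = 0x58005000, .id = 1 },
	{ },
};

static int module_id_from_base(const struct dsi_module_id_data *d,
			       unsigned long base)
{
	while (d->address != 0 && d->address != base)
		d++;

	return d->address ? d->id : -1;		/* -1: base not in the table */
}

int main(void)
{
	printf("0x58005000 -> module %d\n",
	       module_id_from_base(omap4_modules, 0x58005000));
	return 0;
}
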
static int dsi_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *dsidev = to_platform_device(dev);
+ const struct soc_device_attribute *soc;
const struct dsi_module_id_data *d;
u32 rev;
int r, i;
@@ -5339,7 +5482,13 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
return r;
}
- d = of_match_node(dsi_of_match, dsidev->dev.of_node)->data;
+ soc = soc_device_match(dsi_soc_devices);
+ if (soc)
+ dsi->data = soc->data;
+ else
+ dsi->data = of_match_node(dsi_of_match, dev->of_node)->data;
+
+ d = dsi->data->modules;
while (d->address != 0 && d->address != dsi_mem->start)
d++;
@@ -5350,6 +5499,24 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
dsi->module_id = d->id;
+ if (dsi->data->model == DSI_MODEL_OMAP4 ||
+ dsi->data->model == DSI_MODEL_OMAP5) {
+ struct device_node *np;
+
+ /*
+ * The OMAP4/5 display DT bindings don't reference the padconf
+ * syscon. Our only option to retrieve it is to find it by name.
+ */
+ np = of_find_node_by_name(NULL,
+ dsi->data->model == DSI_MODEL_OMAP4 ?
+ "omap4_padconf_global" : "omap5_padconf_global");
+ if (!np)
+ return -ENODEV;
+
+ dsi->syscon = syscon_node_to_regmap(np);
+ of_node_put(np);
+ }
+
/* DSI VCs initialization */
for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) {
dsi->vc[i].source = DSI_VC_SOURCE_L4;
@@ -5375,7 +5542,7 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
/* DSI on OMAP3 doesn't have register DSI_GNQ, set number
* of data lanes to 3 by default */
- if (dss_has_feature(FEAT_DSI_GNQ))
+ if (dsi->data->quirks & DSI_QUIRK_GNQ)
/* NB_DATA_LANES */
dsi->num_lanes_supported = 1 + REG_GET(dsidev, DSI_GNQ, 11, 9);
else
@@ -5495,30 +5662,6 @@ static const struct dev_pm_ops dsi_pm_ops = {
.runtime_resume = dsi_runtime_resume,
};
-static const struct dsi_module_id_data dsi_of_data_omap3[] = {
- { .address = 0x4804fc00, .id = 0, },
- { },
-};
-
-static const struct dsi_module_id_data dsi_of_data_omap4[] = {
- { .address = 0x58004000, .id = 0, },
- { .address = 0x58005000, .id = 1, },
- { },
-};
-
-static const struct dsi_module_id_data dsi_of_data_omap5[] = {
- { .address = 0x58004000, .id = 0, },
- { .address = 0x58009000, .id = 1, },
- { },
-};
-
-static const struct of_device_id dsi_of_match[] = {
- { .compatible = "ti,omap3-dsi", .data = dsi_of_data_omap3, },
- { .compatible = "ti,omap4-dsi", .data = dsi_of_data_omap4, },
- { .compatible = "ti,omap5-dsi", .data = dsi_of_data_omap5, },
- {},
-};
-
static struct platform_driver omap_dsihw_driver = {
.probe = dsi_probe,
.remove = dsi_remove,
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index 99e22ca972c7..d1755f12236b 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -22,6 +22,7 @@
#define DSS_SUBSYS_NAME "DSS"
+#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/io.h>
@@ -38,14 +39,15 @@
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/regulator/consumer.h>
#include <linux/suspend.h>
#include <linux/component.h>
+#include <linux/sys_soc.h>
#include "omapdss.h"
#include "dss.h"
-#include "dss_features.h"
#define DSS_SZ_REGS SZ_512
@@ -69,15 +71,24 @@ struct dss_reg {
#define REG_FLD_MOD(idx, val, start, end) \
dss_write_reg(idx, FLD_MOD(dss_read_reg(idx), val, start, end))
+struct dss_ops {
+ int (*dpi_select_source)(int port, enum omap_channel channel);
+ int (*select_lcd_source)(enum omap_channel channel,
+ enum dss_clk_source clk_src);
+};
+
struct dss_features {
+ enum dss_model model;
u8 fck_div_max;
+ unsigned int fck_freq_max;
u8 dss_fck_multiplier;
const char *parent_clk_name;
const enum omap_display_type *ports;
int num_ports;
- int (*dpi_select_source)(int port, enum omap_channel channel);
- int (*select_lcd_source)(enum omap_channel channel,
- enum dss_clk_source clk_src);
+ const enum omap_dss_output_id *outputs;
+ const struct dss_ops *ops;
+ struct dss_reg_field dispc_clk_switch;
+ bool has_lcd_clk_src;
};
static struct {
@@ -139,8 +150,7 @@ static void dss_save_context(void)
SR(CONTROL);
- if (dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_LCD) &
- OMAP_DISPLAY_TYPE_SDI) {
+ if (dss.feat->outputs[OMAP_DSS_CHANNEL_LCD] & OMAP_DSS_OUTPUT_SDI) {
SR(SDI_CONTROL);
SR(PLL_CONTROL);
}
@@ -159,8 +169,7 @@ static void dss_restore_context(void)
RR(CONTROL);
- if (dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_LCD) &
- OMAP_DISPLAY_TYPE_SDI) {
+ if (dss.feat->outputs[OMAP_DSS_CHANNEL_LCD] & OMAP_DSS_OUTPUT_SDI) {
RR(SDI_CONTROL);
RR(PLL_CONTROL);
}
@@ -390,8 +399,7 @@ static void dss_dump_regs(struct seq_file *s)
DUMPREG(DSS_SYSSTATUS);
DUMPREG(DSS_CONTROL);
- if (dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_LCD) &
- OMAP_DISPLAY_TYPE_SDI) {
+ if (dss.feat->outputs[OMAP_DSS_CHANNEL_LCD] & OMAP_DSS_OUTPUT_SDI) {
DUMPREG(DSS_SDI_CONTROL);
DUMPREG(DSS_PLL_CONTROL);
DUMPREG(DSS_SDI_STATUS);
@@ -419,14 +427,12 @@ static int dss_get_channel_index(enum omap_channel channel)
static void dss_select_dispc_clk_source(enum dss_clk_source clk_src)
{
int b;
- u8 start, end;
/*
* We always use PRCM clock as the DISPC func clock, except on DSS3,
* where we don't have separate DISPC and LCD clock sources.
*/
- if (WARN_ON(dss_has_feature(FEAT_LCD_CLK_SRC) &&
- clk_src != DSS_CLK_SRC_FCK))
+ if (WARN_ON(dss.feat->has_lcd_clk_src && clk_src != DSS_CLK_SRC_FCK))
return;
switch (clk_src) {
@@ -444,9 +450,9 @@ static void dss_select_dispc_clk_source(enum dss_clk_source clk_src)
return;
}
- dss_feat_get_reg_field(FEAT_REG_DISPC_CLK_SWITCH, &start, &end);
-
- REG_FLD_MOD(DSS_CONTROL, b, start, end); /* DISPC_CLK_SWITCH */
+ REG_FLD_MOD(DSS_CONTROL, b, /* DISPC_CLK_SWITCH */
+ dss.feat->dispc_clk_switch.start,
+ dss.feat->dispc_clk_switch.end);
dss.dispc_clk_source = clk_src;
}
@@ -570,13 +576,13 @@ void dss_select_lcd_clk_source(enum omap_channel channel,
int idx = dss_get_channel_index(channel);
int r;
- if (!dss_has_feature(FEAT_LCD_CLK_SRC)) {
+ if (!dss.feat->has_lcd_clk_src) {
dss_select_dispc_clk_source(clk_src);
dss.lcd_clk_source[idx] = clk_src;
return;
}
- r = dss.feat->select_lcd_source(channel, clk_src);
+ r = dss.feat->ops->select_lcd_source(channel, clk_src);
if (r)
return;
@@ -595,7 +601,7 @@ enum dss_clk_source dss_get_dsi_clk_source(int dsi_module)
enum dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel)
{
- if (dss_has_feature(FEAT_LCD_CLK_SRC)) {
+ if (dss.feat->has_lcd_clk_src) {
int idx = dss_get_channel_index(channel);
return dss.lcd_clk_source[idx];
} else {
@@ -615,7 +621,7 @@ bool dss_div_calc(unsigned long pck, unsigned long fck_min,
unsigned long prate;
unsigned m;
- fck_hw_max = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
+ fck_hw_max = dss.feat->fck_freq_max;
if (dss.parent_clk == NULL) {
unsigned pckd;
@@ -673,6 +679,16 @@ unsigned long dss_get_dispc_clk_rate(void)
return dss.dss_clk_rate;
}
+unsigned long dss_get_max_fck_rate(void)
+{
+ return dss.feat->fck_freq_max;
+}
+
+enum omap_dss_output_id dss_get_supported_outputs(enum omap_channel channel)
+{
+ return dss.feat->outputs[channel];
+}
+
static int dss_setup_default_clock(void)
{
unsigned long max_dss_fck, prate;
@@ -680,7 +696,7 @@ static int dss_setup_default_clock(void)
unsigned fck_div;
int r;
- max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);
+ max_dss_fck = dss.feat->fck_freq_max;
if (dss.parent_clk == NULL) {
fck = clk_round_rate(dss.dss_clk, max_dss_fck);
@@ -721,27 +737,29 @@ void dss_set_dac_pwrdn_bgz(bool enable)
void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select src)
{
- enum omap_display_type dp;
- dp = dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_DIGIT);
+ enum omap_dss_output_id outputs;
+
+ outputs = dss.feat->outputs[OMAP_DSS_CHANNEL_DIGIT];
/* Complain about invalid selections */
- WARN_ON((src == DSS_VENC_TV_CLK) && !(dp & OMAP_DISPLAY_TYPE_VENC));
- WARN_ON((src == DSS_HDMI_M_PCLK) && !(dp & OMAP_DISPLAY_TYPE_HDMI));
+ WARN_ON((src == DSS_VENC_TV_CLK) && !(outputs & OMAP_DSS_OUTPUT_VENC));
+ WARN_ON((src == DSS_HDMI_M_PCLK) && !(outputs & OMAP_DSS_OUTPUT_HDMI));
/* Select only if we have options */
- if ((dp & OMAP_DISPLAY_TYPE_VENC) && (dp & OMAP_DISPLAY_TYPE_HDMI))
+ if ((outputs & OMAP_DSS_OUTPUT_VENC) &&
+ (outputs & OMAP_DSS_OUTPUT_HDMI))
REG_FLD_MOD(DSS_CONTROL, src, 15, 15); /* VENC_HDMI_SWITCH */
}
enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void)
{
- enum omap_display_type displays;
+ enum omap_dss_output_id outputs;
- displays = dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_DIGIT);
- if ((displays & OMAP_DISPLAY_TYPE_HDMI) == 0)
+ outputs = dss.feat->outputs[OMAP_DSS_CHANNEL_DIGIT];
+ if ((outputs & OMAP_DSS_OUTPUT_HDMI) == 0)
return DSS_VENC_TV_CLK;
- if ((displays & OMAP_DISPLAY_TYPE_VENC) == 0)
+ if ((outputs & OMAP_DSS_OUTPUT_VENC) == 0)
return DSS_HDMI_M_PCLK;
return REG_GET(DSS_CONTROL, 15, 15);
@@ -823,7 +841,7 @@ static int dss_dpi_select_source_dra7xx(int port, enum omap_channel channel)
int dss_dpi_select_source(int port, enum omap_channel channel)
{
- return dss.feat->dpi_select_source(port, channel);
+ return dss.feat->ops->dpi_select_source(port, channel);
}
static int dss_get_clocks(void)
@@ -882,7 +900,7 @@ void dss_runtime_put(void)
/* DEBUGFS */
#if defined(CONFIG_OMAP2_DSS_DEBUGFS)
-void dss_debug_dump_clocks(struct seq_file *s)
+static void dss_debug_dump_clocks(struct seq_file *s)
{
dss_dump_clocks(s);
dispc_dump_clocks(s);
@@ -890,8 +908,88 @@ void dss_debug_dump_clocks(struct seq_file *s)
dsi_dump_clocks(s);
#endif
}
-#endif
+static int dss_debug_show(struct seq_file *s, void *unused)
+{
+ void (*func)(struct seq_file *) = s->private;
+
+ func(s);
+ return 0;
+}
+
+static int dss_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dss_debug_show, inode->i_private);
+}
+
+static const struct file_operations dss_debug_fops = {
+ .open = dss_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static struct dentry *dss_debugfs_dir;
+
+static int dss_initialize_debugfs(void)
+{
+ dss_debugfs_dir = debugfs_create_dir("omapdss", NULL);
+ if (IS_ERR(dss_debugfs_dir)) {
+ int err = PTR_ERR(dss_debugfs_dir);
+
+ dss_debugfs_dir = NULL;
+ return err;
+ }
+
+ debugfs_create_file("clk", S_IRUGO, dss_debugfs_dir,
+ &dss_debug_dump_clocks, &dss_debug_fops);
+
+ return 0;
+}
+
+static void dss_uninitialize_debugfs(void)
+{
+ if (dss_debugfs_dir)
+ debugfs_remove_recursive(dss_debugfs_dir);
+}
+
+int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *))
+{
+ struct dentry *d;
+
+ d = debugfs_create_file(name, S_IRUGO, dss_debugfs_dir,
+ write, &dss_debug_fops);
+
+ return PTR_ERR_OR_ZERO(d);
+}
+#else /* CONFIG_OMAP2_DSS_DEBUGFS */
+static inline int dss_initialize_debugfs(void)
+{
+ return 0;
+}
+static inline void dss_uninitialize_debugfs(void)
+{
+}
+#endif /* CONFIG_OMAP2_DSS_DEBUGFS */
+
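
With the debugfs handling now local to dss.c, sub-drivers expose their dumps through dss_debugfs_create_file(); the dump callback itself becomes the file's private data and is invoked by dss_debug_show() above. A hedged usage sketch — the file name and wrapper function are invented for illustration, though dsi_dump_clocks() is a real callback elsewhere in the driver:

/* Illustrative only: how a sub-driver would register a dump callback. */
static void dsi_dump_clocks(struct seq_file *s);	/* provided by dsi.c */

static int dsi_register_debugfs(void)
{
	/* Creates the read-only file <debugfs>/omapdss/dsi_clks. */
	return dss_debugfs_create_file("dsi_clks", dsi_dump_clocks);
}
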
+static const struct dss_ops dss_ops_omap2_omap3 = {
+ .dpi_select_source = &dss_dpi_select_source_omap2_omap3,
+};
+
+static const struct dss_ops dss_ops_omap4 = {
+ .dpi_select_source = &dss_dpi_select_source_omap4,
+ .select_lcd_source = &dss_lcd_clk_mux_omap4,
+};
+
+static const struct dss_ops dss_ops_omap5 = {
+ .dpi_select_source = &dss_dpi_select_source_omap5,
+ .select_lcd_source = &dss_lcd_clk_mux_omap5,
+};
+
+static const struct dss_ops dss_ops_dra7 = {
+ .dpi_select_source = &dss_dpi_select_source_dra7xx,
+ .select_lcd_source = &dss_lcd_clk_mux_dra7,
+};
static const enum omap_display_type omap2plus_ports[] = {
OMAP_DISPLAY_TYPE_DPI,
@@ -908,130 +1006,168 @@ static const enum omap_display_type dra7xx_ports[] = {
OMAP_DISPLAY_TYPE_DPI,
};
+static const enum omap_dss_output_id omap2_dss_supported_outputs[] = {
+ /* OMAP_DSS_CHANNEL_LCD */
+ OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI,
+
+ /* OMAP_DSS_CHANNEL_DIGIT */
+ OMAP_DSS_OUTPUT_VENC,
+};
+
+static const enum omap_dss_output_id omap3430_dss_supported_outputs[] = {
+ /* OMAP_DSS_CHANNEL_LCD */
+ OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI |
+ OMAP_DSS_OUTPUT_SDI | OMAP_DSS_OUTPUT_DSI1,
+
+ /* OMAP_DSS_CHANNEL_DIGIT */
+ OMAP_DSS_OUTPUT_VENC,
+};
+
+static const enum omap_dss_output_id omap3630_dss_supported_outputs[] = {
+ /* OMAP_DSS_CHANNEL_LCD */
+ OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI |
+ OMAP_DSS_OUTPUT_DSI1,
+
+ /* OMAP_DSS_CHANNEL_DIGIT */
+ OMAP_DSS_OUTPUT_VENC,
+};
+
+static const enum omap_dss_output_id am43xx_dss_supported_outputs[] = {
+ /* OMAP_DSS_CHANNEL_LCD */
+ OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI,
+};
+
+static const enum omap_dss_output_id omap4_dss_supported_outputs[] = {
+ /* OMAP_DSS_CHANNEL_LCD */
+ OMAP_DSS_OUTPUT_DBI | OMAP_DSS_OUTPUT_DSI1,
+
+ /* OMAP_DSS_CHANNEL_DIGIT */
+ OMAP_DSS_OUTPUT_VENC | OMAP_DSS_OUTPUT_HDMI,
+
+ /* OMAP_DSS_CHANNEL_LCD2 */
+ OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI |
+ OMAP_DSS_OUTPUT_DSI2,
+};
+
+static const enum omap_dss_output_id omap5_dss_supported_outputs[] = {
+ /* OMAP_DSS_CHANNEL_LCD */
+ OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI |
+ OMAP_DSS_OUTPUT_DSI1 | OMAP_DSS_OUTPUT_DSI2,
+
+ /* OMAP_DSS_CHANNEL_DIGIT */
+ OMAP_DSS_OUTPUT_HDMI,
+
+ /* OMAP_DSS_CHANNEL_LCD2 */
+ OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI |
+ OMAP_DSS_OUTPUT_DSI1,
+
+ /* OMAP_DSS_CHANNEL_LCD3 */
+ OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI |
+ OMAP_DSS_OUTPUT_DSI2,
+};
+
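
These per-channel arrays are indexed by enum omap_channel and take over from the supported-outputs lookups of the removed dss_features.c; consumers now test the bitmask returned by the dss_get_supported_outputs() helper added earlier in this patch. A hedged sketch of a typical check (the caller is invented):

/* Illustrative only: refuse to use DSI1 on a channel that lacks it. */
static int example_check_dsi1(enum omap_channel channel)
{
	if (!(dss_get_supported_outputs(channel) & OMAP_DSS_OUTPUT_DSI1))
		return -ENODEV;

	return 0;
}
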
static const struct dss_features omap24xx_dss_feats = {
+ .model = DSS_MODEL_OMAP2,
/*
* fck div max is really 16, but the divider range has gaps. The range
* from 1 to 6 has no gaps, so let's use that as a max.
*/
.fck_div_max = 6,
+ .fck_freq_max = 133000000,
.dss_fck_multiplier = 2,
.parent_clk_name = "core_ck",
- .dpi_select_source = &dss_dpi_select_source_omap2_omap3,
.ports = omap2plus_ports,
.num_ports = ARRAY_SIZE(omap2plus_ports),
+ .outputs = omap2_dss_supported_outputs,
+ .ops = &dss_ops_omap2_omap3,
+ .dispc_clk_switch = { 0, 0 },
+ .has_lcd_clk_src = false,
};
static const struct dss_features omap34xx_dss_feats = {
+ .model = DSS_MODEL_OMAP3,
.fck_div_max = 16,
+ .fck_freq_max = 173000000,
.dss_fck_multiplier = 2,
.parent_clk_name = "dpll4_ck",
- .dpi_select_source = &dss_dpi_select_source_omap2_omap3,
.ports = omap34xx_ports,
+ .outputs = omap3430_dss_supported_outputs,
.num_ports = ARRAY_SIZE(omap34xx_ports),
+ .ops = &dss_ops_omap2_omap3,
+ .dispc_clk_switch = { 0, 0 },
+ .has_lcd_clk_src = false,
};
static const struct dss_features omap3630_dss_feats = {
+ .model = DSS_MODEL_OMAP3,
.fck_div_max = 32,
+ .fck_freq_max = 173000000,
.dss_fck_multiplier = 1,
.parent_clk_name = "dpll4_ck",
- .dpi_select_source = &dss_dpi_select_source_omap2_omap3,
.ports = omap2plus_ports,
.num_ports = ARRAY_SIZE(omap2plus_ports),
+ .outputs = omap3630_dss_supported_outputs,
+ .ops = &dss_ops_omap2_omap3,
+ .dispc_clk_switch = { 0, 0 },
+ .has_lcd_clk_src = false,
};
static const struct dss_features omap44xx_dss_feats = {
+ .model = DSS_MODEL_OMAP4,
.fck_div_max = 32,
+ .fck_freq_max = 186000000,
.dss_fck_multiplier = 1,
.parent_clk_name = "dpll_per_x2_ck",
- .dpi_select_source = &dss_dpi_select_source_omap4,
.ports = omap2plus_ports,
.num_ports = ARRAY_SIZE(omap2plus_ports),
- .select_lcd_source = &dss_lcd_clk_mux_omap4,
+ .outputs = omap4_dss_supported_outputs,
+ .ops = &dss_ops_omap4,
+ .dispc_clk_switch = { 9, 8 },
+ .has_lcd_clk_src = true,
};
static const struct dss_features omap54xx_dss_feats = {
+ .model = DSS_MODEL_OMAP5,
.fck_div_max = 64,
+ .fck_freq_max = 209250000,
.dss_fck_multiplier = 1,
.parent_clk_name = "dpll_per_x2_ck",
- .dpi_select_source = &dss_dpi_select_source_omap5,
.ports = omap2plus_ports,
.num_ports = ARRAY_SIZE(omap2plus_ports),
- .select_lcd_source = &dss_lcd_clk_mux_omap5,
+ .outputs = omap5_dss_supported_outputs,
+ .ops = &dss_ops_omap5,
+ .dispc_clk_switch = { 9, 7 },
+ .has_lcd_clk_src = true,
};
static const struct dss_features am43xx_dss_feats = {
+ .model = DSS_MODEL_OMAP3,
.fck_div_max = 0,
+ .fck_freq_max = 200000000,
.dss_fck_multiplier = 0,
.parent_clk_name = NULL,
- .dpi_select_source = &dss_dpi_select_source_omap2_omap3,
.ports = omap2plus_ports,
.num_ports = ARRAY_SIZE(omap2plus_ports),
+ .outputs = am43xx_dss_supported_outputs,
+ .ops = &dss_ops_omap2_omap3,
+ .dispc_clk_switch = { 0, 0 },
+ .has_lcd_clk_src = true,
};
static const struct dss_features dra7xx_dss_feats = {
+ .model = DSS_MODEL_DRA7,
.fck_div_max = 64,
+ .fck_freq_max = 209250000,
.dss_fck_multiplier = 1,
.parent_clk_name = "dpll_per_x2_ck",
- .dpi_select_source = &dss_dpi_select_source_dra7xx,
.ports = dra7xx_ports,
.num_ports = ARRAY_SIZE(dra7xx_ports),
- .select_lcd_source = &dss_lcd_clk_mux_dra7,
+ .outputs = omap5_dss_supported_outputs,
+ .ops = &dss_ops_dra7,
+ .dispc_clk_switch = { 9, 7 },
+ .has_lcd_clk_src = true,
};
-static int dss_init_features(struct platform_device *pdev)
-{
- const struct dss_features *src;
- struct dss_features *dst;
-
- dst = devm_kzalloc(&pdev->dev, sizeof(*dst), GFP_KERNEL);
- if (!dst) {
- dev_err(&pdev->dev, "Failed to allocate local DSS Features\n");
- return -ENOMEM;
- }
-
- switch (omapdss_get_version()) {
- case OMAPDSS_VER_OMAP24xx:
- src = &omap24xx_dss_feats;
- break;
-
- case OMAPDSS_VER_OMAP34xx_ES1:
- case OMAPDSS_VER_OMAP34xx_ES3:
- case OMAPDSS_VER_AM35xx:
- src = &omap34xx_dss_feats;
- break;
-
- case OMAPDSS_VER_OMAP3630:
- src = &omap3630_dss_feats;
- break;
-
- case OMAPDSS_VER_OMAP4430_ES1:
- case OMAPDSS_VER_OMAP4430_ES2:
- case OMAPDSS_VER_OMAP4:
- src = &omap44xx_dss_feats;
- break;
-
- case OMAPDSS_VER_OMAP5:
- src = &omap54xx_dss_feats;
- break;
-
- case OMAPDSS_VER_AM43xx:
- src = &am43xx_dss_feats;
- break;
-
- case OMAPDSS_VER_DRA7xx:
- src = &dra7xx_dss_feats;
- break;
-
- default:
- return -ENODEV;
- }
-
- memcpy(dst, src, sizeof(*dst));
- dss.feat = dst;
-
- return 0;
-}
-
static int dss_init_ports(struct platform_device *pdev)
{
struct device_node *parent = pdev->dev.of_node;
@@ -1045,7 +1181,7 @@ static int dss_init_ports(struct platform_device *pdev)
switch (dss.feat->ports[i]) {
case OMAP_DISPLAY_TYPE_DPI:
- dpi_init_port(pdev, port);
+ dpi_init_port(pdev, port, dss.feat->model);
break;
case OMAP_DISPLAY_TYPE_SDI:
sdi_init_port(pdev, port);
@@ -1144,6 +1280,23 @@ static int dss_video_pll_probe(struct platform_device *pdev)
}
/* DSS HW IP initialisation */
+static const struct of_device_id dss_of_match[] = {
+ { .compatible = "ti,omap2-dss", .data = &omap24xx_dss_feats },
+ { .compatible = "ti,omap3-dss", .data = &omap3630_dss_feats },
+ { .compatible = "ti,omap4-dss", .data = &omap44xx_dss_feats },
+ { .compatible = "ti,omap5-dss", .data = &omap54xx_dss_feats },
+ { .compatible = "ti,dra7-dss", .data = &dra7xx_dss_feats },
+ {},
+};
+MODULE_DEVICE_TABLE(of, dss_of_match);
+
+static const struct soc_device_attribute dss_soc_devices[] = {
+ { .machine = "OMAP3430/3530", .data = &omap34xx_dss_feats },
+ { .machine = "AM35??", .data = &omap34xx_dss_feats },
+ { .family = "AM43xx", .data = &am43xx_dss_feats },
+ { /* sentinel */ }
+};
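
soc_device_match() tries these entries in order and glob-matches every non-NULL field (machine, family, revision) against the strings the SoC bus reports, so dss_probe() below can fall back to the DT compatible data when no row matches. A small userspace illustration of the same pattern semantics — the kernel uses its own glob_match(), but fnmatch() should behave the same way for these simple patterns:

#include <fnmatch.h>
#include <stdio.h>

int main(void)
{
	/* Patterns taken from dsi_soc_devices/dss_soc_devices above. */
	const char *patterns[] = { "OMAP3[45]*", "AM35??", "AM35*" };
	const char *machine = "OMAP3430";
	int i;

	for (i = 0; i < 3; i++)
		printf("%-11s vs %s -> %s\n", patterns[i], machine,
		       fnmatch(patterns[i], machine, 0) ? "no match" : "match");

	return 0;
}
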
+
static int dss_bind(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -1151,12 +1304,6 @@ static int dss_bind(struct device *dev)
u32 rev;
int r;
- dss.pdev = pdev;
-
- r = dss_init_features(dss.pdev);
- if (r)
- return r;
-
dss_mem = platform_get_resource(dss.pdev, IORESOURCE_MEM, 0);
dss.base = devm_ioremap_resource(&pdev->dev, dss_mem);
if (IS_ERR(dss.base))
@@ -1288,15 +1435,34 @@ static int dss_add_child_component(struct device *dev, void *data)
static int dss_probe(struct platform_device *pdev)
{
+ const struct soc_device_attribute *soc;
struct component_match *match = NULL;
int r;
+ dss.pdev = pdev;
+
+ /*
+ * The various OMAP3-based SoCs can't be told apart using the compatible
+ * string, so use SoC device matching instead.
+ */
+ soc = soc_device_match(dss_soc_devices);
+ if (soc)
+ dss.feat = soc->data;
+ else
+ dss.feat = of_match_device(dss_of_match, &pdev->dev)->data;
+
+ r = dss_initialize_debugfs();
+ if (r)
+ return r;
+
/* add all the child devices as components */
device_for_each_child(&pdev->dev, &match, dss_add_child_component);
r = component_master_add_with_match(&pdev->dev, &dss_component_ops, match);
- if (r)
+ if (r) {
+ dss_uninitialize_debugfs();
return r;
+ }
return 0;
}
@@ -1304,9 +1470,27 @@ static int dss_probe(struct platform_device *pdev)
static int dss_remove(struct platform_device *pdev)
{
component_master_del(&pdev->dev, &dss_component_ops);
+
+ dss_uninitialize_debugfs();
+
return 0;
}
+static void dss_shutdown(struct platform_device *pdev)
+{
+ struct omap_dss_device *dssdev = NULL;
+
+ DSSDBG("shutdown\n");
+
+ for_each_dss_dev(dssdev) {
+ if (!dssdev->driver)
+ continue;
+
+ if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
+ dssdev->driver->disable(dssdev);
+ }
+}
+
static int dss_runtime_suspend(struct device *dev)
{
dss_save_context();
@@ -1343,20 +1527,10 @@ static const struct dev_pm_ops dss_pm_ops = {
.runtime_resume = dss_runtime_resume,
};
-static const struct of_device_id dss_of_match[] = {
- { .compatible = "ti,omap2-dss", },
- { .compatible = "ti,omap3-dss", },
- { .compatible = "ti,omap4-dss", },
- { .compatible = "ti,omap5-dss", },
- { .compatible = "ti,dra7-dss", },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, dss_of_match);
-
static struct platform_driver omap_dsshw_driver = {
.probe = dss_probe,
.remove = dss_remove,
+ .shutdown = dss_shutdown,
.driver = {
.name = "omapdss_dss",
.pm = &dss_pm_ops,
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.h b/drivers/gpu/drm/omapdrm/dss/dss.h
index 8dbf35f3ab23..ed465572491e 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.h
+++ b/drivers/gpu/drm/omapdrm/dss/dss.h
@@ -27,6 +27,9 @@
#include "omapdss.h"
+#define MAX_DSS_LCD_MANAGERS 3
+#define MAX_NUM_DSI 2
+
#ifdef pr_fmt
#undef pr_fmt
#endif
@@ -72,6 +75,14 @@
#define FLD_MOD(orig, val, start, end) \
(((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))
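
The dss_reg_field type added further down in this header carries {start, end} bit positions (for example .dispc_clk_switch = { 9, 8 } on OMAP4 in dss.c) that feed REG_FLD_MOD()/FLD_MOD() above, where start is the field's high bit and end its low bit. A standalone sketch of the arithmetic; FLD_MASK and FLD_VAL are reconstructed from the companion macros in dss.h and should be treated as an assumption:

#include <stdio.h>

#define FLD_MASK(start, end)	(((1 << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
#define FLD_MOD(orig, val, start, end) \
	(((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))

int main(void)
{
	unsigned int dss_control = 0xffffffff;

	/* OMAP4: DISPC_CLK_SWITCH occupies DSS_CONTROL bits 9:8. */
	dss_control = FLD_MOD(dss_control, 0x1, 9, 8);

	printf("DSS_CONTROL = 0x%08x\n", dss_control);	/* 0xfffffdff */
	return 0;
}
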
+enum dss_model {
+ DSS_MODEL_OMAP2,
+ DSS_MODEL_OMAP3,
+ DSS_MODEL_OMAP4,
+ DSS_MODEL_OMAP5,
+ DSS_MODEL_DRA7,
+};
+
enum dss_io_pad_mode {
DSS_IO_PAD_MODE_RESET,
DSS_IO_PAD_MODE_RFBI,
@@ -174,6 +185,9 @@ struct dss_pll_hw {
bool has_freqsel;
bool has_selfreqdco;
bool has_refsel;
+
+ /* DRA7 errata i886: use high N & M to avoid jitter */
+ bool errata_i886;
};
struct dss_pll {
@@ -192,6 +206,11 @@ struct dss_pll {
struct dss_pll_clock_info cinfo;
};
+/* Defines a generic omap register field */
+struct dss_reg_field {
+ u8 start, end;
+};
+
struct dispc_clock_info {
/* rates that we get with dividers below */
unsigned long lck;
@@ -219,10 +238,11 @@ struct seq_file;
struct platform_device;
/* core */
-int dss_dsi_enable_pads(int dsi_id, unsigned lane_mask);
-void dss_dsi_disable_pads(int dsi_id, unsigned lane_mask);
-int dss_set_min_bus_tput(struct device *dev, unsigned long tput);
-int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *));
+static inline int dss_set_min_bus_tput(struct device *dev, unsigned long tput)
+{
+ /* To be implemented once the OMAP platform provides this feature */
+ return 0;
+}
static inline bool dss_mgr_is_lcd(enum omap_channel id)
{
@@ -234,6 +254,16 @@ static inline bool dss_mgr_is_lcd(enum omap_channel id)
}
/* DSS */
+#if defined(CONFIG_OMAP2_DSS_DEBUGFS)
+int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *));
+#else
+static inline int dss_debugfs_create_file(const char *name,
+ void (*write)(struct seq_file *))
+{
+ return 0;
+}
+#endif /* CONFIG_OMAP2_DSS_DEBUGFS */
+
int dss_init_platform_driver(void) __init;
void dss_uninit_platform_driver(void);
@@ -241,6 +271,8 @@ int dss_runtime_get(void);
void dss_runtime_put(void);
unsigned long dss_get_dispc_clk_rate(void);
+unsigned long dss_get_max_fck_rate(void);
+enum omap_dss_output_id dss_get_supported_outputs(enum omap_channel channel);
int dss_dpi_select_source(int port, enum omap_channel channel);
void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select);
enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void);
@@ -252,10 +284,6 @@ struct dss_pll *dss_video_pll_init(struct platform_device *pdev, int id,
struct regulator *regulator);
void dss_video_pll_uninit(struct dss_pll *pll);
-#if defined(CONFIG_OMAP2_DSS_DEBUGFS)
-void dss_debug_dump_clocks(struct seq_file *s);
-#endif
-
void dss_ctrl_pll_enable(enum dss_pll_id pll_id, bool enable);
void dss_sdi_init(int datapairs);
@@ -312,11 +340,12 @@ void dsi_irq_handler(void);
/* DPI */
#ifdef CONFIG_OMAP2_DSS_DPI
-int dpi_init_port(struct platform_device *pdev, struct device_node *port);
+int dpi_init_port(struct platform_device *pdev, struct device_node *port,
+ enum dss_model dss_model);
void dpi_uninit_port(struct device_node *port);
#else
static inline int dpi_init_port(struct platform_device *pdev,
- struct device_node *port)
+ struct device_node *port, enum dss_model dss_model)
{
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/dss/dss_features.c b/drivers/gpu/drm/omapdrm/dss/dss_features.c
deleted file mode 100644
index 0e599710dd95..000000000000
--- a/drivers/gpu/drm/omapdrm/dss/dss_features.c
+++ /dev/null
@@ -1,905 +0,0 @@
-/*
- * linux/drivers/video/omap2/dss/dss_features.c
- *
- * Copyright (C) 2010 Texas Instruments
- * Author: Archit Taneja <archit@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <drm/drm_fourcc.h>
-
-#include "omapdss.h"
-#include "dss.h"
-#include "dss_features.h"
-
-/* Defines a generic omap register field */
-struct dss_reg_field {
- u8 start, end;
-};
-
-struct dss_param_range {
- int min, max;
-};
-
-struct omap_dss_features {
- const struct dss_reg_field *reg_fields;
- const int num_reg_fields;
-
- const enum dss_feat_id *features;
- const int num_features;
-
- const int num_mgrs;
- const int num_ovls;
- const enum omap_display_type *supported_displays;
- const enum omap_dss_output_id *supported_outputs;
- const u32 **supported_color_modes;
- const enum omap_overlay_caps *overlay_caps;
- const struct dss_param_range *dss_params;
-
- const u32 buffer_size_unit;
- const u32 burst_size_unit;
-};
-
-/* This struct is assigned to one of the below during initialization */
-static const struct omap_dss_features *omap_current_dss_features;
-
-static const struct dss_reg_field omap2_dss_reg_fields[] = {
- [FEAT_REG_FIRHINC] = { 11, 0 },
- [FEAT_REG_FIRVINC] = { 27, 16 },
- [FEAT_REG_FIFOLOWTHRESHOLD] = { 8, 0 },
- [FEAT_REG_FIFOHIGHTHRESHOLD] = { 24, 16 },
- [FEAT_REG_FIFOSIZE] = { 8, 0 },
- [FEAT_REG_HORIZONTALACCU] = { 9, 0 },
- [FEAT_REG_VERTICALACCU] = { 25, 16 },
- [FEAT_REG_DISPC_CLK_SWITCH] = { 0, 0 },
-};
-
-static const struct dss_reg_field omap3_dss_reg_fields[] = {
- [FEAT_REG_FIRHINC] = { 12, 0 },
- [FEAT_REG_FIRVINC] = { 28, 16 },
- [FEAT_REG_FIFOLOWTHRESHOLD] = { 11, 0 },
- [FEAT_REG_FIFOHIGHTHRESHOLD] = { 27, 16 },
- [FEAT_REG_FIFOSIZE] = { 10, 0 },
- [FEAT_REG_HORIZONTALACCU] = { 9, 0 },
- [FEAT_REG_VERTICALACCU] = { 25, 16 },
- [FEAT_REG_DISPC_CLK_SWITCH] = { 0, 0 },
-};
-
-static const struct dss_reg_field am43xx_dss_reg_fields[] = {
- [FEAT_REG_FIRHINC] = { 12, 0 },
- [FEAT_REG_FIRVINC] = { 28, 16 },
- [FEAT_REG_FIFOLOWTHRESHOLD] = { 11, 0 },
- [FEAT_REG_FIFOHIGHTHRESHOLD] = { 27, 16 },
- [FEAT_REG_FIFOSIZE] = { 10, 0 },
- [FEAT_REG_HORIZONTALACCU] = { 9, 0 },
- [FEAT_REG_VERTICALACCU] = { 25, 16 },
- [FEAT_REG_DISPC_CLK_SWITCH] = { 0, 0 },
-};
-
-static const struct dss_reg_field omap4_dss_reg_fields[] = {
- [FEAT_REG_FIRHINC] = { 12, 0 },
- [FEAT_REG_FIRVINC] = { 28, 16 },
- [FEAT_REG_FIFOLOWTHRESHOLD] = { 15, 0 },
- [FEAT_REG_FIFOHIGHTHRESHOLD] = { 31, 16 },
- [FEAT_REG_FIFOSIZE] = { 15, 0 },
- [FEAT_REG_HORIZONTALACCU] = { 10, 0 },
- [FEAT_REG_VERTICALACCU] = { 26, 16 },
- [FEAT_REG_DISPC_CLK_SWITCH] = { 9, 8 },
-};
-
-static const struct dss_reg_field omap5_dss_reg_fields[] = {
- [FEAT_REG_FIRHINC] = { 12, 0 },
- [FEAT_REG_FIRVINC] = { 28, 16 },
- [FEAT_REG_FIFOLOWTHRESHOLD] = { 15, 0 },
- [FEAT_REG_FIFOHIGHTHRESHOLD] = { 31, 16 },
- [FEAT_REG_FIFOSIZE] = { 15, 0 },
- [FEAT_REG_HORIZONTALACCU] = { 10, 0 },
- [FEAT_REG_VERTICALACCU] = { 26, 16 },
- [FEAT_REG_DISPC_CLK_SWITCH] = { 9, 7 },
-};
-
-static const enum omap_display_type omap2_dss_supported_displays[] = {
- /* OMAP_DSS_CHANNEL_LCD */
- OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DBI,
-
- /* OMAP_DSS_CHANNEL_DIGIT */
- OMAP_DISPLAY_TYPE_VENC,
-};
-
-static const enum omap_display_type omap3430_dss_supported_displays[] = {
- /* OMAP_DSS_CHANNEL_LCD */
- OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DBI |
- OMAP_DISPLAY_TYPE_SDI | OMAP_DISPLAY_TYPE_DSI,
-
- /* OMAP_DSS_CHANNEL_DIGIT */
- OMAP_DISPLAY_TYPE_VENC,
-};
-
-static const enum omap_display_type omap3630_dss_supported_displays[] = {
- /* OMAP_DSS_CHANNEL_LCD */
- OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DBI |
- OMAP_DISPLAY_TYPE_DSI,
-
- /* OMAP_DSS_CHANNEL_DIGIT */
- OMAP_DISPLAY_TYPE_VENC,
-};
-
-static const enum omap_display_type am43xx_dss_supported_displays[] = {
- /* OMAP_DSS_CHANNEL_LCD */
- OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DBI,
-};
-
-static const enum omap_display_type omap4_dss_supported_displays[] = {
- /* OMAP_DSS_CHANNEL_LCD */
- OMAP_DISPLAY_TYPE_DBI | OMAP_DISPLAY_TYPE_DSI,
-
- /* OMAP_DSS_CHANNEL_DIGIT */
- OMAP_DISPLAY_TYPE_VENC | OMAP_DISPLAY_TYPE_HDMI,
-
- /* OMAP_DSS_CHANNEL_LCD2 */
- OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DBI |
- OMAP_DISPLAY_TYPE_DSI,
-};
-
-static const enum omap_display_type omap5_dss_supported_displays[] = {
- /* OMAP_DSS_CHANNEL_LCD */
- OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DBI |
- OMAP_DISPLAY_TYPE_DSI,
-
- /* OMAP_DSS_CHANNEL_DIGIT */
- OMAP_DISPLAY_TYPE_HDMI | OMAP_DISPLAY_TYPE_DPI,
-
- /* OMAP_DSS_CHANNEL_LCD2 */
- OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DBI |
- OMAP_DISPLAY_TYPE_DSI,
-};
-
-static const enum omap_dss_output_id omap2_dss_supported_outputs[] = {
- /* OMAP_DSS_CHANNEL_LCD */
- OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI,
-
- /* OMAP_DSS_CHANNEL_DIGIT */
- OMAP_DSS_OUTPUT_VENC,
-};
-
-static const enum omap_dss_output_id omap3430_dss_supported_outputs[] = {
- /* OMAP_DSS_CHANNEL_LCD */
- OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI |
- OMAP_DSS_OUTPUT_SDI | OMAP_DSS_OUTPUT_DSI1,
-
- /* OMAP_DSS_CHANNEL_DIGIT */
- OMAP_DSS_OUTPUT_VENC,
-};
-
-static const enum omap_dss_output_id omap3630_dss_supported_outputs[] = {
- /* OMAP_DSS_CHANNEL_LCD */
- OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI |
- OMAP_DSS_OUTPUT_DSI1,
-
- /* OMAP_DSS_CHANNEL_DIGIT */
- OMAP_DSS_OUTPUT_VENC,
-};
-
-static const enum omap_dss_output_id am43xx_dss_supported_outputs[] = {
- /* OMAP_DSS_CHANNEL_LCD */
- OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI,
-};
-
-static const enum omap_dss_output_id omap4_dss_supported_outputs[] = {
- /* OMAP_DSS_CHANNEL_LCD */
- OMAP_DSS_OUTPUT_DBI | OMAP_DSS_OUTPUT_DSI1,
-
- /* OMAP_DSS_CHANNEL_DIGIT */
- OMAP_DSS_OUTPUT_VENC | OMAP_DSS_OUTPUT_HDMI,
-
- /* OMAP_DSS_CHANNEL_LCD2 */
- OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI |
- OMAP_DSS_OUTPUT_DSI2,
-};
-
-static const enum omap_dss_output_id omap5_dss_supported_outputs[] = {
- /* OMAP_DSS_CHANNEL_LCD */
- OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI |
- OMAP_DSS_OUTPUT_DSI1 | OMAP_DSS_OUTPUT_DSI2,
-
- /* OMAP_DSS_CHANNEL_DIGIT */
- OMAP_DSS_OUTPUT_HDMI,
-
- /* OMAP_DSS_CHANNEL_LCD2 */
- OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI |
- OMAP_DSS_OUTPUT_DSI1,
-
- /* OMAP_DSS_CHANNEL_LCD3 */
- OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI |
- OMAP_DSS_OUTPUT_DSI2,
-};
-
-#define COLOR_ARRAY(arr...) (const u32[]) { arr, 0 }
-
-static const u32 *omap2_dss_supported_color_modes[] = {
-
- /* OMAP_DSS_GFX */
- COLOR_ARRAY(
- DRM_FORMAT_RGBX4444, DRM_FORMAT_RGB565,
- DRM_FORMAT_XRGB8888, DRM_FORMAT_RGB888),
-
- /* OMAP_DSS_VIDEO1 */
- COLOR_ARRAY(
- DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGB888, DRM_FORMAT_YUYV,
- DRM_FORMAT_UYVY),
-
- /* OMAP_DSS_VIDEO2 */
- COLOR_ARRAY(
- DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGB888, DRM_FORMAT_YUYV,
- DRM_FORMAT_UYVY),
-};
-
-static const u32 *omap3_dss_supported_color_modes[] = {
- /* OMAP_DSS_GFX */
- COLOR_ARRAY(
- DRM_FORMAT_RGBX4444, DRM_FORMAT_ARGB4444,
- DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGB888, DRM_FORMAT_ARGB8888,
- DRM_FORMAT_RGBA8888, DRM_FORMAT_RGBX8888),
-
- /* OMAP_DSS_VIDEO1 */
- COLOR_ARRAY(
- DRM_FORMAT_XRGB8888, DRM_FORMAT_RGB888,
- DRM_FORMAT_RGBX4444, DRM_FORMAT_RGB565,
- DRM_FORMAT_YUYV, DRM_FORMAT_UYVY),
-
- /* OMAP_DSS_VIDEO2 */
- COLOR_ARRAY(
- DRM_FORMAT_RGBX4444, DRM_FORMAT_ARGB4444,
- DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGB888, DRM_FORMAT_YUYV,
- DRM_FORMAT_UYVY, DRM_FORMAT_ARGB8888,
- DRM_FORMAT_RGBA8888, DRM_FORMAT_RGBX8888),
-};
-
-static const u32 *omap4_dss_supported_color_modes[] = {
- /* OMAP_DSS_GFX */
- COLOR_ARRAY(
- DRM_FORMAT_RGBX4444, DRM_FORMAT_ARGB4444,
- DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGB888, DRM_FORMAT_ARGB8888,
- DRM_FORMAT_RGBA8888, DRM_FORMAT_RGBX8888,
- DRM_FORMAT_ARGB1555, DRM_FORMAT_XRGB4444,
- DRM_FORMAT_RGBA4444, DRM_FORMAT_XRGB1555),
-
- /* OMAP_DSS_VIDEO1 */
- COLOR_ARRAY(
- DRM_FORMAT_RGB565, DRM_FORMAT_RGBX4444,
- DRM_FORMAT_YUYV, DRM_FORMAT_ARGB1555,
- DRM_FORMAT_RGBA8888, DRM_FORMAT_NV12,
- DRM_FORMAT_RGBA4444, DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGB888, DRM_FORMAT_UYVY,
- DRM_FORMAT_ARGB4444, DRM_FORMAT_XRGB1555,
- DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB4444,
- DRM_FORMAT_RGBX8888),
-
- /* OMAP_DSS_VIDEO2 */
- COLOR_ARRAY(
- DRM_FORMAT_RGB565, DRM_FORMAT_RGBX4444,
- DRM_FORMAT_YUYV, DRM_FORMAT_ARGB1555,
- DRM_FORMAT_RGBA8888, DRM_FORMAT_NV12,
- DRM_FORMAT_RGBA4444, DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGB888, DRM_FORMAT_UYVY,
- DRM_FORMAT_ARGB4444, DRM_FORMAT_XRGB1555,
- DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB4444,
- DRM_FORMAT_RGBX8888),
-
- /* OMAP_DSS_VIDEO3 */
- COLOR_ARRAY(
- DRM_FORMAT_RGB565, DRM_FORMAT_RGBX4444,
- DRM_FORMAT_YUYV, DRM_FORMAT_ARGB1555,
- DRM_FORMAT_RGBA8888, DRM_FORMAT_NV12,
- DRM_FORMAT_RGBA4444, DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGB888, DRM_FORMAT_UYVY,
- DRM_FORMAT_ARGB4444, DRM_FORMAT_XRGB1555,
- DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB4444,
- DRM_FORMAT_RGBX8888),
-
- /* OMAP_DSS_WB */
- COLOR_ARRAY(
- DRM_FORMAT_RGB565, DRM_FORMAT_RGBX4444,
- DRM_FORMAT_YUYV, DRM_FORMAT_ARGB1555,
- DRM_FORMAT_RGBA8888, DRM_FORMAT_NV12,
- DRM_FORMAT_RGBA4444, DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGB888, DRM_FORMAT_UYVY,
- DRM_FORMAT_ARGB4444, DRM_FORMAT_XRGB1555,
- DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB4444,
- DRM_FORMAT_RGBX8888),
-};
-
-static const enum omap_overlay_caps omap2_dss_overlay_caps[] = {
- /* OMAP_DSS_GFX */
- OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION,
-
- /* OMAP_DSS_VIDEO1 */
- OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_POS |
- OMAP_DSS_OVL_CAP_REPLICATION,
-
- /* OMAP_DSS_VIDEO2 */
- OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_POS |
- OMAP_DSS_OVL_CAP_REPLICATION,
-};
-
-static const enum omap_overlay_caps omap3430_dss_overlay_caps[] = {
- /* OMAP_DSS_GFX */
- OMAP_DSS_OVL_CAP_GLOBAL_ALPHA | OMAP_DSS_OVL_CAP_POS |
- OMAP_DSS_OVL_CAP_REPLICATION,
-
- /* OMAP_DSS_VIDEO1 */
- OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_POS |
- OMAP_DSS_OVL_CAP_REPLICATION,
-
- /* OMAP_DSS_VIDEO2 */
- OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_GLOBAL_ALPHA |
- OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION,
-};
-
-static const enum omap_overlay_caps omap3630_dss_overlay_caps[] = {
- /* OMAP_DSS_GFX */
- OMAP_DSS_OVL_CAP_GLOBAL_ALPHA | OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA |
- OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION,
-
- /* OMAP_DSS_VIDEO1 */
- OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_POS |
- OMAP_DSS_OVL_CAP_REPLICATION,
-
- /* OMAP_DSS_VIDEO2 */
- OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_GLOBAL_ALPHA |
- OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA | OMAP_DSS_OVL_CAP_POS |
- OMAP_DSS_OVL_CAP_REPLICATION,
-};
-
-static const enum omap_overlay_caps omap4_dss_overlay_caps[] = {
- /* OMAP_DSS_GFX */
- OMAP_DSS_OVL_CAP_GLOBAL_ALPHA | OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA |
- OMAP_DSS_OVL_CAP_ZORDER | OMAP_DSS_OVL_CAP_POS |
- OMAP_DSS_OVL_CAP_REPLICATION,
-
- /* OMAP_DSS_VIDEO1 */
- OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_GLOBAL_ALPHA |
- OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA | OMAP_DSS_OVL_CAP_ZORDER |
- OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION,
-
- /* OMAP_DSS_VIDEO2 */
- OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_GLOBAL_ALPHA |
- OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA | OMAP_DSS_OVL_CAP_ZORDER |
- OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION,
-
- /* OMAP_DSS_VIDEO3 */
- OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_GLOBAL_ALPHA |
- OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA | OMAP_DSS_OVL_CAP_ZORDER |
- OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION,
-};
-
-static const struct dss_param_range omap2_dss_param_range[] = {
- [FEAT_PARAM_DSS_FCK] = { 0, 133000000 },
- [FEAT_PARAM_DSS_PCD] = { 2, 255 },
- [FEAT_PARAM_DOWNSCALE] = { 1, 2 },
- /*
- * Assuming the line width buffer to be 768 pixels as OMAP2 DISPC
- * scaler cannot scale a image with width more than 768.
- */
- [FEAT_PARAM_LINEWIDTH] = { 1, 768 },
-};
-
-static const struct dss_param_range omap3_dss_param_range[] = {
- [FEAT_PARAM_DSS_FCK] = { 0, 173000000 },
- [FEAT_PARAM_DSS_PCD] = { 1, 255 },
- [FEAT_PARAM_DSIPLL_LPDIV] = { 1, (1 << 13) - 1},
- [FEAT_PARAM_DSI_FCK] = { 0, 173000000 },
- [FEAT_PARAM_DOWNSCALE] = { 1, 4 },
- [FEAT_PARAM_LINEWIDTH] = { 1, 1024 },
-};
-
-static const struct dss_param_range am43xx_dss_param_range[] = {
- [FEAT_PARAM_DSS_FCK] = { 0, 200000000 },
- [FEAT_PARAM_DSS_PCD] = { 1, 255 },
- [FEAT_PARAM_DOWNSCALE] = { 1, 4 },
- [FEAT_PARAM_LINEWIDTH] = { 1, 1024 },
-};
-
-static const struct dss_param_range omap4_dss_param_range[] = {
- [FEAT_PARAM_DSS_FCK] = { 0, 186000000 },
- [FEAT_PARAM_DSS_PCD] = { 1, 255 },
- [FEAT_PARAM_DSIPLL_LPDIV] = { 0, (1 << 13) - 1 },
- [FEAT_PARAM_DSI_FCK] = { 0, 170000000 },
- [FEAT_PARAM_DOWNSCALE] = { 1, 4 },
- [FEAT_PARAM_LINEWIDTH] = { 1, 2048 },
-};
-
-static const struct dss_param_range omap5_dss_param_range[] = {
- [FEAT_PARAM_DSS_FCK] = { 0, 209250000 },
- [FEAT_PARAM_DSS_PCD] = { 1, 255 },
- [FEAT_PARAM_DSIPLL_LPDIV] = { 0, (1 << 13) - 1 },
- [FEAT_PARAM_DSI_FCK] = { 0, 209250000 },
- [FEAT_PARAM_DOWNSCALE] = { 1, 4 },
- [FEAT_PARAM_LINEWIDTH] = { 1, 2048 },
-};
-
-static const enum dss_feat_id omap2_dss_feat_list[] = {
- FEAT_LCDENABLEPOL,
- FEAT_LCDENABLESIGNAL,
- FEAT_PCKFREEENABLE,
- FEAT_FUNCGATED,
- FEAT_ROWREPEATENABLE,
- FEAT_RESIZECONF,
-};
-
-static const enum dss_feat_id omap3430_dss_feat_list[] = {
- FEAT_LCDENABLEPOL,
- FEAT_LCDENABLESIGNAL,
- FEAT_PCKFREEENABLE,
- FEAT_FUNCGATED,
- FEAT_LINEBUFFERSPLIT,
- FEAT_ROWREPEATENABLE,
- FEAT_RESIZECONF,
- FEAT_DSI_REVERSE_TXCLKESC,
- FEAT_VENC_REQUIRES_TV_DAC_CLK,
- FEAT_CPR,
- FEAT_PRELOAD,
- FEAT_FIR_COEF_V,
- FEAT_ALPHA_FIXED_ZORDER,
- FEAT_FIFO_MERGE,
- FEAT_OMAP3_DSI_FIFO_BUG,
- FEAT_DPI_USES_VDDS_DSI,
-};
-
-static const enum dss_feat_id am35xx_dss_feat_list[] = {
- FEAT_LCDENABLEPOL,
- FEAT_LCDENABLESIGNAL,
- FEAT_PCKFREEENABLE,
- FEAT_FUNCGATED,
- FEAT_LINEBUFFERSPLIT,
- FEAT_ROWREPEATENABLE,
- FEAT_RESIZECONF,
- FEAT_DSI_REVERSE_TXCLKESC,
- FEAT_VENC_REQUIRES_TV_DAC_CLK,
- FEAT_CPR,
- FEAT_PRELOAD,
- FEAT_FIR_COEF_V,
- FEAT_ALPHA_FIXED_ZORDER,
- FEAT_FIFO_MERGE,
- FEAT_OMAP3_DSI_FIFO_BUG,
-};
-
-static const enum dss_feat_id am43xx_dss_feat_list[] = {
- FEAT_LCDENABLEPOL,
- FEAT_LCDENABLESIGNAL,
- FEAT_PCKFREEENABLE,
- FEAT_FUNCGATED,
- FEAT_LINEBUFFERSPLIT,
- FEAT_ROWREPEATENABLE,
- FEAT_RESIZECONF,
- FEAT_CPR,
- FEAT_PRELOAD,
- FEAT_FIR_COEF_V,
- FEAT_ALPHA_FIXED_ZORDER,
- FEAT_FIFO_MERGE,
-};
-
-static const enum dss_feat_id omap3630_dss_feat_list[] = {
- FEAT_LCDENABLEPOL,
- FEAT_LCDENABLESIGNAL,
- FEAT_PCKFREEENABLE,
- FEAT_FUNCGATED,
- FEAT_LINEBUFFERSPLIT,
- FEAT_ROWREPEATENABLE,
- FEAT_RESIZECONF,
- FEAT_DSI_PLL_PWR_BUG,
- FEAT_CPR,
- FEAT_PRELOAD,
- FEAT_FIR_COEF_V,
- FEAT_ALPHA_FIXED_ZORDER,
- FEAT_FIFO_MERGE,
- FEAT_OMAP3_DSI_FIFO_BUG,
- FEAT_DPI_USES_VDDS_DSI,
-};
-
-static const enum dss_feat_id omap4430_es1_0_dss_feat_list[] = {
- FEAT_MGR_LCD2,
- FEAT_CORE_CLK_DIV,
- FEAT_LCD_CLK_SRC,
- FEAT_DSI_DCS_CMD_CONFIG_VC,
- FEAT_DSI_VC_OCP_WIDTH,
- FEAT_DSI_GNQ,
- FEAT_HANDLE_UV_SEPARATE,
- FEAT_ATTR2,
- FEAT_CPR,
- FEAT_PRELOAD,
- FEAT_FIR_COEF_V,
- FEAT_ALPHA_FREE_ZORDER,
- FEAT_FIFO_MERGE,
- FEAT_BURST_2D,
-};
-
-static const enum dss_feat_id omap4430_es2_0_1_2_dss_feat_list[] = {
- FEAT_MGR_LCD2,
- FEAT_CORE_CLK_DIV,
- FEAT_LCD_CLK_SRC,
- FEAT_DSI_DCS_CMD_CONFIG_VC,
- FEAT_DSI_VC_OCP_WIDTH,
- FEAT_DSI_GNQ,
- FEAT_HDMI_CTS_SWMODE,
- FEAT_HANDLE_UV_SEPARATE,
- FEAT_ATTR2,
- FEAT_CPR,
- FEAT_PRELOAD,
- FEAT_FIR_COEF_V,
- FEAT_ALPHA_FREE_ZORDER,
- FEAT_FIFO_MERGE,
- FEAT_BURST_2D,
-};
-
-static const enum dss_feat_id omap4_dss_feat_list[] = {
- FEAT_MGR_LCD2,
- FEAT_CORE_CLK_DIV,
- FEAT_LCD_CLK_SRC,
- FEAT_DSI_DCS_CMD_CONFIG_VC,
- FEAT_DSI_VC_OCP_WIDTH,
- FEAT_DSI_GNQ,
- FEAT_HDMI_CTS_SWMODE,
- FEAT_HDMI_AUDIO_USE_MCLK,
- FEAT_HANDLE_UV_SEPARATE,
- FEAT_ATTR2,
- FEAT_CPR,
- FEAT_PRELOAD,
- FEAT_FIR_COEF_V,
- FEAT_ALPHA_FREE_ZORDER,
- FEAT_FIFO_MERGE,
- FEAT_BURST_2D,
-};
-
-static const enum dss_feat_id omap5_dss_feat_list[] = {
- FEAT_MGR_LCD2,
- FEAT_MGR_LCD3,
- FEAT_CORE_CLK_DIV,
- FEAT_LCD_CLK_SRC,
- FEAT_DSI_DCS_CMD_CONFIG_VC,
- FEAT_DSI_VC_OCP_WIDTH,
- FEAT_DSI_GNQ,
- FEAT_HDMI_CTS_SWMODE,
- FEAT_HDMI_AUDIO_USE_MCLK,
- FEAT_HANDLE_UV_SEPARATE,
- FEAT_ATTR2,
- FEAT_CPR,
- FEAT_PRELOAD,
- FEAT_FIR_COEF_V,
- FEAT_ALPHA_FREE_ZORDER,
- FEAT_FIFO_MERGE,
- FEAT_BURST_2D,
- FEAT_DSI_PHY_DCC,
- FEAT_MFLAG,
-};
-
-/* OMAP2 DSS Features */
-static const struct omap_dss_features omap2_dss_features = {
- .reg_fields = omap2_dss_reg_fields,
- .num_reg_fields = ARRAY_SIZE(omap2_dss_reg_fields),
-
- .features = omap2_dss_feat_list,
- .num_features = ARRAY_SIZE(omap2_dss_feat_list),
-
- .num_mgrs = 2,
- .num_ovls = 3,
- .supported_displays = omap2_dss_supported_displays,
- .supported_outputs = omap2_dss_supported_outputs,
- .supported_color_modes = omap2_dss_supported_color_modes,
- .overlay_caps = omap2_dss_overlay_caps,
- .dss_params = omap2_dss_param_range,
- .buffer_size_unit = 1,
- .burst_size_unit = 8,
-};
-
-/* OMAP3 DSS Features */
-static const struct omap_dss_features omap3430_dss_features = {
- .reg_fields = omap3_dss_reg_fields,
- .num_reg_fields = ARRAY_SIZE(omap3_dss_reg_fields),
-
- .features = omap3430_dss_feat_list,
- .num_features = ARRAY_SIZE(omap3430_dss_feat_list),
-
- .num_mgrs = 2,
- .num_ovls = 3,
- .supported_displays = omap3430_dss_supported_displays,
- .supported_outputs = omap3430_dss_supported_outputs,
- .supported_color_modes = omap3_dss_supported_color_modes,
- .overlay_caps = omap3430_dss_overlay_caps,
- .dss_params = omap3_dss_param_range,
- .buffer_size_unit = 1,
- .burst_size_unit = 8,
-};
-
-/*
- * AM35xx DSS Features. This is basically OMAP3 DSS Features without the
- * vdds_dsi regulator.
- */
-static const struct omap_dss_features am35xx_dss_features = {
- .reg_fields = omap3_dss_reg_fields,
- .num_reg_fields = ARRAY_SIZE(omap3_dss_reg_fields),
-
- .features = am35xx_dss_feat_list,
- .num_features = ARRAY_SIZE(am35xx_dss_feat_list),
-
- .num_mgrs = 2,
- .num_ovls = 3,
- .supported_displays = omap3430_dss_supported_displays,
- .supported_outputs = omap3430_dss_supported_outputs,
- .supported_color_modes = omap3_dss_supported_color_modes,
- .overlay_caps = omap3430_dss_overlay_caps,
- .dss_params = omap3_dss_param_range,
- .buffer_size_unit = 1,
- .burst_size_unit = 8,
-};
-
-static const struct omap_dss_features am43xx_dss_features = {
- .reg_fields = am43xx_dss_reg_fields,
- .num_reg_fields = ARRAY_SIZE(am43xx_dss_reg_fields),
-
- .features = am43xx_dss_feat_list,
- .num_features = ARRAY_SIZE(am43xx_dss_feat_list),
-
- .num_mgrs = 1,
- .num_ovls = 3,
- .supported_displays = am43xx_dss_supported_displays,
- .supported_outputs = am43xx_dss_supported_outputs,
- .supported_color_modes = omap3_dss_supported_color_modes,
- .overlay_caps = omap3430_dss_overlay_caps,
- .dss_params = am43xx_dss_param_range,
- .buffer_size_unit = 1,
- .burst_size_unit = 8,
-};
-
-static const struct omap_dss_features omap3630_dss_features = {
- .reg_fields = omap3_dss_reg_fields,
- .num_reg_fields = ARRAY_SIZE(omap3_dss_reg_fields),
-
- .features = omap3630_dss_feat_list,
- .num_features = ARRAY_SIZE(omap3630_dss_feat_list),
-
- .num_mgrs = 2,
- .num_ovls = 3,
- .supported_displays = omap3630_dss_supported_displays,
- .supported_outputs = omap3630_dss_supported_outputs,
- .supported_color_modes = omap3_dss_supported_color_modes,
- .overlay_caps = omap3630_dss_overlay_caps,
- .dss_params = omap3_dss_param_range,
- .buffer_size_unit = 1,
- .burst_size_unit = 8,
-};
-
-/* OMAP4 DSS Features */
-/* For OMAP4430 ES 1.0 revision */
-static const struct omap_dss_features omap4430_es1_0_dss_features = {
- .reg_fields = omap4_dss_reg_fields,
- .num_reg_fields = ARRAY_SIZE(omap4_dss_reg_fields),
-
- .features = omap4430_es1_0_dss_feat_list,
- .num_features = ARRAY_SIZE(omap4430_es1_0_dss_feat_list),
-
- .num_mgrs = 3,
- .num_ovls = 4,
- .supported_displays = omap4_dss_supported_displays,
- .supported_outputs = omap4_dss_supported_outputs,
- .supported_color_modes = omap4_dss_supported_color_modes,
- .overlay_caps = omap4_dss_overlay_caps,
- .dss_params = omap4_dss_param_range,
- .buffer_size_unit = 16,
- .burst_size_unit = 16,
-};
-
-/* For OMAP4430 ES 2.0, 2.1 and 2.2 revisions */
-static const struct omap_dss_features omap4430_es2_0_1_2_dss_features = {
- .reg_fields = omap4_dss_reg_fields,
- .num_reg_fields = ARRAY_SIZE(omap4_dss_reg_fields),
-
- .features = omap4430_es2_0_1_2_dss_feat_list,
- .num_features = ARRAY_SIZE(omap4430_es2_0_1_2_dss_feat_list),
-
- .num_mgrs = 3,
- .num_ovls = 4,
- .supported_displays = omap4_dss_supported_displays,
- .supported_outputs = omap4_dss_supported_outputs,
- .supported_color_modes = omap4_dss_supported_color_modes,
- .overlay_caps = omap4_dss_overlay_caps,
- .dss_params = omap4_dss_param_range,
- .buffer_size_unit = 16,
- .burst_size_unit = 16,
-};
-
-/* For all the other OMAP4 versions */
-static const struct omap_dss_features omap4_dss_features = {
- .reg_fields = omap4_dss_reg_fields,
- .num_reg_fields = ARRAY_SIZE(omap4_dss_reg_fields),
-
- .features = omap4_dss_feat_list,
- .num_features = ARRAY_SIZE(omap4_dss_feat_list),
-
- .num_mgrs = 3,
- .num_ovls = 4,
- .supported_displays = omap4_dss_supported_displays,
- .supported_outputs = omap4_dss_supported_outputs,
- .supported_color_modes = omap4_dss_supported_color_modes,
- .overlay_caps = omap4_dss_overlay_caps,
- .dss_params = omap4_dss_param_range,
- .buffer_size_unit = 16,
- .burst_size_unit = 16,
-};
-
-/* OMAP5 DSS Features */
-static const struct omap_dss_features omap5_dss_features = {
- .reg_fields = omap5_dss_reg_fields,
- .num_reg_fields = ARRAY_SIZE(omap5_dss_reg_fields),
-
- .features = omap5_dss_feat_list,
- .num_features = ARRAY_SIZE(omap5_dss_feat_list),
-
- .num_mgrs = 4,
- .num_ovls = 4,
- .supported_displays = omap5_dss_supported_displays,
- .supported_outputs = omap5_dss_supported_outputs,
- .supported_color_modes = omap4_dss_supported_color_modes,
- .overlay_caps = omap4_dss_overlay_caps,
- .dss_params = omap5_dss_param_range,
- .buffer_size_unit = 16,
- .burst_size_unit = 16,
-};
-
-/* Functions returning values related to a DSS feature */
-int dss_feat_get_num_mgrs(void)
-{
- return omap_current_dss_features->num_mgrs;
-}
-
-int dss_feat_get_num_ovls(void)
-{
- return omap_current_dss_features->num_ovls;
-}
-
-unsigned long dss_feat_get_param_min(enum dss_range_param param)
-{
- return omap_current_dss_features->dss_params[param].min;
-}
-
-unsigned long dss_feat_get_param_max(enum dss_range_param param)
-{
- return omap_current_dss_features->dss_params[param].max;
-}
-
-enum omap_display_type dss_feat_get_supported_displays(enum omap_channel channel)
-{
- return omap_current_dss_features->supported_displays[channel];
-}
-
-enum omap_dss_output_id dss_feat_get_supported_outputs(enum omap_channel channel)
-{
- return omap_current_dss_features->supported_outputs[channel];
-}
-
-const u32 *dss_feat_get_supported_color_modes(enum omap_plane_id plane)
-{
- return omap_current_dss_features->supported_color_modes[plane];
-}
-
-enum omap_overlay_caps dss_feat_get_overlay_caps(enum omap_plane_id plane)
-{
- return omap_current_dss_features->overlay_caps[plane];
-}
-
-bool dss_feat_color_mode_supported(enum omap_plane_id plane, u32 fourcc)
-{
- const u32 *modes;
- unsigned int i;
-
- modes = omap_current_dss_features->supported_color_modes[plane];
-
- for (i = 0; modes[i]; ++i) {
- if (modes[i] == fourcc)
- return true;
- }
-
- return false;
-}
-
-u32 dss_feat_get_buffer_size_unit(void)
-{
- return omap_current_dss_features->buffer_size_unit;
-}
-
-u32 dss_feat_get_burst_size_unit(void)
-{
- return omap_current_dss_features->burst_size_unit;
-}
-
-/* DSS has_feature check */
-bool dss_has_feature(enum dss_feat_id id)
-{
- int i;
- const enum dss_feat_id *features = omap_current_dss_features->features;
- const int num_features = omap_current_dss_features->num_features;
-
- for (i = 0; i < num_features; i++) {
- if (features[i] == id)
- return true;
- }
-
- return false;
-}
-
-void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end)
-{
- if (id >= omap_current_dss_features->num_reg_fields)
- BUG();
-
- *start = omap_current_dss_features->reg_fields[id].start;
- *end = omap_current_dss_features->reg_fields[id].end;
-}
-
-void dss_features_init(enum omapdss_version version)
-{
- switch (version) {
- case OMAPDSS_VER_OMAP24xx:
- omap_current_dss_features = &omap2_dss_features;
- break;
-
- case OMAPDSS_VER_OMAP34xx_ES1:
- case OMAPDSS_VER_OMAP34xx_ES3:
- omap_current_dss_features = &omap3430_dss_features;
- break;
-
- case OMAPDSS_VER_OMAP3630:
- omap_current_dss_features = &omap3630_dss_features;
- break;
-
- case OMAPDSS_VER_OMAP4430_ES1:
- omap_current_dss_features = &omap4430_es1_0_dss_features;
- break;
-
- case OMAPDSS_VER_OMAP4430_ES2:
- omap_current_dss_features = &omap4430_es2_0_1_2_dss_features;
- break;
-
- case OMAPDSS_VER_OMAP4:
- omap_current_dss_features = &omap4_dss_features;
- break;
-
- case OMAPDSS_VER_OMAP5:
- case OMAPDSS_VER_DRA7xx:
- omap_current_dss_features = &omap5_dss_features;
- break;
-
- case OMAPDSS_VER_AM35xx:
- omap_current_dss_features = &am35xx_dss_features;
- break;
-
- case OMAPDSS_VER_AM43xx:
- omap_current_dss_features = &am43xx_dss_features;
- break;
-
- default:
- DSSWARN("Unsupported OMAP version");
- break;
- }
-}
diff --git a/drivers/gpu/drm/omapdrm/dss/dss_features.h b/drivers/gpu/drm/omapdrm/dss/dss_features.h
deleted file mode 100644
index c36436d27ff5..000000000000
--- a/drivers/gpu/drm/omapdrm/dss/dss_features.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * linux/drivers/video/omap2/dss/dss_features.h
- *
- * Copyright (C) 2010 Texas Instruments
- * Author: Archit Taneja <archit@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __OMAP2_DSS_FEATURES_H
-#define __OMAP2_DSS_FEATURES_H
-
-#define MAX_DSS_MANAGERS 4
-#define MAX_DSS_OVERLAYS 4
-#define MAX_DSS_LCD_MANAGERS 3
-#define MAX_NUM_DSI 2
-
-/* DSS has feature id */
-enum dss_feat_id {
- FEAT_LCDENABLEPOL,
- FEAT_LCDENABLESIGNAL,
- FEAT_PCKFREEENABLE,
- FEAT_FUNCGATED,
- FEAT_MGR_LCD2,
- FEAT_MGR_LCD3,
- FEAT_LINEBUFFERSPLIT,
- FEAT_ROWREPEATENABLE,
- FEAT_RESIZECONF,
- /* Independent core clk divider */
- FEAT_CORE_CLK_DIV,
- FEAT_LCD_CLK_SRC,
- /* DSI-PLL power command 0x3 is not working */
- FEAT_DSI_PLL_PWR_BUG,
- FEAT_DSI_DCS_CMD_CONFIG_VC,
- FEAT_DSI_VC_OCP_WIDTH,
- FEAT_DSI_REVERSE_TXCLKESC,
- FEAT_DSI_GNQ,
- FEAT_DPI_USES_VDDS_DSI,
- FEAT_HDMI_CTS_SWMODE,
- FEAT_HDMI_AUDIO_USE_MCLK,
- FEAT_HANDLE_UV_SEPARATE,
- FEAT_ATTR2,
- FEAT_VENC_REQUIRES_TV_DAC_CLK,
- FEAT_CPR,
- FEAT_PRELOAD,
- FEAT_FIR_COEF_V,
- FEAT_ALPHA_FIXED_ZORDER,
- FEAT_ALPHA_FREE_ZORDER,
- FEAT_FIFO_MERGE,
- /* An unknown HW bug causing the normal FIFO thresholds not to work */
- FEAT_OMAP3_DSI_FIFO_BUG,
- FEAT_BURST_2D,
- FEAT_DSI_PHY_DCC,
- FEAT_MFLAG,
-};
-
-/* DSS register field id */
-enum dss_feat_reg_field {
- FEAT_REG_FIRHINC,
- FEAT_REG_FIRVINC,
- FEAT_REG_FIFOHIGHTHRESHOLD,
- FEAT_REG_FIFOLOWTHRESHOLD,
- FEAT_REG_FIFOSIZE,
- FEAT_REG_HORIZONTALACCU,
- FEAT_REG_VERTICALACCU,
- FEAT_REG_DISPC_CLK_SWITCH,
-};
-
-enum dss_range_param {
- FEAT_PARAM_DSS_FCK,
- FEAT_PARAM_DSS_PCD,
- FEAT_PARAM_DSIPLL_LPDIV,
- FEAT_PARAM_DSI_FCK,
- FEAT_PARAM_DOWNSCALE,
- FEAT_PARAM_LINEWIDTH,
-};
-
-/* DSS Feature Functions */
-unsigned long dss_feat_get_param_min(enum dss_range_param param);
-unsigned long dss_feat_get_param_max(enum dss_range_param param);
-enum omap_overlay_caps dss_feat_get_overlay_caps(enum omap_plane_id plane);
-bool dss_feat_color_mode_supported(enum omap_plane_id plane,
- u32 fourcc);
-
-u32 dss_feat_get_buffer_size_unit(void); /* in bytes */
-u32 dss_feat_get_burst_size_unit(void); /* in bytes */
-
-bool dss_has_feature(enum dss_feat_id id);
-void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end);
-void dss_features_init(enum omapdss_version version);
-
-enum omap_display_type dss_feat_get_supported_displays(enum omap_channel channel);
-enum omap_dss_output_id dss_feat_get_supported_outputs(enum omap_channel channel);
-
-int dss_feat_get_num_mgrs(void);
-int dss_feat_get_num_ovls(void);
-const u32 *dss_feat_get_supported_color_modes(enum omap_plane_id plane);
-
-#endif
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi.h b/drivers/gpu/drm/omapdrm/dss/hdmi.h
index fb6cccd02374..a820b394af09 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi.h
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi.h
@@ -234,6 +234,7 @@ struct hdmi_core_audio_config {
struct hdmi_wp_data {
void __iomem *base;
phys_addr_t phys_base;
+ unsigned int version;
};
struct hdmi_pll_data {
@@ -245,15 +246,24 @@ struct hdmi_pll_data {
struct hdmi_wp_data *wp;
};
+struct hdmi_phy_features {
+ bool bist_ctrl;
+ bool ldo_voltage;
+ unsigned long max_phy;
+};
+
struct hdmi_phy_data {
void __iomem *base;
+ const struct hdmi_phy_features *features;
u8 lane_function[4];
u8 lane_polarity[4];
};
struct hdmi_core_data {
void __iomem *base;
+ bool cts_swmode;
+ bool audio_use_mclk;
};
static inline void hdmi_write_reg(void __iomem *base_addr, const u32 idx,
@@ -303,7 +313,8 @@ void hdmi_wp_video_config_timing(struct hdmi_wp_data *wp,
struct videomode *vm);
void hdmi_wp_init_vid_fmt_timings(struct hdmi_video_format *video_fmt,
struct videomode *vm, struct hdmi_config *param);
-int hdmi_wp_init(struct platform_device *pdev, struct hdmi_wp_data *wp);
+int hdmi_wp_init(struct platform_device *pdev, struct hdmi_wp_data *wp,
+ unsigned int version);
phys_addr_t hdmi_wp_get_audio_dma_addr(struct hdmi_wp_data *wp);
/* HDMI PLL funcs */
@@ -316,7 +327,8 @@ void hdmi_pll_uninit(struct hdmi_pll_data *hpll);
int hdmi_phy_configure(struct hdmi_phy_data *phy, unsigned long hfbitclk,
unsigned long lfbitclk);
void hdmi_phy_dump(struct hdmi_phy_data *phy, struct seq_file *s);
-int hdmi_phy_init(struct platform_device *pdev, struct hdmi_phy_data *phy);
+int hdmi_phy_init(struct platform_device *pdev, struct hdmi_phy_data *phy,
+ unsigned int version);
int hdmi_phy_parse_lanes(struct hdmi_phy_data *phy, const u32 *lanes);
/* HDMI common funcs */
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index 284b4942b9ac..f169348da377 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -40,7 +40,6 @@
#include "omapdss.h"
#include "hdmi4_core.h"
#include "dss.h"
-#include "dss_features.h"
#include "hdmi.h"
static struct omap_hdmi hdmi;
@@ -668,7 +667,7 @@ static int hdmi_audio_register(struct device *dev)
{
struct omap_hdmi_audio_pdata pdata = {
.dev = dev,
- .dss_version = omapdss_get_version(),
+ .version = 4,
.audio_dma_addr = hdmi_wp_get_audio_dma_addr(&hdmi.wp),
.ops = &hdmi_audio_ops,
};
@@ -700,7 +699,7 @@ static int hdmi4_bind(struct device *dev, struct device *master, void *data)
if (r)
return r;
- r = hdmi_wp_init(pdev, &hdmi.wp);
+ r = hdmi_wp_init(pdev, &hdmi.wp, 4);
if (r)
return r;
@@ -708,7 +707,7 @@ static int hdmi4_bind(struct device *dev, struct device *master, void *data)
if (r)
return r;
- r = hdmi_phy_init(pdev, &hdmi.phy);
+ r = hdmi_phy_init(pdev, &hdmi.phy, 4);
if (r)
goto err;
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
index ed6001613405..365cf07daa01 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
@@ -31,11 +31,11 @@
#include <linux/platform_device.h>
#include <linux/string.h>
#include <linux/seq_file.h>
+#include <linux/sys_soc.h>
#include <sound/asound.h>
#include <sound/asoundef.h>
#include "hdmi4_core.h"
-#include "dss_features.h"
#define HDMI_CORE_AV 0x500
@@ -757,10 +757,10 @@ int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
/* Audio clock regeneration settings */
acore.n = n;
acore.cts = cts;
- if (dss_has_feature(FEAT_HDMI_CTS_SWMODE)) {
+ if (core->cts_swmode) {
acore.aud_par_busclk = 0;
acore.cts_mode = HDMI_AUDIO_CTS_MODE_SW;
- acore.use_mclk = dss_has_feature(FEAT_HDMI_AUDIO_USE_MCLK);
+ acore.use_mclk = core->audio_use_mclk;
} else {
acore.aud_par_busclk = (((128 * 31) - 1) << 8);
acore.cts_mode = HDMI_AUDIO_CTS_MODE_HW;
@@ -884,10 +884,42 @@ void hdmi4_audio_stop(struct hdmi_core_data *core, struct hdmi_wp_data *wp)
hdmi_wp_audio_core_req_enable(wp, false);
}
+struct hdmi4_features {
+ bool cts_swmode;
+ bool audio_use_mclk;
+};
+
+static const struct hdmi4_features hdmi4_es1_features = {
+ .cts_swmode = false,
+ .audio_use_mclk = false,
+};
+
+static const struct hdmi4_features hdmi4_es2_features = {
+ .cts_swmode = true,
+ .audio_use_mclk = false,
+};
+
+static const struct hdmi4_features hdmi4_es3_features = {
+ .cts_swmode = true,
+ .audio_use_mclk = true,
+};
+
+static const struct soc_device_attribute hdmi4_soc_devices[] = {
+ { .family = "OMAP4", .revision = "ES1.?", .data = &hdmi4_es1_features },
+ { .family = "OMAP4", .revision = "ES2.?", .data = &hdmi4_es2_features },
+ { .family = "OMAP4", .data = &hdmi4_es3_features },
+ { /* sentinel */ }
+};
+
int hdmi4_core_init(struct platform_device *pdev, struct hdmi_core_data *core)
{
+ const struct hdmi4_features *features;
struct resource *res;
+ features = soc_device_match(hdmi4_soc_devices)->data;
+ core->cts_swmode = features->cts_swmode;
+ core->audio_use_mclk = features->audio_use_mclk;
+
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core");
core->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(core->base))
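The hunk above replaces the dss_has_feature() calls with a per-SoC feature table: soc_device_match() walks hdmi4_soc_devices, the first entry whose family/revision patterns match the running SoC wins, and its ->data supplies the feature set copied into hdmi_core_data. Below is a minimal user-space sketch of the same first-match lookup pattern, assuming glob-style revision strings and using fnmatch(3) in place of the kernel's matcher; the names are stand-ins, not the kernel API.

#include <fnmatch.h>
#include <stdbool.h>
#include <stdio.h>

struct hdmi4_features {
	bool cts_swmode;
	bool audio_use_mclk;
};

struct soc_attr {
	const char *family;
	const char *revision;	/* glob pattern; NULL matches any revision */
	const void *data;
};

static const struct hdmi4_features es1 = { false, false };
static const struct hdmi4_features es2 = { true,  false };
static const struct hdmi4_features es3 = { true,  true  };

/* More specific entries must come first: the first match wins. */
static const struct soc_attr table[] = {
	{ "OMAP4", "ES1.?", &es1 },
	{ "OMAP4", "ES2.?", &es2 },
	{ "OMAP4", NULL,    &es3 },	/* catch-all for later revisions */
	{ NULL }			/* sentinel */
};

static const void *soc_match(const char *family, const char *revision)
{
	const struct soc_attr *a;

	for (a = table; a->family; a++) {
		if (fnmatch(a->family, family, 0))
			continue;
		if (a->revision && fnmatch(a->revision, revision, 0))
			continue;
		return a->data;
	}
	return NULL;
}

int main(void)
{
	const struct hdmi4_features *f = soc_match("OMAP4", "ES2.1");

	printf("cts_swmode=%d audio_use_mclk=%d\n",
	       f->cts_swmode, f->audio_use_mclk);	/* prints: 1 0 */
	return 0;
}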
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index 441e1999d86a..b3221ca5bcd8 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -45,7 +45,6 @@
#include "omapdss.h"
#include "hdmi5_core.h"
#include "dss.h"
-#include "dss_features.h"
static struct omap_hdmi hdmi;
@@ -695,7 +694,7 @@ static int hdmi_audio_register(struct device *dev)
{
struct omap_hdmi_audio_pdata pdata = {
.dev = dev,
- .dss_version = omapdss_get_version(),
+ .version = 5,
.audio_dma_addr = hdmi_wp_get_audio_dma_addr(&hdmi.wp),
.ops = &hdmi_audio_ops,
};
@@ -732,7 +731,7 @@ static int hdmi5_bind(struct device *dev, struct device *master, void *data)
if (r)
return r;
- r = hdmi_wp_init(pdev, &hdmi.wp);
+ r = hdmi_wp_init(pdev, &hdmi.wp, 5);
if (r)
return r;
@@ -740,7 +739,7 @@ static int hdmi5_bind(struct device *dev, struct device *master, void *data)
if (r)
return r;
- r = hdmi_phy_init(pdev, &hdmi.phy);
+ r = hdmi_phy_init(pdev, &hdmi.phy, 5);
if (r)
goto err;
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c
index fb5e4c724b4b..a156292b1820 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c
@@ -19,14 +19,6 @@
#include "dss.h"
#include "hdmi.h"
-struct hdmi_phy_features {
- bool bist_ctrl;
- bool ldo_voltage;
- unsigned long max_phy;
-};
-
-static const struct hdmi_phy_features *phy_feat;
-
void hdmi_phy_dump(struct hdmi_phy_data *phy, struct seq_file *s)
{
#define DUMPPHY(r) seq_printf(s, "%-35s %08x\n", #r,\
@@ -36,7 +28,7 @@ void hdmi_phy_dump(struct hdmi_phy_data *phy, struct seq_file *s)
DUMPPHY(HDMI_TXPHY_DIGITAL_CTRL);
DUMPPHY(HDMI_TXPHY_POWER_CTRL);
DUMPPHY(HDMI_TXPHY_PAD_CFG_CTRL);
- if (phy_feat->bist_ctrl)
+ if (phy->features->bist_ctrl)
DUMPPHY(HDMI_TXPHY_BIST_CONTROL);
}
@@ -146,7 +138,7 @@ int hdmi_phy_configure(struct hdmi_phy_data *phy, unsigned long hfbitclk,
* In OMAP5+, the HFBITCLK must be divided by 2 before issuing the
* HDMI_PHYPWRCMD_LDOON command.
*/
- if (phy_feat->bist_ctrl)
+ if (phy->features->bist_ctrl)
REG_FLD_MOD(phy->base, HDMI_TXPHY_BIST_CONTROL, 1, 11, 11);
/*
@@ -155,7 +147,7 @@ int hdmi_phy_configure(struct hdmi_phy_data *phy, unsigned long hfbitclk,
*/
if (hfbitclk != lfbitclk)
freqout = 0;
- else if (hfbitclk / 10 < phy_feat->max_phy)
+ else if (hfbitclk / 10 < phy->features->max_phy)
freqout = 1;
else
freqout = 2;
@@ -170,7 +162,7 @@ int hdmi_phy_configure(struct hdmi_phy_data *phy, unsigned long hfbitclk,
hdmi_write_reg(phy->base, HDMI_TXPHY_DIGITAL_CTRL, 0xF0000000);
/* Setup max LDO voltage */
- if (phy_feat->ldo_voltage)
+ if (phy->features->ldo_voltage)
REG_FLD_MOD(phy->base, HDMI_TXPHY_POWER_CTRL, 0xB, 3, 0);
hdmi_phy_configure_lanes(phy);
@@ -190,47 +182,15 @@ static const struct hdmi_phy_features omap54xx_phy_feats = {
.max_phy = 186000000,
};
-static int hdmi_phy_init_features(struct platform_device *pdev)
-{
- struct hdmi_phy_features *dst;
- const struct hdmi_phy_features *src;
-
- dst = devm_kzalloc(&pdev->dev, sizeof(*dst), GFP_KERNEL);
- if (!dst) {
- dev_err(&pdev->dev, "Failed to allocate HDMI PHY Features\n");
- return -ENOMEM;
- }
-
- switch (omapdss_get_version()) {
- case OMAPDSS_VER_OMAP4430_ES1:
- case OMAPDSS_VER_OMAP4430_ES2:
- case OMAPDSS_VER_OMAP4:
- src = &omap44xx_phy_feats;
- break;
-
- case OMAPDSS_VER_OMAP5:
- case OMAPDSS_VER_DRA7xx:
- src = &omap54xx_phy_feats;
- break;
-
- default:
- return -ENODEV;
- }
-
- memcpy(dst, src, sizeof(*dst));
- phy_feat = dst;
-
- return 0;
-}
-
-int hdmi_phy_init(struct platform_device *pdev, struct hdmi_phy_data *phy)
+int hdmi_phy_init(struct platform_device *pdev, struct hdmi_phy_data *phy,
+ unsigned int version)
{
- int r;
struct resource *res;
- r = hdmi_phy_init_features(pdev);
- if (r)
- return r;
+ if (version == 4)
+ phy->features = &omap44xx_phy_feats;
+ else
+ phy->features = &omap54xx_phy_feats;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
phy->base = devm_ioremap_resource(&pdev->dev, res);
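With hdmi_phy_features moved into struct hdmi_phy_data, each PHY instance now carries its own capability pointer selected from the version argument passed down by hdmi4_bind()/hdmi5_bind(), instead of consulting a file-scope pointer filled in from omapdss_get_version(). A condensed sketch of that pattern follows; the names are hypothetical and the capability values are placeholders, not the real hardware limits.

#include <stdbool.h>
#include <stdio.h>

struct phy_features {
	bool bist_ctrl;
	bool ldo_voltage;
	unsigned long max_phy;	/* Hz; illustrative values only */
};

struct phy_data {
	const struct phy_features *features;
};

static const struct phy_features omap4_feats = {
	.bist_ctrl = false, .ldo_voltage = false, .max_phy = 185000000,
};

static const struct phy_features omap5_feats = {
	.bist_ctrl = true, .ldo_voltage = true, .max_phy = 186000000,
};

/* version 4 -> OMAP4-class PHY, anything else -> OMAP5/DRA7-class PHY */
static void phy_init(struct phy_data *phy, unsigned int version)
{
	phy->features = (version == 4) ? &omap4_feats : &omap5_feats;
}

int main(void)
{
	struct phy_data phy;

	phy_init(&phy, 5);
	printf("bist_ctrl=%d max_phy=%lu\n",
	       phy.features->bist_ctrl, phy.features->max_phy);
	return 0;
}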
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c
index 46239358655a..55bee81f4dd5 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c
@@ -71,7 +71,7 @@ static void hdmi_pll_disable(struct dss_pll *dsspll)
WARN_ON(r < 0 && r != -ENOSYS);
}
-static const struct dss_pll_ops dsi_pll_ops = {
+static const struct dss_pll_ops hdmi_pll_ops = {
.enable = hdmi_pll_enable,
.disable = hdmi_pll_disable,
.set_config = dss_pll_write_config_type_b,
@@ -128,7 +128,8 @@ static const struct dss_pll_hw dss_omap5_hdmi_pll_hw = {
.has_refsel = true,
};
-static int dsi_init_pll_data(struct platform_device *pdev, struct hdmi_pll_data *hpll)
+static int hdmi_init_pll_data(struct platform_device *pdev,
+ struct hdmi_pll_data *hpll)
{
struct dss_pll *pll = &hpll->pll;
struct clk *clk;
@@ -145,23 +146,12 @@ static int dsi_init_pll_data(struct platform_device *pdev, struct hdmi_pll_data
pll->base = hpll->base;
pll->clkin = clk;
- switch (omapdss_get_version()) {
- case OMAPDSS_VER_OMAP4430_ES1:
- case OMAPDSS_VER_OMAP4430_ES2:
- case OMAPDSS_VER_OMAP4:
+ if (hpll->wp->version == 4)
pll->hw = &dss_omap4_hdmi_pll_hw;
- break;
-
- case OMAPDSS_VER_OMAP5:
- case OMAPDSS_VER_DRA7xx:
+ else
pll->hw = &dss_omap5_hdmi_pll_hw;
- break;
-
- default:
- return -ENODEV;
- }
- pll->ops = &dsi_pll_ops;
+ pll->ops = &hdmi_pll_ops;
r = dss_pll_register(pll);
if (r)
@@ -184,7 +174,7 @@ int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll,
if (IS_ERR(pll->base))
return PTR_ERR(pll->base);
- r = dsi_init_pll_data(pdev, pll);
+ r = hdmi_init_pll_data(pdev, pll);
if (r) {
DSSERR("failed to init HDMI PLL\n");
return r;
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
index ab129df2e310..88034fbe0e9f 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
@@ -178,9 +178,7 @@ void hdmi_wp_video_config_timing(struct hdmi_wp_data *wp,
* However, we don't support OMAP5 ES1 at all, so we can just check for
* OMAP4 here.
*/
- if (omapdss_get_version() == OMAPDSS_VER_OMAP4430_ES1 ||
- omapdss_get_version() == OMAPDSS_VER_OMAP4430_ES2 ||
- omapdss_get_version() == OMAPDSS_VER_OMAP4)
+ if (wp->version == 4)
hsync_len_offset = 0;
timing_h |= FLD_VAL(vm->hback_porch, 31, 20);
@@ -235,9 +233,7 @@ void hdmi_wp_audio_config_format(struct hdmi_wp_data *wp,
DSSDBG("Enter hdmi_wp_audio_config_format\n");
r = hdmi_read_reg(wp->base, HDMI_WP_AUDIO_CFG);
- if (omapdss_get_version() == OMAPDSS_VER_OMAP4430_ES1 ||
- omapdss_get_version() == OMAPDSS_VER_OMAP4430_ES2 ||
- omapdss_get_version() == OMAPDSS_VER_OMAP4) {
+ if (wp->version == 4) {
r = FLD_MOD(r, aud_fmt->stereo_channels, 26, 24);
r = FLD_MOD(r, aud_fmt->active_chnnls_msk, 23, 16);
}
@@ -282,7 +278,8 @@ int hdmi_wp_audio_core_req_enable(struct hdmi_wp_data *wp, bool enable)
return 0;
}
-int hdmi_wp_init(struct platform_device *pdev, struct hdmi_wp_data *wp)
+int hdmi_wp_init(struct platform_device *pdev, struct hdmi_wp_data *wp,
+ unsigned int version)
{
struct resource *res;
@@ -292,6 +289,7 @@ int hdmi_wp_init(struct platform_device *pdev, struct hdmi_wp_data *wp)
return PTR_ERR(wp->base);
wp->phys_base = res->start;
+ wp->version = version;
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h
index 85953a0bc7c2..47a331670963 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss.h
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h
@@ -25,6 +25,7 @@
#include <video/videomode.h>
#include <linux/platform_data/omapdss.h>
#include <uapi/drm/drm_mode.h>
+#include <drm/drm_crtc.h>
#define DISPC_IRQ_FRAMEDONE (1 << 0)
#define DISPC_IRQ_VSYNC (1 << 1)
@@ -241,13 +242,6 @@ struct omap_dss_dsi_config {
enum omap_dss_dsi_trans_mode trans_mode;
};
-/* Hardcoded videomodes for tv. Venc only uses these to
- * identify the mode, and does not actually use the configs
- * itself. However, the configs should be something that
- * a normal monitor can also show */
-extern const struct videomode omap_dss_pal_vm;
-extern const struct videomode omap_dss_ntsc_vm;
-
struct omap_dss_cpr_coefs {
s16 rr, rg, rb;
s16 gr, gg, gb;
@@ -403,6 +397,14 @@ struct omapdss_hdmi_ops {
int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len);
bool (*detect)(struct omap_dss_device *dssdev);
+ int (*register_hpd_cb)(struct omap_dss_device *dssdev,
+ void (*cb)(void *cb_data,
+ enum drm_connector_status status),
+ void *cb_data);
+ void (*unregister_hpd_cb)(struct omap_dss_device *dssdev);
+ void (*enable_hpd)(struct omap_dss_device *dssdev);
+ void (*disable_hpd)(struct omap_dss_device *dssdev);
+
int (*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode);
int (*set_infoframe)(struct omap_dss_device *dssdev,
const struct hdmi_avi_infoframe *avi);
@@ -567,12 +569,19 @@ struct omap_dss_driver {
int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len);
bool (*detect)(struct omap_dss_device *dssdev);
+ int (*register_hpd_cb)(struct omap_dss_device *dssdev,
+ void (*cb)(void *cb_data,
+ enum drm_connector_status status),
+ void *cb_data);
+ void (*unregister_hpd_cb)(struct omap_dss_device *dssdev);
+ void (*enable_hpd)(struct omap_dss_device *dssdev);
+ void (*disable_hpd)(struct omap_dss_device *dssdev);
+
int (*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode);
int (*set_hdmi_infoframe)(struct omap_dss_device *dssdev,
const struct hdmi_avi_infoframe *avi);
};
-enum omapdss_version omapdss_get_version(void);
bool omapdss_is_initialized(void);
int omap_dss_register_driver(struct omap_dss_driver *);
diff --git a/drivers/gpu/drm/omapdrm/dss/pll.c b/drivers/gpu/drm/omapdrm/dss/pll.c
index 5e221302768b..9d9d9d42009b 100644
--- a/drivers/gpu/drm/omapdrm/dss/pll.c
+++ b/drivers/gpu/drm/omapdrm/dss/pll.c
@@ -215,8 +215,8 @@ bool dss_pll_calc_a(const struct dss_pll *pll, unsigned long clkin,
dss_pll_calc_func func, void *data)
{
const struct dss_pll_hw *hw = pll->hw;
- int n, n_min, n_max;
- int m, m_min, m_max;
+ int n, n_start, n_stop, n_inc;
+ int m, m_start, m_stop, m_inc;
unsigned long fint, clkdco;
unsigned long pll_hw_max;
unsigned long fint_hw_min, fint_hw_max;
@@ -226,22 +226,33 @@ bool dss_pll_calc_a(const struct dss_pll *pll, unsigned long clkin,
fint_hw_min = hw->fint_min;
fint_hw_max = hw->fint_max;
- n_min = max(DIV_ROUND_UP(clkin, fint_hw_max), 1ul);
- n_max = min((unsigned)(clkin / fint_hw_min), hw->n_max);
+ n_start = max(DIV_ROUND_UP(clkin, fint_hw_max), 1ul);
+ n_stop = min((unsigned)(clkin / fint_hw_min), hw->n_max);
+ n_inc = 1;
+
+ if (hw->errata_i886) {
+ swap(n_start, n_stop);
+ n_inc = -1;
+ }
pll_max = pll_max ? pll_max : ULONG_MAX;
- /* Try to find high N & M to avoid jitter (DRA7 errata i886) */
- for (n = n_max; n >= n_min; --n) {
+ for (n = n_start; n != n_stop; n += n_inc) {
fint = clkin / n;
- m_min = max(DIV_ROUND_UP(DIV_ROUND_UP(pll_min, fint), 2),
+ m_start = max(DIV_ROUND_UP(DIV_ROUND_UP(pll_min, fint), 2),
1ul);
- m_max = min3((unsigned)(pll_max / fint / 2),
+ m_stop = min3((unsigned)(pll_max / fint / 2),
(unsigned)(pll_hw_max / fint / 2),
hw->m_max);
+ m_inc = 1;
+
+ if (hw->errata_i886) {
+ swap(m_start, m_stop);
+ m_inc = -1;
+ }
- for (m = m_max; m >= m_min; --m) {
+ for (m = m_start; m != m_stop; m += m_inc) {
clkdco = 2 * m * fint;
if (func(n, m, fint, clkdco, data))
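The rewrite of dss_pll_calc_a() above folds the DRA7 i886 jitter work-around into the generic divider search: when the PLL hardware sets errata_i886, the start/stop bounds are swapped and the increment becomes -1, so N and M are walked from high to low, while other PLLs keep the original low-to-high order. The standalone sketch below shows the same reversible range scan; it assumes a half-open interval of its own making and is not a claim about the exact endpoint handling of the kernel loop.

#include <stdbool.h>
#include <stdio.h>

/*
 * Visit every integer in [lo, hi]: low-to-high normally, high-to-low
 * when the errata flag is set, mirroring how dss_pll_calc_a() swaps
 * its start/stop bounds and negates the increment.
 */
static void scan_range(int lo, int hi, bool errata_high_first)
{
	int start, stop, inc, n;

	if (!errata_high_first) {
		start = lo;
		stop = hi + 1;	/* one past the last value */
		inc = 1;
	} else {
		start = hi;
		stop = lo - 1;	/* one past the last value, going down */
		inc = -1;
	}

	for (n = start; n != stop; n += inc)
		printf("%d ", n);
	printf("\n");
}

int main(void)
{
	scan_range(1, 5, false);	/* 1 2 3 4 5 */
	scan_range(1, 5, true);		/* 5 4 3 2 1 */
	return 0;
}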
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index a6bfb3918b8d..d58da6f32693 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -37,10 +37,10 @@
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/component.h>
+#include <linux/sys_soc.h>
#include "omapdss.h"
#include "dss.h"
-#include "dss_features.h"
/* Venc registers */
#define VENC_REV_ID 0x00
@@ -263,7 +263,13 @@ static const struct venc_config venc_config_pal_bdghi = {
.fid_ext_start_y__fid_ext_offset_y = 0x01380005,
};
-const struct videomode omap_dss_pal_vm = {
+enum venc_videomode {
+ VENC_MODE_UNKNOWN,
+ VENC_MODE_PAL,
+ VENC_MODE_NTSC,
+};
+
+static const struct videomode omap_dss_pal_vm = {
.hactive = 720,
.vactive = 574,
.pixelclock = 13500000,
@@ -279,9 +285,8 @@ const struct videomode omap_dss_pal_vm = {
DISPLAY_FLAGS_PIXDATA_POSEDGE |
DISPLAY_FLAGS_SYNC_NEGEDGE,
};
-EXPORT_SYMBOL(omap_dss_pal_vm);
-const struct videomode omap_dss_ntsc_vm = {
+static const struct videomode omap_dss_ntsc_vm = {
.hactive = 720,
.vactive = 482,
.pixelclock = 13500000,
@@ -297,7 +302,24 @@ const struct videomode omap_dss_ntsc_vm = {
DISPLAY_FLAGS_PIXDATA_POSEDGE |
DISPLAY_FLAGS_SYNC_NEGEDGE,
};
-EXPORT_SYMBOL(omap_dss_ntsc_vm);
+
+static enum venc_videomode venc_get_videomode(const struct videomode *vm)
+{
+ if (!(vm->flags & DISPLAY_FLAGS_INTERLACED))
+ return VENC_MODE_UNKNOWN;
+
+ if (vm->pixelclock == omap_dss_pal_vm.pixelclock &&
+ vm->hactive == omap_dss_pal_vm.hactive &&
+ vm->vactive == omap_dss_pal_vm.vactive)
+ return VENC_MODE_PAL;
+
+ if (vm->pixelclock == omap_dss_ntsc_vm.pixelclock &&
+ vm->hactive == omap_dss_ntsc_vm.hactive &&
+ vm->vactive == omap_dss_ntsc_vm.vactive)
+ return VENC_MODE_NTSC;
+
+ return VENC_MODE_UNKNOWN;
+}
static struct {
struct platform_device *pdev;
@@ -311,6 +333,7 @@ static struct {
struct videomode vm;
enum omap_dss_venc_type type;
bool invert_polarity;
+ bool requires_tv_dac_clk;
struct omap_dss_device output;
} venc;
@@ -424,14 +447,14 @@ static void venc_runtime_put(void)
static const struct venc_config *venc_timings_to_config(struct videomode *vm)
{
- if (memcmp(&omap_dss_pal_vm, vm, sizeof(*vm)) == 0)
+ switch (venc_get_videomode(vm)) {
+ default:
+ WARN_ON_ONCE(1);
+ case VENC_MODE_PAL:
return &venc_config_pal_trm;
-
- if (memcmp(&omap_dss_ntsc_vm, vm, sizeof(*vm)) == 0)
+ case VENC_MODE_NTSC:
return &venc_config_ntsc_trm;
-
- BUG();
- return NULL;
+ }
}
static int venc_power_on(struct omap_dss_device *dssdev)
@@ -542,15 +565,28 @@ static void venc_display_disable(struct omap_dss_device *dssdev)
static void venc_set_timings(struct omap_dss_device *dssdev,
struct videomode *vm)
{
+ struct videomode actual_vm;
+
DSSDBG("venc_set_timings\n");
mutex_lock(&venc.venc_lock);
+ switch (venc_get_videomode(vm)) {
+ default:
+ WARN_ON_ONCE(1);
+ case VENC_MODE_PAL:
+ actual_vm = omap_dss_pal_vm;
+ break;
+ case VENC_MODE_NTSC:
+ actual_vm = omap_dss_ntsc_vm;
+ break;
+ }
+
/* Reset WSS data when the TV standard changes. */
- if (memcmp(&venc.vm, vm, sizeof(*vm)))
+ if (memcmp(&venc.vm, &actual_vm, sizeof(actual_vm)))
venc.wss_data = 0;
- venc.vm = *vm;
+ venc.vm = actual_vm;
dispc_set_tv_pclk(13500000);
@@ -562,13 +598,13 @@ static int venc_check_timings(struct omap_dss_device *dssdev,
{
DSSDBG("venc_check_timings\n");
- if (memcmp(&omap_dss_pal_vm, vm, sizeof(*vm)) == 0)
- return 0;
-
- if (memcmp(&omap_dss_ntsc_vm, vm, sizeof(*vm)) == 0)
+ switch (venc_get_videomode(vm)) {
+ case VENC_MODE_PAL:
+ case VENC_MODE_NTSC:
return 0;
-
- return -EINVAL;
+ default:
+ return -EINVAL;
+ }
}
static void venc_get_timings(struct omap_dss_device *dssdev,
@@ -693,7 +729,7 @@ static int venc_get_clocks(struct platform_device *pdev)
{
struct clk *clk;
- if (dss_has_feature(FEAT_VENC_REQUIRES_TV_DAC_CLK)) {
+ if (venc.requires_tv_dac_clk) {
clk = devm_clk_get(&pdev->dev, "tv_dac_clk");
if (IS_ERR(clk)) {
DSSERR("can't get tv_dac_clk\n");
@@ -828,6 +864,12 @@ err:
}
/* VENC HW IP initialisation */
+static const struct soc_device_attribute venc_soc_devices[] = {
+ { .machine = "OMAP3[45]*" },
+ { .machine = "AM35*" },
+ { /* sentinel */ }
+};
+
static int venc_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -837,6 +879,10 @@ static int venc_bind(struct device *dev, struct device *master, void *data)
venc.pdev = pdev;
+ /* The OMAP34xx, OMAP35xx and AM35xx VENC require the TV DAC clock. */
+ if (soc_device_match(venc_soc_devices))
+ venc.requires_tv_dac_clk = true;
+
mutex_init(&venc.venc_lock);
venc.wss_data = 0;
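The VENC changes above stop memcmp()ing the whole struct videomode against the PAL/NTSC references; venc_get_videomode() classifies a requested mode by its identifying fields (interlace flag, pixel clock, active size), so a mode that matches on those fields is accepted even if secondary timing fields differ. A small sketch of that idea, with a reduced struct and placeholder porch values:

#include <stdio.h>
#include <string.h>

struct vmode {
	unsigned long pixelclock;
	unsigned int hactive, vactive;
	unsigned int hfront_porch;	/* not used for classification */
	unsigned int flags;		/* bit 0: interlaced */
};

enum tv_std { TV_UNKNOWN, TV_PAL, TV_NTSC };

static const struct vmode pal  = { 13500000, 720, 574, 12, 1 };
static const struct vmode ntsc = { 13500000, 720, 482, 16, 1 };

/* Match on the identifying fields only, not on the whole struct. */
static enum tv_std classify(const struct vmode *vm)
{
	if (!(vm->flags & 1))
		return TV_UNKNOWN;
	if (vm->pixelclock == pal.pixelclock &&
	    vm->hactive == pal.hactive && vm->vactive == pal.vactive)
		return TV_PAL;
	if (vm->pixelclock == ntsc.pixelclock &&
	    vm->hactive == ntsc.hactive && vm->vactive == ntsc.vactive)
		return TV_NTSC;
	return TV_UNKNOWN;
}

int main(void)
{
	struct vmode req = pal;

	req.hfront_porch = 20;	/* a full memcmp() would now reject it */
	printf("memcmp match: %d  classify match: %d\n",
	       memcmp(&req, &pal, sizeof(req)) == 0,
	       classify(&req) == TV_PAL);	/* prints: 0  1 */
	return 0;
}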
diff --git a/drivers/gpu/drm/omapdrm/dss/video-pll.c b/drivers/gpu/drm/omapdrm/dss/video-pll.c
index fbd1263a29a4..38a239cc5e04 100644
--- a/drivers/gpu/drm/omapdrm/dss/video-pll.c
+++ b/drivers/gpu/drm/omapdrm/dss/video-pll.c
@@ -19,7 +19,6 @@
#include "omapdss.h"
#include "dss.h"
-#include "dss_features.h"
struct dss_video_pll {
struct dss_pll pll;
@@ -131,6 +130,8 @@ static const struct dss_pll_hw dss_dra7_video_pll_hw = {
.mX_lsb[3] = 5,
.has_refsel = true,
+
+ .errata_i886 = true,
};
struct dss_pll *dss_video_pll_init(struct platform_device *pdev, int id,
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index c24b6b783e9a..aa5ba9ae2191 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -35,6 +35,23 @@ struct omap_connector {
bool hdmi_mode;
};
+static void omap_connector_hpd_cb(void *cb_data,
+ enum drm_connector_status status)
+{
+ struct omap_connector *omap_connector = cb_data;
+ struct drm_connector *connector = &omap_connector->base;
+ struct drm_device *dev = connector->dev;
+ enum drm_connector_status old_status;
+
+ mutex_lock(&dev->mode_config.mutex);
+ old_status = connector->status;
+ connector->status = status;
+ mutex_unlock(&dev->mode_config.mutex);
+
+ if (old_status != status)
+ drm_kms_helper_hotplug_event(dev);
+}
+
bool omap_connector_get_hdmi_mode(struct drm_connector *connector)
{
struct omap_connector *omap_connector = to_omap_connector(connector);
@@ -75,6 +92,10 @@ static void omap_connector_destroy(struct drm_connector *connector)
struct omap_dss_device *dssdev = omap_connector->dssdev;
DBG("%s", omap_connector->dssdev->name);
+ if (connector->polled == DRM_CONNECTOR_POLL_HPD &&
+ dssdev->driver->unregister_hpd_cb) {
+ dssdev->driver->unregister_hpd_cb(dssdev);
+ }
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(omap_connector);
@@ -195,7 +216,6 @@ static int omap_connector_mode_valid(struct drm_connector *connector,
}
static const struct drm_connector_funcs omap_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.reset = drm_atomic_helper_connector_reset,
.detect = omap_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -216,6 +236,7 @@ struct drm_connector *omap_connector_init(struct drm_device *dev,
{
struct drm_connector *connector = NULL;
struct omap_connector *omap_connector;
+ bool hpd_supported = false;
DBG("%s", dssdev->name);
@@ -233,7 +254,20 @@ struct drm_connector *omap_connector_init(struct drm_device *dev,
connector_type);
drm_connector_helper_add(connector, &omap_connector_helper_funcs);
- if (dssdev->driver->detect)
+ if (dssdev->driver->register_hpd_cb) {
+ int ret = dssdev->driver->register_hpd_cb(dssdev,
+ omap_connector_hpd_cb,
+ omap_connector);
+ if (!ret)
+ hpd_supported = true;
+ else if (ret != -ENOTSUPP)
+ DBG("%s: Failed to register HPD callback (%d).",
+ dssdev->name, ret);
+ }
+
+ if (hpd_supported)
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ else if (dssdev->driver->detect)
connector->polled = DRM_CONNECTOR_POLL_CONNECT |
DRM_CONNECTOR_POLL_DISCONNECT;
else
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index dd0ef40ca469..cc85c16cbc2a 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -26,6 +26,16 @@
#include "omap_drv.h"
+#define to_omap_crtc_state(x) container_of(x, struct omap_crtc_state, base)
+
+struct omap_crtc_state {
+ /* Must be first. */
+ struct drm_crtc_state base;
+ /* Shadow values for legacy userspace support. */
+ unsigned int rotation;
+ unsigned int zpos;
+};
+
#define to_omap_crtc(x) container_of(x, struct omap_crtc, base)
struct omap_crtc {
@@ -356,7 +366,8 @@ static void omap_crtc_arm_event(struct drm_crtc *crtc)
}
}
-static void omap_crtc_enable(struct drm_crtc *crtc)
+static void omap_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
int ret;
@@ -372,7 +383,8 @@ static void omap_crtc_enable(struct drm_crtc *crtc)
spin_unlock_irq(&crtc->dev->event_lock);
}
-static void omap_crtc_disable(struct drm_crtc *crtc)
+static void omap_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
@@ -443,6 +455,8 @@ static void omap_crtc_mode_set_nofb(struct drm_crtc *crtc)
static int omap_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
+ struct drm_plane_state *pri_state;
+
if (state->color_mgmt_changed && state->gamma_lut) {
uint length = state->gamma_lut->length /
sizeof(struct drm_color_lut);
@@ -451,6 +465,16 @@ static int omap_crtc_atomic_check(struct drm_crtc *crtc,
return -EINVAL;
}
+ pri_state = drm_atomic_get_new_plane_state(state->state, crtc->primary);
+ if (pri_state) {
+ struct omap_crtc_state *omap_crtc_state =
+ to_omap_crtc_state(state);
+
+ /* Mirror new values for zpos and rotation in omap_crtc_state */
+ omap_crtc_state->zpos = pri_state->zpos;
+ omap_crtc_state->rotation = pri_state->rotation;
+ }
+
return 0;
}
@@ -496,39 +520,32 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
spin_unlock_irq(&crtc->dev->event_lock);
}
-static bool omap_crtc_is_plane_prop(struct drm_crtc *crtc,
- struct drm_property *property)
-{
- struct drm_device *dev = crtc->dev;
- struct omap_drm_private *priv = dev->dev_private;
-
- return property == priv->zorder_prop ||
- property == crtc->primary->rotation_property;
-}
-
static int omap_crtc_atomic_set_property(struct drm_crtc *crtc,
struct drm_crtc_state *state,
struct drm_property *property,
uint64_t val)
{
- if (omap_crtc_is_plane_prop(crtc, property)) {
- struct drm_plane_state *plane_state;
- struct drm_plane *plane = crtc->primary;
-
- /*
- * Delegate property set to the primary plane. Get the plane
- * state and set the property directly.
- */
-
- plane_state = drm_atomic_get_plane_state(state->state, plane);
- if (IS_ERR(plane_state))
- return PTR_ERR(plane_state);
+ struct omap_drm_private *priv = crtc->dev->dev_private;
+ struct drm_plane_state *plane_state;
- return drm_atomic_plane_set_property(plane, plane_state,
- property, val);
- }
+ /*
+ * Delegate property set to the primary plane. Get the plane state and
+ * set the property directly, the shadow copy will be assigned in the
+ * omap_crtc_atomic_check callback. This way updates to plane state will
+ * always be mirrored in the crtc state correctly.
+ */
+ plane_state = drm_atomic_get_plane_state(state->state, crtc->primary);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+
+ if (property == crtc->primary->rotation_property)
+ plane_state->rotation = val;
+ else if (property == priv->zorder_prop)
+ plane_state->zpos = val;
+ else
+ return -EINVAL;
- return -EINVAL;
+ return 0;
}
static int omap_crtc_atomic_get_property(struct drm_crtc *crtc,
@@ -536,28 +553,60 @@ static int omap_crtc_atomic_get_property(struct drm_crtc *crtc,
struct drm_property *property,
uint64_t *val)
{
- if (omap_crtc_is_plane_prop(crtc, property)) {
- /*
- * Delegate property get to the primary plane. The
- * drm_atomic_plane_get_property() function isn't exported, but
- * can be called through drm_object_property_get_value() as that
- * will call drm_atomic_get_property() for atomic drivers.
- */
- return drm_object_property_get_value(&crtc->primary->base,
- property, val);
- }
+ struct omap_drm_private *priv = crtc->dev->dev_private;
+ struct omap_crtc_state *omap_state = to_omap_crtc_state(state);
+
+ if (property == crtc->primary->rotation_property)
+ *val = omap_state->rotation;
+ else if (property == priv->zorder_prop)
+ *val = omap_state->zpos;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static void omap_crtc_reset(struct drm_crtc *crtc)
+{
+ if (crtc->state)
+ __drm_atomic_helper_crtc_destroy_state(crtc->state);
+
+ kfree(crtc->state);
+ crtc->state = kzalloc(sizeof(struct omap_crtc_state), GFP_KERNEL);
+
+ if (crtc->state)
+ crtc->state->crtc = crtc;
+}
+
+static struct drm_crtc_state *
+omap_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+ struct omap_crtc_state *state, *current_state;
+
+ if (WARN_ON(!crtc->state))
+ return NULL;
+
+ current_state = to_omap_crtc_state(crtc->state);
+
+ state = kmalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
+
+ state->zpos = current_state->zpos;
+ state->rotation = current_state->rotation;
- return -EINVAL;
+ return &state->base;
}
static const struct drm_crtc_funcs omap_crtc_funcs = {
- .reset = drm_atomic_helper_crtc_reset,
+ .reset = omap_crtc_reset,
.set_config = drm_atomic_helper_set_config,
.destroy = omap_crtc_destroy,
.page_flip = drm_atomic_helper_page_flip,
.gamma_set = drm_atomic_helper_legacy_gamma_set,
- .set_property = drm_atomic_helper_crtc_set_property,
- .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_duplicate_state = omap_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.atomic_set_property = omap_crtc_atomic_set_property,
.atomic_get_property = omap_crtc_atomic_get_property,
@@ -567,11 +616,11 @@ static const struct drm_crtc_funcs omap_crtc_funcs = {
static const struct drm_crtc_helper_funcs omap_crtc_helper_funcs = {
.mode_set_nofb = omap_crtc_mode_set_nofb,
- .disable = omap_crtc_disable,
- .enable = omap_crtc_enable,
.atomic_check = omap_crtc_atomic_check,
.atomic_begin = omap_crtc_atomic_begin,
.atomic_flush = omap_crtc_atomic_flush,
+ .atomic_enable = omap_crtc_atomic_enable,
+ .atomic_disable = omap_crtc_atomic_disable,
};
/* -----------------------------------------------------------------------------
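omap_crtc_state above wraps drm_crtc_state with shadow copies of zpos and rotation: because the base struct is the first member, the state pointer handed around by the atomic helpers can be downcast with container_of(), and the custom .reset/.atomic_duplicate_state hooks allocate the larger structure so the extra fields survive state duplication. The user-space sketch below shows that embedding pattern with hypothetical names; it is a stand-in for the idea, not for the DRM helpers.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base_state {		/* stands in for drm_crtc_state */
	int enabled;
};

struct derived_state {		/* stands in for omap_crtc_state */
	struct base_state base;	/* must be first so the cast stays valid */
	unsigned int zpos;
	unsigned int rotation;
};

/* "reset": allocate the derived object but hand out the base pointer. */
static struct base_state *state_reset(void)
{
	struct derived_state *s = calloc(1, sizeof(*s));

	return s ? &s->base : NULL;
}

/* "duplicate": copy base and shadow fields into a fresh derived object. */
static struct base_state *state_duplicate(struct base_state *old)
{
	struct derived_state *cur =
		container_of(old, struct derived_state, base);
	struct derived_state *s = malloc(sizeof(*s));

	if (!s)
		return NULL;
	memcpy(s, cur, sizeof(*s));
	return &s->base;
}

int main(void)
{
	struct base_state *b = state_reset();
	struct derived_state *d;
	struct base_state *copy;

	if (!b)
		return 1;
	d = container_of(b, struct derived_state, base);
	d->zpos = 3;
	d->rotation = 90;

	copy = state_duplicate(b);
	if (!copy)
		return 1;
	assert(container_of(copy, struct derived_state, base)->zpos == 3);
	printf("duplicated zpos=%u rotation=%u\n",
	       container_of(copy, struct derived_state, base)->zpos,
	       container_of(copy, struct derived_state, base)->rotation);

	free(copy);
	free(b);	/* base is the first member, so this frees the whole object */
	return 0;
}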
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 022029ea6972..cdf5b0601eba 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -57,13 +57,13 @@ static void omap_fb_output_poll_changed(struct drm_device *dev)
static void omap_atomic_wait_for_completion(struct drm_device *dev,
struct drm_atomic_state *old_state)
{
- struct drm_crtc_state *old_crtc_state;
+ struct drm_crtc_state *new_crtc_state;
struct drm_crtc *crtc;
unsigned int i;
int ret;
- for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
- if (!crtc->state->enable)
+ for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
+ if (!new_crtc_state->active)
continue;
ret = omap_crtc_wait_pending(crtc);
@@ -84,23 +84,36 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state)
/* Apply the atomic update. */
drm_atomic_helper_commit_modeset_disables(dev, old_state);
- /* With the current dss dispc implementation we have to enable
- * the new modeset before we can commit planes. The dispc ovl
- * configuration relies on the video mode configuration been
- * written into the HW when the ovl configuration is
- * calculated.
- *
- * This approach is not ideal because after a mode change the
- * plane update is executed only after the first vblank
- * interrupt. The dispc implementation should be fixed so that
- * it is able use uncommitted drm state information.
- */
- drm_atomic_helper_commit_modeset_enables(dev, old_state);
- omap_atomic_wait_for_completion(dev, old_state);
-
- drm_atomic_helper_commit_planes(dev, old_state, 0);
-
- drm_atomic_helper_commit_hw_done(old_state);
+ if (priv->omaprev != 0x3430) {
+ /* With the current dss dispc implementation we have to enable
+ * the new modeset before we can commit planes. The dispc ovl
+ * configuration relies on the video mode configuration been
+ * written into the HW when the ovl configuration is
+ * calculated.
+ *
+ * This approach is not ideal because after a mode change the
+ * plane update is executed only after the first vblank
+ * interrupt. The dispc implementation should be fixed so that
+ * it is able use uncommitted drm state information.
+ */
+ drm_atomic_helper_commit_modeset_enables(dev, old_state);
+ omap_atomic_wait_for_completion(dev, old_state);
+
+ drm_atomic_helper_commit_planes(dev, old_state, 0);
+
+ drm_atomic_helper_commit_hw_done(old_state);
+ } else {
+ /*
+ * OMAP3 DSS seems to have issues with the work-around above,
+ * resulting in endless sync losts if a crtc is enabled without
+ * a plane. For now, skip the WA for OMAP3.
+ */
+ drm_atomic_helper_commit_planes(dev, old_state, 0);
+
+ drm_atomic_helper_commit_modeset_enables(dev, old_state);
+
+ drm_atomic_helper_commit_hw_done(old_state);
+ }
/*
* Wait for completion of the page flips to ensure that old buffers
@@ -324,6 +337,32 @@ static int omap_modeset_init(struct drm_device *dev)
}
/*
+ * Enable the HPD in external components if supported
+ */
+static void omap_modeset_enable_external_hpd(void)
+{
+ struct omap_dss_device *dssdev = NULL;
+
+ for_each_dss_dev(dssdev) {
+ if (dssdev->driver->enable_hpd)
+ dssdev->driver->enable_hpd(dssdev);
+ }
+}
+
+/*
+ * Disable the HPD in external components if supported
+ */
+static void omap_modeset_disable_external_hpd(void)
+{
+ struct omap_dss_device *dssdev = NULL;
+
+ for_each_dss_dev(dssdev) {
+ if (dssdev->driver->disable_hpd)
+ dssdev->driver->disable_hpd(dssdev);
+ }
+}
+
+/*
* drm ioctl funcs
*/
@@ -438,44 +477,11 @@ static int dev_open(struct drm_device *dev, struct drm_file *file)
*/
static void dev_lastclose(struct drm_device *dev)
{
- int i;
-
- /* we don't support vga_switcheroo.. so just make sure the fbdev
- * mode is active
- */
struct omap_drm_private *priv = dev->dev_private;
int ret;
DBG("lastclose: dev=%p", dev);
- /* need to restore default rotation state.. not sure
- * if there is a cleaner way to restore properties to
- * default state? Maybe a flag that properties should
- * automatically be restored to default state on
- * lastclose?
- */
- for (i = 0; i < priv->num_crtcs; i++) {
- struct drm_crtc *crtc = priv->crtcs[i];
-
- if (!crtc->primary->rotation_property)
- continue;
-
- drm_object_property_set_value(&crtc->base,
- crtc->primary->rotation_property,
- DRM_MODE_ROTATE_0);
- }
-
- for (i = 0; i < priv->num_planes; i++) {
- struct drm_plane *plane = priv->planes[i];
-
- if (!plane->rotation_property)
- continue;
-
- drm_object_property_set_value(&plane->base,
- plane->rotation_property,
- DRM_MODE_ROTATE_0);
- }
-
if (priv->fbdev) {
ret = drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
if (ret)
@@ -517,7 +523,6 @@ static struct drm_driver omap_drm_driver = {
.gem_vm_ops = &omap_gem_vm_ops,
.dumb_create = omap_gem_dumb_create,
.dumb_map_offset = omap_gem_dumb_map_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
.ioctls = ioctls,
.num_ioctls = DRM_OMAP_NUM_IOCTLS,
.fops = &omapdriver_fops,
@@ -550,6 +555,12 @@ static int pdev_probe(struct platform_device *pdev)
if (omapdss_is_initialized() == false)
return -EPROBE_DEFER;
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to set the DMA mask\n");
+ return ret;
+ }
+
omap_crtc_pre_init();
ret = omap_connect_dssdevs();
@@ -603,6 +614,7 @@ static int pdev_probe(struct platform_device *pdev)
priv->fbdev = omap_fbdev_init(ddev);
drm_kms_helper_poll_init(ddev);
+ omap_modeset_enable_external_hpd();
/*
* Register the DRM device with the core and the connectors with
@@ -615,6 +627,7 @@ static int pdev_probe(struct platform_device *pdev)
return 0;
err_cleanup_helpers:
+ omap_modeset_disable_external_hpd();
drm_kms_helper_poll_fini(ddev);
if (priv->fbdev)
omap_fbdev_free(ddev);
@@ -643,6 +656,7 @@ static int pdev_remove(struct platform_device *pdev)
drm_dev_unregister(ddev);
+ omap_modeset_disable_external_hpd();
drm_kms_helper_poll_fini(ddev);
if (priv->fbdev)
@@ -734,7 +748,7 @@ static SIMPLE_DEV_PM_OPS(omapdrm_pm_ops, omap_drm_suspend, omap_drm_resume);
static struct platform_driver pdev = {
.driver = {
- .name = DRIVER_NAME,
+ .name = "omapdrm",
.pm = &omapdrm_pm_ops,
},
.probe = pdev_probe,
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 86c977b7189a..624f5b50b755 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -85,7 +85,8 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
if (hdmi_mode && dssdev->driver->set_hdmi_infoframe) {
struct hdmi_avi_infoframe avi;
- r = drm_hdmi_avi_infoframe_from_display_mode(&avi, adjusted_mode);
+ r = drm_hdmi_avi_infoframe_from_display_mode(&avi, adjusted_mode,
+ false);
if (r == 0)
dssdev->driver->set_hdmi_infoframe(dssdev, &avi);
}
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index ddf7a457951b..b1a762b70cbf 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -379,7 +379,7 @@ struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
return fb;
error:
- while (--i > 0)
+ while (--i >= 0)
drm_gem_object_unreference_unlocked(bos[i]);
return fb;
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index daf81a0a2899..9273118040b7 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -184,7 +184,6 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
helper->fb = fb;
fbi->par = helper;
- fbi->flags = FBINFO_DEFAULT;
fbi->fbops = &omap_fb_ops;
strcpy(fbi->fix.id, MODULE_NAME);
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index 863a881dd7cd..afdbad5c866a 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -144,7 +144,7 @@ static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
return omap_gem_mmap_obj(obj, vma);
}
-static struct dma_buf_ops omap_dmabuf_ops = {
+static const struct dma_buf_ops omap_dmabuf_ops = {
.map_dma_buf = omap_gem_map_dma_buf,
.unmap_dma_buf = omap_gem_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 2160f64548e0..15e5d5d325c6 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -235,7 +235,6 @@ static const struct drm_plane_funcs omap_plane_funcs = {
.disable_plane = drm_atomic_helper_disable_plane,
.reset = omap_plane_reset,
.destroy = omap_plane_destroy,
- .set_property = drm_atomic_helper_plane_set_property,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
.atomic_set_property = omap_plane_atomic_set_property,
@@ -291,7 +290,7 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
ret = drm_universal_plane_init(dev, plane, possible_crtcs,
&omap_plane_funcs, formats,
- nformats, type, NULL);
+ nformats, NULL, type, NULL);
if (ret < 0)
goto error;
diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c
index 3216aa9a88d6..e2d57c01200b 100644
--- a/drivers/gpu/drm/panel/panel-lvds.c
+++ b/drivers/gpu/drm/panel/panel-lvds.c
@@ -143,14 +143,14 @@ static int panel_lvds_parse_dt(struct panel_lvds *lvds)
ret = of_property_read_u32(np, "width-mm", &lvds->width);
if (ret < 0) {
- dev_err(lvds->dev, "%s: invalid or missing %s DT property\n",
- of_node_full_name(np), "width-mm");
+ dev_err(lvds->dev, "%pOF: invalid or missing %s DT property\n",
+ np, "width-mm");
return -ENODEV;
}
ret = of_property_read_u32(np, "height-mm", &lvds->height);
if (ret < 0) {
- dev_err(lvds->dev, "%s: invalid or missing %s DT property\n",
- of_node_full_name(np), "height-mm");
+ dev_err(lvds->dev, "%pOF: invalid or missing %s DT property\n",
+ np, "height-mm");
return -ENODEV;
}
@@ -158,8 +158,8 @@ static int panel_lvds_parse_dt(struct panel_lvds *lvds)
ret = of_property_read_string(np, "data-mapping", &mapping);
if (ret < 0) {
- dev_err(lvds->dev, "%s: invalid or missing %s DT property\n",
- of_node_full_name(np), "data-mapping");
+ dev_err(lvds->dev, "%pOF: invalid or missing %s DT property\n",
+ np, "data-mapping");
return -ENODEV;
}
@@ -170,8 +170,8 @@ static int panel_lvds_parse_dt(struct panel_lvds *lvds)
} else if (!strcmp(mapping, "vesa-24")) {
lvds->bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG;
} else {
- dev_err(lvds->dev, "%s: invalid or missing %s DT property\n",
- of_node_full_name(np), "data-mapping");
+ dev_err(lvds->dev, "%pOF: invalid or missing %s DT property\n",
+ np, "data-mapping");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/pl111/pl111_connector.c b/drivers/gpu/drm/pl111/pl111_connector.c
index 3f213d7e7692..d335f9a29ce4 100644
--- a/drivers/gpu/drm/pl111/pl111_connector.c
+++ b/drivers/gpu/drm/pl111/pl111_connector.c
@@ -69,7 +69,6 @@ const struct drm_connector_funcs connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = pl111_connector_destroy,
.detect = pl111_connector_detect,
- .dpms = drm_atomic_helper_connector_dpms,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c
index c6ca4f1bbd49..b58c988d9da0 100644
--- a/drivers/gpu/drm/pl111/pl111_display.c
+++ b/drivers/gpu/drm/pl111/pl111_display.c
@@ -23,6 +23,7 @@
#include <drm/drmP.h>
#include <drm/drm_panel.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include "pl111_drm.h"
@@ -274,7 +275,7 @@ void pl111_disable_vblank(struct drm_device *drm, unsigned int crtc)
static int pl111_display_prepare_fb(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *plane_state)
{
- return drm_fb_cma_prepare_fb(&pipe->plane, plane_state);
+ return drm_gem_fb_prepare_fb(&pipe->plane, plane_state);
}
static const struct drm_simple_display_pipe_funcs pl111_display_funcs = {
@@ -457,7 +458,7 @@ int pl111_display_init(struct drm_device *drm)
ret = drm_simple_display_pipe_init(drm, &priv->pipe,
&pl111_display_funcs,
formats, ARRAY_SIZE(formats),
- &priv->connector.connector);
+ NULL, &priv->connector.connector);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c
index ac8771be70b0..581c452cede1 100644
--- a/drivers/gpu/drm/pl111/pl111_drv.c
+++ b/drivers/gpu/drm/pl111/pl111_drv.c
@@ -66,14 +66,15 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include "pl111_drm.h"
#define DRIVER_DESC "DRM module for PL111"
-static struct drm_mode_config_funcs mode_config_funcs = {
- .fb_create = drm_fb_cma_create,
+static const struct drm_mode_config_funcs mode_config_funcs = {
+ .fb_create = drm_gem_fb_create,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
@@ -159,9 +160,7 @@ static struct drm_driver pl111_drm_driver = {
.minor = 0,
.patchlevel = 0,
.dumb_create = drm_gem_cma_dumb_create,
- .dumb_destroy = drm_gem_dumb_destroy,
- .dumb_map_offset = drm_gem_cma_dumb_map_offset,
- .gem_free_object = drm_gem_cma_free_object,
+ .gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.enable_vblank = pl111_enable_vblank,
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 03fe182203ce..14c5613b4388 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -378,10 +378,6 @@ qxl_framebuffer_init(struct drm_device *dev,
return 0;
}
-static void qxl_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
-}
-
static bool qxl_crtc_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -437,7 +433,7 @@ static void qxl_monitors_config_set(struct qxl_device *qdev,
}
-void qxl_mode_set_nofb(struct drm_crtc *crtc)
+static void qxl_mode_set_nofb(struct drm_crtc *crtc)
{
struct qxl_device *qdev = crtc->dev->dev_private;
struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
@@ -451,12 +447,14 @@ void qxl_mode_set_nofb(struct drm_crtc *crtc)
}
-static void qxl_crtc_commit(struct drm_crtc *crtc)
+static void qxl_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
DRM_DEBUG("\n");
}
-static void qxl_crtc_disable(struct drm_crtc *crtc)
+static void qxl_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
struct qxl_device *qdev = crtc->dev->dev_private;
@@ -467,16 +465,15 @@ static void qxl_crtc_disable(struct drm_crtc *crtc)
}
static const struct drm_crtc_helper_funcs qxl_crtc_helper_funcs = {
- .dpms = qxl_crtc_dpms,
- .disable = qxl_crtc_disable,
.mode_fixup = qxl_crtc_mode_fixup,
.mode_set_nofb = qxl_mode_set_nofb,
- .commit = qxl_crtc_commit,
.atomic_flush = qxl_crtc_atomic_flush,
+ .atomic_enable = qxl_crtc_atomic_enable,
+ .atomic_disable = qxl_crtc_atomic_disable,
};
-int qxl_primary_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+static int qxl_primary_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
{
struct qxl_device *qdev = plane->dev->dev_private;
struct qxl_framebuffer *qfb;
@@ -547,8 +544,8 @@ static void qxl_primary_atomic_disable(struct drm_plane *plane,
}
}
-int qxl_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+static int qxl_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
{
return 0;
}
@@ -647,8 +644,8 @@ out_free_release:
}
-void qxl_cursor_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+static void qxl_cursor_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
{
struct qxl_device *qdev = plane->dev->dev_private;
struct qxl_release *release;
@@ -675,8 +672,8 @@ void qxl_cursor_atomic_disable(struct drm_plane *plane,
qxl_release_fence_buffer_objects(release);
}
-int qxl_plane_prepare_fb(struct drm_plane *plane,
- struct drm_plane_state *new_state)
+static int qxl_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
{
struct drm_gem_object *obj;
struct qxl_bo *user_bo;
@@ -787,7 +784,7 @@ static struct drm_plane *qxl_create_plane(struct qxl_device *qdev,
err = drm_universal_plane_init(&qdev->ddev, plane, possible_crtcs,
funcs, formats, num_formats,
- type, NULL);
+ NULL, type, NULL);
if (err)
goto free_plane;
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index c2fc201d9e1b..2445e75cf7ea 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -37,7 +37,6 @@
#include "qxl_drv.h"
#include "qxl_object.h"
-extern int qxl_max_ioctls;
static const struct pci_device_id pciidlist[] = {
{ 0x1b36, 0x100, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8,
0xffff00, 0 },
@@ -262,11 +261,8 @@ static struct drm_driver qxl_driver = {
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
DRIVER_ATOMIC,
- .set_busid = drm_pci_set_busid,
-
.dumb_create = qxl_mode_dumb_create,
.dumb_map_offset = qxl_mode_dumb_mmap,
- .dumb_destroy = drm_gem_dumb_destroy,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = qxl_debugfs_init,
#endif
@@ -303,12 +299,12 @@ static int __init qxl_init(void)
if (qxl_modeset == 0)
return -EINVAL;
qxl_driver.num_ioctls = qxl_max_ioctls;
- return drm_pci_init(&qxl_driver, &qxl_pci_driver);
+ return pci_register_driver(&qxl_pci_driver);
}
static void __exit qxl_exit(void)
{
- drm_pci_exit(&qxl_driver, &qxl_pci_driver);
+ pci_unregister_driver(&qxl_pci_driver);
}
module_init(qxl_init);
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 3591d2330a09..3397a1907336 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -64,6 +64,7 @@
extern int qxl_log_level;
extern int qxl_num_crtc;
+extern int qxl_max_ioctls;
enum {
QXL_INFO_LEVEL = 1,
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 573e7e9a5f98..844c4a31ca13 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -275,7 +275,6 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
- info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT;
info->fbops = &qxlfb_ops;
/*
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 0b82a87916ae..31effed4a3c8 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -163,7 +163,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
return -EINVAL;
if (!access_ok(VERIFY_READ,
- (void *)(unsigned long)cmd->command,
+ u64_to_user_ptr(cmd->command),
cmd->command_size))
return -EFAULT;
@@ -183,7 +183,9 @@ static int qxl_process_single_command(struct qxl_device *qdev,
/* TODO copy slow path code from i915 */
fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
- unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size);
+ unwritten = __copy_from_user_inatomic_nocache
+ (fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE),
+ u64_to_user_ptr(cmd->command), cmd->command_size);
{
struct qxl_drawable *draw = fb_cmd;
@@ -201,10 +203,9 @@ static int qxl_process_single_command(struct qxl_device *qdev,
num_relocs = 0;
for (i = 0; i < cmd->relocs_num; ++i) {
struct drm_qxl_reloc reloc;
+ struct drm_qxl_reloc __user *u = u64_to_user_ptr(cmd->relocs);
- if (copy_from_user(&reloc,
- &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
- sizeof(reloc))) {
+ if (copy_from_user(&reloc, u + i, sizeof(reloc))) {
ret = -EFAULT;
goto out_free_bos;
}
@@ -282,10 +283,10 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
- struct drm_qxl_command *commands =
- (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
+ struct drm_qxl_command __user *commands =
+ u64_to_user_ptr(execbuffer->commands);
- if (copy_from_user(&user_cmd, &commands[cmd_num],
+ if (copy_from_user(&user_cmd, commands + cmd_num,
sizeof(user_cmd)))
return -EFAULT;
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index 9a7eef7dd604..0a67ddf19c3d 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -221,7 +221,7 @@ struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
return bo;
}
-int __qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
+static int __qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
{
struct drm_device *ddev = bo->gem_base.dev;
int r;
@@ -244,7 +244,7 @@ int __qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
return r;
}
-int __qxl_bo_unpin(struct qxl_bo *bo)
+static int __qxl_bo_unpin(struct qxl_bo *bo)
{
struct drm_device *ddev = bo->gem_base.dev;
int r, i;
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 87fc1dbd0a2f..7ecf8a4b9fe6 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -187,7 +187,7 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
struct qxl_bo *qbo;
- static struct ttm_place placements = {
+ static const struct ttm_place placements = {
.fpfn = 0,
.lpfn = 0,
.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
index a982be57d1ef..0d2b7e42b3a7 100644
--- a/drivers/gpu/drm/r128/r128_drv.c
+++ b/drivers/gpu/drm/r128/r128_drv.c
@@ -62,7 +62,6 @@ static struct drm_driver driver = {
.load = r128_driver_load,
.preclose = r128_driver_preclose,
.lastclose = r128_driver_lastclose,
- .set_busid = drm_pci_set_busid,
.get_vblank_counter = r128_get_vblank_counter,
.enable_vblank = r128_enable_vblank,
.disable_vblank = r128_disable_vblank,
@@ -96,12 +95,12 @@ static int __init r128_init(void)
{
driver.num_ioctls = r128_max_ioctl;
- return drm_pci_init(&driver, &r128_pci_driver);
+ return drm_legacy_pci_init(&driver, &r128_pci_driver);
}
static void __exit r128_exit(void)
{
- drm_pci_exit(&driver, &r128_pci_driver);
+ drm_legacy_pci_exit(&driver, &r128_pci_driver);
}
module_init(r128_init);
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 3c492a0aa6bd..02baaaf20e9d 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -2217,7 +2217,6 @@ static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
.mode_set_base_atomic = atombios_crtc_set_base_atomic,
.prepare = atombios_crtc_prepare,
.commit = atombios_crtc_commit,
- .load_lut = radeon_crtc_load_lut,
.disable = atombios_crtc_disable,
};
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 5008f3d4cccc..ec63bc5e9de7 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -464,7 +464,7 @@ struct radeon_bo_list {
struct radeon_bo *robj;
struct ttm_validate_buffer tv;
uint64_t gpu_offset;
- unsigned prefered_domains;
+ unsigned preferred_domains;
unsigned allowed_domains;
uint32_t tiling_flags;
};
@@ -2327,7 +2327,7 @@ struct radeon_device {
uint8_t *bios;
bool is_atom_bios;
uint16_t bios_header_start;
- struct radeon_bo *stollen_vga_memory;
+ struct radeon_bo *stolen_vga_memory;
/* Register mmio */
resource_size_t rmmio_base;
resource_size_t rmmio_size;
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
index 6efbd65c929e..8d3251a10cd4 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.c
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -351,7 +351,7 @@ out:
* handles it.
* Returns NOTIFY code
*/
-int radeon_atif_handler(struct radeon_device *rdev,
+static int radeon_atif_handler(struct radeon_device *rdev,
struct acpi_bus_event *event)
{
struct radeon_atif *atif = &rdev->atif;
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.h b/drivers/gpu/drm/radeon/radeon_acpi.h
index 7af1977c2c68..35202a453e66 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.h
+++ b/drivers/gpu/drm/radeon/radeon_acpi.h
@@ -27,9 +27,6 @@
struct radeon_device;
struct acpi_bus_event;
-int radeon_atif_handler(struct radeon_device *rdev,
- struct acpi_bus_event *event);
-
/* AMD hw uses four ACPI control methods:
* 1. ATIF
* ARG0: (ACPI_INTEGER) function code
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index aaacac190d26..770e31f5fd1b 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -516,7 +516,7 @@ static int radeon_audio_set_avi_packet(struct drm_encoder *encoder,
if (!connector)
return -EINVAL;
- err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
if (err < 0) {
DRM_ERROR("failed to setup AVI infoframe: %d\n", err);
return err;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 27affbde058c..2f642cbefd8e 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -773,12 +773,15 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct
if (connector->encoder->crtc) {
struct drm_crtc *crtc = connector->encoder->crtc;
- const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
radeon_crtc->output_csc = radeon_encoder->output_csc;
- (*crtc_funcs->load_lut)(crtc);
+ /*
+ * Our .gamma_set assumes the .gamma_store has been
+ * prefilled and don't care about its arguments.
+ */
+ crtc->funcs->gamma_set(crtc, NULL, NULL, NULL, 0, NULL);
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 00b22af70f5c..1ae31dbc61c6 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -130,7 +130,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
p->rdev->family == CHIP_RS880)) {
/* TODO: is this still needed for NI+ ? */
- p->relocs[i].prefered_domains =
+ p->relocs[i].preferred_domains =
RADEON_GEM_DOMAIN_VRAM;
p->relocs[i].allowed_domains =
@@ -148,14 +148,14 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
return -EINVAL;
}
- p->relocs[i].prefered_domains = domain;
+ p->relocs[i].preferred_domains = domain;
if (domain == RADEON_GEM_DOMAIN_VRAM)
domain |= RADEON_GEM_DOMAIN_GTT;
p->relocs[i].allowed_domains = domain;
}
if (radeon_ttm_tt_has_userptr(p->relocs[i].robj->tbo.ttm)) {
- uint32_t domain = p->relocs[i].prefered_domains;
+ uint32_t domain = p->relocs[i].preferred_domains;
if (!(domain & RADEON_GEM_DOMAIN_GTT)) {
DRM_ERROR("Only RADEON_GEM_DOMAIN_GTT is "
"allowed for userptr BOs\n");
@@ -163,7 +163,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
}
need_mmap_lock = true;
domain = RADEON_GEM_DOMAIN_GTT;
- p->relocs[i].prefered_domains = domain;
+ p->relocs[i].preferred_domains = domain;
p->relocs[i].allowed_domains = domain;
}
@@ -437,7 +437,7 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
if (bo == NULL)
continue;
- drm_gem_object_unreference_unlocked(&bo->gem_base);
+ drm_gem_object_put_unlocked(&bo->gem_base);
}
}
kfree(parser->track);
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 4a4f9533c53b..91952277557e 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -307,7 +307,7 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
robj = gem_to_radeon_bo(obj);
ret = radeon_bo_reserve(robj, false);
if (ret != 0) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
/* Only 27 bit offset for legacy cursor */
@@ -317,7 +317,7 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
radeon_bo_unreserve(robj);
if (ret) {
DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -352,7 +352,7 @@ unpin:
radeon_bo_unpin(robj);
radeon_bo_unreserve(robj);
}
- drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
+ drm_gem_object_put_unlocked(radeon_crtc->cursor_bo);
}
radeon_crtc->cursor_bo = obj;
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 17d3dafc8319..ddfe91efa61e 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -42,6 +42,7 @@ static void avivo_crtc_load_lut(struct drm_crtc *crtc)
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
+ u16 *r, *g, *b;
int i;
DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
@@ -60,11 +61,14 @@ static void avivo_crtc_load_lut(struct drm_crtc *crtc)
WREG32(AVIVO_DC_LUT_WRITE_EN_MASK, 0x0000003f);
WREG8(AVIVO_DC_LUT_RW_INDEX, 0);
+ r = crtc->gamma_store;
+ g = r + crtc->gamma_size;
+ b = g + crtc->gamma_size;
for (i = 0; i < 256; i++) {
WREG32(AVIVO_DC_LUT_30_COLOR,
- (radeon_crtc->lut_r[i] << 20) |
- (radeon_crtc->lut_g[i] << 10) |
- (radeon_crtc->lut_b[i] << 0));
+ ((*r++ & 0xffc0) << 14) |
+ ((*g++ & 0xffc0) << 4) |
+ (*b++ >> 6));
}
/* Only change bit 0 of LUT_SEL, other bits are set elsewhere */
@@ -76,6 +80,7 @@ static void dce4_crtc_load_lut(struct drm_crtc *crtc)
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
+ u16 *r, *g, *b;
int i;
DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
@@ -93,11 +98,14 @@ static void dce4_crtc_load_lut(struct drm_crtc *crtc)
WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007);
WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0);
+ r = crtc->gamma_store;
+ g = r + crtc->gamma_size;
+ b = g + crtc->gamma_size;
for (i = 0; i < 256; i++) {
WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset,
- (radeon_crtc->lut_r[i] << 20) |
- (radeon_crtc->lut_g[i] << 10) |
- (radeon_crtc->lut_b[i] << 0));
+ ((*r++ & 0xffc0) << 14) |
+ ((*g++ & 0xffc0) << 4) |
+ (*b++ >> 6));
}
}
@@ -106,6 +114,7 @@ static void dce5_crtc_load_lut(struct drm_crtc *crtc)
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
+ u16 *r, *g, *b;
int i;
DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
@@ -135,11 +144,14 @@ static void dce5_crtc_load_lut(struct drm_crtc *crtc)
WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007);
WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0);
+ r = crtc->gamma_store;
+ g = r + crtc->gamma_size;
+ b = g + crtc->gamma_size;
for (i = 0; i < 256; i++) {
WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset,
- (radeon_crtc->lut_r[i] << 20) |
- (radeon_crtc->lut_g[i] << 10) |
- (radeon_crtc->lut_b[i] << 0));
+ ((*r++ & 0xffc0) << 14) |
+ ((*g++ & 0xffc0) << 4) |
+ (*b++ >> 6));
}
WREG32(NI_DEGAMMA_CONTROL + radeon_crtc->crtc_offset,
@@ -172,6 +184,7 @@ static void legacy_crtc_load_lut(struct drm_crtc *crtc)
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
+ u16 *r, *g, *b;
int i;
uint32_t dac2_cntl;
@@ -183,11 +196,14 @@ static void legacy_crtc_load_lut(struct drm_crtc *crtc)
WREG32(RADEON_DAC_CNTL2, dac2_cntl);
WREG8(RADEON_PALETTE_INDEX, 0);
+ r = crtc->gamma_store;
+ g = r + crtc->gamma_size;
+ b = g + crtc->gamma_size;
for (i = 0; i < 256; i++) {
WREG32(RADEON_PALETTE_30_DATA,
- (radeon_crtc->lut_r[i] << 20) |
- (radeon_crtc->lut_g[i] << 10) |
- (radeon_crtc->lut_b[i] << 0));
+ ((*r++ & 0xffc0) << 14) |
+ ((*g++ & 0xffc0) << 4) |
+ (*b++ >> 6));
}
}
@@ -209,41 +225,10 @@ void radeon_crtc_load_lut(struct drm_crtc *crtc)
legacy_crtc_load_lut(crtc);
}
-/** Sets the color ramps on behalf of fbcon */
-void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
- u16 blue, int regno)
-{
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
-
- radeon_crtc->lut_r[regno] = red >> 6;
- radeon_crtc->lut_g[regno] = green >> 6;
- radeon_crtc->lut_b[regno] = blue >> 6;
-}
-
-/** Gets the color ramps on behalf of fbcon */
-void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, int regno)
-{
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
-
- *red = radeon_crtc->lut_r[regno] << 6;
- *green = radeon_crtc->lut_g[regno] << 6;
- *blue = radeon_crtc->lut_b[regno] << 6;
-}
-
static int radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, uint32_t size,
struct drm_modeset_acquire_ctx *ctx)
{
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
- int i;
-
- /* userspace palettes are always correct as is */
- for (i = 0; i < size; i++) {
- radeon_crtc->lut_r[i] = red[i] >> 6;
- radeon_crtc->lut_g[i] = green[i] >> 6;
- radeon_crtc->lut_b[i] = blue[i] >> 6;
- }
radeon_crtc_load_lut(crtc);
return 0;
@@ -282,7 +267,7 @@ static void radeon_unpin_work_func(struct work_struct *__work)
} else
DRM_ERROR("failed to reserve buffer after flip\n");
- drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
+ drm_gem_object_put_unlocked(&work->old_rbo->gem_base);
kfree(work);
}
@@ -519,7 +504,7 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
obj = old_radeon_fb->obj;
/* take a reference to the old object */
- drm_gem_object_reference(obj);
+ drm_gem_object_get(obj);
work->old_rbo = gem_to_radeon_bo(obj);
new_radeon_fb = to_radeon_framebuffer(fb);
@@ -618,7 +603,7 @@ pflip_cleanup:
radeon_bo_unreserve(new_rbo);
cleanup:
- drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
+ drm_gem_object_put_unlocked(&work->old_rbo->gem_base);
dma_fence_put(work->fence);
kfree(work);
return r;
@@ -1303,7 +1288,7 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
- drm_gem_object_unreference_unlocked(radeon_fb->obj);
+ drm_gem_object_put_unlocked(radeon_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(radeon_fb);
}
@@ -1363,14 +1348,14 @@ radeon_user_framebuffer_create(struct drm_device *dev,
radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
if (radeon_fb == NULL) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ERR_PTR(-ENOMEM);
}
ret = radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
if (ret) {
kfree(radeon_fb);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ERR_PTR(ret);
}
@@ -1388,12 +1373,12 @@ static const struct drm_mode_config_funcs radeon_mode_funcs = {
.output_poll_changed = radeon_output_poll_changed
};
-static struct drm_prop_enum_list radeon_tmds_pll_enum_list[] =
+static const struct drm_prop_enum_list radeon_tmds_pll_enum_list[] =
{ { 0, "driver" },
{ 1, "bios" },
};
-static struct drm_prop_enum_list radeon_tv_std_enum_list[] =
+static const struct drm_prop_enum_list radeon_tv_std_enum_list[] =
{ { TV_STD_NTSC, "ntsc" },
{ TV_STD_PAL, "pal" },
{ TV_STD_PAL_M, "pal-m" },
@@ -1404,25 +1389,25 @@ static struct drm_prop_enum_list radeon_tv_std_enum_list[] =
{ TV_STD_SECAM, "secam" },
};
-static struct drm_prop_enum_list radeon_underscan_enum_list[] =
+static const struct drm_prop_enum_list radeon_underscan_enum_list[] =
{ { UNDERSCAN_OFF, "off" },
{ UNDERSCAN_ON, "on" },
{ UNDERSCAN_AUTO, "auto" },
};
-static struct drm_prop_enum_list radeon_audio_enum_list[] =
+static const struct drm_prop_enum_list radeon_audio_enum_list[] =
{ { RADEON_AUDIO_DISABLE, "off" },
{ RADEON_AUDIO_ENABLE, "on" },
{ RADEON_AUDIO_AUTO, "auto" },
};
/* XXX support different dither options? spatial, temporal, both, etc. */
-static struct drm_prop_enum_list radeon_dither_enum_list[] =
+static const struct drm_prop_enum_list radeon_dither_enum_list[] =
{ { RADEON_FMT_DITHER_DISABLE, "off" },
{ RADEON_FMT_DITHER_ENABLE, "on" },
};
-static struct drm_prop_enum_list radeon_output_csc_enum_list[] =
+static const struct drm_prop_enum_list radeon_output_csc_enum_list[] =
{ { RADEON_OUTPUT_CSC_BYPASS, "bypass" },
{ RADEON_OUTPUT_CSC_TVRGB, "tvrgb" },
{ RADEON_OUTPUT_CSC_YCBCR601, "ycbcr601" },
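For reference, the LUT hunks above all apply the same conversion: the DRM core keeps gamma ramps as 16-bit values in crtc->gamma_store (red, green and blue stored back to back, gamma_size entries each, as the r/g/b pointer setup shows), while the hardware LUT registers take one 30-bit word with 10 bits per channel at bit positions 20, 10 and 0. A minimal sketch of that packing (the helper name is made up for illustration and is not part of the patch):

/* Illustration only: pack one 16-bit-per-channel entry into a 10:10:10 word. */
static u32 pack_lut_word(u16 r, u16 g, u16 b)
{
	/* Keep the top 10 bits of each channel and place them at 20/10/0. */
	return ((u32)(r & 0xffc0) << 14) |	/* bits 29:20 */
	       ((u32)(g & 0xffc0) << 4) |	/* bits 19:10 */
	       ((u32)b >> 6);			/* bits 9:0 */
}

This matches what the removed lut_r/lut_g/lut_b arrays used to hold directly (each channel already truncated to 10 bits), which is why the fbdev gamma_set/gamma_get helpers and the per-CRTC copies can be dropped.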
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index 6598306dca9b..ebdf1b859cb6 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -300,9 +300,7 @@ static void radeon_dp_register_mst_connector(struct drm_connector *connector)
struct drm_device *dev = connector->dev;
struct radeon_device *rdev = dev->dev_private;
- drm_modeset_lock_all(dev);
radeon_fb_add_connector(rdev, connector);
- drm_modeset_unlock_all(dev);
drm_connector_register(connector);
}
@@ -315,13 +313,8 @@ static void radeon_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
struct radeon_device *rdev = dev->dev_private;
drm_connector_unregister(connector);
- /* need to nuke the connector */
- drm_modeset_lock_all(dev);
- /* dpms off */
radeon_fb_remove_connector(rdev, connector);
-
drm_connector_cleanup(connector);
- drm_modeset_unlock_all(dev);
kfree(connector);
DRM_DEBUG_KMS("\n");
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 74abd161237b..f4becad0a78c 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -567,7 +567,6 @@ static struct drm_driver kms_driver = {
.open = radeon_driver_open_kms,
.postclose = radeon_driver_postclose_kms,
.lastclose = radeon_driver_lastclose_kms,
- .set_busid = drm_pci_set_busid,
.unload = radeon_driver_unload_kms,
.get_vblank_counter = radeon_get_vblank_counter_kms,
.enable_vblank = radeon_enable_vblank_kms,
@@ -584,7 +583,6 @@ static struct drm_driver kms_driver = {
.gem_close_object = radeon_gem_object_close,
.dumb_create = radeon_mode_dumb_create,
.dumb_map_offset = radeon_mode_dumb_mmap,
- .dumb_destroy = drm_gem_dumb_destroy,
.fops = &radeon_driver_kms_fops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
@@ -642,14 +640,13 @@ static int __init radeon_init(void)
return -EINVAL;
}
- /* let modprobe override vga console setting */
- return drm_pci_init(driver, pdriver);
+ return pci_register_driver(pdriver);
}
static void __exit radeon_exit(void)
{
radeon_kfd_fini();
- drm_pci_exit(driver, pdriver);
+ pci_unregister_driver(pdriver);
radeon_unregister_atpx_handler();
}
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 356ad90a5238..fd25361ac681 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -118,7 +118,7 @@ static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
radeon_bo_unpin(rbo);
radeon_bo_unreserve(rbo);
}
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
}
static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
@@ -264,7 +264,6 @@ static int radeonfb_create(struct drm_fb_helper *helper,
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
- info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &radeonfb_ops;
tmp = radeon_bo_gpu_offset(rbo) - rdev->mc.vram_start;
@@ -300,7 +299,7 @@ out:
}
if (fb && ret) {
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
drm_framebuffer_unregister_private(fb);
drm_framebuffer_cleanup(fb);
kfree(fb);
@@ -332,8 +331,6 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb
}
static const struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
- .gamma_set = radeon_crtc_fb_gamma_set,
- .gamma_get = radeon_crtc_fb_gamma_get,
.fb_probe = radeonfb_create,
};
@@ -347,9 +344,12 @@ int radeon_fbdev_init(struct radeon_device *rdev)
if (list_empty(&rdev->ddev->mode_config.connector_list))
return 0;
- /* select 8 bpp console on RN50 or 16MB cards */
- if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
+ /* select 8 bpp console on 8MB cards, or 16 bpp on RN50 or 32MB */
+ if (rdev->mc.real_vram_size <= (8*1024*1024))
bpp_sel = 8;
+ else if (ASIC_IS_RN50(rdev) ||
+ rdev->mc.real_vram_size <= (32*1024*1024))
+ bpp_sel = 16;
rfbdev = kzalloc(sizeof(struct radeon_fbdev), GFP_KERNEL);
if (!rfbdev)
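A rough sizing example for the console depth selection above (the mode is picked here for illustration and is not taken from the patch): a 1920x1080 console needs about 1920 * 1080 * 4 ≈ 7.9 MiB of VRAM at 32 bpp, ≈ 4.0 MiB at 16 bpp and ≈ 2.0 MiB at 8 bpp, which is why boards with only 8 MiB are now limited to an 8 bpp console while RN50 and 32 MiB cards fall back to 16 bpp.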
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 574bf7e6b118..3386452bd2f0 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -271,7 +271,7 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
}
r = drm_gem_handle_create(filp, gobj, &handle);
/* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
if (r) {
up_read(&rdev->exclusive_lock);
r = radeon_gem_handle_lockup(rdev, r);
@@ -352,7 +352,7 @@ int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
r = drm_gem_handle_create(filp, gobj, &handle);
/* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
if (r)
goto handle_lockup;
@@ -361,7 +361,7 @@ int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
return 0;
release_object:
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
handle_lockup:
up_read(&rdev->exclusive_lock);
@@ -395,7 +395,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
up_read(&rdev->exclusive_lock);
r = radeon_gem_handle_lockup(robj->rdev, r);
return r;
@@ -414,11 +414,11 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
}
robj = gem_to_radeon_bo(gobj);
if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return -EPERM;
}
*offset_p = radeon_bo_mmap_offset(robj);
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return 0;
}
@@ -453,7 +453,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
args->domain = radeon_mem_type_to_domain(cur_placement);
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return r;
}
@@ -485,7 +485,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
if (rdev->asic->mmio_hdp_flush &&
radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
robj->rdev->asic->mmio_hdp_flush(rdev);
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
r = radeon_gem_handle_lockup(rdev, r);
return r;
}
@@ -504,7 +504,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
robj = gem_to_radeon_bo(gobj);
r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return r;
}
@@ -527,7 +527,7 @@ int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
radeon_bo_unreserve(rbo);
out:
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return r;
}
@@ -661,14 +661,14 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
r = radeon_bo_reserve(rbo, false);
if (r) {
args->operation = RADEON_VA_RESULT_ERROR;
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return r;
}
bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
if (!bo_va) {
args->operation = RADEON_VA_RESULT_ERROR;
radeon_bo_unreserve(rbo);
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return -ENOENT;
}
@@ -695,7 +695,7 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
args->operation = RADEON_VA_RESULT_ERROR;
}
out:
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return r;
}
@@ -736,7 +736,7 @@ int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
radeon_bo_unreserve(robj);
out:
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return r;
}
@@ -762,7 +762,7 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
r = drm_gem_handle_create(file_priv, gobj, &handle);
/* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
if (r) {
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 7aacb44df201..afaf10db47cc 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -283,6 +283,10 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
int r = 0;
spin_lock_init(&rdev->irq.lock);
+
+ /* Disable vblank irqs aggressively for power-saving */
+ rdev->ddev->vblank_disable_immediate = true;
+
r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
if (r) {
return r;
@@ -324,7 +328,6 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
*/
void radeon_irq_kms_fini(struct radeon_device *rdev)
{
- drm_vblank_cleanup(rdev->ddev);
if (rdev->irq.installed) {
drm_irq_uninstall(rdev->ddev);
rdev->irq.installed = false;
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c
index 699fe7f9b8bf..f6578c96925c 100644
--- a/drivers/gpu/drm/radeon/radeon_kfd.c
+++ b/drivers/gpu/drm/radeon/radeon_kfd.c
@@ -75,12 +75,14 @@ static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
uint32_t hpd_size, uint64_t hpd_gpu_addr);
static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr);
+ uint32_t queue_id, uint32_t __user *wptr,
+ uint32_t wptr_shift, uint32_t wptr_mask,
+ struct mm_struct *mm);
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id);
-static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, uint32_t reset_type,
unsigned int timeout, uint32_t pipe_id,
uint32_t queue_id);
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
@@ -184,7 +186,6 @@ void radeon_kfd_device_init(struct radeon_device *rdev)
if (rdev->kfd) {
struct kgd2kfd_shared_resources gpu_resources = {
.compute_vmid_bitmap = 0xFF00,
- .num_mec = 1,
.num_pipe_per_mec = 4,
.num_queue_per_pipe = 8
};
@@ -483,7 +484,9 @@ static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
}
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr)
+ uint32_t queue_id, uint32_t __user *wptr,
+ uint32_t wptr_shift, uint32_t wptr_mask,
+ struct mm_struct *mm)
{
uint32_t wptr_shadow, is_wptr_shadow_valid;
struct cik_mqd *m;
@@ -637,7 +640,7 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
return false;
}
-static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, uint32_t reset_type,
unsigned int timeout, uint32_t pipe_id,
uint32_t queue_id)
{
@@ -786,7 +789,8 @@ static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
unsigned int watch_point_id,
unsigned int reg_offset)
{
- return watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset];
+ return watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset]
+ / 4;
}
static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid)
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index ce6cb6666212..1f1856e0b1e0 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -1116,7 +1116,6 @@ static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
.mode_set_base_atomic = radeon_crtc_set_base_atomic,
.prepare = radeon_crtc_prepare,
.commit = radeon_crtc_commit,
- .load_lut = radeon_crtc_load_lut,
.disable = radeon_crtc_disable
};
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 00f5ec5c12c7..da44ac234f64 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -935,10 +935,6 @@ extern void
radeon_combios_encoder_crtc_scratch_regs(struct drm_encoder *encoder, int crtc);
extern void
radeon_combios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on);
-extern void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
- u16 blue, int regno);
-extern void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, int regno);
int radeon_framebuffer_init(struct drm_device *dev,
struct radeon_framebuffer *rfb,
const struct drm_mode_fb_cmd2 *mode_cmd,
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 8b722297a05c..093594976126 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -445,7 +445,7 @@ void radeon_bo_force_delete(struct radeon_device *rdev)
list_del_init(&bo->list);
mutex_unlock(&bo->rdev->gem.mutex);
/* this should unref the ttm bo */
- drm_gem_object_unreference_unlocked(&bo->gem_base);
+ drm_gem_object_put_unlocked(&bo->gem_base);
}
}
@@ -546,7 +546,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
list_for_each_entry(lobj, head, tv.head) {
struct radeon_bo *bo = lobj->robj;
if (!bo->pin_count) {
- u32 domain = lobj->prefered_domains;
+ u32 domain = lobj->preferred_domains;
u32 allowed = lobj->allowed_domains;
u32 current_domain =
radeon_mem_type_to_domain(bo->tbo.mem.mem_type);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index faa021396da3..bf69bf9086bf 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -178,7 +178,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
static void radeon_evict_flags(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
- static struct ttm_place placements = {
+ static const struct ttm_place placements = {
.fpfn = 0,
.lpfn = 0,
.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
@@ -907,17 +907,17 @@ int radeon_ttm_init(struct radeon_device *rdev)
r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM, 0, NULL,
- NULL, &rdev->stollen_vga_memory);
+ NULL, &rdev->stolen_vga_memory);
if (r) {
return r;
}
- r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
+ r = radeon_bo_reserve(rdev->stolen_vga_memory, false);
if (r)
return r;
- r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
- radeon_bo_unreserve(rdev->stollen_vga_memory);
+ r = radeon_bo_pin(rdev->stolen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
+ radeon_bo_unreserve(rdev->stolen_vga_memory);
if (r) {
- radeon_bo_unref(&rdev->stollen_vga_memory);
+ radeon_bo_unref(&rdev->stolen_vga_memory);
return r;
}
DRM_INFO("radeon: %uM of VRAM memory ready\n",
@@ -946,13 +946,13 @@ void radeon_ttm_fini(struct radeon_device *rdev)
if (!rdev->mman.initialized)
return;
radeon_ttm_debugfs_fini(rdev);
- if (rdev->stollen_vga_memory) {
- r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
+ if (rdev->stolen_vga_memory) {
+ r = radeon_bo_reserve(rdev->stolen_vga_memory, false);
if (r == 0) {
- radeon_bo_unpin(rdev->stollen_vga_memory);
- radeon_bo_unreserve(rdev->stollen_vga_memory);
+ radeon_bo_unpin(rdev->stolen_vga_memory);
+ radeon_bo_unreserve(rdev->stolen_vga_memory);
}
- radeon_bo_unref(&rdev->stollen_vga_memory);
+ radeon_bo_unref(&rdev->stolen_vga_memory);
}
ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
@@ -1030,19 +1030,17 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
static int radeon_mm_dump_table(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
- unsigned ttm_pl = *(int *)node->info_ent->data;
+ unsigned ttm_pl = *(int*)node->info_ent->data;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
- struct drm_mm *mm = (struct drm_mm *)rdev->mman.bdev.man[ttm_pl].priv;
- struct ttm_bo_global *glob = rdev->mman.bdev.glob;
+ struct ttm_mem_type_manager *man = &rdev->mman.bdev.man[ttm_pl];
struct drm_printer p = drm_seq_file_printer(m);
- spin_lock(&glob->lru_lock);
- drm_mm_print(mm, &p);
- spin_unlock(&glob->lru_lock);
+ man->func->debug(man, &p);
return 0;
}
+
static int ttm_pl_vram = TTM_PL_VRAM;
static int ttm_pl_tt = TTM_PL_TT;
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 5f68245579a3..5e82b408d522 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -139,7 +139,7 @@ struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
/* add the vm page table to the list */
list[0].robj = vm->page_directory;
- list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
+ list[0].preferred_domains = RADEON_GEM_DOMAIN_VRAM;
list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
list[0].tv.bo = &vm->page_directory->tbo;
list[0].tv.shared = true;
@@ -151,7 +151,7 @@ struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
continue;
list[idx].robj = vm->page_tables[i].bo;
- list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
+ list[idx].preferred_domains = RADEON_GEM_DOMAIN_VRAM;
list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
list[idx].tv.bo = &list[idx].robj->tbo;
list[idx].tv.shared = true;
diff --git a/drivers/gpu/drm/radeon/vce_v2_0.c b/drivers/gpu/drm/radeon/vce_v2_0.c
index fce214482e72..b0a43b68776d 100644
--- a/drivers/gpu/drm/radeon/vce_v2_0.c
+++ b/drivers/gpu/drm/radeon/vce_v2_0.c
@@ -104,6 +104,10 @@ static void vce_v2_0_disable_cg(struct radeon_device *rdev)
WREG32(VCE_CGTT_CLK_OVERRIDE, 7);
}
+/*
+ * Local variable sw_cg is used for debugging purposes, in case we
+ * run into problems with dynamic clock gating. Don't remove it.
+ */
void vce_v2_0_enable_mgcg(struct radeon_device *rdev, bool enable)
{
bool sw_cg = false;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 345eff72f581..301ea1a8018e 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -13,6 +13,7 @@
#include <linux/clk.h>
#include <linux/mutex.h>
+#include <linux/sys_soc.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
@@ -129,10 +130,8 @@ static void rcar_du_dpll_divider(struct rcar_du_crtc *rcrtc,
for (fdpll = 1; fdpll < 32; fdpll++) {
unsigned long output;
- /* 1/2 (FRQSEL=1) for duty rate 50% */
output = input * (n + 1) / (m + 1)
- / (fdpll + 1) / 2;
-
+ / (fdpll + 1);
if (output >= 400000000)
continue;
@@ -158,6 +157,11 @@ done:
best_diff);
}
+static const struct soc_device_attribute rcar_du_r8a7795_es1[] = {
+ { .soc_id = "r8a7795", .revision = "ES1.*" },
+ { /* sentinel */ }
+};
+
static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
{
const struct drm_display_mode *mode = &rcrtc->crtc.state->adjusted_mode;
@@ -168,7 +172,8 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
u32 escr;
u32 div;
- /* Compute the clock divisor and select the internal or external dot
+ /*
+ * Compute the clock divisor and select the internal or external dot
* clock based on the requested frequency.
*/
clk = clk_get_rate(rcrtc->clock);
@@ -185,7 +190,20 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
extclk = clk_get_rate(rcrtc->extclock);
if (rcdu->info->dpll_ch & (1 << rcrtc->index)) {
- rcar_du_dpll_divider(rcrtc, &dpll, extclk, mode_clock);
+ unsigned long target = mode_clock;
+
+ /*
+ * The H3 ES1.x exhibits dot clock duty cycle stability
+ * issues. We can work around them by configuring the
+ * DPLL to twice the desired frequency, coupled with a
+ * /2 post-divider. This isn't needed on other SoCs and
+ * breaks HDMI output on M3-W for a currently unknown
+ * reason, so restrict the workaround to H3 ES1.x.
+ */
+ if (soc_device_match(rcar_du_r8a7795_es1))
+ target *= 2;
+
+ rcar_du_dpll_divider(rcrtc, &dpll, extclk, target);
extclk = dpll.output;
}
@@ -197,8 +215,6 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
if (abs((long)extrate - (long)mode_clock) <
abs((long)rate - (long)mode_clock)) {
- dev_dbg(rcrtc->group->dev->dev,
- "crtc%u: using external clock\n", rcrtc->index);
if (rcdu->info->dpll_ch & (1 << rcrtc->index)) {
u32 dpllcr = DPLLCR_CODE | DPLLCR_CLKE
@@ -215,12 +231,14 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
rcar_du_group_write(rcrtc->group, DPLLCR,
dpllcr);
-
- escr = ESCR_DCLKSEL_DCLKIN | 1;
- } else {
- escr = ESCR_DCLKSEL_DCLKIN | extdiv;
}
+
+ escr = ESCR_DCLKSEL_DCLKIN | extdiv;
}
+
+ dev_dbg(rcrtc->group->dev->dev,
+ "mode clock %lu extrate %lu rate %lu ESCR 0x%08x\n",
+ mode_clock, extrate, rate, escr);
}
rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? ESCR2 : ESCR,
@@ -261,12 +279,14 @@ void rcar_du_crtc_route_output(struct drm_crtc *crtc,
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
struct rcar_du_device *rcdu = rcrtc->group->dev;
- /* Store the route from the CRTC output to the DU output. The DU will be
+ /*
+ * Store the route from the CRTC output to the DU output. The DU will be
* configured when starting the CRTC.
*/
rcrtc->outputs |= BIT(output);
- /* Store RGB routing to DPAD0, the hardware will be configured when
+ /*
+ * Store RGB routing to DPAD0, the hardware will be configured when
* starting the CRTC.
*/
if (output == RCAR_DU_OUTPUT_DPAD0)
@@ -342,7 +362,8 @@ static void rcar_du_crtc_update_planes(struct rcar_du_crtc *rcrtc)
}
}
- /* Update the planes to display timing and dot clock generator
+ /*
+ * Update the planes to display timing and dot clock generator
* associations.
*
* Updating the DPTSR register requires restarting the CRTC group,
@@ -431,14 +452,8 @@ static void rcar_du_crtc_wait_page_flip(struct rcar_du_crtc *rcrtc)
* Start/Stop and Suspend/Resume
*/
-static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
+static void rcar_du_crtc_setup(struct rcar_du_crtc *rcrtc)
{
- struct drm_crtc *crtc = &rcrtc->crtc;
- bool interlaced;
-
- if (rcrtc->started)
- return;
-
/* Set display off and background to black */
rcar_du_crtc_write(rcrtc, DOOR, DOOR_RGB(0, 0, 0));
rcar_du_crtc_write(rcrtc, BPOR, BPOR_RGB(0, 0, 0));
@@ -450,7 +465,20 @@ static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
/* Start with all planes disabled. */
rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, 0);
- /* Select master sync mode. This enables display operation in master
+ /* Enable the VSP compositor. */
+ if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
+ rcar_du_vsp_enable(rcrtc);
+
+ /* Turn vertical blanking interrupt reporting on. */
+ drm_crtc_vblank_on(&rcrtc->crtc);
+}
+
+static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
+{
+ bool interlaced;
+
+ /*
+ * Select master sync mode. This enables display operation in master
* sync mode (with the HSYNC and VSYNC signals configured as outputs and
* actively driven).
*/
@@ -460,38 +488,56 @@ static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
DSYSR_TVM_MASTER);
rcar_du_group_start_stop(rcrtc->group, true);
+}
- /* Enable the VSP compositor. */
- if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
- rcar_du_vsp_enable(rcrtc);
+static void rcar_du_crtc_disable_planes(struct rcar_du_crtc *rcrtc)
+{
+ struct rcar_du_device *rcdu = rcrtc->group->dev;
+ struct drm_crtc *crtc = &rcrtc->crtc;
+ u32 status;
- /* Turn vertical blanking interrupt reporting back on. */
- drm_crtc_vblank_on(crtc);
+ /* Make sure vblank interrupts are enabled. */
+ drm_crtc_vblank_get(crtc);
- rcrtc->started = true;
+ /*
+ * Disable planes and calculate how many vertical blanking interrupts we
+ * have to wait for. If a vertical blanking interrupt has been triggered
+ * but not processed yet, we don't know whether it occurred before or
+ * after the planes got disabled. We thus have to wait for two vblank
+ * interrupts in that case.
+ */
+ spin_lock_irq(&rcrtc->vblank_lock);
+ rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, 0);
+ status = rcar_du_crtc_read(rcrtc, DSSR);
+ rcrtc->vblank_count = status & DSSR_VBK ? 2 : 1;
+ spin_unlock_irq(&rcrtc->vblank_lock);
+
+ if (!wait_event_timeout(rcrtc->vblank_wait, rcrtc->vblank_count == 0,
+ msecs_to_jiffies(100)))
+ dev_warn(rcdu->dev, "vertical blanking timeout\n");
+
+ drm_crtc_vblank_put(crtc);
}
static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
{
struct drm_crtc *crtc = &rcrtc->crtc;
- if (!rcrtc->started)
- return;
-
- /* Disable all planes and wait for the change to take effect. This is
- * required as the DSnPR registers are updated on vblank, and no vblank
- * will occur once the CRTC is stopped. Disabling planes when starting
- * the CRTC thus wouldn't be enough as it would start scanning out
- * immediately from old frame buffers until the next vblank.
+ /*
+ * Disable all planes and wait for the change to take effect. This is
+ * required as the plane enable registers are updated on vblank, and no
+ * vblank will occur once the CRTC is stopped. Disabling planes when
+ * starting the CRTC thus wouldn't be enough as it would start scanning
+ * out immediately from old frame buffers until the next vblank.
*
* This increases the CRTC stop delay, especially when multiple CRTCs
* are stopped in one operation as we now wait for one vblank per CRTC.
* Whether this can be improved needs to be researched.
*/
- rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, 0);
- drm_crtc_wait_one_vblank(crtc);
+ rcar_du_crtc_disable_planes(rcrtc);
- /* Disable vertical blanking interrupt reporting. We first need to wait
+ /*
+ * Disable vertical blanking interrupt reporting. We first need to wait
* for page flip completion before stopping the CRTC as userspace
* expects page flips to eventually complete.
*/
@@ -502,14 +548,13 @@ static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
rcar_du_vsp_disable(rcrtc);
- /* Select switch sync mode. This stops display operation and configures
+ /*
+ * Select switch sync mode. This stops display operation and configures
* the HSYNC and VSYNC signals as inputs.
*/
rcar_du_crtc_clr_set(rcrtc, DSYSR, DSYSR_TVM_MASK, DSYSR_TVM_SWITCH);
rcar_du_group_start_stop(rcrtc->group, false);
-
- rcrtc->started = false;
}
void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc)
@@ -529,12 +574,10 @@ void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc)
return;
rcar_du_crtc_get(rcrtc);
- rcar_du_crtc_start(rcrtc);
+ rcar_du_crtc_setup(rcrtc);
/* Commit the planes state. */
- if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE)) {
- rcar_du_vsp_enable(rcrtc);
- } else {
+ if (!rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE)) {
for (i = 0; i < rcrtc->group->num_planes; ++i) {
struct rcar_du_plane *plane = &rcrtc->group->planes[i];
@@ -546,21 +589,33 @@ void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc)
}
rcar_du_crtc_update_planes(rcrtc);
+ rcar_du_crtc_start(rcrtc);
}
/* -----------------------------------------------------------------------------
* CRTC Functions
*/
-static void rcar_du_crtc_enable(struct drm_crtc *crtc)
+static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
- rcar_du_crtc_get(rcrtc);
+ /*
+ * If the CRTC has already been setup by the .atomic_begin() handler we
+ * can skip the setup stage.
+ */
+ if (!rcrtc->initialized) {
+ rcar_du_crtc_get(rcrtc);
+ rcar_du_crtc_setup(rcrtc);
+ rcrtc->initialized = true;
+ }
+
rcar_du_crtc_start(rcrtc);
}
-static void rcar_du_crtc_disable(struct drm_crtc *crtc)
+static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
@@ -574,6 +629,7 @@ static void rcar_du_crtc_disable(struct drm_crtc *crtc)
}
spin_unlock_irq(&crtc->dev->event_lock);
+ rcrtc->initialized = false;
rcrtc->outputs = 0;
}
@@ -582,6 +638,19 @@ static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc,
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
+ WARN_ON(!crtc->state->enable);
+
+ /*
+ * If a mode set is in progress we can be called with the CRTC disabled.
+ * We then need to first setup the CRTC in order to configure planes.
+ * The .atomic_enable() handler will notice and skip the CRTC setup.
+ */
+ if (!rcrtc->initialized) {
+ rcar_du_crtc_get(rcrtc);
+ rcar_du_crtc_setup(rcrtc);
+ rcrtc->initialized = true;
+ }
+
if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
rcar_du_vsp_atomic_begin(rcrtc);
}
@@ -609,10 +678,10 @@ static void rcar_du_crtc_atomic_flush(struct drm_crtc *crtc,
}
static const struct drm_crtc_helper_funcs crtc_helper_funcs = {
- .disable = rcar_du_crtc_disable,
- .enable = rcar_du_crtc_enable,
.atomic_begin = rcar_du_crtc_atomic_begin,
.atomic_flush = rcar_du_crtc_atomic_flush,
+ .atomic_enable = rcar_du_crtc_atomic_enable,
+ .atomic_disable = rcar_du_crtc_atomic_disable,
};
static int rcar_du_crtc_enable_vblank(struct drm_crtc *crtc)
@@ -621,6 +690,7 @@ static int rcar_du_crtc_enable_vblank(struct drm_crtc *crtc)
rcar_du_crtc_write(rcrtc, DSRCR, DSRCR_VBCL);
rcar_du_crtc_set(rcrtc, DIER, DIER_VBE);
+ rcrtc->vblank_enable = true;
return 0;
}
@@ -630,6 +700,7 @@ static void rcar_du_crtc_disable_vblank(struct drm_crtc *crtc)
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
rcar_du_crtc_clr(rcrtc, DIER, DIER_VBE);
+ rcrtc->vblank_enable = false;
}
static const struct drm_crtc_funcs crtc_funcs = {
@@ -654,14 +725,30 @@ static irqreturn_t rcar_du_crtc_irq(int irq, void *arg)
irqreturn_t ret = IRQ_NONE;
u32 status;
+ spin_lock(&rcrtc->vblank_lock);
+
status = rcar_du_crtc_read(rcrtc, DSSR);
rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK);
- if (status & DSSR_FRM) {
- drm_crtc_handle_vblank(&rcrtc->crtc);
+ if (status & DSSR_VBK) {
+ /*
+ * Wake up the vblank wait if the counter reaches 0. This must
+ * be protected by the vblank_lock to avoid races in
+ * rcar_du_crtc_disable_planes().
+ */
+ if (rcrtc->vblank_count) {
+ if (--rcrtc->vblank_count == 0)
+ wake_up(&rcrtc->vblank_wait);
+ }
+ }
- if (rcdu->info->gen < 3)
+ spin_unlock(&rcrtc->vblank_lock);
+
+ if (status & DSSR_VBK) {
+ if (rcdu->info->gen < 3) {
+ drm_crtc_handle_vblank(&rcrtc->crtc);
rcar_du_crtc_finish_page_flip(rcrtc);
+ }
ret = IRQ_HANDLED;
}
@@ -715,13 +802,15 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index)
}
init_waitqueue_head(&rcrtc->flip_wait);
+ init_waitqueue_head(&rcrtc->vblank_wait);
+ spin_lock_init(&rcrtc->vblank_lock);
rcrtc->group = rgrp;
rcrtc->mmio_offset = mmio_offsets[index];
rcrtc->index = index;
if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE))
- primary = &rcrtc->vsp->planes[0].plane;
+ primary = &rcrtc->vsp->planes[rcrtc->vsp_pipe].plane;
else
primary = &rgrp->planes[index % 2].plane;
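As a concrete illustration of the H3 ES1.x dot clock workaround in the hunks above (the frequency is an example chosen here, not taken from the patch): for a 148.5 MHz HDMI mode, rcar_du_dpll_divider() is now asked for 297 MHz on ES1.x silicon and the /2 post-divider brings the dot clock back down to 148.5 MHz, while on every other SoC the PLL is simply programmed for the mode clock.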
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
index b199ed5adf36..fdc2bf99bda1 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
@@ -15,6 +15,7 @@
#define __RCAR_DU_CRTC_H__
#include <linux/mutex.h>
+#include <linux/spinlock.h>
#include <linux/wait.h>
#include <drm/drmP.h>
@@ -30,11 +31,17 @@ struct rcar_du_vsp;
* @extclock: external pixel dot clock (optional)
* @mmio_offset: offset of the CRTC registers in the DU MMIO block
* @index: CRTC software and hardware index
- * @started: whether the CRTC has been started and is running
+ * @initialized: whether the CRTC has been initialized and clocks enabled
+ * @vblank_enable: whether vblank events are enabled on this CRTC
* @event: event to post when the pending page flip completes
* @flip_wait: wait queue used to signal page flip completion
+ * @vblank_lock: protects vblank_wait and vblank_count
+ * @vblank_wait: wait queue used to signal vertical blanking
+ * @vblank_count: number of vertical blanking interrupts to wait for
* @outputs: bitmask of the outputs (enum rcar_du_output) driven by this CRTC
* @group: CRTC group this CRTC belongs to
+ * @vsp: VSP feeding video to this CRTC
+ * @vsp_pipe: index of the VSP pipeline feeding video to this CRTC
*/
struct rcar_du_crtc {
struct drm_crtc crtc;
@@ -43,15 +50,21 @@ struct rcar_du_crtc {
struct clk *extclock;
unsigned int mmio_offset;
unsigned int index;
- bool started;
+ bool initialized;
+ bool vblank_enable;
struct drm_pending_vblank_event *event;
wait_queue_head_t flip_wait;
+ spinlock_t vblank_lock;
+ wait_queue_head_t vblank_wait;
+ unsigned int vblank_count;
+
unsigned int outputs;
struct rcar_du_group *group;
struct rcar_du_vsp *vsp;
+ unsigned int vsp_pipe;
};
#define to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, crtc)
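Taken together, the rcar_du_crtc.c and rcar_du_crtc.h changes above add a small handshake between the CRTC stop path and the interrupt handler, using the new vblank_lock/vblank_wait/vblank_count fields. A condensed view of the two halves (illustrative only, not literal patch code; the drm_crtc_vblank_get()/put() calls around the wait are omitted here):

/* stop path (cf. rcar_du_crtc_disable_planes() above) */
spin_lock_irq(&rcrtc->vblank_lock);
rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, 0);
rcrtc->vblank_count = (rcar_du_crtc_read(rcrtc, DSSR) & DSSR_VBK) ? 2 : 1;
spin_unlock_irq(&rcrtc->vblank_lock);
wait_event_timeout(rcrtc->vblank_wait, rcrtc->vblank_count == 0,
		   msecs_to_jiffies(100));

/* interrupt handler (cf. rcar_du_crtc_irq() above) */
spin_lock(&rcrtc->vblank_lock);
if ((status & DSSR_VBK) && rcrtc->vblank_count && --rcrtc->vblank_count == 0)
	wake_up(&rcrtc->vblank_wait);
spin_unlock(&rcrtc->vblank_lock);

The counter starts at two when a vblank interrupt was already pending at the time the planes were disabled, because it is then unknown whether that interrupt raced with the register write.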
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index d6a0255181cc..d2f29e6b1112 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -39,7 +39,8 @@ static const struct rcar_du_device_info rcar_du_r8a7779_info = {
.features = 0,
.num_crtcs = 2,
.routes = {
- /* R8A7779 has two RGB outputs and one (currently unsupported)
+ /*
+ * R8A7779 has two RGB outputs and one (currently unsupported)
* TCON output.
*/
[RCAR_DU_OUTPUT_DPAD0] = {
@@ -61,7 +62,8 @@ static const struct rcar_du_device_info rcar_du_r8a7790_info = {
.quirks = RCAR_DU_QUIRK_ALIGN_128B | RCAR_DU_QUIRK_LVDS_LANES,
.num_crtcs = 3,
.routes = {
- /* R8A7790 has one RGB output, two LVDS outputs and one
+ /*
+ * R8A7790 has one RGB output, two LVDS outputs and one
* (currently unsupported) TCON output.
*/
[RCAR_DU_OUTPUT_DPAD0] = {
@@ -87,7 +89,8 @@ static const struct rcar_du_device_info rcar_du_r8a7791_info = {
| RCAR_DU_FEATURE_EXT_CTRL_REGS,
.num_crtcs = 2,
.routes = {
- /* R8A779[13] has one RGB output, one LVDS output and one
+ /*
+ * R8A779[13] has one RGB output, one LVDS output and one
* (currently unsupported) TCON output.
*/
[RCAR_DU_OUTPUT_DPAD0] = {
@@ -127,7 +130,8 @@ static const struct rcar_du_device_info rcar_du_r8a7794_info = {
| RCAR_DU_FEATURE_EXT_CTRL_REGS,
.num_crtcs = 2,
.routes = {
- /* R8A7794 has two RGB outputs and one (currently unsupported)
+ /*
+ * R8A7794 has two RGB outputs and one (currently unsupported)
* TCON output.
*/
[RCAR_DU_OUTPUT_DPAD0] = {
@@ -149,7 +153,8 @@ static const struct rcar_du_device_info rcar_du_r8a7795_info = {
| RCAR_DU_FEATURE_VSP1_SOURCE,
.num_crtcs = 4,
.routes = {
- /* R8A7795 has one RGB output, two HDMI outputs and one
+ /*
+ * R8A7795 has one RGB output, two HDMI outputs and one
* LVDS output.
*/
[RCAR_DU_OUTPUT_DPAD0] = {
@@ -180,19 +185,25 @@ static const struct rcar_du_device_info rcar_du_r8a7796_info = {
| RCAR_DU_FEATURE_VSP1_SOURCE,
.num_crtcs = 3,
.routes = {
- /* R8A7796 has one RGB output, one LVDS output and one
- * (currently unsupported) HDMI output.
+ /*
+ * R8A7796 has one RGB output, one LVDS output and one HDMI
+ * output.
*/
[RCAR_DU_OUTPUT_DPAD0] = {
.possible_crtcs = BIT(2),
.port = 0,
},
+ [RCAR_DU_OUTPUT_HDMI0] = {
+ .possible_crtcs = BIT(1),
+ .port = 1,
+ },
[RCAR_DU_OUTPUT_LVDS0] = {
.possible_crtcs = BIT(0),
.port = 2,
},
},
.num_lvds = 1,
+ .dpll_ch = BIT(1),
};
static const struct of_device_id rcar_du_of_table[] = {
@@ -238,8 +249,6 @@ static struct drm_driver rcar_du_driver = {
.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
.gem_prime_mmap = drm_gem_cma_prime_mmap,
.dumb_create = rcar_du_dumb_create,
- .dumb_map_offset = drm_gem_cma_dumb_map_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
.fops = &rcar_du_fops,
.name = "rcar-du",
.desc = "Renesas R-Car Display Unit",
@@ -341,7 +350,8 @@ static int rcar_du_probe(struct platform_device *pdev)
ddev->irq_enabled = 1;
- /* Register the DRM device with the core and the connectors with
+ /*
+ * Register the DRM device with the core and the connectors with
* sysfs.
*/
ret = drm_dev_register(ddev, 0);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index 3e048dd98b64..ba8d2804c1d1 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -186,8 +186,8 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
}
if (enc_node) {
- dev_dbg(rcdu->dev, "initializing encoder %s for output %u\n",
- of_node_full_name(enc_node), output);
+ dev_dbg(rcdu->dev, "initializing encoder %pOF for output %u\n",
+ enc_node, output);
/* Locate the DRM bridge from the encoder DT node. */
bridge = of_drm_find_bridge(enc_node);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c
index 64738fca96d0..2f37ea901873 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_group.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.c
@@ -64,7 +64,8 @@ static void rcar_du_group_setup_defr8(struct rcar_du_group *rgrp)
if (rcdu->info->gen < 3) {
defr8 |= DEFR8_DEFE8;
- /* On Gen2 the DEFR8 register for the first group also controls
+ /*
+ * On Gen2 the DEFR8 register for the first group also controls
* RGB output routing to DPAD0 and VSPD1 routing to DU0/1/2 for
* DU instances that support it.
*/
@@ -75,7 +76,8 @@ static void rcar_du_group_setup_defr8(struct rcar_du_group *rgrp)
defr8 |= DEFR8_VSCS;
}
} else {
- /* On Gen3 VSPD routing can't be configured, but DPAD routing
+ /*
+ * On Gen3 VSPD routing can't be configured, but DPAD routing
* needs to be set despite having a single option available.
*/
u32 crtc = ffs(possible_crtcs) - 1;
@@ -124,7 +126,8 @@ static void rcar_du_group_setup(struct rcar_du_group *rgrp)
if (rcdu->info->gen >= 3)
rcar_du_group_write(rgrp, DEFR10, DEFR10_CODE | DEFR10_DEFE10);
- /* Use DS1PR and DS2PR to configure planes priorities and connects the
+ /*
+ * Use DS1PR and DS2PR to configure planes priorities and connects the
* superposition 0 to DU0 pins. DU1 pins will be configured dynamically.
*/
rcar_du_group_write(rgrp, DORCR, DORCR_PG1D_DS1 | DORCR_DPRS);
@@ -177,7 +180,8 @@ static void __rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start)
void rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start)
{
- /* Many of the configuration bits are only updated when the display
+ /*
+ * Many of the configuration bits are only updated when the display
* reset (DRES) bit in DSYSR is set to 1, disabling *both* CRTCs. Some
* of those bits could be pre-configured, but others (especially the
* bits related to plane assignment to display timing controllers) need
@@ -208,23 +212,32 @@ void rcar_du_group_restart(struct rcar_du_group *rgrp)
int rcar_du_set_dpad0_vsp1_routing(struct rcar_du_device *rcdu)
{
+ struct rcar_du_group *rgrp;
+ struct rcar_du_crtc *crtc;
+ unsigned int index;
int ret;
if (!rcar_du_has(rcdu, RCAR_DU_FEATURE_EXT_CTRL_REGS))
return 0;
- /* RGB output routing to DPAD0 and VSP1D routing to DU0/1/2 are
- * configured in the DEFR8 register of the first group. As this function
- * can be called with the DU0 and DU1 CRTCs disabled, we need to enable
- * the first group clock before accessing the register.
+ /*
+ * RGB output routing to DPAD0 and VSP1D routing to DU0/1/2 are
+ * configured in the DEFR8 register of the first group on Gen2 and the
+ * last group on Gen3. As this function can be called with the DU
+ * channels of the corresponding CRTCs disabled, we need to enable the
+ * group clock before accessing the register.
*/
- ret = clk_prepare_enable(rcdu->crtcs[0].clock);
+ index = rcdu->info->gen < 3 ? 0 : DIV_ROUND_UP(rcdu->num_crtcs, 2) - 1;
+ rgrp = &rcdu->groups[index];
+ crtc = &rcdu->crtcs[index * 2];
+
+ ret = clk_prepare_enable(crtc->clock);
if (ret < 0)
return ret;
- rcar_du_group_setup_defr8(&rcdu->groups[0]);
+ rcar_du_group_setup_defr8(rgrp);
- clk_disable_unprepare(rcdu->crtcs[0].clock);
+ clk_disable_unprepare(crtc->clock);
return 0;
}
@@ -236,7 +249,8 @@ int rcar_du_group_set_routing(struct rcar_du_group *rgrp)
dorcr &= ~(DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_MASK);
- /* Set the DPAD1 pins sources. Select CRTC 0 if explicitly requested and
+ /*
+ * Set the DPAD1 pins sources. Select CRTC 0 if explicitly requested and
* CRTC 1 in all other cases to avoid cloning CRTC 0 to DPAD0 and DPAD1
* by default.
*/
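A quick worked example for the group selection above: on a Gen3 device with four CRTCs, DIV_ROUND_UP(4, 2) - 1 = 1, so DEFR8 is programmed through the last group (rcdu->groups[1]) and the clock of that group's first CRTC (rcdu->crtcs[2]) is enabled around the register access; three CRTCs give the same index 1, while on Gen2 the index is always 0, matching the previous behaviour of using the first group and crtcs[0].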
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index f4125c8ca902..7278b9703c15 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -96,7 +96,8 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = {
.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
.edf = PnDDCR4_EDF_NONE,
},
- /* The following formats are not supported on Gen2 and thus have no
+ /*
+ * The following formats are not supported on Gen2 and thus have no
* associated .pnmr or .edf settings.
*/
{
@@ -153,7 +154,8 @@ int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev,
unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
unsigned int align;
- /* The R8A7779 DU requires a 16 pixels pitch alignment as documented,
+ /*
+ * The R8A7779 DU requires a 16 pixels pitch alignment as documented,
* but the R8A7790 DU seems to require a 128 bytes pitch alignment.
*/
if (rcar_du_needs(rcdu, RCAR_DU_QUIRK_ALIGN_128B))
@@ -255,12 +257,12 @@ static void rcar_du_atomic_commit_tail(struct drm_atomic_state *old_state)
/* Apply the atomic update. */
drm_atomic_helper_commit_modeset_disables(dev, old_state);
- drm_atomic_helper_commit_modeset_enables(dev, old_state);
drm_atomic_helper_commit_planes(dev, old_state,
DRM_PLANE_COMMIT_ACTIVE_ONLY);
+ drm_atomic_helper_commit_modeset_enables(dev, old_state);
drm_atomic_helper_commit_hw_done(old_state);
- drm_atomic_helper_wait_for_vblanks(dev, old_state);
+ drm_atomic_helper_wait_for_flip_done(dev, old_state);
drm_atomic_helper_cleanup_planes(dev, old_state);
}
@@ -297,19 +299,19 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
*/
entity = of_graph_get_remote_port_parent(ep->local_node);
if (!entity) {
- dev_dbg(rcdu->dev, "unconnected endpoint %s, skipping\n",
- ep->local_node->full_name);
+ dev_dbg(rcdu->dev, "unconnected endpoint %pOF, skipping\n",
+ ep->local_node);
return -ENODEV;
}
if (!of_device_is_available(entity)) {
dev_dbg(rcdu->dev,
- "connected entity %s is disabled, skipping\n",
- entity->full_name);
+ "connected entity %pOF is disabled, skipping\n",
+ entity);
return -ENODEV;
}
- entity_ep_node = of_parse_phandle(ep->local_node, "remote-endpoint", 0);
+ entity_ep_node = of_graph_get_remote_endpoint(ep->local_node);
for_each_endpoint_of_node(entity, ep_node) {
if (ep_node == entity_ep_node)
@@ -325,8 +327,8 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
if (!connector) {
dev_warn(rcdu->dev,
- "no connector for encoder %s, skipping\n",
- encoder->full_name);
+ "no connector for encoder %pOF, skipping\n",
+ encoder);
of_node_put(entity_ep_node);
of_node_put(encoder);
return -ENODEV;
@@ -348,8 +350,8 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
ret = rcar_du_encoder_init(rcdu, output, encoder, connector);
if (ret && ret != -EPROBE_DEFER)
dev_warn(rcdu->dev,
- "failed to initialize encoder %s on output %u (%d), skipping\n",
- of_node_full_name(encoder), output, ret);
+ "failed to initialize encoder %pOF on output %u (%d), skipping\n",
+ encoder, output, ret);
of_node_put(encoder);
of_node_put(connector);
@@ -419,7 +421,8 @@ static int rcar_du_properties_init(struct rcar_du_device *rcdu)
if (rcdu->props.alpha == NULL)
return -ENOMEM;
- /* The color key is expressed as an RGB888 triplet stored in a 32-bit
+ /*
+ * The color key is expressed as an RGB888 triplet stored in a 32-bit
* integer in XRGB8888 format. Bit 24 is used as a flag to disable (0)
* or enable source color keying (1).
*/
@@ -432,6 +435,81 @@ static int rcar_du_properties_init(struct rcar_du_device *rcdu)
return 0;
}
+static int rcar_du_vsps_init(struct rcar_du_device *rcdu)
+{
+ const struct device_node *np = rcdu->dev->of_node;
+ struct of_phandle_args args;
+ struct {
+ struct device_node *np;
+ unsigned int crtcs_mask;
+ } vsps[RCAR_DU_MAX_VSPS] = { { 0, }, };
+ unsigned int vsps_count = 0;
+ unsigned int cells;
+ unsigned int i;
+ int ret;
+
+ /*
+ * First parse the DT vsps property to populate the list of VSPs. Each
+ * entry contains a pointer to the VSP DT node and a bitmask of the
+ * connected DU CRTCs.
+ */
+ cells = of_property_count_u32_elems(np, "vsps") / rcdu->num_crtcs - 1;
+ if (cells > 1)
+ return -EINVAL;
+
+ for (i = 0; i < rcdu->num_crtcs; ++i) {
+ unsigned int j;
+
+ ret = of_parse_phandle_with_fixed_args(np, "vsps", cells, i,
+ &args);
+ if (ret < 0)
+ goto error;
+
+ /*
+ * Add the VSP to the list or update the corresponding existing
+ * entry if the VSP has already been added.
+ */
+ for (j = 0; j < vsps_count; ++j) {
+ if (vsps[j].np == args.np)
+ break;
+ }
+
+ if (j < vsps_count)
+ of_node_put(args.np);
+ else
+ vsps[vsps_count++].np = args.np;
+
+ vsps[j].crtcs_mask |= BIT(i);
+
+ /* Store the VSP pointer and pipe index in the CRTC. */
+ rcdu->crtcs[i].vsp = &rcdu->vsps[j];
+ rcdu->crtcs[i].vsp_pipe = cells >= 1 ? args.args[0] : 0;
+ }
+
+ /*
+ * Then initialize all the VSPs from the node pointers and CRTCs bitmask
+ * computed previously.
+ */
+ for (i = 0; i < vsps_count; ++i) {
+ struct rcar_du_vsp *vsp = &rcdu->vsps[i];
+
+ vsp->index = i;
+ vsp->dev = rcdu;
+
+ ret = rcar_du_vsp_init(vsp, vsps[i].np, vsps[i].crtcs_mask);
+ if (ret < 0)
+ goto error;
+ }
+
+ return 0;
+
+error:
+ for (i = 0; i < ARRAY_SIZE(vsps); ++i)
+ of_node_put(vsps[i].np);
+
+ return ret;
+}
+
int rcar_du_modeset_init(struct rcar_du_device *rcdu)
{
static const unsigned int mmio_offsets[] = {
@@ -461,7 +539,8 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
if (ret < 0)
return ret;
- /* Initialize vertical blanking interrupts handling. Start with vblank
+ /*
+ * Initialize vertical blanking interrupts handling. Start with vblank
* disabled for all CRTCs.
*/
ret = drm_vblank_init(dev, (1 << rcdu->info->num_crtcs) - 1);
@@ -481,7 +560,8 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
rgrp->index = i;
rgrp->num_crtcs = min(rcdu->num_crtcs - 2 * i, 2U);
- /* If we have more than one CRTCs in this group pre-associate
+ /*
+ * If we have more than one CRTCs in this group pre-associate
* the low-order planes with CRTC 0 and the high-order planes
* with CRTC 1 to minimize flicker occurring when the
* association is changed.
@@ -499,17 +579,9 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
/* Initialize the compositors. */
if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE)) {
- for (i = 0; i < rcdu->num_crtcs; ++i) {
- struct rcar_du_vsp *vsp = &rcdu->vsps[i];
-
- vsp->index = i;
- vsp->dev = rcdu;
- rcdu->crtcs[i].vsp = vsp;
-
- ret = rcar_du_vsp_init(vsp);
- if (ret < 0)
- return ret;
- }
+ ret = rcar_du_vsps_init(rcdu);
+ if (ret < 0)
+ return ret;
}
/* Create the CRTCs. */
@@ -537,7 +609,8 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
num_encoders = ret;
- /* Set the possible CRTCs and possible clones. There's always at least
+ /*
+ * Set the possible CRTCs and possible clones. There's always at least
* one way for all encoders to clone each other, set all bits in the
* possible clones field.
*/
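The vsps initialisation added above derives everything from a single DT property; a hypothetical example (the fragment is illustrative, not taken from the patch): with three CRTCs and vsps = <&vspd0 0>, <&vspd1 0>, <&vspd0 1>, of_property_count_u32_elems() returns 6, so cells = 6 / 3 - 1 = 1, each entry is parsed with of_parse_phandle_with_fixed_args(), the extra cell becomes that CRTC's vsp_pipe, and vspd0, referenced twice, is initialised only once with a crtcs_mask covering CRTCs 0 and 2. A property made of bare phandles (one cell per CRTC) gives cells = 0 and a vsp_pipe of 0 everywhere.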
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
index ee91481131ad..b373ad48ef5f 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
@@ -46,7 +46,6 @@ static void rcar_du_lvds_connector_destroy(struct drm_connector *connector)
}
static const struct drm_connector_funcs connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.reset = drm_atomic_helper_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = rcar_du_lvds_connector_destroy,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
index 1661f6201210..12d22f3db1af 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
@@ -59,7 +59,8 @@ static void rcar_du_lvdsenc_start_gen2(struct rcar_du_lvdsenc *lvds,
rcar_lvds_write(lvds, LVDPLLCR, pllcr);
- /* Select the input, hardcode mode 0, enable LVDS operation and turn
+ /*
+ * Select the input, hardcode mode 0, enable LVDS operation and turn
* bias circuitry on.
*/
lvdcr0 = (lvds->mode << LVDCR0_LVMD_SHIFT) | LVDCR0_BEN | LVDCR0_LVEN;
@@ -73,7 +74,8 @@ static void rcar_du_lvdsenc_start_gen2(struct rcar_du_lvdsenc *lvds,
LVDCR1_CHSTBY_GEN2(1) | LVDCR1_CHSTBY_GEN2(0) |
LVDCR1_CLKSTBY_GEN2);
- /* Turn the PLL on, wait for the startup delay, and turn the output
+ /*
+ * Turn the PLL on, wait for the startup delay, and turn the output
* on.
*/
lvdcr0 |= LVDCR0_PLLON;
@@ -140,7 +142,8 @@ static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds,
if (ret < 0)
return ret;
- /* Hardcode the channels and control signals routing for now.
+ /*
+ * Hardcode the channels and control signals routing for now.
*
* HSYNC -> CTRL0
* VSYNC -> CTRL1
@@ -202,7 +205,8 @@ void rcar_du_lvdsenc_atomic_check(struct rcar_du_lvdsenc *lvds,
{
struct rcar_du_device *rcdu = lvds->dev;
- /* The internal LVDS encoder has a restricted clock frequency operating
+ /*
+ * The internal LVDS encoder has a restricted clock frequency operating
* range (30MHz to 150MHz on Gen2, 25.175MHz to 148.5MHz on Gen3). Clamp
* the clock accordingly.
*/
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index dcde6288da6c..61833cc1c699 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -50,23 +50,21 @@
* automatically when the core swaps the old and new states.
*/
-static bool rcar_du_plane_needs_realloc(struct rcar_du_plane *plane,
- struct rcar_du_plane_state *new_state)
+static bool rcar_du_plane_needs_realloc(
+ const struct rcar_du_plane_state *old_state,
+ const struct rcar_du_plane_state *new_state)
{
- struct rcar_du_plane_state *cur_state;
-
- cur_state = to_rcar_plane_state(plane->plane.state);
-
- /* Lowering the number of planes doesn't strictly require reallocation
+ /*
+ * Lowering the number of planes doesn't strictly require reallocation
* as the extra hardware plane will be freed when committing, but doing
* so could lead to more fragmentation.
*/
- if (!cur_state->format ||
- cur_state->format->planes != new_state->format->planes)
+ if (!old_state->format ||
+ old_state->format->planes != new_state->format->planes)
return true;
/* Reallocate hardware planes if the source has changed. */
- if (cur_state->source != new_state->source)
+ if (old_state->source != new_state->source)
return true;
return false;
@@ -141,37 +139,43 @@ int rcar_du_atomic_check_planes(struct drm_device *dev,
unsigned int groups = 0;
unsigned int i;
struct drm_plane *drm_plane;
- struct drm_plane_state *drm_plane_state;
+ struct drm_plane_state *old_drm_plane_state;
+ struct drm_plane_state *new_drm_plane_state;
/* Check if hardware planes need to be reallocated. */
- for_each_plane_in_state(state, drm_plane, drm_plane_state, i) {
- struct rcar_du_plane_state *plane_state;
+ for_each_oldnew_plane_in_state(state, drm_plane, old_drm_plane_state,
+ new_drm_plane_state, i) {
+ struct rcar_du_plane_state *old_plane_state;
+ struct rcar_du_plane_state *new_plane_state;
struct rcar_du_plane *plane;
unsigned int index;
plane = to_rcar_plane(drm_plane);
- plane_state = to_rcar_plane_state(drm_plane_state);
+ old_plane_state = to_rcar_plane_state(old_drm_plane_state);
+ new_plane_state = to_rcar_plane_state(new_drm_plane_state);
dev_dbg(rcdu->dev, "%s: checking plane (%u,%tu)\n", __func__,
plane->group->index, plane - plane->group->planes);
- /* If the plane is being disabled we don't need to go through
+ /*
+ * If the plane is being disabled we don't need to go through
* the full reallocation procedure. Just mark the hardware
* plane(s) as freed.
*/
- if (!plane_state->format) {
+ if (!new_plane_state->format) {
dev_dbg(rcdu->dev, "%s: plane is being disabled\n",
__func__);
index = plane - plane->group->planes;
group_freed_planes[plane->group->index] |= 1 << index;
- plane_state->hwindex = -1;
+ new_plane_state->hwindex = -1;
continue;
}
- /* If the plane needs to be reallocated mark it as such, and
+ /*
+ * If the plane needs to be reallocated mark it as such, and
* mark the hardware plane(s) as free.
*/
- if (rcar_du_plane_needs_realloc(plane, plane_state)) {
+ if (rcar_du_plane_needs_realloc(old_plane_state, new_plane_state)) {
dev_dbg(rcdu->dev, "%s: plane needs reallocation\n",
__func__);
groups |= 1 << plane->group->index;
@@ -179,14 +183,15 @@ int rcar_du_atomic_check_planes(struct drm_device *dev,
index = plane - plane->group->planes;
group_freed_planes[plane->group->index] |= 1 << index;
- plane_state->hwindex = -1;
+ new_plane_state->hwindex = -1;
}
}
if (!needs_realloc)
return 0;
- /* Grab all plane states for the groups that need reallocation to ensure
+ /*
+ * Grab all plane states for the groups that need reallocation to ensure
* locking and avoid racy updates. This serializes the update operation,
* but there's not much we can do about it as that's the hardware
* design.
@@ -204,14 +209,15 @@ int rcar_du_atomic_check_planes(struct drm_device *dev,
for (i = 0; i < group->num_planes; ++i) {
struct rcar_du_plane *plane = &group->planes[i];
- struct rcar_du_plane_state *plane_state;
+ struct rcar_du_plane_state *new_plane_state;
struct drm_plane_state *s;
s = drm_atomic_get_plane_state(state, &plane->plane);
if (IS_ERR(s))
return PTR_ERR(s);
- /* If the plane has been freed in the above loop its
+ /*
+ * If the plane has been freed in the above loop its
* hardware planes must not be added to the used planes
* bitmask. However, the current state doesn't reflect
* the free state yet, as we've modified the new state
@@ -226,16 +232,16 @@ int rcar_du_atomic_check_planes(struct drm_device *dev,
continue;
}
- plane_state = to_rcar_plane_state(plane->plane.state);
- used_planes |= rcar_du_plane_hwmask(plane_state);
+ new_plane_state = to_rcar_plane_state(s);
+ used_planes |= rcar_du_plane_hwmask(new_plane_state);
dev_dbg(rcdu->dev,
"%s: plane (%u,%tu) uses %u hwplanes (index %d)\n",
__func__, plane->group->index,
plane - plane->group->planes,
- plane_state->format ?
- plane_state->format->planes : 0,
- plane_state->hwindex);
+ new_plane_state->format ?
+ new_plane_state->format->planes : 0,
+ new_plane_state->hwindex);
}
group_free_planes[index] = 0xff & ~used_planes;
@@ -246,40 +252,45 @@ int rcar_du_atomic_check_planes(struct drm_device *dev,
}
/* Reallocate hardware planes for each plane that needs it. */
- for_each_plane_in_state(state, drm_plane, drm_plane_state, i) {
- struct rcar_du_plane_state *plane_state;
+ for_each_oldnew_plane_in_state(state, drm_plane, old_drm_plane_state,
+ new_drm_plane_state, i) {
+ struct rcar_du_plane_state *old_plane_state;
+ struct rcar_du_plane_state *new_plane_state;
struct rcar_du_plane *plane;
unsigned int crtc_planes;
unsigned int free;
int idx;
plane = to_rcar_plane(drm_plane);
- plane_state = to_rcar_plane_state(drm_plane_state);
+ old_plane_state = to_rcar_plane_state(old_drm_plane_state);
+ new_plane_state = to_rcar_plane_state(new_drm_plane_state);
dev_dbg(rcdu->dev, "%s: allocating plane (%u,%tu)\n", __func__,
plane->group->index, plane - plane->group->planes);
- /* Skip planes that are being disabled or don't need to be
+ /*
+ * Skip planes that are being disabled or don't need to be
* reallocated.
*/
- if (!plane_state->format ||
- !rcar_du_plane_needs_realloc(plane, plane_state))
+ if (!new_plane_state->format ||
+ !rcar_du_plane_needs_realloc(old_plane_state, new_plane_state))
continue;
- /* Try to allocate the plane from the free planes currently
+ /*
+ * Try to allocate the plane from the free planes currently
* associated with the target CRTC to avoid restarting the CRTC
* group and thus minimize flicker. If it fails fall back to
* allocating from all free planes.
*/
- crtc_planes = to_rcar_crtc(plane_state->state.crtc)->index % 2
+ crtc_planes = to_rcar_crtc(new_plane_state->state.crtc)->index % 2
? plane->group->dptsr_planes
: ~plane->group->dptsr_planes;
free = group_free_planes[plane->group->index];
- idx = rcar_du_plane_hwalloc(plane, plane_state,
+ idx = rcar_du_plane_hwalloc(plane, new_plane_state,
free & crtc_planes);
if (idx < 0)
- idx = rcar_du_plane_hwalloc(plane, plane_state,
+ idx = rcar_du_plane_hwalloc(plane, new_plane_state,
free);
if (idx < 0) {
dev_dbg(rcdu->dev, "%s: no available hardware plane\n",
@@ -288,12 +299,12 @@ int rcar_du_atomic_check_planes(struct drm_device *dev,
}
dev_dbg(rcdu->dev, "%s: allocated %u hwplanes (index %u)\n",
- __func__, plane_state->format->planes, idx);
+ __func__, new_plane_state->format->planes, idx);
- plane_state->hwindex = idx;
+ new_plane_state->hwindex = idx;
group_free_planes[plane->group->index] &=
- ~rcar_du_plane_hwmask(plane_state);
+ ~rcar_du_plane_hwmask(new_plane_state);
dev_dbg(rcdu->dev, "%s: group %u free planes mask 0x%02x\n",
__func__, plane->group->index,
@@ -351,14 +362,16 @@ static void rcar_du_plane_setup_scanout(struct rcar_du_group *rgrp,
dma[1] = 0;
}
- /* Memory pitch (expressed in pixels). Must be doubled for interlaced
+ /*
+ * Memory pitch (expressed in pixels). Must be doubled for interlaced
* operation with 32bpp formats.
*/
rcar_du_plane_write(rgrp, index, PnMWR,
(interlaced && state->format->bpp == 32) ?
pitch * 2 : pitch);
- /* The Y position is expressed in raster line units and must be doubled
+ /*
+ * The Y position is expressed in raster line units and must be doubled
* for 32bpp formats, according to the R8A7790 datasheet. No mention of
* doubling the Y position is found in the R8A7779 datasheet, but the
* rule seems to apply there as well.
@@ -396,7 +409,8 @@ static void rcar_du_plane_setup_mode(struct rcar_du_group *rgrp,
u32 colorkey;
u32 pnmr;
- /* The PnALPHAR register controls alpha-blending in 16bpp formats
+ /*
+ * The PnALPHAR register controls alpha-blending in 16bpp formats
* (ARGB1555 and XRGB1555).
*
* For ARGB, set the alpha value to 0, and enable alpha-blending when
@@ -413,7 +427,8 @@ static void rcar_du_plane_setup_mode(struct rcar_du_group *rgrp,
pnmr = PnMR_BM_MD | state->format->pnmr;
- /* Disable color keying when requested. YUV formats have the
+ /*
+ * Disable color keying when requested. YUV formats have the
* PnMR_SPIM_TP_OFF bit set in their pnmr field, disabling color keying
* automatically.
*/
@@ -457,7 +472,8 @@ static void rcar_du_plane_setup_format_gen2(struct rcar_du_group *rgrp,
u32 ddcr2 = PnDDCR2_CODE;
u32 ddcr4;
- /* Data format
+ /*
+ * Data format
*
* The data format is selected by the DDDF field in PnMR and the EDF
* field in DDCR4.
@@ -589,7 +605,8 @@ static void rcar_du_plane_atomic_update(struct drm_plane *plane,
rcar_du_plane_setup(rplane);
- /* Check whether the source has changed from memory to live source or
+ /*
+ * Check whether the source has changed from memory to live source or
* from live source to memory. The source has been configured by the
* VSPS bit in the PnDDCR4 register. Although the datasheet states that
* the bit is updated during vertical blanking, it seems that updates
@@ -698,7 +715,6 @@ static const struct drm_plane_funcs rcar_du_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.reset = rcar_du_plane_reset,
- .set_property = drm_atomic_helper_plane_set_property,
.destroy = drm_plane_cleanup,
.atomic_duplicate_state = rcar_du_plane_atomic_duplicate_state,
.atomic_destroy_state = rcar_du_plane_atomic_destroy_state,
@@ -726,7 +742,8 @@ int rcar_du_planes_init(struct rcar_du_group *rgrp)
unsigned int i;
int ret;
- /* Create one primary plane per CRTC in this group and seven overlay
+ /*
+ * Create one primary plane per CRTC in this group and seven overlay
* planes.
*/
rgrp->num_planes = rgrp->num_crtcs + 7;
@@ -743,8 +760,8 @@ int rcar_du_planes_init(struct rcar_du_group *rgrp)
ret = drm_universal_plane_init(rcdu->ddev, &plane->plane, crtcs,
&rcar_du_plane_funcs, formats,
- ARRAY_SIZE(formats), type,
- NULL);
+ ARRAY_SIZE(formats),
+ NULL, type, NULL);
if (ret < 0)
return ret;
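The rcar_du_plane.c rework above replaces for_each_plane_in_state() with for_each_oldnew_plane_in_state() so that rcar_du_plane_needs_realloc() compares the old and new plane states belonging to the same commit, instead of peeking at plane->plane.state. A minimal userspace sketch of that old-versus-new test is shown below; the structure and field names are invented stand-ins rather than the driver's types.

#include <stdbool.h>
#include <stdio.h>

struct plane_state {
	int num_planes;	/* 0 means the plane has no format, i.e. is disabled */
	int source;	/* memory vs. live source, as an opaque id */
};

/* Mirror of the old/new comparison: a change in the number of hardware
 * planes used, or in the source, forces a fresh hardware-plane allocation. */
static bool needs_realloc(const struct plane_state *old_s,
			  const struct plane_state *new_s)
{
	if (!old_s->num_planes || old_s->num_planes != new_s->num_planes)
		return true;

	return old_s->source != new_s->source;
}

int main(void)
{
	struct plane_state old_s = { .num_planes = 1, .source = 0 };
	struct plane_state new_s = { .num_planes = 2, .source = 0 };

	printf("needs realloc: %s\n",
	       needs_realloc(&old_s, &new_s) ? "yes" : "no");
	return 0;
}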
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.h b/drivers/gpu/drm/rcar-du/rcar_du_plane.h
index 8b91dd3a46e4..f62e09f195de 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.h
@@ -20,7 +20,8 @@
struct rcar_du_format_info;
struct rcar_du_group;
-/* The RCAR DU has 8 hardware planes, shared between primary and overlay planes.
+/*
+ * The RCAR DU has 8 hardware planes, shared between primary and overlay planes.
* As using overlay planes requires at least one of the CRTCs being enabled, no
* more than 7 overlay planes can be available. We thus create 1 primary plane
* per CRTC and 7 overlay planes, for a total of up to 9 KMS planes.
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
index f870445ebc8d..2c96147bc444 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
@@ -19,6 +19,7 @@
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_plane_helper.h>
+#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/of_platform.h>
#include <linux/scatterlist.h>
@@ -30,11 +31,15 @@
#include "rcar_du_kms.h"
#include "rcar_du_vsp.h"
-static void rcar_du_vsp_complete(void *private)
+static void rcar_du_vsp_complete(void *private, bool completed)
{
struct rcar_du_crtc *crtc = private;
- rcar_du_crtc_finish_page_flip(crtc);
+ if (crtc->vblank_enable)
+ drm_crtc_handle_vblank(&crtc->crtc);
+
+ if (completed)
+ rcar_du_crtc_finish_page_flip(crtc);
}
void rcar_du_vsp_enable(struct rcar_du_crtc *crtc)
@@ -73,7 +78,8 @@ void rcar_du_vsp_enable(struct rcar_du_crtc *crtc)
__rcar_du_plane_setup(crtc->group, &state);
- /* Ensure that the plane source configuration takes effect by requesting
+ /*
+ * Ensure that the plane source configuration takes effect by requesting
* a restart of the group. See rcar_du_plane_atomic_update() for a more
* detailed explanation.
*
@@ -81,22 +87,22 @@ void rcar_du_vsp_enable(struct rcar_du_crtc *crtc)
*/
crtc->group->need_restart = true;
- vsp1_du_setup_lif(crtc->vsp->vsp, &cfg);
+ vsp1_du_setup_lif(crtc->vsp->vsp, crtc->vsp_pipe, &cfg);
}
void rcar_du_vsp_disable(struct rcar_du_crtc *crtc)
{
- vsp1_du_setup_lif(crtc->vsp->vsp, NULL);
+ vsp1_du_setup_lif(crtc->vsp->vsp, crtc->vsp_pipe, NULL);
}
void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc)
{
- vsp1_du_atomic_begin(crtc->vsp->vsp);
+ vsp1_du_atomic_begin(crtc->vsp->vsp, crtc->vsp_pipe);
}
void rcar_du_vsp_atomic_flush(struct rcar_du_crtc *crtc)
{
- vsp1_du_atomic_flush(crtc->vsp->vsp);
+ vsp1_du_atomic_flush(crtc->vsp->vsp, crtc->vsp_pipe);
}
/* Keep the two tables in sync. */
@@ -162,6 +168,7 @@ static void rcar_du_vsp_plane_setup(struct rcar_du_vsp_plane *plane)
{
struct rcar_du_vsp_plane_state *state =
to_rcar_vsp_plane_state(plane->plane.state);
+ struct rcar_du_crtc *crtc = to_rcar_crtc(state->state.crtc);
struct drm_framebuffer *fb = plane->plane.state->fb;
struct vsp1_du_atomic_config cfg = {
.pixelformat = 0,
@@ -192,7 +199,8 @@ static void rcar_du_vsp_plane_setup(struct rcar_du_vsp_plane *plane)
}
}
- vsp1_du_atomic_update(plane->vsp->vsp, plane->index, &cfg);
+ vsp1_du_atomic_update(plane->vsp->vsp, crtc->vsp_pipe,
+ plane->index, &cfg);
}
static int rcar_du_vsp_plane_prepare_fb(struct drm_plane *plane,
@@ -288,11 +296,13 @@ static void rcar_du_vsp_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct rcar_du_vsp_plane *rplane = to_rcar_vsp_plane(plane);
+ struct rcar_du_crtc *crtc = to_rcar_crtc(old_state->crtc);
if (plane->state->crtc)
rcar_du_vsp_plane_setup(rplane);
else
- vsp1_du_atomic_update(rplane->vsp->vsp, rplane->index, NULL);
+ vsp1_du_atomic_update(rplane->vsp->vsp, crtc->vsp_pipe,
+ rplane->index, NULL);
}
static const struct drm_plane_helper_funcs rcar_du_vsp_plane_helper_funcs = {
@@ -383,7 +393,6 @@ static const struct drm_plane_funcs rcar_du_vsp_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.reset = rcar_du_vsp_plane_reset,
- .set_property = drm_atomic_helper_plane_set_property,
.destroy = drm_plane_cleanup,
.atomic_duplicate_state = rcar_du_vsp_plane_atomic_duplicate_state,
.atomic_destroy_state = rcar_du_vsp_plane_atomic_destroy_state,
@@ -391,23 +400,17 @@ static const struct drm_plane_funcs rcar_du_vsp_plane_funcs = {
.atomic_get_property = rcar_du_vsp_plane_atomic_get_property,
};
-int rcar_du_vsp_init(struct rcar_du_vsp *vsp)
+int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
+ unsigned int crtcs)
{
struct rcar_du_device *rcdu = vsp->dev;
struct platform_device *pdev;
- struct device_node *np;
+ unsigned int num_crtcs = hweight32(crtcs);
unsigned int i;
int ret;
/* Find the VSP device and initialize it. */
- np = of_parse_phandle(rcdu->dev->of_node, "vsps", vsp->index);
- if (!np) {
- dev_err(rcdu->dev, "vsps node not found\n");
- return -ENXIO;
- }
-
pdev = of_find_device_by_node(np);
- of_node_put(np);
if (!pdev)
return -ENXIO;
@@ -417,7 +420,8 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp)
if (ret < 0)
return ret;
- /* The VSP2D (Gen3) has 5 RPFs, but the VSP1D (Gen2) is limited to
+ /*
+ * The VSP2D (Gen3) has 5 RPFs, but the VSP1D (Gen2) is limited to
* 4 RPFs.
*/
vsp->num_planes = rcdu->info->gen >= 3 ? 5 : 4;
@@ -428,19 +432,19 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp)
return -ENOMEM;
for (i = 0; i < vsp->num_planes; ++i) {
- enum drm_plane_type type = i ? DRM_PLANE_TYPE_OVERLAY
- : DRM_PLANE_TYPE_PRIMARY;
+ enum drm_plane_type type = i < num_crtcs
+ ? DRM_PLANE_TYPE_PRIMARY
+ : DRM_PLANE_TYPE_OVERLAY;
struct rcar_du_vsp_plane *plane = &vsp->planes[i];
plane->vsp = vsp;
plane->index = i;
- ret = drm_universal_plane_init(rcdu->ddev, &plane->plane,
- 1 << vsp->index,
+ ret = drm_universal_plane_init(rcdu->ddev, &plane->plane, crtcs,
&rcar_du_vsp_plane_funcs,
formats_kms,
- ARRAY_SIZE(formats_kms), type,
- NULL);
+ ARRAY_SIZE(formats_kms),
+ NULL, type, NULL);
if (ret < 0)
return ret;
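With the CRTC mask now passed into rcar_du_vsp_init(), the first hweight32(crtcs) planes of a VSP are registered as primary planes (one per CRTC the VSP feeds) and the remaining RPFs become overlays, all with that same mask as their possible CRTCs. The sketch below only illustrates the assignment for a hypothetical VSP feeding two CRTCs; it is not driver code.

#include <stdio.h>

/* Population count, equivalent in effect to the kernel's hweight32(). */
static unsigned int popcount32(unsigned int x)
{
	unsigned int n = 0;

	for (; x; x &= x - 1)
		n++;
	return n;
}

int main(void)
{
	unsigned int crtcs = 0x3;		/* this VSP feeds CRTCs 0 and 1 */
	unsigned int num_crtcs = popcount32(crtcs);
	unsigned int num_planes = 5;		/* a Gen3 VSP2D has 5 RPFs */
	unsigned int i;

	for (i = 0; i < num_planes; i++)
		printf("plane %u: %s\n", i,
		       i < num_crtcs ? "PRIMARY" : "OVERLAY");
	return 0;
}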
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.h b/drivers/gpu/drm/rcar-du/rcar_du_vsp.h
index 8861661590ff..f876c512163c 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.h
@@ -64,13 +64,19 @@ to_rcar_vsp_plane_state(struct drm_plane_state *state)
}
#ifdef CONFIG_DRM_RCAR_VSP
-int rcar_du_vsp_init(struct rcar_du_vsp *vsp);
+int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
+ unsigned int crtcs);
void rcar_du_vsp_enable(struct rcar_du_crtc *crtc);
void rcar_du_vsp_disable(struct rcar_du_crtc *crtc);
void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc);
void rcar_du_vsp_atomic_flush(struct rcar_du_crtc *crtc);
#else
-static inline int rcar_du_vsp_init(struct rcar_du_vsp *vsp) { return -ENXIO; };
+static inline int rcar_du_vsp_init(struct rcar_du_vsp *vsp,
+ struct device_node *np,
+ unsigned int crtcs)
+{
+ return -ENXIO;
+}
static inline void rcar_du_vsp_enable(struct rcar_du_crtc *crtc) { };
static inline void rcar_du_vsp_disable(struct rcar_du_crtc *crtc) { };
static inline void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc) { };
diff --git a/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c b/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c
index 7539626b8ebd..dc85b53d58ef 100644
--- a/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c
+++ b/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c
@@ -45,7 +45,7 @@ static int rcar_hdmi_phy_configure(struct dw_hdmi *hdmi,
{
const struct rcar_hdmi_phy_params *params = rcar_hdmi_phy_params;
- for (; params && params->mpixelclock != ~0UL; ++params) {
+ for (; params->mpixelclock != ~0UL; ++params) {
if (mpixelclock <= params->mpixelclock)
break;
}
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 50c41c0a50ef..dcc539ba85d6 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -5,6 +5,10 @@ config DRM_ROCKCHIP
select DRM_KMS_HELPER
select DRM_PANEL
select VIDEOMODE_HELPERS
+ select DRM_ANALOGIX_DP if ROCKCHIP_ANALOGIX_DP
+ select DRM_DW_HDMI if ROCKCHIP_DW_HDMI
+ select DRM_MIPI_DSI if ROCKCHIP_DW_MIPI_DSI
+ select SND_SOC_HDMI_CODEC if ROCKCHIP_CDN_DP && SND_SOC
help
Choose this option if you have a Rockchip soc chipset.
This driver provides kernel mode setting and buffer
@@ -12,10 +16,10 @@ config DRM_ROCKCHIP
2D or 3D acceleration; acceleration is performed by other
IP found on the SoC.
+if DRM_ROCKCHIP
+
config ROCKCHIP_ANALOGIX_DP
bool "Rockchip specific extensions for Analogix DP driver"
- depends on DRM_ROCKCHIP
- select DRM_ANALOGIX_DP
help
This selects support for Rockchip SoC specific extensions
for the Analogix Core DP driver. If you want to enable DP
@@ -23,9 +27,7 @@ config ROCKCHIP_ANALOGIX_DP
config ROCKCHIP_CDN_DP
bool "Rockchip cdn DP"
- depends on DRM_ROCKCHIP
- depends on EXTCON
- select SND_SOC_HDMI_CODEC if SND_SOC
+ depends on EXTCON=y || (EXTCON=m && DRM_ROCKCHIP=m)
help
This selects support for Rockchip SoC specific extensions
for the cdn DP driver. If you want to enable Dp on
@@ -34,8 +36,6 @@ config ROCKCHIP_CDN_DP
config ROCKCHIP_DW_HDMI
bool "Rockchip specific extensions for Synopsys DW HDMI"
- depends on DRM_ROCKCHIP
- select DRM_DW_HDMI
help
This selects support for Rockchip SoC specific extensions
for the Synopsys DesignWare HDMI driver. If you want to
@@ -44,8 +44,6 @@ config ROCKCHIP_DW_HDMI
config ROCKCHIP_DW_MIPI_DSI
bool "Rockchip specific extensions for Synopsys DW MIPI DSI"
- depends on DRM_ROCKCHIP
- select DRM_MIPI_DSI
help
This selects support for Rockchip SoC specific extensions
for the Synopsys DesignWare HDMI driver. If you want to
@@ -54,8 +52,9 @@ config ROCKCHIP_DW_MIPI_DSI
config ROCKCHIP_INNO_HDMI
bool "Rockchip specific extensions for Innosilicon HDMI"
- depends on DRM_ROCKCHIP
help
This selects support for Rockchip SoC specific extensions
for the Innosilicon HDMI driver. If you want to enable
HDMI on RK3036 based SoC, you should select this option.
+
+endif
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index 9b0b0588bbed..a57da051f516 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -254,7 +254,6 @@ static void cdn_dp_connector_destroy(struct drm_connector *connector)
}
static const struct drm_connector_funcs cdn_dp_atomic_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.detect = cdn_dp_connector_detect,
.destroy = cdn_dp_connector_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
index 21b9737662ae..9a20b9dc27c8 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
@@ -1080,7 +1080,6 @@ static void dw_mipi_dsi_drm_connector_destroy(struct drm_connector *connector)
}
static const struct drm_connector_funcs dw_mipi_dsi_atomic_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = dw_mipi_dsi_drm_connector_destroy,
.reset = drm_atomic_helper_connector_reset,
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index f8208489724e..ccd5d595ada7 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -7,10 +7,12 @@
* (at your option) any later version.
*/
+#include <linux/clk.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
+
#include <drm/drm_of.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
@@ -20,13 +22,32 @@
#include "rockchip_drm_drv.h"
#include "rockchip_drm_vop.h"
-#define GRF_SOC_CON6 0x025c
-#define HDMI_SEL_VOP_LIT (1 << 4)
+#define RK3288_GRF_SOC_CON6 0x025C
+#define RK3288_HDMI_LCDC_SEL BIT(4)
+#define RK3399_GRF_SOC_CON20 0x6250
+#define RK3399_HDMI_LCDC_SEL BIT(6)
+
+#define HIWORD_UPDATE(val, mask) (val | (mask) << 16)
+
+/**
+ * struct rockchip_hdmi_chip_data - splits out the GRF settings for different chips
+ * @lcdsel_grf_reg: GRF register offset of the lcdc select field
+ * @lcdsel_big: register value selecting the big VOP for HDMI
+ * @lcdsel_lit: register value selecting the little VOP for HDMI
+ */
+struct rockchip_hdmi_chip_data {
+ u32 lcdsel_grf_reg;
+ u32 lcdsel_big;
+ u32 lcdsel_lit;
+};
struct rockchip_hdmi {
struct device *dev;
struct regmap *regmap;
struct drm_encoder encoder;
+ const struct rockchip_hdmi_chip_data *chip_data;
+ struct clk *vpll_clk;
+ struct clk *grf_clk;
};
#define to_rockchip_hdmi(x) container_of(x, struct rockchip_hdmi, x)
@@ -143,6 +164,7 @@ static const struct dw_hdmi_phy_config rockchip_phy_config[] = {
static int rockchip_hdmi_parse_dt(struct rockchip_hdmi *hdmi)
{
struct device_node *np = hdmi->dev->of_node;
+ int ret;
hdmi->regmap = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
if (IS_ERR(hdmi->regmap)) {
@@ -150,6 +172,32 @@ static int rockchip_hdmi_parse_dt(struct rockchip_hdmi *hdmi)
return PTR_ERR(hdmi->regmap);
}
+ hdmi->vpll_clk = devm_clk_get(hdmi->dev, "vpll");
+ if (PTR_ERR(hdmi->vpll_clk) == -ENOENT) {
+ hdmi->vpll_clk = NULL;
+ } else if (PTR_ERR(hdmi->vpll_clk) == -EPROBE_DEFER) {
+ return -EPROBE_DEFER;
+ } else if (IS_ERR(hdmi->vpll_clk)) {
+ dev_err(hdmi->dev, "failed to get vpll clock\n");
+ return PTR_ERR(hdmi->vpll_clk);
+ }
+
+ hdmi->grf_clk = devm_clk_get(hdmi->dev, "grf");
+ if (PTR_ERR(hdmi->grf_clk) == -ENOENT) {
+ hdmi->grf_clk = NULL;
+ } else if (PTR_ERR(hdmi->grf_clk) == -EPROBE_DEFER) {
+ return -EPROBE_DEFER;
+ } else if (IS_ERR(hdmi->grf_clk)) {
+ dev_err(hdmi->dev, "failed to get grf clock\n");
+ return PTR_ERR(hdmi->grf_clk);
+ }
+
+ ret = clk_prepare_enable(hdmi->vpll_clk);
+ if (ret) {
+ dev_err(hdmi->dev, "Failed to enable HDMI vpll: %d\n", ret);
+ return ret;
+ }
+
return 0;
}
@@ -192,23 +240,36 @@ static void dw_hdmi_rockchip_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
{
+ struct rockchip_hdmi *hdmi = to_rockchip_hdmi(encoder);
+
+ clk_set_rate(hdmi->vpll_clk, adj_mode->clock * 1000);
}
static void dw_hdmi_rockchip_encoder_enable(struct drm_encoder *encoder)
{
struct rockchip_hdmi *hdmi = to_rockchip_hdmi(encoder);
u32 val;
- int mux;
+ int ret;
- mux = drm_of_encoder_active_endpoint_id(hdmi->dev->of_node, encoder);
- if (mux)
- val = HDMI_SEL_VOP_LIT | (HDMI_SEL_VOP_LIT << 16);
+ ret = drm_of_encoder_active_endpoint_id(hdmi->dev->of_node, encoder);
+ if (ret)
+ val = hdmi->chip_data->lcdsel_lit;
else
- val = HDMI_SEL_VOP_LIT << 16;
+ val = hdmi->chip_data->lcdsel_big;
- regmap_write(hdmi->regmap, GRF_SOC_CON6, val);
+ ret = clk_prepare_enable(hdmi->grf_clk);
+ if (ret < 0) {
+ dev_err(hdmi->dev, "failed to enable grfclk %d\n", ret);
+ return;
+ }
+
+ ret = regmap_write(hdmi->regmap, hdmi->chip_data->lcdsel_grf_reg, val);
+ if (ret != 0)
+ dev_err(hdmi->dev, "Could not write to GRF: %d\n", ret);
+
+ clk_disable_unprepare(hdmi->grf_clk);
dev_dbg(hdmi->dev, "vop %s output to hdmi\n",
- (mux) ? "LIT" : "BIG");
+ ret ? "LIT" : "BIG");
}
static int
@@ -232,16 +293,40 @@ static const struct drm_encoder_helper_funcs dw_hdmi_rockchip_encoder_helper_fun
.atomic_check = dw_hdmi_rockchip_encoder_atomic_check,
};
-static const struct dw_hdmi_plat_data rockchip_hdmi_drv_data = {
+static struct rockchip_hdmi_chip_data rk3288_chip_data = {
+ .lcdsel_grf_reg = RK3288_GRF_SOC_CON6,
+ .lcdsel_big = HIWORD_UPDATE(0, RK3288_HDMI_LCDC_SEL),
+ .lcdsel_lit = HIWORD_UPDATE(RK3288_HDMI_LCDC_SEL, RK3288_HDMI_LCDC_SEL),
+};
+
+static const struct dw_hdmi_plat_data rk3288_hdmi_drv_data = {
.mode_valid = dw_hdmi_rockchip_mode_valid,
.mpll_cfg = rockchip_mpll_cfg,
.cur_ctr = rockchip_cur_ctr,
.phy_config = rockchip_phy_config,
+ .phy_data = &rk3288_chip_data,
+};
+
+static struct rockchip_hdmi_chip_data rk3399_chip_data = {
+ .lcdsel_grf_reg = RK3399_GRF_SOC_CON20,
+ .lcdsel_big = HIWORD_UPDATE(0, RK3399_HDMI_LCDC_SEL),
+ .lcdsel_lit = HIWORD_UPDATE(RK3399_HDMI_LCDC_SEL, RK3399_HDMI_LCDC_SEL),
+};
+
+static const struct dw_hdmi_plat_data rk3399_hdmi_drv_data = {
+ .mode_valid = dw_hdmi_rockchip_mode_valid,
+ .mpll_cfg = rockchip_mpll_cfg,
+ .cur_ctr = rockchip_cur_ctr,
+ .phy_config = rockchip_phy_config,
+ .phy_data = &rk3399_chip_data,
};
static const struct of_device_id dw_hdmi_rockchip_dt_ids[] = {
{ .compatible = "rockchip,rk3288-dw-hdmi",
- .data = &rockchip_hdmi_drv_data
+ .data = &rk3288_hdmi_drv_data
+ },
+ { .compatible = "rockchip,rk3399-dw-hdmi",
+ .data = &rk3399_hdmi_drv_data
},
{},
};
@@ -268,6 +353,7 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
match = of_match_node(dw_hdmi_rockchip_dt_ids, pdev->dev.of_node);
plat_data = match->data;
hdmi->dev = &pdev->dev;
+ hdmi->chip_data = plat_data->phy_data;
encoder = &hdmi->encoder;
encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
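The RK3288/RK3399 GRF writes above follow the Rockchip "hiword mask" convention: bits 31:16 of the written value select which of bits 15:0 actually take effect, which is what HIWORD_UPDATE() encodes and why the VOP selection needs no read-modify-write. The sketch below models that behaviour in plain C; grf_apply() is a stand-in for what the register block does with the write, not a kernel API.

#include <stdio.h>
#include <stdint.h>

#define HIWORD_UPDATE(val, mask)	((val) | ((mask) << 16))

/* Model of a hiword-mask register: only bits whose mask bit (in the high
 * half-word) is set are updated, every other bit keeps its old value. */
static uint32_t grf_apply(uint32_t reg, uint32_t write_val)
{
	uint32_t mask = write_val >> 16;

	return (reg & ~mask) | (write_val & mask);
}

int main(void)
{
	uint32_t lcdc_sel = 1u << 4;	/* e.g. the RK3288 HDMI lcdc select bit */
	uint32_t reg = 0x0000ffff;	/* arbitrary prior register contents */

	/* Select the "little" VOP: the bit and its mask are written together. */
	reg = grf_apply(reg, HIWORD_UPDATE(lcdc_sel, lcdc_sel));
	printf("after selecting lit: 0x%08x\n", (unsigned int)reg);

	/* Select the "big" VOP: value 0 with the same mask clears only bit 4. */
	reg = grf_apply(reg, HIWORD_UPDATE(0, lcdc_sel));
	printf("after selecting big: 0x%08x\n", (unsigned int)reg);
	return 0;
}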
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index 7d9b75eb6c44..7a251a54e792 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -294,7 +294,7 @@ static int inno_hdmi_config_video_avi(struct inno_hdmi *hdmi,
union hdmi_infoframe frame;
int rc;
- rc = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode);
+ rc = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode, false);
if (hdmi->hdmi_data.enc_out_format == HDMI_COLORSPACE_YUV444)
frame.avi.colorspace = HDMI_COLORSPACE_YUV444;
@@ -592,8 +592,7 @@ static void inno_hdmi_connector_destroy(struct drm_connector *connector)
drm_connector_cleanup(connector);
}
-static struct drm_connector_funcs inno_hdmi_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
+static const struct drm_connector_funcs inno_hdmi_connector_funcs = {
.fill_modes = inno_hdmi_probe_single_connector_modes,
.detect = inno_hdmi_connector_detect,
.destroy = inno_hdmi_connector_destroy,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index c6b1b7f3a2a3..ff3d0f5efbb1 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -161,23 +161,21 @@ static int rockchip_drm_bind(struct device *dev)
*/
drm_dev->irq_enabled = true;
- /* init kms poll for handling hpd */
- drm_kms_helper_poll_init(drm_dev);
-
ret = rockchip_drm_fbdev_init(drm_dev);
if (ret)
- goto err_kms_helper_poll_fini;
+ goto err_unbind_all;
+
+ /* init kms poll for handling hpd */
+ drm_kms_helper_poll_init(drm_dev);
ret = drm_dev_register(drm_dev, 0);
if (ret)
- goto err_fbdev_fini;
+ goto err_kms_helper_poll_fini;
return 0;
-err_fbdev_fini:
- rockchip_drm_fbdev_fini(drm_dev);
err_kms_helper_poll_fini:
drm_kms_helper_poll_fini(drm_dev);
- drm_vblank_cleanup(drm_dev);
+ rockchip_drm_fbdev_fini(drm_dev);
err_unbind_all:
component_unbind_all(dev, drm_dev);
err_mode_config_cleanup:
@@ -200,7 +198,6 @@ static void rockchip_drm_unbind(struct device *dev)
drm_kms_helper_poll_fini(drm_dev);
drm_atomic_helper_shutdown(drm_dev);
- drm_vblank_cleanup(drm_dev);
component_unbind_all(dev, drm_dev);
drm_mode_config_cleanup(drm_dev);
rockchip_iommu_cleanup(drm_dev);
@@ -235,8 +232,6 @@ static struct drm_driver rockchip_drm_driver = {
.gem_vm_ops = &drm_gem_cma_vm_ops,
.gem_free_object_unlocked = rockchip_gem_free_object,
.dumb_create = rockchip_gem_dumb_create,
- .dumb_map_offset = rockchip_gem_dumb_map_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import = drm_gem_prime_import,
@@ -275,11 +270,15 @@ static void rockchip_drm_fb_resume(struct drm_device *drm)
static int rockchip_drm_sys_suspend(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
- struct rockchip_drm_private *priv = drm->dev_private;
+ struct rockchip_drm_private *priv;
+
+ if (!drm)
+ return 0;
drm_kms_helper_poll_disable(drm);
rockchip_drm_fb_suspend(drm);
+ priv = drm->dev_private;
priv->state = drm_atomic_helper_suspend(drm);
if (IS_ERR(priv->state)) {
rockchip_drm_fb_resume(drm);
@@ -293,8 +292,12 @@ static int rockchip_drm_sys_suspend(struct device *dev)
static int rockchip_drm_sys_resume(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
- struct rockchip_drm_private *priv = drm->dev_private;
+ struct rockchip_drm_private *priv;
+
+ if (!drm)
+ return 0;
+ priv = drm->dev_private;
drm_atomic_helper_resume(drm, priv->state);
rockchip_drm_fb_resume(drm);
drm_kms_helper_poll_enable(drm);
@@ -370,8 +373,8 @@ static int rockchip_drm_platform_of_probe(struct device *dev)
iommu = of_parse_phandle(port->parent, "iommus", 0);
if (!iommu || !of_device_is_available(iommu->parent)) {
- dev_dbg(dev, "no iommu attached for %s, using non-iommu buffers\n",
- port->parent->full_name);
+ dev_dbg(dev, "no iommu attached for %pOF, using non-iommu buffers\n",
+ port->parent);
/*
* if there is a crtc not support iommu, force set all
* crtc use non-iommu buffer.
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index 81f9548672b0..70773041785b 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -48,7 +48,7 @@ static void rockchip_drm_fb_destroy(struct drm_framebuffer *fb)
int i;
for (i = 0; i < ROCKCHIP_MAX_FB_BUFFER; i++)
- drm_gem_object_unreference_unlocked(rockchip_fb->obj[i]);
+ drm_gem_object_put_unlocked(rockchip_fb->obj[i]);
drm_framebuffer_cleanup(fb);
kfree(rockchip_fb);
@@ -144,7 +144,7 @@ rockchip_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
width * drm_format_plane_cpp(mode_cmd->pixel_format, i);
if (obj->size < min_size) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
ret = -EINVAL;
goto err_gem_object_unreference;
}
@@ -161,40 +161,19 @@ rockchip_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
err_gem_object_unreference:
for (i--; i >= 0; i--)
- drm_gem_object_unreference_unlocked(objs[i]);
+ drm_gem_object_put_unlocked(objs[i]);
return ERR_PTR(ret);
}
static void rockchip_drm_output_poll_changed(struct drm_device *dev)
{
struct rockchip_drm_private *private = dev->dev_private;
- struct drm_fb_helper *fb_helper = &private->fbdev_helper;
- if (fb_helper)
- drm_fb_helper_hotplug_event(fb_helper);
-}
-
-static void
-rockchip_atomic_commit_tail(struct drm_atomic_state *state)
-{
- struct drm_device *dev = state->dev;
-
- drm_atomic_helper_commit_modeset_disables(dev, state);
-
- drm_atomic_helper_commit_modeset_enables(dev, state);
-
- drm_atomic_helper_commit_planes(dev, state,
- DRM_PLANE_COMMIT_ACTIVE_ONLY);
-
- drm_atomic_helper_commit_hw_done(state);
-
- drm_atomic_helper_wait_for_vblanks(dev, state);
-
- drm_atomic_helper_cleanup_planes(dev, state);
+ drm_fb_helper_hotplug_event(&private->fbdev_helper);
}
static const struct drm_mode_config_helper_funcs rockchip_mode_config_helpers = {
- .atomic_commit_tail = rockchip_atomic_commit_tail,
+ .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
};
static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = {
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
index ce946b9c57a9..724579ebf947 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
@@ -173,7 +173,7 @@ void rockchip_drm_fbdev_fini(struct drm_device *dev)
drm_fb_helper_unregister_fbi(helper);
if (helper->fb)
- drm_framebuffer_unreference(helper->fb);
+ drm_framebuffer_put(helper->fb);
drm_fb_helper_fini(helper);
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index b74ac717e56a..1869c8bb76c8 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -383,7 +383,7 @@ rockchip_gem_create_with_handle(struct drm_file *file_priv,
goto err_handle_create;
/* drop reference from allocate - handle holds it now. */
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return rk_obj;
@@ -393,32 +393,6 @@ err_handle_create:
return ERR_PTR(ret);
}
-int rockchip_gem_dumb_map_offset(struct drm_file *file_priv,
- struct drm_device *dev, uint32_t handle,
- uint64_t *offset)
-{
- struct drm_gem_object *obj;
- int ret;
-
- obj = drm_gem_object_lookup(file_priv, handle);
- if (!obj) {
- DRM_ERROR("failed to lookup gem object.\n");
- return -EINVAL;
- }
-
- ret = drm_gem_create_mmap_offset(obj);
- if (ret)
- goto out;
-
- *offset = drm_vma_node_offset_addr(&obj->vma_node);
- DRM_DEBUG_KMS("offset = 0x%llx\n", *offset);
-
-out:
- drm_gem_object_unreference_unlocked(obj);
-
- return 0;
-}
-
/*
* rockchip_gem_dumb_create - (struct drm_driver)->dumb_create callback
* function
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
index 3f6ea4d18a5c..f237375582fb 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
@@ -57,7 +57,4 @@ void rockchip_gem_free_object(struct drm_gem_object *obj);
int rockchip_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-int rockchip_gem_dumb_map_offset(struct drm_file *file_priv,
- struct drm_device *dev, uint32_t handle,
- uint64_t *offset);
#endif /* _ROCKCHIP_DRM_GEM_H */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 5d450332c2fd..bf9ed0e63973 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -42,33 +42,20 @@
#include "rockchip_drm_psr.h"
#include "rockchip_drm_vop.h"
-#define __REG_SET_RELAXED(x, off, mask, shift, v, write_mask) \
- vop_mask_write(x, off, mask, shift, v, write_mask, true)
-
-#define __REG_SET_NORMAL(x, off, mask, shift, v, write_mask) \
- vop_mask_write(x, off, mask, shift, v, write_mask, false)
-
-#define REG_SET(x, base, reg, v, mode) \
- __REG_SET_##mode(x, base + reg.offset, \
- reg.mask, reg.shift, v, reg.write_mask)
-#define REG_SET_MASK(x, base, reg, mask, v, mode) \
- __REG_SET_##mode(x, base + reg.offset, \
- mask, reg.shift, v, reg.write_mask)
-
#define VOP_WIN_SET(x, win, name, v) \
- REG_SET(x, win->base, win->phy->name, v, RELAXED)
+ vop_reg_set(vop, &win->phy->name, win->base, ~0, v, #name)
#define VOP_SCL_SET(x, win, name, v) \
- REG_SET(x, win->base, win->phy->scl->name, v, RELAXED)
+ vop_reg_set(vop, &win->phy->scl->name, win->base, ~0, v, #name)
#define VOP_SCL_SET_EXT(x, win, name, v) \
- REG_SET(x, win->base, win->phy->scl->ext->name, v, RELAXED)
-#define VOP_CTRL_SET(x, name, v) \
- REG_SET(x, 0, (x)->data->ctrl->name, v, NORMAL)
+ vop_reg_set(vop, &win->phy->scl->ext->name, \
+ win->base, ~0, v, #name)
+
+#define VOP_INTR_SET_MASK(vop, name, mask, v) \
+ vop_reg_set(vop, &vop->data->intr->name, 0, mask, v, #name)
-#define VOP_INTR_GET(vop, name) \
- vop_read_reg(vop, 0, &vop->data->ctrl->name)
+#define VOP_REG_SET(vop, group, name, v) \
+ vop_reg_set(vop, &vop->data->group->name, 0, ~0, v, #name)
-#define VOP_INTR_SET(vop, name, mask, v) \
- REG_SET_MASK(vop, 0, vop->data->intr->name, mask, v, NORMAL)
#define VOP_INTR_SET_TYPE(vop, name, type, v) \
do { \
int i, reg = 0, mask = 0; \
@@ -78,13 +65,13 @@
mask |= 1 << i; \
} \
} \
- VOP_INTR_SET(vop, name, mask, reg); \
+ VOP_INTR_SET_MASK(vop, name, mask, reg); \
} while (0)
#define VOP_INTR_GET_TYPE(vop, name, type) \
vop_get_intr_type(vop, &vop->data->intr->name, type)
#define VOP_WIN_GET(x, win, name) \
- vop_read_reg(x, win->base, &win->phy->name)
+ vop_read_reg(x, win->offset, win->phy->name)
#define VOP_WIN_GET_YRGBADDR(vop, win) \
vop_readl(vop, win->base + win->phy->yrgb_mst.offset)
@@ -166,14 +153,22 @@ static inline uint32_t vop_read_reg(struct vop *vop, uint32_t base,
return (vop_readl(vop, base + reg->offset) >> reg->shift) & reg->mask;
}
-static inline void vop_mask_write(struct vop *vop, uint32_t offset,
- uint32_t mask, uint32_t shift, uint32_t v,
- bool write_mask, bool relaxed)
+static void vop_reg_set(struct vop *vop, const struct vop_reg *reg,
+ uint32_t _offset, uint32_t _mask, uint32_t v,
+ const char *reg_name)
{
- if (!mask)
+ int offset, mask, shift;
+
+ if (!reg || !reg->mask) {
+ dev_dbg(vop->dev, "Warning: %s is not supported\n", reg_name);
return;
+ }
+
+ offset = reg->offset + _offset;
+ mask = reg->mask & _mask;
+ shift = reg->shift;
- if (write_mask) {
+ if (reg->write_mask) {
v = ((v << shift) & 0xffff) | (mask << (shift + 16));
} else {
uint32_t cached_val = vop->regsbak[offset >> 2];
@@ -182,7 +177,7 @@ static inline void vop_mask_write(struct vop *vop, uint32_t offset,
vop->regsbak[offset >> 2] = v;
}
- if (relaxed)
+ if (reg->relaxed)
writel_relaxed(v, vop->regs + offset);
else
writel(v, vop->regs + offset);
@@ -204,7 +199,7 @@ static inline uint32_t vop_get_intr_type(struct vop *vop,
static inline void vop_cfg_done(struct vop *vop)
{
- VOP_CTRL_SET(vop, cfg_done, 1);
+ VOP_REG_SET(vop, common, cfg_done, 1);
}
static bool has_rb_swapped(uint32_t format)
@@ -500,7 +495,7 @@ static void vop_line_flag_irq_disable(struct vop *vop)
static int vop_enable(struct drm_crtc *crtc)
{
struct vop *vop = to_vop(crtc);
- int ret;
+ int ret, i;
ret = pm_runtime_get_sync(vop->dev);
if (ret < 0) {
@@ -533,6 +528,20 @@ static int vop_enable(struct drm_crtc *crtc)
}
memcpy(vop->regs, vop->regsbak, vop->len);
+ /*
+ * We need to make sure that all windows are disabled before we
+ * enable the crtc. Otherwise we might try to scan from a destroyed
+ * buffer later.
+ */
+ for (i = 0; i < vop->data->win_size; i++) {
+ struct vop_win *vop_win = &vop->win[i];
+ const struct vop_win_data *win = vop_win->data;
+
+ spin_lock(&vop->reg_lock);
+ VOP_WIN_SET(vop, win, enable, 0);
+ spin_unlock(&vop->reg_lock);
+ }
+
vop_cfg_done(vop);
/*
@@ -542,7 +551,7 @@ static int vop_enable(struct drm_crtc *crtc)
spin_lock(&vop->reg_lock);
- VOP_CTRL_SET(vop, standby, 0);
+ VOP_REG_SET(vop, common, standby, 1);
spin_unlock(&vop->reg_lock);
@@ -563,31 +572,15 @@ err_put_pm_runtime:
return ret;
}
-static void vop_crtc_disable(struct drm_crtc *crtc)
+static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct vop *vop = to_vop(crtc);
- int i;
WARN_ON(vop->event);
rockchip_drm_psr_deactivate(&vop->crtc);
- /*
- * We need to make sure that all windows are disabled before we
- * disable that crtc. Otherwise we might try to scan from a destroyed
- * buffer later.
- */
- for (i = 0; i < vop->data->win_size; i++) {
- struct vop_win *vop_win = &vop->win[i];
- const struct vop_win_data *win = vop_win->data;
-
- spin_lock(&vop->reg_lock);
- VOP_WIN_SET(vop, win, enable, 0);
- spin_unlock(&vop->reg_lock);
- }
-
- vop_cfg_done(vop);
-
drm_crtc_vblank_off(crtc);
/*
@@ -602,7 +595,7 @@ static void vop_crtc_disable(struct drm_crtc *crtc)
spin_lock(&vop->reg_lock);
- VOP_CTRL_SET(vop, standby, 1);
+ VOP_REG_SET(vop, common, standby, 1);
spin_unlock(&vop->reg_lock);
@@ -682,8 +675,10 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
* Src.x1 can be odd when do clip, but yuv plane start point
* need align with 2 pixel.
*/
- if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2))
+ if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2)) {
+ DRM_ERROR("Invalid Source: Yuv format not support odd xpos\n");
return -EINVAL;
+ }
return 0;
}
@@ -764,7 +759,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
spin_lock(&vop->reg_lock);
VOP_WIN_SET(vop, win, format, format);
- VOP_WIN_SET(vop, win, yrgb_vir, fb->pitches[0] >> 2);
+ VOP_WIN_SET(vop, win, yrgb_vir, DIV_ROUND_UP(fb->pitches[0], 4));
VOP_WIN_SET(vop, win, yrgb_mst, dma_addr);
if (is_yuv_support(fb->format->format)) {
int hsub = drm_format_horz_chroma_subsampling(fb->format->format);
@@ -778,7 +773,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
offset += (src->y1 >> 16) * fb->pitches[1] / vsub;
dma_addr = rk_uv_obj->dma_addr + offset + fb->offsets[1];
- VOP_WIN_SET(vop, win, uv_vir, fb->pitches[1] >> 2);
+ VOP_WIN_SET(vop, win, uv_vir, DIV_ROUND_UP(fb->pitches[1], 4));
VOP_WIN_SET(vop, win, uv_mst, dma_addr);
}
@@ -871,7 +866,8 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
return true;
}
-static void vop_crtc_enable(struct drm_crtc *crtc)
+static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct vop *vop = to_vop(crtc);
const struct vop_data *vop_data = vop->data;
@@ -898,70 +894,34 @@ static void vop_crtc_enable(struct drm_crtc *crtc)
return;
}
- /*
- * If dclk rate is zero, mean that scanout is stop,
- * we don't need wait any more.
- */
- if (clk_get_rate(vop->dclk)) {
- /*
- * Rk3288 vop timing register is immediately, when configure
- * display timing on display time, may cause tearing.
- *
- * Vop standby will take effect at end of current frame,
- * if dsp hold valid irq happen, it means standby complete.
- *
- * mode set:
- * standby and wait complete --> |----
- * | display time
- * |----
- * |---> dsp hold irq
- * configure display timing --> |
- * standby exit |
- * | new frame start.
- */
-
- reinit_completion(&vop->dsp_hold_completion);
- vop_dsp_hold_valid_irq_enable(vop);
-
- spin_lock(&vop->reg_lock);
-
- VOP_CTRL_SET(vop, standby, 1);
-
- spin_unlock(&vop->reg_lock);
-
- wait_for_completion(&vop->dsp_hold_completion);
-
- vop_dsp_hold_valid_irq_disable(vop);
- }
-
pin_pol = BIT(DCLK_INVERT);
pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) ?
BIT(HSYNC_POSITIVE) : 0;
pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) ?
BIT(VSYNC_POSITIVE) : 0;
- VOP_CTRL_SET(vop, pin_pol, pin_pol);
+ VOP_REG_SET(vop, output, pin_pol, pin_pol);
switch (s->output_type) {
case DRM_MODE_CONNECTOR_LVDS:
- VOP_CTRL_SET(vop, rgb_en, 1);
- VOP_CTRL_SET(vop, rgb_pin_pol, pin_pol);
+ VOP_REG_SET(vop, output, rgb_en, 1);
+ VOP_REG_SET(vop, output, rgb_pin_pol, pin_pol);
break;
case DRM_MODE_CONNECTOR_eDP:
- VOP_CTRL_SET(vop, edp_pin_pol, pin_pol);
- VOP_CTRL_SET(vop, edp_en, 1);
+ VOP_REG_SET(vop, output, edp_pin_pol, pin_pol);
+ VOP_REG_SET(vop, output, edp_en, 1);
break;
case DRM_MODE_CONNECTOR_HDMIA:
- VOP_CTRL_SET(vop, hdmi_pin_pol, pin_pol);
- VOP_CTRL_SET(vop, hdmi_en, 1);
+ VOP_REG_SET(vop, output, hdmi_pin_pol, pin_pol);
+ VOP_REG_SET(vop, output, hdmi_en, 1);
break;
case DRM_MODE_CONNECTOR_DSI:
- VOP_CTRL_SET(vop, mipi_pin_pol, pin_pol);
- VOP_CTRL_SET(vop, mipi_en, 1);
+ VOP_REG_SET(vop, output, mipi_pin_pol, pin_pol);
+ VOP_REG_SET(vop, output, mipi_en, 1);
break;
case DRM_MODE_CONNECTOR_DisplayPort:
pin_pol &= ~BIT(DCLK_INVERT);
- VOP_CTRL_SET(vop, dp_pin_pol, pin_pol);
- VOP_CTRL_SET(vop, dp_en, 1);
+ VOP_REG_SET(vop, output, dp_pin_pol, pin_pol);
+ VOP_REG_SET(vop, output, dp_en, 1);
break;
default:
DRM_DEV_ERROR(vop->dev, "unsupported connector_type [%d]\n",
@@ -974,25 +934,25 @@ static void vop_crtc_enable(struct drm_crtc *crtc)
if (s->output_mode == ROCKCHIP_OUT_MODE_AAAA &&
!(vop_data->feature & VOP_FEATURE_OUTPUT_RGB10))
s->output_mode = ROCKCHIP_OUT_MODE_P888;
- VOP_CTRL_SET(vop, out_mode, s->output_mode);
+ VOP_REG_SET(vop, common, out_mode, s->output_mode);
- VOP_CTRL_SET(vop, htotal_pw, (htotal << 16) | hsync_len);
+ VOP_REG_SET(vop, modeset, htotal_pw, (htotal << 16) | hsync_len);
val = hact_st << 16;
val |= hact_end;
- VOP_CTRL_SET(vop, hact_st_end, val);
- VOP_CTRL_SET(vop, hpost_st_end, val);
+ VOP_REG_SET(vop, modeset, hact_st_end, val);
+ VOP_REG_SET(vop, modeset, hpost_st_end, val);
- VOP_CTRL_SET(vop, vtotal_pw, (vtotal << 16) | vsync_len);
+ VOP_REG_SET(vop, modeset, vtotal_pw, (vtotal << 16) | vsync_len);
val = vact_st << 16;
val |= vact_end;
- VOP_CTRL_SET(vop, vact_st_end, val);
- VOP_CTRL_SET(vop, vpost_st_end, val);
+ VOP_REG_SET(vop, modeset, vact_st_end, val);
+ VOP_REG_SET(vop, modeset, vpost_st_end, val);
- VOP_CTRL_SET(vop, line_flag_num[0], vact_end);
+ VOP_REG_SET(vop, intr, line_flag_num[0], vact_end);
clk_set_rate(vop->dclk, adjusted_mode->clock * 1000);
- VOP_CTRL_SET(vop, standby, 0);
+ VOP_REG_SET(vop, common, standby, 0);
rockchip_drm_psr_activate(&vop->crtc);
}
@@ -1027,7 +987,7 @@ static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
struct drm_atomic_state *old_state = old_crtc_state->state;
- struct drm_plane_state *old_plane_state;
+ struct drm_plane_state *old_plane_state, *new_plane_state;
struct vop *vop = to_vop(crtc);
struct drm_plane *plane;
int i;
@@ -1058,14 +1018,15 @@ static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
}
spin_unlock_irq(&crtc->dev->event_lock);
- for_each_plane_in_state(old_state, plane, old_plane_state, i) {
+ for_each_oldnew_plane_in_state(old_state, plane, old_plane_state,
+ new_plane_state, i) {
if (!old_plane_state->fb)
continue;
- if (old_plane_state->fb == plane->state->fb)
+ if (old_plane_state->fb == new_plane_state->fb)
continue;
- drm_framebuffer_reference(old_plane_state->fb);
+ drm_framebuffer_get(old_plane_state->fb);
drm_flip_work_queue(&vop->fb_unref_work, old_plane_state->fb);
set_bit(VOP_PENDING_FB_UNREF, &vop->pending);
WARN_ON(drm_crtc_vblank_get(crtc) != 0);
@@ -1079,11 +1040,11 @@ static void vop_crtc_atomic_begin(struct drm_crtc *crtc,
}
static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = {
- .enable = vop_crtc_enable,
- .disable = vop_crtc_disable,
.mode_fixup = vop_crtc_mode_fixup,
.atomic_flush = vop_crtc_atomic_flush,
.atomic_begin = vop_crtc_atomic_begin,
+ .atomic_enable = vop_crtc_atomic_enable,
+ .atomic_disable = vop_crtc_atomic_disable,
};
static void vop_crtc_destroy(struct drm_crtc *crtc)
@@ -1189,7 +1150,7 @@ static void vop_fb_unref_worker(struct drm_flip_work *work, void *val)
struct drm_framebuffer *fb = val;
drm_crtc_vblank_put(&vop->crtc);
- drm_framebuffer_unreference(fb);
+ drm_framebuffer_put(fb);
}
static void vop_handle_vblank(struct vop *vop)
@@ -1290,7 +1251,7 @@ static int vop_create_crtc(struct vop *vop)
0, &vop_plane_funcs,
win_data->phy->data_formats,
win_data->phy->nformats,
- win_data->type, NULL);
+ NULL, win_data->type, NULL);
if (ret) {
DRM_DEV_ERROR(vop->dev, "failed to init plane %d\n",
ret);
@@ -1329,7 +1290,7 @@ static int vop_create_crtc(struct vop *vop)
&vop_plane_funcs,
win_data->phy->data_formats,
win_data->phy->nformats,
- win_data->type, NULL);
+ NULL, win_data->type, NULL);
if (ret) {
DRM_DEV_ERROR(vop->dev, "failed to init overlay %d\n",
ret);
@@ -1340,8 +1301,8 @@ static int vop_create_crtc(struct vop *vop)
port = of_get_child_by_name(dev->of_node, "port");
if (!port) {
- DRM_DEV_ERROR(vop->dev, "no port node found in %s\n",
- dev->of_node->full_name);
+ DRM_DEV_ERROR(vop->dev, "no port node found in %pOF\n",
+ dev->of_node);
ret = -ENOENT;
goto err_cleanup_crtc;
}
@@ -1395,7 +1356,6 @@ static void vop_destroy_crtc(struct vop *vop)
static int vop_initial(struct vop *vop)
{
const struct vop_data *vop_data = vop->data;
- const struct vop_reg_data *init_table = vop_data->init_table;
struct reset_control *ahb_rst;
int i, ret;
@@ -1455,13 +1415,16 @@ static int vop_initial(struct vop *vop)
memcpy(vop->regsbak, vop->regs, vop->len);
- for (i = 0; i < vop_data->table_size; i++)
- vop_writel(vop, init_table[i].offset, init_table[i].value);
+ VOP_REG_SET(vop, misc, global_regdone_en, 1);
+ VOP_REG_SET(vop, common, dsp_blank, 0);
for (i = 0; i < vop_data->win_size; i++) {
const struct vop_win_data *win = &vop_data->win[i];
+ int channel = i * 2 + 1;
+ VOP_WIN_SET(vop, win, channel, (channel + 1) << 4 | channel);
VOP_WIN_SET(vop, win, enable, 0);
+ VOP_WIN_SET(vop, win, gate, 1);
}
vop_cfg_done(vop);
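vop_reg_set() above folds the old REG_SET/REG_SET_MASK macro family into a single helper: a register flagged write_mask carries its mask in the upper half-word of the written value, anything else is patched through the regsbak shadow copy, and a per-register relaxed flag chooses writel_relaxed() versus writel(). The following is a rough userspace model of those two write paths, using invented storage for the shadow and the register file; it is only a sketch, not the driver code.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct vop_reg {
	uint32_t mask;
	uint16_t offset;
	uint8_t shift;
	bool write_mask;
};

static uint32_t shadow[16];	/* stand-in for vop->regsbak */
static uint32_t mmio[16];	/* stand-in for the mapped register file */

static void reg_set(const struct vop_reg *reg, uint32_t v)
{
	if (!reg->mask)
		return;		/* field not present on this VOP version */

	if (reg->write_mask) {
		/* Hiword-mask register: value and mask travel in one write. */
		v = ((v << reg->shift) & 0xffff) | (reg->mask << (reg->shift + 16));
	} else {
		/* Plain register: read-modify-write against the shadow copy. */
		uint32_t cached = shadow[reg->offset >> 2];

		v = (cached & ~(reg->mask << reg->shift)) |
		    ((v & reg->mask) << reg->shift);
		shadow[reg->offset >> 2] = v;
	}
	mmio[reg->offset >> 2] = v;
}

int main(void)
{
	/* Loosely modelled on the RK3288 standby bit (offset 0x8, bit 22). */
	const struct vop_reg standby = { .mask = 0x1, .offset = 0x8, .shift = 22 };

	reg_set(&standby, 1);
	printf("reg@0x08 = 0x%08x\n", (unsigned int)mmio[standby.offset >> 2]);
	return 0;
}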
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index 9979fd0c2282..56bbd2e2a8ef 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -15,6 +15,14 @@
#ifndef _ROCKCHIP_DRM_VOP_H
#define _ROCKCHIP_DRM_VOP_H
+/*
+ * major: IP major version, used for IP structure
+ * minor: big feature change under same structure
+ */
+#define VOP_VERSION(major, minor) ((major) << 8 | (minor))
+#define VOP_MAJOR(version) ((version) >> 8)
+#define VOP_MINOR(version) ((version) & 0xff)
+
enum vop_data_format {
VOP_FMT_ARGB8888 = 0,
VOP_FMT_RGB888,
@@ -24,53 +32,58 @@ enum vop_data_format {
VOP_FMT_YUV444SP,
};
-struct vop_reg_data {
- uint32_t offset;
- uint32_t value;
-};
-
struct vop_reg {
- uint32_t offset;
- uint32_t shift;
uint32_t mask;
+ uint16_t offset;
+ uint8_t shift;
bool write_mask;
+ bool relaxed;
};
-struct vop_ctrl {
- struct vop_reg standby;
- struct vop_reg data_blank;
- struct vop_reg gate_en;
- struct vop_reg mmu_en;
- struct vop_reg rgb_en;
- struct vop_reg edp_en;
- struct vop_reg hdmi_en;
- struct vop_reg mipi_en;
- struct vop_reg dp_en;
- struct vop_reg out_mode;
- struct vop_reg dither_down;
- struct vop_reg dither_up;
- struct vop_reg pin_pol;
- struct vop_reg rgb_pin_pol;
- struct vop_reg hdmi_pin_pol;
- struct vop_reg edp_pin_pol;
- struct vop_reg mipi_pin_pol;
- struct vop_reg dp_pin_pol;
-
+struct vop_modeset {
struct vop_reg htotal_pw;
struct vop_reg hact_st_end;
+ struct vop_reg hpost_st_end;
struct vop_reg vtotal_pw;
struct vop_reg vact_st_end;
- struct vop_reg hpost_st_end;
struct vop_reg vpost_st_end;
+};
- struct vop_reg line_flag_num[2];
+struct vop_output {
+ struct vop_reg pin_pol;
+ struct vop_reg dp_pin_pol;
+ struct vop_reg edp_pin_pol;
+ struct vop_reg hdmi_pin_pol;
+ struct vop_reg mipi_pin_pol;
+ struct vop_reg rgb_pin_pol;
+ struct vop_reg dp_en;
+ struct vop_reg edp_en;
+ struct vop_reg hdmi_en;
+ struct vop_reg mipi_en;
+ struct vop_reg rgb_en;
+};
+struct vop_common {
struct vop_reg cfg_done;
+ struct vop_reg dsp_blank;
+ struct vop_reg data_blank;
+ struct vop_reg dither_down;
+ struct vop_reg dither_up;
+ struct vop_reg gate_en;
+ struct vop_reg mmu_en;
+ struct vop_reg out_mode;
+ struct vop_reg standby;
+};
+
+struct vop_misc {
+ struct vop_reg global_regdone_en;
};
struct vop_intr {
const int *intrs;
uint32_t nintrs;
+
+ struct vop_reg line_flag_num[2];
struct vop_reg enable;
struct vop_reg clear;
struct vop_reg status;
@@ -115,6 +128,7 @@ struct vop_win_phy {
uint32_t nformats;
struct vop_reg enable;
+ struct vop_reg gate;
struct vop_reg format;
struct vop_reg rb_swap;
struct vop_reg act_info;
@@ -127,6 +141,7 @@ struct vop_win_phy {
struct vop_reg dst_alpha_ctl;
struct vop_reg src_alpha_ctl;
+ struct vop_reg channel;
};
struct vop_win_data {
@@ -136,10 +151,12 @@ struct vop_win_data {
};
struct vop_data {
- const struct vop_reg_data *init_table;
- unsigned int table_size;
- const struct vop_ctrl *ctrl;
+ uint32_t version;
const struct vop_intr *intr;
+ const struct vop_common *common;
+ const struct vop_misc *misc;
+ const struct vop_modeset *modeset;
+ const struct vop_output *output;
const struct vop_win_data *win;
unsigned int win_size;
@@ -282,6 +299,9 @@ static inline uint16_t scl_get_bili_dn_vskip(int src_h, int dst_h,
act_height = (src_h + vskiplines - 1) / vskiplines;
+ if (act_height == dst_h)
+ return GET_SCL_FT_BILI_DN(src_h, dst_h) / vskiplines;
+
return GET_SCL_FT_BILI_DN(act_height, dst_h);
}
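rockchip_drm_vop.h now also carries a packed VOP version, major revision in the high byte and minor in the low byte, so register layouts can be keyed by IP revision instead of by per-SoC init tables. A trivial check of that packing is shown below; the version value used is hypothetical.

#include <stdio.h>

#define VOP_VERSION(major, minor)	((major) << 8 | (minor))
#define VOP_MAJOR(version)		((version) >> 8)
#define VOP_MINOR(version)		((version) & 0xff)

int main(void)
{
	unsigned int v = VOP_VERSION(3, 1);	/* hypothetical VOP revision 3.1 */

	printf("version 0x%04x -> major %u, minor %u\n",
	       v, VOP_MAJOR(v), VOP_MINOR(v));
	return 0;
}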
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index bafd698a28b1..94de7b9f6fde 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -20,17 +20,23 @@
#include "rockchip_drm_vop.h"
#include "rockchip_vop_reg.h"
-#define VOP_REG(off, _mask, s) \
- {.offset = off, \
+#define _VOP_REG(off, _mask, _shift, _write_mask, _relaxed) \
+ { \
+ .offset = off, \
.mask = _mask, \
- .shift = s, \
- .write_mask = false,}
+ .shift = _shift, \
+ .write_mask = _write_mask, \
+ .relaxed = _relaxed, \
+ }
-#define VOP_REG_MASK(off, _mask, s) \
- {.offset = off, \
- .mask = _mask, \
- .shift = s, \
- .write_mask = true,}
+#define VOP_REG(off, _mask, _shift) \
+ _VOP_REG(off, _mask, _shift, false, true)
+
+#define VOP_REG_SYNC(off, _mask, _shift) \
+ _VOP_REG(off, _mask, _shift, false, false)
+
+#define VOP_REG_MASK_SYNC(off, _mask, _shift) \
+ _VOP_REG(off, _mask, _shift, true, false)
static const uint32_t formats_win_full[] = {
DRM_FORMAT_XRGB8888,
@@ -110,32 +116,35 @@ static const int rk3036_vop_intrs[] = {
static const struct vop_intr rk3036_intr = {
.intrs = rk3036_vop_intrs,
.nintrs = ARRAY_SIZE(rk3036_vop_intrs),
- .status = VOP_REG(RK3036_INT_STATUS, 0xf, 0),
- .enable = VOP_REG(RK3036_INT_STATUS, 0xf, 4),
- .clear = VOP_REG(RK3036_INT_STATUS, 0xf, 8),
+ .line_flag_num[0] = VOP_REG(RK3036_INT_STATUS, 0xfff, 12),
+ .status = VOP_REG_SYNC(RK3036_INT_STATUS, 0xf, 0),
+ .enable = VOP_REG_SYNC(RK3036_INT_STATUS, 0xf, 4),
+ .clear = VOP_REG_SYNC(RK3036_INT_STATUS, 0xf, 8),
};
-static const struct vop_ctrl rk3036_ctrl_data = {
- .standby = VOP_REG(RK3036_SYS_CTRL, 0x1, 30),
- .out_mode = VOP_REG(RK3036_DSP_CTRL0, 0xf, 0),
- .pin_pol = VOP_REG(RK3036_DSP_CTRL0, 0xf, 4),
+static const struct vop_modeset rk3036_modeset = {
.htotal_pw = VOP_REG(RK3036_DSP_HTOTAL_HS_END, 0x1fff1fff, 0),
.hact_st_end = VOP_REG(RK3036_DSP_HACT_ST_END, 0x1fff1fff, 0),
.vtotal_pw = VOP_REG(RK3036_DSP_VTOTAL_VS_END, 0x1fff1fff, 0),
.vact_st_end = VOP_REG(RK3036_DSP_VACT_ST_END, 0x1fff1fff, 0),
- .line_flag_num[0] = VOP_REG(RK3036_INT_STATUS, 0xfff, 12),
- .cfg_done = VOP_REG(RK3036_REG_CFG_DONE, 0x1, 0),
};
-static const struct vop_reg_data rk3036_vop_init_reg_table[] = {
- {RK3036_DSP_CTRL1, 0x00000000},
+static const struct vop_output rk3036_output = {
+ .pin_pol = VOP_REG(RK3036_DSP_CTRL0, 0xf, 4),
+};
+
+static const struct vop_common rk3036_common = {
+ .standby = VOP_REG_SYNC(RK3036_SYS_CTRL, 0x1, 30),
+ .out_mode = VOP_REG(RK3036_DSP_CTRL0, 0xf, 0),
+ .dsp_blank = VOP_REG(RK3036_DSP_CTRL1, 0x1, 24),
+ .cfg_done = VOP_REG_SYNC(RK3036_REG_CFG_DONE, 0x1, 0),
};
static const struct vop_data rk3036_vop = {
- .init_table = rk3036_vop_init_reg_table,
- .table_size = ARRAY_SIZE(rk3036_vop_init_reg_table),
- .ctrl = &rk3036_ctrl_data,
.intr = &rk3036_intr,
+ .common = &rk3036_common,
+ .modeset = &rk3036_modeset,
+ .output = &rk3036_output,
.win = rk3036_vop_win_data,
.win_size = ARRAY_SIZE(rk3036_vop_win_data),
};
@@ -188,12 +197,14 @@ static const struct vop_win_phy rk3288_win01_data = {
.uv_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 16),
.src_alpha_ctl = VOP_REG(RK3288_WIN0_SRC_ALPHA_CTRL, 0xff, 0),
.dst_alpha_ctl = VOP_REG(RK3288_WIN0_DST_ALPHA_CTRL, 0xff, 0),
+ .channel = VOP_REG(RK3288_WIN0_CTRL2, 0xff, 0),
};
static const struct vop_win_phy rk3288_win23_data = {
.data_formats = formats_win_lite,
.nformats = ARRAY_SIZE(formats_win_lite),
- .enable = VOP_REG(RK3288_WIN2_CTRL0, 0x1, 0),
+ .enable = VOP_REG(RK3288_WIN2_CTRL0, 0x1, 4),
+ .gate = VOP_REG(RK3288_WIN2_CTRL0, 0x1, 0),
.format = VOP_REG(RK3288_WIN2_CTRL0, 0x7, 1),
.rb_swap = VOP_REG(RK3288_WIN2_CTRL0, 0x1, 12),
.dsp_info = VOP_REG(RK3288_WIN2_DSP_INFO0, 0x0fff0fff, 0),
@@ -204,40 +215,33 @@ static const struct vop_win_phy rk3288_win23_data = {
.dst_alpha_ctl = VOP_REG(RK3288_WIN2_DST_ALPHA_CTRL, 0xff, 0),
};
-static const struct vop_ctrl rk3288_ctrl_data = {
- .standby = VOP_REG(RK3288_SYS_CTRL, 0x1, 22),
- .gate_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 23),
- .mmu_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 20),
- .rgb_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 12),
- .hdmi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 13),
- .edp_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 14),
- .mipi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 15),
- .dither_down = VOP_REG(RK3288_DSP_CTRL1, 0xf, 1),
- .dither_up = VOP_REG(RK3288_DSP_CTRL1, 0x1, 6),
- .data_blank = VOP_REG(RK3288_DSP_CTRL0, 0x1, 19),
- .out_mode = VOP_REG(RK3288_DSP_CTRL0, 0xf, 0),
- .pin_pol = VOP_REG(RK3288_DSP_CTRL0, 0xf, 4),
+static const struct vop_modeset rk3288_modeset = {
.htotal_pw = VOP_REG(RK3288_DSP_HTOTAL_HS_END, 0x1fff1fff, 0),
.hact_st_end = VOP_REG(RK3288_DSP_HACT_ST_END, 0x1fff1fff, 0),
.vtotal_pw = VOP_REG(RK3288_DSP_VTOTAL_VS_END, 0x1fff1fff, 0),
.vact_st_end = VOP_REG(RK3288_DSP_VACT_ST_END, 0x1fff1fff, 0),
.hpost_st_end = VOP_REG(RK3288_POST_DSP_HACT_INFO, 0x1fff1fff, 0),
.vpost_st_end = VOP_REG(RK3288_POST_DSP_VACT_INFO, 0x1fff1fff, 0),
- .line_flag_num[0] = VOP_REG(RK3288_INTR_CTRL0, 0x1fff, 12),
- .cfg_done = VOP_REG(RK3288_REG_CFG_DONE, 0x1, 0),
};
-static const struct vop_reg_data rk3288_init_reg_table[] = {
- {RK3288_SYS_CTRL, 0x00c00000},
- {RK3288_DSP_CTRL0, 0x00000000},
- {RK3288_WIN0_CTRL0, 0x00000080},
- {RK3288_WIN1_CTRL0, 0x00000080},
- /* TODO: Win2/3 support multiple area function, but we haven't found
- * a suitable way to use it yet, so let's just use them as other windows
- * with only area 0 enabled.
- */
- {RK3288_WIN2_CTRL0, 0x00000010},
- {RK3288_WIN3_CTRL0, 0x00000010},
+static const struct vop_output rk3288_output = {
+ .pin_pol = VOP_REG(RK3288_DSP_CTRL0, 0xf, 4),
+ .rgb_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 12),
+ .hdmi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 13),
+ .edp_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 14),
+ .mipi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 15),
+};
+
+static const struct vop_common rk3288_common = {
+ .standby = VOP_REG_SYNC(RK3288_SYS_CTRL, 0x1, 22),
+ .gate_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 23),
+ .mmu_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 20),
+ .dither_down = VOP_REG(RK3288_DSP_CTRL1, 0xf, 1),
+ .dither_up = VOP_REG(RK3288_DSP_CTRL1, 0x1, 6),
+ .data_blank = VOP_REG(RK3288_DSP_CTRL0, 0x1, 19),
+ .dsp_blank = VOP_REG(RK3288_DSP_CTRL0, 0x3, 18),
+ .out_mode = VOP_REG(RK3288_DSP_CTRL0, 0xf, 0),
+ .cfg_done = VOP_REG_SYNC(RK3288_REG_CFG_DONE, 0x1, 0),
};
/*
@@ -267,50 +271,24 @@ static const int rk3288_vop_intrs[] = {
static const struct vop_intr rk3288_vop_intr = {
.intrs = rk3288_vop_intrs,
.nintrs = ARRAY_SIZE(rk3288_vop_intrs),
+ .line_flag_num[0] = VOP_REG(RK3288_INTR_CTRL0, 0x1fff, 12),
.status = VOP_REG(RK3288_INTR_CTRL0, 0xf, 0),
.enable = VOP_REG(RK3288_INTR_CTRL0, 0xf, 4),
.clear = VOP_REG(RK3288_INTR_CTRL0, 0xf, 8),
};
static const struct vop_data rk3288_vop = {
- .init_table = rk3288_init_reg_table,
- .table_size = ARRAY_SIZE(rk3288_init_reg_table),
+ .version = VOP_VERSION(3, 1),
.feature = VOP_FEATURE_OUTPUT_RGB10,
.intr = &rk3288_vop_intr,
- .ctrl = &rk3288_ctrl_data,
+ .common = &rk3288_common,
+ .modeset = &rk3288_modeset,
+ .output = &rk3288_output,
.win = rk3288_vop_win_data,
.win_size = ARRAY_SIZE(rk3288_vop_win_data),
};
-static const struct vop_ctrl rk3399_ctrl_data = {
- .standby = VOP_REG(RK3399_SYS_CTRL, 0x1, 22),
- .gate_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 23),
- .dp_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 11),
- .rgb_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 12),
- .hdmi_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 13),
- .edp_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 14),
- .mipi_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 15),
- .dither_down = VOP_REG(RK3399_DSP_CTRL1, 0xf, 1),
- .dither_up = VOP_REG(RK3399_DSP_CTRL1, 0x1, 6),
- .data_blank = VOP_REG(RK3399_DSP_CTRL0, 0x1, 19),
- .out_mode = VOP_REG(RK3399_DSP_CTRL0, 0xf, 0),
- .rgb_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 16),
- .dp_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 16),
- .hdmi_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 20),
- .edp_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 24),
- .mipi_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 28),
- .htotal_pw = VOP_REG(RK3399_DSP_HTOTAL_HS_END, 0x1fff1fff, 0),
- .hact_st_end = VOP_REG(RK3399_DSP_HACT_ST_END, 0x1fff1fff, 0),
- .vtotal_pw = VOP_REG(RK3399_DSP_VTOTAL_VS_END, 0x1fff1fff, 0),
- .vact_st_end = VOP_REG(RK3399_DSP_VACT_ST_END, 0x1fff1fff, 0),
- .hpost_st_end = VOP_REG(RK3399_POST_DSP_HACT_INFO, 0x1fff1fff, 0),
- .vpost_st_end = VOP_REG(RK3399_POST_DSP_VACT_INFO, 0x1fff1fff, 0),
- .line_flag_num[0] = VOP_REG(RK3399_LINE_FLAG, 0xffff, 0),
- .line_flag_num[1] = VOP_REG(RK3399_LINE_FLAG, 0xffff, 16),
- .cfg_done = VOP_REG_MASK(RK3399_REG_CFG_DONE, 0x1, 0),
-};
-
-static const int rk3399_vop_intrs[] = {
+static const int rk3368_vop_intrs[] = {
FS_INTR,
0, 0,
LINE_FLAG_INTR,
@@ -320,69 +298,232 @@ static const int rk3399_vop_intrs[] = {
DSP_HOLD_VALID_INTR,
};
-static const struct vop_intr rk3399_vop_intr = {
- .intrs = rk3399_vop_intrs,
- .nintrs = ARRAY_SIZE(rk3399_vop_intrs),
- .status = VOP_REG_MASK(RK3399_INTR_STATUS0, 0xffff, 0),
- .enable = VOP_REG_MASK(RK3399_INTR_EN0, 0xffff, 0),
- .clear = VOP_REG_MASK(RK3399_INTR_CLEAR0, 0xffff, 0),
+static const struct vop_intr rk3368_vop_intr = {
+ .intrs = rk3368_vop_intrs,
+ .nintrs = ARRAY_SIZE(rk3368_vop_intrs),
+ .line_flag_num[0] = VOP_REG(RK3368_LINE_FLAG, 0xffff, 0),
+ .line_flag_num[1] = VOP_REG(RK3368_LINE_FLAG, 0xffff, 16),
+ .status = VOP_REG_MASK_SYNC(RK3368_INTR_STATUS, 0x3fff, 0),
+ .enable = VOP_REG_MASK_SYNC(RK3368_INTR_EN, 0x3fff, 0),
+ .clear = VOP_REG_MASK_SYNC(RK3368_INTR_CLEAR, 0x3fff, 0),
};
-static const struct vop_reg_data rk3399_init_reg_table[] = {
- {RK3399_SYS_CTRL, 0x2000f800},
- {RK3399_DSP_CTRL0, 0x00000000},
- {RK3399_WIN0_CTRL0, 0x00000080},
- {RK3399_WIN1_CTRL0, 0x00000080},
- /* TODO: Win2/3 support multiple area function, but we haven't found
- * a suitable way to use it yet, so let's just use them as other windows
- * with only area 0 enabled.
- */
- {RK3399_WIN2_CTRL0, 0x00000010},
- {RK3399_WIN3_CTRL0, 0x00000010},
+static const struct vop_win_phy rk3368_win23_data = {
+ .data_formats = formats_win_lite,
+ .nformats = ARRAY_SIZE(formats_win_lite),
+ .gate = VOP_REG(RK3368_WIN2_CTRL0, 0x1, 0),
+ .enable = VOP_REG(RK3368_WIN2_CTRL0, 0x1, 4),
+ .format = VOP_REG(RK3368_WIN2_CTRL0, 0x3, 5),
+ .rb_swap = VOP_REG(RK3368_WIN2_CTRL0, 0x1, 20),
+ .dsp_info = VOP_REG(RK3368_WIN2_DSP_INFO0, 0x0fff0fff, 0),
+ .dsp_st = VOP_REG(RK3368_WIN2_DSP_ST0, 0x1fff1fff, 0),
+ .yrgb_mst = VOP_REG(RK3368_WIN2_MST0, 0xffffffff, 0),
+ .yrgb_vir = VOP_REG(RK3368_WIN2_VIR0_1, 0x1fff, 0),
+ .src_alpha_ctl = VOP_REG(RK3368_WIN2_SRC_ALPHA_CTRL, 0xff, 0),
+ .dst_alpha_ctl = VOP_REG(RK3368_WIN2_DST_ALPHA_CTRL, 0xff, 0),
+};
+
+static const struct vop_win_data rk3368_vop_win_data[] = {
+ { .base = 0x00, .phy = &rk3288_win01_data,
+ .type = DRM_PLANE_TYPE_PRIMARY },
+ { .base = 0x40, .phy = &rk3288_win01_data,
+ .type = DRM_PLANE_TYPE_OVERLAY },
+ { .base = 0x00, .phy = &rk3368_win23_data,
+ .type = DRM_PLANE_TYPE_OVERLAY },
+ { .base = 0x50, .phy = &rk3368_win23_data,
+ .type = DRM_PLANE_TYPE_CURSOR },
+};
+
+static const struct vop_output rk3368_output = {
+ .rgb_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 16),
+ .hdmi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 20),
+ .edp_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 24),
+ .mipi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 28),
+ .rgb_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 12),
+ .hdmi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 13),
+ .edp_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 14),
+ .mipi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 15),
+};
+
+static const struct vop_misc rk3368_misc = {
+ .global_regdone_en = VOP_REG(RK3368_SYS_CTRL, 0x1, 11),
+};
+
+static const struct vop_data rk3368_vop = {
+ .version = VOP_VERSION(3, 2),
+ .intr = &rk3368_vop_intr,
+ .common = &rk3288_common,
+ .modeset = &rk3288_modeset,
+ .output = &rk3368_output,
+ .misc = &rk3368_misc,
+ .win = rk3368_vop_win_data,
+ .win_size = ARRAY_SIZE(rk3368_vop_win_data),
+};
+
+static const struct vop_intr rk3366_vop_intr = {
+ .intrs = rk3368_vop_intrs,
+ .nintrs = ARRAY_SIZE(rk3368_vop_intrs),
+ .line_flag_num[0] = VOP_REG(RK3366_LINE_FLAG, 0xffff, 0),
+ .line_flag_num[1] = VOP_REG(RK3366_LINE_FLAG, 0xffff, 16),
+ .status = VOP_REG_MASK_SYNC(RK3366_INTR_STATUS0, 0xffff, 0),
+ .enable = VOP_REG_MASK_SYNC(RK3366_INTR_EN0, 0xffff, 0),
+ .clear = VOP_REG_MASK_SYNC(RK3366_INTR_CLEAR0, 0xffff, 0),
+};
+
+static const struct vop_data rk3366_vop = {
+ .version = VOP_VERSION(3, 4),
+ .intr = &rk3366_vop_intr,
+ .common = &rk3288_common,
+ .modeset = &rk3288_modeset,
+ .output = &rk3368_output,
+ .misc = &rk3368_misc,
+ .win = rk3368_vop_win_data,
+ .win_size = ARRAY_SIZE(rk3368_vop_win_data),
+};
+
+static const struct vop_output rk3399_output = {
+ .dp_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 16),
+ .rgb_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 16),
+ .hdmi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 20),
+ .edp_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 24),
+ .mipi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 28),
+ .dp_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 11),
+ .rgb_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 12),
+ .hdmi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 13),
+ .edp_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 14),
+ .mipi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 15),
};
static const struct vop_data rk3399_vop_big = {
- .init_table = rk3399_init_reg_table,
- .table_size = ARRAY_SIZE(rk3399_init_reg_table),
+ .version = VOP_VERSION(3, 5),
.feature = VOP_FEATURE_OUTPUT_RGB10,
- .intr = &rk3399_vop_intr,
- .ctrl = &rk3399_ctrl_data,
- /*
- * rk3399 vop big windows register layout is same as rk3288.
- */
- .win = rk3288_vop_win_data,
- .win_size = ARRAY_SIZE(rk3288_vop_win_data),
+ .intr = &rk3366_vop_intr,
+ .common = &rk3288_common,
+ .modeset = &rk3288_modeset,
+ .output = &rk3399_output,
+ .misc = &rk3368_misc,
+ .win = rk3368_vop_win_data,
+ .win_size = ARRAY_SIZE(rk3368_vop_win_data),
};
static const struct vop_win_data rk3399_vop_lit_win_data[] = {
{ .base = 0x00, .phy = &rk3288_win01_data,
.type = DRM_PLANE_TYPE_PRIMARY },
- { .base = 0x00, .phy = &rk3288_win23_data,
+ { .base = 0x00, .phy = &rk3368_win23_data,
.type = DRM_PLANE_TYPE_CURSOR},
};
static const struct vop_data rk3399_vop_lit = {
- .init_table = rk3399_init_reg_table,
- .table_size = ARRAY_SIZE(rk3399_init_reg_table),
- .intr = &rk3399_vop_intr,
- .ctrl = &rk3399_ctrl_data,
- /*
- * rk3399 vop lit windows register layout is same as rk3288,
- * but cut off the win1 and win3 windows.
- */
+ .version = VOP_VERSION(3, 6),
+ .intr = &rk3366_vop_intr,
+ .common = &rk3288_common,
+ .modeset = &rk3288_modeset,
+ .output = &rk3399_output,
+ .misc = &rk3368_misc,
.win = rk3399_vop_lit_win_data,
.win_size = ARRAY_SIZE(rk3399_vop_lit_win_data),
};
+static const struct vop_win_data rk3228_vop_win_data[] = {
+ { .base = 0x00, .phy = &rk3288_win01_data,
+ .type = DRM_PLANE_TYPE_PRIMARY },
+ { .base = 0x40, .phy = &rk3288_win01_data,
+ .type = DRM_PLANE_TYPE_CURSOR },
+};
+
+static const struct vop_data rk3228_vop = {
+ .version = VOP_VERSION(3, 7),
+ .feature = VOP_FEATURE_OUTPUT_RGB10,
+ .intr = &rk3366_vop_intr,
+ .common = &rk3288_common,
+ .modeset = &rk3288_modeset,
+ .output = &rk3399_output,
+ .misc = &rk3368_misc,
+ .win = rk3228_vop_win_data,
+ .win_size = ARRAY_SIZE(rk3228_vop_win_data),
+};
+
+static const struct vop_modeset rk3328_modeset = {
+ .htotal_pw = VOP_REG(RK3328_DSP_HTOTAL_HS_END, 0x1fff1fff, 0),
+ .hact_st_end = VOP_REG(RK3328_DSP_HACT_ST_END, 0x1fff1fff, 0),
+ .vtotal_pw = VOP_REG(RK3328_DSP_VTOTAL_VS_END, 0x1fff1fff, 0),
+ .vact_st_end = VOP_REG(RK3328_DSP_VACT_ST_END, 0x1fff1fff, 0),
+ .hpost_st_end = VOP_REG(RK3328_POST_DSP_HACT_INFO, 0x1fff1fff, 0),
+ .vpost_st_end = VOP_REG(RK3328_POST_DSP_VACT_INFO, 0x1fff1fff, 0),
+};
+
+static const struct vop_output rk3328_output = {
+ .rgb_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 12),
+ .hdmi_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 13),
+ .edp_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 14),
+ .mipi_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 15),
+ .rgb_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0xf, 16),
+ .hdmi_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0xf, 20),
+ .edp_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0xf, 24),
+ .mipi_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0xf, 28),
+};
+
+static const struct vop_misc rk3328_misc = {
+ .global_regdone_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 11),
+};
+
+static const struct vop_common rk3328_common = {
+ .standby = VOP_REG_SYNC(RK3328_SYS_CTRL, 0x1, 22),
+ .dither_down = VOP_REG(RK3328_DSP_CTRL1, 0xf, 1),
+ .dither_up = VOP_REG(RK3328_DSP_CTRL1, 0x1, 6),
+ .dsp_blank = VOP_REG(RK3328_DSP_CTRL0, 0x3, 18),
+ .out_mode = VOP_REG(RK3328_DSP_CTRL0, 0xf, 0),
+ .cfg_done = VOP_REG_SYNC(RK3328_REG_CFG_DONE, 0x1, 0),
+};
+
+static const struct vop_intr rk3328_vop_intr = {
+ .intrs = rk3368_vop_intrs,
+ .nintrs = ARRAY_SIZE(rk3368_vop_intrs),
+ .line_flag_num[0] = VOP_REG(RK3328_LINE_FLAG, 0xffff, 0),
+ .line_flag_num[1] = VOP_REG(RK3328_LINE_FLAG, 0xffff, 16),
+ .status = VOP_REG_MASK_SYNC(RK3328_INTR_STATUS0, 0xffff, 0),
+ .enable = VOP_REG_MASK_SYNC(RK3328_INTR_EN0, 0xffff, 0),
+ .clear = VOP_REG_MASK_SYNC(RK3328_INTR_CLEAR0, 0xffff, 0),
+};
+
+static const struct vop_win_data rk3328_vop_win_data[] = {
+ { .base = 0xd0, .phy = &rk3288_win01_data,
+ .type = DRM_PLANE_TYPE_PRIMARY },
+ { .base = 0x1d0, .phy = &rk3288_win01_data,
+ .type = DRM_PLANE_TYPE_OVERLAY },
+ { .base = 0x2d0, .phy = &rk3288_win01_data,
+ .type = DRM_PLANE_TYPE_CURSOR },
+};
+
+static const struct vop_data rk3328_vop = {
+ .version = VOP_VERSION(3, 8),
+ .feature = VOP_FEATURE_OUTPUT_RGB10,
+ .intr = &rk3328_vop_intr,
+ .common = &rk3328_common,
+ .modeset = &rk3328_modeset,
+ .output = &rk3328_output,
+ .misc = &rk3328_misc,
+ .win = rk3328_vop_win_data,
+ .win_size = ARRAY_SIZE(rk3328_vop_win_data),
+};
+
static const struct of_device_id vop_driver_dt_match[] = {
{ .compatible = "rockchip,rk3036-vop",
.data = &rk3036_vop },
{ .compatible = "rockchip,rk3288-vop",
.data = &rk3288_vop },
+ { .compatible = "rockchip,rk3368-vop",
+ .data = &rk3368_vop },
+ { .compatible = "rockchip,rk3366-vop",
+ .data = &rk3366_vop },
{ .compatible = "rockchip,rk3399-vop-big",
.data = &rk3399_vop_big },
{ .compatible = "rockchip,rk3399-vop-lit",
.data = &rk3399_vop_lit },
+ { .compatible = "rockchip,rk3228-vop",
+ .data = &rk3228_vop },
+ { .compatible = "rockchip,rk3328-vop",
+ .data = &rk3328_vop },
{},
};
MODULE_DEVICE_TABLE(of, vop_driver_dt_match);
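
The VOP_REG/VOP_REG_SYNC/VOP_REG_MASK_SYNC flavours introduced at the top of this file's diff differ only in the write_mask and relaxed flags they record in struct vop_reg. As a rough sketch of how a write helper would consume those flags (the real helper is vop_mask_write() in rockchip_drm_vop.c and may differ in detail): write-masked registers carry a per-bit write enable in the upper 16 bits, non-masked registers are read-modify-written through a cached shadow copy, and relaxed writes use writel_relaxed() instead of a plain writel():

	/* Sketch only, not part of this patch; field names follow the structs above. */
	static void vop_reg_write_sketch(struct vop *vop, const struct vop_reg *reg,
					 uint32_t v)
	{
		if (reg->write_mask) {
			/* hardware updates only the bits whose enable (mask) is set high */
			v = ((v << reg->shift) & 0xffff) | (reg->mask << (reg->shift + 16));
		} else {
			uint32_t cached = vop->regsbak[reg->offset >> 2];

			v = (cached & ~(reg->mask << reg->shift)) |
			    ((v & reg->mask) << reg->shift);
			vop->regsbak[reg->offset >> 2] = v;
		}

		if (reg->relaxed)
			writel_relaxed(v, vop->regs + reg->offset);
		else
			writel(v, vop->regs + reg->offset);
	}
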
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.h b/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
index cd197260ece5..4a4799ff65de 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
@@ -41,6 +41,7 @@
#define RK3288_WIN0_SRC_ALPHA_CTRL 0x0060
#define RK3288_WIN0_DST_ALPHA_CTRL 0x0064
#define RK3288_WIN0_FADING_CTRL 0x0068
+#define RK3288_WIN0_CTRL2 0x006c
/* win1 register */
#define RK3288_WIN1_CTRL0 0x0070
@@ -122,6 +123,717 @@
#define RK3288_DSP_VACT_ST_END_F1 0x019c
/* register definition end */
+/* rk3368 register definition */
+#define RK3368_REG_CFG_DONE 0x0000
+#define RK3368_VERSION_INFO 0x0004
+#define RK3368_SYS_CTRL 0x0008
+#define RK3368_SYS_CTRL1 0x000c
+#define RK3368_DSP_CTRL0 0x0010
+#define RK3368_DSP_CTRL1 0x0014
+#define RK3368_DSP_BG 0x0018
+#define RK3368_MCU_CTRL 0x001c
+#define RK3368_LINE_FLAG 0x0020
+#define RK3368_INTR_EN 0x0024
+#define RK3368_INTR_CLEAR 0x0028
+#define RK3368_INTR_STATUS 0x002c
+#define RK3368_WIN0_CTRL0 0x0030
+#define RK3368_WIN0_CTRL1 0x0034
+#define RK3368_WIN0_COLOR_KEY 0x0038
+#define RK3368_WIN0_VIR 0x003c
+#define RK3368_WIN0_YRGB_MST 0x0040
+#define RK3368_WIN0_CBR_MST 0x0044
+#define RK3368_WIN0_ACT_INFO 0x0048
+#define RK3368_WIN0_DSP_INFO 0x004c
+#define RK3368_WIN0_DSP_ST 0x0050
+#define RK3368_WIN0_SCL_FACTOR_YRGB 0x0054
+#define RK3368_WIN0_SCL_FACTOR_CBR 0x0058
+#define RK3368_WIN0_SCL_OFFSET 0x005c
+#define RK3368_WIN0_SRC_ALPHA_CTRL 0x0060
+#define RK3368_WIN0_DST_ALPHA_CTRL 0x0064
+#define RK3368_WIN0_FADING_CTRL 0x0068
+#define RK3368_WIN0_CTRL2 0x006c
+#define RK3368_WIN1_CTRL0 0x0070
+#define RK3368_WIN1_CTRL1 0x0074
+#define RK3368_WIN1_COLOR_KEY 0x0078
+#define RK3368_WIN1_VIR 0x007c
+#define RK3368_WIN1_YRGB_MST 0x0080
+#define RK3368_WIN1_CBR_MST 0x0084
+#define RK3368_WIN1_ACT_INFO 0x0088
+#define RK3368_WIN1_DSP_INFO 0x008c
+#define RK3368_WIN1_DSP_ST 0x0090
+#define RK3368_WIN1_SCL_FACTOR_YRGB 0x0094
+#define RK3368_WIN1_SCL_FACTOR_CBR 0x0098
+#define RK3368_WIN1_SCL_OFFSET 0x009c
+#define RK3368_WIN1_SRC_ALPHA_CTRL 0x00a0
+#define RK3368_WIN1_DST_ALPHA_CTRL 0x00a4
+#define RK3368_WIN1_FADING_CTRL 0x00a8
+#define RK3368_WIN1_CTRL2 0x00ac
+#define RK3368_WIN2_CTRL0 0x00b0
+#define RK3368_WIN2_CTRL1 0x00b4
+#define RK3368_WIN2_VIR0_1 0x00b8
+#define RK3368_WIN2_VIR2_3 0x00bc
+#define RK3368_WIN2_MST0 0x00c0
+#define RK3368_WIN2_DSP_INFO0 0x00c4
+#define RK3368_WIN2_DSP_ST0 0x00c8
+#define RK3368_WIN2_COLOR_KEY 0x00cc
+#define RK3368_WIN2_MST1 0x00d0
+#define RK3368_WIN2_DSP_INFO1 0x00d4
+#define RK3368_WIN2_DSP_ST1 0x00d8
+#define RK3368_WIN2_SRC_ALPHA_CTRL 0x00dc
+#define RK3368_WIN2_MST2 0x00e0
+#define RK3368_WIN2_DSP_INFO2 0x00e4
+#define RK3368_WIN2_DSP_ST2 0x00e8
+#define RK3368_WIN2_DST_ALPHA_CTRL 0x00ec
+#define RK3368_WIN2_MST3 0x00f0
+#define RK3368_WIN2_DSP_INFO3 0x00f4
+#define RK3368_WIN2_DSP_ST3 0x00f8
+#define RK3368_WIN2_FADING_CTRL 0x00fc
+#define RK3368_WIN3_CTRL0 0x0100
+#define RK3368_WIN3_CTRL1 0x0104
+#define RK3368_WIN3_VIR0_1 0x0108
+#define RK3368_WIN3_VIR2_3 0x010c
+#define RK3368_WIN3_MST0 0x0110
+#define RK3368_WIN3_DSP_INFO0 0x0114
+#define RK3368_WIN3_DSP_ST0 0x0118
+#define RK3368_WIN3_COLOR_KEY 0x011c
+#define RK3368_WIN3_MST1 0x0120
+#define RK3368_WIN3_DSP_INFO1 0x0124
+#define RK3368_WIN3_DSP_ST1 0x0128
+#define RK3368_WIN3_SRC_ALPHA_CTRL 0x012c
+#define RK3368_WIN3_MST2 0x0130
+#define RK3368_WIN3_DSP_INFO2 0x0134
+#define RK3368_WIN3_DSP_ST2 0x0138
+#define RK3368_WIN3_DST_ALPHA_CTRL 0x013c
+#define RK3368_WIN3_MST3 0x0140
+#define RK3368_WIN3_DSP_INFO3 0x0144
+#define RK3368_WIN3_DSP_ST3 0x0148
+#define RK3368_WIN3_FADING_CTRL 0x014c
+#define RK3368_HWC_CTRL0 0x0150
+#define RK3368_HWC_CTRL1 0x0154
+#define RK3368_HWC_MST 0x0158
+#define RK3368_HWC_DSP_ST 0x015c
+#define RK3368_HWC_SRC_ALPHA_CTRL 0x0160
+#define RK3368_HWC_DST_ALPHA_CTRL 0x0164
+#define RK3368_HWC_FADING_CTRL 0x0168
+#define RK3368_HWC_RESERVED1 0x016c
+#define RK3368_POST_DSP_HACT_INFO 0x0170
+#define RK3368_POST_DSP_VACT_INFO 0x0174
+#define RK3368_POST_SCL_FACTOR_YRGB 0x0178
+#define RK3368_POST_RESERVED 0x017c
+#define RK3368_POST_SCL_CTRL 0x0180
+#define RK3368_POST_DSP_VACT_INFO_F1 0x0184
+#define RK3368_DSP_HTOTAL_HS_END 0x0188
+#define RK3368_DSP_HACT_ST_END 0x018c
+#define RK3368_DSP_VTOTAL_VS_END 0x0190
+#define RK3368_DSP_VACT_ST_END 0x0194
+#define RK3368_DSP_VS_ST_END_F1 0x0198
+#define RK3368_DSP_VACT_ST_END_F1 0x019c
+#define RK3368_PWM_CTRL 0x01a0
+#define RK3368_PWM_PERIOD_HPR 0x01a4
+#define RK3368_PWM_DUTY_LPR 0x01a8
+#define RK3368_PWM_CNT 0x01ac
+#define RK3368_BCSH_COLOR_BAR 0x01b0
+#define RK3368_BCSH_BCS 0x01b4
+#define RK3368_BCSH_H 0x01b8
+#define RK3368_BCSH_CTRL 0x01bc
+#define RK3368_CABC_CTRL0 0x01c0
+#define RK3368_CABC_CTRL1 0x01c4
+#define RK3368_CABC_CTRL2 0x01c8
+#define RK3368_CABC_CTRL3 0x01cc
+#define RK3368_CABC_GAUSS_LINE0_0 0x01d0
+#define RK3368_CABC_GAUSS_LINE0_1 0x01d4
+#define RK3368_CABC_GAUSS_LINE1_0 0x01d8
+#define RK3368_CABC_GAUSS_LINE1_1 0x01dc
+#define RK3368_CABC_GAUSS_LINE2_0 0x01e0
+#define RK3368_CABC_GAUSS_LINE2_1 0x01e4
+#define RK3368_FRC_LOWER01_0 0x01e8
+#define RK3368_FRC_LOWER01_1 0x01ec
+#define RK3368_FRC_LOWER10_0 0x01f0
+#define RK3368_FRC_LOWER10_1 0x01f4
+#define RK3368_FRC_LOWER11_0 0x01f8
+#define RK3368_FRC_LOWER11_1 0x01fc
+#define RK3368_IFBDC_CTRL 0x0200
+#define RK3368_IFBDC_TILES_NUM 0x0204
+#define RK3368_IFBDC_FRAME_RST_CYCLE 0x0208
+#define RK3368_IFBDC_BASE_ADDR 0x020c
+#define RK3368_IFBDC_MB_SIZE 0x0210
+#define RK3368_IFBDC_CMP_INDEX_INIT 0x0214
+#define RK3368_IFBDC_VIR 0x0220
+#define RK3368_IFBDC_DEBUG0 0x0230
+#define RK3368_IFBDC_DEBUG1 0x0234
+#define RK3368_LATENCY_CTRL0 0x0250
+#define RK3368_RD_MAX_LATENCY_NUM0 0x0254
+#define RK3368_RD_LATENCY_THR_NUM0 0x0258
+#define RK3368_RD_LATENCY_SAMP_NUM0 0x025c
+#define RK3368_WIN0_DSP_BG 0x0260
+#define RK3368_WIN1_DSP_BG 0x0264
+#define RK3368_WIN2_DSP_BG 0x0268
+#define RK3368_WIN3_DSP_BG 0x026c
+#define RK3368_SCAN_LINE_NUM 0x0270
+#define RK3368_CABC_DEBUG0 0x0274
+#define RK3368_CABC_DEBUG1 0x0278
+#define RK3368_CABC_DEBUG2 0x027c
+#define RK3368_DBG_REG_000 0x0280
+#define RK3368_DBG_REG_001 0x0284
+#define RK3368_DBG_REG_002 0x0288
+#define RK3368_DBG_REG_003 0x028c
+#define RK3368_DBG_REG_004 0x0290
+#define RK3368_DBG_REG_005 0x0294
+#define RK3368_DBG_REG_006 0x0298
+#define RK3368_DBG_REG_007 0x029c
+#define RK3368_DBG_REG_008 0x02a0
+#define RK3368_DBG_REG_016 0x02c0
+#define RK3368_DBG_REG_017 0x02c4
+#define RK3368_DBG_REG_018 0x02c8
+#define RK3368_DBG_REG_019 0x02cc
+#define RK3368_DBG_REG_020 0x02d0
+#define RK3368_DBG_REG_021 0x02d4
+#define RK3368_DBG_REG_022 0x02d8
+#define RK3368_DBG_REG_023 0x02dc
+#define RK3368_DBG_REG_028 0x02f0
+#define RK3368_MMU_DTE_ADDR 0x0300
+#define RK3368_MMU_STATUS 0x0304
+#define RK3368_MMU_COMMAND 0x0308
+#define RK3368_MMU_PAGE_FAULT_ADDR 0x030c
+#define RK3368_MMU_ZAP_ONE_LINE 0x0310
+#define RK3368_MMU_INT_RAWSTAT 0x0314
+#define RK3368_MMU_INT_CLEAR 0x0318
+#define RK3368_MMU_INT_MASK 0x031c
+#define RK3368_MMU_INT_STATUS 0x0320
+#define RK3368_MMU_AUTO_GATING 0x0324
+#define RK3368_WIN2_LUT_ADDR 0x0400
+#define RK3368_WIN3_LUT_ADDR 0x0800
+#define RK3368_HWC_LUT_ADDR 0x0c00
+#define RK3368_GAMMA_LUT_ADDR 0x1000
+#define RK3368_CABC_GAMMA_LUT_ADDR 0x1800
+#define RK3368_MCU_BYPASS_WPORT 0x2200
+#define RK3368_MCU_BYPASS_RPORT 0x2300
+/* rk3368 register definition end */
+
+#define RK3366_REG_CFG_DONE 0x0000
+#define RK3366_VERSION_INFO 0x0004
+#define RK3366_SYS_CTRL 0x0008
+#define RK3366_SYS_CTRL1 0x000c
+#define RK3366_DSP_CTRL0 0x0010
+#define RK3366_DSP_CTRL1 0x0014
+#define RK3366_DSP_BG 0x0018
+#define RK3366_MCU_CTRL 0x001c
+#define RK3366_WB_CTRL0 0x0020
+#define RK3366_WB_CTRL1 0x0024
+#define RK3366_WB_YRGB_MST 0x0028
+#define RK3366_WB_CBR_MST 0x002c
+#define RK3366_WIN0_CTRL0 0x0030
+#define RK3366_WIN0_CTRL1 0x0034
+#define RK3366_WIN0_COLOR_KEY 0x0038
+#define RK3366_WIN0_VIR 0x003c
+#define RK3366_WIN0_YRGB_MST 0x0040
+#define RK3366_WIN0_CBR_MST 0x0044
+#define RK3366_WIN0_ACT_INFO 0x0048
+#define RK3366_WIN0_DSP_INFO 0x004c
+#define RK3366_WIN0_DSP_ST 0x0050
+#define RK3366_WIN0_SCL_FACTOR_YRGB 0x0054
+#define RK3366_WIN0_SCL_FACTOR_CBR 0x0058
+#define RK3366_WIN0_SCL_OFFSET 0x005c
+#define RK3366_WIN0_SRC_ALPHA_CTRL 0x0060
+#define RK3366_WIN0_DST_ALPHA_CTRL 0x0064
+#define RK3366_WIN0_FADING_CTRL 0x0068
+#define RK3366_WIN0_CTRL2 0x006c
+#define RK3366_WIN1_CTRL0 0x0070
+#define RK3366_WIN1_CTRL1 0x0074
+#define RK3366_WIN1_COLOR_KEY 0x0078
+#define RK3366_WIN1_VIR 0x007c
+#define RK3366_WIN1_YRGB_MST 0x0080
+#define RK3366_WIN1_CBR_MST 0x0084
+#define RK3366_WIN1_ACT_INFO 0x0088
+#define RK3366_WIN1_DSP_INFO 0x008c
+#define RK3366_WIN1_DSP_ST 0x0090
+#define RK3366_WIN1_SCL_FACTOR_YRGB 0x0094
+#define RK3366_WIN1_SCL_FACTOR_CBR 0x0098
+#define RK3366_WIN1_SCL_OFFSET 0x009c
+#define RK3366_WIN1_SRC_ALPHA_CTRL 0x00a0
+#define RK3366_WIN1_DST_ALPHA_CTRL 0x00a4
+#define RK3366_WIN1_FADING_CTRL 0x00a8
+#define RK3366_WIN1_CTRL2 0x00ac
+#define RK3366_WIN2_CTRL0 0x00b0
+#define RK3366_WIN2_CTRL1 0x00b4
+#define RK3366_WIN2_VIR0_1 0x00b8
+#define RK3366_WIN2_VIR2_3 0x00bc
+#define RK3366_WIN2_MST0 0x00c0
+#define RK3366_WIN2_DSP_INFO0 0x00c4
+#define RK3366_WIN2_DSP_ST0 0x00c8
+#define RK3366_WIN2_COLOR_KEY 0x00cc
+#define RK3366_WIN2_MST1 0x00d0
+#define RK3366_WIN2_DSP_INFO1 0x00d4
+#define RK3366_WIN2_DSP_ST1 0x00d8
+#define RK3366_WIN2_SRC_ALPHA_CTRL 0x00dc
+#define RK3366_WIN2_MST2 0x00e0
+#define RK3366_WIN2_DSP_INFO2 0x00e4
+#define RK3366_WIN2_DSP_ST2 0x00e8
+#define RK3366_WIN2_DST_ALPHA_CTRL 0x00ec
+#define RK3366_WIN2_MST3 0x00f0
+#define RK3366_WIN2_DSP_INFO3 0x00f4
+#define RK3366_WIN2_DSP_ST3 0x00f8
+#define RK3366_WIN2_FADING_CTRL 0x00fc
+#define RK3366_WIN3_CTRL0 0x0100
+#define RK3366_WIN3_CTRL1 0x0104
+#define RK3366_WIN3_VIR0_1 0x0108
+#define RK3366_WIN3_VIR2_3 0x010c
+#define RK3366_WIN3_MST0 0x0110
+#define RK3366_WIN3_DSP_INFO0 0x0114
+#define RK3366_WIN3_DSP_ST0 0x0118
+#define RK3366_WIN3_COLOR_KEY 0x011c
+#define RK3366_WIN3_MST1 0x0120
+#define RK3366_WIN3_DSP_INFO1 0x0124
+#define RK3366_WIN3_DSP_ST1 0x0128
+#define RK3366_WIN3_SRC_ALPHA_CTRL 0x012c
+#define RK3366_WIN3_MST2 0x0130
+#define RK3366_WIN3_DSP_INFO2 0x0134
+#define RK3366_WIN3_DSP_ST2 0x0138
+#define RK3366_WIN3_DST_ALPHA_CTRL 0x013c
+#define RK3366_WIN3_MST3 0x0140
+#define RK3366_WIN3_DSP_INFO3 0x0144
+#define RK3366_WIN3_DSP_ST3 0x0148
+#define RK3366_WIN3_FADING_CTRL 0x014c
+#define RK3366_HWC_CTRL0 0x0150
+#define RK3366_HWC_CTRL1 0x0154
+#define RK3366_HWC_MST 0x0158
+#define RK3366_HWC_DSP_ST 0x015c
+#define RK3366_HWC_SRC_ALPHA_CTRL 0x0160
+#define RK3366_HWC_DST_ALPHA_CTRL 0x0164
+#define RK3366_HWC_FADING_CTRL 0x0168
+#define RK3366_HWC_RESERVED1 0x016c
+#define RK3366_POST_DSP_HACT_INFO 0x0170
+#define RK3366_POST_DSP_VACT_INFO 0x0174
+#define RK3366_POST_SCL_FACTOR_YRGB 0x0178
+#define RK3366_POST_RESERVED 0x017c
+#define RK3366_POST_SCL_CTRL 0x0180
+#define RK3366_POST_DSP_VACT_INFO_F1 0x0184
+#define RK3366_DSP_HTOTAL_HS_END 0x0188
+#define RK3366_DSP_HACT_ST_END 0x018c
+#define RK3366_DSP_VTOTAL_VS_END 0x0190
+#define RK3366_DSP_VACT_ST_END 0x0194
+#define RK3366_DSP_VS_ST_END_F1 0x0198
+#define RK3366_DSP_VACT_ST_END_F1 0x019c
+#define RK3366_PWM_CTRL 0x01a0
+#define RK3366_PWM_PERIOD_HPR 0x01a4
+#define RK3366_PWM_DUTY_LPR 0x01a8
+#define RK3366_PWM_CNT 0x01ac
+#define RK3366_BCSH_COLOR_BAR 0x01b0
+#define RK3366_BCSH_BCS 0x01b4
+#define RK3366_BCSH_H 0x01b8
+#define RK3366_BCSH_CTRL 0x01bc
+#define RK3366_CABC_CTRL0 0x01c0
+#define RK3366_CABC_CTRL1 0x01c4
+#define RK3366_CABC_CTRL2 0x01c8
+#define RK3366_CABC_CTRL3 0x01cc
+#define RK3366_CABC_GAUSS_LINE0_0 0x01d0
+#define RK3366_CABC_GAUSS_LINE0_1 0x01d4
+#define RK3366_CABC_GAUSS_LINE1_0 0x01d8
+#define RK3366_CABC_GAUSS_LINE1_1 0x01dc
+#define RK3366_CABC_GAUSS_LINE2_0 0x01e0
+#define RK3366_CABC_GAUSS_LINE2_1 0x01e4
+#define RK3366_FRC_LOWER01_0 0x01e8
+#define RK3366_FRC_LOWER01_1 0x01ec
+#define RK3366_FRC_LOWER10_0 0x01f0
+#define RK3366_FRC_LOWER10_1 0x01f4
+#define RK3366_FRC_LOWER11_0 0x01f8
+#define RK3366_FRC_LOWER11_1 0x01fc
+#define RK3366_INTR_EN0 0x0280
+#define RK3366_INTR_CLEAR0 0x0284
+#define RK3366_INTR_STATUS0 0x0288
+#define RK3366_INTR_RAW_STATUS0 0x028c
+#define RK3366_INTR_EN1 0x0290
+#define RK3366_INTR_CLEAR1 0x0294
+#define RK3366_INTR_STATUS1 0x0298
+#define RK3366_INTR_RAW_STATUS1 0x029c
+#define RK3366_LINE_FLAG 0x02a0
+#define RK3366_VOP_STATUS 0x02a4
+#define RK3366_BLANKING_VALUE 0x02a8
+#define RK3366_WIN0_DSP_BG 0x02b0
+#define RK3366_WIN1_DSP_BG 0x02b4
+#define RK3366_WIN2_DSP_BG 0x02b8
+#define RK3366_WIN3_DSP_BG 0x02bc
+#define RK3366_WIN2_LUT_ADDR 0x0400
+#define RK3366_WIN3_LUT_ADDR 0x0800
+#define RK3366_HWC_LUT_ADDR 0x0c00
+#define RK3366_GAMMA0_LUT_ADDR 0x1000
+#define RK3366_GAMMA1_LUT_ADDR 0x1400
+#define RK3366_CABC_GAMMA_LUT_ADDR 0x1800
+#define RK3366_MCU_BYPASS_WPORT 0x2200
+#define RK3366_MCU_BYPASS_RPORT 0x2300
+#define RK3366_MMU_DTE_ADDR 0x2400
+#define RK3366_MMU_STATUS 0x2404
+#define RK3366_MMU_COMMAND 0x2408
+#define RK3366_MMU_PAGE_FAULT_ADDR 0x240c
+#define RK3366_MMU_ZAP_ONE_LINE 0x2410
+#define RK3366_MMU_INT_RAWSTAT 0x2414
+#define RK3366_MMU_INT_CLEAR 0x2418
+#define RK3366_MMU_INT_MASK 0x241c
+#define RK3366_MMU_INT_STATUS 0x2420
+#define RK3366_MMU_AUTO_GATING 0x2424
+
+/* rk3399 register definition */
+#define RK3399_REG_CFG_DONE 0x0000
+#define RK3399_VERSION_INFO 0x0004
+#define RK3399_SYS_CTRL 0x0008
+#define RK3399_SYS_CTRL1 0x000c
+#define RK3399_DSP_CTRL0 0x0010
+#define RK3399_DSP_CTRL1 0x0014
+#define RK3399_DSP_BG 0x0018
+#define RK3399_MCU_CTRL 0x001c
+#define RK3399_WB_CTRL0 0x0020
+#define RK3399_WB_CTRL1 0x0024
+#define RK3399_WB_YRGB_MST 0x0028
+#define RK3399_WB_CBR_MST 0x002c
+#define RK3399_WIN0_CTRL0 0x0030
+#define RK3399_WIN0_CTRL1 0x0034
+#define RK3399_WIN0_COLOR_KEY 0x0038
+#define RK3399_WIN0_VIR 0x003c
+#define RK3399_WIN0_YRGB_MST 0x0040
+#define RK3399_WIN0_CBR_MST 0x0044
+#define RK3399_WIN0_ACT_INFO 0x0048
+#define RK3399_WIN0_DSP_INFO 0x004c
+#define RK3399_WIN0_DSP_ST 0x0050
+#define RK3399_WIN0_SCL_FACTOR_YRGB 0x0054
+#define RK3399_WIN0_SCL_FACTOR_CBR 0x0058
+#define RK3399_WIN0_SCL_OFFSET 0x005c
+#define RK3399_WIN0_SRC_ALPHA_CTRL 0x0060
+#define RK3399_WIN0_DST_ALPHA_CTRL 0x0064
+#define RK3399_WIN0_FADING_CTRL 0x0068
+#define RK3399_WIN0_CTRL2 0x006c
+#define RK3399_WIN1_CTRL0 0x0070
+#define RK3399_WIN1_CTRL1 0x0074
+#define RK3399_WIN1_COLOR_KEY 0x0078
+#define RK3399_WIN1_VIR 0x007c
+#define RK3399_WIN1_YRGB_MST 0x0080
+#define RK3399_WIN1_CBR_MST 0x0084
+#define RK3399_WIN1_ACT_INFO 0x0088
+#define RK3399_WIN1_DSP_INFO 0x008c
+#define RK3399_WIN1_DSP_ST 0x0090
+#define RK3399_WIN1_SCL_FACTOR_YRGB 0x0094
+#define RK3399_WIN1_SCL_FACTOR_CBR 0x0098
+#define RK3399_WIN1_SCL_OFFSET 0x009c
+#define RK3399_WIN1_SRC_ALPHA_CTRL 0x00a0
+#define RK3399_WIN1_DST_ALPHA_CTRL 0x00a4
+#define RK3399_WIN1_FADING_CTRL 0x00a8
+#define RK3399_WIN1_CTRL2 0x00ac
+#define RK3399_WIN2_CTRL0 0x00b0
+#define RK3399_WIN2_CTRL1 0x00b4
+#define RK3399_WIN2_VIR0_1 0x00b8
+#define RK3399_WIN2_VIR2_3 0x00bc
+#define RK3399_WIN2_MST0 0x00c0
+#define RK3399_WIN2_DSP_INFO0 0x00c4
+#define RK3399_WIN2_DSP_ST0 0x00c8
+#define RK3399_WIN2_COLOR_KEY 0x00cc
+#define RK3399_WIN2_MST1 0x00d0
+#define RK3399_WIN2_DSP_INFO1 0x00d4
+#define RK3399_WIN2_DSP_ST1 0x00d8
+#define RK3399_WIN2_SRC_ALPHA_CTRL 0x00dc
+#define RK3399_WIN2_MST2 0x00e0
+#define RK3399_WIN2_DSP_INFO2 0x00e4
+#define RK3399_WIN2_DSP_ST2 0x00e8
+#define RK3399_WIN2_DST_ALPHA_CTRL 0x00ec
+#define RK3399_WIN2_MST3 0x00f0
+#define RK3399_WIN2_DSP_INFO3 0x00f4
+#define RK3399_WIN2_DSP_ST3 0x00f8
+#define RK3399_WIN2_FADING_CTRL 0x00fc
+#define RK3399_WIN3_CTRL0 0x0100
+#define RK3399_WIN3_CTRL1 0x0104
+#define RK3399_WIN3_VIR0_1 0x0108
+#define RK3399_WIN3_VIR2_3 0x010c
+#define RK3399_WIN3_MST0 0x0110
+#define RK3399_WIN3_DSP_INFO0 0x0114
+#define RK3399_WIN3_DSP_ST0 0x0118
+#define RK3399_WIN3_COLOR_KEY 0x011c
+#define RK3399_WIN3_MST1 0x0120
+#define RK3399_WIN3_DSP_INFO1 0x0124
+#define RK3399_WIN3_DSP_ST1 0x0128
+#define RK3399_WIN3_SRC_ALPHA_CTRL 0x012c
+#define RK3399_WIN3_MST2 0x0130
+#define RK3399_WIN3_DSP_INFO2 0x0134
+#define RK3399_WIN3_DSP_ST2 0x0138
+#define RK3399_WIN3_DST_ALPHA_CTRL 0x013c
+#define RK3399_WIN3_MST3 0x0140
+#define RK3399_WIN3_DSP_INFO3 0x0144
+#define RK3399_WIN3_DSP_ST3 0x0148
+#define RK3399_WIN3_FADING_CTRL 0x014c
+#define RK3399_HWC_CTRL0 0x0150
+#define RK3399_HWC_CTRL1 0x0154
+#define RK3399_HWC_MST 0x0158
+#define RK3399_HWC_DSP_ST 0x015c
+#define RK3399_HWC_SRC_ALPHA_CTRL 0x0160
+#define RK3399_HWC_DST_ALPHA_CTRL 0x0164
+#define RK3399_HWC_FADING_CTRL 0x0168
+#define RK3399_HWC_RESERVED1 0x016c
+#define RK3399_POST_DSP_HACT_INFO 0x0170
+#define RK3399_POST_DSP_VACT_INFO 0x0174
+#define RK3399_POST_SCL_FACTOR_YRGB 0x0178
+#define RK3399_POST_RESERVED 0x017c
+#define RK3399_POST_SCL_CTRL 0x0180
+#define RK3399_POST_DSP_VACT_INFO_F1 0x0184
+#define RK3399_DSP_HTOTAL_HS_END 0x0188
+#define RK3399_DSP_HACT_ST_END 0x018c
+#define RK3399_DSP_VTOTAL_VS_END 0x0190
+#define RK3399_DSP_VACT_ST_END 0x0194
+#define RK3399_DSP_VS_ST_END_F1 0x0198
+#define RK3399_DSP_VACT_ST_END_F1 0x019c
+#define RK3399_PWM_CTRL 0x01a0
+#define RK3399_PWM_PERIOD_HPR 0x01a4
+#define RK3399_PWM_DUTY_LPR 0x01a8
+#define RK3399_PWM_CNT 0x01ac
+#define RK3399_BCSH_COLOR_BAR 0x01b0
+#define RK3399_BCSH_BCS 0x01b4
+#define RK3399_BCSH_H 0x01b8
+#define RK3399_BCSH_CTRL 0x01bc
+#define RK3399_CABC_CTRL0 0x01c0
+#define RK3399_CABC_CTRL1 0x01c4
+#define RK3399_CABC_CTRL2 0x01c8
+#define RK3399_CABC_CTRL3 0x01cc
+#define RK3399_CABC_GAUSS_LINE0_0 0x01d0
+#define RK3399_CABC_GAUSS_LINE0_1 0x01d4
+#define RK3399_CABC_GAUSS_LINE1_0 0x01d8
+#define RK3399_CABC_GAUSS_LINE1_1 0x01dc
+#define RK3399_CABC_GAUSS_LINE2_0 0x01e0
+#define RK3399_CABC_GAUSS_LINE2_1 0x01e4
+#define RK3399_FRC_LOWER01_0 0x01e8
+#define RK3399_FRC_LOWER01_1 0x01ec
+#define RK3399_FRC_LOWER10_0 0x01f0
+#define RK3399_FRC_LOWER10_1 0x01f4
+#define RK3399_FRC_LOWER11_0 0x01f8
+#define RK3399_FRC_LOWER11_1 0x01fc
+#define RK3399_AFBCD0_CTRL 0x0200
+#define RK3399_AFBCD0_HDR_PTR 0x0204
+#define RK3399_AFBCD0_PIC_SIZE 0x0208
+#define RK3399_AFBCD0_STATUS 0x020c
+#define RK3399_AFBCD1_CTRL 0x0220
+#define RK3399_AFBCD1_HDR_PTR 0x0224
+#define RK3399_AFBCD1_PIC_SIZE 0x0228
+#define RK3399_AFBCD1_STATUS 0x022c
+#define RK3399_AFBCD2_CTRL 0x0240
+#define RK3399_AFBCD2_HDR_PTR 0x0244
+#define RK3399_AFBCD2_PIC_SIZE 0x0248
+#define RK3399_AFBCD2_STATUS 0x024c
+#define RK3399_AFBCD3_CTRL 0x0260
+#define RK3399_AFBCD3_HDR_PTR 0x0264
+#define RK3399_AFBCD3_PIC_SIZE 0x0268
+#define RK3399_AFBCD3_STATUS 0x026c
+#define RK3399_INTR_EN0 0x0280
+#define RK3399_INTR_CLEAR0 0x0284
+#define RK3399_INTR_STATUS0 0x0288
+#define RK3399_INTR_RAW_STATUS0 0x028c
+#define RK3399_INTR_EN1 0x0290
+#define RK3399_INTR_CLEAR1 0x0294
+#define RK3399_INTR_STATUS1 0x0298
+#define RK3399_INTR_RAW_STATUS1 0x029c
+#define RK3399_LINE_FLAG 0x02a0
+#define RK3399_VOP_STATUS 0x02a4
+#define RK3399_BLANKING_VALUE 0x02a8
+#define RK3399_MCU_BYPASS_PORT 0x02ac
+#define RK3399_WIN0_DSP_BG 0x02b0
+#define RK3399_WIN1_DSP_BG 0x02b4
+#define RK3399_WIN2_DSP_BG 0x02b8
+#define RK3399_WIN3_DSP_BG 0x02bc
+#define RK3399_YUV2YUV_WIN 0x02c0
+#define RK3399_YUV2YUV_POST 0x02c4
+#define RK3399_AUTO_GATING_EN 0x02cc
+#define RK3399_WIN0_CSC_COE 0x03a0
+#define RK3399_WIN1_CSC_COE 0x03c0
+#define RK3399_WIN2_CSC_COE 0x03e0
+#define RK3399_WIN3_CSC_COE 0x0400
+#define RK3399_HWC_CSC_COE 0x0420
+#define RK3399_BCSH_R2Y_CSC_COE 0x0440
+#define RK3399_BCSH_Y2R_CSC_COE 0x0460
+#define RK3399_POST_YUV2YUV_Y2R_COE 0x0480
+#define RK3399_POST_YUV2YUV_3X3_COE 0x04a0
+#define RK3399_POST_YUV2YUV_R2Y_COE 0x04c0
+#define RK3399_WIN0_YUV2YUV_Y2R 0x04e0
+#define RK3399_WIN0_YUV2YUV_3X3 0x0500
+#define RK3399_WIN0_YUV2YUV_R2Y 0x0520
+#define RK3399_WIN1_YUV2YUV_Y2R 0x0540
+#define RK3399_WIN1_YUV2YUV_3X3 0x0560
+#define RK3399_WIN1_YUV2YUV_R2Y 0x0580
+#define RK3399_WIN2_YUV2YUV_Y2R 0x05a0
+#define RK3399_WIN2_YUV2YUV_3X3 0x05c0
+#define RK3399_WIN2_YUV2YUV_R2Y 0x05e0
+#define RK3399_WIN3_YUV2YUV_Y2R 0x0600
+#define RK3399_WIN3_YUV2YUV_3X3 0x0620
+#define RK3399_WIN3_YUV2YUV_R2Y 0x0640
+#define RK3399_WIN2_LUT_ADDR 0x1000
+#define RK3399_WIN3_LUT_ADDR 0x1400
+#define RK3399_HWC_LUT_ADDR 0x1800
+#define RK3399_CABC_GAMMA_LUT_ADDR 0x1c00
+#define RK3399_GAMMA_LUT_ADDR 0x2000
+/* rk3399 register definition end */
+
+/* rk3328 register definition */
+#define RK3328_REG_CFG_DONE 0x00000000
+#define RK3328_VERSION_INFO 0x00000004
+#define RK3328_SYS_CTRL 0x00000008
+#define RK3328_SYS_CTRL1 0x0000000c
+#define RK3328_DSP_CTRL0 0x00000010
+#define RK3328_DSP_CTRL1 0x00000014
+#define RK3328_DSP_BG 0x00000018
+#define RK3328_AUTO_GATING_EN 0x0000003c
+#define RK3328_LINE_FLAG 0x00000040
+#define RK3328_VOP_STATUS 0x00000044
+#define RK3328_BLANKING_VALUE 0x00000048
+#define RK3328_WIN0_DSP_BG 0x00000050
+#define RK3328_WIN1_DSP_BG 0x00000054
+#define RK3328_DBG_PERF_LATENCY_CTRL0 0x000000c0
+#define RK3328_DBG_PERF_RD_MAX_LATENCY_NUM0 0x000000c4
+#define RK3328_DBG_PERF_RD_LATENCY_THR_NUM0 0x000000c8
+#define RK3328_DBG_PERF_RD_LATENCY_SAMP_NUM0 0x000000cc
+#define RK3328_INTR_EN0 0x000000e0
+#define RK3328_INTR_CLEAR0 0x000000e4
+#define RK3328_INTR_STATUS0 0x000000e8
+#define RK3328_INTR_RAW_STATUS0 0x000000ec
+#define RK3328_INTR_EN1 0x000000f0
+#define RK3328_INTR_CLEAR1 0x000000f4
+#define RK3328_INTR_STATUS1 0x000000f8
+#define RK3328_INTR_RAW_STATUS1 0x000000fc
+#define RK3328_WIN0_CTRL0 0x00000100
+#define RK3328_WIN0_CTRL1 0x00000104
+#define RK3328_WIN0_COLOR_KEY 0x00000108
+#define RK3328_WIN0_VIR 0x0000010c
+#define RK3328_WIN0_YRGB_MST 0x00000110
+#define RK3328_WIN0_CBR_MST 0x00000114
+#define RK3328_WIN0_ACT_INFO 0x00000118
+#define RK3328_WIN0_DSP_INFO 0x0000011c
+#define RK3328_WIN0_DSP_ST 0x00000120
+#define RK3328_WIN0_SCL_FACTOR_YRGB 0x00000124
+#define RK3328_WIN0_SCL_FACTOR_CBR 0x00000128
+#define RK3328_WIN0_SCL_OFFSET 0x0000012c
+#define RK3328_WIN0_SRC_ALPHA_CTRL 0x00000130
+#define RK3328_WIN0_DST_ALPHA_CTRL 0x00000134
+#define RK3328_WIN0_FADING_CTRL 0x00000138
+#define RK3328_WIN0_CTRL2 0x0000013c
+#define RK3328_DBG_WIN0_REG0 0x000001f0
+#define RK3328_DBG_WIN0_REG1 0x000001f4
+#define RK3328_DBG_WIN0_REG2 0x000001f8
+#define RK3328_DBG_WIN0_RESERVED 0x000001fc
+#define RK3328_WIN1_CTRL0 0x00000200
+#define RK3328_WIN1_CTRL1 0x00000204
+#define RK3328_WIN1_COLOR_KEY 0x00000208
+#define RK3328_WIN1_VIR 0x0000020c
+#define RK3328_WIN1_YRGB_MST 0x00000210
+#define RK3328_WIN1_CBR_MST 0x00000214
+#define RK3328_WIN1_ACT_INFO 0x00000218
+#define RK3328_WIN1_DSP_INFO 0x0000021c
+#define RK3328_WIN1_DSP_ST 0x00000220
+#define RK3328_WIN1_SCL_FACTOR_YRGB 0x00000224
+#define RK3328_WIN1_SCL_FACTOR_CBR 0x00000228
+#define RK3328_WIN1_SCL_OFFSET 0x0000022c
+#define RK3328_WIN1_SRC_ALPHA_CTRL 0x00000230
+#define RK3328_WIN1_DST_ALPHA_CTRL 0x00000234
+#define RK3328_WIN1_FADING_CTRL 0x00000238
+#define RK3328_WIN1_CTRL2 0x0000023c
+#define RK3328_DBG_WIN1_REG0 0x000002f0
+#define RK3328_DBG_WIN1_REG1 0x000002f4
+#define RK3328_DBG_WIN1_REG2 0x000002f8
+#define RK3328_DBG_WIN1_RESERVED 0x000002fc
+#define RK3328_WIN2_CTRL0 0x00000300
+#define RK3328_WIN2_CTRL1 0x00000304
+#define RK3328_WIN2_COLOR_KEY 0x00000308
+#define RK3328_WIN2_VIR 0x0000030c
+#define RK3328_WIN2_YRGB_MST 0x00000310
+#define RK3328_WIN2_CBR_MST 0x00000314
+#define RK3328_WIN2_ACT_INFO 0x00000318
+#define RK3328_WIN2_DSP_INFO 0x0000031c
+#define RK3328_WIN2_DSP_ST 0x00000320
+#define RK3328_WIN2_SCL_FACTOR_YRGB 0x00000324
+#define RK3328_WIN2_SCL_FACTOR_CBR 0x00000328
+#define RK3328_WIN2_SCL_OFFSET 0x0000032c
+#define RK3328_WIN2_SRC_ALPHA_CTRL 0x00000330
+#define RK3328_WIN2_DST_ALPHA_CTRL 0x00000334
+#define RK3328_WIN2_FADING_CTRL 0x00000338
+#define RK3328_WIN2_CTRL2 0x0000033c
+#define RK3328_DBG_WIN2_REG0 0x000003f0
+#define RK3328_DBG_WIN2_REG1 0x000003f4
+#define RK3328_DBG_WIN2_REG2 0x000003f8
+#define RK3328_DBG_WIN2_RESERVED 0x000003fc
+#define RK3328_WIN3_CTRL0 0x00000400
+#define RK3328_WIN3_CTRL1 0x00000404
+#define RK3328_WIN3_COLOR_KEY 0x00000408
+#define RK3328_WIN3_VIR 0x0000040c
+#define RK3328_WIN3_YRGB_MST 0x00000410
+#define RK3328_WIN3_CBR_MST 0x00000414
+#define RK3328_WIN3_ACT_INFO 0x00000418
+#define RK3328_WIN3_DSP_INFO 0x0000041c
+#define RK3328_WIN3_DSP_ST 0x00000420
+#define RK3328_WIN3_SCL_FACTOR_YRGB 0x00000424
+#define RK3328_WIN3_SCL_FACTOR_CBR 0x00000428
+#define RK3328_WIN3_SCL_OFFSET 0x0000042c
+#define RK3328_WIN3_SRC_ALPHA_CTRL 0x00000430
+#define RK3328_WIN3_DST_ALPHA_CTRL 0x00000434
+#define RK3328_WIN3_FADING_CTRL 0x00000438
+#define RK3328_WIN3_CTRL2 0x0000043c
+#define RK3328_DBG_WIN3_REG0 0x000004f0
+#define RK3328_DBG_WIN3_REG1 0x000004f4
+#define RK3328_DBG_WIN3_REG2 0x000004f8
+#define RK3328_DBG_WIN3_RESERVED 0x000004fc
+
+#define RK3328_HWC_CTRL0 0x00000500
+#define RK3328_HWC_CTRL1 0x00000504
+#define RK3328_HWC_MST 0x00000508
+#define RK3328_HWC_DSP_ST 0x0000050c
+#define RK3328_HWC_SRC_ALPHA_CTRL 0x00000510
+#define RK3328_HWC_DST_ALPHA_CTRL 0x00000514
+#define RK3328_HWC_FADING_CTRL 0x00000518
+#define RK3328_HWC_RESERVED1 0x0000051c
+#define RK3328_POST_DSP_HACT_INFO 0x00000600
+#define RK3328_POST_DSP_VACT_INFO 0x00000604
+#define RK3328_POST_SCL_FACTOR_YRGB 0x00000608
+#define RK3328_POST_RESERVED 0x0000060c
+#define RK3328_POST_SCL_CTRL 0x00000610
+#define RK3328_POST_DSP_VACT_INFO_F1 0x00000614
+#define RK3328_DSP_HTOTAL_HS_END 0x00000618
+#define RK3328_DSP_HACT_ST_END 0x0000061c
+#define RK3328_DSP_VTOTAL_VS_END 0x00000620
+#define RK3328_DSP_VACT_ST_END 0x00000624
+#define RK3328_DSP_VS_ST_END_F1 0x00000628
+#define RK3328_DSP_VACT_ST_END_F1 0x0000062c
+#define RK3328_BCSH_COLOR_BAR 0x00000640
+#define RK3328_BCSH_BCS 0x00000644
+#define RK3328_BCSH_H 0x00000648
+#define RK3328_BCSH_CTRL 0x0000064c
+#define RK3328_FRC_LOWER01_0 0x00000678
+#define RK3328_FRC_LOWER01_1 0x0000067c
+#define RK3328_FRC_LOWER10_0 0x00000680
+#define RK3328_FRC_LOWER10_1 0x00000684
+#define RK3328_FRC_LOWER11_0 0x00000688
+#define RK3328_FRC_LOWER11_1 0x0000068c
+#define RK3328_DBG_POST_REG0 0x000006e8
+#define RK3328_DBG_POST_RESERVED 0x000006ec
+#define RK3328_DBG_DATAO 0x000006f0
+#define RK3328_DBG_DATAO_2 0x000006f4
+
+/* sdr to hdr */
+#define RK3328_SDR2HDR_CTRL 0x00000700
+#define RK3328_EOTF_OETF_Y0 0x00000704
+#define RK3328_RESERVED0001 0x00000708
+#define RK3328_RESERVED0002 0x0000070c
+#define RK3328_EOTF_OETF_Y1 0x00000710
+#define RK3328_EOTF_OETF_Y64 0x0000080c
+#define RK3328_OETF_DX_DXPOW1 0x00000810
+#define RK3328_OETF_DX_DXPOW64 0x0000090c
+#define RK3328_OETF_XN1 0x00000910
+#define RK3328_OETF_XN63 0x00000a08
+
+/* hdr to sdr */
+#define RK3328_HDR2SDR_CTRL 0x00000a10
+#define RK3328_HDR2SDR_SRC_RANGE 0x00000a14
+#define RK3328_HDR2SDR_NORMFACEETF 0x00000a18
+#define RK3328_RESERVED0003 0x00000a1c
+#define RK3328_HDR2SDR_DST_RANGE 0x00000a20
+#define RK3328_HDR2SDR_NORMFACCGAMMA 0x00000a24
+#define RK3328_EETF_OETF_Y0 0x00000a28
+#define RK3328_SAT_Y0 0x00000a2c
+#define RK3328_EETF_OETF_Y1 0x00000a30
+#define RK3328_SAT_Y1 0x00000ab0
+#define RK3328_SAT_Y8 0x00000acc
+
+#define RK3328_HWC_LUT_ADDR 0x00000c00
+
/* rk3036 register definition */
#define RK3036_SYS_CTRL 0x00
#define RK3036_DSP_CTRL0 0x04
@@ -166,197 +878,4 @@
#define RK3036_HWC_LUT_ADDR 0x800
/* rk3036 register definition end */
-/* rk3399 register definition */
-#define RK3399_REG_CFG_DONE 0x00000
-#define RK3399_VERSION_INFO 0x00004
-#define RK3399_SYS_CTRL 0x00008
-#define RK3399_SYS_CTRL1 0x0000c
-#define RK3399_DSP_CTRL0 0x00010
-#define RK3399_DSP_CTRL1 0x00014
-#define RK3399_DSP_BG 0x00018
-#define RK3399_MCU_CTRL 0x0001c
-#define RK3399_WB_CTRL0 0x00020
-#define RK3399_WB_CTRL1 0x00024
-#define RK3399_WB_YRGB_MST 0x00028
-#define RK3399_WB_CBR_MST 0x0002c
-#define RK3399_WIN0_CTRL0 0x00030
-#define RK3399_WIN0_CTRL1 0x00034
-#define RK3399_WIN0_COLOR_KEY 0x00038
-#define RK3399_WIN0_VIR 0x0003c
-#define RK3399_WIN0_YRGB_MST 0x00040
-#define RK3399_WIN0_CBR_MST 0x00044
-#define RK3399_WIN0_ACT_INFO 0x00048
-#define RK3399_WIN0_DSP_INFO 0x0004c
-#define RK3399_WIN0_DSP_ST 0x00050
-#define RK3399_WIN0_SCL_FACTOR_YRGB 0x00054
-#define RK3399_WIN0_SCL_FACTOR_CBR 0x00058
-#define RK3399_WIN0_SCL_OFFSET 0x0005c
-#define RK3399_WIN0_SRC_ALPHA_CTRL 0x00060
-#define RK3399_WIN0_DST_ALPHA_CTRL 0x00064
-#define RK3399_WIN0_FADING_CTRL 0x00068
-#define RK3399_WIN0_CTRL2 0x0006c
-#define RK3399_WIN1_CTRL0 0x00070
-#define RK3399_WIN1_CTRL1 0x00074
-#define RK3399_WIN1_COLOR_KEY 0x00078
-#define RK3399_WIN1_VIR 0x0007c
-#define RK3399_WIN1_YRGB_MST 0x00080
-#define RK3399_WIN1_CBR_MST 0x00084
-#define RK3399_WIN1_ACT_INFO 0x00088
-#define RK3399_WIN1_DSP_INFO 0x0008c
-#define RK3399_WIN1_DSP_ST 0x00090
-#define RK3399_WIN1_SCL_FACTOR_YRGB 0x00094
-#define RK3399_WIN1_SCL_FACTOR_CBR 0x00098
-#define RK3399_WIN1_SCL_OFFSET 0x0009c
-#define RK3399_WIN1_SRC_ALPHA_CTRL 0x000a0
-#define RK3399_WIN1_DST_ALPHA_CTRL 0x000a4
-#define RK3399_WIN1_FADING_CTRL 0x000a8
-#define RK3399_WIN1_CTRL2 0x000ac
-#define RK3399_WIN2_CTRL0 0x000b0
-#define RK3399_WIN2_CTRL1 0x000b4
-#define RK3399_WIN2_VIR0_1 0x000b8
-#define RK3399_WIN2_VIR2_3 0x000bc
-#define RK3399_WIN2_MST0 0x000c0
-#define RK3399_WIN2_DSP_INFO0 0x000c4
-#define RK3399_WIN2_DSP_ST0 0x000c8
-#define RK3399_WIN2_COLOR_KEY 0x000cc
-#define RK3399_WIN2_MST1 0x000d0
-#define RK3399_WIN2_DSP_INFO1 0x000d4
-#define RK3399_WIN2_DSP_ST1 0x000d8
-#define RK3399_WIN2_SRC_ALPHA_CTRL 0x000dc
-#define RK3399_WIN2_MST2 0x000e0
-#define RK3399_WIN2_DSP_INFO2 0x000e4
-#define RK3399_WIN2_DSP_ST2 0x000e8
-#define RK3399_WIN2_DST_ALPHA_CTRL 0x000ec
-#define RK3399_WIN2_MST3 0x000f0
-#define RK3399_WIN2_DSP_INFO3 0x000f4
-#define RK3399_WIN2_DSP_ST3 0x000f8
-#define RK3399_WIN2_FADING_CTRL 0x000fc
-#define RK3399_WIN3_CTRL0 0x00100
-#define RK3399_WIN3_CTRL1 0x00104
-#define RK3399_WIN3_VIR0_1 0x00108
-#define RK3399_WIN3_VIR2_3 0x0010c
-#define RK3399_WIN3_MST0 0x00110
-#define RK3399_WIN3_DSP_INFO0 0x00114
-#define RK3399_WIN3_DSP_ST0 0x00118
-#define RK3399_WIN3_COLOR_KEY 0x0011c
-#define RK3399_WIN3_MST1 0x00120
-#define RK3399_WIN3_DSP_INFO1 0x00124
-#define RK3399_WIN3_DSP_ST1 0x00128
-#define RK3399_WIN3_SRC_ALPHA_CTRL 0x0012c
-#define RK3399_WIN3_MST2 0x00130
-#define RK3399_WIN3_DSP_INFO2 0x00134
-#define RK3399_WIN3_DSP_ST2 0x00138
-#define RK3399_WIN3_DST_ALPHA_CTRL 0x0013c
-#define RK3399_WIN3_MST3 0x00140
-#define RK3399_WIN3_DSP_INFO3 0x00144
-#define RK3399_WIN3_DSP_ST3 0x00148
-#define RK3399_WIN3_FADING_CTRL 0x0014c
-#define RK3399_HWC_CTRL0 0x00150
-#define RK3399_HWC_CTRL1 0x00154
-#define RK3399_HWC_MST 0x00158
-#define RK3399_HWC_DSP_ST 0x0015c
-#define RK3399_HWC_SRC_ALPHA_CTRL 0x00160
-#define RK3399_HWC_DST_ALPHA_CTRL 0x00164
-#define RK3399_HWC_FADING_CTRL 0x00168
-#define RK3399_HWC_RESERVED1 0x0016c
-#define RK3399_POST_DSP_HACT_INFO 0x00170
-#define RK3399_POST_DSP_VACT_INFO 0x00174
-#define RK3399_POST_SCL_FACTOR_YRGB 0x00178
-#define RK3399_POST_RESERVED 0x0017c
-#define RK3399_POST_SCL_CTRL 0x00180
-#define RK3399_POST_DSP_VACT_INFO_F1 0x00184
-#define RK3399_DSP_HTOTAL_HS_END 0x00188
-#define RK3399_DSP_HACT_ST_END 0x0018c
-#define RK3399_DSP_VTOTAL_VS_END 0x00190
-#define RK3399_DSP_VACT_ST_END 0x00194
-#define RK3399_DSP_VS_ST_END_F1 0x00198
-#define RK3399_DSP_VACT_ST_END_F1 0x0019c
-#define RK3399_PWM_CTRL 0x001a0
-#define RK3399_PWM_PERIOD_HPR 0x001a4
-#define RK3399_PWM_DUTY_LPR 0x001a8
-#define RK3399_PWM_CNT 0x001ac
-#define RK3399_BCSH_COLOR_BAR 0x001b0
-#define RK3399_BCSH_BCS 0x001b4
-#define RK3399_BCSH_H 0x001b8
-#define RK3399_BCSH_CTRL 0x001bc
-#define RK3399_CABC_CTRL0 0x001c0
-#define RK3399_CABC_CTRL1 0x001c4
-#define RK3399_CABC_CTRL2 0x001c8
-#define RK3399_CABC_CTRL3 0x001cc
-#define RK3399_CABC_GAUSS_LINE0_0 0x001d0
-#define RK3399_CABC_GAUSS_LINE0_1 0x001d4
-#define RK3399_CABC_GAUSS_LINE1_0 0x001d8
-#define RK3399_CABC_GAUSS_LINE1_1 0x001dc
-#define RK3399_CABC_GAUSS_LINE2_0 0x001e0
-#define RK3399_CABC_GAUSS_LINE2_1 0x001e4
-#define RK3399_FRC_LOWER01_0 0x001e8
-#define RK3399_FRC_LOWER01_1 0x001ec
-#define RK3399_FRC_LOWER10_0 0x001f0
-#define RK3399_FRC_LOWER10_1 0x001f4
-#define RK3399_FRC_LOWER11_0 0x001f8
-#define RK3399_FRC_LOWER11_1 0x001fc
-#define RK3399_AFBCD0_CTRL 0x00200
-#define RK3399_AFBCD0_HDR_PTR 0x00204
-#define RK3399_AFBCD0_PIC_SIZE 0x00208
-#define RK3399_AFBCD0_STATUS 0x0020c
-#define RK3399_AFBCD1_CTRL 0x00220
-#define RK3399_AFBCD1_HDR_PTR 0x00224
-#define RK3399_AFBCD1_PIC_SIZE 0x00228
-#define RK3399_AFBCD1_STATUS 0x0022c
-#define RK3399_AFBCD2_CTRL 0x00240
-#define RK3399_AFBCD2_HDR_PTR 0x00244
-#define RK3399_AFBCD2_PIC_SIZE 0x00248
-#define RK3399_AFBCD2_STATUS 0x0024c
-#define RK3399_AFBCD3_CTRL 0x00260
-#define RK3399_AFBCD3_HDR_PTR 0x00264
-#define RK3399_AFBCD3_PIC_SIZE 0x00268
-#define RK3399_AFBCD3_STATUS 0x0026c
-#define RK3399_INTR_EN0 0x00280
-#define RK3399_INTR_CLEAR0 0x00284
-#define RK3399_INTR_STATUS0 0x00288
-#define RK3399_INTR_RAW_STATUS0 0x0028c
-#define RK3399_INTR_EN1 0x00290
-#define RK3399_INTR_CLEAR1 0x00294
-#define RK3399_INTR_STATUS1 0x00298
-#define RK3399_INTR_RAW_STATUS1 0x0029c
-#define RK3399_LINE_FLAG 0x002a0
-#define RK3399_VOP_STATUS 0x002a4
-#define RK3399_BLANKING_VALUE 0x002a8
-#define RK3399_MCU_BYPASS_PORT 0x002ac
-#define RK3399_WIN0_DSP_BG 0x002b0
-#define RK3399_WIN1_DSP_BG 0x002b4
-#define RK3399_WIN2_DSP_BG 0x002b8
-#define RK3399_WIN3_DSP_BG 0x002bc
-#define RK3399_YUV2YUV_WIN 0x002c0
-#define RK3399_YUV2YUV_POST 0x002c4
-#define RK3399_AUTO_GATING_EN 0x002cc
-#define RK3399_WIN0_CSC_COE 0x003a0
-#define RK3399_WIN1_CSC_COE 0x003c0
-#define RK3399_WIN2_CSC_COE 0x003e0
-#define RK3399_WIN3_CSC_COE 0x00400
-#define RK3399_HWC_CSC_COE 0x00420
-#define RK3399_BCSH_R2Y_CSC_COE 0x00440
-#define RK3399_BCSH_Y2R_CSC_COE 0x00460
-#define RK3399_POST_YUV2YUV_Y2R_COE 0x00480
-#define RK3399_POST_YUV2YUV_3X3_COE 0x004a0
-#define RK3399_POST_YUV2YUV_R2Y_COE 0x004c0
-#define RK3399_WIN0_YUV2YUV_Y2R 0x004e0
-#define RK3399_WIN0_YUV2YUV_3X3 0x00500
-#define RK3399_WIN0_YUV2YUV_R2Y 0x00520
-#define RK3399_WIN1_YUV2YUV_Y2R 0x00540
-#define RK3399_WIN1_YUV2YUV_3X3 0x00560
-#define RK3399_WIN1_YUV2YUV_R2Y 0x00580
-#define RK3399_WIN2_YUV2YUV_Y2R 0x005a0
-#define RK3399_WIN2_YUV2YUV_3X3 0x005c0
-#define RK3399_WIN2_YUV2YUV_R2Y 0x005e0
-#define RK3399_WIN3_YUV2YUV_Y2R 0x00600
-#define RK3399_WIN3_YUV2YUV_3X3 0x00620
-#define RK3399_WIN3_YUV2YUV_R2Y 0x00640
-#define RK3399_WIN2_LUT_ADDR 0x01000
-#define RK3399_WIN3_LUT_ADDR 0x01400
-#define RK3399_HWC_LUT_ADDR 0x01800
-#define RK3399_CABC_GAMMA_LUT_ADDR 0x01c00
-#define RK3399_GAMMA_LUT_ADDR 0x02000
-/* rk3399 register definition end */
-
#endif /* _ROCKCHIP_VOP_REG_H */
diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
index 78c6d8e9b42c..2bddeb8bf457 100644
--- a/drivers/gpu/drm/savage/savage_drv.c
+++ b/drivers/gpu/drm/savage/savage_drv.c
@@ -55,7 +55,6 @@ static struct drm_driver driver = {
.preclose = savage_reclaim_buffers,
.lastclose = savage_driver_lastclose,
.unload = savage_driver_unload,
- .set_busid = drm_pci_set_busid,
.ioctls = savage_ioctls,
.dma_ioctl = savage_bci_buffers,
.fops = &savage_driver_fops,
@@ -75,12 +74,12 @@ static struct pci_driver savage_pci_driver = {
static int __init savage_init(void)
{
driver.num_ioctls = savage_max_ioctl;
- return drm_pci_init(&driver, &savage_pci_driver);
+ return drm_legacy_pci_init(&driver, &savage_pci_driver);
}
static void __exit savage_exit(void)
{
- drm_pci_exit(&driver, &savage_pci_driver);
+ drm_legacy_pci_exit(&driver, &savage_pci_driver);
}
module_init(savage_init);
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index 800d1d2c435d..592572554eb0 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -145,8 +145,6 @@ static struct drm_driver shmob_drm_driver = {
.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
.gem_prime_mmap = drm_gem_cma_prime_mmap,
.dumb_create = drm_gem_cma_dumb_create,
- .dumb_map_offset = drm_gem_cma_dumb_map_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
.fops = &shmob_drm_fops,
.name = "shmob-drm",
.desc = "Renesas SH Mobile DRM",
@@ -277,7 +275,7 @@ static int shmob_drm_probe(struct platform_device *pdev)
ret = drm_irq_install(ddev, platform_get_irq(pdev, 0));
if (ret < 0) {
dev_err(&pdev->dev, "failed to install IRQ handler\n");
- goto err_vblank_cleanup;
+ goto err_modeset_cleanup;
}
/*
@@ -292,8 +290,6 @@ static int shmob_drm_probe(struct platform_device *pdev)
err_irq_uninstall:
drm_irq_uninstall(ddev);
-err_vblank_cleanup:
- drm_vblank_cleanup(ddev);
err_modeset_cleanup:
drm_kms_helper_poll_fini(ddev);
drm_mode_config_cleanup(ddev);
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index 7f05da13ea5e..e04a92658cd7 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -104,7 +104,6 @@ static struct drm_driver driver = {
.open = sis_driver_open,
.preclose = sis_reclaim_buffers_locked,
.postclose = sis_driver_postclose,
- .set_busid = drm_pci_set_busid,
.dma_quiescent = sis_idle,
.lastclose = sis_lastclose,
.ioctls = sis_ioctls,
@@ -125,12 +124,12 @@ static struct pci_driver sis_pci_driver = {
static int __init sis_init(void)
{
driver.num_ioctls = sis_max_ioctl;
- return drm_pci_init(&driver, &sis_pci_driver);
+ return drm_legacy_pci_init(&driver, &sis_pci_driver);
}
static void __exit sis_exit(void)
{
- drm_pci_exit(&driver, &sis_pci_driver);
+ drm_legacy_pci_exit(&driver, &sis_pci_driver);
}
module_init(sis_init);
diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c
index d45a4335df5d..e8a4d48e985a 100644
--- a/drivers/gpu/drm/sti/sti_crtc.c
+++ b/drivers/gpu/drm/sti/sti_crtc.c
@@ -20,7 +20,8 @@
#include "sti_vid.h"
#include "sti_vtg.h"
-static void sti_crtc_enable(struct drm_crtc *crtc)
+static void sti_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct sti_mixer *mixer = to_sti_mixer(crtc);
@@ -31,7 +32,8 @@ static void sti_crtc_enable(struct drm_crtc *crtc)
drm_crtc_vblank_on(crtc);
}
-static void sti_crtc_disabling(struct drm_crtc *crtc)
+static void sti_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct sti_mixer *mixer = to_sti_mixer(crtc);
@@ -222,10 +224,10 @@ static void sti_crtc_atomic_flush(struct drm_crtc *crtc,
}
static const struct drm_crtc_helper_funcs sti_crtc_helper_funcs = {
- .enable = sti_crtc_enable,
- .disable = sti_crtc_disabling,
.mode_set_nofb = sti_crtc_mode_set_nofb,
.atomic_flush = sti_crtc_atomic_flush,
+ .atomic_enable = sti_crtc_atomic_enable,
+ .atomic_disable = sti_crtc_atomic_disable,
};
static void sti_crtc_destroy(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index 5b3a41f74f21..b709ebbec095 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -348,7 +348,6 @@ static const struct drm_plane_funcs sti_cursor_plane_helpers_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = sti_cursor_destroy,
- .set_property = drm_atomic_helper_plane_set_property,
.reset = sti_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
@@ -392,7 +391,7 @@ struct drm_plane *sti_cursor_create(struct drm_device *drm_dev,
&sti_cursor_plane_helpers_funcs,
cursor_supported_formats,
ARRAY_SIZE(cursor_supported_formats),
- DRM_PLANE_TYPE_CURSOR, NULL);
+ NULL, DRM_PLANE_TYPE_CURSOR, NULL);
if (res) {
DRM_ERROR("Failed to initialize universal plane\n");
goto err_plane;
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index a4b574283269..1700c542cd93 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -175,8 +175,6 @@ static struct drm_driver sti_driver = {
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
- .dumb_map_offset = drm_gem_cma_dumb_map_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
.fops = &sti_driver_fops,
.enable_vblank = sti_crtc_enable_vblank,
@@ -237,7 +235,6 @@ static void sti_cleanup(struct drm_device *ddev)
}
drm_kms_helper_poll_fini(ddev);
- drm_vblank_cleanup(ddev);
component_unbind_all(ddev->dev, ddev);
kfree(private);
ddev->dev_private = NULL;
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index 24ebc6b2f34d..852bf2293b05 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -412,7 +412,6 @@ static int sti_dvo_late_register(struct drm_connector *connector)
}
static const struct drm_connector_funcs sti_dvo_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = sti_dvo_connector_detect,
.destroy = drm_connector_cleanup,
@@ -582,7 +581,7 @@ static int sti_dvo_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id dvo_of_match[] = {
+static const struct of_device_id dvo_of_match[] = {
{ .compatible = "st,stih407-dvo", },
{ /* end node */ }
};
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index 5ee0503945c8..b65eea4f2c97 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -895,7 +895,6 @@ static const struct drm_plane_funcs sti_gdp_plane_helpers_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = sti_gdp_destroy,
- .set_property = drm_atomic_helper_plane_set_property,
.reset = sti_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
@@ -931,7 +930,7 @@ struct drm_plane *sti_gdp_create(struct drm_device *drm_dev,
&sti_gdp_plane_helpers_funcs,
gdp_supported_formats,
ARRAY_SIZE(gdp_supported_formats),
- type, NULL);
+ NULL, type, NULL);
if (res) {
DRM_ERROR("Failed to initialize universal plane\n");
goto err;
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index d6ed909d9d75..cf65e32b5090 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -647,7 +647,6 @@ static int sti_hda_late_register(struct drm_connector *connector)
}
static const struct drm_connector_funcs sti_hda_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index a59c95a8081b..30f02d2fdd03 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -434,7 +434,7 @@ static int hdmi_avi_infoframe_config(struct sti_hdmi *hdmi)
DRM_DEBUG_DRIVER("\n");
- ret = drm_hdmi_avi_infoframe_from_display_mode(&infoframe, mode);
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&infoframe, mode, false);
if (ret < 0) {
DRM_ERROR("failed to setup AVI infoframe: %d\n", ret);
return ret;
@@ -1113,12 +1113,10 @@ static int sti_hdmi_late_register(struct drm_connector *connector)
}
static const struct drm_connector_funcs sti_hdmi_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = sti_hdmi_connector_detect,
.destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
- .set_property = drm_atomic_helper_connector_set_property,
.atomic_set_property = sti_hdmi_connector_set_property,
.atomic_get_property = sti_hdmi_connector_get_property,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index a1c161f77804..b19b3430b296 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -958,6 +958,7 @@ static void sti_hqvdp_start_xp70(struct sti_hqvdp *hqvdp)
}
if (i == POLL_MAX_ATTEMPT) {
DRM_ERROR("Could not reset\n");
+ clk_disable_unprepare(hqvdp->clk);
goto out;
}
@@ -994,6 +995,7 @@ static void sti_hqvdp_start_xp70(struct sti_hqvdp *hqvdp)
}
if (i == POLL_MAX_ATTEMPT) {
DRM_ERROR("Could not boot\n");
+ clk_disable_unprepare(hqvdp->clk);
goto out;
}
@@ -1081,6 +1083,7 @@ static int sti_hqvdp_atomic_check(struct drm_plane *drm_plane,
&hqvdp->vtg_nb,
crtc)) {
DRM_ERROR("Cannot register VTG notifier\n");
+ clk_disable_unprepare(hqvdp->clk_pix_main);
return -EINVAL;
}
hqvdp->vtg_registered = true;
@@ -1273,7 +1276,6 @@ static const struct drm_plane_funcs sti_hqvdp_plane_helpers_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = sti_hqvdp_destroy,
- .set_property = drm_atomic_helper_plane_set_property,
.reset = sti_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
@@ -1295,7 +1297,7 @@ static struct drm_plane *sti_hqvdp_create(struct drm_device *drm_dev,
&sti_hqvdp_plane_helpers_funcs,
hqvdp_supported_formats,
ARRAY_SIZE(hqvdp_supported_formats),
- DRM_PLANE_TYPE_OVERLAY, NULL);
+ NULL, DRM_PLANE_TYPE_OVERLAY, NULL);
if (res) {
DRM_ERROR("Failed to initialize universal plane\n");
return NULL;
@@ -1395,7 +1397,7 @@ static int sti_hqvdp_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id hqvdp_of_match[] = {
+static const struct of_device_id hqvdp_of_match[] = {
{ .compatible = "st,stih407-hqvdp", },
{ /* end node */ }
};
diff --git a/drivers/gpu/drm/stm/Kconfig b/drivers/gpu/drm/stm/Kconfig
index 2c4817fb0890..35367ada3bc1 100644
--- a/drivers/gpu/drm/stm/Kconfig
+++ b/drivers/gpu/drm/stm/Kconfig
@@ -4,13 +4,19 @@ config DRM_STM
select DRM_KMS_HELPER
select DRM_GEM_CMA_HELPER
select DRM_KMS_CMA_HELPER
- select DRM_PANEL
+ select DRM_PANEL_BRIDGE
select VIDEOMODE_HELPERS
select FB_PROVIDE_GET_FB_UNMAPPED_AREA
- default y
help
Enable support for the on-chip display controller on
STMicroelectronics STM32 MCUs.
To compile this driver as a module, choose M here: the module
will be called stm-drm.
+
+config DRM_STM_DSI
+ tristate "STMicroelectronics specific extensions for Synopsys MIPI DSI"
+ depends on DRM_STM
+ select DRM_DW_MIPI_DSI
+ help
+ Choose this option for MIPI DSI support on STMicroelectronics SoC.
diff --git a/drivers/gpu/drm/stm/Makefile b/drivers/gpu/drm/stm/Makefile
index a09ecf450218..d883adc365a2 100644
--- a/drivers/gpu/drm/stm/Makefile
+++ b/drivers/gpu/drm/stm/Makefile
@@ -2,4 +2,6 @@ stm-drm-y := \
drv.o \
ltdc.o
+obj-$(CONFIG_DRM_STM_DSI) += dw_mipi_dsi-stm.o
+
obj-$(CONFIG_DRM_STM) += stm-drm.o
diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
index 83ab48f1fd00..b333b37f3f89 100644
--- a/drivers/gpu/drm/stm/drv.c
+++ b/drivers/gpu/drm/stm/drv.c
@@ -20,13 +20,6 @@
#include "ltdc.h"
-#define DRIVER_NAME "stm"
-#define DRIVER_DESC "STMicroelectronics SoC DRM"
-#define DRIVER_DATE "20170330"
-#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 0
-#define DRIVER_PATCH_LEVEL 0
-
#define STM_MAX_FB_WIDTH 2048
#define STM_MAX_FB_HEIGHT 2048 /* same as width to handle orientation */
@@ -59,16 +52,14 @@ static struct drm_driver drv_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
DRIVER_ATOMIC,
.lastclose = drv_lastclose,
- .name = DRIVER_NAME,
- .desc = DRIVER_DESC,
- .date = DRIVER_DATE,
- .major = DRIVER_MAJOR,
- .minor = DRIVER_MINOR,
- .patchlevel = DRIVER_PATCH_LEVEL,
+ .name = "stm",
+ .desc = "STMicroelectronics SoC DRM",
+ .date = "20170330",
+ .major = 1,
+ .minor = 0,
+ .patchlevel = 0,
.fops = &drv_driver_fops,
.dumb_create = drm_gem_cma_dumb_create,
- .dumb_map_offset = drm_gem_cma_dumb_map_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_free_object_unlocked = drm_gem_cma_free_object,
@@ -206,7 +197,7 @@ static struct platform_driver stm_drm_platform_driver = {
.probe = stm_drm_platform_probe,
.remove = stm_drm_platform_remove,
.driver = {
- .name = DRIVER_NAME,
+ .name = "stm32-display",
.of_match_table = drv_dt_ids,
},
};
diff --git a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
new file mode 100644
index 000000000000..568c5d0461ea
--- /dev/null
+++ b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
@@ -0,0 +1,352 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2017
+ *
+ * Authors: Philippe Cornu <philippe.cornu@st.com>
+ * Yannick Fertre <yannick.fertre@st.com>
+ *
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/bridge/dw_mipi_dsi.h>
+#include <video/mipi_display.h>
+
+/* DSI wrapper register & bit definitions */
+/* Note: registers are named as in the Reference Manual */
+#define DSI_WCFGR 0x0400 /* Wrapper ConFiGuration Reg */
+#define WCFGR_DSIM BIT(0) /* DSI Mode */
+#define WCFGR_COLMUX GENMASK(3, 1) /* COLor MUltipleXing */
+
+#define DSI_WCR 0x0404 /* Wrapper Control Reg */
+#define WCR_DSIEN BIT(3) /* DSI ENable */
+
+#define DSI_WISR 0x040C /* Wrapper Interrupt and Status Reg */
+#define WISR_PLLLS BIT(8) /* PLL Lock Status */
+#define WISR_RRS BIT(12) /* Regulator Ready Status */
+
+#define DSI_WPCR0 0x0418 /* Wrapper Phy Conf Reg 0 */
+#define WPCR0_UIX4 GENMASK(5, 0) /* Unit Interval X 4 */
+#define WPCR0_TDDL BIT(16) /* Turn Disable Data Lanes */
+
+#define DSI_WRPCR 0x0430 /* Wrapper Regulator & Pll Ctrl Reg */
+#define WRPCR_PLLEN BIT(0) /* PLL ENable */
+#define WRPCR_NDIV GENMASK(8, 2) /* pll loop DIVision Factor */
+#define WRPCR_IDF GENMASK(14, 11) /* pll Input Division Factor */
+#define WRPCR_ODF GENMASK(17, 16) /* pll Output Division Factor */
+#define WRPCR_REGEN BIT(24) /* REGulator ENable */
+#define WRPCR_BGREN BIT(28) /* BandGap Reference ENable */
+#define IDF_MIN 1
+#define IDF_MAX 7
+#define NDIV_MIN 10
+#define NDIV_MAX 125
+#define ODF_MIN 1
+#define ODF_MAX 8
+
+/* dsi color format coding according to the datasheet */
+enum dsi_color {
+ DSI_RGB565_CONF1,
+ DSI_RGB565_CONF2,
+ DSI_RGB565_CONF3,
+ DSI_RGB666_CONF1,
+ DSI_RGB666_CONF2,
+ DSI_RGB888,
+};
+
+#define LANE_MIN_KBPS 31250
+#define LANE_MAX_KBPS 500000
+
+/* Sleep & timeout for regulator on/off, pll lock/unlock & fifo empty */
+#define SLEEP_US 1000
+#define TIMEOUT_US 200000
+
+struct dw_mipi_dsi_stm {
+ void __iomem *base;
+ struct clk *pllref_clk;
+};
+
+static inline void dsi_write(struct dw_mipi_dsi_stm *dsi, u32 reg, u32 val)
+{
+ writel(val, dsi->base + reg);
+}
+
+static inline u32 dsi_read(struct dw_mipi_dsi_stm *dsi, u32 reg)
+{
+ return readl(dsi->base + reg);
+}
+
+static inline void dsi_set(struct dw_mipi_dsi_stm *dsi, u32 reg, u32 mask)
+{
+ dsi_write(dsi, reg, dsi_read(dsi, reg) | mask);
+}
+
+static inline void dsi_clear(struct dw_mipi_dsi_stm *dsi, u32 reg, u32 mask)
+{
+ dsi_write(dsi, reg, dsi_read(dsi, reg) & ~mask);
+}
+
+static inline void dsi_update_bits(struct dw_mipi_dsi_stm *dsi, u32 reg,
+ u32 mask, u32 val)
+{
+ dsi_write(dsi, reg, (dsi_read(dsi, reg) & ~mask) | val);
+}
+
+static enum dsi_color dsi_color_from_mipi(enum mipi_dsi_pixel_format fmt)
+{
+ switch (fmt) {
+ case MIPI_DSI_FMT_RGB888:
+ return DSI_RGB888;
+ case MIPI_DSI_FMT_RGB666:
+ return DSI_RGB666_CONF2;
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ return DSI_RGB666_CONF1;
+ case MIPI_DSI_FMT_RGB565:
+ return DSI_RGB565_CONF1;
+ default:
+ DRM_DEBUG_DRIVER("MIPI color invalid, so we use rgb888\n");
+ }
+ return DSI_RGB888;
+}
+
+static int dsi_pll_get_clkout_khz(int clkin_khz, int idf, int ndiv, int odf)
+{
+ /* prevent division by 0 */
+ if (idf * odf)
+ return DIV_ROUND_CLOSEST(clkin_khz * ndiv, idf * odf);
+
+ return 0;
+}
+
+static int dsi_pll_get_params(int clkin_khz, int clkout_khz,
+ int *idf, int *ndiv, int *odf)
+{
+ int i, o, n, n_min, n_max;
+ int fvco_min, fvco_max, delta, best_delta; /* all in khz */
+
+ /* Early checks preventing division by 0 & odd results */
+ if ((clkin_khz <= 0) || (clkout_khz <= 0))
+ return -EINVAL;
+
+ fvco_min = LANE_MIN_KBPS * 2 * ODF_MAX;
+ fvco_max = LANE_MAX_KBPS * 2 * ODF_MIN;
+
+ best_delta = 1000000; /* large starting value (1000000 kHz) */
+
+ for (i = IDF_MIN; i <= IDF_MAX; i++) {
+ /* Compute ndiv range according to Fvco */
+ n_min = ((fvco_min * i) / (2 * clkin_khz)) + 1;
+ n_max = (fvco_max * i) / (2 * clkin_khz);
+
+ /* No need to continue idf loop if we reach ndiv max */
+ if (n_min >= NDIV_MAX)
+ break;
+
+ /* Clamp ndiv to valid values */
+ if (n_min < NDIV_MIN)
+ n_min = NDIV_MIN;
+ if (n_max > NDIV_MAX)
+ n_max = NDIV_MAX;
+
+ for (o = ODF_MIN; o <= ODF_MAX; o *= 2) {
+ n = DIV_ROUND_CLOSEST(i * o * clkout_khz, clkin_khz);
+ /* Check ndiv according to vco range */
+ if ((n < n_min) || (n > n_max))
+ continue;
+ /* Check if the new delta is better & save the parameters */
+ delta = dsi_pll_get_clkout_khz(clkin_khz, i, n, o) -
+ clkout_khz;
+ if (delta < 0)
+ delta = -delta;
+ if (delta < best_delta) {
+ *idf = i;
+ *ndiv = n;
+ *odf = o;
+ best_delta = delta;
+ }
+ /* fast return in case of "perfect result" */
+ if (!delta)
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
+static int dw_mipi_dsi_phy_init(void *priv_data)
+{
+ struct dw_mipi_dsi_stm *dsi = priv_data;
+ u32 val;
+ int ret;
+
+ /* Enable the regulator */
+ dsi_set(dsi, DSI_WRPCR, WRPCR_REGEN | WRPCR_BGREN);
+ ret = readl_poll_timeout(dsi->base + DSI_WISR, val, val & WISR_RRS,
+ SLEEP_US, TIMEOUT_US);
+ if (ret)
+ DRM_DEBUG_DRIVER("!TIMEOUT! waiting REGU, let's continue\n");
+
+ /* Enable the DSI PLL & wait for its lock */
+ dsi_set(dsi, DSI_WRPCR, WRPCR_PLLEN);
+ ret = readl_poll_timeout(dsi->base + DSI_WISR, val, val & WISR_PLLLS,
+ SLEEP_US, TIMEOUT_US);
+ if (ret)
+ DRM_DEBUG_DRIVER("!TIMEOUT! waiting PLL, let's continue\n");
+
+ /* Enable the DSI wrapper */
+ dsi_set(dsi, DSI_WCR, WCR_DSIEN);
+
+ return 0;
+}
+
+static int
+dw_mipi_dsi_get_lane_mbps(void *priv_data, struct drm_display_mode *mode,
+ unsigned long mode_flags, u32 lanes, u32 format,
+ unsigned int *lane_mbps)
+{
+ struct dw_mipi_dsi_stm *dsi = priv_data;
+ unsigned int idf, ndiv, odf, pll_in_khz, pll_out_khz;
+ int ret, bpp;
+ u32 val;
+
+ pll_in_khz = (unsigned int)(clk_get_rate(dsi->pllref_clk) / 1000);
+
+ /* Compute requested pll out */
+ bpp = mipi_dsi_pixel_format_to_bpp(format);
+ pll_out_khz = mode->clock * bpp / lanes;
+ /* Add 20% to pll out to be higher than pixel bw (burst mode only) */
+ pll_out_khz = (pll_out_khz * 12) / 10;
+ if (pll_out_khz > LANE_MAX_KBPS) {
+ pll_out_khz = LANE_MAX_KBPS;
+ DRM_WARN("Warning max phy mbps is used\n");
+ }
+ if (pll_out_khz < LANE_MIN_KBPS) {
+ pll_out_khz = LANE_MIN_KBPS;
+ DRM_WARN("Warning min phy mbps is used\n");
+ }
+
+ /* Compute best pll parameters */
+ idf = 0;
+ ndiv = 0;
+ odf = 0;
+ ret = dsi_pll_get_params(pll_in_khz, pll_out_khz, &idf, &ndiv, &odf);
+ if (ret)
+ DRM_WARN("Warning dsi_pll_get_params(): bad params\n");
+
+ /* Get the adjusted pll out value */
+ pll_out_khz = dsi_pll_get_clkout_khz(pll_in_khz, idf, ndiv, odf);
+
+ /* Set the PLL division factors */
+ dsi_update_bits(dsi, DSI_WRPCR, WRPCR_NDIV | WRPCR_IDF | WRPCR_ODF,
+ (ndiv << 2) | (idf << 11) | ((ffs(odf) - 1) << 16));
+
+ /* Compute uix4 & set the bit period in high-speed mode */
+ val = 4000000 / pll_out_khz;
+ dsi_update_bits(dsi, DSI_WPCR0, WPCR0_UIX4, val);
+
+ /* Select video mode by resetting DSIM bit */
+ dsi_clear(dsi, DSI_WCFGR, WCFGR_DSIM);
+
+ /* Select the color coding */
+ dsi_update_bits(dsi, DSI_WCFGR, WCFGR_COLMUX,
+ dsi_color_from_mipi(format) << 1);
+
+ *lane_mbps = pll_out_khz / 1000;
+
+ DRM_DEBUG_DRIVER("pll_in %ukHz pll_out %ukHz lane_mbps %uMHz\n",
+ pll_in_khz, pll_out_khz, *lane_mbps);
+
+ return 0;
+}
+
+static const struct dw_mipi_dsi_phy_ops dw_mipi_dsi_stm_phy_ops = {
+ .init = dw_mipi_dsi_phy_init,
+ .get_lane_mbps = dw_mipi_dsi_get_lane_mbps,
+};
+
+static struct dw_mipi_dsi_plat_data dw_mipi_dsi_stm_plat_data = {
+ .max_data_lanes = 2,
+ .phy_ops = &dw_mipi_dsi_stm_phy_ops,
+};
+
+static const struct of_device_id dw_mipi_dsi_stm_dt_ids[] = {
+ { .compatible = "st,stm32-dsi", .data = &dw_mipi_dsi_stm_plat_data, },
+ { },
+};
+MODULE_DEVICE_TABLE(of, dw_mipi_dsi_stm_dt_ids);
+
+static int dw_mipi_dsi_stm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dw_mipi_dsi_stm *dsi;
+ struct resource *res;
+ int ret;
+
+ dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
+ if (!dsi)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ DRM_ERROR("Unable to get resource\n");
+ return -ENODEV;
+ }
+
+ dsi->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(dsi->base)) {
+ DRM_ERROR("Unable to get dsi registers\n");
+ return PTR_ERR(dsi->base);
+ }
+
+ dsi->pllref_clk = devm_clk_get(dev, "ref");
+ if (IS_ERR(dsi->pllref_clk)) {
+ ret = PTR_ERR(dsi->pllref_clk);
+ dev_err(dev, "Unable to get pll reference clock: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(dsi->pllref_clk);
+ if (ret) {
+ dev_err(dev, "%s: Failed to enable pllref_clk\n", __func__);
+ return ret;
+ }
+
+ dw_mipi_dsi_stm_plat_data.base = dsi->base;
+ dw_mipi_dsi_stm_plat_data.priv_data = dsi;
+
+ ret = dw_mipi_dsi_probe(pdev, &dw_mipi_dsi_stm_plat_data);
+ if (ret) {
+ DRM_ERROR("Failed to initialize mipi dsi host\n");
+ clk_disable_unprepare(dsi->pllref_clk);
+ }
+
+ return ret;
+}
+
+static int dw_mipi_dsi_stm_remove(struct platform_device *pdev)
+{
+ struct dw_mipi_dsi_stm *dsi = dw_mipi_dsi_stm_plat_data.priv_data;
+
+ clk_disable_unprepare(dsi->pllref_clk);
+ dw_mipi_dsi_remove(pdev);
+
+ return 0;
+}
+
+static struct platform_driver dw_mipi_dsi_stm_driver = {
+ .probe = dw_mipi_dsi_stm_probe,
+ .remove = dw_mipi_dsi_stm_remove,
+ .driver = {
+ .of_match_table = dw_mipi_dsi_stm_dt_ids,
+ .name = "dw_mipi_dsi-stm",
+ },
+};
+
+module_platform_driver(dw_mipi_dsi_stm_driver);
+
+MODULE_AUTHOR("Philippe Cornu <philippe.cornu@st.com>");
+MODULE_AUTHOR("Yannick Fertre <yannick.fertre@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics DW MIPI DSI host controller driver");
+MODULE_LICENSE("GPL v2");
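Aside on the PLL arithmetic in the new dw_mipi_dsi-stm glue above: the driver requests a per-lane rate of mode_clock * bpp / lanes plus a 20% burst-mode margin, searches idf/ndiv/odf for the closest clkin * ndiv / (idf * odf), and derives the high-speed bit period (UIX4) as 4000000 / clkout. A minimal standalone sketch of that arithmetic follows; the 25 MHz reference clock and the 27 MHz RGB888 mode on two lanes are illustrative assumptions, not values taken from the patch.

#include <stdio.h>

/* Mirrors dsi_pll_get_clkout_khz(): clkout = clkin * ndiv / (idf * odf) */
static int clkout_khz(int clkin_khz, int idf, int ndiv, int odf)
{
	if (idf * odf == 0)
		return 0;
	return (clkin_khz * ndiv + (idf * odf) / 2) / (idf * odf);
}

int main(void)
{
	int pll_in_khz = 25000;      /* hypothetical 25 MHz "ref" clock */
	int mode_clock_khz = 27000;  /* hypothetical 27 MHz pixel clock */
	int bpp = 24, lanes = 2;     /* RGB888 over two data lanes */

	/* Requested PLL output: per-lane pixel bandwidth plus 20% margin */
	int pll_out_khz = mode_clock_khz * bpp / lanes;  /* 324000 kHz */
	pll_out_khz = pll_out_khz * 12 / 10;             /* 388800 kHz */

	/* One divider set within the IDF/NDIV/ODF ranges: idf=1, ndiv=31, odf=2 */
	int actual_khz = clkout_khz(pll_in_khz, 1, 31, 2);  /* 387500 kHz */

	/* Bit period in high-speed mode, in quarter-UI units (WPCR0_UIX4) */
	int uix4 = 4000000 / actual_khz;  /* 10 */

	printf("requested %d kHz, actual %d kHz, uix4 %d\n",
	       pll_out_khz, actual_khz, uix4);
	return 0;
}
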
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index 1b9483d4f2a4..d394a03632c4 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -21,7 +21,7 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_of.h>
-#include <drm/drm_panel.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_plane_helper.h>
#include <video/videomode.h>
@@ -42,52 +42,52 @@
* an extra offset specified with reg_ofs.
*/
#define REG_OFS_NONE 0
-#define REG_OFS_4 4 /* Insertion of "Layer Configuration 2" reg */
+#define REG_OFS_4 4 /* Insertion of "Layer Conf. 2" reg */
#define REG_OFS (ldev->caps.reg_ofs)
-#define LAY_OFS 0x80 /* Register Offset between 2 layers */
+#define LAY_OFS 0x80 /* Register Offset between 2 layers */
/* Global register offsets */
-#define LTDC_IDR 0x0000 /* IDentification */
-#define LTDC_LCR 0x0004 /* Layer Count */
-#define LTDC_SSCR 0x0008 /* Synchronization Size Configuration */
-#define LTDC_BPCR 0x000C /* Back Porch Configuration */
-#define LTDC_AWCR 0x0010 /* Active Width Configuration */
-#define LTDC_TWCR 0x0014 /* Total Width Configuration */
-#define LTDC_GCR 0x0018 /* Global Control */
-#define LTDC_GC1R 0x001C /* Global Configuration 1 */
-#define LTDC_GC2R 0x0020 /* Global Configuration 2 */
-#define LTDC_SRCR 0x0024 /* Shadow Reload Configuration */
-#define LTDC_GACR 0x0028 /* GAmma Correction */
-#define LTDC_BCCR 0x002C /* Background Color Configuration */
-#define LTDC_IER 0x0034 /* Interrupt Enable */
-#define LTDC_ISR 0x0038 /* Interrupt Status */
-#define LTDC_ICR 0x003C /* Interrupt Clear */
-#define LTDC_LIPCR 0x0040 /* Line Interrupt Position Configuration */
-#define LTDC_CPSR 0x0044 /* Current Position Status */
-#define LTDC_CDSR 0x0048 /* Current Display Status */
+#define LTDC_IDR 0x0000 /* IDentification */
+#define LTDC_LCR 0x0004 /* Layer Count */
+#define LTDC_SSCR 0x0008 /* Synchronization Size Configuration */
+#define LTDC_BPCR 0x000C /* Back Porch Configuration */
+#define LTDC_AWCR 0x0010 /* Active Width Configuration */
+#define LTDC_TWCR 0x0014 /* Total Width Configuration */
+#define LTDC_GCR 0x0018 /* Global Control */
+#define LTDC_GC1R 0x001C /* Global Configuration 1 */
+#define LTDC_GC2R 0x0020 /* Global Configuration 2 */
+#define LTDC_SRCR 0x0024 /* Shadow Reload Configuration */
+#define LTDC_GACR 0x0028 /* GAmma Correction */
+#define LTDC_BCCR 0x002C /* Background Color Configuration */
+#define LTDC_IER 0x0034 /* Interrupt Enable */
+#define LTDC_ISR 0x0038 /* Interrupt Status */
+#define LTDC_ICR 0x003C /* Interrupt Clear */
+#define LTDC_LIPCR 0x0040 /* Line Interrupt Position Conf. */
+#define LTDC_CPSR 0x0044 /* Current Position Status */
+#define LTDC_CDSR 0x0048 /* Current Display Status */
/* Layer register offsets */
-#define LTDC_L1LC1R (0x0080) /* L1 Layer Configuration 1 */
-#define LTDC_L1LC2R (0x0084) /* L1 Layer Configuration 2 */
-#define LTDC_L1CR (0x0084 + REG_OFS) /* L1 Control */
-#define LTDC_L1WHPCR (0x0088 + REG_OFS) /* L1 Window Hor Position Config */
-#define LTDC_L1WVPCR (0x008C + REG_OFS) /* L1 Window Vert Position Config */
-#define LTDC_L1CKCR (0x0090 + REG_OFS) /* L1 Color Keying Configuration */
-#define LTDC_L1PFCR (0x0094 + REG_OFS) /* L1 Pixel Format Configuration */
-#define LTDC_L1CACR (0x0098 + REG_OFS) /* L1 Constant Alpha Config */
-#define LTDC_L1DCCR (0x009C + REG_OFS) /* L1 Default Color Configuration */
-#define LTDC_L1BFCR (0x00A0 + REG_OFS) /* L1 Blend Factors Configuration */
-#define LTDC_L1FBBCR (0x00A4 + REG_OFS) /* L1 FrameBuffer Bus Control */
-#define LTDC_L1AFBCR (0x00A8 + REG_OFS) /* L1 AuxFB Control */
-#define LTDC_L1CFBAR (0x00AC + REG_OFS) /* L1 Color FrameBuffer Address */
-#define LTDC_L1CFBLR (0x00B0 + REG_OFS) /* L1 Color FrameBuffer Length */
-#define LTDC_L1CFBLNR (0x00B4 + REG_OFS) /* L1 Color FrameBuffer Line Nb */
-#define LTDC_L1AFBAR (0x00B8 + REG_OFS) /* L1 AuxFB Address */
-#define LTDC_L1AFBLR (0x00BC + REG_OFS) /* L1 AuxFB Length */
-#define LTDC_L1AFBLNR (0x00C0 + REG_OFS) /* L1 AuxFB Line Number */
-#define LTDC_L1CLUTWR (0x00C4 + REG_OFS) /* L1 CLUT Write */
-#define LTDC_L1YS1R (0x00E0 + REG_OFS) /* L1 YCbCr Scale 1 */
-#define LTDC_L1YS2R (0x00E4 + REG_OFS) /* L1 YCbCr Scale 2 */
+#define LTDC_L1LC1R (0x80) /* L1 Layer Configuration 1 */
+#define LTDC_L1LC2R (0x84) /* L1 Layer Configuration 2 */
+#define LTDC_L1CR (0x84 + REG_OFS)/* L1 Control */
+#define LTDC_L1WHPCR (0x88 + REG_OFS)/* L1 Window Hor Position Config */
+#define LTDC_L1WVPCR (0x8C + REG_OFS)/* L1 Window Vert Position Config */
+#define LTDC_L1CKCR (0x90 + REG_OFS)/* L1 Color Keying Configuration */
+#define LTDC_L1PFCR (0x94 + REG_OFS)/* L1 Pixel Format Configuration */
+#define LTDC_L1CACR (0x98 + REG_OFS)/* L1 Constant Alpha Config */
+#define LTDC_L1DCCR (0x9C + REG_OFS)/* L1 Default Color Configuration */
+#define LTDC_L1BFCR (0xA0 + REG_OFS)/* L1 Blend Factors Configuration */
+#define LTDC_L1FBBCR (0xA4 + REG_OFS)/* L1 FrameBuffer Bus Control */
+#define LTDC_L1AFBCR (0xA8 + REG_OFS)/* L1 AuxFB Control */
+#define LTDC_L1CFBAR (0xAC + REG_OFS)/* L1 Color FrameBuffer Address */
+#define LTDC_L1CFBLR (0xB0 + REG_OFS)/* L1 Color FrameBuffer Length */
+#define LTDC_L1CFBLNR (0xB4 + REG_OFS)/* L1 Color FrameBuffer Line Nb */
+#define LTDC_L1AFBAR (0xB8 + REG_OFS)/* L1 AuxFB Address */
+#define LTDC_L1AFBLR (0xBC + REG_OFS)/* L1 AuxFB Length */
+#define LTDC_L1AFBLNR (0xC0 + REG_OFS)/* L1 AuxFB Line Number */
+#define LTDC_L1CLUTWR (0xC4 + REG_OFS)/* L1 CLUT Write */
+#define LTDC_L1YS1R (0xE0 + REG_OFS)/* L1 YCbCr Scale 1 */
+#define LTDC_L1YS2R (0xE4 + REG_OFS)/* L1 YCbCr Scale 2 */
/* Bit definitions */
#define SSCR_VSH GENMASK(10, 0) /* Vertical Synchronization Height */
@@ -104,10 +104,10 @@
#define GCR_LTDCEN BIT(0) /* LTDC ENable */
#define GCR_DEN BIT(16) /* Dither ENable */
-#define GCR_PCPOL BIT(28) /* Pixel Clock POLarity */
-#define GCR_DEPOL BIT(29) /* Data Enable POLarity */
-#define GCR_VSPOL BIT(30) /* Vertical Synchro POLarity */
-#define GCR_HSPOL BIT(31) /* Horizontal Synchro POLarity */
+#define GCR_PCPOL BIT(28) /* Pixel Clock POLarity-Inverted */
+#define GCR_DEPOL BIT(29) /* Data Enable POLarity-High */
+#define GCR_VSPOL BIT(30) /* Vertical Synchro POLarity-High */
+#define GCR_HSPOL BIT(31) /* Horizontal Synchro POLarity-High */
#define GC1R_WBCH GENMASK(3, 0) /* Width of Blue CHannel output */
#define GC1R_WGCH GENMASK(7, 4) /* Width of Green Channel output */
@@ -172,60 +172,52 @@
#define LXCFBLR_CFBLL GENMASK(12, 0) /* Color Frame Buffer Line Length */
#define LXCFBLR_CFBP GENMASK(28, 16) /* Color Frame Buffer Pitch in bytes */
-#define LXCFBLNR_CFBLN GENMASK(10, 0) /* Color Frame Buffer Line Number */
+#define LXCFBLNR_CFBLN GENMASK(10, 0) /* Color Frame Buffer Line Number */
-#define HSPOL_AL 0 /* Horizontal Sync POLarity Active Low */
-#define VSPOL_AL 0 /* Vertical Sync POLarity Active Low */
-#define DEPOL_AL 0 /* Data Enable POLarity Active Low */
-#define PCPOL_IPC 0 /* Input Pixel Clock */
-#define HSPOL_AH GCR_HSPOL /* Horizontal Sync POLarity Active High */
-#define VSPOL_AH GCR_VSPOL /* Vertical Sync POLarity Active High */
-#define DEPOL_AH GCR_DEPOL /* Data Enable POLarity Active High */
-#define PCPOL_IIPC GCR_PCPOL /* Inverted Input Pixel Clock */
-#define CONSTA_MAX 0xFF /* CONSTant Alpha MAX= 1.0 */
-#define BF1_PAXCA 0x600 /* Pixel Alpha x Constant Alpha */
-#define BF1_CA 0x400 /* Constant Alpha */
-#define BF2_1PAXCA 0x007 /* 1 - (Pixel Alpha x Constant Alpha) */
-#define BF2_1CA 0x005 /* 1 - Constant Alpha */
+#define CONSTA_MAX 0xFF /* CONSTant Alpha MAX= 1.0 */
+#define BF1_PAXCA 0x600 /* Pixel Alpha x Constant Alpha */
+#define BF1_CA 0x400 /* Constant Alpha */
+#define BF2_1PAXCA 0x007 /* 1 - (Pixel Alpha x Constant Alpha) */
+#define BF2_1CA 0x005 /* 1 - Constant Alpha */
-#define NB_PF 8 /* Max nb of HW pixel format */
+#define NB_PF 8 /* Max nb of HW pixel format */
enum ltdc_pix_fmt {
PF_NONE,
/* RGB formats */
- PF_ARGB8888, /* ARGB [32 bits] */
- PF_RGBA8888, /* RGBA [32 bits] */
- PF_RGB888, /* RGB [24 bits] */
- PF_RGB565, /* RGB [16 bits] */
- PF_ARGB1555, /* ARGB A:1 bit RGB:15 bits [16 bits] */
- PF_ARGB4444, /* ARGB A:4 bits R/G/B: 4 bits each [16 bits] */
+ PF_ARGB8888, /* ARGB [32 bits] */
+ PF_RGBA8888, /* RGBA [32 bits] */
+ PF_RGB888, /* RGB [24 bits] */
+ PF_RGB565, /* RGB [16 bits] */
+ PF_ARGB1555, /* ARGB A:1 bit RGB:15 bits [16 bits] */
+ PF_ARGB4444, /* ARGB A:4 bits R/G/B: 4 bits each [16 bits] */
/* Indexed formats */
- PF_L8, /* Indexed 8 bits [8 bits] */
- PF_AL44, /* Alpha:4 bits + indexed 4 bits [8 bits] */
- PF_AL88 /* Alpha:8 bits + indexed 8 bits [16 bits] */
+ PF_L8, /* Indexed 8 bits [8 bits] */
+ PF_AL44, /* Alpha:4 bits + indexed 4 bits [8 bits] */
+ PF_AL88 /* Alpha:8 bits + indexed 8 bits [16 bits] */
};
/* The index gives the encoding of the pixel format for an HW version */
static const enum ltdc_pix_fmt ltdc_pix_fmt_a0[NB_PF] = {
- PF_ARGB8888, /* 0x00 */
- PF_RGB888, /* 0x01 */
- PF_RGB565, /* 0x02 */
- PF_ARGB1555, /* 0x03 */
- PF_ARGB4444, /* 0x04 */
- PF_L8, /* 0x05 */
- PF_AL44, /* 0x06 */
- PF_AL88 /* 0x07 */
+ PF_ARGB8888, /* 0x00 */
+ PF_RGB888, /* 0x01 */
+ PF_RGB565, /* 0x02 */
+ PF_ARGB1555, /* 0x03 */
+ PF_ARGB4444, /* 0x04 */
+ PF_L8, /* 0x05 */
+ PF_AL44, /* 0x06 */
+ PF_AL88 /* 0x07 */
};
static const enum ltdc_pix_fmt ltdc_pix_fmt_a1[NB_PF] = {
- PF_ARGB8888, /* 0x00 */
- PF_RGB888, /* 0x01 */
- PF_RGB565, /* 0x02 */
- PF_RGBA8888, /* 0x03 */
- PF_AL44, /* 0x04 */
- PF_L8, /* 0x05 */
- PF_ARGB1555, /* 0x06 */
- PF_ARGB4444 /* 0x07 */
+ PF_ARGB8888, /* 0x00 */
+ PF_RGB888, /* 0x01 */
+ PF_RGB565, /* 0x02 */
+ PF_RGBA8888, /* 0x03 */
+ PF_AL44, /* 0x04 */
+ PF_L8, /* 0x05 */
+ PF_ARGB1555, /* 0x06 */
+ PF_ARGB4444 /* 0x07 */
};
static inline u32 reg_read(void __iomem *base, u32 reg)
@@ -269,11 +261,6 @@ static inline struct ltdc_device *encoder_to_ltdc(struct drm_encoder *enc)
return (struct ltdc_device *)enc->dev->dev_private;
}
-static inline struct ltdc_device *connector_to_ltdc(struct drm_connector *con)
-{
- return (struct ltdc_device *)con->dev->dev_private;
-}
-
static inline enum ltdc_pix_fmt to_ltdc_pixelformat(u32 drm_fmt)
{
enum ltdc_pix_fmt pf;
@@ -307,7 +294,7 @@ static inline enum ltdc_pix_fmt to_ltdc_pixelformat(u32 drm_fmt)
default:
pf = PF_NONE;
break;
- /* Note: There are no DRM_FORMAT for AL44 and AL88 */
+ /* Note: There are no DRM_FORMAT for AL44 and AL88 */
}
return pf;
@@ -330,8 +317,8 @@ static inline u32 to_drm_pixelformat(enum ltdc_pix_fmt pf)
return DRM_FORMAT_ARGB4444;
case PF_L8:
return DRM_FORMAT_C8;
- case PF_AL44: /* No DRM support */
- case PF_AL88: /* No DRM support */
+ case PF_AL44: /* No DRM support */
+ case PF_AL88: /* No DRM support */
case PF_NONE:
default:
return 0;
@@ -375,18 +362,8 @@ static irqreturn_t ltdc_irq(int irq, void *arg)
* DRM_CRTC
*/
-static void ltdc_crtc_load_lut(struct drm_crtc *crtc)
-{
- struct ltdc_device *ldev = crtc_to_ltdc(crtc);
- unsigned int i, lay;
-
- for (lay = 0; lay < ldev->caps.nb_layers; lay++)
- for (i = 0; i < 256; i++)
- reg_write(ldev->regs, LTDC_L1CLUTWR + lay * LAY_OFS,
- ldev->clut[i]);
-}
-
-static void ltdc_crtc_enable(struct drm_crtc *crtc)
+static void ltdc_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct ltdc_device *ldev = crtc_to_ltdc(crtc);
@@ -407,7 +384,8 @@ static void ltdc_crtc_enable(struct drm_crtc *crtc)
drm_crtc_vblank_on(crtc);
}
-static void ltdc_crtc_disable(struct drm_crtc *crtc)
+static void ltdc_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct ltdc_device *ldev = crtc_to_ltdc(crtc);
@@ -462,20 +440,20 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
clk_enable(ldev->pixel_clk);
- /* Configures the HS, VS, DE and PC polarities. */
- val = HSPOL_AL | VSPOL_AL | DEPOL_AL | PCPOL_IPC;
+ /* Configures the HS, VS, DE and PC polarities. Default Active Low */
+ val = 0;
if (vm.flags & DISPLAY_FLAGS_HSYNC_HIGH)
- val |= HSPOL_AH;
+ val |= GCR_HSPOL;
if (vm.flags & DISPLAY_FLAGS_VSYNC_HIGH)
- val |= VSPOL_AH;
+ val |= GCR_VSPOL;
if (vm.flags & DISPLAY_FLAGS_DE_HIGH)
- val |= DEPOL_AH;
+ val |= GCR_DEPOL;
if (vm.flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE)
- val |= PCPOL_IIPC;
+ val |= GCR_PCPOL;
reg_update_bits(ldev->regs, LTDC_GCR,
GCR_HSPOL | GCR_VSPOL | GCR_DEPOL | GCR_PCPOL, val);
@@ -522,12 +500,11 @@ static void ltdc_crtc_atomic_flush(struct drm_crtc *crtc,
}
}
-static struct drm_crtc_helper_funcs ltdc_crtc_helper_funcs = {
- .load_lut = ltdc_crtc_load_lut,
- .enable = ltdc_crtc_enable,
- .disable = ltdc_crtc_disable,
+static const struct drm_crtc_helper_funcs ltdc_crtc_helper_funcs = {
.mode_set_nofb = ltdc_crtc_mode_set_nofb,
.atomic_flush = ltdc_crtc_atomic_flush,
+ .atomic_enable = ltdc_crtc_atomic_enable,
+ .atomic_disable = ltdc_crtc_atomic_disable,
};
int ltdc_crtc_enable_vblank(struct drm_device *ddev, unsigned int pipe)
@@ -548,7 +525,7 @@ void ltdc_crtc_disable_vblank(struct drm_device *ddev, unsigned int pipe)
reg_clear(ldev->regs, LTDC_IER, IER_LIE);
}
-static struct drm_crtc_funcs ltdc_crtc_funcs = {
+static const struct drm_crtc_funcs ltdc_crtc_funcs = {
.destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
@@ -613,11 +590,11 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
src_w = state->src_w >> 16;
src_h = state->src_h >> 16;
- DRM_DEBUG_DRIVER(
- "plane:%d fb:%d (%dx%d)@(%d,%d) -> (%dx%d)@(%d,%d)\n",
- plane->base.id, fb->base.id,
- src_w, src_h, src_x, src_y,
- state->crtc_w, state->crtc_h, state->crtc_x, state->crtc_y);
+ DRM_DEBUG_DRIVER("plane:%d fb:%d (%dx%d)@(%d,%d) -> (%dx%d)@(%d,%d)\n",
+ plane->base.id, fb->base.id,
+ src_w, src_h, src_x, src_y,
+ state->crtc_w, state->crtc_h,
+ state->crtc_x, state->crtc_y);
bpcr = reg_read(ldev->regs, LTDC_BPCR);
ahbp = (bpcr & BPCR_AHBP) >> 16;
@@ -642,7 +619,7 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
if (val == NB_PF) {
DRM_ERROR("Pixel format %.4s not supported\n",
(char *)&fb->format->format);
- val = 0; /* set by default ARGB 32 bits */
+ val = 0; /* set by default ARGB 32 bits */
}
reg_update_bits(ldev->regs, LTDC_L1PFCR + lofs, LXPFCR_PF, val);
@@ -656,8 +633,7 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
/* Specifies the constant alpha value */
val = CONSTA_MAX;
- reg_update_bits(ldev->regs, LTDC_L1CACR + lofs,
- LXCACR_CONSTA, val);
+ reg_update_bits(ldev->regs, LTDC_L1CACR + lofs, LXCACR_CONSTA, val);
/* Specifies the blending factors */
val = BF1_PAXCA | BF2_1PAXCA;
@@ -666,8 +642,7 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
/* Configures the frame buffer line number */
val = y1 - y0 + 1;
- reg_update_bits(ldev->regs, LTDC_L1CFBLNR + lofs,
- LXCFBLNR_CFBLN, val);
+ reg_update_bits(ldev->regs, LTDC_L1CFBLNR + lofs, LXCFBLNR_CFBLN, val);
/* Sets the FB address */
paddr = (u32)drm_fb_cma_get_gem_addr(fb, state, 0);
@@ -706,11 +681,10 @@ static void ltdc_plane_atomic_disable(struct drm_plane *plane,
oldstate->crtc->base.id, plane->base.id);
}
-static struct drm_plane_funcs ltdc_plane_funcs = {
+static const struct drm_plane_funcs ltdc_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
- .set_property = drm_atomic_helper_plane_set_property,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
@@ -748,7 +722,7 @@ static struct drm_plane *ltdc_plane_create(struct drm_device *ddev,
ret = drm_universal_plane_init(ddev, plane, possible_crtcs,
&ltdc_plane_funcs, formats, nb_fmt,
- type, NULL);
+ NULL, type, NULL);
if (ret < 0)
return 0;
@@ -773,7 +747,7 @@ static int ltdc_crtc_init(struct drm_device *ddev, struct drm_crtc *crtc)
struct ltdc_device *ldev = ddev->dev_private;
struct drm_plane *primary, *overlay;
unsigned int i;
- int res;
+ int ret;
primary = ltdc_plane_create(ddev, DRM_PLANE_TYPE_PRIMARY);
if (!primary) {
@@ -781,9 +755,9 @@ static int ltdc_crtc_init(struct drm_device *ddev, struct drm_crtc *crtc)
return -EINVAL;
}
- res = drm_crtc_init_with_planes(ddev, crtc, primary, NULL,
+ ret = drm_crtc_init_with_planes(ddev, crtc, primary, NULL,
&ltdc_crtc_funcs, NULL);
- if (res) {
+ if (ret) {
DRM_ERROR("Can not initialize CRTC\n");
goto cleanup;
}
@@ -796,7 +770,7 @@ static int ltdc_crtc_init(struct drm_device *ddev, struct drm_crtc *crtc)
for (i = 1; i < ldev->caps.nb_layers; i++) {
overlay = ltdc_plane_create(ddev, DRM_PLANE_TYPE_OVERLAY);
if (!overlay) {
- res = -ENOMEM;
+ ret = -ENOMEM;
DRM_ERROR("Can not create overlay plane %d\n", i);
goto cleanup;
}
@@ -806,137 +780,42 @@ static int ltdc_crtc_init(struct drm_device *ddev, struct drm_crtc *crtc)
cleanup:
ltdc_plane_destroy_all(ddev);
- return res;
+ return ret;
}
/*
* DRM_ENCODER
*/
-static void ltdc_rgb_encoder_enable(struct drm_encoder *encoder)
-{
- struct ltdc_device *ldev = encoder_to_ltdc(encoder);
-
- DRM_DEBUG_DRIVER("\n");
-
- drm_panel_prepare(ldev->panel);
- drm_panel_enable(ldev->panel);
-}
-
-static void ltdc_rgb_encoder_disable(struct drm_encoder *encoder)
-{
- struct ltdc_device *ldev = encoder_to_ltdc(encoder);
-
- DRM_DEBUG_DRIVER("\n");
-
- drm_panel_disable(ldev->panel);
- drm_panel_unprepare(ldev->panel);
-}
-
-static const struct drm_encoder_helper_funcs ltdc_rgb_encoder_helper_funcs = {
- .enable = ltdc_rgb_encoder_enable,
- .disable = ltdc_rgb_encoder_disable,
-};
-
-static const struct drm_encoder_funcs ltdc_rgb_encoder_funcs = {
+static const struct drm_encoder_funcs ltdc_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
-static struct drm_encoder *ltdc_rgb_encoder_create(struct drm_device *ddev)
+static int ltdc_encoder_init(struct drm_device *ddev)
{
+ struct ltdc_device *ldev = ddev->dev_private;
struct drm_encoder *encoder;
+ int ret;
encoder = devm_kzalloc(ddev->dev, sizeof(*encoder), GFP_KERNEL);
if (!encoder)
- return NULL;
+ return -ENOMEM;
encoder->possible_crtcs = CRTC_MASK;
- encoder->possible_clones = 0; /* No cloning support */
+ encoder->possible_clones = 0; /* No cloning support */
- drm_encoder_init(ddev, encoder, &ltdc_rgb_encoder_funcs,
+ drm_encoder_init(ddev, encoder, &ltdc_encoder_funcs,
DRM_MODE_ENCODER_DPI, NULL);
- drm_encoder_helper_add(encoder, &ltdc_rgb_encoder_helper_funcs);
-
- DRM_DEBUG_DRIVER("RGB encoder:%d created\n", encoder->base.id);
-
- return encoder;
-}
-
-/*
- * DRM_CONNECTOR
- */
-
-static int ltdc_rgb_connector_get_modes(struct drm_connector *connector)
-{
- struct drm_device *ddev = connector->dev;
- struct ltdc_device *ldev = ddev->dev_private;
- int ret = 0;
-
- DRM_DEBUG_DRIVER("\n");
-
- if (ldev->panel)
- ret = drm_panel_get_modes(ldev->panel);
-
- return ret < 0 ? 0 : ret;
-}
-
-static struct drm_connector_helper_funcs ltdc_rgb_connector_helper_funcs = {
- .get_modes = ltdc_rgb_connector_get_modes,
-};
-
-static enum drm_connector_status
-ltdc_rgb_connector_detect(struct drm_connector *connector, bool force)
-{
- struct ltdc_device *ldev = connector_to_ltdc(connector);
-
- return ldev->panel ? connector_status_connected :
- connector_status_disconnected;
-}
-
-static void ltdc_rgb_connector_destroy(struct drm_connector *connector)
-{
- DRM_DEBUG_DRIVER("\n");
-
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
-}
-
-static const struct drm_connector_funcs ltdc_rgb_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .detect = ltdc_rgb_connector_detect,
- .destroy = ltdc_rgb_connector_destroy,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-struct drm_connector *ltdc_rgb_connector_create(struct drm_device *ddev)
-{
- struct drm_connector *connector;
- int err;
-
- connector = devm_kzalloc(ddev->dev, sizeof(*connector), GFP_KERNEL);
- if (!connector) {
- DRM_ERROR("Failed to allocate connector\n");
- return NULL;
- }
-
- connector->polled = DRM_CONNECTOR_POLL_HPD;
-
- err = drm_connector_init(ddev, connector, &ltdc_rgb_connector_funcs,
- DRM_MODE_CONNECTOR_DPI);
- if (err) {
- DRM_ERROR("Failed to initialize connector\n");
- return NULL;
+ ret = drm_bridge_attach(encoder, ldev->bridge, NULL);
+ if (ret) {
+ drm_encoder_cleanup(encoder);
+ return -EINVAL;
}
- drm_connector_helper_add(connector, &ltdc_rgb_connector_helper_funcs);
-
- DRM_DEBUG_DRIVER("RGB connector:%d created\n", connector->base.id);
+ DRM_DEBUG_DRIVER("Bridge encoder:%d created\n", encoder->base.id);
- return connector;
+ return 0;
}
static int ltdc_get_caps(struct drm_device *ddev)
@@ -972,61 +851,26 @@ static int ltdc_get_caps(struct drm_device *ddev)
return 0;
}
-static struct drm_panel *ltdc_get_panel(struct drm_device *ddev)
-{
- struct device *dev = ddev->dev;
- struct device_node *np = dev->of_node;
- struct device_node *entity, *port = NULL;
- struct drm_panel *panel = NULL;
-
- DRM_DEBUG_DRIVER("\n");
-
- /*
- * Parse ltdc node to get remote port and find RGB panel / HDMI slave
- * If a dsi or a bridge (hdmi, lvds...) is connected to ltdc,
- * a remote port & RGB panel will not be found.
- */
- for_each_endpoint_of_node(np, entity) {
- if (!of_device_is_available(entity))
- continue;
-
- port = of_graph_get_remote_port_parent(entity);
- if (port) {
- panel = of_drm_find_panel(port);
- of_node_put(port);
- if (panel) {
- DRM_DEBUG_DRIVER("remote panel %s\n",
- port->full_name);
- } else {
- DRM_DEBUG_DRIVER("panel missing\n");
- of_node_put(entity);
- }
- }
- }
-
- return panel;
-}
-
int ltdc_load(struct drm_device *ddev)
{
struct platform_device *pdev = to_platform_device(ddev->dev);
struct ltdc_device *ldev = ddev->dev_private;
struct device *dev = ddev->dev;
struct device_node *np = dev->of_node;
- struct drm_encoder *encoder;
- struct drm_connector *connector = NULL;
+ struct drm_bridge *bridge;
+ struct drm_panel *panel;
struct drm_crtc *crtc;
struct reset_control *rstc;
- struct resource res;
+ struct resource *res;
int irq, ret, i;
DRM_DEBUG_DRIVER("\n");
- ldev->panel = ltdc_get_panel(ddev);
- if (!ldev->panel)
- return -EPROBE_DEFER;
+ ret = drm_of_find_panel_or_bridge(np, 0, 0, &panel, &bridge);
+ if (ret)
+ return ret;
- rstc = of_reset_control_get(np, NULL);
+ rstc = devm_reset_control_get_exclusive(dev, NULL);
mutex_init(&ldev->err_lock);
@@ -1041,15 +885,18 @@ int ltdc_load(struct drm_device *ddev)
return -ENODEV;
}
- if (of_address_to_resource(np, 0, &res)) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
DRM_ERROR("Unable to get resource\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto err;
}
- ldev->regs = devm_ioremap_resource(dev, &res);
+ ldev->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(ldev->regs)) {
DRM_ERROR("Unable to get ltdc registers\n");
- return PTR_ERR(ldev->regs);
+ ret = PTR_ERR(ldev->regs);
+ goto err;
}
for (i = 0; i < MAX_IRQ; i++) {
@@ -1062,7 +909,7 @@ int ltdc_load(struct drm_device *ddev)
dev_name(dev), ddev);
if (ret) {
DRM_ERROR("Failed to register LTDC interrupt\n");
- return ret;
+ goto err;
}
}
@@ -1077,33 +924,27 @@ int ltdc_load(struct drm_device *ddev)
if (ret) {
DRM_ERROR("hardware identifier (0x%08x) not supported!\n",
ldev->caps.hw_version);
- return ret;
+ goto err;
}
DRM_INFO("ltdc hw version 0x%08x - ready\n", ldev->caps.hw_version);
- if (ldev->panel) {
- encoder = ltdc_rgb_encoder_create(ddev);
- if (!encoder) {
- DRM_ERROR("Failed to create RGB encoder\n");
- ret = -EINVAL;
+ if (panel) {
+ bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(bridge)) {
+ DRM_ERROR("Failed to create panel-bridge\n");
+ ret = PTR_ERR(bridge);
goto err;
}
+ ldev->is_panel_bridge = true;
+ }
- connector = ltdc_rgb_connector_create(ddev);
- if (!connector) {
- DRM_ERROR("Failed to create RGB connector\n");
- ret = -EINVAL;
- goto err;
- }
+ ldev->bridge = bridge;
- ret = drm_mode_connector_attach_encoder(connector, encoder);
- if (ret) {
- DRM_ERROR("Failed to attach connector to encoder\n");
- goto err;
- }
-
- drm_panel_attach(ldev->panel, connector);
+ ret = ltdc_encoder_init(ddev);
+ if (ret) {
+ DRM_ERROR("Failed to init encoder\n");
+ goto err;
}
crtc = devm_kzalloc(dev, sizeof(*crtc), GFP_KERNEL);
@@ -1129,9 +970,10 @@ int ltdc_load(struct drm_device *ddev)
ddev->irq_enabled = 1;
return 0;
+
err:
- if (ldev->panel)
- drm_panel_detach(ldev->panel);
+ if (ldev->is_panel_bridge)
+ drm_panel_bridge_remove(bridge);
clk_disable_unprepare(ldev->pixel_clk);
@@ -1144,8 +986,8 @@ void ltdc_unload(struct drm_device *ddev)
DRM_DEBUG_DRIVER("\n");
- if (ldev->panel)
- drm_panel_detach(ldev->panel);
+ if (ldev->is_panel_bridge)
+ drm_panel_bridge_remove(ldev->bridge);
clk_disable_unprepare(ldev->pixel_clk);
}
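Note on the LTDC register defines reworked above: each layer register is a base offset plus REG_OFS (0 or 4, depending on whether the hardware revision inserts the "Layer Conf. 2" register) plus one LAY_OFS (0x80) stride per additional layer, which the driver applies through the lofs layer offset used in ltdc_plane_atomic_update(). A small standalone sketch of that addressing, using only the offsets shown in the diff:

#include <stdio.h>

#define LAY_OFS      0x80  /* register stride between two layers */
#define REG_OFS_NONE 0
#define REG_OFS_4    4     /* hw revisions with the "Layer Conf. 2" register */

/* Address of a layer register such as LxCR, whose layer-1 offset is 0x84 */
static unsigned int lxcr(unsigned int layer, unsigned int reg_ofs)
{
	return 0x84 + reg_ofs + (layer - 1) * LAY_OFS;
}

int main(void)
{
	/* Layer 1 and layer 2 control registers on both register layouts */
	printf("no conf2: L1CR=0x%x L2CR=0x%x\n",
	       lxcr(1, REG_OFS_NONE), lxcr(2, REG_OFS_NONE));
	printf("conf2:    L1CR=0x%x L2CR=0x%x\n",
	       lxcr(1, REG_OFS_4), lxcr(2, REG_OFS_4));
	return 0;
}
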
diff --git a/drivers/gpu/drm/stm/ltdc.h b/drivers/gpu/drm/stm/ltdc.h
index d7a9c736ac1e..bc6d6f6419a9 100644
--- a/drivers/gpu/drm/stm/ltdc.h
+++ b/drivers/gpu/drm/stm/ltdc.h
@@ -24,10 +24,10 @@ struct ltdc_device {
struct drm_fbdev_cma *fbdev;
void __iomem *regs;
struct clk *pixel_clk; /* lcd pixel clock */
- struct drm_panel *panel;
+ struct drm_bridge *bridge;
+ bool is_panel_bridge;
struct mutex err_lock; /* protecting error_status */
struct ltdc_caps caps;
- u32 clut[256]; /* color look up table */
u32 error_status;
u32 irq_status;
};
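Note on the polarity handling in ltdc_crtc_mode_set_nofb() above: with the HSPOL_AL/VSPOL_AL/DEPOL_AL/PCPOL_IPC aliases removed, the register value simply starts at 0 (all sync signals active low, input pixel clock not inverted) and the GCR_* bits are OR-ed in per display flag. A standalone sketch with a hypothetical set of flags, for illustration only:

#include <stdio.h>

/* GCR polarity bits as defined in the diff above */
#define GCR_PCPOL (1u << 28)  /* pixel clock inverted */
#define GCR_DEPOL (1u << 29)  /* data enable active high */
#define GCR_VSPOL (1u << 30)  /* vertical sync active high */
#define GCR_HSPOL (1u << 31)  /* horizontal sync active high */

int main(void)
{
	/* Hypothetical mode: HSYNC high, VSYNC low, DE high, pixel data
	 * sampled on the falling edge. */
	int hsync_high = 1, vsync_high = 0, de_high = 1, pixdata_negedge = 1;

	unsigned int val = 0;  /* everything defaults to active low */
	if (hsync_high)
		val |= GCR_HSPOL;
	if (vsync_high)
		val |= GCR_VSPOL;
	if (de_high)
		val |= GCR_DEPOL;
	if (pixdata_negedge)
		val |= GCR_PCPOL;

	printf("GCR polarity field: 0x%08x\n", val);  /* prints 0xb0000000 */
	return 0;
}
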
diff --git a/drivers/gpu/drm/sun4i/Kconfig b/drivers/gpu/drm/sun4i/Kconfig
index 5bcad8f5fb4f..06f05302ee75 100644
--- a/drivers/gpu/drm/sun4i/Kconfig
+++ b/drivers/gpu/drm/sun4i/Kconfig
@@ -13,17 +13,26 @@ config DRM_SUN4I
Display Engine. If M is selected the module will be called
sun4i-drm.
+if DRM_SUN4I
+
config DRM_SUN4I_HDMI
tristate "Allwinner A10 HDMI Controller Support"
- depends on DRM_SUN4I
default DRM_SUN4I
help
Choose this option if you have an Allwinner SoC with an HDMI
controller.
+config DRM_SUN4I_HDMI_CEC
+ bool "Allwinner A10 HDMI CEC Support"
+ depends on DRM_SUN4I_HDMI
+ select CEC_CORE
+ depends on CEC_PIN
+ help
+ Choose this option if you have an Allwinner SoC with an HDMI
+ controller and want to use CEC.
+
config DRM_SUN4I_BACKEND
tristate "Support for Allwinner A10 Display Engine Backend"
- depends on DRM_SUN4I
default DRM_SUN4I
help
Choose this option if you have an Allwinner SoC with the
@@ -33,10 +42,11 @@ config DRM_SUN4I_BACKEND
config DRM_SUN8I_MIXER
tristate "Support for Allwinner Display Engine 2.0 Mixer"
- depends on DRM_SUN4I
default MACH_SUN8I
help
Choose this option if you have an Allwinner SoC with the
Allwinner Display Engine 2.0, which has a mixer to do some
graphics mixture and feed graphics to TCON, If M is
selected the module will be called sun8i-mixer.
+
+endif
diff --git a/drivers/gpu/drm/sun4i/Makefile b/drivers/gpu/drm/sun4i/Makefile
index e29fd3a2ba9c..43c753cafc88 100644
--- a/drivers/gpu/drm/sun4i/Makefile
+++ b/drivers/gpu/drm/sun4i/Makefile
@@ -2,6 +2,7 @@ sun4i-drm-y += sun4i_drv.o
sun4i-drm-y += sun4i_framebuffer.o
sun4i-drm-hdmi-y += sun4i_hdmi_enc.o
+sun4i-drm-hdmi-y += sun4i_hdmi_i2c.o
sun4i-drm-hdmi-y += sun4i_hdmi_ddc_clk.o
sun4i-drm-hdmi-y += sun4i_hdmi_tmds_clk.o
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index cf480218daa5..ec5943627aa5 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -312,7 +312,7 @@ static int sun4i_backend_of_get_id(struct device_node *node)
struct device_node *remote;
u32 reg;
- remote = of_parse_phandle(ep, "remote-endpoint", 0);
+ remote = of_graph_get_remote_endpoint(ep);
if (!remote)
continue;
diff --git a/drivers/gpu/drm/sun4i/sun4i_crtc.c b/drivers/gpu/drm/sun4i/sun4i_crtc.c
index f8c70439d1e2..d097c6f93ad0 100644
--- a/drivers/gpu/drm/sun4i/sun4i_crtc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_crtc.c
@@ -69,7 +69,8 @@ static void sun4i_crtc_atomic_flush(struct drm_crtc *crtc,
}
}
-static void sun4i_crtc_disable(struct drm_crtc *crtc)
+static void sun4i_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
@@ -86,7 +87,8 @@ static void sun4i_crtc_disable(struct drm_crtc *crtc)
}
}
-static void sun4i_crtc_enable(struct drm_crtc *crtc)
+static void sun4i_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
@@ -98,8 +100,8 @@ static void sun4i_crtc_enable(struct drm_crtc *crtc)
static const struct drm_crtc_helper_funcs sun4i_crtc_helper_funcs = {
.atomic_begin = sun4i_crtc_atomic_begin,
.atomic_flush = sun4i_crtc_atomic_flush,
- .disable = sun4i_crtc_disable,
- .enable = sun4i_crtc_enable,
+ .atomic_enable = sun4i_crtc_atomic_enable,
+ .atomic_disable = sun4i_crtc_atomic_disable,
};
static int sun4i_crtc_enable_vblank(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index abc7d8fe06b4..ace59651892f 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -25,12 +25,20 @@
#include "sun4i_framebuffer.h"
#include "sun4i_tcon.h"
+static void sun4i_drv_lastclose(struct drm_device *dev)
+{
+ struct sun4i_drv *drv = dev->dev_private;
+
+ drm_fbdev_cma_restore_mode(drv->fbdev);
+}
+
DEFINE_DRM_GEM_CMA_FOPS(sun4i_drv_fops);
static struct drm_driver sun4i_drv_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
/* Generic Operations */
+ .lastclose = sun4i_drv_lastclose,
.fops = &sun4i_drv_fops,
.name = "sun4i-drm",
.desc = "Allwinner sun4i Display Engine",
@@ -40,8 +48,6 @@ static struct drm_driver sun4i_drv_driver = {
/* GEM Operations */
.dumb_create = drm_gem_cma_dumb_create,
- .dumb_destroy = drm_gem_dumb_destroy,
- .dumb_map_offset = drm_gem_cma_dumb_map_offset,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
@@ -187,9 +193,9 @@ static bool sun4i_drv_node_is_tcon(struct device_node *node)
static int compare_of(struct device *dev, void *data)
{
- DRM_DEBUG_DRIVER("Comparing of node %s with %s\n",
- of_node_full_name(dev->of_node),
- of_node_full_name(data));
+ DRM_DEBUG_DRIVER("Comparing of node %pOF with %pOF\n",
+ dev->of_node,
+ data);
return dev->of_node == data;
}
@@ -219,8 +225,7 @@ static int sun4i_drv_add_endpoints(struct device *dev,
if (!sun4i_drv_node_is_frontend(node)) {
/* Add current component */
- DRM_DEBUG_DRIVER("Adding component %s\n",
- of_node_full_name(node));
+ DRM_DEBUG_DRIVER("Adding component %pOF\n", node);
drm_of_component_match_add(dev, match, compare_of, node);
count++;
}
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi.h b/drivers/gpu/drm/sun4i/sun4i_hdmi.h
index 2f2f2ff1ea63..1457750988da 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi.h
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi.h
@@ -15,6 +15,8 @@
#include <drm/drm_connector.h>
#include <drm/drm_encoder.h>
+#include <media/cec.h>
+
#define SUN4I_HDMI_CTRL_REG 0x004
#define SUN4I_HDMI_CTRL_ENABLE BIT(31)
@@ -86,6 +88,11 @@
#define SUN4I_HDMI_PLL_DBG0_TMDS_PARENT_MASK BIT(21)
#define SUN4I_HDMI_PLL_DBG0_TMDS_PARENT_SHIFT 21
+#define SUN4I_HDMI_CEC 0x214
+#define SUN4I_HDMI_CEC_ENABLE BIT(11)
+#define SUN4I_HDMI_CEC_TX BIT(9)
+#define SUN4I_HDMI_CEC_RX BIT(8)
+
#define SUN4I_HDMI_PKT_CTRL_REG(n) (0x2f0 + (4 * (n)))
#define SUN4I_HDMI_PKT_CTRL_TYPE(n, t) ((t) << (((n) % 4) * 4))
@@ -96,6 +103,7 @@
#define SUN4I_HDMI_DDC_CTRL_ENABLE BIT(31)
#define SUN4I_HDMI_DDC_CTRL_START_CMD BIT(30)
#define SUN4I_HDMI_DDC_CTRL_FIFO_DIR_MASK BIT(8)
+#define SUN4I_HDMI_DDC_CTRL_FIFO_DIR_WRITE (1 << 8)
#define SUN4I_HDMI_DDC_CTRL_FIFO_DIR_READ (0 << 8)
#define SUN4I_HDMI_DDC_CTRL_RESET BIT(0)
@@ -105,14 +113,34 @@
#define SUN4I_HDMI_DDC_ADDR_OFFSET(off) (((off) & 0xff) << 8)
#define SUN4I_HDMI_DDC_ADDR_SLAVE(addr) ((addr) & 0xff)
+#define SUN4I_HDMI_DDC_INT_STATUS_REG 0x50c
+#define SUN4I_HDMI_DDC_INT_STATUS_ILLEGAL_FIFO_OPERATION BIT(7)
+#define SUN4I_HDMI_DDC_INT_STATUS_DDC_RX_FIFO_UNDERFLOW BIT(6)
+#define SUN4I_HDMI_DDC_INT_STATUS_DDC_TX_FIFO_OVERFLOW BIT(5)
+#define SUN4I_HDMI_DDC_INT_STATUS_FIFO_REQUEST BIT(4)
+#define SUN4I_HDMI_DDC_INT_STATUS_ARBITRATION_ERROR BIT(3)
+#define SUN4I_HDMI_DDC_INT_STATUS_ACK_ERROR BIT(2)
+#define SUN4I_HDMI_DDC_INT_STATUS_BUS_ERROR BIT(1)
+#define SUN4I_HDMI_DDC_INT_STATUS_TRANSFER_COMPLETE BIT(0)
+
#define SUN4I_HDMI_DDC_FIFO_CTRL_REG 0x510
#define SUN4I_HDMI_DDC_FIFO_CTRL_CLEAR BIT(31)
+#define SUN4I_HDMI_DDC_FIFO_CTRL_RX_THRES(n) (((n) & 0xf) << 4)
+#define SUN4I_HDMI_DDC_FIFO_CTRL_RX_THRES_MASK GENMASK(7, 4)
+#define SUN4I_HDMI_DDC_FIFO_CTRL_RX_THRES_MAX (BIT(4) - 1)
+#define SUN4I_HDMI_DDC_FIFO_CTRL_TX_THRES(n) ((n) & 0xf)
+#define SUN4I_HDMI_DDC_FIFO_CTRL_TX_THRES_MASK GENMASK(3, 0)
+#define SUN4I_HDMI_DDC_FIFO_CTRL_TX_THRES_MAX (BIT(4) - 1)
#define SUN4I_HDMI_DDC_FIFO_DATA_REG 0x518
+
#define SUN4I_HDMI_DDC_BYTE_COUNT_REG 0x51c
+#define SUN4I_HDMI_DDC_BYTE_COUNT_MAX (BIT(10) - 1)
#define SUN4I_HDMI_DDC_CMD_REG 0x520
#define SUN4I_HDMI_DDC_CMD_EXPLICIT_EDDC_READ 6
+#define SUN4I_HDMI_DDC_CMD_IMPLICIT_READ 5
+#define SUN4I_HDMI_DDC_CMD_IMPLICIT_WRITE 3
#define SUN4I_HDMI_DDC_CLK_REG 0x528
#define SUN4I_HDMI_DDC_CLK_M(m) (((m) & 0x7) << 3)
@@ -146,12 +174,16 @@ struct sun4i_hdmi {
struct clk *ddc_clk;
struct clk *tmds_clk;
+ struct i2c_adapter *i2c;
+
struct sun4i_drv *drv;
bool hdmi_monitor;
+ struct cec_adapter *cec_adap;
};
int sun4i_ddc_create(struct sun4i_hdmi *hdmi, struct clk *clk);
int sun4i_tmds_create(struct sun4i_hdmi *hdmi);
+int sun4i_hdmi_i2c_create(struct device *dev, struct sun4i_hdmi *hdmi);
#endif /* _SUN4I_HDMI_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index d3398f6250ef..9ea6cd5a1370 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -29,8 +29,6 @@
#include "sun4i_hdmi.h"
#include "sun4i_tcon.h"
-#define DDC_SEGMENT_ADDR 0x30
-
static inline struct sun4i_hdmi *
drm_encoder_to_sun4i_hdmi(struct drm_encoder *encoder)
{
@@ -52,7 +50,7 @@ static int sun4i_hdmi_setup_avi_infoframes(struct sun4i_hdmi *hdmi,
u8 buffer[17];
int i, ret;
- ret = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
if (ret < 0) {
DRM_ERROR("Failed to get infoframes from mode\n");
return ret;
@@ -184,93 +182,13 @@ static const struct drm_encoder_funcs sun4i_hdmi_funcs = {
.destroy = drm_encoder_cleanup,
};
-static int sun4i_hdmi_read_sub_block(struct sun4i_hdmi *hdmi,
- unsigned int blk, unsigned int offset,
- u8 *buf, unsigned int count)
-{
- unsigned long reg;
- int i;
-
- reg = readl(hdmi->base + SUN4I_HDMI_DDC_CTRL_REG);
- reg &= ~SUN4I_HDMI_DDC_CTRL_FIFO_DIR_MASK;
- writel(reg | SUN4I_HDMI_DDC_CTRL_FIFO_DIR_READ,
- hdmi->base + SUN4I_HDMI_DDC_CTRL_REG);
-
- writel(SUN4I_HDMI_DDC_ADDR_SEGMENT(offset >> 8) |
- SUN4I_HDMI_DDC_ADDR_EDDC(DDC_SEGMENT_ADDR << 1) |
- SUN4I_HDMI_DDC_ADDR_OFFSET(offset) |
- SUN4I_HDMI_DDC_ADDR_SLAVE(DDC_ADDR),
- hdmi->base + SUN4I_HDMI_DDC_ADDR_REG);
-
- reg = readl(hdmi->base + SUN4I_HDMI_DDC_FIFO_CTRL_REG);
- writel(reg | SUN4I_HDMI_DDC_FIFO_CTRL_CLEAR,
- hdmi->base + SUN4I_HDMI_DDC_FIFO_CTRL_REG);
-
- writel(count, hdmi->base + SUN4I_HDMI_DDC_BYTE_COUNT_REG);
- writel(SUN4I_HDMI_DDC_CMD_EXPLICIT_EDDC_READ,
- hdmi->base + SUN4I_HDMI_DDC_CMD_REG);
-
- reg = readl(hdmi->base + SUN4I_HDMI_DDC_CTRL_REG);
- writel(reg | SUN4I_HDMI_DDC_CTRL_START_CMD,
- hdmi->base + SUN4I_HDMI_DDC_CTRL_REG);
-
- if (readl_poll_timeout(hdmi->base + SUN4I_HDMI_DDC_CTRL_REG, reg,
- !(reg & SUN4I_HDMI_DDC_CTRL_START_CMD),
- 100, 100000))
- return -EIO;
-
- for (i = 0; i < count; i++)
- buf[i] = readb(hdmi->base + SUN4I_HDMI_DDC_FIFO_DATA_REG);
-
- return 0;
-}
-
-static int sun4i_hdmi_read_edid_block(void *data, u8 *buf, unsigned int blk,
- size_t length)
-{
- struct sun4i_hdmi *hdmi = data;
- int retry = 2, i;
-
- do {
- for (i = 0; i < length; i += SUN4I_HDMI_DDC_FIFO_SIZE) {
- unsigned char offset = blk * EDID_LENGTH + i;
- unsigned int count = min((unsigned int)SUN4I_HDMI_DDC_FIFO_SIZE,
- length - i);
- int ret;
-
- ret = sun4i_hdmi_read_sub_block(hdmi, blk, offset,
- buf + i, count);
- if (ret)
- return ret;
- }
- } while (!drm_edid_block_valid(buf, blk, true, NULL) && (retry--));
-
- return 0;
-}
-
static int sun4i_hdmi_get_modes(struct drm_connector *connector)
{
struct sun4i_hdmi *hdmi = drm_connector_to_sun4i_hdmi(connector);
- unsigned long reg;
struct edid *edid;
int ret;
- /* Reset i2c controller */
- writel(SUN4I_HDMI_DDC_CTRL_ENABLE | SUN4I_HDMI_DDC_CTRL_RESET,
- hdmi->base + SUN4I_HDMI_DDC_CTRL_REG);
- if (readl_poll_timeout(hdmi->base + SUN4I_HDMI_DDC_CTRL_REG, reg,
- !(reg & SUN4I_HDMI_DDC_CTRL_RESET),
- 100, 2000))
- return -EIO;
-
- writel(SUN4I_HDMI_DDC_LINE_CTRL_SDA_ENABLE |
- SUN4I_HDMI_DDC_LINE_CTRL_SCL_ENABLE,
- hdmi->base + SUN4I_HDMI_DDC_LINE_CTRL_REG);
-
- clk_prepare_enable(hdmi->ddc_clk);
- clk_set_rate(hdmi->ddc_clk, 100000);
-
- edid = drm_do_get_edid(connector, sun4i_hdmi_read_edid_block, hdmi);
+ edid = drm_get_edid(connector, hdmi->i2c);
if (!edid)
return 0;
@@ -279,11 +197,10 @@ static int sun4i_hdmi_get_modes(struct drm_connector *connector)
hdmi->hdmi_monitor ? "an HDMI" : "a DVI");
drm_mode_connector_update_edid_property(connector, edid);
+ cec_s_phys_addr_from_edid(hdmi->cec_adap, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
- clk_disable_unprepare(hdmi->ddc_clk);
-
return ret;
}
@@ -299,14 +216,15 @@ sun4i_hdmi_connector_detect(struct drm_connector *connector, bool force)
if (readl_poll_timeout(hdmi->base + SUN4I_HDMI_HPD_REG, reg,
reg & SUN4I_HDMI_HPD_HIGH,
- 0, 500000))
+ 0, 500000)) {
+ cec_phys_addr_invalidate(hdmi->cec_adap);
return connector_status_disconnected;
+ }
return connector_status_connected;
}
static const struct drm_connector_funcs sun4i_hdmi_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.detect = sun4i_hdmi_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
@@ -315,6 +233,40 @@ static const struct drm_connector_funcs sun4i_hdmi_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
+#ifdef CONFIG_DRM_SUN4I_HDMI_CEC
+static bool sun4i_hdmi_cec_pin_read(struct cec_adapter *adap)
+{
+ struct sun4i_hdmi *hdmi = cec_get_drvdata(adap);
+
+ return readl(hdmi->base + SUN4I_HDMI_CEC) & SUN4I_HDMI_CEC_RX;
+}
+
+static void sun4i_hdmi_cec_pin_low(struct cec_adapter *adap)
+{
+ struct sun4i_hdmi *hdmi = cec_get_drvdata(adap);
+
+ /* Start driving the CEC pin low */
+ writel(SUN4I_HDMI_CEC_ENABLE, hdmi->base + SUN4I_HDMI_CEC);
+}
+
+static void sun4i_hdmi_cec_pin_high(struct cec_adapter *adap)
+{
+ struct sun4i_hdmi *hdmi = cec_get_drvdata(adap);
+
+ /*
+ * Stop driving the CEC pin, the pull up will take over
+ * unless another CEC device is driving the pin low.
+ */
+ writel(0, hdmi->base + SUN4I_HDMI_CEC);
+}
+
+static const struct cec_pin_ops sun4i_hdmi_cec_pin_ops = {
+ .read = sun4i_hdmi_cec_pin_read,
+ .low = sun4i_hdmi_cec_pin_low,
+ .high = sun4i_hdmi_cec_pin_high,
+};
+#endif
+
static int sun4i_hdmi_bind(struct device *dev, struct device *master,
void *data)
{
@@ -407,9 +359,9 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
SUN4I_HDMI_PLL_CTRL_PLL_EN;
writel(reg, hdmi->base + SUN4I_HDMI_PLL_CTRL_REG);
- ret = sun4i_ddc_create(hdmi, hdmi->tmds_clk);
+ ret = sun4i_hdmi_i2c_create(dev, hdmi);
if (ret) {
- dev_err(dev, "Couldn't create the DDC clock\n");
+ dev_err(dev, "Couldn't create the HDMI I2C adapter\n");
return ret;
}
@@ -422,13 +374,26 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
NULL);
if (ret) {
dev_err(dev, "Couldn't initialise the HDMI encoder\n");
- return ret;
+ goto err_del_i2c_adapter;
}
hdmi->encoder.possible_crtcs = drm_of_find_possible_crtcs(drm,
dev->of_node);
- if (!hdmi->encoder.possible_crtcs)
- return -EPROBE_DEFER;
+ if (!hdmi->encoder.possible_crtcs) {
+ ret = -EPROBE_DEFER;
+ goto err_del_i2c_adapter;
+ }
+
+#ifdef CONFIG_DRM_SUN4I_HDMI_CEC
+ hdmi->cec_adap = cec_pin_allocate_adapter(&sun4i_hdmi_cec_pin_ops,
+ hdmi, "sun4i", CEC_CAP_TRANSMIT | CEC_CAP_LOG_ADDRS |
+ CEC_CAP_PASSTHROUGH | CEC_CAP_RC);
+ ret = PTR_ERR_OR_ZERO(hdmi->cec_adap);
+ if (ret < 0)
+ goto err_cleanup_connector;
+ writel(readl(hdmi->base + SUN4I_HDMI_CEC) & ~SUN4I_HDMI_CEC_TX,
+ hdmi->base + SUN4I_HDMI_CEC);
+#endif
drm_connector_helper_add(&hdmi->connector,
&sun4i_hdmi_connector_helper_funcs);
@@ -445,12 +410,18 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
hdmi->connector.polled = DRM_CONNECTOR_POLL_CONNECT |
DRM_CONNECTOR_POLL_DISCONNECT;
+ ret = cec_register_adapter(hdmi->cec_adap, dev);
+ if (ret < 0)
+ goto err_cleanup_connector;
drm_mode_connector_attach_encoder(&hdmi->connector, &hdmi->encoder);
return 0;
err_cleanup_connector:
+ cec_delete_adapter(hdmi->cec_adap);
drm_encoder_cleanup(&hdmi->encoder);
+err_del_i2c_adapter:
+ i2c_del_adapter(hdmi->i2c);
return ret;
}
@@ -459,8 +430,10 @@ static void sun4i_hdmi_unbind(struct device *dev, struct device *master,
{
struct sun4i_hdmi *hdmi = dev_get_drvdata(dev);
+ cec_unregister_adapter(hdmi->cec_adap);
drm_connector_cleanup(&hdmi->connector);
drm_encoder_cleanup(&hdmi->encoder);
+ i2c_del_adapter(hdmi->i2c);
}
static const struct component_ops sun4i_hdmi_ops = {
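Note on the CEC pin ops added above: they model an open-drain, wired-AND line. The adapter either drives the pin low or releases it and lets the board pull-up restore the high level, and the cec-pin framework bit-bangs the CEC protocol on top of these three callbacks. A minimal illustrative sketch (not part of the patch; all names are made up) of that wired-AND behaviour:

#include <stdbool.h>
#include <stdio.h>

/* The bus sits high only while every device has released the line. */
static bool cec_line_level(const bool drives_low[], int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (drives_low[i])
			return false;	/* any device driving low wins */
	return true;			/* pull-up takes over */
}

int main(void)
{
	bool devices[2] = { false, true };

	printf("CEC line is %s\n",
	       cec_line_level(devices, 2) ? "high" : "low");
	return 0;
}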
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c
new file mode 100644
index 000000000000..2e42d09ab42e
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c
@@ -0,0 +1,220 @@
+/*
+ * Copyright (C) 2016 Maxime Ripard <maxime.ripard@free-electrons.com>
+ * Copyright (C) 2017 Jonathan Liu <net147@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/i2c.h>
+#include <linux/iopoll.h>
+
+#include "sun4i_hdmi.h"
+
+#define SUN4I_HDMI_DDC_INT_STATUS_ERROR_MASK ( \
+ SUN4I_HDMI_DDC_INT_STATUS_ILLEGAL_FIFO_OPERATION | \
+ SUN4I_HDMI_DDC_INT_STATUS_DDC_RX_FIFO_UNDERFLOW | \
+ SUN4I_HDMI_DDC_INT_STATUS_DDC_TX_FIFO_OVERFLOW | \
+ SUN4I_HDMI_DDC_INT_STATUS_ARBITRATION_ERROR | \
+ SUN4I_HDMI_DDC_INT_STATUS_ACK_ERROR | \
+ SUN4I_HDMI_DDC_INT_STATUS_BUS_ERROR \
+)
+
+/* FIFO request bit is set when FIFO level is above RX_THRESHOLD during read */
+#define RX_THRESHOLD SUN4I_HDMI_DDC_FIFO_CTRL_RX_THRES_MAX
+/* FIFO request bit is set when FIFO level is below TX_THRESHOLD during write */
+#define TX_THRESHOLD 1
+
+static int fifo_transfer(struct sun4i_hdmi *hdmi, u8 *buf, int len, bool read)
+{
+ /*
+ * One byte takes 9 clock cycles (8 bits + 1 ACK) = 90 us for a 100 kHz
+ * clock. As the clock rate is fixed, just round it up to 100 us.
+ */
+ const unsigned long byte_time_ns = 100;
+ const u32 mask = SUN4I_HDMI_DDC_INT_STATUS_ERROR_MASK |
+ SUN4I_HDMI_DDC_INT_STATUS_FIFO_REQUEST |
+ SUN4I_HDMI_DDC_INT_STATUS_TRANSFER_COMPLETE;
+ u32 reg;
+
+ /* Limit transfer length by FIFO threshold */
+ len = min_t(int, len, read ? (RX_THRESHOLD + 1) :
+ (SUN4I_HDMI_DDC_FIFO_SIZE - TX_THRESHOLD + 1));
+
+ /* Wait until error, FIFO request bit set or transfer complete */
+ if (readl_poll_timeout(hdmi->base + SUN4I_HDMI_DDC_INT_STATUS_REG, reg,
+ reg & mask, len * byte_time_ns, 100000))
+ return -ETIMEDOUT;
+
+ if (reg & SUN4I_HDMI_DDC_INT_STATUS_ERROR_MASK)
+ return -EIO;
+
+ if (read)
+ readsb(hdmi->base + SUN4I_HDMI_DDC_FIFO_DATA_REG, buf, len);
+ else
+ writesb(hdmi->base + SUN4I_HDMI_DDC_FIFO_DATA_REG, buf, len);
+
+ /* Clear FIFO request bit */
+ writel(SUN4I_HDMI_DDC_INT_STATUS_FIFO_REQUEST,
+ hdmi->base + SUN4I_HDMI_DDC_INT_STATUS_REG);
+
+ return len;
+}
+
+static int xfer_msg(struct sun4i_hdmi *hdmi, struct i2c_msg *msg)
+{
+ int i, len;
+ u32 reg;
+
+ /* Set FIFO direction */
+ reg = readl(hdmi->base + SUN4I_HDMI_DDC_CTRL_REG);
+ reg &= ~SUN4I_HDMI_DDC_CTRL_FIFO_DIR_MASK;
+ reg |= (msg->flags & I2C_M_RD) ?
+ SUN4I_HDMI_DDC_CTRL_FIFO_DIR_READ :
+ SUN4I_HDMI_DDC_CTRL_FIFO_DIR_WRITE;
+ writel(reg, hdmi->base + SUN4I_HDMI_DDC_CTRL_REG);
+
+ /* Set I2C address */
+ writel(SUN4I_HDMI_DDC_ADDR_SLAVE(msg->addr),
+ hdmi->base + SUN4I_HDMI_DDC_ADDR_REG);
+
+ /* Set FIFO RX/TX thresholds and clear FIFO */
+ reg = readl(hdmi->base + SUN4I_HDMI_DDC_FIFO_CTRL_REG);
+ reg |= SUN4I_HDMI_DDC_FIFO_CTRL_CLEAR;
+ reg &= ~SUN4I_HDMI_DDC_FIFO_CTRL_RX_THRES_MASK;
+ reg |= SUN4I_HDMI_DDC_FIFO_CTRL_RX_THRES(RX_THRESHOLD);
+ reg &= ~SUN4I_HDMI_DDC_FIFO_CTRL_TX_THRES_MASK;
+ reg |= SUN4I_HDMI_DDC_FIFO_CTRL_TX_THRES(TX_THRESHOLD);
+ writel(reg, hdmi->base + SUN4I_HDMI_DDC_FIFO_CTRL_REG);
+ if (readl_poll_timeout(hdmi->base + SUN4I_HDMI_DDC_FIFO_CTRL_REG,
+ reg,
+ !(reg & SUN4I_HDMI_DDC_FIFO_CTRL_CLEAR),
+ 100, 2000))
+ return -EIO;
+
+ /* Set transfer length */
+ writel(msg->len, hdmi->base + SUN4I_HDMI_DDC_BYTE_COUNT_REG);
+
+ /* Set command */
+ writel(msg->flags & I2C_M_RD ?
+ SUN4I_HDMI_DDC_CMD_IMPLICIT_READ :
+ SUN4I_HDMI_DDC_CMD_IMPLICIT_WRITE,
+ hdmi->base + SUN4I_HDMI_DDC_CMD_REG);
+
+ /* Clear interrupt status bits */
+ writel(SUN4I_HDMI_DDC_INT_STATUS_ERROR_MASK |
+ SUN4I_HDMI_DDC_INT_STATUS_FIFO_REQUEST |
+ SUN4I_HDMI_DDC_INT_STATUS_TRANSFER_COMPLETE,
+ hdmi->base + SUN4I_HDMI_DDC_INT_STATUS_REG);
+
+ /* Start command */
+ reg = readl(hdmi->base + SUN4I_HDMI_DDC_CTRL_REG);
+ writel(reg | SUN4I_HDMI_DDC_CTRL_START_CMD,
+ hdmi->base + SUN4I_HDMI_DDC_CTRL_REG);
+
+ /* Transfer bytes */
+ for (i = 0; i < msg->len; i += len) {
+ len = fifo_transfer(hdmi, msg->buf + i, msg->len - i,
+ msg->flags & I2C_M_RD);
+ if (len <= 0)
+ return len;
+ }
+
+ /* Wait for command to finish */
+ if (readl_poll_timeout(hdmi->base + SUN4I_HDMI_DDC_CTRL_REG,
+ reg,
+ !(reg & SUN4I_HDMI_DDC_CTRL_START_CMD),
+ 100, 100000))
+ return -EIO;
+
+ /* Check for errors */
+ reg = readl(hdmi->base + SUN4I_HDMI_DDC_INT_STATUS_REG);
+ if ((reg & SUN4I_HDMI_DDC_INT_STATUS_ERROR_MASK) ||
+ !(reg & SUN4I_HDMI_DDC_INT_STATUS_TRANSFER_COMPLETE)) {
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int sun4i_hdmi_i2c_xfer(struct i2c_adapter *adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct sun4i_hdmi *hdmi = i2c_get_adapdata(adap);
+ u32 reg;
+ int err, i, ret = num;
+
+ for (i = 0; i < num; i++) {
+ if (!msgs[i].len)
+ return -EINVAL;
+ if (msgs[i].len > SUN4I_HDMI_DDC_BYTE_COUNT_MAX)
+ return -EINVAL;
+ }
+
+ /* Reset I2C controller */
+ writel(SUN4I_HDMI_DDC_CTRL_ENABLE | SUN4I_HDMI_DDC_CTRL_RESET,
+ hdmi->base + SUN4I_HDMI_DDC_CTRL_REG);
+ if (readl_poll_timeout(hdmi->base + SUN4I_HDMI_DDC_CTRL_REG, reg,
+ !(reg & SUN4I_HDMI_DDC_CTRL_RESET),
+ 100, 2000))
+ return -EIO;
+
+ writel(SUN4I_HDMI_DDC_LINE_CTRL_SDA_ENABLE |
+ SUN4I_HDMI_DDC_LINE_CTRL_SCL_ENABLE,
+ hdmi->base + SUN4I_HDMI_DDC_LINE_CTRL_REG);
+
+ clk_prepare_enable(hdmi->ddc_clk);
+ clk_set_rate(hdmi->ddc_clk, 100000);
+
+ for (i = 0; i < num; i++) {
+ err = xfer_msg(hdmi, &msgs[i]);
+ if (err) {
+ ret = err;
+ break;
+ }
+ }
+
+ clk_disable_unprepare(hdmi->ddc_clk);
+ return ret;
+}
+
+static u32 sun4i_hdmi_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm sun4i_hdmi_i2c_algorithm = {
+ .master_xfer = sun4i_hdmi_i2c_xfer,
+ .functionality = sun4i_hdmi_i2c_func,
+};
+
+int sun4i_hdmi_i2c_create(struct device *dev, struct sun4i_hdmi *hdmi)
+{
+ struct i2c_adapter *adap;
+ int ret = 0;
+
+ ret = sun4i_ddc_create(hdmi, hdmi->tmds_clk);
+ if (ret)
+ return ret;
+
+ adap = devm_kzalloc(dev, sizeof(*adap), GFP_KERNEL);
+ if (!adap)
+ return -ENOMEM;
+
+ adap->owner = THIS_MODULE;
+ adap->class = I2C_CLASS_DDC;
+ adap->algo = &sun4i_hdmi_i2c_algorithm;
+ strlcpy(adap->name, "sun4i_hdmi_i2c adapter", sizeof(adap->name));
+ i2c_set_adapdata(adap, hdmi);
+
+ ret = i2c_add_adapter(adap);
+ if (ret)
+ return ret;
+
+ hdmi->i2c = adap;
+
+ return ret;
+}
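With sun4i_hdmi_i2c_create() the DDC engine is now registered as an ordinary I2C adapter (I2C_CLASS_DDC), which is why the encoder code above can switch from the hand-rolled EDID block reader to drm_get_edid(). A hypothetical userspace sketch of reading the first EDID block through i2c-dev; the bus number and device path are assumptions for illustration only:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/i2c-dev.h>

int main(void)
{
	uint8_t offset = 0, edid[128];
	int fd = open("/dev/i2c-1", O_RDWR);	/* assumed adapter number */

	if (fd < 0)
		return 1;
	if (ioctl(fd, I2C_SLAVE, 0x50) < 0)	/* EDID EEPROM address */
		return 1;
	if (write(fd, &offset, 1) != 1)		/* start reading at offset 0 */
		return 1;
	if (read(fd, edid, sizeof(edid)) != (ssize_t)sizeof(edid))
		return 1;
	printf("EDID header starts with %02x %02x\n", edid[0], edid[1]);
	close(fd);
	return 0;
}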
diff --git a/drivers/gpu/drm/sun4i/sun4i_layer.c b/drivers/gpu/drm/sun4i/sun4i_layer.c
index ead4f9d4c1ee..7bddf12548d3 100644
--- a/drivers/gpu/drm/sun4i/sun4i_layer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_layer.c
@@ -25,12 +25,6 @@ struct sun4i_plane_desc {
uint32_t nformats;
};
-static int sun4i_backend_layer_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
-{
- return 0;
-}
-
static void sun4i_backend_layer_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
@@ -52,8 +46,7 @@ static void sun4i_backend_layer_atomic_update(struct drm_plane *plane,
sun4i_backend_layer_enable(backend, layer->id, true);
}
-static struct drm_plane_helper_funcs sun4i_backend_layer_helper_funcs = {
- .atomic_check = sun4i_backend_layer_atomic_check,
+static const struct drm_plane_helper_funcs sun4i_backend_layer_helper_funcs = {
.atomic_disable = sun4i_backend_layer_atomic_disable,
.atomic_update = sun4i_backend_layer_atomic_update,
};
@@ -115,7 +108,7 @@ static struct sun4i_layer *sun4i_layer_init_one(struct drm_device *drm,
ret = drm_universal_plane_init(drm, &layer->plane, 0,
&sun4i_backend_layer_funcs,
plane->formats, plane->nformats,
- plane->type, NULL);
+ NULL, plane->type, NULL);
if (ret) {
dev_err(drm->dev, "Couldn't initialize layer\n");
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index 422b191faa77..7cd7090ad63a 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -119,8 +119,7 @@ sun4i_rgb_connector_destroy(struct drm_connector *connector)
drm_connector_cleanup(connector);
}
-static struct drm_connector_funcs sun4i_rgb_con_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
+static const struct drm_connector_funcs sun4i_rgb_con_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = sun4i_rgb_connector_destroy,
.reset = drm_atomic_helper_connector_reset,
@@ -128,13 +127,6 @@ static struct drm_connector_funcs sun4i_rgb_con_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int sun4i_rgb_atomic_check(struct drm_encoder *encoder,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- return 0;
-}
-
static void sun4i_rgb_encoder_enable(struct drm_encoder *encoder)
{
struct sun4i_rgb *rgb = drm_encoder_to_sun4i_rgb(encoder);
@@ -182,7 +174,6 @@ static void sun4i_rgb_encoder_mode_set(struct drm_encoder *encoder,
}
static struct drm_encoder_helper_funcs sun4i_rgb_enc_helper_funcs = {
- .atomic_check = sun4i_rgb_atomic_check,
.mode_set = sun4i_rgb_encoder_mode_set,
.disable = sun4i_rgb_encoder_disable,
.enable = sun4i_rgb_encoder_enable,
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index e3c50ecdcd04..552c88ec16be 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -194,8 +194,6 @@ void sun4i_tcon_channel_enable(struct sun4i_tcon *tcon, int channel);
void sun4i_tcon_enable_vblank(struct sun4i_tcon *tcon, bool enable);
/* Mode Related Controls */
-void sun4i_tcon_switch_interlace(struct sun4i_tcon *tcon,
- bool enable);
void sun4i_tcon_set_mux(struct sun4i_tcon *tcon, int channel,
struct drm_encoder *encoder);
void sun4i_tcon0_mode_set(struct sun4i_tcon *tcon,
diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c
index 338b9e5bb2a3..050cfd43c7a0 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tv.c
@@ -341,13 +341,6 @@ static void sun4i_tv_mode_to_drm_mode(const struct tv_mode *tv_mode,
mode->vtotal = mode->vsync_end + tv_mode->vback_porch;
}
-static int sun4i_tv_atomic_check(struct drm_encoder *encoder,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- return 0;
-}
-
static void sun4i_tv_disable(struct drm_encoder *encoder)
{
struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder);
@@ -489,7 +482,6 @@ static void sun4i_tv_mode_set(struct drm_encoder *encoder,
}
static struct drm_encoder_helper_funcs sun4i_tv_helper_funcs = {
- .atomic_check = sun4i_tv_atomic_check,
.disable = sun4i_tv_disable,
.enable = sun4i_tv_enable,
.mode_set = sun4i_tv_mode_set,
@@ -545,8 +537,7 @@ sun4i_tv_comp_connector_destroy(struct drm_connector *connector)
drm_connector_cleanup(connector);
}
-static struct drm_connector_funcs sun4i_tv_comp_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
+static const struct drm_connector_funcs sun4i_tv_comp_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = sun4i_tv_comp_connector_destroy,
.reset = drm_atomic_helper_connector_reset,
diff --git a/drivers/gpu/drm/sun4i/sun8i_layer.c b/drivers/gpu/drm/sun4i/sun8i_layer.c
index e627eeece658..23810ff72684 100644
--- a/drivers/gpu/drm/sun4i/sun8i_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_layer.c
@@ -90,7 +90,7 @@ static struct sun8i_layer *sun8i_layer_init_one(struct drm_device *drm,
ret = drm_universal_plane_init(drm, &layer->plane, 0,
&sun8i_mixer_layer_funcs,
plane->formats, plane->nformats,
- plane->type, NULL);
+ NULL, plane->type, NULL);
if (ret) {
dev_err(drm->dev, "Couldn't initialize layer\n");
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
index c54138c3a376..3a1476818c65 100644
--- a/drivers/gpu/drm/tdfx/tdfx_drv.c
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
@@ -55,7 +55,6 @@ static const struct file_operations tdfx_driver_fops = {
static struct drm_driver driver = {
.driver_features = DRIVER_LEGACY,
- .set_busid = drm_pci_set_busid,
.fops = &tdfx_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -72,12 +71,12 @@ static struct pci_driver tdfx_pci_driver = {
static int __init tdfx_init(void)
{
- return drm_pci_init(&driver, &tdfx_pci_driver);
+ return drm_legacy_pci_init(&driver, &tdfx_pci_driver);
}
static void __exit tdfx_exit(void)
{
- drm_pci_exit(&driver, &tdfx_pci_driver);
+ drm_legacy_pci_exit(&driver, &tdfx_pci_driver);
}
module_init(tdfx_init);
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index 2db29d67193d..dc58ab140151 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -3,6 +3,7 @@ config DRM_TEGRA
depends on ARCH_TEGRA || (ARM && COMPILE_TEST)
depends on COMMON_CLK
depends on DRM
+ depends on OF
select DRM_KMS_HELPER
select DRM_MIPI_DSI
select DRM_PANEL
diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile
index 6af3a9ad6565..8927784396e8 100644
--- a/drivers/gpu/drm/tegra/Makefile
+++ b/drivers/gpu/drm/tegra/Makefile
@@ -17,4 +17,6 @@ tegra-drm-y := \
falcon.o \
vic.o
+tegra-drm-y += trace.o
+
obj-$(CONFIG_DRM_TEGRA) += tegra-drm.o
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index c875f11786b9..4df39112e38e 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -678,8 +678,8 @@ static struct drm_plane *tegra_dc_primary_plane_create(struct drm_device *drm,
err = drm_universal_plane_init(drm, &plane->base, possible_crtcs,
&tegra_primary_plane_funcs, formats,
- num_formats, DRM_PLANE_TYPE_PRIMARY,
- NULL);
+ num_formats, NULL,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
if (err < 0) {
kfree(plane);
return ERR_PTR(err);
@@ -844,8 +844,8 @@ static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm,
err = drm_universal_plane_init(drm, &plane->base, 1 << dc->pipe,
&tegra_cursor_plane_funcs, formats,
- num_formats, DRM_PLANE_TYPE_CURSOR,
- NULL);
+ num_formats, NULL,
+ DRM_PLANE_TYPE_CURSOR, NULL);
if (err < 0) {
kfree(plane);
return ERR_PTR(err);
@@ -906,8 +906,8 @@ static struct drm_plane *tegra_dc_overlay_plane_create(struct drm_device *drm,
err = drm_universal_plane_init(drm, &plane->base, 1 << dc->pipe,
&tegra_overlay_plane_funcs, formats,
- num_formats, DRM_PLANE_TYPE_OVERLAY,
- NULL);
+ num_formats, NULL,
+ DRM_PLANE_TYPE_OVERLAY, NULL);
if (err < 0) {
kfree(plane);
return ERR_PTR(err);
@@ -1199,7 +1199,8 @@ static int tegra_dc_wait_idle(struct tegra_dc *dc, unsigned long timeout)
return -ETIMEDOUT;
}
-static void tegra_crtc_disable(struct drm_crtc *crtc)
+static void tegra_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct tegra_dc *dc = to_tegra_dc(crtc);
u32 value;
@@ -1243,7 +1244,8 @@ static void tegra_crtc_disable(struct drm_crtc *crtc)
pm_runtime_put_sync(dc->dev);
}
-static void tegra_crtc_enable(struct drm_crtc *crtc)
+static void tegra_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct drm_display_mode *mode = &crtc->state->adjusted_mode;
struct tegra_dc_state *state = to_dc_state(crtc->state);
@@ -1351,11 +1353,11 @@ static void tegra_crtc_atomic_flush(struct drm_crtc *crtc,
}
static const struct drm_crtc_helper_funcs tegra_crtc_helper_funcs = {
- .disable = tegra_crtc_disable,
- .enable = tegra_crtc_enable,
.atomic_check = tegra_crtc_atomic_check,
.atomic_begin = tegra_crtc_atomic_begin,
.atomic_flush = tegra_crtc_atomic_flush,
+ .atomic_enable = tegra_crtc_atomic_enable,
+ .atomic_disable = tegra_crtc_atomic_disable,
};
static irqreturn_t tegra_dc_irq(int irq, void *data)
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index 2fde44c3a1b3..e4da041ba89b 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -25,6 +25,7 @@
#include "dpaux.h"
#include "drm.h"
+#include "trace.h"
static DEFINE_MUTEX(dpaux_lock);
static LIST_HEAD(dpaux_list);
@@ -65,14 +66,19 @@ static inline struct tegra_dpaux *work_to_dpaux(struct work_struct *work)
}
static inline u32 tegra_dpaux_readl(struct tegra_dpaux *dpaux,
- unsigned long offset)
+ unsigned int offset)
{
- return readl(dpaux->regs + (offset << 2));
+ u32 value = readl(dpaux->regs + (offset << 2));
+
+ trace_dpaux_readl(dpaux->dev, offset, value);
+
+ return value;
}
static inline void tegra_dpaux_writel(struct tegra_dpaux *dpaux,
- u32 value, unsigned long offset)
+ u32 value, unsigned int offset)
{
+ trace_dpaux_writel(dpaux->dev, offset, value);
writel(value, dpaux->regs + (offset << 2));
}
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 518f4b69ea53..597d563d636a 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -100,7 +100,12 @@ static int tegra_atomic_commit(struct drm_device *drm,
* the software side now.
*/
- drm_atomic_helper_swap_state(state, true);
+ err = drm_atomic_helper_swap_state(state, true);
+ if (err) {
+ mutex_unlock(&tegra->commit.lock);
+ drm_atomic_helper_cleanup_planes(drm, state);
+ return err;
+ }
drm_atomic_state_get(state);
if (nonblock)
@@ -214,12 +219,10 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
err = tegra_drm_fb_init(drm);
if (err < 0)
- goto vblank;
+ goto device;
return 0;
-vblank:
- drm_vblank_cleanup(drm);
device:
host1x_device_exit(device);
fbdev:
@@ -248,7 +251,6 @@ static void tegra_drm_unload(struct drm_device *drm)
drm_kms_helper_poll_fini(drm);
tegra_drm_fb_exit(drm);
drm_mode_config_cleanup(drm);
- drm_vblank_cleanup(drm);
err = host1x_device_exit(device);
if (err < 0)
@@ -304,8 +306,6 @@ host1x_bo_lookup(struct drm_file *file, u32 handle)
if (!gem)
return NULL;
- drm_gem_object_unreference_unlocked(gem);
-
bo = to_tegra_bo(gem);
return &bo->base;
}
@@ -394,8 +394,10 @@ int tegra_drm_submit(struct tegra_drm_context *context,
(void __user *)(uintptr_t)args->waitchks;
struct drm_tegra_syncpt syncpt;
struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
+ struct drm_gem_object **refs;
struct host1x_syncpt *sp;
struct host1x_job *job;
+ unsigned int num_refs;
int err;
/* We don't yet support other than one syncpt_incr struct per submit */
@@ -417,6 +419,21 @@ int tegra_drm_submit(struct tegra_drm_context *context,
job->class = context->client->base.class;
job->serialize = true;
+ /*
+ * Track referenced BOs so that they can be unreferenced after the
+ * submission is complete.
+ */
+ num_refs = num_cmdbufs + num_relocs * 2 + num_waitchks;
+
+ refs = kmalloc_array(num_refs, sizeof(*refs), GFP_KERNEL);
+ if (!refs) {
+ err = -ENOMEM;
+ goto put;
+ }
+
+ /* reuse as an iterator later */
+ num_refs = 0;
+
while (num_cmdbufs) {
struct drm_tegra_cmdbuf cmdbuf;
struct host1x_bo *bo;
@@ -445,6 +462,7 @@ int tegra_drm_submit(struct tegra_drm_context *context,
offset = (u64)cmdbuf.offset + (u64)cmdbuf.words * sizeof(u32);
obj = host1x_to_tegra_bo(bo);
+ refs[num_refs++] = &obj->gem;
/*
* Gather buffer base address must be 4-bytes aligned,
@@ -474,6 +492,7 @@ int tegra_drm_submit(struct tegra_drm_context *context,
reloc = &job->relocarray[num_relocs];
obj = host1x_to_tegra_bo(reloc->cmdbuf.bo);
+ refs[num_refs++] = &obj->gem;
/*
* The unaligned cmdbuf offset will cause an unaligned write
@@ -487,6 +506,7 @@ int tegra_drm_submit(struct tegra_drm_context *context,
}
obj = host1x_to_tegra_bo(reloc->target.bo);
+ refs[num_refs++] = &obj->gem;
if (reloc->target.offset >= obj->gem.size) {
err = -EINVAL;
@@ -506,6 +526,7 @@ int tegra_drm_submit(struct tegra_drm_context *context,
goto fail;
obj = host1x_to_tegra_bo(wait->bo);
+ refs[num_refs++] = &obj->gem;
/*
* The unaligned offset will cause an unaligned write during
@@ -545,17 +566,20 @@ int tegra_drm_submit(struct tegra_drm_context *context,
goto fail;
err = host1x_job_submit(job);
- if (err)
- goto fail_submit;
+ if (err) {
+ host1x_job_unpin(job);
+ goto fail;
+ }
args->fence = job->syncpt_end;
- host1x_job_put(job);
- return 0;
-
-fail_submit:
- host1x_job_unpin(job);
fail:
+ while (num_refs--)
+ drm_gem_object_put_unlocked(refs[num_refs]);
+
+ kfree(refs);
+
+put:
host1x_job_put(job);
return err;
}
@@ -591,7 +615,7 @@ static int tegra_gem_mmap(struct drm_device *drm, void *data,
args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);
- drm_gem_object_unreference_unlocked(gem);
+ drm_gem_object_put_unlocked(gem);
return 0;
}
@@ -858,7 +882,7 @@ static int tegra_gem_set_tiling(struct drm_device *drm, void *data,
bo->tiling.mode = mode;
bo->tiling.value = value;
- drm_gem_object_unreference_unlocked(gem);
+ drm_gem_object_put_unlocked(gem);
return 0;
}
@@ -898,7 +922,7 @@ static int tegra_gem_get_tiling(struct drm_device *drm, void *data,
break;
}
- drm_gem_object_unreference_unlocked(gem);
+ drm_gem_object_put_unlocked(gem);
return err;
}
@@ -923,7 +947,7 @@ static int tegra_gem_set_flags(struct drm_device *drm, void *data,
if (args->flags & DRM_TEGRA_GEM_BOTTOM_UP)
bo->flags |= TEGRA_BO_BOTTOM_UP;
- drm_gem_object_unreference_unlocked(gem);
+ drm_gem_object_put_unlocked(gem);
return 0;
}
@@ -945,7 +969,7 @@ static int tegra_gem_get_flags(struct drm_device *drm, void *data,
if (bo->flags & TEGRA_BO_BOTTOM_UP)
args->flags |= DRM_TEGRA_GEM_BOTTOM_UP;
- drm_gem_object_unreference_unlocked(gem);
+ drm_gem_object_put_unlocked(gem);
return 0;
}
@@ -953,20 +977,34 @@ static int tegra_gem_get_flags(struct drm_device *drm, void *data,
static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
#ifdef CONFIG_DRM_TEGRA_STAGING
- DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, 0),
- DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, 0),
- DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read, 0),
- DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr, 0),
- DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait, 0),
- DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel, 0),
- DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel, 0),
- DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt, 0),
- DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit, 0),
- DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base, 0),
- DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, tegra_gem_set_tiling, 0),
- DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, tegra_gem_get_tiling, 0),
- DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags, 0),
- DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags, 0),
+ DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create,
+ DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap,
+ DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read,
+ DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr,
+ DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait,
+ DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel,
+ DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel,
+ DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt,
+ DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit,
+ DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base,
+ DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, tegra_gem_set_tiling,
+ DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, tegra_gem_get_tiling,
+ DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags,
+ DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags,
+ DRM_UNLOCKED | DRM_RENDER_ALLOW),
#endif
};
@@ -1033,9 +1071,11 @@ static int tegra_debugfs_iova(struct seq_file *s, void *data)
struct tegra_drm *tegra = drm->dev_private;
struct drm_printer p = drm_seq_file_printer(s);
- mutex_lock(&tegra->mm_lock);
- drm_mm_print(&tegra->mm, &p);
- mutex_unlock(&tegra->mm_lock);
+ if (tegra->domain) {
+ mutex_lock(&tegra->mm_lock);
+ drm_mm_print(&tegra->mm, &p);
+ mutex_unlock(&tegra->mm_lock);
+ }
return 0;
}
@@ -1055,7 +1095,7 @@ static int tegra_debugfs_init(struct drm_minor *minor)
static struct drm_driver tegra_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
- DRIVER_ATOMIC,
+ DRIVER_ATOMIC | DRIVER_RENDER,
.load = tegra_drm_load,
.unload = tegra_drm_unload,
.open = tegra_drm_open,
@@ -1075,8 +1115,6 @@ static struct drm_driver tegra_drm_driver = {
.gem_prime_import = tegra_gem_prime_import,
.dumb_create = tegra_bo_dumb_create,
- .dumb_map_offset = tegra_bo_dumb_map_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
.ioctls = tegra_drm_ioctls,
.num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 6d6da01282f3..063f5d397526 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -23,6 +23,7 @@
#include <drm/drm_fixed.h>
#include "gem.h"
+#include "trace.h"
struct reset_control;
@@ -172,14 +173,19 @@ static inline struct tegra_dc *to_tegra_dc(struct drm_crtc *crtc)
}
static inline void tegra_dc_writel(struct tegra_dc *dc, u32 value,
- unsigned long offset)
+ unsigned int offset)
{
+ trace_dc_writel(dc->dev, offset, value);
writel(value, dc->regs + (offset << 2));
}
-static inline u32 tegra_dc_readl(struct tegra_dc *dc, unsigned long offset)
+static inline u32 tegra_dc_readl(struct tegra_dc *dc, unsigned int offset)
{
- return readl(dc->regs + (offset << 2));
+ u32 value = readl(dc->regs + (offset << 2));
+
+ trace_dc_readl(dc->dev, offset, value);
+
+ return value;
}
struct tegra_dc_window {
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index 3dea1216bafd..046649ec9441 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -28,6 +28,7 @@
#include "drm.h"
#include "dsi.h"
#include "mipi-phy.h"
+#include "trace.h"
struct tegra_dsi_state {
struct drm_connector_state base;
@@ -105,15 +106,20 @@ static struct tegra_dsi_state *tegra_dsi_get_state(struct tegra_dsi *dsi)
return to_dsi_state(dsi->output.connector.state);
}
-static inline u32 tegra_dsi_readl(struct tegra_dsi *dsi, unsigned long reg)
+static inline u32 tegra_dsi_readl(struct tegra_dsi *dsi, unsigned int offset)
{
- return readl(dsi->regs + (reg << 2));
+ u32 value = readl(dsi->regs + (offset << 2));
+
+ trace_dsi_readl(dsi->dev, offset, value);
+
+ return value;
}
static inline void tegra_dsi_writel(struct tegra_dsi *dsi, u32 value,
- unsigned long reg)
+ unsigned int offset)
{
- writel(value, dsi->regs + (reg << 2));
+ trace_dsi_writel(dsi->dev, offset, value);
+ writel(value, dsi->regs + (offset << 2));
}
static int tegra_dsi_show_regs(struct seq_file *s, void *data)
@@ -815,7 +821,6 @@ tegra_dsi_connector_duplicate_state(struct drm_connector *connector)
}
static const struct drm_connector_funcs tegra_dsi_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.reset = tegra_dsi_connector_reset,
.detect = tegra_output_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 25acb73ee728..80540c1c66dc 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -88,7 +88,7 @@ static void tegra_fb_destroy(struct drm_framebuffer *framebuffer)
if (bo->pages)
vunmap(bo->vaddr);
- drm_gem_object_unreference_unlocked(&bo->gem);
+ drm_gem_object_put_unlocked(&bo->gem);
}
}
@@ -195,7 +195,7 @@ struct drm_framebuffer *tegra_fb_create(struct drm_device *drm,
unreference:
while (i--)
- drm_gem_object_unreference_unlocked(&planes[i]->gem);
+ drm_gem_object_put_unlocked(&planes[i]->gem);
return ERR_PTR(err);
}
@@ -242,7 +242,7 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
info = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(info)) {
dev_err(drm->dev, "failed to allocate framebuffer info\n");
- drm_gem_object_unreference_unlocked(&bo->gem);
+ drm_gem_object_put_unlocked(&bo->gem);
return PTR_ERR(info);
}
@@ -251,7 +251,7 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
err = PTR_ERR(fbdev->fb);
dev_err(drm->dev, "failed to allocate DRM framebuffer: %d\n",
err);
- drm_gem_object_unreference_unlocked(&bo->gem);
+ drm_gem_object_put_unlocked(&bo->gem);
return PTR_ERR(fbdev->fb);
}
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 7a39a355678a..ab1e53d434e8 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -24,7 +24,7 @@ static void tegra_bo_put(struct host1x_bo *bo)
{
struct tegra_bo *obj = host1x_to_tegra_bo(bo);
- drm_gem_object_unreference_unlocked(&obj->gem);
+ drm_gem_object_put_unlocked(&obj->gem);
}
static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
@@ -95,7 +95,7 @@ static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
struct tegra_bo *obj = host1x_to_tegra_bo(bo);
- drm_gem_object_reference(&obj->gem);
+ drm_gem_object_get(&obj->gem);
return bo;
}
@@ -325,7 +325,7 @@ struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
return ERR_PTR(err);
}
- drm_gem_object_unreference_unlocked(&bo->gem);
+ drm_gem_object_put_unlocked(&bo->gem);
return bo;
}
@@ -423,27 +423,6 @@ int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
return 0;
}
-int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
- u32 handle, u64 *offset)
-{
- struct drm_gem_object *gem;
- struct tegra_bo *bo;
-
- gem = drm_gem_object_lookup(file, handle);
- if (!gem) {
- dev_err(drm->dev, "failed to lookup GEM object\n");
- return -EINVAL;
- }
-
- bo = to_tegra_bo(gem);
-
- *offset = drm_vma_node_offset_addr(&bo->gem.vma_node);
-
- drm_gem_object_unreference_unlocked(gem);
-
- return 0;
-}
-
static int tegra_bo_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
@@ -481,30 +460,28 @@ const struct vm_operations_struct tegra_bo_vm_ops = {
.close = drm_gem_vm_close,
};
-int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
+static int tegra_gem_mmap(struct drm_gem_object *gem,
+ struct vm_area_struct *vma)
{
- struct drm_gem_object *gem;
- struct tegra_bo *bo;
- int ret;
-
- ret = drm_gem_mmap(file, vma);
- if (ret)
- return ret;
-
- gem = vma->vm_private_data;
- bo = to_tegra_bo(gem);
+ struct tegra_bo *bo = to_tegra_bo(gem);
if (!bo->pages) {
unsigned long vm_pgoff = vma->vm_pgoff;
+ int err;
+ /*
+ * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(),
+ * and set the vm_pgoff (used as a fake buffer offset by DRM)
+ * to 0 as we want to map the whole buffer.
+ */
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_pgoff = 0;
- ret = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->paddr,
+ err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->paddr,
gem->size);
- if (ret) {
+ if (err < 0) {
drm_gem_vm_close(vma);
- return ret;
+ return err;
}
vma->vm_pgoff = vm_pgoff;
@@ -520,6 +497,20 @@ int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
return 0;
}
+int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct drm_gem_object *gem;
+ int err;
+
+ err = drm_gem_mmap(file, vma);
+ if (err < 0)
+ return err;
+
+ gem = vma->vm_private_data;
+
+ return tegra_gem_mmap(gem, vma);
+}
+
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
enum dma_data_direction dir)
@@ -603,7 +594,14 @@ static void tegra_gem_prime_kunmap(struct dma_buf *buf, unsigned long page,
static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
- return -EINVAL;
+ struct drm_gem_object *gem = buf->priv;
+ int err;
+
+ err = drm_gem_mmap_obj(gem, gem->size, vma);
+ if (err < 0)
+ return err;
+
+ return tegra_gem_mmap(gem, vma);
}
static void *tegra_gem_prime_vmap(struct dma_buf *buf)
@@ -654,7 +652,7 @@ struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
struct drm_gem_object *gem = buf->priv;
if (gem->dev == drm) {
- drm_gem_object_reference(gem);
+ drm_gem_object_get(gem);
return gem;
}
}
diff --git a/drivers/gpu/drm/tegra/gem.h b/drivers/gpu/drm/tegra/gem.h
index 8b32a6fd586d..8eb9fd24ef0e 100644
--- a/drivers/gpu/drm/tegra/gem.h
+++ b/drivers/gpu/drm/tegra/gem.h
@@ -67,8 +67,6 @@ struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
void tegra_bo_free_object(struct drm_gem_object *gem);
int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
struct drm_mode_create_dumb *args);
-int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
- u32 handle, u64 *offset);
int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma);
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index cda0491ed6bf..5b9d83b71943 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -24,6 +24,7 @@
#include "hdmi.h"
#include "drm.h"
#include "dc.h"
+#include "trace.h"
#define HDMI_ELD_BUFFER_SIZE 96
@@ -100,14 +101,19 @@ enum {
};
static inline u32 tegra_hdmi_readl(struct tegra_hdmi *hdmi,
- unsigned long offset)
+ unsigned int offset)
{
- return readl(hdmi->regs + (offset << 2));
+ u32 value = readl(hdmi->regs + (offset << 2));
+
+ trace_hdmi_readl(hdmi->dev, offset, value);
+
+ return value;
}
static inline void tegra_hdmi_writel(struct tegra_hdmi *hdmi, u32 value,
- unsigned long offset)
+ unsigned int offset)
{
+ trace_hdmi_writel(hdmi->dev, offset, value);
writel(value, hdmi->regs + (offset << 2));
}
@@ -734,7 +740,7 @@ static void tegra_hdmi_setup_avi_infoframe(struct tegra_hdmi *hdmi,
u8 buffer[17];
ssize_t err;
- err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
if (err < 0) {
dev_err(hdmi->dev, "failed to setup AVI infoframe: %zd\n", err);
return;
@@ -902,7 +908,6 @@ tegra_hdmi_connector_detect(struct drm_connector *connector, bool force)
}
static const struct drm_connector_funcs tegra_hdmi_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.reset = drm_atomic_helper_connector_reset,
.detect = tegra_hdmi_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index a131b44e2d6f..78ec5193741d 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -88,7 +88,6 @@ static void tegra_dc_write_regs(struct tegra_dc *dc,
}
static const struct drm_connector_funcs tegra_rgb_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.reset = drm_atomic_helper_connector_reset,
.detect = tegra_output_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index a8f528925009..7ab1d1dc7cd7 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -26,6 +26,7 @@
#include "dc.h"
#include "drm.h"
#include "sor.h"
+#include "trace.h"
#define SOR_REKEY 0x38
@@ -232,14 +233,19 @@ static inline struct tegra_sor *to_sor(struct tegra_output *output)
return container_of(output, struct tegra_sor, output);
}
-static inline u32 tegra_sor_readl(struct tegra_sor *sor, unsigned long offset)
+static inline u32 tegra_sor_readl(struct tegra_sor *sor, unsigned int offset)
{
- return readl(sor->regs + (offset << 2));
+ u32 value = readl(sor->regs + (offset << 2));
+
+ trace_sor_readl(sor->dev, offset, value);
+
+ return value;
}
static inline void tegra_sor_writel(struct tegra_sor *sor, u32 value,
- unsigned long offset)
+ unsigned int offset)
{
+ trace_sor_writel(sor->dev, offset, value);
writel(value, sor->regs + (offset << 2));
}
@@ -1340,7 +1346,6 @@ tegra_sor_connector_duplicate_state(struct drm_connector *connector)
}
static const struct drm_connector_funcs tegra_sor_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.reset = tegra_sor_connector_reset,
.detect = tegra_sor_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -1904,7 +1909,7 @@ tegra_sor_hdmi_setup_avi_infoframe(struct tegra_sor *sor,
value &= ~INFOFRAME_CTRL_ENABLE;
tegra_sor_writel(sor, value, SOR_HDMI_AVI_INFOFRAME_CTRL);
- err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
if (err < 0) {
dev_err(sor->dev, "failed to setup AVI infoframe: %d\n", err);
return err;
diff --git a/drivers/gpu/drm/tegra/trace.c b/drivers/gpu/drm/tegra/trace.c
new file mode 100644
index 000000000000..006f65c72a34
--- /dev/null
+++ b/drivers/gpu/drm/tegra/trace.c
@@ -0,0 +1,2 @@
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/drivers/gpu/drm/tegra/trace.h b/drivers/gpu/drm/tegra/trace.h
new file mode 100644
index 000000000000..e9b7cdad5c4c
--- /dev/null
+++ b/drivers/gpu/drm/tegra/trace.h
@@ -0,0 +1,68 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM tegra
+
+#if !defined(DRM_TEGRA_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define DRM_TEGRA_TRACE_H 1
+
+#include <linux/device.h>
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(register_access,
+ TP_PROTO(struct device *dev, unsigned int offset, u32 value),
+ TP_ARGS(dev, offset, value),
+ TP_STRUCT__entry(
+ __field(struct device *, dev)
+ __field(unsigned int, offset)
+ __field(u32, value)
+ ),
+ TP_fast_assign(
+ __entry->dev = dev;
+ __entry->offset = offset;
+ __entry->value = value;
+ ),
+ TP_printk("%s %04x %08x", dev_name(__entry->dev), __entry->offset,
+ __entry->value)
+);
+
+DEFINE_EVENT(register_access, dc_writel,
+ TP_PROTO(struct device *dev, unsigned int offset, u32 value),
+ TP_ARGS(dev, offset, value));
+DEFINE_EVENT(register_access, dc_readl,
+ TP_PROTO(struct device *dev, unsigned int offset, u32 value),
+ TP_ARGS(dev, offset, value));
+
+DEFINE_EVENT(register_access, hdmi_writel,
+ TP_PROTO(struct device *dev, unsigned int offset, u32 value),
+ TP_ARGS(dev, offset, value));
+DEFINE_EVENT(register_access, hdmi_readl,
+ TP_PROTO(struct device *dev, unsigned int offset, u32 value),
+ TP_ARGS(dev, offset, value));
+
+DEFINE_EVENT(register_access, dsi_writel,
+ TP_PROTO(struct device *dev, unsigned int offset, u32 value),
+ TP_ARGS(dev, offset, value));
+DEFINE_EVENT(register_access, dsi_readl,
+ TP_PROTO(struct device *dev, unsigned int offset, u32 value),
+ TP_ARGS(dev, offset, value));
+
+DEFINE_EVENT(register_access, dpaux_writel,
+ TP_PROTO(struct device *dev, unsigned int offset, u32 value),
+ TP_ARGS(dev, offset, value));
+DEFINE_EVENT(register_access, dpaux_readl,
+ TP_PROTO(struct device *dev, unsigned int offset, u32 value),
+ TP_ARGS(dev, offset, value));
+
+DEFINE_EVENT(register_access, sor_writel,
+ TP_PROTO(struct device *dev, unsigned int offset, u32 value),
+ TP_ARGS(dev, offset, value));
+DEFINE_EVENT(register_access, sor_readl,
+ TP_PROTO(struct device *dev, unsigned int offset, u32 value),
+ TP_ARGS(dev, offset, value));
+
+#endif /* DRM_TEGRA_TRACE_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c
index 47cb1aaa58b1..2448229fa653 100644
--- a/drivers/gpu/drm/tegra/vic.c
+++ b/drivers/gpu/drm/tegra/vic.c
@@ -258,12 +258,16 @@ static const struct tegra_drm_client_ops vic_ops = {
.submit = tegra_drm_submit,
};
+#define NVIDIA_TEGRA_124_VIC_FIRMWARE "nvidia/tegra124/vic03_ucode.bin"
+
static const struct vic_config vic_t124_config = {
- .firmware = "nvidia/tegra124/vic03_ucode.bin",
+ .firmware = NVIDIA_TEGRA_124_VIC_FIRMWARE,
};
+#define NVIDIA_TEGRA_210_VIC_FIRMWARE "nvidia/tegra210/vic04_ucode.bin"
+
static const struct vic_config vic_t210_config = {
- .firmware = "nvidia/tegra210/vic04_ucode.bin",
+ .firmware = NVIDIA_TEGRA_210_VIC_FIRMWARE,
};
static const struct of_device_id vic_match[] = {
@@ -394,3 +398,10 @@ struct platform_driver tegra_vic_driver = {
.probe = vic_probe,
.remove = vic_remove,
};
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC)
+MODULE_FIRMWARE(NVIDIA_TEGRA_124_VIC_FIRMWARE);
+#endif
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+MODULE_FIRMWARE(NVIDIA_TEGRA_210_VIC_FIRMWARE);
+#endif
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index d524ed0d5146..406fe4544b83 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -504,6 +504,12 @@ static void tilcdc_crtc_enable(struct drm_crtc *crtc)
mutex_unlock(&tilcdc_crtc->enable_lock);
}
+static void tilcdc_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ tilcdc_crtc_enable(crtc);
+}
+
static void tilcdc_crtc_off(struct drm_crtc *crtc, bool shutdown)
{
struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
@@ -562,6 +568,12 @@ static void tilcdc_crtc_disable(struct drm_crtc *crtc)
tilcdc_crtc_off(crtc, false);
}
+static void tilcdc_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ tilcdc_crtc_disable(crtc);
+}
+
void tilcdc_crtc_shutdown(struct drm_crtc *crtc)
{
tilcdc_crtc_off(crtc, true);
@@ -729,9 +741,9 @@ static const struct drm_crtc_funcs tilcdc_crtc_funcs = {
static const struct drm_crtc_helper_funcs tilcdc_crtc_helper_funcs = {
.mode_fixup = tilcdc_crtc_mode_fixup,
- .enable = tilcdc_crtc_enable,
- .disable = tilcdc_crtc_disable,
.atomic_check = tilcdc_crtc_atomic_check,
+ .atomic_enable = tilcdc_crtc_atomic_enable,
+ .atomic_disable = tilcdc_crtc_atomic_disable,
};
int tilcdc_crtc_max_width(struct drm_crtc *crtc)
@@ -1038,8 +1050,8 @@ int tilcdc_crtc_create(struct drm_device *dev)
if (priv->is_componentized) {
crtc->port = of_graph_get_port_by_id(dev->dev->of_node, 0);
if (!crtc->port) { /* This should never happen */
- dev_err(dev->dev, "Port node not found in %s\n",
- dev->dev->of_node->full_name);
+ dev_err(dev->dev, "Port node not found in %pOF\n",
+ dev->dev->of_node);
ret = -EINVAL;
goto fail;
}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index d67e18983a7d..b0d70f943cec 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -108,7 +108,11 @@ static int tilcdc_commit(struct drm_device *dev,
if (ret)
return ret;
- drm_atomic_helper_swap_state(state, true);
+ ret = drm_atomic_helper_swap_state(state, true);
+ if (ret) {
+ drm_atomic_helper_cleanup_planes(dev, state);
+ return ret;
+ }
/*
* Everything below can be run asynchronously without the need to grab
@@ -538,8 +542,6 @@ static struct drm_driver tilcdc_driver = {
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
- .dumb_map_offset = drm_gem_cma_dumb_map_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
index 28c3e2f44f64..1813a3623ce6 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -189,7 +189,6 @@ static struct drm_encoder *panel_connector_best_encoder(
static const struct drm_connector_funcs panel_connector_funcs = {
.destroy = panel_connector_destroy,
- .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_plane.c b/drivers/gpu/drm/tilcdc/tilcdc_plane.c
index ba0d66c0d8ac..7667b038ae7f 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_plane.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_plane.c
@@ -28,7 +28,6 @@ static struct drm_plane_funcs tilcdc_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
- .set_property = drm_atomic_helper_plane_set_property,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
index aabfad882e23..1e2dfb1b1d6b 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
@@ -202,7 +202,6 @@ static struct drm_encoder *tfp410_connector_best_encoder(
static const struct drm_connector_funcs tfp410_connector_funcs = {
.destroy = tfp410_connector_destroy,
- .dpms = drm_atomic_helper_connector_dpms,
.detect = tfp410_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.reset = drm_atomic_helper_connector_reset,
diff --git a/drivers/gpu/drm/tinydrm/Kconfig b/drivers/gpu/drm/tinydrm/Kconfig
index 3504c53846da..2e790e7dced5 100644
--- a/drivers/gpu/drm/tinydrm/Kconfig
+++ b/drivers/gpu/drm/tinydrm/Kconfig
@@ -19,3 +19,26 @@ config TINYDRM_MI0283QT
help
DRM driver for the Multi-Inno MI0283QT display panel
If M is selected the module will be called mi0283qt.
+
+config TINYDRM_REPAPER
+ tristate "DRM support for Pervasive Displays RePaper panels (V231)"
+ depends on DRM_TINYDRM && SPI
+ depends on THERMAL || !THERMAL
+ help
+ DRM driver for the following Pervasive Displays panels:
+ 1.44" TFT EPD Panel (E1144CS021)
+ 1.90" TFT EPD Panel (E1190CS021)
+ 2.00" TFT EPD Panel (E2200CS021)
+ 2.71" TFT EPD Panel (E2271CS021)
+
+ If M is selected the module will be called repaper.
+
+config TINYDRM_ST7586
+ tristate "DRM support for Sitronix ST7586 display panels"
+ depends on DRM_TINYDRM && SPI
+ select TINYDRM_MIPI_DBI
+ help
+ DRM driver for the following Sitronix ST7586 panels:
+ * LEGO MINDSTORMS EV3
+
+ If M is selected the module will be called st7586.
diff --git a/drivers/gpu/drm/tinydrm/Makefile b/drivers/gpu/drm/tinydrm/Makefile
index 7a3604cf4fc2..0c184bd1bb59 100644
--- a/drivers/gpu/drm/tinydrm/Makefile
+++ b/drivers/gpu/drm/tinydrm/Makefile
@@ -5,3 +5,5 @@ obj-$(CONFIG_TINYDRM_MIPI_DBI) += mipi-dbi.o
# Displays
obj-$(CONFIG_TINYDRM_MI0283QT) += mi0283qt.o
+obj-$(CONFIG_TINYDRM_REPAPER) += repaper.o
+obj-$(CONFIG_TINYDRM_ST7586) += st7586.o
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c b/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
index d4cda3308ac7..bd6cce093a85 100644
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
@@ -7,13 +7,15 @@
* (at your option) any later version.
*/
-#include <drm/tinydrm/tinydrm.h>
-#include <drm/tinydrm/tinydrm-helpers.h>
#include <linux/backlight.h>
+#include <linux/dma-buf.h>
#include <linux/pm.h>
#include <linux/spi/spi.h>
#include <linux/swab.h>
+#include <drm/tinydrm/tinydrm.h>
+#include <drm/tinydrm/tinydrm-helpers.h>
+
static unsigned int spi_max;
module_param(spi_max, uint, 0400);
MODULE_PARM_DESC(spi_max, "Set a lower SPI max transfer size");
@@ -181,6 +183,60 @@ void tinydrm_xrgb8888_to_rgb565(u16 *dst, void *vaddr,
EXPORT_SYMBOL(tinydrm_xrgb8888_to_rgb565);
/**
+ * tinydrm_xrgb8888_to_gray8 - Convert XRGB8888 to grayscale
+ * @dst: 8-bit grayscale destination buffer
+ * @vaddr: XRGB8888 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ *
+ * DRM doesn't have native monochrome or grayscale support. Drivers for
+ * such displays can announce the commonly supported XR24 format to
+ * userspace and use this function to convert to the native format.
+ *
+ * Monochrome drivers will use the most significant bit,
+ * where 1 means foreground color and 0 background color.
+ *
+ * ITU BT.601 is used for the RGB -> luma (brightness) conversion.
+ */
+void tinydrm_xrgb8888_to_gray8(u8 *dst, void *vaddr, struct drm_framebuffer *fb,
+ struct drm_clip_rect *clip)
+{
+ unsigned int len = (clip->x2 - clip->x1) * sizeof(u32);
+ unsigned int x, y;
+ void *buf;
+ u32 *src;
+
+ if (WARN_ON(fb->format->format != DRM_FORMAT_XRGB8888))
+ return;
+ /*
+ * The CMA memory is write-combined, so reads are uncached.
+ * Speed this up by fetching one line at a time.
+ */
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ for (y = clip->y1; y < clip->y2; y++) {
+ src = vaddr + (y * fb->pitches[0]);
+ src += clip->x1;
+ memcpy(buf, src, len);
+ src = buf;
+ for (x = clip->x1; x < clip->x2; x++) {
+ u8 r = (*src & 0x00ff0000) >> 16;
+ u8 g = (*src & 0x0000ff00) >> 8;
+ u8 b = *src & 0x000000ff;
+
+ /* ITU BT.601: Y = 0.299 R + 0.587 G + 0.114 B */
+ *dst++ = (3 * r + 6 * g + b) / 10;
+ src++;
+ }
+ }
+
+ kfree(buf);
+}
+EXPORT_SYMBOL(tinydrm_xrgb8888_to_gray8);
+
+/**
* tinydrm_of_find_backlight - Find backlight device in device-tree
* @dev: Device
*
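tinydrm_xrgb8888_to_gray8() above approximates the BT.601 luma Y = 0.299 R + 0.587 G + 0.114 B with the integer expression (3R + 6G + B) / 10. A small self-contained sketch (illustrative only, not part of the patch) of that per-pixel step:

#include <stdint.h>
#include <stdio.h>

static uint8_t xrgb8888_to_gray(uint32_t px)
{
	uint8_t r = (px >> 16) & 0xff;
	uint8_t g = (px >> 8) & 0xff;
	uint8_t b = px & 0xff;

	/* (3R + 6G + B) / 10 approximates Y = 0.299 R + 0.587 G + 0.114 B */
	return (3 * r + 6 * g + b) / 10;
}

int main(void)
{
	/* Pure red: exact BT.601 luma is ~76, the approximation also gives 76 */
	printf("%u\n", xrgb8888_to_gray(0x00ff0000));
	return 0;
}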
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c b/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
index ec43fb7ad9e4..177e9d861001 100644
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
@@ -56,7 +56,7 @@ static const struct drm_connector_helper_funcs tinydrm_connector_hfuncs = {
static enum drm_connector_status
tinydrm_connector_detect(struct drm_connector *connector, bool force)
{
- if (drm_device_is_unplugged(connector->dev))
+ if (drm_dev_is_unplugged(connector->dev))
return connector_status_disconnected;
return connector->status;
@@ -71,7 +71,6 @@ static void tinydrm_connector_destroy(struct drm_connector *connector)
}
static const struct drm_connector_funcs tinydrm_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.reset = drm_atomic_helper_connector_reset,
.detect = tinydrm_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -225,7 +224,7 @@ tinydrm_display_pipe_init(struct tinydrm_device *tdev,
return PTR_ERR(connector);
ret = drm_simple_display_pipe_init(drm, &tdev->pipe, funcs, formats,
- format_count, connector);
+ format_count, NULL, connector);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/tinydrm/mi0283qt.c b/drivers/gpu/drm/tinydrm/mi0283qt.c
index 482ff1c3db61..7e5bb7d6f655 100644
--- a/drivers/gpu/drm/tinydrm/mi0283qt.c
+++ b/drivers/gpu/drm/tinydrm/mi0283qt.c
@@ -195,8 +195,12 @@ static int mi0283qt_probe(struct spi_device *spi)
device_property_read_u32(dev, "rotation", &rotation);
- ret = mipi_dbi_spi_init(spi, mipi, dc, &mi0283qt_pipe_funcs,
- &mi0283qt_driver, &mi0283qt_mode, rotation);
+ ret = mipi_dbi_spi_init(spi, mipi, dc);
+ if (ret)
+ return ret;
+
+ ret = mipi_dbi_init(&spi->dev, mipi, &mi0283qt_pipe_funcs,
+ &mi0283qt_driver, &mi0283qt_mode, rotation);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/tinydrm/mipi-dbi.c b/drivers/gpu/drm/tinydrm/mipi-dbi.c
index c83eeb7a34b0..2caeabcd3458 100644
--- a/drivers/gpu/drm/tinydrm/mipi-dbi.c
+++ b/drivers/gpu/drm/tinydrm/mipi-dbi.c
@@ -776,15 +776,12 @@ static int mipi_dbi_typec3_command(struct mipi_dbi *mipi, u8 cmd,
/**
* mipi_dbi_spi_init - Initialize MIPI DBI SPI interfaced controller
* @spi: SPI device
- * @dc: D/C gpio (optional)
* @mipi: &mipi_dbi structure to initialize
- * @pipe_funcs: Display pipe functions
- * @driver: DRM driver
- * @mode: Display mode
- * @rotation: Initial rotation in degrees Counter Clock Wise
+ * @dc: D/C gpio (optional)
*
* This function sets &mipi_dbi->command, enables &mipi->read_commands for the
- * usual read commands and initializes @mipi using mipi_dbi_init().
+ * usual read commands. It should be followed by a call to mipi_dbi_init() or
+ * a driver-specific init.
*
* If @dc is set, a Type C Option 3 interface is assumed, if not
* Type C Option 1.
@@ -799,11 +796,7 @@ static int mipi_dbi_typec3_command(struct mipi_dbi *mipi, u8 cmd,
* Zero on success, negative error code on failure.
*/
int mipi_dbi_spi_init(struct spi_device *spi, struct mipi_dbi *mipi,
- struct gpio_desc *dc,
- const struct drm_simple_display_pipe_funcs *pipe_funcs,
- struct drm_driver *driver,
- const struct drm_display_mode *mode,
- unsigned int rotation)
+ struct gpio_desc *dc)
{
size_t tx_size = tinydrm_spi_max_transfer_size(spi, 0);
struct device *dev = &spi->dev;
@@ -849,7 +842,7 @@ int mipi_dbi_spi_init(struct spi_device *spi, struct mipi_dbi *mipi,
return -ENOMEM;
}
- return mipi_dbi_init(dev, mipi, pipe_funcs, driver, mode, rotation);
+ return 0;
}
EXPORT_SYMBOL(mipi_dbi_spi_init);
diff --git a/drivers/gpu/drm/tinydrm/repaper.c b/drivers/gpu/drm/tinydrm/repaper.c
new file mode 100644
index 000000000000..30dc97b3ff21
--- /dev/null
+++ b/drivers/gpu/drm/tinydrm/repaper.c
@@ -0,0 +1,1117 @@
+/*
+ * DRM driver for Pervasive Displays RePaper branded e-ink panels
+ *
+ * Copyright 2013-2017 Pervasive Displays, Inc.
+ * Copyright 2017 Noralf Trønnes
+ *
+ * The driver supports:
+ * Material Film: Aurora Mb (V231)
+ * Driver IC: G2 (eTC)
+ *
+ * The controller code was taken from the userspace driver:
+ * https://github.com/repaper/gratis
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-buf.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/sched/clock.h>
+#include <linux/spi/spi.h>
+#include <linux/thermal.h>
+
+#include <drm/tinydrm/tinydrm.h>
+#include <drm/tinydrm/tinydrm-helpers.h>
+
+#define REPAPER_RID_G2_COG_ID 0x12
+
+enum repaper_model {
+ E1144CS021 = 1,
+ E1190CS021,
+ E2200CS021,
+ E2271CS021,
+};
+
+enum repaper_stage { /* Image pixel -> Display pixel */
+ REPAPER_COMPENSATE, /* B -> W, W -> B (Current Image) */
+ REPAPER_WHITE, /* B -> N, W -> W (Current Image) */
+ REPAPER_INVERSE, /* B -> N, W -> B (New Image) */
+ REPAPER_NORMAL /* B -> B, W -> W (New Image) */
+};
+
+enum repaper_epd_border_byte {
+ REPAPER_BORDER_BYTE_NONE,
+ REPAPER_BORDER_BYTE_ZERO,
+ REPAPER_BORDER_BYTE_SET,
+};
+
+struct repaper_epd {
+ struct tinydrm_device tinydrm;
+ struct spi_device *spi;
+
+ struct gpio_desc *panel_on;
+ struct gpio_desc *border;
+ struct gpio_desc *discharge;
+ struct gpio_desc *reset;
+ struct gpio_desc *busy;
+
+ struct thermal_zone_device *thermal;
+
+ unsigned int height;
+ unsigned int width;
+ unsigned int bytes_per_scan;
+ const u8 *channel_select;
+ unsigned int stage_time;
+ unsigned int factored_stage_time;
+ bool middle_scan;
+ bool pre_border_byte;
+ enum repaper_epd_border_byte border_byte;
+
+ u8 *line_buffer;
+ void *current_frame;
+
+ bool enabled;
+ bool cleared;
+ bool partial;
+};
+
+static inline struct repaper_epd *
+epd_from_tinydrm(struct tinydrm_device *tdev)
+{
+ return container_of(tdev, struct repaper_epd, tinydrm);
+}
+
+static int repaper_spi_transfer(struct spi_device *spi, u8 header,
+ const void *tx, void *rx, size_t len)
+{
+ void *txbuf = NULL, *rxbuf = NULL;
+ struct spi_transfer tr[2] = {};
+ u8 *headerbuf;
+ int ret;
+
+ headerbuf = kmalloc(1, GFP_KERNEL);
+ if (!headerbuf)
+ return -ENOMEM;
+
+ headerbuf[0] = header;
+ tr[0].tx_buf = headerbuf;
+ tr[0].len = 1;
+
+ /* Short tx buffers may live on the stack; copy them to DMA-safe heap memory */
+ if (tx && len <= 32) {
+ txbuf = kmalloc(len, GFP_KERNEL);
+ if (!txbuf) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+ memcpy(txbuf, tx, len);
+ }
+
+ if (rx) {
+ rxbuf = kmalloc(len, GFP_KERNEL);
+ if (!rxbuf) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+ }
+
+ tr[1].tx_buf = txbuf ? txbuf : tx;
+ tr[1].rx_buf = rxbuf;
+ tr[1].len = len;
+
+ ndelay(80);
+ ret = spi_sync_transfer(spi, tr, 2);
+ if (rx && !ret)
+ memcpy(rx, rxbuf, len);
+
+out_free:
+ kfree(headerbuf);
+ kfree(txbuf);
+ kfree(rxbuf);
+
+ return ret;
+}
+
+static int repaper_write_buf(struct spi_device *spi, u8 reg,
+ const u8 *buf, size_t len)
+{
+ int ret;
+
+ ret = repaper_spi_transfer(spi, 0x70, &reg, NULL, 1);
+ if (ret)
+ return ret;
+
+ return repaper_spi_transfer(spi, 0x72, buf, NULL, len);
+}
+
+static int repaper_write_val(struct spi_device *spi, u8 reg, u8 val)
+{
+ return repaper_write_buf(spi, reg, &val, 1);
+}
+
+static int repaper_read_val(struct spi_device *spi, u8 reg)
+{
+ int ret;
+ u8 val;
+
+ ret = repaper_spi_transfer(spi, 0x70, &reg, NULL, 1);
+ if (ret)
+ return ret;
+
+ ret = repaper_spi_transfer(spi, 0x73, NULL, &val, 1);
+
+ return ret ? ret : val;
+}
+
+static int repaper_read_id(struct spi_device *spi)
+{
+ int ret;
+ u8 id;
+
+ ret = repaper_spi_transfer(spi, 0x71, NULL, &id, 1);
+
+ return ret ? ret : id;
+}
+
+static void repaper_spi_mosi_low(struct spi_device *spi)
+{
+ const u8 buf[1] = { 0 };
+
+ spi_write(spi, buf, 1);
+}
+
+/* pixels on display are numbered from 1 so even is actually bits 1,3,5,... */
+static void repaper_even_pixels(struct repaper_epd *epd, u8 **pp,
+ const u8 *data, u8 fixed_value, const u8 *mask,
+ enum repaper_stage stage)
+{
+ unsigned int b;
+
+ for (b = 0; b < (epd->width / 8); b++) {
+ if (data) {
+ u8 pixels = data[b] & 0xaa;
+ u8 pixel_mask = 0xff;
+ u8 p1, p2, p3, p4;
+
+ if (mask) {
+ pixel_mask = (mask[b] ^ pixels) & 0xaa;
+ pixel_mask |= pixel_mask >> 1;
+ }
+
+ switch (stage) {
+ case REPAPER_COMPENSATE: /* B -> W, W -> B (Current) */
+ pixels = 0xaa | ((pixels ^ 0xaa) >> 1);
+ break;
+ case REPAPER_WHITE: /* B -> N, W -> W (Current) */
+ pixels = 0x55 + ((pixels ^ 0xaa) >> 1);
+ break;
+ case REPAPER_INVERSE: /* B -> N, W -> B (New) */
+ pixels = 0x55 | (pixels ^ 0xaa);
+ break;
+ case REPAPER_NORMAL: /* B -> B, W -> W (New) */
+ pixels = 0xaa | (pixels >> 1);
+ break;
+ }
+
+ pixels = (pixels & pixel_mask) | (~pixel_mask & 0x55);
+ p1 = (pixels >> 6) & 0x03;
+ p2 = (pixels >> 4) & 0x03;
+ p3 = (pixels >> 2) & 0x03;
+ p4 = (pixels >> 0) & 0x03;
+ pixels = (p1 << 0) | (p2 << 2) | (p3 << 4) | (p4 << 6);
+ *(*pp)++ = pixels;
+ } else {
+ *(*pp)++ = fixed_value;
+ }
+ }
+}
+
+/* pixels on display are numbered from 1 so odd is actually bits 0,2,4,... */
+static void repaper_odd_pixels(struct repaper_epd *epd, u8 **pp,
+ const u8 *data, u8 fixed_value, const u8 *mask,
+ enum repaper_stage stage)
+{
+ unsigned int b;
+
+ for (b = epd->width / 8; b > 0; b--) {
+ if (data) {
+ u8 pixels = data[b - 1] & 0x55;
+ u8 pixel_mask = 0xff;
+
+ if (mask) {
+ pixel_mask = (mask[b - 1] ^ pixels) & 0x55;
+ pixel_mask |= pixel_mask << 1;
+ }
+
+ switch (stage) {
+ case REPAPER_COMPENSATE: /* B -> W, W -> B (Current) */
+ pixels = 0xaa | (pixels ^ 0x55);
+ break;
+ case REPAPER_WHITE: /* B -> N, W -> W (Current) */
+ pixels = 0x55 + (pixels ^ 0x55);
+ break;
+ case REPAPER_INVERSE: /* B -> N, W -> B (New) */
+ pixels = 0x55 | ((pixels ^ 0x55) << 1);
+ break;
+ case REPAPER_NORMAL: /* B -> B, W -> W (New) */
+ pixels = 0xaa | pixels;
+ break;
+ }
+
+ pixels = (pixels & pixel_mask) | (~pixel_mask & 0x55);
+ *(*pp)++ = pixels;
+ } else {
+ *(*pp)++ = fixed_value;
+ }
+ }
+}
+
+/* interleave bits: (byte)76543210 -> (16 bit).7.6.5.4.3.2.1 */
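+/* e.g. 0xff -> 0x5555, 0x81 -> 0x4001 */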
+static inline u16 repaper_interleave_bits(u16 value)
+{
+ value = (value | (value << 4)) & 0x0f0f;
+ value = (value | (value << 2)) & 0x3333;
+ value = (value | (value << 1)) & 0x5555;
+
+ return value;
+}
+
+/* pixels on display are numbered from 1 */
+static void repaper_all_pixels(struct repaper_epd *epd, u8 **pp,
+ const u8 *data, u8 fixed_value, const u8 *mask,
+ enum repaper_stage stage)
+{
+ unsigned int b;
+
+ for (b = epd->width / 8; b > 0; b--) {
+ if (data) {
+ u16 pixels = repaper_interleave_bits(data[b - 1]);
+ u16 pixel_mask = 0xffff;
+
+ if (mask) {
+ pixel_mask = repaper_interleave_bits(mask[b - 1]);
+
+ pixel_mask = (pixel_mask ^ pixels) & 0x5555;
+ pixel_mask |= pixel_mask << 1;
+ }
+
+ switch (stage) {
+ case REPAPER_COMPENSATE: /* B -> W, W -> B (Current) */
+ pixels = 0xaaaa | (pixels ^ 0x5555);
+ break;
+ case REPAPER_WHITE: /* B -> N, W -> W (Current) */
+ pixels = 0x5555 + (pixels ^ 0x5555);
+ break;
+ case REPAPER_INVERSE: /* B -> N, W -> B (New) */
+ pixels = 0x5555 | ((pixels ^ 0x5555) << 1);
+ break;
+ case REPAPER_NORMAL: /* B -> B, W -> W (New) */
+ pixels = 0xaaaa | pixels;
+ break;
+ }
+
+ pixels = (pixels & pixel_mask) | (~pixel_mask & 0x5555);
+ *(*pp)++ = pixels >> 8;
+ *(*pp)++ = pixels;
+ } else {
+ *(*pp)++ = fixed_value;
+ *(*pp)++ = fixed_value;
+ }
+ }
+}
+
+/* output one line of scan and data bytes to the display */
+static void repaper_one_line(struct repaper_epd *epd, unsigned int line,
+ const u8 *data, u8 fixed_value, const u8 *mask,
+ enum repaper_stage stage)
+{
+ u8 *p = epd->line_buffer;
+ unsigned int b;
+
+ repaper_spi_mosi_low(epd->spi);
+
+ if (epd->pre_border_byte)
+ *p++ = 0x00;
+
+ if (epd->middle_scan) {
+ /* data bytes */
+ repaper_odd_pixels(epd, &p, data, fixed_value, mask, stage);
+
+ /* scan line */
+ for (b = epd->bytes_per_scan; b > 0; b--) {
+ if (line / 4 == b - 1)
+ *p++ = 0x03 << (2 * (line & 0x03));
+ else
+ *p++ = 0x00;
+ }
+
+ /* data bytes */
+ repaper_even_pixels(epd, &p, data, fixed_value, mask, stage);
+ } else {
+ /*
+ * even scan line, but as lines on display are numbered from 1,
+ * line: 1,3,5,...
+ */
+ for (b = 0; b < epd->bytes_per_scan; b++) {
+ if (0 != (line & 0x01) && line / 8 == b)
+ *p++ = 0xc0 >> (line & 0x06);
+ else
+ *p++ = 0x00;
+ }
+
+ /* data bytes */
+ repaper_all_pixels(epd, &p, data, fixed_value, mask, stage);
+
+ /*
+ * odd scan line, but as lines on display are numbered from 1,
+ * line: 0,2,4,6,...
+ */
+ for (b = epd->bytes_per_scan; b > 0; b--) {
+ if (0 == (line & 0x01) && line / 8 == b - 1)
+ *p++ = 0x03 << (line & 0x06);
+ else
+ *p++ = 0x00;
+ }
+ }
+
+ switch (epd->border_byte) {
+ case REPAPER_BORDER_BYTE_NONE:
+ break;
+
+ case REPAPER_BORDER_BYTE_ZERO:
+ *p++ = 0x00;
+ break;
+
+ case REPAPER_BORDER_BYTE_SET:
+ switch (stage) {
+ case REPAPER_COMPENSATE:
+ case REPAPER_WHITE:
+ case REPAPER_INVERSE:
+ *p++ = 0x00;
+ break;
+ case REPAPER_NORMAL:
+ *p++ = 0xaa;
+ break;
+ }
+ break;
+ }
+
+ repaper_write_buf(epd->spi, 0x0a, epd->line_buffer,
+ p - epd->line_buffer);
+
+ /* Output data to panel */
+ repaper_write_val(epd->spi, 0x02, 0x07);
+
+ repaper_spi_mosi_low(epd->spi);
+}
+
+static void repaper_frame_fixed(struct repaper_epd *epd, u8 fixed_value,
+ enum repaper_stage stage)
+{
+ unsigned int line;
+
+ for (line = 0; line < epd->height; line++)
+ repaper_one_line(epd, line, NULL, fixed_value, NULL, stage);
+}
+
+static void repaper_frame_data(struct repaper_epd *epd, const u8 *image,
+ const u8 *mask, enum repaper_stage stage)
+{
+ unsigned int line;
+
+ if (!mask) {
+ for (line = 0; line < epd->height; line++) {
+ repaper_one_line(epd, line,
+ &image[line * (epd->width / 8)],
+ 0, NULL, stage);
+ }
+ } else {
+ for (line = 0; line < epd->height; line++) {
+ size_t n = line * epd->width / 8;
+
+ repaper_one_line(epd, line, &image[n], 0, &mask[n],
+ stage);
+ }
+ }
+}
+
+static void repaper_frame_fixed_repeat(struct repaper_epd *epd, u8 fixed_value,
+ enum repaper_stage stage)
+{
+ u64 start = local_clock();
+ u64 end = start + (epd->factored_stage_time * 1000 * 1000);
+
+ do {
+ repaper_frame_fixed(epd, fixed_value, stage);
+ } while (local_clock() < end);
+}
+
+static void repaper_frame_data_repeat(struct repaper_epd *epd, const u8 *image,
+ const u8 *mask, enum repaper_stage stage)
+{
+ u64 start = local_clock();
+ u64 end = start + (epd->factored_stage_time * 1000 * 1000);
+
+ do {
+ repaper_frame_data(epd, image, mask, stage);
+ } while (local_clock() < end);
+}
+
+static void repaper_get_temperature(struct repaper_epd *epd)
+{
+ int ret, temperature = 0;
+ unsigned int factor10x;
+
+ if (!epd->thermal)
+ return;
+
+ ret = thermal_zone_get_temp(epd->thermal, &temperature);
+ if (ret) {
+ dev_err(&epd->spi->dev, "Failed to get temperature (%d)\n",
+ ret);
+ return;
+ }
+
+ temperature /= 1000;
+
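+ /*
+ * factor10x scales the nominal stage time in tenths: colder panels
+ * need longer refresh stages (e.g. 17x at -10C, 0.7x above 40C).
+ */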
+ if (temperature <= -10)
+ factor10x = 170;
+ else if (temperature <= -5)
+ factor10x = 120;
+ else if (temperature <= 5)
+ factor10x = 80;
+ else if (temperature <= 10)
+ factor10x = 40;
+ else if (temperature <= 15)
+ factor10x = 30;
+ else if (temperature <= 20)
+ factor10x = 20;
+ else if (temperature <= 40)
+ factor10x = 10;
+ else
+ factor10x = 7;
+
+ epd->factored_stage_time = epd->stage_time * factor10x / 10;
+}
+
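+/*
+ * In-place conversion: threshold 8-bit grayscale at 0x80 and pack eight
+ * pixels per byte, least significant bit first.
+ */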
+static void repaper_gray8_to_mono_reversed(u8 *buf, u32 width, u32 height)
+{
+ u8 *gray8 = buf, *mono = buf;
+ int y, xb, i;
+
+ for (y = 0; y < height; y++)
+ for (xb = 0; xb < width / 8; xb++) {
+ u8 byte = 0x00;
+
+ for (i = 0; i < 8; i++) {
+ int x = xb * 8 + i;
+
+ byte >>= 1;
+ if (gray8[y * width + x] >> 7)
+ byte |= BIT(7);
+ }
+ *mono++ = byte;
+ }
+}
+
+static int repaper_fb_dirty(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int flags, unsigned int color,
+ struct drm_clip_rect *clips,
+ unsigned int num_clips)
+{
+ struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+ struct dma_buf_attachment *import_attach = cma_obj->base.import_attach;
+ struct tinydrm_device *tdev = fb->dev->dev_private;
+ struct repaper_epd *epd = epd_from_tinydrm(tdev);
+ struct drm_clip_rect clip;
+ u8 *buf = NULL;
+ int ret = 0;
+
+ /* repaper can't do partial updates */
+ clip.x1 = 0;
+ clip.x2 = fb->width;
+ clip.y1 = 0;
+ clip.y2 = fb->height;
+
+ mutex_lock(&tdev->dirty_lock);
+
+ if (!epd->enabled)
+ goto out_unlock;
+
+ /* fbdev can flush even when we're not interested */
+ if (tdev->pipe.plane.fb != fb)
+ goto out_unlock;
+
+ repaper_get_temperature(epd);
+
+ DRM_DEBUG("Flushing [FB:%d] st=%ums\n", fb->base.id,
+ epd->factored_stage_time);
+
+ buf = kmalloc(fb->width * fb->height, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+ if (import_attach) {
+ ret = dma_buf_begin_cpu_access(import_attach->dmabuf,
+ DMA_FROM_DEVICE);
+ if (ret)
+ goto out_unlock;
+ }
+
+ tinydrm_xrgb8888_to_gray8(buf, cma_obj->vaddr, fb, &clip);
+
+ if (import_attach) {
+ ret = dma_buf_end_cpu_access(import_attach->dmabuf,
+ DMA_FROM_DEVICE);
+ if (ret)
+ goto out_unlock;
+ }
+
+ repaper_gray8_to_mono_reversed(buf, fb->width, fb->height);
+
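+ /*
+ * Three update paths: a partial update diffed against the previous
+ * frame, a full four-stage update once a frame has already been shown,
+ * and an initial clear-to-white followed by the image.
+ */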
+ if (epd->partial) {
+ repaper_frame_data_repeat(epd, buf, epd->current_frame,
+ REPAPER_NORMAL);
+ } else if (epd->cleared) {
+ repaper_frame_data_repeat(epd, epd->current_frame, NULL,
+ REPAPER_COMPENSATE);
+ repaper_frame_data_repeat(epd, epd->current_frame, NULL,
+ REPAPER_WHITE);
+ repaper_frame_data_repeat(epd, buf, NULL, REPAPER_INVERSE);
+ repaper_frame_data_repeat(epd, buf, NULL, REPAPER_NORMAL);
+
+ epd->partial = true;
+ } else {
+ /* Clear display (anything -> white) */
+ repaper_frame_fixed_repeat(epd, 0xff, REPAPER_COMPENSATE);
+ repaper_frame_fixed_repeat(epd, 0xff, REPAPER_WHITE);
+ repaper_frame_fixed_repeat(epd, 0xaa, REPAPER_INVERSE);
+ repaper_frame_fixed_repeat(epd, 0xaa, REPAPER_NORMAL);
+
+ /* Assuming a clear (white) screen, output an image */
+ repaper_frame_fixed_repeat(epd, 0xaa, REPAPER_COMPENSATE);
+ repaper_frame_fixed_repeat(epd, 0xaa, REPAPER_WHITE);
+ repaper_frame_data_repeat(epd, buf, NULL, REPAPER_INVERSE);
+ repaper_frame_data_repeat(epd, buf, NULL, REPAPER_NORMAL);
+
+ epd->cleared = true;
+ epd->partial = true;
+ }
+
+ memcpy(epd->current_frame, buf, fb->width * fb->height / 8);
+
+ /*
+ * An extra frame write is needed if pixels are set in the bottom line,
+ * or else grey lines rise up from the pixels.
+ */
+ if (epd->pre_border_byte) {
+ unsigned int x;
+
+ for (x = 0; x < (fb->width / 8); x++)
+ if (buf[x + (fb->width * (fb->height - 1) / 8)]) {
+ repaper_frame_data_repeat(epd, buf,
+ epd->current_frame,
+ REPAPER_NORMAL);
+ break;
+ }
+ }
+
+out_unlock:
+ mutex_unlock(&tdev->dirty_lock);
+
+ if (ret)
+ dev_err(fb->dev->dev, "Failed to update display (%d)\n", ret);
+ kfree(buf);
+
+ return ret;
+}
+
+static const struct drm_framebuffer_funcs repaper_fb_funcs = {
+ .destroy = drm_fb_cma_destroy,
+ .create_handle = drm_fb_cma_create_handle,
+ .dirty = repaper_fb_dirty,
+};
+
+static void power_off(struct repaper_epd *epd)
+{
+ /* Turn off power and all signals */
+ gpiod_set_value_cansleep(epd->reset, 0);
+ gpiod_set_value_cansleep(epd->panel_on, 0);
+ if (epd->border)
+ gpiod_set_value_cansleep(epd->border, 0);
+
+ /* Ensure SPI MOSI and CLOCK are Low before CS Low */
+ repaper_spi_mosi_low(epd->spi);
+
+ /* Discharge pulse */
+ gpiod_set_value_cansleep(epd->discharge, 1);
+ msleep(150);
+ gpiod_set_value_cansleep(epd->discharge, 0);
+}
+
+static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state)
+{
+ struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
+ struct repaper_epd *epd = epd_from_tinydrm(tdev);
+ struct spi_device *spi = epd->spi;
+ struct device *dev = &spi->dev;
+ bool dc_ok = false;
+ int i, ret;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ /* Power up sequence */
+ gpiod_set_value_cansleep(epd->reset, 0);
+ gpiod_set_value_cansleep(epd->panel_on, 0);
+ gpiod_set_value_cansleep(epd->discharge, 0);
+ if (epd->border)
+ gpiod_set_value_cansleep(epd->border, 0);
+ repaper_spi_mosi_low(spi);
+ usleep_range(5000, 10000);
+
+ gpiod_set_value_cansleep(epd->panel_on, 1);
+ /*
+ * This delay comes from the repaper.org userspace driver; it's not
+ * mentioned in the datasheet.
+ */
+ usleep_range(10000, 15000);
+ gpiod_set_value_cansleep(epd->reset, 1);
+ if (epd->border)
+ gpiod_set_value_cansleep(epd->border, 1);
+ usleep_range(5000, 10000);
+ gpiod_set_value_cansleep(epd->reset, 0);
+ usleep_range(5000, 10000);
+ gpiod_set_value_cansleep(epd->reset, 1);
+ usleep_range(5000, 10000);
+
+ /* Wait for COG to become ready */
+ for (i = 100; i > 0; i--) {
+ if (!gpiod_get_value_cansleep(epd->busy))
+ break;
+
+ usleep_range(10, 100);
+ }
+
+ if (!i) {
+ dev_err(dev, "timeout waiting for panel to become ready.\n");
+ power_off(epd);
+ return;
+ }
+
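+ /* The result of the first ID read is discarded; only the second is checked. */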
+ repaper_read_id(spi);
+ ret = repaper_read_id(spi);
+ if (ret != REPAPER_RID_G2_COG_ID) {
+ if (ret < 0)
+ dev_err(dev, "failed to read chip (%d)\n", ret);
+ else
+ dev_err(dev, "wrong COG ID 0x%02x\n", ret);
+ power_off(epd);
+ return;
+ }
+
+ /* Disable OE */
+ repaper_write_val(spi, 0x02, 0x40);
+
+ ret = repaper_read_val(spi, 0x0f);
+ if (ret < 0 || !(ret & 0x80)) {
+ if (ret < 0)
+ dev_err(dev, "failed to read chip (%d)\n", ret);
+ else
+ dev_err(dev, "panel is reported broken\n");
+ power_off(epd);
+ return;
+ }
+
+ /* Power saving mode */
+ repaper_write_val(spi, 0x0b, 0x02);
+ /* Channel select */
+ repaper_write_buf(spi, 0x01, epd->channel_select, 8);
+ /* High power mode osc */
+ repaper_write_val(spi, 0x07, 0xd1);
+ /* Power setting */
+ repaper_write_val(spi, 0x08, 0x02);
+ /* Vcom level */
+ repaper_write_val(spi, 0x09, 0xc2);
+ /* Power setting */
+ repaper_write_val(spi, 0x04, 0x03);
+ /* Driver latch on */
+ repaper_write_val(spi, 0x03, 0x01);
+ /* Driver latch off */
+ repaper_write_val(spi, 0x03, 0x00);
+ usleep_range(5000, 10000);
+
+ /* Start chargepump */
+ for (i = 0; i < 4; ++i) {
+ /* Charge pump positive voltage on - VGH/VDL on */
+ repaper_write_val(spi, 0x05, 0x01);
+ msleep(240);
+
+ /* Charge pump negative voltage on - VGL/VDL on */
+ repaper_write_val(spi, 0x05, 0x03);
+ msleep(40);
+
+ /* Charge pump Vcom on - Vcom driver on */
+ repaper_write_val(spi, 0x05, 0x0f);
+ msleep(40);
+
+ /* check DC/DC */
+ ret = repaper_read_val(spi, 0x0f);
+ if (ret < 0) {
+ dev_err(dev, "failed to read chip (%d)\n", ret);
+ power_off(epd);
+ return;
+ }
+
+ if (ret & 0x40) {
+ dc_ok = true;
+ break;
+ }
+ }
+
+ if (!dc_ok) {
+ dev_err(dev, "dc/dc failed\n");
+ power_off(epd);
+ return;
+ }
+
+ /*
+ * Output enable to disable
+ * The userspace driver sets this to 0x04, but the datasheet says 0x06
+ */
+ repaper_write_val(spi, 0x02, 0x04);
+
+ epd->enabled = true;
+ epd->partial = false;
+}
+
+static void repaper_pipe_disable(struct drm_simple_display_pipe *pipe)
+{
+ struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
+ struct repaper_epd *epd = epd_from_tinydrm(tdev);
+ struct spi_device *spi = epd->spi;
+ unsigned int line;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ mutex_lock(&tdev->dirty_lock);
+ epd->enabled = false;
+ mutex_unlock(&tdev->dirty_lock);
+
+ /* "Nothing" frame: all-zero data, which leaves every pixel unchanged */
+ for (line = 0; line < epd->height; line++)
+ repaper_one_line(epd, 0x7fffu, NULL, 0x00, NULL,
+ REPAPER_COMPENSATE);
+
+ /* 2.7" */
+ if (epd->border) {
+ /* Dummy line */
+ repaper_one_line(epd, 0x7fffu, NULL, 0x00, NULL,
+ REPAPER_COMPENSATE);
+ msleep(25);
+ gpiod_set_value_cansleep(epd->border, 0);
+ msleep(200);
+ gpiod_set_value_cansleep(epd->border, 1);
+ } else {
+ /* Border dummy line */
+ repaper_one_line(epd, 0x7fffu, NULL, 0x00, NULL,
+ REPAPER_NORMAL);
+ msleep(200);
+ }
+
+ /* not described in datasheet */
+ repaper_write_val(spi, 0x0b, 0x00);
+ /* Latch reset turn on */
+ repaper_write_val(spi, 0x03, 0x01);
+ /* Power off charge pump Vcom */
+ repaper_write_val(spi, 0x05, 0x03);
+ /* Power off charge pump neg voltage */
+ repaper_write_val(spi, 0x05, 0x01);
+ msleep(120);
+ /* Discharge internal */
+ repaper_write_val(spi, 0x04, 0x80);
+ /* turn off all charge pumps */
+ repaper_write_val(spi, 0x05, 0x00);
+ /* Turn off osc */
+ repaper_write_val(spi, 0x07, 0x01);
+ msleep(50);
+
+ power_off(epd);
+}
+
+static const struct drm_simple_display_pipe_funcs repaper_pipe_funcs = {
+ .enable = repaper_pipe_enable,
+ .disable = repaper_pipe_disable,
+ .update = tinydrm_display_pipe_update,
+ .prepare_fb = tinydrm_display_pipe_prepare_fb,
+};
+
+static const uint32_t repaper_formats[] = {
+ DRM_FORMAT_XRGB8888,
+};
+
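+/*
+ * Each supported panel has a display mode and an 8-byte channel select
+ * value that is written to COG register 0x01 during power up.
+ */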
+static const struct drm_display_mode repaper_e1144cs021_mode = {
+ TINYDRM_MODE(128, 96, 29, 22),
+};
+
+static const u8 repaper_e1144cs021_cs[] = { 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x0f, 0xff, 0x00 };
+
+static const struct drm_display_mode repaper_e1190cs021_mode = {
+ TINYDRM_MODE(144, 128, 36, 32),
+};
+
+static const u8 repaper_e1190cs021_cs[] = { 0x00, 0x00, 0x00, 0x03,
+ 0xfc, 0x00, 0x00, 0xff };
+
+static const struct drm_display_mode repaper_e2200cs021_mode = {
+ TINYDRM_MODE(200, 96, 46, 22),
+};
+
+static const u8 repaper_e2200cs021_cs[] = { 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0xff, 0xe0, 0x00 };
+
+static const struct drm_display_mode repaper_e2271cs021_mode = {
+ TINYDRM_MODE(264, 176, 57, 38),
+};
+
+static const u8 repaper_e2271cs021_cs[] = { 0x00, 0x00, 0x00, 0x7f,
+ 0xff, 0xfe, 0x00, 0x00 };
+
+DEFINE_DRM_GEM_CMA_FOPS(repaper_fops);
+
+static struct drm_driver repaper_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
+ DRIVER_ATOMIC,
+ .fops = &repaper_fops,
+ TINYDRM_GEM_DRIVER_OPS,
+ .name = "repaper",
+ .desc = "Pervasive Displays RePaper e-ink panels",
+ .date = "20170405",
+ .major = 1,
+ .minor = 0,
+};
+
+static const struct of_device_id repaper_of_match[] = {
+ { .compatible = "pervasive,e1144cs021", .data = (void *)E1144CS021 },
+ { .compatible = "pervasive,e1190cs021", .data = (void *)E1190CS021 },
+ { .compatible = "pervasive,e2200cs021", .data = (void *)E2200CS021 },
+ { .compatible = "pervasive,e2271cs021", .data = (void *)E2271CS021 },
+ {},
+};
+MODULE_DEVICE_TABLE(of, repaper_of_match);
+
+static const struct spi_device_id repaper_id[] = {
+ { "e1144cs021", E1144CS021 },
+ { "e1190cs021", E1190CS021 },
+ { "e2200cs021", E2200CS021 },
+ { "e2271cs021", E2271CS021 },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, repaper_id);
+
+static int repaper_probe(struct spi_device *spi)
+{
+ const struct drm_display_mode *mode;
+ const struct spi_device_id *spi_id;
+ const struct of_device_id *match;
+ struct device *dev = &spi->dev;
+ struct tinydrm_device *tdev;
+ enum repaper_model model;
+ const char *thermal_zone;
+ struct repaper_epd *epd;
+ size_t line_buffer_size;
+ int ret;
+
+ match = of_match_device(repaper_of_match, dev);
+ if (match) {
+ model = (enum repaper_model)match->data;
+ } else {
+ spi_id = spi_get_device_id(spi);
+ model = spi_id->driver_data;
+ }
+
+ /* The SPI device is used to allocate DMA memory */
+ if (!dev->coherent_dma_mask) {
+ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_warn(dev, "Failed to set dma mask %d\n", ret);
+ return ret;
+ }
+ }
+
+ epd = devm_kzalloc(dev, sizeof(*epd), GFP_KERNEL);
+ if (!epd)
+ return -ENOMEM;
+
+ epd->spi = spi;
+
+ epd->panel_on = devm_gpiod_get(dev, "panel-on", GPIOD_OUT_LOW);
+ if (IS_ERR(epd->panel_on)) {
+ ret = PTR_ERR(epd->panel_on);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get gpio 'panel-on'\n");
+ return ret;
+ }
+
+ epd->discharge = devm_gpiod_get(dev, "discharge", GPIOD_OUT_LOW);
+ if (IS_ERR(epd->discharge)) {
+ ret = PTR_ERR(epd->discharge);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get gpio 'discharge'\n");
+ return ret;
+ }
+
+ epd->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(epd->reset)) {
+ ret = PTR_ERR(epd->reset);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get gpio 'reset'\n");
+ return ret;
+ }
+
+ epd->busy = devm_gpiod_get(dev, "busy", GPIOD_IN);
+ if (IS_ERR(epd->busy)) {
+ ret = PTR_ERR(epd->busy);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get gpio 'busy'\n");
+ return ret;
+ }
+
+ if (!device_property_read_string(dev, "pervasive,thermal-zone",
+ &thermal_zone)) {
+ epd->thermal = thermal_zone_get_zone_by_name(thermal_zone);
+ if (IS_ERR(epd->thermal)) {
+ dev_err(dev, "Failed to get thermal zone: %s\n",
+ thermal_zone);
+ return PTR_ERR(epd->thermal);
+ }
+ }
+
+ switch (model) {
+ case E1144CS021:
+ mode = &repaper_e1144cs021_mode;
+ epd->channel_select = repaper_e1144cs021_cs;
+ epd->stage_time = 480;
+ epd->bytes_per_scan = 96 / 4;
+ epd->middle_scan = true; /* data-scan-data */
+ epd->pre_border_byte = false;
+ epd->border_byte = REPAPER_BORDER_BYTE_ZERO;
+ break;
+
+ case E1190CS021:
+ mode = &repaper_e1190cs021_mode;
+ epd->channel_select = repaper_e1190cs021_cs;
+ epd->stage_time = 480;
+ epd->bytes_per_scan = 128 / 4 / 2;
+ epd->middle_scan = false; /* scan-data-scan */
+ epd->pre_border_byte = false;
+ epd->border_byte = REPAPER_BORDER_BYTE_SET;
+ break;
+
+ case E2200CS021:
+ mode = &repaper_e2200cs021_mode;
+ epd->channel_select = repaper_e2200cs021_cs;
+ epd->stage_time = 480;
+ epd->bytes_per_scan = 96 / 4;
+ epd->middle_scan = true; /* data-scan-data */
+ epd->pre_border_byte = true;
+ epd->border_byte = REPAPER_BORDER_BYTE_NONE;
+ break;
+
+ case E2271CS021:
+ epd->border = devm_gpiod_get(dev, "border", GPIOD_OUT_LOW);
+ if (IS_ERR(epd->border)) {
+ ret = PTR_ERR(epd->border);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get gpio 'border'\n");
+ return ret;
+ }
+
+ mode = &repaper_e2271cs021_mode;
+ epd->channel_select = repaper_e2271cs021_cs;
+ epd->stage_time = 630;
+ epd->bytes_per_scan = 176 / 4;
+ epd->middle_scan = true; /* data-scan-data */
+ epd->pre_border_byte = true;
+ epd->border_byte = REPAPER_BORDER_BYTE_NONE;
+ break;
+
+ default:
+ return -ENODEV;
+ }
+
+ epd->width = mode->hdisplay;
+ epd->height = mode->vdisplay;
+ epd->factored_stage_time = epd->stage_time;
+
+ line_buffer_size = 2 * epd->width / 8 + epd->bytes_per_scan + 2;
+ epd->line_buffer = devm_kzalloc(dev, line_buffer_size, GFP_KERNEL);
+ if (!epd->line_buffer)
+ return -ENOMEM;
+
+ epd->current_frame = devm_kzalloc(dev, epd->width * epd->height / 8,
+ GFP_KERNEL);
+ if (!epd->current_frame)
+ return -ENOMEM;
+
+ tdev = &epd->tinydrm;
+
+ ret = devm_tinydrm_init(dev, tdev, &repaper_fb_funcs, &repaper_driver);
+ if (ret)
+ return ret;
+
+ ret = tinydrm_display_pipe_init(tdev, &repaper_pipe_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL,
+ repaper_formats,
+ ARRAY_SIZE(repaper_formats), mode, 0);
+ if (ret)
+ return ret;
+
+ drm_mode_config_reset(tdev->drm);
+
+ ret = devm_tinydrm_register(tdev);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, tdev);
+
+ DRM_DEBUG_DRIVER("Initialized %s:%s @%uMHz on minor %d\n",
+ tdev->drm->driver->name, dev_name(dev),
+ spi->max_speed_hz / 1000000,
+ tdev->drm->primary->index);
+
+ return 0;
+}
+
+static void repaper_shutdown(struct spi_device *spi)
+{
+ struct tinydrm_device *tdev = spi_get_drvdata(spi);
+
+ tinydrm_shutdown(tdev);
+}
+
+static struct spi_driver repaper_spi_driver = {
+ .driver = {
+ .name = "repaper",
+ .owner = THIS_MODULE,
+ .of_match_table = repaper_of_match,
+ },
+ .id_table = repaper_id,
+ .probe = repaper_probe,
+ .shutdown = repaper_shutdown,
+};
+module_spi_driver(repaper_spi_driver);
+
+MODULE_DESCRIPTION("Pervasive Displays RePaper DRM driver");
+MODULE_AUTHOR("Noralf Trønnes");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tinydrm/st7586.c b/drivers/gpu/drm/tinydrm/st7586.c
new file mode 100644
index 000000000000..b439956a07f4
--- /dev/null
+++ b/drivers/gpu/drm/tinydrm/st7586.c
@@ -0,0 +1,428 @@
+/*
+ * DRM driver for Sitronix ST7586 panels
+ *
+ * Copyright 2017 David Lechner <david@lechnology.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-buf.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/spi/spi.h>
+#include <video/mipi_display.h>
+
+#include <drm/tinydrm/mipi-dbi.h>
+#include <drm/tinydrm/tinydrm-helpers.h>
+
+/* controller-specific commands */
+#define ST7586_DISP_MODE_GRAY 0x38
+#define ST7586_DISP_MODE_MONO 0x39
+#define ST7586_ENABLE_DDRAM 0x3a
+#define ST7586_SET_DISP_DUTY 0xb0
+#define ST7586_SET_PART_DISP 0xb4
+#define ST7586_SET_NLINE_INV 0xb5
+#define ST7586_SET_VOP 0xc0
+#define ST7586_SET_BIAS_SYSTEM 0xc3
+#define ST7586_SET_BOOST_LEVEL 0xc4
+#define ST7586_SET_VOP_OFFSET 0xc7
+#define ST7586_ENABLE_ANALOG 0xd0
+#define ST7586_AUTO_READ_CTRL 0xd7
+#define ST7586_OTP_RW_CTRL 0xe0
+#define ST7586_OTP_CTRL_OUT 0xe1
+#define ST7586_OTP_READ 0xe3
+
+#define ST7586_DISP_CTRL_MX BIT(6)
+#define ST7586_DISP_CTRL_MY BIT(7)
+
+/*
+ * The ST7586 controller has an unusual pixel format where 2bpp grayscale is
+ * packed 3 pixels per byte with the first two pixels using 3 bits and the 3rd
+ * pixel using only 2 bits.
+ *
+ * | D7 | D6 | D5 || | || 2bpp |
+ * | (D4) | (D3) | (D2) || D1 | D0 || GRAY |
+ * +------+------+------++------+------++------+
+ * | 1 | 1 | 1 || 1 | 1 || 0 0 | black
+ * | 1 | 0 | 0 || 1 | 0 || 0 1 | dark gray
+ * | 0 | 1 | 0 || 0 | 1 || 1 0 | light gray
+ * | 0 | 0 | 0 || 0 | 0 || 1 1 | white
+ */
+
+static const u8 st7586_lookup[] = { 0x7, 0x4, 0x2, 0x0 };
+
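+/*
+ * Convert a clip rectangle from XRGB8888 to the 3-pixels-per-byte format:
+ * reduce to 8-bit grayscale first, then map the top two bits of each pixel
+ * through st7586_lookup and pack three pixels into each output byte.
+ */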
+static void st7586_xrgb8888_to_gray332(u8 *dst, void *vaddr,
+ struct drm_framebuffer *fb,
+ struct drm_clip_rect *clip)
+{
+ size_t len = (clip->x2 - clip->x1) * (clip->y2 - clip->y1);
+ unsigned int x, y;
+ u8 *src, *buf, val;
+
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ tinydrm_xrgb8888_to_gray8(buf, vaddr, fb, clip);
+ src = buf;
+
+ for (y = clip->y1; y < clip->y2; y++) {
+ for (x = clip->x1; x < clip->x2; x += 3) {
+ val = st7586_lookup[*src++ >> 6] << 5;
+ val |= st7586_lookup[*src++ >> 6] << 2;
+ val |= st7586_lookup[*src++ >> 6] >> 1;
+ *dst++ = val;
+ }
+ }
+
+ kfree(buf);
+}
+
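+/*
+ * Copy the framebuffer into the transmit buffer, bracketing the CPU copy
+ * with dma-buf begin/end CPU access calls when the buffer is imported.
+ */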
+static int st7586_buf_copy(void *dst, struct drm_framebuffer *fb,
+ struct drm_clip_rect *clip)
+{
+ struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+ struct dma_buf_attachment *import_attach = cma_obj->base.import_attach;
+ void *src = cma_obj->vaddr;
+ int ret = 0;
+
+ if (import_attach) {
+ ret = dma_buf_begin_cpu_access(import_attach->dmabuf,
+ DMA_FROM_DEVICE);
+ if (ret)
+ return ret;
+ }
+
+ st7586_xrgb8888_to_gray332(dst, src, fb, clip);
+
+ if (import_attach)
+ ret = dma_buf_end_cpu_access(import_attach->dmabuf,
+ DMA_FROM_DEVICE);
+
+ return ret;
+}
+
+static int st7586_fb_dirty(struct drm_framebuffer *fb,
+ struct drm_file *file_priv, unsigned int flags,
+ unsigned int color, struct drm_clip_rect *clips,
+ unsigned int num_clips)
+{
+ struct tinydrm_device *tdev = fb->dev->dev_private;
+ struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ struct drm_clip_rect clip;
+ int start, end;
+ int ret = 0;
+
+ mutex_lock(&tdev->dirty_lock);
+
+ if (!mipi->enabled)
+ goto out_unlock;
+
+ /* fbdev can flush even when we're not interested */
+ if (tdev->pipe.plane.fb != fb)
+ goto out_unlock;
+
+ tinydrm_merge_clips(&clip, clips, num_clips, flags, fb->width,
+ fb->height);
+
+ /* 3 pixels per byte, so grow clip to nearest multiple of 3 */
+ clip.x1 = rounddown(clip.x1, 3);
+ clip.x2 = roundup(clip.x2, 3);
+
+ DRM_DEBUG("Flushing [FB:%d] x1=%u, x2=%u, y1=%u, y2=%u\n", fb->base.id,
+ clip.x1, clip.x2, clip.y1, clip.y2);
+
+ ret = st7586_buf_copy(mipi->tx_buf, fb, &clip);
+ if (ret)
+ goto out_unlock;
+
+ /* Pixels are packed 3 per byte */
+ start = clip.x1 / 3;
+ end = clip.x2 / 3;
+
+ mipi_dbi_command(mipi, MIPI_DCS_SET_COLUMN_ADDRESS,
+ (start >> 8) & 0xFF, start & 0xFF,
+ (end >> 8) & 0xFF, (end - 1) & 0xFF);
+ mipi_dbi_command(mipi, MIPI_DCS_SET_PAGE_ADDRESS,
+ (clip.y1 >> 8) & 0xFF, clip.y1 & 0xFF,
+ (clip.y2 >> 8) & 0xFF, (clip.y2 - 1) & 0xFF);
+
+ ret = mipi_dbi_command_buf(mipi, MIPI_DCS_WRITE_MEMORY_START,
+ (u8 *)mipi->tx_buf,
+ (end - start) * (clip.y2 - clip.y1));
+
+out_unlock:
+ mutex_unlock(&tdev->dirty_lock);
+
+ if (ret)
+ dev_err_once(fb->dev->dev, "Failed to update display %d\n",
+ ret);
+
+ return ret;
+}
+
+static const struct drm_framebuffer_funcs st7586_fb_funcs = {
+ .destroy = drm_fb_cma_destroy,
+ .create_handle = drm_fb_cma_create_handle,
+ .dirty = st7586_fb_dirty,
+};
+
+static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state)
+{
+ struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
+ struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ struct drm_framebuffer *fb = pipe->plane.fb;
+ struct device *dev = tdev->drm->dev;
+ int ret;
+ u8 addr_mode;
+
+ DRM_DEBUG_KMS("\n");
+
+ mipi_dbi_hw_reset(mipi);
+ ret = mipi_dbi_command(mipi, ST7586_AUTO_READ_CTRL, 0x9f);
+ if (ret) {
+ dev_err(dev, "Error sending command %d\n", ret);
+ return;
+ }
+
+ mipi_dbi_command(mipi, ST7586_OTP_RW_CTRL, 0x00);
+
+ msleep(10);
+
+ mipi_dbi_command(mipi, ST7586_OTP_READ);
+
+ msleep(20);
+
+ mipi_dbi_command(mipi, ST7586_OTP_CTRL_OUT);
+ mipi_dbi_command(mipi, MIPI_DCS_EXIT_SLEEP_MODE);
+ mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_OFF);
+
+ msleep(50);
+
+ mipi_dbi_command(mipi, ST7586_SET_VOP_OFFSET, 0x00);
+ mipi_dbi_command(mipi, ST7586_SET_VOP, 0xe3, 0x00);
+ mipi_dbi_command(mipi, ST7586_SET_BIAS_SYSTEM, 0x02);
+ mipi_dbi_command(mipi, ST7586_SET_BOOST_LEVEL, 0x04);
+ mipi_dbi_command(mipi, ST7586_ENABLE_ANALOG, 0x1d);
+ mipi_dbi_command(mipi, ST7586_SET_NLINE_INV, 0x00);
+ mipi_dbi_command(mipi, ST7586_DISP_MODE_GRAY);
+ mipi_dbi_command(mipi, ST7586_ENABLE_DDRAM, 0x02);
+
+ switch (mipi->rotation) {
+ default:
+ addr_mode = 0x00;
+ break;
+ case 90:
+ addr_mode = ST7586_DISP_CTRL_MY;
+ break;
+ case 180:
+ addr_mode = ST7586_DISP_CTRL_MX | ST7586_DISP_CTRL_MY;
+ break;
+ case 270:
+ addr_mode = ST7586_DISP_CTRL_MX;
+ break;
+ }
+ mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
+
+ mipi_dbi_command(mipi, ST7586_SET_DISP_DUTY, 0x7f);
+ mipi_dbi_command(mipi, ST7586_SET_PART_DISP, 0xa0);
+ mipi_dbi_command(mipi, MIPI_DCS_SET_PARTIAL_AREA, 0x00, 0x00, 0x00, 0x77);
+ mipi_dbi_command(mipi, MIPI_DCS_EXIT_INVERT_MODE);
+
+ msleep(100);
+
+ mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_ON);
+
+ mipi->enabled = true;
+
+ if (fb)
+ fb->funcs->dirty(fb, NULL, 0, 0, NULL, 0);
+}
+
+static void st7586_pipe_disable(struct drm_simple_display_pipe *pipe)
+{
+ struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
+ struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+
+ DRM_DEBUG_KMS("\n");
+
+ if (!mipi->enabled)
+ return;
+
+ mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_OFF);
+ mipi->enabled = false;
+}
+
+static const u32 st7586_formats[] = {
+ DRM_FORMAT_XRGB8888,
+};
+
+static int st7586_init(struct device *dev, struct mipi_dbi *mipi,
+ const struct drm_simple_display_pipe_funcs *pipe_funcs,
+ struct drm_driver *driver, const struct drm_display_mode *mode,
+ unsigned int rotation)
+{
+ size_t bufsize = (mode->vdisplay + 2) / 3 * mode->hdisplay;
+ struct tinydrm_device *tdev = &mipi->tinydrm;
+ int ret;
+
+ mutex_init(&mipi->cmdlock);
+
+ mipi->tx_buf = devm_kmalloc(dev, bufsize, GFP_KERNEL);
+ if (!mipi->tx_buf)
+ return -ENOMEM;
+
+ ret = devm_tinydrm_init(dev, tdev, &st7586_fb_funcs, driver);
+ if (ret)
+ return ret;
+
+ ret = tinydrm_display_pipe_init(tdev, pipe_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL,
+ st7586_formats,
+ ARRAY_SIZE(st7586_formats),
+ mode, rotation);
+ if (ret)
+ return ret;
+
+ tdev->drm->mode_config.preferred_depth = 32;
+ mipi->rotation = rotation;
+
+ drm_mode_config_reset(tdev->drm);
+
+ DRM_DEBUG_KMS("preferred_depth=%u, rotation = %u\n",
+ tdev->drm->mode_config.preferred_depth, rotation);
+
+ return 0;
+}
+
+static const struct drm_simple_display_pipe_funcs st7586_pipe_funcs = {
+ .enable = st7586_pipe_enable,
+ .disable = st7586_pipe_disable,
+ .update = tinydrm_display_pipe_update,
+ .prepare_fb = tinydrm_display_pipe_prepare_fb,
+};
+
+static const struct drm_display_mode st7586_mode = {
+ TINYDRM_MODE(178, 128, 37, 27),
+};
+
+DEFINE_DRM_GEM_CMA_FOPS(st7586_fops);
+
+static struct drm_driver st7586_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
+ DRIVER_ATOMIC,
+ .fops = &st7586_fops,
+ TINYDRM_GEM_DRIVER_OPS,
+ .lastclose = tinydrm_lastclose,
+ .debugfs_init = mipi_dbi_debugfs_init,
+ .name = "st7586",
+ .desc = "Sitronix ST7586",
+ .date = "20170801",
+ .major = 1,
+ .minor = 0,
+};
+
+static const struct of_device_id st7586_of_match[] = {
+ { .compatible = "lego,ev3-lcd" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, st7586_of_match);
+
+static const struct spi_device_id st7586_id[] = {
+ { "ev3-lcd", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, st7586_id);
+
+static int st7586_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct tinydrm_device *tdev;
+ struct mipi_dbi *mipi;
+ struct gpio_desc *a0;
+ u32 rotation = 0;
+ int ret;
+
+ mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL);
+ if (!mipi)
+ return -ENOMEM;
+
+ mipi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(mipi->reset)) {
+ dev_err(dev, "Failed to get gpio 'reset'\n");
+ return PTR_ERR(mipi->reset);
+ }
+
+ a0 = devm_gpiod_get(dev, "a0", GPIOD_OUT_LOW);
+ if (IS_ERR(a0)) {
+ dev_err(dev, "Failed to get gpio 'a0'\n");
+ return PTR_ERR(a0);
+ }
+
+ device_property_read_u32(dev, "rotation", &rotation);
+
+ ret = mipi_dbi_spi_init(spi, mipi, a0);
+ if (ret)
+ return ret;
+
+ /* Cannot read from this controller via SPI */
+ mipi->read_commands = NULL;
+
+ /*
+ * We are using 8-bit data, so we are not actually swapping anything,
+ * but setting mipi->swap_bytes makes mipi_dbi_typec3_command() do the
+ * right thing and not use 16-bit transfers (which results in swapped
+ * bytes on little-endian systems and causes out of order data to be
+ * sent to the display).
+ */
+ mipi->swap_bytes = true;
+
+ ret = st7586_init(&spi->dev, mipi, &st7586_pipe_funcs, &st7586_driver,
+ &st7586_mode, rotation);
+ if (ret)
+ return ret;
+
+ tdev = &mipi->tinydrm;
+
+ ret = devm_tinydrm_register(tdev);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, mipi);
+
+ DRM_DEBUG_DRIVER("Initialized %s:%s @%uMHz on minor %d\n",
+ tdev->drm->driver->name, dev_name(dev),
+ spi->max_speed_hz / 1000000,
+ tdev->drm->primary->index);
+
+ return 0;
+}
+
+static void st7586_shutdown(struct spi_device *spi)
+{
+ struct mipi_dbi *mipi = spi_get_drvdata(spi);
+
+ tinydrm_shutdown(&mipi->tinydrm);
+}
+
+static struct spi_driver st7586_spi_driver = {
+ .driver = {
+ .name = "st7586",
+ .owner = THIS_MODULE,
+ .of_match_table = st7586_of_match,
+ },
+ .id_table = st7586_id,
+ .probe = st7586_probe,
+ .shutdown = st7586_shutdown,
+};
+module_spi_driver(st7586_spi_driver);
+
+MODULE_DESCRIPTION("Sitronix ST7586 DRM driver");
+MODULE_AUTHOR("David Lechner <david@lechnology.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 22b57020790d..cba11f13d994 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -70,6 +70,7 @@ static inline int ttm_mem_type_from_place(const struct ttm_place *place,
static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+ struct drm_printer p = drm_debug_printer(TTM_PFX);
pr_err(" has_type: %d\n", man->has_type);
pr_err(" use_type: %d\n", man->use_type);
@@ -79,7 +80,7 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
pr_err(" available_caching: 0x%08X\n", man->available_caching);
pr_err(" default_caching: 0x%08X\n", man->default_caching);
if (mem_type != TTM_PL_SYSTEM)
- (*man->func->debug)(man, TTM_PFX);
+ (*man->func->debug)(man, &p);
}
static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
@@ -394,14 +395,33 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
ww_mutex_unlock (&bo->resv->lock);
}
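+/*
+ * Give the BO its own reservation object by copying the fences from the
+ * (possibly shared) bo->resv into the embedded bo->ttm_resv, so that
+ * delayed destruction can be tracked independently.
+ */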
+static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
+{
+ int r;
+
+ if (bo->resv == &bo->ttm_resv)
+ return 0;
+
+ reservation_object_init(&bo->ttm_resv);
+ BUG_ON(!reservation_object_trylock(&bo->ttm_resv));
+
+ r = reservation_object_copy_fences(&bo->ttm_resv, bo->resv);
+ if (r) {
+ reservation_object_unlock(&bo->ttm_resv);
+ reservation_object_fini(&bo->ttm_resv);
+ }
+
+ return r;
+}
+
static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
struct reservation_object_list *fobj;
struct dma_fence *fence;
int i;
- fobj = reservation_object_get_list(bo->resv);
- fence = reservation_object_get_excl(bo->resv);
+ fobj = reservation_object_get_list(&bo->ttm_resv);
+ fence = reservation_object_get_excl(&bo->ttm_resv);
if (fence && !fence->ops->signaled)
dma_fence_enable_sw_signaling(fence);
@@ -430,8 +450,19 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
ttm_bo_cleanup_memtype_use(bo);
return;
- } else
- ttm_bo_flush_all_fences(bo);
+ }
+
+ ret = ttm_bo_individualize_resv(bo);
+ if (ret) {
+ /* Last resort: if we fail to allocate memory for the
+ * fences, block until the BO is idle and free it.
+ */
+ spin_unlock(&glob->lru_lock);
+ ttm_bo_wait(bo, true, true);
+ ttm_bo_cleanup_memtype_use(bo);
+ return;
+ }
+ ttm_bo_flush_all_fences(bo);
/*
* Make NO_EVICT bos immediately available to
@@ -443,6 +474,8 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
ttm_bo_add_to_lru(bo);
}
+ if (bo->resv != &bo->ttm_resv)
+ reservation_object_unlock(&bo->ttm_resv);
__ttm_bo_unreserve(bo);
}
@@ -471,17 +504,25 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
bool no_wait_gpu)
{
struct ttm_bo_global *glob = bo->glob;
+ struct reservation_object *resv;
int ret;
- ret = ttm_bo_wait(bo, false, true);
+ if (unlikely(list_empty(&bo->ddestroy)))
+ resv = bo->resv;
+ else
+ resv = &bo->ttm_resv;
+
+ if (reservation_object_test_signaled_rcu(resv, true))
+ ret = 0;
+ else
+ ret = -EBUSY;
if (ret && !no_wait_gpu) {
long lret;
ww_mutex_unlock(&bo->resv->lock);
spin_unlock(&glob->lru_lock);
- lret = reservation_object_wait_timeout_rcu(bo->resv,
- true,
+ lret = reservation_object_wait_timeout_rcu(resv, true,
interruptible,
30 * HZ);
@@ -505,13 +546,6 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
spin_unlock(&glob->lru_lock);
return 0;
}
-
- /*
- * remove sync_obj with ttm_bo_wait, the wait should be
- * finished, and no new wait object should have been added.
- */
- ret = ttm_bo_wait(bo, false, true);
- WARN_ON(ret);
}
if (ret || unlikely(list_empty(&bo->ddestroy))) {
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index 90a6c0b03afc..a7c232dc39cb 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -136,13 +136,12 @@ static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
}
static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
- const char *prefix)
+ struct drm_printer *printer)
{
struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
- struct drm_printer p = drm_debug_printer(prefix);
spin_lock(&rman->lock);
- drm_mm_print(&rman->mm, &p);
+ drm_mm_print(&rman->mm, printer);
spin_unlock(&rman->lock);
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index b442d12f2f7d..c8ebb757e36b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -39,6 +39,7 @@
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>
+#include <linux/mem_encrypt.h>
#define TTM_BO_VM_NUM_PREFAULT 16
@@ -230,9 +231,11 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
* first page.
*/
for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
- if (bo->mem.bus.is_iomem)
+ if (bo->mem.bus.is_iomem) {
+ /* Iomem should not be marked encrypted */
+ cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot);
pfn = bdev->driver->io_mem_pfn(bo, page_offset);
- else {
+ } else {
page = ttm->pages[page_offset];
if (unlikely(!page && i == 0)) {
retval = VM_FAULT_OOM;
@@ -294,10 +297,87 @@ static void ttm_bo_vm_close(struct vm_area_struct *vma)
vma->vm_private_data = NULL;
}
+static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
+ unsigned long offset,
+ void *buf, int len, int write)
+{
+ unsigned long page = offset >> PAGE_SHIFT;
+ unsigned long bytes_left = len;
+ int ret;
+
+ /* Copy a page at a time; that way no extra virtual address
+ * mapping is needed.
+ */
+ offset -= page << PAGE_SHIFT;
+ do {
+ unsigned long bytes = min(bytes_left, PAGE_SIZE - offset);
+ struct ttm_bo_kmap_obj map;
+ void *ptr;
+ bool is_iomem;
+
+ ret = ttm_bo_kmap(bo, page, 1, &map);
+ if (ret)
+ return ret;
+
+ ptr = (uint8_t *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset;
+ WARN_ON_ONCE(is_iomem);
+ if (write)
+ memcpy(ptr, buf, bytes);
+ else
+ memcpy(buf, ptr, bytes);
+ ttm_bo_kunmap(&map);
+
+ page++;
+ bytes_left -= bytes;
+ offset = 0;
+ } while (bytes_left);
+
+ return len;
+}
+
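+/*
+ * .access handler for the VMA, used by access_process_vm() (e.g. ptrace
+ * and /proc/<pid>/mem) to read or write BO-backed mappings.
+ */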
+static int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
+ void *buf, int len, int write)
+{
+ unsigned long offset = (addr) - vma->vm_start;
+ struct ttm_buffer_object *bo = vma->vm_private_data;
+ int ret;
+
+ if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages)
+ return -EIO;
+
+ ret = ttm_bo_reserve(bo, true, false, NULL);
+ if (ret)
+ return ret;
+
+ switch (bo->mem.mem_type) {
+ case TTM_PL_SYSTEM:
+ if (unlikely(bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
+ ret = ttm_tt_swapin(bo->ttm);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+ /* fall through */
+ case TTM_PL_TT:
+ ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write);
+ break;
+ default:
+ if (bo->bdev->driver->access_memory)
+ ret = bo->bdev->driver->access_memory(
+ bo, offset, buf, len, write);
+ else
+ ret = -EIO;
+ }
+
+ ttm_bo_unreserve(bo);
+
+ return ret;
+}
+
static const struct vm_operations_struct ttm_bo_vm_ops = {
.fault = ttm_bo_vm_fault,
.open = ttm_bo_vm_open,
- .close = ttm_bo_vm_close
+ .close = ttm_bo_vm_close,
+ .access = ttm_bo_vm_access
};
static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index eeddc1e48409..871599826773 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -615,7 +615,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
} else {
pr_err("Failed to fill pool (%p)\n", pool);
/* If we have any pages left put them to the pool. */
- list_for_each_entry(p, &pool->list, lru) {
+ list_for_each_entry(p, &new_pages, lru) {
++cpages;
}
list_splice(&new_pages, &pool->list);
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index d2f57c52f7db..9f9a49748d17 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -96,7 +96,7 @@ static int udl_mode_valid(struct drm_connector *connector,
static enum drm_connector_status
udl_detect(struct drm_connector *connector, bool force)
{
- if (drm_device_is_unplugged(connector->dev))
+ if (drm_dev_is_unplugged(connector->dev))
return connector_status_disconnected;
return connector_status_connected;
}
diff --git a/drivers/gpu/drm/udl/udl_dmabuf.c b/drivers/gpu/drm/udl/udl_dmabuf.c
index 2e031a894813..2867ed155ff6 100644
--- a/drivers/gpu/drm/udl/udl_dmabuf.c
+++ b/drivers/gpu/drm/udl/udl_dmabuf.c
@@ -186,7 +186,7 @@ static int udl_dmabuf_mmap(struct dma_buf *dma_buf,
return -EINVAL;
}
-static struct dma_buf_ops udl_dmabuf_ops = {
+static const struct dma_buf_ops udl_dmabuf_ops = {
.attach = udl_attach_dma_buf,
.detach = udl_detach_dma_buf,
.map_dma_buf = udl_map_dma_buf,
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index cd8b01727734..31421b6b586e 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -11,11 +11,6 @@
#include <drm/drm_crtc_helper.h>
#include "udl_drv.h"
-static int udl_driver_set_busid(struct drm_device *d, struct drm_master *m)
-{
- return 0;
-}
-
static int udl_usb_suspend(struct usb_interface *interface,
pm_message_t message)
{
@@ -52,7 +47,6 @@ static struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
.load = udl_driver_load,
.unload = udl_driver_unload,
- .set_busid = udl_driver_set_busid,
/* gem hooks */
.gem_free_object = udl_gem_free_object,
@@ -60,7 +54,6 @@ static struct drm_driver driver = {
.dumb_create = udl_dumb_create,
.dumb_map_offset = udl_gem_mmap,
- .dumb_destroy = drm_gem_dumb_destroy,
.fops = &udl_driver_fops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
@@ -108,7 +101,7 @@ static void udl_usb_disconnect(struct usb_interface *interface)
drm_kms_helper_poll_disable(dev);
udl_fbdev_unplug(dev);
udl_drop_usb(dev);
- drm_unplug_dev(dev);
+ drm_dev_unplug(dev);
}
/*
@@ -118,7 +111,7 @@ static void udl_usb_disconnect(struct usb_interface *interface)
* which is compatible with all known USB 2.0 era graphics chips and firmware,
* but allows DisplayLink to increment those for any future incompatible chips
*/
-static struct usb_device_id id_table[] = {
+static const struct usb_device_id id_table[] = {
{.idVendor = 0x17e9, .bInterfaceClass = 0xff,
.bInterfaceSubClass = 0x00,
.bInterfaceProtocol = 0x00,
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 4a6500362564..b5b335c9b2bb 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/fb.h>
#include <linux/dma-buf.h>
+#include <linux/mem_encrypt.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
@@ -169,6 +170,9 @@ static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
pr_notice("mmap() framebuffer addr:%lu size:%lu\n",
pos, size);
+ /* We don't want the framebuffer to be mapped encrypted */
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
while (size > 0) {
page = vmalloc_to_pfn((void *)pos);
if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED))
@@ -198,7 +202,7 @@ static int udl_fb_open(struct fb_info *info, int user)
struct udl_device *udl = dev->dev_private;
/* If the USB device is gone, we don't accept new opens */
- if (drm_device_is_unplugged(udl->ddev))
+ if (drm_dev_is_unplugged(udl->ddev))
return -ENODEV;
ufbdev->fb_count++;
@@ -309,7 +313,7 @@ static void udl_user_framebuffer_destroy(struct drm_framebuffer *fb)
struct udl_framebuffer *ufb = to_udl_fb(fb);
if (ufb->obj)
- drm_gem_object_unreference_unlocked(&ufb->obj->base);
+ drm_gem_object_put_unlocked(&ufb->obj->base);
drm_framebuffer_cleanup(fb);
kfree(ufb);
@@ -393,7 +397,6 @@ static int udlfb_create(struct drm_fb_helper *helper,
info->fix.smem_len = size;
info->fix.smem_start = (unsigned long)ufbdev->ufb.obj->vmapping;
- info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &udlfb_ops;
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
drm_fb_helper_fill_var(info, &ufbdev->helper, sizes->fb_width, sizes->fb_height);
@@ -404,7 +407,7 @@ static int udlfb_create(struct drm_fb_helper *helper,
return ret;
out_gfree:
- drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base);
+ drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base);
out:
return ret;
}
@@ -420,7 +423,7 @@ static void udl_fbdev_destroy(struct drm_device *dev,
drm_fb_helper_fini(&ufbdev->helper);
drm_framebuffer_unregister_private(&ufbdev->ufb.base);
drm_framebuffer_cleanup(&ufbdev->ufb.base);
- drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base);
+ drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base);
}
int udl_fbdev_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index db9ceceba30e..dee6bd9a3dd1 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -52,7 +52,7 @@ udl_gem_create(struct drm_file *file,
return ret;
}
- drm_gem_object_unreference_unlocked(&obj->base);
+ drm_gem_object_put_unlocked(&obj->base);
*handle_p = handle;
return 0;
}
@@ -234,7 +234,7 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
*offset = drm_vma_node_offset_addr(&gobj->base.vma_node);
out:
- drm_gem_object_unreference(&gobj->base);
+ drm_gem_object_put(&gobj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index a9d93b871a15..0328b2c7b210 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -371,8 +371,6 @@ void udl_driver_unload(struct drm_device *dev)
{
struct udl_device *udl = dev->dev_private;
- drm_vblank_cleanup(dev);
-
if (udl->urbs.count)
udl_free_urb_list(dev);
diff --git a/drivers/gpu/drm/vc4/Kconfig b/drivers/gpu/drm/vc4/Kconfig
index 4361bdcfd28a..fdae18aeab4f 100644
--- a/drivers/gpu/drm/vc4/Kconfig
+++ b/drivers/gpu/drm/vc4/Kconfig
@@ -19,3 +19,11 @@ config DRM_VC4
This driver requires that "avoid_warnings=2" be present in
the config.txt for the firmware, to keep it from smashing
our display setup.
+
+config DRM_VC4_HDMI_CEC
+ bool "Broadcom VC4 HDMI CEC Support"
+ depends on DRM_VC4
+ select CEC_CORE
+ help
+ Choose this option if you have a Broadcom VC4 GPU
+ and want to use CEC.
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 487f96412d35..3afdbf4bc10b 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -24,21 +24,35 @@
#include "vc4_drv.h"
#include "uapi/drm/vc4_drm.h"
+static const char * const bo_type_names[] = {
+ "kernel",
+ "V3D",
+ "V3D shader",
+ "dumb",
+ "binner",
+ "RCL",
+ "BCL",
+ "kernel BO cache",
+};
+
+static bool is_user_label(int label)
+{
+ return label >= VC4_BO_TYPE_COUNT;
+}
+
static void vc4_bo_stats_dump(struct vc4_dev *vc4)
{
- DRM_INFO("num bos allocated: %d\n",
- vc4->bo_stats.num_allocated);
- DRM_INFO("size bos allocated: %dkb\n",
- vc4->bo_stats.size_allocated / 1024);
- DRM_INFO("num bos used: %d\n",
- vc4->bo_stats.num_allocated - vc4->bo_stats.num_cached);
- DRM_INFO("size bos used: %dkb\n",
- (vc4->bo_stats.size_allocated -
- vc4->bo_stats.size_cached) / 1024);
- DRM_INFO("num bos cached: %d\n",
- vc4->bo_stats.num_cached);
- DRM_INFO("size bos cached: %dkb\n",
- vc4->bo_stats.size_cached / 1024);
+ int i;
+
+ for (i = 0; i < vc4->num_labels; i++) {
+ if (!vc4->bo_labels[i].num_allocated)
+ continue;
+
+ DRM_INFO("%30s: %6dkb BOs (%d)\n",
+ vc4->bo_labels[i].name,
+ vc4->bo_labels[i].size_allocated / 1024,
+ vc4->bo_labels[i].num_allocated);
+ }
}
#ifdef CONFIG_DEBUG_FS
@@ -47,64 +61,133 @@ int vc4_bo_stats_debugfs(struct seq_file *m, void *unused)
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct vc4_bo_stats stats;
+ int i;
- /* Take a snapshot of the current stats with the lock held. */
mutex_lock(&vc4->bo_lock);
- stats = vc4->bo_stats;
+ for (i = 0; i < vc4->num_labels; i++) {
+ if (!vc4->bo_labels[i].num_allocated)
+ continue;
+
+ seq_printf(m, "%30s: %6dkb BOs (%d)\n",
+ vc4->bo_labels[i].name,
+ vc4->bo_labels[i].size_allocated / 1024,
+ vc4->bo_labels[i].num_allocated);
+ }
mutex_unlock(&vc4->bo_lock);
- seq_printf(m, "num bos allocated: %d\n",
- stats.num_allocated);
- seq_printf(m, "size bos allocated: %dkb\n",
- stats.size_allocated / 1024);
- seq_printf(m, "num bos used: %d\n",
- stats.num_allocated - stats.num_cached);
- seq_printf(m, "size bos used: %dkb\n",
- (stats.size_allocated - stats.size_cached) / 1024);
- seq_printf(m, "num bos cached: %d\n",
- stats.num_cached);
- seq_printf(m, "size bos cached: %dkb\n",
- stats.size_cached / 1024);
-
return 0;
}
#endif
+/* Takes ownership of *name and returns the appropriate slot for it in
+ * the bo_labels[] array, extending it as necessary.
+ *
+ * This is inefficient and could use a hash table instead of walking
+ * an array and strcmp()ing. However, the assumption is that user
+ * labeling will be infrequent (scanout buffers and other long-lived
+ * objects, or debug driver builds), so we can live with it for now.
+ */
+static int vc4_get_user_label(struct vc4_dev *vc4, const char *name)
+{
+ int i;
+ int free_slot = -1;
+
+ for (i = 0; i < vc4->num_labels; i++) {
+ if (!vc4->bo_labels[i].name) {
+ free_slot = i;
+ } else if (strcmp(vc4->bo_labels[i].name, name) == 0) {
+ kfree(name);
+ return i;
+ }
+ }
+
+ if (free_slot != -1) {
+ WARN_ON(vc4->bo_labels[free_slot].num_allocated != 0);
+ vc4->bo_labels[free_slot].name = name;
+ return free_slot;
+ } else {
+ u32 new_label_count = vc4->num_labels + 1;
+ struct vc4_label *new_labels =
+ krealloc(vc4->bo_labels,
+ new_label_count * sizeof(*new_labels),
+ GFP_KERNEL);
+
+ if (!new_labels) {
+ kfree(name);
+ return -1;
+ }
+
+ free_slot = vc4->num_labels;
+ vc4->bo_labels = new_labels;
+ vc4->num_labels = new_label_count;
+
+ vc4->bo_labels[free_slot].name = name;
+ vc4->bo_labels[free_slot].num_allocated = 0;
+ vc4->bo_labels[free_slot].size_allocated = 0;
+
+ return free_slot;
+ }
+}
+
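+/*
+ * Moves the BO's allocation stats from its current label to @label.
+ * Passing -1 only drops the old label, which is used when the BO is
+ * destroyed.
+ */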
+static void vc4_bo_set_label(struct drm_gem_object *gem_obj, int label)
+{
+ struct vc4_bo *bo = to_vc4_bo(gem_obj);
+ struct vc4_dev *vc4 = to_vc4_dev(gem_obj->dev);
+
+ lockdep_assert_held(&vc4->bo_lock);
+
+ if (label != -1) {
+ vc4->bo_labels[label].num_allocated++;
+ vc4->bo_labels[label].size_allocated += gem_obj->size;
+ }
+
+ vc4->bo_labels[bo->label].num_allocated--;
+ vc4->bo_labels[bo->label].size_allocated -= gem_obj->size;
+
+ if (vc4->bo_labels[bo->label].num_allocated == 0 &&
+ is_user_label(bo->label)) {
+ /* Free user BO label slots on last unreference.
+ * Slots are just where we track the stats for a given
+ * name, and once a name is unused we can reuse that
+ * slot.
+ */
+ kfree(vc4->bo_labels[bo->label].name);
+ vc4->bo_labels[bo->label].name = NULL;
+ }
+
+ bo->label = label;
+}
+
static uint32_t bo_page_index(size_t size)
{
return (size / PAGE_SIZE) - 1;
}
-/* Must be called with bo_lock held. */
static void vc4_bo_destroy(struct vc4_bo *bo)
{
struct drm_gem_object *obj = &bo->base.base;
struct vc4_dev *vc4 = to_vc4_dev(obj->dev);
+ lockdep_assert_held(&vc4->bo_lock);
+
+ vc4_bo_set_label(obj, -1);
+
if (bo->validated_shader) {
kfree(bo->validated_shader->texture_samples);
kfree(bo->validated_shader);
bo->validated_shader = NULL;
}
- vc4->bo_stats.num_allocated--;
- vc4->bo_stats.size_allocated -= obj->size;
-
reservation_object_fini(&bo->_resv);
drm_gem_cma_free_object(obj);
}
-/* Must be called with bo_lock held. */
static void vc4_bo_remove_from_cache(struct vc4_bo *bo)
{
- struct drm_gem_object *obj = &bo->base.base;
- struct vc4_dev *vc4 = to_vc4_dev(obj->dev);
-
- vc4->bo_stats.num_cached--;
- vc4->bo_stats.size_cached -= obj->size;
+ struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
+ lockdep_assert_held(&vc4->bo_lock);
list_del(&bo->unref_head);
list_del(&bo->size_head);
}
@@ -165,7 +248,8 @@ static void vc4_bo_cache_purge(struct drm_device *dev)
}
static struct vc4_bo *vc4_bo_get_from_cache(struct drm_device *dev,
- uint32_t size)
+ uint32_t size,
+ enum vc4_kernel_bo_type type)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
uint32_t page_index = bo_page_index(size);
@@ -186,6 +270,8 @@ static struct vc4_bo *vc4_bo_get_from_cache(struct drm_device *dev,
kref_init(&bo->base.base.refcount);
out:
+ if (bo)
+ vc4_bo_set_label(&bo->base.base, type);
mutex_unlock(&vc4->bo_lock);
return bo;
}
@@ -208,8 +294,9 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
return ERR_PTR(-ENOMEM);
mutex_lock(&vc4->bo_lock);
- vc4->bo_stats.num_allocated++;
- vc4->bo_stats.size_allocated += size;
+ bo->label = VC4_BO_TYPE_KERNEL;
+ vc4->bo_labels[VC4_BO_TYPE_KERNEL].num_allocated++;
+ vc4->bo_labels[VC4_BO_TYPE_KERNEL].size_allocated += size;
mutex_unlock(&vc4->bo_lock);
bo->resv = &bo->_resv;
reservation_object_init(bo->resv);
@@ -218,7 +305,7 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
}
struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
- bool allow_unzeroed)
+ bool allow_unzeroed, enum vc4_kernel_bo_type type)
{
size_t size = roundup(unaligned_size, PAGE_SIZE);
struct vc4_dev *vc4 = to_vc4_dev(dev);
@@ -229,7 +316,7 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
return ERR_PTR(-EINVAL);
/* First, try to get a vc4_bo from the kernel BO cache. */
- bo = vc4_bo_get_from_cache(dev, size);
+ bo = vc4_bo_get_from_cache(dev, size, type);
if (bo) {
if (!allow_unzeroed)
memset(bo->base.vaddr, 0, bo->base.base.size);
@@ -251,7 +338,13 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
return ERR_PTR(-ENOMEM);
}
}
- return to_vc4_bo(&cma_obj->base);
+ bo = to_vc4_bo(&cma_obj->base);
+
+ mutex_lock(&vc4->bo_lock);
+ vc4_bo_set_label(&cma_obj->base, type);
+ mutex_unlock(&vc4->bo_lock);
+
+ return bo;
}
int vc4_dumb_create(struct drm_file *file_priv,
@@ -268,22 +361,23 @@ int vc4_dumb_create(struct drm_file *file_priv,
if (args->size < args->pitch * args->height)
args->size = args->pitch * args->height;
- bo = vc4_bo_create(dev, args->size, false);
+ bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_DUMB);
if (IS_ERR(bo))
return PTR_ERR(bo);
ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
- drm_gem_object_unreference_unlocked(&bo->base.base);
+ drm_gem_object_put_unlocked(&bo->base.base);
return ret;
}
-/* Must be called with bo_lock held. */
static void vc4_bo_cache_free_old(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
unsigned long expire_time = jiffies - msecs_to_jiffies(1000);
+ lockdep_assert_held(&vc4->bo_lock);
+
while (!list_empty(&vc4->bo_cache.time_list)) {
struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
struct vc4_bo, unref_head);
@@ -348,8 +442,7 @@ void vc4_free_object(struct drm_gem_object *gem_bo)
list_add(&bo->size_head, cache_list);
list_add(&bo->unref_head, &vc4->bo_cache.time_list);
- vc4->bo_stats.num_cached++;
- vc4->bo_stats.size_cached += gem_bo->size;
+ vc4_bo_set_label(&bo->base.base, VC4_BO_TYPE_KERNEL_CACHE);
vc4_bo_cache_free_old(dev);
@@ -389,7 +482,7 @@ vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
struct vc4_bo *bo = to_vc4_bo(obj);
if (bo->validated_shader) {
- DRM_ERROR("Attempting to export shader BO\n");
+ DRM_DEBUG("Attempting to export shader BO\n");
return ERR_PTR(-EINVAL);
}
@@ -410,7 +503,7 @@ int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
bo = to_vc4_bo(gem_obj);
if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
- DRM_ERROR("mmaping of shader BOs for writing not allowed.\n");
+ DRM_DEBUG("mmaping of shader BOs for writing not allowed.\n");
return -EINVAL;
}
@@ -435,7 +528,7 @@ int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
struct vc4_bo *bo = to_vc4_bo(obj);
if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
- DRM_ERROR("mmaping of shader BOs for writing not allowed.\n");
+ DRM_DEBUG("mmaping of shader BOs for writing not allowed.\n");
return -EINVAL;
}
@@ -447,7 +540,7 @@ void *vc4_prime_vmap(struct drm_gem_object *obj)
struct vc4_bo *bo = to_vc4_bo(obj);
if (bo->validated_shader) {
- DRM_ERROR("mmaping of shader BOs not allowed.\n");
+ DRM_DEBUG("mmaping of shader BOs not allowed.\n");
return ERR_PTR(-EINVAL);
}
@@ -483,12 +576,12 @@ int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
* We can't allocate from the BO cache, because the BOs don't
* get zeroed, and that might leak data between users.
*/
- bo = vc4_bo_create(dev, args->size, false);
+ bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_V3D);
if (IS_ERR(bo))
return PTR_ERR(bo);
ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
- drm_gem_object_unreference_unlocked(&bo->base.base);
+ drm_gem_object_put_unlocked(&bo->base.base);
return ret;
}
@@ -501,14 +594,14 @@ int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
gem_obj = drm_gem_object_lookup(file_priv, args->handle);
if (!gem_obj) {
- DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
+ DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
return -EINVAL;
}
/* The mmap offset was set up at BO allocation time. */
args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
- drm_gem_object_unreference_unlocked(gem_obj);
+ drm_gem_object_put_unlocked(gem_obj);
return 0;
}
@@ -536,7 +629,7 @@ vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- bo = vc4_bo_create(dev, args->size, true);
+ bo = vc4_bo_create(dev, args->size, true, VC4_BO_TYPE_V3D_SHADER);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -564,7 +657,7 @@ vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
fail:
- drm_gem_object_unreference_unlocked(&bo->base.base);
+ drm_gem_object_put_unlocked(&bo->base.base);
return ret;
}
@@ -605,13 +698,13 @@ int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
gem_obj = drm_gem_object_lookup(file_priv, args->handle);
if (!gem_obj) {
- DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
+ DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
return -ENOENT;
}
bo = to_vc4_bo(gem_obj);
bo->t_format = t_format;
- drm_gem_object_unreference_unlocked(gem_obj);
+ drm_gem_object_put_unlocked(gem_obj);
return 0;
}
@@ -636,7 +729,7 @@ int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
gem_obj = drm_gem_object_lookup(file_priv, args->handle);
if (!gem_obj) {
- DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
+ DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
return -ENOENT;
}
bo = to_vc4_bo(gem_obj);
@@ -646,14 +739,29 @@ int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
else
args->modifier = DRM_FORMAT_MOD_NONE;
- drm_gem_object_unreference_unlocked(gem_obj);
+ drm_gem_object_put_unlocked(gem_obj);
return 0;
}
-void vc4_bo_cache_init(struct drm_device *dev)
+int vc4_bo_cache_init(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
+ int i;
+
+ /* Create the initial set of BO labels that the kernel will
+ * use. This lets us avoid a bunch of string reallocation in
+ * the kernel's draw and BO allocation paths.
+ */
+ vc4->bo_labels = kcalloc(VC4_BO_TYPE_COUNT, sizeof(*vc4->bo_labels),
+ GFP_KERNEL);
+ if (!vc4->bo_labels)
+ return -ENOMEM;
+ vc4->num_labels = VC4_BO_TYPE_COUNT;
+
+ BUILD_BUG_ON(ARRAY_SIZE(bo_type_names) != VC4_BO_TYPE_COUNT);
+ for (i = 0; i < VC4_BO_TYPE_COUNT; i++)
+ vc4->bo_labels[i].name = bo_type_names[i];
mutex_init(&vc4->bo_lock);
@@ -663,19 +771,66 @@ void vc4_bo_cache_init(struct drm_device *dev)
setup_timer(&vc4->bo_cache.time_timer,
vc4_bo_cache_time_timer,
(unsigned long)dev);
+
+ return 0;
}
void vc4_bo_cache_destroy(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
+ int i;
del_timer(&vc4->bo_cache.time_timer);
cancel_work_sync(&vc4->bo_cache.time_work);
vc4_bo_cache_purge(dev);
- if (vc4->bo_stats.num_allocated) {
- DRM_ERROR("Destroying BO cache while BOs still allocated:\n");
- vc4_bo_stats_dump(vc4);
+ for (i = 0; i < vc4->num_labels; i++) {
+ if (vc4->bo_labels[i].num_allocated) {
+ DRM_ERROR("Destroying BO cache with %d %s "
+ "BOs still allocated\n",
+ vc4->bo_labels[i].num_allocated,
+ vc4->bo_labels[i].name);
+ }
+
+ if (is_user_label(i))
+ kfree(vc4->bo_labels[i].name);
}
+ kfree(vc4->bo_labels);
+}
+
+int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct drm_vc4_label_bo *args = data;
+ char *name;
+ struct drm_gem_object *gem_obj;
+ int ret = 0, label;
+
+ if (!args->len)
+ return -EINVAL;
+
+ name = strndup_user(u64_to_user_ptr(args->name), args->len + 1);
+ if (IS_ERR(name))
+ return PTR_ERR(name);
+
+ gem_obj = drm_gem_object_lookup(file_priv, args->handle);
+ if (!gem_obj) {
+ DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
+ kfree(name);
+ return -ENOENT;
+ }
+
+ mutex_lock(&vc4->bo_lock);
+ label = vc4_get_user_label(vc4, name);
+ if (label != -1)
+ vc4_bo_set_label(gem_obj, label);
+ else
+ ret = -ENOMEM;
+ mutex_unlock(&vc4->bo_lock);
+
+ drm_gem_object_put_unlocked(gem_obj);
+
+ return ret;
}
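
For reference, a minimal userspace sketch of the new labeling interface, assuming the struct drm_vc4_label_bo / DRM_IOCTL_VC4_LABEL_BO UAPI added alongside this driver change (the field names mirror the args usage in vc4_label_bo_ioctl() above):

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <drm/vc4_drm.h>

	/* Label a previously created BO; the kernel copies the string with
	 * strndup_user() and rejects len == 0.
	 */
	static int vc4_label_bo(int drm_fd, uint32_t handle, const char *label)
	{
		struct drm_vc4_label_bo args = {
			.handle = handle,
			.len = strlen(label),
			.name = (uintptr_t)label,
		};

		return ioctl(drm_fd, DRM_IOCTL_VC4_LABEL_BO, &args);
	}

Once every BO carrying a given name has been released, vc4_bo_set_label() frees the slot so the name can be reused.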
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 403bbd5f99a9..ce1e3b9e14c9 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -479,7 +479,8 @@ static void require_hvs_enabled(struct drm_device *dev)
SCALER_DISPCTRL_ENABLE);
}
-static void vc4_crtc_disable(struct drm_crtc *crtc)
+static void vc4_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
@@ -518,9 +519,51 @@ static void vc4_crtc_disable(struct drm_crtc *crtc)
WARN_ON_ONCE((HVS_READ(SCALER_DISPSTATX(chan)) &
(SCALER_DISPSTATX_FULL | SCALER_DISPSTATX_EMPTY)) !=
SCALER_DISPSTATX_EMPTY);
+
+ /*
+ * Make sure we issue a vblank event after disabling the CRTC if
+ * someone was waiting on it.
+ */
+ if (crtc->state->event) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
+}
+
+static void vc4_crtc_update_dlist(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+ struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
+
+ if (crtc->state->event) {
+ unsigned long flags;
+
+ crtc->state->event->pipe = drm_crtc_index(crtc);
+
+ WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ vc4_crtc->event = crtc->state->event;
+ crtc->state->event = NULL;
+
+ HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
+ vc4_state->mm.start);
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ } else {
+ HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
+ vc4_state->mm.start);
+ }
}
-static void vc4_crtc_enable(struct drm_crtc *crtc)
+static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
@@ -530,6 +573,12 @@ static void vc4_crtc_enable(struct drm_crtc *crtc)
require_hvs_enabled(dev);
+ /* Enable vblank irq handling before crtc is started, otherwise
+ * drm_crtc_vblank_get() fails in vc4_crtc_update_dlist().
+ */
+ drm_crtc_vblank_on(crtc);
+ vc4_crtc_update_dlist(crtc);
+
/* Turn on the scaler, which will wait for vstart to start
* compositing.
*/
@@ -541,23 +590,19 @@ static void vc4_crtc_enable(struct drm_crtc *crtc)
/* Turn on the pixel valve, which will emit the vstart signal. */
CRTC_WRITE(PV_V_CONTROL,
CRTC_READ(PV_V_CONTROL) | PV_VCONTROL_VIDEN);
-
- /* Enable vblank irq handling after crtc is started. */
- drm_crtc_vblank_on(crtc);
}
-static bool vc4_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static enum drm_mode_status vc4_crtc_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
{
/* Do not allow doublescan modes from user space */
- if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) {
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN) {
DRM_DEBUG_KMS("[CRTC:%d] Doublescan mode rejected.\n",
crtc->base.id);
- return false;
+ return MODE_NO_DBLESCAN;
}
- return true;
+ return MODE_OK;
}
static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
@@ -598,7 +643,6 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
struct drm_plane *plane;
bool debug_dump_regs = false;
@@ -620,25 +664,15 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size);
- if (crtc->state->event) {
- unsigned long flags;
-
- crtc->state->event->pipe = drm_crtc_index(crtc);
-
- WARN_ON(drm_crtc_vblank_get(crtc) != 0);
-
- spin_lock_irqsave(&dev->event_lock, flags);
- vc4_crtc->event = crtc->state->event;
- crtc->state->event = NULL;
-
- HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
- vc4_state->mm.start);
-
- spin_unlock_irqrestore(&dev->event_lock, flags);
- } else {
- HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
- vc4_state->mm.start);
- }
+ /* Only update DISPLIST if the CRTC was already running and is not
+ * being disabled.
+ * vc4_crtc_atomic_enable() takes care of updating the dlist just after
+ * re-enabling VBLANK interrupts and before enabling the engine.
+ * If the CRTC is being disabled, there's no point in updating this
+ * information.
+ */
+ if (crtc->state->active && old_state->active)
+ vc4_crtc_update_dlist(crtc);
if (debug_dump_regs) {
DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
@@ -662,14 +696,6 @@ static void vc4_disable_vblank(struct drm_crtc *crtc)
CRTC_WRITE(PV_INTEN, 0);
}
-/* Must be called with the event lock held */
-bool vc4_event_pending(struct drm_crtc *crtc)
-{
- struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
-
- return !!vc4_crtc->event;
-}
-
static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
{
struct drm_crtc *crtc = &vc4_crtc->base;
@@ -737,7 +763,7 @@ vc4_async_page_flip_complete(struct vc4_seqno_cb *cb)
}
drm_crtc_vblank_put(crtc);
- drm_framebuffer_unreference(flip_state->fb);
+ drm_framebuffer_put(flip_state->fb);
kfree(flip_state);
up(&vc4->async_modeset);
@@ -766,7 +792,7 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
if (!flip_state)
return -ENOMEM;
- drm_framebuffer_reference(fb);
+ drm_framebuffer_get(fb);
flip_state->fb = fb;
flip_state->crtc = crtc;
flip_state->event = event;
@@ -774,7 +800,7 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
/* Make sure all other async modesets have landed. */
ret = down_interruptible(&vc4->async_modeset);
if (ret) {
- drm_framebuffer_unreference(fb);
+ drm_framebuffer_put(fb);
kfree(flip_state);
return ret;
}
@@ -865,11 +891,11 @@ static const struct drm_crtc_funcs vc4_crtc_funcs = {
static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = {
.mode_set_nofb = vc4_crtc_mode_set_nofb,
- .disable = vc4_crtc_disable,
- .enable = vc4_crtc_enable,
- .mode_fixup = vc4_crtc_mode_fixup,
+ .mode_valid = vc4_crtc_mode_valid,
.atomic_check = vc4_crtc_atomic_check,
.atomic_flush = vc4_crtc_atomic_flush,
+ .atomic_enable = vc4_crtc_atomic_enable,
+ .atomic_disable = vc4_crtc_atomic_disable,
};
static const struct vc4_crtc_data pv0_data = {
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
index 2e0fe46aeb2e..519cefef800d 100644
--- a/drivers/gpu/drm/vc4/vc4_dpi.c
+++ b/drivers/gpu/drm/vc4/vc4_dpi.c
@@ -224,20 +224,19 @@ static void vc4_dpi_encoder_enable(struct drm_encoder *encoder)
DRM_ERROR("Failed to set clock rate: %d\n", ret);
}
-static bool vc4_dpi_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static enum drm_mode_status vc4_dpi_encoder_mode_valid(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode)
{
- if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
- return false;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ return MODE_NO_INTERLACE;
- return true;
+ return MODE_OK;
}
static const struct drm_encoder_helper_funcs vc4_dpi_encoder_helper_funcs = {
.disable = vc4_dpi_encoder_disable,
.enable = vc4_dpi_encoder_enable,
- .mode_fixup = vc4_dpi_encoder_mode_fixup,
+ .mode_valid = vc4_dpi_encoder_mode_valid,
};
static const struct of_device_id vc4_dpi_dt_match[] = {
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index c6b487c3d2b7..1c96edcb302b 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -99,6 +99,7 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
case DRM_VC4_PARAM_SUPPORTS_BRANCHES:
case DRM_VC4_PARAM_SUPPORTS_ETC1:
case DRM_VC4_PARAM_SUPPORTS_THREADED_FS:
+ case DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER:
args->value = true;
break;
default:
@@ -140,6 +141,7 @@ static const struct drm_ioctl_desc vc4_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(VC4_GET_PARAM, vc4_get_param_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VC4_SET_TILING, vc4_set_tiling_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VC4_GET_TILING, vc4_get_tiling_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VC4_LABEL_BO, vc4_label_bo_ioctl, DRM_RENDER_ALLOW),
};
static struct drm_driver vc4_drm_driver = {
@@ -178,8 +180,6 @@ static struct drm_driver vc4_drm_driver = {
.gem_prime_mmap = vc4_prime_mmap,
.dumb_create = vc4_dumb_create,
- .dumb_map_offset = drm_gem_cma_dumb_map_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
.ioctls = vc4_drm_ioctls,
.num_ioctls = ARRAY_SIZE(vc4_drm_ioctls),
@@ -257,7 +257,9 @@ static int vc4_drm_bind(struct device *dev)
vc4->dev = drm;
drm->dev_private = vc4;
- vc4_bo_cache_init(drm);
+ ret = vc4_bo_cache_init(drm);
+ if (ret)
+ goto dev_unref;
drm_mode_config_init(drm);
@@ -281,8 +283,9 @@ unbind_all:
component_unbind_all(dev, drm);
gem_destroy:
vc4_gem_destroy(drm);
- drm_dev_unref(drm);
vc4_bo_cache_destroy(drm);
+dev_unref:
+ drm_dev_unref(drm);
return ret;
}
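
A hedged sketch of how the new parameter and submit flags are meant to be used from userspace, assuming the drm_vc4_get_param and drm_vc4_submit_cl layouts from the vc4 UAPI header (everything but the flags handling is elided):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm/vc4_drm.h>

	static uint32_t vc4_rcl_order_flags(int drm_fd)
	{
		struct drm_vc4_get_param param = {
			.param = DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER,
		};

		/* Older kernels reject the new flags, so fall back to the
		 * default tile walk when the parameter is absent or false.
		 */
		if (ioctl(drm_fd, DRM_IOCTL_VC4_GET_PARAM, &param) || !param.value)
			return 0;

		/* Request a walk in increasing X and increasing Y tile order. */
		return VC4_SUBMIT_CL_FIXED_RCL_ORDER |
		       VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X |
		       VC4_SUBMIT_CL_RCL_ORDER_INCREASING_Y;
	}

The returned value would be OR'd into drm_vc4_submit_cl.flags; vc4_submit_cl_ioctl() rejects any other bits, and vc4_create_rcl_bo() walks the corresponding axis in decreasing order when an INCREASING flag is left clear.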
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index df22698d62ee..87f2d8e5c134 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -11,6 +11,24 @@
#include <drm/drm_encoder.h>
#include <drm/drm_gem_cma_helper.h>
+/* Don't forget to update vc4_bo.c: bo_type_names[] when adding to
+ * this.
+ */
+enum vc4_kernel_bo_type {
+ /* Any kernel allocation (gem_create_object hook) before it
+ * gets another type set.
+ */
+ VC4_BO_TYPE_KERNEL,
+ VC4_BO_TYPE_V3D,
+ VC4_BO_TYPE_V3D_SHADER,
+ VC4_BO_TYPE_DUMB,
+ VC4_BO_TYPE_BIN,
+ VC4_BO_TYPE_RCL,
+ VC4_BO_TYPE_BCL,
+ VC4_BO_TYPE_KERNEL_CACHE,
+ VC4_BO_TYPE_COUNT
+};
+
struct vc4_dev {
struct drm_device *dev;
@@ -46,14 +64,14 @@ struct vc4_dev {
struct timer_list time_timer;
} bo_cache;
- struct vc4_bo_stats {
+ u32 num_labels;
+ struct vc4_label {
+ const char *name;
u32 num_allocated;
u32 size_allocated;
- u32 num_cached;
- u32 size_cached;
- } bo_stats;
+ } *bo_labels;
- /* Protects bo_cache and the BO stats. */
+ /* Protects bo_cache and bo_labels. */
struct mutex bo_lock;
uint64_t dma_fence_context;
@@ -169,6 +187,11 @@ struct vc4_bo {
/* normally (resv == &_resv) except for imported bo's */
struct reservation_object *resv;
struct reservation_object _resv;
+
+ /* One of enum vc4_kernel_bo_type, or VC4_BO_TYPE_COUNT + i
+ * for user-allocated labels.
+ */
+ int label;
};
static inline struct vc4_bo *
@@ -460,7 +483,7 @@ struct vc4_validated_shader_info {
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size);
void vc4_free_object(struct drm_gem_object *gem_obj);
struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size,
- bool from_cache);
+ bool from_cache, enum vc4_kernel_bo_type type);
int vc4_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
@@ -478,6 +501,8 @@ int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj);
int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
@@ -485,13 +510,12 @@ struct drm_gem_object *vc4_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sgt);
void *vc4_prime_vmap(struct drm_gem_object *obj);
-void vc4_bo_cache_init(struct drm_device *dev);
+int vc4_bo_cache_init(struct drm_device *dev);
void vc4_bo_cache_destroy(struct drm_device *dev);
int vc4_bo_stats_debugfs(struct seq_file *m, void *arg);
/* vc4_crtc.c */
extern struct platform_driver vc4_crtc_driver;
-bool vc4_event_pending(struct drm_crtc *crtc);
int vc4_crtc_debugfs_regs(struct seq_file *m, void *arg);
bool vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
bool in_vblank_irq, int *vpos, int *hpos,
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index 5e8b81eaa168..d1e0dc908048 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -736,18 +736,18 @@ static void vc4_dsi_latch_ulps(struct vc4_dsi *dsi, bool latch)
/* Enters or exits Ultra Low Power State. */
static void vc4_dsi_ulps(struct vc4_dsi *dsi, bool ulps)
{
- bool continuous = dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS;
- u32 phyc_ulps = ((continuous ? DSI_PORT_BIT(PHYC_CLANE_ULPS) : 0) |
+ bool non_continuous = dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS;
+ u32 phyc_ulps = ((non_continuous ? DSI_PORT_BIT(PHYC_CLANE_ULPS) : 0) |
DSI_PHYC_DLANE0_ULPS |
(dsi->lanes > 1 ? DSI_PHYC_DLANE1_ULPS : 0) |
(dsi->lanes > 2 ? DSI_PHYC_DLANE2_ULPS : 0) |
(dsi->lanes > 3 ? DSI_PHYC_DLANE3_ULPS : 0));
- u32 stat_ulps = ((continuous ? DSI1_STAT_PHY_CLOCK_ULPS : 0) |
+ u32 stat_ulps = ((non_continuous ? DSI1_STAT_PHY_CLOCK_ULPS : 0) |
DSI1_STAT_PHY_D0_ULPS |
(dsi->lanes > 1 ? DSI1_STAT_PHY_D1_ULPS : 0) |
(dsi->lanes > 2 ? DSI1_STAT_PHY_D2_ULPS : 0) |
(dsi->lanes > 3 ? DSI1_STAT_PHY_D3_ULPS : 0));
- u32 stat_stop = ((continuous ? DSI1_STAT_PHY_CLOCK_STOP : 0) |
+ u32 stat_stop = ((non_continuous ? DSI1_STAT_PHY_CLOCK_STOP : 0) |
DSI1_STAT_PHY_D0_STOP |
(dsi->lanes > 1 ? DSI1_STAT_PHY_D1_STOP : 0) |
(dsi->lanes > 2 ? DSI1_STAT_PHY_D2_STOP : 0) |
@@ -1035,7 +1035,17 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
DSI_HS_DLT4_TRAIL) |
VC4_SET_FIELD(0, DSI_HS_DLT4_ANLAT));
- DSI_PORT_WRITE(HS_DLT5, VC4_SET_FIELD(dsi_hs_timing(ui_ns, 1000, 5000),
+ /* T_INIT is how long STOP is driven after power-up to
+ * indicate to the slave (also coming out of power-up) that
+ * master init is complete, and should be greater than the
+ * maximum of two values: T_INIT,MASTER and T_INIT,SLAVE. The
+ * D-PHY spec gives a minimum of 100us for T_INIT,MASTER and
+ * T_INIT,SLAVE, while allowing protocols on top of it to give
+ * greater minimums. The vc4 firmware uses an extremely
+ * conservative 5ms, and we maintain that here.
+ */
+ DSI_PORT_WRITE(HS_DLT5, VC4_SET_FIELD(dsi_hs_timing(ui_ns,
+ 5 * 1000 * 1000, 0),
DSI_HS_DLT5_INIT));
DSI_PORT_WRITE(HS_DLT6,
@@ -1626,14 +1636,10 @@ static void vc4_dsi_unbind(struct device *dev, struct device *master,
pm_runtime_disable(dev);
- drm_bridge_remove(dsi->bridge);
vc4_dsi_encoder_destroy(dsi->encoder);
mipi_dsi_host_unregister(&dsi->dsi_host);
- clk_disable_unprepare(dsi->pll_phy_clock);
- clk_disable_unprepare(dsi->escape_clock);
-
if (dsi->port == 1)
vc4->dsi1 = NULL;
}
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index d5b821ad06af..d0c6bfb68c4e 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -55,7 +55,7 @@ vc4_free_hang_state(struct drm_device *dev, struct vc4_hang_state *state)
unsigned int i;
for (i = 0; i < state->user_state.bo_count; i++)
- drm_gem_object_unreference_unlocked(state->bo[i]);
+ drm_gem_object_put_unlocked(state->bo[i]);
kfree(state);
}
@@ -119,7 +119,7 @@ vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
bo_state[i].size = vc4_bo->base.base.size;
}
- if (copy_to_user((void __user *)(uintptr_t)get_state->bo,
+ if (copy_to_user(u64_to_user_ptr(get_state->bo),
bo_state,
state->bo_count * sizeof(*bo_state)))
ret = -EFAULT;
@@ -188,12 +188,12 @@ vc4_save_hang_state(struct drm_device *dev)
continue;
for (j = 0; j < exec[i]->bo_count; j++) {
- drm_gem_object_reference(&exec[i]->bo[j]->base);
+ drm_gem_object_get(&exec[i]->bo[j]->base);
kernel_state->bo[j + prev_idx] = &exec[i]->bo[j]->base;
}
list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
- drm_gem_object_reference(&bo->base.base);
+ drm_gem_object_get(&bo->base.base);
kernel_state->bo[j + prev_idx] = &bo->base.base;
j++;
}
@@ -659,7 +659,7 @@ vc4_cl_lookup_bos(struct drm_device *dev,
/* See comment on bo_index for why we have to check
* this.
*/
- DRM_ERROR("Rendering requires BOs to validate\n");
+ DRM_DEBUG("Rendering requires BOs to validate\n");
return -EINVAL;
}
@@ -678,8 +678,7 @@ vc4_cl_lookup_bos(struct drm_device *dev,
goto fail;
}
- if (copy_from_user(handles,
- (void __user *)(uintptr_t)args->bo_handles,
+ if (copy_from_user(handles, u64_to_user_ptr(args->bo_handles),
exec->bo_count * sizeof(uint32_t))) {
ret = -EFAULT;
DRM_ERROR("Failed to copy in GEM handles\n");
@@ -691,13 +690,13 @@ vc4_cl_lookup_bos(struct drm_device *dev,
struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
handles[i]);
if (!bo) {
- DRM_ERROR("Failed to look up GEM BO %d: %d\n",
+ DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
i, handles[i]);
ret = -EINVAL;
spin_unlock(&file_priv->table_lock);
goto fail;
}
- drm_gem_object_reference(bo);
+ drm_gem_object_get(bo);
exec->bo[i] = (struct drm_gem_cma_object *)bo;
}
spin_unlock(&file_priv->table_lock);
@@ -729,7 +728,7 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
args->shader_rec_count >= (UINT_MAX /
sizeof(struct vc4_shader_state)) ||
temp_size < exec_size) {
- DRM_ERROR("overflow in exec arguments\n");
+ DRM_DEBUG("overflow in exec arguments\n");
ret = -EINVAL;
goto fail;
}
@@ -755,27 +754,27 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
exec->shader_state_size = args->shader_rec_count;
if (copy_from_user(bin,
- (void __user *)(uintptr_t)args->bin_cl,
+ u64_to_user_ptr(args->bin_cl),
args->bin_cl_size)) {
ret = -EFAULT;
goto fail;
}
if (copy_from_user(exec->shader_rec_u,
- (void __user *)(uintptr_t)args->shader_rec,
+ u64_to_user_ptr(args->shader_rec),
args->shader_rec_size)) {
ret = -EFAULT;
goto fail;
}
if (copy_from_user(exec->uniforms_u,
- (void __user *)(uintptr_t)args->uniforms,
+ u64_to_user_ptr(args->uniforms),
args->uniforms_size)) {
ret = -EFAULT;
goto fail;
}
- bo = vc4_bo_create(dev, exec_size, true);
+ bo = vc4_bo_create(dev, exec_size, true, VC4_BO_TYPE_BCL);
if (IS_ERR(bo)) {
DRM_ERROR("Couldn't allocate BO for binning\n");
ret = PTR_ERR(bo);
@@ -835,7 +834,7 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
if (exec->bo) {
for (i = 0; i < exec->bo_count; i++)
- drm_gem_object_unreference_unlocked(&exec->bo[i]->base);
+ drm_gem_object_put_unlocked(&exec->bo[i]->base);
kvfree(exec->bo);
}
@@ -843,7 +842,7 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
struct vc4_bo *bo = list_first_entry(&exec->unref_list,
struct vc4_bo, unref_head);
list_del(&bo->unref_head);
- drm_gem_object_unreference_unlocked(&bo->base.base);
+ drm_gem_object_put_unlocked(&bo->base.base);
}
/* Free up the allocation of any bin slots we used. */
@@ -974,7 +973,7 @@ vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
gem_obj = drm_gem_object_lookup(file_priv, args->handle);
if (!gem_obj) {
- DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
+ DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
return -EINVAL;
}
bo = to_vc4_bo(gem_obj);
@@ -982,7 +981,7 @@ vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
ret = vc4_wait_for_seqno_ioctl_helper(dev, bo->seqno,
&args->timeout_ns);
- drm_gem_object_unreference_unlocked(gem_obj);
+ drm_gem_object_put_unlocked(gem_obj);
return ret;
}
@@ -1008,8 +1007,11 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
struct ww_acquire_ctx acquire_ctx;
int ret = 0;
- if ((args->flags & ~VC4_SUBMIT_CL_USE_CLEAR_COLOR) != 0) {
- DRM_ERROR("Unknown flags: 0x%02x\n", args->flags);
+ if ((args->flags & ~(VC4_SUBMIT_CL_USE_CLEAR_COLOR |
+ VC4_SUBMIT_CL_FIXED_RCL_ORDER |
+ VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X |
+ VC4_SUBMIT_CL_RCL_ORDER_INCREASING_Y)) != 0) {
+ DRM_DEBUG("Unknown flags: 0x%02x\n", args->flags);
return -EINVAL;
}
@@ -1118,6 +1120,4 @@ vc4_gem_destroy(struct drm_device *dev)
if (vc4->hang_state)
vc4_free_hang_state(dev, vc4->hang_state);
-
- vc4_bo_cache_destroy(dev);
}
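
The u64_to_user_ptr() conversions above are cosmetic: the helper from linux/kernel.h stands in for the open-coded double cast, adding a type check so only a genuine u64 can be passed. Roughly (not the exact in-tree definition, which wraps this in a typecheck()):

	#define u64_to_user_ptr(x)	((void __user *)(uintptr_t)(x))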
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index ed63d4e85762..937da8dd65b8 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -57,9 +57,14 @@
#include <sound/pcm_drm_eld.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
+#include "media/cec.h"
#include "vc4_drv.h"
#include "vc4_regs.h"
+#define HSM_CLOCK_FREQ 163682864
+#define CEC_CLOCK_FREQ 40000
+#define CEC_CLOCK_DIV (HSM_CLOCK_FREQ / CEC_CLOCK_FREQ)
+
/* HDMI audio information */
struct vc4_hdmi_audio {
struct snd_soc_card card;
@@ -85,6 +90,11 @@ struct vc4_hdmi {
int hpd_gpio;
bool hpd_active_low;
+ struct cec_adapter *cec_adap;
+ struct cec_msg cec_rx_msg;
+ bool cec_tx_ok;
+ bool cec_irq_was_rx;
+
struct clk *pixel_clock;
struct clk *hsm_clock;
};
@@ -149,6 +159,23 @@ static const struct {
HDMI_REG(VC4_HDMI_VERTB1),
HDMI_REG(VC4_HDMI_TX_PHY_RESET_CTL),
HDMI_REG(VC4_HDMI_TX_PHY_CTL0),
+
+ HDMI_REG(VC4_HDMI_CEC_CNTRL_1),
+ HDMI_REG(VC4_HDMI_CEC_CNTRL_2),
+ HDMI_REG(VC4_HDMI_CEC_CNTRL_3),
+ HDMI_REG(VC4_HDMI_CEC_CNTRL_4),
+ HDMI_REG(VC4_HDMI_CEC_CNTRL_5),
+ HDMI_REG(VC4_HDMI_CPU_STATUS),
+ HDMI_REG(VC4_HDMI_CPU_MASK_STATUS),
+
+ HDMI_REG(VC4_HDMI_CEC_RX_DATA_1),
+ HDMI_REG(VC4_HDMI_CEC_RX_DATA_2),
+ HDMI_REG(VC4_HDMI_CEC_RX_DATA_3),
+ HDMI_REG(VC4_HDMI_CEC_RX_DATA_4),
+ HDMI_REG(VC4_HDMI_CEC_TX_DATA_1),
+ HDMI_REG(VC4_HDMI_CEC_TX_DATA_2),
+ HDMI_REG(VC4_HDMI_CEC_TX_DATA_3),
+ HDMI_REG(VC4_HDMI_CEC_TX_DATA_4),
};
static const struct {
@@ -216,8 +243,8 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
if (gpio_get_value_cansleep(vc4->hdmi->hpd_gpio) ^
vc4->hdmi->hpd_active_low)
return connector_status_connected;
- else
- return connector_status_disconnected;
+ cec_phys_addr_invalidate(vc4->hdmi->cec_adap);
+ return connector_status_disconnected;
}
if (drm_probe_ddc(vc4->hdmi->ddc))
@@ -225,8 +252,8 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
if (HDMI_READ(VC4_HDMI_HOTPLUG) & VC4_HDMI_HOTPLUG_CONNECTED)
return connector_status_connected;
- else
- return connector_status_disconnected;
+ cec_phys_addr_invalidate(vc4->hdmi->cec_adap);
+ return connector_status_disconnected;
}
static void vc4_hdmi_connector_destroy(struct drm_connector *connector)
@@ -247,6 +274,7 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
struct edid *edid;
edid = drm_get_edid(connector, vc4->hdmi->ddc);
+ cec_s_phys_addr_from_edid(vc4->hdmi->cec_adap, edid);
if (!edid)
return -ENODEV;
@@ -260,12 +288,12 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
drm_edid_to_eld(connector, edid);
+ kfree(edid);
return ret;
}
static const struct drm_connector_funcs vc4_hdmi_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.detect = vc4_hdmi_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = vc4_hdmi_connector_destroy,
@@ -395,7 +423,7 @@ static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
union hdmi_infoframe frame;
int ret;
- ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode);
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode, false);
if (ret < 0) {
DRM_ERROR("couldn't fill AVI infoframe\n");
return;
@@ -463,11 +491,6 @@ static void vc4_hdmi_encoder_disable(struct drm_encoder *encoder)
HD_WRITE(VC4_HD_VID_CTL,
HD_READ(VC4_HD_VID_CTL) & ~VC4_HD_VID_CTL_ENABLE);
- HD_WRITE(VC4_HD_M_CTL, VC4_HD_M_SW_RST);
- udelay(1);
- HD_WRITE(VC4_HD_M_CTL, 0);
-
- clk_disable_unprepare(hdmi->hsm_clock);
clk_disable_unprepare(hdmi->pixel_clock);
ret = pm_runtime_put(&hdmi->pdev->dev);
@@ -509,16 +532,6 @@ static void vc4_hdmi_encoder_enable(struct drm_encoder *encoder)
return;
}
- /* This is the rate that is set by the firmware. The number
- * needs to be a bit higher than the pixel clock rate
- * (generally 148.5Mhz).
- */
- ret = clk_set_rate(hdmi->hsm_clock, 163682864);
- if (ret) {
- DRM_ERROR("Failed to set HSM clock rate: %d\n", ret);
- return;
- }
-
ret = clk_set_rate(hdmi->pixel_clock,
mode->clock * 1000 *
((mode->flags & DRM_MODE_FLAG_DBLCLK) ? 2 : 1));
@@ -533,20 +546,6 @@ static void vc4_hdmi_encoder_enable(struct drm_encoder *encoder)
return;
}
- ret = clk_prepare_enable(hdmi->hsm_clock);
- if (ret) {
- DRM_ERROR("Failed to turn on HDMI state machine clock: %d\n",
- ret);
- clk_disable_unprepare(hdmi->pixel_clock);
- return;
- }
-
- HD_WRITE(VC4_HD_M_CTL, VC4_HD_M_SW_RST);
- udelay(1);
- HD_WRITE(VC4_HD_M_CTL, 0);
-
- HD_WRITE(VC4_HD_M_CTL, VC4_HD_M_ENABLE);
-
HDMI_WRITE(VC4_HDMI_SW_RESET_CONTROL,
VC4_HDMI_SW_RESET_HDMI |
VC4_HDMI_SW_RESET_FORMAT_DETECT);
@@ -1150,6 +1149,159 @@ static void vc4_hdmi_audio_cleanup(struct vc4_hdmi *hdmi)
snd_soc_unregister_codec(dev);
}
+#ifdef CONFIG_DRM_VC4_HDMI_CEC
+static irqreturn_t vc4_cec_irq_handler_thread(int irq, void *priv)
+{
+ struct vc4_dev *vc4 = priv;
+ struct vc4_hdmi *hdmi = vc4->hdmi;
+
+ if (hdmi->cec_irq_was_rx) {
+ if (hdmi->cec_rx_msg.len)
+ cec_received_msg(hdmi->cec_adap, &hdmi->cec_rx_msg);
+ } else if (hdmi->cec_tx_ok) {
+ cec_transmit_done(hdmi->cec_adap, CEC_TX_STATUS_OK,
+ 0, 0, 0, 0);
+ } else {
+ /*
+ * This CEC implementation makes 1 retry, so if we
+ * get a NACK, then that means it made 2 attempts.
+ */
+ cec_transmit_done(hdmi->cec_adap, CEC_TX_STATUS_NACK,
+ 0, 2, 0, 0);
+ }
+ return IRQ_HANDLED;
+}
+
+static void vc4_cec_read_msg(struct vc4_dev *vc4, u32 cntrl1)
+{
+ struct cec_msg *msg = &vc4->hdmi->cec_rx_msg;
+ unsigned int i;
+
+ msg->len = 1 + ((cntrl1 & VC4_HDMI_CEC_REC_WRD_CNT_MASK) >>
+ VC4_HDMI_CEC_REC_WRD_CNT_SHIFT);
+ for (i = 0; i < msg->len; i += 4) {
+ u32 val = HDMI_READ(VC4_HDMI_CEC_RX_DATA_1 + i);
+
+ msg->msg[i] = val & 0xff;
+ msg->msg[i + 1] = (val >> 8) & 0xff;
+ msg->msg[i + 2] = (val >> 16) & 0xff;
+ msg->msg[i + 3] = (val >> 24) & 0xff;
+ }
+}
+
+static irqreturn_t vc4_cec_irq_handler(int irq, void *priv)
+{
+ struct vc4_dev *vc4 = priv;
+ struct vc4_hdmi *hdmi = vc4->hdmi;
+ u32 stat = HDMI_READ(VC4_HDMI_CPU_STATUS);
+ u32 cntrl1, cntrl5;
+
+ if (!(stat & VC4_HDMI_CPU_CEC))
+ return IRQ_NONE;
+ hdmi->cec_rx_msg.len = 0;
+ cntrl1 = HDMI_READ(VC4_HDMI_CEC_CNTRL_1);
+ cntrl5 = HDMI_READ(VC4_HDMI_CEC_CNTRL_5);
+ hdmi->cec_irq_was_rx = cntrl5 & VC4_HDMI_CEC_RX_CEC_INT;
+ if (hdmi->cec_irq_was_rx) {
+ vc4_cec_read_msg(vc4, cntrl1);
+ cntrl1 |= VC4_HDMI_CEC_CLEAR_RECEIVE_OFF;
+ HDMI_WRITE(VC4_HDMI_CEC_CNTRL_1, cntrl1);
+ cntrl1 &= ~VC4_HDMI_CEC_CLEAR_RECEIVE_OFF;
+ } else {
+ hdmi->cec_tx_ok = cntrl1 & VC4_HDMI_CEC_TX_STATUS_GOOD;
+ cntrl1 &= ~VC4_HDMI_CEC_START_XMIT_BEGIN;
+ }
+ HDMI_WRITE(VC4_HDMI_CEC_CNTRL_1, cntrl1);
+ HDMI_WRITE(VC4_HDMI_CPU_CLEAR, VC4_HDMI_CPU_CEC);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static int vc4_hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+ struct vc4_dev *vc4 = cec_get_drvdata(adap);
+ /* clock period in microseconds */
+ const u32 usecs = 1000000 / CEC_CLOCK_FREQ;
+ u32 val = HDMI_READ(VC4_HDMI_CEC_CNTRL_5);
+
+ val &= ~(VC4_HDMI_CEC_TX_SW_RESET | VC4_HDMI_CEC_RX_SW_RESET |
+ VC4_HDMI_CEC_CNT_TO_4700_US_MASK |
+ VC4_HDMI_CEC_CNT_TO_4500_US_MASK);
+ val |= ((4700 / usecs) << VC4_HDMI_CEC_CNT_TO_4700_US_SHIFT) |
+ ((4500 / usecs) << VC4_HDMI_CEC_CNT_TO_4500_US_SHIFT);
+
+ if (enable) {
+ HDMI_WRITE(VC4_HDMI_CEC_CNTRL_5, val |
+ VC4_HDMI_CEC_TX_SW_RESET | VC4_HDMI_CEC_RX_SW_RESET);
+ HDMI_WRITE(VC4_HDMI_CEC_CNTRL_5, val);
+ HDMI_WRITE(VC4_HDMI_CEC_CNTRL_2,
+ ((1500 / usecs) << VC4_HDMI_CEC_CNT_TO_1500_US_SHIFT) |
+ ((1300 / usecs) << VC4_HDMI_CEC_CNT_TO_1300_US_SHIFT) |
+ ((800 / usecs) << VC4_HDMI_CEC_CNT_TO_800_US_SHIFT) |
+ ((600 / usecs) << VC4_HDMI_CEC_CNT_TO_600_US_SHIFT) |
+ ((400 / usecs) << VC4_HDMI_CEC_CNT_TO_400_US_SHIFT));
+ HDMI_WRITE(VC4_HDMI_CEC_CNTRL_3,
+ ((2750 / usecs) << VC4_HDMI_CEC_CNT_TO_2750_US_SHIFT) |
+ ((2400 / usecs) << VC4_HDMI_CEC_CNT_TO_2400_US_SHIFT) |
+ ((2050 / usecs) << VC4_HDMI_CEC_CNT_TO_2050_US_SHIFT) |
+ ((1700 / usecs) << VC4_HDMI_CEC_CNT_TO_1700_US_SHIFT));
+ HDMI_WRITE(VC4_HDMI_CEC_CNTRL_4,
+ ((4300 / usecs) << VC4_HDMI_CEC_CNT_TO_4300_US_SHIFT) |
+ ((3900 / usecs) << VC4_HDMI_CEC_CNT_TO_3900_US_SHIFT) |
+ ((3600 / usecs) << VC4_HDMI_CEC_CNT_TO_3600_US_SHIFT) |
+ ((3500 / usecs) << VC4_HDMI_CEC_CNT_TO_3500_US_SHIFT));
+
+ HDMI_WRITE(VC4_HDMI_CPU_MASK_CLEAR, VC4_HDMI_CPU_CEC);
+ } else {
+ HDMI_WRITE(VC4_HDMI_CPU_MASK_SET, VC4_HDMI_CPU_CEC);
+ HDMI_WRITE(VC4_HDMI_CEC_CNTRL_5, val |
+ VC4_HDMI_CEC_TX_SW_RESET | VC4_HDMI_CEC_RX_SW_RESET);
+ }
+ return 0;
+}
+
+static int vc4_hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
+{
+ struct vc4_dev *vc4 = cec_get_drvdata(adap);
+
+ HDMI_WRITE(VC4_HDMI_CEC_CNTRL_1,
+ (HDMI_READ(VC4_HDMI_CEC_CNTRL_1) & ~VC4_HDMI_CEC_ADDR_MASK) |
+ (log_addr & 0xf) << VC4_HDMI_CEC_ADDR_SHIFT);
+ return 0;
+}
+
+static int vc4_hdmi_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+{
+ struct vc4_dev *vc4 = cec_get_drvdata(adap);
+ u32 val;
+ unsigned int i;
+
+ for (i = 0; i < msg->len; i += 4)
+ HDMI_WRITE(VC4_HDMI_CEC_TX_DATA_1 + i,
+ (msg->msg[i]) |
+ (msg->msg[i + 1] << 8) |
+ (msg->msg[i + 2] << 16) |
+ (msg->msg[i + 3] << 24));
+
+ val = HDMI_READ(VC4_HDMI_CEC_CNTRL_1);
+ val &= ~VC4_HDMI_CEC_START_XMIT_BEGIN;
+ HDMI_WRITE(VC4_HDMI_CEC_CNTRL_1, val);
+ val &= ~VC4_HDMI_CEC_MESSAGE_LENGTH_MASK;
+ val |= (msg->len - 1) << VC4_HDMI_CEC_MESSAGE_LENGTH_SHIFT;
+ val |= VC4_HDMI_CEC_START_XMIT_BEGIN;
+
+ HDMI_WRITE(VC4_HDMI_CEC_CNTRL_1, val);
+ return 0;
+}
+
+static const struct cec_adap_ops vc4_hdmi_cec_adap_ops = {
+ .adap_enable = vc4_hdmi_cec_adap_enable,
+ .adap_log_addr = vc4_hdmi_cec_adap_log_addr,
+ .adap_transmit = vc4_hdmi_cec_adap_transmit,
+};
+#endif
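
For clarity, the CNT_TO_* values programmed in vc4_hdmi_cec_adap_enable() above reduce to small cycle counts once the 40 kHz CEC bit clock is factored in:

	/* usecs          = 1000000 / 40000 = 25 us per bit-clock cycle
	 * CNT_TO_400_US  =  400 / 25 =  16 cycles
	 * CNT_TO_4500_US = 4500 / 25 = 180 cycles
	 * CNT_TO_4700_US = 4700 / 25 = 188 cycles
	 */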
+
static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -1205,6 +1357,23 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
return -EPROBE_DEFER;
}
+ /* This is the rate that is set by the firmware. The number
+ * needs to be a bit higher than the pixel clock rate
+ * (generally 148.5 MHz).
+ */
+ ret = clk_set_rate(hdmi->hsm_clock, HSM_CLOCK_FREQ);
+ if (ret) {
+ DRM_ERROR("Failed to set HSM clock rate: %d\n", ret);
+ goto err_put_i2c;
+ }
+
+ ret = clk_prepare_enable(hdmi->hsm_clock);
+ if (ret) {
+ DRM_ERROR("Failed to turn on HDMI state machine clock: %d\n",
+ ret);
+ goto err_put_i2c;
+ }
+
/* Only use the GPIO HPD pin if present in the DT, otherwise
* we'll use the HDMI core's register.
*/
@@ -1216,7 +1385,7 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
&hpd_gpio_flags);
if (hdmi->hpd_gpio < 0) {
ret = hdmi->hpd_gpio;
- goto err_put_i2c;
+ goto err_unprepare_hsm;
}
hdmi->hpd_active_low = hpd_gpio_flags & OF_GPIO_ACTIVE_LOW;
@@ -1224,6 +1393,14 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
vc4->hdmi = hdmi;
+ /* HDMI core must be enabled. */
+ if (!(HD_READ(VC4_HD_M_CTL) & VC4_HD_M_ENABLE)) {
+ HD_WRITE(VC4_HD_M_CTL, VC4_HD_M_SW_RST);
+ udelay(1);
+ HD_WRITE(VC4_HD_M_CTL, 0);
+
+ HD_WRITE(VC4_HD_M_CTL, VC4_HD_M_ENABLE);
+ }
pm_runtime_enable(dev);
drm_encoder_init(drm, hdmi->encoder, &vc4_hdmi_encoder_funcs,
@@ -1235,6 +1412,37 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
ret = PTR_ERR(hdmi->connector);
goto err_destroy_encoder;
}
+#ifdef CONFIG_DRM_VC4_HDMI_CEC
+ hdmi->cec_adap = cec_allocate_adapter(&vc4_hdmi_cec_adap_ops,
+ vc4, "vc4",
+ CEC_CAP_TRANSMIT |
+ CEC_CAP_LOG_ADDRS |
+ CEC_CAP_PASSTHROUGH |
+ CEC_CAP_RC, 1);
+ ret = PTR_ERR_OR_ZERO(hdmi->cec_adap);
+ if (ret < 0)
+ goto err_destroy_conn;
+ HDMI_WRITE(VC4_HDMI_CPU_MASK_SET, 0xffffffff);
+ value = HDMI_READ(VC4_HDMI_CEC_CNTRL_1);
+ value &= ~VC4_HDMI_CEC_DIV_CLK_CNT_MASK;
+ /*
+ * Set the logical address to Unregistered and set the clock
+ * divider: the hsm_clock rate and this divider setting will
+ * give a 40 kHz CEC clock.
+ */
+ value |= VC4_HDMI_CEC_ADDR_MASK |
+ (4091 << VC4_HDMI_CEC_DIV_CLK_CNT_SHIFT);
+ HDMI_WRITE(VC4_HDMI_CEC_CNTRL_1, value);
+ ret = devm_request_threaded_irq(dev, platform_get_irq(pdev, 0),
+ vc4_cec_irq_handler,
+ vc4_cec_irq_handler_thread, 0,
+ "vc4 hdmi cec", vc4);
+ if (ret)
+ goto err_delete_cec_adap;
+ ret = cec_register_adapter(hdmi->cec_adap, dev);
+ if (ret < 0)
+ goto err_delete_cec_adap;
+#endif
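
The divider written here follows from the clock definitions at the top of the file; as a rough check (whether the hardware divides by the raw count or by count + 1 is an assumption, but either way the result is ~40 kHz):

	/* HSM_CLOCK_FREQ / CEC_CLOCK_FREQ = 163682864 / 40000 = 4092
	 * 163682864 / 4092 ~= 40.0 kHz, matching the 4091 programmed into
	 * CEC_DIV_CLK_CNT if the divider counts from zero.
	 */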
ret = vc4_hdmi_audio_init(hdmi);
if (ret)
@@ -1242,8 +1450,16 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
return 0;
+#ifdef CONFIG_DRM_VC4_HDMI_CEC
+err_delete_cec_adap:
+ cec_delete_adapter(hdmi->cec_adap);
+err_destroy_conn:
+ vc4_hdmi_connector_destroy(hdmi->connector);
+#endif
err_destroy_encoder:
vc4_hdmi_encoder_destroy(hdmi->encoder);
+err_unprepare_hsm:
+ clk_disable_unprepare(hdmi->hsm_clock);
pm_runtime_disable(dev);
err_put_i2c:
put_device(&hdmi->ddc->dev);
@@ -1259,10 +1475,11 @@ static void vc4_hdmi_unbind(struct device *dev, struct device *master,
struct vc4_hdmi *hdmi = vc4->hdmi;
vc4_hdmi_audio_cleanup(hdmi);
-
+ cec_unregister_adapter(hdmi->cec_adap);
vc4_hdmi_connector_destroy(hdmi->connector);
vc4_hdmi_encoder_destroy(hdmi->encoder);
+ clk_disable_unprepare(hdmi->hsm_clock);
pm_runtime_disable(dev);
put_device(&hdmi->ddc->dev);
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index bc6ecdc6f104..50c4959b5bd3 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -20,6 +20,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include "vc4_drv.h"
static void vc4_output_poll_changed(struct drm_device *dev)
@@ -29,16 +30,9 @@ static void vc4_output_poll_changed(struct drm_device *dev)
drm_fbdev_cma_hotplug_event(vc4->fbdev);
}
-struct vc4_commit {
- struct drm_device *dev;
- struct drm_atomic_state *state;
- struct vc4_seqno_cb cb;
-};
-
static void
-vc4_atomic_complete_commit(struct vc4_commit *c)
+vc4_atomic_complete_commit(struct drm_atomic_state *state)
{
- struct drm_atomic_state *state = c->state;
struct drm_device *dev = state->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
@@ -72,28 +66,14 @@ vc4_atomic_complete_commit(struct vc4_commit *c)
drm_atomic_state_put(state);
up(&vc4->async_modeset);
-
- kfree(c);
}
-static void
-vc4_atomic_complete_commit_seqno_cb(struct vc4_seqno_cb *cb)
+static void commit_work(struct work_struct *work)
{
- struct vc4_commit *c = container_of(cb, struct vc4_commit, cb);
-
- vc4_atomic_complete_commit(c);
-}
-
-static struct vc4_commit *commit_init(struct drm_atomic_state *state)
-{
- struct vc4_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
-
- if (!c)
- return NULL;
- c->dev = state->dev;
- c->state = state;
-
- return c;
+ struct drm_atomic_state *state = container_of(work,
+ struct drm_atomic_state,
+ commit_work);
+ vc4_atomic_complete_commit(state);
}
/**
@@ -115,40 +95,29 @@ static int vc4_atomic_commit(struct drm_device *dev,
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
int ret;
- int i;
- uint64_t wait_seqno = 0;
- struct vc4_commit *c;
- struct drm_plane *plane;
- struct drm_plane_state *new_state;
-
- c = commit_init(state);
- if (!c)
- return -ENOMEM;
ret = drm_atomic_helper_setup_commit(state, nonblock);
if (ret)
return ret;
+ INIT_WORK(&state->commit_work, commit_work);
+
ret = down_interruptible(&vc4->async_modeset);
- if (ret) {
- kfree(c);
+ if (ret)
return ret;
- }
ret = drm_atomic_helper_prepare_planes(dev, state);
if (ret) {
- kfree(c);
up(&vc4->async_modeset);
return ret;
}
- for_each_plane_in_state(state, plane, new_state, i) {
- if ((plane->state->fb != new_state->fb) && new_state->fb) {
- struct drm_gem_cma_object *cma_bo =
- drm_fb_cma_get_gem_obj(new_state->fb, 0);
- struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
-
- wait_seqno = max(bo->seqno, wait_seqno);
+ if (!nonblock) {
+ ret = drm_atomic_helper_wait_for_fences(dev, state, true);
+ if (ret) {
+ drm_atomic_helper_cleanup_planes(dev, state);
+ up(&vc4->async_modeset);
+ return ret;
}
}
@@ -158,7 +127,7 @@ static int vc4_atomic_commit(struct drm_device *dev,
* the software side now.
*/
- drm_atomic_helper_swap_state(state, true);
+ BUG_ON(drm_atomic_helper_swap_state(state, false) < 0);
/*
* Everything below can be run asynchronously without the need to grab
@@ -177,13 +146,10 @@ static int vc4_atomic_commit(struct drm_device *dev,
*/
drm_atomic_state_get(state);
- if (nonblock) {
- vc4_queue_seqno_cb(dev, &c->cb, wait_seqno,
- vc4_atomic_complete_commit_seqno_cb);
- } else {
- vc4_wait_for_seqno(dev, wait_seqno, ~0ull, false);
- vc4_atomic_complete_commit(c);
- }
+ if (nonblock)
+ queue_work(system_unbound_wq, &state->commit_work);
+ else
+ vc4_atomic_complete_commit(state);
return 0;
}
@@ -204,7 +170,7 @@ static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
gem_obj = drm_gem_object_lookup(file_priv,
mode_cmd->handles[0]);
if (!gem_obj) {
- DRM_ERROR("Failed to look up GEM BO %d\n",
+ DRM_DEBUG("Failed to look up GEM BO %d\n",
mode_cmd->handles[0]);
return ERR_PTR(-ENOENT);
}
@@ -219,12 +185,12 @@ static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
mode_cmd_local.modifier[0] = DRM_FORMAT_MOD_NONE;
}
- drm_gem_object_unreference_unlocked(gem_obj);
+ drm_gem_object_put_unlocked(gem_obj);
mode_cmd = &mode_cmd_local;
}
- return drm_fb_cma_create(dev, file_priv, mode_cmd);
+ return drm_gem_fb_create(dev, file_priv, mode_cmd);
}
static const struct drm_mode_config_funcs vc4_mode_funcs = {
@@ -241,6 +207,9 @@ int vc4_kms_load(struct drm_device *dev)
sema_init(&vc4->async_modeset, 1);
+ /* Set support for vblank irq fast disable, before drm_vblank_init() */
+ dev->vblank_disable_immediate = true;
+
ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
if (ret < 0) {
dev_err(dev->dev, "failed to initialize vblank\n");
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index fa6809d8b0fe..2968b3ebb895 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -759,9 +759,26 @@ void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb)
vc4_state->dlist[vc4_state->ptr0_offset] = addr;
}
+static int vc4_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct vc4_bo *bo;
+ struct dma_fence *fence;
+
+ if ((plane->state->fb == state->fb) || !state->fb)
+ return 0;
+
+ bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base);
+ fence = reservation_object_get_excl_rcu(bo->resv);
+ drm_atomic_set_fence_for_plane(state, fence);
+
+ return 0;
+}
+
static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
.atomic_check = vc4_plane_atomic_check,
.atomic_update = vc4_plane_atomic_update,
+ .prepare_fb = vc4_prepare_fb,
};
static void vc4_plane_destroy(struct drm_plane *plane)
@@ -885,7 +902,7 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
ret = drm_universal_plane_init(dev, plane, 0,
&vc4_plane_funcs,
formats, num_formats,
- type, NULL);
+ NULL, type, NULL);
drm_plane_helper_add(plane, &vc4_plane_helper_funcs);
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index d382c34c1b9e..55677bd50f66 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -561,16 +561,129 @@
# define VC4_HDMI_VERTB_VBP_MASK VC4_MASK(8, 0)
# define VC4_HDMI_VERTB_VBP_SHIFT 0
+#define VC4_HDMI_CEC_CNTRL_1 0x0e8
+/* Set when the transmission has ended. */
+# define VC4_HDMI_CEC_TX_EOM BIT(31)
+/* If set, transmission was acked on the 1st or 2nd attempt (only one
+ * retry is attempted). If in continuous mode, this means TX needs to
+ * be filled if !TX_EOM.
+ */
+# define VC4_HDMI_CEC_TX_STATUS_GOOD BIT(30)
+# define VC4_HDMI_CEC_RX_EOM BIT(29)
+# define VC4_HDMI_CEC_RX_STATUS_GOOD BIT(28)
+/* Number of bytes received for the message. */
+# define VC4_HDMI_CEC_REC_WRD_CNT_MASK VC4_MASK(27, 24)
+# define VC4_HDMI_CEC_REC_WRD_CNT_SHIFT 24
+/* Sets continuous receive mode. Generates interrupt after each 8
+ * bytes to signal that RX_DATA should be consumed, and at RX_EOM.
+ *
+ * If disabled, maximum 16 bytes will be received (including header),
+ * and interrupt at RX_EOM. Later bytes will be acked but not put
+ * into the RX_DATA.
+ */
+# define VC4_HDMI_CEC_RX_CONTINUE BIT(23)
+# define VC4_HDMI_CEC_TX_CONTINUE BIT(22)
+/* Set this after a CEC interrupt. */
+# define VC4_HDMI_CEC_CLEAR_RECEIVE_OFF BIT(21)
+/* Starts a TX. Will wait for appropriate idle time before CEC
+ * activity. Must be cleared in between transmits.
+ */
+# define VC4_HDMI_CEC_START_XMIT_BEGIN BIT(20)
+# define VC4_HDMI_CEC_MESSAGE_LENGTH_MASK VC4_MASK(19, 16)
+# define VC4_HDMI_CEC_MESSAGE_LENGTH_SHIFT 16
+/* Device's CEC address */
+# define VC4_HDMI_CEC_ADDR_MASK VC4_MASK(15, 12)
+# define VC4_HDMI_CEC_ADDR_SHIFT 12
+/* Divides off of HSM clock to generate CEC bit clock. */
+/* With the current defaults the CEC bit clock is 40 kHz = 25 usec */
+# define VC4_HDMI_CEC_DIV_CLK_CNT_MASK VC4_MASK(11, 0)
+# define VC4_HDMI_CEC_DIV_CLK_CNT_SHIFT 0
+
+/* Set these fields to the number of bit-clock cycles that make up
+ * the given number of microseconds.
+ */
+#define VC4_HDMI_CEC_CNTRL_2 0x0ec
+# define VC4_HDMI_CEC_CNT_TO_1500_US_MASK VC4_MASK(30, 24)
+# define VC4_HDMI_CEC_CNT_TO_1500_US_SHIFT 24
+# define VC4_HDMI_CEC_CNT_TO_1300_US_MASK VC4_MASK(23, 17)
+# define VC4_HDMI_CEC_CNT_TO_1300_US_SHIFT 17
+# define VC4_HDMI_CEC_CNT_TO_800_US_MASK VC4_MASK(16, 11)
+# define VC4_HDMI_CEC_CNT_TO_800_US_SHIFT 11
+# define VC4_HDMI_CEC_CNT_TO_600_US_MASK VC4_MASK(10, 5)
+# define VC4_HDMI_CEC_CNT_TO_600_US_SHIFT 5
+# define VC4_HDMI_CEC_CNT_TO_400_US_MASK VC4_MASK(4, 0)
+# define VC4_HDMI_CEC_CNT_TO_400_US_SHIFT 0
+
+#define VC4_HDMI_CEC_CNTRL_3 0x0f0
+# define VC4_HDMI_CEC_CNT_TO_2750_US_MASK VC4_MASK(31, 24)
+# define VC4_HDMI_CEC_CNT_TO_2750_US_SHIFT 24
+# define VC4_HDMI_CEC_CNT_TO_2400_US_MASK VC4_MASK(23, 16)
+# define VC4_HDMI_CEC_CNT_TO_2400_US_SHIFT 16
+# define VC4_HDMI_CEC_CNT_TO_2050_US_MASK VC4_MASK(15, 8)
+# define VC4_HDMI_CEC_CNT_TO_2050_US_SHIFT 8
+# define VC4_HDMI_CEC_CNT_TO_1700_US_MASK VC4_MASK(7, 0)
+# define VC4_HDMI_CEC_CNT_TO_1700_US_SHIFT 0
+
+#define VC4_HDMI_CEC_CNTRL_4 0x0f4
+# define VC4_HDMI_CEC_CNT_TO_4300_US_MASK VC4_MASK(31, 24)
+# define VC4_HDMI_CEC_CNT_TO_4300_US_SHIFT 24
+# define VC4_HDMI_CEC_CNT_TO_3900_US_MASK VC4_MASK(23, 16)
+# define VC4_HDMI_CEC_CNT_TO_3900_US_SHIFT 16
+# define VC4_HDMI_CEC_CNT_TO_3600_US_MASK VC4_MASK(15, 8)
+# define VC4_HDMI_CEC_CNT_TO_3600_US_SHIFT 8
+# define VC4_HDMI_CEC_CNT_TO_3500_US_MASK VC4_MASK(7, 0)
+# define VC4_HDMI_CEC_CNT_TO_3500_US_SHIFT 0
+
+#define VC4_HDMI_CEC_CNTRL_5 0x0f8
+# define VC4_HDMI_CEC_TX_SW_RESET BIT(27)
+# define VC4_HDMI_CEC_RX_SW_RESET BIT(26)
+# define VC4_HDMI_CEC_PAD_SW_RESET BIT(25)
+# define VC4_HDMI_CEC_MUX_TP_OUT_CEC BIT(24)
+# define VC4_HDMI_CEC_RX_CEC_INT BIT(23)
+# define VC4_HDMI_CEC_CLK_PRELOAD_MASK VC4_MASK(22, 16)
+# define VC4_HDMI_CEC_CLK_PRELOAD_SHIFT 16
+# define VC4_HDMI_CEC_CNT_TO_4700_US_MASK VC4_MASK(15, 8)
+# define VC4_HDMI_CEC_CNT_TO_4700_US_SHIFT 8
+# define VC4_HDMI_CEC_CNT_TO_4500_US_MASK VC4_MASK(7, 0)
+# define VC4_HDMI_CEC_CNT_TO_4500_US_SHIFT 0
+
+/* Transmit data, first byte is low byte of the 32-bit reg. MSB of
+ * each byte transmitted first.
+ */
+#define VC4_HDMI_CEC_TX_DATA_1 0x0fc
+#define VC4_HDMI_CEC_TX_DATA_2 0x100
+#define VC4_HDMI_CEC_TX_DATA_3 0x104
+#define VC4_HDMI_CEC_TX_DATA_4 0x108
+#define VC4_HDMI_CEC_RX_DATA_1 0x10c
+#define VC4_HDMI_CEC_RX_DATA_2 0x110
+#define VC4_HDMI_CEC_RX_DATA_3 0x114
+#define VC4_HDMI_CEC_RX_DATA_4 0x118
+
#define VC4_HDMI_TX_PHY_RESET_CTL 0x2c0
#define VC4_HDMI_TX_PHY_CTL0 0x2c4
# define VC4_HDMI_TX_PHY_RNG_PWRDN BIT(25)
+/* Interrupt status bits */
+#define VC4_HDMI_CPU_STATUS 0x340
+#define VC4_HDMI_CPU_SET 0x344
+#define VC4_HDMI_CPU_CLEAR 0x348
+# define VC4_HDMI_CPU_CEC BIT(6)
+# define VC4_HDMI_CPU_HOTPLUG BIT(0)
+
+#define VC4_HDMI_CPU_MASK_STATUS 0x34c
+#define VC4_HDMI_CPU_MASK_SET 0x350
+#define VC4_HDMI_CPU_MASK_CLEAR 0x354
+
#define VC4_HDMI_GCP(x) (0x400 + ((x) * 0x4))
#define VC4_HDMI_RAM_PACKET(x) (0x400 + ((x) * 0x24))
#define VC4_HDMI_PACKET_STRIDE 0x24
#define VC4_HD_M_CTL 0x00c
+/* Debug: Current receive value on the CEC pad. */
+# define VC4_HD_CECRXD BIT(9)
+/* Debug: Override CEC output to 0. */
+# define VC4_HD_CECOVR BIT(8)
# define VC4_HD_M_REGISTER_FILE_STANDBY (3 << 6)
# define VC4_HD_M_RAM_STANDBY (3 << 4)
# define VC4_HD_M_SW_RST BIT(2)
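
To illustrate the packing used by vc4_hdmi_cec_adap_transmit() above (the byte values are purely illustrative): the first message byte goes into the low byte of TX_DATA_1, the next into bits 15:8, and so on.

	/* 2-byte message { 0x40, 0x36 } (header byte, <Standby> opcode):
	 *
	 *   TX_DATA_1      = 0x40 | (0x36 << 8) = 0x00003640
	 *   MESSAGE_LENGTH = len - 1 = 1, then START_XMIT_BEGIN is set.
	 */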
diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c
index 5dc19429d4ae..273984f71ae2 100644
--- a/drivers/gpu/drm/vc4/vc4_render_cl.c
+++ b/drivers/gpu/drm/vc4/vc4_render_cl.c
@@ -261,8 +261,17 @@ static int vc4_create_rcl_bo(struct drm_device *dev, struct vc4_exec_info *exec,
uint8_t max_y_tile = args->max_y_tile;
uint8_t xtiles = max_x_tile - min_x_tile + 1;
uint8_t ytiles = max_y_tile - min_y_tile + 1;
- uint8_t x, y;
+ uint8_t xi, yi;
uint32_t size, loop_body_size;
+ bool positive_x = true;
+ bool positive_y = true;
+
+ if (args->flags & VC4_SUBMIT_CL_FIXED_RCL_ORDER) {
+ if (!(args->flags & VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X))
+ positive_x = false;
+ if (!(args->flags & VC4_SUBMIT_CL_RCL_ORDER_INCREASING_Y))
+ positive_y = false;
+ }
size = VC4_PACKET_TILE_RENDERING_MODE_CONFIG_SIZE;
loop_body_size = VC4_PACKET_TILE_COORDINATES_SIZE;
@@ -320,7 +329,7 @@ static int vc4_create_rcl_bo(struct drm_device *dev, struct vc4_exec_info *exec,
size += xtiles * ytiles * loop_body_size;
- setup->rcl = &vc4_bo_create(dev, size, true)->base;
+ setup->rcl = &vc4_bo_create(dev, size, true, VC4_BO_TYPE_RCL)->base;
if (IS_ERR(setup->rcl))
return PTR_ERR(setup->rcl);
list_add_tail(&to_vc4_bo(&setup->rcl->base)->unref_head,
@@ -354,10 +363,12 @@ static int vc4_create_rcl_bo(struct drm_device *dev, struct vc4_exec_info *exec,
rcl_u16(setup, args->height);
rcl_u16(setup, args->color_write.bits);
- for (y = min_y_tile; y <= max_y_tile; y++) {
- for (x = min_x_tile; x <= max_x_tile; x++) {
- bool first = (x == min_x_tile && y == min_y_tile);
- bool last = (x == max_x_tile && y == max_y_tile);
+ for (yi = 0; yi < ytiles; yi++) {
+ int y = positive_y ? min_y_tile + yi : max_y_tile - yi;
+ for (xi = 0; xi < xtiles; xi++) {
+ int x = positive_x ? min_x_tile + xi : max_x_tile - xi;
+ bool first = (xi == 0 && yi == 0);
+ bool last = (xi == xtiles - 1 && yi == ytiles - 1);
emit_tile(exec, setup, x, y, first, last);
}
@@ -378,14 +389,14 @@ static int vc4_full_res_bounds_check(struct vc4_exec_info *exec,
u32 render_tiles_stride = DIV_ROUND_UP(exec->args->width, 32);
if (surf->offset > obj->base.size) {
- DRM_ERROR("surface offset %d > BO size %zd\n",
+ DRM_DEBUG("surface offset %d > BO size %zd\n",
surf->offset, obj->base.size);
return -EINVAL;
}
if ((obj->base.size - surf->offset) / VC4_TILE_BUFFER_SIZE <
render_tiles_stride * args->max_y_tile + args->max_x_tile) {
- DRM_ERROR("MSAA tile %d, %d out of bounds "
+ DRM_DEBUG("MSAA tile %d, %d out of bounds "
"(bo size %zd, offset %d).\n",
args->max_x_tile, args->max_y_tile,
obj->base.size,
@@ -401,7 +412,7 @@ static int vc4_rcl_msaa_surface_setup(struct vc4_exec_info *exec,
struct drm_vc4_submit_rcl_surface *surf)
{
if (surf->flags != 0 || surf->bits != 0) {
- DRM_ERROR("MSAA surface had nonzero flags/bits\n");
+ DRM_DEBUG("MSAA surface had nonzero flags/bits\n");
return -EINVAL;
}
@@ -415,7 +426,7 @@ static int vc4_rcl_msaa_surface_setup(struct vc4_exec_info *exec,
exec->rcl_write_bo[exec->rcl_write_bo_count++] = *obj;
if (surf->offset & 0xf) {
- DRM_ERROR("MSAA write must be 16b aligned.\n");
+ DRM_DEBUG("MSAA write must be 16b aligned.\n");
return -EINVAL;
}
@@ -437,7 +448,7 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
int ret;
if (surf->flags & ~VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
- DRM_ERROR("Extra flags set\n");
+ DRM_DEBUG("Extra flags set\n");
return -EINVAL;
}
@@ -453,12 +464,12 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
if (surf->flags & VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
if (surf == &exec->args->zs_write) {
- DRM_ERROR("general zs write may not be a full-res.\n");
+ DRM_DEBUG("general zs write may not be a full-res.\n");
return -EINVAL;
}
if (surf->bits != 0) {
- DRM_ERROR("load/store general bits set with "
+ DRM_DEBUG("load/store general bits set with "
"full res load/store.\n");
return -EINVAL;
}
@@ -473,19 +484,19 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
if (surf->bits & ~(VC4_LOADSTORE_TILE_BUFFER_TILING_MASK |
VC4_LOADSTORE_TILE_BUFFER_BUFFER_MASK |
VC4_LOADSTORE_TILE_BUFFER_FORMAT_MASK)) {
- DRM_ERROR("Unknown bits in load/store: 0x%04x\n",
+ DRM_DEBUG("Unknown bits in load/store: 0x%04x\n",
surf->bits);
return -EINVAL;
}
if (tiling > VC4_TILING_FORMAT_LT) {
- DRM_ERROR("Bad tiling format\n");
+ DRM_DEBUG("Bad tiling format\n");
return -EINVAL;
}
if (buffer == VC4_LOADSTORE_TILE_BUFFER_ZS) {
if (format != 0) {
- DRM_ERROR("No color format should be set for ZS\n");
+ DRM_DEBUG("No color format should be set for ZS\n");
return -EINVAL;
}
cpp = 4;
@@ -499,16 +510,16 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
cpp = 4;
break;
default:
- DRM_ERROR("Bad tile buffer format\n");
+ DRM_DEBUG("Bad tile buffer format\n");
return -EINVAL;
}
} else {
- DRM_ERROR("Bad load/store buffer %d.\n", buffer);
+ DRM_DEBUG("Bad load/store buffer %d.\n", buffer);
return -EINVAL;
}
if (surf->offset & 0xf) {
- DRM_ERROR("load/store buffer must be 16b aligned.\n");
+ DRM_DEBUG("load/store buffer must be 16b aligned.\n");
return -EINVAL;
}
@@ -533,7 +544,7 @@ vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec,
int cpp;
if (surf->flags != 0) {
- DRM_ERROR("No flags supported on render config.\n");
+ DRM_DEBUG("No flags supported on render config.\n");
return -EINVAL;
}
@@ -541,7 +552,7 @@ vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec,
VC4_RENDER_CONFIG_FORMAT_MASK |
VC4_RENDER_CONFIG_MS_MODE_4X |
VC4_RENDER_CONFIG_DECIMATE_MODE_4X)) {
- DRM_ERROR("Unknown bits in render config: 0x%04x\n",
+ DRM_DEBUG("Unknown bits in render config: 0x%04x\n",
surf->bits);
return -EINVAL;
}
@@ -556,7 +567,7 @@ vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec,
exec->rcl_write_bo[exec->rcl_write_bo_count++] = *obj;
if (tiling > VC4_TILING_FORMAT_LT) {
- DRM_ERROR("Bad tiling format\n");
+ DRM_DEBUG("Bad tiling format\n");
return -EINVAL;
}
@@ -569,7 +580,7 @@ vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec,
cpp = 4;
break;
default:
- DRM_ERROR("Bad tile buffer format\n");
+ DRM_DEBUG("Bad tile buffer format\n");
return -EINVAL;
}
@@ -590,7 +601,7 @@ int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec)
if (args->min_x_tile > args->max_x_tile ||
args->min_y_tile > args->max_y_tile) {
- DRM_ERROR("Bad render tile set (%d,%d)-(%d,%d)\n",
+ DRM_DEBUG("Bad render tile set (%d,%d)-(%d,%d)\n",
args->min_x_tile, args->min_y_tile,
args->max_x_tile, args->max_y_tile);
return -EINVAL;
@@ -599,7 +610,7 @@ int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec)
if (has_bin &&
(args->max_x_tile > exec->bin_tiles_x ||
args->max_y_tile > exec->bin_tiles_y)) {
- DRM_ERROR("Render tiles (%d,%d) outside of bin config "
+ DRM_DEBUG("Render tiles (%d,%d) outside of bin config "
"(%d,%d)\n",
args->max_x_tile, args->max_y_tile,
exec->bin_tiles_x, exec->bin_tiles_y);
@@ -642,7 +653,7 @@ int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec)
*/
if (!setup.color_write && !setup.zs_write &&
!setup.msaa_color_write && !setup.msaa_zs_write) {
- DRM_ERROR("RCL requires color or Z/S write\n");
+ DRM_DEBUG("RCL requires color or Z/S write\n");
return -EINVAL;
}
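The hunks above replace the fixed min-to-max tile walk in vc4_create_rcl_bo() with a per-axis direction chosen from the submit flags, alongside the DRM_ERROR-to-DRM_DEBUG demotion of userspace-triggerable messages. A minimal userspace sketch of the index arithmetic, assuming nothing beyond the diff itself (walk_tiles() and its printf output are illustrative, not driver code):

/* Sketch: per-axis tile walk direction, mirroring the loop above. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static void walk_tiles(uint8_t min_x, uint8_t max_x,
		       uint8_t min_y, uint8_t max_y,
		       bool positive_x, bool positive_y)
{
	uint8_t xtiles = max_x - min_x + 1;
	uint8_t ytiles = max_y - min_y + 1;
	uint8_t xi, yi;

	for (yi = 0; yi < ytiles; yi++) {
		int y = positive_y ? min_y + yi : max_y - yi;

		for (xi = 0; xi < xtiles; xi++) {
			int x = positive_x ? min_x + xi : max_x - xi;
			bool first = (xi == 0 && yi == 0);
			bool last = (xi == xtiles - 1 && yi == ytiles - 1);

			printf("tile (%d,%d)%s%s\n", x, y,
			       first ? " [first]" : "",
			       last ? " [last]" : "");
		}
	}
}

int main(void)
{
	/* Default order: X and Y both increasing. */
	walk_tiles(0, 2, 0, 1, true, true);
	/* VC4_SUBMIT_CL_FIXED_RCL_ORDER with decreasing Y requested. */
	walk_tiles(0, 2, 0, 1, true, false);
	return 0;
}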
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index 8c723da71f66..622cd43840b8 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -236,7 +236,8 @@ vc4_allocate_bin_bo(struct drm_device *drm)
INIT_LIST_HEAD(&list);
while (true) {
- struct vc4_bo *bo = vc4_bo_create(drm, size, true);
+ struct vc4_bo *bo = vc4_bo_create(drm, size, true,
+ VC4_BO_TYPE_BIN);
if (IS_ERR(bo)) {
ret = PTR_ERR(bo);
diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c
index 814b512c6b9a..2db485abb186 100644
--- a/drivers/gpu/drm/vc4/vc4_validate.c
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
@@ -109,7 +109,7 @@ vc4_use_bo(struct vc4_exec_info *exec, uint32_t hindex)
struct vc4_bo *bo;
if (hindex >= exec->bo_count) {
- DRM_ERROR("BO index %d greater than BO count %d\n",
+ DRM_DEBUG("BO index %d greater than BO count %d\n",
hindex, exec->bo_count);
return NULL;
}
@@ -117,7 +117,7 @@ vc4_use_bo(struct vc4_exec_info *exec, uint32_t hindex)
bo = to_vc4_bo(&obj->base);
if (bo->validated_shader) {
- DRM_ERROR("Trying to use shader BO as something other than "
+ DRM_DEBUG("Trying to use shader BO as something other than "
"a shader\n");
return NULL;
}
@@ -172,7 +172,7 @@ vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_cma_object *fbo,
* our math.
*/
if (width > 4096 || height > 4096) {
- DRM_ERROR("Surface dimensions (%d,%d) too large",
+ DRM_DEBUG("Surface dimensions (%d,%d) too large",
width, height);
return false;
}
@@ -191,7 +191,7 @@ vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_cma_object *fbo,
aligned_height = round_up(height, utile_h);
break;
default:
- DRM_ERROR("buffer tiling %d unsupported\n", tiling_format);
+ DRM_DEBUG("buffer tiling %d unsupported\n", tiling_format);
return false;
}
@@ -200,7 +200,7 @@ vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_cma_object *fbo,
if (size + offset < size ||
size + offset > fbo->base.size) {
- DRM_ERROR("Overflow in %dx%d (%dx%d) fbo size (%d + %d > %zd)\n",
+ DRM_DEBUG("Overflow in %dx%d (%dx%d) fbo size (%d + %d > %zd)\n",
width, height,
aligned_width, aligned_height,
size, offset, fbo->base.size);
@@ -214,7 +214,7 @@ static int
validate_flush(VALIDATE_ARGS)
{
if (!validate_bin_pos(exec, untrusted, exec->args->bin_cl_size - 1)) {
- DRM_ERROR("Bin CL must end with VC4_PACKET_FLUSH\n");
+ DRM_DEBUG("Bin CL must end with VC4_PACKET_FLUSH\n");
return -EINVAL;
}
exec->found_flush = true;
@@ -226,13 +226,13 @@ static int
validate_start_tile_binning(VALIDATE_ARGS)
{
if (exec->found_start_tile_binning_packet) {
- DRM_ERROR("Duplicate VC4_PACKET_START_TILE_BINNING\n");
+ DRM_DEBUG("Duplicate VC4_PACKET_START_TILE_BINNING\n");
return -EINVAL;
}
exec->found_start_tile_binning_packet = true;
if (!exec->found_tile_binning_mode_config_packet) {
- DRM_ERROR("missing VC4_PACKET_TILE_BINNING_MODE_CONFIG\n");
+ DRM_DEBUG("missing VC4_PACKET_TILE_BINNING_MODE_CONFIG\n");
return -EINVAL;
}
@@ -243,7 +243,7 @@ static int
validate_increment_semaphore(VALIDATE_ARGS)
{
if (!validate_bin_pos(exec, untrusted, exec->args->bin_cl_size - 2)) {
- DRM_ERROR("Bin CL must end with "
+ DRM_DEBUG("Bin CL must end with "
"VC4_PACKET_INCREMENT_SEMAPHORE\n");
return -EINVAL;
}
@@ -264,7 +264,7 @@ validate_indexed_prim_list(VALIDATE_ARGS)
/* Check overflow condition */
if (exec->shader_state_count == 0) {
- DRM_ERROR("shader state must precede primitives\n");
+ DRM_DEBUG("shader state must precede primitives\n");
return -EINVAL;
}
shader_state = &exec->shader_state[exec->shader_state_count - 1];
@@ -281,7 +281,7 @@ validate_indexed_prim_list(VALIDATE_ARGS)
if (offset > ib->base.size ||
(ib->base.size - offset) / index_size < length) {
- DRM_ERROR("IB access overflow (%d + %d*%d > %zd)\n",
+ DRM_DEBUG("IB access overflow (%d + %d*%d > %zd)\n",
offset, length, index_size, ib->base.size);
return -EINVAL;
}
@@ -301,13 +301,13 @@ validate_gl_array_primitive(VALIDATE_ARGS)
/* Check overflow condition */
if (exec->shader_state_count == 0) {
- DRM_ERROR("shader state must precede primitives\n");
+ DRM_DEBUG("shader state must precede primitives\n");
return -EINVAL;
}
shader_state = &exec->shader_state[exec->shader_state_count - 1];
if (length + base_index < length) {
- DRM_ERROR("primitive vertex count overflow\n");
+ DRM_DEBUG("primitive vertex count overflow\n");
return -EINVAL;
}
max_index = length + base_index - 1;
@@ -324,7 +324,7 @@ validate_gl_shader_state(VALIDATE_ARGS)
uint32_t i = exec->shader_state_count++;
if (i >= exec->shader_state_size) {
- DRM_ERROR("More requests for shader states than declared\n");
+ DRM_DEBUG("More requests for shader states than declared\n");
return -EINVAL;
}
@@ -332,7 +332,7 @@ validate_gl_shader_state(VALIDATE_ARGS)
exec->shader_state[i].max_index = 0;
if (exec->shader_state[i].addr & ~0xf) {
- DRM_ERROR("high bits set in GL shader rec reference\n");
+ DRM_DEBUG("high bits set in GL shader rec reference\n");
return -EINVAL;
}
@@ -356,7 +356,7 @@ validate_tile_binning_config(VALIDATE_ARGS)
int bin_slot;
if (exec->found_tile_binning_mode_config_packet) {
- DRM_ERROR("Duplicate VC4_PACKET_TILE_BINNING_MODE_CONFIG\n");
+ DRM_DEBUG("Duplicate VC4_PACKET_TILE_BINNING_MODE_CONFIG\n");
return -EINVAL;
}
exec->found_tile_binning_mode_config_packet = true;
@@ -368,14 +368,14 @@ validate_tile_binning_config(VALIDATE_ARGS)
if (exec->bin_tiles_x == 0 ||
exec->bin_tiles_y == 0) {
- DRM_ERROR("Tile binning config of %dx%d too small\n",
+ DRM_DEBUG("Tile binning config of %dx%d too small\n",
exec->bin_tiles_x, exec->bin_tiles_y);
return -EINVAL;
}
if (flags & (VC4_BIN_CONFIG_DB_NON_MS |
VC4_BIN_CONFIG_TILE_BUFFER_64BIT)) {
- DRM_ERROR("unsupported binning config flags 0x%02x\n", flags);
+ DRM_DEBUG("unsupported binning config flags 0x%02x\n", flags);
return -EINVAL;
}
@@ -493,20 +493,20 @@ vc4_validate_bin_cl(struct drm_device *dev,
const struct cmd_info *info;
if (cmd >= ARRAY_SIZE(cmd_info)) {
- DRM_ERROR("0x%08x: packet %d out of bounds\n",
+ DRM_DEBUG("0x%08x: packet %d out of bounds\n",
src_offset, cmd);
return -EINVAL;
}
info = &cmd_info[cmd];
if (!info->name) {
- DRM_ERROR("0x%08x: packet %d invalid\n",
+ DRM_DEBUG("0x%08x: packet %d invalid\n",
src_offset, cmd);
return -EINVAL;
}
if (src_offset + info->len > len) {
- DRM_ERROR("0x%08x: packet %d (%s) length 0x%08x "
+ DRM_DEBUG("0x%08x: packet %d (%s) length 0x%08x "
"exceeds bounds (0x%08x)\n",
src_offset, cmd, info->name, info->len,
src_offset + len);
@@ -519,7 +519,7 @@ vc4_validate_bin_cl(struct drm_device *dev,
if (info->func && info->func(exec,
dst_pkt + 1,
src_pkt + 1)) {
- DRM_ERROR("0x%08x: packet %d (%s) failed to validate\n",
+ DRM_DEBUG("0x%08x: packet %d (%s) failed to validate\n",
src_offset, cmd, info->name);
return -EINVAL;
}
@@ -537,7 +537,7 @@ vc4_validate_bin_cl(struct drm_device *dev,
exec->ct0ea = exec->ct0ca + dst_offset;
if (!exec->found_start_tile_binning_packet) {
- DRM_ERROR("Bin CL missing VC4_PACKET_START_TILE_BINNING\n");
+ DRM_DEBUG("Bin CL missing VC4_PACKET_START_TILE_BINNING\n");
return -EINVAL;
}
@@ -549,7 +549,7 @@ vc4_validate_bin_cl(struct drm_device *dev,
* semaphore increment.
*/
if (!exec->found_increment_semaphore_packet || !exec->found_flush) {
- DRM_ERROR("Bin CL missing VC4_PACKET_INCREMENT_SEMAPHORE + "
+ DRM_DEBUG("Bin CL missing VC4_PACKET_INCREMENT_SEMAPHORE + "
"VC4_PACKET_FLUSH\n");
return -EINVAL;
}
@@ -588,11 +588,11 @@ reloc_tex(struct vc4_exec_info *exec,
uint32_t remaining_size = tex->base.size - p0;
if (p0 > tex->base.size - 4) {
- DRM_ERROR("UBO offset greater than UBO size\n");
+ DRM_DEBUG("UBO offset greater than UBO size\n");
goto fail;
}
if (p1 > remaining_size - 4) {
- DRM_ERROR("UBO clamp would allow reads "
+ DRM_DEBUG("UBO clamp would allow reads "
"outside of UBO\n");
goto fail;
}
@@ -612,14 +612,14 @@ reloc_tex(struct vc4_exec_info *exec,
if (VC4_GET_FIELD(p3, VC4_TEX_P2_PTYPE) ==
VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE) {
if (cube_map_stride) {
- DRM_ERROR("Cube map stride set twice\n");
+ DRM_DEBUG("Cube map stride set twice\n");
goto fail;
}
cube_map_stride = p3 & VC4_TEX_P2_CMST_MASK;
}
if (!cube_map_stride) {
- DRM_ERROR("Cube map stride not set\n");
+ DRM_DEBUG("Cube map stride not set\n");
goto fail;
}
}
@@ -660,7 +660,7 @@ reloc_tex(struct vc4_exec_info *exec,
case VC4_TEXTURE_TYPE_RGBA64:
case VC4_TEXTURE_TYPE_YUV422R:
default:
- DRM_ERROR("Texture format %d unsupported\n", type);
+ DRM_DEBUG("Texture format %d unsupported\n", type);
goto fail;
}
utile_w = utile_width(cpp);
@@ -713,7 +713,7 @@ reloc_tex(struct vc4_exec_info *exec,
level_size = aligned_width * cpp * aligned_height;
if (offset < level_size) {
- DRM_ERROR("Level %d (%dx%d -> %dx%d) size %db "
+ DRM_DEBUG("Level %d (%dx%d -> %dx%d) size %db "
"overflowed buffer bounds (offset %d)\n",
i, level_width, level_height,
aligned_width, aligned_height,
@@ -764,7 +764,7 @@ validate_gl_shader_rec(struct drm_device *dev,
nr_relocs = ARRAY_SIZE(shader_reloc_offsets) + nr_attributes;
if (nr_relocs * 4 > exec->shader_rec_size) {
- DRM_ERROR("overflowed shader recs reading %d handles "
+ DRM_DEBUG("overflowed shader recs reading %d handles "
"from %d bytes left\n",
nr_relocs, exec->shader_rec_size);
return -EINVAL;
@@ -774,7 +774,7 @@ validate_gl_shader_rec(struct drm_device *dev,
exec->shader_rec_size -= nr_relocs * 4;
if (packet_size > exec->shader_rec_size) {
- DRM_ERROR("overflowed shader recs copying %db packet "
+ DRM_DEBUG("overflowed shader recs copying %db packet "
"from %d bytes left\n",
packet_size, exec->shader_rec_size);
return -EINVAL;
@@ -794,7 +794,7 @@ validate_gl_shader_rec(struct drm_device *dev,
for (i = 0; i < shader_reloc_count; i++) {
if (src_handles[i] > exec->bo_count) {
- DRM_ERROR("Shader handle %d too big\n", src_handles[i]);
+ DRM_DEBUG("Shader handle %d too big\n", src_handles[i]);
return -EINVAL;
}
@@ -810,13 +810,13 @@ validate_gl_shader_rec(struct drm_device *dev,
if (((*(uint16_t *)pkt_u & VC4_SHADER_FLAG_FS_SINGLE_THREAD) == 0) !=
to_vc4_bo(&bo[0]->base)->validated_shader->is_threaded) {
- DRM_ERROR("Thread mode of CL and FS do not match\n");
+ DRM_DEBUG("Thread mode of CL and FS do not match\n");
return -EINVAL;
}
if (to_vc4_bo(&bo[1]->base)->validated_shader->is_threaded ||
to_vc4_bo(&bo[2]->base)->validated_shader->is_threaded) {
- DRM_ERROR("cs and vs cannot be threaded\n");
+ DRM_DEBUG("cs and vs cannot be threaded\n");
return -EINVAL;
}
@@ -831,7 +831,7 @@ validate_gl_shader_rec(struct drm_device *dev,
*(uint32_t *)(pkt_v + o) = bo[i]->paddr + src_offset;
if (src_offset != 0) {
- DRM_ERROR("Shaders must be at offset 0 of "
+ DRM_DEBUG("Shaders must be at offset 0 of "
"the BO.\n");
return -EINVAL;
}
@@ -842,7 +842,7 @@ validate_gl_shader_rec(struct drm_device *dev,
if (validated_shader->uniforms_src_size >
exec->uniforms_size) {
- DRM_ERROR("Uniforms src buffer overflow\n");
+ DRM_DEBUG("Uniforms src buffer overflow\n");
return -EINVAL;
}
@@ -900,7 +900,7 @@ validate_gl_shader_rec(struct drm_device *dev,
if (vbo->base.size < offset ||
vbo->base.size - offset < attr_size) {
- DRM_ERROR("BO offset overflow (%d + %d > %zu)\n",
+ DRM_DEBUG("BO offset overflow (%d + %d > %zu)\n",
offset, attr_size, vbo->base.size);
return -EINVAL;
}
@@ -909,7 +909,7 @@ validate_gl_shader_rec(struct drm_device *dev,
max_index = ((vbo->base.size - offset - attr_size) /
stride);
if (state->max_index > max_index) {
- DRM_ERROR("primitives use index %d out of "
+ DRM_DEBUG("primitives use index %d out of "
"supplied %d\n",
state->max_index, max_index);
return -EINVAL;
diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
index 0b2df5c6efb4..d3f15bf60900 100644
--- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c
+++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
@@ -200,7 +200,7 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader,
uint32_t clamp_reg, clamp_offset;
if (sig == QPU_SIG_SMALL_IMM) {
- DRM_ERROR("direct TMU read used small immediate\n");
+ DRM_DEBUG("direct TMU read used small immediate\n");
return false;
}
@@ -209,7 +209,7 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader,
*/
if (is_mul ||
QPU_GET_FIELD(inst, QPU_OP_ADD) != QPU_A_ADD) {
- DRM_ERROR("direct TMU load wasn't an add\n");
+ DRM_DEBUG("direct TMU load wasn't an add\n");
return false;
}
@@ -220,13 +220,13 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader,
*/
clamp_reg = raddr_add_a_to_live_reg_index(inst);
if (clamp_reg == ~0) {
- DRM_ERROR("direct TMU load wasn't clamped\n");
+ DRM_DEBUG("direct TMU load wasn't clamped\n");
return false;
}
clamp_offset = validation_state->live_min_clamp_offsets[clamp_reg];
if (clamp_offset == ~0) {
- DRM_ERROR("direct TMU load wasn't clamped\n");
+ DRM_DEBUG("direct TMU load wasn't clamped\n");
return false;
}
@@ -238,7 +238,7 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader,
if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
!(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF)) {
- DRM_ERROR("direct TMU load didn't add to a uniform\n");
+ DRM_DEBUG("direct TMU load didn't add to a uniform\n");
return false;
}
@@ -246,14 +246,14 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader,
} else {
if (raddr_a == QPU_R_UNIF || (sig != QPU_SIG_SMALL_IMM &&
raddr_b == QPU_R_UNIF)) {
- DRM_ERROR("uniform read in the same instruction as "
+ DRM_DEBUG("uniform read in the same instruction as "
"texture setup.\n");
return false;
}
}
if (validation_state->tmu_write_count[tmu] >= 4) {
- DRM_ERROR("TMU%d got too many parameters before dispatch\n",
+ DRM_DEBUG("TMU%d got too many parameters before dispatch\n",
tmu);
return false;
}
@@ -265,7 +265,7 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader,
*/
if (!is_direct) {
if (validation_state->needs_uniform_address_update) {
- DRM_ERROR("Texturing with undefined uniform address\n");
+ DRM_DEBUG("Texturing with undefined uniform address\n");
return false;
}
@@ -336,35 +336,35 @@ validate_uniform_address_write(struct vc4_validated_shader_info *validated_shade
case QPU_SIG_LOAD_TMU1:
break;
default:
- DRM_ERROR("uniforms address change must be "
+ DRM_DEBUG("uniforms address change must be "
"normal math\n");
return false;
}
if (is_mul || QPU_GET_FIELD(inst, QPU_OP_ADD) != QPU_A_ADD) {
- DRM_ERROR("Uniform address reset must be an ADD.\n");
+ DRM_DEBUG("Uniform address reset must be an ADD.\n");
return false;
}
if (QPU_GET_FIELD(inst, QPU_COND_ADD) != QPU_COND_ALWAYS) {
- DRM_ERROR("Uniform address reset must be unconditional.\n");
+ DRM_DEBUG("Uniform address reset must be unconditional.\n");
return false;
}
if (QPU_GET_FIELD(inst, QPU_PACK) != QPU_PACK_A_NOP &&
!(inst & QPU_PM)) {
- DRM_ERROR("No packing allowed on uniforms reset\n");
+ DRM_DEBUG("No packing allowed on uniforms reset\n");
return false;
}
if (add_lri == -1) {
- DRM_ERROR("First argument of uniform address write must be "
+ DRM_DEBUG("First argument of uniform address write must be "
"an immediate value.\n");
return false;
}
if (validation_state->live_immediates[add_lri] != expected_offset) {
- DRM_ERROR("Resetting uniforms with offset %db instead of %db\n",
+ DRM_DEBUG("Resetting uniforms with offset %db instead of %db\n",
validation_state->live_immediates[add_lri],
expected_offset);
return false;
@@ -372,7 +372,7 @@ validate_uniform_address_write(struct vc4_validated_shader_info *validated_shade
if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
!(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF)) {
- DRM_ERROR("Second argument of uniform address write must be "
+ DRM_DEBUG("Second argument of uniform address write must be "
"a uniform.\n");
return false;
}
@@ -417,7 +417,7 @@ check_reg_write(struct vc4_validated_shader_info *validated_shader,
switch (waddr) {
case QPU_W_UNIFORMS_ADDRESS:
if (is_b) {
- DRM_ERROR("relative uniforms address change "
+ DRM_DEBUG("relative uniforms address change "
"unsupported\n");
return false;
}
@@ -452,11 +452,11 @@ check_reg_write(struct vc4_validated_shader_info *validated_shader,
/* XXX: I haven't thought about these, so don't support them
* for now.
*/
- DRM_ERROR("Unsupported waddr %d\n", waddr);
+ DRM_DEBUG("Unsupported waddr %d\n", waddr);
return false;
case QPU_W_VPM_ADDR:
- DRM_ERROR("General VPM DMA unsupported\n");
+ DRM_DEBUG("General VPM DMA unsupported\n");
return false;
case QPU_W_VPM:
@@ -559,7 +559,7 @@ check_instruction_writes(struct vc4_validated_shader_info *validated_shader,
bool ok;
if (is_tmu_write(waddr_add) && is_tmu_write(waddr_mul)) {
- DRM_ERROR("ADD and MUL both set up textures\n");
+ DRM_DEBUG("ADD and MUL both set up textures\n");
return false;
}
@@ -588,7 +588,7 @@ check_branch(uint64_t inst,
* there's no need for it.
*/
if (waddr_add != QPU_W_NOP || waddr_mul != QPU_W_NOP) {
- DRM_ERROR("branch instruction at %d wrote a register.\n",
+ DRM_DEBUG("branch instruction at %d wrote a register.\n",
validation_state->ip);
return false;
}
@@ -614,7 +614,7 @@ check_instruction_reads(struct vc4_validated_shader_info *validated_shader,
validated_shader->uniforms_size += 4;
if (validation_state->needs_uniform_address_update) {
- DRM_ERROR("Uniform read with undefined uniform "
+ DRM_DEBUG("Uniform read with undefined uniform "
"address\n");
return false;
}
@@ -660,19 +660,19 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
continue;
if (ip - last_branch < 4) {
- DRM_ERROR("Branch at %d during delay slots\n", ip);
+ DRM_DEBUG("Branch at %d during delay slots\n", ip);
return false;
}
last_branch = ip;
if (inst & QPU_BRANCH_REG) {
- DRM_ERROR("branching from register relative "
+ DRM_DEBUG("branching from register relative "
"not supported\n");
return false;
}
if (!(inst & QPU_BRANCH_REL)) {
- DRM_ERROR("relative branching required\n");
+ DRM_DEBUG("relative branching required\n");
return false;
}
@@ -682,13 +682,13 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
* end of the shader object.
*/
if (branch_imm % sizeof(inst) != 0) {
- DRM_ERROR("branch target not aligned\n");
+ DRM_DEBUG("branch target not aligned\n");
return false;
}
branch_target_ip = after_delay_ip + (branch_imm >> 3);
if (branch_target_ip >= validation_state->max_ip) {
- DRM_ERROR("Branch at %d outside of shader (ip %d/%d)\n",
+ DRM_DEBUG("Branch at %d outside of shader (ip %d/%d)\n",
ip, branch_target_ip,
validation_state->max_ip);
return false;
@@ -699,7 +699,7 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
* the shader.
*/
if (after_delay_ip >= validation_state->max_ip) {
- DRM_ERROR("Branch at %d continues past shader end "
+ DRM_DEBUG("Branch at %d continues past shader end "
"(%d/%d)\n",
ip, after_delay_ip, validation_state->max_ip);
return false;
@@ -709,7 +709,7 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
}
if (max_branch_target > validation_state->max_ip - 3) {
- DRM_ERROR("Branch landed after QPU_SIG_PROG_END");
+ DRM_DEBUG("Branch landed after QPU_SIG_PROG_END");
return false;
}
@@ -750,7 +750,7 @@ vc4_handle_branch_target(struct vc4_shader_validation_state *validation_state)
return true;
if (texturing_in_progress(validation_state)) {
- DRM_ERROR("Branch target landed during TMU setup\n");
+ DRM_DEBUG("Branch target landed during TMU setup\n");
return false;
}
@@ -837,7 +837,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
case QPU_SIG_LAST_THREAD_SWITCH:
if (!check_instruction_writes(validated_shader,
&validation_state)) {
- DRM_ERROR("Bad write at ip %d\n", ip);
+ DRM_DEBUG("Bad write at ip %d\n", ip);
goto fail;
}
@@ -855,7 +855,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
validated_shader->is_threaded = true;
if (ip < last_thread_switch_ip + 3) {
- DRM_ERROR("Thread switch too soon after "
+ DRM_DEBUG("Thread switch too soon after "
"last switch at ip %d\n", ip);
goto fail;
}
@@ -867,7 +867,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
case QPU_SIG_LOAD_IMM:
if (!check_instruction_writes(validated_shader,
&validation_state)) {
- DRM_ERROR("Bad LOAD_IMM write at ip %d\n", ip);
+ DRM_DEBUG("Bad LOAD_IMM write at ip %d\n", ip);
goto fail;
}
break;
@@ -878,14 +878,14 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
goto fail;
if (ip < last_thread_switch_ip + 3) {
- DRM_ERROR("Branch in thread switch at ip %d",
+ DRM_DEBUG("Branch in thread switch at ip %d",
ip);
goto fail;
}
break;
default:
- DRM_ERROR("Unsupported QPU signal %d at "
+ DRM_DEBUG("Unsupported QPU signal %d at "
"instruction %d\n", sig, ip);
goto fail;
}
@@ -898,7 +898,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
}
if (ip == validation_state.max_ip) {
- DRM_ERROR("shader failed to terminate before "
+ DRM_DEBUG("shader failed to terminate before "
"shader BO end at %zd\n",
shader_obj->base.size);
goto fail;
@@ -907,7 +907,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
/* Might corrupt other thread */
if (validated_shader->is_threaded &&
validation_state.all_registers_used) {
- DRM_ERROR("Shader uses threading, but uses the upper "
+ DRM_DEBUG("Shader uses threading, but uses the upper "
"half of the registers, too\n");
goto fail;
}
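Throughout the vc4 validators above the change is uniform: any check that a malformed userspace submission can trip now logs via DRM_DEBUG rather than DRM_ERROR, so a buggy or hostile client cannot spam the kernel log at error severity. A hedged sketch of that convention in isolation (check_user_size() is an invented helper; only the logging choice mirrors the patch, and the drmP.h include is assumed for this kernel generation):

/* Sketch: userspace-triggerable validation failure logged at debug level. */
#include <drm/drmP.h>

static int check_user_size(u32 requested, u32 limit)
{
	if (requested > limit) {
		/* Reachable from a malformed ioctl: debug, not error. */
		DRM_DEBUG("requested size %u exceeds limit %u\n",
			  requested, limit);
		return -EINVAL;
	}

	return 0;
}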
diff --git a/drivers/gpu/drm/vc4/vc4_vec.c b/drivers/gpu/drm/vc4/vc4_vec.c
index 09c1e05765fa..3a9a302247a2 100644
--- a/drivers/gpu/drm/vc4/vc4_vec.c
+++ b/drivers/gpu/drm/vc4/vc4_vec.c
@@ -366,10 +366,8 @@ static int vc4_vec_connector_get_modes(struct drm_connector *connector)
}
static const struct drm_connector_funcs vc4_vec_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.detect = vc4_vec_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
- .set_property = drm_atomic_helper_connector_set_property,
.destroy = vc4_vec_connector_destroy,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 18f401b442c2..2524ff116f00 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -52,6 +52,7 @@ static void vgem_gem_free_object(struct drm_gem_object *obj)
struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);
kvfree(vgem_obj->pages);
+ mutex_destroy(&vgem_obj->pages_lock);
if (obj->import_attach)
drm_prime_gem_destroy(obj, vgem_obj->table);
@@ -76,11 +77,15 @@ static int vgem_gem_fault(struct vm_fault *vmf)
if (page_offset > num_pages)
return VM_FAULT_SIGBUS;
+ ret = -ENOENT;
+ mutex_lock(&obj->pages_lock);
if (obj->pages) {
get_page(obj->pages[page_offset]);
vmf->page = obj->pages[page_offset];
ret = 0;
- } else {
+ }
+ mutex_unlock(&obj->pages_lock);
+ if (ret) {
struct page *page;
page = shmem_read_mapping_page(
@@ -161,6 +166,8 @@ static struct drm_vgem_gem_object *__vgem_gem_create(struct drm_device *dev,
return ERR_PTR(ret);
}
+ mutex_init(&obj->pages_lock);
+
return obj;
}
@@ -183,7 +190,7 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
return ERR_CAST(obj);
ret = drm_gem_handle_create(file, &obj->base, handle);
- drm_gem_object_unreference_unlocked(&obj->base);
+ drm_gem_object_put_unlocked(&obj->base);
if (ret)
goto err;
@@ -238,7 +245,7 @@ static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
*offset = drm_vma_node_offset_addr(&obj->vma_node);
unref:
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -271,40 +278,70 @@ static const struct file_operations vgem_driver_fops = {
.poll = drm_poll,
.read = drm_read,
.unlocked_ioctl = drm_ioctl,
+ .compat_ioctl = drm_compat_ioctl,
.release = drm_release,
};
+static struct page **vgem_pin_pages(struct drm_vgem_gem_object *bo)
+{
+ mutex_lock(&bo->pages_lock);
+ if (bo->pages_pin_count++ == 0) {
+ struct page **pages;
+
+ pages = drm_gem_get_pages(&bo->base);
+ if (IS_ERR(pages)) {
+ bo->pages_pin_count--;
+ mutex_unlock(&bo->pages_lock);
+ return pages;
+ }
+
+ bo->pages = pages;
+ }
+ mutex_unlock(&bo->pages_lock);
+
+ return bo->pages;
+}
+
+static void vgem_unpin_pages(struct drm_vgem_gem_object *bo)
+{
+ mutex_lock(&bo->pages_lock);
+ if (--bo->pages_pin_count == 0) {
+ drm_gem_put_pages(&bo->base, bo->pages, true, true);
+ bo->pages = NULL;
+ }
+ mutex_unlock(&bo->pages_lock);
+}
+
static int vgem_prime_pin(struct drm_gem_object *obj)
{
+ struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
long n_pages = obj->size >> PAGE_SHIFT;
struct page **pages;
- /* Flush the object from the CPU cache so that importers can rely
- * on coherent indirect access via the exported dma-address.
- */
- pages = drm_gem_get_pages(obj);
+ pages = vgem_pin_pages(bo);
if (IS_ERR(pages))
return PTR_ERR(pages);
+ /* Flush the object from the CPU cache so that importers can rely
+ * on coherent indirect access via the exported dma-address.
+ */
drm_clflush_pages(pages, n_pages);
- drm_gem_put_pages(obj, pages, true, false);
return 0;
}
-static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
+static void vgem_prime_unpin(struct drm_gem_object *obj)
{
- struct sg_table *st;
- struct page **pages;
+ struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
- pages = drm_gem_get_pages(obj);
- if (IS_ERR(pages))
- return ERR_CAST(pages);
+ vgem_unpin_pages(bo);
+}
- st = drm_prime_pages_to_sg(pages, obj->size >> PAGE_SHIFT);
- drm_gem_put_pages(obj, pages, false, false);
+static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+ struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
- return st;
+ return drm_prime_pages_to_sg(bo->pages, bo->base.size >> PAGE_SHIFT);
}
static struct drm_gem_object* vgem_prime_import(struct drm_device *dev,
@@ -333,6 +370,8 @@ static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev,
__vgem_gem_destroy(obj);
return ERR_PTR(-ENOMEM);
}
+
+ obj->pages_pin_count++; /* perma-pinned */
drm_prime_sg_to_page_addr_arrays(obj->table, obj->pages, NULL,
npages);
return &obj->base;
@@ -340,23 +379,23 @@ static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev,
static void *vgem_prime_vmap(struct drm_gem_object *obj)
{
+ struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
long n_pages = obj->size >> PAGE_SHIFT;
struct page **pages;
- void *addr;
- pages = drm_gem_get_pages(obj);
+ pages = vgem_pin_pages(bo);
if (IS_ERR(pages))
return NULL;
- addr = vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
- drm_gem_put_pages(obj, pages, false, false);
-
- return addr;
+ return vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
}
static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
+ struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
+
vunmap(vaddr);
+ vgem_unpin_pages(bo);
}
static int vgem_prime_mmap(struct drm_gem_object *obj,
@@ -409,6 +448,7 @@ static struct drm_driver vgem_driver = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_pin = vgem_prime_pin,
+ .gem_prime_unpin = vgem_prime_unpin,
.gem_prime_import = vgem_prime_import,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_import_sg_table = vgem_prime_import_sg_table,
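The vgem rework above stops re-acquiring and dropping the page array on every pin, vmap and SG-table export; instead bo->pages lives under a mutex-protected pin count shared with the fault handler. A generic sketch of that pattern, assuming nothing vgem-specific (pthread_mutex stands in for struct mutex, and acquire_pages()/release_pages() are placeholders for drm_gem_get_pages()/drm_gem_put_pages()):

/* Sketch: refcounted page pinning guarded by a mutex. */
#include <pthread.h>
#include <stdlib.h>

struct pinned_obj {
	pthread_mutex_t lock;
	void **pages;
	unsigned int pin_count;
};

static void **acquire_pages(void) { return calloc(16, sizeof(void *)); }
static void release_pages(void **pages) { free(pages); }

static void **obj_pin_pages(struct pinned_obj *obj)
{
	void **pages;

	pthread_mutex_lock(&obj->lock);
	if (obj->pin_count++ == 0) {
		pages = acquire_pages();
		if (!pages) {
			obj->pin_count--;	/* roll back on failure */
			pthread_mutex_unlock(&obj->lock);
			return NULL;
		}
		obj->pages = pages;
	}
	pages = obj->pages;
	pthread_mutex_unlock(&obj->lock);

	return pages;
}

static void obj_unpin_pages(struct pinned_obj *obj)
{
	pthread_mutex_lock(&obj->lock);
	if (--obj->pin_count == 0) {
		release_pages(obj->pages);
		obj->pages = NULL;
	}
	pthread_mutex_unlock(&obj->lock);
}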
diff --git a/drivers/gpu/drm/vgem/vgem_drv.h b/drivers/gpu/drm/vgem/vgem_drv.h
index 1aae01419112..5c8f6d619ff3 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.h
+++ b/drivers/gpu/drm/vgem/vgem_drv.h
@@ -43,7 +43,11 @@ struct vgem_file {
#define to_vgem_bo(x) container_of(x, struct drm_vgem_gem_object, base)
struct drm_vgem_gem_object {
struct drm_gem_object base;
+
struct page **pages;
+ unsigned int pages_pin_count;
+ struct mutex pages_lock;
+
struct sg_table *table;
};
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index 3109c8308eb5..8fd52f211e9d 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -213,7 +213,7 @@ err_fence:
dma_fence_put(fence);
}
err:
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
index 9e0e5392b6ec..aaf766f7cca2 100644
--- a/drivers/gpu/drm/via/via_drv.c
+++ b/drivers/gpu/drm/via/via_drv.c
@@ -77,7 +77,6 @@ static struct drm_driver driver = {
.open = via_driver_open,
.preclose = via_reclaim_buffers_locked,
.postclose = via_driver_postclose,
- .set_busid = drm_pci_set_busid,
.context_dtor = via_final_context,
.get_vblank_counter = via_get_vblank_counter,
.enable_vblank = via_enable_vblank,
@@ -107,12 +106,12 @@ static int __init via_init(void)
{
driver.num_ioctls = via_max_ioctl;
via_init_command_verifier();
- return drm_pci_init(&driver, &via_pci_driver);
+ return drm_legacy_pci_init(&driver, &via_pci_driver);
}
static void __exit via_exit(void)
{
- drm_pci_exit(&driver, &via_pci_driver);
+ drm_legacy_pci_exit(&driver, &via_pci_driver);
}
module_init(via_init);
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index d51bd4521f17..b6d52055a11f 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -113,11 +113,13 @@ static void virtio_gpu_crtc_mode_set_nofb(struct drm_crtc *crtc)
crtc->mode.vdisplay, 0, 0);
}
-static void virtio_gpu_crtc_enable(struct drm_crtc *crtc)
+static void virtio_gpu_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
}
-static void virtio_gpu_crtc_disable(struct drm_crtc *crtc)
+static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct drm_device *dev = crtc->dev;
struct virtio_gpu_device *vgdev = dev->dev_private;
@@ -145,11 +147,11 @@ static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
}
static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = {
- .enable = virtio_gpu_crtc_enable,
- .disable = virtio_gpu_crtc_disable,
.mode_set_nofb = virtio_gpu_crtc_mode_set_nofb,
.atomic_check = virtio_gpu_crtc_atomic_check,
.atomic_flush = virtio_gpu_crtc_atomic_flush,
+ .atomic_enable = virtio_gpu_crtc_atomic_enable,
+ .atomic_disable = virtio_gpu_crtc_atomic_disable,
};
static void virtio_gpu_enc_mode_set(struct drm_encoder *encoder,
@@ -250,7 +252,6 @@ static void virtio_gpu_conn_destroy(struct drm_connector *connector)
}
static const struct drm_connector_funcs virtio_gpu_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.detect = virtio_gpu_conn_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = virtio_gpu_conn_destroy,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 63d35c7e416c..49a3d8d5a249 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -122,7 +122,6 @@ static struct drm_driver driver = {
.dumb_create = virtio_gpu_mode_dumb_create,
.dumb_map_offset = virtio_gpu_mode_dumb_mmap,
- .dumb_destroy = virtio_gpu_mode_dumb_destroy,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = virtio_gpu_debugfs_init,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 3a66abb8fd50..da2fb585fea4 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -236,9 +236,6 @@ struct virtio_gpu_object *virtio_gpu_alloc_object(struct drm_device *dev,
int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-int virtio_gpu_mode_dumb_destroy(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle);
int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
struct drm_device *dev,
uint32_t handle, uint64_t *offset_p);
diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c
index 33df067b11c1..15d18fd0c64b 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fb.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fb.c
@@ -273,7 +273,6 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
vfbdev->helper.fb = fb;
strcpy(info->fix.id, "virtiodrmfb");
- info->flags = FBINFO_DEFAULT;
info->fbops = &virtio_gpufb_ops;
info->pixmap.flags = FB_PIXMAP_SYSTEM;
@@ -309,7 +308,7 @@ static int virtio_gpu_fbdev_destroy(struct drm_device *dev,
return 0;
}
-static struct drm_fb_helper_funcs virtio_gpu_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs virtio_gpu_fb_helper_funcs = {
.fb_probe = virtio_gpufb_create,
};
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index cc025d8fbe19..72ad7b103448 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -118,13 +118,6 @@ fail:
return ret;
}
-int virtio_gpu_mode_dumb_destroy(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle)
-{
- return drm_gem_handle_delete(file_priv, handle);
-}
-
int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
struct drm_device *dev,
uint32_t handle, uint64_t *offset_p)
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index adcdbd0abef6..71ba455af915 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -298,7 +298,7 @@ struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
ret = drm_universal_plane_init(dev, plane, 1 << index,
&virtio_gpu_plane_funcs,
formats, nformats,
- type, NULL);
+ NULL, type, NULL);
if (ret)
goto err_plane_init;
diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c
index c1f2af4ca4ca..cd389c5eaef5 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ttm.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c
@@ -192,7 +192,7 @@ static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
}
static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
- const char *prefix)
+ struct drm_printer *printer)
{
}
@@ -234,7 +234,7 @@ static int virtio_gpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
static void virtio_gpu_evict_flags(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
- static struct ttm_place placements = {
+ static const struct ttm_place placements = {
.fpfn = 0,
.lpfn = 0,
.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM,
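The empty ttm_bo_man_debug() above only changes signature: the TTM manager debug hook now receives a struct drm_printer instead of a string prefix. A non-empty callback would route its output through drm_printf(); a minimal sketch under that assumption (the printed fields are examples, not anything virtio actually reports):

/* Sketch: TTM manager debug callback emitting through drm_printer. */
#include <drm/drm_print.h>
#include <drm/ttm/ttm_bo_driver.h>

static void example_bo_man_debug(struct ttm_mem_type_manager *man,
				 struct drm_printer *printer)
{
	drm_printf(printer, "  use_type: %d\n", man->use_type);
	drm_printf(printer, "  size: %llu\n",
		   (unsigned long long)man->size);
}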
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 35bf781e418e..c7056322211c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -30,49 +30,49 @@
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>
-static struct ttm_place vram_placement_flags = {
+static const struct ttm_place vram_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
};
-static struct ttm_place vram_ne_placement_flags = {
+static const struct ttm_place vram_ne_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};
-static struct ttm_place sys_placement_flags = {
+static const struct ttm_place sys_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
};
-static struct ttm_place sys_ne_placement_flags = {
+static const struct ttm_place sys_ne_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};
-static struct ttm_place gmr_placement_flags = {
+static const struct ttm_place gmr_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};
-static struct ttm_place gmr_ne_placement_flags = {
+static const struct ttm_place gmr_ne_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};
-static struct ttm_place mob_placement_flags = {
+static const struct ttm_place mob_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
};
-static struct ttm_place mob_ne_placement_flags = {
+static const struct ttm_place mob_ne_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
@@ -85,7 +85,7 @@ struct ttm_placement vmw_vram_placement = {
.busy_placement = &vram_placement_flags
};
-static struct ttm_place vram_gmr_placement_flags[] = {
+static const struct ttm_place vram_gmr_placement_flags[] = {
{
.fpfn = 0,
.lpfn = 0,
@@ -97,7 +97,7 @@ static struct ttm_place vram_gmr_placement_flags[] = {
}
};
-static struct ttm_place gmr_vram_placement_flags[] = {
+static const struct ttm_place gmr_vram_placement_flags[] = {
{
.fpfn = 0,
.lpfn = 0,
@@ -116,7 +116,7 @@ struct ttm_placement vmw_vram_gmr_placement = {
.busy_placement = &gmr_placement_flags
};
-static struct ttm_place vram_gmr_ne_placement_flags[] = {
+static const struct ttm_place vram_gmr_ne_placement_flags[] = {
{
.fpfn = 0,
.lpfn = 0,
@@ -165,7 +165,7 @@ struct ttm_placement vmw_sys_ne_placement = {
.busy_placement = &sys_ne_placement_flags
};
-static struct ttm_place evictable_placement_flags[] = {
+static const struct ttm_place evictable_placement_flags[] = {
{
.fpfn = 0,
.lpfn = 0,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 99a7f4ab7d97..c706ad30411b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -51,6 +51,7 @@ struct vmw_cmdbuf_context {
struct list_head hw_submitted;
struct list_head preempted;
unsigned num_hw_submitted;
+ bool block_submission;
};
/**
@@ -60,6 +61,9 @@ struct vmw_cmdbuf_context {
* kernel command submissions, @cur.
* @space_mutex: Mutex to protect against starvation when we allocate
* main pool buffer space.
+ * @error_mutex: Mutex to serialize the work queue error handling.
+ * Note this is not needed if the same workqueue handler
+ * can't race with itself...
* @work: A struct work_struct implementing command buffer error handling.
* Immutable.
* @dev_priv: Pointer to the device private struct. Immutable.
@@ -85,7 +89,6 @@ struct vmw_cmdbuf_context {
* Internal protection.
* @dheaders: Pool of DMA memory for device command buffer headers with trailing
* space for inline data. Internal protection.
- * @tasklet: Tasklet struct for irq processing. Immutable.
* @alloc_queue: Wait queue for processes waiting to allocate command buffer
* space.
* @idle_queue: Wait queue for processes waiting for command buffer idle.
@@ -102,6 +105,7 @@ struct vmw_cmdbuf_context {
struct vmw_cmdbuf_man {
struct mutex cur_mutex;
struct mutex space_mutex;
+ struct mutex error_mutex;
struct work_struct work;
struct vmw_private *dev_priv;
struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
@@ -117,7 +121,6 @@ struct vmw_cmdbuf_man {
spinlock_t lock;
struct dma_pool *headers;
struct dma_pool *dheaders;
- struct tasklet_struct tasklet;
wait_queue_head_t alloc_queue;
wait_queue_head_t idle_queue;
bool irq_on;
@@ -181,12 +184,13 @@ struct vmw_cmdbuf_alloc_info {
};
/* Loop over each context in the command buffer manager. */
-#define for_each_cmdbuf_ctx(_man, _i, _ctx) \
+#define for_each_cmdbuf_ctx(_man, _i, _ctx) \
for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < SVGA_CB_CONTEXT_MAX; \
++(_i), ++(_ctx))
-static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, bool enable);
-
+static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
+ bool enable);
+static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context);
/**
* vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex.
@@ -278,9 +282,9 @@ void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
vmw_cmdbuf_header_inline_free(header);
return;
}
- spin_lock_bh(&man->lock);
+ spin_lock(&man->lock);
__vmw_cmdbuf_header_free(header);
- spin_unlock_bh(&man->lock);
+ spin_unlock(&man->lock);
}
@@ -331,7 +335,8 @@ static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
struct vmw_cmdbuf_context *ctx)
{
while (ctx->num_hw_submitted < man->max_hw_submitted &&
- !list_empty(&ctx->submitted)) {
+ !list_empty(&ctx->submitted) &&
+ !ctx->block_submission) {
struct vmw_cmdbuf_header *entry;
SVGACBStatus status;
@@ -386,12 +391,17 @@ static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
__vmw_cmdbuf_header_free(entry);
break;
case SVGA_CB_STATUS_COMMAND_ERROR:
- case SVGA_CB_STATUS_CB_HEADER_ERROR:
+ entry->cb_header->status = SVGA_CB_STATUS_NONE;
list_add_tail(&entry->list, &man->error);
schedule_work(&man->work);
break;
case SVGA_CB_STATUS_PREEMPTED:
- list_add(&entry->list, &ctx->preempted);
+ entry->cb_header->status = SVGA_CB_STATUS_NONE;
+ list_add_tail(&entry->list, &ctx->preempted);
+ break;
+ case SVGA_CB_STATUS_CB_HEADER_ERROR:
+ WARN_ONCE(true, "Command buffer header error.\n");
+ __vmw_cmdbuf_header_free(entry);
break;
default:
WARN_ONCE(true, "Undefined command buffer status.\n");
@@ -468,20 +478,17 @@ static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
}
/**
- * vmw_cmdbuf_man_tasklet - The main part of the command buffer interrupt
- * handler implemented as a tasklet.
+ * vmw_cmdbuf_irqthread - The main part of the command buffer interrupt
+ * handler implemented as a threaded irq task.
*
- * @data: Tasklet closure. A pointer to the command buffer manager cast to
- * an unsigned long.
+ * @man: Pointer to the command buffer manager.
*
- * The bottom half (tasklet) of the interrupt handler simply calls into the
+ * The bottom half of the interrupt handler simply calls into the
* command buffer processor to free finished buffers and submit any
* queued buffers to hardware.
*/
-static void vmw_cmdbuf_man_tasklet(unsigned long data)
+void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man)
{
- struct vmw_cmdbuf_man *man = (struct vmw_cmdbuf_man *) data;
-
spin_lock(&man->lock);
vmw_cmdbuf_man_process(man);
spin_unlock(&man->lock);
@@ -502,24 +509,112 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
container_of(work, struct vmw_cmdbuf_man, work);
struct vmw_cmdbuf_header *entry, *next;
uint32_t dummy;
- bool restart = false;
+ bool restart[SVGA_CB_CONTEXT_MAX];
+ bool send_fence = false;
+ struct list_head restart_head[SVGA_CB_CONTEXT_MAX];
+ int i;
+ struct vmw_cmdbuf_context *ctx;
- spin_lock_bh(&man->lock);
+ for_each_cmdbuf_ctx(man, i, ctx) {
+ INIT_LIST_HEAD(&restart_head[i]);
+ restart[i] = false;
+ }
+
+ mutex_lock(&man->error_mutex);
+ spin_lock(&man->lock);
list_for_each_entry_safe(entry, next, &man->error, list) {
- restart = true;
- DRM_ERROR("Command buffer error.\n");
+ SVGACBHeader *cb_hdr = entry->cb_header;
+ SVGA3dCmdHeader *header = (SVGA3dCmdHeader *)
+ (entry->cmd + cb_hdr->errorOffset);
+ u32 error_cmd_size, new_start_offset;
+ const char *cmd_name;
+
+ list_del_init(&entry->list);
+ restart[entry->cb_context] = true;
+
+ if (!vmw_cmd_describe(header, &error_cmd_size, &cmd_name)) {
+ DRM_ERROR("Unknown command causing device error.\n");
+ DRM_ERROR("Command buffer offset is %lu\n",
+ (unsigned long) cb_hdr->errorOffset);
+ __vmw_cmdbuf_header_free(entry);
+ send_fence = true;
+ continue;
+ }
- list_del(&entry->list);
- __vmw_cmdbuf_header_free(entry);
- wake_up_all(&man->idle_queue);
+ DRM_ERROR("Command \"%s\" causing device error.\n", cmd_name);
+ DRM_ERROR("Command buffer offset is %lu\n",
+ (unsigned long) cb_hdr->errorOffset);
+ DRM_ERROR("Command size is %lu\n",
+ (unsigned long) error_cmd_size);
+
+ new_start_offset = cb_hdr->errorOffset + error_cmd_size;
+
+ if (new_start_offset >= cb_hdr->length) {
+ __vmw_cmdbuf_header_free(entry);
+ send_fence = true;
+ continue;
+ }
+
+ if (man->using_mob)
+ cb_hdr->ptr.mob.mobOffset += new_start_offset;
+ else
+ cb_hdr->ptr.pa += (u64) new_start_offset;
+
+ entry->cmd += new_start_offset;
+ cb_hdr->length -= new_start_offset;
+ cb_hdr->errorOffset = 0;
+ cb_hdr->offset = 0;
+ list_add_tail(&entry->list, &restart_head[entry->cb_context]);
+ man->ctx[entry->cb_context].block_submission = true;
+ }
+ spin_unlock(&man->lock);
+
+ /* Preempt all contexts with errors */
+ for_each_cmdbuf_ctx(man, i, ctx) {
+ if (ctx->block_submission && vmw_cmdbuf_preempt(man, i))
+ DRM_ERROR("Failed preempting command buffer "
+ "context %u.\n", i);
+ }
+
+ spin_lock(&man->lock);
+ for_each_cmdbuf_ctx(man, i, ctx) {
+ if (!ctx->block_submission)
+ continue;
+
+ /* Move preempted command buffers to the preempted queue. */
+ vmw_cmdbuf_ctx_process(man, ctx, &dummy);
+
+ /*
+ * Add the preempted queue after the command buffer
+ * that caused an error.
+ */
+ list_splice_init(&ctx->preempted, restart_head[i].prev);
+
+ /*
+ * Finally add all command buffers first in the submitted
+ * queue, to rerun them.
+ */
+ list_splice_init(&restart_head[i], &ctx->submitted);
+
+ ctx->block_submission = false;
}
- spin_unlock_bh(&man->lock);
- if (restart && vmw_cmdbuf_startstop(man, true))
- DRM_ERROR("Failed restarting command buffer context 0.\n");
+ vmw_cmdbuf_man_process(man);
+ spin_unlock(&man->lock);
+
+ for_each_cmdbuf_ctx(man, i, ctx) {
+ if (restart[i] && vmw_cmdbuf_startstop(man, i, true))
+ DRM_ERROR("Failed restarting command buffer "
+ "context %u.\n", i);
+ }
/* Send a new fence in case one was removed */
- vmw_fifo_send_fence(man->dev_priv, &dummy);
+ if (send_fence) {
+ vmw_fifo_send_fence(man->dev_priv, &dummy);
+ wake_up_all(&man->idle_queue);
+ }
+
+ mutex_unlock(&man->error_mutex);
}
/**
@@ -536,7 +631,7 @@ static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
bool idle = false;
int i;
- spin_lock_bh(&man->lock);
+ spin_lock(&man->lock);
vmw_cmdbuf_man_process(man);
for_each_cmdbuf_ctx(man, i, ctx) {
if (!list_empty(&ctx->submitted) ||
@@ -548,7 +643,7 @@ static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
idle = list_empty(&man->error);
out_unlock:
- spin_unlock_bh(&man->lock);
+ spin_unlock(&man->lock);
return idle;
}
@@ -571,7 +666,7 @@ static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
if (!cur)
return;
- spin_lock_bh(&man->lock);
+ spin_lock(&man->lock);
if (man->cur_pos == 0) {
__vmw_cmdbuf_header_free(cur);
goto out_unlock;
@@ -580,7 +675,7 @@ static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
man->cur->cb_header->length = man->cur_pos;
vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
out_unlock:
- spin_unlock_bh(&man->lock);
+ spin_unlock(&man->lock);
man->cur = NULL;
man->cur_pos = 0;
}
@@ -673,14 +768,14 @@ static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
return true;
memset(info->node, 0, sizeof(*info->node));
- spin_lock_bh(&man->lock);
+ spin_lock(&man->lock);
ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
if (ret) {
vmw_cmdbuf_man_process(man);
ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
}
- spin_unlock_bh(&man->lock);
+ spin_unlock(&man->lock);
info->done = !ret;
return info->done;
@@ -779,8 +874,8 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
if (ret)
return ret;
- header->cb_header = dma_pool_alloc(man->headers, GFP_KERNEL,
- &header->handle);
+ header->cb_header = dma_pool_zalloc(man->headers, GFP_KERNEL,
+ &header->handle);
if (!header->cb_header) {
ret = -ENOMEM;
goto out_no_cb_header;
@@ -790,7 +885,6 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
cb_hdr = header->cb_header;
offset = header->node.start << PAGE_SHIFT;
header->cmd = man->map + offset;
- memset(cb_hdr, 0, sizeof(*cb_hdr));
if (man->using_mob) {
cb_hdr->flags = SVGA_CB_FLAG_MOB;
cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start;
@@ -802,9 +896,9 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
return 0;
out_no_cb_header:
- spin_lock_bh(&man->lock);
+ spin_lock(&man->lock);
drm_mm_remove_node(&header->node);
- spin_unlock_bh(&man->lock);
+ spin_unlock(&man->lock);
return ret;
}
@@ -827,8 +921,8 @@ static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
return -ENOMEM;
- dheader = dma_pool_alloc(man->dheaders, GFP_KERNEL,
- &header->handle);
+ dheader = dma_pool_zalloc(man->dheaders, GFP_KERNEL,
+ &header->handle);
if (!dheader)
return -ENOMEM;
@@ -837,7 +931,6 @@ static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
cb_hdr = &dheader->cb_header;
header->cb_header = cb_hdr;
header->cmd = dheader->cmd;
- memset(dheader, 0, sizeof(*dheader));
cb_hdr->status = SVGA_CB_STATUS_NONE;
cb_hdr->flags = SVGA_CB_FLAG_NONE;
cb_hdr->ptr.pa = (u64)header->handle +
@@ -1025,18 +1118,6 @@ void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
vmw_cmdbuf_cur_unlock(man);
}
-/**
- * vmw_cmdbuf_tasklet_schedule - Schedule the interrupt handler bottom half.
- *
- * @man: The command buffer manager.
- */
-void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man)
-{
- if (!man)
- return;
-
- tasklet_schedule(&man->tasklet);
-}
/**
* vmw_cmdbuf_send_device_command - Send a command through the device context.
@@ -1061,9 +1142,9 @@ static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
memcpy(cmd, command, size);
header->cb_header->length = size;
header->cb_context = SVGA_CB_CONTEXT_DEVICE;
- spin_lock_bh(&man->lock);
+ spin_lock(&man->lock);
status = vmw_cmdbuf_header_submit(header);
- spin_unlock_bh(&man->lock);
+ spin_unlock(&man->lock);
vmw_cmdbuf_header_free(header);
if (status != SVGA_CB_STATUS_COMPLETED) {
@@ -1076,6 +1157,29 @@ static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
}
/**
+ * vmw_cmdbuf_preempt - Send a preempt command through the device
+ * context.
+ *
+ * @man: The command buffer manager.
+ *
+ * Synchronously sends a preempt command.
+ */
+static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context)
+{
+ struct {
+ uint32 id;
+ SVGADCCmdPreempt body;
+ } __packed cmd;
+
+ cmd.id = SVGA_DC_CMD_PREEMPT;
+ cmd.body.context = SVGA_CB_CONTEXT_0 + context;
+ cmd.body.ignoreIDZero = 0;
+
+ return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
+}
+
+
+/**
* vmw_cmdbuf_startstop - Send a start / stop command through the device
* context.
*
@@ -1084,7 +1188,7 @@ static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
*
* Synchronously sends a device start / stop context command.
*/
-static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man,
+static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
bool enable)
{
struct {
@@ -1094,7 +1198,7 @@ static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man,
cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT;
cmd.body.enable = (enable) ? 1 : 0;
- cmd.body.context = SVGA_CB_CONTEXT_0;
+ cmd.body.context = SVGA_CB_CONTEXT_0 + context;
return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
}
@@ -1193,7 +1297,7 @@ struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
{
struct vmw_cmdbuf_man *man;
struct vmw_cmdbuf_context *ctx;
- int i;
+ unsigned int i;
int ret;
if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS))
@@ -1228,8 +1332,7 @@ struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
spin_lock_init(&man->lock);
mutex_init(&man->cur_mutex);
mutex_init(&man->space_mutex);
- tasklet_init(&man->tasklet, vmw_cmdbuf_man_tasklet,
- (unsigned long) man);
+ mutex_init(&man->error_mutex);
man->default_size = VMW_CMDBUF_INLINE_SIZE;
init_waitqueue_head(&man->alloc_queue);
init_waitqueue_head(&man->idle_queue);
@@ -1238,11 +1341,14 @@ struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
&dev_priv->error_waiters);
- ret = vmw_cmdbuf_startstop(man, true);
- if (ret) {
- DRM_ERROR("Failed starting command buffer context 0.\n");
- vmw_cmdbuf_man_destroy(man);
- return ERR_PTR(ret);
+ for_each_cmdbuf_ctx(man, i, ctx) {
+ ret = vmw_cmdbuf_startstop(man, i, true);
+ if (ret) {
+ DRM_ERROR("Failed starting command buffer "
+ "context %u.\n", i);
+ vmw_cmdbuf_man_destroy(man);
+ return ERR_PTR(ret);
+ }
}
return man;
@@ -1292,18 +1398,24 @@ void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
*/
void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
{
+ struct vmw_cmdbuf_context *ctx;
+ unsigned int i;
+
WARN_ON_ONCE(man->has_pool);
(void) vmw_cmdbuf_idle(man, false, 10*HZ);
- if (vmw_cmdbuf_startstop(man, false))
- DRM_ERROR("Failed stopping command buffer context 0.\n");
+
+ for_each_cmdbuf_ctx(man, i, ctx)
+ if (vmw_cmdbuf_startstop(man, i, false))
+ DRM_ERROR("Failed stopping command buffer "
+ "context %u.\n", i);
vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
&man->dev_priv->error_waiters);
- tasklet_kill(&man->tasklet);
(void) cancel_work_sync(&man->work);
dma_pool_destroy(man->dheaders);
dma_pool_destroy(man->headers);
mutex_destroy(&man->cur_mutex);
mutex_destroy(&man->space_mutex);
+ mutex_destroy(&man->error_mutex);
kfree(man);
}
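With the tasklet gone, the command-buffer bottom half above runs from a threaded interrupt handler (vmw_cmdbuf_irqthread()), which is why every spin_lock_bh()/spin_unlock_bh() on man->lock relaxes to plain spin_lock()/spin_unlock(). A hedged sketch of the general threaded-IRQ wiring this relies on; the actual vmw_irq_install() plumbing referenced in the vmwgfx_drv.c hunks below is not reproduced here, so all example_* names are placeholders:

/* Sketch: hard-irq top half plus threaded bottom half. */
#include <linux/device.h>
#include <linux/interrupt.h>

static irqreturn_t example_irq_handler(int irq, void *arg)
{
	/* Top half: ack the hardware, defer real work to the thread. */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t example_irq_thread_fn(int irq, void *arg)
{
	/*
	 * Bottom half in process context: plain spin_lock() suffices,
	 * no _bh variants as with the old tasklet.
	 */
	return IRQ_HANDLED;
}

static int example_irq_install(struct device *dev, int irq)
{
	return request_threaded_irq(irq, example_irq_handler,
				    example_irq_thread_fn, IRQF_SHARED,
				    "example", dev);
}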
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
index 1f013d45c9e9..36c7b6c839c0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -205,7 +205,7 @@ int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
int ret;
cres = kzalloc(sizeof(*cres), GFP_KERNEL);
- if (unlikely(cres == NULL))
+ if (unlikely(!cres))
return -ENOMEM;
cres->hash.key = user_key | (res_type << 24);
@@ -291,7 +291,7 @@ vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv)
int ret;
man = kzalloc(sizeof(*man), GFP_KERNEL);
- if (man == NULL)
+ if (!man)
return ERR_PTR(-ENOMEM);
man->dev_priv = dev_priv;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index bcc6d4136c87..4212b3e673bc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -210,8 +210,8 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
&uctx->res, i);
- if (unlikely(uctx->cotables[i] == NULL)) {
- ret = -ENOMEM;
+ if (unlikely(IS_ERR(uctx->cotables[i]))) {
+ ret = PTR_ERR(uctx->cotables[i]);
goto out_cotables;
}
}
@@ -777,7 +777,7 @@ static int vmw_context_define(struct drm_device *dev, void *data,
}
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (unlikely(ctx == NULL)) {
+ if (unlikely(!ctx)) {
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_user_context_size);
ret = -ENOMEM;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index 6c026d75c180..d87861bbe971 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -584,7 +584,7 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
return ERR_PTR(ret);
vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL);
- if (unlikely(vcotbl == NULL)) {
+ if (unlikely(!vcotbl)) {
ret = -ENOMEM;
goto out_no_alloc;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 4a641555b960..e84fee3ec4f3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -36,7 +36,6 @@
#include <drm/ttm/ttm_module.h>
#include <linux/dma_remapping.h>
-#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0
@@ -227,7 +226,7 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
DRM_AUTH | DRM_RENDER_ALLOW),
};
-static struct pci_device_id vmw_pci_id_list[] = {
+static const struct pci_device_id vmw_pci_id_list[] = {
{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
{0, 0, 0}
};
@@ -630,7 +629,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
char host_log[100] = {0};
dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
- if (unlikely(dev_priv == NULL)) {
+ if (unlikely(!dev_priv)) {
DRM_ERROR("Failed allocating a device private struct.\n");
return -ENOMEM;
}
@@ -825,7 +824,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
}
if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
- ret = drm_irq_install(dev, dev->pdev->irq);
+ ret = vmw_irq_install(dev, dev->pdev->irq);
if (ret != 0) {
DRM_ERROR("Failed installing irq: %d\n", ret);
goto out_no_irq;
@@ -937,7 +936,7 @@ out_no_bdev:
vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
- drm_irq_uninstall(dev_priv->dev);
+ vmw_irq_uninstall(dev_priv->dev);
out_no_irq:
if (dev_priv->stealth)
pci_release_region(dev->pdev, 2);
@@ -990,7 +989,7 @@ static void vmw_driver_unload(struct drm_device *dev)
vmw_release_device_late(dev_priv);
vmw_fence_manager_takedown(dev_priv->fman);
if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
- drm_irq_uninstall(dev_priv->dev);
+ vmw_irq_uninstall(dev_priv->dev);
if (dev_priv->stealth)
pci_release_region(dev->pdev, 2);
else
@@ -1035,7 +1034,7 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
int ret = -ENOMEM;
vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
- if (unlikely(vmw_fp == NULL))
+ if (unlikely(!vmw_fp))
return ret;
vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
@@ -1196,7 +1195,7 @@ static int vmw_master_create(struct drm_device *dev,
struct vmw_master *vmaster;
vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
- if (unlikely(vmaster == NULL))
+ if (unlikely(!vmaster))
return -ENOMEM;
vmw_master_init(vmaster);
@@ -1516,10 +1515,6 @@ static struct drm_driver driver = {
.load = vmw_driver_load,
.unload = vmw_driver_unload,
.lastclose = vmw_lastclose,
- .irq_preinstall = vmw_irq_preinstall,
- .irq_postinstall = vmw_irq_postinstall,
- .irq_uninstall = vmw_irq_uninstall,
- .irq_handler = vmw_irq_handler,
.get_vblank_counter = vmw_get_vblank_counter,
.enable_vblank = vmw_enable_vblank,
.disable_vblank = vmw_disable_vblank,
@@ -1531,7 +1526,6 @@ static struct drm_driver driver = {
.master_drop = vmw_master_drop,
.open = vmw_driver_open,
.postclose = vmw_postclose,
- .set_busid = drm_pci_set_busid,
.dumb_create = vmw_dumb_create,
.dumb_map_offset = vmw_dumb_map_offset,
@@ -1571,7 +1565,7 @@ static int __init vmwgfx_init(void)
if (vgacon_text_force())
return -EINVAL;
- ret = drm_pci_init(&driver, &vmw_pci_driver);
+ ret = pci_register_driver(&vmw_pci_driver);
if (ret)
DRM_ERROR("Failed initializing DRM.\n");
return ret;
@@ -1579,7 +1573,7 @@ static int __init vmwgfx_init(void)
static void __exit vmwgfx_exit(void)
{
- drm_pci_exit(&driver, &vmw_pci_driver);
+ pci_unregister_driver(&vmw_pci_driver);
}
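The drm_pci_init()/drm_pci_exit() wrappers are dropped in favour of registering the PCI driver directly. A minimal, self-contained sketch of that pattern, with every example_* name hypothetical and only pci_register_driver()/pci_unregister_driver() being the point:

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id example_pci_ids[] = {
	{ PCI_DEVICE(0x15ad, 0x0405) },	/* hypothetical match entry */
	{ }
};
MODULE_DEVICE_TABLE(pci, example_pci_ids);

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	return 0;	/* a real probe would enable the device, map BARs, ... */
}

static void example_remove(struct pci_dev *pdev)
{
}

static struct pci_driver example_pci_driver = {
	.name     = "example",
	.id_table = example_pci_ids,
	.probe    = example_probe,
	.remove   = example_remove,
};

static int __init example_init(void)
{
	return pci_register_driver(&example_pci_driver);
}

static void __exit example_exit(void)
{
	pci_unregister_driver(&example_pci_driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");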
module_init(vmwgfx_init);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 4b948fba9eec..7e5f30e234b1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -40,10 +40,12 @@
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h"
+#include <linux/sync_file.h>
-#define VMWGFX_DRIVER_DATE "20170607"
+#define VMWGFX_DRIVER_NAME "vmwgfx"
+#define VMWGFX_DRIVER_DATE "20170612"
#define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 13
+#define VMWGFX_DRIVER_MINOR 14
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
@@ -351,6 +353,12 @@ struct vmw_otable_batch {
struct ttm_buffer_object *otable_bo;
};
+enum {
+ VMW_IRQTHREAD_FENCE,
+ VMW_IRQTHREAD_CMDBUF,
+ VMW_IRQTHREAD_MAX
+};
+
struct vmw_private {
struct ttm_bo_device bdev;
struct ttm_bo_global_ref bo_global_ref;
@@ -529,6 +537,7 @@ struct vmw_private {
struct vmw_otable_batch otable_batch;
struct vmw_cmdbuf_man *cman;
+ DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX);
};
static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
@@ -561,24 +570,21 @@ static inline struct vmw_master *vmw_master(struct drm_master *master)
static inline void vmw_write(struct vmw_private *dev_priv,
unsigned int offset, uint32_t value)
{
- unsigned long irq_flags;
-
- spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
+ spin_lock(&dev_priv->hw_lock);
outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
- spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
+ spin_unlock(&dev_priv->hw_lock);
}
static inline uint32_t vmw_read(struct vmw_private *dev_priv,
unsigned int offset)
{
- unsigned long irq_flags;
u32 val;
- spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
+ spin_lock(&dev_priv->hw_lock);
outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
- spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
+ spin_unlock(&dev_priv->hw_lock);
return val;
}
@@ -821,7 +827,8 @@ extern int vmw_execbuf_process(struct drm_file *file_priv,
uint32_t dx_context_handle,
struct drm_vmw_fence_rep __user
*user_fence_rep,
- struct vmw_fence_obj **out_fence);
+ struct vmw_fence_obj **out_fence,
+ uint32_t flags);
extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
struct vmw_fence_obj *fence);
extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);
@@ -836,23 +843,23 @@ extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
struct drm_vmw_fence_rep __user
*user_fence_rep,
struct vmw_fence_obj *fence,
- uint32_t fence_handle);
+ uint32_t fence_handle,
+ int32_t out_fence_fd,
+ struct sync_file *sync_file);
extern int vmw_validate_single_buffer(struct vmw_private *dev_priv,
struct ttm_buffer_object *bo,
bool interruptible,
bool validate_as_mob);
-
+bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd);
/**
* IRQs and waiting - vmwgfx_irq.c
*/
-extern irqreturn_t vmw_irq_handler(int irq, void *arg);
extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
uint32_t seqno, bool interruptible,
unsigned long timeout);
-extern void vmw_irq_preinstall(struct drm_device *dev);
-extern int vmw_irq_postinstall(struct drm_device *dev);
+extern int vmw_irq_install(struct drm_device *dev, int irq);
extern void vmw_irq_uninstall(struct drm_device *dev);
extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
uint32_t seqno);
@@ -1150,13 +1157,13 @@ extern void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
struct vmw_cmdbuf_header *header,
bool flush);
-extern void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man);
extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
size_t size, bool interruptible,
struct vmw_cmdbuf_header **p_header);
extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header);
extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
bool interruptible);
+extern void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man);
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index c7b53d987f06..21c62a34e558 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -24,6 +24,7 @@
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
+#include <linux/sync_file.h>
#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
@@ -112,11 +113,12 @@ struct vmw_cmd_entry {
bool user_allow;
bool gb_disable;
bool gb_enable;
+ const char *cmd_name;
};
#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable) \
[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
- (_gb_disable), (_gb_enable)}
+ (_gb_disable), (_gb_enable), #_cmd}
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
@@ -264,7 +266,7 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
}
node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (unlikely(node == NULL)) {
+ if (unlikely(!node)) {
DRM_ERROR("Failed to allocate a resource validation "
"entry.\n");
return -ENOMEM;
@@ -452,7 +454,7 @@ static int vmw_resource_relocation_add(struct list_head *list,
struct vmw_resource_relocation *rel;
rel = kmalloc(sizeof(*rel), GFP_KERNEL);
- if (unlikely(rel == NULL)) {
+ if (unlikely(!rel)) {
DRM_ERROR("Failed to allocate a resource relocation.\n");
return -ENOMEM;
}
@@ -519,7 +521,7 @@ static int vmw_cmd_invalid(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- return capable(CAP_SYS_ADMIN) ? : -EINVAL;
+ return -EINVAL;
}
static int vmw_cmd_ok(struct vmw_private *dev_priv,
@@ -2584,7 +2586,7 @@ static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
/**
* vmw_cmd_dx_ia_set_vertex_buffers - Validate an
- * SVGA_3D_CMD_DX_IA_SET_VERTEX_BUFFERS command.
+ * SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -3302,6 +3304,8 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
+ true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
false, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
@@ -3469,6 +3473,51 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
true, false, true),
};
+bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
+{
+ u32 cmd_id = ((u32 *) buf)[0];
+
+ if (cmd_id >= SVGA_CMD_MAX) {
+ SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
+ const struct vmw_cmd_entry *entry;
+
+ *size = header->size + sizeof(SVGA3dCmdHeader);
+ cmd_id = header->id;
+ if (cmd_id >= SVGA_3D_CMD_MAX)
+ return false;
+
+ cmd_id -= SVGA_3D_CMD_BASE;
+ entry = &vmw_cmd_entries[cmd_id];
+ *cmd = entry->cmd_name;
+ return true;
+ }
+
+ switch (cmd_id) {
+ case SVGA_CMD_UPDATE:
+ *cmd = "SVGA_CMD_UPDATE";
+ *size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
+ break;
+ case SVGA_CMD_DEFINE_GMRFB:
+ *cmd = "SVGA_CMD_DEFINE_GMRFB";
+ *size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
+ break;
+ case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
+ *cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
+ *size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
+ break;
+ case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
+ *cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
+ *size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
+ break;
+ default:
+ *cmd = "UNKNOWN";
+ *size = 0;
+ return false;
+ }
+
+ return true;
+}
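A hedged usage sketch for the new vmw_cmd_describe() helper: walk a raw command stream and log each command's name and size. The buffer and length are hypothetical, the declaration is assumed to come from vmwgfx_drv.h, and entries without a VMW_CMD_DEF() initializer report a NULL name:

static void example_dump_commands(const u8 *buf, size_t bytes_left)
{
	const char *name;
	u32 size;

	while (bytes_left >= sizeof(u32)) {
		if (!vmw_cmd_describe(buf, &size, &name) || !size ||
		    size > bytes_left)
			break;
		pr_info("cmd %s (%u bytes)\n", name ? name : "UNKNOWN", size);
		buf += size;
		bytes_left -= size;
	}
}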
+
static int vmw_cmd_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
void *buf, uint32_t *size)
@@ -3781,6 +3830,8 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
* which the information should be copied.
* @fence: Pointer to the fence object.
* @fence_handle: User-space fence handle.
+ * @out_fence_fd: exported file descriptor for the fence. -1 if not used
+ * @sync_file: Only used to clean up in case of an error in this function.
*
* This function copies fence information to user-space. If copying fails,
* The user-space struct drm_vmw_fence_rep::error member is hopefully
@@ -3796,7 +3847,9 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
int ret,
struct drm_vmw_fence_rep __user *user_fence_rep,
struct vmw_fence_obj *fence,
- uint32_t fence_handle)
+ uint32_t fence_handle,
+ int32_t out_fence_fd,
+ struct sync_file *sync_file)
{
struct drm_vmw_fence_rep fence_rep;
@@ -3806,6 +3859,7 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
memset(&fence_rep, 0, sizeof(fence_rep));
fence_rep.error = ret;
+ fence_rep.fd = out_fence_fd;
if (ret == 0) {
BUG_ON(fence == NULL);
@@ -3828,6 +3882,14 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
* and unreference the handle.
*/
if (unlikely(ret != 0) && (fence_rep.error == 0)) {
+ if (sync_file)
+ fput(sync_file->file);
+
+ if (fence_rep.fd != -1) {
+ put_unused_fd(fence_rep.fd);
+ fence_rep.fd = -1;
+ }
+
ttm_ref_object_base_unref(vmw_fp->tfile,
fence_handle, TTM_REF_USAGE);
DRM_ERROR("Fence copy error. Syncing.\n");
@@ -4003,7 +4065,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
uint64_t throttle_us,
uint32_t dx_context_handle,
struct drm_vmw_fence_rep __user *user_fence_rep,
- struct vmw_fence_obj **out_fence)
+ struct vmw_fence_obj **out_fence,
+ uint32_t flags)
{
struct vmw_sw_context *sw_context = &dev_priv->ctx;
struct vmw_fence_obj *fence = NULL;
@@ -4013,20 +4076,33 @@ int vmw_execbuf_process(struct drm_file *file_priv,
struct ww_acquire_ctx ticket;
uint32_t handle;
int ret;
+ int32_t out_fence_fd = -1;
+ struct sync_file *sync_file = NULL;
+
+
+ if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
+ out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
+ if (out_fence_fd < 0) {
+ DRM_ERROR("Failed to get a fence file descriptor.\n");
+ return out_fence_fd;
+ }
+ }
if (throttle_us) {
ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
throttle_us);
if (ret)
- return ret;
+ goto out_free_fence_fd;
}
kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
kernel_commands, command_size,
&header);
- if (IS_ERR(kernel_commands))
- return PTR_ERR(kernel_commands);
+ if (IS_ERR(kernel_commands)) {
+ ret = PTR_ERR(kernel_commands);
+ goto out_free_fence_fd;
+ }
ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
if (ret) {
@@ -4162,8 +4238,32 @@ int vmw_execbuf_process(struct drm_file *file_priv,
__vmw_execbuf_release_pinned_bo(dev_priv, fence);
vmw_clear_validations(sw_context);
+
+ /*
+ * If anything fails here, give up trying to export the fence
+ * and do a sync since the user mode will not be able to sync
+ * the fence itself. This ensures we are still functionally
+ * correct.
+ */
+ if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
+
+ sync_file = sync_file_create(&fence->base);
+ if (!sync_file) {
+ DRM_ERROR("Unable to create sync file for fence\n");
+ put_unused_fd(out_fence_fd);
+ out_fence_fd = -1;
+
+ (void) vmw_fence_obj_wait(fence, false, false,
+ VMW_FENCE_WAIT_TIMEOUT);
+ } else {
+ /* Link the fence with the FD created earlier */
+ fd_install(out_fence_fd, sync_file->file);
+ }
+ }
+
vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
- user_fence_rep, fence, handle);
+ user_fence_rep, fence, handle,
+ out_fence_fd, sync_file);
/* Don't unreference when handing fence out */
if (unlikely(out_fence != NULL)) {
@@ -4214,6 +4314,9 @@ out_unlock:
out_free_header:
if (header)
vmw_cmdbuf_header_free(header);
+out_free_fence_fd:
+ if (out_fence_fd >= 0)
+ put_unused_fd(out_fence_fd);
return ret;
}
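The export path above follows the usual fence-FD pattern: reserve a file descriptor before doing anything that can fail on the GPU side, create the sync_file once the fence exists, and only couple the two with fd_install() when nothing can fail any more; otherwise the reserved descriptor is released with put_unused_fd(). A minimal sketch of just that pattern, assuming an already-referenced fence:

#include <linux/dma-fence.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/sync_file.h>

static int example_export_fence_fd(struct dma_fence *fence)
{
	struct sync_file *sync_file;
	int fd;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	sync_file = sync_file_create(fence);	/* takes its own fence reference */
	if (!sync_file) {
		put_unused_fd(fd);
		return -ENOMEM;
	}

	fd_install(fd, sync_file->file);	/* fd is now owned by user space */
	return fd;
}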
@@ -4366,6 +4469,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
static const size_t copy_offset[] = {
offsetof(struct drm_vmw_execbuf_arg, context_handle),
sizeof(struct drm_vmw_execbuf_arg)};
+ struct dma_fence *in_fence = NULL;
if (unlikely(size < copy_offset[0])) {
DRM_ERROR("Invalid command size, ioctl %d\n",
@@ -4401,15 +4505,25 @@ int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
arg.context_handle = (uint32_t) -1;
break;
case 2:
- if (arg.pad64 != 0) {
- DRM_ERROR("Unused IOCTL data not set to zero.\n");
- return -EINVAL;
- }
- break;
default:
break;
}
+
+ /* If imported a fence FD from elsewhere, then wait on it */
+ if (arg.flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
+ in_fence = sync_file_get_fence(arg.imported_fence_fd);
+
+ if (!in_fence) {
+ DRM_ERROR("Cannot get imported fence\n");
+ return -EINVAL;
+ }
+
+ ret = vmw_wait_dma_fence(dev_priv->fman, in_fence);
+ if (ret)
+ goto out;
+ }
+
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0))
return ret;
@@ -4419,12 +4533,16 @@ int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
NULL, arg.command_size, arg.throttle_us,
arg.context_handle,
(void __user *)(unsigned long)arg.fence_rep,
- NULL);
+ NULL,
+ arg.flags);
ttm_read_unlock(&dev_priv->reservation_sem);
if (unlikely(ret != 0))
- return ret;
+ goto out;
vmw_kms_cursor_post_execbuf(dev_priv);
- return 0;
+out:
+ if (in_fence)
+ dma_fence_put(in_fence);
+ return ret;
}
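On the import side the ioctl resolves the descriptor with sync_file_get_fence(), waits, and drops the reference when done. A sketch of the plain single-fence case (the driver code above additionally decomposes fence arrays via vmw_wait_dma_fence()):

#include <linux/dma-fence.h>
#include <linux/sync_file.h>

static int example_wait_on_fence_fd(int fd)
{
	struct dma_fence *fence = sync_file_get_fence(fd);
	long ret;

	if (!fence)
		return -EINVAL;

	ret = dma_fence_wait(fence, true);	/* interruptible; 0 or -errno */
	dma_fence_put(fence);

	return ret;
}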
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 6f4cb4678cbc..d23a18aae476 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -779,7 +779,6 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
info->screen_base = (char __iomem *)par->vmalloc;
info->screen_size = fb_size;
- info->flags = FBINFO_DEFAULT;
info->fbops = &vmw_fb_ops;
/* 24 depth per default */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 6b2708b4eafe..3bbad22b3748 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -114,12 +114,11 @@ static void vmw_fence_obj_destroy(struct dma_fence *f)
container_of(f, struct vmw_fence_obj, base);
struct vmw_fence_manager *fman = fman_from_fence(fence);
- unsigned long irq_flags;
- spin_lock_irqsave(&fman->lock, irq_flags);
+ spin_lock(&fman->lock);
list_del_init(&fence->head);
--fman->num_fence_objects;
- spin_unlock_irqrestore(&fman->lock, irq_flags);
+ spin_unlock(&fman->lock);
fence->destroy(fence);
}
@@ -252,10 +251,10 @@ static void vmw_fence_work_func(struct work_struct *work)
INIT_LIST_HEAD(&list);
mutex_lock(&fman->goal_irq_mutex);
- spin_lock_irq(&fman->lock);
+ spin_lock(&fman->lock);
list_splice_init(&fman->cleanup_list, &list);
seqno_valid = fman->seqno_valid;
- spin_unlock_irq(&fman->lock);
+ spin_unlock(&fman->lock);
if (!seqno_valid && fman->goal_irq_on) {
fman->goal_irq_on = false;
@@ -284,7 +283,7 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);
- if (unlikely(fman == NULL))
+ if (unlikely(!fman))
return NULL;
fman->dev_priv = dev_priv;
@@ -305,15 +304,14 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
- unsigned long irq_flags;
bool lists_empty;
(void) cancel_work_sync(&fman->work);
- spin_lock_irqsave(&fman->lock, irq_flags);
+ spin_lock(&fman->lock);
lists_empty = list_empty(&fman->fence_list) &&
list_empty(&fman->cleanup_list);
- spin_unlock_irqrestore(&fman->lock, irq_flags);
+ spin_unlock(&fman->lock);
BUG_ON(!lists_empty);
kfree(fman);
@@ -323,7 +321,6 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
struct vmw_fence_obj *fence, u32 seqno,
void (*destroy) (struct vmw_fence_obj *fence))
{
- unsigned long irq_flags;
int ret = 0;
dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
@@ -331,7 +328,7 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
INIT_LIST_HEAD(&fence->seq_passed_actions);
fence->destroy = destroy;
- spin_lock_irqsave(&fman->lock, irq_flags);
+ spin_lock(&fman->lock);
if (unlikely(fman->fifo_down)) {
ret = -EBUSY;
goto out_unlock;
@@ -340,7 +337,7 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
++fman->num_fence_objects;
out_unlock:
- spin_unlock_irqrestore(&fman->lock, irq_flags);
+ spin_unlock(&fman->lock);
return ret;
}
@@ -489,11 +486,9 @@ rerun:
void vmw_fences_update(struct vmw_fence_manager *fman)
{
- unsigned long irq_flags;
-
- spin_lock_irqsave(&fman->lock, irq_flags);
+ spin_lock(&fman->lock);
__vmw_fences_update(fman);
- spin_unlock_irqrestore(&fman->lock, irq_flags);
+ spin_unlock(&fman->lock);
}
bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
@@ -541,7 +536,7 @@ int vmw_fence_create(struct vmw_fence_manager *fman,
int ret;
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
- if (unlikely(fence == NULL))
+ if (unlikely(!fence))
return -ENOMEM;
ret = vmw_fence_obj_init(fman, fence, seqno,
@@ -606,7 +601,7 @@ int vmw_user_fence_create(struct drm_file *file_priv,
return ret;
ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
- if (unlikely(ufence == NULL)) {
+ if (unlikely(!ufence)) {
ret = -ENOMEM;
goto out_no_object;
}
@@ -650,6 +645,51 @@ out_no_object:
/**
+ * vmw_wait_dma_fence - Wait for a dma fence
+ *
+ * @fman: pointer to a fence manager
+ * @fence: DMA fence to wait on
+ *
+ * This function handles the case when the fence is actually a fence
+ * array. If that's the case, it'll wait on each of the child fences.
+ */
+int vmw_wait_dma_fence(struct vmw_fence_manager *fman,
+ struct dma_fence *fence)
+{
+ struct dma_fence_array *fence_array;
+ int ret = 0;
+ int i;
+
+
+ if (dma_fence_is_signaled(fence))
+ return 0;
+
+ if (!dma_fence_is_array(fence))
+ return dma_fence_wait(fence, true);
+
+ /* From i915: Note that if the fence-array was created in
+ * signal-on-any mode, we should *not* decompose it into its individual
+ * fences. However, we don't currently store which mode the fence-array
+ * is operating in. Fortunately, the only user of signal-on-any is
+ * private to amdgpu and we should not see any incoming fence-array
+ * from sync-file being in signal-on-any mode.
+ */
+
+ fence_array = to_dma_fence_array(fence);
+ for (i = 0; i < fence_array->num_fences; i++) {
+ struct dma_fence *child = fence_array->fences[i];
+
+ ret = dma_fence_wait(child, true);
+
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+
+/**
* vmw_fence_fifo_down - signal all unsignaled fence objects.
*/
@@ -663,14 +703,14 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
* restart when we've released the fman->lock.
*/
- spin_lock_irq(&fman->lock);
+ spin_lock(&fman->lock);
fman->fifo_down = true;
while (!list_empty(&fman->fence_list)) {
struct vmw_fence_obj *fence =
list_entry(fman->fence_list.prev, struct vmw_fence_obj,
head);
dma_fence_get(&fence->base);
- spin_unlock_irq(&fman->lock);
+ spin_unlock(&fman->lock);
ret = vmw_fence_obj_wait(fence, false, false,
VMW_FENCE_WAIT_TIMEOUT);
@@ -686,18 +726,16 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
BUG_ON(!list_empty(&fence->head));
dma_fence_put(&fence->base);
- spin_lock_irq(&fman->lock);
+ spin_lock(&fman->lock);
}
- spin_unlock_irq(&fman->lock);
+ spin_unlock(&fman->lock);
}
void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
{
- unsigned long irq_flags;
-
- spin_lock_irqsave(&fman->lock, irq_flags);
+ spin_lock(&fman->lock);
fman->fifo_down = false;
- spin_unlock_irqrestore(&fman->lock, irq_flags);
+ spin_unlock(&fman->lock);
}
@@ -812,9 +850,9 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
arg->signaled = vmw_fence_obj_signaled(fence);
arg->signaled_flags = arg->flags;
- spin_lock_irq(&fman->lock);
+ spin_lock(&fman->lock);
arg->passed_seqno = dev_priv->last_read_seqno;
- spin_unlock_irq(&fman->lock);
+ spin_unlock(&fman->lock);
ttm_base_object_unref(&base);
@@ -841,8 +879,7 @@ int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
*
* This function is called when the seqno of the fence where @action is
* attached has passed. It queues the event on the submitter's event list.
- * This function is always called from atomic context, and may be called
- * from irq context.
+ * This function is always called from atomic context.
*/
static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
{
@@ -851,13 +888,13 @@ static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
struct drm_device *dev = eaction->dev;
struct drm_pending_event *event = eaction->event;
struct drm_file *file_priv;
- unsigned long irq_flags;
+
if (unlikely(event == NULL))
return;
file_priv = event->file_priv;
- spin_lock_irqsave(&dev->event_lock, irq_flags);
+ spin_lock_irq(&dev->event_lock);
if (likely(eaction->tv_sec != NULL)) {
struct timeval tv;
@@ -869,7 +906,7 @@ static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
drm_send_event_locked(dev, eaction->event);
eaction->event = NULL;
- spin_unlock_irqrestore(&dev->event_lock, irq_flags);
+ spin_unlock_irq(&dev->event_lock);
}
/**
@@ -904,11 +941,10 @@ static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
struct vmw_fence_action *action)
{
struct vmw_fence_manager *fman = fman_from_fence(fence);
- unsigned long irq_flags;
bool run_update = false;
mutex_lock(&fman->goal_irq_mutex);
- spin_lock_irqsave(&fman->lock, irq_flags);
+ spin_lock(&fman->lock);
fman->pending_actions[action->type]++;
if (dma_fence_is_signaled_locked(&fence->base)) {
@@ -927,7 +963,7 @@ static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
run_update = vmw_fence_goal_check_locked(fence);
}
- spin_unlock_irqrestore(&fman->lock, irq_flags);
+ spin_unlock(&fman->lock);
if (run_update) {
if (!fman->goal_irq_on) {
@@ -966,7 +1002,7 @@ int vmw_event_fence_action_queue(struct drm_file *file_priv,
struct vmw_fence_manager *fman = fman_from_fence(fence);
eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
- if (unlikely(eaction == NULL))
+ if (unlikely(!eaction))
return -ENOMEM;
eaction->event = event;
@@ -1002,7 +1038,7 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv,
int ret;
event = kzalloc(sizeof(*event), GFP_KERNEL);
- if (unlikely(event == NULL)) {
+ if (unlikely(!event)) {
DRM_ERROR("Failed to allocate an event.\n");
ret = -ENOMEM;
goto out_no_space;
@@ -1114,7 +1150,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
}
vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
- handle);
+ handle, -1, NULL);
vmw_fence_obj_unreference(&fence);
return 0;
out_no_create:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
index d9d85aa6ed20..20224dba9d8e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
@@ -28,6 +28,7 @@
#ifndef _VMWGFX_FENCE_H_
#include <linux/dma-fence.h>
+#include <linux/dma-fence-array.h>
#define VMW_FENCE_WAIT_TIMEOUT (5*HZ)
@@ -102,6 +103,9 @@ extern int vmw_user_fence_create(struct drm_file *file_priv,
struct vmw_fence_obj **p_fence,
uint32_t *p_handle);
+extern int vmw_wait_dma_fence(struct vmw_fence_manager *fman,
+ struct dma_fence *fence);
+
extern void vmw_fence_fifo_up(struct vmw_fence_manager *fman);
extern void vmw_fence_fifo_down(struct vmw_fence_manager *fman);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index c1900f4390a4..f2f9d88131f2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -121,7 +121,7 @@ static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
struct vmwgfx_gmrid_man *gman =
kzalloc(sizeof(*gman), GFP_KERNEL);
- if (unlikely(gman == NULL))
+ if (unlikely(!gman))
return -ENOMEM;
spin_lock_init(&gman->lock);
@@ -157,9 +157,9 @@ static int vmw_gmrid_man_takedown(struct ttm_mem_type_manager *man)
}
static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
- const char *prefix)
+ struct drm_printer *printer)
{
- pr_info("%s: No debug info available for the GMR id manager\n", prefix);
+ drm_printf(printer, "No debug info available for the GMR id manager\n");
}
const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 0c7e1723292c..b9239ba067c4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -30,11 +30,56 @@
#define VMW_FENCE_WRAP (1 << 24)
-irqreturn_t vmw_irq_handler(int irq, void *arg)
+/**
+ * vmw_thread_fn - Deferred (process context) irq handler
+ *
+ * @irq: irq number
+ * @arg: Closure argument. Pointer to a struct drm_device cast to void *
+ *
+ * This function implements the deferred part of irq processing.
+ * The function is guaranteed to run at least once after the
+ * vmw_irq_handler has returned with IRQ_WAKE_THREAD.
+ *
+ */
+static irqreturn_t vmw_thread_fn(int irq, void *arg)
+{
+ struct drm_device *dev = (struct drm_device *)arg;
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ irqreturn_t ret = IRQ_NONE;
+
+ if (test_and_clear_bit(VMW_IRQTHREAD_FENCE,
+ dev_priv->irqthread_pending)) {
+ vmw_fences_update(dev_priv->fman);
+ wake_up_all(&dev_priv->fence_queue);
+ ret = IRQ_HANDLED;
+ }
+
+ if (test_and_clear_bit(VMW_IRQTHREAD_CMDBUF,
+ dev_priv->irqthread_pending)) {
+ vmw_cmdbuf_irqthread(dev_priv->cman);
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+/**
+ * vmw_irq_handler - irq handler
+ *
+ * @irq: irq number
+ * @arg: Closure argument. Pointer to a struct drm_device cast to void *
+ *
+ * This function implements the quick part of irq processing.
+ * The function performs fast actions like clearing the device interrupt
+ * flags and also reasonably quick actions like waking processes waiting for
+ * FIFO space. Other IRQ actions are deferred to the IRQ thread.
+ */
+static irqreturn_t vmw_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *)arg;
struct vmw_private *dev_priv = vmw_priv(dev);
uint32_t status, masked_status;
+ irqreturn_t ret = IRQ_HANDLED;
status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
masked_status = status & READ_ONCE(dev_priv->irq_mask);
@@ -45,20 +90,21 @@ irqreturn_t vmw_irq_handler(int irq, void *arg)
if (!status)
return IRQ_NONE;
- if (masked_status & (SVGA_IRQFLAG_ANY_FENCE |
- SVGA_IRQFLAG_FENCE_GOAL)) {
- vmw_fences_update(dev_priv->fman);
- wake_up_all(&dev_priv->fence_queue);
- }
-
if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
wake_up_all(&dev_priv->fifo_queue);
- if (masked_status & (SVGA_IRQFLAG_COMMAND_BUFFER |
- SVGA_IRQFLAG_ERROR))
- vmw_cmdbuf_tasklet_schedule(dev_priv->cman);
+ if ((masked_status & (SVGA_IRQFLAG_ANY_FENCE |
+ SVGA_IRQFLAG_FENCE_GOAL)) &&
+ !test_and_set_bit(VMW_IRQTHREAD_FENCE, dev_priv->irqthread_pending))
+ ret = IRQ_WAKE_THREAD;
- return IRQ_HANDLED;
+ if ((masked_status & (SVGA_IRQFLAG_COMMAND_BUFFER |
+ SVGA_IRQFLAG_ERROR)) &&
+ !test_and_set_bit(VMW_IRQTHREAD_CMDBUF,
+ dev_priv->irqthread_pending))
+ ret = IRQ_WAKE_THREAD;
+
+ return ret;
}
static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
@@ -281,23 +327,15 @@ int vmw_wait_seqno(struct vmw_private *dev_priv,
return ret;
}
-void vmw_irq_preinstall(struct drm_device *dev)
+static void vmw_irq_preinstall(struct drm_device *dev)
{
struct vmw_private *dev_priv = vmw_priv(dev);
uint32_t status;
- if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
- return;
-
status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}
-int vmw_irq_postinstall(struct drm_device *dev)
-{
- return 0;
-}
-
void vmw_irq_uninstall(struct drm_device *dev)
{
struct vmw_private *dev_priv = vmw_priv(dev);
@@ -306,8 +344,41 @@ void vmw_irq_uninstall(struct drm_device *dev)
if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
return;
+ if (!dev->irq_enabled)
+ return;
+
vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+
+ dev->irq_enabled = false;
+ free_irq(dev->irq, dev);
+}
+
+/**
+ * vmw_irq_install - Install the irq handlers
+ *
+ * @dev: Pointer to the drm device.
+ * @irq: The irq number.
+ * Return: Zero if successful. Negative number otherwise.
+ */
+int vmw_irq_install(struct drm_device *dev, int irq)
+{
+ int ret;
+
+ if (dev->irq_enabled)
+ return -EBUSY;
+
+ vmw_irq_preinstall(dev);
+
+ ret = request_threaded_irq(irq, vmw_irq_handler, vmw_thread_fn,
+ IRQF_SHARED, VMWGFX_DRIVER_NAME, dev);
+ if (ret < 0)
+ return ret;
+
+ dev->irq_enabled = true;
+ dev->irq = irq;
+
+ return ret;
}
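The tasklet is gone: the hard handler only acks status and latches pending work into a bitmap, returning IRQ_WAKE_THREAD, while the thread function does the heavier processing in process context. A minimal sketch of that split using request_threaded_irq(), with every example_* name hypothetical:

#include <linux/interrupt.h>

static irqreturn_t example_hard_handler(int irq, void *arg)
{
	/* Read/ack device status and record what needs servicing. */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t example_thread_fn(int irq, void *arg)
{
	/* Runs in process context; mutexes and sleeping are allowed. */
	return IRQ_HANDLED;
}

static int example_install_irq(void *cookie, int irq)
{
	return request_threaded_irq(irq, example_hard_handler,
				    example_thread_fn, IRQF_SHARED,
				    "example", cookie);
}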
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 3d94ea67a825..b850562fbdd6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -384,6 +384,12 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
hotspot_x = du->hotspot_x;
hotspot_y = du->hotspot_y;
+
+ if (plane->fb) {
+ hotspot_x += plane->fb->hot_x;
+ hotspot_y += plane->fb->hot_y;
+ }
+
du->cursor_surface = vps->surf;
du->cursor_dmabuf = vps->dmabuf;
@@ -411,6 +417,9 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
vmw_cursor_update_position(dev_priv, true,
du->cursor_x + hotspot_x,
du->cursor_y + hotspot_y);
+
+ du->core_hotspot_x = hotspot_x - du->hotspot_x;
+ du->core_hotspot_y = hotspot_y - du->hotspot_y;
} else {
DRM_ERROR("Failed to update cursor image\n");
}
@@ -1527,7 +1536,7 @@ err_out:
* RETURNS
* Zero for success or -errno
*/
-int
+static int
vmw_kms_atomic_check_modeset(struct drm_device *dev,
struct drm_atomic_state *state)
{
@@ -1536,8 +1545,7 @@ vmw_kms_atomic_check_modeset(struct drm_device *dev,
struct vmw_private *dev_priv = vmw_priv(dev);
int i;
-
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
unsigned long requested_bb_mem = 0;
if (dev_priv->active_display_unit == vmw_du_screen_target) {
@@ -1558,10 +1566,34 @@ vmw_kms_atomic_check_modeset(struct drm_device *dev,
}
+/**
+ * vmw_kms_atomic_commit - Perform an atomic state commit
+ *
+ * @dev: DRM device
+ * @state: the driver state object
+ * @nonblock: Whether nonblocking behaviour is requested
+ *
+ * This is a simple wrapper around drm_atomic_helper_commit() for
+ * us to clear the nonblocking value.
+ *
+ * Nonblocking commits currently cause synchronization issues
+ * for vmwgfx.
+ *
+ * RETURNS
+ * Zero for success or negative error code on failure.
+ */
+int vmw_kms_atomic_commit(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ bool nonblock)
+{
+ return drm_atomic_helper_commit(dev, state, false);
+}
+
+
static const struct drm_mode_config_funcs vmw_kms_funcs = {
.fb_create = vmw_kms_fb_create,
.atomic_check = vmw_kms_atomic_check_modeset,
- .atomic_commit = drm_atomic_helper_commit,
+ .atomic_commit = vmw_kms_atomic_commit,
};
static int vmw_kms_generic_present(struct vmw_private *dev_priv,
@@ -1658,7 +1690,7 @@ int vmw_kms_init(struct vmw_private *dev_priv)
int vmw_kms_close(struct vmw_private *dev_priv)
{
- int ret;
+ int ret = 0;
/*
* Docs says we should take the lock before calling this function
@@ -1666,11 +1698,7 @@ int vmw_kms_close(struct vmw_private *dev_priv)
* drm_encoder_cleanup which takes the lock we deadlock.
*/
drm_mode_config_cleanup(dev_priv->dev);
- if (dev_priv->active_display_unit == vmw_du_screen_object)
- ret = vmw_kms_sou_close_display(dev_priv);
- else if (dev_priv->active_display_unit == vmw_du_screen_target)
- ret = vmw_kms_stdu_close_display(dev_priv);
- else
+ if (dev_priv->active_display_unit == vmw_du_legacy)
ret = vmw_kms_ldu_close_display(dev_priv);
return ret;
@@ -2490,7 +2518,7 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
if (file_priv)
vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
ret, user_fence_rep, fence,
- handle);
+ handle, -1, NULL);
if (out_fence)
*out_fence = fence;
else
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 5f8d678ae675..ff9c8389ff21 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -390,7 +390,6 @@ int vmw_kms_update_proxy(struct vmw_resource *res,
* Screen Objects display functions - vmwgfx_scrn.c
*/
int vmw_kms_sou_init_display(struct vmw_private *dev_priv);
-int vmw_kms_sou_close_display(struct vmw_private *dev_priv);
int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
struct vmw_framebuffer *framebuffer,
struct drm_clip_rect *clips,
@@ -418,7 +417,6 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv,
* Screen Target Display Unit functions - vmwgfx_stdu.c
*/
int vmw_kms_stdu_init_display(struct vmw_private *dev_priv);
-int vmw_kms_stdu_close_display(struct vmw_private *dev_priv);
int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
struct vmw_framebuffer *framebuffer,
struct drm_clip_rect *clips,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index d3987bcf53f8..b8a09807c5de 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -203,19 +203,7 @@ static void vmw_ldu_crtc_mode_set_nofb(struct drm_crtc *crtc)
}
/**
- * vmw_ldu_crtc_helper_prepare - Noop
- *
- * @crtc: CRTC associated with the new screen
- *
- * Prepares the CRTC for a mode set, but we don't need to do anything here.
- *
- */
-static void vmw_ldu_crtc_helper_prepare(struct drm_crtc *crtc)
-{
-}
-
-/**
- * vmw_ldu_crtc_helper_commit - Noop
+ * vmw_ldu_crtc_atomic_enable - Noop
*
* @crtc: CRTC associated with the new screen
*
@@ -224,16 +212,18 @@ static void vmw_ldu_crtc_helper_prepare(struct drm_crtc *crtc)
* but since for LDU the display plane is closely tied to the
* CRTC, it makes more sense to do those at plane update time.
*/
-static void vmw_ldu_crtc_helper_commit(struct drm_crtc *crtc)
+static void vmw_ldu_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
}
/**
- * vmw_ldu_crtc_helper_disable - Turns off CRTC
+ * vmw_ldu_crtc_atomic_disable - Turns off CRTC
*
* @crtc: CRTC to be turned off
*/
-static void vmw_ldu_crtc_helper_disable(struct drm_crtc *crtc)
+static void vmw_ldu_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
}
@@ -388,13 +378,12 @@ drm_plane_helper_funcs vmw_ldu_primary_plane_helper_funcs = {
};
static const struct drm_crtc_helper_funcs vmw_ldu_crtc_helper_funcs = {
- .prepare = vmw_ldu_crtc_helper_prepare,
- .commit = vmw_ldu_crtc_helper_commit,
- .disable = vmw_ldu_crtc_helper_disable,
.mode_set_nofb = vmw_ldu_crtc_mode_set_nofb,
.atomic_check = vmw_du_crtc_atomic_check,
.atomic_begin = vmw_du_crtc_atomic_begin,
.atomic_flush = vmw_du_crtc_atomic_flush,
+ .atomic_enable = vmw_ldu_crtc_atomic_enable,
+ .atomic_disable = vmw_ldu_crtc_atomic_disable,
};
@@ -439,7 +428,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
0, &vmw_ldu_plane_funcs,
vmw_primary_plane_formats,
ARRAY_SIZE(vmw_primary_plane_formats),
- DRM_PLANE_TYPE_PRIMARY, NULL);
+ NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret) {
DRM_ERROR("Failed to initialize primary plane");
goto err_free;
@@ -454,7 +443,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
0, &vmw_ldu_cursor_funcs,
vmw_cursor_plane_formats,
ARRAY_SIZE(vmw_cursor_plane_formats),
- DRM_PLANE_TYPE_CURSOR, NULL);
+ NULL, DRM_PLANE_TYPE_CURSOR, NULL);
if (ret) {
DRM_ERROR("Failed to initialize cursor plane");
drm_plane_cleanup(&ldu->base.primary);
@@ -582,13 +571,9 @@ err_free:
int vmw_kms_ldu_close_display(struct vmw_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
-
if (!dev_priv->ldu_priv)
return -ENOSYS;
- drm_vblank_cleanup(dev);
-
BUG_ON(!list_empty(&dev_priv->ldu_priv->active));
kfree(dev_priv->ldu_priv);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index 941bcfd131ff..b17f08fc50d3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -320,14 +320,14 @@ int vmw_otables_setup(struct vmw_private *dev_priv)
if (dev_priv->has_dx) {
*otables = kmemdup(dx_tables, sizeof(dx_tables), GFP_KERNEL);
- if (*otables == NULL)
+ if (!(*otables))
return -ENOMEM;
dev_priv->otable_batch.num_otables = ARRAY_SIZE(dx_tables);
} else {
*otables = kmemdup(pre_dx_tables, sizeof(pre_dx_tables),
GFP_KERNEL);
- if (*otables == NULL)
+ if (!(*otables))
return -ENOMEM;
dev_priv->otable_batch.num_otables = ARRAY_SIZE(pre_dx_tables);
@@ -407,7 +407,7 @@ struct vmw_mob *vmw_mob_create(unsigned long data_pages)
{
struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL);
- if (unlikely(mob == NULL))
+ if (unlikely(!mob))
return NULL;
mob->num_pages = vmw_mob_calculate_pt_pages(data_pages);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index 6063c9636d4a..97000996b8dc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -244,7 +244,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
reply_len = ebx;
reply = kzalloc(reply_len + 1, GFP_KERNEL);
- if (reply == NULL) {
+ if (!reply) {
DRM_ERROR("Cannot allocate memory for reply\n");
return -ENOMEM;
}
@@ -340,7 +340,7 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
msg_len = strlen(guest_info_param) + strlen("info-get ") + 1;
msg = kzalloc(msg_len, GFP_KERNEL);
- if (msg == NULL) {
+ if (!msg) {
DRM_ERROR("Cannot allocate memory to get %s", guest_info_param);
return -ENOMEM;
}
@@ -400,7 +400,7 @@ int vmw_host_log(const char *log)
msg_len = strlen(log) + strlen("log ") + 1;
msg = kzalloc(msg_len, GFP_KERNEL);
- if (msg == NULL) {
+ if (!msg) {
DRM_ERROR("Cannot allocate memory for log message\n");
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 7d591f653dfa..a96f90f017d1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -446,7 +446,7 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
int ret;
user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
- if (unlikely(user_bo == NULL)) {
+ if (unlikely(!user_bo)) {
DRM_ERROR("Failed to allocate a buffer.\n");
return -ENOMEM;
}
@@ -836,7 +836,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
}
backup = kzalloc(sizeof(*backup), GFP_KERNEL);
- if (unlikely(backup == NULL))
+ if (unlikely(!backup))
return -ENOMEM;
ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 8d7dc9def7c2..d1552d3e0652 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -270,22 +270,24 @@ static void vmw_sou_crtc_helper_prepare(struct drm_crtc *crtc)
}
/**
- * vmw_sou_crtc_helper_commit - Noop
+ * vmw_sou_crtc_atomic_enable - Noop
*
* @crtc: CRTC associated with the new screen
*
* This is called after a mode set has been completed.
*/
-static void vmw_sou_crtc_helper_commit(struct drm_crtc *crtc)
+static void vmw_sou_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
}
/**
- * vmw_sou_crtc_helper_disable - Turns off CRTC
+ * vmw_sou_crtc_atomic_disable - Turns off CRTC
*
* @crtc: CRTC to be turned off
*/
-static void vmw_sou_crtc_helper_disable(struct drm_crtc *crtc)
+static void vmw_sou_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct vmw_private *dev_priv;
struct vmw_screen_object_unit *sou;
@@ -573,12 +575,12 @@ drm_plane_helper_funcs vmw_sou_primary_plane_helper_funcs = {
static const struct drm_crtc_helper_funcs vmw_sou_crtc_helper_funcs = {
.prepare = vmw_sou_crtc_helper_prepare,
- .commit = vmw_sou_crtc_helper_commit,
- .disable = vmw_sou_crtc_helper_disable,
.mode_set_nofb = vmw_sou_crtc_mode_set_nofb,
.atomic_check = vmw_du_crtc_atomic_check,
.atomic_begin = vmw_du_crtc_atomic_begin,
.atomic_flush = vmw_du_crtc_atomic_flush,
+ .atomic_enable = vmw_sou_crtc_atomic_enable,
+ .atomic_disable = vmw_sou_crtc_atomic_disable,
};
@@ -622,7 +624,7 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
0, &vmw_sou_plane_funcs,
vmw_primary_plane_formats,
ARRAY_SIZE(vmw_primary_plane_formats),
- DRM_PLANE_TYPE_PRIMARY, NULL);
+ NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret) {
DRM_ERROR("Failed to initialize primary plane");
goto err_free;
@@ -637,7 +639,7 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
0, &vmw_sou_cursor_funcs,
vmw_cursor_plane_formats,
ARRAY_SIZE(vmw_cursor_plane_formats),
- DRM_PLANE_TYPE_CURSOR, NULL);
+ NULL, DRM_PLANE_TYPE_CURSOR, NULL);
if (ret) {
DRM_ERROR("Failed to initialize cursor plane");
drm_plane_cleanup(&sou->base.primary);
@@ -746,15 +748,6 @@ int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
return 0;
}
-int vmw_kms_sou_close_display(struct vmw_private *dev_priv)
-{
- struct drm_device *dev = dev_priv->dev;
-
- drm_vblank_cleanup(dev);
-
- return 0;
-}
-
static int do_dmabuf_define_gmrfb(struct vmw_private *dev_priv,
struct vmw_framebuffer *framebuffer)
{
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 68f135c5b0d8..9b832f136813 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -751,7 +751,7 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
}
ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
- if (unlikely(ushader == NULL)) {
+ if (unlikely(!ushader)) {
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_user_shader_size);
ret = -ENOMEM;
@@ -821,7 +821,7 @@ static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
}
shader = kzalloc(sizeof(*shader), GFP_KERNEL);
- if (unlikely(shader == NULL)) {
+ if (unlikely(!shader)) {
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_shader_size);
ret = -ENOMEM;
@@ -981,7 +981,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
/* Allocate and pin a DMA buffer */
buf = kzalloc(sizeof(*buf), GFP_KERNEL);
- if (unlikely(buf == NULL))
+ if (unlikely(!buf))
return -ENOMEM;
ret = vmw_dmabuf_init(dev_priv, buf, size, &vmw_sys_ne_placement,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 50be1f034f9e..ca3afae2db1f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -412,7 +412,8 @@ static void vmw_stdu_crtc_helper_prepare(struct drm_crtc *crtc)
}
-static void vmw_stdu_crtc_helper_commit(struct drm_crtc *crtc)
+static void vmw_stdu_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct vmw_private *dev_priv;
struct vmw_screen_target_display_unit *stdu;
@@ -432,7 +433,8 @@ static void vmw_stdu_crtc_helper_commit(struct drm_crtc *crtc)
vmw_kms_del_active(dev_priv, &stdu->base);
}
-static void vmw_stdu_crtc_helper_disable(struct drm_crtc *crtc)
+static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct vmw_private *dev_priv;
struct vmw_screen_target_display_unit *stdu;
@@ -1415,12 +1417,12 @@ drm_plane_helper_funcs vmw_stdu_primary_plane_helper_funcs = {
static const struct drm_crtc_helper_funcs vmw_stdu_crtc_helper_funcs = {
.prepare = vmw_stdu_crtc_helper_prepare,
- .commit = vmw_stdu_crtc_helper_commit,
- .disable = vmw_stdu_crtc_helper_disable,
.mode_set_nofb = vmw_stdu_crtc_mode_set_nofb,
.atomic_check = vmw_du_crtc_atomic_check,
.atomic_begin = vmw_du_crtc_atomic_begin,
.atomic_flush = vmw_du_crtc_atomic_flush,
+ .atomic_enable = vmw_stdu_crtc_atomic_enable,
+ .atomic_disable = vmw_stdu_crtc_atomic_disable,
};
@@ -1473,7 +1475,7 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
0, &vmw_stdu_plane_funcs,
vmw_primary_plane_formats,
ARRAY_SIZE(vmw_primary_plane_formats),
- DRM_PLANE_TYPE_PRIMARY, NULL);
+ NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret) {
DRM_ERROR("Failed to initialize primary plane");
goto err_free;
@@ -1488,7 +1490,7 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
0, &vmw_stdu_cursor_funcs,
vmw_cursor_plane_formats,
ARRAY_SIZE(vmw_cursor_plane_formats),
- DRM_PLANE_TYPE_CURSOR, NULL);
+ NULL, DRM_PLANE_TYPE_CURSOR, NULL);
if (ret) {
DRM_ERROR("Failed to initialize cursor plane");
drm_plane_cleanup(&stdu->base.primary);
@@ -1640,8 +1642,8 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
* something arbitrarily large and we will reject any layout
* that doesn't fit prim_bb_mem later
*/
- dev->mode_config.max_width = 16384;
- dev->mode_config.max_height = 16384;
+ dev->mode_config.max_width = 8192;
+ dev->mode_config.max_height = 8192;
}
vmw_kms_create_implicit_placement_property(dev_priv, false);
@@ -1651,36 +1653,11 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
if (unlikely(ret != 0)) {
DRM_ERROR("Failed to initialize STDU %d", i);
- goto err_vblank_cleanup;
+ return ret;
}
}
DRM_INFO("Screen Target Display device initialized\n");
return 0;
-
-err_vblank_cleanup:
- drm_vblank_cleanup(dev);
- return ret;
-}
-
-
-
-/**
- * vmw_kms_stdu_close_display - Cleans up after vmw_kms_stdu_init_display
- *
- * @dev_priv: VMW DRM device
- *
- * Frees up any resources allocated by vmw_kms_stdu_init_display
- *
- * RETURNS:
- * 0 on success
- */
-int vmw_kms_stdu_close_display(struct vmw_private *dev_priv)
-{
- struct drm_device *dev = dev_priv->dev;
-
- drm_vblank_cleanup(dev);
-
- return 0;
}
diff --git a/drivers/gpu/drm/zte/zx_drm_drv.c b/drivers/gpu/drm/zte/zx_drm_drv.c
index f46c855d274b..45244828fc1f 100644
--- a/drivers/gpu/drm/zte/zx_drm_drv.c
+++ b/drivers/gpu/drm/zte/zx_drm_drv.c
@@ -59,11 +59,9 @@ static struct drm_driver zx_drm_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
DRIVER_ATOMIC,
.lastclose = zx_drm_lastclose,
- .gem_free_object = drm_gem_cma_free_object,
+ .gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
- .dumb_map_offset = drm_gem_cma_dumb_map_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
@@ -149,7 +147,6 @@ out_fbdev_fini:
out_poll_fini:
drm_kms_helper_poll_fini(drm);
drm_mode_config_cleanup(drm);
- drm_vblank_cleanup(drm);
out_unbind:
component_unbind_all(dev, drm);
out_unregister:
@@ -171,7 +168,6 @@ static void zx_drm_unbind(struct device *dev)
}
drm_kms_helper_poll_fini(drm);
drm_mode_config_cleanup(drm);
- drm_vblank_cleanup(drm);
component_unbind_all(dev, drm);
dev_set_drvdata(dev, NULL);
drm->dev_private = NULL;
diff --git a/drivers/gpu/drm/zte/zx_hdmi.c b/drivers/gpu/drm/zte/zx_hdmi.c
index 0df7366e594b..b8abb1b496ff 100644
--- a/drivers/gpu/drm/zte/zx_hdmi.c
+++ b/drivers/gpu/drm/zte/zx_hdmi.c
@@ -124,7 +124,7 @@ static int zx_hdmi_config_video_avi(struct zx_hdmi *hdmi,
union hdmi_infoframe frame;
int ret;
- ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode);
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, mode, false);
if (ret) {
DRM_DEV_ERROR(hdmi->dev, "failed to get avi infoframe: %d\n",
ret);
@@ -300,7 +300,6 @@ zx_hdmi_connector_detect(struct drm_connector *connector, bool force)
}
static const struct drm_connector_funcs zx_hdmi_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = zx_hdmi_connector_detect,
.destroy = drm_connector_cleanup,
diff --git a/drivers/gpu/drm/zte/zx_plane.c b/drivers/gpu/drm/zte/zx_plane.c
index 4a6252720c10..18e763493264 100644
--- a/drivers/gpu/drm/zte/zx_plane.c
+++ b/drivers/gpu/drm/zte/zx_plane.c
@@ -540,7 +540,7 @@ int zx_plane_init(struct drm_device *drm, struct zx_plane *zplane,
ret = drm_universal_plane_init(drm, plane, VOU_CRTC_MASK,
&zx_plane_funcs, formats, format_count,
- type, NULL);
+ NULL, type, NULL);
if (ret) {
DRM_DEV_ERROR(dev, "failed to init universal plane: %d\n", ret);
return ret;
diff --git a/drivers/gpu/drm/zte/zx_tvenc.c b/drivers/gpu/drm/zte/zx_tvenc.c
index b56dc69843fc..0de1a71ca4e0 100644
--- a/drivers/gpu/drm/zte/zx_tvenc.c
+++ b/drivers/gpu/drm/zte/zx_tvenc.c
@@ -269,7 +269,6 @@ static struct drm_connector_helper_funcs zx_tvenc_connector_helper_funcs = {
};
static const struct drm_connector_funcs zx_tvenc_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
diff --git a/drivers/gpu/drm/zte/zx_vga.c b/drivers/gpu/drm/zte/zx_vga.c
index 1e0811f775cb..3e7e33cd3dfa 100644
--- a/drivers/gpu/drm/zte/zx_vga.c
+++ b/drivers/gpu/drm/zte/zx_vga.c
@@ -138,7 +138,6 @@ zx_vga_connector_detect(struct drm_connector *connector, bool force)
}
static const struct drm_connector_funcs zx_vga_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = zx_vga_connector_detect,
.destroy = drm_connector_cleanup,
diff --git a/drivers/gpu/drm/zte/zx_vou.c b/drivers/gpu/drm/zte/zx_vou.c
index 5fbd10b60ee5..7491813131f3 100644
--- a/drivers/gpu/drm/zte/zx_vou.c
+++ b/drivers/gpu/drm/zte/zx_vou.c
@@ -350,7 +350,8 @@ static inline void vou_chn_set_update(struct zx_crtc *zcrtc)
zx_writel(zcrtc->chnreg + CHN_UPDATE, 1);
}
-static void zx_crtc_enable(struct drm_crtc *crtc)
+static void zx_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct drm_display_mode *mode = &crtc->state->adjusted_mode;
bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
@@ -454,7 +455,8 @@ static void zx_crtc_enable(struct drm_crtc *crtc)
DRM_DEV_ERROR(vou->dev, "failed to enable pixclk: %d\n", ret);
}
-static void zx_crtc_disable(struct drm_crtc *crtc)
+static void zx_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
struct zx_crtc *zcrtc = to_zx_crtc(crtc);
const struct zx_crtc_bits *bits = zcrtc->bits;
@@ -490,9 +492,9 @@ static void zx_crtc_atomic_flush(struct drm_crtc *crtc,
}
static const struct drm_crtc_helper_funcs zx_crtc_helper_funcs = {
- .enable = zx_crtc_enable,
- .disable = zx_crtc_disable,
.atomic_flush = zx_crtc_atomic_flush,
+ .atomic_enable = zx_crtc_atomic_enable,
+ .atomic_disable = zx_crtc_atomic_disable,
};
static int zx_vou_enable_vblank(struct drm_crtc *crtc)
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index a048e3ac523d..f9cde03030fd 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -41,13 +41,15 @@ struct host1x_subdev {
/**
* host1x_subdev_add() - add a new subdevice with an associated device node
* @device: host1x device to add the subdevice to
- * @driver: host1x driver
* @np: device node
*/
static int host1x_subdev_add(struct host1x_device *device,
+ struct host1x_driver *driver,
struct device_node *np)
{
struct host1x_subdev *subdev;
+ struct device_node *child;
+ int err;
subdev = kzalloc(sizeof(*subdev), GFP_KERNEL);
if (!subdev)
@@ -60,6 +62,19 @@ static int host1x_subdev_add(struct host1x_device *device,
list_add_tail(&subdev->list, &device->subdevs);
mutex_unlock(&device->subdevs_lock);
+ /* recursively add children */
+ for_each_child_of_node(np, child) {
+ if (of_match_node(driver->subdevs, child) &&
+ of_device_is_available(child)) {
+ err = host1x_subdev_add(device, driver, child);
+ if (err < 0) {
+ /* XXX cleanup? */
+ of_node_put(child);
+ return err;
+ }
+ }
+ }
+
return 0;
}
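The recursion relies on the usual for_each_child_of_node() rule: the iterator holds a reference on the current child, so any early return must drop it with of_node_put(), exactly as the error path above does. A small illustrative sketch (example_handle_child() is a hypothetical stand-in):

#include <linux/of.h>

static int example_handle_child(struct device_node *child)
{
	return 0;
}

static int example_walk_children(struct device_node *np)
{
	struct device_node *child;
	int err;

	for_each_child_of_node(np, child) {
		err = example_handle_child(child);
		if (err < 0) {
			of_node_put(child);	/* drop the iterator's reference */
			return err;
		}
	}

	return 0;
}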
@@ -88,7 +103,7 @@ static int host1x_device_parse_dt(struct host1x_device *device,
for_each_child_of_node(device->dev.parent->of_node, np) {
if (of_match_node(driver->subdevs, np) &&
of_device_is_available(np)) {
- err = host1x_subdev_add(device, np);
+ err = host1x_subdev_add(device, driver, np);
if (err < 0) {
of_node_put(np);
return err;
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 2c58a390123a..7f22c5c37660 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -134,8 +134,8 @@ static int host1x_probe(struct platform_device *pdev)
syncpt_irq = platform_get_irq(pdev, 0);
if (syncpt_irq < 0) {
- dev_err(&pdev->dev, "failed to get IRQ\n");
- return -ENXIO;
+ dev_err(&pdev->dev, "failed to get IRQ: %d\n", syncpt_irq);
+ return syncpt_irq;
}
host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
@@ -186,8 +186,13 @@ static int host1x_probe(struct platform_device *pdev)
return -ENOMEM;
err = iommu_attach_device(host->domain, &pdev->dev);
- if (err)
+ if (err == -ENODEV) {
+ iommu_domain_free(host->domain);
+ host->domain = NULL;
+ goto skip_iommu;
+ } else if (err) {
goto fail_free_domain;
+ }
geometry = &host->domain->geometry;
@@ -198,6 +203,7 @@ static int host1x_probe(struct platform_device *pdev)
host->iova_end = geometry->aperture_end;
}
+skip_iommu:
err = host1x_channel_list_init(&host->channel_list,
host->info->nb_channels);
if (err) {
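Two small robustness fixes in host1x_probe(): the negative value from platform_get_irq() is now propagated instead of being flattened to -ENXIO (so probe deferral, among other things, still works), and an -ENODEV from iommu_attach_device() falls back to running without an IOMMU domain rather than failing the probe. The error-propagation half in a self-contained sketch (fake_get_irq() and DEMO_EPROBE_DEFER are stand-ins, not kernel symbols):

#include <stdio.h>

#define DEMO_EPROBE_DEFER 517   /* stand-in for the kernel's EPROBE_DEFER */

/* stand-in for platform_get_irq(): returns an IRQ number or a negative errno */
static int fake_get_irq(int fail)
{
	return fail ? -DEMO_EPROBE_DEFER : 42;
}

static int demo_probe(int fail)
{
	int irq = fake_get_irq(fail);

	if (irq < 0) {
		fprintf(stderr, "failed to get IRQ: %d\n", irq);
		return irq;   /* propagate the original code, do not flatten it */
	}
	printf("got irq %d\n", irq);
	return 0;
}

int main(void)
{
	demo_probe(0);
	demo_probe(1);
	return 0;
}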
diff --git a/drivers/gpu/host1x/hw/intr_hw.c b/drivers/gpu/host1x/hw/intr_hw.c
index dacb8009a605..37ebb51703fa 100644
--- a/drivers/gpu/host1x/hw/intr_hw.c
+++ b/drivers/gpu/host1x/hw/intr_hw.c
@@ -33,10 +33,10 @@ static void host1x_intr_syncpt_handle(struct host1x_syncpt *syncpt)
unsigned int id = syncpt->id;
struct host1x *host = syncpt->host;
- host1x_sync_writel(host, BIT_MASK(id),
- HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(BIT_WORD(id)));
- host1x_sync_writel(host, BIT_MASK(id),
- HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(BIT_WORD(id)));
+ host1x_sync_writel(host, BIT(id % 32),
+ HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(id / 32));
+ host1x_sync_writel(host, BIT(id % 32),
+ HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(id / 32));
schedule_work(&syncpt->intr.work);
}
@@ -50,9 +50,9 @@ static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id)
for (i = 0; i < DIV_ROUND_UP(host->info->nb_pts, 32); i++) {
reg = host1x_sync_readl(host,
HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i));
- for_each_set_bit(id, &reg, BITS_PER_LONG) {
+ for_each_set_bit(id, &reg, 32) {
struct host1x_syncpt *syncpt =
- host->syncpt + (i * BITS_PER_LONG + id);
+ host->syncpt + (i * 32 + id);
host1x_intr_syncpt_handle(syncpt);
}
}
@@ -117,17 +117,17 @@ static void _host1x_intr_set_syncpt_threshold(struct host1x *host,
static void _host1x_intr_enable_syncpt_intr(struct host1x *host,
unsigned int id)
{
- host1x_sync_writel(host, BIT_MASK(id),
- HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(BIT_WORD(id)));
+ host1x_sync_writel(host, BIT(id % 32),
+ HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(id / 32));
}
static void _host1x_intr_disable_syncpt_intr(struct host1x *host,
unsigned int id)
{
- host1x_sync_writel(host, BIT_MASK(id),
- HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(BIT_WORD(id)));
- host1x_sync_writel(host, BIT_MASK(id),
- HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(BIT_WORD(id)));
+ host1x_sync_writel(host, BIT(id % 32),
+ HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(id / 32));
+ host1x_sync_writel(host, BIT(id % 32),
+ HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(id / 32));
}
static int _host1x_free_syncpt_irq(struct host1x *host)
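The BIT_MASK()/BIT_WORD() to BIT(id % 32) / id / 32 conversion here (and in syncpt_hw.c below) matters because those macros index in units of BITS_PER_LONG, which is 64 on a 64-bit kernel, while the host1x syncpoint registers are banks of 32-bit words; the ISR likewise now walks only 32 bits per status word. A quick userspace check of the arithmetic for a syncpoint id above 31:

/* Sketch (userspace C) of why BIT_MASK()/BIT_WORD() mis-index 32-bit
 * register banks on a 64-bit kernel. */
#include <stdio.h>

#define BITS_PER_LONG 64UL                       /* typical on arm64 */
#define BIT_MASK(nr)  (1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr)  ((nr) / BITS_PER_LONG)

int main(void)
{
	unsigned int id = 40;                    /* hypothetical syncpt id */

	/* old scheme: word 0, bit 40 -- but each register word is only 32 bits */
	printf("BIT_WORD=%lu BIT_MASK=0x%lx\n", BIT_WORD(id), BIT_MASK(id));

	/* new scheme: word 1, bit 8 -- matches the 32-bit register layout */
	printf("word=%u mask=0x%x\n", id / 32, 1U << (id % 32));
	return 0;
}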
diff --git a/drivers/gpu/host1x/hw/syncpt_hw.c b/drivers/gpu/host1x/hw/syncpt_hw.c
index c93f74fcce72..7b0270d60742 100644
--- a/drivers/gpu/host1x/hw/syncpt_hw.c
+++ b/drivers/gpu/host1x/hw/syncpt_hw.c
@@ -89,7 +89,7 @@ static int syncpt_cpu_incr(struct host1x_syncpt *sp)
host1x_syncpt_idle(sp))
return -EINVAL;
- host1x_sync_writel(host, BIT_MASK(sp->id),
+ host1x_sync_writel(host, BIT(sp->id % 32),
HOST1X_SYNC_SYNCPT_CPU_INCR(reg_offset));
wmb();
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index bee504406cfc..db509ab8874e 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -197,10 +197,6 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
}
phys_addr = host1x_bo_pin(reloc->target.bo, &sgt);
- if (!phys_addr) {
- err = -EINVAL;
- goto unpin;
- }
job->addr_phys[job->num_unpins] = phys_addr;
job->unpins[job->num_unpins].bo = reloc->target.bo;
@@ -225,10 +221,6 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
}
phys_addr = host1x_bo_pin(g->bo, &sgt);
- if (!phys_addr) {
- err = -EINVAL;
- goto unpin;
- }
if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && host->domain) {
for_each_sg(sgt->sgl, sg, sgt->nents, j)
diff --git a/drivers/gpu/ipu-v3/Kconfig b/drivers/gpu/ipu-v3/Kconfig
index 08766c6e7856..87a20b3dcf7a 100644
--- a/drivers/gpu/ipu-v3/Kconfig
+++ b/drivers/gpu/ipu-v3/Kconfig
@@ -1,6 +1,7 @@
config IMX_IPUV3_CORE
tristate "IPUv3 core support"
depends on SOC_IMX5 || SOC_IMX6Q || ARCH_MULTIPLATFORM
+ depends on DRM || !DRM # if DRM=m, this can't be 'y'
select GENERIC_IRQ_CHIP
help
Choose this if you have a i.MX5/6 system and want to use the Image
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 960d816ad7f7..6a573d21d3cc 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -1217,8 +1217,8 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
of_node = of_graph_get_port_by_id(dev->of_node, i);
if (!of_node) {
dev_info(dev,
- "no port@%d node in %s, not using %s%d\n",
- i, dev->of_node->full_name,
+ "no port@%d node in %pOF, not using %s%d\n",
+ i, dev->of_node,
(i / 2) ? "DI" : "CSI", i % 2);
continue;
}
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 3cd60f460b61..0a3117cc29e7 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -924,7 +924,7 @@ config HID_UDRAW_PS3
config HID_WACOM
tristate "Wacom Intuos/Graphire tablet support (USB)"
- depends on HID
+ depends on USB_HID
select POWER_SUPPLY
select NEW_LEDS
select LEDS_CLASS
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index a4a3c38bc145..50c294be8324 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -29,6 +29,7 @@
#include <linux/hid.h>
#include <linux/module.h>
#include <linux/input/mt.h>
+#include <linux/usb.h> /* For to_usb_interface for T100 touchpad intf check */
#include "hid-ids.h"
@@ -38,24 +39,19 @@ MODULE_AUTHOR("Victor Vlasenko <victor.vlasenko@sysgears.com>");
MODULE_AUTHOR("Frederik Wenigwieser <frederik.wenigwieser@gmail.com>");
MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
+#define T100_TPAD_INTF 2
+
+#define T100CHI_MOUSE_REPORT_ID 0x06
#define FEATURE_REPORT_ID 0x0d
#define INPUT_REPORT_ID 0x5d
#define FEATURE_KBD_REPORT_ID 0x5a
-
-#define INPUT_REPORT_SIZE 28
#define FEATURE_KBD_REPORT_SIZE 16
#define SUPPORT_KBD_BACKLIGHT BIT(0)
-#define MAX_CONTACTS 5
-
-#define MAX_X 2794
-#define MAX_Y 1758
#define MAX_TOUCH_MAJOR 8
#define MAX_PRESSURE 128
-#define CONTACT_DATA_SIZE 5
-
#define BTN_LEFT_MASK 0x01
#define CONTACT_TOOL_TYPE_MASK 0x80
#define CONTACT_X_MSB_MASK 0xf0
@@ -70,6 +66,7 @@ MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
#define QUIRK_NO_CONSUMER_USAGES BIT(4)
#define QUIRK_USE_KBD_BACKLIGHT BIT(5)
#define QUIRK_T100_KEYBOARD BIT(6)
+#define QUIRK_T100CHI BIT(7)
#define I2C_KEYBOARD_QUIRKS (QUIRK_FIX_NOTEBOOK_REPORT | \
QUIRK_NO_INIT_REPORTS | \
@@ -88,19 +85,62 @@ struct asus_kbd_leds {
bool removed;
};
+struct asus_touchpad_info {
+ int max_x;
+ int max_y;
+ int res_x;
+ int res_y;
+ int contact_size;
+ int max_contacts;
+};
+
struct asus_drvdata {
unsigned long quirks;
struct input_dev *input;
struct asus_kbd_leds *kbd_backlight;
+ const struct asus_touchpad_info *tp;
bool enable_backlight;
};
-static void asus_report_contact_down(struct input_dev *input,
+static const struct asus_touchpad_info asus_i2c_tp = {
+ .max_x = 2794,
+ .max_y = 1758,
+ .contact_size = 5,
+ .max_contacts = 5,
+};
+
+static const struct asus_touchpad_info asus_t100ta_tp = {
+ .max_x = 2240,
+ .max_y = 1120,
+ .res_x = 30, /* units/mm */
+ .res_y = 27, /* units/mm */
+ .contact_size = 5,
+ .max_contacts = 5,
+};
+
+static const struct asus_touchpad_info asus_t100chi_tp = {
+ .max_x = 2640,
+ .max_y = 1320,
+ .res_x = 31, /* units/mm */
+ .res_y = 29, /* units/mm */
+ .contact_size = 3,
+ .max_contacts = 4,
+};
+
+static void asus_report_contact_down(struct asus_drvdata *drvdat,
int toolType, u8 *data)
{
- int touch_major, pressure;
- int x = (data[0] & CONTACT_X_MSB_MASK) << 4 | data[1];
- int y = MAX_Y - ((data[0] & CONTACT_Y_MSB_MASK) << 8 | data[2]);
+ struct input_dev *input = drvdat->input;
+ int touch_major, pressure, x, y;
+
+ x = (data[0] & CONTACT_X_MSB_MASK) << 4 | data[1];
+ y = drvdat->tp->max_y - ((data[0] & CONTACT_Y_MSB_MASK) << 8 | data[2]);
+
+ input_report_abs(input, ABS_MT_POSITION_X, x);
+ input_report_abs(input, ABS_MT_POSITION_Y, y);
+
+ if (drvdat->tp->contact_size < 5)
+ return;
if (toolType == MT_TOOL_PALM) {
touch_major = MAX_TOUCH_MAJOR;
@@ -110,19 +150,20 @@ static void asus_report_contact_down(struct input_dev *input,
pressure = data[4] & CONTACT_PRESSURE_MASK;
}
- input_report_abs(input, ABS_MT_POSITION_X, x);
- input_report_abs(input, ABS_MT_POSITION_Y, y);
input_report_abs(input, ABS_MT_TOUCH_MAJOR, touch_major);
input_report_abs(input, ABS_MT_PRESSURE, pressure);
}
/* Required for Synaptics Palm Detection */
-static void asus_report_tool_width(struct input_dev *input)
+static void asus_report_tool_width(struct asus_drvdata *drvdat)
{
- struct input_mt *mt = input->mt;
+ struct input_mt *mt = drvdat->input->mt;
struct input_mt_slot *oldest;
int oldid, count, i;
+ if (drvdat->tp->contact_size < 5)
+ return;
+
oldest = NULL;
oldid = mt->trkid;
count = 0;
@@ -141,35 +182,42 @@ static void asus_report_tool_width(struct input_dev *input)
}
if (oldest) {
- input_report_abs(input, ABS_TOOL_WIDTH,
+ input_report_abs(drvdat->input, ABS_TOOL_WIDTH,
input_mt_get_value(oldest, ABS_MT_TOUCH_MAJOR));
}
}
-static void asus_report_input(struct input_dev *input, u8 *data)
+static int asus_report_input(struct asus_drvdata *drvdat, u8 *data, int size)
{
- int i;
+ int i, toolType = MT_TOOL_FINGER;
u8 *contactData = data + 2;
- for (i = 0; i < MAX_CONTACTS; i++) {
+ if (size != 3 + drvdat->tp->contact_size * drvdat->tp->max_contacts)
+ return 0;
+
+ for (i = 0; i < drvdat->tp->max_contacts; i++) {
bool down = !!(data[1] & BIT(i+3));
- int toolType = contactData[3] & CONTACT_TOOL_TYPE_MASK ?
+
+ if (drvdat->tp->contact_size >= 5)
+ toolType = contactData[3] & CONTACT_TOOL_TYPE_MASK ?
MT_TOOL_PALM : MT_TOOL_FINGER;
- input_mt_slot(input, i);
- input_mt_report_slot_state(input, toolType, down);
+ input_mt_slot(drvdat->input, i);
+ input_mt_report_slot_state(drvdat->input, toolType, down);
if (down) {
- asus_report_contact_down(input, toolType, contactData);
- contactData += CONTACT_DATA_SIZE;
+ asus_report_contact_down(drvdat, toolType, contactData);
+ contactData += drvdat->tp->contact_size;
}
}
- input_report_key(input, BTN_LEFT, data[1] & BTN_LEFT_MASK);
- asus_report_tool_width(input);
+ input_report_key(drvdat->input, BTN_LEFT, data[1] & BTN_LEFT_MASK);
+ asus_report_tool_width(drvdat);
+
+ input_mt_sync_frame(drvdat->input);
+ input_sync(drvdat->input);
- input_mt_sync_frame(input);
- input_sync(input);
+ return 1;
}
static int asus_raw_event(struct hid_device *hdev,
@@ -177,12 +225,8 @@ static int asus_raw_event(struct hid_device *hdev,
{
struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
- if (drvdata->quirks & QUIRK_IS_MULTITOUCH &&
- data[0] == INPUT_REPORT_ID &&
- size == INPUT_REPORT_SIZE) {
- asus_report_input(drvdata->input, data);
- return 1;
- }
+ if (drvdata->tp && data[0] == INPUT_REPORT_ID)
+ return asus_report_input(drvdata, data, size);
return 0;
}
@@ -334,19 +378,35 @@ static int asus_input_configured(struct hid_device *hdev, struct hid_input *hi)
struct input_dev *input = hi->input;
struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
- if (drvdata->quirks & QUIRK_IS_MULTITOUCH) {
+ /* T100CHI uses MULTI_INPUT, bind the touchpad to the mouse hid_input */
+ if (drvdata->quirks & QUIRK_T100CHI &&
+ hi->report->id != T100CHI_MOUSE_REPORT_ID)
+ return 0;
+
+ if (drvdata->tp) {
int ret;
- input_set_abs_params(input, ABS_MT_POSITION_X, 0, MAX_X, 0, 0);
- input_set_abs_params(input, ABS_MT_POSITION_Y, 0, MAX_Y, 0, 0);
- input_set_abs_params(input, ABS_TOOL_WIDTH, 0, MAX_TOUCH_MAJOR, 0, 0);
- input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, MAX_TOUCH_MAJOR, 0, 0);
- input_set_abs_params(input, ABS_MT_PRESSURE, 0, MAX_PRESSURE, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_X, 0,
+ drvdata->tp->max_x, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y, 0,
+ drvdata->tp->max_y, 0, 0);
+ input_abs_set_res(input, ABS_MT_POSITION_X, drvdata->tp->res_x);
+ input_abs_set_res(input, ABS_MT_POSITION_Y, drvdata->tp->res_y);
+
+ if (drvdata->tp->contact_size >= 5) {
+ input_set_abs_params(input, ABS_TOOL_WIDTH, 0,
+ MAX_TOUCH_MAJOR, 0, 0);
+ input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0,
+ MAX_TOUCH_MAJOR, 0, 0);
+ input_set_abs_params(input, ABS_MT_PRESSURE, 0,
+ MAX_PRESSURE, 0, 0);
+ }
__set_bit(BTN_LEFT, input->keybit);
__set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
- ret = input_mt_init_slots(input, MAX_CONTACTS, INPUT_MT_POINTER);
+ ret = input_mt_init_slots(input, drvdata->tp->max_contacts,
+ INPUT_MT_POINTER);
if (ret) {
hid_err(hdev, "Asus input mt init slots failed: %d\n", ret);
@@ -378,6 +438,26 @@ static int asus_input_mapping(struct hid_device *hdev,
return -1;
}
+ /*
+ * Ignore a bunch of bogus collections in the T100CHI descriptor.
+ * This avoids a bunch of non-functional hid_input devices getting
+ * created because of the T100CHI using HID_QUIRK_MULTI_INPUT.
+ */
+ if (drvdata->quirks & QUIRK_T100CHI) {
+ if (field->application == (HID_UP_GENDESK | 0x0080) ||
+ usage->hid == (HID_UP_GENDEVCTRLS | 0x0024) ||
+ usage->hid == (HID_UP_GENDEVCTRLS | 0x0025) ||
+ usage->hid == (HID_UP_GENDEVCTRLS | 0x0026))
+ return -1;
+ /*
+ * We use the hid_input for the mouse report for the touchpad,
+ * keep the left button, to avoid the core removing it.
+ */
+ if (field->application == HID_GD_MOUSE &&
+ usage->hid != (HID_UP_BUTTON | 1))
+ return -1;
+ }
+
/* ASUS-specific keyboard hotkeys */
if ((usage->hid & HID_USAGE_PAGE) == 0xff310000) {
set_bit(EV_REP, hi->input->evbit);
@@ -496,7 +576,7 @@ static int __maybe_unused asus_reset_resume(struct hid_device *hdev)
{
struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
- if (drvdata->quirks & QUIRK_IS_MULTITOUCH)
+ if (drvdata->tp)
return asus_start_multitouch(hdev);
return 0;
@@ -517,6 +597,28 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
drvdata->quirks = id->driver_data;
+ if (drvdata->quirks & QUIRK_IS_MULTITOUCH)
+ drvdata->tp = &asus_i2c_tp;
+
+ if (drvdata->quirks & QUIRK_T100_KEYBOARD) {
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+
+ if (intf->altsetting->desc.bInterfaceNumber == T100_TPAD_INTF) {
+ drvdata->quirks = QUIRK_SKIP_INPUT_MAPPING;
+ drvdata->tp = &asus_t100ta_tp;
+ }
+ }
+
+ if (drvdata->quirks & QUIRK_T100CHI) {
+ /*
+ * All functionality is on a single HID interface and for
+ * userspace the touchpad must be a separate input_dev.
+ */
+ hdev->quirks |= HID_QUIRK_MULTI_INPUT |
+ HID_QUIRK_NO_EMPTY_INPUT;
+ drvdata->tp = &asus_t100chi_tp;
+ }
+
if (drvdata->quirks & QUIRK_NO_INIT_REPORTS)
hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS;
@@ -538,13 +640,13 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
goto err_stop_hw;
}
- if (drvdata->quirks & QUIRK_IS_MULTITOUCH) {
+ if (drvdata->tp) {
drvdata->input->name = "Asus TouchPad";
} else {
drvdata->input->name = "Asus Keyboard";
}
- if (drvdata->quirks & QUIRK_IS_MULTITOUCH) {
+ if (drvdata->tp) {
ret = asus_start_multitouch(hdev);
if (ret)
goto err_stop_hw;
@@ -578,11 +680,34 @@ static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
hid_info(hdev, "Fixing up Asus notebook report descriptor\n");
rdesc[55] = 0xdd;
}
+ /* For the T100TA keyboard dock */
if (drvdata->quirks & QUIRK_T100_KEYBOARD &&
*rsize == 76 && rdesc[73] == 0x81 && rdesc[74] == 0x01) {
hid_info(hdev, "Fixing up Asus T100 keyb report descriptor\n");
rdesc[74] &= ~HID_MAIN_ITEM_CONSTANT;
}
+ /* For the T100CHI keyboard dock */
+ if (drvdata->quirks & QUIRK_T100CHI &&
+ *rsize == 403 && rdesc[388] == 0x09 && rdesc[389] == 0x76) {
+ /*
+ * Change Usage (76h) to Usage Minimum (00h), Usage Maximum
+ * (FFh) and clear the flags in the Input() byte.
+ * Note the descriptor has a bogus 0 byte at the end so we
+ * only need 1 extra byte.
+ */
+ *rsize = 404;
+ rdesc = kmemdup(rdesc, *rsize, GFP_KERNEL);
+ if (!rdesc)
+ return NULL;
+
+ hid_info(hdev, "Fixing up T100CHI keyb report descriptor\n");
+ memmove(rdesc + 392, rdesc + 390, 12);
+ rdesc[388] = 0x19;
+ rdesc[389] = 0x00;
+ rdesc[390] = 0x29;
+ rdesc[391] = 0xff;
+ rdesc[402] = 0x00;
+ }
return rdesc;
}
@@ -602,6 +727,9 @@ static const struct hid_device_id asus_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_ASUS_AK1D) },
{ HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_ASUS_MD_5110) },
{ HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_ASUS_MD_5112) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ASUSTEK,
+ USB_DEVICE_ID_ASUSTEK_T100CHI_KEYBOARD), QUIRK_T100CHI },
+
{ }
};
MODULE_DEVICE_TABLE(hid, asus_devices);
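The hid-asus rework replaces the hard-coded touchpad constants with per-model asus_touchpad_info tables, so the raw-event parser can validate the report length and scale coordinates from the table instead of fixed MAX_X/MAX_Y values. Per report, the size must equal 3 + contact_size * max_contacts, and Y is flipped against max_y. A small sketch of that arithmetic (table values copied from the T100TA entry above; the parsing itself is simplified):

#include <stdio.h>

struct touchpad_info {
	int max_x, max_y;
	int contact_size, max_contacts;
};

static const struct touchpad_info t100ta = {
	.max_x = 2240, .max_y = 1120,
	.contact_size = 5, .max_contacts = 5,
};

static int expected_report_size(const struct touchpad_info *tp)
{
	/* 3 fixed bytes plus per-contact data; 28 for the 5-contact, 5-byte layout */
	return 3 + tp->contact_size * tp->max_contacts;
}

static int scale_y(const struct touchpad_info *tp, int raw_y)
{
	return tp->max_y - raw_y;    /* hardware reports Y inverted */
}

int main(void)
{
	printf("expected size: %d\n", expected_report_size(&t100ta)); /* 28 */
	printf("y for raw 100: %d\n", scale_y(&t100ta, 100));         /* 1020 */
	return 0;
}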
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 6fd01a692197..9bc91160819b 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1982,6 +1982,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_ASUS_MD_5112) },
{ HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_ASUS_MD_5110) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T100CHI_KEYBOARD) },
#endif
#if IS_ENABLED(CONFIG_HID_AUREAL)
{ HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) },
@@ -2216,6 +2217,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
#if IS_ENABLED(CONFIG_HID_ORTEK)
{ HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) },
#endif
#if IS_ENABLED(CONFIG_HID_PANTHERLORD)
@@ -2486,11 +2488,9 @@ static int hid_device_probe(struct device *dev)
const struct hid_device_id *id;
int ret = 0;
- if (down_interruptible(&hdev->driver_lock))
- return -EINTR;
if (down_interruptible(&hdev->driver_input_lock)) {
ret = -EINTR;
- goto unlock_driver_lock;
+ goto end;
}
hdev->io_started = false;
@@ -2517,8 +2517,7 @@ static int hid_device_probe(struct device *dev)
unlock:
if (!hdev->io_started)
up(&hdev->driver_input_lock);
-unlock_driver_lock:
- up(&hdev->driver_lock);
+end:
return ret;
}
@@ -2528,11 +2527,9 @@ static int hid_device_remove(struct device *dev)
struct hid_driver *hdrv;
int ret = 0;
- if (down_interruptible(&hdev->driver_lock))
- return -EINTR;
if (down_interruptible(&hdev->driver_input_lock)) {
ret = -EINTR;
- goto unlock_driver_lock;
+ goto end;
}
hdev->io_started = false;
@@ -2548,8 +2545,7 @@ static int hid_device_remove(struct device *dev)
if (!hdev->io_started)
up(&hdev->driver_input_lock);
-unlock_driver_lock:
- up(&hdev->driver_lock);
+end:
return ret;
}
@@ -3007,7 +3003,6 @@ struct hid_device *hid_allocate_device(void)
init_waitqueue_head(&hdev->debug_wait);
INIT_LIST_HEAD(&hdev->debug_list);
spin_lock_init(&hdev->debug_list_lock);
- sema_init(&hdev->driver_lock, 1);
sema_init(&hdev->driver_input_lock, 1);
mutex_init(&hdev->ll_open_lock);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 3d911bfd91cf..b397a14ab970 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -176,6 +176,8 @@
#define USB_DEVICE_ID_ASUSTEK_LCM 0x1726
#define USB_DEVICE_ID_ASUSTEK_LCM2 0x175b
#define USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD 0x17e0
+#define USB_DEVICE_ID_ASUSTEK_T100CHI_KEYBOARD 0x8502
+#define USB_DEVICE_ID_ASUSTEK_T304_KEYBOARD 0x184a
#define USB_DEVICE_ID_ASUSTEK_I2C_KEYBOARD 0x8585
#define USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD 0x0101
#define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1 0x1854
@@ -666,7 +668,8 @@
#define USB_VENDOR_ID_LOGITECH 0x046d
#define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e
#define USB_DEVICE_ID_LOGITECH_T651 0xb00c
-#define USB_DEVICE_ID_LOGITECH_C077 0xc007
+#define USB_DEVICE_ID_LOGITECH_C007 0xc007
+#define USB_DEVICE_ID_LOGITECH_C077 0xc077
#define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101
#define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST 0xc110
#define USB_DEVICE_ID_LOGITECH_HARMONY_LAST 0xc14f
@@ -824,6 +827,7 @@
#define USB_VENDOR_ID_ORTEK 0x05a4
#define USB_DEVICE_ID_ORTEK_PKB1700 0x1700
#define USB_DEVICE_ID_ORTEK_WKB2000 0x2000
+#define USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S 0x8003
#define USB_VENDOR_ID_PLANTRONICS 0x047f
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index ccdff1ee1f0c..199f6a01fc62 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -340,13 +340,45 @@ static unsigned find_battery_quirk(struct hid_device *hdev)
return quirks;
}
+static int hidinput_scale_battery_capacity(struct hid_device *dev,
+ int value)
+{
+ if (dev->battery_min < dev->battery_max &&
+ value >= dev->battery_min && value <= dev->battery_max)
+ value = ((value - dev->battery_min) * 100) /
+ (dev->battery_max - dev->battery_min);
+
+ return value;
+}
+
+static int hidinput_query_battery_capacity(struct hid_device *dev)
+{
+ u8 *buf;
+ int ret;
+
+ buf = kmalloc(2, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = hid_hw_raw_request(dev, dev->battery_report_id, buf, 2,
+ dev->battery_report_type, HID_REQ_GET_REPORT);
+ if (ret != 2) {
+ kfree(buf);
+ return -ENODATA;
+ }
+
+ ret = hidinput_scale_battery_capacity(dev, buf[1]);
+ kfree(buf);
+ return ret;
+}
+
static int hidinput_get_battery_property(struct power_supply *psy,
enum power_supply_property prop,
union power_supply_propval *val)
{
struct hid_device *dev = power_supply_get_drvdata(psy);
+ int value;
int ret = 0;
- __u8 *buf;
switch (prop) {
case POWER_SUPPLY_PROP_PRESENT:
@@ -355,29 +387,15 @@ static int hidinput_get_battery_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_CAPACITY:
-
- buf = kmalloc(2 * sizeof(__u8), GFP_KERNEL);
- if (!buf) {
- ret = -ENOMEM;
- break;
- }
- ret = hid_hw_raw_request(dev, dev->battery_report_id, buf, 2,
- dev->battery_report_type,
- HID_REQ_GET_REPORT);
-
- if (ret != 2) {
- ret = -ENODATA;
- kfree(buf);
- break;
+ if (dev->battery_report_type == HID_FEATURE_REPORT) {
+ value = hidinput_query_battery_capacity(dev);
+ if (value < 0)
+ return value;
+ } else {
+ value = dev->battery_capacity;
}
- ret = 0;
- if (dev->battery_min < dev->battery_max &&
- buf[1] >= dev->battery_min &&
- buf[1] <= dev->battery_max)
- val->intval = (100 * (buf[1] - dev->battery_min)) /
- (dev->battery_max - dev->battery_min);
- kfree(buf);
+ val->intval = value;
break;
case POWER_SUPPLY_PROP_MODEL_NAME:
@@ -385,7 +403,22 @@ static int hidinput_get_battery_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_STATUS:
- val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ if (!dev->battery_reported &&
+ dev->battery_report_type == HID_FEATURE_REPORT) {
+ value = hidinput_query_battery_capacity(dev);
+ if (value < 0)
+ return value;
+
+ dev->battery_capacity = value;
+ dev->battery_reported = true;
+ }
+
+ if (!dev->battery_reported)
+ val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
+ else if (dev->battery_capacity == 100)
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ else
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
break;
case POWER_SUPPLY_PROP_SCOPE:
@@ -400,18 +433,16 @@ static int hidinput_get_battery_property(struct power_supply *psy,
return ret;
}
-static bool hidinput_setup_battery(struct hid_device *dev, unsigned report_type, struct hid_field *field)
+static int hidinput_setup_battery(struct hid_device *dev, unsigned report_type, struct hid_field *field)
{
- struct power_supply_desc *psy_desc = NULL;
+ struct power_supply_desc *psy_desc;
struct power_supply_config psy_cfg = { .drv_data = dev, };
unsigned quirks;
s32 min, max;
+ int error;
- if (field->usage->hid != HID_DC_BATTERYSTRENGTH)
- return false; /* no match */
-
- if (dev->battery != NULL)
- goto out; /* already initialized? */
+ if (dev->battery)
+ return 0; /* already initialized? */
quirks = find_battery_quirk(dev);
@@ -419,16 +450,18 @@ static bool hidinput_setup_battery(struct hid_device *dev, unsigned report_type,
dev->bus, dev->vendor, dev->product, dev->version, quirks);
if (quirks & HID_BATTERY_QUIRK_IGNORE)
- goto out;
+ return 0;
psy_desc = kzalloc(sizeof(*psy_desc), GFP_KERNEL);
- if (psy_desc == NULL)
- goto out;
-
- psy_desc->name = kasprintf(GFP_KERNEL, "hid-%s-battery", dev->uniq);
- if (psy_desc->name == NULL) {
- kfree(psy_desc);
- goto out;
+ if (!psy_desc)
+ return -ENOMEM;
+
+ psy_desc->name = kasprintf(GFP_KERNEL, "hid-%s-battery",
+ strlen(dev->uniq) ?
+ dev->uniq : dev_name(&dev->dev));
+ if (!psy_desc->name) {
+ error = -ENOMEM;
+ goto err_free_mem;
}
psy_desc->type = POWER_SUPPLY_TYPE_BATTERY;
@@ -455,17 +488,20 @@ static bool hidinput_setup_battery(struct hid_device *dev, unsigned report_type,
dev->battery = power_supply_register(&dev->dev, psy_desc, &psy_cfg);
if (IS_ERR(dev->battery)) {
- hid_warn(dev, "can't register power supply: %ld\n",
- PTR_ERR(dev->battery));
- kfree(psy_desc->name);
- kfree(psy_desc);
- dev->battery = NULL;
- } else {
- power_supply_powers(dev->battery, &dev->dev);
+ error = PTR_ERR(dev->battery);
+ hid_warn(dev, "can't register power supply: %d\n", error);
+ goto err_free_name;
}
-out:
- return true;
+ power_supply_powers(dev->battery, &dev->dev);
+ return 0;
+
+err_free_name:
+ kfree(psy_desc->name);
+err_free_mem:
+ kfree(psy_desc);
+ dev->battery = NULL;
+ return error;
}
static void hidinput_cleanup_battery(struct hid_device *dev)
@@ -481,16 +517,39 @@ static void hidinput_cleanup_battery(struct hid_device *dev)
kfree(psy_desc);
dev->battery = NULL;
}
+
+static void hidinput_update_battery(struct hid_device *dev, int value)
+{
+ int capacity;
+
+ if (!dev->battery)
+ return;
+
+ if (value == 0 || value < dev->battery_min || value > dev->battery_max)
+ return;
+
+ capacity = hidinput_scale_battery_capacity(dev, value);
+
+ if (!dev->battery_reported || capacity != dev->battery_capacity) {
+ dev->battery_capacity = capacity;
+ dev->battery_reported = true;
+ power_supply_changed(dev->battery);
+ }
+}
#else /* !CONFIG_HID_BATTERY_STRENGTH */
-static bool hidinput_setup_battery(struct hid_device *dev, unsigned report_type,
- struct hid_field *field)
+static int hidinput_setup_battery(struct hid_device *dev, unsigned report_type,
+ struct hid_field *field)
{
- return false;
+ return 0;
}
static void hidinput_cleanup_battery(struct hid_device *dev)
{
}
+
+static void hidinput_update_battery(struct hid_device *dev, int value)
+{
+}
#endif /* CONFIG_HID_BATTERY_STRENGTH */
static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_field *field,
@@ -710,6 +769,11 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
}
break;
+ case 0x3b: /* Battery Strength */
+ hidinput_setup_battery(device, HID_INPUT_REPORT, field);
+ usage->type = EV_PWR;
+ goto ignore;
+
case 0x3c: /* Invert */
map_key_clear(BTN_TOOL_RUBBER);
break;
@@ -944,11 +1008,13 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
break;
case HID_UP_GENDEVCTRLS:
- if (hidinput_setup_battery(device, HID_INPUT_REPORT, field))
+ switch (usage->hid) {
+ case HID_DC_BATTERYSTRENGTH:
+ hidinput_setup_battery(device, HID_INPUT_REPORT, field);
+ usage->type = EV_PWR;
goto ignore;
- else
- goto unknown;
- break;
+ }
+ goto unknown;
case HID_UP_HPVENDOR: /* Reported on a Dutch layout HP5308 */
set_bit(EV_REP, input->evbit);
@@ -1031,7 +1097,6 @@ mapped:
if (usage->code > max)
goto ignore;
-
if (usage->type == EV_ABS) {
int a = field->logical_minimum;
@@ -1090,14 +1155,19 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
struct input_dev *input;
unsigned *quirks = &hid->quirks;
- if (!field->hidinput)
+ if (!usage->type)
return;
- input = field->hidinput->input;
+ if (usage->type == EV_PWR) {
+ hidinput_update_battery(hid, value);
+ return;
+ }
- if (!usage->type)
+ if (!field->hidinput)
return;
+ input = field->hidinput->input;
+
if (usage->hat_min < usage->hat_max || usage->hat_dir) {
int hat_dir = usage->hat_dir;
if (!hat_dir)
@@ -1373,6 +1443,7 @@ static void report_features(struct hid_device *hid)
struct hid_driver *drv = hid->driver;
struct hid_report_enum *rep_enum;
struct hid_report *rep;
+ struct hid_usage *usage;
int i, j;
rep_enum = &hid->report_enum[HID_FEATURE_REPORT];
@@ -1383,12 +1454,15 @@ static void report_features(struct hid_device *hid)
continue;
for (j = 0; j < rep->field[i]->maxusage; j++) {
+ usage = &rep->field[i]->usage[j];
+
/* Verify if Battery Strength feature is available */
- hidinput_setup_battery(hid, HID_FEATURE_REPORT, rep->field[i]);
+ if (usage->hid == HID_DC_BATTERYSTRENGTH)
+ hidinput_setup_battery(hid, HID_FEATURE_REPORT,
+ rep->field[i]);
if (drv->feature_mapping)
- drv->feature_mapping(hid, rep->field[i],
- rep->field[i]->usage + j);
+ drv->feature_mapping(hid, rep->field[i], usage);
}
}
}
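The battery rework above centralizes the report-to-percentage conversion in hidinput_scale_battery_capacity(): a raw value inside [battery_min, battery_max] is mapped linearly onto 0-100, and out-of-range values are passed through untouched. The same arithmetic in isolation:

#include <stdio.h>

static int scale_battery_capacity(int value, int min, int max)
{
	if (min < max && value >= min && value <= max)
		value = ((value - min) * 100) / (max - min);
	return value;
}

int main(void)
{
	/* e.g. a device reporting 0..255: 128 maps to 50% */
	printf("%d\n", scale_battery_capacity(128, 0, 255));  /* 50 */
	printf("%d\n", scale_battery_capacity(255, 0, 255));  /* 100 */
	printf("%d\n", scale_battery_capacity(300, 0, 255));  /* 300, out of range */
	return 0;
}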
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 41b39464ded8..614054af904a 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -2732,6 +2732,9 @@ static int hidpp_initialize_battery(struct hidpp_device *hidpp)
hidpp_battery_props,
sizeof(hidpp_battery_props),
GFP_KERNEL);
+ if (!battery_props)
+ return -ENOMEM;
+
num_battery_props = ARRAY_SIZE(hidpp_battery_props) - 2;
if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_MILEAGE)
@@ -2923,7 +2926,7 @@ static struct attribute *sysfs_attrs[] = {
NULL
};
-static struct attribute_group ps_attribute_group = {
+static const struct attribute_group ps_attribute_group = {
.attrs = sysfs_attrs
};
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index f3e35e7a189d..440b999304a5 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -72,6 +72,7 @@ MODULE_LICENSE("GPL");
#define MT_QUIRK_FIX_CONST_CONTACT_ID BIT(14)
#define MT_QUIRK_TOUCH_SIZE_SCALING BIT(15)
#define MT_QUIRK_STICKY_FINGERS BIT(16)
+#define MT_QUIRK_ASUS_CUSTOM_UP BIT(17)
#define MT_INPUTMODE_TOUCHSCREEN 0x02
#define MT_INPUTMODE_TOUCHPAD 0x03
@@ -169,6 +170,7 @@ static void mt_post_parse(struct mt_device *td);
#define MT_CLS_GENERALTOUCH_TWOFINGERS 0x0108
#define MT_CLS_GENERALTOUCH_PWT_TENFINGERS 0x0109
#define MT_CLS_LG 0x010a
+#define MT_CLS_ASUS 0x010b
#define MT_CLS_VTL 0x0110
#define MT_CLS_GOOGLE 0x0111
@@ -290,6 +292,10 @@ static struct mt_class mt_classes[] = {
MT_QUIRK_IGNORE_DUPLICATES |
MT_QUIRK_HOVERING |
MT_QUIRK_CONTACT_CNT_ACCURATE },
+ { .name = MT_CLS_ASUS,
+ .quirks = MT_QUIRK_ALWAYS_VALID |
+ MT_QUIRK_CONTACT_CNT_ACCURATE |
+ MT_QUIRK_ASUS_CUSTOM_UP },
{ .name = MT_CLS_VTL,
.quirks = MT_QUIRK_ALWAYS_VALID |
MT_QUIRK_CONTACT_CNT_ACCURATE |
@@ -341,7 +347,7 @@ static struct attribute *sysfs_attrs[] = {
NULL
};
-static struct attribute_group mt_attribute_group = {
+static const struct attribute_group mt_attribute_group = {
.attrs = sysfs_attrs
};
@@ -620,16 +626,6 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
return 0;
}
-static int mt_touch_input_mapped(struct hid_device *hdev, struct hid_input *hi,
- struct hid_field *field, struct hid_usage *usage,
- unsigned long **bit, int *max)
-{
- if (usage->type == EV_KEY || usage->type == EV_ABS)
- set_bit(usage->type, hi->input->evbit);
-
- return -1;
-}
-
static int mt_compute_slot(struct mt_device *td, struct input_dev *input)
{
__s32 quirks = td->mtclass.quirks;
@@ -915,6 +911,8 @@ static int mt_touch_input_configured(struct hid_device *hdev,
return 0;
}
+#define mt_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, \
+ max, EV_KEY, (c))
static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
@@ -932,10 +930,36 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
field->application != HID_DG_PEN &&
field->application != HID_DG_TOUCHPAD &&
field->application != HID_GD_KEYBOARD &&
- field->application != HID_CP_CONSUMER_CONTROL)
+ field->application != HID_CP_CONSUMER_CONTROL &&
+ field->application != HID_GD_WIRELESS_RADIO_CTLS &&
+ !(field->application == HID_VD_ASUS_CUSTOM_MEDIA_KEYS &&
+ td->mtclass.quirks & MT_QUIRK_ASUS_CUSTOM_UP))
return -1;
/*
+ * Some Asus keyboard+touchpad devices have the hotkeys defined in the
+ * touchpad report descriptor. We need to treat these as an array to
+ * map usages to input keys.
+ */
+ if (field->application == HID_VD_ASUS_CUSTOM_MEDIA_KEYS &&
+ td->mtclass.quirks & MT_QUIRK_ASUS_CUSTOM_UP &&
+ (usage->hid & HID_USAGE_PAGE) == HID_UP_CUSTOM) {
+ set_bit(EV_REP, hi->input->evbit);
+ if (field->flags & HID_MAIN_ITEM_VARIABLE)
+ field->flags &= ~HID_MAIN_ITEM_VARIABLE;
+ switch (usage->hid & HID_USAGE) {
+ case 0x10: mt_map_key_clear(KEY_BRIGHTNESSDOWN); break;
+ case 0x20: mt_map_key_clear(KEY_BRIGHTNESSUP); break;
+ case 0x35: mt_map_key_clear(KEY_DISPLAY_OFF); break;
+ case 0x6b: mt_map_key_clear(KEY_F21); break;
+ case 0x6c: mt_map_key_clear(KEY_SLEEP); break;
+ default:
+ return -1;
+ }
+ return 1;
+ }
+
+ /*
* some egalax touchscreens have "application == HID_DG_TOUCHSCREEN"
* for the stylus.
* The check for mt_report_id ensures we don't process
@@ -969,8 +993,10 @@ static int mt_input_mapped(struct hid_device *hdev, struct hid_input *hi,
return 0;
if (field->application == HID_DG_TOUCHSCREEN ||
- field->application == HID_DG_TOUCHPAD)
- return mt_touch_input_mapped(hdev, hi, field, usage, bit, max);
+ field->application == HID_DG_TOUCHPAD) {
+ /* We own these mappings, tell hid-input to ignore them */
+ return -1;
+ }
/* let hid-core decide for the others */
return 0;
@@ -1141,6 +1167,12 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
case HID_CP_CONSUMER_CONTROL:
suffix = "Consumer Control";
break;
+ case HID_GD_WIRELESS_RADIO_CTLS:
+ suffix = "Wireless Radio Control";
+ break;
+ case HID_VD_ASUS_CUSTOM_MEDIA_KEYS:
+ suffix = "Custom Media Keys";
+ break;
default:
suffix = "UNKNOWN";
break;
@@ -1392,6 +1424,12 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_ANTON,
USB_DEVICE_ID_ANTON_TOUCH_PAD) },
+ /* Asus T304UA */
+ { .driver_data = MT_CLS_ASUS,
+ HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
+ USB_VENDOR_ID_ASUSTEK,
+ USB_DEVICE_ID_ASUSTEK_T304_KEYBOARD) },
+
/* Atmel panels */
{ .driver_data = MT_CLS_SERIAL,
MT_USB_DEVICE(USB_VENDOR_ID_ATMEL,
diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c
index 1b0084d4af2e..3d121d8ee980 100644
--- a/drivers/hid/hid-ntrig.c
+++ b/drivers/hid/hid-ntrig.c
@@ -445,7 +445,7 @@ static struct attribute *sysfs_attrs[] = {
NULL
};
-static struct attribute_group ntrig_attribute_group = {
+static const struct attribute_group ntrig_attribute_group = {
.attrs = sysfs_attrs
};
diff --git a/drivers/hid/hid-ortek.c b/drivers/hid/hid-ortek.c
index 6620f15fec22..8783a064cdcf 100644
--- a/drivers/hid/hid-ortek.c
+++ b/drivers/hid/hid-ortek.c
@@ -5,6 +5,7 @@
*
* Ortek PKB-1700
* Ortek WKB-2000
+ * iHome IMAC-A210S
* Skycable wireless presenter
*
* Copyright (c) 2010 Johnathon Harris <jmharris@gmail.com>
@@ -28,10 +29,10 @@ static __u8 *ortek_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
if (*rsize >= 56 && rdesc[54] == 0x25 && rdesc[55] == 0x01) {
- hid_info(hdev, "Fixing up logical minimum in report descriptor (Ortek)\n");
+ hid_info(hdev, "Fixing up logical maximum in report descriptor (Ortek)\n");
rdesc[55] = 0x92;
} else if (*rsize >= 54 && rdesc[52] == 0x25 && rdesc[53] == 0x01) {
- hid_info(hdev, "Fixing up logical minimum in report descriptor (Skycable)\n");
+ hid_info(hdev, "Fixing up logical maximum in report descriptor (Skycable)\n");
rdesc[53] = 0x65;
}
return rdesc;
@@ -40,6 +41,7 @@ static __u8 *ortek_report_fixup(struct hid_device *hdev, __u8 *rdesc,
static const struct hid_device_id ortek_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) },
{ }
};
diff --git a/drivers/hid/hid-picolcd_cir.c b/drivers/hid/hid-picolcd_cir.c
index 8ffbb6f65a65..32747b7f917e 100644
--- a/drivers/hid/hid-picolcd_cir.c
+++ b/drivers/hid/hid-picolcd_cir.c
@@ -113,10 +113,10 @@ int picolcd_init_cir(struct picolcd_data *data, struct hid_report *report)
return -ENOMEM;
rdev->priv = data;
- rdev->allowed_protocols = RC_BIT_ALL_IR_DECODER;
+ rdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
rdev->open = picolcd_cir_open;
rdev->close = picolcd_cir_close;
- rdev->input_name = data->hdev->name;
+ rdev->device_name = data->hdev->name;
rdev->input_phys = data->hdev->phys;
rdev->input_id.bustype = data->hdev->bus;
rdev->input_id.vendor = data->hdev->vendor;
diff --git a/drivers/hid/hid-prodikeys.c b/drivers/hid/hid-prodikeys.c
index f095bf8a3aa9..49c4bd34b3c5 100644
--- a/drivers/hid/hid-prodikeys.c
+++ b/drivers/hid/hid-prodikeys.c
@@ -593,7 +593,7 @@ static void pcmidi_in_trigger(struct snd_rawmidi_substream *substream, int up)
pm->in_triggered = up;
}
-static struct snd_rawmidi_ops pcmidi_in_ops = {
+static const struct snd_rawmidi_ops pcmidi_in_ops = {
.open = pcmidi_in_open,
.close = pcmidi_in_close,
.trigger = pcmidi_in_trigger
diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c
index 3a84aaf1418b..0bcf041368c7 100644
--- a/drivers/hid/hid-sensor-custom.c
+++ b/drivers/hid/hid-sensor-custom.c
@@ -276,7 +276,7 @@ static struct attribute *enable_sensor_attrs[] = {
NULL,
};
-static struct attribute_group enable_sensor_attr_group = {
+static const struct attribute_group enable_sensor_attr_group = {
.attrs = enable_sensor_attrs,
};
@@ -823,7 +823,7 @@ static int hid_sensor_custom_remove(struct platform_device *pdev)
return 0;
}
-static struct platform_device_id hid_sensor_custom_ids[] = {
+static const struct platform_device_id hid_sensor_custom_ids[] = {
{
.name = "HID-SENSOR-2000e1",
},
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 4ef73374a8f9..25363fc571bc 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -579,54 +579,6 @@ void sensor_hub_device_close(struct hid_sensor_hub_device *hsdev)
}
EXPORT_SYMBOL_GPL(sensor_hub_device_close);
-static __u8 *sensor_hub_report_fixup(struct hid_device *hdev, __u8 *rdesc,
- unsigned int *rsize)
-{
- int index;
- struct sensor_hub_data *sd = hid_get_drvdata(hdev);
- unsigned char report_block[] = {
- 0x0a, 0x16, 0x03, 0x15, 0x00, 0x25, 0x05};
- unsigned char power_block[] = {
- 0x0a, 0x19, 0x03, 0x15, 0x00, 0x25, 0x05};
-
- if (!(sd->quirks & HID_SENSOR_HUB_ENUM_QUIRK)) {
- hid_dbg(hdev, "No Enum quirks\n");
- return rdesc;
- }
-
- /* Looks for power and report state usage id and force to 1 */
- for (index = 0; index < *rsize; ++index) {
- if (((*rsize - index) > sizeof(report_block)) &&
- !memcmp(&rdesc[index], report_block,
- sizeof(report_block))) {
- rdesc[index + 4] = 0x01;
- index += sizeof(report_block);
- }
- if (((*rsize - index) > sizeof(power_block)) &&
- !memcmp(&rdesc[index], power_block,
- sizeof(power_block))) {
- rdesc[index + 4] = 0x01;
- index += sizeof(power_block);
- }
- }
-
- /* Checks if the report descriptor of Thinkpad Helix 2 has a logical
- * minimum for magnetic flux axis greater than the maximum */
- if (hdev->product == USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA &&
- *rsize == 2558 && rdesc[913] == 0x17 && rdesc[914] == 0x40 &&
- rdesc[915] == 0x81 && rdesc[916] == 0x08 &&
- rdesc[917] == 0x00 && rdesc[918] == 0x27 &&
- rdesc[921] == 0x07 && rdesc[922] == 0x00) {
- /* Sets negative logical minimum for mag x, y and z */
- rdesc[914] = rdesc[935] = rdesc[956] = 0xc0;
- rdesc[915] = rdesc[936] = rdesc[957] = 0x7e;
- rdesc[916] = rdesc[937] = rdesc[958] = 0xf7;
- rdesc[917] = rdesc[938] = rdesc[959] = 0xff;
- }
-
- return rdesc;
-}
-
static int sensor_hub_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
@@ -778,51 +730,6 @@ static void sensor_hub_remove(struct hid_device *hdev)
}
static const struct hid_device_id sensor_hub_devices[] = {
- { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_INTEL_0,
- USB_DEVICE_ID_INTEL_HID_SENSOR_0),
- .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
- { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_INTEL_1,
- USB_DEVICE_ID_INTEL_HID_SENSOR_0),
- .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
- { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_INTEL_1,
- USB_DEVICE_ID_INTEL_HID_SENSOR_1),
- .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
- { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_MICROSOFT,
- USB_DEVICE_ID_MS_SURFACE_PRO_2),
- .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
- { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_MICROSOFT,
- USB_DEVICE_ID_MS_TOUCH_COVER_2),
- .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
- { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_MICROSOFT,
- USB_DEVICE_ID_MS_TYPE_COVER_2),
- .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
- { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_MICROSOFT,
- 0x07bd), /* Microsoft Surface 3 */
- .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
- { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_MICROCHIP,
- 0x0f01), /* MM7150 */
- .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
- { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_STM_0,
- USB_DEVICE_ID_STM_HID_SENSOR),
- .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
- { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_STM_0,
- USB_DEVICE_ID_STM_HID_SENSOR_1),
- .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
- { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_TEXAS_INSTRUMENTS,
- USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA),
- .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
- { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_ITE,
- USB_DEVICE_ID_ITE_LENOVO_YOGA),
- .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
- { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_ITE,
- USB_DEVICE_ID_ITE_LENOVO_YOGA2),
- .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
- { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_ITE,
- USB_DEVICE_ID_ITE_LENOVO_YOGA900),
- .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
- { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_INTEL_0,
- 0x22D8),
- .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, HID_ANY_ID,
HID_ANY_ID) },
{ }
@@ -835,7 +742,6 @@ static struct hid_driver sensor_hub_driver = {
.probe = sensor_hub_probe,
.remove = sensor_hub_remove,
.raw_event = sensor_hub_raw_event,
- .report_fixup = sensor_hub_report_fixup,
#ifdef CONFIG_PM
.suspend = sensor_hub_suspend,
.resume = sensor_hub_resume,
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index 046f692fd0a2..77396145d2d0 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -780,7 +780,7 @@ static int i2c_hid_power(struct hid_device *hid, int lvl)
return 0;
}
-static struct hid_ll_driver i2c_hid_ll_driver = {
+struct hid_ll_driver i2c_hid_ll_driver = {
.parse = i2c_hid_parse,
.start = i2c_hid_start,
.stop = i2c_hid_stop,
@@ -790,6 +790,7 @@ static struct hid_ll_driver i2c_hid_ll_driver = {
.output_report = i2c_hid_output_report,
.raw_request = i2c_hid_raw_request,
};
+EXPORT_SYMBOL_GPL(i2c_hid_ll_driver);
static int i2c_hid_init_irq(struct i2c_client *client)
{
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index 7f8ff39ed44b..6f819f144cb4 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -369,7 +369,7 @@ static int uhid_hid_output_report(struct hid_device *hid, __u8 *buf,
return uhid_hid_output_raw(hid, buf, count, HID_OUTPUT_REPORT);
}
-static struct hid_ll_driver uhid_hid_driver = {
+struct hid_ll_driver uhid_hid_driver = {
.start = uhid_hid_start,
.stop = uhid_hid_stop,
.open = uhid_hid_open,
@@ -378,6 +378,7 @@ static struct hid_ll_driver uhid_hid_driver = {
.raw_request = uhid_hid_raw_request,
.output_report = uhid_hid_output_report,
};
+EXPORT_SYMBOL_GPL(uhid_hid_driver);
#ifdef CONFIG_COMPAT
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 76013eb5cb7f..089bad8a9a21 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -680,18 +680,21 @@ static int usbhid_open(struct hid_device *hid)
struct usbhid_device *usbhid = hid->driver_data;
int res;
+ set_bit(HID_OPENED, &usbhid->iofl);
+
if (hid->quirks & HID_QUIRK_ALWAYS_POLL)
return 0;
res = usb_autopm_get_interface(usbhid->intf);
/* the device must be awake to reliably request remote wakeup */
- if (res < 0)
+ if (res < 0) {
+ clear_bit(HID_OPENED, &usbhid->iofl);
return -EIO;
+ }
usbhid->intf->needs_remote_wakeup = 1;
set_bit(HID_RESUME_RUNNING, &usbhid->iofl);
- set_bit(HID_OPENED, &usbhid->iofl);
set_bit(HID_IN_POLLING, &usbhid->iofl);
res = hid_start_in(hid);
@@ -727,19 +730,20 @@ static void usbhid_close(struct hid_device *hid)
{
struct usbhid_device *usbhid = hid->driver_data;
- if (hid->quirks & HID_QUIRK_ALWAYS_POLL)
- return;
-
/*
* Make sure we don't restart data acquisition due to
* a resumption we no longer care about by avoiding racing
* with hid_start_in().
*/
spin_lock_irq(&usbhid->lock);
- clear_bit(HID_IN_POLLING, &usbhid->iofl);
clear_bit(HID_OPENED, &usbhid->iofl);
+ if (!(hid->quirks & HID_QUIRK_ALWAYS_POLL))
+ clear_bit(HID_IN_POLLING, &usbhid->iofl);
spin_unlock_irq(&usbhid->lock);
+ if (hid->quirks & HID_QUIRK_ALWAYS_POLL)
+ return;
+
hid_cancel_delayed_stuff(usbhid);
usb_kill_urb(usbhid->urbin);
usbhid->intf->needs_remote_wakeup = 0;
@@ -1261,7 +1265,7 @@ static int usbhid_idle(struct hid_device *hid, int report, int idle,
return hid_set_idle(dev, ifnum, report, idle);
}
-static struct hid_ll_driver usb_hid_driver = {
+struct hid_ll_driver usb_hid_driver = {
.parse = usbhid_parse,
.start = usbhid_start,
.stop = usbhid_stop,
@@ -1274,6 +1278,7 @@ static struct hid_ll_driver usb_hid_driver = {
.output_report = usbhid_output_report,
.idle = usbhid_idle,
};
+EXPORT_SYMBOL_GPL(usb_hid_driver);
static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index a88e7c7bea0a..a83fa76655b9 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -99,6 +99,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
+ { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C007, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C01A, HID_QUIRK_ALWAYS_POLL },
diff --git a/drivers/hid/usbhid/usbkbd.c b/drivers/hid/usbhid/usbkbd.c
index 7fb2d1e4f5dd..ed01dc425d29 100644
--- a/drivers/hid/usbhid/usbkbd.c
+++ b/drivers/hid/usbhid/usbkbd.c
@@ -392,7 +392,7 @@ static void usb_kbd_disconnect(struct usb_interface *intf)
}
}
-static struct usb_device_id usb_kbd_id_table [] = {
+static const struct usb_device_id usb_kbd_id_table[] = {
{ USB_INTERFACE_INFO(USB_INTERFACE_CLASS_HID, USB_INTERFACE_SUBCLASS_BOOT,
USB_INTERFACE_PROTOCOL_KEYBOARD) },
{ } /* Terminating entry */
diff --git a/drivers/hid/usbhid/usbmouse.c b/drivers/hid/usbhid/usbmouse.c
index dd911c5241d8..589ad7c15a58 100644
--- a/drivers/hid/usbhid/usbmouse.c
+++ b/drivers/hid/usbhid/usbmouse.c
@@ -226,7 +226,7 @@ static void usb_mouse_disconnect(struct usb_interface *intf)
}
}
-static struct usb_device_id usb_mouse_id_table [] = {
+static const struct usb_device_id usb_mouse_id_table[] = {
{ USB_INTERFACE_INFO(USB_INTERFACE_CLASS_HID, USB_INTERFACE_SUBCLASS_BOOT,
USB_INTERFACE_PROTOCOL_MOUSE) },
{ } /* Terminating entry */
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 838c1ebfffa9..e82a696a1d07 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -1671,10 +1671,7 @@ static ssize_t wacom_show_remote_mode(struct kobject *kobj,
u8 mode;
mode = wacom->led.groups[index].select;
- if (mode >= 0 && mode < 3)
- return snprintf(buf, PAGE_SIZE, "%d\n", mode);
- else
- return snprintf(buf, PAGE_SIZE, "%d\n", -1);
+ return sprintf(buf, "%d\n", mode < 3 ? mode : -1);
}
#define DEVICE_EKR_ATTR_GROUP(SET_ID) \
@@ -2028,41 +2025,37 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix)
/* Generic devices name unspecified */
if ((features->type == HID_GENERIC) && !strcmp("Wacom HID", features->name)) {
- if (strstr(wacom->hdev->name, "Wacom") ||
- strstr(wacom->hdev->name, "wacom") ||
- strstr(wacom->hdev->name, "WACOM")) {
- /* name is in HID descriptor, use it */
- strlcpy(name, wacom->hdev->name, sizeof(name));
-
- /* strip out excess whitespaces */
- while (1) {
- char *gap = strstr(name, "  ");
- if (gap == NULL)
- break;
- /* shift everything including the terminator */
- memmove(gap, gap+1, strlen(gap));
- }
+ char *product_name = wacom->hdev->name;
- /* strip off excessive prefixing */
- if (strstr(name, "Wacom Co.,Ltd. Wacom ") == name) {
- int n = strlen(name);
- int x = strlen("Wacom Co.,Ltd. ");
- memmove(name, name+x, n-x+1);
- }
- if (strstr(name, "Wacom Co., Ltd. Wacom ") == name) {
- int n = strlen(name);
- int x = strlen("Wacom Co., Ltd. ");
- memmove(name, name+x, n-x+1);
- }
+ if (hid_is_using_ll_driver(wacom->hdev, &usb_hid_driver)) {
+ struct usb_interface *intf = to_usb_interface(wacom->hdev->dev.parent);
+ struct usb_device *dev = interface_to_usbdev(intf);
+ product_name = dev->product;
+ }
- /* get rid of trailing whitespace */
- if (name[strlen(name)-1] == ' ')
- name[strlen(name)-1] = '\0';
+ if (wacom->hdev->bus == BUS_I2C) {
+ snprintf(name, sizeof(name), "%s %X",
+ features->name, wacom->hdev->product);
+ } else if (strstr(product_name, "Wacom") ||
+ strstr(product_name, "wacom") ||
+ strstr(product_name, "WACOM")) {
+ strlcpy(name, product_name, sizeof(name));
} else {
- /* no meaningful name retrieved. use product ID */
- snprintf(name, sizeof(name),
- "%s %X", features->name, wacom->hdev->product);
+ snprintf(name, sizeof(name), "Wacom %s", product_name);
}
+
+ /* strip out excess whitespaces */
+ while (1) {
+ char *gap = strstr(name, "  ");
+ if (gap == NULL)
+ break;
+ /* shift everything including the terminator */
+ memmove(gap, gap+1, strlen(gap));
+ }
+
+ /* get rid of trailing whitespace */
+ if (name[strlen(name)-1] == ' ')
+ name[strlen(name)-1] = '\0';
} else {
strlcpy(name, features->name, sizeof(name));
}
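wacom_update_name() now always runs the whitespace cleanup, collapsing any doubled spaces and trimming a trailing one regardless of where the product string came from. The collapse loop is worth seeing on its own, since the memmove() length deliberately includes the terminator:

#include <stdio.h>
#include <string.h>

static void collapse_spaces(char *name)
{
	/* strip out excess whitespace: shrink every "  " to " " */
	while (1) {
		char *gap = strstr(name, "  ");
		if (!gap)
			break;
		/* shift everything left by one, including the '\0' terminator */
		memmove(gap, gap + 1, strlen(gap));
	}

	/* drop a single trailing space, if any */
	if (name[0] && name[strlen(name) - 1] == ' ')
		name[strlen(name) - 1] = '\0';
}

int main(void)
{
	char name[64] = "Wacom   Co.,Ltd.  Intuos ";

	collapse_spaces(name);
	printf("\"%s\"\n", name);   /* "Wacom Co.,Ltd. Intuos" */
	return 0;
}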
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 9f940293ede4..bb17d7bbefd3 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -1846,7 +1846,13 @@ static void wacom_wac_pad_usage_mapping(struct hid_device *hdev,
features->device_type |= WACOM_DEVICETYPE_PAD;
break;
case WACOM_HID_WD_TOUCHRINGSTATUS:
- wacom_map_usage(input, usage, field, EV_ABS, ABS_WHEEL, 0);
+ /*
+ * Only set up type/code association. Completely mapping
+ * this usage may overwrite the axis resolution and range.
+ */
+ usage->type = EV_ABS;
+ usage->code = ABS_WHEEL;
+ set_bit(EV_ABS, input->evbit);
features->device_type |= WACOM_DEVICETYPE_PAD;
break;
case WACOM_HID_WD_BUTTONCONFIG:
diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig
index c29cd5387a35..50b89ea0e60f 100644
--- a/drivers/hv/Kconfig
+++ b/drivers/hv/Kconfig
@@ -3,6 +3,7 @@ menu "Microsoft Hyper-V guest support"
config HYPERV
tristate "Microsoft Hyper-V client drivers"
depends on X86 && ACPI && PCI && X86_LOCAL_APIC && HYPERVISOR_GUEST
+ select PARAVIRT
help
Select this option to run Linux as a Hyper-V client operating
system.
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index e9bf0bb87ac4..efd5db743319 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -177,6 +177,11 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
&vmbus_connection.chn_msg_list);
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+ if (newchannel->rescind) {
+ err = -ENODEV;
+ goto error_free_gpadl;
+ }
+
ret = vmbus_post_msg(open_msg,
sizeof(struct vmbus_channel_open_channel), true);
@@ -421,6 +426,11 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+ if (channel->rescind) {
+ ret = -ENODEV;
+ goto cleanup;
+ }
+
ret = vmbus_post_msg(gpadlmsg, msginfo->msgsize -
sizeof(*msginfo), true);
if (ret != 0)
@@ -494,6 +504,10 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
list_add_tail(&info->msglistentry,
&vmbus_connection.chn_msg_list);
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
+ if (channel->rescind)
+ goto post_msg_err;
+
ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_gpadl_teardown),
true);
@@ -606,6 +620,8 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
out:
+ /* re-enable tasklet for use on re-open */
+ tasklet_enable(&channel->callback_event);
return ret;
}
@@ -645,9 +661,23 @@ void vmbus_close(struct vmbus_channel *channel)
}
EXPORT_SYMBOL_GPL(vmbus_close);
-int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
- u32 bufferlen, u64 requestid,
- enum vmbus_packet_type type, u32 flags)
+/**
+ * vmbus_sendpacket() - Send the specified buffer on the given channel
+ * @channel: Pointer to vmbus_channel structure.
+ * @buffer: Pointer to the buffer you want to receive the data into.
+ * @bufferlen: Maximum size of what the buffer will hold
+ * @requestid: Identifier of the request
+ * @type: Type of packet that is being sent e.g. negotiate, time
+ * packet etc.
+ *
+ * Sends data in @buffer directly to hyper-v via the vmbus
+ * This will send the data unparsed to hyper-v.
+ *
+ * Mainly used by Hyper-V drivers.
+ */
+int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
+ u32 bufferlen, u64 requestid,
+ enum vmbus_packet_type type, u32 flags)
{
struct vmpacket_descriptor desc;
u32 packetlen = sizeof(struct vmpacket_descriptor) + bufferlen;
@@ -674,42 +704,19 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
return hv_ringbuffer_write(channel, bufferlist, num_vecs);
}
-EXPORT_SYMBOL(vmbus_sendpacket_ctl);
-
-/**
- * vmbus_sendpacket() - Send the specified buffer on the given channel
- * @channel: Pointer to vmbus_channel structure.
- * @buffer: Pointer to the buffer you want to receive the data into.
- * @bufferlen: Maximum size of what the the buffer will hold
- * @requestid: Identifier of the request
- * @type: Type of packet that is being send e.g. negotiate, time
- * packet etc.
- *
- * Sends data in @buffer directly to hyper-v via the vmbus
- * This will send the data unparsed to hyper-v.
- *
- * Mainly used by Hyper-V drivers.
- */
-int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
- u32 bufferlen, u64 requestid,
- enum vmbus_packet_type type, u32 flags)
-{
- return vmbus_sendpacket_ctl(channel, buffer, bufferlen, requestid,
- type, flags);
-}
EXPORT_SYMBOL(vmbus_sendpacket);
/*
- * vmbus_sendpacket_pagebuffer_ctl - Send a range of single-page buffer
+ * vmbus_sendpacket_pagebuffer - Send a range of single-page buffer
* packets using a GPADL Direct packet type. This interface allows you
* to control notifying the host. This will be useful for sending
* batched data. Also the sender can control the send flags
* explicitly.
*/
-int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
- struct hv_page_buffer pagebuffers[],
- u32 pagecount, void *buffer, u32 bufferlen,
- u64 requestid, u32 flags)
+int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
+ struct hv_page_buffer pagebuffers[],
+ u32 pagecount, void *buffer, u32 bufferlen,
+ u64 requestid)
{
int i;
struct vmbus_channel_packet_page_buffer desc;
@@ -734,7 +741,7 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
/* Setup the descriptor */
desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
- desc.flags = flags;
+ desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
desc.length8 = (u16)(packetlen_aligned >> 3);
desc.transactionid = requestid;
@@ -755,24 +762,6 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
return hv_ringbuffer_write(channel, bufferlist, 3);
}
-EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer_ctl);
-
-/*
- * vmbus_sendpacket_pagebuffer - Send a range of single-page buffer
- * packets using a GPADL Direct packet type.
- */
-int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
- struct hv_page_buffer pagebuffers[],
- u32 pagecount, void *buffer, u32 bufferlen,
- u64 requestid)
-{
- u32 flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
-
- return vmbus_sendpacket_pagebuffer_ctl(channel, pagebuffers, pagecount,
- buffer, bufferlen,
- requestid, flags);
-
-}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer);
/*
@@ -812,62 +801,6 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);
-/*
- * vmbus_sendpacket_multipagebuffer - Send a multi-page buffer packet
- * using a GPADL Direct packet type.
- */
-int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
- struct hv_multipage_buffer *multi_pagebuffer,
- void *buffer, u32 bufferlen, u64 requestid)
-{
- struct vmbus_channel_packet_multipage_buffer desc;
- u32 descsize;
- u32 packetlen;
- u32 packetlen_aligned;
- struct kvec bufferlist[3];
- u64 aligned_data = 0;
- u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
- multi_pagebuffer->len);
-
- if (pfncount > MAX_MULTIPAGE_BUFFER_COUNT)
- return -EINVAL;
-
- /*
- * Adjust the size down since vmbus_channel_packet_multipage_buffer is
- * the largest size we support
- */
- descsize = sizeof(struct vmbus_channel_packet_multipage_buffer) -
- ((MAX_MULTIPAGE_BUFFER_COUNT - pfncount) *
- sizeof(u64));
- packetlen = descsize + bufferlen;
- packetlen_aligned = ALIGN(packetlen, sizeof(u64));
-
-
- /* Setup the descriptor */
- desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
- desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
- desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
- desc.length8 = (u16)(packetlen_aligned >> 3);
- desc.transactionid = requestid;
- desc.rangecount = 1;
-
- desc.range.len = multi_pagebuffer->len;
- desc.range.offset = multi_pagebuffer->offset;
-
- memcpy(desc.range.pfn_array, multi_pagebuffer->pfn_array,
- pfncount * sizeof(u64));
-
- bufferlist[0].iov_base = &desc;
- bufferlist[0].iov_len = descsize;
- bufferlist[1].iov_base = buffer;
- bufferlist[1].iov_len = bufferlen;
- bufferlist[2].iov_base = &aligned_data;
- bufferlist[2].iov_len = (packetlen_aligned - packetlen);
-
- return hv_ringbuffer_write(channel, bufferlist, 3);
-}
-EXPORT_SYMBOL_GPL(vmbus_sendpacket_multipagebuffer);
-
/**
* vmbus_recvpacket() - Retrieve the user packet on the specified channel
* @channel: Pointer to vmbus_channel structure.
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 4bbb8dea4727..060df71c2e8b 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -451,6 +451,12 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
/* Make sure this is a new offer */
mutex_lock(&vmbus_connection.channel_mutex);
+ /*
+ * Now that we have acquired the channel_mutex,
+ * we can release the potentially racing rescind thread.
+ */
+ atomic_dec(&vmbus_connection.offer_in_progress);
+
list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
if (!uuid_le_cmp(channel->offermsg.offer.if_type,
newchannel->offermsg.offer.if_type) &&
@@ -481,7 +487,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
channel->num_sc++;
spin_unlock_irqrestore(&channel->lock, flags);
} else {
- atomic_dec(&vmbus_connection.offer_in_progress);
goto err_free_chan;
}
}
@@ -510,7 +515,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
if (!fnew) {
if (channel->sc_creation_callback != NULL)
channel->sc_creation_callback(newchannel);
- atomic_dec(&vmbus_connection.offer_in_progress);
return;
}
@@ -541,7 +545,7 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
goto err_deq_chan;
}
- atomic_dec(&vmbus_connection.offer_in_progress);
+ newchannel->probe_done = true;
return;
err_deq_chan:
@@ -599,7 +603,7 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
*/
channel->numa_node = 0;
channel->target_cpu = 0;
- channel->target_vp = hv_context.vp_index[0];
+ channel->target_vp = hv_cpu_number_to_vp_number(0);
return;
}
@@ -683,7 +687,7 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
}
channel->target_cpu = cur_cpu;
- channel->target_vp = hv_context.vp_index[cur_cpu];
+ channel->target_vp = hv_cpu_number_to_vp_number(cur_cpu);
}
static void vmbus_wait_for_unload(void)
@@ -805,21 +809,12 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
/*
* Setup state for signalling the host.
*/
- newchannel->sig_event = (struct hv_input_signal_event *)
- (ALIGN((unsigned long)
- &newchannel->sig_buf,
- HV_HYPERCALL_PARAM_ALIGN));
-
- newchannel->sig_event->connectionid.asu32 = 0;
- newchannel->sig_event->connectionid.u.id = VMBUS_EVENT_CONNECTION_ID;
- newchannel->sig_event->flag_number = 0;
- newchannel->sig_event->rsvdz = 0;
+ newchannel->sig_event = VMBUS_EVENT_CONNECTION_ID;
if (vmbus_proto_version != VERSION_WS2008) {
newchannel->is_dedicated_interrupt =
(offer->is_dedicated_interrupt != 0);
- newchannel->sig_event->connectionid.u.id =
- offer->connection_id;
+ newchannel->sig_event = offer->connection_id;
}
memcpy(&newchannel->offermsg, offer,
@@ -882,8 +877,27 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
channel->rescind = true;
spin_unlock_irqrestore(&channel->lock, flags);
+ /*
+ * Now that we have posted the rescind state, perform
+ * rescind related cleanup.
+ */
vmbus_rescind_cleanup(channel);
+ /*
+ * Now wait for offer handling to complete.
+ */
+ while (READ_ONCE(channel->probe_done) == false) {
+ /*
+ * Wait here until the offer for this channel has been
+ * fully processed.
+ */
+ msleep(1);
+ }
+
+ /*
+ * At this point, the rescind handling can proceed safely.
+ */
+
if (channel->device_obj) {
if (channel->chn_rescind_callback) {
channel->chn_rescind_callback(channel);
@@ -1228,8 +1242,7 @@ struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary)
return outgoing_channel;
}
- cur_cpu = hv_context.vp_index[get_cpu()];
- put_cpu();
+ cur_cpu = hv_cpu_number_to_vp_number(smp_processor_id());
list_for_each_safe(cur, tmp, &primary->sc_list) {
cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
if (cur_channel->state != CHANNEL_OPENED_STATE)
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 59c11ff90d12..f41901f80b64 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -32,6 +32,8 @@
#include <linux/hyperv.h>
#include <linux/export.h>
#include <asm/hyperv.h>
+#include <asm/mshyperv.h>
+
#include "hyperv_vmbus.h"
@@ -94,7 +96,8 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
* the CPU attempting to connect may not be CPU 0.
*/
if (version >= VERSION_WIN8_1) {
- msg->target_vcpu = hv_context.vp_index[smp_processor_id()];
+ msg->target_vcpu =
+ hv_cpu_number_to_vp_number(smp_processor_id());
vmbus_connection.connect_cpu = smp_processor_id();
} else {
msg->target_vcpu = 0;
@@ -406,6 +409,6 @@ void vmbus_set_event(struct vmbus_channel *channel)
if (!channel->is_dedicated_interrupt)
vmbus_send_interrupt(child_relid);
- hv_do_hypercall(HVCALL_SIGNAL_EVENT, channel->sig_event, NULL);
+ hv_do_fast_hypercall8(HVCALL_SIGNAL_EVENT, channel->sig_event);
}
EXPORT_SYMBOL_GPL(vmbus_set_event);
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 2ea12207caa0..8267439dd1ee 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -234,7 +234,6 @@ int hv_synic_init(unsigned int cpu)
union hv_synic_siefp siefp;
union hv_synic_sint shared_sint;
union hv_synic_scontrol sctrl;
- u64 vp_index;
/* Setup the Synic's message page */
hv_get_simp(simp.as_uint64);
@@ -276,14 +275,6 @@ int hv_synic_init(unsigned int cpu)
hv_context.synic_initialized = true;
/*
- * Setup the mapping between Hyper-V's notion
- * of cpuid and Linux' notion of cpuid.
- * This array will be indexed using Linux cpuid.
- */
- hv_get_vp_index(vp_index);
- hv_context.vp_index[cpu] = (u32)vp_index;
-
- /*
* Register the per-cpu clockevent source.
*/
if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE)
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index f5728deff893..db0e6652d7ef 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -584,10 +584,6 @@ static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
switch (val) {
case MEM_ONLINE:
- spin_lock_irqsave(&dm_device.ha_lock, flags);
- dm_device.num_pages_onlined += mem->nr_pages;
- spin_unlock_irqrestore(&dm_device.ha_lock, flags);
- /* Fall through */
case MEM_CANCEL_ONLINE:
if (dm_device.ha_waiting) {
dm_device.ha_waiting = false;
@@ -644,6 +640,9 @@ static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
__online_page_set_limits(pg);
__online_page_increment_counters(pg);
__online_page_free(pg);
+
+ WARN_ON_ONCE(!spin_is_locked(&dm_device.ha_lock));
+ dm_device.num_pages_onlined++;
}
static void hv_bring_pgs_online(struct hv_hotadd_state *has,
@@ -1036,8 +1035,8 @@ static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
if (info_hdr->data_size == sizeof(__u64)) {
__u64 *max_page_count = (__u64 *)&info_hdr[1];
- pr_info("INFO_TYPE_MAX_PAGE_CNT = %llu\n",
- *max_page_count);
+ pr_info("Max. dynamic memory size: %llu MB\n",
+ (*max_page_count) >> (20 - PAGE_SHIFT));
}
break;
@@ -1656,6 +1655,7 @@ static int balloon_probe(struct hv_device *dev,
}
dm_device.state = DM_INITIALIZED;
+ last_post_time = jiffies;
return 0;
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index 9a90b915b5be..5eed1e7da15c 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -304,7 +304,7 @@ static int process_ob_ipinfo(void *in_msg, void *out_msg, int op)
strlen((char *)in->body.kvp_ip_val.adapter_id),
UTF16_HOST_ENDIAN,
(wchar_t *)out->kvp_ip_val.adapter_id,
- MAX_IP_ADDR_SIZE);
+ MAX_ADAPTER_ID_SIZE);
if (len < 0)
return len;
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 1b6a5e0dfa75..49569f8fe038 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -229,17 +229,6 @@ struct hv_context {
struct hv_per_cpu_context __percpu *cpu_context;
/*
- * Hypervisor's notion of virtual processor ID is different from
- * Linux' notion of CPU ID. This information can only be retrieved
- * in the context of the calling CPU. Setup a map for easy access
- * to this information:
- *
- * vp_index[a] is the Hyper-V's processor ID corresponding to
- * Linux cpuid 'a'.
- */
- u32 vp_index[NR_CPUS];
-
- /*
* To manage allocations in a NUMA node.
* Array indexed by numa node ID.
*/
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 1f450c39a9b0..12eb8caa4263 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -29,6 +29,7 @@
#include <linux/uio.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
+#include <linux/prefetch.h>
#include "hyperv_vmbus.h"
@@ -94,30 +95,6 @@ hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
ring_info->ring_buffer->write_index = next_write_location;
}
-/* Get the next read location for the specified ring buffer. */
-static inline u32
-hv_get_next_read_location(const struct hv_ring_buffer_info *ring_info)
-{
- return ring_info->ring_buffer->read_index;
-}
-
-/*
- * Get the next read location + offset for the specified ring buffer.
- * This allows the caller to skip.
- */
-static inline u32
-hv_get_next_readlocation_withoffset(const struct hv_ring_buffer_info *ring_info,
- u32 offset)
-{
- u32 next = ring_info->ring_buffer->read_index;
-
- next += offset;
- if (next >= ring_info->ring_datasize)
- next -= ring_info->ring_datasize;
-
- return next;
-}
-
/* Set the next read location for the specified ring buffer. */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
@@ -142,29 +119,6 @@ hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
}
/*
- * Helper routine to copy to source from ring buffer.
- * Assume there is enough room. Handles wrap-around in src case only!!
- */
-static u32 hv_copyfrom_ringbuffer(
- const struct hv_ring_buffer_info *ring_info,
- void *dest,
- u32 destlen,
- u32 start_read_offset)
-{
- void *ring_buffer = hv_get_ring_buffer(ring_info);
- u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
-
- memcpy(dest, ring_buffer + start_read_offset, destlen);
-
- start_read_offset += destlen;
- if (start_read_offset >= ring_buffer_size)
- start_read_offset -= ring_buffer_size;
-
- return start_read_offset;
-}
-
-
-/*
* Helper routine to copy from source to ring buffer.
* Assume there is enough room. Handles wrap-around in dest case only!!
*/
@@ -334,33 +288,22 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
return 0;
}
-static inline void
-init_cached_read_index(struct hv_ring_buffer_info *rbi)
-{
- rbi->cached_read_index = rbi->ring_buffer->read_index;
-}
-
int hv_ringbuffer_read(struct vmbus_channel *channel,
void *buffer, u32 buflen, u32 *buffer_actual_len,
u64 *requestid, bool raw)
{
- u32 bytes_avail_toread;
- u32 next_read_location;
- u64 prev_indices = 0;
- struct vmpacket_descriptor desc;
- u32 offset;
- u32 packetlen;
- struct hv_ring_buffer_info *inring_info = &channel->inbound;
-
- if (buflen <= 0)
+ struct vmpacket_descriptor *desc;
+ u32 packetlen, offset;
+
+ if (unlikely(buflen == 0))
return -EINVAL;
*buffer_actual_len = 0;
*requestid = 0;
- bytes_avail_toread = hv_get_bytes_to_read(inring_info);
/* Make sure there is something to read */
- if (bytes_avail_toread < sizeof(desc)) {
+ desc = hv_pkt_iter_first(channel);
+ if (desc == NULL) {
/*
* No error is set when there is even no header, drivers are
* supposed to analyze buffer_actual_len.
@@ -368,48 +311,22 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
return 0;
}
- init_cached_read_index(inring_info);
-
- next_read_location = hv_get_next_read_location(inring_info);
- next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc,
- sizeof(desc),
- next_read_location);
-
- offset = raw ? 0 : (desc.offset8 << 3);
- packetlen = (desc.len8 << 3) - offset;
+ offset = raw ? 0 : (desc->offset8 << 3);
+ packetlen = (desc->len8 << 3) - offset;
*buffer_actual_len = packetlen;
- *requestid = desc.trans_id;
-
- if (bytes_avail_toread < packetlen + offset)
- return -EAGAIN;
+ *requestid = desc->trans_id;
- if (packetlen > buflen)
+ if (unlikely(packetlen > buflen))
return -ENOBUFS;
- next_read_location =
- hv_get_next_readlocation_withoffset(inring_info, offset);
+ /* since ring is double mapped, only one copy is necessary */
+ memcpy(buffer, (const char *)desc + offset, packetlen);
- next_read_location = hv_copyfrom_ringbuffer(inring_info,
- buffer,
- packetlen,
- next_read_location);
+ /* Advance ring index to next packet descriptor */
+ __hv_pkt_iter_next(channel, desc);
- next_read_location = hv_copyfrom_ringbuffer(inring_info,
- &prev_indices,
- sizeof(u64),
- next_read_location);
-
- /*
- * Make sure all reads are done before we update the read index since
- * the writer may start writing to the read area once the read index
- * is updated.
- */
- virt_mb();
-
- /* Update the read index */
- hv_set_next_read_location(inring_info, next_read_location);
-
- hv_signal_on_read(channel);
+ /* Notify host of update */
+ hv_pkt_iter_close(channel);
return 0;
}
@@ -440,14 +357,16 @@ static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel)
{
struct hv_ring_buffer_info *rbi = &channel->inbound;
-
- /* set state for later hv_signal_on_read() */
- init_cached_read_index(rbi);
+ struct vmpacket_descriptor *desc;
if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
return NULL;
- return hv_get_ring_buffer(rbi) + rbi->priv_read_index;
+ desc = hv_get_ring_buffer(rbi) + rbi->priv_read_index;
+ if (desc)
+ prefetch((char *)desc + (desc->len8 << 3));
+
+ return desc;
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_first);
@@ -471,10 +390,7 @@ __hv_pkt_iter_next(struct vmbus_channel *channel,
rbi->priv_read_index -= dsize;
/* more data? */
- if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
- return NULL;
- else
- return hv_get_ring_buffer(rbi) + rbi->priv_read_index;
+ return hv_pkt_iter_first(channel);
}
EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);
@@ -484,6 +400,7 @@ EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);
void hv_pkt_iter_close(struct vmbus_channel *channel)
{
struct hv_ring_buffer_info *rbi = &channel->inbound;
+ u32 orig_write_sz = hv_get_bytes_to_write(rbi);
/*
* Make sure all reads are done before we update the read index since
@@ -493,6 +410,40 @@ void hv_pkt_iter_close(struct vmbus_channel *channel)
virt_rmb();
rbi->ring_buffer->read_index = rbi->priv_read_index;
- hv_signal_on_read(channel);
+ /*
+ * Issue a full memory barrier before making the signaling decision.
+ * Here is the reason for having this barrier:
+ * If the reading of pending_sz (below) were to be reordered
+ * and read before we commit the new read index (above), we could
+ * have a problem. If the host were to set the pending_sz after we
+ * have sampled pending_sz and go to sleep before we commit the
+ * read index, we could miss sending the interrupt. Issue a full
+ * memory barrier to address this.
+ */
+ virt_mb();
+
+ /* If host has disabled notifications then skip */
+ if (rbi->ring_buffer->interrupt_mask)
+ return;
+
+ if (rbi->ring_buffer->feature_bits.feat_pending_send_sz) {
+ u32 pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
+
+ /*
+ * If there was space before we began iteration, then the
+ * host was not blocked. This also handles the case where
+ * pending_sz is zero: the host has nothing pending and
+ * does not need to be signaled.
+ */
+ if (orig_write_sz > pending_sz)
+ return;
+
+ /* If pending write will not fit, don't give false hope. */
+ if (hv_get_bytes_to_write(rbi) < pending_sz)
+ return;
+ }
+
+ vmbus_setevent(channel);
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_close);
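
For context on the read-path rework above: with the double-mapped ring buffer, hv_ringbuffer_read() becomes a thin wrapper around the in-order packet iterator. Below is a minimal sketch (not part of this patch) of how a VMBus driver might consume packets with hv_pkt_iter_first()/__hv_pkt_iter_next()/hv_pkt_iter_close(); the example_drain_channel() and handle_packet() names are placeholders.

#include <linux/hyperv.h>

static void example_drain_channel(struct vmbus_channel *channel)
{
	struct vmpacket_descriptor *desc;

	/* Walk all currently available packets in ring order. */
	for (desc = hv_pkt_iter_first(channel);
	     desc != NULL;
	     desc = __hv_pkt_iter_next(channel, desc)) {
		/* Payload starts at offset8 (8-byte units) from the descriptor. */
		void *data = (char *)desc + (desc->offset8 << 3);
		u32 datalen = (desc->len8 << 3) - (desc->offset8 << 3);

		handle_packet(channel, desc->type, desc->trans_id, data, datalen);
	}

	/* Commit the read index and signal the host if it is waiting for space. */
	hv_pkt_iter_close(channel);
}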
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index ed84e96715a0..a9d49f6f6501 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -940,6 +940,9 @@ static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
if (channel->offermsg.child_relid != relid)
continue;
+ if (channel->rescind)
+ continue;
+
switch (channel->callback_mode) {
case HV_CALL_ISR:
vmbus_channel_isr(channel);
@@ -1451,23 +1454,6 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size)
}
EXPORT_SYMBOL_GPL(vmbus_free_mmio);
-/**
- * vmbus_cpu_number_to_vp_number() - Map CPU to VP.
- * @cpu_number: CPU number in Linux terms
- *
- * This function returns the mapping between the Linux processor
- * number and the hypervisor's virtual processor number, useful
- * in making hypercalls and such that talk about specific
- * processors.
- *
- * Return: Virtual processor number in Hyper-V terms
- */
-int vmbus_cpu_number_to_vp_number(int cpu_number)
-{
- return hv_context.vp_index[cpu_number];
-}
-EXPORT_SYMBOL_GPL(vmbus_cpu_number_to_vp_number);
-
static int vmbus_acpi_add(struct acpi_device *device)
{
acpi_status result;
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 5ef2814345ef..d65431417b17 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -343,6 +343,7 @@ config SENSORS_ASB100
config SENSORS_ASPEED
tristate "ASPEED AST2400/AST2500 PWM and Fan tach driver"
+ depends on THERMAL || THERMAL=n
select REGMAP
help
This driver provides support for ASPEED AST2400/AST2500 PWM
@@ -790,6 +791,13 @@ config SENSORS_LTC4261
This driver can also be built as a module. If so, the module will
be called ltc4261.
+config SENSORS_LTQ_CPUTEMP
+ bool "Lantiq cpu temperature sensor driver"
+ depends on LANTIQ
+ help
+ If you say yes here you get support for the temperature
+ sensor inside your CPU.
+
config SENSORS_MAX1111
tristate "Maxim MAX1111 Serial 8-bit ADC chip and compatibles"
depends on SPI_MASTER
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index d4641a9f16c1..c84d9784be98 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -110,6 +110,7 @@ obj-$(CONFIG_SENSORS_LTC4222) += ltc4222.o
obj-$(CONFIG_SENSORS_LTC4245) += ltc4245.o
obj-$(CONFIG_SENSORS_LTC4260) += ltc4260.o
obj-$(CONFIG_SENSORS_LTC4261) += ltc4261.o
+obj-$(CONFIG_SENSORS_LTQ_CPUTEMP) += ltq-cputemp.o
obj-$(CONFIG_SENSORS_MAX1111) += max1111.o
obj-$(CONFIG_SENSORS_MAX16065) += max16065.o
obj-$(CONFIG_SENSORS_MAX1619) += max1619.o
diff --git a/drivers/hwmon/adc128d818.c b/drivers/hwmon/adc128d818.c
index a557b46dbe8e..bd2ca315c9d8 100644
--- a/drivers/hwmon/adc128d818.c
+++ b/drivers/hwmon/adc128d818.c
@@ -384,7 +384,7 @@ static struct attribute *adc128_attrs[] = {
NULL
};
-static struct attribute_group adc128_group = {
+static const struct attribute_group adc128_group = {
.attrs = adc128_attrs,
.is_visible = adc128_is_visible,
};
diff --git a/drivers/hwmon/ads1015.c b/drivers/hwmon/ads1015.c
index 357b42607164..98c704d3366a 100644
--- a/drivers/hwmon/ads1015.c
+++ b/drivers/hwmon/ads1015.c
@@ -191,24 +191,23 @@ static int ads1015_get_channels_config_of(struct i2c_client *client)
unsigned int data_rate = ADS1015_DEFAULT_DATA_RATE;
if (of_property_read_u32(node, "reg", &pval)) {
- dev_err(&client->dev, "invalid reg on %s\n",
- node->full_name);
+ dev_err(&client->dev, "invalid reg on %pOF\n", node);
continue;
}
channel = pval;
if (channel >= ADS1015_CHANNELS) {
dev_err(&client->dev,
- "invalid channel index %d on %s\n",
- channel, node->full_name);
+ "invalid channel index %d on %pOF\n",
+ channel, node);
continue;
}
if (!of_property_read_u32(node, "ti,gain", &pval)) {
pga = pval;
if (pga > 6) {
- dev_err(&client->dev, "invalid gain on %s\n",
- node->full_name);
+ dev_err(&client->dev, "invalid gain on %pOF\n",
+ node);
return -EINVAL;
}
}
@@ -217,8 +216,7 @@ static int ads1015_get_channels_config_of(struct i2c_client *client)
data_rate = pval;
if (data_rate > 7) {
dev_err(&client->dev,
- "invalid data_rate on %s\n",
- node->full_name);
+ "invalid data_rate on %pOF\n", node);
return -EINVAL;
}
}
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index 1baa213a60bd..9ef84998c7f3 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -1319,14 +1319,14 @@ static struct attribute *vid_attrs[] = {
NULL
};
-static struct attribute_group adt7475_attr_group = { .attrs = adt7475_attrs };
-static struct attribute_group fan4_attr_group = { .attrs = fan4_attrs };
-static struct attribute_group pwm2_attr_group = { .attrs = pwm2_attrs };
-static struct attribute_group in0_attr_group = { .attrs = in0_attrs };
-static struct attribute_group in3_attr_group = { .attrs = in3_attrs };
-static struct attribute_group in4_attr_group = { .attrs = in4_attrs };
-static struct attribute_group in5_attr_group = { .attrs = in5_attrs };
-static struct attribute_group vid_attr_group = { .attrs = vid_attrs };
+static const struct attribute_group adt7475_attr_group = { .attrs = adt7475_attrs };
+static const struct attribute_group fan4_attr_group = { .attrs = fan4_attrs };
+static const struct attribute_group pwm2_attr_group = { .attrs = pwm2_attrs };
+static const struct attribute_group in0_attr_group = { .attrs = in0_attrs };
+static const struct attribute_group in3_attr_group = { .attrs = in3_attrs };
+static const struct attribute_group in4_attr_group = { .attrs = in4_attrs };
+static const struct attribute_group in5_attr_group = { .attrs = in5_attrs };
+static const struct attribute_group vid_attr_group = { .attrs = vid_attrs };
static int adt7475_detect(struct i2c_client *client,
struct i2c_board_info *info)
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 0af7fd311979..76c34f4fde13 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -566,6 +566,8 @@ static int applesmc_init_smcreg_try(void)
if (ret)
return ret;
s->fan_count = tmp[0];
+ if (s->fan_count > 10)
+ s->fan_count = 10;
ret = applesmc_get_lower_bound(&s->temp_begin, "T");
if (ret)
@@ -811,7 +813,8 @@ static ssize_t applesmc_show_fan_speed(struct device *dev,
char newkey[5];
u8 buffer[2];
- sprintf(newkey, fan_speed_fmt[to_option(attr)], to_index(attr));
+ scnprintf(newkey, sizeof(newkey), fan_speed_fmt[to_option(attr)],
+ to_index(attr));
ret = applesmc_read_key(newkey, buffer, 2);
speed = ((buffer[0] << 8 | buffer[1]) >> 2);
@@ -834,7 +837,8 @@ static ssize_t applesmc_store_fan_speed(struct device *dev,
if (kstrtoul(sysfsbuf, 10, &speed) < 0 || speed >= 0x4000)
return -EINVAL; /* Bigger than a 14-bit value */
- sprintf(newkey, fan_speed_fmt[to_option(attr)], to_index(attr));
+ scnprintf(newkey, sizeof(newkey), fan_speed_fmt[to_option(attr)],
+ to_index(attr));
buffer[0] = (speed >> 6) & 0xff;
buffer[1] = (speed << 2) & 0xff;
@@ -903,7 +907,7 @@ static ssize_t applesmc_show_fan_position(struct device *dev,
char newkey[5];
u8 buffer[17];
- sprintf(newkey, FAN_ID_FMT, to_index(attr));
+ scnprintf(newkey, sizeof(newkey), FAN_ID_FMT, to_index(attr));
ret = applesmc_read_key(newkey, buffer, 16);
buffer[16] = 0;
@@ -1116,7 +1120,8 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
}
for (i = 0; i < num; i++) {
node = &grp->nodes[i];
- sprintf(node->name, grp->format, i + 1);
+ scnprintf(node->name, sizeof(node->name), grp->format,
+ i + 1);
node->sda.index = (grp->option << 16) | (i & 0xffff);
node->sda.dev_attr.show = grp->show;
node->sda.dev_attr.store = grp->store;
diff --git a/drivers/hwmon/asc7621.c b/drivers/hwmon/asc7621.c
index c77644d45a02..4875e99b59c9 100644
--- a/drivers/hwmon/asc7621.c
+++ b/drivers/hwmon/asc7621.c
@@ -512,7 +512,7 @@ static ssize_t show_pwm_ac(struct device *dev,
{
SETUP_SHOW_DATA_PARAM(dev, attr);
u8 config, altbit, regval;
- const u8 map[] = {
+ static const u8 map[] = {
0x01, 0x02, 0x04, 0x1f, 0x00, 0x06, 0x07, 0x10,
0x08, 0x0f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f
};
@@ -533,7 +533,7 @@ static ssize_t store_pwm_ac(struct device *dev,
SETUP_STORE_DATA_PARAM(dev, attr);
unsigned long reqval;
u8 currval, config, altbit, newval;
- const u16 map[] = {
+ static const u16 map[] = {
0x04, 0x00, 0x01, 0xff, 0x02, 0xff, 0x05, 0x06,
0x08, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0f,
0x07, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c
index ddfe66bdff86..69b97d45e3cb 100644
--- a/drivers/hwmon/aspeed-pwm-tacho.c
+++ b/drivers/hwmon/aspeed-pwm-tacho.c
@@ -20,6 +20,7 @@
#include <linux/platform_device.h>
#include <linux/sysfs.h>
#include <linux/regmap.h>
+#include <linux/thermal.h>
/* ASPEED PWM & FAN Tach Register Definition */
#define ASPEED_PTCR_CTRL 0x00
@@ -166,6 +167,18 @@
/* How long we sleep in us while waiting for an RPM result. */
#define ASPEED_RPM_STATUS_SLEEP_USEC 500
+#define MAX_CDEV_NAME_LEN 16
+
+struct aspeed_cooling_device {
+ char name[16];
+ struct aspeed_pwm_tacho_data *priv;
+ struct thermal_cooling_device *tcdev;
+ int pwm_port;
+ u8 *cooling_levels;
+ u8 max_state;
+ u8 cur_state;
+};
+
struct aspeed_pwm_tacho_data {
struct regmap *regmap;
unsigned long clk_freq;
@@ -180,6 +193,7 @@ struct aspeed_pwm_tacho_data {
u8 pwm_port_type[8];
u8 pwm_port_fan_ctrl[8];
u8 fan_tach_ch_source[16];
+ struct aspeed_cooling_device *cdev[8];
const struct attribute_group *groups[3];
};
@@ -765,6 +779,94 @@ static void aspeed_create_fan_tach_channel(struct aspeed_pwm_tacho_data *priv,
}
}
+static int
+aspeed_pwm_cz_get_max_state(struct thermal_cooling_device *tcdev,
+ unsigned long *state)
+{
+ struct aspeed_cooling_device *cdev = tcdev->devdata;
+
+ *state = cdev->max_state;
+
+ return 0;
+}
+
+static int
+aspeed_pwm_cz_get_cur_state(struct thermal_cooling_device *tcdev,
+ unsigned long *state)
+{
+ struct aspeed_cooling_device *cdev = tcdev->devdata;
+
+ *state = cdev->cur_state;
+
+ return 0;
+}
+
+static int
+aspeed_pwm_cz_set_cur_state(struct thermal_cooling_device *tcdev,
+ unsigned long state)
+{
+ struct aspeed_cooling_device *cdev = tcdev->devdata;
+
+ if (state > cdev->max_state)
+ return -EINVAL;
+
+ cdev->cur_state = state;
+ cdev->priv->pwm_port_fan_ctrl[cdev->pwm_port] =
+ cdev->cooling_levels[cdev->cur_state];
+ aspeed_set_pwm_port_fan_ctrl(cdev->priv, cdev->pwm_port,
+ cdev->cooling_levels[cdev->cur_state]);
+
+ return 0;
+}
+
+static const struct thermal_cooling_device_ops aspeed_pwm_cool_ops = {
+ .get_max_state = aspeed_pwm_cz_get_max_state,
+ .get_cur_state = aspeed_pwm_cz_get_cur_state,
+ .set_cur_state = aspeed_pwm_cz_set_cur_state,
+};
+
+static int aspeed_create_pwm_cooling(struct device *dev,
+ struct device_node *child,
+ struct aspeed_pwm_tacho_data *priv,
+ u32 pwm_port, u8 num_levels)
+{
+ int ret;
+ struct aspeed_cooling_device *cdev;
+
+ cdev = devm_kzalloc(dev, sizeof(*cdev), GFP_KERNEL);
+
+ if (!cdev)
+ return -ENOMEM;
+
+ cdev->cooling_levels = devm_kzalloc(dev, num_levels, GFP_KERNEL);
+ if (!cdev->cooling_levels)
+ return -ENOMEM;
+
+ cdev->max_state = num_levels - 1;
+ ret = of_property_read_u8_array(child, "cooling-levels",
+ cdev->cooling_levels,
+ num_levels);
+ if (ret) {
+ dev_err(dev, "Property 'cooling-levels' cannot be read.\n");
+ return ret;
+ }
+ snprintf(cdev->name, MAX_CDEV_NAME_LEN, "%s%d", child->name, pwm_port);
+
+ cdev->tcdev = thermal_of_cooling_device_register(child,
+ cdev->name,
+ cdev,
+ &aspeed_pwm_cool_ops);
+ if (IS_ERR(cdev->tcdev))
+ return PTR_ERR(cdev->tcdev);
+
+ cdev->priv = priv;
+ cdev->pwm_port = pwm_port;
+
+ priv->cdev[pwm_port] = cdev;
+
+ return 0;
+}
+
static int aspeed_create_fan(struct device *dev,
struct device_node *child,
struct aspeed_pwm_tacho_data *priv)
@@ -778,6 +880,15 @@ static int aspeed_create_fan(struct device *dev,
return ret;
aspeed_create_pwm_port(priv, (u8)pwm_port);
+ ret = of_property_count_u8_elems(child, "cooling-levels");
+
+ if (ret > 0) {
+ ret = aspeed_create_pwm_cooling(dev, child, priv, pwm_port,
+ ret);
+ if (ret)
+ return ret;
+ }
+
count = of_property_count_u8_elems(child, "aspeed,fan-tach-ch");
if (count < 1)
return -EINVAL;
@@ -834,9 +945,10 @@ static int aspeed_pwm_tacho_probe(struct platform_device *pdev)
for_each_child_of_node(np, child) {
ret = aspeed_create_fan(dev, child, priv);
- of_node_put(child);
- if (ret)
+ if (ret) {
+ of_node_put(child);
return ret;
+ }
}
priv->groups[0] = &pwm_dev_group;
diff --git a/drivers/hwmon/da9052-hwmon.c b/drivers/hwmon/da9052-hwmon.c
index c9832bfacfe5..97a62f5b9ea4 100644
--- a/drivers/hwmon/da9052-hwmon.c
+++ b/drivers/hwmon/da9052-hwmon.c
@@ -20,13 +20,19 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/mfd/da9052/da9052.h>
#include <linux/mfd/da9052/reg.h>
+#include <linux/regulator/consumer.h>
struct da9052_hwmon {
- struct da9052 *da9052;
- struct mutex hwmon_lock;
+ struct da9052 *da9052;
+ struct mutex hwmon_lock;
+ bool tsi_as_adc;
+ int tsiref_mv;
+ struct regulator *tsiref;
+ struct completion tsidone;
};
static const char * const input_names[] = {
@@ -37,6 +43,10 @@ static const char * const input_names[] = {
[DA9052_ADC_IN4] = "ADC IN4",
[DA9052_ADC_IN5] = "ADC IN5",
[DA9052_ADC_IN6] = "ADC IN6",
+ [DA9052_ADC_TSI_XP] = "ADC TS X+",
+ [DA9052_ADC_TSI_YP] = "ADC TS Y+",
+ [DA9052_ADC_TSI_XN] = "ADC TS X-",
+ [DA9052_ADC_TSI_YN] = "ADC TS Y-",
[DA9052_ADC_TJUNC] = "BATTERY JUNCTION TEMP",
[DA9052_ADC_VBBAT] = "BACK-UP BATTERY VOLTAGE",
};
@@ -59,6 +69,11 @@ static inline int vbbat_reg_to_mv(int value)
return DIV_ROUND_CLOSEST(value * 5000, 1023);
}
+static inline int input_tsireg_to_mv(struct da9052_hwmon *hwmon, int value)
+{
+ return DIV_ROUND_CLOSEST(value * hwmon->tsiref_mv, 1023);
+}
+
static inline int da9052_enable_vddout_channel(struct da9052 *da9052)
{
return da9052_reg_update(da9052, DA9052_ADC_CONT_REG,
@@ -154,6 +169,97 @@ static ssize_t da9052_read_misc_channel(struct device *dev,
return sprintf(buf, "%d\n", input_reg_to_mv(ret));
}
+static int da9052_request_tsi_read(struct da9052_hwmon *hwmon, int channel)
+{
+ u8 val = DA9052_TSICONTB_TSIMAN;
+
+ switch (channel) {
+ case DA9052_ADC_TSI_XP:
+ val |= DA9052_TSICONTB_TSIMUX_XP;
+ break;
+ case DA9052_ADC_TSI_YP:
+ val |= DA9052_TSICONTB_TSIMUX_YP;
+ break;
+ case DA9052_ADC_TSI_XN:
+ val |= DA9052_TSICONTB_TSIMUX_XN;
+ break;
+ case DA9052_ADC_TSI_YN:
+ val |= DA9052_TSICONTB_TSIMUX_YN;
+ break;
+ }
+
+ return da9052_reg_write(hwmon->da9052, DA9052_TSI_CONT_B_REG, val);
+}
+
+static int da9052_get_tsi_result(struct da9052_hwmon *hwmon, int channel)
+{
+ u8 regs[3];
+ int msb, lsb, err;
+
+ /* block read to avoid separation of MSB and LSB */
+ err = da9052_group_read(hwmon->da9052, DA9052_TSI_X_MSB_REG,
+ ARRAY_SIZE(regs), regs);
+ if (err)
+ return err;
+
+ switch (channel) {
+ case DA9052_ADC_TSI_XP:
+ case DA9052_ADC_TSI_XN:
+ msb = regs[0] << DA9052_TSILSB_TSIXL_BITS;
+ lsb = regs[2] & DA9052_TSILSB_TSIXL;
+ lsb >>= DA9052_TSILSB_TSIXL_SHIFT;
+ break;
+ case DA9052_ADC_TSI_YP:
+ case DA9052_ADC_TSI_YN:
+ msb = regs[1] << DA9052_TSILSB_TSIYL_BITS;
+ lsb = regs[2] & DA9052_TSILSB_TSIYL;
+ lsb >>= DA9052_TSILSB_TSIYL_SHIFT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return msb | lsb;
+}
+
+
+static ssize_t __da9052_read_tsi(struct device *dev, int channel)
+{
+ struct da9052_hwmon *hwmon = dev_get_drvdata(dev);
+ int ret;
+
+ reinit_completion(&hwmon->tsidone);
+
+ ret = da9052_request_tsi_read(hwmon, channel);
+ if (ret < 0)
+ return ret;
+
+ /* Wait for a conversion-done interrupt */
+ if (!wait_for_completion_timeout(&hwmon->tsidone,
+ msecs_to_jiffies(500)))
+ return -ETIMEDOUT;
+
+ return da9052_get_tsi_result(hwmon, channel);
+}
+
+static ssize_t da9052_read_tsi(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct da9052_hwmon *hwmon = dev_get_drvdata(dev);
+ int channel = to_sensor_dev_attr(devattr)->index;
+ int ret;
+
+ mutex_lock(&hwmon->hwmon_lock);
+ ret = __da9052_read_tsi(dev, channel);
+ mutex_unlock(&hwmon->hwmon_lock);
+
+ if (ret < 0)
+ return ret;
+ else
+ return sprintf(buf, "%d\n", input_tsireg_to_mv(hwmon, ret));
+}
+
static ssize_t da9052_read_tjunc(struct device *dev,
struct device_attribute *devattr, char *buf)
{
@@ -196,43 +302,82 @@ static ssize_t show_label(struct device *dev,
input_names[to_sensor_dev_attr(devattr)->index]);
}
-static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, da9052_read_vddout, NULL,
+static umode_t da9052_channel_is_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct da9052_hwmon *hwmon = dev_get_drvdata(dev);
+ struct device_attribute *dattr = container_of(attr,
+ struct device_attribute, attr);
+ struct sensor_device_attribute *sattr = to_sensor_dev_attr(dattr);
+
+ if (!hwmon->tsi_as_adc) {
+ switch (sattr->index) {
+ case DA9052_ADC_TSI_XP:
+ case DA9052_ADC_TSI_YP:
+ case DA9052_ADC_TSI_XN:
+ case DA9052_ADC_TSI_YN:
+ return 0;
+ }
+ }
+
+ return attr->mode;
+}
+
+static SENSOR_DEVICE_ATTR(in0_input, 0444, da9052_read_vddout, NULL,
DA9052_ADC_VDDOUT);
-static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, show_label, NULL,
+static SENSOR_DEVICE_ATTR(in0_label, 0444, show_label, NULL,
DA9052_ADC_VDDOUT);
-static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, da9052_read_vbat, NULL,
+static SENSOR_DEVICE_ATTR(in3_input, 0444, da9052_read_vbat, NULL,
DA9052_ADC_VBAT);
-static SENSOR_DEVICE_ATTR(in3_label, S_IRUGO, show_label, NULL,
+static SENSOR_DEVICE_ATTR(in3_label, 0444, show_label, NULL,
DA9052_ADC_VBAT);
-static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, da9052_read_misc_channel, NULL,
+static SENSOR_DEVICE_ATTR(in4_input, 0444, da9052_read_misc_channel, NULL,
DA9052_ADC_IN4);
-static SENSOR_DEVICE_ATTR(in4_label, S_IRUGO, show_label, NULL,
+static SENSOR_DEVICE_ATTR(in4_label, 0444, show_label, NULL,
DA9052_ADC_IN4);
-static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, da9052_read_misc_channel, NULL,
+static SENSOR_DEVICE_ATTR(in5_input, 0444, da9052_read_misc_channel, NULL,
DA9052_ADC_IN5);
-static SENSOR_DEVICE_ATTR(in5_label, S_IRUGO, show_label, NULL,
+static SENSOR_DEVICE_ATTR(in5_label, 0444, show_label, NULL,
DA9052_ADC_IN5);
-static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, da9052_read_misc_channel, NULL,
+static SENSOR_DEVICE_ATTR(in6_input, 0444, da9052_read_misc_channel, NULL,
DA9052_ADC_IN6);
-static SENSOR_DEVICE_ATTR(in6_label, S_IRUGO, show_label, NULL,
+static SENSOR_DEVICE_ATTR(in6_label, 0444, show_label, NULL,
DA9052_ADC_IN6);
-static SENSOR_DEVICE_ATTR(in9_input, S_IRUGO, da9052_read_vbbat, NULL,
+static SENSOR_DEVICE_ATTR(in9_input, 0444, da9052_read_vbbat, NULL,
DA9052_ADC_VBBAT);
-static SENSOR_DEVICE_ATTR(in9_label, S_IRUGO, show_label, NULL,
+static SENSOR_DEVICE_ATTR(in9_label, 0444, show_label, NULL,
DA9052_ADC_VBBAT);
-static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, da9052_read_ich, NULL,
+static SENSOR_DEVICE_ATTR(in70_input, 0444, da9052_read_tsi, NULL,
+ DA9052_ADC_TSI_XP);
+static SENSOR_DEVICE_ATTR(in70_label, 0444, show_label, NULL,
+ DA9052_ADC_TSI_XP);
+static SENSOR_DEVICE_ATTR(in71_input, 0444, da9052_read_tsi, NULL,
+ DA9052_ADC_TSI_XN);
+static SENSOR_DEVICE_ATTR(in71_label, 0444, show_label, NULL,
+ DA9052_ADC_TSI_XN);
+static SENSOR_DEVICE_ATTR(in72_input, 0444, da9052_read_tsi, NULL,
+ DA9052_ADC_TSI_YP);
+static SENSOR_DEVICE_ATTR(in72_label, 0444, show_label, NULL,
+ DA9052_ADC_TSI_YP);
+static SENSOR_DEVICE_ATTR(in73_input, 0444, da9052_read_tsi, NULL,
+ DA9052_ADC_TSI_YN);
+static SENSOR_DEVICE_ATTR(in73_label, 0444, show_label, NULL,
+ DA9052_ADC_TSI_YN);
+
+static SENSOR_DEVICE_ATTR(curr1_input, 0444, da9052_read_ich, NULL,
DA9052_ADC_ICH);
-static SENSOR_DEVICE_ATTR(curr1_label, S_IRUGO, show_label, NULL,
+static SENSOR_DEVICE_ATTR(curr1_label, 0444, show_label, NULL,
DA9052_ADC_ICH);
-static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, da9052_read_tbat, NULL,
+static SENSOR_DEVICE_ATTR(temp2_input, 0444, da9052_read_tbat, NULL,
DA9052_ADC_TBAT);
-static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, show_label, NULL,
+static SENSOR_DEVICE_ATTR(temp2_label, 0444, show_label, NULL,
DA9052_ADC_TBAT);
-static SENSOR_DEVICE_ATTR(temp8_input, S_IRUGO, da9052_read_tjunc, NULL,
+static SENSOR_DEVICE_ATTR(temp8_input, 0444, da9052_read_tjunc, NULL,
DA9052_ADC_TJUNC);
-static SENSOR_DEVICE_ATTR(temp8_label, S_IRUGO, show_label, NULL,
+static SENSOR_DEVICE_ATTR(temp8_label, 0444, show_label, NULL,
DA9052_ADC_TJUNC);
static struct attribute *da9052_attrs[] = {
@@ -246,6 +391,14 @@ static struct attribute *da9052_attrs[] = {
&sensor_dev_attr_in5_label.dev_attr.attr,
&sensor_dev_attr_in6_input.dev_attr.attr,
&sensor_dev_attr_in6_label.dev_attr.attr,
+ &sensor_dev_attr_in70_input.dev_attr.attr,
+ &sensor_dev_attr_in70_label.dev_attr.attr,
+ &sensor_dev_attr_in71_input.dev_attr.attr,
+ &sensor_dev_attr_in71_label.dev_attr.attr,
+ &sensor_dev_attr_in72_input.dev_attr.attr,
+ &sensor_dev_attr_in72_label.dev_attr.attr,
+ &sensor_dev_attr_in73_input.dev_attr.attr,
+ &sensor_dev_attr_in73_label.dev_attr.attr,
&sensor_dev_attr_in9_input.dev_attr.attr,
&sensor_dev_attr_in9_label.dev_attr.attr,
&sensor_dev_attr_curr1_input.dev_attr.attr,
@@ -257,29 +410,117 @@ static struct attribute *da9052_attrs[] = {
NULL
};
-ATTRIBUTE_GROUPS(da9052);
+static const struct attribute_group da9052_group = {
+ .attrs = da9052_attrs,
+ .is_visible = da9052_channel_is_visible,
+};
+__ATTRIBUTE_GROUPS(da9052);
+
+static irqreturn_t da9052_tsi_datardy_irq(int irq, void *data)
+{
+ struct da9052_hwmon *hwmon = data;
+
+ complete(&hwmon->tsidone);
+ return IRQ_HANDLED;
+}
static int da9052_hwmon_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct da9052_hwmon *hwmon;
struct device *hwmon_dev;
+ int err;
hwmon = devm_kzalloc(dev, sizeof(struct da9052_hwmon), GFP_KERNEL);
if (!hwmon)
return -ENOMEM;
+ platform_set_drvdata(pdev, hwmon);
+
mutex_init(&hwmon->hwmon_lock);
hwmon->da9052 = dev_get_drvdata(pdev->dev.parent);
+ init_completion(&hwmon->tsidone);
+
+ hwmon->tsi_as_adc =
+ device_property_read_bool(pdev->dev.parent, "dlg,tsi-as-adc");
+
+ if (hwmon->tsi_as_adc) {
+ hwmon->tsiref = devm_regulator_get(pdev->dev.parent, "tsiref");
+ if (IS_ERR(hwmon->tsiref)) {
+ err = PTR_ERR(hwmon->tsiref);
+ dev_err(&pdev->dev, "failed to get tsiref: %d", err);
+ return err;
+ }
+
+ err = regulator_enable(hwmon->tsiref);
+ if (err)
+ return err;
+
+ hwmon->tsiref_mv = regulator_get_voltage(hwmon->tsiref);
+ if (hwmon->tsiref_mv < 0) {
+ err = hwmon->tsiref_mv;
+ goto exit_regulator;
+ }
+
+ /* convert from microvolt (DT) to millivolt (hwmon) */
+ hwmon->tsiref_mv /= 1000;
+
+ /* TSIREF limits from datasheet */
+ if (hwmon->tsiref_mv < 1800 || hwmon->tsiref_mv > 2600) {
+ dev_err(hwmon->da9052->dev, "invalid TSIREF voltage: %d",
+ hwmon->tsiref_mv);
+ err = -ENXIO;
+ goto exit_regulator;
+ }
+
+ /* disable touchscreen features */
+ da9052_reg_write(hwmon->da9052, DA9052_TSI_CONT_A_REG, 0x00);
+
+ err = da9052_request_irq(hwmon->da9052, DA9052_IRQ_TSIREADY,
+ "tsiready-irq", da9052_tsi_datardy_irq,
+ hwmon);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register TSIRDY IRQ: %d",
+ err);
+ goto exit_regulator;
+ }
+ }
+
hwmon_dev = devm_hwmon_device_register_with_groups(dev, "da9052",
hwmon,
da9052_groups);
- return PTR_ERR_OR_ZERO(hwmon_dev);
+ err = PTR_ERR_OR_ZERO(hwmon_dev);
+ if (err)
+ goto exit_irq;
+
+ return 0;
+
+exit_irq:
+ if (hwmon->tsi_as_adc)
+ da9052_free_irq(hwmon->da9052, DA9052_IRQ_TSIREADY, hwmon);
+exit_regulator:
+ if (hwmon->tsiref)
+ regulator_disable(hwmon->tsiref);
+
+ return err;
+}
+
+static int da9052_hwmon_remove(struct platform_device *pdev)
+{
+ struct da9052_hwmon *hwmon = platform_get_drvdata(pdev);
+
+ if (hwmon->tsi_as_adc) {
+ da9052_free_irq(hwmon->da9052, DA9052_IRQ_TSIREADY, hwmon);
+ regulator_disable(hwmon->tsiref);
+ }
+
+ return 0;
}
static struct platform_driver da9052_hwmon_driver = {
.probe = da9052_hwmon_probe,
+ .remove = da9052_hwmon_remove,
.driver = {
.name = "da9052-hwmon",
},
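
The da9052 change above uses a single attribute list plus a .is_visible hook to hide the TSI channels when "dlg,tsi-as-adc" is absent, rather than registering two different attribute groups. A minimal sketch of that sysfs pattern, assuming hypothetical my_data, my_attrs and FIRST_OPTIONAL_ATTR names:

#include <linux/device.h>
#include <linux/sysfs.h>

struct my_data {
	bool feature_present;	/* e.g. probed from a DT property */
};

/* List the DEVICE_ATTR()/SENSOR_DEVICE_ATTR() entries here, NULL-terminated. */
static struct attribute *my_attrs[] = {
	/* &dev_attr_foo.attr, ... */
	NULL
};

#define FIRST_OPTIONAL_ATTR	4	/* position of the first hideable attribute */

static umode_t my_is_visible(struct kobject *kobj,
			     struct attribute *attr, int index)
{
	struct device *dev = kobj_to_dev(kobj);
	struct my_data *data = dev_get_drvdata(dev);

	/* Returning 0 hides the attribute; otherwise keep its declared mode. */
	if (!data->feature_present && index >= FIRST_OPTIONAL_ATTR)
		return 0;

	return attr->mode;
}

static const struct attribute_group my_group = {
	.attrs = my_attrs,
	.is_visible = my_is_visible,
};
__ATTRIBUTE_GROUPS(my);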
diff --git a/drivers/hwmon/ftsteutates.c b/drivers/hwmon/ftsteutates.c
index 0f0277e7aae5..0801f48a41f7 100644
--- a/drivers/hwmon/ftsteutates.c
+++ b/drivers/hwmon/ftsteutates.c
@@ -60,7 +60,7 @@
static const unsigned short normal_i2c[] = { 0x73, I2C_CLIENT_END };
-static struct i2c_device_id fts_id[] = {
+static const struct i2c_device_id fts_id[] = {
{ "ftsteutates", 0 },
{ }
};
@@ -435,6 +435,7 @@ clear_temp_alarm(struct device *dev, struct device_attribute *devattr,
goto error;
data->valid = false;
+ ret = count;
error:
mutex_unlock(&data->update_lock);
return ret;
@@ -508,6 +509,7 @@ clear_fan_alarm(struct device *dev, struct device_attribute *devattr,
goto error;
data->valid = false;
+ ret = count;
error:
mutex_unlock(&data->update_lock);
return ret;
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index dd6e17c1076b..c9790e2c3440 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -85,7 +85,7 @@ static umode_t hwmon_dev_name_is_visible(struct kobject *kobj,
return attr->mode;
}
-static struct attribute_group hwmon_dev_attr_group = {
+static const struct attribute_group hwmon_dev_attr_group = {
.attrs = hwmon_dev_attrs,
.is_visible = hwmon_dev_name_is_visible,
};
@@ -135,7 +135,7 @@ static int hwmon_thermal_get_temp(void *data, int *temp)
return 0;
}
-static struct thermal_zone_of_device_ops hwmon_thermal_ops = {
+static const struct thermal_zone_of_device_ops hwmon_thermal_ops = {
.get_temp = hwmon_thermal_get_temp,
};
diff --git a/drivers/hwmon/i5k_amb.c b/drivers/hwmon/i5k_amb.c
index a5a9f457b7f7..9397d2f0e79a 100644
--- a/drivers/hwmon/i5k_amb.c
+++ b/drivers/hwmon/i5k_amb.c
@@ -495,7 +495,7 @@ static struct {
};
#ifdef MODULE
-static struct pci_device_id i5k_amb_ids[] = {
+static const struct pci_device_id i5k_amb_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5000_ERR) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR) },
{ 0, }
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index 4dfc7238313e..f8499cb95fec 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -497,12 +497,14 @@ static const struct it87_devices it87_devices[] = {
#define has_vin3_5v(data) ((data)->features & FEAT_VIN3_5V)
struct it87_sio_data {
+ int sioaddr;
enum chips type;
/* Values read from Super-I/O config space */
u8 revision;
u8 vid_value;
u8 beep_pin;
u8 internal; /* Internal sensors can be labeled */
+ bool need_in7_reroute;
/* Features skipped based on config or DMI */
u16 skip_in;
u8 skip_vid;
@@ -517,6 +519,7 @@ struct it87_sio_data {
*/
struct it87_data {
const struct attribute_group *groups[7];
+ int sioaddr;
enum chips type;
u32 features;
u8 peci_mask;
@@ -532,6 +535,7 @@ struct it87_data {
u16 in_internal; /* Bitfield, internal sensors (for labels) */
u16 has_in; /* Bitfield, voltage sensors enabled */
u8 in[NUM_VIN][3]; /* [nr][0]=in, [1]=min, [2]=max */
+ bool need_in7_reroute;
u8 has_fan; /* Bitfield, fans enabled */
u16 fan[NUM_FAN][2]; /* Register values, [nr][0]=fan, [1]=min */
u8 has_temp; /* Bitfield, temp sensors enabled */
@@ -2487,6 +2491,7 @@ static int __init it87_find(int sioaddr, unsigned short *address,
}
err = 0;
+ sio_data->sioaddr = sioaddr;
sio_data->revision = superio_inb(sioaddr, DEVREV) & 0x0f;
pr_info("Found IT%04x%s chip at 0x%x, revision %d\n", chip_type,
it87_devices[sio_data->type].suffix,
@@ -2575,6 +2580,7 @@ static int __init it87_find(int sioaddr, unsigned short *address,
reg2c |= BIT(1);
superio_outb(sioaddr, IT87_SIO_PINX2_REG,
reg2c);
+ sio_data->need_in7_reroute = true;
pr_notice("Routing internal VCCH5V to in7.\n");
}
pr_notice("in7 routed to internal voltage divider, with external pin disabled.\n");
@@ -2761,13 +2767,13 @@ static int __init it87_find(int sioaddr, unsigned short *address,
uart6 = sio_data->type == it8782 && (reg & BIT(2));
/*
- * The IT8720F has no VIN7 pin, so VCCH should always be
+ * The IT8720F has no VIN7 pin, so VCCH5V should always be
* routed internally to VIN7 with an internal divider.
* Curiously, there still is a configuration bit to control
* this, which means it can be set incorrectly. And even
* more curiously, many boards out there are improperly
* configured, even though the IT8720F datasheet claims
- * that the internal routing of VCCH to VIN7 is the default
+ * that the internal routing of VCCH5V to VIN7 is the default
* setting. So we force the internal routing in this case.
*
* On IT8782F, VIN7 is multiplexed with one of the UART6 pins.
@@ -2777,7 +2783,8 @@ static int __init it87_find(int sioaddr, unsigned short *address,
if ((sio_data->type == it8720 || uart6) && !(reg & BIT(1))) {
reg |= BIT(1);
superio_outb(sioaddr, IT87_SIO_PINX2_REG, reg);
- pr_notice("Routing internal VCCH to in7\n");
+ sio_data->need_in7_reroute = true;
+ pr_notice("Routing internal VCCH5V to in7\n");
}
if (reg & BIT(0))
sio_data->internal |= BIT(0);
@@ -2828,13 +2835,89 @@ exit:
return err;
}
+/*
+ * Some chips seem to have default value 0xff for all limit
+ * registers. For low voltage limits it makes no sense and triggers
+ * alarms, so change to 0 instead. For high temperature limits, it
+ * means -1 degree C, which surprisingly doesn't trigger an alarm,
+ * but is still confusing, so change to 127 degrees C.
+ */
+static void it87_check_limit_regs(struct it87_data *data)
+{
+ int i, reg;
+
+ for (i = 0; i < NUM_VIN_LIMIT; i++) {
+ reg = it87_read_value(data, IT87_REG_VIN_MIN(i));
+ if (reg == 0xff)
+ it87_write_value(data, IT87_REG_VIN_MIN(i), 0);
+ }
+ for (i = 0; i < NUM_TEMP_LIMIT; i++) {
+ reg = it87_read_value(data, IT87_REG_TEMP_HIGH(i));
+ if (reg == 0xff)
+ it87_write_value(data, IT87_REG_TEMP_HIGH(i), 127);
+ }
+}
+
+/* Check if voltage monitors are reset manually or by some reason */
+static void it87_check_voltage_monitors_reset(struct it87_data *data)
+{
+ int reg;
+
+ reg = it87_read_value(data, IT87_REG_VIN_ENABLE);
+ if ((reg & 0xff) == 0) {
+ /* Enable all voltage monitors */
+ it87_write_value(data, IT87_REG_VIN_ENABLE, 0xff);
+ }
+}
+
+/* Check if tachometers are reset manually or by some reason */
+static void it87_check_tachometers_reset(struct platform_device *pdev)
+{
+ struct it87_sio_data *sio_data = dev_get_platdata(&pdev->dev);
+ struct it87_data *data = platform_get_drvdata(pdev);
+ u8 mask, fan_main_ctrl;
+
+ mask = 0x70 & ~(sio_data->skip_fan << 4);
+ fan_main_ctrl = it87_read_value(data, IT87_REG_FAN_MAIN_CTRL);
+ if ((fan_main_ctrl & mask) == 0) {
+ /* Enable all fan tachometers */
+ fan_main_ctrl |= mask;
+ it87_write_value(data, IT87_REG_FAN_MAIN_CTRL,
+ fan_main_ctrl);
+ }
+}
+
+/* Set tachometers to 16-bit mode if needed */
+static void it87_check_tachometers_16bit_mode(struct platform_device *pdev)
+{
+ struct it87_data *data = platform_get_drvdata(pdev);
+ int reg;
+
+ if (!has_fan16_config(data))
+ return;
+
+ reg = it87_read_value(data, IT87_REG_FAN_16BIT);
+ if (~reg & 0x07 & data->has_fan) {
+ dev_dbg(&pdev->dev,
+ "Setting fan1-3 to 16-bit mode\n");
+ it87_write_value(data, IT87_REG_FAN_16BIT,
+ reg | 0x07);
+ }
+}
+
+static void it87_start_monitoring(struct it87_data *data)
+{
+ it87_write_value(data, IT87_REG_CONFIG,
+ (it87_read_value(data, IT87_REG_CONFIG) & 0x3e)
+ | (update_vbat ? 0x41 : 0x01));
+}
+
/* Called when we have found a new IT87. */
static void it87_init_device(struct platform_device *pdev)
{
struct it87_sio_data *sio_data = dev_get_platdata(&pdev->dev);
struct it87_data *data = platform_get_drvdata(pdev);
int tmp, i;
- u8 mask;
/*
* For each PWM channel:
@@ -2855,23 +2938,7 @@ static void it87_init_device(struct platform_device *pdev)
data->auto_pwm[i][3] = 0x7f; /* Full speed, hard-coded */
}
- /*
- * Some chips seem to have default value 0xff for all limit
- * registers. For low voltage limits it makes no sense and triggers
- * alarms, so change to 0 instead. For high temperature limits, it
- * means -1 degree C, which surprisingly doesn't trigger an alarm,
- * but is still confusing, so change to 127 degrees C.
- */
- for (i = 0; i < NUM_VIN_LIMIT; i++) {
- tmp = it87_read_value(data, IT87_REG_VIN_MIN(i));
- if (tmp == 0xff)
- it87_write_value(data, IT87_REG_VIN_MIN(i), 0);
- }
- for (i = 0; i < NUM_TEMP_LIMIT; i++) {
- tmp = it87_read_value(data, IT87_REG_TEMP_HIGH(i));
- if (tmp == 0xff)
- it87_write_value(data, IT87_REG_TEMP_HIGH(i), 127);
- }
+ it87_check_limit_regs(data);
/*
* Temperature channels are not forcibly enabled, as they can be
@@ -2880,38 +2947,19 @@ static void it87_init_device(struct platform_device *pdev)
* run-time through the temp{1-3}_type sysfs accessors if needed.
*/
- /* Check if voltage monitors are reset manually or by some reason */
- tmp = it87_read_value(data, IT87_REG_VIN_ENABLE);
- if ((tmp & 0xff) == 0) {
- /* Enable all voltage monitors */
- it87_write_value(data, IT87_REG_VIN_ENABLE, 0xff);
- }
+ it87_check_voltage_monitors_reset(data);
+
+ it87_check_tachometers_reset(pdev);
- /* Check if tachometers are reset manually or by some reason */
- mask = 0x70 & ~(sio_data->skip_fan << 4);
data->fan_main_ctrl = it87_read_value(data, IT87_REG_FAN_MAIN_CTRL);
- if ((data->fan_main_ctrl & mask) == 0) {
- /* Enable all fan tachometers */
- data->fan_main_ctrl |= mask;
- it87_write_value(data, IT87_REG_FAN_MAIN_CTRL,
- data->fan_main_ctrl);
- }
data->has_fan = (data->fan_main_ctrl >> 4) & 0x07;
- tmp = it87_read_value(data, IT87_REG_FAN_16BIT);
-
- /* Set tachometers to 16-bit mode if needed */
- if (has_fan16_config(data)) {
- if (~tmp & 0x07 & data->has_fan) {
- dev_dbg(&pdev->dev,
- "Setting fan1-3 to 16-bit mode\n");
- it87_write_value(data, IT87_REG_FAN_16BIT,
- tmp | 0x07);
- }
- }
+ it87_check_tachometers_16bit_mode(pdev);
/* Check for additional fans */
if (has_five_fans(data)) {
+ tmp = it87_read_value(data, IT87_REG_FAN_16BIT);
+
if (tmp & BIT(4))
data->has_fan |= BIT(3); /* fan4 enabled */
if (tmp & BIT(5))
@@ -2933,10 +2981,7 @@ static void it87_init_device(struct platform_device *pdev)
sio_data->skip_pwm |= BIT(5);
}
- /* Start monitoring */
- it87_write_value(data, IT87_REG_CONFIG,
- (it87_read_value(data, IT87_REG_CONFIG) & 0x3e)
- | (update_vbat ? 0x41 : 0x01));
+ it87_start_monitoring(data);
}
/* Return 1 if and only if the PWM interface is safe to use */
@@ -2986,8 +3031,6 @@ static int it87_check_pwm(struct device *dev)
"PWM configuration is too broken to be fixed\n");
}
- dev_info(dev,
- "Detected broken BIOS defaults, disabling PWM interface\n");
return 0;
} else if (fix_pwm_polarity) {
dev_info(dev,
@@ -3020,6 +3063,7 @@ static int it87_probe(struct platform_device *pdev)
return -ENOMEM;
data->addr = res->start;
+ data->sioaddr = sio_data->sioaddr;
data->type = sio_data->type;
data->features = it87_devices[sio_data->type].features;
data->peci_mask = it87_devices[sio_data->type].peci_mask;
@@ -3058,6 +3102,9 @@ static int it87_probe(struct platform_device *pdev)
/* Check PWM configuration */
enable_pwm_interface = it87_check_pwm(dev);
+ if (!enable_pwm_interface)
+ dev_info(dev,
+ "Detected broken BIOS defaults, disabling PWM interface\n");
/* Starting with IT8721F, we handle scaling of internal voltages */
if (has_12mv_adc(data)) {
@@ -3085,6 +3132,7 @@ static int it87_probe(struct platform_device *pdev)
}
data->in_internal = sio_data->internal;
+ data->need_in7_reroute = sio_data->need_in7_reroute;
data->has_in = 0x3ff & ~sio_data->skip_in;
if (has_six_temp(data)) {
@@ -3140,9 +3188,71 @@ static int it87_probe(struct platform_device *pdev)
return PTR_ERR_OR_ZERO(hwmon_dev);
}
+static void __maybe_unused it87_resume_sio(struct platform_device *pdev)
+{
+ struct it87_data *data = dev_get_drvdata(&pdev->dev);
+ int err;
+ int reg2c;
+
+ if (!data->need_in7_reroute)
+ return;
+
+ err = superio_enter(data->sioaddr);
+ if (err) {
+ dev_warn(&pdev->dev,
+ "Unable to enter Super I/O to reroute in7 (%d)",
+ err);
+ return;
+ }
+
+ superio_select(data->sioaddr, GPIO);
+
+ reg2c = superio_inb(data->sioaddr, IT87_SIO_PINX2_REG);
+ if (!(reg2c & BIT(1))) {
+ dev_dbg(&pdev->dev,
+ "Routing internal VCCH5V to in7 again");
+
+ reg2c |= BIT(1);
+ superio_outb(data->sioaddr, IT87_SIO_PINX2_REG,
+ reg2c);
+ }
+
+ superio_exit(data->sioaddr);
+}
+
+static int __maybe_unused it87_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct it87_data *data = dev_get_drvdata(dev);
+
+ it87_resume_sio(pdev);
+
+ mutex_lock(&data->update_lock);
+
+ it87_check_pwm(dev);
+ it87_check_limit_regs(data);
+ it87_check_voltage_monitors_reset(data);
+ it87_check_tachometers_reset(pdev);
+ it87_check_tachometers_16bit_mode(pdev);
+
+ it87_start_monitoring(data);
+
+ /* force update */
+ data->valid = 0;
+
+ mutex_unlock(&data->update_lock);
+
+ it87_update_device(dev);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(it87_dev_pm_ops, NULL, it87_resume);
+
static struct platform_driver it87_driver = {
.driver = {
.name = DRVNAME,
+ .pm = &it87_dev_pm_ops,
},
.probe = it87_probe,
};
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index 1bf22eff0b08..5f11dc014ed6 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -72,6 +72,8 @@ static const unsigned short normal_i2c[] = {
#define NXP_MANID 0x1131 /* NXP Semiconductors */
#define ONS_MANID 0x1b09 /* ON Semiconductor */
#define STM_MANID 0x104a /* ST Microelectronics */
+#define GT_MANID 0x1c68 /* Giantec */
+#define GT_MANID2 0x132d /* Giantec, 2nd mfg ID */
/* Supported chips */
@@ -86,6 +88,13 @@ static const unsigned short normal_i2c[] = {
#define AT30TSE004_DEVID 0x2200
#define AT30TSE004_DEVID_MASK 0xffff
+/* Giantec */
+#define GT30TS00_DEVID 0x2200
+#define GT30TS00_DEVID_MASK 0xff00
+
+#define GT34TS02_DEVID 0x3300
+#define GT34TS02_DEVID_MASK 0xff00
+
/* IDT */
#define TSE2004_DEVID 0x2200
#define TSE2004_DEVID_MASK 0xff00
@@ -130,6 +139,12 @@ static const unsigned short normal_i2c[] = {
#define CAT6095_DEVID 0x0800 /* Also matches CAT34TS02 */
#define CAT6095_DEVID_MASK 0xffe0
+#define CAT34TS02C_DEVID 0x0a00
+#define CAT34TS02C_DEVID_MASK 0xfff0
+
+#define CAT34TS04_DEVID 0x2200
+#define CAT34TS04_DEVID_MASK 0xfff0
+
/* ST Microelectronics */
#define STTS424_DEVID 0x0101
#define STTS424_DEVID_MASK 0xffff
@@ -158,6 +173,8 @@ static struct jc42_chips jc42_chips[] = {
{ ADT_MANID, ADT7408_DEVID, ADT7408_DEVID_MASK },
{ ATMEL_MANID, AT30TS00_DEVID, AT30TS00_DEVID_MASK },
{ ATMEL_MANID2, AT30TSE004_DEVID, AT30TSE004_DEVID_MASK },
+ { GT_MANID, GT30TS00_DEVID, GT30TS00_DEVID_MASK },
+ { GT_MANID2, GT34TS02_DEVID, GT34TS02_DEVID_MASK },
{ IDT_MANID, TSE2004_DEVID, TSE2004_DEVID_MASK },
{ IDT_MANID, TS3000_DEVID, TS3000_DEVID_MASK },
{ IDT_MANID, TS3001_DEVID, TS3001_DEVID_MASK },
@@ -170,6 +187,8 @@ static struct jc42_chips jc42_chips[] = {
{ MCP_MANID, MCP9843_DEVID, MCP9843_DEVID_MASK },
{ NXP_MANID, SE97_DEVID, SE97_DEVID_MASK },
{ ONS_MANID, CAT6095_DEVID, CAT6095_DEVID_MASK },
+ { ONS_MANID, CAT34TS02C_DEVID, CAT34TS02C_DEVID_MASK },
+ { ONS_MANID, CAT34TS04_DEVID, CAT34TS04_DEVID_MASK },
{ NXP_MANID, SE98_DEVID, SE98_DEVID_MASK },
{ STM_MANID, STTS424_DEVID, STTS424_DEVID_MASK },
{ STM_MANID, STTS424E_DEVID, STTS424E_DEVID_MASK },
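The new Giantec and ON Semiconductor entries rely on the same masked match as the existing table rows: a chip is recognised when its device ID, after the mask is applied, equals the table's ID. A minimal sketch of that comparison (the driver's detect path does the equivalent):

/* Sketch only: how a (devid, mask) table entry above is matched. */
static bool jc42_devid_matches(u16 devid, u16 entry_devid, u16 entry_mask)
{
	return (devid & entry_mask) == entry_devid;
}

/* e.g. GT34TS02: mask 0xff00, so any read-back ID of the form 0x33xx matches. */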
diff --git a/drivers/hwmon/ltq-cputemp.c b/drivers/hwmon/ltq-cputemp.c
new file mode 100644
index 000000000000..1d33f94594c1
--- /dev/null
+++ b/drivers/hwmon/ltq-cputemp.c
@@ -0,0 +1,163 @@
+/* Lantiq cpu temperature sensor driver
+ *
+ * Copyright (C) 2017 Florian Eckert <fe@dev.tdt.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version
+ *
+ * This program is distributed in the hope that it will be useful
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+
+#include <lantiq_soc.h>
+
+/* gphy1 configuration register contains cpu temperature */
+#define CGU_GPHY1_CR 0x0040
+#define CGU_TEMP_PD BIT(19)
+
+static void ltq_cputemp_enable(void)
+{
+ ltq_cgu_w32(ltq_cgu_r32(CGU_GPHY1_CR) | CGU_TEMP_PD, CGU_GPHY1_CR);
+}
+
+static void ltq_cputemp_disable(void *data)
+{
+ ltq_cgu_w32(ltq_cgu_r32(CGU_GPHY1_CR) & ~CGU_TEMP_PD, CGU_GPHY1_CR);
+}
+
+static int ltq_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *temp)
+{
+ int value;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ /* get the temperature including one decimal place */
+ value = (ltq_cgu_r32(CGU_GPHY1_CR) >> 9) & 0x01FF;
+ value = value * 5;
+ /* range -38 to +154 °C, register value zero is -38.0 °C */
+ value -= 380;
+ /* scale temp to millidegree */
+ value = value * 100;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ *temp = value;
+ return 0;
+}
+
+static umode_t ltq_is_visible(const void *_data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ if (type != hwmon_temp)
+ return 0;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ return 0444;
+ default:
+ return 0;
+ }
+}
+
+static const u32 ltq_chip_config[] = {
+ HWMON_C_REGISTER_TZ,
+ 0
+};
+
+static const struct hwmon_channel_info ltq_chip = {
+ .type = hwmon_chip,
+ .config = ltq_chip_config,
+};
+
+static const u32 ltq_temp_config[] = {
+ HWMON_T_INPUT,
+ 0
+};
+
+static const struct hwmon_channel_info ltq_temp = {
+ .type = hwmon_temp,
+ .config = ltq_temp_config,
+};
+
+static const struct hwmon_channel_info *ltq_info[] = {
+ &ltq_chip,
+ &ltq_temp,
+ NULL
+};
+
+static const struct hwmon_ops ltq_hwmon_ops = {
+ .is_visible = ltq_is_visible,
+ .read = ltq_read,
+};
+
+static const struct hwmon_chip_info ltq_chip_info = {
+ .ops = &ltq_hwmon_ops,
+ .info = ltq_info,
+};
+
+static int ltq_cputemp_probe(struct platform_device *pdev)
+{
+ struct device *hwmon_dev;
+ int err = 0;
+
+ /* available on vr9 v1.2 SoCs only */
+ if (ltq_soc_type() != SOC_TYPE_VR9_2)
+ return -ENODEV;
+
+ err = devm_add_action(&pdev->dev, ltq_cputemp_disable, NULL);
+ if (err)
+ return err;
+
+ ltq_cputemp_enable();
+
+ hwmon_dev = devm_hwmon_device_register_with_info(&pdev->dev,
+ "ltq_cputemp",
+ NULL,
+ &ltq_chip_info,
+ NULL);
+
+ if (IS_ERR(hwmon_dev)) {
+ dev_err(&pdev->dev, "Failed to register as hwmon device\n");
+ return PTR_ERR(hwmon_dev);
+ }
+
+ return 0;
+}
+
+const struct of_device_id ltq_cputemp_match[] = {
+ { .compatible = "lantiq,cputemp" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ltq_cputemp_match);
+
+static struct platform_driver ltq_cputemp_driver = {
+ .probe = ltq_cputemp_probe,
+ .driver = {
+ .name = "ltq-cputemp",
+ .of_match_table = ltq_cputemp_match,
+ },
+};
+
+module_platform_driver(ltq_cputemp_driver);
+
+MODULE_AUTHOR("Florian Eckert <fe@dev.tdt.de>");
+MODULE_DESCRIPTION("Lantiq cpu temperature sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
index 12b94b094c0d..2876c18ed841 100644
--- a/drivers/hwmon/nct7802.c
+++ b/drivers/hwmon/nct7802.c
@@ -704,7 +704,7 @@ static umode_t nct7802_temp_is_visible(struct kobject *kobj,
return attr->mode;
}
-static struct attribute_group nct7802_temp_group = {
+static const struct attribute_group nct7802_temp_group = {
.attrs = nct7802_temp_attrs,
.is_visible = nct7802_temp_is_visible,
};
@@ -802,7 +802,7 @@ static umode_t nct7802_in_is_visible(struct kobject *kobj,
return attr->mode;
}
-static struct attribute_group nct7802_in_group = {
+static const struct attribute_group nct7802_in_group = {
.attrs = nct7802_in_attrs,
.is_visible = nct7802_in_is_visible,
};
@@ -880,7 +880,7 @@ static umode_t nct7802_fan_is_visible(struct kobject *kobj,
return attr->mode;
}
-static struct attribute_group nct7802_fan_group = {
+static const struct attribute_group nct7802_fan_group = {
.attrs = nct7802_fan_attrs,
.is_visible = nct7802_fan_is_visible,
};
@@ -898,7 +898,7 @@ static struct attribute *nct7802_pwm_attrs[] = {
NULL
};
-static struct attribute_group nct7802_pwm_group = {
+static const struct attribute_group nct7802_pwm_group = {
.attrs = nct7802_pwm_attrs,
};
@@ -1011,7 +1011,7 @@ static struct attribute *nct7802_auto_point_attrs[] = {
NULL
};
-static struct attribute_group nct7802_auto_point_group = {
+static const struct attribute_group nct7802_auto_point_group = {
.attrs = nct7802_auto_point_attrs,
};
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 68d717a3fd59..40019325b517 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -37,6 +37,15 @@ config SENSORS_ADM1275
This driver can also be built as a module. If so, the module will
be called adm1275.
+config SENSORS_IBM_CFFPS
+ tristate "IBM Common Form Factor Power Supply"
+ help
+ If you say yes here you get hardware monitoring support for the IBM
+ Common Form Factor power supply.
+
+ This driver can also be built as a module. If so, the module will
+ be called ibm-cffps.
+
config SENSORS_IR35221
tristate "Infineon IR35221"
default n
@@ -135,6 +144,15 @@ config SENSORS_TPS40422
This driver can also be built as a module. If so, the module will
be called tps40422.
+config SENSORS_TPS53679
+ tristate "TI TPS53679"
+ help
+ If you say yes here you get hardware monitoring support for TI
+ TPS53679.
+
+ This driver can also be built as a module. If so, the module will
+ be called tps53679.
+
config SENSORS_UCD9000
tristate "TI UCD90120, UCD90124, UCD90160, UCD9090, UCD90910"
default n
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index 75bb7ca619d9..459a6be3390e 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -5,6 +5,7 @@
obj-$(CONFIG_PMBUS) += pmbus_core.o
obj-$(CONFIG_SENSORS_PMBUS) += pmbus.o
obj-$(CONFIG_SENSORS_ADM1275) += adm1275.o
+obj-$(CONFIG_SENSORS_IBM_CFFPS) += ibm-cffps.o
obj-$(CONFIG_SENSORS_IR35221) += ir35221.o
obj-$(CONFIG_SENSORS_LM25066) += lm25066.o
obj-$(CONFIG_SENSORS_LTC2978) += ltc2978.o
@@ -14,6 +15,7 @@ obj-$(CONFIG_SENSORS_MAX20751) += max20751.o
obj-$(CONFIG_SENSORS_MAX34440) += max34440.o
obj-$(CONFIG_SENSORS_MAX8688) += max8688.o
obj-$(CONFIG_SENSORS_TPS40422) += tps40422.o
+obj-$(CONFIG_SENSORS_TPS53679) += tps53679.o
obj-$(CONFIG_SENSORS_UCD9000) += ucd9000.o
obj-$(CONFIG_SENSORS_UCD9200) += ucd9200.o
obj-$(CONFIG_SENSORS_ZL6100) += zl6100.o
diff --git a/drivers/hwmon/pmbus/ibm-cffps.c b/drivers/hwmon/pmbus/ibm-cffps.c
new file mode 100644
index 000000000000..cb56da6834e5
--- /dev/null
+++ b/drivers/hwmon/pmbus/ibm-cffps.c
@@ -0,0 +1,151 @@
+/*
+ * Copyright 2017 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+
+#include "pmbus.h"
+
+/* STATUS_MFR_SPECIFIC bits */
+#define CFFPS_MFR_FAN_FAULT BIT(0)
+#define CFFPS_MFR_THERMAL_FAULT BIT(1)
+#define CFFPS_MFR_OV_FAULT BIT(2)
+#define CFFPS_MFR_UV_FAULT BIT(3)
+#define CFFPS_MFR_PS_KILL BIT(4)
+#define CFFPS_MFR_OC_FAULT BIT(5)
+#define CFFPS_MFR_VAUX_FAULT BIT(6)
+#define CFFPS_MFR_CURRENT_SHARE_WARNING BIT(7)
+
+static int ibm_cffps_read_byte_data(struct i2c_client *client, int page,
+ int reg)
+{
+ int rc, mfr;
+
+ switch (reg) {
+ case PMBUS_STATUS_VOUT:
+ case PMBUS_STATUS_IOUT:
+ case PMBUS_STATUS_TEMPERATURE:
+ case PMBUS_STATUS_FAN_12:
+ rc = pmbus_read_byte_data(client, page, reg);
+ if (rc < 0)
+ return rc;
+
+ mfr = pmbus_read_byte_data(client, page,
+ PMBUS_STATUS_MFR_SPECIFIC);
+ if (mfr < 0)
+ /*
+ * Return the status register instead of an error,
+ * since we successfully read status.
+ */
+ return rc;
+
+ /* Add MFR_SPECIFIC bits to the standard pmbus status regs. */
+ if (reg == PMBUS_STATUS_FAN_12) {
+ if (mfr & CFFPS_MFR_FAN_FAULT)
+ rc |= PB_FAN_FAN1_FAULT;
+ } else if (reg == PMBUS_STATUS_TEMPERATURE) {
+ if (mfr & CFFPS_MFR_THERMAL_FAULT)
+ rc |= PB_TEMP_OT_FAULT;
+ } else if (reg == PMBUS_STATUS_VOUT) {
+ if (mfr & (CFFPS_MFR_OV_FAULT | CFFPS_MFR_VAUX_FAULT))
+ rc |= PB_VOLTAGE_OV_FAULT;
+ if (mfr & CFFPS_MFR_UV_FAULT)
+ rc |= PB_VOLTAGE_UV_FAULT;
+ } else if (reg == PMBUS_STATUS_IOUT) {
+ if (mfr & CFFPS_MFR_OC_FAULT)
+ rc |= PB_IOUT_OC_FAULT;
+ if (mfr & CFFPS_MFR_CURRENT_SHARE_WARNING)
+ rc |= PB_CURRENT_SHARE_FAULT;
+ }
+ break;
+ default:
+ rc = -ENODATA;
+ break;
+ }
+
+ return rc;
+}
+
+static int ibm_cffps_read_word_data(struct i2c_client *client, int page,
+ int reg)
+{
+ int rc, mfr;
+
+ switch (reg) {
+ case PMBUS_STATUS_WORD:
+ rc = pmbus_read_word_data(client, page, reg);
+ if (rc < 0)
+ return rc;
+
+ mfr = pmbus_read_byte_data(client, page,
+ PMBUS_STATUS_MFR_SPECIFIC);
+ if (mfr < 0)
+ /*
+ * Return the status register instead of an error,
+ * since we successfully read status.
+ */
+ return rc;
+
+ if (mfr & CFFPS_MFR_PS_KILL)
+ rc |= PB_STATUS_OFF;
+ break;
+ default:
+ rc = -ENODATA;
+ break;
+ }
+
+ return rc;
+}
+
+static struct pmbus_driver_info ibm_cffps_info = {
+ .pages = 1,
+ .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT |
+ PMBUS_HAVE_PIN | PMBUS_HAVE_FAN12 | PMBUS_HAVE_TEMP |
+ PMBUS_HAVE_TEMP2 | PMBUS_HAVE_TEMP3 | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_STATUS_INPUT |
+ PMBUS_HAVE_STATUS_TEMP | PMBUS_HAVE_STATUS_FAN12,
+ .read_byte_data = ibm_cffps_read_byte_data,
+ .read_word_data = ibm_cffps_read_word_data,
+};
+
+static int ibm_cffps_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ return pmbus_do_probe(client, id, &ibm_cffps_info);
+}
+
+static const struct i2c_device_id ibm_cffps_id[] = {
+ { "ibm_cffps1", 1 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ibm_cffps_id);
+
+static const struct of_device_id ibm_cffps_of_match[] = {
+ { .compatible = "ibm,cffps1" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ibm_cffps_of_match);
+
+static struct i2c_driver ibm_cffps_driver = {
+ .driver = {
+ .name = "ibm-cffps",
+ .of_match_table = ibm_cffps_of_match,
+ },
+ .probe = ibm_cffps_probe,
+ .remove = pmbus_do_remove,
+ .id_table = ibm_cffps_id,
+};
+
+module_i2c_driver(ibm_cffps_driver);
+
+MODULE_AUTHOR("Eddie James");
+MODULE_DESCRIPTION("PMBus driver for IBM Common Form Factor power supplies");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/pmbus/lm25066.c b/drivers/hwmon/pmbus/lm25066.c
index a3d912cd3b8d..10d17fb8f283 100644
--- a/drivers/hwmon/pmbus/lm25066.c
+++ b/drivers/hwmon/pmbus/lm25066.c
@@ -28,7 +28,7 @@
#include <linux/i2c.h>
#include "pmbus.h"
-enum chips { lm25056, lm25063, lm25066, lm5064, lm5066 };
+enum chips { lm25056, lm25063, lm25066, lm5064, lm5066, lm5066i };
#define LM25066_READ_VAUX 0xd0
#define LM25066_MFR_READ_IIN 0xd1
@@ -65,7 +65,7 @@ struct __coeff {
#define PSC_CURRENT_IN_L (PSC_NUM_CLASSES)
#define PSC_POWER_L (PSC_NUM_CLASSES + 1)
-static struct __coeff lm25066_coeff[5][PSC_NUM_CLASSES + 2] = {
+static struct __coeff lm25066_coeff[6][PSC_NUM_CLASSES + 2] = {
[lm25056] = {
[PSC_VOLTAGE_IN] = {
.m = 16296,
@@ -210,6 +210,41 @@ static struct __coeff lm25066_coeff[5][PSC_NUM_CLASSES + 2] = {
.m = 16,
},
},
+ [lm5066i] = {
+ [PSC_VOLTAGE_IN] = {
+ .m = 4617,
+ .b = -140,
+ .R = -2,
+ },
+ [PSC_VOLTAGE_OUT] = {
+ .m = 4602,
+ .b = 500,
+ .R = -2,
+ },
+ [PSC_CURRENT_IN] = {
+ .m = 15076,
+ .b = -504,
+ .R = -2,
+ },
+ [PSC_CURRENT_IN_L] = {
+ .m = 7645,
+ .b = 100,
+ .R = -2,
+ },
+ [PSC_POWER] = {
+ .m = 1701,
+ .b = -4000,
+ .R = -3,
+ },
+ [PSC_POWER_L] = {
+ .m = 861,
+ .b = -965,
+ .R = -3,
+ },
+ [PSC_TEMPERATURE] = {
+ .m = 16,
+ },
+ },
};
struct lm25066_data {
@@ -250,6 +285,7 @@ static int lm25066_read_word_data(struct i2c_client *client, int page, int reg)
ret = DIV_ROUND_CLOSEST(ret * 70, 453);
break;
case lm5066:
+ case lm5066i:
/* VIN: 2.18 mV VAUX: 725 uV LSB */
ret = DIV_ROUND_CLOSEST(ret * 725, 2180);
break;
@@ -488,16 +524,18 @@ static int lm25066_probe(struct i2c_client *client,
info->m[PSC_VOLTAGE_OUT] = coeff[PSC_VOLTAGE_OUT].m;
info->b[PSC_VOLTAGE_OUT] = coeff[PSC_VOLTAGE_OUT].b;
info->R[PSC_VOLTAGE_OUT] = coeff[PSC_VOLTAGE_OUT].R;
- info->b[PSC_CURRENT_IN] = coeff[PSC_CURRENT_IN].b;
info->R[PSC_CURRENT_IN] = coeff[PSC_CURRENT_IN].R;
- info->b[PSC_POWER] = coeff[PSC_POWER].b;
info->R[PSC_POWER] = coeff[PSC_POWER].R;
if (config & LM25066_DEV_SETUP_CL) {
info->m[PSC_CURRENT_IN] = coeff[PSC_CURRENT_IN_L].m;
+ info->b[PSC_CURRENT_IN] = coeff[PSC_CURRENT_IN_L].b;
info->m[PSC_POWER] = coeff[PSC_POWER_L].m;
+ info->b[PSC_POWER] = coeff[PSC_POWER_L].b;
} else {
info->m[PSC_CURRENT_IN] = coeff[PSC_CURRENT_IN].m;
+ info->b[PSC_CURRENT_IN] = coeff[PSC_CURRENT_IN].b;
info->m[PSC_POWER] = coeff[PSC_POWER].m;
+ info->b[PSC_POWER] = coeff[PSC_POWER].b;
}
return pmbus_do_probe(client, id, info);
@@ -509,6 +547,7 @@ static const struct i2c_device_id lm25066_id[] = {
{"lm25066", lm25066},
{"lm5064", lm5064},
{"lm5066", lm5066},
+ {"lm5066i", lm5066i},
{ }
};
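The m/b/R triplets added for the lm5066i follow the PMBus "direct" data format, in which a register word Y encodes a real-world value X via Y = (m * X + b) * 10^R. Below is a small sketch of the inverse conversion, using the new VIN coefficients as a worked example; it illustrates the format and is not the pmbus core's actual code:

/*
 * Illustration of PMBus direct-format decoding: X = (Y * 10^-R - b) / m.
 * Returns milli-units.  Example with the lm5066i VIN coefficients above
 * (m = 4617, b = -140, R = -2): Y = 1000 -> about 21689 mV.
 */
static long direct_to_milli(long y, long m, long b, int R)
{
	long val = y * 1000L;		/* work in milli-units */
	int r = R;

	for (; r > 0; r--)		/* apply 10^-R */
		val /= 10;
	for (; r < 0; r++)
		val *= 10;

	return (val - b * 1000L) / m;
}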
diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
index bfcb13bae34b..4efa2bd4f6d8 100644
--- a/drivers/hwmon/pmbus/pmbus.h
+++ b/drivers/hwmon/pmbus/pmbus.h
@@ -341,7 +341,7 @@ enum pmbus_sensor_classes {
#define PMBUS_HAVE_STATUS_VMON BIT(19)
enum pmbus_data_format { linear = 0, direct, vid };
-enum vrm_version { vr11 = 0, vr12 };
+enum vrm_version { vr11 = 0, vr12, vr13 };
struct pmbus_driver_info {
int pages; /* Total number of pages */
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index f1eff6b6c798..302f0aef59de 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -19,6 +19,7 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -101,6 +102,7 @@ struct pmbus_data {
int num_attributes;
struct attribute_group group;
const struct attribute_group *groups[2];
+ struct dentry *debugfs; /* debugfs device directory */
struct pmbus_sensor *sensors;
@@ -112,12 +114,20 @@ struct pmbus_data {
* A single status register covers multiple attributes,
* so we keep them all together.
*/
- u8 status[PB_NUM_STATUS_REG];
- u8 status_register;
+ u16 status[PB_NUM_STATUS_REG];
+
+ bool has_status_word; /* device uses STATUS_WORD register */
+ int (*read_status)(struct i2c_client *client, int page);
u8 currpage;
};
+struct pmbus_debugfs_entry {
+ struct i2c_client *client;
+ u8 page;
+ u8 reg;
+};
+
void pmbus_clear_cache(struct i2c_client *client)
{
struct pmbus_data *data = i2c_get_clientdata(client);
@@ -324,7 +334,7 @@ static int pmbus_check_status_cml(struct i2c_client *client)
struct pmbus_data *data = i2c_get_clientdata(client);
int status, status2;
- status = _pmbus_read_byte_data(client, -1, data->status_register);
+ status = data->read_status(client, -1);
if (status < 0 || (status & PB_STATUS_CML)) {
status2 = _pmbus_read_byte_data(client, -1, PMBUS_STATUS_CML);
if (status2 < 0 || (status2 & PB_CML_FAULT_INVALID_COMMAND))
@@ -348,6 +358,23 @@ static bool pmbus_check_register(struct i2c_client *client,
return rv >= 0;
}
+static bool pmbus_check_status_register(struct i2c_client *client, int page)
+{
+ int status;
+ struct pmbus_data *data = i2c_get_clientdata(client);
+
+ status = data->read_status(client, page);
+ if (status >= 0 && !(data->flags & PMBUS_SKIP_STATUS_CHECK) &&
+ (status & PB_STATUS_CML)) {
+ status = _pmbus_read_byte_data(client, -1, PMBUS_STATUS_CML);
+ if (status < 0 || (status & PB_CML_FAULT_INVALID_COMMAND))
+ status = -EIO;
+ }
+
+ pmbus_clear_fault_page(client, -1);
+ return status >= 0;
+}
+
bool pmbus_check_byte_register(struct i2c_client *client, int page, int reg)
{
return pmbus_check_register(client, _pmbus_read_byte_data, page, reg);
@@ -394,8 +421,7 @@ static struct pmbus_data *pmbus_update_device(struct device *dev)
for (i = 0; i < info->pages; i++) {
data->status[PB_STATUS_BASE + i]
- = _pmbus_read_byte_data(client, i,
- data->status_register);
+ = data->read_status(client, i);
for (j = 0; j < ARRAY_SIZE(pmbus_status); j++) {
struct _pmbus_status *s = &pmbus_status[j];
@@ -531,6 +557,10 @@ static long pmbus_reg2data_vid(struct pmbus_data *data,
if (val >= 0x01)
rv = 250 + (val - 1) * 5;
break;
+ case vr13:
+ if (val >= 0x01)
+ rv = 500 + (val - 1) * 10;
+ break;
}
return rv;
}
@@ -716,10 +746,10 @@ static int pmbus_get_boolean(struct pmbus_data *data, struct pmbus_boolean *b,
{
struct pmbus_sensor *s1 = b->s1;
struct pmbus_sensor *s2 = b->s2;
- u16 reg = (index >> 8) & 0xffff;
- u8 mask = index & 0xff;
+ u16 reg = (index >> 16) & 0xffff;
+ u16 mask = index & 0xffff;
int ret, status;
- u8 regval;
+ u16 regval;
status = data->status[reg];
if (status < 0)
@@ -860,7 +890,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
const char *name, const char *type, int seq,
struct pmbus_sensor *s1,
struct pmbus_sensor *s2,
- u16 reg, u8 mask)
+ u16 reg, u16 mask)
{
struct pmbus_boolean *boolean;
struct sensor_device_attribute *a;
@@ -876,7 +906,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
boolean->s1 = s1;
boolean->s2 = s2;
pmbus_attr_init(a, boolean->name, S_IRUGO, pmbus_show_boolean, NULL,
- (reg << 8) | mask);
+ (reg << 16) | mask);
return pmbus_add_attribute(data, &a->dev_attr.attr);
}
@@ -962,7 +992,7 @@ struct pmbus_limit_attr {
*/
struct pmbus_sensor_attr {
u16 reg; /* sensor register */
- u8 gbit; /* generic status bit */
+ u16 gbit; /* generic status bit */
u8 nlimit; /* # of limit registers */
enum pmbus_sensor_classes class;/* sensor class */
const char *label; /* sensor label */
@@ -1028,6 +1058,7 @@ static int pmbus_add_sensor_attrs_one(struct i2c_client *client,
const struct pmbus_sensor_attr *attr)
{
struct pmbus_sensor *base;
+ bool upper = !!(attr->gbit & 0xff00); /* need to check STATUS_WORD */
int ret;
if (attr->label) {
@@ -1048,11 +1079,12 @@ static int pmbus_add_sensor_attrs_one(struct i2c_client *client,
/*
* Add generic alarm attribute only if there are no individual
* alarm attributes, if there is a global alarm bit, and if
- * the generic status register for this page is accessible.
+ * the generic status register (word or byte, depending on
+ * which global bit is set) for this page is accessible.
*/
if (!ret && attr->gbit &&
- pmbus_check_byte_register(client, page,
- data->status_register)) {
+ (!upper || (upper && data->has_status_word)) &&
+ pmbus_check_status_register(client, page)) {
ret = pmbus_add_boolean(data, name, "alarm", index,
NULL, NULL,
PB_STATUS_BASE + page,
@@ -1308,6 +1340,7 @@ static const struct pmbus_sensor_attr current_attributes[] = {
.func = PMBUS_HAVE_IIN,
.sfunc = PMBUS_HAVE_STATUS_INPUT,
.sbase = PB_STATUS_INPUT_BASE,
+ .gbit = PB_STATUS_INPUT,
.limit = iin_limit_attrs,
.nlimit = ARRAY_SIZE(iin_limit_attrs),
}, {
@@ -1392,6 +1425,7 @@ static const struct pmbus_sensor_attr power_attributes[] = {
.func = PMBUS_HAVE_PIN,
.sfunc = PMBUS_HAVE_STATUS_INPUT,
.sbase = PB_STATUS_INPUT_BASE,
+ .gbit = PB_STATUS_INPUT,
.limit = pin_limit_attrs,
.nlimit = ARRAY_SIZE(pin_limit_attrs),
}, {
@@ -1729,6 +1763,16 @@ static int pmbus_identify_common(struct i2c_client *client,
return 0;
}
+static int pmbus_read_status_byte(struct i2c_client *client, int page)
+{
+ return _pmbus_read_byte_data(client, page, PMBUS_STATUS_BYTE);
+}
+
+static int pmbus_read_status_word(struct i2c_client *client, int page)
+{
+ return _pmbus_read_word_data(client, page, PMBUS_STATUS_WORD);
+}
+
static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
struct pmbus_driver_info *info)
{
@@ -1736,19 +1780,21 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
int page, ret;
/*
- * Some PMBus chips don't support PMBUS_STATUS_BYTE, so try
- * to use PMBUS_STATUS_WORD instead if that is the case.
+ * Some PMBus chips don't support PMBUS_STATUS_WORD, so try
+ * to use PMBUS_STATUS_BYTE instead if that is the case.
* Bail out if both registers are not supported.
*/
- data->status_register = PMBUS_STATUS_BYTE;
- ret = i2c_smbus_read_byte_data(client, PMBUS_STATUS_BYTE);
- if (ret < 0 || ret == 0xff) {
- data->status_register = PMBUS_STATUS_WORD;
- ret = i2c_smbus_read_word_data(client, PMBUS_STATUS_WORD);
- if (ret < 0 || ret == 0xffff) {
+ data->read_status = pmbus_read_status_word;
+ ret = i2c_smbus_read_word_data(client, PMBUS_STATUS_WORD);
+ if (ret < 0 || ret == 0xffff) {
+ data->read_status = pmbus_read_status_byte;
+ ret = i2c_smbus_read_byte_data(client, PMBUS_STATUS_BYTE);
+ if (ret < 0 || ret == 0xff) {
dev_err(dev, "PMBus status register not found\n");
return -ENODEV;
}
+ } else {
+ data->has_status_word = true;
}
/* Enable PEC if the controller supports it */
@@ -1859,6 +1905,184 @@ static int pmbus_regulator_register(struct pmbus_data *data)
}
#endif
+static struct dentry *pmbus_debugfs_dir; /* pmbus debugfs directory */
+
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+static int pmbus_debugfs_get(void *data, u64 *val)
+{
+ int rc;
+ struct pmbus_debugfs_entry *entry = data;
+
+ rc = _pmbus_read_byte_data(entry->client, entry->page, entry->reg);
+ if (rc < 0)
+ return rc;
+
+ *val = rc;
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(pmbus_debugfs_ops, pmbus_debugfs_get, NULL,
+ "0x%02llx\n");
+
+static int pmbus_debugfs_get_status(void *data, u64 *val)
+{
+ int rc;
+ struct pmbus_debugfs_entry *entry = data;
+ struct pmbus_data *pdata = i2c_get_clientdata(entry->client);
+
+ rc = pdata->read_status(entry->client, entry->page);
+ if (rc < 0)
+ return rc;
+
+ *val = rc;
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(pmbus_debugfs_ops_status, pmbus_debugfs_get_status,
+ NULL, "0x%04llx\n");
+
+static int pmbus_init_debugfs(struct i2c_client *client,
+ struct pmbus_data *data)
+{
+ int i, idx = 0;
+ char name[PMBUS_NAME_SIZE];
+ struct pmbus_debugfs_entry *entries;
+
+ if (!pmbus_debugfs_dir)
+ return -ENODEV;
+
+ /*
+ * Create the debugfs directory for this device. Use the hwmon device
+ * name to avoid conflicts (hwmon numbers are globally unique).
+ */
+ data->debugfs = debugfs_create_dir(dev_name(data->hwmon_dev),
+ pmbus_debugfs_dir);
+ if (IS_ERR_OR_NULL(data->debugfs)) {
+ data->debugfs = NULL;
+ return -ENODEV;
+ }
+
+ /* Allocate the max possible entries we need. */
+ entries = devm_kzalloc(data->dev,
+ sizeof(*entries) * (data->info->pages * 10),
+ GFP_KERNEL);
+ if (!entries)
+ return -ENOMEM;
+
+ for (i = 0; i < data->info->pages; ++i) {
+ /* Check accessibility of status register if it's not page 0 */
+ if (!i || pmbus_check_status_register(client, i)) {
+ /* No need to set reg as we have special read op. */
+ entries[idx].client = client;
+ entries[idx].page = i;
+ scnprintf(name, PMBUS_NAME_SIZE, "status%d", i);
+ debugfs_create_file(name, 0444, data->debugfs,
+ &entries[idx++],
+ &pmbus_debugfs_ops_status);
+ }
+
+ if (data->info->func[i] & PMBUS_HAVE_STATUS_VOUT) {
+ entries[idx].client = client;
+ entries[idx].page = i;
+ entries[idx].reg = PMBUS_STATUS_VOUT;
+ scnprintf(name, PMBUS_NAME_SIZE, "status%d_vout", i);
+ debugfs_create_file(name, 0444, data->debugfs,
+ &entries[idx++],
+ &pmbus_debugfs_ops);
+ }
+
+ if (data->info->func[i] & PMBUS_HAVE_STATUS_IOUT) {
+ entries[idx].client = client;
+ entries[idx].page = i;
+ entries[idx].reg = PMBUS_STATUS_IOUT;
+ scnprintf(name, PMBUS_NAME_SIZE, "status%d_iout", i);
+ debugfs_create_file(name, 0444, data->debugfs,
+ &entries[idx++],
+ &pmbus_debugfs_ops);
+ }
+
+ if (data->info->func[i] & PMBUS_HAVE_STATUS_INPUT) {
+ entries[idx].client = client;
+ entries[idx].page = i;
+ entries[idx].reg = PMBUS_STATUS_INPUT;
+ scnprintf(name, PMBUS_NAME_SIZE, "status%d_input", i);
+ debugfs_create_file(name, 0444, data->debugfs,
+ &entries[idx++],
+ &pmbus_debugfs_ops);
+ }
+
+ if (data->info->func[i] & PMBUS_HAVE_STATUS_TEMP) {
+ entries[idx].client = client;
+ entries[idx].page = i;
+ entries[idx].reg = PMBUS_STATUS_TEMPERATURE;
+ scnprintf(name, PMBUS_NAME_SIZE, "status%d_temp", i);
+ debugfs_create_file(name, 0444, data->debugfs,
+ &entries[idx++],
+ &pmbus_debugfs_ops);
+ }
+
+ if (pmbus_check_byte_register(client, i, PMBUS_STATUS_CML)) {
+ entries[idx].client = client;
+ entries[idx].page = i;
+ entries[idx].reg = PMBUS_STATUS_CML;
+ scnprintf(name, PMBUS_NAME_SIZE, "status%d_cml", i);
+ debugfs_create_file(name, 0444, data->debugfs,
+ &entries[idx++],
+ &pmbus_debugfs_ops);
+ }
+
+ if (pmbus_check_byte_register(client, i, PMBUS_STATUS_OTHER)) {
+ entries[idx].client = client;
+ entries[idx].page = i;
+ entries[idx].reg = PMBUS_STATUS_OTHER;
+ scnprintf(name, PMBUS_NAME_SIZE, "status%d_other", i);
+ debugfs_create_file(name, 0444, data->debugfs,
+ &entries[idx++],
+ &pmbus_debugfs_ops);
+ }
+
+ if (pmbus_check_byte_register(client, i,
+ PMBUS_STATUS_MFR_SPECIFIC)) {
+ entries[idx].client = client;
+ entries[idx].page = i;
+ entries[idx].reg = PMBUS_STATUS_MFR_SPECIFIC;
+ scnprintf(name, PMBUS_NAME_SIZE, "status%d_mfr", i);
+ debugfs_create_file(name, 0444, data->debugfs,
+ &entries[idx++],
+ &pmbus_debugfs_ops);
+ }
+
+ if (data->info->func[i] & PMBUS_HAVE_STATUS_FAN12) {
+ entries[idx].client = client;
+ entries[idx].page = i;
+ entries[idx].reg = PMBUS_STATUS_FAN_12;
+ scnprintf(name, PMBUS_NAME_SIZE, "status%d_fan12", i);
+ debugfs_create_file(name, 0444, data->debugfs,
+ &entries[idx++],
+ &pmbus_debugfs_ops);
+ }
+
+ if (data->info->func[i] & PMBUS_HAVE_STATUS_FAN34) {
+ entries[idx].client = client;
+ entries[idx].page = i;
+ entries[idx].reg = PMBUS_STATUS_FAN_34;
+ scnprintf(name, PMBUS_NAME_SIZE, "status%d_fan34", i);
+ debugfs_create_file(name, 0444, data->debugfs,
+ &entries[idx++],
+ &pmbus_debugfs_ops);
+ }
+ }
+
+ return 0;
+}
+#else
+static int pmbus_init_debugfs(struct i2c_client *client,
+ struct pmbus_data *data)
+{
+ return 0;
+}
+#endif /* IS_ENABLED(CONFIG_DEBUG_FS) */
+
int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
struct pmbus_driver_info *info)
{
@@ -1918,6 +2142,10 @@ int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
if (ret)
goto out_unregister;
+ ret = pmbus_init_debugfs(client, data);
+ if (ret)
+ dev_warn(dev, "Failed to register debugfs\n");
+
return 0;
out_unregister:
@@ -1931,12 +2159,32 @@ EXPORT_SYMBOL_GPL(pmbus_do_probe);
int pmbus_do_remove(struct i2c_client *client)
{
struct pmbus_data *data = i2c_get_clientdata(client);
+
+ debugfs_remove_recursive(data->debugfs);
+
hwmon_device_unregister(data->hwmon_dev);
kfree(data->group.attrs);
return 0;
}
EXPORT_SYMBOL_GPL(pmbus_do_remove);
+static int __init pmbus_core_init(void)
+{
+ pmbus_debugfs_dir = debugfs_create_dir("pmbus", NULL);
+ if (IS_ERR(pmbus_debugfs_dir))
+ pmbus_debugfs_dir = NULL;
+
+ return 0;
+}
+
+static void __exit pmbus_core_exit(void)
+{
+ debugfs_remove_recursive(pmbus_debugfs_dir);
+}
+
+module_init(pmbus_core_init);
+module_exit(pmbus_core_exit);
+
MODULE_AUTHOR("Guenter Roeck");
MODULE_DESCRIPTION("PMBus core driver");
MODULE_LICENSE("GPL");
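The new vr13 case in pmbus_reg2data_vid() mirrors the existing vr12 arithmetic, just with a 10 mV step and a 500 mV base. A worked sketch of the mapping (values in millivolts):

/* Sketch of the vr13 VID decoding added above; code 0x00 means "off". */
static long vr13_vid_to_mv(u8 val)
{
	return val >= 0x01 ? 500 + (val - 1) * 10 : 0;
}

/* e.g. val = 0x01 -> 500 mV, val = 0x33 -> 500 + 50 * 10 = 1000 mV */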
diff --git a/drivers/hwmon/pmbus/tps53679.c b/drivers/hwmon/pmbus/tps53679.c
new file mode 100644
index 000000000000..85b515cd9df0
--- /dev/null
+++ b/drivers/hwmon/pmbus/tps53679.c
@@ -0,0 +1,113 @@
+/*
+ * Hardware monitoring driver for Texas Instruments TPS53679
+ *
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Vadim Pasternak <vadimp@mellanox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include "pmbus.h"
+
+#define TPS53679_PROT_VR12_5MV 0x01 /* VR12.0 mode, 5-mV DAC */
+#define TPS53679_PROT_VR12_5_10MV 0x02 /* VR12.5 mode, 10-mV DAC */
+#define TPS53679_PROT_VR13_10MV 0x04 /* VR13.0 mode, 10-mV DAC */
+#define TPS53679_PROT_IMVP8_5MV 0x05 /* IMVP8 mode, 5-mV DAC */
+#define TPS53679_PROT_VR13_5MV 0x07 /* VR13.0 mode, 5-mV DAC */
+#define TPS53679_PAGE_NUM 2
+
+static int tps53679_identify(struct i2c_client *client,
+ struct pmbus_driver_info *info)
+{
+ u8 vout_params;
+ int ret;
+
+ /* Read the register with VOUT scaling value.*/
+ ret = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE);
+ if (ret < 0)
+ return ret;
+
+ vout_params = ret & GENMASK(4, 0);
+
+ switch (vout_params) {
+ case TPS53679_PROT_VR13_10MV:
+ case TPS53679_PROT_VR12_5_10MV:
+ info->vrm_version = vr13;
+ break;
+ case TPS53679_PROT_VR13_5MV:
+ case TPS53679_PROT_VR12_5MV:
+ case TPS53679_PROT_IMVP8_5MV:
+ info->vrm_version = vr12;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct pmbus_driver_info tps53679_info = {
+ .pages = TPS53679_PAGE_NUM,
+ .format[PSC_VOLTAGE_IN] = linear,
+ .format[PSC_VOLTAGE_OUT] = vid,
+ .format[PSC_TEMPERATURE] = linear,
+ .format[PSC_CURRENT_OUT] = linear,
+ .format[PSC_POWER] = linear,
+ .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP |
+ PMBUS_HAVE_POUT,
+ .func[1] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP |
+ PMBUS_HAVE_POUT,
+ .identify = tps53679_identify,
+};
+
+static int tps53679_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ return pmbus_do_probe(client, id, &tps53679_info);
+}
+
+static const struct i2c_device_id tps53679_id[] = {
+ {"tps53679", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, tps53679_id);
+
+static const struct of_device_id tps53679_of_match[] = {
+ {.compatible = "ti,tps53679"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, tps53679_of_match);
+
+static struct i2c_driver tps53679_driver = {
+ .driver = {
+ .name = "tps53679",
+ .of_match_table = of_match_ptr(tps53679_of_match),
+ },
+ .probe = tps53679_probe,
+ .remove = pmbus_do_remove,
+ .id_table = tps53679_id,
+};
+
+module_i2c_driver(tps53679_driver);
+
+MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>");
+MODULE_DESCRIPTION("PMBus driver for Texas Instruments TPS53679");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/scpi-hwmon.c b/drivers/hwmon/scpi-hwmon.c
index a586480a7ca9..7e49da50bc69 100644
--- a/drivers/hwmon/scpi-hwmon.c
+++ b/drivers/hwmon/scpi-hwmon.c
@@ -120,7 +120,7 @@ scpi_show_label(struct device *dev, struct device_attribute *attr, char *buf)
return sprintf(buf, "%s\n", sensor->info.name);
}
-static struct thermal_zone_of_device_ops scpi_sensor_ops = {
+static const struct thermal_zone_of_device_ops scpi_sensor_ops = {
.get_temp = scpi_read_temp,
};
diff --git a/drivers/hwmon/stts751.c b/drivers/hwmon/stts751.c
index d56251d6eec2..3f940fb67dc6 100644
--- a/drivers/hwmon/stts751.c
+++ b/drivers/hwmon/stts751.c
@@ -718,6 +718,10 @@ static int stts751_read_chip_config(struct stts751_priv *priv)
ret = i2c_smbus_read_byte_data(priv->client, STTS751_REG_RATE);
if (ret < 0)
return ret;
+ if (ret >= ARRAY_SIZE(stts751_intervals)) {
+ dev_err(priv->dev, "Unrecognized conversion rate 0x%x\n", ret);
+ return -ENODEV;
+ }
priv->interval = ret;
ret = stts751_read_reg16(priv, &priv->event_max,
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index 8d55d6d79015..ef9cb3c164e1 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -70,13 +70,13 @@ config CORESIGHT_SOURCE_ETM4X
for instruction level tracing. Depending on the implemented version
data tracing may also be available.
-config CORESIGHT_QCOM_REPLICATOR
- bool "Qualcomm CoreSight Replicator driver"
+config CORESIGHT_DYNAMIC_REPLICATOR
+ bool "CoreSight Programmable Replicator driver"
depends on CORESIGHT_LINKS_AND_SINKS
help
- This enables support for Qualcomm CoreSight link driver. The
- programmable ATB replicator sends the ATB trace stream from the
- ETB/ETF to the TPIUi and ETR.
+ This enables support for the dynamic CoreSight replicator link driver.
+ The programmable ATB replicator allows independent filtering of the
+ trace data based on the traceid.
config CORESIGHT_STM
bool "CoreSight System Trace Macrocell driver"
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index 433d59025eb6..5bae90ce794d 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -14,6 +14,6 @@ obj-$(CONFIG_CORESIGHT_SOURCE_ETM3X) += coresight-etm3x.o coresight-etm-cp14.o \
coresight-etm3x-sysfs.o
obj-$(CONFIG_CORESIGHT_SOURCE_ETM4X) += coresight-etm4x.o \
coresight-etm4x-sysfs.o
-obj-$(CONFIG_CORESIGHT_QCOM_REPLICATOR) += coresight-replicator-qcom.o
+obj-$(CONFIG_CORESIGHT_DYNAMIC_REPLICATOR) += coresight-dynamic-replicator.o
obj-$(CONFIG_CORESIGHT_STM) += coresight-stm.o
obj-$(CONFIG_CORESIGHT_CPU_DEBUG) += coresight-cpu-debug.o
diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c
index 64a77e00eaa6..6ea62c62ff27 100644
--- a/drivers/hwtracing/coresight/coresight-cpu-debug.c
+++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c
@@ -667,7 +667,7 @@ static int debug_remove(struct amba_device *adev)
return 0;
}
-static struct amba_id debug_ids[] = {
+static const struct amba_id debug_ids[] = {
{ /* Debug for Cortex-A53 */
.id = 0x000bbd03,
.mask = 0x000fffff,
diff --git a/drivers/hwtracing/coresight/coresight-replicator-qcom.c b/drivers/hwtracing/coresight/coresight-dynamic-replicator.c
index 0a3d15f0b009..accc2056f7c6 100644
--- a/drivers/hwtracing/coresight/coresight-replicator-qcom.c
+++ b/drivers/hwtracing/coresight/coresight-dynamic-replicator.c
@@ -95,6 +95,28 @@ static const struct coresight_ops replicator_cs_ops = {
.link_ops = &replicator_link_ops,
};
+#define coresight_replicator_reg(name, offset) \
+ coresight_simple_reg32(struct replicator_state, name, offset)
+
+coresight_replicator_reg(idfilter0, REPLICATOR_IDFILTER0);
+coresight_replicator_reg(idfilter1, REPLICATOR_IDFILTER1);
+
+static struct attribute *replicator_mgmt_attrs[] = {
+ &dev_attr_idfilter0.attr,
+ &dev_attr_idfilter1.attr,
+ NULL,
+};
+
+static const struct attribute_group replicator_mgmt_group = {
+ .attrs = replicator_mgmt_attrs,
+ .name = "mgmt",
+};
+
+static const struct attribute_group *replicator_groups[] = {
+ &replicator_mgmt_group,
+ NULL,
+};
+
static int replicator_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret;
@@ -139,11 +161,11 @@ static int replicator_probe(struct amba_device *adev, const struct amba_id *id)
desc.ops = &replicator_cs_ops;
desc.pdata = adev->dev.platform_data;
desc.dev = &adev->dev;
+ desc.groups = replicator_groups;
drvdata->csdev = coresight_register(&desc);
if (IS_ERR(drvdata->csdev))
return PTR_ERR(drvdata->csdev);
- dev_info(dev, "%s initialized\n", (char *)id->data);
return 0;
}
@@ -175,18 +197,22 @@ static const struct dev_pm_ops replicator_dev_pm_ops = {
NULL)
};
-static struct amba_id replicator_ids[] = {
+static const struct amba_id replicator_ids[] = {
{
.id = 0x0003b909,
.mask = 0x0003ffff,
- .data = "REPLICATOR 1.0",
+ },
+ {
+ /* Coresight SoC-600 */
+ .id = 0x000bb9ec,
+ .mask = 0x000fffff,
},
{ 0, 0 },
};
static struct amba_driver replicator_driver = {
.drv = {
- .name = "coresight-replicator-qcom",
+ .name = "coresight-dynamic-replicator",
.pm = &replicator_dev_pm_ops,
.suppress_bind_attrs = true,
},
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index d5b96423e1a5..56ecd7aff5eb 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -200,8 +200,10 @@ static void etb_disable_hw(struct etb_drvdata *drvdata)
static void etb_dump_hw(struct etb_drvdata *drvdata)
{
+ bool lost = false;
int i;
u8 *buf_ptr;
+ const u32 *barrier;
u32 read_data, depth;
u32 read_ptr, write_ptr;
u32 frame_off, frame_endoff;
@@ -223,20 +225,26 @@ static void etb_dump_hw(struct etb_drvdata *drvdata)
}
if ((readl_relaxed(drvdata->base + ETB_STATUS_REG)
- & ETB_STATUS_RAM_FULL) == 0)
+ & ETB_STATUS_RAM_FULL) == 0) {
writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER);
- else
+ } else {
writel_relaxed(write_ptr, drvdata->base + ETB_RAM_READ_POINTER);
+ lost = true;
+ }
depth = drvdata->buffer_depth;
buf_ptr = drvdata->buf;
+ barrier = barrier_pkt;
for (i = 0; i < depth; i++) {
read_data = readl_relaxed(drvdata->base +
ETB_RAM_READ_DATA_REG);
- *buf_ptr++ = read_data >> 0;
- *buf_ptr++ = read_data >> 8;
- *buf_ptr++ = read_data >> 16;
- *buf_ptr++ = read_data >> 24;
+ if (lost && *barrier) {
+ read_data = *barrier;
+ barrier++;
+ }
+
+ *(u32 *)buf_ptr = read_data;
+ buf_ptr += 4;
}
if (frame_off) {
@@ -353,8 +361,10 @@ static void etb_update_buffer(struct coresight_device *csdev,
struct perf_output_handle *handle,
void *sink_config)
{
+ bool lost = false;
int i, cur;
u8 *buf_ptr;
+ const u32 *barrier;
u32 read_ptr, write_ptr, capacity;
u32 status, read_data, to_read;
unsigned long offset;
@@ -366,8 +376,8 @@ static void etb_update_buffer(struct coresight_device *csdev,
capacity = drvdata->buffer_depth * ETB_FRAME_SIZE_WORDS;
- CS_UNLOCK(drvdata->base);
etb_disable_hw(drvdata);
+ CS_UNLOCK(drvdata->base);
/* unit is in words, not bytes */
read_ptr = readl_relaxed(drvdata->base + ETB_RAM_READ_POINTER);
@@ -384,7 +394,7 @@ static void etb_update_buffer(struct coresight_device *csdev,
(unsigned long)write_ptr);
write_ptr &= ~(ETB_FRAME_SIZE_WORDS - 1);
- perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
+ lost = true;
}
/*
@@ -395,7 +405,7 @@ static void etb_update_buffer(struct coresight_device *csdev,
*/
status = readl_relaxed(drvdata->base + ETB_STATUS_REG);
if (status & ETB_STATUS_RAM_FULL) {
- perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
+ lost = true;
to_read = capacity;
read_ptr = write_ptr;
} else {
@@ -428,22 +438,30 @@ static void etb_update_buffer(struct coresight_device *csdev,
if (read_ptr > (drvdata->buffer_depth - 1))
read_ptr -= drvdata->buffer_depth;
/* let the decoder know we've skipped ahead */
- perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
+ lost = true;
}
+ if (lost)
+ perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
+
/* finally tell HW where we want to start reading from */
writel_relaxed(read_ptr, drvdata->base + ETB_RAM_READ_POINTER);
cur = buf->cur;
offset = buf->offset;
+ barrier = barrier_pkt;
+
for (i = 0; i < to_read; i += 4) {
buf_ptr = buf->data_pages[cur] + offset;
read_data = readl_relaxed(drvdata->base +
ETB_RAM_READ_DATA_REG);
- *buf_ptr++ = read_data >> 0;
- *buf_ptr++ = read_data >> 8;
- *buf_ptr++ = read_data >> 16;
- *buf_ptr++ = read_data >> 24;
+ if (lost && *barrier) {
+ read_data = *barrier;
+ barrier++;
+ }
+
+ *(u32 *)buf_ptr = read_data;
+ buf_ptr += 4;
offset += 4;
if (offset >= PAGE_SIZE) {
@@ -557,17 +575,17 @@ static const struct file_operations etb_fops = {
.llseek = no_llseek,
};
-#define coresight_etb10_simple_func(name, offset) \
- coresight_simple_func(struct etb_drvdata, NULL, name, offset)
+#define coresight_etb10_reg(name, offset) \
+ coresight_simple_reg32(struct etb_drvdata, name, offset)
-coresight_etb10_simple_func(rdp, ETB_RAM_DEPTH_REG);
-coresight_etb10_simple_func(sts, ETB_STATUS_REG);
-coresight_etb10_simple_func(rrp, ETB_RAM_READ_POINTER);
-coresight_etb10_simple_func(rwp, ETB_RAM_WRITE_POINTER);
-coresight_etb10_simple_func(trg, ETB_TRG);
-coresight_etb10_simple_func(ctl, ETB_CTL_REG);
-coresight_etb10_simple_func(ffsr, ETB_FFSR);
-coresight_etb10_simple_func(ffcr, ETB_FFCR);
+coresight_etb10_reg(rdp, ETB_RAM_DEPTH_REG);
+coresight_etb10_reg(sts, ETB_STATUS_REG);
+coresight_etb10_reg(rrp, ETB_RAM_READ_POINTER);
+coresight_etb10_reg(rwp, ETB_RAM_WRITE_POINTER);
+coresight_etb10_reg(trg, ETB_TRG);
+coresight_etb10_reg(ctl, ETB_CTL_REG);
+coresight_etb10_reg(ffsr, ETB_FFSR);
+coresight_etb10_reg(ffcr, ETB_FFCR);
static struct attribute *coresight_etb_mgmt_attrs[] = {
&dev_attr_rdp.attr,
@@ -728,7 +746,7 @@ static const struct dev_pm_ops etb_dev_pm_ops = {
SET_RUNTIME_PM_OPS(etb_runtime_suspend, etb_runtime_resume, NULL)
};
-static struct amba_id etb_ids[] = {
+static const struct amba_id etb_ids[] = {
{
.id = 0x0003b907,
.mask = 0x0003ffff,
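The barrier-packet handling added in etb_dump_hw() and etb_update_buffer() follows one pattern: when trace data was lost (the RAM buffer wrapped), the first words copied out are replaced with entries from the zero-terminated barrier_pkt array (declared in coresight-priv.h; its contents are not shown in this diff) so that a decoder can resynchronise. A conceptual sketch of that substitution, not part of the patch:

/*
 * Conceptual sketch of the substitution done in the copy loops above.
 * Walks barrier_pkt until its terminating 0 and only acts when data
 * was lost.
 */
static void insert_barriers(u32 *buf, size_t nwords, bool lost)
{
	const u32 *barrier = barrier_pkt;
	size_t i;

	for (i = 0; i < nwords && lost && *barrier; i++)
		buf[i] = *barrier++;
}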
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index 8f546f59a3fd..8a0ad77574e7 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -53,14 +53,16 @@ static DEFINE_PER_CPU(struct coresight_device *, csdev_src);
/* ETMv3.5/PTM's ETMCR is 'config' */
PMU_FORMAT_ATTR(cycacc, "config:" __stringify(ETM_OPT_CYCACC));
PMU_FORMAT_ATTR(timestamp, "config:" __stringify(ETM_OPT_TS));
+PMU_FORMAT_ATTR(retstack, "config:" __stringify(ETM_OPT_RETSTK));
static struct attribute *etm_config_formats_attr[] = {
&format_attr_cycacc.attr,
&format_attr_timestamp.attr,
+ &format_attr_retstack.attr,
NULL,
};
-static struct attribute_group etm_pmu_format_group = {
+static const struct attribute_group etm_pmu_format_group = {
.name = "format",
.attrs = etm_config_formats_attr,
};
diff --git a/drivers/hwtracing/coresight/coresight-etm.h b/drivers/hwtracing/coresight/coresight-etm.h
index ad063d7444e1..70b0a248c321 100644
--- a/drivers/hwtracing/coresight/coresight-etm.h
+++ b/drivers/hwtracing/coresight/coresight-etm.h
@@ -106,6 +106,7 @@
#define ETMTECR1_START_STOP BIT(25)
/* ETMCCER - 0x1E8 */
#define ETMCCER_TIMESTAMP BIT(22)
+#define ETMCCER_RETSTACK BIT(23)
#define ETM_MODE_EXCLUDE BIT(0)
#define ETM_MODE_CYCACC BIT(1)
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
index ca98ad13bb8c..6e547ec6fead 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
@@ -1232,19 +1232,19 @@ static struct attribute *coresight_etm_attrs[] = {
NULL,
};
-#define coresight_etm3x_simple_func(name, offset) \
- coresight_simple_func(struct etm_drvdata, NULL, name, offset)
-
-coresight_etm3x_simple_func(etmccr, ETMCCR);
-coresight_etm3x_simple_func(etmccer, ETMCCER);
-coresight_etm3x_simple_func(etmscr, ETMSCR);
-coresight_etm3x_simple_func(etmidr, ETMIDR);
-coresight_etm3x_simple_func(etmcr, ETMCR);
-coresight_etm3x_simple_func(etmtraceidr, ETMTRACEIDR);
-coresight_etm3x_simple_func(etmteevr, ETMTEEVR);
-coresight_etm3x_simple_func(etmtssvr, ETMTSSCR);
-coresight_etm3x_simple_func(etmtecr1, ETMTECR1);
-coresight_etm3x_simple_func(etmtecr2, ETMTECR2);
+#define coresight_etm3x_reg(name, offset) \
+ coresight_simple_reg32(struct etm_drvdata, name, offset)
+
+coresight_etm3x_reg(etmccr, ETMCCR);
+coresight_etm3x_reg(etmccer, ETMCCER);
+coresight_etm3x_reg(etmscr, ETMSCR);
+coresight_etm3x_reg(etmidr, ETMIDR);
+coresight_etm3x_reg(etmcr, ETMCR);
+coresight_etm3x_reg(etmtraceidr, ETMTRACEIDR);
+coresight_etm3x_reg(etmteevr, ETMTEEVR);
+coresight_etm3x_reg(etmtssvr, ETMTSSCR);
+coresight_etm3x_reg(etmtecr1, ETMTECR1);
+coresight_etm3x_reg(etmtecr2, ETMTECR2);
static struct attribute *coresight_etm_mgmt_attrs[] = {
&dev_attr_etmccr.attr,
diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c
index 93ee8fc539be..e5b1ec57dbde 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x.c
@@ -243,6 +243,8 @@ void etm_set_default(struct etm_config *config)
}
config->ctxid_mask = 0x0;
+ /* Setting default to 1024 as per TRM recommendation */
+ config->sync_freq = 0x400;
}
void etm_config_trace_mode(struct etm_config *config)
@@ -308,7 +310,9 @@ void etm_config_trace_mode(struct etm_config *config)
config->addr_type[1] = ETM_ADDR_TYPE_RANGE;
}
-#define ETM3X_SUPPORTED_OPTIONS (ETMCR_CYC_ACC | ETMCR_TIMESTAMP_EN)
+#define ETM3X_SUPPORTED_OPTIONS (ETMCR_CYC_ACC | \
+ ETMCR_TIMESTAMP_EN | \
+ ETMCR_RETURN_STACK)
static int etm_parse_event_config(struct etm_drvdata *drvdata,
struct perf_event *event)
@@ -339,14 +343,24 @@ static int etm_parse_event_config(struct etm_drvdata *drvdata,
etm_config_trace_mode(config);
/*
- * At this time only cycle accurate and timestamp options are
- * available.
+ * At this time only cycle accurate, return stack and timestamp
+ * options are available.
*/
if (attr->config & ~ETM3X_SUPPORTED_OPTIONS)
return -EINVAL;
config->ctrl = attr->config;
+ /*
+ * It is possible to have cores with PTM (which supports the return
+ * stack) and ETM (which never has one) on the same SoC. If a return
+ * stack was requested but cannot be honoured on this core, clear the
+ * bit; tracing still continues normally.
+ */
+ if ((config->ctrl & ETMCR_RETURN_STACK) &&
+ !(drvdata->etmccer & ETMCCER_RETSTACK))
+ config->ctrl &= ~ETMCR_RETURN_STACK;
+
return 0;
}
@@ -885,7 +899,7 @@ static const struct dev_pm_ops etm_dev_pm_ops = {
SET_RUNTIME_PM_OPS(etm_runtime_suspend, etm_runtime_resume, NULL)
};
-static struct amba_id etm_ids[] = {
+static const struct amba_id etm_ids[] = {
{ /* ETM 3.3 */
.id = 0x0003b921,
.mask = 0x0003ffff,
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
index b9b1e9c8f4c4..4e6eab53e34e 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
@@ -2066,23 +2066,23 @@ static u32 etmv4_cross_read(const struct device *dev, u32 offset)
return reg.data;
}
-#define coresight_etm4x_simple_func(name, offset) \
- coresight_simple_func(struct etmv4_drvdata, NULL, name, offset)
+#define coresight_etm4x_reg(name, offset) \
+ coresight_simple_reg32(struct etmv4_drvdata, name, offset)
#define coresight_etm4x_cross_read(name, offset) \
coresight_simple_func(struct etmv4_drvdata, etmv4_cross_read, \
name, offset)
-coresight_etm4x_simple_func(trcpdcr, TRCPDCR);
-coresight_etm4x_simple_func(trcpdsr, TRCPDSR);
-coresight_etm4x_simple_func(trclsr, TRCLSR);
-coresight_etm4x_simple_func(trcauthstatus, TRCAUTHSTATUS);
-coresight_etm4x_simple_func(trcdevid, TRCDEVID);
-coresight_etm4x_simple_func(trcdevtype, TRCDEVTYPE);
-coresight_etm4x_simple_func(trcpidr0, TRCPIDR0);
-coresight_etm4x_simple_func(trcpidr1, TRCPIDR1);
-coresight_etm4x_simple_func(trcpidr2, TRCPIDR2);
-coresight_etm4x_simple_func(trcpidr3, TRCPIDR3);
+coresight_etm4x_reg(trcpdcr, TRCPDCR);
+coresight_etm4x_reg(trcpdsr, TRCPDSR);
+coresight_etm4x_reg(trclsr, TRCLSR);
+coresight_etm4x_reg(trcauthstatus, TRCAUTHSTATUS);
+coresight_etm4x_reg(trcdevid, TRCDEVID);
+coresight_etm4x_reg(trcdevtype, TRCDEVTYPE);
+coresight_etm4x_reg(trcpidr0, TRCPIDR0);
+coresight_etm4x_reg(trcpidr1, TRCPIDR1);
+coresight_etm4x_reg(trcpidr2, TRCPIDR2);
+coresight_etm4x_reg(trcpidr3, TRCPIDR3);
coresight_etm4x_cross_read(trcoslsr, TRCOSLSR);
coresight_etm4x_cross_read(trcconfig, TRCCONFIGR);
coresight_etm4x_cross_read(trctraceid, TRCTRACEIDR);
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 532adc9dd32a..cf364a514c12 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -224,6 +224,10 @@ static int etm4_parse_event_config(struct etmv4_drvdata *drvdata,
if (attr->config & BIT(ETM_OPT_TS))
/* bit[11], Global timestamp tracing bit */
config->cfg |= BIT(11);
+ /* return stack - enable if selected and supported */
+ if ((attr->config & BIT(ETM_OPT_RETSTK)) && drvdata->retstack)
+ /* bit[12], Return stack enable bit */
+ config->cfg |= BIT(12);
out:
return ret;
@@ -1048,7 +1052,7 @@ err_arch_supported:
return ret;
}
-static struct amba_id etm4_ids[] = {
+static const struct amba_id etm4_ids[] = {
{ /* ETM 4.0 - Cortex-A53 */
.id = 0x000bb95d,
.mask = 0x000fffff,
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index 860fe6ef5632..77642e0e955b 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -246,11 +246,16 @@ static const struct dev_pm_ops funnel_dev_pm_ops = {
SET_RUNTIME_PM_OPS(funnel_runtime_suspend, funnel_runtime_resume, NULL)
};
-static struct amba_id funnel_ids[] = {
+static const struct amba_id funnel_ids[] = {
{
.id = 0x0003b908,
.mask = 0x0003ffff,
},
+ {
+ /* Coresight SoC-600 */
+ .id = 0x000bb9eb,
+ .mask = 0x000fffff,
+ },
{ 0, 0},
};
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 5f662d82052c..f1d0e21d8cab 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -39,23 +39,33 @@
#define ETM_MODE_EXCL_USER BIT(31)
typedef u32 (*coresight_read_fn)(const struct device *, u32 offset);
-#define coresight_simple_func(type, func, name, offset) \
+#define __coresight_simple_func(type, func, name, lo_off, hi_off) \
static ssize_t name##_show(struct device *_dev, \
struct device_attribute *attr, char *buf) \
{ \
type *drvdata = dev_get_drvdata(_dev->parent); \
coresight_read_fn fn = func; \
- u32 val; \
+ u64 val; \
pm_runtime_get_sync(_dev->parent); \
if (fn) \
- val = fn(_dev->parent, offset); \
+ val = (u64)fn(_dev->parent, lo_off); \
else \
- val = readl_relaxed(drvdata->base + offset); \
+ val = coresight_read_reg_pair(drvdata->base, \
+ lo_off, hi_off); \
pm_runtime_put_sync(_dev->parent); \
- return scnprintf(buf, PAGE_SIZE, "0x%x\n", val); \
+ return scnprintf(buf, PAGE_SIZE, "0x%llx\n", val); \
} \
static DEVICE_ATTR_RO(name)
+#define coresight_simple_func(type, func, name, offset) \
+ __coresight_simple_func(type, func, name, offset, -1)
+#define coresight_simple_reg32(type, name, offset) \
+ __coresight_simple_func(type, NULL, name, offset, -1)
+#define coresight_simple_reg64(type, name, lo_off, hi_off) \
+ __coresight_simple_func(type, NULL, name, lo_off, hi_off)
+
+extern const u32 barrier_pkt[5];
+
enum etm_addr_type {
ETM_ADDR_TYPE_NONE,
ETM_ADDR_TYPE_SINGLE,
@@ -106,6 +116,25 @@ static inline void CS_UNLOCK(void __iomem *addr)
} while (0);
}
+static inline u64
+coresight_read_reg_pair(void __iomem *addr, s32 lo_offset, s32 hi_offset)
+{
+ u64 val;
+
+ val = readl_relaxed(addr + lo_offset);
+ val |= (hi_offset < 0) ? 0 :
+ (u64)readl_relaxed(addr + hi_offset) << 32;
+ return val;
+}
+
+static inline void coresight_write_reg_pair(void __iomem *addr, u64 val,
+ s32 lo_offset, s32 hi_offset)
+{
+ writel_relaxed((u32)val, addr + lo_offset);
+ if (hi_offset >= 0)
+ writel_relaxed((u32)(val >> 32), addr + hi_offset);
+}
+
void coresight_disable_path(struct list_head *path);
int coresight_enable_path(struct list_head *path, u32 mode);
struct coresight_device *coresight_get_sink(struct list_head *path);
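The reworked macro family lets a driver expose either a single 32-bit register or a 64-bit lo/hi pair as one read-only sysfs attribute; coresight_read_reg_pair() simply skips the high word when hi_offset is negative. Illustrative use of the 64-bit variant (the register names and offsets below are placeholders, not taken from this patch):

/* Placeholder offsets, for illustration only. */
#define MY_RRP		0x014
#define MY_RRPHI	0x038

/* 64-bit read-only attribute built from a lo/hi register pair. */
coresight_simple_reg64(struct tmc_drvdata, rrp, MY_RRP, MY_RRPHI);

static struct attribute *my_mgmt_attrs[] = {
	&dev_attr_rrp.attr,
	NULL,
};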
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index 93fc26f01bab..92a780a6df1d 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -276,7 +276,7 @@ static void stm_disable(struct coresight_device *csdev,
spin_unlock(&drvdata->spinlock);
/* Wait until the engine has completely stopped */
- coresight_timeout(drvdata, STMTCSR, STMTCSR_BUSY_BIT, 0);
+ coresight_timeout(drvdata->base, STMTCSR, STMTCSR_BUSY_BIT, 0);
pm_runtime_put(drvdata->dev);
@@ -307,7 +307,8 @@ static inline bool stm_addr_unaligned(const void *addr, u8 write_bytes)
return ((unsigned long)addr & (write_bytes - 1));
}
-static void stm_send(void *addr, const void *data, u32 size, u8 write_bytes)
+static void stm_send(void __iomem *addr, const void *data,
+ u32 size, u8 write_bytes)
{
u8 paload[8];
@@ -414,7 +415,7 @@ static ssize_t notrace stm_generic_packet(struct stm_data *stm_data,
unsigned int size,
const unsigned char *payload)
{
- unsigned long ch_addr;
+ void __iomem *ch_addr;
struct stm_drvdata *drvdata = container_of(stm_data,
struct stm_drvdata, stm);
@@ -424,7 +425,7 @@ static ssize_t notrace stm_generic_packet(struct stm_data *stm_data,
if (channel >= drvdata->numsp)
return -EINVAL;
- ch_addr = (unsigned long)stm_channel_addr(drvdata, channel);
+ ch_addr = stm_channel_addr(drvdata, channel);
flags = (flags == STP_PACKET_TIMESTAMPED) ? STM_FLAG_TIMESTAMPED : 0;
flags |= test_bit(channel, drvdata->chs.guaranteed) ?
@@ -437,20 +438,20 @@ static ssize_t notrace stm_generic_packet(struct stm_data *stm_data,
switch (packet) {
case STP_PACKET_FLAG:
- ch_addr |= stm_channel_off(STM_PKT_TYPE_FLAG, flags);
+ ch_addr += stm_channel_off(STM_PKT_TYPE_FLAG, flags);
/*
* The generic STM core sets a size of '0' on flag packets.
* As such send a flag packet of size '1' and tell the
* core we did so.
*/
- stm_send((void *)ch_addr, payload, 1, drvdata->write_bytes);
+ stm_send(ch_addr, payload, 1, drvdata->write_bytes);
size = 1;
break;
case STP_PACKET_DATA:
- ch_addr |= stm_channel_off(STM_PKT_TYPE_DATA, flags);
- stm_send((void *)ch_addr, payload, size,
+ ch_addr += stm_channel_off(STM_PKT_TYPE_DATA, flags);
+ stm_send(ch_addr, payload, size,
drvdata->write_bytes);
break;
@@ -635,21 +636,21 @@ static ssize_t traceid_store(struct device *dev,
}
static DEVICE_ATTR_RW(traceid);
-#define coresight_stm_simple_func(name, offset) \
- coresight_simple_func(struct stm_drvdata, NULL, name, offset)
-
-coresight_stm_simple_func(tcsr, STMTCSR);
-coresight_stm_simple_func(tsfreqr, STMTSFREQR);
-coresight_stm_simple_func(syncr, STMSYNCR);
-coresight_stm_simple_func(sper, STMSPER);
-coresight_stm_simple_func(spter, STMSPTER);
-coresight_stm_simple_func(privmaskr, STMPRIVMASKR);
-coresight_stm_simple_func(spscr, STMSPSCR);
-coresight_stm_simple_func(spmscr, STMSPMSCR);
-coresight_stm_simple_func(spfeat1r, STMSPFEAT1R);
-coresight_stm_simple_func(spfeat2r, STMSPFEAT2R);
-coresight_stm_simple_func(spfeat3r, STMSPFEAT3R);
-coresight_stm_simple_func(devid, CORESIGHT_DEVID);
+#define coresight_stm_reg(name, offset) \
+ coresight_simple_reg32(struct stm_drvdata, name, offset)
+
+coresight_stm_reg(tcsr, STMTCSR);
+coresight_stm_reg(tsfreqr, STMTSFREQR);
+coresight_stm_reg(syncr, STMSYNCR);
+coresight_stm_reg(sper, STMSPER);
+coresight_stm_reg(spter, STMSPTER);
+coresight_stm_reg(privmaskr, STMPRIVMASKR);
+coresight_stm_reg(spscr, STMSPSCR);
+coresight_stm_reg(spmscr, STMSPMSCR);
+coresight_stm_reg(spfeat1r, STMSPFEAT1R);
+coresight_stm_reg(spfeat2r, STMSPFEAT2R);
+coresight_stm_reg(spfeat3r, STMSPFEAT3R);
+coresight_stm_reg(devid, CORESIGHT_DEVID);
static struct attribute *coresight_stm_attrs[] = {
&dev_attr_hwevent_enable.attr,
@@ -914,7 +915,7 @@ static const struct dev_pm_ops stm_dev_pm_ops = {
SET_RUNTIME_PM_OPS(stm_runtime_suspend, stm_runtime_resume, NULL)
};
-static struct amba_id stm_ids[] = {
+static const struct amba_id stm_ids[] = {
{
.id = 0x0003b962,
.mask = 0x0003ffff,
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index e3b9fb82eb8d..e2513b786242 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -43,17 +43,34 @@ static void tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
{
+ bool lost = false;
char *bufp;
- u32 read_data;
+ const u32 *barrier;
+ u32 read_data, status;
int i;
+ /*
+ * Get a hold of the status register and see if a wrap around
+ * has occurred.
+ */
+ status = readl_relaxed(drvdata->base + TMC_STS);
+ if (status & TMC_STS_FULL)
+ lost = true;
+
bufp = drvdata->buf;
drvdata->len = 0;
+ barrier = barrier_pkt;
while (1) {
for (i = 0; i < drvdata->memwidth; i++) {
read_data = readl_relaxed(drvdata->base + TMC_RRD);
if (read_data == 0xFFFFFFFF)
return;
+
+ if (lost && *barrier) {
+ read_data = *barrier;
+ barrier++;
+ }
+
memcpy(bufp, &read_data, 4);
bufp += 4;
drvdata->len += 4;
@@ -369,9 +386,11 @@ static void tmc_update_etf_buffer(struct coresight_device *csdev,
struct perf_output_handle *handle,
void *sink_config)
{
+ bool lost = false;
int i, cur;
+ const u32 *barrier;
u32 *buf_ptr;
- u32 read_ptr, write_ptr;
+ u64 read_ptr, write_ptr;
u32 status, to_read;
unsigned long offset;
struct cs_buffers *buf = sink_config;
@@ -388,8 +407,8 @@ static void tmc_update_etf_buffer(struct coresight_device *csdev,
tmc_flush_and_stop(drvdata);
- read_ptr = readl_relaxed(drvdata->base + TMC_RRP);
- write_ptr = readl_relaxed(drvdata->base + TMC_RWP);
+ read_ptr = tmc_read_rrp(drvdata);
+ write_ptr = tmc_read_rwp(drvdata);
/*
* Get a hold of the status register and see if a wrap around
@@ -397,7 +416,7 @@ static void tmc_update_etf_buffer(struct coresight_device *csdev,
*/
status = readl_relaxed(drvdata->base + TMC_STS);
if (status & TMC_STS_FULL) {
- perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
+ lost = true;
to_read = drvdata->size;
} else {
to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size);
@@ -441,18 +460,27 @@ static void tmc_update_etf_buffer(struct coresight_device *csdev,
if (read_ptr > (drvdata->size - 1))
read_ptr -= drvdata->size;
/* Tell the HW */
- writel_relaxed(read_ptr, drvdata->base + TMC_RRP);
- perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
+ tmc_write_rrp(drvdata, read_ptr);
+ lost = true;
}
+ if (lost)
+ perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
+
cur = buf->cur;
offset = buf->offset;
+ barrier = barrier_pkt;
/* for every byte to read */
for (i = 0; i < to_read; i += 4) {
buf_ptr = buf->data_pages[cur] + offset;
*buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);
+ if (lost && *barrier) {
+ *buf_ptr = *barrier;
+ barrier++;
+ }
+
offset += 4;
if (offset >= PAGE_SIZE) {
offset = 0;
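Reading back the ETB/ETF above, a wrap-around (STS.Full) means the oldest words were overwritten, so the first words handed out are swapped for the barrier packet rather than prepended. A minimal standalone sketch of that splice, with hypothetical demo_* names:

#include <stdint.h>
#include <stddef.h>

/* Same shape as barrier_pkt in coresight.c: four async words, 0-terminated. */
static const uint32_t demo_barrier[] = {
	0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x0,
};

/*
 * Copy 'nwords' 32-bit trace words from 'src' to 'dst'. When 'lost' is set
 * (the sink wrapped and overwrote old data), the first words are replaced
 * with the barrier packet so a decoder resynchronises before the real data.
 */
static void demo_copy_trace(uint32_t *dst, const uint32_t *src,
			    size_t nwords, int lost)
{
	const uint32_t *barrier = demo_barrier;
	size_t i;

	for (i = 0; i < nwords; i++) {
		uint32_t word = src[i];

		if (lost && *barrier) {
			word = *barrier;
			barrier++;
		}
		dst[i] = word;
	}
}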
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 5d312699b3b9..68fbc8f7450e 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -22,7 +22,7 @@
static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
{
- u32 axictl;
+ u32 axictl, sts;
/* Zero out the memory to help with debug */
memset(drvdata->vaddr, 0, drvdata->size);
@@ -36,17 +36,29 @@ static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
- axictl |= TMC_AXICTL_WR_BURST_16;
- writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
- axictl &= ~TMC_AXICTL_SCT_GAT_MODE;
- writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
- axictl = (axictl &
- ~(TMC_AXICTL_PROT_CTL_B0 | TMC_AXICTL_PROT_CTL_B1)) |
- TMC_AXICTL_PROT_CTL_B1;
+ axictl &= ~TMC_AXICTL_CLEAR_MASK;
+ axictl |= (TMC_AXICTL_PROT_CTL_B1 | TMC_AXICTL_WR_BURST_16);
+ axictl |= TMC_AXICTL_AXCACHE_OS;
+
+ if (tmc_etr_has_cap(drvdata, TMC_ETR_AXI_ARCACHE)) {
+ axictl &= ~TMC_AXICTL_ARCACHE_MASK;
+ axictl |= TMC_AXICTL_ARCACHE_OS;
+ }
+
writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
+ tmc_write_dba(drvdata, drvdata->paddr);
+ /*
+ * If the TMC pointers must be programmed before the session,
+ * we have to set them properly (i.e., RRP/RWP to the base address and
+ * STS to "not full").
+ */
+ if (tmc_etr_has_cap(drvdata, TMC_ETR_SAVE_RESTORE)) {
+ tmc_write_rrp(drvdata, drvdata->paddr);
+ tmc_write_rwp(drvdata, drvdata->paddr);
+ sts = readl_relaxed(drvdata->base + TMC_STS) & ~TMC_STS_FULL;
+ writel_relaxed(sts, drvdata->base + TMC_STS);
+ }
- writel_relaxed(drvdata->paddr, drvdata->base + TMC_DBALO);
- writel_relaxed(0x0, drvdata->base + TMC_DBAHI);
writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
TMC_FFCR_TRIGON_TRIGIN,
@@ -59,9 +71,12 @@ static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata)
{
- u32 rwp, val;
+ const u32 *barrier;
+ u32 val;
+ u32 *temp;
+ u64 rwp;
- rwp = readl_relaxed(drvdata->base + TMC_RWP);
+ rwp = tmc_read_rwp(drvdata);
val = readl_relaxed(drvdata->base + TMC_STS);
/*
@@ -71,6 +86,16 @@ static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata)
if (val & TMC_STS_FULL) {
drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr;
drvdata->len = drvdata->size;
+
+ barrier = barrier_pkt;
+ temp = (u32 *)drvdata->buf;
+
+ while (*barrier) {
+ *temp = *barrier;
+ temp++;
+ barrier++;
+ }
+
} else {
drvdata->buf = drvdata->vaddr;
drvdata->len = rwp - drvdata->paddr;
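The ETR dump above turns the hardware write pointer into an offset inside the CPU mapping of the DMA buffer. A hedged sketch of that pointer arithmetic, using hypothetical demo_* names:

#include <stdint.h>
#include <stddef.h>

/* Mirrors the pointer math in tmc_etr_dump_hw(), for illustration only. */
static void demo_etr_dump_window(void *vaddr, uint64_t paddr, uint64_t rwp,
				 size_t size, int full,
				 void **buf, size_t *len)
{
	if (full) {
		/* Wrapped: the oldest data now starts at the write pointer. */
		*buf = (char *)vaddr + (rwp - paddr);
		*len = size;
	} else {
		/* No wrap: data runs from the base up to the write pointer. */
		*buf = vaddr;
		*len = rwp - paddr;
	}
}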
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 864488793f09..2ff4a66a3caa 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -217,20 +217,24 @@ static enum tmc_mem_intf_width tmc_get_memwidth(u32 devid)
return memwidth;
}
-#define coresight_tmc_simple_func(name, offset) \
- coresight_simple_func(struct tmc_drvdata, NULL, name, offset)
-
-coresight_tmc_simple_func(rsz, TMC_RSZ);
-coresight_tmc_simple_func(sts, TMC_STS);
-coresight_tmc_simple_func(rrp, TMC_RRP);
-coresight_tmc_simple_func(rwp, TMC_RWP);
-coresight_tmc_simple_func(trg, TMC_TRG);
-coresight_tmc_simple_func(ctl, TMC_CTL);
-coresight_tmc_simple_func(ffsr, TMC_FFSR);
-coresight_tmc_simple_func(ffcr, TMC_FFCR);
-coresight_tmc_simple_func(mode, TMC_MODE);
-coresight_tmc_simple_func(pscr, TMC_PSCR);
-coresight_tmc_simple_func(devid, CORESIGHT_DEVID);
+#define coresight_tmc_reg(name, offset) \
+ coresight_simple_reg32(struct tmc_drvdata, name, offset)
+#define coresight_tmc_reg64(name, lo_off, hi_off) \
+ coresight_simple_reg64(struct tmc_drvdata, name, lo_off, hi_off)
+
+coresight_tmc_reg(rsz, TMC_RSZ);
+coresight_tmc_reg(sts, TMC_STS);
+coresight_tmc_reg(trg, TMC_TRG);
+coresight_tmc_reg(ctl, TMC_CTL);
+coresight_tmc_reg(ffsr, TMC_FFSR);
+coresight_tmc_reg(ffcr, TMC_FFCR);
+coresight_tmc_reg(mode, TMC_MODE);
+coresight_tmc_reg(pscr, TMC_PSCR);
+coresight_tmc_reg(axictl, TMC_AXICTL);
+coresight_tmc_reg(devid, CORESIGHT_DEVID);
+coresight_tmc_reg64(rrp, TMC_RRP, TMC_RRPHI);
+coresight_tmc_reg64(rwp, TMC_RWP, TMC_RWPHI);
+coresight_tmc_reg64(dba, TMC_DBALO, TMC_DBAHI);
static struct attribute *coresight_tmc_mgmt_attrs[] = {
&dev_attr_rsz.attr,
@@ -244,6 +248,8 @@ static struct attribute *coresight_tmc_mgmt_attrs[] = {
&dev_attr_mode.attr,
&dev_attr_pscr.attr,
&dev_attr_devid.attr,
+ &dev_attr_dba.attr,
+ &dev_attr_axictl.attr,
NULL,
};
@@ -293,6 +299,42 @@ const struct attribute_group *coresight_tmc_groups[] = {
NULL,
};
+/* Detect and initialise the capabilities of a TMC ETR */
+static int tmc_etr_setup_caps(struct tmc_drvdata *drvdata,
+ u32 devid, void *dev_caps)
+{
+ u32 dma_mask = 0;
+
+ /* Set the unadvertised capabilities */
+ tmc_etr_init_caps(drvdata, (u32)(unsigned long)dev_caps);
+
+ if (!(devid & TMC_DEVID_NOSCAT))
+ tmc_etr_set_cap(drvdata, TMC_ETR_SG);
+
+ /* Check if the AXI address width is available */
+ if (devid & TMC_DEVID_AXIAW_VALID)
+ dma_mask = ((devid >> TMC_DEVID_AXIAW_SHIFT) &
+ TMC_DEVID_AXIAW_MASK);
+
+ /*
+ * Unless specified in the device configuration, ETR uses a 40-bit
+ * AXI master in place of the embedded SRAM of ETB/ETF.
+ */
+ switch (dma_mask) {
+ case 32:
+ case 40:
+ case 44:
+ case 48:
+ case 52:
+ dev_info(drvdata->dev, "Detected dma mask %dbits\n", dma_mask);
+ break;
+ default:
+ dma_mask = 40;
+ }
+
+ return dma_set_mask_and_coherent(drvdata->dev, DMA_BIT_MASK(dma_mask));
+}
+
static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret = 0;
@@ -354,25 +396,29 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
desc.dev = dev;
desc.groups = coresight_tmc_groups;
- if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
+ switch (drvdata->config_type) {
+ case TMC_CONFIG_TYPE_ETB:
desc.type = CORESIGHT_DEV_TYPE_SINK;
desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
desc.ops = &tmc_etb_cs_ops;
- } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
+ break;
+ case TMC_CONFIG_TYPE_ETR:
desc.type = CORESIGHT_DEV_TYPE_SINK;
desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
desc.ops = &tmc_etr_cs_ops;
- /*
- * ETR configuration uses a 40-bit AXI master in place of
- * the embedded SRAM of ETB/ETF.
- */
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
+ ret = tmc_etr_setup_caps(drvdata, devid, id->data);
if (ret)
goto out;
- } else {
+ break;
+ case TMC_CONFIG_TYPE_ETF:
desc.type = CORESIGHT_DEV_TYPE_LINKSINK;
desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO;
desc.ops = &tmc_etf_cs_ops;
+ break;
+ default:
+ pr_err("%s: Unsupported TMC config\n", pdata->name);
+ ret = -EINVAL;
+ goto out;
}
drvdata->csdev = coresight_register(&desc);
@@ -391,11 +437,27 @@ out:
return ret;
}
-static struct amba_id tmc_ids[] = {
+static const struct amba_id tmc_ids[] = {
{
.id = 0x0003b961,
.mask = 0x0003ffff,
},
+ {
+ /* Coresight SoC-600 TMC-ETR/ETS */
+ .id = 0x000bb9e8,
+ .mask = 0x000fffff,
+ .data = (void *)(unsigned long)CORESIGHT_SOC_600_ETR_CAPS,
+ },
+ {
+ /* Coresight SoC-600 TMC-ETB */
+ .id = 0x000bb9e9,
+ .mask = 0x000fffff,
+ },
+ {
+ /* Coresight SoC-600 TMC-ETF */
+ .id = 0x000bb9ea,
+ .mask = 0x000fffff,
+ },
{ 0, 0},
};
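tmc_etr_setup_caps() above reads the AXI address width out of DEVID and turns it into a DMA mask, defaulting to 40 bits. A small worked example with an assumed DEVID encoding (the demo_* names are illustrative, not from the patch):

#include <stdio.h>

#define DEMO_DEVID_NOSCAT	(1u << 24)
#define DEMO_DEVID_AXIAW_VALID	(1u << 16)
#define DEMO_DEVID_AXIAW_SHIFT	17
#define DEMO_DEVID_AXIAW_MASK	0x7f

int main(void)
{
	/* Hypothetical DEVID: AXIAW field = 44, AXIAW_VALID set, NOSCAT clear. */
	unsigned int devid = (44u << DEMO_DEVID_AXIAW_SHIFT) | DEMO_DEVID_AXIAW_VALID;
	unsigned int dma_mask = 0;

	if (devid & DEMO_DEVID_AXIAW_VALID)
		dma_mask = (devid >> DEMO_DEVID_AXIAW_SHIFT) & DEMO_DEVID_AXIAW_MASK;

	/* Prints 44; anything outside {32, 40, 44, 48, 52} falls back to 40. */
	printf("AXI address width: %u bits\n", dma_mask);

	/* Scatter-gather is advertised by the *absence* of the NOSCAT bit. */
	printf("SG capable: %s\n", (devid & DEMO_DEVID_NOSCAT) ? "no" : "yes");
	return 0;
}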
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index 51c01851533e..8df7a813f537 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -54,11 +54,32 @@
#define TMC_STS_TMCREADY_BIT 2
#define TMC_STS_FULL BIT(0)
#define TMC_STS_TRIGGERED BIT(1)
-/* TMC_AXICTL - 0x110 */
+/*
+ * TMC_AXICTL - 0x110
+ *
+ * TMC AXICTL format for SoC-400
+ * Bits [0-1] : ProtCtrlBit0-1
+ * Bits [2-5] : CacheCtrlBits 0-3 (AXCACHE)
+ * Bit 6 : Reserved
+ * Bit 7 : ScatterGatherMode
+ * Bits [8-11] : WrBurstLen
+ * Bits [12-31] : Reserved.
+ * TMC AXICTL format for SoC-600, as above except:
+ * Bits [2-5] : AXI WCACHE
+ * Bits [16-19] : AXI RCACHE
+ * Bits [20-31] : Reserved
+ */
+#define TMC_AXICTL_CLEAR_MASK 0xfbf
+#define TMC_AXICTL_ARCACHE_MASK (0xf << 16)
+
#define TMC_AXICTL_PROT_CTL_B0 BIT(0)
#define TMC_AXICTL_PROT_CTL_B1 BIT(1)
#define TMC_AXICTL_SCT_GAT_MODE BIT(7)
#define TMC_AXICTL_WR_BURST_16 0xF00
+/* Write-back Read and Write-allocate */
+#define TMC_AXICTL_AXCACHE_OS (0xf << 2)
+#define TMC_AXICTL_ARCACHE_OS (0xf << 16)
+
/* TMC_FFCR - 0x304 */
#define TMC_FFCR_FLUSHMAN_BIT 6
#define TMC_FFCR_EN_FMT BIT(0)
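Given the SoC-400/SoC-600 bit layout documented above, the ETR enable path earlier in this patch composes AXICTL roughly as follows (a worked example; the initial register value of 0 is an assumption):

#include <stdio.h>

#define DEMO_AXICTL_CLEAR_MASK	 0xfbfu
#define DEMO_AXICTL_PROT_CTL_B1	 (1u << 1)
#define DEMO_AXICTL_WR_BURST_16	 0xF00u
#define DEMO_AXICTL_AXCACHE_OS	 (0xfu << 2)
#define DEMO_AXICTL_ARCACHE_MASK (0xfu << 16)
#define DEMO_AXICTL_ARCACHE_OS	 (0xfu << 16)

int main(void)
{
	unsigned int axictl = 0;	/* assume the register reads back as 0 */

	axictl &= ~DEMO_AXICTL_CLEAR_MASK;
	axictl |= DEMO_AXICTL_PROT_CTL_B1 | DEMO_AXICTL_WR_BURST_16;
	axictl |= DEMO_AXICTL_AXCACHE_OS;
	printf("SoC-400 AXICTL: 0x%x\n", axictl);	/* 0xf3e */

	/* On an ETR with separate read-cache encodings (TMC_ETR_AXI_ARCACHE): */
	axictl &= ~DEMO_AXICTL_ARCACHE_MASK;
	axictl |= DEMO_AXICTL_ARCACHE_OS;
	printf("SoC-600 AXICTL: 0x%x\n", axictl);	/* 0xf0f3e */
	return 0;
}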
@@ -69,6 +90,12 @@
#define TMC_FFCR_STOP_ON_FLUSH BIT(12)
+#define TMC_DEVID_NOSCAT BIT(24)
+
+#define TMC_DEVID_AXIAW_VALID BIT(16)
+#define TMC_DEVID_AXIAW_SHIFT 17
+#define TMC_DEVID_AXIAW_MASK 0x7f
+
enum tmc_config_type {
TMC_CONFIG_TYPE_ETB,
TMC_CONFIG_TYPE_ETR,
@@ -88,6 +115,24 @@ enum tmc_mem_intf_width {
TMC_MEM_INTF_WIDTH_256BITS = 8,
};
+/* TMC ETR Capability bit definitions */
+#define TMC_ETR_SG (0x1U << 0)
+/* ETR has separate read/write cache encodings */
+#define TMC_ETR_AXI_ARCACHE (0x1U << 1)
+/*
+ * TMC_ETR_SAVE_RESTORE - Values of RRP/RWP/STS.Full are
+ * retained when the TMC leaves the Disabled state, allowing us to
+ * resume tracing from the point where we stopped. This also implies
+ * that RRP/RWP/STS.Full must always be programmed to correct values.
+ * Unfortunately this is not advertised by the hardware, so we have to
+ * rely on the PID of the IP to detect the functionality.
+ */
+#define TMC_ETR_SAVE_RESTORE (0x1U << 2)
+
+/* Coresight SoC-600 TMC-ETR unadvertised capabilities */
+#define CORESIGHT_SOC_600_ETR_CAPS \
+ (TMC_ETR_SAVE_RESTORE | TMC_ETR_AXI_ARCACHE)
+
/**
* struct tmc_drvdata - specifics associated to an TMC component
* @base: memory mapped base address for this component.
@@ -104,6 +149,8 @@ enum tmc_mem_intf_width {
* @config_type: TMC variant, must be of type @tmc_config_type.
* @memwidth: width of the memory interface databus, in bytes.
* @trigger_cntr: amount of words to store after a trigger.
+ * @etr_caps: Bitmask of capabilities of the TMC ETR, inferred from the
+ * device configuration register (DEVID)
*/
struct tmc_drvdata {
void __iomem *base;
@@ -121,6 +168,7 @@ struct tmc_drvdata {
enum tmc_config_type config_type;
enum tmc_mem_intf_width memwidth;
u32 trigger_cntr;
+ u32 etr_caps;
};
/* Generic functions */
@@ -139,4 +187,39 @@ extern const struct coresight_ops tmc_etf_cs_ops;
int tmc_read_prepare_etr(struct tmc_drvdata *drvdata);
int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata);
extern const struct coresight_ops tmc_etr_cs_ops;
+
+
+#define TMC_REG_PAIR(name, lo_off, hi_off) \
+static inline u64 \
+tmc_read_##name(struct tmc_drvdata *drvdata) \
+{ \
+ return coresight_read_reg_pair(drvdata->base, lo_off, hi_off); \
+} \
+static inline void \
+tmc_write_##name(struct tmc_drvdata *drvdata, u64 val) \
+{ \
+ coresight_write_reg_pair(drvdata->base, val, lo_off, hi_off); \
+}
+
+TMC_REG_PAIR(rrp, TMC_RRP, TMC_RRPHI)
+TMC_REG_PAIR(rwp, TMC_RWP, TMC_RWPHI)
+TMC_REG_PAIR(dba, TMC_DBALO, TMC_DBAHI)
+
+/* Initialise the caps from unadvertised static capabilities of the device */
+static inline void tmc_etr_init_caps(struct tmc_drvdata *drvdata, u32 dev_caps)
+{
+ WARN_ON(drvdata->etr_caps);
+ drvdata->etr_caps = dev_caps;
+}
+
+static inline void tmc_etr_set_cap(struct tmc_drvdata *drvdata, u32 cap)
+{
+ drvdata->etr_caps |= cap;
+}
+
+static inline bool tmc_etr_has_cap(struct tmc_drvdata *drvdata, u32 cap)
+{
+ return !!(drvdata->etr_caps & cap);
+}
+
#endif
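TMC_REG_PAIR() above relies on coresight_read_reg_pair()/coresight_write_reg_pair(), which are defined elsewhere in the driver. A plausible sketch of such lo/hi pair accessors, offered as an assumption about their shape rather than a copy:

#include <linux/io.h>
#include <linux/types.h>

/*
 * Hypothetical lo/hi register-pair accessors: a 64-bit value is split across
 * two 32-bit registers, and a negative "hi" offset means the upper half is
 * not implemented on this revision.
 */
static inline u64 demo_read_reg_pair(void __iomem *base, s32 lo, s32 hi)
{
	u64 val;

	val = readl_relaxed(base + lo);
	if (hi >= 0)
		val |= (u64)readl_relaxed(base + hi) << 32;
	return val;
}

static inline void demo_write_reg_pair(void __iomem *base, u64 val,
					s32 lo, s32 hi)
{
	writel_relaxed((u32)val, base + lo);
	if (hi >= 0)
		writel_relaxed((u32)(val >> 32), base + hi);
}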
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index 0673baf0f2f5..d7a3e453016d 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -192,7 +192,7 @@ static const struct dev_pm_ops tpiu_dev_pm_ops = {
SET_RUNTIME_PM_OPS(tpiu_runtime_suspend, tpiu_runtime_resume, NULL)
};
-static struct amba_id tpiu_ids[] = {
+static const struct amba_id tpiu_ids[] = {
{
.id = 0x0003b912,
.mask = 0x0003ffff,
@@ -201,6 +201,11 @@ static struct amba_id tpiu_ids[] = {
.id = 0x0004b912,
.mask = 0x0007ffff,
},
+ {
+ /* Coresight SoC-600 */
+ .id = 0x000bb9e7,
+ .mask = 0x000fffff,
+ },
{ 0, 0},
};
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 6a0202b7384f..b8091bef21dc 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -53,6 +53,14 @@ static DEFINE_PER_CPU(struct list_head *, tracer_path);
*/
static struct list_head *stm_path;
+/*
+ * When synchronisation is lost, a new barrier packet needs to be inserted at
+ * the beginning of the data collected in the buffer. That way the decoder knows that
+ * it needs to look for another sync sequence.
+ */
+const u32 barrier_pkt[5] = {0x7fffffff, 0x7fffffff,
+ 0x7fffffff, 0x7fffffff, 0x0};
+
static int coresight_id_match(struct device *dev, void *data)
{
int trace_id, i_trace_id;
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index 8da567abc0ce..1a023e30488c 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -101,17 +101,53 @@ out_pm:
return ret;
}
+static void intel_th_device_remove(struct intel_th_device *thdev);
+
static int intel_th_remove(struct device *dev)
{
struct intel_th_driver *thdrv = to_intel_th_driver(dev->driver);
struct intel_th_device *thdev = to_intel_th_device(dev);
- struct intel_th_device *hub = to_intel_th_device(dev->parent);
+ struct intel_th_device *hub = to_intel_th_hub(thdev);
int err;
if (thdev->type == INTEL_TH_SWITCH) {
+ struct intel_th *th = to_intel_th(hub);
+ int i, lowest;
+
+ /* disconnect outputs */
err = device_for_each_child(dev, thdev, intel_th_child_remove);
if (err)
return err;
+
+ /*
+ * Remove the outputs, that is, the hub's children: they are created
+ * at the hub's probe time by having the hub call
+ * intel_th_output_enable() for each of them.
+ */
+ for (i = 0, lowest = -1; i < th->num_thdevs; i++) {
+ /*
+ * Move the non-output devices from higher up the
+ * th->thdev[] array to lower positions to maintain
+ * a contiguous array.
+ */
+ if (th->thdev[i]->type != INTEL_TH_OUTPUT) {
+ if (lowest >= 0) {
+ th->thdev[lowest] = th->thdev[i];
+ th->thdev[i] = NULL;
+ ++lowest;
+ }
+
+ continue;
+ }
+
+ if (lowest == -1)
+ lowest = i;
+
+ intel_th_device_remove(th->thdev[i]);
+ th->thdev[i] = NULL;
+ }
+
+ th->num_thdevs = lowest;
}
if (thdrv->attr_group)
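The hub-removal loop above tears down OUTPUT subdevices and compacts th->thdev[] so the surviving entries stay contiguous at the front. The same compaction in isolation, as a hedged sketch with hypothetical demo_* types:

#include <stddef.h>

enum demo_type { DEMO_SOURCE, DEMO_SWITCH, DEMO_OUTPUT };

struct demo_dev {
	enum demo_type type;
};

/*
 * Drop all DEMO_OUTPUT entries from dev[0..num), sliding the remaining
 * entries down so the live part of the array stays contiguous.
 * Returns the new element count (unchanged if no OUTPUT entries existed).
 */
static size_t demo_compact(struct demo_dev **dev, size_t num)
{
	long lowest = -1;	/* first slot vacated by an OUTPUT entry */
	size_t i;

	for (i = 0; i < num; i++) {
		if (dev[i]->type != DEMO_OUTPUT) {
			if (lowest >= 0) {
				dev[lowest] = dev[i];
				dev[i] = NULL;
				++lowest;
			}
			continue;
		}

		if (lowest == -1)
			lowest = i;

		/* a real driver would tear the device down here */
		dev[i] = NULL;
	}

	return lowest >= 0 ? (size_t)lowest : num;
}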
@@ -156,21 +192,6 @@ static struct device_type intel_th_source_device_type = {
.release = intel_th_device_release,
};
-static struct intel_th *to_intel_th(struct intel_th_device *thdev)
-{
- /*
- * subdevice tree is flat: if this one is not a switch, its
- * parent must be
- */
- if (thdev->type != INTEL_TH_SWITCH)
- thdev = to_intel_th_hub(thdev);
-
- if (WARN_ON_ONCE(!thdev || thdev->type != INTEL_TH_SWITCH))
- return NULL;
-
- return dev_get_drvdata(thdev->dev.parent);
-}
-
static char *intel_th_output_devnode(struct device *dev, umode_t *mode,
kuid_t *uid, kgid_t *gid)
{
@@ -205,6 +226,7 @@ static int intel_th_output_activate(struct intel_th_device *thdev)
{
struct intel_th_driver *thdrv =
to_intel_th_driver_or_null(thdev->dev.driver);
+ struct intel_th *th = to_intel_th(thdev);
int ret = 0;
if (!thdrv)
@@ -215,15 +237,28 @@ static int intel_th_output_activate(struct intel_th_device *thdev)
pm_runtime_get_sync(&thdev->dev);
+ if (th->activate)
+ ret = th->activate(th);
+ if (ret)
+ goto fail_put;
+
if (thdrv->activate)
ret = thdrv->activate(thdev);
else
intel_th_trace_enable(thdev);
- if (ret) {
- pm_runtime_put(&thdev->dev);
- module_put(thdrv->driver.owner);
- }
+ if (ret)
+ goto fail_deactivate;
+
+ return 0;
+
+fail_deactivate:
+ if (th->deactivate)
+ th->deactivate(th);
+
+fail_put:
+ pm_runtime_put(&thdev->dev);
+ module_put(thdrv->driver.owner);
return ret;
}
@@ -232,6 +267,7 @@ static void intel_th_output_deactivate(struct intel_th_device *thdev)
{
struct intel_th_driver *thdrv =
to_intel_th_driver_or_null(thdev->dev.driver);
+ struct intel_th *th = to_intel_th(thdev);
if (!thdrv)
return;
@@ -241,6 +277,9 @@ static void intel_th_output_deactivate(struct intel_th_device *thdev)
else
intel_th_trace_disable(thdev);
+ if (th->deactivate)
+ th->deactivate(th);
+
pm_runtime_put(&thdev->dev);
module_put(thdrv->driver.owner);
}
@@ -326,10 +365,10 @@ intel_th_device_alloc(struct intel_th *th, unsigned int type, const char *name,
struct device *parent;
struct intel_th_device *thdev;
- if (type == INTEL_TH_SWITCH)
- parent = th->dev;
- else
+ if (type == INTEL_TH_OUTPUT)
parent = &th->hub->dev;
+ else
+ parent = th->dev;
thdev = kzalloc(sizeof(*thdev) + strlen(name) + 1, GFP_KERNEL);
if (!thdev)
@@ -392,13 +431,14 @@ static const struct intel_th_subdevice {
unsigned otype;
unsigned scrpd;
int id;
-} intel_th_subdevices[TH_SUBDEVICE_MAX] = {
+} intel_th_subdevices[] = {
{
.nres = 1,
.res = {
{
+ /* Handle TSCU from GTH driver */
.start = REG_GTH_OFFSET,
- .end = REG_GTH_OFFSET + REG_GTH_LENGTH - 1,
+ .end = REG_TSCU_OFFSET + REG_TSCU_LENGTH - 1,
.flags = IORESOURCE_MEM,
},
},
@@ -483,6 +523,21 @@ static const struct intel_th_subdevice {
.nres = 1,
.res = {
{
+ .start = REG_PTI_OFFSET,
+ .end = REG_PTI_OFFSET + REG_PTI_LENGTH - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ },
+ .id = -1,
+ .name = "lpp",
+ .type = INTEL_TH_OUTPUT,
+ .otype = GTH_LPP,
+ .scrpd = SCRPD_PTI_IS_PRIM_DEST,
+ },
+ {
+ .nres = 1,
+ .res = {
+ {
.start = REG_DCIH_OFFSET,
.end = REG_DCIH_OFFSET + REG_DCIH_LENGTH - 1,
.flags = IORESOURCE_MEM,
@@ -526,98 +581,182 @@ static inline void intel_th_request_hub_module_flush(struct intel_th *th)
}
#endif /* CONFIG_MODULES */
-static int intel_th_populate(struct intel_th *th, struct resource *devres,
- unsigned int ndevres, int irq)
+static struct intel_th_device *
+intel_th_subdevice_alloc(struct intel_th *th,
+ const struct intel_th_subdevice *subdev)
{
+ struct intel_th_device *thdev;
struct resource res[3];
unsigned int req = 0;
- int src, dst, err;
+ int r, err;
- /* create devices for each intel_th_subdevice */
- for (src = 0, dst = 0; src < ARRAY_SIZE(intel_th_subdevices); src++) {
- const struct intel_th_subdevice *subdev =
- &intel_th_subdevices[src];
- struct intel_th_device *thdev;
- int r;
+ thdev = intel_th_device_alloc(th, subdev->type, subdev->name,
+ subdev->id);
+ if (!thdev)
+ return ERR_PTR(-ENOMEM);
- /* only allow SOURCE and SWITCH devices in host mode */
- if (host_mode && subdev->type == INTEL_TH_OUTPUT)
- continue;
+ thdev->drvdata = th->drvdata;
+
+ memcpy(res, subdev->res,
+ sizeof(struct resource) * subdev->nres);
+
+ for (r = 0; r < subdev->nres; r++) {
+ struct resource *devres = th->resource;
+ int bar = TH_MMIO_CONFIG;
+
+ /*
+ * Take .end == 0 to mean 'take the whole bar',
+ * .start then tells us which bar it is. Default to
+ * TH_MMIO_CONFIG.
+ */
+ if (!res[r].end && res[r].flags == IORESOURCE_MEM) {
+ bar = res[r].start;
+ res[r].start = 0;
+ res[r].end = resource_size(&devres[bar]) - 1;
+ }
+
+ if (res[r].flags & IORESOURCE_MEM) {
+ res[r].start += devres[bar].start;
+ res[r].end += devres[bar].start;
- thdev = intel_th_device_alloc(th, subdev->type, subdev->name,
- subdev->id);
- if (!thdev) {
- err = -ENOMEM;
- goto kill_subdevs;
+ dev_dbg(th->dev, "%s:%d @ %pR\n",
+ subdev->name, r, &res[r]);
+ } else if (res[r].flags & IORESOURCE_IRQ) {
+ res[r].start = th->irq;
}
+ }
- memcpy(res, subdev->res,
- sizeof(struct resource) * subdev->nres);
+ err = intel_th_device_add_resources(thdev, res, subdev->nres);
+ if (err)
+ goto fail_put_device;
- for (r = 0; r < subdev->nres; r++) {
- int bar = TH_MMIO_CONFIG;
+ if (subdev->type == INTEL_TH_OUTPUT) {
+ thdev->dev.devt = MKDEV(th->major, th->num_thdevs);
+ thdev->output.type = subdev->otype;
+ thdev->output.port = -1;
+ thdev->output.scratchpad = subdev->scrpd;
+ } else if (subdev->type == INTEL_TH_SWITCH) {
+ thdev->host_mode = host_mode;
+ th->hub = thdev;
+ }
- /*
- * Take .end == 0 to mean 'take the whole bar',
- * .start then tells us which bar it is. Default to
- * TH_MMIO_CONFIG.
- */
- if (!res[r].end && res[r].flags == IORESOURCE_MEM) {
- bar = res[r].start;
- res[r].start = 0;
- res[r].end = resource_size(&devres[bar]) - 1;
- }
+ err = device_add(&thdev->dev);
+ if (err)
+ goto fail_free_res;
- if (res[r].flags & IORESOURCE_MEM) {
- res[r].start += devres[bar].start;
- res[r].end += devres[bar].start;
+ /* need switch driver to be loaded to enumerate the rest */
+ if (subdev->type == INTEL_TH_SWITCH && !req) {
+ err = intel_th_request_hub_module(th);
+ if (!err)
+ req++;
+ }
- dev_dbg(th->dev, "%s:%d @ %pR\n",
- subdev->name, r, &res[r]);
- } else if (res[r].flags & IORESOURCE_IRQ) {
- res[r].start = irq;
- }
- }
+ return thdev;
- err = intel_th_device_add_resources(thdev, res, subdev->nres);
- if (err) {
- put_device(&thdev->dev);
- goto kill_subdevs;
- }
+fail_free_res:
+ kfree(thdev->resource);
- if (subdev->type == INTEL_TH_OUTPUT) {
- thdev->dev.devt = MKDEV(th->major, dst);
- thdev->output.type = subdev->otype;
- thdev->output.port = -1;
- thdev->output.scratchpad = subdev->scrpd;
- } else if (subdev->type == INTEL_TH_SWITCH) {
- thdev->host_mode = host_mode;
- }
+fail_put_device:
+ put_device(&thdev->dev);
+
+ return ERR_PTR(err);
+}
- err = device_add(&thdev->dev);
- if (err) {
- put_device(&thdev->dev);
- goto kill_subdevs;
+/**
+ * intel_th_output_enable() - find and enable a device for a given output type
+ * @th: Intel TH instance
+ * @otype: output type
+ *
+ * Go through the unallocated output devices, find the first one whose type
+ * matches @otype and instantiate it. These devices are removed when the hub
+ * device is removed, see intel_th_remove().
+ */
+int intel_th_output_enable(struct intel_th *th, unsigned int otype)
+{
+ struct intel_th_device *thdev;
+ int src = 0, dst = 0;
+
+ for (src = 0, dst = 0; dst <= th->num_thdevs; src++, dst++) {
+ for (; src < ARRAY_SIZE(intel_th_subdevices); src++) {
+ if (intel_th_subdevices[src].type != INTEL_TH_OUTPUT)
+ continue;
+
+ if (intel_th_subdevices[src].otype != otype)
+ continue;
+
+ break;
}
- /* need switch driver to be loaded to enumerate the rest */
- if (subdev->type == INTEL_TH_SWITCH && !req) {
- th->hub = thdev;
- err = intel_th_request_hub_module(th);
- if (!err)
- req++;
+ /* no unallocated matching subdevices */
+ if (src == ARRAY_SIZE(intel_th_subdevices))
+ return -ENODEV;
+
+ for (; dst < th->num_thdevs; dst++) {
+ if (th->thdev[dst]->type != INTEL_TH_OUTPUT)
+ continue;
+
+ if (th->thdev[dst]->output.type != otype)
+ continue;
+
+ break;
}
- th->thdev[dst++] = thdev;
+ /*
+ * intel_th_subdevices[src] matches our requirements and is
+ * not matched in th::thdev[]
+ */
+ if (dst == th->num_thdevs)
+ goto found;
}
+ return -ENODEV;
+
+found:
+ thdev = intel_th_subdevice_alloc(th, &intel_th_subdevices[src]);
+ if (IS_ERR(thdev))
+ return PTR_ERR(thdev);
+
+ th->thdev[th->num_thdevs++] = thdev;
+
return 0;
+}
+EXPORT_SYMBOL_GPL(intel_th_output_enable);
+
+static int intel_th_populate(struct intel_th *th)
+{
+ int src;
+
+ /* create devices for each intel_th_subdevice */
+ for (src = 0; src < ARRAY_SIZE(intel_th_subdevices); src++) {
+ const struct intel_th_subdevice *subdev =
+ &intel_th_subdevices[src];
+ struct intel_th_device *thdev;
+
+ /* only allow SOURCE and SWITCH devices in host mode */
+ if (host_mode && subdev->type == INTEL_TH_OUTPUT)
+ continue;
-kill_subdevs:
- for (; dst >= 0; dst--)
- intel_th_device_remove(th->thdev[dst]);
+ /*
+ * don't enable port OUTPUTs in this path; SWITCH enables them
+ * via intel_th_output_enable()
+ */
+ if (subdev->type == INTEL_TH_OUTPUT &&
+ subdev->otype != GTH_NONE)
+ continue;
+
+ thdev = intel_th_subdevice_alloc(th, subdev);
+ /* note: caller should free subdevices from th::thdev[] */
+ if (IS_ERR(thdev))
+ return PTR_ERR(thdev);
+
+ th->thdev[th->num_thdevs++] = thdev;
+ }
- return err;
+ return 0;
}
static int match_devt(struct device *dev, void *data)
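intel_th_subdevice_alloc() above keeps the existing convention that a memory resource template with .end == 0 means "the whole BAR", with .start naming which BAR. A worked example of that fixup with made-up BAR addresses:

#include <stdint.h>
#include <stdio.h>

struct demo_resource {
	uint64_t start;
	uint64_t end;
};

int main(void)
{
	/* Hypothetical PCI BARs: index 0 (config space) and index 1 (SW space). */
	struct demo_resource bars[] = {
		{ .start = 0xfe000000, .end = 0xfe0fffff },	/* 1 MiB */
		{ .start = 0xfe100000, .end = 0xfe5fffff },	/* 5 MiB */
	};
	/* Subdevice template: ".start = 1, .end = 0" means "all of BAR 1". */
	struct demo_resource res = { .start = 1, .end = 0 };
	unsigned int bar = 0;	/* default, like TH_MMIO_CONFIG */

	if (!res.end) {
		bar = res.start;
		res.start = 0;
		res.end = bars[bar].end - bars[bar].start;	/* size - 1 */
	}
	res.start += bars[bar].start;
	res.end += bars[bar].start;

	/* Prints 0xfe100000-0xfe5fffff for the template above. */
	printf("mapped to %#llx-%#llx\n",
	       (unsigned long long)res.start, (unsigned long long)res.end);
	return 0;
}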
@@ -670,8 +809,8 @@ static const struct file_operations intel_th_output_fops = {
* @irq: irq number
*/
struct intel_th *
-intel_th_alloc(struct device *dev, struct resource *devres,
- unsigned int ndevres, int irq)
+intel_th_alloc(struct device *dev, struct intel_th_drvdata *drvdata,
+ struct resource *devres, unsigned int ndevres, int irq)
{
struct intel_th *th;
int err;
@@ -693,6 +832,11 @@ intel_th_alloc(struct device *dev, struct resource *devres,
goto err_ida;
}
th->dev = dev;
+ th->drvdata = drvdata;
+
+ th->resource = devres;
+ th->num_resources = ndevres;
+ th->irq = irq;
dev_set_drvdata(dev, th);
@@ -700,18 +844,15 @@ intel_th_alloc(struct device *dev, struct resource *devres,
pm_runtime_put(dev);
pm_runtime_allow(dev);
- err = intel_th_populate(th, devres, ndevres, irq);
- if (err)
- goto err_chrdev;
+ err = intel_th_populate(th);
+ if (err) {
+ /* free the subdevices and undo everything */
+ intel_th_free(th);
+ return ERR_PTR(err);
+ }
return th;
-err_chrdev:
- pm_runtime_forbid(dev);
-
- __unregister_chrdev(th->major, 0, TH_POSSIBLE_OUTPUTS,
- "intel_th/output");
-
err_ida:
ida_simple_remove(&intel_th_ida, th->id);
@@ -727,11 +868,15 @@ void intel_th_free(struct intel_th *th)
int i;
intel_th_request_hub_module_flush(th);
- for (i = 0; i < TH_SUBDEVICE_MAX; i++)
- if (th->thdev[i] && th->thdev[i] != th->hub)
- intel_th_device_remove(th->thdev[i]);
intel_th_device_remove(th->hub);
+ for (i = 0; i < th->num_thdevs; i++) {
+ if (th->thdev[i] != th->hub)
+ intel_th_device_remove(th->thdev[i]);
+ th->thdev[i] = NULL;
+ }
+
+ th->num_thdevs = 0;
pm_runtime_get_sync(th->dev);
pm_runtime_forbid(th->dev);
diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c
index dd32d0bad687..018678ec3c13 100644
--- a/drivers/hwtracing/intel_th/gth.c
+++ b/drivers/hwtracing/intel_th/gth.c
@@ -285,16 +285,16 @@ gth_output_parm_get(struct gth_device *gth, int port, unsigned int parm)
*/
static int intel_th_gth_reset(struct gth_device *gth)
{
- u32 scratchpad;
+ u32 reg;
int port, i;
- scratchpad = ioread32(gth->base + REG_GTH_SCRPD0);
- if (scratchpad & SCRPD_DEBUGGER_IN_USE)
+ reg = ioread32(gth->base + REG_GTH_SCRPD0);
+ if (reg & SCRPD_DEBUGGER_IN_USE)
return -EBUSY;
/* Always save/restore STH and TU registers in S0ix entry/exit */
- scratchpad |= SCRPD_STH_IS_ENABLED | SCRPD_TRIGGER_IS_ENABLED;
- iowrite32(scratchpad, gth->base + REG_GTH_SCRPD0);
+ reg |= SCRPD_STH_IS_ENABLED | SCRPD_TRIGGER_IS_ENABLED;
+ iowrite32(reg, gth->base + REG_GTH_SCRPD0);
/* output ports */
for (port = 0; port < 8; port++) {
@@ -512,6 +512,15 @@ static void intel_th_gth_disable(struct intel_th_device *thdev,
iowrite32(reg, gth->base + REG_GTH_SCRPD0);
}
+static void gth_tscu_resync(struct gth_device *gth)
+{
+ u32 reg;
+
+ reg = ioread32(gth->base + REG_TSCU_TSUCTRL);
+ reg &= ~TSUCTRL_CTCRESYNC;
+ iowrite32(reg, gth->base + REG_TSCU_TSUCTRL);
+}
+
/**
* intel_th_gth_enable() - enable tracing to an output device
* @thdev: GTH device
@@ -524,6 +533,7 @@ static void intel_th_gth_enable(struct intel_th_device *thdev,
struct intel_th_output *output)
{
struct gth_device *gth = dev_get_drvdata(&thdev->dev);
+ struct intel_th *th = to_intel_th(thdev);
u32 scr = 0xfc0000, scrpd;
int master;
@@ -539,6 +549,9 @@ static void intel_th_gth_enable(struct intel_th_device *thdev,
output->active = true;
spin_unlock(&gth->gth_lock);
+ if (INTEL_TH_CAP(th, tscu_enable))
+ gth_tscu_resync(gth);
+
scrpd = ioread32(gth->base + REG_GTH_SCRPD0);
scrpd |= output->scratchpad;
iowrite32(scrpd, gth->base + REG_GTH_SCRPD0);
@@ -639,6 +652,7 @@ intel_th_gth_set_output(struct intel_th_device *thdev, unsigned int master)
static int intel_th_gth_probe(struct intel_th_device *thdev)
{
struct device *dev = &thdev->dev;
+ struct intel_th *th = dev_get_drvdata(dev->parent);
struct gth_device *gth;
struct resource *res;
void __iomem *base;
@@ -660,6 +674,8 @@ static int intel_th_gth_probe(struct intel_th_device *thdev)
gth->base = base;
spin_lock_init(&gth->gth_lock);
+ dev_set_drvdata(dev, gth);
+
/*
* Host mode can be signalled via SW means or via SCRPD_DEBUGGER_IN_USE
* bit. Either way, don't reset HW in this case, and don't export any
@@ -667,7 +683,7 @@ static int intel_th_gth_probe(struct intel_th_device *thdev)
* drivers to ports, see intel_th_gth_assign().
*/
if (thdev->host_mode)
- goto done;
+ return 0;
ret = intel_th_gth_reset(gth);
if (ret) {
@@ -676,7 +692,7 @@ static int intel_th_gth_probe(struct intel_th_device *thdev)
thdev->host_mode = true;
- goto done;
+ return 0;
}
for (i = 0; i < TH_CONFIGURABLE_MASTERS + 1; i++)
@@ -687,6 +703,13 @@ static int intel_th_gth_probe(struct intel_th_device *thdev)
gth->output[i].index = i;
gth->output[i].port_type =
gth_output_parm_get(gth, i, TH_OUTPUT_PARM(port));
+ if (gth->output[i].port_type == GTH_NONE)
+ continue;
+
+ ret = intel_th_output_enable(th, gth->output[i].port_type);
+ /* -ENODEV is ok, we just won't have that device enumerated */
+ if (ret && ret != -ENODEV)
+ return ret;
}
if (intel_th_output_attributes(gth) ||
@@ -698,9 +721,6 @@ static int intel_th_gth_probe(struct intel_th_device *thdev)
return -ENOMEM;
}
-done:
- dev_set_drvdata(dev, gth);
-
return 0;
}
diff --git a/drivers/hwtracing/intel_th/gth.h b/drivers/hwtracing/intel_th/gth.h
index 56f0d2620577..f3d234251a12 100644
--- a/drivers/hwtracing/intel_th/gth.h
+++ b/drivers/hwtracing/intel_th/gth.h
@@ -55,9 +55,14 @@ enum {
REG_GTH_SCRPD1 = 0xe4, /* ScratchPad[1] */
REG_GTH_SCRPD2 = 0xe8, /* ScratchPad[2] */
REG_GTH_SCRPD3 = 0xec, /* ScratchPad[3] */
+ REG_TSCU_TSUCTRL = 0x2000, /* TSCU control register */
+ REG_TSCU_TSCUSTAT = 0x2004, /* TSCU status register */
};
/* waiting for Pipeline Empty bit(s) to assert for GTH */
#define GTH_PLE_WAITLOOP_DEPTH 10000
+#define TSUCTRL_CTCRESYNC BIT(0)
+#define TSCUSTAT_CTCSYNCING BIT(1)
+
#endif /* __INTEL_TH_GTH_H__ */
diff --git a/drivers/hwtracing/intel_th/intel_th.h b/drivers/hwtracing/intel_th/intel_th.h
index 3096e7054f6d..99ad563fc40d 100644
--- a/drivers/hwtracing/intel_th/intel_th.h
+++ b/drivers/hwtracing/intel_th/intel_th.h
@@ -48,8 +48,19 @@ struct intel_th_output {
};
/**
+ * struct intel_th_drvdata - describes hardware capabilities and quirks
+ * @tscu_enable: device needs SW to enable time stamping unit
+ */
+struct intel_th_drvdata {
+ unsigned int tscu_enable : 1;
+};
+
+#define INTEL_TH_CAP(_th, _cap) ((_th)->drvdata ? (_th)->drvdata->_cap : 0)
+
+/**
* struct intel_th_device - device on the intel_th bus
* @dev: device
+ * @drvdata: hardware capabilities/quirks
* @resource: array of resources available to this device
* @num_resources: number of resources in @resource array
* @type: INTEL_TH_{SOURCE,OUTPUT,SWITCH}
@@ -59,11 +70,12 @@ struct intel_th_output {
* @name: device name to match the driver
*/
struct intel_th_device {
- struct device dev;
- struct resource *resource;
- unsigned int num_resources;
- unsigned int type;
- int id;
+ struct device dev;
+ struct intel_th_drvdata *drvdata;
+ struct resource *resource;
+ unsigned int num_resources;
+ unsigned int type;
+ int id;
/* INTEL_TH_SWITCH specific */
bool host_mode;
@@ -96,6 +108,17 @@ intel_th_device_get_resource(struct intel_th_device *thdev, unsigned int type,
return NULL;
}
+/*
+ * GTH, output ports configuration
+ */
+enum {
+ GTH_NONE = 0,
+ GTH_MSU, /* memory/usb */
+ GTH_CTP, /* Common Trace Port */
+ GTH_LPP, /* Low Power Path */
+ GTH_PTI, /* MIPI-PTI */
+};
+
/**
* intel_th_output_assigned() - if an output device is assigned to a switch port
* @thdev: the output device
@@ -106,7 +129,8 @@ static inline bool
intel_th_output_assigned(struct intel_th_device *thdev)
{
return thdev->type == INTEL_TH_OUTPUT &&
- thdev->output.port >= 0;
+ (thdev->output.port >= 0 ||
+ thdev->output.type == GTH_NONE);
}
/**
@@ -161,8 +185,18 @@ struct intel_th_driver {
#define to_intel_th_driver_or_null(_d) \
((_d) ? to_intel_th_driver(_d) : NULL)
+/*
+ * Subdevice tree structure is as follows:
+ * + struct intel_th device (pci; dev_{get,set}_drvdata())
+ * + struct intel_th_device INTEL_TH_SWITCH (GTH)
+ * + struct intel_th_device INTEL_TH_OUTPUT (MSU, PTI)
+ * + struct intel_th_device INTEL_TH_SOURCE (STH)
+ *
+ * In other words, INTEL_TH_OUTPUT devices are children of INTEL_TH_SWITCH;
+ * INTEL_TH_SWITCH and INTEL_TH_SOURCE are children of the intel_th device.
+ */
static inline struct intel_th_device *
-to_intel_th_hub(struct intel_th_device *thdev)
+to_intel_th_parent(struct intel_th_device *thdev)
{
struct device *parent = thdev->dev.parent;
@@ -172,9 +206,20 @@ to_intel_th_hub(struct intel_th_device *thdev)
return to_intel_th_device(parent);
}
+static inline struct intel_th *to_intel_th(struct intel_th_device *thdev)
+{
+ if (thdev->type == INTEL_TH_OUTPUT)
+ thdev = to_intel_th_parent(thdev);
+
+ if (WARN_ON_ONCE(!thdev || thdev->type == INTEL_TH_OUTPUT))
+ return NULL;
+
+ return dev_get_drvdata(thdev->dev.parent);
+}
+
struct intel_th *
-intel_th_alloc(struct device *dev, struct resource *devres,
- unsigned int ndevres, int irq);
+intel_th_alloc(struct device *dev, struct intel_th_drvdata *drvdata,
+ struct resource *devres, unsigned int ndevres, int irq);
void intel_th_free(struct intel_th *th);
int intel_th_driver_register(struct intel_th_driver *thdrv);
@@ -184,6 +229,7 @@ int intel_th_trace_enable(struct intel_th_device *thdev);
int intel_th_trace_disable(struct intel_th_device *thdev);
int intel_th_set_output(struct intel_th_device *thdev,
unsigned int master);
+int intel_th_output_enable(struct intel_th *th, unsigned int otype);
enum {
TH_MMIO_CONFIG = 0,
@@ -191,8 +237,9 @@ enum {
TH_MMIO_END,
};
-#define TH_SUBDEVICE_MAX 6
#define TH_POSSIBLE_OUTPUTS 8
+/* Total number of possible subdevices: outputs + GTH + STH */
+#define TH_SUBDEVICE_MAX (TH_POSSIBLE_OUTPUTS + 2)
#define TH_CONFIGURABLE_MASTERS 256
#define TH_MSC_MAX 2
@@ -201,6 +248,10 @@ enum {
* @dev: driver core's device
* @thdev: subdevices
* @hub: "switch" subdevice (GTH)
+ * @resource: resources of the entire controller
+ * @num_thdevs: number of devices in the @thdev array
+ * @num_resources: number of resources in the @resource array
+ * @irq: irq number
* @id: this Intel TH controller's device ID in the system
* @major: device node major for output devices
*/
@@ -209,6 +260,14 @@ struct intel_th {
struct intel_th_device *thdev[TH_SUBDEVICE_MAX];
struct intel_th_device *hub;
+ struct intel_th_drvdata *drvdata;
+
+ struct resource *resource;
+ int (*activate)(struct intel_th *);
+ void (*deactivate)(struct intel_th *);
+ unsigned int num_thdevs;
+ unsigned int num_resources;
+ int irq;
int id;
int major;
@@ -220,6 +279,17 @@ struct intel_th {
#endif
};
+static inline struct intel_th_device *
+to_intel_th_hub(struct intel_th_device *thdev)
+{
+ if (thdev->type == INTEL_TH_SWITCH)
+ return thdev;
+ else if (thdev->type == INTEL_TH_OUTPUT)
+ return to_intel_th_parent(thdev);
+
+ return to_intel_th(thdev)->hub;
+}
+
/*
* Register windows
*/
@@ -228,6 +298,10 @@ enum {
REG_GTH_OFFSET = 0x0000,
REG_GTH_LENGTH = 0x2000,
+ /* Timestamp counter unit (TSCU) */
+ REG_TSCU_OFFSET = 0x2000,
+ REG_TSCU_LENGTH = 0x1000,
+
/* Software Trace Hub (STH) [0x4000..0x4fff] */
REG_STH_OFFSET = 0x4000,
REG_STH_LENGTH = 0x2000,
@@ -250,16 +324,6 @@ enum {
};
/*
- * GTH, output ports configuration
- */
-enum {
- GTH_NONE = 0,
- GTH_MSU, /* memory/usb */
- GTH_CTP, /* Common Trace Port */
- GTH_PTI = 4, /* MIPI-PTI */
-};
-
-/*
* Scratchpad bits: tell firmware and external debuggers
* what we are up to.
*/
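INTEL_TH_CAP() above lets subdevice code test glue-layer quirks without caring which bus the controller sits on; a NULL drvdata simply reads as "no quirks". The pattern in isolation, with a hypothetical extra flag that is not part of this patch:

/* Hypothetical quirk set; only tscu_enable mirrors this patch. */
struct demo_th_drvdata {
	unsigned int tscu_enable : 1;
	unsigned int needs_extra_reset : 1;	/* assumption, for illustration */
};

static const struct demo_th_drvdata demo_glue_quirks = {
	.tscu_enable = 1,
};

/* Mirrors INTEL_TH_CAP(): missing drvdata means no capabilities. */
#define DEMO_TH_CAP(drvdata, cap)	((drvdata) ? (drvdata)->cap : 0)

static int demo_activate(const struct demo_th_drvdata *drvdata)
{
	if (DEMO_TH_CAP(drvdata, tscu_enable)) {
		/* enable the timestamp unit here, as the PCI glue does */
	}
	return 0;
}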
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index dbbe31df74df..dfb57eaa9f22 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -709,17 +709,17 @@ static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
}
for (i = 0; i < nr_blocks; i++) {
- win->block[i].bdesc = dma_alloc_coherent(msc_dev(msc), size,
- &win->block[i].addr,
- GFP_KERNEL);
+ win->block[i].bdesc =
+ dma_alloc_coherent(msc_dev(msc)->parent->parent, size,
+ &win->block[i].addr, GFP_KERNEL);
+
+ if (!win->block[i].bdesc)
+ goto err_nomem;
#ifdef CONFIG_X86
/* Set the page as uncached */
set_memory_uc((unsigned long)win->block[i].bdesc, 1);
#endif
-
- if (!win->block[i].bdesc)
- goto err_nomem;
}
win->msc = msc;
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index 590cf90dd21a..bc9cebc30526 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -27,9 +27,53 @@
#define BAR_MASK (BIT(TH_MMIO_CONFIG) | BIT(TH_MMIO_SW))
+#define PCI_REG_NPKDSC 0x80
+#define NPKDSC_TSACT BIT(5)
+
+static int intel_th_pci_activate(struct intel_th *th)
+{
+ struct pci_dev *pdev = to_pci_dev(th->dev);
+ u32 npkdsc;
+ int err;
+
+ if (!INTEL_TH_CAP(th, tscu_enable))
+ return 0;
+
+ err = pci_read_config_dword(pdev, PCI_REG_NPKDSC, &npkdsc);
+ if (!err) {
+ npkdsc |= NPKDSC_TSACT;
+ err = pci_write_config_dword(pdev, PCI_REG_NPKDSC, npkdsc);
+ }
+
+ if (err)
+ dev_err(&pdev->dev, "failed to read NPKDSC register\n");
+
+ return err;
+}
+
+static void intel_th_pci_deactivate(struct intel_th *th)
+{
+ struct pci_dev *pdev = to_pci_dev(th->dev);
+ u32 npkdsc;
+ int err;
+
+ if (!INTEL_TH_CAP(th, tscu_enable))
+ return;
+
+ err = pci_read_config_dword(pdev, PCI_REG_NPKDSC, &npkdsc);
+ if (!err) {
+ npkdsc &= ~NPKDSC_TSACT;
+ err = pci_write_config_dword(pdev, PCI_REG_NPKDSC, npkdsc);
+ }
+
+ if (err)
+ dev_err(&pdev->dev, "failed to read NPKDSC register\n");
+}
+
static int intel_th_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
+ struct intel_th_drvdata *drvdata = (void *)id->driver_data;
struct intel_th *th;
int err;
@@ -41,11 +85,16 @@ static int intel_th_pci_probe(struct pci_dev *pdev,
if (err)
return err;
- th = intel_th_alloc(&pdev->dev, pdev->resource,
+ th = intel_th_alloc(&pdev->dev, drvdata, pdev->resource,
DEVICE_COUNT_RESOURCE, pdev->irq);
if (IS_ERR(th))
return PTR_ERR(th);
+ th->activate = intel_th_pci_activate;
+ th->deactivate = intel_th_pci_deactivate;
+
+ pci_set_master(pdev);
+
return 0;
}
@@ -56,6 +105,10 @@ static void intel_th_pci_remove(struct pci_dev *pdev)
intel_th_free(th);
}
+static const struct intel_th_drvdata intel_th_2x = {
+ .tscu_enable = 1,
+};
+
static const struct pci_device_id intel_th_pci_id_table[] = {
{
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x9d26),
@@ -93,7 +146,17 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
{
/* Gemini Lake */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x318e),
- .driver_data = (kernel_ulong_t)0,
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
+ /* Cannon Lake H */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa326),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
+ /* Cannon Lake LP */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x9da6),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
},
{ 0 },
};
diff --git a/drivers/hwtracing/intel_th/pti.c b/drivers/hwtracing/intel_th/pti.c
index 35738b5bfccd..e96a1fcb57b2 100644
--- a/drivers/hwtracing/intel_th/pti.c
+++ b/drivers/hwtracing/intel_th/pti.c
@@ -1,7 +1,7 @@
/*
* Intel(R) Trace Hub PTI output driver
*
- * Copyright (C) 2014-2015 Intel Corporation.
+ * Copyright (C) 2014-2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -34,6 +34,8 @@ struct pti_device {
unsigned int freeclk;
unsigned int clkdiv;
unsigned int patgen;
+ unsigned int lpp_dest_mask;
+ unsigned int lpp_dest;
};
/* map PTI widths to MODE settings of PTI_CTL register */
@@ -163,6 +165,7 @@ static int intel_th_pti_activate(struct intel_th_device *thdev)
ctl |= PTI_FCEN;
ctl |= pti->mode << __ffs(PTI_MODE);
ctl |= pti->clkdiv << __ffs(PTI_CLKDIV);
+ ctl |= pti->lpp_dest << __ffs(LPP_DEST);
iowrite32(ctl, pti->base + REG_PTI_CTL);
@@ -192,6 +195,15 @@ static void read_hw_config(struct pti_device *pti)
pti->mode = pti_width_mode(4);
if (!pti->clkdiv)
pti->clkdiv = 1;
+
+ if (pti->thdev->output.type == GTH_LPP) {
+ if (ctl & LPP_PTIPRESENT)
+ pti->lpp_dest_mask |= LPP_DEST_PTI;
+ if (ctl & LPP_BSSBPRESENT)
+ pti->lpp_dest_mask |= LPP_DEST_EXI;
+ if (ctl & LPP_DEST)
+ pti->lpp_dest = 1;
+ }
}
static int intel_th_pti_probe(struct intel_th_device *thdev)
@@ -239,10 +251,103 @@ static struct intel_th_driver intel_th_pti_driver = {
},
};
-module_driver(intel_th_pti_driver,
- intel_th_driver_register,
- intel_th_driver_unregister);
+static const char * const lpp_dest_str[] = { "pti", "exi" };
+
+static ssize_t lpp_dest_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct pti_device *pti = dev_get_drvdata(dev);
+ ssize_t ret = 0;
+ int i;
+
+ for (i = ARRAY_SIZE(lpp_dest_str) - 1; i >= 0; i--) {
+ const char *fmt = pti->lpp_dest == i ? "[%s] " : "%s ";
+
+ if (!(pti->lpp_dest_mask & BIT(i)))
+ continue;
+
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+ fmt, lpp_dest_str[i]);
+ }
+
+ if (ret)
+ buf[ret - 1] = '\n';
+
+ return ret;
+}
+
+static ssize_t lpp_dest_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct pti_device *pti = dev_get_drvdata(dev);
+ ssize_t ret = -EINVAL;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(lpp_dest_str); i++)
+ if (sysfs_streq(buf, lpp_dest_str[i]))
+ break;
+
+ if (i < ARRAY_SIZE(lpp_dest_str) && pti->lpp_dest_mask & BIT(i)) {
+ pti->lpp_dest = i;
+ ret = size;
+ }
+
+ return ret;
+}
+
+static DEVICE_ATTR_RW(lpp_dest);
+
+static struct attribute *lpp_output_attrs[] = {
+ &dev_attr_mode.attr,
+ &dev_attr_freerunning_clock.attr,
+ &dev_attr_clock_divider.attr,
+ &dev_attr_lpp_dest.attr,
+ NULL,
+};
+
+static struct attribute_group lpp_output_group = {
+ .attrs = lpp_output_attrs,
+};
+
+static struct intel_th_driver intel_th_lpp_driver = {
+ .probe = intel_th_pti_probe,
+ .remove = intel_th_pti_remove,
+ .activate = intel_th_pti_activate,
+ .deactivate = intel_th_pti_deactivate,
+ .attr_group = &lpp_output_group,
+ .driver = {
+ .name = "lpp",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init intel_th_pti_lpp_init(void)
+{
+ int err;
+
+ err = intel_th_driver_register(&intel_th_pti_driver);
+ if (err)
+ return err;
+
+ err = intel_th_driver_register(&intel_th_lpp_driver);
+ if (err) {
+ intel_th_driver_unregister(&intel_th_pti_driver);
+ return err;
+ }
+
+ return 0;
+}
+
+module_init(intel_th_pti_lpp_init);
+
+static void __exit intel_th_pti_lpp_exit(void)
+{
+ intel_th_driver_unregister(&intel_th_pti_driver);
+ intel_th_driver_unregister(&intel_th_lpp_driver);
+}
+
+module_exit(intel_th_pti_lpp_exit);
MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("Intel(R) Trace Hub PTI output driver");
+MODULE_DESCRIPTION("Intel(R) Trace Hub PTI/LPP output driver");
MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");
diff --git a/drivers/hwtracing/intel_th/pti.h b/drivers/hwtracing/intel_th/pti.h
index 20883f5628cf..30827be67b4c 100644
--- a/drivers/hwtracing/intel_th/pti.h
+++ b/drivers/hwtracing/intel_th/pti.h
@@ -23,7 +23,15 @@ enum {
#define PTI_EN BIT(0)
#define PTI_FCEN BIT(1)
#define PTI_MODE 0xf0
+#define LPP_PTIPRESENT BIT(8)
+#define LPP_BSSBPRESENT BIT(9)
#define PTI_CLKDIV 0x000f0000
#define PTI_PATGENMODE 0x00f00000
+#define LPP_DEST BIT(25)
+#define LPP_BSSBACT BIT(30)
+#define LPP_LPPBUSY BIT(31)
+
+#define LPP_DEST_PTI BIT(0)
+#define LPP_DEST_EXI BIT(1)
#endif /* __INTEL_TH_STH_H__ */
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index 0e731143f6a4..9414900575d8 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -566,7 +566,7 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg)
if (copy_from_user(&size, arg, sizeof(size)))
return -EFAULT;
- if (size >= PATH_MAX + sizeof(*id))
+ if (size < sizeof(*id) || size >= PATH_MAX + sizeof(*id))
return -EINVAL;
/*
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 1006b230b236..65fa29591d21 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -983,7 +983,7 @@ config I2C_UNIPHIER_F
config I2C_VERSATILE
tristate "ARM Versatile/Realview I2C bus support"
- depends on ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || COMPILE_TEST
+ depends on ARCH_MPS2 || ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || COMPILE_TEST
select I2C_ALGOBIT
help
Say yes if you want to support the I2C serial bus on ARMs Versatile
diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
index f19348328a71..6fdf9231c23c 100644
--- a/drivers/i2c/busses/i2c-aspeed.c
+++ b/drivers/i2c/busses/i2c-aspeed.c
@@ -410,10 +410,11 @@ static bool aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus)
}
/* We are in an invalid state; reset bus to a known state. */
- if (!bus->msgs && bus->master_state != ASPEED_I2C_MASTER_STOP) {
+ if (!bus->msgs) {
dev_err(bus->dev, "bus in unknown state");
bus->cmd_err = -EIO;
- aspeed_i2c_do_stop(bus);
+ if (bus->master_state != ASPEED_I2C_MASTER_STOP)
+ aspeed_i2c_do_stop(bus);
goto out_no_complete;
}
msg = &bus->msgs[bus->msgs_index];
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 2ea6d0d25a01..2b98a173136f 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -198,8 +198,7 @@ static void i2c_dw_configure_slave(struct dw_i2c_dev *dev)
dev->functionality = I2C_FUNC_SLAVE | DW_IC_DEFAULT_FUNCTIONALITY;
dev->slave_cfg = DW_IC_CON_RX_FIFO_FULL_HLD_CTRL |
- DW_IC_CON_RESTART_EN | DW_IC_CON_STOP_DET_IFADDRESSED |
- DW_IC_CON_SPEED_FAST;
+ DW_IC_CON_RESTART_EN | DW_IC_CON_STOP_DET_IFADDRESSED;
dev->mode = DW_IC_SLAVE;
@@ -257,7 +256,8 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
struct dw_i2c_dev *dev;
u32 acpi_speed, ht = 0;
struct resource *mem;
- int irq, ret;
+ int i, irq, ret;
+ const int supported_speeds[] = { 0, 100000, 400000, 1000000, 3400000 };
irq = platform_get_irq(pdev, 0);
if (irq < 0)
@@ -299,6 +299,16 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
acpi_speed = i2c_acpi_find_bus_speed(&pdev->dev);
/*
+ * Some DSDTs use a non-standard speed; round down to the nearest
+ * supported speed.
+ */
+ for (i = 1; i < ARRAY_SIZE(supported_speeds); i++) {
+ if (acpi_speed < supported_speeds[i])
+ break;
+ }
+ acpi_speed = supported_speeds[i - 1];
+
+ /*
* Find bus speed from the "clock-frequency" device property, ACPI
* or by using fast mode if neither is set.
*/
@@ -319,7 +329,8 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
if (dev->clk_freq != 100000 && dev->clk_freq != 400000
&& dev->clk_freq != 1000000 && dev->clk_freq != 3400000) {
dev_err(&pdev->dev,
- "Only 100kHz, 400kHz, 1MHz and 3.4MHz supported");
+ "%d Hz is unsupported, only 100kHz, 400kHz, 1MHz and 3.4MHz are supported\n",
+ dev->clk_freq);
ret = -EINVAL;
goto exit_reset;
}
@@ -426,7 +437,7 @@ static void dw_i2c_plat_complete(struct device *dev)
#endif
#ifdef CONFIG_PM
-static int dw_i2c_plat_suspend(struct device *dev)
+static int dw_i2c_plat_runtime_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev);
@@ -448,11 +459,21 @@ static int dw_i2c_plat_resume(struct device *dev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int dw_i2c_plat_suspend(struct device *dev)
+{
+ pm_runtime_resume(dev);
+ return dw_i2c_plat_runtime_suspend(dev);
+}
+#endif
+
static const struct dev_pm_ops dw_i2c_dev_pm_ops = {
.prepare = dw_i2c_plat_prepare,
.complete = dw_i2c_plat_complete,
SET_SYSTEM_SLEEP_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume)
- SET_RUNTIME_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume, NULL)
+ SET_RUNTIME_PM_OPS(dw_i2c_plat_runtime_suspend,
+ dw_i2c_plat_resume,
+ NULL)
};
#define DW_I2C_DEV_PMOPS (&dw_i2c_dev_pm_ops)
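The rounding loop above clamps an ACPI-provided bus speed down to the nearest standard I2C rate. A quick worked example with an assumed 1.7 MHz DSDT value:

#include <stdio.h>

int main(void)
{
	const unsigned int supported_speeds[] = { 0, 100000, 400000, 1000000, 3400000 };
	unsigned int acpi_speed = 1700000;	/* assumed odd value from a DSDT */
	unsigned int i;

	for (i = 1; i < sizeof(supported_speeds) / sizeof(supported_speeds[0]); i++) {
		if (acpi_speed < supported_speeds[i])
			break;
	}
	acpi_speed = supported_speeds[i - 1];

	/* 1.7 MHz is not a standard rate, so this prints 1000000 (Fast-mode Plus). */
	printf("rounded bus speed: %u Hz\n", acpi_speed);
	return 0;
}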
diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c
index 0548c7ea578c..78d8fb73927d 100644
--- a/drivers/i2c/busses/i2c-designware-slave.c
+++ b/drivers/i2c/busses/i2c-designware-slave.c
@@ -177,6 +177,8 @@ static int i2c_dw_reg_slave(struct i2c_client *slave)
return -EBUSY;
if (slave->flags & I2C_CLIENT_TEN)
return -EAFNOSUPPORT;
+ pm_runtime_get_sync(dev->dev);
+
/*
* Set slave address in the IC_SAR register,
* the address to which the DW_apb_i2c responds.
@@ -205,6 +207,7 @@ static int i2c_dw_unreg_slave(struct i2c_client *slave)
dev->disable_int(dev);
dev->disable(dev);
dev->slave = NULL;
+ pm_runtime_put(dev->dev);
return 0;
}
@@ -272,7 +275,7 @@ static int i2c_dw_irq_handler_slave(struct dw_i2c_dev *dev)
slave_activity = ((dw_readl(dev, DW_IC_STATUS) &
DW_IC_STATUS_SLAVE_ACTIVITY) >> 6);
- if (!enabled || !(raw_stat & ~DW_IC_INTR_ACTIVITY))
+ if (!enabled || !(raw_stat & ~DW_IC_INTR_ACTIVITY) || !dev->slave)
return 0;
dev_dbg(dev->dev,
@@ -382,7 +385,6 @@ int i2c_dw_probe_slave(struct dw_i2c_dev *dev)
ret = i2c_add_numbered_adapter(adap);
if (ret)
dev_err(dev->dev, "failure adding adapter: %d\n", ret);
- pm_runtime_put_noidle(dev->dev);
return ret;
}
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index e98e44e584a4..22ffcb73c185 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -341,8 +341,10 @@ static int ismt_process_desc(const struct ismt_desc *desc,
break;
case I2C_SMBUS_BLOCK_DATA:
case I2C_SMBUS_I2C_BLOCK_DATA:
- memcpy(&data->block[1], dma_buffer, desc->rxbytes);
- data->block[0] = desc->rxbytes;
+ if (desc->rxbytes != dma_buffer[0] + 1)
+ return -EMSGSIZE;
+
+ memcpy(data->block, dma_buffer, desc->rxbytes);
break;
}
return 0;
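The iSMT change above rejects replies whose descriptor length does not match the SMBus block layout (count byte followed by data) before copying into the caller's block buffer. A short standalone illustration of that layout and check:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	/*
	 * An SMBus block reply lands in the DMA buffer as:
	 *   dma_buffer[0] = byte count N, dma_buffer[1..N] = data,
	 * and the controller reports rxbytes = N + 1.
	 */
	uint8_t dma_buffer[] = { 4, 0xde, 0xad, 0xbe, 0xef };
	uint8_t rxbytes = 5;
	uint8_t block[32 + 2] = { 0 };	/* I2C_SMBUS_BLOCK_MAX + 2 */

	if (rxbytes != dma_buffer[0] + 1) {
		fprintf(stderr, "length mismatch, rejecting reply\n");
		return 1;
	}

	/* block[0] ends up holding the count, block[1..] the data. */
	memcpy(block, dma_buffer, rxbytes);
	printf("got %d data bytes\n", block[0]);
	return 0;
}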
diff --git a/drivers/i2c/busses/i2c-simtec.c b/drivers/i2c/busses/i2c-simtec.c
index b4685bb9b5d7..adca51a99487 100644
--- a/drivers/i2c/busses/i2c-simtec.c
+++ b/drivers/i2c/busses/i2c-simtec.c
@@ -127,8 +127,7 @@ static int simtec_i2c_probe(struct platform_device *dev)
iounmap(pd->reg);
err_res:
- release_resource(pd->ioarea);
- kfree(pd->ioarea);
+ release_mem_region(pd->ioarea->start, size);
err:
kfree(pd);
@@ -142,8 +141,7 @@ static int simtec_i2c_remove(struct platform_device *dev)
i2c_del_adapter(&pd->adap);
iounmap(pd->reg);
- release_resource(pd->ioarea);
- kfree(pd->ioarea);
+ release_mem_region(pd->ioarea->start, resource_size(pd->ioarea));
kfree(pd);
return 0;
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
index 4842ec3a5451..a9126b3cda61 100644
--- a/drivers/i2c/i2c-core-acpi.c
+++ b/drivers/i2c/i2c-core-acpi.c
@@ -230,6 +230,16 @@ void i2c_acpi_register_devices(struct i2c_adapter *adap)
dev_warn(&adap->dev, "failed to enumerate I2C slaves\n");
}
+const struct acpi_device_id *
+i2c_acpi_match_device(const struct acpi_device_id *matches,
+ struct i2c_client *client)
+{
+ if (!(client && matches))
+ return NULL;
+
+ return acpi_match_device(matches, &client->dev);
+}
+
static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level,
void *data, void **return_value)
{
@@ -289,7 +299,7 @@ u32 i2c_acpi_find_bus_speed(struct device *dev)
}
EXPORT_SYMBOL_GPL(i2c_acpi_find_bus_speed);
-static int i2c_acpi_match_adapter(struct device *dev, void *data)
+static int i2c_acpi_find_match_adapter(struct device *dev, void *data)
{
struct i2c_adapter *adapter = i2c_verify_adapter(dev);
@@ -299,7 +309,7 @@ static int i2c_acpi_match_adapter(struct device *dev, void *data)
return ACPI_HANDLE(dev) == (acpi_handle)data;
}
-static int i2c_acpi_match_device(struct device *dev, void *data)
+static int i2c_acpi_find_match_device(struct device *dev, void *data)
{
return ACPI_COMPANION(dev) == data;
}
@@ -309,7 +319,7 @@ static struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle)
struct device *dev;
dev = bus_find_device(&i2c_bus_type, NULL, handle,
- i2c_acpi_match_adapter);
+ i2c_acpi_find_match_adapter);
return dev ? i2c_verify_adapter(dev) : NULL;
}
@@ -317,7 +327,8 @@ static struct i2c_client *i2c_acpi_find_client_by_adev(struct acpi_device *adev)
{
struct device *dev;
- dev = bus_find_device(&i2c_bus_type, NULL, adev, i2c_acpi_match_device);
+ dev = bus_find_device(&i2c_bus_type, NULL, adev,
+ i2c_acpi_find_match_device);
return dev ? i2c_verify_client(dev) : NULL;
}
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index c89dac7fd2e7..56e46581b84b 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -353,10 +353,11 @@ static int i2c_device_probe(struct device *dev)
}
/*
- * An I2C ID table is not mandatory, if and only if, a suitable Device
- * Tree match table entry is supplied for the probing device.
+ * An I2C ID table is not mandatory, if and only if, a suitable OF
+ * or ACPI ID table is supplied for the probing device.
*/
if (!driver->id_table &&
+ !i2c_acpi_match_device(dev->driver->acpi_match_table, client) &&
!i2c_of_match_device(dev->driver->of_match_table, client))
return -ENODEV;
diff --git a/drivers/i2c/i2c-core.h b/drivers/i2c/i2c-core.h
index 3b63f5e5b89c..3d3d9bf02101 100644
--- a/drivers/i2c/i2c-core.h
+++ b/drivers/i2c/i2c-core.h
@@ -31,9 +31,18 @@ int i2c_check_addr_validity(unsigned addr, unsigned short flags);
int i2c_check_7bit_addr_validity_strict(unsigned short addr);
#ifdef CONFIG_ACPI
+const struct acpi_device_id *
+i2c_acpi_match_device(const struct acpi_device_id *matches,
+ struct i2c_client *client);
void i2c_acpi_register_devices(struct i2c_adapter *adap);
#else /* CONFIG_ACPI */
static inline void i2c_acpi_register_devices(struct i2c_adapter *adap) { }
+static inline const struct acpi_device_id *
+i2c_acpi_match_device(const struct acpi_device_id *matches,
+ struct i2c_client *client)
+{
+ return NULL;
+}
#endif /* CONFIG_ACPI */
extern struct notifier_block i2c_acpi_notifier;
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index 2c64d0e0740f..17121329bb79 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -83,7 +83,7 @@ config I2C_MUX_PINCTRL
different sets of pins at run-time.
This driver can also be built as a module. If so, the module will be
- called pinctrl-i2cmux.
+ called i2c-mux-pinctrl.
config I2C_MUX_REG
tristate "Register-based I2C multiplexer"
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 627b1f62a749..3ddd88219906 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -72,7 +72,7 @@ static int ide_floppy_callback(ide_drive_t *drive, int dsc)
drive->failed_pc = NULL;
if (pc->c[0] == GPCMD_READ_10 || pc->c[0] == GPCMD_WRITE_10 ||
- (req_op(rq) == REQ_OP_SCSI_IN || req_op(rq) == REQ_OP_SCSI_OUT))
+ blk_rq_is_scsi(rq))
uptodate = 1; /* FIXME */
else if (pc->c[0] == GPCMD_REQUEST_SENSE) {
diff --git a/drivers/ide/ide-timings.c b/drivers/ide/ide-timings.c
index 0e05f75934c9..1858e3ce3993 100644
--- a/drivers/ide/ide-timings.c
+++ b/drivers/ide/ide-timings.c
@@ -104,19 +104,19 @@ u16 ide_pio_cycle_time(ide_drive_t *drive, u8 pio)
EXPORT_SYMBOL_GPL(ide_pio_cycle_time);
#define ENOUGH(v, unit) (((v) - 1) / (unit) + 1)
-#define EZ(v, unit) ((v) ? ENOUGH(v, unit) : 0)
+#define EZ(v, unit) ((v) ? ENOUGH((v) * 1000, unit) : 0)
static void ide_timing_quantize(struct ide_timing *t, struct ide_timing *q,
int T, int UT)
{
- q->setup = EZ(t->setup * 1000, T);
- q->act8b = EZ(t->act8b * 1000, T);
- q->rec8b = EZ(t->rec8b * 1000, T);
- q->cyc8b = EZ(t->cyc8b * 1000, T);
- q->active = EZ(t->active * 1000, T);
- q->recover = EZ(t->recover * 1000, T);
- q->cycle = EZ(t->cycle * 1000, T);
- q->udma = EZ(t->udma * 1000, UT);
+ q->setup = EZ(t->setup, T);
+ q->act8b = EZ(t->act8b, T);
+ q->rec8b = EZ(t->rec8b, T);
+ q->cyc8b = EZ(t->cyc8b, T);
+ q->active = EZ(t->active, T);
+ q->recover = EZ(t->recover, T);
+ q->cycle = EZ(t->cycle, T);
+ q->udma = EZ(t->udma, UT);
}
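ENOUGH() above is the standard integer ceiling-division idiom, and the change applies the *1000 unit conversion inside EZ(), after the zero check, instead of at every call site. A small stand-alone illustration of the arithmetic (illustrative numbers only, ordinary user-space C):

#include <stdio.h>

#define ENOUGH(v, unit)	(((v) - 1) / (unit) + 1)		/* ceil(v / unit) */
#define EZ(v, unit)	((v) ? ENOUGH((v) * 1000, unit) : 0)	/* zero stays zero */

int main(void)
{
	printf("%d\n", ENOUGH(120, 30));	/* 4: divides exactly */
	printf("%d\n", ENOUGH(125, 30));	/* 5: rounds up */
	printf("%d\n", EZ(0, 30));		/* 0: zero field passed through */
	printf("%d\n", EZ(1, 30));		/* 34: 1 * 1000 quantized in steps of 30 */
	return 0;
}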
void ide_timing_merge(struct ide_timing *a, struct ide_timing *b,
diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c
index 0c5d3a99468e..c5b902b86b44 100644
--- a/drivers/ide/pmac.c
+++ b/drivers/ide/pmac.c
@@ -1145,8 +1145,8 @@ static int pmac_ide_macio_attach(struct macio_dev *mdev,
return -ENOMEM;
if (macio_resource_count(mdev) == 0) {
- printk(KERN_WARNING "ide-pmac: no address for %s\n",
- mdev->ofdev.dev.of_node->full_name);
+ printk(KERN_WARNING "ide-pmac: no address for %pOF\n",
+ mdev->ofdev.dev.of_node);
rc = -ENXIO;
goto out_free_pmif;
}
@@ -1154,7 +1154,7 @@ static int pmac_ide_macio_attach(struct macio_dev *mdev,
/* Request memory resource for IO ports */
if (macio_request_resource(mdev, 0, "ide-pmac (ports)")) {
printk(KERN_ERR "ide-pmac: can't request MMIO resource for "
- "%s!\n", mdev->ofdev.dev.of_node->full_name);
+ "%pOF!\n", mdev->ofdev.dev.of_node);
rc = -EBUSY;
goto out_free_pmif;
}
@@ -1165,8 +1165,8 @@ static int pmac_ide_macio_attach(struct macio_dev *mdev,
* where that happens though...
*/
if (macio_irq_count(mdev) == 0) {
- printk(KERN_WARNING "ide-pmac: no intrs for device %s, using "
- "13\n", mdev->ofdev.dev.of_node->full_name);
+ printk(KERN_WARNING "ide-pmac: no intrs for device %pOF, using "
+ "13\n", mdev->ofdev.dev.of_node);
irq = irq_create_mapping(NULL, 13);
} else
irq = macio_irq(mdev, 0);
@@ -1183,8 +1183,8 @@ static int pmac_ide_macio_attach(struct macio_dev *mdev,
if (macio_resource_count(mdev) >= 2) {
if (macio_request_resource(mdev, 1, "ide-pmac (dma)"))
printk(KERN_WARNING "ide-pmac: can't request DMA "
- "resource for %s!\n",
- mdev->ofdev.dev.of_node->full_name);
+ "resource for %pOF!\n",
+ mdev->ofdev.dev.of_node);
else
pmif->dma_regs = ioremap(macio_resource_start(mdev, 1), 0x1000);
} else
@@ -1274,7 +1274,7 @@ static int pmac_ide_pci_attach(struct pci_dev *pdev,
if (pci_enable_device(pdev)) {
printk(KERN_WARNING "ide-pmac: Can't enable PCI device for "
- "%s\n", np->full_name);
+ "%pOF\n", np);
rc = -ENXIO;
goto out_free_pmif;
}
@@ -1282,7 +1282,7 @@ static int pmac_ide_pci_attach(struct pci_dev *pdev,
if (pci_request_regions(pdev, "Kauai ATA")) {
printk(KERN_ERR "ide-pmac: Cannot obtain PCI resources for "
- "%s\n", np->full_name);
+ "%pOF\n", np);
rc = -ENXIO;
goto out_free_pmif;
}
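The %pOF conversions above rely on the printk format extension that prints a struct device_node by its full device-tree path, so the manual ->full_name dereferences can be dropped. A minimal sketch of the idiom (hypothetical helper, not part of the patch):

#include <linux/of.h>
#include <linux/printk.h>

static void report_missing_address(struct device_node *np)
{
	/* %pOF expands to the node's full path, e.g. "/pci@f2000000/ata-4@d" */
	printk(KERN_WARNING "ide-pmac: no address for %pOF\n", np);
}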
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index c2ae819a871c..5dc7ea4b6bc4 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -97,7 +97,7 @@ static const struct idle_cpu *icpu;
static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
static int intel_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index);
-static void intel_idle_freeze(struct cpuidle_device *dev,
+static void intel_idle_s2idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index);
static struct cpuidle_state *cpuidle_state_table;
@@ -132,7 +132,7 @@ static struct cpuidle_state nehalem_cstates[] = {
.exit_latency = 3,
.target_residency = 6,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
.desc = "MWAIT 0x01",
@@ -140,7 +140,7 @@ static struct cpuidle_state nehalem_cstates[] = {
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
.desc = "MWAIT 0x10",
@@ -148,7 +148,7 @@ static struct cpuidle_state nehalem_cstates[] = {
.exit_latency = 20,
.target_residency = 80,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
.desc = "MWAIT 0x20",
@@ -156,7 +156,7 @@ static struct cpuidle_state nehalem_cstates[] = {
.exit_latency = 200,
.target_residency = 800,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
};
@@ -169,7 +169,7 @@ static struct cpuidle_state snb_cstates[] = {
.exit_latency = 2,
.target_residency = 2,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
.desc = "MWAIT 0x01",
@@ -177,7 +177,7 @@ static struct cpuidle_state snb_cstates[] = {
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
.desc = "MWAIT 0x10",
@@ -185,7 +185,7 @@ static struct cpuidle_state snb_cstates[] = {
.exit_latency = 80,
.target_residency = 211,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
.desc = "MWAIT 0x20",
@@ -193,7 +193,7 @@ static struct cpuidle_state snb_cstates[] = {
.exit_latency = 104,
.target_residency = 345,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C7",
.desc = "MWAIT 0x30",
@@ -201,7 +201,7 @@ static struct cpuidle_state snb_cstates[] = {
.exit_latency = 109,
.target_residency = 345,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
};
@@ -214,7 +214,7 @@ static struct cpuidle_state byt_cstates[] = {
.exit_latency = 1,
.target_residency = 1,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C6N",
.desc = "MWAIT 0x58",
@@ -222,7 +222,7 @@ static struct cpuidle_state byt_cstates[] = {
.exit_latency = 300,
.target_residency = 275,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C6S",
.desc = "MWAIT 0x52",
@@ -230,7 +230,7 @@ static struct cpuidle_state byt_cstates[] = {
.exit_latency = 500,
.target_residency = 560,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C7",
.desc = "MWAIT 0x60",
@@ -238,7 +238,7 @@ static struct cpuidle_state byt_cstates[] = {
.exit_latency = 1200,
.target_residency = 4000,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C7S",
.desc = "MWAIT 0x64",
@@ -246,7 +246,7 @@ static struct cpuidle_state byt_cstates[] = {
.exit_latency = 10000,
.target_residency = 20000,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
};
@@ -259,7 +259,7 @@ static struct cpuidle_state cht_cstates[] = {
.exit_latency = 1,
.target_residency = 1,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C6N",
.desc = "MWAIT 0x58",
@@ -267,7 +267,7 @@ static struct cpuidle_state cht_cstates[] = {
.exit_latency = 80,
.target_residency = 275,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C6S",
.desc = "MWAIT 0x52",
@@ -275,7 +275,7 @@ static struct cpuidle_state cht_cstates[] = {
.exit_latency = 200,
.target_residency = 560,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C7",
.desc = "MWAIT 0x60",
@@ -283,7 +283,7 @@ static struct cpuidle_state cht_cstates[] = {
.exit_latency = 1200,
.target_residency = 4000,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C7S",
.desc = "MWAIT 0x64",
@@ -291,7 +291,7 @@ static struct cpuidle_state cht_cstates[] = {
.exit_latency = 10000,
.target_residency = 20000,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
};
@@ -304,7 +304,7 @@ static struct cpuidle_state ivb_cstates[] = {
.exit_latency = 1,
.target_residency = 1,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
.desc = "MWAIT 0x01",
@@ -312,7 +312,7 @@ static struct cpuidle_state ivb_cstates[] = {
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
.desc = "MWAIT 0x10",
@@ -320,7 +320,7 @@ static struct cpuidle_state ivb_cstates[] = {
.exit_latency = 59,
.target_residency = 156,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
.desc = "MWAIT 0x20",
@@ -328,7 +328,7 @@ static struct cpuidle_state ivb_cstates[] = {
.exit_latency = 80,
.target_residency = 300,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C7",
.desc = "MWAIT 0x30",
@@ -336,7 +336,7 @@ static struct cpuidle_state ivb_cstates[] = {
.exit_latency = 87,
.target_residency = 300,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
};
@@ -349,7 +349,7 @@ static struct cpuidle_state ivt_cstates[] = {
.exit_latency = 1,
.target_residency = 1,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
.desc = "MWAIT 0x01",
@@ -357,7 +357,7 @@ static struct cpuidle_state ivt_cstates[] = {
.exit_latency = 10,
.target_residency = 80,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
.desc = "MWAIT 0x10",
@@ -365,7 +365,7 @@ static struct cpuidle_state ivt_cstates[] = {
.exit_latency = 59,
.target_residency = 156,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
.desc = "MWAIT 0x20",
@@ -373,7 +373,7 @@ static struct cpuidle_state ivt_cstates[] = {
.exit_latency = 82,
.target_residency = 300,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
};
@@ -386,7 +386,7 @@ static struct cpuidle_state ivt_cstates_4s[] = {
.exit_latency = 1,
.target_residency = 1,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
.desc = "MWAIT 0x01",
@@ -394,7 +394,7 @@ static struct cpuidle_state ivt_cstates_4s[] = {
.exit_latency = 10,
.target_residency = 250,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
.desc = "MWAIT 0x10",
@@ -402,7 +402,7 @@ static struct cpuidle_state ivt_cstates_4s[] = {
.exit_latency = 59,
.target_residency = 300,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
.desc = "MWAIT 0x20",
@@ -410,7 +410,7 @@ static struct cpuidle_state ivt_cstates_4s[] = {
.exit_latency = 84,
.target_residency = 400,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
};
@@ -423,7 +423,7 @@ static struct cpuidle_state ivt_cstates_8s[] = {
.exit_latency = 1,
.target_residency = 1,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
.desc = "MWAIT 0x01",
@@ -431,7 +431,7 @@ static struct cpuidle_state ivt_cstates_8s[] = {
.exit_latency = 10,
.target_residency = 500,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
.desc = "MWAIT 0x10",
@@ -439,7 +439,7 @@ static struct cpuidle_state ivt_cstates_8s[] = {
.exit_latency = 59,
.target_residency = 600,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
.desc = "MWAIT 0x20",
@@ -447,7 +447,7 @@ static struct cpuidle_state ivt_cstates_8s[] = {
.exit_latency = 88,
.target_residency = 700,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
};
@@ -460,7 +460,7 @@ static struct cpuidle_state hsw_cstates[] = {
.exit_latency = 2,
.target_residency = 2,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
.desc = "MWAIT 0x01",
@@ -468,7 +468,7 @@ static struct cpuidle_state hsw_cstates[] = {
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
.desc = "MWAIT 0x10",
@@ -476,7 +476,7 @@ static struct cpuidle_state hsw_cstates[] = {
.exit_latency = 33,
.target_residency = 100,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
.desc = "MWAIT 0x20",
@@ -484,7 +484,7 @@ static struct cpuidle_state hsw_cstates[] = {
.exit_latency = 133,
.target_residency = 400,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C7s",
.desc = "MWAIT 0x32",
@@ -492,7 +492,7 @@ static struct cpuidle_state hsw_cstates[] = {
.exit_latency = 166,
.target_residency = 500,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C8",
.desc = "MWAIT 0x40",
@@ -500,7 +500,7 @@ static struct cpuidle_state hsw_cstates[] = {
.exit_latency = 300,
.target_residency = 900,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C9",
.desc = "MWAIT 0x50",
@@ -508,7 +508,7 @@ static struct cpuidle_state hsw_cstates[] = {
.exit_latency = 600,
.target_residency = 1800,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C10",
.desc = "MWAIT 0x60",
@@ -516,7 +516,7 @@ static struct cpuidle_state hsw_cstates[] = {
.exit_latency = 2600,
.target_residency = 7700,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
};
@@ -528,7 +528,7 @@ static struct cpuidle_state bdw_cstates[] = {
.exit_latency = 2,
.target_residency = 2,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
.desc = "MWAIT 0x01",
@@ -536,7 +536,7 @@ static struct cpuidle_state bdw_cstates[] = {
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
.desc = "MWAIT 0x10",
@@ -544,7 +544,7 @@ static struct cpuidle_state bdw_cstates[] = {
.exit_latency = 40,
.target_residency = 100,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
.desc = "MWAIT 0x20",
@@ -552,7 +552,7 @@ static struct cpuidle_state bdw_cstates[] = {
.exit_latency = 133,
.target_residency = 400,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C7s",
.desc = "MWAIT 0x32",
@@ -560,7 +560,7 @@ static struct cpuidle_state bdw_cstates[] = {
.exit_latency = 166,
.target_residency = 500,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C8",
.desc = "MWAIT 0x40",
@@ -568,7 +568,7 @@ static struct cpuidle_state bdw_cstates[] = {
.exit_latency = 300,
.target_residency = 900,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C9",
.desc = "MWAIT 0x50",
@@ -576,7 +576,7 @@ static struct cpuidle_state bdw_cstates[] = {
.exit_latency = 600,
.target_residency = 1800,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C10",
.desc = "MWAIT 0x60",
@@ -584,7 +584,7 @@ static struct cpuidle_state bdw_cstates[] = {
.exit_latency = 2600,
.target_residency = 7700,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
};
@@ -597,7 +597,7 @@ static struct cpuidle_state skl_cstates[] = {
.exit_latency = 2,
.target_residency = 2,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
.desc = "MWAIT 0x01",
@@ -605,7 +605,7 @@ static struct cpuidle_state skl_cstates[] = {
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
.desc = "MWAIT 0x10",
@@ -613,7 +613,7 @@ static struct cpuidle_state skl_cstates[] = {
.exit_latency = 70,
.target_residency = 100,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
.desc = "MWAIT 0x20",
@@ -621,7 +621,7 @@ static struct cpuidle_state skl_cstates[] = {
.exit_latency = 85,
.target_residency = 200,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C7s",
.desc = "MWAIT 0x33",
@@ -629,7 +629,7 @@ static struct cpuidle_state skl_cstates[] = {
.exit_latency = 124,
.target_residency = 800,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C8",
.desc = "MWAIT 0x40",
@@ -637,7 +637,7 @@ static struct cpuidle_state skl_cstates[] = {
.exit_latency = 200,
.target_residency = 800,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C9",
.desc = "MWAIT 0x50",
@@ -645,7 +645,7 @@ static struct cpuidle_state skl_cstates[] = {
.exit_latency = 480,
.target_residency = 5000,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C10",
.desc = "MWAIT 0x60",
@@ -653,7 +653,7 @@ static struct cpuidle_state skl_cstates[] = {
.exit_latency = 890,
.target_residency = 5000,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
};
@@ -666,7 +666,7 @@ static struct cpuidle_state skx_cstates[] = {
.exit_latency = 2,
.target_residency = 2,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
.desc = "MWAIT 0x01",
@@ -674,7 +674,7 @@ static struct cpuidle_state skx_cstates[] = {
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
.desc = "MWAIT 0x20",
@@ -682,7 +682,7 @@ static struct cpuidle_state skx_cstates[] = {
.exit_latency = 133,
.target_residency = 600,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
};
@@ -695,7 +695,7 @@ static struct cpuidle_state atom_cstates[] = {
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C2",
.desc = "MWAIT 0x10",
@@ -703,7 +703,7 @@ static struct cpuidle_state atom_cstates[] = {
.exit_latency = 20,
.target_residency = 80,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C4",
.desc = "MWAIT 0x30",
@@ -711,7 +711,7 @@ static struct cpuidle_state atom_cstates[] = {
.exit_latency = 100,
.target_residency = 400,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
.desc = "MWAIT 0x52",
@@ -719,7 +719,7 @@ static struct cpuidle_state atom_cstates[] = {
.exit_latency = 140,
.target_residency = 560,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
};
@@ -731,7 +731,7 @@ static struct cpuidle_state tangier_cstates[] = {
.exit_latency = 1,
.target_residency = 4,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C4",
.desc = "MWAIT 0x30",
@@ -739,7 +739,7 @@ static struct cpuidle_state tangier_cstates[] = {
.exit_latency = 100,
.target_residency = 400,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
.desc = "MWAIT 0x52",
@@ -747,7 +747,7 @@ static struct cpuidle_state tangier_cstates[] = {
.exit_latency = 140,
.target_residency = 560,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C7",
.desc = "MWAIT 0x60",
@@ -755,7 +755,7 @@ static struct cpuidle_state tangier_cstates[] = {
.exit_latency = 1200,
.target_residency = 4000,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C9",
.desc = "MWAIT 0x64",
@@ -763,7 +763,7 @@ static struct cpuidle_state tangier_cstates[] = {
.exit_latency = 10000,
.target_residency = 20000,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
};
@@ -775,7 +775,7 @@ static struct cpuidle_state avn_cstates[] = {
.exit_latency = 2,
.target_residency = 2,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
.desc = "MWAIT 0x51",
@@ -783,7 +783,7 @@ static struct cpuidle_state avn_cstates[] = {
.exit_latency = 15,
.target_residency = 45,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
};
@@ -795,7 +795,7 @@ static struct cpuidle_state knl_cstates[] = {
.exit_latency = 1,
.target_residency = 2,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze },
+ .enter_s2idle = intel_idle_s2idle },
{
.name = "C6",
.desc = "MWAIT 0x10",
@@ -803,7 +803,7 @@ static struct cpuidle_state knl_cstates[] = {
.exit_latency = 120,
.target_residency = 500,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze },
+ .enter_s2idle = intel_idle_s2idle },
{
.enter = NULL }
};
@@ -816,7 +816,7 @@ static struct cpuidle_state bxt_cstates[] = {
.exit_latency = 2,
.target_residency = 2,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
.desc = "MWAIT 0x01",
@@ -824,7 +824,7 @@ static struct cpuidle_state bxt_cstates[] = {
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
.desc = "MWAIT 0x20",
@@ -832,7 +832,7 @@ static struct cpuidle_state bxt_cstates[] = {
.exit_latency = 133,
.target_residency = 133,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C7s",
.desc = "MWAIT 0x31",
@@ -840,7 +840,7 @@ static struct cpuidle_state bxt_cstates[] = {
.exit_latency = 155,
.target_residency = 155,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C8",
.desc = "MWAIT 0x40",
@@ -848,7 +848,7 @@ static struct cpuidle_state bxt_cstates[] = {
.exit_latency = 1000,
.target_residency = 1000,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C9",
.desc = "MWAIT 0x50",
@@ -856,7 +856,7 @@ static struct cpuidle_state bxt_cstates[] = {
.exit_latency = 2000,
.target_residency = 2000,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C10",
.desc = "MWAIT 0x60",
@@ -864,7 +864,7 @@ static struct cpuidle_state bxt_cstates[] = {
.exit_latency = 10000,
.target_residency = 10000,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
};
@@ -877,7 +877,7 @@ static struct cpuidle_state dnv_cstates[] = {
.exit_latency = 2,
.target_residency = 2,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
.desc = "MWAIT 0x01",
@@ -885,7 +885,7 @@ static struct cpuidle_state dnv_cstates[] = {
.exit_latency = 10,
.target_residency = 20,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
.desc = "MWAIT 0x20",
@@ -893,7 +893,7 @@ static struct cpuidle_state dnv_cstates[] = {
.exit_latency = 50,
.target_residency = 500,
.enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
+ .enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
};
@@ -913,16 +913,15 @@ static __cpuidle int intel_idle(struct cpuidle_device *dev,
struct cpuidle_state *state = &drv->states[index];
unsigned long eax = flg2MWAIT(state->flags);
unsigned int cstate;
- int cpu = smp_processor_id();
cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;
/*
- * leave_mm() to avoid costly and often unnecessary wakeups
- * for flushing the user TLB's associated with the active mm.
+ * NB: if CPUIDLE_FLAG_TLB_FLUSHED is set, this idle transition
+ * will probably flush the TLB. It's not guaranteed to flush
+ * the TLB, though, so it's not clear that we can do anything
+ * useful with this knowledge.
*/
- if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
- leave_mm(cpu);
if (!(lapic_timer_reliable_states & (1 << (cstate))))
tick_broadcast_enter();
@@ -936,12 +935,12 @@ static __cpuidle int intel_idle(struct cpuidle_device *dev,
}
/**
- * intel_idle_freeze - simplified "enter" callback routine for suspend-to-idle
+ * intel_idle_s2idle - simplified "enter" callback routine for suspend-to-idle
* @dev: cpuidle_device
* @drv: cpuidle driver
* @index: state index
*/
-static void intel_idle_freeze(struct cpuidle_device *dev,
+static void intel_idle_s2idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
unsigned long ecx = 1; /* break on interrupt flag */
@@ -1331,13 +1330,14 @@ static void __init intel_idle_cpuidle_driver_init(void)
intel_idle_state_table_update();
+ cpuidle_poll_state_init(drv);
drv->state_count = 1;
for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) {
int num_substates, mwait_hint, mwait_cstate;
if ((cpuidle_state_table[cstate].enter == NULL) &&
- (cpuidle_state_table[cstate].enter_freeze == NULL))
+ (cpuidle_state_table[cstate].enter_s2idle == NULL))
break;
if (cstate + 1 > max_cstate) {
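The rename only touches the callback field name; the layout of a state-table entry is otherwise unchanged. A sketch of one entry using the new .enter_s2idle member (latency and residency numbers are illustrative, not taken from any table above):

static struct cpuidle_state example_cstates[] = {
	{
		.name = "C6",
		.desc = "MWAIT 0x20",
		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 133,		/* us, illustrative */
		.target_residency = 400,	/* us, illustrative */
		.enter = &intel_idle,
		.enter_s2idle = intel_idle_s2idle, },
	{
		.enter = NULL }
};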
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index efc67739c28f..3dec972ca672 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -842,7 +842,7 @@ static SIMPLE_DEV_PM_OPS(bma180_pm_ops, bma180_suspend, bma180_resume);
#define BMA180_PM_OPS NULL
#endif
-static struct i2c_device_id bma180_ids[] = {
+static const struct i2c_device_id bma180_ids[] = {
{ "bma180", BMA180 },
{ "bma250", BMA250 },
{ }
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index 6b5d3be283c4..807299dd45eb 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -193,7 +193,6 @@ struct bmc150_accel_data {
struct regmap *regmap;
int irq;
struct bmc150_accel_interrupt interrupts[BMC150_ACCEL_INTERRUPTS];
- atomic_t active_intr;
struct bmc150_accel_trigger triggers[BMC150_ACCEL_TRIGGERS];
struct mutex mutex;
u8 fifo_mode, watermark;
@@ -493,11 +492,6 @@ static int bmc150_accel_set_interrupt(struct bmc150_accel_data *data, int i,
goto out_fix_power_state;
}
- if (state)
- atomic_inc(&data->active_intr);
- else
- atomic_dec(&data->active_intr);
-
return 0;
out_fix_power_state:
@@ -1710,8 +1704,7 @@ static int bmc150_accel_resume(struct device *dev)
struct bmc150_accel_data *data = iio_priv(indio_dev);
mutex_lock(&data->mutex);
- if (atomic_read(&data->active_intr))
- bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
+ bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
bmc150_accel_fifo_set_mode(data);
mutex_unlock(&data->mutex);
diff --git a/drivers/iio/accel/bmc150-accel-i2c.c b/drivers/iio/accel/bmc150-accel-i2c.c
index 8ca8041267ef..f85014fbaa12 100644
--- a/drivers/iio/accel/bmc150-accel-i2c.c
+++ b/drivers/iio/accel/bmc150-accel-i2c.c
@@ -64,6 +64,7 @@ static const struct acpi_device_id bmc150_accel_acpi_match[] = {
{"BMA250E", bma250e},
{"BMA222E", bma222e},
{"BMA0280", bma280},
+ {"BOSC0200"},
{ },
};
MODULE_DEVICE_TABLE(acpi, bmc150_accel_acpi_match);
diff --git a/drivers/iio/accel/da311.c b/drivers/iio/accel/da311.c
index 537cfa8b6edf..c0c1620d2a2f 100644
--- a/drivers/iio/accel/da311.c
+++ b/drivers/iio/accel/da311.c
@@ -139,7 +139,7 @@ static int da311_register_mask_write(struct i2c_client *client, u16 addr,
/* Init sequence taken from the android driver */
static int da311_reset(struct i2c_client *client)
{
- const struct {
+ static const struct {
u16 addr;
u8 mask;
u8 data;
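Making the function-local init table "static const" keeps it in read-only data instead of rebuilding it on the stack on every call; the initializer itself is unchanged. A tiny illustration of the difference (hypothetical table, not the da311 register sequence):

void on_stack(void)
{
	const int table[] = { 1, 2, 3 };	/* constructed at each call */
	(void)table;
}

void in_rodata(void)
{
	static const int table[] = { 1, 2, 3 };	/* emitted once, shared by all calls */
	(void)table;
}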
diff --git a/drivers/iio/accel/sca3000.c b/drivers/iio/accel/sca3000.c
index cb1d83fa19a0..39ab210c44f6 100644
--- a/drivers/iio/accel/sca3000.c
+++ b/drivers/iio/accel/sca3000.c
@@ -36,7 +36,7 @@
#define SCA3000_LOCKED BIT(5)
#define SCA3000_EEPROM_CS_ERROR BIT(1)
#define SCA3000_SPI_FRAME_ERROR BIT(0)
-
+
/* All reads done using register decrement so no need to directly access LSBs */
#define SCA3000_REG_X_MSB_ADDR 0x05
#define SCA3000_REG_Y_MSB_ADDR 0x07
@@ -74,7 +74,7 @@
#define SCA3000_REG_INT_STATUS_ADDR 0x16
#define SCA3000_REG_INT_STATUS_THREE_QUARTERS BIT(7)
#define SCA3000_REG_INT_STATUS_HALF BIT(6)
-
+
#define SCA3000_INT_STATUS_FREE_FALL BIT(3)
#define SCA3000_INT_STATUS_Y_TRIGGER BIT(2)
#define SCA3000_INT_STATUS_X_TRIGGER BIT(1)
@@ -124,7 +124,7 @@
#define SCA3000_REG_INT_MASK_ADDR 0x21
#define SCA3000_REG_INT_MASK_PROT_MASK 0x1C
-
+
#define SCA3000_REG_INT_MASK_RING_THREE_QUARTER BIT(7)
#define SCA3000_REG_INT_MASK_RING_HALF BIT(6)
diff --git a/drivers/iio/accel/st_accel.h b/drivers/iio/accel/st_accel.h
index 3ad44ce7ae82..0fe521609a3a 100644
--- a/drivers/iio/accel/st_accel.h
+++ b/drivers/iio/accel/st_accel.h
@@ -29,10 +29,13 @@ enum st_accel_type {
LIS2DH12,
LIS3L02DQ,
LNG2DM,
+ H3LIS331DL,
+ LIS331DL,
+ LIS3LV02DL,
ST_ACCEL_MAX,
};
-#define H3LIS331DL_DRIVER_NAME "h3lis331dl_accel"
+#define H3LIS331DL_ACCEL_DEV_NAME "h3lis331dl_accel"
#define LIS3LV02DL_ACCEL_DEV_NAME "lis3lv02dl_accel"
#define LSM303DLHC_ACCEL_DEV_NAME "lsm303dlhc_accel"
#define LIS3DH_ACCEL_DEV_NAME "lis3dh"
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index 07d1489cd457..752856b3a849 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -161,11 +161,15 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.drdy_irq = {
.addr = 0x22,
.mask_int1 = 0x10,
- .mask_int2 = 0x08,
+ .mask_int2 = 0x00,
.addr_ihl = 0x25,
.mask_ihl = 0x02,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
+ .sim = {
+ .addr = 0x23,
+ .value = BIT(0),
+ },
.multi_read_bit = true,
.bootime = 2,
},
@@ -234,6 +238,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask_od = 0x40,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
+ .sim = {
+ .addr = 0x23,
+ .value = BIT(0),
+ },
.multi_read_bit = true,
.bootime = 2,
},
@@ -316,6 +324,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.en_mask = 0x08,
},
},
+ .sim = {
+ .addr = 0x24,
+ .value = BIT(0),
+ },
.multi_read_bit = false,
.bootime = 2,
},
@@ -379,6 +391,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask_int1 = 0x04,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
+ .sim = {
+ .addr = 0x21,
+ .value = BIT(1),
+ },
.multi_read_bit = true,
.bootime = 2, /* guess */
},
@@ -437,6 +453,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask_od = 0x40,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
+ .sim = {
+ .addr = 0x21,
+ .value = BIT(7),
+ },
.multi_read_bit = false,
.bootime = 2, /* guess */
},
@@ -444,7 +464,7 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.wai = 0x32,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
- [0] = H3LIS331DL_DRIVER_NAME,
+ [0] = H3LIS331DL_ACCEL_DEV_NAME,
},
.ch = (struct iio_chan_spec *)st_accel_12bit_channels,
.odr = {
@@ -499,6 +519,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.addr_ihl = 0x22,
.mask_ihl = 0x80,
},
+ .sim = {
+ .addr = 0x23,
+ .value = BIT(0),
+ },
.multi_read_bit = true,
.bootime = 2,
},
@@ -547,6 +571,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask_int1 = 0x04,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
+ .sim = {
+ .addr = 0x21,
+ .value = BIT(1),
+ },
.multi_read_bit = false,
.bootime = 2,
},
@@ -609,11 +637,15 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.drdy_irq = {
.addr = 0x22,
.mask_int1 = 0x10,
- .mask_int2 = 0x08,
+ .mask_int2 = 0x00,
.addr_ihl = 0x25,
.mask_ihl = 0x02,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
+ .sim = {
+ .addr = 0x23,
+ .value = BIT(0),
+ },
.multi_read_bit = true,
.bootime = 2,
},
diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c
index 543f0ad7fd7e..18cafb9f2468 100644
--- a/drivers/iio/accel/st_accel_i2c.c
+++ b/drivers/iio/accel/st_accel_i2c.c
@@ -84,7 +84,7 @@ static const struct of_device_id st_accel_of_match[] = {
},
{
.compatible = "st,h3lis331dl-accel",
- .data = H3LIS331DL_DRIVER_NAME,
+ .data = H3LIS331DL_ACCEL_DEV_NAME,
},
{
.compatible = "st,lis3l02dq",
@@ -126,6 +126,9 @@ static const struct i2c_device_id st_accel_id_table[] = {
{ LIS2DH12_ACCEL_DEV_NAME, LIS2DH12 },
{ LIS3L02DQ_ACCEL_DEV_NAME, LIS3L02DQ },
{ LNG2DM_ACCEL_DEV_NAME, LNG2DM },
+ { H3LIS331DL_ACCEL_DEV_NAME, H3LIS331DL },
+ { LIS331DL_ACCEL_DEV_NAME, LIS331DL },
+ { LIS3LV02DL_ACCEL_DEV_NAME, LIS3LV02DL },
{},
};
MODULE_DEVICE_TABLE(i2c, st_accel_id_table);
@@ -144,7 +147,8 @@ static int st_accel_i2c_probe(struct i2c_client *client,
adata = iio_priv(indio_dev);
if (client->dev.of_node) {
- st_sensors_of_i2c_probe(client, st_accel_of_match);
+ st_sensors_of_name_probe(&client->dev, st_accel_of_match,
+ client->name, sizeof(client->name));
} else if (ACPI_HANDLE(&client->dev)) {
ret = st_sensors_match_acpi_device(&client->dev);
if ((ret < 0) || (ret >= ST_ACCEL_MAX))
diff --git a/drivers/iio/accel/st_accel_spi.c b/drivers/iio/accel/st_accel_spi.c
index 1a867f5563a4..915fa49085f7 100644
--- a/drivers/iio/accel/st_accel_spi.c
+++ b/drivers/iio/accel/st_accel_spi.c
@@ -18,6 +18,77 @@
#include <linux/iio/common/st_sensors_spi.h>
#include "st_accel.h"
+#ifdef CONFIG_OF
+/*
+ * For new single-chip sensors use <device_name> as compatible string.
+ * For old single-chip devices keep <device_name>-accel to maintain
+ * compatibility
+ */
+static const struct of_device_id st_accel_of_match[] = {
+ {
+ /* An older compatible */
+ .compatible = "st,lis302dl-spi",
+ .data = LIS3LV02DL_ACCEL_DEV_NAME,
+ },
+ {
+ .compatible = "st,lis3lv02dl-accel",
+ .data = LIS3LV02DL_ACCEL_DEV_NAME,
+ },
+ {
+ .compatible = "st,lis3dh-accel",
+ .data = LIS3DH_ACCEL_DEV_NAME,
+ },
+ {
+ .compatible = "st,lsm330d-accel",
+ .data = LSM330D_ACCEL_DEV_NAME,
+ },
+ {
+ .compatible = "st,lsm330dl-accel",
+ .data = LSM330DL_ACCEL_DEV_NAME,
+ },
+ {
+ .compatible = "st,lsm330dlc-accel",
+ .data = LSM330DLC_ACCEL_DEV_NAME,
+ },
+ {
+ .compatible = "st,lis331dlh-accel",
+ .data = LIS331DLH_ACCEL_DEV_NAME,
+ },
+ {
+ .compatible = "st,lsm330-accel",
+ .data = LSM330_ACCEL_DEV_NAME,
+ },
+ {
+ .compatible = "st,lsm303agr-accel",
+ .data = LSM303AGR_ACCEL_DEV_NAME,
+ },
+ {
+ .compatible = "st,lis2dh12-accel",
+ .data = LIS2DH12_ACCEL_DEV_NAME,
+ },
+ {
+ .compatible = "st,lis3l02dq",
+ .data = LIS3L02DQ_ACCEL_DEV_NAME,
+ },
+ {
+ .compatible = "st,lng2dm-accel",
+ .data = LNG2DM_ACCEL_DEV_NAME,
+ },
+ {
+ .compatible = "st,h3lis331dl-accel",
+ .data = H3LIS331DL_ACCEL_DEV_NAME,
+ },
+ {
+ .compatible = "st,lis331dl-accel",
+ .data = LIS331DL_ACCEL_DEV_NAME,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, st_accel_of_match);
+#else
+#define st_accel_of_match NULL
+#endif
+
static int st_accel_spi_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
@@ -30,6 +101,8 @@ static int st_accel_spi_probe(struct spi_device *spi)
adata = iio_priv(indio_dev);
+ st_sensors_of_name_probe(&spi->dev, st_accel_of_match,
+ spi->modalias, sizeof(spi->modalias));
st_sensors_spi_configure(indio_dev, spi, adata);
err = st_accel_common_probe(indio_dev);
@@ -57,22 +130,17 @@ static const struct spi_device_id st_accel_id_table[] = {
{ LIS2DH12_ACCEL_DEV_NAME },
{ LIS3L02DQ_ACCEL_DEV_NAME },
{ LNG2DM_ACCEL_DEV_NAME },
+ { H3LIS331DL_ACCEL_DEV_NAME },
+ { LIS331DL_ACCEL_DEV_NAME },
+ { LIS3LV02DL_ACCEL_DEV_NAME },
{},
};
MODULE_DEVICE_TABLE(spi, st_accel_id_table);
-#ifdef CONFIG_OF
-static const struct of_device_id lis302dl_spi_dt_ids[] = {
- { .compatible = "st,lis302dl-spi" },
- {}
-};
-MODULE_DEVICE_TABLE(of, lis302dl_spi_dt_ids);
-#endif
-
static struct spi_driver st_accel_driver = {
.driver = {
.name = "st-accel-spi",
- .of_match_table = of_match_ptr(lis302dl_spi_dt_ids),
+ .of_match_table = of_match_ptr(st_accel_of_match),
},
.probe = st_accel_spi_probe,
.remove = st_accel_spi_remove,
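The driver itself resolves the matched entry through st_sensors_of_name_probe(), but the generic way to pick up the .data pointer from an of_device_id table like the one above is of_device_get_match_data(). A hedged sketch of that generic mechanism (hypothetical probe function, not this driver's code):

#include <linux/of_device.h>
#include <linux/spi/spi.h>

static int example_spi_probe(struct spi_device *spi)
{
	/* .data of the matching of_device_id entry, or NULL if nothing matched */
	const char *name = of_device_get_match_data(&spi->dev);

	if (name)
		dev_info(&spi->dev, "matched device name: %s\n", name);
	return 0;
}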
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 614fa41559b1..57625653fcb6 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -158,6 +158,7 @@ config AT91_SAMA5D2_ADC
tristate "Atmel AT91 SAMA5D2 ADC"
depends on ARCH_AT91 || COMPILE_TEST
depends on HAS_IOMEM
+ select IIO_TRIGGERED_BUFFER
help
Say yes here to build support for Atmel SAMA5D2 ADC which is
available on SAMA5D2 SoC family.
@@ -239,6 +240,15 @@ config DA9150_GPADC
To compile this driver as a module, choose M here: the module will be
called berlin2-adc.
+config DLN2_ADC
+ tristate "Diolan DLN-2 ADC driver support"
+ depends on MFD_DLN2
+ help
+ Say yes here to build support for Diolan DLN-2 ADC.
+
+ This driver can also be built as a module. If so, the module will be
+ called dln2-adc.
+
config ENVELOPE_DETECTOR
tristate "Envelope detector using a DAC and a comparator"
depends on OF
@@ -249,6 +259,17 @@ config ENVELOPE_DETECTOR
To compile this driver as a module, choose M here: the module will be
called envelope-detector.
+config EP93XX_ADC
+ tristate "Cirrus Logic EP93XX ADC driver"
+ depends on ARCH_EP93XX
+ help
+ Driver for the ADC module on the EP93XX series of SoCs from Cirrus Logic.
+ It's recommended to switch on the CONFIG_HIGH_RES_TIMERS option; in that
+ case the driver will reduce its CPU usage by 90% in some use cases.
+
+ To compile this driver as a module, choose M here: the module will be
+ called ep93xx_adc.
+
config EXYNOS_ADC
tristate "Exynos ADC driver support"
depends on ARCH_EXYNOS || ARCH_S3C24XX || ARCH_S3C64XX || (OF && COMPILE_TEST)
@@ -322,7 +343,7 @@ config INA2XX_ADC
This driver is mutually exclusive with the HWMON version.
config IMX7D_ADC
- tristate "IMX7D ADC driver"
+ tristate "Freescale IMX7D ADC driver"
depends on ARCH_MXC || COMPILE_TEST
depends on HAS_IOMEM
help
@@ -362,6 +383,16 @@ config LPC32XX_ADC
activate only one via device tree selection. Provides direct access
via sysfs.
+config LTC2471
+ tristate "Linear Technology LTC2471 and LTC2473 ADC driver"
+ depends on I2C
+ help
+ Say yes here to build support for Linear Technology LTC2471 and
+ LTC2473 16-bit I2C ADC.
+
+ This driver can also be built as a module. If so, the module will
+ be called ltc2471.
+
config LTC2485
tristate "Linear Technology LTC2485 ADC driver"
depends on I2C
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index b546736a5541..9874e05f52d7 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -24,7 +24,9 @@ obj-$(CONFIG_BERLIN2_ADC) += berlin2-adc.o
obj-$(CONFIG_CC10001_ADC) += cc10001_adc.o
obj-$(CONFIG_CPCAP_ADC) += cpcap-adc.o
obj-$(CONFIG_DA9150_GPADC) += da9150-gpadc.o
+obj-$(CONFIG_DLN2_ADC) += dln2-adc.o
obj-$(CONFIG_ENVELOPE_DETECTOR) += envelope-detector.o
+obj-$(CONFIG_EP93XX_ADC) += ep93xx_adc.o
obj-$(CONFIG_EXYNOS_ADC) += exynos_adc.o
obj-$(CONFIG_FSL_MX25_ADC) += fsl-imx25-gcq.o
obj-$(CONFIG_HI8435) += hi8435.o
@@ -34,6 +36,7 @@ obj-$(CONFIG_INA2XX_ADC) += ina2xx-adc.o
obj-$(CONFIG_LP8788_ADC) += lp8788_adc.o
obj-$(CONFIG_LPC18XX_ADC) += lpc18xx_adc.o
obj-$(CONFIG_LPC32XX_ADC) += lpc32xx_adc.o
+obj-$(CONFIG_LTC2471) += ltc2471.o
obj-$(CONFIG_LTC2485) += ltc2485.o
obj-$(CONFIG_LTC2497) += ltc2497.o
obj-$(CONFIG_MAX1027) += max1027.o
diff --git a/drivers/iio/adc/ad7766.c b/drivers/iio/adc/ad7766.c
index 75cca42b6e70..ce45037295d8 100644
--- a/drivers/iio/adc/ad7766.c
+++ b/drivers/iio/adc/ad7766.c
@@ -103,8 +103,7 @@ static int ad7766_preenable(struct iio_dev *indio_dev)
return ret;
}
- if (ad7766->pd_gpio)
- gpiod_set_value(ad7766->pd_gpio, 0);
+ gpiod_set_value(ad7766->pd_gpio, 0);
return 0;
}
@@ -113,8 +112,7 @@ static int ad7766_postdisable(struct iio_dev *indio_dev)
{
struct ad7766 *ad7766 = iio_priv(indio_dev);
- if (ad7766->pd_gpio)
- gpiod_set_value(ad7766->pd_gpio, 1);
+ gpiod_set_value(ad7766->pd_gpio, 1);
/*
* The PD pin is synchronous to the clock, so give it some time to
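Dropping the NULL checks is safe because the gpiod_* calls treat a NULL descriptor as a no-op, which is how optional GPIOs obtained with the *_optional getters are meant to be handled. A hedged sketch of that pattern, using a hypothetical "powerdown" GPIO name:

#include <linux/gpio/consumer.h>

static int example_probe(struct device *dev)
{
	struct gpio_desc *pd;

	/* NULL (not an error) when the GPIO simply isn't described */
	pd = devm_gpiod_get_optional(dev, "powerdown", GPIOD_OUT_HIGH);
	if (IS_ERR(pd))
		return PTR_ERR(pd);

	/* safe even if pd is NULL: gpiod_set_value() is then a no-op */
	gpiod_set_value(pd, 0);
	return 0;
}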
diff --git a/drivers/iio/adc/aspeed_adc.c b/drivers/iio/adc/aspeed_adc.c
index e0ea411a0b2d..c02b23d675cb 100644
--- a/drivers/iio/adc/aspeed_adc.c
+++ b/drivers/iio/adc/aspeed_adc.c
@@ -22,6 +22,7 @@
#include <linux/iio/iio.h>
#include <linux/iio/driver.h>
+#include <linux/iopoll.h>
#define ASPEED_RESOLUTION_BITS 10
#define ASPEED_CLOCKS_PER_SAMPLE 12
@@ -38,11 +39,17 @@
#define ASPEED_ENGINE_ENABLE BIT(0)
+#define ASPEED_ADC_CTRL_INIT_RDY BIT(8)
+
+#define ASPEED_ADC_INIT_POLLING_TIME 500
+#define ASPEED_ADC_INIT_TIMEOUT 500000
+
struct aspeed_adc_model_data {
const char *model_name;
unsigned int min_sampling_rate; // Hz
unsigned int max_sampling_rate; // Hz
unsigned int vref_voltage; // mV
+ bool wait_init_sequence;
};
struct aspeed_adc_data {
@@ -211,6 +218,24 @@ static int aspeed_adc_probe(struct platform_device *pdev)
goto scaler_error;
}
+ model_data = of_device_get_match_data(&pdev->dev);
+
+ if (model_data->wait_init_sequence) {
+ /* Enable engine in normal mode. */
+ writel(ASPEED_OPERATION_MODE_NORMAL | ASPEED_ENGINE_ENABLE,
+ data->base + ASPEED_REG_ENGINE_CONTROL);
+
+ /* Wait for initial sequence complete. */
+ ret = readl_poll_timeout(data->base + ASPEED_REG_ENGINE_CONTROL,
+ adc_engine_control_reg_val,
+ adc_engine_control_reg_val &
+ ASPEED_ADC_CTRL_INIT_RDY,
+ ASPEED_ADC_INIT_POLLING_TIME,
+ ASPEED_ADC_INIT_TIMEOUT);
+ if (ret)
+ goto scaler_error;
+ }
+
/* Start all channels in normal mode. */
ret = clk_prepare_enable(data->clk_scaler->clk);
if (ret)
@@ -274,6 +299,7 @@ static const struct aspeed_adc_model_data ast2500_model_data = {
.vref_voltage = 1800, // mV
.min_sampling_rate = 1,
.max_sampling_rate = 1000000,
+ .wait_init_sequence = true,
};
static const struct of_device_id aspeed_adc_matches[] = {
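readl_poll_timeout(addr, val, cond, sleep_us, timeout_us) from <linux/iopoll.h> keeps reading addr into val until cond becomes true or timeout_us expires, returning 0 on success or -ETIMEDOUT. A minimal sketch of the same wait as above, factored into a hypothetical helper and reusing the macros added by this patch:

#include <linux/iopoll.h>

static int wait_for_adc_init(void __iomem *ctrl_reg)
{
	u32 val;

	/* poll every 500 us, give up after 500 ms; val holds the last readl() */
	return readl_poll_timeout(ctrl_reg, val,
				  val & ASPEED_ADC_CTRL_INIT_RDY,
				  ASPEED_ADC_INIT_POLLING_TIME,
				  ASPEED_ADC_INIT_TIMEOUT);
}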
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index e10dca3ed74b..bc5b38e3a147 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -25,6 +25,11 @@
#include <linux/wait.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/regulator/consumer.h>
/* Control Register */
@@ -132,6 +137,17 @@
#define AT91_SAMA5D2_PRESSR 0xbc
/* Trigger Register */
#define AT91_SAMA5D2_TRGR 0xc0
+/* Mask for TRGMOD field of TRGR register */
+#define AT91_SAMA5D2_TRGR_TRGMOD_MASK GENMASK(2, 0)
+/* No trigger, only software trigger can start conversions */
+#define AT91_SAMA5D2_TRGR_TRGMOD_NO_TRIGGER 0
+/* Trigger Mode external trigger rising edge */
+#define AT91_SAMA5D2_TRGR_TRGMOD_EXT_TRIG_RISE 1
+/* Trigger Mode external trigger falling edge */
+#define AT91_SAMA5D2_TRGR_TRGMOD_EXT_TRIG_FALL 2
+/* Trigger Mode external trigger any edge */
+#define AT91_SAMA5D2_TRGR_TRGMOD_EXT_TRIG_ANY 3
+
/* Correction Select Register */
#define AT91_SAMA5D2_COSR 0xd0
/* Correction Value Register */
@@ -145,14 +161,29 @@
/* Version Register */
#define AT91_SAMA5D2_VERSION 0xfc
+#define AT91_SAMA5D2_HW_TRIG_CNT 3
+#define AT91_SAMA5D2_SINGLE_CHAN_CNT 12
+#define AT91_SAMA5D2_DIFF_CHAN_CNT 6
+
+/*
+ * Maximum number of bytes to hold conversion from all channels
+ * plus the timestamp
+ */
+#define AT91_BUFFER_MAX_BYTES ((AT91_SAMA5D2_SINGLE_CHAN_CNT + \
+ AT91_SAMA5D2_DIFF_CHAN_CNT) * 2 + 8)
+
+#define AT91_BUFFER_MAX_HWORDS (AT91_BUFFER_MAX_BYTES / 2)
+
#define AT91_SAMA5D2_CHAN_SINGLE(num, addr) \
{ \
.type = IIO_VOLTAGE, \
.channel = num, \
.address = addr, \
+ .scan_index = num, \
.scan_type = { \
.sign = 'u', \
.realbits = 12, \
+ .storagebits = 16, \
}, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
@@ -168,9 +199,11 @@
.channel = num, \
.channel2 = num2, \
.address = addr, \
+ .scan_index = num + AT91_SAMA5D2_SINGLE_CHAN_CNT, \
.scan_type = { \
.sign = 's', \
.realbits = 12, \
+ .storagebits = 16, \
}, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
@@ -188,6 +221,12 @@ struct at91_adc_soc_info {
unsigned max_sample_rate;
};
+struct at91_adc_trigger {
+ char *name;
+ unsigned int trgmod_value;
+ unsigned int edge_type;
+};
+
struct at91_adc_state {
void __iomem *base;
int irq;
@@ -195,11 +234,14 @@ struct at91_adc_state {
struct regulator *reg;
struct regulator *vref;
int vref_uv;
+ struct iio_trigger *trig;
+ const struct at91_adc_trigger *selected_trig;
const struct iio_chan_spec *chan;
bool conversion_done;
u32 conversion_value;
struct at91_adc_soc_info soc_info;
wait_queue_head_t wq_data_available;
+ u16 buffer[AT91_BUFFER_MAX_HWORDS];
/*
* lock to prevent concurrent 'single conversion' requests through
* sysfs.
@@ -207,6 +249,24 @@ struct at91_adc_state {
struct mutex lock;
};
+static const struct at91_adc_trigger at91_adc_trigger_list[] = {
+ {
+ .name = "external_rising",
+ .trgmod_value = AT91_SAMA5D2_TRGR_TRGMOD_EXT_TRIG_RISE,
+ .edge_type = IRQ_TYPE_EDGE_RISING,
+ },
+ {
+ .name = "external_falling",
+ .trgmod_value = AT91_SAMA5D2_TRGR_TRGMOD_EXT_TRIG_FALL,
+ .edge_type = IRQ_TYPE_EDGE_FALLING,
+ },
+ {
+ .name = "external_any",
+ .trgmod_value = AT91_SAMA5D2_TRGR_TRGMOD_EXT_TRIG_ANY,
+ .edge_type = IRQ_TYPE_EDGE_BOTH,
+ },
+};
+
static const struct iio_chan_spec at91_adc_channels[] = {
AT91_SAMA5D2_CHAN_SINGLE(0, 0x50),
AT91_SAMA5D2_CHAN_SINGLE(1, 0x54),
@@ -226,12 +286,132 @@ static const struct iio_chan_spec at91_adc_channels[] = {
AT91_SAMA5D2_CHAN_DIFF(6, 7, 0x68),
AT91_SAMA5D2_CHAN_DIFF(8, 9, 0x70),
AT91_SAMA5D2_CHAN_DIFF(10, 11, 0x78),
+ IIO_CHAN_SOFT_TIMESTAMP(AT91_SAMA5D2_SINGLE_CHAN_CNT
+ + AT91_SAMA5D2_DIFF_CHAN_CNT + 1),
+};
+
+static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
+{
+ struct iio_dev *indio = iio_trigger_get_drvdata(trig);
+ struct at91_adc_state *st = iio_priv(indio);
+ u32 status = at91_adc_readl(st, AT91_SAMA5D2_TRGR);
+ u8 bit;
+
+ /* clear TRGMOD */
+ status &= ~AT91_SAMA5D2_TRGR_TRGMOD_MASK;
+
+ if (state)
+ status |= st->selected_trig->trgmod_value;
+
+ /* set/unset hw trigger */
+ at91_adc_writel(st, AT91_SAMA5D2_TRGR, status);
+
+ for_each_set_bit(bit, indio->active_scan_mask, indio->num_channels) {
+ struct iio_chan_spec const *chan = indio->channels + bit;
+
+ if (state) {
+ at91_adc_writel(st, AT91_SAMA5D2_CHER,
+ BIT(chan->channel));
+ at91_adc_writel(st, AT91_SAMA5D2_IER,
+ BIT(chan->channel));
+ } else {
+ at91_adc_writel(st, AT91_SAMA5D2_IDR,
+ BIT(chan->channel));
+ at91_adc_writel(st, AT91_SAMA5D2_CHDR,
+ BIT(chan->channel));
+ }
+ }
+
+ return 0;
+}
+
+static int at91_adc_reenable_trigger(struct iio_trigger *trig)
+{
+ struct iio_dev *indio = iio_trigger_get_drvdata(trig);
+ struct at91_adc_state *st = iio_priv(indio);
+
+ enable_irq(st->irq);
+
+ /* Needed to ACK the DRDY interrupt */
+ at91_adc_readl(st, AT91_SAMA5D2_LCDR);
+ return 0;
+}
+
+static const struct iio_trigger_ops at91_adc_trigger_ops = {
+ .owner = THIS_MODULE,
+ .set_trigger_state = &at91_adc_configure_trigger,
+ .try_reenable = &at91_adc_reenable_trigger,
};
+static struct iio_trigger *at91_adc_allocate_trigger(struct iio_dev *indio,
+ char *trigger_name)
+{
+ struct iio_trigger *trig;
+ int ret;
+
+ trig = devm_iio_trigger_alloc(&indio->dev, "%s-dev%d-%s", indio->name,
+ indio->id, trigger_name);
+ if (!trig)
+ return NULL;
+
+ trig->dev.parent = indio->dev.parent;
+ iio_trigger_set_drvdata(trig, indio);
+ trig->ops = &at91_adc_trigger_ops;
+
+ ret = devm_iio_trigger_register(&indio->dev, trig);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return trig;
+}
+
+static int at91_adc_trigger_init(struct iio_dev *indio)
+{
+ struct at91_adc_state *st = iio_priv(indio);
+
+ st->trig = at91_adc_allocate_trigger(indio, st->selected_trig->name);
+ if (IS_ERR(st->trig)) {
+ dev_err(&indio->dev,
+ "could not allocate trigger\n");
+ return PTR_ERR(st->trig);
+ }
+
+ return 0;
+}
+
+static irqreturn_t at91_adc_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio = pf->indio_dev;
+ struct at91_adc_state *st = iio_priv(indio);
+ int i = 0;
+ u8 bit;
+
+ for_each_set_bit(bit, indio->active_scan_mask, indio->num_channels) {
+ struct iio_chan_spec const *chan = indio->channels + bit;
+
+ st->buffer[i] = at91_adc_readl(st, chan->address);
+ i++;
+ }
+
+ iio_push_to_buffers_with_timestamp(indio, st->buffer, pf->timestamp);
+
+ iio_trigger_notify_done(indio->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int at91_adc_buffer_init(struct iio_dev *indio)
+{
+ return devm_iio_triggered_buffer_setup(&indio->dev, indio,
+ &iio_pollfunc_store_time,
+ &at91_adc_trigger_handler, NULL);
+}
+
static unsigned at91_adc_startup_time(unsigned startup_time_min,
unsigned adc_clk_khz)
{
- const unsigned startup_lookup[] = {
+ static const unsigned int startup_lookup[] = {
0, 8, 16, 24,
64, 80, 96, 112,
512, 576, 640, 704,
@@ -293,14 +473,18 @@ static irqreturn_t at91_adc_interrupt(int irq, void *private)
u32 status = at91_adc_readl(st, AT91_SAMA5D2_ISR);
u32 imr = at91_adc_readl(st, AT91_SAMA5D2_IMR);
- if (status & imr) {
+ if (!(status & imr))
+ return IRQ_NONE;
+
+ if (iio_buffer_enabled(indio)) {
+ disable_irq_nosync(irq);
+ iio_trigger_poll(indio->trig);
+ } else {
st->conversion_value = at91_adc_readl(st, st->chan->address);
st->conversion_done = true;
wake_up_interruptible(&st->wq_data_available);
- return IRQ_HANDLED;
}
-
- return IRQ_NONE;
+ return IRQ_HANDLED;
}
static int at91_adc_read_raw(struct iio_dev *indio_dev,
@@ -313,6 +497,11 @@ static int at91_adc_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
+ /* we cannot use the software trigger if the hw trigger is enabled */
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
mutex_lock(&st->lock);
st->chan = chan;
@@ -344,6 +533,8 @@ static int at91_adc_read_raw(struct iio_dev *indio_dev,
at91_adc_writel(st, AT91_SAMA5D2_CHDR, BIT(chan->channel));
mutex_unlock(&st->lock);
+
+ iio_device_release_direct_mode(indio_dev);
return ret;
case IIO_CHAN_INFO_SCALE:
@@ -386,12 +577,27 @@ static const struct iio_info at91_adc_info = {
.driver_module = THIS_MODULE,
};
+static void at91_adc_hw_init(struct at91_adc_state *st)
+{
+ at91_adc_writel(st, AT91_SAMA5D2_CR, AT91_SAMA5D2_CR_SWRST);
+ at91_adc_writel(st, AT91_SAMA5D2_IDR, 0xffffffff);
+ /*
+ * Transfer field must be set to 2 according to the datasheet and
+ * allows different analog settings for each channel.
+ */
+ at91_adc_writel(st, AT91_SAMA5D2_MR,
+ AT91_SAMA5D2_MR_TRANSFER(2) | AT91_SAMA5D2_MR_ANACH);
+
+ at91_adc_setup_samp_freq(st, st->soc_info.min_sample_rate);
+}
+
static int at91_adc_probe(struct platform_device *pdev)
{
struct iio_dev *indio_dev;
struct at91_adc_state *st;
struct resource *res;
- int ret;
+ int ret, i;
+ u32 edge_type;
indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*st));
if (!indio_dev)
@@ -432,6 +638,27 @@ static int at91_adc_probe(struct platform_device *pdev)
return ret;
}
+ ret = of_property_read_u32(pdev->dev.of_node,
+ "atmel,trigger-edge-type", &edge_type);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "invalid or missing value for atmel,trigger-edge-type\n");
+ return ret;
+ }
+
+ st->selected_trig = NULL;
+
+ for (i = 0; i < AT91_SAMA5D2_HW_TRIG_CNT; i++)
+ if (at91_adc_trigger_list[i].edge_type == edge_type) {
+ st->selected_trig = &at91_adc_trigger_list[i];
+ break;
+ }
+
+ if (!st->selected_trig) {
+ dev_err(&pdev->dev, "invalid external trigger edge value\n");
+ return -EINVAL;
+ }
+
init_waitqueue_head(&st->wq_data_available);
mutex_init(&st->lock);
@@ -482,16 +709,7 @@ static int at91_adc_probe(struct platform_device *pdev)
goto vref_disable;
}
- at91_adc_writel(st, AT91_SAMA5D2_CR, AT91_SAMA5D2_CR_SWRST);
- at91_adc_writel(st, AT91_SAMA5D2_IDR, 0xffffffff);
- /*
- * Transfer field must be set to 2 according to the datasheet and
- * allows different analog settings for each channel.
- */
- at91_adc_writel(st, AT91_SAMA5D2_MR,
- AT91_SAMA5D2_MR_TRANSFER(2) | AT91_SAMA5D2_MR_ANACH);
-
- at91_adc_setup_samp_freq(st, st->soc_info.min_sample_rate);
+ at91_adc_hw_init(st);
ret = clk_prepare_enable(st->per_clk);
if (ret)
@@ -499,10 +717,25 @@ static int at91_adc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, indio_dev);
+ ret = at91_adc_buffer_init(indio_dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "couldn't initialize the buffer.\n");
+ goto per_clk_disable_unprepare;
+ }
+
+ ret = at91_adc_trigger_init(indio_dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "couldn't setup the triggers.\n");
+ goto per_clk_disable_unprepare;
+ }
+
ret = iio_device_register(indio_dev);
if (ret < 0)
goto per_clk_disable_unprepare;
+ dev_info(&pdev->dev, "setting up trigger as %s\n",
+ st->selected_trig->name);
+
dev_info(&pdev->dev, "version: %x\n",
readl_relaxed(st->base + AT91_SAMA5D2_VERSION));
@@ -532,6 +765,69 @@ static int at91_adc_remove(struct platform_device *pdev)
return 0;
}
+static __maybe_unused int at91_adc_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev =
+ platform_get_drvdata(to_platform_device(dev));
+ struct at91_adc_state *st = iio_priv(indio_dev);
+
+ /*
+ * Do a software reset of the ADC before we go to suspend.
+ * This will ensure that all pins are free from being muxed by the ADC
+ * and can be used by other devices.
+ * Otherwise, the ADC will hog them and we can't go to suspend mode.
+ */
+ at91_adc_writel(st, AT91_SAMA5D2_CR, AT91_SAMA5D2_CR_SWRST);
+
+ clk_disable_unprepare(st->per_clk);
+ regulator_disable(st->vref);
+ regulator_disable(st->reg);
+
+ return pinctrl_pm_select_sleep_state(dev);
+}
+
+static __maybe_unused int at91_adc_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev =
+ platform_get_drvdata(to_platform_device(dev));
+ struct at91_adc_state *st = iio_priv(indio_dev);
+ int ret;
+
+ ret = pinctrl_pm_select_default_state(dev);
+ if (ret)
+ goto resume_failed;
+
+ ret = regulator_enable(st->reg);
+ if (ret)
+ goto resume_failed;
+
+ ret = regulator_enable(st->vref);
+ if (ret)
+ goto reg_disable_resume;
+
+ ret = clk_prepare_enable(st->per_clk);
+ if (ret)
+ goto vref_disable_resume;
+
+ at91_adc_hw_init(st);
+
+ /* reconfiguring trigger hardware state */
+ if (iio_buffer_enabled(indio_dev))
+ at91_adc_configure_trigger(st->trig, true);
+
+ return 0;
+
+vref_disable_resume:
+ regulator_disable(st->vref);
+reg_disable_resume:
+ regulator_disable(st->reg);
+resume_failed:
+ dev_err(&indio_dev->dev, "failed to resume\n");
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(at91_adc_pm_ops, at91_adc_suspend, at91_adc_resume);
+
static const struct of_device_id at91_adc_dt_match[] = {
{
.compatible = "atmel,sama5d2-adc",
@@ -547,6 +843,7 @@ static struct platform_driver at91_adc_driver = {
.driver = {
.name = "at91-sama5d2_adc",
.of_match_table = at91_adc_dt_match,
+ .pm = &at91_adc_pm_ops,
},
};
module_platform_driver(at91_adc_driver)
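SIMPLE_DEV_PM_OPS() only wires the two callbacks into the dev_pm_ops when CONFIG_PM_SLEEP is enabled, which is why the handlers above are tagged __maybe_unused rather than wrapped in #ifdefs. A hedged sketch of the bare pattern with hypothetical callbacks:

static __maybe_unused int example_suspend(struct device *dev)
{
	/* quiesce the hardware here */
	return 0;
}

static __maybe_unused int example_resume(struct device *dev)
{
	/* re-initialize the hardware here */
	return 0;
}

/* empty ops when CONFIG_PM_SLEEP is disabled, so the callbacks may go unused */
static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);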
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index 34b928cefeed..15109728cae7 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -799,7 +799,7 @@ static u32 calc_startup_ticks_9x5(u32 startup_time, u32 adc_clk_khz)
* For sama5d3x and at91sam9x5, the formula changes to:
* Startup Time = <lookup_table_value> / ADC Clock
*/
- const int startup_lookup[] = {
+ static const int startup_lookup[] = {
0, 8, 16, 24,
64, 80, 96, 112,
512, 576, 640, 704,
diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
index 64799ad7ebad..462a99c13e7a 100644
--- a/drivers/iio/adc/axp288_adc.c
+++ b/drivers/iio/adc/axp288_adc.c
@@ -28,6 +28,8 @@
#include <linux/iio/driver.h>
#define AXP288_ADC_EN_MASK 0xF1
+#define AXP288_ADC_TS_PIN_GPADC 0xF2
+#define AXP288_ADC_TS_PIN_ON 0xF3
enum axp288_adc_id {
AXP288_ADC_TS,
@@ -121,6 +123,26 @@ static int axp288_adc_read_channel(int *val, unsigned long address,
return IIO_VAL_INT;
}
+static int axp288_adc_set_ts(struct regmap *regmap, unsigned int mode,
+ unsigned long address)
+{
+ int ret;
+
+ /* channels other than GPADC do not need to switch TS pin */
+ if (address != AXP288_GP_ADC_H)
+ return 0;
+
+ ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, mode);
+ if (ret)
+ return ret;
+
+ /* When switching to the GPADC pin give things some time to settle */
+ if (mode == AXP288_ADC_TS_PIN_GPADC)
+ usleep_range(6000, 10000);
+
+ return 0;
+}
+
static int axp288_adc_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
@@ -131,7 +153,16 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
mutex_lock(&indio_dev->mlock);
switch (mask) {
case IIO_CHAN_INFO_RAW:
+ if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_GPADC,
+ chan->address)) {
+ dev_err(&indio_dev->dev, "GPADC mode\n");
+ ret = -EINVAL;
+ break;
+ }
ret = axp288_adc_read_channel(val, chan->address, info->regmap);
+ if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_ON,
+ chan->address))
+ dev_err(&indio_dev->dev, "TS pin restore\n");
break;
default:
ret = -EINVAL;
@@ -141,6 +172,15 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
return ret;
}
+static int axp288_adc_set_state(struct regmap *regmap)
+{
+ /* The ADC should always be enabled for the internal fuel gauge to function */
+ if (regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON))
+ return -EIO;
+
+ return regmap_write(regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK);
+}
+
static const struct iio_info axp288_adc_iio_info = {
.read_raw = &axp288_adc_read_raw,
.driver_module = THIS_MODULE,
@@ -169,7 +209,7 @@ static int axp288_adc_probe(struct platform_device *pdev)
* Set ADC to enabled state at all times, including during system suspend;
* otherwise internal fuel gauge functionality may be affected.
*/
- ret = regmap_write(info->regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK);
+ ret = axp288_adc_set_state(axp20x->regmap);
if (ret) {
dev_err(&pdev->dev, "unable to enable ADC device\n");
return ret;
diff --git a/drivers/iio/adc/dln2-adc.c b/drivers/iio/adc/dln2-adc.c
new file mode 100644
index 000000000000..ab8d6aed5085
--- /dev/null
+++ b/drivers/iio/adc/dln2-adc.c
@@ -0,0 +1,722 @@
+/*
+ * Driver for the Diolan DLN-2 USB-ADC adapter
+ *
+ * Copyright (c) 2017 Jack Andersen
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/dln2.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/kfifo_buf.h>
+
+#define DLN2_ADC_MOD_NAME "dln2-adc"
+
+#define DLN2_ADC_ID 0x06
+
+#define DLN2_ADC_GET_CHANNEL_COUNT DLN2_CMD(0x01, DLN2_ADC_ID)
+#define DLN2_ADC_ENABLE DLN2_CMD(0x02, DLN2_ADC_ID)
+#define DLN2_ADC_DISABLE DLN2_CMD(0x03, DLN2_ADC_ID)
+#define DLN2_ADC_CHANNEL_ENABLE DLN2_CMD(0x05, DLN2_ADC_ID)
+#define DLN2_ADC_CHANNEL_DISABLE DLN2_CMD(0x06, DLN2_ADC_ID)
+#define DLN2_ADC_SET_RESOLUTION DLN2_CMD(0x08, DLN2_ADC_ID)
+#define DLN2_ADC_CHANNEL_GET_VAL DLN2_CMD(0x0A, DLN2_ADC_ID)
+#define DLN2_ADC_CHANNEL_GET_ALL_VAL DLN2_CMD(0x0B, DLN2_ADC_ID)
+#define DLN2_ADC_CHANNEL_SET_CFG DLN2_CMD(0x0C, DLN2_ADC_ID)
+#define DLN2_ADC_CHANNEL_GET_CFG DLN2_CMD(0x0D, DLN2_ADC_ID)
+#define DLN2_ADC_CONDITION_MET_EV DLN2_CMD(0x10, DLN2_ADC_ID)
+
+#define DLN2_ADC_EVENT_NONE 0
+#define DLN2_ADC_EVENT_BELOW 1
+#define DLN2_ADC_EVENT_LEVEL_ABOVE 2
+#define DLN2_ADC_EVENT_OUTSIDE 3
+#define DLN2_ADC_EVENT_INSIDE 4
+#define DLN2_ADC_EVENT_ALWAYS 5
+
+#define DLN2_ADC_MAX_CHANNELS 8
+#define DLN2_ADC_DATA_BITS 10
+
+/*
+ * Plays a similar role to iio_demux_table in the subsystem core, except it is
+ * allocated in a fixed 8-element array.
+ */
+struct dln2_adc_demux_table {
+ unsigned int from;
+ unsigned int to;
+ unsigned int length;
+};
+
+struct dln2_adc {
+ struct platform_device *pdev;
+ struct iio_chan_spec iio_channels[DLN2_ADC_MAX_CHANNELS + 1];
+ int port, trigger_chan;
+ struct iio_trigger *trig;
+ struct mutex mutex;
+ /* Cached sample period in milliseconds */
+ unsigned int sample_period;
+ /* Demux table */
+ unsigned int demux_count;
+ struct dln2_adc_demux_table demux[DLN2_ADC_MAX_CHANNELS];
+ /* Precomputed timestamp padding offset and length */
+ unsigned int ts_pad_offset, ts_pad_length;
+};
+
+struct dln2_adc_port_chan {
+ u8 port;
+ u8 chan;
+};
+
+struct dln2_adc_get_all_vals {
+ __le16 channel_mask;
+ __le16 values[DLN2_ADC_MAX_CHANNELS];
+};
+
+static void dln2_adc_add_demux(struct dln2_adc *dln2,
+ unsigned int in_loc, unsigned int out_loc,
+ unsigned int length)
+{
+ struct dln2_adc_demux_table *p = dln2->demux_count ?
+ &dln2->demux[dln2->demux_count - 1] : NULL;
+
+ if (p && p->from + p->length == in_loc &&
+ p->to + p->length == out_loc) {
+ p->length += length;
+ } else if (dln2->demux_count < DLN2_ADC_MAX_CHANNELS) {
+ p = &dln2->demux[dln2->demux_count++];
+ p->from = in_loc;
+ p->to = out_loc;
+ p->length = length;
+ }
+}
+
+static void dln2_adc_update_demux(struct dln2_adc *dln2)
+{
+ int in_ind = -1, out_ind;
+ unsigned int in_loc = 0, out_loc = 0;
+ struct iio_dev *indio_dev = platform_get_drvdata(dln2->pdev);
+
+ /* Clear out any old demux */
+ dln2->demux_count = 0;
+
+ /* Optimize all 8-channels case */
+ if (indio_dev->masklength &&
+ (*indio_dev->active_scan_mask & 0xff) == 0xff) {
+ dln2_adc_add_demux(dln2, 0, 0, 16);
+ dln2->ts_pad_offset = 0;
+ dln2->ts_pad_length = 0;
+ return;
+ }
+
+ /* Build demux table from fixed 8-channels to active_scan_mask */
+ for_each_set_bit(out_ind,
+ indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ /* Handle timestamp separately */
+ if (out_ind == DLN2_ADC_MAX_CHANNELS)
+ break;
+ for (++in_ind; in_ind != out_ind; ++in_ind)
+ in_loc += 2;
+ dln2_adc_add_demux(dln2, in_loc, out_loc, 2);
+ out_loc += 2;
+ in_loc += 2;
+ }
+
+ if (indio_dev->scan_timestamp) {
+ size_t ts_offset = indio_dev->scan_bytes / sizeof(int64_t) - 1;
+
+ dln2->ts_pad_offset = out_loc;
+ dln2->ts_pad_length = ts_offset * sizeof(int64_t) - out_loc;
+ } else {
+ dln2->ts_pad_offset = 0;
+ dln2->ts_pad_length = 0;
+ }
+}
+
+static int dln2_adc_get_chan_count(struct dln2_adc *dln2)
+{
+ int ret;
+ u8 port = dln2->port;
+ u8 count;
+ int olen = sizeof(count);
+
+ ret = dln2_transfer(dln2->pdev, DLN2_ADC_GET_CHANNEL_COUNT,
+ &port, sizeof(port), &count, &olen);
+ if (ret < 0) {
+ dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);
+ return ret;
+ }
+ if (olen < sizeof(count))
+ return -EPROTO;
+
+ return count;
+}
+
+static int dln2_adc_set_port_resolution(struct dln2_adc *dln2)
+{
+ int ret;
+ struct dln2_adc_port_chan port_chan = {
+ .port = dln2->port,
+ .chan = DLN2_ADC_DATA_BITS,
+ };
+
+ ret = dln2_transfer_tx(dln2->pdev, DLN2_ADC_SET_RESOLUTION,
+ &port_chan, sizeof(port_chan));
+ if (ret < 0)
+ dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);
+
+ return ret;
+}
+
+static int dln2_adc_set_chan_enabled(struct dln2_adc *dln2,
+ int channel, bool enable)
+{
+ int ret;
+ struct dln2_adc_port_chan port_chan = {
+ .port = dln2->port,
+ .chan = channel,
+ };
+ u16 cmd = enable ? DLN2_ADC_CHANNEL_ENABLE : DLN2_ADC_CHANNEL_DISABLE;
+
+ ret = dln2_transfer_tx(dln2->pdev, cmd, &port_chan, sizeof(port_chan));
+ if (ret < 0)
+ dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);
+
+ return ret;
+}
+
+static int dln2_adc_set_port_enabled(struct dln2_adc *dln2, bool enable,
+ u16 *conflict_out)
+{
+ int ret;
+ u8 port = dln2->port;
+ __le16 conflict;
+ int olen = sizeof(conflict);
+ u16 cmd = enable ? DLN2_ADC_ENABLE : DLN2_ADC_DISABLE;
+
+ if (conflict_out)
+ *conflict_out = 0;
+
+ ret = dln2_transfer(dln2->pdev, cmd, &port, sizeof(port),
+ &conflict, &olen);
+ if (ret < 0) {
+ dev_dbg(&dln2->pdev->dev, "Problem in %s(%d)\n",
+ __func__, (int)enable);
+ if (conflict_out && enable && olen >= sizeof(conflict))
+ *conflict_out = le16_to_cpu(conflict);
+ return ret;
+ }
+ if (enable && olen < sizeof(conflict))
+ return -EPROTO;
+
+ return ret;
+}
+
+static int dln2_adc_set_chan_period(struct dln2_adc *dln2,
+ unsigned int channel, unsigned int period)
+{
+ int ret;
+ struct {
+ struct dln2_adc_port_chan port_chan;
+ __u8 type;
+ __le16 period;
+ __le16 low;
+ __le16 high;
+ } __packed set_cfg = {
+ .port_chan.port = dln2->port,
+ .port_chan.chan = channel,
+ .type = period ? DLN2_ADC_EVENT_ALWAYS : DLN2_ADC_EVENT_NONE,
+ .period = cpu_to_le16(period)
+ };
+
+ ret = dln2_transfer_tx(dln2->pdev, DLN2_ADC_CHANNEL_SET_CFG,
+ &set_cfg, sizeof(set_cfg));
+ if (ret < 0)
+ dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);
+
+ return ret;
+}
+
+static int dln2_adc_read(struct dln2_adc *dln2, unsigned int channel)
+{
+ int ret, i;
+ struct iio_dev *indio_dev = platform_get_drvdata(dln2->pdev);
+ u16 conflict;
+ __le16 value;
+ int olen = sizeof(value);
+ struct dln2_adc_port_chan port_chan = {
+ .port = dln2->port,
+ .chan = channel,
+ };
+
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret < 0)
+ return ret;
+
+ ret = dln2_adc_set_chan_enabled(dln2, channel, true);
+ if (ret < 0)
+ goto release_direct;
+
+ ret = dln2_adc_set_port_enabled(dln2, true, &conflict);
+ if (ret < 0) {
+ if (conflict) {
+ dev_err(&dln2->pdev->dev,
+ "ADC pins conflict with mask %04X\n",
+ (int)conflict);
+ ret = -EBUSY;
+ }
+ goto disable_chan;
+ }
+
+ /*
+ * Call GET_VAL twice due to initial zero-return immediately after
+ * enabling channel.
+ */
+ for (i = 0; i < 2; ++i) {
+ ret = dln2_transfer(dln2->pdev, DLN2_ADC_CHANNEL_GET_VAL,
+ &port_chan, sizeof(port_chan),
+ &value, &olen);
+ if (ret < 0) {
+ dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);
+ goto disable_port;
+ }
+ if (olen < sizeof(value)) {
+ ret = -EPROTO;
+ goto disable_port;
+ }
+ }
+
+ ret = le16_to_cpu(value);
+
+disable_port:
+ dln2_adc_set_port_enabled(dln2, false, NULL);
+disable_chan:
+ dln2_adc_set_chan_enabled(dln2, channel, false);
+release_direct:
+ iio_device_release_direct_mode(indio_dev);
+
+ return ret;
+}
+
+static int dln2_adc_read_all(struct dln2_adc *dln2,
+ struct dln2_adc_get_all_vals *get_all_vals)
+{
+ int ret;
+ __u8 port = dln2->port;
+ int olen = sizeof(*get_all_vals);
+
+ ret = dln2_transfer(dln2->pdev, DLN2_ADC_CHANNEL_GET_ALL_VAL,
+ &port, sizeof(port), get_all_vals, &olen);
+ if (ret < 0) {
+ dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);
+ return ret;
+ }
+ if (olen < sizeof(*get_all_vals))
+ return -EPROTO;
+
+ return ret;
+}
+
+static int dln2_adc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long mask)
+{
+ int ret;
+ unsigned int microhertz;
+ struct dln2_adc *dln2 = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&dln2->mutex);
+ ret = dln2_adc_read(dln2, chan->channel);
+ mutex_unlock(&dln2->mutex);
+
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ /*
+ * Voltage reference is fixed at 3.3v
+ * 3.3 / (1 << 10) * 1000000000
+ */
+ *val = 0;
+ *val2 = 3222656;
+ return IIO_VAL_INT_PLUS_NANO;
+
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (dln2->sample_period) {
+ microhertz = 1000000000 / dln2->sample_period;
+ *val = microhertz / 1000000;
+ *val2 = microhertz % 1000000;
+ } else {
+ *val = 0;
+ *val2 = 0;
+ }
+
+ return IIO_VAL_INT_PLUS_MICRO;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int dln2_adc_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ int ret;
+ unsigned int microhertz;
+ struct dln2_adc *dln2 = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ microhertz = 1000000 * val + val2;
+
+ mutex_lock(&dln2->mutex);
+
+ dln2->sample_period =
+ microhertz ? 1000000000 / microhertz : UINT_MAX;
+ if (dln2->sample_period > 65535) {
+ dln2->sample_period = 65535;
+ dev_warn(&dln2->pdev->dev,
+ "clamping period to 65535ms\n");
+ }
+
+ /*
+ * The first requested channel is arbitrated as a shared
+ * trigger source, so only one event is registered with the
+ * DLN. The event handler will then read all enabled channel
+ * values using DLN2_ADC_CHANNEL_GET_ALL_VAL to maintain
+ * synchronization between ADC readings.
+ */
+ if (dln2->trigger_chan != -1)
+ ret = dln2_adc_set_chan_period(dln2,
+ dln2->trigger_chan, dln2->sample_period);
+ else
+ ret = 0;
+
+ mutex_unlock(&dln2->mutex);
+
+ return ret;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int dln2_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask)
+{
+ struct dln2_adc *dln2 = iio_priv(indio_dev);
+ int chan_count = indio_dev->num_channels - 1;
+ int ret, i, j;
+
+ mutex_lock(&dln2->mutex);
+
+ for (i = 0; i < chan_count; ++i) {
+ ret = dln2_adc_set_chan_enabled(dln2, i,
+ test_bit(i, scan_mask));
+ if (ret < 0) {
+ for (j = 0; j < i; ++j)
+ dln2_adc_set_chan_enabled(dln2, j, false);
+ mutex_unlock(&dln2->mutex);
+ dev_err(&dln2->pdev->dev,
+ "Unable to enable ADC channel %d\n", i);
+ return -EBUSY;
+ }
+ }
+
+ dln2_adc_update_demux(dln2);
+
+ mutex_unlock(&dln2->mutex);
+
+ return 0;
+}
+
+#define DLN2_ADC_CHAN(lval, idx) { \
+ lval.type = IIO_VOLTAGE; \
+ lval.channel = idx; \
+ lval.indexed = 1; \
+ lval.info_mask_separate = BIT(IIO_CHAN_INFO_RAW); \
+ lval.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ); \
+ lval.scan_index = idx; \
+ lval.scan_type.sign = 'u'; \
+ lval.scan_type.realbits = DLN2_ADC_DATA_BITS; \
+ lval.scan_type.storagebits = 16; \
+ lval.scan_type.endianness = IIO_LE; \
+}
+
+/* Assignment version of IIO_CHAN_SOFT_TIMESTAMP */
+#define IIO_CHAN_SOFT_TIMESTAMP_ASSIGN(lval, _si) { \
+ lval.type = IIO_TIMESTAMP; \
+ lval.channel = -1; \
+ lval.scan_index = _si; \
+ lval.scan_type.sign = 's'; \
+ lval.scan_type.realbits = 64; \
+ lval.scan_type.storagebits = 64; \
+}
+
+static const struct iio_info dln2_adc_info = {
+ .read_raw = dln2_adc_read_raw,
+ .write_raw = dln2_adc_write_raw,
+ .update_scan_mode = dln2_update_scan_mode,
+ .driver_module = THIS_MODULE,
+};
+
+static irqreturn_t dln2_adc_trigger_h(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct {
+ __le16 values[DLN2_ADC_MAX_CHANNELS];
+ int64_t timestamp_space;
+ } data;
+ struct dln2_adc_get_all_vals dev_data;
+ struct dln2_adc *dln2 = iio_priv(indio_dev);
+ const struct dln2_adc_demux_table *t;
+ int ret, i;
+
+ mutex_lock(&dln2->mutex);
+ ret = dln2_adc_read_all(dln2, &dev_data);
+ mutex_unlock(&dln2->mutex);
+ if (ret < 0)
+ goto done;
+
+ /* Demux operation */
+ for (i = 0; i < dln2->demux_count; ++i) {
+ t = &dln2->demux[i];
+ memcpy((void *)data.values + t->to,
+ (void *)dev_data.values + t->from, t->length);
+ }
+
+ /* Zero padding space between values and timestamp */
+ if (dln2->ts_pad_length)
+ memset((void *)data.values + dln2->ts_pad_offset,
+ 0, dln2->ts_pad_length);
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &data,
+ iio_get_time_ns(indio_dev));
+
+done:
+ iio_trigger_notify_done(indio_dev->trig);
+ return IRQ_HANDLED;
+}
+
+static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev)
+{
+ int ret;
+ struct dln2_adc *dln2 = iio_priv(indio_dev);
+ u16 conflict;
+ unsigned int trigger_chan;
+
+ mutex_lock(&dln2->mutex);
+
+ /* Enable ADC */
+ ret = dln2_adc_set_port_enabled(dln2, true, &conflict);
+ if (ret < 0) {
+ mutex_unlock(&dln2->mutex);
+ dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);
+ if (conflict) {
+ dev_err(&dln2->pdev->dev,
+ "ADC pins conflict with mask %04X\n",
+ (int)conflict);
+ ret = -EBUSY;
+ }
+ return ret;
+ }
+
+ /* Assign trigger channel based on first enabled channel */
+ trigger_chan = find_first_bit(indio_dev->active_scan_mask,
+ indio_dev->masklength);
+ if (trigger_chan < DLN2_ADC_MAX_CHANNELS) {
+ dln2->trigger_chan = trigger_chan;
+ ret = dln2_adc_set_chan_period(dln2, dln2->trigger_chan,
+ dln2->sample_period);
+ mutex_unlock(&dln2->mutex);
+ if (ret < 0) {
+ dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);
+ return ret;
+ }
+ } else {
+ dln2->trigger_chan = -1;
+ mutex_unlock(&dln2->mutex);
+ }
+
+ return iio_triggered_buffer_postenable(indio_dev);
+}
+
+static int dln2_adc_triggered_buffer_predisable(struct iio_dev *indio_dev)
+{
+ int ret;
+ struct dln2_adc *dln2 = iio_priv(indio_dev);
+
+ mutex_lock(&dln2->mutex);
+
+ /* Disable trigger channel */
+ if (dln2->trigger_chan != -1) {
+ dln2_adc_set_chan_period(dln2, dln2->trigger_chan, 0);
+ dln2->trigger_chan = -1;
+ }
+
+ /* Disable ADC */
+ ret = dln2_adc_set_port_enabled(dln2, false, NULL);
+
+ mutex_unlock(&dln2->mutex);
+ if (ret < 0) {
+ dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);
+ return ret;
+ }
+
+ return iio_triggered_buffer_predisable(indio_dev);
+}
+
+static const struct iio_buffer_setup_ops dln2_adc_buffer_setup_ops = {
+ .postenable = dln2_adc_triggered_buffer_postenable,
+ .predisable = dln2_adc_triggered_buffer_predisable,
+};
+
+static void dln2_adc_event(struct platform_device *pdev, u16 echo,
+ const void *data, int len)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct dln2_adc *dln2 = iio_priv(indio_dev);
+
+ /* Called via URB completion handler */
+ iio_trigger_poll(dln2->trig);
+}
+
+static const struct iio_trigger_ops dln2_adc_trigger_ops = {
+ .owner = THIS_MODULE,
+};
+
+static int dln2_adc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dln2_adc *dln2;
+ struct dln2_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct iio_dev *indio_dev;
+ int i, ret, chans;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*dln2));
+ if (!indio_dev) {
+ dev_err(dev, "failed allocating iio device\n");
+ return -ENOMEM;
+ }
+
+ dln2 = iio_priv(indio_dev);
+ dln2->pdev = pdev;
+ dln2->port = pdata->port;
+ dln2->trigger_chan = -1;
+ mutex_init(&dln2->mutex);
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ ret = dln2_adc_set_port_resolution(dln2);
+ if (ret < 0) {
+ dev_err(dev, "failed to set ADC resolution to 10 bits\n");
+ return ret;
+ }
+
+ chans = dln2_adc_get_chan_count(dln2);
+ if (chans < 0) {
+ dev_err(dev, "failed to get channel count: %d\n", chans);
+ return chans;
+ }
+ if (chans > DLN2_ADC_MAX_CHANNELS) {
+ chans = DLN2_ADC_MAX_CHANNELS;
+ dev_warn(dev, "clamping channels to %d\n",
+ DLN2_ADC_MAX_CHANNELS);
+ }
+
+ for (i = 0; i < chans; ++i)
+ DLN2_ADC_CHAN(dln2->iio_channels[i], i)
+ IIO_CHAN_SOFT_TIMESTAMP_ASSIGN(dln2->iio_channels[i], i);
+
+ indio_dev->name = DLN2_ADC_MOD_NAME;
+ indio_dev->dev.parent = dev;
+ indio_dev->info = &dln2_adc_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = dln2->iio_channels;
+ indio_dev->num_channels = chans + 1;
+ indio_dev->setup_ops = &dln2_adc_buffer_setup_ops;
+
+ dln2->trig = devm_iio_trigger_alloc(dev, "%s-dev%d",
+ indio_dev->name, indio_dev->id);
+ if (!dln2->trig) {
+ dev_err(dev, "failed to allocate trigger\n");
+ return -ENOMEM;
+ }
+ dln2->trig->ops = &dln2_adc_trigger_ops;
+ iio_trigger_set_drvdata(dln2->trig, dln2);
+ devm_iio_trigger_register(dev, dln2->trig);
+ iio_trigger_set_immutable(indio_dev, dln2->trig);
+
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
+ dln2_adc_trigger_h,
+ &dln2_adc_buffer_setup_ops);
+ if (ret) {
+ dev_err(dev, "failed to allocate triggered buffer: %d\n", ret);
+ return ret;
+ }
+
+ ret = dln2_register_event_cb(pdev, DLN2_ADC_CONDITION_MET_EV,
+ dln2_adc_event);
+ if (ret) {
+ dev_err(dev, "failed to setup DLN2 periodic event: %d\n", ret);
+ return ret;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(dev, "failed to register iio device: %d\n", ret);
+ goto unregister_event;
+ }
+
+ return ret;
+
+unregister_event:
+ dln2_unregister_event_cb(pdev, DLN2_ADC_CONDITION_MET_EV);
+
+ return ret;
+}
+
+static int dln2_adc_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+
+ iio_device_unregister(indio_dev);
+ dln2_unregister_event_cb(pdev, DLN2_ADC_CONDITION_MET_EV);
+ return 0;
+}
+
+static struct platform_driver dln2_adc_driver = {
+ .driver.name = DLN2_ADC_MOD_NAME,
+ .probe = dln2_adc_probe,
+ .remove = dln2_adc_remove,
+};
+
+module_platform_driver(dln2_adc_driver);
+
+MODULE_AUTHOR("Jack Andersen <jackoalan@gmail.com");
+MODULE_DESCRIPTION("Driver for the Diolan DLN2 ADC interface");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:dln2-adc");
diff --git a/drivers/iio/adc/ep93xx_adc.c b/drivers/iio/adc/ep93xx_adc.c
new file mode 100644
index 000000000000..a179ac476c6d
--- /dev/null
+++ b/drivers/iio/adc/ep93xx_adc.c
@@ -0,0 +1,255 @@
+/*
+ * Driver for ADC module on the Cirrus Logic EP93xx series of SoCs
+ *
+ * Copyright (C) 2015 Alexander Sverdlin
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * The driver uses polling to get the conversion status. According to EP93xx
+ * datasheets, reading the ADCResult register starts the conversion, but the
+ * user is also responsible for ensuring that the delay between adjacent
+ * conversion triggers is long enough so that the maximum allowed conversion
+ * rate is not exceeded. This basically renders IRQ mode unusable.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/iio/iio.h>
+#include <linux/io.h>
+#include <linux/irqflags.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+
+/*
+ * This code could benefit from real HR timers, but jiffy granularity would
+ * lower the ADC conversion rate down to CONFIG_HZ, so we fall back to busy
+ * waiting in that case.
+ *
+ * The HR-timer-based version loads the CPU only up to 10% during back-to-back
+ * ADC conversions, while the busy-wait version consumes the whole CPU.
+ */
+#ifdef CONFIG_HIGH_RES_TIMERS
+#define ep93xx_adc_delay(usmin, usmax) usleep_range(usmin, usmax)
+#else
+#define ep93xx_adc_delay(usmin, usmax) udelay(usmin)
+#endif
+
+#define EP93XX_ADC_RESULT 0x08
+#define EP93XX_ADC_SDR BIT(31)
+#define EP93XX_ADC_SWITCH 0x18
+#define EP93XX_ADC_SW_LOCK 0x20
+
+struct ep93xx_adc_priv {
+ struct clk *clk;
+ void __iomem *base;
+ int lastch;
+ struct mutex lock;
+};
+
+#define EP93XX_ADC_CH(index, dname, swcfg) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = index, \
+ .address = swcfg, \
+ .datasheet_name = dname, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+}
+
+/*
+ * The numbering scheme for channels 0..4 is defined in the EP9301 and EP9302
+ * datasheets. EP9307, EP9312 and EP9315 have 3 more channels (8 in total), but
+ * their numbering is not defined, so the last three are numbered arbitrarily.
+ */
+static const struct iio_chan_spec ep93xx_adc_channels[8] = {
+ EP93XX_ADC_CH(0, "YM", 0x608),
+ EP93XX_ADC_CH(1, "SXP", 0x680),
+ EP93XX_ADC_CH(2, "SXM", 0x640),
+ EP93XX_ADC_CH(3, "SYP", 0x620),
+ EP93XX_ADC_CH(4, "SYM", 0x610),
+ EP93XX_ADC_CH(5, "XP", 0x601),
+ EP93XX_ADC_CH(6, "XM", 0x602),
+ EP93XX_ADC_CH(7, "YP", 0x604),
+};
+
+static int ep93xx_read_raw(struct iio_dev *iiodev,
+ struct iio_chan_spec const *channel, int *value,
+ int *shift, long mask)
+{
+ struct ep93xx_adc_priv *priv = iio_priv(iiodev);
+ unsigned long timeout;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&priv->lock);
+ if (priv->lastch != channel->channel) {
+ priv->lastch = channel->channel;
+ /*
+ * The switch register is software-locked; unlocking must be
+ * immediately followed by the write
+ */
+ local_irq_disable();
+ writel_relaxed(0xAA, priv->base + EP93XX_ADC_SW_LOCK);
+ writel_relaxed(channel->address,
+ priv->base + EP93XX_ADC_SWITCH);
+ local_irq_enable();
+ /*
+ * Settling delay depends on module clock and could be
+ * 2ms or 500us
+ */
+ ep93xx_adc_delay(2000, 2000);
+ }
+ /* Start the conversion, discarding any stale previous result */
+ readl_relaxed(priv->base + EP93XX_ADC_RESULT);
+ /* Ensure maximum conversion rate is not exceeded */
+ ep93xx_adc_delay(DIV_ROUND_UP(1000000, 925),
+ DIV_ROUND_UP(1000000, 925));
+ /* At this point conversion must be completed, but anyway... */
+ ret = IIO_VAL_INT;
+ timeout = jiffies + msecs_to_jiffies(1) + 1;
+ while (1) {
+ u32 t;
+
+ t = readl_relaxed(priv->base + EP93XX_ADC_RESULT);
+ if (t & EP93XX_ADC_SDR) {
+ *value = sign_extend32(t, 15);
+ break;
+ }
+
+ if (time_after(jiffies, timeout)) {
+ dev_err(&iiodev->dev, "Conversion timeout\n");
+ ret = -ETIMEDOUT;
+ break;
+ }
+
+ cpu_relax();
+ }
+ mutex_unlock(&priv->lock);
+ return ret;
+
+ case IIO_CHAN_INFO_OFFSET:
+ /* According to datasheet, range is -25000..25000 */
+ *value = 25000;
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ /* Typical supply voltage is 3.3v */
+ *value = (1ULL << 32) * 3300 / 50000;
+ *shift = 32;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info ep93xx_adc_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = ep93xx_read_raw,
+};
+
+static int ep93xx_adc_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct iio_dev *iiodev;
+ struct ep93xx_adc_priv *priv;
+ struct clk *pclk;
+ struct resource *res;
+
+ iiodev = devm_iio_device_alloc(&pdev->dev, sizeof(*priv));
+ if (!iiodev)
+ return -ENOMEM;
+ priv = iio_priv(iiodev);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Cannot obtain memory resource\n");
+ return -ENXIO;
+ }
+ priv->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->base)) {
+ dev_err(&pdev->dev, "Cannot map memory resource\n");
+ return PTR_ERR(priv->base);
+ }
+
+ iiodev->dev.parent = &pdev->dev;
+ iiodev->name = dev_name(&pdev->dev);
+ iiodev->modes = INDIO_DIRECT_MODE;
+ iiodev->info = &ep93xx_adc_info;
+ iiodev->num_channels = ARRAY_SIZE(ep93xx_adc_channels);
+ iiodev->channels = ep93xx_adc_channels;
+
+ priv->lastch = -1;
+ mutex_init(&priv->lock);
+
+ platform_set_drvdata(pdev, iiodev);
+
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(&pdev->dev, "Cannot obtain clock\n");
+ return PTR_ERR(priv->clk);
+ }
+
+ pclk = clk_get_parent(priv->clk);
+ if (!pclk) {
+ dev_warn(&pdev->dev, "Cannot obtain parent clock\n");
+ } else {
+ /*
+ * This is actually a place for improvement:
+ * EP93xx ADC supports two clock divisors -- 4 and 16,
+ * resulting in conversion rates 3750 and 925 samples per second
+ * with 500us or 2ms settling time respectively.
+ * One might find this interesting enough to be configurable.
+ */
+ ret = clk_set_rate(priv->clk, clk_get_rate(pclk) / 16);
+ if (ret)
+ dev_warn(&pdev->dev, "Cannot set clock rate\n");
+ /*
+ * We can tolerate rate setting failure because the module should
+ * work in any case.
+ */
+ }
+
+ ret = clk_enable(priv->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Cannot enable clock\n");
+ return ret;
+ }
+
+ ret = iio_device_register(iiodev);
+ if (ret)
+ clk_disable(priv->clk);
+
+ return ret;
+}
+
+static int ep93xx_adc_remove(struct platform_device *pdev)
+{
+ struct iio_dev *iiodev = platform_get_drvdata(pdev);
+ struct ep93xx_adc_priv *priv = iio_priv(iiodev);
+
+ iio_device_unregister(iiodev);
+ clk_disable(priv->clk);
+
+ return 0;
+}
+
+static struct platform_driver ep93xx_adc_driver = {
+ .driver = {
+ .name = "ep93xx-adc",
+ },
+ .probe = ep93xx_adc_probe,
+ .remove = ep93xx_adc_remove,
+};
+module_platform_driver(ep93xx_adc_driver);
+
+MODULE_AUTHOR("Alexander Sverdlin <alexander.sverdlin@gmail.com>");
+MODULE_DESCRIPTION("Cirrus Logic EP93XX ADC driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:ep93xx-adc");
diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c
index 232c0b80d658..f387b972e4f4 100644
--- a/drivers/iio/adc/ina2xx-adc.c
+++ b/drivers/iio/adc/ina2xx-adc.c
@@ -44,6 +44,7 @@
#define INA226_MASK_ENABLE 0x06
#define INA226_CVRF BIT(3)
+#define INA219_CNVR BIT(1)
#define INA2XX_MAX_REGISTERS 8
@@ -592,6 +593,7 @@ static int ina2xx_work_buffer(struct iio_dev *indio_dev)
int bit, ret, i = 0;
s64 time_a, time_b;
unsigned int alert;
+ int cnvr_need_clear = 0;
time_a = iio_get_time_ns(indio_dev);
@@ -603,22 +605,30 @@ static int ina2xx_work_buffer(struct iio_dev *indio_dev)
* we check the ConVersionReadyFlag.
* On hardware that supports using the ALERT pin to toggle a
* GPIO a triggered buffer could be used instead.
- * For now, we pay for that extra read of the ALERT register
+ * For now, we do an extra read of the MASK_ENABLE register (INA226)
+ * or the BUS_VOLTAGE register (INA219), respectively.
*/
if (!chip->allow_async_readout)
do {
- ret = regmap_read(chip->regmap, INA226_MASK_ENABLE,
- &alert);
+ if (chip->config->chip_id == ina226) {
+ ret = regmap_read(chip->regmap,
+ INA226_MASK_ENABLE, &alert);
+ alert &= INA226_CVRF;
+ } else {
+ ret = regmap_read(chip->regmap,
+ INA2XX_BUS_VOLTAGE, &alert);
+ alert &= INA219_CNVR;
+ cnvr_need_clear = alert;
+ }
+
if (ret < 0)
return ret;
- alert &= INA226_CVRF;
} while (!alert);
/*
- * Single register reads: bulk_read will not work with ina226
- * as there is no auto-increment of the address register for
- * data length longer than 16bits.
+ * Single register reads: bulk_read will not work with ina226/219
+ * as there is no auto-increment of the register pointer.
*/
for_each_set_bit(bit, indio_dev->active_scan_mask,
indio_dev->masklength) {
@@ -630,6 +640,18 @@ static int ina2xx_work_buffer(struct iio_dev *indio_dev)
return ret;
data[i++] = val;
+
+ if (INA2XX_SHUNT_VOLTAGE + bit == INA2XX_POWER)
+ cnvr_need_clear = 0;
+ }
+
+ /* Dummy read on INA219 power register to clear CNVR flag */
+ if (cnvr_need_clear && chip->config->chip_id == ina219) {
+ unsigned int val;
+
+ ret = regmap_read(chip->regmap, INA2XX_POWER, &val);
+ if (ret < 0)
+ return ret;
}
time_b = iio_get_time_ns(indio_dev);
@@ -644,7 +666,7 @@ static int ina2xx_capture_thread(void *data)
{
struct iio_dev *indio_dev = data;
struct ina2xx_chip_info *chip = iio_priv(indio_dev);
- unsigned int sampling_us = SAMPLING_PERIOD(chip);
+ int sampling_us = SAMPLING_PERIOD(chip);
int buffer_us;
/*
diff --git a/drivers/iio/adc/ltc2471.c b/drivers/iio/adc/ltc2471.c
new file mode 100644
index 000000000000..29b7ed60cdb0
--- /dev/null
+++ b/drivers/iio/adc/ltc2471.c
@@ -0,0 +1,160 @@
+/*
+ * Driver for Linear Technology LTC2471 and LTC2473 voltage monitors
+ * The LTC2473 is identical to the 2471, but reports a differential signal.
+ *
+ * Copyright (C) 2017 Topic Embedded Products
+ * Author: Mike Looijmans <mike.looijmans@topic.nl>
+ *
+ * License: GPLv2
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+enum ltc2471_chips {
+ ltc2471,
+ ltc2473,
+};
+
+struct ltc2471_data {
+ struct i2c_client *client;
+};
+
+/* Reference voltage is 1.25V */
+#define LTC2471_VREF 1250
+
+/* Read two bytes from the I2C bus to obtain the ADC result */
+static int ltc2471_get_value(struct i2c_client *client)
+{
+ int ret;
+ __be16 buf;
+
+ ret = i2c_master_recv(client, (char *)&buf, sizeof(buf));
+ if (ret < 0)
+ return ret;
+ if (ret != sizeof(buf))
+ return -EIO;
+
+ /* MSB first */
+ return be16_to_cpu(buf);
+}
+
+static int ltc2471_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long info)
+{
+ struct ltc2471_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ ret = ltc2471_get_value(data->client);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ if (chan->differential)
+ /* Output ranges from -VREF to +VREF */
+ *val = 2 * LTC2471_VREF;
+ else
+ /* Output ranges from 0 to VREF */
+ *val = LTC2471_VREF;
+ *val2 = 16; /* 16 data bits */
+ return IIO_VAL_FRACTIONAL_LOG2;
+
+ case IIO_CHAN_INFO_OFFSET:
+ /* Only differential chip has this property */
+ *val = -LTC2471_VREF;
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_chan_spec ltc2471_channel[] = {
+ {
+ .type = IIO_VOLTAGE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ },
+};
+
+static const struct iio_chan_spec ltc2473_channel[] = {
+ {
+ .type = IIO_VOLTAGE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
+ .differential = 1,
+ },
+};
+
+static const struct iio_info ltc2471_info = {
+ .read_raw = ltc2471_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int ltc2471_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct iio_dev *indio_dev;
+ struct ltc2471_data *data;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return -EOPNOTSUPP;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->name = id->name;
+ indio_dev->info = &ltc2471_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ if (id->driver_data == ltc2473)
+ indio_dev->channels = ltc2473_channel;
+ else
+ indio_dev->channels = ltc2471_channel;
+ indio_dev->num_channels = 1;
+
+ /* Trigger once to start conversion and check if chip is there */
+ ret = ltc2471_get_value(client);
+ if (ret < 0) {
+ dev_err(&client->dev, "Cannot read from device.\n");
+ return ret;
+ }
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct i2c_device_id ltc2471_i2c_id[] = {
+ { "ltc2471", ltc2471 },
+ { "ltc2473", ltc2473 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ltc2471_i2c_id);
+
+static struct i2c_driver ltc2471_i2c_driver = {
+ .driver = {
+ .name = "ltc2471",
+ },
+ .probe = ltc2471_i2c_probe,
+ .id_table = ltc2471_i2c_id,
+};
+
+module_i2c_driver(ltc2471_i2c_driver);
+
+MODULE_DESCRIPTION("LTC2471/LTC2473 ADC driver");
+MODULE_AUTHOR("Topic Embedded Products");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/ltc2497.c b/drivers/iio/adc/ltc2497.c
index 2691b10023f5..5bf8011dcde9 100644
--- a/drivers/iio/adc/ltc2497.c
+++ b/drivers/iio/adc/ltc2497.c
@@ -11,6 +11,7 @@
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/iio/iio.h>
+#include <linux/iio/driver.h>
#include <linux/iio/sysfs.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -127,13 +128,14 @@ static int ltc2497_read_raw(struct iio_dev *indio_dev,
}
}
-#define LTC2497_CHAN(_chan, _addr) { \
+#define LTC2497_CHAN(_chan, _addr, _ds_name) { \
.type = IIO_VOLTAGE, \
.indexed = 1, \
.channel = (_chan), \
.address = (_addr | (_chan / 2) | ((_chan & 1) ? LTC2497_SIGN : 0)), \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .datasheet_name = (_ds_name), \
}
#define LTC2497_CHAN_DIFF(_chan, _addr) { \
@@ -148,22 +150,22 @@ static int ltc2497_read_raw(struct iio_dev *indio_dev,
}
static const struct iio_chan_spec ltc2497_channel[] = {
- LTC2497_CHAN(0, LTC2497_SGL),
- LTC2497_CHAN(1, LTC2497_SGL),
- LTC2497_CHAN(2, LTC2497_SGL),
- LTC2497_CHAN(3, LTC2497_SGL),
- LTC2497_CHAN(4, LTC2497_SGL),
- LTC2497_CHAN(5, LTC2497_SGL),
- LTC2497_CHAN(6, LTC2497_SGL),
- LTC2497_CHAN(7, LTC2497_SGL),
- LTC2497_CHAN(8, LTC2497_SGL),
- LTC2497_CHAN(9, LTC2497_SGL),
- LTC2497_CHAN(10, LTC2497_SGL),
- LTC2497_CHAN(11, LTC2497_SGL),
- LTC2497_CHAN(12, LTC2497_SGL),
- LTC2497_CHAN(13, LTC2497_SGL),
- LTC2497_CHAN(14, LTC2497_SGL),
- LTC2497_CHAN(15, LTC2497_SGL),
+ LTC2497_CHAN(0, LTC2497_SGL, "CH0"),
+ LTC2497_CHAN(1, LTC2497_SGL, "CH1"),
+ LTC2497_CHAN(2, LTC2497_SGL, "CH2"),
+ LTC2497_CHAN(3, LTC2497_SGL, "CH3"),
+ LTC2497_CHAN(4, LTC2497_SGL, "CH4"),
+ LTC2497_CHAN(5, LTC2497_SGL, "CH5"),
+ LTC2497_CHAN(6, LTC2497_SGL, "CH6"),
+ LTC2497_CHAN(7, LTC2497_SGL, "CH7"),
+ LTC2497_CHAN(8, LTC2497_SGL, "CH8"),
+ LTC2497_CHAN(9, LTC2497_SGL, "CH9"),
+ LTC2497_CHAN(10, LTC2497_SGL, "CH10"),
+ LTC2497_CHAN(11, LTC2497_SGL, "CH11"),
+ LTC2497_CHAN(12, LTC2497_SGL, "CH12"),
+ LTC2497_CHAN(13, LTC2497_SGL, "CH13"),
+ LTC2497_CHAN(14, LTC2497_SGL, "CH14"),
+ LTC2497_CHAN(15, LTC2497_SGL, "CH15"),
LTC2497_CHAN_DIFF(0, LTC2497_DIFF),
LTC2497_CHAN_DIFF(1, LTC2497_DIFF),
LTC2497_CHAN_DIFF(2, LTC2497_DIFF),
@@ -192,6 +194,7 @@ static int ltc2497_probe(struct i2c_client *client,
{
struct iio_dev *indio_dev;
struct ltc2497_st *st;
+ struct iio_map *plat_data;
int ret;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C |
@@ -221,19 +224,31 @@ static int ltc2497_probe(struct i2c_client *client,
if (ret < 0)
return ret;
+ if (client->dev.platform_data) {
+ plat_data = ((struct iio_map *)client->dev.platform_data);
+ ret = iio_map_array_register(indio_dev, plat_data);
+ if (ret) {
+ dev_err(&indio_dev->dev, "iio map err: %d\n", ret);
+ goto err_regulator_disable;
+ }
+ }
+
ret = i2c_smbus_write_byte(st->client, LTC2497_CONFIG_DEFAULT);
if (ret < 0)
- goto err_regulator_disable;
+ goto err_array_unregister;
st->addr_prev = LTC2497_CONFIG_DEFAULT;
st->time_prev = ktime_get();
ret = iio_device_register(indio_dev);
if (ret < 0)
- goto err_regulator_disable;
+ goto err_array_unregister;
return 0;
+err_array_unregister:
+ iio_map_array_unregister(indio_dev);
+
err_regulator_disable:
regulator_disable(st->ref);
@@ -245,6 +260,7 @@ static int ltc2497_remove(struct i2c_client *client)
struct iio_dev *indio_dev = i2c_get_clientdata(client);
struct ltc2497_st *st = iio_priv(indio_dev);
+ iio_map_array_unregister(indio_dev);
iio_device_unregister(indio_dev);
regulator_disable(st->ref);
diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c
index b0526e4b9530..b1dd17cbce58 100644
--- a/drivers/iio/adc/max9611.c
+++ b/drivers/iio/adc/max9611.c
@@ -549,8 +549,8 @@ static int max9611_probe(struct i2c_client *client,
ret = of_property_read_u32(of_node, shunt_res_prop, &of_shunt);
if (ret) {
dev_err(&client->dev,
- "Missing %s property for %s node\n",
- shunt_res_prop, of_node->full_name);
+ "Missing %s property for %pOF node\n",
+ shunt_res_prop, of_node);
return ret;
}
max9611->shunt_resistor_uohm = of_shunt;
diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c
index 254135e07792..63de705086ed 100644
--- a/drivers/iio/adc/mcp3422.c
+++ b/drivers/iio/adc/mcp3422.c
@@ -379,10 +379,12 @@ static int mcp3422_probe(struct i2c_client *client,
/* meaningful default configuration */
config = (MCP3422_CONT_SAMPLING
- | MCP3422_CHANNEL_VALUE(1)
+ | MCP3422_CHANNEL_VALUE(0)
| MCP3422_PGA_VALUE(MCP3422_PGA_1)
| MCP3422_SAMPLE_RATE_VALUE(MCP3422_SRATE_240));
- mcp3422_update_config(adc, config);
+ err = mcp3422_update_config(adc, config);
+ if (err < 0)
+ return err;
err = devm_iio_device_register(&client->dev, indio_dev);
if (err < 0)
diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
index 83da50ed73ab..2e8dbb89c8c9 100644
--- a/drivers/iio/adc/meson_saradc.c
+++ b/drivers/iio/adc/meson_saradc.c
@@ -572,8 +572,8 @@ static int meson_sar_adc_clk_init(struct iio_dev *indio_dev,
struct clk_init_data init;
const char *clk_parents[1];
- init.name = devm_kasprintf(&indio_dev->dev, GFP_KERNEL, "%s#adc_div",
- of_node_full_name(indio_dev->dev.of_node));
+ init.name = devm_kasprintf(&indio_dev->dev, GFP_KERNEL, "%pOF#adc_div",
+ indio_dev->dev.of_node);
init.flags = 0;
init.ops = &clk_divider_ops;
clk_parents[0] = __clk_get_name(priv->clkin);
@@ -591,8 +591,8 @@ static int meson_sar_adc_clk_init(struct iio_dev *indio_dev,
if (WARN_ON(IS_ERR(priv->adc_div_clk)))
return PTR_ERR(priv->adc_div_clk);
- init.name = devm_kasprintf(&indio_dev->dev, GFP_KERNEL, "%s#adc_en",
- of_node_full_name(indio_dev->dev.of_node));
+ init.name = devm_kasprintf(&indio_dev->dev, GFP_KERNEL, "%pOF#adc_en",
+ indio_dev->dev.of_node);
init.flags = CLK_SET_RATE_PARENT;
init.ops = &clk_gate_ops;
clk_parents[0] = __clk_get_name(priv->adc_div_clk);
@@ -915,6 +915,11 @@ static int meson_sar_adc_probe(struct platform_device *pdev)
init_completion(&priv->done);
match = of_match_device(meson_sar_adc_of_match, &pdev->dev);
+ if (!match) {
+ dev_err(&pdev->dev, "failed to match device\n");
+ return -ENODEV;
+ }
+
priv->data = match->data;
indio_dev->name = priv->data->name;
diff --git a/drivers/iio/adc/mt6577_auxadc.c b/drivers/iio/adc/mt6577_auxadc.c
index 2d104c828041..414cf44bf19d 100644
--- a/drivers/iio/adc/mt6577_auxadc.c
+++ b/drivers/iio/adc/mt6577_auxadc.c
@@ -184,6 +184,37 @@ static const struct iio_info mt6577_auxadc_info = {
.read_raw = &mt6577_auxadc_read_raw,
};
+static int __maybe_unused mt6577_auxadc_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct mt6577_auxadc_device *adc_dev = iio_priv(indio_dev);
+ int ret;
+
+ ret = clk_prepare_enable(adc_dev->adc_clk);
+ if (ret) {
+ pr_err("failed to enable auxadc clock\n");
+ return ret;
+ }
+
+ mt6577_auxadc_mod_reg(adc_dev->reg_base + MT6577_AUXADC_MISC,
+ MT6577_AUXADC_PDN_EN, 0);
+ mdelay(MT6577_AUXADC_POWER_READY_MS);
+
+ return 0;
+}
+
+static int __maybe_unused mt6577_auxadc_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct mt6577_auxadc_device *adc_dev = iio_priv(indio_dev);
+
+ mt6577_auxadc_mod_reg(adc_dev->reg_base + MT6577_AUXADC_MISC,
+ 0, MT6577_AUXADC_PDN_EN);
+ clk_disable_unprepare(adc_dev->adc_clk);
+
+ return 0;
+}
+
static int mt6577_auxadc_probe(struct platform_device *pdev)
{
struct mt6577_auxadc_device *adc_dev;
@@ -269,8 +300,13 @@ static int mt6577_auxadc_remove(struct platform_device *pdev)
return 0;
}
+static SIMPLE_DEV_PM_OPS(mt6577_auxadc_pm_ops,
+ mt6577_auxadc_suspend,
+ mt6577_auxadc_resume);
+
static const struct of_device_id mt6577_auxadc_of_match[] = {
{ .compatible = "mediatek,mt2701-auxadc", },
+ { .compatible = "mediatek,mt7622-auxadc", },
{ .compatible = "mediatek,mt8173-auxadc", },
{ }
};
@@ -280,6 +316,7 @@ static struct platform_driver mt6577_auxadc_driver = {
.driver = {
.name = "mt6577-auxadc",
.of_match_table = mt6577_auxadc_of_match,
+ .pm = &mt6577_auxadc_pm_ops,
},
.probe = mt6577_auxadc_probe,
.remove = mt6577_auxadc_remove,
diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c
index ae6d3324f518..5f612d694b33 100644
--- a/drivers/iio/adc/rockchip_saradc.c
+++ b/drivers/iio/adc/rockchip_saradc.c
@@ -224,6 +224,11 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
info = iio_priv(indio_dev);
match = of_match_device(rockchip_saradc_match, &pdev->dev);
+ if (!match) {
+ dev_err(&pdev->dev, "failed to match device\n");
+ return -ENODEV;
+ }
+
info->data = match->data;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -235,7 +240,8 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
* The reset should be an optional property, as it should work
* with old devicetrees as well
*/
- info->reset = devm_reset_control_get(&pdev->dev, "saradc-apb");
+ info->reset = devm_reset_control_get_exclusive(&pdev->dev,
+ "saradc-apb");
if (IS_ERR(info->reset)) {
ret = PTR_ERR(info->reset);
if (ret != -ENOENT)
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
index e09233b03c05..804198eb0eef 100644
--- a/drivers/iio/adc/stm32-adc-core.c
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -64,7 +64,7 @@
#define STM32H7_CKMODE_MASK GENMASK(17, 16)
/* STM32 H7 maximum analog clock rate (from datasheet) */
-#define STM32H7_ADC_MAX_CLK_RATE 72000000
+#define STM32H7_ADC_MAX_CLK_RATE 36000000
/**
* stm32_adc_common_regs - stm32 common registers, compatible dependent data
@@ -148,14 +148,14 @@ static int stm32f4_adc_clk_sel(struct platform_device *pdev,
return -EINVAL;
}
- priv->common.rate = rate;
+ priv->common.rate = rate / stm32f4_pclk_div[i];
val = readl_relaxed(priv->common.base + STM32F4_ADC_CCR);
val &= ~STM32F4_ADC_ADCPRE_MASK;
val |= i << STM32F4_ADC_ADCPRE_SHIFT;
writel_relaxed(val, priv->common.base + STM32F4_ADC_CCR);
dev_dbg(&pdev->dev, "Using analog clock source at %ld kHz\n",
- rate / (stm32f4_pclk_div[i] * 1000));
+ priv->common.rate / 1000);
return 0;
}
@@ -172,7 +172,7 @@ struct stm32h7_adc_ck_spec {
int div;
};
-const struct stm32h7_adc_ck_spec stm32h7_adc_ckmodes_spec[] = {
+static const struct stm32h7_adc_ck_spec stm32h7_adc_ckmodes_spec[] = {
/* 00: CK_ADC[1..3]: Asynchronous clock modes */
{ 0, 0, 1 },
{ 0, 1, 2 },
@@ -250,7 +250,7 @@ static int stm32h7_adc_clk_sel(struct platform_device *pdev,
out:
/* rate used later by each ADC instance to control BOOST mode */
- priv->common.rate = rate;
+ priv->common.rate = rate / div;
/* Set common clock mode and prescaler */
val = readl_relaxed(priv->common.base + STM32H7_ADC_CCR);
@@ -260,7 +260,7 @@ out:
writel_relaxed(val, priv->common.base + STM32H7_ADC_CCR);
dev_dbg(&pdev->dev, "Using %s clock/%d source at %ld kHz\n",
- ckmode ? "bus" : "adc", div, rate / (div * 1000));
+ ckmode ? "bus" : "adc", div, priv->common.rate / 1000);
return 0;
}
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index 5bfcc1f13105..e3c15f88075f 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -25,6 +25,7 @@
#include <linux/dmaengine.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
+#include <linux/iio/timer/stm32-lptim-trigger.h>
#include <linux/iio/timer/stm32-timer-trigger.h>
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>
@@ -83,6 +84,8 @@
#define STM32H7_ADC_IER 0x04
#define STM32H7_ADC_CR 0x08
#define STM32H7_ADC_CFGR 0x0C
+#define STM32H7_ADC_SMPR1 0x14
+#define STM32H7_ADC_SMPR2 0x18
#define STM32H7_ADC_PCSEL 0x1C
#define STM32H7_ADC_SQR1 0x30
#define STM32H7_ADC_SQR2 0x34
@@ -151,6 +154,7 @@ enum stm32h7_adc_dmngt {
#define STM32H7_BOOST_CLKRATE 20000000UL
#define STM32_ADC_MAX_SQ 16 /* SQ1..SQ16 */
+#define STM32_ADC_MAX_SMP 7 /* SMPx range is [0..7] */
#define STM32_ADC_TIMEOUT_US 100000
#define STM32_ADC_TIMEOUT (msecs_to_jiffies(STM32_ADC_TIMEOUT_US / 1000))
@@ -182,6 +186,11 @@ enum stm32_adc_extsel {
STM32_EXT13,
STM32_EXT14,
STM32_EXT15,
+ STM32_EXT16,
+ STM32_EXT17,
+ STM32_EXT18,
+ STM32_EXT19,
+ STM32_EXT20,
};
/**
@@ -227,6 +236,8 @@ struct stm32_adc_regs {
* @exten: trigger control register & bitfield
* @extsel: trigger selection register & bitfield
* @res: resolution selection register & bitfield
+ * @smpr: smpr1 & smpr2 registers offset array
+ * @smp_bits: smpr1 & smpr2 index and bitfields
*/
struct stm32_adc_regspec {
const u32 dr;
@@ -236,6 +247,8 @@ struct stm32_adc_regspec {
const struct stm32_adc_regs exten;
const struct stm32_adc_regs extsel;
const struct stm32_adc_regs res;
+ const u32 smpr[2];
+ const struct stm32_adc_regs *smp_bits;
};
struct stm32_adc;
@@ -251,6 +264,7 @@ struct stm32_adc;
* @start_conv: routine to start conversions
* @stop_conv: routine to stop conversions
* @unprepare: optional unprepare routine (disable, power-down)
+ * @smp_cycles: programmable sampling time (ADC clock cycles)
*/
struct stm32_adc_cfg {
const struct stm32_adc_regspec *regs;
@@ -262,6 +276,7 @@ struct stm32_adc_cfg {
void (*start_conv)(struct stm32_adc *, bool dma);
void (*stop_conv)(struct stm32_adc *);
void (*unprepare)(struct stm32_adc *);
+ const unsigned int *smp_cycles;
};
/**
@@ -283,6 +298,7 @@ struct stm32_adc_cfg {
* @rx_dma_buf: dma rx buffer bus address
* @rx_buf_sz: dma rx buffer size
* @pcsel: bitmask to preselect channels on some devices
+ * @smpr_val: sampling time settings (e.g. smpr1 / smpr2)
* @cal: optional calibration data on some devices
*/
struct stm32_adc {
@@ -303,6 +319,7 @@ struct stm32_adc {
dma_addr_t rx_dma_buf;
unsigned int rx_buf_sz;
u32 pcsel;
+ u32 smpr_val[2];
struct stm32_adc_calib cal;
};
@@ -431,6 +448,39 @@ static struct stm32_adc_trig_info stm32f4_adc_trigs[] = {
{}, /* sentinel */
};
+/**
+ * stm32f4_smp_bits[] - describe sampling time register index & bit fields
+ * Sorted so it can be indexed by channel number.
+ */
+static const struct stm32_adc_regs stm32f4_smp_bits[] = {
+ /* STM32F4_ADC_SMPR2: smpr[] index, mask, shift for SMP0 to SMP9 */
+ { 1, GENMASK(2, 0), 0 },
+ { 1, GENMASK(5, 3), 3 },
+ { 1, GENMASK(8, 6), 6 },
+ { 1, GENMASK(11, 9), 9 },
+ { 1, GENMASK(14, 12), 12 },
+ { 1, GENMASK(17, 15), 15 },
+ { 1, GENMASK(20, 18), 18 },
+ { 1, GENMASK(23, 21), 21 },
+ { 1, GENMASK(26, 24), 24 },
+ { 1, GENMASK(29, 27), 27 },
+ /* STM32F4_ADC_SMPR1, smpr[] index, mask, shift for SMP10 to SMP18 */
+ { 0, GENMASK(2, 0), 0 },
+ { 0, GENMASK(5, 3), 3 },
+ { 0, GENMASK(8, 6), 6 },
+ { 0, GENMASK(11, 9), 9 },
+ { 0, GENMASK(14, 12), 12 },
+ { 0, GENMASK(17, 15), 15 },
+ { 0, GENMASK(20, 18), 18 },
+ { 0, GENMASK(23, 21), 21 },
+ { 0, GENMASK(26, 24), 24 },
+};
+
+/* STM32F4 programmable sampling time (ADC clock cycles) */
+static const unsigned int stm32f4_adc_smp_cycles[STM32_ADC_MAX_SMP + 1] = {
+ 3, 15, 28, 56, 84, 112, 144, 480,
+};
+
static const struct stm32_adc_regspec stm32f4_adc_regspec = {
.dr = STM32F4_ADC_DR,
.ier_eoc = { STM32F4_ADC_CR1, STM32F4_EOCIE },
@@ -440,6 +490,8 @@ static const struct stm32_adc_regspec stm32f4_adc_regspec = {
.extsel = { STM32F4_ADC_CR2, STM32F4_EXTSEL_MASK,
STM32F4_EXTSEL_SHIFT },
.res = { STM32F4_ADC_CR1, STM32F4_RES_MASK, STM32F4_RES_SHIFT },
+ .smpr = { STM32F4_ADC_SMPR1, STM32F4_ADC_SMPR2 },
+ .smp_bits = stm32f4_smp_bits,
};
static const struct stm32_adc_regs stm32h7_sq[STM32_ADC_MAX_SQ + 1] = {
@@ -480,9 +532,46 @@ static struct stm32_adc_trig_info stm32h7_adc_trigs[] = {
{ TIM4_TRGO, STM32_EXT12 },
{ TIM6_TRGO, STM32_EXT13 },
{ TIM3_CH4, STM32_EXT15 },
+ { LPTIM1_OUT, STM32_EXT18 },
+ { LPTIM2_OUT, STM32_EXT19 },
+ { LPTIM3_OUT, STM32_EXT20 },
{},
};
+/**
+ * stm32h7_smp_bits - describe sampling time register index & bit fields
+ * Sorted so it can be indexed by channel number.
+ */
+static const struct stm32_adc_regs stm32h7_smp_bits[] = {
+ /* STM32H7_ADC_SMPR1, smpr[] index, mask, shift for SMP0 to SMP9 */
+ { 0, GENMASK(2, 0), 0 },
+ { 0, GENMASK(5, 3), 3 },
+ { 0, GENMASK(8, 6), 6 },
+ { 0, GENMASK(11, 9), 9 },
+ { 0, GENMASK(14, 12), 12 },
+ { 0, GENMASK(17, 15), 15 },
+ { 0, GENMASK(20, 18), 18 },
+ { 0, GENMASK(23, 21), 21 },
+ { 0, GENMASK(26, 24), 24 },
+ { 0, GENMASK(29, 27), 27 },
+ /* STM32H7_ADC_SMPR2, smpr[] index, mask, shift for SMP10 to SMP19 */
+ { 1, GENMASK(2, 0), 0 },
+ { 1, GENMASK(5, 3), 3 },
+ { 1, GENMASK(8, 6), 6 },
+ { 1, GENMASK(11, 9), 9 },
+ { 1, GENMASK(14, 12), 12 },
+ { 1, GENMASK(17, 15), 15 },
+ { 1, GENMASK(20, 18), 18 },
+ { 1, GENMASK(23, 21), 21 },
+ { 1, GENMASK(26, 24), 24 },
+ { 1, GENMASK(29, 27), 27 },
+};
+
+/* STM32H7 programmable sampling time (ADC clock cycles, rounded down) */
+static const unsigned int stm32h7_adc_smp_cycles[STM32_ADC_MAX_SMP + 1] = {
+ 1, 2, 8, 16, 32, 64, 387, 810,
+};
+
static const struct stm32_adc_regspec stm32h7_adc_regspec = {
.dr = STM32H7_ADC_DR,
.ier_eoc = { STM32H7_ADC_IER, STM32H7_EOCIE },
@@ -492,6 +581,8 @@ static const struct stm32_adc_regspec stm32h7_adc_regspec = {
.extsel = { STM32H7_ADC_CFGR, STM32H7_EXTSEL_MASK,
STM32H7_EXTSEL_SHIFT },
.res = { STM32H7_ADC_CFGR, STM32H7_RES_MASK, STM32H7_RES_SHIFT },
+ .smpr = { STM32H7_ADC_SMPR1, STM32H7_ADC_SMPR2 },
+ .smp_bits = stm32h7_smp_bits,
};
/**
@@ -933,6 +1024,7 @@ static void stm32h7_adc_unprepare(struct stm32_adc *adc)
* @scan_mask: channels to be converted
*
* Conversion sequence :
+ * Apply sampling time settings for all channels.
* Configure ADC scan sequence based on selected channels in scan_mask.
* Add channels to SQR registers, from scan_mask LSB to MSB, then
* program sequence len.
@@ -946,6 +1038,10 @@ static int stm32_adc_conf_scan_seq(struct iio_dev *indio_dev,
u32 val, bit;
int i = 0;
+ /* Apply sampling time settings */
+ stm32_adc_writel(adc, adc->cfg->regs->smpr[0], adc->smpr_val[0]);
+ stm32_adc_writel(adc, adc->cfg->regs->smpr[1], adc->smpr_val[1]);
+
for_each_set_bit(bit, scan_mask, indio_dev->masklength) {
chan = indio_dev->channels + bit;
/*
@@ -995,7 +1091,8 @@ static int stm32_adc_get_trig_extsel(struct iio_dev *indio_dev,
* Checking both stm32 timer trigger type and trig name
* should be safe against arbitrary trigger names.
*/
- if (is_stm32_timer_trigger(trig) &&
+ if ((is_stm32_timer_trigger(trig) ||
+ is_stm32_lptim_trigger(trig)) &&
!strcmp(adc->cfg->trigs[i].name, trig->name)) {
return adc->cfg->trigs[i].extsel;
}
@@ -1079,6 +1176,7 @@ static const struct iio_enum stm32_adc_trig_pol = {
* @res: conversion result
*
* The function performs a single conversion on a given channel:
+ * - Apply sampling time settings
* - Program sequencer with one channel (e.g. in SQ1 with len = 1)
* - Use SW trigger
* - Start conversion, then wait for interrupt completion.
@@ -1103,6 +1201,10 @@ static int stm32_adc_single_conv(struct iio_dev *indio_dev,
return ret;
}
+ /* Apply sampling time settings */
+ stm32_adc_writel(adc, regs->smpr[0], adc->smpr_val[0]);
+ stm32_adc_writel(adc, regs->smpr[1], adc->smpr_val[1]);
+
/* Program chan number in regular sequence (SQ1) */
val = stm32_adc_readl(adc, regs->sqr[1].reg);
val &= ~regs->sqr[1].mask;
@@ -1507,10 +1609,28 @@ static int stm32_adc_of_get_resolution(struct iio_dev *indio_dev)
return 0;
}
+static void stm32_adc_smpr_init(struct stm32_adc *adc, int channel, u32 smp_ns)
+{
+ const struct stm32_adc_regs *smpr = &adc->cfg->regs->smp_bits[channel];
+ u32 period_ns, shift = smpr->shift, mask = smpr->mask;
+ unsigned int smp, r = smpr->reg;
+
+ /* Determine sampling time (ADC clock cycles) */
+ period_ns = NSEC_PER_SEC / adc->common->rate;
+ for (smp = 0; smp <= STM32_ADC_MAX_SMP; smp++)
+ if ((period_ns * adc->cfg->smp_cycles[smp]) >= smp_ns)
+ break;
+ if (smp > STM32_ADC_MAX_SMP)
+ smp = STM32_ADC_MAX_SMP;
+
+ /* pre-build sampling time registers (e.g. smpr1, smpr2) */
+ adc->smpr_val[r] = (adc->smpr_val[r] & ~mask) | (smp << shift);
+}
+
static void stm32_adc_chan_init_one(struct iio_dev *indio_dev,
struct iio_chan_spec *chan,
const struct stm32_adc_chan_spec *channel,
- int scan_index)
+ int scan_index, u32 smp)
{
struct stm32_adc *adc = iio_priv(indio_dev);
@@ -1526,6 +1646,9 @@ static void stm32_adc_chan_init_one(struct iio_dev *indio_dev,
chan->scan_type.storagebits = 16;
chan->ext_info = stm32_adc_ext_info;
+ /* Prepare sampling time settings */
+ stm32_adc_smpr_init(adc, chan->channel, smp);
+
/* pre-build selected channels mask */
adc->pcsel |= BIT(chan->channel);
}
@@ -1538,8 +1661,8 @@ static int stm32_adc_chan_of_init(struct iio_dev *indio_dev)
struct property *prop;
const __be32 *cur;
struct iio_chan_spec *channels;
- int scan_index = 0, num_channels;
- u32 val;
+ int scan_index = 0, num_channels, ret;
+ u32 val, smp = 0;
num_channels = of_property_count_u32_elems(node, "st,adc-channels");
if (num_channels < 0 ||
@@ -1548,6 +1671,13 @@ static int stm32_adc_chan_of_init(struct iio_dev *indio_dev)
return num_channels < 0 ? num_channels : -EINVAL;
}
+ /* Optional sample time may be provided per channel, or once for all channels */
+ ret = of_property_count_u32_elems(node, "st,min-sample-time-nsecs");
+ if (ret > 1 && ret != num_channels) {
+ dev_err(&indio_dev->dev, "Invalid st,min-sample-time-nsecs\n");
+ return -EINVAL;
+ }
+
channels = devm_kcalloc(&indio_dev->dev, num_channels,
sizeof(struct iio_chan_spec), GFP_KERNEL);
if (!channels)
@@ -1558,9 +1688,19 @@ static int stm32_adc_chan_of_init(struct iio_dev *indio_dev)
dev_err(&indio_dev->dev, "Invalid channel %d\n", val);
return -EINVAL;
}
+
+ /*
+ * Using of_property_read_u32_index(), the smp value is only
+ * modified if a valid u32 value can be decoded. This allows getting
+ * either no value, one shared value for all indexes, or one
+ * value per channel.
+ */
+ of_property_read_u32_index(node, "st,min-sample-time-nsecs",
+ scan_index, &smp);
+
stm32_adc_chan_init_one(indio_dev, &channels[scan_index],
&adc_info->channels[val],
- scan_index);
+ scan_index, smp);
scan_index++;
}
@@ -1634,7 +1774,7 @@ static int stm32_adc_probe(struct platform_device *pdev)
indio_dev->dev.parent = &pdev->dev;
indio_dev->dev.of_node = pdev->dev.of_node;
indio_dev->info = &stm32_adc_iio_info;
- indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->modes = INDIO_DIRECT_MODE | INDIO_HARDWARE_TRIGGERED;
platform_set_drvdata(pdev, adc);
@@ -1755,6 +1895,7 @@ static const struct stm32_adc_cfg stm32f4_adc_cfg = {
.clk_required = true,
.start_conv = stm32f4_adc_start_conv,
.stop_conv = stm32f4_adc_stop_conv,
+ .smp_cycles = stm32f4_adc_smp_cycles,
};
static const struct stm32_adc_cfg stm32h7_adc_cfg = {
@@ -1766,6 +1907,7 @@ static const struct stm32_adc_cfg stm32h7_adc_cfg = {
.stop_conv = stm32h7_adc_stop_conv,
.prepare = stm32h7_adc_prepare,
.unprepare = stm32h7_adc_unprepare,
+ .smp_cycles = stm32h7_adc_smp_cycles,
};
static const struct of_device_id stm32_adc_of_match[] = {
diff --git a/drivers/iio/adc/sun4i-gpadc-iio.c b/drivers/iio/adc/sun4i-gpadc-iio.c
index 81d4c39e414a..137f577d9432 100644
--- a/drivers/iio/adc/sun4i-gpadc-iio.c
+++ b/drivers/iio/adc/sun4i-gpadc-iio.c
@@ -256,6 +256,7 @@ static int sun4i_gpadc_read(struct iio_dev *indio_dev, int channel, int *val,
err:
pm_runtime_put_autosuspend(indio_dev->dev.parent);
+ disable_irq(irq);
mutex_unlock(&info->mutex);
return ret;
@@ -365,7 +366,6 @@ static irqreturn_t sun4i_gpadc_temp_data_irq_handler(int irq, void *dev_id)
complete(&info->completion);
out:
- disable_irq_nosync(info->temp_data_irq);
return IRQ_HANDLED;
}
@@ -380,7 +380,6 @@ static irqreturn_t sun4i_gpadc_fifo_data_irq_handler(int irq, void *dev_id)
complete(&info->completion);
out:
- disable_irq_nosync(info->fifo_data_irq);
return IRQ_HANDLED;
}
diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
index 884b8e461b17..d1210024f6bc 100644
--- a/drivers/iio/adc/ti-ads1015.c
+++ b/drivers/iio/adc/ti-ads1015.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/init.h>
+#include <linux/irq.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
#include <linux/pm_runtime.h>
@@ -28,6 +29,7 @@
#include <linux/iio/iio.h>
#include <linux/iio/types.h>
#include <linux/iio/sysfs.h>
+#include <linux/iio/events.h>
#include <linux/iio/buffer.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/trigger_consumer.h>
@@ -36,17 +38,38 @@
#define ADS1015_CONV_REG 0x00
#define ADS1015_CFG_REG 0x01
+#define ADS1015_LO_THRESH_REG 0x02
+#define ADS1015_HI_THRESH_REG 0x03
+#define ADS1015_CFG_COMP_QUE_SHIFT 0
+#define ADS1015_CFG_COMP_LAT_SHIFT 2
+#define ADS1015_CFG_COMP_POL_SHIFT 3
+#define ADS1015_CFG_COMP_MODE_SHIFT 4
#define ADS1015_CFG_DR_SHIFT 5
#define ADS1015_CFG_MOD_SHIFT 8
#define ADS1015_CFG_PGA_SHIFT 9
#define ADS1015_CFG_MUX_SHIFT 12
+#define ADS1015_CFG_COMP_QUE_MASK GENMASK(1, 0)
+#define ADS1015_CFG_COMP_LAT_MASK BIT(2)
+#define ADS1015_CFG_COMP_POL_MASK BIT(3)
+#define ADS1015_CFG_COMP_MODE_MASK BIT(4)
#define ADS1015_CFG_DR_MASK GENMASK(7, 5)
#define ADS1015_CFG_MOD_MASK BIT(8)
#define ADS1015_CFG_PGA_MASK GENMASK(11, 9)
#define ADS1015_CFG_MUX_MASK GENMASK(14, 12)
+/* Comparator queue and disable field */
+#define ADS1015_CFG_COMP_DISABLE 3
+
+/* Comparator polarity field */
+#define ADS1015_CFG_COMP_POL_LOW 0
+#define ADS1015_CFG_COMP_POL_HIGH 1
+
+/* Comparator mode field */
+#define ADS1015_CFG_COMP_MODE_TRAD 0
+#define ADS1015_CFG_COMP_MODE_WINDOW 1
+
/* device operating modes */
#define ADS1015_CONTINUOUS 0
#define ADS1015_SINGLESHOT 1
@@ -81,18 +104,36 @@ static const unsigned int ads1115_data_rate[] = {
8, 16, 32, 64, 128, 250, 475, 860
};
-static const struct {
- int scale;
- int uscale;
-} ads1015_scale[] = {
- {3, 0},
- {2, 0},
- {1, 0},
- {0, 500000},
- {0, 250000},
- {0, 125000},
- {0, 125000},
- {0, 125000},
+/*
+ * Translation from PGA bits to full-scale positive and negative input voltage
+ * range in mV
+ */
+static int ads1015_fullscale_range[] = {
+ 6144, 4096, 2048, 1024, 512, 256, 256, 256
+};
+
+/*
+ * Translation from the COMP_QUE field value to the number of successive
+ * readings that must exceed the threshold values before an interrupt is
+ * generated
+ */
+static const int ads1015_comp_queue[] = { 1, 2, 4 };
+
+static const struct iio_event_spec ads1015_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_ENABLE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE) |
+ BIT(IIO_EV_INFO_PERIOD),
+ },
};
#define ADS1015_V_CHAN(_chan, _addr) { \
@@ -111,6 +152,8 @@ static const struct {
.shift = 4, \
.endianness = IIO_CPU, \
}, \
+ .event_spec = ads1015_events, \
+ .num_event_specs = ARRAY_SIZE(ads1015_events), \
.datasheet_name = "AIN"#_chan, \
}
@@ -132,6 +175,8 @@ static const struct {
.shift = 4, \
.endianness = IIO_CPU, \
}, \
+ .event_spec = ads1015_events, \
+ .num_event_specs = ARRAY_SIZE(ads1015_events), \
.datasheet_name = "AIN"#_chan"-AIN"#_chan2, \
}
@@ -150,6 +195,8 @@ static const struct {
.storagebits = 16, \
.endianness = IIO_CPU, \
}, \
+ .event_spec = ads1015_events, \
+ .num_event_specs = ARRAY_SIZE(ads1015_events), \
.datasheet_name = "AIN"#_chan, \
}
@@ -170,9 +217,17 @@ static const struct {
.storagebits = 16, \
.endianness = IIO_CPU, \
}, \
+ .event_spec = ads1015_events, \
+ .num_event_specs = ARRAY_SIZE(ads1015_events), \
.datasheet_name = "AIN"#_chan"-AIN"#_chan2, \
}
+struct ads1015_thresh_data {
+ unsigned int comp_queue;
+ int high_thresh;
+ int low_thresh;
+};
+
struct ads1015_data {
struct regmap *regmap;
/*
@@ -182,18 +237,54 @@ struct ads1015_data {
struct mutex lock;
struct ads1015_channel_data channel_data[ADS1015_CHANNELS];
+ unsigned int event_channel;
+ unsigned int comp_mode;
+ struct ads1015_thresh_data thresh_data[ADS1015_CHANNELS];
+
unsigned int *data_rate;
+ /*
+ * Set to true when the ADC is switched to the continuous-conversion
+ * mode and exits from a power-down state. This flag is used to avoid
+ * reading a stale result from the conversion register.
+ */
+ bool conv_invalid;
};
+static bool ads1015_event_channel_enabled(struct ads1015_data *data)
+{
+ return (data->event_channel != ADS1015_CHANNELS);
+}
+
+static void ads1015_event_channel_enable(struct ads1015_data *data, int chan,
+ int comp_mode)
+{
+ WARN_ON(ads1015_event_channel_enabled(data));
+
+ data->event_channel = chan;
+ data->comp_mode = comp_mode;
+}
+
+static void ads1015_event_channel_disable(struct ads1015_data *data, int chan)
+{
+ data->event_channel = ADS1015_CHANNELS;
+}
+
static bool ads1015_is_writeable_reg(struct device *dev, unsigned int reg)
{
- return (reg == ADS1015_CFG_REG);
+ switch (reg) {
+ case ADS1015_CFG_REG:
+ case ADS1015_LO_THRESH_REG:
+ case ADS1015_HI_THRESH_REG:
+ return true;
+ default:
+ return false;
+ }
}
static const struct regmap_config ads1015_regmap_config = {
.reg_bits = 8,
.val_bits = 16,
- .max_register = ADS1015_CFG_REG,
+ .max_register = ADS1015_HI_THRESH_REG,
.writeable_reg = ads1015_is_writeable_reg,
};
@@ -235,33 +326,51 @@ static int ads1015_set_power_state(struct ads1015_data *data, bool on)
ret = pm_runtime_put_autosuspend(dev);
}
- return ret;
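+ /* pm_runtime calls may return a positive value on success; report 0 */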
+ return ret < 0 ? ret : 0;
}
static
int ads1015_get_adc_result(struct ads1015_data *data, int chan, int *val)
{
int ret, pga, dr, conv_time;
- bool change;
+ unsigned int old, mask, cfg;
if (chan < 0 || chan >= ADS1015_CHANNELS)
return -EINVAL;
+ ret = regmap_read(data->regmap, ADS1015_CFG_REG, &old);
+ if (ret)
+ return ret;
+
pga = data->channel_data[chan].pga;
dr = data->channel_data[chan].data_rate;
+ mask = ADS1015_CFG_MUX_MASK | ADS1015_CFG_PGA_MASK |
+ ADS1015_CFG_DR_MASK;
+ cfg = chan << ADS1015_CFG_MUX_SHIFT | pga << ADS1015_CFG_PGA_SHIFT |
+ dr << ADS1015_CFG_DR_SHIFT;
+
+ if (ads1015_event_channel_enabled(data)) {
+ mask |= ADS1015_CFG_COMP_QUE_MASK | ADS1015_CFG_COMP_MODE_MASK;
+ cfg |= data->thresh_data[chan].comp_queue <<
+ ADS1015_CFG_COMP_QUE_SHIFT |
+ data->comp_mode <<
+ ADS1015_CFG_COMP_MODE_SHIFT;
+ }
- ret = regmap_update_bits_check(data->regmap, ADS1015_CFG_REG,
- ADS1015_CFG_MUX_MASK |
- ADS1015_CFG_PGA_MASK,
- chan << ADS1015_CFG_MUX_SHIFT |
- pga << ADS1015_CFG_PGA_SHIFT,
- &change);
- if (ret < 0)
+ cfg = (old & ~mask) | (cfg & mask);
+
+ ret = regmap_write(data->regmap, ADS1015_CFG_REG, cfg);
+ if (ret)
return ret;
- if (change) {
- conv_time = DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr]);
+ if (old != cfg || data->conv_invalid) {
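+ /*
+ * The configuration changed or the previous result is stale: wait
+ * one conversion period at the old data rate (to flush any ongoing
+ * conversion) plus one at the new rate before reading the result.
+ */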
+ int dr_old = (old & ADS1015_CFG_DR_MASK) >>
+ ADS1015_CFG_DR_SHIFT;
+
+ conv_time = DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr_old]);
+ conv_time += DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr]);
usleep_range(conv_time, conv_time + 1);
+ data->conv_invalid = false;
}
return regmap_read(data->regmap, ADS1015_CONV_REG, val);
@@ -298,52 +407,36 @@ err:
return IRQ_HANDLED;
}
-static int ads1015_set_scale(struct ads1015_data *data, int chan,
+static int ads1015_set_scale(struct ads1015_data *data,
+ struct iio_chan_spec const *chan,
int scale, int uscale)
{
- int i, ret, rindex = -1;
-
- for (i = 0; i < ARRAY_SIZE(ads1015_scale); i++)
- if (ads1015_scale[i].scale == scale &&
- ads1015_scale[i].uscale == uscale) {
- rindex = i;
- break;
+ int i;
+ int fullscale = div_s64((scale * 1000000LL + uscale) <<
+ (chan->scan_type.realbits - 1), 1000000);
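+ /*
+ * fullscale (mV) = (scale + uscale / 1e6) * 2^(realbits - 1); e.g. a
+ * requested scale of 1 mV/LSB on the 12-bit ADS1015 maps to the
+ * 2048 mV range (PGA index 2).
+ */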
+
+ for (i = 0; i < ARRAY_SIZE(ads1015_fullscale_range); i++) {
+ if (ads1015_fullscale_range[i] == fullscale) {
+ data->channel_data[chan->address].pga = i;
+ return 0;
}
- if (rindex < 0)
- return -EINVAL;
-
- ret = regmap_update_bits(data->regmap, ADS1015_CFG_REG,
- ADS1015_CFG_PGA_MASK,
- rindex << ADS1015_CFG_PGA_SHIFT);
- if (ret < 0)
- return ret;
-
- data->channel_data[chan].pga = rindex;
+ }
- return 0;
+ return -EINVAL;
}
static int ads1015_set_data_rate(struct ads1015_data *data, int chan, int rate)
{
- int i, ret, rindex = -1;
+ int i;
- for (i = 0; i < ARRAY_SIZE(ads1015_data_rate); i++)
+ for (i = 0; i < ARRAY_SIZE(ads1015_data_rate); i++) {
if (data->data_rate[i] == rate) {
- rindex = i;
- break;
+ data->channel_data[chan].data_rate = i;
+ return 0;
}
- if (rindex < 0)
- return -EINVAL;
-
- ret = regmap_update_bits(data->regmap, ADS1015_CFG_REG,
- ADS1015_CFG_DR_MASK,
- rindex << ADS1015_CFG_DR_SHIFT);
- if (ret < 0)
- return ret;
-
- data->channel_data[chan].data_rate = rindex;
+ }
- return 0;
+ return -EINVAL;
}
static int ads1015_read_raw(struct iio_dev *indio_dev,
@@ -353,41 +446,47 @@ static int ads1015_read_raw(struct iio_dev *indio_dev,
int ret, idx;
struct ads1015_data *data = iio_priv(indio_dev);
- mutex_lock(&indio_dev->mlock);
mutex_lock(&data->lock);
switch (mask) {
case IIO_CHAN_INFO_RAW: {
int shift = chan->scan_type.shift;
- if (iio_buffer_enabled(indio_dev)) {
- ret = -EBUSY;
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
break;
+
+ if (ads1015_event_channel_enabled(data) &&
+ data->event_channel != chan->address) {
+ ret = -EBUSY;
+ goto release_direct;
}
ret = ads1015_set_power_state(data, true);
if (ret < 0)
- break;
+ goto release_direct;
ret = ads1015_get_adc_result(data, chan->address, val);
if (ret < 0) {
ads1015_set_power_state(data, false);
- break;
+ goto release_direct;
}
*val = sign_extend32(*val >> shift, 15 - shift);
ret = ads1015_set_power_state(data, false);
if (ret < 0)
- break;
+ goto release_direct;
ret = IIO_VAL_INT;
+release_direct:
+ iio_device_release_direct_mode(indio_dev);
break;
}
case IIO_CHAN_INFO_SCALE:
idx = data->channel_data[chan->address].pga;
- *val = ads1015_scale[idx].scale;
- *val2 = ads1015_scale[idx].uscale;
- ret = IIO_VAL_INT_PLUS_MICRO;
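+ /* scale = fullscale_range[idx] / 2^(realbits - 1), i.e. mV per LSB */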
+ *val = ads1015_fullscale_range[idx];
+ *val2 = chan->scan_type.realbits - 1;
+ ret = IIO_VAL_FRACTIONAL_LOG2;
break;
case IIO_CHAN_INFO_SAMP_FREQ:
idx = data->channel_data[chan->address].data_rate;
@@ -399,7 +498,6 @@ static int ads1015_read_raw(struct iio_dev *indio_dev,
break;
}
mutex_unlock(&data->lock);
- mutex_unlock(&indio_dev->mlock);
return ret;
}
@@ -414,7 +512,7 @@ static int ads1015_write_raw(struct iio_dev *indio_dev,
mutex_lock(&data->lock);
switch (mask) {
case IIO_CHAN_INFO_SCALE:
- ret = ads1015_set_scale(data, chan->address, val, val2);
+ ret = ads1015_set_scale(data, chan, val, val2);
break;
case IIO_CHAN_INFO_SAMP_FREQ:
ret = ads1015_set_data_rate(data, chan->address, val);
@@ -428,8 +526,254 @@ static int ads1015_write_raw(struct iio_dev *indio_dev,
return ret;
}
+static int ads1015_read_event(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, enum iio_event_info info, int *val,
+ int *val2)
+{
+ struct ads1015_data *data = iio_priv(indio_dev);
+ int ret;
+ unsigned int comp_queue;
+ int period;
+ int dr;
+
+ mutex_lock(&data->lock);
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ *val = (dir == IIO_EV_DIR_RISING) ?
+ data->thresh_data[chan->address].high_thresh :
+ data->thresh_data[chan->address].low_thresh;
+ ret = IIO_VAL_INT;
+ break;
+ case IIO_EV_INFO_PERIOD:
+ dr = data->channel_data[chan->address].data_rate;
+ comp_queue = data->thresh_data[chan->address].comp_queue;
+ period = ads1015_comp_queue[comp_queue] *
+ USEC_PER_SEC / data->data_rate[dr];
+
+ *val = period / USEC_PER_SEC;
+ *val2 = period % USEC_PER_SEC;
+ ret = IIO_VAL_INT_PLUS_MICRO;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static int ads1015_write_event(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, enum iio_event_info info, int val,
+ int val2)
+{
+ struct ads1015_data *data = iio_priv(indio_dev);
+ int realbits = chan->scan_type.realbits;
+ int ret = 0;
+ long long period;
+ int i;
+ int dr;
+
+ mutex_lock(&data->lock);
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ if (val >= 1 << (realbits - 1) || val < -1 << (realbits - 1)) {
+ ret = -EINVAL;
+ break;
+ }
+ if (dir == IIO_EV_DIR_RISING)
+ data->thresh_data[chan->address].high_thresh = val;
+ else
+ data->thresh_data[chan->address].low_thresh = val;
+ break;
+ case IIO_EV_INFO_PERIOD:
+ dr = data->channel_data[chan->address].data_rate;
+ period = val * USEC_PER_SEC + val2;
+
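+ /*
+ * Select the smallest comparator queue count whose duration is at
+ * least the requested period (fall back to the largest otherwise).
+ */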
+ for (i = 0; i < ARRAY_SIZE(ads1015_comp_queue) - 1; i++) {
+ if (period <= ads1015_comp_queue[i] *
+ USEC_PER_SEC / data->data_rate[dr])
+ break;
+ }
+ data->thresh_data[chan->address].comp_queue = i;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static int ads1015_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct ads1015_data *data = iio_priv(indio_dev);
+ int ret = 0;
+
+ mutex_lock(&data->lock);
+ if (data->event_channel == chan->address) {
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ ret = 1;
+ break;
+ case IIO_EV_DIR_EITHER:
+ ret = (data->comp_mode == ADS1015_CFG_COMP_MODE_WINDOW);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ }
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static int ads1015_enable_event_config(struct ads1015_data *data,
+ const struct iio_chan_spec *chan, int comp_mode)
+{
+ int low_thresh = data->thresh_data[chan->address].low_thresh;
+ int high_thresh = data->thresh_data[chan->address].high_thresh;
+ int ret;
+ unsigned int val;
+
+ if (ads1015_event_channel_enabled(data)) {
+ if (data->event_channel != chan->address ||
+ (data->comp_mode == ADS1015_CFG_COMP_MODE_TRAD &&
+ comp_mode == ADS1015_CFG_COMP_MODE_WINDOW))
+ return -EBUSY;
+
+ return 0;
+ }
+
+ if (comp_mode == ADS1015_CFG_COMP_MODE_TRAD) {
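+ /*
+ * Traditional comparator mode only uses the high threshold: keep
+ * the low (de-assert) threshold just below it to minimize
+ * hysteresis.
+ */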
+ low_thresh = max(-1 << (chan->scan_type.realbits - 1),
+ high_thresh - 1);
+ }
+ ret = regmap_write(data->regmap, ADS1015_LO_THRESH_REG,
+ low_thresh << chan->scan_type.shift);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(data->regmap, ADS1015_HI_THRESH_REG,
+ high_thresh << chan->scan_type.shift);
+ if (ret)
+ return ret;
+
+ ret = ads1015_set_power_state(data, true);
+ if (ret < 0)
+ return ret;
+
+ ads1015_event_channel_enable(data, chan->address, comp_mode);
+
+ ret = ads1015_get_adc_result(data, chan->address, &val);
+ if (ret) {
+ ads1015_event_channel_disable(data, chan->address);
+ ads1015_set_power_state(data, false);
+ }
+
+ return ret;
+}
+
+static int ads1015_disable_event_config(struct ads1015_data *data,
+ const struct iio_chan_spec *chan, int comp_mode)
+{
+ int ret;
+
+ if (!ads1015_event_channel_enabled(data))
+ return 0;
+
+ if (data->event_channel != chan->address)
+ return 0;
+
+ if (data->comp_mode == ADS1015_CFG_COMP_MODE_TRAD &&
+ comp_mode == ADS1015_CFG_COMP_MODE_WINDOW)
+ return 0;
+
+ ret = regmap_update_bits(data->regmap, ADS1015_CFG_REG,
+ ADS1015_CFG_COMP_QUE_MASK,
+ ADS1015_CFG_COMP_DISABLE <<
+ ADS1015_CFG_COMP_QUE_SHIFT);
+ if (ret)
+ return ret;
+
+ ads1015_event_channel_disable(data, chan->address);
+
+ return ads1015_set_power_state(data, false);
+}
+
+static int ads1015_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, int state)
+{
+ struct ads1015_data *data = iio_priv(indio_dev);
+ int ret;
+ int comp_mode = (dir == IIO_EV_DIR_EITHER) ?
+ ADS1015_CFG_COMP_MODE_WINDOW : ADS1015_CFG_COMP_MODE_TRAD;
+
+ mutex_lock(&data->lock);
+
+ /* Prevent enabling both the buffer and an event at the same time */
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret) {
+ mutex_unlock(&data->lock);
+ return ret;
+ }
+
+ if (state)
+ ret = ads1015_enable_event_config(data, chan, comp_mode);
+ else
+ ret = ads1015_disable_event_config(data, chan, comp_mode);
+
+ iio_device_release_direct_mode(indio_dev);
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static irqreturn_t ads1015_event_handler(int irq, void *priv)
+{
+ struct iio_dev *indio_dev = priv;
+ struct ads1015_data *data = iio_priv(indio_dev);
+ int val;
+ int ret;
+
+ /* Clear the latched ALERT/RDY pin */
+ ret = regmap_read(data->regmap, ADS1015_CONV_REG, &val);
+ if (ret)
+ return IRQ_HANDLED;
+
+ if (ads1015_event_channel_enabled(data)) {
+ enum iio_event_direction dir;
+ u64 code;
+
+ dir = data->comp_mode == ADS1015_CFG_COMP_MODE_TRAD ?
+ IIO_EV_DIR_RISING : IIO_EV_DIR_EITHER;
+ code = IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, data->event_channel,
+ IIO_EV_TYPE_THRESH, dir);
+ iio_push_event(indio_dev, code, iio_get_time_ns(indio_dev));
+ }
+
+ return IRQ_HANDLED;
+}
+
static int ads1015_buffer_preenable(struct iio_dev *indio_dev)
{
+ struct ads1015_data *data = iio_priv(indio_dev);
+
+ /* Prevent enabling both the buffer and an event at the same time */
+ if (ads1015_event_channel_enabled(data))
+ return -EBUSY;
+
return ads1015_set_power_state(iio_priv(indio_dev), true);
}
@@ -446,7 +790,10 @@ static const struct iio_buffer_setup_ops ads1015_buffer_setup_ops = {
.validate_scan_mask = &iio_validate_scan_mask_onehot,
};
-static IIO_CONST_ATTR(scale_available, "3 2 1 0.5 0.25 0.125");
+static IIO_CONST_ATTR_NAMED(ads1015_scale_available, scale_available,
+ "3 2 1 0.5 0.25 0.125");
+static IIO_CONST_ATTR_NAMED(ads1115_scale_available, scale_available,
+ "0.1875 0.125 0.0625 0.03125 0.015625 0.007813");
static IIO_CONST_ATTR_NAMED(ads1015_sampling_frequency_available,
sampling_frequency_available, "128 250 490 920 1600 2400 3300");
@@ -454,7 +801,7 @@ static IIO_CONST_ATTR_NAMED(ads1115_sampling_frequency_available,
sampling_frequency_available, "8 16 32 64 128 250 475 860");
static struct attribute *ads1015_attributes[] = {
- &iio_const_attr_scale_available.dev_attr.attr,
+ &iio_const_attr_ads1015_scale_available.dev_attr.attr,
&iio_const_attr_ads1015_sampling_frequency_available.dev_attr.attr,
NULL,
};
@@ -464,7 +811,7 @@ static const struct attribute_group ads1015_attribute_group = {
};
static struct attribute *ads1115_attributes[] = {
- &iio_const_attr_scale_available.dev_attr.attr,
+ &iio_const_attr_ads1115_scale_available.dev_attr.attr,
&iio_const_attr_ads1115_sampling_frequency_available.dev_attr.attr,
NULL,
};
@@ -477,6 +824,10 @@ static const struct iio_info ads1015_info = {
.driver_module = THIS_MODULE,
.read_raw = ads1015_read_raw,
.write_raw = ads1015_write_raw,
+ .read_event_value = ads1015_read_event,
+ .write_event_value = ads1015_write_event,
+ .read_event_config = ads1015_read_event_config,
+ .write_event_config = ads1015_write_event_config,
.attrs = &ads1015_attribute_group,
};
@@ -484,6 +835,10 @@ static const struct iio_info ads1115_info = {
.driver_module = THIS_MODULE,
.read_raw = ads1015_read_raw,
.write_raw = ads1015_write_raw,
+ .read_event_value = ads1015_read_event,
+ .write_event_value = ads1015_write_event,
+ .read_event_config = ads1015_read_event_config,
+ .write_event_config = ads1015_write_event_config,
.attrs = &ads1115_attribute_group,
};
@@ -505,24 +860,24 @@ static int ads1015_get_channels_config_of(struct i2c_client *client)
unsigned int data_rate = ADS1015_DEFAULT_DATA_RATE;
if (of_property_read_u32(node, "reg", &pval)) {
- dev_err(&client->dev, "invalid reg on %s\n",
- node->full_name);
+ dev_err(&client->dev, "invalid reg on %pOF\n",
+ node);
continue;
}
channel = pval;
if (channel >= ADS1015_CHANNELS) {
dev_err(&client->dev,
- "invalid channel index %d on %s\n",
- channel, node->full_name);
+ "invalid channel index %d on %pOF\n",
+ channel, node);
continue;
}
if (!of_property_read_u32(node, "ti,gain", &pval)) {
pga = pval;
if (pga > 6) {
- dev_err(&client->dev, "invalid gain on %s\n",
- node->full_name);
+ dev_err(&client->dev, "invalid gain on %pOF\n",
+ node);
of_node_put(node);
return -EINVAL;
}
@@ -532,8 +887,8 @@ static int ads1015_get_channels_config_of(struct i2c_client *client)
data_rate = pval;
if (data_rate > 7) {
dev_err(&client->dev,
- "invalid data_rate on %s\n",
- node->full_name);
+ "invalid data_rate on %pOF\n",
+ node);
of_node_put(node);
return -EINVAL;
}
@@ -573,6 +928,13 @@ static void ads1015_get_channels_config(struct i2c_client *client)
}
}
+static int ads1015_set_conv_mode(struct ads1015_data *data, int mode)
+{
+ return regmap_update_bits(data->regmap, ADS1015_CFG_REG,
+ ADS1015_CFG_MOD_MASK,
+ mode << ADS1015_CFG_MOD_SHIFT);
+}
+
static int ads1015_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -580,6 +942,7 @@ static int ads1015_probe(struct i2c_client *client,
struct ads1015_data *data;
int ret;
enum chip_ids chip;
+ int i;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (!indio_dev)
@@ -614,6 +977,18 @@ static int ads1015_probe(struct i2c_client *client,
break;
}
+ data->event_channel = ADS1015_CHANNELS;
+ /*
+ * Set the default lower and upper thresholds to the minimum and
+ * maximum values, respectively.
+ */
+ for (i = 0; i < ADS1015_CHANNELS; i++) {
+ int realbits = indio_dev->channels[i].scan_type.realbits;
+
+ data->thresh_data[i].low_thresh = -1 << (realbits - 1);
+ data->thresh_data[i].high_thresh = (1 << (realbits - 1)) - 1;
+ }
+
/* we need to keep this ABI the same as used by hwmon ADS1015 driver */
ads1015_get_channels_config(client);
@@ -623,16 +998,56 @@ static int ads1015_probe(struct i2c_client *client,
return PTR_ERR(data->regmap);
}
- ret = iio_triggered_buffer_setup(indio_dev, NULL,
- ads1015_trigger_handler,
- &ads1015_buffer_setup_ops);
+ ret = devm_iio_triggered_buffer_setup(&client->dev, indio_dev, NULL,
+ ads1015_trigger_handler,
+ &ads1015_buffer_setup_ops);
if (ret < 0) {
dev_err(&client->dev, "iio triggered buffer setup failed\n");
return ret;
}
+
+ if (client->irq) {
+ unsigned long irq_trig =
+ irqd_get_trigger_type(irq_get_irq_data(client->irq));
+ unsigned int cfg_comp_mask = ADS1015_CFG_COMP_QUE_MASK |
+ ADS1015_CFG_COMP_LAT_MASK | ADS1015_CFG_COMP_POL_MASK;
+ unsigned int cfg_comp =
+ ADS1015_CFG_COMP_DISABLE << ADS1015_CFG_COMP_QUE_SHIFT |
+ 1 << ADS1015_CFG_COMP_LAT_SHIFT;
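+ /*
+ * Default: comparator disabled, latching enabled so ALERT/RDY stays
+ * asserted until the conversion register is read.
+ */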
+
+ switch (irq_trig) {
+ case IRQF_TRIGGER_LOW:
+ cfg_comp |= ADS1015_CFG_COMP_POL_LOW;
+ break;
+ case IRQF_TRIGGER_HIGH:
+ cfg_comp |= ADS1015_CFG_COMP_POL_HIGH;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(data->regmap, ADS1015_CFG_REG,
+ cfg_comp_mask, cfg_comp);
+ if (ret)
+ return ret;
+
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, ads1015_event_handler,
+ irq_trig | IRQF_ONESHOT,
+ client->name, indio_dev);
+ if (ret)
+ return ret;
+ }
+
+ ret = ads1015_set_conv_mode(data, ADS1015_CONTINUOUS);
+ if (ret)
+ return ret;
+
+ data->conv_invalid = true;
+
ret = pm_runtime_set_active(&client->dev);
if (ret)
- goto err_buffer_cleanup;
+ return ret;
pm_runtime_set_autosuspend_delay(&client->dev, ADS1015_SLEEP_DELAY_MS);
pm_runtime_use_autosuspend(&client->dev);
pm_runtime_enable(&client->dev);
@@ -640,15 +1055,10 @@ static int ads1015_probe(struct i2c_client *client,
ret = iio_device_register(indio_dev);
if (ret < 0) {
dev_err(&client->dev, "Failed to register IIO device\n");
- goto err_buffer_cleanup;
+ return ret;
}
return 0;
-
-err_buffer_cleanup:
- iio_triggered_buffer_cleanup(indio_dev);
-
- return ret;
}
static int ads1015_remove(struct i2c_client *client)
@@ -662,12 +1072,8 @@ static int ads1015_remove(struct i2c_client *client)
pm_runtime_set_suspended(&client->dev);
pm_runtime_put_noidle(&client->dev);
- iio_triggered_buffer_cleanup(indio_dev);
-
/* power down single shot mode */
- return regmap_update_bits(data->regmap, ADS1015_CFG_REG,
- ADS1015_CFG_MOD_MASK,
- ADS1015_SINGLESHOT << ADS1015_CFG_MOD_SHIFT);
+ return ads1015_set_conv_mode(data, ADS1015_SINGLESHOT);
}
#ifdef CONFIG_PM
@@ -676,19 +1082,20 @@ static int ads1015_runtime_suspend(struct device *dev)
struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
struct ads1015_data *data = iio_priv(indio_dev);
- return regmap_update_bits(data->regmap, ADS1015_CFG_REG,
- ADS1015_CFG_MOD_MASK,
- ADS1015_SINGLESHOT << ADS1015_CFG_MOD_SHIFT);
+ return ads1015_set_conv_mode(data, ADS1015_SINGLESHOT);
}
static int ads1015_runtime_resume(struct device *dev)
{
struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
struct ads1015_data *data = iio_priv(indio_dev);
+ int ret;
- return regmap_update_bits(data->regmap, ADS1015_CFG_REG,
- ADS1015_CFG_MOD_MASK,
- ADS1015_CONTINUOUS << ADS1015_CFG_MOD_SHIFT);
+ ret = ads1015_set_conv_mode(data, ADS1015_CONTINUOUS);
+ if (!ret)
+ data->conv_invalid = true;
+
+ return ret;
}
#endif
diff --git a/drivers/iio/adc/ti-ads7950.c b/drivers/iio/adc/ti-ads7950.c
index 16a06633332c..a376190914ad 100644
--- a/drivers/iio/adc/ti-ads7950.c
+++ b/drivers/iio/adc/ti-ads7950.c
@@ -21,6 +21,7 @@
* GNU General Public License for more details.
*/
+#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -37,6 +38,12 @@
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
+/*
+ * In the ACPI case, use 5000 mV as the default for the reference pin.
+ * Device tree users encode it via the vref-supply regulator.
+ */
+#define TI_ADS7950_VA_MV_ACPI_DEFAULT 5000
+
#define TI_ADS7950_CR_MANUAL BIT(12)
#define TI_ADS7950_CR_WRITE BIT(11)
#define TI_ADS7950_CR_CHAN(ch) ((ch) << 7)
@@ -58,6 +65,7 @@ struct ti_ads7950_state {
struct spi_message scan_single_msg;
struct regulator *reg;
+ unsigned int vref_mv;
unsigned int settings;
@@ -305,11 +313,15 @@ static int ti_ads7950_get_range(struct ti_ads7950_state *st)
{
int vref;
- vref = regulator_get_voltage(st->reg);
- if (vref < 0)
- return vref;
+ if (st->vref_mv) {
+ vref = st->vref_mv;
+ } else {
+ vref = regulator_get_voltage(st->reg);
+ if (vref < 0)
+ return vref;
- vref /= 1000;
+ vref /= 1000;
+ }
if (st->settings & TI_ADS7950_CR_RANGE_5V)
vref *= 2;
@@ -411,6 +423,10 @@ static int ti_ads7950_probe(struct spi_device *spi)
spi_message_init_with_transfers(&st->scan_single_msg,
st->scan_single_xfer, 3);
+ /* Use a hard-coded reference voltage value in the ACPI case */
+ if (ACPI_COMPANION(&spi->dev))
+ st->vref_mv = TI_ADS7950_VA_MV_ACPI_DEFAULT;
+
st->reg = devm_regulator_get(&spi->dev, "vref");
if (IS_ERR(st->reg)) {
dev_err(&spi->dev, "Failed get get regulator \"vref\"\n");
@@ -475,9 +491,27 @@ static const struct spi_device_id ti_ads7950_id[] = {
};
MODULE_DEVICE_TABLE(spi, ti_ads7950_id);
+static const struct of_device_id ads7950_of_table[] = {
+ { .compatible = "ti,ads7950", .data = &ti_ads7950_chip_info[TI_ADS7950] },
+ { .compatible = "ti,ads7951", .data = &ti_ads7950_chip_info[TI_ADS7951] },
+ { .compatible = "ti,ads7952", .data = &ti_ads7950_chip_info[TI_ADS7952] },
+ { .compatible = "ti,ads7953", .data = &ti_ads7950_chip_info[TI_ADS7953] },
+ { .compatible = "ti,ads7954", .data = &ti_ads7950_chip_info[TI_ADS7954] },
+ { .compatible = "ti,ads7955", .data = &ti_ads7950_chip_info[TI_ADS7955] },
+ { .compatible = "ti,ads7956", .data = &ti_ads7950_chip_info[TI_ADS7956] },
+ { .compatible = "ti,ads7957", .data = &ti_ads7950_chip_info[TI_ADS7957] },
+ { .compatible = "ti,ads7958", .data = &ti_ads7950_chip_info[TI_ADS7958] },
+ { .compatible = "ti,ads7959", .data = &ti_ads7950_chip_info[TI_ADS7959] },
+ { .compatible = "ti,ads7960", .data = &ti_ads7950_chip_info[TI_ADS7960] },
+ { .compatible = "ti,ads7961", .data = &ti_ads7950_chip_info[TI_ADS7961] },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ads7950_of_table);
+
static struct spi_driver ti_ads7950_driver = {
.driver = {
.name = "ads7950",
+ .of_match_table = ads7950_of_table,
},
.probe = ti_ads7950_probe,
.remove = ti_ads7950_remove,
diff --git a/drivers/iio/adc/twl4030-madc.c b/drivers/iio/adc/twl4030-madc.c
index bd3d37fc2144..1edd99f0c5e5 100644
--- a/drivers/iio/adc/twl4030-madc.c
+++ b/drivers/iio/adc/twl4030-madc.c
@@ -35,7 +35,7 @@
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/i2c/twl.h>
+#include <linux/mfd/twl.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/mutex.h>
diff --git a/drivers/iio/adc/twl6030-gpadc.c b/drivers/iio/adc/twl6030-gpadc.c
index becbb0aef232..bc0e60b9da45 100644
--- a/drivers/iio/adc/twl6030-gpadc.c
+++ b/drivers/iio/adc/twl6030-gpadc.c
@@ -33,7 +33,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of_platform.h>
-#include <linux/i2c/twl.h>
+#include <linux/mfd/twl.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
index 01fc76f7d660..c168e0db329a 100644
--- a/drivers/iio/adc/vf610_adc.c
+++ b/drivers/iio/adc/vf610_adc.c
@@ -77,7 +77,7 @@
#define VF610_ADC_ADSTS_MASK 0x300
#define VF610_ADC_ADLPC_EN 0x80
#define VF610_ADC_ADHSC_EN 0x400
-#define VF610_ADC_REFSEL_VALT 0x100
+#define VF610_ADC_REFSEL_VALT 0x800
#define VF610_ADC_REFSEL_VBG 0x1000
#define VF610_ADC_ADTRG_HARD 0x2000
#define VF610_ADC_AVGS_8 0x4000
diff --git a/drivers/iio/adc/xilinx-xadc-events.c b/drivers/iio/adc/xilinx-xadc-events.c
index 6d5c2a6f4e6e..dc0670308253 100644
--- a/drivers/iio/adc/xilinx-xadc-events.c
+++ b/drivers/iio/adc/xilinx-xadc-events.c
@@ -68,7 +68,7 @@ void xadc_handle_events(struct iio_dev *indio_dev, unsigned long events)
xadc_handle_event(indio_dev, i);
}
-static unsigned xadc_get_threshold_offset(const struct iio_chan_spec *chan,
+static unsigned int xadc_get_threshold_offset(const struct iio_chan_spec *chan,
enum iio_event_direction dir)
{
unsigned int offset;
@@ -90,26 +90,24 @@ static unsigned xadc_get_threshold_offset(const struct iio_chan_spec *chan,
static unsigned int xadc_get_alarm_mask(const struct iio_chan_spec *chan)
{
- if (chan->type == IIO_TEMP) {
+ if (chan->type == IIO_TEMP)
return XADC_ALARM_OT_MASK;
- } else {
- switch (chan->channel) {
- case 0:
- return XADC_ALARM_VCCINT_MASK;
- case 1:
- return XADC_ALARM_VCCAUX_MASK;
- case 2:
- return XADC_ALARM_VCCBRAM_MASK;
- case 3:
- return XADC_ALARM_VCCPINT_MASK;
- case 4:
- return XADC_ALARM_VCCPAUX_MASK;
- case 5:
- return XADC_ALARM_VCCODDR_MASK;
- default:
- /* We will never get here */
- return 0;
- }
+ switch (chan->channel) {
+ case 0:
+ return XADC_ALARM_VCCINT_MASK;
+ case 1:
+ return XADC_ALARM_VCCAUX_MASK;
+ case 2:
+ return XADC_ALARM_VCCBRAM_MASK;
+ case 3:
+ return XADC_ALARM_VCCPINT_MASK;
+ case 4:
+ return XADC_ALARM_VCCPAUX_MASK;
+ case 5:
+ return XADC_ALARM_VCCODDR_MASK;
+ default:
+ /* We will never get here */
+ return 0;
}
}
diff --git a/drivers/iio/adc/xilinx-xadc.h b/drivers/iio/adc/xilinx-xadc.h
index f6f081965647..62edbdae1244 100644
--- a/drivers/iio/adc/xilinx-xadc.h
+++ b/drivers/iio/adc/xilinx-xadc.h
@@ -71,13 +71,13 @@ struct xadc {
};
struct xadc_ops {
- int (*read)(struct xadc *, unsigned int, uint16_t *);
- int (*write)(struct xadc *, unsigned int, uint16_t);
+ int (*read)(struct xadc *xadc, unsigned int reg, uint16_t *val);
+ int (*write)(struct xadc *xadc, unsigned int reg, uint16_t val);
int (*setup)(struct platform_device *pdev, struct iio_dev *indio_dev,
int irq);
- void (*update_alarm)(struct xadc *, unsigned int);
- unsigned long (*get_dclk_rate)(struct xadc *);
- irqreturn_t (*interrupt_handler)(int, void *);
+ void (*update_alarm)(struct xadc *xadc, unsigned int alarm);
+ unsigned long (*get_dclk_rate)(struct xadc *xadc);
+ irqreturn_t (*interrupt_handler)(int irq, void *devid);
unsigned int flags;
};
diff --git a/drivers/iio/chemical/Kconfig b/drivers/iio/chemical/Kconfig
index cea7f9857a1f..5cb5be7612b4 100644
--- a/drivers/iio/chemical/Kconfig
+++ b/drivers/iio/chemical/Kconfig
@@ -21,6 +21,15 @@ config ATLAS_PH_SENSOR
To compile this driver as module, choose M here: the
module will be called atlas-ph-sensor.
+config CCS811
+ tristate "AMS CCS811 VOC sensor"
+ depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say Y here to build I2C interface support for the AMS
+ CCS811 VOC (Volatile Organic Compounds) sensor
+
config IAQCORE
tristate "AMS iAQ-Core VOC sensors"
depends on I2C
diff --git a/drivers/iio/chemical/Makefile b/drivers/iio/chemical/Makefile
index b02202b41289..a629b29d1e0b 100644
--- a/drivers/iio/chemical/Makefile
+++ b/drivers/iio/chemical/Makefile
@@ -4,5 +4,6 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_ATLAS_PH_SENSOR) += atlas-ph-sensor.o
+obj-$(CONFIG_CCS811) += ccs811.o
obj-$(CONFIG_IAQCORE) += ams-iaq-core.o
obj-$(CONFIG_VZ89X) += vz89x.o
diff --git a/drivers/iio/chemical/ccs811.c b/drivers/iio/chemical/ccs811.c
new file mode 100644
index 000000000000..840a6cbd5f0f
--- /dev/null
+++ b/drivers/iio/chemical/ccs811.c
@@ -0,0 +1,405 @@
+/*
+ * ccs811.c - Support for AMS CCS811 VOC Sensor
+ *
+ * Copyright (C) 2017 Narcisa Vasile <narcisaanamaria12@gmail.com>
+ *
+ * Datasheet: ams.com/content/download/951091/2269479/CCS811_DS000459_3-00.pdf
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * IIO driver for AMS CCS811 (I2C address 0x5A/0x5B set by ADDR Low/High)
+ *
+ * TODO:
+ * 1. Make the drive mode selectable from userspace
+ * 2. Add support for interrupts
+ * 3. Adjust time to wait for data to be ready based on selected operation mode
+ * 4. Read error register and put the information in logs
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/module.h>
+
+#define CCS811_STATUS 0x00
+#define CCS811_MEAS_MODE 0x01
+#define CCS811_ALG_RESULT_DATA 0x02
+#define CCS811_RAW_DATA 0x03
+#define CCS811_HW_ID 0x20
+#define CCS811_HW_ID_VALUE 0x81
+#define CCS811_HW_VERSION 0x21
+#define CCS811_HW_VERSION_VALUE 0x10
+#define CCS811_HW_VERSION_MASK 0xF0
+#define CCS811_ERR 0xE0
+/* Used to transition from boot to application mode */
+#define CCS811_APP_START 0xF4
+
+/* Status register flags */
+#define CCS811_STATUS_ERROR BIT(0)
+#define CCS811_STATUS_DATA_READY BIT(3)
+#define CCS811_STATUS_APP_VALID_MASK BIT(4)
+#define CCS811_STATUS_APP_VALID_LOADED BIT(4)
+/*
+ * Value of FW_MODE bit of STATUS register describes the sensor's state:
+ * 0: Firmware is in boot mode, this allows new firmware to be loaded
+ * 1: Firmware is in application mode. CCS811 is ready to take ADC measurements
+ */
+#define CCS811_STATUS_FW_MODE_MASK BIT(7)
+#define CCS811_STATUS_FW_MODE_APPLICATION BIT(7)
+
+/* Measurement modes */
+#define CCS811_MODE_IDLE 0x00
+#define CCS811_MODE_IAQ_1SEC 0x10
+#define CCS811_MODE_IAQ_10SEC 0x20
+#define CCS811_MODE_IAQ_60SEC 0x30
+#define CCS811_MODE_RAW_DATA 0x40
+
+#define CCS811_VOLTAGE_MASK 0x3FF
+
+struct ccs811_reading {
+ __be16 co2;
+ __be16 voc;
+ u8 status;
+ u8 error;
+ __be16 resistance;
+} __attribute__((__packed__));
+
+struct ccs811_data {
+ struct i2c_client *client;
+ struct mutex lock; /* Protect readings */
+ struct ccs811_reading buffer;
+};
+
+static const struct iio_chan_spec ccs811_channels[] = {
+ {
+ .type = IIO_CURRENT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = -1,
+ }, {
+ .type = IIO_VOLTAGE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = -1,
+ }, {
+ .type = IIO_CONCENTRATION,
+ .channel2 = IIO_MOD_CO2,
+ .modified = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_BE,
+ },
+ }, {
+ .type = IIO_CONCENTRATION,
+ .channel2 = IIO_MOD_VOC,
+ .modified = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = 1,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_BE,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(2),
+};
+
+/*
+ * The CCS811 powers up in boot mode. A setup write to CCS811_APP_START will
+ * transition the sensor to application mode.
+ */
+static int ccs811_start_sensor_application(struct i2c_client *client)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(client, CCS811_STATUS);
+ if (ret < 0)
+ return ret;
+
+ if ((ret & CCS811_STATUS_APP_VALID_MASK) !=
+ CCS811_STATUS_APP_VALID_LOADED)
+ return -EIO;
+
+ ret = i2c_smbus_write_byte(client, CCS811_APP_START);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_byte_data(client, CCS811_STATUS);
+ if (ret < 0)
+ return ret;
+
+ if ((ret & CCS811_STATUS_FW_MODE_MASK) !=
+ CCS811_STATUS_FW_MODE_APPLICATION) {
+ dev_err(&client->dev, "Application failed to start. Sensor is still in boot mode.\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int ccs811_setup(struct i2c_client *client)
+{
+ int ret;
+
+ ret = ccs811_start_sensor_application(client);
+ if (ret < 0)
+ return ret;
+
+ return i2c_smbus_write_byte_data(client, CCS811_MEAS_MODE,
+ CCS811_MODE_IAQ_1SEC);
+}
+
+static int ccs811_get_measurement(struct ccs811_data *data)
+{
+ int ret, tries = 11;
+
+ /* Maximum waiting time: 1s, as measurements are made every second */
+ while (tries-- > 0) {
+ ret = i2c_smbus_read_byte_data(data->client, CCS811_STATUS);
+ if (ret < 0)
+ return ret;
+
+ if ((ret & CCS811_STATUS_DATA_READY) || tries == 0)
+ break;
+ msleep(100);
+ }
+ if (!(ret & CCS811_STATUS_DATA_READY))
+ return -EIO;
+
+ return i2c_smbus_read_i2c_block_data(data->client,
+ CCS811_ALG_RESULT_DATA, 8,
+ (char *)&data->buffer);
+}
+
+static int ccs811_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct ccs811_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&data->lock);
+ ret = ccs811_get_measurement(data);
+ if (ret < 0) {
+ mutex_unlock(&data->lock);
+ return ret;
+ }
+
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ *val = be16_to_cpu(data->buffer.resistance) &
+ CCS811_VOLTAGE_MASK;
+ ret = IIO_VAL_INT;
+ break;
+ case IIO_CURRENT:
+ *val = be16_to_cpu(data->buffer.resistance) >> 10;
+ ret = IIO_VAL_INT;
+ break;
+ case IIO_CONCENTRATION:
+ switch (chan->channel2) {
+ case IIO_MOD_CO2:
+ *val = be16_to_cpu(data->buffer.co2);
+ ret = IIO_VAL_INT;
+ break;
+ case IIO_MOD_VOC:
+ *val = be16_to_cpu(data->buffer.voc);
+ ret = IIO_VAL_INT;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ mutex_unlock(&data->lock);
+
+ return ret;
+
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
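+ /*
+ * 1.612903 mV per LSB; presumably 1650 mV over the 10-bit
+ * (1023-code) raw value (assumption, not stated in this patch).
+ */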
+ *val = 1;
+ *val2 = 612903;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CURRENT:
+ *val = 0;
+ *val2 = 1000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CONCENTRATION:
+ switch (chan->channel2) {
+ case IIO_MOD_CO2:
+ *val = 0;
+ *val2 = 12834;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_MOD_VOC:
+ *val = 0;
+ *val2 = 84246;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_OFFSET:
+ if (!(chan->type == IIO_CONCENTRATION &&
+ chan->channel2 == IIO_MOD_CO2))
+ return -EINVAL;
+ *val = -400;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info ccs811_info = {
+ .read_raw = ccs811_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static irqreturn_t ccs811_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct ccs811_data *data = iio_priv(indio_dev);
+ struct i2c_client *client = data->client;
+ s16 buf[8]; /* s16 eCO2 + s16 TVOC + padding + 8 byte timestamp */
+ int ret;
+
+ ret = i2c_smbus_read_i2c_block_data(client, CCS811_ALG_RESULT_DATA, 4,
+ (u8 *)&buf);
+ if (ret != 4) {
+ dev_err(&client->dev, "cannot read sensor data\n");
+ goto err;
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, buf,
+ iio_get_time_ns(indio_dev));
+
+err:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int ccs811_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct iio_dev *indio_dev;
+ struct ccs811_data *data;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WRITE_BYTE
+ | I2C_FUNC_SMBUS_BYTE_DATA
+ | I2C_FUNC_SMBUS_READ_I2C_BLOCK))
+ return -EOPNOTSUPP;
+
+ /* Check hardware id (should be 0x81 for this family of devices) */
+ ret = i2c_smbus_read_byte_data(client, CCS811_HW_ID);
+ if (ret < 0)
+ return ret;
+
+ if (ret != CCS811_HW_ID_VALUE) {
+ dev_err(&client->dev, "hardware id doesn't match CCS81x\n");
+ return -ENODEV;
+ }
+
+ ret = i2c_smbus_read_byte_data(client, CCS811_HW_VERSION);
+ if (ret < 0)
+ return ret;
+
+ if ((ret & CCS811_HW_VERSION_MASK) != CCS811_HW_VERSION_VALUE) {
+ dev_err(&client->dev, "no CCS811 sensor\n");
+ return -ENODEV;
+ }
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ ret = ccs811_setup(client);
+ if (ret < 0)
+ return ret;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+
+ mutex_init(&data->lock);
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->name = id->name;
+ indio_dev->info = &ccs811_info;
+
+ indio_dev->channels = ccs811_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ccs811_channels);
+
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ ccs811_trigger_handler, NULL);
+
+ if (ret < 0) {
+ dev_err(&client->dev, "triggered buffer setup failed\n");
+ goto err_poweroff;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0) {
+ dev_err(&client->dev, "unable to register iio device\n");
+ goto err_buffer_cleanup;
+ }
+ return 0;
+
+err_buffer_cleanup:
+ iio_triggered_buffer_cleanup(indio_dev);
+err_poweroff:
+ i2c_smbus_write_byte_data(client, CCS811_MEAS_MODE, CCS811_MODE_IDLE);
+
+ return ret;
+}
+
+static int ccs811_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+
+ return i2c_smbus_write_byte_data(client, CCS811_MEAS_MODE,
+ CCS811_MODE_IDLE);
+}
+
+static const struct i2c_device_id ccs811_id[] = {
+ {"ccs811", 0},
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ccs811_id);
+
+static struct i2c_driver ccs811_driver = {
+ .driver = {
+ .name = "ccs811",
+ },
+ .probe = ccs811_probe,
+ .remove = ccs811_remove,
+ .id_table = ccs811_id,
+};
+module_i2c_driver(ccs811_driver);
+
+MODULE_AUTHOR("Narcisa Vasile <narcisaanamaria12@gmail.com>");
+MODULE_DESCRIPTION("CCS811 volatile organic compounds sensor");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
index f5d4d786e193..ed3849d6fc6a 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
@@ -473,6 +473,9 @@ int hid_sensor_parse_common_attributes(struct hid_sensor_hub_device *hsdev,
HID_USAGE_SENSOR_PROY_POWER_STATE,
&st->power_state);
+ st->power_state.logical_minimum = 1;
+ st->report_state.logical_minimum = 1;
+
sensor_hub_input_get_attribute_info(hsdev,
HID_FEATURE_REPORT, usage_id,
HID_USAGE_SENSOR_PROP_SENSITIVITY_ABS,
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index 16ade0a0327b..0e4b379ada45 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -111,8 +111,6 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
s32 poll_value = 0;
if (state) {
- if (!atomic_read(&st->user_requested_state))
- return 0;
if (sensor_hub_device_open(st->hsdev))
return -EIO;
@@ -161,6 +159,9 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
&report_val);
}
+ pr_debug("HID_SENSOR %s set power_state %d report_state %d\n",
+ st->pdev->name, state_val, report_val);
+
sensor_hub_get_feature(st->hsdev, st->power_state.report_id,
st->power_state.index,
sizeof(state_val), &state_val);
@@ -182,6 +183,7 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
ret = pm_runtime_get_sync(&st->pdev->dev);
else {
pm_runtime_mark_last_busy(&st->pdev->dev);
+ pm_runtime_use_autosuspend(&st->pdev->dev);
ret = pm_runtime_put_autosuspend(&st->pdev->dev);
}
if (ret < 0) {
@@ -285,8 +287,6 @@ int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
/* Default to 3 seconds, but can be changed from sysfs */
pm_runtime_set_autosuspend_delay(&attrb->pdev->dev,
3000);
- pm_runtime_use_autosuspend(&attrb->pdev->dev);
-
return ret;
error_unreg_trigger:
iio_trigger_unregister(trig);
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 79c8c7cd70d5..d99bb1460fe2 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -15,6 +15,7 @@
#include <linux/iio/iio.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <asm/unaligned.h>
#include <linux/iio/common/st_sensors.h>
@@ -345,6 +346,36 @@ static struct st_sensors_platform_data *st_sensors_of_probe(struct device *dev,
return pdata;
}
+
+/**
+ * st_sensors_of_name_probe() - device tree probe for ST sensor name
+ * @dev: driver model representation of the device.
+ * @match: the OF match table for the device, containing compatible strings
+ * but also a .data field with the corresponding internal kernel name
+ * used by this sensor.
+ * @name: device name buffer reference.
+ * @len: device name buffer length.
+ *
+ * In effect this function matches a compatible string to an internal kernel
+ * name for a certain sensor device, so that the rest of the autodetection can
+ * rely on that name from this point on. I2C/SPI devices will be renamed
+ * to match the internal kernel convention.
+ */
+void st_sensors_of_name_probe(struct device *dev,
+ const struct of_device_id *match,
+ char *name, int len)
+{
+ const struct of_device_id *of_id;
+
+ of_id = of_match_device(match, dev);
+ if (!of_id || !of_id->data)
+ return;
+
+ /* The name from the OF match takes precedence if present */
+ strncpy(name, of_id->data, len);
+ name[len - 1] = '\0';
+}
+EXPORT_SYMBOL(st_sensors_of_name_probe);
#else
static struct st_sensors_platform_data *st_sensors_of_probe(struct device *dev,
struct st_sensors_platform_data *defdata)
@@ -550,6 +581,31 @@ out:
}
EXPORT_SYMBOL(st_sensors_read_info_raw);
+static int st_sensors_init_interface_mode(struct iio_dev *indio_dev,
+ const struct st_sensor_settings *sensor_settings)
+{
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+ struct device_node *np = sdata->dev->of_node;
+ struct st_sensors_platform_data *pdata;
+
+ pdata = (struct st_sensors_platform_data *)sdata->dev->platform_data;
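+ /*
+ * When 3-wire SPI is requested via DT or platform data, program the
+ * sensor's serial interface mode (SIM) register bit before any other
+ * register access.
+ */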
+ if (((np && of_property_read_bool(np, "spi-3wire")) ||
+ (pdata && pdata->spi_3wire)) && sensor_settings->sim.addr) {
+ int err;
+
+ err = sdata->tf->write_byte(&sdata->tb, sdata->dev,
+ sensor_settings->sim.addr,
+ sensor_settings->sim.value);
+ if (err < 0) {
+ dev_err(&indio_dev->dev,
+ "failed to init interface mode\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
int st_sensors_check_device_support(struct iio_dev *indio_dev,
int num_sensors_list,
const struct st_sensor_settings *sensor_settings)
@@ -574,6 +630,10 @@ int st_sensors_check_device_support(struct iio_dev *indio_dev,
return -ENODEV;
}
+ err = st_sensors_init_interface_mode(indio_dev, &sensor_settings[i]);
+ if (err < 0)
+ return err;
+
if (sensor_settings[i].wai_addr) {
err = sdata->tf->read_byte(&sdata->tb, sdata->dev,
sensor_settings[i].wai_addr, &wai);
diff --git a/drivers/iio/common/st_sensors/st_sensors_i2c.c b/drivers/iio/common/st_sensors/st_sensors_i2c.c
index c83df4dbfcd7..b81e48e9f27e 100644
--- a/drivers/iio/common/st_sensors/st_sensors_i2c.c
+++ b/drivers/iio/common/st_sensors/st_sensors_i2c.c
@@ -79,35 +79,6 @@ void st_sensors_i2c_configure(struct iio_dev *indio_dev,
}
EXPORT_SYMBOL(st_sensors_i2c_configure);
-#ifdef CONFIG_OF
-/**
- * st_sensors_of_i2c_probe() - device tree probe for ST I2C sensors
- * @client: the I2C client device for the sensor
- * @match: the OF match table for the device, containing compatible strings
- * but also a .data field with the corresponding internal kernel name
- * used by this sensor.
- *
- * In effect this function matches a compatible string to an internal kernel
- * name for a certain sensor device, so that the rest of the autodetection can
- * rely on that name from this point on. I2C client devices will be renamed
- * to match the internal kernel convention.
- */
-void st_sensors_of_i2c_probe(struct i2c_client *client,
- const struct of_device_id *match)
-{
- const struct of_device_id *of_id;
-
- of_id = of_match_device(match, &client->dev);
- if (!of_id)
- return;
-
- /* The name from the OF match takes precedence if present */
- strncpy(client->name, of_id->data, sizeof(client->name));
- client->name[sizeof(client->name) - 1] = '\0';
-}
-EXPORT_SYMBOL(st_sensors_of_i2c_probe);
-#endif
-
#ifdef CONFIG_ACPI
int st_sensors_match_acpi_device(struct device *dev)
{
diff --git a/drivers/iio/counter/Kconfig b/drivers/iio/counter/Kconfig
index b37e5fc03149..474e1ac4e7c0 100644
--- a/drivers/iio/counter/Kconfig
+++ b/drivers/iio/counter/Kconfig
@@ -21,4 +21,13 @@ config 104_QUAD_8
The base port addresses for the devices may be configured via the base
array module parameter.
+config STM32_LPTIMER_CNT
+ tristate "STM32 LP Timer encoder counter driver"
+ depends on MFD_STM32_LPTIMER || COMPILE_TEST
+ help
+ Select this option to enable STM32 Low-Power Timer quadrature encoder
+ and counter driver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called stm32-lptimer-cnt.
endmenu
diff --git a/drivers/iio/counter/Makefile b/drivers/iio/counter/Makefile
index 007e88411648..1b9a896eb488 100644
--- a/drivers/iio/counter/Makefile
+++ b/drivers/iio/counter/Makefile
@@ -5,3 +5,4 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_104_QUAD_8) += 104-quad-8.o
+obj-$(CONFIG_STM32_LPTIMER_CNT) += stm32-lptimer-cnt.o
diff --git a/drivers/iio/counter/stm32-lptimer-cnt.c b/drivers/iio/counter/stm32-lptimer-cnt.c
new file mode 100644
index 000000000000..1c5909bb1605
--- /dev/null
+++ b/drivers/iio/counter/stm32-lptimer-cnt.c
@@ -0,0 +1,383 @@
+/*
+ * STM32 Low-Power Timer Encoder and Counter driver
+ *
+ * Copyright (C) STMicroelectronics 2017
+ *
+ * Author: Fabrice Gasnier <fabrice.gasnier@st.com>
+ *
+ * Inspired by 104-quad-8 and stm32-timer-trigger drivers.
+ *
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/bitfield.h>
+#include <linux/iio/iio.h>
+#include <linux/mfd/stm32-lptimer.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+struct stm32_lptim_cnt {
+ struct device *dev;
+ struct regmap *regmap;
+ struct clk *clk;
+ u32 preset;
+ u32 polarity;
+ u32 quadrature_mode;
+};
+
+static int stm32_lptim_is_enabled(struct stm32_lptim_cnt *priv)
+{
+ u32 val;
+ int ret;
+
+ ret = regmap_read(priv->regmap, STM32_LPTIM_CR, &val);
+ if (ret)
+ return ret;
+
+ return FIELD_GET(STM32_LPTIM_ENABLE, val);
+}
+
+static int stm32_lptim_set_enable_state(struct stm32_lptim_cnt *priv,
+ int enable)
+{
+ int ret;
+ u32 val;
+
+ val = FIELD_PREP(STM32_LPTIM_ENABLE, enable);
+ ret = regmap_write(priv->regmap, STM32_LPTIM_CR, val);
+ if (ret)
+ return ret;
+
+ if (!enable) {
+ clk_disable(priv->clk);
+ return 0;
+ }
+
+ /* LP timer must be enabled before writing CMP & ARR */
+ ret = regmap_write(priv->regmap, STM32_LPTIM_ARR, priv->preset);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(priv->regmap, STM32_LPTIM_CMP, 0);
+ if (ret)
+ return ret;
+
+ /* ensure CMP & ARR registers are properly written */
+ ret = regmap_read_poll_timeout(priv->regmap, STM32_LPTIM_ISR, val,
+ (val & STM32_LPTIM_CMPOK_ARROK),
+ 100, 1000);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(priv->regmap, STM32_LPTIM_ICR,
+ STM32_LPTIM_CMPOKCF_ARROKCF);
+ if (ret)
+ return ret;
+
+ ret = clk_enable(priv->clk);
+ if (ret) {
+ regmap_write(priv->regmap, STM32_LPTIM_CR, 0);
+ return ret;
+ }
+
+ /* Start LP timer in continuous mode */
+ return regmap_update_bits(priv->regmap, STM32_LPTIM_CR,
+ STM32_LPTIM_CNTSTRT, STM32_LPTIM_CNTSTRT);
+}
+
+static int stm32_lptim_setup(struct stm32_lptim_cnt *priv, int enable)
+{
+ u32 mask = STM32_LPTIM_ENC | STM32_LPTIM_COUNTMODE |
+ STM32_LPTIM_CKPOL | STM32_LPTIM_PRESC;
+ u32 val;
+
+ /* Setup LP timer encoder/counter and polarity, without prescaler */
+ if (priv->quadrature_mode)
+ val = enable ? STM32_LPTIM_ENC : 0;
+ else
+ val = enable ? STM32_LPTIM_COUNTMODE : 0;
+ val |= FIELD_PREP(STM32_LPTIM_CKPOL, enable ? priv->polarity : 0);
+
+ return regmap_update_bits(priv->regmap, STM32_LPTIM_CFGR, mask, val);
+}
+
+static int stm32_lptim_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct stm32_lptim_cnt *priv = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_ENABLE:
+ if (val < 0 || val > 1)
+ return -EINVAL;
+
+ /* Check that nobody else uses the timer, and that it isn't already disabled/enabled */
+ ret = stm32_lptim_is_enabled(priv);
+ if ((ret < 0) || (!ret && !val))
+ return ret;
+ if (val && ret)
+ return -EBUSY;
+
+ ret = stm32_lptim_setup(priv, val);
+ if (ret)
+ return ret;
+ return stm32_lptim_set_enable_state(priv, val);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int stm32_lptim_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct stm32_lptim_cnt *priv = iio_priv(indio_dev);
+ u32 dat;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = regmap_read(priv->regmap, STM32_LPTIM_CNT, &dat);
+ if (ret)
+ return ret;
+ *val = dat;
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_ENABLE:
+ ret = stm32_lptim_is_enabled(priv);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ /* Non-quadrature mode: scale = 1 */
+ *val = 1;
+ *val2 = 0;
+ if (priv->quadrature_mode) {
+ /*
+ * Quadrature encoder mode:
+ * - both edges, quarter cycle, scale is 0.25
+ * - either rising/falling edge scale is 0.5
+ */
+ if (priv->polarity > 1)
+ *val2 = 2;
+ else
+ *val2 = 1;
+ }
+ return IIO_VAL_FRACTIONAL_LOG2;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info stm32_lptim_cnt_iio_info = {
+ .read_raw = stm32_lptim_read_raw,
+ .write_raw = stm32_lptim_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static const char *const stm32_lptim_quadrature_modes[] = {
+ "non-quadrature",
+ "quadrature",
+};
+
+static int stm32_lptim_get_quadrature_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct stm32_lptim_cnt *priv = iio_priv(indio_dev);
+
+ return priv->quadrature_mode;
+}
+
+static int stm32_lptim_set_quadrature_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ unsigned int type)
+{
+ struct stm32_lptim_cnt *priv = iio_priv(indio_dev);
+
+ if (stm32_lptim_is_enabled(priv))
+ return -EBUSY;
+
+ priv->quadrature_mode = type;
+
+ return 0;
+}
+
+static const struct iio_enum stm32_lptim_quadrature_mode_en = {
+ .items = stm32_lptim_quadrature_modes,
+ .num_items = ARRAY_SIZE(stm32_lptim_quadrature_modes),
+ .get = stm32_lptim_get_quadrature_mode,
+ .set = stm32_lptim_set_quadrature_mode,
+};
+
+static const char * const stm32_lptim_cnt_polarity[] = {
+ "rising-edge", "falling-edge", "both-edges",
+};
+
+static int stm32_lptim_cnt_get_polarity(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct stm32_lptim_cnt *priv = iio_priv(indio_dev);
+
+ return priv->polarity;
+}
+
+static int stm32_lptim_cnt_set_polarity(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ unsigned int type)
+{
+ struct stm32_lptim_cnt *priv = iio_priv(indio_dev);
+
+ if (stm32_lptim_is_enabled(priv))
+ return -EBUSY;
+
+ priv->polarity = type;
+
+ return 0;
+}
+
+static const struct iio_enum stm32_lptim_cnt_polarity_en = {
+ .items = stm32_lptim_cnt_polarity,
+ .num_items = ARRAY_SIZE(stm32_lptim_cnt_polarity),
+ .get = stm32_lptim_cnt_get_polarity,
+ .set = stm32_lptim_cnt_set_polarity,
+};
+
+static ssize_t stm32_lptim_cnt_get_preset(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct stm32_lptim_cnt *priv = iio_priv(indio_dev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", priv->preset);
+}
+
+static ssize_t stm32_lptim_cnt_set_preset(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ const char *buf, size_t len)
+{
+ struct stm32_lptim_cnt *priv = iio_priv(indio_dev);
+ int ret;
+
+ if (stm32_lptim_is_enabled(priv))
+ return -EBUSY;
+
+ ret = kstrtouint(buf, 0, &priv->preset);
+ if (ret)
+ return ret;
+
+ if (priv->preset > STM32_LPTIM_MAX_ARR)
+ return -EINVAL;
+
+ return len;
+}
+
+/* LP timer with encoder */
+static const struct iio_chan_spec_ext_info stm32_lptim_enc_ext_info[] = {
+ {
+ .name = "preset",
+ .shared = IIO_SEPARATE,
+ .read = stm32_lptim_cnt_get_preset,
+ .write = stm32_lptim_cnt_set_preset,
+ },
+ IIO_ENUM("polarity", IIO_SEPARATE, &stm32_lptim_cnt_polarity_en),
+ IIO_ENUM_AVAILABLE("polarity", &stm32_lptim_cnt_polarity_en),
+ IIO_ENUM("quadrature_mode", IIO_SEPARATE,
+ &stm32_lptim_quadrature_mode_en),
+ IIO_ENUM_AVAILABLE("quadrature_mode", &stm32_lptim_quadrature_mode_en),
+ {}
+};
+
+static const struct iio_chan_spec stm32_lptim_enc_channels = {
+ .type = IIO_COUNT,
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_ENABLE) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .ext_info = stm32_lptim_enc_ext_info,
+ .indexed = 1,
+};
+
+/* LP timer without encoder (counter only) */
+static const struct iio_chan_spec_ext_info stm32_lptim_cnt_ext_info[] = {
+ {
+ .name = "preset",
+ .shared = IIO_SEPARATE,
+ .read = stm32_lptim_cnt_get_preset,
+ .write = stm32_lptim_cnt_set_preset,
+ },
+ IIO_ENUM("polarity", IIO_SEPARATE, &stm32_lptim_cnt_polarity_en),
+ IIO_ENUM_AVAILABLE("polarity", &stm32_lptim_cnt_polarity_en),
+ {}
+};
+
+static const struct iio_chan_spec stm32_lptim_cnt_channels = {
+ .type = IIO_COUNT,
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_ENABLE) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .ext_info = stm32_lptim_cnt_ext_info,
+ .indexed = 1,
+};
+
+static int stm32_lptim_cnt_probe(struct platform_device *pdev)
+{
+ struct stm32_lptimer *ddata = dev_get_drvdata(pdev->dev.parent);
+ struct stm32_lptim_cnt *priv;
+ struct iio_dev *indio_dev;
+
+ if (IS_ERR_OR_NULL(ddata))
+ return -EINVAL;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*priv));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ priv = iio_priv(indio_dev);
+ priv->dev = &pdev->dev;
+ priv->regmap = ddata->regmap;
+ priv->clk = ddata->clk;
+ priv->preset = STM32_LPTIM_MAX_ARR;
+
+ indio_dev->name = dev_name(&pdev->dev);
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->dev.of_node = pdev->dev.of_node;
+ indio_dev->info = &stm32_lptim_cnt_iio_info;
+ if (ddata->has_encoder)
+ indio_dev->channels = &stm32_lptim_enc_channels;
+ else
+ indio_dev->channels = &stm32_lptim_cnt_channels;
+ indio_dev->num_channels = 1;
+
+ platform_set_drvdata(pdev, priv);
+
+ return devm_iio_device_register(&pdev->dev, indio_dev);
+}
+
+static const struct of_device_id stm32_lptim_cnt_of_match[] = {
+ { .compatible = "st,stm32-lptimer-counter", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stm32_lptim_cnt_of_match);
+
+static struct platform_driver stm32_lptim_cnt_driver = {
+ .probe = stm32_lptim_cnt_probe,
+ .driver = {
+ .name = "stm32-lptimer-counter",
+ .of_match_table = stm32_lptim_cnt_of_match,
+ },
+};
+module_platform_driver(stm32_lptim_cnt_driver);
+
+MODULE_AUTHOR("Fabrice Gasnier <fabrice.gasnier@st.com>");
+MODULE_ALIAS("platform:stm32-lptimer-counter");
+MODULE_DESCRIPTION("STMicroelectronics STM32 LPTIM counter driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/stm32-dac-core.c b/drivers/iio/dac/stm32-dac-core.c
index 75e48788c7ea..55026fe1c610 100644
--- a/drivers/iio/dac/stm32-dac-core.c
+++ b/drivers/iio/dac/stm32-dac-core.c
@@ -42,6 +42,14 @@ struct stm32_dac_priv {
struct stm32_dac_common common;
};
+/**
+ * struct stm32_dac_cfg - DAC configuration
+ * @has_hfsel: DAC has high frequency control
+ */
+struct stm32_dac_cfg {
+ bool has_hfsel;
+};
+
static struct stm32_dac_priv *to_stm32_dac_priv(struct stm32_dac_common *com)
{
return container_of(com, struct stm32_dac_priv, common);
@@ -57,6 +65,7 @@ static const struct regmap_config stm32_dac_regmap_cfg = {
static int stm32_dac_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ const struct stm32_dac_cfg *cfg;
struct stm32_dac_priv *priv;
struct regmap *regmap;
struct resource *res;
@@ -69,6 +78,8 @@ static int stm32_dac_probe(struct platform_device *pdev)
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+ cfg = (const struct stm32_dac_cfg *)
+ of_match_device(dev->driver->of_match_table, dev)->data;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mmio = devm_ioremap_resource(dev, res);
@@ -114,19 +125,23 @@ static int stm32_dac_probe(struct platform_device *pdev)
goto err_vref;
}
- priv->rst = devm_reset_control_get(dev, NULL);
+ priv->rst = devm_reset_control_get_exclusive(dev, NULL);
if (!IS_ERR(priv->rst)) {
reset_control_assert(priv->rst);
udelay(2);
reset_control_deassert(priv->rst);
}
- /* When clock speed is higher than 80MHz, set HFSEL */
- priv->common.hfsel = (clk_get_rate(priv->pclk) > 80000000UL);
- ret = regmap_update_bits(regmap, STM32_DAC_CR, STM32H7_DAC_CR_HFSEL,
- priv->common.hfsel ? STM32H7_DAC_CR_HFSEL : 0);
- if (ret)
- goto err_pclk;
+ if (cfg && cfg->has_hfsel) {
+ /* When clock speed is higher than 80MHz, set HFSEL */
+ priv->common.hfsel = (clk_get_rate(priv->pclk) > 80000000UL);
+ ret = regmap_update_bits(regmap, STM32_DAC_CR,
+ STM32H7_DAC_CR_HFSEL,
+ priv->common.hfsel ?
+ STM32H7_DAC_CR_HFSEL : 0);
+ if (ret)
+ goto err_pclk;
+ }
platform_set_drvdata(pdev, &priv->common);
@@ -158,8 +173,17 @@ static int stm32_dac_remove(struct platform_device *pdev)
return 0;
}
+static const struct stm32_dac_cfg stm32h7_dac_cfg = {
+ .has_hfsel = true,
+};
+
static const struct of_device_id stm32_dac_of_match[] = {
- { .compatible = "st,stm32h7-dac-core", },
+ {
+ .compatible = "st,stm32f4-dac-core",
+ }, {
+ .compatible = "st,stm32h7-dac-core",
+ .data = (void *)&stm32h7_dac_cfg,
+ },
{},
};
MODULE_DEVICE_TABLE(of, stm32_dac_of_match);
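
The stm32h7_dac_cfg/.data hookup above is the usual way to key optional features off the compatible string; a generic sketch (hypothetical vendor,foo names, using of_device_get_match_data() instead of the open-coded of_match_device() call from the patch) looks like this:

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct foo_cfg {
	bool has_hfsel;			/* variant-specific feature flag */
};

static const struct foo_cfg foo_v2_cfg = {
	.has_hfsel = true,
};

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo-v1" },			/* .data NULL: defaults */
	{ .compatible = "vendor,foo-v2", .data = &foo_v2_cfg },
	{ }
};

static int foo_probe(struct platform_device *pdev)
{
	const struct foo_cfg *cfg = of_device_get_match_data(&pdev->dev);

	if (cfg && cfg->has_hfsel) {
		/* enable the optional feature only on variants that have it */
	}

	return 0;
}
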
diff --git a/drivers/iio/dac/stm32-dac.c b/drivers/iio/dac/stm32-dac.c
index 50f8ec091058..c1864e8aa851 100644
--- a/drivers/iio/dac/stm32-dac.c
+++ b/drivers/iio/dac/stm32-dac.c
@@ -268,7 +268,7 @@ static int stm32_dac_chan_of_init(struct iio_dev *indio_dev)
break;
}
if (i >= ARRAY_SIZE(stm32_dac_channels)) {
- dev_err(&indio_dev->dev, "Invalid st,dac-channel\n");
+ dev_err(&indio_dev->dev, "Invalid reg property\n");
return -EINVAL;
}
diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c
index 2be2a5d287e6..e0d241a9aa30 100644
--- a/drivers/iio/gyro/mpu3050-core.c
+++ b/drivers/iio/gyro/mpu3050-core.c
@@ -1063,11 +1063,6 @@ static int mpu3050_trigger_probe(struct iio_dev *indio_dev, int irq)
case IRQF_TRIGGER_RISING:
dev_info(&indio_dev->dev,
"pulse interrupts on the rising edge\n");
- if (mpu3050->irq_opendrain) {
- dev_info(&indio_dev->dev,
- "rising edge incompatible with open drain\n");
- mpu3050->irq_opendrain = false;
- }
break;
case IRQF_TRIGGER_FALLING:
mpu3050->irq_actl = true;
@@ -1078,11 +1073,6 @@ static int mpu3050_trigger_probe(struct iio_dev *indio_dev, int irq)
mpu3050->irq_latch = true;
dev_info(&indio_dev->dev,
"interrupts active high level\n");
- if (mpu3050->irq_opendrain) {
- dev_info(&indio_dev->dev,
- "active high incompatible with open drain\n");
- mpu3050->irq_opendrain = false;
- }
/*
* With level IRQs, we mask the IRQ until it is processed,
* but with edge IRQs (pulses) we can queue several interrupts
diff --git a/drivers/iio/gyro/st_gyro.h b/drivers/iio/gyro/st_gyro.h
index a5c5c4e29add..48923ae6ac3b 100644
--- a/drivers/iio/gyro/st_gyro.h
+++ b/drivers/iio/gyro/st_gyro.h
@@ -19,6 +19,7 @@
#define LSM330DL_GYRO_DEV_NAME "lsm330dl_gyro"
#define LSM330DLC_GYRO_DEV_NAME "lsm330dlc_gyro"
#define L3GD20_GYRO_DEV_NAME "l3gd20"
+#define L3GD20H_GYRO_DEV_NAME "l3gd20h"
#define L3G4IS_GYRO_DEV_NAME "l3g4is_ui"
#define LSM330_GYRO_DEV_NAME "lsm330_gyro"
#define LSM9DS0_GYRO_DEV_NAME "lsm9ds0_gyro"
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
index 2a42b3d583e8..e366422e8512 100644
--- a/drivers/iio/gyro/st_gyro_core.c
+++ b/drivers/iio/gyro/st_gyro_core.c
@@ -35,6 +35,7 @@
#define ST_GYRO_DEFAULT_OUT_Z_L_ADDR 0x2c
/* FULLSCALE */
+#define ST_GYRO_FS_AVL_245DPS 245
#define ST_GYRO_FS_AVL_250DPS 250
#define ST_GYRO_FS_AVL_500DPS 500
#define ST_GYRO_FS_AVL_2000DPS 2000
@@ -196,17 +197,17 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
.wai = 0xd7,
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
- [0] = L3GD20_GYRO_DEV_NAME,
+ [0] = L3GD20H_GYRO_DEV_NAME,
},
.ch = (struct iio_chan_spec *)st_gyro_16bit_channels,
.odr = {
.addr = 0x20,
.mask = 0xc0,
.odr_avl = {
- { .hz = 95, .value = 0x00, },
- { .hz = 190, .value = 0x01, },
- { .hz = 380, .value = 0x02, },
- { .hz = 760, .value = 0x03, },
+ { .hz = 100, .value = 0x00, },
+ { .hz = 200, .value = 0x01, },
+ { .hz = 400, .value = 0x02, },
+ { .hz = 800, .value = 0x03, },
},
},
.pw = {
@@ -224,7 +225,7 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
.mask = 0x30,
.fs_avl = {
[0] = {
- .num = ST_GYRO_FS_AVL_250DPS,
+ .num = ST_GYRO_FS_AVL_245DPS,
.value = 0x00,
.gain = IIO_DEGREE_TO_RAD(8750),
},
diff --git a/drivers/iio/gyro/st_gyro_i2c.c b/drivers/iio/gyro/st_gyro_i2c.c
index 40056b821036..b405b82b9177 100644
--- a/drivers/iio/gyro/st_gyro_i2c.c
+++ b/drivers/iio/gyro/st_gyro_i2c.c
@@ -41,6 +41,10 @@ static const struct of_device_id st_gyro_of_match[] = {
.data = L3GD20_GYRO_DEV_NAME,
},
{
+ .compatible = "st,l3gd20h-gyro",
+ .data = L3GD20H_GYRO_DEV_NAME,
+ },
+ {
.compatible = "st,l3g4is-gyro",
.data = L3G4IS_GYRO_DEV_NAME,
},
@@ -71,7 +75,8 @@ static int st_gyro_i2c_probe(struct i2c_client *client,
return -ENOMEM;
gdata = iio_priv(indio_dev);
- st_sensors_of_i2c_probe(client, st_gyro_of_match);
+ st_sensors_of_name_probe(&client->dev, st_gyro_of_match,
+ client->name, sizeof(client->name));
st_sensors_i2c_configure(indio_dev, client, gdata);
@@ -95,6 +100,7 @@ static const struct i2c_device_id st_gyro_id_table[] = {
{ LSM330DL_GYRO_DEV_NAME },
{ LSM330DLC_GYRO_DEV_NAME },
{ L3GD20_GYRO_DEV_NAME },
+ { L3GD20H_GYRO_DEV_NAME },
{ L3G4IS_GYRO_DEV_NAME },
{ LSM330_GYRO_DEV_NAME },
{ LSM9DS0_GYRO_DEV_NAME },
diff --git a/drivers/iio/gyro/st_gyro_spi.c b/drivers/iio/gyro/st_gyro_spi.c
index fbf2faed501c..0b52ed577dc2 100644
--- a/drivers/iio/gyro/st_gyro_spi.c
+++ b/drivers/iio/gyro/st_gyro_spi.c
@@ -18,6 +18,56 @@
#include <linux/iio/common/st_sensors_spi.h>
#include "st_gyro.h"
+#ifdef CONFIG_OF
+/*
+ * For new single-chip sensors use <device_name> as compatible string.
+ * For old single-chip devices keep <device_name>-gyro to maintain
+ * compatibility.
+ */
+static const struct of_device_id st_gyro_of_match[] = {
+ {
+ .compatible = "st,l3g4200d-gyro",
+ .data = L3G4200D_GYRO_DEV_NAME,
+ },
+ {
+ .compatible = "st,lsm330d-gyro",
+ .data = LSM330D_GYRO_DEV_NAME,
+ },
+ {
+ .compatible = "st,lsm330dl-gyro",
+ .data = LSM330DL_GYRO_DEV_NAME,
+ },
+ {
+ .compatible = "st,lsm330dlc-gyro",
+ .data = LSM330DLC_GYRO_DEV_NAME,
+ },
+ {
+ .compatible = "st,l3gd20-gyro",
+ .data = L3GD20_GYRO_DEV_NAME,
+ },
+ {
+ .compatible = "st,l3gd20h-gyro",
+ .data = L3GD20H_GYRO_DEV_NAME,
+ },
+ {
+ .compatible = "st,l3g4is-gyro",
+ .data = L3G4IS_GYRO_DEV_NAME,
+ },
+ {
+ .compatible = "st,lsm330-gyro",
+ .data = LSM330_GYRO_DEV_NAME,
+ },
+ {
+ .compatible = "st,lsm9ds0-gyro",
+ .data = LSM9DS0_GYRO_DEV_NAME,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, st_gyro_of_match);
+#else
+#define st_gyro_of_match NULL
+#endif
+
static int st_gyro_spi_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
@@ -30,6 +80,8 @@ static int st_gyro_spi_probe(struct spi_device *spi)
gdata = iio_priv(indio_dev);
+ st_sensors_of_name_probe(&spi->dev, st_gyro_of_match,
+ spi->modalias, sizeof(spi->modalias));
st_sensors_spi_configure(indio_dev, spi, gdata);
err = st_gyro_common_probe(indio_dev);
@@ -52,6 +104,7 @@ static const struct spi_device_id st_gyro_id_table[] = {
{ LSM330DL_GYRO_DEV_NAME },
{ LSM330DLC_GYRO_DEV_NAME },
{ L3GD20_GYRO_DEV_NAME },
+ { L3GD20H_GYRO_DEV_NAME },
{ L3G4IS_GYRO_DEV_NAME },
{ LSM330_GYRO_DEV_NAME },
{ LSM9DS0_GYRO_DEV_NAME },
@@ -62,6 +115,7 @@ MODULE_DEVICE_TABLE(spi, st_gyro_id_table);
static struct spi_driver st_gyro_driver = {
.driver = {
.name = "st-gyro-spi",
+ .of_match_table = of_match_ptr(st_gyro_of_match),
},
.probe = st_gyro_spi_probe,
.remove = st_gyro_spi_remove,
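
The "#define st_gyro_of_match NULL" fallback above pairs with the of_match_ptr() wrapper used in the driver struct; simplified from <linux/of.h>, the helper behaves as sketched below, so .of_match_table is simply NULL on kernels built without devicetree support.

/* Simplified sketch of the <linux/of.h> helper */
#ifdef CONFIG_OF
#define of_match_ptr(_ptr)	(_ptr)
#else
#define of_match_ptr(_ptr)	NULL
#endif
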
diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig
index 14b9ce453d9d..2c0fc9a400b8 100644
--- a/drivers/iio/humidity/Kconfig
+++ b/drivers/iio/humidity/Kconfig
@@ -31,7 +31,8 @@ config HDC100X
select IIO_TRIGGERED_BUFFER
help
Say yes here to build support for the Texas Instruments
- HDC1000 and HDC1008 relative humidity and temperature sensors.
+ HDC1000, HDC1008, HDC1010, HDC1050, and HDC1080 relative
+ humidity and temperature sensors.
To compile this driver as a module, choose M here: the module
will be called hdc100x.
diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c
index aa17115f54c9..7851bd90ef64 100644
--- a/drivers/iio/humidity/hdc100x.c
+++ b/drivers/iio/humidity/hdc100x.c
@@ -13,6 +13,12 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
+ * Datasheets:
+ * http://www.ti.com/product/HDC1000/datasheet
+ * http://www.ti.com/product/HDC1008/datasheet
+ * http://www.ti.com/product/HDC1010/datasheet
+ * http://www.ti.com/product/HDC1050/datasheet
+ * http://www.ti.com/product/HDC1080/datasheet
*/
#include <linux/delay.h>
@@ -414,13 +420,29 @@ static int hdc100x_remove(struct i2c_client *client)
static const struct i2c_device_id hdc100x_id[] = {
{ "hdc100x", 0 },
+ { "hdc1000", 0 },
+ { "hdc1008", 0 },
+ { "hdc1010", 0 },
+ { "hdc1050", 0 },
+ { "hdc1080", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, hdc100x_id);
+static const struct of_device_id hdc100x_dt_ids[] = {
+ { .compatible = "ti,hdc1000" },
+ { .compatible = "ti,hdc1008" },
+ { .compatible = "ti,hdc1010" },
+ { .compatible = "ti,hdc1050" },
+ { .compatible = "ti,hdc1080" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, hdc100x_dt_ids);
+
static struct i2c_driver hdc100x_driver = {
.driver = {
.name = "hdc100x",
+ .of_match_table = of_match_ptr(hdc100x_dt_ids),
},
.probe = hdc100x_probe,
.remove = hdc100x_remove,
diff --git a/drivers/iio/humidity/hts221.h b/drivers/iio/humidity/hts221.h
index 94510266e0a5..51d021966222 100644
--- a/drivers/iio/humidity/hts221.h
+++ b/drivers/iio/humidity/hts221.h
@@ -30,12 +30,6 @@ struct hts221_transfer_function {
int (*write)(struct device *dev, u8 addr, int len, u8 *data);
};
-#define HTS221_AVG_DEPTH 8
-struct hts221_avg_avl {
- u16 avg;
- u8 val;
-};
-
enum hts221_sensor_type {
HTS221_SENSOR_H,
HTS221_SENSOR_T,
@@ -66,10 +60,9 @@ struct hts221_hw {
extern const struct dev_pm_ops hts221_pm_ops;
-int hts221_config_drdy(struct hts221_hw *hw, bool enable);
+int hts221_write_with_mask(struct hts221_hw *hw, u8 addr, u8 mask, u8 val);
int hts221_probe(struct iio_dev *iio_dev);
-int hts221_power_on(struct hts221_hw *hw);
-int hts221_power_off(struct hts221_hw *hw);
+int hts221_set_enable(struct hts221_hw *hw, bool enable);
int hts221_allocate_buffers(struct hts221_hw *hw);
int hts221_allocate_trigger(struct hts221_hw *hw);
diff --git a/drivers/iio/humidity/hts221_buffer.c b/drivers/iio/humidity/hts221_buffer.c
index 7d19a3da7ab7..9690dfe9a844 100644
--- a/drivers/iio/humidity/hts221_buffer.c
+++ b/drivers/iio/humidity/hts221_buffer.c
@@ -20,8 +20,16 @@
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/buffer.h>
+#include <linux/platform_data/st_sensors_pdata.h>
+
#include "hts221.h"
+#define HTS221_REG_DRDY_HL_ADDR 0x22
+#define HTS221_REG_DRDY_HL_MASK BIT(7)
+#define HTS221_REG_DRDY_PP_OD_ADDR 0x22
+#define HTS221_REG_DRDY_PP_OD_MASK BIT(6)
+#define HTS221_REG_DRDY_EN_ADDR 0x22
+#define HTS221_REG_DRDY_EN_MASK BIT(2)
#define HTS221_REG_STATUS_ADDR 0x27
#define HTS221_RH_DRDY_MASK BIT(1)
#define HTS221_TEMP_DRDY_MASK BIT(0)
@@ -30,8 +38,12 @@ static int hts221_trig_set_state(struct iio_trigger *trig, bool state)
{
struct iio_dev *iio_dev = iio_trigger_get_drvdata(trig);
struct hts221_hw *hw = iio_priv(iio_dev);
+ int err;
+
+ err = hts221_write_with_mask(hw, HTS221_REG_DRDY_EN_ADDR,
+ HTS221_REG_DRDY_EN_MASK, state);
- return hts221_config_drdy(hw, state);
+ return err < 0 ? err : 0;
}
static const struct iio_trigger_ops hts221_trigger_ops = {
@@ -67,6 +79,9 @@ static irqreturn_t hts221_trigger_handler_thread(int irq, void *private)
int hts221_allocate_trigger(struct hts221_hw *hw)
{
struct iio_dev *iio_dev = iio_priv_to_dev(hw);
+ bool irq_active_low = false, open_drain = false;
+ struct device_node *np = hw->dev->of_node;
+ struct st_sensors_platform_data *pdata;
unsigned long irq_type;
int err;
@@ -76,6 +91,10 @@ int hts221_allocate_trigger(struct hts221_hw *hw)
case IRQF_TRIGGER_HIGH:
case IRQF_TRIGGER_RISING:
break;
+ case IRQF_TRIGGER_LOW:
+ case IRQF_TRIGGER_FALLING:
+ irq_active_low = true;
+ break;
default:
dev_info(hw->dev,
"mode %lx unsupported, using IRQF_TRIGGER_RISING\n",
@@ -84,6 +103,24 @@ int hts221_allocate_trigger(struct hts221_hw *hw)
break;
}
+ err = hts221_write_with_mask(hw, HTS221_REG_DRDY_HL_ADDR,
+ HTS221_REG_DRDY_HL_MASK, irq_active_low);
+ if (err < 0)
+ return err;
+
+ pdata = (struct st_sensors_platform_data *)hw->dev->platform_data;
+ if ((np && of_property_read_bool(np, "drive-open-drain")) ||
+ (pdata && pdata->open_drain)) {
+ irq_type |= IRQF_SHARED;
+ open_drain = true;
+ }
+
+ err = hts221_write_with_mask(hw, HTS221_REG_DRDY_PP_OD_ADDR,
+ HTS221_REG_DRDY_PP_OD_MASK,
+ open_drain);
+ if (err < 0)
+ return err;
+
err = devm_request_threaded_irq(hw->dev, hw->irq, NULL,
hts221_trigger_handler_thread,
irq_type | IRQF_ONESHOT,
@@ -109,12 +146,12 @@ int hts221_allocate_trigger(struct hts221_hw *hw)
static int hts221_buffer_preenable(struct iio_dev *iio_dev)
{
- return hts221_power_on(iio_priv(iio_dev));
+ return hts221_set_enable(iio_priv(iio_dev), true);
}
static int hts221_buffer_postdisable(struct iio_dev *iio_dev)
{
- return hts221_power_off(iio_priv(iio_dev));
+ return hts221_set_enable(iio_priv(iio_dev), false);
}
static const struct iio_buffer_setup_ops hts221_buffer_ops = {
diff --git a/drivers/iio/humidity/hts221_core.c b/drivers/iio/humidity/hts221_core.c
index a56da3999e00..32524a8dc66f 100644
--- a/drivers/iio/humidity/hts221_core.c
+++ b/drivers/iio/humidity/hts221_core.c
@@ -23,7 +23,6 @@
#define HTS221_REG_CNTRL1_ADDR 0x20
#define HTS221_REG_CNTRL2_ADDR 0x21
-#define HTS221_REG_CNTRL3_ADDR 0x22
#define HTS221_REG_AVG_ADDR 0x10
#define HTS221_REG_H_OUT_L 0x28
@@ -32,30 +31,9 @@
#define HTS221_HUMIDITY_AVG_MASK 0x07
#define HTS221_TEMP_AVG_MASK 0x38
-#define HTS221_ODR_MASK 0x87
+#define HTS221_ODR_MASK 0x03
#define HTS221_BDU_MASK BIT(2)
-
-#define HTS221_DRDY_MASK BIT(2)
-
-#define HTS221_ENABLE_SENSOR BIT(7)
-
-#define HTS221_HUMIDITY_AVG_4 0x00 /* 0.4 %RH */
-#define HTS221_HUMIDITY_AVG_8 0x01 /* 0.3 %RH */
-#define HTS221_HUMIDITY_AVG_16 0x02 /* 0.2 %RH */
-#define HTS221_HUMIDITY_AVG_32 0x03 /* 0.15 %RH */
-#define HTS221_HUMIDITY_AVG_64 0x04 /* 0.1 %RH */
-#define HTS221_HUMIDITY_AVG_128 0x05 /* 0.07 %RH */
-#define HTS221_HUMIDITY_AVG_256 0x06 /* 0.05 %RH */
-#define HTS221_HUMIDITY_AVG_512 0x07 /* 0.03 %RH */
-
-#define HTS221_TEMP_AVG_2 0x00 /* 0.08 degC */
-#define HTS221_TEMP_AVG_4 0x08 /* 0.05 degC */
-#define HTS221_TEMP_AVG_8 0x10 /* 0.04 degC */
-#define HTS221_TEMP_AVG_16 0x18 /* 0.03 degC */
-#define HTS221_TEMP_AVG_32 0x20 /* 0.02 degC */
-#define HTS221_TEMP_AVG_64 0x28 /* 0.015 degC */
-#define HTS221_TEMP_AVG_128 0x30 /* 0.01 degC */
-#define HTS221_TEMP_AVG_256 0x38 /* 0.007 degC */
+#define HTS221_ENABLE_MASK BIT(7)
/* calibration registers */
#define HTS221_REG_0RH_CAL_X_H 0x36
@@ -73,10 +51,11 @@ struct hts221_odr {
u8 val;
};
+#define HTS221_AVG_DEPTH 8
struct hts221_avg {
u8 addr;
u8 mask;
- struct hts221_avg_avl avg_avl[HTS221_AVG_DEPTH];
+ u16 avg_avl[HTS221_AVG_DEPTH];
};
static const struct hts221_odr hts221_odr_table[] = {
@@ -90,28 +69,28 @@ static const struct hts221_avg hts221_avg_list[] = {
.addr = HTS221_REG_AVG_ADDR,
.mask = HTS221_HUMIDITY_AVG_MASK,
.avg_avl = {
- { 4, HTS221_HUMIDITY_AVG_4 },
- { 8, HTS221_HUMIDITY_AVG_8 },
- { 16, HTS221_HUMIDITY_AVG_16 },
- { 32, HTS221_HUMIDITY_AVG_32 },
- { 64, HTS221_HUMIDITY_AVG_64 },
- { 128, HTS221_HUMIDITY_AVG_128 },
- { 256, HTS221_HUMIDITY_AVG_256 },
- { 512, HTS221_HUMIDITY_AVG_512 },
+ 4, /* 0.4 %RH */
+ 8, /* 0.3 %RH */
+ 16, /* 0.2 %RH */
+ 32, /* 0.15 %RH */
+ 64, /* 0.1 %RH */
+ 128, /* 0.07 %RH */
+ 256, /* 0.05 %RH */
+ 512, /* 0.03 %RH */
},
},
{
.addr = HTS221_REG_AVG_ADDR,
.mask = HTS221_TEMP_AVG_MASK,
.avg_avl = {
- { 2, HTS221_TEMP_AVG_2 },
- { 4, HTS221_TEMP_AVG_4 },
- { 8, HTS221_TEMP_AVG_8 },
- { 16, HTS221_TEMP_AVG_16 },
- { 32, HTS221_TEMP_AVG_32 },
- { 64, HTS221_TEMP_AVG_64 },
- { 128, HTS221_TEMP_AVG_128 },
- { 256, HTS221_TEMP_AVG_256 },
+ 2, /* 0.08 degC */
+ 4, /* 0.05 degC */
+ 8, /* 0.04 degC */
+ 16, /* 0.03 degC */
+ 32, /* 0.02 degC */
+ 64, /* 0.015 degC */
+ 128, /* 0.01 degC */
+ 256, /* 0.007 degC */
},
},
};
@@ -152,8 +131,7 @@ static const struct iio_chan_spec hts221_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(2),
};
-static int hts221_write_with_mask(struct hts221_hw *hw, u8 addr, u8 mask,
- u8 val)
+int hts221_write_with_mask(struct hts221_hw *hw, u8 addr, u8 mask, u8 val)
{
u8 data;
int err;
@@ -166,7 +144,7 @@ static int hts221_write_with_mask(struct hts221_hw *hw, u8 addr, u8 mask,
goto unlock;
}
- data = (data & ~mask) | (val & mask);
+ data = (data & ~mask) | ((val << __ffs(mask)) & mask);
err = hw->tf->write(hw->dev, addr, sizeof(data), &data);
if (err < 0)
@@ -199,21 +177,9 @@ static int hts221_check_whoami(struct hts221_hw *hw)
return 0;
}
-int hts221_config_drdy(struct hts221_hw *hw, bool enable)
-{
- u8 val = enable ? BIT(2) : 0;
- int err;
-
- err = hts221_write_with_mask(hw, HTS221_REG_CNTRL3_ADDR,
- HTS221_DRDY_MASK, val);
-
- return err < 0 ? err : 0;
-}
-
static int hts221_update_odr(struct hts221_hw *hw, u8 odr)
{
int i, err;
- u8 val;
for (i = 0; i < ARRAY_SIZE(hts221_odr_table); i++)
if (hts221_odr_table[i].hz == odr)
@@ -222,9 +188,8 @@ static int hts221_update_odr(struct hts221_hw *hw, u8 odr)
if (i == ARRAY_SIZE(hts221_odr_table))
return -EINVAL;
- val = HTS221_ENABLE_SENSOR | HTS221_BDU_MASK | hts221_odr_table[i].val;
err = hts221_write_with_mask(hw, HTS221_REG_CNTRL1_ADDR,
- HTS221_ODR_MASK, val);
+ HTS221_ODR_MASK, hts221_odr_table[i].val);
if (err < 0)
return err;
@@ -241,14 +206,13 @@ static int hts221_update_avg(struct hts221_hw *hw,
const struct hts221_avg *avg = &hts221_avg_list[type];
for (i = 0; i < HTS221_AVG_DEPTH; i++)
- if (avg->avg_avl[i].avg == val)
+ if (avg->avg_avl[i] == val)
break;
if (i == HTS221_AVG_DEPTH)
return -EINVAL;
- err = hts221_write_with_mask(hw, avg->addr, avg->mask,
- avg->avg_avl[i].val);
+ err = hts221_write_with_mask(hw, avg->addr, avg->mask, i);
if (err < 0)
return err;
@@ -283,7 +247,7 @@ hts221_sysfs_rh_oversampling_avail(struct device *dev,
for (i = 0; i < ARRAY_SIZE(avg->avg_avl); i++)
len += scnprintf(buf + len, PAGE_SIZE - len, "%d ",
- avg->avg_avl[i].avg);
+ avg->avg_avl[i]);
buf[len - 1] = '\n';
return len;
@@ -300,36 +264,22 @@ hts221_sysfs_temp_oversampling_avail(struct device *dev,
for (i = 0; i < ARRAY_SIZE(avg->avg_avl); i++)
len += scnprintf(buf + len, PAGE_SIZE - len, "%d ",
- avg->avg_avl[i].avg);
+ avg->avg_avl[i]);
buf[len - 1] = '\n';
return len;
}
-int hts221_power_on(struct hts221_hw *hw)
-{
- int err;
-
- err = hts221_update_odr(hw, hw->odr);
- if (err < 0)
- return err;
-
- hw->enabled = true;
-
- return 0;
-}
-
-int hts221_power_off(struct hts221_hw *hw)
+int hts221_set_enable(struct hts221_hw *hw, bool enable)
{
- __le16 data = 0;
int err;
- err = hw->tf->write(hw->dev, HTS221_REG_CNTRL1_ADDR, sizeof(data),
- (u8 *)&data);
+ err = hts221_write_with_mask(hw, HTS221_REG_CNTRL1_ADDR,
+ HTS221_ENABLE_MASK, enable);
if (err < 0)
return err;
- hw->enabled = false;
+ hw->enabled = enable;
return 0;
}
@@ -484,7 +434,7 @@ static int hts221_read_oneshot(struct hts221_hw *hw, u8 addr, int *val)
u8 data[HTS221_DATA_SIZE];
int err;
- err = hts221_power_on(hw);
+ err = hts221_set_enable(hw, true);
if (err < 0)
return err;
@@ -494,7 +444,7 @@ static int hts221_read_oneshot(struct hts221_hw *hw, u8 addr, int *val)
if (err < 0)
return err;
- hts221_power_off(hw);
+ hts221_set_enable(hw, false);
*val = (s16)get_unaligned_le16(data);
@@ -534,13 +484,13 @@ static int hts221_read_raw(struct iio_dev *iio_dev,
case IIO_HUMIDITYRELATIVE:
avg = &hts221_avg_list[HTS221_SENSOR_H];
idx = hw->sensors[HTS221_SENSOR_H].cur_avg_idx;
- *val = avg->avg_avl[idx].avg;
+ *val = avg->avg_avl[idx];
ret = IIO_VAL_INT;
break;
case IIO_TEMP:
avg = &hts221_avg_list[HTS221_SENSOR_T];
idx = hw->sensors[HTS221_SENSOR_T].cur_avg_idx;
- *val = avg->avg_avl[idx].avg;
+ *val = avg->avg_avl[idx];
ret = IIO_VAL_INT;
break;
default:
@@ -644,8 +594,6 @@ int hts221_probe(struct iio_dev *iio_dev)
if (err < 0)
return err;
- hw->odr = hts221_odr_table[0].hz;
-
iio_dev->modes = INDIO_DIRECT_MODE;
iio_dev->dev.parent = hw->dev;
iio_dev->available_scan_masks = hts221_scan_masks;
@@ -654,6 +602,16 @@ int hts221_probe(struct iio_dev *iio_dev)
iio_dev->name = HTS221_DEV_NAME;
iio_dev->info = &hts221_info;
+ /* enable Block Data Update */
+ err = hts221_write_with_mask(hw, HTS221_REG_CNTRL1_ADDR,
+ HTS221_BDU_MASK, 1);
+ if (err < 0)
+ return err;
+
+ err = hts221_update_odr(hw, hts221_odr_table[0].hz);
+ if (err < 0)
+ return err;
+
/* configure humidity sensor */
err = hts221_parse_rh_caldata(hw);
if (err < 0) {
@@ -661,7 +619,7 @@ int hts221_probe(struct iio_dev *iio_dev)
return err;
}
- data = hts221_avg_list[HTS221_SENSOR_H].avg_avl[3].avg;
+ data = hts221_avg_list[HTS221_SENSOR_H].avg_avl[3];
err = hts221_update_avg(hw, HTS221_SENSOR_H, data);
if (err < 0) {
dev_err(hw->dev, "failed to set rh oversampling ratio\n");
@@ -676,7 +634,7 @@ int hts221_probe(struct iio_dev *iio_dev)
return err;
}
- data = hts221_avg_list[HTS221_SENSOR_T].avg_avl[3].avg;
+ data = hts221_avg_list[HTS221_SENSOR_T].avg_avl[3];
err = hts221_update_avg(hw, HTS221_SENSOR_T, data);
if (err < 0) {
dev_err(hw->dev,
@@ -702,11 +660,10 @@ static int __maybe_unused hts221_suspend(struct device *dev)
{
struct iio_dev *iio_dev = dev_get_drvdata(dev);
struct hts221_hw *hw = iio_priv(iio_dev);
- __le16 data = 0;
int err;
- err = hw->tf->write(hw->dev, HTS221_REG_CNTRL1_ADDR, sizeof(data),
- (u8 *)&data);
+ err = hts221_write_with_mask(hw, HTS221_REG_CNTRL1_ADDR,
+ HTS221_ENABLE_MASK, false);
return err < 0 ? err : 0;
}
@@ -718,7 +675,8 @@ static int __maybe_unused hts221_resume(struct device *dev)
int err = 0;
if (hw->enabled)
- err = hts221_update_odr(hw, hw->odr);
+ err = hts221_write_with_mask(hw, HTS221_REG_CNTRL1_ADDR,
+ HTS221_ENABLE_MASK, true);
return err;
}
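
A side effect of the hts221_write_with_mask() change above ("val << __ffs(mask)") is that callers now pass plain field values, such as an index into avg_avl[], and the helper aligns them to the field position itself. A small worked sketch with illustrative values:

#include <linux/bitops.h>	/* __ffs() */
#include <linux/types.h>

/* Example: write averaging index 3 into HTS221_TEMP_AVG_MASK (bits 5:3). */
static u8 hts221_field_example(u8 reg)
{
	u8 mask = 0x38;		/* HTS221_TEMP_AVG_MASK */
	u8 val = 3;		/* plain table index, not a pre-shifted constant */

	/* __ffs(0x38) == 3, so the field value becomes 3 << 3 == 0x18 */
	return (reg & ~mask) | ((val << __ffs(mask)) & mask);
}
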
diff --git a/drivers/iio/humidity/htu21.c b/drivers/iio/humidity/htu21.c
index 0fbbd8c40894..2c4b9be85a05 100644
--- a/drivers/iio/humidity/htu21.c
+++ b/drivers/iio/humidity/htu21.c
@@ -238,11 +238,19 @@ static const struct i2c_device_id htu21_id[] = {
};
MODULE_DEVICE_TABLE(i2c, htu21_id);
+static const struct of_device_id htu21_of_match[] = {
+ { .compatible = "meas,htu21", },
+ { .compatible = "meas,ms8607-humidity", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, htu21_of_match);
+
static struct i2c_driver htu21_driver = {
.probe = htu21_probe,
.id_table = htu21_id,
.driver = {
.name = "htu21",
+ .of_match_table = of_match_ptr(htu21_of_match),
},
};
diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
index fb7c0dbed51c..9b697d35dbef 100644
--- a/drivers/iio/imu/adis16400_core.c
+++ b/drivers/iio/imu/adis16400_core.c
@@ -217,7 +217,7 @@ static int adis16400_set_freq(struct adis16400_state *st, unsigned int freq)
return adis_write_reg_8(&st->adis, ADIS16400_SMPL_PRD, val);
}
-static const unsigned adis16400_3db_divisors[] = {
+static const unsigned int adis16400_3db_divisors[] = {
[0] = 2, /* Special case */
[1] = 6,
[2] = 12,
@@ -890,7 +890,7 @@ static const struct adis_data adis16400_data = {
static void adis16400_setup_chan_mask(struct adis16400_state *st)
{
const struct adis16400_chip_info *chip_info = st->variant;
- unsigned i;
+ unsigned int i;
for (i = 0; i < chip_info->num_channels; i++) {
const struct iio_chan_spec *ch = &chip_info->channels[i];
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
index 8cf84d3488b2..12898424d838 100644
--- a/drivers/iio/imu/adis16480.c
+++ b/drivers/iio/imu/adis16480.c
@@ -696,7 +696,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.gyro_max_val = IIO_RAD_TO_DEGREE(22500),
.gyro_max_scale = 450,
.accel_max_val = IIO_M_S_2_TO_G(12500),
- .accel_max_scale = 5,
+ .accel_max_scale = 10,
},
[ADIS16485] = {
.channels = adis16485_channels,
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
index 2a72acc6e049..e2737dc71b67 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
@@ -31,6 +31,8 @@
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
+#include <linux/platform_data/st_sensors_pdata.h>
+
#include "st_lsm6dsx.h"
#define ST_LSM6DSX_REG_FIFO_THL_ADDR 0x06
@@ -39,6 +41,8 @@
#define ST_LSM6DSX_REG_FIFO_DEC_GXL_ADDR 0x08
#define ST_LSM6DSX_REG_HLACTIVE_ADDR 0x12
#define ST_LSM6DSX_REG_HLACTIVE_MASK BIT(5)
+#define ST_LSM6DSX_REG_PP_OD_ADDR 0x12
+#define ST_LSM6DSX_REG_PP_OD_MASK BIT(4)
#define ST_LSM6DSX_REG_FIFO_MODE_ADDR 0x0a
#define ST_LSM6DSX_FIFO_MODE_MASK GENMASK(2, 0)
#define ST_LSM6DSX_FIFO_ODR_MASK GENMASK(6, 3)
@@ -417,6 +421,8 @@ static const struct iio_buffer_setup_ops st_lsm6dsx_buffer_ops = {
int st_lsm6dsx_fifo_setup(struct st_lsm6dsx_hw *hw)
{
+ struct device_node *np = hw->dev->of_node;
+ struct st_sensors_platform_data *pdata;
struct iio_buffer *buffer;
unsigned long irq_type;
bool irq_active_low;
@@ -444,6 +450,17 @@ int st_lsm6dsx_fifo_setup(struct st_lsm6dsx_hw *hw)
if (err < 0)
return err;
+ pdata = (struct st_sensors_platform_data *)hw->dev->platform_data;
+ if ((np && of_property_read_bool(np, "drive-open-drain")) ||
+ (pdata && pdata->open_drain)) {
+ err = st_lsm6dsx_write_with_mask(hw, ST_LSM6DSX_REG_PP_OD_ADDR,
+ ST_LSM6DSX_REG_PP_OD_MASK, 1);
+ if (err < 0)
+ return err;
+
+ irq_type |= IRQF_SHARED;
+ }
+
err = devm_request_threaded_irq(hw->dev, hw->irq,
st_lsm6dsx_handler_irq,
st_lsm6dsx_handler_thread,
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index da3d06b073bb..069defcc6d9b 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -44,7 +44,7 @@ int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
}
mapi->map = &maps[i];
mapi->indio_dev = indio_dev;
- list_add(&mapi->l, &iio_map_list);
+ list_add_tail(&mapi->l, &iio_map_list);
i++;
}
error_ret:
@@ -205,8 +205,8 @@ static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
break;
else if (name && index >= 0) {
- pr_err("ERROR: could not get IIO channel %s:%s(%i)\n",
- np->full_name, name ? name : "", index);
+ pr_err("ERROR: could not get IIO channel %pOF:%s(%i)\n",
+ np, name ? name : "", index);
return NULL;
}
diff --git a/drivers/iio/light/apds9300.c b/drivers/iio/light/apds9300.c
index 649b26f67813..05eacd1ee40f 100644
--- a/drivers/iio/light/apds9300.c
+++ b/drivers/iio/light/apds9300.c
@@ -505,7 +505,7 @@ static SIMPLE_DEV_PM_OPS(apds9300_pm_ops, apds9300_suspend, apds9300_resume);
#define APDS9300_PM_OPS NULL
#endif
-static struct i2c_device_id apds9300_id[] = {
+static const struct i2c_device_id apds9300_id[] = {
{ APDS9300_DRV_NAME, 0 },
{ }
};
diff --git a/drivers/iio/light/rpr0521.c b/drivers/iio/light/rpr0521.c
index 9d0c2e859bb2..a6efa12613a2 100644
--- a/drivers/iio/light/rpr0521.c
+++ b/drivers/iio/light/rpr0521.c
@@ -9,7 +9,7 @@
*
* IIO driver for RPR-0521RS (7-bit I2C slave address 0x38).
*
- * TODO: illuminance channel, buffer
+ * TODO: illuminance channel
*/
#include <linux/module.h>
@@ -20,6 +20,10 @@
#include <linux/acpi.h>
#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
#include <linux/iio/sysfs.h>
#include <linux/pm_runtime.h>
@@ -30,6 +34,7 @@
#define RPR0521_REG_PXS_DATA 0x44 /* 16-bit, little endian */
#define RPR0521_REG_ALS_DATA0 0x46 /* 16-bit, little endian */
#define RPR0521_REG_ALS_DATA1 0x48 /* 16-bit, little endian */
+#define RPR0521_REG_INTERRUPT 0x4A
#define RPR0521_REG_PS_OFFSET_LSB 0x53
#define RPR0521_REG_ID 0x92
@@ -42,16 +47,31 @@
#define RPR0521_ALS_DATA1_GAIN_SHIFT 2
#define RPR0521_PXS_GAIN_MASK GENMASK(5, 4)
#define RPR0521_PXS_GAIN_SHIFT 4
+#define RPR0521_PXS_PERSISTENCE_MASK GENMASK(3, 0)
+#define RPR0521_INTERRUPT_INT_TRIG_PS_MASK BIT(0)
+#define RPR0521_INTERRUPT_INT_TRIG_ALS_MASK BIT(1)
+#define RPR0521_INTERRUPT_INT_REASSERT_MASK BIT(3)
+#define RPR0521_INTERRUPT_ALS_INT_STATUS_MASK BIT(6)
+#define RPR0521_INTERRUPT_PS_INT_STATUS_MASK BIT(7)
#define RPR0521_MODE_ALS_ENABLE BIT(7)
#define RPR0521_MODE_ALS_DISABLE 0x00
#define RPR0521_MODE_PXS_ENABLE BIT(6)
#define RPR0521_MODE_PXS_DISABLE 0x00
+#define RPR0521_PXS_PERSISTENCE_DRDY 0x00
+
+#define RPR0521_INTERRUPT_INT_TRIG_PS_ENABLE BIT(0)
+#define RPR0521_INTERRUPT_INT_TRIG_PS_DISABLE 0x00
+#define RPR0521_INTERRUPT_INT_TRIG_ALS_ENABLE BIT(1)
+#define RPR0521_INTERRUPT_INT_TRIG_ALS_DISABLE 0x00
+#define RPR0521_INTERRUPT_INT_REASSERT_ENABLE BIT(3)
+#define RPR0521_INTERRUPT_INT_REASSERT_DISABLE 0x00
#define RPR0521_MANUFACT_ID 0xE0
#define RPR0521_DEFAULT_MEAS_TIME 0x06 /* ALS - 100ms, PXS - 100ms */
#define RPR0521_DRV_NAME "RPR0521"
+#define RPR0521_IRQ_NAME "rpr0521_event"
#define RPR0521_REGMAP_NAME "rpr0521_regmap"
#define RPR0521_SLEEP_DELAY_MS 2000
@@ -167,6 +187,9 @@ struct rpr0521_data {
bool als_dev_en;
bool pxs_dev_en;
+ struct iio_trigger *drdy_trigger0;
+ s64 irq_timestamp;
+
/* optimize runtime pm ops - enable/disable device only if needed */
bool als_ps_need_en;
bool pxs_ps_need_en;
@@ -196,6 +219,19 @@ static const struct attribute_group rpr0521_attribute_group = {
.attrs = rpr0521_attributes,
};
+/* Order of the channel data in buffer */
+enum rpr0521_scan_index_order {
+ RPR0521_CHAN_INDEX_PXS,
+ RPR0521_CHAN_INDEX_BOTH,
+ RPR0521_CHAN_INDEX_IR,
+};
+
+static const unsigned long rpr0521_available_scan_masks[] = {
+ BIT(RPR0521_CHAN_INDEX_PXS) | BIT(RPR0521_CHAN_INDEX_BOTH) |
+ BIT(RPR0521_CHAN_INDEX_IR),
+ 0
+};
+
static const struct iio_chan_spec rpr0521_channels[] = {
{
.type = IIO_PROXIMITY,
@@ -204,6 +240,13 @@ static const struct iio_chan_spec rpr0521_channels[] = {
BIT(IIO_CHAN_INFO_OFFSET) |
BIT(IIO_CHAN_INFO_SCALE),
.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .scan_index = RPR0521_CHAN_INDEX_PXS,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_LE,
+ },
},
{
.type = IIO_INTENSITY,
@@ -213,6 +256,13 @@ static const struct iio_chan_spec rpr0521_channels[] = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_SCALE),
.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .scan_index = RPR0521_CHAN_INDEX_BOTH,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_LE,
+ },
},
{
.type = IIO_INTENSITY,
@@ -222,6 +272,13 @@ static const struct iio_chan_spec rpr0521_channels[] = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_SCALE),
.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .scan_index = RPR0521_CHAN_INDEX_IR,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_LE,
+ },
},
};
@@ -330,6 +387,198 @@ static int rpr0521_set_power_state(struct rpr0521_data *data, bool on,
return 0;
}
+/* Interrupt register tells if this sensor caused the interrupt or not. */
+static inline bool rpr0521_is_triggered(struct rpr0521_data *data)
+{
+ int ret;
+ int reg;
+
+ ret = regmap_read(data->regmap, RPR0521_REG_INTERRUPT, &reg);
+ if (ret < 0)
+ return false; /* Reg read failed. */
+ if (reg &
+ (RPR0521_INTERRUPT_ALS_INT_STATUS_MASK |
+ RPR0521_INTERRUPT_PS_INT_STATUS_MASK))
+ return true;
+ else
+ return false; /* Int not from this sensor. */
+}
+
+/* IRQ to trigger handler */
+static irqreturn_t rpr0521_drdy_irq_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct rpr0521_data *data = iio_priv(indio_dev);
+
+ data->irq_timestamp = iio_get_time_ns(indio_dev);
+ /*
+ * We need to wake the thread to read the interrupt reg. It
+ * is not possible to do that here because regmap_read takes a
+ * mutex.
+ */
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t rpr0521_drdy_irq_thread(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct rpr0521_data *data = iio_priv(indio_dev);
+
+ if (rpr0521_is_triggered(data)) {
+ iio_trigger_poll_chained(data->drdy_trigger0);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static irqreturn_t rpr0521_trigger_consumer_store_time(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+
+ /* Other trigger polls store time here. */
+ if (!iio_trigger_using_own(indio_dev))
+ pf->timestamp = iio_get_time_ns(indio_dev);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t rpr0521_trigger_consumer_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct rpr0521_data *data = iio_priv(indio_dev);
+ int err;
+
+ u8 buffer[16]; /* 3 16-bit channels + padding + ts */
+
+ /* Use irq timestamp when reasonable. */
+ if (iio_trigger_using_own(indio_dev) && data->irq_timestamp) {
+ pf->timestamp = data->irq_timestamp;
+ data->irq_timestamp = 0;
+ }
+ /* Other chained trigger polls get timestamp only here. */
+ if (!pf->timestamp)
+ pf->timestamp = iio_get_time_ns(indio_dev);
+
+ err = regmap_bulk_read(data->regmap, RPR0521_REG_PXS_DATA,
+ &buffer,
+ (3 * 2) + 1); /* 3 * 16-bit + (discarded) int clear reg. */
+ if (!err)
+ iio_push_to_buffers_with_timestamp(indio_dev,
+ buffer, pf->timestamp);
+ else
+ dev_err(&data->client->dev,
+ "Trigger consumer can't read from sensor.\n");
+ pf->timestamp = 0;
+
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int rpr0521_write_int_enable(struct rpr0521_data *data)
+{
+ int err;
+
+ /* Interrupt after each measurement */
+ err = regmap_update_bits(data->regmap, RPR0521_REG_PXS_CTRL,
+ RPR0521_PXS_PERSISTENCE_MASK,
+ RPR0521_PXS_PERSISTENCE_DRDY);
+ if (err) {
+ dev_err(&data->client->dev, "PS control reg write fail.\n");
+ return -EBUSY;
+ }
+
+ /* Ignore latch and mode because of drdy */
+ err = regmap_write(data->regmap, RPR0521_REG_INTERRUPT,
+ RPR0521_INTERRUPT_INT_REASSERT_DISABLE |
+ RPR0521_INTERRUPT_INT_TRIG_ALS_DISABLE |
+ RPR0521_INTERRUPT_INT_TRIG_PS_ENABLE
+ );
+ if (err) {
+ dev_err(&data->client->dev, "Interrupt setup write fail.\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int rpr0521_write_int_disable(struct rpr0521_data *data)
+{
+ /* Don't care about clearing mode, assert and latch. */
+ return regmap_write(data->regmap, RPR0521_REG_INTERRUPT,
+ RPR0521_INTERRUPT_INT_TRIG_ALS_DISABLE |
+ RPR0521_INTERRUPT_INT_TRIG_PS_DISABLE
+ );
+}
+
+/*
+ * Trigger producer enable / disable. Note that triggers occur only when
+ * measurement data is ready to be read.
+ */
+static int rpr0521_pxs_drdy_set_state(struct iio_trigger *trigger,
+ bool enable_drdy)
+{
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trigger);
+ struct rpr0521_data *data = iio_priv(indio_dev);
+ int err;
+
+ if (enable_drdy)
+ err = rpr0521_write_int_enable(data);
+ else
+ err = rpr0521_write_int_disable(data);
+ if (err)
+ dev_err(&data->client->dev, "rpr0521_pxs_drdy_set_state failed\n");
+
+ return err;
+}
+
+static const struct iio_trigger_ops rpr0521_trigger_ops = {
+ .set_trigger_state = rpr0521_pxs_drdy_set_state,
+ .owner = THIS_MODULE,
+ };
+
+
+static int rpr0521_buffer_preenable(struct iio_dev *indio_dev)
+{
+ int err;
+ struct rpr0521_data *data = iio_priv(indio_dev);
+
+ mutex_lock(&data->lock);
+ err = rpr0521_set_power_state(data, true,
+ (RPR0521_MODE_PXS_MASK | RPR0521_MODE_ALS_MASK));
+ mutex_unlock(&data->lock);
+ if (err)
+ dev_err(&data->client->dev, "_buffer_preenable fail\n");
+
+ return err;
+}
+
+static int rpr0521_buffer_postdisable(struct iio_dev *indio_dev)
+{
+ int err;
+ struct rpr0521_data *data = iio_priv(indio_dev);
+
+ mutex_lock(&data->lock);
+ err = rpr0521_set_power_state(data, false,
+ (RPR0521_MODE_PXS_MASK | RPR0521_MODE_ALS_MASK));
+ mutex_unlock(&data->lock);
+ if (err)
+ dev_err(&data->client->dev, "_buffer_postdisable fail\n");
+
+ return err;
+}
+
+static const struct iio_buffer_setup_ops rpr0521_buffer_setup_ops = {
+ .preenable = rpr0521_buffer_preenable,
+ .postenable = iio_triggered_buffer_postenable,
+ .predisable = iio_triggered_buffer_predisable,
+ .postdisable = rpr0521_buffer_postdisable,
+};
+
static int rpr0521_get_gain(struct rpr0521_data *data, int chan,
int *val, int *val2)
{
@@ -473,6 +722,7 @@ static int rpr0521_read_raw(struct iio_dev *indio_dev,
{
struct rpr0521_data *data = iio_priv(indio_dev);
int ret;
+ int busy;
u8 device_mask;
__le16 raw_data;
@@ -481,26 +731,30 @@ static int rpr0521_read_raw(struct iio_dev *indio_dev,
if (chan->type != IIO_INTENSITY && chan->type != IIO_PROXIMITY)
return -EINVAL;
+ busy = iio_device_claim_direct_mode(indio_dev);
+ if (busy)
+ return -EBUSY;
+
device_mask = rpr0521_data_reg[chan->address].device_mask;
mutex_lock(&data->lock);
ret = rpr0521_set_power_state(data, true, device_mask);
- if (ret < 0) {
- mutex_unlock(&data->lock);
- return ret;
- }
+ if (ret < 0)
+ goto rpr0521_read_raw_out;
ret = regmap_bulk_read(data->regmap,
rpr0521_data_reg[chan->address].address,
&raw_data, sizeof(raw_data));
if (ret < 0) {
rpr0521_set_power_state(data, false, device_mask);
- mutex_unlock(&data->lock);
- return ret;
+ goto rpr0521_read_raw_out;
}
ret = rpr0521_set_power_state(data, false, device_mask);
+
+rpr0521_read_raw_out:
mutex_unlock(&data->lock);
+ iio_device_release_direct_mode(indio_dev);
if (ret < 0)
return ret;
@@ -617,12 +871,15 @@ static int rpr0521_init(struct rpr0521_data *data)
return ret;
#endif
+ data->irq_timestamp = 0;
+
return 0;
}
static int rpr0521_poweroff(struct rpr0521_data *data)
{
int ret;
+ int tmp;
ret = regmap_update_bits(data->regmap, RPR0521_REG_MODE_CTRL,
RPR0521_MODE_ALS_MASK |
@@ -635,6 +892,16 @@ static int rpr0521_poweroff(struct rpr0521_data *data)
data->als_dev_en = false;
data->pxs_dev_en = false;
+ /*
+ * Int pin keeps state after power off. Set pin to high impedance
+ * mode to prevent power drain.
+ */
+ ret = regmap_read(data->regmap, RPR0521_REG_INTERRUPT, &tmp);
+ if (ret) {
+ dev_err(&data->client->dev, "Failed to reset int pin.\n");
+ return ret;
+ }
+
return 0;
}
@@ -707,6 +974,61 @@ static int rpr0521_probe(struct i2c_client *client,
pm_runtime_set_autosuspend_delay(&client->dev, RPR0521_SLEEP_DELAY_MS);
pm_runtime_use_autosuspend(&client->dev);
+ /*
+ * If a sensor write/read is needed in _probe after _use_autosuspend,
+ * the sensor must be _resumed first using rpr0521_set_power_state().
+ */
+
+ /* IRQ to trigger setup */
+ if (client->irq) {
+ /* Trigger0 producer setup */
+ data->drdy_trigger0 = devm_iio_trigger_alloc(
+ indio_dev->dev.parent,
+ "%s-dev%d", indio_dev->name, indio_dev->id);
+ if (!data->drdy_trigger0) {
+ ret = -ENOMEM;
+ goto err_pm_disable;
+ }
+ data->drdy_trigger0->dev.parent = indio_dev->dev.parent;
+ data->drdy_trigger0->ops = &rpr0521_trigger_ops;
+ indio_dev->available_scan_masks = rpr0521_available_scan_masks;
+ iio_trigger_set_drvdata(data->drdy_trigger0, indio_dev);
+
+ /* Ties irq to trigger producer handler. */
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ rpr0521_drdy_irq_handler, rpr0521_drdy_irq_thread,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ RPR0521_IRQ_NAME, indio_dev);
+ if (ret < 0) {
+ dev_err(&client->dev, "request irq %d for trigger0 failed\n",
+ client->irq);
+ goto err_pm_disable;
+ }
+
+ ret = devm_iio_trigger_register(indio_dev->dev.parent,
+ data->drdy_trigger0);
+ if (ret) {
+ dev_err(&client->dev, "iio trigger register failed\n");
+ goto err_pm_disable;
+ }
+
+ /*
+ * Now the whole pipeline from the physical interrupt (IRQ routed to
+ * the device via devicetree) to the trigger0 output is set up.
+ */
+
+ /* Trigger consumer setup */
+ ret = devm_iio_triggered_buffer_setup(indio_dev->dev.parent,
+ indio_dev,
+ rpr0521_trigger_consumer_store_time,
+ rpr0521_trigger_consumer_handler,
+ &rpr0521_buffer_setup_ops);
+ if (ret < 0) {
+ dev_err(&client->dev, "iio triggered buffer setup failed\n");
+ goto err_pm_disable;
+ }
+ }
+
ret = iio_device_register(indio_dev);
if (ret)
goto err_pm_disable;
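
The new rpr0521 read path above relies on the IIO claim/release direct-mode pair to keep one-shot reads and the triggered buffer from racing; the general shape, sketched for a hypothetical driver (foo_oneshot_read() is a placeholder), is:

#include <linux/iio/iio.h>

static int foo_oneshot_read(struct iio_dev *indio_dev, int *val);

static int foo_read_raw_locked(struct iio_dev *indio_dev, int *val)
{
	int ret;

	/* Fails with -EBUSY while buffered capture owns the device. */
	ret = iio_device_claim_direct_mode(indio_dev);
	if (ret)
		return ret;

	ret = foo_oneshot_read(indio_dev, val);	/* placeholder helper */

	iio_device_release_direct_mode(indio_dev);

	return ret;
}
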
diff --git a/drivers/iio/light/tcs3472.c b/drivers/iio/light/tcs3472.c
index 3aa71e34ae28..09e6ca5e332e 100644
--- a/drivers/iio/light/tcs3472.c
+++ b/drivers/iio/light/tcs3472.c
@@ -11,6 +11,8 @@
* 7-bit I2C slave address 0x39 (TCS34721, TCS34723) or 0x29 (TCS34725,
* TCS34727)
*
+ * Datasheet: http://ams.com/eng/content/download/319364/1117183/file/TCS3472_Datasheet_EN_v2.pdf
+ *
* TODO: interrupt support, thresholds, wait time
*/
@@ -169,7 +171,7 @@ static int tcs3472_write_raw(struct iio_dev *indio_dev,
for (i = 0; i < 256; i++) {
if (val2 == (256 - i) * 2400) {
data->atime = i;
- return i2c_smbus_write_word_data(
+ return i2c_smbus_write_byte_data(
data->client, TCS3472_ATIME,
data->atime);
}
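
For reference on the one-line tcs3472 fix above: ATIME is an 8-bit register, and the two SMBus helpers differ in how many data bytes follow the register address, so the word variant would send an extra byte beyond the intended field. Prototypes from <linux/i2c.h>:

s32 i2c_smbus_write_byte_data(const struct i2c_client *client,
			      u8 command, u8 value);	/* 1 data byte  */
s32 i2c_smbus_write_word_data(const struct i2c_client *client,
			      u8 command, u16 value);	/* 2 data bytes */
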
diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
index e7d4ea75e007..7599693f7fe9 100644
--- a/drivers/iio/light/tsl2563.c
+++ b/drivers/iio/light/tsl2563.c
@@ -626,7 +626,7 @@ static irqreturn_t tsl2563_event_handler(int irq, void *private)
struct tsl2563_chip *chip = iio_priv(dev_info);
iio_push_event(dev_info,
- IIO_UNMOD_EVENT_CODE(IIO_LIGHT,
+ IIO_UNMOD_EVENT_CODE(IIO_INTENSITY,
0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_EITHER),
diff --git a/drivers/iio/light/tsl2583.c b/drivers/iio/light/tsl2583.c
index 1679181d2bdd..fb711ed4862e 100644
--- a/drivers/iio/light/tsl2583.c
+++ b/drivers/iio/light/tsl2583.c
@@ -924,7 +924,7 @@ static const struct dev_pm_ops tsl2583_pm_ops = {
SET_RUNTIME_PM_OPS(tsl2583_suspend, tsl2583_resume, NULL)
};
-static struct i2c_device_id tsl2583_idtable[] = {
+static const struct i2c_device_id tsl2583_idtable[] = {
{ "tsl2580", 0 },
{ "tsl2581", 1 },
{ "tsl2583", 2 },
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
index 421ad90a5fbe..ed9d776d01af 100644
--- a/drivers/iio/magnetometer/Kconfig
+++ b/drivers/iio/magnetometer/Kconfig
@@ -13,8 +13,8 @@ config AK8974
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
- Say yes here to build support for Asahi Kasei AK8974 or
- AMI305 I2C-based 3-axis magnetometer chips.
+ Say yes here to build support for Asahi Kasei AK8974, AMI305 or
+ AMI306 I2C-based 3-axis magnetometer chips.
To compile this driver as a module, choose M here: the module
will be called ak8974.
diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c
index e13370dc9b1c..0bff76e96950 100644
--- a/drivers/iio/magnetometer/ak8974.c
+++ b/drivers/iio/magnetometer/ak8974.c
@@ -20,6 +20,7 @@
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/bitops.h>
+#include <linux/random.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
@@ -36,7 +37,7 @@
* and MSB is at the next higher address.
*/
-/* These registers are common for AK8974 and AMI305 */
+/* These registers are common for AK8974 and AMI30x */
#define AK8974_SELFTEST 0x0C
#define AK8974_SELFTEST_IDLE 0x55
#define AK8974_SELFTEST_OK 0xAA
@@ -44,6 +45,7 @@
#define AK8974_INFO 0x0D
#define AK8974_WHOAMI 0x0F
+#define AK8974_WHOAMI_VALUE_AMI306 0x46
#define AK8974_WHOAMI_VALUE_AMI305 0x47
#define AK8974_WHOAMI_VALUE_AK8974 0x48
@@ -73,6 +75,35 @@
#define AK8974_TEMP 0x31
#define AMI305_TEMP 0x60
+/* AMI306-specific control register */
+#define AMI306_CTRL4 0x5C
+
+/* AMI306 factory calibration data */
+
+/* fine axis sensitivity */
+#define AMI306_FINEOUTPUT_X 0x90
+#define AMI306_FINEOUTPUT_Y 0x92
+#define AMI306_FINEOUTPUT_Z 0x94
+
+/* axis sensitivity */
+#define AMI306_SENS_X 0x96
+#define AMI306_SENS_Y 0x98
+#define AMI306_SENS_Z 0x9A
+
+/* axis cross-interference */
+#define AMI306_GAIN_PARA_XZ 0x9C
+#define AMI306_GAIN_PARA_XY 0x9D
+#define AMI306_GAIN_PARA_YZ 0x9E
+#define AMI306_GAIN_PARA_YX 0x9F
+#define AMI306_GAIN_PARA_ZY 0xA0
+#define AMI306_GAIN_PARA_ZX 0xA1
+
+/* offset at ZERO magnetic field */
+#define AMI306_OFFZERO_X 0xF8
+#define AMI306_OFFZERO_Y 0xFA
+#define AMI306_OFFZERO_Z 0xFC
+
+
#define AK8974_INT_X_HIGH BIT(7) /* Axis over +threshold */
#define AK8974_INT_Y_HIGH BIT(6)
#define AK8974_INT_Z_HIGH BIT(5)
@@ -158,6 +189,26 @@ struct ak8974 {
static const char ak8974_reg_avdd[] = "avdd";
static const char ak8974_reg_dvdd[] = "dvdd";
+static int ak8974_get_u16_val(struct ak8974 *ak8974, u8 reg, u16 *val)
+{
+ int ret;
+ __le16 bulk;
+
+ ret = regmap_bulk_read(ak8974->map, reg, &bulk, 2);
+ if (ret)
+ return ret;
+ *val = le16_to_cpu(bulk);
+
+ return 0;
+}
+
+static int ak8974_set_u16_val(struct ak8974 *ak8974, u8 reg, u16 val)
+{
+ __le16 bulk = cpu_to_le16(val);
+
+ return regmap_bulk_write(ak8974->map, reg, &bulk, 2);
+}
+
static int ak8974_set_power(struct ak8974 *ak8974, bool mode)
{
int ret;
@@ -209,6 +260,12 @@ static int ak8974_configure(struct ak8974 *ak8974)
ret = regmap_write(ak8974->map, AK8974_CTRL3, 0);
if (ret)
return ret;
+ if (ak8974->variant == AK8974_WHOAMI_VALUE_AMI306) {
+ /* magic from datasheet: set high-speed measurement mode */
+ ret = ak8974_set_u16_val(ak8974, AMI306_CTRL4, 0xA07E);
+ if (ret)
+ return ret;
+ }
ret = regmap_write(ak8974->map, AK8974_INT_CTRL, AK8974_INT_CTRL_POL);
if (ret)
return ret;
@@ -388,17 +445,18 @@ static int ak8974_selftest(struct ak8974 *ak8974)
return 0;
}
-static int ak8974_get_u16_val(struct ak8974 *ak8974, u8 reg, u16 *val)
+static void ak8974_read_calib_data(struct ak8974 *ak8974, unsigned int reg,
+ __le16 *tab, size_t tab_size)
{
- int ret;
- __le16 bulk;
-
- ret = regmap_bulk_read(ak8974->map, reg, &bulk, 2);
- if (ret)
- return ret;
- *val = le16_to_cpu(bulk);
-
- return 0;
+ int ret = regmap_bulk_read(ak8974->map, reg, tab, tab_size);
+ if (ret) {
+ memset(tab, 0xFF, tab_size);
+ dev_warn(&ak8974->i2c->dev,
+ "can't read calibration data (regs %u..%zu): %d\n",
+ reg, reg + tab_size - 1, ret);
+ } else {
+ add_device_randomness(tab, tab_size);
+ }
}
static int ak8974_detect(struct ak8974 *ak8974)
@@ -413,9 +471,13 @@ static int ak8974_detect(struct ak8974 *ak8974)
if (ret)
return ret;
+ name = "ami305";
+
switch (whoami) {
+ case AK8974_WHOAMI_VALUE_AMI306:
+ name = "ami306";
+ /* fall-through */
case AK8974_WHOAMI_VALUE_AMI305:
- name = "ami305";
ret = regmap_read(ak8974->map, AMI305_VER, &fw);
if (ret)
return ret;
@@ -423,6 +485,7 @@ static int ak8974_detect(struct ak8974 *ak8974)
ret = ak8974_get_u16_val(ak8974, AMI305_SN, &sn);
if (ret)
return ret;
+ add_device_randomness(&sn, sizeof(sn));
dev_info(&ak8974->i2c->dev,
"detected %s, FW ver %02x, S/N: %04x\n",
name, fw, sn);
@@ -440,6 +503,33 @@ static int ak8974_detect(struct ak8974 *ak8974)
ak8974->name = name;
ak8974->variant = whoami;
+ if (whoami == AK8974_WHOAMI_VALUE_AMI306) {
+ __le16 fab_data1[9], fab_data2[3];
+ int i;
+
+ ak8974_read_calib_data(ak8974, AMI306_FINEOUTPUT_X,
+ fab_data1, sizeof(fab_data1));
+ ak8974_read_calib_data(ak8974, AMI306_OFFZERO_X,
+ fab_data2, sizeof(fab_data2));
+
+ for (i = 0; i < 3; ++i) {
+ static const char axis[3] = "XYZ";
+ static const char pgaxis[6] = "ZYZXYX";
+ unsigned offz = le16_to_cpu(fab_data2[i]) & 0x7F;
+ unsigned fine = le16_to_cpu(fab_data1[i]);
+ unsigned sens = le16_to_cpu(fab_data1[i + 3]);
+ unsigned pgain1 = le16_to_cpu(fab_data1[i + 6]);
+ unsigned pgain2 = pgain1 >> 8;
+
+ pgain1 &= 0xFF;
+
+ dev_info(&ak8974->i2c->dev,
+ "factory calibration for axis %c: offz=%u sens=%u fine=%u pga%c=%u pga%c=%u\n",
+ axis[i], offz, sens, fine, pgaxis[i * 2],
+ pgain1, pgaxis[i * 2 + 1], pgain2);
+ }
+ }
+
return 0;
}
@@ -602,19 +692,27 @@ static bool ak8974_writeable_reg(struct device *dev, unsigned int reg)
case AMI305_OFFSET_Y + 1:
case AMI305_OFFSET_Z:
case AMI305_OFFSET_Z + 1:
- if (ak8974->variant == AK8974_WHOAMI_VALUE_AMI305)
- return true;
- return false;
+ return ak8974->variant == AK8974_WHOAMI_VALUE_AMI305 ||
+ ak8974->variant == AK8974_WHOAMI_VALUE_AMI306;
+ case AMI306_CTRL4:
+ case AMI306_CTRL4 + 1:
+ return ak8974->variant == AK8974_WHOAMI_VALUE_AMI306;
default:
return false;
}
}
+static bool ak8974_precious_reg(struct device *dev, unsigned int reg)
+{
+ return reg == AK8974_INT_CLEAR;
+}
+
static const struct regmap_config ak8974_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = 0xff,
.writeable_reg = ak8974_writeable_reg,
+ .precious_reg = ak8974_precious_reg,
};
static int ak8974_probe(struct i2c_client *i2c,
@@ -678,7 +776,7 @@ static int ak8974_probe(struct i2c_client *i2c,
ret = ak8974_detect(ak8974);
if (ret) {
- dev_err(&i2c->dev, "neither AK8974 nor AMI305 found\n");
+ dev_err(&i2c->dev, "neither AK8974 nor AMI30x found\n");
goto power_off;
}
@@ -827,6 +925,7 @@ static const struct dev_pm_ops ak8974_dev_pm_ops = {
static const struct i2c_device_id ak8974_id[] = {
{"ami305", 0 },
+ {"ami306", 0 },
{"ak8974", 0 },
{}
};
@@ -850,7 +949,7 @@ static struct i2c_driver ak8974_driver = {
};
module_i2c_driver(ak8974_driver);
-MODULE_DESCRIPTION("AK8974 and AMI305 3-axis magnetometer driver");
+MODULE_DESCRIPTION("AK8974 and AMI30x 3-axis magnetometer driver");
MODULE_AUTHOR("Samu Onkalo");
MODULE_AUTHOR("Linus Walleij");
MODULE_LICENSE("GPL v2");
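
The ak8974_get_u16_val()/ak8974_set_u16_val() helpers moved above illustrate the endian-safe idiom for 16-bit values spread over two 8-bit registers: bulk-transfer a __le16 and convert with le16_to_cpu()/cpu_to_le16(). A standalone sketch with placeholder foo_* names:

#include <linux/regmap.h>
#include <linux/types.h>

/* Placeholder helper: read a 16-bit little-endian value from two registers. */
static int foo_read_u16(struct regmap *map, unsigned int reg, u16 *val)
{
	__le16 raw;
	int ret;

	ret = regmap_bulk_read(map, reg, &raw, sizeof(raw));
	if (ret)
		return ret;

	*val = le16_to_cpu(raw);	/* swap only on big-endian hosts */

	return 0;
}
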
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index 825369fb1c57..4ff883942f7b 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -784,6 +784,7 @@ static const struct iio_info ak8975_info = {
.driver_module = THIS_MODULE,
};
+#ifdef CONFIG_ACPI
static const struct acpi_device_id ak_acpi_match[] = {
{"AK8975", AK8975},
{"AK8963", AK8963},
@@ -793,6 +794,7 @@ static const struct acpi_device_id ak_acpi_match[] = {
{ },
};
MODULE_DEVICE_TABLE(acpi, ak_acpi_match);
+#endif
static const char *ak8975_match_acpi_device(struct device *dev,
enum asahi_compass_chipset *chipset)
diff --git a/drivers/iio/magnetometer/st_magn.h b/drivers/iio/magnetometer/st_magn.h
index 9daca4681922..8fe51ce427bd 100644
--- a/drivers/iio/magnetometer/st_magn.h
+++ b/drivers/iio/magnetometer/st_magn.h
@@ -19,6 +19,7 @@
#define LSM303DLM_MAGN_DEV_NAME "lsm303dlm_magn"
#define LIS3MDL_MAGN_DEV_NAME "lis3mdl"
#define LSM303AGR_MAGN_DEV_NAME "lsm303agr_magn"
+#define LIS2MDL_MAGN_DEV_NAME "lis2mdl"
int st_magn_common_probe(struct iio_dev *indio_dev);
void st_magn_common_remove(struct iio_dev *indio_dev);
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index 8e1b0861fbe4..e68368b5b2a3 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -315,7 +315,7 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = {
},
},
},
- .multi_read_bit = false,
+ .multi_read_bit = true,
.bootime = 2,
},
{
@@ -323,6 +323,7 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = {
.wai_addr = 0x4f,
.sensors_supported = {
[0] = LSM303AGR_MAGN_DEV_NAME,
+ [1] = LIS2MDL_MAGN_DEV_NAME,
},
.ch = (struct iio_chan_spec *)st_magn_3_16bit_channels,
.odr = {
@@ -356,9 +357,7 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = {
.drdy_irq = {
.addr = 0x62,
.mask_int1 = 0x01,
- .addr_ihl = 0x63,
- .mask_ihl = 0x04,
- .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .addr_stat_drdy = 0x67,
},
.multi_read_bit = false,
.bootime = 2,
diff --git a/drivers/iio/magnetometer/st_magn_i2c.c b/drivers/iio/magnetometer/st_magn_i2c.c
index 8aa37af306ed..feaa28cf6a77 100644
--- a/drivers/iio/magnetometer/st_magn_i2c.c
+++ b/drivers/iio/magnetometer/st_magn_i2c.c
@@ -40,6 +40,10 @@ static const struct of_device_id st_magn_of_match[] = {
.compatible = "st,lsm303agr-magn",
.data = LSM303AGR_MAGN_DEV_NAME,
},
+ {
+ .compatible = "st,lis2mdl",
+ .data = LIS2MDL_MAGN_DEV_NAME,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_magn_of_match);
@@ -59,7 +63,8 @@ static int st_magn_i2c_probe(struct i2c_client *client,
return -ENOMEM;
mdata = iio_priv(indio_dev);
- st_sensors_of_i2c_probe(client, st_magn_of_match);
+ st_sensors_of_name_probe(&client->dev, st_magn_of_match,
+ client->name, sizeof(client->name));
st_sensors_i2c_configure(indio_dev, client, mdata);
@@ -84,6 +89,7 @@ static const struct i2c_device_id st_magn_id_table[] = {
{ LSM303DLM_MAGN_DEV_NAME },
{ LIS3MDL_MAGN_DEV_NAME },
{ LSM303AGR_MAGN_DEV_NAME },
+ { LIS2MDL_MAGN_DEV_NAME },
{},
};
MODULE_DEVICE_TABLE(i2c, st_magn_id_table);
diff --git a/drivers/iio/magnetometer/st_magn_spi.c b/drivers/iio/magnetometer/st_magn_spi.c
index f3cb4dc05391..7b7cd08fcc32 100644
--- a/drivers/iio/magnetometer/st_magn_spi.c
+++ b/drivers/iio/magnetometer/st_magn_spi.c
@@ -18,6 +18,32 @@
#include <linux/iio/common/st_sensors_spi.h>
#include "st_magn.h"
+#ifdef CONFIG_OF
+/*
+ * For new single-chip sensors use <device_name> as compatible string.
+ * For old single-chip devices keep <device_name>-magn to maintain
+ * compatibility
+ */
+static const struct of_device_id st_magn_of_match[] = {
+ {
+ .compatible = "st,lis3mdl-magn",
+ .data = LIS3MDL_MAGN_DEV_NAME,
+ },
+ {
+ .compatible = "st,lsm303agr-magn",
+ .data = LSM303AGR_MAGN_DEV_NAME,
+ },
+ {
+ .compatible = "st,lis2mdl",
+ .data = LIS2MDL_MAGN_DEV_NAME,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, st_magn_of_match);
+#else
+#define st_magn_of_match NULL
+#endif
+
static int st_magn_spi_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
@@ -30,6 +56,8 @@ static int st_magn_spi_probe(struct spi_device *spi)
mdata = iio_priv(indio_dev);
+ st_sensors_of_name_probe(&spi->dev, st_magn_of_match,
+ spi->modalias, sizeof(spi->modalias));
st_sensors_spi_configure(indio_dev, spi, mdata);
err = st_magn_common_probe(indio_dev);
@@ -50,6 +78,7 @@ static int st_magn_spi_remove(struct spi_device *spi)
static const struct spi_device_id st_magn_id_table[] = {
{ LIS3MDL_MAGN_DEV_NAME },
{ LSM303AGR_MAGN_DEV_NAME },
+ { LIS2MDL_MAGN_DEV_NAME },
{},
};
MODULE_DEVICE_TABLE(spi, st_magn_id_table);
@@ -57,6 +86,7 @@ MODULE_DEVICE_TABLE(spi, st_magn_id_table);
static struct spi_driver st_magn_driver = {
.driver = {
.name = "st-magn-spi",
+ .of_match_table = of_match_ptr(st_magn_of_match),
},
.probe = st_magn_spi_probe,
.remove = st_magn_spi_remove,
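
The #else branch defining st_magn_of_match as NULL is needed because st_sensors_of_name_probe() references the table directly in the probe path, while the driver struct goes through of_match_ptr(), which hides the table in !OF builds. Roughly, simplified from include/linux/of.h:

#ifdef CONFIG_OF
#define of_match_ptr(ptr)	(ptr)
#else
#define of_match_ptr(ptr)	NULL
#endif
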
diff --git a/drivers/iio/orientation/hid-sensor-rotation.c b/drivers/iio/orientation/hid-sensor-rotation.c
index e9fa86c87db5..98fe0c5df380 100644
--- a/drivers/iio/orientation/hid-sensor-rotation.c
+++ b/drivers/iio/orientation/hid-sensor-rotation.c
@@ -238,7 +238,7 @@ static int dev_rot_parse_report(struct platform_device *pdev,
static int hid_dev_rot_probe(struct platform_device *pdev)
{
int ret;
- static char *name;
+ char *name;
struct iio_dev *indio_dev;
struct dev_rot_state *rot_state;
struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index d82b788374b6..0d2ea3ee371b 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -282,6 +282,11 @@ static int bmp280_read_temp(struct bmp280_data *data,
}
adc_temp = be32_to_cpu(tmp) >> 12;
+ if (adc_temp == BMP280_TEMP_SKIPPED) {
+ /* reading was skipped */
+ dev_err(data->dev, "reading temperature skipped\n");
+ return -EIO;
+ }
comp_temp = bmp280_compensate_temp(data, adc_temp);
/*
@@ -317,6 +322,11 @@ static int bmp280_read_press(struct bmp280_data *data,
}
adc_press = be32_to_cpu(tmp) >> 12;
+ if (adc_press == BMP280_PRESS_SKIPPED) {
+ /* reading was skipped */
+ dev_err(data->dev, "reading pressure skipped\n");
+ return -EIO;
+ }
comp_press = bmp280_compensate_press(data, adc_press);
*val = comp_press;
@@ -345,6 +355,11 @@ static int bmp280_read_humid(struct bmp280_data *data, int *val, int *val2)
}
adc_humidity = be16_to_cpu(tmp);
+ if (adc_humidity == BMP280_HUMIDITY_SKIPPED) {
+ /* reading was skipped */
+ dev_err(data->dev, "reading humidity skipped\n");
+ return -EIO;
+ }
comp_humidity = bmp280_compensate_humidity(data, adc_humidity);
*val = comp_humidity;
@@ -597,14 +612,20 @@ static const struct bmp280_chip_info bmp280_chip_info = {
static int bme280_chip_config(struct bmp280_data *data)
{
- int ret = bmp280_chip_config(data);
+ int ret;
u8 osrs = BMP280_OSRS_HUMIDITIY_X(data->oversampling_humid + 1);
+ /*
+ * Oversampling of humidity must be set before oversampling of
+ * temperature/pressure is set to become effective.
+ */
+ ret = regmap_update_bits(data->regmap, BMP280_REG_CTRL_HUMIDITY,
+ BMP280_OSRS_HUMIDITY_MASK, osrs);
+
if (ret < 0)
return ret;
- return regmap_update_bits(data->regmap, BMP280_REG_CTRL_HUMIDITY,
- BMP280_OSRS_HUMIDITY_MASK, osrs);
+ return bmp280_chip_config(data);
}
static const struct bmp280_chip_info bme280_chip_info = {
diff --git a/drivers/iio/pressure/bmp280.h b/drivers/iio/pressure/bmp280.h
index 2c770e13be0e..61347438b779 100644
--- a/drivers/iio/pressure/bmp280.h
+++ b/drivers/iio/pressure/bmp280.h
@@ -96,6 +96,11 @@
#define BME280_CHIP_ID 0x60
#define BMP280_SOFT_RESET_VAL 0xB6
+/* BMP280 register skipped special values */
+#define BMP280_TEMP_SKIPPED 0x80000
+#define BMP280_PRESS_SKIPPED 0x80000
+#define BMP280_HUMIDITY_SKIPPED 0x8000
+
/* Regmap configurations */
extern const struct regmap_config bmp180_regmap_config;
extern const struct regmap_config bmp280_regmap_config;
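
For reference, this is how the "skipped" sentinel falls out of the raw read the new checks rely on: a disabled measurement reads back 0x80 0x00 0x00 from the MSB/LSB/XLSB registers, and shifting the big-endian 20-bit sample into place reproduces BMP280_TEMP_SKIPPED. A standalone sketch, not driver code:

	__be32 tmp = 0;
	u32 adc;

	memcpy(&tmp, "\x80\x00\x00", 3);	/* 3 raw bytes, MSB first */
	adc = be32_to_cpu(tmp) >> 12;		/* = 0x80000 == BMP280_TEMP_SKIPPED */

The 16-bit humidity register behaves the same way with 0x8000 (BMP280_HUMIDITY_SKIPPED).
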
diff --git a/drivers/iio/pressure/ms5637.c b/drivers/iio/pressure/ms5637.c
index 953ffbc0ef96..c413f8a84a63 100644
--- a/drivers/iio/pressure/ms5637.c
+++ b/drivers/iio/pressure/ms5637.c
@@ -181,11 +181,21 @@ static const struct i2c_device_id ms5637_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ms5637_id);
+static const struct of_device_id ms5637_of_match[] = {
+ { .compatible = "meas,ms5637", },
+ { .compatible = "meas,ms5805", },
+ { .compatible = "meas,ms5837", },
+ { .compatible = "meas,ms8607-temppressure", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ms5637_of_match);
+
static struct i2c_driver ms5637_driver = {
.probe = ms5637_probe,
.id_table = ms5637_id,
.driver = {
- .name = "ms5637"
+ .name = "ms5637",
+ .of_match_table = of_match_ptr(ms5637_of_match),
},
};
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index aa61ec15c139..34611a8ea2ce 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -390,7 +390,7 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
.drdy_irq = {
.addr = 0x23,
.mask_int1 = 0x01,
- .mask_int2 = 0x10,
+ .mask_int2 = 0x00,
.addr_ihl = 0x22,
.mask_ihl = 0x80,
.addr_od = 0x22,
@@ -449,14 +449,14 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
.drdy_irq = {
.addr = 0x12,
.mask_int1 = 0x04,
- .mask_int2 = 0x08,
+ .mask_int2 = 0x00,
.addr_ihl = 0x12,
.mask_ihl = 0x80,
.addr_od = 0x12,
.mask_od = 0x40,
.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
- .multi_read_bit = true,
+ .multi_read_bit = false,
.bootime = 2,
},
};
diff --git a/drivers/iio/pressure/st_pressure_i2c.c b/drivers/iio/pressure/st_pressure_i2c.c
index 17417a4d5a5f..7f15e927fa2b 100644
--- a/drivers/iio/pressure/st_pressure_i2c.c
+++ b/drivers/iio/pressure/st_pressure_i2c.c
@@ -77,7 +77,8 @@ static int st_press_i2c_probe(struct i2c_client *client,
press_data = iio_priv(indio_dev);
if (client->dev.of_node) {
- st_sensors_of_i2c_probe(client, st_press_of_match);
+ st_sensors_of_name_probe(&client->dev, st_press_of_match,
+ client->name, sizeof(client->name));
} else if (ACPI_HANDLE(&client->dev)) {
ret = st_sensors_match_acpi_device(&client->dev);
if ((ret < 0) || (ret >= ST_PRESS_MAX))
diff --git a/drivers/iio/pressure/st_pressure_spi.c b/drivers/iio/pressure/st_pressure_spi.c
index 550508025af1..f5ebd36bb4bf 100644
--- a/drivers/iio/pressure/st_pressure_spi.c
+++ b/drivers/iio/pressure/st_pressure_spi.c
@@ -18,6 +18,36 @@
#include <linux/iio/common/st_sensors_spi.h>
#include "st_pressure.h"
+#ifdef CONFIG_OF
+/*
+ * For new single-chip sensors use <device_name> as compatible string.
+ * For old single-chip devices keep <device_name>-press to maintain
+ * compatibility
+ */
+static const struct of_device_id st_press_of_match[] = {
+ {
+ .compatible = "st,lps001wp-press",
+ .data = LPS001WP_PRESS_DEV_NAME,
+ },
+ {
+ .compatible = "st,lps25h-press",
+ .data = LPS25H_PRESS_DEV_NAME,
+ },
+ {
+ .compatible = "st,lps331ap-press",
+ .data = LPS331AP_PRESS_DEV_NAME,
+ },
+ {
+ .compatible = "st,lps22hb-press",
+ .data = LPS22HB_PRESS_DEV_NAME,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, st_press_of_match);
+#else
+#define st_press_of_match NULL
+#endif
+
static int st_press_spi_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
@@ -30,6 +60,8 @@ static int st_press_spi_probe(struct spi_device *spi)
press_data = iio_priv(indio_dev);
+ st_sensors_of_name_probe(&spi->dev, st_press_of_match,
+ spi->modalias, sizeof(spi->modalias));
st_sensors_spi_configure(indio_dev, spi, press_data);
err = st_press_common_probe(indio_dev);
@@ -58,6 +90,7 @@ MODULE_DEVICE_TABLE(spi, st_press_id_table);
static struct spi_driver st_press_driver = {
.driver = {
.name = "st-press-spi",
+ .of_match_table = of_match_ptr(st_press_of_match),
},
.probe = st_press_spi_probe,
.remove = st_press_spi_remove,
diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c
index c92a95f9f52c..ebfb1de7377f 100644
--- a/drivers/iio/pressure/zpa2326.c
+++ b/drivers/iio/pressure/zpa2326.c
@@ -141,14 +141,14 @@ struct zpa2326_private {
struct regulator *vdd;
};
-#define zpa2326_err(_idev, _format, _arg...) \
- dev_err(_idev->dev.parent, _format, ##_arg)
+#define zpa2326_err(idev, fmt, ...) \
+ dev_err(idev->dev.parent, fmt "\n", ##__VA_ARGS__)
-#define zpa2326_warn(_idev, _format, _arg...) \
- dev_warn(_idev->dev.parent, _format, ##_arg)
+#define zpa2326_warn(idev, fmt, ...) \
+ dev_warn(idev->dev.parent, fmt "\n", ##__VA_ARGS__)
-#define zpa2326_dbg(_idev, _format, _arg...) \
- dev_dbg(_idev->dev.parent, _format, ##_arg)
+#define zpa2326_dbg(idev, fmt, ...) \
+ dev_dbg(idev->dev.parent, fmt "\n", ##__VA_ARGS__)
bool zpa2326_isreg_writeable(struct device *dev, unsigned int reg)
{
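
With the rewritten macros the trailing newline is appended centrally, so call sites can drop it. A hypothetical call (message text and variables are illustrative, not taken from the driver):

	zpa2326_warn(indio_dev, "unexpected sample status %#x", status);
	/* expands to:
	 * dev_warn(indio_dev->dev.parent, "unexpected sample status %#x\n", status);
	 */
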
diff --git a/drivers/iio/proximity/Kconfig b/drivers/iio/proximity/Kconfig
index 5b81a8c9d438..ae070950f920 100644
--- a/drivers/iio/proximity/Kconfig
+++ b/drivers/iio/proximity/Kconfig
@@ -57,12 +57,12 @@ config SX9500
module will be called sx9500.
config SRF08
- tristate "Devantech SRF08 ultrasonic ranger sensor"
+ tristate "Devantech SRF02/SRF08/SRF10 ultrasonic ranger sensor"
depends on I2C
help
- Say Y here to build a driver for Devantech SRF08 ultrasonic
- ranger sensor. This driver can be used to measure the distance
- of objects.
+ Say Y here to build a driver for Devantech SRF02/SRF08/SRF10
+ ultrasonic ranger sensors with an I2C interface.
+ This driver can be used to measure the distance to objects.
To compile this driver as a module, choose M here: the
module will be called srf08.
diff --git a/drivers/iio/proximity/srf08.c b/drivers/iio/proximity/srf08.c
index 49316cbf7c60..9380d545aab1 100644
--- a/drivers/iio/proximity/srf08.c
+++ b/drivers/iio/proximity/srf08.c
@@ -1,14 +1,18 @@
/*
- * srf08.c - Support for Devantech SRF08 ultrasonic ranger
+ * srf08.c - Support for Devantech SRFxx ultrasonic ranger
+ * with I2C interface
+ * supported devices are srf02, srf08 and srf10
*
- * Copyright (c) 2016 Andreas Klinger <ak@it-klinger.de>
+ * Copyright (c) 2016, 2017 Andreas Klinger <ak@it-klinger.de>
*
* This file is subject to the terms and conditions of version 2 of
- * the GNU General Public License. See the file COPYING in the main
+ * the GNU General Public License. See the file COPYING in the main
* directory of this archive for more details.
*
* For details about the device see:
* http://www.robot-electronics.co.uk/htm/srf08tech.html
+ * http://www.robot-electronics.co.uk/htm/srf10tech.htm
+ * http://www.robot-electronics.co.uk/htm/srf02tech.htm
*/
#include <linux/err.h>
@@ -18,6 +22,9 @@
#include <linux/bitops.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
/* registers of SRF08 device */
#define SRF08_WRITE_COMMAND 0x00 /* Command Register */
@@ -30,14 +37,46 @@
#define SRF08_CMD_RANGING_CM 0x51 /* Ranging Mode - Result in cm */
-#define SRF08_DEFAULT_GAIN 1025 /* default analogue value of Gain */
-#define SRF08_DEFAULT_RANGE 6020 /* default value of Range in mm */
+enum srf08_sensor_type {
+ SRF02,
+ SRF08,
+ SRF10,
+ SRF_MAX_TYPE
+};
+
+struct srf08_chip_info {
+ const int *sensitivity_avail;
+ int num_sensitivity_avail;
+ int sensitivity_default;
+
+ /* default value of Range in mm */
+ int range_default;
+};
struct srf08_data {
struct i2c_client *client;
- int sensitivity; /* Gain */
- int range_mm; /* max. Range in mm */
+
+ /*
+ * Gain in the datasheet is called sensitivity here to distinguish
+ * it from the gain used with ADC amplifiers
+ */
+ int sensitivity;
+
+ /* max. Range in mm */
+ int range_mm;
struct mutex lock;
+
+ /*
+ * triggered buffer
+ * 1x16-bit channel + 3x16 padding + 4x16 timestamp
+ */
+ s16 buffer[8];
+
+ /* Sensor-Type */
+ enum srf08_sensor_type sensor_type;
+
+ /* Chip-specific information */
+ const struct srf08_chip_info *chip_info;
};
/*
@@ -47,11 +86,42 @@ struct srf08_data {
* But with ADC's this term is already used differently and that's why it
* is called "Sensitivity" here.
*/
-static const int srf08_sensitivity[] = {
+static const struct srf08_chip_info srf02_chip_info = {
+ .sensitivity_avail = NULL,
+ .num_sensitivity_avail = 0,
+ .sensitivity_default = 0,
+
+ .range_default = 0,
+};
+
+static const int srf08_sensitivity_avail[] = {
94, 97, 100, 103, 107, 110, 114, 118,
123, 128, 133, 139, 145, 152, 159, 168,
177, 187, 199, 212, 227, 245, 265, 288,
- 317, 352, 395, 450, 524, 626, 777, 1025 };
+ 317, 352, 395, 450, 524, 626, 777, 1025
+ };
+
+static const struct srf08_chip_info srf08_chip_info = {
+ .sensitivity_avail = srf08_sensitivity_avail,
+ .num_sensitivity_avail = ARRAY_SIZE(srf08_sensitivity_avail),
+ .sensitivity_default = 1025,
+
+ .range_default = 6020,
+};
+
+static const int srf10_sensitivity_avail[] = {
+ 40, 40, 50, 60, 70, 80, 100, 120,
+ 140, 200, 250, 300, 350, 400, 500, 600,
+ 700,
+ };
+
+static const struct srf08_chip_info srf10_chip_info = {
+ .sensitivity_avail = srf10_sensitivity_avail,
+ .num_sensitivity_avail = ARRAY_SIZE(srf10_sensitivity_avail),
+ .sensitivity_default = 700,
+
+ .range_default = 6020,
+};
static int srf08_read_ranging(struct srf08_data *data)
{
@@ -110,6 +180,29 @@ static int srf08_read_ranging(struct srf08_data *data)
return ret;
}
+static irqreturn_t srf08_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct srf08_data *data = iio_priv(indio_dev);
+ s16 sensor_data;
+
+ sensor_data = srf08_read_ranging(data);
+ if (sensor_data < 0)
+ goto err;
+
+ mutex_lock(&data->lock);
+
+ data->buffer[0] = sensor_data;
+ iio_push_to_buffers_with_timestamp(indio_dev,
+ data->buffer, pf->timestamp);
+
+ mutex_unlock(&data->lock);
+err:
+ iio_trigger_notify_done(indio_dev->trig);
+ return IRQ_HANDLED;
+}
+
static int srf08_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *channel, int *val,
int *val2, long mask)
@@ -225,9 +318,13 @@ static ssize_t srf08_show_sensitivity_available(struct device *dev,
struct device_attribute *attr, char *buf)
{
int i, len = 0;
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct srf08_data *data = iio_priv(indio_dev);
- for (i = 0; i < ARRAY_SIZE(srf08_sensitivity); i++)
- len += sprintf(buf + len, "%d ", srf08_sensitivity[i]);
+ for (i = 0; i < data->chip_info->num_sensitivity_avail; i++)
+ if (data->chip_info->sensitivity_avail[i])
+ len += sprintf(buf + len, "%d ",
+ data->chip_info->sensitivity_avail[i]);
len += sprintf(buf + len, "\n");
@@ -256,19 +353,21 @@ static ssize_t srf08_write_sensitivity(struct srf08_data *data,
int ret, i;
u8 regval;
- for (i = 0; i < ARRAY_SIZE(srf08_sensitivity); i++)
- if (val == srf08_sensitivity[i]) {
+ if (!val)
+ return -EINVAL;
+
+ for (i = 0; i < data->chip_info->num_sensitivity_avail; i++)
+ if (val && (val == data->chip_info->sensitivity_avail[i])) {
regval = i;
break;
}
- if (i >= ARRAY_SIZE(srf08_sensitivity))
+ if (i >= data->chip_info->num_sensitivity_avail)
return -EINVAL;
mutex_lock(&data->lock);
- ret = i2c_smbus_write_byte_data(client,
- SRF08_WRITE_MAX_GAIN, regval);
+ ret = i2c_smbus_write_byte_data(client, SRF08_WRITE_MAX_GAIN, regval);
if (ret < 0) {
dev_err(&client->dev, "write_sensitivity - err: %d\n", ret);
mutex_unlock(&data->lock);
@@ -323,7 +422,15 @@ static const struct iio_chan_spec srf08_channels[] = {
.info_mask_separate =
BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
},
+ IIO_CHAN_SOFT_TIMESTAMP(1),
};
static const struct iio_info srf08_info = {
@@ -332,6 +439,15 @@ static const struct iio_info srf08_info = {
.driver_module = THIS_MODULE,
};
+/*
+ * the srf02 doesn't have an adjustable range or sensitivity,
+ * so no extra attributes are needed
+ */
+static const struct iio_info srf02_info = {
+ .read_raw = srf08_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
static int srf08_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -352,34 +468,84 @@ static int srf08_probe(struct i2c_client *client,
data = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
data->client = client;
+ data->sensor_type = (enum srf08_sensor_type)id->driver_data;
+
+ switch (data->sensor_type) {
+ case SRF02:
+ data->chip_info = &srf02_chip_info;
+ indio_dev->info = &srf02_info;
+ break;
+ case SRF08:
+ data->chip_info = &srf08_chip_info;
+ indio_dev->info = &srf08_info;
+ break;
+ case SRF10:
+ data->chip_info = &srf10_chip_info;
+ indio_dev->info = &srf08_info;
+ break;
+ default:
+ return -EINVAL;
+ }
- indio_dev->name = "srf08";
+ indio_dev->name = id->name;
indio_dev->dev.parent = &client->dev;
indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->info = &srf08_info;
indio_dev->channels = srf08_channels;
indio_dev->num_channels = ARRAY_SIZE(srf08_channels);
mutex_init(&data->lock);
- /*
- * set default values of device here
- * these register values cannot be read from the hardware
- * therefore set driver specific default values
- */
- ret = srf08_write_range_mm(data, SRF08_DEFAULT_RANGE);
- if (ret < 0)
+ ret = devm_iio_triggered_buffer_setup(&client->dev, indio_dev,
+ iio_pollfunc_store_time, srf08_trigger_handler, NULL);
+ if (ret < 0) {
+ dev_err(&client->dev, "setup of iio triggered buffer failed\n");
return ret;
+ }
- ret = srf08_write_sensitivity(data, SRF08_DEFAULT_GAIN);
- if (ret < 0)
- return ret;
+ if (data->chip_info->range_default) {
+ /*
+ * set default range of device in mm here
+ * these register values cannot be read from the hardware
+ * therefore set driver specific default values
+ *
+ * the srf02 doesn't have a default value, so this step is skipped
+ */
+ ret = srf08_write_range_mm(data,
+ data->chip_info->range_default);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (data->chip_info->sensitivity_default) {
+ /*
+ * set default sensitivity of device here
+ * these register values cannot be read from the hardware
+ * therefore set driver specific default values
+ *
+ * the srf02 doesn't have a default value, so this step is skipped
+ */
+ ret = srf08_write_sensitivity(data,
+ data->chip_info->sensitivity_default);
+ if (ret < 0)
+ return ret;
+ }
return devm_iio_device_register(&client->dev, indio_dev);
}
+static const struct of_device_id of_srf08_match[] = {
+ { .compatible = "devantech,srf02", (void *)SRF02},
+ { .compatible = "devantech,srf08", (void *)SRF08},
+ { .compatible = "devantech,srf10", (void *)SRF10},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, of_srf08_match);
+
static const struct i2c_device_id srf08_id[] = {
- { "srf08", 0 },
+ { "srf02", SRF02 },
+ { "srf08", SRF08 },
+ { "srf10", SRF10 },
{ }
};
MODULE_DEVICE_TABLE(i2c, srf08_id);
@@ -387,6 +553,7 @@ MODULE_DEVICE_TABLE(i2c, srf08_id);
static struct i2c_driver srf08_driver = {
.driver = {
.name = "srf08",
+ .of_match_table = of_srf08_match,
},
.probe = srf08_probe,
.id_table = srf08_id,
@@ -394,5 +561,5 @@ static struct i2c_driver srf08_driver = {
module_i2c_driver(srf08_driver);
MODULE_AUTHOR("Andreas Klinger <ak@it-klinger.de>");
-MODULE_DESCRIPTION("Devantech SRF08 ultrasonic ranger driver");
+MODULE_DESCRIPTION("Devantech SRF02/SRF08/SRF10 i2c ultrasonic ranger driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/iio/temperature/tsys01.c b/drivers/iio/temperature/tsys01.c
index 3e60c6189d98..d8aa211d76e4 100644
--- a/drivers/iio/temperature/tsys01.c
+++ b/drivers/iio/temperature/tsys01.c
@@ -214,11 +214,18 @@ static const struct i2c_device_id tsys01_id[] = {
};
MODULE_DEVICE_TABLE(i2c, tsys01_id);
+static const struct of_device_id tsys01_of_match[] = {
+ { .compatible = "meas,tsys01", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tsys01_of_match);
+
static struct i2c_driver tsys01_driver = {
.probe = tsys01_i2c_probe,
.id_table = tsys01_id,
.driver = {
.name = "tsys01",
+ .of_match_table = of_match_ptr(tsys01_of_match),
},
};
diff --git a/drivers/iio/trigger/Kconfig b/drivers/iio/trigger/Kconfig
index e4d4e63434db..a633d2c8e805 100644
--- a/drivers/iio/trigger/Kconfig
+++ b/drivers/iio/trigger/Kconfig
@@ -24,6 +24,17 @@ config IIO_INTERRUPT_TRIGGER
To compile this driver as a module, choose M here: the
module will be called iio-trig-interrupt.
+config IIO_STM32_LPTIMER_TRIGGER
+ tristate "STM32 Low-Power Timer Trigger"
+ depends on MFD_STM32_LPTIMER || COMPILE_TEST
+ help
+ Select this option to enable STM32 Low-Power Timer Trigger.
+ This can be used as a trigger source for the STM32 internal ADC
+ and/or DAC.
+
+ To compile this driver as a module, choose M here: the
+ module will be called stm32-lptimer-trigger.
+
config IIO_STM32_TIMER_TRIGGER
tristate "STM32 Timer Trigger"
depends on (ARCH_STM32 && OF && MFD_STM32_TIMERS) || COMPILE_TEST
diff --git a/drivers/iio/trigger/Makefile b/drivers/iio/trigger/Makefile
index 5c4ecd380653..0a72a2a76cb2 100644
--- a/drivers/iio/trigger/Makefile
+++ b/drivers/iio/trigger/Makefile
@@ -6,6 +6,7 @@
obj-$(CONFIG_IIO_HRTIMER_TRIGGER) += iio-trig-hrtimer.o
obj-$(CONFIG_IIO_INTERRUPT_TRIGGER) += iio-trig-interrupt.o
+obj-$(CONFIG_IIO_STM32_LPTIMER_TRIGGER) += stm32-lptimer-trigger.o
obj-$(CONFIG_IIO_STM32_TIMER_TRIGGER) += stm32-timer-trigger.o
obj-$(CONFIG_IIO_SYSFS_TRIGGER) += iio-trig-sysfs.o
obj-$(CONFIG_IIO_TIGHTLOOP_TRIGGER) += iio-trig-loop.o
diff --git a/drivers/iio/trigger/stm32-lptimer-trigger.c b/drivers/iio/trigger/stm32-lptimer-trigger.c
new file mode 100644
index 000000000000..241eae6a4306
--- /dev/null
+++ b/drivers/iio/trigger/stm32-lptimer-trigger.c
@@ -0,0 +1,118 @@
+/*
+ * STM32 Low-Power Timer Trigger driver
+ *
+ * Copyright (C) STMicroelectronics 2017
+ *
+ * Author: Fabrice Gasnier <fabrice.gasnier@st.com>.
+ *
+ * License terms: GNU General Public License (GPL), version 2
+ *
+ * Inspired by Benjamin Gaignard's stm32-timer-trigger driver
+ */
+
+#include <linux/iio/timer/stm32-lptim-trigger.h>
+#include <linux/mfd/stm32-lptimer.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+/* List Low-Power Timer triggers */
+static const char * const stm32_lptim_triggers[] = {
+ LPTIM1_OUT,
+ LPTIM2_OUT,
+ LPTIM3_OUT,
+};
+
+struct stm32_lptim_trigger {
+ struct device *dev;
+ const char *trg;
+};
+
+static int stm32_lptim_validate_device(struct iio_trigger *trig,
+ struct iio_dev *indio_dev)
+{
+ if (indio_dev->modes & INDIO_HARDWARE_TRIGGERED)
+ return 0;
+
+ return -EINVAL;
+}
+
+static const struct iio_trigger_ops stm32_lptim_trigger_ops = {
+ .owner = THIS_MODULE,
+ .validate_device = stm32_lptim_validate_device,
+};
+
+/**
+ * is_stm32_lptim_trigger
+ * @trig: trigger to be checked
+ *
+ * Return true if the trigger is a valid STM32 IIO Low-Power Timer Trigger,
+ * otherwise return false.
+ */
+bool is_stm32_lptim_trigger(struct iio_trigger *trig)
+{
+ return (trig->ops == &stm32_lptim_trigger_ops);
+}
+EXPORT_SYMBOL(is_stm32_lptim_trigger);
+
+static int stm32_lptim_setup_trig(struct stm32_lptim_trigger *priv)
+{
+ struct iio_trigger *trig;
+
+ trig = devm_iio_trigger_alloc(priv->dev, "%s", priv->trg);
+ if (!trig)
+ return -ENOMEM;
+
+ trig->dev.parent = priv->dev->parent;
+ trig->ops = &stm32_lptim_trigger_ops;
+ iio_trigger_set_drvdata(trig, priv);
+
+ return devm_iio_trigger_register(priv->dev, trig);
+}
+
+static int stm32_lptim_trigger_probe(struct platform_device *pdev)
+{
+ struct stm32_lptim_trigger *priv;
+ u32 index;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ if (of_property_read_u32(pdev->dev.of_node, "reg", &index))
+ return -EINVAL;
+
+ if (index >= ARRAY_SIZE(stm32_lptim_triggers))
+ return -EINVAL;
+
+ priv->dev = &pdev->dev;
+ priv->trg = stm32_lptim_triggers[index];
+
+ ret = stm32_lptim_setup_trig(priv);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, priv);
+
+ return 0;
+}
+
+static const struct of_device_id stm32_lptim_trig_of_match[] = {
+ { .compatible = "st,stm32-lptimer-trigger", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stm32_lptim_trig_of_match);
+
+static struct platform_driver stm32_lptim_trigger_driver = {
+ .probe = stm32_lptim_trigger_probe,
+ .driver = {
+ .name = "stm32-lptimer-trigger",
+ .of_match_table = stm32_lptim_trig_of_match,
+ },
+};
+module_platform_driver(stm32_lptim_trigger_driver);
+
+MODULE_AUTHOR("Fabrice Gasnier <fabrice.gasnier@st.com>");
+MODULE_ALIAS("platform:stm32-lptimer-trigger");
+MODULE_DESCRIPTION("STMicroelectronics STM32 LPTIM trigger driver");
+MODULE_LICENSE("GPL v2");
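
is_stm32_lptim_trigger() is exported for consumers that accept only these triggers; a hardware-triggered device can reject everything else from its validate_trigger callback. A hedged sketch (function and device names are illustrative, not taken from an existing driver):

static int my_adc_validate_trigger(struct iio_dev *indio_dev,
				   struct iio_trigger *trig)
{
	/* only LPTIMx_OUT triggers may start conversions on this device */
	return is_stm32_lptim_trigger(trig) ? 0 : -EINVAL;
}
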
diff --git a/drivers/iio/trigger/stm32-timer-trigger.c b/drivers/iio/trigger/stm32-timer-trigger.c
index d22bc56dd9fc..9b9053494daf 100644
--- a/drivers/iio/trigger/stm32-timer-trigger.c
+++ b/drivers/iio/trigger/stm32-timer-trigger.c
@@ -13,6 +13,7 @@
#include <linux/mfd/stm32-timers.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/of_device.h>
#define MAX_TRIGGERS 7
#define MAX_VALIDS 5
@@ -28,9 +29,14 @@ static const void *triggers_table[][MAX_TRIGGERS] = {
{ TIM7_TRGO,},
{ TIM8_TRGO, TIM8_TRGO2, TIM8_CH1, TIM8_CH2, TIM8_CH3, TIM8_CH4,},
{ TIM9_TRGO, TIM9_CH1, TIM9_CH2,},
- { }, /* timer 10 */
- { }, /* timer 11 */
+ { TIM10_OC1,},
+ { TIM11_OC1,},
{ TIM12_TRGO, TIM12_CH1, TIM12_CH2,},
+ { TIM13_OC1,},
+ { TIM14_OC1,},
+ { TIM15_TRGO,},
+ { TIM16_OC1,},
+ { TIM17_OC1,},
};
/* List the triggers accepted by each timer */
@@ -43,10 +49,30 @@ static const void *valids_table[][MAX_VALIDS] = {
{ }, /* timer 6 */
{ }, /* timer 7 */
{ TIM1_TRGO, TIM2_TRGO, TIM4_TRGO, TIM5_TRGO,},
- { TIM2_TRGO, TIM3_TRGO,},
+ { TIM2_TRGO, TIM3_TRGO, TIM10_OC1, TIM11_OC1,},
+ { }, /* timer 10 */
+ { }, /* timer 11 */
+ { TIM4_TRGO, TIM5_TRGO, TIM13_OC1, TIM14_OC1,},
+};
+
+static const void *stm32h7_valids_table[][MAX_VALIDS] = {
+ { TIM15_TRGO, TIM2_TRGO, TIM3_TRGO, TIM4_TRGO,},
+ { TIM1_TRGO, TIM8_TRGO, TIM3_TRGO, TIM4_TRGO,},
+ { TIM1_TRGO, TIM2_TRGO, TIM15_TRGO, TIM4_TRGO,},
+ { TIM1_TRGO, TIM2_TRGO, TIM3_TRGO, TIM8_TRGO,},
+ { TIM1_TRGO, TIM8_TRGO, TIM3_TRGO, TIM4_TRGO,},
+ { }, /* timer 6 */
+ { }, /* timer 7 */
+ { TIM1_TRGO, TIM2_TRGO, TIM4_TRGO, TIM5_TRGO,},
+ { }, /* timer 9 */
{ }, /* timer 10 */
{ }, /* timer 11 */
- { TIM4_TRGO, TIM5_TRGO,},
+ { TIM4_TRGO, TIM5_TRGO, TIM13_OC1, TIM14_OC1,},
+ { }, /* timer 13 */
+ { }, /* timer 14 */
+ { TIM1_TRGO, TIM3_TRGO, TIM16_OC1, TIM17_OC1,},
+ { }, /* timer 16 */
+ { }, /* timer 17 */
};
struct stm32_timer_trigger {
@@ -59,11 +85,21 @@ struct stm32_timer_trigger {
bool has_trgo2;
};
+struct stm32_timer_trigger_cfg {
+ const void *(*valids_table)[MAX_VALIDS];
+ const unsigned int num_valids_table;
+};
+
static bool stm32_timer_is_trgo2_name(const char *name)
{
return !!strstr(name, "trgo2");
}
+static bool stm32_timer_is_trgo_name(const char *name)
+{
+ return (!!strstr(name, "trgo") && !strstr(name, "trgo2"));
+}
+
static int stm32_timer_start(struct stm32_timer_trigger *priv,
struct iio_trigger *trig,
unsigned int frequency)
@@ -328,6 +364,7 @@ static int stm32_setup_iio_triggers(struct stm32_timer_trigger *priv)
while (cur && *cur) {
struct iio_trigger *trig;
+ bool cur_is_trgo = stm32_timer_is_trgo_name(*cur);
bool cur_is_trgo2 = stm32_timer_is_trgo2_name(*cur);
if (cur_is_trgo2 && !priv->has_trgo2) {
@@ -344,10 +381,9 @@ static int stm32_setup_iio_triggers(struct stm32_timer_trigger *priv)
/*
* sampling frequency and master mode attributes
- * should only be available on trgo trigger which
- * is always the first in the list.
+ * should only be available on trgo/trgo2 triggers
*/
- if (cur == priv->triggers || cur_is_trgo2)
+ if (cur_is_trgo || cur_is_trgo2)
trig->dev.groups = stm32_trigger_attr_groups;
iio_trigger_set_drvdata(trig, priv);
@@ -366,34 +402,32 @@ static int stm32_counter_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct stm32_timer_trigger *priv = iio_priv(indio_dev);
+ u32 dat;
switch (mask) {
case IIO_CHAN_INFO_RAW:
- {
- u32 cnt;
-
- regmap_read(priv->regmap, TIM_CNT, &cnt);
- *val = cnt;
+ regmap_read(priv->regmap, TIM_CNT, &dat);
+ *val = dat;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_ENABLE:
+ regmap_read(priv->regmap, TIM_CR1, &dat);
+ *val = (dat & TIM_CR1_CEN) ? 1 : 0;
return IIO_VAL_INT;
- }
- case IIO_CHAN_INFO_SCALE:
- {
- u32 smcr;
- regmap_read(priv->regmap, TIM_SMCR, &smcr);
- smcr &= TIM_SMCR_SMS;
+ case IIO_CHAN_INFO_SCALE:
+ regmap_read(priv->regmap, TIM_SMCR, &dat);
+ dat &= TIM_SMCR_SMS;
*val = 1;
*val2 = 0;
/* in quadrature case scale = 0.25 */
- if (smcr == 3)
+ if (dat == 3)
*val2 = 2;
return IIO_VAL_FRACTIONAL_LOG2;
}
- }
return -EINVAL;
}
@@ -403,15 +437,31 @@ static int stm32_counter_write_raw(struct iio_dev *indio_dev,
int val, int val2, long mask)
{
struct stm32_timer_trigger *priv = iio_priv(indio_dev);
+ u32 dat;
switch (mask) {
case IIO_CHAN_INFO_RAW:
- regmap_write(priv->regmap, TIM_CNT, val);
+ return regmap_write(priv->regmap, TIM_CNT, val);
- return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
/* fixed scale */
return -EINVAL;
+
+ case IIO_CHAN_INFO_ENABLE:
+ if (val) {
+ regmap_read(priv->regmap, TIM_CR1, &dat);
+ if (!(dat & TIM_CR1_CEN))
+ clk_enable(priv->clk);
+ regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN,
+ TIM_CR1_CEN);
+ } else {
+ regmap_read(priv->regmap, TIM_CR1, &dat);
+ regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN,
+ 0);
+ if (dat & TIM_CR1_CEN)
+ clk_disable(priv->clk);
+ }
+ return 0;
}
return -EINVAL;
@@ -471,7 +521,7 @@ static int stm32_get_trigger_mode(struct iio_dev *indio_dev,
regmap_read(priv->regmap, TIM_SMCR, &smcr);
- return smcr == TIM_SMCR_SMS ? 0 : -EINVAL;
+ return (smcr & TIM_SMCR_SMS) == TIM_SMCR_SMS ? 0 : -EINVAL;
}
static const struct iio_enum stm32_trigger_mode_enum = {
@@ -507,9 +557,19 @@ static int stm32_set_enable_mode(struct iio_dev *indio_dev,
{
struct stm32_timer_trigger *priv = iio_priv(indio_dev);
int sms = stm32_enable_mode2sms(mode);
+ u32 val;
if (sms < 0)
return sms;
+ /*
+ * In trigger mode the hardware sets the CEN bit automatically, so
+ * enable the counter clock first to keep it in sync with CEN.
+ */
+ if (sms == 6) {
+ regmap_read(priv->regmap, TIM_CR1, &val);
+ if (!(val & TIM_CR1_CEN))
+ clk_enable(priv->clk);
+ }
regmap_update_bits(priv->regmap, TIM_SMCR, TIM_SMCR_SMS, sms);
@@ -571,11 +631,14 @@ static int stm32_get_quadrature_mode(struct iio_dev *indio_dev,
{
struct stm32_timer_trigger *priv = iio_priv(indio_dev);
u32 smcr;
+ int mode;
regmap_read(priv->regmap, TIM_SMCR, &smcr);
- smcr &= TIM_SMCR_SMS;
+ mode = (smcr & TIM_SMCR_SMS) - 1;
+ if ((mode < 0) || (mode >= ARRAY_SIZE(stm32_quadrature_modes)))
+ return -EINVAL;
- return smcr - 1;
+ return mode;
}
static const struct iio_enum stm32_quadrature_mode_enum = {
@@ -592,13 +655,20 @@ static const char *const stm32_count_direction_states[] = {
static int stm32_set_count_direction(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
- unsigned int mode)
+ unsigned int dir)
{
struct stm32_timer_trigger *priv = iio_priv(indio_dev);
+ u32 val;
+ int mode;
- regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_DIR, mode);
+ /* In encoder mode, direction is RO (given by TI1/TI2 signals) */
+ regmap_read(priv->regmap, TIM_SMCR, &val);
+ mode = (val & TIM_SMCR_SMS) - 1;
+ if ((mode >= 0) && (mode < ARRAY_SIZE(stm32_quadrature_modes)))
+ return -EBUSY;
- return 0;
+ return regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_DIR,
+ dir ? TIM_CR1_DIR : 0);
}
static int stm32_get_count_direction(struct iio_dev *indio_dev,
@@ -609,7 +679,7 @@ static int stm32_get_count_direction(struct iio_dev *indio_dev,
regmap_read(priv->regmap, TIM_CR1, &cr1);
- return (cr1 & TIM_CR1_DIR);
+ return ((cr1 & TIM_CR1_DIR) ? 1 : 0);
}
static const struct iio_enum stm32_count_direction_enum = {
@@ -672,7 +742,9 @@ static const struct iio_chan_spec_ext_info stm32_trigger_count_info[] = {
static const struct iio_chan_spec stm32_trigger_channel = {
.type = IIO_COUNT,
.channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_ENABLE) |
+ BIT(IIO_CHAN_INFO_SCALE),
.ext_info = stm32_trigger_count_info,
.indexed = 1
};
@@ -734,18 +806,22 @@ static int stm32_timer_trigger_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct stm32_timer_trigger *priv;
struct stm32_timers *ddata = dev_get_drvdata(pdev->dev.parent);
+ const struct stm32_timer_trigger_cfg *cfg;
unsigned int index;
int ret;
if (of_property_read_u32(dev->of_node, "reg", &index))
return -EINVAL;
+ cfg = (const struct stm32_timer_trigger_cfg *)
+ of_match_device(dev->driver->of_match_table, dev)->data;
+
if (index >= ARRAY_SIZE(triggers_table) ||
- index >= ARRAY_SIZE(valids_table))
+ index >= cfg->num_valids_table)
return -EINVAL;
/* Create an IIO device only if we have triggers to be validated */
- if (*valids_table[index])
+ if (*cfg->valids_table[index])
priv = stm32_setup_counter_device(dev);
else
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -758,7 +834,7 @@ static int stm32_timer_trigger_probe(struct platform_device *pdev)
priv->clk = ddata->clk;
priv->max_arr = ddata->max_arr;
priv->triggers = triggers_table[index];
- priv->valids = valids_table[index];
+ priv->valids = cfg->valids_table[index];
stm32_timer_detect_trgo2(priv);
ret = stm32_setup_iio_triggers(priv);
@@ -770,8 +846,24 @@ static int stm32_timer_trigger_probe(struct platform_device *pdev)
return 0;
}
+static const struct stm32_timer_trigger_cfg stm32_timer_trg_cfg = {
+ .valids_table = valids_table,
+ .num_valids_table = ARRAY_SIZE(valids_table),
+};
+
+static const struct stm32_timer_trigger_cfg stm32h7_timer_trg_cfg = {
+ .valids_table = stm32h7_valids_table,
+ .num_valids_table = ARRAY_SIZE(stm32h7_valids_table),
+};
+
static const struct of_device_id stm32_trig_of_match[] = {
- { .compatible = "st,stm32-timer-trigger", },
+ {
+ .compatible = "st,stm32-timer-trigger",
+ .data = (void *)&stm32_timer_trg_cfg,
+ }, {
+ .compatible = "st,stm32h7-timer-trigger",
+ .data = (void *)&stm32h7_timer_trg_cfg,
+ },
{ /* end node */ },
};
MODULE_DEVICE_TABLE(of, stm32_trig_of_match);
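
A short worked example for the SCALE branch of stm32_counter_read_raw() above: in quadrature mode (SMS == 3) every edge of both encoder inputs increments the counter, so the reported scale is 1/2^2 = 0.25. Assuming the standard IIO sysfs naming for an indexed count channel:

	/* position = in_count0_raw * in_count0_scale = 400 * 0.25 = 100 encoder periods */
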
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 234fe01904e7..3726205c8704 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -34,6 +34,15 @@ config INFINIBAND_USER_ACCESS
libibverbs, libibcm and a hardware driver library from
<http://www.openfabrics.org/git/>.
+config INFINIBAND_EXP_USER_ACCESS
+ bool "Allow experimental support for Infiniband ABI"
+ depends on INFINIBAND_USER_ACCESS
+ ---help---
+ IOCTL based ABI support for Infiniband. This allows userspace
+ to invoke the experimental IOCTL based ABI.
+ These commands are parsed via a per-device parsing tree and
+ enable per-device features.
+
config INFINIBAND_USER_MEM
bool
depends on INFINIBAND_USER_ACCESS != n
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index e3cdafff8ece..b4df164f71a6 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -11,7 +11,8 @@ ib_core-y := packer.o ud_header.o verbs.o cq.o rw.o sysfs.o \
device.o fmr_pool.o cache.o netlink.o \
roce_gid_mgmt.o mr_pool.o addr.o sa_query.o \
multicast.o mad.o smi.o agent.o mad_rmpp.o \
- security.o
+ security.o nldev.o
+
ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
ib_core-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o umem_rbtree.o
ib_core-$(CONFIG_CGROUP_RDMA) += cgroup.o
@@ -31,4 +32,5 @@ ib_umad-y := user_mad.o
ib_ucm-y := ucm.o
ib_uverbs-y := uverbs_main.o uverbs_cmd.o uverbs_marshall.o \
- rdma_core.o uverbs_std_types.o
+ rdma_core.o uverbs_std_types.o uverbs_ioctl.o \
+ uverbs_ioctl_merge.o
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index a6cb379a4ebc..12523f630b61 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -61,6 +61,7 @@ struct addr_req {
void (*callback)(int status, struct sockaddr *src_addr,
struct rdma_dev_addr *addr, void *context);
unsigned long timeout;
+ struct delayed_work work;
int status;
u32 seq;
};
@@ -129,13 +130,11 @@ static void ib_nl_process_good_ip_rsep(const struct nlmsghdr *nlh)
}
int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
- struct netlink_callback *cb)
+ struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
{
- const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
-
if ((nlh->nlmsg_flags & NLM_F_REQUEST) ||
- !(NETLINK_CB(skb).sk) ||
- !netlink_capable(skb, CAP_NET_ADMIN))
+ !(NETLINK_CB(skb).sk))
return -EPERM;
if (ib_nl_is_good_ip_resp(nlh))
@@ -185,7 +184,7 @@ static int ib_nl_ip_send_msg(struct rdma_dev_addr *dev_addr,
/* Repair the nlmsg header length */
nlmsg_end(skb, nlh);
- ibnl_multicast(skb, nlh, RDMA_NL_GROUP_LS, GFP_KERNEL);
+ rdma_nl_multicast(skb, RDMA_NL_GROUP_LS, GFP_KERNEL);
/* Make the request retry, so when we get the response from userspace
* we will have something.
@@ -268,6 +267,7 @@ int rdma_translate_ip(const struct sockaddr *addr,
return ret;
ret = rdma_copy_addr(dev_addr, dev, NULL);
+ dev_addr->bound_dev_if = dev->ifindex;
if (vlan_id)
*vlan_id = rdma_vlan_dev_vlan_id(dev);
dev_put(dev);
@@ -280,6 +280,7 @@ int rdma_translate_ip(const struct sockaddr *addr,
&((const struct sockaddr_in6 *)addr)->sin6_addr,
dev, 1)) {
ret = rdma_copy_addr(dev_addr, dev, NULL);
+ dev_addr->bound_dev_if = dev->ifindex;
if (vlan_id)
*vlan_id = rdma_vlan_dev_vlan_id(dev);
break;
@@ -293,7 +294,7 @@ int rdma_translate_ip(const struct sockaddr *addr,
}
EXPORT_SYMBOL(rdma_translate_ip);
-static void set_timeout(unsigned long time)
+static void set_timeout(struct delayed_work *delayed_work, unsigned long time)
{
unsigned long delay;
@@ -301,7 +302,7 @@ static void set_timeout(unsigned long time)
if ((long)delay < 0)
delay = 0;
- mod_delayed_work(addr_wq, &work, delay);
+ mod_delayed_work(addr_wq, delayed_work, delay);
}
static void queue_req(struct addr_req *req)
@@ -316,15 +317,14 @@ static void queue_req(struct addr_req *req)
list_add(&req->list, &temp_req->list);
- if (req_list.next == &req->list)
- set_timeout(req->timeout);
+ set_timeout(&req->work, req->timeout);
mutex_unlock(&lock);
}
static int ib_nl_fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *dev_addr,
const void *daddr, u32 seq, u16 family)
{
- if (ibnl_chk_listeners(RDMA_NL_GROUP_LS))
+ if (rdma_nl_chk_listeners(RDMA_NL_GROUP_LS))
return -EADDRNOTAVAIL;
/* We fill in what we can, the response will fill the rest */
@@ -405,10 +405,10 @@ static int addr4_resolve(struct sockaddr_in *src_in,
fl4.saddr = src_ip;
fl4.flowi4_oif = addr->bound_dev_if;
rt = ip_route_output_key(addr->net, &fl4);
- if (IS_ERR(rt)) {
- ret = PTR_ERR(rt);
- goto out;
- }
+ ret = PTR_ERR_OR_ZERO(rt);
+ if (ret)
+ return ret;
+
src_in->sin_family = AF_INET;
src_in->sin_addr.s_addr = fl4.saddr;
@@ -423,8 +423,6 @@ static int addr4_resolve(struct sockaddr_in *src_in,
*prt = rt;
return 0;
-out:
- return ret;
}
#if IS_ENABLED(CONFIG_IPV6)
@@ -509,6 +507,11 @@ static int addr_resolve(struct sockaddr *src_in,
struct dst_entry *dst;
int ret;
+ if (!addr->net) {
+ pr_warn_ratelimited("%s: missing namespace\n", __func__);
+ return -EINVAL;
+ }
+
if (src_in->sa_family == AF_INET) {
struct rtable *rt = NULL;
const struct sockaddr_in *dst_in4 =
@@ -522,8 +525,12 @@ static int addr_resolve(struct sockaddr *src_in,
if (resolve_neigh)
ret = addr_resolve_neigh(&rt->dst, dst_in, addr, seq);
- ndev = rt->dst.dev;
- dev_hold(ndev);
+ if (addr->bound_dev_if) {
+ ndev = dev_get_by_index(addr->net, addr->bound_dev_if);
+ } else {
+ ndev = rt->dst.dev;
+ dev_hold(ndev);
+ }
ip_rt_put(rt);
} else {
@@ -539,19 +546,63 @@ static int addr_resolve(struct sockaddr *src_in,
if (resolve_neigh)
ret = addr_resolve_neigh(dst, dst_in, addr, seq);
- ndev = dst->dev;
- dev_hold(ndev);
+ if (addr->bound_dev_if) {
+ ndev = dev_get_by_index(addr->net, addr->bound_dev_if);
+ } else {
+ ndev = dst->dev;
+ dev_hold(ndev);
+ }
dst_release(dst);
}
- addr->bound_dev_if = ndev->ifindex;
- addr->net = dev_net(ndev);
+ if (ndev->flags & IFF_LOOPBACK) {
+ ret = rdma_translate_ip(dst_in, addr, NULL);
+ /*
+ * Put the loopback device and get the translated
+ * device instead.
+ */
+ dev_put(ndev);
+ ndev = dev_get_by_index(addr->net, addr->bound_dev_if);
+ } else {
+ addr->bound_dev_if = ndev->ifindex;
+ }
dev_put(ndev);
return ret;
}
+static void process_one_req(struct work_struct *_work)
+{
+ struct addr_req *req;
+ struct sockaddr *src_in, *dst_in;
+
+ mutex_lock(&lock);
+ req = container_of(_work, struct addr_req, work.work);
+
+ if (req->status == -ENODATA) {
+ src_in = (struct sockaddr *)&req->src_addr;
+ dst_in = (struct sockaddr *)&req->dst_addr;
+ req->status = addr_resolve(src_in, dst_in, req->addr,
+ true, req->seq);
+ if (req->status && time_after_eq(jiffies, req->timeout)) {
+ req->status = -ETIMEDOUT;
+ } else if (req->status == -ENODATA) {
+ /* requeue the work for retrying again */
+ set_timeout(&req->work, req->timeout);
+ mutex_unlock(&lock);
+ return;
+ }
+ }
+ list_del(&req->list);
+ mutex_unlock(&lock);
+
+ req->callback(req->status, (struct sockaddr *)&req->src_addr,
+ req->addr, req->context);
+ put_client(req->client);
+ kfree(req);
+}
+
static void process_req(struct work_struct *work)
{
struct addr_req *req, *temp_req;
@@ -569,20 +620,23 @@ static void process_req(struct work_struct *work)
true, req->seq);
if (req->status && time_after_eq(jiffies, req->timeout))
req->status = -ETIMEDOUT;
- else if (req->status == -ENODATA)
+ else if (req->status == -ENODATA) {
+ set_timeout(&req->work, req->timeout);
continue;
+ }
}
list_move_tail(&req->list, &done_list);
}
- if (!list_empty(&req_list)) {
- req = list_entry(req_list.next, struct addr_req, list);
- set_timeout(req->timeout);
- }
mutex_unlock(&lock);
list_for_each_entry_safe(req, temp_req, &done_list, list) {
list_del(&req->list);
+ /* It is safe to cancel other work items from this work item
+ * because only one work item can run at a time on this
+ * single-threaded workqueue.
+ */
+ cancel_delayed_work(&req->work);
req->callback(req->status, (struct sockaddr *) &req->src_addr,
req->addr, req->context);
put_client(req->client);
@@ -625,6 +679,7 @@ int rdma_resolve_ip(struct rdma_addr_client *client,
req->context = context;
req->client = client;
atomic_inc(&client->refcount);
+ INIT_DELAYED_WORK(&req->work, process_one_req);
req->seq = (u32)atomic_inc_return(&ib_nl_addr_request_seq);
req->status = addr_resolve(src_in, dst_in, addr, true, req->seq);
@@ -679,7 +734,7 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr)
req->status = -ECANCELED;
req->timeout = jiffies;
list_move(&req->list, &req_list);
- set_timeout(req->timeout);
+ set_timeout(&req->work, req->timeout);
break;
}
}
@@ -785,9 +840,8 @@ static int netevent_callback(struct notifier_block *self, unsigned long event,
if (event == NETEVENT_NEIGH_UPDATE) {
struct neighbour *neigh = ctx;
- if (neigh->nud_state & NUD_VALID) {
- set_timeout(jiffies);
- }
+ if (neigh->nud_state & NUD_VALID)
+ set_timeout(&work, jiffies);
}
return 0;
}
@@ -798,7 +852,7 @@ static struct notifier_block nb = {
int addr_init(void)
{
- addr_wq = alloc_workqueue("ib_addr", WQ_MEM_RECLAIM, 0);
+ addr_wq = alloc_ordered_workqueue("ib_addr", WQ_MEM_RECLAIM);
if (!addr_wq)
return -ENOMEM;
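
Two pieces of this change work together: each request now owns a delayed_work armed through set_timeout(), and the workqueue is created with alloc_ordered_workqueue(), which guarantees at most one work item executes at a time. That ordering is what makes the cancel_delayed_work() call in process_req() safe, as the in-diff comment notes. The per-request wiring reduced to its two essential lines (sketch):

	INIT_DELAYED_WORK(&req->work, process_one_req);	/* once, when the request is created  */
	set_timeout(&req->work, req->timeout);		/* re-armed whenever a retry is needed */
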
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index efc94304dee3..77515638c55c 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -1199,30 +1199,23 @@ int ib_cache_setup_one(struct ib_device *device)
device->cache.ports =
kzalloc(sizeof(*device->cache.ports) *
(rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
- if (!device->cache.ports) {
- err = -ENOMEM;
- goto out;
- }
+ if (!device->cache.ports)
+ return -ENOMEM;
err = gid_table_setup_one(device);
- if (err)
- goto out;
+ if (err) {
+ kfree(device->cache.ports);
+ device->cache.ports = NULL;
+ return err;
+ }
for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
ib_cache_update(device, p + rdma_start_port(device), true);
INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
device, ib_cache_event);
- err = ib_register_event_handler(&device->cache.event_handler);
- if (err)
- goto err;
-
+ ib_register_event_handler(&device->cache.event_handler);
return 0;
-
-err:
- gid_table_cleanup_one(device);
-out:
- return err;
}
void ib_cache_release_one(struct ib_device *device)
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 2b4d613a3474..4c4b46586af2 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -373,11 +373,19 @@ out:
return ret;
}
-static int cm_alloc_response_msg(struct cm_port *port,
- struct ib_mad_recv_wc *mad_recv_wc,
- struct ib_mad_send_buf **msg)
+static struct ib_mad_send_buf *cm_alloc_response_msg_no_ah(struct cm_port *port,
+ struct ib_mad_recv_wc *mad_recv_wc)
+{
+ return ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
+ 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
+ GFP_ATOMIC,
+ IB_MGMT_BASE_VERSION);
+}
+
+static int cm_create_response_msg_ah(struct cm_port *port,
+ struct ib_mad_recv_wc *mad_recv_wc,
+ struct ib_mad_send_buf *msg)
{
- struct ib_mad_send_buf *m;
struct ib_ah *ah;
ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
@@ -385,27 +393,40 @@ static int cm_alloc_response_msg(struct cm_port *port,
if (IS_ERR(ah))
return PTR_ERR(ah);
- m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
- 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
- GFP_ATOMIC,
- IB_MGMT_BASE_VERSION);
- if (IS_ERR(m)) {
- rdma_destroy_ah(ah);
- return PTR_ERR(m);
- }
- m->ah = ah;
- *msg = m;
+ msg->ah = ah;
return 0;
}
static void cm_free_msg(struct ib_mad_send_buf *msg)
{
- rdma_destroy_ah(msg->ah);
+ if (msg->ah)
+ rdma_destroy_ah(msg->ah);
if (msg->context[0])
cm_deref_id(msg->context[0]);
ib_free_send_mad(msg);
}
+static int cm_alloc_response_msg(struct cm_port *port,
+ struct ib_mad_recv_wc *mad_recv_wc,
+ struct ib_mad_send_buf **msg)
+{
+ struct ib_mad_send_buf *m;
+ int ret;
+
+ m = cm_alloc_response_msg_no_ah(port, mad_recv_wc);
+ if (IS_ERR(m))
+ return PTR_ERR(m);
+
+ ret = cm_create_response_msg_ah(port, mad_recv_wc, m);
+ if (ret) {
+ cm_free_msg(m);
+ return ret;
+ }
+
+ *msg = m;
+ return 0;
+}
+
static void * cm_copy_private_data(const void *private_data,
u8 private_data_len)
{
@@ -1175,6 +1196,11 @@ static void cm_format_req(struct cm_req_msg *req_msg,
{
struct sa_path_rec *pri_path = param->primary_path;
struct sa_path_rec *alt_path = param->alternate_path;
+ bool pri_ext = false;
+
+ if (pri_path->rec_type == SA_PATH_REC_TYPE_OPA)
+ pri_ext = opa_is_extended_lid(pri_path->opa.dlid,
+ pri_path->opa.slid);
cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));
@@ -1202,18 +1228,24 @@ static void cm_format_req(struct cm_req_msg *req_msg,
cm_req_set_srq(req_msg, param->srq);
}
+ req_msg->primary_local_gid = pri_path->sgid;
+ req_msg->primary_remote_gid = pri_path->dgid;
+ if (pri_ext) {
+ req_msg->primary_local_gid.global.interface_id
+ = OPA_MAKE_ID(be32_to_cpu(pri_path->opa.slid));
+ req_msg->primary_remote_gid.global.interface_id
+ = OPA_MAKE_ID(be32_to_cpu(pri_path->opa.dlid));
+ }
if (pri_path->hop_limit <= 1) {
- req_msg->primary_local_lid =
+ req_msg->primary_local_lid = pri_ext ? 0 :
htons(ntohl(sa_path_get_slid(pri_path)));
- req_msg->primary_remote_lid =
+ req_msg->primary_remote_lid = pri_ext ? 0 :
htons(ntohl(sa_path_get_dlid(pri_path)));
} else {
/* Work-around until there's a way to obtain remote LID info */
req_msg->primary_local_lid = IB_LID_PERMISSIVE;
req_msg->primary_remote_lid = IB_LID_PERMISSIVE;
}
- req_msg->primary_local_gid = pri_path->sgid;
- req_msg->primary_remote_gid = pri_path->dgid;
cm_req_set_primary_flow_label(req_msg, pri_path->flow_label);
cm_req_set_primary_packet_rate(req_msg, pri_path->rate);
req_msg->primary_traffic_class = pri_path->traffic_class;
@@ -1225,17 +1257,29 @@ static void cm_format_req(struct cm_req_msg *req_msg,
pri_path->packet_life_time));
if (alt_path) {
+ bool alt_ext = false;
+
+ if (alt_path->rec_type == SA_PATH_REC_TYPE_OPA)
+ alt_ext = opa_is_extended_lid(alt_path->opa.dlid,
+ alt_path->opa.slid);
+
+ req_msg->alt_local_gid = alt_path->sgid;
+ req_msg->alt_remote_gid = alt_path->dgid;
+ if (alt_ext) {
+ req_msg->alt_local_gid.global.interface_id
+ = OPA_MAKE_ID(be32_to_cpu(alt_path->opa.slid));
+ req_msg->alt_remote_gid.global.interface_id
+ = OPA_MAKE_ID(be32_to_cpu(alt_path->opa.dlid));
+ }
if (alt_path->hop_limit <= 1) {
- req_msg->alt_local_lid =
+ req_msg->alt_local_lid = alt_ext ? 0 :
htons(ntohl(sa_path_get_slid(alt_path)));
- req_msg->alt_remote_lid =
+ req_msg->alt_remote_lid = alt_ext ? 0 :
htons(ntohl(sa_path_get_dlid(alt_path)));
} else {
req_msg->alt_local_lid = IB_LID_PERMISSIVE;
req_msg->alt_remote_lid = IB_LID_PERMISSIVE;
}
- req_msg->alt_local_gid = alt_path->sgid;
- req_msg->alt_remote_gid = alt_path->dgid;
cm_req_set_alt_flow_label(req_msg,
alt_path->flow_label);
cm_req_set_alt_packet_rate(req_msg, alt_path->rate);
@@ -1405,16 +1449,63 @@ static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
(be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
}
+static bool cm_req_has_alt_path(struct cm_req_msg *req_msg)
+{
+ return ((req_msg->alt_local_lid) ||
+ (ib_is_opa_gid(&req_msg->alt_local_gid)));
+}
+
+static void cm_path_set_rec_type(struct ib_device *ib_device, u8 port_num,
+ struct sa_path_rec *path, union ib_gid *gid)
+{
+ if (ib_is_opa_gid(gid) && rdma_cap_opa_ah(ib_device, port_num))
+ path->rec_type = SA_PATH_REC_TYPE_OPA;
+ else
+ path->rec_type = SA_PATH_REC_TYPE_IB;
+}
+
+static void cm_format_path_lid_from_req(struct cm_req_msg *req_msg,
+ struct sa_path_rec *primary_path,
+ struct sa_path_rec *alt_path)
+{
+ u32 lid;
+
+ if (primary_path->rec_type != SA_PATH_REC_TYPE_OPA) {
+ sa_path_set_dlid(primary_path,
+ htonl(ntohs(req_msg->primary_local_lid)));
+ sa_path_set_slid(primary_path,
+ htonl(ntohs(req_msg->primary_remote_lid)));
+ } else {
+ lid = opa_get_lid_from_gid(&req_msg->primary_local_gid);
+ sa_path_set_dlid(primary_path, cpu_to_be32(lid));
+
+ lid = opa_get_lid_from_gid(&req_msg->primary_remote_gid);
+ sa_path_set_slid(primary_path, cpu_to_be32(lid));
+ }
+
+ if (!cm_req_has_alt_path(req_msg))
+ return;
+
+ if (alt_path->rec_type != SA_PATH_REC_TYPE_OPA) {
+ sa_path_set_dlid(alt_path,
+ htonl(ntohs(req_msg->alt_local_lid)));
+ sa_path_set_slid(alt_path,
+ htonl(ntohs(req_msg->alt_remote_lid)));
+ } else {
+ lid = opa_get_lid_from_gid(&req_msg->alt_local_gid);
+ sa_path_set_dlid(alt_path, cpu_to_be32(lid));
+
+ lid = opa_get_lid_from_gid(&req_msg->alt_remote_gid);
+ sa_path_set_slid(alt_path, cpu_to_be32(lid));
+ }
+}
+
static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
struct sa_path_rec *primary_path,
struct sa_path_rec *alt_path)
{
primary_path->dgid = req_msg->primary_local_gid;
primary_path->sgid = req_msg->primary_remote_gid;
- sa_path_set_dlid(primary_path,
- htonl(ntohs(req_msg->primary_local_lid)));
- sa_path_set_slid(primary_path,
- htonl(ntohs(req_msg->primary_remote_lid)));
primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
primary_path->hop_limit = req_msg->primary_hop_limit;
primary_path->traffic_class = req_msg->primary_traffic_class;
@@ -1431,13 +1522,9 @@ static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
primary_path->service_id = req_msg->service_id;
- if (req_msg->alt_local_lid) {
+ if (cm_req_has_alt_path(req_msg)) {
alt_path->dgid = req_msg->alt_local_gid;
alt_path->sgid = req_msg->alt_remote_gid;
- sa_path_set_dlid(alt_path,
- htonl(ntohs(req_msg->alt_local_lid)));
- sa_path_set_slid(alt_path,
- htonl(ntohs(req_msg->alt_remote_lid)));
alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
alt_path->hop_limit = req_msg->alt_hop_limit;
alt_path->traffic_class = req_msg->alt_traffic_class;
@@ -1454,6 +1541,7 @@ static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
alt_path->service_id = req_msg->service_id;
}
+ cm_format_path_lid_from_req(req_msg, primary_path, alt_path);
}
static u16 cm_get_bth_pkey(struct cm_work *work)
@@ -1703,7 +1791,7 @@ static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
{
if (!cm_req_get_primary_subnet_local(req_msg)) {
if (req_msg->primary_local_lid == IB_LID_PERMISSIVE) {
- req_msg->primary_local_lid = cpu_to_be16(wc->slid);
+ req_msg->primary_local_lid = ib_lid_be16(wc->slid);
cm_req_set_primary_sl(req_msg, wc->sl);
}
@@ -1713,7 +1801,7 @@ static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
if (!cm_req_get_alt_subnet_local(req_msg)) {
if (req_msg->alt_local_lid == IB_LID_PERMISSIVE) {
- req_msg->alt_local_lid = cpu_to_be16(wc->slid);
+ req_msg->alt_local_lid = ib_lid_be16(wc->slid);
cm_req_set_alt_sl(req_msg, wc->sl);
}
@@ -1784,9 +1872,12 @@ static int cm_req_handler(struct cm_work *work)
dev_net(gid_attr.ndev));
dev_put(gid_attr.ndev);
} else {
- work->path[0].rec_type = SA_PATH_REC_TYPE_IB;
+ cm_path_set_rec_type(work->port->cm_dev->ib_device,
+ work->port->port_num,
+ &work->path[0],
+ &req_msg->primary_local_gid);
}
- if (req_msg->alt_local_lid)
+ if (cm_req_has_alt_path(req_msg))
work->path[1].rec_type = work->path[0].rec_type;
cm_format_paths_from_req(req_msg, &work->path[0],
&work->path[1]);
@@ -1811,16 +1902,19 @@ static int cm_req_handler(struct cm_work *work)
dev_net(gid_attr.ndev));
dev_put(gid_attr.ndev);
} else {
- work->path[0].rec_type = SA_PATH_REC_TYPE_IB;
+ cm_path_set_rec_type(work->port->cm_dev->ib_device,
+ work->port->port_num,
+ &work->path[0],
+ &req_msg->primary_local_gid);
}
- if (req_msg->alt_local_lid)
+ if (cm_req_has_alt_path(req_msg))
work->path[1].rec_type = work->path[0].rec_type;
ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
&work->path[0].sgid, sizeof work->path[0].sgid,
NULL, 0);
goto rejected;
}
- if (req_msg->alt_local_lid) {
+ if (cm_req_has_alt_path(req_msg)) {
ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av,
cm_id_priv);
if (ret) {
@@ -2424,7 +2518,8 @@ static int cm_dreq_handler(struct cm_work *work)
case IB_CM_TIMEWAIT:
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
counter[CM_DREQ_COUNTER]);
- if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
+ msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
+ if (IS_ERR(msg))
goto unlock;
cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
@@ -2432,7 +2527,8 @@ static int cm_dreq_handler(struct cm_work *work)
cm_id_priv->private_data_len);
spin_unlock_irq(&cm_id_priv->lock);
- if (ib_post_send_mad(msg, NULL))
+ if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
+ ib_post_send_mad(msg, NULL))
cm_free_msg(msg);
goto deref;
case IB_CM_DREQ_RCVD:
@@ -2843,6 +2939,11 @@ static void cm_format_lap(struct cm_lap_msg *lap_msg,
const void *private_data,
u8 private_data_len)
{
+ bool alt_ext = false;
+
+ if (alternate_path->rec_type == SA_PATH_REC_TYPE_OPA)
+ alt_ext = opa_is_extended_lid(alternate_path->opa.dlid,
+ alternate_path->opa.slid);
cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
lap_msg->local_comm_id = cm_id_priv->id.local_id;
@@ -2856,6 +2957,12 @@ static void cm_format_lap(struct cm_lap_msg *lap_msg,
htons(ntohl(sa_path_get_dlid(alternate_path)));
lap_msg->alt_local_gid = alternate_path->sgid;
lap_msg->alt_remote_gid = alternate_path->dgid;
+ if (alt_ext) {
+ lap_msg->alt_local_gid.global.interface_id
+ = OPA_MAKE_ID(be32_to_cpu(alternate_path->opa.slid));
+ lap_msg->alt_remote_gid.global.interface_id
+ = OPA_MAKE_ID(be32_to_cpu(alternate_path->opa.dlid));
+ }
cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
lap_msg->alt_hop_limit = alternate_path->hop_limit;
@@ -2924,16 +3031,29 @@ out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}
EXPORT_SYMBOL(ib_send_cm_lap);
+static void cm_format_path_lid_from_lap(struct cm_lap_msg *lap_msg,
+ struct sa_path_rec *path)
+{
+ u32 lid;
+
+ if (path->rec_type != SA_PATH_REC_TYPE_OPA) {
+ sa_path_set_dlid(path, htonl(ntohs(lap_msg->alt_local_lid)));
+ sa_path_set_slid(path, htonl(ntohs(lap_msg->alt_remote_lid)));
+ } else {
+ lid = opa_get_lid_from_gid(&lap_msg->alt_local_gid);
+ sa_path_set_dlid(path, cpu_to_be32(lid));
+
+ lid = opa_get_lid_from_gid(&lap_msg->alt_remote_gid);
+ sa_path_set_slid(path, cpu_to_be32(lid));
+ }
+}
+
static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
struct sa_path_rec *path,
struct cm_lap_msg *lap_msg)
{
- memset(path, 0, sizeof *path);
- path->rec_type = SA_PATH_REC_TYPE_IB;
path->dgid = lap_msg->alt_local_gid;
path->sgid = lap_msg->alt_remote_gid;
- sa_path_set_dlid(path, htonl(ntohs(lap_msg->alt_local_lid)));
- sa_path_set_slid(path, htonl(ntohs(lap_msg->alt_remote_lid)));
path->flow_label = cm_lap_get_flow_label(lap_msg);
path->hop_limit = lap_msg->alt_hop_limit;
path->traffic_class = cm_lap_get_traffic_class(lap_msg);
@@ -2947,6 +3067,7 @@ static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
path->packet_life_time_selector = IB_SA_EQ;
path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
path->packet_life_time -= (path->packet_life_time > 0);
+ cm_format_path_lid_from_lap(lap_msg, path);
}
static int cm_lap_handler(struct cm_work *work)
@@ -2965,6 +3086,11 @@ static int cm_lap_handler(struct cm_work *work)
return -EINVAL;
param = &work->cm_event.param.lap_rcvd;
+ memset(&work->path[0], 0, sizeof(work->path[1]));
+ cm_path_set_rec_type(work->port->cm_dev->ib_device,
+ work->port->port_num,
+ &work->path[0],
+ &lap_msg->alt_local_gid);
param->alternate_path = &work->path[0];
cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
work->cm_event.private_data = &lap_msg->private_data;
@@ -2980,7 +3106,8 @@ static int cm_lap_handler(struct cm_work *work)
case IB_CM_MRA_LAP_SENT:
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
counter[CM_LAP_COUNTER]);
- if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
+ msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
+ if (IS_ERR(msg))
goto unlock;
cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
@@ -2990,7 +3117,8 @@ static int cm_lap_handler(struct cm_work *work)
cm_id_priv->private_data_len);
spin_unlock_irq(&cm_id_priv->lock);
- if (ib_post_send_mad(msg, NULL))
+ if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
+ ib_post_send_mad(msg, NULL))
cm_free_msg(msg);
goto deref;
case IB_CM_LAP_RCVD:
@@ -4201,7 +4329,7 @@ static int __init ib_cm_init(void)
goto error1;
}
- cm.wq = create_workqueue("ib_cm");
+ cm.wq = alloc_workqueue("ib_cm", 0, 1);
if (!cm.wq) {
ret = -ENOMEM;
goto error2;
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 31bb82d8ecd7..852c8fec8088 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -72,6 +72,7 @@ MODULE_LICENSE("Dual BSD/GPL");
#define CMA_MAX_CM_RETRIES 15
#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
#define CMA_IBOE_PACKET_LIFETIME 18
+#define CMA_PREFERRED_ROCE_GID_TYPE IB_GID_TYPE_ROCE_UDP_ENCAP
static const char * const cma_events[] = {
[RDMA_CM_EVENT_ADDR_RESOLVED] = "address resolved",
@@ -623,22 +624,11 @@ static inline int cma_validate_port(struct ib_device *device, u8 port,
if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
return ret;
- if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) {
+ if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port))
ndev = dev_get_by_index(&init_net, bound_if_index);
- if (ndev && ndev->flags & IFF_LOOPBACK) {
- pr_info("detected loopback device\n");
- dev_put(ndev);
-
- if (!device->get_netdev)
- return -EOPNOTSUPP;
-
- ndev = device->get_netdev(device, port);
- if (!ndev)
- return -ENODEV;
- }
- } else {
+ else
gid_type = IB_GID_TYPE_IB;
- }
+
ret = ib_find_cached_gid_by_port(device, gid, gid_type, port,
ndev, NULL);
@@ -1044,6 +1034,8 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
} else
ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
qp_attr_mask);
+ qp_attr->port_num = id_priv->id.port_num;
+ *qp_attr_mask |= IB_QP_PORT;
} else
ret = -ENOSYS;
@@ -2569,21 +2561,6 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
goto err2;
}
- if (ndev->flags & IFF_LOOPBACK) {
- dev_put(ndev);
- if (!id_priv->id.device->get_netdev) {
- ret = -EOPNOTSUPP;
- goto err2;
- }
-
- ndev = id_priv->id.device->get_netdev(id_priv->id.device,
- id_priv->id.port_num);
- if (!ndev) {
- ret = -ENODEV;
- goto err2;
- }
- }
-
supported_gids = roce_gid_type_mask_support(id_priv->id.device,
id_priv->id.port_num);
gid_type = cma_route_gid_type(addr->dev_addr.network,
@@ -4022,7 +3999,8 @@ static void iboe_mcast_work_handler(struct work_struct *work)
kfree(mw);
}
-static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid)
+static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
+ enum ib_gid_type gid_type)
{
struct sockaddr_in *sin = (struct sockaddr_in *)addr;
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;
@@ -4032,8 +4010,8 @@ static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid)
} else if (addr->sa_family == AF_INET6) {
memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
} else {
- mgid->raw[0] = 0xff;
- mgid->raw[1] = 0x0e;
+ mgid->raw[0] = (gid_type == IB_GID_TYPE_IB) ? 0xff : 0;
+ mgid->raw[1] = (gid_type == IB_GID_TYPE_IB) ? 0x0e : 0;
mgid->raw[2] = 0;
mgid->raw[3] = 0;
mgid->raw[4] = 0;
@@ -4074,7 +4052,9 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
goto out1;
}
- cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid);
+ gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
+ rdma_start_port(id_priv->cma_dev->device)];
+ cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid, gid_type);
mc->multicast.ib->rec.pkey = cpu_to_be16(0xffff);
if (id_priv->id.ps == RDMA_PS_UDP)
@@ -4090,8 +4070,6 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
mc->multicast.ib->rec.hop_limit = 1;
mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->mtu);
- gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
- rdma_start_port(id_priv->cma_dev->device)];
if (addr->sa_family == AF_INET) {
if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
@@ -4304,8 +4282,12 @@ static void cma_add_one(struct ib_device *device)
for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
supported_gids = roce_gid_type_mask_support(device, i);
WARN_ON(!supported_gids);
- cma_dev->default_gid_type[i - rdma_start_port(device)] =
- find_first_bit(&supported_gids, BITS_PER_LONG);
+ if (supported_gids & (1 << CMA_PREFERRED_ROCE_GID_TYPE))
+ cma_dev->default_gid_type[i - rdma_start_port(device)] =
+ CMA_PREFERRED_ROCE_GID_TYPE;
+ else
+ cma_dev->default_gid_type[i - rdma_start_port(device)] =
+ find_first_bit(&supported_gids, BITS_PER_LONG);
cma_dev->default_roce_tos[i - rdma_start_port(device)] = 0;
}
@@ -4476,9 +4458,8 @@ out:
return skb->len;
}
-static const struct ibnl_client_cbs cma_cb_table[] = {
- [RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats,
- .module = THIS_MODULE },
+static const struct rdma_nl_cbs cma_cb_table[] = {
+ [RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats},
};
static int cma_init_net(struct net *net)
@@ -4530,9 +4511,7 @@ static int __init cma_init(void)
if (ret)
goto err;
- if (ibnl_add_client(RDMA_NL_RDMA_CM, ARRAY_SIZE(cma_cb_table),
- cma_cb_table))
- pr_warn("RDMA CMA: failed to add netlink callback\n");
+ rdma_nl_register(RDMA_NL_RDMA_CM, cma_cb_table);
cma_configfs_init();
return 0;
@@ -4549,7 +4528,7 @@ err_wq:
static void __exit cma_cleanup(void)
{
cma_configfs_exit();
- ibnl_remove_client(RDMA_NL_RDMA_CM);
+ rdma_nl_unregister(RDMA_NL_RDMA_CM);
ib_unregister_client(&cma_client);
unregister_netdevice_notifier(&cma_nb);
rdma_addr_unregister_client(&addr_client);
@@ -4558,5 +4537,7 @@ static void __exit cma_cleanup(void)
destroy_workqueue(cma_wq);
}
+MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_RDMA_CM, 1);
+
module_init(cma_init);
module_exit(cma_cleanup);
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 11ae67514e13..a1d687a664f8 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -38,6 +38,7 @@
#include <linux/cgroup_rdma.h>
#include <rdma/ib_verbs.h>
+#include <rdma/opa_addr.h>
#include <rdma/ib_mad.h>
#include "mad_priv.h"
@@ -102,6 +103,14 @@ void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
roce_netdev_callback cb,
void *cookie);
+typedef int (*nldev_callback)(struct ib_device *device,
+ struct sk_buff *skb,
+ struct netlink_callback *cb,
+ unsigned int idx);
+
+int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb,
+ struct netlink_callback *cb);
+
enum ib_cache_gid_default_mode {
IB_CACHE_GID_DEFAULT_MODE_SET,
IB_CACHE_GID_DEFAULT_MODE_DELETE
@@ -179,8 +188,8 @@ void ib_mad_cleanup(void);
int ib_sa_init(void);
void ib_sa_cleanup(void);
-int ibnl_init(void);
-void ibnl_cleanup(void);
+int rdma_nl_init(void);
+void rdma_nl_exit(void);
/**
* Check if there are any listeners to the netlink group
@@ -190,11 +199,14 @@ void ibnl_cleanup(void);
int ibnl_chk_listeners(unsigned int group);
int ib_nl_handle_resolve_resp(struct sk_buff *skb,
- struct netlink_callback *cb);
+ struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack);
int ib_nl_handle_set_timeout(struct sk_buff *skb,
- struct netlink_callback *cb);
+ struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack);
int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
- struct netlink_callback *cb);
+ struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack);
int ib_get_cached_subnet_prefix(struct ib_device *device,
u8 port_num,
@@ -301,4 +313,9 @@ static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map,
return 0;
}
#endif
+
+struct ib_device *__ib_device_get_by_index(u32 ifindex);
+/* RDMA device netlink */
+void nldev_init(void);
+void nldev_exit(void);
#endif /* _CORE_PRIV_H */
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index a5dfab6adf49..84fc32a2c8b3 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -134,6 +134,17 @@ static int ib_device_check_mandatory(struct ib_device *device)
return 0;
}
+struct ib_device *__ib_device_get_by_index(u32 index)
+{
+ struct ib_device *device;
+
+ list_for_each_entry(device, &device_list, core_list)
+ if (device->index == index)
+ return device;
+
+ return NULL;
+}
+
static struct ib_device *__ib_device_get_by_name(const char *name)
{
struct ib_device *device;
@@ -145,7 +156,6 @@ static struct ib_device *__ib_device_get_by_name(const char *name)
return NULL;
}
-
static int alloc_name(char *name)
{
unsigned long *inuse;
@@ -326,10 +336,10 @@ static int read_port_immutable(struct ib_device *device)
return 0;
}
-void ib_get_device_fw_str(struct ib_device *dev, char *str, size_t str_len)
+void ib_get_device_fw_str(struct ib_device *dev, char *str)
{
if (dev->get_dev_fw_str)
- dev->get_dev_fw_str(dev, str, str_len);
+ dev->get_dev_fw_str(dev, str);
else
str[0] = '\0';
}
@@ -395,6 +405,30 @@ static int ib_security_change(struct notifier_block *nb, unsigned long event,
}
/**
+ * __dev_new_index - allocate a device index
+ *
+ * Returns a suitable unique value for a new device interface
+ * number. It assumes that fewer than 2^32-1 ib devices will be
+ * present in the system.
+ */
+static u32 __dev_new_index(void)
+{
+ /*
+ * The device index to allow stable naming.
+ * Similar to struct net -> ifindex.
+ */
+ static u32 index;
+
+ for (;;) {
+ if (!(++index))
+ index = 1;
+
+ if (!__ib_device_get_by_index(index))
+ return index;
+ }
+}
+
+/**
* ib_register_device - Register an IB device with IB core
* @device:Device to register
*
@@ -489,9 +523,10 @@ int ib_register_device(struct ib_device *device,
device->reg_state = IB_DEV_REGISTERED;
list_for_each_entry(client, &client_list, list)
- if (client->add && !add_client_context(device, client))
+ if (!add_client_context(device, client) && client->add)
client->add(device);
+ device->index = __dev_new_index();
down_write(&lists_rwsem);
list_add_tail(&device->core_list, &device_list);
up_write(&lists_rwsem);
@@ -537,10 +572,11 @@ void ib_unregister_device(struct ib_device *device)
}
up_read(&lists_rwsem);
- mutex_unlock(&device_mutex);
-
ib_device_unregister_rdmacg(device);
ib_device_unregister_sysfs(device);
+
+ mutex_unlock(&device_mutex);
+
ib_cache_cleanup_one(device);
ib_security_destroy_port_pkey_list(device);
@@ -577,7 +613,7 @@ int ib_register_client(struct ib_client *client)
mutex_lock(&device_mutex);
list_for_each_entry(device, &device_list, core_list)
- if (client->add && !add_client_context(device, client))
+ if (!add_client_context(device, client) && client->add)
client->add(device);
down_write(&lists_rwsem);
@@ -711,7 +747,7 @@ EXPORT_SYMBOL(ib_set_client_data);
* chapter 11 of the InfiniBand Architecture Specification). This
* callback may occur in interrupt context.
*/
-int ib_register_event_handler (struct ib_event_handler *event_handler)
+void ib_register_event_handler(struct ib_event_handler *event_handler)
{
unsigned long flags;
@@ -719,8 +755,6 @@ int ib_register_event_handler (struct ib_event_handler *event_handler)
list_add_tail(&event_handler->list,
&event_handler->device->event_handler_list);
spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
-
- return 0;
}
EXPORT_SYMBOL(ib_register_event_handler);
@@ -731,15 +765,13 @@ EXPORT_SYMBOL(ib_register_event_handler);
* Unregister an event handler registered with
* ib_register_event_handler().
*/
-int ib_unregister_event_handler(struct ib_event_handler *event_handler)
+void ib_unregister_event_handler(struct ib_event_handler *event_handler)
{
unsigned long flags;
spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
list_del(&event_handler->list);
spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
-
- return 0;
}
EXPORT_SYMBOL(ib_unregister_event_handler);
@@ -893,6 +925,31 @@ void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
}
/**
+ * ib_enum_all_devs - enumerate all ib_devices
+ * @cb: Callback to call for each found ib_device
+ *
+ * Enumerates all ib_devices and calls callback() on each device.
+ */
+int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct ib_device *dev;
+ unsigned int idx = 0;
+ int ret = 0;
+
+ down_read(&lists_rwsem);
+ list_for_each_entry(dev, &device_list, core_list) {
+ ret = nldev_cb(dev, skb, cb, idx);
+ if (ret)
+ break;
+ idx++;
+ }
+
+ up_read(&lists_rwsem);
+ return ret;
+}
+
+/**
* ib_query_pkey - Get P_Key table entry
* @device:Device to query
* @port_num:Port number to query
@@ -944,14 +1001,17 @@ int ib_modify_port(struct ib_device *device,
u8 port_num, int port_modify_mask,
struct ib_port_modify *port_modify)
{
- if (!device->modify_port)
- return -ENOSYS;
+ int rc;
if (!rdma_is_port_valid(device, port_num))
return -EINVAL;
- return device->modify_port(device, port_num, port_modify_mask,
- port_modify);
+ if (device->modify_port)
+ rc = device->modify_port(device, port_num, port_modify_mask,
+ port_modify);
+ else
+ rc = rdma_protocol_roce(device, port_num) ? 0 : -ENOSYS;
+ return rc;
}
EXPORT_SYMBOL(ib_modify_port);
@@ -1086,29 +1146,21 @@ struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
}
EXPORT_SYMBOL(ib_get_net_dev_by_params);
-static struct ibnl_client_cbs ibnl_ls_cb_table[] = {
+static const struct rdma_nl_cbs ibnl_ls_cb_table[] = {
[RDMA_NL_LS_OP_RESOLVE] = {
- .dump = ib_nl_handle_resolve_resp,
- .module = THIS_MODULE },
+ .doit = ib_nl_handle_resolve_resp,
+ .flags = RDMA_NL_ADMIN_PERM,
+ },
[RDMA_NL_LS_OP_SET_TIMEOUT] = {
- .dump = ib_nl_handle_set_timeout,
- .module = THIS_MODULE },
+ .doit = ib_nl_handle_set_timeout,
+ .flags = RDMA_NL_ADMIN_PERM,
+ },
[RDMA_NL_LS_OP_IP_RESOLVE] = {
- .dump = ib_nl_handle_ip_res_resp,
- .module = THIS_MODULE },
+ .doit = ib_nl_handle_ip_res_resp,
+ .flags = RDMA_NL_ADMIN_PERM,
+ },
};
-static int ib_add_ibnl_clients(void)
-{
- return ibnl_add_client(RDMA_NL_LS, ARRAY_SIZE(ibnl_ls_cb_table),
- ibnl_ls_cb_table);
-}
-
-static void ib_remove_ibnl_clients(void)
-{
- ibnl_remove_client(RDMA_NL_LS);
-}
-
static int __init ib_core_init(void)
{
int ret;
@@ -1130,9 +1182,9 @@ static int __init ib_core_init(void)
goto err_comp;
}
- ret = ibnl_init();
+ ret = rdma_nl_init();
if (ret) {
- pr_warn("Couldn't init IB netlink interface\n");
+ pr_warn("Couldn't init IB netlink interface: err %d\n", ret);
goto err_sysfs;
}
@@ -1154,24 +1206,18 @@ static int __init ib_core_init(void)
goto err_mad;
}
- ret = ib_add_ibnl_clients();
- if (ret) {
- pr_warn("Couldn't register ibnl clients\n");
- goto err_sa;
- }
-
ret = register_lsm_notifier(&ibdev_lsm_nb);
if (ret) {
pr_warn("Couldn't register LSM notifier. ret %d\n", ret);
- goto err_ibnl_clients;
+ goto err_sa;
}
+ nldev_init();
+ rdma_nl_register(RDMA_NL_LS, ibnl_ls_cb_table);
ib_cache_setup();
return 0;
-err_ibnl_clients:
- ib_remove_ibnl_clients();
err_sa:
ib_sa_cleanup();
err_mad:
@@ -1179,7 +1225,7 @@ err_mad:
err_addr:
addr_cleanup();
err_ibnl:
- ibnl_cleanup();
+ rdma_nl_exit();
err_sysfs:
class_unregister(&ib_class);
err_comp:
@@ -1191,18 +1237,21 @@ err:
static void __exit ib_core_cleanup(void)
{
- unregister_lsm_notifier(&ibdev_lsm_nb);
ib_cache_cleanup();
- ib_remove_ibnl_clients();
+ nldev_exit();
+ rdma_nl_unregister(RDMA_NL_LS);
+ unregister_lsm_notifier(&ibdev_lsm_nb);
ib_sa_cleanup();
ib_mad_cleanup();
addr_cleanup();
- ibnl_cleanup();
+ rdma_nl_exit();
class_unregister(&ib_class);
destroy_workqueue(ib_comp_wq);
/* Make sure that any pending umem accounting work is done. */
destroy_workqueue(ib_wq);
}
+MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_LS, 4);
+
module_init(ib_core_init);
module_exit(ib_core_cleanup);
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index 31661b5c1743..fcf42f6bb82a 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -80,7 +80,7 @@ const char *__attribute_const__ iwcm_reject_msg(int reason)
}
EXPORT_SYMBOL(iwcm_reject_msg);
-static struct ibnl_client_cbs iwcm_nl_cb_table[] = {
+static struct rdma_nl_cbs iwcm_nl_cb_table[] = {
[RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
[RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
[RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
@@ -1175,13 +1175,9 @@ static int __init iw_cm_init(void)
ret = iwpm_init(RDMA_NL_IWCM);
if (ret)
pr_err("iw_cm: couldn't init iwpm\n");
-
- ret = ibnl_add_client(RDMA_NL_IWCM, ARRAY_SIZE(iwcm_nl_cb_table),
- iwcm_nl_cb_table);
- if (ret)
- pr_err("iw_cm: couldn't register netlink callbacks\n");
-
- iwcm_wq = alloc_ordered_workqueue("iw_cm_wq", WQ_MEM_RECLAIM);
+ else
+ rdma_nl_register(RDMA_NL_IWCM, iwcm_nl_cb_table);
+ iwcm_wq = alloc_ordered_workqueue("iw_cm_wq", 0);
if (!iwcm_wq)
return -ENOMEM;
@@ -1200,9 +1196,11 @@ static void __exit iw_cm_cleanup(void)
{
unregister_net_sysctl_table(iwcm_ctl_table_hdr);
destroy_workqueue(iwcm_wq);
- ibnl_remove_client(RDMA_NL_IWCM);
+ rdma_nl_unregister(RDMA_NL_IWCM);
iwpm_exit(RDMA_NL_IWCM);
}
+MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_IWCM, 2);
+
module_init(iw_cm_init);
module_exit(iw_cm_cleanup);
diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c
index a0e7c16d8bd8..30825bb9b8e9 100644
--- a/drivers/infiniband/core/iwpm_msg.c
+++ b/drivers/infiniband/core/iwpm_msg.c
@@ -42,7 +42,6 @@ int iwpm_valid_pid(void)
{
return iwpm_user_pid > 0;
}
-EXPORT_SYMBOL(iwpm_valid_pid);
/*
* iwpm_register_pid - Send a netlink query to user space
@@ -104,7 +103,7 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
pr_debug("%s: Multicasting a nlmsg (dev = %s ifname = %s iwpm = %s)\n",
__func__, pm_msg->dev_name, pm_msg->if_name, iwpm_ulib_name);
- ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_IWPM, GFP_KERNEL);
+ ret = rdma_nl_multicast(skb, RDMA_NL_GROUP_IWPM, GFP_KERNEL);
if (ret) {
skb = NULL; /* skb is freed in the netlink send-op handling */
iwpm_user_pid = IWPM_PID_UNAVAILABLE;
@@ -122,7 +121,6 @@ pid_query_error:
iwpm_free_nlmsg_request(&nlmsg_request->kref);
return ret;
}
-EXPORT_SYMBOL(iwpm_register_pid);
/*
* iwpm_add_mapping - Send a netlink add mapping message
@@ -174,7 +172,7 @@ int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
goto add_mapping_error;
nlmsg_request->req_buffer = pm_msg;
- ret = ibnl_unicast(skb, nlh, iwpm_user_pid);
+ ret = rdma_nl_unicast_wait(skb, iwpm_user_pid);
if (ret) {
skb = NULL; /* skb is freed in the netlink send-op handling */
iwpm_user_pid = IWPM_PID_UNDEFINED;
@@ -191,7 +189,6 @@ add_mapping_error:
iwpm_free_nlmsg_request(&nlmsg_request->kref);
return ret;
}
-EXPORT_SYMBOL(iwpm_add_mapping);
/*
* iwpm_add_and_query_mapping - Send a netlink add and query
@@ -251,7 +248,7 @@ int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
goto query_mapping_error;
nlmsg_request->req_buffer = pm_msg;
- ret = ibnl_unicast(skb, nlh, iwpm_user_pid);
+ ret = rdma_nl_unicast_wait(skb, iwpm_user_pid);
if (ret) {
skb = NULL; /* skb is freed in the netlink send-op handling */
err_str = "Unable to send a nlmsg";
@@ -267,7 +264,6 @@ query_mapping_error:
iwpm_free_nlmsg_request(&nlmsg_request->kref);
return ret;
}
-EXPORT_SYMBOL(iwpm_add_and_query_mapping);
/*
* iwpm_remove_mapping - Send a netlink remove mapping message
@@ -312,7 +308,7 @@ int iwpm_remove_mapping(struct sockaddr_storage *local_addr, u8 nl_client)
if (ret)
goto remove_mapping_error;
- ret = ibnl_unicast(skb, nlh, iwpm_user_pid);
+ ret = rdma_nl_unicast_wait(skb, iwpm_user_pid);
if (ret) {
skb = NULL; /* skb is freed in the netlink send-op handling */
iwpm_user_pid = IWPM_PID_UNDEFINED;
@@ -328,7 +324,6 @@ remove_mapping_error:
dev_kfree_skb_any(skb);
return ret;
}
-EXPORT_SYMBOL(iwpm_remove_mapping);
/* netlink attribute policy for the received response to register pid request */
static const struct nla_policy resp_reg_policy[IWPM_NLA_RREG_PID_MAX] = {
@@ -397,7 +392,6 @@ register_pid_response_exit:
up(&nlmsg_request->sem);
return 0;
}
-EXPORT_SYMBOL(iwpm_register_pid_cb);
/* netlink attribute policy for the received response to add mapping request */
static const struct nla_policy resp_add_policy[IWPM_NLA_RMANAGE_MAPPING_MAX] = {
@@ -466,7 +460,6 @@ add_mapping_response_exit:
up(&nlmsg_request->sem);
return 0;
}
-EXPORT_SYMBOL(iwpm_add_mapping_cb);
/* netlink attribute policy for the response to add and query mapping request
* and response with remote address info */
@@ -558,7 +551,6 @@ query_mapping_response_exit:
up(&nlmsg_request->sem);
return 0;
}
-EXPORT_SYMBOL(iwpm_add_and_query_mapping_cb);
/*
* iwpm_remote_info_cb - Process a port mapper message, containing
@@ -627,7 +619,6 @@ int iwpm_remote_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
"remote_info: Mapped remote sockaddr:");
return ret;
}
-EXPORT_SYMBOL(iwpm_remote_info_cb);
/* netlink attribute policy for the received request for mapping info */
static const struct nla_policy resp_mapinfo_policy[IWPM_NLA_MAPINFO_REQ_MAX] = {
@@ -677,7 +668,6 @@ int iwpm_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
ret = iwpm_send_mapinfo(nl_client, iwpm_user_pid);
return ret;
}
-EXPORT_SYMBOL(iwpm_mapping_info_cb);
/* netlink attribute policy for the received mapping info ack */
static const struct nla_policy ack_mapinfo_policy[IWPM_NLA_MAPINFO_NUM_MAX] = {
@@ -707,7 +697,6 @@ int iwpm_ack_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
return 0;
}
-EXPORT_SYMBOL(iwpm_ack_mapping_info_cb);
/* netlink attribute policy for the received port mapper error message */
static const struct nla_policy map_error_policy[IWPM_NLA_ERR_MAX] = {
@@ -751,4 +740,3 @@ int iwpm_mapping_error_cb(struct sk_buff *skb, struct netlink_callback *cb)
up(&nlmsg_request->sem);
return 0;
}
-EXPORT_SYMBOL(iwpm_mapping_error_cb);
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index f13870e69ccd..c81c55942626 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -54,8 +54,6 @@ static struct iwpm_admin_data iwpm_admin;
int iwpm_init(u8 nl_client)
{
int ret = 0;
- if (iwpm_valid_client(nl_client))
- return -EINVAL;
mutex_lock(&iwpm_admin_lock);
if (atomic_read(&iwpm_admin.refcount) == 0) {
iwpm_hash_bucket = kzalloc(IWPM_MAPINFO_HASH_SIZE *
@@ -83,7 +81,6 @@ init_exit:
}
return ret;
}
-EXPORT_SYMBOL(iwpm_init);
static void free_hash_bucket(void);
static void free_reminfo_bucket(void);
@@ -109,7 +106,6 @@ int iwpm_exit(u8 nl_client)
iwpm_set_registration(nl_client, IWPM_REG_UNDEF);
return 0;
}
-EXPORT_SYMBOL(iwpm_exit);
static struct hlist_head *get_mapinfo_hash_bucket(struct sockaddr_storage *,
struct sockaddr_storage *);
@@ -148,7 +144,6 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
return ret;
}
-EXPORT_SYMBOL(iwpm_create_mapinfo);
int iwpm_remove_mapinfo(struct sockaddr_storage *local_sockaddr,
struct sockaddr_storage *mapped_local_addr)
@@ -184,7 +179,6 @@ remove_mapinfo_exit:
spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
return ret;
}
-EXPORT_SYMBOL(iwpm_remove_mapinfo);
static void free_hash_bucket(void)
{
@@ -297,7 +291,6 @@ get_remote_info_exit:
spin_unlock_irqrestore(&iwpm_reminfo_lock, flags);
return ret;
}
-EXPORT_SYMBOL(iwpm_get_remote_info);
struct iwpm_nlmsg_request *iwpm_get_nlmsg_request(__u32 nlmsg_seq,
u8 nl_client, gfp_t gfp)
@@ -383,15 +376,11 @@ int iwpm_get_nlmsg_seq(void)
int iwpm_valid_client(u8 nl_client)
{
- if (nl_client >= RDMA_NL_NUM_CLIENTS)
- return 0;
return iwpm_admin.client_list[nl_client];
}
void iwpm_set_valid(u8 nl_client, int valid)
{
- if (nl_client >= RDMA_NL_NUM_CLIENTS)
- return;
iwpm_admin.client_list[nl_client] = valid;
}
@@ -608,7 +597,7 @@ static int send_mapinfo_num(u32 mapping_num, u8 nl_client, int iwpm_pid)
&mapping_num, IWPM_NLA_MAPINFO_SEND_NUM);
if (ret)
goto mapinfo_num_error;
- ret = ibnl_unicast(skb, nlh, iwpm_pid);
+ ret = rdma_nl_unicast(skb, iwpm_pid);
if (ret) {
skb = NULL;
err_str = "Unable to send a nlmsg";
@@ -637,7 +626,7 @@ static int send_nlmsg_done(struct sk_buff *skb, u8 nl_client, int iwpm_pid)
return -ENOMEM;
}
nlh->nlmsg_type = NLMSG_DONE;
- ret = ibnl_unicast(skb, (struct nlmsghdr *)skb->data, iwpm_pid);
+ ret = rdma_nl_unicast(skb, iwpm_pid);
if (ret)
pr_warn("%s Unable to send a nlmsg\n", __func__);
return ret;
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index 0d3cca0a8890..e5cf09c66fe6 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -64,7 +64,7 @@ struct mad_rmpp_recv {
__be64 tid;
u32 src_qp;
- u16 slid;
+ u32 slid;
u8 mgmt_class;
u8 class_version;
u8 method;
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index 94931c474d41..e685148dd3e6 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2017 Mellanox Technologies Inc. All rights reserved.
* Copyright (c) 2010 Voltaire Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -37,239 +38,267 @@
#include <net/net_namespace.h>
#include <net/sock.h>
#include <rdma/rdma_netlink.h>
+#include <linux/module.h>
#include "core_priv.h"
-struct ibnl_client {
- struct list_head list;
- int index;
- int nops;
- const struct ibnl_client_cbs *cb_table;
-};
+#include "core_priv.h"
-static DEFINE_MUTEX(ibnl_mutex);
+static DEFINE_MUTEX(rdma_nl_mutex);
static struct sock *nls;
-static LIST_HEAD(client_list);
+static struct {
+ const struct rdma_nl_cbs *cb_table;
+} rdma_nl_types[RDMA_NL_NUM_CLIENTS];
-int ibnl_chk_listeners(unsigned int group)
+int rdma_nl_chk_listeners(unsigned int group)
{
- if (netlink_has_listeners(nls, group) == 0)
- return -1;
- return 0;
+ return (netlink_has_listeners(nls, group)) ? 0 : -1;
}
+EXPORT_SYMBOL(rdma_nl_chk_listeners);
-int ibnl_add_client(int index, int nops,
- const struct ibnl_client_cbs cb_table[])
+static bool is_nl_msg_valid(unsigned int type, unsigned int op)
{
- struct ibnl_client *cur;
- struct ibnl_client *nl_client;
+ static const unsigned int max_num_ops[RDMA_NL_NUM_CLIENTS - 1] = {
+ RDMA_NL_RDMA_CM_NUM_OPS,
+ RDMA_NL_IWPM_NUM_OPS,
+ 0,
+ RDMA_NL_LS_NUM_OPS,
+ RDMA_NLDEV_NUM_OPS };
- nl_client = kmalloc(sizeof *nl_client, GFP_KERNEL);
- if (!nl_client)
- return -ENOMEM;
+ /*
+ * This BUILD_BUG_ON is intended to catch the addition of a new
+ * RDMA netlink protocol without updating the array above.
+ */
+ BUILD_BUG_ON(RDMA_NL_NUM_CLIENTS != 6);
- nl_client->index = index;
- nl_client->nops = nops;
- nl_client->cb_table = cb_table;
+ if (type > RDMA_NL_NUM_CLIENTS - 1)
+ return false;
- mutex_lock(&ibnl_mutex);
+ return (op < max_num_ops[type - 1]) ? true : false;
+}
- list_for_each_entry(cur, &client_list, list) {
- if (cur->index == index) {
- pr_warn("Client for %d already exists\n", index);
- mutex_unlock(&ibnl_mutex);
- kfree(nl_client);
- return -EINVAL;
- }
+static bool is_nl_valid(unsigned int type, unsigned int op)
+{
+ const struct rdma_nl_cbs *cb_table;
+
+ if (!is_nl_msg_valid(type, op))
+ return false;
+
+ cb_table = rdma_nl_types[type].cb_table;
+#ifdef CONFIG_MODULES
+ if (!cb_table) {
+ mutex_unlock(&rdma_nl_mutex);
+ request_module("rdma-netlink-subsys-%d", type);
+ mutex_lock(&rdma_nl_mutex);
+ cb_table = rdma_nl_types[type].cb_table;
}
+#endif
- list_add_tail(&nl_client->list, &client_list);
-
- mutex_unlock(&ibnl_mutex);
-
- return 0;
+ if (!cb_table || (!cb_table[op].dump && !cb_table[op].doit))
+ return false;
+ return true;
}
-EXPORT_SYMBOL(ibnl_add_client);
-int ibnl_remove_client(int index)
+void rdma_nl_register(unsigned int index,
+ const struct rdma_nl_cbs cb_table[])
{
- struct ibnl_client *cur, *next;
-
- mutex_lock(&ibnl_mutex);
- list_for_each_entry_safe(cur, next, &client_list, list) {
- if (cur->index == index) {
- list_del(&(cur->list));
- mutex_unlock(&ibnl_mutex);
- kfree(cur);
- return 0;
- }
+ mutex_lock(&rdma_nl_mutex);
+ if (!is_nl_msg_valid(index, 0)) {
+ /*
+ * Clients are not interested in the success/failure of
+ * this call. They want to see the message in the error log and
+ * continue their initialization. Print a warning for them,
+ * because it is a programmer's error to get here.
+ */
+ mutex_unlock(&rdma_nl_mutex);
+ WARN(true,
+ "The not-valid %u index was supplied to RDMA netlink\n",
+ index);
+ return;
}
- pr_warn("Can't remove callback for client idx %d. Not found\n", index);
- mutex_unlock(&ibnl_mutex);
- return -EINVAL;
+ if (rdma_nl_types[index].cb_table) {
+ mutex_unlock(&rdma_nl_mutex);
+ WARN(true,
+ "The %u index is already registered in RDMA netlink\n",
+ index);
+ return;
+ }
+
+ rdma_nl_types[index].cb_table = cb_table;
+ mutex_unlock(&rdma_nl_mutex);
+}
+EXPORT_SYMBOL(rdma_nl_register);
+
+void rdma_nl_unregister(unsigned int index)
+{
+ mutex_lock(&rdma_nl_mutex);
+ rdma_nl_types[index].cb_table = NULL;
+ mutex_unlock(&rdma_nl_mutex);
}
-EXPORT_SYMBOL(ibnl_remove_client);
+EXPORT_SYMBOL(rdma_nl_unregister);
void *ibnl_put_msg(struct sk_buff *skb, struct nlmsghdr **nlh, int seq,
int len, int client, int op, int flags)
{
- unsigned char *prev_tail;
-
- prev_tail = skb_tail_pointer(skb);
- *nlh = nlmsg_put(skb, 0, seq, RDMA_NL_GET_TYPE(client, op),
- len, flags);
+ *nlh = nlmsg_put(skb, 0, seq, RDMA_NL_GET_TYPE(client, op), len, flags);
if (!*nlh)
- goto out_nlmsg_trim;
- (*nlh)->nlmsg_len = skb_tail_pointer(skb) - prev_tail;
+ return NULL;
return nlmsg_data(*nlh);
-
-out_nlmsg_trim:
- nlmsg_trim(skb, prev_tail);
- return NULL;
}
EXPORT_SYMBOL(ibnl_put_msg);
int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh,
int len, void *data, int type)
{
- unsigned char *prev_tail;
-
- prev_tail = skb_tail_pointer(skb);
- if (nla_put(skb, type, len, data))
- goto nla_put_failure;
- nlh->nlmsg_len += skb_tail_pointer(skb) - prev_tail;
+ if (nla_put(skb, type, len, data)) {
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
+ }
return 0;
-
-nla_put_failure:
- nlmsg_trim(skb, prev_tail - nlh->nlmsg_len);
- return -EMSGSIZE;
}
EXPORT_SYMBOL(ibnl_put_attr);
-static int ibnl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
- struct netlink_ext_ack *extack)
+static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
{
- struct ibnl_client *client;
int type = nlh->nlmsg_type;
- int index = RDMA_NL_GET_CLIENT(type);
+ unsigned int index = RDMA_NL_GET_CLIENT(type);
unsigned int op = RDMA_NL_GET_OP(type);
+ const struct rdma_nl_cbs *cb_table;
+
+ if (!is_nl_valid(index, op))
+ return -EINVAL;
+
+ cb_table = rdma_nl_types[index].cb_table;
- list_for_each_entry(client, &client_list, list) {
- if (client->index == index) {
- if (op >= client->nops || !client->cb_table[op].dump)
- return -EINVAL;
-
- /*
- * For response or local service set_timeout request,
- * there is no need to use netlink_dump_start.
- */
- if (!(nlh->nlmsg_flags & NLM_F_REQUEST) ||
- (index == RDMA_NL_LS &&
- op == RDMA_NL_LS_OP_SET_TIMEOUT)) {
- struct netlink_callback cb = {
- .skb = skb,
- .nlh = nlh,
- .dump = client->cb_table[op].dump,
- .module = client->cb_table[op].module,
- };
-
- return cb.dump(skb, &cb);
- }
-
- {
- struct netlink_dump_control c = {
- .dump = client->cb_table[op].dump,
- .module = client->cb_table[op].module,
- };
- return netlink_dump_start(nls, skb, nlh, &c);
- }
- }
+ if ((cb_table[op].flags & RDMA_NL_ADMIN_PERM) &&
+ !netlink_capable(skb, CAP_NET_ADMIN))
+ return -EPERM;
+
+ /* FIXME: Convert IWCM to properly handle doit callbacks */
+ if ((nlh->nlmsg_flags & NLM_F_DUMP) || index == RDMA_NL_RDMA_CM ||
+ index == RDMA_NL_IWCM) {
+ struct netlink_dump_control c = {
+ .dump = cb_table[op].dump,
+ };
+ return netlink_dump_start(nls, skb, nlh, &c);
}
- pr_info("Index %d wasn't found in client list\n", index);
- return -EINVAL;
+ if (cb_table[op].doit)
+ return cb_table[op].doit(skb, nlh, extack);
+
+ return 0;
}
-static void ibnl_rcv_reply_skb(struct sk_buff *skb)
+/*
+ * This function is similar to netlink_rcv_skb with one exception:
+ * It calls the callback for netlink messages without the NLM_F_REQUEST
+ * flag. These messages are intended for the RDMA_NL_LS consumer, so they are
+ * allowed for that consumer only.
+ */
+static int rdma_nl_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
+ struct nlmsghdr *,
+ struct netlink_ext_ack *))
{
+ struct netlink_ext_ack extack = {};
struct nlmsghdr *nlh;
- int msglen;
+ int err;
- /*
- * Process responses until there is no more message or the first
- * request. Generally speaking, it is not recommended to mix responses
- * with requests.
- */
while (skb->len >= nlmsg_total_size(0)) {
+ int msglen;
+
nlh = nlmsg_hdr(skb);
+ err = 0;
if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
- return;
-
- /* Handle response only */
- if (nlh->nlmsg_flags & NLM_F_REQUEST)
- return;
-
- ibnl_rcv_msg(skb, nlh, NULL);
+ return 0;
+ /*
+ * Generally speaking, only requests are handled
+ * by the kernel, but RDMA_NL_LS is different, because it
+ * runs a backward netlink scheme: the kernel initiates messages
+ * and waits for a reply with data to keep the pathrecord cache
+ * in sync.
+ */
+ if (!(nlh->nlmsg_flags & NLM_F_REQUEST) &&
+ (RDMA_NL_GET_CLIENT(nlh->nlmsg_type) != RDMA_NL_LS))
+ goto ack;
+
+ /* Skip control messages */
+ if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
+ goto ack;
+
+ err = cb(skb, nlh, &extack);
+ if (err == -EINTR)
+ goto skip;
+
+ack:
+ if (nlh->nlmsg_flags & NLM_F_ACK || err)
+ netlink_ack(skb, nlh, err, &extack);
+
+skip:
msglen = NLMSG_ALIGN(nlh->nlmsg_len);
if (msglen > skb->len)
msglen = skb->len;
skb_pull(skb, msglen);
}
+
+ return 0;
}
-static void ibnl_rcv(struct sk_buff *skb)
+static void rdma_nl_rcv(struct sk_buff *skb)
{
- mutex_lock(&ibnl_mutex);
- ibnl_rcv_reply_skb(skb);
- netlink_rcv_skb(skb, &ibnl_rcv_msg);
- mutex_unlock(&ibnl_mutex);
+ mutex_lock(&rdma_nl_mutex);
+ rdma_nl_rcv_skb(skb, &rdma_nl_rcv_msg);
+ mutex_unlock(&rdma_nl_mutex);
}
-int ibnl_unicast(struct sk_buff *skb, struct nlmsghdr *nlh,
- __u32 pid)
+int rdma_nl_unicast(struct sk_buff *skb, u32 pid)
+{
+ int err;
+
+ err = netlink_unicast(nls, skb, pid, MSG_DONTWAIT);
+ return (err < 0) ? err : 0;
+}
+EXPORT_SYMBOL(rdma_nl_unicast);
+
+int rdma_nl_unicast_wait(struct sk_buff *skb, __u32 pid)
{
int err;
err = netlink_unicast(nls, skb, pid, 0);
return (err < 0) ? err : 0;
}
-EXPORT_SYMBOL(ibnl_unicast);
+EXPORT_SYMBOL(rdma_nl_unicast_wait);
-int ibnl_multicast(struct sk_buff *skb, struct nlmsghdr *nlh,
- unsigned int group, gfp_t flags)
+int rdma_nl_multicast(struct sk_buff *skb, unsigned int group, gfp_t flags)
{
return nlmsg_multicast(nls, skb, 0, group, flags);
}
-EXPORT_SYMBOL(ibnl_multicast);
+EXPORT_SYMBOL(rdma_nl_multicast);
-int __init ibnl_init(void)
+int __init rdma_nl_init(void)
{
struct netlink_kernel_cfg cfg = {
- .input = ibnl_rcv,
+ .input = rdma_nl_rcv,
};
nls = netlink_kernel_create(&init_net, NETLINK_RDMA, &cfg);
- if (!nls) {
- pr_warn("Failed to create netlink socket\n");
+ if (!nls)
return -ENOMEM;
- }
nls->sk_sndtimeo = 10 * HZ;
return 0;
}
-void ibnl_cleanup(void)
+void rdma_nl_exit(void)
{
- struct ibnl_client *cur, *next;
+ int idx;
- mutex_lock(&ibnl_mutex);
- list_for_each_entry_safe(cur, next, &client_list, list) {
- list_del(&(cur->list));
- kfree(cur);
- }
- mutex_unlock(&ibnl_mutex);
+ for (idx = 0; idx < RDMA_NL_NUM_CLIENTS; idx++)
+ rdma_nl_unregister(idx);
netlink_kernel_release(nls);
}
+
+MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_RDMA);
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
new file mode 100644
index 000000000000..3ba24c428c3b
--- /dev/null
+++ b/drivers/infiniband/core/nldev.c
@@ -0,0 +1,325 @@
+/*
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <net/netlink.h>
+#include <rdma/rdma_netlink.h>
+
+#include "core_priv.h"
+
+static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
+ [RDMA_NLDEV_ATTR_DEV_INDEX] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING,
+ .len = IB_DEVICE_NAME_MAX - 1},
+ [RDMA_NLDEV_ATTR_PORT_INDEX] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_FW_VERSION] = { .type = NLA_NUL_STRING,
+ .len = IB_FW_VERSION_NAME_MAX - 1},
+ [RDMA_NLDEV_ATTR_NODE_GUID] = { .type = NLA_U64 },
+ [RDMA_NLDEV_ATTR_SYS_IMAGE_GUID] = { .type = NLA_U64 },
+ [RDMA_NLDEV_ATTR_SUBNET_PREFIX] = { .type = NLA_U64 },
+ [RDMA_NLDEV_ATTR_LID] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_SM_LID] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_LMC] = { .type = NLA_U8 },
+ [RDMA_NLDEV_ATTR_PORT_STATE] = { .type = NLA_U8 },
+ [RDMA_NLDEV_ATTR_PORT_PHYS_STATE] = { .type = NLA_U8 },
+ [RDMA_NLDEV_ATTR_DEV_NODE_TYPE] = { .type = NLA_U8 },
+};
+
+static int fill_dev_info(struct sk_buff *msg, struct ib_device *device)
+{
+ char fw[IB_FW_VERSION_NAME_MAX];
+
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
+ return -EMSGSIZE;
+ if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME, device->name))
+ return -EMSGSIZE;
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, rdma_end_port(device)))
+ return -EMSGSIZE;
+
+ BUILD_BUG_ON(sizeof(device->attrs.device_cap_flags) != sizeof(u64));
+ if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
+ device->attrs.device_cap_flags, 0))
+ return -EMSGSIZE;
+
+ ib_get_device_fw_str(device, fw);
+ /* Device without FW has strlen(fw) == 0 */
+ if (strlen(fw) && nla_put_string(msg, RDMA_NLDEV_ATTR_FW_VERSION, fw))
+ return -EMSGSIZE;
+
+ if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_NODE_GUID,
+ be64_to_cpu(device->node_guid), 0))
+ return -EMSGSIZE;
+ if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SYS_IMAGE_GUID,
+ be64_to_cpu(device->attrs.sys_image_guid), 0))
+ return -EMSGSIZE;
+ if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_NODE_TYPE, device->node_type))
+ return -EMSGSIZE;
+ return 0;
+}
+
+static int fill_port_info(struct sk_buff *msg,
+ struct ib_device *device, u32 port)
+{
+ struct ib_port_attr attr;
+ int ret;
+
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
+ return -EMSGSIZE;
+ if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME, device->name))
+ return -EMSGSIZE;
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port))
+ return -EMSGSIZE;
+
+ ret = ib_query_port(device, port, &attr);
+ if (ret)
+ return ret;
+
+ BUILD_BUG_ON(sizeof(attr.port_cap_flags) > sizeof(u64));
+ if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
+ (u64)attr.port_cap_flags, 0))
+ return -EMSGSIZE;
+ if (rdma_protocol_ib(device, port) &&
+ nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SUBNET_PREFIX,
+ attr.subnet_prefix, 0))
+ return -EMSGSIZE;
+ if (rdma_protocol_ib(device, port)) {
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_LID, attr.lid))
+ return -EMSGSIZE;
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_SM_LID, attr.sm_lid))
+ return -EMSGSIZE;
+ if (nla_put_u8(msg, RDMA_NLDEV_ATTR_LMC, attr.lmc))
+ return -EMSGSIZE;
+ }
+ if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_STATE, attr.state))
+ return -EMSGSIZE;
+ if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_PHYS_STATE, attr.phys_state))
+ return -EMSGSIZE;
+ return 0;
+}
+
+static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+ struct ib_device *device;
+ struct sk_buff *msg;
+ u32 index;
+ int err;
+
+ err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
+ if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
+ return -EINVAL;
+
+ index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
+
+ device = __ib_device_get_by_index(index);
+ if (!device)
+ return -EINVAL;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
+ RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
+ 0, 0);
+
+ err = fill_dev_info(msg, device);
+ if (err) {
+ nlmsg_free(msg);
+ return err;
+ }
+
+ nlmsg_end(msg, nlh);
+
+ return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+}
+
+static int _nldev_get_dumpit(struct ib_device *device,
+ struct sk_buff *skb,
+ struct netlink_callback *cb,
+ unsigned int idx)
+{
+ int start = cb->args[0];
+ struct nlmsghdr *nlh;
+
+ if (idx < start)
+ return 0;
+
+ nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
+ 0, NLM_F_MULTI);
+
+ if (fill_dev_info(skb, device)) {
+ nlmsg_cancel(skb, nlh);
+ goto out;
+ }
+
+ nlmsg_end(skb, nlh);
+
+ idx++;
+
+out: cb->args[0] = idx;
+ return skb->len;
+}
+
+static int nldev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ /*
+ * There is no need to take a lock, because
+ * we are relying on ib_core's lists_rwsem.
+ */
+ return ib_enum_all_devs(_nldev_get_dumpit, skb, cb);
+}
+
+static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+ struct ib_device *device;
+ struct sk_buff *msg;
+ u32 index;
+ u32 port;
+ int err;
+
+ err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
+ if (err || !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
+ return -EINVAL;
+
+ index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
+ device = __ib_device_get_by_index(index);
+ if (!device)
+ return -EINVAL;
+
+ port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
+ if (!rdma_is_port_valid(device, port))
+ return -EINVAL;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
+ RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
+ 0, 0);
+
+ err = fill_port_info(msg, device, port);
+ if (err) {
+ nlmsg_free(msg);
+ return err;
+ }
+
+ nlmsg_end(msg, nlh);
+
+ return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+}
+
+static int nldev_port_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+ struct ib_device *device;
+ int start = cb->args[0];
+ struct nlmsghdr *nlh;
+ u32 idx = 0;
+ u32 ifindex;
+ int err;
+ u32 p;
+
+ err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, NULL);
+ if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
+ return -EINVAL;
+
+ ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
+ device = __ib_device_get_by_index(ifindex);
+ if (!device)
+ return -EINVAL;
+
+ for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
+ /*
+ * The dumpit function returns all information from a specific
+ * index. This index is taken from the netlink request
+ * message sent by the user and is available
+ * in cb->args[0].
+ *
+ * Usually, the user doesn't fill in this field, which causes
+ * everything to be returned.
+ *
+ */
+ if (idx < start) {
+ idx++;
+ continue;
+ }
+
+ nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
+ RDMA_NLDEV_CMD_PORT_GET),
+ 0, NLM_F_MULTI);
+
+ if (fill_port_info(skb, device, p)) {
+ nlmsg_cancel(skb, nlh);
+ goto out;
+ }
+ idx++;
+ nlmsg_end(skb, nlh);
+ }
+
+out: cb->args[0] = idx;
+ return skb->len;
+}
+
+static const struct rdma_nl_cbs nldev_cb_table[] = {
+ [RDMA_NLDEV_CMD_GET] = {
+ .doit = nldev_get_doit,
+ .dump = nldev_get_dumpit,
+ },
+ [RDMA_NLDEV_CMD_PORT_GET] = {
+ .doit = nldev_port_get_doit,
+ .dump = nldev_port_get_dumpit,
+ },
+};
+
+void __init nldev_init(void)
+{
+ rdma_nl_register(RDMA_NL_NLDEV, nldev_cb_table);
+}
+
+void __exit nldev_exit(void)
+{
+ rdma_nl_unregister(RDMA_NL_NLDEV);
+}
+
+MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_NLDEV, 5);
diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
index 41c31a2bf093..85b5ee4defa4 100644
--- a/drivers/infiniband/core/rdma_core.c
+++ b/drivers/infiniband/core/rdma_core.c
@@ -35,10 +35,57 @@
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_types.h>
#include <linux/rcupdate.h>
+#include <rdma/uverbs_ioctl.h>
+#include <rdma/rdma_user_ioctl.h>
#include "uverbs.h"
#include "core_priv.h"
#include "rdma_core.h"
+int uverbs_ns_idx(u16 *id, unsigned int ns_count)
+{
+ int ret = (*id & UVERBS_ID_NS_MASK) >> UVERBS_ID_NS_SHIFT;
+
+ if (ret >= ns_count)
+ return -EINVAL;
+
+ *id &= ~UVERBS_ID_NS_MASK;
+ return ret;
+}
+
+const struct uverbs_object_spec *uverbs_get_object(const struct ib_device *ibdev,
+ uint16_t object)
+{
+ const struct uverbs_root_spec *object_hash = ibdev->specs_root;
+ const struct uverbs_object_spec_hash *objects;
+ int ret = uverbs_ns_idx(&object, object_hash->num_buckets);
+
+ if (ret < 0)
+ return NULL;
+
+ objects = object_hash->object_buckets[ret];
+
+ if (object >= objects->num_objects)
+ return NULL;
+
+ return objects->objects[object];
+}
+
+const struct uverbs_method_spec *uverbs_get_method(const struct uverbs_object_spec *object,
+ uint16_t method)
+{
+ const struct uverbs_method_spec_hash *methods;
+ int ret = uverbs_ns_idx(&method, object->num_buckets);
+
+ if (ret < 0)
+ return NULL;
+
+ methods = object->method_buckets[ret];
+ if (method >= methods->num_methods)
+ return NULL;
+
+ return methods->methods[method];
+}
+
void uverbs_uobject_get(struct ib_uobject *uobject)
{
kref_get(&uobject->ref);
@@ -404,6 +451,41 @@ int __must_check rdma_remove_commit_uobject(struct ib_uobject *uobj)
return ret;
}
+static int null_obj_type_class_remove_commit(struct ib_uobject *uobj,
+ enum rdma_remove_reason why)
+{
+ return 0;
+}
+
+static const struct uverbs_obj_type null_obj_type = {
+ .type_class = &((const struct uverbs_obj_type_class){
+ .remove_commit = null_obj_type_class_remove_commit,
+ /* be cautious */
+ .needs_kfree_rcu = true}),
+};
+
+int rdma_explicit_destroy(struct ib_uobject *uobject)
+{
+ int ret;
+ struct ib_ucontext *ucontext = uobject->context;
+
+ /* Cleanup is running. Calling this should have been impossible */
+ if (!down_read_trylock(&ucontext->cleanup_rwsem)) {
+ WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n");
+ return 0;
+ }
+ lockdep_check(uobject, true);
+ ret = uobject->type->type_class->remove_commit(uobject,
+ RDMA_REMOVE_DESTROY);
+ if (ret)
+ return ret;
+
+ uobject->type = &null_obj_type;
+
+ up_read(&ucontext->cleanup_rwsem);
+ return 0;
+}
+
static void alloc_commit_idr_uobject(struct ib_uobject *uobj)
{
uverbs_uobject_add(uobj);
@@ -625,3 +707,100 @@ const struct uverbs_obj_type_class uverbs_fd_class = {
.needs_kfree_rcu = false,
};
+struct ib_uobject *uverbs_get_uobject_from_context(const struct uverbs_obj_type *type_attrs,
+ struct ib_ucontext *ucontext,
+ enum uverbs_obj_access access,
+ int id)
+{
+ switch (access) {
+ case UVERBS_ACCESS_READ:
+ return rdma_lookup_get_uobject(type_attrs, ucontext, id, false);
+ case UVERBS_ACCESS_DESTROY:
+ case UVERBS_ACCESS_WRITE:
+ return rdma_lookup_get_uobject(type_attrs, ucontext, id, true);
+ case UVERBS_ACCESS_NEW:
+ return rdma_alloc_begin_uobject(type_attrs, ucontext);
+ default:
+ WARN_ON(true);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+}
+
+int uverbs_finalize_object(struct ib_uobject *uobj,
+ enum uverbs_obj_access access,
+ bool commit)
+{
+ int ret = 0;
+
+ /*
+ * refcounts should be handled at the object level and not at the
+ * uobject level. Refcounts of the objects themselves are done in
+ * handlers.
+ */
+
+ switch (access) {
+ case UVERBS_ACCESS_READ:
+ rdma_lookup_put_uobject(uobj, false);
+ break;
+ case UVERBS_ACCESS_WRITE:
+ rdma_lookup_put_uobject(uobj, true);
+ break;
+ case UVERBS_ACCESS_DESTROY:
+ if (commit)
+ ret = rdma_remove_commit_uobject(uobj);
+ else
+ rdma_lookup_put_uobject(uobj, true);
+ break;
+ case UVERBS_ACCESS_NEW:
+ if (commit)
+ ret = rdma_alloc_commit_uobject(uobj);
+ else
+ rdma_alloc_abort_uobject(uobj);
+ break;
+ default:
+ WARN_ON(true);
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+int uverbs_finalize_objects(struct uverbs_attr_bundle *attrs_bundle,
+ struct uverbs_attr_spec_hash * const *spec_hash,
+ size_t num,
+ bool commit)
+{
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0; i < num; i++) {
+ struct uverbs_attr_bundle_hash *curr_bundle =
+ &attrs_bundle->hash[i];
+ const struct uverbs_attr_spec_hash *curr_spec_bucket =
+ spec_hash[i];
+ unsigned int j;
+
+ for (j = 0; j < curr_bundle->num_attrs; j++) {
+ struct uverbs_attr *attr;
+ const struct uverbs_attr_spec *spec;
+
+ if (!uverbs_attr_is_valid_in_hash(curr_bundle, j))
+ continue;
+
+ attr = &curr_bundle->attrs[j];
+ spec = &curr_spec_bucket->attrs[j];
+
+ if (spec->type == UVERBS_ATTR_TYPE_IDR ||
+ spec->type == UVERBS_ATTR_TYPE_FD) {
+ int current_ret;
+
+ current_ret = uverbs_finalize_object(attr->obj_attr.uobject,
+ spec->obj.access,
+ commit);
+ if (!ret)
+ ret = current_ret;
+ }
+ }
+ }
+ return ret;
+}
diff --git a/drivers/infiniband/core/rdma_core.h b/drivers/infiniband/core/rdma_core.h
index 1b82e7ff7fe8..1efcf93238dd 100644
--- a/drivers/infiniband/core/rdma_core.h
+++ b/drivers/infiniband/core/rdma_core.h
@@ -39,9 +39,15 @@
#include <linux/idr.h>
#include <rdma/uverbs_types.h>
+#include <rdma/uverbs_ioctl.h>
#include <rdma/ib_verbs.h>
#include <linux/mutex.h>
+int uverbs_ns_idx(u16 *id, unsigned int ns_count);
+const struct uverbs_object_spec *uverbs_get_object(const struct ib_device *ibdev,
+ uint16_t object);
+const struct uverbs_method_spec *uverbs_get_method(const struct uverbs_object_spec *object,
+ uint16_t method);
/*
* These functions initialize the context and cleanups its uobjects.
* The context has a list of objects which is protected by a mutex
@@ -75,4 +81,40 @@ void uverbs_uobject_put(struct ib_uobject *uobject);
*/
void uverbs_close_fd(struct file *f);
+/*
+ * Get an ib_uobject that corresponds to the given id from ucontext, assuming
+ * the object is of the given type. Lock it to the required access when
+ * applicable.
+ * This function could create (access == NEW), destroy (access == DESTROY)
+ * or unlock (access == READ || access == WRITE) objects if required.
+ * The action will be finalized only when uverbs_finalize_object or
+ * uverbs_finalize_objects is called.
+ */
+struct ib_uobject *uverbs_get_uobject_from_context(const struct uverbs_obj_type *type_attrs,
+ struct ib_ucontext *ucontext,
+ enum uverbs_obj_access access,
+ int id);
+int uverbs_finalize_object(struct ib_uobject *uobj,
+ enum uverbs_obj_access access,
+ bool commit);
+/*
+ * Note that certain finalize stages could return a status:
+ * (a) alloc_commit could return a failure if the object is committed at the
+ * same time as the context is destroyed.
+ * (b) remove_commit could fail if the object wasn't destroyed successfully.
+ * Since multiple objects could be finalized in one transaction, it is strongly
+ * NOT recommended to have several finalize actions with side effects.
+ * For example, it's NOT recommended to have an action which includes both
+ * a commit action and a destroy action, or two destroy objects in the same
+ * action. The rule of thumb is to have one destroy or commit action with
+ * multiple lookups.
+ * The first non-zero return value of finalize_object is returned from this
+ * function. For example, this could happen when we couldn't destroy an
+ * object.
+ */
+int uverbs_finalize_objects(struct uverbs_attr_bundle *attrs_bundle,
+ struct uverbs_attr_spec_hash * const *spec_hash,
+ size_t num,
+ bool commit);
+
#endif /* RDMA_CORE_H */
diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
index db958d3207ef..90e3889b7fbe 100644
--- a/drivers/infiniband/core/roce_gid_mgmt.c
+++ b/drivers/infiniband/core/roce_gid_mgmt.c
@@ -42,6 +42,10 @@
#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>
+static struct workqueue_struct *gid_cache_wq;
+
+static struct workqueue_struct *gid_cache_wq;
+
enum gid_op_type {
GID_DEL = 0,
GID_ADD
@@ -560,7 +564,7 @@ static int netdevice_queue_work(struct netdev_event_work_cmd *cmds,
}
INIT_WORK(&ndev_work->work, netdevice_event_work_handler);
- queue_work(ib_wq, &ndev_work->work);
+ queue_work(gid_cache_wq, &ndev_work->work);
return NOTIFY_DONE;
}
@@ -693,7 +697,7 @@ static int addr_event(struct notifier_block *this, unsigned long event,
dev_hold(ndev);
work->gid_attr.ndev = ndev;
- queue_work(ib_wq, &work->work);
+ queue_work(gid_cache_wq, &work->work);
return NOTIFY_DONE;
}
@@ -740,6 +744,10 @@ static struct notifier_block nb_inet6addr = {
int __init roce_gid_mgmt_init(void)
{
+ gid_cache_wq = alloc_ordered_workqueue("gid-cache-wq", 0);
+ if (!gid_cache_wq)
+ return -ENOMEM;
+
register_inetaddr_notifier(&nb_inetaddr);
if (IS_ENABLED(CONFIG_IPV6))
register_inet6addr_notifier(&nb_inet6addr);
@@ -764,4 +772,5 @@ void __exit roce_gid_mgmt_cleanup(void)
* ib-core is removed, all physical devices have been removed,
* so no issue with remaining hardware contexts.
*/
+ destroy_workqueue(gid_cache_wq);
}
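
For context, the gid cache now uses its own ordered workqueue rather than the shared
ib_wq; the lifecycle follows the standard workqueue pattern sketched below (generic
kernel API, illustrative names only, not code from this patch):

/* Illustrative pattern: a private ordered workqueue for cache updates. */
static struct workqueue_struct *example_wq;

static void example_handler(struct work_struct *work)
{
	/* ... update the cache ... */
	kfree(work);
}

static int __init example_init(void)
{
	example_wq = alloc_ordered_workqueue("example-wq", 0);
	if (!example_wq)
		return -ENOMEM;
	return 0;
}

static void example_queue(struct work_struct *work)
{
	INIT_WORK(work, example_handler);
	queue_work(example_wq, work);	/* items run one at a time, in order */
}

static void __exit example_exit(void)
{
	/* drains any remaining work items before freeing the workqueue */
	destroy_workqueue(example_wq);
}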
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 70fa4cabe48e..ab5e1024fea9 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -50,6 +50,7 @@
#include <uapi/rdma/ib_user_sa.h>
#include <rdma/ib_marshall.h>
#include <rdma/ib_addr.h>
+#include <rdma/opa_addr.h>
#include "sa.h"
#include "core_priv.h"
@@ -861,7 +862,7 @@ static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
/* Repair the nlmsg header length */
nlmsg_end(skb, nlh);
- ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_LS, gfp_mask);
+ ret = rdma_nl_multicast(skb, RDMA_NL_GROUP_LS, gfp_mask);
if (!ret)
ret = len;
else
@@ -1021,9 +1022,9 @@ static void ib_nl_request_timeout(struct work_struct *work)
}
int ib_nl_handle_set_timeout(struct sk_buff *skb,
- struct netlink_callback *cb)
+ struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
{
- const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
int timeout, delta, abs_delta;
const struct nlattr *attr;
unsigned long flags;
@@ -1033,8 +1034,7 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb,
int ret;
if (!(nlh->nlmsg_flags & NLM_F_REQUEST) ||
- !(NETLINK_CB(skb).sk) ||
- !netlink_capable(skb, CAP_NET_ADMIN))
+ !(NETLINK_CB(skb).sk))
return -EPERM;
ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
@@ -1098,9 +1098,9 @@ static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
}
int ib_nl_handle_resolve_resp(struct sk_buff *skb,
- struct netlink_callback *cb)
+ struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
{
- const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
unsigned long flags;
struct ib_sa_query *query;
struct ib_mad_send_buf *send_buf;
@@ -1109,8 +1109,7 @@ int ib_nl_handle_resolve_resp(struct sk_buff *skb,
int ret;
if ((nlh->nlmsg_flags & NLM_F_REQUEST) ||
- !(NETLINK_CB(skb).sk) ||
- !netlink_capable(skb, CAP_NET_ADMIN))
+ !(NETLINK_CB(skb).sk))
return -EPERM;
spin_lock_irqsave(&ib_nl_request_lock, flags);
@@ -1241,6 +1240,11 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
ah_attr->type = rdma_ah_find_type(device, port_num);
rdma_ah_set_dlid(ah_attr, be32_to_cpu(sa_path_get_dlid(rec)));
+
+ if ((ah_attr->type == RDMA_AH_ATTR_TYPE_OPA) &&
+ (rdma_ah_get_dlid(ah_attr) == be16_to_cpu(IB_LID_PERMISSIVE)))
+ rdma_ah_set_make_grd(ah_attr, true);
+
rdma_ah_set_sl(ah_attr, rec->sl);
rdma_ah_set_path_bits(ah_attr, be32_to_cpu(sa_path_get_slid(rec)) &
get_src_path_mask(device, port_num));
@@ -1420,7 +1424,7 @@ static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
if ((query->flags & IB_SA_ENABLE_LOCAL_SERVICE) &&
(!(query->flags & IB_SA_QUERY_OPA))) {
- if (!ibnl_chk_listeners(RDMA_NL_GROUP_LS)) {
+ if (!rdma_nl_chk_listeners(RDMA_NL_GROUP_LS)) {
if (!ib_nl_make_request(query, gfp_mask))
return id;
}
@@ -2290,12 +2294,15 @@ static void update_sm_ah(struct work_struct *work)
rdma_ah_set_sl(&ah_attr, port_attr.sm_sl);
rdma_ah_set_port_num(&ah_attr, port->port_num);
if (port_attr.grh_required) {
- rdma_ah_set_ah_flags(&ah_attr, IB_AH_GRH);
-
- rdma_ah_set_subnet_prefix(&ah_attr,
- cpu_to_be64(port_attr.subnet_prefix));
- rdma_ah_set_interface_id(&ah_attr,
- cpu_to_be64(IB_SA_WELL_KNOWN_GUID));
+ if (ah_attr.type == RDMA_AH_ATTR_TYPE_OPA) {
+ rdma_ah_set_make_grd(&ah_attr, true);
+ } else {
+ rdma_ah_set_ah_flags(&ah_attr, IB_AH_GRH);
+ rdma_ah_set_subnet_prefix(&ah_attr,
+ cpu_to_be64(port_attr.subnet_prefix));
+ rdma_ah_set_interface_id(&ah_attr,
+ cpu_to_be64(IB_SA_WELL_KNOWN_GUID));
+ }
}
new_ah->ah = rdma_create_ah(port->agent->qp->pd, &ah_attr);
@@ -2410,8 +2417,7 @@ static void ib_sa_add_one(struct ib_device *device)
*/
INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event);
- if (ib_register_event_handler(&sa_dev->event_handler))
- goto err;
+ ib_register_event_handler(&sa_dev->event_handler);
for (i = 0; i <= e - s; ++i) {
if (rdma_cap_ib_sa(device, i + 1))
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 7ebe1ef23652..abc5ab581f82 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -1210,8 +1210,8 @@ static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
{
struct ib_device *dev = container_of(device, struct ib_device, dev);
- ib_get_device_fw_str(dev, buf, PAGE_SIZE);
- strlcat(buf, "\n", PAGE_SIZE);
+ ib_get_device_fw_str(dev, buf);
+ strlcat(buf, "\n", IB_FW_VERSION_NAME_MAX);
return strlen(buf);
}
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 112099c86a19..f2a7f62c2834 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -618,7 +618,7 @@ static ssize_t ib_ucm_init_qp_attr(struct ib_ucm_file *file,
if (result)
goto out;
- ib_copy_qp_attr_to_user(&resp, &qp_attr);
+ ib_copy_qp_attr_to_user(ctx->cm_id->device, &resp, &qp_attr);
if (copy_to_user((void __user *)(unsigned long)cmd.response,
&resp, sizeof(resp)))
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 276f0ef835bd..eb85b546e223 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -248,14 +248,15 @@ static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
dst->qp_num = src->qp_num;
}
-static void ucma_copy_ud_event(struct rdma_ucm_ud_param *dst,
+static void ucma_copy_ud_event(struct ib_device *device,
+ struct rdma_ucm_ud_param *dst,
struct rdma_ud_param *src)
{
if (src->private_data_len)
memcpy(dst->private_data, src->private_data,
src->private_data_len);
dst->private_data_len = src->private_data_len;
- ib_copy_ah_attr_to_user(&dst->ah_attr, &src->ah_attr);
+ ib_copy_ah_attr_to_user(device, &dst->ah_attr, &src->ah_attr);
dst->qp_num = src->qp_num;
dst->qkey = src->qkey;
}
@@ -335,7 +336,8 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id,
uevent->resp.event = event->event;
uevent->resp.status = event->status;
if (cm_id->qp_type == IB_QPT_UD)
- ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud);
+ ucma_copy_ud_event(cm_id->device, &uevent->resp.param.ud,
+ &event->param.ud);
else
ucma_copy_conn_event(&uevent->resp.param.conn,
&event->param.conn);
@@ -1157,7 +1159,7 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file,
if (ret)
goto out;
- ib_copy_qp_attr_to_user(&resp, &qp_attr);
+ ib_copy_qp_attr_to_user(ctx->cm_id->device, &resp, &qp_attr);
if (copy_to_user((void __user *)(unsigned long)cmd.response,
&resp, sizeof(resp)))
ret = -EFAULT;
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 8c4ec564e495..55e8f5ed8b3c 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -166,24 +166,6 @@ static int invalidate_page_trampoline(struct ib_umem *item, u64 start,
return 0;
}
-static void ib_umem_notifier_invalidate_page(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long address)
-{
- struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);
-
- if (!context->invalidate_range)
- return;
-
- ib_ucontext_notifier_start_account(context);
- down_read(&context->umem_rwsem);
- rbt_ib_umem_for_each_in_range(&context->umem_tree, address,
- address + PAGE_SIZE,
- invalidate_page_trampoline, NULL);
- up_read(&context->umem_rwsem);
- ib_ucontext_notifier_end_account(context);
-}
-
static int invalidate_range_start_trampoline(struct ib_umem *item, u64 start,
u64 end, void *cookie)
{
@@ -237,7 +219,6 @@ static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn,
static const struct mmu_notifier_ops ib_umem_notifiers = {
.release = ib_umem_notifier_release,
- .invalidate_page = ib_umem_notifier_invalidate_page,
.invalidate_range_start = ib_umem_notifier_invalidate_range_start,
.invalidate_range_end = ib_umem_notifier_invalidate_range_end,
};
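
The dropped ->invalidate_page callback is not a loss of coverage: single-page
invalidations reach the notifier as a one-page range through the remaining range
callbacks, which umem_odp already handles. Conceptually (illustrative sketch, not
code from this patch):

/*
 * What the core mm reports for a single page under the range-notifier flow:
 *
 *	mmu_notifier_invalidate_range_start(mm, addr, addr + PAGE_SIZE);
 *	... page is unmapped ...
 *	mmu_notifier_invalidate_range_end(mm, addr, addr + PAGE_SIZE);
 *
 * so ib_umem_notifier_invalidate_range_start/end() cover the same case the
 * removed ib_umem_notifier_invalidate_page() used to handle.
 */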
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 36a6f5c8914c..c1696e6084b2 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -229,7 +229,7 @@ static void recv_handler(struct ib_mad_agent *agent,
packet->mad.hdr.status = 0;
packet->mad.hdr.length = hdr_size(file) + mad_recv_wc->mad_len;
packet->mad.hdr.qpn = cpu_to_be32(mad_recv_wc->wc->src_qp);
- packet->mad.hdr.lid = cpu_to_be16(mad_recv_wc->wc->slid);
+ packet->mad.hdr.lid = ib_lid_be16(mad_recv_wc->wc->slid);
packet->mad.hdr.sl = mad_recv_wc->wc->sl;
packet->mad.hdr.path_bits = mad_recv_wc->wc->dlid_path_bits;
packet->mad.hdr.pkey_index = mad_recv_wc->wc->pkey_index;
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index 64d494a64daf..37c8903e7fd0 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -100,6 +100,7 @@ struct ib_uverbs_device {
struct mutex lists_mutex; /* protect lists */
struct list_head uverbs_file_list;
struct list_head uverbs_events_file_list;
+ struct uverbs_root_spec *specs_root;
};
struct ib_uverbs_event_queue {
@@ -218,6 +219,8 @@ int uverbs_dealloc_mw(struct ib_mw *mw);
void ib_uverbs_detach_umcast(struct ib_qp *qp,
struct ib_uqp_object *uobj);
+long ib_uverbs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+
struct ib_uverbs_flow_spec {
union {
union {
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 8ba9bfb073d1..e0cb99860934 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -91,9 +91,10 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
goto err;
}
- INIT_UDATA(&udata, buf + sizeof cmd,
- (unsigned long) cmd.response + sizeof resp,
- in_len - sizeof cmd, out_len - sizeof resp);
+ INIT_UDATA(&udata, buf + sizeof(cmd),
+ (unsigned long) cmd.response + sizeof(resp),
+ in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
+ out_len - sizeof(resp));
ret = ib_rdmacg_try_charge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);
if (ret)
@@ -275,8 +276,14 @@ ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
resp.bad_pkey_cntr = attr.bad_pkey_cntr;
resp.qkey_viol_cntr = attr.qkey_viol_cntr;
resp.pkey_tbl_len = attr.pkey_tbl_len;
- resp.lid = attr.lid;
- resp.sm_lid = attr.sm_lid;
+
+ if (rdma_cap_opa_ah(ib_dev, cmd.port_num)) {
+ resp.lid = OPA_TO_IB_UCAST_LID(attr.lid);
+ resp.sm_lid = OPA_TO_IB_UCAST_LID(attr.sm_lid);
+ } else {
+ resp.lid = ib_lid_cpu16(attr.lid);
+ resp.sm_lid = ib_lid_cpu16(attr.sm_lid);
+ }
resp.lmc = attr.lmc;
resp.max_vl_num = attr.max_vl_num;
resp.sm_sl = attr.sm_sl;
@@ -313,9 +320,10 @@ ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- INIT_UDATA(&udata, buf + sizeof cmd,
- (unsigned long) cmd.response + sizeof resp,
- in_len - sizeof cmd, out_len - sizeof resp);
+ INIT_UDATA(&udata, buf + sizeof(cmd),
+ (unsigned long) cmd.response + sizeof(resp),
+ in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
+ out_len - sizeof(resp));
uobj = uobj_alloc(uobj_get_type(pd), file->ucontext);
if (IS_ERR(uobj))
@@ -482,9 +490,10 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- INIT_UDATA(&udata, buf + sizeof cmd,
- (unsigned long) cmd.response + sizeof resp,
- in_len - sizeof cmd, out_len - sizeof resp);
+ INIT_UDATA(&udata, buf + sizeof(cmd),
+ (unsigned long) cmd.response + sizeof(resp),
+ in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
+ out_len - sizeof(resp));
mutex_lock(&file->device->xrcd_tree_mutex);
@@ -646,9 +655,10 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- INIT_UDATA(&udata, buf + sizeof cmd,
- (unsigned long) cmd.response + sizeof resp,
- in_len - sizeof cmd, out_len - sizeof resp);
+ INIT_UDATA(&udata, buf + sizeof(cmd),
+ (unsigned long) cmd.response + sizeof(resp),
+ in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
+ out_len - sizeof(resp));
if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
return -EINVAL;
@@ -740,7 +750,8 @@ ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
INIT_UDATA(&udata, buf + sizeof(cmd),
(unsigned long) cmd.response + sizeof(resp),
- in_len - sizeof(cmd), out_len - sizeof(resp));
+ in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
+ out_len - sizeof(resp));
if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
return -EINVAL;
@@ -1015,7 +1026,7 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
cq->uobject = &obj->uobject;
cq->comp_handler = ib_uverbs_comp_handler;
cq->event_handler = ib_uverbs_cq_event_handler;
- cq->cq_context = &ev_file->ev_queue;
+ cq->cq_context = ev_file ? &ev_file->ev_queue : NULL;
atomic_set(&cq->usecnt, 0);
obj->uobject.object = cq;
@@ -1080,7 +1091,8 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
INIT_UDATA(&uhw, buf + sizeof(cmd),
(unsigned long)cmd.response + sizeof(resp),
- in_len - sizeof(cmd), out_len - sizeof(resp));
+ in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
+ out_len - sizeof(resp));
memset(&cmd_ex, 0, sizeof(cmd_ex));
cmd_ex.user_handle = cmd.user_handle;
@@ -1153,7 +1165,7 @@ ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
int out_len)
{
struct ib_uverbs_resize_cq cmd;
- struct ib_uverbs_resize_cq_resp resp;
+ struct ib_uverbs_resize_cq_resp resp = {};
struct ib_udata udata;
struct ib_cq *cq;
int ret = -EINVAL;
@@ -1161,9 +1173,10 @@ ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- INIT_UDATA(&udata, buf + sizeof cmd,
- (unsigned long) cmd.response + sizeof resp,
- in_len - sizeof cmd, out_len - sizeof resp);
+ INIT_UDATA(&udata, buf + sizeof(cmd),
+ (unsigned long) cmd.response + sizeof(resp),
+ in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
+ out_len - sizeof(resp));
cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
if (!cq)
@@ -1185,7 +1198,8 @@ out:
return ret ? ret : in_len;
}
-static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
+static int copy_wc_to_user(struct ib_device *ib_dev, void __user *dest,
+ struct ib_wc *wc)
{
struct ib_uverbs_wc tmp;
@@ -1199,7 +1213,10 @@ static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
tmp.src_qp = wc->src_qp;
tmp.wc_flags = wc->wc_flags;
tmp.pkey_index = wc->pkey_index;
- tmp.slid = wc->slid;
+ if (rdma_cap_opa_ah(ib_dev, wc->port_num))
+ tmp.slid = OPA_TO_IB_UCAST_LID(wc->slid);
+ else
+ tmp.slid = ib_lid_cpu16(wc->slid);
tmp.sl = wc->sl;
tmp.dlid_path_bits = wc->dlid_path_bits;
tmp.port_num = wc->port_num;
@@ -1243,7 +1260,7 @@ ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
if (!ret)
break;
- ret = copy_wc_to_user(data_ptr, &wc);
+ ret = copy_wc_to_user(ib_dev, data_ptr, &wc);
if (ret)
goto out_put;
@@ -1296,7 +1313,6 @@ ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
struct ib_uobject *uobj;
struct ib_cq *cq;
struct ib_ucq_object *obj;
- struct ib_uverbs_event_queue *ev_queue;
int ret = -EINVAL;
if (copy_from_user(&cmd, buf, sizeof cmd))
@@ -1313,7 +1329,6 @@ ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
*/
uverbs_uobject_get(uobj);
cq = uobj->object;
- ev_queue = cq->cq_context;
obj = container_of(cq->uobject, struct ib_ucq_object, uobject);
memset(&resp, 0, sizeof(resp));
@@ -1385,8 +1400,9 @@ static int create_qp(struct ib_uverbs_file *file,
attr.rwq_ind_tbl = ind_tbl;
}
- if ((cmd_sz >= offsetof(typeof(*cmd), reserved1) +
- sizeof(cmd->reserved1)) && cmd->reserved1) {
+ if (cmd_sz > sizeof(*cmd) &&
+ !ib_is_udata_cleared(ucore, sizeof(*cmd),
+ cmd_sz - sizeof(*cmd))) {
ret = -EOPNOTSUPP;
goto err_put;
}
@@ -1422,7 +1438,7 @@ static int create_qp(struct ib_uverbs_file *file,
if (cmd->is_srq) {
srq = uobj_get_obj_read(srq, cmd->srq_handle,
file->ucontext);
- if (!srq || srq->srq_type != IB_SRQT_BASIC) {
+ if (!srq || srq->srq_type == IB_SRQT_XRC) {
ret = -EINVAL;
goto err_put;
}
@@ -1484,11 +1500,21 @@ static int create_qp(struct ib_uverbs_file *file,
IB_QP_CREATE_MANAGED_SEND |
IB_QP_CREATE_MANAGED_RECV |
IB_QP_CREATE_SCATTER_FCS |
- IB_QP_CREATE_CVLAN_STRIPPING)) {
+ IB_QP_CREATE_CVLAN_STRIPPING |
+ IB_QP_CREATE_SOURCE_QPN)) {
ret = -EINVAL;
goto err_put;
}
+ if (attr.create_flags & IB_QP_CREATE_SOURCE_QPN) {
+ if (!capable(CAP_NET_RAW)) {
+ ret = -EPERM;
+ goto err_put;
+ }
+
+ attr.source_qpn = cmd->source_qpn;
+ }
+
buf = (void *)cmd + sizeof(*cmd);
if (cmd_sz > sizeof(*cmd))
if (!(buf[0] == 0 && !memcmp(buf, buf + 1,
@@ -1524,6 +1550,7 @@ static int create_qp(struct ib_uverbs_file *file,
qp->qp_type = attr.qp_type;
atomic_set(&qp->usecnt, 0);
atomic_inc(&pd->usecnt);
+ qp->port = 0;
if (attr.send_cq)
atomic_inc(&attr.send_cq->usecnt);
if (attr.recv_cq)
@@ -1723,9 +1750,10 @@ ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- INIT_UDATA(&udata, buf + sizeof cmd,
- (unsigned long) cmd.response + sizeof resp,
- in_len - sizeof cmd, out_len - sizeof resp);
+ INIT_UDATA(&udata, buf + sizeof(cmd),
+ (unsigned long) cmd.response + sizeof(resp),
+ in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
+ out_len - sizeof(resp));
obj = (struct ib_uqp_object *)uobj_alloc(uobj_get_type(qp),
file->ucontext);
@@ -1792,6 +1820,28 @@ err_put:
return ret;
}
+static void copy_ah_attr_to_uverbs(struct ib_uverbs_qp_dest *uverb_attr,
+ struct rdma_ah_attr *rdma_attr)
+{
+ const struct ib_global_route *grh;
+
+ uverb_attr->dlid = rdma_ah_get_dlid(rdma_attr);
+ uverb_attr->sl = rdma_ah_get_sl(rdma_attr);
+ uverb_attr->src_path_bits = rdma_ah_get_path_bits(rdma_attr);
+ uverb_attr->static_rate = rdma_ah_get_static_rate(rdma_attr);
+ uverb_attr->is_global = !!(rdma_ah_get_ah_flags(rdma_attr) &
+ IB_AH_GRH);
+ if (uverb_attr->is_global) {
+ grh = rdma_ah_read_grh(rdma_attr);
+ memcpy(uverb_attr->dgid, grh->dgid.raw, 16);
+ uverb_attr->flow_label = grh->flow_label;
+ uverb_attr->sgid_index = grh->sgid_index;
+ uverb_attr->hop_limit = grh->hop_limit;
+ uverb_attr->traffic_class = grh->traffic_class;
+ }
+ uverb_attr->port_num = rdma_ah_get_port_num(rdma_attr);
+}
+
ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
struct ib_device *ib_dev,
const char __user *buf, int in_len,
@@ -1802,7 +1852,6 @@ ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
struct ib_qp *qp;
struct ib_qp_attr *attr;
struct ib_qp_init_attr *init_attr;
- const struct ib_global_route *grh;
int ret;
if (copy_from_user(&cmd, buf, sizeof cmd))
@@ -1852,39 +1901,8 @@ ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
resp.alt_port_num = attr->alt_port_num;
resp.alt_timeout = attr->alt_timeout;
- resp.dest.dlid = rdma_ah_get_dlid(&attr->ah_attr);
- resp.dest.sl = rdma_ah_get_sl(&attr->ah_attr);
- resp.dest.src_path_bits = rdma_ah_get_path_bits(&attr->ah_attr);
- resp.dest.static_rate = rdma_ah_get_static_rate(&attr->ah_attr);
- resp.dest.is_global = !!(rdma_ah_get_ah_flags(&attr->ah_attr) &
- IB_AH_GRH);
- if (resp.dest.is_global) {
- grh = rdma_ah_read_grh(&attr->ah_attr);
- memcpy(resp.dest.dgid, grh->dgid.raw, 16);
- resp.dest.flow_label = grh->flow_label;
- resp.dest.sgid_index = grh->sgid_index;
- resp.dest.hop_limit = grh->hop_limit;
- resp.dest.traffic_class = grh->traffic_class;
- }
- resp.dest.port_num = rdma_ah_get_port_num(&attr->ah_attr);
-
- resp.alt_dest.dlid = rdma_ah_get_dlid(&attr->alt_ah_attr);
- resp.alt_dest.sl = rdma_ah_get_sl(&attr->alt_ah_attr);
- resp.alt_dest.src_path_bits = rdma_ah_get_path_bits(&attr->alt_ah_attr);
- resp.alt_dest.static_rate
- = rdma_ah_get_static_rate(&attr->alt_ah_attr);
- resp.alt_dest.is_global
- = !!(rdma_ah_get_ah_flags(&attr->alt_ah_attr) &
- IB_AH_GRH);
- if (resp.alt_dest.is_global) {
- grh = rdma_ah_read_grh(&attr->alt_ah_attr);
- memcpy(resp.alt_dest.dgid, grh->dgid.raw, 16);
- resp.alt_dest.flow_label = grh->flow_label;
- resp.alt_dest.sgid_index = grh->sgid_index;
- resp.alt_dest.hop_limit = grh->hop_limit;
- resp.alt_dest.traffic_class = grh->traffic_class;
- }
- resp.alt_dest.port_num = rdma_ah_get_port_num(&attr->alt_ah_attr);
+ copy_ah_attr_to_uverbs(&resp.dest, &attr->ah_attr);
+ copy_ah_attr_to_uverbs(&resp.alt_dest, &attr->alt_ah_attr);
resp.max_send_wr = init_attr->cap.max_send_wr;
resp.max_recv_wr = init_attr->cap.max_recv_wr;
@@ -1918,6 +1936,29 @@ static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
}
}
+static void copy_ah_attr_from_uverbs(struct ib_device *dev,
+ struct rdma_ah_attr *rdma_attr,
+ struct ib_uverbs_qp_dest *uverb_attr)
+{
+ rdma_attr->type = rdma_ah_find_type(dev, uverb_attr->port_num);
+ if (uverb_attr->is_global) {
+ rdma_ah_set_grh(rdma_attr, NULL,
+ uverb_attr->flow_label,
+ uverb_attr->sgid_index,
+ uverb_attr->hop_limit,
+ uverb_attr->traffic_class);
+ rdma_ah_set_dgid_raw(rdma_attr, uverb_attr->dgid);
+ } else {
+ rdma_ah_set_ah_flags(rdma_attr, 0);
+ }
+ rdma_ah_set_dlid(rdma_attr, uverb_attr->dlid);
+ rdma_ah_set_sl(rdma_attr, uverb_attr->sl);
+ rdma_ah_set_path_bits(rdma_attr, uverb_attr->src_path_bits);
+ rdma_ah_set_static_rate(rdma_attr, uverb_attr->static_rate);
+ rdma_ah_set_port_num(rdma_attr, uverb_attr->port_num);
+ rdma_ah_set_make_grd(rdma_attr, false);
+}
+
static int modify_qp(struct ib_uverbs_file *file,
struct ib_uverbs_ex_modify_qp *cmd, struct ib_udata *udata)
{
@@ -1935,7 +1976,8 @@ static int modify_qp(struct ib_uverbs_file *file,
goto out;
}
- if (!rdma_is_port_valid(qp->device, cmd->base.port_num)) {
+ if ((cmd->base.attr_mask & IB_QP_PORT) &&
+ !rdma_is_port_valid(qp->device, cmd->base.port_num)) {
ret = -EINVAL;
goto release_qp;
}
@@ -1963,70 +2005,21 @@ static int modify_qp(struct ib_uverbs_file *file,
attr->alt_timeout = cmd->base.alt_timeout;
attr->rate_limit = cmd->rate_limit;
- attr->ah_attr.type = rdma_ah_find_type(qp->device,
- cmd->base.dest.port_num);
- if (cmd->base.dest.is_global) {
- rdma_ah_set_grh(&attr->ah_attr, NULL,
- cmd->base.dest.flow_label,
- cmd->base.dest.sgid_index,
- cmd->base.dest.hop_limit,
- cmd->base.dest.traffic_class);
- rdma_ah_set_dgid_raw(&attr->ah_attr, cmd->base.dest.dgid);
- } else {
- rdma_ah_set_ah_flags(&attr->ah_attr, 0);
- }
- rdma_ah_set_dlid(&attr->ah_attr, cmd->base.dest.dlid);
- rdma_ah_set_sl(&attr->ah_attr, cmd->base.dest.sl);
- rdma_ah_set_path_bits(&attr->ah_attr, cmd->base.dest.src_path_bits);
- rdma_ah_set_static_rate(&attr->ah_attr, cmd->base.dest.static_rate);
- rdma_ah_set_port_num(&attr->ah_attr,
- cmd->base.dest.port_num);
-
- attr->alt_ah_attr.type = rdma_ah_find_type(qp->device,
- cmd->base.dest.port_num);
- if (cmd->base.alt_dest.is_global) {
- rdma_ah_set_grh(&attr->alt_ah_attr, NULL,
- cmd->base.alt_dest.flow_label,
- cmd->base.alt_dest.sgid_index,
- cmd->base.alt_dest.hop_limit,
- cmd->base.alt_dest.traffic_class);
- rdma_ah_set_dgid_raw(&attr->alt_ah_attr,
- cmd->base.alt_dest.dgid);
- } else {
- rdma_ah_set_ah_flags(&attr->alt_ah_attr, 0);
- }
+ if (cmd->base.attr_mask & IB_QP_AV)
+ copy_ah_attr_from_uverbs(qp->device, &attr->ah_attr,
+ &cmd->base.dest);
- rdma_ah_set_dlid(&attr->alt_ah_attr, cmd->base.alt_dest.dlid);
- rdma_ah_set_sl(&attr->alt_ah_attr, cmd->base.alt_dest.sl);
- rdma_ah_set_path_bits(&attr->alt_ah_attr,
- cmd->base.alt_dest.src_path_bits);
- rdma_ah_set_static_rate(&attr->alt_ah_attr,
- cmd->base.alt_dest.static_rate);
- rdma_ah_set_port_num(&attr->alt_ah_attr,
- cmd->base.alt_dest.port_num);
+ if (cmd->base.attr_mask & IB_QP_ALT_PATH)
+ copy_ah_attr_from_uverbs(qp->device, &attr->alt_ah_attr,
+ &cmd->base.alt_dest);
- if (qp->real_qp == qp) {
- if (cmd->base.attr_mask & IB_QP_AV) {
- ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr);
- if (ret)
- goto release_qp;
- }
- ret = ib_security_modify_qp(qp,
- attr,
- modify_qp_mask(qp->qp_type,
- cmd->base.attr_mask),
- udata);
- } else {
- ret = ib_security_modify_qp(qp,
- attr,
- modify_qp_mask(qp->qp_type,
- cmd->base.attr_mask),
- NULL);
- }
+ ret = ib_modify_qp_with_udata(qp, attr,
+ modify_qp_mask(qp->qp_type,
+ cmd->base.attr_mask),
+ udata);
release_qp:
uobj_put_obj_read(qp);
-
out:
kfree(attr);
@@ -2050,7 +2043,8 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
return -EOPNOTSUPP;
INIT_UDATA(&udata, buf + sizeof(cmd.base), NULL,
- in_len - sizeof(cmd.base), out_len);
+ in_len - sizeof(cmd.base) - sizeof(struct ib_uverbs_cmd_hdr),
+ out_len);
ret = modify_qp(file, &cmd, &udata);
if (ret)
@@ -2103,7 +2097,6 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
struct ib_uverbs_destroy_qp cmd;
struct ib_uverbs_destroy_qp_resp resp;
struct ib_uobject *uobj;
- struct ib_qp *qp;
struct ib_uqp_object *obj;
int ret = -EINVAL;
@@ -2117,7 +2110,6 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
if (IS_ERR(uobj))
return PTR_ERR(uobj);
- qp = uobj->object;
obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
/*
* Make sure we don't free the memory in remove_commit as we still
@@ -2558,7 +2550,8 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
INIT_UDATA(&udata, buf + sizeof(cmd),
(unsigned long)cmd.response + sizeof(resp),
- in_len - sizeof(cmd), out_len - sizeof(resp));
+ in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
+ out_len - sizeof(resp));
uobj = uobj_alloc(uobj_get_type(ah), file->ucontext);
if (IS_ERR(uobj))
@@ -2571,6 +2564,7 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
}
attr.type = rdma_ah_find_type(ib_dev, cmd.attr.port_num);
+ rdma_ah_set_make_grd(&attr, false);
rdma_ah_set_dlid(&attr, cmd.attr.dlid);
rdma_ah_set_sl(&attr, cmd.attr.sl);
rdma_ah_set_path_bits(&attr, cmd.attr.src_path_bits);
@@ -3019,7 +3013,6 @@ int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file,
{
struct ib_uverbs_ex_destroy_wq cmd = {};
struct ib_uverbs_ex_destroy_wq_resp resp = {};
- struct ib_wq *wq;
struct ib_uobject *uobj;
struct ib_uwq_object *obj;
size_t required_cmd_sz;
@@ -3053,7 +3046,6 @@ int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file,
if (IS_ERR(uobj))
return PTR_ERR(uobj);
- wq = uobj->object;
obj = container_of(uobj, struct ib_uwq_object, uevent.uobject);
/*
* Make sure we don't free the memory in remove_commit as we still
@@ -3489,6 +3481,9 @@ static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
if (IS_ERR(obj))
return PTR_ERR(obj);
+ if (cmd->srq_type == IB_SRQT_TM)
+ attr.ext.tag_matching.max_num_tags = cmd->max_num_tags;
+
if (cmd->srq_type == IB_SRQT_XRC) {
xrcd_uobj = uobj_get_read(uobj_get_type(xrcd), cmd->xrcd_handle,
file->ucontext);
@@ -3505,10 +3500,12 @@ static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
atomic_inc(&obj->uxrcd->refcnt);
+ }
- attr.ext.xrc.cq = uobj_get_obj_read(cq, cmd->cq_handle,
- file->ucontext);
- if (!attr.ext.xrc.cq) {
+ if (ib_srq_has_cq(cmd->srq_type)) {
+ attr.ext.cq = uobj_get_obj_read(cq, cmd->cq_handle,
+ file->ucontext);
+ if (!attr.ext.cq) {
ret = -EINVAL;
goto err_put_xrcd;
}
@@ -3543,10 +3540,13 @@ static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
srq->event_handler = attr.event_handler;
srq->srq_context = attr.srq_context;
+ if (ib_srq_has_cq(cmd->srq_type)) {
+ srq->ext.cq = attr.ext.cq;
+ atomic_inc(&attr.ext.cq->usecnt);
+ }
+
if (cmd->srq_type == IB_SRQT_XRC) {
- srq->ext.xrc.cq = attr.ext.xrc.cq;
srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
- atomic_inc(&attr.ext.xrc.cq->usecnt);
atomic_inc(&attr.ext.xrc.xrcd->usecnt);
}
@@ -3569,10 +3569,12 @@ static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
goto err_copy;
}
- if (cmd->srq_type == IB_SRQT_XRC) {
+ if (cmd->srq_type == IB_SRQT_XRC)
uobj_put_read(xrcd_uobj);
- uobj_put_obj_read(attr.ext.xrc.cq);
- }
+
+ if (ib_srq_has_cq(cmd->srq_type))
+ uobj_put_obj_read(attr.ext.cq);
+
uobj_put_obj_read(pd);
uobj_alloc_commit(&obj->uevent.uobject);
@@ -3585,8 +3587,8 @@ err_put:
uobj_put_obj_read(pd);
err_put_cq:
- if (cmd->srq_type == IB_SRQT_XRC)
- uobj_put_obj_read(attr.ext.xrc.cq);
+ if (ib_srq_has_cq(cmd->srq_type))
+ uobj_put_obj_read(attr.ext.cq);
err_put_xrcd:
if (cmd->srq_type == IB_SRQT_XRC) {
@@ -3616,6 +3618,7 @@ ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
+ memset(&xcmd, 0, sizeof(xcmd));
xcmd.response = cmd.response;
xcmd.user_handle = cmd.user_handle;
xcmd.srq_type = IB_SRQT_BASIC;
@@ -3624,10 +3627,10 @@ ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
xcmd.max_sge = cmd.max_sge;
xcmd.srq_limit = cmd.srq_limit;
- INIT_UDATA(&udata, buf + sizeof cmd,
- (unsigned long) cmd.response + sizeof resp,
- in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr),
- out_len - sizeof resp);
+ INIT_UDATA(&udata, buf + sizeof(cmd),
+ (unsigned long) cmd.response + sizeof(resp),
+ in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
+ out_len - sizeof(resp));
ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata);
if (ret)
@@ -3651,10 +3654,10 @@ ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- INIT_UDATA(&udata, buf + sizeof cmd,
- (unsigned long) cmd.response + sizeof resp,
- in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr),
- out_len - sizeof resp);
+ INIT_UDATA(&udata, buf + sizeof(cmd),
+ (unsigned long) cmd.response + sizeof(resp),
+ in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
+ out_len - sizeof(resp));
ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata);
if (ret)
@@ -3743,10 +3746,8 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
struct ib_uverbs_destroy_srq cmd;
struct ib_uverbs_destroy_srq_resp resp;
struct ib_uobject *uobj;
- struct ib_srq *srq;
struct ib_uevent_object *obj;
int ret = -EINVAL;
- enum ib_srq_type srq_type;
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
@@ -3756,9 +3757,7 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
if (IS_ERR(uobj))
return PTR_ERR(uobj);
- srq = uobj->object;
obj = container_of(uobj, struct ib_uevent_object, uobject);
- srq_type = srq->srq_type;
/*
* Make sure we don't free the memory in remove_commit as we still
* needs the uobject memory to create the response.
@@ -3869,6 +3868,16 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
resp.raw_packet_caps = attr.raw_packet_caps;
resp.response_length += sizeof(resp.raw_packet_caps);
+
+ if (ucore->outlen < resp.response_length + sizeof(resp.xrq_caps))
+ goto end;
+
+ resp.xrq_caps.max_rndv_hdr_size = attr.xrq_caps.max_rndv_hdr_size;
+ resp.xrq_caps.max_num_tags = attr.xrq_caps.max_num_tags;
+ resp.xrq_caps.max_ops = attr.xrq_caps.max_ops;
+ resp.xrq_caps.max_sge = attr.xrq_caps.max_sge;
+ resp.xrq_caps.flags = attr.xrq_caps.flags;
+ resp.response_length += sizeof(resp.xrq_caps);
end:
err = ib_copy_to_udata(ucore, &resp, resp.response_length);
return err;
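
The recurring INIT_UDATA() change above is purely length accounting: in_len as
passed to these handlers still counts the struct ib_uverbs_cmd_hdr that was already
consumed from the write() buffer, so the driver-visible udata must subtract both the
fixed command struct and the header. Schematically (illustrative, assuming that
layout):

/*
 * write() buffer as seen by a command handler (sizes not to scale):
 *
 *   | ib_uverbs_cmd_hdr | fixed cmd struct | driver-specific input |
 *                       ^buf
 *   |<------------------------- in_len -------------------------->|
 *
 * hence:
 *   udata.inlen = in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr);
 */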
diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c
new file mode 100644
index 000000000000..5286ad57d903
--- /dev/null
+++ b/drivers/infiniband/core/uverbs_ioctl.c
@@ -0,0 +1,364 @@
+/*
+ * Copyright (c) 2017, Mellanox Technologies inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <rdma/rdma_user_ioctl.h>
+#include <rdma/uverbs_ioctl.h>
+#include "rdma_core.h"
+#include "uverbs.h"
+
+static int uverbs_process_attr(struct ib_device *ibdev,
+ struct ib_ucontext *ucontext,
+ const struct ib_uverbs_attr *uattr,
+ u16 attr_id,
+ const struct uverbs_attr_spec_hash *attr_spec_bucket,
+ struct uverbs_attr_bundle_hash *attr_bundle_h,
+ struct ib_uverbs_attr __user *uattr_ptr)
+{
+ const struct uverbs_attr_spec *spec;
+ struct uverbs_attr *e;
+ const struct uverbs_object_spec *object;
+ struct uverbs_obj_attr *o_attr;
+ struct uverbs_attr *elements = attr_bundle_h->attrs;
+
+ if (uattr->reserved)
+ return -EINVAL;
+
+ if (attr_id >= attr_spec_bucket->num_attrs) {
+ if (uattr->flags & UVERBS_ATTR_F_MANDATORY)
+ return -EINVAL;
+ else
+ return 0;
+ }
+
+ spec = &attr_spec_bucket->attrs[attr_id];
+ e = &elements[attr_id];
+ e->uattr = uattr_ptr;
+
+ switch (spec->type) {
+ case UVERBS_ATTR_TYPE_PTR_IN:
+ case UVERBS_ATTR_TYPE_PTR_OUT:
+ if (uattr->len < spec->len ||
+ (!(spec->flags & UVERBS_ATTR_SPEC_F_MIN_SZ) &&
+ uattr->len > spec->len))
+ return -EINVAL;
+
+ e->ptr_attr.data = uattr->data;
+ e->ptr_attr.len = uattr->len;
+ e->ptr_attr.flags = uattr->flags;
+ break;
+
+ case UVERBS_ATTR_TYPE_IDR:
+ if (uattr->data >> 32)
+ return -EINVAL;
+ /* fall through */
+ case UVERBS_ATTR_TYPE_FD:
+ if (uattr->len != 0 || !ucontext || uattr->data > INT_MAX)
+ return -EINVAL;
+
+ o_attr = &e->obj_attr;
+ object = uverbs_get_object(ibdev, spec->obj.obj_type);
+ if (!object)
+ return -EINVAL;
+ o_attr->type = object->type_attrs;
+
+ o_attr->id = (int)uattr->data;
+ o_attr->uobject = uverbs_get_uobject_from_context(
+ o_attr->type,
+ ucontext,
+ spec->obj.access,
+ o_attr->id);
+
+ if (IS_ERR(o_attr->uobject))
+ return PTR_ERR(o_attr->uobject);
+
+ if (spec->obj.access == UVERBS_ACCESS_NEW) {
+ u64 id = o_attr->uobject->id;
+
+ /* Copy the allocated id to the user-space */
+ if (put_user(id, &e->uattr->data)) {
+ uverbs_finalize_object(o_attr->uobject,
+ UVERBS_ACCESS_NEW,
+ false);
+ return -EFAULT;
+ }
+ }
+
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ set_bit(attr_id, attr_bundle_h->valid_bitmap);
+ return 0;
+}
+
+static int uverbs_uattrs_process(struct ib_device *ibdev,
+ struct ib_ucontext *ucontext,
+ const struct ib_uverbs_attr *uattrs,
+ size_t num_uattrs,
+ const struct uverbs_method_spec *method,
+ struct uverbs_attr_bundle *attr_bundle,
+ struct ib_uverbs_attr __user *uattr_ptr)
+{
+ size_t i;
+ int ret = 0;
+ int num_given_buckets = 0;
+
+ for (i = 0; i < num_uattrs; i++) {
+ const struct ib_uverbs_attr *uattr = &uattrs[i];
+ u16 attr_id = uattr->attr_id;
+ struct uverbs_attr_spec_hash *attr_spec_bucket;
+
+ ret = uverbs_ns_idx(&attr_id, method->num_buckets);
+ if (ret < 0) {
+ if (uattr->flags & UVERBS_ATTR_F_MANDATORY) {
+ uverbs_finalize_objects(attr_bundle,
+ method->attr_buckets,
+ num_given_buckets,
+ false);
+ return ret;
+ }
+ continue;
+ }
+
+ /*
+ * ret is the found ns, so increase num_given_buckets if
+ * necessary.
+ */
+ if (ret >= num_given_buckets)
+ num_given_buckets = ret + 1;
+
+ attr_spec_bucket = method->attr_buckets[ret];
+ ret = uverbs_process_attr(ibdev, ucontext, uattr, attr_id,
+ attr_spec_bucket, &attr_bundle->hash[ret],
+ uattr_ptr++);
+ if (ret) {
+ uverbs_finalize_objects(attr_bundle,
+ method->attr_buckets,
+ num_given_buckets,
+ false);
+ return ret;
+ }
+ }
+
+ return num_given_buckets;
+}
+
+static int uverbs_validate_kernel_mandatory(const struct uverbs_method_spec *method_spec,
+ struct uverbs_attr_bundle *attr_bundle)
+{
+ unsigned int i;
+
+ for (i = 0; i < attr_bundle->num_buckets; i++) {
+ struct uverbs_attr_spec_hash *attr_spec_bucket =
+ method_spec->attr_buckets[i];
+
+ if (!bitmap_subset(attr_spec_bucket->mandatory_attrs_bitmask,
+ attr_bundle->hash[i].valid_bitmap,
+ attr_spec_bucket->num_attrs))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int uverbs_handle_method(struct ib_uverbs_attr __user *uattr_ptr,
+ const struct ib_uverbs_attr *uattrs,
+ size_t num_uattrs,
+ struct ib_device *ibdev,
+ struct ib_uverbs_file *ufile,
+ const struct uverbs_method_spec *method_spec,
+ struct uverbs_attr_bundle *attr_bundle)
+{
+ int ret;
+ int finalize_ret;
+ int num_given_buckets;
+
+ num_given_buckets = uverbs_uattrs_process(ibdev, ufile->ucontext, uattrs,
+ num_uattrs, method_spec,
+ attr_bundle, uattr_ptr);
+ if (num_given_buckets <= 0)
+ return -EINVAL;
+
+ attr_bundle->num_buckets = num_given_buckets;
+ ret = uverbs_validate_kernel_mandatory(method_spec, attr_bundle);
+ if (ret)
+ goto cleanup;
+
+ ret = method_spec->handler(ibdev, ufile, attr_bundle);
+cleanup:
+ finalize_ret = uverbs_finalize_objects(attr_bundle,
+ method_spec->attr_buckets,
+ attr_bundle->num_buckets,
+ !ret);
+
+ return ret ? ret : finalize_ret;
+}
+
+#define UVERBS_OPTIMIZE_USING_STACK_SZ 256
+static long ib_uverbs_cmd_verbs(struct ib_device *ib_dev,
+ struct ib_uverbs_file *file,
+ struct ib_uverbs_ioctl_hdr *hdr,
+ void __user *buf)
+{
+ const struct uverbs_object_spec *object_spec;
+ const struct uverbs_method_spec *method_spec;
+ long err = 0;
+ unsigned int i;
+ struct {
+ struct ib_uverbs_attr *uattrs;
+ struct uverbs_attr_bundle *uverbs_attr_bundle;
+ } *ctx = NULL;
+ struct uverbs_attr *curr_attr;
+ unsigned long *curr_bitmap;
+ size_t ctx_size;
+#ifdef UVERBS_OPTIMIZE_USING_STACK_SZ
+ uintptr_t data[UVERBS_OPTIMIZE_USING_STACK_SZ / sizeof(uintptr_t)];
+#endif
+
+ if (hdr->reserved)
+ return -EINVAL;
+
+ object_spec = uverbs_get_object(ib_dev, hdr->object_id);
+ if (!object_spec)
+ return -EOPNOTSUPP;
+
+ method_spec = uverbs_get_method(object_spec, hdr->method_id);
+ if (!method_spec)
+ return -EOPNOTSUPP;
+
+ if ((method_spec->flags & UVERBS_ACTION_FLAG_CREATE_ROOT) ^ !file->ucontext)
+ return -EINVAL;
+
+ ctx_size = sizeof(*ctx) +
+ sizeof(struct uverbs_attr_bundle) +
+ sizeof(struct uverbs_attr_bundle_hash) * method_spec->num_buckets +
+ sizeof(*ctx->uattrs) * hdr->num_attrs +
+ sizeof(*ctx->uverbs_attr_bundle->hash[0].attrs) *
+ method_spec->num_child_attrs +
+ sizeof(*ctx->uverbs_attr_bundle->hash[0].valid_bitmap) *
+ (method_spec->num_child_attrs / BITS_PER_LONG +
+ method_spec->num_buckets);
+
+#ifdef UVERBS_OPTIMIZE_USING_STACK_SZ
+ if (ctx_size <= UVERBS_OPTIMIZE_USING_STACK_SZ)
+ ctx = (void *)data;
+
+ if (!ctx)
+#endif
+ ctx = kmalloc(ctx_size, GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->uverbs_attr_bundle = (void *)ctx + sizeof(*ctx);
+ ctx->uattrs = (void *)(ctx->uverbs_attr_bundle + 1) +
+ (sizeof(ctx->uverbs_attr_bundle->hash[0]) *
+ method_spec->num_buckets);
+ curr_attr = (void *)(ctx->uattrs + hdr->num_attrs);
+ curr_bitmap = (void *)(curr_attr + method_spec->num_child_attrs);
+
+ /*
+ * We just fill the pointers and num_attrs here. The data itself will be
+ * filled at a later stage (uverbs_process_attr)
+ */
+ for (i = 0; i < method_spec->num_buckets; i++) {
+ unsigned int curr_num_attrs = method_spec->attr_buckets[i]->num_attrs;
+
+ ctx->uverbs_attr_bundle->hash[i].attrs = curr_attr;
+ curr_attr += curr_num_attrs;
+ ctx->uverbs_attr_bundle->hash[i].num_attrs = curr_num_attrs;
+ ctx->uverbs_attr_bundle->hash[i].valid_bitmap = curr_bitmap;
+ bitmap_zero(curr_bitmap, curr_num_attrs);
+ curr_bitmap += BITS_TO_LONGS(curr_num_attrs);
+ }
+
+ err = copy_from_user(ctx->uattrs, buf,
+ sizeof(*ctx->uattrs) * hdr->num_attrs);
+ if (err) {
+ err = -EFAULT;
+ goto out;
+ }
+
+ err = uverbs_handle_method(buf, ctx->uattrs, hdr->num_attrs, ib_dev,
+ file, method_spec, ctx->uverbs_attr_bundle);
+out:
+#ifdef UVERBS_OPTIMIZE_USING_STACK_SZ
+ if (ctx_size > UVERBS_OPTIMIZE_USING_STACK_SZ)
+#endif
+ kfree(ctx);
+ return err;
+}
+
+#define IB_UVERBS_MAX_CMD_SZ 4096
+
+long ib_uverbs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct ib_uverbs_file *file = filp->private_data;
+ struct ib_uverbs_ioctl_hdr __user *user_hdr =
+ (struct ib_uverbs_ioctl_hdr __user *)arg;
+ struct ib_uverbs_ioctl_hdr hdr;
+ struct ib_device *ib_dev;
+ int srcu_key;
+ long err;
+
+ srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
+ ib_dev = srcu_dereference(file->device->ib_dev,
+ &file->device->disassociate_srcu);
+ if (!ib_dev) {
+ err = -EIO;
+ goto out;
+ }
+
+ if (cmd == RDMA_VERBS_IOCTL) {
+ err = copy_from_user(&hdr, user_hdr, sizeof(hdr));
+
+ if (err || hdr.length > IB_UVERBS_MAX_CMD_SZ ||
+ hdr.length != sizeof(hdr) + hdr.num_attrs * sizeof(struct ib_uverbs_attr)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (hdr.reserved) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ err = ib_uverbs_cmd_verbs(ib_dev, file, &hdr,
+ (__user void *)arg + sizeof(hdr));
+ } else {
+ err = -ENOIOCTLCMD;
+ }
+out:
+ srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
+
+ return err;
+}
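
From user space, the new ioctl() path is driven by a header followed by an inline
attribute array; hdr.length must match exactly, as checked above. A rough sketch of
a caller (illustrative only; OBJECT_ID, METHOD_ID, SOME_ATTR_ID and cmd_fd are
placeholders, struct layout per the uAPI headers added in this series):

/* Illustrative user-space sketch, not part of the kernel patch. */
struct {
	struct ib_uverbs_ioctl_hdr hdr;
	struct ib_uverbs_attr attrs[2];
} cmd = {};

cmd.hdr.object_id = OBJECT_ID;	/* ids come from the device's parsing tree */
cmd.hdr.method_id = METHOD_ID;
cmd.hdr.num_attrs = 2;
cmd.hdr.length = sizeof(cmd.hdr) +
		 cmd.hdr.num_attrs * sizeof(struct ib_uverbs_attr);

cmd.attrs[0].attr_id = SOME_ATTR_ID;
cmd.attrs[0].flags = UVERBS_ATTR_F_MANDATORY;
/* ... fill len/data per attribute type ... */

if (ioctl(cmd_fd, RDMA_VERBS_IOCTL, &cmd))
	perror("RDMA_VERBS_IOCTL");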
diff --git a/drivers/infiniband/core/uverbs_ioctl_merge.c b/drivers/infiniband/core/uverbs_ioctl_merge.c
new file mode 100644
index 000000000000..76ddb6564578
--- /dev/null
+++ b/drivers/infiniband/core/uverbs_ioctl_merge.c
@@ -0,0 +1,665 @@
+/*
+ * Copyright (c) 2017, Mellanox Technologies inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <rdma/uverbs_ioctl.h>
+#include <rdma/rdma_user_ioctl.h>
+#include <linux/bitops.h>
+#include "uverbs.h"
+
+#define UVERBS_NUM_NS (UVERBS_ID_NS_MASK >> UVERBS_ID_NS_SHIFT)
+#define GET_NS_ID(idx) (((idx) & UVERBS_ID_NS_MASK) >> UVERBS_ID_NS_SHIFT)
+#define GET_ID(idx) ((idx) & ~UVERBS_ID_NS_MASK)
+
+#define _for_each_element(elem, tmpi, tmpj, hashes, num_buckets_offset, \
+ buckets_offset) \
+ for (tmpj = 0, \
+ elem = (*(const void ***)((hashes)[tmpi] + \
+ (buckets_offset)))[0]; \
+ tmpj < *(size_t *)((hashes)[tmpi] + (num_buckets_offset)); \
+ tmpj++) \
+ if ((elem = ((*(const void ***)(hashes[tmpi] + \
+ (buckets_offset)))[tmpj])))
+
+/*
+ * Iterate over all elements of a few @hashes. The number of given hashes is
+ * indicated by @num_hashes. The offset of the number of buckets in the hash is
+ * represented by @num_buckets_offset, while the offset of the buckets array in
+ * the hash structure is represented by @buckets_offset. tmpi and tmpj are two
+ * short (or int) based indices that are given by the user. tmpi iterates over
+ * the different hashes. @elem points to the current element in the hashes[tmpi]
+ * bucket we are looping on. Strictly speaking, the @hashes representation isn't
+ * exactly a hash, but more a collection of elements. These elements' ids are
+ * treated in a hash-like manner, where the upper bits are the bucket number.
+ * These elements are later mapped into a perfect-hash.
+ */
+#define for_each_element(elem, tmpi, tmpj, hashes, num_hashes, \
+ num_buckets_offset, buckets_offset) \
+ for (tmpi = 0; tmpi < (num_hashes); tmpi++) \
+ _for_each_element(elem, tmpi, tmpj, hashes, num_buckets_offset,\
+ buckets_offset)
+
+#define get_elements_iterators_entry_above(iters, num_elements, elements, \
+ num_objects_fld, objects_fld, bucket,\
+ min_id) \
+ get_elements_above_id((const void **)iters, num_elements, \
+ (const void **)(elements), \
+ offsetof(typeof(**elements), \
+ num_objects_fld), \
+ offsetof(typeof(**elements), objects_fld),\
+ offsetof(typeof(***(*elements)->objects_fld), id),\
+ bucket, min_id)
+
+#define get_objects_above_id(iters, num_trees, trees, bucket, min_id) \
+ get_elements_iterators_entry_above(iters, num_trees, trees, \
+ num_objects, objects, bucket, min_id)
+
+#define get_methods_above_id(method_iters, num_iters, iters, bucket, min_id)\
+ get_elements_iterators_entry_above(method_iters, num_iters, iters, \
+ num_methods, methods, bucket, min_id)
+
+#define get_attrs_above_id(attrs_iters, num_iters, iters, bucket, min_id)\
+ get_elements_iterators_entry_above(attrs_iters, num_iters, iters, \
+ num_attrs, attrs, bucket, min_id)
+
+/*
+ * get_elements_above_id gets a few hashes represented by @elements and
+ * @num_elements. The hashes' fields are described by @num_offset, @data_offset
+ * and @id_offset in the same way as required by for_each_element. The function
+ * fills the @iters array with elements from the hashes' buckets whose ids are
+ * the smallest in all hashes but are all larger than the id given by min_id.
+ * Elements are only added to the iters array if their id belongs to the
+ * bucket @bucket. The number of elements in the returned array is returned by
+ * the function. @min_id is also updated to reflect the new min_id of all
+ * elements in iters.
+ */
+static size_t get_elements_above_id(const void **iters,
+ unsigned int num_elements,
+ const void **elements,
+ size_t num_offset,
+ size_t data_offset,
+ size_t id_offset,
+ u16 bucket,
+ short *min_id)
+{
+ size_t num_iters = 0;
+ short min = SHRT_MAX;
+ const void *elem;
+ int i, j, last_stored = -1;
+
+ for_each_element(elem, i, j, elements, num_elements, num_offset,
+ data_offset) {
+ u16 id = *(u16 *)(elem + id_offset);
+
+ if (GET_NS_ID(id) != bucket)
+ continue;
+
+ if (GET_ID(id) < *min_id ||
+ (min != SHRT_MAX && GET_ID(id) > min))
+ continue;
+
+ /*
+ * We first iterate over all hashes represented by @elements. While
+ * doing so, we try to find the element @elem in the bucket @bucket
+ * whose id is minimal. Since we can't ensure the user sorted the
+ * elements in increasing order, we override the minimal-id element
+ * found so far for this hash if a new element with a smaller id is
+ * found.
+ */
+ iters[last_stored == i ? num_iters - 1 : num_iters++] = elem;
+ last_stored = i;
+ min = GET_ID(id);
+ }
+
+ /*
+ * We only insert an element into our iters array if its id is smaller
+ * than all previous ids. Therefore, the final iters array is sorted so
+ * that smaller ids are at the end of the array.
+ * Hence, we need to clean the beginning of the array to make sure
+ * all ids of final elements are equal to min.
+ */
+ for (i = num_iters - 1; i >= 0 &&
+ GET_ID(*(u16 *)(iters[i] + id_offset)) == min; i--)
+ ;
+
+ num_iters -= i + 1;
+ memmove(iters, iters + i + 1, sizeof(*iters) * num_iters);
+
+ *min_id = min;
+ return num_iters;
+}
+
+#define find_max_element_entry_id(num_elements, elements, num_objects_fld, \
+ objects_fld, bucket) \
+ find_max_element_id(num_elements, (const void **)(elements), \
+ offsetof(typeof(**elements), num_objects_fld), \
+ offsetof(typeof(**elements), objects_fld), \
+ offsetof(typeof(***(*elements)->objects_fld), id),\
+ bucket)
+
+static short find_max_element_ns_id(unsigned int num_elements,
+ const void **elements,
+ size_t num_offset,
+ size_t data_offset,
+ size_t id_offset)
+{
+ short max_ns = SHRT_MIN;
+ const void *elem;
+ int i, j;
+
+ for_each_element(elem, i, j, elements, num_elements, num_offset,
+ data_offset) {
+ u16 id = *(u16 *)(elem + id_offset);
+
+ if (GET_NS_ID(id) > max_ns)
+ max_ns = GET_NS_ID(id);
+ }
+
+ return max_ns;
+}
+
+static short find_max_element_id(unsigned int num_elements,
+ const void **elements,
+ size_t num_offset,
+ size_t data_offset,
+ size_t id_offset,
+ u16 bucket)
+{
+ short max_id = SHRT_MIN;
+ const void *elem;
+ int i, j;
+
+ for_each_element(elem, i, j, elements, num_elements, num_offset,
+ data_offset) {
+ u16 id = *(u16 *)(elem + id_offset);
+
+ if (GET_NS_ID(id) == bucket &&
+ GET_ID(id) > max_id)
+ max_id = GET_ID(id);
+ }
+ return max_id;
+}
+
+#define find_max_element_entry_id(num_elements, elements, num_objects_fld, \
+ objects_fld, bucket) \
+ find_max_element_id(num_elements, (const void **)(elements), \
+ offsetof(typeof(**elements), num_objects_fld), \
+ offsetof(typeof(**elements), objects_fld), \
+ offsetof(typeof(***(*elements)->objects_fld), id),\
+ bucket)
+
+#define find_max_element_ns_entry_id(num_elements, elements, \
+ num_objects_fld, objects_fld) \
+ find_max_element_ns_id(num_elements, (const void **)(elements), \
+ offsetof(typeof(**elements), num_objects_fld),\
+ offsetof(typeof(**elements), objects_fld), \
+ offsetof(typeof(***(*elements)->objects_fld), id))
+
+/*
+ * find_max_xxxx_ns_id gets a few elements. Each element is described by an id
+ * whose upper bits represent a namespace. It finds the max namespace. This
+ * could be used in order to know how many buckets we need to allocate. If no
+ * elements exist, SHRT_MIN is returned. A namespace here represents a
+ * different bucket. The common example is "common bucket" and "driver bucket".
+ *
+ * find_max_xxxx_id gets a few elements and a bucket. Each element is described
+ * by an id whose upper bits represent a namespace. It returns the max id
+ * which is contained in the same namespace defined in @bucket. This could be
+ * used in order to know how many elements we need to allocate in the bucket.
+ * If no elements exist, SHRT_MIN is returned.
+ */
+
+#define find_max_object_id(num_trees, trees, bucket) \
+ find_max_element_entry_id(num_trees, trees, num_objects,\
+ objects, bucket)
+#define find_max_object_ns_id(num_trees, trees) \
+ find_max_element_ns_entry_id(num_trees, trees, \
+ num_objects, objects)
+
+#define find_max_method_id(num_iters, iters, bucket) \
+ find_max_element_entry_id(num_iters, iters, num_methods,\
+ methods, bucket)
+#define find_max_method_ns_id(num_iters, iters) \
+ find_max_element_ns_entry_id(num_iters, iters, \
+ num_methods, methods)
+
+#define find_max_attr_id(num_iters, iters, bucket) \
+ find_max_element_entry_id(num_iters, iters, num_attrs, \
+ attrs, bucket)
+#define find_max_attr_ns_id(num_iters, iters) \
+ find_max_element_ns_entry_id(num_iters, iters, \
+ num_attrs, attrs)
+
+static void free_method(struct uverbs_method_spec *method)
+{
+ unsigned int i;
+
+ if (!method)
+ return;
+
+ for (i = 0; i < method->num_buckets; i++)
+ kfree(method->attr_buckets[i]);
+
+ kfree(method);
+}
+
+#define IS_ATTR_OBJECT(attr) ((attr)->type == UVERBS_ATTR_TYPE_IDR || \
+ (attr)->type == UVERBS_ATTR_TYPE_FD)
+
+/*
+ * This function gets an array of size @num_method_defs which contains
+ * pointers to method definitions @method_defs. The function allocates an
+ * uverbs_method_spec structure and initializes its number of buckets and the
+ * elements in the buckets to the correct attributes. While doing that, it
+ * validates that there aren't conflicts between attributes of different
+ * method_defs.
+ */
+static struct uverbs_method_spec *build_method_with_attrs(const struct uverbs_method_def **method_defs,
+ size_t num_method_defs)
+{
+ int bucket_idx;
+ int max_attr_buckets = 0;
+ size_t num_attr_buckets = 0;
+ int res = 0;
+ struct uverbs_method_spec *method = NULL;
+ const struct uverbs_attr_def **attr_defs;
+ unsigned int num_of_singularities = 0;
+
+ max_attr_buckets = find_max_attr_ns_id(num_method_defs, method_defs);
+ if (max_attr_buckets >= 0)
+ num_attr_buckets = max_attr_buckets + 1;
+
+ method = kzalloc(sizeof(*method) +
+ num_attr_buckets * sizeof(*method->attr_buckets),
+ GFP_KERNEL);
+ if (!method)
+ return ERR_PTR(-ENOMEM);
+
+ method->num_buckets = num_attr_buckets;
+ attr_defs = kcalloc(num_method_defs, sizeof(*attr_defs), GFP_KERNEL);
+ if (!attr_defs) {
+ res = -ENOMEM;
+ goto free_method;
+ }
+ for (bucket_idx = 0; bucket_idx < method->num_buckets; bucket_idx++) {
+ short min_id = SHRT_MIN;
+ int attr_max_bucket = 0;
+ struct uverbs_attr_spec_hash *hash = NULL;
+
+ attr_max_bucket = find_max_attr_id(num_method_defs, method_defs,
+ bucket_idx);
+ if (attr_max_bucket < 0)
+ continue;
+
+ hash = kzalloc(sizeof(*hash) +
+ ALIGN(sizeof(*hash->attrs) * (attr_max_bucket + 1),
+ sizeof(long)) +
+ BITS_TO_LONGS(attr_max_bucket) * sizeof(long),
+ GFP_KERNEL);
+ if (!hash) {
+ res = -ENOMEM;
+ goto free;
+ }
+ hash->num_attrs = attr_max_bucket + 1;
+ method->num_child_attrs += hash->num_attrs;
+ hash->mandatory_attrs_bitmask = (void *)(hash + 1) +
+ ALIGN(sizeof(*hash->attrs) *
+ (attr_max_bucket + 1),
+ sizeof(long));
+
+ method->attr_buckets[bucket_idx] = hash;
+
+ do {
+ size_t num_attr_defs;
+ struct uverbs_attr_spec *attr;
+ bool attr_obj_with_special_access;
+
+ num_attr_defs =
+ get_attrs_above_id(attr_defs,
+ num_method_defs,
+ method_defs,
+ bucket_idx,
+ &min_id);
+ /* Last attr in bucket */
+ if (!num_attr_defs)
+ break;
+
+ if (num_attr_defs > 1) {
+ /*
+ * We don't allow two attribute definitions for
+ * the same attribute. This is usually a
+ * programmer error. If required, it's better to
+ * just add a new attribute to capture the new
+ * semantics.
+ */
+ res = -EEXIST;
+ goto free;
+ }
+
+ attr = &hash->attrs[min_id];
+ memcpy(attr, &attr_defs[0]->attr, sizeof(*attr));
+
+ attr_obj_with_special_access = IS_ATTR_OBJECT(attr) &&
+ (attr->obj.access == UVERBS_ACCESS_NEW ||
+ attr->obj.access == UVERBS_ACCESS_DESTROY);
+ num_of_singularities += !!attr_obj_with_special_access;
+ if (WARN(num_of_singularities > 1,
+ "ib_uverbs: Method contains more than one object attr (%d) with new/destroy access\n",
+ min_id) ||
+ WARN(attr_obj_with_special_access &&
+ !(attr->flags & UVERBS_ATTR_SPEC_F_MANDATORY),
+ "ib_uverbs: Tried to merge attr (%d) but it's an object with new/destroy aceess but isn't mandatory\n",
+ min_id) ||
+ WARN(IS_ATTR_OBJECT(attr) &&
+ attr->flags & UVERBS_ATTR_SPEC_F_MIN_SZ,
+ "ib_uverbs: Tried to merge attr (%d) but it's an object with min_sz flag\n",
+ min_id)) {
+ res = -EINVAL;
+ goto free;
+ }
+
+ if (attr->flags & UVERBS_ATTR_SPEC_F_MANDATORY)
+ set_bit(min_id, hash->mandatory_attrs_bitmask);
+ min_id++;
+
+ } while (1);
+ }
+ kfree(attr_defs);
+ return method;
+
+free:
+ kfree(attr_defs);
+free_method:
+ free_method(method);
+ return ERR_PTR(res);
+}
+
+static void free_object(struct uverbs_object_spec *object)
+{
+ unsigned int i, j;
+
+ if (!object)
+ return;
+
+ for (i = 0; i < object->num_buckets; i++) {
+ struct uverbs_method_spec_hash *method_buckets =
+ object->method_buckets[i];
+
+ if (!method_buckets)
+ continue;
+
+ for (j = 0; j < method_buckets->num_methods; j++)
+ free_method(method_buckets->methods[j]);
+
+ kfree(method_buckets);
+ }
+
+ kfree(object);
+}
+
+/*
+ * This function gets an array of size @num_object_defs which contains
+ * pointers to object definitions @object_defs. The function allocates an
+ * uverbs_object_spec structure and initializes its number of buckets and the
+ * elements in the buckets to the correct methods. While doing that, it
+ * resolves conflicts between definitions of the same method.
+ */
+static struct uverbs_object_spec *build_object_with_methods(const struct uverbs_object_def **object_defs,
+ size_t num_object_defs)
+{
+ u16 bucket_idx;
+ int max_method_buckets = 0;
+ u16 num_method_buckets = 0;
+ int res = 0;
+ struct uverbs_object_spec *object = NULL;
+ const struct uverbs_method_def **method_defs;
+
+ max_method_buckets = find_max_method_ns_id(num_object_defs, object_defs);
+ if (max_method_buckets >= 0)
+ num_method_buckets = max_method_buckets + 1;
+
+ object = kzalloc(sizeof(*object) +
+ num_method_buckets *
+ sizeof(*object->method_buckets), GFP_KERNEL);
+ if (!object)
+ return ERR_PTR(-ENOMEM);
+
+ object->num_buckets = num_method_buckets;
+ method_defs = kcalloc(num_object_defs, sizeof(*method_defs), GFP_KERNEL);
+ if (!method_defs) {
+ res = -ENOMEM;
+ goto free_object;
+ }
+
+ for (bucket_idx = 0; bucket_idx < object->num_buckets; bucket_idx++) {
+ short min_id = SHRT_MIN;
+ int methods_max_bucket = 0;
+ struct uverbs_method_spec_hash *hash = NULL;
+
+ methods_max_bucket = find_max_method_id(num_object_defs, object_defs,
+ bucket_idx);
+ if (methods_max_bucket < 0)
+ continue;
+
+ hash = kzalloc(sizeof(*hash) +
+ sizeof(*hash->methods) * (methods_max_bucket + 1),
+ GFP_KERNEL);
+ if (!hash) {
+ res = -ENOMEM;
+ goto free;
+ }
+
+ hash->num_methods = methods_max_bucket + 1;
+ object->method_buckets[bucket_idx] = hash;
+
+ do {
+ size_t num_method_defs;
+ struct uverbs_method_spec *method;
+ int i;
+
+ num_method_defs =
+ get_methods_above_id(method_defs,
+ num_object_defs,
+ object_defs,
+ bucket_idx,
+ &min_id);
+ /* Last method in bucket */
+ if (!num_method_defs)
+ break;
+
+ method = build_method_with_attrs(method_defs,
+ num_method_defs);
+ if (IS_ERR(method)) {
+ res = PTR_ERR(method);
+ goto free;
+ }
+
+ /*
+ * The last tree given as an argument to the merge
+ * overrides the previous method handler.
+ * Therefore, we iterate backwards and search for the
+ * first handler that is not NULL. This also defines the
+ * set of flags used for this handler.
+ */
+ for (i = num_object_defs - 1;
+ i >= 0 && !method_defs[i]->handler; i--)
+ ;
+ hash->methods[min_id++] = method;
+ /* NULL handler isn't allowed */
+ if (WARN(i < 0,
+ "ib_uverbs: tried to merge function id %d, but all handlers are NULL\n",
+ min_id)) {
+ res = -EINVAL;
+ goto free;
+ }
+ method->handler = method_defs[i]->handler;
+ method->flags = method_defs[i]->flags;
+
+ } while (1);
+ }
+ kfree(method_defs);
+ return object;
+
+free:
+ kfree(method_defs);
+free_object:
+ free_object(object);
+ return ERR_PTR(res);
+}
+
+void uverbs_free_spec_tree(struct uverbs_root_spec *root)
+{
+ unsigned int i, j;
+
+ if (!root)
+ return;
+
+ for (i = 0; i < root->num_buckets; i++) {
+ struct uverbs_object_spec_hash *object_hash =
+ root->object_buckets[i];
+
+ if (!object_hash)
+ continue;
+
+ for (j = 0; j < object_hash->num_objects; j++)
+ free_object(object_hash->objects[j]);
+
+ kfree(object_hash);
+ }
+
+ kfree(root);
+}
+EXPORT_SYMBOL(uverbs_free_spec_tree);
+
+struct uverbs_root_spec *uverbs_alloc_spec_tree(unsigned int num_trees,
+ const struct uverbs_object_tree_def **trees)
+{
+ u16 bucket_idx;
+ short max_object_buckets = 0;
+ size_t num_objects_buckets = 0;
+ struct uverbs_root_spec *root_spec = NULL;
+ const struct uverbs_object_def **object_defs;
+ int i;
+ int res = 0;
+
+ max_object_buckets = find_max_object_ns_id(num_trees, trees);
+ /*
+ * Devices which don't want to support ib_uverbs should just allocate
+ * an empty parsing tree. No user-space command will hit a valid entry
+ * in the parsing tree and thus all of them will fail.
+ */
+ if (max_object_buckets >= 0)
+ num_objects_buckets = max_object_buckets + 1;
+
+ root_spec = kzalloc(sizeof(*root_spec) +
+ num_objects_buckets * sizeof(*root_spec->object_buckets),
+ GFP_KERNEL);
+ if (!root_spec)
+ return ERR_PTR(-ENOMEM);
+ root_spec->num_buckets = num_objects_buckets;
+
+ object_defs = kcalloc(num_trees, sizeof(*object_defs),
+ GFP_KERNEL);
+ if (!object_defs) {
+ res = -ENOMEM;
+ goto free_root;
+ }
+
+ for (bucket_idx = 0; bucket_idx < root_spec->num_buckets; bucket_idx++) {
+ short min_id = SHRT_MIN;
+ short objects_max_bucket;
+ struct uverbs_object_spec_hash *hash = NULL;
+
+ objects_max_bucket = find_max_object_id(num_trees, trees,
+ bucket_idx);
+ if (objects_max_bucket < 0)
+ continue;
+
+ hash = kzalloc(sizeof(*hash) +
+ sizeof(*hash->objects) * (objects_max_bucket + 1),
+ GFP_KERNEL);
+ if (!hash) {
+ res = -ENOMEM;
+ goto free;
+ }
+ hash->num_objects = objects_max_bucket + 1;
+ root_spec->object_buckets[bucket_idx] = hash;
+
+ do {
+ size_t num_object_defs;
+ struct uverbs_object_spec *object;
+
+ num_object_defs = get_objects_above_id(object_defs,
+ num_trees,
+ trees,
+ bucket_idx,
+ &min_id);
+ /* Last object in bucket */
+ if (!num_object_defs)
+ break;
+
+ object = build_object_with_methods(object_defs,
+ num_object_defs);
+ if (IS_ERR(object)) {
+ res = PTR_ERR(object);
+ goto free;
+ }
+
+ /*
+ * The last tree given as an argument to the merge
+ * overrides the previous object's type_attrs.
+ * Therefore, we iterate backwards and search for the
+ * first type_attrs that is not NULL.
+ */
+ for (i = num_object_defs - 1;
+ i >= 0 && !object_defs[i]->type_attrs; i--)
+ ;
+ /*
+ * NULL is a valid type_attrs. It means an object we
+ * can't instantiate (like DEVICE).
+ */
+ object->type_attrs = i < 0 ? NULL :
+ object_defs[i]->type_attrs;
+
+ hash->objects[min_id++] = object;
+ } while (1);
+ }
+
+ kfree(object_defs);
+ return root_spec;
+
+free:
+ kfree(object_defs);
+free_root:
+ uverbs_free_spec_tree(root_spec);
+ return ERR_PTR(res);
+}
+EXPORT_SYMBOL(uverbs_alloc_spec_tree);
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 3d2609608f58..dc2aed6fb21b 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -49,6 +49,7 @@
#include <linux/uaccess.h>
#include <rdma/ib.h>
+#include <rdma/uverbs_std_types.h>
#include "uverbs.h"
#include "core_priv.h"
@@ -250,6 +251,7 @@ void ib_uverbs_release_file(struct kref *ref)
if (atomic_dec_and_test(&file->device->refcount))
ib_uverbs_comp_dev(file->device);
+ kobject_put(&file->device->kobj);
kfree(file);
}
@@ -594,7 +596,6 @@ struct file *ib_uverbs_alloc_async_event_file(struct ib_uverbs_file *uverbs_file
{
struct ib_uverbs_async_event_file *ev_file;
struct file *filp;
- int ret;
ev_file = kzalloc(sizeof(*ev_file), GFP_KERNEL);
if (!ev_file)
@@ -620,21 +621,11 @@ struct file *ib_uverbs_alloc_async_event_file(struct ib_uverbs_file *uverbs_file
INIT_IB_EVENT_HANDLER(&uverbs_file->event_handler,
ib_dev,
ib_uverbs_event_handler);
- ret = ib_register_event_handler(&uverbs_file->event_handler);
- if (ret)
- goto err_put_file;
-
+ ib_register_event_handler(&uverbs_file->event_handler);
/* At that point async file stuff was fully set */
return filp;
-err_put_file:
- fput(filp);
- kref_put(&uverbs_file->async_file->ref,
- ib_uverbs_release_async_event_file);
- uverbs_file->async_file = NULL;
- return ERR_PTR(ret);
-
err_put_refs:
kref_put(&ev_file->uverbs_file->ref, ib_uverbs_release_file);
kref_put(&ev_file->ref, ib_uverbs_release_async_event_file);
@@ -917,7 +908,6 @@ err:
static int ib_uverbs_close(struct inode *inode, struct file *filp)
{
struct ib_uverbs_file *file = filp->private_data;
- struct ib_uverbs_device *dev = file->device;
mutex_lock(&file->cleanup_mutex);
if (file->ucontext) {
@@ -939,7 +929,6 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp)
ib_uverbs_release_async_event_file);
kref_put(&file->ref, ib_uverbs_release_file);
- kobject_put(&dev->kobj);
return 0;
}
@@ -950,6 +939,9 @@ static const struct file_operations uverbs_fops = {
.open = ib_uverbs_open,
.release = ib_uverbs_close,
.llseek = no_llseek,
+#if IS_ENABLED(CONFIG_INFINIBAND_EXP_USER_ACCESS)
+ .unlocked_ioctl = ib_uverbs_ioctl,
+#endif
};
static const struct file_operations uverbs_mmap_fops = {
@@ -959,6 +951,9 @@ static const struct file_operations uverbs_mmap_fops = {
.open = ib_uverbs_open,
.release = ib_uverbs_close,
.llseek = no_llseek,
+#if IS_ENABLED(CONFIG_INFINIBAND_EXP_USER_ACCESS)
+ .unlocked_ioctl = ib_uverbs_ioctl,
+#endif
};
static struct ib_client uverbs_client = {
@@ -1109,6 +1104,18 @@ static void ib_uverbs_add_one(struct ib_device *device)
if (device_create_file(uverbs_dev->dev, &dev_attr_abi_version))
goto err_class;
+ if (!device->specs_root) {
+ const struct uverbs_object_tree_def *default_root[] = {
+ uverbs_default_get_objects()};
+
+ uverbs_dev->specs_root = uverbs_alloc_spec_tree(1,
+ default_root);
+ if (IS_ERR(uverbs_dev->specs_root))
+ goto err_class;
+
+ device->specs_root = uverbs_dev->specs_root;
+ }
+
ib_set_client_data(device, &uverbs_client, uverbs_dev);
return;
@@ -1154,7 +1161,6 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
kref_get(&file->ref);
mutex_unlock(&uverbs_dev->lists_mutex);
- ib_uverbs_event_handler(&file->event_handler, &event);
mutex_lock(&file->cleanup_mutex);
ucontext = file->ucontext;
@@ -1171,6 +1177,7 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
* for example due to freeing the resources
* (e.g mmput).
*/
+ ib_uverbs_event_handler(&file->event_handler, &event);
ib_dev->disassociate_ucontext(ucontext);
mutex_lock(&file->cleanup_mutex);
ib_uverbs_cleanup_ucontext(file, ucontext, true);
@@ -1240,6 +1247,11 @@ static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
ib_uverbs_comp_dev(uverbs_dev);
if (wait_clients)
wait_for_completion(&uverbs_dev->comp);
+ if (uverbs_dev->specs_root) {
+ uverbs_free_spec_tree(uverbs_dev->specs_root);
+ device->specs_root = NULL;
+ }
+
kobject_put(&uverbs_dev->kobj);
}
diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c
index 94fd989c9060..bd0acf376af0 100644
--- a/drivers/infiniband/core/uverbs_marshall.c
+++ b/drivers/infiniband/core/uverbs_marshall.c
@@ -33,10 +33,47 @@
#include <linux/export.h>
#include <rdma/ib_marshall.h>
-void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
- struct rdma_ah_attr *src)
+#define OPA_DEFAULT_GID_PREFIX cpu_to_be64(0xfe80000000000000ULL)
+static int rdma_ah_conv_opa_to_ib(struct ib_device *dev,
+ struct rdma_ah_attr *ib,
+ struct rdma_ah_attr *opa)
{
+ struct ib_port_attr port_attr;
+ int ret = 0;
+
+ /* Do a structure copy, then overwrite the necessary fields */
+ *ib = *opa;
+
+ ib->type = RDMA_AH_ATTR_TYPE_IB;
+ rdma_ah_set_grh(ib, NULL, 0, 0, 1, 0);
+
+ if (ib_query_port(dev, opa->port_num, &port_attr)) {
+ /* Set to default subnet to indicate error */
+ rdma_ah_set_subnet_prefix(ib, OPA_DEFAULT_GID_PREFIX);
+ ret = -EINVAL;
+ } else {
+ rdma_ah_set_subnet_prefix(ib,
+ cpu_to_be64(port_attr.subnet_prefix));
+ }
+ rdma_ah_set_interface_id(ib, OPA_MAKE_ID(rdma_ah_get_dlid(opa)));
+ return ret;
+}
+
+void ib_copy_ah_attr_to_user(struct ib_device *device,
+ struct ib_uverbs_ah_attr *dst,
+ struct rdma_ah_attr *ah_attr)
+{
+ struct rdma_ah_attr *src = ah_attr;
+ struct rdma_ah_attr conv_ah;
+
memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
+
+ if ((ah_attr->type == RDMA_AH_ATTR_TYPE_OPA) &&
+ (rdma_ah_get_dlid(ah_attr) >=
+ be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
+ (!rdma_ah_conv_opa_to_ib(device, &conv_ah, ah_attr)))
+ src = &conv_ah;
+
dst->dlid = rdma_ah_get_dlid(src);
dst->sl = rdma_ah_get_sl(src);
dst->src_path_bits = rdma_ah_get_path_bits(src);
@@ -57,7 +94,8 @@ void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
}
EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
-void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
+void ib_copy_qp_attr_to_user(struct ib_device *device,
+ struct ib_uverbs_qp_attr *dst,
struct ib_qp_attr *src)
{
dst->qp_state = src->qp_state;
@@ -76,8 +114,8 @@ void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
dst->max_recv_sge = src->cap.max_recv_sge;
dst->max_inline_data = src->cap.max_inline_data;
- ib_copy_ah_attr_to_user(&dst->ah_attr, &src->ah_attr);
- ib_copy_ah_attr_to_user(&dst->alt_ah_attr, &src->alt_ah_attr);
+ ib_copy_ah_attr_to_user(device, &dst->ah_attr, &src->ah_attr);
+ ib_copy_ah_attr_to_user(device, &dst->alt_ah_attr, &src->alt_ah_attr);
dst->pkey_index = src->pkey_index;
dst->alt_pkey_index = src->alt_pkey_index;
diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c
index ef293379f37a..0a98579700ec 100644
--- a/drivers/infiniband/core/uverbs_std_types.c
+++ b/drivers/infiniband/core/uverbs_std_types.c
@@ -209,67 +209,244 @@ static int uverbs_hot_unplug_completion_event_file(struct ib_uobject_file *uobj_
return 0;
};
-const struct uverbs_obj_fd_type uverbs_type_attrs_comp_channel = {
- .type = UVERBS_TYPE_ALLOC_FD(sizeof(struct ib_uverbs_completion_event_file), 0),
- .context_closed = uverbs_hot_unplug_completion_event_file,
- .fops = &uverbs_event_fops,
- .name = "[infinibandevent]",
- .flags = O_RDONLY,
-};
+/*
+ * This spec is used in order to pass information to the hardware driver in a
+ * legacy way. Every verb that could get driver-specific data should get this
+ * spec.
+ */
+static const struct uverbs_attr_def uverbs_uhw_compat_in =
+ UVERBS_ATTR_PTR_IN_SZ(UVERBS_UHW_IN, 0, UA_FLAGS(UVERBS_ATTR_SPEC_F_MIN_SZ));
+static const struct uverbs_attr_def uverbs_uhw_compat_out =
+ UVERBS_ATTR_PTR_OUT_SZ(UVERBS_UHW_OUT, 0, UA_FLAGS(UVERBS_ATTR_SPEC_F_MIN_SZ));
-const struct uverbs_obj_idr_type uverbs_type_attrs_cq = {
- .type = UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_ucq_object), 0),
- .destroy_object = uverbs_free_cq,
-};
+static void create_udata(struct uverbs_attr_bundle *ctx,
+ struct ib_udata *udata)
+{
+ /*
+ * This is for ease of conversion. The purpose is to convert all drivers
+ * to use uverbs_attr_bundle instead of ib_udata.
+ * Assume attr == 0 is input and attr == 1 is output.
+ */
+ void __user *inbuf;
+ size_t inbuf_len = 0;
+ void __user *outbuf;
+ size_t outbuf_len = 0;
+ const struct uverbs_attr *uhw_in =
+ uverbs_attr_get(ctx, UVERBS_UHW_IN);
+ const struct uverbs_attr *uhw_out =
+ uverbs_attr_get(ctx, UVERBS_UHW_OUT);
+
+ if (!IS_ERR(uhw_in)) {
+ inbuf = uhw_in->ptr_attr.ptr;
+ inbuf_len = uhw_in->ptr_attr.len;
+ }
-const struct uverbs_obj_idr_type uverbs_type_attrs_qp = {
- .type = UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uqp_object), 0),
- .destroy_object = uverbs_free_qp,
-};
+ if (!IS_ERR(uhw_out)) {
+ outbuf = uhw_out->ptr_attr.ptr;
+ outbuf_len = uhw_out->ptr_attr.len;
+ }
-const struct uverbs_obj_idr_type uverbs_type_attrs_mw = {
- .type = UVERBS_TYPE_ALLOC_IDR(0),
- .destroy_object = uverbs_free_mw,
-};
+ INIT_UDATA_BUF_OR_NULL(udata, inbuf, outbuf, inbuf_len, outbuf_len);
+}
-const struct uverbs_obj_idr_type uverbs_type_attrs_mr = {
- /* 1 is used in order to free the MR after all the MWs */
- .type = UVERBS_TYPE_ALLOC_IDR(1),
- .destroy_object = uverbs_free_mr,
-};
+static int uverbs_create_cq_handler(struct ib_device *ib_dev,
+ struct ib_uverbs_file *file,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_ucontext *ucontext = file->ucontext;
+ struct ib_ucq_object *obj;
+ struct ib_udata uhw;
+ int ret;
+ u64 user_handle;
+ struct ib_cq_init_attr attr = {};
+ struct ib_cq *cq;
+ struct ib_uverbs_completion_event_file *ev_file = NULL;
+ const struct uverbs_attr *ev_file_attr;
+ struct ib_uobject *ev_file_uobj;
+
+ if (!(ib_dev->uverbs_cmd_mask & 1ULL << IB_USER_VERBS_CMD_CREATE_CQ))
+ return -EOPNOTSUPP;
+
+ ret = uverbs_copy_from(&attr.comp_vector, attrs, CREATE_CQ_COMP_VECTOR);
+ if (!ret)
+ ret = uverbs_copy_from(&attr.cqe, attrs, CREATE_CQ_CQE);
+ if (!ret)
+ ret = uverbs_copy_from(&user_handle, attrs, CREATE_CQ_USER_HANDLE);
+ if (ret)
+ return ret;
-const struct uverbs_obj_idr_type uverbs_type_attrs_srq = {
- .type = UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_usrq_object), 0),
- .destroy_object = uverbs_free_srq,
-};
+ /* Optional param; if it doesn't exist, we get -ENOENT and skip it */
+ if (uverbs_copy_from(&attr.flags, attrs, CREATE_CQ_FLAGS) == -EFAULT)
+ return -EFAULT;
-const struct uverbs_obj_idr_type uverbs_type_attrs_ah = {
- .type = UVERBS_TYPE_ALLOC_IDR(0),
- .destroy_object = uverbs_free_ah,
-};
+ ev_file_attr = uverbs_attr_get(attrs, CREATE_CQ_COMP_CHANNEL);
+ if (!IS_ERR(ev_file_attr)) {
+ ev_file_uobj = ev_file_attr->obj_attr.uobject;
-const struct uverbs_obj_idr_type uverbs_type_attrs_flow = {
- .type = UVERBS_TYPE_ALLOC_IDR(0),
- .destroy_object = uverbs_free_flow,
-};
+ ev_file = container_of(ev_file_uobj,
+ struct ib_uverbs_completion_event_file,
+ uobj_file.uobj);
+ uverbs_uobject_get(ev_file_uobj);
+ }
-const struct uverbs_obj_idr_type uverbs_type_attrs_wq = {
- .type = UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uwq_object), 0),
- .destroy_object = uverbs_free_wq,
-};
+ if (attr.comp_vector >= ucontext->ufile->device->num_comp_vectors) {
+ ret = -EINVAL;
+ goto err_event_file;
+ }
-const struct uverbs_obj_idr_type uverbs_type_attrs_rwq_ind_table = {
- .type = UVERBS_TYPE_ALLOC_IDR(0),
- .destroy_object = uverbs_free_rwq_ind_tbl,
-};
+ obj = container_of(uverbs_attr_get(attrs, CREATE_CQ_HANDLE)->obj_attr.uobject,
+ typeof(*obj), uobject);
+ obj->uverbs_file = ucontext->ufile;
+ obj->comp_events_reported = 0;
+ obj->async_events_reported = 0;
+ INIT_LIST_HEAD(&obj->comp_list);
+ INIT_LIST_HEAD(&obj->async_list);
+
+ /* Temporary, only until drivers get the new uverbs_attr_bundle */
+ create_udata(attrs, &uhw);
+
+ cq = ib_dev->create_cq(ib_dev, &attr, ucontext, &uhw);
+ if (IS_ERR(cq)) {
+ ret = PTR_ERR(cq);
+ goto err_event_file;
+ }
-const struct uverbs_obj_idr_type uverbs_type_attrs_xrcd = {
- .type = UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uxrcd_object), 0),
- .destroy_object = uverbs_free_xrcd,
-};
+ cq->device = ib_dev;
+ cq->uobject = &obj->uobject;
+ cq->comp_handler = ib_uverbs_comp_handler;
+ cq->event_handler = ib_uverbs_cq_event_handler;
+ cq->cq_context = &ev_file->ev_queue;
+ obj->uobject.object = cq;
+ obj->uobject.user_handle = user_handle;
+ atomic_set(&cq->usecnt, 0);
+
+ ret = uverbs_copy_to(attrs, CREATE_CQ_RESP_CQE, &cq->cqe);
+ if (ret)
+ goto err_cq;
-const struct uverbs_obj_idr_type uverbs_type_attrs_pd = {
- /* 2 is used in order to free the PD after MRs */
- .type = UVERBS_TYPE_ALLOC_IDR(2),
- .destroy_object = uverbs_free_pd,
+ return 0;
+err_cq:
+ ib_destroy_cq(cq);
+
+err_event_file:
+ if (ev_file)
+ uverbs_uobject_put(ev_file_uobj);
+ return ret;
};
+
+static DECLARE_UVERBS_METHOD(
+ uverbs_method_cq_create, UVERBS_CQ_CREATE, uverbs_create_cq_handler,
+ &UVERBS_ATTR_IDR(CREATE_CQ_HANDLE, UVERBS_OBJECT_CQ, UVERBS_ACCESS_NEW,
+ UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
+ &UVERBS_ATTR_PTR_IN(CREATE_CQ_CQE, u32,
+ UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
+ &UVERBS_ATTR_PTR_IN(CREATE_CQ_USER_HANDLE, u64,
+ UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
+ &UVERBS_ATTR_FD(CREATE_CQ_COMP_CHANNEL, UVERBS_OBJECT_COMP_CHANNEL,
+ UVERBS_ACCESS_READ),
+ &UVERBS_ATTR_PTR_IN(CREATE_CQ_COMP_VECTOR, u32,
+ UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
+ &UVERBS_ATTR_PTR_IN(CREATE_CQ_FLAGS, u32),
+ &UVERBS_ATTR_PTR_OUT(CREATE_CQ_RESP_CQE, u32,
+ UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
+ &uverbs_uhw_compat_in, &uverbs_uhw_compat_out);
+
+static int uverbs_destroy_cq_handler(struct ib_device *ib_dev,
+ struct ib_uverbs_file *file,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uverbs_destroy_cq_resp resp;
+ struct ib_uobject *uobj =
+ uverbs_attr_get(attrs, DESTROY_CQ_HANDLE)->obj_attr.uobject;
+ struct ib_ucq_object *obj = container_of(uobj, struct ib_ucq_object,
+ uobject);
+ int ret;
+
+ if (!(ib_dev->uverbs_cmd_mask & 1ULL << IB_USER_VERBS_CMD_DESTROY_CQ))
+ return -EOPNOTSUPP;
+
+ ret = rdma_explicit_destroy(uobj);
+ if (ret)
+ return ret;
+
+ resp.comp_events_reported = obj->comp_events_reported;
+ resp.async_events_reported = obj->async_events_reported;
+
+ return uverbs_copy_to(attrs, DESTROY_CQ_RESP, &resp);
+}
+
+static DECLARE_UVERBS_METHOD(
+ uverbs_method_cq_destroy, UVERBS_CQ_DESTROY, uverbs_destroy_cq_handler,
+ &UVERBS_ATTR_IDR(DESTROY_CQ_HANDLE, UVERBS_OBJECT_CQ,
+ UVERBS_ACCESS_DESTROY,
+ UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
+ &UVERBS_ATTR_PTR_OUT(DESTROY_CQ_RESP, struct ib_uverbs_destroy_cq_resp,
+ UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));
+
+DECLARE_UVERBS_OBJECT(uverbs_object_comp_channel,
+ UVERBS_OBJECT_COMP_CHANNEL,
+ &UVERBS_TYPE_ALLOC_FD(0,
+ sizeof(struct ib_uverbs_completion_event_file),
+ uverbs_hot_unplug_completion_event_file,
+ &uverbs_event_fops,
+ "[infinibandevent]", O_RDONLY));
+
+DECLARE_UVERBS_OBJECT(uverbs_object_cq, UVERBS_OBJECT_CQ,
+ &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_ucq_object), 0,
+ uverbs_free_cq),
+ &uverbs_method_cq_create,
+ &uverbs_method_cq_destroy);
+
+DECLARE_UVERBS_OBJECT(uverbs_object_qp, UVERBS_OBJECT_QP,
+ &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uqp_object), 0,
+ uverbs_free_qp));
+
+DECLARE_UVERBS_OBJECT(uverbs_object_mw, UVERBS_OBJECT_MW,
+ &UVERBS_TYPE_ALLOC_IDR(0, uverbs_free_mw));
+
+DECLARE_UVERBS_OBJECT(uverbs_object_mr, UVERBS_OBJECT_MR,
+ /* 1 is used in order to free the MR after all the MWs */
+ &UVERBS_TYPE_ALLOC_IDR(1, uverbs_free_mr));
+
+DECLARE_UVERBS_OBJECT(uverbs_object_srq, UVERBS_OBJECT_SRQ,
+ &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_usrq_object), 0,
+ uverbs_free_srq));
+
+DECLARE_UVERBS_OBJECT(uverbs_object_ah, UVERBS_OBJECT_AH,
+ &UVERBS_TYPE_ALLOC_IDR(0, uverbs_free_ah));
+
+DECLARE_UVERBS_OBJECT(uverbs_object_flow, UVERBS_OBJECT_FLOW,
+ &UVERBS_TYPE_ALLOC_IDR(0, uverbs_free_flow));
+
+DECLARE_UVERBS_OBJECT(uverbs_object_wq, UVERBS_OBJECT_WQ,
+ &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uwq_object), 0,
+ uverbs_free_wq));
+
+DECLARE_UVERBS_OBJECT(uverbs_object_rwq_ind_table,
+ UVERBS_OBJECT_RWQ_IND_TBL,
+ &UVERBS_TYPE_ALLOC_IDR(0, uverbs_free_rwq_ind_tbl));
+
+DECLARE_UVERBS_OBJECT(uverbs_object_xrcd, UVERBS_OBJECT_XRCD,
+ &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uxrcd_object), 0,
+ uverbs_free_xrcd));
+
+DECLARE_UVERBS_OBJECT(uverbs_object_pd, UVERBS_OBJECT_PD,
+ /* 2 is used in order to free the PD after MRs */
+ &UVERBS_TYPE_ALLOC_IDR(2, uverbs_free_pd));
+
+DECLARE_UVERBS_OBJECT(uverbs_object_device, UVERBS_OBJECT_DEVICE, NULL);
+
+DECLARE_UVERBS_OBJECT_TREE(uverbs_default_objects,
+ &uverbs_object_device,
+ &uverbs_object_pd,
+ &uverbs_object_mr,
+ &uverbs_object_comp_channel,
+ &uverbs_object_cq,
+ &uverbs_object_qp,
+ &uverbs_object_ah,
+ &uverbs_object_mw,
+ &uverbs_object_srq,
+ &uverbs_object_flow,
+ &uverbs_object_wq,
+ &uverbs_object_rwq_ind_table,
+ &uverbs_object_xrcd);
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index c973a83c898b..ee9e27dc799b 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -180,39 +180,29 @@ EXPORT_SYMBOL(ib_rate_to_mbps);
__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type)
{
- switch (node_type) {
- case RDMA_NODE_IB_CA:
- case RDMA_NODE_IB_SWITCH:
- case RDMA_NODE_IB_ROUTER:
- return RDMA_TRANSPORT_IB;
- case RDMA_NODE_RNIC:
- return RDMA_TRANSPORT_IWARP;
- case RDMA_NODE_USNIC:
+
+ if (node_type == RDMA_NODE_USNIC)
return RDMA_TRANSPORT_USNIC;
- case RDMA_NODE_USNIC_UDP:
+ if (node_type == RDMA_NODE_USNIC_UDP)
return RDMA_TRANSPORT_USNIC_UDP;
- default:
- BUG();
- return 0;
- }
+ if (node_type == RDMA_NODE_RNIC)
+ return RDMA_TRANSPORT_IWARP;
+
+ return RDMA_TRANSPORT_IB;
}
EXPORT_SYMBOL(rdma_node_get_transport);
enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
{
+ enum rdma_transport_type lt;
if (device->get_link_layer)
return device->get_link_layer(device, port_num);
- switch (rdma_node_get_transport(device->node_type)) {
- case RDMA_TRANSPORT_IB:
+ lt = rdma_node_get_transport(device->node_type);
+ if (lt == RDMA_TRANSPORT_IB)
return IB_LINK_LAYER_INFINIBAND;
- case RDMA_TRANSPORT_IWARP:
- case RDMA_TRANSPORT_USNIC:
- case RDMA_TRANSPORT_USNIC_UDP:
- return IB_LINK_LAYER_ETHERNET;
- default:
- return IB_LINK_LAYER_UNSPECIFIED;
- }
+
+ return IB_LINK_LAYER_ETHERNET;
}
EXPORT_SYMBOL(rdma_port_get_link_layer);
@@ -452,6 +442,19 @@ int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
}
EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);
+/*
+ * This function creates an AH from the incoming packet.
+ * The incoming packet carries the dgid of the receiving node on which this
+ * code is being executed, while its sgid contains the GID of the sender.
+ *
+ * When resolving the MAC address of the destination, the arrived dgid is
+ * used as the sgid and the sgid is used as the dgid, because the sgid
+ * contains the destination's GID to which we respond.
+ *
+ * This is why, when calling rdma_addr_find_l2_eth_by_grh(), the
+ * positions of the dgid and sgid arguments do not match the order of the
+ * parameters.
+ */
int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
const struct ib_wc *wc, const struct ib_grh *grh,
struct rdma_ah_attr *ah_attr)
@@ -465,6 +468,8 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
union ib_gid dgid;
union ib_gid sgid;
+ might_sleep();
+
memset(ah_attr, 0, sizeof *ah_attr);
ah_attr->type = rdma_ah_find_type(device, port_num);
if (rdma_cap_eth_ah(device, port_num)) {
@@ -507,11 +512,6 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
}
resolved_dev = dev_get_by_index(&init_net, if_index);
- if (resolved_dev->flags & IFF_LOOPBACK) {
- dev_put(resolved_dev);
- resolved_dev = idev;
- dev_hold(resolved_dev);
- }
rcu_read_lock();
if (resolved_dev != idev && !rdma_is_upper_dev_rcu(idev,
resolved_dev))
@@ -624,11 +624,13 @@ struct ib_srq *ib_create_srq(struct ib_pd *pd,
srq->event_handler = srq_init_attr->event_handler;
srq->srq_context = srq_init_attr->srq_context;
srq->srq_type = srq_init_attr->srq_type;
+ if (ib_srq_has_cq(srq->srq_type)) {
+ srq->ext.cq = srq_init_attr->ext.cq;
+ atomic_inc(&srq->ext.cq->usecnt);
+ }
if (srq->srq_type == IB_SRQT_XRC) {
srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
- srq->ext.xrc.cq = srq_init_attr->ext.xrc.cq;
atomic_inc(&srq->ext.xrc.xrcd->usecnt);
- atomic_inc(&srq->ext.xrc.cq->usecnt);
}
atomic_inc(&pd->usecnt);
atomic_set(&srq->usecnt, 0);
@@ -669,18 +671,18 @@ int ib_destroy_srq(struct ib_srq *srq)
pd = srq->pd;
srq_type = srq->srq_type;
- if (srq_type == IB_SRQT_XRC) {
+ if (ib_srq_has_cq(srq_type))
+ cq = srq->ext.cq;
+ if (srq_type == IB_SRQT_XRC)
xrcd = srq->ext.xrc.xrcd;
- cq = srq->ext.xrc.cq;
- }
ret = srq->device->destroy_srq(srq);
if (!ret) {
atomic_dec(&pd->usecnt);
- if (srq_type == IB_SRQT_XRC) {
+ if (srq_type == IB_SRQT_XRC)
atomic_dec(&xrcd->usecnt);
+ if (ib_srq_has_cq(srq_type))
atomic_dec(&cq->usecnt);
- }
}
return ret;
@@ -830,6 +832,7 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
spin_lock_init(&qp->mr_lock);
INIT_LIST_HEAD(&qp->rdma_mrs);
INIT_LIST_HEAD(&qp->sig_mrs);
+ qp->port = 0;
if (qp_init_attr->qp_type == IB_QPT_XRC_TGT)
return ib_create_xrc_qp(qp, qp_init_attr);
@@ -1235,6 +1238,18 @@ int ib_resolve_eth_dmac(struct ib_device *device,
if (rdma_link_local_addr((struct in6_addr *)grh->dgid.raw)) {
rdma_get_ll_mac((struct in6_addr *)grh->dgid.raw,
ah_attr->roce.dmac);
+ return 0;
+ }
+ if (rdma_is_multicast_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) {
+ if (ipv6_addr_v4mapped((struct in6_addr *)ah_attr->grh.dgid.raw)) {
+ __be32 addr = 0;
+
+ memcpy(&addr, ah_attr->grh.dgid.raw + 12, 4);
+ ip_eth_mc_map(addr, (char *)ah_attr->roce.dmac);
+ } else {
+ ipv6_eth_mc_map((struct in6_addr *)ah_attr->grh.dgid.raw,
+ (char *)ah_attr->roce.dmac);
+ }
} else {
union ib_gid sgid;
struct ib_gid_attr sgid_attr;
@@ -1268,20 +1283,95 @@ out:
}
EXPORT_SYMBOL(ib_resolve_eth_dmac);
-int ib_modify_qp(struct ib_qp *qp,
- struct ib_qp_attr *qp_attr,
- int qp_attr_mask)
+/**
+ * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
+ * @qp: The QP to modify.
+ * @attr: On input, specifies the QP attributes to modify. On output,
+ * the current values of selected QP attributes are returned.
+ * @attr_mask: A bit-mask used to specify which attributes of the QP
+ * are being modified.
+ * @udata: pointer to the user's input/output buffer information
+ *
+ * It returns 0 on success and an appropriate error code on failure.
+ */
+int ib_modify_qp_with_udata(struct ib_qp *qp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
{
+ int ret;
- if (qp_attr_mask & IB_QP_AV) {
- int ret;
-
- ret = ib_resolve_eth_dmac(qp->device, &qp_attr->ah_attr);
+ if (attr_mask & IB_QP_AV) {
+ ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr);
if (ret)
return ret;
}
+ ret = ib_security_modify_qp(qp, attr, attr_mask, udata);
+ if (!ret && (attr_mask & IB_QP_PORT))
+ qp->port = attr->port_num;
+
+ return ret;
+}
+EXPORT_SYMBOL(ib_modify_qp_with_udata);
+
+int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width)
+{
+ int rc;
+ u32 netdev_speed;
+ struct net_device *netdev;
+ struct ethtool_link_ksettings lksettings;
+
+ if (rdma_port_get_link_layer(dev, port_num) != IB_LINK_LAYER_ETHERNET)
+ return -EINVAL;
+
+ if (!dev->get_netdev)
+ return -EOPNOTSUPP;
+
+ netdev = dev->get_netdev(dev, port_num);
+ if (!netdev)
+ return -ENODEV;
+
+ rtnl_lock();
+ rc = __ethtool_get_link_ksettings(netdev, &lksettings);
+ rtnl_unlock();
+
+ dev_put(netdev);
+
+ if (!rc) {
+ netdev_speed = lksettings.base.speed;
+ } else {
+ netdev_speed = SPEED_1000;
+ pr_warn("%s speed is unknown, defaulting to %d\n", netdev->name,
+ netdev_speed);
+ }
+
+ if (netdev_speed <= SPEED_1000) {
+ *width = IB_WIDTH_1X;
+ *speed = IB_SPEED_SDR;
+ } else if (netdev_speed <= SPEED_10000) {
+ *width = IB_WIDTH_1X;
+ *speed = IB_SPEED_FDR10;
+ } else if (netdev_speed <= SPEED_20000) {
+ *width = IB_WIDTH_4X;
+ *speed = IB_SPEED_DDR;
+ } else if (netdev_speed <= SPEED_25000) {
+ *width = IB_WIDTH_1X;
+ *speed = IB_SPEED_EDR;
+ } else if (netdev_speed <= SPEED_40000) {
+ *width = IB_WIDTH_4X;
+ *speed = IB_SPEED_FDR10;
+ } else {
+ *width = IB_WIDTH_4X;
+ *speed = IB_SPEED_EDR;
+ }
- return ib_security_modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
+ return 0;
+}
+EXPORT_SYMBOL(ib_get_eth_speed);
+
+int ib_modify_qp(struct ib_qp *qp,
+ struct ib_qp_attr *qp_attr,
+ int qp_attr_mask)
+{
+ return ib_modify_qp_with_udata(qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);
@@ -1544,15 +1634,53 @@ EXPORT_SYMBOL(ib_dealloc_fmr);
/* Multicast groups */
+static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
+{
+ struct ib_qp_init_attr init_attr = {};
+ struct ib_qp_attr attr = {};
+ int num_eth_ports = 0;
+ int port;
+
+ /* If QP state >= init, it is assigned to a port and we can check this
+ * port only.
+ */
+ if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) {
+ if (attr.qp_state >= IB_QPS_INIT) {
+ if (qp->device->get_link_layer(qp->device, attr.port_num) !=
+ IB_LINK_LAYER_INFINIBAND)
+ return true;
+ goto lid_check;
+ }
+ }
+
+ /* Can't get a quick answer, iterate over all ports */
+ for (port = 0; port < qp->device->phys_port_cnt; port++)
+ if (qp->device->get_link_layer(qp->device, port) !=
+ IB_LINK_LAYER_INFINIBAND)
+ num_eth_ports++;
+
+ /* If we have at least one Ethernet port, the RoCE annex declares that
+ * the multicast LID should be ignored. We can't tell at this step if the
+ * QP belongs to an IB or Ethernet port.
+ */
+ if (num_eth_ports)
+ return true;
+
+ /* If all the ports are IB, we can check according to IB spec. */
+lid_check:
+ return !(lid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
+ lid == be16_to_cpu(IB_LID_PERMISSIVE));
+}
+
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
int ret;
if (!qp->device->attach_mcast)
return -ENOSYS;
- if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD ||
- lid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
- lid == be16_to_cpu(IB_LID_PERMISSIVE))
+
+ if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
+ qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
return -EINVAL;
ret = qp->device->attach_mcast(qp, gid, lid);
@@ -1568,9 +1696,9 @@ int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
if (!qp->device->detach_mcast)
return -ENOSYS;
- if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD ||
- lid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
- lid == be16_to_cpu(IB_LID_PERMISSIVE))
+
+ if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
+ qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
return -EINVAL;
ret = qp->device->detach_mcast(qp, gid, lid);
diff --git a/drivers/infiniband/hw/bnxt_re/Kconfig b/drivers/infiniband/hw/bnxt_re/Kconfig
index 19982a4a9bba..18f5ed082f41 100644
--- a/drivers/infiniband/hw/bnxt_re/Kconfig
+++ b/drivers/infiniband/hw/bnxt_re/Kconfig
@@ -1,6 +1,7 @@
config INFINIBAND_BNXT_RE
tristate "Broadcom Netxtreme HCA support"
depends on ETHERNET && NETDEVICES && PCI && INET && DCB
+ depends on MAY_USE_DEVLINK
select NET_VENDOR_BROADCOM
select BNXT
---help---
diff --git a/drivers/infiniband/hw/bnxt_re/Makefile b/drivers/infiniband/hw/bnxt_re/Makefile
index 036f84efbc73..afbaa0e20670 100644
--- a/drivers/infiniband/hw/bnxt_re/Makefile
+++ b/drivers/infiniband/hw/bnxt_re/Makefile
@@ -3,4 +3,4 @@ ccflags-y := -Idrivers/net/ethernet/broadcom/bnxt
obj-$(CONFIG_INFINIBAND_BNXT_RE) += bnxt_re.o
bnxt_re-y := main.o ib_verbs.o \
qplib_res.o qplib_rcfw.o \
- qplib_sp.o qplib_fp.o
+ qplib_sp.o qplib_fp.o hw_counters.o
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index 08772836fded..b3ad37fec578 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -51,6 +51,8 @@
#define BNXT_RE_PAGE_SIZE_8M BIT(23)
#define BNXT_RE_PAGE_SIZE_1G BIT(30)
+#define BNXT_RE_MAX_MR_SIZE BIT(30)
+
#define BNXT_RE_MAX_QPC_COUNT (64 * 1024)
#define BNXT_RE_MAX_MRW_COUNT (64 * 1024)
#define BNXT_RE_MAX_SRQC_COUNT (64 * 1024)
@@ -60,6 +62,13 @@
#define BNXT_RE_RQ_WQE_THRESHOLD 32
+/*
+ * Setting the default ack delay value to 16, which means
+ * the default timeout is approx. 260ms (4 usec * 2^(timeout))
+ */
+
+#define BNXT_RE_DEFAULT_ACK_DELAY 16
+
struct bnxt_re_work {
struct work_struct work;
unsigned long event;
@@ -76,7 +85,7 @@ struct bnxt_re_sqp_entries {
};
#define BNXT_RE_MIN_MSIX 2
-#define BNXT_RE_MAX_MSIX 16
+#define BNXT_RE_MAX_MSIX 9
#define BNXT_RE_AEQ_IDX 0
#define BNXT_RE_NQ_IDX 1
@@ -107,7 +116,7 @@ struct bnxt_re_dev {
struct bnxt_qplib_rcfw rcfw;
/* NQ */
- struct bnxt_qplib_nq nq;
+ struct bnxt_qplib_nq nq[BNXT_RE_MAX_MSIX];
/* Device Resources */
struct bnxt_qplib_dev_attr dev_attr;
@@ -131,6 +140,7 @@ struct bnxt_re_dev {
struct bnxt_re_qp *qp1_sqp;
struct bnxt_re_ah *sqp_ah;
struct bnxt_re_sqp_entries sqp_tbl[1024];
+ atomic_t nq_alloc_cnt;
};
#define to_bnxt_re_dev(ptr, member) \
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.c b/drivers/infiniband/hw/bnxt_re/hw_counters.c
new file mode 100644
index 000000000000..7b28219eba46
--- /dev/null
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.c
@@ -0,0 +1,114 @@
+/*
+ * Broadcom NetXtreme-E RoCE driver.
+ *
+ * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
+ * Broadcom refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Description: Statistics
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/prefetch.h>
+#include <linux/delay.h>
+
+#include <rdma/ib_addr.h>
+
+#include "bnxt_ulp.h"
+#include "roce_hsi.h"
+#include "qplib_res.h"
+#include "qplib_sp.h"
+#include "qplib_fp.h"
+#include "qplib_rcfw.h"
+#include "bnxt_re.h"
+#include "hw_counters.h"
+
+static const char * const bnxt_re_stat_name[] = {
+ [BNXT_RE_ACTIVE_QP] = "active_qps",
+ [BNXT_RE_ACTIVE_SRQ] = "active_srqs",
+ [BNXT_RE_ACTIVE_CQ] = "active_cqs",
+ [BNXT_RE_ACTIVE_MR] = "active_mrs",
+ [BNXT_RE_ACTIVE_MW] = "active_mws",
+ [BNXT_RE_RX_PKTS] = "rx_pkts",
+ [BNXT_RE_RX_BYTES] = "rx_bytes",
+ [BNXT_RE_TX_PKTS] = "tx_pkts",
+ [BNXT_RE_TX_BYTES] = "tx_bytes",
+ [BNXT_RE_RECOVERABLE_ERRORS] = "recoverable_errors"
+};
+
+int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
+ struct rdma_hw_stats *stats,
+ u8 port, int index)
+{
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
+ struct ctx_hw_stats *bnxt_re_stats = rdev->qplib_ctx.stats.dma;
+
+ if (!port || !stats)
+ return -EINVAL;
+
+ stats->value[BNXT_RE_ACTIVE_QP] = atomic_read(&rdev->qp_count);
+ stats->value[BNXT_RE_ACTIVE_SRQ] = atomic_read(&rdev->srq_count);
+ stats->value[BNXT_RE_ACTIVE_CQ] = atomic_read(&rdev->cq_count);
+ stats->value[BNXT_RE_ACTIVE_MR] = atomic_read(&rdev->mr_count);
+ stats->value[BNXT_RE_ACTIVE_MW] = atomic_read(&rdev->mw_count);
+ if (bnxt_re_stats) {
+ stats->value[BNXT_RE_RECOVERABLE_ERRORS] =
+ le64_to_cpu(bnxt_re_stats->tx_bcast_pkts);
+ stats->value[BNXT_RE_RX_PKTS] =
+ le64_to_cpu(bnxt_re_stats->rx_ucast_pkts);
+ stats->value[BNXT_RE_RX_BYTES] =
+ le64_to_cpu(bnxt_re_stats->rx_ucast_bytes);
+ stats->value[BNXT_RE_TX_PKTS] =
+ le64_to_cpu(bnxt_re_stats->tx_ucast_pkts);
+ stats->value[BNXT_RE_TX_BYTES] =
+ le64_to_cpu(bnxt_re_stats->tx_ucast_bytes);
+ }
+ return ARRAY_SIZE(bnxt_re_stat_name);
+}
+
+struct rdma_hw_stats *bnxt_re_ib_alloc_hw_stats(struct ib_device *ibdev,
+ u8 port_num)
+{
+ BUILD_BUG_ON(ARRAY_SIZE(bnxt_re_stat_name) != BNXT_RE_NUM_COUNTERS);
+ /* We support only per-port stats */
+ if (!port_num)
+ return NULL;
+
+ return rdma_alloc_hw_stats_struct(bnxt_re_stat_name,
+ ARRAY_SIZE(bnxt_re_stat_name),
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+}
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.h b/drivers/infiniband/hw/bnxt_re/hw_counters.h
new file mode 100644
index 000000000000..be0dc0093b58
--- /dev/null
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.h
@@ -0,0 +1,62 @@
+/*
+ * Broadcom NetXtreme-E RoCE driver.
+ *
+ * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
+ * Broadcom refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Description: Statistics (header)
+ *
+ */
+
+#ifndef __BNXT_RE_HW_STATS_H__
+#define __BNXT_RE_HW_STATS_H__
+
+enum bnxt_re_hw_stats {
+ BNXT_RE_ACTIVE_QP,
+ BNXT_RE_ACTIVE_SRQ,
+ BNXT_RE_ACTIVE_CQ,
+ BNXT_RE_ACTIVE_MR,
+ BNXT_RE_ACTIVE_MW,
+ BNXT_RE_RX_PKTS,
+ BNXT_RE_RX_BYTES,
+ BNXT_RE_TX_PKTS,
+ BNXT_RE_TX_BYTES,
+ BNXT_RE_RECOVERABLE_ERRORS,
+ BNXT_RE_NUM_COUNTERS
+};
+
+struct rdma_hw_stats *bnxt_re_ib_alloc_hw_stats(struct ib_device *ibdev,
+ u8 port_num);
+int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
+ struct rdma_hw_stats *stats,
+ u8 port, int index);
+#endif /* __BNXT_RE_HW_STATS_H__ */
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index c7bd68311d0c..01eee15bbd65 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -145,10 +145,8 @@ int bnxt_re_query_device(struct ib_device *ibdev,
ib_attr->fw_ver = (u64)(unsigned long)(dev_attr->fw_ver);
bnxt_qplib_get_guid(rdev->netdev->dev_addr,
(u8 *)&ib_attr->sys_image_guid);
- ib_attr->max_mr_size = ~0ull;
- ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_8K |
- BNXT_RE_PAGE_SIZE_64K | BNXT_RE_PAGE_SIZE_2M |
- BNXT_RE_PAGE_SIZE_8M | BNXT_RE_PAGE_SIZE_1G;
+ ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
+ ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K;
ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
@@ -174,9 +172,11 @@ int bnxt_re_query_device(struct ib_device *ibdev,
ib_attr->max_mr = dev_attr->max_mr;
ib_attr->max_pd = dev_attr->max_pd;
ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
- ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_rd_atom;
- ib_attr->atomic_cap = IB_ATOMIC_HCA;
- ib_attr->masked_atomic_cap = IB_ATOMIC_HCA;
+ ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
+ if (dev_attr->is_atomic) {
+ ib_attr->atomic_cap = IB_ATOMIC_HCA;
+ ib_attr->masked_atomic_cap = IB_ATOMIC_HCA;
+ }
ib_attr->max_ee_rd_atom = 0;
ib_attr->max_res_rd_atom = 0;
@@ -201,7 +201,7 @@ int bnxt_re_query_device(struct ib_device *ibdev,
ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;
ib_attr->max_pkeys = 1;
- ib_attr->local_ca_ack_delay = 0;
+ ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
return 0;
}
@@ -223,50 +223,6 @@ int bnxt_re_modify_device(struct ib_device *ibdev,
return 0;
}
-static void __to_ib_speed_width(struct net_device *netdev, u8 *speed, u8 *width)
-{
- struct ethtool_link_ksettings lksettings;
- u32 espeed;
-
- if (netdev->ethtool_ops && netdev->ethtool_ops->get_link_ksettings) {
- memset(&lksettings, 0, sizeof(lksettings));
- rtnl_lock();
- netdev->ethtool_ops->get_link_ksettings(netdev, &lksettings);
- rtnl_unlock();
- espeed = lksettings.base.speed;
- } else {
- espeed = SPEED_UNKNOWN;
- }
- switch (espeed) {
- case SPEED_1000:
- *speed = IB_SPEED_SDR;
- *width = IB_WIDTH_1X;
- break;
- case SPEED_10000:
- *speed = IB_SPEED_QDR;
- *width = IB_WIDTH_1X;
- break;
- case SPEED_20000:
- *speed = IB_SPEED_DDR;
- *width = IB_WIDTH_4X;
- break;
- case SPEED_25000:
- *speed = IB_SPEED_EDR;
- *width = IB_WIDTH_1X;
- break;
- case SPEED_40000:
- *speed = IB_SPEED_QDR;
- *width = IB_WIDTH_4X;
- break;
- case SPEED_50000:
- break;
- default:
- *speed = IB_SPEED_SDR;
- *width = IB_WIDTH_1X;
- break;
- }
-}
-
/* Port */
int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
struct ib_port_attr *port_attr)
@@ -308,25 +264,9 @@ int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
* IB stack to avoid race in the NETDEV_UNREG path
*/
if (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags))
- __to_ib_speed_width(rdev->netdev, &port_attr->active_speed,
- &port_attr->active_width);
- return 0;
-}
-
-int bnxt_re_modify_port(struct ib_device *ibdev, u8 port_num,
- int port_modify_mask,
- struct ib_port_modify *port_modify)
-{
- switch (port_modify_mask) {
- case IB_PORT_SHUTDOWN:
- break;
- case IB_PORT_INIT_TYPE:
- break;
- case IB_PORT_RESET_QKEY_CNTR:
- break;
- default:
- break;
- }
+ if (ib_get_eth_speed(ibdev, port_num, &port_attr->active_speed,
+ &port_attr->active_width))
+ return -EINVAL;
return 0;
}
@@ -390,15 +330,17 @@ int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
return -EINVAL;
ctx->refcnt--;
if (!ctx->refcnt) {
- rc = bnxt_qplib_del_sgid
- (sgid_tbl,
- &sgid_tbl->tbl[ctx->idx], true);
- if (rc)
+ rc = bnxt_qplib_del_sgid(sgid_tbl,
+ &sgid_tbl->tbl[ctx->idx],
+ true);
+ if (rc) {
dev_err(rdev_to_dev(rdev),
"Failed to remove GID: %#x", rc);
- ctx_tbl = sgid_tbl->ctx;
- ctx_tbl[ctx->idx] = NULL;
- kfree(ctx);
+ } else {
+ ctx_tbl = sgid_tbl->ctx;
+ ctx_tbl[ctx->idx] = NULL;
+ kfree(ctx);
+ }
}
} else {
return -EINVAL;
@@ -588,10 +530,10 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
/* Create a fence MW only for kernel consumers */
mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
- if (!mw) {
+ if (IS_ERR(mw)) {
dev_err(rdev_to_dev(rdev),
"Failed to create fence-MW for PD: %p\n", pd);
- rc = -EINVAL;
+ rc = PTR_ERR(mw);
goto fail;
}
fence->mw = mw;
@@ -612,30 +554,13 @@ int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
int rc;
bnxt_re_destroy_fence_mr(pd);
- if (ib_pd->uobject && pd->dpi.dbr) {
- struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
- struct bnxt_re_ucontext *ucntx;
-
- /* Free DPI only if this is the first PD allocated by the
- * application and mark the context dpi as NULL
- */
- ucntx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
- rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
- &rdev->qplib_res.dpi_tbl,
- &pd->dpi);
+ if (pd->qplib_pd.id) {
+ rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
+ &rdev->qplib_res.pd_tbl,
+ &pd->qplib_pd);
if (rc)
- dev_err(rdev_to_dev(rdev), "Failed to deallocate HW DPI");
- /* Don't fail, continue*/
- ucntx->dpi = NULL;
- }
-
- rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
- &rdev->qplib_res.pd_tbl,
- &pd->qplib_pd);
- if (rc) {
- dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD");
- return rc;
+ dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD");
}
kfree(pd);
@@ -667,23 +592,22 @@ struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
if (udata) {
struct bnxt_re_pd_resp resp;
- if (!ucntx->dpi) {
+ if (!ucntx->dpi.dbr) {
/* Allocate DPI in alloc_pd to avoid failing of
* ibv_devinfo and family of application when DPIs
* are depleted.
*/
if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
- &pd->dpi, ucntx)) {
+ &ucntx->dpi, ucntx)) {
rc = -ENOMEM;
goto dbfail;
}
- ucntx->dpi = &pd->dpi;
}
resp.pdid = pd->qplib_pd.id;
/* Still allow mapping this DBR to the new user PD. */
- resp.dpi = ucntx->dpi->dpi;
- resp.dbr = (u64)ucntx->dpi->umdbr;
+ resp.dpi = ucntx->dpi.dpi;
+ resp.dbr = (u64)ucntx->dpi.umdbr;
rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
if (rc) {
@@ -862,6 +786,7 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
struct bnxt_re_dev *rdev = qp->rdev;
int rc;
+ bnxt_qplib_del_flush_qp(&qp->qplib_qp);
rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
if (rc) {
dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP");
@@ -876,6 +801,7 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
return rc;
}
+ bnxt_qplib_del_flush_qp(&qp->qplib_qp);
rc = bnxt_qplib_destroy_qp(&rdev->qplib_res,
&rdev->qp1_sqp->qplib_qp);
if (rc) {
@@ -960,7 +886,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
qplib_qp->rq.nmap = umem->nmap;
}
- qplib_qp->dpi = cntx->dpi;
+ qplib_qp->dpi = &cntx->dpi;
return 0;
rqfail:
ib_umem_release(qp->sumem);
@@ -985,7 +911,6 @@ static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
if (!ah)
return NULL;
- memset(ah, 0, sizeof(*ah));
ah->rdev = rdev;
ah->qplib_ah.pd = &pd->qplib_pd;
@@ -1032,7 +957,6 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp
if (!qp)
return NULL;
- memset(qp, 0, sizeof(*qp));
qp->rdev = rdev;
/* Initialize the shadow QP structure from the QP1 values */
@@ -1420,6 +1344,21 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
}
qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);
+
+ if (!qp->sumem &&
+ qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
+ dev_dbg(rdev_to_dev(rdev),
+ "Move QP = %p to flush list\n",
+ qp);
+ bnxt_qplib_add_flush_qp(&qp->qplib_qp);
+ }
+ if (!qp->sumem &&
+ qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
+ dev_dbg(rdev_to_dev(rdev),
+ "Move QP = %p out of flush list\n",
+ qp);
+ bnxt_qplib_del_flush_qp(&qp->qplib_qp);
+ }
}
if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
qp->qplib_qp.modify_flags |=
@@ -1530,13 +1469,24 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
qp->qplib_qp.modify_flags |=
CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
- qp->qplib_qp.max_rd_atomic = qp_attr->max_rd_atomic;
+ /* Cap the max_rd_atomic to device max */
+ qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
+ dev_attr->max_qp_rd_atom);
}
if (qp_attr_mask & IB_QP_SQ_PSN) {
qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
qp->qplib_qp.sq.psn = qp_attr->sq_psn;
}
if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
+ if (qp_attr->max_dest_rd_atomic >
+ dev_attr->max_qp_init_rd_atom) {
+ dev_err(rdev_to_dev(rdev),
+ "max_dest_rd_atomic requested%d is > dev_max%d",
+ qp_attr->max_dest_rd_atomic,
+ dev_attr->max_qp_init_rd_atom);
+ return -EINVAL;
+ }
+
qp->qplib_qp.modify_flags |=
CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
@@ -2338,6 +2288,7 @@ int bnxt_re_destroy_cq(struct ib_cq *ib_cq)
struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
struct bnxt_re_dev *rdev = cq->rdev;
int rc;
+ struct bnxt_qplib_nq *nq = cq->qplib_cq.nq;
rc = bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
if (rc) {
@@ -2352,7 +2303,7 @@ int bnxt_re_destroy_cq(struct ib_cq *ib_cq)
kfree(cq);
}
atomic_dec(&rdev->cq_count);
- rdev->nq.budget--;
+ nq->budget--;
return 0;
}
@@ -2366,6 +2317,8 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
struct bnxt_re_cq *cq = NULL;
int rc, entries;
int cqe = attr->cqe;
+ struct bnxt_qplib_nq *nq = NULL;
+ unsigned int nq_alloc_cnt;
/* Validate CQ fields */
if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
@@ -2403,7 +2356,7 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
}
cq->qplib_cq.sghead = cq->umem->sg_head.sgl;
cq->qplib_cq.nmap = cq->umem->nmap;
- cq->qplib_cq.dpi = uctx->dpi;
+ cq->qplib_cq.dpi = &uctx->dpi;
} else {
cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
@@ -2417,8 +2370,15 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
cq->qplib_cq.sghead = NULL;
cq->qplib_cq.nmap = 0;
}
+ /*
+ * Allocating the NQ in a round-robin fashion. nq_alloc_cnt is
+ * used for getting the NQ index.
+ */
+ nq_alloc_cnt = atomic_inc_return(&rdev->nq_alloc_cnt);
+ nq = &rdev->nq[nq_alloc_cnt % (rdev->num_msix - 1)];
cq->qplib_cq.max_wqe = entries;
- cq->qplib_cq.cnq_hw_ring_id = rdev->nq.ring_id;
+ cq->qplib_cq.cnq_hw_ring_id = nq->ring_id;
+ cq->qplib_cq.nq = nq;
rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
if (rc) {
@@ -2428,7 +2388,7 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
cq->ib_cq.cqe = entries;
cq->cq_period = cq->qplib_cq.period;
- rdev->nq.budget++;
+ nq->budget++;
atomic_inc(&rdev->cq_count);
@@ -2905,6 +2865,7 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
spin_lock_irqsave(&cq->cq_lock, flags);
budget = min_t(u32, num_entries, cq->max_cql);
+ num_entries = budget;
if (!cq->cql) {
dev_err(rdev_to_dev(cq->rdev), "POLL CQ : no CQL to use");
goto exit;
@@ -2925,6 +2886,10 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
sq->send_phantom = false;
}
}
+ if (ncqe < budget)
+ ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq,
+ cqe + ncqe,
+ budget - ncqe);
if (!ncqe)
break;
@@ -3031,6 +2996,11 @@ int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
else if (ib_cqn_flags & IB_CQ_SOLICITED)
type = DBR_DBR_TYPE_CQ_ARMSE;
+ /* Poll to see if there are missed events */
+ if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
+ !(bnxt_qplib_is_cq_empty(&cq->qplib_cq)))
+ return 1;
+
bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
return 0;
@@ -3245,6 +3215,12 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
struct scatterlist *sg;
int entry;
+ if (length > BNXT_RE_MAX_MR_SIZE) {
+ dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%ld\n",
+ length, BNXT_RE_MAX_MR_SIZE);
+ return ERR_PTR(-ENOMEM);
+ }
+
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return ERR_PTR(-ENOMEM);
@@ -3388,8 +3364,26 @@ int bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
struct bnxt_re_ucontext,
ib_uctx);
+
+ struct bnxt_re_dev *rdev = uctx->rdev;
+ int rc = 0;
+
if (uctx->shpg)
free_page((unsigned long)uctx->shpg);
+
+ if (uctx->dpi.dbr) {
+ /* Free DPI only if this is the first PD allocated by the
+ * application and mark the context dpi as NULL
+ */
+ rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
+ &rdev->qplib_res.dpi_tbl,
+ &uctx->dpi);
+ if (rc)
+ dev_err(rdev_to_dev(rdev), "Deallocate HW DPI failed!");
+ /* Don't fail, continue*/
+ uctx->dpi.dbr = NULL;
+ }
+
kfree(uctx);
return 0;
}
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index 6c160f6a5398..1df11ed272ea 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -59,7 +59,6 @@ struct bnxt_re_pd {
struct bnxt_re_dev *rdev;
struct ib_pd ib_pd;
struct bnxt_qplib_pd qplib_pd;
- struct bnxt_qplib_dpi dpi;
struct bnxt_re_fence_data fence;
};
@@ -127,7 +126,7 @@ struct bnxt_re_mw {
struct bnxt_re_ucontext {
struct bnxt_re_dev *rdev;
struct ib_ucontext ib_uctx;
- struct bnxt_qplib_dpi *dpi;
+ struct bnxt_qplib_dpi dpi;
void *shpg;
spinlock_t sh_lock; /* protect shpg */
};
@@ -142,9 +141,6 @@ int bnxt_re_modify_device(struct ib_device *ibdev,
struct ib_device_modify *device_modify);
int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
struct ib_port_attr *port_attr);
-int bnxt_re_modify_port(struct ib_device *ibdev, u8 port_num,
- int port_modify_mask,
- struct ib_port_modify *port_modify);
int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
struct ib_port_immutable *immutable);
int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 1fce5e73216b..82d1cbc27aee 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -64,13 +64,14 @@
#include "ib_verbs.h"
#include <rdma/bnxt_re-abi.h>
#include "bnxt.h"
+#include "hw_counters.h"
+
static char version[] =
BNXT_RE_DESC " v" ROCE_DRV_MODULE_VERSION "\n";
MODULE_AUTHOR("Eddie Wai <eddie.wai@broadcom.com>");
MODULE_DESCRIPTION(BNXT_RE_DESC " Driver");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(ROCE_DRV_MODULE_VERSION);
/* globals */
static struct list_head bnxt_re_dev_list = LIST_HEAD_INIT(bnxt_re_dev_list);
@@ -162,7 +163,7 @@ static int bnxt_re_free_msix(struct bnxt_re_dev *rdev, bool lock_wait)
static int bnxt_re_request_msix(struct bnxt_re_dev *rdev)
{
- int rc = 0, num_msix_want = BNXT_RE_MIN_MSIX, num_msix_got;
+ int rc = 0, num_msix_want = BNXT_RE_MAX_MSIX, num_msix_got;
struct bnxt_en_dev *en_dev;
if (!rdev)
@@ -170,6 +171,8 @@ static int bnxt_re_request_msix(struct bnxt_re_dev *rdev)
en_dev = rdev->en_dev;
+ num_msix_want = min_t(u32, BNXT_RE_MAX_MSIX, num_online_cpus());
+
rtnl_lock();
num_msix_got = en_dev->en_ops->bnxt_request_msix(en_dev, BNXT_ROCE_ULP,
rdev->msix_entries,
@@ -333,6 +336,7 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1);
req.update_period_ms = cpu_to_le32(1000);
req.stats_dma_addr = cpu_to_le64(dma_map);
+ req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE;
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
@@ -473,7 +477,6 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
ibdev->modify_device = bnxt_re_modify_device;
ibdev->query_port = bnxt_re_query_port;
- ibdev->modify_port = bnxt_re_modify_port;
ibdev->get_port_immutable = bnxt_re_get_port_immutable;
ibdev->query_pkey = bnxt_re_query_pkey;
ibdev->query_gid = bnxt_re_query_gid;
@@ -512,6 +515,8 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
ibdev->alloc_ucontext = bnxt_re_alloc_ucontext;
ibdev->dealloc_ucontext = bnxt_re_dealloc_ucontext;
ibdev->mmap = bnxt_re_mmap;
+ ibdev->get_hw_stats = bnxt_re_ib_get_hw_stats;
+ ibdev->alloc_hw_stats = bnxt_re_ib_alloc_hw_stats;
return ib_register_device(ibdev, NULL);
}
@@ -652,8 +657,12 @@ static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
{
- if (rdev->nq.hwq.max_elements)
- bnxt_qplib_disable_nq(&rdev->nq);
+ int i;
+
+ if (rdev->nq[0].hwq.max_elements) {
+ for (i = 1; i < rdev->num_msix; i++)
+ bnxt_qplib_disable_nq(&rdev->nq[i - 1]);
+ }
if (rdev->qplib_res.rcfw)
bnxt_qplib_cleanup_res(&rdev->qplib_res);
@@ -661,31 +670,41 @@ static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
{
- int rc = 0;
+ int rc = 0, i;
bnxt_qplib_init_res(&rdev->qplib_res);
- if (rdev->msix_entries[BNXT_RE_NQ_IDX].vector <= 0)
- return -EINVAL;
+ for (i = 1; i < rdev->num_msix ; i++) {
+ rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nq[i - 1],
+ i - 1, rdev->msix_entries[i].vector,
+ rdev->msix_entries[i].db_offset,
+ &bnxt_re_cqn_handler, NULL);
- rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nq,
- rdev->msix_entries[BNXT_RE_NQ_IDX].vector,
- rdev->msix_entries[BNXT_RE_NQ_IDX].db_offset,
- &bnxt_re_cqn_handler,
- NULL);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev),
+ "Failed to enable NQ with rc = 0x%x", rc);
+ goto fail;
+ }
+ }
+ return 0;
+fail:
+ return rc;
+}
- if (rc)
- dev_err(rdev_to_dev(rdev), "Failed to enable NQ: %#x", rc);
+static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev, bool lock_wait)
+{
+ int i;
- return rc;
+ for (i = 0; i < rdev->num_msix - 1; i++) {
+ bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, lock_wait);
+ bnxt_qplib_free_nq(&rdev->nq[i]);
+ }
}
static void bnxt_re_free_res(struct bnxt_re_dev *rdev, bool lock_wait)
{
- if (rdev->nq.hwq.max_elements) {
- bnxt_re_net_ring_free(rdev, rdev->nq.ring_id, lock_wait);
- bnxt_qplib_free_nq(&rdev->nq);
- }
+ bnxt_re_free_nq_res(rdev, lock_wait);
+
if (rdev->qplib_res.dpi_tbl.max) {
bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
&rdev->qplib_res.dpi_tbl,
@@ -699,7 +718,7 @@ static void bnxt_re_free_res(struct bnxt_re_dev *rdev, bool lock_wait)
static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
{
- int rc = 0;
+ int rc = 0, i;
/* Configure and allocate resources for qplib */
rdev->qplib_res.rcfw = &rdev->rcfw;
@@ -716,30 +735,42 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
&rdev->dpi_privileged,
rdev);
if (rc)
- goto fail;
+ goto dealloc_res;
- rdev->nq.hwq.max_elements = BNXT_RE_MAX_CQ_COUNT +
- BNXT_RE_MAX_SRQC_COUNT + 2;
- rc = bnxt_qplib_alloc_nq(rdev->en_dev->pdev, &rdev->nq);
- if (rc) {
- dev_err(rdev_to_dev(rdev),
- "Failed to allocate NQ memory: %#x", rc);
- goto fail;
- }
- rc = bnxt_re_net_ring_alloc
- (rdev, rdev->nq.hwq.pbl[PBL_LVL_0].pg_map_arr,
- rdev->nq.hwq.pbl[rdev->nq.hwq.level].pg_count,
- HWRM_RING_ALLOC_CMPL, BNXT_QPLIB_NQE_MAX_CNT - 1,
- rdev->msix_entries[BNXT_RE_NQ_IDX].ring_idx,
- &rdev->nq.ring_id);
- if (rc) {
- dev_err(rdev_to_dev(rdev),
- "Failed to allocate NQ ring: %#x", rc);
- goto free_nq;
+ for (i = 0; i < rdev->num_msix - 1; i++) {
+ rdev->nq[i].hwq.max_elements = BNXT_RE_MAX_CQ_COUNT +
+ BNXT_RE_MAX_SRQC_COUNT + 2;
+ rc = bnxt_qplib_alloc_nq(rdev->en_dev->pdev, &rdev->nq[i]);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Alloc Failed NQ%d rc:%#x",
+ i, rc);
+ goto dealloc_dpi;
+ }
+ rc = bnxt_re_net_ring_alloc
+ (rdev, rdev->nq[i].hwq.pbl[PBL_LVL_0].pg_map_arr,
+ rdev->nq[i].hwq.pbl[rdev->nq[i].hwq.level].pg_count,
+ HWRM_RING_ALLOC_CMPL,
+ BNXT_QPLIB_NQE_MAX_CNT - 1,
+ rdev->msix_entries[i + 1].ring_idx,
+ &rdev->nq[i].ring_id);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev),
+ "Failed to allocate NQ fw id with rc = 0x%x",
+ rc);
+ goto free_nq;
+ }
}
return 0;
free_nq:
- bnxt_qplib_free_nq(&rdev->nq);
+ for (i = 0; i < rdev->num_msix - 1; i++)
+ bnxt_qplib_free_nq(&rdev->nq[i]);
+dealloc_dpi:
+ bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
+ &rdev->qplib_res.dpi_tbl,
+ &rdev->dpi_privileged);
+dealloc_res:
+ bnxt_qplib_free_res(&rdev->qplib_res);
+
fail:
rdev->qplib_res.rcfw = NULL;
return rc;
@@ -834,6 +865,42 @@ static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
mutex_unlock(&rdev->qp_lock);
}
+static int bnxt_re_update_gid(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
+ struct bnxt_qplib_gid gid;
+ u16 gid_idx, index;
+ int rc = 0;
+
+ if (!test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags))
+ return 0;
+
+ if (!sgid_tbl) {
+ dev_err(rdev_to_dev(rdev), "QPLIB: SGID table not allocated");
+ return -EINVAL;
+ }
+
+ for (index = 0; index < sgid_tbl->active; index++) {
+ gid_idx = sgid_tbl->hw_id[index];
+
+ if (!memcmp(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
+ sizeof(bnxt_qplib_gid_zero)))
+ continue;
+ /* Only the VLAN enable setting of non-VLAN GIDs needs modifying,
+ * as it is already set for VLAN GIDs when the GID is added
+ */
+ if (sgid_tbl->vlan[index])
+ continue;
+
+ memcpy(&gid, &sgid_tbl->tbl[index], sizeof(gid));
+
+ rc = bnxt_qplib_update_sgid(sgid_tbl, &gid, gid_idx,
+ rdev->qplib_res.netdev->dev_addr);
+ }
+
+ return rc;
+}
+
static u32 bnxt_re_get_priority_mask(struct bnxt_re_dev *rdev)
{
u32 prio_map = 0, tmp_map = 0;
@@ -853,8 +920,6 @@ static u32 bnxt_re_get_priority_mask(struct bnxt_re_dev *rdev)
tmp_map = dcb_ieee_getapp_mask(netdev, &app);
prio_map |= tmp_map;
- if (!prio_map)
- prio_map = -EFAULT;
return prio_map;
}
@@ -880,10 +945,7 @@ static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
int rc;
/* Get priority for roce */
- rc = bnxt_re_get_priority_mask(rdev);
- if (rc < 0)
- return rc;
- prio_map = (u8)rc;
+ prio_map = bnxt_re_get_priority_mask(rdev);
if (prio_map == rdev->cur_prio_map)
return 0;
@@ -905,6 +967,16 @@ static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
return rc;
}
+ /* Actual priorities are not programmed here, as the L2 driver has
+ * already done so; just enable or disable priority VLAN tagging
+ */
+ if ((prio_map == 0 && rdev->qplib_res.prio) ||
+ (prio_map != 0 && !rdev->qplib_res.prio)) {
+ rdev->qplib_res.prio = prio_map ? true : false;
+
+ bnxt_re_update_gid(rdev);
+ }
+
return 0;
}
@@ -997,7 +1069,8 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
/* Establish RCFW Communication Channel to initialize the context
* memory for the function and all child VFs
*/
- rc = bnxt_qplib_alloc_rcfw_channel(rdev->en_dev->pdev, &rdev->rcfw);
+ rc = bnxt_qplib_alloc_rcfw_channel(rdev->en_dev->pdev, &rdev->rcfw,
+ BNXT_RE_MAX_QPC_COUNT);
if (rc)
goto fail;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index f05500bcdcf1..e8afc47f8949 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -51,6 +51,168 @@
#include "qplib_fp.h"
static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq);
+static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
+
+static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
+{
+ qp->sq.condition = false;
+ qp->sq.send_phantom = false;
+ qp->sq.single = false;
+}
+
+/* Flush list */
+static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
+{
+ struct bnxt_qplib_cq *scq, *rcq;
+
+ scq = qp->scq;
+ rcq = qp->rcq;
+
+ if (!qp->sq.flushed) {
+ dev_dbg(&scq->hwq.pdev->dev,
+ "QPLIB: FP: Adding to SQ Flush list = %p",
+ qp);
+ bnxt_qplib_cancel_phantom_processing(qp);
+ list_add_tail(&qp->sq_flush, &scq->sqf_head);
+ qp->sq.flushed = true;
+ }
+ if (!qp->srq) {
+ if (!qp->rq.flushed) {
+ dev_dbg(&rcq->hwq.pdev->dev,
+ "QPLIB: FP: Adding to RQ Flush list = %p",
+ qp);
+ list_add_tail(&qp->rq_flush, &rcq->rqf_head);
+ qp->rq.flushed = true;
+ }
+ }
+}
+
+void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp,
+ unsigned long *flags)
+ __acquires(&qp->scq->hwq.lock) __acquires(&qp->rcq->hwq.lock)
+{
+ spin_lock_irqsave(&qp->scq->hwq.lock, *flags);
+ if (qp->scq == qp->rcq)
+ __acquire(&qp->rcq->hwq.lock);
+ else
+ spin_lock(&qp->rcq->hwq.lock);
+}
+
+void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp,
+ unsigned long *flags)
+ __releases(&qp->scq->hwq.lock) __releases(&qp->rcq->hwq.lock)
+{
+ if (qp->scq == qp->rcq)
+ __release(&qp->rcq->hwq.lock);
+ else
+ spin_unlock(&qp->rcq->hwq.lock);
+ spin_unlock_irqrestore(&qp->scq->hwq.lock, *flags);
+}
+
+static struct bnxt_qplib_cq *bnxt_qplib_find_buddy_cq(struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_cq *cq)
+{
+ struct bnxt_qplib_cq *buddy_cq = NULL;
+
+ if (qp->scq == qp->rcq)
+ buddy_cq = NULL;
+ else if (qp->scq == cq)
+ buddy_cq = qp->rcq;
+ else
+ buddy_cq = qp->scq;
+ return buddy_cq;
+}
+
+static void bnxt_qplib_lock_buddy_cq(struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_cq *cq)
+ __acquires(&buddy_cq->hwq.lock)
+{
+ struct bnxt_qplib_cq *buddy_cq = NULL;
+
+ buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq);
+ if (!buddy_cq)
+ __acquire(&cq->hwq.lock);
+ else
+ spin_lock(&buddy_cq->hwq.lock);
+}
+
+static void bnxt_qplib_unlock_buddy_cq(struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_cq *cq)
+ __releases(&buddy_cq->hwq.lock)
+{
+ struct bnxt_qplib_cq *buddy_cq = NULL;
+
+ buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq);
+ if (!buddy_cq)
+ __release(&cq->hwq.lock);
+ else
+ spin_unlock(&buddy_cq->hwq.lock);
+}
+
+void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
+{
+ unsigned long flags;
+
+ bnxt_qplib_acquire_cq_locks(qp, &flags);
+ __bnxt_qplib_add_flush_qp(qp);
+ bnxt_qplib_release_cq_locks(qp, &flags);
+}
+
+static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
+{
+ struct bnxt_qplib_cq *scq, *rcq;
+
+ scq = qp->scq;
+ rcq = qp->rcq;
+
+ if (qp->sq.flushed) {
+ qp->sq.flushed = false;
+ list_del(&qp->sq_flush);
+ }
+ if (!qp->srq) {
+ if (qp->rq.flushed) {
+ qp->rq.flushed = false;
+ list_del(&qp->rq_flush);
+ }
+ }
+}
+
+void bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
+{
+ unsigned long flags;
+
+ bnxt_qplib_acquire_cq_locks(qp, &flags);
+ __clean_cq(qp->scq, (u64)(unsigned long)qp);
+ qp->sq.hwq.prod = 0;
+ qp->sq.hwq.cons = 0;
+ __clean_cq(qp->rcq, (u64)(unsigned long)qp);
+ qp->rq.hwq.prod = 0;
+ qp->rq.hwq.cons = 0;
+
+ __bnxt_qplib_del_flush_qp(qp);
+ bnxt_qplib_release_cq_locks(qp, &flags);
+}
+
+static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
+{
+ struct bnxt_qplib_nq_work *nq_work =
+ container_of(work, struct bnxt_qplib_nq_work, work);
+
+ struct bnxt_qplib_cq *cq = nq_work->cq;
+ struct bnxt_qplib_nq *nq = nq_work->nq;
+
+ if (cq && nq) {
+ spin_lock_bh(&cq->compl_lock);
+ if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
+ dev_dbg(&nq->pdev->dev,
+ "%s:Trigger cq = %p event nq = %p\n",
+ __func__, cq, nq);
+ nq->cqn_handler(nq, cq);
+ }
+ spin_unlock_bh(&cq->compl_lock);
+ }
+ kfree(nq_work);
+}
static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
struct bnxt_qplib_qp *qp)
@@ -119,6 +281,7 @@ static void bnxt_qplib_service_nq(unsigned long data)
struct bnxt_qplib_nq *nq = (struct bnxt_qplib_nq *)data;
struct bnxt_qplib_hwq *hwq = &nq->hwq;
struct nq_base *nqe, **nq_ptr;
+ struct bnxt_qplib_cq *cq;
int num_cqne_processed = 0;
u32 sw_cons, raw_cons;
u16 type;
@@ -143,15 +306,17 @@ static void bnxt_qplib_service_nq(unsigned long data)
q_handle = le32_to_cpu(nqcne->cq_handle_low);
q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
<< 32;
- bnxt_qplib_arm_cq_enable((struct bnxt_qplib_cq *)
- ((unsigned long)q_handle));
- if (!nq->cqn_handler(nq, (struct bnxt_qplib_cq *)
- ((unsigned long)q_handle)))
+ cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
+ bnxt_qplib_arm_cq_enable(cq);
+ spin_lock_bh(&cq->compl_lock);
+ atomic_set(&cq->arm_state, 0);
+ if (!nq->cqn_handler(nq, (cq)))
num_cqne_processed++;
else
dev_warn(&nq->pdev->dev,
"QPLIB: cqn - type 0x%x not handled",
type);
+ spin_unlock_bh(&cq->compl_lock);
break;
}
case NQ_BASE_TYPE_DBQ_EVENT:
@@ -190,12 +355,17 @@ static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
{
+ if (nq->cqn_wq) {
+ destroy_workqueue(nq->cqn_wq);
+ nq->cqn_wq = NULL;
+ }
/* Make sure the HW is stopped! */
synchronize_irq(nq->vector);
tasklet_disable(&nq->worker);
tasklet_kill(&nq->worker);
if (nq->requested) {
+ irq_set_affinity_hint(nq->vector, NULL);
free_irq(nq->vector, nq);
nq->requested = false;
}
@@ -209,14 +379,14 @@ void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
}
int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
- int msix_vector, int bar_reg_offset,
+ int nq_idx, int msix_vector, int bar_reg_offset,
int (*cqn_handler)(struct bnxt_qplib_nq *nq,
struct bnxt_qplib_cq *),
int (*srqn_handler)(struct bnxt_qplib_nq *nq,
void *, u8 event))
{
resource_size_t nq_base;
- int rc;
+ int rc = -1;
nq->pdev = pdev;
nq->vector = msix_vector;
@@ -227,14 +397,31 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
tasklet_init(&nq->worker, bnxt_qplib_service_nq, (unsigned long)nq);
+ /* Create a workqueue to schedule CQ notifiers in the post-send case */
+ nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
+ if (!nq->cqn_wq)
+ goto fail;
+
nq->requested = false;
- rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, "bnxt_qplib_nq", nq);
+ memset(nq->name, 0, 32);
+ sprintf(nq->name, "bnxt_qplib_nq-%d", nq_idx);
+ rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, nq->name, nq);
if (rc) {
dev_err(&nq->pdev->dev,
"Failed to request IRQ for NQ: %#x", rc);
bnxt_qplib_disable_nq(nq);
goto fail;
}
+
+ cpumask_clear(&nq->mask);
+ cpumask_set_cpu(nq_idx, &nq->mask);
+ rc = irq_set_affinity_hint(nq->vector, &nq->mask);
+ if (rc) {
+ dev_warn(&nq->pdev->dev,
+ "QPLIB: set affinity failed; vector: %d nq_idx: %d\n",
+ nq->vector, nq_idx);
+ }
+
nq->requested = true;
nq->bar_reg = NQ_CONS_PCI_BAR_REGION;
nq->bar_reg_off = bar_reg_offset;
@@ -258,8 +445,10 @@ fail:
void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
{
- if (nq->hwq.max_elements)
+ if (nq->hwq.max_elements) {
bnxt_qplib_free_hwq(nq->pdev, &nq->hwq);
+ nq->hwq.max_elements = 0;
+ }
}
int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq)
@@ -401,8 +590,8 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
qp->id = le32_to_cpu(resp.xid);
qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
- sq->flush_in_progress = false;
- rq->flush_in_progress = false;
+ rcfw->qp_tbl[qp->id].qp_id = qp->id;
+ rcfw->qp_tbl[qp->id].qp_handle = (void *)qp;
return 0;
@@ -615,8 +804,10 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
qp->id = le32_to_cpu(resp.xid);
qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
- sq->flush_in_progress = false;
- rq->flush_in_progress = false;
+ INIT_LIST_HEAD(&qp->sq_flush);
+ INIT_LIST_HEAD(&qp->rq_flush);
+ rcfw->qp_tbl[qp->id].qp_id = qp->id;
+ rcfw->qp_tbl[qp->id].qp_handle = (void *)qp;
return 0;
@@ -963,13 +1154,19 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
u16 cmd_flags = 0;
int rc;
+ rcfw->qp_tbl[qp->id].qp_id = BNXT_QPLIB_QP_ID_INVALID;
+ rcfw->qp_tbl[qp->id].qp_handle = NULL;
+
RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags);
req.qp_cid = cpu_to_le32(qp->id);
rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
(void *)&resp, NULL, 0);
- if (rc)
+ if (rc) {
+ rcfw->qp_tbl[qp->id].qp_id = qp->id;
+ rcfw->qp_tbl[qp->id].qp_handle = qp;
return rc;
+ }
/* Must walk the associated CQs to nullified the QP ptr */
spin_lock_irqsave(&qp->scq->hwq.lock, flags);
@@ -1074,14 +1271,21 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
struct bnxt_qplib_swq *swq;
struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr;
struct sq_sge *hw_sge;
+ struct bnxt_qplib_nq_work *nq_work = NULL;
+ bool sch_handler = false;
u32 sw_prod;
u8 wqe_size16;
int i, rc = 0, data_len = 0, pkt_num = 0;
__le32 temp32;
if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS) {
- rc = -EINVAL;
- goto done;
+ if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
+ sch_handler = true;
+ dev_dbg(&sq->hwq.pdev->dev,
+ "%s Error QP. Scheduling for poll_cq\n",
+ __func__);
+ goto queue_err;
+ }
}
if (bnxt_qplib_queue_full(sq)) {
@@ -1128,6 +1332,11 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
}
/* Each SGE entry = 1 WQE size16 */
wqe_size16 = wqe->num_sge;
+ /* HW requires the WQE size to have room for at least one SGE even if
+ * none was supplied by the ULP
+ */
+ if (!wqe->num_sge)
+ wqe_size16++;
}
/* Specifics */
@@ -1296,12 +1505,35 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
SQ_PSN_SEARCH_NEXT_PSN_MASK));
}
-
+queue_err:
+ if (sch_handler) {
+ /* Store the ULP info in the software structures */
+ sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
+ swq = &sq->swq[sw_prod];
+ swq->wr_id = wqe->wr_id;
+ swq->type = wqe->type;
+ swq->flags = wqe->flags;
+ if (qp->sig_type)
+ swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
+ swq->start_psn = sq->psn & BTH_PSN_MASK;
+ }
sq->hwq.prod++;
-
qp->wqe_cnt++;
done:
+ if (sch_handler) {
+ nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
+ if (nq_work) {
+ nq_work->cq = qp->scq;
+ nq_work->nq = qp->scq->nq;
+ INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
+ queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
+ } else {
+ dev_err(&sq->hwq.pdev->dev,
+ "QPLIB: FP: Failed to allocate SQ nq_work!");
+ rc = -ENOMEM;
+ }
+ }
return rc;
}
@@ -1329,15 +1561,17 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
struct bnxt_qplib_q *rq = &qp->rq;
struct rq_wqe *rqe, **rqe_ptr;
struct sq_sge *hw_sge;
+ struct bnxt_qplib_nq_work *nq_work = NULL;
+ bool sch_handler = false;
u32 sw_prod;
int i, rc = 0;
if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
- dev_err(&rq->hwq.pdev->dev,
- "QPLIB: FP: QP (0x%x) is in the 0x%x state",
- qp->id, qp->state);
- rc = -EINVAL;
- goto done;
+ sch_handler = true;
+ dev_dbg(&rq->hwq.pdev->dev,
+ "%s Error QP. Scheduling for poll_cq\n",
+ __func__);
+ goto queue_err;
}
if (bnxt_qplib_queue_full(rq)) {
dev_err(&rq->hwq.pdev->dev,
@@ -1364,11 +1598,36 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
rqe->flags = wqe->flags;
rqe->wqe_size = wqe->num_sge +
((offsetof(typeof(*rqe), data) + 15) >> 4);
+ /* HW requires the WQE size to have room for at least one SGE even if
+ * none was supplied by the ULP
+ */
+ if (!wqe->num_sge)
+ rqe->wqe_size++;
/* Supply the rqe->wr_id index to the wr_id_tbl for now */
rqe->wr_id[0] = cpu_to_le32(sw_prod);
+queue_err:
+ if (sch_handler) {
+ /* Store the ULP info in the software structures */
+ sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
+ rq->swq[sw_prod].wr_id = wqe->wr_id;
+ }
+
rq->hwq.prod++;
+ if (sch_handler) {
+ nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
+ if (nq_work) {
+ nq_work->cq = qp->rcq;
+ nq_work->nq = qp->rcq->nq;
+ INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
+ queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
+ } else {
+ dev_err(&rq->hwq.pdev->dev,
+ "QPLIB: FP: Failed to allocate RQ nq_work!");
+ rc = -ENOMEM;
+ }
+ }
done:
return rc;
}
@@ -1461,6 +1720,9 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
cq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem;
cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
init_waitqueue_head(&cq->waitq);
+ INIT_LIST_HEAD(&cq->sqf_head);
+ INIT_LIST_HEAD(&cq->rqf_head);
+ spin_lock_init(&cq->compl_lock);
bnxt_qplib_arm_cq_enable(cq);
return 0;
@@ -1503,9 +1765,13 @@ static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
while (*budget) {
sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
if (sw_cons == sw_prod) {
- sq->flush_in_progress = false;
break;
}
+ /* Skip the FENCE WQE completions */
+ if (sq->swq[sw_cons].wr_id == BNXT_QPLIB_FENCE_WRID) {
+ bnxt_qplib_cancel_phantom_processing(qp);
+ goto skip_compl;
+ }
memset(cqe, 0, sizeof(*cqe));
cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
@@ -1515,6 +1781,7 @@ static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
cqe->type = sq->swq[sw_cons].type;
cqe++;
(*budget)--;
+skip_compl:
sq->hwq.cons++;
}
*pcqe = cqe;
@@ -1526,11 +1793,24 @@ static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
}
static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
- int opcode, struct bnxt_qplib_cqe **pcqe, int *budget)
+ struct bnxt_qplib_cqe **pcqe, int *budget)
{
struct bnxt_qplib_cqe *cqe;
u32 sw_prod, sw_cons;
int rc = 0;
+ int opcode = 0;
+
+ switch (qp->type) {
+ case CMDQ_CREATE_QP1_TYPE_GSI:
+ opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
+ break;
+ case CMDQ_CREATE_QP_TYPE_RC:
+ opcode = CQ_BASE_CQE_TYPE_RES_RC;
+ break;
+ case CMDQ_CREATE_QP_TYPE_UD:
+ opcode = CQ_BASE_CQE_TYPE_RES_UD;
+ break;
+ }
/* Flush the rest of the RQ */
sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
@@ -1557,6 +1837,21 @@ static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
return rc;
}
+void bnxt_qplib_mark_qp_error(void *qp_handle)
+{
+ struct bnxt_qplib_qp *qp = qp_handle;
+
+ if (!qp)
+ return;
+
+ /* Must block new posting of SQ and RQ */
+ qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
+ bnxt_qplib_cancel_phantom_processing(qp);
+
+ /* Add qp to flush list of the CQ */
+ __bnxt_qplib_add_flush_qp(qp);
+}
+
/* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
* CQE is track from sw_cq_cons to max_element but valid only if VALID=1
*/
@@ -1684,10 +1979,12 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
cqe_sq_cons, sq->hwq.max_elements);
return -EINVAL;
}
- /* If we were in the middle of flushing the SQ, continue */
- if (sq->flush_in_progress)
- goto flush;
+ if (qp->sq.flushed) {
+ dev_dbg(&cq->hwq.pdev->dev,
+ "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
+ goto done;
+ }
/* Require to walk the sq's swq to fabricate CQEs for all previously
* signaled SWQEs due to CQE aggregation from the current sq cons
* to the cqe_sq_cons
@@ -1723,11 +2020,9 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
sw_sq_cons, cqe->wr_id, cqe->status);
cqe++;
(*budget)--;
- sq->flush_in_progress = true;
- /* Must block new posting of SQ and RQ */
- qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
- sq->condition = false;
- sq->single = false;
+ bnxt_qplib_lock_buddy_cq(qp, cq);
+ bnxt_qplib_mark_qp_error(qp);
+ bnxt_qplib_unlock_buddy_cq(qp, cq);
} else {
if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
/* Before we complete, do WA 9060 */
@@ -1758,15 +2053,6 @@ out:
* the WC for this CQE
*/
sq->single = false;
- if (!sq->flush_in_progress)
- goto done;
-flush:
- /* Require to walk the sq's swq to fabricate CQEs for all
- * previously posted SWQEs due to the error CQE received
- */
- rc = __flush_sq(sq, qp, pcqe, budget);
- if (!rc)
- sq->flush_in_progress = false;
done:
return rc;
}
@@ -1788,6 +2074,12 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
dev_err(&cq->hwq.pdev->dev, "QPLIB: process_cq RC qp is NULL");
return -EINVAL;
}
+ if (qp->rq.flushed) {
+ dev_dbg(&cq->hwq.pdev->dev,
+ "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
+ goto done;
+ }
+
cqe = *pcqe;
cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
cqe->length = le32_to_cpu(hwcqe->length);
@@ -1807,8 +2099,6 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
wr_id_idx, rq->hwq.max_elements);
return -EINVAL;
}
- if (rq->flush_in_progress)
- goto flush_rq;
cqe->wr_id = rq->swq[wr_id_idx].wr_id;
cqe++;
@@ -1817,12 +2107,13 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
*pcqe = cqe;
if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
- rq->flush_in_progress = true;
-flush_rq:
- rc = __flush_rq(rq, qp, CQ_BASE_CQE_TYPE_RES_RC, pcqe, budget);
- if (!rc)
- rq->flush_in_progress = false;
+ /* Add qp to flush list of the CQ */
+ bnxt_qplib_lock_buddy_cq(qp, cq);
+ __bnxt_qplib_add_flush_qp(qp);
+ bnxt_qplib_unlock_buddy_cq(qp, cq);
}
+
+done:
return rc;
}
@@ -1843,6 +2134,11 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
dev_err(&cq->hwq.pdev->dev, "QPLIB: process_cq UD qp is NULL");
return -EINVAL;
}
+ if (qp->rq.flushed) {
+ dev_dbg(&cq->hwq.pdev->dev,
+ "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
+ goto done;
+ }
cqe = *pcqe;
cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
cqe->length = le32_to_cpu(hwcqe->length);
@@ -1866,8 +2162,6 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
wr_id_idx, rq->hwq.max_elements);
return -EINVAL;
}
- if (rq->flush_in_progress)
- goto flush_rq;
cqe->wr_id = rq->swq[wr_id_idx].wr_id;
cqe++;
@@ -1876,12 +2170,31 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
*pcqe = cqe;
if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
- rq->flush_in_progress = true;
-flush_rq:
- rc = __flush_rq(rq, qp, CQ_BASE_CQE_TYPE_RES_UD, pcqe, budget);
- if (!rc)
- rq->flush_in_progress = false;
+ /* Add qp to flush list of the CQ */
+ bnxt_qplib_lock_buddy_cq(qp, cq);
+ __bnxt_qplib_add_flush_qp(qp);
+ bnxt_qplib_unlock_buddy_cq(qp, cq);
}
+done:
+ return rc;
+}
+
+bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
+{
+ struct cq_base *hw_cqe, **hw_cqe_ptr;
+ unsigned long flags;
+ u32 sw_cons, raw_cons;
+ bool rc = true;
+
+ spin_lock_irqsave(&cq->hwq.lock, flags);
+ raw_cons = cq->hwq.cons;
+ sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
+ hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
+ hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];
+
+ /* Check for Valid bit. If the CQE is valid, return false */
+ rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
+ spin_unlock_irqrestore(&cq->hwq.lock, flags);
return rc;
}
@@ -1903,6 +2216,11 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
"QPLIB: process_cq Raw/QP1 qp is NULL");
return -EINVAL;
}
+ if (qp->rq.flushed) {
+ dev_dbg(&cq->hwq.pdev->dev,
+ "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
+ goto done;
+ }
cqe = *pcqe;
cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
cqe->flags = le16_to_cpu(hwcqe->flags);
@@ -1931,8 +2249,6 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
wr_id_idx, rq->hwq.max_elements);
return -EINVAL;
}
- if (rq->flush_in_progress)
- goto flush_rq;
cqe->wr_id = rq->swq[wr_id_idx].wr_id;
cqe++;
@@ -1941,13 +2257,13 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
*pcqe = cqe;
if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
- rq->flush_in_progress = true;
-flush_rq:
- rc = __flush_rq(rq, qp, CQ_BASE_CQE_TYPE_RES_RAWETH_QP1, pcqe,
- budget);
- if (!rc)
- rq->flush_in_progress = false;
+ /* Add qp to flush list of the CQ */
+ bnxt_qplib_lock_buddy_cq(qp, cq);
+ __bnxt_qplib_add_flush_qp(qp);
+ bnxt_qplib_unlock_buddy_cq(qp, cq);
}
+
+done:
return rc;
}
@@ -1961,7 +2277,6 @@ static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
struct bnxt_qplib_cqe *cqe;
u32 sw_cons = 0, cqe_cons;
int rc = 0;
- u8 opcode = 0;
/* Check the Status */
if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
@@ -1976,6 +2291,7 @@ static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
"QPLIB: FP: CQ Process terminal qp is NULL");
return -EINVAL;
}
+
/* Must block new posting of SQ and RQ */
qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
@@ -1994,9 +2310,12 @@ static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
cqe_cons, sq->hwq.max_elements);
goto do_rq;
}
- /* If we were in the middle of flushing, continue */
- if (sq->flush_in_progress)
- goto flush_sq;
+
+ if (qp->sq.flushed) {
+ dev_dbg(&cq->hwq.pdev->dev,
+ "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
+ goto sq_done;
+ }
/* Terminal CQE can also include aggregated successful CQEs prior.
* So we must complete all CQEs from the current sq's cons to the
@@ -2026,11 +2345,6 @@ static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
rc = -EAGAIN;
goto sq_done;
}
- sq->flush_in_progress = true;
-flush_sq:
- rc = __flush_sq(sq, qp, pcqe, budget);
- if (!rc)
- sq->flush_in_progress = false;
sq_done:
if (rc)
return rc;
@@ -2046,26 +2360,23 @@ do_rq:
cqe_cons, rq->hwq.max_elements);
goto done;
}
+
+ if (qp->rq.flushed) {
+ dev_dbg(&cq->hwq.pdev->dev,
+ "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
+ rc = 0;
+ goto done;
+ }
+
/* Terminal CQE requires all posted RQEs to complete with FLUSHED_ERR
* from the current rq->cons to the rq->prod regardless what the
* rq->cons the terminal CQE indicates
*/
- rq->flush_in_progress = true;
- switch (qp->type) {
- case CMDQ_CREATE_QP1_TYPE_GSI:
- opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
- break;
- case CMDQ_CREATE_QP_TYPE_RC:
- opcode = CQ_BASE_CQE_TYPE_RES_RC;
- break;
- case CMDQ_CREATE_QP_TYPE_UD:
- opcode = CQ_BASE_CQE_TYPE_RES_UD;
- break;
- }
- rc = __flush_rq(rq, qp, opcode, pcqe, budget);
- if (!rc)
- rq->flush_in_progress = false;
+ /* Add qp to flush list of the CQ */
+ bnxt_qplib_lock_buddy_cq(qp, cq);
+ __bnxt_qplib_add_flush_qp(qp);
+ bnxt_qplib_unlock_buddy_cq(qp, cq);
done:
return rc;
}
@@ -2086,6 +2397,33 @@ static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
return 0;
}
+int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
+ struct bnxt_qplib_cqe *cqe,
+ int num_cqes)
+{
+ struct bnxt_qplib_qp *qp = NULL;
+ u32 budget = num_cqes;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cq->hwq.lock, flags);
+ list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
+ dev_dbg(&cq->hwq.pdev->dev,
+ "QPLIB: FP: Flushing SQ QP= %p",
+ qp);
+ __flush_sq(&qp->sq, qp, &cqe, &budget);
+ }
+
+ list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
+ dev_dbg(&cq->hwq.pdev->dev,
+ "QPLIB: FP: Flushing RQ QP= %p",
+ qp);
+ __flush_rq(&qp->rq, qp, &cqe, &budget);
+ }
+ spin_unlock_irqrestore(&cq->hwq.lock, flags);
+
+ return num_cqes - budget;
+}
+
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
int num_cqes, struct bnxt_qplib_qp **lib_qp)
{
@@ -2176,6 +2514,7 @@ void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
spin_lock_irqsave(&cq->hwq.lock, flags);
if (arm_type)
bnxt_qplib_arm_cq(cq, arm_type);
-
+ /* Use cq->arm_state to track whether the CQ handler should be invoked */
+ atomic_set(&cq->arm_state, 1);
spin_unlock_irqrestore(&cq->hwq.lock, flags);
}
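A note on the locking pattern added above: bnxt_qplib_acquire_cq_locks() and bnxt_qplib_lock_buddy_cq() always take the send-CQ lock first and the receive-CQ lock second, skipping the second lock when both pointers refer to the same CQ; the __acquire()/__release() calls only keep sparse's lock annotations balanced. The following is a minimal, hypothetical userspace sketch of that fixed ordering using pthread mutexes — the names and structures are illustrative only and are not part of the driver.

/*
 * Minimal userspace sketch (not driver code) of the fixed lock
 * ordering used above: send CQ lock first, then receive CQ lock,
 * with the second lock skipped when both CQs are the same object.
 * Build with: cc -pthread cq_lock_order.c
 */
#include <pthread.h>
#include <stdio.h>

struct cq {
	pthread_mutex_t lock;
	int id;
};

struct qp {
	struct cq *scq;		/* send completion queue */
	struct cq *rcq;		/* receive completion queue, may equal scq */
};

static void acquire_cq_locks(struct qp *qp)
{
	pthread_mutex_lock(&qp->scq->lock);
	if (qp->rcq != qp->scq)
		pthread_mutex_lock(&qp->rcq->lock);
}

static void release_cq_locks(struct qp *qp)
{
	if (qp->rcq != qp->scq)
		pthread_mutex_unlock(&qp->rcq->lock);
	pthread_mutex_unlock(&qp->scq->lock);
}

int main(void)
{
	struct cq scq = { .id = 0 }, rcq = { .id = 1 };
	struct qp qp = { .scq = &scq, .rcq = &rcq };

	pthread_mutex_init(&scq.lock, NULL);
	pthread_mutex_init(&rcq.lock, NULL);

	acquire_cq_locks(&qp);
	printf("both CQ locks held; safe to move the QP to the flush lists\n");
	release_cq_locks(&qp);
	return 0;
}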
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index 36b7b7db0e3f..8ead70ca1c1d 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -220,19 +220,20 @@ struct bnxt_qplib_q {
u16 q_full_delta;
u16 max_sge;
u32 psn;
- bool flush_in_progress;
bool condition;
bool single;
bool send_phantom;
u32 phantom_wqe_cnt;
u32 phantom_cqe_cnt;
u32 next_cq_cons;
+ bool flushed;
};
struct bnxt_qplib_qp {
struct bnxt_qplib_pd *pd;
struct bnxt_qplib_dpi *dpi;
u64 qp_handle;
+#define BNXT_QPLIB_QP_ID_INVALID 0xFFFFFFFF
u32 id;
u8 type;
u8 sig_type;
@@ -296,6 +297,8 @@ struct bnxt_qplib_qp {
dma_addr_t sq_hdr_buf_map;
void *rq_hdr_buf;
dma_addr_t rq_hdr_buf_map;
+ struct list_head sq_flush;
+ struct list_head rq_flush;
};
#define BNXT_QPLIB_MAX_CQE_ENTRY_SIZE sizeof(struct cq_base)
@@ -351,6 +354,7 @@ struct bnxt_qplib_cq {
u16 period;
struct bnxt_qplib_hwq hwq;
u32 cnq_hw_ring_id;
+ struct bnxt_qplib_nq *nq;
bool resize_in_progress;
struct scatterlist *sghead;
u32 nmap;
@@ -360,6 +364,9 @@ struct bnxt_qplib_cq {
unsigned long flags;
#define CQ_FLAGS_RESIZE_IN_PROG 1
wait_queue_head_t waitq;
+ struct list_head sqf_head, rqf_head;
+ atomic_t arm_state;
+ spinlock_t compl_lock; /* synch CQ handlers */
};
#define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE sizeof(struct xrrq_irrq)
@@ -400,6 +407,7 @@ struct bnxt_qplib_nq {
struct pci_dev *pdev;
int vector;
+ cpumask_t mask;
int budget;
bool requested;
struct tasklet_struct worker;
@@ -417,11 +425,19 @@ struct bnxt_qplib_nq {
(struct bnxt_qplib_nq *nq,
void *srq,
u8 event);
+ struct workqueue_struct *cqn_wq;
+ char name[32];
+};
+
+struct bnxt_qplib_nq_work {
+ struct work_struct work;
+ struct bnxt_qplib_nq *nq;
+ struct bnxt_qplib_cq *cq;
};
void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq);
int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
- int msix_vector, int bar_reg_offset,
+ int nq_idx, int msix_vector, int bar_reg_offset,
int (*cqn_handler)(struct bnxt_qplib_nq *nq,
struct bnxt_qplib_cq *cq),
int (*srqn_handler)(struct bnxt_qplib_nq *nq,
@@ -449,7 +465,17 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
int num, struct bnxt_qplib_qp **qp);
+bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq);
void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type);
void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq);
+void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp);
+void bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp);
+void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp,
+ unsigned long *flags);
+void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp,
+ unsigned long *flags);
+int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
+ struct bnxt_qplib_cqe *cqe,
+ int num_cqes);
#endif /* __BNXT_QPLIB_FP_H__ */
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 16e42754dbec..391bb7006e8f 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -44,6 +44,9 @@
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_rcfw.h"
+#include "qplib_sp.h"
+#include "qplib_fp.h"
+
static void bnxt_qplib_service_creq(unsigned long data);
/* Hardware communication channel */
@@ -279,16 +282,29 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
struct creq_qp_event *qp_event)
{
struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
+ struct creq_qp_error_notification *err_event;
struct bnxt_qplib_crsq *crsqe;
unsigned long flags;
+ struct bnxt_qplib_qp *qp;
u16 cbit, blocked = 0;
u16 cookie;
__le16 mcookie;
+ u32 qp_id;
switch (qp_event->event) {
case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
+ err_event = (struct creq_qp_error_notification *)qp_event;
+ qp_id = le32_to_cpu(err_event->xid);
+ qp = rcfw->qp_tbl[qp_id].qp_handle;
dev_dbg(&rcfw->pdev->dev,
"QPLIB: Received QP error notification");
+ dev_dbg(&rcfw->pdev->dev,
+ "QPLIB: qpid 0x%x, req_err=0x%x, resp_err=0x%x\n",
+ qp_id, err_event->req_err_state_reason,
+ err_event->res_err_state_reason);
+ bnxt_qplib_acquire_cq_locks(qp, &flags);
+ bnxt_qplib_mark_qp_error(qp);
+ bnxt_qplib_release_cq_locks(qp, &flags);
break;
default:
/* Command Response */
@@ -507,6 +523,7 @@ skip_ctx_setup:
void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
+ kfree(rcfw->qp_tbl);
kfree(rcfw->crsqe_tbl);
bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->cmdq);
bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->creq);
@@ -514,7 +531,8 @@ void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
}
int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
- struct bnxt_qplib_rcfw *rcfw)
+ struct bnxt_qplib_rcfw *rcfw,
+ int qp_tbl_sz)
{
rcfw->pdev = pdev;
rcfw->creq.max_elements = BNXT_QPLIB_CREQE_MAX_CNT;
@@ -541,6 +559,12 @@ int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
if (!rcfw->crsqe_tbl)
goto fail;
+ rcfw->qp_tbl_size = qp_tbl_sz;
+ rcfw->qp_tbl = kcalloc(qp_tbl_sz, sizeof(struct bnxt_qplib_qp_node),
+ GFP_KERNEL);
+ if (!rcfw->qp_tbl)
+ goto fail;
+
return 0;
fail:
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 09ce121770cd..0ed312f17c8d 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -148,6 +148,11 @@ struct bnxt_qplib_rcfw_sbuf {
u32 size;
};
+struct bnxt_qplib_qp_node {
+ u32 qp_id; /* QP id */
+ void *qp_handle; /* ptr to qplib_qp */
+};
+
/* RCFW Communication Channels */
struct bnxt_qplib_rcfw {
struct pci_dev *pdev;
@@ -181,11 +186,13 @@ struct bnxt_qplib_rcfw {
/* Actual Cmd and Resp Queues */
struct bnxt_qplib_hwq cmdq;
struct bnxt_qplib_crsq *crsqe_tbl;
+ int qp_tbl_size;
+ struct bnxt_qplib_qp_node *qp_tbl;
};
void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
- struct bnxt_qplib_rcfw *rcfw);
+ struct bnxt_qplib_rcfw *rcfw, int qp_tbl_sz);
void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
struct bnxt_qplib_rcfw *rcfw,
@@ -207,4 +214,5 @@ int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_ctx *ctx, int is_virtfn);
+void bnxt_qplib_mark_qp_error(void *qp_handle);
#endif /* __BNXT_QPLIB_RCFW_H__ */
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 62447b3badec..4e101704e801 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -468,9 +468,11 @@ static void bnxt_qplib_free_sgid_tbl(struct bnxt_qplib_res *res,
kfree(sgid_tbl->tbl);
kfree(sgid_tbl->hw_id);
kfree(sgid_tbl->ctx);
+ kfree(sgid_tbl->vlan);
sgid_tbl->tbl = NULL;
sgid_tbl->hw_id = NULL;
sgid_tbl->ctx = NULL;
+ sgid_tbl->vlan = NULL;
sgid_tbl->max = 0;
sgid_tbl->active = 0;
}
@@ -491,8 +493,15 @@ static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res,
if (!sgid_tbl->ctx)
goto out_free2;
+ sgid_tbl->vlan = kcalloc(max, sizeof(u8), GFP_KERNEL);
+ if (!sgid_tbl->vlan)
+ goto out_free3;
+
sgid_tbl->max = max;
return 0;
+out_free3:
+ kfree(sgid_tbl->ctx);
+ sgid_tbl->ctx = NULL;
out_free2:
kfree(sgid_tbl->hw_id);
sgid_tbl->hw_id = NULL;
@@ -514,6 +523,7 @@ static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
}
memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max);
memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
+ memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max);
sgid_tbl->active = 0;
}
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index 2e4855509719..e87207526d2c 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -116,6 +116,7 @@ struct bnxt_qplib_sgid_tbl {
u16 max;
u16 active;
void *ctx;
+ u8 *vlan;
};
struct bnxt_qplib_pkey_tbl {
@@ -188,6 +189,7 @@ struct bnxt_qplib_res {
struct bnxt_qplib_sgid_tbl sgid_tbl;
struct bnxt_qplib_pkey_tbl pkey_tbl;
struct bnxt_qplib_dpi_tbl dpi_tbl;
+ bool prio;
};
#define to_bnxt_qplib(ptr, type, member) \
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index fde18cf0e406..e277e54a05eb 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -51,6 +51,19 @@ const struct bnxt_qplib_gid bnxt_qplib_gid_zero = {{ 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0 } };
/* Device */
+
+static bool bnxt_qplib_is_atomic_cap(struct bnxt_qplib_rcfw *rcfw)
+{
+ int rc;
+ u16 pcie_ctl2;
+
+ rc = pcie_capability_read_word(rcfw->pdev, PCI_EXP_DEVCTL2,
+ &pcie_ctl2);
+ if (rc)
+ return false;
+ return !!(pcie_ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
+}
+
int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_dev_attr *attr)
{
@@ -81,6 +94,8 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
/* Extract the context from the side buffer */
attr->max_qp = le32_to_cpu(sb->max_qp);
+ /* The max_qp value FW reports for the PF does not include the PF's QP1 */
+ attr->max_qp += 1;
attr->max_qp_rd_atom =
sb->max_qp_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_rd_atom;
@@ -129,6 +144,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc);
}
+ attr->is_atomic = bnxt_qplib_is_atomic_cap(rcfw);
bail:
bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
return rc;
@@ -197,6 +213,7 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
}
memcpy(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
sizeof(bnxt_qplib_gid_zero));
+ sgid_tbl->vlan[index] = 0;
sgid_tbl->active--;
dev_dbg(&res->pdev->dev,
"QPLIB: SGID deleted hw_id[0x%x] = 0x%x active = 0x%x",
@@ -249,28 +266,32 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct cmdq_add_gid req;
struct creq_add_gid_resp resp;
u16 cmd_flags = 0;
- u32 temp32[4];
- u16 temp16[3];
int rc;
RCFW_CMD_PREP(req, ADD_GID, cmd_flags);
- memcpy(temp32, gid->data, sizeof(struct bnxt_qplib_gid));
- req.gid[0] = cpu_to_be32(temp32[3]);
- req.gid[1] = cpu_to_be32(temp32[2]);
- req.gid[2] = cpu_to_be32(temp32[1]);
- req.gid[3] = cpu_to_be32(temp32[0]);
- if (vlan_id != 0xFFFF)
- req.vlan = cpu_to_le16((vlan_id &
- CMDQ_ADD_GID_VLAN_VLAN_ID_MASK) |
- CMDQ_ADD_GID_VLAN_TPID_TPID_8100 |
- CMDQ_ADD_GID_VLAN_VLAN_EN);
+ req.gid[0] = cpu_to_be32(((u32 *)gid->data)[3]);
+ req.gid[1] = cpu_to_be32(((u32 *)gid->data)[2]);
+ req.gid[2] = cpu_to_be32(((u32 *)gid->data)[1]);
+ req.gid[3] = cpu_to_be32(((u32 *)gid->data)[0]);
+ /*
+ * The driver should ensure that all RoCE traffic is VLAN tagged
+ * whenever RoCE runs on a non-zero VLAN ID or with a non-zero
+ * priority.
+ */
+ if ((vlan_id != 0xFFFF) || res->prio) {
+ if (vlan_id != 0xFFFF)
+ req.vlan = cpu_to_le16
+ (vlan_id & CMDQ_ADD_GID_VLAN_VLAN_ID_MASK);
+ req.vlan |= cpu_to_le16
+ (CMDQ_ADD_GID_VLAN_TPID_TPID_8100 |
+ CMDQ_ADD_GID_VLAN_VLAN_EN);
+ }
/* MAC in network format */
- memcpy(temp16, smac, 6);
- req.src_mac[0] = cpu_to_be16(temp16[0]);
- req.src_mac[1] = cpu_to_be16(temp16[1]);
- req.src_mac[2] = cpu_to_be16(temp16[2]);
+ req.src_mac[0] = cpu_to_be16(((u16 *)smac)[0]);
+ req.src_mac[1] = cpu_to_be16(((u16 *)smac)[1]);
+ req.src_mac[2] = cpu_to_be16(((u16 *)smac)[2]);
rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
(void *)&resp, NULL, 0);
@@ -281,6 +302,9 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
/* Add GID to the sgid_tbl */
memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid));
sgid_tbl->active++;
+ if (vlan_id != 0xFFFF)
+ sgid_tbl->vlan[free_idx] = 1;
+
dev_dbg(&res->pdev->dev,
"QPLIB: SGID added hw_id[0x%x] = 0x%x active = 0x%x",
free_idx, sgid_tbl->hw_id[free_idx], sgid_tbl->active);
@@ -290,6 +314,43 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
return 0;
}
+int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
+ struct bnxt_qplib_gid *gid, u16 gid_idx,
+ u8 *smac)
+{
+ struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
+ struct bnxt_qplib_res,
+ sgid_tbl);
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct creq_modify_gid_resp resp;
+ struct cmdq_modify_gid req;
+ int rc;
+ u16 cmd_flags = 0;
+
+ RCFW_CMD_PREP(req, MODIFY_GID, cmd_flags);
+
+ req.gid[0] = cpu_to_be32(((u32 *)gid->data)[3]);
+ req.gid[1] = cpu_to_be32(((u32 *)gid->data)[2]);
+ req.gid[2] = cpu_to_be32(((u32 *)gid->data)[1]);
+ req.gid[3] = cpu_to_be32(((u32 *)gid->data)[0]);
+ if (res->prio) {
+ req.vlan |= cpu_to_le16
+ (CMDQ_ADD_GID_VLAN_TPID_TPID_8100 |
+ CMDQ_ADD_GID_VLAN_VLAN_EN);
+ }
+
+ /* MAC in network format */
+ req.src_mac[0] = cpu_to_be16(((u16 *)smac)[0]);
+ req.src_mac[1] = cpu_to_be16(((u16 *)smac)[1]);
+ req.src_mac[2] = cpu_to_be16(((u16 *)smac)[2]);
+
+ req.gid_index = cpu_to_le16(gid_idx);
+
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+ (void *)&resp, NULL, 0);
+ return rc;
+}
+
/* pkeys */
int bnxt_qplib_get_pkey(struct bnxt_qplib_res *res,
struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 index,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index a543f959098b..11322582f5e4 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -42,6 +42,8 @@
#define BNXT_QPLIB_RESERVED_QP_WRS 128
+#define PCI_EXP_DEVCTL2_ATOMIC_REQ 0x0040
+
struct bnxt_qplib_dev_attr {
char fw_ver[32];
u16 max_sgid;
@@ -70,6 +72,7 @@ struct bnxt_qplib_dev_attr {
u32 max_inline_data;
u32 l2_db_size;
u8 tqm_alloc_reqs[MAX_TQM_ALLOC_REQ];
+ bool is_atomic;
};
struct bnxt_qplib_pd {
@@ -132,6 +135,8 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct bnxt_qplib_gid *gid, u8 *mac, u16 vlan_id,
bool update, u32 *index);
+int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
+ struct bnxt_qplib_gid *gid, u16 gid_idx, u8 *smac);
int bnxt_qplib_get_pkey(struct bnxt_qplib_res *res,
struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 index,
u16 *pkey);
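For reference, the GID and MAC packing in bnxt_qplib_add_sgid()/bnxt_qplib_update_sgid() above writes the 16-byte GID into the request as four big-endian 32-bit words in reverse word order and the 6-byte MAC as three big-endian 16-bit words; the roce_hsi.h change that follows switches the field types to __be32/__be16 to match. Below is a small, hypothetical userspace sketch of the same conversion, with htonl()/htons() standing in for cpu_to_be32()/cpu_to_be16().

/* Hypothetical userspace sketch of the GID/MAC byte-order packing. */
#include <arpa/inet.h>		/* htonl, htons */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void pack_gid(const uint8_t gid[16], uint32_t out[4])
{
	uint32_t w[4];

	memcpy(w, gid, sizeof(w));
	/* reverse 32-bit word order, each word stored big endian */
	out[0] = htonl(w[3]);
	out[1] = htonl(w[2]);
	out[2] = htonl(w[1]);
	out[3] = htonl(w[0]);
}

static void pack_mac(const uint8_t mac[6], uint16_t out[3])
{
	uint16_t w[3];

	memcpy(w, mac, sizeof(w));
	out[0] = htons(w[0]);
	out[1] = htons(w[1]);
	out[2] = htons(w[2]);
}

int main(void)
{
	uint8_t gid[16] = { 0xfe, 0x80, 0, 0, 0, 0, 0, 0,
			    0x02, 0x11, 0x22, 0xff, 0xfe, 0x33, 0x44, 0x55 };
	uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint32_t g[4];
	uint16_t m[3];

	pack_gid(gid, g);
	pack_mac(mac, m);
	printf("gid word 0: %08x, mac word 0: %04x\n",
	       (unsigned)g[0], (unsigned)m[0]);
	return 0;
}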
diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
index fc23477ac52f..eeb55b2db57e 100644
--- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h
+++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
@@ -1473,8 +1473,8 @@ struct cmdq_modify_gid {
u8 resp_size;
u8 reserved8;
__le64 resp_addr;
- __le32 gid[4];
- __le16 src_mac[3];
+ __be32 gid[4];
+ __be16 src_mac[3];
__le16 vlan;
#define CMDQ_MODIFY_GID_VLAN_VLAN_ID_MASK 0xfffUL
#define CMDQ_MODIFY_GID_VLAN_VLAN_ID_SFT 0
diff --git a/drivers/infiniband/hw/cxgb3/iwch.c b/drivers/infiniband/hw/cxgb3/iwch.c
index 47b2ce2ef203..591de319c178 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.c
+++ b/drivers/infiniband/hw/cxgb3/iwch.c
@@ -45,7 +45,6 @@
MODULE_AUTHOR("Boyd Faulkner, Steve Wise");
MODULE_DESCRIPTION("Chelsio T3 RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(DRV_VERSION);
static void open_rnic_dev(struct t3cdev *);
static void close_rnic_dev(struct t3cdev *);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 29d30744d6c9..099e76f3758a 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -718,7 +718,7 @@ static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd,
struct iwch_mr *mhp;
u32 mmid;
u32 stag = 0;
- int ret = 0;
+ int ret = -ENOMEM;
if (mr_type != IB_MR_TYPE_MEM_REG ||
max_num_sg > T3_MAX_FASTREG_DEPTH)
@@ -731,10 +731,8 @@ static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd,
goto err;
mhp->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
- if (!mhp->pages) {
- ret = -ENOMEM;
+ if (!mhp->pages)
goto pl_err;
- }
mhp->rhp = rhp;
ret = iwch_alloc_pbl(mhp, max_num_sg);
@@ -751,7 +749,8 @@ static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd,
mhp->attr.state = 1;
mmid = (stag) >> 8;
mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
- if (insert_handle(rhp, &rhp->mmidr, mhp, mmid))
+ ret = insert_handle(rhp, &rhp->mmidr, mhp, mmid);
+ if (ret)
goto err3;
pr_debug("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
@@ -1337,8 +1336,7 @@ static int iwch_port_immutable(struct ib_device *ibdev, u8 port_num,
return 0;
}
-static void get_dev_fw_ver_str(struct ib_device *ibdev, char *str,
- size_t str_len)
+static void get_dev_fw_ver_str(struct ib_device *ibdev, char *str)
{
struct iwch_dev *iwch_dev = to_iwch_dev(ibdev);
struct ethtool_drvinfo info;
@@ -1346,7 +1344,7 @@ static void get_dev_fw_ver_str(struct ib_device *ibdev, char *str,
pr_debug("%s dev 0x%p\n", __func__, iwch_dev);
lldev->ethtool_ops->get_drvinfo(lldev, &info);
- snprintf(str, str_len, "%s", info.fw_version);
+ snprintf(str, IB_FW_VERSION_NAME_MAX, "%s", info.fw_version);
}
int iwch_register_device(struct iwch_dev *dev)
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index e49b34c3b136..ceaa2fa54d32 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -2871,7 +2871,6 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
return 0;
pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
- BUG_ON(!ep);
/* The cm_id may be null if we failed to connect */
mutex_lock(&ep->com.mutex);
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index e16fcaf6b5a3..be07da1997e6 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -963,6 +963,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
goto err3;
if (ucontext) {
+ ret = -ENOMEM;
mm = kmalloc(sizeof *mm, GFP_KERNEL);
if (!mm)
goto err4;
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index ae0b79aeea2e..fc886f81b885 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -44,7 +44,6 @@
MODULE_AUTHOR("Steve Wise");
MODULE_DESCRIPTION("Chelsio T4/T5 RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(DRV_VERSION);
static int allow_db_fc_on_t5;
module_param(allow_db_fc_on_t5, int, 0644);
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 5332f06b99ba..c2fba76becd4 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -661,7 +661,7 @@ struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
rhp = php->rhp;
if (mr_type != IB_MR_TYPE_MEM_REG ||
- max_num_sg > t4_max_fr_depth(&rhp->rdev.lldi.ulptx_memwrite_dsgl &&
+ max_num_sg > t4_max_fr_depth(rhp->rdev.lldi.ulptx_memwrite_dsgl &&
use_dsgl))
return ERR_PTR(-EINVAL);
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 0771e9a4d061..346e8334279a 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -517,14 +517,13 @@ static int c4iw_port_immutable(struct ib_device *ibdev, u8 port_num,
return 0;
}
-static void get_dev_fw_str(struct ib_device *dev, char *str,
- size_t str_len)
+static void get_dev_fw_str(struct ib_device *dev, char *str)
{
struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
ibdev);
pr_debug("%s dev 0x%p\n", __func__, dev);
- snprintf(str, str_len, "%u.%u.%u.%u",
+ snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u.%u.%u",
FW_HDR_FW_VER_MAJOR_G(c4iw_dev->rdev.lldi.fw_vers),
FW_HDR_FW_VER_MINOR_G(c4iw_dev->rdev.lldi.fw_vers),
FW_HDR_FW_VER_MICRO_G(c4iw_dev->rdev.lldi.fw_vers),
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index bfc77596acbe..cb7fc0d35d1d 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -569,7 +569,7 @@ static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
{
if (wr->num_sge > 1)
return -EINVAL;
- if (wr->num_sge) {
+ if (wr->num_sge && wr->sg_list[0].length) {
wqe->read.stag_src = cpu_to_be32(rdma_wr(wr)->rkey);
wqe->read.to_src_hi = cpu_to_be32((u32)(rdma_wr(wr)->remote_addr
>> 32));
diff --git a/drivers/infiniband/hw/hfi1/Kconfig b/drivers/infiniband/hw/hfi1/Kconfig
index f6ea0881765a..7b146b67a80f 100644
--- a/drivers/infiniband/hw/hfi1/Kconfig
+++ b/drivers/infiniband/hw/hfi1/Kconfig
@@ -13,13 +13,6 @@ config HFI1_DEBUG_SDMA_ORDER
---help---
This is a debug flag to test for out of order
sdma completions for unit testing
-config HFI1_VERBS_31BIT_PSN
- bool "HFI1 enable 31 bit PSN"
- depends on INFINIBAND_HFI1
- default y
- ---help---
- Setting this enables 31 BIT PSN
- For verbs RC/UC
config SDMA_VERBOSITY
bool "Config SDMA Verbosity"
depends on INFINIBAND_HFI1
diff --git a/drivers/infiniband/hw/hfi1/Makefile b/drivers/infiniband/hw/hfi1/Makefile
index 88085f65432e..66d538c033b0 100644
--- a/drivers/infiniband/hw/hfi1/Makefile
+++ b/drivers/infiniband/hw/hfi1/Makefile
@@ -8,7 +8,7 @@
obj-$(CONFIG_INFINIBAND_HFI1) += hfi1.o
hfi1-y := affinity.o chip.o device.o driver.o efivar.o \
- eprom.o file_ops.o firmware.o \
+ eprom.o exp_rcv.o file_ops.o firmware.o \
init.o intr.o mad.o mmu_rb.o pcie.o pio.o pio_copy.o platform.o \
qp.o qsfp.o rc.o ruc.o sdma.o sysfs.o trace.o \
uc.o ud.o user_exp_rcv.o user_pages.o user_sdma.o verbs.o \
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index e2cd2cd3b28a..a97055dd4fbd 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2017 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -335,10 +335,10 @@ static void hfi1_update_sdma_affinity(struct hfi1_msix_entry *msix, int cpu)
sde->cpu = cpu;
cpumask_clear(&msix->mask);
cpumask_set_cpu(cpu, &msix->mask);
- dd_dev_dbg(dd, "IRQ vector: %u, type %s engine %u -> cpu: %d\n",
- msix->msix.vector, irq_type_names[msix->type],
+ dd_dev_dbg(dd, "IRQ: %u, type %s engine %u -> cpu: %d\n",
+ msix->irq, irq_type_names[msix->type],
sde->this_idx, cpu);
- irq_set_affinity_hint(msix->msix.vector, &msix->mask);
+ irq_set_affinity_hint(msix->irq, &msix->mask);
/*
* Set the new cpu in the hfi1_affinity_node and clean
@@ -387,7 +387,7 @@ static void hfi1_setup_sdma_notifier(struct hfi1_msix_entry *msix)
{
struct irq_affinity_notify *notify = &msix->notify;
- notify->irq = msix->msix.vector;
+ notify->irq = msix->irq;
notify->notify = hfi1_irq_notifier_notify;
notify->release = hfi1_irq_notifier_release;
@@ -472,10 +472,10 @@ static int get_irq_affinity(struct hfi1_devdata *dd,
}
cpumask_set_cpu(cpu, &msix->mask);
- dd_dev_info(dd, "IRQ vector: %u, type %s %s -> cpu: %d\n",
- msix->msix.vector, irq_type_names[msix->type],
+ dd_dev_info(dd, "IRQ: %u, type %s %s -> cpu: %d\n",
+ msix->irq, irq_type_names[msix->type],
extra, cpu);
- irq_set_affinity_hint(msix->msix.vector, &msix->mask);
+ irq_set_affinity_hint(msix->irq, &msix->mask);
if (msix->type == IRQ_SDMA) {
sde->cpu = cpu;
@@ -533,7 +533,7 @@ void hfi1_put_irq_affinity(struct hfi1_devdata *dd,
}
}
- irq_set_affinity_hint(msix->msix.vector, NULL);
+ irq_set_affinity_hint(msix->irq, NULL);
cpumask_clear(&msix->mask);
mutex_unlock(&node_affinity.lock);
}
diff --git a/drivers/infiniband/hw/hfi1/affinity.h b/drivers/infiniband/hw/hfi1/affinity.h
index e78c7aa094e0..2a1e374169c0 100644
--- a/drivers/infiniband/hw/hfi1/affinity.h
+++ b/drivers/infiniband/hw/hfi1/affinity.h
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2017 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -75,24 +75,26 @@ struct hfi1_msix_entry;
/* Initialize non-HT cpu cores mask */
void init_real_cpu_mask(void);
/* Initialize driver affinity data */
-int hfi1_dev_affinity_init(struct hfi1_devdata *);
+int hfi1_dev_affinity_init(struct hfi1_devdata *dd);
/*
* Set IRQ affinity to a CPU. The function will determine the
* CPU and set the affinity to it.
*/
-int hfi1_get_irq_affinity(struct hfi1_devdata *, struct hfi1_msix_entry *);
+int hfi1_get_irq_affinity(struct hfi1_devdata *dd,
+ struct hfi1_msix_entry *msix);
/*
* Remove the IRQ's CPU affinity. This function also updates
* any internal CPU tracking data
*/
-void hfi1_put_irq_affinity(struct hfi1_devdata *, struct hfi1_msix_entry *);
+void hfi1_put_irq_affinity(struct hfi1_devdata *dd,
+ struct hfi1_msix_entry *msix);
/*
* Determine a CPU affinity for a user process, if the process does not
* have an affinity set yet.
*/
-int hfi1_get_proc_affinity(int);
+int hfi1_get_proc_affinity(int node);
/* Release a CPU used by a user process. */
-void hfi1_put_proc_affinity(int);
+void hfi1_put_proc_affinity(int cpu);
struct hfi1_affinity_node {
int node;
diff --git a/drivers/infiniband/hw/hfi1/aspm.h b/drivers/infiniband/hw/hfi1/aspm.h
index 794e6814a531..522b40ed9937 100644
--- a/drivers/infiniband/hw/hfi1/aspm.h
+++ b/drivers/infiniband/hw/hfi1/aspm.h
@@ -237,14 +237,17 @@ static inline void aspm_disable_all(struct hfi1_devdata *dd)
{
struct hfi1_ctxtdata *rcd;
unsigned long flags;
- unsigned i;
+ u16 i;
for (i = 0; i < dd->first_dyn_alloc_ctxt; i++) {
- rcd = dd->rcd[i];
- del_timer_sync(&rcd->aspm_timer);
- spin_lock_irqsave(&rcd->aspm_lock, flags);
- rcd->aspm_intr_enable = false;
- spin_unlock_irqrestore(&rcd->aspm_lock, flags);
+ rcd = hfi1_rcd_get_by_index(dd, i);
+ if (rcd) {
+ del_timer_sync(&rcd->aspm_timer);
+ spin_lock_irqsave(&rcd->aspm_lock, flags);
+ rcd->aspm_intr_enable = false;
+ spin_unlock_irqrestore(&rcd->aspm_lock, flags);
+ hfi1_rcd_put(rcd);
+ }
}
aspm_disable(dd);
@@ -256,7 +259,7 @@ static inline void aspm_enable_all(struct hfi1_devdata *dd)
{
struct hfi1_ctxtdata *rcd;
unsigned long flags;
- unsigned i;
+ u16 i;
aspm_enable(dd);
@@ -264,11 +267,14 @@ static inline void aspm_enable_all(struct hfi1_devdata *dd)
return;
for (i = 0; i < dd->first_dyn_alloc_ctxt; i++) {
- rcd = dd->rcd[i];
- spin_lock_irqsave(&rcd->aspm_lock, flags);
- rcd->aspm_intr_enable = true;
- rcd->aspm_enabled = true;
- spin_unlock_irqrestore(&rcd->aspm_lock, flags);
+ rcd = hfi1_rcd_get_by_index(dd, i);
+ if (rcd) {
+ spin_lock_irqsave(&rcd->aspm_lock, flags);
+ rcd->aspm_intr_enable = true;
+ rcd->aspm_enabled = true;
+ spin_unlock_irqrestore(&rcd->aspm_lock, flags);
+ hfi1_rcd_put(rcd);
+ }
}
}
@@ -284,13 +290,18 @@ static inline void aspm_ctx_init(struct hfi1_ctxtdata *rcd)
static inline void aspm_init(struct hfi1_devdata *dd)
{
- unsigned i;
+ struct hfi1_ctxtdata *rcd;
+ u16 i;
spin_lock_init(&dd->aspm_lock);
dd->aspm_supported = aspm_hw_l1_supported(dd);
- for (i = 0; i < dd->first_dyn_alloc_ctxt; i++)
- aspm_ctx_init(dd->rcd[i]);
+ for (i = 0; i < dd->first_dyn_alloc_ctxt; i++) {
+ rcd = hfi1_rcd_get_by_index(dd, i);
+ if (rcd)
+ aspm_ctx_init(rcd);
+ hfi1_rcd_put(rcd);
+ }
/* Start with ASPM disabled */
aspm_hw_set_l1_ent_latency(dd);
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 2ba00b89df6a..b2ed4b9cda6e 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -1012,14 +1012,15 @@ static struct flag_table dc8051_info_err_flags[] = {
*/
static struct flag_table dc8051_info_host_msg_flags[] = {
FLAG_ENTRY0("Host request done", 0x0001),
- FLAG_ENTRY0("BC SMA message", 0x0002),
- FLAG_ENTRY0("BC PWR_MGM message", 0x0004),
+ FLAG_ENTRY0("BC PWR_MGM message", 0x0002),
+ FLAG_ENTRY0("BC SMA message", 0x0004),
FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
FLAG_ENTRY0("External device config request", 0x0020),
FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
FLAG_ENTRY0("LinkUp achieved", 0x0080),
FLAG_ENTRY0("Link going down", 0x0100),
+ FLAG_ENTRY0("Link width downgraded", 0x0200),
};
static u32 encoded_size(u32 size);
@@ -1064,8 +1065,13 @@ static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
static int thermal_init(struct hfi1_devdata *dd);
+static void update_statusp(struct hfi1_pportdata *ppd, u32 state);
static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
int msecs);
+static void log_state_transition(struct hfi1_pportdata *ppd, u32 state);
+static void log_physical_state(struct hfi1_pportdata *ppd, u32 state);
+static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
+ int msecs);
static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
static void handle_temp_err(struct hfi1_devdata *dd);
@@ -1294,25 +1300,71 @@ CNTR_ELEM(#name, \
CNTR_SYNTH, \
access_ibp_##cntr)
+/**
+ * hfi1_addr_from_offset - return addr for readq/writeq
+ * @dd - the dd device
+ * @offset - the offset of the CSR within bar0
+ *
+ * This routine selects the appropriate base address
+ * based on the indicated offset.
+ */
+static inline void __iomem *hfi1_addr_from_offset(
+ const struct hfi1_devdata *dd,
+ u32 offset)
+{
+ if (offset >= dd->base2_start)
+ return dd->kregbase2 + (offset - dd->base2_start);
+ return dd->kregbase1 + offset;
+}
+
+/**
+ * read_csr - read CSR at the indicated offset
+ * @dd - the dd device
+ * @offset - the offset of the CSR within bar0
+ *
+ * Return: the value read or all FF's if there
+ * is no mapping
+ */
u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
{
- if (dd->flags & HFI1_PRESENT) {
- return readq((void __iomem *)dd->kregbase + offset);
- }
+ if (dd->flags & HFI1_PRESENT)
+ return readq(hfi1_addr_from_offset(dd, offset));
return -1;
}
+/**
+ * write_csr - write CSR at the indicated offset
+ * @dd - the dd device
+ * @offset - the offset of the CSR within bar0
+ * @value - value to write
+ */
void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
{
- if (dd->flags & HFI1_PRESENT)
- writeq(value, (void __iomem *)dd->kregbase + offset);
+ if (dd->flags & HFI1_PRESENT) {
+ void __iomem *base = hfi1_addr_from_offset(dd, offset);
+
+ /* avoid write to RcvArray */
+ if (WARN_ON(offset >= RCV_ARRAY && offset < dd->base2_start))
+ return;
+ writeq(value, base);
+ }
}
+/**
+ * get_csr_addr - return the iomem address for offset
+ * @dd - the dd device
+ * @offset - the offset of the CSR within bar0
+ *
+ * Return: The iomem address to use in subsequent
+ * writeq/readq operations.
+ */
void __iomem *get_csr_addr(
- struct hfi1_devdata *dd,
+ const struct hfi1_devdata *dd,
u32 offset)
{
- return (void __iomem *)dd->kregbase + offset;
+ if (dd->flags & HFI1_PRESENT)
+ return hfi1_addr_from_offset(dd, offset);
+ return NULL;
}
static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
@@ -5496,7 +5548,7 @@ static void update_rcverr_timer(unsigned long opaque)
set_link_down_reason(
ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
- queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
+ queue_work(ppd->link_wq, &ppd->link_bounce_work);
}
dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
@@ -6051,7 +6103,7 @@ static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
* will not happen. We have to do it here
* before turning the DC off.
*/
- queue_work(ppd->hfi1_wq, &ppd->link_down_work);
+ queue_work(ppd->link_wq, &ppd->link_down_work);
}
} else {
dd_dev_info(dd, "%s: QSFP module inserted\n",
@@ -6086,7 +6138,7 @@ static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
/* Schedule the QSFP work only if there is a cable attached. */
if (qsfp_mod_present(ppd))
- queue_work(ppd->hfi1_wq, &ppd->qsfp_info.qsfp_work);
+ queue_work(ppd->link_wq, &ppd->qsfp_info.qsfp_work);
}
static int request_host_lcb_access(struct hfi1_devdata *dd)
@@ -6735,13 +6787,17 @@ static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
static void rxe_freeze(struct hfi1_devdata *dd)
{
int i;
+ struct hfi1_ctxtdata *rcd;
/* disable port */
clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
/* disable all receive contexts */
- for (i = 0; i < dd->num_rcv_contexts; i++)
- hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, i);
+ for (i = 0; i < dd->num_rcv_contexts; i++) {
+ rcd = hfi1_rcd_get_by_index(dd, i);
+ hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, rcd);
+ hfi1_rcd_put(rcd);
+ }
}
/*
@@ -6753,21 +6809,24 @@ static void rxe_freeze(struct hfi1_devdata *dd)
static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
{
u32 rcvmask;
- int i;
+ u16 i;
+ struct hfi1_ctxtdata *rcd;
/* enable all kernel contexts */
for (i = 0; i < dd->num_rcv_contexts; i++) {
- struct hfi1_ctxtdata *rcd = dd->rcd[i];
+ rcd = hfi1_rcd_get_by_index(dd, i);
/* Ensure all non-user contexts(including vnic) are enabled */
- if (!rcd || !rcd->sc || (rcd->sc->type == SC_USER))
+ if (!rcd || !rcd->sc || (rcd->sc->type == SC_USER)) {
+ hfi1_rcd_put(rcd);
continue;
-
+ }
rcvmask = HFI1_RCVCTRL_CTXT_ENB;
/* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
- rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
+ rcvmask |= HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ?
HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
- hfi1_rcvctrl(dd, rcvmask, i);
+ hfi1_rcvctrl(dd, rcvmask, rcd);
+ hfi1_rcd_put(rcd);
}
/* enable port */
@@ -6906,7 +6965,7 @@ static void reset_neighbor_info(struct hfi1_pportdata *ppd)
static const char * const link_down_reason_strs[] = {
[OPA_LINKDOWN_REASON_NONE] = "None",
- [OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Recive error 0",
+ [OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
[OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
[OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
[OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
@@ -6996,6 +7055,7 @@ void handle_link_down(struct work_struct *work)
/* Go offline first, then deal with reading/writing through 8051 */
was_up = !!(ppd->host_link_state & HLS_UP);
set_link_state(ppd, HLS_DN_OFFLINE);
+ xchg(&ppd->is_link_down_queued, 0);
if (was_up) {
lcl_reason = 0;
@@ -7330,7 +7390,7 @@ void handle_verify_cap(struct work_struct *work)
struct hfi1_devdata *dd = ppd->dd;
u64 reg;
u8 power_management;
- u8 continious;
+ u8 continuous;
u8 vcu;
u8 vau;
u8 z;
@@ -7349,7 +7409,7 @@ void handle_verify_cap(struct work_struct *work)
lcb_shutdown(dd, 0);
adjust_lcb_for_fpga_serdes(dd);
- read_vc_remote_phy(dd, &power_management, &continious);
+ read_vc_remote_phy(dd, &power_management, &continuous);
read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
&partner_supported_crc);
read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
@@ -7363,7 +7423,7 @@ void handle_verify_cap(struct work_struct *work)
get_link_widths(dd, &active_tx, &active_rx);
dd_dev_info(dd,
"Peer PHY: power management 0x%x, continuous updates 0x%x\n",
- (int)power_management, (int)continious);
+ (int)power_management, (int)continuous);
dd_dev_info(dd,
"Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
(int)vau, (int)z, (int)vcu, (int)vl15buf,
@@ -7689,12 +7749,12 @@ static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
host_msg &= ~(u64)HOST_REQ_DONE;
}
if (host_msg & BC_SMA_MSG) {
- queue_work(ppd->hfi1_wq, &ppd->sma_message_work);
+ queue_work(ppd->link_wq, &ppd->sma_message_work);
host_msg &= ~(u64)BC_SMA_MSG;
}
if (host_msg & LINKUP_ACHIEVED) {
dd_dev_info(dd, "8051: Link up\n");
- queue_work(ppd->hfi1_wq, &ppd->link_up_work);
+ queue_work(ppd->link_wq, &ppd->link_up_work);
host_msg &= ~(u64)LINKUP_ACHIEVED;
}
if (host_msg & EXT_DEVICE_CFG_REQ) {
@@ -7702,7 +7762,7 @@ static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
}
if (host_msg & VERIFY_CAP_FRAME) {
- queue_work(ppd->hfi1_wq, &ppd->link_vc_work);
+ queue_work(ppd->link_wq, &ppd->link_vc_work);
host_msg &= ~(u64)VERIFY_CAP_FRAME;
}
if (host_msg & LINK_GOING_DOWN) {
@@ -7717,7 +7777,7 @@ static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
host_msg &= ~(u64)LINK_GOING_DOWN;
}
if (host_msg & LINK_WIDTH_DOWNGRADED) {
- queue_work(ppd->hfi1_wq, &ppd->link_downgrade_work);
+ queue_work(ppd->link_wq, &ppd->link_downgrade_work);
host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
}
if (host_msg) {
@@ -7752,15 +7812,22 @@ static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
if (queue_link_down) {
/*
* if the link is already going down or disabled, do not
- * queue another
+ * queue another. If there's a link down entry already
+ * queued, don't queue another one.
*/
if ((ppd->host_link_state &
(HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
ppd->link_enabled == 0) {
- dd_dev_info(dd, "%s: not queuing link down\n",
- __func__);
+ dd_dev_info(dd, "%s: not queuing link down. host_link_state %x, link_enabled %x\n",
+ __func__, ppd->host_link_state,
+ ppd->link_enabled);
} else {
- queue_work(ppd->hfi1_wq, &ppd->link_down_work);
+ if (xchg(&ppd->is_link_down_queued, 1) == 1)
+ dd_dev_info(dd,
+ "%s: link down request already queued\n",
+ __func__);
+ else
+ queue_work(ppd->link_wq, &ppd->link_down_work);
}
}
}
@@ -7968,7 +8035,7 @@ static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
dd_dev_info_ratelimited(dd, "%s: PortErrorAction bounce\n",
__func__);
set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
- queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
+ queue_work(ppd->link_wq, &ppd->link_bounce_work);
}
}
@@ -8052,7 +8119,7 @@ static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
char *err_detail;
if (likely(source < dd->num_rcv_contexts)) {
- rcd = dd->rcd[source];
+ rcd = hfi1_rcd_get_by_index(dd, source);
if (rcd) {
/* Check for non-user contexts, including vnic */
if ((source < dd->first_dyn_alloc_ctxt) ||
@@ -8060,6 +8127,8 @@ static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
rcd->do_interrupt(rcd, 0);
else
handle_user_interrupt(rcd);
+
+ hfi1_rcd_put(rcd);
return; /* OK */
}
/* received an interrupt, but no rcd */
@@ -8081,12 +8150,14 @@ static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
char *err_detail;
if (likely(source < dd->num_rcv_contexts)) {
- rcd = dd->rcd[source];
+ rcd = hfi1_rcd_get_by_index(dd, source);
if (rcd) {
/* only pay attention to user urgent interrupts */
if ((source >= dd->first_dyn_alloc_ctxt) &&
(!rcd->sc || (rcd->sc->type == SC_USER)))
handle_user_interrupt(rcd);
+
+ hfi1_rcd_put(rcd);
return; /* OK */
}
/* received an interrupt, but no rcd */
@@ -8219,8 +8290,8 @@ static irqreturn_t sdma_interrupt(int irq, void *data)
/* handle the interrupt(s) */
sdma_engine_interrupt(sde, status);
} else {
- dd_dev_err(dd, "SDMA engine %u interrupt, but no status bits set\n",
- sde->this_idx);
+ dd_dev_err_ratelimited(dd, "SDMA engine %u interrupt, but no status bits set\n",
+ sde->this_idx);
}
return IRQ_HANDLED;
}
@@ -8291,7 +8362,7 @@ static irqreturn_t receive_context_interrupt(int irq, void *data)
int disposition;
int present;
- trace_hfi1_receive_interrupt(dd, rcd->ctxt);
+ trace_hfi1_receive_interrupt(dd, rcd);
this_cpu_inc(*dd->int_counter);
aspm_ctx_disable(rcd);
@@ -8781,6 +8852,20 @@ static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
& REMOTE_DEVICE_REV_MASK;
}
+int write_host_interface_version(struct hfi1_devdata *dd, u8 version)
+{
+ u32 frame;
+ u32 mask;
+
+ mask = (HOST_INTERFACE_VERSION_MASK << HOST_INTERFACE_VERSION_SHIFT);
+ read_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, &frame);
+ /* Clear, then set field */
+ frame &= ~mask;
+ frame |= ((u32)version << HOST_INTERFACE_VERSION_SHIFT);
+ return load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG,
+ frame);
+}
+
void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
u8 *ver_patch)
{
@@ -9257,12 +9342,6 @@ int start_link(struct hfi1_pportdata *ppd)
*/
tune_serdes(ppd);
- if (!ppd->link_enabled) {
- dd_dev_info(ppd->dd,
- "%s: stopping link start because link is disabled\n",
- __func__);
- return 0;
- }
if (!ppd->driver_link_ready) {
dd_dev_info(ppd->dd,
"%s: stopping link start because driver is not ready\n",
@@ -9373,13 +9452,13 @@ static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
(qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
- dd_dev_info(dd, "%s: QSFP cable temperature too high\n",
- __func__);
+ dd_dev_err(dd, "%s: QSFP cable temperature too high\n",
+ __func__);
if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
(qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
- dd_dev_info(dd, "%s: QSFP cable temperature too low\n",
- __func__);
+ dd_dev_err(dd, "%s: QSFP cable temperature too low\n",
+ __func__);
/*
* The remaining alarms/warnings don't matter if the link is down.
@@ -9389,75 +9468,75 @@ static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
(qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
- dd_dev_info(dd, "%s: QSFP supply voltage too high\n",
- __func__);
+ dd_dev_err(dd, "%s: QSFP supply voltage too high\n",
+ __func__);
if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
(qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
- dd_dev_info(dd, "%s: QSFP supply voltage too low\n",
- __func__);
+ dd_dev_err(dd, "%s: QSFP supply voltage too low\n",
+ __func__);
/* Byte 2 is vendor specific */
if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
(qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
- dd_dev_info(dd, "%s: Cable RX channel 1/2 power too high\n",
- __func__);
+ dd_dev_err(dd, "%s: Cable RX channel 1/2 power too high\n",
+ __func__);
if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
(qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
- dd_dev_info(dd, "%s: Cable RX channel 1/2 power too low\n",
- __func__);
+ dd_dev_err(dd, "%s: Cable RX channel 1/2 power too low\n",
+ __func__);
if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
(qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
- dd_dev_info(dd, "%s: Cable RX channel 3/4 power too high\n",
- __func__);
+ dd_dev_err(dd, "%s: Cable RX channel 3/4 power too high\n",
+ __func__);
if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
(qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
- dd_dev_info(dd, "%s: Cable RX channel 3/4 power too low\n",
- __func__);
+ dd_dev_err(dd, "%s: Cable RX channel 3/4 power too low\n",
+ __func__);
if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
(qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
- dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too high\n",
- __func__);
+ dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too high\n",
+ __func__);
if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
(qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
- dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too low\n",
- __func__);
+ dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too low\n",
+ __func__);
if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
(qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
- dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too high\n",
- __func__);
+ dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too high\n",
+ __func__);
if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
(qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
- dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too low\n",
- __func__);
+ dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too low\n",
+ __func__);
if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
(qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
- dd_dev_info(dd, "%s: Cable TX channel 1/2 power too high\n",
- __func__);
+ dd_dev_err(dd, "%s: Cable TX channel 1/2 power too high\n",
+ __func__);
if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
(qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
- dd_dev_info(dd, "%s: Cable TX channel 1/2 power too low\n",
- __func__);
+ dd_dev_err(dd, "%s: Cable TX channel 1/2 power too low\n",
+ __func__);
if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
(qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
- dd_dev_info(dd, "%s: Cable TX channel 3/4 power too high\n",
- __func__);
+ dd_dev_err(dd, "%s: Cable TX channel 3/4 power too high\n",
+ __func__);
if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
(qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
- dd_dev_info(dd, "%s: Cable TX channel 3/4 power too low\n",
- __func__);
+ dd_dev_err(dd, "%s: Cable TX channel 3/4 power too low\n",
+ __func__);
/* Bytes 9-10 and 11-12 are reserved */
/* Bytes 13-15 are vendor specific */
@@ -9480,6 +9559,13 @@ void qsfp_event(struct work_struct *work)
if (!qsfp_mod_present(ppd))
return;
+ if (ppd->host_link_state == HLS_DN_DISABLE) {
+ dd_dev_info(ppd->dd,
+ "%s: stopping link start because link is disabled\n",
+ __func__);
+ return;
+ }
+
/*
* Turn DC back on after cable has been re-inserted. Up until
* now, the DC has been in reset to save power.
@@ -9635,7 +9721,7 @@ static void try_start_link(struct hfi1_pportdata *ppd)
"QSFP not responding, waiting and retrying %d\n",
(int)ppd->qsfp_retry_count);
ppd->qsfp_retry_count++;
- queue_delayed_work(ppd->hfi1_wq, &ppd->start_link_work,
+ queue_delayed_work(ppd->link_wq, &ppd->start_link_work,
msecs_to_jiffies(QSFP_RETRY_WAIT));
return;
}
@@ -9742,17 +9828,6 @@ static inline int init_cpu_counters(struct hfi1_devdata *dd)
return 0;
}
-static const char * const pt_names[] = {
- "expected",
- "eager",
- "invalid"
-};
-
-static const char *pt_name(u32 type)
-{
- return type >= ARRAY_SIZE(pt_names) ? "unknown" : pt_names[type];
-}
-
/*
* index is the index into the receive array
*/
@@ -9760,35 +9835,34 @@ void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
u32 type, unsigned long pa, u16 order)
{
u64 reg;
- void __iomem *base = (dd->rcvarray_wc ? dd->rcvarray_wc :
- (dd->kregbase + RCV_ARRAY));
if (!(dd->flags & HFI1_PRESENT))
goto done;
- if (type == PT_INVALID) {
+ if (type == PT_INVALID || type == PT_INVALID_FLUSH) {
pa = 0;
+ order = 0;
} else if (type > PT_INVALID) {
dd_dev_err(dd,
"unexpected receive array type %u for index %u, not handled\n",
type, index);
goto done;
}
-
- hfi1_cdbg(TID, "type %s, index 0x%x, pa 0x%lx, bsize 0x%lx",
- pt_name(type), index, pa, (unsigned long)order);
+ trace_hfi1_put_tid(dd, index, type, pa, order);
#define RT_ADDR_SHIFT 12 /* 4KB kernel address boundary */
reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
| (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
| ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
<< RCV_ARRAY_RT_ADDR_SHIFT;
- writeq(reg, base + (index * 8));
+ trace_hfi1_write_rcvarray(dd->rcvarray_wc + (index * 8), reg);
+ writeq(reg, dd->rcvarray_wc + (index * 8));
- if (type == PT_EAGER)
+ if (type == PT_EAGER || type == PT_INVALID_FLUSH || (index & 3) == 3)
/*
- * Eager entries are written one-by-one so we have to push them
- * after we write the entry.
+ * Eager entries are written and flushed
+ *
+ * Expected entries are flushed every 4 writes
*/
flush_wc();
done:
@@ -9810,15 +9884,6 @@ void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
}
-struct ib_header *hfi1_get_msgheader(
- struct hfi1_devdata *dd, __le32 *rhf_addr)
-{
- u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));
-
- return (struct ib_header *)
- (rhf_addr - dd->rhf_offset + offset);
-}
-
static const char * const ib_cfg_name_strings[] = {
"HFI1_IB_CFG_LIDLMC",
"HFI1_IB_CFG_LWID_DG_ENB",
@@ -10010,10 +10075,16 @@ static void set_lidlmc(struct hfi1_pportdata *ppd)
struct hfi1_devdata *dd = ppd->dd;
u32 mask = ~((1U << ppd->lmc) - 1);
u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
+ u32 lid;
+ /*
+ * Program 0 in the CSR if the port LID is extended. This prevents
+ * 9B packets from being sent out for large LIDs.
+ */
+ lid = (ppd->lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) ? 0 : ppd->lid;
c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
| DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
- c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
+ c1 |= ((lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
<< DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
<< DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
@@ -10024,7 +10095,7 @@ static void set_lidlmc(struct hfi1_pportdata *ppd)
*/
sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
- (((ppd->lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
+ (((lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
for (i = 0; i < dd->chip_send_contexts; i++) {
@@ -10034,29 +10105,7 @@ static void set_lidlmc(struct hfi1_pportdata *ppd)
}
/* Now we have to do the same thing for the sdma engines */
- sdma_update_lmc(dd, mask, ppd->lid);
-}
-
-static int wait_phy_linkstate(struct hfi1_devdata *dd, u32 state, u32 msecs)
-{
- unsigned long timeout;
- u32 curr_state;
-
- timeout = jiffies + msecs_to_jiffies(msecs);
- while (1) {
- curr_state = read_physical_state(dd);
- if (curr_state == state)
- break;
- if (time_after(jiffies, timeout)) {
- dd_dev_err(dd,
- "timeout waiting for phy link state 0x%x, current state is 0x%x\n",
- state, curr_state);
- return -ETIMEDOUT;
- }
- usleep_range(1950, 2050); /* sleep 2ms-ish */
- }
-
- return 0;
+ sdma_update_lmc(dd, mask, lid);
}
static const char *state_completed_string(u32 completed)
@@ -10238,8 +10287,10 @@ static void force_logical_link_state_down(struct hfi1_pportdata *ppd)
write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 0);
write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, 0);
- /* call again to adjust ppd->statusp, if needed */
- get_logical_state(ppd);
+ /* adjust ppd->statusp, if needed */
+ update_statusp(ppd, IB_PORT_DOWN);
+
+ dd_dev_info(ppd->dd, "logical state forced to LINK_DOWN\n");
}
/*
@@ -10253,49 +10304,35 @@ static void force_logical_link_state_down(struct hfi1_pportdata *ppd)
static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
{
struct hfi1_devdata *dd = ppd->dd;
- u32 pstate, previous_state;
+ u32 previous_state;
int ret;
- int do_transition;
- int do_wait;
update_lcb_cache(dd);
previous_state = ppd->host_link_state;
ppd->host_link_state = HLS_GOING_OFFLINE;
- pstate = read_physical_state(dd);
- if (pstate == PLS_OFFLINE) {
- do_transition = 0; /* in right state */
- do_wait = 0; /* ...no need to wait */
- } else if ((pstate & 0xf0) == PLS_OFFLINE) {
- do_transition = 0; /* in an offline transient state */
- do_wait = 1; /* ...wait for it to settle */
- } else {
- do_transition = 1; /* need to move to offline */
- do_wait = 1; /* ...will need to wait */
- }
- if (do_transition) {
- ret = set_physical_link_state(dd,
- (rem_reason << 8) | PLS_OFFLINE);
+ /* start offline transition */
+ ret = set_physical_link_state(dd, (rem_reason << 8) | PLS_OFFLINE);
- if (ret != HCMD_SUCCESS) {
- dd_dev_err(dd,
- "Failed to transition to Offline link state, return %d\n",
- ret);
- return -EINVAL;
- }
- if (ppd->offline_disabled_reason ==
- HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
- ppd->offline_disabled_reason =
- HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
+ if (ret != HCMD_SUCCESS) {
+ dd_dev_err(dd,
+ "Failed to transition to Offline link state, return %d\n",
+ ret);
+ return -EINVAL;
}
+ if (ppd->offline_disabled_reason ==
+ HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
+ ppd->offline_disabled_reason =
+ HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
- if (do_wait) {
- /* it can take a while for the link to go down */
- ret = wait_phy_linkstate(dd, PLS_OFFLINE, 10000);
- if (ret < 0)
- return ret;
- }
+ /*
+ * Wait for offline transition. It can take a while for
+ * the link to go down.
+ */
+ ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 10000);
+ if (ret < 0)
+ return ret;
/*
* Now in charge of LCB - must be after the physical state is
@@ -10415,11 +10452,11 @@ static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
}
/*
- * driver_physical_state - convert the driver's notion of a port's
+ * driver_pstate - convert the driver's notion of a port's
* state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
* Return -1 (converted to a u32) to indicate error.
*/
-u32 driver_physical_state(struct hfi1_pportdata *ppd)
+u32 driver_pstate(struct hfi1_pportdata *ppd)
{
switch (ppd->host_link_state) {
case HLS_UP_INIT:
@@ -10449,11 +10486,11 @@ u32 driver_physical_state(struct hfi1_pportdata *ppd)
}
/*
- * driver_logical_state - convert the driver's notion of a port's
+ * driver_lstate - convert the driver's notion of a port's
* state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
* (converted to a u32) to indicate error.
*/
-u32 driver_logical_state(struct hfi1_pportdata *ppd)
+u32 driver_lstate(struct hfi1_pportdata *ppd)
{
if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
return IB_PORT_DOWN;
@@ -10484,6 +10521,14 @@ void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
}
/*
+ * Verify if BCT for data VLs is non-zero.
+ */
+static inline bool data_vls_operational(struct hfi1_pportdata *ppd)
+{
+ return !!ppd->actual_vls_operational;
+}
+
+/*
* Change the physical and/or logical link state.
*
* Do not call this routine while inside an interrupt. It contains
@@ -10545,38 +10590,58 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
goto unexpected;
}
+ /*
+ * Wait for Link_Up physical state.
+ * Physical and Logical states should already be
+ * transitioned to LinkUp and LinkInit respectively.
+ */
+ ret = wait_physical_linkstate(ppd, PLS_LINKUP, 1000);
+ if (ret) {
+ dd_dev_err(dd,
+ "%s: physical state did not change to LINK-UP\n",
+ __func__);
+ break;
+ }
+
ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
if (ret) {
dd_dev_err(dd,
"%s: logical state did not change to INIT\n",
__func__);
- } else {
- /* clear old transient LINKINIT_REASON code */
- if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
- ppd->linkinit_reason =
- OPA_LINKINIT_REASON_LINKUP;
+ break;
+ }
- /* enable the port */
- add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
+ /* clear old transient LINKINIT_REASON code */
+ if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
+ ppd->linkinit_reason =
+ OPA_LINKINIT_REASON_LINKUP;
- handle_linkup_change(dd, 1);
- ppd->host_link_state = HLS_UP_INIT;
- }
+ /* enable the port */
+ add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
+
+ handle_linkup_change(dd, 1);
+ ppd->host_link_state = HLS_UP_INIT;
break;
case HLS_UP_ARMED:
if (ppd->host_link_state != HLS_UP_INIT)
goto unexpected;
- ppd->host_link_state = HLS_UP_ARMED;
+ if (!data_vls_operational(ppd)) {
+ dd_dev_err(dd,
+ "%s: data VLs not operational\n", __func__);
+ ret = -EINVAL;
+ break;
+ }
+
set_logical_state(dd, LSTATE_ARMED);
ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
if (ret) {
- /* logical state didn't change, stay at init */
- ppd->host_link_state = HLS_UP_INIT;
dd_dev_err(dd,
"%s: logical state did not change to ARMED\n",
__func__);
+ break;
}
+ ppd->host_link_state = HLS_UP_ARMED;
/*
* The simulator does not currently implement SMA messages,
* so neighbor_normal is not set. Set it here when we first
@@ -10589,18 +10654,16 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
if (ppd->host_link_state != HLS_UP_ARMED)
goto unexpected;
- ppd->host_link_state = HLS_UP_ACTIVE;
set_logical_state(dd, LSTATE_ACTIVE);
ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
if (ret) {
- /* logical state didn't change, stay at armed */
- ppd->host_link_state = HLS_UP_ARMED;
dd_dev_err(dd,
"%s: logical state did not change to ACTIVE\n",
__func__);
} else {
/* tell all engines to go running */
sdma_all_running(dd);
+ ppd->host_link_state = HLS_UP_ACTIVE;
/* Signal the IB layer that the port has went active */
event.device = &dd->verbs_dev.rdi.ibdev;
@@ -10658,6 +10721,8 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
*/
if (ret)
goto_offline(ppd, 0);
+ else
+ log_physical_state(ppd, PLS_POLLING);
break;
case HLS_DN_DISABLE:
/* link is disabled */
@@ -10682,6 +10747,13 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
ret = -EINVAL;
break;
}
+ ret = wait_physical_linkstate(ppd, PLS_DISABLED, 10000);
+ if (ret) {
+ dd_dev_err(dd,
+ "%s: physical state did not change to DISABLED\n",
+ __func__);
+ break;
+ }
dc_shutdown(dd);
}
ppd->host_link_state = HLS_DN_DISABLE;
@@ -10699,6 +10771,7 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
if (ppd->host_link_state != HLS_DN_POLL)
goto unexpected;
ppd->host_link_state = HLS_VERIFY_CAP;
+ log_physical_state(ppd, PLS_CONFIGPHY_VERIFYCAP);
break;
case HLS_GOING_UP:
if (ppd->host_link_state != HLS_VERIFY_CAP)
@@ -11693,16 +11766,18 @@ static u32 encoded_size(u32 size)
return 0x1; /* if invalid, go with the minimum size */
}
-void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
+void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
+ struct hfi1_ctxtdata *rcd)
{
- struct hfi1_ctxtdata *rcd;
u64 rcvctrl, reg;
int did_enable = 0;
+ u16 ctxt;
- rcd = dd->rcd[ctxt];
if (!rcd)
return;
+ ctxt = rcd->ctxt;
+
hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
@@ -12604,20 +12679,8 @@ const char *opa_pstate_name(u32 pstate)
return "unknown";
}
-/*
- * Read the hardware link state and set the driver's cached value of it.
- * Return the (new) current value.
- */
-u32 get_logical_state(struct hfi1_pportdata *ppd)
+static void update_statusp(struct hfi1_pportdata *ppd, u32 state)
{
- u32 new_state;
-
- new_state = chip_to_opa_lstate(ppd->dd, read_logical_state(ppd->dd));
- if (new_state != ppd->lstate) {
- dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
- opa_lstate_name(new_state), new_state);
- ppd->lstate = new_state;
- }
/*
* Set port status flags in the page mapped into userspace
* memory. Do it here to ensure a reliable state - this is
@@ -12627,7 +12690,7 @@ u32 get_logical_state(struct hfi1_pportdata *ppd)
* function.
*/
if (ppd->statusp) {
- switch (ppd->lstate) {
+ switch (state) {
case IB_PORT_DOWN:
case IB_PORT_INIT:
*ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
@@ -12641,10 +12704,9 @@ u32 get_logical_state(struct hfi1_pportdata *ppd)
break;
}
}
- return ppd->lstate;
}
-/**
+/*
* wait_logical_linkstate - wait for an IB link state change to occur
* @ppd: port device
* @state: the state to wait for
@@ -12658,35 +12720,88 @@ static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
int msecs)
{
unsigned long timeout;
+ u32 new_state;
timeout = jiffies + msecs_to_jiffies(msecs);
while (1) {
- if (get_logical_state(ppd) == state)
- return 0;
- if (time_after(jiffies, timeout))
+ new_state = chip_to_opa_lstate(ppd->dd,
+ read_logical_state(ppd->dd));
+ if (new_state == state)
break;
+ if (time_after(jiffies, timeout)) {
+ dd_dev_err(ppd->dd,
+ "timeout waiting for link state 0x%x\n",
+ state);
+ return -ETIMEDOUT;
+ }
msleep(20);
}
- dd_dev_err(ppd->dd, "timeout waiting for link state 0x%x\n", state);
- return -ETIMEDOUT;
+ update_statusp(ppd, state);
+ dd_dev_info(ppd->dd,
+ "logical state changed to %s (0x%x)\n",
+ opa_lstate_name(state),
+ state);
+ return 0;
}
-u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd)
+static void log_state_transition(struct hfi1_pportdata *ppd, u32 state)
{
- u32 pstate;
- u32 ib_pstate;
+ u32 ib_pstate = chip_to_opa_pstate(ppd->dd, state);
- pstate = read_physical_state(ppd->dd);
- ib_pstate = chip_to_opa_pstate(ppd->dd, pstate);
- if (ppd->last_pstate != ib_pstate) {
- dd_dev_info(ppd->dd,
- "%s: physical state changed to %s (0x%x), phy 0x%x\n",
- __func__, opa_pstate_name(ib_pstate), ib_pstate,
- pstate);
- ppd->last_pstate = ib_pstate;
+ dd_dev_info(ppd->dd,
+ "physical state changed to %s (0x%x), phy 0x%x\n",
+ opa_pstate_name(ib_pstate), ib_pstate, state);
+}
+
+/*
+ * Read the physical hardware link state and check if it matches host
+ * drivers anticipated state.
+ */
+static void log_physical_state(struct hfi1_pportdata *ppd, u32 state)
+{
+ u32 read_state = read_physical_state(ppd->dd);
+
+ if (read_state == state) {
+ log_state_transition(ppd, state);
+ } else {
+ dd_dev_err(ppd->dd,
+ "anticipated phy link state 0x%x, read 0x%x\n",
+ state, read_state);
}
- return ib_pstate;
+}
+
+/*
+ * wait_physical_linkstate - wait for a physical link state change to occur
+ * @ppd: port device
+ * @state: the state to wait for
+ * @msecs: the number of milliseconds to wait
+ *
+ * Wait up to msecs milliseconds for the physical link state change to occur.
+ * Returns 0 if state reached, otherwise -ETIMEDOUT.
+ */
+static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
+ int msecs)
+{
+ u32 read_state;
+ unsigned long timeout;
+
+ timeout = jiffies + msecs_to_jiffies(msecs);
+ while (1) {
+ read_state = read_physical_state(ppd->dd);
+ if (read_state == state)
+ break;
+ if (time_after(jiffies, timeout)) {
+ dd_dev_err(ppd->dd,
+ "timeout waiting for phy link state 0x%x\n",
+ state);
+ return -ETIMEDOUT;
+ }
+ usleep_range(1950, 2050); /* sleep 2ms-ish */
+ }
+
+ log_state_transition(ppd, state);
+ return 0;
}
#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
@@ -12809,30 +12924,24 @@ static void clean_up_interrupts(struct hfi1_devdata *dd)
for (i = 0; i < dd->num_msix_entries; i++, me++) {
if (!me->arg) /* => no irq, no affinity */
continue;
- hfi1_put_irq_affinity(dd, &dd->msix_entries[i]);
- free_irq(me->msix.vector, me->arg);
+ hfi1_put_irq_affinity(dd, me);
+ free_irq(me->irq, me->arg);
}
+
+ /* clean structures */
+ kfree(dd->msix_entries);
+ dd->msix_entries = NULL;
+ dd->num_msix_entries = 0;
} else {
/* INTx */
if (dd->requested_intx_irq) {
free_irq(dd->pcidev->irq, dd);
dd->requested_intx_irq = 0;
}
- }
-
- /* turn off interrupts */
- if (dd->num_msix_entries) {
- /* MSI-X */
- pci_disable_msix(dd->pcidev);
- } else {
- /* INTx */
disable_intx(dd->pcidev);
}
- /* clean structures */
- kfree(dd->msix_entries);
- dd->msix_entries = NULL;
- dd->num_msix_entries = 0;
+ pci_free_irq_vectors(dd->pcidev);
}
/*
@@ -12847,7 +12956,12 @@ static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
/* clear from the handled mask of the general interrupt */
m = isrc / 64;
n = isrc % 64;
- dd->gi_mask[m] &= ~((u64)1 << n);
+ if (likely(m < CCE_NUM_INT_CSRS)) {
+ dd->gi_mask[m] &= ~((u64)1 << n);
+ } else {
+ dd_dev_err(dd, "remap interrupt err\n");
+ return;
+ }
/* direct the chip source to the given MSI-X interrupt */
m = isrc / 8;
@@ -12948,7 +13062,7 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
me->type = IRQ_SDMA;
} else if (first_rx <= i && i < last_rx) {
idx = i - first_rx;
- rcd = dd->rcd[idx];
+ rcd = hfi1_rcd_get_by_index(dd, idx);
if (rcd) {
/*
* Set the interrupt register and mask for this
@@ -12967,6 +13081,7 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
remap_intr(dd, IS_RCVAVAIL_START + idx, i);
me->type = IRQ_RCVCTXT;
rcd->msix_intr = i;
+ hfi1_rcd_put(rcd);
}
} else {
/* not in our expected range - complain, then
@@ -12981,13 +13096,21 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
continue;
/* make sure the name is terminated */
me->name[sizeof(me->name) - 1] = 0;
+ me->irq = pci_irq_vector(dd->pcidev, i);
+ /*
+ * On err return me->irq. Don't need to clear this
+ * because 'arg' has not been set, and cleanup will
+ * do the right thing.
+ */
+ if (me->irq < 0)
+ return me->irq;
- ret = request_threaded_irq(me->msix.vector, handler, thread, 0,
+ ret = request_threaded_irq(me->irq, handler, thread, 0,
me->name, arg);
if (ret) {
dd_dev_err(dd,
- "unable to allocate %s interrupt, vector %d, index %d, err %d\n",
- err_info, me->msix.vector, idx, ret);
+ "unable to allocate %s interrupt, irq %d, index %d, err %d\n",
+ err_info, me->irq, idx, ret);
return ret;
}
/*
@@ -12998,8 +13121,7 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
ret = hfi1_get_irq_affinity(dd, me);
if (ret)
- dd_dev_err(dd,
- "unable to pin IRQ %d\n", ret);
+ dd_dev_err(dd, "unable to pin IRQ %d\n", ret);
}
return ret;
@@ -13018,7 +13140,7 @@ void hfi1_vnic_synchronize_irq(struct hfi1_devdata *dd)
struct hfi1_ctxtdata *rcd = dd->vnic.ctxt[i];
struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr];
- synchronize_irq(me->msix.vector);
+ synchronize_irq(me->irq);
}
}
@@ -13031,7 +13153,7 @@ void hfi1_reset_vnic_msix_info(struct hfi1_ctxtdata *rcd)
return;
hfi1_put_irq_affinity(dd, me);
- free_irq(me->msix.vector, me->arg);
+ free_irq(me->irq, me->arg);
me->arg = NULL;
}
@@ -13059,14 +13181,19 @@ void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd)
DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
me->name[sizeof(me->name) - 1] = 0;
me->type = IRQ_RCVCTXT;
-
+ me->irq = pci_irq_vector(dd->pcidev, rcd->msix_intr);
+ if (me->irq < 0) {
+ dd_dev_err(dd, "vnic irq vector request (idx %d) fail %d\n",
+ idx, me->irq);
+ return;
+ }
remap_intr(dd, IS_RCVAVAIL_START + idx, rcd->msix_intr);
- ret = request_threaded_irq(me->msix.vector, receive_context_interrupt,
+ ret = request_threaded_irq(me->irq, receive_context_interrupt,
receive_context_thread, 0, me->name, arg);
if (ret) {
- dd_dev_err(dd, "vnic irq request (vector %d, idx %d) fail %d\n",
- me->msix.vector, idx, ret);
+ dd_dev_err(dd, "vnic irq request (irq %d, idx %d) fail %d\n",
+ me->irq, idx, ret);
return;
}
/*
@@ -13079,7 +13206,7 @@ void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd)
if (ret) {
dd_dev_err(dd,
"unable to pin IRQ %d\n", ret);
- free_irq(me->msix.vector, me->arg);
+ free_irq(me->irq, me->arg);
}
}
@@ -13102,9 +13229,8 @@ static void reset_interrupts(struct hfi1_devdata *dd)
static int set_up_interrupts(struct hfi1_devdata *dd)
{
- struct hfi1_msix_entry *entries;
- u32 total, request;
- int i, ret;
+ u32 total;
+ int ret, request;
int single_interrupt = 0; /* we expect to have all the interrupts */
/*
@@ -13116,39 +13242,31 @@ static int set_up_interrupts(struct hfi1_devdata *dd)
*/
total = 1 + dd->num_sdma + dd->n_krcv_queues + HFI1_NUM_VNIC_CTXT;
- entries = kcalloc(total, sizeof(*entries), GFP_KERNEL);
- if (!entries) {
- ret = -ENOMEM;
- goto fail;
- }
- /* 1-1 MSI-X entry assignment */
- for (i = 0; i < total; i++)
- entries[i].msix.entry = i;
-
/* ask for MSI-X interrupts */
- request = total;
- request_msix(dd, &request, entries);
-
- if (request == 0) {
+ request = request_msix(dd, total);
+ if (request < 0) {
+ ret = request;
+ goto fail;
+ } else if (request == 0) {
/* using INTx */
/* dd->num_msix_entries already zero */
- kfree(entries);
single_interrupt = 1;
dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
+ } else if (request < total) {
+ /* using MSI-X, with reduced interrupts */
+ dd_dev_err(dd, "reduced interrupt found, wanted %u, got %u\n",
+ total, request);
+ ret = -EINVAL;
+ goto fail;
} else {
- /* using MSI-X */
- dd->num_msix_entries = request;
- dd->msix_entries = entries;
-
- if (request != total) {
- /* using MSI-X, with reduced interrupts */
- dd_dev_err(
- dd,
- "cannot handle reduced interrupt case, want %u, got %u\n",
- total, request);
- ret = -EINVAL;
+ dd->msix_entries = kcalloc(total, sizeof(*dd->msix_entries),
+ GFP_KERNEL);
+ if (!dd->msix_entries) {
+ ret = -ENOMEM;
goto fail;
}
+ /* using MSI-X */
+ dd->num_msix_entries = total;
dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
}
@@ -13391,8 +13509,7 @@ static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
/* RcvArray */
for (i = 0; i < dd->chip_rcv_array_count; i++)
- write_csr(dd, RCV_ARRAY + (8 * i),
- RCV_ARRAY_RT_WRITE_ENABLE_SMASK);
+ hfi1_put_tid(dd, i, PT_INVALID_FLUSH, 0, 0);
/* RcvQPMapTable */
for (i = 0; i < 32; i++)
@@ -13826,9 +13943,10 @@ static void init_sc2vl_tables(struct hfi1_devdata *dd)
* a reset following the (possible) FLR in this routine.
*
*/
-static void init_chip(struct hfi1_devdata *dd)
+static int init_chip(struct hfi1_devdata *dd)
{
int i;
+ int ret = 0;
/*
* Put the HFI CSRs in a known state.
@@ -13876,12 +13994,22 @@ static void init_chip(struct hfi1_devdata *dd)
pcie_flr(dd->pcidev);
/* restore command and BARs */
- restore_pci_variables(dd);
+ ret = restore_pci_variables(dd);
+ if (ret) {
+ dd_dev_err(dd, "%s: Could not restore PCI variables\n",
+ __func__);
+ return ret;
+ }
if (is_ax(dd)) {
dd_dev_info(dd, "Resetting CSRs with FLR\n");
pcie_flr(dd->pcidev);
- restore_pci_variables(dd);
+ ret = restore_pci_variables(dd);
+ if (ret) {
+ dd_dev_err(dd, "%s: Could not restore PCI variables\n",
+ __func__);
+ return ret;
+ }
}
} else {
dd_dev_info(dd, "Resetting CSRs with writes\n");
@@ -13909,6 +14037,7 @@ static void init_chip(struct hfi1_devdata *dd)
write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
init_chip_resources(dd);
+ return ret;
}
static void init_early_variables(struct hfi1_devdata *dd)
@@ -14360,6 +14489,7 @@ void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
static void init_rxe(struct hfi1_devdata *dd)
{
struct rsm_map_table *rmt;
+ u64 val;
/* enable all receive errors */
write_csr(dd, RCV_ERR_MASK, ~0ull);
@@ -14384,6 +14514,11 @@ static void init_rxe(struct hfi1_devdata *dd)
* (64 bytes). Max_Payload_Size is possibly modified upward in
* tune_pcie_caps() which is called after this routine.
*/
+
+ /* Have 16 bytes (4DW) of bypass header available in header queue */
+ val = read_csr(dd, RCV_BYPASS);
+ val |= (4ull << 16);
+ write_csr(dd, RCV_BYPASS, val);
}
static void init_other(struct hfi1_devdata *dd)
@@ -14465,99 +14600,86 @@ static void init_txe(struct hfi1_devdata *dd)
write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
}
-int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey)
+int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
+ u16 jkey)
{
- struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
- unsigned sctxt;
- int ret = 0;
+ u8 hw_ctxt;
u64 reg;
- if (!rcd || !rcd->sc) {
- ret = -EINVAL;
- goto done;
- }
- sctxt = rcd->sc->hw_context;
+ if (!rcd || !rcd->sc)
+ return -EINVAL;
+
+ hw_ctxt = rcd->sc->hw_context;
reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
/* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
- write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
+ write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
/*
* Enable send-side J_KEY integrity check, unless this is A0 h/w
*/
if (!is_ax(dd)) {
- reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
+ reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
- write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
+ write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
}
/* Enable J_KEY check on receive context. */
reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
- write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, reg);
-done:
- return ret;
+ write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, reg);
+
+ return 0;
}
-int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt)
+int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
- struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
- unsigned sctxt;
- int ret = 0;
+ u8 hw_ctxt;
u64 reg;
- if (!rcd || !rcd->sc) {
- ret = -EINVAL;
- goto done;
- }
- sctxt = rcd->sc->hw_context;
- write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
+ if (!rcd || !rcd->sc)
+ return -EINVAL;
+
+ hw_ctxt = rcd->sc->hw_context;
+ write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
/*
* Disable send-side J_KEY integrity check, unless this is A0 h/w.
* This check would not have been enabled for A0 h/w, see
* set_ctxt_jkey().
*/
if (!is_ax(dd)) {
- reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
+ reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
- write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
+ write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
}
/* Turn off the J_KEY on the receive side */
- write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, 0);
-done:
- return ret;
+ write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, 0);
+
+ return 0;
}
-int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey)
+int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
+ u16 pkey)
{
- struct hfi1_ctxtdata *rcd;
- unsigned sctxt;
- int ret = 0;
+ u8 hw_ctxt;
u64 reg;
- if (ctxt < dd->num_rcv_contexts) {
- rcd = dd->rcd[ctxt];
- } else {
- ret = -EINVAL;
- goto done;
- }
- if (!rcd || !rcd->sc) {
- ret = -EINVAL;
- goto done;
- }
- sctxt = rcd->sc->hw_context;
+ if (!rcd || !rcd->sc)
+ return -EINVAL;
+
+ hw_ctxt = rcd->sc->hw_context;
reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
- write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
- reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
+ write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
+ reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
- write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
-done:
- return ret;
+ write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
+
+ return 0;
}
int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt)
@@ -14568,9 +14690,6 @@ int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt)
if (!ctxt || !ctxt->sc)
return -EINVAL;
- if (ctxt->ctxt >= dd->num_rcv_contexts)
- return -EINVAL;
-
hw_ctxt = ctxt->sc->hw_context;
reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
@@ -14768,7 +14887,6 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
}
ppd->vls_supported = num_vls;
ppd->vls_operational = ppd->vls_supported;
- ppd->actual_vls_operational = ppd->vls_supported;
/* Set the default MTU. */
for (vl = 0; vl < num_vls; vl++)
dd->vld[vl].mtu = hfi1_max_mtu;
@@ -14777,7 +14895,6 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
* Set the initial values to reasonable default, will be set
* for real when link is up.
*/
- ppd->lstate = IB_PORT_DOWN;
ppd->overrun_threshold = 0x4;
ppd->phy_error_threshold = 0xf;
ppd->port_crc_mode_enabled = link_crc_mask;
@@ -14788,7 +14905,6 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
/* start in offline */
ppd->host_link_state = HLS_DN_OFFLINE;
init_vl_arb_caches(ppd);
- ppd->last_pstate = 0xff; /* invalid value */
}
dd->link_default = HLS_DN_POLL;
@@ -14802,6 +14918,11 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
if (ret < 0)
goto bail_free;
+ /* Save PCI space registers to rewrite after device reset */
+ ret = save_pci_variables(dd);
+ if (ret < 0)
+ goto bail_cleanup;
+
/* verify that reads actually work, save revision for reset check */
dd->revision = read_csr(dd, CCE_REVISION);
if (dd->revision == ~(u64)0) {
@@ -14894,7 +15015,9 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
goto bail_cleanup;
/* obtain chip sizes, reset chip CSRs */
- init_chip(dd);
+ ret = init_chip(dd);
+ if (ret)
+ goto bail_cleanup;
/* read in the PCIe link speed information */
ret = pcie_speeds(dd);
@@ -14969,10 +15092,16 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
if (ret)
goto bail_cleanup;
- ret = hfi1_create_ctxts(dd);
+ ret = hfi1_create_kctxts(dd);
if (ret)
goto bail_cleanup;
+ /*
+ * Initialize aspm, to be done after gen3 transition and setting up
+ * contexts and before enabling interrupts
+ */
+ aspm_init(dd);
+
dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
/*
* rcd[0] is guaranteed to be valid by this point. Also, all
@@ -14991,7 +15120,7 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
goto bail_cleanup;
}
- /* use contexts created by hfi1_create_ctxts */
+ /* use contexts created by hfi1_create_kctxts */
ret = set_up_interrupts(dd);
if (ret)
goto bail_cleanup;
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
index cbe455d9ab8b..b8345a60a0fb 100644
--- a/drivers/infiniband/hw/hfi1/chip.h
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -384,6 +384,7 @@
#define VERIFY_CAP_LOCAL_FABRIC 0x08
#define VERIFY_CAP_LOCAL_LINK_WIDTH 0x09
#define LOCAL_DEVICE_ID 0x0a
+#define RESERVED_REGISTERS 0x0b
#define LOCAL_LNI_INFO 0x0c
#define REMOTE_LNI_INFO 0x0d
#define MISC_STATUS 0x0e
@@ -506,6 +507,9 @@
#define DOWN_REMOTE_REASON_SHIFT 16
#define DOWN_REMOTE_REASON_MASK 0xff
+#define HOST_INTERFACE_VERSION_SHIFT 16
+#define HOST_INTERFACE_VERSION_MASK 0xff
+
/* verify capability PHY power management bits */
#define PWRM_BER_CONTROL 0x1
#define PWRM_BANDWIDTH_CONTROL 0x2
@@ -605,11 +609,11 @@ int read_lcb_csr(struct hfi1_devdata *dd, u32 offset, u64 *data);
int write_lcb_csr(struct hfi1_devdata *dd, u32 offset, u64 data);
void __iomem *get_csr_addr(
- struct hfi1_devdata *dd,
+ const struct hfi1_devdata *dd,
u32 offset);
static inline void __iomem *get_kctxt_csr_addr(
- struct hfi1_devdata *dd,
+ const struct hfi1_devdata *dd,
int ctxt,
u32 offset0)
{
@@ -644,7 +648,6 @@ u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
#define NUM_PCIE_SERDES 16 /* number of PCIe serdes on the SBus */
extern const u8 pcie_serdes_broadcast[];
extern const u8 pcie_pcs_addrs[2][NUM_PCIE_SERDES];
-extern uint platform_config_load;
/* SBus commands */
#define RESET_SBUS_RECEIVER 0x20
@@ -704,6 +707,7 @@ int read_8051_data(struct hfi1_devdata *dd, u32 addr, u32 len, u64 *result);
/* chip.c */
void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
u8 *ver_patch);
+int write_host_interface_version(struct hfi1_devdata *dd, u8 version);
void read_guid(struct hfi1_devdata *dd);
int wait_fm_ready(struct hfi1_devdata *dd, u32 mstimeout);
void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
@@ -743,11 +747,10 @@ int is_ax(struct hfi1_devdata *dd);
int is_bx(struct hfi1_devdata *dd);
u32 read_physical_state(struct hfi1_devdata *dd);
u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate);
-u32 get_logical_state(struct hfi1_pportdata *ppd);
const char *opa_lstate_name(u32 lstate);
const char *opa_pstate_name(u32 pstate);
-u32 driver_physical_state(struct hfi1_pportdata *ppd);
-u32 driver_logical_state(struct hfi1_pportdata *ppd);
+u32 driver_pstate(struct hfi1_pportdata *ppd);
+u32 driver_lstate(struct hfi1_pportdata *ppd);
int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok);
int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok);
@@ -1347,21 +1350,21 @@ enum {
u64 get_all_cpu_total(u64 __percpu *cntr);
void hfi1_start_cleanup(struct hfi1_devdata *dd);
void hfi1_clear_tids(struct hfi1_ctxtdata *rcd);
-struct ib_header *hfi1_get_msgheader(
- struct hfi1_devdata *dd, __le32 *rhf_addr);
void hfi1_init_ctxt(struct send_context *sc);
void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
u32 type, unsigned long pa, u16 order);
void hfi1_quiet_serdes(struct hfi1_pportdata *ppd);
-void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt);
+void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
+ struct hfi1_ctxtdata *rcd);
u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp);
u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp);
-u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd);
int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which);
int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val);
-int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey);
-int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt);
-int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey);
+int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
+ u16 jkey);
+int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt);
+int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt,
+ u16 pkey);
int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt);
void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality);
void hfi1_init_vnic_rsm(struct hfi1_devdata *dd);
diff --git a/drivers/infiniband/hw/hfi1/common.h b/drivers/infiniband/hw/hfi1/common.h
index 995d62c7f9a7..3e27794ec750 100644
--- a/drivers/infiniband/hw/hfi1/common.h
+++ b/drivers/infiniband/hw/hfi1/common.h
@@ -325,22 +325,15 @@ struct diag_pkt {
#define HFI1_LRH_BTH 0x0002 /* 1. word of IB LRH - next header: BTH */
/* misc. */
+#define SC15_PACKET 0xF
#define SIZE_OF_CRC 1
+#define SIZE_OF_LT 1
#define LIM_MGMT_P_KEY 0x7FFF
#define FULL_MGMT_P_KEY 0xFFFF
#define DEFAULT_P_KEY LIM_MGMT_P_KEY
-/**
- * 0xF8 - 4 bits of multicast range and 1 bit for collective range
- * Example: For 24 bit LID space,
- * Multicast range: 0xF00000 to 0xF7FFFF
- * Collective range: 0xF80000 to 0xFFFFFE
- */
-#define HFI1_MCAST_NR 0x4 /* Number of top bits set */
-#define HFI1_COLLECTIVE_NR 0x1 /* Number of bits after MCAST_NR */
-
#define HFI1_PSM_IOC_BASE_SEQ 0x0
static inline __u64 rhf_to_cpu(const __le32 *rbuf)
diff --git a/drivers/infiniband/hw/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c
index e9fa3c293e42..36ae1fd86502 100644
--- a/drivers/infiniband/hw/hfi1/debugfs.c
+++ b/drivers/infiniband/hw/hfi1/debugfs.c
@@ -1,4 +1,3 @@
-#ifdef CONFIG_DEBUG_FS
/*
* Copyright(c) 2015-2017 Intel Corporation.
*
@@ -173,12 +172,15 @@ static int _opcode_stats_seq_show(struct seq_file *s, void *v)
u64 n_packets = 0, n_bytes = 0;
struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
struct hfi1_devdata *dd = dd_from_dev(ibd);
+ struct hfi1_ctxtdata *rcd;
for (j = 0; j < dd->first_dyn_alloc_ctxt; j++) {
- if (!dd->rcd[j])
- continue;
- n_packets += dd->rcd[j]->opstats->stats[i].n_packets;
- n_bytes += dd->rcd[j]->opstats->stats[i].n_bytes;
+ rcd = hfi1_rcd_get_by_index(dd, j);
+ if (rcd) {
+ n_packets += rcd->opstats->stats[i].n_packets;
+ n_bytes += rcd->opstats->stats[i].n_bytes;
+ }
+ hfi1_rcd_put(rcd);
}
if (!n_packets && !n_bytes)
return SEQ_SKIP;
@@ -231,6 +233,7 @@ static int _ctx_stats_seq_show(struct seq_file *s, void *v)
u64 n_packets = 0;
struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
struct hfi1_devdata *dd = dd_from_dev(ibd);
+ struct hfi1_ctxtdata *rcd;
if (v == SEQ_START_TOKEN) {
seq_puts(s, "Ctx:npkts\n");
@@ -240,11 +243,14 @@ static int _ctx_stats_seq_show(struct seq_file *s, void *v)
spos = v;
i = *spos;
- if (!dd->rcd[i])
+ rcd = hfi1_rcd_get_by_index(dd, i);
+ if (!rcd)
return SEQ_SKIP;
- for (j = 0; j < ARRAY_SIZE(dd->rcd[i]->opstats->stats); j++)
- n_packets += dd->rcd[i]->opstats->stats[j].n_packets;
+ for (j = 0; j < ARRAY_SIZE(rcd->opstats->stats); j++)
+ n_packets += rcd->opstats->stats[j].n_packets;
+
+ hfi1_rcd_put(rcd);
if (!n_packets)
return SEQ_SKIP;
@@ -260,10 +266,10 @@ DEBUGFS_FILE_OPS(ctx_stats);
static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos)
__acquires(RCU)
{
- struct qp_iter *iter;
+ struct rvt_qp_iter *iter;
loff_t n = *pos;
- iter = qp_iter_init(s->private);
+ iter = rvt_qp_iter_init(s->private, 0, NULL);
/* stop calls rcu_read_unlock */
rcu_read_lock();
@@ -272,7 +278,7 @@ static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos)
return NULL;
do {
- if (qp_iter_next(iter)) {
+ if (rvt_qp_iter_next(iter)) {
kfree(iter);
return NULL;
}
@@ -285,11 +291,11 @@ static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
loff_t *pos)
__must_hold(RCU)
{
- struct qp_iter *iter = iter_ptr;
+ struct rvt_qp_iter *iter = iter_ptr;
(*pos)++;
- if (qp_iter_next(iter)) {
+ if (rvt_qp_iter_next(iter)) {
kfree(iter);
return NULL;
}
@@ -305,7 +311,7 @@ static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr)
static int _qp_stats_seq_show(struct seq_file *s, void *iter_ptr)
{
- struct qp_iter *iter = iter_ptr;
+ struct rvt_qp_iter *iter = iter_ptr;
if (!iter)
return 0;
@@ -361,6 +367,52 @@ DEBUGFS_SEQ_FILE_OPS(sdes);
DEBUGFS_SEQ_FILE_OPEN(sdes)
DEBUGFS_FILE_OPS(sdes);
+static void *_rcds_seq_start(struct seq_file *s, loff_t *pos)
+{
+ struct hfi1_ibdev *ibd;
+ struct hfi1_devdata *dd;
+
+ ibd = (struct hfi1_ibdev *)s->private;
+ dd = dd_from_dev(ibd);
+ if (!dd->rcd || *pos >= dd->n_krcv_queues)
+ return NULL;
+ return pos;
+}
+
+static void *_rcds_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
+ struct hfi1_devdata *dd = dd_from_dev(ibd);
+
+ ++*pos;
+ if (!dd->rcd || *pos >= dd->n_krcv_queues)
+ return NULL;
+ return pos;
+}
+
+static void _rcds_seq_stop(struct seq_file *s, void *v)
+{
+}
+
+static int _rcds_seq_show(struct seq_file *s, void *v)
+{
+ struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
+ struct hfi1_devdata *dd = dd_from_dev(ibd);
+ struct hfi1_ctxtdata *rcd;
+ loff_t *spos = v;
+ loff_t i = *spos;
+
+ rcd = hfi1_rcd_get_by_index(dd, i);
+ if (rcd)
+ seqfile_dump_rcd(s, rcd);
+ hfi1_rcd_put(rcd);
+ return 0;
+}
+
+DEBUGFS_SEQ_FILE_OPS(rcds);
+DEBUGFS_SEQ_FILE_OPEN(rcds)
+DEBUGFS_FILE_OPS(rcds);
+
/* read the per-device counters */
static ssize_t dev_counters_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
@@ -1098,12 +1150,15 @@ static int _fault_stats_seq_show(struct seq_file *s, void *v)
u64 n_packets = 0, n_bytes = 0;
struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
struct hfi1_devdata *dd = dd_from_dev(ibd);
+ struct hfi1_ctxtdata *rcd;
for (j = 0; j < dd->first_dyn_alloc_ctxt; j++) {
- if (!dd->rcd[j])
- continue;
- n_packets += dd->rcd[j]->opstats->stats[i].n_packets;
- n_bytes += dd->rcd[j]->opstats->stats[i].n_bytes;
+ rcd = hfi1_rcd_get_by_index(dd, j);
+ if (rcd) {
+ n_packets += rcd->opstats->stats[i].n_packets;
+ n_bytes += rcd->opstats->stats[i].n_bytes;
+ }
+ hfi1_rcd_put(rcd);
}
if (!n_packets && !n_bytes)
return SEQ_SKIP;
@@ -1311,6 +1366,7 @@ void hfi1_dbg_ibdev_init(struct hfi1_ibdev *ibd)
DEBUGFS_SEQ_FILE_CREATE(ctx_stats, ibd->hfi1_ibdev_dbg, ibd);
DEBUGFS_SEQ_FILE_CREATE(qp_stats, ibd->hfi1_ibdev_dbg, ibd);
DEBUGFS_SEQ_FILE_CREATE(sdes, ibd->hfi1_ibdev_dbg, ibd);
+ DEBUGFS_SEQ_FILE_CREATE(rcds, ibd->hfi1_ibdev_dbg, ibd);
DEBUGFS_SEQ_FILE_CREATE(sdma_cpu_list, ibd->hfi1_ibdev_dbg, ibd);
/* dev counter files */
for (i = 0; i < ARRAY_SIZE(cntr_ops); i++)
@@ -1478,5 +1534,3 @@ void hfi1_dbg_exit(void)
debugfs_remove_recursive(hfi1_dbg_root);
hfi1_dbg_root = NULL;
}
-
-#endif
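
The debugfs hunks above all follow the same pattern: instead of dereferencing dd->rcd[i] directly, a context is looked up through the refcounted hfi1_rcd_get_by_index() accessor and released with hfi1_rcd_put(), which (as in the hunks above) is called even when the lookup returned NULL. A minimal sketch of the pattern, using a hypothetical aggregation helper:

static u64 example_sum_packets(struct hfi1_devdata *dd, int opcode)
{
	struct hfi1_ctxtdata *rcd;
	u64 n_packets = 0;
	u16 i;

	for (i = 0; i < dd->first_dyn_alloc_ctxt; i++) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (rcd)
			n_packets += rcd->opstats->stats[opcode].n_packets;
		hfi1_rcd_put(rcd);	/* paired with every lookup, NULL or not */
	}
	return n_packets;
}
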
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index a50870e455a3..7372cc00cb2d 100644
--- a/drivers/infiniband/hw/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -96,7 +96,6 @@ MODULE_PARM_DESC(cap_mask, "Bit mask of enabled/disabled HW features");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Intel Omni-Path Architecture driver");
-MODULE_VERSION(HFI1_DRIVER_VERSION);
/*
 * MAX_PKT_RCV is the max # of packets processed per receive interrupt.
@@ -196,7 +195,7 @@ int hfi1_count_active_units(void)
spin_lock_irqsave(&hfi1_devs_lock, flags);
list_for_each_entry(dd, &hfi1_dev_list, list) {
- if (!(dd->flags & HFI1_PRESENT) || !dd->kregbase)
+ if (!(dd->flags & HFI1_PRESENT) || !dd->kregbase1)
continue;
for (pidx = 0; pidx < dd->num_pports; ++pidx) {
ppd = dd->pport + pidx;
@@ -224,6 +223,27 @@ static inline void *get_egrbuf(const struct hfi1_ctxtdata *rcd, u64 rhf,
(offset * RCV_BUF_BLOCK_SIZE));
}
+static inline void *hfi1_get_header(struct hfi1_devdata *dd,
+ __le32 *rhf_addr)
+{
+ u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));
+
+ return (void *)(rhf_addr - dd->rhf_offset + offset);
+}
+
+static inline struct ib_header *hfi1_get_msgheader(struct hfi1_devdata *dd,
+ __le32 *rhf_addr)
+{
+ return (struct ib_header *)hfi1_get_header(dd, rhf_addr);
+}
+
+static inline struct hfi1_16b_header
+ *hfi1_get_16B_header(struct hfi1_devdata *dd,
+ __le32 *rhf_addr)
+{
+ return (struct hfi1_16b_header *)hfi1_get_header(dd, rhf_addr);
+}
+
/*
 * Validate and encode a given RcvArray Buffer size.
* The function will check whether the given size falls within
@@ -249,7 +269,7 @@ static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd,
{
struct ib_header *rhdr = packet->hdr;
u32 rte = rhf_rcv_type_err(packet->rhf);
- int lnh = ib_get_lnh(rhdr);
+ u32 mlid_base;
struct hfi1_ibport *ibp = rcd_to_iport(rcd);
struct hfi1_devdata *dd = ppd->dd;
struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
@@ -257,37 +277,47 @@ static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd,
if (packet->rhf & (RHF_VCRC_ERR | RHF_ICRC_ERR))
return;
+ if (packet->etype == RHF_RCV_TYPE_BYPASS) {
+ goto drop;
+ } else {
+ u8 lnh = ib_get_lnh(rhdr);
+
+ mlid_base = be16_to_cpu(IB_MULTICAST_LID_BASE);
+ if (lnh == HFI1_LRH_BTH) {
+ packet->ohdr = &rhdr->u.oth;
+ } else if (lnh == HFI1_LRH_GRH) {
+ packet->ohdr = &rhdr->u.l.oth;
+ packet->grh = &rhdr->u.l.grh;
+ } else {
+ goto drop;
+ }
+ }
+
if (packet->rhf & RHF_TID_ERR) {
/* For TIDERR and RC QPs preemptively schedule a NAK */
- struct ib_other_headers *ohdr = NULL;
u32 tlen = rhf_pkt_len(packet->rhf); /* in bytes */
- u16 lid = ib_get_dlid(rhdr);
+ u32 dlid = ib_get_dlid(rhdr);
u32 qp_num;
- u32 rcv_flags = 0;
/* Sanity check packet */
if (tlen < 24)
goto drop;
/* Check for GRH */
- if (lnh == HFI1_LRH_BTH) {
- ohdr = &rhdr->u.oth;
- } else if (lnh == HFI1_LRH_GRH) {
+ if (packet->grh) {
u32 vtf;
+ struct ib_grh *grh = packet->grh;
- ohdr = &rhdr->u.l.oth;
- if (rhdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
+ if (grh->next_hdr != IB_GRH_NEXT_HDR)
goto drop;
- vtf = be32_to_cpu(rhdr->u.l.grh.version_tclass_flow);
+ vtf = be32_to_cpu(grh->version_tclass_flow);
if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
goto drop;
- rcv_flags |= HFI1_HAS_GRH;
- } else {
- goto drop;
}
+
/* Get the destination QP number. */
- qp_num = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
- if (lid < be16_to_cpu(IB_MULTICAST_LID_BASE)) {
+ qp_num = ib_bth_get_qpn(packet->ohdr);
+ if (dlid < mlid_base) {
struct rvt_qp *qp;
unsigned long flags;
@@ -312,11 +342,7 @@ static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd,
switch (qp->ibqp.qp_type) {
case IB_QPT_RC:
- hfi1_rc_hdrerr(
- rcd,
- rhdr,
- rcv_flags,
- qp);
+ hfi1_rc_hdrerr(rcd, packet, qp);
break;
default:
/* For now don't handle any other QP types */
@@ -332,9 +358,8 @@ static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd,
switch (rte) {
case RHF_RTE_ERROR_OP_CODE_ERR:
{
- u32 opcode;
void *ebuf = NULL;
- __be32 *bth = NULL;
+ u8 opcode;
if (rhf_use_egr_bfr(packet->rhf))
ebuf = packet->ebuf;
@@ -342,16 +367,7 @@ static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd,
if (!ebuf)
goto drop; /* this should never happen */
- if (lnh == HFI1_LRH_BTH)
- bth = (__be32 *)ebuf;
- else if (lnh == HFI1_LRH_GRH)
- bth = (__be32 *)((char *)ebuf + sizeof(struct ib_grh));
- else
- goto drop;
-
- opcode = be32_to_cpu(bth[0]) >> 24;
- opcode &= 0xff;
-
+ opcode = ib_bth_get_opcode(packet->ohdr);
if (opcode == IB_OPCODE_CNP) {
/*
* Only in pre-B0 h/w is the CNP_OPCODE handled
@@ -365,7 +381,7 @@ static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd,
sc5 = hfi1_9B_get_sc5(rhdr, packet->rhf);
sl = ibp->sc_to_sl[sc5];
- lqpn = be32_to_cpu(bth[1]) & RVT_QPN_MASK;
+ lqpn = ib_bth_get_qpn(packet->ohdr);
rcu_read_lock();
qp = rvt_lookup_qpn(rdi, &ibp->rvp, lqpn);
if (!qp) {
@@ -415,33 +431,39 @@ static inline void init_packet(struct hfi1_ctxtdata *rcd,
packet->rhf = rhf_to_cpu(packet->rhf_addr);
packet->rhqoff = rcd->head;
packet->numpkt = 0;
- packet->rcv_flags = 0;
}
void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
bool do_cnp)
{
struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- struct ib_header *hdr = pkt->hdr;
struct ib_other_headers *ohdr = pkt->ohdr;
- struct ib_grh *grh = NULL;
+ struct ib_grh *grh = pkt->grh;
u32 rqpn = 0, bth1;
- u16 rlid, dlid = ib_get_dlid(hdr);
- u8 sc, svc_type;
+ u16 pkey, rlid, dlid = ib_get_dlid(pkt->hdr);
+ u8 hdr_type, sc, svc_type;
bool is_mcast = false;
- if (pkt->rcv_flags & HFI1_HAS_GRH)
- grh = &hdr->u.l.grh;
+ if (pkt->etype == RHF_RCV_TYPE_BYPASS) {
+ is_mcast = hfi1_is_16B_mcast(dlid);
+ pkey = hfi1_16B_get_pkey(pkt->hdr);
+ sc = hfi1_16B_get_sc(pkt->hdr);
+ hdr_type = HFI1_PKT_TYPE_16B;
+ } else {
+ is_mcast = (dlid > be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
+ (dlid != be16_to_cpu(IB_LID_PERMISSIVE));
+ pkey = ib_bth_get_pkey(ohdr);
+ sc = hfi1_9B_get_sc5(pkt->hdr, pkt->rhf);
+ hdr_type = HFI1_PKT_TYPE_9B;
+ }
switch (qp->ibqp.qp_type) {
case IB_QPT_SMI:
case IB_QPT_GSI:
case IB_QPT_UD:
- rlid = ib_get_slid(hdr);
- rqpn = be32_to_cpu(ohdr->u.ud.deth[1]) & RVT_QPN_MASK;
+ rlid = ib_get_slid(pkt->hdr);
+ rqpn = ib_get_sqpn(pkt->ohdr);
svc_type = IB_CC_SVCTYPE_UD;
- is_mcast = (dlid > be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
- (dlid != be16_to_cpu(IB_LID_PERMISSIVE));
break;
case IB_QPT_UC:
rlid = rdma_ah_get_dlid(&qp->remote_ah_attr);
@@ -457,14 +479,11 @@ void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
return;
}
- sc = hfi1_9B_get_sc5(hdr, pkt->rhf);
-
bth1 = be32_to_cpu(ohdr->bth[1]);
- if (do_cnp && (bth1 & IB_FECN_SMASK)) {
- u16 pkey = (u16)be32_to_cpu(ohdr->bth[0]);
-
- return_cnp(ibp, qp, rqpn, pkey, dlid, rlid, sc, grh);
- }
+ /* Call appropriate CNP handler */
+ if (do_cnp && (bth1 & IB_FECN_SMASK))
+ hfi1_handle_cnp_tbl[hdr_type](ibp, qp, rqpn, pkey,
+ dlid, rlid, sc, grh);
if (!is_mcast && (bth1 & IB_BECN_SMASK)) {
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
@@ -591,9 +610,10 @@ static void __prescan_rxq(struct hfi1_packet *packet)
if (lnh == HFI1_LRH_BTH) {
packet->ohdr = &hdr->u.oth;
+ packet->grh = NULL;
} else if (lnh == HFI1_LRH_GRH) {
packet->ohdr = &hdr->u.l.oth;
- packet->rcv_flags |= HFI1_HAS_GRH;
+ packet->grh = &hdr->u.l.grh;
} else {
goto next; /* just in case */
}
@@ -698,10 +718,8 @@ static inline int process_rcv_packet(struct hfi1_packet *packet, int thread)
{
int ret;
- packet->hdr = hfi1_get_msgheader(packet->rcd->dd,
- packet->rhf_addr);
- packet->hlen = (u8 *)packet->rhf_addr - (u8 *)packet->hdr;
packet->etype = rhf_rcv_type(packet->rhf);
+
/* total length */
packet->tlen = rhf_pkt_len(packet->rhf); /* in bytes */
/* retrieve eager buffer details */
@@ -759,7 +777,7 @@ static inline void process_rcv_update(int last, struct hfi1_packet *packet)
packet->etail, 0, 0);
packet->updegr = 0;
}
- packet->rcv_flags = 0;
+ packet->grh = NULL;
}
static inline void finish_packet(struct hfi1_packet *packet)
@@ -837,9 +855,10 @@ bail:
return last;
}
-static inline void set_nodma_rtail(struct hfi1_devdata *dd, u8 ctxt)
+static inline void set_nodma_rtail(struct hfi1_devdata *dd, u16 ctxt)
{
- int i;
+ struct hfi1_ctxtdata *rcd;
+ u16 i;
/*
* For dynamically allocated kernel contexts (like vnic) switch
@@ -847,19 +866,28 @@ static inline void set_nodma_rtail(struct hfi1_devdata *dd, u8 ctxt)
* interrupt handler for all statically allocated kernel contexts.
*/
if (ctxt >= dd->first_dyn_alloc_ctxt) {
- dd->rcd[ctxt]->do_interrupt =
- &handle_receive_interrupt_nodma_rtail;
+ rcd = hfi1_rcd_get_by_index(dd, ctxt);
+ if (rcd) {
+ rcd->do_interrupt =
+ &handle_receive_interrupt_nodma_rtail;
+ hfi1_rcd_put(rcd);
+ }
return;
}
- for (i = HFI1_CTRL_CTXT + 1; i < dd->first_dyn_alloc_ctxt; i++)
- dd->rcd[i]->do_interrupt =
- &handle_receive_interrupt_nodma_rtail;
+ for (i = HFI1_CTRL_CTXT + 1; i < dd->first_dyn_alloc_ctxt; i++) {
+ rcd = hfi1_rcd_get_by_index(dd, i);
+ if (rcd)
+ rcd->do_interrupt =
+ &handle_receive_interrupt_nodma_rtail;
+ hfi1_rcd_put(rcd);
+ }
}
-static inline void set_dma_rtail(struct hfi1_devdata *dd, u8 ctxt)
+static inline void set_dma_rtail(struct hfi1_devdata *dd, u16 ctxt)
{
- int i;
+ struct hfi1_ctxtdata *rcd;
+ u16 i;
/*
* For dynamically allocated kernel contexts (like vnic) switch
@@ -867,27 +895,39 @@ static inline void set_dma_rtail(struct hfi1_devdata *dd, u8 ctxt)
* interrupt handler for all statically allocated kernel contexts.
*/
if (ctxt >= dd->first_dyn_alloc_ctxt) {
- dd->rcd[ctxt]->do_interrupt =
- &handle_receive_interrupt_dma_rtail;
+ rcd = hfi1_rcd_get_by_index(dd, ctxt);
+ if (rcd) {
+ rcd->do_interrupt =
+ &handle_receive_interrupt_dma_rtail;
+ hfi1_rcd_put(rcd);
+ }
return;
}
- for (i = HFI1_CTRL_CTXT + 1; i < dd->first_dyn_alloc_ctxt; i++)
- dd->rcd[i]->do_interrupt =
- &handle_receive_interrupt_dma_rtail;
+ for (i = HFI1_CTRL_CTXT + 1; i < dd->first_dyn_alloc_ctxt; i++) {
+ rcd = hfi1_rcd_get_by_index(dd, i);
+ if (rcd)
+ rcd->do_interrupt =
+ &handle_receive_interrupt_dma_rtail;
+ hfi1_rcd_put(rcd);
+ }
}
void set_all_slowpath(struct hfi1_devdata *dd)
{
- int i;
+ struct hfi1_ctxtdata *rcd;
+ u16 i;
/* HFI1_CTRL_CTXT must always use the slow path interrupt handler */
for (i = HFI1_CTRL_CTXT + 1; i < dd->num_rcv_contexts; i++) {
- struct hfi1_ctxtdata *rcd = dd->rcd[i];
-
+ rcd = hfi1_rcd_get_by_index(dd, i);
+ if (!rcd)
+ continue;
if ((i < dd->first_dyn_alloc_ctxt) ||
- (rcd && rcd->sc && (rcd->sc->type == SC_KERNEL)))
+ (rcd->sc && (rcd->sc->type == SC_KERNEL))) {
rcd->do_interrupt = &handle_receive_interrupt;
+ }
+ hfi1_rcd_put(rcd);
}
}
@@ -896,20 +936,30 @@ static inline int set_armed_to_active(struct hfi1_ctxtdata *rcd,
struct hfi1_devdata *dd)
{
struct work_struct *lsaw = &rcd->ppd->linkstate_active_work;
- struct ib_header *hdr = hfi1_get_msgheader(packet->rcd->dd,
- packet->rhf_addr);
u8 etype = rhf_rcv_type(packet->rhf);
+ u8 sc = SC15_PACKET;
+
+ if (etype == RHF_RCV_TYPE_IB) {
+ struct ib_header *hdr = hfi1_get_msgheader(packet->rcd->dd,
+ packet->rhf_addr);
+ sc = hfi1_9B_get_sc5(hdr, packet->rhf);
+ } else if (etype == RHF_RCV_TYPE_BYPASS) {
+ struct hfi1_16b_header *hdr = hfi1_get_16B_header(
+ packet->rcd->dd,
+ packet->rhf_addr);
+ sc = hfi1_16B_get_sc(hdr);
+ }
+ if (sc != SC15_PACKET) {
+ int hwstate = driver_lstate(rcd->ppd);
- if (etype == RHF_RCV_TYPE_IB &&
- hfi1_9B_get_sc5(hdr, packet->rhf) != 0xf) {
- int hwstate = read_logical_state(dd);
-
- if (hwstate != LSTATE_ACTIVE) {
- dd_dev_info(dd, "Unexpected link state %d\n", hwstate);
+ if (hwstate != IB_PORT_ACTIVE) {
+ dd_dev_info(dd,
+ "Unexpected link state %s\n",
+ opa_lstate_name(hwstate));
return 0;
}
- queue_work(rcd->ppd->hfi1_wq, lsaw);
+ queue_work(rcd->ppd->link_wq, lsaw);
return 1;
}
return 0;
@@ -1063,7 +1113,8 @@ void receive_interrupt_work(struct work_struct *work)
struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
linkstate_active_work);
struct hfi1_devdata *dd = ppd->dd;
- int i;
+ struct hfi1_ctxtdata *rcd;
+ u16 i;
/* Received non-SC15 packet implies neighbor_normal */
ppd->neighbor_normal = 1;
@@ -1073,8 +1124,12 @@ void receive_interrupt_work(struct work_struct *work)
* Interrupt all statically allocated kernel contexts that could
* have had an interrupt during auto activation.
*/
- for (i = HFI1_CTRL_CTXT; i < dd->first_dyn_alloc_ctxt; i++)
- force_recv_intr(dd->rcd[i]);
+ for (i = HFI1_CTRL_CTXT; i < dd->first_dyn_alloc_ctxt; i++) {
+ rcd = hfi1_rcd_get_by_index(dd, i);
+ if (rcd)
+ force_recv_intr(rcd);
+ hfi1_rcd_put(rcd);
+ }
}
/*
@@ -1264,10 +1319,9 @@ void hfi1_start_led_override(struct hfi1_pportdata *ppd, unsigned int timeon,
*/
int hfi1_reset_device(int unit)
{
- int ret, i;
+ int ret;
struct hfi1_devdata *dd = hfi1_lookup(unit);
struct hfi1_pportdata *ppd;
- unsigned long flags;
int pidx;
if (!dd) {
@@ -1277,7 +1331,7 @@ int hfi1_reset_device(int unit)
dd_dev_info(dd, "Reset on unit %u requested\n", unit);
- if (!dd->kregbase || !(dd->flags & HFI1_PRESENT)) {
+ if (!dd->kregbase1 || !(dd->flags & HFI1_PRESENT)) {
dd_dev_info(dd,
"Invalid unit number %u or not initialized or not present\n",
unit);
@@ -1285,17 +1339,15 @@ int hfi1_reset_device(int unit)
goto bail;
}
- spin_lock_irqsave(&dd->uctxt_lock, flags);
+ /* If there are any user/vnic contexts, we cannot reset */
+ mutex_lock(&hfi1_mutex);
if (dd->rcd)
- for (i = dd->first_dyn_alloc_ctxt;
- i < dd->num_rcv_contexts; i++) {
- if (!dd->rcd[i])
- continue;
- spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+ if (hfi1_stats.sps_ctxts) {
+ mutex_unlock(&hfi1_mutex);
ret = -EBUSY;
goto bail;
}
- spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+ mutex_unlock(&hfi1_mutex);
for (pidx = 0; pidx < dd->num_pports; ++pidx) {
ppd = dd->pport + pidx;
@@ -1321,6 +1373,162 @@ bail:
return ret;
}
+static inline void hfi1_setup_ib_header(struct hfi1_packet *packet)
+{
+ packet->hdr = (struct hfi1_ib_message_header *)
+ hfi1_get_msgheader(packet->rcd->dd,
+ packet->rhf_addr);
+ packet->hlen = (u8 *)packet->rhf_addr - (u8 *)packet->hdr;
+}
+
+static int hfi1_bypass_ingress_pkt_check(struct hfi1_packet *packet)
+{
+ struct hfi1_pportdata *ppd = packet->rcd->ppd;
+
+ /* slid and dlid cannot be 0 */
+ if ((!packet->slid) || (!packet->dlid))
+ return -EINVAL;
+
+ /* Compare port lid with incoming packet dlid */
+ if ((!(hfi1_is_16B_mcast(packet->dlid))) &&
+ (packet->dlid !=
+ opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE), 16B))) {
+ if (packet->dlid != ppd->lid)
+ return -EINVAL;
+ }
+
+ /* No multicast packets with SC15 */
+ if ((hfi1_is_16B_mcast(packet->dlid)) && (packet->sc == 0xF))
+ return -EINVAL;
+
+ /* Packets with permissive DLID always on SC15 */
+ if ((packet->dlid == opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE),
+ 16B)) &&
+ (packet->sc != 0xF))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int hfi1_setup_9B_packet(struct hfi1_packet *packet)
+{
+ struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
+ struct ib_header *hdr;
+ u8 lnh;
+
+ hfi1_setup_ib_header(packet);
+ hdr = packet->hdr;
+
+ lnh = ib_get_lnh(hdr);
+ if (lnh == HFI1_LRH_BTH) {
+ packet->ohdr = &hdr->u.oth;
+ packet->grh = NULL;
+ } else if (lnh == HFI1_LRH_GRH) {
+ u32 vtf;
+
+ packet->ohdr = &hdr->u.l.oth;
+ packet->grh = &hdr->u.l.grh;
+ if (packet->grh->next_hdr != IB_GRH_NEXT_HDR)
+ goto drop;
+ vtf = be32_to_cpu(packet->grh->version_tclass_flow);
+ if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
+ goto drop;
+ } else {
+ goto drop;
+ }
+
+ /* Query commonly used fields from packet header */
+ packet->payload = packet->ebuf;
+ packet->opcode = ib_bth_get_opcode(packet->ohdr);
+ packet->slid = ib_get_slid(hdr);
+ packet->dlid = ib_get_dlid(hdr);
+ if (unlikely((packet->dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
+ (packet->dlid != be16_to_cpu(IB_LID_PERMISSIVE))))
+ packet->dlid += opa_get_mcast_base(OPA_MCAST_NR) -
+ be16_to_cpu(IB_MULTICAST_LID_BASE);
+ packet->sl = ib_get_sl(hdr);
+ packet->sc = hfi1_9B_get_sc5(hdr, packet->rhf);
+ packet->pad = ib_bth_get_pad(packet->ohdr);
+ packet->extra_byte = 0;
+ packet->fecn = ib_bth_get_fecn(packet->ohdr);
+ packet->becn = ib_bth_get_becn(packet->ohdr);
+
+ return 0;
+drop:
+ ibp->rvp.n_pkt_drops++;
+ return -EINVAL;
+}
+
+static int hfi1_setup_bypass_packet(struct hfi1_packet *packet)
+{
+ /*
+ * Bypass packets have a different header/payload split
+ * compared to an IB packet.
+ * Current split is set such that 16 bytes of the actual
+ * header is in the header buffer and the remaining is in
+ * the eager buffer. We chose 16 since the hfi1 driver only
+ * supports 16B bypass packets and we will be able to
+ * receive the entire LRH with such a split.
+ */
+
+ struct hfi1_ctxtdata *rcd = packet->rcd;
+ struct hfi1_pportdata *ppd = rcd->ppd;
+ struct hfi1_ibport *ibp = &ppd->ibport_data;
+ u8 l4;
+ u8 grh_len;
+
+ packet->hdr = (struct hfi1_16b_header *)
+ hfi1_get_16B_header(packet->rcd->dd,
+ packet->rhf_addr);
+ packet->hlen = (u8 *)packet->rhf_addr - (u8 *)packet->hdr;
+
+ l4 = hfi1_16B_get_l4(packet->hdr);
+ if (l4 == OPA_16B_L4_IB_LOCAL) {
+ grh_len = 0;
+ packet->ohdr = packet->ebuf;
+ packet->grh = NULL;
+ } else if (l4 == OPA_16B_L4_IB_GLOBAL) {
+ u32 vtf;
+
+ grh_len = sizeof(struct ib_grh);
+ packet->ohdr = packet->ebuf + grh_len;
+ packet->grh = packet->ebuf;
+ if (packet->grh->next_hdr != IB_GRH_NEXT_HDR)
+ goto drop;
+ vtf = be32_to_cpu(packet->grh->version_tclass_flow);
+ if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
+ goto drop;
+ } else {
+ goto drop;
+ }
+
+ /* Query commonly used fields from packet header */
+ packet->opcode = ib_bth_get_opcode(packet->ohdr);
+ packet->hlen = hdr_len_by_opcode[packet->opcode] + 8 + grh_len;
+ packet->payload = packet->ebuf + packet->hlen - (4 * sizeof(u32));
+ packet->slid = hfi1_16B_get_slid(packet->hdr);
+ packet->dlid = hfi1_16B_get_dlid(packet->hdr);
+ if (unlikely(hfi1_is_16B_mcast(packet->dlid)))
+ packet->dlid += opa_get_mcast_base(OPA_MCAST_NR) -
+ opa_get_lid(opa_get_mcast_base(OPA_MCAST_NR),
+ 16B);
+ packet->sc = hfi1_16B_get_sc(packet->hdr);
+ packet->sl = ibp->sc_to_sl[packet->sc];
+ packet->pad = hfi1_16B_bth_get_pad(packet->ohdr);
+ packet->extra_byte = SIZE_OF_LT;
+ packet->fecn = hfi1_16B_get_fecn(packet->hdr);
+ packet->becn = hfi1_16B_get_becn(packet->hdr);
+
+ if (hfi1_bypass_ingress_pkt_check(packet))
+ goto drop;
+
+ return 0;
+drop:
+ hfi1_cdbg(PKT, "%s: packet dropped\n", __func__);
+ ibp->rvp.n_pkt_drops++;
+ return -EINVAL;
+}
+
void handle_eflags(struct hfi1_packet *packet)
{
struct hfi1_ctxtdata *rcd = packet->rcd;
@@ -1351,6 +1559,9 @@ int process_receive_ib(struct hfi1_packet *packet)
if (unlikely(hfi1_dbg_fault_packet(packet)))
return RHF_RCV_CONTINUE;
+ if (hfi1_setup_9B_packet(packet))
+ return RHF_RCV_CONTINUE;
+
trace_hfi1_rcvhdr(packet->rcd->ppd->dd,
packet->rcd->ctxt,
rhf_err_flags(packet->rhf),
@@ -1380,8 +1591,8 @@ static inline bool hfi1_is_vnic_packet(struct hfi1_packet *packet)
if (packet->rcd->is_vnic)
return true;
- if ((HFI1_GET_L2_TYPE(packet->ebuf) == OPA_VNIC_L2_TYPE) &&
- (HFI1_GET_L4_TYPE(packet->ebuf) == OPA_VNIC_L4_ETHR))
+ if ((hfi1_16B_get_l2(packet->ebuf) == OPA_16B_L2_TYPE) &&
+ (hfi1_16B_get_l4(packet->ebuf) == OPA_16B_L4_ETHR))
return true;
return false;
@@ -1391,25 +1602,38 @@ int process_receive_bypass(struct hfi1_packet *packet)
{
struct hfi1_devdata *dd = packet->rcd->dd;
- if (unlikely(rhf_err_flags(packet->rhf))) {
- handle_eflags(packet);
- } else if (hfi1_is_vnic_packet(packet)) {
+ if (hfi1_is_vnic_packet(packet)) {
hfi1_vnic_bypass_rcv(packet);
return RHF_RCV_CONTINUE;
}
- dd_dev_err(dd, "Unsupported bypass packet. Dropping\n");
- incr_cntr64(&dd->sw_rcv_bypass_packet_errors);
- if (!(dd->err_info_rcvport.status_and_code & OPA_EI_STATUS_SMASK)) {
- u64 *flits = packet->ebuf;
+ if (hfi1_setup_bypass_packet(packet))
+ return RHF_RCV_CONTINUE;
+
+ if (unlikely(rhf_err_flags(packet->rhf))) {
+ handle_eflags(packet);
+ return RHF_RCV_CONTINUE;
+ }
- if (flits && !(packet->rhf & RHF_LEN_ERR)) {
- dd->err_info_rcvport.packet_flit1 = flits[0];
- dd->err_info_rcvport.packet_flit2 =
- packet->tlen > sizeof(flits[0]) ? flits[1] : 0;
+ if (hfi1_16B_get_l2(packet->hdr) == 0x2) {
+ hfi1_16B_rcv(packet);
+ } else {
+ dd_dev_err(dd,
+ "Bypass packets other than 16B are not supported in normal operation. Dropping\n");
+ incr_cntr64(&dd->sw_rcv_bypass_packet_errors);
+ if (!(dd->err_info_rcvport.status_and_code &
+ OPA_EI_STATUS_SMASK)) {
+ u64 *flits = packet->ebuf;
+
+ if (flits && !(packet->rhf & RHF_LEN_ERR)) {
+ dd->err_info_rcvport.packet_flit1 = flits[0];
+ dd->err_info_rcvport.packet_flit2 =
+ packet->tlen > sizeof(flits[0]) ?
+ flits[1] : 0;
+ }
+ dd->err_info_rcvport.status_and_code |=
+ (OPA_EI_STATUS_SMASK | BAD_L2_ERR);
}
- dd->err_info_rcvport.status_and_code |=
- (OPA_EI_STATUS_SMASK | BAD_L2_ERR);
}
return RHF_RCV_CONTINUE;
}
@@ -1422,6 +1646,7 @@ int process_receive_error(struct hfi1_packet *packet)
rhf_rcv_type_err(packet->rhf) == 3))
return RHF_RCV_CONTINUE;
+ hfi1_setup_ib_header(packet);
handle_eflags(packet);
if (unlikely(rhf_err_flags(packet->rhf)))
@@ -1435,6 +1660,8 @@ int kdeth_process_expected(struct hfi1_packet *packet)
{
if (unlikely(hfi1_dbg_fault_packet(packet)))
return RHF_RCV_CONTINUE;
+
+ hfi1_setup_ib_header(packet);
if (unlikely(rhf_err_flags(packet->rhf)))
handle_eflags(packet);
@@ -1445,6 +1672,7 @@ int kdeth_process_expected(struct hfi1_packet *packet)
int kdeth_process_eager(struct hfi1_packet *packet)
{
+ hfi1_setup_ib_header(packet);
if (unlikely(rhf_err_flags(packet->rhf)))
handle_eflags(packet);
if (unlikely(hfi1_dbg_fault_packet(packet)))
@@ -1461,3 +1689,62 @@ int process_receive_invalid(struct hfi1_packet *packet)
rhf_rcv_type(packet->rhf));
return RHF_RCV_CONTINUE;
}
+
+void seqfile_dump_rcd(struct seq_file *s, struct hfi1_ctxtdata *rcd)
+{
+ struct hfi1_packet packet;
+ struct ps_mdata mdata;
+
+ seq_printf(s, "Rcd %u: RcvHdr cnt %u entsize %u %s head %llu tail %llu\n",
+ rcd->ctxt, rcd->rcvhdrq_cnt, rcd->rcvhdrqentsize,
+ HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ?
+ "dma_rtail" : "nodma_rtail",
+ read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD) &
+ RCV_HDR_HEAD_HEAD_MASK,
+ read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL));
+
+ init_packet(rcd, &packet);
+ init_ps_mdata(&mdata, &packet);
+
+ while (1) {
+ struct hfi1_devdata *dd = rcd->dd;
+ __le32 *rhf_addr = (__le32 *)rcd->rcvhdrq + mdata.ps_head +
+ dd->rhf_offset;
+ struct ib_header *hdr;
+ u64 rhf = rhf_to_cpu(rhf_addr);
+ u32 etype = rhf_rcv_type(rhf), qpn;
+ u8 opcode;
+ u32 psn;
+ u8 lnh;
+
+ if (ps_done(&mdata, rhf, rcd))
+ break;
+
+ if (ps_skip(&mdata, rhf, rcd))
+ goto next;
+
+ if (etype > RHF_RCV_TYPE_IB)
+ goto next;
+
+ packet.hdr = hfi1_get_msgheader(dd, rhf_addr);
+ hdr = packet.hdr;
+
+ lnh = be16_to_cpu(hdr->lrh[0]) & 3;
+
+ if (lnh == HFI1_LRH_BTH)
+ packet.ohdr = &hdr->u.oth;
+ else if (lnh == HFI1_LRH_GRH)
+ packet.ohdr = &hdr->u.l.oth;
+ else
+ goto next; /* just in case */
+
+ opcode = (be32_to_cpu(packet.ohdr->bth[0]) >> 24);
+ qpn = be32_to_cpu(packet.ohdr->bth[1]) & RVT_QPN_MASK;
+ psn = mask_psn(be32_to_cpu(packet.ohdr->bth[2]));
+
+ seq_printf(s, "\tEnt %u: opcode 0x%x, qpn 0x%x, psn 0x%x\n",
+ mdata.ps_head, opcode, qpn, psn);
+next:
+ update_ps_mdata(&mdata, rcd);
+ }
+}
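
One detail worth calling out from the hunks above: hfi1_process_ecn_slowpath() no longer calls return_cnp() directly; it derives hdr_type from the receive type and indexes hfi1_handle_cnp_tbl[], so 9B and 16B congestion notifications can use different senders. The table itself is defined elsewhere in the series; the sketch below only illustrates the dispatch idea, and every name and prototype in it is a placeholder rather than the driver's real declaration.

enum example_pkt_type { EXAMPLE_PKT_9B, EXAMPLE_PKT_16B };

typedef void (*example_cnp_handler)(u32 remote_qpn, u16 pkey,
				    u32 dlid, u32 rlid, u8 sc5);

static void example_cnp_9b(u32 rqpn, u16 pkey, u32 dlid, u32 rlid, u8 sc5) { }
static void example_cnp_16b(u32 rqpn, u16 pkey, u32 dlid, u32 rlid, u8 sc5) { }

static const example_cnp_handler example_cnp_tbl[] = {
	[EXAMPLE_PKT_9B]  = example_cnp_9b,	/* 9B (IB) sender */
	[EXAMPLE_PKT_16B] = example_cnp_16b,	/* 16B (OPA) sender */
};

/* dispatch exactly as the patched slow path does: tbl[hdr_type](...) */
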
diff --git a/drivers/infiniband/hw/hfi1/eprom.c b/drivers/infiniband/hw/hfi1/eprom.c
index 26da124c88e2..d46b17107901 100644
--- a/drivers/infiniband/hw/hfi1/eprom.c
+++ b/drivers/infiniband/hw/hfi1/eprom.c
@@ -250,7 +250,6 @@ static int read_partition_platform_config(struct hfi1_devdata *dd, void **data,
{
void *buffer;
void *p;
- u32 length;
int ret;
buffer = kmalloc(P1_SIZE, GFP_KERNEL);
@@ -265,13 +264,13 @@ static int read_partition_platform_config(struct hfi1_devdata *dd, void **data,
/* scan for image magic that may trail the actual data */
p = strnstr(buffer, IMAGE_TRAIL_MAGIC, P1_SIZE);
- if (p)
- length = p - buffer;
- else
- length = P1_SIZE;
+ if (!p) {
+ kfree(buffer);
+ return -ENOENT;
+ }
*data = buffer;
- *size = length;
+ *size = p - buffer;
return 0;
}
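
With the change above, a missing image-trailing magic in the P1 partition is reported as -ENOENT instead of silently returning the full P1_SIZE buffer. A hypothetical caller (names illustrative, not taken from the patch) can then distinguish "no platform config present" from a real failure:

	ret = read_partition_platform_config(dd, &data, &size);
	if (ret == -ENOENT) {
		/* no config stored in the EPROM partition: fall back elsewhere */
	} else if (ret) {
		/* allocation or read failure: propagate the error */
	} else {
		/* data/size describe the trimmed platform config image */
	}
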
diff --git a/drivers/infiniband/hw/hfi1/exp_rcv.c b/drivers/infiniband/hw/hfi1/exp_rcv.c
new file mode 100644
index 000000000000..0af91675acc6
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/exp_rcv.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright(c) 2017 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "exp_rcv.h"
+#include "trace.h"
+
+/**
+ * exp_tid_group_init - initialize exp_tid_set
+ * @set - the set
+ */
+void hfi1_exp_tid_group_init(struct exp_tid_set *set)
+{
+ INIT_LIST_HEAD(&set->list);
+ set->count = 0;
+}
+
+/**
+ * alloc_ctxt_rcv_groups - initialize expected receive groups
+ * @rcd - the context to add the groupings to
+ */
+int hfi1_alloc_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd)
+{
+ struct hfi1_devdata *dd = rcd->dd;
+ u32 tidbase;
+ struct tid_group *grp;
+ int i;
+
+ tidbase = rcd->expected_base;
+ for (i = 0; i < rcd->expected_count /
+ dd->rcv_entries.group_size; i++) {
+ grp = kzalloc(sizeof(*grp), GFP_KERNEL);
+ if (!grp)
+ goto bail;
+ grp->size = dd->rcv_entries.group_size;
+ grp->base = tidbase;
+ tid_group_add_tail(grp, &rcd->tid_group_list);
+ tidbase += dd->rcv_entries.group_size;
+ }
+
+ return 0;
+bail:
+ hfi1_free_ctxt_rcv_groups(rcd);
+ return -ENOMEM;
+}
+
+/**
+ * free_ctxt_rcv_groups - free expected receive groups
+ * @rcd - the context to free
+ *
+ * The routine dismantles the expected receive linked
+ * list and clears any tids associated with the receive
+ * context.
+ *
+ * This should only be called for kernel contexts and
+ * the base user context.
+ */
+void hfi1_free_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd)
+{
+ struct tid_group *grp, *gptr;
+
+ WARN_ON(!EXP_TID_SET_EMPTY(rcd->tid_full_list));
+ WARN_ON(!EXP_TID_SET_EMPTY(rcd->tid_used_list));
+
+ list_for_each_entry_safe(grp, gptr, &rcd->tid_group_list.list, list) {
+ tid_group_remove(grp, &rcd->tid_group_list);
+ kfree(grp);
+ }
+
+ hfi1_clear_tids(rcd);
+}
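
The intended pairing for these helpers is visible in the file_ops.c hunks later in this patch: the base user context allocates its expected-receive groups while being set up and dismantles them on close. In outline:

	/* during base context setup (see setup_base_ctxt() below) */
	ret = hfi1_alloc_ctxt_rcv_groups(uctxt);
	if (ret)
		goto setup_failed;

	/* during context teardown (see hfi1_file_close() below) */
	hfi1_free_ctxt_rcv_groups(uctxt);
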
diff --git a/drivers/infiniband/hw/hfi1/exp_rcv.h b/drivers/infiniband/hw/hfi1/exp_rcv.h
new file mode 100644
index 000000000000..08719047628a
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/exp_rcv.h
@@ -0,0 +1,190 @@
+#ifndef _HFI1_EXP_RCV_H
+#define _HFI1_EXP_RCV_H
+/*
+ * Copyright(c) 2017 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "hfi.h"
+
+#define EXP_TID_SET_EMPTY(set) (set.count == 0 && list_empty(&set.list))
+
+#define EXP_TID_TIDLEN_MASK 0x7FFULL
+#define EXP_TID_TIDLEN_SHIFT 0
+#define EXP_TID_TIDCTRL_MASK 0x3ULL
+#define EXP_TID_TIDCTRL_SHIFT 20
+#define EXP_TID_TIDIDX_MASK 0x3FFULL
+#define EXP_TID_TIDIDX_SHIFT 22
+#define EXP_TID_GET(tid, field) \
+ (((tid) >> EXP_TID_TID##field##_SHIFT) & EXP_TID_TID##field##_MASK)
+
+#define EXP_TID_SET(field, value) \
+ (((value) & EXP_TID_TID##field##_MASK) << \
+ EXP_TID_TID##field##_SHIFT)
+#define EXP_TID_CLEAR(tid, field) ({ \
+ (tid) &= ~(EXP_TID_TID##field##_MASK << \
+ EXP_TID_TID##field##_SHIFT); \
+ })
+#define EXP_TID_RESET(tid, field, value) do { \
+ EXP_TID_CLEAR(tid, field); \
+ (tid) |= EXP_TID_SET(field, (value)); \
+ } while (0)
+
+/*
+ * Define fields in the KDETH header so we can update the header
+ * template.
+ */
+#define KDETH_OFFSET_SHIFT 0
+#define KDETH_OFFSET_MASK 0x7fff
+#define KDETH_OM_SHIFT 15
+#define KDETH_OM_MASK 0x1
+#define KDETH_TID_SHIFT 16
+#define KDETH_TID_MASK 0x3ff
+#define KDETH_TIDCTRL_SHIFT 26
+#define KDETH_TIDCTRL_MASK 0x3
+#define KDETH_INTR_SHIFT 28
+#define KDETH_INTR_MASK 0x1
+#define KDETH_SH_SHIFT 29
+#define KDETH_SH_MASK 0x1
+#define KDETH_KVER_SHIFT 30
+#define KDETH_KVER_MASK 0x3
+#define KDETH_JKEY_SHIFT 0x0
+#define KDETH_JKEY_MASK 0xff
+#define KDETH_HCRC_UPPER_SHIFT 16
+#define KDETH_HCRC_UPPER_MASK 0xff
+#define KDETH_HCRC_LOWER_SHIFT 24
+#define KDETH_HCRC_LOWER_MASK 0xff
+
+#define KDETH_GET(val, field) \
+ (((le32_to_cpu((val))) >> KDETH_##field##_SHIFT) & KDETH_##field##_MASK)
+#define KDETH_SET(dw, field, val) do { \
+ u32 dwval = le32_to_cpu(dw); \
+ dwval &= ~(KDETH_##field##_MASK << KDETH_##field##_SHIFT); \
+ dwval |= (((val) & KDETH_##field##_MASK) << \
+ KDETH_##field##_SHIFT); \
+ dw = cpu_to_le32(dwval); \
+ } while (0)
+
+#define KDETH_RESET(dw, field, val) ({ dw = 0; KDETH_SET(dw, field, val); })
+
+/* KDETH OM multipliers and switch over point */
+#define KDETH_OM_SMALL 4
+#define KDETH_OM_SMALL_SHIFT 2
+#define KDETH_OM_LARGE 64
+#define KDETH_OM_LARGE_SHIFT 6
+#define KDETH_OM_MAX_SIZE (1 << ((KDETH_OM_LARGE / KDETH_OM_SMALL) + 1))
+
+struct tid_group {
+ struct list_head list;
+ u32 base;
+ u8 size;
+ u8 used;
+ u8 map;
+};
+
+/*
+ * Write an "empty" RcvArray entry.
+ * This function exists so the TID registration code can use it
+ * to write to unused/unneeded entries and still take advantage
+ * of the WC performance improvements. The HFI will ignore this
+ * write to the RcvArray entry.
+ */
+static inline void rcv_array_wc_fill(struct hfi1_devdata *dd, u32 index)
+{
+ /*
+ * Doing the WC fill writes only makes sense if the device is
+ * present and the RcvArray has been mapped as WC memory.
+ */
+ if ((dd->flags & HFI1_PRESENT) && dd->rcvarray_wc) {
+ writeq(0, dd->rcvarray_wc + (index * 8));
+ if ((index & 3) == 3)
+ flush_wc();
+ }
+}
+
+static inline void tid_group_add_tail(struct tid_group *grp,
+ struct exp_tid_set *set)
+{
+ list_add_tail(&grp->list, &set->list);
+ set->count++;
+}
+
+static inline void tid_group_remove(struct tid_group *grp,
+ struct exp_tid_set *set)
+{
+ list_del_init(&grp->list);
+ set->count--;
+}
+
+static inline void tid_group_move(struct tid_group *group,
+ struct exp_tid_set *s1,
+ struct exp_tid_set *s2)
+{
+ tid_group_remove(group, s1);
+ tid_group_add_tail(group, s2);
+}
+
+static inline struct tid_group *tid_group_pop(struct exp_tid_set *set)
+{
+ struct tid_group *grp =
+ list_first_entry(&set->list, struct tid_group, list);
+ list_del_init(&grp->list);
+ set->count--;
+ return grp;
+}
+
+static inline u32 rcventry2tidinfo(u32 rcventry)
+{
+ u32 pair = rcventry & ~0x1;
+
+ return EXP_TID_SET(IDX, pair >> 1) |
+ EXP_TID_SET(CTRL, 1 << (rcventry - pair));
+}
+
+int hfi1_alloc_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd);
+void hfi1_free_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd);
+void hfi1_exp_tid_group_init(struct exp_tid_set *set);
+
+#endif /* _HFI1_EXP_RCV_H */
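
A quick, self-contained illustration of the EXP_TID field macros defined above (the values are arbitrary examples, not taken from the driver):

	u64 tidinfo = EXP_TID_SET(IDX, 5) |	/* RcvArray pair index */
		      EXP_TID_SET(CTRL, 1) |	/* which entry of the pair */
		      EXP_TID_SET(LEN, 128);	/* length field */

	u32 idx = EXP_TID_GET(tidinfo, IDX);	/* == 5 */

	EXP_TID_RESET(tidinfo, LEN, 256);	/* replace only the LEN field */

	/* rcventry2tidinfo() packs a raw RcvArray entry number the same way: */
	u32 info = rcventry2tidinfo(11);	/* IDX == 5, CTRL == 2 (odd entry) */
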
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 3158128d57e8..2bc89260235a 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -58,10 +58,10 @@
#include "device.h"
#include "common.h"
#include "trace.h"
+#include "mmu_rb.h"
#include "user_sdma.h"
#include "user_exp_rcv.h"
#include "aspm.h"
-#include "mmu_rb.h"
#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt
@@ -79,21 +79,25 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma);
static u64 kvirt_to_phys(void *addr);
static int assign_ctxt(struct hfi1_filedata *fd, struct hfi1_user_info *uinfo);
-static int init_subctxts(struct hfi1_ctxtdata *uctxt,
- const struct hfi1_user_info *uinfo);
-static int init_user_ctxt(struct hfi1_filedata *fd);
+static void init_subctxts(struct hfi1_ctxtdata *uctxt,
+ const struct hfi1_user_info *uinfo);
+static int init_user_ctxt(struct hfi1_filedata *fd,
+ struct hfi1_ctxtdata *uctxt);
static void user_init(struct hfi1_ctxtdata *uctxt);
static int get_ctxt_info(struct hfi1_filedata *fd, void __user *ubase,
__u32 len);
static int get_base_info(struct hfi1_filedata *fd, void __user *ubase,
__u32 len);
-static int setup_base_ctxt(struct hfi1_filedata *fd);
+static int setup_base_ctxt(struct hfi1_filedata *fd,
+ struct hfi1_ctxtdata *uctxt);
static int setup_subctxt(struct hfi1_ctxtdata *uctxt);
static int find_sub_ctxt(struct hfi1_filedata *fd,
const struct hfi1_user_info *uinfo);
static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd,
- struct hfi1_user_info *uinfo);
+ struct hfi1_user_info *uinfo,
+ struct hfi1_ctxtdata **cd);
+static void deallocate_ctxt(struct hfi1_ctxtdata *uctxt);
static unsigned int poll_urgent(struct file *fp, struct poll_table_struct *pt);
static unsigned int poll_next(struct file *fp, struct poll_table_struct *pt);
static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt,
@@ -116,7 +120,7 @@ static const struct file_operations hfi1_file_ops = {
.llseek = noop_llseek,
};
-static struct vm_operations_struct vm_ops = {
+static const struct vm_operations_struct vm_ops = {
.fault = vma_fault,
};
@@ -181,7 +185,7 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
struct hfi1_devdata,
user_cdev);
- if (!((dd->flags & HFI1_PRESENT) && dd->kregbase))
+ if (!((dd->flags & HFI1_PRESENT) && dd->kregbase1))
return -EINVAL;
if (!atomic_inc_not_zero(&dd->user_refcount))
@@ -267,12 +271,14 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
/*
* Copy the number of tidlist entries we used
* and the length of the buffer we registered.
- * These fields are adjacent in the structure so
- * we can copy them at the same time.
*/
addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
- sizeof(tinfo.tidcnt) +
+ sizeof(tinfo.tidcnt)))
+ return -EFAULT;
+
+ addr = arg + offsetof(struct hfi1_tid_info, length);
+ if (copy_to_user((void __user *)addr, &tinfo.length,
sizeof(tinfo.length)))
ret = -EFAULT;
}
@@ -388,8 +394,7 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
sc_disable(sc);
ret = sc_enable(sc);
- hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB,
- uctxt->ctxt);
+ hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB, uctxt);
} else {
ret = sc_restart(sc);
}
@@ -425,8 +430,7 @@ static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
if (!iter_is_iovec(from) || !dim)
return -EINVAL;
- hfi1_cdbg(SDMA, "SDMA request from %u:%u (%lu)",
- fd->uctxt->ctxt, fd->subctxt, dim);
+ trace_hfi1_sdma_request(fd->dd, fd->uctxt->ctxt, fd->subctxt, dim);
if (atomic_read(&pq->n_reqs) == pq->n_max_reqs)
return -ENOSPC;
@@ -752,12 +756,11 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
if (!uctxt)
goto done;
- hfi1_cdbg(PROC, "freeing ctxt %u:%u", uctxt->ctxt, fdata->subctxt);
- mutex_lock(&hfi1_mutex);
+ hfi1_cdbg(PROC, "closing ctxt %u:%u", uctxt->ctxt, fdata->subctxt);
flush_wc();
/* drain user sdma queue */
- hfi1_user_sdma_free_queues(fdata);
+ hfi1_user_sdma_free_queues(fdata, uctxt);
/* release the cpu */
hfi1_put_proc_affinity(fdata->rec_cpu_num);
@@ -766,6 +769,13 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
hfi1_user_exp_rcv_free(fdata);
/*
+ * fdata->uctxt is used in the above cleanup. It is not ready to be
+ * removed until here.
+ */
+ fdata->uctxt = NULL;
+ hfi1_rcd_put(uctxt);
+
+ /*
* Clear any left over, unhandled events so the next process that
* gets this context doesn't get confused.
*/
@@ -773,13 +783,14 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
HFI1_MAX_SHARED_CTXTS) + fdata->subctxt;
*ev = 0;
+ spin_lock_irqsave(&dd->uctxt_lock, flags);
__clear_bit(fdata->subctxt, uctxt->in_use_ctxts);
if (!bitmap_empty(uctxt->in_use_ctxts, HFI1_MAX_SHARED_CTXTS)) {
- mutex_unlock(&hfi1_mutex);
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
goto done;
}
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
- spin_lock_irqsave(&dd->uctxt_lock, flags);
/*
* Disable receive context and interrupt available, reset all
* RcvCtxtCtrl bits to default values.
@@ -790,34 +801,24 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
HFI1_RCVCTRL_TAILUPD_DIS |
HFI1_RCVCTRL_ONE_PKT_EGR_DIS |
HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
- HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt->ctxt);
+ HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt);
/* Clear the context's J_KEY */
- hfi1_clear_ctxt_jkey(dd, uctxt->ctxt);
+ hfi1_clear_ctxt_jkey(dd, uctxt);
/*
- * Reset context integrity checks to default.
- * (writes to CSRs probably belong in chip.c)
+ * If a send context is allocated, reset context integrity
+ * checks to default and disable the send context.
*/
- write_kctxt_csr(dd, uctxt->sc->hw_context, SEND_CTXT_CHECK_ENABLE,
- hfi1_pkt_default_send_ctxt_mask(dd, uctxt->sc->type));
- sc_disable(uctxt->sc);
- spin_unlock_irqrestore(&dd->uctxt_lock, flags);
-
- dd->rcd[uctxt->ctxt] = NULL;
+ if (uctxt->sc) {
+ set_pio_integrity(uctxt->sc);
+ sc_disable(uctxt->sc);
+ }
- hfi1_user_exp_rcv_grp_free(uctxt);
+ hfi1_free_ctxt_rcv_groups(uctxt);
hfi1_clear_ctxt_pkey(dd, uctxt);
- uctxt->rcvwait_to = 0;
- uctxt->piowait_to = 0;
- uctxt->rcvnowait = 0;
- uctxt->pionowait = 0;
uctxt->event_flags = 0;
- hfi1_stats.sps_ctxts--;
- if (++dd->freectxts == dd->num_user_contexts)
- aspm_enable_all(dd);
- mutex_unlock(&hfi1_mutex);
- hfi1_free_ctxtdata(dd, uctxt);
+ deallocate_ctxt(uctxt);
done:
mmdrop(fdata->mm);
kobject_put(&dd->kobj);
@@ -845,135 +846,211 @@ static u64 kvirt_to_phys(void *addr)
return paddr;
}
+/**
+ * complete_subctxt
+ * @fd: valid filedata pointer
+ *
+ * Sub-context info can only be set up after the base context
+ * has been completed. This is indicated by the clearing of the
+ * HFI1_CTXT_BASE_UINIT bit.
+ *
+ * Wait for the bit to be cleared, and then complete the subcontext
+ * initialization.
+ *
+ */
+static int complete_subctxt(struct hfi1_filedata *fd)
+{
+ int ret;
+ unsigned long flags;
+
+ /*
+ * sub-context info can only be set up after the base context
+ * has been completed.
+ */
+ ret = wait_event_interruptible(
+ fd->uctxt->wait,
+ !test_bit(HFI1_CTXT_BASE_UNINIT, &fd->uctxt->event_flags));
+
+ if (test_bit(HFI1_CTXT_BASE_FAILED, &fd->uctxt->event_flags))
+ ret = -ENOMEM;
+
+ /* Finish the sub-context init */
+ if (!ret) {
+ fd->rec_cpu_num = hfi1_get_proc_affinity(fd->uctxt->numa_id);
+ ret = init_user_ctxt(fd, fd->uctxt);
+ }
+
+ if (ret) {
+ hfi1_rcd_put(fd->uctxt);
+ fd->uctxt = NULL;
+ spin_lock_irqsave(&fd->dd->uctxt_lock, flags);
+ __clear_bit(fd->subctxt, fd->uctxt->in_use_ctxts);
+ spin_unlock_irqrestore(&fd->dd->uctxt_lock, flags);
+ }
+
+ return ret;
+}
+
static int assign_ctxt(struct hfi1_filedata *fd, struct hfi1_user_info *uinfo)
{
int ret;
unsigned int swmajor, swminor;
+ struct hfi1_ctxtdata *uctxt = NULL;
swmajor = uinfo->userversion >> 16;
if (swmajor != HFI1_USER_SWMAJOR)
return -ENODEV;
+ if (uinfo->subctxt_cnt > HFI1_MAX_SHARED_CTXTS)
+ return -EINVAL;
+
swminor = uinfo->userversion & 0xffff;
+ /*
+ * Acquire the mutex to protect against multiple creations of what
+ * could be a shared base context.
+ */
mutex_lock(&hfi1_mutex);
/*
- * Get a sub context if necessary.
+ * Get a sub context if available (fd->uctxt will be set).
* ret < 0 error, 0 no context, 1 sub-context found
*/
- ret = 0;
- if (uinfo->subctxt_cnt) {
- ret = find_sub_ctxt(fd, uinfo);
- if (ret > 0)
- fd->rec_cpu_num =
- hfi1_get_proc_affinity(fd->uctxt->numa_id);
- }
+ ret = find_sub_ctxt(fd, uinfo);
/*
- * Allocate a base context if context sharing is not required or we
- * couldn't find a sub context.
+ * Allocate a base context if context sharing is not required or a
+ * sub context wasn't found.
*/
if (!ret)
- ret = allocate_ctxt(fd, fd->dd, uinfo);
+ ret = allocate_ctxt(fd, fd->dd, uinfo, &uctxt);
mutex_unlock(&hfi1_mutex);
- /* Depending on the context type, do the appropriate init */
- if (ret > 0) {
- /*
- * sub-context info can only be set up after the base
- * context has been completed.
- */
- ret = wait_event_interruptible(fd->uctxt->wait, !test_bit(
- HFI1_CTXT_BASE_UNINIT,
- &fd->uctxt->event_flags));
- if (test_bit(HFI1_CTXT_BASE_FAILED, &fd->uctxt->event_flags)) {
- clear_bit(fd->subctxt, fd->uctxt->in_use_ctxts);
- return -ENOMEM;
- }
- /* The only thing a sub context needs is the user_xxx stuff */
- if (!ret)
- ret = init_user_ctxt(fd);
-
- if (ret)
- clear_bit(fd->subctxt, fd->uctxt->in_use_ctxts);
- } else if (!ret) {
- ret = setup_base_ctxt(fd);
- if (fd->uctxt->subctxt_cnt) {
- /* If there is an error, set the failed bit. */
- if (ret)
- set_bit(HFI1_CTXT_BASE_FAILED,
- &fd->uctxt->event_flags);
+ /* Depending on the context type, finish the appropriate init */
+ switch (ret) {
+ case 0:
+ ret = setup_base_ctxt(fd, uctxt);
+ if (uctxt->subctxt_cnt) {
/*
- * Base context is done, notify anybody using a
- * sub-context that is waiting for this completion
+ * Base context is done (successfully or not), notify
+ * anybody using a sub-context that is waiting for
+ * this completion.
*/
- clear_bit(HFI1_CTXT_BASE_UNINIT,
- &fd->uctxt->event_flags);
- wake_up(&fd->uctxt->wait);
+ clear_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags);
+ wake_up(&uctxt->wait);
}
+ break;
+ case 1:
+ ret = complete_subctxt(fd);
+ break;
+ default:
+ break;
}
return ret;
}
-/*
- * The hfi1_mutex must be held when this function is called. It is
- * necessary to ensure serialized access to the bitmask in_use_ctxts.
+/**
+ * match_ctxt
+ * @fd: valid filedata pointer
+ * @uinfo: user info to compare base context with
+ * @uctxt: context to compare uinfo to.
+ *
+ * Compare the given context with the given information to see if it
+ * can be used for a sub context.
*/
-static int find_sub_ctxt(struct hfi1_filedata *fd,
- const struct hfi1_user_info *uinfo)
+static int match_ctxt(struct hfi1_filedata *fd,
+ const struct hfi1_user_info *uinfo,
+ struct hfi1_ctxtdata *uctxt)
{
- int i;
struct hfi1_devdata *dd = fd->dd;
+ unsigned long flags;
u16 subctxt;
- for (i = dd->first_dyn_alloc_ctxt; i < dd->num_rcv_contexts; i++) {
- struct hfi1_ctxtdata *uctxt = dd->rcd[i];
+ /* Skip dynamically allocated kernel contexts */
+ if (uctxt->sc && (uctxt->sc->type == SC_KERNEL))
+ return 0;
- /* Skip ctxts which are not yet open */
- if (!uctxt ||
- bitmap_empty(uctxt->in_use_ctxts,
- HFI1_MAX_SHARED_CTXTS))
- continue;
+ /* Skip ctxt if it doesn't match the requested one */
+ if (memcmp(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid)) ||
+ uctxt->jkey != generate_jkey(current_uid()) ||
+ uctxt->subctxt_id != uinfo->subctxt_id ||
+ uctxt->subctxt_cnt != uinfo->subctxt_cnt)
+ return 0;
- /* Skip dynamically allocted kernel contexts */
- if (uctxt->sc && (uctxt->sc->type == SC_KERNEL))
- continue;
+ /* Verify the sharing process matches the base */
+ if (uctxt->userversion != uinfo->userversion)
+ return -EINVAL;
- /* Skip ctxt if it doesn't match the requested one */
- if (memcmp(uctxt->uuid, uinfo->uuid,
- sizeof(uctxt->uuid)) ||
- uctxt->jkey != generate_jkey(current_uid()) ||
- uctxt->subctxt_id != uinfo->subctxt_id ||
- uctxt->subctxt_cnt != uinfo->subctxt_cnt)
- continue;
+ /* Find an unused sub context */
+ spin_lock_irqsave(&dd->uctxt_lock, flags);
+ if (bitmap_empty(uctxt->in_use_ctxts, HFI1_MAX_SHARED_CTXTS)) {
+ /* context is being closed, do not use */
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+ return 0;
+ }
- /* Verify the sharing process matches the master */
- if (uctxt->userversion != uinfo->userversion)
- return -EINVAL;
+ subctxt = find_first_zero_bit(uctxt->in_use_ctxts,
+ HFI1_MAX_SHARED_CTXTS);
+ if (subctxt >= uctxt->subctxt_cnt) {
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+ return -EBUSY;
+ }
- /* Find an unused context */
- subctxt = find_first_zero_bit(uctxt->in_use_ctxts,
- HFI1_MAX_SHARED_CTXTS);
- if (subctxt >= uctxt->subctxt_cnt)
- return -EBUSY;
+ fd->subctxt = subctxt;
+ __set_bit(fd->subctxt, uctxt->in_use_ctxts);
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
- fd->uctxt = uctxt;
- fd->subctxt = subctxt;
- __set_bit(fd->subctxt, uctxt->in_use_ctxts);
+ fd->uctxt = uctxt;
+ hfi1_rcd_get(uctxt);
+
+ return 1;
+}
- return 1;
+/**
+ * find_sub_ctxt
+ * @fd: valid filedata pointer
+ * @uinfo: matching info to use to find a possible context to share.
+ *
+ * The hfi1_mutex must be held when this function is called. It is
+ * necessary to ensure serialized creation of shared contexts.
+ *
+ * Return:
+ * 0 No sub-context found
+ * 1 Subcontext found and allocated
+ * errno EINVAL (incorrect parameters)
+ * EBUSY (all sub contexts in use)
+ */
+static int find_sub_ctxt(struct hfi1_filedata *fd,
+ const struct hfi1_user_info *uinfo)
+{
+ struct hfi1_ctxtdata *uctxt;
+ struct hfi1_devdata *dd = fd->dd;
+ u16 i;
+ int ret;
+
+ if (!uinfo->subctxt_cnt)
+ return 0;
+
+ for (i = dd->first_dyn_alloc_ctxt; i < dd->num_rcv_contexts; i++) {
+ uctxt = hfi1_rcd_get_by_index(dd, i);
+ if (uctxt) {
+ ret = match_ctxt(fd, uinfo, uctxt);
+ hfi1_rcd_put(uctxt);
+ /* a non-zero value (match found or error) ends the search */
+ if (ret)
+ return ret;
+ }
}
return 0;
}
static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd,
- struct hfi1_user_info *uinfo)
+ struct hfi1_user_info *uinfo,
+ struct hfi1_ctxtdata **rcd)
{
struct hfi1_ctxtdata *uctxt;
- unsigned int ctxt;
int ret, numa;
if (dd->flags & HFI1_FROZEN) {
@@ -987,22 +1064,9 @@ static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd,
return -EIO;
}
- /*
- * This check is sort of redundant to the next EBUSY error. It would
- * also indicate an inconsistancy in the driver if this value was
- * zero, but there were still contexts available.
- */
if (!dd->freectxts)
return -EBUSY;
- for (ctxt = dd->first_dyn_alloc_ctxt;
- ctxt < dd->num_rcv_contexts; ctxt++)
- if (!dd->rcd[ctxt])
- break;
-
- if (ctxt == dd->num_rcv_contexts)
- return -EBUSY;
-
/*
* If we don't have a NUMA node requested, preference is towards
* device NUMA node.
@@ -1012,11 +1076,10 @@ static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd,
numa = cpu_to_node(fd->rec_cpu_num);
else
numa = numa_node_id();
- uctxt = hfi1_create_ctxtdata(dd->pport, ctxt, numa);
- if (!uctxt) {
- dd_dev_err(dd,
- "Unable to allocate ctxtdata memory, failing open\n");
- return -ENOMEM;
+ ret = hfi1_create_ctxtdata(dd->pport, numa, &uctxt);
+ if (ret < 0) {
+ dd_dev_err(dd, "user ctxtdata allocation failed\n");
+ return ret;
}
hfi1_cdbg(PROC, "[%u:%u] pid %u assigned to CPU %d (NUMA %u)",
uctxt->ctxt, fd->subctxt, current->pid, fd->rec_cpu_num,
@@ -1025,8 +1088,7 @@ static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd,
/*
* Allocate and enable a PIO send context.
*/
- uctxt->sc = sc_alloc(dd, SC_USER, uctxt->rcvhdrqentsize,
- uctxt->dd->node);
+ uctxt->sc = sc_alloc(dd, SC_USER, uctxt->rcvhdrqentsize, dd->node);
if (!uctxt->sc) {
ret = -ENOMEM;
goto ctxdata_free;
@@ -1038,28 +1100,19 @@ static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd,
goto ctxdata_free;
/*
- * Setup sub context resources if the user-level has requested
+ * Setup sub context information if the user-level has requested
* sub contexts.
* This has to be done here so the rest of the sub-contexts find the
- * proper master.
+ * proper base context.
*/
- if (uinfo->subctxt_cnt) {
- ret = init_subctxts(uctxt, uinfo);
- /*
- * On error, we don't need to disable and de-allocate the
- * send context because it will be done during file close
- */
- if (ret)
- goto ctxdata_free;
- }
+ if (uinfo->subctxt_cnt)
+ init_subctxts(uctxt, uinfo);
uctxt->userversion = uinfo->userversion;
uctxt->flags = hfi1_cap_mask; /* save current flag state */
init_waitqueue_head(&uctxt->wait);
strlcpy(uctxt->comm, current->comm, sizeof(uctxt->comm));
memcpy(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid));
uctxt->jkey = generate_jkey(current_uid());
- INIT_LIST_HEAD(&uctxt->sdma_queues);
- spin_lock_init(&uctxt->sdma_qlock);
hfi1_stats.sps_ctxts++;
/*
* Disable ASPM when there are open user/PSM contexts to avoid
@@ -1067,31 +1120,33 @@ static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd,
*/
if (dd->freectxts-- == dd->num_user_contexts)
aspm_disable_all(dd);
- fd->uctxt = uctxt;
+
+ *rcd = uctxt;
return 0;
ctxdata_free:
- dd->rcd[ctxt] = NULL;
- hfi1_free_ctxtdata(dd, uctxt);
+ hfi1_free_ctxt(uctxt);
return ret;
}
-static int init_subctxts(struct hfi1_ctxtdata *uctxt,
- const struct hfi1_user_info *uinfo)
+static void deallocate_ctxt(struct hfi1_ctxtdata *uctxt)
{
- u16 num_subctxts;
+ mutex_lock(&hfi1_mutex);
+ hfi1_stats.sps_ctxts--;
+ if (++uctxt->dd->freectxts == uctxt->dd->num_user_contexts)
+ aspm_enable_all(uctxt->dd);
+ mutex_unlock(&hfi1_mutex);
- num_subctxts = uinfo->subctxt_cnt;
- if (num_subctxts > HFI1_MAX_SHARED_CTXTS)
- return -EINVAL;
+ hfi1_free_ctxt(uctxt);
+}
+static void init_subctxts(struct hfi1_ctxtdata *uctxt,
+ const struct hfi1_user_info *uinfo)
+{
uctxt->subctxt_cnt = uinfo->subctxt_cnt;
uctxt->subctxt_id = uinfo->subctxt_id;
- uctxt->redirect_seq_cnt = 1;
set_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags);
-
- return 0;
}
static int setup_subctxt(struct hfi1_ctxtdata *uctxt)
@@ -1153,7 +1208,7 @@ static void user_init(struct hfi1_ctxtdata *uctxt)
clear_rcvhdrtail(uctxt);
/* Setup J_KEY before enabling the context */
- hfi1_set_ctxt_jkey(uctxt->dd, uctxt->ctxt, uctxt->jkey);
+ hfi1_set_ctxt_jkey(uctxt->dd, uctxt, uctxt->jkey);
rcvctrl_ops = HFI1_RCVCTRL_CTXT_ENB;
if (HFI1_CAP_UGET_MASK(uctxt->flags, HDRSUPP))
@@ -1179,7 +1234,7 @@ static void user_init(struct hfi1_ctxtdata *uctxt)
rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB;
else
rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_DIS;
- hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt->ctxt);
+ hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt);
}
static int get_ctxt_info(struct hfi1_filedata *fd, void __user *ubase,
@@ -1223,23 +1278,25 @@ static int get_ctxt_info(struct hfi1_filedata *fd, void __user *ubase,
return ret;
}
-static int init_user_ctxt(struct hfi1_filedata *fd)
+static int init_user_ctxt(struct hfi1_filedata *fd,
+ struct hfi1_ctxtdata *uctxt)
{
- struct hfi1_ctxtdata *uctxt = fd->uctxt;
int ret;
ret = hfi1_user_sdma_alloc_queues(uctxt, fd);
if (ret)
return ret;
- ret = hfi1_user_exp_rcv_init(fd);
+ ret = hfi1_user_exp_rcv_init(fd, uctxt);
+ if (ret)
+ hfi1_user_sdma_free_queues(fd, uctxt);
return ret;
}
-static int setup_base_ctxt(struct hfi1_filedata *fd)
+static int setup_base_ctxt(struct hfi1_filedata *fd,
+ struct hfi1_ctxtdata *uctxt)
{
- struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_devdata *dd = uctxt->dd;
int ret = 0;
@@ -1260,20 +1317,27 @@ static int setup_base_ctxt(struct hfi1_filedata *fd)
if (ret)
goto setup_failed;
- ret = hfi1_user_exp_rcv_grp_init(fd);
+ ret = hfi1_alloc_ctxt_rcv_groups(uctxt);
if (ret)
goto setup_failed;
- ret = init_user_ctxt(fd);
+ ret = init_user_ctxt(fd, uctxt);
if (ret)
goto setup_failed;
user_init(uctxt);
+ /* Now that the context is set up, the fd can get a reference. */
+ fd->uctxt = uctxt;
+ hfi1_rcd_get(uctxt);
+
return 0;
setup_failed:
- hfi1_free_ctxtdata(dd, uctxt);
+ /* Set the failed bit so sub-context init can do the right thing */
+ set_bit(HFI1_CTXT_BASE_FAILED, &uctxt->event_flags);
+ deallocate_ctxt(uctxt);
+
return ret;
}
@@ -1390,7 +1454,7 @@ static unsigned int poll_next(struct file *fp,
spin_lock_irq(&dd->uctxt_lock);
if (hdrqempty(uctxt)) {
set_bit(HFI1_CTXT_WAITING_RCV, &uctxt->event_flags);
- hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_ENB, uctxt->ctxt);
+ hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_ENB, uctxt);
pollflag = 0;
} else {
pollflag = POLLIN | POLLRDNORM;
@@ -1409,19 +1473,14 @@ int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit)
{
struct hfi1_ctxtdata *uctxt;
struct hfi1_devdata *dd = ppd->dd;
- unsigned ctxt;
- int ret = 0;
- unsigned long flags;
+ u16 ctxt;
- if (!dd->events) {
- ret = -EINVAL;
- goto done;
- }
+ if (!dd->events)
+ return -EINVAL;
- spin_lock_irqsave(&dd->uctxt_lock, flags);
for (ctxt = dd->first_dyn_alloc_ctxt; ctxt < dd->num_rcv_contexts;
ctxt++) {
- uctxt = dd->rcd[ctxt];
+ uctxt = hfi1_rcd_get_by_index(dd, ctxt);
if (uctxt) {
unsigned long *evs = dd->events +
(uctxt->ctxt - dd->first_dyn_alloc_ctxt) *
@@ -1434,11 +1493,11 @@ int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit)
set_bit(evtbit, evs);
for (i = 1; i < uctxt->subctxt_cnt; i++)
set_bit(evtbit, evs + i);
+ hfi1_rcd_put(uctxt);
}
}
- spin_unlock_irqrestore(&dd->uctxt_lock, flags);
-done:
- return ret;
+
+ return 0;
}
/**
@@ -1475,7 +1534,7 @@ static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt,
} else {
rcvctrl_op = HFI1_RCVCTRL_CTXT_DIS;
}
- hfi1_rcvctrl(dd, rcvctrl_op, uctxt->ctxt);
+ hfi1_rcvctrl(dd, rcvctrl_op, uctxt);
/* always; new head should be equal to new tail; see above */
bail:
return 0;
@@ -1525,7 +1584,7 @@ static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, u16 subctxt, u16 pkey)
}
if (intable)
- ret = hfi1_set_ctxt_pkey(dd, uctxt->ctxt, pkey);
+ ret = hfi1_set_ctxt_pkey(dd, uctxt, pkey);
done:
return ret;
}
diff --git a/drivers/infiniband/hw/hfi1/firmware.c b/drivers/infiniband/hw/hfi1/firmware.c
index 4042c11b2742..5aea8f47e670 100644
--- a/drivers/infiniband/hw/hfi1/firmware.c
+++ b/drivers/infiniband/hw/hfi1/firmware.c
@@ -64,30 +64,22 @@
#define DEFAULT_FW_FABRIC_NAME "hfi1_fabric.fw"
#define DEFAULT_FW_SBUS_NAME "hfi1_sbus.fw"
#define DEFAULT_FW_PCIE_NAME "hfi1_pcie.fw"
-#define DEFAULT_PLATFORM_CONFIG_NAME "hfi1_platform.dat"
#define ALT_FW_8051_NAME_ASIC "hfi1_dc8051_d.fw"
#define ALT_FW_FABRIC_NAME "hfi1_fabric_d.fw"
#define ALT_FW_SBUS_NAME "hfi1_sbus_d.fw"
#define ALT_FW_PCIE_NAME "hfi1_pcie_d.fw"
+#define HOST_INTERFACE_VERSION 1
static uint fw_8051_load = 1;
static uint fw_fabric_serdes_load = 1;
static uint fw_pcie_serdes_load = 1;
static uint fw_sbus_load = 1;
-/*
- * Access required in platform.c
- * Maintains state of whether the platform config was fetched via the
- * fallback option
- */
-uint platform_config_load;
-
/* Firmware file names get set in hfi1_firmware_init() based on the above */
static char *fw_8051_name;
static char *fw_fabric_serdes_name;
static char *fw_sbus_name;
static char *fw_pcie_serdes_name;
-static char *platform_config_name;
#define SBUS_MAX_POLL_COUNT 100
#define SBUS_COUNTER(reg, name) \
@@ -177,7 +169,6 @@ static struct firmware_details fw_8051;
static struct firmware_details fw_fabric;
static struct firmware_details fw_pcie;
static struct firmware_details fw_sbus;
-static const struct firmware *platform_config;
/* flags for turn_off_spicos() */
#define SPICO_SBUS 0x1
@@ -615,6 +606,14 @@ retry:
fw_fabric_serdes_name = ALT_FW_FABRIC_NAME;
fw_sbus_name = ALT_FW_SBUS_NAME;
fw_pcie_serdes_name = ALT_FW_PCIE_NAME;
+
+ /*
+ * Add a delay before obtaining and loading debug firmware.
+ * Authorization will fail if the delay between firmware
+	 * authorization events is shorter than 50us. Use 100us to
+	 * leave a safe margin.
+ */
+ usleep_range(100, 120);
}
if (fw_sbus_load) {
@@ -675,7 +674,6 @@ done:
static int obtain_firmware(struct hfi1_devdata *dd)
{
unsigned long timeout;
- int err = 0;
mutex_lock(&fw_mutex);
@@ -699,38 +697,11 @@ static int obtain_firmware(struct hfi1_devdata *dd)
}
/* not in FW_TRY state */
- if (fw_state == FW_FINAL) {
- if (platform_config) {
- dd->platform_config.data = platform_config->data;
- dd->platform_config.size = platform_config->size;
- }
- goto done; /* already acquired */
- } else if (fw_state == FW_ERR) {
- goto done; /* already tried and failed */
- }
- /* fw_state is FW_EMPTY */
-
/* set fw_state to FW_TRY, FW_FINAL, or FW_ERR, and fw_err */
- __obtain_firmware(dd);
-
- if (platform_config_load) {
- platform_config = NULL;
- err = request_firmware(&platform_config, platform_config_name,
- &dd->pcidev->dev);
- if (err) {
- platform_config = NULL;
- dd_dev_err(dd,
- "%s: No default platform config file found\n",
- __func__);
- goto done;
- }
- dd->platform_config.data = platform_config->data;
- dd->platform_config.size = platform_config->size;
- }
+ if (fw_state == FW_EMPTY)
+ __obtain_firmware(dd);
-done:
mutex_unlock(&fw_mutex);
-
return fw_err;
}
@@ -752,9 +723,6 @@ void dispose_firmware(void)
dispose_one_firmware(&fw_pcie);
dispose_one_firmware(&fw_sbus);
- release_firmware(platform_config);
- platform_config = NULL;
-
/* retain the error state, otherwise revert to empty */
if (fw_state != FW_ERR)
fw_state = FW_EMPTY;
@@ -1079,6 +1047,13 @@ static int load_8051_firmware(struct hfi1_devdata *dd,
dd_dev_info(dd, "8051 firmware version %d.%d.%d\n",
(int)ver_major, (int)ver_minor, (int)ver_patch);
dd->dc8051_ver = dc8051_ver(ver_major, ver_minor, ver_patch);
+ ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION);
+ if (ret != HCMD_SUCCESS) {
+ dd_dev_err(dd,
+ "Failed to set host interface version, return 0x%x\n",
+ ret);
+ return -EIO;
+ }
return 0;
}
@@ -1709,10 +1684,8 @@ int hfi1_firmware_init(struct hfi1_devdata *dd)
}
/* no 8051 or QSFP on simulator */
- if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
+ if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
fw_8051_load = 0;
- platform_config_load = 0;
- }
if (!fw_8051_name) {
if (dd->icode == ICODE_RTL_SILICON)
@@ -1726,8 +1699,6 @@ int hfi1_firmware_init(struct hfi1_devdata *dd)
fw_sbus_name = DEFAULT_FW_SBUS_NAME;
if (!fw_pcie_serdes_name)
fw_pcie_serdes_name = DEFAULT_FW_PCIE_NAME;
- if (!platform_config_name)
- platform_config_name = DEFAULT_PLATFORM_CONFIG_NAME;
return obtain_firmware(dd);
}
@@ -1773,6 +1744,7 @@ static int check_meta_version(struct hfi1_devdata *dd, u32 *system_table)
int parse_platform_config(struct hfi1_devdata *dd)
{
struct platform_config_cache *pcfgcache = &dd->pcfg_cache;
+ struct hfi1_pportdata *ppd = dd->pport;
u32 *ptr = NULL;
u32 header1 = 0, header2 = 0, magic_num = 0, crc = 0, file_length = 0;
u32 record_idx = 0, table_type = 0, table_length_dwords = 0;
@@ -1784,7 +1756,7 @@ int parse_platform_config(struct hfi1_devdata *dd)
* scratch register bitmap, thus there is no platform config to parse.
* Skip parsing in these situations.
*/
- if (is_integrated(dd) && !platform_config_load)
+ if (ppd->config_from_scratch)
return 0;
if (!dd->platform_config.data) {
@@ -2073,13 +2045,14 @@ int get_platform_config_field(struct hfi1_devdata *dd,
int ret = 0, wlen = 0, seek = 0;
u32 field_len_bits = 0, field_start_bits = 0, *src_ptr = NULL;
struct platform_config_cache *pcfgcache = &dd->pcfg_cache;
+ struct hfi1_pportdata *ppd = dd->pport;
if (data)
memset(data, 0, len);
else
return -EINVAL;
- if (is_integrated(dd) && !platform_config_load) {
+ if (ppd->config_from_scratch) {
/*
* Use saved configuration from ppd for integrated platforms
*/
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index 414a04a481c2..3ac9c307a285 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -66,9 +66,11 @@
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <rdma/ib_hdrs.h>
+#include <rdma/opa_addr.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <rdma/rdma_vt.h>
#include "chip_registers.h"
#include "common.h"
@@ -213,13 +215,11 @@ struct hfi1_ctxtdata {
/* dynamic receive available interrupt timeout */
u32 rcvavail_timeout;
- /*
- * number of opens (including slave sub-contexts) on this instance
- * (ignoring forks, dup, etc. for now)
- */
- int cnt;
+ /* Reference count the base context usage */
+ struct kref kref;
+
/* Device context index */
- unsigned ctxt;
+ u16 ctxt;
/*
* non-zero if ctxt can be shared, and defines the maximum number of
* sub-contexts for this device context.
@@ -245,24 +245,10 @@ struct hfi1_ctxtdata {
/* lock protecting all Expected TID data */
struct mutex exp_lock;
- /* number of pio bufs for this ctxt (all procs, if shared) */
- u32 piocnt;
- /* first pio buffer for this ctxt */
- u32 pio_base;
- /* chip offset of PIO buffers for this ctxt */
- u32 piobufs;
/* per-context configuration flags */
unsigned long flags;
/* per-context event flags for fileops/intr communication */
unsigned long event_flags;
- /* WAIT_RCV that timed out, no interrupt */
- u32 rcvwait_to;
- /* WAIT_PIO that timed out, no interrupt */
- u32 piowait_to;
- /* WAIT_RCV already happened, no wait */
- u32 rcvnowait;
- /* WAIT_PIO already happened, no wait */
- u32 pionowait;
/* total number of polled urgent packets */
u32 urgent;
/* saved total number of polled urgent packets for poll edge trigger */
@@ -289,10 +275,8 @@ struct hfi1_ctxtdata {
u16 poll_type;
/* receive packet sequence counter */
u8 seq_cnt;
- u8 redirect_seq_cnt;
/* ctxt rcvhdrq head offset */
u32 head;
- u32 pkt_count;
/* QPs waiting for context processing */
struct list_head qp_wait_list;
/* interrupt handling */
@@ -301,15 +285,6 @@ struct hfi1_ctxtdata {
unsigned numa_id; /* numa node of this context */
/* verbs stats per CTX */
struct hfi1_opcode_stats_perctx *opstats;
- /*
- * This is the kernel thread that will keep making
- * progress on the user sdma requests behind the scenes.
- * There is one per context (shared contexts use the master's).
- */
- struct task_struct *progress;
- struct list_head sdma_queues;
- /* protect sdma queues */
- spinlock_t sdma_qlock;
/* Is ASPM interrupt supported for this context */
bool aspm_intr_supported;
@@ -352,23 +327,150 @@ struct hfi1_ctxtdata {
struct hfi1_packet {
void *ebuf;
void *hdr;
+ void *payload;
struct hfi1_ctxtdata *rcd;
__le32 *rhf_addr;
struct rvt_qp *qp;
struct ib_other_headers *ohdr;
+ struct ib_grh *grh;
u64 rhf;
u32 maxcnt;
u32 rhqoff;
+ u32 dlid;
+ u32 slid;
u16 tlen;
s16 etail;
u8 hlen;
u8 numpkt;
u8 rsize;
u8 updegr;
- u8 rcv_flags;
u8 etype;
+ u8 extra_byte;
+ u8 pad;
+ u8 sc;
+ u8 sl;
+ u8 opcode;
+ bool becn;
+ bool fecn;
};
+/* Packet types */
+#define HFI1_PKT_TYPE_9B 0
+#define HFI1_PKT_TYPE_16B 1
+
+/*
+ * OPA 16B Header
+ */
+#define OPA_16B_L4_MASK 0xFFull
+#define OPA_16B_SC_MASK 0x1F00000ull
+#define OPA_16B_SC_SHIFT 20
+#define OPA_16B_LID_MASK 0xFFFFFull
+#define OPA_16B_DLID_MASK 0xF000ull
+#define OPA_16B_DLID_SHIFT 20
+#define OPA_16B_DLID_HIGH_SHIFT 12
+#define OPA_16B_SLID_MASK 0xF00ull
+#define OPA_16B_SLID_SHIFT 20
+#define OPA_16B_SLID_HIGH_SHIFT 8
+#define OPA_16B_BECN_MASK 0x80000000ull
+#define OPA_16B_BECN_SHIFT 31
+#define OPA_16B_FECN_MASK 0x10000000ull
+#define OPA_16B_FECN_SHIFT 28
+#define OPA_16B_L2_MASK 0x60000000ull
+#define OPA_16B_L2_SHIFT 29
+#define OPA_16B_PKEY_MASK 0xFFFF0000ull
+#define OPA_16B_PKEY_SHIFT 16
+#define OPA_16B_LEN_MASK 0x7FF00000ull
+#define OPA_16B_LEN_SHIFT 20
+#define OPA_16B_RC_MASK 0xE000000ull
+#define OPA_16B_RC_SHIFT 25
+#define OPA_16B_AGE_MASK 0xFF0000ull
+#define OPA_16B_AGE_SHIFT 16
+#define OPA_16B_ENTROPY_MASK 0xFFFFull
+
+/*
+ * OPA 16B L2/L4 Encodings
+ */
+#define OPA_16B_L2_TYPE 0x02
+#define OPA_16B_L4_IB_LOCAL 0x09
+#define OPA_16B_L4_IB_GLOBAL 0x0A
+#define OPA_16B_L4_ETHR OPA_VNIC_L4_ETHR
+
+static inline u8 hfi1_16B_get_l4(struct hfi1_16b_header *hdr)
+{
+ return (u8)(hdr->lrh[2] & OPA_16B_L4_MASK);
+}
+
+static inline u8 hfi1_16B_get_sc(struct hfi1_16b_header *hdr)
+{
+ return (u8)((hdr->lrh[1] & OPA_16B_SC_MASK) >> OPA_16B_SC_SHIFT);
+}
+
+static inline u32 hfi1_16B_get_dlid(struct hfi1_16b_header *hdr)
+{
+ return (u32)((hdr->lrh[1] & OPA_16B_LID_MASK) |
+ (((hdr->lrh[2] & OPA_16B_DLID_MASK) >>
+ OPA_16B_DLID_HIGH_SHIFT) << OPA_16B_DLID_SHIFT));
+}
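/*
 * Illustration only, not part of this patch: a 16B DLID is 24 bits wide and
 * is split across two LRH words -- the low 20 bits sit in lrh[1] and the
 * high 4 bits in lrh[2] bits 15:12.  A standalone sketch of the extraction
 * performed by hfi1_16B_get_dlid() above (hypothetical helper name):
 */
static inline u32 example_16B_dlid(u32 lrh1, u32 lrh2)
{
	/* e.g. lrh1 = 0x00023456 and lrh2 = 0x0000A000 yield 0xA23456 */
	return (lrh1 & 0xFFFFF) | (((lrh2 & 0xF000) >> 12) << 20);
}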
+
+static inline u32 hfi1_16B_get_slid(struct hfi1_16b_header *hdr)
+{
+ return (u32)((hdr->lrh[0] & OPA_16B_LID_MASK) |
+ (((hdr->lrh[2] & OPA_16B_SLID_MASK) >>
+ OPA_16B_SLID_HIGH_SHIFT) << OPA_16B_SLID_SHIFT));
+}
+
+static inline u8 hfi1_16B_get_becn(struct hfi1_16b_header *hdr)
+{
+ return (u8)((hdr->lrh[0] & OPA_16B_BECN_MASK) >> OPA_16B_BECN_SHIFT);
+}
+
+static inline u8 hfi1_16B_get_fecn(struct hfi1_16b_header *hdr)
+{
+ return (u8)((hdr->lrh[1] & OPA_16B_FECN_MASK) >> OPA_16B_FECN_SHIFT);
+}
+
+static inline u8 hfi1_16B_get_l2(struct hfi1_16b_header *hdr)
+{
+ return (u8)((hdr->lrh[1] & OPA_16B_L2_MASK) >> OPA_16B_L2_SHIFT);
+}
+
+static inline u16 hfi1_16B_get_pkey(struct hfi1_16b_header *hdr)
+{
+ return (u16)((hdr->lrh[2] & OPA_16B_PKEY_MASK) >> OPA_16B_PKEY_SHIFT);
+}
+
+static inline u8 hfi1_16B_get_rc(struct hfi1_16b_header *hdr)
+{
+ return (u8)((hdr->lrh[1] & OPA_16B_RC_MASK) >> OPA_16B_RC_SHIFT);
+}
+
+static inline u8 hfi1_16B_get_age(struct hfi1_16b_header *hdr)
+{
+ return (u8)((hdr->lrh[3] & OPA_16B_AGE_MASK) >> OPA_16B_AGE_SHIFT);
+}
+
+static inline u16 hfi1_16B_get_len(struct hfi1_16b_header *hdr)
+{
+ return (u16)((hdr->lrh[0] & OPA_16B_LEN_MASK) >> OPA_16B_LEN_SHIFT);
+}
+
+static inline u16 hfi1_16B_get_entropy(struct hfi1_16b_header *hdr)
+{
+ return (u16)(hdr->lrh[3] & OPA_16B_ENTROPY_MASK);
+}
+
+#define OPA_16B_MAKE_QW(low_dw, high_dw) (((u64)(high_dw) << 32) | (low_dw))
+
+/*
+ * BTH
+ */
+#define OPA_16B_BTH_PAD_MASK 7
+static inline u8 hfi1_16B_bth_get_pad(struct ib_other_headers *ohdr)
+{
+ return (u8)((be32_to_cpu(ohdr->bth[0]) >> IB_BTH_PAD_SHIFT) &
+ OPA_16B_BTH_PAD_MASK);
+}
+
struct rvt_sge_state;
/*
@@ -512,7 +614,7 @@ static inline void incr_cntr32(u32 *cntr)
#define MAX_NAME_SIZE 64
struct hfi1_msix_entry {
enum irq_type type;
- struct msix_entry msix;
+ int irq;
void *arg;
char name[MAX_NAME_SIZE];
cpumask_t mask;
@@ -575,6 +677,9 @@ struct hfi1_pportdata {
u8 default_atten;
u8 max_power_class;
+ /* did we read platform config from scratch registers? */
+ bool config_from_scratch;
+
/* GUIDs for this interface, in host order, guids[0] is a port guid */
u64 guids[HFI1_GUIDS_PER_PORT];
@@ -593,6 +698,7 @@ struct hfi1_pportdata {
/* SendDMA related entries */
struct workqueue_struct *hfi1_wq;
+ struct workqueue_struct *link_wq;
/* move out of interrupt context */
struct work_struct link_vc_work;
@@ -607,8 +713,6 @@ struct hfi1_pportdata {
struct mutex hls_lock;
u32 host_link_state;
- u32 lstate; /* logical link state */
-
/* these are the "32 bit" regs */
u32 ibmtu; /* The MTU programmed for this unit */
@@ -619,7 +723,7 @@ struct hfi1_pportdata {
u32 ibmaxlen;
u32 current_egress_rate; /* units [10^6 bits/sec] */
/* LID programmed for this instance */
- u16 lid;
+ u32 lid;
/* list of pkeys programmed; 0 if not set */
u16 pkeys[MAX_PKEY_VALUES];
u16 link_width_supported;
@@ -654,12 +758,12 @@ struct hfi1_pportdata {
u8 link_enabled; /* link enabled? */
u8 linkinit_reason;
u8 local_tx_rate; /* rate given to 8051 firmware */
- u8 last_pstate; /* info only */
u8 qsfp_retry_count;
/* placeholders for IB MAD packet settings */
u8 overrun_threshold;
u8 phy_error_threshold;
+ unsigned int is_link_down_queued;
/* Used to override LED behavior for things like maintenance beaconing*/
/*
@@ -756,6 +860,10 @@ struct hfi1_pportdata {
typedef int (*rhf_rcv_function_ptr)(struct hfi1_packet *packet);
typedef void (*opcode_handler)(struct hfi1_packet *packet);
+typedef void (*hfi1_make_req)(struct rvt_qp *qp,
+ struct hfi1_pkt_state *ps,
+ struct rvt_swqe *wqe);
+
/* return values for the RHF receive functions */
#define RHF_RCV_CONTINUE 0 /* keep going */
@@ -860,12 +968,15 @@ struct hfi1_devdata {
struct device *diag_device;
struct device *ui_device;
- /* mem-mapped pointer to base of chip regs */
- u8 __iomem *kregbase;
- /* end of mem-mapped chip space excluding sendbuf and user regs */
- u8 __iomem *kregend;
- /* physical address of chip for io_remap, etc. */
+ /* first mapping up to RcvArray */
+ u8 __iomem *kregbase1;
resource_size_t physaddr;
+
+ /* second uncached mapping from RcvArray to pio send buffers */
+ u8 __iomem *kregbase2;
+ /* for detecting offset above kregbase2 address */
+ u32 base2_start;
+
/* Per VL data. Enough for all VLs but not all elements are set/used. */
struct per_vl_data vld[PER_VL_SEND_CONTEXTS];
/* send context data */
@@ -953,8 +1064,7 @@ struct hfi1_devdata {
u64 __iomem *egrtidbase;
spinlock_t sendctrl_lock; /* protect changes to SendCtrl */
spinlock_t rcvctrl_lock; /* protect changes to RcvCtrl */
- /* around rcd and (user ctxts) ctxt_cnt use (intr vs free) */
- spinlock_t uctxt_lock; /* rcd and user context changes */
+ spinlock_t uctxt_lock; /* protect rcd changes */
struct mutex dc8051_lock; /* exclusive access to 8051 */
struct workqueue_struct *update_cntr_wq;
struct work_struct update_cntr_work;
@@ -1229,9 +1339,10 @@ static inline bool hfi1_vnic_is_rsm_full(struct hfi1_devdata *dd, int spare)
#define dc8051_ver_patch(a) ((a) & 0x0000ff)
/* f_put_tid types */
-#define PT_EXPECTED 0
-#define PT_EAGER 1
-#define PT_INVALID 2
+#define PT_EXPECTED 0
+#define PT_EAGER 1
+#define PT_INVALID_FLUSH 2
+#define PT_INVALID 3
struct tid_rb_node;
struct mmu_rb_node;
@@ -1276,13 +1387,16 @@ void handle_user_interrupt(struct hfi1_ctxtdata *rcd);
int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd);
int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd);
-int hfi1_create_ctxts(struct hfi1_devdata *dd);
-struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt,
- int numa);
+int hfi1_create_kctxts(struct hfi1_devdata *dd);
+int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
+ struct hfi1_ctxtdata **rcd);
+void hfi1_free_ctxt(struct hfi1_ctxtdata *rcd);
void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
struct hfi1_devdata *dd, u8 hw_pidx, u8 port);
void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd);
-
+int hfi1_rcd_put(struct hfi1_ctxtdata *rcd);
+void hfi1_rcd_get(struct hfi1_ctxtdata *rcd);
+struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt);
int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread);
int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread);
int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread);
@@ -1292,6 +1406,13 @@ void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd);
void hfi1_reset_vnic_msix_info(struct hfi1_ctxtdata *rcd);
extern const struct pci_device_id hfi1_pci_tbl[];
+void hfi1_make_ud_req_9B(struct rvt_qp *qp,
+ struct hfi1_pkt_state *ps,
+ struct rvt_swqe *wqe);
+
+void hfi1_make_ud_req_16B(struct rvt_qp *qp,
+ struct hfi1_pkt_state *ps,
+ struct rvt_swqe *wqe);
/* receive packet handler dispositions */
#define RCV_PKT_OK 0x0 /* keep going */
@@ -1306,21 +1427,6 @@ static inline __le32 *get_rhf_addr(struct hfi1_ctxtdata *rcd)
int hfi1_reset_device(int);
-/* return the driver's idea of the logical OPA port state */
-static inline u32 driver_lstate(struct hfi1_pportdata *ppd)
-{
- /*
- * The driver does some processing from the time the logical
- * link state is at INIT to the time the SM can be notified
- * as such. Return IB_PORT_DOWN until the software state
- * is ready.
- */
- if (ppd->lstate == IB_PORT_INIT && !(ppd->host_link_state & HLS_UP))
- return IB_PORT_DOWN;
- else
- return ppd->lstate;
-}
-
void receive_interrupt_work(struct work_struct *work);
/* extract service channel from header and rhf */
@@ -1413,13 +1519,25 @@ static inline u32 egress_cycles(u32 len, u32 rate)
}
void set_link_ipg(struct hfi1_pportdata *ppd);
-void process_becn(struct hfi1_pportdata *ppd, u8 sl, u16 rlid, u32 lqpn,
+void process_becn(struct hfi1_pportdata *ppd, u8 sl, u32 rlid, u32 lqpn,
u32 rqpn, u8 svc_type);
void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
u32 pkey, u32 slid, u32 dlid, u8 sc5,
const struct ib_grh *old_grh);
+void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp,
+ u32 remote_qpn, u32 pkey, u32 slid, u32 dlid,
+ u8 sc5, const struct ib_grh *old_grh);
+typedef void (*hfi1_handle_cnp)(struct hfi1_ibport *ibp, struct rvt_qp *qp,
+ u32 remote_qpn, u32 pkey, u32 slid, u32 dlid,
+ u8 sc5, const struct ib_grh *old_grh);
+
+/* We support only two types - 9B and 16B for now */
+static const hfi1_handle_cnp hfi1_handle_cnp_tbl[2] = {
+ [HFI1_PKT_TYPE_9B] = &return_cnp,
+ [HFI1_PKT_TYPE_16B] = &return_cnp_16B
+};
#define PKEY_CHECK_INVALID -1
-int egress_pkey_check(struct hfi1_pportdata *ppd, __be16 *lrh, __be32 *bth,
+int egress_pkey_check(struct hfi1_pportdata *ppd, u32 slid, u16 pkey,
u8 sc5, int8_t s_pkey_index);
#define PACKET_EGRESS_TIMEOUT 350
@@ -1522,9 +1640,9 @@ static void ingress_pkey_table_fail(struct hfi1_pportdata *ppd, u16 pkey,
* by HW and rcv_pkey_check function should be called instead.
*/
static inline int ingress_pkey_check(struct hfi1_pportdata *ppd, u16 pkey,
- u8 sc5, u8 idx, u16 slid)
+ u8 sc5, u8 idx, u32 slid, bool force)
{
- if (!(ppd->part_enforce & HFI1_PART_ENFORCE_IN))
+ if (!(force) && !(ppd->part_enforce & HFI1_PART_ENFORCE_IN))
return 0;
/* If SC15, pkey[0:14] must be 0x7fff */
@@ -1658,12 +1776,22 @@ static inline bool process_ecn(struct rvt_qp *qp, struct hfi1_packet *pkt,
bool do_cnp)
{
struct ib_other_headers *ohdr = pkt->ohdr;
- u32 bth1;
- bth1 = be32_to_cpu(ohdr->bth[1]);
- if (unlikely(bth1 & (IB_BECN_SMASK | IB_FECN_SMASK))) {
+ u32 bth1;
+ bool becn = false;
+ bool fecn = false;
+
+ if (pkt->etype == RHF_RCV_TYPE_BYPASS) {
+ fecn = hfi1_16B_get_fecn(pkt->hdr);
+ becn = hfi1_16B_get_becn(pkt->hdr);
+ } else {
+ bth1 = be32_to_cpu(ohdr->bth[1]);
+ fecn = bth1 & IB_FECN_SMASK;
+ becn = bth1 & IB_BECN_SMASK;
+ }
+ if (unlikely(fecn || becn)) {
hfi1_process_ecn_slowpath(qp, pkt, do_cnp);
- return !!(bth1 & IB_FECN_SMASK);
+ return fecn;
}
return false;
}
@@ -1829,10 +1957,9 @@ void hfi1_pcie_cleanup(struct pci_dev *pdev);
int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev);
void hfi1_pcie_ddcleanup(struct hfi1_devdata *);
int pcie_speeds(struct hfi1_devdata *dd);
-void request_msix(struct hfi1_devdata *dd, u32 *nent,
- struct hfi1_msix_entry *entry);
-void hfi1_enable_intx(struct pci_dev *pdev);
-void restore_pci_variables(struct hfi1_devdata *dd);
+int request_msix(struct hfi1_devdata *dd, u32 msireq);
+int restore_pci_variables(struct hfi1_devdata *dd);
+int save_pci_variables(struct hfi1_devdata *dd);
int do_pcie_gen3_transition(struct hfi1_devdata *dd);
int parse_platform_config(struct hfi1_devdata *dd);
int get_platform_config_field(struct hfi1_devdata *dd,
@@ -1860,6 +1987,7 @@ int process_receive_error(struct hfi1_packet *packet);
int kdeth_process_expected(struct hfi1_packet *packet);
int kdeth_process_eager(struct hfi1_packet *packet);
int process_receive_invalid(struct hfi1_packet *packet);
+void seqfile_dump_rcd(struct seq_file *s, struct hfi1_ctxtdata *rcd);
/* global module parameter variables */
extern unsigned int hfi1_max_mtu;
@@ -1991,9 +2119,15 @@ static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd)
#define dd_dev_emerg(dd, fmt, ...) \
dev_emerg(&(dd)->pcidev->dev, "%s: " fmt, \
get_unit_name((dd)->unit), ##__VA_ARGS__)
+
#define dd_dev_err(dd, fmt, ...) \
dev_err(&(dd)->pcidev->dev, "%s: " fmt, \
get_unit_name((dd)->unit), ##__VA_ARGS__)
+
+#define dd_dev_err_ratelimited(dd, fmt, ...) \
+ dev_err_ratelimited(&(dd)->pcidev->dev, "%s: " fmt, \
+ get_unit_name((dd)->unit), ##__VA_ARGS__)
+
#define dd_dev_warn(dd, fmt, ...) \
dev_warn(&(dd)->pcidev->dev, "%s: " fmt, \
get_unit_name((dd)->unit), ##__VA_ARGS__)
@@ -2087,52 +2221,220 @@ int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp);
#define DD_DEV_ENTRY(dd) __string(dev, dev_name(&(dd)->pcidev->dev))
#define DD_DEV_ASSIGN(dd) __assign_str(dev, dev_name(&(dd)->pcidev->dev))
-#define packettype_name(etype) { RHF_RCV_TYPE_##etype, #etype }
-#define show_packettype(etype) \
-__print_symbolic(etype, \
- packettype_name(EXPECTED), \
- packettype_name(EAGER), \
- packettype_name(IB), \
- packettype_name(ERROR), \
- packettype_name(BYPASS))
-
-#define ib_opcode_name(opcode) { IB_OPCODE_##opcode, #opcode }
-#define show_ib_opcode(opcode) \
-__print_symbolic(opcode, \
- ib_opcode_name(RC_SEND_FIRST), \
- ib_opcode_name(RC_SEND_MIDDLE), \
- ib_opcode_name(RC_SEND_LAST), \
- ib_opcode_name(RC_SEND_LAST_WITH_IMMEDIATE), \
- ib_opcode_name(RC_SEND_ONLY), \
- ib_opcode_name(RC_SEND_ONLY_WITH_IMMEDIATE), \
- ib_opcode_name(RC_RDMA_WRITE_FIRST), \
- ib_opcode_name(RC_RDMA_WRITE_MIDDLE), \
- ib_opcode_name(RC_RDMA_WRITE_LAST), \
- ib_opcode_name(RC_RDMA_WRITE_LAST_WITH_IMMEDIATE), \
- ib_opcode_name(RC_RDMA_WRITE_ONLY), \
- ib_opcode_name(RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE), \
- ib_opcode_name(RC_RDMA_READ_REQUEST), \
- ib_opcode_name(RC_RDMA_READ_RESPONSE_FIRST), \
- ib_opcode_name(RC_RDMA_READ_RESPONSE_MIDDLE), \
- ib_opcode_name(RC_RDMA_READ_RESPONSE_LAST), \
- ib_opcode_name(RC_RDMA_READ_RESPONSE_ONLY), \
- ib_opcode_name(RC_ACKNOWLEDGE), \
- ib_opcode_name(RC_ATOMIC_ACKNOWLEDGE), \
- ib_opcode_name(RC_COMPARE_SWAP), \
- ib_opcode_name(RC_FETCH_ADD), \
- ib_opcode_name(UC_SEND_FIRST), \
- ib_opcode_name(UC_SEND_MIDDLE), \
- ib_opcode_name(UC_SEND_LAST), \
- ib_opcode_name(UC_SEND_LAST_WITH_IMMEDIATE), \
- ib_opcode_name(UC_SEND_ONLY), \
- ib_opcode_name(UC_SEND_ONLY_WITH_IMMEDIATE), \
- ib_opcode_name(UC_RDMA_WRITE_FIRST), \
- ib_opcode_name(UC_RDMA_WRITE_MIDDLE), \
- ib_opcode_name(UC_RDMA_WRITE_LAST), \
- ib_opcode_name(UC_RDMA_WRITE_LAST_WITH_IMMEDIATE), \
- ib_opcode_name(UC_RDMA_WRITE_ONLY), \
- ib_opcode_name(UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE), \
- ib_opcode_name(UD_SEND_ONLY), \
- ib_opcode_name(UD_SEND_ONLY_WITH_IMMEDIATE), \
- ib_opcode_name(CNP))
+static inline void hfi1_update_ah_attr(struct ib_device *ibdev,
+ struct rdma_ah_attr *attr)
+{
+ struct hfi1_pportdata *ppd;
+ struct hfi1_ibport *ibp;
+ u32 dlid = rdma_ah_get_dlid(attr);
+
+ /*
+	 * Kernel clients may not have set up GRH information.
+	 * Set that here.
+ */
+ ibp = to_iport(ibdev, rdma_ah_get_port_num(attr));
+ ppd = ppd_from_ibp(ibp);
+ if ((((dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) ||
+ (ppd->lid >= be16_to_cpu(IB_MULTICAST_LID_BASE))) &&
+ (dlid != be32_to_cpu(OPA_LID_PERMISSIVE)) &&
+ (dlid != be16_to_cpu(IB_LID_PERMISSIVE)) &&
+ (!(rdma_ah_get_ah_flags(attr) & IB_AH_GRH))) ||
+ (rdma_ah_get_make_grd(attr))) {
+ rdma_ah_set_ah_flags(attr, IB_AH_GRH);
+ rdma_ah_set_interface_id(attr, OPA_MAKE_ID(dlid));
+ rdma_ah_set_subnet_prefix(attr, ibp->rvp.gid_prefix);
+ }
+}
+
+/*
+ * hfi1_check_mcast - Check if the given lid is
+ * in the OPA multicast range.
+ *
+ * The LID might either reside in ah.dlid or might be
+ * in the GRH of the address handle as DGID if extended
+ * addresses are in use.
+ */
+static inline bool hfi1_check_mcast(u32 lid)
+{
+ return ((lid >= opa_get_mcast_base(OPA_MCAST_NR)) &&
+ (lid != be32_to_cpu(OPA_LID_PERMISSIVE)));
+}
+
+#define opa_get_lid(lid, format) \
+ __opa_get_lid(lid, OPA_PORT_PACKET_FORMAT_##format)
+
+/* Convert a lid to a specific lid space */
+static inline u32 __opa_get_lid(u32 lid, u8 format)
+{
+ bool is_mcast = hfi1_check_mcast(lid);
+
+ switch (format) {
+ case OPA_PORT_PACKET_FORMAT_8B:
+ case OPA_PORT_PACKET_FORMAT_10B:
+ if (is_mcast)
+ return (lid - opa_get_mcast_base(OPA_MCAST_NR) +
+ 0xF0000);
+ return lid & 0xFFFFF;
+ case OPA_PORT_PACKET_FORMAT_16B:
+ if (is_mcast)
+ return (lid - opa_get_mcast_base(OPA_MCAST_NR) +
+ 0xF00000);
+ return lid & 0xFFFFFF;
+ case OPA_PORT_PACKET_FORMAT_9B:
+ if (is_mcast)
+ return (lid -
+ opa_get_mcast_base(OPA_MCAST_NR) +
+ be16_to_cpu(IB_MULTICAST_LID_BASE));
+ else
+ return lid & 0xFFFF;
+ default:
+ return lid;
+ }
+}
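/*
 * Illustration only, not part of this patch: a userspace sketch of the 9B
 * case above, assuming a 32-bit multicast base of 0xF0000000 (OPA_MCAST_NR
 * of 4, as in the comment further down) and the IB multicast base 0xC000.
 * Multicast LIDs are re-based onto the 16-bit IB multicast range; unicast
 * LIDs are simply truncated to 16 bits.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t example_opa_to_9b_lid(uint32_t lid)
{
	const uint32_t opa_mcast_base = 0xF0000000;	/* assumed base */
	const uint32_t opa_permissive = 0xFFFFFFFF;

	if (lid >= opa_mcast_base && lid != opa_permissive)
		return lid - opa_mcast_base + 0xC000;	/* re-base onto IB mcast */
	return lid & 0xFFFF;				/* unicast: low 16 bits */
}

int main(void)
{
	assert(example_opa_to_9b_lid(0x00001234) == 0x1234);	/* unicast */
	assert(example_opa_to_9b_lid(0xF0000003) == 0xC003);	/* multicast */
	return 0;
}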
+
+/* Return true if the given lid is in the OPA 16B multicast range */
+static inline bool hfi1_is_16B_mcast(u32 lid)
+{
+ return ((lid >=
+ opa_get_lid(opa_get_mcast_base(OPA_MCAST_NR), 16B)) &&
+ (lid != opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE), 16B)));
+}
+
+static inline void hfi1_make_opa_lid(struct rdma_ah_attr *attr)
+{
+ const struct ib_global_route *grh = rdma_ah_read_grh(attr);
+ u32 dlid = rdma_ah_get_dlid(attr);
+
+ /* Modify ah_attr.dlid to be in the 32 bit LID space.
+ * This is how the address will be laid out:
+ * Assuming MCAST_NR to be 4,
+ * 32 bit permissive LID = 0xFFFFFFFF
+ * Multicast LID range = 0xFFFFFFFE to 0xF0000000
+ * Unicast LID range = 0xEFFFFFFF to 1
+ * Invalid LID = 0
+ */
+ if (ib_is_opa_gid(&grh->dgid))
+ dlid = opa_get_lid_from_gid(&grh->dgid);
+ else if ((dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
+ (dlid != be16_to_cpu(IB_LID_PERMISSIVE)) &&
+ (dlid != be32_to_cpu(OPA_LID_PERMISSIVE)))
+ dlid = dlid - be16_to_cpu(IB_MULTICAST_LID_BASE) +
+ opa_get_mcast_base(OPA_MCAST_NR);
+ else if (dlid == be16_to_cpu(IB_LID_PERMISSIVE))
+ dlid = be32_to_cpu(OPA_LID_PERMISSIVE);
+
+ rdma_ah_set_dlid(attr, dlid);
+}
+
+static inline u8 hfi1_get_packet_type(u32 lid)
+{
+ /* 9B if lid > 0xF0000000 */
+ if (lid >= opa_get_mcast_base(OPA_MCAST_NR))
+ return HFI1_PKT_TYPE_9B;
+
+ /* 16B if lid > 0xC000 */
+ if (lid >= opa_get_lid(opa_get_mcast_base(OPA_MCAST_NR), 9B))
+ return HFI1_PKT_TYPE_16B;
+
+ return HFI1_PKT_TYPE_9B;
+}
+
+static inline bool hfi1_get_hdr_type(u32 lid, struct rdma_ah_attr *attr)
+{
+ /*
+ * If there was an incoming 16B packet with permissive
+ * LIDs, OPA GIDs would have been programmed when those
+ * packets were received. A 16B packet will have to
+ * be sent in response to that packet. Return a 16B
+ * header type if that's the case.
+ */
+ if (rdma_ah_get_dlid(attr) == be32_to_cpu(OPA_LID_PERMISSIVE))
+ return (ib_is_opa_gid(&rdma_ah_read_grh(attr)->dgid)) ?
+ HFI1_PKT_TYPE_16B : HFI1_PKT_TYPE_9B;
+
+ /*
+	 * Return a 16B header type if either the destination
+ * or source lid is extended.
+ */
+ if (hfi1_get_packet_type(rdma_ah_get_dlid(attr)) == HFI1_PKT_TYPE_16B)
+ return HFI1_PKT_TYPE_16B;
+
+ return hfi1_get_packet_type(lid);
+}
+
+static inline void hfi1_make_ext_grh(struct hfi1_packet *packet,
+ struct ib_grh *grh, u32 slid,
+ u32 dlid)
+{
+ struct hfi1_ibport *ibp = &packet->rcd->ppd->ibport_data;
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+
+ if (!ibp)
+ return;
+
+ grh->hop_limit = 1;
+ grh->sgid.global.subnet_prefix = ibp->rvp.gid_prefix;
+ if (slid == opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE), 16B))
+ grh->sgid.global.interface_id =
+ OPA_MAKE_ID(be32_to_cpu(OPA_LID_PERMISSIVE));
+ else
+ grh->sgid.global.interface_id = OPA_MAKE_ID(slid);
+
+ /*
+ * Upper layers (like mad) may compare the dgid in the
+ * wc that is obtained here with the sgid_index in
+ * the wr. Since sgid_index in wr is always 0 for
+ * extended lids, set the dgid here to the default
+ * IB gid.
+ */
+ grh->dgid.global.subnet_prefix = ibp->rvp.gid_prefix;
+ grh->dgid.global.interface_id =
+ cpu_to_be64(ppd->guids[HFI1_PORT_GUID_INDEX]);
+}
+
+static inline int hfi1_get_16b_padding(u32 hdr_size, u32 payload)
+{
+ return -(hdr_size + payload + (SIZE_OF_CRC << 2) +
+ SIZE_OF_LT) & 0x7;
+}
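/*
 * Illustration only, not part of this patch: assuming SIZE_OF_CRC is one
 * dword and SIZE_OF_LT is one byte (their usual values in this driver),
 * the helper above returns the pad bytes needed to round a 16B packet up
 * to a multiple of 8 bytes.  A standalone sketch:
 */
#include <assert.h>

static int example_16b_padding(unsigned int hdr_size, unsigned int payload)
{
	return -(hdr_size + payload + 4 /* CRC */ + 1 /* LT */) & 0x7;
}

int main(void)
{
	/* 40 + 64 + 4 + 1 = 109 bytes; 3 pad bytes bring it to 112 = 8 * 14 */
	assert(example_16b_padding(40, 64) == 3);
	return 0;
}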
+
+static inline void hfi1_make_ib_hdr(struct ib_header *hdr,
+ u16 lrh0, u16 len,
+ u16 dlid, u16 slid)
+{
+ hdr->lrh[0] = cpu_to_be16(lrh0);
+ hdr->lrh[1] = cpu_to_be16(dlid);
+ hdr->lrh[2] = cpu_to_be16(len);
+ hdr->lrh[3] = cpu_to_be16(slid);
+}
+
+static inline void hfi1_make_16b_hdr(struct hfi1_16b_header *hdr,
+ u32 slid, u32 dlid,
+ u16 len, u16 pkey,
+ u8 becn, u8 fecn, u8 l4,
+ u8 sc)
+{
+ u32 lrh0 = 0;
+ u32 lrh1 = 0x40000000;
+ u32 lrh2 = 0;
+ u32 lrh3 = 0;
+
+ lrh0 = (lrh0 & ~OPA_16B_BECN_MASK) | (becn << OPA_16B_BECN_SHIFT);
+ lrh0 = (lrh0 & ~OPA_16B_LEN_MASK) | (len << OPA_16B_LEN_SHIFT);
+ lrh0 = (lrh0 & ~OPA_16B_LID_MASK) | (slid & OPA_16B_LID_MASK);
+ lrh1 = (lrh1 & ~OPA_16B_FECN_MASK) | (fecn << OPA_16B_FECN_SHIFT);
+ lrh1 = (lrh1 & ~OPA_16B_SC_MASK) | (sc << OPA_16B_SC_SHIFT);
+ lrh1 = (lrh1 & ~OPA_16B_LID_MASK) | (dlid & OPA_16B_LID_MASK);
+ lrh2 = (lrh2 & ~OPA_16B_SLID_MASK) |
+ ((slid >> OPA_16B_SLID_SHIFT) << OPA_16B_SLID_HIGH_SHIFT);
+ lrh2 = (lrh2 & ~OPA_16B_DLID_MASK) |
+ ((dlid >> OPA_16B_DLID_SHIFT) << OPA_16B_DLID_HIGH_SHIFT);
+ lrh2 = (lrh2 & ~OPA_16B_PKEY_MASK) | (pkey << OPA_16B_PKEY_SHIFT);
+ lrh2 = (lrh2 & ~OPA_16B_L4_MASK) | l4;
+
+ hdr->lrh[0] = lrh0;
+ hdr->lrh[1] = lrh1;
+ hdr->lrh[2] = lrh2;
+ hdr->lrh[3] = lrh3;
+}
#endif /* _HFI1_KERNEL_H */
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 4a11d4da4c92..fba77001c3a7 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -67,6 +67,7 @@
#include "aspm.h"
#include "affinity.h"
#include "vnic.h"
+#include "exp_rcv.h"
#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt
@@ -125,85 +126,198 @@ static struct idr hfi1_unit_table;
u32 hfi1_cpulist_count;
unsigned long *hfi1_cpulist;
-/*
- * Common code for creating the receive context array.
- */
-int hfi1_create_ctxts(struct hfi1_devdata *dd)
+static int hfi1_create_kctxt(struct hfi1_devdata *dd,
+ struct hfi1_pportdata *ppd)
{
- unsigned i;
+ struct hfi1_ctxtdata *rcd;
int ret;
/* Control context has to be always 0 */
BUILD_BUG_ON(HFI1_CTRL_CTXT != 0);
+ ret = hfi1_create_ctxtdata(ppd, dd->node, &rcd);
+ if (ret < 0) {
+ dd_dev_err(dd, "Kernel receive context allocation failed\n");
+ return ret;
+ }
+
+ /*
+ * Set up the kernel context flags here and now because they use
+ * default values for all receive side memories. User contexts will
+ * be handled as they are created.
+ */
+ rcd->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
+ HFI1_CAP_KGET(NODROP_RHQ_FULL) |
+ HFI1_CAP_KGET(NODROP_EGR_FULL) |
+ HFI1_CAP_KGET(DMA_RTAIL);
+
+ /* Control context must use DMA_RTAIL */
+ if (rcd->ctxt == HFI1_CTRL_CTXT)
+ rcd->flags |= HFI1_CAP_DMA_RTAIL;
+ rcd->seq_cnt = 1;
+
+ rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node);
+ if (!rcd->sc) {
+ dd_dev_err(dd, "Kernel send context allocation failed\n");
+ return -ENOMEM;
+ }
+ hfi1_init_ctxt(rcd->sc);
+
+ return 0;
+}
+
+/*
+ * Create the receive context array and one or more kernel contexts
+ */
+int hfi1_create_kctxts(struct hfi1_devdata *dd)
+{
+ u16 i;
+ int ret;
+
dd->rcd = kzalloc_node(dd->num_rcv_contexts * sizeof(*dd->rcd),
GFP_KERNEL, dd->node);
if (!dd->rcd)
- goto nomem;
+ return -ENOMEM;
- /* create one or more kernel contexts */
for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) {
- struct hfi1_pportdata *ppd;
- struct hfi1_ctxtdata *rcd;
+ ret = hfi1_create_kctxt(dd, dd->pport);
+ if (ret)
+ goto bail;
+ }
- ppd = dd->pport + (i % dd->num_pports);
+ return 0;
+bail:
+ for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i)
+ hfi1_free_ctxt(dd->rcd[i]);
- /* dd->rcd[i] gets assigned inside the callee */
- rcd = hfi1_create_ctxtdata(ppd, i, dd->node);
- if (!rcd) {
- dd_dev_err(dd,
- "Unable to allocate kernel receive context, failing\n");
- goto nomem;
- }
- /*
- * Set up the kernel context flags here and now because they
- * use default values for all receive side memories. User
- * contexts will be handled as they are created.
- */
- rcd->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
- HFI1_CAP_KGET(NODROP_RHQ_FULL) |
- HFI1_CAP_KGET(NODROP_EGR_FULL) |
- HFI1_CAP_KGET(DMA_RTAIL);
-
- /* Control context must use DMA_RTAIL */
- if (rcd->ctxt == HFI1_CTRL_CTXT)
- rcd->flags |= HFI1_CAP_DMA_RTAIL;
- rcd->seq_cnt = 1;
-
- rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node);
- if (!rcd->sc) {
- dd_dev_err(dd,
- "Unable to allocate kernel send context, failing\n");
- goto nomem;
- }
+ /* All the contexts should be freed, free the array */
+ kfree(dd->rcd);
+ dd->rcd = NULL;
+ return ret;
+}
+
+/*
+ * Helper routines for the receive context reference count (rcd and uctxt).
+ */
+static void hfi1_rcd_init(struct hfi1_ctxtdata *rcd)
+{
+ kref_init(&rcd->kref);
+}
- hfi1_init_ctxt(rcd->sc);
+/**
+ * hfi1_rcd_free - Clean up when the reference count goes to zero.
+ * @kref: pointer to an initialized rcd data structure
+ *
+ */
+static void hfi1_rcd_free(struct kref *kref)
+{
+ unsigned long flags;
+ struct hfi1_ctxtdata *rcd =
+ container_of(kref, struct hfi1_ctxtdata, kref);
+
+ hfi1_free_ctxtdata(rcd->dd, rcd);
+
+ spin_lock_irqsave(&rcd->dd->uctxt_lock, flags);
+ rcd->dd->rcd[rcd->ctxt] = NULL;
+ spin_unlock_irqrestore(&rcd->dd->uctxt_lock, flags);
+
+ kfree(rcd);
+}
+
+/**
+ * hfi1_rcd_put - decrement reference for rcd
+ * @rcd: pointer to an initialized rcd data structure
+ *
+ * Use this to put a reference after the init.
+ */
+int hfi1_rcd_put(struct hfi1_ctxtdata *rcd)
+{
+ if (rcd)
+ return kref_put(&rcd->kref, hfi1_rcd_free);
+
+ return 0;
+}
+
+/**
+ * hfi1_rcd_get - increment reference for rcd
+ * @rcd: pointer to an initialized rcd data structure
+ *
+ * Use this to get a reference after the init.
+ */
+void hfi1_rcd_get(struct hfi1_ctxtdata *rcd)
+{
+ kref_get(&rcd->kref);
+}
+
+/**
+ * allocate_rcd_index - allocate an rcd index from the rcd array
+ * @dd: pointer to a valid devdata structure
+ * @rcd: rcd data structure to assign
+ * @index: pointer to index that is allocated
+ *
+ * Find an empty index in the rcd array, and assign the given rcd to it.
+ * If the array is full, we are EBUSY.
+ *
+ */
+static int allocate_rcd_index(struct hfi1_devdata *dd,
+ struct hfi1_ctxtdata *rcd, u16 *index)
+{
+ unsigned long flags;
+ u16 ctxt;
+
+ spin_lock_irqsave(&dd->uctxt_lock, flags);
+ for (ctxt = 0; ctxt < dd->num_rcv_contexts; ctxt++)
+ if (!dd->rcd[ctxt])
+ break;
+
+ if (ctxt < dd->num_rcv_contexts) {
+ rcd->ctxt = ctxt;
+ dd->rcd[ctxt] = rcd;
+ hfi1_rcd_init(rcd);
}
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
- /*
- * Initialize aspm, to be done after gen3 transition and setting up
- * contexts and before enabling interrupts
- */
- aspm_init(dd);
+ if (ctxt >= dd->num_rcv_contexts)
+ return -EBUSY;
+
+ *index = ctxt;
return 0;
-nomem:
- ret = -ENOMEM;
+}
- if (dd->rcd) {
- for (i = 0; i < dd->num_rcv_contexts; ++i)
- hfi1_free_ctxtdata(dd, dd->rcd[i]);
+/**
+ * hfi1_rcd_get_by_index
+ * @dd: pointer to a valid devdata structure
+ * @ctxt: the index of a possible rcd
+ *
+ * We need to protect access to the rcd array. If access is needed to
+ * one or more indices, take the protecting spinlock and then increment the
+ * kref.
+ *
+ * The caller is responsible for making the _put().
+ *
+ */
+struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt)
+{
+ unsigned long flags;
+ struct hfi1_ctxtdata *rcd = NULL;
+
+ spin_lock_irqsave(&dd->uctxt_lock, flags);
+ if (dd->rcd[ctxt]) {
+ rcd = dd->rcd[ctxt];
+ hfi1_rcd_get(rcd);
}
- kfree(dd->rcd);
- dd->rcd = NULL;
- return ret;
+ spin_unlock_irqrestore(&dd->uctxt_lock, flags);
+
+ return rcd;
}
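/*
 * Illustration only, not part of this patch: the lookup pattern used by the
 * converted callers later in this diff.  hfi1_rcd_get_by_index() takes a
 * kref on success, so every successful lookup must be paired with
 * hfi1_rcd_put() once the caller is done with the context.
 */
static void example_walk_user_contexts(struct hfi1_devdata *dd)
{
	struct hfi1_ctxtdata *rcd;
	u16 i;

	for (i = dd->first_dyn_alloc_ctxt; i < dd->num_rcv_contexts; i++) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (!rcd)
			continue;	/* empty slot in the rcd array */
		/* ... use rcd while the reference is held ... */
		hfi1_rcd_put(rcd);
	}
}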
/*
- * Common code for user and kernel context setup.
+ * Common code for user and kernel context create and setup.
+ * NOTE: the initial kref is done here (hfi1_rcd_init()).
*/
-struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt,
- int numa)
+int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
+ struct hfi1_ctxtdata **context)
{
struct hfi1_devdata *dd = ppd->dd;
struct hfi1_ctxtdata *rcd;
@@ -217,20 +331,30 @@ struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt,
rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, numa);
if (rcd) {
u32 rcvtids, max_entries;
-
- hfi1_cdbg(PROC, "setting up context %u\n", ctxt);
+ u16 ctxt;
+ int ret;
+
+ ret = allocate_rcd_index(dd, rcd, &ctxt);
+ if (ret) {
+ *context = NULL;
+ kfree(rcd);
+ return ret;
+ }
INIT_LIST_HEAD(&rcd->qp_wait_list);
+ hfi1_exp_tid_group_init(&rcd->tid_group_list);
+ hfi1_exp_tid_group_init(&rcd->tid_used_list);
+ hfi1_exp_tid_group_init(&rcd->tid_full_list);
rcd->ppd = ppd;
rcd->dd = dd;
__set_bit(0, rcd->in_use_ctxts);
- rcd->ctxt = ctxt;
- dd->rcd[ctxt] = rcd;
rcd->numa_id = numa;
rcd->rcv_array_groups = dd->rcv_entries.ngroups;
mutex_init(&rcd->exp_lock);
+ hfi1_cdbg(PROC, "setting up context %u\n", rcd->ctxt);
+
/*
* Calculate the context's RcvArray entry starting point.
* We do this here because we have to take into account all
@@ -328,14 +452,30 @@ struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt,
if (!rcd->opstats)
goto bail;
}
+
+ *context = rcd;
+ return 0;
}
- return rcd;
+
bail:
- dd->rcd[ctxt] = NULL;
- kfree(rcd->egrbufs.rcvtids);
- kfree(rcd->egrbufs.buffers);
- kfree(rcd);
- return NULL;
+ *context = NULL;
+ hfi1_free_ctxt(rcd);
+ return -ENOMEM;
+}
+
+/**
+ * hfi1_free_ctxt
+ * @rcd: pointer to an initialized rcd data structure
+ *
+ * This wrapper is the free function that matches hfi1_create_ctxtdata().
+ * When a context is done being used (kernel or user), this function is called
+ * for the "final" put to match the kref init from hfi1_create_ctxtdata().
+ * Other users of the context do a get/put sequence to make sure that the
+ * structure isn't removed while in use.
+ */
+void hfi1_free_ctxt(struct hfi1_ctxtdata *rcd)
+{
+ hfi1_rcd_put(rcd);
}
/*
@@ -483,7 +623,6 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
ppd->pkeys[default_pkey_idx] = DEFAULT_P_KEY;
ppd->part_enforce |= HFI1_PART_ENFORCE_IN;
- ppd->part_enforce |= HFI1_PART_ENFORCE_OUT;
if (loopback) {
hfi1_early_err(&pdev->dev,
@@ -559,16 +698,19 @@ static int loadtime_init(struct hfi1_devdata *dd)
static int init_after_reset(struct hfi1_devdata *dd)
{
int i;
-
+ struct hfi1_ctxtdata *rcd;
/*
* Ensure chip does no sends or receives, tail updates, or
* pioavail updates while we re-initialize. This is mostly
* for the driver data structures, not chip registers.
*/
- for (i = 0; i < dd->num_rcv_contexts; i++)
+ for (i = 0; i < dd->num_rcv_contexts; i++) {
+ rcd = hfi1_rcd_get_by_index(dd, i);
hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
- HFI1_RCVCTRL_INTRAVAIL_DIS |
- HFI1_RCVCTRL_TAILUPD_DIS, i);
+ HFI1_RCVCTRL_INTRAVAIL_DIS |
+ HFI1_RCVCTRL_TAILUPD_DIS, rcd);
+ hfi1_rcd_put(rcd);
+ }
pio_send_control(dd, PSC_GLOBAL_DISABLE);
for (i = 0; i < dd->num_send_contexts; i++)
sc_disable(dd->send_contexts[i].sc);
@@ -578,8 +720,9 @@ static int init_after_reset(struct hfi1_devdata *dd)
static void enable_chip(struct hfi1_devdata *dd)
{
+ struct hfi1_ctxtdata *rcd;
u32 rcvmask;
- u32 i;
+ u16 i;
/* enable PIO send */
pio_send_control(dd, PSC_GLOBAL_ENABLE);
@@ -589,17 +732,21 @@ static void enable_chip(struct hfi1_devdata *dd)
* Other ctxts done as user opens and initializes them.
*/
for (i = 0; i < dd->first_dyn_alloc_ctxt; ++i) {
+ rcd = hfi1_rcd_get_by_index(dd, i);
+ if (!rcd)
+ continue;
rcvmask = HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB;
- rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
+ rcvmask |= HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ?
HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
- if (!HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, MULTI_PKT_EGR))
+ if (!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR))
rcvmask |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
- if (HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, NODROP_RHQ_FULL))
+ if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_RHQ_FULL))
rcvmask |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
- if (HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, NODROP_EGR_FULL))
+ if (HFI1_CAP_KGET_MASK(rcd->flags, NODROP_EGR_FULL))
rcvmask |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
- hfi1_rcvctrl(dd, rcvmask, i);
- sc_enable(dd->rcd[i]->sc);
+ hfi1_rcvctrl(dd, rcvmask, rcd);
+ sc_enable(rcd->sc);
+ hfi1_rcd_put(rcd);
}
}
@@ -624,6 +771,20 @@ static int create_workqueues(struct hfi1_devdata *dd)
if (!ppd->hfi1_wq)
goto wq_error;
}
+ if (!ppd->link_wq) {
+ /*
+ * Make the link workqueue single-threaded to enforce
+ * serialization.
+ */
+ ppd->link_wq =
+ alloc_workqueue(
+ "hfi_link_%d_%d",
+ WQ_SYSFS | WQ_MEM_RECLAIM | WQ_UNBOUND,
+ 1, /* max_active */
+ dd->unit, pidx);
+ if (!ppd->link_wq)
+ goto wq_error;
+ }
}
return 0;
wq_error:
@@ -634,6 +795,10 @@ wq_error:
destroy_workqueue(ppd->hfi1_wq);
ppd->hfi1_wq = NULL;
}
+ if (ppd->link_wq) {
+ destroy_workqueue(ppd->link_wq);
+ ppd->link_wq = NULL;
+ }
}
return -ENOMEM;
}
@@ -656,7 +821,8 @@ wq_error:
int hfi1_init(struct hfi1_devdata *dd, int reinit)
{
int ret = 0, pidx, lastfail = 0;
- unsigned i, len;
+ unsigned long len;
+ u16 i;
struct hfi1_ctxtdata *rcd;
struct hfi1_pportdata *ppd;
@@ -725,7 +891,7 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
* existing, and re-allocate.
* Need to re-create rest of ctxt 0 ctxtdata as well.
*/
- rcd = dd->rcd[i];
+ rcd = hfi1_rcd_get_by_index(dd, i);
if (!rcd)
continue;
@@ -739,6 +905,7 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
"failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
ret = lastfail;
}
+ hfi1_rcd_put(rcd);
}
/* Allocate enough memory for user event notification. */
@@ -858,6 +1025,7 @@ static void stop_timers(struct hfi1_devdata *dd)
static void shutdown_device(struct hfi1_devdata *dd)
{
struct hfi1_pportdata *ppd;
+ struct hfi1_ctxtdata *rcd;
unsigned pidx;
int i;
@@ -876,12 +1044,15 @@ static void shutdown_device(struct hfi1_devdata *dd)
for (pidx = 0; pidx < dd->num_pports; ++pidx) {
ppd = dd->pport + pidx;
- for (i = 0; i < dd->num_rcv_contexts; i++)
+ for (i = 0; i < dd->num_rcv_contexts; i++) {
+ rcd = hfi1_rcd_get_by_index(dd, i);
hfi1_rcvctrl(dd, HFI1_RCVCTRL_TAILUPD_DIS |
- HFI1_RCVCTRL_CTXT_DIS |
- HFI1_RCVCTRL_INTRAVAIL_DIS |
- HFI1_RCVCTRL_PKEY_DIS |
- HFI1_RCVCTRL_ONE_PKT_EGR_DIS, i);
+ HFI1_RCVCTRL_CTXT_DIS |
+ HFI1_RCVCTRL_INTRAVAIL_DIS |
+ HFI1_RCVCTRL_PKEY_DIS |
+ HFI1_RCVCTRL_ONE_PKT_EGR_DIS, rcd);
+ hfi1_rcd_put(rcd);
+ }
/*
* Gracefully stop all sends allowing any in progress to
* trickle out first.
@@ -917,6 +1088,10 @@ static void shutdown_device(struct hfi1_devdata *dd)
destroy_workqueue(ppd->hfi1_wq);
ppd->hfi1_wq = NULL;
}
+ if (ppd->link_wq) {
+ destroy_workqueue(ppd->link_wq);
+ ppd->link_wq = NULL;
+ }
}
sdma_exit(dd);
}
@@ -927,14 +1102,11 @@ static void shutdown_device(struct hfi1_devdata *dd)
* @rcd: the ctxtdata structure
*
* free up any allocated data for a context
- * This should not touch anything that would affect a simultaneous
- * re-allocation of context data, because it is called after hfi1_mutex
- * is released (and can be called from reinit as well).
* It should never change any chip state, or global driver state.
*/
void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
- unsigned e;
+ u32 e;
if (!rcd)
return;
@@ -953,6 +1125,7 @@ void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
/* all the RcvArray entries should have been cleared by now */
kfree(rcd->egrbufs.rcvtids);
+ rcd->egrbufs.rcvtids = NULL;
for (e = 0; e < rcd->egrbufs.alloced; e++) {
if (rcd->egrbufs.buffers[e].dma)
@@ -962,13 +1135,21 @@ void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
rcd->egrbufs.buffers[e].dma);
}
kfree(rcd->egrbufs.buffers);
+ rcd->egrbufs.alloced = 0;
+ rcd->egrbufs.buffers = NULL;
sc_free(rcd->sc);
+ rcd->sc = NULL;
+
vfree(rcd->subctxt_uregbase);
vfree(rcd->subctxt_rcvegrbuf);
vfree(rcd->subctxt_rcvhdr_base);
kfree(rcd->opstats);
- kfree(rcd);
+
+ rcd->subctxt_uregbase = NULL;
+ rcd->subctxt_rcvegrbuf = NULL;
+ rcd->subctxt_rcvhdr_base = NULL;
+ rcd->opstats = NULL;
}
/*
@@ -1311,8 +1492,6 @@ static void cleanup_device_data(struct hfi1_devdata *dd)
{
int ctxt;
int pidx;
- struct hfi1_ctxtdata **tmp;
- unsigned long flags;
/* users can't do anything more with chip */
for (pidx = 0; pidx < dd->num_pports; ++pidx) {
@@ -1337,18 +1516,6 @@ static void cleanup_device_data(struct hfi1_devdata *dd)
free_credit_return(dd);
- /*
- * Free any resources still in use (usually just kernel contexts)
- * at unload; we do for ctxtcnt, because that's what we allocate.
- * We acquire lock to be really paranoid that rcd isn't being
- * accessed from some interrupt-related code (that should not happen,
- * but best to be sure).
- */
- spin_lock_irqsave(&dd->uctxt_lock, flags);
- tmp = dd->rcd;
- dd->rcd = NULL;
- spin_unlock_irqrestore(&dd->uctxt_lock, flags);
-
if (dd->rcvhdrtail_dummy_kvaddr) {
dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
(void *)dd->rcvhdrtail_dummy_kvaddr,
@@ -1356,16 +1523,22 @@ static void cleanup_device_data(struct hfi1_devdata *dd)
dd->rcvhdrtail_dummy_kvaddr = NULL;
}
- for (ctxt = 0; tmp && ctxt < dd->num_rcv_contexts; ctxt++) {
- struct hfi1_ctxtdata *rcd = tmp[ctxt];
+ /*
+ * Free any resources still in use (usually just kernel contexts)
+ * at unload; we do for ctxtcnt, because that's what we allocate.
+ */
+ for (ctxt = 0; dd->rcd && ctxt < dd->num_rcv_contexts; ctxt++) {
+ struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
- tmp[ctxt] = NULL; /* debugging paranoia */
if (rcd) {
hfi1_clear_tids(rcd);
- hfi1_free_ctxtdata(dd, rcd);
+ hfi1_free_ctxt(rcd);
}
}
- kfree(tmp);
+
+ kfree(dd->rcd);
+ dd->rcd = NULL;
+
free_pio_map(dd);
/* must follow rcv context free - need to remove rcv's hooks */
for (ctxt = 0; ctxt < dd->num_send_contexts; ctxt++)
@@ -1532,6 +1705,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
destroy_workqueue(ppd->hfi1_wq);
ppd->hfi1_wq = NULL;
}
+ if (ppd->link_wq) {
+ destroy_workqueue(ppd->link_wq);
+ ppd->link_wq = NULL;
+ }
}
if (!j)
hfi1_device_remove(dd);
diff --git a/drivers/infiniband/hw/hfi1/intr.c b/drivers/infiniband/hw/hfi1/intr.c
index 04a5082d5ac5..96845dfed5c5 100644
--- a/drivers/infiniband/hw/hfi1/intr.c
+++ b/drivers/infiniband/hw/hfi1/intr.c
@@ -164,6 +164,7 @@ void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup)
ppd->linkup = 0;
/* clear HW details of the previous connection */
+ ppd->actual_vls_operational = 0;
reset_link_credits(dd);
/* freeze after a link down to guarantee a clean egress */
@@ -196,7 +197,7 @@ void handle_user_interrupt(struct hfi1_ctxtdata *rcd)
if (test_and_clear_bit(HFI1_CTXT_WAITING_RCV, &rcd->event_flags)) {
wake_up_interruptible(&rcd->wait);
- hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_DIS, rcd->ctxt);
+ hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_DIS, rcd);
} else if (test_and_clear_bit(HFI1_CTXT_WAITING_URG,
&rcd->event_flags)) {
rcd->urgent++;
diff --git a/drivers/infiniband/hw/hfi1/iowait.h b/drivers/infiniband/hw/hfi1/iowait.h
index d9740ddea6f1..591697d85eed 100644
--- a/drivers/infiniband/hw/hfi1/iowait.h
+++ b/drivers/infiniband/hw/hfi1/iowait.h
@@ -106,7 +106,9 @@ struct iowait {
struct sdma_engine *sde,
struct iowait *wait,
struct sdma_txreq *tx,
- unsigned seq);
+ uint seq,
+ bool pkts_sent
+ );
void (*wakeup)(struct iowait *wait, int reason);
void (*sdma_drained)(struct iowait *wait);
seqlock_t *lock;
@@ -118,6 +120,7 @@ struct iowait {
u32 count;
u32 tx_limit;
u32 tx_count;
+ u8 starved_cnt;
};
#define SDMA_AVAIL_REASON 0
@@ -143,7 +146,8 @@ static inline void iowait_init(
struct sdma_engine *sde,
struct iowait *wait,
struct sdma_txreq *tx,
- unsigned seq),
+ uint seq,
+ bool pkts_sent),
void (*wakeup)(struct iowait *wait, int reason),
void (*sdma_drained)(struct iowait *wait))
{
@@ -305,4 +309,66 @@ static inline struct sdma_txreq *iowait_get_txhead(struct iowait *wait)
return tx;
}
+/**
+ * iowait_queue - Put the iowait on a wait queue
+ * @pkts_sent: have some packets been sent before queuing?
+ * @w: the iowait struct
+ * @wait_head: the wait queue
+ *
+ * This function is called to insert an iowait struct into a
+ * wait queue after a resource (e.g., an sdma descriptor or pio
+ * buffer) runs out.
+ */
+static inline void iowait_queue(bool pkts_sent, struct iowait *w,
+ struct list_head *wait_head)
+{
+ /*
+ * To play fair, insert the iowait at the tail of the wait queue if it
+	 * has already sent some packets; otherwise, put it at the head.
+ */
+ if (pkts_sent) {
+ list_add_tail(&w->list, wait_head);
+ w->starved_cnt = 0;
+ } else {
+ list_add(&w->list, wait_head);
+ w->starved_cnt++;
+ }
+}
+
+/**
+ * iowait_starve_clear - clear the wait queue's starve count
+ * @pkts_sent: have some packets been sent?
+ * @w: the iowait struct
+ *
+ * This function is called to clear the starve count. If no
+ * packets have been sent, the starve count will not be cleared.
+ */
+static inline void iowait_starve_clear(bool pkts_sent, struct iowait *w)
+{
+ if (pkts_sent)
+ w->starved_cnt = 0;
+}
+
+/**
+ * iowait_starve_find_max - Find the maximum of the starve count
+ * @w: the iowait struct
+ * @max: a variable containing the max starve count
+ * @idx: the index of the current iowait in an array
+ * @max_idx: a variable containing the array index for the
+ * iowait entry that has the max starve count
+ *
+ * This function is called to compare the starve count of a
+ * given iowait with the given max starve count. The max starve
+ * count and the index will be updated if the iowait's starve
+ * count is larger.
+ */
+static inline void iowait_starve_find_max(struct iowait *w, u8 *max,
+ uint idx, uint *max_idx)
+{
+ if (w->starved_cnt > *max) {
+ *max = w->starved_cnt;
+ *max_idx = idx;
+ }
+}
+
#endif
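/*
 * Illustration only, not part of this patch: how a selection loop might
 * combine the starvation helpers above, assuming an array "waits" of n
 * iowait pointers that all lost arbitration for the same resource.
 */
static inline uint example_pick_most_starved(struct iowait **waits, uint n)
{
	u8 max = 0;
	uint i, max_idx = 0;

	for (i = 0; i < n; i++)
		iowait_starve_find_max(waits[i], &max, i, &max_idx);

	/* index of the entry with the highest starve count */
	return max_idx;
}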
diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
index 5977673a52d4..f4c0ffc040cc 100644
--- a/drivers/infiniband/hw/hfi1/mad.c
+++ b/drivers/infiniband/hw/hfi1/mad.c
@@ -46,6 +46,7 @@
*/
#include <linux/net.h>
+#include <rdma/opa_addr.h>
#define OPA_NUM_PKEY_BLOCKS_PER_SMP (OPA_SMP_DR_DATA_SIZE \
/ (OPA_PARTITION_TABLE_BLK_SIZE * sizeof(u16)))
@@ -59,6 +60,24 @@
#define OPA_LINK_WIDTH_RESET_OLD 0x0fff
#define OPA_LINK_WIDTH_RESET 0xffff
+struct trap_node {
+ struct list_head list;
+ struct opa_mad_notice_attr data;
+ __be64 tid;
+ int len;
+ u32 retry;
+ u8 in_use;
+ u8 repress;
+};
+
+static int smp_length_check(u32 data_size, u32 request_len)
+{
+ if (unlikely(request_len < data_size))
+ return -EINVAL;
+
+ return 0;
+}
+
static int reply(struct ib_mad_hdr *smp)
{
/*
@@ -89,28 +108,222 @@ void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port)
ib_dispatch_event(&event);
}
-static void send_trap(struct hfi1_ibport *ibp, void *data, unsigned len)
+/*
+ * If the port is down, clean up all pending traps. We need to be careful
+ * with the given trap, because it may be queued.
+ */
+static void cleanup_traps(struct hfi1_ibport *ibp, struct trap_node *trap)
+{
+ struct trap_node *node, *q;
+ unsigned long flags;
+ struct list_head trap_list;
+ int i;
+
+ for (i = 0; i < RVT_MAX_TRAP_LISTS; i++) {
+ spin_lock_irqsave(&ibp->rvp.lock, flags);
+ list_replace_init(&ibp->rvp.trap_lists[i].list, &trap_list);
+ ibp->rvp.trap_lists[i].list_len = 0;
+ spin_unlock_irqrestore(&ibp->rvp.lock, flags);
+
+ /*
+ * Remove all items from the list, freeing all the non-given
+ * traps.
+ */
+ list_for_each_entry_safe(node, q, &trap_list, list) {
+ list_del(&node->list);
+ if (node != trap)
+ kfree(node);
+ }
+ }
+
+ /*
+ * If this wasn't on one of the lists it would not be freed. If it
+ * was on the list, it is now safe to free.
+ */
+ kfree(trap);
+}
+
+static struct trap_node *check_and_add_trap(struct hfi1_ibport *ibp,
+ struct trap_node *trap)
+{
+ struct trap_node *node;
+ struct trap_list *trap_list;
+ unsigned long flags;
+ unsigned long timeout;
+ int found = 0;
+ unsigned int queue_id;
+ static int trap_count;
+
+ queue_id = trap->data.generic_type & 0x0F;
+ if (queue_id >= RVT_MAX_TRAP_LISTS) {
+ trap_count++;
+ pr_err_ratelimited("hfi1: Invalid trap 0x%0x dropped. Total dropped: %d\n",
+ trap->data.generic_type, trap_count);
+ kfree(trap);
+ return NULL;
+ }
+
+ /*
+ * Since the retry (handle timeout) does not remove a trap request
+ * from the list, all we have to do is compare the node.
+ */
+ spin_lock_irqsave(&ibp->rvp.lock, flags);
+ trap_list = &ibp->rvp.trap_lists[queue_id];
+
+ list_for_each_entry(node, &trap_list->list, list) {
+ if (node == trap) {
+ node->retry++;
+ found = 1;
+ break;
+ }
+ }
+
+	/* If it is not on the list, add it, limited to RVT_MAX_TRAP_LEN. */
+ if (!found) {
+ if (trap_list->list_len < RVT_MAX_TRAP_LEN) {
+ trap_list->list_len++;
+ list_add_tail(&trap->list, &trap_list->list);
+ } else {
+ pr_warn_ratelimited("hfi1: Maximum trap limit reached for 0x%0x traps\n",
+ trap->data.generic_type);
+ kfree(trap);
+ }
+ }
+
+ /*
+ * Next check to see if there is a timer pending. If not, set it up
+ * and get the first trap from the list.
+ */
+ node = NULL;
+ if (!timer_pending(&ibp->rvp.trap_timer)) {
+ /*
+ * o14-2
+ * If the time out is set we have to wait until it expires
+ * before the trap can be sent.
+ * This should be > RVT_TRAP_TIMEOUT
+ */
+ timeout = (RVT_TRAP_TIMEOUT *
+ (1UL << ibp->rvp.subnet_timeout)) / 1000;
+ mod_timer(&ibp->rvp.trap_timer,
+ jiffies + usecs_to_jiffies(timeout));
+ node = list_first_entry(&trap_list->list, struct trap_node,
+ list);
+ node->in_use = 1;
+ }
+ spin_unlock_irqrestore(&ibp->rvp.lock, flags);
+
+ return node;
+}
+
+static void subn_handle_opa_trap_repress(struct hfi1_ibport *ibp,
+ struct opa_smp *smp)
+{
+ struct trap_list *trap_list;
+ struct trap_node *trap;
+ unsigned long flags;
+ int i;
+
+ if (smp->attr_id != IB_SMP_ATTR_NOTICE)
+ return;
+
+ spin_lock_irqsave(&ibp->rvp.lock, flags);
+ for (i = 0; i < RVT_MAX_TRAP_LISTS; i++) {
+ trap_list = &ibp->rvp.trap_lists[i];
+ trap = list_first_entry_or_null(&trap_list->list,
+ struct trap_node, list);
+ if (trap && trap->tid == smp->tid) {
+ if (trap->in_use) {
+ trap->repress = 1;
+ } else {
+ trap_list->list_len--;
+ list_del(&trap->list);
+ kfree(trap);
+ }
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ibp->rvp.lock, flags);
+}
+
+static void hfi1_update_sm_ah_attr(struct hfi1_ibport *ibp,
+ struct rdma_ah_attr *attr, u32 dlid)
+{
+ rdma_ah_set_dlid(attr, dlid);
+ rdma_ah_set_port_num(attr, ppd_from_ibp(ibp)->port);
+ if (dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) {
+ struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
+
+ rdma_ah_set_ah_flags(attr, IB_AH_GRH);
+ grh->sgid_index = 0;
+ grh->hop_limit = 1;
+ grh->dgid.global.subnet_prefix =
+ ibp->rvp.gid_prefix;
+ grh->dgid.global.interface_id = OPA_MAKE_ID(dlid);
+ }
+}
+
+static int hfi1_modify_qp0_ah(struct hfi1_ibport *ibp,
+ struct rvt_ah *ah, u32 dlid)
+{
+ struct rdma_ah_attr attr;
+ struct rvt_qp *qp0;
+ int ret = -EINVAL;
+
+ memset(&attr, 0, sizeof(attr));
+ attr.type = ah->ibah.type;
+ hfi1_update_sm_ah_attr(ibp, &attr, dlid);
+ rcu_read_lock();
+ qp0 = rcu_dereference(ibp->rvp.qp[0]);
+ if (qp0)
+ ret = rdma_modify_ah(&ah->ibah, &attr);
+ rcu_read_unlock();
+ return ret;
+}
+
+static struct ib_ah *hfi1_create_qp0_ah(struct hfi1_ibport *ibp, u32 dlid)
+{
+ struct rdma_ah_attr attr;
+ struct ib_ah *ah = ERR_PTR(-EINVAL);
+ struct rvt_qp *qp0;
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ struct hfi1_devdata *dd = dd_from_ppd(ppd);
+ u8 port_num = ppd->port;
+
+ memset(&attr, 0, sizeof(attr));
+ attr.type = rdma_ah_find_type(&dd->verbs_dev.rdi.ibdev, port_num);
+ hfi1_update_sm_ah_attr(ibp, &attr, dlid);
+ rcu_read_lock();
+ qp0 = rcu_dereference(ibp->rvp.qp[0]);
+ if (qp0)
+ ah = rdma_create_ah(qp0->ibqp.pd, &attr);
+ rcu_read_unlock();
+ return ah;
+}
+
+static void send_trap(struct hfi1_ibport *ibp, struct trap_node *trap)
{
struct ib_mad_send_buf *send_buf;
struct ib_mad_agent *agent;
struct opa_smp *smp;
- int ret;
unsigned long flags;
- unsigned long timeout;
int pkey_idx;
u32 qpn = ppd_from_ibp(ibp)->sm_trap_qp;
agent = ibp->rvp.send_agent;
- if (!agent)
+ if (!agent) {
+ cleanup_traps(ibp, trap);
return;
+ }
/* o14-3.2.1 */
- if (ppd_from_ibp(ibp)->lstate != IB_PORT_ACTIVE)
+ if (driver_lstate(ppd_from_ibp(ibp)) != IB_PORT_ACTIVE) {
+ cleanup_traps(ibp, trap);
return;
+ }
- /* o14-2 */
- if (ibp->rvp.trap_timeout && time_before(jiffies,
- ibp->rvp.trap_timeout))
+ /* Add the trap to the list if necessary and see if we can send it */
+ trap = check_and_add_trap(ibp, trap);
+ if (!trap)
return;
pkey_idx = hfi1_lookup_pkey_idx(ibp, LIM_MGMT_P_KEY);
@@ -131,11 +344,21 @@ static void send_trap(struct hfi1_ibport *ibp, void *data, unsigned len)
smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
smp->class_version = OPA_SM_CLASS_VERSION;
smp->method = IB_MGMT_METHOD_TRAP;
- ibp->rvp.tid++;
- smp->tid = cpu_to_be64(ibp->rvp.tid);
+
+ /* Only update the transaction ID for new traps (o13-5). */
+ if (trap->tid == 0) {
+ ibp->rvp.tid++;
+ /* make sure that tid != 0 */
+ if (ibp->rvp.tid == 0)
+ ibp->rvp.tid++;
+ trap->tid = cpu_to_be64(ibp->rvp.tid);
+ }
+ smp->tid = trap->tid;
+
smp->attr_id = IB_SMP_ATTR_NOTICE;
/* o14-1: smp->mkey = 0; */
- memcpy(smp->route.lid.data, data, len);
+
+ memcpy(smp->route.lid.data, &trap->data, trap->len);
spin_lock_irqsave(&ibp->rvp.lock, flags);
if (!ibp->rvp.sm_ah) {
@@ -144,65 +367,101 @@ static void send_trap(struct hfi1_ibport *ibp, void *data, unsigned len)
ah = hfi1_create_qp0_ah(ibp, ibp->rvp.sm_lid);
if (IS_ERR(ah)) {
- ret = PTR_ERR(ah);
- } else {
- send_buf->ah = ah;
- ibp->rvp.sm_ah = ibah_to_rvtah(ah);
- ret = 0;
+ spin_unlock_irqrestore(&ibp->rvp.lock, flags);
+ return;
}
+ send_buf->ah = ah;
+ ibp->rvp.sm_ah = ibah_to_rvtah(ah);
} else {
- ret = -EINVAL;
+ spin_unlock_irqrestore(&ibp->rvp.lock, flags);
+ return;
}
} else {
send_buf->ah = &ibp->rvp.sm_ah->ibah;
- ret = 0;
}
+
+ /*
+ * If the trap was repressed while things were getting set up, don't
+ * bother sending it. This could happen for a retry.
+ */
+ if (trap->repress) {
+ list_del(&trap->list);
+ spin_unlock_irqrestore(&ibp->rvp.lock, flags);
+ kfree(trap);
+ ib_free_send_mad(send_buf);
+ return;
+ }
+
+ trap->in_use = 0;
spin_unlock_irqrestore(&ibp->rvp.lock, flags);
- if (!ret)
- ret = ib_post_send_mad(send_buf, NULL);
- if (!ret) {
- /* 4.096 usec. */
- timeout = (4096 * (1UL << ibp->rvp.subnet_timeout)) / 1000;
- ibp->rvp.trap_timeout = jiffies + usecs_to_jiffies(timeout);
- } else {
+ if (ib_post_send_mad(send_buf, NULL))
ib_free_send_mad(send_buf);
- ibp->rvp.trap_timeout = 0;
+}
+
+void hfi1_handle_trap_timer(unsigned long data)
+{
+ struct hfi1_ibport *ibp = (struct hfi1_ibport *)data;
+ struct trap_node *trap = NULL;
+ unsigned long flags;
+ int i;
+
+ /* Find the trap with the highest priority */
+ spin_lock_irqsave(&ibp->rvp.lock, flags);
+ for (i = 0; !trap && i < RVT_MAX_TRAP_LISTS; i++) {
+ trap = list_first_entry_or_null(&ibp->rvp.trap_lists[i].list,
+ struct trap_node, list);
}
+ spin_unlock_irqrestore(&ibp->rvp.lock, flags);
+
+ if (trap)
+ send_trap(ibp, trap);
+}
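hfi1_handle_trap_timer() is the expiry handler for the per-port trap timer armed in check_and_add_trap(); on expiry it picks the first pending trap from the highest-priority non-empty list and re-enters send_trap(). The timer initialization itself is not part of this hunk; with the timer API of this kernel generation it would presumably be wired up along these lines (sketch, location assumed):

	/* Sketch only: arming the per-port trap timer against its handler. */
	setup_timer(&ibp->rvp.trap_timer, hfi1_handle_trap_timer,
		    (unsigned long)ibp);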
+
+static struct trap_node *create_trap_node(u8 type, __be16 trap_num, u32 lid)
+{
+ struct trap_node *trap;
+
+ trap = kzalloc(sizeof(*trap), GFP_ATOMIC);
+ if (!trap)
+ return NULL;
+
+ INIT_LIST_HEAD(&trap->list);
+ trap->data.generic_type = type;
+ trap->data.prod_type_lsb = IB_NOTICE_PROD_CA;
+ trap->data.trap_num = trap_num;
+ trap->data.issuer_lid = cpu_to_be32(lid);
+
+ return trap;
}
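create_trap_node() allocates the queueing wrapper with GFP_ATOMIC and pre-fills the common notice header; from then on send_trap() owns the node and either queues it, leaves it on the per-port list for the retry timer, or frees it on error. The notice-specific helpers that follow all share the same three-step shape, sketched here with an arbitrary info-type trap (illustrative only, not part of the patch):

static void example_send_info_trap(struct hfi1_ibport *ibp, u32 lid)
{
	struct trap_node *trap;

	trap = create_trap_node(IB_NOTICE_TYPE_INFO,
				OPA_TRAP_CHANGE_CAPABILITY, lid);
	if (!trap)
		return;

	/* Fill in the notice-specific payload here. */
	trap->len = sizeof(trap->data);
	send_trap(ibp, trap);		/* send_trap() now owns the node */
}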
/*
- * Send a bad [PQ]_Key trap (ch. 14.3.8).
+ * Send a bad P_Key trap (ch. 14.3.8).
*/
-void hfi1_bad_pqkey(struct hfi1_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
- u32 qp1, u32 qp2, u16 lid1, u16 lid2)
+void hfi1_bad_pkey(struct hfi1_ibport *ibp, u32 key, u32 sl,
+ u32 qp1, u32 qp2, u32 lid1, u32 lid2)
{
- struct opa_mad_notice_attr data;
+ struct trap_node *trap;
u32 lid = ppd_from_ibp(ibp)->lid;
- u32 _lid1 = lid1;
- u32 _lid2 = lid2;
-
- memset(&data, 0, sizeof(data));
- if (trap_num == OPA_TRAP_BAD_P_KEY)
- ibp->rvp.pkey_violations++;
- else
- ibp->rvp.qkey_violations++;
ibp->rvp.n_pkt_drops++;
+ ibp->rvp.pkey_violations++;
+
+ trap = create_trap_node(IB_NOTICE_TYPE_SECURITY, OPA_TRAP_BAD_P_KEY,
+ lid);
+ if (!trap)
+ return;
/* Send violation trap */
- data.generic_type = IB_NOTICE_TYPE_SECURITY;
- data.prod_type_lsb = IB_NOTICE_PROD_CA;
- data.trap_num = trap_num;
- data.issuer_lid = cpu_to_be32(lid);
- data.ntc_257_258.lid1 = cpu_to_be32(_lid1);
- data.ntc_257_258.lid2 = cpu_to_be32(_lid2);
- data.ntc_257_258.key = cpu_to_be32(key);
- data.ntc_257_258.sl = sl << 3;
- data.ntc_257_258.qp1 = cpu_to_be32(qp1);
- data.ntc_257_258.qp2 = cpu_to_be32(qp2);
-
- send_trap(ibp, &data, sizeof(data));
+ trap->data.ntc_257_258.lid1 = cpu_to_be32(lid1);
+ trap->data.ntc_257_258.lid2 = cpu_to_be32(lid2);
+ trap->data.ntc_257_258.key = cpu_to_be32(key);
+ trap->data.ntc_257_258.sl = sl << 3;
+ trap->data.ntc_257_258.qp1 = cpu_to_be32(qp1);
+ trap->data.ntc_257_258.qp2 = cpu_to_be32(qp2);
+
+ trap->len = sizeof(trap->data);
+ send_trap(ibp, trap);
}
/*
@@ -211,34 +470,36 @@ void hfi1_bad_pqkey(struct hfi1_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
static void bad_mkey(struct hfi1_ibport *ibp, struct ib_mad_hdr *mad,
__be64 mkey, __be32 dr_slid, u8 return_path[], u8 hop_cnt)
{
- struct opa_mad_notice_attr data;
+ struct trap_node *trap;
u32 lid = ppd_from_ibp(ibp)->lid;
- memset(&data, 0, sizeof(data));
+ trap = create_trap_node(IB_NOTICE_TYPE_SECURITY, OPA_TRAP_BAD_M_KEY,
+ lid);
+ if (!trap)
+ return;
+
/* Send violation trap */
- data.generic_type = IB_NOTICE_TYPE_SECURITY;
- data.prod_type_lsb = IB_NOTICE_PROD_CA;
- data.trap_num = OPA_TRAP_BAD_M_KEY;
- data.issuer_lid = cpu_to_be32(lid);
- data.ntc_256.lid = data.issuer_lid;
- data.ntc_256.method = mad->method;
- data.ntc_256.attr_id = mad->attr_id;
- data.ntc_256.attr_mod = mad->attr_mod;
- data.ntc_256.mkey = mkey;
+ trap->data.ntc_256.lid = trap->data.issuer_lid;
+ trap->data.ntc_256.method = mad->method;
+ trap->data.ntc_256.attr_id = mad->attr_id;
+ trap->data.ntc_256.attr_mod = mad->attr_mod;
+ trap->data.ntc_256.mkey = mkey;
if (mad->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
- data.ntc_256.dr_slid = dr_slid;
- data.ntc_256.dr_trunc_hop = IB_NOTICE_TRAP_DR_NOTICE;
- if (hop_cnt > ARRAY_SIZE(data.ntc_256.dr_rtn_path)) {
- data.ntc_256.dr_trunc_hop |=
+ trap->data.ntc_256.dr_slid = dr_slid;
+ trap->data.ntc_256.dr_trunc_hop = IB_NOTICE_TRAP_DR_NOTICE;
+ if (hop_cnt > ARRAY_SIZE(trap->data.ntc_256.dr_rtn_path)) {
+ trap->data.ntc_256.dr_trunc_hop |=
IB_NOTICE_TRAP_DR_TRUNC;
- hop_cnt = ARRAY_SIZE(data.ntc_256.dr_rtn_path);
+ hop_cnt = ARRAY_SIZE(trap->data.ntc_256.dr_rtn_path);
}
- data.ntc_256.dr_trunc_hop |= hop_cnt;
- memcpy(data.ntc_256.dr_rtn_path, return_path,
+ trap->data.ntc_256.dr_trunc_hop |= hop_cnt;
+ memcpy(trap->data.ntc_256.dr_rtn_path, return_path,
hop_cnt);
}
- send_trap(ibp, &data, sizeof(data));
+ trap->len = sizeof(trap->data);
+
+ send_trap(ibp, trap);
}
/*
@@ -246,22 +507,24 @@ static void bad_mkey(struct hfi1_ibport *ibp, struct ib_mad_hdr *mad,
*/
void hfi1_cap_mask_chg(struct rvt_dev_info *rdi, u8 port_num)
{
- struct opa_mad_notice_attr data;
+ struct trap_node *trap;
struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
struct hfi1_ibport *ibp = &dd->pport[port_num - 1].ibport_data;
u32 lid = ppd_from_ibp(ibp)->lid;
- memset(&data, 0, sizeof(data));
+ trap = create_trap_node(IB_NOTICE_TYPE_INFO,
+ OPA_TRAP_CHANGE_CAPABILITY,
+ lid);
+ if (!trap)
+ return;
- data.generic_type = IB_NOTICE_TYPE_INFO;
- data.prod_type_lsb = IB_NOTICE_PROD_CA;
- data.trap_num = OPA_TRAP_CHANGE_CAPABILITY;
- data.issuer_lid = cpu_to_be32(lid);
- data.ntc_144.lid = data.issuer_lid;
- data.ntc_144.new_cap_mask = cpu_to_be32(ibp->rvp.port_cap_flags);
+ trap->data.ntc_144.lid = trap->data.issuer_lid;
+ trap->data.ntc_144.new_cap_mask = cpu_to_be32(ibp->rvp.port_cap_flags);
+ trap->data.ntc_144.cap_mask3 = cpu_to_be16(ibp->rvp.port_cap3_flags);
- send_trap(ibp, &data, sizeof(data));
+ trap->len = sizeof(trap->data);
+ send_trap(ibp, trap);
}
/*
@@ -269,19 +532,19 @@ void hfi1_cap_mask_chg(struct rvt_dev_info *rdi, u8 port_num)
*/
void hfi1_sys_guid_chg(struct hfi1_ibport *ibp)
{
- struct opa_mad_notice_attr data;
+ struct trap_node *trap;
u32 lid = ppd_from_ibp(ibp)->lid;
- memset(&data, 0, sizeof(data));
+ trap = create_trap_node(IB_NOTICE_TYPE_INFO, OPA_TRAP_CHANGE_SYSGUID,
+ lid);
+ if (!trap)
+ return;
- data.generic_type = IB_NOTICE_TYPE_INFO;
- data.prod_type_lsb = IB_NOTICE_PROD_CA;
- data.trap_num = OPA_TRAP_CHANGE_SYSGUID;
- data.issuer_lid = cpu_to_be32(lid);
- data.ntc_145.new_sys_guid = ib_hfi1_sys_image_guid;
- data.ntc_145.lid = data.issuer_lid;
+ trap->data.ntc_145.new_sys_guid = ib_hfi1_sys_image_guid;
+ trap->data.ntc_145.lid = trap->data.issuer_lid;
- send_trap(ibp, &data, sizeof(data));
+ trap->len = sizeof(trap->data);
+ send_trap(ibp, trap);
}
/*
@@ -289,29 +552,30 @@ void hfi1_sys_guid_chg(struct hfi1_ibport *ibp)
*/
void hfi1_node_desc_chg(struct hfi1_ibport *ibp)
{
- struct opa_mad_notice_attr data;
+ struct trap_node *trap;
u32 lid = ppd_from_ibp(ibp)->lid;
- memset(&data, 0, sizeof(data));
+ trap = create_trap_node(IB_NOTICE_TYPE_INFO,
+ OPA_TRAP_CHANGE_CAPABILITY,
+ lid);
+ if (!trap)
+ return;
- data.generic_type = IB_NOTICE_TYPE_INFO;
- data.prod_type_lsb = IB_NOTICE_PROD_CA;
- data.trap_num = OPA_TRAP_CHANGE_CAPABILITY;
- data.issuer_lid = cpu_to_be32(lid);
- data.ntc_144.lid = data.issuer_lid;
- data.ntc_144.change_flags =
+ trap->data.ntc_144.lid = trap->data.issuer_lid;
+ trap->data.ntc_144.change_flags =
cpu_to_be16(OPA_NOTICE_TRAP_NODE_DESC_CHG);
- send_trap(ibp, &data, sizeof(data));
+ trap->len = sizeof(trap->data);
+ send_trap(ibp, trap);
}
static int __subn_get_opa_nodedesc(struct opa_smp *smp, u32 am,
u8 *data, struct ib_device *ibdev,
- u8 port, u32 *resp_len)
+ u8 port, u32 *resp_len, u32 max_len)
{
struct opa_node_description *nd;
- if (am) {
+ if (am || smp_length_check(sizeof(*nd), max_len)) {
smp->status |= IB_SMP_INVALID_FIELD;
return reply((struct ib_mad_hdr *)smp);
}
@@ -328,7 +592,7 @@ static int __subn_get_opa_nodedesc(struct opa_smp *smp, u32 am,
static int __subn_get_opa_nodeinfo(struct opa_smp *smp, u32 am, u8 *data,
struct ib_device *ibdev, u8 port,
- u32 *resp_len)
+ u32 *resp_len, u32 max_len)
{
struct opa_node_info *ni;
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
@@ -338,6 +602,7 @@ static int __subn_get_opa_nodeinfo(struct opa_smp *smp, u32 am, u8 *data,
/* GUID 0 is illegal */
if (am || pidx >= dd->num_pports || ibdev->node_guid == 0 ||
+ smp_length_check(sizeof(*ni), max_len) ||
get_sguid(to_iport(ibdev, port), HFI1_PORT_GUID_INDEX) == 0) {
smp->status |= IB_SMP_INVALID_FIELD;
return reply((struct ib_mad_hdr *)smp);
@@ -519,7 +784,7 @@ void read_ltp_rtt(struct hfi1_devdata *dd)
static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
struct ib_device *ibdev, u8 port,
- u32 *resp_len)
+ u32 *resp_len, u32 max_len)
{
int i;
struct hfi1_devdata *dd;
@@ -535,7 +800,7 @@ static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
u32 buffer_units;
u64 tmp = 0;
- if (num_ports != 1) {
+ if (num_ports != 1 || smp_length_check(sizeof(*pi), max_len)) {
smp->status |= IB_SMP_INVALID_FIELD;
return reply((struct ib_mad_hdr *)smp);
}
@@ -605,7 +870,7 @@ static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
ppd->offline_disabled_reason;
pi->port_states.portphysstate_portstate =
- (hfi1_ibphys_portstate(ppd) << 4) | state;
+ (driver_pstate(ppd) << 4) | state;
pi->mkeyprotect_lmc = (ibp->rvp.mkeyprot << 6) | ppd->lmc;
@@ -704,13 +969,9 @@ static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
buffer_units |= (dd->vl15_init << 11) & OPA_PI_MASK_BUF_UNIT_VL15_INIT;
pi->buffer_units = cpu_to_be32(buffer_units);
- pi->opa_cap_mask = cpu_to_be16(OPA_CAP_MASK3_IsSharedSpaceSupported |
- OPA_CAP_MASK3_IsEthOnFabricSupported);
- /* Driver does not support mcast/collective configuration */
- pi->opa_cap_mask &=
- cpu_to_be16(~OPA_CAP_MASK3_IsAddrRangeConfigSupported);
- pi->collectivemask_multicastmask = ((HFI1_COLLECTIVE_NR & 0x7)
- << 3 | (HFI1_MCAST_NR & 0x7));
+ pi->opa_cap_mask = cpu_to_be16(ibp->rvp.port_cap3_flags);
+ pi->collectivemask_multicastmask = ((OPA_COLLECTIVE_NR & 0x7)
+ << 3 | (OPA_MCAST_NR & 0x7));
/* HFI supports a replay buffer 128 LTPs in size */
pi->replay_depth.buffer = 0x80;
@@ -748,7 +1009,7 @@ static int get_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
static int __subn_get_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
struct ib_device *ibdev, u8 port,
- u32 *resp_len)
+ u32 *resp_len, u32 max_len)
{
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
u32 n_blocks_req = OPA_AM_NBLK(am);
@@ -771,6 +1032,11 @@ static int __subn_get_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
size = (n_blocks_req * OPA_PARTITION_TABLE_BLK_SIZE) * sizeof(u16);
+ if (smp_length_check(size, max_len)) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
if (start_block + n_blocks_req > n_blocks_avail ||
n_blocks_req > OPA_NUM_PKEY_BLOCKS_PER_SMP) {
pr_warn("OPA Get PKey AM Invalid : s 0x%x; req 0x%x; "
@@ -915,8 +1181,8 @@ static int physical_transition_allowed(int old, int new)
static int port_states_transition_allowed(struct hfi1_pportdata *ppd,
u32 logical_new, u32 physical_new)
{
- u32 physical_old = driver_physical_state(ppd);
- u32 logical_old = driver_logical_state(ppd);
+ u32 physical_old = driver_pstate(ppd);
+ u32 logical_old = driver_lstate(ppd);
int ret, logical_allowed, physical_allowed;
ret = logical_transition_allowed(logical_old, logical_new);
@@ -1074,7 +1340,7 @@ static int set_port_states(struct hfi1_pportdata *ppd, struct opa_smp *smp,
*/
static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
struct ib_device *ibdev, u8 port,
- u32 *resp_len)
+ u32 *resp_len, u32 max_len)
{
struct opa_port_info *pi = (struct opa_port_info *)data;
struct ib_event event;
@@ -1083,8 +1349,8 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
struct hfi1_ibport *ibp;
u8 clientrereg;
unsigned long flags;
- u32 smlid, opa_lid; /* tmp vars to hold LID values */
- u16 lid;
+ u32 smlid;
+ u32 lid;
u8 ls_old, ls_new, ps_new;
u8 vls;
u8 msl;
@@ -1095,27 +1361,26 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
int ret, i, invalid = 0, call_set_mtu = 0;
int call_link_downgrade_policy = 0;
- if (num_ports != 1) {
+ if (num_ports != 1 ||
+ smp_length_check(sizeof(*pi), max_len)) {
smp->status |= IB_SMP_INVALID_FIELD;
return reply((struct ib_mad_hdr *)smp);
}
- opa_lid = be32_to_cpu(pi->lid);
- if (opa_lid & 0xFFFF0000) {
- pr_warn("OPA_PortInfo lid out of range: %X\n", opa_lid);
+ lid = be32_to_cpu(pi->lid);
+ if (lid & 0xFF000000) {
+ pr_warn("OPA_PortInfo lid out of range: %X\n", lid);
smp->status |= IB_SMP_INVALID_FIELD;
goto get_only;
}
- lid = (u16)(opa_lid & 0x0000FFFF);
smlid = be32_to_cpu(pi->sm_lid);
- if (smlid & 0xFFFF0000) {
+ if (smlid & 0xFF000000) {
pr_warn("OPA_PortInfo SM lid out of range: %X\n", smlid);
smp->status |= IB_SMP_INVALID_FIELD;
goto get_only;
}
- smlid &= 0x0000FFFF;
clientrereg = (pi->clientrereg_subnettimeout &
OPA_PI_MASK_CLIENT_REREGISTER);
@@ -1130,12 +1395,16 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
ls_old = driver_lstate(ppd);
ibp->rvp.mkey = pi->mkey;
- ibp->rvp.gid_prefix = pi->subnet_prefix;
+ if (ibp->rvp.gid_prefix != pi->subnet_prefix) {
+ ibp->rvp.gid_prefix = pi->subnet_prefix;
+ event.event = IB_EVENT_GID_CHANGE;
+ ib_dispatch_event(&event);
+ }
ibp->rvp.mkey_lease_period = be16_to_cpu(pi->mkey_lease_period);
/* Must be a valid unicast LID address. */
if ((lid == 0 && ls_old > IB_PORT_INIT) ||
- lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) {
+ (hfi1_is_16B_mcast(lid))) {
smp->status |= IB_SMP_INVALID_FIELD;
pr_warn("SubnSet(OPA_PortInfo) lid invalid 0x%x\n",
lid);
@@ -1148,6 +1417,16 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
hfi1_set_lid(ppd, lid, pi->mkeyprotect_lmc & OPA_PI_MASK_LMC);
event.event = IB_EVENT_LID_CHANGE;
ib_dispatch_event(&event);
+
+ if (HFI1_PORT_GUID_INDEX + 1 < HFI1_GUIDS_PER_PORT) {
+ /* Manufacture GID from LID to support extended
+ * addresses
+ */
+ ppd->guids[HFI1_PORT_GUID_INDEX + 1] =
+ be64_to_cpu(OPA_MAKE_ID(lid));
+ event.event = IB_EVENT_GID_CHANGE;
+ ib_dispatch_event(&event);
+ }
}
msl = pi->smsl & OPA_PI_MASK_SMSL;
@@ -1158,7 +1437,7 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
/* Must be a valid unicast LID address. */
if ((smlid == 0 && ls_old > IB_PORT_INIT) ||
- smlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) {
+ (hfi1_is_16B_mcast(smlid))) {
smp->status |= IB_SMP_INVALID_FIELD;
pr_warn("SubnSet(OPA_PortInfo) smlid invalid 0x%x\n", smlid);
} else if (smlid != ibp->rvp.sm_lid || msl != ibp->rvp.sm_sl) {
@@ -1166,7 +1445,7 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
spin_lock_irqsave(&ibp->rvp.lock, flags);
if (ibp->rvp.sm_ah) {
if (smlid != ibp->rvp.sm_lid)
- rdma_ah_set_dlid(&ibp->rvp.sm_ah->attr, smlid);
+ hfi1_modify_qp0_ah(ibp, ibp->rvp.sm_ah, smlid);
if (msl != ibp->rvp.sm_sl)
rdma_ah_set_sl(&ibp->rvp.sm_ah->attr, msl);
}
@@ -1346,7 +1625,8 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
if (ret)
return ret;
- ret = __subn_get_opa_portinfo(smp, am, data, ibdev, port, resp_len);
+ ret = __subn_get_opa_portinfo(smp, am, data, ibdev, port, resp_len,
+ max_len);
/* restore re-reg bit per o14-12.2.1 */
pi->clientrereg_subnettimeout |= clientrereg;
@@ -1363,7 +1643,8 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
return ret;
get_only:
- return __subn_get_opa_portinfo(smp, am, data, ibdev, port, resp_len);
+ return __subn_get_opa_portinfo(smp, am, data, ibdev, port, resp_len,
+ max_len);
}
/**
@@ -1424,7 +1705,7 @@ static int set_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
static int __subn_set_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
struct ib_device *ibdev, u8 port,
- u32 *resp_len)
+ u32 *resp_len, u32 max_len)
{
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
u32 n_blocks_sent = OPA_AM_NBLK(am);
@@ -1434,6 +1715,7 @@ static int __subn_set_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
int i;
u16 n_blocks_avail;
unsigned npkeys = hfi1_get_npkeys(dd);
+ u32 size = 0;
if (n_blocks_sent == 0) {
pr_warn("OPA Get PKey AM Invalid : P = %d; B = 0x%x; N = 0x%x\n",
@@ -1444,6 +1726,13 @@ static int __subn_set_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
n_blocks_avail = (u16)(npkeys / OPA_PARTITION_TABLE_BLK_SIZE) + 1;
+ size = sizeof(u16) * (n_blocks_sent * OPA_PARTITION_TABLE_BLK_SIZE);
+
+ if (smp_length_check(size, max_len)) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
if (start_block + n_blocks_sent > n_blocks_avail ||
n_blocks_sent > OPA_NUM_PKEY_BLOCKS_PER_SMP) {
pr_warn("OPA Set PKey AM Invalid : s 0x%x; req 0x%x; avail 0x%x; blk/smp 0x%lx\n",
@@ -1461,7 +1750,8 @@ static int __subn_set_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
return reply((struct ib_mad_hdr *)smp);
}
- return __subn_get_opa_pkeytable(smp, am, data, ibdev, port, resp_len);
+ return __subn_get_opa_pkeytable(smp, am, data, ibdev, port, resp_len,
+ max_len);
}
#define ILLEGAL_VL 12
@@ -1522,14 +1812,14 @@ static int get_sc2vlt_tables(struct hfi1_devdata *dd, void *data)
static int __subn_get_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data,
struct ib_device *ibdev, u8 port,
- u32 *resp_len)
+ u32 *resp_len, u32 max_len)
{
struct hfi1_ibport *ibp = to_iport(ibdev, port);
u8 *p = data;
size_t size = ARRAY_SIZE(ibp->sl_to_sc); /* == 32 */
unsigned i;
- if (am) {
+ if (am || smp_length_check(size, max_len)) {
smp->status |= IB_SMP_INVALID_FIELD;
return reply((struct ib_mad_hdr *)smp);
}
@@ -1545,14 +1835,15 @@ static int __subn_get_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data,
static int __subn_set_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data,
struct ib_device *ibdev, u8 port,
- u32 *resp_len)
+ u32 *resp_len, u32 max_len)
{
struct hfi1_ibport *ibp = to_iport(ibdev, port);
u8 *p = data;
+ size_t size = ARRAY_SIZE(ibp->sl_to_sc);
int i;
u8 sc;
- if (am) {
+ if (am || smp_length_check(size, max_len)) {
smp->status |= IB_SMP_INVALID_FIELD;
return reply((struct ib_mad_hdr *)smp);
}
@@ -1567,19 +1858,20 @@ static int __subn_set_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data,
}
}
- return __subn_get_opa_sl_to_sc(smp, am, data, ibdev, port, resp_len);
+ return __subn_get_opa_sl_to_sc(smp, am, data, ibdev, port, resp_len,
+ max_len);
}
static int __subn_get_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data,
struct ib_device *ibdev, u8 port,
- u32 *resp_len)
+ u32 *resp_len, u32 max_len)
{
struct hfi1_ibport *ibp = to_iport(ibdev, port);
u8 *p = data;
size_t size = ARRAY_SIZE(ibp->sc_to_sl); /* == 32 */
unsigned i;
- if (am) {
+ if (am || smp_length_check(size, max_len)) {
smp->status |= IB_SMP_INVALID_FIELD;
return reply((struct ib_mad_hdr *)smp);
}
@@ -1595,13 +1887,14 @@ static int __subn_get_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data,
static int __subn_set_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data,
struct ib_device *ibdev, u8 port,
- u32 *resp_len)
+ u32 *resp_len, u32 max_len)
{
struct hfi1_ibport *ibp = to_iport(ibdev, port);
+ size_t size = ARRAY_SIZE(ibp->sc_to_sl);
u8 *p = data;
int i;
- if (am) {
+ if (am || smp_length_check(size, max_len)) {
smp->status |= IB_SMP_INVALID_FIELD;
return reply((struct ib_mad_hdr *)smp);
}
@@ -1609,19 +1902,20 @@ static int __subn_set_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data,
for (i = 0; i < ARRAY_SIZE(ibp->sc_to_sl); i++)
ibp->sc_to_sl[i] = *p++;
- return __subn_get_opa_sc_to_sl(smp, am, data, ibdev, port, resp_len);
+ return __subn_get_opa_sc_to_sl(smp, am, data, ibdev, port, resp_len,
+ max_len);
}
static int __subn_get_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
struct ib_device *ibdev, u8 port,
- u32 *resp_len)
+ u32 *resp_len, u32 max_len)
{
u32 n_blocks = OPA_AM_NBLK(am);
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
void *vp = (void *)data;
size_t size = 4 * sizeof(u64);
- if (n_blocks != 1) {
+ if (n_blocks != 1 || smp_length_check(size, max_len)) {
smp->status |= IB_SMP_INVALID_FIELD;
return reply((struct ib_mad_hdr *)smp);
}
@@ -1636,7 +1930,7 @@ static int __subn_get_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
static int __subn_set_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
struct ib_device *ibdev, u8 port,
- u32 *resp_len)
+ u32 *resp_len, u32 max_len)
{
u32 n_blocks = OPA_AM_NBLK(am);
int async_update = OPA_AM_ASYNC(am);
@@ -1644,8 +1938,15 @@ static int __subn_set_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
void *vp = (void *)data;
struct hfi1_pportdata *ppd;
int lstate;
+ /*
+ * set_sc2vlt_tables writes the information contained in *data
+ * to four 64-bit registers SendSC2VLt[0-3]. We need to make
+ * sure max_len is at least the total size of the four
+ * SendSC2VLt[0-3] registers.
+ */
+ size_t size = 4 * sizeof(u64);
- if (n_blocks != 1 || async_update) {
+ if (n_blocks != 1 || async_update || smp_length_check(size, max_len)) {
smp->status |= IB_SMP_INVALID_FIELD;
return reply((struct ib_mad_hdr *)smp);
}
@@ -1665,27 +1966,28 @@ static int __subn_set_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
set_sc2vlt_tables(dd, vp);
- return __subn_get_opa_sc_to_vlt(smp, am, data, ibdev, port, resp_len);
+ return __subn_get_opa_sc_to_vlt(smp, am, data, ibdev, port, resp_len,
+ max_len);
}
static int __subn_get_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data,
struct ib_device *ibdev, u8 port,
- u32 *resp_len)
+ u32 *resp_len, u32 max_len)
{
u32 n_blocks = OPA_AM_NPORT(am);
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
struct hfi1_pportdata *ppd;
void *vp = (void *)data;
- int size;
+ int size = sizeof(struct sc2vlnt);
- if (n_blocks != 1) {
+ if (n_blocks != 1 || smp_length_check(size, max_len)) {
smp->status |= IB_SMP_INVALID_FIELD;
return reply((struct ib_mad_hdr *)smp);
}
ppd = dd->pport + (port - 1);
- size = fm_get_table(ppd, FM_TBL_SC2VLNT, vp);
+ fm_get_table(ppd, FM_TBL_SC2VLNT, vp);
if (resp_len)
*resp_len += size;
@@ -1695,15 +1997,16 @@ static int __subn_get_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data,
static int __subn_set_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data,
struct ib_device *ibdev, u8 port,
- u32 *resp_len)
+ u32 *resp_len, u32 max_len)
{
u32 n_blocks = OPA_AM_NPORT(am);
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
struct hfi1_pportdata *ppd;
void *vp = (void *)data;
int lstate;
+ int size = sizeof(struct sc2vlnt);
- if (n_blocks != 1) {
+ if (n_blocks != 1 || smp_length_check(size, max_len)) {
smp->status |= IB_SMP_INVALID_FIELD;
return reply((struct ib_mad_hdr *)smp);
}
@@ -1721,12 +2024,12 @@ static int __subn_set_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data,
fm_set_table(ppd, FM_TBL_SC2VLNT, vp);
return __subn_get_opa_sc_to_vlnt(smp, am, data, ibdev, port,
- resp_len);
+ resp_len, max_len);
}
static int __subn_get_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
struct ib_device *ibdev, u8 port,
- u32 *resp_len)
+ u32 *resp_len, u32 max_len)
{
u32 nports = OPA_AM_NPORT(am);
u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
@@ -1735,7 +2038,7 @@ static int __subn_get_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
struct hfi1_pportdata *ppd;
struct opa_port_state_info *psi = (struct opa_port_state_info *)data;
- if (nports != 1) {
+ if (nports != 1 || smp_length_check(sizeof(*psi), max_len)) {
smp->status |= IB_SMP_INVALID_FIELD;
return reply((struct ib_mad_hdr *)smp);
}
@@ -1755,7 +2058,7 @@ static int __subn_get_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
ppd->offline_disabled_reason;
psi->port_states.portphysstate_portstate =
- (hfi1_ibphys_portstate(ppd) << 4) | (lstate & 0xf);
+ (driver_pstate(ppd) << 4) | (lstate & 0xf);
psi->link_width_downgrade_tx_active =
cpu_to_be16(ppd->link_width_downgrade_tx_active);
psi->link_width_downgrade_rx_active =
@@ -1768,7 +2071,7 @@ static int __subn_get_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
static int __subn_set_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
struct ib_device *ibdev, u8 port,
- u32 *resp_len)
+ u32 *resp_len, u32 max_len)
{
u32 nports = OPA_AM_NPORT(am);
u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
@@ -1779,7 +2082,7 @@ static int __subn_set_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
struct opa_port_state_info *psi = (struct opa_port_state_info *)data;
int ret, invalid = 0;
- if (nports != 1) {
+ if (nports != 1 || smp_length_check(sizeof(*psi), max_len)) {
smp->status |= IB_SMP_INVALID_FIELD;
return reply((struct ib_mad_hdr *)smp);
}
@@ -1809,19 +2112,21 @@ static int __subn_set_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
if (invalid)
smp->status |= IB_SMP_INVALID_FIELD;
- return __subn_get_opa_psi(smp, am, data, ibdev, port, resp_len);
+ return __subn_get_opa_psi(smp, am, data, ibdev, port, resp_len,
+ max_len);
}
static int __subn_get_opa_cable_info(struct opa_smp *smp, u32 am, u8 *data,
struct ib_device *ibdev, u8 port,
- u32 *resp_len)
+ u32 *resp_len, u32 max_len)
{
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
u32 addr = OPA_AM_CI_ADDR(am);
u32 len = OPA_AM_CI_LEN(am) + 1;
int ret;
- if (dd->pport->port_type != PORT_TYPE_QSFP) {
+ if (dd->pport->port_type != PORT_TYPE_QSFP ||
+ smp_length_check(len, max_len)) {
smp->status |= IB_SMP_INVALID_FIELD;
return reply((struct ib_mad_hdr *)smp);
}
@@ -1864,21 +2169,22 @@ static int __subn_get_opa_cable_info(struct opa_smp *smp, u32 am, u8 *data,
}
static int __subn_get_opa_bct(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port, u32 *resp_len)
+ struct ib_device *ibdev, u8 port, u32 *resp_len,
+ u32 max_len)
{
u32 num_ports = OPA_AM_NPORT(am);
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
struct hfi1_pportdata *ppd;
struct buffer_control *p = (struct buffer_control *)data;
- int size;
+ int size = sizeof(struct buffer_control);
- if (num_ports != 1) {
+ if (num_ports != 1 || smp_length_check(size, max_len)) {
smp->status |= IB_SMP_INVALID_FIELD;
return reply((struct ib_mad_hdr *)smp);
}
ppd = dd->pport + (port - 1);
- size = fm_get_table(ppd, FM_TBL_BUFFER_CONTROL, p);
+ fm_get_table(ppd, FM_TBL_BUFFER_CONTROL, p);
trace_bct_get(dd, p);
if (resp_len)
*resp_len += size;
@@ -1887,14 +2193,15 @@ static int __subn_get_opa_bct(struct opa_smp *smp, u32 am, u8 *data,
}
static int __subn_set_opa_bct(struct opa_smp *smp, u32 am, u8 *data,
- struct ib_device *ibdev, u8 port, u32 *resp_len)
+ struct ib_device *ibdev, u8 port, u32 *resp_len,
+ u32 max_len)
{
u32 num_ports = OPA_AM_NPORT(am);
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
struct hfi1_pportdata *ppd;
struct buffer_control *p = (struct buffer_control *)data;
- if (num_ports != 1) {
+ if (num_ports != 1 || smp_length_check(sizeof(*p), max_len)) {
smp->status |= IB_SMP_INVALID_FIELD;
return reply((struct ib_mad_hdr *)smp);
}
@@ -1905,41 +2212,43 @@ static int __subn_set_opa_bct(struct opa_smp *smp, u32 am, u8 *data,
return reply((struct ib_mad_hdr *)smp);
}
- return __subn_get_opa_bct(smp, am, data, ibdev, port, resp_len);
+ return __subn_get_opa_bct(smp, am, data, ibdev, port, resp_len,
+ max_len);
}
static int __subn_get_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data,
struct ib_device *ibdev, u8 port,
- u32 *resp_len)
+ u32 *resp_len, u32 max_len)
{
struct hfi1_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
u32 num_ports = OPA_AM_NPORT(am);
u8 section = (am & 0x00ff0000) >> 16;
u8 *p = data;
- int size = 0;
+ int size = 256;
- if (num_ports != 1) {
+ if (num_ports != 1 || smp_length_check(size, max_len)) {
smp->status |= IB_SMP_INVALID_FIELD;
return reply((struct ib_mad_hdr *)smp);
}
switch (section) {
case OPA_VLARB_LOW_ELEMENTS:
- size = fm_get_table(ppd, FM_TBL_VL_LOW_ARB, p);
+ fm_get_table(ppd, FM_TBL_VL_LOW_ARB, p);
break;
case OPA_VLARB_HIGH_ELEMENTS:
- size = fm_get_table(ppd, FM_TBL_VL_HIGH_ARB, p);
+ fm_get_table(ppd, FM_TBL_VL_HIGH_ARB, p);
break;
case OPA_VLARB_PREEMPT_ELEMENTS:
- size = fm_get_table(ppd, FM_TBL_VL_PREEMPT_ELEMS, p);
+ fm_get_table(ppd, FM_TBL_VL_PREEMPT_ELEMS, p);
break;
case OPA_VLARB_PREEMPT_MATRIX:
- size = fm_get_table(ppd, FM_TBL_VL_PREEMPT_MATRIX, p);
+ fm_get_table(ppd, FM_TBL_VL_PREEMPT_MATRIX, p);
break;
default:
pr_warn("OPA SubnGet(VL Arb) AM Invalid : 0x%x\n",
be32_to_cpu(smp->attr_mod));
smp->status |= IB_SMP_INVALID_FIELD;
+ size = 0;
break;
}
@@ -1951,14 +2260,15 @@ static int __subn_get_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data,
static int __subn_set_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data,
struct ib_device *ibdev, u8 port,
- u32 *resp_len)
+ u32 *resp_len, u32 max_len)
{
struct hfi1_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
u32 num_ports = OPA_AM_NPORT(am);
u8 section = (am & 0x00ff0000) >> 16;
u8 *p = data;
+ int size = 256;
- if (num_ports != 1) {
+ if (num_ports != 1 || smp_length_check(size, max_len)) {
smp->status |= IB_SMP_INVALID_FIELD;
return reply((struct ib_mad_hdr *)smp);
}
@@ -1986,7 +2296,8 @@ static int __subn_set_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data,
break;
}
- return __subn_get_opa_vl_arb(smp, am, data, ibdev, port, resp_len);
+ return __subn_get_opa_vl_arb(smp, am, data, ibdev, port, resp_len,
+ max_len);
}
struct opa_pma_mad {
@@ -3282,13 +3593,18 @@ struct opa_congestion_info_attr {
static int __subn_get_opa_cong_info(struct opa_smp *smp, u32 am, u8 *data,
struct ib_device *ibdev, u8 port,
- u32 *resp_len)
+ u32 *resp_len, u32 max_len)
{
struct opa_congestion_info_attr *p =
(struct opa_congestion_info_attr *)data;
struct hfi1_ibport *ibp = to_iport(ibdev, port);
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ if (smp_length_check(sizeof(*p), max_len)) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
p->congestion_info = 0;
p->control_table_cap = ppd->cc_max_table_entries;
p->congestion_log_length = OPA_CONG_LOG_ELEMS;
@@ -3301,7 +3617,7 @@ static int __subn_get_opa_cong_info(struct opa_smp *smp, u32 am, u8 *data,
static int __subn_get_opa_cong_setting(struct opa_smp *smp, u32 am,
u8 *data, struct ib_device *ibdev,
- u8 port, u32 *resp_len)
+ u8 port, u32 *resp_len, u32 max_len)
{
int i;
struct opa_congestion_setting_attr *p =
@@ -3311,6 +3627,11 @@ static int __subn_get_opa_cong_setting(struct opa_smp *smp, u32 am,
struct opa_congestion_setting_entry_shadow *entries;
struct cc_state *cc_state;
+ if (smp_length_check(sizeof(*p), max_len)) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
rcu_read_lock();
cc_state = get_cc_state(ppd);
@@ -3385,7 +3706,7 @@ static void apply_cc_state(struct hfi1_pportdata *ppd)
static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data,
struct ib_device *ibdev, u8 port,
- u32 *resp_len)
+ u32 *resp_len, u32 max_len)
{
struct opa_congestion_setting_attr *p =
(struct opa_congestion_setting_attr *)data;
@@ -3394,6 +3715,11 @@ static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data,
struct opa_congestion_setting_entry_shadow *entries;
int i;
+ if (smp_length_check(sizeof(*p), max_len)) {
+ smp->status |= IB_SMP_INVALID_FIELD;
+ return reply((struct ib_mad_hdr *)smp);
+ }
+
/*
* Save details from packet into the ppd. Hold the cc_state_lock so
* our information is consistent with anyone trying to apply the state.
@@ -3415,12 +3741,12 @@ static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data,
apply_cc_state(ppd);
return __subn_get_opa_cong_setting(smp, am, data, ibdev, port,
- resp_len);
+ resp_len, max_len);
}
static int __subn_get_opa_hfi1_cong_log(struct opa_smp *smp, u32 am,
u8 *data, struct ib_device *ibdev,
- u8 port, u32 *resp_len)
+ u8 port, u32 *resp_len, u32 max_len)
{
struct hfi1_ibport *ibp = to_iport(ibdev, port);
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
@@ -3428,7 +3754,7 @@ static int __subn_get_opa_hfi1_cong_log(struct opa_smp *smp, u32 am,
s64 ts;
int i;
- if (am != 0) {
+ if (am || smp_length_check(sizeof(*cong_log), max_len)) {
smp->status |= IB_SMP_INVALID_FIELD;
return reply((struct ib_mad_hdr *)smp);
}
@@ -3486,7 +3812,7 @@ static int __subn_get_opa_hfi1_cong_log(struct opa_smp *smp, u32 am,
static int __subn_get_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
struct ib_device *ibdev, u8 port,
- u32 *resp_len)
+ u32 *resp_len, u32 max_len)
{
struct ib_cc_table_attr *cc_table_attr =
(struct ib_cc_table_attr *)data;
@@ -3498,9 +3824,10 @@ static int __subn_get_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
int i, j;
u32 sentry, eentry;
struct cc_state *cc_state;
+ u32 size = sizeof(u16) * (IB_CCT_ENTRIES * n_blocks + 1);
/* sanity check n_blocks, start_block */
- if (n_blocks == 0 ||
+ if (n_blocks == 0 || smp_length_check(size, max_len) ||
start_block + n_blocks > ppd->cc_max_table_entries) {
smp->status |= IB_SMP_INVALID_FIELD;
return reply((struct ib_mad_hdr *)smp);
@@ -3530,14 +3857,14 @@ static int __subn_get_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
rcu_read_unlock();
if (resp_len)
- *resp_len += sizeof(u16) * (IB_CCT_ENTRIES * n_blocks + 1);
+ *resp_len += size;
return reply((struct ib_mad_hdr *)smp);
}
static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
struct ib_device *ibdev, u8 port,
- u32 *resp_len)
+ u32 *resp_len, u32 max_len)
{
struct ib_cc_table_attr *p = (struct ib_cc_table_attr *)data;
struct hfi1_ibport *ibp = to_iport(ibdev, port);
@@ -3548,9 +3875,10 @@ static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
int i, j;
u32 sentry, eentry;
u16 ccti_limit;
+ u32 size = sizeof(u16) * (IB_CCT_ENTRIES * n_blocks + 1);
/* sanity check n_blocks, start_block */
- if (n_blocks == 0 ||
+ if (n_blocks == 0 || smp_length_check(size, max_len) ||
start_block + n_blocks > ppd->cc_max_table_entries) {
smp->status |= IB_SMP_INVALID_FIELD;
return reply((struct ib_mad_hdr *)smp);
@@ -3581,7 +3909,8 @@ static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
/* now apply the information */
apply_cc_state(ppd);
- return __subn_get_opa_cc_table(smp, am, data, ibdev, port, resp_len);
+ return __subn_get_opa_cc_table(smp, am, data, ibdev, port, resp_len,
+ max_len);
}
struct opa_led_info {
@@ -3594,7 +3923,7 @@ struct opa_led_info {
static int __subn_get_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
struct ib_device *ibdev, u8 port,
- u32 *resp_len)
+ u32 *resp_len, u32 max_len)
{
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
struct hfi1_pportdata *ppd = dd->pport;
@@ -3602,7 +3931,7 @@ static int __subn_get_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
u32 nport = OPA_AM_NPORT(am);
u32 is_beaconing_active;
- if (nport != 1) {
+ if (nport != 1 || smp_length_check(sizeof(*p), max_len)) {
smp->status |= IB_SMP_INVALID_FIELD;
return reply((struct ib_mad_hdr *)smp);
}
@@ -3624,14 +3953,14 @@ static int __subn_get_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
static int __subn_set_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
struct ib_device *ibdev, u8 port,
- u32 *resp_len)
+ u32 *resp_len, u32 max_len)
{
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
struct opa_led_info *p = (struct opa_led_info *)data;
u32 nport = OPA_AM_NPORT(am);
int on = !!(be32_to_cpu(p->rsvd_led_mask) & OPA_LED_MASK);
- if (nport != 1) {
+ if (nport != 1 || smp_length_check(sizeof(*p), max_len)) {
smp->status |= IB_SMP_INVALID_FIELD;
return reply((struct ib_mad_hdr *)smp);
}
@@ -3641,12 +3970,13 @@ static int __subn_set_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
else
shutdown_led_override(dd->pport);
- return __subn_get_opa_led_info(smp, am, data, ibdev, port, resp_len);
+ return __subn_get_opa_led_info(smp, am, data, ibdev, port, resp_len,
+ max_len);
}
static int subn_get_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
u8 *data, struct ib_device *ibdev, u8 port,
- u32 *resp_len)
+ u32 *resp_len, u32 max_len)
{
int ret;
struct hfi1_ibport *ibp = to_iport(ibdev, port);
@@ -3654,71 +3984,71 @@ static int subn_get_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
switch (attr_id) {
case IB_SMP_ATTR_NODE_DESC:
ret = __subn_get_opa_nodedesc(smp, am, data, ibdev, port,
- resp_len);
+ resp_len, max_len);
break;
case IB_SMP_ATTR_NODE_INFO:
ret = __subn_get_opa_nodeinfo(smp, am, data, ibdev, port,
- resp_len);
+ resp_len, max_len);
break;
case IB_SMP_ATTR_PORT_INFO:
ret = __subn_get_opa_portinfo(smp, am, data, ibdev, port,
- resp_len);
+ resp_len, max_len);
break;
case IB_SMP_ATTR_PKEY_TABLE:
ret = __subn_get_opa_pkeytable(smp, am, data, ibdev, port,
- resp_len);
+ resp_len, max_len);
break;
case OPA_ATTRIB_ID_SL_TO_SC_MAP:
ret = __subn_get_opa_sl_to_sc(smp, am, data, ibdev, port,
- resp_len);
+ resp_len, max_len);
break;
case OPA_ATTRIB_ID_SC_TO_SL_MAP:
ret = __subn_get_opa_sc_to_sl(smp, am, data, ibdev, port,
- resp_len);
+ resp_len, max_len);
break;
case OPA_ATTRIB_ID_SC_TO_VLT_MAP:
ret = __subn_get_opa_sc_to_vlt(smp, am, data, ibdev, port,
- resp_len);
+ resp_len, max_len);
break;
case OPA_ATTRIB_ID_SC_TO_VLNT_MAP:
ret = __subn_get_opa_sc_to_vlnt(smp, am, data, ibdev, port,
- resp_len);
+ resp_len, max_len);
break;
case OPA_ATTRIB_ID_PORT_STATE_INFO:
ret = __subn_get_opa_psi(smp, am, data, ibdev, port,
- resp_len);
+ resp_len, max_len);
break;
case OPA_ATTRIB_ID_BUFFER_CONTROL_TABLE:
ret = __subn_get_opa_bct(smp, am, data, ibdev, port,
- resp_len);
+ resp_len, max_len);
break;
case OPA_ATTRIB_ID_CABLE_INFO:
ret = __subn_get_opa_cable_info(smp, am, data, ibdev, port,
- resp_len);
+ resp_len, max_len);
break;
case IB_SMP_ATTR_VL_ARB_TABLE:
ret = __subn_get_opa_vl_arb(smp, am, data, ibdev, port,
- resp_len);
+ resp_len, max_len);
break;
case OPA_ATTRIB_ID_CONGESTION_INFO:
ret = __subn_get_opa_cong_info(smp, am, data, ibdev, port,
- resp_len);
+ resp_len, max_len);
break;
case OPA_ATTRIB_ID_HFI_CONGESTION_SETTING:
ret = __subn_get_opa_cong_setting(smp, am, data, ibdev,
- port, resp_len);
+ port, resp_len, max_len);
break;
case OPA_ATTRIB_ID_HFI_CONGESTION_LOG:
ret = __subn_get_opa_hfi1_cong_log(smp, am, data, ibdev,
- port, resp_len);
+ port, resp_len, max_len);
break;
case OPA_ATTRIB_ID_CONGESTION_CONTROL_TABLE:
ret = __subn_get_opa_cc_table(smp, am, data, ibdev, port,
- resp_len);
+ resp_len, max_len);
break;
case IB_SMP_ATTR_LED_INFO:
ret = __subn_get_opa_led_info(smp, am, data, ibdev, port,
- resp_len);
+ resp_len, max_len);
break;
case IB_SMP_ATTR_SM_INFO:
if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED)
@@ -3736,7 +4066,7 @@ static int subn_get_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
static int subn_set_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
u8 *data, struct ib_device *ibdev, u8 port,
- u32 *resp_len)
+ u32 *resp_len, u32 max_len)
{
int ret;
struct hfi1_ibport *ibp = to_iport(ibdev, port);
@@ -3744,51 +4074,51 @@ static int subn_set_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
switch (attr_id) {
case IB_SMP_ATTR_PORT_INFO:
ret = __subn_set_opa_portinfo(smp, am, data, ibdev, port,
- resp_len);
+ resp_len, max_len);
break;
case IB_SMP_ATTR_PKEY_TABLE:
ret = __subn_set_opa_pkeytable(smp, am, data, ibdev, port,
- resp_len);
+ resp_len, max_len);
break;
case OPA_ATTRIB_ID_SL_TO_SC_MAP:
ret = __subn_set_opa_sl_to_sc(smp, am, data, ibdev, port,
- resp_len);
+ resp_len, max_len);
break;
case OPA_ATTRIB_ID_SC_TO_SL_MAP:
ret = __subn_set_opa_sc_to_sl(smp, am, data, ibdev, port,
- resp_len);
+ resp_len, max_len);
break;
case OPA_ATTRIB_ID_SC_TO_VLT_MAP:
ret = __subn_set_opa_sc_to_vlt(smp, am, data, ibdev, port,
- resp_len);
+ resp_len, max_len);
break;
case OPA_ATTRIB_ID_SC_TO_VLNT_MAP:
ret = __subn_set_opa_sc_to_vlnt(smp, am, data, ibdev, port,
- resp_len);
+ resp_len, max_len);
break;
case OPA_ATTRIB_ID_PORT_STATE_INFO:
ret = __subn_set_opa_psi(smp, am, data, ibdev, port,
- resp_len);
+ resp_len, max_len);
break;
case OPA_ATTRIB_ID_BUFFER_CONTROL_TABLE:
ret = __subn_set_opa_bct(smp, am, data, ibdev, port,
- resp_len);
+ resp_len, max_len);
break;
case IB_SMP_ATTR_VL_ARB_TABLE:
ret = __subn_set_opa_vl_arb(smp, am, data, ibdev, port,
- resp_len);
+ resp_len, max_len);
break;
case OPA_ATTRIB_ID_HFI_CONGESTION_SETTING:
ret = __subn_set_opa_cong_setting(smp, am, data, ibdev,
- port, resp_len);
+ port, resp_len, max_len);
break;
case OPA_ATTRIB_ID_CONGESTION_CONTROL_TABLE:
ret = __subn_set_opa_cc_table(smp, am, data, ibdev, port,
- resp_len);
+ resp_len, max_len);
break;
case IB_SMP_ATTR_LED_INFO:
ret = __subn_set_opa_led_info(smp, am, data, ibdev, port,
- resp_len);
+ resp_len, max_len);
break;
case IB_SMP_ATTR_SM_INFO:
if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED)
@@ -3844,7 +4174,10 @@ static int subn_get_opa_aggregate(struct opa_smp *smp,
memset(next_smp + sizeof(*agg), 0, agg_data_len);
(void)subn_get_opa_sma(agg->attr_id, smp, am, agg->data,
- ibdev, port, NULL);
+ ibdev, port, NULL, (u32)agg_data_len);
+
+ if (smp->status & IB_SMP_INVALID_FIELD)
+ break;
if (smp->status & ~IB_SMP_DIRECTION) {
set_aggr_error(agg);
return reply((struct ib_mad_hdr *)smp);
@@ -3887,7 +4220,9 @@ static int subn_set_opa_aggregate(struct opa_smp *smp,
}
(void)subn_set_opa_sma(agg->attr_id, smp, am, agg->data,
- ibdev, port, NULL);
+ ibdev, port, NULL, (u32)agg_data_len);
+ if (smp->status & IB_SMP_INVALID_FIELD)
+ break;
if (smp->status & ~IB_SMP_DIRECTION) {
set_aggr_error(agg);
return reply((struct ib_mad_hdr *)smp);
@@ -3958,7 +4293,7 @@ static int opa_local_smp_check(struct hfi1_ibport *ibp,
const struct ib_wc *in_wc)
{
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- u16 slid = in_wc->slid;
+ u16 slid = ib_lid_cpu16(in_wc->slid);
u16 pkey;
if (in_wc->pkey_index >= ARRAY_SIZE(ppd->pkeys))
@@ -3997,12 +4332,13 @@ static int process_subn_opa(struct ib_device *ibdev, int mad_flags,
struct opa_smp *smp = (struct opa_smp *)out_mad;
struct hfi1_ibport *ibp = to_iport(ibdev, port);
u8 *data;
- u32 am;
+ u32 am, data_size;
__be16 attr_id;
int ret;
*out_mad = *in_mad;
data = opa_get_smp_data(smp);
+ data_size = (u32)opa_get_smp_data_size(smp);
am = be32_to_cpu(smp->attr_mod);
attr_id = smp->attr_id;
@@ -4046,7 +4382,8 @@ static int process_subn_opa(struct ib_device *ibdev, int mad_flags,
default:
clear_opa_smp_data(smp);
ret = subn_get_opa_sma(attr_id, smp, am, data,
- ibdev, port, resp_len);
+ ibdev, port, resp_len,
+ data_size);
break;
case OPA_ATTRIB_ID_AGGREGATE:
ret = subn_get_opa_aggregate(smp, ibdev, port,
@@ -4058,7 +4395,8 @@ static int process_subn_opa(struct ib_device *ibdev, int mad_flags,
switch (attr_id) {
default:
ret = subn_set_opa_sma(attr_id, smp, am, data,
- ibdev, port, resp_len);
+ ibdev, port, resp_len,
+ data_size);
break;
case OPA_ATTRIB_ID_AGGREGATE:
ret = subn_set_opa_aggregate(smp, ibdev, port,
@@ -4077,6 +4415,11 @@ static int process_subn_opa(struct ib_device *ibdev, int mad_flags,
*/
ret = IB_MAD_RESULT_SUCCESS;
break;
+ case IB_MGMT_METHOD_TRAP_REPRESS:
+ subn_handle_opa_trap_repress(ibp, smp);
+ /* Always successful */
+ ret = IB_MAD_RESULT_SUCCESS;
+ break;
default:
smp->status |= IB_SMP_UNSUP_METHOD;
ret = reply((struct ib_mad_hdr *)smp);
diff --git a/drivers/infiniband/hw/hfi1/mad.h b/drivers/infiniband/hw/hfi1/mad.h
index 5aa3fd1be653..4c1245072093 100644
--- a/drivers/infiniband/hw/hfi1/mad.h
+++ b/drivers/infiniband/hw/hfi1/mad.h
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2017 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -115,7 +115,7 @@ struct opa_mad_notice_attr {
__be32 lid; /* LID where change occurred */
__be32 new_cap_mask; /* new capability mask */
__be16 reserved2;
- __be16 cap_mask;
+ __be16 cap_mask3;
__be16 change_flags; /* low 4 bits only */
} __packed ntc_144;
@@ -428,5 +428,6 @@ struct sc2vlnt {
COUNTER_MASK(1, 4))
void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port);
+void hfi1_handle_trap_timer(unsigned long data);
#endif /* _HFI1_MAD_H */
diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
index ccbf52c8ff6f..2f0d285dc278 100644
--- a/drivers/infiniband/hw/hfi1/mmu_rb.c
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2016 Intel Corporation.
+ * Copyright(c) 2016 - 2017 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -67,8 +67,6 @@ struct mmu_rb_handler {
static unsigned long mmu_node_start(struct mmu_rb_node *);
static unsigned long mmu_node_last(struct mmu_rb_node *);
-static inline void mmu_notifier_page(struct mmu_notifier *, struct mm_struct *,
- unsigned long);
static inline void mmu_notifier_range_start(struct mmu_notifier *,
struct mm_struct *,
unsigned long, unsigned long);
@@ -82,7 +80,6 @@ static void do_remove(struct mmu_rb_handler *handler,
static void handle_remove(struct work_struct *work);
static const struct mmu_notifier_ops mn_opts = {
- .invalidate_page = mmu_notifier_page,
.invalidate_range_start = mmu_notifier_range_start,
};
@@ -172,9 +169,8 @@ int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
unsigned long flags;
int ret = 0;
+ trace_hfi1_mmu_rb_insert(mnode->addr, mnode->len);
spin_lock_irqsave(&handler->lock, flags);
- hfi1_cdbg(MMU, "Inserting node addr 0x%llx, len %u", mnode->addr,
- mnode->len);
node = __mmu_rb_search(handler, mnode->addr, mnode->len);
if (node) {
ret = -EINVAL;
@@ -200,7 +196,7 @@ static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
{
struct mmu_rb_node *node = NULL;
- hfi1_cdbg(MMU, "Searching for addr 0x%llx, len %u", addr, len);
+ trace_hfi1_mmu_rb_search(addr, len);
if (!handler->ops->filter) {
node = __mmu_int_rb_iter_first(&handler->root, addr,
(addr + len) - 1);
@@ -217,21 +213,27 @@ static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
return node;
}
-struct mmu_rb_node *hfi1_mmu_rb_extract(struct mmu_rb_handler *handler,
- unsigned long addr, unsigned long len)
+bool hfi1_mmu_rb_remove_unless_exact(struct mmu_rb_handler *handler,
+ unsigned long addr, unsigned long len,
+ struct mmu_rb_node **rb_node)
{
struct mmu_rb_node *node;
unsigned long flags;
+ bool ret = false;
spin_lock_irqsave(&handler->lock, flags);
node = __mmu_rb_search(handler, addr, len);
if (node) {
+ if (node->addr == addr && node->len == len)
+ goto unlock;
__mmu_int_rb_remove(node, &handler->root);
list_del(&node->list); /* remove from LRU list */
+ ret = true;
}
+unlock:
spin_unlock_irqrestore(&handler->lock, flags);
-
- return node;
+ *rb_node = node;
+ return ret;
}
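hfi1_mmu_rb_remove_unless_exact() replaces hfi1_mmu_rb_extract(): it returns true only when an overlapping-but-not-exact node was unlinked from the RB tree and LRU list, while an exact match is left cached and handed back through *rb_node. A sketch of the caller pattern this enables (fragment; handler, vaddr and len assumed in scope, helper names hypothetical):

	struct mmu_rb_node *node;

	if (hfi1_mmu_rb_remove_unless_exact(handler, vaddr, len, &node)) {
		/* Overlapping but not exact: node is unlinked, rebuild it. */
		rebuild_and_reinsert(handler, node, vaddr, len);
	} else if (node) {
		/* Exact match: still cached in the tree/LRU, reuse as-is. */
		use_cached(node);
	} else {
		/* No overlap at all: allocate and insert a fresh node. */
		node = alloc_and_insert(handler, vaddr, len);
	}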
void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
@@ -275,8 +277,7 @@ void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
unsigned long flags;
/* Validity of handler and node pointers has been checked by caller. */
- hfi1_cdbg(MMU, "Removing node addr 0x%llx, len %u", node->addr,
- node->len);
+ trace_hfi1_mmu_rb_remove(node->addr, node->len);
spin_lock_irqsave(&handler->lock, flags);
__mmu_int_rb_remove(node, &handler->root);
list_del(&node->list); /* remove from LRU list */
@@ -285,12 +286,6 @@ void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
handler->ops->remove(handler->ops_arg, node);
}
-static inline void mmu_notifier_page(struct mmu_notifier *mn,
- struct mm_struct *mm, unsigned long addr)
-{
- mmu_notifier_mem_invalidate(mn, mm, addr, addr + PAGE_SIZE);
-}
-
static inline void mmu_notifier_range_start(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start,
@@ -315,8 +310,7 @@ static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn,
node; node = ptr) {
/* Guard against node removal. */
ptr = __mmu_int_rb_iter_next(node, start, end - 1);
- hfi1_cdbg(MMU, "Invalidating node addr 0x%llx, len %u",
- node->addr, node->len);
+ trace_hfi1_mmu_mem_invalidate(node->addr, node->len);
if (handler->ops->invalidate(handler->ops_arg, node)) {
__mmu_int_rb_remove(node, root);
/* move from LRU list to delete list */
diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.h b/drivers/infiniband/hw/hfi1/mmu_rb.h
index 754f6ebf13fb..f04cec1e99d1 100644
--- a/drivers/infiniband/hw/hfi1/mmu_rb.h
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.h
@@ -81,7 +81,8 @@ int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg);
void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
struct mmu_rb_node *mnode);
-struct mmu_rb_node *hfi1_mmu_rb_extract(struct mmu_rb_handler *handler,
- unsigned long addr, unsigned long len);
+bool hfi1_mmu_rb_remove_unless_exact(struct mmu_rb_handler *handler,
+ unsigned long addr, unsigned long len,
+ struct mmu_rb_node **rb_node);
#endif /* _HFI1_MMU_RB_H */
diff --git a/drivers/infiniband/hw/hfi1/opa_compat.h b/drivers/infiniband/hw/hfi1/opa_compat.h
index 6ef3c1cbdcd7..774215b95df5 100644
--- a/drivers/infiniband/hw/hfi1/opa_compat.h
+++ b/drivers/infiniband/hw/hfi1/opa_compat.h
@@ -84,7 +84,8 @@ static inline u8 port_states_to_phys_state(struct opa_port_states *ps)
/*
* OPA port physical states
* IB Volume 1, Table 146 PortInfo/IB Volume 2 Section 5.4.2(1) PortPhysState
- * values.
+ * values are the same in OmniPath Architecture. OPA leverages some of the same
+ * concepts as InfiniBand, but has a few other states as well.
*
* When writing, only values 0-3 are valid, other values are ignored.
* When reading, 0 is reserved.
@@ -92,6 +93,8 @@ static inline u8 port_states_to_phys_state(struct opa_port_states *ps)
* Returned by the ibphys_portstate() routine.
*/
enum opa_port_phys_state {
+ /* Values 0-7 have the same meaning in OPA as in InfiniBand. */
+
IB_PORTPHYSSTATE_NOP = 0,
/* 1 is reserved */
IB_PORTPHYSSTATE_POLLING = 2,
@@ -101,9 +104,23 @@ enum opa_port_phys_state {
IB_PORTPHYSSTATE_LINK_ERROR_RECOVERY = 6,
IB_PORTPHYSSTATE_PHY_TEST = 7,
/* 8 is reserved */
+
+ /*
+ * Offline: Port is quiet (transmitters disabled) due to lack of
+ * physical media, unsupported media, or transition between link up
+ * and next link up attempt
+ */
OPA_PORTPHYSSTATE_OFFLINE = 9,
- OPA_PORTPHYSSTATE_GANGED = 10,
+
+ /* 10 is reserved */
+
+ /*
+ * Phy_Test: Specific test patterns are transmitted, and receiver BER
+ * can be monitored. This facilitates signal integrity testing for the
+ * physical layer of the port.
+ */
OPA_PORTPHYSSTATE_TEST = 11,
+
OPA_PORTPHYSSTATE_MAX = 11,
/* values 12-15 are reserved/ignored */
};
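A small illustrative helper (not part of this patch) mapping the values above, including the OPA-specific additions, to readable names:

static const char *opa_pstate_name(u8 pstate)
{
	switch (pstate) {
	case IB_PORTPHYSSTATE_NOP:			return "Nop";
	case IB_PORTPHYSSTATE_POLLING:			return "Polling";
	case IB_PORTPHYSSTATE_LINK_ERROR_RECOVERY:	return "LinkErrorRecovery";
	case IB_PORTPHYSSTATE_PHY_TEST:			return "PhyTest";
	case OPA_PORTPHYSSTATE_OFFLINE:			return "Offline";
	case OPA_PORTPHYSSTATE_TEST:			return "Phy_Test";
	default:					return "Reserved";
	}
}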
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index 6a9f6f9819e1..82447b7cdda1 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2017 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -68,7 +68,7 @@
/*
* Code to adjust PCIe capabilities.
*/
-static void tune_pcie_caps(struct hfi1_devdata *);
+static int tune_pcie_caps(struct hfi1_devdata *);
/*
* Do all the common PCIe setup and initialization.
@@ -161,6 +161,7 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev)
{
unsigned long len;
resource_size_t addr;
+ int ret = 0;
dd->pcidev = pdev;
pci_set_drvdata(pdev, dd);
@@ -179,47 +180,54 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev)
return -EINVAL;
}
- dd->kregbase = ioremap_nocache(addr, TXE_PIO_SEND);
- if (!dd->kregbase)
+ dd->kregbase1 = ioremap_nocache(addr, RCV_ARRAY);
+ if (!dd->kregbase1) {
+ dd_dev_err(dd, "UC mapping of kregbase1 failed\n");
return -ENOMEM;
+ }
+ dd_dev_info(dd, "UC base1: %p for %x\n", dd->kregbase1, RCV_ARRAY);
+ dd->chip_rcv_array_count = readq(dd->kregbase1 + RCV_ARRAY_CNT);
+ dd_dev_info(dd, "RcvArray count: %u\n", dd->chip_rcv_array_count);
+ dd->base2_start = RCV_ARRAY + dd->chip_rcv_array_count * 8;
+
+ dd->kregbase2 = ioremap_nocache(
+ addr + dd->base2_start,
+ TXE_PIO_SEND - dd->base2_start);
+ if (!dd->kregbase2) {
+ dd_dev_err(dd, "UC mapping of kregbase2 failed\n");
+ goto nomem;
+ }
+ dd_dev_info(dd, "UC base2: %p for %x\n", dd->kregbase2,
+ TXE_PIO_SEND - dd->base2_start);
dd->piobase = ioremap_wc(addr + TXE_PIO_SEND, TXE_PIO_SIZE);
if (!dd->piobase) {
- iounmap(dd->kregbase);
- return -ENOMEM;
+ dd_dev_err(dd, "WC mapping of send buffers failed\n");
+ goto nomem;
}
+ dd_dev_info(dd, "WC piobase: %p\n for %x", dd->piobase, TXE_PIO_SIZE);
- dd->flags |= HFI1_PRESENT; /* now register routines work */
-
- dd->kregend = dd->kregbase + TXE_PIO_SEND;
dd->physaddr = addr; /* used for io_remap, etc. */
/*
- * Re-map the chip's RcvArray as write-combining to allow us
+ * Map the chip's RcvArray as write-combining to allow us
* to write an entire cacheline worth of entries in one shot.
- * If this re-map fails, just continue - the RcvArray programming
- * function will handle both cases.
*/
- dd->chip_rcv_array_count = read_csr(dd, RCV_ARRAY_CNT);
dd->rcvarray_wc = ioremap_wc(addr + RCV_ARRAY,
dd->chip_rcv_array_count * 8);
- dd_dev_info(dd, "WC Remapped RcvArray: %p\n", dd->rcvarray_wc);
- /*
- * Save BARs and command to rewrite after device reset.
- */
- pci_read_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0, &dd->pcibar0);
- pci_read_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1, &dd->pcibar1);
- pci_read_config_dword(dd->pcidev, PCI_ROM_ADDRESS, &dd->pci_rom);
- pci_read_config_word(dd->pcidev, PCI_COMMAND, &dd->pci_command);
- pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &dd->pcie_devctl);
- pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKCTL, &dd->pcie_lnkctl);
- pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL2,
- &dd->pcie_devctl2);
- pci_read_config_dword(dd->pcidev, PCI_CFG_MSIX0, &dd->pci_msix0);
- pci_read_config_dword(dd->pcidev, PCIE_CFG_SPCIE1, &dd->pci_lnkctl3);
- pci_read_config_dword(dd->pcidev, PCIE_CFG_TPH2, &dd->pci_tph2);
+ if (!dd->rcvarray_wc) {
+ dd_dev_err(dd, "WC mapping of receive array failed\n");
+ goto nomem;
+ }
+ dd_dev_info(dd, "WC RcvArray: %p for %x\n",
+ dd->rcvarray_wc, dd->chip_rcv_array_count * 8);
+ dd->flags |= HFI1_PRESENT; /* chip.c CSR routines now work */
return 0;
+nomem:
+ ret = -ENOMEM;
+ hfi1_pcie_ddcleanup(dd);
+ return ret;
}
/*
@@ -229,59 +237,19 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev)
*/
void hfi1_pcie_ddcleanup(struct hfi1_devdata *dd)
{
- u64 __iomem *base = (void __iomem *)dd->kregbase;
-
dd->flags &= ~HFI1_PRESENT;
- dd->kregbase = NULL;
- iounmap(base);
+ if (dd->kregbase1)
+ iounmap(dd->kregbase1);
+ dd->kregbase1 = NULL;
+ if (dd->kregbase2)
+ iounmap(dd->kregbase2);
+ dd->kregbase2 = NULL;
if (dd->rcvarray_wc)
iounmap(dd->rcvarray_wc);
+ dd->rcvarray_wc = NULL;
if (dd->piobase)
iounmap(dd->piobase);
-}
-
-static void msix_setup(struct hfi1_devdata *dd, int pos, u32 *msixcnt,
- struct hfi1_msix_entry *hfi1_msix_entry)
-{
- int ret;
- int nvec = *msixcnt;
- struct msix_entry *msix_entry;
- int i;
-
- /*
- * We can't pass hfi1_msix_entry array to msix_setup
- * so use a dummy msix_entry array and copy the allocated
- * irq back to the hfi1_msix_entry array.
- */
- msix_entry = kmalloc_array(nvec, sizeof(*msix_entry), GFP_KERNEL);
- if (!msix_entry) {
- ret = -ENOMEM;
- goto do_intx;
- }
-
- for (i = 0; i < nvec; i++)
- msix_entry[i] = hfi1_msix_entry[i].msix;
-
- ret = pci_enable_msix_range(dd->pcidev, msix_entry, 1, nvec);
- if (ret < 0)
- goto free_msix_entry;
- nvec = ret;
-
- for (i = 0; i < nvec; i++)
- hfi1_msix_entry[i].msix = msix_entry[i];
-
- kfree(msix_entry);
- *msixcnt = nvec;
- return;
-
-free_msix_entry:
- kfree(msix_entry);
-
-do_intx:
- dd_dev_err(dd, "pci_enable_msix_range %d vectors failed: %d, falling back to INTx\n",
- nvec, ret);
- *msixcnt = 0;
- hfi1_enable_intx(dd->pcidev);
+ dd->piobase = NULL;
}
/* return the PCIe link speed from the given link status */
@@ -314,8 +282,14 @@ static u32 extract_width(u16 linkstat)
static void update_lbus_info(struct hfi1_devdata *dd)
{
u16 linkstat;
+ int ret;
+
+ ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKSTA, &linkstat);
+ if (ret) {
+ dd_dev_err(dd, "Unable to read from PCI config\n");
+ return;
+ }
- pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKSTA, &linkstat);
dd->lbus_width = extract_width(linkstat);
dd->lbus_speed = extract_speed(linkstat);
snprintf(dd->lbus_info, sizeof(dd->lbus_info),
@@ -330,6 +304,7 @@ int pcie_speeds(struct hfi1_devdata *dd)
{
u32 linkcap;
struct pci_dev *parent = dd->pcidev->bus->self;
+ int ret;
if (!pci_is_pcie(dd->pcidev)) {
dd_dev_err(dd, "Can't find PCI Express capability!\n");
@@ -339,7 +314,12 @@ int pcie_speeds(struct hfi1_devdata *dd)
/* find if our max speed is Gen3 and parent supports Gen3 speeds */
dd->link_gen3_capable = 1;
- pcie_capability_read_dword(dd->pcidev, PCI_EXP_LNKCAP, &linkcap);
+ ret = pcie_capability_read_dword(dd->pcidev, PCI_EXP_LNKCAP, &linkcap);
+ if (ret) {
+ dd_dev_err(dd, "Unable to read from PCI config\n");
+ return ret;
+ }
+
if ((linkcap & PCI_EXP_LNKCAP_SLS) != GEN3_SPEED_VECTOR) {
dd_dev_info(dd,
"This HFI is not Gen3 capable, max speed 0x%x, need 0x3\n",
@@ -364,49 +344,150 @@ int pcie_speeds(struct hfi1_devdata *dd)
}
/*
- * Returns in *nent:
- * - actual number of interrupts allocated
+ * Returns:
+ * - the actual number of interrupts allocated,
+ * - 0 if it fell back to INTx, or
+ * - an error code.
*/
-void request_msix(struct hfi1_devdata *dd, u32 *nent,
- struct hfi1_msix_entry *entry)
+int request_msix(struct hfi1_devdata *dd, u32 msireq)
{
- int pos;
+ int nvec, ret;
- pos = dd->pcidev->msix_cap;
- if (*nent && pos) {
- msix_setup(dd, pos, nent, entry);
- /* did it, either MSI-X or INTx */
- } else {
- *nent = 0;
- hfi1_enable_intx(dd->pcidev);
+ nvec = pci_alloc_irq_vectors(dd->pcidev, 1, msireq,
+ PCI_IRQ_MSIX | PCI_IRQ_LEGACY);
+ if (nvec < 0) {
+ dd_dev_err(dd, "pci_alloc_irq_vectors() failed: %d\n", nvec);
+ return nvec;
}
- tune_pcie_caps(dd);
+ ret = tune_pcie_caps(dd);
+ if (ret) {
+ dd_dev_err(dd, "tune_pcie_caps() failed: %d\n", ret);
+ pci_free_irq_vectors(dd->pcidev);
+ return ret;
+ }
+
+ /* check for legacy IRQ */
+ if (nvec == 1 && !dd->pcidev->msix_enabled)
+ return 0;
+
+ return nvec;
}
-void hfi1_enable_intx(struct pci_dev *pdev)
+/* restore command and BARs after a reset has wiped them out */
+int restore_pci_variables(struct hfi1_devdata *dd)
{
- /* first, turn on INTx */
- pci_intx(pdev, 1);
- /* then turn off MSI-X */
- pci_disable_msix(pdev);
+ int ret = 0;
+
+ ret = pci_write_config_word(dd->pcidev, PCI_COMMAND, dd->pci_command);
+ if (ret)
+ goto error;
+
+ ret = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
+ dd->pcibar0);
+ if (ret)
+ goto error;
+
+ ret = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1,
+ dd->pcibar1);
+ if (ret)
+ goto error;
+
+ ret = pci_write_config_dword(dd->pcidev, PCI_ROM_ADDRESS, dd->pci_rom);
+ if (ret)
+ goto error;
+
+ ret = pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL,
+ dd->pcie_devctl);
+ if (ret)
+ goto error;
+
+ ret = pcie_capability_write_word(dd->pcidev, PCI_EXP_LNKCTL,
+ dd->pcie_lnkctl);
+ if (ret)
+ goto error;
+
+ ret = pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL2,
+ dd->pcie_devctl2);
+ if (ret)
+ goto error;
+
+ ret = pci_write_config_dword(dd->pcidev, PCI_CFG_MSIX0, dd->pci_msix0);
+ if (ret)
+ goto error;
+
+ ret = pci_write_config_dword(dd->pcidev, PCIE_CFG_SPCIE1,
+ dd->pci_lnkctl3);
+ if (ret)
+ goto error;
+
+ ret = pci_write_config_dword(dd->pcidev, PCIE_CFG_TPH2, dd->pci_tph2);
+ if (ret)
+ goto error;
+
+ return 0;
+
+error:
+ dd_dev_err(dd, "Unable to write to PCI config\n");
+ return ret;
}
-/* restore command and BARs after a reset has wiped them out */
-void restore_pci_variables(struct hfi1_devdata *dd)
+/* Save BARs and command to rewrite after device reset */
+int save_pci_variables(struct hfi1_devdata *dd)
{
- pci_write_config_word(dd->pcidev, PCI_COMMAND, dd->pci_command);
- pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0, dd->pcibar0);
- pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1, dd->pcibar1);
- pci_write_config_dword(dd->pcidev, PCI_ROM_ADDRESS, dd->pci_rom);
- pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL, dd->pcie_devctl);
- pcie_capability_write_word(dd->pcidev, PCI_EXP_LNKCTL, dd->pcie_lnkctl);
- pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL2,
- dd->pcie_devctl2);
- pci_write_config_dword(dd->pcidev, PCI_CFG_MSIX0, dd->pci_msix0);
- pci_write_config_dword(dd->pcidev, PCIE_CFG_SPCIE1, dd->pci_lnkctl3);
- pci_write_config_dword(dd->pcidev, PCIE_CFG_TPH2, dd->pci_tph2);
+ int ret = 0;
+
+ ret = pci_read_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
+ &dd->pcibar0);
+ if (ret)
+ goto error;
+
+ ret = pci_read_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1,
+ &dd->pcibar1);
+ if (ret)
+ goto error;
+
+ ret = pci_read_config_dword(dd->pcidev, PCI_ROM_ADDRESS, &dd->pci_rom);
+ if (ret)
+ goto error;
+
+ ret = pci_read_config_word(dd->pcidev, PCI_COMMAND, &dd->pci_command);
+ if (ret)
+ goto error;
+
+ ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL,
+ &dd->pcie_devctl);
+ if (ret)
+ goto error;
+
+ ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKCTL,
+ &dd->pcie_lnkctl);
+ if (ret)
+ goto error;
+
+ ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL2,
+ &dd->pcie_devctl2);
+ if (ret)
+ goto error;
+
+ ret = pci_read_config_dword(dd->pcidev, PCI_CFG_MSIX0, &dd->pci_msix0);
+ if (ret)
+ goto error;
+
+ ret = pci_read_config_dword(dd->pcidev, PCIE_CFG_SPCIE1,
+ &dd->pci_lnkctl3);
+ if (ret)
+ goto error;
+
+ ret = pci_read_config_dword(dd->pcidev, PCIE_CFG_TPH2, &dd->pci_tph2);
+ if (ret)
+ goto error;
+
+ return 0;
+
+error:
+ dd_dev_err(dd, "Unable to read from PCI config\n");
+ return ret;
}
/*
@@ -421,21 +502,33 @@ uint aspm_mode = ASPM_MODE_DISABLED;
module_param_named(aspm, aspm_mode, uint, S_IRUGO);
MODULE_PARM_DESC(aspm, "PCIe ASPM: 0: disable, 1: enable, 2: dynamic");
-static void tune_pcie_caps(struct hfi1_devdata *dd)
+static int tune_pcie_caps(struct hfi1_devdata *dd)
{
struct pci_dev *parent;
u16 rc_mpss, rc_mps, ep_mpss, ep_mps;
u16 rc_mrrs, ep_mrrs, max_mrrs, ectl;
+ int ret;
/*
* Turn on extended tags in DevCtl in case the BIOS has turned it off
* to improve WFR SDMA bandwidth
*/
- pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &ectl);
+ ret = pcie_capability_read_word(dd->pcidev,
+ PCI_EXP_DEVCTL, &ectl);
+ if (ret) {
+ dd_dev_err(dd, "Unable to read from PCI config\n");
+ return ret;
+ }
+
if (!(ectl & PCI_EXP_DEVCTL_EXT_TAG)) {
dd_dev_info(dd, "Enabling PCIe extended tags\n");
ectl |= PCI_EXP_DEVCTL_EXT_TAG;
- pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL, ectl);
+ ret = pcie_capability_write_word(dd->pcidev,
+ PCI_EXP_DEVCTL, ectl);
+ if (ret) {
+ dd_dev_err(dd, "Unable to write to PCI config\n");
+ return ret;
+ }
}
/* Find out supported and configured values for parent (root) */
parent = dd->pcidev->bus->self;
@@ -444,14 +537,14 @@ static void tune_pcie_caps(struct hfi1_devdata *dd)
* access to the upstream component.
*/
if (!parent)
- return;
+ return -EINVAL;
if (!pci_is_root_bus(parent->bus)) {
dd_dev_info(dd, "Parent not root\n");
- return;
+ return -EINVAL;
}
if (!pci_is_pcie(parent) || !pci_is_pcie(dd->pcidev))
- return;
+ return -EINVAL;
rc_mpss = parent->pcie_mpss;
rc_mps = ffs(pcie_get_mps(parent)) - 8;
/* Find out supported and configured values for endpoint (us) */
@@ -497,6 +590,8 @@ static void tune_pcie_caps(struct hfi1_devdata *dd)
ep_mrrs = max_mrrs;
pcie_set_readrq(dd->pcidev, ep_mrrs);
}
+
+ return 0;
}
/* End of PCIe capability tuning */
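
tune_pcie_caps() now propagates config-space access errors instead of ignoring them. As a standalone illustration of the extended-tag step it performs, a sketch along these lines (assuming only a probed struct pci_dev, not the hfi1 devdata; the helper name is illustrative) would be:

#include <linux/pci.h>

/*
 * Sketch: turn on PCIe extended tags if the BIOS left them disabled,
 * propagating any config-space access error to the caller the same way
 * the patched tune_pcie_caps() now does.
 */
static int enable_extended_tags(struct pci_dev *pdev)
{
	u16 devctl;
	int ret;

	ret = pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &devctl);
	if (ret)
		return ret;

	if (devctl & PCI_EXP_DEVCTL_EXT_TAG)
		return 0;	/* already enabled, nothing to do */

	devctl |= PCI_EXP_DEVCTL_EXT_TAG;
	return pcie_capability_write_word(pdev, PCI_EXP_DEVCTL, devctl);
}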
@@ -728,6 +823,7 @@ static int load_eq_table(struct hfi1_devdata *dd, const u8 eq[11][3], u8 fs,
u32 violation;
u32 i;
u8 c_minus1, c0, c_plus1;
+ int ret;
for (i = 0; i < 11; i++) {
/* set index */
@@ -739,8 +835,14 @@ static int load_eq_table(struct hfi1_devdata *dd, const u8 eq[11][3], u8 fs,
pci_write_config_dword(pdev, PCIE_CFG_REG_PL102,
eq_value(c_minus1, c0, c_plus1));
/* check if these coefficients violate EQ rules */
- pci_read_config_dword(dd->pcidev, PCIE_CFG_REG_PL105,
- &violation);
+ ret = pci_read_config_dword(dd->pcidev,
+ PCIE_CFG_REG_PL105, &violation);
+ if (ret) {
+ dd_dev_err(dd, "Unable to read from PCI config\n");
+ hit_error = 1;
+ break;
+ }
+
if (violation
& PCIE_CFG_REG_PL105_GEN3_EQ_VIOLATE_COEF_RULES_SMASK){
if (hit_error == 0) {
@@ -1194,7 +1296,13 @@ retry:
* that it is Gen3 capable earlier.
*/
dd_dev_info(dd, "%s: setting parent target link speed\n", __func__);
- pcie_capability_read_word(parent, PCI_EXP_LNKCTL2, &lnkctl2);
+ ret = pcie_capability_read_word(parent, PCI_EXP_LNKCTL2, &lnkctl2);
+ if (ret) {
+ dd_dev_err(dd, "Unable to read from PCI config\n");
+ return_error = 1;
+ goto done;
+ }
+
dd_dev_info(dd, "%s: ..old link control2: 0x%x\n", __func__,
(u32)lnkctl2);
/* only write to parent if target is not as high as ours */
@@ -1203,20 +1311,37 @@ retry:
lnkctl2 |= target_vector;
dd_dev_info(dd, "%s: ..new link control2: 0x%x\n", __func__,
(u32)lnkctl2);
- pcie_capability_write_word(parent, PCI_EXP_LNKCTL2, lnkctl2);
+ ret = pcie_capability_write_word(parent,
+ PCI_EXP_LNKCTL2, lnkctl2);
+ if (ret) {
+ dd_dev_err(dd, "Unable to write to PCI config\n");
+ return_error = 1;
+ goto done;
+ }
} else {
dd_dev_info(dd, "%s: ..target speed is OK\n", __func__);
}
dd_dev_info(dd, "%s: setting target link speed\n", __func__);
- pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKCTL2, &lnkctl2);
+ ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKCTL2, &lnkctl2);
+ if (ret) {
+ dd_dev_err(dd, "Unable to read from PCI config\n");
+ return_error = 1;
+ goto done;
+ }
+
dd_dev_info(dd, "%s: ..old link control2: 0x%x\n", __func__,
(u32)lnkctl2);
lnkctl2 &= ~LNKCTL2_TARGET_LINK_SPEED_MASK;
lnkctl2 |= target_vector;
dd_dev_info(dd, "%s: ..new link control2: 0x%x\n", __func__,
(u32)lnkctl2);
- pcie_capability_write_word(dd->pcidev, PCI_EXP_LNKCTL2, lnkctl2);
+ ret = pcie_capability_write_word(dd->pcidev, PCI_EXP_LNKCTL2, lnkctl2);
+ if (ret) {
+ dd_dev_err(dd, "Unable to write to PCI config\n");
+ return_error = 1;
+ goto done;
+ }
/* step 5h: arm gasket logic */
/* hold DC in reset across the SBR */
@@ -1266,7 +1391,14 @@ retry:
/* restore PCI space registers we know were reset */
dd_dev_info(dd, "%s: calling restore_pci_variables\n", __func__);
- restore_pci_variables(dd);
+ ret = restore_pci_variables(dd);
+ if (ret) {
+ dd_dev_err(dd, "%s: Could not restore PCI variables\n",
+ __func__);
+ return_error = 1;
+ goto done;
+ }
+
/* restore firmware control */
write_csr(dd, MISC_CFG_FW_CTRL, fw_ctrl);
@@ -1296,7 +1428,13 @@ retry:
setextled(dd, 0);
/* check for any per-lane errors */
- pci_read_config_dword(dd->pcidev, PCIE_CFG_SPCIE2, &reg32);
+ ret = pci_read_config_dword(dd->pcidev, PCIE_CFG_SPCIE2, &reg32);
+ if (ret) {
+ dd_dev_err(dd, "Unable to read from PCI config\n");
+ return_error = 1;
+ goto done;
+ }
+
dd_dev_info(dd, "%s: per-lane errors: 0x%x\n", __func__, reg32);
/* extract status, look for our HFI */
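
The old msix_setup()/hfi1_enable_intx() pair is replaced by pci_alloc_irq_vectors(), which handles the MSI-X range negotiation and the legacy-interrupt fallback internally. A minimal sketch of that allocation pattern, under the assumption of a probed struct pci_dev and a desired vector count (the helper name is illustrative, not the driver's code):

#include <linux/pci.h>

/*
 * Returns the number of MSI-X vectors obtained, 0 when only a
 * legacy/INTx interrupt is available, or a negative errno.
 */
static int alloc_device_irqs(struct pci_dev *pdev, unsigned int want)
{
	int nvec;

	nvec = pci_alloc_irq_vectors(pdev, 1, want,
				     PCI_IRQ_MSIX | PCI_IRQ_LEGACY);
	if (nvec < 0)
		return nvec;		/* allocation failed outright */

	if (nvec == 1 && !pdev->msix_enabled)
		return 0;		/* fell back to a legacy interrupt */

	return nvec;			/* MSI-X vectors granted */
}

/* Teardown is symmetric: pci_free_irq_vectors(pdev); */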
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index ed72b5aca139..7108a4b5e94c 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -1012,7 +1012,7 @@ static void sc_wait_for_packet_egress(struct send_context *sc, int pause)
"%s: context %u(%u) timeout waiting for packets to egress, remaining count %u, bouncing link\n",
__func__, sc->sw_index,
sc->hw_context, (u32)reg);
- queue_work(dd->pport->hfi1_wq,
+ queue_work(dd->pport->link_wq,
&dd->pport->link_bounce_work);
break;
}
@@ -1568,7 +1568,8 @@ static void sc_piobufavail(struct send_context *sc)
struct rvt_qp *qp;
struct hfi1_qp_priv *priv;
unsigned long flags;
- unsigned i, n = 0;
+ uint i, n = 0, max_idx = 0;
+ u8 max_starved_cnt = 0;
if (dd->send_contexts[sc->sw_index].type != SC_KERNEL &&
dd->send_contexts[sc->sw_index].type != SC_VL15)
@@ -1591,6 +1592,7 @@ static void sc_piobufavail(struct send_context *sc)
priv = qp->priv;
list_del_init(&priv->s_iowait.list);
priv->s_iowait.lock = NULL;
+ iowait_starve_find_max(wait, &max_starved_cnt, n, &max_idx);
/* refcount held until actual wake up */
qps[n++] = qp;
}
@@ -1605,9 +1607,14 @@ static void sc_piobufavail(struct send_context *sc)
}
write_sequnlock_irqrestore(&dev->iowait_lock, flags);
- for (i = 0; i < n; i++)
- hfi1_qp_wakeup(qps[i],
+ /* Wake up the most starved one first */
+ if (n)
+ hfi1_qp_wakeup(qps[max_idx],
RVT_S_WAIT_PIO | RVT_S_WAIT_PIO_DRAIN);
+ for (i = 0; i < n; i++)
+ if (i != max_idx)
+ hfi1_qp_wakeup(qps[i],
+ RVT_S_WAIT_PIO | RVT_S_WAIT_PIO_DRAIN);
}
/* translate a send credit update to a bit code of reasons */
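
sc_piobufavail() now records, while draining the iowait list, which waiter has the highest starvation count and wakes that QP before the others. Below is a simplified, self-contained sketch of the same "find the max while collecting, then wake it first" idea; types and names are illustrative, and the driver itself does the bookkeeping via iowait_starve_find_max() under the iowait lock.

/* Simplified sketch of the "wake the most starved waiter first" pattern. */
struct waiter {
	unsigned int starved_cnt;	/* how often this waiter lost out */
};

static void wake_waiters(struct waiter **w, unsigned int n,
			 void (*wake)(struct waiter *))
{
	unsigned int i, max_idx = 0;
	unsigned int max_starved = 0;

	if (!n)
		return;

	/* single pass: remember the most starved entry */
	for (i = 0; i < n; i++) {
		if (w[i]->starved_cnt >= max_starved) {
			max_starved = w[i]->starved_cnt;
			max_idx = i;
		}
	}

	wake(w[max_idx]);		/* most starved goes first */
	for (i = 0; i < n; i++)
		if (i != max_idx)
			wake(w[i]);
}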
diff --git a/drivers/infiniband/hw/hfi1/platform.c b/drivers/infiniband/hw/hfi1/platform.c
index 838fe84e285a..a8af96d2b1b0 100644
--- a/drivers/infiniband/hw/hfi1/platform.c
+++ b/drivers/infiniband/hw/hfi1/platform.c
@@ -45,10 +45,14 @@
*
*/
+#include <linux/firmware.h>
+
#include "hfi.h"
#include "efivar.h"
#include "eprom.h"
+#define DEFAULT_PLATFORM_CONFIG_NAME "hfi1_platform.dat"
+
static int validate_scratch_checksum(struct hfi1_devdata *dd)
{
u64 checksum = 0, temp_scratch = 0;
@@ -58,8 +62,13 @@ static int validate_scratch_checksum(struct hfi1_devdata *dd)
version = (temp_scratch & BITMAP_VERSION_SMASK) >> BITMAP_VERSION_SHIFT;
/* Prevent power on default of all zeroes from passing checksum */
- if (!version)
+ if (!version) {
+ dd_dev_err(dd, "%s: Config bitmap uninitialized\n", __func__);
+ dd_dev_err(dd,
+ "%s: Please update your BIOS to support active channels\n",
+ __func__);
return 0;
+ }
/*
* ASIC scratch 0 only contains the checksum and bitmap version as
@@ -84,6 +93,8 @@ static int validate_scratch_checksum(struct hfi1_devdata *dd)
if (checksum + temp_scratch == 0xFFFF)
return 1;
+
+ dd_dev_err(dd, "%s: Configuration bitmap corrupted\n", __func__);
return 0;
}
@@ -131,25 +142,22 @@ static void save_platform_config_fields(struct hfi1_devdata *dd)
ppd->max_power_class = (temp_scratch & QSFP_MAX_POWER_SMASK) >>
QSFP_MAX_POWER_SHIFT;
+
+ ppd->config_from_scratch = true;
}
void get_platform_config(struct hfi1_devdata *dd)
{
int ret = 0;
- unsigned long size = 0;
u8 *temp_platform_config = NULL;
u32 esize;
+ const struct firmware *platform_config_file = NULL;
if (is_integrated(dd)) {
if (validate_scratch_checksum(dd)) {
save_platform_config_fields(dd);
return;
}
- dd_dev_err(dd, "%s: Config bitmap corrupted/uninitialized\n",
- __func__);
- dd_dev_err(dd,
- "%s: Please update your BIOS to support active channels\n",
- __func__);
} else {
ret = eprom_read_platform_config(dd,
(void **)&temp_platform_config,
@@ -160,36 +168,37 @@ void get_platform_config(struct hfi1_devdata *dd)
dd->platform_config.size = esize;
return;
}
- /* fail, try EFI variable */
-
- ret = read_hfi1_efi_var(dd, "configuration", &size,
- (void **)&temp_platform_config);
- if (!ret) {
- dd->platform_config.data = temp_platform_config;
- dd->platform_config.size = size;
- return;
- }
}
dd_dev_err(dd,
"%s: Failed to get platform config, falling back to sub-optimal default file\n",
__func__);
- /* fall back to request firmware */
- platform_config_load = 1;
-}
-void free_platform_config(struct hfi1_devdata *dd)
-{
- if (!platform_config_load) {
- /*
- * was loaded from EFI or the EPROM, release memory
- * allocated by read_efi_var/eprom_read_platform_config
- */
- kfree(dd->platform_config.data);
+ ret = request_firmware(&platform_config_file,
+ DEFAULT_PLATFORM_CONFIG_NAME,
+ &dd->pcidev->dev);
+ if (ret) {
+ dd_dev_err(dd,
+ "%s: No default platform config file found\n",
+ __func__);
+ return;
}
+
/*
- * else do nothing, dispose_firmware will release
- * struct firmware platform_config on driver exit
+ * Allocate separate memory block to store data and free firmware
+ * structure. This allows free_platform_config to treat EPROM and
+ * fallback configs in the same manner.
*/
+ dd->platform_config.data = kmemdup(platform_config_file->data,
+ platform_config_file->size,
+ GFP_KERNEL);
+ dd->platform_config.size = platform_config_file->size;
+ release_firmware(platform_config_file);
+}
+
+void free_platform_config(struct hfi1_devdata *dd)
+{
+ /* Release memory allocated for eprom or fallback file read. */
+ kfree(dd->platform_config.data);
}
void get_port_type(struct hfi1_pportdata *ppd)
@@ -242,7 +251,7 @@ static int qual_power(struct hfi1_pportdata *ppd)
if (ppd->offline_disabled_reason ==
HFI1_ODR_MASK(OPA_LINKDOWN_REASON_POWER_POLICY)) {
- dd_dev_info(
+ dd_dev_err(
ppd->dd,
"%s: Port disabled due to system power restrictions\n",
__func__);
@@ -268,7 +277,7 @@ static int qual_bitrate(struct hfi1_pportdata *ppd)
if (ppd->offline_disabled_reason ==
HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY)) {
- dd_dev_info(
+ dd_dev_err(
ppd->dd,
"%s: Cable failed bitrate check, disabling port\n",
__func__);
@@ -709,15 +718,15 @@ static void apply_tunings(
ret = load_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
GENERAL_CONFIG, config_data);
if (ret != HCMD_SUCCESS)
- dd_dev_info(ppd->dd,
- "%s: Failed set ext device config params\n",
- __func__);
+ dd_dev_err(ppd->dd,
+ "%s: Failed set ext device config params\n",
+ __func__);
}
if (tx_preset_index == OPA_INVALID_INDEX) {
if (ppd->port_type == PORT_TYPE_QSFP && limiting_active)
- dd_dev_info(ppd->dd, "%s: Invalid Tx preset index\n",
- __func__);
+ dd_dev_err(ppd->dd, "%s: Invalid Tx preset index\n",
+ __func__);
return;
}
@@ -900,7 +909,7 @@ static int tune_qsfp(struct hfi1_pportdata *ppd,
case 0xD: /* fallthrough */
case 0xF:
default:
- dd_dev_info(ppd->dd, "%s: Unknown/unsupported cable\n",
+ dd_dev_warn(ppd->dd, "%s: Unknown/unsupported cable\n",
__func__);
break;
}
@@ -935,6 +944,21 @@ void tune_serdes(struct hfi1_pportdata *ppd)
if (loopback != LOOPBACK_NONE ||
ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
ppd->driver_link_ready = 1;
+
+ if (qsfp_mod_present(ppd)) {
+ ret = acquire_chip_resource(ppd->dd,
+ qsfp_resource(ppd->dd),
+ QSFP_WAIT);
+ if (ret) {
+ dd_dev_err(ppd->dd, "%s: hfi%d: cannot lock i2c chain\n",
+ __func__, (int)ppd->dd->hfi1_id);
+ goto bail;
+ }
+
+ refresh_qsfp_cache(ppd, &ppd->qsfp_info);
+ release_chip_resource(ppd->dd, qsfp_resource(ppd->dd));
+ }
+
return;
}
@@ -942,7 +966,7 @@ void tune_serdes(struct hfi1_pportdata *ppd)
case PORT_TYPE_DISCONNECTED:
ppd->offline_disabled_reason =
HFI1_ODR_MASK(OPA_LINKDOWN_REASON_DISCONNECTED);
- dd_dev_info(dd, "%s: Port disconnected, disabling port\n",
+ dd_dev_warn(dd, "%s: Port disconnected, disabling port\n",
__func__);
goto bail;
case PORT_TYPE_FIXED:
@@ -1027,7 +1051,7 @@ void tune_serdes(struct hfi1_pportdata *ppd)
}
break;
default:
- dd_dev_info(ppd->dd, "%s: Unknown port type\n", __func__);
+ dd_dev_warn(ppd->dd, "%s: Unknown port type\n", __func__);
ppd->port_type = PORT_TYPE_UNKNOWN;
tuning_method = OPA_UNKNOWN_TUNING;
total_atten = 0;
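
get_platform_config() now falls back to request_firmware() and copies the blob with kmemdup(), so free_platform_config() can kfree() the data regardless of whether it came from the EPROM or the fallback file. A hedged sketch of that ownership pattern (helper name illustrative, error reporting trimmed):

#include <linux/firmware.h>
#include <linux/slab.h>

/*
 * Copying the payload lets the caller release the struct firmware
 * immediately and free the copy later with plain kfree().
 */
static void *load_config_copy(struct device *dev, const char *name,
			      size_t *size)
{
	const struct firmware *fw;
	void *copy;

	if (request_firmware(&fw, name, dev))
		return NULL;

	copy = kmemdup(fw->data, fw->size, GFP_KERNEL);
	if (copy)
		*size = fw->size;

	release_firmware(fw);		/* safe: we keep our own copy */
	return copy;			/* caller frees with kfree() */
}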
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index 650305cc0373..4b01ccd895b4 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2017 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -68,17 +68,12 @@ static int iowait_sleep(
struct sdma_engine *sde,
struct iowait *wait,
struct sdma_txreq *stx,
- unsigned seq);
+ unsigned int seq,
+ bool pkts_sent);
static void iowait_wakeup(struct iowait *wait, int reason);
static void iowait_sdma_drained(struct iowait *wait);
static void qp_pio_drain(struct rvt_qp *qp);
-static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
- struct rvt_qpn_map *map, unsigned off)
-{
- return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
-}
-
const struct rvt_operation_params hfi1_post_parms[RVT_OPERATION_MAX] = {
[IB_WR_RDMA_WRITE] = {
.length = sizeof(struct ib_rdma_wr),
@@ -237,6 +232,31 @@ int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
return 0;
}
+/*
+ * qp_set_16b - Set the hdr_type based on whether the slid or the
+ * dlid in the connection is extended. Only applicable for RC and UC
+ * QPs. UD QPs determine this on the fly from the ah in the wqe
+ */
+static inline void qp_set_16b(struct rvt_qp *qp)
+{
+ struct hfi1_pportdata *ppd;
+ struct hfi1_ibport *ibp;
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ /* Update ah_attr to account for extended LIDs */
+ hfi1_update_ah_attr(qp->ibqp.device, &qp->remote_ah_attr);
+
+ /* Create 32 bit LIDs */
+ hfi1_make_opa_lid(&qp->remote_ah_attr);
+
+ if (!(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH))
+ return;
+
+ ibp = to_iport(qp->ibqp.device, qp->port_num);
+ ppd = ppd_from_ibp(ibp);
+ priv->hdr_type = hfi1_get_hdr_type(ppd->lid, &qp->remote_ah_attr);
+}
+
void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata)
{
@@ -247,6 +267,7 @@ void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
+ qp_set_16b(qp);
}
if (attr_mask & IB_QP_PATH_MIG_STATE &&
@@ -256,6 +277,7 @@ void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
+ qp_set_16b(qp);
}
}
@@ -377,7 +399,8 @@ static int iowait_sleep(
struct sdma_engine *sde,
struct iowait *wait,
struct sdma_txreq *stx,
- unsigned seq)
+ uint seq,
+ bool pkts_sent)
{
struct verbs_txreq *tx = container_of(stx, struct verbs_txreq, txreq);
struct rvt_qp *qp;
@@ -408,7 +431,8 @@ static int iowait_sleep(
ibp->rvp.n_dmawait++;
qp->s_flags |= RVT_S_WAIT_DMA_DESC;
- list_add_tail(&priv->s_iowait.list, &sde->dmawait);
+ iowait_queue(pkts_sent, &priv->s_iowait,
+ &sde->dmawait);
priv->s_iowait.lock = &dev->iowait_lock;
trace_hfi1_qpsleep(qp, RVT_S_WAIT_DMA_DESC);
rvt_get_qp(qp);
@@ -506,82 +530,6 @@ struct send_context *qp_to_send_context(struct rvt_qp *qp, u8 sc5)
sc5);
}
-struct qp_iter {
- struct hfi1_ibdev *dev;
- struct rvt_qp *qp;
- int specials;
- int n;
-};
-
-struct qp_iter *qp_iter_init(struct hfi1_ibdev *dev)
-{
- struct qp_iter *iter;
-
- iter = kzalloc(sizeof(*iter), GFP_KERNEL);
- if (!iter)
- return NULL;
-
- iter->dev = dev;
- iter->specials = dev->rdi.ibdev.phys_port_cnt * 2;
-
- return iter;
-}
-
-int qp_iter_next(struct qp_iter *iter)
-{
- struct hfi1_ibdev *dev = iter->dev;
- int n = iter->n;
- int ret = 1;
- struct rvt_qp *pqp = iter->qp;
- struct rvt_qp *qp;
-
- /*
- * The approach is to consider the special qps
- * as an additional table entries before the
- * real hash table. Since the qp code sets
- * the qp->next hash link to NULL, this works just fine.
- *
- * iter->specials is 2 * # ports
- *
- * n = 0..iter->specials is the special qp indices
- *
- * n = iter->specials..dev->rdi.qp_dev->qp_table_size+iter->specials are
- * the potential hash bucket entries
- *
- */
- for (; n < dev->rdi.qp_dev->qp_table_size + iter->specials; n++) {
- if (pqp) {
- qp = rcu_dereference(pqp->next);
- } else {
- if (n < iter->specials) {
- struct hfi1_pportdata *ppd;
- struct hfi1_ibport *ibp;
- int pidx;
-
- pidx = n % dev->rdi.ibdev.phys_port_cnt;
- ppd = &dd_from_dev(dev)->pport[pidx];
- ibp = &ppd->ibport_data;
-
- if (!(n & 1))
- qp = rcu_dereference(ibp->rvp.qp[0]);
- else
- qp = rcu_dereference(ibp->rvp.qp[1]);
- } else {
- qp = rcu_dereference(
- dev->rdi.qp_dev->qp_table[
- (n - iter->specials)]);
- }
- }
- pqp = qp;
- if (qp) {
- iter->qp = qp;
- iter->n = n;
- return 0;
- }
- }
- return ret;
-}
-
static const char * const qp_type_str[] = {
"SMI", "GSI", "RC", "UC", "UD",
};
@@ -595,19 +543,27 @@ static int qp_idle(struct rvt_qp *qp)
qp->s_tail == qp->s_head;
}
-void qp_iter_print(struct seq_file *s, struct qp_iter *iter)
+/**
+ * qp_iter_print - print the qp information to seq_file
+ * @s: the seq_file to emit the qp information on
+ * @iter: the iterator for the qp hash list
+ */
+void qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter)
{
struct rvt_swqe *wqe;
struct rvt_qp *qp = iter->qp;
struct hfi1_qp_priv *priv = qp->priv;
struct sdma_engine *sde;
struct send_context *send_context;
+ struct rvt_ack_entry *e = NULL;
sde = qp_to_sdma_engine(qp, priv->s_sc);
wqe = rvt_get_swqe_ptr(qp, qp->s_last);
send_context = qp_to_send_context(qp, priv->s_sc);
+ if (qp->s_ack_queue)
+ e = &qp->s_ack_queue[qp->s_tail_ack_queue];
seq_printf(s,
- "N %d %s QP %x R %u %s %u %u %u f=%x %u %u %u %u %u %u SPSN %x %x %x %x %x RPSN %x (%u %u %u %u %u %u %u) RQP %x LID %x SL %u MTU %u %u %u %u %u SDE %p,%u SC %p,%u SCQ %u %u PID %d\n",
+ "N %d %s QP %x R %u %s %u %u %u f=%x %u %u %u %u %u %u SPSN %x %x %x %x %x RPSN %x S(%u %u %u %u %u %u %u) R(%u %u %u) RQP %x LID %x SL %u MTU %u %u %u %u %u SDE %p,%u SC %p,%u SCQ %u %u PID %d OS %x %x E %x %x %x\n",
iter->n,
qp_idle(qp) ? "I" : "B",
qp->ibqp.qp_num,
@@ -630,6 +586,10 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter)
qp->s_last, qp->s_acked, qp->s_cur,
qp->s_tail, qp->s_head, qp->s_size,
qp->s_avail,
+ /* ack_queue ring pointers, size */
+ qp->s_tail_ack_queue, qp->r_head_ack_queue,
+ rvt_max_atomic(&to_idev(qp->ibqp.device)->rdi),
+ /* remote QP info */
qp->remote_qpn,
rdma_ah_get_dlid(&qp->remote_ah_attr),
rdma_ah_get_sl(&qp->remote_ah_attr),
@@ -644,21 +604,26 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter)
send_context ? send_context->sw_index : 0,
ibcq_to_rvtcq(qp->ibqp.send_cq)->queue->head,
ibcq_to_rvtcq(qp->ibqp.send_cq)->queue->tail,
- qp->pid);
+ qp->pid,
+ qp->s_state,
+ qp->s_ack_state,
+ /* ack queue information */
+ e ? e->opcode : 0,
+ e ? e->psn : 0,
+ e ? e->lpsn : 0);
}
-void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp,
- gfp_t gfp)
+void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
struct hfi1_qp_priv *priv;
- priv = kzalloc_node(sizeof(*priv), gfp, rdi->dparms.node);
+ priv = kzalloc_node(sizeof(*priv), GFP_KERNEL, rdi->dparms.node);
if (!priv)
return ERR_PTR(-ENOMEM);
priv->owner = qp;
- priv->s_ahg = kzalloc_node(sizeof(*priv->s_ahg), gfp,
+ priv->s_ahg = kzalloc_node(sizeof(*priv->s_ahg), GFP_KERNEL,
rdi->dparms.node);
if (!priv->s_ahg) {
kfree(priv);
@@ -751,6 +716,7 @@ void hfi1_migrate_qp(struct rvt_qp *qp)
qp->s_flags |= RVT_S_AHG_CLEAR;
priv->s_sc = ah_to_sc(qp->ibqp.device, &qp->remote_ah_attr);
priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
+ qp_set_16b(qp);
ev.device = qp->ibqp.device;
ev.element.qp = &qp->ibqp;
@@ -833,6 +799,45 @@ void notify_error_qp(struct rvt_qp *qp)
}
/**
+ * hfi1_qp_iter_cb - callback for iterator
+ * @qp - the qp
+ * @v - the sl in low bits of v
+ *
+ * This is called from the iterator callback to work
+ * on an individual qp.
+ */
+static void hfi1_qp_iter_cb(struct rvt_qp *qp, u64 v)
+{
+ int lastwqe;
+ struct ib_event ev;
+ struct hfi1_ibport *ibp =
+ to_iport(qp->ibqp.device, qp->port_num);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ u8 sl = (u8)v;
+
+ if (qp->port_num != ppd->port ||
+ (qp->ibqp.qp_type != IB_QPT_UC &&
+ qp->ibqp.qp_type != IB_QPT_RC) ||
+ rdma_ah_get_sl(&qp->remote_ah_attr) != sl ||
+ !(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))
+ return;
+
+ spin_lock_irq(&qp->r_lock);
+ spin_lock(&qp->s_hlock);
+ spin_lock(&qp->s_lock);
+ lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+ spin_unlock(&qp->s_lock);
+ spin_unlock(&qp->s_hlock);
+ spin_unlock_irq(&qp->r_lock);
+ if (lastwqe) {
+ ev.device = qp->ibqp.device;
+ ev.element.qp = &qp->ibqp;
+ ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+ }
+}
+
+/**
* hfi1_error_port_qps - put a port's RC/UC qps into error state
* @ibp: the ibport.
* @sl: the service level.
@@ -843,44 +848,8 @@ void notify_error_qp(struct rvt_qp *qp)
*/
void hfi1_error_port_qps(struct hfi1_ibport *ibp, u8 sl)
{
- struct rvt_qp *qp = NULL;
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
struct hfi1_ibdev *dev = &ppd->dd->verbs_dev;
- int n;
- int lastwqe;
- struct ib_event ev;
-
- rcu_read_lock();
-
- /* Deal only with RC/UC qps that use the given SL. */
- for (n = 0; n < dev->rdi.qp_dev->qp_table_size; n++) {
- for (qp = rcu_dereference(dev->rdi.qp_dev->qp_table[n]); qp;
- qp = rcu_dereference(qp->next)) {
- if (qp->port_num == ppd->port &&
- (qp->ibqp.qp_type == IB_QPT_UC ||
- qp->ibqp.qp_type == IB_QPT_RC) &&
- rdma_ah_get_sl(&qp->remote_ah_attr) == sl &&
- (ib_rvt_state_ops[qp->state] &
- RVT_POST_SEND_OK)) {
- spin_lock_irq(&qp->r_lock);
- spin_lock(&qp->s_hlock);
- spin_lock(&qp->s_lock);
- lastwqe = rvt_error_qp(qp,
- IB_WC_WR_FLUSH_ERR);
- spin_unlock(&qp->s_lock);
- spin_unlock(&qp->s_hlock);
- spin_unlock_irq(&qp->r_lock);
- if (lastwqe) {
- ev.device = qp->ibqp.device;
- ev.element.qp = &qp->ibqp;
- ev.event =
- IB_EVENT_QP_LAST_WQE_REACHED;
- qp->ibqp.event_handler(&ev,
- qp->ibqp.qp_context);
- }
- }
- }
- }
- rcu_read_unlock();
+ rvt_qp_iter(&dev->rdi, sl, hfi1_qp_iter_cb);
}
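
hfi1_error_port_qps() above no longer open-codes the QP hash-table walk; rvt_qp_iter() in rdmavt owns the RCU-protected iteration, and the per-QP filtering moves into the callback. A trimmed-down skeleton of that callback shape, with the SL packed into the u64 cookie (this is an outline for illustration, not the driver's hfi1_qp_iter_cb()):

#include <rdma/rdmavt_qp.h>

static void flush_sl_qp_cb(struct rvt_qp *qp, u64 v)
{
	u8 sl = (u8)v;		/* the SL rides in the low bits of the cookie */

	if (qp->ibqp.qp_type != IB_QPT_RC && qp->ibqp.qp_type != IB_QPT_UC)
		return;
	if (rdma_ah_get_sl(&qp->remote_ah_attr) != sl)
		return;
	/* ...take r_lock -> s_hlock -> s_lock and move the QP to error... */
}

/* usage: rvt_qp_iter(&dev->rdi, sl, flush_sl_qp_cb); */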
diff --git a/drivers/infiniband/hw/hfi1/qp.h b/drivers/infiniband/hw/hfi1/qp.h
index 1eb9cd7b8c19..c06d2f8348e0 100644
--- a/drivers/infiniband/hw/hfi1/qp.h
+++ b/drivers/infiniband/hw/hfi1/qp.h
@@ -1,7 +1,7 @@
#ifndef _QP_H
#define _QP_H
/*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2017 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -94,26 +94,7 @@ void hfi1_qp_wakeup(struct rvt_qp *qp, u32 flag);
struct sdma_engine *qp_to_sdma_engine(struct rvt_qp *qp, u8 sc5);
struct send_context *qp_to_send_context(struct rvt_qp *qp, u8 sc5);
-struct qp_iter;
-
-/**
- * qp_iter_init - initialize the iterator for the qp hash list
- * @dev: the hfi1_ibdev
- */
-struct qp_iter *qp_iter_init(struct hfi1_ibdev *dev);
-
-/**
- * qp_iter_next - Find the next qp in the hash list
- * @iter: the iterator for the qp hash list
- */
-int qp_iter_next(struct qp_iter *iter);
-
-/**
- * qp_iter_print - print the qp information to seq_file
- * @s: the seq_file to emit the qp information on
- * @iter: the iterator for the qp hash list
- */
-void qp_iter_print(struct seq_file *s, struct qp_iter *iter);
+void qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter);
void _hfi1_schedule_send(struct rvt_qp *qp);
void hfi1_schedule_send(struct rvt_qp *qp);
@@ -123,8 +104,7 @@ void hfi1_migrate_qp(struct rvt_qp *qp);
/*
* Functions provided by hfi1 driver for rdmavt to use
*/
-void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp,
- gfp_t gfp);
+void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp);
void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp);
unsigned free_all_qps(struct rvt_dev_info *rdi);
void notify_qp_reset(struct rvt_qp *qp);
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 1080778a1f7c..e1cf0c08ca6f 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -100,8 +100,12 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
goto bail;
- /* header size in 32-bit words LRH+BTH = (8+12)/4. */
- hwords = 5;
+ if (priv->hdr_type == HFI1_PKT_TYPE_9B)
+ /* header size in 32-bit words LRH+BTH = (8+12)/4. */
+ hwords = 5;
+ else
+ /* header size in 32-bit words 16B LRH+BTH = (16+12)/4. */
+ hwords = 7;
switch (qp->s_ack_state) {
case OP(RDMA_READ_RESPONSE_LAST):
@@ -258,8 +262,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
struct ib_other_headers *ohdr;
struct rvt_sge_state *ss;
struct rvt_swqe *wqe;
- /* header size in 32-bit words LRH+BTH = (8+12)/4. */
- u32 hwords = 5;
+ u32 hwords;
u32 len;
u32 bth0 = 0;
u32 bth2;
@@ -273,9 +276,23 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
if (IS_ERR(ps->s_txreq))
goto bail_no_tx;
- ohdr = &ps->s_txreq->phdr.hdr.u.oth;
- if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)
- ohdr = &ps->s_txreq->phdr.hdr.u.l.oth;
+ ps->s_txreq->phdr.hdr.hdr_type = priv->hdr_type;
+ if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
+ /* header size in 32-bit words LRH+BTH = (8+12)/4. */
+ hwords = 5;
+ if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)
+ ohdr = &ps->s_txreq->phdr.hdr.ibh.u.l.oth;
+ else
+ ohdr = &ps->s_txreq->phdr.hdr.ibh.u.oth;
+ } else {
+ /* header size in 32-bit words 16B LRH+BTH = (16+12)/4. */
+ hwords = 7;
+ if ((rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) &&
+ (hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))))
+ ohdr = &ps->s_txreq->phdr.hdr.opah.u.l.oth;
+ else
+ ohdr = &ps->s_txreq->phdr.hdr.opah.u.oth;
+ }
/* Sending responses has higher priority over sending requests. */
if ((qp->s_flags & RVT_S_RESP_PENDING) &&
@@ -425,7 +442,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
case IB_WR_RDMA_WRITE:
if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
qp->s_lsn++;
- /* FALLTHROUGH */
+ goto no_flow_control;
case IB_WR_RDMA_WRITE_WITH_IMM:
/* If no credit, return. */
if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
@@ -433,6 +450,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
goto bail;
}
+no_flow_control:
put_ib_reth_vaddr(
wqe->rdma_wr.remote_addr,
&ohdr->u.rc.reth);
@@ -703,6 +721,154 @@ bail_no_tx:
return 0;
}
+static inline void hfi1_make_bth_aeth(struct rvt_qp *qp,
+ struct ib_other_headers *ohdr,
+ u32 bth0, u32 bth1)
+{
+ if (qp->r_nak_state)
+ ohdr->u.aeth = cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
+ (qp->r_nak_state <<
+ IB_AETH_CREDIT_SHIFT));
+ else
+ ohdr->u.aeth = rvt_compute_aeth(qp);
+
+ ohdr->bth[0] = cpu_to_be32(bth0);
+ ohdr->bth[1] = cpu_to_be32(bth1 | qp->remote_qpn);
+ ohdr->bth[2] = cpu_to_be32(mask_psn(qp->r_ack_psn));
+}
+
+static inline void hfi1_queue_rc_ack(struct rvt_qp *qp, bool is_fecn)
+{
+ struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
+ goto unlock;
+ this_cpu_inc(*ibp->rvp.rc_qacks);
+ qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING;
+ qp->s_nak_state = qp->r_nak_state;
+ qp->s_ack_psn = qp->r_ack_psn;
+ if (is_fecn)
+ qp->s_flags |= RVT_S_ECN;
+
+ /* Schedule the send tasklet. */
+ hfi1_schedule_send(qp);
+unlock:
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+}
+
+static inline void hfi1_make_rc_ack_9B(struct rvt_qp *qp,
+ struct hfi1_opa_header *opa_hdr,
+ u8 sc5, bool is_fecn,
+ u64 *pbc_flags, u32 *hwords,
+ u32 *nwords)
+{
+ struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ struct ib_header *hdr = &opa_hdr->ibh;
+ struct ib_other_headers *ohdr;
+ u16 lrh0 = HFI1_LRH_BTH;
+ u16 pkey;
+ u32 bth0, bth1;
+
+ opa_hdr->hdr_type = HFI1_PKT_TYPE_9B;
+ ohdr = &hdr->u.oth;
+ /* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4 */
+ *hwords = 6;
+
+ if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) {
+ *hwords += hfi1_make_grh(ibp, &hdr->u.l.grh,
+ rdma_ah_read_grh(&qp->remote_ah_attr),
+ *hwords - 2, SIZE_OF_CRC);
+ ohdr = &hdr->u.l.oth;
+ lrh0 = HFI1_LRH_GRH;
+ }
+ /* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
+ *pbc_flags |= ((!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT);
+
+ /* read pkey_index w/o lock (it's atomic) */
+ pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
+
+ lrh0 |= (sc5 & IB_SC_MASK) << IB_SC_SHIFT |
+ (rdma_ah_get_sl(&qp->remote_ah_attr) & IB_SL_MASK) <<
+ IB_SL_SHIFT;
+
+ hfi1_make_ib_hdr(hdr, lrh0, *hwords + SIZE_OF_CRC,
+ opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr), 9B),
+ ppd->lid | rdma_ah_get_path_bits(&qp->remote_ah_attr));
+
+ bth0 = pkey | (OP(ACKNOWLEDGE) << 24);
+ if (qp->s_mig_state == IB_MIG_MIGRATED)
+ bth0 |= IB_BTH_MIG_REQ;
+ bth1 = (!!is_fecn) << IB_BECN_SHIFT;
+ hfi1_make_bth_aeth(qp, ohdr, bth0, bth1);
+}
+
+static inline void hfi1_make_rc_ack_16B(struct rvt_qp *qp,
+ struct hfi1_opa_header *opa_hdr,
+ u8 sc5, bool is_fecn,
+ u64 *pbc_flags, u32 *hwords,
+ u32 *nwords)
+{
+ struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ struct hfi1_16b_header *hdr = &opa_hdr->opah;
+ struct ib_other_headers *ohdr;
+ u32 bth0, bth1 = 0;
+ u16 len, pkey;
+ u8 becn = !!is_fecn;
+ u8 l4 = OPA_16B_L4_IB_LOCAL;
+ u8 extra_bytes;
+
+ opa_hdr->hdr_type = HFI1_PKT_TYPE_16B;
+ ohdr = &hdr->u.oth;
+ /* header size in 32-bit words 16B LRH+BTH+AETH = (16+12+4)/4 */
+ *hwords = 8;
+ extra_bytes = hfi1_get_16b_padding(*hwords << 2, 0);
+ *nwords = SIZE_OF_CRC + ((extra_bytes + SIZE_OF_LT) >> 2);
+
+ if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) &&
+ hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))) {
+ *hwords += hfi1_make_grh(ibp, &hdr->u.l.grh,
+ rdma_ah_read_grh(&qp->remote_ah_attr),
+ *hwords - 4, *nwords);
+ ohdr = &hdr->u.l.oth;
+ l4 = OPA_16B_L4_IB_GLOBAL;
+ }
+ *pbc_flags |= PBC_PACKET_BYPASS | PBC_INSERT_BYPASS_ICRC;
+
+ /* read pkey_index w/o lock (it's atomic) */
+ pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
+
+ /* Convert dwords to flits */
+ len = (*hwords + *nwords) >> 1;
+
+ hfi1_make_16b_hdr(hdr,
+ ppd->lid | rdma_ah_get_path_bits(&qp->remote_ah_attr),
+ opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr),
+ 16B),
+ len, pkey, becn, 0, l4, sc5);
+
+ bth0 = pkey | (OP(ACKNOWLEDGE) << 24);
+ bth0 |= extra_bytes << 20;
+ if (qp->s_mig_state == IB_MIG_MIGRATED)
+ bth1 = OPA_BTH_MIG_REQ;
+ hfi1_make_bth_aeth(qp, ohdr, bth0, bth1);
+}
+
+typedef void (*hfi1_make_rc_ack)(struct rvt_qp *qp,
+ struct hfi1_opa_header *opa_hdr,
+ u8 sc5, bool is_fecn,
+ u64 *pbc_flags, u32 *hwords,
+ u32 *nwords);
+
+/* We support only two types - 9B and 16B for now */
+static const hfi1_make_rc_ack hfi1_make_rc_ack_tbl[2] = {
+ [HFI1_PKT_TYPE_9B] = &hfi1_make_rc_ack_9B,
+ [HFI1_PKT_TYPE_16B] = &hfi1_make_rc_ack_16B
+};
+
/**
* hfi1_send_rc_ack - Construct an ACK packet and send it
* @qp: a pointer to the QP
@@ -711,83 +877,48 @@ bail_no_tx:
* Note that RDMA reads and atomics are handled in the
* send side QP state and send engine.
*/
-void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp,
- int is_fecn)
+void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd,
+ struct rvt_qp *qp, bool is_fecn)
{
struct hfi1_ibport *ibp = rcd_to_iport(rcd);
+ struct hfi1_qp_priv *priv = qp->priv;
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];
u64 pbc, pbc_flags = 0;
- u16 lrh0;
- u16 sc5;
- u32 bth0;
- u32 hwords;
- u32 vl, plen;
- struct send_context *sc;
+ u32 hwords = 0;
+ u32 nwords = 0;
+ u32 plen;
struct pio_buf *pbuf;
- struct ib_header hdr;
- struct ib_other_headers *ohdr;
- unsigned long flags;
+ struct hfi1_opa_header opa_hdr;
/* clear the defer count */
qp->r_adefered = 0;
/* Don't send ACK or NAK if a RDMA read or atomic is pending. */
- if (qp->s_flags & RVT_S_RESP_PENDING)
- goto queue_ack;
+ if (qp->s_flags & RVT_S_RESP_PENDING) {
+ hfi1_queue_rc_ack(qp, is_fecn);
+ return;
+ }
/* Ensure s_rdma_ack_cnt changes are committed */
smp_read_barrier_depends();
- if (qp->s_rdma_ack_cnt)
- goto queue_ack;
-
- /* Construct the header */
- /* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4 */
- hwords = 6;
- if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) {
- hwords += hfi1_make_grh(ibp, &hdr.u.l.grh,
- rdma_ah_read_grh(&qp->remote_ah_attr),
- hwords, 0);
- ohdr = &hdr.u.l.oth;
- lrh0 = HFI1_LRH_GRH;
- } else {
- ohdr = &hdr.u.oth;
- lrh0 = HFI1_LRH_BTH;
+ if (qp->s_rdma_ack_cnt) {
+ hfi1_queue_rc_ack(qp, is_fecn);
+ return;
}
- /* read pkey_index w/o lock (its atomic) */
- bth0 = hfi1_get_pkey(ibp, qp->s_pkey_index) | (OP(ACKNOWLEDGE) << 24);
- if (qp->s_mig_state == IB_MIG_MIGRATED)
- bth0 |= IB_BTH_MIG_REQ;
- if (qp->r_nak_state)
- ohdr->u.aeth = cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
- (qp->r_nak_state <<
- IB_AETH_CREDIT_SHIFT));
- else
- ohdr->u.aeth = rvt_compute_aeth(qp);
- sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];
- /* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
- pbc_flags |= ((!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT);
- lrh0 |= (sc5 & 0xf) << 12 | (rdma_ah_get_sl(&qp->remote_ah_attr)
- & 0xf) << 4;
- hdr.lrh[0] = cpu_to_be16(lrh0);
- hdr.lrh[1] = cpu_to_be16(rdma_ah_get_dlid(&qp->remote_ah_attr));
- hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
- hdr.lrh[3] = cpu_to_be16(ppd->lid |
- rdma_ah_get_path_bits(&qp->remote_ah_attr));
- ohdr->bth[0] = cpu_to_be32(bth0);
- ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
- ohdr->bth[1] |= cpu_to_be32((!!is_fecn) << IB_BECN_SHIFT);
- ohdr->bth[2] = cpu_to_be32(mask_psn(qp->r_ack_psn));
/* Don't try to send ACKs if the link isn't ACTIVE */
if (driver_lstate(ppd) != IB_PORT_ACTIVE)
return;
- sc = rcd->sc;
- plen = 2 /* PBC */ + hwords;
- vl = sc_to_vlt(ppd->dd, sc5);
- pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
+ /* Make the appropriate header */
+ hfi1_make_rc_ack_tbl[priv->hdr_type](qp, &opa_hdr, sc5, is_fecn,
+ &pbc_flags, &hwords, &nwords);
- pbuf = sc_buffer_alloc(sc, plen, NULL, NULL);
+ plen = 2 /* PBC */ + hwords + nwords;
+ pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps,
+ sc_to_vlt(ppd->dd, sc5), plen);
+ pbuf = sc_buffer_alloc(rcd->sc, plen, NULL, NULL);
if (!pbuf) {
/*
* We have no room to send at the moment. Pass
@@ -795,31 +926,18 @@ void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp,
* so that when enough buffer space becomes available,
* the ACK is sent ahead of other outgoing packets.
*/
- goto queue_ack;
+ hfi1_queue_rc_ack(qp, is_fecn);
+ return;
}
-
- trace_ack_output_ibhdr(dd_from_ibdev(qp->ibqp.device), &hdr);
+ trace_ack_output_ibhdr(dd_from_ibdev(qp->ibqp.device),
+ &opa_hdr, ib_is_sc5(sc5));
/* write the pbc and data */
- ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc, &hdr, hwords);
-
+ ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
+ (priv->hdr_type == HFI1_PKT_TYPE_9B ?
+ (void *)&opa_hdr.ibh :
+ (void *)&opa_hdr.opah), hwords);
return;
-
-queue_ack:
- spin_lock_irqsave(&qp->s_lock, flags);
- if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
- goto unlock;
- this_cpu_inc(*ibp->rvp.rc_qacks);
- qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING;
- qp->s_nak_state = qp->r_nak_state;
- qp->s_ack_psn = qp->r_ack_psn;
- if (is_fecn)
- qp->s_flags |= RVT_S_ECN;
-
- /* Schedule the send engine. */
- hfi1_schedule_send(qp);
-unlock:
- spin_unlock_irqrestore(&qp->s_lock, flags);
}
/**
@@ -984,10 +1102,13 @@ static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
/*
* This should be called with the QP s_lock held and interrupts disabled.
*/
-void hfi1_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
+void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah)
{
struct ib_other_headers *ohdr;
+ struct hfi1_qp_priv *priv = qp->priv;
struct rvt_swqe *wqe;
+ struct ib_header *hdr = NULL;
+ struct hfi1_16b_header *hdr_16b = NULL;
u32 opcode;
u32 psn;
@@ -996,10 +1117,22 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
return;
/* Find out where the BTH is */
- if (ib_get_lnh(hdr) == HFI1_LRH_BTH)
- ohdr = &hdr->u.oth;
- else
- ohdr = &hdr->u.l.oth;
+ if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
+ hdr = &opah->ibh;
+ if (ib_get_lnh(hdr) == HFI1_LRH_BTH)
+ ohdr = &hdr->u.oth;
+ else
+ ohdr = &hdr->u.l.oth;
+ } else {
+ u8 l4;
+
+ hdr_16b = &opah->opah;
+ l4 = hfi1_16B_get_l4(hdr_16b);
+ if (l4 == OPA_16B_L4_IB_LOCAL)
+ ohdr = &hdr_16b->u.oth;
+ else
+ ohdr = &hdr_16b->u.l.oth;
+ }
opcode = ib_bth_get_opcode(ohdr);
if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
@@ -1009,7 +1142,7 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
return;
}
- psn = be32_to_cpu(ohdr->bth[2]);
+ psn = ib_bth_get_psn(ohdr);
reset_sending_psn(qp, psn);
/*
@@ -1399,36 +1532,34 @@ static void rdma_seq_err(struct rvt_qp *qp, struct hfi1_ibport *ibp, u32 psn,
/**
* rc_rcv_resp - process an incoming RC response packet
- * @ibp: the port this packet came in on
- * @ohdr: the other headers for this packet
- * @data: the packet data
- * @tlen: the packet length
- * @qp: the QP for this packet
- * @opcode: the opcode for this packet
- * @psn: the packet sequence number for this packet
- * @hdrsize: the header length
- * @pmtu: the path MTU
+ * @packet: data packet information
*
* This is called from hfi1_rc_rcv() to process an incoming RC response
* packet for the given QP.
* Called at interrupt level.
*/
-static void rc_rcv_resp(struct hfi1_ibport *ibp,
- struct ib_other_headers *ohdr,
- void *data, u32 tlen, struct rvt_qp *qp,
- u32 opcode, u32 psn, u32 hdrsize, u32 pmtu,
- struct hfi1_ctxtdata *rcd)
+static void rc_rcv_resp(struct hfi1_packet *packet)
{
+ struct hfi1_ctxtdata *rcd = packet->rcd;
+ void *data = packet->payload;
+ u32 tlen = packet->tlen;
+ struct rvt_qp *qp = packet->qp;
+ struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ struct ib_other_headers *ohdr = packet->ohdr;
struct rvt_swqe *wqe;
enum ib_wc_status status;
unsigned long flags;
int diff;
- u32 pad;
- u32 aeth;
u64 val;
+ u32 aeth;
+ u32 psn = ib_bth_get_psn(packet->ohdr);
+ u32 pmtu = qp->pmtu;
+ u16 hdrsize = packet->hlen;
+ u8 opcode = packet->opcode;
+ u8 pad = packet->pad;
+ u8 extra_bytes = pad + packet->extra_byte + (SIZE_OF_CRC << 2);
spin_lock_irqsave(&qp->s_lock, flags);
-
trace_hfi1_ack(qp, psn);
/* Ignore invalid responses. */
@@ -1494,7 +1625,7 @@ static void rc_rcv_resp(struct hfi1_ibport *ibp,
if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
goto ack_op_err;
read_middle:
- if (unlikely(tlen != (hdrsize + pmtu + 4)))
+ if (unlikely(tlen != (hdrsize + pmtu + extra_bytes)))
goto ack_len_err;
if (unlikely(pmtu >= qp->s_rdma_read_len))
goto ack_len_err;
@@ -1526,13 +1657,11 @@ read_middle:
aeth = be32_to_cpu(ohdr->u.aeth);
if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
goto ack_done;
- /* Get the number of bytes the message was padded by. */
- pad = ib_bth_get_pad(ohdr);
/*
* Check that the data size is >= 0 && <= pmtu.
* Remember to account for ICRC (4).
*/
- if (unlikely(tlen < (hdrsize + pad + 4)))
+ if (unlikely(tlen < (hdrsize + extra_bytes)))
goto ack_len_err;
/*
* If this is a response to a resent RDMA read, we
@@ -1550,16 +1679,14 @@ read_middle:
goto ack_seq_err;
if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
goto ack_op_err;
- /* Get the number of bytes the message was padded by. */
- pad = ib_bth_get_pad(ohdr);
/*
* Check that the data size is >= 1 && <= pmtu.
* Remember to account for ICRC (4).
*/
- if (unlikely(tlen <= (hdrsize + pad + 4)))
+ if (unlikely(tlen <= (hdrsize + extra_bytes)))
goto ack_len_err;
read_last:
- tlen -= hdrsize + pad + 4;
+ tlen -= hdrsize + extra_bytes;
if (unlikely(tlen != qp->s_rdma_read_len))
goto ack_len_err;
aeth = be32_to_cpu(ohdr->u.aeth);
@@ -1844,7 +1971,7 @@ static void log_cca_event(struct hfi1_pportdata *ppd, u8 sl, u32 rlid,
spin_unlock_irqrestore(&ppd->cc_log_lock, flags);
}
-void process_becn(struct hfi1_pportdata *ppd, u8 sl, u16 rlid, u32 lqpn,
+void process_becn(struct hfi1_pportdata *ppd, u8 sl, u32 rlid, u32 lqpn,
u32 rqpn, u8 svc_type)
{
struct cca_timer *cca_timer;
@@ -1901,12 +2028,7 @@ void process_becn(struct hfi1_pportdata *ppd, u8 sl, u16 rlid, u32 lqpn,
/**
* hfi1_rc_rcv - process an incoming RC packet
- * @rcd: the context pointer
- * @hdr: the header of this packet
- * @rcv_flags: flags relevant to rcv processing
- * @data: the packet data
- * @tlen: the packet length
- * @qp: the QP for this packet
+ * @packet: data packet information
*
* This is called from qp_rcv() to process an incoming RC packet
* for the given QP.
@@ -1915,17 +2037,16 @@ void process_becn(struct hfi1_pportdata *ppd, u8 sl, u16 rlid, u32 lqpn,
void hfi1_rc_rcv(struct hfi1_packet *packet)
{
struct hfi1_ctxtdata *rcd = packet->rcd;
- struct ib_header *hdr = packet->hdr;
- u32 rcv_flags = packet->rcv_flags;
- void *data = packet->ebuf;
+ void *data = packet->payload;
u32 tlen = packet->tlen;
struct rvt_qp *qp = packet->qp;
struct hfi1_ibport *ibp = rcd_to_iport(rcd);
struct ib_other_headers *ohdr = packet->ohdr;
- u32 bth0, opcode;
+ u32 bth0 = be32_to_cpu(ohdr->bth[0]);
+ u32 opcode = packet->opcode;
u32 hdrsize = packet->hlen;
- u32 psn;
- u32 pad;
+ u32 psn = ib_bth_get_psn(packet->ohdr);
+ u32 pad = packet->pad;
struct ib_wc wc;
u32 pmtu = qp->pmtu;
int diff;
@@ -1935,17 +2056,15 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
bool is_fecn = false;
bool copy_last = false;
u32 rkey;
+ u8 extra_bytes = pad + packet->extra_byte + (SIZE_OF_CRC << 2);
lockdep_assert_held(&qp->r_lock);
- bth0 = be32_to_cpu(ohdr->bth[0]);
- if (hfi1_ruc_check_hdr(ibp, hdr, rcv_flags & HFI1_HAS_GRH, qp, bth0))
+
+ if (hfi1_ruc_check_hdr(ibp, packet))
return;
is_fecn = process_ecn(qp, packet, false);
- psn = be32_to_cpu(ohdr->bth[2]);
- opcode = ib_bth_get_opcode(ohdr);
-
/*
* Process responses (ACKs) before anything else. Note that the
* packet sequence number will be for something in the send work
@@ -1954,8 +2073,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
*/
if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
- rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn,
- hdrsize, pmtu, rcd);
+ rc_rcv_resp(packet);
if (is_fecn)
goto send_ack;
return;
@@ -2022,7 +2140,12 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
case OP(RDMA_WRITE_MIDDLE):
send_middle:
/* Check for invalid length PMTU or posted rwqe len. */
- if (unlikely(tlen != (hdrsize + pmtu + 4)))
+ /*
+ * There will be no padding for 9B packets, but 16B packets
+ * will come in with some padding since we always add CRC
+ * and LT bytes, which need to be flit aligned.
+ */
+ if (unlikely(tlen != (hdrsize + pmtu + extra_bytes)))
goto nack_inv;
qp->r_rcv_len += pmtu;
if (unlikely(qp->r_rcv_len > qp->r_len))
@@ -2074,14 +2197,12 @@ no_immediate_data:
wc.wc_flags = 0;
wc.ex.imm_data = 0;
send_last:
- /* Get the number of bytes the message was padded by. */
- pad = ib_bth_get_pad(ohdr);
/* Check for invalid length. */
/* LAST len should be >= 1 */
- if (unlikely(tlen < (hdrsize + pad + 4)))
+ if (unlikely(tlen < (hdrsize + extra_bytes)))
goto nack_inv;
- /* Don't count the CRC. */
- tlen -= (hdrsize + pad + 4);
+ /* Don't count the CRC(and padding and LT byte for 16B). */
+ tlen -= (hdrsize + extra_bytes);
wc.byte_len = tlen + qp->r_rcv_len;
if (unlikely(wc.byte_len > qp->r_len))
goto nack_inv;
@@ -2368,28 +2489,19 @@ send_ack:
void hfi1_rc_hdrerr(
struct hfi1_ctxtdata *rcd,
- struct ib_header *hdr,
- u32 rcv_flags,
+ struct hfi1_packet *packet,
struct rvt_qp *qp)
{
- int has_grh = rcv_flags & HFI1_HAS_GRH;
- struct ib_other_headers *ohdr;
struct hfi1_ibport *ibp = rcd_to_iport(rcd);
int diff;
u32 opcode;
- u32 psn, bth0;
-
- /* Check for GRH */
- ohdr = &hdr->u.oth;
- if (has_grh)
- ohdr = &hdr->u.l.oth;
+ u32 psn;
- bth0 = be32_to_cpu(ohdr->bth[0]);
- if (hfi1_ruc_check_hdr(ibp, hdr, has_grh, qp, bth0))
+ if (hfi1_ruc_check_hdr(ibp, packet))
return;
- psn = be32_to_cpu(ohdr->bth[2]);
- opcode = ib_bth_get_opcode(ohdr);
+ psn = ib_bth_get_psn(packet->ohdr);
+ opcode = ib_bth_get_opcode(packet->ohdr);
/* Only deal with RDMA Writes for now */
if (opcode < IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
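
ACK construction above is split into 9B and 16B builders selected through a function-pointer table indexed by the QP's hdr_type. A generic, self-contained sketch of that dispatch-table pattern follows; the header sizes mirror the comments in the patch, but the types and names are illustrative.

enum pkt_type { PKT_TYPE_9B, PKT_TYPE_16B, PKT_TYPE_MAX };

typedef void (*build_hdr_fn)(void *hdr, unsigned int *hwords);

static void build_hdr_9b(void *hdr, unsigned int *hwords)
{
	*hwords = 6;	/* LRH + BTH + AETH = (8 + 12 + 4) / 4 dwords */
}

static void build_hdr_16b(void *hdr, unsigned int *hwords)
{
	*hwords = 8;	/* 16B LRH + BTH + AETH = (16 + 12 + 4) / 4 dwords */
}

static const build_hdr_fn build_hdr_tbl[PKT_TYPE_MAX] = {
	[PKT_TYPE_9B]  = build_hdr_9b,
	[PKT_TYPE_16B] = build_hdr_16b,
};

/* usage: build_hdr_tbl[hdr_type](hdr, &hwords); */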
diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
index 3a17daba28a9..b3291f0fde9a 100644
--- a/drivers/infiniband/hw/hfi1/ruc.c
+++ b/drivers/infiniband/hw/hfi1/ruc.c
@@ -74,8 +74,10 @@ static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
if (wqe->sg_list[i].length == 0)
continue;
/* Check LKEY */
- if (!rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
- &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
+ ret = rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
+ NULL, &wqe->sg_list[i],
+ IB_ACCESS_LOCAL_WRITE);
+ if (unlikely(ret <= 0))
goto bad_lkey;
qp->r_len += wqe->sg_list[i].length;
j++;
@@ -214,100 +216,104 @@ static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
*
* The s_lock will be acquired around the hfi1_migrate_qp() call.
*/
-int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct ib_header *hdr,
- int has_grh, struct rvt_qp *qp, u32 bth0)
+int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_packet *packet)
{
__be64 guid;
unsigned long flags;
+ struct rvt_qp *qp = packet->qp;
u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];
+ u32 dlid = packet->dlid;
+ u32 slid = packet->slid;
+ u32 sl = packet->sl;
+ int migrated;
+ u32 bth0, bth1;
+ u16 pkey;
+
+ bth0 = be32_to_cpu(packet->ohdr->bth[0]);
+ bth1 = be32_to_cpu(packet->ohdr->bth[1]);
+ if (packet->etype == RHF_RCV_TYPE_BYPASS) {
+ pkey = hfi1_16B_get_pkey(packet->hdr);
+ migrated = bth1 & OPA_BTH_MIG_REQ;
+ } else {
+ pkey = ib_bth_get_pkey(packet->ohdr);
+ migrated = bth0 & IB_BTH_MIG_REQ;
+ }
- if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) {
- if (!has_grh) {
- if (rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
- IB_AH_GRH)
- goto err;
+ if (qp->s_mig_state == IB_MIG_ARMED && migrated) {
+ if (!packet->grh) {
+ if ((rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
+ IB_AH_GRH) &&
+ (packet->etype != RHF_RCV_TYPE_BYPASS))
+ return 1;
} else {
const struct ib_global_route *grh;
if (!(rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
IB_AH_GRH))
- goto err;
+ return 1;
grh = rdma_ah_read_grh(&qp->alt_ah_attr);
guid = get_sguid(ibp, grh->sgid_index);
- if (!gid_ok(&hdr->u.l.grh.dgid, ibp->rvp.gid_prefix,
+ if (!gid_ok(&packet->grh->dgid, ibp->rvp.gid_prefix,
guid))
- goto err;
+ return 1;
if (!gid_ok(
- &hdr->u.l.grh.sgid,
+ &packet->grh->sgid,
grh->dgid.global.subnet_prefix,
grh->dgid.global.interface_id))
- goto err;
+ return 1;
}
- if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), (u16)bth0, sc5,
- ib_get_slid(hdr)))) {
- hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_P_KEY,
- (u16)bth0,
- ib_get_sl(hdr),
- 0, qp->ibqp.qp_num,
- ib_get_slid(hdr),
- ib_get_dlid(hdr));
- goto err;
+ if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), pkey,
+ sc5, slid))) {
+ hfi1_bad_pkey(ibp, pkey, sl, 0, qp->ibqp.qp_num,
+ slid, dlid);
+ return 1;
}
/* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
- if (ib_get_slid(hdr) !=
- rdma_ah_get_dlid(&qp->alt_ah_attr) ||
+ if (slid != rdma_ah_get_dlid(&qp->alt_ah_attr) ||
ppd_from_ibp(ibp)->port !=
rdma_ah_get_port_num(&qp->alt_ah_attr))
- goto err;
+ return 1;
spin_lock_irqsave(&qp->s_lock, flags);
hfi1_migrate_qp(qp);
spin_unlock_irqrestore(&qp->s_lock, flags);
} else {
- if (!has_grh) {
- if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
- IB_AH_GRH)
- goto err;
+ if (!packet->grh) {
+ if ((rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
+ IB_AH_GRH) &&
+ (packet->etype != RHF_RCV_TYPE_BYPASS))
+ return 1;
} else {
const struct ib_global_route *grh;
if (!(rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
IB_AH_GRH))
- goto err;
+ return 1;
grh = rdma_ah_read_grh(&qp->remote_ah_attr);
guid = get_sguid(ibp, grh->sgid_index);
- if (!gid_ok(&hdr->u.l.grh.dgid, ibp->rvp.gid_prefix,
+ if (!gid_ok(&packet->grh->dgid, ibp->rvp.gid_prefix,
guid))
- goto err;
+ return 1;
if (!gid_ok(
- &hdr->u.l.grh.sgid,
+ &packet->grh->sgid,
grh->dgid.global.subnet_prefix,
grh->dgid.global.interface_id))
- goto err;
+ return 1;
}
- if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), (u16)bth0, sc5,
- ib_get_slid(hdr)))) {
- hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_P_KEY,
- (u16)bth0,
- ib_get_sl(hdr),
- 0, qp->ibqp.qp_num,
- ib_get_slid(hdr),
- ib_get_dlid(hdr));
- goto err;
+ if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), pkey,
+ sc5, slid))) {
+ hfi1_bad_pkey(ibp, pkey, sl, 0, qp->ibqp.qp_num,
+ slid, dlid);
+ return 1;
}
/* Validate the SLID. See Ch. 9.6.1.5 */
- if (ib_get_slid(hdr) !=
- rdma_ah_get_dlid(&qp->remote_ah_attr) ||
+ if ((slid != rdma_ah_get_dlid(&qp->remote_ah_attr)) ||
ppd_from_ibp(ibp)->port != qp->port_num)
- goto err;
- if (qp->s_mig_state == IB_MIG_REARM &&
- !(bth0 & IB_BTH_MIG_REQ))
+ return 1;
+ if (qp->s_mig_state == IB_MIG_REARM && !migrated)
qp->s_mig_state = IB_MIG_ARMED;
}
return 0;
-
-err:
- return 1;
}
/**
@@ -643,7 +649,7 @@ done:
* @ibp: a pointer to the IB port
* @hdr: a pointer to the GRH header being constructed
* @grh: the global route address to send to
- * @hwords: the number of 32 bit words of header being sent
+ * @hwords: size of the header after the GRH being sent, in dwords
* @nwords: the number of 32 bit words of data being sent
*
* Return the size of the header in 32 bit words.
@@ -655,7 +661,7 @@ u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,
cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
(grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
(grh->flow_label << IB_GRH_FLOW_SHIFT));
- hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
+ hdr->paylen = cpu_to_be16((hwords + nwords) << 2);
/* next_hdr is defined by C8-7 in ch. 8.4.1 */
hdr->next_hdr = IB_GRH_NEXT_HDR;
hdr->hop_limit = grh->hop_limit;
@@ -671,7 +677,8 @@ u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,
return sizeof(struct ib_grh) / sizeof(u32);
}
-#define BTH2_OFFSET (offsetof(struct hfi1_sdma_header, hdr.u.oth.bth[2]) / 4)
+#define BTH2_OFFSET (offsetof(struct hfi1_sdma_header, \
+ hdr.ibh.u.oth.bth[2]) / 4)
/**
* build_ahg - create ahg in s_ahg
@@ -728,73 +735,186 @@ static inline void build_ahg(struct rvt_qp *qp, u32 npsn)
}
}
-void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
- u32 bth0, u32 bth2, int middle,
- struct hfi1_pkt_state *ps)
+static inline void hfi1_make_ruc_bth(struct rvt_qp *qp,
+ struct ib_other_headers *ohdr,
+ u32 bth0, u32 bth1, u32 bth2)
+{
+ bth1 |= qp->remote_qpn;
+ ohdr->bth[0] = cpu_to_be32(bth0);
+ ohdr->bth[1] = cpu_to_be32(bth1);
+ ohdr->bth[2] = cpu_to_be32(bth2);
+}
+
+static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp,
+ struct ib_other_headers *ohdr,
+ u32 bth0, u32 bth2, int middle,
+ struct hfi1_pkt_state *ps)
{
struct hfi1_qp_priv *priv = qp->priv;
struct hfi1_ibport *ibp = ps->ibp;
- u16 lrh0;
- u32 nwords;
- u32 extra_bytes;
- u32 bth1;
-
- /* Construct the header. */
- extra_bytes = -ps->s_txreq->s_cur_size & 3;
- nwords = (ps->s_txreq->s_cur_size + extra_bytes) >> 2;
- lrh0 = HFI1_LRH_BTH;
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ u32 bth1 = 0;
+ u32 slid;
+ u16 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
+ u8 l4 = OPA_16B_L4_IB_LOCAL;
+ u8 extra_bytes = hfi1_get_16b_padding((qp->s_hdrwords << 2),
+ ps->s_txreq->s_cur_size);
+ u32 nwords = SIZE_OF_CRC + ((ps->s_txreq->s_cur_size +
+ extra_bytes + SIZE_OF_LT) >> 2);
+ u8 becn = 0;
+
+ if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) &&
+ hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))) {
+ struct ib_grh *grh;
+ struct ib_global_route *grd =
+ rdma_ah_retrieve_grh(&qp->remote_ah_attr);
+ int hdrwords;
+
+ /*
+ * Ensure OPA GIDs are transformed to IB gids
+ * before creating the GRH.
+ */
+ if (grd->sgid_index == OPA_GID_INDEX)
+ grd->sgid_index = 0;
+ grh = &ps->s_txreq->phdr.hdr.opah.u.l.grh;
+ l4 = OPA_16B_L4_IB_GLOBAL;
+ hdrwords = qp->s_hdrwords - 4;
+ qp->s_hdrwords += hfi1_make_grh(ibp, grh, grd,
+ hdrwords, nwords);
+ middle = 0;
+ }
+
+ if (qp->s_mig_state == IB_MIG_MIGRATED)
+ bth1 |= OPA_BTH_MIG_REQ;
+ else
+ middle = 0;
+
+ if (middle)
+ build_ahg(qp, bth2);
+ else
+ qp->s_flags &= ~RVT_S_AHG_VALID;
+
+ bth0 |= pkey;
+ bth0 |= extra_bytes << 20;
+ if (qp->s_flags & RVT_S_ECN) {
+ qp->s_flags &= ~RVT_S_ECN;
+ /* we recently received a FECN, so return a BECN */
+ becn = 1;
+ }
+ hfi1_make_ruc_bth(qp, ohdr, bth0, bth1, bth2);
+
+ if (!ppd->lid)
+ slid = be32_to_cpu(OPA_LID_PERMISSIVE);
+ else
+ slid = ppd->lid |
+ (rdma_ah_get_path_bits(&qp->remote_ah_attr) &
+ ((1 << ppd->lmc) - 1));
+
+ hfi1_make_16b_hdr(&ps->s_txreq->phdr.hdr.opah,
+ slid,
+ opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr),
+ 16B),
+ (qp->s_hdrwords + nwords) >> 1,
+ pkey, becn, 0, l4, priv->s_sc);
+}
+
+static inline void hfi1_make_ruc_header_9B(struct rvt_qp *qp,
+ struct ib_other_headers *ohdr,
+ u32 bth0, u32 bth2, int middle,
+ struct hfi1_pkt_state *ps)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct hfi1_ibport *ibp = ps->ibp;
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ u32 bth1 = 0;
+ u16 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
+ u16 lrh0 = HFI1_LRH_BTH;
+ u16 slid;
+ u8 extra_bytes = -ps->s_txreq->s_cur_size & 3;
+ u32 nwords = SIZE_OF_CRC + ((ps->s_txreq->s_cur_size +
+ extra_bytes) >> 2);
+
if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) {
+ struct ib_grh *grh = &ps->s_txreq->phdr.hdr.ibh.u.l.grh;
+ int hdrwords = qp->s_hdrwords - 2;
+
+ lrh0 = HFI1_LRH_GRH;
qp->s_hdrwords +=
- hfi1_make_grh(ibp,
- &ps->s_txreq->phdr.hdr.u.l.grh,
+ hfi1_make_grh(ibp, grh,
rdma_ah_read_grh(&qp->remote_ah_attr),
- qp->s_hdrwords, nwords);
- lrh0 = HFI1_LRH_GRH;
+ hdrwords, nwords);
middle = 0;
}
lrh0 |= (priv->s_sc & 0xf) << 12 |
(rdma_ah_get_sl(&qp->remote_ah_attr) & 0xf) << 4;
- /*
- * reset s_ahg/AHG fields
- *
- * This insures that the ahgentry/ahgcount
- * are at a non-AHG default to protect
- * build_verbs_tx_desc() from using
- * an include ahgidx.
- *
- * build_ahg() will modify as appropriate
- * to use the AHG feature.
- */
- priv->s_ahg->tx_flags = 0;
- priv->s_ahg->ahgcount = 0;
- priv->s_ahg->ahgidx = 0;
+
if (qp->s_mig_state == IB_MIG_MIGRATED)
bth0 |= IB_BTH_MIG_REQ;
else
middle = 0;
+
if (middle)
build_ahg(qp, bth2);
else
qp->s_flags &= ~RVT_S_AHG_VALID;
- ps->s_txreq->phdr.hdr.lrh[0] = cpu_to_be16(lrh0);
- ps->s_txreq->phdr.hdr.lrh[1] =
- cpu_to_be16(rdma_ah_get_dlid(&qp->remote_ah_attr));
- ps->s_txreq->phdr.hdr.lrh[2] =
- cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
- ps->s_txreq->phdr.hdr.lrh[3] =
- cpu_to_be16(ppd_from_ibp(ibp)->lid |
- rdma_ah_get_path_bits(&qp->remote_ah_attr));
- bth0 |= hfi1_get_pkey(ibp, qp->s_pkey_index);
+
+ bth0 |= pkey;
bth0 |= extra_bytes << 20;
- ohdr->bth[0] = cpu_to_be32(bth0);
- bth1 = qp->remote_qpn;
if (qp->s_flags & RVT_S_ECN) {
qp->s_flags &= ~RVT_S_ECN;
/* we recently received a FECN, so return a BECN */
bth1 |= (IB_BECN_MASK << IB_BECN_SHIFT);
}
- ohdr->bth[1] = cpu_to_be32(bth1);
- ohdr->bth[2] = cpu_to_be32(bth2);
+ hfi1_make_ruc_bth(qp, ohdr, bth0, bth1, bth2);
+
+ if (!ppd->lid)
+ slid = be16_to_cpu(IB_LID_PERMISSIVE);
+ else
+ slid = ppd->lid |
+ (rdma_ah_get_path_bits(&qp->remote_ah_attr) &
+ ((1 << ppd->lmc) - 1));
+ hfi1_make_ib_hdr(&ps->s_txreq->phdr.hdr.ibh,
+ lrh0,
+ qp->s_hdrwords + nwords,
+ opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr), 9B),
+ ppd_from_ibp(ibp)->lid |
+ rdma_ah_get_path_bits(&qp->remote_ah_attr));
+}
+
+typedef void (*hfi1_make_ruc_hdr)(struct rvt_qp *qp,
+ struct ib_other_headers *ohdr,
+ u32 bth0, u32 bth2, int middle,
+ struct hfi1_pkt_state *ps);
+
+/* We support only two types - 9B and 16B for now */
+static const hfi1_make_ruc_hdr hfi1_ruc_header_tbl[2] = {
+ [HFI1_PKT_TYPE_9B] = &hfi1_make_ruc_header_9B,
+ [HFI1_PKT_TYPE_16B] = &hfi1_make_ruc_header_16B
+};
+
+void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
+ u32 bth0, u32 bth2, int middle,
+ struct hfi1_pkt_state *ps)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ /*
+ * reset s_ahg/AHG fields
+ *
+ * This ensures that the ahgentry/ahgcount
+ * are at a non-AHG default to protect
+ * build_verbs_tx_desc() from using
+ * an include ahgidx.
+ *
+ * build_ahg() will modify as appropriate
+ * to use the AHG feature.
+ */
+ priv->s_ahg->tx_flags = 0;
+ priv->s_ahg->ahgcount = 0;
+ priv->s_ahg->ahgidx = 0;
+
+ /* Make the appropriate header */
+ hfi1_ruc_header_tbl[priv->hdr_type](qp, ohdr, bth0, bth2, middle, ps);
}
/* when sending, force a reschedule every one of these periods */
@@ -816,6 +936,8 @@ void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
static bool schedule_send_yield(struct rvt_qp *qp,
struct hfi1_pkt_state *ps)
{
+ ps->pkts_sent = true;
+
if (unlikely(time_after(jiffies, ps->timeout))) {
if (!ps->in_thread ||
workqueue_congested(ps->cpu, ps->ppd->hfi1_wq)) {
@@ -912,6 +1034,7 @@ void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
ps.timeout = jiffies + ps.timeout_int;
ps.cpu = priv->s_sde ? priv->s_sde->cpu :
cpumask_first(cpumask_of_node(ps.ppd->dd->node));
+ ps.pkts_sent = false;
/* insure a pre-built packet is handled */
ps.s_txreq = get_waiting_verbs_txreq(qp);
@@ -934,7 +1057,7 @@ void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
spin_lock_irqsave(&qp->s_lock, ps.flags);
}
} while (make_req(qp, &ps));
-
+ iowait_starve_clear(ps.pkts_sent, &priv->s_iowait);
spin_unlock_irqrestore(&qp->s_lock, ps.flags);
}
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index bfd0d5187e9b..6781bcdb10b3 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -246,7 +246,7 @@ static void __sdma_process_event(
enum sdma_events event);
static void dump_sdma_state(struct sdma_engine *sde);
static void sdma_make_progress(struct sdma_engine *sde, u64 status);
-static void sdma_desc_avail(struct sdma_engine *sde, unsigned avail);
+static void sdma_desc_avail(struct sdma_engine *sde, uint avail);
static void sdma_flush_descq(struct sdma_engine *sde);
/**
@@ -325,7 +325,7 @@ static void sdma_wait_for_packet_egress(struct sdma_engine *sde,
/* timed out - bounce the link */
dd_dev_err(dd, "%s: engine %u timeout waiting for packets to egress, remaining count %u, bouncing link\n",
__func__, sde->this_idx, (u32)reg);
- queue_work(dd->pport->hfi1_wq,
+ queue_work(dd->pport->link_wq,
&dd->pport->link_bounce_work);
break;
}
@@ -1340,10 +1340,8 @@ static void sdma_clean(struct hfi1_devdata *dd, size_t num_engines)
* @dd: hfi1_devdata
* @port: port number (currently only zero)
*
- * sdma_init initializes the specified number of engines.
- *
- * The code initializes each sde, its csrs. Interrupts
- * are not required to be enabled.
+ * Initializes each sde and its csrs.
+ * Interrupts are not required to be enabled.
*
* Returns:
* 0 - success, -errno on failure
@@ -1764,13 +1762,14 @@ retry:
*
* This is called with head_lock held.
*/
-static void sdma_desc_avail(struct sdma_engine *sde, unsigned avail)
+static void sdma_desc_avail(struct sdma_engine *sde, uint avail)
{
struct iowait *wait, *nw;
struct iowait *waits[SDMA_WAIT_BATCH_SIZE];
- unsigned i, n = 0, seq;
+ uint i, n = 0, seq, max_idx = 0;
struct sdma_txreq *stx;
struct hfi1_ibdev *dev = &sde->dd->verbs_dev;
+ u8 max_starved_cnt = 0;
#ifdef CONFIG_SDMA_VERBOSITY
dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
@@ -1805,6 +1804,9 @@ static void sdma_desc_avail(struct sdma_engine *sde, unsigned avail)
if (num_desc > avail)
break;
avail -= num_desc;
+ /* Find the most starved wait member */
+ iowait_starve_find_max(wait, &max_starved_cnt,
+ n, &max_idx);
list_del_init(&wait->list);
waits[n++] = wait;
}
@@ -1813,8 +1815,13 @@ static void sdma_desc_avail(struct sdma_engine *sde, unsigned avail)
}
} while (read_seqretry(&dev->iowait_lock, seq));
+ /* Schedule the most starved one first */
+ if (n)
+ waits[max_idx]->wakeup(waits[max_idx], SDMA_AVAIL_REASON);
+
for (i = 0; i < n; i++)
- waits[i]->wakeup(waits[i], SDMA_AVAIL_REASON);
+ if (i != max_idx)
+ waits[i]->wakeup(waits[i], SDMA_AVAIL_REASON);
}
/* head_lock must be held */
@@ -2351,7 +2358,8 @@ static inline u16 submit_tx(struct sdma_engine *sde, struct sdma_txreq *tx)
static int sdma_check_progress(
struct sdma_engine *sde,
struct iowait *wait,
- struct sdma_txreq *tx)
+ struct sdma_txreq *tx,
+ bool pkts_sent)
{
int ret;
@@ -2364,7 +2372,7 @@ static int sdma_check_progress(
seq = raw_seqcount_begin(
(const seqcount_t *)&sde->head_lock.seqcount);
- ret = wait->sleep(sde, wait, tx, seq);
+ ret = wait->sleep(sde, wait, tx, seq, pkts_sent);
if (ret == -EAGAIN)
sde->desc_avail = sdma_descq_freecnt(sde);
} else {
@@ -2378,6 +2386,7 @@ static int sdma_check_progress(
* @sde: sdma engine to use
* @wait: wait structure to use when full (may be NULL)
* @tx: sdma_txreq to submit
+ * @pkts_sent: has any packet been sent yet?
*
* The call submits the tx into the ring. If a iowait structure is non-NULL
* the packet will be queued to the list in wait.
@@ -2389,7 +2398,8 @@ static int sdma_check_progress(
*/
int sdma_send_txreq(struct sdma_engine *sde,
struct iowait *wait,
- struct sdma_txreq *tx)
+ struct sdma_txreq *tx,
+ bool pkts_sent)
{
int ret = 0;
u16 tail;
@@ -2431,7 +2441,7 @@ unlock_noconn:
ret = -ECOMM;
goto unlock;
nodesc:
- ret = sdma_check_progress(sde, wait, tx);
+ ret = sdma_check_progress(sde, wait, tx, pkts_sent);
if (ret == -EAGAIN) {
ret = 0;
goto retry;
@@ -2500,8 +2510,10 @@ retry:
}
update_tail:
total_count = submit_count + flush_count;
- if (wait)
+ if (wait) {
iowait_sdma_add(wait, total_count);
+ iowait_starve_clear(submit_count > 0, wait);
+ }
if (tail != INVALID_TAIL)
sdma_update_tail(sde, tail);
spin_unlock_irqrestore(&sde->tail_lock, flags);
@@ -2529,7 +2541,7 @@ unlock_noconn:
ret = -ECOMM;
goto update_tail;
nodesc:
- ret = sdma_check_progress(sde, wait, tx);
+ ret = sdma_check_progress(sde, wait, tx, submit_count > 0);
if (ret == -EAGAIN) {
ret = 0;
goto retry;
diff --git a/drivers/infiniband/hw/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h
index 64f10b8b5db8..107011d8613b 100644
--- a/drivers/infiniband/hw/hfi1/sdma.h
+++ b/drivers/infiniband/hw/hfi1/sdma.h
@@ -852,7 +852,8 @@ struct iowait;
int sdma_send_txreq(struct sdma_engine *sde,
struct iowait *wait,
- struct sdma_txreq *tx);
+ struct sdma_txreq *tx,
+ bool pkts_sent);
int sdma_send_txlist(struct sdma_engine *sde,
struct iowait *wait,
struct list_head *tx_list,
diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c
index 2f3bbcac1e34..6d2702ef34ac 100644
--- a/drivers/infiniband/hw/hfi1/sysfs.c
+++ b/drivers/infiniband/hw/hfi1/sysfs.c
@@ -95,7 +95,7 @@ static void port_release(struct kobject *kobj)
/* nothing to do since memory is freed by hfi1_free_devdata() */
}
-static struct bin_attribute cc_table_bin_attr = {
+static const struct bin_attribute cc_table_bin_attr = {
.attr = {.name = "cc_table_bin", .mode = 0444},
.read = read_cc_table_bin,
.size = PAGE_SIZE,
@@ -137,7 +137,7 @@ static ssize_t read_cc_setting_bin(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute cc_setting_bin_attr = {
+static const struct bin_attribute cc_setting_bin_attr = {
.attr = {.name = "cc_settings_bin", .mode = 0444},
.read = read_cc_setting_bin,
.size = PAGE_SIZE,
diff --git a/drivers/infiniband/hw/hfi1/trace.c b/drivers/infiniband/hw/hfi1/trace.c
index eafae487face..9938bb983ce6 100644
--- a/drivers/infiniband/hw/hfi1/trace.c
+++ b/drivers/infiniband/hw/hfi1/trace.c
@@ -47,7 +47,7 @@
#define CREATE_TRACE_POINTS
#include "trace.h"
-u8 ibhdr_exhdr_len(struct ib_header *hdr)
+static u8 __get_ib_hdr_len(struct ib_header *hdr)
{
struct ib_other_headers *ohdr;
u8 opcode;
@@ -61,13 +61,69 @@ u8 ibhdr_exhdr_len(struct ib_header *hdr)
0 : hdr_len_by_opcode[opcode] - (12 + 8);
}
-#define IMM_PRN "imm %d"
-#define RETH_PRN "reth vaddr 0x%.16llx rkey 0x%.8x dlen 0x%.8x"
-#define AETH_PRN "aeth syn 0x%.2x %s msn 0x%.8x"
-#define DETH_PRN "deth qkey 0x%.8x sqpn 0x%.6x"
-#define IETH_PRN "ieth rkey 0x%.8x"
-#define ATOMICACKETH_PRN "origdata %llx"
-#define ATOMICETH_PRN "vaddr 0x%llx rkey 0x%.8x sdata %llx cdata %llx"
+static u8 __get_16b_hdr_len(struct hfi1_16b_header *hdr)
+{
+ struct ib_other_headers *ohdr;
+ u8 opcode;
+
+ if (hfi1_16B_get_l4(hdr) == OPA_16B_L4_IB_LOCAL)
+ ohdr = &hdr->u.oth;
+ else
+ ohdr = &hdr->u.l.oth;
+ opcode = ib_bth_get_opcode(ohdr);
+ return hdr_len_by_opcode[opcode] == 0 ?
+ 0 : hdr_len_by_opcode[opcode] - (12 + 8 + 8);
+}
+
+u8 hfi1_trace_packet_hdr_len(struct hfi1_packet *packet)
+{
+ if (packet->etype != RHF_RCV_TYPE_BYPASS)
+ return __get_ib_hdr_len(packet->hdr);
+ else
+ return __get_16b_hdr_len(packet->hdr);
+}
+
+u8 hfi1_trace_opa_hdr_len(struct hfi1_opa_header *opa_hdr)
+{
+ if (!opa_hdr->hdr_type)
+ return __get_ib_hdr_len(&opa_hdr->ibh);
+ else
+ return __get_16b_hdr_len(&opa_hdr->opah);
+}
+
+const char *hfi1_trace_get_packet_str(struct hfi1_packet *packet)
+{
+ if (packet->etype != RHF_RCV_TYPE_BYPASS)
+ return "IB";
+
+ switch (hfi1_16B_get_l2(packet->hdr)) {
+ case 0:
+ return "0";
+ case 1:
+ return "1";
+ case 2:
+ return "16B";
+ case 3:
+ return "9B";
+ }
+ return "";
+}
+
+const char *hfi1_trace_get_packet_type_str(u8 l4)
+{
+ if (l4)
+ return "16B";
+ else
+ return "9B";
+}
+
+#define IMM_PRN "imm:%d"
+#define RETH_PRN "reth vaddr:0x%.16llx rkey:0x%.8x dlen:0x%.8x"
+#define AETH_PRN "aeth syn:0x%.2x %s msn:0x%.8x"
+#define DETH_PRN "deth qkey:0x%.8x sqpn:0x%.6x"
+#define IETH_PRN "ieth rkey:0x%.8x"
+#define ATOMICACKETH_PRN "origdata:%llx"
+#define ATOMICETH_PRN "vaddr:0x%llx rkey:0x%.8x sdata:%llx cdata:%llx"
#define OP(transport, op) IB_OPCODE_## transport ## _ ## op
@@ -84,6 +140,125 @@ static const char *parse_syndrome(u8 syndrome)
return "";
}
+void hfi1_trace_parse_9b_bth(struct ib_other_headers *ohdr,
+ u8 *ack, u8 *becn, u8 *fecn, u8 *mig,
+ u8 *se, u8 *pad, u8 *opcode, u8 *tver,
+ u16 *pkey, u32 *psn, u32 *qpn)
+{
+ *ack = ib_bth_get_ackreq(ohdr);
+ *becn = ib_bth_get_becn(ohdr);
+ *fecn = ib_bth_get_fecn(ohdr);
+ *mig = ib_bth_get_migreq(ohdr);
+ *se = ib_bth_get_se(ohdr);
+ *pad = ib_bth_get_pad(ohdr);
+ *opcode = ib_bth_get_opcode(ohdr);
+ *tver = ib_bth_get_tver(ohdr);
+ *pkey = ib_bth_get_pkey(ohdr);
+ *psn = ib_bth_get_psn(ohdr);
+ *qpn = ib_bth_get_qpn(ohdr);
+}
+
+void hfi1_trace_parse_16b_bth(struct ib_other_headers *ohdr,
+ u8 *ack, u8 *mig, u8 *opcode,
+ u8 *pad, u8 *se, u8 *tver,
+ u32 *psn, u32 *qpn)
+{
+ *ack = ib_bth_get_ackreq(ohdr);
+ *mig = ib_bth_get_migreq(ohdr);
+ *opcode = ib_bth_get_opcode(ohdr);
+ *pad = ib_bth_get_pad(ohdr);
+ *se = ib_bth_get_se(ohdr);
+ *tver = ib_bth_get_tver(ohdr);
+ *psn = ib_bth_get_psn(ohdr);
+ *qpn = ib_bth_get_qpn(ohdr);
+}
+
+void hfi1_trace_parse_9b_hdr(struct ib_header *hdr, bool sc5,
+ u8 *lnh, u8 *lver, u8 *sl, u8 *sc,
+ u16 *len, u32 *dlid, u32 *slid)
+{
+ *lnh = ib_get_lnh(hdr);
+ *lver = ib_get_lver(hdr);
+ *sl = ib_get_sl(hdr);
+ *sc = ib_get_sc(hdr) | (sc5 << 4);
+ *len = ib_get_len(hdr);
+ *dlid = ib_get_dlid(hdr);
+ *slid = ib_get_slid(hdr);
+}
+
+void hfi1_trace_parse_16b_hdr(struct hfi1_16b_header *hdr,
+ u8 *age, u8 *becn, u8 *fecn,
+ u8 *l4, u8 *rc, u8 *sc,
+ u16 *entropy, u16 *len, u16 *pkey,
+ u32 *dlid, u32 *slid)
+{
+ *age = hfi1_16B_get_age(hdr);
+ *becn = hfi1_16B_get_becn(hdr);
+ *fecn = hfi1_16B_get_fecn(hdr);
+ *l4 = hfi1_16B_get_l4(hdr);
+ *rc = hfi1_16B_get_rc(hdr);
+ *sc = hfi1_16B_get_sc(hdr);
+ *entropy = hfi1_16B_get_entropy(hdr);
+ *len = hfi1_16B_get_len(hdr);
+ *pkey = hfi1_16B_get_pkey(hdr);
+ *dlid = hfi1_16B_get_dlid(hdr);
+ *slid = hfi1_16B_get_slid(hdr);
+}
+
+#define LRH_PRN "len:%d sc:%d dlid:0x%.4x slid:0x%.4x "
+#define LRH_9B_PRN "lnh:%d,%s lver:%d sl:%d"
+#define LRH_16B_PRN "age:%d becn:%d fecn:%d l4:%d " \
+ "rc:%d sc:%d pkey:0x%.4x entropy:0x%.4x"
+const char *hfi1_trace_fmt_lrh(struct trace_seq *p, bool bypass,
+ u8 age, u8 becn, u8 fecn, u8 l4,
+ u8 lnh, const char *lnh_name, u8 lver,
+ u8 rc, u8 sc, u8 sl, u16 entropy,
+ u16 len, u16 pkey, u32 dlid, u32 slid)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ trace_seq_printf(p, LRH_PRN, len, sc, dlid, slid);
+
+ if (bypass)
+ trace_seq_printf(p, LRH_16B_PRN,
+ age, becn, fecn, l4, rc, sc, pkey, entropy);
+
+ else
+ trace_seq_printf(p, LRH_9B_PRN,
+ lnh, lnh_name, lver, sl);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+#define BTH_9B_PRN \
+ "op:0x%.2x,%s se:%d m:%d pad:%d tver:%d pkey:0x%.4x " \
+ "f:%d b:%d qpn:0x%.6x a:%d psn:0x%.8x"
+#define BTH_16B_PRN \
+ "op:0x%.2x,%s se:%d m:%d pad:%d tver:%d " \
+ "qpn:0x%.6x a:%d psn:0x%.8x"
+const char *hfi1_trace_fmt_bth(struct trace_seq *p, bool bypass,
+ u8 ack, u8 becn, u8 fecn, u8 mig,
+ u8 se, u8 pad, u8 opcode, const char *opname,
+ u8 tver, u16 pkey, u32 psn, u32 qpn)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ if (bypass)
+ trace_seq_printf(p, BTH_16B_PRN,
+ opcode, opname,
+ se, mig, pad, tver, qpn, ack, psn);
+
+ else
+ trace_seq_printf(p, BTH_9B_PRN,
+ opcode, opname,
+ se, mig, pad, tver, pkey, fecn, becn,
+ qpn, ack, psn);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
const char *parse_everbs_hdrs(
struct trace_seq *p,
u8 opcode,
diff --git a/drivers/infiniband/hw/hfi1/trace.h b/drivers/infiniband/hw/hfi1/trace.h
index 92dc88f013c9..af50c0793450 100644
--- a/drivers/infiniband/hw/hfi1/trace.h
+++ b/drivers/infiniband/hw/hfi1/trace.h
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2017 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -51,3 +51,4 @@
#include "trace_rc.h"
#include "trace_rx.h"
#include "trace_tx.h"
+#include "trace_mmu.h"
diff --git a/drivers/infiniband/hw/hfi1/trace_ibhdrs.h b/drivers/infiniband/hw/hfi1/trace_ibhdrs.h
index 090f6b506953..6721f84dafa5 100644
--- a/drivers/infiniband/hw/hfi1/trace_ibhdrs.h
+++ b/drivers/infiniband/hw/hfi1/trace_ibhdrs.h
@@ -55,8 +55,79 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM hfi1_ibhdrs
+#define ib_opcode_name(opcode) { IB_OPCODE_##opcode, #opcode }
+#define show_ib_opcode(opcode) \
+__print_symbolic(opcode, \
+ ib_opcode_name(RC_SEND_FIRST), \
+ ib_opcode_name(RC_SEND_MIDDLE), \
+ ib_opcode_name(RC_SEND_LAST), \
+ ib_opcode_name(RC_SEND_LAST_WITH_IMMEDIATE), \
+ ib_opcode_name(RC_SEND_ONLY), \
+ ib_opcode_name(RC_SEND_ONLY_WITH_IMMEDIATE), \
+ ib_opcode_name(RC_RDMA_WRITE_FIRST), \
+ ib_opcode_name(RC_RDMA_WRITE_MIDDLE), \
+ ib_opcode_name(RC_RDMA_WRITE_LAST), \
+ ib_opcode_name(RC_RDMA_WRITE_LAST_WITH_IMMEDIATE), \
+ ib_opcode_name(RC_RDMA_WRITE_ONLY), \
+ ib_opcode_name(RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE), \
+ ib_opcode_name(RC_RDMA_READ_REQUEST), \
+ ib_opcode_name(RC_RDMA_READ_RESPONSE_FIRST), \
+ ib_opcode_name(RC_RDMA_READ_RESPONSE_MIDDLE), \
+ ib_opcode_name(RC_RDMA_READ_RESPONSE_LAST), \
+ ib_opcode_name(RC_RDMA_READ_RESPONSE_ONLY), \
+ ib_opcode_name(RC_ACKNOWLEDGE), \
+ ib_opcode_name(RC_ATOMIC_ACKNOWLEDGE), \
+ ib_opcode_name(RC_COMPARE_SWAP), \
+ ib_opcode_name(RC_FETCH_ADD), \
+ ib_opcode_name(UC_SEND_FIRST), \
+ ib_opcode_name(UC_SEND_MIDDLE), \
+ ib_opcode_name(UC_SEND_LAST), \
+ ib_opcode_name(UC_SEND_LAST_WITH_IMMEDIATE), \
+ ib_opcode_name(UC_SEND_ONLY), \
+ ib_opcode_name(UC_SEND_ONLY_WITH_IMMEDIATE), \
+ ib_opcode_name(UC_RDMA_WRITE_FIRST), \
+ ib_opcode_name(UC_RDMA_WRITE_MIDDLE), \
+ ib_opcode_name(UC_RDMA_WRITE_LAST), \
+ ib_opcode_name(UC_RDMA_WRITE_LAST_WITH_IMMEDIATE), \
+ ib_opcode_name(UC_RDMA_WRITE_ONLY), \
+ ib_opcode_name(UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE), \
+ ib_opcode_name(UD_SEND_ONLY), \
+ ib_opcode_name(UD_SEND_ONLY_WITH_IMMEDIATE), \
+ ib_opcode_name(CNP))
+
u8 ibhdr_exhdr_len(struct ib_header *hdr);
const char *parse_everbs_hdrs(struct trace_seq *p, u8 opcode, void *ehdrs);
+u8 hfi1_trace_opa_hdr_len(struct hfi1_opa_header *opah);
+u8 hfi1_trace_packet_hdr_len(struct hfi1_packet *packet);
+const char *hfi1_trace_get_packet_type_str(u8 l4);
+const char *hfi1_trace_get_packet_str(struct hfi1_packet *packet);
+void hfi1_trace_parse_9b_bth(struct ib_other_headers *ohdr,
+ u8 *ack, u8 *becn, u8 *fecn, u8 *mig,
+ u8 *se, u8 *pad, u8 *opcode, u8 *tver,
+ u16 *pkey, u32 *psn, u32 *qpn);
+void hfi1_trace_parse_9b_hdr(struct ib_header *hdr, bool sc5,
+ u8 *lnh, u8 *lver, u8 *sl, u8 *sc,
+ u16 *len, u32 *dlid, u32 *slid);
+void hfi1_trace_parse_16b_bth(struct ib_other_headers *ohdr,
+ u8 *ack, u8 *mig, u8 *opcode,
+ u8 *pad, u8 *se, u8 *tver,
+ u32 *psn, u32 *qpn);
+void hfi1_trace_parse_16b_hdr(struct hfi1_16b_header *hdr,
+ u8 *age, u8 *becn, u8 *fecn,
+ u8 *l4, u8 *rc, u8 *sc,
+ u16 *entropy, u16 *len, u16 *pkey,
+ u32 *dlid, u32 *slid);
+
+const char *hfi1_trace_fmt_lrh(struct trace_seq *p, bool bypass,
+ u8 age, u8 becn, u8 fecn, u8 l4,
+ u8 lnh, const char *lnh_name, u8 lver,
+ u8 rc, u8 sc, u8 sl, u16 entropy,
+ u16 len, u16 pkey, u32 dlid, u32 slid);
+
+const char *hfi1_trace_fmt_bth(struct trace_seq *p, bool bypass,
+ u8 ack, u8 becn, u8 fecn, u8 mig,
+ u8 se, u8 pad, u8 opcode, const char *opname,
+ u8 tver, u16 pkey, u32 psn, u32 qpn);
#define __parse_ib_ehdrs(op, ehdrs) parse_everbs_hdrs(p, op, ehdrs)
@@ -65,140 +136,303 @@ const char *parse_everbs_hdrs(struct trace_seq *p, u8 opcode, void *ehdrs);
__print_symbolic(lrh, \
lrh_name(LRH_BTH), \
lrh_name(LRH_GRH))
+#define PKT_ENTRY(pkt) __string(ptype, hfi1_trace_get_packet_str(packet))
+#define PKT_ASSIGN(pkt) __assign_str(ptype, hfi1_trace_get_packet_str(packet))
-#define LRH_PRN "vl %d lver %d sl %d lnh %d,%s dlid %.4x len %d slid %.4x"
-#define BTH_PRN \
- "op 0x%.2x,%s se %d m %d pad %d tver %d pkey 0x%.4x " \
- "f %d b %d qpn 0x%.6x a %d psn 0x%.8x"
-#define EHDR_PRN "%s"
-
-DECLARE_EVENT_CLASS(hfi1_ibhdr_template,
+DECLARE_EVENT_CLASS(hfi1_input_ibhdr_template,
TP_PROTO(struct hfi1_devdata *dd,
- struct ib_header *hdr),
- TP_ARGS(dd, hdr),
+ struct hfi1_packet *packet,
+ bool sc5),
+ TP_ARGS(dd, packet, sc5),
TP_STRUCT__entry(
DD_DEV_ENTRY(dd)
- /* LRH */
- __field(u8, vl)
+ PKT_ENTRY(packet)
+ __field(bool, bypass)
+ __field(u8, ack)
+ __field(u8, age)
+ __field(u8, becn)
+ __field(u8, fecn)
+ __field(u8, l4)
+ __field(u8, lnh)
__field(u8, lver)
+ __field(u8, mig)
+ __field(u8, opcode)
+ __field(u8, pad)
+ __field(u8, rc)
+ __field(u8, sc)
+ __field(u8, se)
__field(u8, sl)
- __field(u8, lnh)
- __field(u16, dlid)
+ __field(u8, tver)
+ __field(u16, entropy)
__field(u16, len)
- __field(u16, slid)
- /* BTH */
+ __field(u16, pkey)
+ __field(u32, dlid)
+ __field(u32, psn)
+ __field(u32, qpn)
+ __field(u32, slid)
+ /* extended headers */
+ __dynamic_array(u8, ehdrs,
+ hfi1_trace_packet_hdr_len(packet))
+ ),
+ TP_fast_assign(
+ DD_DEV_ASSIGN(dd);
+ PKT_ASSIGN(packet);
+
+ if (packet->etype == RHF_RCV_TYPE_BYPASS) {
+ __entry->bypass = true;
+ hfi1_trace_parse_16b_hdr(packet->hdr,
+ &__entry->age,
+ &__entry->becn,
+ &__entry->fecn,
+ &__entry->l4,
+ &__entry->rc,
+ &__entry->sc,
+ &__entry->entropy,
+ &__entry->len,
+ &__entry->pkey,
+ &__entry->dlid,
+ &__entry->slid);
+
+ hfi1_trace_parse_16b_bth(packet->ohdr,
+ &__entry->ack,
+ &__entry->mig,
+ &__entry->opcode,
+ &__entry->pad,
+ &__entry->se,
+ &__entry->tver,
+ &__entry->psn,
+ &__entry->qpn);
+ } else {
+ __entry->bypass = false;
+ hfi1_trace_parse_9b_hdr(packet->hdr, sc5,
+ &__entry->lnh,
+ &__entry->lver,
+ &__entry->sl,
+ &__entry->sc,
+ &__entry->len,
+ &__entry->dlid,
+ &__entry->slid);
+
+ hfi1_trace_parse_9b_bth(packet->ohdr,
+ &__entry->ack,
+ &__entry->becn,
+ &__entry->fecn,
+ &__entry->mig,
+ &__entry->se,
+ &__entry->pad,
+ &__entry->opcode,
+ &__entry->tver,
+ &__entry->pkey,
+ &__entry->psn,
+ &__entry->qpn);
+ }
+ /* extended headers */
+ memcpy(__get_dynamic_array(ehdrs),
+ &packet->ohdr->u,
+ __get_dynamic_array_len(ehdrs));
+ ),
+ TP_printk("[%s] (%s) %s %s hlen:%d %s",
+ __get_str(dev),
+ __get_str(ptype),
+ hfi1_trace_fmt_lrh(p,
+ __entry->bypass,
+ __entry->age,
+ __entry->becn,
+ __entry->fecn,
+ __entry->l4,
+ __entry->lnh,
+ show_lnh(__entry->lnh),
+ __entry->lver,
+ __entry->rc,
+ __entry->sc,
+ __entry->sl,
+ __entry->entropy,
+ __entry->len,
+ __entry->pkey,
+ __entry->dlid,
+ __entry->slid),
+ hfi1_trace_fmt_bth(p,
+ __entry->bypass,
+ __entry->ack,
+ __entry->becn,
+ __entry->fecn,
+ __entry->mig,
+ __entry->se,
+ __entry->pad,
+ __entry->opcode,
+ show_ib_opcode(__entry->opcode),
+ __entry->tver,
+ __entry->pkey,
+ __entry->psn,
+ __entry->qpn),
+ /* extended headers */
+ __get_dynamic_array_len(ehdrs),
+ __parse_ib_ehdrs(
+ __entry->opcode,
+ (void *)__get_dynamic_array(ehdrs))
+ )
+);
+
+DEFINE_EVENT(hfi1_input_ibhdr_template, input_ibhdr,
+ TP_PROTO(struct hfi1_devdata *dd,
+ struct hfi1_packet *packet, bool sc5),
+ TP_ARGS(dd, packet, sc5));
+
+DECLARE_EVENT_CLASS(hfi1_output_ibhdr_template,
+ TP_PROTO(struct hfi1_devdata *dd,
+ struct hfi1_opa_header *opah, bool sc5),
+ TP_ARGS(dd, opah, sc5),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(dd)
+ __field(bool, bypass)
+ __field(u8, ack)
+ __field(u8, age)
+ __field(u8, becn)
+ __field(u8, fecn)
+ __field(u8, l4)
+ __field(u8, lnh)
+ __field(u8, lver)
+ __field(u8, mig)
__field(u8, opcode)
- __field(u8, se)
- __field(u8, m)
__field(u8, pad)
+ __field(u8, rc)
+ __field(u8, sc)
+ __field(u8, se)
+ __field(u8, sl)
__field(u8, tver)
+ __field(u16, entropy)
+ __field(u16, len)
__field(u16, pkey)
- __field(u8, f)
- __field(u8, b)
- __field(u32, qpn)
- __field(u8, a)
+ __field(u32, dlid)
__field(u32, psn)
+ __field(u32, qpn)
+ __field(u32, slid)
/* extended headers */
- __dynamic_array(u8, ehdrs, ibhdr_exhdr_len(hdr))
+ __dynamic_array(u8, ehdrs,
+ hfi1_trace_opa_hdr_len(opah))
),
- TP_fast_assign(
+ TP_fast_assign(
struct ib_other_headers *ohdr;
DD_DEV_ASSIGN(dd);
- /* LRH */
- __entry->vl =
- (u8)(be16_to_cpu(hdr->lrh[0]) >> 12);
- __entry->lver =
- (u8)(be16_to_cpu(hdr->lrh[0]) >> 8) & 0xf;
- __entry->sl =
- (u8)(be16_to_cpu(hdr->lrh[0]) >> 4) & 0xf;
- __entry->lnh =
- (u8)(be16_to_cpu(hdr->lrh[0]) & 3);
- __entry->dlid =
- be16_to_cpu(hdr->lrh[1]);
- /* allow for larger len */
- __entry->len =
- be16_to_cpu(hdr->lrh[2]);
- __entry->slid =
- be16_to_cpu(hdr->lrh[3]);
- /* BTH */
- if (__entry->lnh == HFI1_LRH_BTH)
- ohdr = &hdr->u.oth;
- else
- ohdr = &hdr->u.l.oth;
- __entry->opcode =
- (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
- __entry->se =
- (be32_to_cpu(ohdr->bth[0]) >> 23) & 1;
- __entry->m =
- (be32_to_cpu(ohdr->bth[0]) >> 22) & 1;
- __entry->pad =
- (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
- __entry->tver =
- (be32_to_cpu(ohdr->bth[0]) >> 16) & 0xf;
- __entry->pkey =
- be32_to_cpu(ohdr->bth[0]) & 0xffff;
- __entry->f =
- (be32_to_cpu(ohdr->bth[1]) >> IB_FECN_SHIFT) &
- IB_FECN_MASK;
- __entry->b =
- (be32_to_cpu(ohdr->bth[1]) >> IB_BECN_SHIFT) &
- IB_BECN_MASK;
- __entry->qpn =
- be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
- __entry->a =
- (be32_to_cpu(ohdr->bth[2]) >> 31) & 1;
- /* allow for larger PSN */
- __entry->psn =
- be32_to_cpu(ohdr->bth[2]) & 0x7fffffff;
+
+ if (opah->hdr_type) {
+ __entry->bypass = true;
+ hfi1_trace_parse_16b_hdr(&opah->opah,
+ &__entry->age,
+ &__entry->becn,
+ &__entry->fecn,
+ &__entry->l4,
+ &__entry->rc,
+ &__entry->sc,
+ &__entry->entropy,
+ &__entry->len,
+ &__entry->pkey,
+ &__entry->dlid,
+ &__entry->slid);
+
+ if (entry->l4 == OPA_16B_L4_IB_LOCAL)
+ ohdr = &opah->opah.u.oth;
+ else
+ ohdr = &opah->opah.u.l.oth;
+ hfi1_trace_parse_16b_bth(ohdr,
+ &__entry->ack,
+ &__entry->mig,
+ &__entry->opcode,
+ &__entry->pad,
+ &__entry->se,
+ &__entry->tver,
+ &__entry->psn,
+ &__entry->qpn);
+ } else {
+ __entry->bypass = false;
+ hfi1_trace_parse_9b_hdr(&opah->ibh, sc5,
+ &__entry->lnh,
+ &__entry->lver,
+ &__entry->sl,
+ &__entry->sc,
+ &__entry->len,
+ &__entry->dlid,
+ &__entry->slid);
+ if (entry->lnh == HFI1_LRH_BTH)
+ ohdr = &opah->ibh.u.oth;
+ else
+ ohdr = &opah->ibh.u.l.oth;
+ hfi1_trace_parse_9b_bth(ohdr,
+ &__entry->ack,
+ &__entry->becn,
+ &__entry->fecn,
+ &__entry->mig,
+ &__entry->se,
+ &__entry->pad,
+ &__entry->opcode,
+ &__entry->tver,
+ &__entry->pkey,
+ &__entry->psn,
+ &__entry->qpn);
+ }
+
/* extended headers */
- memcpy(__get_dynamic_array(ehdrs), &ohdr->u,
- ibhdr_exhdr_len(hdr));
- ),
- TP_printk("[%s] " LRH_PRN " " BTH_PRN " " EHDR_PRN,
- __get_str(dev),
- /* LRH */
- __entry->vl,
- __entry->lver,
- __entry->sl,
- __entry->lnh, show_lnh(__entry->lnh),
- __entry->dlid,
- __entry->len,
- __entry->slid,
- /* BTH */
- __entry->opcode, show_ib_opcode(__entry->opcode),
- __entry->se,
- __entry->m,
- __entry->pad,
- __entry->tver,
- __entry->pkey,
- __entry->f,
- __entry->b,
- __entry->qpn,
- __entry->a,
- __entry->psn,
- /* extended headers */
- __parse_ib_ehdrs(
- __entry->opcode,
- (void *)__get_dynamic_array(ehdrs))
- )
+ memcpy(__get_dynamic_array(ehdrs),
+ &ohdr->u, __get_dynamic_array_len(ehdrs));
+ ),
+ TP_printk("[%s] (%s) %s %s hlen:%d %s",
+ __get_str(dev),
+ hfi1_trace_get_packet_type_str(__entry->l4),
+ hfi1_trace_fmt_lrh(p,
+ __entry->bypass,
+ __entry->age,
+ __entry->becn,
+ __entry->fecn,
+ __entry->l4,
+ __entry->lnh,
+ show_lnh(__entry->lnh),
+ __entry->lver,
+ __entry->rc,
+ __entry->sc,
+ __entry->sl,
+ __entry->entropy,
+ __entry->len,
+ __entry->pkey,
+ __entry->dlid,
+ __entry->slid),
+ hfi1_trace_fmt_bth(p,
+ __entry->bypass,
+ __entry->ack,
+ __entry->becn,
+ __entry->fecn,
+ __entry->mig,
+ __entry->se,
+ __entry->pad,
+ __entry->opcode,
+ show_ib_opcode(__entry->opcode),
+ __entry->tver,
+ __entry->pkey,
+ __entry->psn,
+ __entry->qpn),
+ /* extended headers */
+ __get_dynamic_array_len(ehdrs),
+ __parse_ib_ehdrs(
+ __entry->opcode,
+ (void *)__get_dynamic_array(ehdrs))
+ )
);
-DEFINE_EVENT(hfi1_ibhdr_template, input_ibhdr,
- TP_PROTO(struct hfi1_devdata *dd, struct ib_header *hdr),
- TP_ARGS(dd, hdr));
+DEFINE_EVENT(hfi1_output_ibhdr_template, pio_output_ibhdr,
+ TP_PROTO(struct hfi1_devdata *dd,
+ struct hfi1_opa_header *opah, bool sc5),
+ TP_ARGS(dd, opah, sc5));
-DEFINE_EVENT(hfi1_ibhdr_template, pio_output_ibhdr,
- TP_PROTO(struct hfi1_devdata *dd, struct ib_header *hdr),
- TP_ARGS(dd, hdr));
+DEFINE_EVENT(hfi1_output_ibhdr_template, ack_output_ibhdr,
+ TP_PROTO(struct hfi1_devdata *dd,
+ struct hfi1_opa_header *opah, bool sc5),
+ TP_ARGS(dd, opah, sc5));
-DEFINE_EVENT(hfi1_ibhdr_template, ack_output_ibhdr,
- TP_PROTO(struct hfi1_devdata *dd, struct ib_header *hdr),
- TP_ARGS(dd, hdr));
+DEFINE_EVENT(hfi1_output_ibhdr_template, sdma_output_ibhdr,
+ TP_PROTO(struct hfi1_devdata *dd,
+ struct hfi1_opa_header *opah, bool sc5),
+ TP_ARGS(dd, opah, sc5));
-DEFINE_EVENT(hfi1_ibhdr_template, sdma_output_ibhdr,
- TP_PROTO(struct hfi1_devdata *dd, struct ib_header *hdr),
- TP_ARGS(dd, hdr));
#endif /* __HFI1_TRACE_IBHDRS_H */
diff --git a/drivers/infiniband/hw/hfi1/trace_misc.h b/drivers/infiniband/hw/hfi1/trace_misc.h
index deac77ddaeab..8db2253523ff 100644
--- a/drivers/infiniband/hw/hfi1/trace_misc.h
+++ b/drivers/infiniband/hw/hfi1/trace_misc.h
@@ -72,6 +72,26 @@ TRACE_EVENT(hfi1_interrupt,
__entry->src)
);
+DECLARE_EVENT_CLASS(
+ hfi1_csr_template,
+ TP_PROTO(void __iomem *addr, u64 value),
+ TP_ARGS(addr, value),
+ TP_STRUCT__entry(
+ __field(void __iomem *, addr)
+ __field(u64, value)
+ ),
+ TP_fast_assign(
+ __entry->addr = addr;
+ __entry->value = value;
+ ),
+ TP_printk("addr %p value %llx", __entry->addr, __entry->value)
+);
+
+DEFINE_EVENT(
+ hfi1_csr_template, hfi1_write_rcvarray,
+ TP_PROTO(void __iomem *addr, u64 value),
+ TP_ARGS(addr, value));
+
#ifdef CONFIG_FAULT_INJECTION
TRACE_EVENT(hfi1_fault_opcode,
TP_PROTO(struct rvt_qp *qp, u8 opcode),
diff --git a/drivers/infiniband/hw/hfi1/trace_mmu.h b/drivers/infiniband/hw/hfi1/trace_mmu.h
new file mode 100644
index 000000000000..3b7abbc382c2
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/trace_mmu.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright(c) 2017 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#if !defined(__HFI1_TRACE_MMU_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __HFI1_TRACE_MMU_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include "hfi.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hfi1_mmu
+
+DECLARE_EVENT_CLASS(hfi1_mmu_rb_template,
+ TP_PROTO(unsigned long addr, unsigned long len),
+ TP_ARGS(addr, len),
+ TP_STRUCT__entry(__field(unsigned long, addr)
+ __field(unsigned long, len)
+ ),
+ TP_fast_assign(__entry->addr = addr;
+ __entry->len = len;
+ ),
+ TP_printk("MMU node addr 0x%lx, len %lu",
+ __entry->addr,
+ __entry->len
+ )
+);
+
+DEFINE_EVENT(hfi1_mmu_rb_template, hfi1_mmu_rb_insert,
+ TP_PROTO(unsigned long addr, unsigned long len),
+ TP_ARGS(addr, len));
+
+DEFINE_EVENT(hfi1_mmu_rb_template, hfi1_mmu_rb_search,
+ TP_PROTO(unsigned long addr, unsigned long len),
+ TP_ARGS(addr, len));
+
+DEFINE_EVENT(hfi1_mmu_rb_template, hfi1_mmu_rb_remove,
+ TP_PROTO(unsigned long addr, unsigned long len),
+ TP_ARGS(addr, len));
+
+DEFINE_EVENT(hfi1_mmu_rb_template, hfi1_mmu_mem_invalidate,
+ TP_PROTO(unsigned long addr, unsigned long len),
+ TP_ARGS(addr, len));
+
+#endif /* __HFI1_TRACE_MMU_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_mmu
+#include <trace/define_trace.h>
diff --git a/drivers/infiniband/hw/hfi1/trace_rx.h b/drivers/infiniband/hw/hfi1/trace_rx.h
index f77e59fb43fe..f9909d240dcc 100644
--- a/drivers/infiniband/hw/hfi1/trace_rx.h
+++ b/drivers/infiniband/hw/hfi1/trace_rx.h
@@ -52,9 +52,25 @@
#include "hfi.h"
+#define tidtype_name(type) { PT_##type, #type }
+#define show_tidtype(type) \
+__print_symbolic(type, \
+ tidtype_name(EXPECTED), \
+ tidtype_name(EAGER), \
+ tidtype_name(INVALID)) \
+
#undef TRACE_SYSTEM
#define TRACE_SYSTEM hfi1_rx
+#define packettype_name(etype) { RHF_RCV_TYPE_##etype, #etype }
+#define show_packettype(etype) \
+__print_symbolic(etype, \
+ packettype_name(EXPECTED), \
+ packettype_name(EAGER), \
+ packettype_name(IB), \
+ packettype_name(ERROR), \
+ packettype_name(BYPASS))
+
TRACE_EVENT(hfi1_rcvhdr,
TP_PROTO(struct hfi1_devdata *dd,
u32 ctxt,
@@ -98,24 +114,24 @@ TRACE_EVENT(hfi1_rcvhdr,
);
TRACE_EVENT(hfi1_receive_interrupt,
- TP_PROTO(struct hfi1_devdata *dd, u32 ctxt),
- TP_ARGS(dd, ctxt),
+ TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd),
+ TP_ARGS(dd, rcd),
TP_STRUCT__entry(DD_DEV_ENTRY(dd)
__field(u32, ctxt)
__field(u8, slow_path)
__field(u8, dma_rtail)
),
TP_fast_assign(DD_DEV_ASSIGN(dd);
- __entry->ctxt = ctxt;
- if (dd->rcd[ctxt]->do_interrupt ==
+ __entry->ctxt = rcd->ctxt;
+ if (rcd->do_interrupt ==
&handle_receive_interrupt) {
__entry->slow_path = 1;
__entry->dma_rtail = 0xFF;
- } else if (dd->rcd[ctxt]->do_interrupt ==
+ } else if (rcd->do_interrupt ==
&handle_receive_interrupt_dma_rtail){
__entry->dma_rtail = 1;
__entry->slow_path = 0;
- } else if (dd->rcd[ctxt]->do_interrupt ==
+ } else if (rcd->do_interrupt ==
&handle_receive_interrupt_nodma_rtail) {
__entry->dma_rtail = 0;
__entry->slow_path = 0;
@@ -129,7 +145,8 @@ TRACE_EVENT(hfi1_receive_interrupt,
)
);
-TRACE_EVENT(hfi1_exp_tid_reg,
+DECLARE_EVENT_CLASS(
+ hfi1_exp_tid_reg_unreg,
TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr,
u32 npages, unsigned long va, unsigned long pa,
dma_addr_t dma),
@@ -163,38 +180,45 @@ TRACE_EVENT(hfi1_exp_tid_reg,
)
);
-TRACE_EVENT(hfi1_exp_tid_unreg,
- TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr, u32 npages,
- unsigned long va, unsigned long pa, dma_addr_t dma),
- TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma),
- TP_STRUCT__entry(
- __field(unsigned int, ctxt)
- __field(u16, subctxt)
- __field(u32, rarr)
- __field(u32, npages)
- __field(unsigned long, va)
- __field(unsigned long, pa)
- __field(dma_addr_t, dma)
- ),
- TP_fast_assign(
- __entry->ctxt = ctxt;
- __entry->subctxt = subctxt;
- __entry->rarr = rarr;
- __entry->npages = npages;
- __entry->va = va;
- __entry->pa = pa;
- __entry->dma = dma;
- ),
- TP_printk("[%u:%u] entry:%u, %u pages @ 0x%lx, va:0x%lx dma:0x%llx",
- __entry->ctxt,
- __entry->subctxt,
- __entry->rarr,
- __entry->npages,
- __entry->pa,
- __entry->va,
- __entry->dma
- )
- );
+DEFINE_EVENT(
+ hfi1_exp_tid_reg_unreg, hfi1_exp_tid_unreg,
+ TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr, u32 npages,
+ unsigned long va, unsigned long pa, dma_addr_t dma),
+ TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma));
+
+DEFINE_EVENT(
+ hfi1_exp_tid_reg_unreg, hfi1_exp_tid_reg,
+ TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr, u32 npages,
+ unsigned long va, unsigned long pa, dma_addr_t dma),
+ TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma));
+
+TRACE_EVENT(
+ hfi1_put_tid,
+ TP_PROTO(struct hfi1_devdata *dd,
+ u32 index, u32 type, unsigned long pa, u16 order),
+ TP_ARGS(dd, index, type, pa, order),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(dd)
+ __field(unsigned long, pa);
+ __field(u32, index);
+ __field(u32, type);
+ __field(u16, order);
+ ),
+ TP_fast_assign(
+ DD_DEV_ASSIGN(dd);
+ __entry->pa = pa;
+ __entry->index = index;
+ __entry->type = type;
+ __entry->order = order;
+ ),
+ TP_printk("[%s] type %s pa %lx index %u order %u",
+ __get_str(dev),
+ show_tidtype(__entry->type),
+ __entry->pa,
+ __entry->index,
+ __entry->order
+ )
+);
TRACE_EVENT(hfi1_exp_tid_inval,
TP_PROTO(unsigned int ctxt, u16 subctxt, unsigned long va, u32 rarr,
diff --git a/drivers/infiniband/hw/hfi1/trace_tx.h b/drivers/infiniband/hw/hfi1/trace_tx.h
index c59809a7f121..c57af3b31fe1 100644
--- a/drivers/infiniband/hw/hfi1/trace_tx.h
+++ b/drivers/infiniband/hw/hfi1/trace_tx.h
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2017 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -198,6 +198,140 @@ TRACE_EVENT(hfi1_sdma_engine_select,
)
);
+TRACE_EVENT(hfi1_sdma_user_free_queues,
+ TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt),
+ TP_ARGS(dd, ctxt, subctxt),
+ TP_STRUCT__entry(DD_DEV_ENTRY(dd)
+ __field(u16, ctxt)
+ __field(u16, subctxt)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(dd);
+ __entry->ctxt = ctxt;
+ __entry->subctxt = subctxt;
+ ),
+ TP_printk("[%s] SDMA [%u:%u] Freeing user SDMA queues",
+ __get_str(dev),
+ __entry->ctxt,
+ __entry->subctxt
+ )
+);
+
+TRACE_EVENT(hfi1_sdma_user_process_request,
+ TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
+ u16 comp_idx),
+ TP_ARGS(dd, ctxt, subctxt, comp_idx),
+ TP_STRUCT__entry(DD_DEV_ENTRY(dd)
+ __field(u16, ctxt)
+ __field(u16, subctxt)
+ __field(u16, comp_idx)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(dd);
+ __entry->ctxt = ctxt;
+ __entry->subctxt = subctxt;
+ __entry->comp_idx = comp_idx;
+ ),
+ TP_printk("[%s] SDMA [%u:%u] Using req/comp entry: %u",
+ __get_str(dev),
+ __entry->ctxt,
+ __entry->subctxt,
+ __entry->comp_idx
+ )
+);
+
+DECLARE_EVENT_CLASS(
+ hfi1_sdma_value_template,
+ TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt, u16 comp_idx,
+ u32 value),
+ TP_ARGS(dd, ctxt, subctxt, comp_idx, value),
+ TP_STRUCT__entry(DD_DEV_ENTRY(dd)
+ __field(u16, ctxt)
+ __field(u16, subctxt)
+ __field(u16, comp_idx)
+ __field(u32, value)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(dd);
+ __entry->ctxt = ctxt;
+ __entry->subctxt = subctxt;
+ __entry->comp_idx = comp_idx;
+ __entry->value = value;
+ ),
+ TP_printk("[%s] SDMA [%u:%u:%u] value: %u",
+ __get_str(dev),
+ __entry->ctxt,
+ __entry->subctxt,
+ __entry->comp_idx,
+ __entry->value
+ )
+);
+
+DEFINE_EVENT(hfi1_sdma_value_template, hfi1_sdma_user_initial_tidoffset,
+ TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
+ u16 comp_idx, u32 tidoffset),
+ TP_ARGS(dd, ctxt, subctxt, comp_idx, tidoffset));
+
+DEFINE_EVENT(hfi1_sdma_value_template, hfi1_sdma_user_data_length,
+ TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
+ u16 comp_idx, u32 data_len),
+ TP_ARGS(dd, ctxt, subctxt, comp_idx, data_len));
+
+DEFINE_EVENT(hfi1_sdma_value_template, hfi1_sdma_user_compute_length,
+ TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
+ u16 comp_idx, u32 data_len),
+ TP_ARGS(dd, ctxt, subctxt, comp_idx, data_len));
+
+TRACE_EVENT(hfi1_sdma_user_tid_info,
+ TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
+ u16 comp_idx, u32 tidoffset, u32 units, u8 shift),
+ TP_ARGS(dd, ctxt, subctxt, comp_idx, tidoffset, units, shift),
+ TP_STRUCT__entry(DD_DEV_ENTRY(dd)
+ __field(u16, ctxt)
+ __field(u16, subctxt)
+ __field(u16, comp_idx)
+ __field(u32, tidoffset)
+ __field(u32, units)
+ __field(u8, shift)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(dd);
+ __entry->ctxt = ctxt;
+ __entry->subctxt = subctxt;
+ __entry->comp_idx = comp_idx;
+ __entry->tidoffset = tidoffset;
+ __entry->units = units;
+ __entry->shift = shift;
+ ),
+ TP_printk("[%s] SDMA [%u:%u:%u] TID offset %ubytes %uunits om %u",
+ __get_str(dev),
+ __entry->ctxt,
+ __entry->subctxt,
+ __entry->comp_idx,
+ __entry->tidoffset,
+ __entry->units,
+ __entry->shift
+ )
+);
+
+TRACE_EVENT(hfi1_sdma_request,
+ TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
+ unsigned long dim),
+ TP_ARGS(dd, ctxt, subctxt, dim),
+ TP_STRUCT__entry(DD_DEV_ENTRY(dd)
+ __field(u16, ctxt)
+ __field(u16, subctxt)
+ __field(unsigned long, dim)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(dd);
+ __entry->ctxt = ctxt;
+ __entry->subctxt = subctxt;
+ __entry->dim = dim;
+ ),
+ TP_printk("[%s] SDMA from %u:%u (%lu)",
+ __get_str(dev),
+ __entry->ctxt,
+ __entry->subctxt,
+ __entry->dim
+ )
+);
+
DECLARE_EVENT_CLASS(hfi1_sdma_engine_class,
TP_PROTO(struct sdma_engine *sde, u64 status),
TP_ARGS(sde, status),
diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c
index 5da1e4546543..0b646173ca22 100644
--- a/drivers/infiniband/hw/hfi1/uc.c
+++ b/drivers/infiniband/hw/hfi1/uc.c
@@ -65,7 +65,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
struct hfi1_qp_priv *priv = qp->priv;
struct ib_other_headers *ohdr;
struct rvt_swqe *wqe;
- u32 hwords = 5;
+ u32 hwords;
u32 bth0 = 0;
u32 len;
u32 pmtu = qp->pmtu;
@@ -93,9 +93,23 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
goto done_free_tx;
}
- ohdr = &ps->s_txreq->phdr.hdr.u.oth;
- if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)
- ohdr = &ps->s_txreq->phdr.hdr.u.l.oth;
+ ps->s_txreq->phdr.hdr.hdr_type = priv->hdr_type;
+ if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
+ /* header size in 32-bit words LRH+BTH = (8+12)/4. */
+ hwords = 5;
+ if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)
+ ohdr = &ps->s_txreq->phdr.hdr.ibh.u.l.oth;
+ else
+ ohdr = &ps->s_txreq->phdr.hdr.ibh.u.oth;
+ } else {
+ /* header size in 32-bit words 16B LRH+BTH = (16+12)/4. */
+ hwords = 7;
+ if ((rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) &&
+ (hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))))
+ ohdr = &ps->s_txreq->phdr.hdr.opah.u.l.oth;
+ else
+ ohdr = &ps->s_txreq->phdr.hdr.opah.u.oth;
+ }
/* Get the next send request. */
wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
@@ -297,31 +311,26 @@ bail_no_tx:
void hfi1_uc_rcv(struct hfi1_packet *packet)
{
struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
- struct ib_header *hdr = packet->hdr;
- u32 rcv_flags = packet->rcv_flags;
- void *data = packet->ebuf;
+ void *data = packet->payload;
u32 tlen = packet->tlen;
struct rvt_qp *qp = packet->qp;
struct ib_other_headers *ohdr = packet->ohdr;
- u32 bth0, opcode;
+ u32 opcode = packet->opcode;
u32 hdrsize = packet->hlen;
u32 psn;
- u32 pad;
+ u32 pad = packet->pad;
struct ib_wc wc;
u32 pmtu = qp->pmtu;
struct ib_reth *reth;
- int has_grh = rcv_flags & HFI1_HAS_GRH;
int ret;
+ u8 extra_bytes = pad + packet->extra_byte + (SIZE_OF_CRC << 2);
- bth0 = be32_to_cpu(ohdr->bth[0]);
- if (hfi1_ruc_check_hdr(ibp, hdr, has_grh, qp, bth0))
+ if (hfi1_ruc_check_hdr(ibp, packet))
return;
process_ecn(qp, packet, true);
- psn = be32_to_cpu(ohdr->bth[2]);
- opcode = ib_bth_get_opcode(ohdr);
-
+ psn = ib_bth_get_psn(ohdr);
/* Compare the PSN verses the expected PSN. */
if (unlikely(cmp_psn(psn, qp->r_psn) != 0)) {
/*
@@ -414,7 +423,12 @@ send_first:
/* FALLTHROUGH */
case OP(SEND_MIDDLE):
/* Check for invalid length PMTU or posted rwqe len. */
- if (unlikely(tlen != (hdrsize + pmtu + 4)))
+ /*
+ * There will be no padding for 9B packets, but 16B packets
+ * will come in with some padding since we always add
+ * CRC and LT bytes, which need to be flit aligned.
+ */
+ if (unlikely(tlen != (hdrsize + pmtu + extra_bytes)))
goto rewind;
qp->r_rcv_len += pmtu;
if (unlikely(qp->r_rcv_len > qp->r_len))
@@ -432,14 +446,12 @@ no_immediate_data:
wc.ex.imm_data = 0;
wc.wc_flags = 0;
send_last:
- /* Get the number of bytes the message was padded by. */
- pad = ib_bth_get_pad(ohdr);
/* Check for invalid length. */
/* LAST len should be >= 1 */
- if (unlikely(tlen < (hdrsize + pad + 4)))
+ if (unlikely(tlen < (hdrsize + extra_bytes)))
goto rewind;
/* Don't count the CRC. */
- tlen -= (hdrsize + pad + 4);
+ tlen -= (hdrsize + extra_bytes);
wc.byte_len = tlen + qp->r_rcv_len;
if (unlikely(wc.byte_len > qp->r_len))
goto rewind;
@@ -527,14 +539,12 @@ rdma_first:
rdma_last_imm:
wc.wc_flags = IB_WC_WITH_IMM;
- /* Get the number of bytes the message was padded by. */
- pad = ib_bth_get_pad(ohdr);
/* Check for invalid length. */
/* LAST len should be >= 1 */
if (unlikely(tlen < (hdrsize + pad + 4)))
goto drop;
/* Don't count the CRC. */
- tlen -= (hdrsize + pad + 4);
+ tlen -= (hdrsize + extra_bytes);
if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
goto drop;
if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags)) {
@@ -554,14 +564,12 @@ rdma_last_imm:
case OP(RDMA_WRITE_LAST):
rdma_last:
- /* Get the number of bytes the message was padded by. */
- pad = ib_bth_get_pad(ohdr);
/* Check for invalid length. */
/* LAST len should be >= 1 */
if (unlikely(tlen < (hdrsize + pad + 4)))
goto drop;
/* Don't count the CRC. */
- tlen -= (hdrsize + pad + 4);
+ tlen -= (hdrsize + extra_bytes);
if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
goto drop;
hfi1_copy_sge(&qp->r_sge, data, tlen, true, false);
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index 6a4e95cefae5..2ba74fdd6f15 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -53,6 +53,12 @@
#include "verbs_txreq.h"
#include "qp.h"
+/* We support only two types - 9B and 16B for now */
+static const hfi1_make_req hfi1_make_ud_req_tbl[2] = {
+ [HFI1_PKT_TYPE_9B] = &hfi1_make_ud_req_9B,
+ [HFI1_PKT_TYPE_16B] = &hfi1_make_ud_req_16B
+};
+
/**
* ud_loopback - handle send on loopback QPs
* @sqp: the sending QP
@@ -67,6 +73,7 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
{
struct hfi1_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
struct hfi1_pportdata *ppd;
+ struct hfi1_qp_priv *priv = sqp->priv;
struct rvt_qp *qp;
struct rdma_ah_attr *ah_attr;
unsigned long flags;
@@ -102,18 +109,19 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
if (qp->ibqp.qp_num > 1) {
u16 pkey;
- u16 slid;
+ u32 slid;
u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)];
pkey = hfi1_get_pkey(ibp, sqp->s_pkey_index);
slid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
((1 << ppd->lmc) - 1));
if (unlikely(ingress_pkey_check(ppd, pkey, sc5,
- qp->s_pkey_index, slid))) {
- hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_P_KEY, pkey,
- rdma_ah_get_sl(ah_attr),
- sqp->ibqp.qp_num, qp->ibqp.qp_num,
- slid, rdma_ah_get_dlid(ah_attr));
+ qp->s_pkey_index,
+ slid, false))) {
+ hfi1_bad_pkey(ibp, pkey,
+ rdma_ah_get_sl(ah_attr),
+ sqp->ibqp.qp_num, qp->ibqp.qp_num,
+ slid, rdma_ah_get_dlid(ah_attr));
goto drop;
}
}
@@ -128,18 +136,8 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
qkey = (int)swqe->ud_wr.remote_qkey < 0 ?
sqp->qkey : swqe->ud_wr.remote_qkey;
- if (unlikely(qkey != qp->qkey)) {
- u16 lid;
-
- lid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
- ((1 << ppd->lmc) - 1));
- hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_Q_KEY, qkey,
- rdma_ah_get_sl(ah_attr),
- sqp->ibqp.qp_num, qp->ibqp.qp_num,
- lid,
- rdma_ah_get_dlid(ah_attr));
- goto drop;
- }
+ if (unlikely(qkey != qp->qkey))
+ goto drop; /* silently drop per IBTA spec */
}
/*
@@ -185,9 +183,33 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
struct ib_grh grh;
- const struct ib_global_route *grd = rdma_ah_read_grh(ah_attr);
+ struct ib_global_route grd = *(rdma_ah_read_grh(ah_attr));
+
+ /*
+ * For loopback packets with extended LIDs, the
+ * sgid_index in the GRH is 0 and the dgid is the
+ * OPA GID of the sender. While creating a response
+ * to the loopback packet, IB core creates the new
+ * sgid_index from the DGID and that will be the
+ * OPA_GID_INDEX. The new dgid is from the sgid
+ * index and that will be in the IB GID format.
+ *
+ * We now have a case where the sent packet had a
+ * different sgid_index and dgid compared to the
+ * one that was received in response.
+ *
+ * Fix this inconsistency.
+ */
+ if (priv->hdr_type == HFI1_PKT_TYPE_16B) {
+ if (grd.sgid_index == 0)
+ grd.sgid_index = OPA_GID_INDEX;
+
+ if (ib_is_opa_gid(&grd.dgid))
+ grd.dgid.global.interface_id =
+ cpu_to_be64(ppd->guids[HFI1_PORT_GUID_INDEX]);
+ }
- hfi1_make_grh(ibp, &grh, grd, 0, 0);
+ hfi1_make_grh(ibp, &grh, &grd, 0, 0);
hfi1_copy_sge(&qp->r_sge, &grh,
sizeof(grh), true, false);
wc.wc_flags |= IB_WC_GRH;
@@ -244,7 +266,7 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
wc.pkey_index = 0;
}
wc.slid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
- ((1 << ppd->lmc) - 1));
+ ((1 << ppd->lmc) - 1));
/* Check for loopback when the port lid is not set */
if (wc.slid == 0 && sqp->ibqp.qp_type == IB_QPT_GSI)
wc.slid = be16_to_cpu(IB_LID_PERMISSIVE);
@@ -261,6 +283,183 @@ drop:
rcu_read_unlock();
}
+static void hfi1_make_bth_deth(struct rvt_qp *qp, struct rvt_swqe *wqe,
+ struct ib_other_headers *ohdr,
+ u16 *pkey, u32 extra_bytes, bool bypass)
+{
+ u32 bth0;
+ struct hfi1_ibport *ibp;
+
+ ibp = to_iport(qp->ibqp.device, qp->port_num);
+ if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
+ ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
+ bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
+ } else {
+ bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
+ }
+
+ if (wqe->wr.send_flags & IB_SEND_SOLICITED)
+ bth0 |= IB_BTH_SOLICITED;
+ bth0 |= extra_bytes << 20;
+ if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_SMI)
+ *pkey = hfi1_get_pkey(ibp, wqe->ud_wr.pkey_index);
+ else
+ *pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
+ if (!bypass)
+ bth0 |= *pkey;
+ ohdr->bth[0] = cpu_to_be32(bth0);
+ ohdr->bth[1] = cpu_to_be32(wqe->ud_wr.remote_qpn);
+ ohdr->bth[2] = cpu_to_be32(mask_psn(wqe->psn));
+ /*
+ * Qkeys with the high order bit set mean use the
+ * qkey from the QP context instead of the WR (see 10.2.5).
+ */
+ ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->ud_wr.remote_qkey < 0 ?
+ qp->qkey : wqe->ud_wr.remote_qkey);
+ ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
+}
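The deth[0] expression above relies on casting the Q_Key to a signed int and testing for a negative value, which is equivalent to testing bit 31: per the rule cited (10.2.5), a Q_Key with the high-order bit set means the QP's own qkey is used rather than the one from the work request. A standalone sketch of that selection rule (not driver code):

#include <stdint.h>
#include <stdio.h>

/* Bit 31 set in the WR qkey means "use the QP's qkey instead". */
static uint32_t effective_qkey(uint32_t qp_qkey, uint32_t wr_qkey)
{
	return (int32_t)wr_qkey < 0 ? qp_qkey : wr_qkey;
}

int main(void)
{
	printf("0x%x\n", effective_qkey(0x1234, 0x80000000)); /* -> 0x1234 */
	printf("0x%x\n", effective_qkey(0x1234, 0x00000abc)); /* -> 0xabc  */
	return 0;
}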
+
+void hfi1_make_ud_req_9B(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
+ struct rvt_swqe *wqe)
+{
+ u32 nwords, extra_bytes;
+ u16 len, slid, dlid, pkey;
+ u16 lrh0 = 0;
+ u8 sc5;
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct ib_other_headers *ohdr;
+ struct rdma_ah_attr *ah_attr;
+ struct hfi1_pportdata *ppd;
+ struct hfi1_ibport *ibp;
+ struct ib_grh *grh;
+
+ ibp = to_iport(qp->ibqp.device, qp->port_num);
+ ppd = ppd_from_ibp(ibp);
+ ah_attr = &ibah_to_rvtah(wqe->ud_wr.ah)->attr;
+
+ extra_bytes = -wqe->length & 3;
+ nwords = ((wqe->length + extra_bytes) >> 2) + SIZE_OF_CRC;
+ /* header size in dwords LRH+BTH+DETH = (8+12+8)/4. */
+ qp->s_hdrwords = 7;
+ if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
+ qp->s_hdrwords++;
+
+ if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
+ grh = &ps->s_txreq->phdr.hdr.ibh.u.l.grh;
+ qp->s_hdrwords += hfi1_make_grh(ibp, grh,
+ rdma_ah_read_grh(ah_attr),
+ qp->s_hdrwords - 2, nwords);
+ lrh0 = HFI1_LRH_GRH;
+ ohdr = &ps->s_txreq->phdr.hdr.ibh.u.l.oth;
+ } else {
+ lrh0 = HFI1_LRH_BTH;
+ ohdr = &ps->s_txreq->phdr.hdr.ibh.u.oth;
+ }
+
+ sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)];
+ lrh0 |= (rdma_ah_get_sl(ah_attr) & 0xf) << 4;
+ if (qp->ibqp.qp_type == IB_QPT_SMI) {
+ lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
+ priv->s_sc = 0xf;
+ } else {
+ lrh0 |= (sc5 & 0xf) << 12;
+ priv->s_sc = sc5;
+ }
+
+ dlid = opa_get_lid(rdma_ah_get_dlid(ah_attr), 9B);
+ if (dlid == be16_to_cpu(IB_LID_PERMISSIVE)) {
+ slid = be16_to_cpu(IB_LID_PERMISSIVE);
+ } else {
+ u16 lid = (u16)ppd->lid;
+
+ if (lid) {
+ lid |= rdma_ah_get_path_bits(ah_attr) &
+ ((1 << ppd->lmc) - 1);
+ slid = lid;
+ } else {
+ slid = be16_to_cpu(IB_LID_PERMISSIVE);
+ }
+ }
+ hfi1_make_bth_deth(qp, wqe, ohdr, &pkey, extra_bytes, false);
+ len = qp->s_hdrwords + nwords;
+
+ /* Setup the packet */
+ ps->s_txreq->phdr.hdr.hdr_type = HFI1_PKT_TYPE_9B;
+ hfi1_make_ib_hdr(&ps->s_txreq->phdr.hdr.ibh,
+ lrh0, len, dlid, slid);
+}
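In the 9B path above, extra_bytes = -wqe->length & 3 is the usual trick for computing how many pad bytes round the payload up to a 4-byte (dword) boundary. A quick standalone check of that identity:

#include <stdio.h>

/* Bytes of padding needed to round len up to a multiple of 4. */
static unsigned int pad_to_dword(unsigned int len)
{
	return -len & 3;
}

int main(void)
{
	unsigned int len;

	for (len = 0; len < 8; len++)
		printf("len %u -> pad %u\n", len, pad_to_dword(len));
	/* Prints 0,3,2,1,0,3,2,1: (len + pad) is always a multiple of 4. */
	return 0;
}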
+
+void hfi1_make_ud_req_16B(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
+ struct rvt_swqe *wqe)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct ib_other_headers *ohdr;
+ struct rdma_ah_attr *ah_attr;
+ struct hfi1_pportdata *ppd;
+ struct hfi1_ibport *ibp;
+ u32 dlid, slid, nwords, extra_bytes;
+ u16 len, pkey;
+ u8 l4, sc5;
+
+ ibp = to_iport(qp->ibqp.device, qp->port_num);
+ ppd = ppd_from_ibp(ibp);
+ ah_attr = &ibah_to_rvtah(wqe->ud_wr.ah)->attr;
+ /* header size in dwords 16B LRH+BTH+DETH = (16+12+8)/4. */
+ qp->s_hdrwords = 9;
+ if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
+ qp->s_hdrwords++;
+
+ /* SW provides space for CRC and LT for bypass packets. */
+ extra_bytes = hfi1_get_16b_padding((qp->s_hdrwords << 2),
+ wqe->length);
+ nwords = ((wqe->length + extra_bytes + SIZE_OF_LT) >> 2) + SIZE_OF_CRC;
+
+ if ((rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) &&
+ hfi1_check_mcast(rdma_ah_get_dlid(ah_attr))) {
+ struct ib_grh *grh;
+ struct ib_global_route *grd = rdma_ah_retrieve_grh(ah_attr);
+ /*
+ * Ensure OPA GIDs are transformed to IB GIDs
+ * before creating the GRH.
+ */
+ if (grd->sgid_index == OPA_GID_INDEX) {
+ dd_dev_warn(ppd->dd, "Bad sgid_index. sgid_index: %d\n",
+ grd->sgid_index);
+ grd->sgid_index = 0;
+ }
+ grh = &ps->s_txreq->phdr.hdr.opah.u.l.grh;
+ qp->s_hdrwords += hfi1_make_grh(ibp, grh, grd,
+ qp->s_hdrwords - 4, nwords);
+ ohdr = &ps->s_txreq->phdr.hdr.opah.u.l.oth;
+ l4 = OPA_16B_L4_IB_GLOBAL;
+ } else {
+ ohdr = &ps->s_txreq->phdr.hdr.opah.u.oth;
+ l4 = OPA_16B_L4_IB_LOCAL;
+ }
+
+ sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)];
+ if (qp->ibqp.qp_type == IB_QPT_SMI)
+ priv->s_sc = 0xf;
+ else
+ priv->s_sc = sc5;
+
+ dlid = opa_get_lid(rdma_ah_get_dlid(ah_attr), 16B);
+ if (!ppd->lid)
+ slid = be32_to_cpu(OPA_LID_PERMISSIVE);
+ else
+ slid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
+ ((1 << ppd->lmc) - 1));
+
+ hfi1_make_bth_deth(qp, wqe, ohdr, &pkey, extra_bytes, true);
+ /* Convert dwords to flits */
+ len = (qp->s_hdrwords + nwords) >> 1;
+
+ /* Setup the packet */
+ ps->s_txreq->phdr.hdr.hdr_type = HFI1_PKT_TYPE_16B;
+ hfi1_make_16b_hdr(&ps->s_txreq->phdr.hdr.opah,
+ slid, dlid, len, pkey, 0, 0, l4, priv->s_sc);
+}
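Unlike the 9B path, the 16B builder above expresses the packet length in flits: a flit is 8 bytes, i.e. two dwords, so the sum of header and payload dwords is simply shifted right by one. A hedged arithmetic sketch (CRC/LT accounting elided):

#include <stdio.h>

/* 16B length: dwords are 4 bytes, flits are 8 bytes (two dwords). */
static unsigned int dwords_to_flits(unsigned int hdr_dwords,
				    unsigned int payload_dwords)
{
	return (hdr_dwords + payload_dwords) >> 1;
}

int main(void)
{
	/* 9 header dwords (16B LRH+BTH+DETH) plus 19 payload dwords. */
	printf("%u flits\n", dwords_to_flits(9, 19)); /* 28 dwords -> 14 flits */
	return 0;
}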
+
/**
* hfi1_make_ud_req - construct a UD request packet
* @qp: the QP
@@ -272,18 +471,12 @@ drop:
int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
struct hfi1_qp_priv *priv = qp->priv;
- struct ib_other_headers *ohdr;
struct rdma_ah_attr *ah_attr;
struct hfi1_pportdata *ppd;
struct hfi1_ibport *ibp;
struct rvt_swqe *wqe;
- u32 nwords;
- u32 extra_bytes;
- u32 bth0;
- u16 lrh0;
- u16 lid;
int next_cur;
- u8 sc5;
+ u32 lid;
ps->s_txreq = get_txreq(ps->dev, qp);
if (IS_ERR(ps->s_txreq))
@@ -320,13 +513,14 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
ibp = to_iport(qp->ibqp.device, qp->port_num);
ppd = ppd_from_ibp(ibp);
ah_attr = &ibah_to_rvtah(wqe->ud_wr.ah)->attr;
- if (rdma_ah_get_dlid(ah_attr) < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
- rdma_ah_get_dlid(ah_attr) == be16_to_cpu(IB_LID_PERMISSIVE)) {
+ priv->hdr_type = hfi1_get_hdr_type(ppd->lid, ah_attr);
+ if ((!hfi1_check_mcast(rdma_ah_get_dlid(ah_attr))) ||
+ (rdma_ah_get_dlid(ah_attr) == be32_to_cpu(OPA_LID_PERMISSIVE))) {
lid = rdma_ah_get_dlid(ah_attr) & ~((1 << ppd->lmc) - 1);
if (unlikely(!loopback &&
- (lid == ppd->lid ||
- (lid == be16_to_cpu(IB_LID_PERMISSIVE) &&
- qp->ibqp.qp_type == IB_QPT_GSI)))) {
+ ((lid == ppd->lid) ||
+ ((lid == be32_to_cpu(OPA_LID_PERMISSIVE)) &&
+ (qp->ibqp.qp_type == IB_QPT_GSI))))) {
unsigned long tflags = ps->flags;
/*
* If DMAs are in progress, we can't generate
@@ -350,11 +544,6 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
}
qp->s_cur = next_cur;
- extra_bytes = -wqe->length & 3;
- nwords = (wqe->length + extra_bytes) >> 2;
-
- /* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */
- qp->s_hdrwords = 7;
ps->s_txreq->s_cur_size = wqe->length;
ps->s_txreq->ss = &qp->s_sge;
qp->s_srate = rdma_ah_get_static_rate(ah_attr);
@@ -365,77 +554,12 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
qp->s_sge.num_sge = wqe->wr.num_sge;
qp->s_sge.total_len = wqe->length;
- if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
- /* Header size in 32-bit words. */
- qp->s_hdrwords += hfi1_make_grh(ibp,
- &ps->s_txreq->phdr.hdr.u.l.grh,
- rdma_ah_read_grh(ah_attr),
- qp->s_hdrwords, nwords);
- lrh0 = HFI1_LRH_GRH;
- ohdr = &ps->s_txreq->phdr.hdr.u.l.oth;
- /*
- * Don't worry about sending to locally attached multicast
- * QPs. It is unspecified by the spec. what happens.
- */
- } else {
- /* Header size in 32-bit words. */
- lrh0 = HFI1_LRH_BTH;
- ohdr = &ps->s_txreq->phdr.hdr.u.oth;
- }
- if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
- qp->s_hdrwords++;
- ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
- bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
- } else {
- bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
- }
- sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)];
- lrh0 |= (rdma_ah_get_sl(ah_attr) & 0xf) << 4;
- if (qp->ibqp.qp_type == IB_QPT_SMI) {
- lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
- priv->s_sc = 0xf;
- } else {
- lrh0 |= (sc5 & 0xf) << 12;
- priv->s_sc = sc5;
- }
+ /* Make the appropriate header */
+ hfi1_make_ud_req_tbl[priv->hdr_type](qp, ps, qp->s_wqe);
priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
ps->s_txreq->sde = priv->s_sde;
priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
ps->s_txreq->psc = priv->s_sendcontext;
- ps->s_txreq->phdr.hdr.lrh[0] = cpu_to_be16(lrh0);
- ps->s_txreq->phdr.hdr.lrh[1] =
- cpu_to_be16(rdma_ah_get_dlid(ah_attr));
- ps->s_txreq->phdr.hdr.lrh[2] =
- cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
- if (rdma_ah_get_dlid(ah_attr) == be16_to_cpu(IB_LID_PERMISSIVE)) {
- ps->s_txreq->phdr.hdr.lrh[3] = IB_LID_PERMISSIVE;
- } else {
- lid = ppd->lid;
- if (lid) {
- lid |= rdma_ah_get_path_bits(ah_attr) &
- ((1 << ppd->lmc) - 1);
- ps->s_txreq->phdr.hdr.lrh[3] = cpu_to_be16(lid);
- } else {
- ps->s_txreq->phdr.hdr.lrh[3] = IB_LID_PERMISSIVE;
- }
- }
- if (wqe->wr.send_flags & IB_SEND_SOLICITED)
- bth0 |= IB_BTH_SOLICITED;
- bth0 |= extra_bytes << 20;
- if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_SMI)
- bth0 |= hfi1_get_pkey(ibp, wqe->ud_wr.pkey_index);
- else
- bth0 |= hfi1_get_pkey(ibp, qp->s_pkey_index);
- ohdr->bth[0] = cpu_to_be32(bth0);
- ohdr->bth[1] = cpu_to_be32(wqe->ud_wr.remote_qpn);
- ohdr->bth[2] = cpu_to_be32(mask_psn(wqe->psn));
- /*
- * Qkeys with the high order bit set mean use the
- * qkey from the QP context instead of the WR (see 10.2.5).
- */
- ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->ud_wr.remote_qkey < 0 ?
- qp->qkey : wqe->ud_wr.remote_qkey);
- ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
/* disarm any ahg */
priv->s_ahg->ahgcount = 0;
priv->s_ahg->ahgidx = 0;
@@ -505,6 +629,64 @@ int hfi1_lookup_pkey_idx(struct hfi1_ibport *ibp, u16 pkey)
return -1;
}
+void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp,
+ u32 remote_qpn, u32 pkey, u32 slid, u32 dlid,
+ u8 sc5, const struct ib_grh *old_grh)
+{
+ u64 pbc, pbc_flags = 0;
+ u32 bth0, plen, vl, hwords = 7;
+ u16 len;
+ u8 l4;
+ struct hfi1_16b_header hdr;
+ struct ib_other_headers *ohdr;
+ struct pio_buf *pbuf;
+ struct send_context *ctxt = qp_to_send_context(qp, sc5);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ u32 nwords;
+
+ /* Populate length */
+ nwords = ((hfi1_get_16b_padding(hwords << 2, 0) +
+ SIZE_OF_LT) >> 2) + SIZE_OF_CRC;
+ if (old_grh) {
+ struct ib_grh *grh = &hdr.u.l.grh;
+
+ grh->version_tclass_flow = old_grh->version_tclass_flow;
+ grh->paylen = cpu_to_be16((hwords - 4 + nwords) << 2);
+ grh->hop_limit = 0xff;
+ grh->sgid = old_grh->dgid;
+ grh->dgid = old_grh->sgid;
+ ohdr = &hdr.u.l.oth;
+ l4 = OPA_16B_L4_IB_GLOBAL;
+ hwords += sizeof(struct ib_grh) / sizeof(u32);
+ } else {
+ ohdr = &hdr.u.oth;
+ l4 = OPA_16B_L4_IB_LOCAL;
+ }
+
+ /* Bits 16 to 19 are TVER. Bits 20 to 22 are the pad count. */
+ bth0 = (IB_OPCODE_CNP << 24) | (1 << 16) |
+ (hfi1_get_16b_padding(hwords << 2, 0) << 20);
+ ohdr->bth[0] = cpu_to_be32(bth0);
+
+ ohdr->bth[1] = cpu_to_be32(remote_qpn);
+ ohdr->bth[2] = 0; /* PSN 0 */
+
+ /* Convert dwords to flits */
+ len = (hwords + nwords) >> 1;
+ hfi1_make_16b_hdr(&hdr, slid, dlid, len, pkey, 1, 0, l4, sc5);
+
+ plen = 2 /* PBC */ + hwords + nwords;
+ pbc_flags |= PBC_PACKET_BYPASS | PBC_INSERT_BYPASS_ICRC;
+ vl = sc_to_vlt(ppd->dd, sc5);
+ pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
+ if (ctxt) {
+ pbuf = sc_buffer_alloc(ctxt, plen, NULL, NULL);
+ if (pbuf)
+ ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
+ &hdr, hwords);
+ }
+}
+
void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
u32 pkey, u32 slid, u32 dlid, u8 sc5,
const struct ib_grh *old_grh)
@@ -543,13 +725,9 @@ void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
ohdr->bth[1] = cpu_to_be32(remote_qpn | (1 << IB_BECN_SHIFT));
ohdr->bth[2] = 0; /* PSN 0 */
- hdr.lrh[0] = cpu_to_be16(lrh0);
- hdr.lrh[1] = cpu_to_be16(dlid);
- hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
- hdr.lrh[3] = cpu_to_be16(slid);
-
+ hfi1_make_ib_hdr(&hdr, lrh0, hwords + SIZE_OF_CRC, dlid, slid);
plen = 2 /* PBC */ + hwords;
- pbc_flags |= (!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT;
+ pbc_flags |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT);
vl = sc_to_vlt(ppd->dd, sc5);
pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
if (ctxt) {
@@ -668,37 +846,45 @@ static int opa_smp_check(struct hfi1_ibport *ibp, u16 pkey, u8 sc5,
void hfi1_ud_rcv(struct hfi1_packet *packet)
{
struct ib_other_headers *ohdr = packet->ohdr;
- int opcode;
u32 hdrsize = packet->hlen;
struct ib_wc wc;
u32 qkey;
u32 src_qp;
- u16 dlid, pkey;
+ u16 pkey;
int mgmt_pkey_idx = -1;
struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
struct ib_header *hdr = packet->hdr;
- u32 rcv_flags = packet->rcv_flags;
- void *data = packet->ebuf;
+ void *data = packet->payload;
u32 tlen = packet->tlen;
struct rvt_qp *qp = packet->qp;
- bool has_grh = rcv_flags & HFI1_HAS_GRH;
- u8 sc5 = hfi1_9B_get_sc5(hdr, packet->rhf);
- u32 bth1;
- u8 sl_from_sc, sl;
- u16 slid;
+ u8 sc5 = packet->sc;
+ u8 sl_from_sc;
+ u8 opcode = packet->opcode;
+ u8 sl = packet->sl;
+ u32 dlid = packet->dlid;
+ u32 slid = packet->slid;
u8 extra_bytes;
+ bool dlid_is_permissive;
+ bool slid_is_permissive;
- qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
- src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & RVT_QPN_MASK;
- dlid = ib_get_dlid(hdr);
- bth1 = be32_to_cpu(ohdr->bth[1]);
- slid = ib_get_slid(hdr);
- pkey = ib_bth_get_pkey(ohdr);
- opcode = ib_bth_get_opcode(ohdr);
- sl = ib_get_sl(hdr);
- extra_bytes = ib_bth_get_pad(ohdr);
- extra_bytes += (SIZE_OF_CRC << 2);
+ extra_bytes = packet->pad + packet->extra_byte + (SIZE_OF_CRC << 2);
+ qkey = ib_get_qkey(ohdr);
+ src_qp = ib_get_sqpn(ohdr);
+
+ if (packet->etype == RHF_RCV_TYPE_BYPASS) {
+ u32 permissive_lid =
+ opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE), 16B);
+
+ pkey = hfi1_16B_get_pkey(packet->hdr);
+ dlid_is_permissive = (dlid == permissive_lid);
+ slid_is_permissive = (slid == permissive_lid);
+ } else {
+ hdr = packet->hdr;
+ pkey = ib_bth_get_pkey(ohdr);
+ dlid_is_permissive = (dlid == be16_to_cpu(IB_LID_PERMISSIVE));
+ slid_is_permissive = (slid == be16_to_cpu(IB_LID_PERMISSIVE));
+ }
sl_from_sc = ibp->sc_to_sl[sc5];
process_ecn(qp, packet, (opcode != IB_OPCODE_CNP));
@@ -716,8 +902,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
* and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
*/
if (qp->ibqp.qp_num) {
- if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
- hdr->lrh[3] == IB_LID_PERMISSIVE))
+ if (unlikely(dlid_is_permissive || slid_is_permissive))
goto drop;
if (qp->ibqp.qp_num > 1) {
if (unlikely(rcv_pkey_check(ppd, pkey, sc5, slid))) {
@@ -727,10 +912,10 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
* for invalid pkeys is optional according to
* IB spec (release 1.3, section 10.9.4)
*/
- hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_P_KEY,
- pkey, sl,
- src_qp, qp->ibqp.qp_num,
- slid, dlid);
+ hfi1_bad_pkey(ibp,
+ pkey, sl,
+ src_qp, qp->ibqp.qp_num,
+ slid, dlid);
return;
}
} else {
@@ -739,12 +924,9 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
if (mgmt_pkey_idx < 0)
goto drop;
}
- if (unlikely(qkey != qp->qkey)) {
- hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_Q_KEY, qkey, sl,
- src_qp, qp->ibqp.qp_num,
- slid, dlid);
+ if (unlikely(qkey != qp->qkey)) /* Silent drop */
return;
- }
+
/* Drop invalid MAD packets (see 13.5.3.1). */
if (unlikely(qp->ibqp.qp_num == 1 &&
(tlen > 2048 || (sc5 == 0xF))))
@@ -758,8 +940,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
if (tlen > 2048)
goto drop;
- if ((hdr->lrh[1] == IB_LID_PERMISSIVE ||
- hdr->lrh[3] == IB_LID_PERMISSIVE) &&
+ if ((dlid_is_permissive || slid_is_permissive) &&
smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
goto drop;
@@ -811,8 +992,19 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
qp->r_flags |= RVT_R_REUSE_SGE;
goto drop;
}
- if (has_grh) {
- hfi1_copy_sge(&qp->r_sge, &hdr->u.l.grh,
+ if (packet->grh) {
+ hfi1_copy_sge(&qp->r_sge, packet->grh,
+ sizeof(struct ib_grh), true, false);
+ wc.wc_flags |= IB_WC_GRH;
+ } else if (packet->etype == RHF_RCV_TYPE_BYPASS) {
+ struct ib_grh grh;
+ /*
+ * We assume 16B was only created on the send side
+ * when large LIDs are in use; since the GRH was
+ * stripped out when the 16B packet was built, add
+ * it back here.
+ */
+ hfi1_make_ext_grh(packet, &grh, slid, dlid);
+ hfi1_copy_sge(&qp->r_sge, &grh,
sizeof(struct ib_grh), true, false);
wc.wc_flags |= IB_WC_GRH;
} else {
@@ -845,14 +1037,15 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
} else {
wc.pkey_index = 0;
}
-
+ if (slid_is_permissive)
+ slid = be32_to_cpu(OPA_LID_PERMISSIVE);
wc.slid = slid;
wc.sl = sl_from_sc;
/*
* Save the LMC lower bits if the destination LID is a unicast LID.
*/
- wc.dlid_path_bits = dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE) ? 0 :
+ wc.dlid_path_bits = hfi1_check_mcast(dlid) ? 0 :
dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
wc.port_num = qp->port_num;
/* Signal completion event if the solicited bit is set. */
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
index a8f0aa4722f6..6f6c14df383e 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
@@ -47,58 +47,28 @@
#include <asm/page.h>
#include <linux/string.h>
+#include "mmu_rb.h"
#include "user_exp_rcv.h"
#include "trace.h"
-#include "mmu_rb.h"
-
-struct tid_group {
- struct list_head list;
- u32 base;
- u8 size;
- u8 used;
- u8 map;
-};
-
-struct tid_rb_node {
- struct mmu_rb_node mmu;
- unsigned long phys;
- struct tid_group *grp;
- u32 rcventry;
- dma_addr_t dma_addr;
- bool freed;
- unsigned npages;
- struct page *pages[0];
-};
-
-struct tid_pageset {
- u16 idx;
- u16 count;
-};
-
-#define EXP_TID_SET_EMPTY(set) (set.count == 0 && list_empty(&set.list))
-
-#define num_user_pages(vaddr, len) \
- (1 + (((((unsigned long)(vaddr) + \
- (unsigned long)(len) - 1) & PAGE_MASK) - \
- ((unsigned long)vaddr & PAGE_MASK)) >> PAGE_SHIFT))
static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt,
struct exp_tid_set *set,
struct hfi1_filedata *fd);
-static u32 find_phys_blocks(struct page **pages, unsigned npages,
- struct tid_pageset *list);
-static int set_rcvarray_entry(struct hfi1_filedata *fd, unsigned long vaddr,
+static u32 find_phys_blocks(struct tid_user_buf *tidbuf, unsigned int npages);
+static int set_rcvarray_entry(struct hfi1_filedata *fd,
+ struct tid_user_buf *tbuf,
u32 rcventry, struct tid_group *grp,
- struct page **pages, unsigned npages);
+ u16 pageidx, unsigned int npages);
static int tid_rb_insert(void *arg, struct mmu_rb_node *node);
static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
struct tid_rb_node *tnode);
static void tid_rb_remove(void *arg, struct mmu_rb_node *node);
static int tid_rb_invalidate(void *arg, struct mmu_rb_node *mnode);
-static int program_rcvarray(struct hfi1_filedata *fd, unsigned long vaddr,
- struct tid_group *grp, struct tid_pageset *sets,
- unsigned start, u16 count, struct page **pages,
- u32 *tidlist, unsigned *tididx, unsigned *pmapped);
+static int program_rcvarray(struct hfi1_filedata *fd, struct tid_user_buf *,
+ struct tid_group *grp,
+ unsigned int start, u16 count,
+ u32 *tidlist, unsigned int *tididx,
+ unsigned int *pmapped);
static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
struct tid_group **grp);
static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node);
@@ -109,96 +79,14 @@ static struct mmu_rb_ops tid_rb_ops = {
.invalidate = tid_rb_invalidate
};
-static inline u32 rcventry2tidinfo(u32 rcventry)
-{
- u32 pair = rcventry & ~0x1;
-
- return EXP_TID_SET(IDX, pair >> 1) |
- EXP_TID_SET(CTRL, 1 << (rcventry - pair));
-}
-
-static inline void exp_tid_group_init(struct exp_tid_set *set)
-{
- INIT_LIST_HEAD(&set->list);
- set->count = 0;
-}
-
-static inline void tid_group_remove(struct tid_group *grp,
- struct exp_tid_set *set)
-{
- list_del_init(&grp->list);
- set->count--;
-}
-
-static inline void tid_group_add_tail(struct tid_group *grp,
- struct exp_tid_set *set)
-{
- list_add_tail(&grp->list, &set->list);
- set->count++;
-}
-
-static inline struct tid_group *tid_group_pop(struct exp_tid_set *set)
-{
- struct tid_group *grp =
- list_first_entry(&set->list, struct tid_group, list);
- list_del_init(&grp->list);
- set->count--;
- return grp;
-}
-
-static inline void tid_group_move(struct tid_group *group,
- struct exp_tid_set *s1,
- struct exp_tid_set *s2)
-{
- tid_group_remove(group, s1);
- tid_group_add_tail(group, s2);
-}
-
-int hfi1_user_exp_rcv_grp_init(struct hfi1_filedata *fd)
-{
- struct hfi1_ctxtdata *uctxt = fd->uctxt;
- struct hfi1_devdata *dd = fd->dd;
- u32 tidbase;
- u32 i;
- struct tid_group *grp, *gptr;
-
- exp_tid_group_init(&uctxt->tid_group_list);
- exp_tid_group_init(&uctxt->tid_used_list);
- exp_tid_group_init(&uctxt->tid_full_list);
-
- tidbase = uctxt->expected_base;
- for (i = 0; i < uctxt->expected_count /
- dd->rcv_entries.group_size; i++) {
- grp = kzalloc(sizeof(*grp), GFP_KERNEL);
- if (!grp)
- goto grp_failed;
-
- grp->size = dd->rcv_entries.group_size;
- grp->base = tidbase;
- tid_group_add_tail(grp, &uctxt->tid_group_list);
- tidbase += dd->rcv_entries.group_size;
- }
-
- return 0;
-
-grp_failed:
- list_for_each_entry_safe(grp, gptr, &uctxt->tid_group_list.list,
- list) {
- list_del_init(&grp->list);
- kfree(grp);
- }
-
- return -ENOMEM;
-}
-
/*
* Initialize context and file private data needed for Expected
* receive caching. This needs to be done after the context has
* been configured with the eager/expected RcvEntry counts.
*/
-int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd)
+int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd,
+ struct hfi1_ctxtdata *uctxt)
{
- struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_devdata *dd = uctxt->dd;
int ret = 0;
@@ -266,18 +154,6 @@ int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd)
return ret;
}
-void hfi1_user_exp_rcv_grp_free(struct hfi1_ctxtdata *uctxt)
-{
- struct tid_group *grp, *gptr;
-
- list_for_each_entry_safe(grp, gptr, &uctxt->tid_group_list.list,
- list) {
- list_del_init(&grp->list);
- kfree(grp);
- }
- hfi1_clear_tids(uctxt);
-}
-
void hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
{
struct hfi1_ctxtdata *uctxt = fd->uctxt;
@@ -302,21 +178,90 @@ void hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
fd->entry_to_rb = NULL;
}
-/*
- * Write an "empty" RcvArray entry.
- * This function exists so the TID registaration code can use it
- * to write to unused/unneeded entries and still take advantage
- * of the WC performance improvements. The HFI will ignore this
- * write to the RcvArray entry.
+/**
+ * Release pinned receive buffer pages.
+ *
+ * @mapped - true if the pages have been DMA mapped, false otherwise.
+ * @idx - Index of the first page to unpin.
+ * @npages - Number of pages to unpin.
+ *
+ * If the pages have been DMA mapped (indicated by mapped parameter), their
+ * info will be passed via a struct tid_rb_node. If they haven't been mapped,
+ * their info will be passed via a struct tid_user_buf.
+ */
+static void unpin_rcv_pages(struct hfi1_filedata *fd,
+ struct tid_user_buf *tidbuf,
+ struct tid_rb_node *node,
+ unsigned int idx,
+ unsigned int npages,
+ bool mapped)
+{
+ struct page **pages;
+ struct hfi1_devdata *dd = fd->uctxt->dd;
+
+ if (mapped) {
+ pci_unmap_single(dd->pcidev, node->dma_addr,
+ node->mmu.len, PCI_DMA_FROMDEVICE);
+ pages = &node->pages[idx];
+ } else {
+ pages = &tidbuf->pages[idx];
+ }
+ hfi1_release_user_pages(fd->mm, pages, npages, mapped);
+ fd->tid_n_pinned -= npages;
+}
+
+/**
+ * Pin receive buffer pages.
*/
-static inline void rcv_array_wc_fill(struct hfi1_devdata *dd, u32 index)
+static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf)
{
+ int pinned;
+ unsigned int npages;
+ unsigned long vaddr = tidbuf->vaddr;
+ struct page **pages = NULL;
+ struct hfi1_devdata *dd = fd->uctxt->dd;
+
+ /* Get the number of pages the user buffer spans */
+ npages = num_user_pages(vaddr, tidbuf->length);
+ if (!npages)
+ return -EINVAL;
+
+ if (npages > fd->uctxt->expected_count) {
+ dd_dev_err(dd, "Expected buffer too big\n");
+ return -EINVAL;
+ }
+
+ /* Verify that access is OK for the user buffer */
+ if (!access_ok(VERIFY_WRITE, (void __user *)vaddr,
+ npages * PAGE_SIZE)) {
+ dd_dev_err(dd, "Fail vaddr %p, %u pages, !access_ok\n",
+ (void *)vaddr, npages);
+ return -EFAULT;
+ }
+ /* Allocate the array of struct page pointers needed for pinning */
+ pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
/*
- * Doing the WC fill writes only makes sense if the device is
- * present and the RcvArray has been mapped as WC memory.
+ * Pin all the pages of the user buffer. If we can't pin all the
+ * pages, accept the amount pinned so far and program only that.
+ * User space knows how to deal with partially programmed buffers.
*/
- if ((dd->flags & HFI1_PRESENT) && dd->rcvarray_wc)
- writeq(0, dd->rcvarray_wc + (index * 8));
+ if (!hfi1_can_pin_pages(dd, fd->mm, fd->tid_n_pinned, npages)) {
+ kfree(pages);
+ return -ENOMEM;
+ }
+
+ pinned = hfi1_acquire_user_pages(fd->mm, vaddr, npages, true, pages);
+ if (pinned <= 0) {
+ kfree(pages);
+ return pinned;
+ }
+ tidbuf->pages = pages;
+ tidbuf->npages = npages;
+ fd->tid_n_pinned += pinned;
+ return pinned;
}
/*
@@ -374,62 +319,33 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
int ret = 0, need_group = 0, pinned;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_devdata *dd = uctxt->dd;
- unsigned npages, ngroups, pageidx = 0, pageset_count, npagesets,
+ unsigned int ngroups, pageidx = 0, pageset_count,
tididx = 0, mapped, mapped_pages = 0;
- unsigned long vaddr = tinfo->vaddr;
- struct page **pages = NULL;
u32 *tidlist = NULL;
- struct tid_pageset *pagesets = NULL;
+ struct tid_user_buf *tidbuf;
- /* Get the number of pages the user buffer spans */
- npages = num_user_pages(vaddr, tinfo->length);
- if (!npages)
- return -EINVAL;
-
- if (npages > uctxt->expected_count) {
- dd_dev_err(dd, "Expected buffer too big\n");
- return -EINVAL;
- }
-
- /* Verify that access is OK for the user buffer */
- if (!access_ok(VERIFY_WRITE, (void __user *)vaddr,
- npages * PAGE_SIZE)) {
- dd_dev_err(dd, "Fail vaddr %p, %u pages, !access_ok\n",
- (void *)vaddr, npages);
- return -EFAULT;
- }
-
- pagesets = kcalloc(uctxt->expected_count, sizeof(*pagesets),
- GFP_KERNEL);
- if (!pagesets)
+ tidbuf = kzalloc(sizeof(*tidbuf), GFP_KERNEL);
+ if (!tidbuf)
return -ENOMEM;
- /* Allocate the array of struct page pointers needed for pinning */
- pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
- if (!pages) {
- ret = -ENOMEM;
- goto bail;
- }
-
- /*
- * Pin all the pages of the user buffer. If we can't pin all the
- * pages, accept the amount pinned so far and program only that.
- * User space knows how to deal with partially programmed buffers.
- */
- if (!hfi1_can_pin_pages(dd, fd->mm, fd->tid_n_pinned, npages)) {
- ret = -ENOMEM;
- goto bail;
+ tidbuf->vaddr = tinfo->vaddr;
+ tidbuf->length = tinfo->length;
+ tidbuf->psets = kcalloc(uctxt->expected_count, sizeof(*tidbuf->psets),
+ GFP_KERNEL);
+ if (!tidbuf->psets) {
+ kfree(tidbuf);
+ return -ENOMEM;
}
- pinned = hfi1_acquire_user_pages(fd->mm, vaddr, npages, true, pages);
+ pinned = pin_rcv_pages(fd, tidbuf);
if (pinned <= 0) {
- ret = pinned;
- goto bail;
+ kfree(tidbuf->psets);
+ kfree(tidbuf);
+ return pinned;
}
- fd->tid_n_pinned += npages;
/* Find sets of physically contiguous pages */
- npagesets = find_phys_blocks(pages, pinned, pagesets);
+ tidbuf->n_psets = find_phys_blocks(tidbuf, pinned);
/*
* We don't need to access this under a lock since tid_used is per
@@ -437,10 +353,10 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
* and hfi1_user_exp_rcv_setup() at the same time.
*/
spin_lock(&fd->tid_lock);
- if (fd->tid_used + npagesets > fd->tid_limit)
+ if (fd->tid_used + tidbuf->n_psets > fd->tid_limit)
pageset_count = fd->tid_limit - fd->tid_used;
else
- pageset_count = npagesets;
+ pageset_count = tidbuf->n_psets;
spin_unlock(&fd->tid_lock);
if (!pageset_count)
@@ -468,9 +384,9 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
struct tid_group *grp =
tid_group_pop(&uctxt->tid_group_list);
- ret = program_rcvarray(fd, vaddr, grp, pagesets,
+ ret = program_rcvarray(fd, tidbuf, grp,
pageidx, dd->rcv_entries.group_size,
- pages, tidlist, &tididx, &mapped);
+ tidlist, &tididx, &mapped);
/*
* If there was a failure to program the RcvArray
* entries for the entire group, reset the grp fields
@@ -514,8 +430,8 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
unsigned use = min_t(unsigned, pageset_count - pageidx,
grp->size - grp->used);
- ret = program_rcvarray(fd, vaddr, grp, pagesets,
- pageidx, use, pages, tidlist,
+ ret = program_rcvarray(fd, tidbuf, grp,
+ pageidx, use, tidlist,
&tididx, &mapped);
if (ret < 0) {
hfi1_cdbg(TID,
@@ -575,16 +491,14 @@ nomem:
* If not everything was mapped (due to insufficient RcvArray entries,
* for example), unpin all unmapped pages so we can pin them next time.
*/
- if (mapped_pages != pinned) {
- hfi1_release_user_pages(fd->mm, &pages[mapped_pages],
- pinned - mapped_pages,
- false);
- fd->tid_n_pinned -= pinned - mapped_pages;
- }
+ if (mapped_pages != pinned)
+ unpin_rcv_pages(fd, tidbuf, NULL, mapped_pages,
+ (pinned - mapped_pages), false);
bail:
- kfree(pagesets);
- kfree(pages);
+ kfree(tidbuf->psets);
kfree(tidlist);
+ kfree(tidbuf->pages);
+ kfree(tidbuf);
return ret > 0 ? 0 : ret;
}
@@ -674,11 +588,12 @@ int hfi1_user_exp_rcv_invalid(struct hfi1_filedata *fd,
return ret;
}
-static u32 find_phys_blocks(struct page **pages, unsigned npages,
- struct tid_pageset *list)
+static u32 find_phys_blocks(struct tid_user_buf *tidbuf, unsigned int npages)
{
unsigned pagecount, pageidx, setcount = 0, i;
unsigned long pfn, this_pfn;
+ struct page **pages = tidbuf->pages;
+ struct tid_pageset *list = tidbuf->psets;
if (!npages)
return 0;
@@ -741,13 +656,13 @@ static u32 find_phys_blocks(struct page **pages, unsigned npages,
/**
* program_rcvarray() - program an RcvArray group with receive buffers
* @fd: filedata pointer
- * @vaddr: starting user virtual address
+ * @tbuf: pointer to struct tid_user_buf that has the user buffer starting
+ * virtual address, buffer length, page pointers, pagesets (array of
+ * struct tid_pageset holding information on physically contiguous
+ * chunks from the user buffer), and other fields.
* @grp: RcvArray group
- * @sets: array of struct tid_pageset holding information on physically
- * contiguous chunks from the user buffer
* @start: starting index into sets array
* @count: number of struct tid_pageset's to program
- * @pages: an array of struct page * for the user buffer
* @tidlist: the array of u32 elements where the information about the
* programmed RcvArray entries is to be encoded.
* @tididx: starting offset into tidlist
@@ -765,11 +680,11 @@ static u32 find_phys_blocks(struct page **pages, unsigned npages,
* -ENOMEM or -EFAULT on error from set_rcvarray_entry(), or
* number of RcvArray entries programmed.
*/
-static int program_rcvarray(struct hfi1_filedata *fd, unsigned long vaddr,
+static int program_rcvarray(struct hfi1_filedata *fd, struct tid_user_buf *tbuf,
struct tid_group *grp,
- struct tid_pageset *sets,
- unsigned start, u16 count, struct page **pages,
- u32 *tidlist, unsigned *tididx, unsigned *pmapped)
+ unsigned int start, u16 count,
+ u32 *tidlist, unsigned int *tididx,
+ unsigned int *pmapped)
{
struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_devdata *dd = uctxt->dd;
@@ -808,11 +723,11 @@ static int program_rcvarray(struct hfi1_filedata *fd, unsigned long vaddr,
}
rcventry = grp->base + useidx;
- npages = sets[setidx].count;
- pageidx = sets[setidx].idx;
+ npages = tbuf->psets[setidx].count;
+ pageidx = tbuf->psets[setidx].idx;
- ret = set_rcvarray_entry(fd, vaddr + (pageidx * PAGE_SIZE),
- rcventry, grp, pages + pageidx,
+ ret = set_rcvarray_entry(fd, tbuf,
+ rcventry, grp, pageidx,
npages);
if (ret)
return ret;
@@ -833,15 +748,17 @@ static int program_rcvarray(struct hfi1_filedata *fd, unsigned long vaddr,
return idx;
}
-static int set_rcvarray_entry(struct hfi1_filedata *fd, unsigned long vaddr,
+static int set_rcvarray_entry(struct hfi1_filedata *fd,
+ struct tid_user_buf *tbuf,
u32 rcventry, struct tid_group *grp,
- struct page **pages, unsigned npages)
+ u16 pageidx, unsigned int npages)
{
int ret;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct tid_rb_node *node;
struct hfi1_devdata *dd = uctxt->dd;
dma_addr_t phys;
+ struct page **pages = tbuf->pages + pageidx;
/*
* Allocate the node first so we can handle a potential
@@ -862,7 +779,7 @@ static int set_rcvarray_entry(struct hfi1_filedata *fd, unsigned long vaddr,
return -EFAULT;
}
- node->mmu.addr = vaddr;
+ node->mmu.addr = tbuf->vaddr + (pageidx * PAGE_SIZE);
node->mmu.len = npages * PAGE_SIZE;
node->phys = page_to_phys(pages[0]);
node->npages = npages;
@@ -935,17 +852,13 @@ static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node)
node->npages, node->mmu.addr, node->phys,
node->dma_addr);
- hfi1_put_tid(dd, node->rcventry, PT_INVALID, 0, 0);
/*
* Make sure device has seen the write before we unpin the
* pages.
*/
- flush_wc();
+ hfi1_put_tid(dd, node->rcventry, PT_INVALID_FLUSH, 0, 0);
- pci_unmap_single(dd->pcidev, node->dma_addr, node->mmu.len,
- PCI_DMA_FROMDEVICE);
- hfi1_release_user_pages(fd->mm, node->pages, node->npages, true);
- fd->tid_n_pinned -= node->npages;
+ unpin_rcv_pages(fd, NULL, node, 0, node->npages, true);
node->grp->used--;
node->grp->map &= ~(1 << (node->rcventry - node->grp->base));
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.h b/drivers/infiniband/hw/hfi1/user_exp_rcv.h
index 5250c897298d..e383cc01a2bf 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.h
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.h
@@ -49,30 +49,44 @@
#include "hfi.h"
-#define EXP_TID_TIDLEN_MASK 0x7FFULL
-#define EXP_TID_TIDLEN_SHIFT 0
-#define EXP_TID_TIDCTRL_MASK 0x3ULL
-#define EXP_TID_TIDCTRL_SHIFT 20
-#define EXP_TID_TIDIDX_MASK 0x3FFULL
-#define EXP_TID_TIDIDX_SHIFT 22
-#define EXP_TID_GET(tid, field) \
- (((tid) >> EXP_TID_TID##field##_SHIFT) & EXP_TID_TID##field##_MASK)
+#include "exp_rcv.h"
-#define EXP_TID_SET(field, value) \
- (((value) & EXP_TID_TID##field##_MASK) << \
- EXP_TID_TID##field##_SHIFT)
-#define EXP_TID_CLEAR(tid, field) ({ \
- (tid) &= ~(EXP_TID_TID##field##_MASK << \
- EXP_TID_TID##field##_SHIFT); \
- })
-#define EXP_TID_RESET(tid, field, value) do { \
- EXP_TID_CLEAR(tid, field); \
- (tid) |= EXP_TID_SET(field, (value)); \
- } while (0)
+struct tid_pageset {
+ u16 idx;
+ u16 count;
+};
-void hfi1_user_exp_rcv_grp_free(struct hfi1_ctxtdata *uctxt);
-int hfi1_user_exp_rcv_grp_init(struct hfi1_filedata *fd);
-int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd);
+struct tid_user_buf {
+ unsigned long vaddr;
+ unsigned long length;
+ unsigned int npages;
+ struct page **pages;
+ struct tid_pageset *psets;
+ unsigned int n_psets;
+};
+
+struct tid_rb_node {
+ struct mmu_rb_node mmu;
+ unsigned long phys;
+ struct tid_group *grp;
+ u32 rcventry;
+ dma_addr_t dma_addr;
+ bool freed;
+ unsigned int npages;
+ struct page *pages[0];
+};
+
+static inline int num_user_pages(unsigned long addr,
+ unsigned long len)
+{
+ const unsigned long spage = addr & PAGE_MASK;
+ const unsigned long epage = (addr + len - 1) & PAGE_MASK;
+
+ return 1 + ((epage - spage) >> PAGE_SHIFT);
+}
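num_user_pages() above counts the pages a user buffer touches by comparing the page containing the first byte with the page containing the last byte. A standalone sketch with a fixed 4 KiB page size (the real PAGE_MASK/PAGE_SHIFT come from the architecture headers):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

static int num_user_pages(unsigned long addr, unsigned long len)
{
	const unsigned long spage = addr & PAGE_MASK;              /* first page */
	const unsigned long epage = (addr + len - 1) & PAGE_MASK;  /* last page  */

	return 1 + ((epage - spage) >> PAGE_SHIFT);
}

int main(void)
{
	/* 16 bytes starting 4 bytes before a page boundary -> 2 pages. */
	printf("%d\n", num_user_pages(0x1ffc, 16));
	/* One full page, page aligned -> 1 page. */
	printf("%d\n", num_user_pages(0x2000, 4096));
	return 0;
}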
+
+int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd,
+ struct hfi1_ctxtdata *uctxt);
void hfi1_user_exp_rcv_free(struct hfi1_filedata *fd);
int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
struct hfi1_tid_info *tinfo);
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index d55339f5d73b..c0c0e0445cbf 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -64,224 +64,20 @@
#include "hfi.h"
#include "sdma.h"
+#include "mmu_rb.h"
#include "user_sdma.h"
#include "verbs.h" /* for the headers */
#include "common.h" /* for struct hfi1_tid_info */
#include "trace.h"
-#include "mmu_rb.h"
static uint hfi1_sdma_comp_ring_size = 128;
module_param_named(sdma_comp_size, hfi1_sdma_comp_ring_size, uint, S_IRUGO);
MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 128");
-/* The maximum number of Data io vectors per message/request */
-#define MAX_VECTORS_PER_REQ 8
-/*
- * Maximum number of packet to send from each message/request
- * before moving to the next one.
- */
-#define MAX_PKTS_PER_QUEUE 16
-
-#define num_pages(x) (1 + ((((x) - 1) & PAGE_MASK) >> PAGE_SHIFT))
-
-#define req_opcode(x) \
- (((x) >> HFI1_SDMA_REQ_OPCODE_SHIFT) & HFI1_SDMA_REQ_OPCODE_MASK)
-#define req_version(x) \
- (((x) >> HFI1_SDMA_REQ_VERSION_SHIFT) & HFI1_SDMA_REQ_OPCODE_MASK)
-#define req_iovcnt(x) \
- (((x) >> HFI1_SDMA_REQ_IOVCNT_SHIFT) & HFI1_SDMA_REQ_IOVCNT_MASK)
-
-/* Number of BTH.PSN bits used for sequence number in expected rcvs */
-#define BTH_SEQ_MASK 0x7ffull
-
-/*
- * Define fields in the KDETH header so we can update the header
- * template.
- */
-#define KDETH_OFFSET_SHIFT 0
-#define KDETH_OFFSET_MASK 0x7fff
-#define KDETH_OM_SHIFT 15
-#define KDETH_OM_MASK 0x1
-#define KDETH_TID_SHIFT 16
-#define KDETH_TID_MASK 0x3ff
-#define KDETH_TIDCTRL_SHIFT 26
-#define KDETH_TIDCTRL_MASK 0x3
-#define KDETH_INTR_SHIFT 28
-#define KDETH_INTR_MASK 0x1
-#define KDETH_SH_SHIFT 29
-#define KDETH_SH_MASK 0x1
-#define KDETH_HCRC_UPPER_SHIFT 16
-#define KDETH_HCRC_UPPER_MASK 0xff
-#define KDETH_HCRC_LOWER_SHIFT 24
-#define KDETH_HCRC_LOWER_MASK 0xff
-
-#define AHG_KDETH_INTR_SHIFT 12
-#define AHG_KDETH_SH_SHIFT 13
-
-#define PBC2LRH(x) ((((x) & 0xfff) << 2) - 4)
-#define LRH2PBC(x) ((((x) >> 2) + 1) & 0xfff)
-
-#define KDETH_GET(val, field) \
- (((le32_to_cpu((val))) >> KDETH_##field##_SHIFT) & KDETH_##field##_MASK)
-#define KDETH_SET(dw, field, val) do { \
- u32 dwval = le32_to_cpu(dw); \
- dwval &= ~(KDETH_##field##_MASK << KDETH_##field##_SHIFT); \
- dwval |= (((val) & KDETH_##field##_MASK) << \
- KDETH_##field##_SHIFT); \
- dw = cpu_to_le32(dwval); \
- } while (0)
-
-#define AHG_HEADER_SET(arr, idx, dw, bit, width, value) \
- do { \
- if ((idx) < ARRAY_SIZE((arr))) \
- (arr)[(idx++)] = sdma_build_ahg_descriptor( \
- (__force u16)(value), (dw), (bit), \
- (width)); \
- else \
- return -ERANGE; \
- } while (0)
-
-/* KDETH OM multipliers and switch over point */
-#define KDETH_OM_SMALL 4
-#define KDETH_OM_SMALL_SHIFT 2
-#define KDETH_OM_LARGE 64
-#define KDETH_OM_LARGE_SHIFT 6
-#define KDETH_OM_MAX_SIZE (1 << ((KDETH_OM_LARGE / KDETH_OM_SMALL) + 1))
-
-/* Tx request flag bits */
-#define TXREQ_FLAGS_REQ_ACK BIT(0) /* Set the ACK bit in the header */
-#define TXREQ_FLAGS_REQ_DISABLE_SH BIT(1) /* Disable header suppression */
-
-/* SDMA request flag bits */
-#define SDMA_REQ_FOR_THREAD 1
-#define SDMA_REQ_SEND_DONE 2
-#define SDMA_REQ_HAS_ERROR 3
-#define SDMA_REQ_DONE_ERROR 4
-
-#define SDMA_PKT_Q_INACTIVE BIT(0)
-#define SDMA_PKT_Q_ACTIVE BIT(1)
-#define SDMA_PKT_Q_DEFERRED BIT(2)
-
-/*
- * Maximum retry attempts to submit a TX request
- * before putting the process to sleep.
- */
-#define MAX_DEFER_RETRY_COUNT 1
-
static unsigned initial_pkt_count = 8;
-#define SDMA_IOWAIT_TIMEOUT 1000 /* in milliseconds */
-
-struct sdma_mmu_node;
-
-struct user_sdma_iovec {
- struct list_head list;
- struct iovec iov;
- /* number of pages in this vector */
- unsigned npages;
- /* array of pinned pages for this vector */
- struct page **pages;
- /*
- * offset into the virtual address space of the vector at
- * which we last left off.
- */
- u64 offset;
- struct sdma_mmu_node *node;
-};
-
-struct sdma_mmu_node {
- struct mmu_rb_node rb;
- struct hfi1_user_sdma_pkt_q *pq;
- atomic_t refcount;
- struct page **pages;
- unsigned npages;
-};
-
-/* evict operation argument */
-struct evict_data {
- u32 cleared; /* count evicted so far */
- u32 target; /* target count to evict */
-};
-
-struct user_sdma_request {
- struct sdma_req_info info;
- struct hfi1_user_sdma_pkt_q *pq;
- struct hfi1_user_sdma_comp_q *cq;
- /* This is the original header from user space */
- struct hfi1_pkt_header hdr;
- /*
- * Pointer to the SDMA engine for this request.
- * Since different request could be on different VLs,
- * each request will need it's own engine pointer.
- */
- struct sdma_engine *sde;
- s8 ahg_idx;
- u32 ahg[9];
- /*
- * KDETH.Offset (Eager) field
- * We need to remember the initial value so the headers
- * can be updated properly.
- */
- u32 koffset;
- /*
- * KDETH.OFFSET (TID) field
- * The offset can cover multiple packets, depending on the
- * size of the TID entry.
- */
- u32 tidoffset;
- /*
- * We copy the iovs for this request (based on
- * info.iovcnt). These are only the data vectors
- */
- unsigned data_iovs;
- /* total length of the data in the request */
- u32 data_len;
- /* progress index moving along the iovs array */
- unsigned iov_idx;
- struct user_sdma_iovec iovs[MAX_VECTORS_PER_REQ];
- /* number of elements copied to the tids array */
- u16 n_tids;
- /* TID array values copied from the tid_iov vector */
- u32 *tids;
- u16 tididx;
- u32 sent;
- u64 seqnum;
- u64 seqcomp;
- u64 seqsubmitted;
- struct list_head txps;
- unsigned long flags;
- /* status of the last txreq completed */
- int status;
-};
-
-/*
- * A single txreq could span up to 3 physical pages when the MTU
- * is sufficiently large (> 4K). Each of the IOV pointers also
- * needs it's own set of flags so the vector has been handled
- * independently of each other.
- */
-struct user_sdma_txreq {
- /* Packet header for the txreq */
- struct hfi1_pkt_header hdr;
- struct sdma_txreq txreq;
- struct list_head list;
- struct user_sdma_request *req;
- u16 flags;
- unsigned busycount;
- u64 seqnum;
-};
-
-#define SDMA_DBG(req, fmt, ...) \
- hfi1_cdbg(SDMA, "[%u:%u:%u:%u] " fmt, (req)->pq->dd->unit, \
- (req)->pq->ctxt, (req)->pq->subctxt, (req)->info.comp_idx, \
- ##__VA_ARGS__)
-#define SDMA_Q_DBG(pq, fmt, ...) \
- hfi1_cdbg(SDMA, "[%u:%u:%u] " fmt, (pq)->dd->unit, (pq)->ctxt, \
- (pq)->subctxt, ##__VA_ARGS__)
-
static int user_sdma_send_pkts(struct user_sdma_request *req,
unsigned maxpkts);
-static int num_user_pages(const struct iovec *iov);
static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status);
static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq);
static void user_sdma_free_request(struct user_sdma_request *req, bool unpin);
@@ -307,7 +103,8 @@ static int defer_packet_queue(
struct sdma_engine *sde,
struct iowait *wait,
struct sdma_txreq *txreq,
- unsigned int seq);
+ uint seq,
+ bool pkts_sent);
static void activate_packet_queue(struct iowait *wait, int reason);
static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
unsigned long len);
@@ -329,7 +126,8 @@ static int defer_packet_queue(
struct sdma_engine *sde,
struct iowait *wait,
struct sdma_txreq *txreq,
- unsigned seq)
+ uint seq,
+ bool pkts_sent)
{
struct hfi1_user_sdma_pkt_q *pq =
container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
@@ -349,7 +147,7 @@ static int defer_packet_queue(
xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
write_seqlock(&dev->iowait_lock);
if (list_empty(&pq->busy.list))
- list_add_tail(&pq->busy.list, &sde->dmawait);
+ iowait_queue(pkts_sent, &pq->busy, &sde->dmawait);
write_sequnlock(&dev->iowait_lock);
return -EBUSY;
eagain:
@@ -364,13 +162,6 @@ static void activate_packet_queue(struct iowait *wait, int reason)
wake_up(&wait->wait_dma);
};
-static void sdma_kmem_cache_ctor(void *obj)
-{
- struct user_sdma_txreq *tx = obj;
-
- memset(tx, 0, sizeof(*tx));
-}
-
int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
struct hfi1_filedata *fd)
{
@@ -379,7 +170,6 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
struct hfi1_devdata *dd;
struct hfi1_user_sdma_comp_q *cq;
struct hfi1_user_sdma_pkt_q *pq;
- unsigned long flags;
if (!uctxt || !fd)
return -EBADF;
@@ -393,7 +183,6 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
if (!pq)
return -ENOMEM;
- INIT_LIST_HEAD(&pq->list);
pq->dd = dd;
pq->ctxt = uctxt->ctxt;
pq->subctxt = fd->subctxt;
@@ -426,7 +215,7 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
sizeof(struct user_sdma_txreq),
L1_CACHE_BYTES,
SLAB_HWCACHE_ALIGN,
- sdma_kmem_cache_ctor);
+ NULL);
if (!pq->txreq_cache) {
dd_dev_err(dd, "[%u] Failed to allocate TxReq cache\n",
uctxt->ctxt);
@@ -454,10 +243,6 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
fd->pq = pq;
fd->cq = cq;
- spin_lock_irqsave(&uctxt->sdma_qlock, flags);
- list_add(&pq->list, &uctxt->sdma_queues);
- spin_unlock_irqrestore(&uctxt->sdma_qlock, flags);
-
return 0;
pq_mmu_fail:
@@ -476,22 +261,17 @@ pq_reqs_nomem:
return ret;
}
-int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd)
+int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
+ struct hfi1_ctxtdata *uctxt)
{
- struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_user_sdma_pkt_q *pq;
- unsigned long flags;
- hfi1_cdbg(SDMA, "[%u:%u:%u] Freeing user SDMA queues", uctxt->dd->unit,
- uctxt->ctxt, fd->subctxt);
+ trace_hfi1_sdma_user_free_queues(uctxt->dd, uctxt->ctxt, fd->subctxt);
+
pq = fd->pq;
if (pq) {
if (pq->handler)
hfi1_mmu_rb_unregister(pq->handler);
- spin_lock_irqsave(&uctxt->sdma_qlock, flags);
- if (!list_empty(&pq->list))
- list_del_init(&pq->list);
- spin_unlock_irqrestore(&uctxt->sdma_qlock, flags);
iowait_sdma_drain(&pq->busy);
/* Wait until all requests have been freed. */
wait_event_interruptible(
@@ -546,6 +326,8 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
struct sdma_req_info info;
struct user_sdma_request *req;
u8 opcode, sc, vl;
+ u16 pkey;
+ u32 slid;
int req_queued = 0;
u16 dlid;
u32 selector;
@@ -567,7 +349,6 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
trace_hfi1_sdma_user_reqinfo(dd, uctxt->ctxt, fd->subctxt,
(u16 *)&info);
-
if (info.comp_idx >= hfi1_sdma_comp_ring_size) {
hfi1_cdbg(SDMA,
"[%u:%u:%u:%u] Invalid comp index",
@@ -604,15 +385,23 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
/*
* All safety checks have been done and this request has been claimed.
*/
- hfi1_cdbg(SDMA, "[%u:%u:%u] Using req/comp entry %u\n", dd->unit,
- uctxt->ctxt, fd->subctxt, info.comp_idx);
+ trace_hfi1_sdma_user_process_request(dd, uctxt->ctxt, fd->subctxt,
+ info.comp_idx);
req = pq->reqs + info.comp_idx;
- memset(req, 0, sizeof(*req));
req->data_iovs = req_iovcnt(info.ctrl) - 1; /* subtract header vector */
+ req->data_len = 0;
req->pq = pq;
req->cq = cq;
req->status = -1;
req->ahg_idx = -1;
+ req->iov_idx = 0;
+ req->sent = 0;
+ req->seqnum = 0;
+ req->seqcomp = 0;
+ req->seqsubmitted = 0;
+ req->tids = NULL;
+ req->done = 0;
+ req->has_error = 0;
INIT_LIST_HEAD(&req->txps);
memcpy(&req->info, &info, sizeof(info));
@@ -671,8 +460,9 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
}
/* Checking P_KEY for requests from user-space */
- if (egress_pkey_check(dd->pport, req->hdr.lrh, req->hdr.bth, sc,
- PKEY_CHECK_INVALID)) {
+ pkey = (u16)be32_to_cpu(req->hdr.bth[0]);
+ slid = be16_to_cpu(req->hdr.lrh[3]);
+ if (egress_pkey_check(dd->pport, slid, pkey, sc, PKEY_CHECK_INVALID)) {
ret = -EINVAL;
goto free_req;
}
@@ -696,24 +486,27 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
req->tidoffset = KDETH_GET(req->hdr.kdeth.ver_tid_offset, OFFSET) *
(KDETH_GET(req->hdr.kdeth.ver_tid_offset, OM) ?
KDETH_OM_LARGE : KDETH_OM_SMALL);
- SDMA_DBG(req, "Initial TID offset %u", req->tidoffset);
+ trace_hfi1_sdma_user_initial_tidoffset(dd, uctxt->ctxt, fd->subctxt,
+ info.comp_idx, req->tidoffset);
idx++;
/* Save all the IO vector structures */
for (i = 0; i < req->data_iovs; i++) {
+ req->iovs[i].offset = 0;
INIT_LIST_HEAD(&req->iovs[i].list);
memcpy(&req->iovs[i].iov,
iovec + idx++,
sizeof(req->iovs[i].iov));
ret = pin_vector_pages(req, &req->iovs[i]);
if (ret) {
+ req->data_iovs = i;
req->status = ret;
goto free_req;
}
req->data_len += req->iovs[i].iov.iov_len;
}
- SDMA_DBG(req, "total data length %u", req->data_len);
-
+ trace_hfi1_sdma_user_data_length(dd, uctxt->ctxt, fd->subctxt,
+ info.comp_idx, req->data_len);
if (pcount > req->info.npkts)
pcount = req->info.npkts;
/*
@@ -749,6 +542,7 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
}
req->tids = tmp;
req->n_tids = ntids;
+ req->tididx = 0;
idx++;
}
@@ -791,12 +585,12 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
* request have been submitted to the SDMA engine. However, it
* will not wait for send completions.
*/
- while (!test_bit(SDMA_REQ_SEND_DONE, &req->flags)) {
+ while (req->seqsubmitted != req->info.npkts) {
ret = user_sdma_send_pkts(req, pcount);
if (ret < 0) {
if (ret != -EBUSY) {
req->status = ret;
- set_bit(SDMA_REQ_DONE_ERROR, &req->flags);
+ WRITE_ONCE(req->has_error, 1);
if (ACCESS_ONCE(req->seqcomp) ==
req->seqsubmitted - 1)
goto free_req;
@@ -867,7 +661,11 @@ static inline u32 compute_data_length(struct user_sdma_request *req,
} else {
len = min(req->data_len - req->sent, (u32)req->info.fragsize);
}
- SDMA_DBG(req, "Data Length = %u", len);
+ trace_hfi1_sdma_user_compute_length(req->pq->dd,
+ req->pq->ctxt,
+ req->pq->subctxt,
+ req->info.comp_idx,
+ len);
return len;
}
@@ -884,6 +682,84 @@ static inline u32 get_lrh_len(struct hfi1_pkt_header hdr, u32 len)
return ((sizeof(hdr) - sizeof(hdr.pbc)) + 4 + len);
}
+static int user_sdma_txadd_ahg(struct user_sdma_request *req,
+ struct user_sdma_txreq *tx,
+ u32 datalen)
+{
+ int ret;
+ u16 pbclen = le16_to_cpu(req->hdr.pbc[0]);
+ u32 lrhlen = get_lrh_len(req->hdr, pad_len(datalen));
+ struct hfi1_user_sdma_pkt_q *pq = req->pq;
+
+ /*
+ * Copy the request header into the tx header
+ * because the HW needs a cacheline-aligned
+ * address.
+ * This copy can be optimized out if the hdr
+ * member of user_sdma_request were also
+ * cacheline aligned.
+ */
+ memcpy(&tx->hdr, &req->hdr, sizeof(tx->hdr));
+ if (PBC2LRH(pbclen) != lrhlen) {
+ pbclen = (pbclen & 0xf000) | LRH2PBC(lrhlen);
+ tx->hdr.pbc[0] = cpu_to_le16(pbclen);
+ }
+ ret = check_header_template(req, &tx->hdr, lrhlen, datalen);
+ if (ret)
+ return ret;
+ ret = sdma_txinit_ahg(&tx->txreq, SDMA_TXREQ_F_AHG_COPY,
+ sizeof(tx->hdr) + datalen, req->ahg_idx,
+ 0, NULL, 0, user_sdma_txreq_cb);
+ if (ret)
+ return ret;
+ ret = sdma_txadd_kvaddr(pq->dd, &tx->txreq, &tx->hdr, sizeof(tx->hdr));
+ if (ret)
+ sdma_txclean(pq->dd, &tx->txreq);
+ return ret;
+}
+
+static int user_sdma_txadd(struct user_sdma_request *req,
+ struct user_sdma_txreq *tx,
+ struct user_sdma_iovec *iovec, u32 datalen,
+ u32 *queued_ptr, u32 *data_sent_ptr,
+ u64 *iov_offset_ptr)
+{
+ int ret;
+ unsigned int pageidx, len;
+ unsigned long base, offset;
+ u64 iov_offset = *iov_offset_ptr;
+ u32 queued = *queued_ptr, data_sent = *data_sent_ptr;
+ struct hfi1_user_sdma_pkt_q *pq = req->pq;
+
+ base = (unsigned long)iovec->iov.iov_base;
+ offset = offset_in_page(base + iovec->offset + iov_offset);
+ pageidx = (((iovec->offset + iov_offset + base) - (base & PAGE_MASK)) >>
+ PAGE_SHIFT);
+ len = offset + req->info.fragsize > PAGE_SIZE ?
+ PAGE_SIZE - offset : req->info.fragsize;
+ len = min((datalen - queued), len);
+ ret = sdma_txadd_page(pq->dd, &tx->txreq, iovec->pages[pageidx],
+ offset, len);
+ if (ret) {
+ SDMA_DBG(req, "SDMA txreq add page failed %d\n", ret);
+ return ret;
+ }
+ iov_offset += len;
+ queued += len;
+ data_sent += len;
+ if (unlikely(queued < datalen && pageidx == iovec->npages &&
+ req->iov_idx < req->data_iovs - 1)) {
+ iovec->offset += iov_offset;
+ iovec = &req->iovs[++req->iov_idx];
+ iov_offset = 0;
+ }
+
+ *queued_ptr = queued;
+ *data_sent_ptr = data_sent;
+ *iov_offset_ptr = iov_offset;
+ return ret;
+}
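user_sdma_txadd() above clamps every descriptor so it never crosses a page boundary: it takes the in-page offset of the current position, the index of the pinned page that position falls in, and caps the fragment length at the end of that page and at the remaining payload. A userspace sketch of that clamping with a fixed 4 KiB page and hypothetical values:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long base = 0x10000f00UL;   /* start of the user iovec      */
	unsigned long pos = 0x300;           /* bytes already consumed       */
	unsigned long fragsize = 0x1000;     /* requested fragment size      */
	unsigned long remaining = 0x2000;    /* payload left for this packet */

	unsigned long offset = (base + pos) & ~PAGE_MASK;  /* offset_in_page() */
	unsigned long pageidx = ((base + pos) - (base & PAGE_MASK)) >> PAGE_SHIFT;
	unsigned long len = offset + fragsize > PAGE_SIZE ?
			    PAGE_SIZE - offset : fragsize;

	len = min_ul(remaining, len);
	/* -> page 1, offset 0x200, len 0xe00 (clamped at the page end). */
	printf("page %lu, offset 0x%lx, len 0x%lx\n", pageidx, offset, len);
	return 0;
}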
+
static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
{
int ret = 0, count;
@@ -898,10 +774,8 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
pq = req->pq;
/* If tx completion has reported an error, we are done. */
- if (test_bit(SDMA_REQ_HAS_ERROR, &req->flags)) {
- set_bit(SDMA_REQ_DONE_ERROR, &req->flags);
+ if (READ_ONCE(req->has_error))
return -EFAULT;
- }
/*
* Check if we might have sent the entire request already
@@ -924,10 +798,8 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
* with errors. If so, we are not going to process any
* more packets from this request.
*/
- if (test_bit(SDMA_REQ_HAS_ERROR, &req->flags)) {
- set_bit(SDMA_REQ_DONE_ERROR, &req->flags);
+ if (READ_ONCE(req->has_error))
return -EFAULT;
- }
tx = kmem_cache_alloc(pq->txreq_cache, GFP_KERNEL);
if (!tx)
@@ -984,39 +856,9 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
if (req->ahg_idx >= 0) {
if (!req->seqnum) {
- u16 pbclen = le16_to_cpu(req->hdr.pbc[0]);
- u32 lrhlen = get_lrh_len(req->hdr,
- pad_len(datalen));
- /*
- * Copy the request header into the tx header
- * because the HW needs a cacheline-aligned
- * address.
- * This copy can be optimized out if the hdr
- * member of user_sdma_request were also
- * cacheline aligned.
- */
- memcpy(&tx->hdr, &req->hdr, sizeof(tx->hdr));
- if (PBC2LRH(pbclen) != lrhlen) {
- pbclen = (pbclen & 0xf000) |
- LRH2PBC(lrhlen);
- tx->hdr.pbc[0] = cpu_to_le16(pbclen);
- }
- ret = check_header_template(req, &tx->hdr,
- lrhlen, datalen);
+ ret = user_sdma_txadd_ahg(req, tx, datalen);
if (ret)
goto free_tx;
- ret = sdma_txinit_ahg(&tx->txreq,
- SDMA_TXREQ_F_AHG_COPY,
- sizeof(tx->hdr) + datalen,
- req->ahg_idx, 0, NULL, 0,
- user_sdma_txreq_cb);
- if (ret)
- goto free_tx;
- ret = sdma_txadd_kvaddr(pq->dd, &tx->txreq,
- &tx->hdr,
- sizeof(tx->hdr));
- if (ret)
- goto free_txreq;
} else {
int changes;
@@ -1024,11 +866,6 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
datalen);
if (changes < 0)
goto free_tx;
- sdma_txinit_ahg(&tx->txreq,
- SDMA_TXREQ_F_USE_AHG,
- datalen, req->ahg_idx, changes,
- req->ahg, sizeof(req->hdr),
- user_sdma_txreq_cb);
}
} else {
ret = sdma_txinit(&tx->txreq, 0, sizeof(req->hdr) +
@@ -1052,35 +889,10 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
*/
while (queued < datalen &&
(req->sent + data_sent) < req->data_len) {
- unsigned long base, offset;
- unsigned pageidx, len;
-
- base = (unsigned long)iovec->iov.iov_base;
- offset = offset_in_page(base + iovec->offset +
- iov_offset);
- pageidx = (((iovec->offset + iov_offset +
- base) - (base & PAGE_MASK)) >> PAGE_SHIFT);
- len = offset + req->info.fragsize > PAGE_SIZE ?
- PAGE_SIZE - offset : req->info.fragsize;
- len = min((datalen - queued), len);
- ret = sdma_txadd_page(pq->dd, &tx->txreq,
- iovec->pages[pageidx],
- offset, len);
- if (ret) {
- SDMA_DBG(req, "SDMA txreq add page failed %d\n",
- ret);
+ ret = user_sdma_txadd(req, tx, iovec, datalen,
+ &queued, &data_sent, &iov_offset);
+ if (ret)
goto free_txreq;
- }
- iov_offset += len;
- queued += len;
- data_sent += len;
- if (unlikely(queued < datalen &&
- pageidx == iovec->npages &&
- req->iov_idx < req->data_iovs - 1)) {
- iovec->offset += iov_offset;
- iovec = &req->iovs[++req->iov_idx];
- iov_offset = 0;
- }
}
/*
* The txreq was submitted successfully so we can update
@@ -1105,7 +917,7 @@ dosend:
ret = sdma_send_txlist(req->sde, &pq->busy, &req->txps, &count);
req->seqsubmitted += count;
if (req->seqsubmitted == req->info.npkts) {
- set_bit(SDMA_REQ_SEND_DONE, &req->flags);
+ WRITE_ONCE(req->done, 1);
/*
* The txreq has already been submitted to the HW queue
* so we can free the AHG entry now. Corruption will not
@@ -1124,19 +936,6 @@ free_tx:
return ret;
}
-/*
- * How many pages in this iovec element?
- */
-static inline int num_user_pages(const struct iovec *iov)
-{
- const unsigned long addr = (unsigned long)iov->iov_base;
- const unsigned long len = iov->iov_len;
- const unsigned long spage = addr & PAGE_MASK;
- const unsigned long epage = (addr + len - 1) & PAGE_MASK;
-
- return 1 + ((epage - spage) >> PAGE_SHIFT);
-}
-
static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages)
{
struct evict_data evict_data;
@@ -1147,22 +946,82 @@ static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages)
return evict_data.cleared;
}
+static int pin_sdma_pages(struct user_sdma_request *req,
+ struct user_sdma_iovec *iovec,
+ struct sdma_mmu_node *node,
+ int npages)
+{
+ int pinned, cleared;
+ struct page **pages;
+ struct hfi1_user_sdma_pkt_q *pq = req->pq;
+
+ pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
+ if (!pages) {
+ SDMA_DBG(req, "Failed page array alloc");
+ return -ENOMEM;
+ }
+ memcpy(pages, node->pages, node->npages * sizeof(*pages));
+
+ npages -= node->npages;
+retry:
+ if (!hfi1_can_pin_pages(pq->dd, pq->mm,
+ atomic_read(&pq->n_locked), npages)) {
+ cleared = sdma_cache_evict(pq, npages);
+ if (cleared >= npages)
+ goto retry;
+ }
+ pinned = hfi1_acquire_user_pages(pq->mm,
+ ((unsigned long)iovec->iov.iov_base +
+ (node->npages * PAGE_SIZE)), npages, 0,
+ pages + node->npages);
+ if (pinned < 0) {
+ kfree(pages);
+ return pinned;
+ }
+ if (pinned != npages) {
+ unpin_vector_pages(pq->mm, pages, node->npages, pinned);
+ return -EFAULT;
+ }
+ kfree(node->pages);
+ node->rb.len = iovec->iov.iov_len;
+ node->pages = pages;
+ atomic_add(pinned, &pq->n_locked);
+ return pinned;
+}
+
+static void unpin_sdma_pages(struct sdma_mmu_node *node)
+{
+ if (node->npages) {
+ unpin_vector_pages(node->pq->mm, node->pages, 0, node->npages);
+ atomic_sub(node->npages, &node->pq->n_locked);
+ }
+}
+
static int pin_vector_pages(struct user_sdma_request *req,
struct user_sdma_iovec *iovec)
{
- int ret = 0, pinned, npages, cleared;
- struct page **pages;
+ int ret = 0, pinned, npages;
struct hfi1_user_sdma_pkt_q *pq = req->pq;
struct sdma_mmu_node *node = NULL;
struct mmu_rb_node *rb_node;
-
- rb_node = hfi1_mmu_rb_extract(pq->handler,
- (unsigned long)iovec->iov.iov_base,
- iovec->iov.iov_len);
- if (rb_node)
+ struct iovec *iov;
+ bool extracted;
+
+ extracted =
+ hfi1_mmu_rb_remove_unless_exact(pq->handler,
+ (unsigned long)
+ iovec->iov.iov_base,
+ iovec->iov.iov_len, &rb_node);
+ if (rb_node) {
node = container_of(rb_node, struct sdma_mmu_node, rb);
- else
- rb_node = NULL;
+ if (!extracted) {
+ atomic_inc(&node->refcount);
+ iovec->pages = node->pages;
+ iovec->npages = node->npages;
+ iovec->node = node;
+ return 0;
+ }
+ }
if (!node) {
node = kzalloc(sizeof(*node), GFP_KERNEL);
@@ -1174,46 +1033,16 @@ static int pin_vector_pages(struct user_sdma_request *req,
atomic_set(&node->refcount, 0);
}
- npages = num_user_pages(&iovec->iov);
+ iov = &iovec->iov;
+ npages = num_user_pages((unsigned long)iov->iov_base, iov->iov_len);
if (node->npages < npages) {
- pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
- if (!pages) {
- SDMA_DBG(req, "Failed page array alloc");
- ret = -ENOMEM;
- goto bail;
- }
- memcpy(pages, node->pages, node->npages * sizeof(*pages));
-
- npages -= node->npages;
-
-retry:
- if (!hfi1_can_pin_pages(pq->dd, pq->mm,
- atomic_read(&pq->n_locked), npages)) {
- cleared = sdma_cache_evict(pq, npages);
- if (cleared >= npages)
- goto retry;
- }
- pinned = hfi1_acquire_user_pages(pq->mm,
- ((unsigned long)iovec->iov.iov_base +
- (node->npages * PAGE_SIZE)), npages, 0,
- pages + node->npages);
+ pinned = pin_sdma_pages(req, iovec, node, npages);
if (pinned < 0) {
- kfree(pages);
ret = pinned;
goto bail;
}
- if (pinned != npages) {
- unpin_vector_pages(pq->mm, pages, node->npages,
- pinned);
- ret = -EFAULT;
- goto bail;
- }
- kfree(node->pages);
- node->rb.len = iovec->iov.iov_len;
- node->pages = pages;
node->npages += pinned;
npages = node->npages;
- atomic_add(pinned, &pq->n_locked);
}
iovec->pages = node->pages;
iovec->npages = npages;
@@ -1221,14 +1050,12 @@ retry:
ret = hfi1_mmu_rb_insert(req->pq->handler, &node->rb);
if (ret) {
- atomic_sub(node->npages, &pq->n_locked);
iovec->node = NULL;
goto bail;
}
return 0;
bail:
- if (rb_node)
- unpin_vector_pages(pq->mm, node->pages, 0, node->npages);
+ unpin_sdma_pages(node);
kfree(node);
return ret;
}
@@ -1408,9 +1235,10 @@ static int set_txreq_header(struct user_sdma_request *req,
* Set the KDETH.OFFSET and KDETH.OM based on size of
* transfer.
*/
- SDMA_DBG(req, "TID offset %ubytes %uunits om%u",
- req->tidoffset, req->tidoffset >> omfactor,
- omfactor != KDETH_OM_SMALL_SHIFT);
+ trace_hfi1_sdma_user_tid_info(
+ pq->dd, pq->ctxt, pq->subctxt, req->info.comp_idx,
+ req->tidoffset, req->tidoffset >> omfactor,
+ omfactor != KDETH_OM_SMALL_SHIFT);
KDETH_SET(hdr->kdeth.ver_tid_offset, OFFSET,
req->tidoffset >> omfactor);
KDETH_SET(hdr->kdeth.ver_tid_offset, OM,
@@ -1423,21 +1251,22 @@ done:
}
static int set_txreq_header_ahg(struct user_sdma_request *req,
- struct user_sdma_txreq *tx, u32 len)
+ struct user_sdma_txreq *tx, u32 datalen)
{
+ u32 ahg[AHG_KDETH_ARRAY_SIZE];
int diff = 0;
u8 omfactor; /* KDETH.OM */
struct hfi1_user_sdma_pkt_q *pq = req->pq;
struct hfi1_pkt_header *hdr = &req->hdr;
u16 pbclen = le16_to_cpu(hdr->pbc[0]);
- u32 val32, tidval = 0, lrhlen = get_lrh_len(*hdr, pad_len(len));
+ u32 val32, tidval = 0, lrhlen = get_lrh_len(*hdr, pad_len(datalen));
if (PBC2LRH(pbclen) != lrhlen) {
/* PBC.PbcLengthDWs */
- AHG_HEADER_SET(req->ahg, diff, 0, 0, 12,
+ AHG_HEADER_SET(ahg, diff, 0, 0, 12,
cpu_to_le16(LRH2PBC(lrhlen)));
/* LRH.PktLen (we need the full 16 bits due to byte swap) */
- AHG_HEADER_SET(req->ahg, diff, 3, 0, 16,
+ AHG_HEADER_SET(ahg, diff, 3, 0, 16,
cpu_to_be16(lrhlen >> 2));
}
@@ -1449,13 +1278,12 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
(HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffff : 0xffffff);
if (unlikely(tx->flags & TXREQ_FLAGS_REQ_ACK))
val32 |= 1UL << 31;
- AHG_HEADER_SET(req->ahg, diff, 6, 0, 16, cpu_to_be16(val32 >> 16));
- AHG_HEADER_SET(req->ahg, diff, 6, 16, 16, cpu_to_be16(val32 & 0xffff));
+ AHG_HEADER_SET(ahg, diff, 6, 0, 16, cpu_to_be16(val32 >> 16));
+ AHG_HEADER_SET(ahg, diff, 6, 16, 16, cpu_to_be16(val32 & 0xffff));
/* KDETH.Offset */
- AHG_HEADER_SET(req->ahg, diff, 15, 0, 16,
+ AHG_HEADER_SET(ahg, diff, 15, 0, 16,
cpu_to_le16(req->koffset & 0xffff));
- AHG_HEADER_SET(req->ahg, diff, 15, 16, 16,
- cpu_to_le16(req->koffset >> 16));
+ AHG_HEADER_SET(ahg, diff, 15, 16, 16, cpu_to_le16(req->koffset >> 16));
if (req_opcode(req->info.ctrl) == EXPECTED) {
__le16 val;
@@ -1473,9 +1301,8 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
* we have to check again.
*/
if (++req->tididx > req->n_tids - 1 ||
- !req->tids[req->tididx]) {
+ !req->tids[req->tididx])
return -EINVAL;
- }
tidval = req->tids[req->tididx];
}
omfactor = ((EXP_TID_GET(tidval, LEN) *
@@ -1483,7 +1310,7 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
KDETH_OM_MAX_SIZE) ? KDETH_OM_LARGE_SHIFT :
KDETH_OM_SMALL_SHIFT;
/* KDETH.OM and KDETH.OFFSET (TID) */
- AHG_HEADER_SET(req->ahg, diff, 7, 0, 16,
+ AHG_HEADER_SET(ahg, diff, 7, 0, 16,
((!!(omfactor - KDETH_OM_SMALL_SHIFT)) << 15 |
((req->tidoffset >> omfactor)
& 0x7fff)));
@@ -1503,12 +1330,20 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
AHG_KDETH_INTR_SHIFT));
}
- AHG_HEADER_SET(req->ahg, diff, 7, 16, 14, val);
+ AHG_HEADER_SET(ahg, diff, 7, 16, 14, val);
}
+ if (diff < 0)
+ return diff;
trace_hfi1_sdma_user_header_ahg(pq->dd, pq->ctxt, pq->subctxt,
req->info.comp_idx, req->sde->this_idx,
- req->ahg_idx, req->ahg, diff, tidval);
+ req->ahg_idx, ahg, diff, tidval);
+ sdma_txinit_ahg(&tx->txreq,
+ SDMA_TXREQ_F_USE_AHG,
+ datalen, req->ahg_idx, diff,
+ ahg, sizeof(req->hdr),
+ user_sdma_txreq_cb);
+
return diff;
}
@@ -1537,7 +1372,7 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
if (status != SDMA_TXREQ_S_OK) {
SDMA_DBG(req, "SDMA completion with error %d",
status);
- set_bit(SDMA_REQ_HAS_ERROR, &req->flags);
+ WRITE_ONCE(req->has_error, 1);
}
req->seqcomp = tx->seqnum;
@@ -1556,8 +1391,8 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
if (status != SDMA_TXREQ_S_OK)
req->status = status;
if (req->seqcomp == (ACCESS_ONCE(req->seqsubmitted) - 1) &&
- (test_bit(SDMA_REQ_SEND_DONE, &req->flags) ||
- test_bit(SDMA_REQ_DONE_ERROR, &req->flags))) {
+ (READ_ONCE(req->done) ||
+ READ_ONCE(req->has_error))) {
user_sdma_free_request(req, false);
pq_update(pq);
set_comp_state(pq, cq, idx, ERROR, req->status);
@@ -1611,8 +1446,6 @@ static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
u16 idx, enum hfi1_sdma_comp_state state,
int ret)
{
- hfi1_cdbg(SDMA, "[%u:%u:%u:%u] Setting completion status %u %d",
- pq->dd->unit, pq->ctxt, pq->subctxt, idx, state, ret);
if (state == ERROR)
cq->comps[idx].errcode = -ret;
smp_wmb(); /* make sure errcode is visible first */
@@ -1667,10 +1500,7 @@ static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode)
struct sdma_mmu_node *node =
container_of(mnode, struct sdma_mmu_node, rb);
- atomic_sub(node->npages, &node->pq->n_locked);
-
- unpin_vector_pages(node->pq->mm, node->pages, 0, node->npages);
-
+ unpin_sdma_pages(node);
kfree(node);
}
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h
index e5b10aefe212..9b8bb5634c0d 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.h
+++ b/drivers/infiniband/hw/hfi1/user_sdma.h
@@ -53,11 +53,68 @@
#include "iowait.h"
#include "user_exp_rcv.h"
+/* The maximum number of data I/O vectors per message/request */
+#define MAX_VECTORS_PER_REQ 8
+/*
+ * Maximum number of packets to send from each message/request
+ * before moving to the next one.
+ */
+#define MAX_PKTS_PER_QUEUE 16
+
+#define num_pages(x) (1 + ((((x) - 1) & PAGE_MASK) >> PAGE_SHIFT))
+
+#define req_opcode(x) \
+ (((x) >> HFI1_SDMA_REQ_OPCODE_SHIFT) & HFI1_SDMA_REQ_OPCODE_MASK)
+#define req_version(x) \
+ (((x) >> HFI1_SDMA_REQ_VERSION_SHIFT) & HFI1_SDMA_REQ_OPCODE_MASK)
+#define req_iovcnt(x) \
+ (((x) >> HFI1_SDMA_REQ_IOVCNT_SHIFT) & HFI1_SDMA_REQ_IOVCNT_MASK)
+
+/* Number of BTH.PSN bits used for sequence number in expected rcvs */
+#define BTH_SEQ_MASK 0x7ffull
+
+#define AHG_KDETH_INTR_SHIFT 12
+#define AHG_KDETH_SH_SHIFT 13
+#define AHG_KDETH_ARRAY_SIZE 9
+
+#define PBC2LRH(x) ((((x) & 0xfff) << 2) - 4)
+#define LRH2PBC(x) ((((x) >> 2) + 1) & 0xfff)
+
+#define AHG_HEADER_SET(arr, idx, dw, bit, width, value) \
+ do { \
+ if ((idx) < ARRAY_SIZE((arr))) \
+ (arr)[(idx++)] = sdma_build_ahg_descriptor( \
+ (__force u16)(value), (dw), (bit), \
+ (width)); \
+ else \
+ return -ERANGE; \
+ } while (0)
+
+/* Tx request flag bits */
+#define TXREQ_FLAGS_REQ_ACK BIT(0) /* Set the ACK bit in the header */
+#define TXREQ_FLAGS_REQ_DISABLE_SH BIT(1) /* Disable header suppression */
+
+#define SDMA_PKT_Q_INACTIVE BIT(0)
+#define SDMA_PKT_Q_ACTIVE BIT(1)
+#define SDMA_PKT_Q_DEFERRED BIT(2)
+
+/*
+ * Maximum retry attempts to submit a TX request
+ * before putting the process to sleep.
+ */
+#define MAX_DEFER_RETRY_COUNT 1
+
+#define SDMA_IOWAIT_TIMEOUT 1000 /* in milliseconds */
+
+#define SDMA_DBG(req, fmt, ...) \
+ hfi1_cdbg(SDMA, "[%u:%u:%u:%u] " fmt, (req)->pq->dd->unit, \
+ (req)->pq->ctxt, (req)->pq->subctxt, (req)->info.comp_idx, \
+ ##__VA_ARGS__)
+
extern uint extended_psn;
struct hfi1_user_sdma_pkt_q {
- struct list_head list;
- unsigned ctxt;
+ u16 ctxt;
u16 subctxt;
u16 n_max_reqs;
atomic_t n_reqs;
@@ -80,9 +137,115 @@ struct hfi1_user_sdma_comp_q {
struct hfi1_sdma_comp_entry *comps;
};
+struct sdma_mmu_node {
+ struct mmu_rb_node rb;
+ struct hfi1_user_sdma_pkt_q *pq;
+ atomic_t refcount;
+ struct page **pages;
+ unsigned int npages;
+};
+
+struct user_sdma_iovec {
+ struct list_head list;
+ struct iovec iov;
+ /* number of pages in this vector */
+ unsigned int npages;
+ /* array of pinned pages for this vector */
+ struct page **pages;
+ /*
+ * offset into the virtual address space of the vector at
+ * which we last left off.
+ */
+ u64 offset;
+ struct sdma_mmu_node *node;
+};
+
+/* evict operation argument */
+struct evict_data {
+ u32 cleared; /* count evicted so far */
+ u32 target; /* target count to evict */
+};
+
+struct user_sdma_request {
+ /* This is the original header from user space */
+ struct hfi1_pkt_header hdr;
+
+ /* Read mostly fields */
+ struct hfi1_user_sdma_pkt_q *pq ____cacheline_aligned_in_smp;
+ struct hfi1_user_sdma_comp_q *cq;
+ /*
+ * Pointer to the SDMA engine for this request.
+	 * Since different requests could be on different VLs,
+	 * each request will need its own engine pointer.
+ */
+ struct sdma_engine *sde;
+ struct sdma_req_info info;
+ /* TID array values copied from the tid_iov vector */
+ u32 *tids;
+ /* total length of the data in the request */
+ u32 data_len;
+ /* number of elements copied to the tids array */
+ u16 n_tids;
+ /*
+ * We copy the iovs for this request (based on
+ * info.iovcnt). These are only the data vectors
+ */
+ u8 data_iovs;
+ s8 ahg_idx;
+
+ /* Writeable fields shared with interrupt */
+ u64 seqcomp ____cacheline_aligned_in_smp;
+ u64 seqsubmitted;
+ /* status of the last txreq completed */
+ int status;
+
+ /* Send side fields */
+ struct list_head txps ____cacheline_aligned_in_smp;
+ u64 seqnum;
+ /*
+ * KDETH.OFFSET (TID) field
+ * The offset can cover multiple packets, depending on the
+ * size of the TID entry.
+ */
+ u32 tidoffset;
+ /*
+ * KDETH.Offset (Eager) field
+ * We need to remember the initial value so the headers
+ * can be updated properly.
+ */
+ u32 koffset;
+ u32 sent;
+ /* TID index copied from the tid_iov vector */
+ u16 tididx;
+ /* progress index moving along the iovs array */
+ u8 iov_idx;
+ u8 done;
+ u8 has_error;
+
+ struct user_sdma_iovec iovs[MAX_VECTORS_PER_REQ];
+} ____cacheline_aligned_in_smp;
+
+/*
+ * A single txreq could span up to 3 physical pages when the MTU
+ * is sufficiently large (> 4K). Each of the IOV pointers also
+ * needs its own set of flags so the vectors can be handled
+ * independently of each other.
+ */
+struct user_sdma_txreq {
+ /* Packet header for the txreq */
+ struct hfi1_pkt_header hdr;
+ struct sdma_txreq txreq;
+ struct list_head list;
+ struct user_sdma_request *req;
+ u16 flags;
+ unsigned int busycount;
+ u64 seqnum;
+};
+
int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
struct hfi1_filedata *fd);
-int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd);
+int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
+ struct hfi1_ctxtdata *uctxt);
int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
struct iovec *iovec, unsigned long dim,
unsigned long *count);
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 2d19f9bb434d..e232f3c608b4 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -53,6 +53,7 @@
#include <linux/rculist.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
+#include <rdma/opa_addr.h>
#include "hfi.h"
#include "common.h"
@@ -508,13 +509,14 @@ again:
/*
* Make sure the QP is ready and able to accept the given opcode.
*/
-static inline opcode_handler qp_ok(int opcode, struct hfi1_packet *packet)
+static inline opcode_handler qp_ok(struct hfi1_packet *packet)
{
if (!(ib_rvt_state_ops[packet->qp->state] & RVT_PROCESS_RECV_OK))
return NULL;
- if (((opcode & RVT_OPCODE_QP_MASK) == packet->qp->allowed_ops) ||
- (opcode == IB_OPCODE_CNP))
- return opcode_handler_tbl[opcode];
+ if (((packet->opcode & RVT_OPCODE_QP_MASK) ==
+ packet->qp->allowed_ops) ||
+ (packet->opcode == IB_OPCODE_CNP))
+ return opcode_handler_tbl[packet->opcode];
return NULL;
}
@@ -548,69 +550,54 @@ static u64 hfi1_fault_tx(struct rvt_qp *qp, u8 opcode, u64 pbc)
return pbc;
}
-/**
- * hfi1_ib_rcv - process an incoming packet
- * @packet: data packet information
- *
- * This is called to process an incoming packet at interrupt level.
- *
- * Tlen is the length of the header + data + CRC in bytes.
- */
-void hfi1_ib_rcv(struct hfi1_packet *packet)
+static int hfi1_do_pkey_check(struct hfi1_packet *packet)
{
struct hfi1_ctxtdata *rcd = packet->rcd;
- struct ib_header *hdr = packet->hdr;
- u32 tlen = packet->tlen;
+ struct hfi1_pportdata *ppd = rcd->ppd;
+ struct hfi1_16b_header *hdr = packet->hdr;
+ u16 pkey;
+
+ /* Pkey check needed only for bypass packets */
+ if (packet->etype != RHF_RCV_TYPE_BYPASS)
+ return 0;
+
+ /* Perform pkey check */
+ pkey = hfi1_16B_get_pkey(hdr);
+ return ingress_pkey_check(ppd, pkey, packet->sc,
+ packet->qp->s_pkey_index,
+ packet->slid, true);
+}
+
+static inline void hfi1_handle_packet(struct hfi1_packet *packet,
+ bool is_mcast)
+{
+ u32 qp_num;
+ struct hfi1_ctxtdata *rcd = packet->rcd;
struct hfi1_pportdata *ppd = rcd->ppd;
struct hfi1_ibport *ibp = rcd_to_iport(rcd);
struct rvt_dev_info *rdi = &ppd->dd->verbs_dev.rdi;
opcode_handler packet_handler;
unsigned long flags;
- u32 qp_num;
- int lnh;
- u8 opcode;
- u16 lid;
-
- /* Check for GRH */
- lnh = ib_get_lnh(hdr);
- if (lnh == HFI1_LRH_BTH) {
- packet->ohdr = &hdr->u.oth;
- } else if (lnh == HFI1_LRH_GRH) {
- u32 vtf;
-
- packet->ohdr = &hdr->u.l.oth;
- if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
- goto drop;
- vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
- if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
- goto drop;
- packet->rcv_flags |= HFI1_HAS_GRH;
- } else {
- goto drop;
- }
-
- trace_input_ibhdr(rcd->dd, hdr);
- opcode = ib_bth_get_opcode(packet->ohdr);
- inc_opstats(tlen, &rcd->opstats->stats[opcode]);
+ inc_opstats(packet->tlen, &rcd->opstats->stats[packet->opcode]);
- /* Get the destination QP number. */
- qp_num = be32_to_cpu(packet->ohdr->bth[1]) & RVT_QPN_MASK;
- lid = ib_get_dlid(hdr);
- if (unlikely((lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
- (lid != be16_to_cpu(IB_LID_PERMISSIVE)))) {
+ if (unlikely(is_mcast)) {
struct rvt_mcast *mcast;
struct rvt_mcast_qp *p;
- if (lnh != HFI1_LRH_GRH)
+ if (!packet->grh)
goto drop;
- mcast = rvt_mcast_find(&ibp->rvp, &hdr->u.l.grh.dgid, lid);
+ mcast = rvt_mcast_find(&ibp->rvp,
+ &packet->grh->dgid,
+ opa_get_lid(packet->dlid, 9B));
if (!mcast)
goto drop;
list_for_each_entry_rcu(p, &mcast->qp_list, list) {
packet->qp = p->qp;
+ if (hfi1_do_pkey_check(packet))
+ goto drop;
spin_lock_irqsave(&packet->qp->r_lock, flags);
- packet_handler = qp_ok(opcode, packet);
+ packet_handler = qp_ok(packet);
if (likely(packet_handler))
packet_handler(packet);
else
@@ -624,19 +611,22 @@ void hfi1_ib_rcv(struct hfi1_packet *packet)
if (atomic_dec_return(&mcast->refcount) <= 1)
wake_up(&mcast->wait);
} else {
+ /* Get the destination QP number. */
+ qp_num = ib_bth_get_qpn(packet->ohdr);
rcu_read_lock();
packet->qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
- if (!packet->qp) {
- rcu_read_unlock();
- goto drop;
- }
- if (unlikely(hfi1_dbg_fault_opcode(packet->qp, opcode,
- true))) {
- rcu_read_unlock();
- goto drop;
- }
+ if (!packet->qp)
+ goto unlock_drop;
+
+ if (hfi1_do_pkey_check(packet))
+ goto unlock_drop;
+
+ if (unlikely(hfi1_dbg_fault_opcode(packet->qp, packet->opcode,
+ true)))
+ goto unlock_drop;
+
spin_lock_irqsave(&packet->qp->r_lock, flags);
- packet_handler = qp_ok(opcode, packet);
+ packet_handler = qp_ok(packet);
if (likely(packet_handler))
packet_handler(packet);
else
@@ -645,11 +635,34 @@ void hfi1_ib_rcv(struct hfi1_packet *packet)
rcu_read_unlock();
}
return;
-
+unlock_drop:
+ rcu_read_unlock();
drop:
ibp->rvp.n_pkt_drops++;
}
+/**
+ * hfi1_ib_rcv - process an incoming packet
+ * @packet: data packet information
+ *
+ * This is called to process an incoming packet at interrupt level.
+ */
+void hfi1_ib_rcv(struct hfi1_packet *packet)
+{
+ struct hfi1_ctxtdata *rcd = packet->rcd;
+
+ trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf)));
+ hfi1_handle_packet(packet, hfi1_check_mcast(packet->dlid));
+}
+
+void hfi1_16B_rcv(struct hfi1_packet *packet)
+{
+ struct hfi1_ctxtdata *rcd = packet->rcd;
+
+ trace_input_ibhdr(rcd->dd, packet, false);
+ hfi1_handle_packet(packet, hfi1_check_mcast(packet->dlid));
+}
+
/*
* This is called from a timer to check for QPs
* which need kernel memory in order to send a packet.
@@ -696,7 +709,7 @@ static void verbs_sdma_complete(
if (tx->wqe) {
hfi1_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
} else if (qp->ibqp.qp_type == IB_QPT_RC) {
- struct ib_header *hdr;
+ struct hfi1_opa_header *hdr;
hdr = &tx->phdr.hdr;
hfi1_rc_send_complete(qp, hdr);
@@ -799,12 +812,27 @@ static int build_verbs_tx_desc(
int ret = 0;
struct hfi1_sdma_header *phdr = &tx->phdr;
u16 hdrbytes = tx->hdr_dwords << 2;
+ u32 *hdr;
+ u8 extra_bytes = 0;
+ static char trail_buf[12]; /* CRC = 4, LT = 1, Pad = 0 to 7 bytes */
+ if (tx->phdr.hdr.hdr_type) {
+ /*
+ * hdrbytes accounts for PBC. Need to subtract 8 bytes
+ * before calculating padding.
+ */
+ extra_bytes = hfi1_get_16b_padding(hdrbytes - 8, length) +
+ (SIZE_OF_CRC << 2) + SIZE_OF_LT;
+ hdr = (u32 *)&phdr->hdr.opah;
+ } else {
+ hdr = (u32 *)&phdr->hdr.ibh;
+ }
if (!ahg_info->ahgcount) {
ret = sdma_txinit_ahg(
&tx->txreq,
ahg_info->tx_flags,
- hdrbytes + length,
+ hdrbytes + length +
+ extra_bytes,
ahg_info->ahgidx,
0,
NULL,
@@ -834,8 +862,17 @@ static int build_verbs_tx_desc(
goto bail_txadd;
}
/* add the ulp payload - if any. tx->ss can be NULL for acks */
- if (tx->ss)
+ if (tx->ss) {
ret = build_verbs_ulp_payload(sde, length, tx);
+ if (ret)
+ goto bail_txadd;
+ }
+
+ /* add icrc, lt byte, and padding to flit */
+ if (extra_bytes != 0)
+ ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq,
+ trail_buf, extra_bytes);
+
bail_txadd:
return ret;
}
@@ -847,26 +884,42 @@ int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
struct hfi1_ahg_info *ahg_info = priv->s_ahg;
u32 hdrwords = qp->s_hdrwords;
u32 len = ps->s_txreq->s_cur_size;
- u32 plen = hdrwords + ((len + 3) >> 2) + 2; /* includes pbc */
+ u32 plen;
struct hfi1_ibdev *dev = ps->dev;
struct hfi1_pportdata *ppd = ps->ppd;
struct verbs_txreq *tx;
u8 sc5 = priv->s_sc;
-
int ret;
+ u32 dwords;
+ bool bypass = false;
+
+ if (ps->s_txreq->phdr.hdr.hdr_type) {
+ u8 extra_bytes = hfi1_get_16b_padding((hdrwords << 2), len);
+
+ dwords = (len + extra_bytes + (SIZE_OF_CRC << 2) +
+ SIZE_OF_LT) >> 2;
+ bypass = true;
+ } else {
+ dwords = (len + 3) >> 2;
+ }
+ plen = hdrwords + dwords + 2;
tx = ps->s_txreq;
if (!sdma_txreq_built(&tx->txreq)) {
if (likely(pbc == 0)) {
u32 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5);
- u8 opcode = get_opcode(&tx->phdr.hdr);
/* No vl15 here */
- /* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
- pbc |= (!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT;
+ /* set PBC_DC_INFO bit (aka SC[4]) in pbc */
+ if (ps->s_txreq->phdr.hdr.hdr_type)
+ pbc |= PBC_PACKET_BYPASS |
+ PBC_INSERT_BYPASS_ICRC;
+ else
+ pbc |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT);
- if (unlikely(hfi1_dbg_fault_opcode(qp, opcode, false)))
- pbc = hfi1_fault_tx(qp, opcode, pbc);
+ if (unlikely(hfi1_dbg_fault_opcode(qp, ps->opcode,
+ false)))
+ pbc = hfi1_fault_tx(qp, ps->opcode, pbc);
pbc = create_pbc(ppd,
pbc,
qp->srate_mbps,
@@ -878,14 +931,15 @@ int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
if (unlikely(ret))
goto bail_build;
}
- ret = sdma_send_txreq(tx->sde, &priv->s_iowait, &tx->txreq);
+ ret = sdma_send_txreq(tx->sde, &priv->s_iowait, &tx->txreq,
+ ps->pkts_sent);
if (unlikely(ret < 0)) {
if (ret == -ECOMM)
goto bail_ecomm;
return ret;
}
trace_sdma_output_ibhdr(dd_from_ibdev(qp->ibqp.device),
- &ps->s_txreq->phdr.hdr);
+ &ps->s_txreq->phdr.hdr, ib_is_sc5(sc5));
return ret;
bail_ecomm:
@@ -935,7 +989,8 @@ static int pio_wait(struct rvt_qp *qp,
dev->n_piodrain += !!(flag & RVT_S_WAIT_PIO_DRAIN);
qp->s_flags |= flag;
was_empty = list_empty(&sc->piowait);
- list_add_tail(&priv->s_iowait.list, &sc->piowait);
+ iowait_queue(ps->pkts_sent, &priv->s_iowait,
+ &sc->piowait);
priv->s_iowait.lock = &dev->iowait_lock;
trace_hfi1_qpsleep(qp, RVT_S_WAIT_PIO);
rvt_get_qp(qp);
@@ -967,10 +1022,10 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
u32 hdrwords = qp->s_hdrwords;
struct rvt_sge_state *ss = ps->s_txreq->ss;
u32 len = ps->s_txreq->s_cur_size;
- u32 dwords = (len + 3) >> 2;
- u32 plen = hdrwords + dwords + 2; /* includes pbc */
+ u32 dwords;
+ u32 plen;
struct hfi1_pportdata *ppd = ps->ppd;
- u32 *hdr = (u32 *)&ps->s_txreq->phdr.hdr;
+ u32 *hdr;
u8 sc5;
unsigned long flags = 0;
struct send_context *sc;
@@ -978,6 +1033,23 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
int wc_status = IB_WC_SUCCESS;
int ret = 0;
pio_release_cb cb = NULL;
+ u32 lrh0_16b;
+ bool bypass = false;
+ u8 extra_bytes = 0;
+
+ if (ps->s_txreq->phdr.hdr.hdr_type) {
+ u8 pad_size = hfi1_get_16b_padding((hdrwords << 2), len);
+
+ extra_bytes = pad_size + (SIZE_OF_CRC << 2) + SIZE_OF_LT;
+ dwords = (len + extra_bytes) >> 2;
+ hdr = (u32 *)&ps->s_txreq->phdr.hdr.opah;
+ lrh0_16b = ps->s_txreq->phdr.hdr.opah.lrh[0];
+ bypass = true;
+ } else {
+ dwords = (len + 3) >> 2;
+ hdr = (u32 *)&ps->s_txreq->phdr.hdr.ibh;
+ }
+ plen = hdrwords + dwords + 2;
/* only RC/UC use complete */
switch (qp->ibqp.qp_type) {
@@ -995,13 +1067,14 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
if (likely(pbc == 0)) {
u8 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5);
- struct verbs_txreq *tx = ps->s_txreq;
- u8 opcode = get_opcode(&tx->phdr.hdr);
- /* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
- pbc |= (!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT;
- if (unlikely(hfi1_dbg_fault_opcode(qp, opcode, false)))
- pbc = hfi1_fault_tx(qp, opcode, pbc);
+ /* set PBC_DC_INFO bit (aka SC[4]) in pbc */
+ if (ps->s_txreq->phdr.hdr.hdr_type)
+ pbc |= PBC_PACKET_BYPASS | PBC_INSERT_BYPASS_ICRC;
+ else
+ pbc |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT);
+ if (unlikely(hfi1_dbg_fault_opcode(qp, ps->opcode, false)))
+ pbc = hfi1_fault_tx(qp, ps->opcode, pbc);
pbc = create_pbc(ppd, pbc, qp->srate_mbps, vl, plen);
}
if (cb)
@@ -1038,11 +1111,12 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
}
}
- if (len == 0) {
+ if (dwords == 0) {
pio_copy(ppd->dd, pbuf, pbc, hdr, hdrwords);
} else {
+ seg_pio_copy_start(pbuf, pbc,
+ hdr, hdrwords * 4);
if (ss) {
- seg_pio_copy_start(pbuf, pbc, hdr, hdrwords * 4);
while (len) {
void *addr = ss->sge.vaddr;
u32 slen = ss->sge.length;
@@ -1053,12 +1127,24 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
seg_pio_copy_mid(pbuf, addr, slen);
len -= slen;
}
- seg_pio_copy_end(pbuf);
}
+ /*
+			 * Bypass packets need to copy additional
+			 * bytes to accommodate the CRC and LT bytes.
+ */
+ if (extra_bytes) {
+ u8 *empty_buf;
+
+ empty_buf = kcalloc(extra_bytes, sizeof(u8),
+ GFP_KERNEL);
+ seg_pio_copy_mid(pbuf, empty_buf, extra_bytes);
+ kfree(empty_buf);
+ }
+ seg_pio_copy_end(pbuf);
}
trace_pio_output_ibhdr(dd_from_ibdev(qp->ibqp.device),
- &ps->s_txreq->phdr.hdr);
+ &ps->s_txreq->phdr.hdr, ib_is_sc5(sc5));
pio_bail:
if (qp->s_wqe) {
@@ -1104,10 +1190,10 @@ static inline int egress_pkey_matches_entry(u16 pkey, u16 ent)
/**
* egress_pkey_check - check P_KEY of a packet
- * @ppd: Physical IB port data
- * @lrh: Local route header
- * @bth: Base transport header
- * @sc5: SC for packet
+ * @ppd: Physical IB port data
+ * @slid: SLID for packet
+ * @bkey: PKEY for header
+ * @sc5: SC for packet
 * @s_pkey_index: It will be used for lookup optimization for kernel contexts
 * only. If it is a negative value, it means a user context is calling this
* function.
@@ -1116,19 +1202,16 @@ static inline int egress_pkey_matches_entry(u16 pkey, u16 ent)
*
* Return: 0 on success, otherwise, 1
*/
-int egress_pkey_check(struct hfi1_pportdata *ppd, __be16 *lrh, __be32 *bth,
+int egress_pkey_check(struct hfi1_pportdata *ppd, u32 slid, u16 pkey,
u8 sc5, int8_t s_pkey_index)
{
struct hfi1_devdata *dd;
int i;
- u16 pkey;
int is_user_ctxt_mechanism = (s_pkey_index < 0);
if (!(ppd->part_enforce & HFI1_PART_ENFORCE_OUT))
return 0;
- pkey = (u16)be32_to_cpu(bth[0]);
-
/* If SC15, pkey[0:14] must be 0x7fff */
if ((sc5 == 0xf) && ((pkey & PKEY_LOW_15_MASK) != PKEY_LOW_15_MASK))
goto bad;
@@ -1161,8 +1244,6 @@ bad:
dd = ppd->dd;
if (!(dd->err_info_xmit_constraint.status &
OPA_EI_STATUS_SMASK)) {
- u16 slid = be16_to_cpu(lrh[3]);
-
dd->err_info_xmit_constraint.status |=
OPA_EI_STATUS_SMASK;
dd->err_info_xmit_constraint.slid = slid;
@@ -1179,11 +1260,11 @@ bad:
* and size
*/
static inline send_routine get_send_routine(struct rvt_qp *qp,
- struct verbs_txreq *tx)
+ struct hfi1_pkt_state *ps)
{
struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
struct hfi1_qp_priv *priv = qp->priv;
- struct ib_header *h = &tx->phdr.hdr;
+ struct verbs_txreq *tx = ps->s_txreq;
if (unlikely(!(dd->flags & HFI1_HAS_SEND_DMA)))
return dd->process_pio_send;
@@ -1195,11 +1276,9 @@ static inline send_routine get_send_routine(struct rvt_qp *qp,
break;
case IB_QPT_UC:
case IB_QPT_RC: {
- u8 op = get_opcode(h);
-
if (piothreshold &&
tx->s_cur_size <= min(piothreshold, qp->pmtu) &&
- (BIT(op & OPMASK) & pio_opmask[op >> 5]) &&
+ (BIT(ps->opcode & OPMASK) & pio_opmask[ps->opcode >> 5]) &&
iowait_sdma_pending(&priv->s_iowait) == 0 &&
!sdma_txreq_built(&tx->txreq))
return dd->process_pio_send;
@@ -1224,25 +1303,38 @@ int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
struct hfi1_qp_priv *priv = qp->priv;
struct ib_other_headers *ohdr;
- struct ib_header *hdr;
send_routine sr;
int ret;
- u8 lnh;
+ u16 pkey;
+ u32 slid;
- hdr = &ps->s_txreq->phdr.hdr;
/* locate the pkey within the headers */
- lnh = ib_get_lnh(hdr);
- if (lnh == HFI1_LRH_GRH)
- ohdr = &hdr->u.l.oth;
- else
- ohdr = &hdr->u.oth;
-
- sr = get_send_routine(qp, ps->s_txreq);
- ret = egress_pkey_check(dd->pport,
- hdr->lrh,
- ohdr->bth,
- priv->s_sc,
- qp->s_pkey_index);
+ if (ps->s_txreq->phdr.hdr.hdr_type) {
+ struct hfi1_16b_header *hdr = &ps->s_txreq->phdr.hdr.opah;
+ u8 l4 = hfi1_16B_get_l4(hdr);
+
+ if (l4 == OPA_16B_L4_IB_GLOBAL)
+ ohdr = &hdr->u.l.oth;
+ else
+ ohdr = &hdr->u.oth;
+ slid = hfi1_16B_get_slid(hdr);
+ pkey = hfi1_16B_get_pkey(hdr);
+ } else {
+ struct ib_header *hdr = &ps->s_txreq->phdr.hdr.ibh;
+ u8 lnh = ib_get_lnh(hdr);
+
+ if (lnh == HFI1_LRH_GRH)
+ ohdr = &hdr->u.l.oth;
+ else
+ ohdr = &hdr->u.oth;
+ slid = ib_get_slid(hdr);
+ pkey = ib_bth_get_pkey(ohdr);
+ }
+
+ ps->opcode = ib_bth_get_opcode(ohdr);
+ sr = get_send_routine(qp, ps);
+ ret = egress_pkey_check(dd->pport, slid, pkey,
+ priv->s_sc, qp->s_pkey_index);
if (unlikely(ret)) {
/*
* The value we are returning here does not get propagated to
@@ -1361,14 +1453,14 @@ static int query_port(struct rvt_dev_info *rdi, u8 port_num,
struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
struct hfi1_pportdata *ppd = &dd->pport[port_num - 1];
- u16 lid = ppd->lid;
+ u32 lid = ppd->lid;
/* props being zeroed by the caller, avoid zeroing it here */
props->lid = lid ? lid : 0;
props->lmc = ppd->lmc;
/* OPA logical states match IB logical states */
props->state = driver_lstate(ppd);
- props->phys_state = hfi1_ibphys_portstate(ppd);
+ props->phys_state = driver_pstate(ppd);
props->gid_tbl_len = HFI1_GUIDS_PER_PORT;
props->active_width = (u8)opa_width_to_ib(ppd->link_width_active);
/* see rate_show() in ib core/sysfs.c */
@@ -1388,6 +1480,15 @@ static int query_port(struct rvt_dev_info *rdi, u8 port_num,
props->active_mtu = !valid_ib_mtu(ppd->ibmtu) ? props->max_mtu :
mtu_to_enum(ppd->ibmtu, IB_MTU_2048);
+ /*
+ * sm_lid of 0xFFFF needs special handling so that it can
+	 * be differentiated from a permissive LID of 0xFFFF.
+	 * We set the grh_required flag here so the SA can program
+	 * the DGID in the address handle appropriately.
+ */
+ if (props->sm_lid == be16_to_cpu(IB_LID_PERMISSIVE))
+ props->grh_required = true;
+
return 0;
}
@@ -1473,6 +1574,10 @@ static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
struct hfi1_devdata *dd;
u8 sc5;
+ if (hfi1_check_mcast(rdma_ah_get_dlid(ah_attr)) &&
+ !(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH))
+ return -EINVAL;
+
/* test the mapping for validity */
ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr));
ppd = ppd_from_ibp(ibp);
@@ -1491,6 +1596,7 @@ static void hfi1_notify_new_ah(struct ib_device *ibdev,
struct hfi1_pportdata *ppd;
struct hfi1_devdata *dd;
u8 sc5;
+ struct rdma_ah_attr *attr = &ah->attr;
/*
* Do not trust reading anything from rvt_ah at this point as it is not
@@ -1500,33 +1606,14 @@ static void hfi1_notify_new_ah(struct ib_device *ibdev,
ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr));
ppd = ppd_from_ibp(ibp);
sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&ah->attr)];
+ hfi1_update_ah_attr(ibdev, attr);
+ hfi1_make_opa_lid(attr);
dd = dd_from_ppd(ppd);
ah->vl = sc_to_vlt(dd, sc5);
if (ah->vl < num_vls || ah->vl == 15)
ah->log_pmtu = ilog2(dd->vld[ah->vl].mtu);
}
-struct ib_ah *hfi1_create_qp0_ah(struct hfi1_ibport *ibp, u16 dlid)
-{
- struct rdma_ah_attr attr;
- struct ib_ah *ah = ERR_PTR(-EINVAL);
- struct rvt_qp *qp0;
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- struct hfi1_devdata *dd = dd_from_ppd(ppd);
- u8 port_num = ppd->port;
-
- memset(&attr, 0, sizeof(attr));
- attr.type = rdma_ah_find_type(&dd->verbs_dev.rdi.ibdev, port_num);
- rdma_ah_set_dlid(&attr, dlid);
- rdma_ah_set_port_num(&attr, ppd_from_ibp(ibp)->port);
- rcu_read_lock();
- qp0 = rcu_dereference(ibp->rvp.qp[0]);
- if (qp0)
- ah = rdma_create_ah(qp0->ibqp.pd, &attr);
- rcu_read_unlock();
- return ah;
-}
-
/**
* hfi1_get_npkeys - return the size of the PKEY table for context 0
* @dd: the hfi1_ib device
@@ -1547,13 +1634,22 @@ static void init_ibport(struct hfi1_pportdata *ppd)
ibp->sc_to_sl[i] = i;
}
+ for (i = 0; i < RVT_MAX_TRAP_LISTS ; i++)
+ INIT_LIST_HEAD(&ibp->rvp.trap_lists[i].list);
+ setup_timer(&ibp->rvp.trap_timer, hfi1_handle_trap_timer,
+ (unsigned long)ibp);
+
spin_lock_init(&ibp->rvp.lock);
/* Set the prefix to the default value (see ch. 4.1.1) */
ibp->rvp.gid_prefix = IB_DEFAULT_GID_PREFIX;
ibp->rvp.sm_lid = 0;
- /* Below should only set bits defined in OPA PortInfo.CapabilityMask */
+ /*
+ * Below should only set bits defined in OPA PortInfo.CapabilityMask
+ * and PortInfo.CapabilityMask3
+ */
ibp->rvp.port_cap_flags = IB_PORT_AUTO_MIGR_SUP |
IB_PORT_CAP_MASK_NOTICE_SUP;
+ ibp->rvp.port_cap3_flags = OPA_CAP_MASK3_IsSharedSpaceSupported;
ibp->rvp.pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
ibp->rvp.pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
ibp->rvp.pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
@@ -1564,14 +1660,13 @@ static void init_ibport(struct hfi1_pportdata *ppd)
RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
}
-static void hfi1_get_dev_fw_str(struct ib_device *ibdev, char *str,
- size_t str_len)
+static void hfi1_get_dev_fw_str(struct ib_device *ibdev, char *str)
{
struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
struct hfi1_ibdev *dev = dev_from_rdi(rdi);
u32 ver = dd_from_dev(dev)->dc8051_ver;
- snprintf(str, str_len, "%u.%u.%u", dc8051_ver_maj(ver),
+ snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u.%u", dc8051_ver_maj(ver),
dc8051_ver_min(ver), dc8051_ver_patch(ver));
}
@@ -1816,7 +1911,8 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
dd->verbs_dev.rdi.dparms.psn_mask = PSN_MASK;
dd->verbs_dev.rdi.dparms.psn_shift = PSN_SHIFT;
dd->verbs_dev.rdi.dparms.psn_modify_mask = PSN_MODIFY_MASK;
- dd->verbs_dev.rdi.dparms.core_cap_flags = RDMA_CORE_PORT_INTEL_OPA;
+ dd->verbs_dev.rdi.dparms.core_cap_flags = RDMA_CORE_PORT_INTEL_OPA |
+ RDMA_CORE_CAP_OPA_AH;
dd->verbs_dev.rdi.dparms.max_mad_size = OPA_MGMT_MAD_SIZE;
dd->verbs_dev.rdi.driver_f.qp_priv_alloc = qp_priv_alloc;
diff --git a/drivers/infiniband/hw/hfi1/verbs.h b/drivers/infiniband/hw/hfi1/verbs.h
index cd635d0c1d3b..87d1285a3340 100644
--- a/drivers/infiniband/hw/hfi1/verbs.h
+++ b/drivers/infiniband/hw/hfi1/verbs.h
@@ -95,6 +95,7 @@ struct hfi1_packet;
#define HFI1_VENDOR_IPG cpu_to_be16(0xFFA0)
#define IB_DEFAULT_GID_PREFIX cpu_to_be64(0xfe80000000000000ULL)
+#define OPA_BTH_MIG_REQ BIT(31)
#define RC_OP(x) IB_OPCODE_RC_##x
#define UC_OP(x) IB_OPCODE_UC_##x
@@ -104,6 +105,25 @@ enum {
HFI1_HAS_GRH = (1 << 0),
};
+struct hfi1_16b_header {
+ u32 lrh[4];
+ union {
+ struct {
+ struct ib_grh grh;
+ struct ib_other_headers oth;
+ } l;
+ struct ib_other_headers oth;
+ } u;
+} __packed;
+
+struct hfi1_opa_header {
+ union {
+ struct ib_header ibh; /* 9B header */
+ struct hfi1_16b_header opah; /* 16B header */
+ };
+ u8 hdr_type; /* 9B or 16B */
+} __packed;
+
struct hfi1_ahg_info {
u32 ahgdesc[2];
u16 tx_flags;
@@ -113,7 +133,7 @@ struct hfi1_ahg_info {
struct hfi1_sdma_header {
__le64 pbc;
- struct ib_header hdr;
+ struct hfi1_opa_header hdr;
} __packed;
/*
@@ -127,6 +147,7 @@ struct hfi1_qp_priv {
u8 s_sc; /* SC[0..4] for next packet */
struct iowait s_iowait;
struct rvt_qp *owner;
+ u8 hdr_type; /* 9B or 16B */
};
/*
@@ -142,7 +163,9 @@ struct hfi1_pkt_state {
unsigned long timeout;
unsigned long timeout_int;
int cpu;
+ u8 opcode;
bool in_thread;
+ bool pkts_sent;
};
#define HFI1_PSN_CREDIT 16
@@ -236,8 +259,8 @@ static inline int hfi1_send_ok(struct rvt_qp *qp)
/*
* This must be called with s_lock held.
*/
-void hfi1_bad_pqkey(struct hfi1_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
- u32 qp1, u32 qp2, u16 lid1, u16 lid2);
+void hfi1_bad_pkey(struct hfi1_ibport *ibp, u32 key, u32 sl,
+ u32 qp1, u32 qp2, u32 lid1, u32 lid2);
void hfi1_cap_mask_chg(struct rvt_dev_info *rdi, u8 port_num);
void hfi1_sys_guid_chg(struct hfi1_ibport *ibp);
void hfi1_node_desc_chg(struct hfi1_ibport *ibp);
@@ -257,13 +280,8 @@ int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
* necessarily be at least one bit less than
* the container holding the PSN.
*/
-#ifndef CONFIG_HFI1_VERBS_31BIT_PSN
-#define PSN_MASK 0xFFFFFF
-#define PSN_SHIFT 8
-#else
#define PSN_MASK 0x7FFFFFFF
#define PSN_SHIFT 1
-#endif
#define PSN_MODIFY_MASK 0xFFFFFF
/*
@@ -307,15 +325,12 @@ void hfi1_rc_rcv(struct hfi1_packet *packet);
void hfi1_rc_hdrerr(
struct hfi1_ctxtdata *rcd,
- struct ib_header *hdr,
- u32 rcv_flags,
+ struct hfi1_packet *packet,
struct rvt_qp *qp);
u8 ah_to_sc(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr);
-struct ib_ah *hfi1_create_qp0_ah(struct hfi1_ibport *ibp, u16 dlid);
-
-void hfi1_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr);
+void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah);
void hfi1_ud_rcv(struct hfi1_packet *packet);
@@ -336,18 +351,7 @@ int hfi1_check_send_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe);
extern const u32 rc_only_opcode;
extern const u32 uc_only_opcode;
-static inline u8 get_opcode(struct ib_header *h)
-{
- u16 lnh = be16_to_cpu(h->lrh[0]) & 3;
-
- if (lnh == IB_LNH_IBA_LOCAL)
- return be32_to_cpu(h->u.oth.bth[0]) >> 24;
- else
- return be32_to_cpu(h->u.l.oth.bth[0]) >> 24;
-}
-
-int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct ib_header *hdr,
- int has_grh, struct rvt_qp *qp, u32 bth0);
+int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_packet *packet);
u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,
const struct ib_global_route *grh, u32 hwords, u32 nwords);
@@ -365,7 +369,8 @@ void hfi1_do_send(struct rvt_qp *qp, bool in_thread);
void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
enum ib_wc_status status);
-void hfi1_send_rc_ack(struct hfi1_ctxtdata *, struct rvt_qp *qp, int is_fecn);
+void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp,
+ bool is_fecn);
int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps);
@@ -379,6 +384,8 @@ void hfi1_unregister_ib_device(struct hfi1_devdata *);
void hfi1_ib_rcv(struct hfi1_packet *packet);
+void hfi1_16B_rcv(struct hfi1_packet *packet);
+
unsigned hfi1_get_npkeys(struct hfi1_devdata *);
int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.c b/drivers/infiniband/hw/hfi1/verbs_txreq.c
index 5d23172c470f..873e48ea923f 100644
--- a/drivers/infiniband/hw/hfi1/verbs_txreq.c
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2016 Intel Corporation.
+ * Copyright(c) 2016 - 2017 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -119,13 +119,6 @@ out:
return tx;
}
-static void verbs_txreq_kmem_cache_ctor(void *obj)
-{
- struct verbs_txreq *tx = (struct verbs_txreq *)obj;
-
- memset(tx, 0, sizeof(*tx));
-}
-
int verbs_txreq_init(struct hfi1_ibdev *dev)
{
char buf[TXREQ_LEN];
@@ -135,7 +128,7 @@ int verbs_txreq_init(struct hfi1_ibdev *dev)
dev->verbs_txreq_cache = kmem_cache_create(buf,
sizeof(struct verbs_txreq),
0, SLAB_HWCACHE_ALIGN,
- verbs_txreq_kmem_cache_ctor);
+ NULL);
if (!dev->verbs_txreq_cache)
return -ENOMEM;
return 0;
diff --git a/drivers/infiniband/hw/hfi1/vnic.h b/drivers/infiniband/hw/hfi1/vnic.h
index 4a621cde4abb..5ae781514e32 100644
--- a/drivers/infiniband/hw/hfi1/vnic.h
+++ b/drivers/infiniband/hw/hfi1/vnic.h
@@ -54,21 +54,6 @@
#define HFI1_VNIC_MAX_TXQ 16
#define HFI1_VNIC_MAX_PAD 12
-/* L2 header definitions */
-#define HFI1_L2_TYPE_OFFSET 0x7
-#define HFI1_L2_TYPE_SHFT 0x5
-#define HFI1_L2_TYPE_MASK 0x3
-
-#define HFI1_GET_L2_TYPE(hdr) \
- ((*((u8 *)(hdr) + HFI1_L2_TYPE_OFFSET) >> HFI1_L2_TYPE_SHFT) & \
- HFI1_L2_TYPE_MASK)
-
-/* L4 type definitions */
-#define HFI1_L4_TYPE_OFFSET 8
-
-#define HFI1_GET_L4_TYPE(data) \
- (*((u8 *)(data) + HFI1_L4_TYPE_OFFSET))
-
/* L4 header definitions */
#define HFI1_VNIC_L4_HDR_OFFSET OPA_VNIC_L2_HDR_LEN
@@ -103,6 +88,7 @@ struct hfi1_vnic_sdma {
struct sdma_txreq stx;
unsigned int state;
u8 q_idx;
+ bool pkts_sent;
};
/**
diff --git a/drivers/infiniband/hw/hfi1/vnic_main.c b/drivers/infiniband/hw/hfi1/vnic_main.c
index 339f0cdd56d6..f419cbb05928 100644
--- a/drivers/infiniband/hw/hfi1/vnic_main.c
+++ b/drivers/infiniband/hw/hfi1/vnic_main.c
@@ -95,7 +95,7 @@ static int setup_vnic_ctxt(struct hfi1_devdata *dd, struct hfi1_ctxtdata *uctxt)
if (HFI1_CAP_KGET_MASK(uctxt->flags, DMA_RTAIL))
rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB;
- hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt->ctxt);
+ hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt);
uctxt->is_vnic = true;
done:
@@ -106,22 +106,13 @@ static int allocate_vnic_ctxt(struct hfi1_devdata *dd,
struct hfi1_ctxtdata **vnic_ctxt)
{
struct hfi1_ctxtdata *uctxt;
- unsigned int ctxt;
int ret;
if (dd->flags & HFI1_FROZEN)
return -EIO;
- for (ctxt = dd->first_dyn_alloc_ctxt;
- ctxt < dd->num_rcv_contexts; ctxt++)
- if (!dd->rcd[ctxt])
- break;
-
- if (ctxt == dd->num_rcv_contexts)
- return -EBUSY;
-
- uctxt = hfi1_create_ctxtdata(dd->pport, ctxt, dd->node);
- if (!uctxt) {
+ ret = hfi1_create_ctxtdata(dd->pport, dd->node, &uctxt);
+ if (ret < 0) {
dd_dev_err(dd, "Unable to create ctxtdata, failing open\n");
return -ENOMEM;
}
@@ -155,12 +146,7 @@ static int allocate_vnic_ctxt(struct hfi1_devdata *dd,
return ret;
bail:
- /*
- * hfi1_free_ctxtdata() also releases send_context
- * structure if uctxt->sc is not null
- */
- dd->rcd[uctxt->ctxt] = NULL;
- hfi1_free_ctxtdata(dd, uctxt);
+ hfi1_free_ctxt(uctxt);
dd_dev_dbg(dd, "vnic allocation failed. rc %d\n", ret);
return ret;
}
@@ -168,15 +154,12 @@ bail:
static void deallocate_vnic_ctxt(struct hfi1_devdata *dd,
struct hfi1_ctxtdata *uctxt)
{
- unsigned long flags;
-
dd_dev_dbg(dd, "closing vnic context %d\n", uctxt->ctxt);
flush_wc();
if (dd->num_msix_entries)
hfi1_reset_vnic_msix_info(uctxt);
- spin_lock_irqsave(&dd->uctxt_lock, flags);
/*
* Disable receive context and interrupt available, reset all
* RcvCtxtCtrl bits to default values.
@@ -186,7 +169,7 @@ static void deallocate_vnic_ctxt(struct hfi1_devdata *dd,
HFI1_RCVCTRL_INTRAVAIL_DIS |
HFI1_RCVCTRL_ONE_PKT_EGR_DIS |
HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
- HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt->ctxt);
+ HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt);
/*
* VNIC contexts are allocated from user context pool.
* Release them back to user context pool.
@@ -199,16 +182,15 @@ static void deallocate_vnic_ctxt(struct hfi1_devdata *dd,
sc_disable(uctxt->sc);
dd->send_contexts[uctxt->sc->sw_index].type = SC_USER;
- spin_unlock_irqrestore(&dd->uctxt_lock, flags);
- dd->rcd[uctxt->ctxt] = NULL;
uctxt->event_flags = 0;
hfi1_clear_tids(uctxt);
hfi1_clear_ctxt_pkey(dd, uctxt);
hfi1_stats.sps_ctxts--;
- hfi1_free_ctxtdata(dd, uctxt);
+
+ hfi1_free_ctxt(uctxt);
}
void hfi1_vnic_setup(struct hfi1_devdata *dd)
@@ -582,8 +564,8 @@ void hfi1_vnic_bypass_rcv(struct hfi1_packet *packet)
int l4_type, vesw_id = -1;
u8 q_idx;
- l4_type = HFI1_GET_L4_TYPE(packet->ebuf);
- if (likely(l4_type == OPA_VNIC_L4_ETHR)) {
+ l4_type = hfi1_16B_get_l4(packet->ebuf);
+ if (likely(l4_type == OPA_16B_L4_ETHR)) {
vesw_id = HFI1_VNIC_GET_VESWID(packet->ebuf);
vinfo = idr_find(&dd->vnic.vesw_idr, vesw_id);
@@ -751,6 +733,7 @@ static int hfi1_vnic_init(struct hfi1_vnic_vport_info *vinfo)
rc = hfi1_vnic_allot_ctxt(dd, &dd->vnic.ctxt[i]);
if (rc)
break;
+ hfi1_rcd_get(dd->vnic.ctxt[i]);
dd->vnic.ctxt[i]->vnic_q_idx = i;
}
@@ -762,6 +745,7 @@ static int hfi1_vnic_init(struct hfi1_vnic_vport_info *vinfo)
*/
while (i-- > dd->vnic.num_ctxt) {
deallocate_vnic_ctxt(dd, dd->vnic.ctxt[i]);
+ hfi1_rcd_put(dd->vnic.ctxt[i]);
dd->vnic.ctxt[i] = NULL;
}
goto alloc_fail;
@@ -791,6 +775,7 @@ static void hfi1_vnic_deinit(struct hfi1_vnic_vport_info *vinfo)
if (--dd->vnic.num_vports == 0) {
for (i = 0; i < dd->vnic.num_ctxt; i++) {
deallocate_vnic_ctxt(dd, dd->vnic.ctxt[i]);
+ hfi1_rcd_put(dd->vnic.ctxt[i]);
dd->vnic.ctxt[i] = NULL;
}
hfi1_deinit_vnic_rsm(dd);
diff --git a/drivers/infiniband/hw/hfi1/vnic_sdma.c b/drivers/infiniband/hw/hfi1/vnic_sdma.c
index 51a817d3aa14..c3c96c5869ed 100644
--- a/drivers/infiniband/hw/hfi1/vnic_sdma.c
+++ b/drivers/infiniband/hw/hfi1/vnic_sdma.c
@@ -198,11 +198,16 @@ int hfi1_vnic_send_dma(struct hfi1_devdata *dd, u8 q_idx,
goto free_desc;
tx->retry_count = 0;
- ret = sdma_send_txreq(sde, &vnic_sdma->wait, &tx->txreq);
+ ret = sdma_send_txreq(sde, &vnic_sdma->wait, &tx->txreq,
+ vnic_sdma->pkts_sent);
/* When -ECOMM, sdma callback will be called with ABORT status */
if (unlikely(ret && unlikely(ret != -ECOMM)))
goto free_desc;
+ if (!ret) {
+ vnic_sdma->pkts_sent = true;
+ iowait_starve_clear(vnic_sdma->pkts_sent, &vnic_sdma->wait);
+ }
return ret;
free_desc:
@@ -211,6 +216,8 @@ free_desc:
tx_err:
if (ret != -EBUSY)
dev_kfree_skb_any(skb);
+ else
+ vnic_sdma->pkts_sent = false;
return ret;
}
@@ -225,7 +232,8 @@ tx_err:
static int hfi1_vnic_sdma_sleep(struct sdma_engine *sde,
struct iowait *wait,
struct sdma_txreq *txreq,
- unsigned int seq)
+ uint seq,
+ bool pkts_sent)
{
struct hfi1_vnic_sdma *vnic_sdma =
container_of(wait, struct hfi1_vnic_sdma, wait);
@@ -239,7 +247,7 @@ static int hfi1_vnic_sdma_sleep(struct sdma_engine *sde,
vnic_sdma->state = HFI1_VNIC_SDMA_Q_DEFERRED;
write_seqlock(&dev->iowait_lock);
if (list_empty(&vnic_sdma->wait.list))
- list_add_tail(&vnic_sdma->wait.list, &sde->dmawait);
+ iowait_queue(pkts_sent, wait, &sde->dmawait);
write_sequnlock(&dev->iowait_lock);
return -EBUSY;
}
@@ -295,22 +303,15 @@ void hfi1_vnic_sdma_init(struct hfi1_vnic_vport_info *vinfo)
}
}
-static void hfi1_vnic_txreq_kmem_cache_ctor(void *obj)
-{
- struct vnic_txreq *tx = (struct vnic_txreq *)obj;
-
- memset(tx, 0, sizeof(*tx));
-}
-
int hfi1_vnic_txreq_init(struct hfi1_devdata *dd)
{
char buf[HFI1_VNIC_TXREQ_NAME_LEN];
snprintf(buf, sizeof(buf), "hfi1_%u_vnic_txreq_cache", dd->unit);
dd->vnic.txreq_cache = kmem_cache_create(buf,
- sizeof(struct vnic_txreq),
- 0, SLAB_HWCACHE_ALIGN,
- hfi1_vnic_txreq_kmem_cache_ctor);
+ sizeof(struct vnic_txreq),
+ 0, SLAB_HWCACHE_ALIGN,
+ NULL);
if (!dd->vnic.txreq_cache)
return -ENOMEM;
return 0;
diff --git a/drivers/infiniband/hw/hns/Kconfig b/drivers/infiniband/hw/hns/Kconfig
index e1a6e055cd60..61c93bbd230d 100644
--- a/drivers/infiniband/hw/hns/Kconfig
+++ b/drivers/infiniband/hw/hns/Kconfig
@@ -1,7 +1,7 @@
config INFINIBAND_HNS
tristate "HNS RoCE Driver"
depends on NET_VENDOR_HISILICON
- depends on ARM64 && HNS && HNS_DSAF && HNS_ENET
+ depends on (ARM64 || (COMPILE_TEST && 64BIT)) && HNS && HNS_DSAF && HNS_ENET
---help---
This is a RoCE/RDMA driver for the Hisilicon RoCE engine. The engine
	  is used in the Hisilicon Hi1610 and later Hisilicon ICT SoCs.
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
index f78a733a63ec..d545302b8ef8 100644
--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -64,8 +64,10 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
} else {
u8 *dmac = rdma_ah_retrieve_dmac(ah_attr);
- if (!dmac)
+ if (!dmac) {
+ kfree(ah);
return ERR_PTR(-EINVAL);
+ }
memcpy(ah->av.mac, dmac, ETH_ALEN);
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c
index 605962f2828c..e1b433cdd5e2 100644
--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -32,6 +32,7 @@
*/
#include <linux/platform_device.h>
+#include <linux/vmalloc.h>
#include "hns_roce_device.h"
int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj)
diff --git a/drivers/infiniband/hw/hns/hns_roce_eq.c b/drivers/infiniband/hw/hns/hns_roce_eq.c
index 50f864935a0e..b0f43735de1a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_eq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_eq.c
@@ -31,6 +31,7 @@
*/
#include <linux/platform_device.h>
+#include <linux/interrupt.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_eq.h"
@@ -292,7 +293,7 @@ static int hns_roce_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
dev_warn(dev, "Unhandled event %d on EQ %d at index %u\n",
event_type, eq->eqn, eq->cons_index);
break;
- };
+ }
eq->cons_index++;
aeqes_found = 1;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 37d5d29597a4..747efd1ae5a6 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -228,14 +228,14 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
switch (wr->opcode) {
case IB_WR_RDMA_READ:
ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_READ;
- set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
- atomic_wr(wr)->rkey);
+ set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
+ rdma_wr(wr)->rkey);
break;
case IB_WR_RDMA_WRITE:
case IB_WR_RDMA_WRITE_WITH_IMM:
ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_WRITE;
- set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
- atomic_wr(wr)->rkey);
+ set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
+ rdma_wr(wr)->rkey);
break;
case IB_WR_SEND:
case IB_WR_SEND_WITH_INV:
@@ -661,9 +661,11 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
union ib_gid dgid;
u64 subnet_prefix;
int attr_mask = 0;
- int i;
+ int i, j;
int ret;
+ u8 queue_en[HNS_ROCE_V1_RESV_QP] = { 0 };
u8 phy_port;
+ u8 port = 0;
u8 sl;
priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
@@ -709,27 +711,35 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
attr.rnr_retry = 7;
attr.timeout = 0x12;
attr.path_mtu = IB_MTU_256;
+ attr.ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
rdma_ah_set_grh(&attr.ah_attr, NULL, 0, 0, 1, 0);
rdma_ah_set_static_rate(&attr.ah_attr, 3);
subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
+ phy_port = (i >= HNS_ROCE_MAX_PORTS) ? (i - 2) :
+ (i % HNS_ROCE_MAX_PORTS);
+ sl = i / HNS_ROCE_MAX_PORTS;
+
+ for (j = 0; j < caps->num_ports; j++) {
+ if (hr_dev->iboe.phy_port[j] == phy_port) {
+ queue_en[i] = 1;
+ port = j;
+ break;
+ }
+ }
+
+ if (!queue_en[i])
+ continue;
+
free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd);
- if (IS_ERR(free_mr->mr_free_qp[i])) {
+ if (!free_mr->mr_free_qp[i]) {
dev_err(dev, "Create loop qp failed!\n");
goto create_lp_qp_failed;
}
hr_qp = free_mr->mr_free_qp[i];
- sl = i / caps->num_ports;
-
- if (caps->num_ports == HNS_ROCE_MAX_PORTS)
- phy_port = (i >= HNS_ROCE_MAX_PORTS) ? (i - 2) :
- (i % caps->num_ports);
- else
- phy_port = i % caps->num_ports;
-
- hr_qp->port = phy_port + 1;
+ hr_qp->port = port;
hr_qp->phy_port = phy_port;
hr_qp->ibqp.qp_type = IB_QPT_RC;
hr_qp->ibqp.device = &hr_dev->ib_dev;
@@ -739,23 +749,22 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
hr_qp->ibqp.recv_cq = cq;
hr_qp->ibqp.send_cq = cq;
- rdma_ah_set_port_num(&attr.ah_attr, phy_port + 1);
- rdma_ah_set_sl(&attr.ah_attr, phy_port + 1);
- attr.port_num = phy_port + 1;
+ rdma_ah_set_port_num(&attr.ah_attr, port + 1);
+ rdma_ah_set_sl(&attr.ah_attr, sl);
+ attr.port_num = port + 1;
attr.dest_qp_num = hr_qp->qpn;
memcpy(rdma_ah_retrieve_dmac(&attr.ah_attr),
- hr_dev->dev_addr[phy_port],
+ hr_dev->dev_addr[port],
MAC_ADDR_OCTET_NUM);
memcpy(&dgid.raw, &subnet_prefix, sizeof(u64));
- memcpy(&dgid.raw[8], hr_dev->dev_addr[phy_port], 3);
- memcpy(&dgid.raw[13], hr_dev->dev_addr[phy_port] + 3, 3);
+ memcpy(&dgid.raw[8], hr_dev->dev_addr[port], 3);
+ memcpy(&dgid.raw[13], hr_dev->dev_addr[port] + 3, 3);
dgid.raw[11] = 0xff;
dgid.raw[12] = 0xfe;
dgid.raw[8] ^= 2;
rdma_ah_set_dgid_raw(&attr.ah_attr, dgid.raw);
- attr_mask |= IB_QP_PORT;
ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
IB_QPS_RESET, IB_QPS_INIT);
@@ -812,6 +821,9 @@ static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
hr_qp = free_mr->mr_free_qp[i];
+ if (!hr_qp)
+ continue;
+
ret = hns_roce_v1_destroy_qp(&hr_qp->ibqp);
if (ret)
dev_err(dev, "Destroy qp %d for mr free failed(%d)!\n",
@@ -963,7 +975,7 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies;
int i;
int ret;
- int ne;
+ int ne = 0;
mr_work = container_of(work, struct hns_roce_mr_free_work, work);
hr_mr = (struct hns_roce_mr *)mr_work->mr;
@@ -976,6 +988,10 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
hr_qp = free_mr->mr_free_qp[i];
+ if (!hr_qp)
+ continue;
+ ne++;
+
ret = hns_roce_v1_send_lp_wqe(hr_qp);
if (ret) {
dev_err(dev,
@@ -985,7 +1001,6 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
}
}
- ne = HNS_ROCE_V1_RESV_QP;
do {
ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc);
if (ret < 0) {
@@ -995,7 +1010,8 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
goto free_work;
}
ne -= ret;
- msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE);
+ usleep_range(HNS_ROCE_V1_FREE_MR_WAIT_VALUE * 1000,
+ (1 + HNS_ROCE_V1_FREE_MR_WAIT_VALUE) * 1000);
} while (ne && time_before_eq(jiffies, end));
if (ne != 0)
@@ -2007,7 +2023,6 @@ int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
u32 notification_flag;
u32 doorbell[2];
- int ret = 0;
notification_flag = (flags & IB_CQ_SOLICITED_MASK) ==
IB_CQ_SOLICITED ? CQ_DB_REQ_NOT : CQ_DB_REQ_NOT_SOL;
@@ -2027,7 +2042,7 @@ int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
hns_roce_write64_k(doorbell, hr_cq->cq_db_l);
- return ret;
+ return 0;
}
static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
@@ -2181,7 +2196,7 @@ static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
}
wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
++wq->tail;
- } else {
+ } else {
		/* RQ corresponds to CQE */
wc->byte_len = le32_to_cpu(cqe->byte_cnt);
opcode = roce_get_field(cqe->cqe_byte_4,
@@ -3533,10 +3548,12 @@ static int check_qp_db_process_status(struct hns_roce_dev *hr_dev,
old_cnt = roce_get_field(old_send,
ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S);
- if (cur_cnt - old_cnt > SDB_ST_CMP_VAL)
+ if (cur_cnt - old_cnt >
+ SDB_ST_CMP_VAL) {
success_flags = 1;
- else {
- send_ptr = roce_get_field(old_send,
+ } else {
+ send_ptr =
+ roce_get_field(old_send,
ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
roce_get_field(sdb_retry_cnt,
@@ -3641,6 +3658,7 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
struct hns_roce_dev *hr_dev;
struct hns_roce_qp *hr_qp;
struct device *dev;
+ unsigned long qpn;
int ret;
qp_work_entry = container_of(work, struct hns_roce_qp_work, work);
@@ -3648,8 +3666,9 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
dev = &hr_dev->pdev->dev;
priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
hr_qp = qp_work_entry->qp;
+ qpn = hr_qp->qpn;
- dev_dbg(dev, "Schedule destroy QP(0x%lx) work.\n", hr_qp->qpn);
+ dev_dbg(dev, "Schedule destroy QP(0x%lx) work.\n", qpn);
qp_work_entry->sche_cnt++;
@@ -3660,7 +3679,7 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
&qp_work_entry->db_wait_stage);
if (ret) {
dev_err(dev, "Check QP(0x%lx) db process status failed!\n",
- hr_qp->qpn);
+ qpn);
return;
}
@@ -3674,7 +3693,7 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state,
IB_QPS_RESET);
if (ret) {
- dev_err(dev, "Modify QP(0x%lx) to RST failed!\n", hr_qp->qpn);
+ dev_err(dev, "Modify QP(0x%lx) to RST failed!\n", qpn);
return;
}
@@ -3683,14 +3702,14 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
if (hr_qp->ibqp.qp_type == IB_QPT_RC) {
/* RC QP, release QPN */
- hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
+ hns_roce_release_range_qp(hr_dev, qpn, 1);
kfree(hr_qp);
} else
kfree(hr_to_hr_sqp(hr_qp));
kfree(qp_work_entry);
- dev_dbg(dev, "Accomplished destroy QP(0x%lx) work.\n", hr_qp->qpn);
+ dev_dbg(dev, "Accomplished destroy QP(0x%lx) work.\n", qpn);
}
int hns_roce_v1_destroy_qp(struct ib_qp *ibqp)
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index c3b41f95e70a..d9777b662eba 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -125,8 +125,6 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
return -ENODEV;
}
- spin_lock_bh(&hr_dev->iboe.lock);
-
switch (event) {
case NETDEV_UP:
case NETDEV_CHANGE:
@@ -144,7 +142,6 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
break;
}
- spin_unlock_bh(&hr_dev->iboe.lock);
return 0;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 80fc01ffd8bd..e387360e3780 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -32,6 +32,7 @@
*/
#include <linux/platform_device.h>
+#include <linux/vmalloc.h>
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 054c52699090..f5dd21c2d275 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -799,7 +799,7 @@ bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
cur = hr_wq->head - hr_wq->tail;
if (likely(cur + nreq < hr_wq->max_post))
- return 0;
+ return false;
hr_cq = to_hr_cq(ib_cq);
spin_lock(&hr_cq->lock);
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
index da2eb5a281fa..9b1566468744 100644
--- a/drivers/infiniband/hw/i40iw/i40iw.h
+++ b/drivers/infiniband/hw/i40iw/i40iw.h
@@ -527,6 +527,7 @@ enum i40iw_status_code i40iw_add_mac_addr(struct i40iw_device *iwdev,
int i40iw_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *);
void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq);
+void i40iw_cleanup_pending_cqp_op(struct i40iw_device *iwdev);
void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev);
void i40iw_add_pdusecount(struct i40iw_pd *iwpd);
void i40iw_rem_devusecount(struct i40iw_device *iwdev);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 6ae98aa7f74e..14f36ba4e5be 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -1582,15 +1582,14 @@ static enum i40iw_status_code i40iw_del_multiple_qhash(
}
/**
- * i40iw_netdev_vlan_ipv6 - Gets the netdev and mac
+ * i40iw_netdev_vlan_ipv6 - Gets the netdev and vlan
* @addr: local IPv6 address
* @vlan_id: vlan id for the given IPv6 address
- * @mac: mac address for the given IPv6 address
*
* Returns the net_device of the IPv6 address and also sets the
- * vlan id and mac for that address.
+ * vlan id for that address.
*/
-static struct net_device *i40iw_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *mac)
+static struct net_device *i40iw_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id)
{
struct net_device *ip_dev = NULL;
struct in6_addr laddr6;
@@ -1600,15 +1599,11 @@ static struct net_device *i40iw_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *ma
i40iw_copy_ip_htonl(laddr6.in6_u.u6_addr32, addr);
if (vlan_id)
*vlan_id = I40IW_NO_VLAN;
- if (mac)
- eth_zero_addr(mac);
rcu_read_lock();
for_each_netdev_rcu(&init_net, ip_dev) {
if (ipv6_chk_addr(&init_net, &laddr6, ip_dev, 1)) {
if (vlan_id)
*vlan_id = rdma_vlan_dev_vlan_id(ip_dev);
- if (ip_dev->dev_addr && mac)
- ether_addr_copy(mac, ip_dev->dev_addr);
break;
}
}
@@ -3487,7 +3482,8 @@ static void i40iw_cm_disconn_true(struct i40iw_qp *iwqp)
if (((original_hw_tcp_state == I40IW_TCP_STATE_CLOSED) ||
(original_hw_tcp_state == I40IW_TCP_STATE_TIME_WAIT) ||
(last_ae == I40IW_AE_RDMAP_ROE_BAD_LLP_CLOSE) ||
- (last_ae == I40IW_AE_LLP_CONNECTION_RESET))) {
+ (last_ae == I40IW_AE_LLP_CONNECTION_RESET) ||
+ iwdev->reset)) {
issue_close = 1;
iwqp->cm_id = NULL;
if (!iwqp->flush_issued) {
@@ -3587,7 +3583,7 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
cm_node->vlan_id = i40iw_get_vlan_ipv4(cm_node->loc_addr);
} else {
cm_node->ipv4 = false;
- i40iw_netdev_vlan_ipv6(cm_node->loc_addr, &cm_node->vlan_id, NULL);
+ i40iw_netdev_vlan_ipv6(cm_node->loc_addr, &cm_node->vlan_id);
}
i40iw_debug(cm_node->dev,
I40IW_DEBUG_CM,
@@ -3686,8 +3682,6 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
cm_node->accelerated = 1;
if (cm_node->accept_pend) {
- if (!cm_node->listener)
- i40iw_pr_err("cm_node->listener NULL for passive node\n");
atomic_dec(&cm_node->listener->pend_accepts_cnt);
cm_node->accept_pend = 0;
}
@@ -3788,7 +3782,7 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
raddr6->sin6_addr.in6_u.u6_addr32);
cm_info.loc_port = ntohs(laddr6->sin6_port);
cm_info.rem_port = ntohs(raddr6->sin6_port);
- i40iw_netdev_vlan_ipv6(cm_info.loc_addr, &cm_info.vlan_id, NULL);
+ i40iw_netdev_vlan_ipv6(cm_info.loc_addr, &cm_info.vlan_id);
}
cm_info.cm_id = cm_id;
cm_info.tos = cm_id->tos;
@@ -3930,8 +3924,7 @@ int i40iw_create_listen(struct iw_cm_id *cm_id, int backlog)
cm_info.loc_port = ntohs(laddr6->sin6_port);
if (ipv6_addr_type(&laddr6->sin6_addr) != IPV6_ADDR_ANY)
i40iw_netdev_vlan_ipv6(cm_info.loc_addr,
- &cm_info.vlan_id,
- NULL);
+ &cm_info.vlan_id);
else
wildcard = true;
}
@@ -4055,12 +4048,7 @@ static void i40iw_cm_event_connected(struct i40iw_cm_event *event)
i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
cm_node->accelerated = 1;
- if (cm_node->accept_pend) {
- if (!cm_node->listener)
- i40iw_pr_err("listener is null for passive node\n");
- atomic_dec(&cm_node->listener->pend_accepts_cnt);
- cm_node->accept_pend = 0;
- }
+
return;
error:
@@ -4265,6 +4253,8 @@ void i40iw_cm_disconnect_all(struct i40iw_device *iwdev)
cm_node = container_of(list_node, struct i40iw_cm_node, connected_entry);
attr.qp_state = IB_QPS_ERR;
i40iw_modify_qp(&cm_node->iwqp->ibqp, &attr, IB_QP_STATE, NULL);
+ if (iwdev->reset)
+ i40iw_cm_disconn(cm_node->iwqp);
i40iw_rem_ref_cm_node(cm_node);
}
}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
index a027e2072477..d1f5345f04f0 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
@@ -54,6 +54,17 @@ static inline void i40iw_insert_wqe_hdr(u64 *wqe, u64 header)
set_64bit_val(wqe, 24, header);
}
+void i40iw_check_cqp_progress(struct i40iw_cqp_timeout *cqp_timeout, struct i40iw_sc_dev *dev)
+{
+ if (cqp_timeout->compl_cqp_cmds != dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS]) {
+ cqp_timeout->compl_cqp_cmds = dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS];
+ cqp_timeout->count = 0;
+ } else {
+ if (dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS] != cqp_timeout->compl_cqp_cmds)
+ cqp_timeout->count++;
+ }
+}
+
/**
* i40iw_get_cqp_reg_info - get head and tail for cqp using registers
* @cqp: struct for cqp hw
@@ -130,20 +141,32 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_commit_buf(
u64 base = 0;
u32 i, j;
u32 k = 0;
- u32 low;
/* copy base values in obj_info */
- for (i = I40IW_HMC_IW_QP, j = 0;
- i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
+ for (i = I40IW_HMC_IW_QP, j = 0; i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
+ if ((i == I40IW_HMC_IW_SRQ) ||
+ (i == I40IW_HMC_IW_FSIMC) ||
+ (i == I40IW_HMC_IW_FSIAV)) {
+ info[i].base = 0;
+ info[i].cnt = 0;
+ continue;
+ }
get_64bit_val(buf, j, &temp);
info[i].base = RS_64_1(temp, 32) * 512;
if (info[i].base > base) {
base = info[i].base;
k = i;
}
- low = (u32)(temp);
- if (low)
- info[i].cnt = low;
+ if (i == I40IW_HMC_IW_APBVT_ENTRY) {
+ info[i].cnt = 1;
+ continue;
+ }
+ if (i == I40IW_HMC_IW_QP)
+ info[i].cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);
+ else if (i == I40IW_HMC_IW_CQ)
+ info[i].cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);
+ else
+ info[i].cnt = (u32)(temp);
}
size = info[k].cnt * info[k].size + info[k].base;
if (size & 0x1FFFFF)
@@ -155,6 +178,31 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_commit_buf(
}
/**
+ * i40iw_sc_decode_fpm_query() - Decode a 64 bit value into max count and size
+ * @buf: ptr to fpm query buffer
+ * @buf_idx: index into buf
+ * @info: ptr to i40iw_hmc_obj_info struct
+ * @rsrc_idx: resource index into info
+ *
+ * Decode a 64 bit value from fpm query buffer into max count and size
+ */
+static u64 i40iw_sc_decode_fpm_query(u64 *buf,
+ u32 buf_idx,
+ struct i40iw_hmc_obj_info *obj_info,
+ u32 rsrc_idx)
+{
+ u64 temp;
+ u32 size;
+
+ get_64bit_val(buf, buf_idx, &temp);
+ obj_info[rsrc_idx].max_cnt = (u32)temp;
+ size = (u32)RS_64_1(temp, 32);
+ obj_info[rsrc_idx].size = LS_64_1(1, size);
+
+ return temp;
+}
+
+/**
* i40iw_sc_parse_fpm_query_buf() - parses fpm query buffer
* @buf: ptr to fpm query buffer
* @info: ptr to i40iw_hmc_obj_info struct
@@ -168,9 +216,9 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_query_buf(
struct i40iw_hmc_info *hmc_info,
struct i40iw_hmc_fpm_misc *hmc_fpm_misc)
{
- u64 temp;
struct i40iw_hmc_obj_info *obj_info;
- u32 i, j, size;
+ u64 temp;
+ u32 size;
u16 max_pe_sds;
obj_info = hmc_info->hmc_obj;
@@ -185,41 +233,52 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_query_buf(
hmc_fpm_misc->max_sds = max_pe_sds;
hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index;
- for (i = I40IW_HMC_IW_QP, j = 8;
- i <= I40IW_HMC_IW_ARP; i++, j += 8) {
- get_64bit_val(buf, j, &temp);
- if (i == I40IW_HMC_IW_QP)
- obj_info[i].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);
- else if (i == I40IW_HMC_IW_CQ)
- obj_info[i].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);
- else
- obj_info[i].max_cnt = (u32)temp;
+ get_64bit_val(buf, 8, &temp);
+ obj_info[I40IW_HMC_IW_QP].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);
+ size = (u32)RS_64_1(temp, 32);
+ obj_info[I40IW_HMC_IW_QP].size = LS_64_1(1, size);
- size = (u32)RS_64_1(temp, 32);
- obj_info[i].size = ((u64)1 << size);
- }
- for (i = I40IW_HMC_IW_MR, j = 48;
- i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
- get_64bit_val(buf, j, &temp);
- obj_info[i].max_cnt = (u32)temp;
- size = (u32)RS_64_1(temp, 32);
- obj_info[i].size = LS_64_1(1, size);
- }
+ get_64bit_val(buf, 16, &temp);
+ obj_info[I40IW_HMC_IW_CQ].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);
+ size = (u32)RS_64_1(temp, 32);
+ obj_info[I40IW_HMC_IW_CQ].size = LS_64_1(1, size);
+
+ i40iw_sc_decode_fpm_query(buf, 32, obj_info, I40IW_HMC_IW_HTE);
+ i40iw_sc_decode_fpm_query(buf, 40, obj_info, I40IW_HMC_IW_ARP);
+
+ obj_info[I40IW_HMC_IW_APBVT_ENTRY].size = 8192;
+ obj_info[I40IW_HMC_IW_APBVT_ENTRY].max_cnt = 1;
+
+ i40iw_sc_decode_fpm_query(buf, 48, obj_info, I40IW_HMC_IW_MR);
+ i40iw_sc_decode_fpm_query(buf, 56, obj_info, I40IW_HMC_IW_XF);
- get_64bit_val(buf, 120, &temp);
- hmc_fpm_misc->max_ceqs = (u8)RS_64(temp, I40IW_QUERY_FPM_MAX_CEQS);
- get_64bit_val(buf, 120, &temp);
- hmc_fpm_misc->ht_multiplier = RS_64(temp, I40IW_QUERY_FPM_HTMULTIPLIER);
- get_64bit_val(buf, 120, &temp);
- hmc_fpm_misc->timer_bucket = RS_64(temp, I40IW_QUERY_FPM_TIMERBUCKET);
get_64bit_val(buf, 64, &temp);
+ obj_info[I40IW_HMC_IW_XFFL].max_cnt = (u32)temp;
+ obj_info[I40IW_HMC_IW_XFFL].size = 4;
hmc_fpm_misc->xf_block_size = RS_64(temp, I40IW_QUERY_FPM_XFBLOCKSIZE);
if (!hmc_fpm_misc->xf_block_size)
return I40IW_ERR_INVALID_SIZE;
+
+ i40iw_sc_decode_fpm_query(buf, 72, obj_info, I40IW_HMC_IW_Q1);
+
get_64bit_val(buf, 80, &temp);
+ obj_info[I40IW_HMC_IW_Q1FL].max_cnt = (u32)temp;
+ obj_info[I40IW_HMC_IW_Q1FL].size = 4;
hmc_fpm_misc->q1_block_size = RS_64(temp, I40IW_QUERY_FPM_Q1BLOCKSIZE);
if (!hmc_fpm_misc->q1_block_size)
return I40IW_ERR_INVALID_SIZE;
+
+ i40iw_sc_decode_fpm_query(buf, 88, obj_info, I40IW_HMC_IW_TIMER);
+
+ get_64bit_val(buf, 112, &temp);
+ obj_info[I40IW_HMC_IW_PBLE].max_cnt = (u32)temp;
+ obj_info[I40IW_HMC_IW_PBLE].size = 8;
+
+ get_64bit_val(buf, 120, &temp);
+ hmc_fpm_misc->max_ceqs = (u8)RS_64(temp, I40IW_QUERY_FPM_MAX_CEQS);
+ hmc_fpm_misc->ht_multiplier = RS_64(temp, I40IW_QUERY_FPM_HTMULTIPLIER);
+ hmc_fpm_misc->timer_bucket = RS_64(temp, I40IW_QUERY_FPM_TIMERBUCKET);
+
return 0;
}
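
The i40iw_sc_decode_fpm_query() helper introduced above packs two facts into one 64-bit word from the FPM query buffer: the low 32 bits carry the maximum object count and the high 32 bits carry a power-of-two exponent for the per-object size. A standalone sketch of that decode, with illustrative names rather than the driver's own types:

#include <stdint.h>
#include <stdio.h>

struct obj_info {
        uint32_t max_cnt;
        uint64_t size;
};

/* Low 32 bits: max count; high 32 bits: log2 of the per-object size. */
static void decode_fpm_word(uint64_t word, struct obj_info *info)
{
        info->max_cnt = (uint32_t)word;
        info->size = 1ULL << (uint32_t)(word >> 32);
}

int main(void)
{
        struct obj_info qp;

        decode_fpm_word(((uint64_t)8 << 32) | 4096, &qp);
        printf("max_cnt=%u size=%llu\n", qp.max_cnt,
               (unsigned long long)qp.size);    /* prints: max_cnt=4096 size=256 */
        return 0;
}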
@@ -1970,6 +2029,8 @@ static enum i40iw_status_code i40iw_sc_ccq_destroy(struct i40iw_sc_cq *ccq,
ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000);
}
+ cqp->process_cqp_sds = i40iw_update_sds_noccq;
+
return ret_code;
}
@@ -3390,13 +3451,6 @@ enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev, u8 hmc_fn_
hmc_info->sd_table.sd_entry = virt_mem.va;
}
- /* fill size of objects which are fixed */
- hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].size = 4;
- hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].size = 4;
- hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size = 8;
- hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].size = 8192;
- hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].max_cnt = 1;
-
return ret_code;
}
@@ -4838,7 +4892,7 @@ void i40iw_vsi_stats_free(struct i40iw_sc_vsi *vsi)
{
u8 fcn_id = vsi->fcn_id;
- if ((vsi->stats_fcn_id_alloc) && (fcn_id != I40IW_INVALID_FCN_ID))
+ if (vsi->stats_fcn_id_alloc && fcn_id < I40IW_MAX_STATS_COUNT)
vsi->dev->fcn_id_array[fcn_id] = false;
i40iw_hw_stats_stop_timer(vsi);
}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_d.h b/drivers/infiniband/hw/i40iw/i40iw_d.h
index a39ac12b6a7e..2ebaadbed379 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_d.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_d.h
@@ -1507,8 +1507,8 @@ enum {
I40IW_CQ0_ALIGNMENT_MASK = (256 - 1),
I40IW_HOST_CTX_ALIGNMENT_MASK = (4 - 1),
I40IW_SHADOWAREA_MASK = (128 - 1),
- I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK = 0,
- I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK = 0
+ I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK = (4 - 1),
+ I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK = (4 - 1)
};
enum i40iw_alignment {
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index e0f47cc2effc..cc742c3132c6 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -77,7 +77,6 @@ MODULE_PARM_DESC(mpa_version, "MPA version to be used in MPA Req/Resp 1 or 2");
MODULE_AUTHOR("Intel Corporation, <e1000-rdma@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection X722 iWARP RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(DRV_VERSION);
static struct i40e_client i40iw_client;
static char i40iw_client_name[I40E_CLIENT_STR_LENGTH] = "i40iw";
@@ -243,6 +242,8 @@ static void i40iw_destroy_cqp(struct i40iw_device *iwdev, bool free_hwcqp)
if (free_hwcqp)
dev->cqp_ops->cqp_destroy(dev->cqp);
+ i40iw_cleanup_pending_cqp_op(iwdev);
+
i40iw_free_dma_mem(dev->hw, &cqp->sq);
kfree(cqp->scratch_array);
iwdev->cqp.scratch_array = NULL;
@@ -274,13 +275,12 @@ static void i40iw_disable_irq(struct i40iw_sc_dev *dev,
/**
* i40iw_destroy_aeq - destroy aeq
* @iwdev: iwarp device
- * @reset: true if called before reset
*
* Issue a destroy aeq request and
* free the resources associated with the aeq
* The function is called during driver unload
*/
-static void i40iw_destroy_aeq(struct i40iw_device *iwdev, bool reset)
+static void i40iw_destroy_aeq(struct i40iw_device *iwdev)
{
enum i40iw_status_code status = I40IW_ERR_NOT_READY;
struct i40iw_sc_dev *dev = &iwdev->sc_dev;
@@ -288,7 +288,7 @@ static void i40iw_destroy_aeq(struct i40iw_device *iwdev, bool reset)
if (!iwdev->msix_shared)
i40iw_disable_irq(dev, iwdev->iw_msixtbl, (void *)iwdev);
- if (reset)
+ if (iwdev->reset)
goto exit;
if (!dev->aeq_ops->aeq_destroy(&aeq->sc_aeq, 0, 1))
@@ -304,19 +304,17 @@ exit:
* i40iw_destroy_ceq - destroy ceq
* @iwdev: iwarp device
* @iwceq: ceq to be destroyed
- * @reset: true if called before reset
*
* Issue a destroy ceq request and
* free the resources associated with the ceq
*/
static void i40iw_destroy_ceq(struct i40iw_device *iwdev,
- struct i40iw_ceq *iwceq,
- bool reset)
+ struct i40iw_ceq *iwceq)
{
enum i40iw_status_code status;
struct i40iw_sc_dev *dev = &iwdev->sc_dev;
- if (reset)
+ if (iwdev->reset)
goto exit;
status = dev->ceq_ops->ceq_destroy(&iwceq->sc_ceq, 0, 1);
@@ -335,12 +333,11 @@ exit:
/**
* i40iw_dele_ceqs - destroy all ceq's
* @iwdev: iwarp device
- * @reset: true if called before reset
*
* Go through all of the device ceq's and for each ceq
* disable the ceq interrupt and destroy the ceq
*/
-static void i40iw_dele_ceqs(struct i40iw_device *iwdev, bool reset)
+static void i40iw_dele_ceqs(struct i40iw_device *iwdev)
{
u32 i = 0;
struct i40iw_sc_dev *dev = &iwdev->sc_dev;
@@ -349,32 +346,31 @@ static void i40iw_dele_ceqs(struct i40iw_device *iwdev, bool reset)
if (iwdev->msix_shared) {
i40iw_disable_irq(dev, msix_vec, (void *)iwdev);
- i40iw_destroy_ceq(iwdev, iwceq, reset);
+ i40iw_destroy_ceq(iwdev, iwceq);
iwceq++;
i++;
}
for (msix_vec++; i < iwdev->ceqs_count; i++, msix_vec++, iwceq++) {
i40iw_disable_irq(dev, msix_vec, (void *)iwceq);
- i40iw_destroy_ceq(iwdev, iwceq, reset);
+ i40iw_destroy_ceq(iwdev, iwceq);
}
}
/**
* i40iw_destroy_ccq - destroy control cq
* @iwdev: iwarp device
- * @reset: true if called before reset
*
* Issue destroy ccq request and
* free the resources associated with the ccq
*/
-static void i40iw_destroy_ccq(struct i40iw_device *iwdev, bool reset)
+static void i40iw_destroy_ccq(struct i40iw_device *iwdev)
{
struct i40iw_sc_dev *dev = &iwdev->sc_dev;
struct i40iw_ccq *ccq = &iwdev->ccq;
enum i40iw_status_code status = 0;
- if (!reset)
+ if (!iwdev->reset)
status = dev->ccq_ops->ccq_destroy(dev->ccq, 0, true);
if (status)
i40iw_pr_err("ccq destroy failed %d\n", status);
@@ -810,7 +806,7 @@ static enum i40iw_status_code i40iw_setup_ceqs(struct i40iw_device *iwdev,
iwceq->msix_idx = msix_vec->idx;
status = i40iw_configure_ceq_vector(iwdev, iwceq, ceq_id, msix_vec);
if (status) {
- i40iw_destroy_ceq(iwdev, iwceq, false);
+ i40iw_destroy_ceq(iwdev, iwceq);
break;
}
i40iw_enable_intr(&iwdev->sc_dev, msix_vec->idx);
@@ -912,7 +908,7 @@ static enum i40iw_status_code i40iw_setup_aeq(struct i40iw_device *iwdev)
status = i40iw_configure_aeq_vector(iwdev);
if (status) {
- i40iw_destroy_aeq(iwdev, false);
+ i40iw_destroy_aeq(iwdev);
return status;
}
@@ -1442,12 +1438,11 @@ static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev,
/**
* i40iw_deinit_device - clean up the device resources
* @iwdev: iwarp device
- * @reset: true if called before reset
*
* Destroy the ib device interface, remove the mac ip entry and ipv4/ipv6 addresses,
* destroy the device queues and free the pble and the hmc objects
*/
-static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset)
+static void i40iw_deinit_device(struct i40iw_device *iwdev)
{
struct i40e_info *ldev = iwdev->ldev;
@@ -1464,7 +1459,7 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset)
i40iw_destroy_rdma_device(iwdev->iwibdev);
/* fallthrough */
case IP_ADDR_REGISTERED:
- if (!reset)
+ if (!iwdev->reset)
i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
/* fallthrough */
case INET_NOTIFIER:
@@ -1474,26 +1469,26 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset)
unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);
}
/* fallthrough */
+ case PBLE_CHUNK_MEM:
+ i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc);
+ /* fallthrough */
case CEQ_CREATED:
- i40iw_dele_ceqs(iwdev, reset);
+ i40iw_dele_ceqs(iwdev);
/* fallthrough */
case AEQ_CREATED:
- i40iw_destroy_aeq(iwdev, reset);
+ i40iw_destroy_aeq(iwdev);
/* fallthrough */
case IEQ_CREATED:
- i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, reset);
+ i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, iwdev->reset);
/* fallthrough */
case ILQ_CREATED:
- i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_ILQ, reset);
+ i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_ILQ, iwdev->reset);
/* fallthrough */
case CCQ_CREATED:
- i40iw_destroy_ccq(iwdev, reset);
- /* fallthrough */
- case PBLE_CHUNK_MEM:
- i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc);
+ i40iw_destroy_ccq(iwdev);
/* fallthrough */
case HMC_OBJS_CREATED:
- i40iw_del_hmc_objects(dev, dev->hmc_info, true, reset);
+ i40iw_del_hmc_objects(dev, dev->hmc_info, true, iwdev->reset);
/* fallthrough */
case CQP_CREATED:
i40iw_destroy_cqp(iwdev, true);
@@ -1670,6 +1665,7 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
status = i40iw_hmc_init_pble(&iwdev->sc_dev, iwdev->pble_rsrc);
if (status)
break;
+ iwdev->init_state = PBLE_CHUNK_MEM;
iwdev->virtchnl_wq = alloc_ordered_workqueue("iwvch", WQ_MEM_RECLAIM);
i40iw_register_notifiers();
iwdev->init_state = INET_NOTIFIER;
@@ -1693,7 +1689,7 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
} while (0);
i40iw_pr_err("status = %d last completion = %d\n", status, iwdev->init_state);
- i40iw_deinit_device(iwdev, false);
+ i40iw_deinit_device(iwdev);
return -ERESTART;
}
@@ -1774,9 +1770,12 @@ static void i40iw_close(struct i40e_info *ldev, struct i40e_client *client, bool
iwdev = &hdl->device;
iwdev->closing = true;
+ if (reset)
+ iwdev->reset = true;
+
i40iw_cm_disconnect_all(iwdev);
destroy_workqueue(iwdev->virtchnl_wq);
- i40iw_deinit_device(iwdev, reset);
+ i40iw_deinit_device(iwdev);
}
/**
diff --git a/drivers/infiniband/hw/i40iw/i40iw_p.h b/drivers/infiniband/hw/i40iw/i40iw_p.h
index 28a92fee0822..e217a1259f57 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_p.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_p.h
@@ -35,11 +35,13 @@
#ifndef I40IW_P_H
#define I40IW_P_H
-#define PAUSE_TIMER_VALUE 0xFFFF
-#define REFRESH_THRESHOLD 0x7FFF
-#define HIGH_THRESHOLD 0x800
-#define LOW_THRESHOLD 0x200
-#define ALL_TC2PFC 0xFF
+#define PAUSE_TIMER_VALUE 0xFFFF
+#define REFRESH_THRESHOLD 0x7FFF
+#define HIGH_THRESHOLD 0x800
+#define LOW_THRESHOLD 0x200
+#define ALL_TC2PFC 0xFF
+#define CQP_COMPL_WAIT_TIME 0x3E8
+#define CQP_TIMEOUT_THRESHOLD 5
void i40iw_debug_buf(struct i40iw_sc_dev *dev, enum i40iw_debug_flag mask,
char *desc, u64 *buf, u32 size);
@@ -51,6 +53,8 @@ void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp);
u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch);
+void i40iw_check_cqp_progress(struct i40iw_cqp_timeout *cqp_timeout, struct i40iw_sc_dev *dev);
+
enum i40iw_status_code i40iw_sc_mr_fast_register(struct i40iw_sc_qp *qp,
struct i40iw_fast_reg_stag_info *info,
bool post_sq);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_pble.c b/drivers/infiniband/hw/i40iw/i40iw_pble.c
index c87ba1617087..540aab5e502d 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_pble.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_pble.c
@@ -269,10 +269,8 @@ static enum i40iw_status_code add_bp_pages(struct i40iw_sc_dev *dev,
status = i40iw_add_sd_table_entry(dev->hw, hmc_info,
info->idx.sd_idx, I40IW_SD_TYPE_PAGED,
I40IW_HMC_DIRECT_BP_SIZE);
- if (status) {
- i40iw_free_vmalloc_mem(dev->hw, chunk);
- return status;
- }
+ if (status)
+ goto error;
if (!dev->is_pf) {
status = i40iw_vchnl_vf_add_hmc_objs(dev, I40IW_HMC_IW_PBLE,
fpm_to_idx(pble_rsrc,
@@ -280,8 +278,7 @@ static enum i40iw_status_code add_bp_pages(struct i40iw_sc_dev *dev,
(info->pages << PBLE_512_SHIFT));
if (status) {
i40iw_pr_err("allocate PBLEs in the PF. Error %i\n", status);
- i40iw_free_vmalloc_mem(dev->hw, chunk);
- return status;
+ goto error;
}
}
addr = chunk->vaddr;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
index db41ab40da9c..c2cab20c4bc5 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_puda.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
@@ -408,6 +408,9 @@ enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp,
set_64bit_val(wqe, 0, info->paddr);
set_64bit_val(wqe, 8, LS_64(info->len, I40IWQPSQ_FRAG_LEN));
set_64bit_val(wqe, 16, header[0]);
+
+ /* Ensure all data is written before writing valid bit */
+ wmb();
set_64bit_val(wqe, 24, header[1]);
i40iw_debug_buf(qp->dev, I40IW_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32);
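
The wmb() added above orders all earlier WQE stores before the store that carries the valid bit, so the hardware never observes a half-written descriptor. In user space the equivalent publish step is a release store; a small C11 sketch of the producer side (the descriptor layout and names are illustrative):

#include <stdatomic.h>
#include <stdint.h>

struct desc {
        uint64_t addr;
        uint32_t len;
        _Atomic uint32_t valid;         /* consumer polls this word */
};

static void post_desc(struct desc *d, uint64_t addr, uint32_t len)
{
        d->addr = addr;
        d->len = len;
        /*
         * Release ordering: every store above becomes visible before the
         * valid flag, mirroring the wmb() before the header write.
         */
        atomic_store_explicit(&d->valid, 1, memory_order_release);
}

int main(void)
{
        static struct desc ring[4];

        post_desc(&ring[0], 0x1000, 64);
        return 0;
}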
@@ -682,7 +685,7 @@ static enum i40iw_status_code i40iw_puda_cq_create(struct i40iw_puda_rsrc *rsrc)
cqsize = rsrc->cq_size * (sizeof(struct i40iw_cqe));
tsize = cqsize + sizeof(struct i40iw_cq_shadow_area);
ret = i40iw_allocate_dma_mem(dev->hw, &rsrc->cqmem, tsize,
- I40IW_CQ0_ALIGNMENT_MASK);
+ I40IW_CQ0_ALIGNMENT);
if (ret)
return ret;
@@ -946,14 +949,16 @@ enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_vsi *vsi,
ret = i40iw_puda_qp_create(rsrc);
}
if (ret) {
- i40iw_debug(dev, I40IW_DEBUG_PUDA, "[%s] error qp_create\n", __func__);
+ i40iw_debug(dev, I40IW_DEBUG_PUDA, "[%s] error qp_create\n",
+ __func__);
goto error;
}
rsrc->completion = PUDA_QP_CREATED;
ret = i40iw_puda_allocbufs(rsrc, info->tx_buf_cnt + info->rq_size);
if (ret) {
- i40iw_debug(dev, I40IW_DEBUG_PUDA, "[%s] error allloc_buf\n", __func__);
+ i40iw_debug(dev, I40IW_DEBUG_PUDA, "[%s] error alloc_buf\n",
+ __func__);
goto error;
}
@@ -1411,10 +1416,10 @@ static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,
if (!list_empty(rxlist)) {
tmpbuf = (struct i40iw_puda_buf *)rxlist->next;
- plist = &tmpbuf->list;
while ((struct list_head *)tmpbuf != rxlist) {
if ((int)(buf->seqnum - tmpbuf->seqnum) < 0)
break;
+ plist = &tmpbuf->list;
tmpbuf = (struct i40iw_puda_buf *)plist->next;
}
/* Insert buf before tmpbuf */
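
The fix above advances plist inside the walk, so the new buffer is inserted immediately before the first element whose sequence number is larger, keeping the receive list sorted even when segments arrive out of order. A standalone sketch of that sorted insert on a singly linked list, using the same wrap-safe signed difference for the comparison (illustrative types, not the driver's i40iw_puda_buf):

#include <stdio.h>
#include <stdlib.h>

struct seg {
        unsigned int seqnum;
        struct seg *next;
};

/* Insert before the first node with a larger sequence number. */
static void insert_sorted(struct seg **head, struct seg *buf)
{
        struct seg **pos = head;

        while (*pos && (int)(buf->seqnum - (*pos)->seqnum) >= 0)
                pos = &(*pos)->next;    /* advance the insertion point */
        buf->next = *pos;
        *pos = buf;
}

int main(void)
{
        unsigned int seqs[] = { 10, 30, 20 };
        struct seg *head = NULL, *s;
        size_t i;

        for (i = 0; i < sizeof(seqs) / sizeof(seqs[0]); i++) {
                s = calloc(1, sizeof(*s));
                if (!s)
                        return 1;
                s->seqnum = seqs[i];
                insert_sorted(&head, s);
        }
        for (s = head; s; s = s->next)
                printf("%u ", s->seqnum);       /* prints: 10 20 30 */
        printf("\n");
        return 0;
}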
diff --git a/drivers/infiniband/hw/i40iw/i40iw_status.h b/drivers/infiniband/hw/i40iw/i40iw_status.h
index 91c421762f06..f7013f11d808 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_status.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_status.h
@@ -62,7 +62,7 @@ enum i40iw_status_code {
I40IW_ERR_INVALID_ALIGNMENT = -23,
I40IW_ERR_FLUSHED_QUEUE = -24,
I40IW_ERR_INVALID_PUSH_PAGE_INDEX = -25,
- I40IW_ERR_INVALID_IMM_DATA_SIZE = -26,
+ I40IW_ERR_INVALID_INLINE_DATA_SIZE = -26,
I40IW_ERR_TIMEOUT = -27,
I40IW_ERR_OPCODE_MISMATCH = -28,
I40IW_ERR_CQP_COMPL_ERROR = -29,
diff --git a/drivers/infiniband/hw/i40iw/i40iw_type.h b/drivers/infiniband/hw/i40iw/i40iw_type.h
index 959ec81fba99..63118f6d5ab4 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_type.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_type.h
@@ -1345,4 +1345,9 @@ struct i40iw_virtchnl_work_info {
void *worker_vf_dev;
};
+struct i40iw_cqp_timeout {
+ u64 compl_cqp_cmds;
+ u8 count;
+};
+
#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_uk.c b/drivers/infiniband/hw/i40iw/i40iw_uk.c
index b0d3a0e8a9b5..0aadb7a0d1aa 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_uk.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_uk.c
@@ -435,7 +435,7 @@ static enum i40iw_status_code i40iw_inline_rdma_write(struct i40iw_qp_uk *qp,
op_info = &info->op.inline_rdma_write;
if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE)
- return I40IW_ERR_INVALID_IMM_DATA_SIZE;
+ return I40IW_ERR_INVALID_INLINE_DATA_SIZE;
ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size);
if (ret_code)
@@ -511,7 +511,7 @@ static enum i40iw_status_code i40iw_inline_send(struct i40iw_qp_uk *qp,
op_info = &info->op.inline_send;
if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE)
- return I40IW_ERR_INVALID_IMM_DATA_SIZE;
+ return I40IW_ERR_INVALID_INLINE_DATA_SIZE;
ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size);
if (ret_code)
@@ -784,7 +784,7 @@ static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq,
get_64bit_val(cqe, 0, &qword0);
get_64bit_val(cqe, 16, &qword2);
- info->tcp_seq_num = (u8)RS_64(qword0, I40IWCQ_TCPSEQNUM);
+ info->tcp_seq_num = (u32)RS_64(qword0, I40IWCQ_TCPSEQNUM);
info->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID);
@@ -912,7 +912,7 @@ enum i40iw_status_code i40iw_get_wqe_shift(u32 wqdepth, u32 sge, u32 inline_data
return 0;
}
-static struct i40iw_qp_uk_ops iw_qp_uk_ops = {
+static const struct i40iw_qp_uk_ops iw_qp_uk_ops = {
.iw_qp_post_wr = i40iw_qp_post_wr,
.iw_qp_ring_push_db = i40iw_qp_ring_push_db,
.iw_rdma_write = i40iw_rdma_write,
@@ -926,14 +926,14 @@ static struct i40iw_qp_uk_ops iw_qp_uk_ops = {
.iw_post_nop = i40iw_nop
};
-static struct i40iw_cq_ops iw_cq_ops = {
+static const struct i40iw_cq_ops iw_cq_ops = {
.iw_cq_request_notification = i40iw_cq_request_notification,
.iw_cq_poll_completion = i40iw_cq_poll_completion,
.iw_cq_post_entries = i40iw_cq_post_entries,
.iw_cq_clean = i40iw_clean_cq
};
-static struct i40iw_device_uk_ops iw_device_uk_ops = {
+static const struct i40iw_device_uk_ops iw_device_uk_ops = {
.iwarp_cq_uk_init = i40iw_cq_uk_init,
.iwarp_qp_uk_init = i40iw_qp_uk_init,
};
@@ -1187,7 +1187,7 @@ enum i40iw_status_code i40iw_inline_data_size_to_wqesize(u32 data_size,
u8 *wqe_size)
{
if (data_size > I40IW_MAX_INLINE_DATA_SIZE)
- return I40IW_ERR_INVALID_IMM_DATA_SIZE;
+ return I40IW_ERR_INVALID_INLINE_DATA_SIZE;
if (data_size <= 16)
*wqe_size = I40IW_QP_WQE_MIN_SIZE;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index 56d986924a4c..62f1f45b8737 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -337,6 +337,7 @@ struct i40iw_cqp_request *i40iw_get_cqp_request(struct i40iw_cqp *cqp, bool wait
*/
void i40iw_free_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp_request)
{
+ struct i40iw_device *iwdev = container_of(cqp, struct i40iw_device, cqp);
unsigned long flags;
if (cqp_request->dynamic) {
@@ -350,6 +351,7 @@ void i40iw_free_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp
list_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs);
spin_unlock_irqrestore(&cqp->req_lock, flags);
}
+ wake_up(&iwdev->close_wq);
}
/**
@@ -365,6 +367,56 @@ void i40iw_put_cqp_request(struct i40iw_cqp *cqp,
}
/**
+ * i40iw_free_pending_cqp_request -free pending cqp request objs
+ * @cqp: cqp ptr
+ * @cqp_request: to be put back in cqp list
+ */
+static void i40iw_free_pending_cqp_request(struct i40iw_cqp *cqp,
+ struct i40iw_cqp_request *cqp_request)
+{
+ struct i40iw_device *iwdev = container_of(cqp, struct i40iw_device, cqp);
+
+ if (cqp_request->waiting) {
+ cqp_request->compl_info.error = true;
+ cqp_request->request_done = true;
+ wake_up(&cqp_request->waitq);
+ }
+ i40iw_put_cqp_request(cqp, cqp_request);
+ wait_event_timeout(iwdev->close_wq,
+ !atomic_read(&cqp_request->refcount),
+ 1000);
+}
+
+/**
+ * i40iw_cleanup_pending_cqp_op - clean-up cqp with no completions
+ * @iwdev: iwarp device
+ */
+void i40iw_cleanup_pending_cqp_op(struct i40iw_device *iwdev)
+{
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ struct i40iw_cqp *cqp = &iwdev->cqp;
+ struct i40iw_cqp_request *cqp_request = NULL;
+ struct cqp_commands_info *pcmdinfo = NULL;
+ u32 i, pending_work, wqe_idx;
+
+ pending_work = I40IW_RING_WORK_AVAILABLE(cqp->sc_cqp.sq_ring);
+ wqe_idx = I40IW_RING_GETCURRENT_TAIL(cqp->sc_cqp.sq_ring);
+ for (i = 0; i < pending_work; i++) {
+ cqp_request = (struct i40iw_cqp_request *)(unsigned long)cqp->scratch_array[wqe_idx];
+ if (cqp_request)
+ i40iw_free_pending_cqp_request(cqp, cqp_request);
+ wqe_idx = (wqe_idx + 1) % I40IW_RING_GETSIZE(cqp->sc_cqp.sq_ring);
+ }
+
+ while (!list_empty(&dev->cqp_cmd_head)) {
+ pcmdinfo = (struct cqp_commands_info *)i40iw_remove_head(&dev->cqp_cmd_head);
+ cqp_request = container_of(pcmdinfo, struct i40iw_cqp_request, info);
+ if (cqp_request)
+ i40iw_free_pending_cqp_request(cqp, cqp_request);
+ }
+}
+
+/**
* i40iw_free_qp - callback after destroy cqp completes
* @cqp_request: cqp request for destroy qp
* @num: not used
@@ -393,23 +445,29 @@ static int i40iw_wait_event(struct i40iw_device *iwdev,
{
struct cqp_commands_info *info = &cqp_request->info;
struct i40iw_cqp *iwcqp = &iwdev->cqp;
+ struct i40iw_cqp_timeout cqp_timeout;
bool cqp_error = false;
int err_code = 0;
- int timeout_ret = 0;
+ memset(&cqp_timeout, 0, sizeof(cqp_timeout));
+ cqp_timeout.compl_cqp_cmds = iwdev->sc_dev.cqp_cmd_stats[OP_COMPLETED_COMMANDS];
+ do {
+ if (wait_event_timeout(cqp_request->waitq,
+ cqp_request->request_done, CQP_COMPL_WAIT_TIME))
+ break;
- timeout_ret = wait_event_timeout(cqp_request->waitq,
- cqp_request->request_done,
- I40IW_EVENT_TIMEOUT);
- if (!timeout_ret) {
- i40iw_pr_err("error cqp command 0x%x timed out ret = %d\n",
- info->cqp_cmd, timeout_ret);
+ i40iw_check_cqp_progress(&cqp_timeout, &iwdev->sc_dev);
+
+ if (cqp_timeout.count < CQP_TIMEOUT_THRESHOLD)
+ continue;
+
+ i40iw_pr_err("error cqp command 0x%x timed out", info->cqp_cmd);
err_code = -ETIME;
if (!iwdev->reset) {
iwdev->reset = true;
i40iw_request_reset(iwdev);
}
goto done;
- }
+ } while (1);
cqp_error = cqp_request->compl_info.error;
if (cqp_error) {
i40iw_pr_err("error cqp command 0x%x completion maj = 0x%x min=0x%x\n",
@@ -546,8 +604,12 @@ void i40iw_rem_ref(struct ib_qp *ibqp)
cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;
cqp_info->in.u.qp_destroy.remove_hash_idx = true;
status = i40iw_handle_cqp_op(iwdev, cqp_request);
- if (status)
- i40iw_pr_err("CQP-OP Destroy QP fail");
+ if (!status)
+ return;
+
+ i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
+ i40iw_free_qp_resources(iwdev, iwqp, qp_num);
+ i40iw_rem_devusecount(iwdev);
}
/**
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 4dbe61ec7a77..1aa411034a27 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -426,9 +426,13 @@ void i40iw_free_qp_resources(struct i40iw_device *iwdev,
struct i40iw_qp *iwqp,
u32 qp_num)
{
+ struct i40iw_pbl *iwpbl = &iwqp->iwpbl;
+
i40iw_dealloc_push_page(iwdev, &iwqp->sc_qp);
if (qp_num)
i40iw_free_resource(iwdev, iwdev->allocated_qps, qp_num);
+ if (iwpbl->pbl_allocated)
+ i40iw_free_pble(iwdev->pble_rsrc, &iwpbl->pble_alloc);
i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->q2_ctx_mem);
i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->kqp.dma_mem);
kfree(iwqp->kqp.wrid_mem);
@@ -483,7 +487,7 @@ static int i40iw_setup_virt_qp(struct i40iw_device *iwdev,
struct i40iw_qp *iwqp,
struct i40iw_qp_init_info *init_info)
{
- struct i40iw_pbl *iwpbl = iwqp->iwpbl;
+ struct i40iw_pbl *iwpbl = &iwqp->iwpbl;
struct i40iw_qp_mr *qpmr = &iwpbl->qp_mr;
iwqp->page = qpmr->sq_page;
@@ -688,19 +692,22 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
ucontext = to_ucontext(ibpd->uobject->context);
if (req.user_wqe_buffers) {
+ struct i40iw_pbl *iwpbl;
+
spin_lock_irqsave(
&ucontext->qp_reg_mem_list_lock, flags);
- iwqp->iwpbl = i40iw_get_pbl(
+ iwpbl = i40iw_get_pbl(
(unsigned long)req.user_wqe_buffers,
&ucontext->qp_reg_mem_list);
spin_unlock_irqrestore(
&ucontext->qp_reg_mem_list_lock, flags);
- if (!iwqp->iwpbl) {
+ if (!iwpbl) {
err_code = -ENODATA;
i40iw_pr_err("no pbl info\n");
goto error;
}
+ memcpy(&iwqp->iwpbl, iwpbl, sizeof(iwqp->iwpbl));
}
}
err_code = i40iw_setup_virt_qp(iwdev, iwqp, &init_info);
@@ -1161,8 +1168,10 @@ static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
memset(&req, 0, sizeof(req));
iwcq->user_mode = true;
ucontext = to_ucontext(context);
- if (ib_copy_from_udata(&req, udata, sizeof(struct i40iw_create_cq_req)))
+ if (ib_copy_from_udata(&req, udata, sizeof(struct i40iw_create_cq_req))) {
+ err_code = -EFAULT;
goto cq_free_resources;
+ }
spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
iwpbl = i40iw_get_pbl((unsigned long)req.user_cq_buffer,
@@ -2063,7 +2072,7 @@ static int i40iw_dereg_mr(struct ib_mr *ib_mr)
ucontext = to_ucontext(ibpd->uobject->context);
i40iw_del_memlist(iwmr, ucontext);
}
- if (iwpbl->pbl_allocated)
+ if (iwpbl->pbl_allocated && iwmr->type != IW_MEMREG_TYPE_QP)
i40iw_free_pble(iwdev->pble_rsrc, palloc);
kfree(iwmr);
return 0;
@@ -2575,13 +2584,12 @@ static const char * const i40iw_hw_stat_names[] = {
"iwRdmaInv"
};
-static void i40iw_get_dev_fw_str(struct ib_device *dev, char *str,
- size_t str_len)
+static void i40iw_get_dev_fw_str(struct ib_device *dev, char *str)
{
u32 firmware_version = I40IW_FW_VERSION;
- snprintf(str, str_len, "%u.%u", firmware_version,
- (firmware_version & 0x000000ff));
+ snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u", firmware_version,
+ (firmware_version & 0x000000ff));
}
/**
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.h b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
index 07c3fec77de6..9067443cd311 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
@@ -170,7 +170,7 @@ struct i40iw_qp {
struct i40iw_qp_kmode kqp;
struct i40iw_dma_mem host_ctx;
struct timer_list terminate_timer;
- struct i40iw_pbl *iwpbl;
+ struct i40iw_pbl iwpbl;
struct i40iw_dma_mem q2_ctx_mem;
struct i40iw_dma_mem ietf_mem;
struct completion sq_drained;
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
index ea24230ea0d4..155b4dfc0ae8 100644
--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
+++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
@@ -528,7 +528,7 @@ static int set_guid_rec(struct ib_device *ibdev,
memset(&guid_info_rec, 0, sizeof (struct ib_sa_guidinfo_rec));
- guid_info_rec.lid = cpu_to_be16(attr.lid);
+ guid_info_rec.lid = ib_lid_be16(attr.lid);
guid_info_rec.block_num = index;
memcpy(guid_info_rec.guid_info_list, rec_det->all_recs,
@@ -781,7 +781,7 @@ void mlx4_ib_init_alias_guid_work(struct mlx4_ib_dev *dev, int port)
spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
if (!dev->sriov.is_going_down) {
- /* If there is pending one should cancell then run, otherwise
+ /* If there is pending one should cancel then run, otherwise
* won't run till previous one is ended as same work
* struct is used.
*/
diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
index 1e6c526450d9..fedaf8260105 100644
--- a/drivers/infiniband/hw/mlx4/cm.c
+++ b/drivers/infiniband/hw/mlx4/cm.c
@@ -323,6 +323,9 @@ int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id
mad->mad_hdr.attr_id == CM_REP_ATTR_ID ||
mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
sl_cm_id = get_local_comm_id(mad);
+ id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id);
+ if (id)
+ goto cont;
id = id_map_alloc(ibdev, slave_id, sl_cm_id);
if (IS_ERR(id)) {
mlx4_ib_warn(ibdev, "%s: id{slave: %d, sl_cm_id: 0x%x} Failed to id_map_alloc\n",
@@ -343,6 +346,7 @@ int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id
return -EINVAL;
}
+cont:
set_local_comm_id(mad, id->pv_cm_id);
if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
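
The change above looks up an existing slave/communication-id mapping with id_map_get() before allocating a new one, so a retransmitted request reuses the original entry instead of creating a duplicate. A minimal get-or-create sketch over a fixed-size table (the table shape and key fields are illustrative):

#include <stdio.h>

#define MAP_SIZE 8

struct id_map {
        int used;
        int slave_id;
        int sl_cm_id;
        int pv_cm_id;
};

static struct id_map table[MAP_SIZE];

static struct id_map *id_map_get(int slave_id, int sl_cm_id)
{
        int i;

        for (i = 0; i < MAP_SIZE; i++)
                if (table[i].used && table[i].slave_id == slave_id &&
                    table[i].sl_cm_id == sl_cm_id)
                        return &table[i];
        return NULL;
}

static struct id_map *id_map_get_or_alloc(int slave_id, int sl_cm_id)
{
        struct id_map *id = id_map_get(slave_id, sl_cm_id);
        int i;

        if (id)                 /* retransmission: reuse the existing entry */
                return id;
        for (i = 0; i < MAP_SIZE; i++) {
                if (!table[i].used) {
                        table[i].used = 1;
                        table[i].slave_id = slave_id;
                        table[i].sl_cm_id = sl_cm_id;
                        table[i].pv_cm_id = 100 + i;    /* stand-in proxy id */
                        return &table[i];
                }
        }
        return NULL;            /* table full */
}

int main(void)
{
        struct id_map *a = id_map_get_or_alloc(1, 42);
        struct id_map *b = id_map_get_or_alloc(1, 42);  /* same request again */

        printf("same entry: %s\n", a == b ? "yes" : "no");
        return 0;
}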
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 4f5a143fc0a7..cab796341697 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -102,7 +102,7 @@ static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *
int err;
err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size,
- PAGE_SIZE * 2, &buf->buf, GFP_KERNEL);
+ PAGE_SIZE * 2, &buf->buf);
if (err)
goto out;
@@ -113,7 +113,7 @@ static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *
if (err)
goto err_buf;
- err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf, GFP_KERNEL);
+ err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);
if (err)
goto err_mtt;
@@ -218,8 +218,9 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
goto err_mtt;
uar = &to_mucontext(context)->uar;
+ cq->mcq.usage = MLX4_RES_USAGE_USER_VERBS;
} else {
- err = mlx4_db_alloc(dev->dev, &cq->db, 1, GFP_KERNEL);
+ err = mlx4_db_alloc(dev->dev, &cq->db, 1);
if (err)
goto err_cq;
@@ -233,6 +234,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
goto err_db;
uar = &dev->priv_uar;
+ cq->mcq.usage = MLX4_RES_USAGE_DRIVER;
}
if (dev->eq_table)
@@ -635,7 +637,7 @@ static void mlx4_ib_poll_sw_comp(struct mlx4_ib_cq *cq, int num_entries,
struct mlx4_ib_qp *qp;
*npolled = 0;
- /* Find uncompleted WQEs belonging to that cq and retrun
+ /* Find uncompleted WQEs belonging to that cq and return
* simulated FLUSH_ERR completions
*/
list_for_each_entry(qp, &cq->send_qp_list, cq_send_list) {
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 21d31cb1325f..0793a21d76f4 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -169,7 +169,7 @@ int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
op_modifier |= 0x4;
- in_modifier |= in_wc->slid << 16;
+ in_modifier |= ib_lid_cpu16(in_wc->slid) << 16;
}
err = mlx4_cmd_box(dev->dev, inmailbox->dma, outmailbox->dma, in_modifier,
@@ -625,7 +625,7 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
memcpy((char *)&tun_mad->hdr.slid_mac_47_32, &(wc->smac[4]), 2);
} else {
tun_mad->hdr.sl_vid = cpu_to_be16(((u16)(wc->sl)) << 12);
- tun_mad->hdr.slid_mac_47_32 = cpu_to_be16(wc->slid);
+ tun_mad->hdr.slid_mac_47_32 = ib_lid_be16(wc->slid);
}
ib_dma_sync_single_for_device(&dev->ib_dev,
@@ -826,7 +826,7 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
}
}
- slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
+ slid = in_wc ? ib_lid_cpu16(in_wc->slid) : be16_to_cpu(IB_LID_PERMISSIVE);
if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0) {
forward_trap(to_mdev(ibdev), port_num, in_mad);
@@ -860,7 +860,7 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
!ib_query_port(ibdev, port_num, &pattr))
- prev_lid = pattr.lid;
+ prev_lid = ib_lid_cpu16(pattr.lid);
err = mlx4_MAD_IFC(to_mdev(ibdev),
(mad_flags & IB_MAD_IGNORE_MKEY ? MLX4_MAD_IFC_IGNORE_MKEY : 0) |
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 75b2f7d4cd95..c636842c5be0 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -70,7 +70,6 @@
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(DRV_VERSION);
int mlx4_ib_sm_guid_assign = 0;
module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
@@ -81,6 +80,8 @@ static const char mlx4_ib_version[] =
DRV_VERSION "\n";
static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);
+static enum rdma_link_layer mlx4_ib_port_link_layer(struct ib_device *device,
+ u8 port_num);
static struct workqueue_struct *wq;
@@ -552,6 +553,16 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->timestamp_mask = 0xFFFFFFFFFFFFULL;
props->max_ah = INT_MAX;
+ if ((dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) &&
+ (mlx4_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET ||
+ mlx4_ib_port_link_layer(ibdev, 2) == IB_LINK_LAYER_ETHERNET)) {
+ props->rss_caps.max_rwq_indirection_tables = props->max_qp;
+ props->rss_caps.max_rwq_indirection_table_size =
+ dev->dev->caps.max_rss_tbl_sz;
+ props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
+ props->max_wq_type_rq = props->max_qp;
+ }
+
if (!mlx4_is_slave(dev->dev))
err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
@@ -563,6 +574,13 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
}
}
+ if (uhw->outlen >= resp.response_length +
+ sizeof(resp.max_inl_recv_sz)) {
+ resp.response_length += sizeof(resp.max_inl_recv_sz);
+ resp.max_inl_recv_sz = dev->dev->caps.max_rq_sg *
+ sizeof(struct mlx4_wqe_data_seg);
+ }
+
if (uhw->outlen) {
err = ib_copy_to_udata(uhw, &resp, resp.response_length);
if (err)
@@ -1069,6 +1087,9 @@ static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
INIT_LIST_HEAD(&context->db_page_list);
mutex_init(&context->db_page_mutex);
+ INIT_LIST_HEAD(&context->wqn_ranges_list);
+ mutex_init(&context->wqn_ranges_mutex);
+
if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
else
@@ -1155,7 +1176,7 @@ static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
* call to mlx4_ib_vma_close.
*/
put_task_struct(owning_process);
- msleep(1);
+ usleep_range(1000, 2000);
owning_process = get_pid_task(ibcontext->tgid,
PIDTYPE_PID);
if (!owning_process ||
@@ -2566,12 +2587,11 @@ static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num,
return 0;
}
-static void get_fw_ver_str(struct ib_device *device, char *str,
- size_t str_len)
+static void get_fw_ver_str(struct ib_device *device, char *str)
{
struct mlx4_ib_dev *dev =
container_of(device, struct mlx4_ib_dev, ib_dev);
- snprintf(str, str_len, "%d.%d.%d",
+ snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d",
(int) (dev->dev->caps.fw_ver >> 32),
(int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
(int) dev->dev->caps.fw_ver & 0xffff);
@@ -2713,6 +2733,26 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->ib_dev.get_dev_fw_str = get_fw_ver_str;
ibdev->ib_dev.disassociate_ucontext = mlx4_ib_disassociate_ucontext;
+ if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) &&
+ ((mlx4_ib_port_link_layer(&ibdev->ib_dev, 1) ==
+ IB_LINK_LAYER_ETHERNET) ||
+ (mlx4_ib_port_link_layer(&ibdev->ib_dev, 2) ==
+ IB_LINK_LAYER_ETHERNET))) {
+ ibdev->ib_dev.create_wq = mlx4_ib_create_wq;
+ ibdev->ib_dev.modify_wq = mlx4_ib_modify_wq;
+ ibdev->ib_dev.destroy_wq = mlx4_ib_destroy_wq;
+ ibdev->ib_dev.create_rwq_ind_table =
+ mlx4_ib_create_rwq_ind_table;
+ ibdev->ib_dev.destroy_rwq_ind_table =
+ mlx4_ib_destroy_rwq_ind_table;
+ ibdev->ib_dev.uverbs_ex_cmd_mask |=
+ (1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
+ (1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
+ (1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
+ (1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
+ (1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
+ }
+
if (!mlx4_is_slave(ibdev->dev)) {
ibdev->ib_dev.alloc_fmr = mlx4_ib_fmr_alloc;
ibdev->ib_dev.map_phys_fmr = mlx4_ib_map_phys_fmr;
@@ -2772,7 +2812,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
allocated = 0;
if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
IB_LINK_LAYER_ETHERNET) {
- err = mlx4_counter_alloc(ibdev->dev, &counter_index);
+ err = mlx4_counter_alloc(ibdev->dev, &counter_index,
+ MLX4_RES_USAGE_DRIVER);
/* if failed to allocate a new counter, use default */
if (err)
counter_index =
@@ -2827,7 +2868,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
MLX4_IB_UC_STEER_QPN_ALIGN,
- &ibdev->steer_qpn_base, 0);
+ &ibdev->steer_qpn_base, 0,
+ MLX4_RES_USAGE_DRIVER);
if (err)
goto err_counter;
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
index 3405e947dc1e..70eb9f917303 100644
--- a/drivers/infiniband/hw/mlx4/mcg.c
+++ b/drivers/infiniband/hw/mlx4/mcg.c
@@ -808,8 +808,7 @@ static ssize_t sysfs_show_group(struct device *dev,
struct device_attribute *attr, char *buf);
static struct mcast_group *acquire_group(struct mlx4_ib_demux_ctx *ctx,
- union ib_gid *mgid, int create,
- gfp_t gfp_mask)
+ union ib_gid *mgid, int create)
{
struct mcast_group *group, *cur_group;
int is_mgid0;
@@ -825,7 +824,7 @@ static struct mcast_group *acquire_group(struct mlx4_ib_demux_ctx *ctx,
if (!create)
return ERR_PTR(-ENOENT);
- group = kzalloc(sizeof *group, gfp_mask);
+ group = kzalloc(sizeof(*group), GFP_KERNEL);
if (!group)
return ERR_PTR(-ENOMEM);
@@ -892,7 +891,7 @@ int mlx4_ib_mcg_demux_handler(struct ib_device *ibdev, int port, int slave,
case IB_MGMT_METHOD_GET_RESP:
case IB_SA_METHOD_DELETE_RESP:
mutex_lock(&ctx->mcg_table_lock);
- group = acquire_group(ctx, &rec->mgid, 0, GFP_KERNEL);
+ group = acquire_group(ctx, &rec->mgid, 0);
mutex_unlock(&ctx->mcg_table_lock);
if (IS_ERR(group)) {
if (mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP) {
@@ -954,7 +953,7 @@ int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port,
req->sa_mad = *sa_mad;
mutex_lock(&ctx->mcg_table_lock);
- group = acquire_group(ctx, &rec->mgid, may_create, GFP_KERNEL);
+ group = acquire_group(ctx, &rec->mgid, may_create);
mutex_unlock(&ctx->mcg_table_lock);
if (IS_ERR(group)) {
kfree(req);
@@ -1091,7 +1090,7 @@ static void _mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy
if (!count)
break;
- msleep(1);
+ usleep_range(1000, 2000);
} while (time_after(end, jiffies));
flush_workqueue(ctx->mcg_wq);
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index c2b9cbf4da05..1fa19820355a 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -46,6 +46,7 @@
#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>
+#include <linux/mlx4/qp.h>
#define MLX4_IB_DRV_NAME "mlx4_ib"
@@ -88,6 +89,8 @@ struct mlx4_ib_ucontext {
struct list_head db_page_list;
struct mutex db_page_mutex;
struct mlx4_ib_vma_private_data hw_bar_info[HW_BAR_COUNT];
+ struct list_head wqn_ranges_list;
+ struct mutex wqn_ranges_mutex; /* protect wqn_ranges_list */
};
struct mlx4_ib_pd {
@@ -185,7 +188,6 @@ enum mlx4_ib_qp_flags {
MLX4_IB_QP_LSO = IB_QP_CREATE_IPOIB_UD_LSO,
MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
MLX4_IB_QP_NETIF = IB_QP_CREATE_NETIF_QP,
- MLX4_IB_QP_CREATE_USE_GFP_NOIO = IB_QP_CREATE_USE_GFP_NOIO,
/* Mellanox specific flags start from IB_QP_CREATE_RESERVED_START */
MLX4_IB_ROCE_V2_GSI_QP = MLX4_IB_QP_CREATE_ROCE_V2_GSI,
@@ -290,8 +292,25 @@ struct mlx4_roce_smac_vlan_info {
int update_vid;
};
+struct mlx4_wqn_range {
+ int base_wqn;
+ int size;
+ int refcount;
+ bool dirty;
+ struct list_head list;
+};
+
+struct mlx4_ib_rss {
+ unsigned int base_qpn_tbl_sz;
+ u8 flags;
+ u8 rss_key[MLX4_EN_RSS_KEY_SIZE];
+};
+
struct mlx4_ib_qp {
- struct ib_qp ibqp;
+ union {
+ struct ib_qp ibqp;
+ struct ib_wq ibwq;
+ };
struct mlx4_qp mqp;
struct mlx4_buf buf;
@@ -319,6 +338,7 @@ struct mlx4_ib_qp {
u8 sq_no_prefetch;
u8 state;
int mlx_type;
+ u32 inl_recv_sz;
struct list_head gid_list;
struct list_head steering_rules;
struct mlx4_ib_buf *sqp_proxy_rcv;
@@ -329,6 +349,10 @@ struct mlx4_ib_qp {
struct list_head cq_recv_list;
struct list_head cq_send_list;
struct counter_index *counter_index;
+ struct mlx4_wqn_range *wqn_range;
+ /* Number of RSS QP parents that uses this WQ */
+ u32 rss_usecnt;
+ struct mlx4_ib_rss *rss_ctx;
};
struct mlx4_ib_srq {
@@ -624,6 +648,8 @@ struct mlx4_uverbs_ex_query_device_resp {
__u32 comp_mask;
__u32 response_length;
__u64 hca_core_clock_offset;
+ __u32 max_inl_recv_sz;
+ __u32 reserved;
};
static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
@@ -891,4 +917,17 @@ void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev,
void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port);
+struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd,
+ struct ib_wq_init_attr *init_attr,
+ struct ib_udata *udata);
+int mlx4_ib_destroy_wq(struct ib_wq *wq);
+int mlx4_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
+ u32 wq_attr_mask, struct ib_udata *udata);
+
+struct ib_rwq_ind_table
+*mlx4_ib_create_rwq_ind_table(struct ib_device *device,
+ struct ib_rwq_ind_table_init_attr *init_attr,
+ struct ib_udata *udata);
+int mlx4_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
+
#endif /* MLX4_IB_H */
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 996e9058e515..b6b33d99b0b4 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -36,7 +36,6 @@
#include <net/ip.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
-#include <linux/vmalloc.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_pack.h>
@@ -53,6 +52,7 @@ static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq,
struct mlx4_ib_cq *recv_cq);
static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq,
struct mlx4_ib_cq *recv_cq);
+static int _mlx4_ib_modify_wq(struct ib_wq *ibwq, enum ib_wq_state new_state);
enum {
MLX4_IB_ACK_REQ_FREQ = 8,
@@ -116,6 +116,11 @@ static const __be32 mlx4_ib_opcode[] = {
[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_FA),
};
+enum mlx4_ib_source_type {
+ MLX4_IB_QP_SRC = 0,
+ MLX4_IB_RWQ_SRC = 1,
+};
+
static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
{
return container_of(mqp, struct mlx4_ib_sqp, qp);
@@ -145,8 +150,8 @@ static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
/* VF or PF -- proxy SQP */
if (mlx4_is_mfunc(dev->dev)) {
for (i = 0; i < dev->dev->caps.num_ports; i++) {
- if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i] ||
- qp->mqp.qpn == dev->dev->caps.qp1_proxy[i]) {
+ if (qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp0_proxy ||
+ qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp1_proxy) {
proxy_sqp = 1;
break;
}
@@ -173,7 +178,7 @@ static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
/* VF or PF -- proxy QP0 */
if (mlx4_is_mfunc(dev->dev)) {
for (i = 0; i < dev->dev->caps.num_ports; i++) {
- if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i]) {
+ if (qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp0_proxy) {
proxy_qp0 = 1;
break;
}
@@ -330,6 +335,12 @@ static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
}
}
+static void mlx4_ib_wq_event(struct mlx4_qp *qp, enum mlx4_event type)
+{
+ pr_warn_ratelimited("Unexpected event type %d on WQ 0x%06x. Events are not supported for WQs\n",
+ type, qp->qpn);
+}
+
static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags)
{
/*
@@ -377,7 +388,8 @@ static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags)
}
static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
- int is_user, int has_rq, struct mlx4_ib_qp *qp)
+ int is_user, int has_rq, struct mlx4_ib_qp *qp,
+ u32 inl_recv_sz)
{
/* Sanity check RQ size before proceeding */
if (cap->max_recv_wr > dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE ||
@@ -385,18 +397,24 @@ static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
return -EINVAL;
if (!has_rq) {
- if (cap->max_recv_wr)
+ if (cap->max_recv_wr || inl_recv_sz)
return -EINVAL;
qp->rq.wqe_cnt = qp->rq.max_gs = 0;
} else {
+ u32 max_inl_recv_sz = dev->dev->caps.max_rq_sg *
+ sizeof(struct mlx4_wqe_data_seg);
+ u32 wqe_size;
+
/* HW requires >= 1 RQ entry with >= 1 gather entry */
- if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge))
+ if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge ||
+ inl_recv_sz > max_inl_recv_sz))
return -EINVAL;
qp->rq.wqe_cnt = roundup_pow_of_two(max(1U, cap->max_recv_wr));
qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge));
- qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg));
+ wqe_size = qp->rq.max_gs * sizeof(struct mlx4_wqe_data_seg);
+ qp->rq.wqe_shift = ilog2(max_t(u32, wqe_size, inl_recv_sz));
}
/* leave userspace return values as they were, so as not to break ABI */
@@ -614,8 +632,8 @@ static int qp0_enabled_vf(struct mlx4_dev *dev, int qpn)
{
int i;
for (i = 0; i < dev->caps.num_ports; i++) {
- if (qpn == dev->caps.qp0_proxy[i])
- return !!dev->caps.qp0_qkey[i];
+ if (qpn == dev->caps.spec_qps[i].qp0_proxy)
+ return !!dev->caps.spec_qps[i].qp0_qkey;
}
return 0;
}
@@ -632,10 +650,303 @@ static void mlx4_ib_free_qp_counter(struct mlx4_ib_dev *dev,
qp->counter_index = NULL;
}
+static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
+ struct ib_qp_init_attr *init_attr,
+ struct mlx4_ib_create_qp_rss *ucmd)
+{
+ rss_ctx->base_qpn_tbl_sz = init_attr->rwq_ind_tbl->ind_tbl[0]->wq_num |
+ (init_attr->rwq_ind_tbl->log_ind_tbl_size << 24);
+
+ if ((ucmd->rx_hash_function == MLX4_IB_RX_HASH_FUNC_TOEPLITZ) &&
+ (dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP)) {
+ memcpy(rss_ctx->rss_key, ucmd->rx_hash_key,
+ MLX4_EN_RSS_KEY_SIZE);
+ } else {
+ pr_debug("RX Hash function is not supported\n");
+ return (-EOPNOTSUPP);
+ }
+
+ if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV4) &&
+ (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV4)) {
+ rss_ctx->flags = MLX4_RSS_IPV4;
+ } else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV4) ||
+ (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV4)) {
+ pr_debug("RX Hash fields_mask is not supported - both IPv4 SRC and DST must be set\n");
+ return (-EOPNOTSUPP);
+ }
+
+ if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV6) &&
+ (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV6)) {
+ rss_ctx->flags |= MLX4_RSS_IPV6;
+ } else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV6) ||
+ (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV6)) {
+ pr_debug("RX Hash fields_mask is not supported - both IPv6 SRC and DST must be set\n");
+ return (-EOPNOTSUPP);
+ }
+
+ if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_UDP) &&
+ (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_UDP)) {
+ if (!(dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UDP_RSS)) {
+ pr_debug("RX Hash fields_mask for UDP is not supported\n");
+ return (-EOPNOTSUPP);
+ }
+
+ if (rss_ctx->flags & MLX4_RSS_IPV4) {
+ rss_ctx->flags |= MLX4_RSS_UDP_IPV4;
+ } else if (rss_ctx->flags & MLX4_RSS_IPV6) {
+ rss_ctx->flags |= MLX4_RSS_UDP_IPV6;
+ } else {
+ pr_debug("RX Hash fields_mask is not supported - UDP must be set with IPv4 or IPv6\n");
+ return (-EOPNOTSUPP);
+ }
+ } else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_UDP) ||
+ (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_UDP)) {
+ pr_debug("RX Hash fields_mask is not supported - both UDP SRC and DST must be set\n");
+ return (-EOPNOTSUPP);
+ }
+
+ if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) &&
+ (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) {
+ if (rss_ctx->flags & MLX4_RSS_IPV4) {
+ rss_ctx->flags |= MLX4_RSS_TCP_IPV4;
+ } else if (rss_ctx->flags & MLX4_RSS_IPV6) {
+ rss_ctx->flags |= MLX4_RSS_TCP_IPV6;
+ } else {
+ pr_debug("RX Hash fields_mask is not supported - TCP must be set with IPv4 or IPv6\n");
+ return (-EOPNOTSUPP);
+ }
+
+ } else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) ||
+ (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) {
+ pr_debug("RX Hash fields_mask is not supported - both TCP SRC and DST must be set\n");
+ return (-EOPNOTSUPP);
+ }
+
+ return 0;
+}
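/*
 * Illustrative sketch (not part of the patch): how the 32-bit
 * base_qpn_tbl_sz word built in set_qp_rss() above packs the indirection
 * table's base WQN together with its log size, and how the low 24 bits are
 * later unmasked in fill_qp_rss_context(). The numeric values are invented
 * for the example.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t wq_num = 0x1a40;		/* hypothetical base WQN */
	uint32_t log_ind_tbl_size = 3;		/* 8-entry indirection table */
	uint32_t base_qpn_tbl_sz = wq_num | (log_ind_tbl_size << 24);

	printf("packed       = 0x%08x\n", base_qpn_tbl_sz);
	printf("base qpn     = 0x%06x\n", base_qpn_tbl_sz & 0xffffff);
	printf("log tbl size = %u\n", base_qpn_tbl_sz >> 24);
	return 0;
}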
+
+static int create_qp_rss(struct mlx4_ib_dev *dev, struct ib_pd *ibpd,
+ struct ib_qp_init_attr *init_attr,
+ struct mlx4_ib_create_qp_rss *ucmd,
+ struct mlx4_ib_qp *qp)
+{
+ int qpn;
+ int err;
+
+ qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS;
+
+ err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn, 0, qp->mqp.usage);
+ if (err)
+ return err;
+
+ err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
+ if (err)
+ goto err_qpn;
+
+ mutex_init(&qp->mutex);
+
+ INIT_LIST_HEAD(&qp->gid_list);
+ INIT_LIST_HEAD(&qp->steering_rules);
+
+ qp->mlx4_ib_qp_type = MLX4_IB_QPT_RAW_PACKET;
+ qp->state = IB_QPS_RESET;
+
+ /* Set dummy send resources to be compatible with HV and PRM */
+ qp->sq_no_prefetch = 1;
+ qp->sq.wqe_cnt = 1;
+ qp->sq.wqe_shift = MLX4_IB_MIN_SQ_STRIDE;
+ qp->buf_size = qp->sq.wqe_cnt << MLX4_IB_MIN_SQ_STRIDE;
+ qp->mtt = (to_mqp(
+ (struct ib_qp *)init_attr->rwq_ind_tbl->ind_tbl[0]))->mtt;
+
+ qp->rss_ctx = kzalloc(sizeof(*qp->rss_ctx), GFP_KERNEL);
+ if (!qp->rss_ctx) {
+ err = -ENOMEM;
+ goto err_qp_alloc;
+ }
+
+ err = set_qp_rss(dev, qp->rss_ctx, init_attr, ucmd);
+ if (err)
+ goto err;
+
+ return 0;
+
+err:
+ kfree(qp->rss_ctx);
+
+err_qp_alloc:
+ mlx4_qp_remove(dev->dev, &qp->mqp);
+ mlx4_qp_free(dev->dev, &qp->mqp);
+
+err_qpn:
+ mlx4_qp_release_range(dev->dev, qpn, 1);
+ return err;
+}
+
+static struct ib_qp *_mlx4_ib_create_qp_rss(struct ib_pd *pd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct mlx4_ib_qp *qp;
+ struct mlx4_ib_create_qp_rss ucmd = {};
+ size_t required_cmd_sz;
+ int err;
+
+ if (!udata) {
+ pr_debug("RSS QP with NULL udata\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (udata->outlen)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ required_cmd_sz = offsetof(typeof(ucmd), reserved1) +
+ sizeof(ucmd.reserved1);
+ if (udata->inlen < required_cmd_sz) {
+ pr_debug("invalid inlen\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) {
+ pr_debug("copy failed\n");
+ return ERR_PTR(-EFAULT);
+ }
+
+ if (memchr_inv(ucmd.reserved, 0, sizeof(ucmd.reserved)))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (ucmd.comp_mask || ucmd.reserved1)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (udata->inlen > sizeof(ucmd) &&
+ !ib_is_udata_cleared(udata, sizeof(ucmd),
+ udata->inlen - sizeof(ucmd))) {
+ pr_debug("inlen is not supported\n");
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
+ pr_debug("RSS QP with unsupported QP type %d\n",
+ init_attr->qp_type);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ if (init_attr->create_flags) {
+ pr_debug("RSS QP doesn't support create flags\n");
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ if (init_attr->send_cq || init_attr->cap.max_send_wr) {
+ pr_debug("RSS QP with unsupported send attributes\n");
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return ERR_PTR(-ENOMEM);
+
+ qp->pri.vid = 0xFFFF;
+ qp->alt.vid = 0xFFFF;
+
+ err = create_qp_rss(to_mdev(pd->device), pd, init_attr, &ucmd, qp);
+ if (err) {
+ kfree(qp);
+ return ERR_PTR(err);
+ }
+
+ qp->ibqp.qp_num = qp->mqp.qpn;
+
+ return &qp->ibqp;
+}
+
+/*
+ * This function allocates a WQN from a range that is consecutive and aligned
+ * to its size. If the range is full, it creates a new range and allocates a
+ * WQN from it. The new range is then used for subsequent allocations.
+ */
+static int mlx4_ib_alloc_wqn(struct mlx4_ib_ucontext *context,
+ struct mlx4_ib_qp *qp, int range_size, int *wqn)
+{
+ struct mlx4_ib_dev *dev = to_mdev(context->ibucontext.device);
+ struct mlx4_wqn_range *range;
+ int err = 0;
+
+ mutex_lock(&context->wqn_ranges_mutex);
+
+ range = list_first_entry_or_null(&context->wqn_ranges_list,
+ struct mlx4_wqn_range, list);
+
+ if (!range || (range->refcount == range->size) || range->dirty) {
+ range = kzalloc(sizeof(*range), GFP_KERNEL);
+ if (!range) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = mlx4_qp_reserve_range(dev->dev, range_size,
+ range_size, &range->base_wqn, 0,
+ qp->mqp.usage);
+ if (err) {
+ kfree(range);
+ goto out;
+ }
+
+ range->size = range_size;
+ list_add(&range->list, &context->wqn_ranges_list);
+ } else if (range_size != 1) {
+ /*
+ * Requesting a new range (>1) while the last range is still open
+ * is not valid.
+ */
+ err = -EINVAL;
+ goto out;
+ }
+
+ qp->wqn_range = range;
+
+ *wqn = range->base_wqn + range->refcount;
+
+ range->refcount++;
+
+out:
+ mutex_unlock(&context->wqn_ranges_mutex);
+
+ return err;
+}
+
+static void mlx4_ib_release_wqn(struct mlx4_ib_ucontext *context,
+ struct mlx4_ib_qp *qp, bool dirty_release)
+{
+ struct mlx4_ib_dev *dev = to_mdev(context->ibucontext.device);
+ struct mlx4_wqn_range *range;
+
+ mutex_lock(&context->wqn_ranges_mutex);
+
+ range = qp->wqn_range;
+
+ range->refcount--;
+ if (!range->refcount) {
+ mlx4_qp_release_range(dev->dev, range->base_wqn,
+ range->size);
+ list_del(&range->list);
+ kfree(range);
+ } else if (dirty_release) {
+ /*
+ * A range from which one of the WQNs has been destroyed cannot be
+ * reused for further WQN allocations.
+ * The next created WQ will allocate a new range.
+ */
+ range->dirty = 1;
+ }
+
+ mutex_unlock(&context->wqn_ranges_mutex);
+}
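/*
 * Illustrative sketch (not part of the patch): a minimal user-space model of
 * the WQN range policy implemented by mlx4_ib_alloc_wqn()/mlx4_ib_release_wqn()
 * above - ranges are size-aligned and handed out consecutively, and releasing
 * a WQN from a partially used range marks it dirty so the next allocation
 * opens a fresh range. Names and numbers here are invented for the example.
 */
#include <stdbool.h>
#include <stdio.h>

struct wqn_range {
	int base;
	int size;
	int refcount;
	bool dirty;
};

static int next_free = 64;	/* pretend the QPN space starts here */

/* Reserve a new size-aligned range, standing in for mlx4_qp_reserve_range(). */
static void open_range(struct wqn_range *r, int size)
{
	r->base = (next_free + size - 1) & ~(size - 1);
	next_free = r->base + size;
	r->size = size;
	r->refcount = 0;
	r->dirty = false;
}

static int alloc_wqn(struct wqn_range *r, int range_size)
{
	if (!r->size || r->refcount == r->size || r->dirty)
		open_range(r, range_size);
	return r->base + r->refcount++;
}

static void release_wqn(struct wqn_range *r, bool dirty_release)
{
	if (--r->refcount == 0)
		r->size = 0;		/* whole range is given back */
	else if (dirty_release)
		r->dirty = true;	/* range can no longer serve new WQs */
}

int main(void)
{
	struct wqn_range r = { 0 };
	int a = alloc_wqn(&r, 8);	/* opens an 8-aligned range */
	int b = alloc_wqn(&r, 8);	/* consecutive: a + 1 */
	int c;

	release_wqn(&r, true);		/* early destroy dirties the range */
	c = alloc_wqn(&r, 8);		/* lands in a brand new range */

	printf("a=%d b=%d c=%d\n", a, b, c);
	return 0;
}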
+
static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
+ enum mlx4_ib_source_type src,
struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata, int sqpn, struct mlx4_ib_qp **caller_qp,
- gfp_t gfp)
+ struct ib_udata *udata, int sqpn,
+ struct mlx4_ib_qp **caller_qp)
{
int qpn;
int err;
@@ -645,6 +956,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type;
struct mlx4_ib_cq *mcq;
unsigned long flags;
+ int range_size = 0;
/* When tunneling special qps, we use a plain UD qp */
if (sqpn) {
@@ -691,14 +1003,14 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
if (qp_type == MLX4_IB_QPT_SMI || qp_type == MLX4_IB_QPT_GSI ||
(qp_type & (MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_SMI_OWNER |
MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER))) {
- sqp = kzalloc(sizeof (struct mlx4_ib_sqp), gfp);
+ sqp = kzalloc(sizeof(struct mlx4_ib_sqp), GFP_KERNEL);
if (!sqp)
return -ENOMEM;
qp = &sqp->qp;
qp->pri.vid = 0xFFFF;
qp->alt.vid = 0xFFFF;
} else {
- qp = kzalloc(sizeof (struct mlx4_ib_qp), gfp);
+ qp = kzalloc(sizeof(struct mlx4_ib_qp), GFP_KERNEL);
if (!qp)
return -ENOMEM;
qp->pri.vid = 0xFFFF;
@@ -719,26 +1031,70 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
- err = set_rq_size(dev, &init_attr->cap, !!pd->uobject, qp_has_rq(init_attr), qp);
- if (err)
- goto err;
if (pd->uobject) {
- struct mlx4_ib_create_qp ucmd;
+ union {
+ struct mlx4_ib_create_qp qp;
+ struct mlx4_ib_create_wq wq;
+ } ucmd;
+ size_t copy_len;
+
+ copy_len = (src == MLX4_IB_QP_SRC) ?
+ sizeof(struct mlx4_ib_create_qp) :
+ min(sizeof(struct mlx4_ib_create_wq), udata->inlen);
- if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
+ if (ib_copy_from_udata(&ucmd, udata, copy_len)) {
err = -EFAULT;
goto err;
}
- qp->sq_no_prefetch = ucmd.sq_no_prefetch;
+ if (src == MLX4_IB_RWQ_SRC) {
+ if (ucmd.wq.comp_mask || ucmd.wq.reserved[0] ||
+ ucmd.wq.reserved[1] || ucmd.wq.reserved[2]) {
+ pr_debug("user command isn't supported\n");
+ err = -EOPNOTSUPP;
+ goto err;
+ }
+
+ if (ucmd.wq.log_range_size >
+ ilog2(dev->dev->caps.max_rss_tbl_sz)) {
+ pr_debug("WQN range size must be equal or smaller than %d\n",
+ dev->dev->caps.max_rss_tbl_sz);
+ err = -EOPNOTSUPP;
+ goto err;
+ }
+ range_size = 1 << ucmd.wq.log_range_size;
+ } else {
+ qp->inl_recv_sz = ucmd.qp.inl_recv_sz;
+ }
- err = set_user_sq_size(dev, qp, &ucmd);
+ err = set_rq_size(dev, &init_attr->cap, !!pd->uobject,
+ qp_has_rq(init_attr), qp, qp->inl_recv_sz);
if (err)
goto err;
- qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
- qp->buf_size, 0, 0);
+ if (src == MLX4_IB_QP_SRC) {
+ qp->sq_no_prefetch = ucmd.qp.sq_no_prefetch;
+
+ err = set_user_sq_size(dev, qp,
+ (struct mlx4_ib_create_qp *)
+ &ucmd);
+ if (err)
+ goto err;
+ } else {
+ qp->sq_no_prefetch = 1;
+ qp->sq.wqe_cnt = 1;
+ qp->sq.wqe_shift = MLX4_IB_MIN_SQ_STRIDE;
+ /* The allocated buffer is expected to include at least this
+ * SQ size.
+ */
+ qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
+ (qp->sq.wqe_cnt << qp->sq.wqe_shift);
+ }
+
+ qp->umem = ib_umem_get(pd->uobject->context,
+ (src == MLX4_IB_QP_SRC) ? ucmd.qp.buf_addr :
+ ucmd.wq.buf_addr, qp->buf_size, 0, 0);
if (IS_ERR(qp->umem)) {
err = PTR_ERR(qp->umem);
goto err;
@@ -755,11 +1111,18 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
if (qp_has_rq(init_attr)) {
err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
- ucmd.db_addr, &qp->db);
+ (src == MLX4_IB_QP_SRC) ? ucmd.qp.db_addr :
+ ucmd.wq.db_addr, &qp->db);
if (err)
goto err_mtt;
}
+ qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS;
} else {
+ err = set_rq_size(dev, &init_attr->cap, !!pd->uobject,
+ qp_has_rq(init_attr), qp, 0);
+ if (err)
+ goto err;
+
qp->sq_no_prefetch = 0;
if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
@@ -780,7 +1143,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
goto err;
if (qp_has_rq(init_attr)) {
- err = mlx4_db_alloc(dev->dev, &qp->db, 0, gfp);
+ err = mlx4_db_alloc(dev->dev, &qp->db, 0);
if (err)
goto err;
@@ -788,7 +1151,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
}
if (mlx4_buf_alloc(dev->dev, qp->buf_size, qp->buf_size,
- &qp->buf, gfp)) {
+ &qp->buf)) {
memcpy(&init_attr->cap, &backup_cap,
sizeof(backup_cap));
err = set_kernel_sq_size(dev, &init_attr->cap, qp_type,
@@ -797,7 +1160,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
goto err_db;
if (mlx4_buf_alloc(dev->dev, qp->buf_size,
- PAGE_SIZE * 2, &qp->buf, gfp)) {
+ PAGE_SIZE * 2, &qp->buf)) {
err = -ENOMEM;
goto err_db;
}
@@ -808,24 +1171,19 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
if (err)
goto err_buf;
- err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf, gfp);
+ err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf);
if (err)
goto err_mtt;
- qp->sq.wrid = kmalloc_array(qp->sq.wqe_cnt, sizeof(u64),
- gfp | __GFP_NOWARN);
- if (!qp->sq.wrid)
- qp->sq.wrid = __vmalloc(qp->sq.wqe_cnt * sizeof(u64),
- gfp, PAGE_KERNEL);
- qp->rq.wrid = kmalloc_array(qp->rq.wqe_cnt, sizeof(u64),
- gfp | __GFP_NOWARN);
- if (!qp->rq.wrid)
- qp->rq.wrid = __vmalloc(qp->rq.wqe_cnt * sizeof(u64),
- gfp, PAGE_KERNEL);
+ qp->sq.wrid = kvmalloc_array(qp->sq.wqe_cnt,
+ sizeof(u64), GFP_KERNEL);
+ qp->rq.wrid = kvmalloc_array(qp->rq.wqe_cnt,
+ sizeof(u64), GFP_KERNEL);
if (!qp->sq.wrid || !qp->rq.wrid) {
err = -ENOMEM;
goto err_wrid;
}
+ qp->mqp.usage = MLX4_RES_USAGE_DRIVER;
}
if (sqpn) {
@@ -836,6 +1194,11 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
goto err_wrid;
}
}
+ } else if (src == MLX4_IB_RWQ_SRC) {
+ err = mlx4_ib_alloc_wqn(to_mucontext(pd->uobject->context), qp,
+ range_size, &qpn);
+ if (err)
+ goto err_wrid;
} else {
/* Raw packet QPNs may not have bits 6,7 set in their qp_num;
* otherwise, the WQE BlueFlame setup flow wrongly causes
@@ -845,13 +1208,14 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
(init_attr->cap.max_send_wr ?
MLX4_RESERVE_ETH_BF_QP : 0) |
(init_attr->cap.max_recv_wr ?
- MLX4_RESERVE_A0_QP : 0));
+ MLX4_RESERVE_A0_QP : 0),
+ qp->mqp.usage);
else
if (qp->flags & MLX4_IB_QP_NETIF)
err = mlx4_ib_steer_qp_alloc(dev, 1, &qpn);
else
err = mlx4_qp_reserve_range(dev->dev, 1, 1,
- &qpn, 0);
+ &qpn, 0, qp->mqp.usage);
if (err)
goto err_proxy;
}
@@ -859,7 +1223,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;
- err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp, gfp);
+ err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
if (err)
goto err_qpn;
@@ -873,7 +1237,9 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
*/
qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);
- qp->mqp.event = mlx4_ib_qp_event;
+ qp->mqp.event = (src == MLX4_IB_QP_SRC) ? mlx4_ib_qp_event :
+ mlx4_ib_wq_event;
+
if (!*caller_qp)
*caller_qp = qp;
@@ -900,6 +1266,9 @@ err_qpn:
if (!sqpn) {
if (qp->flags & MLX4_IB_QP_NETIF)
mlx4_ib_steer_qp_free(dev, qpn, 1);
+ else if (src == MLX4_IB_RWQ_SRC)
+ mlx4_ib_release_wqn(to_mucontext(pd->uobject->context),
+ qp, 0);
else
mlx4_qp_release_range(dev->dev, qpn, 1);
}
@@ -998,7 +1367,7 @@ static struct mlx4_ib_pd *get_pd(struct mlx4_ib_qp *qp)
return to_mpd(qp->ibqp.pd);
}
-static void get_cqs(struct mlx4_ib_qp *qp,
+static void get_cqs(struct mlx4_ib_qp *qp, enum mlx4_ib_source_type src,
struct mlx4_ib_cq **send_cq, struct mlx4_ib_cq **recv_cq)
{
switch (qp->ibqp.qp_type) {
@@ -1011,14 +1380,46 @@ static void get_cqs(struct mlx4_ib_qp *qp,
*recv_cq = *send_cq;
break;
default:
- *send_cq = to_mcq(qp->ibqp.send_cq);
- *recv_cq = to_mcq(qp->ibqp.recv_cq);
+ *recv_cq = (src == MLX4_IB_QP_SRC) ? to_mcq(qp->ibqp.recv_cq) :
+ to_mcq(qp->ibwq.cq);
+ *send_cq = (src == MLX4_IB_QP_SRC) ? to_mcq(qp->ibqp.send_cq) :
+ *recv_cq;
break;
}
}
+static void destroy_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
+{
+ if (qp->state != IB_QPS_RESET) {
+ int i;
+
+ for (i = 0; i < (1 << qp->ibqp.rwq_ind_tbl->log_ind_tbl_size);
+ i++) {
+ struct ib_wq *ibwq = qp->ibqp.rwq_ind_tbl->ind_tbl[i];
+ struct mlx4_ib_qp *wq = to_mqp((struct ib_qp *)ibwq);
+
+ mutex_lock(&wq->mutex);
+
+ wq->rss_usecnt--;
+
+ mutex_unlock(&wq->mutex);
+ }
+
+ if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state),
+ MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
+ pr_warn("modify QP %06x to RESET failed.\n",
+ qp->mqp.qpn);
+ }
+
+ mlx4_qp_remove(dev->dev, &qp->mqp);
+ mlx4_qp_free(dev->dev, &qp->mqp);
+ mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
+ del_gid_entries(qp);
+ kfree(qp->rss_ctx);
+}
+
static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
- int is_user)
+ enum mlx4_ib_source_type src, int is_user)
{
struct mlx4_ib_cq *send_cq, *recv_cq;
unsigned long flags;
@@ -1051,7 +1452,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
}
}
- get_cqs(qp, &send_cq, &recv_cq);
+ get_cqs(qp, src, &send_cq, &recv_cq);
spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
mlx4_ib_lock_cqs(send_cq, recv_cq);
@@ -1077,6 +1478,9 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
if (!is_sqp(dev, qp) && !is_tunnel_qp(dev, qp)) {
if (qp->flags & MLX4_IB_QP_NETIF)
mlx4_ib_steer_qp_free(dev, qp->mqp.qpn, 1);
+ else if (src == MLX4_IB_RWQ_SRC)
+ mlx4_ib_release_wqn(to_mucontext(
+ qp->ibwq.uobject->context), qp, 1);
else
mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
}
@@ -1084,9 +1488,12 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
mlx4_mtt_cleanup(dev->dev, &qp->mtt);
if (is_user) {
- if (qp->rq.wqe_cnt)
- mlx4_ib_db_unmap_user(to_mucontext(qp->ibqp.uobject->context),
- &qp->db);
+ if (qp->rq.wqe_cnt) {
+ struct mlx4_ib_ucontext *mcontext = !src ?
+ to_mucontext(qp->ibqp.uobject->context) :
+ to_mucontext(qp->ibwq.uobject->context);
+ mlx4_ib_db_unmap_user(mcontext, &qp->db);
+ }
ib_umem_release(qp->umem);
} else {
kvfree(qp->sq.wrid);
@@ -1114,9 +1521,9 @@ static u32 get_sqp_num(struct mlx4_ib_dev *dev, struct ib_qp_init_attr *attr)
}
/* PF or VF -- creating proxies */
if (attr->qp_type == IB_QPT_SMI)
- return dev->dev->caps.qp0_proxy[attr->port_num - 1];
+ return dev->dev->caps.spec_qps[attr->port_num - 1].qp0_proxy;
else
- return dev->dev->caps.qp1_proxy[attr->port_num - 1];
+ return dev->dev->caps.spec_qps[attr->port_num - 1].qp1_proxy;
}
static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
@@ -1127,10 +1534,10 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
int err;
int sup_u_create_flags = MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;
u16 xrcdn = 0;
- gfp_t gfp;
- gfp = (init_attr->create_flags & MLX4_IB_QP_CREATE_USE_GFP_NOIO) ?
- GFP_NOIO : GFP_KERNEL;
+ if (init_attr->rwq_ind_tbl)
+ return _mlx4_ib_create_qp_rss(pd, init_attr, udata);
+
/*
* We only support LSO, vendor flag1, and multicast loopback blocking,
* and only for kernel UD QPs.
@@ -1140,8 +1547,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
MLX4_IB_SRIOV_TUNNEL_QP |
MLX4_IB_SRIOV_SQP |
MLX4_IB_QP_NETIF |
- MLX4_IB_QP_CREATE_ROCE_V2_GSI |
- MLX4_IB_QP_CREATE_USE_GFP_NOIO))
+ MLX4_IB_QP_CREATE_ROCE_V2_GSI))
return ERR_PTR(-EINVAL);
if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) {
@@ -1154,7 +1560,6 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
return ERR_PTR(-EINVAL);
if ((init_attr->create_flags & ~(MLX4_IB_SRIOV_SQP |
- MLX4_IB_QP_CREATE_USE_GFP_NOIO |
MLX4_IB_QP_CREATE_ROCE_V2_GSI |
MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) &&
init_attr->qp_type != IB_QPT_UD) ||
@@ -1179,7 +1584,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
case IB_QPT_RC:
case IB_QPT_UC:
case IB_QPT_RAW_PACKET:
- qp = kzalloc(sizeof *qp, gfp);
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
if (!qp)
return ERR_PTR(-ENOMEM);
qp->pri.vid = 0xFFFF;
@@ -1187,8 +1592,8 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
/* fall through */
case IB_QPT_UD:
{
- err = create_qp_common(to_mdev(pd->device), pd, init_attr,
- udata, 0, &qp, gfp);
+ err = create_qp_common(to_mdev(pd->device), pd, MLX4_IB_QP_SRC,
+ init_attr, udata, 0, &qp);
if (err) {
kfree(qp);
return ERR_PTR(err);
@@ -1208,7 +1613,9 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
if (udata)
return ERR_PTR(-EINVAL);
if (init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI) {
- int res = mlx4_qp_reserve_range(to_mdev(pd->device)->dev, 1, 1, &sqpn, 0);
+ int res = mlx4_qp_reserve_range(to_mdev(pd->device)->dev,
+ 1, 1, &sqpn, 0,
+ MLX4_RES_USAGE_DRIVER);
if (res)
return ERR_PTR(res);
@@ -1216,9 +1623,8 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
sqpn = get_sqp_num(to_mdev(pd->device), init_attr);
}
- err = create_qp_common(to_mdev(pd->device), pd, init_attr, udata,
- sqpn,
- &qp, gfp);
+ err = create_qp_common(to_mdev(pd->device), pd, MLX4_IB_QP_SRC,
+ init_attr, udata, sqpn, &qp);
if (err)
return ERR_PTR(err);
@@ -1273,7 +1679,6 @@ static int _mlx4_ib_destroy_qp(struct ib_qp *qp)
{
struct mlx4_ib_dev *dev = to_mdev(qp->device);
struct mlx4_ib_qp *mqp = to_mqp(qp);
- struct mlx4_ib_pd *pd;
if (is_qp0(dev, mqp))
mlx4_CLOSE_PORT(dev->dev, mqp->port);
@@ -1288,8 +1693,14 @@ static int _mlx4_ib_destroy_qp(struct ib_qp *qp)
if (mqp->counter_index)
mlx4_ib_free_qp_counter(dev, mqp);
- pd = get_pd(mqp);
- destroy_qp_common(dev, mqp, !!pd->ibpd.uobject);
+ if (qp->rwq_ind_tbl) {
+ destroy_qp_rss(dev, mqp);
+ } else {
+ struct mlx4_ib_pd *pd;
+
+ pd = get_pd(mqp);
+ destroy_qp_common(dev, mqp, MLX4_IB_QP_SRC, !!pd->ibpd.uobject);
+ }
if (is_sqp(dev, mqp))
kfree(to_msqp(mqp));
@@ -1572,7 +1983,7 @@ static int create_qp_lb_counter(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
!(dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_LB_SRC_CHK))
return 0;
- err = mlx4_counter_alloc(dev->dev, &tmp_idx);
+ err = mlx4_counter_alloc(dev->dev, &tmp_idx, MLX4_RES_USAGE_DRIVER);
if (err)
return err;
@@ -1612,12 +2023,119 @@ static u8 gid_type_to_qpc(enum ib_gid_type gid_type)
}
}
-static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
+/*
+ * Go over all of the RSS QP's children (WQs) and apply their HW state
+ * according to their logical state, if this RSS QP is the first RSS QP
+ * associated with the WQ.
+ */
+static int bringup_rss_rwqs(struct ib_rwq_ind_table *ind_tbl, u8 port_num)
+{
+ int err = 0;
+ int i;
+
+ for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) {
+ struct ib_wq *ibwq = ind_tbl->ind_tbl[i];
+ struct mlx4_ib_qp *wq = to_mqp((struct ib_qp *)ibwq);
+
+ mutex_lock(&wq->mutex);
+
+ /* Mlx4_ib restrictions:
+ * A WQ is associated with a port according to the RSS QP it is
+ * associated with.
+ * If the WQ is already associated with a different port by another
+ * RSS QP, return a failure.
+ */
+ if ((wq->rss_usecnt > 0) && (wq->port != port_num)) {
+ err = -EINVAL;
+ mutex_unlock(&wq->mutex);
+ break;
+ }
+ wq->port = port_num;
+ if ((wq->rss_usecnt == 0) && (ibwq->state == IB_WQS_RDY)) {
+ err = _mlx4_ib_modify_wq(ibwq, IB_WQS_RDY);
+ if (err) {
+ mutex_unlock(&wq->mutex);
+ break;
+ }
+ }
+ wq->rss_usecnt++;
+
+ mutex_unlock(&wq->mutex);
+ }
+
+ if (i && err) {
+ int j;
+
+ for (j = (i - 1); j >= 0; j--) {
+ struct ib_wq *ibwq = ind_tbl->ind_tbl[j];
+ struct mlx4_ib_qp *wq = to_mqp((struct ib_qp *)ibwq);
+
+ mutex_lock(&wq->mutex);
+
+ if ((wq->rss_usecnt == 1) &&
+ (ibwq->state == IB_WQS_RDY))
+ if (_mlx4_ib_modify_wq(ibwq, IB_WQS_RESET))
+ pr_warn("failed to reverse WQN=0x%06x\n",
+ ibwq->wq_num);
+ wq->rss_usecnt--;
+
+ mutex_unlock(&wq->mutex);
+ }
+ }
+
+ return err;
+}
+
+static void bring_down_rss_rwqs(struct ib_rwq_ind_table *ind_tbl)
+{
+ int i;
+
+ for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) {
+ struct ib_wq *ibwq = ind_tbl->ind_tbl[i];
+ struct mlx4_ib_qp *wq = to_mqp((struct ib_qp *)ibwq);
+
+ mutex_lock(&wq->mutex);
+
+ if ((wq->rss_usecnt == 1) && (ibwq->state == IB_WQS_RDY))
+ if (_mlx4_ib_modify_wq(ibwq, IB_WQS_RESET))
+ pr_warn("failed to reverse WQN=%x\n",
+ ibwq->wq_num);
+ wq->rss_usecnt--;
+
+ mutex_unlock(&wq->mutex);
+ }
+}
+
+static void fill_qp_rss_context(struct mlx4_qp_context *context,
+ struct mlx4_ib_qp *qp)
+{
+ struct mlx4_rss_context *rss_context;
+
+ rss_context = (void *)context + offsetof(struct mlx4_qp_context,
+ pri_path) + MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
+
+ rss_context->base_qpn = cpu_to_be32(qp->rss_ctx->base_qpn_tbl_sz);
+ rss_context->default_qpn =
+ cpu_to_be32(qp->rss_ctx->base_qpn_tbl_sz & 0xffffff);
+ if (qp->rss_ctx->flags & (MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6))
+ rss_context->base_qpn_udp = rss_context->default_qpn;
+ rss_context->flags = qp->rss_ctx->flags;
+ /* Currently, only the Toeplitz hash function is supported */
+ rss_context->hash_fn = MLX4_RSS_HASH_TOP;
+
+ memcpy(rss_context->rss_key, qp->rss_ctx->rss_key,
+ MLX4_EN_RSS_KEY_SIZE);
+}
+
+static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
const struct ib_qp_attr *attr, int attr_mask,
enum ib_qp_state cur_state, enum ib_qp_state new_state)
{
- struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
- struct mlx4_ib_qp *qp = to_mqp(ibqp);
+ struct ib_uobject *ibuobject;
+ struct ib_srq *ibsrq;
+ struct ib_rwq_ind_table *rwq_ind_tbl;
+ enum ib_qp_type qp_type;
+ struct mlx4_ib_dev *dev;
+ struct mlx4_ib_qp *qp;
struct mlx4_ib_pd *pd;
struct mlx4_ib_cq *send_cq, *recv_cq;
struct mlx4_qp_context *context;
@@ -1627,6 +2145,30 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
int err = -EINVAL;
int counter_index;
+ if (src_type == MLX4_IB_RWQ_SRC) {
+ struct ib_wq *ibwq;
+
+ ibwq = (struct ib_wq *)src;
+ ibuobject = ibwq->uobject;
+ ibsrq = NULL;
+ rwq_ind_tbl = NULL;
+ qp_type = IB_QPT_RAW_PACKET;
+ qp = to_mqp((struct ib_qp *)ibwq);
+ dev = to_mdev(ibwq->device);
+ pd = to_mpd(ibwq->pd);
+ } else {
+ struct ib_qp *ibqp;
+
+ ibqp = (struct ib_qp *)src;
+ ibuobject = ibqp->uobject;
+ ibsrq = ibqp->srq;
+ rwq_ind_tbl = ibqp->rwq_ind_tbl;
+ qp_type = ibqp->qp_type;
+ qp = to_mqp(ibqp);
+ dev = to_mdev(ibqp->device);
+ pd = get_pd(qp);
+ }
+
/* APM is not supported under RoCE */
if (attr_mask & IB_QP_ALT_PATH &&
rdma_port_get_link_layer(&dev->ib_dev, qp->port) ==
@@ -1640,6 +2182,11 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
context->flags = cpu_to_be32((to_mlx4_state(new_state) << 28) |
(to_mlx4_st(dev, qp->mlx4_ib_qp_type) << 16));
+ if (rwq_ind_tbl) {
+ fill_qp_rss_context(context, qp);
+ context->flags |= cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET);
+ }
+
if (!(attr_mask & IB_QP_PATH_MIG_STATE))
context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
else {
@@ -1657,11 +2204,14 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
}
}
- if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
+ if (qp->inl_recv_sz)
+ context->param3 |= cpu_to_be32(1 << 25);
+
+ if (qp_type == IB_QPT_GSI || qp_type == IB_QPT_SMI)
context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
- else if (ibqp->qp_type == IB_QPT_RAW_PACKET)
+ else if (qp_type == IB_QPT_RAW_PACKET)
context->mtu_msgmax = (MLX4_RAW_QP_MTU << 5) | MLX4_RAW_QP_MSGMAX;
- else if (ibqp->qp_type == IB_QPT_UD) {
+ else if (qp_type == IB_QPT_UD) {
if (qp->flags & MLX4_IB_QP_LSO)
context->mtu_msgmax = (IB_MTU_4096 << 5) |
ilog2(dev->dev->caps.max_gso_sz);
@@ -1677,9 +2227,11 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
ilog2(dev->dev->caps.max_msg_sz);
}
- if (qp->rq.wqe_cnt)
- context->rq_size_stride = ilog2(qp->rq.wqe_cnt) << 3;
- context->rq_size_stride |= qp->rq.wqe_shift - 4;
+ if (!rwq_ind_tbl) { /* PRM RSS receive side should be left as zeros */
+ if (qp->rq.wqe_cnt)
+ context->rq_size_stride = ilog2(qp->rq.wqe_cnt) << 3;
+ context->rq_size_stride |= qp->rq.wqe_shift - 4;
+ }
if (qp->sq.wqe_cnt)
context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3;
@@ -1691,14 +2243,15 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
context->sq_size_stride |= !!qp->sq_no_prefetch << 7;
context->xrcd = cpu_to_be32((u32) qp->xrcdn);
- if (ibqp->qp_type == IB_QPT_RAW_PACKET)
+ if (qp_type == IB_QPT_RAW_PACKET)
context->param3 |= cpu_to_be32(1 << 30);
}
- if (qp->ibqp.uobject)
+ if (ibuobject)
context->usr_page = cpu_to_be32(
mlx4_to_hw_uar_index(dev->dev,
- to_mucontext(ibqp->uobject->context)->uar.index));
+ to_mucontext(ibuobject->context)
+ ->uar.index));
else
context->usr_page = cpu_to_be32(
mlx4_to_hw_uar_index(dev->dev, dev->priv_uar.index));
@@ -1742,7 +2295,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
steer_qp = 1;
}
- if (ibqp->qp_type == IB_QPT_GSI) {
+ if (qp_type == IB_QPT_GSI) {
enum ib_gid_type gid_type = qp->flags & MLX4_IB_ROCE_V2_GSI_QP ?
IB_GID_TYPE_ROCE_UDP_ENCAP : IB_GID_TYPE_ROCE;
u8 qpc_roce_mode = gid_type_to_qpc(gid_type);
@@ -1759,7 +2312,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
}
if (attr_mask & IB_QP_AV) {
- u8 port_num = mlx4_is_bonded(to_mdev(ibqp->device)->dev) ? 1 :
+ u8 port_num = mlx4_is_bonded(dev->dev) ? 1 :
attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
union ib_gid gid;
struct ib_gid_attr gid_attr = {.gid_type = IB_GID_TYPE_IB};
@@ -1774,7 +2327,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
int index =
rdma_ah_read_grh(&attr->ah_attr)->sgid_index;
- status = ib_get_cached_gid(ibqp->device, port_num,
+ status = ib_get_cached_gid(&dev->ib_dev, port_num,
index, &gid, &gid_attr);
if (!status && !memcmp(&gid, &zgid, sizeof(gid)))
status = -ENOENT;
@@ -1831,15 +2384,20 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
optpar |= MLX4_QP_OPTPAR_ALT_ADDR_PATH;
}
- pd = get_pd(qp);
- get_cqs(qp, &send_cq, &recv_cq);
- context->pd = cpu_to_be32(pd->pdn);
+ context->pd = cpu_to_be32(pd->pdn);
+
+ if (!rwq_ind_tbl) {
+ get_cqs(qp, src_type, &send_cq, &recv_cq);
+ } else { /* Set dummy CQs to be compatible with HV and PRM */
+ send_cq = to_mcq(rwq_ind_tbl->ind_tbl[0]->cq);
+ recv_cq = send_cq;
+ }
context->cqn_send = cpu_to_be32(send_cq->mcq.cqn);
context->cqn_recv = cpu_to_be32(recv_cq->mcq.cqn);
context->params1 = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28);
/* Set "fast registration enabled" for all kernel QPs */
- if (!qp->ibqp.uobject)
+ if (!ibuobject)
context->params1 |= cpu_to_be32(1 << 11);
if (attr_mask & IB_QP_RNR_RETRY) {
@@ -1874,7 +2432,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
optpar |= MLX4_QP_OPTPAR_RWE | MLX4_QP_OPTPAR_RRE | MLX4_QP_OPTPAR_RAE;
}
- if (ibqp->srq)
+ if (ibsrq)
context->params2 |= cpu_to_be32(MLX4_QP_BIT_RIC);
if (attr_mask & IB_QP_MIN_RNR_TIMER) {
@@ -1905,17 +2463,19 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
optpar |= MLX4_QP_OPTPAR_Q_KEY;
}
- if (ibqp->srq)
- context->srqn = cpu_to_be32(1 << 24 | to_msrq(ibqp->srq)->msrq.srqn);
+ if (ibsrq)
+ context->srqn = cpu_to_be32(1 << 24 |
+ to_msrq(ibsrq)->msrq.srqn);
- if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
+ if (qp->rq.wqe_cnt &&
+ cur_state == IB_QPS_RESET &&
+ new_state == IB_QPS_INIT)
context->db_rec_addr = cpu_to_be64(qp->db.dma);
if (cur_state == IB_QPS_INIT &&
new_state == IB_QPS_RTR &&
- (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI ||
- ibqp->qp_type == IB_QPT_UD ||
- ibqp->qp_type == IB_QPT_RAW_PACKET)) {
+ (qp_type == IB_QPT_GSI || qp_type == IB_QPT_SMI ||
+ qp_type == IB_QPT_UD || qp_type == IB_QPT_RAW_PACKET)) {
context->pri_path.sched_queue = (qp->port - 1) << 6;
if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI ||
qp->mlx4_ib_qp_type &
@@ -1948,7 +2508,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
}
}
- if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) {
+ if (qp_type == IB_QPT_RAW_PACKET) {
context->pri_path.ackto = (context->pri_path.ackto & 0xf8) |
MLX4_IB_LINK_TYPE_ETH;
if (dev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
@@ -1958,7 +2518,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
}
}
- if (ibqp->qp_type == IB_QPT_UD && (new_state == IB_QPS_RTR)) {
+ if (qp_type == IB_QPT_UD && (new_state == IB_QPS_RTR)) {
int is_eth = rdma_port_get_link_layer(
&dev->ib_dev, qp->port) ==
IB_LINK_LAYER_ETHERNET;
@@ -1968,14 +2528,15 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
}
}
-
if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
sqd_event = 1;
else
sqd_event = 0;
- if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
+ if (!ibuobject &&
+ cur_state == IB_QPS_RESET &&
+ new_state == IB_QPS_INIT)
context->rlkey_roce_mode |= (1 << 4);
/*
@@ -1984,7 +2545,9 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
* headroom is stamped so that the hardware doesn't start
* processing stale work requests.
*/
- if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+ if (!ibuobject &&
+ cur_state == IB_QPS_RESET &&
+ new_state == IB_QPS_INIT) {
struct mlx4_wqe_ctrl_seg *ctrl;
int i;
@@ -2041,9 +2604,9 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
* entries and reinitialize the QP.
*/
if (new_state == IB_QPS_RESET) {
- if (!ibqp->uobject) {
+ if (!ibuobject) {
mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
- ibqp->srq ? to_msrq(ibqp->srq) : NULL);
+ ibsrq ? to_msrq(ibsrq) : NULL);
if (send_cq != recv_cq)
mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
@@ -2154,22 +2717,25 @@ out:
return err;
}
+enum {
+ MLX4_IB_MODIFY_QP_RSS_SUP_ATTR_MSK = (IB_QP_STATE |
+ IB_QP_PORT),
+};
+
static int _mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata)
{
+ enum rdma_link_layer ll = IB_LINK_LAYER_UNSPECIFIED;
struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
struct mlx4_ib_qp *qp = to_mqp(ibqp);
enum ib_qp_state cur_state, new_state;
int err = -EINVAL;
- int ll;
mutex_lock(&qp->mutex);
cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
- if (cur_state == new_state && cur_state == IB_QPS_RESET) {
- ll = IB_LINK_LAYER_UNSPECIFIED;
- } else {
+ if (cur_state != new_state || cur_state != IB_QPS_RESET) {
int port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
ll = rdma_port_get_link_layer(&dev->ib_dev, port);
}
@@ -2184,6 +2750,27 @@ static int _mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
goto out;
}
+ if (ibqp->rwq_ind_tbl) {
+ if (!(((cur_state == IB_QPS_RESET) &&
+ (new_state == IB_QPS_INIT)) ||
+ ((cur_state == IB_QPS_INIT) &&
+ (new_state == IB_QPS_RTR)))) {
+ pr_debug("qpn 0x%x: RSS QP unsupported transition %d to %d\n",
+ ibqp->qp_num, cur_state, new_state);
+
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (attr_mask & ~MLX4_IB_MODIFY_QP_RSS_SUP_ATTR_MSK) {
+ pr_debug("qpn 0x%x: RSS QP unsupported attribute mask 0x%x for transition %d to %d\n",
+ ibqp->qp_num, attr_mask, cur_state, new_state);
+
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+ }
+
if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT)) {
if ((cur_state == IB_QPS_RESET) && (new_state == IB_QPS_INIT)) {
if ((ibqp->qp_type == IB_QPT_RC) ||
@@ -2248,7 +2835,17 @@ static int _mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
goto out;
}
- err = __mlx4_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);
+ if (ibqp->rwq_ind_tbl && (new_state == IB_QPS_INIT)) {
+ err = bringup_rss_rwqs(ibqp->rwq_ind_tbl, attr->port_num);
+ if (err)
+ goto out;
+ }
+
+ err = __mlx4_ib_modify_qp(ibqp, MLX4_IB_QP_SRC, attr, attr_mask,
+ cur_state, new_state);
+
+ if (ibqp->rwq_ind_tbl && err)
+ bring_down_rss_rwqs(ibqp->rwq_ind_tbl);
if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT))
attr->port_num = 1;
@@ -2283,9 +2880,9 @@ static int vf_get_qp0_qkey(struct mlx4_dev *dev, int qpn, u32 *qkey)
{
int i;
for (i = 0; i < dev->caps.num_ports; i++) {
- if (qpn == dev->caps.qp0_proxy[i] ||
- qpn == dev->caps.qp0_tunnel[i]) {
- *qkey = dev->caps.qp0_qkey[i];
+ if (qpn == dev->caps.spec_qps[i].qp0_proxy ||
+ qpn == dev->caps.spec_qps[i].qp0_tunnel) {
+ *qkey = dev->caps.spec_qps[i].qp0_qkey;
return 0;
}
}
@@ -2346,7 +2943,7 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
else
sqp->ud_header.bth.destination_qpn =
- cpu_to_be32(mdev->dev->caps.qp0_tunnel[sqp->qp.port - 1]);
+ cpu_to_be32(mdev->dev->caps.spec_qps[sqp->qp.port - 1].qp0_tunnel);
sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
if (mlx4_is_master(mdev->dev)) {
@@ -2806,9 +3403,9 @@ static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev,
memcpy(dseg->av, &sqp_av, sizeof (struct mlx4_av));
if (qpt == MLX4_IB_QPT_PROXY_GSI)
- dseg->dqpn = cpu_to_be32(dev->dev->caps.qp1_tunnel[port - 1]);
+ dseg->dqpn = cpu_to_be32(dev->dev->caps.spec_qps[port - 1].qp1_tunnel);
else
- dseg->dqpn = cpu_to_be32(dev->dev->caps.qp0_tunnel[port - 1]);
+ dseg->dqpn = cpu_to_be32(dev->dev->caps.spec_qps[port - 1].qp0_tunnel);
/* Use QKEY from the QP context, which is set by master */
dseg->qkey = cpu_to_be32(IB_QP_SET_QKEY);
}
@@ -3438,6 +4035,9 @@ int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr
int mlx4_state;
int err = 0;
+ if (ibqp->rwq_ind_tbl)
+ return -EOPNOTSUPP;
+
mutex_lock(&qp->mutex);
if (qp->state == IB_QPS_RESET) {
@@ -3533,3 +4133,285 @@ out:
return err;
}
+struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd,
+ struct ib_wq_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct mlx4_ib_dev *dev;
+ struct ib_qp_init_attr ib_qp_init_attr;
+ struct mlx4_ib_qp *qp;
+ struct mlx4_ib_create_wq ucmd;
+ int err, required_cmd_sz;
+
+ if (!(udata && pd->uobject))
+ return ERR_PTR(-EINVAL);
+
+ required_cmd_sz = offsetof(typeof(ucmd), comp_mask) +
+ sizeof(ucmd.comp_mask);
+ if (udata->inlen < required_cmd_sz) {
+ pr_debug("invalid inlen\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (udata->inlen > sizeof(ucmd) &&
+ !ib_is_udata_cleared(udata, sizeof(ucmd),
+ udata->inlen - sizeof(ucmd))) {
+ pr_debug("inlen is not supported\n");
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ if (udata->outlen)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ dev = to_mdev(pd->device);
+
+ if (init_attr->wq_type != IB_WQT_RQ) {
+ pr_debug("unsupported wq type %d\n", init_attr->wq_type);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ if (init_attr->create_flags) {
+ pr_debug("unsupported create_flags %u\n",
+ init_attr->create_flags);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return ERR_PTR(-ENOMEM);
+
+ qp->pri.vid = 0xFFFF;
+ qp->alt.vid = 0xFFFF;
+
+ memset(&ib_qp_init_attr, 0, sizeof(ib_qp_init_attr));
+ ib_qp_init_attr.qp_context = init_attr->wq_context;
+ ib_qp_init_attr.qp_type = IB_QPT_RAW_PACKET;
+ ib_qp_init_attr.cap.max_recv_wr = init_attr->max_wr;
+ ib_qp_init_attr.cap.max_recv_sge = init_attr->max_sge;
+ ib_qp_init_attr.recv_cq = init_attr->cq;
+ ib_qp_init_attr.send_cq = ib_qp_init_attr.recv_cq; /* Dummy CQ */
+
+ err = create_qp_common(dev, pd, MLX4_IB_RWQ_SRC, &ib_qp_init_attr,
+ udata, 0, &qp);
+ if (err) {
+ kfree(qp);
+ return ERR_PTR(err);
+ }
+
+ qp->ibwq.event_handler = init_attr->event_handler;
+ qp->ibwq.wq_num = qp->mqp.qpn;
+ qp->ibwq.state = IB_WQS_RESET;
+
+ return &qp->ibwq;
+}
+
+static int ib_wq2qp_state(enum ib_wq_state state)
+{
+ switch (state) {
+ case IB_WQS_RESET:
+ return IB_QPS_RESET;
+ case IB_WQS_RDY:
+ return IB_QPS_RTR;
+ default:
+ return IB_QPS_ERR;
+ }
+}
+
+static int _mlx4_ib_modify_wq(struct ib_wq *ibwq, enum ib_wq_state new_state)
+{
+ struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq);
+ enum ib_qp_state qp_cur_state;
+ enum ib_qp_state qp_new_state;
+ int attr_mask;
+ int err;
+
+ /* ib_qp.state represents the WQ HW state while ib_wq.state represents
+ * the WQ logical state.
+ */
+ qp_cur_state = qp->state;
+ qp_new_state = ib_wq2qp_state(new_state);
+
+ if (ib_wq2qp_state(new_state) == qp_cur_state)
+ return 0;
+
+ if (new_state == IB_WQS_RDY) {
+ struct ib_qp_attr attr = {};
+
+ attr.port_num = qp->port;
+ attr_mask = IB_QP_PORT;
+
+ err = __mlx4_ib_modify_qp(ibwq, MLX4_IB_RWQ_SRC, &attr,
+ attr_mask, IB_QPS_RESET, IB_QPS_INIT);
+ if (err) {
+ pr_debug("WQN=0x%06x failed to apply RST->INIT on the HW QP\n",
+ ibwq->wq_num);
+ return err;
+ }
+
+ qp_cur_state = IB_QPS_INIT;
+ }
+
+ attr_mask = 0;
+ err = __mlx4_ib_modify_qp(ibwq, MLX4_IB_RWQ_SRC, NULL, attr_mask,
+ qp_cur_state, qp_new_state);
+
+ if (err && (qp_cur_state == IB_QPS_INIT)) {
+ qp_new_state = IB_QPS_RESET;
+ if (__mlx4_ib_modify_qp(ibwq, MLX4_IB_RWQ_SRC, NULL,
+ attr_mask, IB_QPS_INIT, IB_QPS_RESET)) {
+ pr_warn("WQN=0x%06x failed with reverting HW's resources failure\n",
+ ibwq->wq_num);
+ qp_new_state = IB_QPS_INIT;
+ }
+ }
+
+ qp->state = qp_new_state;
+
+ return err;
+}
+
+int mlx4_ib_modify_wq(struct ib_wq *ibwq, struct ib_wq_attr *wq_attr,
+ u32 wq_attr_mask, struct ib_udata *udata)
+{
+ struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq);
+ struct mlx4_ib_modify_wq ucmd = {};
+ size_t required_cmd_sz;
+ enum ib_wq_state cur_state, new_state;
+ int err = 0;
+
+ required_cmd_sz = offsetof(typeof(ucmd), reserved) +
+ sizeof(ucmd.reserved);
+ if (udata->inlen < required_cmd_sz)
+ return -EINVAL;
+
+ if (udata->inlen > sizeof(ucmd) &&
+ !ib_is_udata_cleared(udata, sizeof(ucmd),
+ udata->inlen - sizeof(ucmd)))
+ return -EOPNOTSUPP;
+
+ if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen)))
+ return -EFAULT;
+
+ if (ucmd.comp_mask || ucmd.reserved)
+ return -EOPNOTSUPP;
+
+ if (wq_attr_mask & IB_WQ_FLAGS)
+ return -EOPNOTSUPP;
+
+ cur_state = wq_attr_mask & IB_WQ_CUR_STATE ? wq_attr->curr_wq_state :
+ ibwq->state;
+ new_state = wq_attr_mask & IB_WQ_STATE ? wq_attr->wq_state : cur_state;
+
+ if (cur_state < IB_WQS_RESET || cur_state > IB_WQS_ERR ||
+ new_state < IB_WQS_RESET || new_state > IB_WQS_ERR)
+ return -EINVAL;
+
+ if ((new_state == IB_WQS_RDY) && (cur_state == IB_WQS_ERR))
+ return -EINVAL;
+
+ if ((new_state == IB_WQS_ERR) && (cur_state == IB_WQS_RESET))
+ return -EINVAL;
+
+ /* Need to protect against the parent RSS QP, which may also modify
+ * the WQ state.
+ */
+ mutex_lock(&qp->mutex);
+
+ /* The HW state can be updated only if an RSS QP has already been
+ * associated with this WQ, so that its port can be applied to the WQ.
+ */
+ if (qp->rss_usecnt)
+ err = _mlx4_ib_modify_wq(ibwq, new_state);
+
+ if (!err)
+ ibwq->state = new_state;
+
+ mutex_unlock(&qp->mutex);
+
+ return err;
+}
+
+int mlx4_ib_destroy_wq(struct ib_wq *ibwq)
+{
+ struct mlx4_ib_dev *dev = to_mdev(ibwq->device);
+ struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq);
+
+ if (qp->counter_index)
+ mlx4_ib_free_qp_counter(dev, qp);
+
+ destroy_qp_common(dev, qp, MLX4_IB_RWQ_SRC, 1);
+
+ kfree(qp);
+
+ return 0;
+}
+
+struct ib_rwq_ind_table
+*mlx4_ib_create_rwq_ind_table(struct ib_device *device,
+ struct ib_rwq_ind_table_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct ib_rwq_ind_table *rwq_ind_table;
+ struct mlx4_ib_create_rwq_ind_tbl_resp resp = {};
+ unsigned int ind_tbl_size = 1 << init_attr->log_ind_tbl_size;
+ unsigned int base_wqn;
+ size_t min_resp_len;
+ int i;
+ int err;
+
+ if (udata->inlen > 0 &&
+ !ib_is_udata_cleared(udata, 0,
+ udata->inlen))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
+ if (udata->outlen && udata->outlen < min_resp_len)
+ return ERR_PTR(-EINVAL);
+
+ if (ind_tbl_size >
+ device->attrs.rss_caps.max_rwq_indirection_table_size) {
+ pr_debug("log_ind_tbl_size = %d is bigger than supported = %d\n",
+ ind_tbl_size,
+ device->attrs.rss_caps.max_rwq_indirection_table_size);
+ return ERR_PTR(-EINVAL);
+ }
+
+ base_wqn = init_attr->ind_tbl[0]->wq_num;
+
+ if (base_wqn % ind_tbl_size) {
+ pr_debug("WQN=0x%x isn't aligned with indirection table size\n",
+ base_wqn);
+ return ERR_PTR(-EINVAL);
+ }
+
+ for (i = 1; i < ind_tbl_size; i++) {
+ if (++base_wqn != init_attr->ind_tbl[i]->wq_num) {
+ pr_debug("indirection table's WQNs aren't consecutive\n");
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ rwq_ind_table = kzalloc(sizeof(*rwq_ind_table), GFP_KERNEL);
+ if (!rwq_ind_table)
+ return ERR_PTR(-ENOMEM);
+
+ if (udata->outlen) {
+ resp.response_length = offsetof(typeof(resp), response_length) +
+ sizeof(resp.response_length);
+ err = ib_copy_to_udata(udata, &resp, resp.response_length);
+ if (err)
+ goto err;
+ }
+
+ return rwq_ind_table;
+
+err:
+ kfree(rwq_ind_table);
+ return ERR_PTR(err);
+}
+
+int mlx4_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
+{
+ kfree(ib_rwq_ind_tbl);
+ return 0;
+}
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index e32dd58937a8..ebee56cbc0e2 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -34,7 +34,6 @@
#include <linux/mlx4/qp.h>
#include <linux/mlx4/srq.h>
#include <linux/slab.h>
-#include <linux/vmalloc.h>
#include "mlx4_ib.h"
#include <rdma/mlx4-abi.h>
@@ -135,14 +134,14 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
if (err)
goto err_mtt;
} else {
- err = mlx4_db_alloc(dev->dev, &srq->db, 0, GFP_KERNEL);
+ err = mlx4_db_alloc(dev->dev, &srq->db, 0);
if (err)
goto err_srq;
*srq->db.db = 0;
- if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, &srq->buf,
- GFP_KERNEL)) {
+ if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2,
+ &srq->buf)) {
err = -ENOMEM;
goto err_db;
}
@@ -167,24 +166,20 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
if (err)
goto err_buf;
- err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf, GFP_KERNEL);
+ err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf);
if (err)
goto err_mtt;
- srq->wrid = kmalloc_array(srq->msrq.max, sizeof(u64),
- GFP_KERNEL | __GFP_NOWARN);
+ srq->wrid = kvmalloc_array(srq->msrq.max,
+ sizeof(u64), GFP_KERNEL);
if (!srq->wrid) {
- srq->wrid = __vmalloc(srq->msrq.max * sizeof(u64),
- GFP_KERNEL, PAGE_KERNEL);
- if (!srq->wrid) {
- err = -ENOMEM;
- goto err_mtt;
- }
+ err = -ENOMEM;
+ goto err_mtt;
}
}
- cqn = (init_attr->srq_type == IB_SRQT_XRC) ?
- to_mcq(init_attr->ext.xrc.cq)->mcq.cqn : 0;
+ cqn = ib_srq_has_cq(init_attr->srq_type) ?
+ to_mcq(init_attr->ext.cq)->mcq.cqn : 0;
xrcdn = (init_attr->srq_type == IB_SRQT_XRC) ?
to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn :
(u16) dev->dev->caps.reserved_xrcds;
diff --git a/drivers/infiniband/hw/mlx5/Makefile b/drivers/infiniband/hw/mlx5/Makefile
index 90ad2adc752f..bc6299697dda 100644
--- a/drivers/infiniband/hw/mlx5/Makefile
+++ b/drivers/infiniband/hw/mlx5/Makefile
@@ -1,4 +1,4 @@
obj-$(CONFIG_MLX5_INFINIBAND) += mlx5_ib.o
-mlx5_ib-y := main.o cq.o doorbell.o qp.o mem.o srq.o mr.o ah.o mad.o gsi.o ib_virt.o cmd.o
+mlx5_ib-y := main.o cq.o doorbell.o qp.o mem.o srq.o mr.o ah.o mad.o gsi.o ib_virt.o cmd.o cong.o
mlx5_ib-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += odp.o
diff --git a/drivers/infiniband/hw/mlx5/cmd.c b/drivers/infiniband/hw/mlx5/cmd.c
index 18d5e1db93ed..470995fa38d2 100644
--- a/drivers/infiniband/hw/mlx5/cmd.c
+++ b/drivers/infiniband/hw/mlx5/cmd.c
@@ -57,3 +57,23 @@ int mlx5_cmd_query_cong_counter(struct mlx5_core_dev *dev,
MLX5_SET(query_cong_statistics_in, in, clear, reset);
return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
}
+
+int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
+ void *out, int out_size)
+{
+ u32 in[MLX5_ST_SZ_DW(query_cong_params_in)] = { };
+
+ MLX5_SET(query_cong_params_in, in, opcode,
+ MLX5_CMD_OP_QUERY_CONG_PARAMS);
+ MLX5_SET(query_cong_params_in, in, cong_protocol, cong_point);
+
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
+}
+
+int mlx5_cmd_modify_cong_params(struct mlx5_core_dev *dev,
+ void *in, int in_size)
+{
+ u32 out[MLX5_ST_SZ_DW(modify_cong_params_out)] = { };
+
+ return mlx5_cmd_exec(dev, in, in_size, out, sizeof(out));
+}
diff --git a/drivers/infiniband/hw/mlx5/cmd.h b/drivers/infiniband/hw/mlx5/cmd.h
index fa09228193a6..af4c24596274 100644
--- a/drivers/infiniband/hw/mlx5/cmd.h
+++ b/drivers/infiniband/hw/mlx5/cmd.h
@@ -39,4 +39,8 @@
int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey);
int mlx5_cmd_query_cong_counter(struct mlx5_core_dev *dev,
bool reset, void *out, int out_size);
+int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
+ void *out, int out_size);
+int mlx5_cmd_modify_cong_params(struct mlx5_core_dev *mdev,
+ void *in, int in_size);
#endif /* MLX5_IB_CMD_H */
diff --git a/drivers/infiniband/hw/mlx5/cong.c b/drivers/infiniband/hw/mlx5/cong.c
new file mode 100644
index 000000000000..2d32b519bb61
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/cong.c
@@ -0,0 +1,421 @@
+/*
+ * Copyright (c) 2013-2017, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/debugfs.h>
+
+#include "mlx5_ib.h"
+#include "cmd.h"
+
+enum mlx5_ib_cong_node_type {
+ MLX5_IB_RROCE_ECN_RP = 1,
+ MLX5_IB_RROCE_ECN_NP = 2,
+};
+
+static const char * const mlx5_ib_dbg_cc_name[] = {
+ "rp_clamp_tgt_rate",
+ "rp_clamp_tgt_rate_ati",
+ "rp_time_reset",
+ "rp_byte_reset",
+ "rp_threshold",
+ "rp_ai_rate",
+ "rp_hai_rate",
+ "rp_min_dec_fac",
+ "rp_min_rate",
+ "rp_rate_to_set_on_first_cnp",
+ "rp_dce_tcp_g",
+ "rp_dce_tcp_rtt",
+ "rp_rate_reduce_monitor_period",
+ "rp_initial_alpha_value",
+ "rp_gd",
+ "np_cnp_dscp",
+ "np_cnp_prio_mode",
+ "np_cnp_prio",
+};
+
+#define MLX5_IB_RP_CLAMP_TGT_RATE_ATTR BIT(1)
+#define MLX5_IB_RP_CLAMP_TGT_RATE_ATI_ATTR BIT(2)
+#define MLX5_IB_RP_TIME_RESET_ATTR BIT(3)
+#define MLX5_IB_RP_BYTE_RESET_ATTR BIT(4)
+#define MLX5_IB_RP_THRESHOLD_ATTR BIT(5)
+#define MLX5_IB_RP_AI_RATE_ATTR BIT(7)
+#define MLX5_IB_RP_HAI_RATE_ATTR BIT(8)
+#define MLX5_IB_RP_MIN_DEC_FAC_ATTR BIT(9)
+#define MLX5_IB_RP_MIN_RATE_ATTR BIT(10)
+#define MLX5_IB_RP_RATE_TO_SET_ON_FIRST_CNP_ATTR BIT(11)
+#define MLX5_IB_RP_DCE_TCP_G_ATTR BIT(12)
+#define MLX5_IB_RP_DCE_TCP_RTT_ATTR BIT(13)
+#define MLX5_IB_RP_RATE_REDUCE_MONITOR_PERIOD_ATTR BIT(14)
+#define MLX5_IB_RP_INITIAL_ALPHA_VALUE_ATTR BIT(15)
+#define MLX5_IB_RP_GD_ATTR BIT(16)
+
+#define MLX5_IB_NP_CNP_DSCP_ATTR BIT(3)
+#define MLX5_IB_NP_CNP_PRIO_MODE_ATTR BIT(4)
+
+static enum mlx5_ib_cong_node_type
+mlx5_ib_param_to_node(enum mlx5_ib_dbg_cc_types param_offset)
+{
+ if (param_offset >= MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE &&
+ param_offset <= MLX5_IB_DBG_CC_RP_GD)
+ return MLX5_IB_RROCE_ECN_RP;
+ else
+ return MLX5_IB_RROCE_ECN_NP;
+}
+
+static u32 mlx5_get_cc_param_val(void *field, int offset)
+{
+ switch (offset) {
+ case MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ clamp_tgt_rate);
+ case MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE_ATI:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ clamp_tgt_rate_after_time_inc);
+ case MLX5_IB_DBG_CC_RP_TIME_RESET:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ rpg_time_reset);
+ case MLX5_IB_DBG_CC_RP_BYTE_RESET:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ rpg_byte_reset);
+ case MLX5_IB_DBG_CC_RP_THRESHOLD:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ rpg_threshold);
+ case MLX5_IB_DBG_CC_RP_AI_RATE:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ rpg_ai_rate);
+ case MLX5_IB_DBG_CC_RP_HAI_RATE:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ rpg_hai_rate);
+ case MLX5_IB_DBG_CC_RP_MIN_DEC_FAC:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ rpg_min_dec_fac);
+ case MLX5_IB_DBG_CC_RP_MIN_RATE:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ rpg_min_rate);
+ case MLX5_IB_DBG_CC_RP_RATE_TO_SET_ON_FIRST_CNP:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ rate_to_set_on_first_cnp);
+ case MLX5_IB_DBG_CC_RP_DCE_TCP_G:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ dce_tcp_g);
+ case MLX5_IB_DBG_CC_RP_DCE_TCP_RTT:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ dce_tcp_rtt);
+ case MLX5_IB_DBG_CC_RP_RATE_REDUCE_MONITOR_PERIOD:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ rate_reduce_monitor_period);
+ case MLX5_IB_DBG_CC_RP_INITIAL_ALPHA_VALUE:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ initial_alpha_value);
+ case MLX5_IB_DBG_CC_RP_GD:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ rpg_gd);
+ case MLX5_IB_DBG_CC_NP_CNP_DSCP:
+ return MLX5_GET(cong_control_r_roce_ecn_np, field,
+ cnp_dscp);
+ case MLX5_IB_DBG_CC_NP_CNP_PRIO_MODE:
+ return MLX5_GET(cong_control_r_roce_ecn_np, field,
+ cnp_prio_mode);
+ case MLX5_IB_DBG_CC_NP_CNP_PRIO:
+ return MLX5_GET(cong_control_r_roce_ecn_np, field,
+ cnp_802p_prio);
+ default:
+ return 0;
+ }
+}
+
+static void mlx5_ib_set_cc_param_mask_val(void *field, int offset,
+ u32 var, u32 *attr_mask)
+{
+ switch (offset) {
+ case MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE:
+ *attr_mask |= MLX5_IB_RP_CLAMP_TGT_RATE_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ clamp_tgt_rate, var);
+ break;
+ case MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE_ATI:
+ *attr_mask |= MLX5_IB_RP_CLAMP_TGT_RATE_ATI_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ clamp_tgt_rate_after_time_inc, var);
+ break;
+ case MLX5_IB_DBG_CC_RP_TIME_RESET:
+ *attr_mask |= MLX5_IB_RP_TIME_RESET_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ rpg_time_reset, var);
+ break;
+ case MLX5_IB_DBG_CC_RP_BYTE_RESET:
+ *attr_mask |= MLX5_IB_RP_BYTE_RESET_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ rpg_byte_reset, var);
+ break;
+ case MLX5_IB_DBG_CC_RP_THRESHOLD:
+ *attr_mask |= MLX5_IB_RP_THRESHOLD_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ rpg_threshold, var);
+ break;
+ case MLX5_IB_DBG_CC_RP_AI_RATE:
+ *attr_mask |= MLX5_IB_RP_AI_RATE_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ rpg_ai_rate, var);
+ break;
+ case MLX5_IB_DBG_CC_RP_HAI_RATE:
+ *attr_mask |= MLX5_IB_RP_HAI_RATE_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ rpg_hai_rate, var);
+ break;
+ case MLX5_IB_DBG_CC_RP_MIN_DEC_FAC:
+ *attr_mask |= MLX5_IB_RP_MIN_DEC_FAC_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ rpg_min_dec_fac, var);
+ break;
+ case MLX5_IB_DBG_CC_RP_MIN_RATE:
+ *attr_mask |= MLX5_IB_RP_MIN_RATE_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ rpg_min_rate, var);
+ break;
+ case MLX5_IB_DBG_CC_RP_RATE_TO_SET_ON_FIRST_CNP:
+ *attr_mask |= MLX5_IB_RP_RATE_TO_SET_ON_FIRST_CNP_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ rate_to_set_on_first_cnp, var);
+ break;
+ case MLX5_IB_DBG_CC_RP_DCE_TCP_G:
+ *attr_mask |= MLX5_IB_RP_DCE_TCP_G_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ dce_tcp_g, var);
+ break;
+ case MLX5_IB_DBG_CC_RP_DCE_TCP_RTT:
+ *attr_mask |= MLX5_IB_RP_DCE_TCP_RTT_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ dce_tcp_rtt, var);
+ break;
+ case MLX5_IB_DBG_CC_RP_RATE_REDUCE_MONITOR_PERIOD:
+ *attr_mask |= MLX5_IB_RP_RATE_REDUCE_MONITOR_PERIOD_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ rate_reduce_monitor_period, var);
+ break;
+ case MLX5_IB_DBG_CC_RP_INITIAL_ALPHA_VALUE:
+ *attr_mask |= MLX5_IB_RP_INITIAL_ALPHA_VALUE_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ initial_alpha_value, var);
+ break;
+ case MLX5_IB_DBG_CC_RP_GD:
+ *attr_mask |= MLX5_IB_RP_GD_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ rpg_gd, var);
+ break;
+ case MLX5_IB_DBG_CC_NP_CNP_DSCP:
+ *attr_mask |= MLX5_IB_NP_CNP_DSCP_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_np, field, cnp_dscp, var);
+ break;
+ case MLX5_IB_DBG_CC_NP_CNP_PRIO_MODE:
+ *attr_mask |= MLX5_IB_NP_CNP_PRIO_MODE_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_np, field, cnp_prio_mode, var);
+ break;
+ case MLX5_IB_DBG_CC_NP_CNP_PRIO:
+ *attr_mask |= MLX5_IB_NP_CNP_PRIO_MODE_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_np, field, cnp_prio_mode, 0);
+ MLX5_SET(cong_control_r_roce_ecn_np, field, cnp_802p_prio, var);
+ break;
+ }
+}
+
+static int mlx5_ib_get_cc_params(struct mlx5_ib_dev *dev, int offset, u32 *var)
+{
+ int outlen = MLX5_ST_SZ_BYTES(query_cong_params_out);
+ void *out;
+ void *field;
+ int err;
+ enum mlx5_ib_cong_node_type node;
+
+ out = kvzalloc(outlen, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ node = mlx5_ib_param_to_node(offset);
+
+ err = mlx5_cmd_query_cong_params(dev->mdev, node, out, outlen);
+ if (err)
+ goto free;
+
+ field = MLX5_ADDR_OF(query_cong_params_out, out, congestion_parameters);
+ *var = mlx5_get_cc_param_val(field, offset);
+
+free:
+ kvfree(out);
+ return err;
+}
+
+static int mlx5_ib_set_cc_params(struct mlx5_ib_dev *dev, int offset, u32 var)
+{
+ int inlen = MLX5_ST_SZ_BYTES(modify_cong_params_in);
+ void *in;
+ void *field;
+ enum mlx5_ib_cong_node_type node;
+ u32 attr_mask = 0;
+ int err;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(modify_cong_params_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_CONG_PARAMS);
+
+ node = mlx5_ib_param_to_node(offset);
+ MLX5_SET(modify_cong_params_in, in, cong_protocol, node);
+
+ field = MLX5_ADDR_OF(modify_cong_params_in, in, congestion_parameters);
+ mlx5_ib_set_cc_param_mask_val(field, offset, var, &attr_mask);
+
+ field = MLX5_ADDR_OF(modify_cong_params_in, in, field_select);
+ MLX5_SET(field_select_r_roce_rp, field, field_select_r_roce_rp,
+ attr_mask);
+
+ err = mlx5_cmd_modify_cong_params(dev->mdev, in, inlen);
+ kvfree(in);
+ return err;
+}
+
+static ssize_t set_param(struct file *filp, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct mlx5_ib_dbg_param *param = filp->private_data;
+ int offset = param->offset;
+ char lbuf[11] = { };
+ u32 var;
+ int ret;
+
+ if (count > sizeof(lbuf))
+ return -EINVAL;
+
+ if (copy_from_user(lbuf, buf, count))
+ return -EFAULT;
+
+ lbuf[sizeof(lbuf) - 1] = '\0';
+
+ if (kstrtou32(lbuf, 0, &var))
+ return -EINVAL;
+
+ ret = mlx5_ib_set_cc_params(param->dev, offset, var);
+ return ret ? ret : count;
+}
+
+static ssize_t get_param(struct file *filp, char __user *buf, size_t count,
+ loff_t *pos)
+{
+ struct mlx5_ib_dbg_param *param = filp->private_data;
+ int offset = param->offset;
+ u32 var = 0;
+ int ret;
+ char lbuf[11];
+
+ if (*pos)
+ return 0;
+
+ ret = mlx5_ib_get_cc_params(param->dev, offset, &var);
+ if (ret)
+ return ret;
+
+ ret = snprintf(lbuf, sizeof(lbuf), "%d\n", var);
+ if (ret < 0)
+ return ret;
+
+ if (copy_to_user(buf, lbuf, ret))
+ return -EFAULT;
+
+ *pos += ret;
+ return ret;
+}
+
+static const struct file_operations dbg_cc_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = set_param,
+ .read = get_param,
+};
+
+void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev)
+{
+ if (!mlx5_debugfs_root ||
+ !dev->dbg_cc_params ||
+ !dev->dbg_cc_params->root)
+ return;
+
+ debugfs_remove_recursive(dev->dbg_cc_params->root);
+ kfree(dev->dbg_cc_params);
+ dev->dbg_cc_params = NULL;
+}
+
+int mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_ib_dbg_cc_params *dbg_cc_params;
+ int i;
+
+ if (!mlx5_debugfs_root)
+ goto out;
+
+ if (!MLX5_CAP_GEN(dev->mdev, cc_query_allowed) ||
+ !MLX5_CAP_GEN(dev->mdev, cc_modify_allowed))
+ goto out;
+
+ dbg_cc_params = kzalloc(sizeof(*dbg_cc_params), GFP_KERNEL);
+ if (!dbg_cc_params)
+ goto out;
+
+ dev->dbg_cc_params = dbg_cc_params;
+
+ dbg_cc_params->root = debugfs_create_dir("cc_params",
+ dev->mdev->priv.dbg_root);
+ if (!dbg_cc_params->root)
+ goto err;
+
+ for (i = 0; i < MLX5_IB_DBG_CC_MAX; i++) {
+ dbg_cc_params->params[i].offset = i;
+ dbg_cc_params->params[i].dev = dev;
+ dbg_cc_params->params[i].dentry =
+ debugfs_create_file(mlx5_ib_dbg_cc_name[i],
+ 0600, dbg_cc_params->root,
+ &dbg_cc_params->params[i],
+ &dbg_cc_fops);
+ if (!dbg_cc_params->params[i].dentry)
+ goto err;
+ }
+out:
+ return 0;
+
+err:
+ mlx5_ib_warn(dev, "cong debugfs failure\n");
+ mlx5_ib_cleanup_cong_debugfs(dev);
+ /*
+ * We don't want to fail driver if debugfs failed to initialize,
+ * so we are not forwarding error to the user.
+ */
+ return 0;
+}
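Each cc_params file simply round-trips one firmware congestion parameter through the command interface shown above. Below is a minimal userspace sketch of how these files might be exercised; the debugfs path and the file name are assumptions for illustration only (the real names come from the mlx5_ib_dbg_cc_name[] table passed to debugfs_create_file() above).

/* Hypothetical example: read and update one congestion control parameter
 * through the cc_params debugfs directory created above. The path and the
 * file name are assumptions for illustration only. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        const char *path =
                "/sys/kernel/debug/mlx5/0000:06:00.0/cc_params/rp_dce_tcp_g";
        char buf[16] = { 0 };
        int fd = open(path, O_RDWR);

        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* get_param() returns the current value as decimal text */
        if (read(fd, buf, sizeof(buf) - 1) > 0)
                printf("current value: %s", buf);

        /* set_param() parses the written text with kstrtou32() */
        if (pwrite(fd, "1019\n", 5, 0) != 5)
                perror("write");

        close(fd);
        return 0;
}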
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index a384d72ea3cd..2aa53f427685 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -499,7 +499,7 @@ static void mlx5_ib_poll_sw_comp(struct mlx5_ib_cq *cq, int num_entries,
struct mlx5_ib_qp *qp;
*npolled = 0;
- /* Find uncompleted WQEs belonging to that cq and retrun mmics ones */
+ /* Find uncompleted WQEs belonging to that cq and return mimics ones */
list_for_each_entry(qp, &cq->list_send_qp, cq_send_list) {
sw_send_comp(qp, num_entries, wc + *npolled, npolled);
if (*npolled >= num_entries)
@@ -751,10 +751,8 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
void *cqc;
int err;
- ucmdlen =
- (udata->inlen - sizeof(struct ib_uverbs_cmd_hdr) <
- sizeof(ucmd)) ? (sizeof(ucmd) -
- sizeof(ucmd.reserved)) : sizeof(ucmd);
+ ucmdlen = udata->inlen < sizeof(ucmd) ?
+ (sizeof(ucmd) - sizeof(ucmd.reserved)) : sizeof(ucmd);
if (ib_copy_from_udata(&ucmd, udata, ucmdlen))
return -EFAULT;
diff --git a/drivers/infiniband/hw/mlx5/ib_virt.c b/drivers/infiniband/hw/mlx5/ib_virt.c
index c1b9de800fe5..649a3364f838 100644
--- a/drivers/infiniband/hw/mlx5/ib_virt.c
+++ b/drivers/infiniband/hw/mlx5/ib_virt.c
@@ -96,6 +96,7 @@ int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
struct mlx5_ib_dev *dev = to_mdev(device);
struct mlx5_core_dev *mdev = dev->mdev;
struct mlx5_hca_vport_context *in;
+ struct mlx5_vf_context *vfs_ctx = mdev->priv.sriov.vfs_ctx;
int err;
in = kzalloc(sizeof(*in), GFP_KERNEL);
@@ -109,6 +110,8 @@ int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
}
in->field_select = MLX5_HCA_VPORT_SEL_STATE_POLICY;
err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in);
+ if (!err)
+ vfs_ctx[vf].policy = in->policy;
out:
kfree(in);
@@ -151,6 +154,7 @@ static int set_vf_node_guid(struct ib_device *device, int vf, u8 port, u64 guid)
struct mlx5_ib_dev *dev = to_mdev(device);
struct mlx5_core_dev *mdev = dev->mdev;
struct mlx5_hca_vport_context *in;
+ struct mlx5_vf_context *vfs_ctx = mdev->priv.sriov.vfs_ctx;
int err;
in = kzalloc(sizeof(*in), GFP_KERNEL);
@@ -160,6 +164,8 @@ static int set_vf_node_guid(struct ib_device *device, int vf, u8 port, u64 guid)
in->field_select = MLX5_HCA_VPORT_SEL_NODE_GUID;
in->node_guid = guid;
err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in);
+ if (!err)
+ vfs_ctx[vf].node_guid = guid;
kfree(in);
return err;
}
@@ -169,6 +175,7 @@ static int set_vf_port_guid(struct ib_device *device, int vf, u8 port, u64 guid)
struct mlx5_ib_dev *dev = to_mdev(device);
struct mlx5_core_dev *mdev = dev->mdev;
struct mlx5_hca_vport_context *in;
+ struct mlx5_vf_context *vfs_ctx = mdev->priv.sriov.vfs_ctx;
int err;
in = kzalloc(sizeof(*in), GFP_KERNEL);
@@ -178,6 +185,8 @@ static int set_vf_port_guid(struct ib_device *device, int vf, u8 port, u64 guid)
in->field_select = MLX5_HCA_VPORT_SEL_PORT_GUID;
in->port_guid = guid;
err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in);
+ if (!err)
+ vfs_ctx[vf].port_guid = guid;
kfree(in);
return err;
}
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 95db929bdc34..1003b0133a49 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -78,7 +78,7 @@ static int process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
u16 slid;
int err;
- slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
+ slid = in_wc ? ib_lid_cpu16(in_wc->slid) : be16_to_cpu(IB_LID_PERMISSIVE);
if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0)
return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
@@ -204,7 +204,7 @@ static int process_pma_cmd(struct ib_device *ibdev, u8 port_num,
int err;
void *out_cnt;
- /* Decalring support of extended counters */
+ /* Declaring support of extended counters */
if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) {
struct ib_class_port_info cpi = {};
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index a7f2e60085c4..ab3c562d5ba7 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -30,6 +30,7 @@
* SOFTWARE.
*/
+#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -58,6 +59,7 @@
#include <linux/mlx5/vport.h>
#include "mlx5_ib.h"
#include "cmd.h"
+#include <linux/mlx5/vport.h>
#define DRIVER_NAME "mlx5_ib"
#define DRIVER_VERSION "5.0-0"
@@ -65,7 +67,6 @@
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(DRIVER_VERSION);
static char mlx5_version[] =
DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
@@ -97,6 +98,20 @@ mlx5_ib_port_link_layer(struct ib_device *device, u8 port_num)
return mlx5_port_type_cap_to_rdma_ll(port_type_cap);
}
+static int get_port_state(struct ib_device *ibdev,
+ u8 port_num,
+ enum ib_port_state *state)
+{
+ struct ib_port_attr attr;
+ int ret;
+
+ memset(&attr, 0, sizeof(attr));
+ ret = mlx5_ib_query_port(ibdev, port_num, &attr);
+ if (!ret)
+ *state = attr.state;
+ return ret;
+}
+
static int mlx5_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
@@ -114,6 +129,7 @@ static int mlx5_netdev_event(struct notifier_block *this,
write_unlock(&ibdev->roce.netdev_lock);
break;
+ case NETDEV_CHANGE:
case NETDEV_UP:
case NETDEV_DOWN: {
struct net_device *lag_ndev = mlx5_lag_get_roce_netdev(ibdev->mdev);
@@ -127,10 +143,23 @@ static int mlx5_netdev_event(struct notifier_block *this,
if ((upper == ndev || (!upper && ndev == ibdev->roce.netdev))
&& ibdev->ib_active) {
struct ib_event ibev = { };
+ enum ib_port_state port_state;
+ if (get_port_state(&ibdev->ib_dev, 1, &port_state))
+ return NOTIFY_DONE;
+
+ if (ibdev->roce.last_port_state == port_state)
+ return NOTIFY_DONE;
+
+ ibdev->roce.last_port_state = port_state;
ibev.device = &ibdev->ib_dev;
- ibev.event = (event == NETDEV_UP) ?
- IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
+ if (port_state == IB_PORT_DOWN)
+ ibev.event = IB_EVENT_PORT_ERR;
+ else if (port_state == IB_PORT_ACTIVE)
+ ibev.event = IB_EVENT_PORT_ACTIVE;
+ else
+ return NOTIFY_DONE;
+
ibev.element.port_num = 1;
ib_dispatch_event(&ibev);
}
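The handler now derives the IB event from the queried port state rather than from the netdev event type, and last_port_state suppresses duplicate reports. A condensed model of that filtering, outside the driver:

/* Toy model of the NETDEV_CHANGE handling above: only a transition of the
 * queried port state produces an event, no matter how many netdev
 * notifications arrive. Illustrative only. */
#include <stdio.h>

enum port_state { PORT_DOWN, PORT_ACTIVE };

static enum port_state last_reported = PORT_DOWN;

static void on_netdev_event(enum port_state queried)
{
        if (queried == last_reported)
                return;                 /* duplicate notification, ignore */

        last_reported = queried;
        puts(queried == PORT_ACTIVE ? "dispatch IB_EVENT_PORT_ACTIVE"
                                    : "dispatch IB_EVENT_PORT_ERR");
}

int main(void)
{
        on_netdev_event(PORT_ACTIVE);   /* reported */
        on_netdev_event(PORT_ACTIVE);   /* suppressed */
        on_netdev_event(PORT_DOWN);     /* reported */
        return 0;
}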
@@ -668,6 +697,14 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->device_cap_flags |= IB_DEVICE_UD_TSO;
}
+ if (MLX5_CAP_GEN(dev->mdev, rq_delay_drop) &&
+ MLX5_CAP_GEN(dev->mdev, general_notification_event))
+ props->raw_packet_caps |= IB_RAW_PACKET_CAP_DELAY_DROP;
+
+ if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
+ MLX5_CAP_IPOIB_ENHANCED(mdev, csum_cap))
+ props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
+
if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
MLX5_CAP_ETH(dev->mdev, scatter_fcs)) {
/* Legacy bit to support old userspace libraries */
@@ -740,6 +777,16 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
1 << MLX5_CAP_GEN(dev->mdev, log_max_rq);
}
+ if (MLX5_CAP_GEN(mdev, tag_matching)) {
+ props->xrq_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE;
+ props->xrq_caps.max_num_tags =
+ (1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1;
+ props->xrq_caps.flags = IB_TM_CAP_RC;
+ props->xrq_caps.max_ops =
+ 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
+ props->xrq_caps.max_sge = MLX5_TM_MAX_SGE;
+ }
+
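As a worked example of the capability math just above, hypothetical HCA values of log_tag_matching_list_sz = 7 and log_max_qp_sz = 13 would yield 127 tags and 8192 operations:

/* Reproduces the two xrq_caps computations above with example values. */
#include <stdio.h>

int main(void)
{
        int log_tag_matching_list_sz = 7;       /* example capability values */
        int log_max_qp_sz = 13;

        printf("max_num_tags = %d\n", (1 << log_tag_matching_list_sz) - 1); /* 127 */
        printf("max_ops      = %d\n", 1 << log_max_qp_sz);                  /* 8192 */
        return 0;
}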
if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) {
resp.cqe_comp_caps.max_num =
MLX5_CAP_GEN(dev->mdev, cqe_compression) ?
@@ -765,8 +812,14 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes,
uhw->outlen)) {
- resp.mlx5_ib_support_multi_pkt_send_wqes =
- MLX5_CAP_ETH(mdev, multi_pkt_send_wqe);
+ if (MLX5_CAP_ETH(mdev, multi_pkt_send_wqe))
+ resp.mlx5_ib_support_multi_pkt_send_wqes =
+ MLX5_IB_ALLOW_MPW;
+
+ if (MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe))
+ resp.mlx5_ib_support_multi_pkt_send_wqes |=
+ MLX5_IB_SUPPORT_EMPW;
+
resp.response_length +=
sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
}
@@ -774,6 +827,27 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
if (field_avail(typeof(resp), reserved, uhw->outlen))
resp.response_length += sizeof(resp.reserved);
+ if (field_avail(typeof(resp), sw_parsing_caps,
+ uhw->outlen)) {
+ resp.response_length += sizeof(resp.sw_parsing_caps);
+ if (MLX5_CAP_ETH(mdev, swp)) {
+ resp.sw_parsing_caps.sw_parsing_offloads |=
+ MLX5_IB_SW_PARSING;
+
+ if (MLX5_CAP_ETH(mdev, swp_csum))
+ resp.sw_parsing_caps.sw_parsing_offloads |=
+ MLX5_IB_SW_PARSING_CSUM;
+
+ if (MLX5_CAP_ETH(mdev, swp_lso))
+ resp.sw_parsing_caps.sw_parsing_offloads |=
+ MLX5_IB_SW_PARSING_LSO;
+
+ if (resp.sw_parsing_caps.sw_parsing_offloads)
+ resp.sw_parsing_caps.supported_qpts =
+ BIT(IB_QPT_RAW_PACKET);
+ }
+ }
+
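A rough sketch of how a consumer of the extended query-device response could test the new multi-packet-WQE and software-parsing bits; the flag values and the response struct below are illustrative stand-ins, not the real uapi layout:

/* Simplified sketch: testing the MPW/EMPW and software parsing bits set
 * above. Flag values and the response struct are illustrative stand-ins. */
#include <stdint.h>
#include <stdio.h>

#define ALLOW_MPW        (1u << 0)      /* assumed values */
#define SUPPORT_EMPW     (1u << 1)
#define SW_PARSING       (1u << 0)
#define SW_PARSING_CSUM  (1u << 1)
#define SW_PARSING_LSO   (1u << 2)

struct sample_resp {
        uint32_t multi_pkt_send_wqes;
        uint32_t sw_parsing_offloads;
};

int main(void)
{
        struct sample_resp resp = {
                .multi_pkt_send_wqes = ALLOW_MPW | SUPPORT_EMPW,
                .sw_parsing_offloads = SW_PARSING | SW_PARSING_CSUM,
        };

        if (resp.multi_pkt_send_wqes & SUPPORT_EMPW)
                puts("enhanced multi-packet send WQEs supported");
        else if (resp.multi_pkt_send_wqes & ALLOW_MPW)
                puts("legacy multi-packet send WQEs allowed");

        if (resp.sw_parsing_offloads & SW_PARSING_CSUM)
                puts("software parsing with checksum offload");
        return 0;
}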
if (uhw->outlen) {
err = ib_copy_to_udata(uhw, &resp, resp.response_length);
@@ -1085,6 +1159,12 @@ static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
bool is_ib = (mlx5_ib_port_link_layer(ibdev, port) ==
IB_LINK_LAYER_INFINIBAND);
+ /* CM layer calls ib_modify_port() regardless of the link layer. For
+ * Ethernet ports, qkey violation and Port capabilities are meaningless.
+ */
+ if (!is_ib)
+ return 0;
+
if (MLX5_CAP_GEN(dev->mdev, ib_virt) && is_ib) {
change_mask = props->clr_port_cap_mask | props->set_port_cap_mask;
value = ~props->clr_port_cap_mask | props->set_port_cap_mask;
@@ -1138,7 +1218,7 @@ static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k,
if (req->num_low_latency_bfregs > req->total_num_bfregs - 1)
return -EINVAL;
- mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, alloated %d, using %d sys pages\n",
+ mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, allocated %d, using %d sys pages\n",
MLX5_CAP_GEN(dev->mdev, uar_4k) ? "yes" : "no",
lib_uar_4k ? "yes" : "no", ref_bfregs,
req->total_num_bfregs, *num_sys_pages);
@@ -1187,6 +1267,45 @@ static int deallocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *con
return 0;
}
+static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn)
+{
+ int err;
+
+ err = mlx5_core_alloc_transport_domain(dev->mdev, tdn);
+ if (err)
+ return err;
+
+ if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
+ !MLX5_CAP_GEN(dev->mdev, disable_local_lb))
+ return err;
+
+ mutex_lock(&dev->lb_mutex);
+ dev->user_td++;
+
+ if (dev->user_td == 2)
+ err = mlx5_nic_vport_update_local_lb(dev->mdev, true);
+
+ mutex_unlock(&dev->lb_mutex);
+ return err;
+}
+
+static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn)
+{
+ mlx5_core_dealloc_transport_domain(dev->mdev, tdn);
+
+ if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
+ !MLX5_CAP_GEN(dev->mdev, disable_local_lb))
+ return;
+
+ mutex_lock(&dev->lb_mutex);
+ dev->user_td--;
+
+ if (dev->user_td < 2)
+ mlx5_nic_vport_update_local_lb(dev->mdev, false);
+
+ mutex_unlock(&dev->lb_mutex);
+}
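The user_td counter above exists because local loopback only matters once two user transport domains can reach each other; a stripped-down model of that bookkeeping follows (names and behavior simplified, illustrative only):

/* Toy model of the user_td bookkeeping: loopback is enabled exactly when
 * the number of user transport domains reaches two, and disabled again
 * once it drops below two. Illustrative only. */
#include <stdbool.h>
#include <stdio.h>

static int user_td;
static bool lb_enabled;

static void td_alloc(void)
{
        user_td++;
        if (user_td == 2) {             /* second TD: enable loopback once */
                lb_enabled = true;
                puts("local loopback enabled");
        }
}

static void td_dealloc(void)
{
        user_td--;
        if (user_td < 2 && lb_enabled) {        /* back below two: disable */
                lb_enabled = false;
                puts("local loopback disabled");
        }
}

int main(void)
{
        td_alloc();     /* 1 TD  -> loopback stays off */
        td_alloc();     /* 2 TDs -> enabled */
        td_dealloc();   /* 1 TD  -> disabled */
        return 0;
}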
+
static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
struct ib_udata *udata)
{
@@ -1197,7 +1316,6 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
struct mlx5_bfreg_info *bfregi;
int ver;
int err;
- size_t reqlen;
size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
max_cqe_version);
bool lib_uar_4k;
@@ -1205,18 +1323,14 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
if (!dev->ib_active)
return ERR_PTR(-EAGAIN);
- if (udata->inlen < sizeof(struct ib_uverbs_cmd_hdr))
- return ERR_PTR(-EINVAL);
-
- reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
- if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
+ if (udata->inlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
ver = 0;
- else if (reqlen >= min_req_v2)
+ else if (udata->inlen >= min_req_v2)
ver = 2;
else
return ERR_PTR(-EINVAL);
- err = ib_copy_from_udata(&req, udata, min(reqlen, sizeof(req)));
+ err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
if (err)
return ERR_PTR(err);
@@ -1295,8 +1409,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
mutex_init(&context->upd_xlt_page_mutex);
if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) {
- err = mlx5_core_alloc_transport_domain(dev->mdev,
- &context->tdn);
+ err = mlx5_ib_alloc_transport_domain(dev, &context->tdn);
if (err)
goto out_page;
}
@@ -1362,7 +1475,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
out_td:
if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
- mlx5_core_dealloc_transport_domain(dev->mdev, context->tdn);
+ mlx5_ib_dealloc_transport_domain(dev, context->tdn);
out_page:
free_page(context->upd_xlt_page);
@@ -1390,7 +1503,7 @@ static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
bfregi = &context->bfregi;
if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
- mlx5_core_dealloc_transport_domain(dev->mdev, context->tdn);
+ mlx5_ib_dealloc_transport_domain(dev, context->tdn);
free_page(context->upd_xlt_page);
deallocate_uars(dev, context);
@@ -2028,23 +2141,34 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
* it won't fall into the multicast flow steering table and this rule
* could steal other multicast packets.
*/
-static bool flow_is_multicast_only(struct ib_flow_attr *ib_attr)
+static bool flow_is_multicast_only(const struct ib_flow_attr *ib_attr)
{
- struct ib_flow_spec_eth *eth_spec;
+ union ib_flow_spec *flow_spec;
if (ib_attr->type != IB_FLOW_ATTR_NORMAL ||
- ib_attr->size < sizeof(struct ib_flow_attr) +
- sizeof(struct ib_flow_spec_eth) ||
ib_attr->num_of_specs < 1)
return false;
- eth_spec = (struct ib_flow_spec_eth *)(ib_attr + 1);
- if (eth_spec->type != IB_FLOW_SPEC_ETH ||
- eth_spec->size != sizeof(*eth_spec))
+ flow_spec = (union ib_flow_spec *)(ib_attr + 1);
+ if (flow_spec->type == IB_FLOW_SPEC_IPV4) {
+ struct ib_flow_spec_ipv4 *ipv4_spec;
+
+ ipv4_spec = (struct ib_flow_spec_ipv4 *)flow_spec;
+ if (ipv4_is_multicast(ipv4_spec->val.dst_ip))
+ return true;
+
return false;
+ }
+
+ if (flow_spec->type == IB_FLOW_SPEC_ETH) {
+ struct ib_flow_spec_eth *eth_spec;
+
+ eth_spec = (struct ib_flow_spec_eth *)flow_spec;
+ return is_multicast_ether_addr(eth_spec->mask.dst_mac) &&
+ is_multicast_ether_addr(eth_spec->val.dst_mac);
+ }
- return is_multicast_ether_addr(eth_spec->mask.dst_mac) &&
- is_multicast_ether_addr(eth_spec->val.dst_mac);
+ return false;
}
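The rewritten classifier treats a flow as multicast-only if either the IPv4 destination is in 224.0.0.0/4 or the Ethernet destination has the group bit set. A standalone sanity check of those two tests, with the kernel helpers reimplemented for the example:

/* Standalone check of the two multicast tests used by
 * flow_is_multicast_only(): IPv4 224.0.0.0/4 and an Ethernet address with
 * the group bit set in the first octet. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

static bool ipv4_is_multicast(uint32_t addr_be)  /* mirrors the kernel helper */
{
        return (addr_be & htonl(0xf0000000)) == htonl(0xe0000000);
}

static bool is_multicast_ether_addr(const uint8_t *mac)
{
        return mac[0] & 0x01;
}

int main(void)
{
        uint32_t mcast_ip, ucast_ip;
        const uint8_t mcast_mac[6] = { 0x01, 0x00, 0x5e, 0x01, 0x01, 0x01 };

        inet_pton(AF_INET, "239.1.1.1", &mcast_ip);
        inet_pton(AF_INET, "192.0.2.1", &ucast_ip);

        printf("239.1.1.1 multicast: %d\n", ipv4_is_multicast(mcast_ip));   /* 1 */
        printf("192.0.2.1 multicast: %d\n", ipv4_is_multicast(ucast_ip));   /* 0 */
        printf("01:00:5e mac multicast: %d\n",
               is_multicast_ether_addr(mcast_mac));                         /* 1 */
        return 0;
}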
static bool is_valid_ethertype(struct mlx5_core_dev *mdev,
@@ -2229,10 +2353,31 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
return err ? ERR_PTR(err) : prio;
}
-static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
- struct mlx5_ib_flow_prio *ft_prio,
- const struct ib_flow_attr *flow_attr,
- struct mlx5_flow_destination *dst)
+static void set_underlay_qp(struct mlx5_ib_dev *dev,
+ struct mlx5_flow_spec *spec,
+ u32 underlay_qpn)
+{
+ void *misc_params_c = MLX5_ADDR_OF(fte_match_param,
+ spec->match_criteria,
+ misc_parameters);
+ void *misc_params_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters);
+
+ if (underlay_qpn &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
+ ft_field_support.bth_dst_qp)) {
+ MLX5_SET(fte_match_set_misc,
+ misc_params_v, bth_dst_qp, underlay_qpn);
+ MLX5_SET(fte_match_set_misc,
+ misc_params_c, bth_dst_qp, 0xffffff);
+ }
+}
+
+static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_flow_prio *ft_prio,
+ const struct ib_flow_attr *flow_attr,
+ struct mlx5_flow_destination *dst,
+ u32 underlay_qpn)
{
struct mlx5_flow_table *ft = ft_prio->flow_table;
struct mlx5_ib_flow_handler *handler;
@@ -2268,6 +2413,9 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
ib_flow += ((union ib_flow_spec *)ib_flow)->size;
}
+ if (!flow_is_multicast_only(flow_attr))
+ set_underlay_qp(dev, spec, underlay_qpn);
+
spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
if (is_drop) {
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
@@ -2307,6 +2455,14 @@ free:
return err ? ERR_PTR(err) : handler;
}
+static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_flow_prio *ft_prio,
+ const struct ib_flow_attr *flow_attr,
+ struct mlx5_flow_destination *dst)
+{
+ return _create_flow_rule(dev, ft_prio, flow_attr, dst, 0);
+}
+
static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *dev,
struct mlx5_ib_flow_prio *ft_prio,
struct ib_flow_attr *flow_attr,
@@ -2443,6 +2599,7 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
struct mlx5_ib_flow_prio *ft_prio_tx = NULL;
struct mlx5_ib_flow_prio *ft_prio;
int err;
+ int underlay_qpn;
if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO)
return ERR_PTR(-ENOMEM);
@@ -2483,8 +2640,10 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
handler = create_dont_trap_rule(dev, ft_prio,
flow_attr, dst);
} else {
- handler = create_flow_rule(dev, ft_prio, flow_attr,
- dst);
+ underlay_qpn = (mqp->flags & MLX5_IB_QP_UNDERLAY) ?
+ mqp->underlay_qpn : 0;
+ handler = _create_flow_rule(dev, ft_prio, flow_attr,
+ dst, underlay_qpn);
}
} else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
@@ -2522,8 +2681,14 @@ unlock:
static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+ struct mlx5_ib_qp *mqp = to_mqp(ibqp);
int err;
+ if (mqp->flags & MLX5_IB_QP_UNDERLAY) {
+ mlx5_ib_dbg(dev, "Attaching a multi cast group to underlay QP is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
err = mlx5_core_attach_mcg(dev->mdev, gid, ibqp->qp_num);
if (err)
mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
@@ -2685,6 +2850,26 @@ static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev)
spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
}
+static void delay_drop_handler(struct work_struct *work)
+{
+ int err;
+ struct mlx5_ib_delay_drop *delay_drop =
+ container_of(work, struct mlx5_ib_delay_drop,
+ delay_drop_work);
+
+ atomic_inc(&delay_drop->events_cnt);
+
+ mutex_lock(&delay_drop->lock);
+ err = mlx5_core_set_delay_drop(delay_drop->dev->mdev,
+ delay_drop->timeout);
+ if (err) {
+ mlx5_ib_warn(delay_drop->dev, "Failed to set delay drop, timeout=%u\n",
+ delay_drop->timeout);
+ delay_drop->activate = false;
+ }
+ mutex_unlock(&delay_drop->lock);
+}
+
static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
enum mlx5_dev_event event, unsigned long param)
{
@@ -2737,8 +2922,11 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
ibev.event = IB_EVENT_CLIENT_REREGISTER;
port = (u8)param;
break;
+ case MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT:
+ schedule_work(&ibdev->delay_drop.delay_drop_work);
+ goto out;
default:
- return;
+ goto out;
}
ibev.device = &ibdev->ib_dev;
@@ -2746,7 +2934,7 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
if (port < 1 || port > ibdev->num_ports) {
mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
- return;
+ goto out;
}
if (ibdev->ib_active)
@@ -2754,6 +2942,9 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
if (fatal)
ibdev->ib_active = false;
+
+out:
+ return;
}
static int set_has_smi_cap(struct mlx5_ib_dev *dev)
@@ -3036,7 +3227,7 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
attr.attr.max_sge = 1;
attr.attr.max_wr = 1;
attr.srq_type = IB_SRQT_XRC;
- attr.ext.xrc.cq = devr->c0;
+ attr.ext.cq = devr->c0;
attr.ext.xrc.xrcd = devr->x0;
devr->s0 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
@@ -3051,9 +3242,9 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
devr->s0->srq_context = NULL;
devr->s0->srq_type = IB_SRQT_XRC;
devr->s0->ext.xrc.xrcd = devr->x0;
- devr->s0->ext.xrc.cq = devr->c0;
+ devr->s0->ext.cq = devr->c0;
atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt);
- atomic_inc(&devr->s0->ext.xrc.cq->usecnt);
+ atomic_inc(&devr->s0->ext.cq->usecnt);
atomic_inc(&devr->p0->usecnt);
atomic_set(&devr->s0->usecnt, 0);
@@ -3072,9 +3263,9 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
devr->s1->event_handler = NULL;
devr->s1->srq_context = NULL;
devr->s1->srq_type = IB_SRQT_BASIC;
- devr->s1->ext.xrc.cq = devr->c0;
+ devr->s1->ext.cq = devr->c0;
atomic_inc(&devr->p0->usecnt);
- atomic_set(&devr->s0->usecnt, 0);
+ atomic_set(&devr->s1->usecnt, 0);
for (port = 0; port < ARRAY_SIZE(devr->ports); ++port) {
INIT_WORK(&devr->ports[port].pkey_change_work,
@@ -3167,13 +3358,13 @@ static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
return 0;
}
-static void get_dev_fw_str(struct ib_device *ibdev, char *str,
- size_t str_len)
+static void get_dev_fw_str(struct ib_device *ibdev, char *str)
{
struct mlx5_ib_dev *dev =
container_of(ibdev, struct mlx5_ib_dev, ib_dev);
- snprintf(str, str_len, "%d.%d.%04d", fw_rev_maj(dev->mdev),
- fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
+ snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%04d",
+ fw_rev_maj(dev->mdev), fw_rev_min(dev->mdev),
+ fw_rev_sub(dev->mdev));
}
static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev)
@@ -3313,6 +3504,17 @@ static const struct mlx5_ib_counter cong_cnts[] = {
INIT_CONG_COUNTER(np_cnp_sent),
};
+static const struct mlx5_ib_counter extended_err_cnts[] = {
+ INIT_Q_COUNTER(resp_local_length_error),
+ INIT_Q_COUNTER(resp_cqe_error),
+ INIT_Q_COUNTER(req_cqe_error),
+ INIT_Q_COUNTER(req_remote_invalid_request),
+ INIT_Q_COUNTER(req_remote_access_errors),
+ INIT_Q_COUNTER(resp_remote_access_errors),
+ INIT_Q_COUNTER(resp_cqe_flush_error),
+ INIT_Q_COUNTER(req_cqe_flush_error),
+};
+
static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev)
{
unsigned int i;
@@ -3337,6 +3539,10 @@ static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev,
if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters))
num_counters += ARRAY_SIZE(retrans_q_cnts);
+
+ if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters))
+ num_counters += ARRAY_SIZE(extended_err_cnts);
+
cnts->num_q_counters = num_counters;
if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
@@ -3386,6 +3592,13 @@ static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev,
}
}
+ if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters)) {
+ for (i = 0; i < ARRAY_SIZE(extended_err_cnts); i++, j++) {
+ names[j] = extended_err_cnts[i].name;
+ offsets[j] = extended_err_cnts[i].offset;
+ }
+ }
+
if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
for (i = 0; i < ARRAY_SIZE(cong_cnts); i++, j++) {
names[j] = cong_cnts[i].name;
@@ -3556,6 +3769,136 @@ mlx5_ib_alloc_rdma_netdev(struct ib_device *hca,
return netdev;
}
+static void delay_drop_debugfs_cleanup(struct mlx5_ib_dev *dev)
+{
+ if (!dev->delay_drop.dbg)
+ return;
+ debugfs_remove_recursive(dev->delay_drop.dbg->dir_debugfs);
+ kfree(dev->delay_drop.dbg);
+ dev->delay_drop.dbg = NULL;
+}
+
+static void cancel_delay_drop(struct mlx5_ib_dev *dev)
+{
+ if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
+ return;
+
+ cancel_work_sync(&dev->delay_drop.delay_drop_work);
+ delay_drop_debugfs_cleanup(dev);
+}
+
+static ssize_t delay_drop_timeout_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
+ char lbuf[20];
+ int len;
+
+ len = snprintf(lbuf, sizeof(lbuf), "%u\n", delay_drop->timeout);
+ return simple_read_from_buffer(buf, count, pos, lbuf, len);
+}
+
+static ssize_t delay_drop_timeout_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
+ u32 timeout;
+ u32 var;
+
+ if (kstrtouint_from_user(buf, count, 0, &var))
+ return -EFAULT;
+
+ timeout = min_t(u32, roundup(var, 100), MLX5_MAX_DELAY_DROP_TIMEOUT_MS *
+ 1000);
+ if (timeout != var)
+ mlx5_ib_dbg(delay_drop->dev, "Round delay drop timeout to %u usec\n",
+ timeout);
+
+ delay_drop->timeout = timeout;
+
+ return count;
+}
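The write handler rounds the requested timeout up to a 100 usec granularity and caps it at MLX5_MAX_DELAY_DROP_TIMEOUT_MS * 1000 (100000 usec with the constant added in mlx5_ib.h below). The same arithmetic in isolation:

/* Mirrors the clamping done in delay_drop_timeout_write(): round up to a
 * 100 usec granularity, then cap at 100 ms expressed in usec. */
#include <stdint.h>
#include <stdio.h>

#define MAX_DELAY_DROP_TIMEOUT_US       (100 * 1000)

static uint32_t clamp_timeout(uint32_t var)
{
        uint32_t t = (var + 99) / 100 * 100;    /* roundup(var, 100) */

        return t > MAX_DELAY_DROP_TIMEOUT_US ? MAX_DELAY_DROP_TIMEOUT_US : t;
}

int main(void)
{
        printf("%u -> %u\n", 1234u, clamp_timeout(1234));       /* 1300 */
        printf("%u -> %u\n", 250000u, clamp_timeout(250000));   /* 100000 */
        return 0;
}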
+
+static const struct file_operations fops_delay_drop_timeout = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = delay_drop_timeout_write,
+ .read = delay_drop_timeout_read,
+};
+
+static int delay_drop_debugfs_init(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_ib_dbg_delay_drop *dbg;
+
+ if (!mlx5_debugfs_root)
+ return 0;
+
+ dbg = kzalloc(sizeof(*dbg), GFP_KERNEL);
+ if (!dbg)
+ return -ENOMEM;
+
+ dbg->dir_debugfs =
+ debugfs_create_dir("delay_drop",
+ dev->mdev->priv.dbg_root);
+ if (!dbg->dir_debugfs)
+ return -ENOMEM;
+
+ dbg->events_cnt_debugfs =
+ debugfs_create_atomic_t("num_timeout_events", 0400,
+ dbg->dir_debugfs,
+ &dev->delay_drop.events_cnt);
+ if (!dbg->events_cnt_debugfs)
+ goto out_debugfs;
+
+ dbg->rqs_cnt_debugfs =
+ debugfs_create_atomic_t("num_rqs", 0400,
+ dbg->dir_debugfs,
+ &dev->delay_drop.rqs_cnt);
+ if (!dbg->rqs_cnt_debugfs)
+ goto out_debugfs;
+
+ dbg->timeout_debugfs =
+ debugfs_create_file("timeout", 0600,
+ dbg->dir_debugfs,
+ &dev->delay_drop,
+ &fops_delay_drop_timeout);
+ if (!dbg->timeout_debugfs)
+ goto out_debugfs;
+
+ dev->delay_drop.dbg = dbg;
+
+ return 0;
+
+out_debugfs:
+ delay_drop_debugfs_cleanup(dev);
+ return -ENOMEM;
+}
+
+static void init_delay_drop(struct mlx5_ib_dev *dev)
+{
+ if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
+ return;
+
+ mutex_init(&dev->delay_drop.lock);
+ dev->delay_drop.dev = dev;
+ dev->delay_drop.activate = false;
+ dev->delay_drop.timeout = MLX5_MAX_DELAY_DROP_TIMEOUT_MS * 1000;
+ INIT_WORK(&dev->delay_drop.delay_drop_work, delay_drop_handler);
+ atomic_set(&dev->delay_drop.rqs_cnt, 0);
+ atomic_set(&dev->delay_drop.events_cnt, 0);
+
+ if (delay_drop_debugfs_init(dev))
+ mlx5_ib_warn(dev, "Failed to init delay drop debugfs\n");
+}
+
+static const struct cpumask *
+mlx5_ib_get_vector_affinity(struct ib_device *ibdev, int comp_vector)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+
+ return mlx5_get_vector_affinity(dev->mdev, comp_vector);
+}
+
static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
{
struct mlx5_ib_dev *dev;
@@ -3686,6 +4029,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status;
dev->ib_dev.get_port_immutable = mlx5_port_immutable;
dev->ib_dev.get_dev_fw_str = get_dev_fw_str;
+ dev->ib_dev.get_vector_affinity = mlx5_ib_get_vector_affinity;
if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads))
dev->ib_dev.alloc_rdma_netdev = mlx5_ib_alloc_rdma_netdev;
@@ -3723,18 +4067,20 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
}
+ dev->ib_dev.create_flow = mlx5_ib_create_flow;
+ dev->ib_dev.destroy_flow = mlx5_ib_destroy_flow;
+ dev->ib_dev.uverbs_ex_cmd_mask |=
+ (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
+ (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
+
if (mlx5_ib_port_link_layer(&dev->ib_dev, 1) ==
IB_LINK_LAYER_ETHERNET) {
- dev->ib_dev.create_flow = mlx5_ib_create_flow;
- dev->ib_dev.destroy_flow = mlx5_ib_destroy_flow;
dev->ib_dev.create_wq = mlx5_ib_create_wq;
dev->ib_dev.modify_wq = mlx5_ib_modify_wq;
dev->ib_dev.destroy_wq = mlx5_ib_destroy_wq;
dev->ib_dev.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table;
dev->ib_dev.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table;
dev->ib_dev.uverbs_ex_cmd_mask |=
- (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
- (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW) |
(1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
(1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
(1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
@@ -3754,6 +4100,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
err = mlx5_enable_eth(dev);
if (err)
goto err_free_port;
+ dev->roce.last_port_state = IB_PORT_DOWN;
}
err = create_dev_resources(&dev->devr);
@@ -3770,9 +4117,13 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
goto err_odp;
}
+ err = mlx5_ib_init_cong_debugfs(dev);
+ if (err)
+ goto err_cnt;
+
dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
if (!dev->mdev->priv.uar)
- goto err_cnt;
+ goto err_cong;
err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
if (err)
@@ -3790,18 +4141,25 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
if (err)
goto err_dev;
+ init_delay_drop(dev);
+
for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) {
err = device_create_file(&dev->ib_dev.dev,
mlx5_class_attributes[i]);
if (err)
- goto err_umrc;
+ goto err_delay_drop;
}
+ if ((MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
+ MLX5_CAP_GEN(mdev, disable_local_lb))
+ mutex_init(&dev->lb_mutex);
+
dev->ib_active = true;
return dev;
-err_umrc:
+err_delay_drop:
+ cancel_delay_drop(dev);
destroy_umrc_res(dev);
err_dev:
@@ -3817,6 +4175,8 @@ err_uar_page:
mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
err_cnt:
+ mlx5_ib_cleanup_cong_debugfs(dev);
+err_cong:
if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
mlx5_ib_dealloc_counters(dev);
@@ -3846,11 +4206,13 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
struct mlx5_ib_dev *dev = context;
enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 1);
+ cancel_delay_drop(dev);
mlx5_remove_netdev_notifier(dev);
ib_unregister_device(&dev->ib_dev);
mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
mlx5_free_bfreg(dev->mdev, &dev->bfreg);
mlx5_put_uars_page(dev->mdev, mdev->priv.uar);
+ mlx5_ib_cleanup_cong_debugfs(dev);
if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
mlx5_ib_dealloc_counters(dev);
destroy_umrc_res(dev);
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index bdcf25410c99..189e80cd6b2f 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -107,6 +107,11 @@ enum {
MLX5_CQE_VERSION_V1,
};
+enum {
+ MLX5_TM_MAX_RNDV_MSG_SIZE = 64,
+ MLX5_TM_MAX_SGE = 1,
+};
+
struct mlx5_ib_vma_private_data {
struct list_head list;
struct vm_area_struct *vma;
@@ -247,6 +252,10 @@ struct mlx5_ib_wq {
void *qend;
};
+enum mlx5_ib_wq_flags {
+ MLX5_IB_WQ_FLAGS_DELAY_DROP = 0x1,
+};
+
struct mlx5_ib_rwq {
struct ib_wq ibwq;
struct mlx5_core_qp core_qp;
@@ -264,6 +273,7 @@ struct mlx5_ib_rwq {
u32 wqe_count;
u32 wqe_shift;
int wq_sig;
+ u32 create_flags; /* Use enum mlx5_ib_wq_flags */
};
enum {
@@ -378,6 +388,7 @@ struct mlx5_ib_qp {
struct list_head cq_recv_list;
struct list_head cq_send_list;
u32 rate_limit;
+ u32 underlay_qpn;
};
struct mlx5_ib_cq_buf {
@@ -399,6 +410,7 @@ enum mlx5_ib_qp_flags {
MLX5_IB_QP_CAP_SCATTER_FCS = 1 << 7,
MLX5_IB_QP_RSS = 1 << 8,
MLX5_IB_QP_CVLAN_STRIPPING = 1 << 9,
+ MLX5_IB_QP_UNDERLAY = 1 << 10,
};
struct mlx5_umr_wr {
@@ -496,7 +508,7 @@ struct mlx5_ib_mr {
struct mlx5_shared_mr_info *smr_info;
struct list_head list;
int order;
- int umred;
+ bool allocated_from_cache;
int npages;
struct mlx5_ib_dev *dev;
u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
@@ -616,6 +628,63 @@ struct mlx5_roce {
struct net_device *netdev;
struct notifier_block nb;
atomic_t next_port;
+ enum ib_port_state last_port_state;
+};
+
+struct mlx5_ib_dbg_param {
+ int offset;
+ struct mlx5_ib_dev *dev;
+ struct dentry *dentry;
+};
+
+enum mlx5_ib_dbg_cc_types {
+ MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE,
+ MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE_ATI,
+ MLX5_IB_DBG_CC_RP_TIME_RESET,
+ MLX5_IB_DBG_CC_RP_BYTE_RESET,
+ MLX5_IB_DBG_CC_RP_THRESHOLD,
+ MLX5_IB_DBG_CC_RP_AI_RATE,
+ MLX5_IB_DBG_CC_RP_HAI_RATE,
+ MLX5_IB_DBG_CC_RP_MIN_DEC_FAC,
+ MLX5_IB_DBG_CC_RP_MIN_RATE,
+ MLX5_IB_DBG_CC_RP_RATE_TO_SET_ON_FIRST_CNP,
+ MLX5_IB_DBG_CC_RP_DCE_TCP_G,
+ MLX5_IB_DBG_CC_RP_DCE_TCP_RTT,
+ MLX5_IB_DBG_CC_RP_RATE_REDUCE_MONITOR_PERIOD,
+ MLX5_IB_DBG_CC_RP_INITIAL_ALPHA_VALUE,
+ MLX5_IB_DBG_CC_RP_GD,
+ MLX5_IB_DBG_CC_NP_CNP_DSCP,
+ MLX5_IB_DBG_CC_NP_CNP_PRIO_MODE,
+ MLX5_IB_DBG_CC_NP_CNP_PRIO,
+ MLX5_IB_DBG_CC_MAX,
+};
+
+struct mlx5_ib_dbg_cc_params {
+ struct dentry *root;
+ struct mlx5_ib_dbg_param params[MLX5_IB_DBG_CC_MAX];
+};
+
+enum {
+ MLX5_MAX_DELAY_DROP_TIMEOUT_MS = 100,
+};
+
+struct mlx5_ib_dbg_delay_drop {
+ struct dentry *dir_debugfs;
+ struct dentry *rqs_cnt_debugfs;
+ struct dentry *events_cnt_debugfs;
+ struct dentry *timeout_debugfs;
+};
+
+struct mlx5_ib_delay_drop {
+ struct mlx5_ib_dev *dev;
+ struct work_struct delay_drop_work;
+ /* serialize setting of delay drop */
+ struct mutex lock;
+ u32 timeout;
+ bool activate;
+ atomic_t events_cnt;
+ atomic_t rqs_cnt;
+ struct mlx5_ib_dbg_delay_drop *dbg;
};
struct mlx5_ib_dev {
@@ -652,9 +721,15 @@ struct mlx5_ib_dev {
struct list_head qp_list;
/* Array with num_ports elements */
struct mlx5_ib_port *port;
- struct mlx5_sq_bfreg bfreg;
- struct mlx5_sq_bfreg fp_bfreg;
- u8 umr_fence;
+ struct mlx5_sq_bfreg bfreg;
+ struct mlx5_sq_bfreg fp_bfreg;
+ struct mlx5_ib_delay_drop delay_drop;
+ struct mlx5_ib_dbg_cc_params *dbg_cc_params;
+
+ /* protect the user_td */
+ struct mutex lb_mutex;
+ u32 user_td;
+ u8 umr_fence;
};
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
@@ -904,6 +979,9 @@ __be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
int mlx5_get_roce_gid_type(struct mlx5_ib_dev *dev, u8 port_num,
int index, enum ib_gid_type *gid_type);
+void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev);
+int mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev);
+
/* GSI QP helper functions */
struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr);
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 763bb5b36144..0e2789d9bb4d 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -48,7 +48,7 @@ enum {
#define MLX5_UMR_ALIGN 2048
static int clean_mr(struct mlx5_ib_mr *mr);
-static int use_umr(struct mlx5_ib_dev *dev, int order);
+static int mr_cache_max_order(struct mlx5_ib_dev *dev);
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
@@ -183,7 +183,7 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
break;
}
mr->order = ent->order;
- mr->umred = 1;
+ mr->allocated_from_cache = 1;
mr->dev = dev;
MLX5_SET(mkc, mkc, free, 1);
@@ -491,16 +491,18 @@ static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
struct mlx5_mr_cache *cache = &dev->cache;
struct mlx5_ib_mr *mr = NULL;
struct mlx5_cache_ent *ent;
+ int last_umr_cache_entry;
int c;
int i;
c = order2idx(dev, order);
- if (c < 0 || c > MAX_UMR_CACHE_ENTRY) {
+ last_umr_cache_entry = order2idx(dev, mr_cache_max_order(dev));
+ if (c < 0 || c > last_umr_cache_entry) {
mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
return NULL;
}
- for (i = c; i < MAX_UMR_CACHE_ENTRY; i++) {
+ for (i = c; i <= last_umr_cache_entry; i++) {
ent = &cache->ent[i];
mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);
@@ -582,6 +584,15 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
}
}
+static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
+{
+ if (!mlx5_debugfs_root)
+ return;
+
+ debugfs_remove_recursive(dev->cache.root);
+ dev->cache.root = NULL;
+}
+
static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
{
struct mlx5_mr_cache *cache = &dev->cache;
@@ -600,38 +611,34 @@ static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
sprintf(ent->name, "%d", ent->order);
ent->dir = debugfs_create_dir(ent->name, cache->root);
if (!ent->dir)
- return -ENOMEM;
+ goto err;
ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent,
&size_fops);
if (!ent->fsize)
- return -ENOMEM;
+ goto err;
ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent,
&limit_fops);
if (!ent->flimit)
- return -ENOMEM;
+ goto err;
ent->fcur = debugfs_create_u32("cur", 0400, ent->dir,
&ent->cur);
if (!ent->fcur)
- return -ENOMEM;
+ goto err;
ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir,
&ent->miss);
if (!ent->fmiss)
- return -ENOMEM;
+ goto err;
}
return 0;
-}
-
-static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
-{
- if (!mlx5_debugfs_root)
- return;
+err:
+ mlx5_mr_cache_debugfs_cleanup(dev);
- debugfs_remove_recursive(dev->cache.root);
+ return -ENOMEM;
}
static void delay_time_func(unsigned long ctx)
@@ -669,12 +676,12 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
queue_work(cache->wq, &ent->work);
- if (i > MAX_UMR_CACHE_ENTRY) {
+ if (i > MR_CACHE_LAST_STD_ENTRY) {
mlx5_odp_init_mr_cache_entry(ent);
continue;
}
- if (!use_umr(dev, ent->order))
+ if (ent->order > mr_cache_max_order(dev))
continue;
ent->page = PAGE_SHIFT;
@@ -692,6 +699,11 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
if (err)
mlx5_ib_warn(dev, "cache debugfs failure\n");
+ /*
+ * We don't want to fail driver if debugfs failed to initialize,
+ * so we are not forwarding error to the user.
+ */
+
return 0;
}
@@ -796,21 +808,22 @@ err_free:
return ERR_PTR(err);
}
-static int get_octo_len(u64 addr, u64 len, int page_size)
+static int get_octo_len(u64 addr, u64 len, int page_shift)
{
+ u64 page_size = 1ULL << page_shift;
u64 offset;
int npages;
offset = addr & (page_size - 1);
- npages = ALIGN(len + offset, page_size) >> ilog2(page_size);
+ npages = ALIGN(len + offset, page_size) >> page_shift;
return (npages + 1) / 2;
}
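With the new signature the helper returns the number of 16-byte translation octwords: for example, a 1 MiB region starting 1 KiB into a 4 KiB page needs 257 page entries, i.e. (257 + 1) / 2 = 129 octwords. A standalone copy for a quick check:

/* Standalone copy of get_octo_len() for a quick sanity check of the
 * page_shift-based variant introduced above. */
#include <stdint.h>
#include <stdio.h>

static int get_octo_len(uint64_t addr, uint64_t len, int page_shift)
{
        uint64_t page_size = 1ULL << page_shift;
        uint64_t offset = addr & (page_size - 1);
        int npages = (len + offset + page_size - 1) >> page_shift; /* ALIGN() >> shift */

        return (npages + 1) / 2;
}

int main(void)
{
        /* 1 MiB starting 1 KiB into a 4 KiB page: 257 pages -> 129 octwords */
        printf("%d\n", get_octo_len(0x1000 + 1024, 1 << 20, 12));
        return 0;
}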
-static int use_umr(struct mlx5_ib_dev *dev, int order)
+static int mr_cache_max_order(struct mlx5_ib_dev *dev)
{
if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
- return order <= MAX_UMR_CACHE_ENTRY + 2;
- return order <= MLX5_MAX_UMR_SHIFT;
+ return MR_CACHE_LAST_STD_ENTRY + 2;
+ return MLX5_MAX_UMR_SHIFT;
}
static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
@@ -825,7 +838,7 @@ static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
access_flags, 0);
err = PTR_ERR_OR_ZERO(*umem);
if (err < 0) {
- mlx5_ib_err(dev, "umem get failed (%ld)\n", PTR_ERR(umem));
+ mlx5_ib_err(dev, "umem get failed (%d)\n", err);
return err;
}
@@ -886,7 +899,8 @@ static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev,
return err;
}
-static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
+static struct mlx5_ib_mr *alloc_mr_from_cache(
+ struct ib_pd *pd, struct ib_umem *umem,
u64 virt_addr, u64 len, int npages,
int page_shift, int order, int access_flags)
{
@@ -918,16 +932,6 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
mr->mmkey.size = len;
mr->mmkey.pd = to_mpd(pd)->pdn;
- err = mlx5_ib_update_xlt(mr, 0, npages, page_shift,
- MLX5_IB_UPD_XLT_ENABLE);
-
- if (err) {
- mlx5_mr_cache_free(dev, mr);
- return ERR_PTR(err);
- }
-
- mr->live = 1;
-
return mr;
}
@@ -1093,7 +1097,8 @@ free_xlt:
static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
u64 virt_addr, u64 length,
struct ib_umem *umem, int npages,
- int page_shift, int access_flags)
+ int page_shift, int access_flags,
+ bool populate)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_ib_mr *mr;
@@ -1108,15 +1113,19 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
if (!mr)
return ERR_PTR(-ENOMEM);
- inlen = MLX5_ST_SZ_BYTES(create_mkey_in) +
- sizeof(*pas) * ((npages + 1) / 2) * 2;
+ mr->ibmr.pd = pd;
+ mr->access_flags = access_flags;
+
+ inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+ if (populate)
+ inlen += sizeof(*pas) * roundup(npages, 2);
in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
err = -ENOMEM;
goto err_1;
}
pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
- if (!(access_flags & IB_ACCESS_ON_DEMAND))
+ if (populate && !(access_flags & IB_ACCESS_ON_DEMAND))
mlx5_ib_populate_pas(dev, umem, page_shift, pas,
pg_cap ? MLX5_IB_MTT_PRESENT : 0);
@@ -1125,23 +1134,27 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ MLX5_SET(mkc, mkc, free, !populate);
MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);
MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
MLX5_SET(mkc, mkc, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
MLX5_SET(mkc, mkc, lr, 1);
+ MLX5_SET(mkc, mkc, umr_en, 1);
MLX5_SET64(mkc, mkc, start_addr, virt_addr);
MLX5_SET64(mkc, mkc, len, length);
MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
MLX5_SET(mkc, mkc, bsf_octword_size, 0);
MLX5_SET(mkc, mkc, translations_octword_size,
- get_octo_len(virt_addr, length, 1 << page_shift));
+ get_octo_len(virt_addr, length, page_shift));
MLX5_SET(mkc, mkc, log_page_size, page_shift);
MLX5_SET(mkc, mkc, qpn, 0xffffff);
- MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
- get_octo_len(virt_addr, length, 1 << page_shift));
+ if (populate) {
+ MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
+ get_octo_len(virt_addr, length, page_shift));
+ }
err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
if (err) {
@@ -1150,9 +1163,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
}
mr->mmkey.type = MLX5_MKEY_MR;
mr->desc_size = sizeof(struct mlx5_mtt);
- mr->umem = umem;
mr->dev = dev;
- mr->live = 1;
kvfree(in);
mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);
@@ -1192,6 +1203,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
int ncont;
int order;
int err;
+ bool use_umr = true;
mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
start, virt_addr, length, access_flags);
@@ -1210,27 +1222,29 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
err = mr_umem_get(pd, start, length, access_flags, &umem, &npages,
&page_shift, &ncont, &order);
- if (err < 0)
+ if (err < 0)
return ERR_PTR(err);
- if (use_umr(dev, order)) {
- mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift,
- order, access_flags);
+ if (order <= mr_cache_max_order(dev)) {
+ mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
+ page_shift, order, access_flags);
if (PTR_ERR(mr) == -EAGAIN) {
mlx5_ib_dbg(dev, "cache empty for order %d", order);
mr = NULL;
}
- } else if (access_flags & IB_ACCESS_ON_DEMAND &&
- !MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
- err = -EINVAL;
- pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB");
- goto error;
+ } else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
+ if (access_flags & IB_ACCESS_ON_DEMAND) {
+ err = -EINVAL;
+ pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB");
+ goto error;
+ }
+ use_umr = false;
}
if (!mr) {
mutex_lock(&dev->slow_path_mutex);
mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
- page_shift, access_flags);
+ page_shift, access_flags, !use_umr);
mutex_unlock(&dev->slow_path_mutex);
}
@@ -1248,8 +1262,22 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
update_odp_mr(mr);
#endif
- return &mr->ibmr;
+ if (use_umr) {
+ int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;
+
+ if (access_flags & IB_ACCESS_ON_DEMAND)
+ update_xlt_flags |= MLX5_IB_UPD_XLT_ZAP;
+
+ err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift,
+ update_xlt_flags);
+ if (err) {
+ mlx5_ib_dereg_mr(&mr->ibmr);
+ return ERR_PTR(err);
+ }
+ }
+ mr->live = 1;
+ return &mr->ibmr;
error:
ib_umem_release(umem);
return ERR_PTR(err);
@@ -1337,7 +1365,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
/*
* UMR can't be used - MKey needs to be replaced.
*/
- if (mr->umred) {
+ if (mr->allocated_from_cache) {
err = unreg_umr(dev, mr);
if (err)
mlx5_ib_warn(dev, "Failed to unregister MR\n");
@@ -1350,12 +1378,13 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
return err;
mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont,
- page_shift, access_flags);
+ page_shift, access_flags, true);
if (IS_ERR(mr))
return PTR_ERR(mr);
- mr->umred = 0;
+ mr->allocated_from_cache = 0;
+ mr->live = 1;
} else {
/*
* Send a UMR WQE
@@ -1443,7 +1472,7 @@ mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
static int clean_mr(struct mlx5_ib_mr *mr)
{
struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
- int umred = mr->umred;
+ int allocated_from_cache = mr->allocated_from_cache;
int err;
if (mr->sig) {
@@ -1461,20 +1490,20 @@ static int clean_mr(struct mlx5_ib_mr *mr)
mlx5_free_priv_descs(mr);
- if (!umred) {
+ if (!allocated_from_cache) {
+ u32 key = mr->mmkey.key;
+
err = destroy_mkey(dev, mr);
+ kfree(mr);
if (err) {
mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
- mr->mmkey.key, err);
+ key, err);
return err;
}
} else {
mlx5_mr_cache_free(dev, mr);
}
- if (!umred)
- kfree(mr);
-
return 0;
}
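Besides freeing the MR on the non-cached path, the rework stashes mmkey.key before kfree() so the warning no longer reads freed memory. The same copy-then-free pattern in isolation, illustrative only:

/* Minimal illustration of the "copy what you still need, then free"
 * pattern applied to clean_mr() above. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
        uint32_t key;
};

static int destroy_and_report(struct obj *o)
{
        uint32_t key = o->key;  /* copy before the object goes away */
        int err = -5;           /* pretend the destroy command failed */

        free(o);                /* o is now invalid, only the copy is used */

        if (err)
                fprintf(stderr, "failed to destroy key 0x%x (%d)\n", key, err);
        return err;
}

int main(void)
{
        struct obj *o = malloc(sizeof(*o));

        if (!o)
                return 1;
        o->key = 0xabcd;
        destroy_and_report(o);
        return 0;
}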
@@ -1779,7 +1808,7 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
mr->ndescs = sg_nents;
for_each_sg(sgl, sg, sg_nents, i) {
- if (unlikely(i > mr->max_descs))
+ if (unlikely(i >= mr->max_descs))
break;
klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index ae0746754008..3d701c7a4c91 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -939,7 +939,7 @@ static int mlx5_ib_mr_initiator_pfault_handler(
if (qp->ibqp.qp_type != IB_QPT_RC) {
av = *wqe;
- if (av->dqp_dct & be32_to_cpu(MLX5_WQE_AV_EXT))
+ if (av->dqp_dct & cpu_to_be32(MLX5_EXTENDED_UD_AV))
*wqe += sizeof(struct mlx5_av);
else
*wqe += sizeof(struct mlx5_base_av);
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 0889ff367c86..acb79d3a4f1d 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -34,6 +34,7 @@
#include <rdma/ib_umem.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_user_verbs.h>
+#include <linux/mlx5/fs.h>
#include "mlx5_ib.h"
/* not supported currently */
@@ -453,7 +454,8 @@ static int set_user_buf_size(struct mlx5_ib_dev *dev,
return -EINVAL;
}
- if (attr->qp_type == IB_QPT_RAW_PACKET) {
+ if (attr->qp_type == IB_QPT_RAW_PACKET ||
+ qp->flags & MLX5_IB_QP_UNDERLAY) {
base->ubuffer.buf_size = qp->rq.wqe_cnt << qp->rq.wqe_shift;
qp->raw_packet_qp.sq.ubuffer.buf_size = qp->sq.wqe_cnt << 6;
} else {
@@ -675,10 +677,14 @@ err_umem:
return err;
}
-static void destroy_user_rq(struct ib_pd *pd, struct mlx5_ib_rwq *rwq)
+static void destroy_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ struct mlx5_ib_rwq *rwq)
{
struct mlx5_ib_ucontext *context;
+ if (rwq->create_flags & MLX5_IB_WQ_FLAGS_DELAY_DROP)
+ atomic_dec(&dev->delay_drop.rqs_cnt);
+
context = to_mucontext(pd->uobject->context);
mlx5_ib_db_unmap_user(context, &rwq->db);
if (rwq->umem)
@@ -959,11 +965,16 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
goto err_free;
}
- qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wrid), GFP_KERNEL);
- qp->sq.wr_data = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wr_data), GFP_KERNEL);
- qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(*qp->rq.wrid), GFP_KERNEL);
- qp->sq.w_list = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.w_list), GFP_KERNEL);
- qp->sq.wqe_head = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wqe_head), GFP_KERNEL);
+ qp->sq.wrid = kvmalloc_array(qp->sq.wqe_cnt,
+ sizeof(*qp->sq.wrid), GFP_KERNEL);
+ qp->sq.wr_data = kvmalloc_array(qp->sq.wqe_cnt,
+ sizeof(*qp->sq.wr_data), GFP_KERNEL);
+ qp->rq.wrid = kvmalloc_array(qp->rq.wqe_cnt,
+ sizeof(*qp->rq.wrid), GFP_KERNEL);
+ qp->sq.w_list = kvmalloc_array(qp->sq.wqe_cnt,
+ sizeof(*qp->sq.w_list), GFP_KERNEL);
+ qp->sq.wqe_head = kvmalloc_array(qp->sq.wqe_cnt,
+ sizeof(*qp->sq.wqe_head), GFP_KERNEL);
if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid ||
!qp->sq.w_list || !qp->sq.wqe_head) {
@@ -975,11 +986,11 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
return 0;
err_wrid:
- kfree(qp->sq.wqe_head);
- kfree(qp->sq.w_list);
- kfree(qp->sq.wrid);
- kfree(qp->sq.wr_data);
- kfree(qp->rq.wrid);
+ kvfree(qp->sq.wqe_head);
+ kvfree(qp->sq.w_list);
+ kvfree(qp->sq.wrid);
+ kvfree(qp->sq.wr_data);
+ kvfree(qp->rq.wrid);
mlx5_db_free(dev->mdev, &qp->db);
err_free:
@@ -992,11 +1003,11 @@ err_buf:
static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
- kfree(qp->sq.wqe_head);
- kfree(qp->sq.w_list);
- kfree(qp->sq.wrid);
- kfree(qp->sq.wr_data);
- kfree(qp->rq.wrid);
+ kvfree(qp->sq.wqe_head);
+ kvfree(qp->sq.w_list);
+ kvfree(qp->sq.wrid);
+ kvfree(qp->sq.wr_data);
+ kvfree(qp->rq.wrid);
mlx5_db_free(dev->mdev, &qp->db);
mlx5_buf_free(dev->mdev, &qp->buf);
}
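These per-WQE bookkeeping arrays scale with wqe_cnt, so the patch moves them to kvmalloc_array(), which checks the multiplication for overflow and may fall back to vmalloc, with kvfree() releasing either kind. A small kernel-style fragment of the same pattern (a sketch, not driver code):

/* Kernel-style sketch of the allocation pattern used for the SQ/RQ
 * bookkeeping arrays above; not driver code. */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/types.h>

static u64 *alloc_wrid(unsigned int wqe_cnt)
{
        /* overflow-checked wqe_cnt * sizeof(u64), may fall back to vmalloc */
        return kvmalloc_array(wqe_cnt, sizeof(u64), GFP_KERNEL);
}

static void free_wrid(u64 *wrid)
{
        kvfree(wrid);   /* handles both kmalloc- and vmalloc-backed memory */
}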
@@ -1021,12 +1032,16 @@ static int is_connected(enum ib_qp_type qp_type)
}
static int create_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_qp *qp,
struct mlx5_ib_sq *sq, u32 tdn)
{
u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
MLX5_SET(tisc, tisc, transport_domain, tdn);
+ if (qp->flags & MLX5_IB_QP_UNDERLAY)
+ MLX5_SET(tisc, tisc, underlay_qpn, qp->underlay_qpn);
+
return mlx5_core_create_tis(dev->mdev, in, sizeof(in), &sq->tisn);
}
@@ -1068,11 +1083,16 @@ static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
MLX5_SET(sqc, sqc, flush_in_error_en, 1);
+ if (MLX5_CAP_ETH(dev->mdev, multi_pkt_send_wqe))
+ MLX5_SET(sqc, sqc, allow_multi_pkt_send_wqe, 1);
MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
MLX5_SET(sqc, sqc, user_index, MLX5_GET(qpc, qpc, user_index));
MLX5_SET(sqc, sqc, cqn, MLX5_GET(qpc, qpc, cqn_snd));
MLX5_SET(sqc, sqc, tis_lst_sz, 1);
MLX5_SET(sqc, sqc, tis_num_0, sq->tisn);
+ if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
+ MLX5_CAP_ETH(dev->mdev, swp))
+ MLX5_SET(sqc, sqc, allow_swp, 1);
wq = MLX5_ADDR_OF(sqc, sqc, wq);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
@@ -1229,7 +1249,7 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
u32 tdn = mucontext->tdn;
if (qp->sq.wqe_cnt) {
- err = create_raw_packet_qp_tis(dev, sq, tdn);
+ err = create_raw_packet_qp_tis(dev, qp, sq, tdn);
if (err)
return err;
@@ -1238,6 +1258,7 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
goto err_destroy_tis;
sq->base.container_mibqp = qp;
+ sq->base.mqp.event = mlx5_ib_qp_event;
}
if (qp->rq.wqe_cnt) {
@@ -1502,10 +1523,6 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
u32 *in;
int err;
- base = init_attr->qp_type == IB_QPT_RAW_PACKET ?
- &qp->raw_packet_qp.rq.base :
- &qp->trans_qp.base;
-
mutex_init(&qp->mutex);
spin_lock_init(&qp->sq.lock);
spin_lock_init(&qp->rq.lock);
@@ -1587,10 +1604,28 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE);
qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
+
+ if (init_attr->create_flags & IB_QP_CREATE_SOURCE_QPN) {
+ if (init_attr->qp_type != IB_QPT_UD ||
+ (MLX5_CAP_GEN(dev->mdev, port_type) !=
+ MLX5_CAP_PORT_TYPE_IB) ||
+ !mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS)) {
+ mlx5_ib_dbg(dev, "Source QP option isn't supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ qp->flags |= MLX5_IB_QP_UNDERLAY;
+ qp->underlay_qpn = init_attr->source_qpn;
+ }
} else {
qp->wq_sig = !!wq_signature;
}
+ base = (init_attr->qp_type == IB_QPT_RAW_PACKET ||
+ qp->flags & MLX5_IB_QP_UNDERLAY) ?
+ &qp->raw_packet_qp.rq.base :
+ &qp->trans_qp.base;
+
qp->has_rq = qp_has_rq(init_attr);
err = set_rq_size(dev, &init_attr->cap, qp->has_rq,
qp, (pd && pd->uobject) ? &ucmd : NULL);
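For reference, the consumer of the new MLX5_IB_QP_UNDERLAY path is a UD QP created with IB_QP_CREATE_SOURCE_QPN and a source_qpn. A rough sketch of such a call from a kernel ULP, with error handling trimmed and sizes illustrative:

/* Sketch: creating a UD QP bound to an underlay QPN, the case the new
 * MLX5_IB_QP_UNDERLAY handling above accepts. Kernel ULP context assumed. */
#include <rdma/ib_verbs.h>

static struct ib_qp *create_underlay_qp(struct ib_pd *pd, struct ib_cq *cq,
                                         u32 underlay_qpn)
{
        struct ib_qp_init_attr init_attr = {
                .qp_type        = IB_QPT_UD,
                .send_cq        = cq,
                .recv_cq        = cq,
                .create_flags   = IB_QP_CREATE_SOURCE_QPN,
                .source_qpn     = underlay_qpn,
                .cap            = { .max_send_wr = 64, .max_recv_wr = 64,
                                    .max_send_sge = 1, .max_recv_sge = 1 },
        };

        return ib_create_qp(pd, &init_attr);
}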
@@ -1694,10 +1729,15 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, init_attr));
- if (qp->sq.wqe_cnt)
+ if (qp->sq.wqe_cnt) {
MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt));
- else
+ } else {
MLX5_SET(qpc, qpc, no_sq, 1);
+ if (init_attr->srq &&
+ init_attr->srq->srq_type == IB_SRQT_TM)
+ MLX5_SET(qpc, qpc, offload_type,
+ MLX5_QPC_OFFLOAD_TYPE_RNDV);
+ }
/* Set default resources */
switch (init_attr->qp_type) {
@@ -1741,7 +1781,8 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
qp->flags |= MLX5_IB_QP_LSO;
}
- if (init_attr->qp_type == IB_QPT_RAW_PACKET) {
+ if (init_attr->qp_type == IB_QPT_RAW_PACKET ||
+ qp->flags & MLX5_IB_QP_UNDERLAY) {
qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd.sq_buf_addr;
raw_packet_qp_copy_info(qp, &qp->raw_packet_qp);
err = create_raw_packet_qp(dev, qp, in, pd);
@@ -1893,7 +1934,7 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
struct mlx5_ib_cq *send_cq, *recv_cq;
- struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
+ struct mlx5_ib_qp_base *base;
unsigned long flags;
int err;
@@ -1902,12 +1943,14 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
return;
}
- base = qp->ibqp.qp_type == IB_QPT_RAW_PACKET ?
+ base = (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
+ qp->flags & MLX5_IB_QP_UNDERLAY) ?
&qp->raw_packet_qp.rq.base :
&qp->trans_qp.base;
if (qp->state != IB_QPS_RESET) {
- if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET) {
+ if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET &&
+ !(qp->flags & MLX5_IB_QP_UNDERLAY)) {
err = mlx5_core_qp_modify(dev->mdev,
MLX5_CMD_OP_2RST_QP, 0,
NULL, &base->mqp);
@@ -1946,7 +1989,8 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
mlx5_ib_unlock_cqs(send_cq, recv_cq);
spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
- if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) {
+ if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
+ qp->flags & MLX5_IB_QP_UNDERLAY) {
destroy_raw_packet_qp(dev, qp);
} else {
err = mlx5_core_destroy_qp(dev->mdev, &base->mqp);
@@ -2702,7 +2746,8 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
if (is_sqp(ibqp->qp_type)) {
context->mtu_msgmax = (IB_MTU_256 << 5) | 8;
- } else if (ibqp->qp_type == IB_QPT_UD ||
+ } else if ((ibqp->qp_type == IB_QPT_UD &&
+ !(qp->flags & MLX5_IB_QP_UNDERLAY)) ||
ibqp->qp_type == MLX5_IB_QPT_REG_UMR) {
context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
} else if (attr_mask & IB_QP_PATH_MTU) {
@@ -2799,6 +2844,11 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
u8 port_num = (attr_mask & IB_QP_PORT ? attr->port_num :
qp->port) - 1;
+
+ /* Underlay port should be used - index 0 function per port */
+ if (qp->flags & MLX5_IB_QP_UNDERLAY)
+ port_num = 0;
+
mibport = &dev->port[port_num];
context->qp_counter_set_usr_page |=
cpu_to_be32((u32)(mibport->cnts.set_id) << 24);
@@ -2824,7 +2874,8 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
optpar = ib_mask_to_mlx5_opt(attr_mask);
optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
- if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) {
+ if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
+ qp->flags & MLX5_IB_QP_UNDERLAY) {
struct mlx5_modify_raw_qp_param raw_qp_param = {};
raw_qp_param.operation = op;
@@ -2913,7 +2964,13 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
ll = dev->ib_dev.get_link_layer(&dev->ib_dev, port);
}
- if (qp_type != MLX5_IB_QPT_REG_UMR &&
+ if (qp->flags & MLX5_IB_QP_UNDERLAY) {
+ if (attr_mask & ~(IB_QP_STATE | IB_QP_CUR_STATE)) {
+ mlx5_ib_dbg(dev, "invalid attr_mask 0x%x when underlay QP is used\n",
+ attr_mask);
+ goto out;
+ }
+ } else if (qp_type != MLX5_IB_QPT_REG_UMR &&
!ib_modify_qp_is_ok(cur_state, new_state, qp_type, attr_mask, ll)) {
mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
cur_state, new_state, ibqp->qp_type, attr_mask);
@@ -4477,9 +4534,14 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
return mlx5_ib_gsi_query_qp(ibqp, qp_attr, qp_attr_mask,
qp_init_attr);
+ /* Not all of the output fields are applicable; make sure to zero them */
+ memset(qp_init_attr, 0, sizeof(*qp_init_attr));
+ memset(qp_attr, 0, sizeof(*qp_attr));
+
mutex_lock(&qp->mutex);
- if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) {
+ if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
+ qp->flags & MLX5_IB_QP_UNDERLAY) {
err = query_raw_packet_qp_state(dev, qp, &raw_packet_qp_state);
if (err)
goto out;
@@ -4597,6 +4659,27 @@ static void mlx5_ib_wq_event(struct mlx5_core_qp *core_qp, int type)
}
}
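+/* Enable the device-wide delay-drop timeout once and count the RQs that rely on it */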
+static int set_delay_drop(struct mlx5_ib_dev *dev)
+{
+ int err = 0;
+
+ mutex_lock(&dev->delay_drop.lock);
+ if (dev->delay_drop.activate)
+ goto out;
+
+ err = mlx5_core_set_delay_drop(dev->mdev, dev->delay_drop.timeout);
+ if (err)
+ goto out;
+
+ dev->delay_drop.activate = true;
+out:
+ mutex_unlock(&dev->delay_drop.lock);
+
+ if (!err)
+ atomic_inc(&dev->delay_drop.rqs_cnt);
+ return err;
+}
+
static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
struct ib_wq_init_attr *init_attr)
{
@@ -4651,9 +4734,28 @@ static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
}
MLX5_SET(rqc, rqc, scatter_fcs, 1);
}
+ if (init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP) {
+ if (!(dev->ib_dev.attrs.raw_packet_caps &
+ IB_RAW_PACKET_CAP_DELAY_DROP)) {
+ mlx5_ib_dbg(dev, "Delay drop is not supported\n");
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+ MLX5_SET(rqc, rqc, delay_drop_en, 1);
+ }
rq_pas0 = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
mlx5_ib_populate_pas(dev, rwq->umem, rwq->page_shift, rq_pas0, 0);
err = mlx5_core_create_rq_tracked(dev->mdev, in, inlen, &rwq->core_qp);
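+ /* If delay drop was requested, arm the shared timer now; tear the RQ back down if that fails */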
+ if (!err && init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP) {
+ err = set_delay_drop(dev);
+ if (err) {
+ mlx5_ib_warn(dev, "Failed to enable delay drop err=%d\n",
+ err);
+ mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
+ } else {
+ rwq->create_flags |= MLX5_IB_WQ_FLAGS_DELAY_DROP;
+ }
+ }
out:
kvfree(in);
return err;
@@ -4787,7 +4889,7 @@ struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
err_copy:
mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
err_user_rq:
- destroy_user_rq(pd, rwq);
+ destroy_user_rq(dev, pd, rwq);
err:
kfree(rwq);
return ERR_PTR(err);
@@ -4799,7 +4901,7 @@ int mlx5_ib_destroy_wq(struct ib_wq *wq)
struct mlx5_ib_rwq *rwq = to_mrwq(wq);
mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
- destroy_user_rq(wq->pd, rwq);
+ destroy_user_rq(dev, wq->pd, rwq);
kfree(rwq);
return 0;
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 43707b101f47..6d5fadad9090 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -101,7 +101,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
udata->inlen - sizeof(ucmd)))
return -EINVAL;
- if (in->type == IB_SRQT_XRC) {
+ if (in->type != IB_SRQT_BASIC) {
err = get_srq_user_index(to_mucontext(pd->uobject->context),
&ucmd, udata->inlen, &uidx);
if (err)
@@ -145,7 +145,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
in->log_page_size = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
in->page_offset = offset;
if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
- in->type == IB_SRQT_XRC)
+ in->type != IB_SRQT_BASIC)
in->user_index = uidx;
return 0;
@@ -196,7 +196,7 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
}
mlx5_fill_page_array(&srq->buf, in->pas);
- srq->wrid = kmalloc(srq->msrq.max * sizeof(u64), GFP_KERNEL);
+ srq->wrid = kvmalloc_array(srq->msrq.max, sizeof(u64), GFP_KERNEL);
if (!srq->wrid) {
err = -ENOMEM;
goto err_in;
@@ -205,7 +205,7 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
in->log_page_size = srq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
- in->type == IB_SRQT_XRC)
+ in->type != IB_SRQT_BASIC)
in->user_index = MLX5_IB_DEFAULT_UIDX;
return 0;
@@ -230,7 +230,7 @@ static void destroy_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq)
static void destroy_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq)
{
- kfree(srq->wrid);
+ kvfree(srq->wrid);
mlx5_buf_free(dev->mdev, &srq->buf);
mlx5_db_free(dev->mdev, &srq->db);
}
@@ -292,14 +292,29 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
in.wqe_shift = srq->msrq.wqe_shift - 4;
if (srq->wq_sig)
in.flags |= MLX5_SRQ_FLAG_WQ_SIG;
- if (init_attr->srq_type == IB_SRQT_XRC) {
+
+ if (init_attr->srq_type == IB_SRQT_XRC)
in.xrcd = to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn;
- in.cqn = to_mcq(init_attr->ext.xrc.cq)->mcq.cqn;
- } else if (init_attr->srq_type == IB_SRQT_BASIC) {
+ else
in.xrcd = to_mxrcd(dev->devr.x0)->xrcdn;
- in.cqn = to_mcq(dev->devr.c0)->mcq.cqn;
+
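+ /* Tag-matching SRQ: check the requested tag list size against the device limit and set the RNDV flag */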
+ if (init_attr->srq_type == IB_SRQT_TM) {
+ in.tm_log_list_size =
+ ilog2(init_attr->ext.tag_matching.max_num_tags) + 1;
+ if (in.tm_log_list_size >
+ MLX5_CAP_GEN(dev->mdev, log_tag_matching_list_sz)) {
+ mlx5_ib_dbg(dev, "TM SRQ max_num_tags exceeding limit\n");
+ err = -EINVAL;
+ goto err_usr_kern_srq;
+ }
+ in.flags |= MLX5_SRQ_FLAG_RNDV;
}
+ if (ib_srq_has_cq(init_attr->srq_type))
+ in.cqn = to_mcq(init_attr->ext.cq)->mcq.cqn;
+ else
+ in.cqn = to_mcq(dev->devr.c0)->mcq.cqn;
+
in.pd = to_mpd(pd)->pdn;
in.db_record = srq->db.dma;
err = mlx5_core_create_srq(dev->mdev, &srq->msrq, &in);
diff --git a/drivers/infiniband/hw/mthca/mthca_av.c b/drivers/infiniband/hw/mthca/mthca_av.c
index 2aec9908c40a..e7f6223e9c60 100644
--- a/drivers/infiniband/hw/mthca/mthca_av.c
+++ b/drivers/infiniband/hw/mthca/mthca_av.c
@@ -186,7 +186,7 @@ int mthca_create_ah(struct mthca_dev *dev,
on_hca_fail:
if (ah->type == MTHCA_AH_PCI_POOL) {
- ah->av = pci_pool_zalloc(dev->av_table.pool,
+ ah->av = dma_pool_zalloc(dev->av_table.pool,
GFP_ATOMIC, &ah->avdma);
if (!ah->av)
return -ENOMEM;
@@ -250,7 +250,7 @@ int mthca_destroy_ah(struct mthca_dev *dev, struct mthca_ah *ah)
break;
case MTHCA_AH_PCI_POOL:
- pci_pool_free(dev->av_table.pool, ah->av, ah->avdma);
+ dma_pool_free(dev->av_table.pool, ah->av, ah->avdma);
break;
case MTHCA_AH_KMALLOC:
@@ -340,7 +340,7 @@ int mthca_init_av_table(struct mthca_dev *dev)
if (err)
return err;
- dev->av_table.pool = pci_pool_create("mthca_av", dev->pdev,
+ dev->av_table.pool = dma_pool_create("mthca_av", &dev->pdev->dev,
MTHCA_AV_SIZE,
MTHCA_AV_SIZE, 0);
if (!dev->av_table.pool)
@@ -360,7 +360,7 @@ int mthca_init_av_table(struct mthca_dev *dev)
return 0;
out_free_pool:
- pci_pool_destroy(dev->av_table.pool);
+ dma_pool_destroy(dev->av_table.pool);
out_free_alloc:
mthca_alloc_cleanup(&dev->av_table.alloc);
@@ -374,6 +374,6 @@ void mthca_cleanup_av_table(struct mthca_dev *dev)
if (dev->av_table.av_map)
iounmap(dev->av_table.av_map);
- pci_pool_destroy(dev->av_table.pool);
+ dma_pool_destroy(dev->av_table.pool);
mthca_alloc_cleanup(&dev->av_table.alloc);
}
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 9d83a53c0c67..419a2a20c047 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -538,7 +538,7 @@ int mthca_cmd_init(struct mthca_dev *dev)
return -ENOMEM;
}
- dev->cmd.pool = pci_pool_create("mthca_cmd", dev->pdev,
+ dev->cmd.pool = dma_pool_create("mthca_cmd", &dev->pdev->dev,
MTHCA_MAILBOX_SIZE,
MTHCA_MAILBOX_SIZE, 0);
if (!dev->cmd.pool) {
@@ -551,7 +551,7 @@ int mthca_cmd_init(struct mthca_dev *dev)
void mthca_cmd_cleanup(struct mthca_dev *dev)
{
- pci_pool_destroy(dev->cmd.pool);
+ dma_pool_destroy(dev->cmd.pool);
iounmap(dev->hcr);
if (dev->cmd.flags & MTHCA_CMD_POST_DOORBELLS)
iounmap(dev->cmd.dbell_map);
@@ -621,7 +621,7 @@ struct mthca_mailbox *mthca_alloc_mailbox(struct mthca_dev *dev,
if (!mailbox)
return ERR_PTR(-ENOMEM);
- mailbox->buf = pci_pool_alloc(dev->cmd.pool, gfp_mask, &mailbox->dma);
+ mailbox->buf = dma_pool_alloc(dev->cmd.pool, gfp_mask, &mailbox->dma);
if (!mailbox->buf) {
kfree(mailbox);
return ERR_PTR(-ENOMEM);
@@ -635,7 +635,7 @@ void mthca_free_mailbox(struct mthca_dev *dev, struct mthca_mailbox *mailbox)
if (!mailbox)
return;
- pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
+ dma_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
kfree(mailbox);
}
@@ -698,7 +698,7 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
for (i = 0; i < mthca_icm_size(&iter) >> lg; ++i) {
if (virt != -1) {
pages[nent * 2] = cpu_to_be64(virt);
- virt += 1 << lg;
+ virt += 1ULL << lg;
}
pages[nent * 2 + 1] =
@@ -1921,7 +1921,7 @@ int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
(in_wc->wc_flags & IB_WC_GRH ? 0x80 : 0);
MTHCA_PUT(inbox, val, MAD_IFC_G_PATH_OFFSET);
- MTHCA_PUT(inbox, in_wc->slid, MAD_IFC_RLID_OFFSET);
+ MTHCA_PUT(inbox, ib_lid_cpu16(in_wc->slid), MAD_IFC_RLID_OFFSET);
MTHCA_PUT(inbox, in_wc->pkey_index, MAD_IFC_PKEY_OFFSET);
if (in_grh)
@@ -1929,7 +1929,7 @@ int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
op_modifier |= 0x4;
- in_modifier |= in_wc->slid << 16;
+ in_modifier |= ib_lid_cpu16(in_wc->slid) << 16;
}
err = mthca_cmd_box(dev, inmailbox->dma, outmailbox->dma,
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index ec7da9a474cd..5508afbf1c67 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -118,7 +118,7 @@ enum {
};
struct mthca_cmd {
- struct pci_pool *pool;
+ struct dma_pool *pool;
struct mutex hcr_mutex;
struct semaphore poll_sem;
struct semaphore event_sem;
@@ -263,7 +263,7 @@ struct mthca_qp_table {
};
struct mthca_av_table {
- struct pci_pool *pool;
+ struct dma_pool *pool;
int num_ddr_avs;
u64 ddr_av_base;
void __iomem *av_map;
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 7df3db71777a..093f7755c843 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -205,7 +205,7 @@ int mthca_process_mad(struct ib_device *ibdev,
u16 *out_mad_pkey_index)
{
int err;
- u16 slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
+ u16 slid = in_wc ? ib_lid_cpu16(in_wc->slid) : be16_to_cpu(IB_LID_PERMISSIVE);
u16 prev_lid = 0;
struct ib_port_attr pattr;
const struct ib_mad *in_mad = (const struct ib_mad *)in;
@@ -256,7 +256,7 @@ int mthca_process_mad(struct ib_device *ibdev,
in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
!ib_query_port(ibdev, port_num, &pattr))
- prev_lid = pattr.lid;
+ prev_lid = ib_lid_cpu16(pattr.lid);
err = mthca_MAD_IFC(to_mdev(ibdev),
mad_flags & IB_MAD_IGNORE_MKEY,
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index c309e5c96383..e36a9bc52268 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -49,7 +49,6 @@
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox InfiniBand HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(DRV_VERSION);
#ifdef CONFIG_INFINIBAND_MTHCA_DEBUG
@@ -1162,7 +1161,7 @@ static void mthca_remove_one(struct pci_dev *pdev)
mutex_unlock(&mthca_device_mutex);
}
-static struct pci_device_id mthca_pci_table[] = {
+static const struct pci_device_id mthca_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR),
.driver_data = TAVOR },
{ PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_TAVOR),
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index c197cd9b193f..6fee7795d1c8 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -914,7 +914,7 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
int err = 0;
int write_mtt_size;
- if (udata->inlen - sizeof (struct ib_uverbs_cmd_hdr) < sizeof ucmd) {
+ if (udata->inlen < sizeof ucmd) {
if (!to_mucontext(pd->uobject->context)->reg_mr_warned) {
mthca_warn(dev, "Process '%s' did not pass in MR attrs.\n",
current->comm);
@@ -1178,12 +1178,11 @@ static int mthca_port_immutable(struct ib_device *ibdev, u8 port_num,
return 0;
}
-static void get_dev_fw_str(struct ib_device *device, char *str,
- size_t str_len)
+static void get_dev_fw_str(struct ib_device *device, char *str)
{
struct mthca_dev *dev =
container_of(device, struct mthca_dev, ib_dev);
- snprintf(str, str_len, "%d.%d.%d",
+ snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d",
(int) (dev->fw_ver >> 32),
(int) (dev->fw_ver >> 16) & 0xffff,
(int) dev->fw_ver & 0xffff);
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index a30aa6527f7e..942ca84713c9 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -63,7 +63,6 @@
MODULE_AUTHOR("NetEffect");
MODULE_DESCRIPTION("NetEffect RNIC Low-level iWARP Driver");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(DRV_VERSION);
int interrupt_mod_interval = 0;
@@ -102,7 +101,7 @@ static unsigned int ee_flsh_adapter;
static unsigned int sysfs_nonidx_addr;
static unsigned int sysfs_idx_addr;
-static struct pci_device_id nes_pci_table[] = {
+static const struct pci_device_id nes_pci_table[] = {
{ PCI_VDEVICE(NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020), },
{ PCI_VDEVICE(NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020_KR), },
{0}
@@ -808,13 +807,6 @@ static void nes_remove(struct pci_dev *pcidev)
}
-static struct pci_driver nes_pci_driver = {
- .name = DRV_NAME,
- .id_table = nes_pci_table,
- .probe = nes_probe,
- .remove = nes_remove,
-};
-
static ssize_t adapter_show(struct device_driver *ddp, char *buf)
{
unsigned int devfn = 0xffffffff;
@@ -1156,35 +1148,29 @@ static DRIVER_ATTR_RW(idx_addr);
static DRIVER_ATTR_RW(idx_data);
static DRIVER_ATTR_RW(wqm_quanta);
-static int nes_create_driver_sysfs(struct pci_driver *drv)
-{
- int error;
- error = driver_create_file(&drv->driver, &driver_attr_adapter);
- error |= driver_create_file(&drv->driver, &driver_attr_eeprom_cmd);
- error |= driver_create_file(&drv->driver, &driver_attr_eeprom_data);
- error |= driver_create_file(&drv->driver, &driver_attr_flash_cmd);
- error |= driver_create_file(&drv->driver, &driver_attr_flash_data);
- error |= driver_create_file(&drv->driver, &driver_attr_nonidx_addr);
- error |= driver_create_file(&drv->driver, &driver_attr_nonidx_data);
- error |= driver_create_file(&drv->driver, &driver_attr_idx_addr);
- error |= driver_create_file(&drv->driver, &driver_attr_idx_data);
- error |= driver_create_file(&drv->driver, &driver_attr_wqm_quanta);
- return error;
-}
+static struct attribute *nes_attrs[] = {
+ &driver_attr_adapter.attr,
+ &driver_attr_eeprom_cmd.attr,
+ &driver_attr_eeprom_data.attr,
+ &driver_attr_flash_cmd.attr,
+ &driver_attr_flash_data.attr,
+ &driver_attr_nonidx_addr.attr,
+ &driver_attr_nonidx_data.attr,
+ &driver_attr_idx_addr.attr,
+ &driver_attr_idx_data.attr,
+ &driver_attr_wqm_quanta.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(nes);
+
+static struct pci_driver nes_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = nes_pci_table,
+ .probe = nes_probe,
+ .remove = nes_remove,
+ .groups = nes_groups,
+};
-static void nes_remove_driver_sysfs(struct pci_driver *drv)
-{
- driver_remove_file(&drv->driver, &driver_attr_adapter);
- driver_remove_file(&drv->driver, &driver_attr_eeprom_cmd);
- driver_remove_file(&drv->driver, &driver_attr_eeprom_data);
- driver_remove_file(&drv->driver, &driver_attr_flash_cmd);
- driver_remove_file(&drv->driver, &driver_attr_flash_data);
- driver_remove_file(&drv->driver, &driver_attr_nonidx_addr);
- driver_remove_file(&drv->driver, &driver_attr_nonidx_data);
- driver_remove_file(&drv->driver, &driver_attr_idx_addr);
- driver_remove_file(&drv->driver, &driver_attr_idx_data);
- driver_remove_file(&drv->driver, &driver_attr_wqm_quanta);
-}
/**
* nes_init_module - module initialization entry point
@@ -1192,20 +1178,13 @@ static void nes_remove_driver_sysfs(struct pci_driver *drv)
static int __init nes_init_module(void)
{
int retval;
- int retval1;
retval = nes_cm_start();
if (retval) {
printk(KERN_ERR PFX "Unable to start NetEffect iWARP CM.\n");
return retval;
}
- retval = pci_register_driver(&nes_pci_driver);
- if (retval >= 0) {
- retval1 = nes_create_driver_sysfs(&nes_pci_driver);
- if (retval1 < 0)
- printk(KERN_ERR PFX "Unable to create NetEffect sys files.\n");
- }
- return retval;
+ return pci_register_driver(&nes_pci_driver);
}
@@ -1215,7 +1194,6 @@ static int __init nes_init_module(void)
static void __exit nes_exit_module(void)
{
nes_cm_stop();
- nes_remove_driver_sysfs(&nes_pci_driver);
pci_unregister_driver(&nes_pci_driver);
}
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 8f9d8b4ad583..b0adf65e4bdb 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -551,7 +551,7 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
if ((0x0F000100 == (pcs_control_status0 & 0x0F000100))
|| (0x0F000100 == (pcs_control_status1 & 0x0F000100)))
int_cnt++;
- msleep(1);
+ usleep_range(1000, 2000);
}
if (int_cnt > 1) {
spin_lock_irqsave(&nesadapter->phy_lock, flags);
@@ -592,7 +592,7 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
break;
}
}
- msleep(1);
+ usleep_range(1000, 2000);
}
}
}
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 25dcd7573df9..f0dc5f4aa177 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -481,21 +481,16 @@ static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr
props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
props->lid = 1;
- props->lmc = 0;
- props->sm_lid = 0;
- props->sm_sl = 0;
if (netif_queue_stopped(netdev))
props->state = IB_PORT_DOWN;
else if (nesvnic->linkup)
props->state = IB_PORT_ACTIVE;
else
props->state = IB_PORT_DOWN;
- props->phys_state = 0;
props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
props->gid_tbl_len = 1;
props->pkey_tbl_len = 1;
- props->qkey_viol_cntr = 0;
props->active_width = IB_WIDTH_4X;
props->active_speed = IB_SPEED_SDR;
props->max_msg_sz = 0x80000000;
@@ -3672,15 +3667,14 @@ static int nes_port_immutable(struct ib_device *ibdev, u8 port_num,
return 0;
}
-static void get_dev_fw_str(struct ib_device *dev, char *str,
- size_t str_len)
+static void get_dev_fw_str(struct ib_device *dev, char *str)
{
struct nes_ib_device *nesibdev =
container_of(dev, struct nes_ib_device, ibdev);
struct nes_vnic *nesvnic = nesibdev->nesvnic;
nes_debug(NES_DBG_INIT, "\n");
- snprintf(str, str_len, "%u.%u",
+ snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u",
(nesvnic->nesdev->nesadapter->firmware_version >> 16),
(nesvnic->nesdev->nesadapter->firmware_version & 0x000000ff));
}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 57c9a2ad0260..fbfbd9e96147 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -58,7 +58,6 @@
#include "ocrdma_stats.h"
#include <rdma/ocrdma-abi.h>
-MODULE_VERSION(OCRDMA_ROCE_DRV_VERSION);
MODULE_DESCRIPTION(OCRDMA_ROCE_DRV_DESC " " OCRDMA_ROCE_DRV_VERSION);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("Dual BSD/GPL");
@@ -108,12 +107,11 @@ static int ocrdma_port_immutable(struct ib_device *ibdev, u8 port_num,
return 0;
}
-static void get_dev_fw_str(struct ib_device *device, char *str,
- size_t str_len)
+static void get_dev_fw_str(struct ib_device *device, char *str)
{
struct ocrdma_dev *dev = get_ocrdma_dev(device);
- snprintf(str, str_len, "%s", &dev->attr.fw_ver[0]);
+ snprintf(str, IB_FW_VERSION_NAME_MAX, "%s", &dev->attr.fw_ver[0]);
}
static int ocrdma_register_device(struct ocrdma_dev *dev)
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 2f30bda8457a..27d5e8d9f08d 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -744,7 +744,8 @@ err:
if (is_uctx_pd) {
ocrdma_release_ucontext_pd(uctx);
} else {
- status = _ocrdma_dealloc_pd(dev, pd);
+ if (_ocrdma_dealloc_pd(dev, pd))
+ pr_err("%s: _ocrdma_dealloc_pd() failed\n", __func__);
}
exit:
return ERR_PTR(status);
@@ -1901,6 +1902,7 @@ struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
goto err;
if (udata == NULL) {
+ status = -ENOMEM;
srq->rqe_wr_id_tbl = kzalloc(sizeof(u64) * srq->rq.max_cnt,
GFP_KERNEL);
if (srq->rqe_wr_id_tbl == NULL)
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index b5851fd67d4f..97d033f51dc9 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -47,7 +47,6 @@
MODULE_DESCRIPTION("QLogic 40G/100G ROCE Driver");
MODULE_AUTHOR("QLogic Corporation");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(QEDR_MODULE_VERSION);
#define QEDR_WQ_MULTIPLIER_DFT (3)
@@ -69,13 +68,12 @@ static enum rdma_link_layer qedr_link_layer(struct ib_device *device,
return IB_LINK_LAYER_ETHERNET;
}
-static void qedr_get_dev_fw_str(struct ib_device *ibdev, char *str,
- size_t str_len)
+static void qedr_get_dev_fw_str(struct ib_device *ibdev, char *str)
{
struct qedr_dev *qedr = get_qedr_dev(ibdev);
u32 fw_ver = (u32)qedr->attr.fw_ver;
- snprintf(str, str_len, "%d. %d. %d. %d",
+ snprintf(str, IB_FW_VERSION_NAME_MAX, "%d. %d. %d. %d",
(fw_ver >> 24) & 0xFF, (fw_ver >> 16) & 0xFF,
(fw_ver >> 8) & 0xFF, fw_ver & 0xFF);
}
@@ -778,6 +776,7 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
if (rc)
goto init_err;
+ dev->user_dpm_enabled = dev_info.user_dpm_enabled;
dev->num_hwfns = dev_info.common.num_hwfns;
dev->rdma_ctx = dev->ops->rdma_get_rdma_ctx(cdev);
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index ab7784bfdac6..b2bb42e2805d 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -41,7 +41,6 @@
#include <linux/qed/roce_common.h>
#include "qedr_hsi_rdma.h"
-#define QEDR_MODULE_VERSION "8.10.10.0"
#define QEDR_NODE_DESC "QLogic 579xx RoCE HCA"
#define DP_NAME(dev) ((dev)->ibdev.name)
@@ -163,6 +162,8 @@ struct qedr_dev {
struct qedr_qp *gsi_qp;
unsigned long enet_state;
+
+ u8 user_dpm_enabled;
};
#define QEDR_MAX_SQ_PBL (0x8000)
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 548e4d1e998f..769ac07c3c8e 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -53,6 +53,14 @@
#define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
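+/* Copy a response to user space, clamped to the size of the caller's output buffer */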
+static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src,
+ size_t len)
+{
+ size_t min_len = min_t(size_t, len, udata->outlen);
+
+ return ib_copy_to_udata(udata, src, min_len);
+}
+
int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
if (index > QEDR_ROCE_PKEY_TABLE_LEN)
@@ -368,6 +376,9 @@ struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
memset(&uresp, 0, sizeof(uresp));
+ uresp.dpm_enabled = dev->user_dpm_enabled;
+ uresp.wids_enabled = 1;
+ uresp.wid_count = oparams.wid_count;
uresp.db_pa = ctx->dpi_phys_addr;
uresp.db_size = ctx->dpi_size;
uresp.max_send_wr = dev->attr.max_sqe;
@@ -378,7 +389,7 @@ struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
uresp.max_cqes = QEDR_MAX_CQES;
- rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
if (rc)
goto err;
@@ -480,7 +491,7 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
(udata && context) ? "User Lib" : "Kernel");
if (!dev->rdma_ctx) {
- DP_ERR(dev, "invlaid RDMA context\n");
+ DP_ERR(dev, "invalid RDMA context\n");
return ERR_PTR(-EINVAL);
}
@@ -499,7 +510,7 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
uresp.pd_id = pd_id;
- rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
if (rc) {
DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
@@ -729,7 +740,7 @@ static int qedr_copy_cq_uresp(struct qedr_dev *dev,
uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
uresp.icid = cq->icid;
- rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
if (rc)
DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);
@@ -1238,7 +1249,7 @@ static int qedr_copy_qp_uresp(struct qedr_dev *dev,
uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
uresp.qp_id = qp->qp_id;
- rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
if (rc)
DP_ERR(dev,
"create qp: failed a copy to user space with qp icid=0x%x.\n",
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index a3e21a25cea5..f9e1c69603a5 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -1,7 +1,7 @@
#ifndef _QIB_KERNEL_H
#define _QIB_KERNEL_H
/*
- * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
+ * Copyright (c) 2012 - 2017 Intel Corporation. All rights reserved.
* Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
@@ -443,7 +443,7 @@ struct qib_irq_notify;
#endif
struct qib_msix_entry {
- struct msix_entry msix;
+ int irq;
void *arg;
#ifdef CONFIG_INFINIBAND_QIB_DCA
int dca;
@@ -1433,9 +1433,9 @@ int qib_pcie_init(struct pci_dev *, const struct pci_device_id *);
int qib_pcie_ddinit(struct qib_devdata *, struct pci_dev *,
const struct pci_device_id *);
void qib_pcie_ddcleanup(struct qib_devdata *);
-int qib_pcie_params(struct qib_devdata *, u32, u32 *, struct qib_msix_entry *);
+int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent);
int qib_reinit_intr(struct qib_devdata *);
-void qib_enable_intx(struct pci_dev *);
+void qib_enable_intx(struct qib_devdata *dd);
void qib_nomsi(struct qib_devdata *);
void qib_nomsix(struct qib_devdata *);
void qib_pcie_getcmd(struct qib_devdata *, u16 *, u8 *, u8 *);
diff --git a/drivers/infiniband/hw/qib/qib_debugfs.c b/drivers/infiniband/hw/qib/qib_debugfs.c
index 5bad8e3b40bb..5ed1ed93380f 100644
--- a/drivers/infiniband/hw/qib/qib_debugfs.c
+++ b/drivers/infiniband/hw/qib/qib_debugfs.c
@@ -1,6 +1,5 @@
-#ifdef CONFIG_DEBUG_FS
/*
- * Copyright (c) 2013 Intel Corporation. All rights reserved.
+ * Copyright (c) 2013 - 2017 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -191,10 +190,10 @@ DEBUGFS_FILE(ctx_stats)
static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos)
__acquires(RCU)
{
- struct qib_qp_iter *iter;
+ struct rvt_qp_iter *iter;
loff_t n = *pos;
- iter = qib_qp_iter_init(s->private);
+ iter = rvt_qp_iter_init(s->private, 0, NULL);
/* stop calls rcu_read_unlock */
rcu_read_lock();
@@ -203,7 +202,7 @@ static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos)
return NULL;
do {
- if (qib_qp_iter_next(iter)) {
+ if (rvt_qp_iter_next(iter)) {
kfree(iter);
return NULL;
}
@@ -216,11 +215,11 @@ static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
loff_t *pos)
__must_hold(RCU)
{
- struct qib_qp_iter *iter = iter_ptr;
+ struct rvt_qp_iter *iter = iter_ptr;
(*pos)++;
- if (qib_qp_iter_next(iter)) {
+ if (rvt_qp_iter_next(iter)) {
kfree(iter);
return NULL;
}
@@ -236,7 +235,7 @@ static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr)
static int _qp_stats_seq_show(struct seq_file *s, void *iter_ptr)
{
- struct qib_qp_iter *iter = iter_ptr;
+ struct rvt_qp_iter *iter = iter_ptr;
if (!iter)
return 0;
@@ -284,6 +283,3 @@ void qib_dbg_exit(void)
debugfs_remove_recursive(qib_dbg_root);
qib_dbg_root = NULL;
}
-
-#endif
-
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index 2b5982f743ef..719906a9fd51 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -66,7 +66,6 @@ MODULE_PARM_DESC(compat_ddr_negotiate,
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel <ibsupport@intel.com>");
MODULE_DESCRIPTION("Intel IB driver");
-MODULE_VERSION(QIB_DRIVER_VERSION);
/*
* QIB_PIO_MAXIBHDR is the max IB header size allowed for in our
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index e423b71e6ea0..3259a60e4f4f 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013 Intel Corporation. All rights reserved.
+ * Copyright (c) 2013 - 2017 Intel Corporation. All rights reserved.
* Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
* All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
@@ -1742,38 +1742,32 @@ static void qib_setup_6120_interrupt(struct qib_devdata *dd)
*/
static void pe_boardname(struct qib_devdata *dd)
{
- char *n;
- u32 boardid, namelen;
+ u32 boardid;
boardid = SYM_FIELD(dd->revision, Revision,
BoardID);
switch (boardid) {
case 2:
- n = "InfiniPath_QLE7140";
+ dd->boardname = "InfiniPath_QLE7140";
break;
default:
qib_dev_err(dd, "Unknown 6120 board with ID %u\n", boardid);
- n = "Unknown_InfiniPath_6120";
+ dd->boardname = "Unknown_InfiniPath_6120";
break;
}
- namelen = strlen(n) + 1;
- dd->boardname = kmalloc(namelen, GFP_KERNEL);
- if (dd->boardname)
- snprintf(dd->boardname, namelen, "%s", n);
if (dd->majrev != 4 || !dd->minrev || dd->minrev > 2)
qib_dev_err(dd,
- "Unsupported InfiniPath hardware revision %u.%u!\n",
- dd->majrev, dd->minrev);
+ "Unsupported InfiniPath hardware revision %u.%u!\n",
+ dd->majrev, dd->minrev);
snprintf(dd->boardversion, sizeof(dd->boardversion),
"ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
- (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch),
+ (unsigned int)SYM_FIELD(dd->revision, Revision_R, Arch),
dd->majrev, dd->minrev,
- (unsigned)SYM_FIELD(dd->revision, Revision_R, SW));
-
+ (unsigned int)SYM_FIELD(dd->revision, Revision_R, SW));
}
/*
@@ -1838,7 +1832,7 @@ static int qib_6120_setup_reset(struct qib_devdata *dd)
bail:
if (ret) {
- if (qib_pcie_params(dd, dd->lbus_width, NULL, NULL))
+ if (qib_pcie_params(dd, dd->lbus_width, NULL))
qib_dev_err(dd,
"Reset failed to setup PCIe or interrupts; continuing anyway\n");
/* clear the reset error, init error/hwerror mask */
@@ -3562,7 +3556,7 @@ struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *pdev,
if (qib_mini_init)
goto bail;
- if (qib_pcie_params(dd, 8, NULL, NULL))
+ if (qib_pcie_params(dd, 8, NULL))
qib_dev_err(dd,
"Failed to setup PCIe or interrupts; continuing anyway\n");
dd->cspec->irq = pdev->irq; /* save IRQ */
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index c3679c48e61c..04bdd3d487b1 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2011 - 2017 Intel Corporation. All rights reserved.
* Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
* All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
@@ -2049,41 +2050,35 @@ static void qib_setup_7220_interrupt(struct qib_devdata *dd)
*/
static void qib_7220_boardname(struct qib_devdata *dd)
{
- char *n;
- u32 boardid, namelen;
+ u32 boardid;
boardid = SYM_FIELD(dd->revision, Revision,
BoardID);
switch (boardid) {
case 1:
- n = "InfiniPath_QLE7240";
+ dd->boardname = "InfiniPath_QLE7240";
break;
case 2:
- n = "InfiniPath_QLE7280";
+ dd->boardname = "InfiniPath_QLE7280";
break;
default:
qib_dev_err(dd, "Unknown 7220 board with ID %u\n", boardid);
- n = "Unknown_InfiniPath_7220";
+ dd->boardname = "Unknown_InfiniPath_7220";
break;
}
- namelen = strlen(n) + 1;
- dd->boardname = kmalloc(namelen, GFP_KERNEL);
- if (dd->boardname)
- snprintf(dd->boardname, namelen, "%s", n);
-
if (dd->majrev != 5 || !dd->minrev || dd->minrev > 2)
qib_dev_err(dd,
- "Unsupported InfiniPath hardware revision %u.%u!\n",
- dd->majrev, dd->minrev);
+ "Unsupported InfiniPath hardware revision %u.%u!\n",
+ dd->majrev, dd->minrev);
snprintf(dd->boardversion, sizeof(dd->boardversion),
"ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
- (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch),
+ (unsigned int)SYM_FIELD(dd->revision, Revision_R, Arch),
dd->majrev, dd->minrev,
- (unsigned)SYM_FIELD(dd->revision, Revision_R, SW));
+ (unsigned int)SYM_FIELD(dd->revision, Revision_R, SW));
}
/*
@@ -2148,7 +2143,7 @@ static int qib_setup_7220_reset(struct qib_devdata *dd)
bail:
if (ret) {
- if (qib_pcie_params(dd, dd->lbus_width, NULL, NULL))
+ if (qib_pcie_params(dd, dd->lbus_width, NULL))
qib_dev_err(dd,
"Reset failed to setup PCIe or interrupts; continuing anyway\n");
@@ -3309,7 +3304,7 @@ static int qib_7220_intr_fallback(struct qib_devdata *dd)
qib_devinfo(dd->pcidev,
"MSI interrupt not detected, trying INTx interrupts\n");
qib_7220_free_irq(dd);
- qib_enable_intx(dd->pcidev);
+ qib_enable_intx(dd);
/*
* Some newer kernels require free_irq before disable_msi,
* and irq can be changed during disable and INTx enable
@@ -4619,7 +4614,7 @@ struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *pdev,
minwidth = 8; /* x8 capable boards */
break;
}
- if (qib_pcie_params(dd, minwidth, NULL, NULL))
+ if (qib_pcie_params(dd, minwidth, NULL))
qib_dev_err(dd,
"Failed to setup PCIe or interrupts; continuing anyway\n");
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index bb2439fff8fa..14cadf6d6214 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (c) 2012 - 2017 Intel Corporation. All rights reserved.
* Copyright (c) 2008 - 2012 QLogic Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -2841,10 +2841,10 @@ static void qib_7322_nomsix(struct qib_devdata *dd)
reset_dca_notifier(dd, &dd->cspec->msix_entries[i]);
#endif
irq_set_affinity_hint(
- dd->cspec->msix_entries[i].msix.vector, NULL);
+ dd->cspec->msix_entries[i].irq, NULL);
free_cpumask_var(dd->cspec->msix_entries[i].mask);
- free_irq(dd->cspec->msix_entries[i].msix.vector,
- dd->cspec->msix_entries[i].arg);
+ free_irq(dd->cspec->msix_entries[i].irq,
+ dd->cspec->msix_entries[i].arg);
}
qib_nomsix(dd);
}
@@ -3336,9 +3336,9 @@ static void reset_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m)
qib_devinfo(dd->pcidev,
"Disabling notifier on HCA %d irq %d\n",
dd->unit,
- m->msix.vector);
+ m->irq);
irq_set_affinity_notifier(
- m->msix.vector,
+ m->irq,
NULL);
m->notifier = NULL;
}
@@ -3354,7 +3354,7 @@ static void setup_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m)
int ret;
m->notifier = n;
- n->notify.irq = m->msix.vector;
+ n->notify.irq = m->irq;
n->notify.notify = qib_irq_notifier_notify;
n->notify.release = qib_irq_notifier_release;
n->arg = m->arg;
@@ -3500,10 +3500,21 @@ try_intx:
- 1,
QIB_DRV_NAME "%d (kctx)", dd->unit);
}
- ret = request_irq(
- dd->cspec->msix_entries[msixnum].msix.vector,
- handler, 0, dd->cspec->msix_entries[msixnum].name,
- arg);
+
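+ /* Map this MSI-X vector to its Linux IRQ number; fall back to INTx if the lookup fails */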
+ dd->cspec->msix_entries[msixnum].irq = pci_irq_vector(
+ dd->pcidev, msixnum);
+ if (dd->cspec->msix_entries[msixnum].irq < 0) {
+ qib_dev_err(dd,
+ "Couldn't get MSIx irq (vec=%d): %d\n",
+ msixnum,
+ dd->cspec->msix_entries[msixnum].irq);
+ qib_7322_nomsix(dd);
+ goto try_intx;
+ }
+ ret = request_irq(dd->cspec->msix_entries[msixnum].irq,
+ handler, 0,
+ dd->cspec->msix_entries[msixnum].name,
+ arg);
if (ret) {
/*
* Shouldn't happen since the enable said we could
@@ -3512,7 +3523,7 @@ try_intx:
qib_dev_err(dd,
"Couldn't setup MSIx interrupt (vec=%d, irq=%d): %d\n",
msixnum,
- dd->cspec->msix_entries[msixnum].msix.vector,
+ dd->cspec->msix_entries[msixnum].irq,
ret);
qib_7322_nomsix(dd);
goto try_intx;
@@ -3548,7 +3559,7 @@ try_intx:
dd->cspec->msix_entries[msixnum].mask);
}
irq_set_affinity_hint(
- dd->cspec->msix_entries[msixnum].msix.vector,
+ dd->cspec->msix_entries[msixnum].irq,
dd->cspec->msix_entries[msixnum].mask);
}
msixnum++;
@@ -3571,75 +3582,69 @@ bail:;
static unsigned qib_7322_boardname(struct qib_devdata *dd)
{
/* Will need enumeration of board-types here */
- char *n;
- u32 boardid, namelen;
- unsigned features = DUAL_PORT_CAP;
+ u32 boardid;
+ unsigned int features = DUAL_PORT_CAP;
boardid = SYM_FIELD(dd->revision, Revision, BoardID);
switch (boardid) {
case 0:
- n = "InfiniPath_QLE7342_Emulation";
+ dd->boardname = "InfiniPath_QLE7342_Emulation";
break;
case 1:
- n = "InfiniPath_QLE7340";
+ dd->boardname = "InfiniPath_QLE7340";
dd->flags |= QIB_HAS_QSFP;
features = PORT_SPD_CAP;
break;
case 2:
- n = "InfiniPath_QLE7342";
+ dd->boardname = "InfiniPath_QLE7342";
dd->flags |= QIB_HAS_QSFP;
break;
case 3:
- n = "InfiniPath_QMI7342";
+ dd->boardname = "InfiniPath_QMI7342";
break;
case 4:
- n = "InfiniPath_Unsupported7342";
+ dd->boardname = "InfiniPath_Unsupported7342";
qib_dev_err(dd, "Unsupported version of QMH7342\n");
features = 0;
break;
case BOARD_QMH7342:
- n = "InfiniPath_QMH7342";
+ dd->boardname = "InfiniPath_QMH7342";
features = 0x24;
break;
case BOARD_QME7342:
- n = "InfiniPath_QME7342";
+ dd->boardname = "InfiniPath_QME7342";
break;
case 8:
- n = "InfiniPath_QME7362";
+ dd->boardname = "InfiniPath_QME7362";
dd->flags |= QIB_HAS_QSFP;
break;
case BOARD_QMH7360:
- n = "Intel IB QDR 1P FLR-QSFP Adptr";
+ dd->boardname = "Intel IB QDR 1P FLR-QSFP Adptr";
dd->flags |= QIB_HAS_QSFP;
break;
case 15:
- n = "InfiniPath_QLE7342_TEST";
+ dd->boardname = "InfiniPath_QLE7342_TEST";
dd->flags |= QIB_HAS_QSFP;
break;
default:
- n = "InfiniPath_QLE73xy_UNKNOWN";
+ dd->boardname = "InfiniPath_QLE73xy_UNKNOWN";
qib_dev_err(dd, "Unknown 7322 board type %u\n", boardid);
break;
}
dd->board_atten = 1; /* index into txdds_Xdr */
- namelen = strlen(n) + 1;
- dd->boardname = kmalloc(namelen, GFP_KERNEL);
- if (dd->boardname)
- snprintf(dd->boardname, namelen, "%s", n);
-
snprintf(dd->boardversion, sizeof(dd->boardversion),
"ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
- (unsigned)SYM_FIELD(dd->revision, Revision_R, Arch),
+ (unsigned int)SYM_FIELD(dd->revision, Revision_R, Arch),
dd->majrev, dd->minrev,
- (unsigned)SYM_FIELD(dd->revision, Revision_R, SW));
+ (unsigned int)SYM_FIELD(dd->revision, Revision_R, SW));
if (qib_singleport && (features >> PORT_SPD_CAP_SHIFT) & PORT_SPD_CAP) {
qib_devinfo(dd->pcidev,
- "IB%u: Forced to single port mode by module parameter\n",
- dd->unit);
+ "IB%u: Forced to single port mode by module parameter\n",
+ dd->unit);
features &= PORT_SPD_CAP;
}
@@ -3744,7 +3749,6 @@ static int qib_do_7322_reset(struct qib_devdata *dd)
if (msix_entries) {
/* restore the MSIx vector address and data if saved above */
for (i = 0; i < msix_entries; i++) {
- dd->cspec->msix_entries[i].msix.entry = i;
if (!msix_vecsave || !msix_vecsave[2 * i])
continue;
qib_write_kreg(dd, 2 * i +
@@ -3762,8 +3766,7 @@ static int qib_do_7322_reset(struct qib_devdata *dd)
write_7322_initregs(dd);
if (qib_pcie_params(dd, dd->lbus_width,
- &dd->cspec->num_msix_entries,
- dd->cspec->msix_entries))
+ &dd->cspec->num_msix_entries))
qib_dev_err(dd,
"Reset failed to setup PCIe or interrupts; continuing anyway\n");
@@ -5195,7 +5198,7 @@ static int qib_7322_intr_fallback(struct qib_devdata *dd)
qib_devinfo(dd->pcidev,
"MSIx interrupt not detected, trying INTx interrupts\n");
qib_7322_nomsix(dd);
- qib_enable_intx(dd->pcidev);
+ qib_enable_intx(dd);
qib_setup_7322_interrupt(dd, 0);
return 1;
}
@@ -6172,7 +6175,7 @@ static int setup_txselect(const char *str, struct kernel_param *kp)
unsigned long val;
char *n;
- if (strlen(str) >= MAX_ATTEN_LEN) {
+ if (strlen(str) >= ARRAY_SIZE(txselect_list)) {
pr_info("txselect_values string too long\n");
return -ENOSPC;
}
@@ -6183,7 +6186,7 @@ static int setup_txselect(const char *str, struct kernel_param *kp)
TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ);
return -EINVAL;
}
- strcpy(txselect_list, str);
+ strncpy(txselect_list, str, ARRAY_SIZE(txselect_list) - 1);
list_for_each_entry(dd, &qib_dev_list, list)
if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322)
@@ -7327,10 +7330,7 @@ struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
if (!dd->cspec->msix_entries)
tabsize = 0;
- for (i = 0; i < tabsize; i++)
- dd->cspec->msix_entries[i].msix.entry = i;
-
- if (qib_pcie_params(dd, 8, &tabsize, dd->cspec->msix_entries))
+ if (qib_pcie_params(dd, 8, &tabsize))
qib_dev_err(dd,
"Failed to setup PCIe or interrupts; continuing anyway\n");
/* may be less than we wanted, if not enough available */
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 6c16ba1107ba..c5a4c65636d6 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -399,7 +399,7 @@ static int loadtime_init(struct qib_devdata *dd)
if (((dd->revision >> QLOGIC_IB_R_SOFTWARE_SHIFT) &
QLOGIC_IB_R_SOFTWARE_MASK) != QIB_CHIP_SWVERSION) {
qib_dev_err(dd,
- "Driver only handles version %d, chip swversion is %d (%llx), failng\n",
+ "Driver only handles version %d, chip swversion is %d (%llx), failing\n",
QIB_CHIP_SWVERSION,
(int)(dd->revision >>
QLOGIC_IB_R_SOFTWARE_SHIFT) &
@@ -1398,7 +1398,6 @@ static void cleanup_device_data(struct qib_devdata *dd)
qib_free_ctxtdata(dd, rcd);
}
kfree(tmp);
- kfree(dd->boardname);
}
/*
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index da295e0392ed..82d9da9b6997 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -105,7 +105,7 @@ static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len)
if (ibp->rvp.sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
struct ib_ah *ah;
- ah = qib_create_qp0_ah(ibp, ibp->rvp.sm_lid);
+ ah = qib_create_qp0_ah(ibp, (u16)ibp->rvp.sm_lid);
if (IS_ERR(ah))
ret = PTR_ERR(ah);
else {
@@ -134,24 +134,21 @@ static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len)
}
/*
- * Send a bad [PQ]_Key trap (ch. 14.3.8).
+ * Send a bad P_Key trap (ch. 14.3.8).
*/
-void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
- u32 qp1, u32 qp2, __be16 lid1, __be16 lid2)
+void qib_bad_pkey(struct qib_ibport *ibp, u32 key, u32 sl,
+ u32 qp1, u32 qp2, __be16 lid1, __be16 lid2)
{
struct ib_mad_notice_attr data;
- if (trap_num == IB_NOTICE_TRAP_BAD_PKEY)
- ibp->rvp.pkey_violations++;
- else
- ibp->rvp.qkey_violations++;
ibp->rvp.n_pkt_drops++;
+ ibp->rvp.pkey_violations++;
/* Send violation trap */
data.generic_type = IB_NOTICE_TYPE_SECURITY;
data.prod_type_msb = 0;
data.prod_type_lsb = IB_NOTICE_PROD_CA;
- data.trap_num = trap_num;
+ data.trap_num = IB_NOTICE_TRAP_BAD_PKEY;
data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
data.toggle_count = 0;
memset(&data.details, 0, sizeof(data.details));
@@ -499,7 +496,7 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
pip->mkey = ibp->rvp.mkey;
pip->gid_prefix = ibp->rvp.gid_prefix;
pip->lid = cpu_to_be16(ppd->lid);
- pip->sm_lid = cpu_to_be16(ibp->rvp.sm_lid);
+ pip->sm_lid = cpu_to_be16((u16)ibp->rvp.sm_lid);
pip->cap_mask = cpu_to_be32(ibp->rvp.port_cap_flags);
/* pip->diag_code; */
pip->mkey_lease_period = cpu_to_be16(ibp->rvp.mkey_lease_period);
@@ -874,8 +871,6 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
ib_dispatch_event(&event);
}
- ret = subn_get_portinfo(smp, ibdev, port);
-
/* restore re-reg bit per o14-12.2.1 */
pip->clientrereg_resv_subnetto |= clientrereg;
@@ -1578,8 +1573,8 @@ static int pma_get_portcounters_cong(struct ib_pma_mad *pmp,
cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
cntrs.port_rcv_packets -= ibp->z_port_rcv_packets;
- memset(pmp->reserved, 0, sizeof(pmp->reserved) +
- sizeof(pmp->data));
+ memset(pmp->reserved, 0, sizeof(pmp->reserved));
+ memset(pmp->data, 0, sizeof(pmp->data));
/*
* Set top 3 bits to indicate interval in picoseconds in
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index c379b8342a09..d90403e31a9d 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2010 - 2017 Intel Corporation. All rights reserved.
* Copyright (c) 2008, 2009 QLogic Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -187,112 +188,84 @@ void qib_pcie_ddcleanup(struct qib_devdata *dd)
pci_set_drvdata(dd->pcidev, NULL);
}
-static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt,
- struct qib_msix_entry *qib_msix_entry)
-{
- int ret;
- int nvec = *msixcnt;
- struct msix_entry *msix_entry;
- int i;
-
- ret = pci_msix_vec_count(dd->pcidev);
- if (ret < 0)
- goto do_intx;
-
- nvec = min(nvec, ret);
-
- /* We can't pass qib_msix_entry array to qib_msix_setup
- * so use a dummy msix_entry array and copy the allocated
- * irq back to the qib_msix_entry array. */
- msix_entry = kcalloc(nvec, sizeof(*msix_entry), GFP_KERNEL);
- if (!msix_entry)
- goto do_intx;
-
- for (i = 0; i < nvec; i++)
- msix_entry[i] = qib_msix_entry[i].msix;
-
- ret = pci_enable_msix_range(dd->pcidev, msix_entry, 1, nvec);
- if (ret < 0)
- goto free_msix_entry;
- else
- nvec = ret;
-
- for (i = 0; i < nvec; i++)
- qib_msix_entry[i].msix = msix_entry[i];
-
- kfree(msix_entry);
- *msixcnt = nvec;
- return;
-
-free_msix_entry:
- kfree(msix_entry);
-
-do_intx:
- qib_dev_err(
- dd,
- "pci_enable_msix_range %d vectors failed: %d, falling back to INTx\n",
- nvec, ret);
- *msixcnt = 0;
- qib_enable_intx(dd->pcidev);
-}
-
/**
* We save the msi lo and hi values, so we can restore them after
* chip reset (the kernel PCI infrastructure doesn't yet handle that
* correctly).
*/
-static int qib_msi_setup(struct qib_devdata *dd, int pos)
+static void qib_msi_setup(struct qib_devdata *dd, int pos)
{
struct pci_dev *pdev = dd->pcidev;
u16 control;
- int ret;
- ret = pci_enable_msi(pdev);
- if (ret)
- qib_dev_err(dd,
- "pci_enable_msi failed: %d, interrupts may not work\n",
- ret);
- /* continue even if it fails, we may still be OK... */
-
- pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_LO,
- &dd->msi_lo);
- pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_HI,
- &dd->msi_hi);
+ pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_LO, &dd->msi_lo);
+ pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_HI, &dd->msi_hi);
pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control);
+
/* now save the data (vector) info */
- pci_read_config_word(pdev, pos + ((control & PCI_MSI_FLAGS_64BIT)
- ? 12 : 8),
+ pci_read_config_word(pdev,
+ pos + ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
&dd->msi_data);
- return ret;
}
-int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent,
- struct qib_msix_entry *entry)
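+/* Allocate up to maxvec interrupt vectors, preferring MSI-X, then MSI, falling back to INTx */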
+static int qib_allocate_irqs(struct qib_devdata *dd, u32 maxvec)
+{
+ unsigned int flags = PCI_IRQ_LEGACY;
+
+ /* Check our capabilities */
+ if (dd->pcidev->msix_cap) {
+ flags |= PCI_IRQ_MSIX;
+ } else {
+ if (dd->pcidev->msi_cap) {
+ flags |= PCI_IRQ_MSI;
+ /* Get msi_lo and msi_hi */
+ qib_msi_setup(dd, dd->pcidev->msi_cap);
+ }
+ }
+
+ if (!(flags & (PCI_IRQ_MSIX | PCI_IRQ_MSI)))
+ qib_dev_err(dd, "No PCI MSI or MSIx capability!\n");
+
+ return pci_alloc_irq_vectors(dd->pcidev, 1, maxvec, flags);
+}
+
+int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent)
{
u16 linkstat, speed;
- int pos = 0, ret = 1;
+ int nvec;
+ int maxvec;
+ int ret = 0;
if (!pci_is_pcie(dd->pcidev)) {
qib_dev_err(dd, "Can't find PCI Express capability!\n");
/* set up something... */
dd->lbus_width = 1;
dd->lbus_speed = 2500; /* Gen1, 2.5GHz */
+ ret = -1;
goto bail;
}
- pos = dd->pcidev->msix_cap;
- if (nent && *nent && pos) {
- qib_msix_setup(dd, pos, nent, entry);
- ret = 0; /* did it, either MSIx or INTx */
- } else {
- pos = dd->pcidev->msi_cap;
- if (pos)
- ret = qib_msi_setup(dd, pos);
- else
- qib_dev_err(dd, "No PCI MSI or MSIx capability!\n");
+ maxvec = (nent && *nent) ? *nent : 1;
+ nvec = qib_allocate_irqs(dd, maxvec);
+ if (nvec < 0) {
+ ret = nvec;
+ goto bail;
+ }
+
+ /*
+ * If the caller passed nent, record how many vectors were actually allocated
+ */
+ if (nent) {
+ *nent = nvec;
+
+ /*
+ * If we requested (nent) MSIX, but msix_enabled is not set,
+ * pci_alloc_irq_vectors() enabled INTx.
+ */
+ if (!dd->pcidev->msix_enabled)
+ qib_dev_err(dd,
+ "no msix vectors allocated, using INTx\n");
}
- if (!pos)
- qib_enable_intx(dd->pcidev);
pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKSTA, &linkstat);
/*
@@ -379,7 +352,7 @@ int qib_reinit_intr(struct qib_devdata *dd)
ret = 1;
bail:
if (!ret && (dd->flags & QIB_HAS_INTX)) {
- qib_enable_intx(dd->pcidev);
+ qib_enable_intx(dd);
ret = 1;
}
@@ -397,7 +370,7 @@ bail:
void qib_nomsi(struct qib_devdata *dd)
{
dd->msi_lo = 0;
- pci_disable_msi(dd->pcidev);
+ pci_free_irq_vectors(dd->pcidev);
}
/*
@@ -405,23 +378,21 @@ void qib_nomsi(struct qib_devdata *dd)
*/
void qib_nomsix(struct qib_devdata *dd)
{
- pci_disable_msix(dd->pcidev);
+ pci_free_irq_vectors(dd->pcidev);
}
/*
* Similar to pci_intx(pdev, 1), except that we make sure
* msi(x) is off.
*/
-void qib_enable_intx(struct pci_dev *pdev)
+void qib_enable_intx(struct qib_devdata *dd)
{
u16 cw, new;
int pos;
+ struct pci_dev *pdev = dd->pcidev;
- /* first, turn on INTx */
- pci_read_config_word(pdev, PCI_COMMAND, &cw);
- new = cw & ~PCI_COMMAND_INTX_DISABLE;
- if (new != cw)
- pci_write_config_word(pdev, PCI_COMMAND, new);
+ if (pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY) < 0)
+ qib_dev_err(dd, "Failed to enable INTx\n");
pos = pdev->msi_cap;
if (pos) {
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 5984981e7dd4..344e401915f7 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
+ * Copyright (c) 2012 - 2017 Intel Corporation. All rights reserved.
* Copyright (c) 2006 - 2012 QLogic Corporation. * All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
@@ -104,10 +104,9 @@ const struct rvt_operation_params qib_post_parms[RVT_OPERATION_MAX] = {
};
-static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map,
- gfp_t gfp)
+static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map)
{
- unsigned long page = get_zeroed_page(gfp);
+ unsigned long page = get_zeroed_page(GFP_KERNEL);
/*
* Free the page if someone raced with us installing it.
@@ -126,7 +125,7 @@ static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map,
* zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
*/
int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
- enum ib_qp_type type, u8 port, gfp_t gfp)
+ enum ib_qp_type type, u8 port)
{
u32 i, offset, max_scan, qpn;
struct rvt_qpn_map *map;
@@ -160,7 +159,7 @@ int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
max_scan = qpt->nmaps - !offset;
for (i = 0;;) {
if (unlikely(!map->page)) {
- get_map_page(qpt, map, gfp);
+ get_map_page(qpt, map);
if (unlikely(!map->page))
break;
}
@@ -317,16 +316,16 @@ u32 qib_mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
return ib_mtu_enum_to_int(pmtu);
}
-void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, gfp_t gfp)
+void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
struct qib_qp_priv *priv;
- priv = kzalloc(sizeof(*priv), gfp);
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return ERR_PTR(-ENOMEM);
priv->owner = qp;
- priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), gfp);
+ priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), GFP_KERNEL);
if (!priv->s_hdr) {
kfree(priv);
return ERR_PTR(-ENOMEM);
@@ -416,53 +415,16 @@ int qib_check_send_wqe(struct rvt_qp *qp,
#ifdef CONFIG_DEBUG_FS
-struct qib_qp_iter {
- struct qib_ibdev *dev;
- struct rvt_qp *qp;
- int n;
-};
-
-struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev)
-{
- struct qib_qp_iter *iter;
-
- iter = kzalloc(sizeof(*iter), GFP_KERNEL);
- if (!iter)
- return NULL;
-
- iter->dev = dev;
-
- return iter;
-}
-
-int qib_qp_iter_next(struct qib_qp_iter *iter)
-{
- struct qib_ibdev *dev = iter->dev;
- int n = iter->n;
- int ret = 1;
- struct rvt_qp *pqp = iter->qp;
- struct rvt_qp *qp;
-
- for (; n < dev->rdi.qp_dev->qp_table_size; n++) {
- if (pqp)
- qp = rcu_dereference(pqp->next);
- else
- qp = rcu_dereference(dev->rdi.qp_dev->qp_table[n]);
- pqp = qp;
- if (qp) {
- iter->qp = qp;
- iter->n = n;
- return 0;
- }
- }
- return ret;
-}
-
static const char * const qp_type_str[] = {
"SMI", "GSI", "RC", "UC", "UD",
};
-void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter)
+/**
+ * qib_qp_iter_print - print information to seq_file
+ * @s: the seq_file
+ * @iter: the iterator
+ */
+void qib_qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter)
{
struct rvt_swqe *wqe;
struct rvt_qp *qp = iter->qp;
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 4ddbcac5eabe..e9a91736b12d 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -348,7 +348,7 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
case IB_WR_RDMA_WRITE:
if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
qp->s_lsn++;
- /* FALLTHROUGH */
+ goto no_flow_control;
case IB_WR_RDMA_WRITE_WITH_IMM:
/* If no credit, return. */
if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
@@ -356,7 +356,7 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
goto bail;
}
-
+no_flow_control:
ohdr->u.rc.reth.vaddr =
cpu_to_be64(wqe->rdma_wr.remote_addr);
ohdr->u.rc.reth.rkey =
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
index bd09de7c6e56..53efbb0b40c4 100644
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -58,8 +58,10 @@ static int qib_init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
if (wqe->sg_list[i].length == 0)
continue;
/* Check LKEY */
- if (!rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
- &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
+ ret = rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
+ NULL, &wqe->sg_list[i],
+ IB_ACCESS_LOCAL_WRITE);
+ if (unlikely(ret <= 0))
goto bad_lkey;
qp->r_len += wqe->sg_list[i].length;
j++;
@@ -256,11 +258,11 @@ int qib_ruc_check_hdr(struct qib_ibport *ibp, struct ib_header *hdr,
}
if (!qib_pkey_ok((u16)bth0,
qib_get_pkey(ibp, qp->s_alt_pkey_index))) {
- qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
- (u16)bth0,
- (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
- 0, qp->ibqp.qp_num,
- hdr->lrh[3], hdr->lrh[1]);
+ qib_bad_pkey(ibp,
+ (u16)bth0,
+ (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
+ 0, qp->ibqp.qp_num,
+ hdr->lrh[3], hdr->lrh[1]);
goto err;
}
/* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
@@ -295,11 +297,11 @@ int qib_ruc_check_hdr(struct qib_ibport *ibp, struct ib_header *hdr,
}
if (!qib_pkey_ok((u16)bth0,
qib_get_pkey(ibp, qp->s_pkey_index))) {
- qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
- (u16)bth0,
- (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
- 0, qp->ibqp.qp_num,
- hdr->lrh[3], hdr->lrh[1]);
+ qib_bad_pkey(ibp,
+ (u16)bth0,
+ (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
+ 0, qp->ibqp.qp_num,
+ hdr->lrh[3], hdr->lrh[1]);
goto err;
}
/* Validate the SLID. See Ch. 9.6.1.5 */
@@ -643,8 +645,10 @@ u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
hdr->hop_limit = grh->hop_limit;
/* The SGID is 32-bit aligned. */
hdr->sgid.global.subnet_prefix = ibp->rvp.gid_prefix;
- hdr->sgid.global.interface_id = grh->sgid_index ?
- ibp->guids[grh->sgid_index - 1] : ppd_from_ibp(ibp)->guid;
+ if (!grh->sgid_index)
+ hdr->sgid.global.interface_id = ppd_from_ibp(ibp)->guid;
+ else if (grh->sgid_index < QIB_GUIDS_PER_PORT)
+ hdr->sgid.global.interface_id = ibp->guids[grh->sgid_index - 1];
hdr->dgid = grh->dgid;
/* GRH header size in 32-bit words. */
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index fe4cf5e4acec..ca2638d8f35e 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -247,7 +247,7 @@ static struct kobj_type qib_port_cc_ktype = {
.release = qib_port_release,
};
-static struct bin_attribute cc_table_bin_attr = {
+static const struct bin_attribute cc_table_bin_attr = {
.attr = {.name = "cc_table_bin", .mode = 0444},
.read = read_cc_table_bin,
.size = PAGE_SIZE,
@@ -286,7 +286,7 @@ static ssize_t read_cc_setting_bin(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute cc_setting_bin_attr = {
+static const struct bin_attribute cc_setting_bin_attr = {
.attr = {.name = "cc_settings_bin", .mode = 0444},
.read = read_cc_setting_bin,
.size = PAGE_SIZE,
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index 341a123ee95c..be4907453ac4 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -66,8 +66,7 @@ static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
qp = rvt_lookup_qpn(rdi, &ibp->rvp, swqe->ud_wr.remote_qpn);
if (!qp) {
ibp->rvp.n_pkt_drops++;
- rcu_read_unlock();
- return;
+ goto drop;
}
sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ?
@@ -94,11 +93,11 @@ static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
lid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
((1 << ppd->lmc) - 1));
- qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY, pkey1,
- rdma_ah_get_sl(ah_attr),
- sqp->ibqp.qp_num, qp->ibqp.qp_num,
- cpu_to_be16(lid),
- cpu_to_be16(rdma_ah_get_dlid(ah_attr)));
+ qib_bad_pkey(ibp, pkey1,
+ rdma_ah_get_sl(ah_attr),
+ sqp->ibqp.qp_num, qp->ibqp.qp_num,
+ cpu_to_be16(lid),
+ cpu_to_be16(rdma_ah_get_dlid(ah_attr)));
goto drop;
}
}
@@ -113,18 +112,8 @@ static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
qkey = (int)swqe->ud_wr.remote_qkey < 0 ?
sqp->qkey : swqe->ud_wr.remote_qkey;
- if (unlikely(qkey != qp->qkey)) {
- u16 lid;
-
- lid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
- ((1 << ppd->lmc) - 1));
- qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey,
- rdma_ah_get_sl(ah_attr),
- sqp->ibqp.qp_num, qp->ibqp.qp_num,
- cpu_to_be16(lid),
- cpu_to_be16(rdma_ah_get_dlid(ah_attr)));
+ if (unlikely(qkey != qp->qkey))
goto drop;
- }
}
/*
@@ -487,22 +476,18 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
pkey1 = be32_to_cpu(ohdr->bth[0]);
pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
- qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
- pkey1,
- (be16_to_cpu(hdr->lrh[0]) >> 4) &
+ qib_bad_pkey(ibp,
+ pkey1,
+ (be16_to_cpu(hdr->lrh[0]) >> 4) &
0xF,
- src_qp, qp->ibqp.qp_num,
- hdr->lrh[3], hdr->lrh[1]);
+ src_qp, qp->ibqp.qp_num,
+ hdr->lrh[3], hdr->lrh[1]);
return;
}
}
- if (unlikely(qkey != qp->qkey)) {
- qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey,
- (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
- src_qp, qp->ibqp.qp_num,
- hdr->lrh[3], hdr->lrh[1]);
+ if (unlikely(qkey != qp->qkey))
return;
- }
+
/* Drop invalid MAD packets (see 13.5.3.1). */
if (unlikely(qp->ibqp.qp_num == 1 &&
(tlen != 256 ||
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index ac42dce7e281..9d92aeb8d9a1 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -1341,6 +1341,15 @@ int qib_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
if (rdma_ah_get_sl(ah_attr) > 15)
return -EINVAL;
+ if (rdma_ah_get_dlid(ah_attr) == 0)
+ return -EINVAL;
+ if (rdma_ah_get_dlid(ah_attr) >=
+ be16_to_cpu(IB_MULTICAST_LID_BASE) &&
+ rdma_ah_get_dlid(ah_attr) !=
+ be16_to_cpu(IB_LID_PERMISSIVE) &&
+ !(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH))
+ return -EINVAL;
+
return 0;
}
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index da0db5485ddc..f887737ac142 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
+ * Copyright (c) 2012 - 2017 Intel Corporation. All rights reserved.
* Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
@@ -241,8 +241,8 @@ static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
return p1 && p1 == p2 && ((__s16)pkey1 < 0 || (__s16)pkey2 < 0);
}
-void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
- u32 qp1, u32 qp2, __be16 lid1, __be16 lid2);
+void qib_bad_pkey(struct qib_ibport *ibp, u32 key, u32 sl,
+ u32 qp1, u32 qp2, __be16 lid1, __be16 lid2);
void qib_cap_mask_chg(struct rvt_dev_info *rdi, u8 port_num);
void qib_sys_guid_chg(struct qib_ibport *ibp);
void qib_node_desc_chg(struct qib_ibport *ibp);
@@ -274,21 +274,15 @@ int qib_get_counters(struct qib_pportdata *ppd,
* Functions provided by qib driver for rdmavt to use
*/
unsigned qib_free_all_qps(struct rvt_dev_info *rdi);
-void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, gfp_t gfp);
+void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp);
void qib_qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp);
void qib_notify_qp_reset(struct rvt_qp *qp);
int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
- enum ib_qp_type type, u8 port, gfp_t gfp);
+ enum ib_qp_type type, u8 port);
void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait);
#ifdef CONFIG_DEBUG_FS
-struct qib_qp_iter;
-
-struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev);
-
-int qib_qp_iter_next(struct qib_qp_iter *iter);
-
-void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter);
+void qib_qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter);
#endif
diff --git a/drivers/infiniband/hw/usnic/usnic_fwd.c b/drivers/infiniband/hw/usnic/usnic_fwd.c
index 3c37dd59c04e..995a26b65156 100644
--- a/drivers/infiniband/hw/usnic/usnic_fwd.c
+++ b/drivers/infiniband/hw/usnic/usnic_fwd.c
@@ -110,20 +110,12 @@ void usnic_fwd_set_mac(struct usnic_fwd_dev *ufdev, char mac[ETH_ALEN])
spin_unlock(&ufdev->lock);
}
-int usnic_fwd_add_ipaddr(struct usnic_fwd_dev *ufdev, __be32 inaddr)
+void usnic_fwd_add_ipaddr(struct usnic_fwd_dev *ufdev, __be32 inaddr)
{
- int status;
-
spin_lock(&ufdev->lock);
- if (ufdev->inaddr == 0) {
+ if (!ufdev->inaddr)
ufdev->inaddr = inaddr;
- status = 0;
- } else {
- status = -EFAULT;
- }
spin_unlock(&ufdev->lock);
-
- return status;
}
void usnic_fwd_del_ipaddr(struct usnic_fwd_dev *ufdev)
diff --git a/drivers/infiniband/hw/usnic/usnic_fwd.h b/drivers/infiniband/hw/usnic/usnic_fwd.h
index b2ac22be0731..0b2cc4e79707 100644
--- a/drivers/infiniband/hw/usnic/usnic_fwd.h
+++ b/drivers/infiniband/hw/usnic/usnic_fwd.h
@@ -75,7 +75,7 @@ struct usnic_fwd_dev *usnic_fwd_dev_alloc(struct pci_dev *pdev);
void usnic_fwd_dev_free(struct usnic_fwd_dev *ufdev);
void usnic_fwd_set_mac(struct usnic_fwd_dev *ufdev, char mac[ETH_ALEN]);
-int usnic_fwd_add_ipaddr(struct usnic_fwd_dev *ufdev, __be32 inaddr);
+void usnic_fwd_add_ipaddr(struct usnic_fwd_dev *ufdev, __be32 inaddr);
void usnic_fwd_del_ipaddr(struct usnic_fwd_dev *ufdev);
void usnic_fwd_carrier_up(struct usnic_fwd_dev *ufdev);
void usnic_fwd_carrier_down(struct usnic_fwd_dev *ufdev);
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c
index c0c1e8b027b1..f45e99a938e0 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_main.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c
@@ -333,9 +333,7 @@ static int usnic_port_immutable(struct ib_device *ibdev, u8 port_num,
return 0;
}
-static void usnic_get_dev_fw_str(struct ib_device *device,
- char *str,
- size_t str_len)
+static void usnic_get_dev_fw_str(struct ib_device *device, char *str)
{
struct usnic_ib_dev *us_ibdev =
container_of(device, struct usnic_ib_dev, ib_dev);
@@ -345,7 +343,7 @@ static void usnic_get_dev_fw_str(struct ib_device *device,
us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info);
mutex_unlock(&us_ibdev->usdev_lock);
- snprintf(str, str_len, "%s", info.fw_version);
+ snprintf(str, IB_FW_VERSION_NAME_MAX, "%s", info.fw_version);
}
/* Start of PF discovery section */
@@ -353,7 +351,7 @@ static void *usnic_ib_device_add(struct pci_dev *dev)
{
struct usnic_ib_dev *us_ibdev;
union ib_gid gid;
- struct in_ifaddr *in;
+ struct in_device *ind;
struct net_device *netdev;
usnic_dbg("\n");
@@ -409,6 +407,7 @@ static void *usnic_ib_device_add(struct pci_dev *dev)
us_ibdev->ib_dev.query_port = usnic_ib_query_port;
us_ibdev->ib_dev.query_pkey = usnic_ib_query_pkey;
us_ibdev->ib_dev.query_gid = usnic_ib_query_gid;
+ us_ibdev->ib_dev.get_netdev = usnic_get_netdev;
us_ibdev->ib_dev.get_link_layer = usnic_ib_port_link_layer;
us_ibdev->ib_dev.alloc_pd = usnic_ib_alloc_pd;
us_ibdev->ib_dev.dealloc_pd = usnic_ib_dealloc_pd;
@@ -442,9 +441,11 @@ static void *usnic_ib_device_add(struct pci_dev *dev)
if (netif_carrier_ok(us_ibdev->netdev))
usnic_fwd_carrier_up(us_ibdev->ufdev);
- in = ((struct in_device *)(netdev->ip_ptr))->ifa_list;
- if (in != NULL)
- usnic_fwd_add_ipaddr(us_ibdev->ufdev, in->ifa_address);
+ ind = in_dev_get(netdev);
+ if (ind->ifa_list)
+ usnic_fwd_add_ipaddr(us_ibdev->ufdev,
+ ind->ifa_list->ifa_address);
+ in_dev_put(ind);
usnic_mac_ip_to_gid(us_ibdev->netdev->perm_addr,
us_ibdev->ufdev->inaddr, &gid.raw[0]);
@@ -720,7 +721,6 @@ static void __exit usnic_ib_destroy(void)
MODULE_DESCRIPTION("Cisco VIC (usNIC) Verbs Driver");
MODULE_AUTHOR("Upinder Malhi <umalhi@cisco.com>");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(DRV_VERSION);
module_param(usnic_log_lvl, uint, S_IRUGO | S_IWUSR);
module_param(usnic_ib_share_vf, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(usnic_log_lvl, " Off=0, Err=1, Info=2, Debug=3");
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index 4996984885c2..e4113ef09315 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -164,6 +164,8 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
if (usnic_ib_share_vf) {
/* Try to find resources on a used vf which is in pd */
dev_list = usnic_uiom_get_dev_list(pd->umem_pd);
+ if (IS_ERR(dev_list))
+ return ERR_CAST(dev_list);
for (i = 0; dev_list[i]; i++) {
dev = dev_list[i];
vf = pci_get_drvdata(to_pci_dev(dev));
@@ -226,27 +228,6 @@ static void qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
spin_unlock(&vf->lock);
}
-static void eth_speed_to_ib_speed(int speed, u8 *active_speed,
- u8 *active_width)
-{
- if (speed <= 10000) {
- *active_width = IB_WIDTH_1X;
- *active_speed = IB_SPEED_FDR10;
- } else if (speed <= 20000) {
- *active_width = IB_WIDTH_4X;
- *active_speed = IB_SPEED_DDR;
- } else if (speed <= 30000) {
- *active_width = IB_WIDTH_4X;
- *active_speed = IB_SPEED_QDR;
- } else if (speed <= 40000) {
- *active_width = IB_WIDTH_4X;
- *active_speed = IB_SPEED_FDR10;
- } else {
- *active_width = IB_WIDTH_4X;
- *active_speed = IB_SPEED_EDR;
- }
-}
-
static int create_qp_validate_user_data(struct usnic_ib_create_qp_cmd cmd)
{
if (cmd.spec.trans_type <= USNIC_TRANSPORT_UNKNOWN ||
@@ -326,12 +307,16 @@ int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props)
{
struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
- struct ethtool_link_ksettings cmd;
usnic_dbg("\n");
mutex_lock(&us_ibdev->usdev_lock);
- __ethtool_get_link_ksettings(us_ibdev->netdev, &cmd);
+ if (ib_get_eth_speed(ibdev, port, &props->active_speed,
+ &props->active_width)) {
+ mutex_unlock(&us_ibdev->usdev_lock);
+ return -EINVAL;
+ }
+
/* props being zeroed by the caller, avoid zeroing it here */
props->lid = 0;
@@ -355,8 +340,6 @@ int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
props->pkey_tbl_len = 1;
props->bad_pkey_cntr = 0;
props->qkey_viol_cntr = 0;
- eth_speed_to_ib_speed(cmd.base.speed, &props->active_speed,
- &props->active_width);
props->max_mtu = IB_MTU_4096;
props->active_mtu = iboe_get_mtu(us_ibdev->ufdev->mtu);
/* Userspace will adjust for hdrs */
@@ -424,6 +407,16 @@ int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
return 0;
}
+struct net_device *usnic_get_netdev(struct ib_device *device, u8 port_num)
+{
+ struct usnic_ib_dev *us_ibdev = to_usdev(device);
+
+ if (us_ibdev->netdev)
+ dev_hold(us_ibdev->netdev);
+
+ return us_ibdev->netdev;
+}
+
int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
u16 *pkey)
{
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
index 172e43b6fa95..1fda94425116 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
@@ -48,6 +48,7 @@ int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
struct ib_qp_init_attr *qp_init_attr);
int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
union ib_gid *gid);
+struct net_device *usnic_get_netdev(struct ib_device *device, u8 port_num);
int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
u16 *pkey);
struct ib_pd *usnic_ib_alloc_pd(struct ib_device *ibdev,
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
index 8e2f0a11690f..663a0c301c43 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
@@ -194,6 +194,7 @@ struct pvrdma_dev {
void *resp_slot;
unsigned long flags;
struct list_head device_link;
+ unsigned int dsr_version;
/* Locking and interrupt information. */
spinlock_t cmd_lock; /* Command lock. */
@@ -444,6 +445,7 @@ void pvrdma_ah_attr_to_rdma(struct rdma_ah_attr *dst,
const struct pvrdma_ah_attr *src);
void rdma_ah_attr_to_pvrdma(struct pvrdma_ah_attr *dst,
const struct rdma_ah_attr *src);
+u8 ib_gid_type_to_pvrdma(enum ib_gid_type gid_type);
int pvrdma_uar_table_init(struct pvrdma_dev *dev);
void pvrdma_uar_table_cleanup(struct pvrdma_dev *dev);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
index 69bda611d313..3562c0c30492 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
@@ -65,13 +65,28 @@ int pvrdma_req_notify_cq(struct ib_cq *ibcq,
struct pvrdma_dev *dev = to_vdev(ibcq->device);
struct pvrdma_cq *cq = to_vcq(ibcq);
u32 val = cq->cq_handle;
+ unsigned long flags;
+ int has_data = 0;
val |= (notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
PVRDMA_UAR_CQ_ARM_SOL : PVRDMA_UAR_CQ_ARM;
+ spin_lock_irqsave(&cq->cq_lock, flags);
+
pvrdma_write_uar_cq(dev, val);
- return 0;
+ if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
+ unsigned int head;
+
+ has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx,
+ cq->ibcq.cqe, &head);
+ if (unlikely(has_data == PVRDMA_INVALID_IDX))
+ dev_err(&dev->pdev->dev, "CQ ring state invalid\n");
+ }
+
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
+
+ return has_data;
}
/**
@@ -284,7 +299,7 @@ static inline struct pvrdma_cqe *get_cqe(struct pvrdma_cq *cq, int i)
void _pvrdma_flush_cqe(struct pvrdma_qp *qp, struct pvrdma_cq *cq)
{
- int head;
+ unsigned int head;
int has_data;
if (!cq->is_kernel)
@@ -374,6 +389,7 @@ retry:
wc->dlid_path_bits = cqe->dlid_path_bits;
wc->port_num = cqe->port_num;
wc->vendor_err = cqe->vendor_err;
+ wc->network_hdr_type = cqe->network_hdr_type;
/* Update shared ring state */
pvrdma_idx_ring_inc(&cq->ring_state->rx.cons_head, cq->ibcq.cqe);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
index 09078ccfaec7..df0a6b525021 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
@@ -50,7 +50,15 @@
#include "pvrdma_verbs.h"
-#define PVRDMA_VERSION 17
+/*
+ * PVRDMA version macros. Some new features require updates to PVRDMA_VERSION.
+ * These macros allow us to check for different features if necessary.
+ */
+
+#define PVRDMA_ROCEV1_VERSION 17
+#define PVRDMA_ROCEV2_VERSION 18
+#define PVRDMA_VERSION PVRDMA_ROCEV2_VERSION
+
#define PVRDMA_BOARD_ID 1
#define PVRDMA_REV_ID 1
@@ -123,6 +131,31 @@
#define PVRDMA_GID_TYPE_FLAG_ROCE_V1 BIT(0)
#define PVRDMA_GID_TYPE_FLAG_ROCE_V2 BIT(1)
+/*
+ * Version checks. This checks whether each version supports specific
+ * capabilities from the device.
+ */
+
+#define PVRDMA_IS_VERSION17(_dev) \
+ (_dev->dsr_version == PVRDMA_ROCEV1_VERSION && \
+ _dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V1)
+
+#define PVRDMA_IS_VERSION18(_dev) \
+ (_dev->dsr_version >= PVRDMA_ROCEV2_VERSION && \
+ (_dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V1 || \
+ _dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V2)) \
+
+#define PVRDMA_SUPPORTED(_dev) \
+ ((_dev->dsr->caps.mode == PVRDMA_DEVICE_MODE_ROCE) && \
+ (PVRDMA_IS_VERSION17(_dev) || PVRDMA_IS_VERSION18(_dev)))
+
+/*
+ * Get capability values based on device version.
+ */
+
+#define PVRDMA_GET_CAP(_dev, _old_val, _val) \
+ ((PVRDMA_IS_VERSION18(_dev)) ? _val : _old_val)
+
enum pvrdma_pci_resource {
PVRDMA_PCI_RESOURCE_MSIX, /* BAR0: MSI-X, MMIO. */
PVRDMA_PCI_RESOURCE_REG, /* BAR1: Registers, MMIO. */
@@ -225,7 +258,7 @@ struct pvrdma_device_caps {
u8 atomic_ops; /* PVRDMA_ATOMIC_OP_* bits */
u8 bmme_flags; /* FRWR Mem Mgmt Extensions */
u8 gid_types; /* PVRDMA_GID_TYPE_FLAG_ */
- u8 reserved[4];
+ u32 max_fast_reg_page_list_len;
};
struct pvrdma_ring_page_info {
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index 34ebc7615411..6ce709a67959 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -102,12 +102,11 @@ static struct device_attribute *pvrdma_class_attributes[] = {
&dev_attr_board_id
};
-static void pvrdma_get_fw_ver_str(struct ib_device *device, char *str,
- size_t str_len)
+static void pvrdma_get_fw_ver_str(struct ib_device *device, char *str)
{
struct pvrdma_dev *dev =
container_of(device, struct pvrdma_dev, ib_dev);
- snprintf(str, str_len, "%d.%d.%d\n",
+ snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d\n",
(int) (dev->dsr->caps.fw_ver >> 32),
(int) (dev->dsr->caps.fw_ver >> 16) & 0xffff,
(int) dev->dsr->caps.fw_ver & 0xffff);
@@ -129,10 +128,14 @@ static int pvrdma_init_device(struct pvrdma_dev *dev)
static int pvrdma_port_immutable(struct ib_device *ibdev, u8 port_num,
struct ib_port_immutable *immutable)
{
+ struct pvrdma_dev *dev = to_vdev(ibdev);
struct ib_port_attr attr;
int err;
- immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
+ if (dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V1)
+ immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE;
+ else if (dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V2)
+ immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
err = ib_query_port(ibdev, port_num, &attr);
if (err)
@@ -570,6 +573,7 @@ static void pvrdma_free_slots(struct pvrdma_dev *dev)
static int pvrdma_add_gid_at_index(struct pvrdma_dev *dev,
const union ib_gid *gid,
+ u8 gid_type,
int index)
{
int ret;
@@ -587,7 +591,7 @@ static int pvrdma_add_gid_at_index(struct pvrdma_dev *dev,
cmd_bind->mtu = ib_mtu_enum_to_int(IB_MTU_1024);
cmd_bind->vlan = 0xfff;
cmd_bind->index = index;
- cmd_bind->gid_type = PVRDMA_GID_TYPE_FLAG_ROCE_V1;
+ cmd_bind->gid_type = gid_type;
ret = pvrdma_cmd_post(dev, &req, NULL, 0);
if (ret < 0) {
@@ -608,7 +612,9 @@ static int pvrdma_add_gid(struct ib_device *ibdev,
{
struct pvrdma_dev *dev = to_vdev(ibdev);
- return pvrdma_add_gid_at_index(dev, gid, index);
+ return pvrdma_add_gid_at_index(dev, gid,
+ ib_gid_type_to_pvrdma(attr->gid_type),
+ index);
}
static int pvrdma_del_gid_at_index(struct pvrdma_dev *dev, int index)
@@ -723,7 +729,6 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
int ret;
unsigned long start;
unsigned long len;
- unsigned int version;
dma_addr_t slot_dma = 0;
dev_dbg(&pdev->dev, "initializing driver %s\n", pci_name(pdev));
@@ -820,13 +825,9 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
goto err_unmap_regs;
}
- version = pvrdma_read_reg(dev, PVRDMA_REG_VERSION);
+ dev->dsr_version = pvrdma_read_reg(dev, PVRDMA_REG_VERSION);
dev_info(&pdev->dev, "device version %d, driver version %d\n",
- version, PVRDMA_VERSION);
- if (version < PVRDMA_VERSION) {
- dev_err(&pdev->dev, "incompatible device version\n");
- goto err_uar_unmap;
- }
+ dev->dsr_version, PVRDMA_VERSION);
dev->dsr = dma_alloc_coherent(&pdev->dev, sizeof(*dev->dsr),
&dev->dsrbase, GFP_KERNEL);
@@ -897,17 +898,9 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
/* Make sure the write is complete before reading status. */
mb();
- /* Currently, the driver only supports RoCE mode. */
- if (dev->dsr->caps.mode != PVRDMA_DEVICE_MODE_ROCE) {
- dev_err(&pdev->dev, "unsupported transport %d\n",
- dev->dsr->caps.mode);
- ret = -EFAULT;
- goto err_free_cq_ring;
- }
-
- /* Currently, the driver only supports RoCE V1. */
- if (!(dev->dsr->caps.gid_types & PVRDMA_GID_TYPE_FLAG_ROCE_V1)) {
- dev_err(&pdev->dev, "driver needs RoCE v1 support\n");
+ /* The driver supports RoCE V1 and V2. */
+ if (!PVRDMA_SUPPORTED(dev)) {
+ dev_err(&pdev->dev, "driver needs RoCE v1 or v2 support\n");
ret = -EFAULT;
goto err_free_cq_ring;
}
@@ -1078,7 +1071,7 @@ static void pvrdma_pci_remove(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
}
-static struct pci_device_id pvrdma_pci_table[] = {
+static const struct pci_device_id pvrdma_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_PVRDMA), },
{ 0 },
};
@@ -1119,5 +1112,4 @@ module_exit(pvrdma_cleanup);
MODULE_AUTHOR("VMware, Inc");
MODULE_DESCRIPTION("VMware Paravirtual RDMA driver");
-MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c
index ec6a4ca1eeb7..fb0c5c0976b3 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c
@@ -303,3 +303,10 @@ void rdma_ah_attr_to_pvrdma(struct pvrdma_ah_attr *dst,
dst->port_num = rdma_ah_get_port_num(src);
memcpy(&dst->dmac, src->roce.dmac, sizeof(dst->dmac));
}
+
+u8 ib_gid_type_to_pvrdma(enum ib_gid_type gid_type)
+{
+ return (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ?
+ PVRDMA_GID_TYPE_FLAG_ROCE_V2 :
+ PVRDMA_GID_TYPE_FLAG_ROCE_V1;
+}
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h
index ed9022a91a1d..8b558ae234c8 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h
@@ -111,21 +111,4 @@ static inline __s32 pvrdma_idx_ring_has_data(const struct pvrdma_ring *r,
return PVRDMA_INVALID_IDX;
}
-static inline bool pvrdma_idx_ring_is_valid_idx(const struct pvrdma_ring *r,
- __u32 max_elems, __u32 *idx)
-{
- const __u32 tail = atomic_read(&r->prod_tail);
- const __u32 head = atomic_read(&r->cons_head);
-
- if (pvrdma_idx_valid(tail, max_elems) &&
- pvrdma_idx_valid(head, max_elems) &&
- pvrdma_idx_valid(*idx, max_elems)) {
- if (tail > head && (*idx < tail && *idx >= head))
- return true;
- else if (head > tail && (*idx >= head || *idx < tail))
- return true;
- }
- return false;
-}
-
#endif /* __PVRDMA_RING_H__ */
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
index 28517042011d..48776f5ffb0e 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
@@ -83,6 +83,8 @@ int pvrdma_query_device(struct ib_device *ibdev,
props->max_qp_wr = dev->dsr->caps.max_qp_wr;
props->device_cap_flags = dev->dsr->caps.device_cap_flags;
props->max_sge = dev->dsr->caps.max_sge;
+ props->max_sge_rd = PVRDMA_GET_CAP(dev, dev->dsr->caps.max_sge,
+ dev->dsr->caps.max_sge_rd);
props->max_cq = dev->dsr->caps.max_cq;
props->max_cqe = dev->dsr->caps.max_cqe;
props->max_mr = dev->dsr->caps.max_mr;
@@ -101,8 +103,14 @@ int pvrdma_query_device(struct ib_device *ibdev,
(dev->dsr->caps.bmme_flags & PVRDMA_BMME_FLAG_REMOTE_INV) &&
(dev->dsr->caps.bmme_flags & PVRDMA_BMME_FLAG_FAST_REG_WR)) {
props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
+ props->max_fast_reg_page_list_len = PVRDMA_GET_CAP(dev,
+ PVRDMA_MAX_FAST_REG_PAGES,
+ dev->dsr->caps.max_fast_reg_page_list_len);
}
+ props->device_cap_flags |= IB_DEVICE_PORT_ACTIVE_EVENT |
+ IB_DEVICE_RC_RNR_NAK_GEN;
+
return 0;
}
@@ -143,6 +151,7 @@ int pvrdma_query_port(struct ib_device *ibdev, u8 port,
props->gid_tbl_len = resp->attrs.gid_tbl_len;
props->port_cap_flags =
pvrdma_port_cap_flags_to_ib(resp->attrs.port_cap_flags);
+ props->port_cap_flags |= IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS;
props->max_msg_sz = resp->attrs.max_msg_sz;
props->bad_pkey_cntr = resp->attrs.bad_pkey_cntr;
props->qkey_viol_cntr = resp->attrs.qkey_viol_cntr;
diff --git a/drivers/infiniband/sw/rdmavt/ah.c b/drivers/infiniband/sw/rdmavt/ah.c
index a96d4aa80ae8..ba3639a0d77c 100644
--- a/drivers/infiniband/sw/rdmavt/ah.c
+++ b/drivers/infiniband/sw/rdmavt/ah.c
@@ -66,8 +66,6 @@ int rvt_check_ah(struct ib_device *ibdev,
int port_num = rdma_ah_get_port_num(ah_attr);
struct ib_port_attr port_attr;
struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
- enum rdma_link_layer link = rdma_port_get_link_layer(ibdev, port_num);
- u32 dlid = rdma_ah_get_dlid(ah_attr);
u8 ah_flags = rdma_ah_get_ah_flags(ah_attr);
u8 static_rate = rdma_ah_get_static_rate(ah_attr);
@@ -83,14 +81,6 @@ int rvt_check_ah(struct ib_device *ibdev,
if ((ah_flags & IB_AH_GRH) &&
rdma_ah_read_grh(ah_attr)->sgid_index >= port_attr.gid_tbl_len)
return -EINVAL;
- if (link != IB_LINK_LAYER_ETHERNET) {
- if (dlid == 0)
- return -EINVAL;
- if (dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE) &&
- dlid != be16_to_cpu(IB_LID_PERMISSIVE) &&
- !(ah_flags & IB_AH_GRH))
- return -EINVAL;
- }
if (rdi->driver_f.check_ah)
return rdi->driver_f.check_ah(ibdev, ah_attr);
return 0;
diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
index 0ae2ff8cf81e..97d71e49c092 100644
--- a/drivers/infiniband/sw/rdmavt/cq.c
+++ b/drivers/infiniband/sw/rdmavt/cq.c
@@ -107,7 +107,7 @@ void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
wc->uqueue[head].src_qp = entry->src_qp;
wc->uqueue[head].wc_flags = entry->wc_flags;
wc->uqueue[head].pkey_index = entry->pkey_index;
- wc->uqueue[head].slid = entry->slid;
+ wc->uqueue[head].slid = ib_lid_cpu16(entry->slid);
wc->uqueue[head].sl = entry->sl;
wc->uqueue[head].dlid_path_bits = entry->dlid_path_bits;
wc->uqueue[head].port_num = entry->port_num;
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index aa5f9ea318e4..42713511b53b 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -441,6 +441,105 @@ bail_umem:
}
/**
+ * rvt_dereg_clean_qp_cb - callback from iterator
+ * @qp - the qp
+ * @v - the mregion (as u64)
+ *
+ * This routine fields the callback for all QPs and,
+ * for QPs in the same PD as the MR, calls
+ * rvt_qp_mr_clean() to potentially clean up references.
+ */
+static void rvt_dereg_clean_qp_cb(struct rvt_qp *qp, u64 v)
+{
+ struct rvt_mregion *mr = (struct rvt_mregion *)v;
+
+ /* skip PDs that are not ours */
+ if (mr->pd != qp->ibqp.pd)
+ return;
+ rvt_qp_mr_clean(qp, mr->lkey);
+}
+
+/**
+ * rvt_dereg_clean_qps - find QPs for reference cleanup
+ * @mr - the MR that is being deregistered
+ *
+ * This routine iterates RC QPs looking for references
+ * to the lkey noted in mr.
+ */
+static void rvt_dereg_clean_qps(struct rvt_mregion *mr)
+{
+ struct rvt_dev_info *rdi = ib_to_rvt(mr->pd->device);
+
+ rvt_qp_iter(rdi, (u64)mr, rvt_dereg_clean_qp_cb);
+}
+
+/**
+ * rvt_check_refs - check references
+ * @mr - the mregion
+ * @t - the caller identification
+ *
+ * This routine checks MRs holding a reference
+ * while they are being de-registered.
+ *
+ * If the count is non-zero, the code calls a clean routine then
+ * waits for the timeout for the count to zero.
+ */
+static int rvt_check_refs(struct rvt_mregion *mr, const char *t)
+{
+ unsigned long timeout;
+ struct rvt_dev_info *rdi = ib_to_rvt(mr->pd->device);
+
+ if (percpu_ref_is_zero(&mr->refcount))
+ return 0;
+ /* avoid dma mr */
+ if (mr->lkey)
+ rvt_dereg_clean_qps(mr);
+ timeout = wait_for_completion_timeout(&mr->comp, 5 * HZ);
+ if (!timeout) {
+ rvt_pr_err(rdi,
+ "%s timeout mr %p pd %p lkey %x refcount %ld\n",
+ t, mr, mr->pd, mr->lkey,
+ atomic_long_read(&mr->refcount.count));
+ rvt_get_mr(mr);
+ return -EBUSY;
+ }
+ return 0;
+}
+
+/**
+ * rvt_mr_has_lkey - is MR using lkey
+ * @mr - the mregion
+ * @lkey - the lkey
+ */
+bool rvt_mr_has_lkey(struct rvt_mregion *mr, u32 lkey)
+{
+ return mr && lkey == mr->lkey;
+}
+
+/**
+ * rvt_ss_has_lkey - is the lkey used by the sge state
+ * @ss - the sge state
+ * @lkey - the lkey
+ *
+ * This code tests for an MR in the indicated
+ * sge state.
+ */
+bool rvt_ss_has_lkey(struct rvt_sge_state *ss, u32 lkey)
+{
+ int i;
+ bool rval = false;
+
+ if (!ss->num_sge)
+ return rval;
+ /* first one */
+ rval = rvt_mr_has_lkey(ss->sge.mr, lkey);
+ /* any others */
+ for (i = 0; !rval && i < ss->num_sge - 1; i++)
+ rval = rvt_mr_has_lkey(ss->sg_list[i].mr, lkey);
+ return rval;
+}
+
+/**
* rvt_dereg_mr - unregister and free a memory region
* @ibmr: the memory region to free
*
@@ -453,22 +552,14 @@ bail_umem:
int rvt_dereg_mr(struct ib_mr *ibmr)
{
struct rvt_mr *mr = to_imr(ibmr);
- struct rvt_dev_info *rdi = ib_to_rvt(ibmr->pd->device);
- int ret = 0;
- unsigned long timeout;
+ int ret;
rvt_free_lkey(&mr->mr);
rvt_put_mr(&mr->mr); /* will set completion if last */
- timeout = wait_for_completion_timeout(&mr->mr.comp, 5 * HZ);
- if (!timeout) {
- rvt_pr_err(rdi,
- "rvt_dereg_mr timeout mr %p pd %p\n",
- mr, mr->mr.pd);
- rvt_get_mr(&mr->mr);
- ret = -EBUSY;
+ ret = rvt_check_refs(&mr->mr, __func__);
+ if (ret)
goto out;
- }
rvt_deinit_mregion(&mr->mr);
if (mr->umem)
ib_umem_release(mr->umem);
@@ -761,16 +852,12 @@ int rvt_dealloc_fmr(struct ib_fmr *ibfmr)
{
struct rvt_fmr *fmr = to_ifmr(ibfmr);
int ret = 0;
- unsigned long timeout;
rvt_free_lkey(&fmr->mr);
rvt_put_mr(&fmr->mr); /* will set completion if last */
- timeout = wait_for_completion_timeout(&fmr->mr.comp, 5 * HZ);
- if (!timeout) {
- rvt_get_mr(&fmr->mr);
- ret = -EBUSY;
+ ret = rvt_check_refs(&fmr->mr, __func__);
+ if (ret)
goto out;
- }
rvt_deinit_mregion(&fmr->mr);
kfree(fmr);
out:
@@ -778,23 +865,52 @@ out:
}
/**
+ * rvt_sge_adjacent - is isge compressible
+ * @last_sge: last outgoing SGE written
+ * @sge: SGE to check
+ *
+ * If adjacent, will update last_sge to add the length.
+ *
+ * Return: true if isge is adjacent to last sge
+ */
+static inline bool rvt_sge_adjacent(struct rvt_sge *last_sge,
+ struct ib_sge *sge)
+{
+ if (last_sge && sge->lkey == last_sge->mr->lkey &&
+ ((uint64_t)(last_sge->vaddr + last_sge->length) == sge->addr)) {
+ if (sge->lkey) {
+ if (unlikely((sge->addr - last_sge->mr->user_base +
+ sge->length > last_sge->mr->length)))
+ return false; /* overrun, caller will catch */
+ } else {
+ last_sge->length += sge->length;
+ }
+ last_sge->sge_length += sge->length;
+ trace_rvt_sge_adjacent(last_sge, sge);
+ return true;
+ }
+ return false;
+}
+
+/**
* rvt_lkey_ok - check IB SGE for validity and initialize
* @rkt: table containing lkey to check SGE against
* @pd: protection domain
* @isge: outgoing internal SGE
+ * @last_sge: last outgoing SGE written
* @sge: SGE to check
* @acc: access flags
*
* Check the IB SGE for validity and initialize our internal version
* of it.
*
- * Return: 1 if valid and successful, otherwise returns 0.
- *
- * increments the reference count upon success
+ * Increments the reference count when a new sge is stored.
*
+ * Return: 0 if compressed, 1 if added, otherwise returns -errno.
*/
int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
- struct rvt_sge *isge, struct ib_sge *sge, int acc)
+ struct rvt_sge *isge, struct rvt_sge *last_sge,
+ struct ib_sge *sge, int acc)
{
struct rvt_mregion *mr;
unsigned n, m;
@@ -804,12 +920,14 @@ int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
* We use LKEY == zero for kernel virtual addresses
* (see rvt_get_dma_mr() and dma_virt_ops).
*/
- rcu_read_lock();
if (sge->lkey == 0) {
struct rvt_dev_info *dev = ib_to_rvt(pd->ibpd.device);
if (pd->user)
- goto bail;
+ return -EINVAL;
+ if (rvt_sge_adjacent(last_sge, sge))
+ return 0;
+ rcu_read_lock();
mr = rcu_dereference(dev->dma_mr);
if (!mr)
goto bail;
@@ -824,6 +942,9 @@ int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
isge->n = 0;
goto ok;
}
+ if (rvt_sge_adjacent(last_sge, sge))
+ return 0;
+ rcu_read_lock();
mr = rcu_dereference(rkt->table[sge->lkey >> rkt->shift]);
if (!mr)
goto bail;
@@ -874,12 +995,13 @@ int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
isge->m = m;
isge->n = n;
ok:
+ trace_rvt_sge_new(isge, sge);
return 1;
bail_unref:
rvt_put_mr(mr);
bail:
rcu_read_unlock();
- return 0;
+ return -EINVAL;
}
EXPORT_SYMBOL(rvt_lkey_ok);
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 727e81cc2c8f..22df09ae809e 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -52,6 +52,7 @@
#include <linux/slab.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_hdrs.h>
+#include <rdma/opa_addr.h>
#include "qp.h"
#include "vt.h"
#include "trace.h"
@@ -118,10 +119,9 @@ const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
EXPORT_SYMBOL(ib_rvt_state_ops);
static void get_map_page(struct rvt_qpn_table *qpt,
- struct rvt_qpn_map *map,
- gfp_t gfp)
+ struct rvt_qpn_map *map)
{
- unsigned long page = get_zeroed_page(gfp);
+ unsigned long page = get_zeroed_page(GFP_KERNEL);
/*
* Free the page if someone raced with us installing it.
@@ -173,7 +173,7 @@ static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
if (!map->page) {
- get_map_page(qpt, map, GFP_KERNEL);
+ get_map_page(qpt, map);
if (!map->page) {
ret = -ENOMEM;
break;
@@ -342,14 +342,14 @@ static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
* Return: The queue pair number
*/
static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
- enum ib_qp_type type, u8 port_num, gfp_t gfp)
+ enum ib_qp_type type, u8 port_num)
{
u32 i, offset, max_scan, qpn;
struct rvt_qpn_map *map;
u32 ret;
if (rdi->driver_f.alloc_qpn)
- return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num, gfp);
+ return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num);
if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
unsigned n;
@@ -374,7 +374,7 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
max_scan = qpt->nmaps - !offset;
for (i = 0;;) {
if (unlikely(!map->page)) {
- get_map_page(qpt, map, gfp);
+ get_map_page(qpt, map);
if (unlikely(!map->page))
break;
}
@@ -422,15 +422,6 @@ bail:
return ret;
}
-static void free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
-{
- struct rvt_qpn_map *map;
-
- map = qpt->map + qpn / RVT_BITS_PER_PAGE;
- if (map->page)
- clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
-}
-
/**
* rvt_clear_mr_refs - Drop help mr refs
* @qp: rvt qp data structure
@@ -449,13 +440,9 @@ static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
if (clr_sends) {
while (qp->s_last != qp->s_head) {
struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);
- unsigned i;
- for (i = 0; i < wqe->wr.num_sge; i++) {
- struct rvt_sge *sge = &wqe->sg_list[i];
+ rvt_put_swqe(wqe);
- rvt_put_mr(sge->mr);
- }
if (qp->ibqp.qp_type == IB_QPT_UD ||
qp->ibqp.qp_type == IB_QPT_SMI ||
qp->ibqp.qp_type == IB_QPT_GSI)
@@ -471,10 +458,7 @@ static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
}
}
- if (qp->ibqp.qp_type != IB_QPT_RC)
- return;
-
- for (n = 0; n < rvt_max_atomic(rdi); n++) {
+ for (n = 0; qp->s_ack_queue && n < rvt_max_atomic(rdi); n++) {
struct rvt_ack_entry *e = &qp->s_ack_queue[n];
if (e->rdma_sge.mr) {
@@ -485,6 +469,113 @@ static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
}
/**
+ * rvt_swqe_has_lkey - return true if lkey is used by swqe
+ * @wqe - the send wqe
+ * @lkey - the lkey
+ *
+ * Test the swqe for using lkey
+ */
+static bool rvt_swqe_has_lkey(struct rvt_swqe *wqe, u32 lkey)
+{
+ int i;
+
+ for (i = 0; i < wqe->wr.num_sge; i++) {
+ struct rvt_sge *sge = &wqe->sg_list[i];
+
+ if (rvt_mr_has_lkey(sge->mr, lkey))
+ return true;
+ }
+ return false;
+}
+
+/**
+ * rvt_qp_sends_has_lkey - return true if qp sends use lkey
+ * @qp - the rvt_qp
+ * @lkey - the lkey
+ */
+static bool rvt_qp_sends_has_lkey(struct rvt_qp *qp, u32 lkey)
+{
+ u32 s_last = qp->s_last;
+
+ while (s_last != qp->s_head) {
+ struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, s_last);
+
+ if (rvt_swqe_has_lkey(wqe, lkey))
+ return true;
+
+ if (++s_last >= qp->s_size)
+ s_last = 0;
+ }
+ if (qp->s_rdma_mr)
+ if (rvt_mr_has_lkey(qp->s_rdma_mr, lkey))
+ return true;
+ return false;
+}
+
+/**
+ * rvt_qp_acks_has_lkey - return true if acks have lkey
+ * @qp - the qp
+ * @lkey - the lkey
+ */
+static bool rvt_qp_acks_has_lkey(struct rvt_qp *qp, u32 lkey)
+{
+ int i;
+ struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
+
+ for (i = 0; qp->s_ack_queue && i < rvt_max_atomic(rdi); i++) {
+ struct rvt_ack_entry *e = &qp->s_ack_queue[i];
+
+ if (rvt_mr_has_lkey(e->rdma_sge.mr, lkey))
+ return true;
+ }
+ return false;
+}
+
+/*
+ * rvt_qp_mr_clean - clean up remote ops for lkey
+ * @qp - the qp
+ * @lkey - the lkey that is being de-registered
+ *
+ * This routine checks if the lkey is being used by
+ * the qp.
+ *
+ * If so, the qp is put into an error state to eliminate
+ * any references from the qp.
+ */
+void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey)
+{
+ bool lastwqe = false;
+
+ if (qp->ibqp.qp_type == IB_QPT_SMI ||
+ qp->ibqp.qp_type == IB_QPT_GSI)
+ /* avoid special QPs */
+ return;
+ spin_lock_irq(&qp->r_lock);
+ spin_lock(&qp->s_hlock);
+ spin_lock(&qp->s_lock);
+
+ if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
+ goto check_lwqe;
+
+ if (rvt_ss_has_lkey(&qp->r_sge, lkey) ||
+ rvt_qp_sends_has_lkey(qp, lkey) ||
+ rvt_qp_acks_has_lkey(qp, lkey))
+ lastwqe = rvt_error_qp(qp, IB_WC_LOC_PROT_ERR);
+check_lwqe:
+ spin_unlock(&qp->s_lock);
+ spin_unlock(&qp->s_hlock);
+ spin_unlock_irq(&qp->r_lock);
+ if (lastwqe) {
+ struct ib_event ev;
+
+ ev.device = qp->ibqp.device;
+ ev.element.qp = &qp->ibqp;
+ ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+ }
+}
+
+/**
* rvt_remove_qp - remove qp form table
* @rdi: rvt dev struct
* @qp: qp to remove
@@ -646,6 +737,19 @@ static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
lockdep_assert_held(&qp->s_lock);
}
+/** rvt_free_qpn - Free a qpn from the bit map
+ * @qpt: QP table
+ * @qpn: queue pair number to free
+ */
+static void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
+{
+ struct rvt_qpn_map *map;
+
+ map = qpt->map + (qpn & RVT_QPN_MASK) / RVT_BITS_PER_PAGE;
+ if (map->page)
+ clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
+}
+
/**
* rvt_create_qp - create a queue pair for a device
* @ibpd: the protection domain who's device we create the queue pair for
@@ -672,7 +776,6 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
struct ib_qp *ret = ERR_PTR(-ENOMEM);
struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
void *priv = NULL;
- gfp_t gfp;
size_t sqsize;
if (!rdi)
@@ -680,18 +783,9 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
if (init_attr->cap.max_send_sge > rdi->dparms.props.max_sge ||
init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
- init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO))
+ init_attr->create_flags)
return ERR_PTR(-EINVAL);
- /* GFP_NOIO is applicable to RC QP's only */
-
- if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
- init_attr->qp_type != IB_QPT_RC)
- return ERR_PTR(-EINVAL);
-
- gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
- GFP_NOIO : GFP_KERNEL;
-
/* Check receive queue parameters if no SRQ is specified. */
if (!init_attr->srq) {
if (init_attr->cap.max_recv_sge > rdi->dparms.props.max_sge ||
@@ -719,14 +813,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
sz = sizeof(struct rvt_sge) *
init_attr->cap.max_send_sge +
sizeof(struct rvt_swqe);
- if (gfp == GFP_NOIO)
- swq = __vmalloc(
- sqsize * sz,
- gfp | __GFP_ZERO, PAGE_KERNEL);
- else
- swq = vzalloc_node(
- sqsize * sz,
- rdi->dparms.node);
+ swq = vzalloc_node(sqsize * sz, rdi->dparms.node);
if (!swq)
return ERR_PTR(-ENOMEM);
@@ -741,7 +828,8 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
} else if (init_attr->cap.max_recv_sge > 1)
sg_list_sz = sizeof(*qp->r_sg_list) *
(init_attr->cap.max_recv_sge - 1);
- qp = kzalloc_node(sz + sg_list_sz, gfp, rdi->dparms.node);
+ qp = kzalloc_node(sz + sg_list_sz, GFP_KERNEL,
+ rdi->dparms.node);
if (!qp)
goto bail_swq;
@@ -751,7 +839,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
kzalloc_node(
sizeof(*qp->s_ack_queue) *
rvt_max_atomic(rdi),
- gfp,
+ GFP_KERNEL,
rdi->dparms.node);
if (!qp->s_ack_queue)
goto bail_qp;
@@ -766,7 +854,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
* Driver needs to set up it's private QP structure and do any
* initialization that is needed.
*/
- priv = rdi->driver_f.qp_priv_alloc(rdi, qp, gfp);
+ priv = rdi->driver_f.qp_priv_alloc(rdi, qp);
if (IS_ERR(priv)) {
ret = priv;
goto bail_qp;
@@ -786,11 +874,6 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
qp->r_rq.wq = vmalloc_user(
sizeof(struct rvt_rwq) +
qp->r_rq.size * sz);
- else if (gfp == GFP_NOIO)
- qp->r_rq.wq = __vmalloc(
- sizeof(struct rvt_rwq) +
- qp->r_rq.size * sz,
- gfp | __GFP_ZERO, PAGE_KERNEL);
else
qp->r_rq.wq = vzalloc_node(
sizeof(struct rvt_rwq) +
@@ -824,7 +907,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
init_attr->qp_type,
- init_attr->port_num, gfp);
+ init_attr->port_num);
if (err < 0) {
ret = ERR_PTR(err);
goto bail_rq_wq;
@@ -936,7 +1019,7 @@ bail_ip:
kref_put(&qp->ip->ref, rvt_release_mmap_info);
bail_qpn:
- free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
+ rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
bail_rq_wq:
if (!qp->ip)
@@ -1084,6 +1167,7 @@ int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int mig = 0;
int pmtu = 0; /* for gcc warning only */
enum rdma_link_layer link;
+ int opa_ah;
link = rdma_port_get_link_layer(ibqp->device, qp->port_num);
@@ -1094,6 +1178,7 @@ int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
cur_state = attr_mask & IB_QP_CUR_STATE ?
attr->cur_qp_state : qp->state;
new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
+ opa_ah = rdma_cap_opa_ah(ibqp->device, qp->port_num);
if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
attr_mask, link))
@@ -1104,17 +1189,31 @@ int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
goto inval;
if (attr_mask & IB_QP_AV) {
- if (rdma_ah_get_dlid(&attr->ah_attr) >=
- be16_to_cpu(IB_MULTICAST_LID_BASE))
- goto inval;
+ if (opa_ah) {
+ if (rdma_ah_get_dlid(&attr->ah_attr) >=
+ opa_get_mcast_base(OPA_MCAST_NR))
+ goto inval;
+ } else {
+ if (rdma_ah_get_dlid(&attr->ah_attr) >=
+ be16_to_cpu(IB_MULTICAST_LID_BASE))
+ goto inval;
+ }
+
if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
goto inval;
}
if (attr_mask & IB_QP_ALT_PATH) {
- if (rdma_ah_get_dlid(&attr->alt_ah_attr) >=
- be16_to_cpu(IB_MULTICAST_LID_BASE))
- goto inval;
+ if (opa_ah) {
+ if (rdma_ah_get_dlid(&attr->alt_ah_attr) >=
+ opa_get_mcast_base(OPA_MCAST_NR))
+ goto inval;
+ } else {
+ if (rdma_ah_get_dlid(&attr->alt_ah_attr) >=
+ be16_to_cpu(IB_MULTICAST_LID_BASE))
+ goto inval;
+ }
+
if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
goto inval;
if (attr->alt_pkey_index >= rvt_get_npkeys(rdi))
@@ -1261,7 +1360,6 @@ int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (attr_mask & IB_QP_PATH_MTU) {
qp->pmtu = rdi->driver_f.mtu_from_qp(rdi, qp, pmtu);
- qp->path_mtu = rdi->driver_f.mtu_to_path_mtu(qp->pmtu);
qp->log_pmtu = ilog2(qp->pmtu);
}
@@ -1280,9 +1378,7 @@ int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (attr_mask & IB_QP_TIMEOUT) {
qp->timeout = attr->timeout;
- qp->timeout_jiffies =
- usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
- 1000UL);
+ qp->timeout_jiffies = rvt_timeout_to_jiffies(qp->timeout);
}
if (attr_mask & IB_QP_QKEY)
@@ -1325,19 +1421,6 @@ inval:
return -EINVAL;
}
-/** rvt_free_qpn - Free a qpn from the bit map
- * @qpt: QP table
- * @qpn: queue pair number to free
- */
-static void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
-{
- struct rvt_qpn_map *map;
-
- map = qpt->map + qpn / RVT_BITS_PER_PAGE;
- if (map->page)
- clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
-}
-
/**
* rvt_destroy_qp - destroy a queue pair
* @ibqp: the queue pair to destroy
@@ -1399,7 +1482,7 @@ int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
attr->qp_state = qp->state;
attr->cur_qp_state = attr->qp_state;
- attr->path_mtu = qp->path_mtu;
+ attr->path_mtu = rdi->driver_f.mtu_to_path_mtu(qp->pmtu);
attr->path_mig_state = qp->s_mig_state;
attr->qkey = qp->qkey;
attr->rq_psn = qp->r_psn & rdi->dparms.psn_mask;
@@ -1719,22 +1802,23 @@ static int rvt_post_one_wr(struct rvt_qp *qp,
wqe->length = 0;
j = 0;
if (wr->num_sge) {
+ struct rvt_sge *last_sge = NULL;
+
acc = wr->opcode >= IB_WR_RDMA_READ ?
IB_ACCESS_LOCAL_WRITE : 0;
for (i = 0; i < wr->num_sge; i++) {
u32 length = wr->sg_list[i].length;
- int ok;
if (length == 0)
continue;
- ok = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j],
- &wr->sg_list[i], acc);
- if (!ok) {
- ret = -EINVAL;
+ ret = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j], last_sge,
+ &wr->sg_list[i], acc);
+ if (unlikely(ret < 0))
goto bail_inval_free;
- }
wqe->length += length;
- j++;
+ if (ret)
+ last_sge = &wqe->sg_list[j];
+ j += ret;
}
wqe->wr.num_sge = j;
}
@@ -1781,7 +1865,7 @@ static int rvt_post_one_wr(struct rvt_qp *qp,
wqe->wr.send_flags &= ~RVT_SEND_RESERVE_USED;
qp->s_avail--;
}
- trace_rvt_post_one_wr(qp, wqe);
+ trace_rvt_post_one_wr(qp, wqe, wr->num_sge);
smp_wmb(); /* see request builders */
qp->s_head = next;
@@ -2089,3 +2173,147 @@ enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t)
return HRTIMER_NORESTART;
}
EXPORT_SYMBOL(rvt_rc_rnr_retry);
+
+/**
+ * rvt_qp_iter_init - initialize a QP iterator
+ * @rdi - rvt devinfo
+ * @v - u64 value
+ *
+ * This returns an iterator suitable for iterating QPs
+ * in the system.
+ *
+ * The @cb is a user defined callback and @v is a 64
+ * bit value passed to and relevant for processing in the
+ * @cb. An example use case would be to alter QP processing
+ * based on criteria not part of the rvt_qp.
+ *
+ * Use cases that require memory allocation to succeed
+ * must preallocate appropriately.
+ *
+ * Return: a pointer to an rvt_qp_iter or NULL
+ */
+struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
+ u64 v,
+ void (*cb)(struct rvt_qp *qp, u64 v))
+{
+ struct rvt_qp_iter *i;
+
+ i = kzalloc(sizeof(*i), GFP_KERNEL);
+ if (!i)
+ return NULL;
+
+ i->rdi = rdi;
+ /* number of special QPs (SMI/GSI) for device */
+ i->specials = rdi->ibdev.phys_port_cnt * 2;
+ i->v = v;
+ i->cb = cb;
+
+ return i;
+}
+EXPORT_SYMBOL(rvt_qp_iter_init);
+
+/**
+ * rvt_qp_iter_next - return the next QP in iter
+ * @iter - the iterator
+ *
+ * Fine grained QP iterator suitable for use
+ * with debugfs seq_file mechanisms.
+ *
+ * Updates iter->qp with the current QP when the return
+ * value is 0.
+ *
+ * Return: 0 - iter->qp is valid 1 - no more QPs
+ */
+int rvt_qp_iter_next(struct rvt_qp_iter *iter)
+ __must_hold(RCU)
+{
+ int n = iter->n;
+ int ret = 1;
+ struct rvt_qp *pqp = iter->qp;
+ struct rvt_qp *qp;
+ struct rvt_dev_info *rdi = iter->rdi;
+
+ /*
+ * The approach is to consider the special qps
+ * as additional table entries before the
+ * real hash table. Since the qp code sets
+ * the qp->next hash link to NULL, this works just fine.
+ *
+ * iter->specials is 2 * # ports
+ *
+ * n = 0..iter->specials is the special qp indices
+ *
+ * n = iter->specials..rdi->qp_dev->qp_table_size+iter->specials are
+ * the potential hash bucket entries
+ *
+ */
+ for (; n < rdi->qp_dev->qp_table_size + iter->specials; n++) {
+ if (pqp) {
+ qp = rcu_dereference(pqp->next);
+ } else {
+ if (n < iter->specials) {
+ struct rvt_ibport *rvp;
+ int pidx;
+
+ pidx = n % rdi->ibdev.phys_port_cnt;
+ rvp = rdi->ports[pidx];
+ qp = rcu_dereference(rvp->qp[n & 1]);
+ } else {
+ qp = rcu_dereference(
+ rdi->qp_dev->qp_table[
+ (n - iter->specials)]);
+ }
+ }
+ pqp = qp;
+ if (qp) {
+ iter->qp = qp;
+ iter->n = n;
+ return 0;
+ }
+ }
+ return ret;
+}
+EXPORT_SYMBOL(rvt_qp_iter_next);
+
+/**
+ * rvt_qp_iter - iterate all QPs
+ * @rdi - rvt devinfo
+ * @v - a 64 bit value
+ * @cb - a callback
+ *
+ * This provides a way for iterating all QPs.
+ *
+ * The @cb is a user defined callback and @v is a 64
+ * bit value passed to and relevant for processing in the
+ * cb. An example use case would be to alter QP processing
+ * based on criteria not part of the rvt_qp.
+ *
+ * The code has an internal iterator to simplify
+ * non seq_file use cases.
+ */
+void rvt_qp_iter(struct rvt_dev_info *rdi,
+ u64 v,
+ void (*cb)(struct rvt_qp *qp, u64 v))
+{
+ int ret;
+ struct rvt_qp_iter i = {
+ .rdi = rdi,
+ .specials = rdi->ibdev.phys_port_cnt * 2,
+ .v = v,
+ .cb = cb
+ };
+
+ rcu_read_lock();
+ do {
+ ret = rvt_qp_iter_next(&i);
+ if (!ret) {
+ rvt_get_qp(i.qp);
+ rcu_read_unlock();
+ i.cb(i.qp, i.v);
+ rcu_read_lock();
+ rvt_put_qp(i.qp);
+ }
+ } while (!ret);
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL(rvt_qp_iter);
diff --git a/drivers/infiniband/sw/rdmavt/trace_mr.h b/drivers/infiniband/sw/rdmavt/trace_mr.h
index 3318a6c36373..976e482930a3 100644
--- a/drivers/infiniband/sw/rdmavt/trace_mr.h
+++ b/drivers/infiniband/sw/rdmavt/trace_mr.h
@@ -103,6 +103,68 @@ DEFINE_EVENT(
TP_PROTO(struct rvt_mregion *mr, u16 m, u16 n, void *v, size_t len),
TP_ARGS(mr, m, n, v, len));
+DECLARE_EVENT_CLASS(
+ rvt_sge_template,
+ TP_PROTO(struct rvt_sge *sge, struct ib_sge *isge),
+ TP_ARGS(sge, isge),
+ TP_STRUCT__entry(
+ RDI_DEV_ENTRY(ib_to_rvt(sge->mr->pd->device))
+ __field(struct rvt_mregion *, mr)
+ __field(struct rvt_sge *, sge)
+ __field(struct ib_sge *, isge)
+ __field(void *, vaddr)
+ __field(u64, ivaddr)
+ __field(u32, lkey)
+ __field(u32, sge_length)
+ __field(u32, length)
+ __field(u32, ilength)
+ __field(int, user)
+ __field(u16, m)
+ __field(u16, n)
+ ),
+ TP_fast_assign(
+ RDI_DEV_ASSIGN(ib_to_rvt(sge->mr->pd->device));
+ __entry->mr = sge->mr;
+ __entry->sge = sge;
+ __entry->isge = isge;
+ __entry->vaddr = sge->vaddr;
+ __entry->ivaddr = isge->addr;
+ __entry->lkey = sge->mr->lkey;
+ __entry->sge_length = sge->sge_length;
+ __entry->length = sge->length;
+ __entry->ilength = isge->length;
+ __entry->m = sge->m;
+ __entry->n = sge->n;
+ __entry->user = ibpd_to_rvtpd(sge->mr->pd)->user;
+ ),
+ TP_printk(
+ "[%s] mr %p sge %p isge %p vaddr %p ivaddr %llx lkey %x sge_length %u length %u ilength %u m %u n %u user %u",
+ __get_str(dev),
+ __entry->mr,
+ __entry->sge,
+ __entry->isge,
+ __entry->vaddr,
+ __entry->ivaddr,
+ __entry->lkey,
+ __entry->sge_length,
+ __entry->length,
+ __entry->ilength,
+ __entry->m,
+ __entry->n,
+ __entry->user
+ )
+);
+
+DEFINE_EVENT(
+ rvt_sge_template, rvt_sge_adjacent,
+ TP_PROTO(struct rvt_sge *sge, struct ib_sge *isge),
+ TP_ARGS(sge, isge));
+
+DEFINE_EVENT(
+ rvt_sge_template, rvt_sge_new,
+ TP_PROTO(struct rvt_sge *sge, struct ib_sge *isge),
+ TP_ARGS(sge, isge));
+
#endif /* __RVT_TRACE_MR_H */
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/infiniband/sw/rdmavt/trace_tx.h b/drivers/infiniband/sw/rdmavt/trace_tx.h
index a613a2223751..0ef25fc49f25 100644
--- a/drivers/infiniband/sw/rdmavt/trace_tx.h
+++ b/drivers/infiniband/sw/rdmavt/trace_tx.h
@@ -84,12 +84,12 @@ __print_symbolic(opcode, \
wr_opcode_name(RESERVED10))
#define POS_PRN \
-"[%s] wqe %p wr_id %llx send_flags %x qpn %x qpt %u psn %x lpsn %x ssn %x length %u opcode 0x%.2x,%s size %u avail %u head %u last %u pid %u num_sge %u"
+"[%s] wqe %p wr_id %llx send_flags %x qpn %x qpt %u psn %x lpsn %x ssn %x length %u opcode 0x%.2x,%s size %u avail %u head %u last %u pid %u num_sge %u wr_num_sge %u"
TRACE_EVENT(
rvt_post_one_wr,
- TP_PROTO(struct rvt_qp *qp, struct rvt_swqe *wqe),
- TP_ARGS(qp, wqe),
+ TP_PROTO(struct rvt_qp *qp, struct rvt_swqe *wqe, int wr_num_sge),
+ TP_ARGS(qp, wqe, wr_num_sge),
TP_STRUCT__entry(
RDI_DEV_ENTRY(ib_to_rvt(qp->ibqp.device))
__field(u64, wr_id)
@@ -108,6 +108,7 @@ TRACE_EVENT(
__field(int, send_flags)
__field(pid_t, pid)
__field(int, num_sge)
+ __field(int, wr_num_sge)
),
TP_fast_assign(
RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device))
@@ -127,6 +128,7 @@ TRACE_EVENT(
__entry->ssn = wqe->ssn;
__entry->send_flags = wqe->wr.send_flags;
__entry->num_sge = wqe->wr.num_sge;
+ __entry->wr_num_sge = wr_num_sge;
),
TP_printk(
POS_PRN,
@@ -146,7 +148,8 @@ TRACE_EVENT(
__entry->head,
__entry->last,
__entry->pid,
- __entry->num_sge
+ __entry->num_sge,
+ __entry->wr_num_sge
)
);
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
index 0d7c6bb551d9..64bdd442078a 100644
--- a/drivers/infiniband/sw/rdmavt/vt.c
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -202,8 +202,13 @@ static int rvt_modify_port(struct ib_device *ibdev, u8 port_num,
return -EINVAL;
rvp = rdi->ports[port_index];
- rvp->port_cap_flags |= props->set_port_cap_mask;
- rvp->port_cap_flags &= ~props->clr_port_cap_mask;
+ if (port_modify_mask & IB_PORT_OPA_MASK_CHG) {
+ rvp->port_cap3_flags |= props->set_port_cap_mask;
+ rvp->port_cap3_flags &= ~props->clr_port_cap_mask;
+ } else {
+ rvp->port_cap_flags |= props->set_port_cap_mask;
+ rvp->port_cap_flags &= ~props->clr_port_cap_mask;
+ }
if (props->set_port_cap_mask || props->clr_port_cap_mask)
rdi->driver_f.cap_mask_chg(rdi, port_num);
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index c21c913f911a..8c3d30b3092d 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -38,7 +38,6 @@
MODULE_AUTHOR("Bob Pearson, Frank Zago, John Groves, Kamal Heib");
MODULE_DESCRIPTION("Soft RDMA transport");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION("0.2");
/* free resources for all ports on a device */
static void rxe_cleanup_ports(struct rxe_dev *rxe)
diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
index 1ac5b8551a4d..6447d736d5a4 100644
--- a/drivers/infiniband/sw/rxe/rxe.h
+++ b/drivers/infiniband/sw/rxe/rxe.h
@@ -97,7 +97,7 @@ int rxe_rcv(struct sk_buff *skb);
void rxe_dev_put(struct rxe_dev *rxe);
struct rxe_dev *net_to_rxe(struct net_device *ndev);
-struct rxe_dev *get_rxe_by_name(const char* name);
+struct rxe_dev *get_rxe_by_name(const char *name);
void rxe_port_up(struct rxe_dev *rxe);
void rxe_port_down(struct rxe_dev *rxe);
diff --git a/drivers/infiniband/sw/rxe/rxe_av.c b/drivers/infiniband/sw/rxe/rxe_av.c
index 5bddf469361b..1cc9e2e1365d 100644
--- a/drivers/infiniband/sw/rxe/rxe_av.c
+++ b/drivers/infiniband/sw/rxe/rxe_av.c
@@ -38,18 +38,13 @@ int rxe_av_chk_attr(struct rxe_dev *rxe, struct rdma_ah_attr *attr)
{
struct rxe_port *port;
- if (rdma_ah_get_port_num(attr) != 1) {
- pr_info("invalid port_num = %d\n", rdma_ah_get_port_num(attr));
- return -EINVAL;
- }
-
port = &rxe->port;
if (rdma_ah_get_ah_flags(attr) & IB_AH_GRH) {
u8 sgid_index = rdma_ah_read_grh(attr)->sgid_index;
if (sgid_index > port->attr.gid_tbl_len) {
- pr_info("invalid sgid index = %d\n", sgid_index);
+ pr_warn("invalid sgid index = %d\n", sgid_index);
return -EINVAL;
}
}
diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
index 49fe42c23f4d..c4aabf78dc90 100644
--- a/drivers/infiniband/sw/rxe/rxe_cq.c
+++ b/drivers/infiniband/sw/rxe/rxe_cq.c
@@ -69,6 +69,14 @@ err1:
static void rxe_send_complete(unsigned long data)
{
struct rxe_cq *cq = (struct rxe_cq *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cq->cq_lock, flags);
+ if (cq->is_dying) {
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}
@@ -97,6 +105,8 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
if (udata)
cq->is_user = 1;
+ cq->is_dying = false;
+
tasklet_init(&cq->comp_task, rxe_send_complete, (unsigned long)cq);
spin_lock_init(&cq->cq_lock);
@@ -156,6 +166,15 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
return 0;
}
+void rxe_cq_disable(struct rxe_cq *cq)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cq->cq_lock, flags);
+ cq->is_dying = true;
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
+}
+
void rxe_cq_cleanup(struct rxe_pool_entry *arg)
{
struct rxe_cq *cq = container_of(arg, typeof(*cq), pelem);
diff --git a/drivers/infiniband/sw/rxe/rxe_hw_counters.c b/drivers/infiniband/sw/rxe/rxe_hw_counters.c
index 7ef90aad7dfd..6aeb7a165e46 100644
--- a/drivers/infiniband/sw/rxe/rxe_hw_counters.c
+++ b/drivers/infiniband/sw/rxe/rxe_hw_counters.c
@@ -33,7 +33,7 @@
#include "rxe.h"
#include "rxe_hw_counters.h"
-const char * const rxe_counter_name[] = {
+static const char * const rxe_counter_name[] = {
[RXE_CNT_SENT_PKTS] = "sent_pkts",
[RXE_CNT_RCVD_PKTS] = "rcvd_pkts",
[RXE_CNT_DUP_REQ] = "duplicate_request",
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index d6299edf9a5b..77b3ed0df936 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -64,6 +64,8 @@ int rxe_cq_resize_queue(struct rxe_cq *cq, int new_cqe, struct ib_udata *udata);
int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited);
+void rxe_cq_disable(struct rxe_cq *cq);
+
void rxe_cq_cleanup(struct rxe_pool_entry *arg);
/* rxe_mcast.c */
@@ -219,8 +221,6 @@ static inline void rxe_advance_resp_resource(struct rxe_qp *qp)
void retransmit_timer(unsigned long data);
void rnr_nak_timer(unsigned long data);
-void dump_qp(struct rxe_qp *qp);
-
/* rxe_srq.c */
#define IB_SRQ_INIT_MASK (~IB_SRQ_LIMIT)
@@ -250,7 +250,7 @@ void rxe_resp_queue_pkt(struct rxe_dev *rxe,
void rxe_comp_queue_pkt(struct rxe_dev *rxe,
struct rxe_qp *qp, struct sk_buff *skb);
-static inline unsigned wr_opcode_mask(int opcode, struct rxe_qp *qp)
+static inline unsigned int wr_opcode_mask(int opcode, struct rxe_qp *qp)
{
return rxe_wr_opcode_info[opcode].mask[qp->ibqp.qp_type];
}
diff --git a/drivers/infiniband/sw/rxe/rxe_mmap.c b/drivers/infiniband/sw/rxe/rxe_mmap.c
index bd812e00988e..d22431e3a908 100644
--- a/drivers/infiniband/sw/rxe/rxe_mmap.c
+++ b/drivers/infiniband/sw/rxe/rxe_mmap.c
@@ -76,7 +76,7 @@ static void rxe_vma_close(struct vm_area_struct *vma)
kref_put(&ip->ref, rxe_mmap_release);
}
-static struct vm_operations_struct rxe_vm_ops = {
+static const struct vm_operations_struct rxe_vm_ops = {
.open = rxe_vma_open,
.close = rxe_vma_close,
};
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index e37cc89987e1..5c2684bf430f 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -367,11 +367,11 @@ int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
dest = (dir == to_mem_obj) ?
((void *)(uintptr_t)iova) : addr;
+ memcpy(dest, src, length);
+
if (crcp)
*crcp = rxe_crc32(to_rdev(mem->pd->ibpd.device),
- *crcp, src, length);
-
- memcpy(dest, src, length);
+ *crcp, dest, length);
return 0;
}
@@ -401,11 +401,11 @@ int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
if (bytes > length)
bytes = length;
+ memcpy(dest, src, bytes);
+
if (crcp)
crc = rxe_crc32(to_rdev(mem->pd->ibpd.device),
- crc, src, bytes);
-
- memcpy(dest, src, bytes);
+ crc, dest, bytes);
length -= bytes;
addr += bytes;
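[Editorial note, not part of the patch] The rxe_mr.c hunks reorder rxe_mem_copy() so memcpy() runs before the CRC update and the CRC is computed over the destination rather than the source, meaning the checksum covers the bytes that actually landed in memory. A small standalone sketch of that copy-then-checksum ordering, using zlib's crc32() purely as a stand-in for rxe_crc32() (copy_and_crc is an illustrative helper, not kernel API):

#include <stdio.h>
#include <string.h>
#include <zlib.h>   /* zlib's crc32() stands in for rxe_crc32() here */

/*
 * Copy first, then checksum the destination buffer, so the CRC reflects
 * what was actually written rather than the source it was copied from.
 */
static unsigned long copy_and_crc(void *dest, const void *src,
				  size_t len, unsigned long crc)
{
	memcpy(dest, src, len);
	return crc32(crc, dest, len);
}

int main(void)
{
	char payload[] = "rdma payload";
	char landing[sizeof(payload)];
	unsigned long crc = crc32(0L, Z_NULL, 0);

	crc = copy_and_crc(landing, payload, sizeof(payload), crc);
	printf("crc32 over destination: 0x%08lx\n", crc);
	return 0;
}

Compile with cc file.c -lz. Checksumming dest instead of src means a corrupted copy cannot hide behind a CRC taken from the intact source buffer.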
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index c3a140ed4df2..59dee10bebcb 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -191,7 +191,7 @@ static struct dst_entry *rxe_find_route(struct rxe_dev *rxe,
if (qp_type(qp) == IB_QPT_RC)
dst = sk_dst_get(qp->sk->sk);
- if (!dst || !(dst->obsolete && dst->ops->check(dst, 0))) {
+ if (!dst || !dst_check(dst, qp->dst_cookie)) {
if (dst)
dst_release(dst);
@@ -209,6 +209,11 @@ static struct dst_entry *rxe_find_route(struct rxe_dev *rxe,
saddr6 = &av->sgid_addr._sockaddr_in6.sin6_addr;
daddr6 = &av->dgid_addr._sockaddr_in6.sin6_addr;
dst = rxe_find_route6(rxe->ndev, saddr6, daddr6);
+#if IS_ENABLED(CONFIG_IPV6)
+ if (dst)
+ qp->dst_cookie =
+ rt6_get_cookie((struct rt6_info *)dst);
+#endif
}
}
@@ -337,7 +342,7 @@ static void prepare_ipv6_hdr(struct dst_entry *dst, struct sk_buff *skb,
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED
| IPSKB_REROUTED);
- skb_dst_set(skb, dst);
+ skb_dst_set(skb, dst_clone(dst));
__skb_push(skb, sizeof(*ip6h));
skb_reset_network_header(skb);
@@ -388,7 +393,7 @@ static int prepare6(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
struct sk_buff *skb, struct rxe_av *av)
{
struct rxe_qp *qp = pkt->qp;
- struct dst_entry *dst = NULL;
+ struct dst_entry *dst;
struct in6_addr *saddr = &av->sgid_addr._sockaddr_in6.sin6_addr;
struct in6_addr *daddr = &av->dgid_addr._sockaddr_in6.sin6_addr;
@@ -441,6 +446,8 @@ static void rxe_skb_tx_dtor(struct sk_buff *skb)
if (unlikely(qp->need_req_skb &&
skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW))
rxe_run_task(&qp->req.task, 1);
+
+ rxe_drop_ref(qp);
}
int rxe_send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct sk_buff *skb)
@@ -458,12 +465,17 @@ int rxe_send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct sk_buff *skb)
nskb->destructor = rxe_skb_tx_dtor;
nskb->sk = pkt->qp->sk->sk;
+ rxe_add_ref(pkt->qp);
+ atomic_inc(&pkt->qp->skb_out);
+
if (av->network_type == RDMA_NETWORK_IPV4) {
err = ip_local_out(dev_net(skb_dst(skb)->dev), nskb->sk, nskb);
} else if (av->network_type == RDMA_NETWORK_IPV6) {
err = ip6_local_out(dev_net(skb_dst(skb)->dev), nskb->sk, nskb);
} else {
pr_err("Unknown layer 3 protocol: %d\n", av->network_type);
+ atomic_dec(&pkt->qp->skb_out);
+ rxe_drop_ref(pkt->qp);
kfree_skb(nskb);
return -EINVAL;
}
@@ -473,9 +485,7 @@ int rxe_send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct sk_buff *skb)
return -EAGAIN;
}
- atomic_inc(&pkt->qp->skb_out);
kfree_skb(skb);
-
return 0;
}
@@ -641,8 +651,13 @@ static int rxe_notify(struct notifier_block *not_blk,
pr_info("%s changed mtu to %d\n", ndev->name, ndev->mtu);
rxe_set_mtu(rxe, ndev->mtu);
break;
- case NETDEV_REBOOT:
case NETDEV_CHANGE:
+ if (netif_running(ndev) && netif_carrier_ok(ndev))
+ rxe_port_up(rxe);
+ else
+ rxe_port_down(rxe);
+ break;
+ case NETDEV_REBOOT:
case NETDEV_GOING_DOWN:
case NETDEV_CHANGEADDR:
case NETDEV_CHANGENAME:
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index 75d11ee635ec..c1b5f38f31a5 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -188,7 +188,7 @@ int rxe_pool_init(
struct rxe_dev *rxe,
struct rxe_pool *pool,
enum rxe_elem_type type,
- unsigned max_elem)
+ unsigned int max_elem)
{
int err = 0;
size_t size = rxe_type_info[type].size;
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 80ccc7c7c341..00bda9380a2e 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -851,13 +851,8 @@ void rxe_qp_cleanup(struct rxe_pool_entry *arg)
qp->resp.mr = NULL;
}
- if (qp_type(qp) == IB_QPT_RC) {
- struct dst_entry *dst = NULL;
-
- dst = sk_dst_get(qp->sk->sk);
- if (dst)
- dst_release(dst);
- }
+ if (qp_type(qp) == IB_QPT_RC)
+ sk_dst_reset(qp->sk->sk);
free_rd_atomic_resources(qp);
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 7ee465d1a1e1..d84222f9d5d2 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -43,7 +43,7 @@ static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
static inline void retry_first_write_send(struct rxe_qp *qp,
struct rxe_send_wqe *wqe,
- unsigned mask, int npsn)
+ unsigned int mask, int npsn)
{
int i;
@@ -594,8 +594,10 @@ int rxe_requester(void *arg)
rxe_add_ref(qp);
next_wqe:
- if (unlikely(!qp->valid))
+ if (unlikely(!qp->valid)) {
+ rxe_drain_req_pkts(qp, true);
goto exit;
+ }
if (unlikely(qp->req.state == QP_STATE_ERROR)) {
rxe_drain_req_pkts(qp, true);
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index be944d5aa9af..4240866a5331 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -1055,7 +1055,7 @@ static struct resp_res *find_resource(struct rxe_qp *qp, u32 psn)
{
int i;
- for (i = 0; i < qp->attr.max_rd_atomic; i++) {
+ for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
struct resp_res *res = &qp->resp.resources[i];
if (res->type == 0)
@@ -1219,6 +1219,9 @@ void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
kfree_skb(skb);
}
+ if (notify)
+ return;
+
while (!qp->srq && qp->rq.queue && queue_head(qp->rq.queue))
advance_consumer(qp->rq.queue);
}
diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c
index d2a14a1bdc7f..ea3810b29273 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.c
+++ b/drivers/infiniband/sw/rxe/rxe_task.c
@@ -78,7 +78,7 @@ void rxe_do_task(unsigned long data)
default:
spin_unlock_irqrestore(&task->state_lock, flags);
- pr_warn("bad state = %d in rxe_do_task\n", task->state);
+ pr_warn("%s failed with bad state %d\n", __func__, task->state);
return;
}
@@ -105,7 +105,7 @@ void rxe_do_task(unsigned long data)
break;
default:
- pr_warn("bad state = %d in rxe_do_task\n",
+ pr_warn("%s failed with bad state %d\n", __func__,
task->state);
}
spin_unlock_irqrestore(&task->state_lock, flags);
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 073e66783f1d..0b362f49a10a 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -51,40 +51,16 @@ static int rxe_query_device(struct ib_device *dev,
return 0;
}
-static void rxe_eth_speed_to_ib_speed(int speed, u8 *active_speed,
- u8 *active_width)
-{
- if (speed <= 1000) {
- *active_width = IB_WIDTH_1X;
- *active_speed = IB_SPEED_SDR;
- } else if (speed <= 10000) {
- *active_width = IB_WIDTH_1X;
- *active_speed = IB_SPEED_FDR10;
- } else if (speed <= 20000) {
- *active_width = IB_WIDTH_4X;
- *active_speed = IB_SPEED_DDR;
- } else if (speed <= 30000) {
- *active_width = IB_WIDTH_4X;
- *active_speed = IB_SPEED_QDR;
- } else if (speed <= 40000) {
- *active_width = IB_WIDTH_4X;
- *active_speed = IB_SPEED_FDR10;
- } else {
- *active_width = IB_WIDTH_4X;
- *active_speed = IB_SPEED_EDR;
- }
-}
-
static int rxe_query_port(struct ib_device *dev,
u8 port_num, struct ib_port_attr *attr)
{
struct rxe_dev *rxe = to_rdev(dev);
struct rxe_port *port;
- u32 speed;
+ int rc = -EINVAL;
if (unlikely(port_num != 1)) {
pr_warn("invalid port_number %d\n", port_num);
- goto err1;
+ goto out;
}
port = &rxe->port;
@@ -93,29 +69,12 @@ static int rxe_query_port(struct ib_device *dev,
*attr = port->attr;
mutex_lock(&rxe->usdev_lock);
- if (rxe->ndev->ethtool_ops->get_link_ksettings) {
- struct ethtool_link_ksettings ks;
-
- rxe->ndev->ethtool_ops->get_link_ksettings(rxe->ndev, &ks);
- speed = ks.base.speed;
- } else if (rxe->ndev->ethtool_ops->get_settings) {
- struct ethtool_cmd cmd;
-
- rxe->ndev->ethtool_ops->get_settings(rxe->ndev, &cmd);
- speed = cmd.speed;
- } else {
- pr_warn("%s speed is unknown, defaulting to 1000\n",
- rxe->ndev->name);
- speed = 1000;
- }
- rxe_eth_speed_to_ib_speed(speed, &attr->active_speed,
- &attr->active_width);
+ rc = ib_get_eth_speed(dev, port_num, &attr->active_speed,
+ &attr->active_width);
mutex_unlock(&rxe->usdev_lock);
- return 0;
-
-err1:
- return -EINVAL;
+out:
+ return rc;
}
static int rxe_query_gid(struct ib_device *device,
@@ -914,6 +873,9 @@ static int rxe_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
spin_unlock_irqrestore(&rq->producer_lock, flags);
+ if (qp->resp.state == QP_STATE_ERROR)
+ rxe_run_task(&qp->resp.task, 1);
+
err1:
return err;
}
@@ -957,6 +919,8 @@ static int rxe_destroy_cq(struct ib_cq *ibcq)
{
struct rxe_cq *cq = to_rcq(ibcq);
+ rxe_cq_disable(cq);
+
rxe_drop_ref(cq);
return 0;
}
@@ -1207,8 +1171,8 @@ static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
return rxe_mcast_drop_grp_elem(rxe, qp, mgid);
}
-static ssize_t rxe_show_parent(struct device *device,
- struct device_attribute *attr, char *buf)
+static ssize_t parent_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct rxe_dev *rxe = container_of(device, struct rxe_dev,
ib_dev.dev);
@@ -1216,7 +1180,7 @@ static ssize_t rxe_show_parent(struct device *device,
return snprintf(buf, 16, "%s\n", rxe_parent_name(rxe, 1));
}
-static DEVICE_ATTR(parent, S_IRUGO, rxe_show_parent, NULL);
+static DEVICE_ATTR_RO(parent);
static struct device_attribute *rxe_dev_attributes[] = {
&dev_attr_parent,
@@ -1240,6 +1204,8 @@ int rxe_register_device(struct rxe_dev *rxe)
addrconf_addr_eui48((unsigned char *)&dev->node_guid,
rxe->ndev->dev_addr);
dev->dev.dma_ops = &dma_virt_ops;
+ dma_coerce_mask_and_coherent(&dev->dev,
+ dma_get_required_mask(dev->dev.parent));
dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION;
dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
@@ -1331,15 +1297,15 @@ int rxe_register_device(struct rxe_dev *rxe)
err = ib_register_device(dev, NULL);
if (err) {
- pr_warn("rxe_register_device failed, err = %d\n", err);
+ pr_warn("%s failed with error %d\n", __func__, err);
goto err1;
}
for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i) {
err = device_create_file(&dev->dev, rxe_dev_attributes[i]);
if (err) {
- pr_warn("device_create_file failed, i = %d, err = %d\n",
- i, err);
+ pr_warn("%s failed with error %d for attr number %d\n",
+ __func__, err, i);
goto err2;
}
}
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index 5a180fbe40d9..0c2dbe45c729 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -89,6 +89,7 @@ struct rxe_cq {
struct rxe_queue *queue;
spinlock_t cq_lock;
u8 notify;
+ bool is_dying;
int is_user;
struct tasklet_struct comp_task;
};
@@ -247,6 +248,7 @@ struct rxe_qp {
struct rxe_rq rq;
struct socket *sk;
+ u32 dst_cookie;
struct rxe_av pri_av;
struct rxe_av alt_av;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index ff50a7bd66d8..4a5c7a07a631 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -336,6 +336,8 @@ struct ipoib_dev_priv {
unsigned long flags;
struct rw_semaphore vlan_rwsem;
+ struct mutex mcast_mutex;
+ struct mutex sysfs_mutex;
struct rb_root path_tree;
struct list_head path_list;
@@ -366,7 +368,7 @@ struct ipoib_dev_priv {
u32 qkey;
union ib_gid local_gid;
- u16 local_lid;
+ u32 local_lid;
unsigned int admin_mtu;
unsigned int mcast_mtu;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 7cbcfdac6529..14b62f7472b4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -39,6 +39,7 @@
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/sched/signal.h>
+#include <linux/sched/mm.h>
#include "ipoib.h"
@@ -510,7 +511,6 @@ static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
case IB_CM_REQ_RECEIVED:
return ipoib_cm_req_handler(cm_id, event);
case IB_CM_DREQ_RECEIVED:
- p = cm_id->context;
ib_send_cm_drep(cm_id, NULL, 0);
/* Fall through */
case IB_CM_REJ_RECEIVED:
@@ -954,7 +954,7 @@ void ipoib_cm_dev_stop(struct net_device *dev)
break;
}
spin_unlock_irq(&priv->lock);
- msleep(1);
+ usleep_range(1000, 2000);
ipoib_drain_cq(dev);
spin_lock_irq(&priv->lock);
}
@@ -1047,9 +1047,8 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
.sq_sig_type = IB_SIGNAL_ALL_WR,
.qp_type = IB_QPT_RC,
.qp_context = tx,
- .create_flags = IB_QP_CREATE_USE_GFP_NOIO
+ .create_flags = 0
};
-
struct ib_qp *tx_qp;
if (dev->features & NETIF_F_SG)
@@ -1057,10 +1056,6 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
min_t(u32, priv->ca->attrs.max_sge, MAX_SKB_FRAGS + 1);
tx_qp = ib_create_qp(priv->pd, &attr);
- if (PTR_ERR(tx_qp) == -EINVAL) {
- attr.create_flags &= ~IB_QP_CREATE_USE_GFP_NOIO;
- tx_qp = ib_create_qp(priv->pd, &attr);
- }
tx->max_send_sge = attr.cap.max_send_sge;
return tx_qp;
}
@@ -1131,10 +1126,11 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
struct sa_path_rec *pathrec)
{
struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
+ unsigned int noio_flag;
int ret;
- p->tx_ring = __vmalloc(ipoib_sendq_size * sizeof *p->tx_ring,
- GFP_NOIO, PAGE_KERNEL);
+ noio_flag = memalloc_noio_save();
+ p->tx_ring = vzalloc(ipoib_sendq_size * sizeof(*p->tx_ring));
if (!p->tx_ring) {
ret = -ENOMEM;
goto err_tx;
@@ -1142,9 +1138,10 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
memset(p->tx_ring, 0, ipoib_sendq_size * sizeof *p->tx_ring);
p->qp = ipoib_cm_create_tx_qp(p->dev, p);
+ memalloc_noio_restore(noio_flag);
if (IS_ERR(p->qp)) {
ret = PTR_ERR(p->qp);
- ipoib_warn(priv, "failed to allocate tx qp: %d\n", ret);
+ ipoib_warn(priv, "failed to create tx qp: %d\n", ret);
goto err_qp;
}
@@ -1206,7 +1203,7 @@ static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
goto timeout;
}
- msleep(1);
+ usleep_range(1000, 2000);
}
}
@@ -1509,9 +1506,14 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
if (test_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags))
return -EPERM;
- if (!rtnl_trylock())
+ if (!mutex_trylock(&priv->sysfs_mutex))
return restart_syscall();
+ if (!rtnl_trylock()) {
+ mutex_unlock(&priv->sysfs_mutex);
+ return restart_syscall();
+ }
+
ret = ipoib_set_mode(dev, buf);
/* The assumption is that the function ipoib_set_mode returned
@@ -1520,6 +1522,7 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
*/
if (ret != -EBUSY)
rtnl_unlock();
+ mutex_unlock(&priv->sysfs_mutex);
return (!ret || ret == -EBUSY) ? count : ret;
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index 7871379342f4..8dc1e6225cc8 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -52,7 +52,8 @@ static const struct ipoib_stats ipoib_gstrings_stats[] = {
IPOIB_NETDEV_STAT(tx_bytes),
IPOIB_NETDEV_STAT(tx_errors),
IPOIB_NETDEV_STAT(rx_dropped),
- IPOIB_NETDEV_STAT(tx_dropped)
+ IPOIB_NETDEV_STAT(tx_dropped),
+ IPOIB_NETDEV_STAT(multicast),
};
#define IPOIB_GLOBAL_STATS_LEN ARRAY_SIZE(ipoib_gstrings_stats)
@@ -62,8 +63,7 @@ static void ipoib_get_drvinfo(struct net_device *netdev,
{
struct ipoib_dev_priv *priv = ipoib_priv(netdev);
- ib_get_device_fw_str(priv->ca, drvinfo->fw_version,
- sizeof(drvinfo->fw_version));
+ ib_get_device_fw_str(priv->ca, drvinfo->fw_version);
strlcpy(drvinfo->bus_info, dev_name(priv->ca->dev.parent),
sizeof(drvinfo->bus_info));
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index efe7402f4885..2e075377242e 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -256,6 +256,8 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
++dev->stats.rx_packets;
dev->stats.rx_bytes += skb->len;
+ if (skb->pkt_type == PACKET_MULTICAST)
+ dev->stats.multicast++;
skb->dev = dev;
if ((dev->features & NETIF_F_RXCSUM) &&
@@ -709,6 +711,27 @@ static int recvs_pending(struct net_device *dev)
return pending;
}
+static void check_qp_movement_and_print(struct ipoib_dev_priv *priv,
+ struct ib_qp *qp,
+ enum ib_qp_state new_state)
+{
+ struct ib_qp_attr qp_attr;
+ struct ib_qp_init_attr query_init_attr;
+ int ret;
+
+ ret = ib_query_qp(qp, &qp_attr, IB_QP_STATE, &query_init_attr);
+ if (ret) {
+ ipoib_warn(priv, "%s: Failed to query QP\n", __func__);
+ return;
+ }
+ /* print according to the new-state and the previous state.*/
+ if (new_state == IB_QPS_ERR && qp_attr.qp_state == IB_QPS_RESET)
+ ipoib_dbg(priv, "Failed modify QP, IB_QPS_RESET to IB_QPS_ERR, acceptable\n");
+ else
+ ipoib_warn(priv, "Failed to modify QP to state: %d from state: %d\n",
+ new_state, qp_attr.qp_state);
+}
+
int ipoib_ib_dev_stop_default(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
@@ -728,7 +751,7 @@ int ipoib_ib_dev_stop_default(struct net_device *dev)
*/
qp_attr.qp_state = IB_QPS_ERR;
if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
- ipoib_warn(priv, "Failed to modify QP to ERROR state\n");
+ check_qp_movement_and_print(priv, priv->qp, IB_QPS_ERR);
/* Wait for all sends and receives to complete */
begin = jiffies;
@@ -770,7 +793,7 @@ int ipoib_ib_dev_stop_default(struct net_device *dev)
ipoib_drain_cq(dev);
- msleep(1);
+ usleep_range(1000, 2000);
}
ipoib_dbg(priv, "All sends and receives done.\n");
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 6e86eeee370e..bac95b509a9b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -60,7 +60,6 @@ const char ipoib_driver_version[] = DRV_VERSION;
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(DRV_VERSION);
int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE;
int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE;
@@ -100,6 +99,8 @@ static struct net_device *ipoib_get_net_dev_by_params(
const union ib_gid *gid, const struct sockaddr *addr,
void *client_data);
static int ipoib_set_mac(struct net_device *dev, void *addr);
+static int ipoib_ioctl(struct net_device *dev, struct ifreq *ifr,
+ int cmd);
static struct ib_client ipoib_client = {
.name = "ipoib",
@@ -233,6 +234,7 @@ static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_featu
static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ int ret = 0;
/* dev->mtu > 2K ==> connected mode */
if (ipoib_cm_admin_enabled(dev)) {
@@ -256,9 +258,34 @@ static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
ipoib_dbg(priv, "MTU must be smaller than the underlying "
"link layer MTU - 4 (%u)\n", priv->mcast_mtu);
- dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);
+ new_mtu = min(priv->mcast_mtu, priv->admin_mtu);
- return 0;
+ if (priv->rn_ops->ndo_change_mtu) {
+ bool carrier_status = netif_carrier_ok(dev);
+
+ netif_carrier_off(dev);
+
+ /* notify lower level on the real mtu */
+ ret = priv->rn_ops->ndo_change_mtu(dev, new_mtu);
+
+ if (carrier_status)
+ netif_carrier_on(dev);
+ } else {
+ dev->mtu = new_mtu;
+ }
+
+ return ret;
+}
+
+static void ipoib_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ if (priv->rn_ops->ndo_get_stats64)
+ priv->rn_ops->ndo_get_stats64(dev, stats);
+ else
+ netdev_stats_to_stats64(stats, &dev->stats);
}
/* Called with an RCU read lock taken */
@@ -1534,6 +1561,7 @@ static void ipoib_flush_neighs(struct ipoib_dev_priv *priv)
int i, wait_flushed = 0;
init_completion(&priv->ntbl.flushed);
+ set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
spin_lock_irqsave(&priv->lock, flags);
@@ -1578,7 +1606,6 @@ static void ipoib_neigh_hash_uninit(struct net_device *dev)
ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n");
init_completion(&priv->ntbl.deleted);
- set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
/* Stop GC if called at init fail need to cancel work */
stopped = test_and_set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
@@ -1655,6 +1682,17 @@ out:
return -ENOMEM;
}
+static int ipoib_ioctl(struct net_device *dev, struct ifreq *ifr,
+ int cmd)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ if (!priv->rn_ops->ndo_do_ioctl)
+ return -EOPNOTSUPP;
+
+ return priv->rn_ops->ndo_do_ioctl(dev, ifr, cmd);
+}
+
int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
@@ -1808,6 +1846,8 @@ static const struct net_device_ops ipoib_netdev_ops_pf = {
.ndo_get_vf_stats = ipoib_get_vf_stats,
.ndo_set_vf_guid = ipoib_set_vf_guid,
.ndo_set_mac_address = ipoib_set_mac,
+ .ndo_get_stats64 = ipoib_get_stats,
+ .ndo_do_ioctl = ipoib_ioctl,
};
static const struct net_device_ops ipoib_netdev_ops_vf = {
@@ -1820,6 +1860,8 @@ static const struct net_device_ops ipoib_netdev_ops_vf = {
.ndo_tx_timeout = ipoib_timeout,
.ndo_set_rx_mode = ipoib_set_mcast_list,
.ndo_get_iflink = ipoib_get_iflink,
+ .ndo_get_stats64 = ipoib_get_stats,
+ .ndo_do_ioctl = ipoib_ioctl,
};
void ipoib_setup_common(struct net_device *dev)
@@ -1850,6 +1892,8 @@ static void ipoib_build_priv(struct net_device *dev)
priv->dev = dev;
spin_lock_init(&priv->lock);
init_rwsem(&priv->vlan_rwsem);
+ mutex_init(&priv->mcast_mutex);
+ mutex_init(&priv->sysfs_mutex);
INIT_LIST_HEAD(&priv->path_list);
INIT_LIST_HEAD(&priv->child_intfs);
@@ -2146,14 +2190,14 @@ static struct net_device *ipoib_add_port(const char *format,
priv->dev->dev_id = port - 1;
result = ib_query_port(hca, port, &attr);
- if (!result)
- priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
- else {
+ if (result) {
printk(KERN_WARNING "%s: ib_query_port %d failed\n",
hca->name, port);
goto device_init_failed;
}
+ priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
+
/* MTU will be reset when mcast join happens */
priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
@@ -2184,12 +2228,14 @@ static struct net_device *ipoib_add_port(const char *format,
printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
hca->name, port, result);
goto device_init_failed;
- } else
- memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));
+ }
+
+ memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw,
+ sizeof(union ib_gid));
set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
result = ipoib_dev_init(priv->dev, hca, port);
- if (result < 0) {
+ if (result) {
printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
hca->name, port, result);
goto device_init_failed;
@@ -2197,13 +2243,7 @@ static struct net_device *ipoib_add_port(const char *format,
INIT_IB_EVENT_HANDLER(&priv->event_handler,
priv->ca, ipoib_event);
- result = ib_register_event_handler(&priv->event_handler);
- if (result < 0) {
- printk(KERN_WARNING "%s: ib_register_event_handler failed for "
- "port %d (ret = %d)\n",
- hca->name, port, result);
- goto event_failed;
- }
+ ib_register_event_handler(&priv->event_handler);
result = register_netdev(priv->dev);
if (result) {
@@ -2212,6 +2252,7 @@ static struct net_device *ipoib_add_port(const char *format,
goto register_failed;
}
+ result = -ENOMEM;
if (ipoib_cm_add_mode_attr(priv->dev))
goto sysfs_failed;
if (ipoib_add_pkey_attr(priv->dev))
@@ -2235,8 +2276,6 @@ register_failed:
set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
cancel_delayed_work(&priv->neigh_reap_task);
flush_workqueue(priv->wq);
-
-event_failed:
ipoib_dev_cleanup(priv->dev);
device_init_failed:
@@ -2306,7 +2345,11 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
cancel_delayed_work(&priv->neigh_reap_task);
flush_workqueue(priv->wq);
+ /* Wrap rtnl_lock/unlock with mutex to protect sysfs calls */
+ mutex_lock(&priv->sysfs_mutex);
unregister_netdev(priv->dev);
+ mutex_unlock(&priv->sysfs_mutex);
+
rn->free_rdma_netdev(priv->dev);
list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list)
@@ -2337,6 +2380,7 @@ static int __init ipoib_init_module(void)
ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE);
#ifdef CONFIG_INFINIBAND_IPOIB_CM
ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
+ ipoib_max_conn_qp = max(ipoib_max_conn_qp, 0);
#endif
/*
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 057f58e6afca..93e149efc1f5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -684,15 +684,10 @@ void ipoib_mcast_start_thread(struct net_device *dev)
int ipoib_mcast_stop_thread(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
- unsigned long flags;
ipoib_dbg_mcast(priv, "stopping multicast thread\n");
- spin_lock_irqsave(&priv->lock, flags);
- cancel_delayed_work(&priv->mcast_task);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- flush_workqueue(priv->wq);
+ cancel_delayed_work_sync(&priv->mcast_task);
return 0;
}
@@ -748,6 +743,14 @@ void ipoib_mcast_remove_list(struct list_head *remove_list)
{
struct ipoib_mcast *mcast, *tmcast;
+ /*
+ * make sure the in-flight joins have finished before we attempt
+ * to leave
+ */
+ list_for_each_entry_safe(mcast, tmcast, remove_list, list)
+ if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
+ wait_for_completion(&mcast->done);
+
list_for_each_entry_safe(mcast, tmcast, remove_list, list) {
ipoib_mcast_leave(mcast->dev, mcast);
ipoib_mcast_free(mcast);
@@ -838,6 +841,7 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
struct ipoib_mcast *mcast, *tmcast;
unsigned long flags;
+ mutex_lock(&priv->mcast_mutex);
ipoib_dbg_mcast(priv, "flushing multicast list\n");
spin_lock_irqsave(&priv->lock, flags);
@@ -856,15 +860,8 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
spin_unlock_irqrestore(&priv->lock, flags);
- /*
- * make sure the in-flight joins have finished before we attempt
- * to leave
- */
- list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
- if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
- wait_for_completion(&mcast->done);
-
ipoib_mcast_remove_list(&remove_list);
+ mutex_unlock(&priv->mcast_mutex);
}
static int ipoib_mcast_addr_is_valid(const u8 *addr, const u8 *broadcast)
@@ -982,14 +979,6 @@ void ipoib_mcast_restart_task(struct work_struct *work)
netif_addr_unlock(dev);
local_irq_restore(flags);
- /*
- * make sure the in-flight joins have finished before we attempt
- * to leave
- */
- list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
- if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
- wait_for_completion(&mcast->done);
-
ipoib_mcast_remove_list(&remove_list);
/*
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 081b33deff1b..9927cd6b7082 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -133,12 +133,20 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
snprintf(intf_name, sizeof intf_name, "%s.%04x",
ppriv->dev->name, pkey);
- if (!rtnl_trylock())
+ if (!mutex_trylock(&ppriv->sysfs_mutex))
return restart_syscall();
+ if (!rtnl_trylock()) {
+ mutex_unlock(&ppriv->sysfs_mutex);
+ return restart_syscall();
+ }
+
priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name);
- if (!priv)
+ if (!priv) {
+ rtnl_unlock();
+ mutex_unlock(&ppriv->sysfs_mutex);
return -ENOMEM;
+ }
down_write(&ppriv->vlan_rwsem);
@@ -164,8 +172,8 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
out:
up_write(&ppriv->vlan_rwsem);
-
rtnl_unlock();
+ mutex_unlock(&ppriv->sysfs_mutex);
if (result) {
free_netdev(priv->dev);
@@ -188,8 +196,13 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
if (test_bit(IPOIB_FLAG_GOING_DOWN, &ppriv->flags))
return -EPERM;
- if (!rtnl_trylock())
+ if (!mutex_trylock(&ppriv->sysfs_mutex))
+ return restart_syscall();
+
+ if (!rtnl_trylock()) {
+ mutex_unlock(&ppriv->sysfs_mutex);
return restart_syscall();
+ }
down_write(&ppriv->vlan_rwsem);
list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
@@ -208,6 +221,7 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
}
rtnl_unlock();
+ mutex_unlock(&ppriv->sysfs_mutex);
if (dev) {
free_netdev(dev);
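[Editorial note, not part of the patch] The ipoib_vlan.c hunks (and the earlier set_mode change in ipoib_cm.c) wrap the rtnl_trylock()/restart_syscall() dance with a per-device sysfs_mutex: the sysfs handler first trylocks the device mutex, then the RTNL, and backs out of both on contention so it never sleeps on one lock while holding the other. A minimal userspace sketch of that trylock-both-or-back-off ordering (sysfs_mutex, rtnl_mutex and do_locked_work are illustrative; pthread mutexes stand in for the kernel mutex and RTNL):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sysfs_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t rtnl_mutex  = PTHREAD_MUTEX_INITIALIZER;

/*
 * Take the per-device mutex first, then try the global lock; if the
 * global lock is busy, drop everything and report -EAGAIN so the caller
 * retries from the top, instead of sleeping with the first lock held.
 */
static int do_locked_work(void (*work)(void))
{
	if (pthread_mutex_trylock(&sysfs_mutex))
		return -EAGAIN;

	if (pthread_mutex_trylock(&rtnl_mutex)) {
		pthread_mutex_unlock(&sysfs_mutex);
		return -EAGAIN;
	}

	work();

	pthread_mutex_unlock(&rtnl_mutex);
	pthread_mutex_unlock(&sysfs_mutex);
	return 0;
}

static void reconfigure(void)
{
	puts("reconfiguring child interface");
}

int main(void)
{
	return do_locked_work(reconfigure) ? 1 : 0;
}

Returning -EAGAIN instead of blocking mirrors restart_syscall(): the whole operation is retried, so the lock order can never invert against unregister_netdev(), which holds the RTNL while tearing down sysfs entries.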
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 5a887efb4bdf..19624e023ebd 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -77,12 +77,12 @@
MODULE_DESCRIPTION("iSER (iSCSI Extensions for RDMA) Datamover");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Alex Nezhinsky, Dan Bar Dov, Or Gerlitz");
-MODULE_VERSION(DRV_VER);
static struct scsi_host_template iscsi_iser_sht;
static struct iscsi_transport iscsi_iser_transport;
static struct scsi_transport_template *iscsi_iser_scsi_transport;
static struct workqueue_struct *release_wq;
+static DEFINE_MUTEX(unbind_iser_conn_mutex);
struct iser_global ig;
int iser_debug_level = 0;
@@ -550,12 +550,14 @@ iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
*/
if (iser_conn) {
mutex_lock(&iser_conn->state_mutex);
+ mutex_lock(&unbind_iser_conn_mutex);
iser_conn_terminate(iser_conn);
iscsi_conn_stop(cls_conn, flag);
/* unbind */
iser_conn->iscsi_conn = NULL;
conn->dd_data = NULL;
+ mutex_unlock(&unbind_iser_conn_mutex);
complete(&iser_conn->stop_completion);
mutex_unlock(&iser_conn->state_mutex);
@@ -977,13 +979,21 @@ static int iscsi_iser_slave_alloc(struct scsi_device *sdev)
struct iser_conn *iser_conn;
struct ib_device *ib_dev;
+ mutex_lock(&unbind_iser_conn_mutex);
+
session = starget_to_session(scsi_target(sdev))->dd_data;
iser_conn = session->leadconn->dd_data;
+ if (!iser_conn) {
+ mutex_unlock(&unbind_iser_conn_mutex);
+ return -ENOTCONN;
+ }
ib_dev = iser_conn->ib_conn.device->ib_device;
if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
blk_queue_virt_boundary(sdev->request_queue, ~MASK_4K);
+ mutex_unlock(&unbind_iser_conn_mutex);
+
return 0;
}
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 12ed62ce9ff7..2a07692007bd 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -137,8 +137,10 @@ iser_prepare_write_cmd(struct iscsi_task *task,
if (unsol_sz < edtl) {
hdr->flags |= ISER_WSV;
- hdr->write_stag = cpu_to_be32(mem_reg->rkey);
- hdr->write_va = cpu_to_be64(mem_reg->sge.addr + unsol_sz);
+ if (buf_out->data_len > imm_sz) {
+ hdr->write_stag = cpu_to_be32(mem_reg->rkey);
+ hdr->write_va = cpu_to_be64(mem_reg->sge.addr + unsol_sz);
+ }
iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
"VA:%#llX + unsol:%d\n",
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index c538a38c91ce..55a73b0ed4c6 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -106,9 +106,7 @@ static int iser_create_device_ib_res(struct iser_device *device)
INIT_IB_EVENT_HANDLER(&device->event_handler, ib_dev,
iser_event_handler);
- if (ib_register_event_handler(&device->event_handler))
- goto cq_err;
-
+ ib_register_event_handler(&device->event_handler);
return 0;
cq_err:
@@ -141,7 +139,7 @@ static void iser_free_device_ib_res(struct iser_device *device)
comp->cq = NULL;
}
- (void)ib_unregister_event_handler(&device->event_handler);
+ ib_unregister_event_handler(&device->event_handler);
ib_dealloc_pd(device->pd);
kfree(device->comps);
@@ -708,8 +706,14 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
unsigned short sg_tablesize, sup_sg_tablesize;
sg_tablesize = DIV_ROUND_UP(max_sectors * 512, SIZE_4K);
- sup_sg_tablesize = min_t(unsigned, ISCSI_ISER_MAX_SG_TABLESIZE,
- device->ib_device->attrs.max_fast_reg_page_list_len);
+ if (device->ib_device->attrs.device_cap_flags &
+ IB_DEVICE_MEM_MGT_EXTENSIONS)
+ sup_sg_tablesize =
+ min_t(
+ uint, ISCSI_ISER_MAX_SG_TABLESIZE,
+ device->ib_device->attrs.max_fast_reg_page_list_len);
+ else
+ sup_sg_tablesize = ISCSI_ISER_MAX_SG_TABLESIZE;
iser_conn->scsi_sg_tablesize = min(sg_tablesize, sup_sg_tablesize);
}
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 0e662656ef42..ceabdb85df8b 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -2710,7 +2710,6 @@ static void __exit isert_exit(void)
}
MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
-MODULE_VERSION("1.0");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
index cf768dd78d1b..21f0b481edcc 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
@@ -52,7 +52,9 @@
#include <linux/module.h>
#include <rdma/ib_addr.h>
-#include <rdma/ib_smi.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/opa_smi.h>
+#include <rdma/opa_port_info.h>
#include "opa_vnic_internal.h"
@@ -952,12 +954,7 @@ static int vema_register(struct opa_vnic_ctrl_port *cport)
INIT_IB_EVENT_HANDLER(&port->event_handler,
cport->ibdev, opa_vnic_event);
- ret = ib_register_event_handler(&port->event_handler);
- if (ret) {
- c_err("port %d: event handler register failed\n", i);
- vema_unregister(cport);
- return ret;
- }
+ ib_register_event_handler(&port->event_handler);
idr_init(&port->vport_idr);
mutex_init(&port->lock);
@@ -980,6 +977,27 @@ static int vema_register(struct opa_vnic_ctrl_port *cport)
}
/**
+ * opa_vnic_ctrl_config_dev -- This function sends a trap to the EM
+ * by way of ib_modify_port to indicate support for ethernet on the
+ * fabric.
+ * @cport: pointer to control port
+ * @en: enable or disable ethernet on fabric support
+ */
+static void opa_vnic_ctrl_config_dev(struct opa_vnic_ctrl_port *cport, bool en)
+{
+ struct ib_port_modify pm = { 0 };
+ int i;
+
+ if (en)
+ pm.set_port_cap_mask = OPA_CAP_MASK3_IsEthOnFabricSupported;
+ else
+ pm.clr_port_cap_mask = OPA_CAP_MASK3_IsEthOnFabricSupported;
+
+ for (i = 1; i <= cport->num_ports; i++)
+ ib_modify_port(cport->ibdev, i, IB_PORT_OPA_MASK_CHG, &pm);
+}
+
+/**
* opa_vnic_vema_add_one -- Handle new ib device
* @device: ib device pointer
*
@@ -1007,6 +1025,7 @@ static void opa_vnic_vema_add_one(struct ib_device *device)
c_info("VNIC client initialized\n");
ib_set_client_data(device, &opa_vnic_client, cport);
+ opa_vnic_ctrl_config_dev(cport, true);
}
/**
@@ -1025,6 +1044,7 @@ static void opa_vnic_vema_rem_one(struct ib_device *device,
return;
c_info("removing VNIC client\n");
+ opa_vnic_ctrl_config_dev(cport, false);
vema_unregister(cport);
kfree(cport);
}
@@ -1053,4 +1073,3 @@ module_exit(opa_vnic_deinit);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel OPA Virtual Network driver");
-MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 2354c742caa1..fa5ccdb3bb2a 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -62,7 +62,6 @@
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(DRV_VERSION);
MODULE_INFO(release_date, DRV_RELDATE);
#if !defined(CONFIG_DYNAMIC_DEBUG)
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 402275be0931..9e8e9220f816 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -2238,7 +2238,7 @@ static int srpt_write_pending(struct se_cmd *se_cmd)
cqe, first_wr);
cqe = NULL;
}
-
+
ret = ib_post_send(ch->qp, first_wr, &bad_wr);
if (ret) {
pr_err("%s: ib_post_send() returned %d for %d (avail: %d)\n",
@@ -2530,8 +2530,7 @@ static void srpt_add_one(struct ib_device *device)
INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
srpt_event_handler);
- if (ib_register_event_handler(&sdev->event_handler))
- goto err_cm;
+ ib_register_event_handler(&sdev->event_handler);
sdev->ioctx_ring = (struct srpt_recv_ioctx **)
srpt_alloc_ioctx_ring(sdev, sdev->srq_size,
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index cc1183851af5..1b817e51b84b 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -328,8 +328,8 @@ struct srpt_port {
u8 port_guid[24];
u8 port_gid[64];
u8 port;
- u16 sm_lid;
- u16 lid;
+ u32 sm_lid;
+ u32 lid;
union ib_gid gid;
struct work_struct work;
struct se_portal_group port_guid_tpg;
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 7e6842bd525c..d268fdc23c64 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1398,7 +1398,7 @@ static struct attribute *input_dev_attrs[] = {
NULL
};
-static struct attribute_group input_dev_attr_group = {
+static const struct attribute_group input_dev_attr_group = {
.attrs = input_dev_attrs,
};
@@ -1425,7 +1425,7 @@ static struct attribute *input_dev_id_attrs[] = {
NULL
};
-static struct attribute_group input_dev_id_attr_group = {
+static const struct attribute_group input_dev_id_attr_group = {
.name = "id",
.attrs = input_dev_id_attrs,
};
@@ -1495,7 +1495,7 @@ static struct attribute *input_dev_caps_attrs[] = {
NULL
};
-static struct attribute_group input_dev_caps_attr_group = {
+static const struct attribute_group input_dev_caps_attr_group = {
.name = "capabilities",
.attrs = input_dev_caps_attrs,
};
diff --git a/drivers/input/joystick/iforce/iforce-serio.c b/drivers/input/joystick/iforce/iforce-serio.c
index 46d5041d2d9d..154e827b559b 100644
--- a/drivers/input/joystick/iforce/iforce-serio.c
+++ b/drivers/input/joystick/iforce/iforce-serio.c
@@ -164,7 +164,7 @@ static void iforce_serio_disconnect(struct serio *serio)
kfree(iforce);
}
-static struct serio_device_id iforce_serio_ids[] = {
+static const struct serio_device_id iforce_serio_ids[] = {
{
.type = SERIO_RS232,
.proto = SERIO_IFORCE,
diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
index db64adfbe1af..e8724f1a4a25 100644
--- a/drivers/input/joystick/iforce/iforce-usb.c
+++ b/drivers/input/joystick/iforce/iforce-usb.c
@@ -209,7 +209,7 @@ static void iforce_usb_disconnect(struct usb_interface *intf)
kfree(iforce);
}
-static struct usb_device_id iforce_usb_ids [] = {
+static const struct usb_device_id iforce_usb_ids[] = {
{ USB_DEVICE(0x044f, 0xa01c) }, /* Thrustmaster Motor Sport GT */
{ USB_DEVICE(0x046d, 0xc281) }, /* Logitech WingMan Force */
{ USB_DEVICE(0x046d, 0xc291) }, /* Logitech WingMan Formula Force */
diff --git a/drivers/input/joystick/magellan.c b/drivers/input/joystick/magellan.c
index c5358ba1f571..a9d0e3edca94 100644
--- a/drivers/input/joystick/magellan.c
+++ b/drivers/input/joystick/magellan.c
@@ -198,7 +198,7 @@ static int magellan_connect(struct serio *serio, struct serio_driver *drv)
* The serio driver structure.
*/
-static struct serio_device_id magellan_serio_ids[] = {
+static const struct serio_device_id magellan_serio_ids[] = {
{
.type = SERIO_RS232,
.proto = SERIO_MAGELLAN,
diff --git a/drivers/input/joystick/spaceball.c b/drivers/input/joystick/spaceball.c
index f4445a4e8d6a..e9712a1b7cad 100644
--- a/drivers/input/joystick/spaceball.c
+++ b/drivers/input/joystick/spaceball.c
@@ -272,7 +272,7 @@ static int spaceball_connect(struct serio *serio, struct serio_driver *drv)
* The serio driver structure.
*/
-static struct serio_device_id spaceball_serio_ids[] = {
+static const struct serio_device_id spaceball_serio_ids[] = {
{
.type = SERIO_RS232,
.proto = SERIO_SPACEBALL,
diff --git a/drivers/input/joystick/spaceorb.c b/drivers/input/joystick/spaceorb.c
index f2667820e8c5..05da0ed514e2 100644
--- a/drivers/input/joystick/spaceorb.c
+++ b/drivers/input/joystick/spaceorb.c
@@ -213,7 +213,7 @@ static int spaceorb_connect(struct serio *serio, struct serio_driver *drv)
* The serio driver structure.
*/
-static struct serio_device_id spaceorb_serio_ids[] = {
+static const struct serio_device_id spaceorb_serio_ids[] = {
{
.type = SERIO_RS232,
.proto = SERIO_SPACEORB,
diff --git a/drivers/input/joystick/stinger.c b/drivers/input/joystick/stinger.c
index 099c6d7b5e08..cb10e7b097ae 100644
--- a/drivers/input/joystick/stinger.c
+++ b/drivers/input/joystick/stinger.c
@@ -184,7 +184,7 @@ static int stinger_connect(struct serio *serio, struct serio_driver *drv)
* The serio driver structure.
*/
-static struct serio_device_id stinger_serio_ids[] = {
+static const struct serio_device_id stinger_serio_ids[] = {
{
.type = SERIO_RS232,
.proto = SERIO_STINGER,
diff --git a/drivers/input/joystick/twidjoy.c b/drivers/input/joystick/twidjoy.c
index 7f7e5ab3f9e3..e60cb004cb8c 100644
--- a/drivers/input/joystick/twidjoy.c
+++ b/drivers/input/joystick/twidjoy.c
@@ -233,7 +233,7 @@ static int twidjoy_connect(struct serio *serio, struct serio_driver *drv)
* The serio driver structure.
*/
-static struct serio_device_id twidjoy_serio_ids[] = {
+static const struct serio_device_id twidjoy_serio_ids[] = {
{
.type = SERIO_RS232,
.proto = SERIO_TWIDJOY,
diff --git a/drivers/input/joystick/warrior.c b/drivers/input/joystick/warrior.c
index e13a9144a25d..ef5391ba4470 100644
--- a/drivers/input/joystick/warrior.c
+++ b/drivers/input/joystick/warrior.c
@@ -193,7 +193,7 @@ static int warrior_connect(struct serio *serio, struct serio_driver *drv)
* The serio driver structure.
*/
-static struct serio_device_id warrior_serio_ids[] = {
+static const struct serio_device_id warrior_serio_ids[] = {
{
.type = SERIO_RS232,
.proto = SERIO_WARRIOR,
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 298a6ba51411..f8e34ef643c7 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -408,7 +408,7 @@ static const signed short xpad_abs_triggers[] = {
#define XPAD_XBOXONE_VENDOR(vend) \
{ XPAD_XBOXONE_VENDOR_PROTOCOL(vend, 208) }
-static struct usb_device_id xpad_table[] = {
+static const struct usb_device_id xpad_table[] = {
{ USB_INTERFACE_INFO('X', 'B', 0) }, /* X-Box USB-IF not approved class */
XPAD_XBOX360_VENDOR(0x044f), /* Thrustmaster X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x045e), /* Microsoft X-Box 360 controllers */
@@ -476,10 +476,21 @@ static const u8 xboxone_hori_init[] = {
};
/*
- * A rumble packet is required for some PowerA pads to start
+ * A specific rumble packet is required for some PowerA pads to start
* sending input reports. One of those pads is (0x24c6:0x543a).
*/
-static const u8 xboxone_zerorumble_init[] = {
+static const u8 xboxone_rumblebegin_init[] = {
+ 0x09, 0x00, 0x00, 0x09, 0x00, 0x0F, 0x00, 0x00,
+ 0x1D, 0x1D, 0xFF, 0x00, 0x00
+};
+
+/*
+ * A rumble packet with zero FF intensity will immediately
+ * terminate the rumbling required to init PowerA pads.
+ * This should happen fast enough that the motors don't
+ * spin up to enough speed to actually vibrate the gamepad.
+ */
+static const u8 xboxone_rumbleend_init[] = {
0x09, 0x00, 0x00, 0x09, 0x00, 0x0F, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00
};
@@ -494,9 +505,12 @@ static const struct xboxone_init_packet xboxone_init_packets[] = {
XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init),
XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init),
XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init),
- XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_zerorumble_init),
- XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_zerorumble_init),
- XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_zerorumble_init),
+ XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
+ XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init),
+ XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init),
+ XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumbleend_init),
+ XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumbleend_init),
+ XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumbleend_init),
};
struct xpad_output_packet {
diff --git a/drivers/input/joystick/zhenhua.c b/drivers/input/joystick/zhenhua.c
index 4a8258bf13fd..5c6d5de743f1 100644
--- a/drivers/input/joystick/zhenhua.c
+++ b/drivers/input/joystick/zhenhua.c
@@ -192,7 +192,7 @@ static int zhenhua_connect(struct serio *serio, struct serio_driver *drv)
* The serio driver structure.
*/
-static struct serio_device_id zhenhua_serio_ids[] = {
+static const struct serio_device_id zhenhua_serio_ids[] = {
{
.type = SERIO_RS232,
.proto = SERIO_ZHENHUA,
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index ec876b5b1382..7e75835e220f 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -1270,7 +1270,7 @@ static int atkbd_reconnect(struct serio *serio)
return retval;
}
-static struct serio_device_id atkbd_serio_ids[] = {
+static const struct serio_device_id atkbd_serio_ids[] = {
{
.type = SERIO_8042,
.proto = SERIO_ANY,
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index a047b9af8369..e9f0ebf3267a 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -353,7 +353,7 @@ static struct attribute *gpio_keys_attrs[] = {
NULL,
};
-static struct attribute_group gpio_keys_attr_group = {
+static const struct attribute_group gpio_keys_attr_group = {
.attrs = gpio_keys_attrs,
};
@@ -827,7 +827,7 @@ static int gpio_keys_probe(struct platform_device *pdev)
fwnode_handle_put(child);
- error = sysfs_create_group(&dev->kobj, &gpio_keys_attr_group);
+ error = devm_device_add_group(dev, &gpio_keys_attr_group);
if (error) {
dev_err(dev, "Unable to export keys/switches, error: %d\n",
error);
@@ -838,23 +838,12 @@ static int gpio_keys_probe(struct platform_device *pdev)
if (error) {
dev_err(dev, "Unable to register input device, error: %d\n",
error);
- goto err_remove_group;
+ return error;
}
device_init_wakeup(dev, wakeup);
return 0;
-
-err_remove_group:
- sysfs_remove_group(&dev->kobj, &gpio_keys_attr_group);
- return error;
-}
-
-static int gpio_keys_remove(struct platform_device *pdev)
-{
- sysfs_remove_group(&pdev->dev.kobj, &gpio_keys_attr_group);
-
- return 0;
}
static int __maybe_unused gpio_keys_suspend(struct device *dev)
@@ -912,7 +901,6 @@ static SIMPLE_DEV_PM_OPS(gpio_keys_pm_ops, gpio_keys_suspend, gpio_keys_resume);
static struct platform_driver gpio_keys_device_driver = {
.probe = gpio_keys_probe,
- .remove = gpio_keys_remove,
.driver = {
.name = "gpio-keys",
.pm = &gpio_keys_pm_ops,
diff --git a/drivers/input/keyboard/hil_kbd.c b/drivers/input/keyboard/hil_kbd.c
index 5b152f25a8e1..bb29a7c9a1c0 100644
--- a/drivers/input/keyboard/hil_kbd.c
+++ b/drivers/input/keyboard/hil_kbd.c
@@ -559,7 +559,7 @@ static int hil_dev_connect(struct serio *serio, struct serio_driver *drv)
return error;
}
-static struct serio_device_id hil_dev_ids[] = {
+static const struct serio_device_id hil_dev_ids[] = {
{
.type = SERIO_HIL_MLC,
.proto = SERIO_HIL,
diff --git a/drivers/input/keyboard/hilkbd.c b/drivers/input/keyboard/hilkbd.c
index 198dc07a1be5..a4e404aaf64b 100644
--- a/drivers/input/keyboard/hilkbd.c
+++ b/drivers/input/keyboard/hilkbd.c
@@ -299,7 +299,7 @@ static void hil_keyb_exit(void)
}
#if defined(CONFIG_PARISC)
-static int hil_probe_chip(struct parisc_device *dev)
+static int __init hil_probe_chip(struct parisc_device *dev)
{
/* Only allow one HIL keyboard */
if (hil_dev.dev)
@@ -320,14 +320,14 @@ static int hil_probe_chip(struct parisc_device *dev)
return hil_keyb_init();
}
-static int hil_remove_chip(struct parisc_device *dev)
+static int __exit hil_remove_chip(struct parisc_device *dev)
{
hil_keyb_exit();
return 0;
}
-static struct parisc_device_id hil_tbl[] = {
+static const struct parisc_device_id hil_tbl[] __initconst = {
{ HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00073 },
{ 0, }
};
@@ -337,11 +337,11 @@ static struct parisc_device_id hil_tbl[] = {
MODULE_DEVICE_TABLE(parisc, hil_tbl);
#endif
-static struct parisc_driver hil_driver = {
+static struct parisc_driver hil_driver __refdata = {
.name = "hil",
.id_table = hil_tbl,
.probe = hil_probe_chip,
- .remove = hil_remove_chip,
+ .remove = __exit_p(hil_remove_chip),
};
static int __init hil_init(void)
diff --git a/drivers/input/keyboard/lkkbd.c b/drivers/input/keyboard/lkkbd.c
index 9fcd9f1d5dc8..471d53815c6d 100644
--- a/drivers/input/keyboard/lkkbd.c
+++ b/drivers/input/keyboard/lkkbd.c
@@ -707,7 +707,7 @@ static void lkkbd_disconnect(struct serio *serio)
kfree(lk);
}
-static struct serio_device_id lkkbd_serio_ids[] = {
+static const struct serio_device_id lkkbd_serio_ids[] = {
{
.type = SERIO_RS232,
.proto = SERIO_LKKBD,
diff --git a/drivers/input/keyboard/newtonkbd.c b/drivers/input/keyboard/newtonkbd.c
index 20f044377990..fb9b8e23ab93 100644
--- a/drivers/input/keyboard/newtonkbd.c
+++ b/drivers/input/keyboard/newtonkbd.c
@@ -142,7 +142,7 @@ static void nkbd_disconnect(struct serio *serio)
kfree(nkbd);
}
-static struct serio_device_id nkbd_serio_ids[] = {
+static const struct serio_device_id nkbd_serio_ids[] = {
{
.type = SERIO_RS232,
.proto = SERIO_NEWTON,
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index 3841fa30db33..d0bdaeadf86d 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -644,9 +644,12 @@ static void pxa27x_keypad_config(struct pxa27x_keypad *keypad)
static int pxa27x_keypad_open(struct input_dev *dev)
{
struct pxa27x_keypad *keypad = input_get_drvdata(dev);
-
+ int ret;
/* Enable unit clock */
- clk_prepare_enable(keypad->clk);
+ ret = clk_prepare_enable(keypad->clk);
+ if (ret)
+ return ret;
+
pxa27x_keypad_config(keypad);
return 0;
@@ -683,6 +686,7 @@ static int pxa27x_keypad_resume(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct pxa27x_keypad *keypad = platform_get_drvdata(pdev);
struct input_dev *input_dev = keypad->input_dev;
+ int ret = 0;
/*
* If the keypad is used as wake up source, the clock is not turned
@@ -695,14 +699,15 @@ static int pxa27x_keypad_resume(struct device *dev)
if (input_dev->users) {
/* Enable unit clock */
- clk_prepare_enable(keypad->clk);
- pxa27x_keypad_config(keypad);
+ ret = clk_prepare_enable(keypad->clk);
+ if (!ret)
+ pxa27x_keypad_config(keypad);
}
mutex_unlock(&input_dev->mutex);
}
- return 0;
+ return ret;
}
#endif
diff --git a/drivers/input/keyboard/stowaway.c b/drivers/input/keyboard/stowaway.c
index a6e0d565e306..8b6de9a692dc 100644
--- a/drivers/input/keyboard/stowaway.c
+++ b/drivers/input/keyboard/stowaway.c
@@ -146,7 +146,7 @@ static void skbd_disconnect(struct serio *serio)
kfree(skbd);
}
-static struct serio_device_id skbd_serio_ids[] = {
+static const struct serio_device_id skbd_serio_ids[] = {
{
.type = SERIO_RS232,
.proto = SERIO_STOWAWAY,
diff --git a/drivers/input/keyboard/sunkbd.c b/drivers/input/keyboard/sunkbd.c
index dc6bb9d5b4f0..c95707ea2656 100644
--- a/drivers/input/keyboard/sunkbd.c
+++ b/drivers/input/keyboard/sunkbd.c
@@ -339,7 +339,7 @@ static void sunkbd_disconnect(struct serio *serio)
kfree(sunkbd);
}
-static struct serio_device_id sunkbd_serio_ids[] = {
+static const struct serio_device_id sunkbd_serio_ids[] = {
{
.type = SERIO_RS232,
.proto = SERIO_SUNKBD,
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index 0c07e1023a46..edc1385ca00b 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -370,8 +370,11 @@ static int tegra_kbc_start(struct tegra_kbc *kbc)
{
unsigned int debounce_cnt;
u32 val = 0;
+ int ret;
- clk_prepare_enable(kbc->clk);
+ ret = clk_prepare_enable(kbc->clk);
+ if (ret)
+ return ret;
/* Reset the KBC controller to clear all previous status.*/
reset_control_assert(kbc->rst);
diff --git a/drivers/input/keyboard/twl4030_keypad.c b/drivers/input/keyboard/twl4030_keypad.c
index 39e72b3219d8..f9f98ef1d98e 100644
--- a/drivers/input/keyboard/twl4030_keypad.c
+++ b/drivers/input/keyboard/twl4030_keypad.c
@@ -30,7 +30,7 @@
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/platform_device.h>
-#include <linux/i2c/twl.h>
+#include <linux/mfd/twl.h>
#include <linux/slab.h>
#include <linux/of.h>
diff --git a/drivers/input/keyboard/xtkbd.c b/drivers/input/keyboard/xtkbd.c
index 7c2325bd7408..8f64b9ded8d0 100644
--- a/drivers/input/keyboard/xtkbd.c
+++ b/drivers/input/keyboard/xtkbd.c
@@ -145,7 +145,7 @@ static void xtkbd_disconnect(struct serio *serio)
kfree(xtkbd);
}
-static struct serio_device_id xtkbd_serio_ids[] = {
+static const struct serio_device_id xtkbd_serio_ids[] = {
{
.type = SERIO_XT,
.proto = SERIO_ANY,
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 3872488c3fd7..f47e836eaa0f 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -581,6 +581,17 @@ config INPUT_PWM_BEEPER
To compile this driver as a module, choose M here: the module will be
called pwm-beeper.
+config INPUT_RK805_PWRKEY
+ tristate "Rockchip RK805 PMIC power key support"
+ depends on MFD_RK808
+ help
+ Select this option to enable power key driver for RK805.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the module will be
+ called rk805_pwrkey.
+
config INPUT_GPIO_ROTARY_ENCODER
tristate "Rotary encoders connected to GPIO pins"
depends on GPIOLIB || COMPILE_TEST
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index b923a9828c88..1072e0760c19 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -64,6 +64,7 @@ obj-$(CONFIG_INPUT_REGULATOR_HAPTIC) += regulator-haptic.o
obj-$(CONFIG_INPUT_RETU_PWRBUTTON) += retu-pwrbutton.o
obj-$(CONFIG_INPUT_AXP20X_PEK) += axp20x-pek.o
obj-$(CONFIG_INPUT_GPIO_ROTARY_ENCODER) += rotary_encoder.o
+obj-$(CONFIG_INPUT_RK805_PWRKEY) += rk805-pwrkey.o
obj-$(CONFIG_INPUT_SGI_BTNS) += sgi_btns.o
obj-$(CONFIG_INPUT_SIRFSOC_ONKEY) += sirfsoc-onkey.o
obj-$(CONFIG_INPUT_SOC_BUTTON_ARRAY) += soc_button_array.o
diff --git a/drivers/input/misc/ati_remote2.c b/drivers/input/misc/ati_remote2.c
index 1c5914cae853..ebf4448b31b9 100644
--- a/drivers/input/misc/ati_remote2.c
+++ b/drivers/input/misc/ati_remote2.c
@@ -110,7 +110,7 @@ static const struct kernel_param_ops param_ops_mode_mask = {
module_param(mode_mask, mode_mask, 0644);
MODULE_PARM_DESC(mode_mask, "Bitmask of modes to accept <4:PC><3:AUX4><2:AUX3><1:AUX2><0:AUX1>");
-static struct usb_device_id ati_remote2_id_table[] = {
+static const struct usb_device_id ati_remote2_id_table[] = {
{ USB_DEVICE(0x0471, 0x0602) }, /* ATI Remote Wonder II */
{ }
};
diff --git a/drivers/input/misc/axp20x-pek.c b/drivers/input/misc/axp20x-pek.c
index 38c79ebff033..6cee5adc3b5c 100644
--- a/drivers/input/misc/axp20x-pek.c
+++ b/drivers/input/misc/axp20x-pek.c
@@ -29,9 +29,17 @@
#define AXP20X_PEK_STARTUP_MASK (0xc0)
#define AXP20X_PEK_SHUTDOWN_MASK (0x03)
+struct axp20x_info {
+ const struct axp20x_time *startup_time;
+ unsigned int startup_mask;
+ const struct axp20x_time *shutdown_time;
+ unsigned int shutdown_mask;
+};
+
struct axp20x_pek {
struct axp20x_dev *axp20x;
struct input_dev *input;
+ struct axp20x_info *info;
int irq_dbr;
int irq_dbf;
};
@@ -48,6 +56,13 @@ static const struct axp20x_time startup_time[] = {
{ .time = 2000, .idx = 3 },
};
+static const struct axp20x_time axp221_startup_time[] = {
+ { .time = 128, .idx = 0 },
+ { .time = 1000, .idx = 1 },
+ { .time = 2000, .idx = 2 },
+ { .time = 3000, .idx = 3 },
+};
+
static const struct axp20x_time shutdown_time[] = {
{ .time = 4000, .idx = 0 },
{ .time = 6000, .idx = 1 },
@@ -55,31 +70,25 @@ static const struct axp20x_time shutdown_time[] = {
{ .time = 10000, .idx = 3 },
};
-struct axp20x_pek_ext_attr {
- const struct axp20x_time *p_time;
- unsigned int mask;
-};
-
-static struct axp20x_pek_ext_attr axp20x_pek_startup_ext_attr = {
- .p_time = startup_time,
- .mask = AXP20X_PEK_STARTUP_MASK,
+static const struct axp20x_info axp20x_info = {
+ .startup_time = startup_time,
+ .startup_mask = AXP20X_PEK_STARTUP_MASK,
+ .shutdown_time = shutdown_time,
+ .shutdown_mask = AXP20X_PEK_SHUTDOWN_MASK,
};
-static struct axp20x_pek_ext_attr axp20x_pek_shutdown_ext_attr = {
- .p_time = shutdown_time,
- .mask = AXP20X_PEK_SHUTDOWN_MASK,
+static const struct axp20x_info axp221_info = {
+ .startup_time = axp221_startup_time,
+ .startup_mask = AXP20X_PEK_STARTUP_MASK,
+ .shutdown_time = shutdown_time,
+ .shutdown_mask = AXP20X_PEK_SHUTDOWN_MASK,
};
-static struct axp20x_pek_ext_attr *get_axp_ext_attr(struct device_attribute *attr)
-{
- return container_of(attr, struct dev_ext_attribute, attr)->var;
-}
-
-static ssize_t axp20x_show_ext_attr(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t axp20x_show_attr(struct device *dev,
+ const struct axp20x_time *time,
+ unsigned int mask, char *buf)
{
struct axp20x_pek *axp20x_pek = dev_get_drvdata(dev);
- struct axp20x_pek_ext_attr *axp20x_ea = get_axp_ext_attr(attr);
unsigned int val;
int ret, i;
@@ -87,22 +96,42 @@ static ssize_t axp20x_show_ext_attr(struct device *dev,
if (ret != 0)
return ret;
- val &= axp20x_ea->mask;
- val >>= ffs(axp20x_ea->mask) - 1;
+ val &= mask;
+ val >>= ffs(mask) - 1;
for (i = 0; i < 4; i++)
- if (val == axp20x_ea->p_time[i].idx)
- val = axp20x_ea->p_time[i].time;
+ if (val == time[i].idx)
+ val = time[i].time;
return sprintf(buf, "%u\n", val);
}
-static ssize_t axp20x_store_ext_attr(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t axp20x_show_attr_startup(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct axp20x_pek *axp20x_pek = dev_get_drvdata(dev);
+
+ return axp20x_show_attr(dev, axp20x_pek->info->startup_time,
+ axp20x_pek->info->startup_mask, buf);
+}
+
+static ssize_t axp20x_show_attr_shutdown(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct axp20x_pek *axp20x_pek = dev_get_drvdata(dev);
+
+ return axp20x_show_attr(dev, axp20x_pek->info->shutdown_time,
+ axp20x_pek->info->shutdown_mask, buf);
+}
+
+static ssize_t axp20x_store_attr(struct device *dev,
+ const struct axp20x_time *time,
+ unsigned int mask, const char *buf,
+ size_t count)
{
struct axp20x_pek *axp20x_pek = dev_get_drvdata(dev);
- struct axp20x_pek_ext_attr *axp20x_ea = get_axp_ext_attr(attr);
char val_str[20];
size_t len;
int ret, i;
@@ -123,39 +152,52 @@ static ssize_t axp20x_store_ext_attr(struct device *dev,
for (i = 3; i >= 0; i--) {
unsigned int err;
- err = abs(axp20x_ea->p_time[i].time - val);
+ err = abs(time[i].time - val);
if (err < best_err) {
best_err = err;
- idx = axp20x_ea->p_time[i].idx;
+ idx = time[i].idx;
}
if (!err)
break;
}
- idx <<= ffs(axp20x_ea->mask) - 1;
- ret = regmap_update_bits(axp20x_pek->axp20x->regmap,
- AXP20X_PEK_KEY,
- axp20x_ea->mask, idx);
+ idx <<= ffs(mask) - 1;
+ ret = regmap_update_bits(axp20x_pek->axp20x->regmap, AXP20X_PEK_KEY,
+ mask, idx);
if (ret != 0)
return -EINVAL;
return count;
}
-static struct dev_ext_attribute axp20x_dev_attr_startup = {
- .attr = __ATTR(startup, 0644, axp20x_show_ext_attr, axp20x_store_ext_attr),
- .var = &axp20x_pek_startup_ext_attr,
-};
+static ssize_t axp20x_store_attr_startup(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct axp20x_pek *axp20x_pek = dev_get_drvdata(dev);
-static struct dev_ext_attribute axp20x_dev_attr_shutdown = {
- .attr = __ATTR(shutdown, 0644, axp20x_show_ext_attr, axp20x_store_ext_attr),
- .var = &axp20x_pek_shutdown_ext_attr,
-};
+ return axp20x_store_attr(dev, axp20x_pek->info->startup_time,
+ axp20x_pek->info->startup_mask, buf, count);
+}
+
+static ssize_t axp20x_store_attr_shutdown(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct axp20x_pek *axp20x_pek = dev_get_drvdata(dev);
+
+ return axp20x_store_attr(dev, axp20x_pek->info->shutdown_time,
+ axp20x_pek->info->shutdown_mask, buf, count);
+}
+
+DEVICE_ATTR(startup, 0644, axp20x_show_attr_startup, axp20x_store_attr_startup);
+DEVICE_ATTR(shutdown, 0644, axp20x_show_attr_shutdown,
+ axp20x_store_attr_shutdown);
static struct attribute *axp20x_attributes[] = {
- &axp20x_dev_attr_startup.attr.attr,
- &axp20x_dev_attr_shutdown.attr.attr,
+ &dev_attr_startup.attr,
+ &dev_attr_shutdown.attr,
NULL,
};
@@ -182,13 +224,6 @@ static irqreturn_t axp20x_pek_irq(int irq, void *pwr)
return IRQ_HANDLED;
}
-static void axp20x_remove_sysfs_group(void *_data)
-{
- struct device *dev = _data;
-
- sysfs_remove_group(&dev->kobj, &axp20x_attribute_group);
-}
-
static int axp20x_pek_probe_input_device(struct axp20x_pek *axp20x_pek,
struct platform_device *pdev)
{
@@ -298,8 +333,14 @@ static bool axp20x_pek_should_register_input(struct axp20x_pek *axp20x_pek,
static int axp20x_pek_probe(struct platform_device *pdev)
{
struct axp20x_pek *axp20x_pek;
+ const struct platform_device_id *match = platform_get_device_id(pdev);
int error;
+ if (!match) {
+ dev_err(&pdev->dev, "Failed to get platform_device_id\n");
+ return -EINVAL;
+ }
+
axp20x_pek = devm_kzalloc(&pdev->dev, sizeof(struct axp20x_pek),
GFP_KERNEL);
if (!axp20x_pek)
@@ -313,18 +354,11 @@ static int axp20x_pek_probe(struct platform_device *pdev)
return error;
}
- error = sysfs_create_group(&pdev->dev.kobj, &axp20x_attribute_group);
- if (error) {
- dev_err(&pdev->dev, "Failed to create sysfs attributes: %d\n",
- error);
- return error;
- }
+ axp20x_pek->info = (struct axp20x_info *)match->driver_data;
- error = devm_add_action(&pdev->dev,
- axp20x_remove_sysfs_group, &pdev->dev);
+ error = devm_device_add_group(&pdev->dev, &axp20x_attribute_group);
if (error) {
- axp20x_remove_sysfs_group(&pdev->dev);
- dev_err(&pdev->dev, "Failed to add sysfs cleanup action: %d\n",
+ dev_err(&pdev->dev, "Failed to create sysfs attributes: %d\n",
error);
return error;
}
@@ -358,8 +392,21 @@ static const struct dev_pm_ops axp20x_pek_pm_ops = {
#endif
};
+static const struct platform_device_id axp_pek_id_match[] = {
+ {
+ .name = "axp20x-pek",
+ .driver_data = (kernel_ulong_t)&axp20x_info,
+ },
+ {
+ .name = "axp221-pek",
+ .driver_data = (kernel_ulong_t)&axp221_info,
+ },
+ { /* sentinel */ }
+};
+
static struct platform_driver axp20x_pek_driver = {
.probe = axp20x_pek_probe,
+ .id_table = axp_pek_id_match,
.driver = {
.name = "axp20x-pek",
.pm = &axp20x_pek_pm_ops,
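The axp20x-pek rework above hinges on carrying per-variant timing tables in the platform_device_id table and looking them up in probe(), so plain DEVICE_ATTR() callbacks can reach them through dev_get_drvdata() without dev_ext_attribute containers. A minimal sketch of that pattern, with hypothetical foo_* names standing in for the driver's types, could look like this:

#include <linux/module.h>
#include <linux/platform_device.h>

struct foo_variant {
	unsigned int startup_mask;	/* per-variant register layout */
};

static const struct foo_variant foo_v1_info = { .startup_mask = 0xc0 };
static const struct foo_variant foo_v2_info = { .startup_mask = 0x30 };

static int foo_probe(struct platform_device *pdev)
{
	const struct platform_device_id *id = platform_get_device_id(pdev);

	if (!id)
		return -EINVAL;

	/* stash the variant so sysfs show/store callbacks can dev_get_drvdata() it */
	platform_set_drvdata(pdev, (void *)id->driver_data);
	return 0;
}

static const struct platform_device_id foo_id_table[] = {
	{ .name = "foo-v1", .driver_data = (kernel_ulong_t)&foo_v1_info },
	{ .name = "foo-v2", .driver_data = (kernel_ulong_t)&foo_v2_info },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, foo_id_table);

static struct platform_driver foo_driver = {
	.probe		= foo_probe,
	.id_table	= foo_id_table,
	.driver		= { .name = "foo-v1" },
};
module_platform_driver(foo_driver);
MODULE_LICENSE("GPL");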
diff --git a/drivers/input/misc/dm355evm_keys.c b/drivers/input/misc/dm355evm_keys.c
index bab256ef32b9..c803db64a376 100644
--- a/drivers/input/misc/dm355evm_keys.c
+++ b/drivers/input/misc/dm355evm_keys.c
@@ -15,7 +15,7 @@
#include <linux/platform_device.h>
#include <linux/interrupt.h>
-#include <linux/i2c/dm355evm_msp.h>
+#include <linux/mfd/dm355evm_msp.h>
#include <linux/module.h>
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index f4e8fbec6a94..6bf82ea8c918 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -1261,7 +1261,7 @@ static umode_t ims_pcu_is_attr_visible(struct kobject *kobj,
return mode;
}
-static struct attribute_group ims_pcu_attr_group = {
+static const struct attribute_group ims_pcu_attr_group = {
.is_visible = ims_pcu_is_attr_visible,
.attrs = ims_pcu_attrs,
};
@@ -1480,7 +1480,7 @@ static struct attribute *ims_pcu_ofn_attrs[] = {
NULL
};
-static struct attribute_group ims_pcu_ofn_attr_group = {
+static const struct attribute_group ims_pcu_ofn_attr_group = {
.name = "ofn",
.attrs = ims_pcu_ofn_attrs,
};
diff --git a/drivers/input/misc/keyspan_remote.c b/drivers/input/misc/keyspan_remote.c
index a3fe4a990cc9..77c47d6325fe 100644
--- a/drivers/input/misc/keyspan_remote.c
+++ b/drivers/input/misc/keyspan_remote.c
@@ -85,7 +85,7 @@ static const unsigned short keyspan_key_table[] = {
};
/* table of devices that work with this driver */
-static struct usb_device_id keyspan_table[] = {
+static const struct usb_device_id keyspan_table[] = {
{ USB_DEVICE(USB_KEYSPAN_VENDOR_ID, USB_KEYSPAN_PRODUCT_UIA11) },
{ } /* Terminating entry */
};
diff --git a/drivers/input/misc/pcspkr.c b/drivers/input/misc/pcspkr.c
index 72b1fc3ab910..56ddba21de84 100644
--- a/drivers/input/misc/pcspkr.c
+++ b/drivers/input/misc/pcspkr.c
@@ -18,25 +18,30 @@
#include <linux/input.h>
#include <linux/platform_device.h>
#include <linux/timex.h>
-#include <asm/io.h>
+#include <linux/io.h>
MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION("PC Speaker beeper driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pcspkr");
-static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
+static int pcspkr_event(struct input_dev *dev, unsigned int type,
+ unsigned int code, int value)
{
unsigned int count = 0;
unsigned long flags;
if (type != EV_SND)
- return -1;
+ return -EINVAL;
switch (code) {
- case SND_BELL: if (value) value = 1000;
- case SND_TONE: break;
- default: return -1;
+ case SND_BELL:
+ if (value)
+ value = 1000;
+ case SND_TONE:
+ break;
+ default:
+ return -EINVAL;
}
if (value > 20 && value < 32767)
diff --git a/drivers/input/misc/powermate.c b/drivers/input/misc/powermate.c
index 84909a12ff36..5c8c79623c87 100644
--- a/drivers/input/misc/powermate.c
+++ b/drivers/input/misc/powermate.c
@@ -432,7 +432,7 @@ static void powermate_disconnect(struct usb_interface *intf)
}
}
-static struct usb_device_id powermate_devices [] = {
+static const struct usb_device_id powermate_devices[] = {
{ USB_DEVICE(POWERMATE_VENDOR, POWERMATE_PRODUCT_NEW) },
{ USB_DEVICE(POWERMATE_VENDOR, POWERMATE_PRODUCT_OLD) },
{ USB_DEVICE(CONTOUR_VENDOR, CONTOUR_JOG) },
diff --git a/drivers/input/misc/rk805-pwrkey.c b/drivers/input/misc/rk805-pwrkey.c
new file mode 100644
index 000000000000..921003963a53
--- /dev/null
+++ b/drivers/input/misc/rk805-pwrkey.c
@@ -0,0 +1,111 @@
+/*
+ * Rockchip RK805 PMIC Power Key driver
+ *
+ * Copyright (c) 2017, Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * Author: Joseph Chen <chenjh@rock-chips.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+static irqreturn_t pwrkey_fall_irq(int irq, void *_pwr)
+{
+ struct input_dev *pwr = _pwr;
+
+ input_report_key(pwr, KEY_POWER, 1);
+ input_sync(pwr);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t pwrkey_rise_irq(int irq, void *_pwr)
+{
+ struct input_dev *pwr = _pwr;
+
+ input_report_key(pwr, KEY_POWER, 0);
+ input_sync(pwr);
+
+ return IRQ_HANDLED;
+}
+
+static int rk805_pwrkey_probe(struct platform_device *pdev)
+{
+ struct input_dev *pwr;
+ int fall_irq, rise_irq;
+ int err;
+
+ pwr = devm_input_allocate_device(&pdev->dev);
+ if (!pwr) {
+ dev_err(&pdev->dev, "Can't allocate power button\n");
+ return -ENOMEM;
+ }
+
+ pwr->name = "rk805 pwrkey";
+ pwr->phys = "rk805_pwrkey/input0";
+ pwr->id.bustype = BUS_HOST;
+ input_set_capability(pwr, EV_KEY, KEY_POWER);
+
+ fall_irq = platform_get_irq(pdev, 0);
+ if (fall_irq < 0) {
+ dev_err(&pdev->dev, "Can't get fall irq: %d\n", fall_irq);
+ return fall_irq;
+ }
+
+ rise_irq = platform_get_irq(pdev, 1);
+ if (rise_irq < 0) {
+ dev_err(&pdev->dev, "Can't get rise irq: %d\n", rise_irq);
+ return rise_irq;
+ }
+
+ err = devm_request_any_context_irq(&pwr->dev, fall_irq,
+ pwrkey_fall_irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "rk805_pwrkey_fall", pwr);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Can't register fall irq: %d\n", err);
+ return err;
+ }
+
+ err = devm_request_any_context_irq(&pwr->dev, rise_irq,
+ pwrkey_rise_irq,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "rk805_pwrkey_rise", pwr);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Can't register rise irq: %d\n", err);
+ return err;
+ }
+
+ err = input_register_device(pwr);
+ if (err) {
+ dev_err(&pdev->dev, "Can't register power button: %d\n", err);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, pwr);
+ device_init_wakeup(&pdev->dev, true);
+
+ return 0;
+}
+
+static struct platform_driver rk805_pwrkey_driver = {
+ .probe = rk805_pwrkey_probe,
+ .driver = {
+ .name = "rk805-pwrkey",
+ },
+};
+module_platform_driver(rk805_pwrkey_driver);
+
+MODULE_AUTHOR("Joseph Chen <chenjh@rock-chips.com>");
+MODULE_DESCRIPTION("RK805 PMIC Power Key driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c
index f600f3a7a3c6..23520df7650f 100644
--- a/drivers/input/misc/soc_button_array.c
+++ b/drivers/input/misc/soc_button_array.c
@@ -331,7 +331,7 @@ static int soc_button_probe(struct platform_device *pdev)
error = gpiod_count(dev, NULL);
if (error < 0) {
dev_dbg(dev, "no GPIO attached, ignoring...\n");
- return error;
+ return -ENODEV;
}
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
diff --git a/drivers/input/misc/twl4030-pwrbutton.c b/drivers/input/misc/twl4030-pwrbutton.c
index 1c13005b228f..b307cca17022 100644
--- a/drivers/input/misc/twl4030-pwrbutton.c
+++ b/drivers/input/misc/twl4030-pwrbutton.c
@@ -27,7 +27,7 @@
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
-#include <linux/i2c/twl.h>
+#include <linux/mfd/twl.h>
#define PWR_PWRON_IRQ (1 << 0)
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index caa5a62c42fb..6c51d404874b 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -28,7 +28,7 @@
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/workqueue.h>
-#include <linux/i2c/twl.h>
+#include <linux/mfd/twl.h>
#include <linux/mfd/twl4030-audio.h>
#include <linux/input.h>
#include <linux/slab.h>
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index fa130e7b734c..6bf56bb5f8d9 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -84,17 +84,20 @@ static void xenkbd_handle_key_event(struct xenkbd_info *info,
struct xenkbd_key *key)
{
struct input_dev *dev;
+ int value = key->pressed;
if (test_bit(key->keycode, info->ptr->keybit)) {
dev = info->ptr;
} else if (test_bit(key->keycode, info->kbd->keybit)) {
dev = info->kbd;
+ if (key->pressed && test_bit(key->keycode, info->kbd->key))
+ value = 2; /* Mark as autorepeat */
} else {
pr_warn("unhandled keycode 0x%x\n", key->keycode);
return;
}
- input_report_key(dev, key->keycode, key->pressed);
+ input_event(dev, EV_KEY, key->keycode, value);
input_sync(dev);
}
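The xen-kbdfront change above relies on the input core's EV_KEY value convention: 0 is release, 1 is press, and 2 flags autorepeat of an already-pressed key. Because input_report_key() reduces its value argument to 0/1, autorepeat has to be injected through input_event(), as the hunk does. A small sketch of that convention with illustrative names:

#include <linux/input.h>

/*
 * EV_KEY values understood by the input core:
 *   0 = release, 1 = press, 2 = autorepeat of an already-pressed key.
 */
static void foo_report_key(struct input_dev *dev, unsigned int code,
			   bool pressed, bool already_down)
{
	int value = pressed ? (already_down ? 2 : 1) : 0;

	input_event(dev, EV_KEY, code, value);
	input_sync(dev);
}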
diff --git a/drivers/input/misc/yealink.c b/drivers/input/misc/yealink.c
index 6e7ff9561d92..a1e0ff59d2f2 100644
--- a/drivers/input/misc/yealink.c
+++ b/drivers/input/misc/yealink.c
@@ -798,7 +798,7 @@ static struct attribute *yld_attributes[] = {
NULL
};
-static struct attribute_group yld_attr_group = {
+static const struct attribute_group yld_attr_group = {
.attrs = yld_attributes
};
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 262d1057c1da..850b00e3ad8e 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -1215,14 +1215,24 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
case SS4_PACKET_ID_TWO:
if (priv->flags & ALPS_BUTTONPAD) {
- f->mt[0].x = SS4_BTL_MF_X_V2(p, 0);
+ if (IS_SS4PLUS_DEV(priv->dev_id)) {
+ f->mt[0].x = SS4_PLUS_BTL_MF_X_V2(p, 0);
+ f->mt[1].x = SS4_PLUS_BTL_MF_X_V2(p, 1);
+ } else {
+ f->mt[0].x = SS4_BTL_MF_X_V2(p, 0);
+ f->mt[1].x = SS4_BTL_MF_X_V2(p, 1);
+ }
f->mt[0].y = SS4_BTL_MF_Y_V2(p, 0);
- f->mt[1].x = SS4_BTL_MF_X_V2(p, 1);
f->mt[1].y = SS4_BTL_MF_Y_V2(p, 1);
} else {
- f->mt[0].x = SS4_STD_MF_X_V2(p, 0);
+ if (IS_SS4PLUS_DEV(priv->dev_id)) {
+ f->mt[0].x = SS4_PLUS_STD_MF_X_V2(p, 0);
+ f->mt[1].x = SS4_PLUS_STD_MF_X_V2(p, 1);
+ } else {
+ f->mt[0].x = SS4_STD_MF_X_V2(p, 0);
+ f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
+ }
f->mt[0].y = SS4_STD_MF_Y_V2(p, 0);
- f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
f->mt[1].y = SS4_STD_MF_Y_V2(p, 1);
}
f->pressure = SS4_MF_Z_V2(p, 0) ? 0x30 : 0;
@@ -1239,16 +1249,27 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
case SS4_PACKET_ID_MULTI:
if (priv->flags & ALPS_BUTTONPAD) {
- f->mt[2].x = SS4_BTL_MF_X_V2(p, 0);
+ if (IS_SS4PLUS_DEV(priv->dev_id)) {
+ f->mt[0].x = SS4_PLUS_BTL_MF_X_V2(p, 0);
+ f->mt[1].x = SS4_PLUS_BTL_MF_X_V2(p, 1);
+ } else {
+ f->mt[2].x = SS4_BTL_MF_X_V2(p, 0);
+ f->mt[3].x = SS4_BTL_MF_X_V2(p, 1);
+ }
+
f->mt[2].y = SS4_BTL_MF_Y_V2(p, 0);
- f->mt[3].x = SS4_BTL_MF_X_V2(p, 1);
f->mt[3].y = SS4_BTL_MF_Y_V2(p, 1);
no_data_x = SS4_MFPACKET_NO_AX_BL;
no_data_y = SS4_MFPACKET_NO_AY_BL;
} else {
- f->mt[2].x = SS4_STD_MF_X_V2(p, 0);
+ if (IS_SS4PLUS_DEV(priv->dev_id)) {
+ f->mt[0].x = SS4_PLUS_STD_MF_X_V2(p, 0);
+ f->mt[1].x = SS4_PLUS_STD_MF_X_V2(p, 1);
+ } else {
+ f->mt[0].x = SS4_STD_MF_X_V2(p, 0);
+ f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
+ }
f->mt[2].y = SS4_STD_MF_Y_V2(p, 0);
- f->mt[3].x = SS4_STD_MF_X_V2(p, 1);
f->mt[3].y = SS4_STD_MF_Y_V2(p, 1);
no_data_x = SS4_MFPACKET_NO_AX;
no_data_y = SS4_MFPACKET_NO_AY;
@@ -2541,8 +2562,8 @@ static int alps_set_defaults_ss4_v2(struct psmouse *psmouse,
memset(otp, 0, sizeof(otp));
- if (alps_get_otp_values_ss4_v2(psmouse, 0, &otp[0][0]) ||
- alps_get_otp_values_ss4_v2(psmouse, 1, &otp[1][0]))
+ if (alps_get_otp_values_ss4_v2(psmouse, 1, &otp[1][0]) ||
+ alps_get_otp_values_ss4_v2(psmouse, 0, &otp[0][0]))
return -1;
alps_update_device_area_ss4_v2(otp, priv);
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index ed2d6879fa52..c80a7c76cb76 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -100,6 +100,10 @@ enum SS4_PACKET_ID {
((_b[1 + _i * 3] << 5) & 0x1F00) \
)
+#define SS4_PLUS_STD_MF_X_V2(_b, _i) (((_b[0 + (_i) * 3] << 4) & 0x0070) | \
+ ((_b[1 + (_i) * 3] << 4) & 0x0F80) \
+ )
+
#define SS4_STD_MF_Y_V2(_b, _i) (((_b[1 + (_i) * 3] << 3) & 0x0010) | \
((_b[2 + (_i) * 3] << 5) & 0x01E0) | \
((_b[2 + (_i) * 3] << 4) & 0x0E00) \
@@ -109,6 +113,10 @@ enum SS4_PACKET_ID {
((_b[0 + (_i) * 3] >> 3) & 0x0010) \
)
+#define SS4_PLUS_BTL_MF_X_V2(_b, _i) (SS4_PLUS_STD_MF_X_V2(_b, _i) | \
+ ((_b[0 + (_i) * 3] >> 4) & 0x0008) \
+ )
+
#define SS4_BTL_MF_Y_V2(_b, _i) (SS4_STD_MF_Y_V2(_b, _i) | \
((_b[0 + (_i) * 3] >> 3) & 0x0008) \
)
diff --git a/drivers/input/mouse/appletouch.c b/drivers/input/mouse/appletouch.c
index ef234c9b2f2f..81a695d0b4e0 100644
--- a/drivers/input/mouse/appletouch.c
+++ b/drivers/input/mouse/appletouch.c
@@ -125,7 +125,7 @@ static const struct atp_info geyser4_info = {
* According to Info.plist Geyser IV is the same as Geyser III.)
*/
-static struct usb_device_id atp_table[] = {
+static const struct usb_device_id atp_table[] = {
/* PowerBooks Feb 2005, iBooks G4 */
ATP_DEVICE(0x020e, fountain_info), /* FOUNTAIN ANSI */
ATP_DEVICE(0x020f, fountain_info), /* FOUNTAIN ISO */
diff --git a/drivers/input/mouse/byd.c b/drivers/input/mouse/byd.c
index b27aa637f877..b64b81599f7e 100644
--- a/drivers/input/mouse/byd.c
+++ b/drivers/input/mouse/byd.c
@@ -344,7 +344,7 @@ static int byd_reset_touchpad(struct psmouse *psmouse)
u8 param[4];
size_t i;
- const struct {
+ static const struct {
u16 command;
u8 arg;
} seq[] = {
diff --git a/drivers/input/mouse/elan_i2c.h b/drivers/input/mouse/elan_i2c.h
index 61c202436250..599544c1a91c 100644
--- a/drivers/input/mouse/elan_i2c.h
+++ b/drivers/input/mouse/elan_i2c.h
@@ -58,7 +58,7 @@ struct elan_transport_ops {
int (*get_version)(struct i2c_client *client, bool iap, u8 *version);
int (*get_sm_version)(struct i2c_client *client,
- u16 *ic_type, u8 *version);
+ u16 *ic_type, u8 *version, u8 *clickpad);
int (*get_checksum)(struct i2c_client *client, bool iap, u16 *csum);
int (*get_product_id)(struct i2c_client *client, u16 *id);
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 3b616cb7c67f..0e761d079dc4 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -95,6 +95,7 @@ struct elan_tp_data {
u8 min_baseline;
u8 max_baseline;
bool baseline_ready;
+ u8 clickpad;
};
static int elan_get_fwinfo(u16 ic_type, u16 *validpage_count,
@@ -213,7 +214,7 @@ static int elan_query_product(struct elan_tp_data *data)
return error;
error = data->ops->get_sm_version(data->client, &data->ic_type,
- &data->sm_version);
+ &data->sm_version, &data->clickpad);
if (error)
return error;
@@ -923,6 +924,7 @@ static void elan_report_absolute(struct elan_tp_data *data, u8 *packet)
}
input_report_key(input, BTN_LEFT, tp_info & 0x01);
+ input_report_key(input, BTN_RIGHT, tp_info & 0x02);
input_report_abs(input, ABS_DISTANCE, hover_event != 0);
input_mt_report_pointer_emulation(input, true);
input_sync(input);
@@ -991,7 +993,10 @@ static int elan_setup_input_device(struct elan_tp_data *data)
__set_bit(EV_ABS, input->evbit);
__set_bit(INPUT_PROP_POINTER, input->propbit);
- __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
+ if (data->clickpad)
+ __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
+ else
+ __set_bit(BTN_RIGHT, input->keybit);
__set_bit(BTN_LEFT, input->keybit);
/* Set up ST parameters */
@@ -1247,7 +1252,12 @@ static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN0000", 0 },
{ "ELAN0100", 0 },
{ "ELAN0600", 0 },
+ { "ELAN0602", 0 },
+ { "ELAN0605", 0 },
+ { "ELAN0608", 0 },
{ "ELAN0605", 0 },
+ { "ELAN0609", 0 },
+ { "ELAN060B", 0 },
{ "ELAN1000", 0 },
{ }
};
diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c
index 80172f25974d..15b1330606c1 100644
--- a/drivers/input/mouse/elan_i2c_i2c.c
+++ b/drivers/input/mouse/elan_i2c_i2c.c
@@ -288,7 +288,8 @@ static int elan_i2c_get_version(struct i2c_client *client,
}
static int elan_i2c_get_sm_version(struct i2c_client *client,
- u16 *ic_type, u8 *version)
+ u16 *ic_type, u8 *version,
+ u8 *clickpad)
{
int error;
u8 pattern_ver;
@@ -317,6 +318,7 @@ static int elan_i2c_get_sm_version(struct i2c_client *client,
return error;
}
*version = val[1];
+ *clickpad = val[0] & 0x10;
} else {
error = elan_i2c_read_cmd(client, ETP_I2C_OSM_VERSION_CMD, val);
if (error) {
@@ -326,6 +328,15 @@ static int elan_i2c_get_sm_version(struct i2c_client *client,
}
*version = val[0];
*ic_type = val[1];
+
+ error = elan_i2c_read_cmd(client, ETP_I2C_NSM_VERSION_CMD,
+ val);
+ if (error) {
+ dev_err(&client->dev, "failed to get SM version: %d\n",
+ error);
+ return error;
+ }
+ *clickpad = val[0] & 0x10;
}
return 0;
diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c
index df7a57ca7331..29f99529b187 100644
--- a/drivers/input/mouse/elan_i2c_smbus.c
+++ b/drivers/input/mouse/elan_i2c_smbus.c
@@ -166,7 +166,8 @@ static int elan_smbus_get_version(struct i2c_client *client,
}
static int elan_smbus_get_sm_version(struct i2c_client *client,
- u16 *ic_type, u8 *version)
+ u16 *ic_type, u8 *version,
+ u8 *clickpad)
{
int error;
u8 val[3];
@@ -180,6 +181,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client,
*version = val[0];
*ic_type = val[1];
+ *clickpad = val[0] & 0x10;
return 0;
}
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 791993215ea3..6428d6f4d568 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1377,7 +1377,7 @@ static struct attribute *elantech_attrs[] = {
NULL
};
-static struct attribute_group elantech_attr_group = {
+static const struct attribute_group elantech_attr_group = {
.attrs = elantech_attrs,
};
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index f73b47b8c578..6a5649e52eed 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -101,7 +101,7 @@ static struct attribute *psmouse_attributes[] = {
NULL
};
-static struct attribute_group psmouse_attribute_group = {
+static const struct attribute_group psmouse_attribute_group = {
.attrs = psmouse_attributes,
};
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 16c30460ef04..5af0b7d200bc 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -535,16 +535,17 @@ static void synaptics_apply_quirks(struct psmouse *psmouse,
}
}
+static bool synaptics_has_agm(struct synaptics_data *priv)
+{
+ return (SYN_CAP_ADV_GESTURE(priv->info.ext_cap_0c) ||
+ SYN_CAP_IMAGE_SENSOR(priv->info.ext_cap_0c));
+}
+
static int synaptics_set_advanced_gesture_mode(struct psmouse *psmouse)
{
static u8 param = 0xc8;
- struct synaptics_data *priv = psmouse->private;
int error;
- if (!(SYN_CAP_ADV_GESTURE(priv->info.ext_cap_0c) ||
- SYN_CAP_IMAGE_SENSOR(priv->info.ext_cap_0c)))
- return 0;
-
error = psmouse_sliced_command(psmouse, SYN_QUE_MODEL);
if (error)
return error;
@@ -553,9 +554,6 @@ static int synaptics_set_advanced_gesture_mode(struct psmouse *psmouse)
if (error)
return error;
- /* Advanced gesture mode also sends multi finger data */
- priv->info.capabilities |= BIT(1);
-
return 0;
}
@@ -578,7 +576,7 @@ static int synaptics_set_mode(struct psmouse *psmouse)
if (error)
return error;
- if (priv->absolute_mode) {
+ if (priv->absolute_mode && synaptics_has_agm(priv)) {
error = synaptics_set_advanced_gesture_mode(psmouse);
if (error) {
psmouse_err(psmouse,
@@ -766,9 +764,7 @@ static int synaptics_parse_hw_state(const u8 buf[],
((buf[0] & 0x04) >> 1) |
((buf[3] & 0x04) >> 2));
- if ((SYN_CAP_ADV_GESTURE(priv->info.ext_cap_0c) ||
- SYN_CAP_IMAGE_SENSOR(priv->info.ext_cap_0c)) &&
- hw->w == 2) {
+ if (synaptics_has_agm(priv) && hw->w == 2) {
synaptics_parse_agm(buf, priv, hw);
return 1;
}
@@ -1033,6 +1029,15 @@ static void synaptics_image_sensor_process(struct psmouse *psmouse,
synaptics_report_mt_data(psmouse, sgm, num_fingers);
}
+static bool synaptics_has_multifinger(struct synaptics_data *priv)
+{
+ if (SYN_CAP_MULTIFINGER(priv->info.capabilities))
+ return true;
+
+ /* Advanced gesture mode also sends multi finger data */
+ return synaptics_has_agm(priv);
+}
+
/*
* called for each full received packet from the touchpad
*/
@@ -1079,7 +1084,7 @@ static void synaptics_process_packet(struct psmouse *psmouse)
if (SYN_CAP_EXTENDED(info->capabilities)) {
switch (hw.w) {
case 0 ... 1:
- if (SYN_CAP_MULTIFINGER(info->capabilities))
+ if (synaptics_has_multifinger(priv))
num_fingers = hw.w + 2;
break;
case 2:
@@ -1123,7 +1128,7 @@ static void synaptics_process_packet(struct psmouse *psmouse)
input_report_abs(dev, ABS_TOOL_WIDTH, finger_width);
input_report_key(dev, BTN_TOOL_FINGER, num_fingers == 1);
- if (SYN_CAP_MULTIFINGER(info->capabilities)) {
+ if (synaptics_has_multifinger(priv)) {
input_report_key(dev, BTN_TOOL_DOUBLETAP, num_fingers == 2);
input_report_key(dev, BTN_TOOL_TRIPLETAP, num_fingers == 3);
}
@@ -1283,7 +1288,7 @@ static void set_input_params(struct psmouse *psmouse,
__set_bit(BTN_TOUCH, dev->keybit);
__set_bit(BTN_TOOL_FINGER, dev->keybit);
- if (SYN_CAP_MULTIFINGER(info->capabilities)) {
+ if (synaptics_has_multifinger(priv)) {
__set_bit(BTN_TOOL_DOUBLETAP, dev->keybit);
__set_bit(BTN_TOOL_TRIPLETAP, dev->keybit);
}
diff --git a/drivers/input/mouse/synaptics_usb.c b/drivers/input/mouse/synaptics_usb.c
index 6bcc0189c1c9..cb7d15d826d0 100644
--- a/drivers/input/mouse/synaptics_usb.c
+++ b/drivers/input/mouse/synaptics_usb.c
@@ -525,7 +525,7 @@ static int synusb_reset_resume(struct usb_interface *intf)
return synusb_resume(intf);
}
-static struct usb_device_id synusb_idtable[] = {
+static const struct usb_device_id synusb_idtable[] = {
{ USB_DEVICE_SYNAPTICS(TP, SYNUSB_TOUCHPAD) },
{ USB_DEVICE_SYNAPTICS(INT_TP, SYNUSB_TOUCHPAD) },
{ USB_DEVICE_SYNAPTICS(CPAD,
diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
index 922ea02edcc3..0871010f18d5 100644
--- a/drivers/input/mouse/trackpoint.c
+++ b/drivers/input/mouse/trackpoint.c
@@ -265,7 +265,8 @@ static int trackpoint_start_protocol(struct psmouse *psmouse, unsigned char *fir
if (ps2_command(&psmouse->ps2dev, param, MAKE_PS2_CMD(0, 2, TP_READ_ID)))
return -1;
- if (param[0] != TP_MAGIC_IDENT)
+ /* add new TP ID. */
+ if (!(param[0] & TP_MAGIC_IDENT))
return -1;
if (firmware_id)
@@ -380,8 +381,8 @@ int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
return 0;
if (trackpoint_read(ps2dev, TP_EXT_BTN, &button_info)) {
- psmouse_warn(psmouse, "failed to get extended button data\n");
- button_info = 0;
+ psmouse_warn(psmouse, "failed to get extended button data, assuming 3 buttons\n");
+ button_info = 0x33;
}
psmouse->private = kzalloc(sizeof(struct trackpoint_data), GFP_KERNEL);
diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h
index 5617ed3a7d7a..88055755f82e 100644
--- a/drivers/input/mouse/trackpoint.h
+++ b/drivers/input/mouse/trackpoint.h
@@ -21,8 +21,9 @@
#define TP_COMMAND 0xE2 /* Commands start with this */
#define TP_READ_ID 0xE1 /* Sent for device identification */
-#define TP_MAGIC_IDENT 0x01 /* Sent after a TP_READ_ID followed */
+#define TP_MAGIC_IDENT 0x03 /* Sent after a TP_READ_ID followed */
/* by the firmware ID */
+ /* Firmware ID includes 0x1, 0x2, 0x3 */
/*
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index 0e0ff84088fd..2d7f691ec71c 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -15,6 +15,7 @@
#define MOUSEDEV_MINORS 31
#define MOUSEDEV_MIX 63
+#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
@@ -103,7 +104,7 @@ struct mousedev_client {
spinlock_t packet_lock;
int pos_x, pos_y;
- signed char ps2[6];
+ u8 ps2[6];
unsigned char ready, buffer, bufsiz;
unsigned char imexseq, impsseq;
enum mousedev_emul mode;
@@ -291,11 +292,10 @@ static void mousedev_notify_readers(struct mousedev *mousedev,
}
client->pos_x += packet->dx;
- client->pos_x = client->pos_x < 0 ?
- 0 : (client->pos_x >= xres ? xres : client->pos_x);
+ client->pos_x = clamp_val(client->pos_x, 0, xres);
+
client->pos_y += packet->dy;
- client->pos_y = client->pos_y < 0 ?
- 0 : (client->pos_y >= yres ? yres : client->pos_y);
+ client->pos_y = clamp_val(client->pos_y, 0, yres);
p->dx += packet->dx;
p->dy += packet->dy;
@@ -571,44 +571,50 @@ static int mousedev_open(struct inode *inode, struct file *file)
return error;
}
-static inline int mousedev_limit_delta(int delta, int limit)
-{
- return delta > limit ? limit : (delta < -limit ? -limit : delta);
-}
-
-static void mousedev_packet(struct mousedev_client *client,
- signed char *ps2_data)
+static void mousedev_packet(struct mousedev_client *client, u8 *ps2_data)
{
struct mousedev_motion *p = &client->packets[client->tail];
+ s8 dx, dy, dz;
+
+ dx = clamp_val(p->dx, -127, 127);
+ p->dx -= dx;
+
+ dy = clamp_val(p->dy, -127, 127);
+ p->dy -= dy;
- ps2_data[0] = 0x08 |
- ((p->dx < 0) << 4) | ((p->dy < 0) << 5) | (p->buttons & 0x07);
- ps2_data[1] = mousedev_limit_delta(p->dx, 127);
- ps2_data[2] = mousedev_limit_delta(p->dy, 127);
- p->dx -= ps2_data[1];
- p->dy -= ps2_data[2];
+ ps2_data[0] = BIT(3);
+ ps2_data[0] |= ((dx & BIT(7)) >> 3) | ((dy & BIT(7)) >> 2);
+ ps2_data[0] |= p->buttons & 0x07;
+ ps2_data[1] = dx;
+ ps2_data[2] = dy;
switch (client->mode) {
case MOUSEDEV_EMUL_EXPS:
- ps2_data[3] = mousedev_limit_delta(p->dz, 7);
- p->dz -= ps2_data[3];
- ps2_data[3] = (ps2_data[3] & 0x0f) | ((p->buttons & 0x18) << 1);
+ dz = clamp_val(p->dz, -7, 7);
+ p->dz -= dz;
+
+ ps2_data[3] = (dz & 0x0f) | ((p->buttons & 0x18) << 1);
client->bufsiz = 4;
break;
case MOUSEDEV_EMUL_IMPS:
- ps2_data[0] |=
- ((p->buttons & 0x10) >> 3) | ((p->buttons & 0x08) >> 1);
- ps2_data[3] = mousedev_limit_delta(p->dz, 127);
- p->dz -= ps2_data[3];
+ dz = clamp_val(p->dz, -127, 127);
+ p->dz -= dz;
+
+ ps2_data[0] |= ((p->buttons & 0x10) >> 3) |
+ ((p->buttons & 0x08) >> 1);
+ ps2_data[3] = dz;
+
client->bufsiz = 4;
break;
case MOUSEDEV_EMUL_PS2:
default:
- ps2_data[0] |=
- ((p->buttons & 0x10) >> 3) | ((p->buttons & 0x08) >> 1);
p->dz = 0;
+
+ ps2_data[0] |= ((p->buttons & 0x10) >> 3) |
+ ((p->buttons & 0x08) >> 1);
+
client->bufsiz = 3;
break;
}
@@ -714,7 +720,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
{
struct mousedev_client *client = file->private_data;
struct mousedev *mousedev = client->mousedev;
- signed char data[sizeof(client->ps2)];
+ u8 data[sizeof(client->ps2)];
int retval = 0;
if (!client->ready && !client->buffer && mousedev->exist &&
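The mousedev conversion above replaces the open-coded limiting with clamp_val(), so each PS/2 packet carries at most what a signed 8-bit field can hold while the remainder stays accumulated for the next packet. A standalone sketch of that accumulate-and-drain step (illustrative name, not part of the driver):

#include <linux/kernel.h>	/* clamp_val() */
#include <linux/types.h>

/*
 * Emit at most one signed 8-bit PS/2 delta per packet and keep the
 * remainder accumulated, mirroring what mousedev_packet() now does
 * with p->dx, p->dy and p->dz.
 */
static s8 foo_drain_delta(int *accumulated, int limit)
{
	s8 step = clamp_val(*accumulated, -limit, limit);

	*accumulated -= step;
	return step;
}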
diff --git a/drivers/input/rmi4/rmi_f01.c b/drivers/input/rmi4/rmi_f01.c
index 7f7e9176f7ea..ae966e333a2f 100644
--- a/drivers/input/rmi4/rmi_f01.c
+++ b/drivers/input/rmi4/rmi_f01.c
@@ -334,7 +334,7 @@ static struct attribute *rmi_f01_attrs[] = {
NULL
};
-static struct attribute_group rmi_f01_attr_group = {
+static const struct attribute_group rmi_f01_attr_group = {
.attrs = rmi_f01_attrs,
};
@@ -570,18 +570,14 @@ static int rmi_f01_probe(struct rmi_function *fn)
dev_set_drvdata(&fn->dev, f01);
- error = sysfs_create_group(&fn->rmi_dev->dev.kobj, &rmi_f01_attr_group);
+ error = devm_device_add_group(&fn->rmi_dev->dev, &rmi_f01_attr_group);
if (error)
- dev_warn(&fn->dev, "Failed to create sysfs group: %d\n", error);
+ dev_warn(&fn->dev,
+ "Failed to create attribute group: %d\n", error);
return 0;
}
-static void rmi_f01_remove(struct rmi_function *fn)
-{
- sysfs_remove_group(&fn->rmi_dev->dev.kobj, &rmi_f01_attr_group);
-}
-
static int rmi_f01_config(struct rmi_function *fn)
{
struct f01_data *f01 = dev_get_drvdata(&fn->dev);
@@ -721,7 +717,6 @@ struct rmi_function_handler rmi_f01_handler = {
},
.func = 0x01,
.probe = rmi_f01_probe,
- .remove = rmi_f01_remove,
.config = rmi_f01_config,
.attention = rmi_f01_attention,
.suspend = rmi_f01_suspend,
diff --git a/drivers/input/rmi4/rmi_f34.c b/drivers/input/rmi4/rmi_f34.c
index b8ee78e0d61f..4cfe9703a8e7 100644
--- a/drivers/input/rmi4/rmi_f34.c
+++ b/drivers/input/rmi4/rmi_f34.c
@@ -516,7 +516,7 @@ static struct attribute *rmi_firmware_attrs[] = {
NULL
};
-static struct attribute_group rmi_firmware_attr_group = {
+static const struct attribute_group rmi_firmware_attr_group = {
.attrs = rmi_firmware_attrs,
};
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index c3d05b4d3118..21488c048fa3 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -292,6 +292,17 @@ config SERIO_SUN4I_PS2
To compile this driver as a module, choose M here: the
module will be called sun4i-ps2.
+config SERIO_GPIO_PS2
+ tristate "GPIO PS/2 bit banging driver"
+ depends on GPIOLIB
+ help
+ Say Y here if you want PS/2 bit banging support via GPIO.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ps2-gpio.
+
+ If you are unsure, say N.
+
config USERIO
tristate "User space serio port driver support"
help
diff --git a/drivers/input/serio/Makefile b/drivers/input/serio/Makefile
index 2374ef9b33d7..767bd9b6e1ed 100644
--- a/drivers/input/serio/Makefile
+++ b/drivers/input/serio/Makefile
@@ -30,4 +30,5 @@ obj-$(CONFIG_SERIO_APBPS2) += apbps2.o
obj-$(CONFIG_SERIO_OLPC_APSP) += olpc_apsp.o
obj-$(CONFIG_HYPERV_KEYBOARD) += hyperv-keyboard.o
obj-$(CONFIG_SERIO_SUN4I_PS2) += sun4i-ps2.o
+obj-$(CONFIG_SERIO_GPIO_PS2) += ps2-gpio.o
obj-$(CONFIG_USERIO) += userio.o
diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c
index c6606cacb6a7..ff3875cf3da1 100644
--- a/drivers/input/serio/ambakmi.c
+++ b/drivers/input/serio/ambakmi.c
@@ -187,7 +187,7 @@ static int __maybe_unused amba_kmi_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(amba_kmi_dev_pm_ops, NULL, amba_kmi_resume);
-static struct amba_id amba_kmi_idtable[] = {
+static const struct amba_id amba_kmi_idtable[] = {
{
.id = 0x00041050,
.mask = 0x000fffff,
diff --git a/drivers/input/serio/gscps2.c b/drivers/input/serio/gscps2.c
index ecba666afadb..aa9f29b875de 100644
--- a/drivers/input/serio/gscps2.c
+++ b/drivers/input/serio/gscps2.c
@@ -325,7 +325,7 @@ static void gscps2_close(struct serio *port)
* @return: success/error report
*/
-static int gscps2_probe(struct parisc_device *dev)
+static int __init gscps2_probe(struct parisc_device *dev)
{
struct gscps2port *ps2port;
struct serio *serio;
@@ -412,7 +412,7 @@ fail_nomem:
* @return: success/error report
*/
-static int gscps2_remove(struct parisc_device *dev)
+static int __exit gscps2_remove(struct parisc_device *dev)
{
struct gscps2port *ps2port = dev_get_drvdata(&dev->dev);
@@ -430,7 +430,7 @@ static int gscps2_remove(struct parisc_device *dev)
}
-static struct parisc_device_id gscps2_device_tbl[] = {
+static const struct parisc_device_id gscps2_device_tbl[] __initconst = {
{ HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00084 }, /* LASI PS/2 */
#ifdef DINO_TESTED
{ HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00096 }, /* DINO PS/2 */
@@ -439,11 +439,11 @@ static struct parisc_device_id gscps2_device_tbl[] = {
};
MODULE_DEVICE_TABLE(parisc, gscps2_device_tbl);
-static struct parisc_driver parisc_ps2_driver = {
+static struct parisc_driver parisc_ps2_driver __refdata = {
.name = "gsc_ps2",
.id_table = gscps2_device_tbl,
.probe = gscps2_probe,
- .remove = gscps2_remove,
+ .remove = __exit_p(gscps2_remove),
};
static int __init gscps2_init(void)
diff --git a/drivers/input/serio/hp_sdc.c b/drivers/input/serio/hp_sdc.c
index 1bfdae4b0d99..8eef6849d066 100644
--- a/drivers/input/serio/hp_sdc.c
+++ b/drivers/input/serio/hp_sdc.c
@@ -805,7 +805,7 @@ static void hp_sdc_kicker(unsigned long data)
#if defined(__hppa__)
-static const struct parisc_device_id hp_sdc_tbl[] = {
+static const struct parisc_device_id hp_sdc_tbl[] __initconst = {
{
.hw_type = HPHW_FIO,
.hversion_rev = HVERSION_REV_ANY_ID,
@@ -820,7 +820,7 @@ MODULE_DEVICE_TABLE(parisc, hp_sdc_tbl);
static int __init hp_sdc_init_hppa(struct parisc_device *d);
static struct delayed_work moduleloader_work;
-static struct parisc_driver hp_sdc_driver = {
+static struct parisc_driver hp_sdc_driver __refdata = {
.name = "hp_sdc",
.id_table = hp_sdc_tbl,
.probe = hp_sdc_init_hppa,
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index f932a83b4990..ae81e57e13b9 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -927,7 +927,7 @@ static int i8042_pnp_aux_probe(struct pnp_dev *dev, const struct pnp_device_id *
return 0;
}
-static struct pnp_device_id pnp_kbd_devids[] = {
+static const struct pnp_device_id pnp_kbd_devids[] = {
{ .id = "PNP0300", .driver_data = 0 },
{ .id = "PNP0301", .driver_data = 0 },
{ .id = "PNP0302", .driver_data = 0 },
@@ -957,7 +957,7 @@ static struct pnp_driver i8042_pnp_kbd_driver = {
},
};
-static struct pnp_device_id pnp_aux_devids[] = {
+static const struct pnp_device_id pnp_aux_devids[] = {
{ .id = "AUI0200", .driver_data = 0 },
{ .id = "FJC6000", .driver_data = 0 },
{ .id = "FJC6001", .driver_data = 0 },
diff --git a/drivers/input/serio/ps2-gpio.c b/drivers/input/serio/ps2-gpio.c
new file mode 100644
index 000000000000..b50e3817f3c4
--- /dev/null
+++ b/drivers/input/serio/ps2-gpio.c
@@ -0,0 +1,453 @@
+/*
+ * GPIO based serio bus driver for bit banging the PS/2 protocol
+ *
+ * Author: Danilo Krummrich <danilokrummrich@dk-develop.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/serio.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+#include <linux/mutex.h>
+#include <linux/preempt.h>
+#include <linux/property.h>
+#include <linux/of.h>
+#include <linux/jiffies.h>
+#include <linux/delay.h>
+
+#define DRIVER_NAME "ps2-gpio"
+
+#define PS2_MODE_RX 0
+#define PS2_MODE_TX 1
+
+#define PS2_START_BIT 0
+#define PS2_DATA_BIT0 1
+#define PS2_DATA_BIT1 2
+#define PS2_DATA_BIT2 3
+#define PS2_DATA_BIT3 4
+#define PS2_DATA_BIT4 5
+#define PS2_DATA_BIT5 6
+#define PS2_DATA_BIT6 7
+#define PS2_DATA_BIT7 8
+#define PS2_PARITY_BIT 9
+#define PS2_STOP_BIT 10
+#define PS2_TX_TIMEOUT 11
+#define PS2_ACK_BIT 12
+
+#define PS2_DEV_RET_ACK 0xfa
+#define PS2_DEV_RET_NACK 0xfe
+
+#define PS2_CMD_RESEND 0xfe
+
+struct ps2_gpio_data {
+ struct device *dev;
+ struct serio *serio;
+ unsigned char mode;
+ struct gpio_desc *gpio_clk;
+ struct gpio_desc *gpio_data;
+ bool write_enable;
+ int irq;
+ unsigned char rx_cnt;
+ unsigned char rx_byte;
+ unsigned char tx_cnt;
+ unsigned char tx_byte;
+ struct completion tx_done;
+ struct mutex tx_mutex;
+ struct delayed_work tx_work;
+};
+
+static int ps2_gpio_open(struct serio *serio)
+{
+ struct ps2_gpio_data *drvdata = serio->port_data;
+
+ enable_irq(drvdata->irq);
+ return 0;
+}
+
+static void ps2_gpio_close(struct serio *serio)
+{
+ struct ps2_gpio_data *drvdata = serio->port_data;
+
+ disable_irq(drvdata->irq);
+}
+
+static int __ps2_gpio_write(struct serio *serio, unsigned char val)
+{
+ struct ps2_gpio_data *drvdata = serio->port_data;
+
+ disable_irq_nosync(drvdata->irq);
+ gpiod_direction_output(drvdata->gpio_clk, 0);
+
+ drvdata->mode = PS2_MODE_TX;
+ drvdata->tx_byte = val;
+
+ schedule_delayed_work(&drvdata->tx_work, usecs_to_jiffies(200));
+
+ return 0;
+}
+
+static int ps2_gpio_write(struct serio *serio, unsigned char val)
+{
+ struct ps2_gpio_data *drvdata = serio->port_data;
+ int ret = 0;
+
+ if (in_task()) {
+ mutex_lock(&drvdata->tx_mutex);
+ __ps2_gpio_write(serio, val);
+ if (!wait_for_completion_timeout(&drvdata->tx_done,
+ msecs_to_jiffies(10000)))
+ ret = SERIO_TIMEOUT;
+ mutex_unlock(&drvdata->tx_mutex);
+ } else {
+ __ps2_gpio_write(serio, val);
+ }
+
+ return ret;
+}
+
+static void ps2_gpio_tx_work_fn(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct ps2_gpio_data *drvdata = container_of(dwork,
+ struct ps2_gpio_data,
+ tx_work);
+
+ enable_irq(drvdata->irq);
+ gpiod_direction_output(drvdata->gpio_data, 0);
+ gpiod_direction_input(drvdata->gpio_clk);
+}
+
+static irqreturn_t ps2_gpio_irq_rx(struct ps2_gpio_data *drvdata)
+{
+ unsigned char byte, cnt;
+ int data;
+ int rxflags = 0;
+ static unsigned long old_jiffies;
+
+ byte = drvdata->rx_byte;
+ cnt = drvdata->rx_cnt;
+
+ if (old_jiffies == 0)
+ old_jiffies = jiffies;
+
+ if ((jiffies - old_jiffies) > usecs_to_jiffies(100)) {
+ dev_err(drvdata->dev,
+ "RX: timeout, probably we missed an interrupt\n");
+ goto err;
+ }
+ old_jiffies = jiffies;
+
+ data = gpiod_get_value(drvdata->gpio_data);
+ if (unlikely(data < 0)) {
+ dev_err(drvdata->dev, "RX: failed to get data gpio val: %d\n",
+ data);
+ goto err;
+ }
+
+ switch (cnt) {
+ case PS2_START_BIT:
+ /* start bit should be low */
+ if (unlikely(data)) {
+ dev_err(drvdata->dev, "RX: start bit should be low\n");
+ goto err;
+ }
+ break;
+ case PS2_DATA_BIT0:
+ case PS2_DATA_BIT1:
+ case PS2_DATA_BIT2:
+ case PS2_DATA_BIT3:
+ case PS2_DATA_BIT4:
+ case PS2_DATA_BIT5:
+ case PS2_DATA_BIT6:
+ case PS2_DATA_BIT7:
+ /* processing data bits */
+ if (data)
+ byte |= (data << (cnt - 1));
+ break;
+ case PS2_PARITY_BIT:
+ /* check odd parity */
+ if (!((hweight8(byte) & 1) ^ data)) {
+ rxflags |= SERIO_PARITY;
+ dev_warn(drvdata->dev, "RX: parity error\n");
+ if (!drvdata->write_enable)
+ goto err;
+ }
+
+ /* Do not send spurious ACKs and NACKs when the write fn is
+ * not provided.
+ */
+ if (!drvdata->write_enable) {
+ if (byte == PS2_DEV_RET_NACK)
+ goto err;
+ else if (byte == PS2_DEV_RET_ACK)
+ break;
+ }
+
+ /* Deliver the byte now instead of waiting for the stop bit: if the
+ * stop-bit interrupt is missed there is no way to recover, whereas a
+ * missed parity interrupt would still be noticed while processing
+ * the stop bit. If both are missed, the data is lost.
+ */
+ serio_interrupt(drvdata->serio, byte, rxflags);
+ dev_dbg(drvdata->dev, "RX: sending byte 0x%x\n", byte);
+ break;
+ case PS2_STOP_BIT:
+ /* stop bit should be high */
+ if (unlikely(!data)) {
+ dev_err(drvdata->dev, "RX: stop bit should be high\n");
+ goto err;
+ }
+ cnt = byte = 0;
+ old_jiffies = 0;
+ goto end; /* success */
+ default:
+ dev_err(drvdata->dev, "RX: got out of sync with the device\n");
+ goto err;
+ }
+
+ cnt++;
+ goto end; /* success */
+
+err:
+ cnt = byte = 0;
+ old_jiffies = 0;
+ __ps2_gpio_write(drvdata->serio, PS2_CMD_RESEND);
+end:
+ drvdata->rx_cnt = cnt;
+ drvdata->rx_byte = byte;
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ps2_gpio_irq_tx(struct ps2_gpio_data *drvdata)
+{
+ unsigned char byte, cnt;
+ int data;
+ static unsigned long old_jiffies;
+
+ cnt = drvdata->tx_cnt;
+ byte = drvdata->tx_byte;
+
+ if (old_jiffies == 0)
+ old_jiffies = jiffies;
+
+ if ((jiffies - old_jiffies) > usecs_to_jiffies(100)) {
+ dev_err(drvdata->dev,
+ "TX: timeout, probably we missed an interrupt\n");
+ goto err;
+ }
+ old_jiffies = jiffies;
+
+ switch (cnt) {
+ case PS2_START_BIT:
+ /* should never happen */
+ dev_err(drvdata->dev,
+ "TX: start bit should have been sent already\n");
+ goto err;
+ case PS2_DATA_BIT0:
+ case PS2_DATA_BIT1:
+ case PS2_DATA_BIT2:
+ case PS2_DATA_BIT3:
+ case PS2_DATA_BIT4:
+ case PS2_DATA_BIT5:
+ case PS2_DATA_BIT6:
+ case PS2_DATA_BIT7:
+ data = byte & BIT(cnt - 1);
+ gpiod_set_value(drvdata->gpio_data, data);
+ break;
+ case PS2_PARITY_BIT:
+ /* do odd parity */
+ data = !(hweight8(byte) & 1);
+ gpiod_set_value(drvdata->gpio_data, data);
+ break;
+ case PS2_STOP_BIT:
+ /* release data line to generate stop bit */
+ gpiod_direction_input(drvdata->gpio_data);
+ break;
+ case PS2_TX_TIMEOUT:
+ /* Devices generate one extra clock pulse before sending the
+ * acknowledgment.
+ */
+ break;
+ case PS2_ACK_BIT:
+ gpiod_direction_input(drvdata->gpio_data);
+ data = gpiod_get_value(drvdata->gpio_data);
+ if (data) {
+ dev_warn(drvdata->dev, "TX: received NACK, retry\n");
+ goto err;
+ }
+
+ drvdata->mode = PS2_MODE_RX;
+ complete(&drvdata->tx_done);
+
+ cnt = 1;
+ old_jiffies = 0;
+ goto end; /* success */
+ default:
+ /* We probably missed the stop bit, so release the data line
+ * and try again.
+ */
+ gpiod_direction_input(drvdata->gpio_data);
+ dev_err(drvdata->dev, "TX: got out of sync with the device\n");
+ goto err;
+ }
+
+ cnt++;
+ goto end; /* success */
+
+err:
+ cnt = 1;
+ old_jiffies = 0;
+ gpiod_direction_input(drvdata->gpio_data);
+ __ps2_gpio_write(drvdata->serio, drvdata->tx_byte);
+end:
+ drvdata->tx_cnt = cnt;
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ps2_gpio_irq(int irq, void *dev_id)
+{
+ struct ps2_gpio_data *drvdata = dev_id;
+
+ return drvdata->mode ? ps2_gpio_irq_tx(drvdata) :
+ ps2_gpio_irq_rx(drvdata);
+}
+
+static int ps2_gpio_get_props(struct device *dev,
+ struct ps2_gpio_data *drvdata)
+{
+ drvdata->gpio_data = devm_gpiod_get(dev, "data", GPIOD_IN);
+ if (IS_ERR(drvdata->gpio_data)) {
+ dev_err(dev, "failed to request data gpio: %ld",
+ PTR_ERR(drvdata->gpio_data));
+ return PTR_ERR(drvdata->gpio_data);
+ }
+
+ drvdata->gpio_clk = devm_gpiod_get(dev, "clk", GPIOD_IN);
+ if (IS_ERR(drvdata->gpio_clk)) {
+ dev_err(dev, "failed to request clock gpio: %ld",
+ PTR_ERR(drvdata->gpio_clk));
+ return PTR_ERR(drvdata->gpio_clk);
+ }
+
+ drvdata->write_enable = device_property_read_bool(dev,
+ "write-enable");
+
+ return 0;
+}
+
+static int ps2_gpio_probe(struct platform_device *pdev)
+{
+ struct ps2_gpio_data *drvdata;
+ struct serio *serio;
+ struct device *dev = &pdev->dev;
+ int error;
+
+ drvdata = devm_kzalloc(dev, sizeof(struct ps2_gpio_data), GFP_KERNEL);
+ serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
+ if (!drvdata || !serio) {
+ error = -ENOMEM;
+ goto err_free_serio;
+ }
+
+ error = ps2_gpio_get_props(dev, drvdata);
+ if (error)
+ goto err_free_serio;
+
+ if (gpiod_cansleep(drvdata->gpio_data) ||
+ gpiod_cansleep(drvdata->gpio_clk)) {
+ dev_err(dev, "GPIO data or clk are connected via slow bus\n");
+ error = -EINVAL;
+ }
+
+ drvdata->irq = platform_get_irq(pdev, 0);
+ if (drvdata->irq < 0) {
+ dev_err(dev, "failed to get irq from platform resource: %d\n",
+ drvdata->irq);
+ error = drvdata->irq;
+ goto err_free_serio;
+ }
+
+ error = devm_request_irq(dev, drvdata->irq, ps2_gpio_irq,
+ IRQF_NO_THREAD, DRIVER_NAME, drvdata);
+ if (error) {
+ dev_err(dev, "failed to request irq %d: %d\n",
+ drvdata->irq, error);
+ goto err_free_serio;
+ }
+
+ /* Keep irq disabled until serio->open is called. */
+ disable_irq(drvdata->irq);
+
+ serio->id.type = SERIO_8042;
+ serio->open = ps2_gpio_open;
+ serio->close = ps2_gpio_close;
+ /* Writing can be enabled via platform/DT data, but it may not work
+ * reliably because the bit-banged timing is tight.
+ */
+ serio->write = drvdata->write_enable ? ps2_gpio_write : NULL;
+ serio->port_data = drvdata;
+ serio->dev.parent = dev;
+ strlcpy(serio->name, dev_name(dev), sizeof(serio->name));
+ strlcpy(serio->phys, dev_name(dev), sizeof(serio->phys));
+
+ drvdata->serio = serio;
+ drvdata->dev = dev;
+ drvdata->mode = PS2_MODE_RX;
+
+ /* Tx count always starts at 1, as the start bit is sent implicitly by
+ * host-to-device communication initialization.
+ */
+ drvdata->tx_cnt = 1;
+
+ INIT_DELAYED_WORK(&drvdata->tx_work, ps2_gpio_tx_work_fn);
+ init_completion(&drvdata->tx_done);
+ mutex_init(&drvdata->tx_mutex);
+
+ serio_register_port(serio);
+ platform_set_drvdata(pdev, drvdata);
+
+ return 0; /* success */
+
+err_free_serio:
+ kfree(serio);
+ return error;
+}
+
+static int ps2_gpio_remove(struct platform_device *pdev)
+{
+ struct ps2_gpio_data *drvdata = platform_get_drvdata(pdev);
+
+ serio_unregister_port(drvdata->serio);
+ return 0;
+}
+
+#if defined(CONFIG_OF)
+static const struct of_device_id ps2_gpio_match[] = {
+ { .compatible = "ps2-gpio", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ps2_gpio_match);
+#endif
+
+static struct platform_driver ps2_gpio_driver = {
+ .probe = ps2_gpio_probe,
+ .remove = ps2_gpio_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = of_match_ptr(ps2_gpio_match),
+ },
+};
+module_platform_driver(ps2_gpio_driver);
+
+MODULE_AUTHOR("Danilo Krummrich <danilokrummrich@dk-develop.de>");
+MODULE_DESCRIPTION("GPIO PS2 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index 30d6230d48f7..24a90c8db5b3 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -469,7 +469,7 @@ static struct attribute *serio_device_id_attrs[] = {
NULL
};
-static struct attribute_group serio_id_attr_group = {
+static const struct attribute_group serio_id_attr_group = {
.name = "id",
.attrs = serio_device_id_attrs,
};
@@ -489,7 +489,7 @@ static struct attribute *serio_device_attrs[] = {
NULL
};
-static struct attribute_group serio_device_attr_group = {
+static const struct attribute_group serio_device_attr_group = {
.attrs = serio_device_attrs,
};
diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
index 71ef5d65a0c6..516f9fe77a17 100644
--- a/drivers/input/serio/serio_raw.c
+++ b/drivers/input/serio/serio_raw.c
@@ -410,7 +410,7 @@ static void serio_raw_disconnect(struct serio *serio)
serio_set_drvdata(serio, NULL);
}
-static struct serio_device_id serio_raw_serio_ids[] = {
+static const struct serio_device_id serio_raw_serio_ids[] = {
{
.type = SERIO_8042,
.proto = SERIO_ANY,
diff --git a/drivers/input/serio/xilinx_ps2.c b/drivers/input/serio/xilinx_ps2.c
index 14c40892ed82..07de1b49293c 100644
--- a/drivers/input/serio/xilinx_ps2.c
+++ b/drivers/input/serio/xilinx_ps2.c
@@ -45,8 +45,10 @@
#define XPS2_STATUS_RX_FULL 0x00000001 /* Receive Full */
#define XPS2_STATUS_TX_FULL 0x00000002 /* Transmit Full */
-/* Bit definitions for ISR/IER registers. Both the registers have the same bit
- * definitions and are only defined once. */
+/*
+ * Bit definitions for ISR/IER registers. Both the registers have the same bit
+ * definitions and are only defined once.
+ */
#define XPS2_IPIXR_WDT_TOUT 0x00000001 /* Watchdog Timeout Interrupt */
#define XPS2_IPIXR_TX_NOACK 0x00000002 /* Transmit No ACK Interrupt */
#define XPS2_IPIXR_TX_ACK 0x00000004 /* Transmit ACK (Data) Interrupt */
@@ -292,8 +294,10 @@ static int xps2_of_probe(struct platform_device *ofdev)
/* Disable all the interrupts, just in case */
out_be32(drvdata->base_address + XPS2_IPIER_OFFSET, 0);
- /* Reset the PS2 device and abort any current transaction, to make sure
- * we have the PS2 in a good state */
+ /*
+ * Reset the PS2 device and abort any current transaction,
+ * to make sure we have the PS2 in a good state.
+ */
out_be32(drvdata->base_address + XPS2_SRST_OFFSET, XPS2_SRST_RESET);
dev_info(dev, "Xilinx PS2 at 0x%08llX mapped to 0x%p, irq=%d\n",
diff --git a/drivers/input/tablet/acecad.c b/drivers/input/tablet/acecad.c
index e86e377a90f5..aebb3f9090cd 100644
--- a/drivers/input/tablet/acecad.c
+++ b/drivers/input/tablet/acecad.c
@@ -260,7 +260,7 @@ static void usb_acecad_disconnect(struct usb_interface *intf)
kfree(acecad);
}
-static struct usb_device_id usb_acecad_id_table [] = {
+static const struct usb_device_id usb_acecad_id_table[] = {
{ USB_DEVICE(USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_FLAIR), .driver_info = 0 },
{ USB_DEVICE(USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_302), .driver_info = 1 },
{ }
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
index d67547bded3e..0b55e1f375b3 100644
--- a/drivers/input/tablet/aiptek.c
+++ b/drivers/input/tablet/aiptek.c
@@ -1676,7 +1676,7 @@ static struct attribute *aiptek_attributes[] = {
NULL
};
-static struct attribute_group aiptek_attribute_group = {
+static const struct attribute_group aiptek_attribute_group = {
.attrs = aiptek_attributes,
};
diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c
index 4d9d64908b59..a41c3ff7c9af 100644
--- a/drivers/input/tablet/kbtab.c
+++ b/drivers/input/tablet/kbtab.c
@@ -88,7 +88,7 @@ static void kbtab_irq(struct urb *urb)
__func__, retval);
}
-static struct usb_device_id kbtab_ids[] = {
+static const struct usb_device_id kbtab_ids[] = {
{ USB_DEVICE(USB_VENDOR_ID_KBGEAR, 0x1001), .driver_info = 0 },
{ }
};
diff --git a/drivers/input/tablet/wacom_serial4.c b/drivers/input/tablet/wacom_serial4.c
index 20ab802461e7..38bfaca48eab 100644
--- a/drivers/input/tablet/wacom_serial4.c
+++ b/drivers/input/tablet/wacom_serial4.c
@@ -594,7 +594,7 @@ free_device:
return err;
}
-static struct serio_device_id wacom_serio_ids[] = {
+static const struct serio_device_id wacom_serio_ids[] = {
{
.type = SERIO_RS232,
.proto = SERIO_WACOM_IV,
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index 735a0be1ad95..a2f45aefce08 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -499,7 +499,7 @@ static struct attribute *ads7846_attributes[] = {
NULL,
};
-static struct attribute_group ads7846_attr_group = {
+static const struct attribute_group ads7846_attr_group = {
.attrs = ads7846_attributes,
.is_visible = ads7846_is_visible,
};
@@ -599,7 +599,7 @@ static struct attribute *ads784x_attributes[] = {
NULL,
};
-static struct attribute_group ads784x_attr_group = {
+static const struct attribute_group ads784x_attr_group = {
.attrs = ads784x_attributes,
};
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index dd042a9b0aaa..7659bc48f1db 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -28,6 +28,7 @@
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/slab.h>
+#include <linux/gpio/consumer.h>
#include <asm/unaligned.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
@@ -300,6 +301,7 @@ struct mxt_data {
u8 multitouch;
struct t7_config t7_cfg;
struct mxt_dbg dbg;
+ struct gpio_desc *reset_gpio;
/* Cached parameters from object table */
u16 T5_address;
@@ -3117,11 +3119,9 @@ static int mxt_probe(struct i2c_client *client, const struct i2c_device_id *id)
if (IS_ERR(pdata))
return PTR_ERR(pdata);
- data = kzalloc(sizeof(struct mxt_data), GFP_KERNEL);
- if (!data) {
- dev_err(&client->dev, "Failed to allocate memory\n");
+ data = devm_kzalloc(&client->dev, sizeof(struct mxt_data), GFP_KERNEL);
+ if (!data)
return -ENOMEM;
- }
snprintf(data->phys, sizeof(data->phys), "i2c-%u-%04x/input0",
client->adapter->nr, client->addr);
@@ -3135,19 +3135,40 @@ static int mxt_probe(struct i2c_client *client, const struct i2c_device_id *id)
init_completion(&data->reset_completion);
init_completion(&data->crc_completion);
- error = request_threaded_irq(client->irq, NULL, mxt_interrupt,
- pdata->irqflags | IRQF_ONESHOT,
- client->name, data);
+ data->reset_gpio = devm_gpiod_get_optional(&client->dev,
+ "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(data->reset_gpio)) {
+ error = PTR_ERR(data->reset_gpio);
+ dev_err(&client->dev, "Failed to get reset gpio: %d\n", error);
+ return error;
+ }
+
+ error = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, mxt_interrupt,
+ pdata->irqflags | IRQF_ONESHOT,
+ client->name, data);
if (error) {
dev_err(&client->dev, "Failed to register interrupt\n");
- goto err_free_mem;
+ return error;
+ }
+
+ if (data->reset_gpio) {
+ data->in_bootloader = true;
+ msleep(MXT_RESET_TIME);
+ reinit_completion(&data->bl_completion);
+ gpiod_set_value(data->reset_gpio, 1);
+ error = mxt_wait_for_completion(data, &data->bl_completion,
+ MXT_RESET_TIMEOUT);
+ if (error)
+ return error;
+ data->in_bootloader = false;
}
disable_irq(client->irq);
error = mxt_initialize(data);
if (error)
- goto err_free_irq;
+ return error;
error = sysfs_create_group(&client->dev.kobj, &mxt_attr_group);
if (error) {
@@ -3161,10 +3182,6 @@ static int mxt_probe(struct i2c_client *client, const struct i2c_device_id *id)
err_free_object:
mxt_free_input_device(data);
mxt_free_object_table(data);
-err_free_irq:
- free_irq(client->irq, data);
-err_free_mem:
- kfree(data);
return error;
}
@@ -3172,11 +3189,10 @@ static int mxt_remove(struct i2c_client *client)
{
struct mxt_data *data = i2c_get_clientdata(client);
+ disable_irq(data->irq);
sysfs_remove_group(&client->dev.kobj, &mxt_attr_group);
- free_irq(data->irq, data);
mxt_free_input_device(data);
mxt_free_object_table(data);
- kfree(data);
return 0;
}
diff --git a/drivers/input/touchscreen/dynapro.c b/drivers/input/touchscreen/dynapro.c
index 86237a910876..5b1b66fffbe3 100644
--- a/drivers/input/touchscreen/dynapro.c
+++ b/drivers/input/touchscreen/dynapro.c
@@ -164,7 +164,7 @@ static int dynapro_connect(struct serio *serio, struct serio_driver *drv)
* The serio driver structure.
*/
-static struct serio_device_id dynapro_serio_ids[] = {
+static const struct serio_device_id dynapro_serio_ids[] = {
{
.type = SERIO_RS232,
.proto = SERIO_DYNAPRO,
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index 872750eeca93..0f4cda7282a2 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -1066,7 +1066,7 @@ static struct attribute *elants_attributes[] = {
NULL
};
-static struct attribute_group elants_attribute_group = {
+static const struct attribute_group elants_attribute_group = {
.attrs = elants_attributes,
};
diff --git a/drivers/input/touchscreen/elo.c b/drivers/input/touchscreen/elo.c
index 8051a4b704ea..83433e8efff7 100644
--- a/drivers/input/touchscreen/elo.c
+++ b/drivers/input/touchscreen/elo.c
@@ -381,7 +381,7 @@ static int elo_connect(struct serio *serio, struct serio_driver *drv)
* The serio driver structure.
*/
-static struct serio_device_id elo_serio_ids[] = {
+static const struct serio_device_id elo_serio_ids[] = {
{
.type = SERIO_RS232,
.proto = SERIO_ELO,
diff --git a/drivers/input/touchscreen/fujitsu_ts.c b/drivers/input/touchscreen/fujitsu_ts.c
index d0e46a7e183b..a0fbb454499d 100644
--- a/drivers/input/touchscreen/fujitsu_ts.c
+++ b/drivers/input/touchscreen/fujitsu_ts.c
@@ -151,7 +151,7 @@ static int fujitsu_connect(struct serio *serio, struct serio_driver *drv)
/*
* The serio driver structure.
*/
-static struct serio_device_id fujitsu_serio_ids[] = {
+static const struct serio_device_id fujitsu_serio_ids[] = {
{
.type = SERIO_RS232,
.proto = SERIO_FUJITSU,
diff --git a/drivers/input/touchscreen/gunze.c b/drivers/input/touchscreen/gunze.c
index e2ee62615273..481586909d28 100644
--- a/drivers/input/touchscreen/gunze.c
+++ b/drivers/input/touchscreen/gunze.c
@@ -162,7 +162,7 @@ static int gunze_connect(struct serio *serio, struct serio_driver *drv)
* The serio driver structure.
*/
-static struct serio_device_id gunze_serio_ids[] = {
+static const struct serio_device_id gunze_serio_ids[] = {
{
.type = SERIO_RS232,
.proto = SERIO_GUNZE,
diff --git a/drivers/input/touchscreen/hampshire.c b/drivers/input/touchscreen/hampshire.c
index ecb1e0e01328..eb052d559e54 100644
--- a/drivers/input/touchscreen/hampshire.c
+++ b/drivers/input/touchscreen/hampshire.c
@@ -163,7 +163,7 @@ static int hampshire_connect(struct serio *serio, struct serio_driver *drv)
* The serio driver structure.
*/
-static struct serio_device_id hampshire_serio_ids[] = {
+static const struct serio_device_id hampshire_serio_ids[] = {
{
.type = SERIO_RS232,
.proto = SERIO_HAMPSHIRE,
diff --git a/drivers/input/touchscreen/inexio.c b/drivers/input/touchscreen/inexio.c
index adb80b65a259..b9bc56233ccc 100644
--- a/drivers/input/touchscreen/inexio.c
+++ b/drivers/input/touchscreen/inexio.c
@@ -165,7 +165,7 @@ static int inexio_connect(struct serio *serio, struct serio_driver *drv)
* The serio driver structure.
*/
-static struct serio_device_id inexio_serio_ids[] = {
+static const struct serio_device_id inexio_serio_ids[] = {
{
.type = SERIO_RS232,
.proto = SERIO_INEXIO,
diff --git a/drivers/input/touchscreen/mtouch.c b/drivers/input/touchscreen/mtouch.c
index 9b5552a26169..a3707fad4d1c 100644
--- a/drivers/input/touchscreen/mtouch.c
+++ b/drivers/input/touchscreen/mtouch.c
@@ -178,7 +178,7 @@ static int mtouch_connect(struct serio *serio, struct serio_driver *drv)
* The serio driver structure.
*/
-static struct serio_device_id mtouch_serio_ids[] = {
+static const struct serio_device_id mtouch_serio_ids[] = {
{
.type = SERIO_RS232,
.proto = SERIO_MICROTOUCH,
diff --git a/drivers/input/touchscreen/mxs-lradc-ts.c b/drivers/input/touchscreen/mxs-lradc-ts.c
index 58c016cd6809..3707e927f770 100644
--- a/drivers/input/touchscreen/mxs-lradc-ts.c
+++ b/drivers/input/touchscreen/mxs-lradc-ts.c
@@ -30,7 +30,7 @@
#include <linux/of_irq.h>
#include <linux/platform_device.h>
-const char *mxs_lradc_ts_irq_names[] = {
+static const char * const mxs_lradc_ts_irq_names[] = {
"mxs-lradc-touchscreen",
"mxs-lradc-channel6",
"mxs-lradc-channel7",
@@ -630,9 +630,11 @@ static int mxs_lradc_ts_probe(struct platform_device *pdev)
spin_lock_init(&ts->lock);
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!iores)
+ return -EINVAL;
ts->base = devm_ioremap(dev, iores->start, resource_size(iores));
- if (IS_ERR(ts->base))
- return PTR_ERR(ts->base);
+ if (!ts->base)
+ return -ENOMEM;
ret = of_property_read_u32(node, "fsl,lradc-touchscreen-wires",
&ts_wires);
diff --git a/drivers/input/touchscreen/penmount.c b/drivers/input/touchscreen/penmount.c
index 417d87379265..6e6d7fd98cd2 100644
--- a/drivers/input/touchscreen/penmount.c
+++ b/drivers/input/touchscreen/penmount.c
@@ -293,7 +293,7 @@ static int pm_connect(struct serio *serio, struct serio_driver *drv)
* The serio driver structure.
*/
-static struct serio_device_id pm_serio_ids[] = {
+static const struct serio_device_id pm_serio_ids[] = {
{
.type = SERIO_RS232,
.proto = SERIO_PENMOUNT,
diff --git a/drivers/input/touchscreen/raydium_i2c_ts.c b/drivers/input/touchscreen/raydium_i2c_ts.c
index 1252e49ccfa1..4f1d3fd5d412 100644
--- a/drivers/input/touchscreen/raydium_i2c_ts.c
+++ b/drivers/input/touchscreen/raydium_i2c_ts.c
@@ -939,7 +939,7 @@ static struct attribute *raydium_i2c_attributes[] = {
NULL
};
-static struct attribute_group raydium_i2c_attribute_group = {
+static const struct attribute_group raydium_i2c_attribute_group = {
.attrs = raydium_i2c_attributes,
};
diff --git a/drivers/input/touchscreen/sun4i-ts.c b/drivers/input/touchscreen/sun4i-ts.c
index d07dd29d4848..d2e14d9e5975 100644
--- a/drivers/input/touchscreen/sun4i-ts.c
+++ b/drivers/input/touchscreen/sun4i-ts.c
@@ -206,7 +206,7 @@ static int sun4i_get_tz_temp(void *data, int *temp)
return sun4i_get_temp(data, temp);
}
-static struct thermal_zone_of_device_ops sun4i_ts_tz_ops = {
+static const struct thermal_zone_of_device_ops sun4i_ts_tz_ops = {
.get_temp = sun4i_get_tz_temp,
};
diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
index 128e5bd74720..f16f8358c70a 100644
--- a/drivers/input/touchscreen/sur40.c
+++ b/drivers/input/touchscreen/sur40.c
@@ -59,7 +59,7 @@ struct sur40_blob {
__le16 blob_id;
u8 action; /* 0x02 = enter/exit, 0x03 = update (?) */
- u8 unknown; /* always 0x01 or 0x02 (no idea what this is?) */
+ u8 type; /* bitmask (0x01 blob, 0x02 touch, 0x04 tag) */
__le16 bb_pos_x; /* upper left corner of bounding box */
__le16 bb_pos_y;
@@ -133,12 +133,19 @@ struct sur40_image_header {
/* control commands */
#define SUR40_GET_VERSION 0xb0 /* 12 bytes string */
-#define SUR40_UNKNOWN1 0xb3 /* 5 bytes */
-#define SUR40_UNKNOWN2 0xc1 /* 24 bytes */
+#define SUR40_ACCEL_CAPS 0xb3 /* 5 bytes */
+#define SUR40_SENSOR_CAPS 0xc1 /* 24 bytes */
+
+#define SUR40_POKE 0xc5 /* poke register byte */
+#define SUR40_PEEK 0xc4 /* 48 bytes registers */
#define SUR40_GET_STATE 0xc5 /* 4 bytes state (?) */
#define SUR40_GET_SENSORS 0xb1 /* 8 bytes sensors */
+#define SUR40_BLOB 0x01
+#define SUR40_TOUCH 0x02
+#define SUR40_TAG 0x04
+
static const struct v4l2_pix_format sur40_pix_format[] = {
{
.pixelformat = V4L2_TCH_FMT_TU08,
@@ -238,11 +245,11 @@ static int sur40_init(struct sur40_state *dev)
if (result < 0)
goto error;
- result = sur40_command(dev, SUR40_UNKNOWN2, 0x00, buffer, 24);
+ result = sur40_command(dev, SUR40_SENSOR_CAPS, 0x00, buffer, 24);
if (result < 0)
goto error;
- result = sur40_command(dev, SUR40_UNKNOWN1, 0x00, buffer, 5);
+ result = sur40_command(dev, SUR40_ACCEL_CAPS, 0x00, buffer, 5);
if (result < 0)
goto error;
@@ -289,20 +296,24 @@ static void sur40_close(struct input_polled_dev *polldev)
static void sur40_report_blob(struct sur40_blob *blob, struct input_dev *input)
{
int wide, major, minor;
+ int bb_size_x, bb_size_y, pos_x, pos_y, ctr_x, ctr_y, slotnum;
- int bb_size_x = le16_to_cpu(blob->bb_size_x);
- int bb_size_y = le16_to_cpu(blob->bb_size_y);
-
- int pos_x = le16_to_cpu(blob->pos_x);
- int pos_y = le16_to_cpu(blob->pos_y);
-
- int ctr_x = le16_to_cpu(blob->ctr_x);
- int ctr_y = le16_to_cpu(blob->ctr_y);
+ if (blob->type != SUR40_TOUCH)
+ return;
- int slotnum = input_mt_get_slot_by_key(input, blob->blob_id);
+ slotnum = input_mt_get_slot_by_key(input, blob->blob_id);
if (slotnum < 0 || slotnum >= MAX_CONTACTS)
return;
+ bb_size_x = le16_to_cpu(blob->bb_size_x);
+ bb_size_y = le16_to_cpu(blob->bb_size_y);
+
+ pos_x = le16_to_cpu(blob->pos_x);
+ pos_y = le16_to_cpu(blob->pos_y);
+
+ ctr_x = le16_to_cpu(blob->ctr_x);
+ ctr_y = le16_to_cpu(blob->ctr_y);
+
input_mt_slot(input, slotnum);
input_mt_report_slot_state(input, MT_TOOL_FINGER, 1);
wide = (bb_size_x > bb_size_y);
@@ -367,10 +378,13 @@ static void sur40_poll(struct input_polled_dev *polldev)
/*
* Sanity check. when video data is also being retrieved, the
* packet ID will usually increase in the middle of a series
- * instead of at the end.
- */
+ * instead of at the end. However, the data is still consistent,
+ * so the packet ID is probably just valid for the first packet
+ * in a series.
+
if (packet_id != le32_to_cpu(header->packet_id))
dev_dbg(sur40->dev, "packet ID mismatch\n");
+ */
packet_blobs = result / sizeof(struct sur40_blob);
dev_dbg(sur40->dev, "received %d blobs\n", packet_blobs);
diff --git a/drivers/input/touchscreen/touchit213.c b/drivers/input/touchscreen/touchit213.c
index c27cf8f3d1ca..98a16698be8e 100644
--- a/drivers/input/touchscreen/touchit213.c
+++ b/drivers/input/touchscreen/touchit213.c
@@ -192,7 +192,7 @@ static int touchit213_connect(struct serio *serio, struct serio_driver *drv)
* The serio driver structure.
*/
-static struct serio_device_id touchit213_serio_ids[] = {
+static const struct serio_device_id touchit213_serio_ids[] = {
{
.type = SERIO_RS232,
.proto = SERIO_TOUCHIT213,
diff --git a/drivers/input/touchscreen/touchright.c b/drivers/input/touchscreen/touchright.c
index 4000e5205407..45c325c33f21 100644
--- a/drivers/input/touchscreen/touchright.c
+++ b/drivers/input/touchscreen/touchright.c
@@ -152,7 +152,7 @@ static int tr_connect(struct serio *serio, struct serio_driver *drv)
* The serio driver structure.
*/
-static struct serio_device_id tr_serio_ids[] = {
+static const struct serio_device_id tr_serio_ids[] = {
{
.type = SERIO_RS232,
.proto = SERIO_TOUCHRIGHT,
diff --git a/drivers/input/touchscreen/touchwin.c b/drivers/input/touchscreen/touchwin.c
index ba90f447df75..2ba6b4ca28cb 100644
--- a/drivers/input/touchscreen/touchwin.c
+++ b/drivers/input/touchscreen/touchwin.c
@@ -159,7 +159,7 @@ static int tw_connect(struct serio *serio, struct serio_driver *drv)
* The serio driver structure.
*/
-static struct serio_device_id tw_serio_ids[] = {
+static const struct serio_device_id tw_serio_ids[] = {
{
.type = SERIO_RS232,
.proto = SERIO_TOUCHWIN,
diff --git a/drivers/input/touchscreen/tsc40.c b/drivers/input/touchscreen/tsc40.c
index 29687872cb94..d4ae4ba84c1f 100644
--- a/drivers/input/touchscreen/tsc40.c
+++ b/drivers/input/touchscreen/tsc40.c
@@ -141,7 +141,7 @@ static void tsc_disconnect(struct serio *serio)
serio_set_drvdata(serio, NULL);
}
-static struct serio_device_id tsc_serio_ids[] = {
+static const struct serio_device_id tsc_serio_ids[] = {
{
.type = SERIO_RS232,
.proto = SERIO_TSC40,
diff --git a/drivers/input/touchscreen/wacom_w8001.c b/drivers/input/touchscreen/wacom_w8001.c
index 85e95725d0df..3715d1eace92 100644
--- a/drivers/input/touchscreen/wacom_w8001.c
+++ b/drivers/input/touchscreen/wacom_w8001.c
@@ -681,7 +681,7 @@ fail1:
return err;
}
-static struct serio_device_id w8001_serio_ids[] = {
+static const struct serio_device_id w8001_serio_ids[] = {
{
.type = SERIO_RS232,
.proto = SERIO_W8001,
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 688e77576e5a..4ad7e5e31943 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -575,7 +575,7 @@ static void dump_dte_entry(u16 devid)
static void dump_command(unsigned long phys_addr)
{
- struct iommu_cmd *cmd = phys_to_virt(phys_addr);
+ struct iommu_cmd *cmd = iommu_phys_to_virt(phys_addr);
int i;
for (i = 0; i < 4; ++i)
@@ -919,11 +919,13 @@ static void copy_cmd_to_buffer(struct amd_iommu *iommu,
static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
{
+ u64 paddr = iommu_virt_to_phys((void *)address);
+
WARN_ON(address & 0x7ULL);
memset(cmd, 0, sizeof(*cmd));
- cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK;
- cmd->data[1] = upper_32_bits(__pa(address));
+ cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK;
+ cmd->data[1] = upper_32_bits(paddr);
cmd->data[2] = 1;
CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
}
@@ -1383,7 +1385,7 @@ static bool increase_address_space(struct protection_domain *domain,
return false;
*pte = PM_LEVEL_PDE(domain->mode,
- virt_to_phys(domain->pt_root));
+ iommu_virt_to_phys(domain->pt_root));
domain->pt_root = pte;
domain->mode += 1;
domain->updated = true;
@@ -1420,7 +1422,7 @@ static u64 *alloc_pte(struct protection_domain *domain,
if (!page)
return NULL;
- __npte = PM_LEVEL_PDE(level, virt_to_phys(page));
+ __npte = PM_LEVEL_PDE(level, iommu_virt_to_phys(page));
/* pte could have been changed somewhere. */
if (cmpxchg64(pte, __pte, __npte) != __pte) {
@@ -1536,10 +1538,10 @@ static int iommu_map_page(struct protection_domain *dom,
return -EBUSY;
if (count > 1) {
- __pte = PAGE_SIZE_PTE(phys_addr, page_size);
+ __pte = PAGE_SIZE_PTE(__sme_set(phys_addr), page_size);
__pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_P | IOMMU_PTE_FC;
} else
- __pte = phys_addr | IOMMU_PTE_P | IOMMU_PTE_FC;
+ __pte = __sme_set(phys_addr) | IOMMU_PTE_P | IOMMU_PTE_FC;
if (prot & IOMMU_PROT_IR)
__pte |= IOMMU_PTE_IR;
@@ -1755,7 +1757,7 @@ static void free_gcr3_tbl_level1(u64 *tbl)
if (!(tbl[i] & GCR3_VALID))
continue;
- ptr = __va(tbl[i] & PAGE_MASK);
+ ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
free_page((unsigned long)ptr);
}
@@ -1770,7 +1772,7 @@ static void free_gcr3_tbl_level2(u64 *tbl)
if (!(tbl[i] & GCR3_VALID))
continue;
- ptr = __va(tbl[i] & PAGE_MASK);
+ ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
free_gcr3_tbl_level1(ptr);
}
@@ -2049,7 +2051,7 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
u64 flags = 0;
if (domain->mode != PAGE_MODE_NONE)
- pte_root = virt_to_phys(domain->pt_root);
+ pte_root = iommu_virt_to_phys(domain->pt_root);
pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
<< DEV_ENTRY_MODE_SHIFT;
@@ -2061,7 +2063,7 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
flags |= DTE_FLAG_IOTLB;
if (domain->flags & PD_IOMMUV2_MASK) {
- u64 gcr3 = __pa(domain->gcr3_tbl);
+ u64 gcr3 = iommu_virt_to_phys(domain->gcr3_tbl);
u64 glx = domain->glx;
u64 tmp;
@@ -3606,10 +3608,10 @@ static u64 *__get_gcr3_pte(u64 *root, int level, int pasid, bool alloc)
if (root == NULL)
return NULL;
- *pte = __pa(root) | GCR3_VALID;
+ *pte = iommu_virt_to_phys(root) | GCR3_VALID;
}
- root = __va(*pte & PAGE_MASK);
+ root = iommu_phys_to_virt(*pte & PAGE_MASK);
level -= 1;
}
@@ -3788,7 +3790,7 @@ static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table)
dte = amd_iommu_dev_table[devid].data[2];
dte &= ~DTE_IRQ_PHYS_ADDR_MASK;
- dte |= virt_to_phys(table->table);
+ dte |= iommu_virt_to_phys(table->table);
dte |= DTE_IRQ_REMAP_INTCTL;
dte |= DTE_IRQ_TABLE_LEN;
dte |= DTE_IRQ_REMAP_ENABLE;
@@ -4452,6 +4454,7 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
/* Setting */
irte->hi.fields.ga_root_ptr = (pi_data->base >> 12);
irte->hi.fields.vector = vcpu_pi_info->vector;
+ irte->lo.fields_vapic.ga_log_intr = 1;
irte->lo.fields_vapic.guest_mode = 1;
irte->lo.fields_vapic.ga_tag = pi_data->ga_tag;
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 5cc597b383c7..2292a6cece76 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -30,6 +30,7 @@
#include <linux/iommu.h>
#include <linux/kmemleak.h>
#include <linux/crash_dump.h>
+#include <linux/mem_encrypt.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/gart.h>
@@ -348,7 +349,7 @@ static void iommu_set_device_table(struct amd_iommu *iommu)
BUG_ON(iommu->mmio_base == NULL);
- entry = virt_to_phys(amd_iommu_dev_table);
+ entry = iommu_virt_to_phys(amd_iommu_dev_table);
entry |= (dev_table_size >> 12) - 1;
memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
&entry, sizeof(entry));
@@ -606,7 +607,7 @@ static void iommu_enable_command_buffer(struct amd_iommu *iommu)
BUG_ON(iommu->cmd_buf == NULL);
- entry = (u64)virt_to_phys(iommu->cmd_buf);
+ entry = iommu_virt_to_phys(iommu->cmd_buf);
entry |= MMIO_CMD_SIZE_512;
memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
@@ -635,7 +636,7 @@ static void iommu_enable_event_buffer(struct amd_iommu *iommu)
BUG_ON(iommu->evt_buf == NULL);
- entry = (u64)virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;
+ entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;
memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
&entry, sizeof(entry));
@@ -668,7 +669,7 @@ static void iommu_enable_ppr_log(struct amd_iommu *iommu)
if (iommu->ppr_log == NULL)
return;
- entry = (u64)virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;
+ entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;
memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
&entry, sizeof(entry));
@@ -748,10 +749,10 @@ static int iommu_init_ga_log(struct amd_iommu *iommu)
if (!iommu->ga_log_tail)
goto err_out;
- entry = (u64)virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
+ entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
&entry, sizeof(entry));
- entry = ((u64)virt_to_phys(iommu->ga_log) & 0xFFFFFFFFFFFFFULL) & ~7ULL;
+ entry = (iommu_virt_to_phys(iommu->ga_log) & 0xFFFFFFFFFFFFFULL) & ~7ULL;
memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
&entry, sizeof(entry));
writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
@@ -2440,11 +2441,11 @@ static int __init state_next(void)
break;
case IOMMU_ACPI_FINISHED:
early_enable_iommus();
- register_syscore_ops(&amd_iommu_syscore_ops);
x86_platform.iommu_shutdown = disable_iommus;
init_state = IOMMU_ENABLED;
break;
case IOMMU_ENABLED:
+ register_syscore_ops(&amd_iommu_syscore_ops);
ret = amd_iommu_init_pci();
init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
enable_iommus_v2();
@@ -2564,6 +2565,24 @@ static int __init amd_iommu_init(void)
return ret;
}
+static bool amd_iommu_sme_check(void)
+{
+ if (!sme_active() || (boot_cpu_data.x86 != 0x17))
+ return true;
+
+ /* For Fam17h, a specific level of support is required */
+ if (boot_cpu_data.microcode >= 0x08001205)
+ return true;
+
+ if ((boot_cpu_data.microcode >= 0x08001126) &&
+ (boot_cpu_data.microcode <= 0x080011ff))
+ return true;
+
+ pr_notice("AMD-Vi: IOMMU not currently supported when SME is active\n");
+
+ return false;
+}
+
/****************************************************************************
*
* Early detect code. This code runs at IOMMU detection time in the DMA
@@ -2578,6 +2597,9 @@ int __init amd_iommu_detect(void)
if (no_iommu || (iommu_detected && !gart_iommu_aperture))
return -ENODEV;
+ if (!amd_iommu_sme_check())
+ return -ENODEV;
+
ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
if (ret)
return ret;
diff --git a/drivers/iommu/amd_iommu_proto.h b/drivers/iommu/amd_iommu_proto.h
index 466260f8a1df..3f12fb2338ea 100644
--- a/drivers/iommu/amd_iommu_proto.h
+++ b/drivers/iommu/amd_iommu_proto.h
@@ -87,4 +87,14 @@ static inline bool iommu_feature(struct amd_iommu *iommu, u64 f)
return !!(iommu->features & f);
}
+static inline u64 iommu_virt_to_phys(void *vaddr)
+{
+ return (u64)__sme_set(virt_to_phys(vaddr));
+}
+
+static inline void *iommu_phys_to_virt(unsigned long paddr)
+{
+ return phys_to_virt(__sme_clr(paddr));
+}
+
#endif /* _ASM_X86_AMD_IOMMU_PROTO_H */
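The two helpers above exist so that every address programmed into the IOMMU (device table, page tables, command/event/PPR/GA logs) carries the SME encryption bit, and so that addresses read back from IOMMU structures have it stripped again before phys_to_virt(). A rough sketch of the mem_encrypt helpers they rely on (simplified assumption; sme_me_mask is zero when SME is inactive, so the helpers are no-ops on unaffected systems):

	/* sketch of the assumed <linux/mem_encrypt.h> behaviour */
	#define __sme_set(x)	((x) | sme_me_mask)	/* tag a physical address as encrypted */
	#define __sme_clr(x)	((x) & ~sme_me_mask)	/* strip the encryption bit again */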
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 294a409e283b..8e3a85759242 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -344,7 +344,7 @@
#define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL)
#define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_P)
-#define IOMMU_PTE_PAGE(pte) (phys_to_virt((pte) & IOMMU_PAGE_MASK))
+#define IOMMU_PTE_PAGE(pte) (iommu_phys_to_virt((pte) & IOMMU_PAGE_MASK))
#define IOMMU_PTE_MODE(pte) (((pte) >> 9) & 0x07)
#define IOMMU_PROT_MASK 0x03
@@ -574,7 +574,9 @@ struct amd_iommu {
static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev)
{
- return container_of(dev, struct amd_iommu, iommu.dev);
+ struct iommu_device *iommu = dev_to_iommu_device(dev);
+
+ return container_of(iommu, struct amd_iommu, iommu);
}
#define ACPIHID_UID_LEN 256
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 6629c472eafd..dccf5b76eff2 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -391,13 +391,6 @@ static int mn_clear_flush_young(struct mmu_notifier *mn,
return 0;
}
-static void mn_invalidate_page(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long address)
-{
- __mn_flush_page(mn, address);
-}
-
static void mn_invalidate_range(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start, unsigned long end)
@@ -436,7 +429,6 @@ static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
static const struct mmu_notifier_ops iommu_mn = {
.release = mn_release,
.clear_flush_young = mn_clear_flush_young,
- .invalidate_page = mn_invalidate_page,
.invalidate_range = mn_invalidate_range,
};
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index bc89b4d6c043..2d80fa8a0634 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -400,6 +400,8 @@ struct arm_smmu_device {
u32 cavium_id_base; /* Specific to Cavium */
+ spinlock_t global_sync_lock;
+
/* IOMMU core code handle */
struct iommu_device iommu;
};
@@ -436,7 +438,7 @@ struct arm_smmu_domain {
struct arm_smmu_cfg cfg;
enum arm_smmu_domain_stage stage;
struct mutex init_mutex; /* Protects smmu pointer */
- spinlock_t cb_lock; /* Serialises ATS1* ops */
+ spinlock_t cb_lock; /* Serialises ATS1* ops and TLB syncs */
struct iommu_domain domain;
};
@@ -602,9 +604,12 @@ static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
{
void __iomem *base = ARM_SMMU_GR0(smmu);
+ unsigned long flags;
+ spin_lock_irqsave(&smmu->global_sync_lock, flags);
__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_GR0_sTLBGSYNC,
base + ARM_SMMU_GR0_sTLBGSTATUS);
+ spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
}
static void arm_smmu_tlb_sync_context(void *cookie)
@@ -612,9 +617,12 @@ static void arm_smmu_tlb_sync_context(void *cookie)
struct arm_smmu_domain *smmu_domain = cookie;
struct arm_smmu_device *smmu = smmu_domain->smmu;
void __iomem *base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
+ unsigned long flags;
+ spin_lock_irqsave(&smmu_domain->cb_lock, flags);
__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC,
base + ARM_SMMU_CB_TLBSTATUS);
+ spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
}
static void arm_smmu_tlb_sync_vmid(void *cookie)
@@ -1511,6 +1519,12 @@ static int arm_smmu_add_device(struct device *dev)
if (using_legacy_binding) {
ret = arm_smmu_register_legacy_master(dev, &smmu);
+
+ /*
+ * If dev->iommu_fwspec is initially NULL, arm_smmu_register_legacy_master()
+ * will allocate/initialise a new one. Thus we need to update fwspec for
+ * later use.
+ */
fwspec = dev->iommu_fwspec;
if (ret)
goto out_free;
@@ -1550,15 +1564,15 @@ static int arm_smmu_add_device(struct device *dev)
ret = arm_smmu_master_alloc_smes(dev);
if (ret)
- goto out_free;
+ goto out_cfg_free;
iommu_device_link(&smmu->iommu, dev);
return 0;
+out_cfg_free:
+ kfree(cfg);
out_free:
- if (fwspec)
- kfree(fwspec->iommu_priv);
iommu_fwspec_free(dev);
return ret;
}
@@ -1925,6 +1939,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
smmu->num_mapping_groups = size;
mutex_init(&smmu->stream_map_mutex);
+ spin_lock_init(&smmu->global_sync_lock);
if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 687f18f65cea..3e8636f1220e 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -4736,7 +4736,9 @@ static void intel_disable_iommus(void)
static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
{
- return container_of(dev, struct intel_iommu, iommu.dev);
+ struct iommu_device *iommu_dev = dev_to_iommu_device(dev);
+
+ return container_of(iommu_dev, struct intel_iommu, iommu);
}
static ssize_t intel_iommu_show_version(struct device *dev,
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index f167c0d84ebf..f620dccec8ee 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -223,14 +223,6 @@ static void intel_change_pte(struct mmu_notifier *mn, struct mm_struct *mm,
intel_flush_svm_range(svm, address, 1, 1, 0);
}
-static void intel_invalidate_page(struct mmu_notifier *mn, struct mm_struct *mm,
- unsigned long address)
-{
- struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
-
- intel_flush_svm_range(svm, address, 1, 1, 0);
-}
-
/* Pages have been freed at this point */
static void intel_invalidate_range(struct mmu_notifier *mn,
struct mm_struct *mm,
@@ -285,7 +277,6 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
static const struct mmu_notifier_ops intel_mmuops = {
.release = intel_mm_release,
.change_pte = intel_change_pte,
- .invalidate_page = intel_invalidate_page,
.invalidate_range = intel_invalidate_range,
};
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index af330f513653..d665d0dc16e8 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -479,6 +479,9 @@ static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,
if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
return 0;
+ if (WARN_ON(upper_32_bits(iova) || upper_32_bits(paddr)))
+ return -ERANGE;
+
ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd);
/*
* Synchronise all PTE updates for the new mapping before there's
@@ -659,6 +662,9 @@ static int arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova,
struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
size_t unmapped;
+ if (WARN_ON(upper_32_bits(iova)))
+ return 0;
+
unmapped = __arm_v7s_unmap(data, iova, size, 1, data->pgd);
if (unmapped)
io_pgtable_tlb_sync(&data->iop);
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index b182039862c5..e8018a308868 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -452,6 +452,10 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
return 0;
+ if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) ||
+ paddr >= (1ULL << data->iop.cfg.oas)))
+ return -ERANGE;
+
prot = arm_lpae_prot_to_pte(data, iommu_prot);
ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
/*
@@ -610,6 +614,9 @@ static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
arm_lpae_iopte *ptep = data->pgd;
int lvl = ARM_LPAE_START_LVL(data);
+ if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias)))
+ return 0;
+
unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep);
if (unmapped)
io_pgtable_tlb_sync(&data->iop);
diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h
index 524263a7ae6f..a3e667077b14 100644
--- a/drivers/iommu/io-pgtable.h
+++ b/drivers/iommu/io-pgtable.h
@@ -158,14 +158,12 @@ void free_io_pgtable_ops(struct io_pgtable_ops *ops);
* @fmt: The page table format.
* @cookie: An opaque token provided by the IOMMU driver and passed back to
* any callback routines.
- * @tlb_sync_pending: Private flag for optimising out redundant syncs.
* @cfg: A copy of the page table configuration.
* @ops: The page table operations in use for this set of page tables.
*/
struct io_pgtable {
enum io_pgtable_fmt fmt;
void *cookie;
- bool tlb_sync_pending;
struct io_pgtable_cfg cfg;
struct io_pgtable_ops ops;
};
@@ -175,22 +173,17 @@ struct io_pgtable {
static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop)
{
iop->cfg.tlb->tlb_flush_all(iop->cookie);
- iop->tlb_sync_pending = true;
}
static inline void io_pgtable_tlb_add_flush(struct io_pgtable *iop,
unsigned long iova, size_t size, size_t granule, bool leaf)
{
iop->cfg.tlb->tlb_add_flush(iova, size, granule, leaf, iop->cookie);
- iop->tlb_sync_pending = true;
}
static inline void io_pgtable_tlb_sync(struct io_pgtable *iop)
{
- if (iop->tlb_sync_pending) {
- iop->cfg.tlb->tlb_sync(iop->cookie);
- iop->tlb_sync_pending = false;
- }
+ iop->cfg.tlb->tlb_sync(iop->cookie);
}
/**
diff --git a/drivers/iommu/iommu-sysfs.c b/drivers/iommu/iommu-sysfs.c
index c58351ed61c1..36d1a7ce7fc4 100644
--- a/drivers/iommu/iommu-sysfs.c
+++ b/drivers/iommu/iommu-sysfs.c
@@ -62,32 +62,40 @@ int iommu_device_sysfs_add(struct iommu_device *iommu,
va_list vargs;
int ret;
- device_initialize(&iommu->dev);
+ iommu->dev = kzalloc(sizeof(*iommu->dev), GFP_KERNEL);
+ if (!iommu->dev)
+ return -ENOMEM;
- iommu->dev.class = &iommu_class;
- iommu->dev.parent = parent;
- iommu->dev.groups = groups;
+ device_initialize(iommu->dev);
+
+ iommu->dev->class = &iommu_class;
+ iommu->dev->parent = parent;
+ iommu->dev->groups = groups;
va_start(vargs, fmt);
- ret = kobject_set_name_vargs(&iommu->dev.kobj, fmt, vargs);
+ ret = kobject_set_name_vargs(&iommu->dev->kobj, fmt, vargs);
va_end(vargs);
if (ret)
goto error;
- ret = device_add(&iommu->dev);
+ ret = device_add(iommu->dev);
if (ret)
goto error;
+ dev_set_drvdata(iommu->dev, iommu);
+
return 0;
error:
- put_device(&iommu->dev);
+ put_device(iommu->dev);
return ret;
}
void iommu_device_sysfs_remove(struct iommu_device *iommu)
{
- device_unregister(&iommu->dev);
+ dev_set_drvdata(iommu->dev, NULL);
+ device_unregister(iommu->dev);
+ iommu->dev = NULL;
}
/*
* IOMMU drivers can indicate a device is managed by a given IOMMU using
@@ -102,14 +110,14 @@ int iommu_device_link(struct iommu_device *iommu, struct device *link)
if (!iommu || IS_ERR(iommu))
return -ENODEV;
- ret = sysfs_add_link_to_group(&iommu->dev.kobj, "devices",
+ ret = sysfs_add_link_to_group(&iommu->dev->kobj, "devices",
&link->kobj, dev_name(link));
if (ret)
return ret;
- ret = sysfs_create_link_nowarn(&link->kobj, &iommu->dev.kobj, "iommu");
+ ret = sysfs_create_link_nowarn(&link->kobj, &iommu->dev->kobj, "iommu");
if (ret)
- sysfs_remove_link_from_group(&iommu->dev.kobj, "devices",
+ sysfs_remove_link_from_group(&iommu->dev->kobj, "devices",
dev_name(link));
return ret;
@@ -121,5 +129,5 @@ void iommu_device_unlink(struct iommu_device *iommu, struct device *link)
return;
sysfs_remove_link(&link->kobj, "iommu");
- sysfs_remove_link_from_group(&iommu->dev.kobj, "devices", dev_name(link));
+ sysfs_remove_link_from_group(&iommu->dev->kobj, "devices", dev_name(link));
}
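With the embedded struct device replaced by a dynamically allocated one, the owning iommu_device can no longer be recovered with container_of() on the device, which is why iommu_device_sysfs_add() now stashes it in drvdata. The dev_to_iommu_device() helper used by the AMD and Intel drivers earlier in this series is assumed to be little more than:

	static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
	{
		return (struct iommu_device *)dev_get_drvdata(dev);
	}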
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 5d14cd15198d..91c6d367ab35 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -129,6 +129,7 @@ static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size,
writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A);
writel_relaxed(iova + size - 1, data->base + REG_MMU_INVLD_END_A);
writel_relaxed(F_MMU_INV_RANGE, data->base + REG_MMU_INVALIDATE);
+ data->tlb_flush_active = true;
}
static void mtk_iommu_tlb_sync(void *cookie)
@@ -137,6 +138,10 @@ static void mtk_iommu_tlb_sync(void *cookie)
int ret;
u32 tmp;
+ /* Avoid timing out if there's nothing to wait for */
+ if (!data->tlb_flush_active)
+ return;
+
ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE, tmp,
tmp != 0, 10, 100000);
if (ret) {
@@ -146,6 +151,7 @@ static void mtk_iommu_tlb_sync(void *cookie)
}
/* Clear the CPE status */
writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
+ data->tlb_flush_active = false;
}
static const struct iommu_gather_ops mtk_iommu_gather_ops = {
diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h
index 2a28eadeea0e..c06cc91b5d9a 100644
--- a/drivers/iommu/mtk_iommu.h
+++ b/drivers/iommu/mtk_iommu.h
@@ -47,6 +47,7 @@ struct mtk_iommu_data {
struct iommu_group *m4u_group;
struct mtk_smi_iommu smi_imu; /* SMI larb iommu info */
bool enable_4GB;
+ bool tlb_flush_active;
struct iommu_device iommu;
};
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index f1fd5f44d1d4..9d8a1dd2e2c2 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -7,6 +7,7 @@ config ARM_GIC
select IRQ_DOMAIN
select IRQ_DOMAIN_HIERARCHY
select MULTI_IRQ_HANDLER
+ select GENERIC_IRQ_EFFECTIVE_AFF_MASK
config ARM_GIC_PM
bool
@@ -34,6 +35,7 @@ config ARM_GIC_V3
select MULTI_IRQ_HANDLER
select IRQ_DOMAIN_HIERARCHY
select PARTITION_PERCPU
+ select GENERIC_IRQ_EFFECTIVE_AFF_MASK
config ARM_GIC_V3_ITS
bool
@@ -64,6 +66,7 @@ config ARMADA_370_XP_IRQ
bool
select GENERIC_IRQ_CHIP
select PCI_MSI if PCI
+ select GENERIC_IRQ_EFFECTIVE_AFF_MASK
config ALPINE_MSI
bool
@@ -93,11 +96,13 @@ config BCM6345_L1_IRQ
bool
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
+ select GENERIC_IRQ_EFFECTIVE_AFF_MASK
config BCM7038_L1_IRQ
bool
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
+ select GENERIC_IRQ_EFFECTIVE_AFF_MASK
config BCM7120_L2_IRQ
bool
@@ -136,6 +141,7 @@ config IRQ_MIPS_CPU
select GENERIC_IRQ_IPI if SYS_SUPPORTS_MULTITHREADING
select IRQ_DOMAIN
select IRQ_DOMAIN_HIERARCHY if GENERIC_IRQ_IPI
+ select GENERIC_IRQ_EFFECTIVE_AFF_MASK
config CLPS711X_IRQCHIP
bool
@@ -217,6 +223,7 @@ config VERSATILE_FPGA_IRQ_NR
config XTENSA_MX
bool
select IRQ_DOMAIN
+ select GENERIC_IRQ_EFFECTIVE_AFF_MASK
config XILINX_INTC
bool
@@ -306,3 +313,11 @@ config QCOM_IRQ_COMBINER
help
Say yes here to add support for the IRQ combiner devices embedded
in Qualcomm Technologies chips.
+
+config IRQ_UNIPHIER_AIDET
+ bool "UniPhier AIDET support" if COMPILE_TEST
+ depends on ARCH_UNIPHIER || COMPILE_TEST
+ default ARCH_UNIPHIER
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ Support for the UniPhier AIDET (ARM Interrupt Detector).
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index e88d856cc09c..845abc107ad5 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -28,7 +28,7 @@ obj-$(CONFIG_ARM_GIC_PM) += irq-gic-pm.o
obj-$(CONFIG_ARCH_REALVIEW) += irq-gic-realview.o
obj-$(CONFIG_ARM_GIC_V2M) += irq-gic-v2m.o
obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-common.o
-obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v3-its-pci-msi.o irq-gic-v3-its-platform-msi.o
+obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v3-its-pci-msi.o irq-gic-v3-its-platform-msi.o irq-gic-v4.o
obj-$(CONFIG_PARTITION_PERCPU) += irq-partition-percpu.o
obj-$(CONFIG_HISILICON_IRQ_MBIGEN) += irq-mbigen.o
obj-$(CONFIG_ARM_NVIC) += irq-nvic.o
@@ -78,3 +78,4 @@ obj-$(CONFIG_EZNPS_GIC) += irq-eznps.o
obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o irq-aspeed-i2c-ic.o
obj-$(CONFIG_STM32_EXTI) += irq-stm32-exti.o
obj-$(CONFIG_QCOM_IRQ_COMBINER) += qcom-irq-combiner.o
+obj-$(CONFIG_IRQ_UNIPHIER_AIDET) += irq-uniphier-aidet.o
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index b207b2c3aa55..c9bdc5221b82 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -203,7 +203,7 @@ static struct irq_chip armada_370_xp_msi_irq_chip = {
static struct msi_domain_info armada_370_xp_msi_domain_info = {
.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI),
+ MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
.chip = &armada_370_xp_msi_irq_chip,
};
@@ -330,6 +330,8 @@ static int armada_xp_set_affinity(struct irq_data *d,
writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
raw_spin_unlock(&irq_controller_lock);
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
return IRQ_SET_MASK_OK;
}
#endif
@@ -363,6 +365,7 @@ static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
} else {
irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
handle_level_irq);
+ irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
}
irq_set_probe(virq);
diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c
index 28b26c80f4cf..072bd227b6c6 100644
--- a/drivers/irqchip/irq-atmel-aic-common.c
+++ b/drivers/irqchip/irq-atmel-aic-common.c
@@ -137,14 +137,14 @@ static void __init aic_common_ext_irq_of_init(struct irq_domain *domain)
#define AT91_RTC_IMR 0x28
#define AT91_RTC_IRQ_MASK 0x1f
-void __init aic_common_rtc_irq_fixup(struct device_node *root)
+void __init aic_common_rtc_irq_fixup(void)
{
struct device_node *np;
void __iomem *regs;
- np = of_find_compatible_node(root, NULL, "atmel,at91rm9200-rtc");
+ np = of_find_compatible_node(NULL, NULL, "atmel,at91rm9200-rtc");
if (!np)
- np = of_find_compatible_node(root, NULL,
+ np = of_find_compatible_node(NULL, NULL,
"atmel,at91sam9x5-rtc");
if (!np)
@@ -165,7 +165,7 @@ void __init aic_common_rtc_irq_fixup(struct device_node *root)
#define AT91_RTT_ALMIEN (1 << 16) /* Alarm Interrupt Enable */
#define AT91_RTT_RTTINCIEN (1 << 17) /* Real Time Timer Increment Interrupt Enable */
-void __init aic_common_rtt_irq_fixup(struct device_node *root)
+void __init aic_common_rtt_irq_fixup(void)
{
struct device_node *np;
void __iomem *regs;
@@ -196,11 +196,10 @@ static void __init aic_common_irq_fixup(const struct of_device_id *matches)
return;
match = of_match_node(matches, root);
- of_node_put(root);
if (match) {
- void (*fixup)(struct device_node *) = match->data;
- fixup(root);
+ void (*fixup)(void) = match->data;
+ fixup();
}
of_node_put(root);
diff --git a/drivers/irqchip/irq-atmel-aic-common.h b/drivers/irqchip/irq-atmel-aic-common.h
index af60376d50de..242e62c1851e 100644
--- a/drivers/irqchip/irq-atmel-aic-common.h
+++ b/drivers/irqchip/irq-atmel-aic-common.h
@@ -33,8 +33,8 @@ struct irq_domain *__init aic_common_of_init(struct device_node *node,
const char *name, int nirqs,
const struct of_device_id *matches);
-void __init aic_common_rtc_irq_fixup(struct device_node *root);
+void __init aic_common_rtc_irq_fixup(void);
-void __init aic_common_rtt_irq_fixup(struct device_node *root);
+void __init aic_common_rtt_irq_fixup(void);
#endif /* __IRQ_ATMEL_AIC_COMMON_H */
diff --git a/drivers/irqchip/irq-atmel-aic.c b/drivers/irqchip/irq-atmel-aic.c
index 37f952dd9fc9..bb1ad451392f 100644
--- a/drivers/irqchip/irq-atmel-aic.c
+++ b/drivers/irqchip/irq-atmel-aic.c
@@ -209,20 +209,20 @@ static const struct irq_domain_ops aic_irq_ops = {
.xlate = aic_irq_domain_xlate,
};
-static void __init at91rm9200_aic_irq_fixup(struct device_node *root)
+static void __init at91rm9200_aic_irq_fixup(void)
{
- aic_common_rtc_irq_fixup(root);
+ aic_common_rtc_irq_fixup();
}
-static void __init at91sam9260_aic_irq_fixup(struct device_node *root)
+static void __init at91sam9260_aic_irq_fixup(void)
{
- aic_common_rtt_irq_fixup(root);
+ aic_common_rtt_irq_fixup();
}
-static void __init at91sam9g45_aic_irq_fixup(struct device_node *root)
+static void __init at91sam9g45_aic_irq_fixup(void)
{
- aic_common_rtc_irq_fixup(root);
- aic_common_rtt_irq_fixup(root);
+ aic_common_rtc_irq_fixup();
+ aic_common_rtt_irq_fixup();
}
static const struct of_device_id aic_irq_fixups[] __initconst = {
diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c
index c04ee9a23d09..6acad2ea0fb3 100644
--- a/drivers/irqchip/irq-atmel-aic5.c
+++ b/drivers/irqchip/irq-atmel-aic5.c
@@ -305,9 +305,9 @@ static const struct irq_domain_ops aic5_irq_ops = {
.xlate = aic5_irq_domain_xlate,
};
-static void __init sama5d3_aic_irq_fixup(struct device_node *root)
+static void __init sama5d3_aic_irq_fixup(void)
{
- aic_common_rtc_irq_fixup(root);
+ aic_common_rtc_irq_fixup();
}
static const struct of_device_id aic5_irq_fixups[] __initconst = {
diff --git a/drivers/irqchip/irq-bcm2835.c b/drivers/irqchip/irq-bcm2835.c
index 44d7c38dde47..d2da8a1e6b1b 100644
--- a/drivers/irqchip/irq-bcm2835.c
+++ b/drivers/irqchip/irq-bcm2835.c
@@ -147,13 +147,12 @@ static int __init armctrl_of_init(struct device_node *node,
base = of_iomap(node, 0);
if (!base)
- panic("%s: unable to map IC registers\n",
- node->full_name);
+ panic("%pOF: unable to map IC registers\n", node);
intc.domain = irq_domain_add_linear(node, MAKE_HWIRQ(NR_BANKS, 0),
&armctrl_ops, NULL);
if (!intc.domain)
- panic("%s: unable to create IRQ domain\n", node->full_name);
+ panic("%pOF: unable to create IRQ domain\n", node);
for (b = 0; b < NR_BANKS; b++) {
intc.pending[b] = base + reg_pending[b];
@@ -173,8 +172,8 @@ static int __init armctrl_of_init(struct device_node *node,
int parent_irq = irq_of_parse_and_map(node, 0);
if (!parent_irq) {
- panic("%s: unable to get parent interrupt.\n",
- node->full_name);
+ panic("%pOF: unable to get parent interrupt.\n",
+ node);
}
irq_set_chained_handler(parent_irq, bcm2836_chained_handle_irq);
} else {
diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c
index e7463e3c0814..dc8c1e3eafe7 100644
--- a/drivers/irqchip/irq-bcm2836.c
+++ b/drivers/irqchip/irq-bcm2836.c
@@ -282,8 +282,7 @@ static int __init bcm2836_arm_irqchip_l1_intc_of_init(struct device_node *node,
{
intc.base = of_iomap(node, 0);
if (!intc.base) {
- panic("%s: unable to map local interrupt registers\n",
- node->full_name);
+ panic("%pOF: unable to map local interrupt registers\n", node);
}
bcm2835_init_local_timer_frequency();
@@ -292,7 +291,7 @@ static int __init bcm2836_arm_irqchip_l1_intc_of_init(struct device_node *node,
&bcm2836_arm_irqchip_intc_ops,
NULL);
if (!intc.domain)
- panic("%s: unable to create IRQ domain\n", node->full_name);
+ panic("%pOF: unable to create IRQ domain\n", node);
bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_CNTPSIRQ,
&bcm2836_arm_irqchip_timer);
diff --git a/drivers/irqchip/irq-bcm6345-l1.c b/drivers/irqchip/irq-bcm6345-l1.c
index daa4ae89e466..43f8abe40878 100644
--- a/drivers/irqchip/irq-bcm6345-l1.c
+++ b/drivers/irqchip/irq-bcm6345-l1.c
@@ -231,6 +231,8 @@ static int bcm6345_l1_set_affinity(struct irq_data *d,
}
raw_spin_unlock_irqrestore(&intc->lock, flags);
+ irq_data_update_effective_affinity(d, cpumask_of(new_cpu));
+
return IRQ_SET_MASK_OK_NOCOPY;
}
@@ -291,6 +293,7 @@ static int bcm6345_l1_map(struct irq_domain *d, unsigned int virq,
irq_set_chip_and_handler(virq,
&bcm6345_l1_irq_chip, handle_percpu_irq);
irq_set_chip_data(virq, d->host_data);
+ irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
return 0;
}
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
index c2662a1bfdd3..55cfb986225b 100644
--- a/drivers/irqchip/irq-bcm7038-l1.c
+++ b/drivers/irqchip/irq-bcm7038-l1.c
@@ -212,6 +212,8 @@ static int bcm7038_l1_set_affinity(struct irq_data *d,
__bcm7038_l1_unmask(d, first_cpu);
raw_spin_unlock_irqrestore(&intc->lock, flags);
+ irq_data_update_effective_affinity(d, cpumask_of(first_cpu));
+
return 0;
}
@@ -299,6 +301,7 @@ static int bcm7038_l1_map(struct irq_domain *d, unsigned int virq,
{
irq_set_chip_and_handler(virq, &bcm7038_l1_irq_chip, handle_level_irq);
irq_set_chip_data(virq, d->host_data);
+ irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
return 0;
}
diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c
index 64c2692070ef..983640eba418 100644
--- a/drivers/irqchip/irq-bcm7120-l2.c
+++ b/drivers/irqchip/irq-bcm7120-l2.c
@@ -250,12 +250,6 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn,
if (ret < 0)
goto out_free_l1_data;
- for (idx = 0; idx < data->n_words; idx++) {
- __raw_writel(data->irq_fwd_mask[idx],
- data->pair_base[idx] +
- data->en_offset[idx]);
- }
-
for (irq = 0; irq < data->num_parent_irqs; irq++) {
ret = bcm7120_l2_intc_init_one(dn, data, irq, valid_mask);
if (ret)
@@ -297,6 +291,10 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn,
gc->reg_base = data->pair_base[idx];
ct->regs.mask = data->en_offset[idx];
+ /* gc->reg_base is defined and so is gc->writel */
+ irq_reg_writel(gc, data->irq_fwd_mask[idx],
+ data->en_offset[idx]);
+
ct->chip.irq_mask = irq_gc_mask_clr_bit;
ct->chip.irq_unmask = irq_gc_mask_set_bit;
ct->chip.irq_ack = irq_gc_noop;
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index bddf169c4b37..b009b916a292 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -189,6 +189,7 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np,
ct->chip.irq_suspend = brcmstb_l2_intc_suspend;
ct->chip.irq_resume = brcmstb_l2_intc_resume;
+ ct->chip.irq_pm_shutdown = brcmstb_l2_intc_suspend;
if (data->can_wake) {
/* This IRQ chip can wake the system, set all child interrupts
diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c
index f96601268f71..99d97d7e3fd7 100644
--- a/drivers/irqchip/irq-crossbar.c
+++ b/drivers/irqchip/irq-crossbar.c
@@ -341,13 +341,13 @@ static int __init irqcrossbar_init(struct device_node *node,
int err;
if (!parent) {
- pr_err("%s: no parent, giving up\n", node->full_name);
+ pr_err("%pOF: no parent, giving up\n", node);
return -ENODEV;
}
parent_domain = irq_find_host(parent);
if (!parent_domain) {
- pr_err("%s: unable to obtain parent domain\n", node->full_name);
+ pr_err("%pOF: unable to obtain parent domain\n", node);
return -ENXIO;
}
@@ -360,7 +360,7 @@ static int __init irqcrossbar_init(struct device_node *node,
node, &crossbar_domain_ops,
NULL);
if (!domain) {
- pr_err("%s: failed to allocated domain\n", node->full_name);
+ pr_err("%pOF: failed to allocated domain\n", node);
return -ENOMEM;
}
diff --git a/drivers/irqchip/irq-digicolor.c b/drivers/irqchip/irq-digicolor.c
index dad85e74c37c..fc38d2da11b9 100644
--- a/drivers/irqchip/irq-digicolor.c
+++ b/drivers/irqchip/irq-digicolor.c
@@ -71,14 +71,14 @@ static void __init digicolor_set_gc(void __iomem *reg_base, unsigned irq_base,
static int __init digicolor_of_init(struct device_node *node,
struct device_node *parent)
{
- static void __iomem *reg_base;
+ void __iomem *reg_base;
unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
struct regmap *ucregs;
int ret;
reg_base = of_iomap(node, 0);
if (!reg_base) {
- pr_err("%s: unable to map IC registers\n", node->full_name);
+ pr_err("%pOF: unable to map IC registers\n", node);
return -ENXIO;
}
@@ -88,7 +88,7 @@ static int __init digicolor_of_init(struct device_node *node,
ucregs = syscon_regmap_lookup_by_phandle(node, "syscon");
if (IS_ERR(ucregs)) {
- pr_err("%s: unable to map UC registers\n", node->full_name);
+ pr_err("%pOF: unable to map UC registers\n", node);
return PTR_ERR(ucregs);
}
/* channel 1, regular IRQs */
@@ -97,7 +97,7 @@ static int __init digicolor_of_init(struct device_node *node,
digicolor_irq_domain =
irq_domain_add_linear(node, 64, &irq_generic_chip_ops, NULL);
if (!digicolor_irq_domain) {
- pr_err("%s: unable to create IRQ domain\n", node->full_name);
+ pr_err("%pOF: unable to create IRQ domain\n", node);
return -ENOMEM;
}
@@ -105,7 +105,7 @@ static int __init digicolor_of_init(struct device_node *node,
"digicolor_irq", handle_level_irq,
clr, 0, 0);
if (ret) {
- pr_err("%s: unable to allocate IRQ gc\n", node->full_name);
+ pr_err("%pOF: unable to allocate IRQ gc\n", node);
return ret;
}
diff --git a/drivers/irqchip/irq-dw-apb-ictl.c b/drivers/irqchip/irq-dw-apb-ictl.c
index 052f266364c0..0a19618ce2c8 100644
--- a/drivers/irqchip/irq-dw-apb-ictl.c
+++ b/drivers/irqchip/irq-dw-apb-ictl.c
@@ -79,24 +79,24 @@ static int __init dw_apb_ictl_init(struct device_node *np,
/* Map the parent interrupt for the chained handler */
irq = irq_of_parse_and_map(np, 0);
if (irq <= 0) {
- pr_err("%s: unable to parse irq\n", np->full_name);
+ pr_err("%pOF: unable to parse irq\n", np);
return -EINVAL;
}
ret = of_address_to_resource(np, 0, &r);
if (ret) {
- pr_err("%s: unable to get resource\n", np->full_name);
+ pr_err("%pOF: unable to get resource\n", np);
return ret;
}
if (!request_mem_region(r.start, resource_size(&r), np->full_name)) {
- pr_err("%s: unable to request mem region\n", np->full_name);
+ pr_err("%pOF: unable to request mem region\n", np);
return -ENOMEM;
}
iobase = ioremap(r.start, resource_size(&r));
if (!iobase) {
- pr_err("%s: unable to map resource\n", np->full_name);
+ pr_err("%pOF: unable to map resource\n", np);
ret = -ENOMEM;
goto err_release;
}
@@ -123,7 +123,7 @@ static int __init dw_apb_ictl_init(struct device_node *np,
domain = irq_domain_add_linear(np, nrirqs,
&irq_generic_chip_ops, NULL);
if (!domain) {
- pr_err("%s: unable to add irq domain\n", np->full_name);
+ pr_err("%pOF: unable to add irq domain\n", np);
ret = -ENOMEM;
goto err_unmap;
}
@@ -132,7 +132,7 @@ static int __init dw_apb_ictl_init(struct device_node *np,
handle_level_irq, clr, 0,
IRQ_GC_INIT_MASK_CACHE);
if (ret) {
- pr_err("%s: unable to alloc irq domain gc\n", np->full_name);
+ pr_err("%pOF: unable to alloc irq domain gc\n", np);
goto err_unmap;
}
diff --git a/drivers/irqchip/irq-gic-realview.c b/drivers/irqchip/irq-gic-realview.c
index 54c296401525..18d58d2b4ffe 100644
--- a/drivers/irqchip/irq-gic-realview.c
+++ b/drivers/irqchip/irq-gic-realview.c
@@ -43,7 +43,7 @@ static const struct of_device_id syscon_pldset_of_match[] = {
static int __init
realview_gic_of_init(struct device_node *node, struct device_node *parent)
{
- static struct regmap *map;
+ struct regmap *map;
struct device_node *np;
const struct of_device_id *gic_id;
u32 pld1_ctrl;
diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
index 77931214d954..14a8c0a7e095 100644
--- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
@@ -138,7 +138,7 @@ static int __init its_pci_of_msi_init(void)
if (its_pci_msi_init_one(of_node_to_fwnode(np), np->full_name))
continue;
- pr_info("PCI/MSI: %s domain created\n", np->full_name);
+ pr_info("PCI/MSI: %pOF domain created\n", np);
}
return 0;
diff --git a/drivers/irqchip/irq-gic-v3-its-platform-msi.c b/drivers/irqchip/irq-gic-v3-its-platform-msi.c
index 249240d9a425..833a90fe33ae 100644
--- a/drivers/irqchip/irq-gic-v3-its-platform-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-platform-msi.c
@@ -43,6 +43,7 @@ static int of_pmsi_get_dev_id(struct irq_domain *domain, struct device *dev,
*dev_id = args.args[0];
break;
}
+ index++;
} while (!ret);
return ret;
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 68932873eebc..e8d89343d613 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
+ * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
* Author: Marc Zyngier <marc.zyngier@arm.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -36,6 +36,7 @@
#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v3.h>
+#include <linux/irqchip/arm-gic-v4.h>
#include <asm/cputype.h>
#include <asm/exception.h>
@@ -48,6 +49,19 @@
#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0)
+static u32 lpi_id_bits;
+
+/*
+ * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
+ * deal with (one configuration byte per interrupt). PENDBASE has to
+ * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
+ */
+#define LPI_NRBITS lpi_id_bits
+#define LPI_PROPBASE_SZ ALIGN(BIT(LPI_NRBITS), SZ_64K)
+#define LPI_PENDBASE_SZ ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
+
+#define LPI_PROP_DEFAULT_PRIO 0xa0
+
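/*
 * Worked example of the sizing above (illustrative only, assuming
 * lpi_id_bits == 16): LPI_PROPBASE_SZ = ALIGN(BIT(16), SZ_64K) = 64 KiB
 * (one configuration byte per LPI), and LPI_PENDBASE_SZ =
 * ALIGN(BIT(16) / 8, SZ_64K) = 64 KiB (one pending bit per LPI, rounded
 * up to the required 64 KiB alignment).
 */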
/*
* Collection structure - just an ID, and a redistributor address to
* ping. We use one per CPU as a bag of interrupts assigned to this
@@ -88,6 +102,7 @@ struct its_node {
u32 ite_size;
u32 device_ids;
int numa_node;
+ bool is_v4;
};
#define ITS_ITT_ALIGN SZ_256
@@ -100,11 +115,17 @@ struct event_lpi_map {
u16 *col_map;
irq_hw_number_t lpi_base;
int nr_lpis;
+ struct mutex vlpi_lock;
+ struct its_vm *vm;
+ struct its_vlpi_map *vlpi_maps;
+ int nr_vlpis;
};
/*
- * The ITS view of a device - belongs to an ITS, a collection, owns an
- * interrupt translation table, and a list of interrupts.
+ * The ITS view of a device - belongs to an ITS, owns an interrupt
+ * translation table, and a list of interrupts. If some of its
+ * LPIs are injected into a guest (GICv4), the event_map.vm field
+ * indicates which one.
*/
struct its_device {
struct list_head entry;
@@ -115,13 +136,33 @@ struct its_device {
u32 device_id;
};
+static struct {
+ raw_spinlock_t lock;
+ struct its_device *dev;
+ struct its_vpe **vpes;
+ int next_victim;
+} vpe_proxy;
+
static LIST_HEAD(its_nodes);
static DEFINE_SPINLOCK(its_lock);
static struct rdists *gic_rdists;
static struct irq_domain *its_parent;
+/*
+ * We have a maximum number of 16 ITSs in the whole system if we're
+ * using the ITSList mechanism
+ */
+#define ITS_LIST_MAX 16
+
+static unsigned long its_list_map;
+static u16 vmovp_seq_num;
+static DEFINE_RAW_SPINLOCK(vmovp_lock);
+
+static DEFINE_IDA(its_vpeid_ida);
+
#define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist))
#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
+#define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K)
static struct its_collection *dev_event_to_col(struct its_device *its_dev,
u32 event)
@@ -145,6 +186,11 @@ struct its_cmd_desc {
struct {
struct its_device *dev;
u32 event_id;
+ } its_clear_cmd;
+
+ struct {
+ struct its_device *dev;
+ u32 event_id;
} its_int_cmd;
struct {
@@ -177,6 +223,38 @@ struct its_cmd_desc {
struct {
struct its_collection *col;
} its_invall_cmd;
+
+ struct {
+ struct its_vpe *vpe;
+ } its_vinvall_cmd;
+
+ struct {
+ struct its_vpe *vpe;
+ struct its_collection *col;
+ bool valid;
+ } its_vmapp_cmd;
+
+ struct {
+ struct its_vpe *vpe;
+ struct its_device *dev;
+ u32 virt_id;
+ u32 event_id;
+ bool db_enabled;
+ } its_vmapti_cmd;
+
+ struct {
+ struct its_vpe *vpe;
+ struct its_device *dev;
+ u32 event_id;
+ bool db_enabled;
+ } its_vmovi_cmd;
+
+ struct {
+ struct its_vpe *vpe;
+ struct its_collection *col;
+ u16 seq_num;
+ u16 its_list;
+ } its_vmovp_cmd;
};
};
@@ -193,6 +271,9 @@ struct its_cmd_block {
typedef struct its_collection *(*its_cmd_builder_t)(struct its_cmd_block *,
struct its_cmd_desc *);
+typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_cmd_block *,
+ struct its_cmd_desc *);
+
static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
{
u64 mask = GENMASK_ULL(h, l);
@@ -245,6 +326,46 @@ static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
}
+static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
+{
+ its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
+}
+
+static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
+{
+ its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
+}
+
+static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
+{
+ its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
+}
+
+static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
+{
+ its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
+}
+
+static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num)
+{
+ its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);
+}
+
+static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
+{
+ its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);
+}
+
+static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
+{
+ its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 50, 16);
+}
+
+static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
+{
+ its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
+}
+
static inline void its_fixup_cmd(struct its_cmd_block *cmd)
{
/* Let's fixup BE commands */
@@ -358,6 +479,40 @@ static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd,
return col;
}
+static struct its_collection *its_build_int_cmd(struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ struct its_collection *col;
+
+ col = dev_event_to_col(desc->its_int_cmd.dev,
+ desc->its_int_cmd.event_id);
+
+ its_encode_cmd(cmd, GITS_CMD_INT);
+ its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
+ its_encode_event_id(cmd, desc->its_int_cmd.event_id);
+
+ its_fixup_cmd(cmd);
+
+ return col;
+}
+
+static struct its_collection *its_build_clear_cmd(struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ struct its_collection *col;
+
+ col = dev_event_to_col(desc->its_clear_cmd.dev,
+ desc->its_clear_cmd.event_id);
+
+ its_encode_cmd(cmd, GITS_CMD_CLEAR);
+ its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
+ its_encode_event_id(cmd, desc->its_clear_cmd.event_id);
+
+ its_fixup_cmd(cmd);
+
+ return col;
+}
+
static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
@@ -369,6 +524,94 @@ static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd,
return NULL;
}
+static struct its_vpe *its_build_vinvall_cmd(struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ its_encode_cmd(cmd, GITS_CMD_VINVALL);
+ its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);
+
+ its_fixup_cmd(cmd);
+
+ return desc->its_vinvall_cmd.vpe;
+}
+
+static struct its_vpe *its_build_vmapp_cmd(struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ unsigned long vpt_addr;
+
+ vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
+
+ its_encode_cmd(cmd, GITS_CMD_VMAPP);
+ its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
+ its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
+ its_encode_target(cmd, desc->its_vmapp_cmd.col->target_address);
+ its_encode_vpt_addr(cmd, vpt_addr);
+ its_encode_vpt_size(cmd, LPI_NRBITS - 1);
+
+ its_fixup_cmd(cmd);
+
+ return desc->its_vmapp_cmd.vpe;
+}
+
+static struct its_vpe *its_build_vmapti_cmd(struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ u32 db;
+
+ if (desc->its_vmapti_cmd.db_enabled)
+ db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
+ else
+ db = 1023;
+
+ its_encode_cmd(cmd, GITS_CMD_VMAPTI);
+ its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
+ its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
+ its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
+ its_encode_db_phys_id(cmd, db);
+ its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);
+
+ its_fixup_cmd(cmd);
+
+ return desc->its_vmapti_cmd.vpe;
+}
+
+static struct its_vpe *its_build_vmovi_cmd(struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ u32 db;
+
+ if (desc->its_vmovi_cmd.db_enabled)
+ db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
+ else
+ db = 1023;
+
+ its_encode_cmd(cmd, GITS_CMD_VMOVI);
+ its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
+ its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
+ its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
+ its_encode_db_phys_id(cmd, db);
+ its_encode_db_valid(cmd, true);
+
+ its_fixup_cmd(cmd);
+
+ return desc->its_vmovi_cmd.vpe;
+}
+
+static struct its_vpe *its_build_vmovp_cmd(struct its_cmd_block *cmd,
+ struct its_cmd_desc *desc)
+{
+ its_encode_cmd(cmd, GITS_CMD_VMOVP);
+ its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
+ its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
+ its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
+ its_encode_target(cmd, desc->its_vmovp_cmd.col->target_address);
+
+ its_fixup_cmd(cmd);
+
+ return desc->its_vmovp_cmd.vpe;
+}
+
static u64 its_cmd_ptr_to_offset(struct its_node *its,
struct its_cmd_block *ptr)
{
@@ -453,7 +696,13 @@ static void its_wait_for_range_completion(struct its_node *its,
while (1) {
rd_idx = readl_relaxed(its->base + GITS_CREADR);
- if (rd_idx >= to_idx || rd_idx < from_idx)
+
+ /* Direct case */
+ if (from_idx < to_idx && rd_idx >= to_idx)
+ break;
+
+ /* Wrapped case */
+ if (from_idx >= to_idx && rd_idx >= to_idx && rd_idx < from_idx)
break;
count--;
@@ -466,42 +715,84 @@ static void its_wait_for_range_completion(struct its_node *its,
}
}
-static void its_send_single_command(struct its_node *its,
- its_cmd_builder_t builder,
- struct its_cmd_desc *desc)
+/* Warning, macro hell follows */
+#define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn) \
+void name(struct its_node *its, \
+ buildtype builder, \
+ struct its_cmd_desc *desc) \
+{ \
+ struct its_cmd_block *cmd, *sync_cmd, *next_cmd; \
+ synctype *sync_obj; \
+ unsigned long flags; \
+ \
+ raw_spin_lock_irqsave(&its->lock, flags); \
+ \
+ cmd = its_allocate_entry(its); \
+ if (!cmd) { /* We're soooooo screwed... */ \
+ raw_spin_unlock_irqrestore(&its->lock, flags); \
+ return; \
+ } \
+ sync_obj = builder(cmd, desc); \
+ its_flush_cmd(its, cmd); \
+ \
+ if (sync_obj) { \
+ sync_cmd = its_allocate_entry(its); \
+ if (!sync_cmd) \
+ goto post; \
+ \
+ buildfn(sync_cmd, sync_obj); \
+ its_flush_cmd(its, sync_cmd); \
+ } \
+ \
+post: \
+ next_cmd = its_post_commands(its); \
+ raw_spin_unlock_irqrestore(&its->lock, flags); \
+ \
+ its_wait_for_range_completion(its, cmd, next_cmd); \
+}
+
+static void its_build_sync_cmd(struct its_cmd_block *sync_cmd,
+ struct its_collection *sync_col)
+{
+ its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
+ its_encode_target(sync_cmd, sync_col->target_address);
+
+ its_fixup_cmd(sync_cmd);
+}
+
+static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
+ struct its_collection, its_build_sync_cmd)
+
+static void its_build_vsync_cmd(struct its_cmd_block *sync_cmd,
+ struct its_vpe *sync_vpe)
+{
+ its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
+ its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);
+
+ its_fixup_cmd(sync_cmd);
+}
+
+static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
+ struct its_vpe, its_build_vsync_cmd)
+
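To keep the macro above readable, here is a rough sketch of what one instantiation amounts to
(illustrative only, not additional driver code):

	/*
	 * static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
	 *			       struct its_collection, its_build_sync_cmd)
	 * roughly declares:
	 */
	static void its_send_single_command(struct its_node *its,
					    its_cmd_builder_t builder,
					    struct its_cmd_desc *desc);
	/*
	 * ...whose body queues the command produced by builder(), chains a SYNC
	 * built by its_build_sync_cmd() against the returned collection, and then
	 * waits for that queue range to drain. The _vcommand variant is identical
	 * except that it chains a VSYNC against the returned vPE.
	 */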
+static void its_send_int(struct its_device *dev, u32 event_id)
{
- struct its_cmd_block *cmd, *sync_cmd, *next_cmd;
- struct its_collection *sync_col;
- unsigned long flags;
+ struct its_cmd_desc desc;
- raw_spin_lock_irqsave(&its->lock, flags);
+ desc.its_int_cmd.dev = dev;
+ desc.its_int_cmd.event_id = event_id;
- cmd = its_allocate_entry(its);
- if (!cmd) { /* We're soooooo screewed... */
- pr_err_ratelimited("ITS can't allocate, dropping command\n");
- raw_spin_unlock_irqrestore(&its->lock, flags);
- return;
- }
- sync_col = builder(cmd, desc);
- its_flush_cmd(its, cmd);
+ its_send_single_command(dev->its, its_build_int_cmd, &desc);
+}
- if (sync_col) {
- sync_cmd = its_allocate_entry(its);
- if (!sync_cmd) {
- pr_err_ratelimited("ITS can't SYNC, skipping\n");
- goto post;
- }
- its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
- its_encode_target(sync_cmd, sync_col->target_address);
- its_fixup_cmd(sync_cmd);
- its_flush_cmd(its, sync_cmd);
- }
+static void its_send_clear(struct its_device *dev, u32 event_id)
+{
+ struct its_cmd_desc desc;
-post:
- next_cmd = its_post_commands(its);
- raw_spin_unlock_irqrestore(&its->lock, flags);
+ desc.its_clear_cmd.dev = dev;
+ desc.its_clear_cmd.event_id = event_id;
- its_wait_for_range_completion(its, cmd, next_cmd);
+ its_send_single_command(dev->its, its_build_clear_cmd, &desc);
}
static void its_send_inv(struct its_device *dev, u32 event_id)
@@ -577,6 +868,106 @@ static void its_send_invall(struct its_node *its, struct its_collection *col)
its_send_single_command(its, its_build_invall_cmd, &desc);
}
+static void its_send_vmapti(struct its_device *dev, u32 id)
+{
+ struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
+ struct its_cmd_desc desc;
+
+ desc.its_vmapti_cmd.vpe = map->vpe;
+ desc.its_vmapti_cmd.dev = dev;
+ desc.its_vmapti_cmd.virt_id = map->vintid;
+ desc.its_vmapti_cmd.event_id = id;
+ desc.its_vmapti_cmd.db_enabled = map->db_enabled;
+
+ its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
+}
+
+static void its_send_vmovi(struct its_device *dev, u32 id)
+{
+ struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
+ struct its_cmd_desc desc;
+
+ desc.its_vmovi_cmd.vpe = map->vpe;
+ desc.its_vmovi_cmd.dev = dev;
+ desc.its_vmovi_cmd.event_id = id;
+ desc.its_vmovi_cmd.db_enabled = map->db_enabled;
+
+ its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
+}
+
+static void its_send_vmapp(struct its_vpe *vpe, bool valid)
+{
+ struct its_cmd_desc desc;
+ struct its_node *its;
+
+ desc.its_vmapp_cmd.vpe = vpe;
+ desc.its_vmapp_cmd.valid = valid;
+
+ list_for_each_entry(its, &its_nodes, entry) {
+ if (!its->is_v4)
+ continue;
+
+ desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
+ its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
+ }
+}
+
+static void its_send_vmovp(struct its_vpe *vpe)
+{
+ struct its_cmd_desc desc;
+ struct its_node *its;
+ unsigned long flags;
+ int col_id = vpe->col_idx;
+
+ desc.its_vmovp_cmd.vpe = vpe;
+ desc.its_vmovp_cmd.its_list = (u16)its_list_map;
+
+ if (!its_list_map) {
+ its = list_first_entry(&its_nodes, struct its_node, entry);
+ desc.its_vmovp_cmd.seq_num = 0;
+ desc.its_vmovp_cmd.col = &its->collections[col_id];
+ its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
+ return;
+ }
+
+ /*
+ * Yet another marvel of the architecture. If using the
+ * its_list "feature", we need to make sure that all ITSs
+ * receive all VMOVP commands in the same order. The only way
+ * to guarantee this is to make vmovp a serialization point.
+ *
+ * Wall <-- Head.
+ */
+ raw_spin_lock_irqsave(&vmovp_lock, flags);
+
+ desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
+
+ /* Emit VMOVPs */
+ list_for_each_entry(its, &its_nodes, entry) {
+ if (!its->is_v4)
+ continue;
+
+ desc.its_vmovp_cmd.col = &its->collections[col_id];
+ its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
+ }
+
+ raw_spin_unlock_irqrestore(&vmovp_lock, flags);
+}
+
+static void its_send_vinvall(struct its_vpe *vpe)
+{
+ struct its_cmd_desc desc;
+ struct its_node *its;
+
+ desc.its_vinvall_cmd.vpe = vpe;
+
+ list_for_each_entry(its, &its_nodes, entry) {
+ if (!its->is_v4)
+ continue;
+ its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
+ }
+}
+
/*
* irqchip functions - assumes MSI, mostly.
*/
@@ -587,17 +978,26 @@ static inline u32 its_get_event_id(struct irq_data *d)
return d->hwirq - its_dev->event_map.lpi_base;
}
-static void lpi_set_config(struct irq_data *d, bool enable)
+static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
{
- struct its_device *its_dev = irq_data_get_irq_chip_data(d);
- irq_hw_number_t hwirq = d->hwirq;
- u32 id = its_get_event_id(d);
- u8 *cfg = page_address(gic_rdists->prop_page) + hwirq - 8192;
+ irq_hw_number_t hwirq;
+ struct page *prop_page;
+ u8 *cfg;
- if (enable)
- *cfg |= LPI_PROP_ENABLED;
- else
- *cfg &= ~LPI_PROP_ENABLED;
+ if (irqd_is_forwarded_to_vcpu(d)) {
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ u32 event = its_get_event_id(d);
+
+ prop_page = its_dev->event_map.vm->vprop_page;
+ hwirq = its_dev->event_map.vlpi_maps[event].vintid;
+ } else {
+ prop_page = gic_rdists->prop_page;
+ hwirq = d->hwirq;
+ }
+
+ cfg = page_address(prop_page) + hwirq - 8192;
+ *cfg &= ~clr;
+ *cfg |= set | LPI_PROP_GROUP1;
/*
* Make the above write visible to the redistributors.
@@ -608,17 +1008,53 @@ static void lpi_set_config(struct irq_data *d, bool enable)
gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
else
dsb(ishst);
- its_send_inv(its_dev, id);
+}
+
+static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+
+ lpi_write_config(d, clr, set);
+ its_send_inv(its_dev, its_get_event_id(d));
+}
+
+static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ u32 event = its_get_event_id(d);
+
+ if (its_dev->event_map.vlpi_maps[event].db_enabled == enable)
+ return;
+
+ its_dev->event_map.vlpi_maps[event].db_enabled = enable;
+
+ /*
+ * More fun with the architecture:
+ *
+ * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
+ * value or to 1023, depending on the enable bit. But that
+ * would be issuing a mapping for an /existing/ DevID+EventID
+ * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
+ * to the /same/ vPE, using this opportunity to adjust the
+ * doorbell. Mouahahahaha. We loves it, Precious.
+ */
+ its_send_vmovi(its_dev, event);
}
static void its_mask_irq(struct irq_data *d)
{
- lpi_set_config(d, false);
+ if (irqd_is_forwarded_to_vcpu(d))
+ its_vlpi_set_doorbell(d, false);
+
+ lpi_update_config(d, LPI_PROP_ENABLED, 0);
}
static void its_unmask_irq(struct irq_data *d)
{
- lpi_set_config(d, true);
+ if (irqd_is_forwarded_to_vcpu(d))
+ its_vlpi_set_doorbell(d, true);
+
+ lpi_update_config(d, 0, LPI_PROP_ENABLED);
}
static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
@@ -630,6 +1066,10 @@ static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
struct its_collection *target_col;
u32 id = its_get_event_id(d);
+ /* A forwarded interrupt should use irq_set_vcpu_affinity */
+ if (irqd_is_forwarded_to_vcpu(d))
+ return -EINVAL;
+
/* lpi cannot be routed to a redistributor that is on a foreign node */
if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
if (its_dev->its->numa_node >= 0) {
@@ -649,6 +1089,7 @@ static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
target_col = &its_dev->its->collections[cpu];
its_send_movi(its_dev, target_col, id);
its_dev->event_map.col_map[id] = cpu;
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
}
return IRQ_SET_MASK_OK_DONE;
@@ -670,6 +1111,179 @@ static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
iommu_dma_map_msi_msg(d->irq, msg);
}
+static int its_irq_set_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which,
+ bool state)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ u32 event = its_get_event_id(d);
+
+ if (which != IRQCHIP_STATE_PENDING)
+ return -EINVAL;
+
+ if (state)
+ its_send_int(its_dev, event);
+ else
+ its_send_clear(its_dev, event);
+
+ return 0;
+}
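For context, this is the hook behind the generic irq_set_irqchip_state() API, so LPI pending
state can now be injected or retired from software. A hypothetical caller (illustrative only,
not part of this patch) would look like:

	irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, true);	/* -> INT   */
	irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, false);	/* -> CLEAR */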
+
+static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ u32 event = its_get_event_id(d);
+ int ret = 0;
+
+ if (!info->map)
+ return -EINVAL;
+
+ mutex_lock(&its_dev->event_map.vlpi_lock);
+
+ if (!its_dev->event_map.vm) {
+ struct its_vlpi_map *maps;
+
+ maps = kzalloc(sizeof(*maps) * its_dev->event_map.nr_lpis,
+ GFP_KERNEL);
+ if (!maps) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ its_dev->event_map.vm = info->map->vm;
+ its_dev->event_map.vlpi_maps = maps;
+ } else if (its_dev->event_map.vm != info->map->vm) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Get our private copy of the mapping information */
+ its_dev->event_map.vlpi_maps[event] = *info->map;
+
+ if (irqd_is_forwarded_to_vcpu(d)) {
+ /* Already mapped, move it around */
+ its_send_vmovi(its_dev, event);
+ } else {
+ /* Drop the physical mapping */
+ its_send_discard(its_dev, event);
+
+ /* and install the virtual one */
+ its_send_vmapti(its_dev, event);
+ irqd_set_forwarded_to_vcpu(d);
+
+ /* Increment the number of VLPIs */
+ its_dev->event_map.nr_vlpis++;
+ }
+
+out:
+ mutex_unlock(&its_dev->event_map.vlpi_lock);
+ return ret;
+}
+
+static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ u32 event = its_get_event_id(d);
+ int ret = 0;
+
+ mutex_lock(&its_dev->event_map.vlpi_lock);
+
+ if (!its_dev->event_map.vm ||
+ !its_dev->event_map.vlpi_maps[event].vm) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Copy our mapping information to the incoming request */
+ *info->map = its_dev->event_map.vlpi_maps[event];
+
+out:
+ mutex_unlock(&its_dev->event_map.vlpi_lock);
+ return ret;
+}
+
+static int its_vlpi_unmap(struct irq_data *d)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ u32 event = its_get_event_id(d);
+ int ret = 0;
+
+ mutex_lock(&its_dev->event_map.vlpi_lock);
+
+ if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Drop the virtual mapping */
+ its_send_discard(its_dev, event);
+
+ /* and restore the physical one */
+ irqd_clr_forwarded_to_vcpu(d);
+ its_send_mapti(its_dev, d->hwirq, event);
+ lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO |
+ LPI_PROP_ENABLED |
+ LPI_PROP_GROUP1));
+
+ /*
+ * Drop the refcount and make the device available again if
+ * this was the last VLPI.
+ */
+ if (!--its_dev->event_map.nr_vlpis) {
+ its_dev->event_map.vm = NULL;
+ kfree(its_dev->event_map.vlpi_maps);
+ }
+
+out:
+ mutex_unlock(&its_dev->event_map.vlpi_lock);
+ return ret;
+}
+
+static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+
+ if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
+ return -EINVAL;
+
+ if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
+ lpi_update_config(d, 0xff, info->config);
+ else
+ lpi_write_config(d, 0xff, info->config);
+ its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));
+
+ return 0;
+}
+
+static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ struct its_cmd_info *info = vcpu_info;
+
+ /* Need a v4 ITS */
+ if (!its_dev->its->is_v4)
+ return -EINVAL;
+
+ /* Unmap request? */
+ if (!info)
+ return its_vlpi_unmap(d);
+
+ switch (info->cmd_type) {
+ case MAP_VLPI:
+ return its_vlpi_map(d, info);
+
+ case GET_VLPI:
+ return its_vlpi_get(d, info);
+
+ case PROP_UPDATE_VLPI:
+ case PROP_UPDATE_AND_INV_VLPI:
+ return its_vlpi_prop_update(d, info);
+
+ default:
+ return -EINVAL;
+ }
+}
+
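The dispatch above is driven through the generic irq_set_vcpu_affinity() call. A hypothetical
caller (illustration only; vm, vpe, vintid and host_irq are placeholder names, and the field
names follow their use in this file, with the definitions living in
<linux/irqchip/arm-gic-v4.h>) might do:

	struct its_vlpi_map map = {
		.vm	    = vm,	/* the guest's its_vm		 */
		.vpe	    = vpe,	/* target virtual CPU		 */
		.vintid	    = vintid,	/* LPI number as seen by guest	 */
		.db_enabled = true,
	};
	struct its_cmd_info info = {
		.cmd_type = MAP_VLPI,
		.map	  = &map,
	};

	ret = irq_set_vcpu_affinity(host_irq, &info);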
static struct irq_chip its_irq_chip = {
.name = "ITS",
.irq_mask = its_mask_irq,
@@ -677,6 +1291,8 @@ static struct irq_chip its_irq_chip = {
.irq_eoi = irq_chip_eoi_parent,
.irq_set_affinity = its_set_affinity,
.irq_compose_msi_msg = its_irq_compose_msi_msg,
+ .irq_set_irqchip_state = its_irq_set_irqchip_state,
+ .irq_set_vcpu_affinity = its_irq_set_vcpu_affinity,
};
/*
@@ -695,7 +1311,6 @@ static struct irq_chip its_irq_chip = {
static unsigned long *lpi_bitmap;
static u32 lpi_chunks;
-static u32 lpi_id_bits;
static DEFINE_SPINLOCK(lpi_lock);
static int its_lpi_to_chunk(int lpi)
@@ -766,16 +1381,15 @@ out:
return bitmap;
}
-static void its_lpi_free(struct event_lpi_map *map)
+static void its_lpi_free_chunks(unsigned long *bitmap, int base, int nr_ids)
{
- int base = map->lpi_base;
- int nr_ids = map->nr_lpis;
int lpi;
spin_lock(&lpi_lock);
for (lpi = base; lpi < (base + nr_ids); lpi += IRQS_PER_CHUNK) {
int chunk = its_lpi_to_chunk(lpi);
+
BUG_ON(chunk > lpi_chunks);
if (test_bit(chunk, lpi_bitmap)) {
clear_bit(chunk, lpi_bitmap);
@@ -786,28 +1400,40 @@ static void its_lpi_free(struct event_lpi_map *map)
spin_unlock(&lpi_lock);
- kfree(map->lpi_map);
- kfree(map->col_map);
+ kfree(bitmap);
}
-/*
- * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
- * deal with (one configuration byte per interrupt). PENDBASE has to
- * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
- */
-#define LPI_NRBITS lpi_id_bits
-#define LPI_PROPBASE_SZ ALIGN(BIT(LPI_NRBITS), SZ_64K)
-#define LPI_PENDBASE_SZ ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
+static struct page *its_allocate_prop_table(gfp_t gfp_flags)
+{
+ struct page *prop_page;
-#define LPI_PROP_DEFAULT_PRIO 0xa0
+ prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
+ if (!prop_page)
+ return NULL;
+
+ /* Priority 0xa0, Group-1, disabled */
+ memset(page_address(prop_page),
+ LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1,
+ LPI_PROPBASE_SZ);
+
+ /* Make sure the GIC will observe the written configuration */
+ gic_flush_dcache_to_poc(page_address(prop_page), LPI_PROPBASE_SZ);
+
+ return prop_page;
+}
+
+static void its_free_prop_table(struct page *prop_page)
+{
+ free_pages((unsigned long)page_address(prop_page),
+ get_order(LPI_PROPBASE_SZ));
+}
static int __init its_alloc_lpi_tables(void)
{
phys_addr_t paddr;
lpi_id_bits = min_t(u32, gic_rdists->id_bits, ITS_MAX_LPI_NRBITS);
- gic_rdists->prop_page = alloc_pages(GFP_NOWAIT,
- get_order(LPI_PROPBASE_SZ));
+ gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT);
if (!gic_rdists->prop_page) {
pr_err("Failed to allocate PROPBASE\n");
return -ENOMEM;
@@ -816,14 +1442,6 @@ static int __init its_alloc_lpi_tables(void)
paddr = page_to_phys(gic_rdists->prop_page);
pr_info("GIC: using LPI property table @%pa\n", &paddr);
- /* Priority 0xa0, Group-1, disabled */
- memset(page_address(gic_rdists->prop_page),
- LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1,
- LPI_PROPBASE_SZ);
-
- /* Make sure the GIC will observe the written configuration */
- gic_flush_dcache_to_poc(page_address(gic_rdists->prop_page), LPI_PROPBASE_SZ);
-
return its_lpi_init(lpi_id_bits);
}
@@ -962,10 +1580,13 @@ retry_baser:
return 0;
}
-static bool its_parse_baser_device(struct its_node *its, struct its_baser *baser,
- u32 psz, u32 *order)
+static bool its_parse_indirect_baser(struct its_node *its,
+ struct its_baser *baser,
+ u32 psz, u32 *order)
{
- u64 esz = GITS_BASER_ENTRY_SIZE(its_read_baser(its, baser));
+ u64 tmp = its_read_baser(its, baser);
+ u64 type = GITS_BASER_TYPE(tmp);
+ u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
u32 ids = its->device_ids;
u32 new_order = *order;
@@ -1004,8 +1625,9 @@ static bool its_parse_baser_device(struct its_node *its, struct its_baser *baser
if (new_order >= MAX_ORDER) {
new_order = MAX_ORDER - 1;
ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
- pr_warn("ITS@%pa: Device Table too large, reduce ids %u->%u\n",
- &its->phys_base, its->device_ids, ids);
+ pr_warn("ITS@%pa: %s Table too large, reduce ids %u->%u\n",
+ &its->phys_base, its_base_type_string[type],
+ its->device_ids, ids);
}
*order = new_order;
@@ -1053,11 +1675,16 @@ static int its_alloc_tables(struct its_node *its)
u32 order = get_order(psz);
bool indirect = false;
- if (type == GITS_BASER_TYPE_NONE)
+ switch (type) {
+ case GITS_BASER_TYPE_NONE:
continue;
- if (type == GITS_BASER_TYPE_DEVICE)
- indirect = its_parse_baser_device(its, baser, psz, &order);
+ case GITS_BASER_TYPE_DEVICE:
+ case GITS_BASER_TYPE_VCPU:
+ indirect = its_parse_indirect_baser(its, baser,
+ psz, &order);
+ break;
+ }
err = its_setup_baser(its, baser, cache, shr, psz, order, indirect);
if (err < 0) {
@@ -1084,6 +1711,30 @@ static int its_alloc_collections(struct its_node *its)
return 0;
}
+static struct page *its_allocate_pending_table(gfp_t gfp_flags)
+{
+ struct page *pend_page;
+ /*
+ * The pending pages have to be at least 64kB aligned,
+ * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below.
+ */
+ pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
+ get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
+ if (!pend_page)
+ return NULL;
+
+ /* Make sure the GIC will observe the zero-ed page */
+ gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);
+
+ return pend_page;
+}
+
+static void its_free_pending_table(struct page *pt)
+{
+ free_pages((unsigned long)page_address(pt),
+ get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
+}
+
static void its_cpu_init_lpis(void)
{
void __iomem *rbase = gic_data_rdist_rd_base();
@@ -1094,21 +1745,14 @@ static void its_cpu_init_lpis(void)
pend_page = gic_data_rdist()->pend_page;
if (!pend_page) {
phys_addr_t paddr;
- /*
- * The pending pages have to be at least 64kB aligned,
- * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below.
- */
- pend_page = alloc_pages(GFP_NOWAIT | __GFP_ZERO,
- get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
+
+ pend_page = its_allocate_pending_table(GFP_NOWAIT);
if (!pend_page) {
pr_err("Failed to allocate PENDBASE for CPU%d\n",
smp_processor_id());
return;
}
- /* Make sure the GIC will observe the zero-ed page */
- gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);
-
paddr = page_to_phys(pend_page);
pr_info("CPU%d: using LPI pending table @%pa\n",
smp_processor_id(), &paddr);
@@ -1259,26 +1903,19 @@ static struct its_baser *its_get_baser(struct its_node *its, u32 type)
return NULL;
}
-static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
+static bool its_alloc_table_entry(struct its_baser *baser, u32 id)
{
- struct its_baser *baser;
struct page *page;
u32 esz, idx;
__le64 *table;
- baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
-
- /* Don't allow device id that exceeds ITS hardware limit */
- if (!baser)
- return (ilog2(dev_id) < its->device_ids);
-
/* Don't allow device id that exceeds single, flat table limit */
esz = GITS_BASER_ENTRY_SIZE(baser->val);
if (!(baser->val & GITS_BASER_INDIRECT))
- return (dev_id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));
+ return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));
/* Compute 1st level table index & check if that exceeds table limit */
- idx = dev_id >> ilog2(baser->psz / esz);
+ idx = id >> ilog2(baser->psz / esz);
if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
return false;
@@ -1307,11 +1944,52 @@ static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
return true;
}
+static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
+{
+ struct its_baser *baser;
+
+ baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
+
+ /* Don't allow device id that exceeds ITS hardware limit */
+ if (!baser)
+ return (ilog2(dev_id) < its->device_ids);
+
+ return its_alloc_table_entry(baser, dev_id);
+}
+
+static bool its_alloc_vpe_table(u32 vpe_id)
+{
+ struct its_node *its;
+
+ /*
+ * Make sure the L2 tables are allocated on *all* v4 ITSs. We
+ * could try and only do it on ITSs corresponding to devices
+ * that have interrupts targeted at this VPE, but the
+ * complexity becomes crazy (and you have tons of memory
+ * anyway, right?).
+ */
+ list_for_each_entry(its, &its_nodes, entry) {
+ struct its_baser *baser;
+
+ if (!its->is_v4)
+ continue;
+
+ baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
+ if (!baser)
+ return false;
+
+ if (!its_alloc_table_entry(baser, vpe_id))
+ return false;
+ }
+
+ return true;
+}
+
static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
- int nvecs)
+ int nvecs, bool alloc_lpis)
{
struct its_device *dev;
- unsigned long *lpi_map;
+ unsigned long *lpi_map = NULL;
unsigned long flags;
u16 *col_map = NULL;
void *itt;
@@ -1333,11 +2011,18 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
sz = nr_ites * its->ite_size;
sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
itt = kzalloc(sz, GFP_KERNEL);
- lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
- if (lpi_map)
- col_map = kzalloc(sizeof(*col_map) * nr_lpis, GFP_KERNEL);
+ if (alloc_lpis) {
+ lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
+ if (lpi_map)
+ col_map = kzalloc(sizeof(*col_map) * nr_lpis,
+ GFP_KERNEL);
+ } else {
+ col_map = kzalloc(sizeof(*col_map) * nr_ites, GFP_KERNEL);
+ nr_lpis = 0;
+ lpi_base = 0;
+ }
- if (!dev || !itt || !lpi_map || !col_map) {
+ if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
kfree(dev);
kfree(itt);
kfree(lpi_map);
@@ -1354,6 +2039,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
dev->event_map.col_map = col_map;
dev->event_map.lpi_base = lpi_base;
dev->event_map.nr_lpis = nr_lpis;
+ mutex_init(&dev->event_map.vlpi_lock);
dev->device_id = dev_id;
INIT_LIST_HEAD(&dev->entry);
@@ -1412,6 +2098,16 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
msi_info = msi_get_domain_info(domain);
its = msi_info->data;
+ if (!gic_rdists->has_direct_lpi &&
+ vpe_proxy.dev &&
+ vpe_proxy.dev->its == its &&
+ dev_id == vpe_proxy.dev->device_id) {
+ /* Bad luck. Get yourself a better implementation */
+ WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n",
+ dev_id);
+ return -EINVAL;
+ }
+
its_dev = its_find_device(its, dev_id);
if (its_dev) {
/*
@@ -1423,7 +2119,7 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
goto out;
}
- its_dev = its_create_device(its, dev_id, nvec);
+ its_dev = its_create_device(its, dev_id, nvec, true);
if (!its_dev)
return -ENOMEM;
@@ -1481,6 +2177,7 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
irq_domain_set_hwirq_and_chip(domain, virq + i,
hwirq, &its_irq_chip, its_dev);
+ irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i)));
pr_debug("ID:%d pID:%d vID:%d\n",
(int)(hwirq - its_dev->event_map.lpi_base),
(int) hwirq, virq + i);
@@ -1495,13 +2192,16 @@ static void its_irq_domain_activate(struct irq_domain *domain,
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
u32 event = its_get_event_id(d);
const struct cpumask *cpu_mask = cpu_online_mask;
+ int cpu;
/* get the cpu_mask of local node */
if (its_dev->its->numa_node >= 0)
cpu_mask = cpumask_of_node(its_dev->its->numa_node);
/* Bind the LPI to the first possible CPU */
- its_dev->event_map.col_map[event] = cpumask_first(cpu_mask);
+ cpu = cpumask_first(cpu_mask);
+ its_dev->event_map.col_map[event] = cpu;
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
/* Map the GIC IRQ and event to the device */
its_send_mapti(its_dev, d->hwirq, event);
@@ -1539,7 +2239,10 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
/* If all interrupts have been freed, start mopping the floor */
if (bitmap_empty(its_dev->event_map.lpi_map,
its_dev->event_map.nr_lpis)) {
- its_lpi_free(&its_dev->event_map);
+ its_lpi_free_chunks(its_dev->event_map.lpi_map,
+ its_dev->event_map.lpi_base,
+ its_dev->event_map.nr_lpis);
+ kfree(its_dev->event_map.col_map);
/* Unmap device/itt */
its_send_mapd(its_dev, 0);
@@ -1556,6 +2259,451 @@ static const struct irq_domain_ops its_domain_ops = {
.deactivate = its_irq_domain_deactivate,
};
+/*
+ * This is insane.
+ *
+ * If a GICv4 doesn't implement Direct LPIs (which is extremely
+ * likely), the only way to perform an invalidate is to use a fake
+ * device to issue an INV command, implying that the LPI has first
+ * been mapped to some event on that device. Since this is not exactly
+ * cheap, we try to keep that mapping around as long as possible, and
+ * only issue an UNMAP if we're short on available slots.
+ *
+ * Broken by design(tm).
+ */
+static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
+{
+ /* Already unmapped? */
+ if (vpe->vpe_proxy_event == -1)
+ return;
+
+ its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event);
+ vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL;
+
+ /*
+ * We don't track empty slots at all, so let's move the
+ * next_victim pointer if we can quickly reuse that slot
+ * instead of nuking an existing entry. Not clear that this is
+ * always a win though, and this might just generate a ripple
+ * effect... Let's just hope VPEs don't migrate too often.
+ */
+ if (vpe_proxy.vpes[vpe_proxy.next_victim])
+ vpe_proxy.next_victim = vpe->vpe_proxy_event;
+
+ vpe->vpe_proxy_event = -1;
+}
+
+static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
+{
+ if (!gic_rdists->has_direct_lpi) {
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
+ its_vpe_db_proxy_unmap_locked(vpe);
+ raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
+ }
+}
+
+static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
+{
+ /* Already mapped? */
+ if (vpe->vpe_proxy_event != -1)
+ return;
+
+ /* This slot was already allocated. Kick the other VPE out. */
+ if (vpe_proxy.vpes[vpe_proxy.next_victim])
+ its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]);
+
+ /* Map the new VPE instead */
+ vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
+ vpe->vpe_proxy_event = vpe_proxy.next_victim;
+ vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites;
+
+ vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
+ its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
+}
+
+static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
+{
+ unsigned long flags;
+ struct its_collection *target_col;
+
+ if (gic_rdists->has_direct_lpi) {
+ void __iomem *rdbase;
+
+ rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base;
+ gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
+ while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
+ cpu_relax();
+
+ return;
+ }
+
+ raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
+
+ its_vpe_db_proxy_map_locked(vpe);
+
+ target_col = &vpe_proxy.dev->its->collections[to];
+ its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event);
+ vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to;
+
+ raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
+}
+
+static int its_vpe_set_affinity(struct irq_data *d,
+ const struct cpumask *mask_val,
+ bool force)
+{
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+ int cpu = cpumask_first(mask_val);
+
+ /*
+ * Changing affinity is mega expensive, so let's be as lazy as
+ * we can and only do it if we really have to. Also, if mapped
+ * into the proxy device, we need to move the doorbell
+ * interrupt to its new location.
+ */
+ if (vpe->col_idx != cpu) {
+ int from = vpe->col_idx;
+
+ vpe->col_idx = cpu;
+ its_send_vmovp(vpe);
+ its_vpe_db_proxy_move(vpe, from, cpu);
+ }
+
+ return IRQ_SET_MASK_OK_DONE;
+}
+
+static void its_vpe_schedule(struct its_vpe *vpe)
+{
+ void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
+ u64 val;
+
+ /* Schedule the VPE */
+ val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
+ GENMASK_ULL(51, 12);
+ val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
+ val |= GICR_VPROPBASER_RaWb;
+ val |= GICR_VPROPBASER_InnerShareable;
+ gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
+
+ val = virt_to_phys(page_address(vpe->vpt_page)) &
+ GENMASK_ULL(51, 16);
+ val |= GICR_VPENDBASER_RaWaWb;
+ val |= GICR_VPENDBASER_NonShareable;
+ /*
+ * There is no good way of finding out if the pending table is
+ * empty as we can race against the doorbell interrupt very
+ * easily. So in the end, vpe->pending_last is only an
+ * indication that the vcpu has something pending, not one
+ * that the pending table is empty. A good implementation
+ * would be able to read its coarse map pretty quickly anyway,
+ * making this a tolerable issue.
+ */
+ val |= GICR_VPENDBASER_PendingLast;
+ val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
+ val |= GICR_VPENDBASER_Valid;
+ gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
+}
+
+static void its_vpe_deschedule(struct its_vpe *vpe)
+{
+ void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
+ u32 count = 1000000; /* 1s! */
+ bool clean;
+ u64 val;
+
+ /* We're being scheduled out */
+ val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
+ val &= ~GICR_VPENDBASER_Valid;
+ gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
+
+ do {
+ val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
+ clean = !(val & GICR_VPENDBASER_Dirty);
+ if (!clean) {
+ count--;
+ cpu_relax();
+ udelay(1);
+ }
+ } while (!clean && count);
+
+ if (unlikely(!clean && !count)) {
+ pr_err_ratelimited("ITS virtual pending table not cleaning\n");
+ vpe->idai = false;
+ vpe->pending_last = true;
+ } else {
+ vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
+ vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
+ }
+}
+
+static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
+{
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+ struct its_cmd_info *info = vcpu_info;
+
+ switch (info->cmd_type) {
+ case SCHEDULE_VPE:
+ its_vpe_schedule(vpe);
+ return 0;
+
+ case DESCHEDULE_VPE:
+ its_vpe_deschedule(vpe);
+ return 0;
+
+ case INVALL_VPE:
+ its_send_vinvall(vpe);
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
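Again, this is reached via irq_set_vcpu_affinity(), this time on the vPE's doorbell interrupt.
A hypothetical sketch of the hypervisor-side call (illustrative only; vpe_irq is a placeholder):

	struct its_cmd_info info = { .cmd_type = SCHEDULE_VPE };

	/* make the vPE resident: programs GICR_VPROPBASER/GICR_VPENDBASER */
	irq_set_vcpu_affinity(vpe_irq, &info);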
+
+static void its_vpe_send_cmd(struct its_vpe *vpe,
+ void (*cmd)(struct its_device *, u32))
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
+
+ its_vpe_db_proxy_map_locked(vpe);
+ cmd(vpe_proxy.dev, vpe->vpe_proxy_event);
+
+ raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
+}
+
+static void its_vpe_send_inv(struct irq_data *d)
+{
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+
+ if (gic_rdists->has_direct_lpi) {
+ void __iomem *rdbase;
+
+ rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
+ gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_INVLPIR);
+ while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
+ cpu_relax();
+ } else {
+ its_vpe_send_cmd(vpe, its_send_inv);
+ }
+}
+
+static void its_vpe_mask_irq(struct irq_data *d)
+{
+ /*
+ * We need to mask the LPI, which is described by the parent
+ * irq_data. Instead of calling into the parent (which won't
+ * exactly do the right thing), let's simply use the
+ * parent_data pointer. Yes, I'm naughty.
+ */
+ lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
+ its_vpe_send_inv(d);
+}
+
+static void its_vpe_unmask_irq(struct irq_data *d)
+{
+ /* Same hack as above... */
+ lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
+ its_vpe_send_inv(d);
+}
+
+static int its_vpe_set_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which,
+ bool state)
+{
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+
+ if (which != IRQCHIP_STATE_PENDING)
+ return -EINVAL;
+
+ if (gic_rdists->has_direct_lpi) {
+ void __iomem *rdbase;
+
+ rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
+ if (state) {
+ gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
+ } else {
+ gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
+ while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
+ cpu_relax();
+ }
+ } else {
+ if (state)
+ its_vpe_send_cmd(vpe, its_send_int);
+ else
+ its_vpe_send_cmd(vpe, its_send_clear);
+ }
+
+ return 0;
+}
+
+static struct irq_chip its_vpe_irq_chip = {
+ .name = "GICv4-vpe",
+ .irq_mask = its_vpe_mask_irq,
+ .irq_unmask = its_vpe_unmask_irq,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_affinity = its_vpe_set_affinity,
+ .irq_set_irqchip_state = its_vpe_set_irqchip_state,
+ .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity,
+};
+
+static int its_vpe_id_alloc(void)
+{
+ return ida_simple_get(&its_vpeid_ida, 0, 1 << 16, GFP_KERNEL);
+}
+
+static void its_vpe_id_free(u16 id)
+{
+ ida_simple_remove(&its_vpeid_ida, id);
+}
+
+static int its_vpe_init(struct its_vpe *vpe)
+{
+ struct page *vpt_page;
+ int vpe_id;
+
+ /* Allocate vpe_id */
+ vpe_id = its_vpe_id_alloc();
+ if (vpe_id < 0)
+ return vpe_id;
+
+ /* Allocate VPT */
+ vpt_page = its_allocate_pending_table(GFP_KERNEL);
+ if (!vpt_page) {
+ its_vpe_id_free(vpe_id);
+ return -ENOMEM;
+ }
+
+ if (!its_alloc_vpe_table(vpe_id)) {
+ its_vpe_id_free(vpe_id);
+ its_free_pending_table(vpt_page);
+ return -ENOMEM;
+ }
+
+ vpe->vpe_id = vpe_id;
+ vpe->vpt_page = vpt_page;
+ vpe->vpe_proxy_event = -1;
+
+ return 0;
+}
+
+static void its_vpe_teardown(struct its_vpe *vpe)
+{
+ its_vpe_db_proxy_unmap(vpe);
+ its_vpe_id_free(vpe->vpe_id);
+ its_free_pending_table(vpe->vpt_page);
+}
+
+static void its_vpe_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct its_vm *vm = domain->host_data;
+ int i;
+
+ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+
+ for (i = 0; i < nr_irqs; i++) {
+ struct irq_data *data = irq_domain_get_irq_data(domain,
+ virq + i);
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(data);
+
+ BUG_ON(vm != vpe->its_vm);
+
+ clear_bit(data->hwirq, vm->db_bitmap);
+ its_vpe_teardown(vpe);
+ irq_domain_reset_irq_data(data);
+ }
+
+ if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) {
+ its_lpi_free_chunks(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
+ its_free_prop_table(vm->vprop_page);
+ }
+}
+
+static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct its_vm *vm = args;
+ unsigned long *bitmap;
+ struct page *vprop_page;
+ int base, nr_ids, i, err = 0;
+
+ BUG_ON(!vm);
+
+ bitmap = its_lpi_alloc_chunks(nr_irqs, &base, &nr_ids);
+ if (!bitmap)
+ return -ENOMEM;
+
+ if (nr_ids < nr_irqs) {
+ its_lpi_free_chunks(bitmap, base, nr_ids);
+ return -ENOMEM;
+ }
+
+ vprop_page = its_allocate_prop_table(GFP_KERNEL);
+ if (!vprop_page) {
+ its_lpi_free_chunks(bitmap, base, nr_ids);
+ return -ENOMEM;
+ }
+
+ vm->db_bitmap = bitmap;
+ vm->db_lpi_base = base;
+ vm->nr_db_lpis = nr_ids;
+ vm->vprop_page = vprop_page;
+
+ for (i = 0; i < nr_irqs; i++) {
+ vm->vpes[i]->vpe_db_lpi = base + i;
+ err = its_vpe_init(vm->vpes[i]);
+ if (err)
+ break;
+ err = its_irq_gic_domain_alloc(domain, virq + i,
+ vm->vpes[i]->vpe_db_lpi);
+ if (err)
+ break;
+ irq_domain_set_hwirq_and_chip(domain, virq + i, i,
+ &its_vpe_irq_chip, vm->vpes[i]);
+ set_bit(i, bitmap);
+ }
+
+ if (err) {
+ if (i > 0)
+ its_vpe_irq_domain_free(domain, virq, i - 1);
+
+ its_lpi_free_chunks(bitmap, base, nr_ids);
+ its_free_prop_table(vprop_page);
+ }
+
+ return err;
+}
+
+static void its_vpe_irq_domain_activate(struct irq_domain *domain,
+ struct irq_data *d)
+{
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+
+ /* Map the VPE to the first possible CPU */
+ vpe->col_idx = cpumask_first(cpu_online_mask);
+ its_send_vmapp(vpe, true);
+ its_send_vinvall(vpe);
+}
+
+static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
+ struct irq_data *d)
+{
+ struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+
+ its_send_vmapp(vpe, false);
+}
+
+static const struct irq_domain_ops its_vpe_domain_ops = {
+ .alloc = its_vpe_irq_domain_alloc,
+ .free = its_vpe_irq_domain_free,
+ .activate = its_vpe_irq_domain_activate,
+ .deactivate = its_vpe_irq_domain_deactivate,
+};
+
static int its_force_quiescent(void __iomem *base)
{
u32 count = 1000000; /* 1s */
@@ -1571,7 +2719,7 @@ static int its_force_quiescent(void __iomem *base)
return 0;
/* Disable the generation of all interrupts to this ITS */
- val &= ~GITS_CTLR_ENABLE;
+ val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe);
writel_relaxed(val, base + GITS_CTLR);
/* Poll GITS_CTLR and wait until ITS becomes quiescent */
@@ -1672,13 +2820,92 @@ static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
return 0;
}
+static int its_init_vpe_domain(void)
+{
+ struct its_node *its;
+ u32 devid;
+ int entries;
+
+ if (gic_rdists->has_direct_lpi) {
+ pr_info("ITS: Using DirectLPI for VPE invalidation\n");
+ return 0;
+ }
+
+ /* Any ITS will do, even if not v4 */
+ its = list_first_entry(&its_nodes, struct its_node, entry);
+
+ entries = roundup_pow_of_two(nr_cpu_ids);
+ vpe_proxy.vpes = kzalloc(sizeof(*vpe_proxy.vpes) * entries,
+ GFP_KERNEL);
+ if (!vpe_proxy.vpes) {
+ pr_err("ITS: Can't allocate GICv4 proxy device array\n");
+ return -ENOMEM;
+ }
+
+ /* Use the last possible DevID */
+ devid = GENMASK(its->device_ids - 1, 0);
+ vpe_proxy.dev = its_create_device(its, devid, entries, false);
+ if (!vpe_proxy.dev) {
+ kfree(vpe_proxy.vpes);
+ pr_err("ITS: Can't allocate GICv4 proxy device\n");
+ return -ENOMEM;
+ }
+
+ BUG_ON(entries != vpe_proxy.dev->nr_ites);
+
+ raw_spin_lock_init(&vpe_proxy.lock);
+ vpe_proxy.next_victim = 0;
+ pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n",
+ devid, vpe_proxy.dev->nr_ites);
+
+ return 0;
+}
+
+static int __init its_compute_its_list_map(struct resource *res,
+ void __iomem *its_base)
+{
+ int its_number;
+ u32 ctlr;
+
+ /*
+ * This is assumed to be done early enough that we're
+ * guaranteed to be single-threaded, hence no
+ * locking. Should this change, we should address
+ * this.
+ */
+ its_number = find_first_zero_bit(&its_list_map, ITS_LIST_MAX);
+ if (its_number >= ITS_LIST_MAX) {
+ pr_err("ITS@%pa: No ITSList entry available!\n",
+ &res->start);
+ return -EINVAL;
+ }
+
+ ctlr = readl_relaxed(its_base + GITS_CTLR);
+ ctlr &= ~GITS_CTLR_ITS_NUMBER;
+ ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT;
+ writel_relaxed(ctlr, its_base + GITS_CTLR);
+ ctlr = readl_relaxed(its_base + GITS_CTLR);
+ if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) {
+ its_number = ctlr & GITS_CTLR_ITS_NUMBER;
+ its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT;
+ }
+
+ if (test_and_set_bit(its_number, &its_list_map)) {
+ pr_err("ITS@%pa: Duplicate ITSList entry %d\n",
+ &res->start, its_number);
+ return -EINVAL;
+ }
+
+ return its_number;
+}
+
static int __init its_probe_one(struct resource *res,
struct fwnode_handle *handle, int numa_node)
{
struct its_node *its;
void __iomem *its_base;
- u32 val;
- u64 baser, tmp;
+ u32 val, ctlr;
+ u64 baser, tmp, typer;
int err;
its_base = ioremap(res->start, resource_size(res));
@@ -1711,9 +2938,24 @@ static int __init its_probe_one(struct resource *res,
raw_spin_lock_init(&its->lock);
INIT_LIST_HEAD(&its->entry);
INIT_LIST_HEAD(&its->its_device_list);
+ typer = gic_read_typer(its_base + GITS_TYPER);
its->base = its_base;
its->phys_base = res->start;
- its->ite_size = ((gic_read_typer(its_base + GITS_TYPER) >> 4) & 0xf) + 1;
+ its->ite_size = GITS_TYPER_ITT_ENTRY_SIZE(typer);
+ its->is_v4 = !!(typer & GITS_TYPER_VLPIS);
+ if (its->is_v4) {
+ if (!(typer & GITS_TYPER_VMOVP)) {
+ err = its_compute_its_list_map(res, its_base);
+ if (err < 0)
+ goto out_free_its;
+
+ pr_info("ITS@%pa: Using ITS number %d\n",
+ &res->start, err);
+ } else {
+ pr_info("ITS@%pa: Single VMOVP capable\n", &res->start);
+ }
+ }
+
its->numa_node = numa_node;
its->cmd_base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
@@ -1760,7 +3002,11 @@ static int __init its_probe_one(struct resource *res,
}
gits_write_cwriter(0, its->base + GITS_CWRITER);
- writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR);
+ ctlr = readl_relaxed(its->base + GITS_CTLR);
+ ctlr |= GITS_CTLR_ENABLE;
+ if (its->is_v4)
+ ctlr |= GITS_CTLR_ImDe;
+ writel_relaxed(ctlr, its->base + GITS_CTLR);
err = its_init_domain(handle, its);
if (err)
@@ -1816,13 +3062,13 @@ static int __init its_of_probe(struct device_node *node)
for (np = of_find_matching_node(node, its_device_id); np;
np = of_find_matching_node(np, its_device_id)) {
if (!of_property_read_bool(np, "msi-controller")) {
- pr_warn("%s: no msi-controller property, ITS ignored\n",
- np->full_name);
+ pr_warn("%pOF: no msi-controller property, ITS ignored\n",
+ np);
continue;
}
if (of_address_to_resource(np, 0, &res)) {
- pr_warn("%s: no regs?\n", np->full_name);
+ pr_warn("%pOF: no regs?\n", np);
continue;
}
@@ -1835,7 +3081,7 @@ static int __init its_of_probe(struct device_node *node)
#define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K)
-#if defined(CONFIG_ACPI_NUMA) && (ACPI_CA_VERSION >= 0x20170531)
+#ifdef CONFIG_ACPI_NUMA
struct its_srat_map {
/* numa node id */
u32 numa_node;
@@ -1843,7 +3089,7 @@ struct its_srat_map {
u32 its_id;
};
-static struct its_srat_map its_srat_maps[MAX_NUMNODES] __initdata;
+static struct its_srat_map *its_srat_maps __initdata;
static int its_in_srat __initdata;
static int __init acpi_get_its_numa_node(u32 its_id)
@@ -1857,6 +3103,12 @@ static int __init acpi_get_its_numa_node(u32 its_id)
return NUMA_NO_NODE;
}
+static int __init gic_acpi_match_srat_its(struct acpi_subtable_header *header,
+ const unsigned long end)
+{
+ return 0;
+}
+
static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header,
const unsigned long end)
{
@@ -1873,12 +3125,6 @@ static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header,
return -EINVAL;
}
- if (its_in_srat >= MAX_NUMNODES) {
- pr_err("SRAT: ITS affinity exceeding max count[%d]\n",
- MAX_NUMNODES);
- return -EINVAL;
- }
-
node = acpi_map_pxm_to_node(its_affinity->proximity_domain);
if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
@@ -1897,14 +3143,37 @@ static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header,
static void __init acpi_table_parse_srat_its(void)
{
+ int count;
+
+ count = acpi_table_parse_entries(ACPI_SIG_SRAT,
+ sizeof(struct acpi_table_srat),
+ ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
+ gic_acpi_match_srat_its, 0);
+ if (count <= 0)
+ return;
+
+ its_srat_maps = kmalloc(count * sizeof(struct its_srat_map),
+ GFP_KERNEL);
+ if (!its_srat_maps) {
+ pr_warn("SRAT: Failed to allocate memory for its_srat_maps!\n");
+ return;
+ }
+
acpi_table_parse_entries(ACPI_SIG_SRAT,
sizeof(struct acpi_table_srat),
ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
gic_acpi_parse_srat_its, 0);
}
+
+/* free the its_srat_maps after ITS probing */
+static void __init acpi_its_srat_maps_free(void)
+{
+ kfree(its_srat_maps);
+}
#else
static void __init acpi_table_parse_srat_its(void) { }
static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; }
+static void __init acpi_its_srat_maps_free(void) { }
#endif
static int __init gic_acpi_parse_madt_its(struct acpi_subtable_header *header,
@@ -1951,6 +3220,7 @@ static void __init its_acpi_probe(void)
acpi_table_parse_srat_its();
acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
gic_acpi_parse_madt_its, 0);
+ acpi_its_srat_maps_free();
}
#else
static void __init its_acpi_probe(void) { }
@@ -1960,6 +3230,9 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
struct irq_domain *parent_domain)
{
struct device_node *of_node;
+ struct its_node *its;
+ bool has_v4 = false;
+ int err;
its_parent = parent_domain;
of_node = to_of_node(handle);
@@ -1974,5 +3247,20 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
}
gic_rdists = rdists;
- return its_alloc_lpi_tables();
+ err = its_alloc_lpi_tables();
+ if (err)
+ return err;
+
+ list_for_each_entry(its, &its_nodes, entry)
+ has_v4 |= its->is_v4;
+
+ if (has_v4 & rdists->has_vlpis) {
+ if (its_init_vpe_domain() ||
+ its_init_v4(parent_domain, &its_vpe_domain_ops)) {
+ rdists->has_vlpis = false;
+ pr_err("ITS: Disabling GICv4 support\n");
+ }
+ }
+
+ return 0;
}
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index dbffb7ab6203..519149ec9053 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
+ * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
* Author: Marc Zyngier <marc.zyngier@arm.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -353,6 +353,8 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
if (static_key_true(&supports_deactivate))
gic_write_eoir(irqnr);
+ else
+ isb();
err = handle_domain_irq(gic_data.domain, irqnr, regs);
if (err) {
@@ -421,24 +423,14 @@ static void __init gic_dist_init(void)
gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);
}
-static int gic_populate_rdist(void)
+static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *))
{
- unsigned long mpidr = cpu_logical_map(smp_processor_id());
- u64 typer;
- u32 aff;
+ int ret = -ENODEV;
int i;
- /*
- * Convert affinity to a 32bit value that can be matched to
- * GICR_TYPER bits [63:32].
- */
- aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
- MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
- MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
- MPIDR_AFFINITY_LEVEL(mpidr, 0));
-
for (i = 0; i < gic_data.nr_redist_regions; i++) {
void __iomem *ptr = gic_data.redist_regions[i].redist_base;
+ u64 typer;
u32 reg;
reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
@@ -450,15 +442,9 @@ static int gic_populate_rdist(void)
do {
typer = gic_read_typer(ptr + GICR_TYPER);
- if ((typer >> 32) == aff) {
- u64 offset = ptr - gic_data.redist_regions[i].redist_base;
- gic_data_rdist_rd_base() = ptr;
- gic_data_rdist()->phys_base = gic_data.redist_regions[i].phys_base + offset;
- pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
- smp_processor_id(), mpidr, i,
- &gic_data_rdist()->phys_base);
+ ret = fn(gic_data.redist_regions + i, ptr);
+ if (!ret)
return 0;
- }
if (gic_data.redist_regions[i].single_redist)
break;
@@ -473,12 +459,71 @@ static int gic_populate_rdist(void)
} while (!(typer & GICR_TYPER_LAST));
}
+ return ret ? -ENODEV : 0;
+}
+
+static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
+{
+ unsigned long mpidr = cpu_logical_map(smp_processor_id());
+ u64 typer;
+ u32 aff;
+
+ /*
+ * Convert affinity to a 32bit value that can be matched to
+ * GICR_TYPER bits [63:32].
+ */
+ aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
+ MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
+ MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
+ MPIDR_AFFINITY_LEVEL(mpidr, 0));
+
+ typer = gic_read_typer(ptr + GICR_TYPER);
+ if ((typer >> 32) == aff) {
+ u64 offset = ptr - region->redist_base;
+ gic_data_rdist_rd_base() = ptr;
+ gic_data_rdist()->phys_base = region->phys_base + offset;
+
+ pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
+ smp_processor_id(), mpidr,
+ (int)(region - gic_data.redist_regions),
+ &gic_data_rdist()->phys_base);
+ return 0;
+ }
+
+ /* Try next one */
+ return 1;
+}
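As a concrete illustration of the affinity packing above (made-up values): a CPU whose MPIDR
affinity fields are Aff3=0x00, Aff2=0x01, Aff1=0x02, Aff0=0x03 produces:

	aff = (0x00 << 24) | (0x01 << 16) | (0x02 << 8) | 0x03;   /* 0x00010203 */
	/* ...which is matched against GICR_TYPER[63:32] of each RD frame */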
+
+static int gic_populate_rdist(void)
+{
+ if (gic_iterate_rdists(__gic_populate_rdist) == 0)
+ return 0;
+
/* We couldn't even deal with ourselves... */
WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n",
- smp_processor_id(), mpidr);
+ smp_processor_id(),
+ (unsigned long)cpu_logical_map(smp_processor_id()));
return -ENODEV;
}
+static int __gic_update_vlpi_properties(struct redist_region *region,
+ void __iomem *ptr)
+{
+ u64 typer = gic_read_typer(ptr + GICR_TYPER);
+ gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS);
+ gic_data.rdists.has_direct_lpi &= !!(typer & GICR_TYPER_DirectLPIS);
+
+ return 1;
+}
+
+static void gic_update_vlpi_properties(void)
+{
+ gic_iterate_rdists(__gic_update_vlpi_properties);
+ pr_info("%sVLPI support, %sdirect LPI support\n",
+ !gic_data.rdists.has_vlpis ? "no " : "",
+ !gic_data.rdists.has_direct_lpi ? "no " : "");
+}
+
static void gic_cpu_sys_reg_init(void)
{
/*
@@ -640,11 +685,16 @@ static void gic_smp_init(void)
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
bool force)
{
- unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
+ unsigned int cpu;
void __iomem *reg;
int enabled;
u64 val;
+ if (force)
+ cpu = cpumask_first(mask_val);
+ else
+ cpu = cpumask_any_and(mask_val, cpu_online_mask);
+
if (cpu >= nr_cpu_ids)
return -EINVAL;
@@ -670,6 +720,8 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
else
gic_dist_wait_for_rwp();
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
return IRQ_SET_MASK_OK_DONE;
}
#else
@@ -768,6 +820,7 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
irq_domain_set_info(d, irq, hw, chip, d->host_data,
handle_fasteoi_irq, NULL, NULL);
irq_set_probe(irq);
+ irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
}
/* LPIs */
if (hw >= 8192 && hw < GIC_ID_NR) {
@@ -831,8 +884,11 @@ static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
if (ret)
return ret;
- for (i = 0; i < nr_irqs; i++)
- gic_irq_domain_map(domain, virq + i, hwirq + i);
+ for (i = 0; i < nr_irqs; i++) {
+ ret = gic_irq_domain_map(domain, virq + i, hwirq + i);
+ if (ret)
+ return ret;
+ }
return 0;
}
@@ -943,6 +999,8 @@ static int __init gic_init_bases(void __iomem *dist_base,
gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
&gic_data);
gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
+ gic_data.rdists.has_vlpis = true;
+ gic_data.rdists.has_direct_lpi = true;
if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
err = -ENOMEM;
@@ -951,6 +1009,8 @@ static int __init gic_init_bases(void __iomem *dist_base,
set_handle_irq(gic_handle_irq);
+ gic_update_vlpi_properties();
+
if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
its_init(handle, &gic_data.rdists, gic_data.domain);
@@ -1057,7 +1117,7 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
if (WARN_ON(cpu == -1))
continue;
- pr_cont("%s[%d] ", cpu_node->full_name, cpu);
+ pr_cont("%pOF[%d] ", cpu_node, cpu);
cpumask_set_cpu(cpu, &part->mask);
}
@@ -1112,6 +1172,7 @@ static void __init gic_of_setup_kvm_info(struct device_node *node)
if (!ret)
gic_v3_kvm_info.vcpu = r;
+ gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
gic_set_kvm_info(&gic_v3_kvm_info);
}
@@ -1125,15 +1186,13 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare
dist_base = of_iomap(node, 0);
if (!dist_base) {
- pr_err("%s: unable to map gic dist registers\n",
- node->full_name);
+ pr_err("%pOF: unable to map gic dist registers\n", node);
return -ENXIO;
}
err = gic_validate_dist_version(dist_base);
if (err) {
- pr_err("%s: no distributor detected, giving up\n",
- node->full_name);
+ pr_err("%pOF: no distributor detected, giving up\n", node);
goto out_unmap_dist;
}
@@ -1153,8 +1212,7 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare
ret = of_address_to_resource(node, 1 + i, &res);
rdist_regs[i].redist_base = of_iomap(node, 1 + i);
if (ret || !rdist_regs[i].redist_base) {
- pr_err("%s: couldn't map region %d\n",
- node->full_name, i);
+ pr_err("%pOF: couldn't map region %d\n", node, i);
err = -ENODEV;
goto out_unmap_rdist;
}
@@ -1408,6 +1466,7 @@ static void __init gic_acpi_setup_kvm_info(void)
vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;
}
+ gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
gic_set_kvm_info(&gic_v3_kvm_info);
}
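
For reference, the redistributor probing added above matches each CPU against a redistributor by packing the four MPIDR affinity fields into a 32-bit value and comparing it with GICR_TYPER bits [63:32]. A minimal, self-contained sketch of that packing (not part of the patch; the register value below is purely illustrative):

#include <stdint.h>
#include <stdio.h>

/* Pack Aff3..Aff0 the same way __gic_populate_rdist() does above. */
static uint32_t pack_affinity(uint8_t aff3, uint8_t aff2, uint8_t aff1, uint8_t aff0)
{
	return ((uint32_t)aff3 << 24) | ((uint32_t)aff2 << 16) |
	       ((uint32_t)aff1 << 8) | aff0;
}

int main(void)
{
	uint32_t aff = pack_affinity(0, 0, 1, 2);	/* CPU with MPIDR affinity 0.0.1.2 */
	uint64_t gicr_typer = 0x0000010200000010ULL;	/* assumed GICR_TYPER contents */

	/* A redistributor belongs to this CPU when TYPER[63:32] equals the packed value. */
	printf("aff=0x%08x match=%d\n", aff, (uint32_t)(gicr_typer >> 32) == aff);
	return 0;
}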
diff --git a/drivers/irqchip/irq-gic-v4.c b/drivers/irqchip/irq-gic-v4.c
new file mode 100644
index 000000000000..2370e6d9e603
--- /dev/null
+++ b/drivers/irqchip/irq-gic-v4.c
@@ -0,0 +1,225 @@
+/*
+ * Copyright (C) 2016,2017 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/msi.h>
+#include <linux/sched.h>
+
+#include <linux/irqchip/arm-gic-v4.h>
+
+/*
+ * WARNING: The blurb below assumes that you understand the
+ * intricacies of GICv3, GICv4, and how a guest's view of a GICv3 gets
+ * translated into GICv4 commands. So it effectively targets at most
+ * two individuals. You know who you are.
+ *
+ * The core GICv4 code is designed to *avoid* exposing too much of the
+ * core GIC code (that would in turn leak into the hypervisor code),
+ * and instead provide a hypervisor agnostic interface to the HW (of
+ * course, the astute reader will quickly realize that hypervisor
+ * agnostic actually means KVM-specific - what were you thinking?).
+ *
+ * In order to achieve a modicum of isolation, we try to hide most of
+ * the GICv4 "stuff" behind normal irqchip operations:
+ *
+ * - Any guest-visible VLPI is backed by a Linux interrupt (and a
+ * physical LPI which gets unmapped when the guest maps the
+ * VLPI). This allows the same DevID/EventID pair to be either
+ * mapped to the LPI (host) or the VLPI (guest). Note that this is
+ * exclusive, and you cannot have both.
+ *
+ * - Enabling/disabling a VLPI is done by issuing mask/unmask calls.
+ *
+ * - Guest INT/CLEAR commands are implemented through
+ * irq_set_irqchip_state().
+ *
+ * - The *bizarre* stuff (mapping/unmapping an interrupt to a VLPI, or
+ * issuing an INV after changing a priority) gets shoved into the
+ * irq_set_vcpu_affinity() method. While this is quite horrible
+ * (let's face it, this is the irqchip version of an ioctl), it
+ * confines the crap to a single location. And map/unmap really is
+ * about setting the affinity of a VLPI to a vcpu, so only INV is
+ * majorly out of place. So there.
+ *
+ * A number of commands are simply not provided by this interface, as
+ * they do not make direct sense. For example, MAPD is purely local to
+ * the virtual ITS (because it references a virtual device, and the
+ * physical ITS is still very much in charge of the physical
+ * device). Same goes for things like MAPC (the physical ITS deals
+ * with the actual vPE affinity, and not the braindead concept of
+ * collection). SYNC is not provided either, as each and every command
+ * is followed by a VSYNC. This could be relaxed in the future, should
+ * this be seen as a bottleneck (yes, this means *never*).
+ *
+ * But handling VLPIs is only one side of the job of the GICv4
+ * code. The other (darker) side is to take care of the doorbell
+ * interrupts which are delivered when a VLPI targeting a non-running
+ * vcpu is being made pending.
+ *
+ * The choice made here is that each vcpu (VPE in old northern GICv4
+ * dialect) gets a single doorbell LPI, no matter how many interrupts
+ * are targeting it. This has a nice property, which is that the
+ * interrupt becomes a handle for the VPE, and that the hypervisor
+ * code can manipulate it through the normal interrupt API:
+ *
+ * - VMs (or rather the VM abstraction that matters to the GIC)
+ * contain an irq domain where each interrupt maps to a VPE. In
+ * turn, this domain sits on top of the normal LPI allocator, and a
+ * specially crafted irq_chip implementation.
+ *
+ * - mask/unmask do what is expected on the doorbell interrupt.
+ *
+ * - irq_set_affinity is used to move a VPE from one redistributor to
+ * another.
+ *
+ * - irq_set_vcpu_affinity once again gets hijacked for the purpose of
+ * creating a new sub-API, namely scheduling/descheduling a VPE
+ * (which involves programming GICR_V{PROP,PEND}BASER) and
+ * performing INVALL operations.
+ */
+
+static struct irq_domain *gic_domain;
+static const struct irq_domain_ops *vpe_domain_ops;
+
+int its_alloc_vcpu_irqs(struct its_vm *vm)
+{
+ int vpe_base_irq, i;
+
+ vm->fwnode = irq_domain_alloc_named_id_fwnode("GICv4-vpe",
+ task_pid_nr(current));
+ if (!vm->fwnode)
+ goto err;
+
+ vm->domain = irq_domain_create_hierarchy(gic_domain, 0, vm->nr_vpes,
+ vm->fwnode, vpe_domain_ops,
+ vm);
+ if (!vm->domain)
+ goto err;
+
+ for (i = 0; i < vm->nr_vpes; i++) {
+ vm->vpes[i]->its_vm = vm;
+ vm->vpes[i]->idai = true;
+ }
+
+ vpe_base_irq = __irq_domain_alloc_irqs(vm->domain, -1, vm->nr_vpes,
+ NUMA_NO_NODE, vm,
+ false, NULL);
+ if (vpe_base_irq <= 0)
+ goto err;
+
+ for (i = 0; i < vm->nr_vpes; i++)
+ vm->vpes[i]->irq = vpe_base_irq + i;
+
+ return 0;
+
+err:
+ if (vm->domain)
+ irq_domain_remove(vm->domain);
+ if (vm->fwnode)
+ irq_domain_free_fwnode(vm->fwnode);
+
+ return -ENOMEM;
+}
+
+void its_free_vcpu_irqs(struct its_vm *vm)
+{
+ irq_domain_free_irqs(vm->vpes[0]->irq, vm->nr_vpes);
+ irq_domain_remove(vm->domain);
+ irq_domain_free_fwnode(vm->fwnode);
+}
+
+static int its_send_vpe_cmd(struct its_vpe *vpe, struct its_cmd_info *info)
+{
+ return irq_set_vcpu_affinity(vpe->irq, info);
+}
+
+int its_schedule_vpe(struct its_vpe *vpe, bool on)
+{
+ struct its_cmd_info info;
+
+ WARN_ON(preemptible());
+
+ info.cmd_type = on ? SCHEDULE_VPE : DESCHEDULE_VPE;
+
+ return its_send_vpe_cmd(vpe, &info);
+}
+
+int its_invall_vpe(struct its_vpe *vpe)
+{
+ struct its_cmd_info info = {
+ .cmd_type = INVALL_VPE,
+ };
+
+ return its_send_vpe_cmd(vpe, &info);
+}
+
+int its_map_vlpi(int irq, struct its_vlpi_map *map)
+{
+ struct its_cmd_info info = {
+ .cmd_type = MAP_VLPI,
+ .map = map,
+ };
+
+ /*
+ * The host will never see that interrupt firing again, so it
+ * is vital that we don't do any lazy masking.
+ */
+ irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
+
+ return irq_set_vcpu_affinity(irq, &info);
+}
+
+int its_get_vlpi(int irq, struct its_vlpi_map *map)
+{
+ struct its_cmd_info info = {
+ .cmd_type = GET_VLPI,
+ .map = map,
+ };
+
+ return irq_set_vcpu_affinity(irq, &info);
+}
+
+int its_unmap_vlpi(int irq)
+{
+ irq_clear_status_flags(irq, IRQ_DISABLE_UNLAZY);
+ return irq_set_vcpu_affinity(irq, NULL);
+}
+
+int its_prop_update_vlpi(int irq, u8 config, bool inv)
+{
+ struct its_cmd_info info = {
+ .cmd_type = inv ? PROP_UPDATE_AND_INV_VLPI : PROP_UPDATE_VLPI,
+ .config = config,
+ };
+
+ return irq_set_vcpu_affinity(irq, &info);
+}
+
+int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *ops)
+{
+ if (domain) {
+ pr_info("ITS: Enabling GICv4 support\n");
+ gic_domain = domain;
+ vpe_domain_ops = ops;
+ return 0;
+ }
+
+ pr_err("ITS: No GICv4 VPE domain allocated\n");
+ return -ENODEV;
+}
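
The comment block at the top of this new file describes the hypervisor-facing entry points (allocation of one doorbell LPI per VPE, scheduling/descheduling, VLPI mapping). As a rough usage sketch, not part of the patch: the surrounding KVM plumbing is assumed, and only functions defined above are used; its_schedule_vpe() must be called with preemption disabled, as its WARN_ON() enforces.

#include <linux/irqchip/arm-gic-v4.h>

/* Hedged sketch: one-time doorbell allocation at VM creation time. */
static int example_vm_setup(struct its_vm *vm)
{
	/* allocates one doorbell LPI per VPE and the per-VM irq domain */
	return its_alloc_vcpu_irqs(vm);
}

/* Hedged sketch: bracket a guest entry with residency changes for one VPE. */
static int example_vcpu_run(struct its_vpe *vpe)
{
	int ret;

	/* make the VPE resident on this redistributor; preemption is off */
	ret = its_schedule_vpe(vpe, true);
	if (ret)
		return ret;

	/* ... enter the guest; VLPIs mapped via its_map_vlpi() fire directly ... */

	/* deschedule on exit so the doorbell covers pending VLPIs again */
	return its_schedule_vpe(vpe, false);
}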
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 1b1df4f770bd..651d726e8b12 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -344,6 +344,8 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
writel_relaxed(val | bit, reg);
gic_unlock_irqrestore(flags);
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
return IRQ_SET_MASK_OK_DONE;
}
#endif
@@ -361,6 +363,7 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
if (likely(irqnr > 15 && irqnr < 1020)) {
if (static_key_true(&supports_deactivate))
writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
+ isb();
handle_domain_irq(gic->domain, irqnr, regs);
continue;
}
@@ -401,16 +404,18 @@ static void gic_handle_cascade_irq(struct irq_desc *desc)
goto out;
cascade_irq = irq_find_mapping(chip_data->domain, gic_irq);
- if (unlikely(gic_irq < 32 || gic_irq > 1020))
+ if (unlikely(gic_irq < 32 || gic_irq > 1020)) {
handle_bad_irq(desc);
- else
+ } else {
+ isb();
generic_handle_irq(cascade_irq);
+ }
out:
chained_irq_exit(chip, desc);
}
-static struct irq_chip gic_chip = {
+static const struct irq_chip gic_chip = {
.irq_mask = gic_mask_irq,
.irq_unmask = gic_unmask_irq,
.irq_eoi = gic_eoi_irq,
@@ -966,6 +971,7 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data,
handle_fasteoi_irq, NULL, NULL);
irq_set_probe(irq);
+ irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
}
return 0;
}
@@ -1027,8 +1033,11 @@ static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
if (ret)
return ret;
- for (i = 0; i < nr_irqs; i++)
- gic_irq_domain_map(domain, virq + i, hwirq + i);
+ for (i = 0; i < nr_irqs; i++) {
+ ret = gic_irq_domain_map(domain, virq + i, hwirq + i);
+ if (ret)
+ return ret;
+ }
return 0;
}
diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c
index c1b4ee955dbe..5b4fd2f4e5f8 100644
--- a/drivers/irqchip/irq-hip04.c
+++ b/drivers/irqchip/irq-hip04.c
@@ -165,6 +165,8 @@ static int hip04_irq_set_affinity(struct irq_data *d,
writel_relaxed(val | bit, reg);
raw_spin_unlock(&irq_controller_lock);
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
return IRQ_SET_MASK_OK;
}
#endif
@@ -312,6 +314,7 @@ static int hip04_irq_domain_map(struct irq_domain *d, unsigned int irq,
irq_set_chip_and_handler(irq, &hip04_irq_chip,
handle_fasteoi_irq);
irq_set_probe(irq);
+ irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
}
irq_set_chip_data(irq, d->host_data);
return 0;
diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
index bb36f572e322..675eda5ff2b8 100644
--- a/drivers/irqchip/irq-imx-gpcv2.c
+++ b/drivers/irqchip/irq-imx-gpcv2.c
@@ -214,13 +214,13 @@ static int __init imx_gpcv2_irqchip_init(struct device_node *node,
int i;
if (!parent) {
- pr_err("%s: no parent, giving up\n", node->full_name);
+ pr_err("%pOF: no parent, giving up\n", node);
return -ENODEV;
}
parent_domain = irq_find_host(parent);
if (!parent_domain) {
- pr_err("%s: unable to get parent domain\n", node->full_name);
+ pr_err("%pOF: unable to get parent domain\n", node);
return -ENXIO;
}
diff --git a/drivers/irqchip/irq-lpc32xx.c b/drivers/irqchip/irq-lpc32xx.c
index 1034aeb2e98a..a48357d369b5 100644
--- a/drivers/irqchip/irq-lpc32xx.c
+++ b/drivers/irqchip/irq-lpc32xx.c
@@ -191,7 +191,7 @@ static int __init lpc32xx_of_ic_init(struct device_node *node,
irqc->base = of_iomap(node, 0);
if (!irqc->base) {
- pr_err("%s: unable to map registers\n", node->full_name);
+ pr_err("%pOF: unable to map registers\n", node);
kfree(irqc);
return -EINVAL;
}
diff --git a/drivers/irqchip/irq-ls-scfg-msi.c b/drivers/irqchip/irq-ls-scfg-msi.c
index 02cca74cab94..119f4ef0d421 100644
--- a/drivers/irqchip/irq-ls-scfg-msi.c
+++ b/drivers/irqchip/irq-ls-scfg-msi.c
@@ -17,13 +17,32 @@
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
+#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/spinlock.h>
-#define MSI_MAX_IRQS 32
-#define MSI_IBS_SHIFT 3
-#define MSIR 4
+#define MSI_IRQS_PER_MSIR 32
+#define MSI_MSIR_OFFSET 4
+
+#define MSI_LS1043V1_1_IRQS_PER_MSIR 8
+#define MSI_LS1043V1_1_MSIR_OFFSET 0x10
+
+struct ls_scfg_msi_cfg {
+ u32 ibs_shift; /* Shift of interrupt bit select */
+ u32 msir_irqs; /* The irq number per MSIR */
+ u32 msir_base; /* The base address of MSIR */
+};
+
+struct ls_scfg_msir {
+ struct ls_scfg_msi *msi_data;
+ unsigned int index;
+ unsigned int gic_irq;
+ unsigned int bit_start;
+ unsigned int bit_end;
+ unsigned int srs; /* Shared interrupt register select */
+ void __iomem *reg;
+};
struct ls_scfg_msi {
spinlock_t lock;
@@ -32,8 +51,11 @@ struct ls_scfg_msi {
struct irq_domain *msi_domain;
void __iomem *regs;
phys_addr_t msiir_addr;
- int irq;
- DECLARE_BITMAP(used, MSI_MAX_IRQS);
+ struct ls_scfg_msi_cfg *cfg;
+ u32 msir_num;
+ struct ls_scfg_msir *msir;
+ u32 irqs_num;
+ unsigned long *used;
};
static struct irq_chip ls_scfg_msi_irq_chip = {
@@ -49,19 +71,56 @@ static struct msi_domain_info ls_scfg_msi_domain_info = {
.chip = &ls_scfg_msi_irq_chip,
};
+static int msi_affinity_flag = 1;
+
+static int __init early_parse_ls_scfg_msi(char *p)
+{
+ if (p && strncmp(p, "no-affinity", 11) == 0)
+ msi_affinity_flag = 0;
+ else
+ msi_affinity_flag = 1;
+
+ return 0;
+}
+early_param("lsmsi", early_parse_ls_scfg_msi);
+
static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
{
struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(data);
msg->address_hi = upper_32_bits(msi_data->msiir_addr);
msg->address_lo = lower_32_bits(msi_data->msiir_addr);
- msg->data = data->hwirq << MSI_IBS_SHIFT;
+ msg->data = data->hwirq;
+
+ if (msi_affinity_flag)
+ msg->data |= cpumask_first(data->common->affinity);
}
static int ls_scfg_msi_set_affinity(struct irq_data *irq_data,
const struct cpumask *mask, bool force)
{
- return -EINVAL;
+ struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(irq_data);
+ u32 cpu;
+
+ if (!msi_affinity_flag)
+ return -EINVAL;
+
+ if (!force)
+ cpu = cpumask_any_and(mask, cpu_online_mask);
+ else
+ cpu = cpumask_first(mask);
+
+ if (cpu >= msi_data->msir_num)
+ return -EINVAL;
+
+ if (msi_data->msir[cpu].gic_irq <= 0) {
+ pr_warn("cannot bind the irq to cpu%d\n", cpu);
+ return -EINVAL;
+ }
+
+ cpumask_copy(irq_data->common->affinity, mask);
+
+ return IRQ_SET_MASK_OK;
}
static struct irq_chip ls_scfg_msi_parent_chip = {
@@ -81,8 +140,8 @@ static int ls_scfg_msi_domain_irq_alloc(struct irq_domain *domain,
WARN_ON(nr_irqs != 1);
spin_lock(&msi_data->lock);
- pos = find_first_zero_bit(msi_data->used, MSI_MAX_IRQS);
- if (pos < MSI_MAX_IRQS)
+ pos = find_first_zero_bit(msi_data->used, msi_data->irqs_num);
+ if (pos < msi_data->irqs_num)
__set_bit(pos, msi_data->used);
else
err = -ENOSPC;
@@ -106,7 +165,7 @@ static void ls_scfg_msi_domain_irq_free(struct irq_domain *domain,
int pos;
pos = d->hwirq;
- if (pos < 0 || pos >= MSI_MAX_IRQS) {
+ if (pos < 0 || pos >= msi_data->irqs_num) {
pr_err("failed to teardown msi. Invalid hwirq %d\n", pos);
return;
}
@@ -123,15 +182,22 @@ static const struct irq_domain_ops ls_scfg_msi_domain_ops = {
static void ls_scfg_msi_irq_handler(struct irq_desc *desc)
{
- struct ls_scfg_msi *msi_data = irq_desc_get_handler_data(desc);
+ struct ls_scfg_msir *msir = irq_desc_get_handler_data(desc);
+ struct ls_scfg_msi *msi_data = msir->msi_data;
unsigned long val;
- int pos, virq;
+ int pos, size, virq, hwirq;
chained_irq_enter(irq_desc_get_chip(desc), desc);
- val = ioread32be(msi_data->regs + MSIR);
- for_each_set_bit(pos, &val, MSI_MAX_IRQS) {
- virq = irq_find_mapping(msi_data->parent, (31 - pos));
+ val = ioread32be(msir->reg);
+
+ pos = msir->bit_start;
+ size = msir->bit_end + 1;
+
+ for_each_set_bit_from(pos, &val, size) {
+ hwirq = ((msir->bit_end - pos) << msi_data->cfg->ibs_shift) |
+ msir->srs;
+ virq = irq_find_mapping(msi_data->parent, hwirq);
if (virq)
generic_handle_irq(virq);
}
@@ -143,7 +209,7 @@ static int ls_scfg_msi_domains_init(struct ls_scfg_msi *msi_data)
{
/* Initialize MSI domain parent */
msi_data->parent = irq_domain_add_linear(NULL,
- MSI_MAX_IRQS,
+ msi_data->irqs_num,
&ls_scfg_msi_domain_ops,
msi_data);
if (!msi_data->parent) {
@@ -164,16 +230,117 @@ static int ls_scfg_msi_domains_init(struct ls_scfg_msi *msi_data)
return 0;
}
+static int ls_scfg_msi_setup_hwirq(struct ls_scfg_msi *msi_data, int index)
+{
+ struct ls_scfg_msir *msir;
+ int virq, i, hwirq;
+
+ virq = platform_get_irq(msi_data->pdev, index);
+ if (virq <= 0)
+ return -ENODEV;
+
+ msir = &msi_data->msir[index];
+ msir->index = index;
+ msir->msi_data = msi_data;
+ msir->gic_irq = virq;
+ msir->reg = msi_data->regs + msi_data->cfg->msir_base + 4 * index;
+
+ if (msi_data->cfg->msir_irqs == MSI_LS1043V1_1_IRQS_PER_MSIR) {
+ msir->bit_start = 32 - ((msir->index + 1) *
+ MSI_LS1043V1_1_IRQS_PER_MSIR);
+ msir->bit_end = msir->bit_start +
+ MSI_LS1043V1_1_IRQS_PER_MSIR - 1;
+ } else {
+ msir->bit_start = 0;
+ msir->bit_end = msi_data->cfg->msir_irqs - 1;
+ }
+
+ irq_set_chained_handler_and_data(msir->gic_irq,
+ ls_scfg_msi_irq_handler,
+ msir);
+
+ if (msi_affinity_flag) {
+ /* Associate MSIR interrupt to the cpu */
+ irq_set_affinity(msir->gic_irq, get_cpu_mask(index));
+ msir->srs = 0; /* This value is determined by the CPU */
+ } else
+ msir->srs = index;
+
+ /* Release the hwirqs corresponding to this MSIR */
+ if (!msi_affinity_flag || msir->index == 0) {
+ for (i = 0; i < msi_data->cfg->msir_irqs; i++) {
+ hwirq = i << msi_data->cfg->ibs_shift | msir->index;
+ bitmap_clear(msi_data->used, hwirq, 1);
+ }
+ }
+
+ return 0;
+}
+
+static int ls_scfg_msi_teardown_hwirq(struct ls_scfg_msir *msir)
+{
+ struct ls_scfg_msi *msi_data = msir->msi_data;
+ int i, hwirq;
+
+ if (msir->gic_irq > 0)
+ irq_set_chained_handler_and_data(msir->gic_irq, NULL, NULL);
+
+ for (i = 0; i < msi_data->cfg->msir_irqs; i++) {
+ hwirq = i << msi_data->cfg->ibs_shift | msir->index;
+ bitmap_set(msi_data->used, hwirq, 1);
+ }
+
+ return 0;
+}
+
+static struct ls_scfg_msi_cfg ls1021_msi_cfg = {
+ .ibs_shift = 3,
+ .msir_irqs = MSI_IRQS_PER_MSIR,
+ .msir_base = MSI_MSIR_OFFSET,
+};
+
+static struct ls_scfg_msi_cfg ls1046_msi_cfg = {
+ .ibs_shift = 2,
+ .msir_irqs = MSI_IRQS_PER_MSIR,
+ .msir_base = MSI_MSIR_OFFSET,
+};
+
+static struct ls_scfg_msi_cfg ls1043_v1_1_msi_cfg = {
+ .ibs_shift = 2,
+ .msir_irqs = MSI_LS1043V1_1_IRQS_PER_MSIR,
+ .msir_base = MSI_LS1043V1_1_MSIR_OFFSET,
+};
+
+static const struct of_device_id ls_scfg_msi_id[] = {
+ /* The following two misspelled compatibles are obsolete */
+ { .compatible = "fsl,1s1021a-msi", .data = &ls1021_msi_cfg},
+ { .compatible = "fsl,1s1043a-msi", .data = &ls1021_msi_cfg},
+
+ { .compatible = "fsl,ls1021a-msi", .data = &ls1021_msi_cfg },
+ { .compatible = "fsl,ls1043a-msi", .data = &ls1021_msi_cfg },
+ { .compatible = "fsl,ls1043a-v1.1-msi", .data = &ls1043_v1_1_msi_cfg },
+ { .compatible = "fsl,ls1046a-msi", .data = &ls1046_msi_cfg },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ls_scfg_msi_id);
+
static int ls_scfg_msi_probe(struct platform_device *pdev)
{
+ const struct of_device_id *match;
struct ls_scfg_msi *msi_data;
struct resource *res;
- int ret;
+ int i, ret;
+
+ match = of_match_device(ls_scfg_msi_id, &pdev->dev);
+ if (!match)
+ return -ENODEV;
msi_data = devm_kzalloc(&pdev->dev, sizeof(*msi_data), GFP_KERNEL);
if (!msi_data)
return -ENOMEM;
+ msi_data->cfg = (struct ls_scfg_msi_cfg *) match->data;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
msi_data->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(msi_data->regs)) {
@@ -182,23 +349,48 @@ static int ls_scfg_msi_probe(struct platform_device *pdev)
}
msi_data->msiir_addr = res->start;
- msi_data->irq = platform_get_irq(pdev, 0);
- if (msi_data->irq <= 0) {
- dev_err(&pdev->dev, "failed to get MSI irq\n");
- return -ENODEV;
- }
-
msi_data->pdev = pdev;
spin_lock_init(&msi_data->lock);
+ msi_data->irqs_num = MSI_IRQS_PER_MSIR *
+ (1 << msi_data->cfg->ibs_shift);
+ msi_data->used = devm_kcalloc(&pdev->dev,
+ BITS_TO_LONGS(msi_data->irqs_num),
+ sizeof(*msi_data->used),
+ GFP_KERNEL);
+ if (!msi_data->used)
+ return -ENOMEM;
+ /*
+ * Reserve all the hwirqs
+ * The available hwirqs will be released in ls1_msi_setup_hwirq()
+ */
+ bitmap_set(msi_data->used, 0, msi_data->irqs_num);
+
+ msi_data->msir_num = of_irq_count(pdev->dev.of_node);
+
+ if (msi_affinity_flag) {
+ u32 cpu_num;
+
+ cpu_num = num_possible_cpus();
+ if (msi_data->msir_num >= cpu_num)
+ msi_data->msir_num = cpu_num;
+ else
+ msi_affinity_flag = 0;
+ }
+
+ msi_data->msir = devm_kcalloc(&pdev->dev, msi_data->msir_num,
+ sizeof(*msi_data->msir),
+ GFP_KERNEL);
+ if (!msi_data->msir)
+ return -ENOMEM;
+
+ for (i = 0; i < msi_data->msir_num; i++)
+ ls_scfg_msi_setup_hwirq(msi_data, i);
+
ret = ls_scfg_msi_domains_init(msi_data);
if (ret)
return ret;
- irq_set_chained_handler_and_data(msi_data->irq,
- ls_scfg_msi_irq_handler,
- msi_data);
-
platform_set_drvdata(pdev, msi_data);
return 0;
@@ -207,8 +399,10 @@ static int ls_scfg_msi_probe(struct platform_device *pdev)
static int ls_scfg_msi_remove(struct platform_device *pdev)
{
struct ls_scfg_msi *msi_data = platform_get_drvdata(pdev);
+ int i;
- irq_set_chained_handler_and_data(msi_data->irq, NULL, NULL);
+ for (i = 0; i < msi_data->msir_num; i++)
+ ls_scfg_msi_teardown_hwirq(&msi_data->msir[i]);
irq_domain_remove(msi_data->msi_domain);
irq_domain_remove(msi_data->parent);
@@ -218,12 +412,6 @@ static int ls_scfg_msi_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id ls_scfg_msi_id[] = {
- { .compatible = "fsl,1s1021a-msi", },
- { .compatible = "fsl,1s1043a-msi", },
- {},
-};
-
static struct platform_driver ls_scfg_msi_driver = {
.driver = {
.name = "ls-scfg-msi",
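
The rework above spreads MSIs across several MSIR registers and encodes a hwirq as (interrupt-bit-select << ibs_shift) | srs; ls_scfg_msi_irq_handler() recovers the same value from a set bit in the MSIR, and ls_scfg_msi_compose_msg() ORs in the target CPU when affinity is enabled. A standalone sketch of that encode/decode arithmetic (not part of the patch; the numbers are illustrative only):

#include <stdio.h>

int main(void)
{
	unsigned int ibs_shift = 2;	/* e.g. the LS1046A layout above */
	unsigned int ibs = 5;		/* interrupt bit select within an MSIR */
	unsigned int srs = 1;		/* shared register (or target CPU) select */

	unsigned int hwirq = (ibs << ibs_shift) | srs;

	/* prints: hwirq=21 ibs=5 srs=1 */
	printf("hwirq=%u ibs=%u srs=%u\n",
	       hwirq, hwirq >> ibs_shift, hwirq & ((1u << ibs_shift) - 1));
	return 0;
}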
diff --git a/drivers/irqchip/irq-metag-ext.c b/drivers/irqchip/irq-metag-ext.c
index 0cdd923d1535..be7216bfb8dd 100644
--- a/drivers/irqchip/irq-metag-ext.c
+++ b/drivers/irqchip/irq-metag-ext.c
@@ -518,6 +518,8 @@ static int meta_intc_set_affinity(struct irq_data *data,
metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr);
+ irq_data_update_effective_affinity(data, cpumask_of(cpu));
+
return 0;
}
#else
@@ -578,6 +580,8 @@ static int meta_intc_map(struct irq_domain *d, unsigned int irq,
else
irq_set_chip_and_handler(irq, &meta_intc_edge_chip,
handle_edge_irq);
+
+ irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
return 0;
}
diff --git a/drivers/irqchip/irq-mips-cpu.c b/drivers/irqchip/irq-mips-cpu.c
index 0a8ed1c05518..14461cbfab2f 100644
--- a/drivers/irqchip/irq-mips-cpu.c
+++ b/drivers/irqchip/irq-mips-cpu.c
@@ -154,7 +154,7 @@ asmlinkage void __weak plat_irq_dispatch(void)
static int mips_cpu_intc_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hw)
{
- static struct irq_chip *chip;
+ struct irq_chip *chip;
if (hw < 2 && cpu_has_mipsmt) {
/* Software interrupts are used for MT/CMT IPI */
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 832ebf4062f7..b3a60da088db 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -445,24 +445,27 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
cpumask_t tmp = CPU_MASK_NONE;
unsigned long flags;
- int i;
+ int i, cpu;
cpumask_and(&tmp, cpumask, cpu_online_mask);
if (cpumask_empty(&tmp))
return -EINVAL;
+ cpu = cpumask_first(&tmp);
+
/* Assumption : cpumask refers to a single CPU */
spin_lock_irqsave(&gic_lock, flags);
/* Re-route this IRQ */
- gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp)));
+ gic_map_to_vpe(irq, mips_cm_vp_id(cpu));
/* Update the pcpu_masks */
for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
clear_bit(irq, pcpu_masks[i].pcpu_mask);
- set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask);
+ set_bit(irq, pcpu_masks[cpu].pcpu_mask);
cpumask_copy(irq_data_get_affinity_mask(d), cpumask);
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
spin_unlock_irqrestore(&gic_lock, flags);
return IRQ_SET_MASK_OK_NOCOPY;
@@ -716,6 +719,7 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
if (err)
return err;
+ irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
return gic_shared_irq_domain_map(d, virq, hwirq, 0);
}
@@ -950,7 +954,6 @@ static void __init __gic_init(unsigned long gic_base_addr,
&gic_irq_domain_ops, NULL);
if (!gic_irq_domain)
panic("Failed to add GIC IRQ domain");
- gic_irq_domain->name = "mips-gic-irq";
gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
IRQ_DOMAIN_FLAG_IPI_PER_CPU,
@@ -959,7 +962,6 @@ static void __init __gic_init(unsigned long gic_base_addr,
if (!gic_ipi_domain)
panic("Failed to add GIC IPI domain");
- gic_ipi_domain->name = "mips-gic-ipi";
irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI);
if (node &&
@@ -1022,8 +1024,11 @@ static int __init gic_of_init(struct device_node *node,
gic_len = resource_size(&res);
}
- if (mips_cm_present())
+ if (mips_cm_present()) {
write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK);
+ /* Ensure GIC region is enabled before trying to access it */
+ __sync();
+ }
gic_present = true;
__gic_init(gic_base, gic_len, cpu_vec, 0, node);
diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c
index 013fc9659a84..25f32e1d7764 100644
--- a/drivers/irqchip/irq-mmp.c
+++ b/drivers/irqchip/irq-mmp.c
@@ -181,13 +181,13 @@ const struct irq_domain_ops mmp_irq_domain_ops = {
.xlate = mmp_irq_domain_xlate,
};
-static struct mmp_intc_conf mmp_conf = {
+static const struct mmp_intc_conf mmp_conf = {
.conf_enable = 0x51,
.conf_disable = 0x0,
.conf_mask = 0x7f,
};
-static struct mmp_intc_conf mmp2_conf = {
+static const struct mmp_intc_conf mmp2_conf = {
.conf_enable = 0x20,
.conf_disable = 0x0,
.conf_mask = 0x7f,
diff --git a/drivers/irqchip/irq-mtk-sysirq.c b/drivers/irqchip/irq-mtk-sysirq.c
index eeac512ec5a8..90aaf190157f 100644
--- a/drivers/irqchip/irq-mtk-sysirq.c
+++ b/drivers/irqchip/irq-mtk-sysirq.c
@@ -178,8 +178,7 @@ static int __init mtk_sysirq_of_init(struct device_node *node,
chip_data->intpol_words[i] = size / 4;
chip_data->intpol_bases[i] = of_iomap(node, i);
if (ret || !chip_data->intpol_bases[i]) {
- pr_err("%s: couldn't map region %d\n",
- node->full_name, i);
+ pr_err("%pOF: couldn't map region %d\n", node, i);
ret = -ENODEV;
goto out_free_intpol;
}
diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
index 05fa9f7af53c..e8b31f52e071 100644
--- a/drivers/irqchip/irq-mxs.c
+++ b/drivers/irqchip/irq-mxs.c
@@ -179,7 +179,7 @@ static void __init icoll_add_domain(struct device_node *np,
&icoll_irq_domain_ops, NULL);
if (!icoll_domain)
- panic("%s: unable to create irq domain", np->full_name);
+ panic("%pOF: unable to create irq domain", np);
}
static void __iomem * __init icoll_init_iobase(struct device_node *np)
@@ -188,7 +188,7 @@ static void __iomem * __init icoll_init_iobase(struct device_node *np)
icoll_base = of_io_request_and_map(np, 0, np->name);
if (IS_ERR(icoll_base))
- panic("%s: unable to map resource", np->full_name);
+ panic("%pOF: unable to map resource", np);
return icoll_base;
}
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index 491568c95aa5..45363ff8d06f 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -140,7 +140,7 @@ static int __init stm32_exti_init(struct device_node *node,
base = of_iomap(node, 0);
if (!base) {
- pr_err("%s: Unable to map registers\n", node->full_name);
+ pr_err("%pOF: Unable to map registers\n", node);
return -ENOMEM;
}
@@ -149,7 +149,7 @@ static int __init stm32_exti_init(struct device_node *node,
nr_exti = fls(readl_relaxed(base + EXTI_RTSR));
writel_relaxed(0, base + EXTI_RTSR);
- pr_info("%s: %d External IRQs detected\n", node->full_name, nr_exti);
+ pr_info("%pOF: %d External IRQs detected\n", node, nr_exti);
domain = irq_domain_add_linear(node, nr_exti,
&irq_exti_domain_ops, NULL);
@@ -163,8 +163,8 @@ static int __init stm32_exti_init(struct device_node *node,
ret = irq_alloc_domain_generic_chips(domain, nr_exti, 1, "exti",
handle_edge_irq, clr, 0, 0);
if (ret) {
- pr_err("%s: Could not allocate generic interrupt chip.\n",
- node->full_name);
+ pr_err("%pOF: Could not allocate generic interrupt chip.\n",
+ node);
goto out_free_domain;
}
diff --git a/drivers/irqchip/irq-sun4i.c b/drivers/irqchip/irq-sun4i.c
index 376b28074e0d..e3e5b9132b75 100644
--- a/drivers/irqchip/irq-sun4i.c
+++ b/drivers/irqchip/irq-sun4i.c
@@ -97,8 +97,8 @@ static int __init sun4i_of_init(struct device_node *node,
{
sun4i_irq_base = of_iomap(node, 0);
if (!sun4i_irq_base)
- panic("%s: unable to map IC registers\n",
- node->full_name);
+ panic("%pOF: unable to map IC registers\n",
+ node);
/* Disable all interrupts */
writel(0, sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(0));
@@ -124,7 +124,7 @@ static int __init sun4i_of_init(struct device_node *node,
sun4i_irq_domain = irq_domain_add_linear(node, 3 * 32,
&sun4i_irq_ops, NULL);
if (!sun4i_irq_domain)
- panic("%s: unable to create IRQ domain\n", node->full_name);
+ panic("%pOF: unable to create IRQ domain\n", node);
set_handle_irq(sun4i_handle_irq);
diff --git a/drivers/irqchip/irq-tegra.c b/drivers/irqchip/irq-tegra.c
index 3973a14bb15b..0abc0cd1c32e 100644
--- a/drivers/irqchip/irq-tegra.c
+++ b/drivers/irqchip/irq-tegra.c
@@ -291,13 +291,13 @@ static int __init tegra_ictlr_init(struct device_node *node,
int err;
if (!parent) {
- pr_err("%s: no parent, giving up\n", node->full_name);
+ pr_err("%pOF: no parent, giving up\n", node);
return -ENODEV;
}
parent_domain = irq_find_host(parent);
if (!parent_domain) {
- pr_err("%s: unable to obtain parent domain\n", node->full_name);
+ pr_err("%pOF: unable to obtain parent domain\n", node);
return -ENXIO;
}
@@ -329,29 +329,29 @@ static int __init tegra_ictlr_init(struct device_node *node,
}
if (!num_ictlrs) {
- pr_err("%s: no valid regions, giving up\n", node->full_name);
+ pr_err("%pOF: no valid regions, giving up\n", node);
err = -ENOMEM;
goto out_free;
}
WARN(num_ictlrs != soc->num_ictlrs,
- "%s: Found %u interrupt controllers in DT; expected %u.\n",
- node->full_name, num_ictlrs, soc->num_ictlrs);
+ "%pOF: Found %u interrupt controllers in DT; expected %u.\n",
+ node, num_ictlrs, soc->num_ictlrs);
domain = irq_domain_add_hierarchy(parent_domain, 0, num_ictlrs * 32,
node, &tegra_ictlr_domain_ops,
lic);
if (!domain) {
- pr_err("%s: failed to allocated domain\n", node->full_name);
+ pr_err("%pOF: failed to allocated domain\n", node);
err = -ENOMEM;
goto out_unmap;
}
tegra_ictlr_syscore_init();
- pr_info("%s: %d interrupts forwarded to %s\n",
- node->full_name, num_ictlrs * 32, parent->full_name);
+ pr_info("%pOF: %d interrupts forwarded to %pOF\n",
+ node, num_ictlrs * 32, parent);
return 0;
diff --git a/drivers/irqchip/irq-uniphier-aidet.c b/drivers/irqchip/irq-uniphier-aidet.c
new file mode 100644
index 000000000000..7ba7f253470e
--- /dev/null
+++ b/drivers/irqchip/irq-uniphier-aidet.c
@@ -0,0 +1,261 @@
+/*
+ * Driver for UniPhier AIDET (ARM Interrupt Detector)
+ *
+ * Copyright (C) 2017 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#define UNIPHIER_AIDET_NR_IRQS 256
+
+#define UNIPHIER_AIDET_DETCONF 0x04 /* inverter register base */
+
+struct uniphier_aidet_priv {
+ struct irq_domain *domain;
+ void __iomem *reg_base;
+ spinlock_t lock;
+ u32 saved_vals[UNIPHIER_AIDET_NR_IRQS / 32];
+};
+
+static void uniphier_aidet_reg_update(struct uniphier_aidet_priv *priv,
+ unsigned int reg, u32 mask, u32 val)
+{
+ unsigned long flags;
+ u32 tmp;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ tmp = readl_relaxed(priv->reg_base + reg);
+ tmp &= ~mask;
+ tmp |= mask & val;
+ writel_relaxed(tmp, priv->reg_base + reg);
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void uniphier_aidet_detconf_update(struct uniphier_aidet_priv *priv,
+ unsigned long index, unsigned int val)
+{
+ unsigned int reg;
+ u32 mask;
+
+ reg = UNIPHIER_AIDET_DETCONF + index / 32 * 4;
+ mask = BIT(index % 32);
+
+ uniphier_aidet_reg_update(priv, reg, mask, val ? mask : 0);
+}
+
+static int uniphier_aidet_irq_set_type(struct irq_data *data, unsigned int type)
+{
+ struct uniphier_aidet_priv *priv = data->chip_data;
+ unsigned int val;
+
+ /* enable inverter for active low triggers */
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ case IRQ_TYPE_LEVEL_HIGH:
+ val = 0;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ val = 1;
+ type = IRQ_TYPE_EDGE_RISING;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ val = 1;
+ type = IRQ_TYPE_LEVEL_HIGH;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ uniphier_aidet_detconf_update(priv, data->hwirq, val);
+
+ return irq_chip_set_type_parent(data, type);
+}
+
+static struct irq_chip uniphier_aidet_irq_chip = {
+ .name = "AIDET",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_set_type = uniphier_aidet_irq_set_type,
+};
+
+static int uniphier_aidet_domain_translate(struct irq_domain *domain,
+ struct irq_fwspec *fwspec,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ if (WARN_ON(fwspec->param_count < 2))
+ return -EINVAL;
+
+ *out_hwirq = fwspec->param[0];
+ *out_type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
+
+ return 0;
+}
+
+static int uniphier_aidet_domain_alloc(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs,
+ void *arg)
+{
+ struct irq_fwspec parent_fwspec;
+ irq_hw_number_t hwirq;
+ unsigned int type;
+ int ret;
+
+ if (nr_irqs != 1)
+ return -EINVAL;
+
+ ret = uniphier_aidet_domain_translate(domain, arg, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ case IRQ_TYPE_LEVEL_HIGH:
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ type = IRQ_TYPE_EDGE_RISING;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ type = IRQ_TYPE_LEVEL_HIGH;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (hwirq >= UNIPHIER_AIDET_NR_IRQS)
+ return -ENXIO;
+
+ ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+ &uniphier_aidet_irq_chip,
+ domain->host_data);
+ if (ret)
+ return ret;
+
+ /* parent is GIC */
+ parent_fwspec.fwnode = domain->parent->fwnode;
+ parent_fwspec.param_count = 3;
+ parent_fwspec.param[0] = 0; /* SPI */
+ parent_fwspec.param[1] = hwirq;
+ parent_fwspec.param[2] = type;
+
+ return irq_domain_alloc_irqs_parent(domain, virq, 1, &parent_fwspec);
+}
+
+static const struct irq_domain_ops uniphier_aidet_domain_ops = {
+ .alloc = uniphier_aidet_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+ .translate = uniphier_aidet_domain_translate,
+};
+
+static int uniphier_aidet_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *parent_np;
+ struct irq_domain *parent_domain;
+ struct uniphier_aidet_priv *priv;
+ struct resource *res;
+
+ parent_np = of_irq_find_parent(dev->of_node);
+ if (!parent_np)
+ return -ENXIO;
+
+ parent_domain = irq_find_host(parent_np);
+ of_node_put(parent_np);
+ if (!parent_domain)
+ return -EPROBE_DEFER;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->reg_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->reg_base))
+ return PTR_ERR(priv->reg_base);
+
+ spin_lock_init(&priv->lock);
+
+ priv->domain = irq_domain_create_hierarchy(
+ parent_domain, 0,
+ UNIPHIER_AIDET_NR_IRQS,
+ of_node_to_fwnode(dev->of_node),
+ &uniphier_aidet_domain_ops, priv);
+ if (!priv->domain)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, priv);
+
+ return 0;
+}
+
+static int __maybe_unused uniphier_aidet_suspend(struct device *dev)
+{
+ struct uniphier_aidet_priv *priv = dev_get_drvdata(dev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(priv->saved_vals); i++)
+ priv->saved_vals[i] = readl_relaxed(
+ priv->reg_base + UNIPHIER_AIDET_DETCONF + i * 4);
+
+ return 0;
+}
+
+static int __maybe_unused uniphier_aidet_resume(struct device *dev)
+{
+ struct uniphier_aidet_priv *priv = dev_get_drvdata(dev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(priv->saved_vals); i++)
+ writel_relaxed(priv->saved_vals[i],
+ priv->reg_base + UNIPHIER_AIDET_DETCONF + i * 4);
+
+ return 0;
+}
+
+static const struct dev_pm_ops uniphier_aidet_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(uniphier_aidet_suspend,
+ uniphier_aidet_resume)
+};
+
+static const struct of_device_id uniphier_aidet_match[] = {
+ { .compatible = "socionext,uniphier-ld4-aidet" },
+ { .compatible = "socionext,uniphier-pro4-aidet" },
+ { .compatible = "socionext,uniphier-sld8-aidet" },
+ { .compatible = "socionext,uniphier-pro5-aidet" },
+ { .compatible = "socionext,uniphier-pxs2-aidet" },
+ { .compatible = "socionext,uniphier-ld11-aidet" },
+ { .compatible = "socionext,uniphier-ld20-aidet" },
+ { .compatible = "socionext,uniphier-pxs3-aidet" },
+ { /* sentinel */ }
+};
+
+static struct platform_driver uniphier_aidet_driver = {
+ .probe = uniphier_aidet_probe,
+ .driver = {
+ .name = "uniphier-aidet",
+ .of_match_table = uniphier_aidet_match,
+ .pm = &uniphier_aidet_pm_ops,
+ },
+};
+builtin_platform_driver(uniphier_aidet_driver);
diff --git a/drivers/irqchip/irq-xilinx-intc.c b/drivers/irqchip/irq-xilinx-intc.c
index 3db7ab1c9741..e3043ded8973 100644
--- a/drivers/irqchip/irq-xilinx-intc.c
+++ b/drivers/irqchip/irq-xilinx-intc.c
@@ -186,8 +186,8 @@ static int __init xilinx_intc_of_init(struct device_node *intc,
if (irqc->intr_mask >> nr_irq)
pr_warn("irq-xilinx: mismatch in kind-of-intr param\n");
- pr_info("irq-xilinx: %s: num_irq=%d, edge=0x%x\n",
- intc->full_name, nr_irq, irqc->intr_mask);
+ pr_info("irq-xilinx: %pOF: num_irq=%d, edge=0x%x\n",
+ intc, nr_irq, irqc->intr_mask);
/*
diff --git a/drivers/irqchip/irq-xtensa-mx.c b/drivers/irqchip/irq-xtensa-mx.c
index 72a391e01011..a15a9510c904 100644
--- a/drivers/irqchip/irq-xtensa-mx.c
+++ b/drivers/irqchip/irq-xtensa-mx.c
@@ -32,6 +32,7 @@ static int xtensa_mx_irq_map(struct irq_domain *d, unsigned int irq,
irq_set_status_flags(irq, IRQ_LEVEL);
return 0;
}
+ irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
return xtensa_irq_map(d, irq, hw);
}
@@ -121,9 +122,12 @@ static int xtensa_mx_irq_retrigger(struct irq_data *d)
static int xtensa_mx_irq_set_affinity(struct irq_data *d,
const struct cpumask *dest, bool force)
{
- unsigned mask = 1u << cpumask_any_and(dest, cpu_online_mask);
+ int cpu = cpumask_any_and(dest, cpu_online_mask);
+ unsigned mask = 1u << cpu;
set_er(mask, MIROUT(d->hwirq - HW_IRQ_MX_BASE));
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
return 0;
}
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index 9ca691d6c13b..46c189ad8d94 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -55,7 +55,7 @@ struct capictr_event {
/* ------------------------------------------------------------- */
-static struct capi_version driver_version = {2, 0, 1, 1 << 4};
+static const struct capi_version driver_version = {2, 0, 1, 1 << 4};
static char driver_serial[CAPI_SERIAL_LEN] = "0004711";
static char capi_manufakturer[64] = "AVM Berlin";
diff --git a/drivers/isdn/divert/isdn_divert.c b/drivers/isdn/divert/isdn_divert.c
index 060d357f107f..6f423bc49d0d 100644
--- a/drivers/isdn/divert/isdn_divert.c
+++ b/drivers/isdn/divert/isdn_divert.c
@@ -485,18 +485,19 @@ static int isdn_divert_icall(isdn_ctrl *ic)
cs->deflect_dest[0] = '\0';
retval = 4; /* only proceed */
}
- sprintf(cs->info, "%d 0x%lx %s %s %s %s 0x%x 0x%x %d %d %s\n",
- cs->akt_state,
- cs->divert_id,
- divert_if.drv_to_name(cs->ics.driver),
- (ic->command == ISDN_STAT_ICALLW) ? "1" : "0",
- cs->ics.parm.setup.phone,
- cs->ics.parm.setup.eazmsn,
- cs->ics.parm.setup.si1,
- cs->ics.parm.setup.si2,
- cs->ics.parm.setup.screen,
- dv->rule.waittime,
- cs->deflect_dest);
+ snprintf(cs->info, sizeof(cs->info),
+ "%d 0x%lx %s %s %s %s 0x%x 0x%x %d %d %s\n",
+ cs->akt_state,
+ cs->divert_id,
+ divert_if.drv_to_name(cs->ics.driver),
+ (ic->command == ISDN_STAT_ICALLW) ? "1" : "0",
+ cs->ics.parm.setup.phone,
+ cs->ics.parm.setup.eazmsn,
+ cs->ics.parm.setup.si1,
+ cs->ics.parm.setup.si2,
+ cs->ics.parm.setup.screen,
+ dv->rule.waittime,
+ cs->deflect_dest);
if ((dv->rule.action == DEFLECT_REPORT) ||
(dv->rule.action == DEFLECT_REJECT)) {
put_info_buffer(cs->info);
diff --git a/drivers/isdn/hardware/avm/c4.c b/drivers/isdn/hardware/avm/c4.c
index 40c7e2cf423b..034cabac699d 100644
--- a/drivers/isdn/hardware/avm/c4.c
+++ b/drivers/isdn/hardware/avm/c4.c
@@ -42,7 +42,7 @@ static char *revision = "$Revision: 1.1.2.2 $";
static bool suppress_pollack;
-static struct pci_device_id c4_pci_tbl[] = {
+static const struct pci_device_id c4_pci_tbl[] = {
{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_C4, 0, 0, (unsigned long)4 },
{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_C2, 0, 0, (unsigned long)2 },
{ } /* Terminating entry */
diff --git a/drivers/isdn/hardware/eicon/divacapi.h b/drivers/isdn/hardware/eicon/divacapi.h
index a315a2914d70..c4868a0d82f4 100644
--- a/drivers/isdn/hardware/eicon/divacapi.h
+++ b/drivers/isdn/hardware/eicon/divacapi.h
@@ -26,15 +26,7 @@
/*#define DEBUG */
-
-
-
-
-
-
-
-
-
+#include <linux/types.h>
#define IMPLEMENT_DTMF 1
#define IMPLEMENT_LINE_INTERCONNECT2 1
@@ -82,8 +74,6 @@
#define CODEC_PERMANENT 0x02
#define ADV_VOICE 0x03
#define MAX_CIP_TYPES 5 /* kind of CIP types for group optimization */
-#define C_IND_MASK_DWORDS ((MAX_APPL + 32) >> 5)
-
#define FAX_CONNECT_INFO_BUFFER_SIZE 256
#define NCPI_BUFFER_SIZE 256
@@ -265,8 +255,8 @@ struct _PLCI {
word ncci_ring_list;
byte inc_dis_ncci_table[MAX_CHANNELS_PER_PLCI];
t_std_internal_command internal_command_queue[MAX_INTERNAL_COMMAND_LEVELS];
- dword c_ind_mask_table[C_IND_MASK_DWORDS];
- dword group_optimization_mask_table[C_IND_MASK_DWORDS];
+ DECLARE_BITMAP(c_ind_mask_table, MAX_APPL);
+ DECLARE_BITMAP(group_optimization_mask_table, MAX_APPL);
byte RBuffer[200];
dword msg_in_queue[MSG_IN_QUEUE_SIZE/sizeof(dword)];
API_SAVE saved_msg;
diff --git a/drivers/isdn/hardware/eicon/divasmain.c b/drivers/isdn/hardware/eicon/divasmain.c
index 8b7ad4f1ab01..b2023e08dcd2 100644
--- a/drivers/isdn/hardware/eicon/divasmain.c
+++ b/drivers/isdn/hardware/eicon/divasmain.c
@@ -110,7 +110,7 @@ typedef struct _diva_os_thread_dpc {
/*
This table should be sorted by PCI device ID
*/
-static struct pci_device_id divas_pci_tbl[] = {
+static const struct pci_device_id divas_pci_tbl[] = {
/* Diva Server BRI-2M PCI 0xE010 */
{ PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_MAESTRA),
CARDTYPE_MAESTRA_PCI },
diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
index 3b11422b1cce..eadd1ed1e014 100644
--- a/drivers/isdn/hardware/eicon/message.c
+++ b/drivers/isdn/hardware/eicon/message.c
@@ -23,9 +23,7 @@
*
*/
-
-
-
+#include <linux/bitmap.h>
#include "platform.h"
#include "di_defs.h"
@@ -35,19 +33,9 @@
#include "mdm_msg.h"
#include "divasync.h"
-
-
#define FILE_ "MESSAGE.C"
#define dprintf
-
-
-
-
-
-
-
-
/*------------------------------------------------------------------*/
/* This is options supported for all adapters that are server by */
/* XDI driver. Allo it is not necessary to ask it from every adapter*/
@@ -72,9 +60,6 @@ static dword diva_xdi_extended_features = 0;
/*------------------------------------------------------------------*/
static void group_optimization(DIVA_CAPI_ADAPTER *a, PLCI *plci);
-static void set_group_ind_mask(PLCI *plci);
-static void clear_group_ind_mask_bit(PLCI *plci, word b);
-static byte test_group_ind_mask_bit(PLCI *plci, word b);
void AutomaticLaw(DIVA_CAPI_ADAPTER *);
word CapiRelease(word);
word CapiRegister(word);
@@ -1087,106 +1072,6 @@ static void plci_remove(PLCI *plci)
}
/*------------------------------------------------------------------*/
-/* Application Group function helpers */
-/*------------------------------------------------------------------*/
-
-static void set_group_ind_mask(PLCI *plci)
-{
- word i;
-
- for (i = 0; i < C_IND_MASK_DWORDS; i++)
- plci->group_optimization_mask_table[i] = 0xffffffffL;
-}
-
-static void clear_group_ind_mask_bit(PLCI *plci, word b)
-{
- plci->group_optimization_mask_table[b >> 5] &= ~(1L << (b & 0x1f));
-}
-
-static byte test_group_ind_mask_bit(PLCI *plci, word b)
-{
- return ((plci->group_optimization_mask_table[b >> 5] & (1L << (b & 0x1f))) != 0);
-}
-
-/*------------------------------------------------------------------*/
-/* c_ind_mask operations for arbitrary MAX_APPL */
-/*------------------------------------------------------------------*/
-
-static void clear_c_ind_mask(PLCI *plci)
-{
- word i;
-
- for (i = 0; i < C_IND_MASK_DWORDS; i++)
- plci->c_ind_mask_table[i] = 0;
-}
-
-static byte c_ind_mask_empty(PLCI *plci)
-{
- word i;
-
- i = 0;
- while ((i < C_IND_MASK_DWORDS) && (plci->c_ind_mask_table[i] == 0))
- i++;
- return (i == C_IND_MASK_DWORDS);
-}
-
-static void set_c_ind_mask_bit(PLCI *plci, word b)
-{
- plci->c_ind_mask_table[b >> 5] |= (1L << (b & 0x1f));
-}
-
-static void clear_c_ind_mask_bit(PLCI *plci, word b)
-{
- plci->c_ind_mask_table[b >> 5] &= ~(1L << (b & 0x1f));
-}
-
-static byte test_c_ind_mask_bit(PLCI *plci, word b)
-{
- return ((plci->c_ind_mask_table[b >> 5] & (1L << (b & 0x1f))) != 0);
-}
-
-static void dump_c_ind_mask(PLCI *plci)
-{
- word i, j, k;
- dword d;
- char *p;
- char buf[40];
-
- for (i = 0; i < C_IND_MASK_DWORDS; i += 4)
- {
- p = buf + 36;
- *p = '\0';
- for (j = 0; j < 4; j++)
- {
- if (i + j < C_IND_MASK_DWORDS)
- {
- d = plci->c_ind_mask_table[i + j];
- for (k = 0; k < 8; k++)
- {
- *(--p) = hex_asc_lo(d);
- d >>= 4;
- }
- }
- else if (i != 0)
- {
- for (k = 0; k < 8; k++)
- *(--p) = ' ';
- }
- *(--p) = ' ';
- }
- dbug(1, dprintf("c_ind_mask =%s", (char *) p));
- }
-}
-
-
-
-
-
-#define dump_plcis(a)
-
-
-
-/*------------------------------------------------------------------*/
/* translation function for each message */
/*------------------------------------------------------------------*/
@@ -1457,13 +1342,13 @@ static byte connect_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
return 1;
}
else if (plci->State == INC_CON_PENDING || plci->State == INC_CON_ALERT) {
- clear_c_ind_mask_bit(plci, (word)(appl->Id - 1));
- dump_c_ind_mask(plci);
+ __clear_bit(appl->Id - 1, plci->c_ind_mask_table);
+ dbug(1, dprintf("c_ind_mask =%*pb", MAX_APPL, plci->c_ind_mask_table));
Reject = GET_WORD(parms[0].info);
dbug(1, dprintf("Reject=0x%x", Reject));
if (Reject)
{
- if (c_ind_mask_empty(plci))
+ if (bitmap_empty(plci->c_ind_mask_table, MAX_APPL))
{
if ((Reject & 0xff00) == 0x3400)
{
@@ -1553,11 +1438,8 @@ static byte connect_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
sig_req(plci, CALL_RES, 0);
}
- for (i = 0; i < max_appl; i++) {
- if (test_c_ind_mask_bit(plci, i)) {
- sendf(&application[i], _DISCONNECT_I, Id, 0, "w", _OTHER_APPL_CONNECTED);
- }
- }
+ for_each_set_bit(i, plci->c_ind_mask_table, max_appl)
+ sendf(&application[i], _DISCONNECT_I, Id, 0, "w", _OTHER_APPL_CONNECTED);
}
}
return 1;
@@ -1584,13 +1466,10 @@ static byte disconnect_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
{
if (plci->State == INC_CON_PENDING || plci->State == INC_CON_ALERT)
{
- clear_c_ind_mask_bit(plci, (word)(appl->Id - 1));
+ __clear_bit(appl->Id - 1, plci->c_ind_mask_table);
plci->appl = appl;
- for (i = 0; i < max_appl; i++)
- {
- if (test_c_ind_mask_bit(plci, i))
- sendf(&application[i], _DISCONNECT_I, Id, 0, "w", 0);
- }
+ for_each_set_bit(i, plci->c_ind_mask_table, max_appl)
+ sendf(&application[i], _DISCONNECT_I, Id, 0, "w", 0);
plci->State = OUTG_DIS_PENDING;
}
if (plci->Sig.Id && plci->appl)
@@ -1634,7 +1513,7 @@ static byte disconnect_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
{
/* clear ind mask bit, just in case of collsion of */
/* DISCONNECT_IND and CONNECT_RES */
- clear_c_ind_mask_bit(plci, (word)(appl->Id - 1));
+ __clear_bit(appl->Id - 1, plci->c_ind_mask_table);
ncci_free_receive_buffers(plci, 0);
if (plci_remove_check(plci))
{
@@ -1642,7 +1521,7 @@ static byte disconnect_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
}
if (plci->State == INC_DIS_PENDING
|| plci->State == SUSPENDING) {
- if (c_ind_mask_empty(plci)) {
+ if (bitmap_empty(plci->c_ind_mask_table, MAX_APPL)) {
if (plci->State != SUSPENDING) plci->State = IDLE;
dbug(1, dprintf("chs=%d", plci->channels));
if (!plci->channels) {
@@ -3351,13 +3230,11 @@ static byte select_b_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
}
plci->State = INC_CON_CONNECTED_ALERT;
plci->appl = appl;
- clear_c_ind_mask_bit(plci, (word)(appl->Id - 1));
- dump_c_ind_mask(plci);
- for (i = 0; i < max_appl; i++) /* disconnect the other appls */
- { /* its quasi a connect */
- if (test_c_ind_mask_bit(plci, i))
- sendf(&application[i], _DISCONNECT_I, Id, 0, "w", _OTHER_APPL_CONNECTED);
- }
+ __clear_bit(appl->Id - 1, plci->c_ind_mask_table);
+ dbug(1, dprintf("c_ind_mask =%*pb", MAX_APPL, plci->c_ind_mask_table));
+ /* disconnect the other appls its quasi a connect */
+ for_each_set_bit(i, plci->c_ind_mask_table, max_appl)
+ sendf(&application[i], _DISCONNECT_I, Id, 0, "w", _OTHER_APPL_CONNECTED);
}
api_save_msg(msg, "s", &plci->saved_msg);
@@ -5692,19 +5569,17 @@ static void sig_ind(PLCI *plci)
cip = find_cip(a, parms[4], parms[6]);
cip_mask = 1L << cip;
dbug(1, dprintf("cip=%d,cip_mask=%lx", cip, cip_mask));
- clear_c_ind_mask(plci);
+ bitmap_zero(plci->c_ind_mask_table, MAX_APPL);
if (!remove_started && !a->adapter_disabled)
{
- set_c_ind_mask_bit(plci, MAX_APPL);
group_optimization(a, plci);
- for (i = 0; i < max_appl; i++) {
+ for_each_set_bit(i, plci->group_optimization_mask_table, max_appl) {
if (application[i].Id
&& (a->CIP_Mask[i] & 1 || a->CIP_Mask[i] & cip_mask)
- && CPN_filter_ok(parms[0], a, i)
- && test_group_ind_mask_bit(plci, i)) {
+ && CPN_filter_ok(parms[0], a, i)) {
dbug(1, dprintf("storedcip_mask[%d]=0x%lx", i, a->CIP_Mask[i]));
- set_c_ind_mask_bit(plci, i);
- dump_c_ind_mask(plci);
+ __set_bit(i, plci->c_ind_mask_table);
+ dbug(1, dprintf("c_ind_mask =%*pb", MAX_APPL, plci->c_ind_mask_table));
plci->State = INC_CON_PENDING;
plci->call_dir = (plci->call_dir & ~(CALL_DIR_OUT | CALL_DIR_ORIGINATE)) |
CALL_DIR_IN | CALL_DIR_ANSWER;
@@ -5750,10 +5625,9 @@ static void sig_ind(PLCI *plci)
SendMultiIE(plci, Id, multi_pi_parms, PI, 0x210, true));
}
}
- clear_c_ind_mask_bit(plci, MAX_APPL);
- dump_c_ind_mask(plci);
+ dbug(1, dprintf("c_ind_mask =%*pb", MAX_APPL, plci->c_ind_mask_table));
}
- if (c_ind_mask_empty(plci)) {
+ if (bitmap_empty(plci->c_ind_mask_table, MAX_APPL)) {
sig_req(plci, HANGUP, 0);
send_req(plci);
plci->State = IDLE;
@@ -5994,13 +5868,13 @@ static void sig_ind(PLCI *plci)
break;
case RESUME:
- clear_c_ind_mask_bit(plci, (word)(plci->appl->Id - 1));
+ __clear_bit(plci->appl->Id - 1, plci->c_ind_mask_table);
PUT_WORD(&resume_cau[4], GOOD);
sendf(plci->appl, _FACILITY_I, Id, 0, "ws", (word)3, resume_cau);
break;
case SUSPEND:
- clear_c_ind_mask(plci);
+ bitmap_zero(plci->c_ind_mask_table, MAX_APPL);
if (plci->NL.Id && !plci->nl_remove_id) {
mixer_remove(plci);
@@ -6037,15 +5911,12 @@ static void sig_ind(PLCI *plci)
if (plci->State == INC_CON_PENDING || plci->State == INC_CON_ALERT)
{
- for (i = 0; i < max_appl; i++)
- {
- if (test_c_ind_mask_bit(plci, i))
- sendf(&application[i], _DISCONNECT_I, Id, 0, "w", 0);
- }
+ for_each_set_bit(i, plci->c_ind_mask_table, max_appl)
+ sendf(&application[i], _DISCONNECT_I, Id, 0, "w", 0);
}
else
{
- clear_c_ind_mask(plci);
+ bitmap_zero(plci->c_ind_mask_table, MAX_APPL);
}
if (!plci->appl)
{
@@ -6055,7 +5926,7 @@ static void sig_ind(PLCI *plci)
a->listen_active--;
}
plci->State = INC_DIS_PENDING;
- if (c_ind_mask_empty(plci))
+ if (bitmap_empty(plci->c_ind_mask_table, MAX_APPL))
{
plci->State = IDLE;
if (plci->NL.Id && !plci->nl_remove_id)
@@ -6341,14 +6212,10 @@ static void SendInfo(PLCI *plci, dword Id, byte **parms, byte iesent)
|| Info_Number == DSP
|| Info_Number == UUI)
{
- for (j = 0; j < max_appl; j++)
- {
- if (test_c_ind_mask_bit(plci, j))
- {
- dbug(1, dprintf("Ovl_Ind"));
- iesent = true;
- sendf(&application[j], _INFO_I, Id, 0, "wS", Info_Number, Info_Element);
- }
+ for_each_set_bit(j, plci->c_ind_mask_table, max_appl) {
+ dbug(1, dprintf("Ovl_Ind"));
+ iesent = true;
+ sendf(&application[j], _INFO_I, Id, 0, "wS", Info_Number, Info_Element);
}
}
} /* all other signalling states */
@@ -6416,14 +6283,10 @@ static byte SendMultiIE(PLCI *plci, dword Id, byte **parms, byte ie_type,
}
else if (!plci->appl && Info_Number)
{ /* overlap receiving broadcast */
- for (j = 0; j < max_appl; j++)
- {
- if (test_c_ind_mask_bit(plci, j))
- {
- iesent = true;
- dbug(1, dprintf("Mlt_Ovl_Ind"));
- sendf(&application[j] , _INFO_I, Id, 0, "wS", Info_Number, Info_Element);
- }
+ for_each_set_bit(j, plci->c_ind_mask_table, max_appl) {
+ iesent = true;
+ dbug(1, dprintf("Mlt_Ovl_Ind"));
+ sendf(&application[j] , _INFO_I, Id, 0, "wS", Info_Number, Info_Element);
}
} /* all other signalling states */
else if (Info_Number
@@ -7270,7 +7133,6 @@ static word get_plci(DIVA_CAPI_ADAPTER *a)
word i, j;
PLCI *plci;
- dump_plcis(a);
for (i = 0; i < a->max_plci && a->plci[i].Id; i++);
if (i == a->max_plci) {
dbug(1, dprintf("get_plci: out of PLCIs"));
@@ -7321,8 +7183,8 @@ static word get_plci(DIVA_CAPI_ADAPTER *a)
plci->ncci_ring_list = 0;
for (j = 0; j < MAX_CHANNELS_PER_PLCI; j++) plci->inc_dis_ncci_table[j] = 0;
- clear_c_ind_mask(plci);
- set_group_ind_mask(plci);
+ bitmap_zero(plci->c_ind_mask_table, MAX_APPL);
+ bitmap_fill(plci->group_optimization_mask_table, MAX_APPL);
plci->fax_connect_info_length = 0;
plci->nsf_control_bits = 0;
plci->ncpi_state = 0x00;
@@ -9373,10 +9235,10 @@ word CapiRelease(word Id)
if (plci->State == INC_CON_PENDING
|| plci->State == INC_CON_ALERT)
{
- if (test_c_ind_mask_bit(plci, (word)(Id - 1)))
+ if (test_bit(Id - 1, plci->c_ind_mask_table))
{
- clear_c_ind_mask_bit(plci, (word)(Id - 1));
- if (c_ind_mask_empty(plci))
+ __clear_bit(Id - 1, plci->c_ind_mask_table);
+ if (bitmap_empty(plci->c_ind_mask_table, MAX_APPL))
{
sig_req(plci, HANGUP, 0);
send_req(plci);
@@ -9384,10 +9246,10 @@ word CapiRelease(word Id)
}
}
}
- if (test_c_ind_mask_bit(plci, (word)(Id - 1)))
+ if (test_bit(Id - 1, plci->c_ind_mask_table))
{
- clear_c_ind_mask_bit(plci, (word)(Id - 1));
- if (c_ind_mask_empty(plci))
+ __clear_bit(Id - 1, plci->c_ind_mask_table);
+ if (bitmap_empty(plci->c_ind_mask_table, MAX_APPL))
{
if (!plci->appl)
{
@@ -9452,7 +9314,7 @@ word CapiRelease(word Id)
static word plci_remove_check(PLCI *plci)
{
if (!plci) return true;
- if (!plci->NL.Id && c_ind_mask_empty(plci))
+ if (!plci->NL.Id && bitmap_empty(plci->c_ind_mask_table, MAX_APPL))
{
if (plci->Sig.Id == 0xff)
plci->Sig.Id = 0;
@@ -14735,7 +14597,8 @@ static void group_optimization(DIVA_CAPI_ADAPTER *a, PLCI *plci)
word appl_number_group_type[MAX_APPL];
PLCI *auxplci;
- set_group_ind_mask(plci); /* all APPLs within this inc. call are allowed to dial in */
+ /* all APPLs within this inc. call are allowed to dial in */
+ bitmap_fill(plci->group_optimization_mask_table, MAX_APPL);
if (!a->group_optimization_enabled)
{
@@ -14771,13 +14634,12 @@ static void group_optimization(DIVA_CAPI_ADAPTER *a, PLCI *plci)
if (a->plci[k].Id)
{
auxplci = &a->plci[k];
- if (auxplci->appl == &application[i]) /* application has a busy PLCI */
- {
+ if (auxplci->appl == &application[i]) {
+ /* application has a busy PLCI */
busy = true;
dbug(1, dprintf("Appl 0x%x is busy", i + 1));
- }
- else if (test_c_ind_mask_bit(auxplci, i)) /* application has an incoming call pending */
- {
+ } else if (test_bit(i, plci->c_ind_mask_table)) {
+ /* application has an incoming call pending */
busy = true;
dbug(1, dprintf("Appl 0x%x has inc. call pending", i + 1));
}
@@ -14826,7 +14688,8 @@ static void group_optimization(DIVA_CAPI_ADAPTER *a, PLCI *plci)
if (appl_number_group_type[i] == appl_number_group_type[j])
{
dbug(1, dprintf("Appl 0x%x is member of group 0x%x, no call", j + 1, appl_number_group_type[j]));
- clear_group_ind_mask_bit(plci, j); /* disable call on other group members */
+ /* disable call on other group members */
+ __clear_bit(j, plci->group_optimization_mask_table);
appl_number_group_type[j] = 0; /* remove disabled group member from group list */
}
}
@@ -14834,7 +14697,7 @@ static void group_optimization(DIVA_CAPI_ADAPTER *a, PLCI *plci)
}
else /* application should not get a call */
{
- clear_group_ind_mask_bit(plci, i);
+ __clear_bit(i, plci->group_optimization_mask_table);
}
}
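
The hunks above replace capi20.c's hand-rolled mask helpers (clear_c_ind_mask(), set_group_ind_mask(), test_c_ind_mask_bit(), c_ind_mask_empty() and friends) with the generic kernel bitmap primitives. A minimal sketch of the correspondence, assuming the tables are declared as MAX_APPL-bit bitmaps as the rest of this patch does; the called handlers are hypothetical:

	#include <linux/bitmap.h>
	#include <linux/bitops.h>

	DECLARE_BITMAP(c_ind_mask_table, MAX_APPL);

	bitmap_zero(c_ind_mask_table, MAX_APPL);	/* old clear_c_ind_mask()     */
	bitmap_fill(c_ind_mask_table, MAX_APPL);	/* old set_group_ind_mask()   */
	__clear_bit(i, c_ind_mask_table);		/* old clear_c_ind_mask_bit() */
	if (test_bit(i, c_ind_mask_table))		/* old test_c_ind_mask_bit()  */
		handle_pending_appl(i);			/* hypothetical               */
	if (bitmap_empty(c_ind_mask_table, MAX_APPL))	/* old c_ind_mask_empty()     */
		hang_up();				/* hypothetical               */
	for_each_set_bit(i, c_ind_mask_table, max_appl)	/* replaces the open-coded    */
		send_info(i);				/* loop-plus-test pattern     */

The non-atomic __clear_bit() variant is used on the assumption that these tables are never modified concurrently, relying on the existing serialization of the CAPI paths; for_each_set_bit() also removes the explicit bit test inside the broadcast loops.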
diff --git a/drivers/isdn/hardware/mISDN/avmfritz.c b/drivers/isdn/hardware/mISDN/avmfritz.c
index e3fa1cd64470..dce6632daae1 100644
--- a/drivers/isdn/hardware/mISDN/avmfritz.c
+++ b/drivers/isdn/hardware/mISDN/avmfritz.c
@@ -1142,7 +1142,7 @@ fritz_remove_pci(struct pci_dev *pdev)
pr_info("%s: drvdata already removed\n", __func__);
}
-static struct pci_device_id fcpci_ids[] = {
+static const struct pci_device_id fcpci_ids[] = {
{ PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_A1, PCI_ANY_ID, PCI_ANY_ID,
0, 0, (unsigned long) "Fritz!Card PCI"},
{ PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_A1_V2, PCI_ANY_ID, PCI_ANY_ID,
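
Most of the remaining ISDN hunks make the same one-line change: a device ID table is only ever read by the driver core while matching, so it can be declared const and placed in read-only data. A minimal sketch of the pattern, reusing IDs that appear in this patch:

	#include <linux/module.h>
	#include <linux/pci.h>

	static const struct pci_device_id example_ids[] = {
		{ PCI_DEVICE(PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_A1) },
		{ }	/* all-zero terminating entry */
	};
	MODULE_DEVICE_TABLE(pci, example_ids);

struct pci_driver's .id_table member is already a pointer to const, so no further driver-side change is needed beyond the declaration.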
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
index aea0c9616ea5..3cf07b8ced1c 100644
--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -5348,7 +5348,7 @@ static const struct hm_map hfcm_map[] = {
#undef H
#define H(x) ((unsigned long)&hfcm_map[x])
-static struct pci_device_id hfmultipci_ids[] = {
+static const struct pci_device_id hfmultipci_ids[] = {
/* Cards with HFC-4S Chip */
{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index 5dc246d71c16..d2e401a8090e 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -2161,7 +2161,7 @@ static const struct _hfc_map hfc_map[] =
{},
};
-static struct pci_device_id hfc_ids[] =
+static const struct pci_device_id hfc_ids[] =
{
{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_2BD0),
(unsigned long) &hfc_map[0] },
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.h b/drivers/isdn/hardware/mISDN/hfcsusb.h
index 4157311d569d..5f8f1d9cac11 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.h
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.h
@@ -337,7 +337,7 @@ static const char *HFC_NT_LAYER1_STATES[HFC_MAX_NT_LAYER1_STATE + 1] = {
};
/* supported devices */
-static struct usb_device_id hfcsusb_idtab[] = {
+static const struct usb_device_id hfcsusb_idtab[] = {
{
USB_DEVICE(0x0959, 0x2bd0),
.driver_info = (unsigned long) &((struct hfcsusb_vdata)
diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c
index afde4edef9ae..6a6d848bd18e 100644
--- a/drivers/isdn/hardware/mISDN/netjet.c
+++ b/drivers/isdn/hardware/mISDN/netjet.c
@@ -1137,7 +1137,7 @@ static void nj_remove(struct pci_dev *pdev)
/* We cannot select cards with PCI_SUB... IDs, since there are cards with
 * SUB IDs set to PCI_ANY_ID, so we need to match all and reject
 * other known cards which do not work with this driver - see the probe function */
-static struct pci_device_id nj_pci_ids[] = {
+static const struct pci_device_id nj_pci_ids[] = {
{ PCI_VENDOR_ID_TIGERJET, PCI_DEVICE_ID_TIGERJET_300,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ }
diff --git a/drivers/isdn/hardware/mISDN/w6692.c b/drivers/isdn/hardware/mISDN/w6692.c
index 3052c836b89f..d80072fef434 100644
--- a/drivers/isdn/hardware/mISDN/w6692.c
+++ b/drivers/isdn/hardware/mISDN/w6692.c
@@ -1398,7 +1398,7 @@ w6692_remove_pci(struct pci_dev *pdev)
pr_notice("%s: drvdata already removed\n", __func__);
}
-static struct pci_device_id w6692_ids[] = {
+static const struct pci_device_id w6692_ids[] = {
{ PCI_VENDOR_ID_DYNALINK, PCI_DEVICE_ID_DYNALINK_IS64PH,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, (ulong)&w6692_map[0]},
{ PCI_VENDOR_ID_WINBOND2, PCI_DEVICE_ID_WINBOND2_6692,
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c
index c7d68675b028..7108bdb8742e 100644
--- a/drivers/isdn/hisax/config.c
+++ b/drivers/isdn/hisax/config.c
@@ -1909,7 +1909,7 @@ static void EChannel_proc_rcv(struct hisax_d_if *d_if)
#ifdef CONFIG_PCI
#include <linux/pci.h>
-static struct pci_device_id hisax_pci_tbl[] __used = {
+static const struct pci_device_id hisax_pci_tbl[] __used = {
#ifdef CONFIG_HISAX_FRITZPCI
{PCI_VDEVICE(AVM, PCI_DEVICE_ID_AVM_A1) },
#endif
diff --git a/drivers/isdn/hisax/hfc4s8s_l1.c b/drivers/isdn/hisax/hfc4s8s_l1.c
index 90f051ce0259..9090cc1e1f29 100644
--- a/drivers/isdn/hisax/hfc4s8s_l1.c
+++ b/drivers/isdn/hisax/hfc4s8s_l1.c
@@ -86,7 +86,7 @@ typedef struct {
char *device_name;
} hfc4s8s_param;
-static struct pci_device_id hfc4s8s_ids[] = {
+static const struct pci_device_id hfc4s8s_ids[] = {
{.vendor = PCI_VENDOR_ID_CCD,
.device = PCI_DEVICE_ID_4S,
.subvendor = 0x1397,
diff --git a/drivers/isdn/hisax/hfc_usb.c b/drivers/isdn/hisax/hfc_usb.c
index ef4748083efd..e8212185d386 100644
--- a/drivers/isdn/hisax/hfc_usb.c
+++ b/drivers/isdn/hisax/hfc_usb.c
@@ -65,7 +65,7 @@ typedef struct {
} hfcsusb_vdata;
/* VID/PID device list */
-static struct usb_device_id hfcusb_idtab[] = {
+static const struct usb_device_id hfcusb_idtab[] = {
{
USB_DEVICE(0x0959, 0x2bd0),
.driver_info = (unsigned long) &((hfcsusb_vdata)
diff --git a/drivers/isdn/hisax/hisax_fcpcipnp.c b/drivers/isdn/hisax/hisax_fcpcipnp.c
index 5a9f39ed1d5d..e4f7573ba9bf 100644
--- a/drivers/isdn/hisax/hisax_fcpcipnp.c
+++ b/drivers/isdn/hisax/hisax_fcpcipnp.c
@@ -52,7 +52,7 @@ module_param(debug, int, 0);
MODULE_AUTHOR("Kai Germaschewski <kai.germaschewski@gmx.de>/Karsten Keil <kkeil@suse.de>");
MODULE_DESCRIPTION("AVM Fritz!PCI/PnP ISDN driver");
-static struct pci_device_id fcpci_ids[] = {
+static const struct pci_device_id fcpci_ids[] = {
{ .vendor = PCI_VENDOR_ID_AVM,
.device = PCI_DEVICE_ID_AVM_A1,
.subvendor = PCI_ANY_ID,
diff --git a/drivers/isdn/hysdn/hysdn_proclog.c b/drivers/isdn/hysdn/hysdn_proclog.c
index 7b5fd8fb1761..aaca0b3d662e 100644
--- a/drivers/isdn/hysdn/hysdn_proclog.c
+++ b/drivers/isdn/hysdn/hysdn_proclog.c
@@ -44,7 +44,6 @@ struct procdata {
char log_name[15]; /* log filename */
struct log_data *log_head, *log_tail; /* head and tail for queue */
int if_used; /* open count for interface */
- int volatile del_lock; /* lock for delete operations */
unsigned char logtmp[LOG_MAX_LINELEN];
wait_queue_head_t rd_queue;
};
@@ -102,7 +101,6 @@ put_log_buffer(hysdn_card *card, char *cp)
{
struct log_data *ib;
struct procdata *pd = card->proclog;
- int i;
unsigned long flags;
if (!pd)
@@ -126,21 +124,21 @@ put_log_buffer(hysdn_card *card, char *cp)
else
pd->log_tail->next = ib; /* follows existing messages */
pd->log_tail = ib; /* new tail */
- i = pd->del_lock++; /* get lock state */
- spin_unlock_irqrestore(&card->hysdn_lock, flags);
/* delete old entries */
- if (!i)
- while (pd->log_head->next) {
- if ((pd->log_head->usage_cnt <= 0) &&
- (pd->log_head->next->usage_cnt <= 0)) {
- ib = pd->log_head;
- pd->log_head = pd->log_head->next;
- kfree(ib);
- } else
- break;
- } /* pd->log_head->next */
- pd->del_lock--; /* release lock level */
+ while (pd->log_head->next) {
+ if ((pd->log_head->usage_cnt <= 0) &&
+ (pd->log_head->next->usage_cnt <= 0)) {
+ ib = pd->log_head;
+ pd->log_head = pd->log_head->next;
+ kfree(ib);
+ } else {
+ break;
+ }
+ } /* pd->log_head->next */
+
+ spin_unlock_irqrestore(&card->hysdn_lock, flags);
+
wake_up_interruptible(&(pd->rd_queue)); /* announce new entry */
} /* put_log_buffer */
diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
index 89b09c51ab7c..38a5bb764c7b 100644
--- a/drivers/isdn/i4l/isdn_common.c
+++ b/drivers/isdn/i4l/isdn_common.c
@@ -1376,6 +1376,7 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
if (arg) {
if (copy_from_user(bname, argp, sizeof(bname) - 1))
return -EFAULT;
+ bname[sizeof(bname)-1] = 0;
} else
return -EINVAL;
ret = mutex_lock_interruptible(&dev->mtx);
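
copy_from_user() copies raw bytes and never NUL-terminates, so after copying sizeof(bname) - 1 bytes the buffer may lack a terminator if userspace supplied no NUL in that range; the added assignment guarantees one before the name is used as a string. The general pattern, with a hypothetical buffer:

	char name[32];

	if (copy_from_user(name, user_ptr, sizeof(name) - 1))
		return -EFAULT;
	name[sizeof(name) - 1] = '\0';	/* terminate before any str*() use */

strncpy_from_user() is an alternative when the source is known to be a NUL-terminated user string.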
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index c151c6daa67e..f63a110b7bcb 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -2611,10 +2611,9 @@ isdn_net_newslave(char *parm)
char newname[10];
if (p) {
- /* Slave-Name MUST not be empty */
- if (!strlen(p + 1))
+ /* Slave-Name MUST not be empty or overflow 'newname' */
+ if (strscpy(newname, p + 1, sizeof(newname)) <= 0)
return NULL;
- strcpy(newname, p + 1);
*p = 0;
/* Master must already exist */
if (!(n = isdn_net_findif(parm)))
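
strscpy() copies at most size - 1 characters, always NUL-terminates the destination, and returns the number of characters copied or -E2BIG if the source did not fit, so the single <= 0 test above rejects both an empty slave name (return value 0) and one that would overflow newname[10]. The same check in isolation, with hypothetical names:

	char slave[10];
	ssize_t n;

	n = strscpy(slave, src, sizeof(slave));
	if (n <= 0)		/* 0: empty source, -E2BIG: source too long */
		return NULL;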
diff --git a/drivers/isdn/mISDN/fsm.c b/drivers/isdn/mISDN/fsm.c
index 78fc5d5e9051..92e6570b1143 100644
--- a/drivers/isdn/mISDN/fsm.c
+++ b/drivers/isdn/mISDN/fsm.c
@@ -26,7 +26,7 @@
#define FSM_TIMER_DEBUG 0
-void
+int
mISDN_FsmNew(struct Fsm *fsm,
struct FsmNode *fnlist, int fncount)
{
@@ -34,6 +34,8 @@ mISDN_FsmNew(struct Fsm *fsm,
fsm->jumpmatrix = kzalloc(sizeof(FSMFNPTR) * fsm->state_count *
fsm->event_count, GFP_KERNEL);
+ if (fsm->jumpmatrix == NULL)
+ return -ENOMEM;
for (i = 0; i < fncount; i++)
if ((fnlist[i].state >= fsm->state_count) ||
@@ -45,6 +47,7 @@ mISDN_FsmNew(struct Fsm *fsm,
} else
fsm->jumpmatrix[fsm->state_count * fnlist[i].event +
fnlist[i].state] = (FSMFNPTR) fnlist[i].routine;
+ return 0;
}
EXPORT_SYMBOL(mISDN_FsmNew);
diff --git a/drivers/isdn/mISDN/fsm.h b/drivers/isdn/mISDN/fsm.h
index 928f5be192c1..e1def8490221 100644
--- a/drivers/isdn/mISDN/fsm.h
+++ b/drivers/isdn/mISDN/fsm.h
@@ -55,7 +55,7 @@ struct FsmTimer {
void *arg;
};
-extern void mISDN_FsmNew(struct Fsm *, struct FsmNode *, int);
+extern int mISDN_FsmNew(struct Fsm *, struct FsmNode *, int);
extern void mISDN_FsmFree(struct Fsm *);
extern int mISDN_FsmEvent(struct FsmInst *, int , void *);
extern void mISDN_FsmChangeState(struct FsmInst *, int);
diff --git a/drivers/isdn/mISDN/layer1.c b/drivers/isdn/mISDN/layer1.c
index bebc57b72138..3192b0eb3944 100644
--- a/drivers/isdn/mISDN/layer1.c
+++ b/drivers/isdn/mISDN/layer1.c
@@ -414,8 +414,7 @@ l1_init(u_int *deb)
l1fsm_s.event_count = L1_EVENT_COUNT;
l1fsm_s.strEvent = strL1Event;
l1fsm_s.strState = strL1SState;
- mISDN_FsmNew(&l1fsm_s, L1SFnList, ARRAY_SIZE(L1SFnList));
- return 0;
+ return mISDN_FsmNew(&l1fsm_s, L1SFnList, ARRAY_SIZE(L1SFnList));
}
void
diff --git a/drivers/isdn/mISDN/layer2.c b/drivers/isdn/mISDN/layer2.c
index 7243a6746f8b..9ff0903a0e89 100644
--- a/drivers/isdn/mISDN/layer2.c
+++ b/drivers/isdn/mISDN/layer2.c
@@ -2247,15 +2247,26 @@ static struct Bprotocol X75SLP = {
int
Isdnl2_Init(u_int *deb)
{
+ int res;
debug = deb;
mISDN_register_Bprotocol(&X75SLP);
l2fsm.state_count = L2_STATE_COUNT;
l2fsm.event_count = L2_EVENT_COUNT;
l2fsm.strEvent = strL2Event;
l2fsm.strState = strL2State;
- mISDN_FsmNew(&l2fsm, L2FnList, ARRAY_SIZE(L2FnList));
- TEIInit(deb);
+ res = mISDN_FsmNew(&l2fsm, L2FnList, ARRAY_SIZE(L2FnList));
+ if (res)
+ goto error;
+ res = TEIInit(deb);
+ if (res)
+ goto error_fsm;
return 0;
+
+error_fsm:
+ mISDN_FsmFree(&l2fsm);
+error:
+ mISDN_unregister_Bprotocol(&X75SLP);
+ return res;
}
void
diff --git a/drivers/isdn/mISDN/tei.c b/drivers/isdn/mISDN/tei.c
index 908127efccf8..12d9e5f4beb1 100644
--- a/drivers/isdn/mISDN/tei.c
+++ b/drivers/isdn/mISDN/tei.c
@@ -1387,23 +1387,37 @@ create_teimanager(struct mISDNdevice *dev)
int TEIInit(u_int *deb)
{
+ int res;
debug = deb;
teifsmu.state_count = TEI_STATE_COUNT;
teifsmu.event_count = TEI_EVENT_COUNT;
teifsmu.strEvent = strTeiEvent;
teifsmu.strState = strTeiState;
- mISDN_FsmNew(&teifsmu, TeiFnListUser, ARRAY_SIZE(TeiFnListUser));
+ res = mISDN_FsmNew(&teifsmu, TeiFnListUser, ARRAY_SIZE(TeiFnListUser));
+ if (res)
+ goto error;
teifsmn.state_count = TEI_STATE_COUNT;
teifsmn.event_count = TEI_EVENT_COUNT;
teifsmn.strEvent = strTeiEvent;
teifsmn.strState = strTeiState;
- mISDN_FsmNew(&teifsmn, TeiFnListNet, ARRAY_SIZE(TeiFnListNet));
+ res = mISDN_FsmNew(&teifsmn, TeiFnListNet, ARRAY_SIZE(TeiFnListNet));
+ if (res)
+ goto error_smn;
deactfsm.state_count = DEACT_STATE_COUNT;
deactfsm.event_count = DEACT_EVENT_COUNT;
deactfsm.strEvent = strDeactEvent;
deactfsm.strState = strDeactState;
- mISDN_FsmNew(&deactfsm, DeactFnList, ARRAY_SIZE(DeactFnList));
+ res = mISDN_FsmNew(&deactfsm, DeactFnList, ARRAY_SIZE(DeactFnList));
+ if (res)
+ goto error_deact;
return 0;
+
+error_deact:
+ mISDN_FsmFree(&teifsmn);
+error_smn:
+ mISDN_FsmFree(&teifsmu);
+error:
+ return res;
}
void TEIFree(void)
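
Since mISDN_FsmNew() can now fail (its jumpmatrix comes from kzalloc()), Isdnl2_Init() and TEIInit() unwind in reverse order, releasing only the FSMs that were actually created. The same goto-ladder idiom in miniature, with hypothetical FSMs and node lists:

	int res;

	res = mISDN_FsmNew(&fsm_a, a_list, ARRAY_SIZE(a_list));
	if (res)
		return res;
	res = mISDN_FsmNew(&fsm_b, b_list, ARRAY_SIZE(b_list));
	if (res)
		goto err_free_a;	/* undo only what already succeeded */
	return 0;

err_free_a:
	mISDN_FsmFree(&fsm_a);
	return res;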
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 594b24d410c3..27dc8002b121 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -58,6 +58,15 @@ config LEDS_AAT1290
help
This option enables support for the LEDs on the AAT1290.
+config LEDS_AS3645A
+ tristate "AS3645A LED flash controller support"
+ depends on I2C && LEDS_CLASS_FLASH
+ depends on V4L2_FLASH_LED_CLASS || !V4L2_FLASH_LED_CLASS
+ help
+ Enable LED flash class support for AS3645A LED flash
+ controller. V4L2 flash API is provided as well if
+ CONFIG_V4L2_FLASH_API is enabled.
+
config LEDS_BCM6328
tristate "LED Support for Broadcom BCM6328"
depends on LEDS_CLASS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 909dae62ba05..7d7b26552923 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_LEDS_TRIGGERS) += led-triggers.o
# LED Platform Drivers
obj-$(CONFIG_LEDS_88PM860X) += leds-88pm860x.o
obj-$(CONFIG_LEDS_AAT1290) += leds-aat1290.o
+obj-$(CONFIG_LEDS_AS3645A) += leds-as3645a.o
obj-$(CONFIG_LEDS_BCM6328) += leds-bcm6328.o
obj-$(CONFIG_LEDS_BCM6358) += leds-bcm6358.o
obj-$(CONFIG_LEDS_BD2802) += leds-bd2802.o
diff --git a/drivers/leds/leds-aat1290.c b/drivers/leds/leds-aat1290.c
index a21e19297745..424898e6c69d 100644
--- a/drivers/leds/leds-aat1290.c
+++ b/drivers/leds/leds-aat1290.c
@@ -432,7 +432,7 @@ static void aat1290_init_v4l2_flash_config(struct aat1290_led *led,
strlcpy(v4l2_sd_cfg->dev_name, led_cdev->name,
sizeof(v4l2_sd_cfg->dev_name));
- s = &v4l2_sd_cfg->torch_intensity;
+ s = &v4l2_sd_cfg->intensity;
s->min = led->mm_current_scale[0];
s->max = led_cfg->max_mm_current;
s->step = 1;
@@ -504,7 +504,7 @@ static int aat1290_led_probe(struct platform_device *pdev)
/* Create V4L2 Flash subdev. */
led->v4l2_flash = v4l2_flash_init(dev, of_fwnode_handle(sub_node),
- fled_cdev, NULL, &v4l2_flash_ops,
+ fled_cdev, &v4l2_flash_ops,
&v4l2_sd_cfg);
if (IS_ERR(led->v4l2_flash)) {
ret = PTR_ERR(led->v4l2_flash);
diff --git a/drivers/leds/leds-as3645a.c b/drivers/leds/leds-as3645a.c
new file mode 100644
index 000000000000..bbbbe0898233
--- /dev/null
+++ b/drivers/leds/leds-as3645a.c
@@ -0,0 +1,763 @@
+/*
+ * drivers/leds/leds-as3645a.c - AS3645A and LM3555 flash controllers driver
+ *
+ * Copyright (C) 2008-2011 Nokia Corporation
+ * Copyright (c) 2011, 2017 Intel Corporation.
+ *
+ * Based on drivers/media/i2c/as3645a.c.
+ *
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/led-class-flash.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
+#include <media/v4l2-flash-led-class.h>
+
+#define AS_TIMER_US_TO_CODE(t) (((t) / 1000 - 100) / 50)
+#define AS_TIMER_CODE_TO_US(c) ((50 * (c) + 100) * 1000)
+
+/* Register definitions */
+
+/* Read-only Design info register: Reset state: xxxx 0001 */
+#define AS_DESIGN_INFO_REG 0x00
+#define AS_DESIGN_INFO_FACTORY(x) (((x) >> 4))
+#define AS_DESIGN_INFO_MODEL(x) ((x) & 0x0f)
+
+/* Read-only Version control register: Reset state: 0000 0000
+ * for first engineering samples
+ */
+#define AS_VERSION_CONTROL_REG 0x01
+#define AS_VERSION_CONTROL_RFU(x) (((x) >> 4))
+#define AS_VERSION_CONTROL_VERSION(x) ((x) & 0x0f)
+
+/* Read / Write (Indicator and timer register): Reset state: 0000 1111 */
+#define AS_INDICATOR_AND_TIMER_REG 0x02
+#define AS_INDICATOR_AND_TIMER_TIMEOUT_SHIFT 0
+#define AS_INDICATOR_AND_TIMER_VREF_SHIFT 4
+#define AS_INDICATOR_AND_TIMER_INDICATOR_SHIFT 6
+
+/* Read / Write (Current set register): Reset state: 0110 1001 */
+#define AS_CURRENT_SET_REG 0x03
+#define AS_CURRENT_ASSIST_LIGHT_SHIFT 0
+#define AS_CURRENT_LED_DET_ON (1 << 3)
+#define AS_CURRENT_FLASH_CURRENT_SHIFT 4
+
+/* Read / Write (Control register): Reset state: 1011 0100 */
+#define AS_CONTROL_REG 0x04
+#define AS_CONTROL_MODE_SETTING_SHIFT 0
+#define AS_CONTROL_STROBE_ON (1 << 2)
+#define AS_CONTROL_OUT_ON (1 << 3)
+#define AS_CONTROL_EXT_TORCH_ON (1 << 4)
+#define AS_CONTROL_STROBE_TYPE_EDGE (0 << 5)
+#define AS_CONTROL_STROBE_TYPE_LEVEL (1 << 5)
+#define AS_CONTROL_COIL_PEAK_SHIFT 6
+
+/* Read only (D3 is read / write) (Fault and info): Reset state: 0000 x000 */
+#define AS_FAULT_INFO_REG 0x05
+#define AS_FAULT_INFO_INDUCTOR_PEAK_LIMIT (1 << 1)
+#define AS_FAULT_INFO_INDICATOR_LED (1 << 2)
+#define AS_FAULT_INFO_LED_AMOUNT (1 << 3)
+#define AS_FAULT_INFO_TIMEOUT (1 << 4)
+#define AS_FAULT_INFO_OVER_TEMPERATURE (1 << 5)
+#define AS_FAULT_INFO_SHORT_CIRCUIT (1 << 6)
+#define AS_FAULT_INFO_OVER_VOLTAGE (1 << 7)
+
+/* Boost register */
+#define AS_BOOST_REG 0x0d
+#define AS_BOOST_CURRENT_DISABLE (0 << 0)
+#define AS_BOOST_CURRENT_ENABLE (1 << 0)
+
+/* Password register is used to unlock boost register writing */
+#define AS_PASSWORD_REG 0x0f
+#define AS_PASSWORD_UNLOCK_VALUE 0x55
+
+#define AS_NAME "as3645a"
+#define AS_I2C_ADDR (0x60 >> 1) /* W:0x60, R:0x61 */
+
+#define AS_FLASH_TIMEOUT_MIN 100000 /* us */
+#define AS_FLASH_TIMEOUT_MAX 850000
+#define AS_FLASH_TIMEOUT_STEP 50000
+
+#define AS_FLASH_INTENSITY_MIN 200000 /* uA */
+#define AS_FLASH_INTENSITY_MAX_1LED 500000
+#define AS_FLASH_INTENSITY_MAX_2LEDS 400000
+#define AS_FLASH_INTENSITY_STEP 20000
+
+#define AS_TORCH_INTENSITY_MIN 20000 /* uA */
+#define AS_TORCH_INTENSITY_MAX 160000
+#define AS_TORCH_INTENSITY_STEP 20000
+
+#define AS_INDICATOR_INTENSITY_MIN 0 /* uA */
+#define AS_INDICATOR_INTENSITY_MAX 10000
+#define AS_INDICATOR_INTENSITY_STEP 2500
+
+#define AS_PEAK_mA_MAX 2000
+#define AS_PEAK_mA_TO_REG(a) \
+ ((min_t(u32, AS_PEAK_mA_MAX, a) - 1250) / 250)
+
+enum as_mode {
+ AS_MODE_EXT_TORCH = 0 << AS_CONTROL_MODE_SETTING_SHIFT,
+ AS_MODE_INDICATOR = 1 << AS_CONTROL_MODE_SETTING_SHIFT,
+ AS_MODE_ASSIST = 2 << AS_CONTROL_MODE_SETTING_SHIFT,
+ AS_MODE_FLASH = 3 << AS_CONTROL_MODE_SETTING_SHIFT,
+};
+
+struct as3645a_config {
+ u32 flash_timeout_us;
+ u32 flash_max_ua;
+ u32 assist_max_ua;
+ u32 indicator_max_ua;
+ u32 voltage_reference;
+ u32 peak;
+};
+
+struct as3645a_names {
+ char flash[32];
+ char indicator[32];
+};
+
+struct as3645a {
+ struct i2c_client *client;
+
+ struct mutex mutex;
+
+ struct led_classdev_flash fled;
+ struct led_classdev iled_cdev;
+
+ struct v4l2_flash *vf;
+ struct v4l2_flash *vfind;
+
+ struct device_node *flash_node;
+ struct device_node *indicator_node;
+
+ struct as3645a_config cfg;
+
+ enum as_mode mode;
+ unsigned int timeout;
+ unsigned int flash_current;
+ unsigned int assist_current;
+ unsigned int indicator_current;
+ enum v4l2_flash_strobe_source strobe_source;
+};
+
+#define fled_to_as3645a(__fled) container_of(__fled, struct as3645a, fled)
+#define iled_cdev_to_as3645a(__iled_cdev) \
+ container_of(__iled_cdev, struct as3645a, iled_cdev)
+
+/* Return negative errno else zero on success */
+static int as3645a_write(struct as3645a *flash, u8 addr, u8 val)
+{
+ struct i2c_client *client = flash->client;
+ int rval;
+
+ rval = i2c_smbus_write_byte_data(client, addr, val);
+
+ dev_dbg(&client->dev, "Write Addr:%02X Val:%02X %s\n", addr, val,
+ rval < 0 ? "fail" : "ok");
+
+ return rval;
+}
+
+/* Return negative errno else a data byte received from the device. */
+static int as3645a_read(struct as3645a *flash, u8 addr)
+{
+ struct i2c_client *client = flash->client;
+ int rval;
+
+ rval = i2c_smbus_read_byte_data(client, addr);
+
+ dev_dbg(&client->dev, "Read Addr:%02X Val:%02X %s\n", addr, rval,
+ rval < 0 ? "fail" : "ok");
+
+ return rval;
+}
+
+/* -----------------------------------------------------------------------------
+ * Hardware configuration and trigger
+ */
+
+/**
+ * as3645a_set_config - Set flash configuration registers
+ * @flash: The flash
+ *
+ * Configure the hardware with flash, assist and indicator currents, as well as
+ * flash timeout.
+ *
+ * Return 0 on success, or a negative error code if an I2C communication error
+ * occurred.
+ */
+static int as3645a_set_current(struct as3645a *flash)
+{
+ u8 val;
+
+ val = (flash->flash_current << AS_CURRENT_FLASH_CURRENT_SHIFT)
+ | (flash->assist_current << AS_CURRENT_ASSIST_LIGHT_SHIFT)
+ | AS_CURRENT_LED_DET_ON;
+
+ return as3645a_write(flash, AS_CURRENT_SET_REG, val);
+}
+
+static int as3645a_set_timeout(struct as3645a *flash)
+{
+ u8 val;
+
+ val = flash->timeout << AS_INDICATOR_AND_TIMER_TIMEOUT_SHIFT;
+
+ val |= (flash->cfg.voltage_reference
+ << AS_INDICATOR_AND_TIMER_VREF_SHIFT)
+ | ((flash->indicator_current ? flash->indicator_current - 1 : 0)
+ << AS_INDICATOR_AND_TIMER_INDICATOR_SHIFT);
+
+ return as3645a_write(flash, AS_INDICATOR_AND_TIMER_REG, val);
+}
+
+/**
+ * as3645a_set_control - Set flash control register
+ * @flash: The flash
+ * @mode: Desired output mode
+ * @on: Desired output state
+ *
+ * Configure the hardware with output mode and state.
+ *
+ * Return 0 on success, or a negative error code if an I2C communication error
+ * occurred.
+ */
+static int
+as3645a_set_control(struct as3645a *flash, enum as_mode mode, bool on)
+{
+ u8 reg;
+
+ /* Configure output parameters and operation mode. */
+ reg = (flash->cfg.peak << AS_CONTROL_COIL_PEAK_SHIFT)
+ | (on ? AS_CONTROL_OUT_ON : 0)
+ | mode;
+
+ if (mode == AS_MODE_FLASH &&
+ flash->strobe_source == V4L2_FLASH_STROBE_SOURCE_EXTERNAL)
+ reg |= AS_CONTROL_STROBE_TYPE_LEVEL
+ | AS_CONTROL_STROBE_ON;
+
+ return as3645a_write(flash, AS_CONTROL_REG, reg);
+}
+
+static int as3645a_get_fault(struct led_classdev_flash *fled, u32 *fault)
+{
+ struct as3645a *flash = fled_to_as3645a(fled);
+ int rval;
+
+ /* NOTE: reading register clears fault status */
+ rval = as3645a_read(flash, AS_FAULT_INFO_REG);
+ if (rval < 0)
+ return rval;
+
+ if (rval & AS_FAULT_INFO_INDUCTOR_PEAK_LIMIT)
+ *fault |= LED_FAULT_OVER_CURRENT;
+
+ if (rval & AS_FAULT_INFO_INDICATOR_LED)
+ *fault |= LED_FAULT_INDICATOR;
+
+ dev_dbg(&flash->client->dev, "%u connected LEDs\n",
+ rval & AS_FAULT_INFO_LED_AMOUNT ? 2 : 1);
+
+ if (rval & AS_FAULT_INFO_TIMEOUT)
+ *fault |= LED_FAULT_TIMEOUT;
+
+ if (rval & AS_FAULT_INFO_OVER_TEMPERATURE)
+ *fault |= LED_FAULT_OVER_TEMPERATURE;
+
+ if (rval & AS_FAULT_INFO_SHORT_CIRCUIT)
+ *fault |= LED_FAULT_OVER_CURRENT;
+
+ if (rval & AS_FAULT_INFO_OVER_VOLTAGE)
+ *fault |= LED_FAULT_INPUT_VOLTAGE;
+
+ return rval;
+}
+
+static unsigned int __as3645a_current_to_reg(unsigned int min, unsigned int max,
+ unsigned int step,
+ unsigned int val)
+{
+ if (val < min)
+ val = min;
+
+ if (val > max)
+ val = max;
+
+ return (val - min) / step;
+}
+
+static unsigned int as3645a_current_to_reg(struct as3645a *flash, bool is_flash,
+ unsigned int ua)
+{
+ if (is_flash)
+ return __as3645a_current_to_reg(AS_TORCH_INTENSITY_MIN,
+ flash->cfg.assist_max_ua,
+ AS_TORCH_INTENSITY_STEP, ua);
+ else
+ return __as3645a_current_to_reg(AS_FLASH_INTENSITY_MIN,
+ flash->cfg.flash_max_ua,
+ AS_FLASH_INTENSITY_STEP, ua);
+}
+
+static int as3645a_set_indicator_brightness(struct led_classdev *iled_cdev,
+ enum led_brightness brightness)
+{
+ struct as3645a *flash = iled_cdev_to_as3645a(iled_cdev);
+ int rval;
+
+ flash->indicator_current = brightness;
+
+ rval = as3645a_set_timeout(flash);
+ if (rval)
+ return rval;
+
+ return as3645a_set_control(flash, AS_MODE_INDICATOR, brightness);
+}
+
+static int as3645a_set_assist_brightness(struct led_classdev *fled_cdev,
+ enum led_brightness brightness)
+{
+ struct led_classdev_flash *fled = lcdev_to_flcdev(fled_cdev);
+ struct as3645a *flash = fled_to_as3645a(fled);
+ int rval;
+
+ if (brightness) {
+ /* Register value 0 is 20 mA. */
+ flash->assist_current = brightness - 1;
+
+ rval = as3645a_set_current(flash);
+ if (rval)
+ return rval;
+ }
+
+ return as3645a_set_control(flash, AS_MODE_ASSIST, brightness);
+}
+
+static int as3645a_set_flash_brightness(struct led_classdev_flash *fled,
+ u32 brightness_ua)
+{
+ struct as3645a *flash = fled_to_as3645a(fled);
+
+ flash->flash_current = as3645a_current_to_reg(flash, true, brightness_ua);
+
+ return as3645a_set_current(flash);
+}
+
+static int as3645a_set_flash_timeout(struct led_classdev_flash *fled,
+ u32 timeout_us)
+{
+ struct as3645a *flash = fled_to_as3645a(fled);
+
+ flash->timeout = AS_TIMER_US_TO_CODE(timeout_us);
+
+ return as3645a_set_timeout(flash);
+}
+
+static int as3645a_set_strobe(struct led_classdev_flash *fled, bool state)
+{
+ struct as3645a *flash = fled_to_as3645a(fled);
+
+ return as3645a_set_control(flash, AS_MODE_FLASH, state);
+}
+
+static const struct led_flash_ops as3645a_led_flash_ops = {
+ .flash_brightness_set = as3645a_set_flash_brightness,
+ .timeout_set = as3645a_set_flash_timeout,
+ .strobe_set = as3645a_set_strobe,
+ .fault_get = as3645a_get_fault,
+};
+
+static int as3645a_setup(struct as3645a *flash)
+{
+ struct device *dev = &flash->client->dev;
+ u32 fault = 0;
+ int rval;
+
+ /* clear errors */
+ rval = as3645a_read(flash, AS_FAULT_INFO_REG);
+ if (rval < 0)
+ return rval;
+
+ dev_dbg(dev, "Fault info: %02x\n", rval);
+
+ rval = as3645a_set_current(flash);
+ if (rval < 0)
+ return rval;
+
+ rval = as3645a_set_timeout(flash);
+ if (rval < 0)
+ return rval;
+
+ rval = as3645a_set_control(flash, AS_MODE_INDICATOR, false);
+ if (rval < 0)
+ return rval;
+
+ /* read status */
+ rval = as3645a_get_fault(&flash->fled, &fault);
+ if (rval < 0)
+ return rval;
+
+ dev_dbg(dev, "AS_INDICATOR_AND_TIMER_REG: %02x\n",
+ as3645a_read(flash, AS_INDICATOR_AND_TIMER_REG));
+ dev_dbg(dev, "AS_CURRENT_SET_REG: %02x\n",
+ as3645a_read(flash, AS_CURRENT_SET_REG));
+ dev_dbg(dev, "AS_CONTROL_REG: %02x\n",
+ as3645a_read(flash, AS_CONTROL_REG));
+
+ return rval & ~AS_FAULT_INFO_LED_AMOUNT ? -EIO : 0;
+}
+
+static int as3645a_detect(struct as3645a *flash)
+{
+ struct device *dev = &flash->client->dev;
+ int rval, man, model, rfu, version;
+ const char *vendor;
+
+ rval = as3645a_read(flash, AS_DESIGN_INFO_REG);
+ if (rval < 0) {
+ dev_err(dev, "can't read design info reg\n");
+ return rval;
+ }
+
+ man = AS_DESIGN_INFO_FACTORY(rval);
+ model = AS_DESIGN_INFO_MODEL(rval);
+
+ rval = as3645a_read(flash, AS_VERSION_CONTROL_REG);
+ if (rval < 0) {
+ dev_err(dev, "can't read version control reg\n");
+ return rval;
+ }
+
+ rfu = AS_VERSION_CONTROL_RFU(rval);
+ version = AS_VERSION_CONTROL_VERSION(rval);
+
+ /* Verify the chip model and version. */
+ if (model != 0x01 || rfu != 0x00) {
+ dev_err(dev, "AS3645A not detected "
+ "(model %d rfu %d)\n", model, rfu);
+ return -ENODEV;
+ }
+
+ switch (man) {
+ case 1:
+ vendor = "AMS, Austria Micro Systems";
+ break;
+ case 2:
+ vendor = "ADI, Analog Devices Inc.";
+ break;
+ case 3:
+ vendor = "NSC, National Semiconductor";
+ break;
+ case 4:
+ vendor = "NXP";
+ break;
+ case 5:
+ vendor = "TI, Texas Instrument";
+ break;
+ default:
+ vendor = "Unknown";
+ }
+
+ dev_info(dev, "Chip vendor: %s (%d) Version: %d\n", vendor,
+ man, version);
+
+ rval = as3645a_write(flash, AS_PASSWORD_REG, AS_PASSWORD_UNLOCK_VALUE);
+ if (rval < 0)
+ return rval;
+
+ return as3645a_write(flash, AS_BOOST_REG, AS_BOOST_CURRENT_DISABLE);
+}
+
+static int as3645a_parse_node(struct as3645a *flash,
+ struct as3645a_names *names,
+ struct device_node *node)
+{
+ struct as3645a_config *cfg = &flash->cfg;
+ const char *name;
+ int rval;
+
+ flash->flash_node = of_get_child_by_name(node, "flash");
+ if (!flash->flash_node) {
+ dev_err(&flash->client->dev, "can't find flash node\n");
+ return -ENODEV;
+ }
+
+ rval = of_property_read_string(flash->flash_node, "label", &name);
+ if (!rval)
+ strlcpy(names->flash, name, sizeof(names->flash));
+ else
+ snprintf(names->flash, sizeof(names->flash),
+ "%s:flash", node->name);
+
+ rval = of_property_read_u32(flash->flash_node, "flash-timeout-us",
+ &cfg->flash_timeout_us);
+ if (rval < 0) {
+ dev_err(&flash->client->dev,
+ "can't read flash-timeout-us property for flash\n");
+ goto out_err;
+ }
+
+ rval = of_property_read_u32(flash->flash_node, "flash-max-microamp",
+ &cfg->flash_max_ua);
+ if (rval < 0) {
+ dev_err(&flash->client->dev,
+ "can't read flash-max-microamp property for flash\n");
+ goto out_err;
+ }
+
+ rval = of_property_read_u32(flash->flash_node, "led-max-microamp",
+ &cfg->assist_max_ua);
+ if (rval < 0) {
+ dev_err(&flash->client->dev,
+ "can't read led-max-microamp property for flash\n");
+ goto out_err;
+ }
+
+ of_property_read_u32(flash->flash_node, "voltage-reference",
+ &cfg->voltage_reference);
+
+ of_property_read_u32(flash->flash_node, "peak-current-limit",
+ &cfg->peak);
+ cfg->peak = AS_PEAK_mA_TO_REG(cfg->peak);
+
+ flash->indicator_node = of_get_child_by_name(node, "indicator");
+ if (!flash->indicator_node) {
+ dev_warn(&flash->client->dev,
+ "can't find indicator node\n");
+ goto out_err;
+ }
+
+ rval = of_property_read_string(flash->indicator_node, "label", &name);
+ if (!rval)
+ strlcpy(names->indicator, name, sizeof(names->indicator));
+ else
+ snprintf(names->indicator, sizeof(names->indicator),
+ "%s:indicator", node->name);
+
+ rval = of_property_read_u32(flash->indicator_node, "led-max-microamp",
+ &cfg->indicator_max_ua);
+ if (rval < 0) {
+ dev_err(&flash->client->dev,
+ "can't read led-max-microamp property for indicator\n");
+ goto out_err;
+ }
+
+ return 0;
+
+out_err:
+ of_node_put(flash->flash_node);
+ of_node_put(flash->indicator_node);
+
+ return rval;
+}
+
+static int as3645a_led_class_setup(struct as3645a *flash,
+ struct as3645a_names *names)
+{
+ struct led_classdev *fled_cdev = &flash->fled.led_cdev;
+ struct led_classdev *iled_cdev = &flash->iled_cdev;
+ struct led_flash_setting *cfg;
+ int rval;
+
+ iled_cdev->name = names->indicator;
+ iled_cdev->brightness_set_blocking = as3645a_set_indicator_brightness;
+ iled_cdev->max_brightness =
+ flash->cfg.indicator_max_ua / AS_INDICATOR_INTENSITY_STEP;
+ iled_cdev->flags = LED_CORE_SUSPENDRESUME;
+
+ rval = led_classdev_register(&flash->client->dev, iled_cdev);
+ if (rval < 0)
+ return rval;
+
+ cfg = &flash->fled.brightness;
+ cfg->min = AS_FLASH_INTENSITY_MIN;
+ cfg->max = flash->cfg.flash_max_ua;
+ cfg->step = AS_FLASH_INTENSITY_STEP;
+ cfg->val = flash->cfg.flash_max_ua;
+
+ cfg = &flash->fled.timeout;
+ cfg->min = AS_FLASH_TIMEOUT_MIN;
+ cfg->max = flash->cfg.flash_timeout_us;
+ cfg->step = AS_FLASH_TIMEOUT_STEP;
+ cfg->val = flash->cfg.flash_timeout_us;
+
+ flash->fled.ops = &as3645a_led_flash_ops;
+
+ fled_cdev->name = names->flash;
+ fled_cdev->brightness_set_blocking = as3645a_set_assist_brightness;
+ /* Value 0 is off in LED class. */
+ fled_cdev->max_brightness =
+ as3645a_current_to_reg(flash, false,
+ flash->cfg.assist_max_ua) + 1;
+ fled_cdev->flags = LED_DEV_CAP_FLASH | LED_CORE_SUSPENDRESUME;
+
+ rval = led_classdev_flash_register(&flash->client->dev, &flash->fled);
+ if (rval) {
+ led_classdev_unregister(iled_cdev);
+ dev_err(&flash->client->dev,
+ "led_classdev_flash_register() failed, error %d\n",
+ rval);
+ }
+
+ return rval;
+}
+
+static int as3645a_v4l2_setup(struct as3645a *flash)
+{
+ struct led_classdev_flash *fled = &flash->fled;
+ struct led_classdev *led = &fled->led_cdev;
+ struct v4l2_flash_config cfg = {
+ .intensity = {
+ .min = AS_TORCH_INTENSITY_MIN,
+ .max = flash->cfg.assist_max_ua,
+ .step = AS_TORCH_INTENSITY_STEP,
+ .val = flash->cfg.assist_max_ua,
+ },
+ };
+ struct v4l2_flash_config cfgind = {
+ .intensity = {
+ .min = AS_INDICATOR_INTENSITY_MIN,
+ .max = flash->cfg.indicator_max_ua,
+ .step = AS_INDICATOR_INTENSITY_STEP,
+ .val = flash->cfg.indicator_max_ua,
+ },
+ };
+
+ strlcpy(cfg.dev_name, led->name, sizeof(cfg.dev_name));
+ strlcpy(cfgind.dev_name, flash->iled_cdev.name, sizeof(cfg.dev_name));
+
+ flash->vf = v4l2_flash_init(
+ &flash->client->dev, of_fwnode_handle(flash->flash_node),
+ &flash->fled, NULL, &cfg);
+ if (IS_ERR(flash->vf))
+ return PTR_ERR(flash->vf);
+
+ flash->vfind = v4l2_flash_indicator_init(
+ &flash->client->dev, of_fwnode_handle(flash->indicator_node),
+ &flash->iled_cdev, &cfgind);
+ if (IS_ERR(flash->vfind)) {
+ v4l2_flash_release(flash->vf);
+ return PTR_ERR(flash->vfind);
+ }
+
+ return 0;
+}
+
+static int as3645a_probe(struct i2c_client *client)
+{
+ struct as3645a_names names;
+ struct as3645a *flash;
+ int rval;
+
+ if (client->dev.of_node == NULL)
+ return -ENODEV;
+
+ flash = devm_kzalloc(&client->dev, sizeof(*flash), GFP_KERNEL);
+ if (flash == NULL)
+ return -ENOMEM;
+
+ flash->client = client;
+
+ rval = as3645a_parse_node(flash, &names, client->dev.of_node);
+ if (rval < 0)
+ return rval;
+
+ rval = as3645a_detect(flash);
+ if (rval < 0)
+ goto out_put_nodes;
+
+ mutex_init(&flash->mutex);
+ i2c_set_clientdata(client, flash);
+
+ rval = as3645a_setup(flash);
+ if (rval)
+ goto out_mutex_destroy;
+
+ rval = as3645a_led_class_setup(flash, &names);
+ if (rval)
+ goto out_mutex_destroy;
+
+ rval = as3645a_v4l2_setup(flash);
+ if (rval)
+ goto out_led_classdev_flash_unregister;
+
+ return 0;
+
+out_led_classdev_flash_unregister:
+ led_classdev_flash_unregister(&flash->fled);
+
+out_mutex_destroy:
+ mutex_destroy(&flash->mutex);
+
+out_put_nodes:
+ of_node_put(flash->flash_node);
+ of_node_put(flash->indicator_node);
+
+ return rval;
+}
+
+static int as3645a_remove(struct i2c_client *client)
+{
+ struct as3645a *flash = i2c_get_clientdata(client);
+
+ as3645a_set_control(flash, AS_MODE_EXT_TORCH, false);
+
+ v4l2_flash_release(flash->vf);
+
+ led_classdev_flash_unregister(&flash->fled);
+ led_classdev_unregister(&flash->iled_cdev);
+
+ mutex_destroy(&flash->mutex);
+
+ of_node_put(flash->flash_node);
+ of_node_put(flash->indicator_node);
+
+ return 0;
+}
+
+static const struct i2c_device_id as3645a_id_table[] = {
+ { AS_NAME, 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, as3645a_id_table);
+
+static const struct of_device_id as3645a_of_table[] = {
+ { .compatible = "ams,as3645a" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, as3645a_of_table);
+
+static struct i2c_driver as3645a_i2c_driver = {
+ .driver = {
+ .of_match_table = as3645a_of_table,
+ .name = AS_NAME,
+ },
+ .probe_new = as3645a_probe,
+ .remove = as3645a_remove,
+ .id_table = as3645a_id_table,
+};
+
+module_i2c_driver(as3645a_i2c_driver);
+
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_AUTHOR("Sakari Ailus <sakari.ailus@iki.fi>");
+MODULE_DESCRIPTION("LED flash driver for AS3645A, LM3555 and their clones");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-max77693.c b/drivers/leds/leds-max77693.c
index 2d3062d53325..adf0f191f794 100644
--- a/drivers/leds/leds-max77693.c
+++ b/drivers/leds/leds-max77693.c
@@ -856,7 +856,7 @@ static void max77693_init_v4l2_flash_config(struct max77693_sub_led *sub_led,
"%s %d-%04x", sub_led->fled_cdev.led_cdev.name,
i2c_adapter_id(i2c->adapter), i2c->addr);
- s = &v4l2_sd_cfg->torch_intensity;
+ s = &v4l2_sd_cfg->intensity;
s->min = TORCH_IOUT_MIN;
s->max = sub_led->fled_cdev.led_cdev.max_brightness * TORCH_IOUT_STEP;
s->step = TORCH_IOUT_STEP;
@@ -931,7 +931,7 @@ static int max77693_register_led(struct max77693_sub_led *sub_led,
/* Register in the V4L2 subsystem. */
sub_led->v4l2_flash = v4l2_flash_init(dev, of_fwnode_handle(sub_node),
- fled_cdev, NULL, &v4l2_flash_ops,
+ fled_cdev, &v4l2_flash_ops,
&v4l2_sd_cfg);
if (IS_ERR(sub_led->v4l2_flash)) {
ret = PTR_ERR(sub_led->v4l2_flash);
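
The aat1290 and max77693 hunks track a V4L2 flash API change that this series relies on: v4l2_flash_init() no longer takes a separate indicator LED classdev (the NULL argument dropped here), and the torch_intensity field of struct v4l2_flash_config is now simply intensity. Indicator LEDs get their own sub-device instead, registered the way as3645a_v4l2_setup() above does it; roughly:

	vf = v4l2_flash_init(dev, fwnode, fled_cdev, &flash_ops, &cfg);
	if (IS_ERR(vf))
		return PTR_ERR(vf);

	vfind = v4l2_flash_indicator_init(dev, ind_fwnode, iled_cdev, &cfg_ind);
	if (IS_ERR(vfind)) {
		v4l2_flash_release(vf);
		return PTR_ERR(vfind);
	}

The variable names here are placeholders; the call sequence itself is the one used in the new leds-as3645a.c.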
diff --git a/drivers/lguest/Kconfig b/drivers/lguest/Kconfig
deleted file mode 100644
index 169172d2ba05..000000000000
--- a/drivers/lguest/Kconfig
+++ /dev/null
@@ -1,13 +0,0 @@
-config LGUEST
- tristate "Linux hypervisor example code"
- depends on X86_32 && EVENTFD && TTY && PCI_DIRECT
- select HVC_DRIVER
- ---help---
- This is a very simple module which allows you to run
- multiple instances of the same Linux kernel, using the
- "lguest" command found in the tools/lguest directory.
-
- Note that "lguest" is pronounced to rhyme with "fell quest",
- not "rustyvisor". See tools/lguest/lguest.txt.
-
- If unsure, say N. If curious, say M. If masochistic, say Y.
diff --git a/drivers/lguest/Makefile b/drivers/lguest/Makefile
deleted file mode 100644
index 16f52ee73994..000000000000
--- a/drivers/lguest/Makefile
+++ /dev/null
@@ -1,26 +0,0 @@
-# Host requires the other files, which can be a module.
-obj-$(CONFIG_LGUEST) += lg.o
-lg-y = core.o hypercalls.o page_tables.o interrupts_and_traps.o \
- segments.o lguest_user.o
-
-lg-$(CONFIG_X86_32) += x86/switcher_32.o x86/core.o
-
-Preparation Preparation!: PREFIX=P
-Guest: PREFIX=G
-Drivers: PREFIX=D
-Launcher: PREFIX=L
-Host: PREFIX=H
-Switcher: PREFIX=S
-Mastery: PREFIX=M
-Beer:
- @for f in Preparation Guest Drivers Launcher Host Switcher Mastery; do echo "{==- $$f -==}"; make -s $$f; done; echo "{==-==}"
-Preparation Preparation! Guest Drivers Launcher Host Switcher Mastery:
- @sh ../../tools/lguest/extract $(PREFIX) `find ../../* -name '*.[chS]' -wholename '*lguest*'`
-Puppy:
- @clear
- @printf " __ \n (___()'\`;\n /, /\`\n \\\\\\\"--\\\\\\ \n"
- @sleep 2; clear; printf "\n\n Sit!\n\n"; sleep 1; clear
- @printf " __ \n ()'\`; \n /\\|\` \n / | \n(/_)_|_ \n"
- @sleep 2; clear; printf "\n\n Stand!\n\n"; sleep 1; clear
- @printf " __ \n ()'\`; \n /\\|\` \n /._.= \n /| / \n(_\_)_ \n"
- @sleep 2; clear; printf "\n\n Good puppy!\n\n"; sleep 1; clear
diff --git a/drivers/lguest/README b/drivers/lguest/README
deleted file mode 100644
index b7db39a64c66..000000000000
--- a/drivers/lguest/README
+++ /dev/null
@@ -1,47 +0,0 @@
-Welcome, friend reader, to lguest.
-
-Lguest is an adventure, with you, the reader, as Hero. I can't think of many
-5000-line projects which offer both such capability and glimpses of future
-potential; it is an exciting time to be delving into the source!
-
-But be warned; this is an arduous journey of several hours or more! And as we
-know, all true Heroes are driven by a Noble Goal. Thus I offer a Beer (or
-equivalent) to anyone I meet who has completed this documentation.
-
-So get comfortable and keep your wits about you (both quick and humorous).
-Along your way to the Noble Goal, you will also gain masterly insight into
-lguest, and hypervisors and x86 virtualization in general.
-
-Our Quest is in seven parts: (best read with C highlighting turned on)
-
-I) Preparation
- - In which our potential hero is flown quickly over the landscape for a
- taste of its scope. Suitable for the armchair coders and other such
- persons of faint constitution.
-
-II) Guest
- - Where we encounter the first tantalising wisps of code, and come to
- understand the details of the life of a Guest kernel.
-
-III) Drivers
- - Whereby the Guest finds its voice and becomes useful, and our
- understanding of the Guest is completed.
-
-IV) Launcher
- - Where we trace back to the creation of the Guest, and thus begin our
- understanding of the Host.
-
-V) Host
- - Where we master the Host code, through a long and tortuous journey.
- Indeed, it is here that our hero is tested in the Bit of Despair.
-
-VI) Switcher
- - Where our understanding of the intertwined nature of Guests and Hosts
- is completed.
-
-VII) Mastery
- - Where our fully fledged hero grapples with the Great Question:
- "What next?"
-
-make Preparation!
-Rusty Russell.
diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
deleted file mode 100644
index 395ed1961dbf..000000000000
--- a/drivers/lguest/core.c
+++ /dev/null
@@ -1,398 +0,0 @@
-/*P:400
- * This contains run_guest() which actually calls into the Host<->Guest
- * Switcher and analyzes the return, such as determining if the Guest wants the
- * Host to do something. This file also contains useful helper routines.
-:*/
-#include <linux/module.h>
-#include <linux/stringify.h>
-#include <linux/stddef.h>
-#include <linux/io.h>
-#include <linux/mm.h>
-#include <linux/sched/signal.h>
-#include <linux/vmalloc.h>
-#include <linux/cpu.h>
-#include <linux/freezer.h>
-#include <linux/highmem.h>
-#include <linux/slab.h>
-#include <asm/paravirt.h>
-#include <asm/pgtable.h>
-#include <linux/uaccess.h>
-#include <asm/poll.h>
-#include <asm/asm-offsets.h>
-#include "lg.h"
-
-unsigned long switcher_addr;
-struct page **lg_switcher_pages;
-static struct vm_struct *switcher_text_vma;
-static struct vm_struct *switcher_stacks_vma;
-
-/* This One Big lock protects all inter-guest data structures. */
-DEFINE_MUTEX(lguest_lock);
-
-/*H:010
- * We need to set up the Switcher at a high virtual address. Remember the
- * Switcher is a few hundred bytes of assembler code which actually changes the
- * CPU to run the Guest, and then changes back to the Host when a trap or
- * interrupt happens.
- *
- * The Switcher code must be at the same virtual address in the Guest as the
- * Host since it will be running as the switchover occurs.
- *
- * Trying to map memory at a particular address is an unusual thing to do, so
- * it's not a simple one-liner.
- */
-static __init int map_switcher(void)
-{
- int i, err;
-
- /*
- * Map the Switcher in to high memory.
- *
- * It turns out that if we choose the address 0xFFC00000 (4MB under the
- * top virtual address), it makes setting up the page tables really
- * easy.
- */
-
- /* We assume Switcher text fits into a single page. */
- if (end_switcher_text - start_switcher_text > PAGE_SIZE) {
- printk(KERN_ERR "lguest: switcher text too large (%zu)\n",
- end_switcher_text - start_switcher_text);
- return -EINVAL;
- }
-
- /*
- * We allocate an array of struct page pointers. map_vm_area() wants
- * this, rather than just an array of pages.
- */
- lg_switcher_pages = kmalloc(sizeof(lg_switcher_pages[0])
- * TOTAL_SWITCHER_PAGES,
- GFP_KERNEL);
- if (!lg_switcher_pages) {
- err = -ENOMEM;
- goto out;
- }
-
- /*
- * Now we actually allocate the pages. The Guest will see these pages,
- * so we make sure they're zeroed.
- */
- for (i = 0; i < TOTAL_SWITCHER_PAGES; i++) {
- lg_switcher_pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
- if (!lg_switcher_pages[i]) {
- err = -ENOMEM;
- goto free_some_pages;
- }
- }
-
- /*
- * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
- * It goes in the first page, which we map in momentarily.
- */
- memcpy(kmap(lg_switcher_pages[0]), start_switcher_text,
- end_switcher_text - start_switcher_text);
- kunmap(lg_switcher_pages[0]);
-
- /*
- * We place the Switcher underneath the fixmap area, which is the
- * highest virtual address we can get. This is important, since we
- * tell the Guest it can't access this memory, so we want its ceiling
- * as high as possible.
- */
- switcher_addr = FIXADDR_START - TOTAL_SWITCHER_PAGES*PAGE_SIZE;
-
- /*
- * Now we reserve the "virtual memory area"s we want. We might
- * not get them in theory, but in practice it's worked so far.
- *
- * We want the switcher text to be read-only and executable, and
- * the stacks to be read-write and non-executable.
- */
- switcher_text_vma = __get_vm_area(PAGE_SIZE, VM_ALLOC|VM_NO_GUARD,
- switcher_addr,
- switcher_addr + PAGE_SIZE);
-
- if (!switcher_text_vma) {
- err = -ENOMEM;
- printk("lguest: could not map switcher pages high\n");
- goto free_pages;
- }
-
- switcher_stacks_vma = __get_vm_area(SWITCHER_STACK_PAGES * PAGE_SIZE,
- VM_ALLOC|VM_NO_GUARD,
- switcher_addr + PAGE_SIZE,
- switcher_addr + TOTAL_SWITCHER_PAGES * PAGE_SIZE);
- if (!switcher_stacks_vma) {
- err = -ENOMEM;
- printk("lguest: could not map switcher pages high\n");
- goto free_text_vma;
- }
-
- /*
- * This code actually sets up the pages we've allocated to appear at
- * switcher_addr. map_vm_area() takes the vma we allocated above, the
- * kind of pages we're mapping (kernel text pages and kernel writable
- * pages respectively), and a pointer to our array of struct pages.
- */
- err = map_vm_area(switcher_text_vma, PAGE_KERNEL_RX, lg_switcher_pages);
- if (err) {
- printk("lguest: text map_vm_area failed: %i\n", err);
- goto free_vmas;
- }
-
- err = map_vm_area(switcher_stacks_vma, PAGE_KERNEL,
- lg_switcher_pages + SWITCHER_TEXT_PAGES);
- if (err) {
- printk("lguest: stacks map_vm_area failed: %i\n", err);
- goto free_vmas;
- }
-
- /*
- * Now the Switcher is mapped at the right address, we can't fail!
- */
- printk(KERN_INFO "lguest: mapped switcher at %p\n",
- switcher_text_vma->addr);
- /* And we succeeded... */
- return 0;
-
-free_vmas:
- /* Undoes map_vm_area and __get_vm_area */
- vunmap(switcher_stacks_vma->addr);
-free_text_vma:
- vunmap(switcher_text_vma->addr);
-free_pages:
- i = TOTAL_SWITCHER_PAGES;
-free_some_pages:
- for (--i; i >= 0; i--)
- __free_pages(lg_switcher_pages[i], 0);
- kfree(lg_switcher_pages);
-out:
- return err;
-}
-/*:*/
-
-/* Cleaning up the mapping when the module is unloaded is almost... too easy. */
-static void unmap_switcher(void)
-{
- unsigned int i;
-
- /* vunmap() undoes *both* map_vm_area() and __get_vm_area(). */
- vunmap(switcher_text_vma->addr);
- vunmap(switcher_stacks_vma->addr);
- /* Now we just need to free the pages we copied the switcher into */
- for (i = 0; i < TOTAL_SWITCHER_PAGES; i++)
- __free_pages(lg_switcher_pages[i], 0);
- kfree(lg_switcher_pages);
-}
-
-/*H:032
- * Dealing With Guest Memory.
- *
- * Before we go too much further into the Host, we need to grok the routines
- * we use to deal with Guest memory.
- *
- * When the Guest gives us (what it thinks is) a physical address, we can use
- * the normal copy_from_user() & copy_to_user() on the corresponding place in
- * the memory region allocated by the Launcher.
- *
- * But we can't trust the Guest: it might be trying to access the Launcher
- * code. We have to check that the range is below the pfn_limit the Launcher
- * gave us. We have to make sure that addr + len doesn't give us a false
- * positive by overflowing, too.
- */
-bool lguest_address_ok(const struct lguest *lg,
- unsigned long addr, unsigned long len)
-{
- return addr+len <= lg->pfn_limit * PAGE_SIZE && (addr+len >= addr);
-}
-
-/*
- * This routine copies memory from the Guest. Here we can see how useful the
- * kill_lguest() routine we met in the Launcher can be: we return a random
- * value (all zeroes) instead of needing to return an error.
- */
-void __lgread(struct lg_cpu *cpu, void *b, unsigned long addr, unsigned bytes)
-{
- if (!lguest_address_ok(cpu->lg, addr, bytes)
- || copy_from_user(b, cpu->lg->mem_base + addr, bytes) != 0) {
- /* copy_from_user should do this, but as we rely on it... */
- memset(b, 0, bytes);
- kill_guest(cpu, "bad read address %#lx len %u", addr, bytes);
- }
-}
-
-/* This is the write (copy into Guest) version. */
-void __lgwrite(struct lg_cpu *cpu, unsigned long addr, const void *b,
- unsigned bytes)
-{
- if (!lguest_address_ok(cpu->lg, addr, bytes)
- || copy_to_user(cpu->lg->mem_base + addr, b, bytes) != 0)
- kill_guest(cpu, "bad write address %#lx len %u", addr, bytes);
-}
-/*:*/
-
-/*H:030
- * Let's jump straight to the main loop which runs the Guest.
- * Remember, this is called by the Launcher reading /dev/lguest, and we keep
- * going around and around until something interesting happens.
- */
-int run_guest(struct lg_cpu *cpu, unsigned long __user *user)
-{
- /* If the launcher asked for a register with LHREQ_GETREG */
- if (cpu->reg_read) {
- if (put_user(*cpu->reg_read, user))
- return -EFAULT;
- cpu->reg_read = NULL;
- return sizeof(*cpu->reg_read);
- }
-
- /* We stop running once the Guest is dead. */
- while (!cpu->lg->dead) {
- unsigned int irq;
- bool more;
-
- /* First we run any hypercalls the Guest wants done. */
- if (cpu->hcall)
- do_hypercalls(cpu);
-
- /* Do we have to tell the Launcher about a trap? */
- if (cpu->pending.trap) {
- if (copy_to_user(user, &cpu->pending,
- sizeof(cpu->pending)))
- return -EFAULT;
- return sizeof(cpu->pending);
- }
-
- /*
- * All long-lived kernel loops need to check with this horrible
- * thing called the freezer. If the Host is trying to suspend,
- * it stops us.
- */
- try_to_freeze();
-
- /* Check for signals */
- if (signal_pending(current))
- return -ERESTARTSYS;
-
- /*
- * Check if there are any interrupts which can be delivered now:
- * if so, this sets up the handler to be executed when we next
- * run the Guest.
- */
- irq = interrupt_pending(cpu, &more);
- if (irq < LGUEST_IRQS)
- try_deliver_interrupt(cpu, irq, more);
-
- /*
- * Just make absolutely sure the Guest is still alive. One of
- * those hypercalls could have been fatal, for example.
- */
- if (cpu->lg->dead)
- break;
-
- /*
- * If the Guest asked to be stopped, we sleep. The Guest's
- * clock timer will wake us.
- */
- if (cpu->halted) {
- set_current_state(TASK_INTERRUPTIBLE);
- /*
- * Just before we sleep, make sure no interrupt snuck in
- * which we should be doing.
- */
- if (interrupt_pending(cpu, &more) < LGUEST_IRQS)
- set_current_state(TASK_RUNNING);
- else
- schedule();
- continue;
- }
-
- /*
- * OK, now we're ready to jump into the Guest. First we put up
- * the "Do Not Disturb" sign:
- */
- local_irq_disable();
-
- /* Actually run the Guest until something happens. */
- lguest_arch_run_guest(cpu);
-
- /* Now we're ready to be interrupted or moved to other CPUs */
- local_irq_enable();
-
- /* Now we deal with whatever happened to the Guest. */
- lguest_arch_handle_trap(cpu);
- }
-
- /* Special case: Guest is 'dead' but wants a reboot. */
- if (cpu->lg->dead == ERR_PTR(-ERESTART))
- return -ERESTART;
-
- /* The Guest is dead => "No such file or directory" */
- return -ENOENT;
-}
-
-/*H:000
- * Welcome to the Host!
- *
- * By this point your brain has been tickled by the Guest code and numbed by
- * the Launcher code; prepare for it to be stretched by the Host code. This is
- * the heart. Let's begin at the initialization routine for the Host's lg
- * module.
- */
-static int __init init(void)
-{
- int err;
-
- /* Lguest can't run under Xen, VMI or itself. It does Tricky Stuff. */
- if (get_kernel_rpl() != 0) {
- printk("lguest is afraid of being a guest\n");
- return -EPERM;
- }
-
- /* First we put the Switcher up in very high virtual memory. */
- err = map_switcher();
- if (err)
- goto out;
-
- /* We might need to reserve an interrupt vector. */
- err = init_interrupts();
- if (err)
- goto unmap;
-
- /* /dev/lguest needs to be registered. */
- err = lguest_device_init();
- if (err)
- goto free_interrupts;
-
- /* Finally we do some architecture-specific setup. */
- lguest_arch_host_init();
-
- /* All good! */
- return 0;
-
-free_interrupts:
- free_interrupts();
-unmap:
- unmap_switcher();
-out:
- return err;
-}
-
-/* Cleaning up is just the same code, backwards. With a little French. */
-static void __exit fini(void)
-{
- lguest_device_remove();
- free_interrupts();
- unmap_switcher();
-
- lguest_arch_host_fini();
-}
-/*:*/
-
-/*
- * The Host side of lguest can be a module. This is a nice way for people to
- * play with it.
- */
-module_init(init);
-module_exit(fini);
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Rusty Russell <rusty@rustcorp.com.au>");
diff --git a/drivers/lguest/hypercalls.c b/drivers/lguest/hypercalls.c
deleted file mode 100644
index 601f81c04873..000000000000
--- a/drivers/lguest/hypercalls.c
+++ /dev/null
@@ -1,304 +0,0 @@
-/*P:500
- * Just as userspace programs request kernel operations through a system
- * call, the Guest requests Host operations through a "hypercall". You might
- * notice this nomenclature doesn't really follow any logic, but the name has
- * been around for long enough that we're stuck with it. As you'd expect, this
- * code is basically one big switch statement.
-:*/
-
-/* Copyright (C) 2006 Rusty Russell IBM Corporation
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-*/
-#include <linux/uaccess.h>
-#include <linux/syscalls.h>
-#include <linux/mm.h>
-#include <linux/ktime.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include "lg.h"
-
-/*H:120
- * This is the core hypercall routine: where the Guest gets what it wants.
- * Or gets killed. Or, in the case of LHCALL_SHUTDOWN, both.
- */
-static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
-{
- switch (args->arg0) {
- case LHCALL_FLUSH_ASYNC:
- /*
- * This call does nothing, except by breaking out of the Guest
- * it makes us process all the asynchronous hypercalls.
- */
- break;
- case LHCALL_SEND_INTERRUPTS:
- /*
- * This call does nothing too, but by breaking out of the Guest
- * it makes us process any pending interrupts.
- */
- break;
- case LHCALL_LGUEST_INIT:
- /*
- * You can't get here unless you're already initialized. Don't
- * do that.
- */
- kill_guest(cpu, "already have lguest_data");
- break;
- case LHCALL_SHUTDOWN: {
- char msg[128];
- /*
- * Shutdown is such a trivial hypercall that we do it in five
- * lines right here.
- *
- * If the lgread fails, it will call kill_guest() itself; the
- * kill_guest() with the message will be ignored.
- */
- __lgread(cpu, msg, args->arg1, sizeof(msg));
- msg[sizeof(msg)-1] = '\0';
- kill_guest(cpu, "CRASH: %s", msg);
- if (args->arg2 == LGUEST_SHUTDOWN_RESTART)
- cpu->lg->dead = ERR_PTR(-ERESTART);
- break;
- }
- case LHCALL_FLUSH_TLB:
- /* FLUSH_TLB comes in two flavors, depending on the argument: */
- if (args->arg1)
- guest_pagetable_clear_all(cpu);
- else
- guest_pagetable_flush_user(cpu);
- break;
-
- /*
- * All these calls simply pass the arguments through to the right
- * routines.
- */
- case LHCALL_NEW_PGTABLE:
- guest_new_pagetable(cpu, args->arg1);
- break;
- case LHCALL_SET_STACK:
- guest_set_stack(cpu, args->arg1, args->arg2, args->arg3);
- break;
- case LHCALL_SET_PTE:
-#ifdef CONFIG_X86_PAE
- guest_set_pte(cpu, args->arg1, args->arg2,
- __pte(args->arg3 | (u64)args->arg4 << 32));
-#else
- guest_set_pte(cpu, args->arg1, args->arg2, __pte(args->arg3));
-#endif
- break;
- case LHCALL_SET_PGD:
- guest_set_pgd(cpu->lg, args->arg1, args->arg2);
- break;
-#ifdef CONFIG_X86_PAE
- case LHCALL_SET_PMD:
- guest_set_pmd(cpu->lg, args->arg1, args->arg2);
- break;
-#endif
- case LHCALL_SET_CLOCKEVENT:
- guest_set_clockevent(cpu, args->arg1);
- break;
- case LHCALL_HALT:
- /* Similarly, this sets the halted flag for run_guest(). */
- cpu->halted = 1;
- break;
- default:
- /* It should be an architecture-specific hypercall. */
- if (lguest_arch_do_hcall(cpu, args))
- kill_guest(cpu, "Bad hypercall %li\n", args->arg0);
- }
-}
-
-/*H:124
- * Asynchronous hypercalls are easy: we just look in the array in the
- * Guest's "struct lguest_data" to see if any new ones are marked "ready".
- *
- * We are careful to do these in order: obviously we respect the order the
- * Guest put them in the ring, but we also promise the Guest that they will
- * happen before any normal hypercall (which is why we check this before
- * checking for a normal hcall).
- */
-static void do_async_hcalls(struct lg_cpu *cpu)
-{
- unsigned int i;
- u8 st[LHCALL_RING_SIZE];
-
- /* For simplicity, we copy the entire call status array in at once. */
- if (copy_from_user(&st, &cpu->lg->lguest_data->hcall_status, sizeof(st)))
- return;
-
- /* We process the "struct lguest_data" hcalls[] ring once. */
- for (i = 0; i < ARRAY_SIZE(st); i++) {
- struct hcall_args args;
- /*
- * We remember where we were up to from last time. This makes
- * sure that the hypercalls are done in the order the Guest
- * places them in the ring.
- */
- unsigned int n = cpu->next_hcall;
-
- /* 0xFF means there's no call here (yet). */
- if (st[n] == 0xFF)
- break;
-
- /*
- * OK, we have a hypercall. Increment the "next_hcall" cursor,
- * and wrap back to 0 if we reach the end.
- */
- if (++cpu->next_hcall == LHCALL_RING_SIZE)
- cpu->next_hcall = 0;
-
- /*
- * Copy the hypercall arguments into a local copy of the
- * hcall_args struct.
- */
- if (copy_from_user(&args, &cpu->lg->lguest_data->hcalls[n],
- sizeof(struct hcall_args))) {
- kill_guest(cpu, "Fetching async hypercalls");
- break;
- }
-
- /* Do the hypercall, same as a normal one. */
- do_hcall(cpu, &args);
-
- /* Mark the hypercall done. */
- if (put_user(0xFF, &cpu->lg->lguest_data->hcall_status[n])) {
- kill_guest(cpu, "Writing result for async hypercall");
- break;
- }
-
- /*
- * Stop doing hypercalls if they want to notify the Launcher:
- * it needs to service this first.
- */
- if (cpu->pending.trap)
- break;
- }
-}
-
-/*
- * Last of all, we look at what happens first of all. The very first time the
- * Guest makes a hypercall, we end up here to set things up:
- */
-static void initialize(struct lg_cpu *cpu)
-{
- /*
- * You can't do anything until you're initialized. The Guest knows the
- * rules, so we're unforgiving here.
- */
- if (cpu->hcall->arg0 != LHCALL_LGUEST_INIT) {
- kill_guest(cpu, "hypercall %li before INIT", cpu->hcall->arg0);
- return;
- }
-
- if (lguest_arch_init_hypercalls(cpu))
- kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);
-
- /*
- * The Guest tells us where we're not to deliver interrupts by putting
- * the instruction address into "struct lguest_data".
- */
- if (get_user(cpu->lg->noirq_iret, &cpu->lg->lguest_data->noirq_iret))
- kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);
-
- /*
- * We write the current time into the Guest's data page once so it can
- * set its clock.
- */
- write_timestamp(cpu);
-
- /* page_tables.c will also do some setup. */
- page_table_guest_data_init(cpu);
-
- /*
- * This is the one case where the above accesses might have been the
- * first write to a Guest page. This may have caused a copy-on-write
- * fault, but the old page might be (read-only) in the Guest
- * pagetable.
- */
- guest_pagetable_clear_all(cpu);
-}
-/*:*/
-
-/*M:013
- * If a Guest reads from a page (so creates a mapping) that it has never
- * written to, and then the Launcher writes to it (ie. the output of a virtual
- * device), the Guest will still see the old page. In practice, this never
- * happens: why would the Guest read a page which it has never written to? But
- * a similar scenario might one day bite us, so it's worth mentioning.
- *
- * Note that if we used a shared anonymous mapping in the Launcher instead of
- * mapping /dev/zero private, we wouldn't worry about copy-on-write. And we
- * need that to switch the Launcher to processes (away from threads) anyway.
-:*/
-
-/*H:100
- * Hypercalls
- *
- * Remember from the Guest, hypercalls come in two flavors: normal and
- * asynchronous. This file handles both types.
- */
-void do_hypercalls(struct lg_cpu *cpu)
-{
- /* Not initialized yet? This hypercall must do it. */
- if (unlikely(!cpu->lg->lguest_data)) {
- /* Set up the "struct lguest_data" */
- initialize(cpu);
- /* Hcall is done. */
- cpu->hcall = NULL;
- return;
- }
-
- /*
- * The Guest has initialized.
- *
- * Look in the hypercall ring for the async hypercalls:
- */
- do_async_hcalls(cpu);
-
- /*
- * If we stopped reading the hypercall ring because the Guest did a
- * NOTIFY to the Launcher, we want to return now. Otherwise we do
- * the hypercall.
- */
- if (!cpu->pending.trap) {
- do_hcall(cpu, cpu->hcall);
- /*
- * Tricky point: we reset the hcall pointer to mark the
- * hypercall as "done". We use the hcall pointer rather than
- * the trap number to indicate a hypercall is pending.
- * Normally it doesn't matter: the Guest will run again and
- * update the trap number before we come back here.
- *
- * However, if we are signalled or the Guest sends I/O to the
- * Launcher, the run_guest() loop will exit without running the
- * Guest. When it comes back it would try to re-run the
- * hypercall. Finding that bug sucked.
- */
- cpu->hcall = NULL;
- }
-}
-
-/*
- * This routine supplies the Guest with time: it's used for wallclock time at
- * initial boot and as a rough time source if the TSC isn't available.
- */
-void write_timestamp(struct lg_cpu *cpu)
-{
- struct timespec now;
- ktime_get_real_ts(&now);
- if (copy_to_user(&cpu->lg->lguest_data->time,
- &now, sizeof(struct timespec)))
- kill_guest(cpu, "Writing timestamp");
-}
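As an aside, the ring protocol that do_async_hcalls() above walks is simple enough to model outside the kernel. The stand-alone sketch below imitates it in plain user-space C; RING_SIZE, struct call, consume_ring() and the demo values are invented for illustration, while the 0xFF "empty" marker and the wrapping next-call cursor mirror the deleted code.

#include <stdio.h>

#define RING_SIZE 64            /* stands in for LHCALL_RING_SIZE */
#define EMPTY     0xFF          /* the same "no call here" marker as above */

struct call { unsigned long arg0; };

static unsigned char status[RING_SIZE];
static struct call calls[RING_SIZE];
static unsigned int next_call;  /* plays the role of cpu->next_hcall */

static void consume_ring(void)
{
        unsigned int i;

        /* Walk at most one full ring, in the order the calls were queued. */
        for (i = 0; i < RING_SIZE; i++) {
                unsigned int n = next_call;

                if (status[n] == EMPTY)         /* nothing here (yet) */
                        break;
                if (++next_call == RING_SIZE)   /* wrap the cursor */
                        next_call = 0;

                printf("hypercall arg0=%lu\n", calls[n].arg0);
                status[n] = EMPTY;              /* mark the slot done */
        }
}

int main(void)
{
        unsigned int i;

        for (i = 0; i < RING_SIZE; i++)
                status[i] = EMPTY;

        /* The "Guest" queues one call and marks its slot ready (non-0xFF). */
        calls[0].arg0 = 42;
        status[0] = 0;

        consume_ring();
        return 0;
}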
diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c
deleted file mode 100644
index 67392b6ab845..000000000000
--- a/drivers/lguest/interrupts_and_traps.c
+++ /dev/null
@@ -1,706 +0,0 @@
-/*P:800
- * Interrupts (traps) are complicated enough to earn their own file.
- * There are three classes of interrupts:
- *
- * 1) Real hardware interrupts which occur while we're running the Guest,
- * 2) Interrupts for virtual devices attached to the Guest, and
- * 3) Traps and faults from the Guest.
- *
- * Real hardware interrupts must be delivered to the Host, not the Guest.
- * Virtual interrupts must be delivered to the Guest, but we make them look
- * just like real hardware would deliver them. Traps from the Guest can be set
- * up to go directly back into the Guest, but sometimes the Host wants to see
- * them first, so we also have a way of "reflecting" them into the Guest as if
- * they had been delivered to it directly.
-:*/
-#include <linux/uaccess.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include "lg.h"
-
-/* Allow Guests to use a non-128 (ie. non-Linux) syscall trap. */
-static unsigned int syscall_vector = IA32_SYSCALL_VECTOR;
-module_param(syscall_vector, uint, 0444);
-
-/* The address of the interrupt handler is split across two words: */
-static unsigned long idt_address(u32 lo, u32 hi)
-{
- return (lo & 0x0000FFFF) | (hi & 0xFFFF0000);
-}
-
-/*
- * The "type" of the interrupt handler is a 4 bit field: we only support a
- * couple of types.
- */
-static int idt_type(u32 lo, u32 hi)
-{
- return (hi >> 8) & 0xF;
-}
-
-/* An IDT entry can't be used unless the "present" bit is set. */
-static bool idt_present(u32 lo, u32 hi)
-{
- return (hi & 0x8000);
-}
-
-/*
- * We need a helper to "push" a value onto the Guest's stack, since that's a
- * big part of what delivering an interrupt does.
- */
-static void push_guest_stack(struct lg_cpu *cpu, unsigned long *gstack, u32 val)
-{
- /* The stack grows downwards: move the pointer down, then write the value. */
- *gstack -= 4;
- lgwrite(cpu, *gstack, u32, val);
-}
-
-/*H:210
- * The push_guest_interrupt_stack() routine saves Guest state on the stack for
- * an interrupt or trap. The mechanics of delivering traps and interrupts to
- * the Guest are the same, except some traps have an "error code" which gets
- * pushed onto the stack as well: the caller tells us if this is one.
- *
- * We set up the stack just like the CPU does for a real interrupt, so it's
- * identical for the Guest (and the standard "iret" instruction will undo
- * it).
- */
-static void push_guest_interrupt_stack(struct lg_cpu *cpu, bool has_err)
-{
- unsigned long gstack, origstack;
- u32 eflags, ss, irq_enable;
- unsigned long virtstack;
-
- /*
- * There are two cases for interrupts: one where the Guest is already
- * in the kernel, and a more complex one where the Guest is in
- * userspace. We check the privilege level to find out.
- */
- if ((cpu->regs->ss&0x3) != GUEST_PL) {
- /*
- * The Guest told us their kernel stack with the SET_STACK
- * hypercall: both the virtual address and the segment.
- */
- virtstack = cpu->esp1;
- ss = cpu->ss1;
-
- origstack = gstack = guest_pa(cpu, virtstack);
- /*
- * We push the old stack segment and pointer onto the new
- * stack: when the Guest does an "iret" back from the interrupt
- * handler the CPU will notice they're dropping privilege
- * levels and expect these here.
- */
- push_guest_stack(cpu, &gstack, cpu->regs->ss);
- push_guest_stack(cpu, &gstack, cpu->regs->esp);
- } else {
- /* We're staying on the same Guest (kernel) stack. */
- virtstack = cpu->regs->esp;
- ss = cpu->regs->ss;
-
- origstack = gstack = guest_pa(cpu, virtstack);
- }
-
- /*
- * Remember that we never let the Guest actually disable interrupts, so
- * the "Interrupt Flag" bit is always set. We copy that bit from the
- * Guest's "irq_enabled" field into the eflags word: we saw the Guest
- * copy it back in "lguest_iret".
- */
- eflags = cpu->regs->eflags;
- if (get_user(irq_enable, &cpu->lg->lguest_data->irq_enabled) == 0
- && !(irq_enable & X86_EFLAGS_IF))
- eflags &= ~X86_EFLAGS_IF;
-
- /*
- * An interrupt is expected to push three things on the stack: the old
- * "eflags" word, the old code segment, and the old instruction
- * pointer.
- */
- push_guest_stack(cpu, &gstack, eflags);
- push_guest_stack(cpu, &gstack, cpu->regs->cs);
- push_guest_stack(cpu, &gstack, cpu->regs->eip);
-
- /* For the six traps which supply an error code, we push that, too. */
- if (has_err)
- push_guest_stack(cpu, &gstack, cpu->regs->errcode);
-
- /* Adjust the stack pointer and stack segment. */
- cpu->regs->ss = ss;
- cpu->regs->esp = virtstack + (gstack - origstack);
-}
-
-/*
- * This actually makes the Guest start executing the given interrupt/trap
- * handler.
- *
- * "lo" and "hi" are the two parts of the Interrupt Descriptor Table for this
- * interrupt or trap. It's split into two parts for traditional reasons: gcc
- * on i386 used to be frightened by 64 bit numbers.
- */
-static void guest_run_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi)
-{
- /* If we're already in the kernel, we don't change stacks. */
- if ((cpu->regs->ss&0x3) != GUEST_PL)
- cpu->regs->ss = cpu->esp1;
-
- /*
- * Set the code segment and the address to execute.
- */
- cpu->regs->cs = (__KERNEL_CS|GUEST_PL);
- cpu->regs->eip = idt_address(lo, hi);
-
- /*
- * Trapping always clears these flags:
- * TF: Trap flag
- * VM: Virtual 8086 mode
- * RF: Resume
- * NT: Nested task.
- */
- cpu->regs->eflags &=
- ~(X86_EFLAGS_TF|X86_EFLAGS_VM|X86_EFLAGS_RF|X86_EFLAGS_NT);
-
- /*
- * There are two kinds of interrupt handlers: 0xE is an "interrupt
- * gate", which disables interrupts as it is entered.
- */
- if (idt_type(lo, hi) == 0xE)
- if (put_user(0, &cpu->lg->lguest_data->irq_enabled))
- kill_guest(cpu, "Disabling interrupts");
-}
-
-/* This restores the eflags word which was pushed on the stack by a trap */
-static void restore_eflags(struct lg_cpu *cpu)
-{
- /* This is the physical address of the stack. */
- unsigned long stack_pa = guest_pa(cpu, cpu->regs->esp);
-
- /*
- * Stack looks like this:
- * Address Contents
- * esp EIP
- * esp + 4 CS
- * esp + 8 EFLAGS
- */
- cpu->regs->eflags = lgread(cpu, stack_pa + 8, u32);
- cpu->regs->eflags &=
- ~(X86_EFLAGS_TF|X86_EFLAGS_VM|X86_EFLAGS_RF|X86_EFLAGS_NT);
-}
-
-/*H:205
- * Virtual Interrupts.
- *
- * interrupt_pending() returns the first pending interrupt which isn't blocked
- * by the Guest. It is called before every entry to the Guest, and just before
- * we go to sleep when the Guest has halted itself.
- */
-unsigned int interrupt_pending(struct lg_cpu *cpu, bool *more)
-{
- unsigned int irq;
- DECLARE_BITMAP(blk, LGUEST_IRQS);
-
- /* If the Guest hasn't even initialized yet, we can do nothing. */
- if (!cpu->lg->lguest_data)
- return LGUEST_IRQS;
-
- /*
- * Take our "irqs_pending" array and remove any interrupts the Guest
- * wants blocked: the result ends up in "blk".
- */
- if (copy_from_user(&blk, cpu->lg->lguest_data->blocked_interrupts,
- sizeof(blk)))
- return LGUEST_IRQS;
- bitmap_andnot(blk, cpu->irqs_pending, blk, LGUEST_IRQS);
-
- /* Find the first interrupt. */
- irq = find_first_bit(blk, LGUEST_IRQS);
- *more = find_next_bit(blk, LGUEST_IRQS, irq+1);
-
- return irq;
-}
-
-/*
- * This actually diverts the Guest to running an interrupt handler, once an
- * interrupt has been identified by interrupt_pending().
- */
-void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more)
-{
- struct desc_struct *idt;
-
- BUG_ON(irq >= LGUEST_IRQS);
-
- /* If they're halted, interrupts restart them. */
- if (cpu->halted) {
- /* Re-enable interrupts. */
- if (put_user(X86_EFLAGS_IF, &cpu->lg->lguest_data->irq_enabled))
- kill_guest(cpu, "Re-enabling interrupts");
- cpu->halted = 0;
- } else {
- /* Otherwise we check if they have interrupts disabled. */
- u32 irq_enabled;
- if (get_user(irq_enabled, &cpu->lg->lguest_data->irq_enabled))
- irq_enabled = 0;
- if (!irq_enabled) {
- /* Make sure they know an IRQ is pending. */
- put_user(X86_EFLAGS_IF,
- &cpu->lg->lguest_data->irq_pending);
- return;
- }
- }
-
- /*
- * Look at the IDT entry the Guest gave us for this interrupt. The
- * first 32 (FIRST_EXTERNAL_VECTOR) entries are for traps, so we skip
- * over them.
- */
- idt = &cpu->arch.idt[FIRST_EXTERNAL_VECTOR+irq];
- /* If they don't have a handler (yet?), we just ignore it */
- if (idt_present(idt->a, idt->b)) {
- /* OK, mark it no longer pending and deliver it. */
- clear_bit(irq, cpu->irqs_pending);
-
- /*
- * They may be about to iret, where they asked us never to
- * deliver interrupts. In this case, we can emulate that iret
- * then immediately deliver the interrupt. This is basically
- * a noop: the iret would pop the interrupt frame and restore
- * eflags, and then we'd set it up again. So just restore the
- * eflags word and jump straight to the handler in this case.
- *
- * Denys Vlasenko points out that this isn't quite right: if
- * the iret was returning to userspace, then that interrupt
- * would reset the stack pointer (which the Guest told us
- * about via LHCALL_SET_STACK). But unless the Guest is being
- * *really* weird, that will be the same as the current stack
- * anyway.
- */
- if (cpu->regs->eip == cpu->lg->noirq_iret) {
- restore_eflags(cpu);
- } else {
- /*
- * set_guest_interrupt() takes a flag to say whether
- * this interrupt pushes an error code onto the stack
- * as well: virtual interrupts never do.
- */
- push_guest_interrupt_stack(cpu, false);
- }
- /* Actually make Guest cpu jump to handler. */
- guest_run_interrupt(cpu, idt->a, idt->b);
- }
-
- /*
- * Every time we deliver an interrupt, we update the timestamp in the
- * Guest's lguest_data struct. It would be better for the Guest if we
- * did this more often, but it can actually be quite slow: doing it
- * here is a compromise which means at least it gets updated every
- * timer interrupt.
- */
- write_timestamp(cpu);
-
- /*
- * If there are no other interrupts we want to deliver, clear
- * the pending flag.
- */
- if (!more)
- put_user(0, &cpu->lg->lguest_data->irq_pending);
-}
-
-/* And this is the routine when we want to set an interrupt for the Guest. */
-void set_interrupt(struct lg_cpu *cpu, unsigned int irq)
-{
- /*
- * Next time the Guest runs, the core code will see if it can deliver
- * this interrupt.
- */
- set_bit(irq, cpu->irqs_pending);
-
- /*
- * Make sure it sees it; it might be asleep (eg. halted), or running
- * the Guest right now, in which case kick_process() will knock it out.
- */
- if (!wake_up_process(cpu->tsk))
- kick_process(cpu->tsk);
-}
-/*:*/
-
-/*
- * Linux uses trap 128 for system calls. Plan9 uses 64, and Ron Minnich sent
- * me a patch, so we support that too. It'd be a big step for lguest if half
- * the Plan 9 user base were to start using it.
- *
- * Actually now I think of it, it's possible that Ron *is* half the Plan 9
- * userbase. Oh well.
- */
-bool could_be_syscall(unsigned int num)
-{
- /* Normal Linux IA32_SYSCALL_VECTOR or reserved vector? */
- return num == IA32_SYSCALL_VECTOR || num == syscall_vector;
-}
-
-/* The syscall vector it wants must be unused by Host. */
-bool check_syscall_vector(struct lguest *lg)
-{
- u32 vector;
-
- if (get_user(vector, &lg->lguest_data->syscall_vec))
- return false;
-
- return could_be_syscall(vector);
-}
-
-int init_interrupts(void)
-{
- /* If they want some strange system call vector, reserve it now */
- if (syscall_vector != IA32_SYSCALL_VECTOR) {
- if (test_bit(syscall_vector, used_vectors) ||
- vector_used_by_percpu_irq(syscall_vector)) {
- printk(KERN_ERR "lg: couldn't reserve syscall %u\n",
- syscall_vector);
- return -EBUSY;
- }
- set_bit(syscall_vector, used_vectors);
- }
-
- return 0;
-}
-
-void free_interrupts(void)
-{
- if (syscall_vector != IA32_SYSCALL_VECTOR)
- clear_bit(syscall_vector, used_vectors);
-}
-
-/*H:220
- * Now we've got the routines to deliver interrupts, delivering traps like
- * page fault is easy. The only trick is that Intel decided that some traps
- * should have error codes:
- */
-static bool has_err(unsigned int trap)
-{
- return (trap == 8 || (trap >= 10 && trap <= 14) || trap == 17);
-}
-
-/* deliver_trap() returns true if it could deliver the trap. */
-bool deliver_trap(struct lg_cpu *cpu, unsigned int num)
-{
- /*
- * Trap numbers are always 8 bit, but we set an impossible trap number
- * for traps inside the Switcher, so check that here.
- */
- if (num >= ARRAY_SIZE(cpu->arch.idt))
- return false;
-
- /*
- * Early on the Guest hasn't set the IDT entries (or maybe it put a
- * bogus one in): if we fail here, the Guest will be killed.
- */
- if (!idt_present(cpu->arch.idt[num].a, cpu->arch.idt[num].b))
- return false;
- push_guest_interrupt_stack(cpu, has_err(num));
- guest_run_interrupt(cpu, cpu->arch.idt[num].a,
- cpu->arch.idt[num].b);
- return true;
-}
-
-/*H:250
- * Here's the hard part: returning to the Host every time a trap happens
- * and then calling deliver_trap() and re-entering the Guest is slow.
- * Particularly because Guest userspace system calls are traps (usually trap
- * 128).
- *
- * So we'd like to set up the IDT to tell the CPU to deliver traps directly
- * into the Guest. This is possible, but the complexities cause the size of
- * this file to double! However, 150 lines of code is worth writing for taking
- * system calls down from 1750ns to 270ns. Plus, if lguest didn't do it, all
- * the other hypervisors would beat it up at lunchtime.
- *
- * This routine indicates if a particular trap number could be delivered
- * directly.
- *
- * Unfortunately, Linux 4.6 started using an interrupt gate instead of a
- * trap gate for syscalls, so this trick is ineffective. See Mastery for
- * how we could do this anyway...
- */
-static bool direct_trap(unsigned int num)
-{
- /*
- * Hardware interrupts don't go to the Guest at all (except system
- * call).
- */
- if (num >= FIRST_EXTERNAL_VECTOR && !could_be_syscall(num))
- return false;
-
- /*
- * The Host needs to see page faults (for shadow paging and to save the
- * fault address), general protection faults (in/out emulation) and
- * device not available (TS handling) and of course, the hypercall trap.
- */
- return num != 14 && num != 13 && num != 7 && num != LGUEST_TRAP_ENTRY;
-}
-/*:*/
-
-/*M:005
- * The Guest has the ability to turn its interrupt gates into trap gates,
- * if it is careful. The Host will let trap gates go directly to the
- * Guest, but the Guest needs the interrupts atomically disabled for an
- * interrupt gate. The Host could provide a mechanism to register more
- * "no-interrupt" regions, and the Guest could point the trap gate at
- * instructions within that region, where it can safely disable interrupts.
- */
-
-/*M:006
- * The Guests do not use the sysenter (fast system call) instruction,
- * because it's hardcoded to enter privilege level 0 and so can't go direct.
- * It's about twice as fast as the older "int 0x80" system call, so it might
- * still be worthwhile to handle it in the Switcher and lcall down to the
- * Guest. The sysenter semantics are hairy tho: search for that keyword in
- * entry.S
-:*/
-
-/*H:260
- * When we make traps go directly into the Guest, we need to make sure
- * the kernel stack is valid (ie. mapped in the page tables). Otherwise, the
- * CPU trying to deliver the trap will fault while trying to push the interrupt
- * words on the stack: this is called a double fault, and it forces us to kill
- * the Guest.
- *
- * Which is deeply unfair, because (literally!) it wasn't the Guests' fault.
- */
-void pin_stack_pages(struct lg_cpu *cpu)
-{
- unsigned int i;
-
- /*
- * Depending on the CONFIG_4KSTACKS option, the Guest can have one or
- * two pages of stack space.
- */
- for (i = 0; i < cpu->lg->stack_pages; i++)
- /*
- * The stack grows *downwards*, so the address we're given is the
- * start of the page after the kernel stack. Subtract one to
- * get back onto the first stack page, and keep subtracting to
- * get to the rest of the stack pages.
- */
- pin_page(cpu, cpu->esp1 - 1 - i * PAGE_SIZE);
-}
-
-/*
- * Direct traps also mean that we need to know whenever the Guest wants to use
- * a different kernel stack, so we can change the guest TSS to use that
- * stack. The TSS entries expect a virtual address, so unlike most addresses
- * the Guest gives us, the "esp" (stack pointer) value here is virtual, not
- * physical.
- *
- * In Linux each process has its own kernel stack, so this happens a lot: we
- * change stacks on each context switch.
- */
-void guest_set_stack(struct lg_cpu *cpu, u32 seg, u32 esp, unsigned int pages)
-{
- /*
- * You're not allowed a stack segment with privilege level 0: bad Guest!
- */
- if ((seg & 0x3) != GUEST_PL)
- kill_guest(cpu, "bad stack segment %i", seg);
- /* We only expect one or two stack pages. */
- if (pages > 2)
- kill_guest(cpu, "bad stack pages %u", pages);
- /* Save where the stack is, and how many pages */
- cpu->ss1 = seg;
- cpu->esp1 = esp;
- cpu->lg->stack_pages = pages;
- /* Make sure the new stack pages are mapped */
- pin_stack_pages(cpu);
-}
-
-/*
- * All this reference to mapping stacks leads us neatly into the other complex
- * part of the Host: page table handling.
- */
-
-/*H:235
- * This is the routine which actually checks the Guest's IDT entry and
- * transfers it into the entry in "struct lguest":
- */
-static void set_trap(struct lg_cpu *cpu, struct desc_struct *trap,
- unsigned int num, u32 lo, u32 hi)
-{
- u8 type = idt_type(lo, hi);
-
- /* We zero-out a not-present entry */
- if (!idt_present(lo, hi)) {
- trap->a = trap->b = 0;
- return;
- }
-
- /* We only support interrupt and trap gates. */
- if (type != 0xE && type != 0xF)
- kill_guest(cpu, "bad IDT type %i", type);
-
- /*
- * We only copy the handler address, present bit, privilege level and
- * type. The privilege level controls where the trap can be triggered
- * manually with an "int" instruction. This is usually GUEST_PL,
- * except for system calls which userspace can use.
- */
- trap->a = ((__KERNEL_CS|GUEST_PL)<<16) | (lo&0x0000FFFF);
- trap->b = (hi&0xFFFFEF00);
-}
-
-/*H:230
- * While we're here, dealing with delivering traps and interrupts to the
- * Guest, we might as well complete the picture: how the Guest tells us where
- * it wants them to go. This would be simple, except making traps fast
- * requires some tricks.
- *
- * We saw the Guest setting Interrupt Descriptor Table (IDT) entries with the
- * LHCALL_LOAD_IDT_ENTRY hypercall before: that comes here.
- */
-void load_guest_idt_entry(struct lg_cpu *cpu, unsigned int num, u32 lo, u32 hi)
-{
- /*
- * Guest never handles: NMI, doublefault, spurious interrupt or
- * hypercall. We ignore when it tries to set them.
- */
- if (num == 2 || num == 8 || num == 15 || num == LGUEST_TRAP_ENTRY)
- return;
-
- /*
- * Mark the IDT as changed: next time the Guest runs we'll know we have
- * to copy this again.
- */
- cpu->changed |= CHANGED_IDT;
-
- /* Check that the Guest doesn't try to step outside the bounds. */
- if (num >= ARRAY_SIZE(cpu->arch.idt))
- kill_guest(cpu, "Setting idt entry %u", num);
- else
- set_trap(cpu, &cpu->arch.idt[num], num, lo, hi);
-}
-
-/*
- * The default entry for each interrupt points into the Switcher routines which
- * simply return to the Host. The run_guest() loop will then call
- * deliver_trap() to bounce it back into the Guest.
- */
-static void default_idt_entry(struct desc_struct *idt,
- int trap,
- const unsigned long handler,
- const struct desc_struct *base)
-{
- /* A present interrupt gate. */
- u32 flags = 0x8e00;
-
- /*
- * Set the privilege level on the entry for the hypercall: this allows
- * the Guest to use the "int" instruction to trigger it.
- */
- if (trap == LGUEST_TRAP_ENTRY)
- flags |= (GUEST_PL << 13);
- else if (base)
- /*
- * Copy privilege level from what Guest asked for. This allows
- * debug (int 3) traps from Guest userspace, for example.
- */
- flags |= (base->b & 0x6000);
-
- /* Now pack it into the IDT entry in its weird format. */
- idt->a = (LGUEST_CS<<16) | (handler&0x0000FFFF);
- idt->b = (handler&0xFFFF0000) | flags;
-}
-
-/* When the Guest first starts, we put default entries into the IDT. */
-void setup_default_idt_entries(struct lguest_ro_state *state,
- const unsigned long *def)
-{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(state->guest_idt); i++)
- default_idt_entry(&state->guest_idt[i], i, def[i], NULL);
-}
-
-/*H:240
- * We don't use the IDT entries in the "struct lguest" directly, instead
- * we copy them into the IDT which we've set up for Guests on this CPU, just
- * before we run the Guest. This routine does that copy.
- */
-void copy_traps(const struct lg_cpu *cpu, struct desc_struct *idt,
- const unsigned long *def)
-{
- unsigned int i;
-
- /*
- * We can simply copy the direct traps, otherwise we use the default
- * ones in the Switcher: they will return to the Host.
- */
- for (i = 0; i < ARRAY_SIZE(cpu->arch.idt); i++) {
- const struct desc_struct *gidt = &cpu->arch.idt[i];
-
- /* If no Guest can ever override this trap, leave it alone. */
- if (!direct_trap(i))
- continue;
-
- /*
- * Only trap gates (type 15) can go direct to the Guest.
- * Interrupt gates (type 14) disable interrupts as they are
- * entered, which we never let the Guest do. Not present
- * entries (type 0x0) also can't go direct, of course.
- *
- * If it can't go direct, we still need to copy the priv. level:
- * they might want to give userspace access to a software
- * interrupt.
- */
- if (idt_type(gidt->a, gidt->b) == 0xF)
- idt[i] = *gidt;
- else
- default_idt_entry(&idt[i], i, def[i], gidt);
- }
-}
-
-/*H:200
- * The Guest Clock.
- *
- * There are two sources of virtual interrupts. We saw one in lguest_user.c:
- * the Launcher sending interrupts for virtual devices. The other is the Guest
- * timer interrupt.
- *
- * The Guest uses the LHCALL_SET_CLOCKEVENT hypercall to tell us how long until
- * the next timer interrupt (in nanoseconds). We use the high-resolution timer
- * infrastructure to set a callback at that time.
- *
- * 0 means "turn off the clock".
- */
-void guest_set_clockevent(struct lg_cpu *cpu, unsigned long delta)
-{
- ktime_t expires;
-
- if (unlikely(delta == 0)) {
- /* Clock event device is shutting down. */
- hrtimer_cancel(&cpu->hrt);
- return;
- }
-
- /*
- * We use wallclock time here, so the Guest might not be running for
- * all the time between now and the timer interrupt it asked for. This
- * is almost always the right thing to do.
- */
- expires = ktime_add_ns(ktime_get_real(), delta);
- hrtimer_start(&cpu->hrt, expires, HRTIMER_MODE_ABS);
-}
-
-/* This is the function called when the Guest's timer expires. */
-static enum hrtimer_restart clockdev_fn(struct hrtimer *timer)
-{
- struct lg_cpu *cpu = container_of(timer, struct lg_cpu, hrt);
-
- /* Remember the first interrupt is the timer interrupt. */
- set_interrupt(cpu, 0);
- return HRTIMER_NORESTART;
-}
-
-/* This sets up the timer for this Guest. */
-void init_clockdev(struct lg_cpu *cpu)
-{
- hrtimer_init(&cpu->hrt, CLOCK_REALTIME, HRTIMER_MODE_ABS);
- cpu->hrt.function = clockdev_fn;
-}
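The lo/hi gate layout that idt_address(), idt_type(), idt_present() and default_idt_entry() above pick apart is easier to see in isolation. The following is a small user-space sketch, not kernel code: it packs a present interrupt gate the same way default_idt_entry() does and unpacks it with the same masks; the selector and handler values are made up.

#include <stdio.h>
#include <stdint.h>

/* The same bit picking as idt_address(), idt_type() and idt_present() above. */
static unsigned long gate_address(uint32_t lo, uint32_t hi)
{
        return (lo & 0x0000FFFF) | (hi & 0xFFFF0000);
}

static int gate_type(uint32_t lo, uint32_t hi)
{
        (void)lo;
        return (hi >> 8) & 0xF;
}

static int gate_present(uint32_t lo, uint32_t hi)
{
        (void)lo;
        return !!(hi & 0x8000);
}

int main(void)
{
        /*
         * Pack a present interrupt gate (flags 0x8e00) the way
         * default_idt_entry() does; selector and handler are made up.
         */
        uint32_t selector = 0x0060, handler = 0xc01234ab, flags = 0x8e00;
        uint32_t lo = (selector << 16) | (handler & 0x0000FFFF);
        uint32_t hi = (handler & 0xFFFF0000) | flags;

        printf("addr=%#lx type=%#x present=%d\n",
               gate_address(lo, hi), gate_type(lo, hi), gate_present(lo, hi));
        return 0;
}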
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
deleted file mode 100644
index 2356a2318034..000000000000
--- a/drivers/lguest/lg.h
+++ /dev/null
@@ -1,258 +0,0 @@
-#ifndef _LGUEST_H
-#define _LGUEST_H
-
-#ifndef __ASSEMBLY__
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/stringify.h>
-#include <linux/lguest.h>
-#include <linux/lguest_launcher.h>
-#include <linux/wait.h>
-#include <linux/hrtimer.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-
-#include <asm/lguest.h>
-
-struct pgdir {
- unsigned long gpgdir;
- bool switcher_mapped;
- int last_host_cpu;
- pgd_t *pgdir;
-};
-
-/* We have two pages shared with guests, per cpu. */
-struct lguest_pages {
- /* This is the stack page mapped rw in guest */
- char spare[PAGE_SIZE - sizeof(struct lguest_regs)];
- struct lguest_regs regs;
-
- /* This is the host state & guest descriptor page, ro in guest */
- struct lguest_ro_state state;
-} __attribute__((aligned(PAGE_SIZE)));
-
-#define CHANGED_IDT 1
-#define CHANGED_GDT 2
-#define CHANGED_GDT_TLS 4 /* Actually a subset of CHANGED_GDT */
-#define CHANGED_ALL 3
-
-struct lg_cpu {
- unsigned int id;
- struct lguest *lg;
- struct task_struct *tsk;
- struct mm_struct *mm; /* == tsk->mm, but that becomes NULL on exit */
-
- u32 cr2;
- u32 esp1;
- u16 ss1;
-
- /* Bitmap of what has changed: see CHANGED_* above. */
- int changed;
-
- /* Pending operation. */
- struct lguest_pending pending;
-
- unsigned long *reg_read; /* register from LHREQ_GETREG */
-
- /* At the end of a page shared-mapped over lguest_pages in the Guest. */
- unsigned long regs_page;
- struct lguest_regs *regs;
-
- struct lguest_pages *last_pages;
-
- /* Initialization mode: linear map everything. */
- bool linear_pages;
- int cpu_pgd; /* Which pgd this cpu is currently using */
-
- /* If a hypercall was asked for, this points to the arguments. */
- struct hcall_args *hcall;
- u32 next_hcall;
-
- /* Virtual clock device */
- struct hrtimer hrt;
-
- /* Did the Guest tell us to halt? */
- int halted;
-
- /* Pending virtual interrupts */
- DECLARE_BITMAP(irqs_pending, LGUEST_IRQS);
-
- struct lg_cpu_arch arch;
-};
-
-/* The private info the thread maintains about the guest. */
-struct lguest {
- struct lguest_data __user *lguest_data;
- struct lg_cpu cpus[NR_CPUS];
- unsigned int nr_cpus;
-
- /* Valid guest memory pages must be < this. */
- u32 pfn_limit;
-
- /* Device memory is >= pfn_limit and < device_limit. */
- u32 device_limit;
-
- /*
- * This provides the offset to the base of guest-physical memory in the
- * Launcher.
- */
- void __user *mem_base;
- unsigned long kernel_address;
-
- struct pgdir pgdirs[4];
-
- unsigned long noirq_iret;
-
- unsigned int stack_pages;
- u32 tsc_khz;
-
- /* Dead? */
- const char *dead;
-};
-
-extern struct mutex lguest_lock;
-
-/* core.c: */
-bool lguest_address_ok(const struct lguest *lg,
- unsigned long addr, unsigned long len);
-void __lgread(struct lg_cpu *, void *, unsigned long, unsigned);
-void __lgwrite(struct lg_cpu *, unsigned long, const void *, unsigned);
-extern struct page **lg_switcher_pages;
-
-/*H:035
- * Using memory-copy operations like that is usually inconvenient, so we
- * have the following helper macros which read and write a specific type (often
- * an unsigned long).
- *
- * This reads into a variable of the given type then returns that.
- */
-#define lgread(cpu, addr, type) \
- ({ type _v; __lgread((cpu), &_v, (addr), sizeof(_v)); _v; })
-
-/* This checks that the variable is of the given type, then writes it out. */
-#define lgwrite(cpu, addr, type, val) \
- do { \
- typecheck(type, val); \
- __lgwrite((cpu), (addr), &(val), sizeof(val)); \
- } while(0)
-/* (end of memory access helper routines) :*/
-
-int run_guest(struct lg_cpu *cpu, unsigned long __user *user);
-
-/*
- * Helper macros to obtain the low 12 flag bits or the upper 20 bits (the page
- * frame number); this is only the first step in the migration to the kernel
- * types. pte_pfn is already defined in the kernel.
- */
-#define pgd_flags(x) (pgd_val(x) & ~PAGE_MASK)
-#define pgd_pfn(x) (pgd_val(x) >> PAGE_SHIFT)
-#define pmd_flags(x) (pmd_val(x) & ~PAGE_MASK)
-#define pmd_pfn(x) (pmd_val(x) >> PAGE_SHIFT)
-
-/* interrupts_and_traps.c: */
-unsigned int interrupt_pending(struct lg_cpu *cpu, bool *more);
-void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more);
-void set_interrupt(struct lg_cpu *cpu, unsigned int irq);
-bool deliver_trap(struct lg_cpu *cpu, unsigned int num);
-void load_guest_idt_entry(struct lg_cpu *cpu, unsigned int i,
- u32 low, u32 hi);
-void guest_set_stack(struct lg_cpu *cpu, u32 seg, u32 esp, unsigned int pages);
-void pin_stack_pages(struct lg_cpu *cpu);
-void setup_default_idt_entries(struct lguest_ro_state *state,
- const unsigned long *def);
-void copy_traps(const struct lg_cpu *cpu, struct desc_struct *idt,
- const unsigned long *def);
-void guest_set_clockevent(struct lg_cpu *cpu, unsigned long delta);
-bool send_notify_to_eventfd(struct lg_cpu *cpu);
-void init_clockdev(struct lg_cpu *cpu);
-bool check_syscall_vector(struct lguest *lg);
-bool could_be_syscall(unsigned int num);
-int init_interrupts(void);
-void free_interrupts(void);
-
-/* segments.c: */
-void setup_default_gdt_entries(struct lguest_ro_state *state);
-void setup_guest_gdt(struct lg_cpu *cpu);
-void load_guest_gdt_entry(struct lg_cpu *cpu, unsigned int i,
- u32 low, u32 hi);
-void guest_load_tls(struct lg_cpu *cpu, unsigned long tls_array);
-void copy_gdt(const struct lg_cpu *cpu, struct desc_struct *gdt);
-void copy_gdt_tls(const struct lg_cpu *cpu, struct desc_struct *gdt);
-
-/* page_tables.c: */
-int init_guest_pagetable(struct lguest *lg);
-void free_guest_pagetable(struct lguest *lg);
-void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable);
-void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 i);
-#ifdef CONFIG_X86_PAE
-void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 i);
-#endif
-void guest_pagetable_clear_all(struct lg_cpu *cpu);
-void guest_pagetable_flush_user(struct lg_cpu *cpu);
-void guest_set_pte(struct lg_cpu *cpu, unsigned long gpgdir,
- unsigned long vaddr, pte_t val);
-void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages);
-bool demand_page(struct lg_cpu *cpu, unsigned long cr2, int errcode,
- unsigned long *iomem);
-void pin_page(struct lg_cpu *cpu, unsigned long vaddr);
-bool __guest_pa(struct lg_cpu *cpu, unsigned long vaddr, unsigned long *paddr);
-unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr);
-void page_table_guest_data_init(struct lg_cpu *cpu);
-
-/* <arch>/core.c: */
-void lguest_arch_host_init(void);
-void lguest_arch_host_fini(void);
-void lguest_arch_run_guest(struct lg_cpu *cpu);
-void lguest_arch_handle_trap(struct lg_cpu *cpu);
-int lguest_arch_init_hypercalls(struct lg_cpu *cpu);
-int lguest_arch_do_hcall(struct lg_cpu *cpu, struct hcall_args *args);
-void lguest_arch_setup_regs(struct lg_cpu *cpu, unsigned long start);
-unsigned long *lguest_arch_regptr(struct lg_cpu *cpu, size_t reg_off, bool any);
-
-/* <arch>/switcher.S: */
-extern char start_switcher_text[], end_switcher_text[], switch_to_guest[];
-
-/* lguest_user.c: */
-int lguest_device_init(void);
-void lguest_device_remove(void);
-
-/* hypercalls.c: */
-void do_hypercalls(struct lg_cpu *cpu);
-void write_timestamp(struct lg_cpu *cpu);
-
-/*L:035
- * Let's step aside for the moment, to study one important routine that's used
- * widely in the Host code.
- *
- * There are many cases where the Guest can do something invalid, like pass crap
- * to a hypercall. Since only the Guest kernel can make hypercalls, it's quite
- * acceptable to simply terminate the Guest and give the Launcher a nicely
- * formatted reason. It's also simpler for the Guest itself, which doesn't
- * need to check most hypercalls for "success"; if you're still running, it
- * succeeded.
- *
- * Once this is called, the Guest will never run again, so most Host code can
- * call this then continue as if nothing had happened. This means many
- * functions don't have to explicitly return an error code, which keeps the
- * code simple.
- *
- * It also means that this can be called more than once: only the first one is
- * remembered. The only trick is that we still need to kill the Guest even if
- * we can't allocate memory to store the reason. Linux has a neat way of
- * packing error codes into invalid pointers, so we use that here.
- *
- * Like any macro which uses an "if", it is safely wrapped in a run-once "do {
- * } while(0)".
- */
-#define kill_guest(cpu, fmt...) \
-do { \
- if (!(cpu)->lg->dead) { \
- (cpu)->lg->dead = kasprintf(GFP_ATOMIC, fmt); \
- if (!(cpu)->lg->dead) \
- (cpu)->lg->dead = ERR_PTR(-ENOMEM); \
- } \
-} while(0)
-/* (End of aside) :*/
-
-#endif /* __ASSEMBLY__ */
-#endif /* _LGUEST_H */
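The kill_guest() macro above relies on Linux's trick of packing small negative error codes into the last page of pointer space (ERR_PTR()/IS_ERR()), so lg->dead can hold either a message string or an errno. A minimal user-space imitation of that trick, with invented helper names, looks like this:

#include <stdio.h>

#define MAX_ERRNO 4095

/* User-space stand-ins for the kernel's ERR_PTR(), PTR_ERR() and IS_ERR(). */
static void *err_ptr(long error)
{
        return (void *)error;
}

static long ptr_err(const void *ptr)
{
        return (long)ptr;
}

static int is_err(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
        const char *dead;

        /* The kasprintf() case: "dead" holds a real message string. */
        dead = "CRASH: guest asked for it";
        if (is_err(dead))
                printf("error %ld\n", ptr_err(dead));
        else
                printf("reason: %s\n", dead);

        /* The allocation-failure case: an errno packed into the pointer. */
        dead = err_ptr(-12);    /* -ENOMEM */
        if (is_err(dead))
                printf("error %ld\n", ptr_err(dead));
        return 0;
}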
diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c
deleted file mode 100644
index 1a6787bc9386..000000000000
--- a/drivers/lguest/lguest_user.c
+++ /dev/null
@@ -1,446 +0,0 @@
-/*P:200 This contains all the /dev/lguest code, whereby the userspace
- * launcher controls and communicates with the Guest. For example,
- * the first write will tell us the Guest's memory layout and entry
- * point. A read will run the Guest until something happens, such as
- * a signal or the Guest accessing a device.
-:*/
-#include <linux/uaccess.h>
-#include <linux/miscdevice.h>
-#include <linux/fs.h>
-#include <linux/sched.h>
-#include <linux/sched/mm.h>
-#include <linux/file.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-#include "lg.h"
-
-/*L:052
- * The Launcher can get the registers, and also set some of them.
- */
-static int getreg_setup(struct lg_cpu *cpu, const unsigned long __user *input)
-{
- unsigned long which;
-
- /* We re-use the ptrace structure to specify which register to read. */
- if (get_user(which, input) != 0)
- return -EFAULT;
-
- /*
- * We set up the cpu register pointer, and the Launcher's next read will
- * actually get the value (instead of running the guest).
- *
- * The last argument 'true' says we can access any register.
- */
- cpu->reg_read = lguest_arch_regptr(cpu, which, true);
- if (!cpu->reg_read)
- return -ENOENT;
-
- /* And because this is a write() call, we return the length used. */
- return sizeof(unsigned long) * 2;
-}
-
-static int setreg(struct lg_cpu *cpu, const unsigned long __user *input)
-{
- unsigned long which, value, *reg;
-
- /* We re-use the ptrace structure to specify which register to set. */
- if (get_user(which, input) != 0)
- return -EFAULT;
- input++;
- if (get_user(value, input) != 0)
- return -EFAULT;
-
- /* The last argument 'false' means we can't access all registers. */
- reg = lguest_arch_regptr(cpu, which, false);
- if (!reg)
- return -ENOENT;
-
- *reg = value;
-
- /* And because this is a write() call, we return the length used. */
- return sizeof(unsigned long) * 3;
-}
-
-/*L:050
- * Sending an interrupt is done by writing LHREQ_IRQ and an interrupt
- * number to /dev/lguest.
- */
-static int user_send_irq(struct lg_cpu *cpu, const unsigned long __user *input)
-{
- unsigned long irq;
-
- if (get_user(irq, input) != 0)
- return -EFAULT;
- if (irq >= LGUEST_IRQS)
- return -EINVAL;
-
- /*
- * Next time the Guest runs, the core code will see if it can deliver
- * this interrupt.
- */
- set_interrupt(cpu, irq);
- return 0;
-}
-
-/*L:053
- * Deliver a trap: this is used by the Launcher if it can't emulate
- * an instruction.
- */
-static int trap(struct lg_cpu *cpu, const unsigned long __user *input)
-{
- unsigned long trapnum;
-
- if (get_user(trapnum, input) != 0)
- return -EFAULT;
-
- if (!deliver_trap(cpu, trapnum))
- return -EINVAL;
-
- return 0;
-}
-
-/*L:040
- * Once our Guest is initialized, the Launcher makes it run by reading
- * from /dev/lguest.
- */
-static ssize_t read(struct file *file, char __user *user, size_t size, loff_t *o)
-{
- struct lguest *lg = file->private_data;
- struct lg_cpu *cpu;
- unsigned int cpu_id = *o;
-
- /* You must write LHREQ_INITIALIZE first! */
- if (!lg)
- return -EINVAL;
-
- /* Watch out for arbitrary vcpu indexes! */
- if (cpu_id >= lg->nr_cpus)
- return -EINVAL;
-
- cpu = &lg->cpus[cpu_id];
-
- /* If you're not the task which owns the Guest, go away. */
- if (current != cpu->tsk)
- return -EPERM;
-
- /* If the Guest is already dead, we indicate why */
- if (lg->dead) {
- size_t len;
-
- /* lg->dead either contains an error code, or a string. */
- if (IS_ERR(lg->dead))
- return PTR_ERR(lg->dead);
-
- /* We can only return as much as the buffer they read with. */
- len = min(size, strlen(lg->dead)+1);
- if (copy_to_user(user, lg->dead, len) != 0)
- return -EFAULT;
- return len;
- }
-
- /*
- * If we returned from read() last time because the Guest sent I/O,
- * clear the flag.
- */
- if (cpu->pending.trap)
- cpu->pending.trap = 0;
-
- /* Run the Guest until something interesting happens. */
- return run_guest(cpu, (unsigned long __user *)user);
-}
-
-/*L:025
- * This actually initializes a CPU. For the moment, a Guest is only
- * uniprocessor, so "id" is always 0.
- */
-static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip)
-{
- /* We have a limited number of CPUs in the lguest struct. */
- if (id >= ARRAY_SIZE(cpu->lg->cpus))
- return -EINVAL;
-
- /* Set up this CPU's id, and pointer back to the lguest struct. */
- cpu->id = id;
- cpu->lg = container_of(cpu, struct lguest, cpus[id]);
- cpu->lg->nr_cpus++;
-
- /* Each CPU has a timer it can set. */
- init_clockdev(cpu);
-
- /*
- * We need a complete page for the Guest registers: they are accessible
- * to the Guest and we can only grant it access to whole pages.
- */
- cpu->regs_page = get_zeroed_page(GFP_KERNEL);
- if (!cpu->regs_page)
- return -ENOMEM;
-
- /* We actually put the registers at the end of the page. */
- cpu->regs = (void *)cpu->regs_page + PAGE_SIZE - sizeof(*cpu->regs);
-
- /*
- * Now we initialize the Guest's registers, handing it the start
- * address.
- */
- lguest_arch_setup_regs(cpu, start_ip);
-
- /*
- * We keep a pointer to the Launcher task (ie. current task) for when
- * other Guests want to wake this one (eg. console input).
- */
- cpu->tsk = current;
-
- /*
- * We need to keep a pointer to the Launcher's memory map, because if
- * the Launcher dies we need to clean it up. If we don't keep a
- * reference, it is destroyed before close() is called.
- */
- cpu->mm = get_task_mm(cpu->tsk);
-
- /*
- * We remember which CPU's pages this Guest used last, for optimization
- * when the same Guest runs on the same CPU twice.
- */
- cpu->last_pages = NULL;
-
- /* No error == success. */
- return 0;
-}
-
-/*L:020
- * The initialization write supplies 4 pointer sized (32 or 64 bit) values (in
- * addition to the LHREQ_INITIALIZE value). These are:
- *
- * base: The start of the Guest-physical memory inside the Launcher memory.
- *
- * pfnlimit: The highest (Guest-physical) page number the Guest should be
- * allowed to access. The Guest memory lives inside the Launcher, so it sets
- * this to ensure the Guest can only reach its own memory.
- *
- * start: The first instruction to execute ("eip" in x86-speak).
- *
- * devicelimit: Guest-physical page numbers from pfnlimit up to (but not
- * including) this value are treated as device (MMIO) memory.
- */
-static int initialize(struct file *file, const unsigned long __user *input)
-{
- /* "struct lguest" contains all we (the Host) know about a Guest. */
- struct lguest *lg;
- int err;
- unsigned long args[4];
-
- /*
- * We grab the Big Lguest lock, which protects against multiple
- * simultaneous initializations.
- */
- mutex_lock(&lguest_lock);
- /* You can't initialize twice! Close the device and start again... */
- if (file->private_data) {
- err = -EBUSY;
- goto unlock;
- }
-
- if (copy_from_user(args, input, sizeof(args)) != 0) {
- err = -EFAULT;
- goto unlock;
- }
-
- lg = kzalloc(sizeof(*lg), GFP_KERNEL);
- if (!lg) {
- err = -ENOMEM;
- goto unlock;
- }
-
- /* Populate the easy fields of our "struct lguest" */
- lg->mem_base = (void __user *)args[0];
- lg->pfn_limit = args[1];
- lg->device_limit = args[3];
-
- /* This is the first cpu (cpu 0) and it will start booting at args[2] */
- err = lg_cpu_start(&lg->cpus[0], 0, args[2]);
- if (err)
- goto free_lg;
-
- /*
- * Initialize the Guest's shadow page tables. This allocates
- * memory, so can fail.
- */
- err = init_guest_pagetable(lg);
- if (err)
- goto free_regs;
-
- /* We keep our "struct lguest" in the file's private_data. */
- file->private_data = lg;
-
- mutex_unlock(&lguest_lock);
-
- /* And because this is a write() call, we return the length used. */
- return sizeof(args);
-
-free_regs:
- /* FIXME: This should be in free_vcpu */
- free_page(lg->cpus[0].regs_page);
-free_lg:
- kfree(lg);
-unlock:
- mutex_unlock(&lguest_lock);
- return err;
-}
-
-/*L:010
- * The first operation the Launcher does must be a write. All writes
- * start with an unsigned long number: for the first write this must be
- * LHREQ_INITIALIZE to set up the Guest. After that the Launcher can use
- * writes of other values to send interrupts or set up receipt of notifications.
- *
- * Note that we overload the "offset" in the /dev/lguest file to indicate what
- * CPU number we're dealing with. Currently this is always 0 since we only
- * support uniprocessor Guests, but you can see the beginnings of SMP support
- * here.
- */
-static ssize_t write(struct file *file, const char __user *in,
- size_t size, loff_t *off)
-{
- /*
- * Once the Guest is initialized, we hold the "struct lguest" in the
- * file private data.
- */
- struct lguest *lg = file->private_data;
- const unsigned long __user *input = (const unsigned long __user *)in;
- unsigned long req;
- struct lg_cpu *uninitialized_var(cpu);
- unsigned int cpu_id = *off;
-
- /* The first value tells us what this request is. */
- if (get_user(req, input) != 0)
- return -EFAULT;
- input++;
-
- /* If you haven't initialized, you must do that first. */
- if (req != LHREQ_INITIALIZE) {
- if (!lg || (cpu_id >= lg->nr_cpus))
- return -EINVAL;
- cpu = &lg->cpus[cpu_id];
-
- /* Once the Guest is dead, you can only read() why it died. */
- if (lg->dead)
- return -ENOENT;
- }
-
- switch (req) {
- case LHREQ_INITIALIZE:
- return initialize(file, input);
- case LHREQ_IRQ:
- return user_send_irq(cpu, input);
- case LHREQ_GETREG:
- return getreg_setup(cpu, input);
- case LHREQ_SETREG:
- return setreg(cpu, input);
- case LHREQ_TRAP:
- return trap(cpu, input);
- default:
- return -EINVAL;
- }
-}
-
-static int open(struct inode *inode, struct file *file)
-{
- file->private_data = NULL;
-
- return 0;
-}
-
-/*L:060
- * The final piece of interface code is the close() routine. It reverses
- * everything done in initialize(). This is usually called because the
- * Launcher exited.
- *
- * Note that the close routine returns 0 or a negative error number: it can't
- * really fail, but it can whine. I blame Sun for this wart, and K&R C for
- * letting them do it.
-:*/
-static int close(struct inode *inode, struct file *file)
-{
- struct lguest *lg = file->private_data;
- unsigned int i;
-
- /* If we never successfully initialized, there's nothing to clean up */
- if (!lg)
- return 0;
-
- /*
- * We need the big lock, to protect from inter-guest I/O and other
- * Launchers initializing guests.
- */
- mutex_lock(&lguest_lock);
-
- /* Free up the shadow page tables for the Guest. */
- free_guest_pagetable(lg);
-
- for (i = 0; i < lg->nr_cpus; i++) {
- /* Cancels the hrtimer set via LHCALL_SET_CLOCKEVENT. */
- hrtimer_cancel(&lg->cpus[i].hrt);
- /* We can free up the register page we allocated. */
- free_page(lg->cpus[i].regs_page);
- /*
- * Now all the memory cleanups are done, it's safe to release
- * the Launcher's memory management structure.
- */
- mmput(lg->cpus[i].mm);
- }
-
- /*
- * If lg->dead doesn't contain an error code it will be NULL or a
- * kmalloc()ed string, either of which is ok to hand to kfree().
- */
- if (!IS_ERR(lg->dead))
- kfree(lg->dead);
- /* Free the memory allocated to the lguest_struct */
- kfree(lg);
- /* Release lock and exit. */
- mutex_unlock(&lguest_lock);
-
- return 0;
-}
-
-/*L:000
- * Welcome to our journey through the Launcher!
- *
- * The Launcher is the Host userspace program which sets up, runs and services
- * the Guest. In fact, many comments in the Drivers which refer to "the Host"
- * doing things are inaccurate: the Launcher does all the device handling for
- * the Guest, but the Guest can't know that.
- *
- * Just to confuse you: to the Host kernel, the Launcher *is* the Guest and we
- * shall see more of that later.
- *
- * We begin our understanding with the Host kernel interface which the Launcher
- * uses: reading and writing a character device called /dev/lguest. All the
- * work happens in the read(), write() and close() routines:
- */
-static const struct file_operations lguest_fops = {
- .owner = THIS_MODULE,
- .open = open,
- .release = close,
- .write = write,
- .read = read,
- .llseek = default_llseek,
-};
-/*:*/
-
-/*
- * This is a textbook example of a "misc" character device. Populate a "struct
- * miscdevice" and register it with misc_register().
- */
-static struct miscdevice lguest_dev = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = "lguest",
- .fops = &lguest_fops,
-};
-
-int __init lguest_device_init(void)
-{
- return misc_register(&lguest_dev);
-}
-
-void __exit lguest_device_remove(void)
-{
- misc_deregister(&lguest_dev);
-}
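From the Launcher's side, every control operation above is just a write() of unsigned longs whose first element names the request, with the file offset standing in for the vcpu id. The sketch below shows that framing only; REQ_IRQ is a placeholder, since the real LHREQ_* numbers come from <linux/lguest_launcher.h> and are not reproduced here.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

#define REQ_IRQ 1UL     /* placeholder, not the real LHREQ_IRQ value */

/* Queue interrupt "irq" for vcpu "cpu_id" via the /dev/lguest write protocol. */
static int send_irq(int lguest_fd, unsigned int cpu_id, unsigned long irq)
{
        unsigned long buf[2] = { REQ_IRQ, irq };

        /* The file offset selects which virtual CPU the request targets. */
        if (pwrite(lguest_fd, buf, sizeof(buf), cpu_id) != (ssize_t)sizeof(buf))
                return -1;
        return 0;
}

int main(void)
{
        int fd = open("/dev/lguest", O_RDWR);

        if (fd < 0) {
                perror("open /dev/lguest");
                return 1;
        }
        if (send_irq(fd, 0, 3) < 0)
                perror("send_irq");
        close(fd);
        return 0;
}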
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
deleted file mode 100644
index 0bc127e9f16a..000000000000
--- a/drivers/lguest/page_tables.c
+++ /dev/null
@@ -1,1239 +0,0 @@
-/*P:700
- * The pagetable code, on the other hand, still shows the scars of
- * previous encounters. It's functional, and as neat as it can be in the
- * circumstances, but be wary, for these things are subtle and break easily.
- * The Guest provides a virtual to physical mapping, but we can neither trust
- * it nor use it: we verify and convert it here then point the CPU to the
- * converted Guest pages when running the Guest.
-:*/
-
-/* Copyright (C) Rusty Russell IBM Corporation 2013.
- * GPL v2 and any later version */
-#include <linux/mm.h>
-#include <linux/gfp.h>
-#include <linux/types.h>
-#include <linux/spinlock.h>
-#include <linux/random.h>
-#include <linux/percpu.h>
-#include <asm/tlbflush.h>
-#include <linux/uaccess.h>
-#include "lg.h"
-
-/*M:008
- * We hold references to pages, which prevents them from being swapped out.
- * It'd be nice to have a callback in the "struct mm_struct" when Linux wants
- * to swap out. If we had this, and a shrinker callback to trim PTE pages, we
- * could probably consider launching Guests as non-root.
-:*/
-
-/*H:300
- * The Page Table Code
- *
- * We use two-level page tables for the Guest, or three-level with PAE. If
- * you're not entirely comfortable with virtual addresses, physical addresses
- * and page tables then I recommend you review arch/x86/lguest/boot.c's "Page
- * Table Handling" (with diagrams!).
- *
- * The Guest keeps page tables, but we maintain the actual ones here: these are
- * called "shadow" page tables. Which is a very Guest-centric name: these are
- * the real page tables the CPU uses, although we keep them up to date to
- * reflect the Guest's. (See what I mean about weird naming? Since when do
- * shadows reflect anything?)
- *
- * Anyway, this is the most complicated part of the Host code. There are seven
- * parts to this:
- * (i) Looking up a page table entry when the Guest faults,
- * (ii) Making sure the Guest stack is mapped,
- * (iii) Setting up a page table entry when the Guest tells us one has changed,
- * (iv) Switching page tables,
- * (v) Flushing (throwing away) page tables,
- * (vi) Mapping the Switcher when the Guest is about to run,
- * (vii) Setting up the page tables initially.
-:*/
-
-/*
- * The Switcher uses the complete top PTE page. That's 1024 PTE entries (4MB)
- * or 512 PTE entries with PAE (2MB).
- */
-#define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1)
-
-/*
- * For PAE we need the PMD index as well. We use the last 2MB, so we
- * will need the last pmd entry of the last pmd page.
- */
-#ifdef CONFIG_X86_PAE
-#define CHECK_GPGD_MASK _PAGE_PRESENT
-#else
-#define CHECK_GPGD_MASK _PAGE_TABLE
-#endif
-
-/*H:320
- * The page table code is curly enough to need helper functions to keep it
- * clear and clean. The kernel itself provides many of them: that is one
- * advantage of insisting that the Guest and Host use the same CONFIG_X86_PAE
- * setting.
- *
- * There are two functions which return pointers to the shadow (aka "real")
- * page tables.
- *
- * spgd_addr() takes the virtual address and returns a pointer to the top-level
- * page directory entry (PGD) for that address. Since we keep track of several
- * page tables, the "i" argument tells us which one we're interested in (it's
- * usually the current one).
- */
-static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr)
-{
- unsigned int index = pgd_index(vaddr);
-
- /* Return a pointer to the index'th pgd entry for the i'th page table. */
- return &cpu->lg->pgdirs[i].pgdir[index];
-}
-
-#ifdef CONFIG_X86_PAE
-/*
- * This routine then takes the PGD entry given above, which contains the
- * address of the PMD page. It then returns a pointer to the PMD entry for the
- * given address.
- */
-static pmd_t *spmd_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
-{
- unsigned int index = pmd_index(vaddr);
- pmd_t *page;
-
- /* You should never call this if the PGD entry wasn't valid */
- BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
- page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
-
- return &page[index];
-}
-#endif
-
-/*
- * This routine then takes the page directory entry returned above, which
- * contains the address of the page table entry (PTE) page. It then returns a
- * pointer to the PTE entry for the given address.
- */
-static pte_t *spte_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
-{
-#ifdef CONFIG_X86_PAE
- pmd_t *pmd = spmd_addr(cpu, spgd, vaddr);
- pte_t *page = __va(pmd_pfn(*pmd) << PAGE_SHIFT);
-
- /* You should never call this if the PMD entry wasn't valid */
- BUG_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT));
-#else
- pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
- /* You should never call this if the PGD entry wasn't valid */
- BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
-#endif
-
- return &page[pte_index(vaddr)];
-}
-
-/*
- * These functions are just like the above, except they access the Guest
- * page tables. Hence they return a Guest address.
- */
-static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr)
-{
- unsigned int index = vaddr >> (PGDIR_SHIFT);
- return cpu->lg->pgdirs[cpu->cpu_pgd].gpgdir + index * sizeof(pgd_t);
-}
-
-#ifdef CONFIG_X86_PAE
-/* Follow the PGD to the PMD. */
-static unsigned long gpmd_addr(pgd_t gpgd, unsigned long vaddr)
-{
- unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
- BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
- return gpage + pmd_index(vaddr) * sizeof(pmd_t);
-}
-
-/* Follow the PMD to the PTE. */
-static unsigned long gpte_addr(struct lg_cpu *cpu,
- pmd_t gpmd, unsigned long vaddr)
-{
- unsigned long gpage = pmd_pfn(gpmd) << PAGE_SHIFT;
-
- BUG_ON(!(pmd_flags(gpmd) & _PAGE_PRESENT));
- return gpage + pte_index(vaddr) * sizeof(pte_t);
-}
-#else
-/* Follow the PGD to the PTE (no mid-level for !PAE). */
-static unsigned long gpte_addr(struct lg_cpu *cpu,
- pgd_t gpgd, unsigned long vaddr)
-{
- unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
-
- BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
- return gpage + pte_index(vaddr) * sizeof(pte_t);
-}
-#endif
-/*:*/
-
-/*M:007
- * get_pfn is slow: we could probably try to grab batches of pages here as
- * an optimization (ie. pre-faulting).
-:*/
-
-/*H:350
- * This routine takes a page number given by the Guest and converts it to
- * an actual, physical page number. It can fail for several reasons: the
- * virtual address might not be mapped by the Launcher, the write flag is set
- * and the page is read-only, or the write flag was set and the page was
- * shared so had to be copied, but we ran out of memory.
- *
- * This holds a reference to the page, so release_pte() is careful to put that
- * back.
- */
-static unsigned long get_pfn(unsigned long virtpfn, int write)
-{
- struct page *page;
-
- /* gup me one page at this address please! */
- if (get_user_pages_fast(virtpfn << PAGE_SHIFT, 1, write, &page) == 1)
- return page_to_pfn(page);
-
- /* This value indicates failure. */
- return -1UL;
-}
-
-/*H:340
- * Converting a Guest page table entry to a shadow (ie. real) page table
- * entry can be a little tricky. The flags are (almost) the same, but the
- * Guest PTE contains a virtual page number: the CPU needs the real page
- * number.
- */
-static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write)
-{
- unsigned long pfn, base, flags;
-
- /*
- * The Guest sets the global flag, because it thinks that it is using
- * PGE. We only told it to use PGE so it would tell us whether it was
- * flushing a kernel mapping or a userspace mapping. We don't actually
- * use the global bit, so throw it away.
- */
- flags = (pte_flags(gpte) & ~_PAGE_GLOBAL);
-
- /* The Guest's pages are offset inside the Launcher. */
- base = (unsigned long)cpu->lg->mem_base / PAGE_SIZE;
-
- /*
- * We need a temporary "unsigned long" variable to hold the answer from
- * get_pfn(), because it returns 0xFFFFFFFF on failure, which wouldn't
- * fit in spte.pfn. get_pfn() finds the real physical number of the
- * page, given the virtual number.
- */
- pfn = get_pfn(base + pte_pfn(gpte), write);
- if (pfn == -1UL) {
- kill_guest(cpu, "failed to get page %lu", pte_pfn(gpte));
- /*
- * When we destroy the Guest, we'll go through the shadow page
- * tables and release_pte() them. Make sure we don't think
- * this one is valid!
- */
- flags = 0;
- }
- /* Now we assemble our shadow PTE from the page number and flags. */
- return pfn_pte(pfn, __pgprot(flags));
-}
-
-/*H:460 And to complete the chain, release_pte() looks like this: */
-static void release_pte(pte_t pte)
-{
- /*
- * Remember that get_user_pages_fast() took a reference to the page, in
- * get_pfn()? We have to put it back now.
- */
- if (pte_flags(pte) & _PAGE_PRESENT)
- put_page(pte_page(pte));
-}
-/*:*/
-
-static bool gpte_in_iomem(struct lg_cpu *cpu, pte_t gpte)
-{
- /* We don't handle large pages. */
- if (pte_flags(gpte) & _PAGE_PSE)
- return false;
-
- return (pte_pfn(gpte) >= cpu->lg->pfn_limit
- && pte_pfn(gpte) < cpu->lg->device_limit);
-}
-
-static bool check_gpte(struct lg_cpu *cpu, pte_t gpte)
-{
- if ((pte_flags(gpte) & _PAGE_PSE) ||
- pte_pfn(gpte) >= cpu->lg->pfn_limit) {
- kill_guest(cpu, "bad page table entry");
- return false;
- }
- return true;
-}
-
-static bool check_gpgd(struct lg_cpu *cpu, pgd_t gpgd)
-{
- if ((pgd_flags(gpgd) & ~CHECK_GPGD_MASK) ||
- (pgd_pfn(gpgd) >= cpu->lg->pfn_limit)) {
- kill_guest(cpu, "bad page directory entry");
- return false;
- }
- return true;
-}
-
-#ifdef CONFIG_X86_PAE
-static bool check_gpmd(struct lg_cpu *cpu, pmd_t gpmd)
-{
- if ((pmd_flags(gpmd) & ~_PAGE_TABLE) ||
- (pmd_pfn(gpmd) >= cpu->lg->pfn_limit)) {
- kill_guest(cpu, "bad page middle directory entry");
- return false;
- }
- return true;
-}
-#endif
-
-/*H:331
- * This is the core routine to walk the shadow page tables and find the page
- * table entry for a specific address.
- *
- * If allocate is set, then we allocate any missing levels, setting the flags
- * on the new page directory and mid-level directories using the arguments
- * (which are copied from the Guest's page table entries).
- */
-static pte_t *find_spte(struct lg_cpu *cpu, unsigned long vaddr, bool allocate,
- int pgd_flags, int pmd_flags)
-{
- pgd_t *spgd;
- /* Mid level for PAE. */
-#ifdef CONFIG_X86_PAE
- pmd_t *spmd;
-#endif
-
- /* Get top level entry. */
- spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
- if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) {
- /* No shadow entry: allocate a new shadow PTE page. */
- unsigned long ptepage;
-
- /* If they didn't want us to allocate anything, stop. */
- if (!allocate)
- return NULL;
-
- ptepage = get_zeroed_page(GFP_KERNEL);
- /*
- * This is not really the Guest's fault, but killing it is
- * simple for this corner case.
- */
- if (!ptepage) {
- kill_guest(cpu, "out of memory allocating pte page");
- return NULL;
- }
- /*
- * And we copy the flags to the shadow PGD entry. The page
- * number in the shadow PGD is the page we just allocated.
- */
- set_pgd(spgd, __pgd(__pa(ptepage) | pgd_flags));
- }
-
- /*
- * Intel's Physical Address Extension actually uses three levels of
- * page tables, so we need to look in the mid-level.
- */
-#ifdef CONFIG_X86_PAE
- /* Now look at the mid-level shadow entry. */
- spmd = spmd_addr(cpu, *spgd, vaddr);
-
- if (!(pmd_flags(*spmd) & _PAGE_PRESENT)) {
- /* No shadow entry: allocate a new shadow PTE page. */
- unsigned long ptepage;
-
- /* If they didn't want us to allocate anything, stop. */
- if (!allocate)
- return NULL;
-
- ptepage = get_zeroed_page(GFP_KERNEL);
-
- /*
- * This is not really the Guest's fault, but killing it is
- * simple for this corner case.
- */
- if (!ptepage) {
- kill_guest(cpu, "out of memory allocating pmd page");
- return NULL;
- }
-
- /*
- * And we copy the flags to the shadow PMD entry. The page
- * number in the shadow PMD is the page we just allocated.
- */
- set_pmd(spmd, __pmd(__pa(ptepage) | pmd_flags));
- }
-#endif
-
- /* Get the pointer to the shadow PTE entry we're going to set. */
- return spte_addr(cpu, *spgd, vaddr);
-}
-
-/*H:330
- * (i) Looking up a page table entry when the Guest faults.
- *
- * We saw this call in run_guest(): when we see a page fault in the Guest, we
- * come here. That's because we only set up the shadow page tables lazily as
- * they're needed, so we get page faults all the time and quietly fix them up
- * and return to the Guest without it knowing.
- *
- * If we fixed up the fault (ie. we mapped the address), this routine returns
- * true. Otherwise, it was a real fault and we need to tell the Guest.
- *
- * There's a corner case: they're trying to access memory between
- * pfn_limit and device_limit, which is I/O memory. In this case, we
- * return false and set @iomem to the physical address, so the
- * Launcher can handle the instruction manually.
- */
-bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode,
- unsigned long *iomem)
-{
- unsigned long gpte_ptr;
- pte_t gpte;
- pte_t *spte;
- pmd_t gpmd;
- pgd_t gpgd;
-
- *iomem = 0;
-
- /* We never demand page the Switcher, so trying is a mistake. */
- if (vaddr >= switcher_addr)
- return false;
-
- /* First step: get the top-level Guest page table entry. */
- if (unlikely(cpu->linear_pages)) {
- /* Faking up a linear mapping. */
- gpgd = __pgd(CHECK_GPGD_MASK);
- } else {
- gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
- /* Toplevel not present? We can't map it in. */
- if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
- return false;
-
- /*
- * This kills the Guest if it has weird flags or tries to
- * refer to a "physical" address outside the bounds.
- */
- if (!check_gpgd(cpu, gpgd))
- return false;
- }
-
- /* This "mid-level" entry is only used for non-linear, PAE mode. */
- gpmd = __pmd(_PAGE_TABLE);
-
-#ifdef CONFIG_X86_PAE
- if (likely(!cpu->linear_pages)) {
- gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
- /* Middle level not present? We can't map it in. */
- if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
- return false;
-
- /*
- * This kills the Guest if it has weird flags or tries to
- * refer to a "physical" address outside the bounds.
- */
- if (!check_gpmd(cpu, gpmd))
- return false;
- }
-
- /*
- * OK, now we look at the lower level in the Guest page table: keep its
- * address, because we might update it later.
- */
- gpte_ptr = gpte_addr(cpu, gpmd, vaddr);
-#else
- /*
- * OK, now we look at the lower level in the Guest page table: keep its
- * address, because we might update it later.
- */
- gpte_ptr = gpte_addr(cpu, gpgd, vaddr);
-#endif
-
- if (unlikely(cpu->linear_pages)) {
- /* Linear? Make up a PTE which points to same page. */
- gpte = __pte((vaddr & PAGE_MASK) | _PAGE_RW | _PAGE_PRESENT);
- } else {
- /* Read the actual PTE value. */
- gpte = lgread(cpu, gpte_ptr, pte_t);
- }
-
- /* If this page isn't in the Guest page tables, we can't page it in. */
- if (!(pte_flags(gpte) & _PAGE_PRESENT))
- return false;
-
- /*
- * Check they're not trying to write to a page the Guest wants
- * read-only (errcode & 2 means it was a write).
- */
- if ((errcode & 2) && !(pte_flags(gpte) & _PAGE_RW))
- return false;
-
- /* User access to a kernel-only page? (errcode & 4 means a user-mode access) */
- if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER))
- return false;
-
- /* If they're accessing io memory, we expect a fault. */
- if (gpte_in_iomem(cpu, gpte)) {
- *iomem = (pte_pfn(gpte) << PAGE_SHIFT) | (vaddr & ~PAGE_MASK);
- return false;
- }
-
- /*
- * Check that the Guest PTE flags are OK, and the page number is below
- * the pfn_limit (ie. not mapping the Launcher binary).
- */
- if (!check_gpte(cpu, gpte))
- return false;
-
- /* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */
- gpte = pte_mkyoung(gpte);
- if (errcode & 2)
- gpte = pte_mkdirty(gpte);
-
- /* Get the pointer to the shadow PTE entry we're going to set. */
- spte = find_spte(cpu, vaddr, true, pgd_flags(gpgd), pmd_flags(gpmd));
- if (!spte)
- return false;
-
- /*
- * If there was a valid shadow PTE entry here before, we release it.
- * This can happen with a write to a previously read-only entry.
- */
- release_pte(*spte);
-
- /*
- * If this is a write, we insist that the Guest page is writable (the
- * final arg to gpte_to_spte()).
- */
- if (pte_dirty(gpte))
- *spte = gpte_to_spte(cpu, gpte, 1);
- else
- /*
- * If this is a read, don't set the "writable" bit in the page
- * table entry, even if the Guest says it's writable. That way
- * we will come back here when a write does actually occur, so
- * we can update the Guest's _PAGE_DIRTY flag.
- */
- set_pte(spte, gpte_to_spte(cpu, pte_wrprotect(gpte), 0));
-
- /*
- * Finally, we write the Guest PTE entry back: we've set the
- * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags.
- */
- if (likely(!cpu->linear_pages))
- lgwrite(cpu, gpte_ptr, pte_t, gpte);
-
- /*
- * The fault is fixed, the page table is populated, the mapping
- * manipulated, the result returned and the code complete. A small
- * delay and a trace of alliteration are the only indications the Guest
- * has that a page fault occurred at all.
- */
- return true;
-}
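(For reference, the errcode bits tested in demand_page() above are the standard x86 page-fault error code bits. A minimal sketch with illustrative macro names — these are not the kernel's own definitions:)

#define PF_PRESENT 0x1   /* fault on a present page (protection fault) */
#define PF_WRITE   0x2   /* fault was caused by a write                */
#define PF_USER    0x4   /* fault came from user-mode code             */

/* demand_page() tests (errcode & PF_WRITE) and (errcode & PF_USER). */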
-
-/*H:360
- * (ii) Making sure the Guest stack is mapped.
- *
- * Remember that direct traps into the Guest need a mapped Guest kernel stack.
- * pin_stack_pages() calls us here: we could simply call demand_page(), but as
- * we've seen that logic is quite long, and usually the stack pages are already
- * mapped, so it's overkill.
- *
- * This is a quick version which answers the question: is this virtual address
- * mapped by the shadow page tables, and is it writable?
- */
-static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr)
-{
- pte_t *spte;
- unsigned long flags;
-
- /* You can't put your stack in the Switcher! */
- if (vaddr >= switcher_addr)
- return false;
-
- /* If there's no shadow PTE, it's not writable. */
- spte = find_spte(cpu, vaddr, false, 0, 0);
- if (!spte)
- return false;
-
- /*
- * Check the flags on the pte entry itself: it must be present and
- * writable.
- */
- flags = pte_flags(*spte);
- return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW);
-}
-
-/*
- * So, when pin_stack_pages() asks us to pin a page, we check if it's already
- * in the page tables, and if not, we call demand_page() with error code 2
- * (meaning "write").
- */
-void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
-{
- unsigned long iomem;
-
- if (!page_writable(cpu, vaddr) && !demand_page(cpu, vaddr, 2, &iomem))
- kill_guest(cpu, "bad stack page %#lx", vaddr);
-}
-/*:*/
-
-#ifdef CONFIG_X86_PAE
-static void release_pmd(pmd_t *spmd)
-{
- /* If the entry's not present, there's nothing to release. */
- if (pmd_flags(*spmd) & _PAGE_PRESENT) {
- unsigned int i;
- pte_t *ptepage = __va(pmd_pfn(*spmd) << PAGE_SHIFT);
- /* For each entry in the page, we might need to release it. */
- for (i = 0; i < PTRS_PER_PTE; i++)
- release_pte(ptepage[i]);
- /* Now we can free the page of PTEs */
- free_page((long)ptepage);
- /* And zero out the PMD entry so we never release it twice. */
- set_pmd(spmd, __pmd(0));
- }
-}
-
-static void release_pgd(pgd_t *spgd)
-{
- /* If the entry's not present, there's nothing to release. */
- if (pgd_flags(*spgd) & _PAGE_PRESENT) {
- unsigned int i;
- pmd_t *pmdpage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);
-
- for (i = 0; i < PTRS_PER_PMD; i++)
- release_pmd(&pmdpage[i]);
-
- /* Now we can free the page of PMDs */
- free_page((long)pmdpage);
- /* And zero out the PGD entry so we never release it twice. */
- set_pgd(spgd, __pgd(0));
- }
-}
-
-#else /* !CONFIG_X86_PAE */
-/*H:450
- * If we chase down the release_pgd() code, the non-PAE version looks like
- * this. The PAE version is almost identical, but instead of calling
- * release_pte it calls release_pmd(), which looks much like this.
- */
-static void release_pgd(pgd_t *spgd)
-{
- /* If the entry's not present, there's nothing to release. */
- if (pgd_flags(*spgd) & _PAGE_PRESENT) {
- unsigned int i;
- /*
- * Converting the pfn to find the actual PTE page is easy: turn
- * the page number into a physical address, then convert to a
- * virtual address (easy for kernel pages like this one).
- */
- pte_t *ptepage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);
- /* For each entry in the page, we might need to release it. */
- for (i = 0; i < PTRS_PER_PTE; i++)
- release_pte(ptepage[i]);
- /* Now we can free the page of PTEs */
- free_page((long)ptepage);
- /* And zero out the PGD entry so we never release it twice. */
- *spgd = __pgd(0);
- }
-}
-#endif
-
-/*H:445
- * We saw flush_user_mappings() twice: once from the flush_user_mappings()
- * hypercall and once in new_pgdir() when we re-used a top-level pgdir page.
- * It simply releases every PTE page from 0 up to the Guest's kernel address.
- */
-static void flush_user_mappings(struct lguest *lg, int idx)
-{
- unsigned int i;
- /* Release every pgd entry up to the kernel's address. */
- for (i = 0; i < pgd_index(lg->kernel_address); i++)
- release_pgd(lg->pgdirs[idx].pgdir + i);
-}
-
-/*H:440
- * (v) Flushing (throwing away) page tables,
- *
- * The Guest has a hypercall to throw away the page tables: it's used when a
- * large number of mappings have been changed.
- */
-void guest_pagetable_flush_user(struct lg_cpu *cpu)
-{
- /* Drop the userspace part of the current page table. */
- flush_user_mappings(cpu->lg, cpu->cpu_pgd);
-}
-/*:*/
-
-/* We walk down the guest page tables to get a guest-physical address */
-bool __guest_pa(struct lg_cpu *cpu, unsigned long vaddr, unsigned long *paddr)
-{
- pgd_t gpgd;
- pte_t gpte;
-#ifdef CONFIG_X86_PAE
- pmd_t gpmd;
-#endif
-
- /* Still not set up? Just map 1:1. */
- if (unlikely(cpu->linear_pages)) {
- *paddr = vaddr;
- return true;
- }
-
- /* First step: get the top-level Guest page table entry. */
- gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
- /* Toplevel not present? We can't map it in. */
- if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
- goto fail;
-
-#ifdef CONFIG_X86_PAE
- gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
- if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
- goto fail;
- gpte = lgread(cpu, gpte_addr(cpu, gpmd, vaddr), pte_t);
-#else
- gpte = lgread(cpu, gpte_addr(cpu, gpgd, vaddr), pte_t);
-#endif
- if (!(pte_flags(gpte) & _PAGE_PRESENT))
- goto fail;
-
- *paddr = pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK);
- return true;
-
-fail:
- *paddr = -1UL;
- return false;
-}
-
-/*
- * This is the version we normally use: kills the Guest if it uses a
- * bad address
- */
-unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
-{
- unsigned long paddr;
-
- if (!__guest_pa(cpu, vaddr, &paddr))
- kill_guest(cpu, "Bad address %#lx", vaddr);
- return paddr;
-}
-
-/*
- * We keep several page tables. This is a simple routine to find the page
- * table (if any) corresponding to this top-level address the Guest has given
- * us.
- */
-static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable)
-{
- unsigned int i;
- for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
- if (lg->pgdirs[i].pgdir && lg->pgdirs[i].gpgdir == pgtable)
- break;
- return i;
-}
-
-/*H:435
- * And this is us, creating the new page directory. If we really do
- * allocate a new one (and so the kernel parts are not there), we set
- * blank_pgdir.
- */
-static unsigned int new_pgdir(struct lg_cpu *cpu,
- unsigned long gpgdir,
- int *blank_pgdir)
-{
- unsigned int next;
-
- /*
- * We pick one entry at random to throw out. Choosing the Least
- * Recently Used might be better, but this is easy.
- */
- next = prandom_u32() % ARRAY_SIZE(cpu->lg->pgdirs);
- /* If it's never been allocated at all before, try now. */
- if (!cpu->lg->pgdirs[next].pgdir) {
- cpu->lg->pgdirs[next].pgdir =
- (pgd_t *)get_zeroed_page(GFP_KERNEL);
- /* If the allocation fails, just keep using the one we have */
- if (!cpu->lg->pgdirs[next].pgdir)
- next = cpu->cpu_pgd;
- else {
- /*
- * This is a blank page, so there are no kernel
- * mappings: caller must map the stack!
- */
- *blank_pgdir = 1;
- }
- }
- /* Record which Guest toplevel this shadows. */
- cpu->lg->pgdirs[next].gpgdir = gpgdir;
- /* Release all the non-kernel mappings. */
- flush_user_mappings(cpu->lg, next);
-
- /* This hasn't run on any CPU at all. */
- cpu->lg->pgdirs[next].last_host_cpu = -1;
-
- return next;
-}
-
-/*H:501
- * We do need the Switcher code mapped at all times, so we allocate that
- * part of the Guest page table here. We map the Switcher code immediately,
- * but defer mapping of the guest register page and IDT/LDT etc page until
- * just before we run the guest in map_switcher_in_guest().
- *
- * We *could* do this setup in map_switcher_in_guest(), but at that point
- * we have interrupts disabled, and allocating pages like that is fraught: we
- * can't sleep if we need to free up some memory.
- */
-static bool allocate_switcher_mapping(struct lg_cpu *cpu)
-{
- int i;
-
- for (i = 0; i < TOTAL_SWITCHER_PAGES; i++) {
- pte_t *pte = find_spte(cpu, switcher_addr + i * PAGE_SIZE, true,
- CHECK_GPGD_MASK, _PAGE_TABLE);
- if (!pte)
- return false;
-
- /*
- * Map the switcher page if not already there. It might
- * already be there because we call allocate_switcher_mapping()
- * in guest_set_pgd() just in case it did discard our Switcher
- * mapping, but it probably didn't.
- */
- if (i == 0 && !(pte_flags(*pte) & _PAGE_PRESENT)) {
- /* Get a reference to the Switcher page. */
- get_page(lg_switcher_pages[0]);
- /* Create a read-only, executable, kernel-style PTE */
- set_pte(pte,
- mk_pte(lg_switcher_pages[0], PAGE_KERNEL_RX));
- }
- }
- cpu->lg->pgdirs[cpu->cpu_pgd].switcher_mapped = true;
- return true;
-}
-
-/*H:470
- * Finally, a routine which throws away everything: all PGD entries in all
- * the shadow page tables, including the Guest's kernel mappings. This is used
- * when we destroy the Guest.
- */
-static void release_all_pagetables(struct lguest *lg)
-{
- unsigned int i, j;
-
- /* Every shadow pagetable this Guest has */
- for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++) {
- if (!lg->pgdirs[i].pgdir)
- continue;
-
- /* Every PGD entry. */
- for (j = 0; j < PTRS_PER_PGD; j++)
- release_pgd(lg->pgdirs[i].pgdir + j);
- lg->pgdirs[i].switcher_mapped = false;
- lg->pgdirs[i].last_host_cpu = -1;
- }
-}
-
-/*
- * We also throw away everything when a Guest tells us it's changed a kernel
- * mapping. Since kernel mappings are in every page table, it's easiest to
- * throw them all away. This traps the Guest in amber for a while as
- * everything faults back in, but it's rare.
- */
-void guest_pagetable_clear_all(struct lg_cpu *cpu)
-{
- release_all_pagetables(cpu->lg);
- /* We need the Guest kernel stack mapped again. */
- pin_stack_pages(cpu);
- /* And we need Switcher allocated. */
- if (!allocate_switcher_mapping(cpu))
- kill_guest(cpu, "Cannot populate switcher mapping");
-}
-
-/*H:430
- * (iv) Switching page tables
- *
- * Now we've seen all the page table setting and manipulation, let's see
- * what happens when the Guest changes page tables (ie. changes the top-level
- * pgdir). This occurs on almost every context switch.
- */
-void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable)
-{
- int newpgdir, repin = 0;
-
- /*
- * The very first time they call this, we're actually running without
- * any page tables; we've been making it up. Throw them away now.
- */
- if (unlikely(cpu->linear_pages)) {
- release_all_pagetables(cpu->lg);
- cpu->linear_pages = false;
- /* Force allocation of a new pgdir. */
- newpgdir = ARRAY_SIZE(cpu->lg->pgdirs);
- } else {
- /* Look to see if we have this one already. */
- newpgdir = find_pgdir(cpu->lg, pgtable);
- }
-
- /*
- * If not, we allocate or mug an existing one: if it's a fresh one,
- * repin gets set to 1.
- */
- if (newpgdir == ARRAY_SIZE(cpu->lg->pgdirs))
- newpgdir = new_pgdir(cpu, pgtable, &repin);
- /* Change the current pgd index to the new one. */
- cpu->cpu_pgd = newpgdir;
- /*
- * If it was completely blank, we map in the Guest kernel stack and
- * the Switcher.
- */
- if (repin)
- pin_stack_pages(cpu);
-
- if (!cpu->lg->pgdirs[cpu->cpu_pgd].switcher_mapped) {
- if (!allocate_switcher_mapping(cpu))
- kill_guest(cpu, "Cannot populate switcher mapping");
- }
-}
-/*:*/
-
-/*M:009
- * Since we throw away all mappings when a kernel mapping changes, our
- * performance sucks for guests using highmem. In fact, a guest with
- * PAGE_OFFSET 0xc0000000 (the default) and more than about 700MB of RAM is
- * usually slower than a Guest with less memory.
- *
- * This, of course, cannot be fixed. It would take some kind of... well, I
- * don't know, but the term "puissant code-fu" comes to mind.
-:*/
-
-/*H:420
- * This is the routine which actually sets the page table entry for the
- * "idx"'th shadow page table.
- *
- * Normally, we can just throw out the old entry and replace it with 0: if they
- * use it demand_page() will put the new entry in. We need to do this anyway:
- * The Guest expects _PAGE_ACCESSED to be set on its PTE the first time a page
- * is read from, and _PAGE_DIRTY when it's written to.
- *
- * But Avi Kivity pointed out that most Operating Systems (Linux included) set
- * these bits on PTEs immediately anyway. This is done to save the CPU from
- * having to update them, but it helps us the same way: if they set
- * _PAGE_ACCESSED then we can put a read-only PTE entry in immediately, and if
- * they set _PAGE_DIRTY then we can put a writable PTE entry in immediately.
- */
-static void __guest_set_pte(struct lg_cpu *cpu, int idx,
- unsigned long vaddr, pte_t gpte)
-{
- /* Look up the matching shadow page directory entry. */
- pgd_t *spgd = spgd_addr(cpu, idx, vaddr);
-#ifdef CONFIG_X86_PAE
- pmd_t *spmd;
-#endif
-
- /* If the top level isn't present, there's no entry to update. */
- if (pgd_flags(*spgd) & _PAGE_PRESENT) {
-#ifdef CONFIG_X86_PAE
- spmd = spmd_addr(cpu, *spgd, vaddr);
- if (pmd_flags(*spmd) & _PAGE_PRESENT) {
-#endif
- /* Otherwise, start by releasing the existing entry. */
- pte_t *spte = spte_addr(cpu, *spgd, vaddr);
- release_pte(*spte);
-
- /*
- * If they're setting this entry as dirty or accessed,
- * we might as well put that entry they've given us in
- * now. This shaves 10% off a copy-on-write
- * micro-benchmark.
- */
- if ((pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED))
- && !gpte_in_iomem(cpu, gpte)) {
- if (!check_gpte(cpu, gpte))
- return;
- set_pte(spte,
- gpte_to_spte(cpu, gpte,
- pte_flags(gpte) & _PAGE_DIRTY));
- } else {
- /*
- * Otherwise kill it and we can demand_page()
- * it in later.
- */
- set_pte(spte, __pte(0));
- }
-#ifdef CONFIG_X86_PAE
- }
-#endif
- }
-}
-
-/*H:410
- * Updating a PTE entry is a little trickier.
- *
- * We keep track of several different page tables (the Guest uses one for each
- * process, so it makes sense to cache at least a few). Each of these have
- * identical kernel parts: ie. every mapping above PAGE_OFFSET is the same for
- * all processes. So when the page table above that address changes, we update
- * all the page tables, not just the current one. This is rare.
- *
- * The benefit is that when we have to track a new page table, we can keep all
- * the kernel mappings. This speeds up context switch immensely.
- */
-void guest_set_pte(struct lg_cpu *cpu,
- unsigned long gpgdir, unsigned long vaddr, pte_t gpte)
-{
- /* We don't let you remap the Switcher; we need it to get back! */
- if (vaddr >= switcher_addr) {
- kill_guest(cpu, "attempt to set pte into Switcher pages");
- return;
- }
-
- /*
- * Kernel mappings must be changed on all top levels. Slow, but doesn't
- * happen often.
- */
- if (vaddr >= cpu->lg->kernel_address) {
- unsigned int i;
- for (i = 0; i < ARRAY_SIZE(cpu->lg->pgdirs); i++)
- if (cpu->lg->pgdirs[i].pgdir)
- __guest_set_pte(cpu, i, vaddr, gpte);
- } else {
- /* Is this page table one we have a shadow for? */
- int pgdir = find_pgdir(cpu->lg, gpgdir);
- if (pgdir != ARRAY_SIZE(cpu->lg->pgdirs))
- /* If so, do the update. */
- __guest_set_pte(cpu, pgdir, vaddr, gpte);
- }
-}
-
-/*H:400
- * (iii) Setting up a page table entry when the Guest tells us one has changed.
- *
- * Just like we did in interrupts_and_traps.c, it makes sense for us to deal
- * with the other side of page tables while we're here: what happens when the
- * Guest asks for a page table to be updated?
- *
- * We already saw that demand_page() will fill in the shadow page tables when
- * needed, so we can simply remove shadow page table entries whenever the Guest
- * tells us they've changed. When the Guest tries to use the new entry it will
- * fault and demand_page() will fix it up.
- *
- * So with that in mind here's our code to update a (top-level) PGD entry:
- */
-void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 idx)
-{
- int pgdir;
-
- if (idx > PTRS_PER_PGD) {
- kill_guest(&lg->cpus[0], "Attempt to set pgd %u/%u",
- idx, PTRS_PER_PGD);
- return;
- }
-
- /* If they're talking about a page table we have a shadow for... */
- pgdir = find_pgdir(lg, gpgdir);
- if (pgdir < ARRAY_SIZE(lg->pgdirs)) {
- /* ... throw it away. */
- release_pgd(lg->pgdirs[pgdir].pgdir + idx);
- /* That might have been the Switcher mapping, remap it. */
- if (!allocate_switcher_mapping(&lg->cpus[0])) {
- kill_guest(&lg->cpus[0],
- "Cannot populate switcher mapping");
- }
- lg->pgdirs[pgdir].last_host_cpu = -1;
- }
-}
-
-#ifdef CONFIG_X86_PAE
-/* For setting a mid-level, we just throw everything away. It's easy. */
-void guest_set_pmd(struct lguest *lg, unsigned long pmdp, u32 idx)
-{
- guest_pagetable_clear_all(&lg->cpus[0]);
-}
-#endif
-
-/*H:500
- * (vii) Setting up the page tables initially.
- *
- * When a Guest is first created, we initialize a shadow page table which
- * we will populate on future faults. The Guest doesn't have any actual
- * pagetables yet, so we set linear_pages to tell demand_page() to fake it
- * for the moment.
- *
- * We do need the Switcher to be mapped at all times, so we allocate that
- * part of the Guest page table here.
- */
-int init_guest_pagetable(struct lguest *lg)
-{
- struct lg_cpu *cpu = &lg->cpus[0];
- int allocated = 0;
-
- /* lg (and lg->cpus[]) starts zeroed: this allocates a new pgdir */
- cpu->cpu_pgd = new_pgdir(cpu, 0, &allocated);
- if (!allocated)
- return -ENOMEM;
-
- /* We start with a linear mapping until the Guest initializes its own page tables. */
- cpu->linear_pages = true;
-
- /* Allocate the page tables for the Switcher. */
- if (!allocate_switcher_mapping(cpu)) {
- release_all_pagetables(lg);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-/*H:508 When the Guest calls LHCALL_LGUEST_INIT we do more setup. */
-void page_table_guest_data_init(struct lg_cpu *cpu)
-{
- /*
- * We tell the Guest that it can't use the virtual addresses
- * used by the Switcher. This trick is equivalent to 4GB -
- * switcher_addr.
- */
- u32 top = ~switcher_addr + 1;
-
- /* We get the kernel address: above this is all kernel memory. */
- if (get_user(cpu->lg->kernel_address,
- &cpu->lg->lguest_data->kernel_address)
- /*
- * We tell the Guest that it can't use the top virtual
- * addresses (used by the Switcher).
- */
- || put_user(top, &cpu->lg->lguest_data->reserve_mem)) {
- kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);
- return;
- }
-
- /*
- * In flush_user_mappings() we loop from 0 to
- * "pgd_index(lg->kernel_address)". This assumes it won't hit the
- * Switcher mappings, so check that now.
- */
- if (cpu->lg->kernel_address >= switcher_addr)
- kill_guest(cpu, "bad kernel address %#lx",
- cpu->lg->kernel_address);
-}
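(The "top = ~switcher_addr + 1" trick above is plain two's-complement negation. A small stand-alone sketch — not kernel code — makes the "4GB - switcher_addr" equivalence explicit:)

#include <assert.h>
#include <stdint.h>

static void check_top(uint32_t switcher_addr)
{
	uint32_t top = ~switcher_addr + 1;

	/* Same value as (2^32 - switcher_addr), truncated to 32 bits. */
	assert(top == (uint32_t)((1ULL << 32) - switcher_addr));
}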
-
-/* When a Guest dies, our cleanup is fairly simple. */
-void free_guest_pagetable(struct lguest *lg)
-{
- unsigned int i;
-
- /* Throw away all page table pages. */
- release_all_pagetables(lg);
- /* Now free the top levels: free_page() can handle 0 just fine. */
- for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
- free_page((long)lg->pgdirs[i].pgdir);
-}
-
-/*H:481
- * This clears the Switcher mappings for cpu #i.
- */
-static void remove_switcher_percpu_map(struct lg_cpu *cpu, unsigned int i)
-{
- unsigned long base = switcher_addr + PAGE_SIZE + i * PAGE_SIZE*2;
- pte_t *pte;
-
- /* Clear the mappings for both pages. */
- pte = find_spte(cpu, base, false, 0, 0);
- release_pte(*pte);
- set_pte(pte, __pte(0));
-
- pte = find_spte(cpu, base + PAGE_SIZE, false, 0, 0);
- release_pte(*pte);
- set_pte(pte, __pte(0));
-}
-
-/*H:480
- * (vi) Mapping the Switcher when the Guest is about to run.
- *
- * The Switcher and the two pages for this CPU need to be visible in the Guest
- * (and not the pages for other CPUs).
- *
- * The pages for the pagetables have all been allocated before: we just need
- * to make sure the actual PTEs are up-to-date for the CPU we're about to run
- * on.
- */
-void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
-{
- unsigned long base;
- struct page *percpu_switcher_page, *regs_page;
- pte_t *pte;
- struct pgdir *pgdir = &cpu->lg->pgdirs[cpu->cpu_pgd];
-
- /* Switcher page should always be mapped by now! */
- BUG_ON(!pgdir->switcher_mapped);
-
- /*
- * Remember that we have two pages for each Host CPU, so we can run a
- * Guest on each CPU without them interfering. We need to make sure
- * those pages are mapped correctly in the Guest, but since we usually
- * run on the same CPU, we cache that, and only update the mappings
- * when we move.
- */
- if (pgdir->last_host_cpu == raw_smp_processor_id())
- return;
-
- /* -1 means unknown so we remove everything. */
- if (pgdir->last_host_cpu == -1) {
- unsigned int i;
- for_each_possible_cpu(i)
- remove_switcher_percpu_map(cpu, i);
- } else {
- /* We know exactly what CPU mapping to remove. */
- remove_switcher_percpu_map(cpu, pgdir->last_host_cpu);
- }
-
- /*
- * When we're running the Guest, we want the Guest's "regs" page to
- * appear where the first Switcher page for this CPU is. This is an
- * optimization: when the Switcher saves the Guest registers, it saves
- * them into the first page of this CPU's "struct lguest_pages": if we
- * make sure the Guest's register page is already mapped there, we
- * don't have to copy them out again.
- */
- /* Find the shadow PTE for this regs page. */
- base = switcher_addr + PAGE_SIZE
- + raw_smp_processor_id() * sizeof(struct lguest_pages);
- pte = find_spte(cpu, base, false, 0, 0);
- regs_page = pfn_to_page(__pa(cpu->regs_page) >> PAGE_SHIFT);
- get_page(regs_page);
- set_pte(pte, mk_pte(regs_page, __pgprot(__PAGE_KERNEL & ~_PAGE_GLOBAL)));
-
- /*
- * We map the second page of the struct lguest_pages read-only in
- * the Guest: the IDT, GDT and other things it's not supposed to
- * change.
- */
- pte = find_spte(cpu, base + PAGE_SIZE, false, 0, 0);
- percpu_switcher_page
- = lg_switcher_pages[1 + raw_smp_processor_id()*2 + 1];
- get_page(percpu_switcher_page);
- set_pte(pte, mk_pte(percpu_switcher_page,
- __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL)));
-
- pgdir->last_host_cpu = raw_smp_processor_id();
-}
-
-/*H:490
- * We've made it through the page table code. Perhaps our tired brains are
- * still processing the details, or perhaps we're simply glad it's over.
- *
- * If nothing else, note that all this complexity in keeping shadow page tables
- * in sync with the Guest's page tables exists for one reason: for most Guests this
- * page table dance determines how bad performance will be. This is why Xen
- * uses exotic direct Guest pagetable manipulation, and why both Intel and AMD
- * have implemented shadow page table support directly into hardware.
- *
- * There is just one file remaining in the Host.
- */
diff --git a/drivers/lguest/segments.c b/drivers/lguest/segments.c
deleted file mode 100644
index c4fb424dfddb..000000000000
--- a/drivers/lguest/segments.c
+++ /dev/null
@@ -1,228 +0,0 @@
-/*P:600
- * The x86 architecture has segments, which involve a table of descriptors
- * which can be used to do funky things with virtual address interpretation.
- * We originally used to use segments so the Guest couldn't alter the
- * Guest<->Host Switcher, and then we had to trim Guest segments, and restore
- * for userspace per-thread segments, but trim again on userspace->kernel
- * transitions... This nightmarish creation was contained within this file,
- * where we knew not to tread without heavy armament and a change of underwear.
- *
- * In these modern times, the segment handling code consists of simple sanity
- * checks, and the worst you'll experience reading this code is butterfly-rash
- * from frolicking through its parklike serenity.
-:*/
-#include "lg.h"
-
-/*H:600
- * Segments & The Global Descriptor Table
- *
- * (That title sounds like a bad Nerdcore group. Not to suggest that there are
- * any good Nerdcore groups, but in high school a friend of mine had a band
- * called Joe Fish and the Chips, so there are definitely worse band names).
- *
- * To refresh: the GDT is a table of 8-byte values describing segments. Once
- * set up, these segments can be loaded into one of the 6 "segment registers".
- *
- * GDT entries are passed around as "struct desc_struct"s, which like IDT
- * entries are split into two 32-bit members, "a" and "b". One day, someone
- * will clean that up, and be declared a Hero. (No pressure, I'm just saying).
- *
- * Anyway, the GDT entry contains a base (the start address of the segment), a
- * limit (the size of the segment - 1), and some flags. Sounds simple, and it
- * would be, except those zany Intel engineers decided that it was too boring
- * to put the base at one end, the limit at the other, and the flags in
- * between. They decided to shotgun the bits at random throughout the 8 bytes,
- * like so:
- *
- *   0               16                     40       48  52  56     63
- *   [ limit part 1 ][     base part 1     ][ flags ][li][fl][base ]
- *                                                  mit ags part 2
- *                                                  part 2
- *
- * As a result, this file contains a certain amount of magic numeracy. Let's
- * begin.
- */
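(To make the scattered layout above concrete, here is a hedged user-space sketch — the helper names are made up for illustration — that reassembles the base and limit from a raw 8-byte descriptor:)

#include <stdint.h>

static uint32_t desc_base(uint64_t d)
{
	/* base[23:0] lives in bits 16..39, base[31:24] in bits 56..63 */
	return (uint32_t)((d >> 16) & 0xFFFFFF) | (uint32_t)((d >> 56) << 24);
}

static uint32_t desc_limit(uint64_t d)
{
	/* limit[15:0] lives in bits 0..15, limit[19:16] in bits 48..51 */
	return (uint32_t)(d & 0xFFFF) | (uint32_t)(((d >> 48) & 0xF) << 16);
}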
-
-/*
- * There are several entries we don't let the Guest set. The TSS entry is the
- * "Task State Segment" which controls all kinds of delicate things. The
- * LGUEST_CS and LGUEST_DS entries are reserved for the Switcher, and
- * the Guest can't be trusted to deal with double faults.
- */
-static bool ignored_gdt(unsigned int num)
-{
- return (num == GDT_ENTRY_TSS
- || num == GDT_ENTRY_LGUEST_CS
- || num == GDT_ENTRY_LGUEST_DS
- || num == GDT_ENTRY_DOUBLEFAULT_TSS);
-}
-
-/*H:630
- * Once the Guest gave us new GDT entries, we fix them up a little. We
- * don't care if they're invalid: the worst that can happen is a General
- * Protection Fault in the Switcher when it restores a Guest segment register
- * which tries to use that entry. Then we kill the Guest for causing such a
- * mess: the message will be "unhandled trap 256".
- */
-static void fixup_gdt_table(struct lg_cpu *cpu, unsigned start, unsigned end)
-{
- unsigned int i;
-
- for (i = start; i < end; i++) {
- /*
- * We never copy these ones to real GDT, so we don't care what
- * they say
- */
- if (ignored_gdt(i))
- continue;
-
- /*
- * Segment descriptors contain a privilege level: the Guest is
- * sometimes careless and leaves this as 0, even though it's
- * running at privilege level 1. If so, we fix it here.
- */
- if (cpu->arch.gdt[i].dpl == 0)
- cpu->arch.gdt[i].dpl |= GUEST_PL;
-
- /*
- * Each descriptor has an "accessed" bit. If we don't set it
- * now, the CPU will try to set it when the Guest first loads
- * that entry into a segment register. But the GDT isn't
- * writable by the Guest, so bad things can happen.
- */
- cpu->arch.gdt[i].type |= 0x1;
- }
-}
-
-/*H:610
- * Like the IDT, we never simply use the GDT the Guest gives us. We keep
- * a GDT for each CPU, and copy across the Guest's entries each time we want to
- * run the Guest on that CPU.
- *
- * This routine is called at boot or modprobe time for each CPU to set up the
- * constant GDT entries: the ones which are the same no matter what Guest we're
- * running.
- */
-void setup_default_gdt_entries(struct lguest_ro_state *state)
-{
- struct desc_struct *gdt = state->guest_gdt;
- unsigned long tss = (unsigned long)&state->guest_tss;
-
- /* The Switcher segments are full 0-4G segments, privilege level 0 */
- gdt[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT;
- gdt[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT;
-
- /*
- * The TSS segment refers to the TSS entry for this particular CPU.
- */
- gdt[GDT_ENTRY_TSS].a = 0;
- gdt[GDT_ENTRY_TSS].b = 0;
-
- gdt[GDT_ENTRY_TSS].limit0 = 0x67;
- gdt[GDT_ENTRY_TSS].base0 = tss & 0xFFFF;
- gdt[GDT_ENTRY_TSS].base1 = (tss >> 16) & 0xFF;
- gdt[GDT_ENTRY_TSS].base2 = tss >> 24;
- gdt[GDT_ENTRY_TSS].type = 0x9; /* 32-bit TSS (available) */
- gdt[GDT_ENTRY_TSS].p = 0x1; /* Entry is present */
- gdt[GDT_ENTRY_TSS].dpl = 0x0; /* Privilege level 0 */
- gdt[GDT_ENTRY_TSS].s = 0x0; /* system segment */
-
-}
-
-/*
- * This routine sets up the initial Guest GDT for booting. All entries start
- * as 0 (unusable).
- */
-void setup_guest_gdt(struct lg_cpu *cpu)
-{
- /*
- * Start with full 0-4G segments...except the Guest is allowed to use
- * them, so set the privilege level appropriately in the flags.
- */
- cpu->arch.gdt[GDT_ENTRY_KERNEL_CS] = FULL_EXEC_SEGMENT;
- cpu->arch.gdt[GDT_ENTRY_KERNEL_DS] = FULL_SEGMENT;
- cpu->arch.gdt[GDT_ENTRY_KERNEL_CS].dpl |= GUEST_PL;
- cpu->arch.gdt[GDT_ENTRY_KERNEL_DS].dpl |= GUEST_PL;
-}
-
-/*H:650
- * An optimization of copy_gdt(), for just the three "thread-local storage"
- * entries.
- */
-void copy_gdt_tls(const struct lg_cpu *cpu, struct desc_struct *gdt)
-{
- unsigned int i;
-
- for (i = GDT_ENTRY_TLS_MIN; i <= GDT_ENTRY_TLS_MAX; i++)
- gdt[i] = cpu->arch.gdt[i];
-}
-
-/*H:640
- * When the Guest is run on a different CPU, or the GDT entries have changed,
- * copy_gdt() is called to copy the Guest's GDT entries across to this CPU's
- * GDT.
- */
-void copy_gdt(const struct lg_cpu *cpu, struct desc_struct *gdt)
-{
- unsigned int i;
-
- /*
- * The default entries from setup_default_gdt_entries() are not
- * replaced. See ignored_gdt() above.
- */
- for (i = 0; i < GDT_ENTRIES; i++)
- if (!ignored_gdt(i))
- gdt[i] = cpu->arch.gdt[i];
-}
-
-/*H:620
- * This is where the Guest asks us to load a new GDT entry
- * (LHCALL_LOAD_GDT_ENTRY). We tweak the entry and copy it in.
- */
-void load_guest_gdt_entry(struct lg_cpu *cpu, u32 num, u32 lo, u32 hi)
-{
- /*
- * We assume the Guest has the same number of GDT entries as the
- * Host, otherwise we'd have to dynamically allocate the Guest GDT.
- */
- if (num >= ARRAY_SIZE(cpu->arch.gdt)) {
- kill_guest(cpu, "too many gdt entries %i", num);
- return;
- }
-
- /* Set it up, then fix it. */
- cpu->arch.gdt[num].a = lo;
- cpu->arch.gdt[num].b = hi;
- fixup_gdt_table(cpu, num, num+1);
- /*
- * Mark that the GDT changed so the core knows it has to copy it again,
- * even if the Guest is run on the same CPU.
- */
- cpu->changed |= CHANGED_GDT;
-}
-
-/*
- * This is the fast-track version for just changing the three TLS entries.
- * Remember that this happens on every context switch, so it's worth
- * optimizing. But wouldn't it be neater to have a single hypercall to cover
- * both cases?
- */
-void guest_load_tls(struct lg_cpu *cpu, unsigned long gtls)
-{
- struct desc_struct *tls = &cpu->arch.gdt[GDT_ENTRY_TLS_MIN];
-
- __lgread(cpu, tls, gtls, sizeof(*tls)*GDT_ENTRY_TLS_ENTRIES);
- fixup_gdt_table(cpu, GDT_ENTRY_TLS_MIN, GDT_ENTRY_TLS_MAX+1);
- /* Note that just the TLS entries have changed. */
- cpu->changed |= CHANGED_GDT_TLS;
-}
-
-/*H:660
- * With this, we have finished the Host.
- *
- * Five of the seven parts of our task are complete. You have made it through
- * the Bit of Despair (I think that's somewhere in the page table code,
- * myself).
- *
- * Next, we examine "make Switcher". It's short, but intense.
- */
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
deleted file mode 100644
index b4f79b923aea..000000000000
--- a/drivers/lguest/x86/core.c
+++ /dev/null
@@ -1,724 +0,0 @@
-/*
- * Copyright (C) 2006, Rusty Russell <rusty@rustcorp.com.au> IBM Corporation.
- * Copyright (C) 2007, Jes Sorensen <jes@sgi.com> SGI.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-/*P:450
- * This file contains the x86-specific lguest code. It used to be all
- * mixed in with drivers/lguest/core.c but several foolhardy code slashers
- * wrestled most of the dependencies out to here in preparation for porting
- * lguest to other architectures (see what I mean by foolhardy?).
- *
- * This also contains a couple of non-obvious setup and teardown pieces which
- * were implemented after days of debugging pain.
-:*/
-#include <linux/kernel.h>
-#include <linux/start_kernel.h>
-#include <linux/string.h>
-#include <linux/console.h>
-#include <linux/screen_info.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-#include <linux/clocksource.h>
-#include <linux/clockchips.h>
-#include <linux/cpu.h>
-#include <linux/lguest.h>
-#include <linux/lguest_launcher.h>
-#include <asm/paravirt.h>
-#include <asm/param.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/desc.h>
-#include <asm/setup.h>
-#include <asm/lguest.h>
-#include <linux/uaccess.h>
-#include <asm/fpu/internal.h>
-#include <asm/tlbflush.h>
-#include "../lg.h"
-
-static int cpu_had_pge;
-
-static struct {
- unsigned long offset;
- unsigned short segment;
-} lguest_entry;
-
-/* Offset from where switcher.S was compiled to where we've copied it */
-static unsigned long switcher_offset(void)
-{
- return switcher_addr - (unsigned long)start_switcher_text;
-}
-
-/* This cpu's struct lguest_pages (after the Switcher text page) */
-static struct lguest_pages *lguest_pages(unsigned int cpu)
-{
- return &(((struct lguest_pages *)(switcher_addr + PAGE_SIZE))[cpu]);
-}
-
-static DEFINE_PER_CPU(struct lg_cpu *, lg_last_cpu);
-
-/*S:010
- * We approach the Switcher.
- *
- * Remember that each CPU has two pages which are visible to the Guest when it
- * runs on that CPU. This has to contain the state for that Guest: we copy the
- * state in just before we run the Guest.
- *
- * Each Guest has "changed" flags which indicate what has changed in the Guest
- * since it last ran. We saw this set in interrupts_and_traps.c and
- * segments.c.
- */
-static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
-{
- /*
- * Copying all this data can be quite expensive. We usually run the
- * same Guest we ran last time (and that Guest hasn't run anywhere else
- * meanwhile). If that's not the case, we pretend everything in the
- * Guest has changed.
- */
- if (__this_cpu_read(lg_last_cpu) != cpu || cpu->last_pages != pages) {
- __this_cpu_write(lg_last_cpu, cpu);
- cpu->last_pages = pages;
- cpu->changed = CHANGED_ALL;
- }
-
- /* These copies are pretty cheap, so we do them unconditionally. */
-
- /* Save the current Host top-level page directory. */
- pages->state.host_cr3 = __pa(current->mm->pgd);
- /*
- * Set up the Guest's page tables to see this CPU's pages (and no
- * other CPU's pages).
- */
- map_switcher_in_guest(cpu, pages);
- /*
- * Set up the two "TSS" members which tell the CPU what stack to use
- * for traps which go directly into the Guest (ie. traps at privilege
- * level 1).
- */
- pages->state.guest_tss.sp1 = cpu->esp1;
- pages->state.guest_tss.ss1 = cpu->ss1;
-
- /* Copy direct-to-Guest trap entries. */
- if (cpu->changed & CHANGED_IDT)
- copy_traps(cpu, pages->state.guest_idt, default_idt_entries);
-
- /* Copy all GDT entries which the Guest can change. */
- if (cpu->changed & CHANGED_GDT)
- copy_gdt(cpu, pages->state.guest_gdt);
- /* If only the TLS entries have changed, copy them. */
- else if (cpu->changed & CHANGED_GDT_TLS)
- copy_gdt_tls(cpu, pages->state.guest_gdt);
-
- /* Mark the Guest as unchanged for next time. */
- cpu->changed = 0;
-}
-
-/* Finally: the code to actually call into the Switcher to run the Guest. */
-static void run_guest_once(struct lg_cpu *cpu, struct lguest_pages *pages)
-{
- /* This is a dummy value we need for GCC's sake. */
- unsigned int clobber;
-
- /*
- * Copy the guest-specific information into this CPU's "struct
- * lguest_pages".
- */
- copy_in_guest_info(cpu, pages);
-
- /*
- * Set the trap number to 256 (impossible value). If we fault while
- * switching to the Guest (bad segment registers or bug), this will
- * cause us to abort the Guest.
- */
- cpu->regs->trapnum = 256;
-
- /*
- * Now: we push the "eflags" register on the stack, then do an "lcall".
- * This is how we change from using the kernel code segment to using
- * the dedicated lguest code segment, as well as jumping into the
- * Switcher.
- *
- * The lcall also pushes the old code segment (KERNEL_CS) onto the
- * stack, then the address of this call. This stack layout happens to
- * exactly match the stack layout created by an interrupt...
- */
- asm volatile("pushf; lcall *%4"
- /*
- * This is how we tell GCC that %eax ("a") and %ebx ("b")
- * are changed by this routine. The "=" means output.
- */
- : "=a"(clobber), "=b"(clobber)
- /*
- * %eax contains the pages pointer. ("0" refers to the
- * 0-th argument above, ie "a"). %ebx contains the
- * physical address of the Guest's top-level page
- * directory.
- */
- : "0"(pages),
- "1"(__pa(cpu->lg->pgdirs[cpu->cpu_pgd].pgdir)),
- "m"(lguest_entry)
- /*
- * We tell gcc that all these registers could change,
- * which means we don't have to save and restore them in
- * the Switcher.
- */
- : "memory", "%edx", "%ecx", "%edi", "%esi");
-}
-/*:*/
-
-unsigned long *lguest_arch_regptr(struct lg_cpu *cpu, size_t reg_off, bool any)
-{
- switch (reg_off) {
- case offsetof(struct pt_regs, bx):
- return &cpu->regs->ebx;
- case offsetof(struct pt_regs, cx):
- return &cpu->regs->ecx;
- case offsetof(struct pt_regs, dx):
- return &cpu->regs->edx;
- case offsetof(struct pt_regs, si):
- return &cpu->regs->esi;
- case offsetof(struct pt_regs, di):
- return &cpu->regs->edi;
- case offsetof(struct pt_regs, bp):
- return &cpu->regs->ebp;
- case offsetof(struct pt_regs, ax):
- return &cpu->regs->eax;
- case offsetof(struct pt_regs, ip):
- return &cpu->regs->eip;
- case offsetof(struct pt_regs, sp):
- return &cpu->regs->esp;
- }
-
- /* Launcher can read these, but we don't allow any setting. */
- if (any) {
- switch (reg_off) {
- case offsetof(struct pt_regs, ds):
- return &cpu->regs->ds;
- case offsetof(struct pt_regs, es):
- return &cpu->regs->es;
- case offsetof(struct pt_regs, fs):
- return &cpu->regs->fs;
- case offsetof(struct pt_regs, gs):
- return &cpu->regs->gs;
- case offsetof(struct pt_regs, cs):
- return &cpu->regs->cs;
- case offsetof(struct pt_regs, flags):
- return &cpu->regs->eflags;
- case offsetof(struct pt_regs, ss):
- return &cpu->regs->ss;
- }
- }
-
- return NULL;
-}
-
-/*M:002
- * There are hooks in the scheduler which we can register to tell when we
- * get kicked off the CPU (preempt_notifier_register()). This would allow us
- * to lazily disable SYSENTER which would regain some performance, and should
- * also simplify copy_in_guest_info(). Note that we'd still need to restore
- * things when we exit to Launcher userspace, but that's fairly easy.
- *
- * We could also try using these hooks for PGE, but that might be too expensive.
- *
- * The hooks were designed for KVM, but we can also put them to good use.
-:*/
-
-/*H:040
- * This is the i386-specific code to setup and run the Guest. Interrupts
- * are disabled: we own the CPU.
- */
-void lguest_arch_run_guest(struct lg_cpu *cpu)
-{
- /*
- * SYSENTER is an optimized way of doing system calls. We can't allow
- * it because it always jumps to privilege level 0. A normal Guest
- * won't try it because we don't advertise it in CPUID, but a malicious
- * Guest (or malicious Guest userspace program) could, so we tell the
- * CPU to disable it before running the Guest.
- */
- if (boot_cpu_has(X86_FEATURE_SEP))
- wrmsr(MSR_IA32_SYSENTER_CS, 0, 0);
-
- /*
- * Now we actually run the Guest. It will return when something
- * interesting happens, and we can examine its registers to see what it
- * was doing.
- */
- run_guest_once(cpu, lguest_pages(raw_smp_processor_id()));
-
- /*
- * Note that the "regs" structure contains two extra entries which are
- * not really registers: a trap number which says what interrupt or
- * trap made the switcher code come back, and an error code which some
- * traps set.
- */
-
- /* Restore SYSENTER if it's supposed to be on. */
- if (boot_cpu_has(X86_FEATURE_SEP))
- wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
-
- /*
- * If the Guest page faulted, then the cr2 register will tell us the
- * bad virtual address. We have to grab this now, because once we
- * re-enable interrupts an interrupt could fault and thus overwrite
- * cr2, or we could even move off to a different CPU.
- */
- if (cpu->regs->trapnum == 14)
- cpu->arch.last_pagefault = read_cr2();
- /*
- * Similarly, if we took a trap because the Guest used the FPU,
- * we have to restore the FPU it expects to see.
- * fpu__restore() may sleep and we may even move off to
- * a different CPU. So all the critical stuff should be done
- * before this.
- */
- else if (cpu->regs->trapnum == 7 && !fpregs_active())
- fpu__restore(&current->thread.fpu);
-}
-
-/*H:130
- * Now we've examined the hypercall code; our Guest can make requests.
- * Our Guest is usually so well behaved; it never tries to do things it isn't
- * allowed to, and uses hypercalls instead. Unfortunately, Linux's paravirtual
- * infrastructure isn't quite complete, because it doesn't contain replacements
- * for the Intel I/O instructions. As a result, the Guest sometimes fumbles
- * across one during the boot process as it probes for various things which are
- * usually attached to a PC.
- *
- * When the Guest uses one of these instructions, we get a trap (General
- * Protection Fault) and come here. We queue this to be sent out to the
- * Launcher to handle.
- */
-
-/*
- * The eip contains the *virtual* address of the Guest's instruction:
- * we copy the instruction here so the Launcher doesn't have to walk
- * the page tables to decode it. We handle the case (eg. in a kernel
- * module) where the instruction is over two pages, and the pages are
- * virtually but not physically contiguous.
- *
- * The longest possible x86 instruction is 15 bytes, but we don't handle
- * anything that strange.
- */
-static void copy_from_guest(struct lg_cpu *cpu,
- void *dst, unsigned long vaddr, size_t len)
-{
- size_t to_page_end = PAGE_SIZE - (vaddr % PAGE_SIZE);
- unsigned long paddr;
-
- BUG_ON(len > PAGE_SIZE);
-
- /* If it goes over a page, copy in two parts. */
- if (len > to_page_end) {
- /* But make sure the next page is mapped! */
- if (__guest_pa(cpu, vaddr + to_page_end, &paddr))
- copy_from_guest(cpu, dst + to_page_end,
- vaddr + to_page_end,
- len - to_page_end);
- else
- /* Otherwise fill with zeroes. */
- memset(dst + to_page_end, 0, len - to_page_end);
- len = to_page_end;
- }
-
- /* This will kill the guest if it isn't mapped, but that
- * shouldn't happen. */
- __lgread(cpu, dst, guest_pa(cpu, vaddr), len);
-}
-
-
-static void setup_emulate_insn(struct lg_cpu *cpu)
-{
- cpu->pending.trap = 13;
- copy_from_guest(cpu, cpu->pending.insn, cpu->regs->eip,
- sizeof(cpu->pending.insn));
-}
-
-static void setup_iomem_insn(struct lg_cpu *cpu, unsigned long iomem_addr)
-{
- cpu->pending.trap = 14;
- cpu->pending.addr = iomem_addr;
- copy_from_guest(cpu, cpu->pending.insn, cpu->regs->eip,
- sizeof(cpu->pending.insn));
-}
-
-/*H:050 Once we've re-enabled interrupts, we look at why the Guest exited. */
-void lguest_arch_handle_trap(struct lg_cpu *cpu)
-{
- unsigned long iomem_addr;
-
- switch (cpu->regs->trapnum) {
- case 13: /* We've intercepted a General Protection Fault. */
- /* Hand to Launcher to emulate those pesky IN and OUT insns */
- if (cpu->regs->errcode == 0) {
- setup_emulate_insn(cpu);
- return;
- }
- break;
- case 14: /* We've intercepted a Page Fault. */
- /*
- * The Guest accessed a virtual address that wasn't mapped.
- * This happens a lot: we don't actually set up most of the page
- * tables for the Guest at all when we start: as it runs it asks
- * for more and more, and we set them up as required. In this
- * case, we don't even tell the Guest that the fault happened.
- *
- * The errcode tells whether this was a read or a write, and
- * whether kernel or userspace code.
- */
- if (demand_page(cpu, cpu->arch.last_pagefault,
- cpu->regs->errcode, &iomem_addr))
- return;
-
- /* Was this an access to memory mapped IO? */
- if (iomem_addr) {
- /* Tell Launcher, let it handle it. */
- setup_iomem_insn(cpu, iomem_addr);
- return;
- }
-
- /*
- * OK, it's really not there (or not OK): the Guest needs to
- * know. We write out the cr2 value so it knows where the
- * fault occurred.
- *
- * Note that if the Guest were really messed up, this could
- * happen before it's done the LHCALL_LGUEST_INIT hypercall, so
- * lg->lguest_data could be NULL
- */
- if (cpu->lg->lguest_data &&
- put_user(cpu->arch.last_pagefault,
- &cpu->lg->lguest_data->cr2))
- kill_guest(cpu, "Writing cr2");
- break;
- case 7: /* We've intercepted a Device Not Available fault. */
- /* No special handling is needed here. */
- break;
- case 32 ... 255:
- /* This might be a syscall. */
- if (could_be_syscall(cpu->regs->trapnum))
- break;
-
- /*
- * Other values mean a real interrupt occurred, in which case
- * the Host handler has already been run. We just do a
- * friendly check if another process should now be run, then
- * return to run the Guest again.
- */
- cond_resched();
- return;
- case LGUEST_TRAP_ENTRY:
- /*
- * Our 'struct hcall_args' maps directly over our regs: we set
- * up the pointer now to indicate a hypercall is pending.
- */
- cpu->hcall = (struct hcall_args *)cpu->regs;
- return;
- }
-
- /* We didn't handle the trap, so it needs to go to the Guest. */
- if (!deliver_trap(cpu, cpu->regs->trapnum))
- /*
- * If the Guest doesn't have a handler (either it hasn't
- * registered any yet, or it's one of the faults we don't let
- * it handle), it dies with this cryptic error message.
- */
- kill_guest(cpu, "unhandled trap %li at %#lx (%#lx)",
- cpu->regs->trapnum, cpu->regs->eip,
- cpu->regs->trapnum == 14 ? cpu->arch.last_pagefault
- : cpu->regs->errcode);
-}
-
-/*
- * Now we can look at each of the routines this calls, in increasing order of
- * complexity: do_hypercalls(), emulate_insn(), maybe_do_interrupt(),
- * deliver_trap() and demand_page(). After all those, we'll be ready to
- * examine the Switcher, and our philosophical understanding of the Host/Guest
- * duality will be complete.
-:*/
-static void adjust_pge(void *on)
-{
- if (on)
- cr4_set_bits(X86_CR4_PGE);
- else
- cr4_clear_bits(X86_CR4_PGE);
-}
-
-/*H:020
- * Now the Switcher is mapped and everything else is ready, we need to do
- * some more i386-specific initialization.
- */
-void __init lguest_arch_host_init(void)
-{
- int i;
-
- /*
- * Most of the x86/switcher_32.S doesn't care that it's been moved; on
- * Intel, jumps are relative, and it doesn't access any references to
- * external code or data.
- *
- * The only exception is the interrupt handlers in switcher.S: their
- * addresses are placed in a table (default_idt_entries), so we need to
- * update the table with the new addresses. switcher_offset() is a
- * convenience function which returns the distance between the
- * compiled-in switcher code and the high-mapped copy we just made.
- */
- for (i = 0; i < IDT_ENTRIES; i++)
- default_idt_entries[i] += switcher_offset();
-
- /*
- * Set up the Switcher's per-cpu areas.
- *
- * Each CPU gets two pages of its own within the high-mapped region
- * (aka. "struct lguest_pages"). Much of this can be initialized now,
- * but some depends on what Guest we are running (which is set up in
- * copy_in_guest_info()).
- */
- for_each_possible_cpu(i) {
- /* lguest_pages() returns this CPU's two pages. */
- struct lguest_pages *pages = lguest_pages(i);
- /* This is a convenience pointer to make the code neater. */
- struct lguest_ro_state *state = &pages->state;
-
- /*
- * The Global Descriptor Table: the Host has a different one
- * for each CPU. We keep a descriptor for the GDT which says
- * where it is and how big it is (the size field actually holds the
- * offset of the last byte, not the length, hence the "-1").
- */
- state->host_gdt_desc.size = GDT_SIZE-1;
- state->host_gdt_desc.address = (long)get_cpu_gdt_rw(i);
-
- /*
- * All CPUs on the Host use the same Interrupt Descriptor
- * Table, so we just use store_idt(), which gets this CPU's IDT
- * descriptor.
- */
- store_idt(&state->host_idt_desc);
-
- /*
- * The descriptors for the Guest's GDT and IDT can be filled
- * out now, too. We copy the GDT & IDT into ->guest_gdt and
- * ->guest_idt before actually running the Guest.
- */
- state->guest_idt_desc.size = sizeof(state->guest_idt)-1;
- state->guest_idt_desc.address = (long)&state->guest_idt;
- state->guest_gdt_desc.size = sizeof(state->guest_gdt)-1;
- state->guest_gdt_desc.address = (long)&state->guest_gdt;
-
- /*
- * We know where we want the stack to be when the Guest enters
- * the Switcher: in pages->regs. The stack grows upwards, so
- * we start it at the end of that structure.
- */
- state->guest_tss.sp0 = (long)(&pages->regs + 1);
- /*
- * And this is the GDT entry to use for the stack: we keep a
- * couple of special LGUEST entries.
- */
- state->guest_tss.ss0 = LGUEST_DS;
-
- /*
- * x86 can have a fine-grained bitmap which indicates what I/O
- * ports the process can use. We set it to the end of our
- * structure, meaning "none".
- */
- state->guest_tss.io_bitmap_base = sizeof(state->guest_tss);
-
- /*
- * Some GDT entries are the same across all Guests, so we can
- * set them up now.
- */
- setup_default_gdt_entries(state);
- /* Most IDT entries are the same for all Guests, too.*/
- setup_default_idt_entries(state, default_idt_entries);
-
- /*
- * The Host needs to be able to use the LGUEST segments on this
- * CPU, too, so put them in the Host GDT.
- */
- get_cpu_gdt_rw(i)[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT;
- get_cpu_gdt_rw(i)[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT;
- }
-
- /*
- * In the Switcher, we want the %cs segment register to use the
- * LGUEST_CS GDT entry: we've put that in the Host and Guest GDTs, so
- * it will be undisturbed when we switch. To change %cs and jump we
- * need this structure to feed to Intel's "lcall" instruction.
- */
- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
- lguest_entry.segment = LGUEST_CS;
-
- /*
- * Finally, we need to turn off "Page Global Enable". PGE is an
- * optimization where page table entries are specially marked to show
- * they never change. The Host kernel marks all the kernel pages this
- * way because it's always present, even when userspace is running.
- *
- * Lguest breaks this: unbeknownst to the rest of the Host kernel, we
- * switch to the Guest kernel. If you don't disable this on all CPUs,
- * you'll get really weird bugs that you'll chase for two days.
- *
- * I used to turn PGE off every time we switched to the Guest and back
- * on when we return, but that slowed the Switcher down noticeably.
- */
-
- /*
- * We don't need the complexity of CPUs coming and going while we're
- * doing this.
- */
- get_online_cpus();
- if (boot_cpu_has(X86_FEATURE_PGE)) { /* We have a broader idea of "global". */
- /* Remember that this was originally set (for cleanup). */
- cpu_had_pge = 1;
- /*
- * adjust_pge is a helper function which sets or unsets the PGE
- * bit on its CPU, depending on the argument (0 == unset).
- */
- on_each_cpu(adjust_pge, (void *)0, 1);
- /* Turn off the feature in the global feature set. */
- clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PGE);
- }
- put_online_cpus();
-}
-/*:*/
-
-void __exit lguest_arch_host_fini(void)
-{
- /* If we had PGE before we started, turn it back on now. */
- get_online_cpus();
- if (cpu_had_pge) {
- set_cpu_cap(&boot_cpu_data, X86_FEATURE_PGE);
- /* adjust_pge's argument "1" means set PGE. */
- on_each_cpu(adjust_pge, (void *)1, 1);
- }
- put_online_cpus();
-}
-
-
-/*H:122 The i386-specific hypercalls simply farm out to the right functions. */
-int lguest_arch_do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
-{
- switch (args->arg0) {
- case LHCALL_LOAD_GDT_ENTRY:
- load_guest_gdt_entry(cpu, args->arg1, args->arg2, args->arg3);
- break;
- case LHCALL_LOAD_IDT_ENTRY:
- load_guest_idt_entry(cpu, args->arg1, args->arg2, args->arg3);
- break;
- case LHCALL_LOAD_TLS:
- guest_load_tls(cpu, args->arg1);
- break;
- default:
- /* Bad Guest. Bad! */
- return -EIO;
- }
- return 0;
-}
-
-/*H:126 i386-specific hypercall initialization: */
-int lguest_arch_init_hypercalls(struct lg_cpu *cpu)
-{
- u32 tsc_speed;
-
- /*
- * The pointer to the Guest's "struct lguest_data" is the only argument.
- * We check that address now.
- */
- if (!lguest_address_ok(cpu->lg, cpu->hcall->arg1,
- sizeof(*cpu->lg->lguest_data)))
- return -EFAULT;
-
- /*
- * Having checked it, we simply set lg->lguest_data to point straight
- * into the Launcher's memory at the right place and then use
- * copy_to_user/from_user from now on, instead of lgread/write. I put
- * this in to show that I'm not immune to writing stupid
- * optimizations.
- */
- cpu->lg->lguest_data = cpu->lg->mem_base + cpu->hcall->arg1;
-
- /*
- * We insist that the Time Stamp Counter exist and doesn't change with
- * cpu frequency. Some devious chip manufacturers decided that TSC
- * changes could be handled in software. I decided that time going
- * backwards might be good for benchmarks, but it's bad for users.
- *
- * We also insist that the TSC be stable: the kernel detects unreliable
- * TSCs for its own purposes, and we use that here.
- */
- if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) && !check_tsc_unstable())
- tsc_speed = tsc_khz;
- else
- tsc_speed = 0;
- if (put_user(tsc_speed, &cpu->lg->lguest_data->tsc_khz))
- return -EFAULT;
-
- /* The interrupt code might not like the system call vector. */
- if (!check_syscall_vector(cpu->lg))
- kill_guest(cpu, "bad syscall vector");
-
- return 0;
-}
-/*:*/
-
-/*L:030
- * Most of the Guest's registers are left alone: we used get_zeroed_page() to
- * allocate the structure, so they will be 0.
- */
-void lguest_arch_setup_regs(struct lg_cpu *cpu, unsigned long start)
-{
- struct lguest_regs *regs = cpu->regs;
-
- /*
- * There are four "segment" registers which the Guest needs to boot:
- * The "code segment" register (cs) refers to the kernel code segment
- * __KERNEL_CS, and the "data", "extra" and "stack" segment registers
- * refer to the kernel data segment __KERNEL_DS.
- *
- * The privilege level is packed into the lower bits. The Guest runs
- * at privilege level 1 (GUEST_PL).
- */
- regs->ds = regs->es = regs->ss = __KERNEL_DS|GUEST_PL;
- regs->cs = __KERNEL_CS|GUEST_PL;
-
- /*
- * The "eflags" register contains miscellaneous flags. Bit 1 (0x002)
- * is supposed to always be "1". Bit 9 (0x200) controls whether
- * interrupts are enabled. We always leave interrupts enabled while
- * running the Guest.
- */
- regs->eflags = X86_EFLAGS_IF | X86_EFLAGS_FIXED;
-
- /*
- * The "Extended Instruction Pointer" register says where the Guest is
- * running.
- */
- regs->eip = start;
-
- /*
- * %esi points to our boot information, at physical address 0, so don't
- * touch it.
- */
-
- /* There are a couple of GDT entries the Guest expects at boot. */
- setup_guest_gdt(cpu);
-}
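The PGE handling removed above follows a reusable kernel pattern: run a tiny helper on every online CPU to flip a CR4 bit, then record the change in the boot CPU's capability mask so the rest of the kernel agrees. A minimal sketch of that pattern outside the lguest context (hypothetical module code, with assumed symbol names), not the removed driver itself:

#include <linux/cpu.h>
#include <linux/smp.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>

static bool had_pge;

/* Runs on each CPU in turn, with preemption disabled. */
static void toggle_pge(void *on)
{
	if (on)
		cr4_set_bits(X86_CR4_PGE);
	else
		cr4_clear_bits(X86_CR4_PGE);
}

static void disable_global_pages(void)
{
	get_online_cpus();		/* keep the set of online CPUs stable */
	if (boot_cpu_has(X86_FEATURE_PGE)) {
		had_pge = true;
		on_each_cpu(toggle_pge, NULL, 1);	/* NULL means "clear" */
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PGE);
	}
	put_online_cpus();
}

Restoring the bit on exit is the mirror image: set_cpu_cap() plus on_each_cpu(toggle_pge, (void *)1, 1), exactly as lguest_arch_host_fini() did.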
diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
deleted file mode 100644
index 40634b0db9f7..000000000000
--- a/drivers/lguest/x86/switcher_32.S
+++ /dev/null
@@ -1,388 +0,0 @@
-/*P:900
- * This is the Switcher: code which sits at 0xFFC00000 (or 0xFFE00000) astride
- * both the Host and Guest to do the low-level Guest<->Host switch. It is as
- * simple as it can be made, but it's naturally very specific to x86.
- *
- * You have now completed Preparation. If this has whetted your appetite; if you
- * are feeling invigorated and refreshed then the next, more challenging stage
- * can be found in "make Guest".
- :*/
-
-/*M:012
- * Lguest is meant to be simple: my rule of thumb is that 1% more LOC must
- * gain at least 1% more performance. Since neither LOC nor performance can be
- * measured beforehand, it generally means implementing a feature then deciding
- * if it's worth it. And once it's implemented, who can say no?
- *
- * This is why I haven't implemented this idea myself. I want to, but I
- * haven't. You could, though.
- *
- * The main place where lguest performance sucks is Guest page faulting. When
- * a Guest userspace process hits an unmapped page we switch back to the Host,
- * walk the page tables, find it's not mapped, switch back to the Guest page
- * fault handler, which calls a hypercall to set the page table entry, then
- * finally returns to userspace. That's two round-trips.
- *
- * If we had a small walker in the Switcher, we could quickly check the Guest
- * page table and if the page isn't mapped, immediately reflect the fault back
- * into the Guest. This means the Switcher would have to know the top of the
- * Guest page table and the page fault handler address.
- *
- * For simplicity, the Guest should only handle the case where the privilege
- * level of the fault is 3 and probably only not present or write faults. It
- * should also detect recursive faults, and hand the original fault to the
- * Host (which is actually really easy).
- *
- * Two questions remain. Would the performance gain outweigh the complexity?
- * And who would write the verse documenting it?
-:*/
-
-/*M:011
- * Lguest64 handles NMI. This gave me NMI envy (until I looked at their
- * code). It's worth doing though, since it would let us use oprofile in the
- * Host when a Guest is running.
-:*/
-
-/*S:100
- * Welcome to the Switcher itself!
- *
- * This file contains the low-level code which changes the CPU to run the Guest
- * code, and returns to the Host when something happens. Understand this, and
- * you understand the heart of our journey.
- *
- * Because this is in assembler rather than C, our tale switches from prose to
- * verse. First I tried limericks:
- *
- * There once was an eax reg,
- * To which our pointer was fed,
- * It needed an add,
- * Which asm-offsets.h had
- * But this limerick is hurting my head.
- *
- * Next I tried haikus, but fitting the required reference to the seasons in
- * every stanza was quickly becoming tiresome:
- *
- * The %eax reg
- * Holds "struct lguest_pages" now:
- * Cherry blossoms fall.
- *
- * Then I started with Heroic Verse, but the rhyming requirement leeched away
- * the content density and led to some uniquely awful oblique rhymes:
- *
- * These constants are coming from struct offsets
- * For use within the asm switcher text.
- *
- * Finally, I settled for something between heroic hexameter, and normal prose
- * with inappropriate line breaks. Anyway, it ain't no Shakespeare.
- */
-
-// Not all kernel headers work from assembler
-// But these ones are needed: the ENTRY() define
-// And constants extracted from struct offsets
-// To avoid magic numbers and breakage:
-// Should they change the compiler can't save us
-// Down here in the depths of assembler code.
-#include <linux/linkage.h>
-#include <asm/asm-offsets.h>
-#include <asm/page.h>
-#include <asm/segment.h>
-#include <asm/lguest.h>
-
-// We mark the start of the code to copy
-// It's placed in .text though it's never run here
-// You'll see the trick macro at the end
-// Which interleaves data and text to effect.
-.text
-ENTRY(start_switcher_text)
-
-// When we reach switch_to_guest we have just left
-// The safe and comforting shores of C code
-// %eax has the "struct lguest_pages" to use
-// Where we save state and still see it from the Guest
-// And %ebx holds the Guest shadow pagetable:
-// Once set we have truly left Host behind.
-ENTRY(switch_to_guest)
- // We told gcc all its regs could fade,
- // Clobbered by our journey into the Guest
- // We could have saved them, if we tried
- // But time is our master and cycles count.
-
- // Segment registers must be saved for the Host
- // We push them on the Host stack for later
- pushl %es
- pushl %ds
- pushl %gs
- pushl %fs
- // But the compiler is fickle, and heeds
- // No warning of %ebp clobbers
- // When frame pointers are used. That register
- // Must be saved and restored or chaos strikes.
- pushl %ebp
- // The Host's stack is done, now save it away
- // In our "struct lguest_pages" at offset
- // Distilled into asm-offsets.h
- movl %esp, LGUEST_PAGES_host_sp(%eax)
-
- // All saved and there's now five steps before us:
- // Stack, GDT, IDT, TSS
- // Then last of all the page tables are flipped.
-
- // Yet beware that our stack pointer must be
- // Always valid lest an NMI hit
- // %edx does the duty here as we juggle
- // %eax is lguest_pages: our stack lies within.
- movl %eax, %edx
- addl $LGUEST_PAGES_regs, %edx
- movl %edx, %esp
-
- // The Guest's GDT we so carefully
- // Placed in the "struct lguest_pages" before
- lgdt LGUEST_PAGES_guest_gdt_desc(%eax)
-
- // The Guest's IDT we did partially
- // Copy to "struct lguest_pages" as well.
- lidt LGUEST_PAGES_guest_idt_desc(%eax)
-
- // The TSS entry which controls traps
- // Must be loaded up with "ltr" now:
- // The GDT entry that TSS uses
- // Changes type when we load it: damn Intel!
- // For after we switch over our page tables
- // That entry will be read-only: we'd crash.
- movl $(GDT_ENTRY_TSS*8), %edx
- ltr %dx
-
- // Look back now, before we take this last step!
- // The Host's TSS entry was also marked used;
- // Let's clear it again for our return.
- // The GDT descriptor of the Host
- // Points to the table after two "size" bytes
- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
- // Clear "used" from type field (byte 5, bit 2)
- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
-
- // Once our page table's switched, the Guest is live!
- // The Host fades as we run this final step.
- // Our "struct lguest_pages" is now read-only.
- movl %ebx, %cr3
-
- // The page table change did one tricky thing:
- // The Guest's register page has been mapped
- // Writable under our %esp (stack) --
- // We can simply pop off all Guest regs.
- popl %eax
- popl %ebx
- popl %ecx
- popl %edx
- popl %esi
- popl %edi
- popl %ebp
- popl %gs
- popl %fs
- popl %ds
- popl %es
-
- // Near the base of the stack lurk two strange fields
- // Which we fill as we exit the Guest
- // These are the trap number and its error
- // We can simply step past them on our way.
- addl $8, %esp
-
- // The last five stack slots hold return address
- // And everything needed to switch privilege
- // From Switcher's level 0 to Guest's 1,
- // And the stack where the Guest had last left it.
- // Interrupts are turned back on: we are Guest.
- iret
-
-// We tread two paths to switch back to the Host
-// Yet both must save Guest state and restore Host
-// So we put the routine in a macro.
-#define SWITCH_TO_HOST \
- /* We save the Guest state: all registers first \
- * Laid out just as "struct lguest_regs" defines */ \
- pushl %es; \
- pushl %ds; \
- pushl %fs; \
- pushl %gs; \
- pushl %ebp; \
- pushl %edi; \
- pushl %esi; \
- pushl %edx; \
- pushl %ecx; \
- pushl %ebx; \
- pushl %eax; \
- /* Our stack and our code are using segments \
- * Set in the TSS and IDT \
- * Yet if we were to touch data we'd use \
- * Whatever data segment the Guest had. \
- * Load the lguest ds segment for now. */ \
- movl $(LGUEST_DS), %eax; \
- movl %eax, %ds; \
- /* So where are we? Which CPU, which struct? \
- * The stack is our clue: our TSS starts \
- * It at the end of "struct lguest_pages". \
- * Or we may have stumbled while restoring \
- * Our Guest segment regs while in switch_to_guest, \
- * The fault pushed atop that part-unwound stack. \
- * If we round the stack down to the page start \
- * We're at the start of "struct lguest_pages". */ \
- movl %esp, %eax; \
- andl $(~(1 << PAGE_SHIFT - 1)), %eax; \
- /* Save our trap number: the switch will obscure it \
- * (In the Host the Guest regs are not mapped here) \
- * %ebx holds it safe for deliver_to_host */ \
- movl LGUEST_PAGES_regs_trapnum(%eax), %ebx; \
- /* The Host GDT, IDT and stack! \
- * All these lie safely hidden from the Guest: \
- * We must return to the Host page tables \
- * (Hence that was saved in struct lguest_pages) */ \
- movl LGUEST_PAGES_host_cr3(%eax), %edx; \
- movl %edx, %cr3; \
- /* As before, when we looked back at the Host \
- * As we left and marked TSS unused \
- * So must we now for the Guest left behind. */ \
- andb $0xFD, (LGUEST_PAGES_guest_gdt+GDT_ENTRY_TSS*8+5)(%eax); \
- /* Switch to Host's GDT, IDT. */ \
- lgdt LGUEST_PAGES_host_gdt_desc(%eax); \
- lidt LGUEST_PAGES_host_idt_desc(%eax); \
- /* Restore the Host's stack where its saved regs lie */ \
- movl LGUEST_PAGES_host_sp(%eax), %esp; \
- /* Last the TSS: our Host is returned */ \
- movl $(GDT_ENTRY_TSS*8), %edx; \
- ltr %dx; \
- /* Restore now the regs saved right at the first. */ \
- popl %ebp; \
- popl %fs; \
- popl %gs; \
- popl %ds; \
- popl %es
-
-// The first path is trod when the Guest has trapped:
-// (Which trap it was has been pushed on the stack).
-// We need only switch back, and the Host will decode
-// Why we came home, and what needs to be done.
-return_to_host:
- SWITCH_TO_HOST
- iret
-
-// We are led to the second path like so:
-// An interrupt, with some cause external
-// Has ajerked us rudely from the Guest's code
-// Again we must return home to the Host
-deliver_to_host:
- SWITCH_TO_HOST
- // But now we must go home via that place
- // Where that interrupt was supposed to go
- // Had we not been ensconced, running the Guest.
- // Here we see the trickiness of run_guest_once():
- // The Host stack is formed like an interrupt
- // With EIP, CS and EFLAGS layered.
- // Interrupt handlers end with "iret"
- // And that will take us home at long long last.
-
- // But first we must find the handler to call!
- // The IDT descriptor for the Host
- // Has two bytes for size, and four for address:
- // %edx will hold it for us for now.
- movl (LGUEST_PAGES_host_idt_desc+2)(%eax), %edx
- // We now know the table address we need,
- // And saved the trap's number inside %ebx.
- // Yet the pointer to the handler is smeared
- // Across the bits of the table entry.
- // What oracle can tell us how to extract
- // From such a convoluted encoding?
- // I consulted gcc, and it gave
- // These instructions, which I gladly credit:
- leal (%edx,%ebx,8), %eax
- movzwl (%eax),%edx
- movl 4(%eax), %eax
- xorw %ax, %ax
- orl %eax, %edx
- // Now the address of the handler's in %edx
- // We call it now: its "iret" drops us home.
- jmp *%edx
-
-// Every interrupt can come to us here
-// But we must truly tell each apart.
-// They number two hundred and fifty six
-// And each must land in a different spot,
-// Push its number on stack, and join the stream.
-
-// And worse, a mere six of the traps stand apart
-// And push on their stack an addition:
-// An error number, thirty two bits long
-// So we punish the other two fifty
-// And make them push a zero so they match.
-
-// Yet two fifty six entries is long
-// And all will look most the same as the last
-// So we create a macro which can make
-// As many entries as we need to fill.
-
-// Note the change to .data then .text:
-// We plant the address of each entry
-// Into a (data) table for the Host
-// To know where each Guest interrupt should go.
-.macro IRQ_STUB N TARGET
- .data; .long 1f; .text; 1:
- // Trap eight, ten through fourteen and seventeen
- // Supply an error number. Else zero.
- .if (\N <> 8) && (\N < 10 || \N > 14) && (\N <> 17)
- pushl $0
- .endif
- pushl $\N
- jmp \TARGET
- ALIGN
-.endm
-
-// This macro creates numerous entries
-// Using GAS macros which out-power C's.
-.macro IRQ_STUBS FIRST LAST TARGET
- irq=\FIRST
- .rept \LAST-\FIRST+1
- IRQ_STUB irq \TARGET
- irq=irq+1
- .endr
-.endm
-
-// Here's the marker for our pointer table
-// Laid in the data section just before
-// Each macro places the address of code
-// Forming an array: each one points to text
-// Which handles interrupt in its turn.
-.data
-.global default_idt_entries
-default_idt_entries:
-.text
- // The first two traps go straight back to the Host
- IRQ_STUBS 0 1 return_to_host
- // We'll say nothing, yet, about NMI
- IRQ_STUB 2 handle_nmi
- // Other traps also return to the Host
- IRQ_STUBS 3 31 return_to_host
- // All interrupts go via their handlers
- IRQ_STUBS 32 127 deliver_to_host
- // 'Cept system calls coming from userspace
- // Are to go to the Guest, never the Host.
- IRQ_STUB 128 return_to_host
- IRQ_STUBS 129 255 deliver_to_host
-
-// The NMI, what a fabulous beast
-// Which swoops in and stops us no matter that
-// We're suspended between heaven and hell,
-// (Or more likely between the Host and Guest)
-// When in it comes! We are dazed and confused
-// So we do the simplest thing which one can.
-// Though we've pushed the trap number and zero
-// We discard them, return, and hope we live.
-handle_nmi:
- addl $8, %esp
- iret
-
-// We are done; all that's left is Mastery
-// And "make Mastery" is a journey long
-// Designed to make your fingers itch to code.
-
-// Here ends the text, the file and poem.
-ENTRY(end_switcher_text)
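The gcc-credited instruction sequence in deliver_to_host is just reassembling a split pointer: a 32-bit interrupt gate keeps the handler offset in the low 16 bits of its first word and the high 16 bits of its second word. Rewritten as a hedged C sketch (illustrative struct layout per the Intel SDM, not kernel code):

#include <stdint.h>

/* One 8-byte interrupt gate in a 32-bit IDT. */
struct idt_gate32 {
	uint16_t offset_low;	/* handler offset, bits 0..15  */
	uint16_t segment;	/* code segment selector       */
	uint16_t flags;		/* type, DPL, present          */
	uint16_t offset_high;	/* handler offset, bits 16..31 */
};

static uint32_t gate_handler(const struct idt_gate32 *idt, unsigned int trapnum)
{
	const struct idt_gate32 *g = &idt[trapnum];

	/* Same effect as the movzwl/xorw/orl dance in the assembler above. */
	return ((uint32_t)g->offset_high << 16) | g->offset_low;
}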
diff --git a/drivers/lightnvm/pblk-rb.c b/drivers/lightnvm/pblk-rb.c
index 5ecc154f6831..9bc32578a766 100644
--- a/drivers/lightnvm/pblk-rb.c
+++ b/drivers/lightnvm/pblk-rb.c
@@ -657,7 +657,7 @@ try:
* be directed to disk.
*/
int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
- struct ppa_addr ppa, int bio_iter)
+ struct ppa_addr ppa, int bio_iter, bool advanced_bio)
{
struct pblk *pblk = container_of(rb, struct pblk, rwb);
struct pblk_rb_entry *entry;
@@ -694,7 +694,7 @@ int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
* filled with data from the cache). If part of the data resides on the
* media, we will read later on
*/
- if (unlikely(!bio->bi_iter.bi_idx))
+ if (unlikely(!advanced_bio))
bio_advance(bio, bio_iter * PBLK_EXPOSED_PAGE_SIZE);
data = bio_data(bio);
diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c
index 4e5c48f3de62..d682e89e6493 100644
--- a/drivers/lightnvm/pblk-read.c
+++ b/drivers/lightnvm/pblk-read.c
@@ -26,7 +26,7 @@
*/
static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
sector_t lba, struct ppa_addr ppa,
- int bio_iter)
+ int bio_iter, bool advanced_bio)
{
#ifdef CONFIG_NVM_DEBUG
/* Callers must ensure that the ppa points to a cache address */
@@ -34,7 +34,8 @@ static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
BUG_ON(!pblk_addr_in_cache(ppa));
#endif
- return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa, bio_iter);
+ return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa,
+ bio_iter, advanced_bio);
}
static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
@@ -44,7 +45,7 @@ static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS];
sector_t blba = pblk_get_lba(bio);
int nr_secs = rqd->nr_ppas;
- int advanced_bio = 0;
+ bool advanced_bio = false;
int i, j = 0;
/* logic error: lba out-of-bounds. Ignore read request */
@@ -62,19 +63,26 @@ static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
retry:
if (pblk_ppa_empty(p)) {
WARN_ON(test_and_set_bit(i, read_bitmap));
- continue;
+
+ if (unlikely(!advanced_bio)) {
+ bio_advance(bio, (i) * PBLK_EXPOSED_PAGE_SIZE);
+ advanced_bio = true;
+ }
+
+ goto next;
}
/* Try to read from write buffer. The address is later checked
* on the write buffer to prevent retrieving overwritten data.
*/
if (pblk_addr_in_cache(p)) {
- if (!pblk_read_from_cache(pblk, bio, lba, p, i)) {
+ if (!pblk_read_from_cache(pblk, bio, lba, p, i,
+ advanced_bio)) {
pblk_lookup_l2p_seq(pblk, &p, lba, 1);
goto retry;
}
WARN_ON(test_and_set_bit(i, read_bitmap));
- advanced_bio = 1;
+ advanced_bio = true;
#ifdef CONFIG_NVM_DEBUG
atomic_long_inc(&pblk->cache_reads);
#endif
@@ -83,6 +91,7 @@ retry:
rqd->ppa_list[j++] = p;
}
+next:
if (advanced_bio)
bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
}
@@ -282,7 +291,7 @@ retry:
* write buffer to prevent retrieving overwritten data.
*/
if (pblk_addr_in_cache(ppa)) {
- if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0)) {
+ if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0, 1)) {
pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
goto retry;
}
diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h
index 0c5692cc2f60..67e623bd5c2d 100644
--- a/drivers/lightnvm/pblk.h
+++ b/drivers/lightnvm/pblk.h
@@ -670,7 +670,7 @@ unsigned int pblk_rb_read_to_bio_list(struct pblk_rb *rb, struct bio *bio,
struct list_head *list,
unsigned int max);
int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
- struct ppa_addr ppa, int bio_iter);
+ struct ppa_addr ppa, int bio_iter, bool advanced_bio);
unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int entries);
unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags);
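The new advanced_bio argument threads one piece of state through the read path: the bio may be advanced past sectors already satisfied (empty or cached) at most once, and bi_idx is no longer a reliable way to infer whether that has happened. A hedged sketch of the rule, written as a hypothetical helper rather than the driver code itself:

/* Skip the first 'nr_done' exposed pages of the bio exactly once. */
static void pblk_advance_once(struct bio *bio, unsigned int nr_done,
			      bool *advanced)
{
	if (!*advanced) {
		bio_advance(bio, nr_done * PBLK_EXPOSED_PAGE_SIZE);
		*advanced = true;
	}
}

Callers that know the bio is already positioned (the single-sector path) simply pass true up front, which is why pblk_read_from_cache() now takes the extra flag.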
diff --git a/drivers/macintosh/macio_sysfs.c b/drivers/macintosh/macio_sysfs.c
index 2445274f7e4b..281f5345661e 100644
--- a/drivers/macintosh/macio_sysfs.c
+++ b/drivers/macintosh/macio_sysfs.c
@@ -52,7 +52,7 @@ static ssize_t devspec_show(struct device *dev,
struct platform_device *ofdev;
ofdev = to_platform_device(dev);
- return sprintf(buf, "%s\n", ofdev->dev.of_node->full_name);
+ return sprintf(buf, "%pOF\n", ofdev->dev.of_node);
}
static DEVICE_ATTR_RO(modalias);
static DEVICE_ATTR_RO(devspec);
diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
index e199fd6c71ce..910b5b6f96b1 100644
--- a/drivers/macintosh/rack-meter.c
+++ b/drivers/macintosh/rack-meter.c
@@ -411,16 +411,16 @@ static int rackmeter_probe(struct macio_dev* mdev,
#if 0 /* Use that when i2s-a is finally an mdev per-se */
if (macio_resource_count(mdev) < 2 || macio_irq_count(mdev) < 2) {
printk(KERN_ERR
- "rackmeter: found match but lacks resources: %s"
+ "rackmeter: found match but lacks resources: %pOF"
" (%d resources, %d interrupts)\n",
- mdev->ofdev.node->full_name);
+ mdev->ofdev.dev.of_node);
rc = -ENXIO;
goto bail_free;
}
if (macio_request_resources(mdev, "rackmeter")) {
printk(KERN_ERR
- "rackmeter: failed to request resources: %s\n",
- mdev->ofdev.node->full_name);
+ "rackmeter: failed to request resources: %pOF\n",
+ mdev->ofdev.dev.of_node);
rc = -EBUSY;
goto bail_free;
}
@@ -431,8 +431,8 @@ static int rackmeter_probe(struct macio_dev* mdev,
of_address_to_resource(i2s, 0, &ri2s) ||
of_address_to_resource(i2s, 1, &rdma)) {
printk(KERN_ERR
- "rackmeter: found match but lacks resources: %s",
- mdev->ofdev.dev.of_node->full_name);
+ "rackmeter: found match but lacks resources: %pOF",
+ mdev->ofdev.dev.of_node);
rc = -ENXIO;
goto bail_free;
}
@@ -579,7 +579,7 @@ static int rackmeter_shutdown(struct macio_dev* mdev)
return 0;
}
-static struct of_device_id rackmeter_match[] = {
+static const struct of_device_id rackmeter_match[] = {
{ .name = "i2s" },
{ }
};
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 1ac66421877a..ea9bdc85a21d 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -589,14 +589,14 @@ static int smu_late_init(void)
if (smu->db_node) {
smu->db_irq = irq_of_parse_and_map(smu->db_node, 0);
if (!smu->db_irq)
- printk(KERN_ERR "smu: failed to map irq for node %s\n",
- smu->db_node->full_name);
+ printk(KERN_ERR "smu: failed to map irq for node %pOF\n",
+ smu->db_node);
}
if (smu->msg_node) {
smu->msg_irq = irq_of_parse_and_map(smu->msg_node, 0);
if (!smu->msg_irq)
- printk(KERN_ERR "smu: failed to map irq for node %s\n",
- smu->msg_node->full_name);
+ printk(KERN_ERR "smu: failed to map irq for node %pOF\n",
+ smu->msg_node);
}
/*
diff --git a/drivers/macintosh/via-cuda.c b/drivers/macintosh/via-cuda.c
index c60415958dfe..147da4edd021 100644
--- a/drivers/macintosh/via-cuda.c
+++ b/drivers/macintosh/via-cuda.c
@@ -297,8 +297,8 @@ static int __init via_cuda_start(void)
#else
cuda_irq = irq_of_parse_and_map(vias, 0);
if (!cuda_irq) {
- printk(KERN_ERR "via-cuda: can't map interrupts for %s\n",
- vias->full_name);
+ printk(KERN_ERR "via-cuda: can't map interrupts for %pOF\n",
+ vias);
return -ENODEV;
}
#endif
diff --git a/drivers/macintosh/windfarm_cpufreq_clamp.c b/drivers/macintosh/windfarm_cpufreq_clamp.c
index 72d1fdfe02a5..2626990331dc 100644
--- a/drivers/macintosh/windfarm_cpufreq_clamp.c
+++ b/drivers/macintosh/windfarm_cpufreq_clamp.c
@@ -63,7 +63,7 @@ static s32 clamp_max(struct wf_control *ct)
return 1;
}
-static struct wf_control_ops clamp_ops = {
+static const struct wf_control_ops clamp_ops = {
.set_value = clamp_set,
.get_value = clamp_get,
.get_min = clamp_min,
diff --git a/drivers/macintosh/windfarm_fcu_controls.c b/drivers/macintosh/windfarm_fcu_controls.c
index 0226b796a21c..fab7a21e9577 100644
--- a/drivers/macintosh/windfarm_fcu_controls.c
+++ b/drivers/macintosh/windfarm_fcu_controls.c
@@ -470,8 +470,8 @@ static void wf_fcu_lookup_fans(struct wf_fcu_priv *pv)
id = ((*reg) - 0x30) / 2;
if (id > 7) {
pr_warning("wf_fcu: Can't parse "
- "fan ID in device-tree for %s\n",
- np->full_name);
+ "fan ID in device-tree for %pOF\n",
+ np);
break;
}
wf_fcu_add_fan(pv, name, type, id);
diff --git a/drivers/macintosh/windfarm_lm75_sensor.c b/drivers/macintosh/windfarm_lm75_sensor.c
index 590214ba736c..6cdfe714901d 100644
--- a/drivers/macintosh/windfarm_lm75_sensor.c
+++ b/drivers/macintosh/windfarm_lm75_sensor.c
@@ -82,7 +82,7 @@ static void wf_lm75_release(struct wf_sensor *sr)
kfree(lm);
}
-static struct wf_sensor_ops wf_lm75_ops = {
+static const struct wf_sensor_ops wf_lm75_ops = {
.get_value = wf_lm75_get,
.release = wf_lm75_release,
.owner = THIS_MODULE,
diff --git a/drivers/macintosh/windfarm_lm87_sensor.c b/drivers/macintosh/windfarm_lm87_sensor.c
index c071aab79dd1..35aa571d498a 100644
--- a/drivers/macintosh/windfarm_lm87_sensor.c
+++ b/drivers/macintosh/windfarm_lm87_sensor.c
@@ -91,7 +91,7 @@ static void wf_lm87_release(struct wf_sensor *sr)
kfree(lm);
}
-static struct wf_sensor_ops wf_lm87_ops = {
+static const struct wf_sensor_ops wf_lm87_ops = {
.get_value = wf_lm87_get,
.release = wf_lm87_release,
.owner = THIS_MODULE,
@@ -126,8 +126,8 @@ static int wf_lm87_probe(struct i2c_client *client,
}
}
if (!name) {
- pr_warning("wf_lm87: Unsupported sensor %s\n",
- client->dev.of_node->full_name);
+ pr_warning("wf_lm87: Unsupported sensor %pOF\n",
+ client->dev.of_node);
return -ENODEV;
}
diff --git a/drivers/macintosh/windfarm_max6690_sensor.c b/drivers/macintosh/windfarm_max6690_sensor.c
index 87e439b10318..6ad035e13c08 100644
--- a/drivers/macintosh/windfarm_max6690_sensor.c
+++ b/drivers/macintosh/windfarm_max6690_sensor.c
@@ -55,7 +55,7 @@ static void wf_max6690_release(struct wf_sensor *sr)
kfree(max);
}
-static struct wf_sensor_ops wf_max6690_ops = {
+static const struct wf_sensor_ops wf_max6690_ops = {
.get_value = wf_max6690_get,
.release = wf_max6690_release,
.owner = THIS_MODULE,
diff --git a/drivers/macintosh/windfarm_rm31.c b/drivers/macintosh/windfarm_rm31.c
index bdfcb8a8bfbb..a0cd9c7f9835 100644
--- a/drivers/macintosh/windfarm_rm31.c
+++ b/drivers/macintosh/windfarm_rm31.c
@@ -338,7 +338,7 @@ static int cpu_setup_pid(int cpu)
}
/* Backside/U3 fan */
-static struct wf_pid_param backside_param = {
+static const struct wf_pid_param backside_param = {
.interval = 1,
.history_len = 2,
.gd = 0x00500000,
@@ -351,7 +351,7 @@ static struct wf_pid_param backside_param = {
};
/* DIMMs temperature (clamp the backside fan) */
-static struct wf_pid_param dimms_param = {
+static const struct wf_pid_param dimms_param = {
.interval = 1,
.history_len = 20,
.gd = 0,
diff --git a/drivers/macintosh/windfarm_smu_controls.c b/drivers/macintosh/windfarm_smu_controls.c
index c155a54e8638..d174c7437337 100644
--- a/drivers/macintosh/windfarm_smu_controls.c
+++ b/drivers/macintosh/windfarm_smu_controls.c
@@ -145,7 +145,7 @@ static s32 smu_fan_max(struct wf_control *ct)
return fct->max;
}
-static struct wf_control_ops smu_fan_ops = {
+static const struct wf_control_ops smu_fan_ops = {
.set_value = smu_fan_set,
.get_value = smu_fan_get,
.get_min = smu_fan_min,
diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c
index ad6223e88340..da7f4fc1a51d 100644
--- a/drivers/macintosh/windfarm_smu_sat.c
+++ b/drivers/macintosh/windfarm_smu_sat.c
@@ -195,7 +195,7 @@ static void wf_sat_sensor_release(struct wf_sensor *sr)
kref_put(&sat->ref, wf_sat_release);
}
-static struct wf_sensor_ops wf_sat_ops = {
+static const struct wf_sensor_ops wf_sat_ops = {
.get_value = wf_sat_sensor_get,
.release = wf_sat_sensor_release,
.owner = THIS_MODULE,
@@ -248,7 +248,7 @@ static int wf_sat_probe(struct i2c_client *client,
core = loc[5] - '0';
if (chip > 1 || core > 1) {
printk(KERN_ERR "wf_sat_create: don't understand "
- "location %s for %s\n", loc, child->full_name);
+ "location %s for %pOF\n", loc, child);
continue;
}
cpu = 2 * chip + core;
diff --git a/drivers/macintosh/windfarm_smu_sensors.c b/drivers/macintosh/windfarm_smu_sensors.c
index 1cc4e4953d89..172fd267dcf6 100644
--- a/drivers/macintosh/windfarm_smu_sensors.c
+++ b/drivers/macintosh/windfarm_smu_sensors.c
@@ -172,22 +172,22 @@ static int smu_slotspow_get(struct wf_sensor *sr, s32 *value)
}
-static struct wf_sensor_ops smu_cputemp_ops = {
+static const struct wf_sensor_ops smu_cputemp_ops = {
.get_value = smu_cputemp_get,
.release = smu_ads_release,
.owner = THIS_MODULE,
};
-static struct wf_sensor_ops smu_cpuamp_ops = {
+static const struct wf_sensor_ops smu_cpuamp_ops = {
.get_value = smu_cpuamp_get,
.release = smu_ads_release,
.owner = THIS_MODULE,
};
-static struct wf_sensor_ops smu_cpuvolt_ops = {
+static const struct wf_sensor_ops smu_cpuvolt_ops = {
.get_value = smu_cpuvolt_get,
.release = smu_ads_release,
.owner = THIS_MODULE,
};
-static struct wf_sensor_ops smu_slotspow_ops = {
+static const struct wf_sensor_ops smu_slotspow_ops = {
.get_value = smu_slotspow_get,
.release = smu_ads_release,
.owner = THIS_MODULE,
@@ -327,7 +327,7 @@ static int smu_cpu_power_get(struct wf_sensor *sr, s32 *value)
return 0;
}
-static struct wf_sensor_ops smu_cpu_power_ops = {
+static const struct wf_sensor_ops smu_cpu_power_ops = {
.get_value = smu_cpu_power_get,
.release = smu_cpu_power_release,
.owner = THIS_MODULE,
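Apart from the constified windfarm ops tables, the macintosh hunks above are the same mechanical conversion: rather than printing of_node->full_name, the device_node pointer itself is handed to printk and rendered with the %pOF format specifier, which prints the node's full device-tree path. A typical call site then reduces to:

#include <linux/of.h>
#include <linux/printk.h>

static void report_node(struct device_node *np)
{
	/* %pOF expands to the node's full path, e.g. /soc/i2c@3d000 */
	pr_info("probing device-tree node %pOF\n", np);
}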
diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c
index da67882caa7b..ae6146311934 100644
--- a/drivers/mailbox/bcm-flexrm-mailbox.c
+++ b/drivers/mailbox/bcm-flexrm-mailbox.c
@@ -17,12 +17,14 @@
#include <asm/barrier.h>
#include <asm/byteorder.h>
+#include <linux/atomic.h>
+#include <linux/bitmap.h>
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
-#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mailbox_controller.h>
@@ -95,7 +97,7 @@
/* Register RING_CMPL_START_ADDR fields */
#define CMPL_START_ADDR_VALUE(pa) \
- ((u32)((((u64)(pa)) >> RING_CMPL_ALIGN_ORDER) & 0x03ffffff))
+ ((u32)((((u64)(pa)) >> RING_CMPL_ALIGN_ORDER) & 0x07ffffff))
/* Register RING_CONTROL fields */
#define CONTROL_MASK_DISABLE_CONTROL 12
@@ -260,18 +262,21 @@ struct flexrm_ring {
void __iomem *regs;
bool irq_requested;
unsigned int irq;
+ cpumask_t irq_aff_hint;
unsigned int msi_timer_val;
unsigned int msi_count_threshold;
- struct ida requests_ida;
struct brcm_message *requests[RING_MAX_REQ_COUNT];
void *bd_base;
dma_addr_t bd_dma_base;
u32 bd_write_offset;
void *cmpl_base;
dma_addr_t cmpl_dma_base;
+ /* Atomic stats */
+ atomic_t msg_send_count;
+ atomic_t msg_cmpl_count;
/* Protected members */
spinlock_t lock;
- struct brcm_message *last_pending_msg;
+ DECLARE_BITMAP(requests_bmap, RING_MAX_REQ_COUNT);
u32 cmpl_read_offset;
};
@@ -282,6 +287,9 @@ struct flexrm_mbox {
struct flexrm_ring *rings;
struct dma_pool *bd_pool;
struct dma_pool *cmpl_pool;
+ struct dentry *root;
+ struct dentry *config;
+ struct dentry *stats;
struct mbox_controller controller;
};
@@ -912,6 +920,62 @@ static void *flexrm_write_descs(struct brcm_message *msg, u32 nhcnt,
/* ====== FlexRM driver helper routines ===== */
+static void flexrm_write_config_in_seqfile(struct flexrm_mbox *mbox,
+ struct seq_file *file)
+{
+ int i;
+ const char *state;
+ struct flexrm_ring *ring;
+
+ seq_printf(file, "%-5s %-9s %-18s %-10s %-18s %-10s\n",
+ "Ring#", "State", "BD_Addr", "BD_Size",
+ "Cmpl_Addr", "Cmpl_Size");
+
+ for (i = 0; i < mbox->num_rings; i++) {
+ ring = &mbox->rings[i];
+ if (readl(ring->regs + RING_CONTROL) &
+ BIT(CONTROL_ACTIVE_SHIFT))
+ state = "active";
+ else
+ state = "inactive";
+ seq_printf(file,
+ "%-5d %-9s 0x%016llx 0x%08x 0x%016llx 0x%08x\n",
+ ring->num, state,
+ (unsigned long long)ring->bd_dma_base,
+ (u32)RING_BD_SIZE,
+ (unsigned long long)ring->cmpl_dma_base,
+ (u32)RING_CMPL_SIZE);
+ }
+}
+
+static void flexrm_write_stats_in_seqfile(struct flexrm_mbox *mbox,
+ struct seq_file *file)
+{
+ int i;
+ u32 val, bd_read_offset;
+ struct flexrm_ring *ring;
+
+ seq_printf(file, "%-5s %-10s %-10s %-10s %-11s %-11s\n",
+ "Ring#", "BD_Read", "BD_Write",
+ "Cmpl_Read", "Submitted", "Completed");
+
+ for (i = 0; i < mbox->num_rings; i++) {
+ ring = &mbox->rings[i];
+ bd_read_offset = readl_relaxed(ring->regs + RING_BD_READ_PTR);
+ val = readl_relaxed(ring->regs + RING_BD_START_ADDR);
+ bd_read_offset *= RING_DESC_SIZE;
+ bd_read_offset += (u32)(BD_START_ADDR_DECODE(val) -
+ ring->bd_dma_base);
+ seq_printf(file, "%-5d 0x%08x 0x%08x 0x%08x %-11d %-11d\n",
+ ring->num,
+ (u32)bd_read_offset,
+ (u32)ring->bd_write_offset,
+ (u32)ring->cmpl_read_offset,
+ (u32)atomic_read(&ring->msg_send_count),
+ (u32)atomic_read(&ring->msg_cmpl_count));
+ }
+}
+
static int flexrm_new_request(struct flexrm_ring *ring,
struct brcm_message *batch_msg,
struct brcm_message *msg)
@@ -929,38 +993,24 @@ static int flexrm_new_request(struct flexrm_ring *ring,
msg->error = 0;
/* If no requests possible then save data pointer and goto done. */
- reqid = ida_simple_get(&ring->requests_ida, 0,
- RING_MAX_REQ_COUNT, GFP_KERNEL);
- if (reqid < 0) {
- spin_lock_irqsave(&ring->lock, flags);
- if (batch_msg)
- ring->last_pending_msg = batch_msg;
- else
- ring->last_pending_msg = msg;
- spin_unlock_irqrestore(&ring->lock, flags);
- return 0;
- }
+ spin_lock_irqsave(&ring->lock, flags);
+ reqid = bitmap_find_free_region(ring->requests_bmap,
+ RING_MAX_REQ_COUNT, 0);
+ spin_unlock_irqrestore(&ring->lock, flags);
+ if (reqid < 0)
+ return -ENOSPC;
ring->requests[reqid] = msg;
/* Do DMA mappings for the message */
ret = flexrm_dma_map(ring->mbox->dev, msg);
if (ret < 0) {
ring->requests[reqid] = NULL;
- ida_simple_remove(&ring->requests_ida, reqid);
+ spin_lock_irqsave(&ring->lock, flags);
+ bitmap_release_region(ring->requests_bmap, reqid, 0);
+ spin_unlock_irqrestore(&ring->lock, flags);
return ret;
}
- /* If last_pending_msg is already set then goto done with error */
- spin_lock_irqsave(&ring->lock, flags);
- if (ring->last_pending_msg)
- ret = -ENOSPC;
- spin_unlock_irqrestore(&ring->lock, flags);
- if (ret < 0) {
- dev_warn(ring->mbox->dev, "no space in ring %d\n", ring->num);
- exit_cleanup = true;
- goto exit;
- }
-
/* Determine current HW BD read offset */
read_offset = readl_relaxed(ring->regs + RING_BD_READ_PTR);
val = readl_relaxed(ring->regs + RING_BD_START_ADDR);
@@ -987,13 +1037,7 @@ static int flexrm_new_request(struct flexrm_ring *ring,
break;
}
if (count) {
- spin_lock_irqsave(&ring->lock, flags);
- if (batch_msg)
- ring->last_pending_msg = batch_msg;
- else
- ring->last_pending_msg = msg;
- spin_unlock_irqrestore(&ring->lock, flags);
- ret = 0;
+ ret = -ENOSPC;
exit_cleanup = true;
goto exit;
}
@@ -1012,6 +1056,9 @@ static int flexrm_new_request(struct flexrm_ring *ring,
/* Save ring BD write offset */
ring->bd_write_offset = (unsigned long)(next - ring->bd_base);
+ /* Increment number of messages sent */
+ atomic_inc_return(&ring->msg_send_count);
+
exit:
/* Update error status in message */
msg->error = ret;
@@ -1020,7 +1067,9 @@ exit:
if (exit_cleanup) {
flexrm_dma_unmap(ring->mbox->dev, msg);
ring->requests[reqid] = NULL;
- ida_simple_remove(&ring->requests_ida, reqid);
+ spin_lock_irqsave(&ring->lock, flags);
+ bitmap_release_region(ring->requests_bmap, reqid, 0);
+ spin_unlock_irqrestore(&ring->lock, flags);
}
return ret;
@@ -1037,12 +1086,6 @@ static int flexrm_process_completions(struct flexrm_ring *ring)
spin_lock_irqsave(&ring->lock, flags);
- /* Check last_pending_msg */
- if (ring->last_pending_msg) {
- msg = ring->last_pending_msg;
- ring->last_pending_msg = NULL;
- }
-
/*
* Get current completion read and write offset
*
@@ -1058,10 +1101,6 @@ static int flexrm_process_completions(struct flexrm_ring *ring)
spin_unlock_irqrestore(&ring->lock, flags);
- /* If last_pending_msg was set then queue it back */
- if (msg)
- mbox_send_message(chan, msg);
-
/* For each completed request notify mailbox clients */
reqid = 0;
while (cmpl_read_offset != cmpl_write_offset) {
@@ -1095,7 +1134,9 @@ static int flexrm_process_completions(struct flexrm_ring *ring)
/* Release reqid for recycling */
ring->requests[reqid] = NULL;
- ida_simple_remove(&ring->requests_ida, reqid);
+ spin_lock_irqsave(&ring->lock, flags);
+ bitmap_release_region(ring->requests_bmap, reqid, 0);
+ spin_unlock_irqrestore(&ring->lock, flags);
/* Unmap DMA mappings */
flexrm_dma_unmap(ring->mbox->dev, msg);
@@ -1105,12 +1146,37 @@ static int flexrm_process_completions(struct flexrm_ring *ring)
mbox_chan_received_data(chan, msg);
/* Increment number of completions processed */
+ atomic_inc_return(&ring->msg_cmpl_count);
count++;
}
return count;
}
+/* ====== FlexRM Debugfs callbacks ====== */
+
+static int flexrm_debugfs_conf_show(struct seq_file *file, void *offset)
+{
+ struct platform_device *pdev = to_platform_device(file->private);
+ struct flexrm_mbox *mbox = platform_get_drvdata(pdev);
+
+ /* Write config in file */
+ flexrm_write_config_in_seqfile(mbox, file);
+
+ return 0;
+}
+
+static int flexrm_debugfs_stats_show(struct seq_file *file, void *offset)
+{
+ struct platform_device *pdev = to_platform_device(file->private);
+ struct flexrm_mbox *mbox = platform_get_drvdata(pdev);
+
+ /* Write stats in file */
+ flexrm_write_stats_in_seqfile(mbox, file);
+
+ return 0;
+}
+
/* ====== FlexRM interrupt handler ===== */
static irqreturn_t flexrm_irq_event(int irq, void *dev_id)
@@ -1217,6 +1283,18 @@ static int flexrm_startup(struct mbox_chan *chan)
}
ring->irq_requested = true;
+ /* Set IRQ affinity hint */
+ ring->irq_aff_hint = CPU_MASK_NONE;
+ val = ring->mbox->num_rings;
+ val = (num_online_cpus() < val) ? val / num_online_cpus() : 1;
+ cpumask_set_cpu((ring->num / val) % num_online_cpus(),
+ &ring->irq_aff_hint);
+ ret = irq_set_affinity_hint(ring->irq, &ring->irq_aff_hint);
+ if (ret) {
+ dev_err(ring->mbox->dev, "failed to set IRQ affinity hint\n");
+ goto fail_free_irq;
+ }
+
/* Disable/inactivate ring */
writel_relaxed(0x0, ring->regs + RING_CONTROL);
@@ -1233,9 +1311,6 @@ static int flexrm_startup(struct mbox_chan *chan)
val = CMPL_START_ADDR_VALUE(ring->cmpl_dma_base);
writel_relaxed(val, ring->regs + RING_CMPL_START_ADDR);
- /* Ensure last pending message is cleared */
- ring->last_pending_msg = NULL;
-
/* Completion read pointer will be same as HW write pointer */
ring->cmpl_read_offset =
readl_relaxed(ring->regs + RING_CMPL_WRITE_PTR);
@@ -1259,8 +1334,15 @@ static int flexrm_startup(struct mbox_chan *chan)
val = BIT(CONTROL_ACTIVE_SHIFT);
writel_relaxed(val, ring->regs + RING_CONTROL);
+ /* Reset stats to zero */
+ atomic_set(&ring->msg_send_count, 0);
+ atomic_set(&ring->msg_cmpl_count, 0);
+
return 0;
+fail_free_irq:
+ free_irq(ring->irq, ring);
+ ring->irq_requested = false;
fail_free_cmpl_memory:
dma_pool_free(ring->mbox->cmpl_pool,
ring->cmpl_base, ring->cmpl_dma_base);
@@ -1302,7 +1384,6 @@ static void flexrm_shutdown(struct mbox_chan *chan)
/* Release reqid for recycling */
ring->requests[reqid] = NULL;
- ida_simple_remove(&ring->requests_ida, reqid);
/* Unmap DMA mappings */
flexrm_dma_unmap(ring->mbox->dev, msg);
@@ -1312,8 +1393,12 @@ static void flexrm_shutdown(struct mbox_chan *chan)
mbox_chan_received_data(chan, msg);
}
+ /* Clear requests bitmap */
+ bitmap_zero(ring->requests_bmap, RING_MAX_REQ_COUNT);
+
/* Release IRQ */
if (ring->irq_requested) {
+ irq_set_affinity_hint(ring->irq, NULL);
free_irq(ring->irq, ring);
ring->irq_requested = false;
}
@@ -1333,24 +1418,10 @@ static void flexrm_shutdown(struct mbox_chan *chan)
}
}
-static bool flexrm_last_tx_done(struct mbox_chan *chan)
-{
- bool ret;
- unsigned long flags;
- struct flexrm_ring *ring = chan->con_priv;
-
- spin_lock_irqsave(&ring->lock, flags);
- ret = (ring->last_pending_msg) ? false : true;
- spin_unlock_irqrestore(&ring->lock, flags);
-
- return ret;
-}
-
static const struct mbox_chan_ops flexrm_mbox_chan_ops = {
.send_data = flexrm_send_data,
.startup = flexrm_startup,
.shutdown = flexrm_shutdown,
- .last_tx_done = flexrm_last_tx_done,
.peek_data = flexrm_peek_data,
};
@@ -1468,14 +1539,15 @@ static int flexrm_mbox_probe(struct platform_device *pdev)
ring->irq_requested = false;
ring->msi_timer_val = MSI_TIMER_VAL_MASK;
ring->msi_count_threshold = 0x1;
- ida_init(&ring->requests_ida);
memset(ring->requests, 0, sizeof(ring->requests));
ring->bd_base = NULL;
ring->bd_dma_base = 0;
ring->cmpl_base = NULL;
ring->cmpl_dma_base = 0;
+ atomic_set(&ring->msg_send_count, 0);
+ atomic_set(&ring->msg_cmpl_count, 0);
spin_lock_init(&ring->lock);
- ring->last_pending_msg = NULL;
+ bitmap_zero(ring->requests_bmap, RING_MAX_REQ_COUNT);
ring->cmpl_read_offset = 0;
}
@@ -1515,10 +1587,39 @@ static int flexrm_mbox_probe(struct platform_device *pdev)
ring->irq = desc->irq;
}
+ /* Check availability of debugfs */
+ if (!debugfs_initialized())
+ goto skip_debugfs;
+
+ /* Create debugfs root entry */
+ mbox->root = debugfs_create_dir(dev_name(mbox->dev), NULL);
+ if (IS_ERR_OR_NULL(mbox->root)) {
+ ret = PTR_ERR_OR_ZERO(mbox->root);
+ goto fail_free_msis;
+ }
+
+ /* Create debugfs config entry */
+ mbox->config = debugfs_create_devm_seqfile(mbox->dev,
+ "config", mbox->root,
+ flexrm_debugfs_conf_show);
+ if (IS_ERR_OR_NULL(mbox->config)) {
+ ret = PTR_ERR_OR_ZERO(mbox->config);
+ goto fail_free_debugfs_root;
+ }
+
+ /* Create debugfs stats entry */
+ mbox->stats = debugfs_create_devm_seqfile(mbox->dev,
+ "stats", mbox->root,
+ flexrm_debugfs_stats_show);
+ if (IS_ERR_OR_NULL(mbox->stats)) {
+ ret = PTR_ERR_OR_ZERO(mbox->stats);
+ goto fail_free_debugfs_root;
+ }
+skip_debugfs:
+
/* Initialize mailbox controller */
mbox->controller.txdone_irq = false;
- mbox->controller.txdone_poll = true;
- mbox->controller.txpoll_period = 1;
+ mbox->controller.txdone_poll = false;
mbox->controller.ops = &flexrm_mbox_chan_ops;
mbox->controller.dev = dev;
mbox->controller.num_chans = mbox->num_rings;
@@ -1527,7 +1628,7 @@ static int flexrm_mbox_probe(struct platform_device *pdev)
sizeof(*mbox->controller.chans), GFP_KERNEL);
if (!mbox->controller.chans) {
ret = -ENOMEM;
- goto fail_free_msis;
+ goto fail_free_debugfs_root;
}
for (index = 0; index < mbox->num_rings; index++)
mbox->controller.chans[index].con_priv = &mbox->rings[index];
@@ -1535,13 +1636,15 @@ static int flexrm_mbox_probe(struct platform_device *pdev)
/* Register mailbox controller */
ret = mbox_controller_register(&mbox->controller);
if (ret)
- goto fail_free_msis;
+ goto fail_free_debugfs_root;
dev_info(dev, "registered flexrm mailbox with %d channels\n",
mbox->controller.num_chans);
return 0;
+fail_free_debugfs_root:
+ debugfs_remove_recursive(mbox->root);
fail_free_msis:
platform_msi_domain_free_irqs(dev);
fail_destroy_cmpl_pool:
@@ -1554,23 +1657,18 @@ fail:
static int flexrm_mbox_remove(struct platform_device *pdev)
{
- int index;
struct device *dev = &pdev->dev;
- struct flexrm_ring *ring;
struct flexrm_mbox *mbox = platform_get_drvdata(pdev);
mbox_controller_unregister(&mbox->controller);
+ debugfs_remove_recursive(mbox->root);
+
platform_msi_domain_free_irqs(dev);
dma_pool_destroy(mbox->cmpl_pool);
dma_pool_destroy(mbox->bd_pool);
- for (index = 0; index < mbox->num_rings; index++) {
- ring = &mbox->rings[index];
- ida_destroy(&ring->requests_ida);
- }
-
return 0;
}
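The flexrm rework swaps ida_simple_get(), which may sleep and allocate memory, for a fixed-size bitmap guarded by the ring spinlock, so grabbing a request id is cheap, non-sleeping, and simply returns -ENOSPC when the ring is full. The allocation pattern, sketched as standalone helpers with illustrative names:

#include <linux/bitmap.h>
#include <linux/spinlock.h>

#define MAX_REQS	1024	/* stands in for RING_MAX_REQ_COUNT */

static DECLARE_BITMAP(req_bmap, MAX_REQS);
static DEFINE_SPINLOCK(req_lock);

static int req_id_alloc(void)
{
	unsigned long flags;
	int id;

	spin_lock_irqsave(&req_lock, flags);
	/* order 0 == a single bit */
	id = bitmap_find_free_region(req_bmap, MAX_REQS, 0);
	spin_unlock_irqrestore(&req_lock, flags);

	return id;	/* negative when the ring is full */
}

static void req_id_free(int id)
{
	unsigned long flags;

	spin_lock_irqsave(&req_lock, flags);
	bitmap_release_region(req_bmap, id, 0);
	spin_unlock_irqrestore(&req_lock, flags);
}

Dropping last_pending_msg and last_tx_done in favour of returning -ENOSPC also lets the controller stop polling for transmit completion, which is why txdone_poll and txpoll_period disappear from the probe path.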
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index ac91fd0d62c6..9b7005e1345e 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -92,7 +92,7 @@ static struct mbox_controller pcc_mbox_ctrl = {};
*/
static struct mbox_chan *get_pcc_channel(int id)
{
- if (id < 0 || id > pcc_mbox_ctrl.num_chans)
+ if (id < 0 || id >= pcc_mbox_ctrl.num_chans)
return ERR_PTR(-ENOENT);
return &pcc_mbox_channels[id];
@@ -457,10 +457,8 @@ static int __init acpi_pcc_probe(void)
/* Search for PCCT */
status = acpi_get_table(ACPI_SIG_PCCT, 0, &pcct_tbl);
- if (ACPI_FAILURE(status) || !pcct_tbl) {
- pr_warn("PCCT header not found.\n");
+ if (ACPI_FAILURE(status) || !pcct_tbl)
return -ENODEV;
- }
count = acpi_table_parse_entries(ACPI_SIG_PCCT,
sizeof(struct acpi_table_pcct),
diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c
index 921a5d2a802b..bb5c5692dedc 100644
--- a/drivers/mcb/mcb-core.c
+++ b/drivers/mcb/mcb-core.c
@@ -418,6 +418,22 @@ void mcb_bus_add_devices(const struct mcb_bus *bus)
EXPORT_SYMBOL_GPL(mcb_bus_add_devices);
/**
+ * mcb_get_resource() - get a resource for a mcb device
+ * @dev: the mcb device
+ * @type: the type of resource
+ */
+struct resource *mcb_get_resource(struct mcb_device *dev, unsigned int type)
+{
+ if (type == IORESOURCE_MEM)
+ return &dev->mem;
+ else if (type == IORESOURCE_IRQ)
+ return &dev->irq;
+ else
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(mcb_get_resource);
+
+/**
* mcb_request_mem() - Request memory
* @dev: The @mcb_device the memory is for
* @name: The name for the memory reference.
@@ -460,7 +476,9 @@ EXPORT_SYMBOL_GPL(mcb_release_mem);
static int __mcb_get_irq(struct mcb_device *dev)
{
- struct resource *irq = &dev->irq;
+ struct resource *irq;
+
+ irq = mcb_get_resource(dev, IORESOURCE_IRQ);
return irq->start;
}
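mcb_get_resource() gives bus code and drivers one accessor for either resource type instead of reaching into struct mcb_device directly. A hedged usage sketch (hypothetical caller):

#include <linux/ioport.h>
#include <linux/mcb.h>

static int example_probe(struct mcb_device *dev)
{
	struct resource *mem = mcb_get_resource(dev, IORESOURCE_MEM);
	struct resource *irq = mcb_get_resource(dev, IORESOURCE_IRQ);

	if (!mem || !irq)
		return -EINVAL;

	/* use mem->start / irq->start as before */
	return 0;
}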
diff --git a/drivers/mcb/mcb-lpc.c b/drivers/mcb/mcb-lpc.c
index d072c088ce73..945091a88354 100644
--- a/drivers/mcb/mcb-lpc.c
+++ b/drivers/mcb/mcb-lpc.c
@@ -114,6 +114,12 @@ static struct resource sc24_fpga_resource = {
.flags = IORESOURCE_MEM,
};
+static struct resource sc31_fpga_resource = {
+ .start = 0xf000e000,
+ .end = 0xf000e000 + CHAM_HEADER_SIZE,
+ .flags = IORESOURCE_MEM,
+};
+
static struct platform_driver mcb_lpc_driver = {
.driver = {
.name = "mcb-lpc",
@@ -132,6 +138,15 @@ static const struct dmi_system_id mcb_lpc_dmi_table[] = {
.driver_data = (void *)&sc24_fpga_resource,
.callback = mcb_lpc_create_platform_device,
},
+ {
+ .ident = "SC31",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MEN"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "14SC31"),
+ },
+ .driver_data = (void *)&sc31_fpga_resource,
+ .callback = mcb_lpc_create_platform_device,
+ },
{}
};
MODULE_DEVICE_TABLE(dmi, mcb_lpc_dmi_table);
diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c
index ee7fb6ec96bd..7369bda3442f 100644
--- a/drivers/mcb/mcb-parse.c
+++ b/drivers/mcb/mcb-parse.c
@@ -182,7 +182,7 @@ int chameleon_parse_cells(struct mcb_bus *bus, phys_addr_t mapbase,
int num_cells = 0;
uint32_t dtype;
int bar_count;
- int ret = 0;
+ int ret;
u32 hsize;
hsize = sizeof(struct chameleon_fpga_header);
@@ -210,8 +210,10 @@ int chameleon_parse_cells(struct mcb_bus *bus, phys_addr_t mapbase,
header->filename);
bar_count = chameleon_get_bar(&p, mapbase, &cb);
- if (bar_count < 0)
+ if (bar_count < 0) {
+ ret = bar_count;
goto free_header;
+ }
for_each_chameleon_cell(dtype, p) {
switch (dtype) {
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 35a5a7210e51..61076eda2e6d 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -49,7 +49,7 @@ void bch_btree_verify(struct btree *b)
v->keys.ops = b->keys.ops;
bio = bch_bbio_alloc(b->c);
- bio->bi_bdev = PTR_CACHE(b->c, &b->key, 0)->bdev;
+ bio_set_dev(bio, PTR_CACHE(b->c, &b->key, 0)->bdev);
bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
bio->bi_iter.bi_size = KEY_SIZE(&v->key) << 9;
bio->bi_opf = REQ_OP_READ | REQ_META;
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index 6a9b85095e7b..7e871bdc0097 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -34,7 +34,7 @@ void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
struct bbio *b = container_of(bio, struct bbio, bio);
bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
- bio->bi_bdev = PTR_CACHE(c, &b->key, 0)->bdev;
+ bio_set_dev(bio, PTR_CACHE(c, &b->key, 0)->bdev);
b->submit_time_us = local_clock_us();
closure_bio_submit(bio, bio->bi_private);
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 0352d05e495c..7e1d1c3ba33a 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -53,7 +53,7 @@ reread: left = ca->sb.bucket_size - offset;
bio_reset(bio);
bio->bi_iter.bi_sector = bucket + offset;
- bio->bi_bdev = ca->bdev;
+ bio_set_dev(bio, ca->bdev);
bio->bi_iter.bi_size = len << 9;
bio->bi_end_io = journal_read_endio;
@@ -452,7 +452,7 @@ static void do_journal_discard(struct cache *ca)
bio_set_op_attrs(bio, REQ_OP_DISCARD, 0);
bio->bi_iter.bi_sector = bucket_to_sector(ca->set,
ca->sb.d[ja->discard_idx]);
- bio->bi_bdev = ca->bdev;
+ bio_set_dev(bio, ca->bdev);
bio->bi_iter.bi_size = bucket_bytes(ca);
bio->bi_end_io = journal_discard_endio;
@@ -623,7 +623,7 @@ static void journal_write_unlocked(struct closure *cl)
bio_reset(bio);
bio->bi_iter.bi_sector = PTR_OFFSET(k, i);
- bio->bi_bdev = ca->bdev;
+ bio_set_dev(bio, ca->bdev);
bio->bi_iter.bi_size = sectors << 9;
bio->bi_end_io = journal_write_endio;
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 019b3df9f1c6..0e1463d0c334 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -607,7 +607,8 @@ static void request_endio(struct bio *bio)
static void bio_complete(struct search *s)
{
if (s->orig_bio) {
- generic_end_io_acct(bio_data_dir(s->orig_bio),
+ struct request_queue *q = s->orig_bio->bi_disk->queue;
+ generic_end_io_acct(q, bio_data_dir(s->orig_bio),
&s->d->disk->part0, s->start_time);
trace_bcache_request_end(s->d, s->orig_bio);
@@ -734,7 +735,7 @@ static void cached_dev_read_done(struct closure *cl)
if (s->iop.bio) {
bio_reset(s->iop.bio);
s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
- s->iop.bio->bi_bdev = s->cache_miss->bi_bdev;
+ bio_copy_dev(s->iop.bio, s->cache_miss);
s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
bch_bio_map(s->iop.bio, NULL);
@@ -793,7 +794,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
!(bio->bi_opf & REQ_META) &&
s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
reada = min_t(sector_t, dc->readahead >> 9,
- bdev_sectors(bio->bi_bdev) - bio_end_sector(bio));
+ get_capacity(bio->bi_disk) - bio_end_sector(bio));
s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
@@ -819,7 +820,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
goto out_submit;
cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector;
- cache_bio->bi_bdev = miss->bi_bdev;
+ bio_copy_dev(cache_bio, miss);
cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
cache_bio->bi_end_io = request_endio;
@@ -918,7 +919,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
dc->disk.bio_split);
- flush->bi_bdev = bio->bi_bdev;
+ bio_copy_dev(flush, bio);
flush->bi_end_io = request_endio;
flush->bi_private = cl;
flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
@@ -955,13 +956,13 @@ static blk_qc_t cached_dev_make_request(struct request_queue *q,
struct bio *bio)
{
struct search *s;
- struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
+ struct bcache_device *d = bio->bi_disk->private_data;
struct cached_dev *dc = container_of(d, struct cached_dev, disk);
int rw = bio_data_dir(bio);
- generic_start_io_acct(rw, bio_sectors(bio), &d->disk->part0);
+ generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0);
- bio->bi_bdev = dc->bdev;
+ bio_set_dev(bio, dc->bdev);
bio->bi_iter.bi_sector += dc->sb.data_offset;
if (cached_dev_get(dc)) {
@@ -1071,10 +1072,10 @@ static blk_qc_t flash_dev_make_request(struct request_queue *q,
{
struct search *s;
struct closure *cl;
- struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
+ struct bcache_device *d = bio->bi_disk->private_data;
int rw = bio_data_dir(bio);
- generic_start_io_acct(rw, bio_sectors(bio), &d->disk->part0);
+ generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0);
s = search_alloc(bio, d);
cl = &s->cl;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 8352fad765f6..974d832e54a6 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -257,7 +257,7 @@ void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
closure_init(cl, parent);
bio_reset(bio);
- bio->bi_bdev = dc->bdev;
+ bio_set_dev(bio, dc->bdev);
bio->bi_end_io = write_bdev_super_endio;
bio->bi_private = dc;
@@ -303,7 +303,7 @@ void bcache_write_super(struct cache_set *c)
SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));
bio_reset(bio);
- bio->bi_bdev = ca->bdev;
+ bio_set_dev(bio, ca->bdev);
bio->bi_end_io = write_super_endio;
bio->bi_private = ca;
@@ -508,7 +508,7 @@ static void prio_io(struct cache *ca, uint64_t bucket, int op,
closure_init_stack(cl);
bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size;
- bio->bi_bdev = ca->bdev;
+ bio_set_dev(bio, ca->bdev);
bio->bi_iter.bi_size = bucket_bytes(ca);
bio->bi_end_io = prio_endio;
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 42c66e76f05e..c49022a8dc9d 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -181,7 +181,7 @@ static void write_dirty(struct closure *cl)
dirty_init(w);
bio_set_op_attrs(&io->bio, REQ_OP_WRITE, 0);
io->bio.bi_iter.bi_sector = KEY_START(&w->key);
- io->bio.bi_bdev = io->dc->bdev;
+ bio_set_dev(&io->bio, io->dc->bdev);
io->bio.bi_end_io = dirty_endio;
closure_bio_submit(&io->bio, cl);
@@ -250,8 +250,7 @@ static void read_dirty(struct cached_dev *dc)
dirty_init(w);
bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
- io->bio.bi_bdev = PTR_CACHE(dc->disk.c,
- &w->key, 0)->bdev;
+ bio_set_dev(&io->bio, PTR_CACHE(dc->disk.c, &w->key, 0)->bdev);
io->bio.bi_end_io = read_dirty_endio;
if (bio_alloc_pages(&io->bio, GFP_KERNEL))
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index f4eace5ea184..d2121637b4ab 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -156,7 +156,8 @@ static int read_sb_page(struct mddev *mddev, loff_t offset,
rdev_for_each(rdev, mddev) {
if (! test_bit(In_sync, &rdev->flags)
- || test_bit(Faulty, &rdev->flags))
+ || test_bit(Faulty, &rdev->flags)
+ || test_bit(Bitmap_sync, &rdev->flags))
continue;
target = offset + index * (PAGE_SIZE/512);
@@ -624,7 +625,7 @@ re_read:
err = read_sb_page(bitmap->mddev,
offset,
sb_page,
- 0, sizeof(bitmap_super_t));
+ 0, PAGE_SIZE);
}
if (err)
return err;
@@ -2057,6 +2058,11 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
long pages;
struct bitmap_page *new_bp;
+ if (bitmap->storage.file && !init) {
+ pr_info("md: cannot resize file-based bitmap\n");
+ return -EINVAL;
+ }
+
if (chunksize == 0) {
/* If there is enough space, leave the chunk size unchanged,
* else increase by factor of two until there is enough space.
@@ -2117,7 +2123,7 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
if (store.sb_page && bitmap->storage.sb_page)
memcpy(page_address(store.sb_page),
page_address(bitmap->storage.sb_page),
- sizeof(bitmap_super_t));
+ PAGE_SIZE);
bitmap_file_unmap(&bitmap->storage);
bitmap->storage = store;
diff --git a/drivers/md/dm-bio-record.h b/drivers/md/dm-bio-record.h
index dd3646111561..c82578af56a5 100644
--- a/drivers/md/dm-bio-record.h
+++ b/drivers/md/dm-bio-record.h
@@ -18,21 +18,24 @@
*/
struct dm_bio_details {
- struct block_device *bi_bdev;
+ struct gendisk *bi_disk;
+ u8 bi_partno;
unsigned long bi_flags;
struct bvec_iter bi_iter;
};
static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)
{
- bd->bi_bdev = bio->bi_bdev;
+ bd->bi_disk = bio->bi_disk;
+ bd->bi_partno = bio->bi_partno;
bd->bi_flags = bio->bi_flags;
bd->bi_iter = bio->bi_iter;
}
static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio)
{
- bio->bi_bdev = bd->bi_bdev;
+ bio->bi_disk = bd->bi_disk;
+ bio->bi_partno = bd->bi_partno;
bio->bi_flags = bd->bi_flags;
bio->bi_iter = bd->bi_iter;
}
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 850ff6c67994..9601225e0ae9 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -616,7 +616,7 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t sector,
bio_init(&b->bio, b->bio_vec, DM_BUFIO_INLINE_VECS);
b->bio.bi_iter.bi_sector = sector;
- b->bio.bi_bdev = b->c->bdev;
+ bio_set_dev(&b->bio, b->c->bdev);
b->bio.bi_end_io = inline_endio;
/*
* Use of .bi_private isn't a problem here because
@@ -1258,8 +1258,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
*/
int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
{
- blk_status_t a;
- int f;
+ int a, f;
unsigned long buffers_processed = 0;
struct dm_buffer *b, *tmp;
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index c5ea03fc7ee1..dcac25c2be7a 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -833,7 +833,7 @@ static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
*--------------------------------------------------------------*/
static void remap_to_origin(struct cache *cache, struct bio *bio)
{
- bio->bi_bdev = cache->origin_dev->bdev;
+ bio_set_dev(bio, cache->origin_dev->bdev);
}
static void remap_to_cache(struct cache *cache, struct bio *bio,
@@ -842,7 +842,7 @@ static void remap_to_cache(struct cache *cache, struct bio *bio,
sector_t bi_sector = bio->bi_iter.bi_sector;
sector_t block = from_cblock(cblock);
- bio->bi_bdev = cache->cache_dev->bdev;
+ bio_set_dev(bio, cache->cache_dev->bdev);
if (!block_size_is_power_of_two(cache))
bio->bi_iter.bi_sector =
(block * cache->sectors_per_block) +
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index cdf6b1e12460..54aef8ed97db 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -758,9 +758,8 @@ static int crypt_iv_tcw_whitening(struct crypt_config *cc,
int i, r;
/* xor whitening with sector number */
- memcpy(buf, tcw->whitening, TCW_WHITENING_SIZE);
- crypto_xor(buf, (u8 *)&sector, 8);
- crypto_xor(&buf[8], (u8 *)&sector, 8);
+ crypto_xor_cpy(buf, tcw->whitening, (u8 *)&sector, 8);
+ crypto_xor_cpy(&buf[8], tcw->whitening + 8, (u8 *)&sector, 8);
/* calculate crc32 for every 32bit part and xor it */
desc->tfm = tcw->crc32_tfm;
@@ -805,10 +804,10 @@ static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
}
/* Calculate IV */
- memcpy(iv, tcw->iv_seed, cc->iv_size);
- crypto_xor(iv, (u8 *)&sector, 8);
+ crypto_xor_cpy(iv, tcw->iv_seed, (u8 *)&sector, 8);
if (cc->iv_size > 8)
- crypto_xor(&iv[8], (u8 *)&sector, cc->iv_size - 8);
+ crypto_xor_cpy(&iv[8], tcw->iv_seed + 8, (u8 *)&sector,
+ cc->iv_size - 8);
return r;
}
@@ -933,9 +932,6 @@ static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio)
bip->bip_iter.bi_size = tag_len;
bip->bip_iter.bi_sector = io->cc->start + io->sector;
- /* We own the metadata, do not let bio_free to release it */
- bip->bip_flags &= ~BIP_BLOCK_INTEGRITY;
-
ret = bio_integrity_add_page(bio, virt_to_page(io->integrity_metadata),
tag_len, offset_in_page(io->integrity_metadata));
if (unlikely(ret != tag_len))
@@ -1547,7 +1543,7 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
clone->bi_private = io;
clone->bi_end_io = crypt_endio;
- clone->bi_bdev = cc->dev->bdev;
+ bio_set_dev(clone, cc->dev->bdev);
clone->bi_opf = io->base_bio->bi_opf;
}
@@ -2796,7 +2792,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
*/
if (unlikely(bio->bi_opf & REQ_PREFLUSH ||
bio_op(bio) == REQ_OP_DISCARD)) {
- bio->bi_bdev = cc->dev->bdev;
+ bio_set_dev(bio, cc->dev->bdev);
if (bio_sectors(bio))
bio->bi_iter.bi_sector = cc->start +
dm_target_offset(ti, bio->bi_iter.bi_sector);
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index ae3158795d26..2209a9700acd 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -282,7 +282,7 @@ static int delay_map(struct dm_target *ti, struct bio *bio)
struct delay_c *dc = ti->private;
if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) {
- bio->bi_bdev = dc->dev_write->bdev;
+ bio_set_dev(bio, dc->dev_write->bdev);
if (bio_sectors(bio))
bio->bi_iter.bi_sector = dc->start_write +
dm_target_offset(ti, bio->bi_iter.bi_sector);
@@ -290,7 +290,7 @@ static int delay_map(struct dm_target *ti, struct bio *bio)
return delay_bio(dc, dc->write_delay, bio);
}
- bio->bi_bdev = dc->dev_read->bdev;
+ bio_set_dev(bio, dc->dev_read->bdev);
bio->bi_iter.bi_sector = dc->start_read +
dm_target_offset(ti, bio->bi_iter.bi_sector);
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
index e7ba89f98d8d..ba84b8d62cd0 100644
--- a/drivers/md/dm-era-target.c
+++ b/drivers/md/dm-era-target.c
@@ -1192,7 +1192,7 @@ static dm_block_t get_block(struct era *era, struct bio *bio)
static void remap_to_origin(struct era *era, struct bio *bio)
{
- bio->bi_bdev = era->origin_dev->bdev;
+ bio_set_dev(bio, era->origin_dev->bdev);
}
/*----------------------------------------------------------------
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index e2c7234931bc..7146c2d9762d 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -274,7 +274,7 @@ static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
{
struct flakey_c *fc = ti->private;
- bio->bi_bdev = fc->dev->bdev;
+ bio_set_dev(bio, fc->dev->bdev);
if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET)
bio->bi_iter.bi_sector =
flakey_map_sector(ti, bio->bi_iter.bi_sector);
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 1b224aa9cf15..27c0f223f8ea 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -250,7 +250,8 @@ struct dm_integrity_io {
struct completion *completion;
- struct block_device *orig_bi_bdev;
+ struct gendisk *orig_bi_disk;
+ u8 orig_bi_partno;
bio_end_io_t *orig_bi_end_io;
struct bio_integrity_payload *orig_bi_integrity;
struct bvec_iter orig_bi_iter;
@@ -1164,7 +1165,8 @@ static void integrity_end_io(struct bio *bio)
struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
bio->bi_iter = dio->orig_bi_iter;
- bio->bi_bdev = dio->orig_bi_bdev;
+ bio->bi_disk = dio->orig_bi_disk;
+ bio->bi_partno = dio->orig_bi_partno;
if (dio->orig_bi_integrity) {
bio->bi_integrity = dio->orig_bi_integrity;
bio->bi_opf |= REQ_INTEGRITY;
@@ -1587,16 +1589,18 @@ retry:
if (likely(ic->mode == 'J')) {
if (dio->write) {
unsigned next_entry, i, pos;
- unsigned ws, we;
+ unsigned ws, we, range_sectors;
- dio->range.n_sectors = min(dio->range.n_sectors, ic->free_sectors);
+ dio->range.n_sectors = min(dio->range.n_sectors,
+ ic->free_sectors << ic->sb->log2_sectors_per_block);
if (unlikely(!dio->range.n_sectors))
goto sleep;
- ic->free_sectors -= dio->range.n_sectors;
+ range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block;
+ ic->free_sectors -= range_sectors;
journal_section = ic->free_section;
journal_entry = ic->free_section_entry;
- next_entry = ic->free_section_entry + dio->range.n_sectors;
+ next_entry = ic->free_section_entry + range_sectors;
ic->free_section_entry = next_entry % ic->journal_section_entries;
ic->free_section += next_entry / ic->journal_section_entries;
ic->n_uncommitted_sections += next_entry / ic->journal_section_entries;
@@ -1679,8 +1683,9 @@ sleep:
dio->orig_bi_iter = bio->bi_iter;
- dio->orig_bi_bdev = bio->bi_bdev;
- bio->bi_bdev = ic->dev->bdev;
+ dio->orig_bi_disk = bio->bi_disk;
+ dio->orig_bi_partno = bio->bi_partno;
+ bio_set_dev(bio, ic->dev->bdev);
dio->orig_bi_integrity = bio_integrity(bio);
bio->bi_integrity = NULL;
@@ -1727,6 +1732,8 @@ static void pad_uncommitted(struct dm_integrity_c *ic)
wraparound_section(ic, &ic->free_section);
ic->n_uncommitted_sections++;
}
+ WARN_ON(ic->journal_sections * ic->journal_section_entries !=
+ (ic->n_uncommitted_sections + ic->n_committed_sections) * ic->journal_section_entries + ic->free_sectors);
}
static void integrity_commit(struct work_struct *w)
@@ -1821,6 +1828,9 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
{
unsigned i, j, n;
struct journal_completion comp;
+ struct blk_plug plug;
+
+ blk_start_plug(&plug);
comp.ic = ic;
comp.in_flight = (atomic_t)ATOMIC_INIT(1);
@@ -1945,6 +1955,8 @@ skip_io:
dm_bufio_write_dirty_buffers_async(ic->bufio);
+ blk_finish_plug(&plug);
+
complete_journal_op(&comp);
wait_for_completion_io(&comp.comp);
@@ -3019,6 +3031,11 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
ti->error = "Block size doesn't match the information in superblock";
goto bad;
}
+ if (!le32_to_cpu(ic->sb->journal_sections)) {
+ r = -EINVAL;
+ ti->error = "Corrupted superblock, journal_sections is 0";
+ goto bad;
+ }
/* make sure that ti->max_io_len doesn't overflow */
if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS ||
ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) {
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 25039607f3cb..b4357ed4d541 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -347,7 +347,7 @@ static void do_region(int op, int op_flags, unsigned region,
bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
- bio->bi_bdev = where->bdev;
+ bio_set_dev(bio, where->bdev);
bio->bi_end_io = endio;
bio_set_op_attrs(bio, op, op_flags);
store_io_and_region_in_bio(bio, io, region);
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 41971a090e34..405eca206d67 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -88,7 +88,7 @@ static void linear_map_bio(struct dm_target *ti, struct bio *bio)
{
struct linear_c *lc = ti->private;
- bio->bi_bdev = lc->dev->bdev;
+ bio_set_dev(bio, lc->dev->bdev);
if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET)
bio->bi_iter.bi_sector =
linear_map_sector(ti, bio->bi_iter.bi_sector);
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index a1da0eb58a93..534a254eb977 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -198,7 +198,7 @@ static int write_metadata(struct log_writes_c *lc, void *entry,
}
bio->bi_iter.bi_size = 0;
bio->bi_iter.bi_sector = sector;
- bio->bi_bdev = lc->logdev->bdev;
+ bio_set_dev(bio, lc->logdev->bdev);
bio->bi_end_io = log_end_io;
bio->bi_private = lc;
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
@@ -263,7 +263,7 @@ static int log_one_block(struct log_writes_c *lc,
}
bio->bi_iter.bi_size = 0;
bio->bi_iter.bi_sector = sector;
- bio->bi_bdev = lc->logdev->bdev;
+ bio_set_dev(bio, lc->logdev->bdev);
bio->bi_end_io = log_end_io;
bio->bi_private = lc;
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
@@ -285,7 +285,7 @@ static int log_one_block(struct log_writes_c *lc,
}
bio->bi_iter.bi_size = 0;
bio->bi_iter.bi_sector = sector;
- bio->bi_bdev = lc->logdev->bdev;
+ bio_set_dev(bio, lc->logdev->bdev);
bio->bi_end_io = log_end_io;
bio->bi_private = lc;
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
@@ -539,7 +539,7 @@ static void normal_map_bio(struct dm_target *ti, struct bio *bio)
{
struct log_writes_c *lc = ti->private;
- bio->bi_bdev = lc->dev->bdev;
+ bio_set_dev(bio, lc->dev->bdev);
}
static int log_writes_map(struct dm_target *ti, struct bio *bio)
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 0e8ab5bb3575..96aedaac2c64 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -504,7 +504,6 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
if (queue_dying) {
atomic_inc(&m->pg_init_in_progress);
activate_or_offline_path(pgpath);
- return DM_MAPIO_REQUEUE;
}
return DM_MAPIO_DELAY_REQUEUE;
}
@@ -566,7 +565,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
mpio->nr_bytes = nr_bytes;
bio->bi_status = 0;
- bio->bi_bdev = pgpath->path.dev->bdev;
+ bio_set_dev(bio, pgpath->path.dev->bdev);
bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
if (pgpath->pg->ps.type->start_io)
@@ -1458,7 +1457,6 @@ static int noretry_error(blk_status_t error)
case BLK_STS_TARGET:
case BLK_STS_NEXUS:
case BLK_STS_MEDIUM:
- case BLK_STS_RESOURCE:
return 1;
}
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 2e10c2f13a34..5bfe285ea9d1 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -208,6 +208,7 @@ struct raid_dev {
#define RT_FLAG_RS_BITMAP_LOADED 2
#define RT_FLAG_UPDATE_SBS 3
#define RT_FLAG_RESHAPE_RS 4
+#define RT_FLAG_RS_SUSPENDED 5
/* Array elements of 64 bit needed for rebuild/failed disk bits */
#define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8)
@@ -564,9 +565,10 @@ static const char *raid10_md_layout_to_format(int layout)
if (__raid10_near_copies(layout) > 1)
return "near";
- WARN_ON(__raid10_far_copies(layout) < 2);
+ if (__raid10_far_copies(layout) > 1)
+ return "far";
- return "far";
+ return "unknown";
}
/* Return md raid10 algorithm for @name */
@@ -2540,11 +2542,6 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
if (!freshest)
return 0;
- if (validate_raid_redundancy(rs)) {
- rs->ti->error = "Insufficient redundancy to activate array";
- return -EINVAL;
- }
-
/*
* Validation of the freshest device provides the source of
* validation for the remaining devices.
@@ -2553,6 +2550,11 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
if (super_validate(rs, freshest))
return -EINVAL;
+ if (validate_raid_redundancy(rs)) {
+ rs->ti->error = "Insufficient redundancy to activate array";
+ return -EINVAL;
+ }
+
rdev_for_each(rdev, mddev)
if (!test_bit(Journal, &rdev->flags) &&
rdev != freshest &&
@@ -3168,6 +3170,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
mddev_suspend(&rs->md);
+ set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags);
/* Try to adjust the raid4/5/6 stripe cache size to the stripe size */
if (rs_is_raid456(rs)) {
@@ -3625,7 +3628,7 @@ static void raid_postsuspend(struct dm_target *ti)
{
struct raid_set *rs = ti->private;
- if (!rs->md.suspended)
+ if (!test_and_set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
mddev_suspend(&rs->md);
rs->md.ro = 1;
@@ -3759,7 +3762,7 @@ static int rs_start_reshape(struct raid_set *rs)
return r;
/* Need to be resumed to be able to start reshape, recovery is frozen until raid_resume() though */
- if (mddev->suspended)
+ if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
mddev_resume(mddev);
/*
@@ -3786,8 +3789,8 @@ static int rs_start_reshape(struct raid_set *rs)
}
/* Suspend because a resume will happen in raid_resume() */
- if (!mddev->suspended)
- mddev_suspend(mddev);
+ set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags);
+ mddev_suspend(mddev);
/*
* Now reshape got set up, update superblocks to
@@ -3883,13 +3886,13 @@ static void raid_resume(struct dm_target *ti)
if (!(rs->ctr_flags & RESUME_STAY_FROZEN_FLAGS))
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
- if (mddev->suspended)
+ if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
mddev_resume(mddev);
}
static struct target_type raid_target = {
.name = "raid",
- .version = {1, 11, 1},
+ .version = {1, 12, 1},
.module = THIS_MODULE,
.ctr = raid_ctr,
.dtr = raid_dtr,
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index a4fbd911d566..c0b82136b2d1 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -145,7 +145,7 @@ static void dispatch_bios(void *context, struct bio_list *bio_list)
struct dm_raid1_bio_record {
struct mirror *m;
- /* if details->bi_bdev == NULL, details were not saved */
+ /* if details->bi_disk == NULL, details were not saved */
struct dm_bio_details details;
region_t write_region;
};
@@ -464,7 +464,7 @@ static sector_t map_sector(struct mirror *m, struct bio *bio)
static void map_bio(struct mirror *m, struct bio *bio)
{
- bio->bi_bdev = m->dev->bdev;
+ bio_set_dev(bio, m->dev->bdev);
bio->bi_iter.bi_sector = map_sector(m, bio);
}
@@ -1199,7 +1199,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
struct dm_raid1_bio_record *bio_record =
dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));
- bio_record->details.bi_bdev = NULL;
+ bio_record->details.bi_disk = NULL;
if (rw == WRITE) {
/* Save region for mirror_end_io() handler */
@@ -1266,7 +1266,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
goto out;
if (unlikely(*error)) {
- if (!bio_record->details.bi_bdev) {
+ if (!bio_record->details.bi_disk) {
/*
* There wasn't enough memory to record necessary
* information for a retry or there was no other
@@ -1291,7 +1291,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
bd = &bio_record->details;
dm_bio_restore(bd, bio);
- bio_record->details.bi_bdev = NULL;
+ bio_record->details.bi_disk = NULL;
bio->bi_status = 0;
queue_bio(ms, bio, rw);
@@ -1301,7 +1301,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
}
out:
- bio_record->details.bi_bdev = NULL;
+ bio_record->details.bi_disk = NULL;
return DM_ENDIO_DONE;
}
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 1ba41048b438..1113b42e1eda 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1663,7 +1663,7 @@ __find_pending_exception(struct dm_snapshot *s,
static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
struct bio *bio, chunk_t chunk)
{
- bio->bi_bdev = s->cow->bdev;
+ bio_set_dev(bio, s->cow->bdev);
bio->bi_iter.bi_sector =
chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) +
(chunk - e->old_chunk)) +
@@ -1681,7 +1681,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
init_tracked_chunk(bio);
if (bio->bi_opf & REQ_PREFLUSH) {
- bio->bi_bdev = s->cow->bdev;
+ bio_set_dev(bio, s->cow->bdev);
return DM_MAPIO_REMAPPED;
}
@@ -1769,7 +1769,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
goto out;
}
} else {
- bio->bi_bdev = s->origin->bdev;
+ bio_set_dev(bio, s->origin->bdev);
track_chunk(s, bio, chunk);
}
@@ -1802,9 +1802,9 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
if (bio->bi_opf & REQ_PREFLUSH) {
if (!dm_bio_get_target_bio_nr(bio))
- bio->bi_bdev = s->origin->bdev;
+ bio_set_dev(bio, s->origin->bdev);
else
- bio->bi_bdev = s->cow->bdev;
+ bio_set_dev(bio, s->cow->bdev);
return DM_MAPIO_REMAPPED;
}
@@ -1824,7 +1824,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
chunk >= s->first_merging_chunk &&
chunk < (s->first_merging_chunk +
s->num_merging_chunks)) {
- bio->bi_bdev = s->origin->bdev;
+ bio_set_dev(bio, s->origin->bdev);
bio_list_add(&s->bios_queued_during_merge, bio);
r = DM_MAPIO_SUBMITTED;
goto out_unlock;
@@ -1838,7 +1838,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
}
redirect_to_origin:
- bio->bi_bdev = s->origin->bdev;
+ bio_set_dev(bio, s->origin->bdev);
if (bio_data_dir(bio) == WRITE) {
up_write(&s->lock);
@@ -2285,7 +2285,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio)
struct dm_origin *o = ti->private;
unsigned available_sectors;
- bio->bi_bdev = o->dev->bdev;
+ bio_set_dev(bio, o->dev->bdev);
if (unlikely(bio->bi_opf & REQ_PREFLUSH))
return DM_MAPIO_REMAPPED;
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index a0375530b07f..ab50d7c4377f 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -270,7 +270,7 @@ static int stripe_map_range(struct stripe_c *sc, struct bio *bio,
stripe_map_range_sector(sc, bio_end_sector(bio),
target_stripe, &end);
if (begin < end) {
- bio->bi_bdev = sc->stripe[target_stripe].dev->bdev;
+ bio_set_dev(bio, sc->stripe[target_stripe].dev->bdev);
bio->bi_iter.bi_sector = begin +
sc->stripe[target_stripe].physical_start;
bio->bi_iter.bi_size = to_bytes(end - begin);
@@ -291,7 +291,7 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
if (bio->bi_opf & REQ_PREFLUSH) {
target_bio_nr = dm_bio_get_target_bio_nr(bio);
BUG_ON(target_bio_nr >= sc->stripes);
- bio->bi_bdev = sc->stripe[target_bio_nr].dev->bdev;
+ bio_set_dev(bio, sc->stripe[target_bio_nr].dev->bdev);
return DM_MAPIO_REMAPPED;
}
if (unlikely(bio_op(bio) == REQ_OP_DISCARD) ||
@@ -306,7 +306,7 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
&stripe, &bio->bi_iter.bi_sector);
bio->bi_iter.bi_sector += sc->stripe[stripe].physical_start;
- bio->bi_bdev = sc->stripe[stripe].dev->bdev;
+ bio_set_dev(bio, sc->stripe[stripe].dev->bdev);
return DM_MAPIO_REMAPPED;
}
@@ -430,9 +430,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
return DM_ENDIO_DONE;
memset(major_minor, 0, sizeof(major_minor));
- sprintf(major_minor, "%d:%d",
- MAJOR(disk_devt(bio->bi_bdev->bd_disk)),
- MINOR(disk_devt(bio->bi_bdev->bd_disk)));
+ sprintf(major_minor, "%d:%d", MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)));
/*
* Test to see which stripe drive triggered the event
diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c
index 871c18fe000d..2dcea4c56f37 100644
--- a/drivers/md/dm-switch.c
+++ b/drivers/md/dm-switch.c
@@ -322,7 +322,7 @@ static int switch_map(struct dm_target *ti, struct bio *bio)
sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector);
unsigned path_nr = switch_get_path_nr(sctx, offset);
- bio->bi_bdev = sctx->path_list[path_nr].dmdev->bdev;
+ bio_set_dev(bio, sctx->path_list[path_nr].dmdev->bdev);
bio->bi_iter.bi_sector = sctx->path_list[path_nr].start + offset;
return DM_MAPIO_REMAPPED;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index a39bcd9b982a..28a4071cdf85 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -20,6 +20,7 @@
#include <linux/atomic.h>
#include <linux/blk-mq.h>
#include <linux/mount.h>
+#include <linux/dax.h>
#define DM_MSG_PREFIX "table"
@@ -1630,6 +1631,37 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
return false;
}
+static int device_dax_write_cache_enabled(struct dm_target *ti,
+ struct dm_dev *dev, sector_t start,
+ sector_t len, void *data)
+{
+ struct dax_device *dax_dev = dev->dax_dev;
+
+ if (!dax_dev)
+ return false;
+
+ if (dax_write_cache_enabled(dax_dev))
+ return true;
+ return false;
+}
+
+static int dm_table_supports_dax_write_cache(struct dm_table *t)
+{
+ struct dm_target *ti;
+ unsigned i;
+
+ for (i = 0; i < dm_table_get_num_targets(t); i++) {
+ ti = dm_table_get_target(t, i);
+
+ if (ti->type->iterate_devices &&
+ ti->type->iterate_devices(ti,
+ device_dax_write_cache_enabled, NULL))
+ return true;
+ }
+
+ return false;
+}
+
static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
@@ -1785,6 +1817,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
}
blk_queue_write_cache(q, wc, fua);
+ if (dm_table_supports_dax_write_cache(t))
+ dax_write_cache(t->md->dax_dev, true);
+
/* Ensure that all underlying devices are non-rotational. */
if (dm_table_all_devices_attribute(t, device_is_nonrot))
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 9dec2f8cc739..69d88aee3055 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -679,7 +679,7 @@ static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
struct pool *pool = tc->pool;
sector_t bi_sector = bio->bi_iter.bi_sector;
- bio->bi_bdev = tc->pool_dev->bdev;
+ bio_set_dev(bio, tc->pool_dev->bdev);
if (block_size_is_power_of_two(pool))
bio->bi_iter.bi_sector =
(block << pool->sectors_per_block_shift) |
@@ -691,7 +691,7 @@ static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
static void remap_to_origin(struct thin_c *tc, struct bio *bio)
{
- bio->bi_bdev = tc->origin_dev->bdev;
+ bio_set_dev(bio, tc->origin_dev->bdev);
}
static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
@@ -3313,7 +3313,7 @@ static int pool_map(struct dm_target *ti, struct bio *bio)
* As this is a singleton target, ti->begin is always zero.
*/
spin_lock_irqsave(&pool->lock, flags);
- bio->bi_bdev = pt->data_dev->bdev;
+ bio_set_dev(bio, pt->data_dev->bdev);
r = DM_MAPIO_REMAPPED;
spin_unlock_irqrestore(&pool->lock, flags);
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index 504ba3fa328b..e13f90832b6b 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -308,19 +308,14 @@ static int fec_alloc_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio)
{
unsigned n;
- if (!fio->rs) {
- fio->rs = mempool_alloc(v->fec->rs_pool, 0);
- if (unlikely(!fio->rs)) {
- DMERR("failed to allocate RS");
- return -ENOMEM;
- }
- }
+ if (!fio->rs)
+ fio->rs = mempool_alloc(v->fec->rs_pool, GFP_NOIO);
fec_for_each_prealloc_buffer(n) {
if (fio->bufs[n])
continue;
- fio->bufs[n] = mempool_alloc(v->fec->prealloc_pool, GFP_NOIO);
+ fio->bufs[n] = mempool_alloc(v->fec->prealloc_pool, GFP_NOWAIT);
if (unlikely(!fio->bufs[n])) {
DMERR("failed to allocate FEC buffer");
return -ENOMEM;
@@ -332,22 +327,16 @@ static int fec_alloc_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio)
if (fio->bufs[n])
continue;
- fio->bufs[n] = mempool_alloc(v->fec->extra_pool, GFP_NOIO);
+ fio->bufs[n] = mempool_alloc(v->fec->extra_pool, GFP_NOWAIT);
/* we can manage with even one buffer if necessary */
if (unlikely(!fio->bufs[n]))
break;
}
fio->nbufs = n;
- if (!fio->output) {
+ if (!fio->output)
fio->output = mempool_alloc(v->fec->output_pool, GFP_NOIO);
- if (!fio->output) {
- DMERR("failed to allocate FEC page");
- return -ENOMEM;
- }
- }
-
return 0;
}
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index b46705ebf01f..1c5b6185c79d 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -637,7 +637,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
struct dm_verity *v = ti->private;
struct dm_verity_io *io;
- bio->bi_bdev = v->data_dev->bdev;
+ bio_set_dev(bio, v->data_dev->bdev);
bio->bi_iter.bi_sector = verity_map_sector(v, bio->bi_iter.bi_sector);
if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 884ff7c170a0..70485de37b66 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -409,7 +409,7 @@ static struct dmz_mblock *dmz_fetch_mblock(struct dmz_metadata *zmd,
}
bio->bi_iter.bi_sector = dmz_blk2sect(block);
- bio->bi_bdev = zmd->dev->bdev;
+ bio_set_dev(bio, zmd->dev->bdev);
bio->bi_private = mblk;
bio->bi_end_io = dmz_mblock_bio_end_io;
bio_set_op_attrs(bio, REQ_OP_READ, REQ_META | REQ_PRIO);
@@ -564,7 +564,7 @@ static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
set_bit(DMZ_META_WRITING, &mblk->state);
bio->bi_iter.bi_sector = dmz_blk2sect(block);
- bio->bi_bdev = zmd->dev->bdev;
+ bio_set_dev(bio, zmd->dev->bdev);
bio->bi_private = mblk;
bio->bi_end_io = dmz_mblock_bio_end_io;
bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO);
@@ -586,7 +586,7 @@ static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block,
return -ENOMEM;
bio->bi_iter.bi_sector = dmz_blk2sect(block);
- bio->bi_bdev = zmd->dev->bdev;
+ bio_set_dev(bio, zmd->dev->bdev);
bio_set_op_attrs(bio, op, REQ_SYNC | REQ_META | REQ_PRIO);
bio_add_page(bio, page, DMZ_BLOCK_SIZE, 0);
ret = submit_bio_wait(bio);
@@ -624,7 +624,7 @@ static int dmz_write_sb(struct dmz_metadata *zmd, unsigned int set)
ret = dmz_rdwr_block(zmd, REQ_OP_WRITE, block, mblk->page);
if (ret == 0)
- ret = blkdev_issue_flush(zmd->dev->bdev, GFP_KERNEL, NULL);
+ ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
return ret;
}
@@ -658,7 +658,7 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
/* Flush drive cache (this will also sync data) */
if (ret == 0)
- ret = blkdev_issue_flush(zmd->dev->bdev, GFP_KERNEL, NULL);
+ ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
return ret;
}
@@ -722,7 +722,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
/* If there are no dirty metadata blocks, just flush the device cache */
if (list_empty(&write_list)) {
- ret = blkdev_issue_flush(zmd->dev->bdev, GFP_KERNEL, NULL);
+ ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
goto out;
}
@@ -927,7 +927,7 @@ static int dmz_recover_mblocks(struct dmz_metadata *zmd, unsigned int dst_set)
(zmd->nr_meta_zones << zmd->dev->zone_nr_blocks_shift);
}
- page = alloc_page(GFP_KERNEL);
+ page = alloc_page(GFP_NOIO);
if (!page)
return -ENOMEM;
@@ -1183,7 +1183,7 @@ static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
/* Get zone information from disk */
ret = blkdev_report_zones(zmd->dev->bdev, dmz_start_sect(zmd, zone),
- &blkz, &nr_blkz, GFP_KERNEL);
+ &blkz, &nr_blkz, GFP_NOIO);
if (ret) {
dmz_dev_err(zmd->dev, "Get zone %u report failed",
dmz_id(zmd, zone));
@@ -1257,7 +1257,7 @@ static int dmz_reset_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
ret = blkdev_reset_zones(dev->bdev,
dmz_start_sect(zmd, zone),
- dev->zone_nr_sectors, GFP_KERNEL);
+ dev->zone_nr_sectors, GFP_NOIO);
if (ret) {
dmz_dev_err(dev, "Reset zone %u failed %d",
dmz_id(zmd, zone), ret);
diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c
index 05c0a126f5c8..44a119e12f1a 100644
--- a/drivers/md/dm-zoned-reclaim.c
+++ b/drivers/md/dm-zoned-reclaim.c
@@ -75,7 +75,7 @@ static int dmz_reclaim_align_wp(struct dmz_reclaim *zrc, struct dm_zone *zone,
nr_blocks = block - wp_block;
ret = blkdev_issue_zeroout(zrc->dev->bdev,
dmz_start_sect(zmd, zone) + dmz_blk2sect(wp_block),
- dmz_blk2sect(nr_blocks), GFP_NOFS, false);
+ dmz_blk2sect(nr_blocks), GFP_NOIO, 0);
if (ret) {
dmz_dev_err(zrc->dev,
"Align zone %u wp %llu to %llu (wp+%u) blocks failed %d",
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 2b538fa817f4..b87c1741da4b 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -238,7 +238,7 @@ static void dmz_submit_write_bio(struct dmz_target *dmz, struct dm_zone *zone,
struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
/* Setup and submit the BIO */
- bio->bi_bdev = dmz->dev->bdev;
+ bio_set_dev(bio, dmz->dev->bdev);
bio->bi_iter.bi_sector = dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
atomic_inc(&bioctx->ref);
generic_make_request(bio);
@@ -541,7 +541,7 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
int ret;
/* Create a new chunk work */
- cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOFS);
+ cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO);
if (!cw)
goto out;
@@ -586,9 +586,9 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
(unsigned long long)dmz_chunk_block(dmz->dev, dmz_bio_block(bio)),
(unsigned int)dmz_bio_blocks(bio));
- bio->bi_bdev = dev->bdev;
+ bio_set_dev(bio, dev->bdev);
- if (!nr_sectors && (bio_op(bio) != REQ_OP_FLUSH) && (bio_op(bio) != REQ_OP_WRITE))
+ if (!nr_sectors && bio_op(bio) != REQ_OP_WRITE)
return DM_MAPIO_REMAPPED;
/* The BIO should be block aligned */
@@ -603,7 +603,7 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
bioctx->status = BLK_STS_OK;
/* Set the BIO pending in the flush list */
- if (bio_op(bio) == REQ_OP_FLUSH || (!nr_sectors && bio_op(bio) == REQ_OP_WRITE)) {
+ if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) {
spin_lock(&dmz->flush_lock);
bio_list_add(&dmz->flush_list, bio);
spin_unlock(&dmz->flush_lock);
@@ -785,7 +785,7 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
/* Chunk BIO work */
mutex_init(&dmz->chunk_lock);
- INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOFS);
+ INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_KERNEL);
dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s", WQ_MEM_RECLAIM | WQ_UNBOUND,
0, dev->name);
if (!dmz->chunk_wq) {
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 2edbcc2d7d3f..04ae795e8a5f 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -27,16 +27,6 @@
#define DM_MSG_PREFIX "core"
-#ifdef CONFIG_PRINTK
-/*
- * ratelimit state to be used in DMXXX_LIMIT().
- */
-DEFINE_RATELIMIT_STATE(dm_ratelimit_state,
- DEFAULT_RATELIMIT_INTERVAL,
- DEFAULT_RATELIMIT_BURST);
-EXPORT_SYMBOL(dm_ratelimit_state);
-#endif
-
/*
* Cookies are numeric values sent with CHANGE and REMOVE
* uevents while resuming, removing or renaming the device.
@@ -520,7 +510,7 @@ static void start_io_acct(struct dm_io *io)
io->start_time = jiffies;
cpu = part_stat_lock();
- part_round_stats(cpu, &dm_disk(md)->part0);
+ part_round_stats(md->queue, cpu, &dm_disk(md)->part0);
part_stat_unlock();
atomic_set(&dm_disk(md)->part0.in_flight[rw],
atomic_inc_return(&md->pending[rw]));
@@ -539,7 +529,7 @@ static void end_io_acct(struct dm_io *io)
int pending;
int rw = bio_data_dir(bio);
- generic_end_io_acct(rw, &dm_disk(md)->part0, io->start_time);
+ generic_end_io_acct(md->queue, rw, &dm_disk(md)->part0, io->start_time);
if (unlikely(dm_stats_used(&md->stats)))
dm_stats_account_io(&md->stats, bio_data_dir(bio),
@@ -851,10 +841,10 @@ static void clone_endio(struct bio *bio)
if (unlikely(error == BLK_STS_TARGET)) {
if (bio_op(bio) == REQ_OP_WRITE_SAME &&
- !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors)
+ !bio->bi_disk->queue->limits.max_write_same_sectors)
disable_write_same(md);
if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
- !bdev_get_queue(bio->bi_bdev)->limits.max_write_zeroes_sectors)
+ !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
disable_write_zeroes(md);
}
@@ -1215,8 +1205,8 @@ static void __map_bio(struct dm_target_io *tio)
break;
case DM_MAPIO_REMAPPED:
/* the bio has been remapped so dispatch it */
- trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone,
- tio->io->bio->bi_bdev->bd_dev, sector);
+ trace_block_bio_remap(clone->bi_disk->queue, clone,
+ bio_dev(tio->io->bio), sector);
generic_make_request(clone);
break;
case DM_MAPIO_KILL:
@@ -1523,7 +1513,7 @@ static void __split_and_process_bio(struct mapped_device *md,
}
/* drop the extra reference count */
- dec_pending(ci.io, error);
+ dec_pending(ci.io, errno_to_blk_status(error));
}
/*-----------------------------------------------------------------
* CRUD END
@@ -1542,7 +1532,7 @@ static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
map = dm_get_live_table(md, &srcu_idx);
- generic_start_io_acct(rw, bio_sectors(bio), &dm_disk(md)->part0);
+ generic_start_io_acct(q, rw, bio_sectors(bio), &dm_disk(md)->part0);
/* if we're suspended, we have to queue this io for later */
if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
@@ -1796,7 +1786,7 @@ static struct mapped_device *alloc_dev(int minor)
goto bad;
bio_init(&md->flush_bio, NULL, 0);
- md->flush_bio.bi_bdev = md->bdev;
+ bio_set_dev(&md->flush_bio, md->bdev);
md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
dm_stats_init(&md->stats);
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 06a64d5d8c6c..38264b38420f 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -216,12 +216,12 @@ static bool faulty_make_request(struct mddev *mddev, struct bio *bio)
if (failit) {
struct bio *b = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);
- b->bi_bdev = conf->rdev->bdev;
+ bio_set_dev(b, conf->rdev->bdev);
b->bi_private = bio;
b->bi_end_io = faulty_fail;
bio = b;
} else
- bio->bi_bdev = conf->rdev->bdev;
+ bio_set_dev(bio, conf->rdev->bdev);
generic_make_request(bio);
return true;
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 5f1eb9189542..c464fb48039a 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -275,17 +275,17 @@ static bool linear_make_request(struct mddev *mddev, struct bio *bio)
bio = split;
}
- bio->bi_bdev = tmp_dev->rdev->bdev;
+ bio_set_dev(bio, tmp_dev->rdev->bdev);
bio->bi_iter.bi_sector = bio->bi_iter.bi_sector -
start_sector + data_offset;
if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
- !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) {
+ !blk_queue_discard(bio->bi_disk->queue))) {
/* Just ignore it */
bio_endio(bio);
} else {
if (mddev->gendisk)
- trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
+ trace_block_bio_remap(bio->bi_disk->queue,
bio, disk_devt(mddev->gendisk),
bio_sector);
mddev_check_writesame(mddev, bio);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 8cdca0296749..08fcaebc61bd 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -422,7 +422,7 @@ static void submit_flushes(struct work_struct *ws)
bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
bi->bi_end_io = md_end_flush;
bi->bi_private = rdev;
- bi->bi_bdev = rdev->bdev;
+ bio_set_dev(bi, rdev->bdev);
bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
atomic_inc(&mddev->flush_pending);
submit_bio(bi);
@@ -772,7 +772,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
atomic_inc(&rdev->nr_pending);
- bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
+ bio_set_dev(bio, rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev);
bio->bi_iter.bi_sector = sector;
bio_add_page(bio, page, size, 0);
bio->bi_private = rdev;
@@ -803,8 +803,10 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
struct bio *bio = md_bio_alloc_sync(rdev->mddev);
int ret;
- bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
- rdev->meta_bdev : rdev->bdev;
+ if (metadata_op && rdev->meta_bdev)
+ bio_set_dev(bio, rdev->meta_bdev);
+ else
+ bio_set_dev(bio, rdev->bdev);
bio_set_op_attrs(bio, op, op_flags);
if (metadata_op)
bio->bi_iter.bi_sector = sector + rdev->sb_start;
@@ -1536,7 +1538,8 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
} else if (sb->bblog_offset != 0)
rdev->badblocks.shift = 0;
- if (le32_to_cpu(sb->feature_map) & MD_FEATURE_PPL) {
+ if ((le32_to_cpu(sb->feature_map) &
+ (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS))) {
rdev->ppl.offset = (__s16)le16_to_cpu(sb->ppl.offset);
rdev->ppl.size = le16_to_cpu(sb->ppl.size);
rdev->ppl.sector = rdev->sb_start + rdev->ppl.offset;
@@ -1655,10 +1658,15 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)
set_bit(MD_HAS_JOURNAL, &mddev->flags);
- if (le32_to_cpu(sb->feature_map) & MD_FEATURE_PPL) {
+ if (le32_to_cpu(sb->feature_map) &
+ (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS)) {
if (le32_to_cpu(sb->feature_map) &
(MD_FEATURE_BITMAP_OFFSET | MD_FEATURE_JOURNAL))
return -EINVAL;
+ if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_PPL) &&
+ (le32_to_cpu(sb->feature_map) &
+ MD_FEATURE_MULTIPLE_PPLS))
+ return -EINVAL;
set_bit(MD_HAS_PPL, &mddev->flags);
}
} else if (mddev->pers == NULL) {
@@ -1875,7 +1883,11 @@ retry:
sb->feature_map |= cpu_to_le32(MD_FEATURE_JOURNAL);
if (test_bit(MD_HAS_PPL, &mddev->flags)) {
- sb->feature_map |= cpu_to_le32(MD_FEATURE_PPL);
+ if (test_bit(MD_HAS_MULTIPLE_PPLS, &mddev->flags))
+ sb->feature_map |=
+ cpu_to_le32(MD_FEATURE_MULTIPLE_PPLS);
+ else
+ sb->feature_map |= cpu_to_le32(MD_FEATURE_PPL);
sb->ppl.offset = cpu_to_le16(rdev->ppl.offset);
sb->ppl.size = cpu_to_le16(rdev->ppl.size);
}
@@ -2287,7 +2299,7 @@ static void export_array(struct mddev *mddev)
static bool set_in_sync(struct mddev *mddev)
{
- WARN_ON_ONCE(!spin_is_locked(&mddev->lock));
+ WARN_ON_ONCE(NR_CPUS != 1 && !spin_is_locked(&mddev->lock));
if (!mddev->in_sync) {
mddev->sync_checkers++;
spin_unlock(&mddev->lock);
@@ -4283,6 +4295,8 @@ new_dev_store(struct mddev *mddev, const char *buf, size_t len)
if (err)
export_rdev(rdev);
mddev_unlock(mddev);
+ if (!err)
+ md_new_event(mddev);
return err ? err : len;
}
@@ -7836,7 +7850,7 @@ static const struct file_operations md_seq_fops = {
.open = md_seq_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release_private,
+ .release = seq_release,
.poll = mdstat_poll,
};
@@ -7996,7 +8010,7 @@ bool md_write_start(struct mddev *mddev, struct bio *bi)
if (mddev->safemode == 1)
mddev->safemode = 0;
/* sync_checkers is always 0 when writes_pending is in per-cpu mode */
- if (mddev->in_sync || !mddev->sync_checkers) {
+ if (mddev->in_sync || mddev->sync_checkers) {
spin_lock(&mddev->lock);
if (mddev->in_sync) {
mddev->in_sync = 0;
@@ -8656,6 +8670,9 @@ void md_check_recovery(struct mddev *mddev)
if (mddev_trylock(mddev)) {
int spares = 0;
+ if (!mddev->external && mddev->safemode == 1)
+ mddev->safemode = 0;
+
if (mddev->ro) {
struct md_rdev *rdev;
if (!mddev->external && mddev->in_sync)
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 991f0fe2dcc6..561d22b9a9a8 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -134,7 +134,9 @@ enum flag_bits {
Faulty, /* device is known to have a fault */
In_sync, /* device is in_sync with rest of array */
Bitmap_sync, /* ..actually, not quite In_sync. Need a
- * bitmap-based recovery to get fully in sync
+ * bitmap-based recovery to get fully in sync.
+ * The bit is only meaningful before device
+ * has been passed to pers->hot_add_disk.
*/
WriteMostly, /* Avoid reading if at all possible */
AutoDetected, /* added by auto-detect */
@@ -234,6 +236,7 @@ enum mddev_flags {
* never cause the array to become failed.
*/
MD_HAS_PPL, /* The raid array has PPL feature set */
+ MD_HAS_MULTIPLE_PPLS, /* The raid array has multiple PPLs feature set */
};
enum mddev_sb_flags {
@@ -507,6 +510,11 @@ static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sect
atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
}
+static inline void md_sync_acct_bio(struct bio *bio, unsigned long nr_sectors)
+{
+ atomic_add(nr_sectors, &bio->bi_disk->sync_io);
+}
+
struct md_personality
{
char *name;
@@ -719,68 +727,14 @@ static inline void mddev_clear_unsupported_flags(struct mddev *mddev,
static inline void mddev_check_writesame(struct mddev *mddev, struct bio *bio)
{
if (bio_op(bio) == REQ_OP_WRITE_SAME &&
- !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors)
+ !bio->bi_disk->queue->limits.max_write_same_sectors)
mddev->queue->limits.max_write_same_sectors = 0;
}
static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio)
{
if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
- !bdev_get_queue(bio->bi_bdev)->limits.max_write_zeroes_sectors)
+ !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
mddev->queue->limits.max_write_zeroes_sectors = 0;
}
-
-/* Maximum size of each resync request */
-#define RESYNC_BLOCK_SIZE (64*1024)
-#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
-
-/* for managing resync I/O pages */
-struct resync_pages {
- unsigned idx; /* for get/put page from the pool */
- void *raid_bio;
- struct page *pages[RESYNC_PAGES];
-};
-
-static inline int resync_alloc_pages(struct resync_pages *rp,
- gfp_t gfp_flags)
-{
- int i;
-
- for (i = 0; i < RESYNC_PAGES; i++) {
- rp->pages[i] = alloc_page(gfp_flags);
- if (!rp->pages[i])
- goto out_free;
- }
-
- return 0;
-
-out_free:
- while (--i >= 0)
- put_page(rp->pages[i]);
- return -ENOMEM;
-}
-
-static inline void resync_free_pages(struct resync_pages *rp)
-{
- int i;
-
- for (i = 0; i < RESYNC_PAGES; i++)
- put_page(rp->pages[i]);
-}
-
-static inline void resync_get_all_pages(struct resync_pages *rp)
-{
- int i;
-
- for (i = 0; i < RESYNC_PAGES; i++)
- get_page(rp->pages[i]);
-}
-
-static inline struct page *resync_fetch_page(struct resync_pages *rp,
- unsigned idx)
-{
- if (WARN_ON_ONCE(idx >= RESYNC_PAGES))
- return NULL;
- return rp->pages[idx];
-}
#endif /* _MD_MD_H */
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 23a162ba6c56..b68e0666b9b0 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -134,7 +134,7 @@ static bool multipath_make_request(struct mddev *mddev, struct bio * bio)
__bio_clone_fast(&mp_bh->bio, bio);
mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
- mp_bh->bio.bi_bdev = multipath->rdev->bdev;
+ bio_set_dev(&mp_bh->bio, multipath->rdev->bdev);
mp_bh->bio.bi_opf |= REQ_FAILFAST_TRANSPORT;
mp_bh->bio.bi_end_io = multipath_end_request;
mp_bh->bio.bi_private = mp_bh;
@@ -345,17 +345,17 @@ static void multipathd(struct md_thread *thread)
if ((mp_bh->path = multipath_map (conf))<0) {
pr_err("multipath: %s: unrecoverable IO read error for block %llu\n",
- bdevname(bio->bi_bdev,b),
+ bio_devname(bio, b),
(unsigned long long)bio->bi_iter.bi_sector);
multipath_end_bh_io(mp_bh, BLK_STS_IOERR);
} else {
pr_err("multipath: %s: redirecting sector %llu to another IO path\n",
- bdevname(bio->bi_bdev,b),
+ bio_devname(bio, b),
(unsigned long long)bio->bi_iter.bi_sector);
*bio = *(mp_bh->master_bio);
bio->bi_iter.bi_sector +=
conf->multipaths[mp_bh->path].rdev->data_offset;
- bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
+ bio_set_dev(bio, conf->multipaths[mp_bh->path].rdev->bdev);
bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
bio->bi_end_io = multipath_end_request;
bio->bi_private = mp_bh;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 94d9ae9b0fd0..5a00fc118470 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -30,7 +30,8 @@
((1L << MD_HAS_JOURNAL) | \
(1L << MD_JOURNAL_CLEAN) | \
(1L << MD_FAILFAST_SUPPORTED) |\
- (1L << MD_HAS_PPL))
+ (1L << MD_HAS_PPL) | \
+ (1L << MD_HAS_MULTIPLE_PPLS))
static int raid0_congested(struct mddev *mddev, int bits)
{
@@ -539,6 +540,7 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
!discard_bio)
continue;
bio_chain(discard_bio, bio);
+ bio_clone_blkcg_association(discard_bio, bio);
if (mddev->gendisk)
trace_block_bio_remap(bdev_get_queue(rdev->bdev),
discard_bio, disk_devt(mddev->gendisk),
@@ -588,14 +590,13 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
zone = find_zone(mddev->private, &sector);
tmp_dev = map_sector(mddev, zone, sector, &sector);
- bio->bi_bdev = tmp_dev->bdev;
+ bio_set_dev(bio, tmp_dev->bdev);
bio->bi_iter.bi_sector = sector + zone->dev_start +
tmp_dev->data_offset;
if (mddev->gendisk)
- trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
- bio, disk_devt(mddev->gendisk),
- bio_sector);
+ trace_block_bio_remap(bio->bi_disk->queue, bio,
+ disk_devt(mddev->gendisk), bio_sector);
mddev_check_writesame(mddev, bio);
mddev_check_write_zeroes(mddev, bio);
generic_make_request(bio);
diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c
new file mode 100644
index 000000000000..9f2670b45f31
--- /dev/null
+++ b/drivers/md/raid1-10.c
@@ -0,0 +1,81 @@
+/* Maximum size of each resync request */
+#define RESYNC_BLOCK_SIZE (64*1024)
+#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
+
+/* for managing resync I/O pages */
+struct resync_pages {
+ void *raid_bio;
+ struct page *pages[RESYNC_PAGES];
+};
+
+static inline int resync_alloc_pages(struct resync_pages *rp,
+ gfp_t gfp_flags)
+{
+ int i;
+
+ for (i = 0; i < RESYNC_PAGES; i++) {
+ rp->pages[i] = alloc_page(gfp_flags);
+ if (!rp->pages[i])
+ goto out_free;
+ }
+
+ return 0;
+
+out_free:
+ while (--i >= 0)
+ put_page(rp->pages[i]);
+ return -ENOMEM;
+}
+
+static inline void resync_free_pages(struct resync_pages *rp)
+{
+ int i;
+
+ for (i = 0; i < RESYNC_PAGES; i++)
+ put_page(rp->pages[i]);
+}
+
+static inline void resync_get_all_pages(struct resync_pages *rp)
+{
+ int i;
+
+ for (i = 0; i < RESYNC_PAGES; i++)
+ get_page(rp->pages[i]);
+}
+
+static inline struct page *resync_fetch_page(struct resync_pages *rp,
+ unsigned idx)
+{
+ if (WARN_ON_ONCE(idx >= RESYNC_PAGES))
+ return NULL;
+ return rp->pages[idx];
+}
+
+/*
+ * 'struct resync_pages' stores actual pages used for doing the resync
+ * IO, and it is per-bio, so make .bi_private point to it.
+ */
+static inline struct resync_pages *get_resync_pages(struct bio *bio)
+{
+ return bio->bi_private;
+}
+
+/* generally called after bio_reset() for resetting bvec */
+static void md_bio_reset_resync_pages(struct bio *bio, struct resync_pages *rp,
+ int size)
+{
+ int idx = 0;
+
+ /* initialize bvec table again */
+ do {
+ struct page *page = resync_fetch_page(rp, idx);
+ int len = min_t(int, size, PAGE_SIZE);
+
+ /*
+ * won't fail because the vec table is big
+ * enough to hold all these pages
+ */
+ bio_add_page(bio, page, len, 0);
+ size -= len;
+ } while (idx++ < RESYNC_PAGES && size > 0);
+}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 3febfc8391fb..f3f3e40dc9d8 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -48,7 +48,8 @@
#define UNSUPPORTED_MDDEV_FLAGS \
((1L << MD_HAS_JOURNAL) | \
(1L << MD_JOURNAL_CLEAN) | \
- (1L << MD_HAS_PPL))
+ (1L << MD_HAS_PPL) | \
+ (1L << MD_HAS_MULTIPLE_PPLS))
/*
* Number of guaranteed r1bios in case of extreme VM load:
@@ -81,14 +82,7 @@ static void lower_barrier(struct r1conf *conf, sector_t sector_nr);
#define raid1_log(md, fmt, args...) \
do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0)
-/*
- * 'strct resync_pages' stores actual pages used for doing the resync
- * IO, and it is per-bio, so make .bi_private points to it.
- */
-static inline struct resync_pages *get_resync_pages(struct bio *bio)
-{
- return bio->bi_private;
-}
+#include "raid1-10.c"
/*
* for resync bio, r1bio pointer can be retrieved from the per-bio
@@ -170,7 +164,6 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
resync_get_all_pages(rp);
}
- rp->idx = 0;
rp->raid_bio = r1_bio;
bio->bi_private = rp;
}
@@ -492,10 +485,6 @@ static void raid1_end_write_request(struct bio *bio)
}
if (behind) {
- /* we release behind master bio when all write are done */
- if (r1_bio->behind_master_bio == bio)
- to_put = NULL;
-
if (test_bit(WriteMostly, &rdev->flags))
atomic_dec(&r1_bio->behind_remaining);
@@ -798,14 +787,13 @@ static void flush_bio_list(struct r1conf *conf, struct bio *bio)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
- struct md_rdev *rdev = (void*)bio->bi_bdev;
+ struct md_rdev *rdev = (void *)bio->bi_disk;
bio->bi_next = NULL;
- bio->bi_bdev = rdev->bdev;
+ bio_set_dev(bio, rdev->bdev);
if (test_bit(Faulty, &rdev->flags)) {
- bio->bi_status = BLK_STS_IOERR;
- bio_endio(bio);
+ bio_io_error(bio);
} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
- !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
+ !blk_queue_discard(bio->bi_disk->queue)))
/* Just ignore it */
bio_endio(bio);
else
@@ -1088,7 +1076,7 @@ static void unfreeze_array(struct r1conf *conf)
wake_up(&conf->wait_barrier);
}
-static struct bio *alloc_behind_master_bio(struct r1bio *r1_bio,
+static void alloc_behind_master_bio(struct r1bio *r1_bio,
struct bio *bio)
{
int size = bio->bi_iter.bi_size;
@@ -1098,11 +1086,13 @@ static struct bio *alloc_behind_master_bio(struct r1bio *r1_bio,
behind_bio = bio_alloc_mddev(GFP_NOIO, vcnt, r1_bio->mddev);
if (!behind_bio)
- goto fail;
+ return;
/* discard op, we don't support writezero/writesame yet */
- if (!bio_has_data(bio))
+ if (!bio_has_data(bio)) {
+ behind_bio->bi_iter.bi_size = size;
goto skip_copy;
+ }
while (i < vcnt && size) {
struct page *page;
@@ -1123,14 +1113,13 @@ skip_copy:
r1_bio->behind_master_bio = behind_bio;;
set_bit(R1BIO_BehindIO, &r1_bio->state);
- return behind_bio;
+ return;
free_pages:
pr_debug("%dB behind alloc failed, doing sync I/O\n",
bio->bi_iter.bi_size);
bio_free_pages(behind_bio);
-fail:
- return behind_bio;
+ bio_put(behind_bio);
}
struct raid1_plug_cb {
@@ -1285,7 +1274,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
read_bio->bi_iter.bi_sector = r1_bio->sector +
mirror->rdev->data_offset;
- read_bio->bi_bdev = mirror->rdev->bdev;
+ bio_set_dev(read_bio, mirror->rdev->bdev);
read_bio->bi_end_io = raid1_end_read_request;
bio_set_op_attrs(read_bio, op, do_sync);
if (test_bit(FailFast, &mirror->rdev->flags) &&
@@ -1294,9 +1283,8 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
read_bio->bi_private = r1_bio;
if (mddev->gendisk)
- trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
- read_bio, disk_devt(mddev->gendisk),
- r1_bio->sector);
+ trace_block_bio_remap(read_bio->bi_disk->queue, read_bio,
+ disk_devt(mddev->gendisk), r1_bio->sector);
generic_make_request(read_bio);
}
@@ -1483,7 +1471,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
(atomic_read(&bitmap->behind_writes)
< mddev->bitmap_info.max_write_behind) &&
!waitqueue_active(&bitmap->behind_wait)) {
- mbio = alloc_behind_master_bio(r1_bio, bio);
+ alloc_behind_master_bio(r1_bio, bio);
}
bitmap_startwrite(bitmap, r1_bio->sector,
@@ -1493,14 +1481,11 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
first_clone = 0;
}
- if (!mbio) {
- if (r1_bio->behind_master_bio)
- mbio = bio_clone_fast(r1_bio->behind_master_bio,
- GFP_NOIO,
- mddev->bio_set);
- else
- mbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);
- }
+ if (r1_bio->behind_master_bio)
+ mbio = bio_clone_fast(r1_bio->behind_master_bio,
+ GFP_NOIO, mddev->bio_set);
+ else
+ mbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);
if (r1_bio->behind_master_bio) {
if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
@@ -1511,7 +1496,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
mbio->bi_iter.bi_sector = (r1_bio->sector +
conf->mirrors[i].rdev->data_offset);
- mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
+ bio_set_dev(mbio, conf->mirrors[i].rdev->bdev);
mbio->bi_end_io = raid1_end_write_request;
mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA));
if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) &&
@@ -1523,11 +1508,11 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
atomic_inc(&r1_bio->remaining);
if (mddev->gendisk)
- trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev),
+ trace_block_bio_remap(mbio->bi_disk->queue,
mbio, disk_devt(mddev->gendisk),
r1_bio->sector);
/* flush_pending_writes() needs access to the rdev so...*/
- mbio->bi_bdev = (void*)conf->mirrors[i].rdev;
+ mbio->bi_disk = (void *)conf->mirrors[i].rdev;
cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
if (cb)
@@ -2005,8 +1990,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
* Don't fail devices as that won't really help.
*/
pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
- mdname(mddev),
- bdevname(bio->bi_bdev, b),
+ mdname(mddev), bio_devname(bio, b),
(unsigned long long)r1_bio->sector);
for (d = 0; d < conf->raid_disks * 2; d++) {
rdev = conf->mirrors[d].rdev;
@@ -2086,10 +2070,7 @@ static void process_checks(struct r1bio *r1_bio)
/* Fix variable parts of all bios */
vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
for (i = 0; i < conf->raid_disks * 2; i++) {
- int j;
- int size;
blk_status_t status;
- struct bio_vec *bi;
struct bio *b = r1_bio->bios[i];
struct resync_pages *rp = get_resync_pages(b);
if (b->bi_end_io != end_sync_read)
@@ -2098,24 +2079,15 @@ static void process_checks(struct r1bio *r1_bio)
status = b->bi_status;
bio_reset(b);
b->bi_status = status;
- b->bi_vcnt = vcnt;
- b->bi_iter.bi_size = r1_bio->sectors << 9;
b->bi_iter.bi_sector = r1_bio->sector +
conf->mirrors[i].rdev->data_offset;
- b->bi_bdev = conf->mirrors[i].rdev->bdev;
+ bio_set_dev(b, conf->mirrors[i].rdev->bdev);
b->bi_end_io = end_sync_read;
rp->raid_bio = r1_bio;
b->bi_private = rp;
- size = b->bi_iter.bi_size;
- bio_for_each_segment_all(bi, b, j) {
- bi->bv_offset = 0;
- if (size > PAGE_SIZE)
- bi->bv_len = PAGE_SIZE;
- else
- bi->bv_len = size;
- size -= PAGE_SIZE;
- }
+ /* initialize bvec table again */
+ md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9);
}
for (primary = 0; primary < conf->raid_disks * 2; primary++)
if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
@@ -2366,8 +2338,6 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
wbio = bio_clone_fast(r1_bio->behind_master_bio,
GFP_NOIO,
mddev->bio_set);
- /* We really need a _all clone */
- wbio->bi_iter = (struct bvec_iter){ 0 };
} else {
wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO,
mddev->bio_set);
@@ -2379,7 +2349,7 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
bio_trim(wbio, sector - r1_bio->sector, sectors);
wbio->bi_iter.bi_sector += rdev->data_offset;
- wbio->bi_bdev = rdev->bdev;
+ bio_set_dev(wbio, rdev->bdev);
if (submit_bio_wait(wbio) < 0)
/* failure! */
@@ -2469,7 +2439,6 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
struct mddev *mddev = conf->mddev;
struct bio *bio;
struct md_rdev *rdev;
- dev_t bio_dev;
sector_t bio_sector;
clear_bit(R1BIO_ReadError, &r1_bio->state);
@@ -2483,7 +2452,6 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
*/
bio = r1_bio->bios[r1_bio->read_disk];
- bio_dev = bio->bi_bdev->bd_dev;
bio_sector = conf->mirrors[r1_bio->read_disk].rdev->data_offset + r1_bio->sector;
bio_put(bio);
r1_bio->bios[r1_bio->read_disk] = NULL;
@@ -2593,6 +2561,23 @@ static int init_resync(struct r1conf *conf)
return 0;
}
+static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf)
+{
+ struct r1bio *r1bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
+ struct resync_pages *rps;
+ struct bio *bio;
+ int i;
+
+ for (i = conf->poolinfo->raid_disks; i--; ) {
+ bio = r1bio->bios[i];
+ rps = bio->bi_private;
+ bio_reset(bio);
+ bio->bi_private = rps;
+ }
+ r1bio->master_bio = NULL;
+ return r1bio;
+}
+
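The new raid1_alloc_init_r1buf() resets each pooled bio but has to keep its resync_pages pointer alive across bio_reset(), because bio_reset() wipes bi_private along with everything else. A minimal userspace sketch of that save-and-restore pattern, with invented types (struct fake_bio, fake_bio_reset()) standing in for the kernel ones:

#include <stdio.h>
#include <string.h>

struct fake_bio {
	void *bi_private;
	int   bi_flags;
	long  bi_size;
};

/* models bio_reset(): clears the whole bio, including bi_private */
static void fake_bio_reset(struct fake_bio *bio)
{
	memset(bio, 0, sizeof(*bio));
}

int main(void)
{
	int pages = 42;			/* stands in for the resync_pages data */
	struct fake_bio bio = { .bi_private = &pages, .bi_flags = 1, .bi_size = 4096 };

	void *rp = bio.bi_private;	/* save before the reset */
	fake_bio_reset(&bio);
	bio.bi_private = rp;		/* restore afterwards */

	printf("pages survived: %d\n", *(int *)bio.bi_private);
	return 0;
}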
/*
* perform a "sync" on one "block"
*
@@ -2619,6 +2604,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
int good_sectors = RESYNC_SECTORS;
int min_bad = 0; /* number of sectors that are bad in all devices */
int idx = sector_to_idx(sector_nr);
+ int page_idx = 0;
if (!conf->r1buf_pool)
if (init_resync(conf))
@@ -2677,7 +2663,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
bitmap_cond_end_sync(mddev->bitmap, sector_nr,
mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
- r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
+ r1_bio = raid1_alloc_init_r1buf(conf);
raise_barrier(conf, sector_nr);
@@ -2755,7 +2741,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
if (bio->bi_end_io) {
atomic_inc(&rdev->nr_pending);
bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
- bio->bi_bdev = rdev->bdev;
+ bio_set_dev(bio, rdev->bdev);
if (test_bit(FailFast, &rdev->flags))
bio->bi_opf |= MD_FAILFAST;
}
@@ -2846,7 +2832,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
bio = r1_bio->bios[i];
rp = get_resync_pages(bio);
if (bio->bi_end_io) {
- page = resync_fetch_page(rp, rp->idx++);
+ page = resync_fetch_page(rp, page_idx);
/*
* won't fail because the vec table is big
@@ -2858,7 +2844,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
nr_sectors += len>>9;
sector_nr += len>>9;
sync_blocks -= (len>>9);
- } while (get_resync_pages(r1_bio->bios[disk]->bi_private)->idx < RESYNC_PAGES);
+ } while (++page_idx < RESYNC_PAGES);
r1_bio->sectors = nr_sectors;
@@ -2881,7 +2867,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
bio = r1_bio->bios[i];
if (bio->bi_end_io == end_sync_read) {
read_targets--;
- md_sync_acct(bio->bi_bdev, nr_sectors);
+ md_sync_acct_bio(bio, nr_sectors);
if (read_targets == 1)
bio->bi_opf &= ~MD_FAILFAST;
generic_make_request(bio);
@@ -2890,7 +2876,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
} else {
atomic_set(&r1_bio->remaining, 1);
bio = r1_bio->bios[r1_bio->read_disk];
- md_sync_acct(bio->bi_bdev, nr_sectors);
+ md_sync_acct_bio(bio, nr_sectors);
if (read_targets == 1)
bio->bi_opf &= ~MD_FAILFAST;
generic_make_request(bio);
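The resync loop above no longer bumps a per-bio rp->idx; every bio attached to the r1_bio walks the same RESYNC_PAGES slots in lockstep, so a single local page_idx is enough. A small userspace model of that lockstep fill (sizes and names are illustrative only):

#include <stdio.h>

#define RESYNC_PAGES	4
#define SIM_PAGE_SIZE	4096L
#define NR_DEVS		3

int main(void)
{
	long remaining = 3 * SIM_PAGE_SIZE + 512;	/* bytes still to sync */
	long nr_bytes = 0;
	int page_idx = 0;

	do {
		long len = remaining > SIM_PAGE_SIZE ? SIM_PAGE_SIZE : remaining;

		/* every device's bio gets the same slot of its own page set */
		for (int d = 0; d < NR_DEVS; d++)
			printf("dev %d: page %d <- %ld bytes\n", d, page_idx, len);

		nr_bytes += len;
		remaining -= len;
	} while (++page_idx < RESYNC_PAGES && remaining > 0);

	printf("%ld bytes queued per device\n", nr_bytes);
	return 0;
}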
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 5026e7ad51d3..374df5796649 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -110,14 +110,7 @@ static void end_reshape(struct r10conf *conf);
#define raid10_log(md, fmt, args...) \
do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid10 " fmt, ##args); } while (0)
-/*
- * 'struct resync_pages' stores actual pages used for doing the resync
- * IO, and it is per-bio, so make .bi_private point to it.
- */
-static inline struct resync_pages *get_resync_pages(struct bio *bio)
-{
- return bio->bi_private;
-}
+#include "raid1-10.c"
/*
* for resync bio, r10bio pointer can be retrieved from the per-bio
@@ -221,7 +214,6 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
resync_get_all_pages(rp);
}
- rp->idx = 0;
rp->raid_bio = r10_bio;
bio->bi_private = rp;
if (rbio) {
@@ -909,14 +901,13 @@ static void flush_pending_writes(struct r10conf *conf)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
- struct md_rdev *rdev = (void*)bio->bi_bdev;
+ struct md_rdev *rdev = (void*)bio->bi_disk;
bio->bi_next = NULL;
- bio->bi_bdev = rdev->bdev;
+ bio_set_dev(bio, rdev->bdev);
if (test_bit(Faulty, &rdev->flags)) {
- bio->bi_status = BLK_STS_IOERR;
- bio_endio(bio);
+ bio_io_error(bio);
} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
- !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
+ !blk_queue_discard(bio->bi_disk->queue)))
/* Just ignore it */
bio_endio(bio);
else
@@ -1094,14 +1085,13 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next;
- struct md_rdev *rdev = (void*)bio->bi_bdev;
+ struct md_rdev *rdev = (void*)bio->bi_disk;
bio->bi_next = NULL;
- bio->bi_bdev = rdev->bdev;
+ bio_set_dev(bio, rdev->bdev);
if (test_bit(Faulty, &rdev->flags)) {
- bio->bi_status = BLK_STS_IOERR;
- bio_endio(bio);
+ bio_io_error(bio);
} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
- !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
+ !blk_queue_discard(bio->bi_disk->queue)))
/* Just ignore it */
bio_endio(bio);
else
@@ -1210,7 +1200,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
choose_data_offset(r10_bio, rdev);
- read_bio->bi_bdev = rdev->bdev;
+ bio_set_dev(read_bio, rdev->bdev);
read_bio->bi_end_io = raid10_end_read_request;
bio_set_op_attrs(read_bio, op, do_sync);
if (test_bit(FailFast, &rdev->flags) &&
@@ -1219,7 +1209,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
read_bio->bi_private = r10_bio;
if (mddev->gendisk)
- trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
+ trace_block_bio_remap(read_bio->bi_disk->queue,
read_bio, disk_devt(mddev->gendisk),
r10_bio->sector);
generic_make_request(read_bio);
@@ -1259,7 +1249,7 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
mbio->bi_iter.bi_sector = (r10_bio->devs[n_copy].addr +
choose_data_offset(r10_bio, rdev));
- mbio->bi_bdev = rdev->bdev;
+ bio_set_dev(mbio, rdev->bdev);
mbio->bi_end_io = raid10_end_write_request;
bio_set_op_attrs(mbio, op, do_sync | do_fua);
if (!replacement && test_bit(FailFast,
@@ -1269,11 +1259,11 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
mbio->bi_private = r10_bio;
if (conf->mddev->gendisk)
- trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev),
+ trace_block_bio_remap(mbio->bi_disk->queue,
mbio, disk_devt(conf->mddev->gendisk),
r10_bio->sector);
/* flush_pending_writes() needs access to the rdev so...*/
- mbio->bi_bdev = (void *)rdev;
+ mbio->bi_disk = (void *)rdev;
atomic_inc(&r10_bio->remaining);
@@ -2087,8 +2077,8 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
rp = get_resync_pages(tbio);
bio_reset(tbio);
- tbio->bi_vcnt = vcnt;
- tbio->bi_iter.bi_size = fbio->bi_iter.bi_size;
+ md_bio_reset_resync_pages(tbio, rp, fbio->bi_iter.bi_size);
+
rp->raid_bio = r10_bio;
tbio->bi_private = rp;
tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
@@ -2104,7 +2094,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
tbio->bi_opf |= MD_FAILFAST;
tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
- tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
+ bio_set_dev(tbio, conf->mirrors[d].rdev->bdev);
generic_make_request(tbio);
}
@@ -2562,7 +2552,7 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector);
wbio->bi_iter.bi_sector = wsector +
choose_data_offset(r10_bio, rdev);
- wbio->bi_bdev = rdev->bdev;
+ bio_set_dev(wbio, rdev->bdev);
bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
if (submit_bio_wait(wbio) < 0)
@@ -2585,7 +2575,6 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
struct bio *bio;
struct r10conf *conf = mddev->private;
struct md_rdev *rdev = r10_bio->devs[slot].rdev;
- dev_t bio_dev;
sector_t bio_last_sector;
/* we got a read error. Maybe the drive is bad. Maybe just
@@ -2597,7 +2586,6 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
* frozen.
*/
bio = r10_bio->devs[slot].bio;
- bio_dev = bio->bi_bdev->bd_dev;
bio_last_sector = r10_bio->devs[slot].addr + rdev->data_offset + r10_bio->sectors;
bio_put(bio);
r10_bio->devs[slot].bio = NULL;
@@ -2808,6 +2796,35 @@ static int init_resync(struct r10conf *conf)
return 0;
}
+static struct r10bio *raid10_alloc_init_r10buf(struct r10conf *conf)
+{
+ struct r10bio *r10bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
+ struct resync_pages *rp;
+ struct bio *bio;
+ int nalloc;
+ int i;
+
+ if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
+ test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
+ nalloc = conf->copies; /* resync */
+ else
+ nalloc = 2; /* recovery */
+
+ for (i = 0; i < nalloc; i++) {
+ bio = r10bio->devs[i].bio;
+ rp = bio->bi_private;
+ bio_reset(bio);
+ bio->bi_private = rp;
+ bio = r10bio->devs[i].repl_bio;
+ if (bio) {
+ rp = bio->bi_private;
+ bio_reset(bio);
+ bio->bi_private = rp;
+ }
+ }
+ return r10bio;
+}
+
/*
* perform a "sync" on one "block"
*
@@ -2853,6 +2870,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
sector_t sectors_skipped = 0;
int chunks_skipped = 0;
sector_t chunk_mask = conf->geo.chunk_mask;
+ int page_idx = 0;
if (!conf->r10buf_pool)
if (init_resync(conf))
@@ -2959,7 +2977,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
/* Again, very different code for resync and recovery.
* Both must result in an r10bio with a list of bios that
- * have bi_end_io, bi_sector, bi_bdev set,
+ * have bi_end_io, bi_sector, bi_disk set,
* and bi_private set to the r10bio.
* For recovery, we may actually create several r10bios
* with 2 bios in each, that correspond to the bios in the main one.
@@ -3036,7 +3054,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
atomic_inc(&mreplace->nr_pending);
rcu_read_unlock();
- r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
+ r10_bio = raid10_alloc_init_r10buf(conf);
r10_bio->state = 0;
raise_barrier(conf, rb2 != NULL);
atomic_set(&r10_bio->remaining, 0);
@@ -3104,7 +3122,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
from_addr = r10_bio->devs[j].addr;
bio->bi_iter.bi_sector = from_addr +
rdev->data_offset;
- bio->bi_bdev = rdev->bdev;
+ bio_set_dev(bio, rdev->bdev);
atomic_inc(&rdev->nr_pending);
/* and we write to 'i' (if not in_sync) */
@@ -3126,7 +3144,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
bio->bi_iter.bi_sector = to_addr
+ mrdev->data_offset;
- bio->bi_bdev = mrdev->bdev;
+ bio_set_dev(bio, mrdev->bdev);
atomic_inc(&r10_bio->remaining);
} else
r10_bio->devs[1].bio->bi_end_io = NULL;
@@ -3152,7 +3170,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
bio->bi_iter.bi_sector = to_addr +
mreplace->data_offset;
- bio->bi_bdev = mreplace->bdev;
+ bio_set_dev(bio, mreplace->bdev);
atomic_inc(&r10_bio->remaining);
break;
}
@@ -3245,7 +3263,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
}
if (sync_blocks < max_sync)
max_sync = sync_blocks;
- r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
+ r10_bio = raid10_alloc_init_r10buf(conf);
r10_bio->state = 0;
r10_bio->mddev = mddev;
@@ -3298,7 +3316,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
if (test_bit(FailFast, &rdev->flags))
bio->bi_opf |= MD_FAILFAST;
bio->bi_iter.bi_sector = sector + rdev->data_offset;
- bio->bi_bdev = rdev->bdev;
+ bio_set_dev(bio, rdev->bdev);
count++;
rdev = rcu_dereference(conf->mirrors[d].replacement);
@@ -3320,7 +3338,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
if (test_bit(FailFast, &rdev->flags))
bio->bi_opf |= MD_FAILFAST;
bio->bi_iter.bi_sector = sector + rdev->data_offset;
- bio->bi_bdev = rdev->bdev;
+ bio_set_dev(bio, rdev->bdev);
count++;
rcu_read_unlock();
}
@@ -3355,7 +3373,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
break;
for (bio= biolist ; bio ; bio=bio->bi_next) {
struct resync_pages *rp = get_resync_pages(bio);
- page = resync_fetch_page(rp, rp->idx++);
+ page = resync_fetch_page(rp, page_idx);
/*
* won't fail because the vec table is big enough
* to hold all these pages
@@ -3364,7 +3382,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
}
nr_sectors += len>>9;
sector_nr += len>>9;
- } while (get_resync_pages(biolist)->idx < RESYNC_PAGES);
+ } while (++page_idx < RESYNC_PAGES);
r10_bio->sectors = nr_sectors;
while (biolist) {
@@ -3376,7 +3394,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
r10_bio->sectors = nr_sectors;
if (bio->bi_end_io == end_sync_read) {
- md_sync_acct(bio->bi_bdev, nr_sectors);
+ md_sync_acct_bio(bio, nr_sectors);
bio->bi_status = 0;
generic_make_request(bio);
}
@@ -4369,7 +4387,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
read_more:
/* Now schedule reads for blocks from sector_nr to last */
- r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
+ r10_bio = raid10_alloc_init_r10buf(conf);
r10_bio->state = 0;
raise_barrier(conf, sectors_done != 0);
atomic_set(&r10_bio->remaining, 0);
@@ -4392,7 +4410,7 @@ read_more:
read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev);
- read_bio->bi_bdev = rdev->bdev;
+ bio_set_dev(read_bio, rdev->bdev);
read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
+ rdev->data_offset);
read_bio->bi_private = r10_bio;
@@ -4426,7 +4444,7 @@ read_more:
if (!rdev2 || test_bit(Faulty, &rdev2->flags))
continue;
- b->bi_bdev = rdev2->bdev;
+ bio_set_dev(b, rdev2->bdev);
b->bi_iter.bi_sector = r10_bio->devs[s/2].addr +
rdev2->new_data_offset;
b->bi_end_io = end_reshape_write;
@@ -4458,7 +4476,7 @@ read_more:
r10_bio->sectors = nr_sectors;
/* Now submit the read */
- md_sync_acct(read_bio->bi_bdev, r10_bio->sectors);
+ md_sync_acct_bio(read_bio, r10_bio->sectors);
atomic_inc(&r10_bio->remaining);
read_bio->bi_next = NULL;
generic_make_request(read_bio);
@@ -4520,7 +4538,7 @@ static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio)
}
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
- md_sync_acct(b->bi_bdev, r10_bio->sectors);
+ md_sync_acct_bio(b, r10_bio->sectors);
atomic_inc(&r10_bio->remaining);
b->bi_next = NULL;
generic_make_request(b);
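Both flush_pending_writes() and raid10_unplug() keep parking an rdev pointer in the bio's device field (now bi_disk instead of bi_bdev) until the bio is actually submitted, at which point the field is repointed at the real disk. A userspace model of that parking pattern, using invented struct names; the real code relies on the field never being dereferenced as a gendisk while the rdev is stashed there:

#include <stdio.h>

struct fake_disk { const char *name; };
struct fake_rdev { struct fake_disk disk; };
struct fake_bio  {
	void *bi_disk;			/* normally a struct fake_disk * */
	struct fake_bio *bi_next;
};

static void submit(struct fake_bio *bio)
{
	printf("submitting bio to %s\n", ((struct fake_disk *)bio->bi_disk)->name);
}

int main(void)
{
	struct fake_rdev rdev = { .disk = { .name = "sdb" } };
	struct fake_bio bio = { 0 };

	/* queueing side: park the rdev pointer in the device field */
	bio.bi_disk = (void *)&rdev;

	/* flush side: recover the rdev, then point the bio at the real disk */
	struct fake_rdev *r = bio.bi_disk;
	bio.bi_disk = &r->disk;
	submit(&bio);
	return 0;
}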
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index bfa1e907c472..0b7406ac8ce1 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -236,9 +236,10 @@ struct r5l_io_unit {
bool need_split_bio;
struct bio *split_bio;
- unsigned int has_flush:1; /* include flush request */
- unsigned int has_fua:1; /* include fua request */
- unsigned int has_null_flush:1; /* include empty flush request */
+ unsigned int has_flush:1; /* include flush request */
+ unsigned int has_fua:1; /* include fua request */
+ unsigned int has_null_flush:1; /* include null flush request */
+ unsigned int has_flush_payload:1; /* include flush payload */
/*
* io isn't sent yet, flush/fua request can only be submitted till it's
* the first IO in running_ios list
@@ -571,6 +572,8 @@ static void r5l_log_endio(struct bio *bio)
struct r5l_io_unit *io_deferred;
struct r5l_log *log = io->log;
unsigned long flags;
+ bool has_null_flush;
+ bool has_flush_payload;
if (bio->bi_status)
md_error(log->rdev->mddev, log->rdev);
@@ -580,6 +583,16 @@ static void r5l_log_endio(struct bio *bio)
spin_lock_irqsave(&log->io_list_lock, flags);
__r5l_set_io_unit_state(io, IO_UNIT_IO_END);
+
+ /*
+ * if the io does not have null_flush or flush payload,
+ * it is not safe to access it after releasing io_list_lock.
+ * Therefore, it is necessary to check the condition with
+ * the lock held.
+ */
+ has_null_flush = io->has_null_flush;
+ has_flush_payload = io->has_flush_payload;
+
if (log->need_cache_flush && !list_empty(&io->stripe_list))
r5l_move_to_end_ios(log);
else
@@ -600,19 +613,23 @@ static void r5l_log_endio(struct bio *bio)
if (log->need_cache_flush)
md_wakeup_thread(log->rdev->mddev->thread);
- if (io->has_null_flush) {
+ /* finish flush only io_unit and PAYLOAD_FLUSH only io_unit */
+ if (has_null_flush) {
struct bio *bi;
WARN_ON(bio_list_empty(&io->flush_barriers));
while ((bi = bio_list_pop(&io->flush_barriers)) != NULL) {
bio_endio(bi);
- atomic_dec(&io->pending_stripe);
+ if (atomic_dec_and_test(&io->pending_stripe)) {
+ __r5l_stripe_write_finished(io);
+ return;
+ }
}
}
-
- /* finish flush only io_unit and PAYLOAD_FLUSH only io_unit */
- if (atomic_read(&io->pending_stripe) == 0)
- __r5l_stripe_write_finished(io);
+ /* decrease pending_stripe for flush payload */
+ if (has_flush_payload)
+ if (atomic_dec_and_test(&io->pending_stripe))
+ __r5l_stripe_write_finished(io);
}
static void r5l_do_submit_io(struct r5l_log *log, struct r5l_io_unit *io)
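The r5l_log_endio() rework snapshots has_null_flush/has_flush_payload while io_list_lock is held and afterwards only drops references with atomic_dec_and_test(), so exactly one path frees the io_unit. A compressed userspace analogue using C11 atomics (the struct and helper names here are made up for the sketch):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct io_unit {
	atomic_int pending_stripe;
	int has_flush_payload;
};

static void io_finished(struct io_unit *io)
{
	printf("io_unit %p fully finished, freeing\n", (void *)io);
	free(io);
}

/* drop one reference; only the caller that reaches zero may free */
static void put_pending(struct io_unit *io)
{
	if (atomic_fetch_sub(&io->pending_stripe, 1) == 1)
		io_finished(io);
}

int main(void)
{
	struct io_unit *io = malloc(sizeof(*io));
	int has_flush_payload;

	if (!io)
		return 1;
	atomic_init(&io->pending_stripe, 2);
	io->has_flush_payload = 1;

	/* snapshot the flag first, mirroring the locked read above */
	has_flush_payload = io->has_flush_payload;

	put_pending(io);		/* e.g. a stripe completing */
	if (has_flush_payload)
		put_pending(io);	/* the flush payload reference */
	return 0;
}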
@@ -728,7 +745,7 @@ static struct bio *r5l_bio_alloc(struct r5l_log *log)
struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, log->bs);
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
- bio->bi_bdev = log->rdev->bdev;
+ bio_set_dev(bio, log->rdev->bdev);
bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;
return bio;
@@ -881,6 +898,11 @@ static void r5l_append_flush_payload(struct r5l_log *log, sector_t sect)
payload->size = cpu_to_le32(sizeof(__le64));
payload->flush_stripes[0] = cpu_to_le64(sect);
io->meta_offset += meta_size;
+ /* multiple flush payloads count as one pending_stripe */
+ if (!io->has_flush_payload) {
+ io->has_flush_payload = 1;
+ atomic_inc(&io->pending_stripe);
+ }
mutex_unlock(&log->io_mutex);
}
@@ -1291,7 +1313,7 @@ void r5l_flush_stripe_to_raid(struct r5l_log *log)
if (!do_flush)
return;
bio_reset(&log->flush_bio);
- log->flush_bio.bi_bdev = log->rdev->bdev;
+ bio_set_dev(&log->flush_bio, log->rdev->bdev);
log->flush_bio.bi_end_io = r5l_log_flush_endio;
log->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
submit_bio(&log->flush_bio);
@@ -1669,7 +1691,7 @@ static int r5l_recovery_fetch_ra_pool(struct r5l_log *log,
sector_t offset)
{
bio_reset(ctx->ra_bio);
- ctx->ra_bio->bi_bdev = log->rdev->bdev;
+ bio_set_dev(ctx->ra_bio, log->rdev->bdev);
bio_set_op_attrs(ctx->ra_bio, REQ_OP_READ, 0);
ctx->ra_bio->bi_iter.bi_sector = log->rdev->data_offset + offset;
@@ -2507,11 +2529,18 @@ static void r5l_write_super(struct r5l_log *log, sector_t cp)
static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page)
{
- struct r5conf *conf = mddev->private;
+ struct r5conf *conf;
int ret;
- if (!conf->log)
+ ret = mddev_lock(mddev);
+ if (ret)
+ return ret;
+
+ conf = mddev->private;
+ if (!conf || !conf->log) {
+ mddev_unlock(mddev);
return 0;
+ }
switch (conf->log->r5c_journal_mode) {
case R5C_JOURNAL_MODE_WRITE_THROUGH:
@@ -2529,6 +2558,7 @@ static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page)
default:
ret = 0;
}
+ mddev_unlock(mddev);
return ret;
}
@@ -2540,23 +2570,32 @@ static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page)
*/
int r5c_journal_mode_set(struct mddev *mddev, int mode)
{
- struct r5conf *conf = mddev->private;
- struct r5l_log *log = conf->log;
-
- if (!log)
- return -ENODEV;
+ struct r5conf *conf;
+ int err;
if (mode < R5C_JOURNAL_MODE_WRITE_THROUGH ||
mode > R5C_JOURNAL_MODE_WRITE_BACK)
return -EINVAL;
+ err = mddev_lock(mddev);
+ if (err)
+ return err;
+ conf = mddev->private;
+ if (!conf || !conf->log) {
+ mddev_unlock(mddev);
+ return -ENODEV;
+ }
+
if (raid5_calc_degraded(conf) > 0 &&
- mode == R5C_JOURNAL_MODE_WRITE_BACK)
+ mode == R5C_JOURNAL_MODE_WRITE_BACK) {
+ mddev_unlock(mddev);
return -EINVAL;
+ }
mddev_suspend(mddev);
conf->log->r5c_journal_mode = mode;
mddev_resume(mddev);
+ mddev_unlock(mddev);
pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n",
mdname(mddev), mode, r5c_journal_mode_str[mode]);
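r5c_journal_mode_show() and r5c_journal_mode_set() now take mddev_lock() and re-read conf before touching conf->log, unlocking on every return path. The same lock, revalidate, unlock-everywhere shape in a standalone pthread sketch (shared_cfg and show_mode() are invented for the example):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int *shared_cfg;		/* may be torn down concurrently */

/* model of the show() path: take the lock, re-check, unlock on every exit */
static int show_mode(void)
{
	int ret;

	pthread_mutex_lock(&lock);
	if (!shared_cfg) {
		pthread_mutex_unlock(&lock);
		return 0;		/* nothing to report */
	}
	ret = *shared_cfg;
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	int mode = 2;

	shared_cfg = &mode;
	printf("mode=%d\n", show_mode());
	shared_cfg = NULL;
	printf("mode=%d\n", show_mode());
	return 0;
}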
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index 77cce3573aa8..cd026c88f7ef 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -87,6 +87,8 @@
* The current io_unit accepting new stripes is always at the end of the list.
*/
+#define PPL_SPACE_SIZE (128 * 1024)
+
struct ppl_conf {
struct mddev *mddev;
@@ -122,6 +124,10 @@ struct ppl_log {
* always at the end of io_list */
spinlock_t io_list_lock;
struct list_head io_list; /* all io_units of this log */
+
+ sector_t next_io_sector;
+ unsigned int entry_space;
+ bool use_multippl;
};
#define PPL_IO_INLINE_BVECS 32
@@ -264,13 +270,12 @@ static int ppl_log_stripe(struct ppl_log *log, struct stripe_head *sh)
int i;
sector_t data_sector = 0;
int data_disks = 0;
- unsigned int entry_space = (log->rdev->ppl.size << 9) - PPL_HEADER_SIZE;
struct r5conf *conf = sh->raid_conf;
pr_debug("%s: stripe: %llu\n", __func__, (unsigned long long)sh->sector);
/* check if current io_unit is full */
- if (io && (io->pp_size == entry_space ||
+ if (io && (io->pp_size == log->entry_space ||
io->entries_count == PPL_HDR_MAX_ENTRIES)) {
pr_debug("%s: add io_unit blocked by seq: %llu\n",
__func__, io->seq);
@@ -415,7 +420,7 @@ static void ppl_submit_iounit_bio(struct ppl_io_unit *io, struct bio *bio)
pr_debug("%s: seq: %llu size: %u sector: %llu dev: %s\n",
__func__, io->seq, bio->bi_iter.bi_size,
(unsigned long long)bio->bi_iter.bi_sector,
- bdevname(bio->bi_bdev, b));
+ bio_devname(bio, b));
submit_bio(bio);
}
@@ -451,12 +456,25 @@ static void ppl_submit_iounit(struct ppl_io_unit *io)
pplhdr->entries_count = cpu_to_le32(io->entries_count);
pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PPL_HEADER_SIZE));
+ /* Rewind the buffer if current PPL is larger than remaining space */
+ if (log->use_multippl &&
+ log->rdev->ppl.sector + log->rdev->ppl.size - log->next_io_sector <
+ (PPL_HEADER_SIZE + io->pp_size) >> 9)
+ log->next_io_sector = log->rdev->ppl.sector;
+
+
bio->bi_end_io = ppl_log_endio;
bio->bi_opf = REQ_OP_WRITE | REQ_FUA;
- bio->bi_bdev = log->rdev->bdev;
- bio->bi_iter.bi_sector = log->rdev->ppl.sector;
+ bio_set_dev(bio, log->rdev->bdev);
+ bio->bi_iter.bi_sector = log->next_io_sector;
bio_add_page(bio, io->header_page, PAGE_SIZE, 0);
+ pr_debug("%s: log->current_io_sector: %llu\n", __func__,
+ (unsigned long long)log->next_io_sector);
+
+ if (log->use_multippl)
+ log->next_io_sector += (PPL_HEADER_SIZE + io->pp_size) >> 9;
+
list_for_each_entry(sh, &io->stripe_list, log_list) {
/* entries for full stripe writes have no partial parity */
if (test_bit(STRIPE_FULL_WRITE, &sh->state))
@@ -468,7 +486,7 @@ static void ppl_submit_iounit(struct ppl_io_unit *io)
bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES,
ppl_conf->bs);
bio->bi_opf = prev->bi_opf;
- bio->bi_bdev = prev->bi_bdev;
+ bio_copy_dev(bio, prev);
bio->bi_iter.bi_sector = bio_end_sector(prev);
bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0);
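With multiple PPLs per member device, each io_unit is laid down at log->next_io_sector, and the pointer rewinds to the start of the PPL area once the next header plus partial parity would no longer fit. The sector arithmetic, pulled out into a runnable sketch (the sizes are arbitrary sample values):

#include <stdio.h>

#define SIM_PPL_HEADER_SIZE	4096UL

int main(void)
{
	unsigned long ppl_start = 2048;			/* first sector of the PPL area */
	unsigned long ppl_size  = (1UL << 20) >> 9;	/* 1 MiB worth of sectors */
	unsigned long next_io   = ppl_start;

	for (int i = 0; i < 8; i++) {
		unsigned long pp_size = 300 * 1024;	/* bytes of partial parity */
		unsigned long sectors = (SIM_PPL_HEADER_SIZE + pp_size) >> 9;

		/* rewind when the current entry would overrun the PPL area */
		if (ppl_start + ppl_size - next_io < sectors)
			next_io = ppl_start;

		printf("io %d written at sector %lu (%lu sectors)\n",
		       i, next_io, sectors);
		next_io += sectors;
	}
	return 0;
}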
@@ -813,12 +831,14 @@ out:
return ret;
}
-static int ppl_recover(struct ppl_log *log, struct ppl_header *pplhdr)
+static int ppl_recover(struct ppl_log *log, struct ppl_header *pplhdr,
+ sector_t offset)
{
struct ppl_conf *ppl_conf = log->ppl_conf;
struct md_rdev *rdev = log->rdev;
struct mddev *mddev = rdev->mddev;
- sector_t ppl_sector = rdev->ppl.sector + (PPL_HEADER_SIZE >> 9);
+ sector_t ppl_sector = rdev->ppl.sector + offset +
+ (PPL_HEADER_SIZE >> 9);
struct page *page;
int i;
int ret = 0;
@@ -902,6 +922,9 @@ static int ppl_write_empty_header(struct ppl_log *log)
return -ENOMEM;
pplhdr = page_address(page);
+ /* zero out PPL space to avoid collision with old PPLs */
+ blkdev_issue_zeroout(rdev->bdev, rdev->ppl.sector,
+ log->rdev->ppl.size, GFP_NOIO, 0);
memset(pplhdr->reserved, 0xff, PPL_HDR_RESERVED);
pplhdr->signature = cpu_to_le32(log->ppl_conf->signature);
pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PAGE_SIZE));
@@ -922,63 +945,110 @@ static int ppl_load_distributed(struct ppl_log *log)
struct ppl_conf *ppl_conf = log->ppl_conf;
struct md_rdev *rdev = log->rdev;
struct mddev *mddev = rdev->mddev;
- struct page *page;
- struct ppl_header *pplhdr;
+ struct page *page, *page2, *tmp;
+ struct ppl_header *pplhdr = NULL, *prev_pplhdr = NULL;
u32 crc, crc_stored;
u32 signature;
- int ret = 0;
+ int ret = 0, i;
+ sector_t pplhdr_offset = 0, prev_pplhdr_offset = 0;
pr_debug("%s: disk: %d\n", __func__, rdev->raid_disk);
-
- /* read PPL header */
+ /* read PPL headers, find the recent one */
page = alloc_page(GFP_KERNEL);
if (!page)
return -ENOMEM;
- if (!sync_page_io(rdev, rdev->ppl.sector - rdev->data_offset,
- PAGE_SIZE, page, REQ_OP_READ, 0, false)) {
- md_error(mddev, rdev);
- ret = -EIO;
- goto out;
+ page2 = alloc_page(GFP_KERNEL);
+ if (!page2) {
+ __free_page(page);
+ return -ENOMEM;
}
- pplhdr = page_address(page);
- /* check header validity */
- crc_stored = le32_to_cpu(pplhdr->checksum);
- pplhdr->checksum = 0;
- crc = ~crc32c_le(~0, pplhdr, PAGE_SIZE);
+ /* searching ppl area for latest ppl */
+ while (pplhdr_offset < rdev->ppl.size - (PPL_HEADER_SIZE >> 9)) {
+ if (!sync_page_io(rdev,
+ rdev->ppl.sector - rdev->data_offset +
+ pplhdr_offset, PAGE_SIZE, page, REQ_OP_READ,
+ 0, false)) {
+ md_error(mddev, rdev);
+ ret = -EIO;
+ /* if not able to read - don't recover any PPL */
+ pplhdr = NULL;
+ break;
+ }
+ pplhdr = page_address(page);
+
+ /* check header validity */
+ crc_stored = le32_to_cpu(pplhdr->checksum);
+ pplhdr->checksum = 0;
+ crc = ~crc32c_le(~0, pplhdr, PAGE_SIZE);
+
+ if (crc_stored != crc) {
+ pr_debug("%s: ppl header crc does not match: stored: 0x%x calculated: 0x%x (offset: %llu)\n",
+ __func__, crc_stored, crc,
+ (unsigned long long)pplhdr_offset);
+ pplhdr = prev_pplhdr;
+ pplhdr_offset = prev_pplhdr_offset;
+ break;
+ }
- if (crc_stored != crc) {
- pr_debug("%s: ppl header crc does not match: stored: 0x%x calculated: 0x%x\n",
- __func__, crc_stored, crc);
- ppl_conf->mismatch_count++;
- goto out;
- }
+ signature = le32_to_cpu(pplhdr->signature);
- signature = le32_to_cpu(pplhdr->signature);
+ if (mddev->external) {
+ /*
+ * For external metadata the header signature is set and
+ * validated in userspace.
+ */
+ ppl_conf->signature = signature;
+ } else if (ppl_conf->signature != signature) {
+ pr_debug("%s: ppl header signature does not match: stored: 0x%x configured: 0x%x (offset: %llu)\n",
+ __func__, signature, ppl_conf->signature,
+ (unsigned long long)pplhdr_offset);
+ pplhdr = prev_pplhdr;
+ pplhdr_offset = prev_pplhdr_offset;
+ break;
+ }
- if (mddev->external) {
- /*
- * For external metadata the header signature is set and
- * validated in userspace.
- */
- ppl_conf->signature = signature;
- } else if (ppl_conf->signature != signature) {
- pr_debug("%s: ppl header signature does not match: stored: 0x%x configured: 0x%x\n",
- __func__, signature, ppl_conf->signature);
- ppl_conf->mismatch_count++;
- goto out;
+ if (prev_pplhdr && le64_to_cpu(prev_pplhdr->generation) >
+ le64_to_cpu(pplhdr->generation)) {
+ /* previous was newest */
+ pplhdr = prev_pplhdr;
+ pplhdr_offset = prev_pplhdr_offset;
+ break;
+ }
+
+ prev_pplhdr_offset = pplhdr_offset;
+ prev_pplhdr = pplhdr;
+
+ tmp = page;
+ page = page2;
+ page2 = tmp;
+
+ /* calculate next potential ppl offset */
+ for (i = 0; i < le32_to_cpu(pplhdr->entries_count); i++)
+ pplhdr_offset +=
+ le32_to_cpu(pplhdr->entries[i].pp_size) >> 9;
+ pplhdr_offset += PPL_HEADER_SIZE >> 9;
}
+ /* no valid ppl found */
+ if (!pplhdr)
+ ppl_conf->mismatch_count++;
+ else
+ pr_debug("%s: latest PPL found at offset: %llu, with generation: %llu\n",
+ __func__, (unsigned long long)pplhdr_offset,
+ le64_to_cpu(pplhdr->generation));
+
/* attempt to recover from log if we are starting a dirty array */
- if (!mddev->pers && mddev->recovery_cp != MaxSector)
- ret = ppl_recover(log, pplhdr);
-out:
+ if (pplhdr && !mddev->pers && mddev->recovery_cp != MaxSector)
+ ret = ppl_recover(log, pplhdr, pplhdr_offset);
+
/* write empty header if we are starting the array */
if (!ret && !mddev->pers)
ret = ppl_write_empty_header(log);
__free_page(page);
+ __free_page(page2);
pr_debug("%s: return: %d mismatch_count: %d recovered_entries: %d\n",
__func__, ret, ppl_conf->mismatch_count,
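ppl_load_distributed() now walks the PPL area header by header, stopping at the first invalid header or the first drop in generation, and recovers from the newest header it saw. The selection logic boils down to something like the following (the header contents are fabricated sample data):

#include <stdio.h>

struct hdr { int valid; unsigned long long gen; };

int main(void)
{
	/* consecutive headers as laid out in the PPL area; valid == 0 means bad crc */
	struct hdr area[] = { {1, 10}, {1, 11}, {1, 12}, {0, 0}, {1, 3} };
	struct hdr *latest = NULL;

	for (unsigned int i = 0; i < sizeof(area) / sizeof(area[0]); i++) {
		struct hdr *h = &area[i];

		/* stop at the first invalid or older-generation header */
		if (!h->valid || (latest && latest->gen > h->gen))
			break;
		latest = h;
	}

	if (latest)
		printf("latest PPL header: generation %llu\n", latest->gen);
	else
		printf("no valid PPL found\n");
	return 0;
}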
@@ -1031,6 +1101,7 @@ static int ppl_load(struct ppl_conf *ppl_conf)
static void __ppl_exit_log(struct ppl_conf *ppl_conf)
{
clear_bit(MD_HAS_PPL, &ppl_conf->mddev->flags);
+ clear_bit(MD_HAS_MULTIPLE_PPLS, &ppl_conf->mddev->flags);
kfree(ppl_conf->child_logs);
@@ -1099,6 +1170,22 @@ static int ppl_validate_rdev(struct md_rdev *rdev)
return 0;
}
+static void ppl_init_child_log(struct ppl_log *log, struct md_rdev *rdev)
+{
+ if ((rdev->ppl.size << 9) >= (PPL_SPACE_SIZE +
+ PPL_HEADER_SIZE) * 2) {
+ log->use_multippl = true;
+ set_bit(MD_HAS_MULTIPLE_PPLS,
+ &log->ppl_conf->mddev->flags);
+ log->entry_space = PPL_SPACE_SIZE;
+ } else {
+ log->use_multippl = false;
+ log->entry_space = (log->rdev->ppl.size << 9) -
+ PPL_HEADER_SIZE;
+ }
+ log->next_io_sector = rdev->ppl.sector;
+}
+
int ppl_init_log(struct r5conf *conf)
{
struct ppl_conf *ppl_conf;
@@ -1150,7 +1237,7 @@ int ppl_init_log(struct r5conf *conf)
goto err;
}
- ppl_conf->bs = bioset_create(conf->raid_disks, 0, 0);
+ ppl_conf->bs = bioset_create(conf->raid_disks, 0, BIOSET_NEED_BVECS);
if (!ppl_conf->bs) {
ret = -ENOMEM;
goto err;
@@ -1196,6 +1283,7 @@ int ppl_init_log(struct r5conf *conf)
q = bdev_get_queue(rdev->bdev);
if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
need_cache_flush = true;
+ ppl_init_child_log(log, rdev);
}
}
@@ -1261,6 +1349,7 @@ int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add)
if (!ret) {
log->rdev = rdev;
ret = ppl_write_empty_header(log);
+ ppl_init_child_log(log, rdev);
}
} else {
log->rdev = NULL;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 2ceb338b094b..4188a4881148 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -494,7 +494,6 @@ static int grow_buffers(struct stripe_head *sh, gfp_t gfp)
return 0;
}
-static void raid5_build_block(struct stripe_head *sh, int i, int previous);
static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
struct stripe_head *sh);
@@ -530,7 +529,7 @@ retry:
WARN_ON(1);
}
dev->flags = 0;
- raid5_build_block(sh, i, previous);
+ dev->sector = raid5_compute_blocknr(sh, i, previous);
}
if (read_seqcount_retry(&conf->gen_lock, seq))
goto retry;
@@ -1096,7 +1095,7 @@ again:
set_bit(STRIPE_IO_STARTED, &sh->state);
- bi->bi_bdev = rdev->bdev;
+ bio_set_dev(bi, rdev->bdev);
bio_set_op_attrs(bi, op, op_flags);
bi->bi_end_io = op_is_write(op)
? raid5_end_write_request
@@ -1145,7 +1144,7 @@ again:
set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
if (conf->mddev->gendisk)
- trace_block_bio_remap(bdev_get_queue(bi->bi_bdev),
+ trace_block_bio_remap(bi->bi_disk->queue,
bi, disk_devt(conf->mddev->gendisk),
sh->dev[i].sector);
if (should_defer && op_is_write(op))
@@ -1160,7 +1159,7 @@ again:
set_bit(STRIPE_IO_STARTED, &sh->state);
- rbi->bi_bdev = rrdev->bdev;
+ bio_set_dev(rbi, rrdev->bdev);
bio_set_op_attrs(rbi, op, op_flags);
BUG_ON(!op_is_write(op));
rbi->bi_end_io = raid5_end_write_request;
@@ -1193,7 +1192,7 @@ again:
if (op == REQ_OP_DISCARD)
rbi->bi_vcnt = 0;
if (conf->mddev->gendisk)
- trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
+ trace_block_bio_remap(rbi->bi_disk->queue,
rbi, disk_devt(conf->mddev->gendisk),
sh->dev[i].sector);
if (should_defer && op_is_write(op))
@@ -2662,14 +2661,6 @@ static void raid5_end_write_request(struct bio *bi)
raid5_release_stripe(sh->batch_head);
}
-static void raid5_build_block(struct stripe_head *sh, int i, int previous)
-{
- struct r5dev *dev = &sh->dev[i];
-
- dev->flags = 0;
- dev->sector = raid5_compute_blocknr(sh, i, previous);
-}
-
static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
{
char b[BDEVNAME_SIZE];
@@ -3381,9 +3372,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
sh->dev[i].sector + STRIPE_SECTORS) {
struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
- bi->bi_status = BLK_STS_IOERR;
md_write_end(conf->mddev);
- bio_endio(bi);
+ bio_io_error(bi);
bi = nextbi;
}
if (bitmap_end)
@@ -3403,9 +3393,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
sh->dev[i].sector + STRIPE_SECTORS) {
struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
- bi->bi_status = BLK_STS_IOERR;
md_write_end(conf->mddev);
- bio_endio(bi);
+ bio_io_error(bi);
bi = bi2;
}
@@ -3429,8 +3418,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
struct bio *nextbi =
r5_next_bio(bi, sh->dev[i].sector);
- bi->bi_status = BLK_STS_IOERR;
- bio_endio(bi);
+ bio_io_error(bi);
bi = nextbi;
}
}
@@ -5095,10 +5083,12 @@ static int raid5_congested(struct mddev *mddev, int bits)
static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
{
struct r5conf *conf = mddev->private;
- sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev);
+ sector_t sector = bio->bi_iter.bi_sector;
unsigned int chunk_sectors;
unsigned int bio_sectors = bio_sectors(bio);
+ WARN_ON_ONCE(bio->bi_partno);
+
chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors);
return chunk_sectors >=
((sector & (chunk_sectors - 1)) + bio_sectors);
@@ -5234,7 +5224,7 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
raid_bio->bi_next = (void*)rdev;
- align_bi->bi_bdev = rdev->bdev;
+ bio_set_dev(align_bi, rdev->bdev);
bio_clear_flag(align_bi, BIO_SEG_VALID);
if (is_badblock(rdev, align_bi->bi_iter.bi_sector,
@@ -5256,7 +5246,7 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
spin_unlock_irq(&conf->device_lock);
if (mddev->gendisk)
- trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),
+ trace_block_bio_remap(align_bi->bi_disk->queue,
align_bi, disk_devt(mddev->gendisk),
raid_bio->bi_iter.bi_sector);
generic_make_request(align_bi);
@@ -6237,6 +6227,12 @@ static void raid5_do_work(struct work_struct *work)
pr_debug("%d stripes handled\n", handled);
spin_unlock_irq(&conf->device_lock);
+
+ flush_deferred_bios(conf);
+
+ r5l_flush_stripe_to_raid(conf->log);
+
+ async_tx_issue_pending_all();
blk_finish_plug(&plug);
pr_debug("--- raid5worker inactive\n");
@@ -7242,6 +7238,7 @@ static int raid5_run(struct mddev *mddev)
pr_warn("md/raid:%s: using journal device and PPL not allowed - disabling PPL\n",
mdname(mddev));
clear_bit(MD_HAS_PPL, &mddev->flags);
+ clear_bit(MD_HAS_MULTIPLE_PPLS, &mddev->flags);
}
if (mddev->private == NULL)
@@ -7951,12 +7948,10 @@ static void end_reshape(struct r5conf *conf)
{
if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
- struct md_rdev *rdev;
spin_lock_irq(&conf->device_lock);
conf->previous_raid_disks = conf->raid_disks;
- rdev_for_each(rdev, conf->mddev)
- rdev->data_offset = rdev->new_data_offset;
+ md_finish_reshape(conf->mddev);
smp_wmb();
conf->reshape_progress = MaxSector;
conf->mddev->reshape_position = MaxSector;
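in_chunk_boundary() above now takes bi_sector as-is (a partition offset no longer needs folding in, hence the WARN_ON_ONCE on bi_partno) and keeps the usual power-of-two mask test: a bio fits in one chunk if its offset within the chunk plus its length stays below chunk_sectors. The check on its own, as a runnable snippet with sample numbers:

#include <stdio.h>
#include <stdbool.h>

/* chunk_sectors must be a power of two for the mask trick to hold */
static bool in_chunk_boundary(unsigned long sector, unsigned int bio_sectors,
			      unsigned int chunk_sectors)
{
	return chunk_sectors >= ((sector & (chunk_sectors - 1)) + bio_sectors);
}

int main(void)
{
	printf("%d\n", in_chunk_boundary(1024, 8, 128));	/* fits: prints 1 */
	printf("%d\n", in_chunk_boundary(1148, 8, 128));	/* crosses: prints 0 */
	return 0;
}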
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 55d9c2b82b7e..edfe99b22d56 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -8,6 +8,11 @@ config CEC_CORE
config CEC_NOTIFIER
bool
+config CEC_PIN
+ bool
+
+source "drivers/media/rc/Kconfig"
+
menuconfig MEDIA_SUPPORT
tristate "Multimedia support"
depends on HAS_IOMEM
@@ -72,20 +77,6 @@ config MEDIA_SDR_SUPPORT
Say Y when you have a software defined radio device.
-config MEDIA_RC_SUPPORT
- bool "Remote Controller support"
- depends on INPUT
- ---help---
- Enable support for Remote Controllers on Linux. This is
- needed in order to support several video capture adapters,
- standalone IR receivers/transmitters, and RF receivers.
-
- Enable this option if you have a video capture board even
- if you don't need IR, as otherwise, you may not be able to
- compile the driver for your adapter.
-
- Say Y when you have a TV or an IR device.
-
config MEDIA_CEC_SUPPORT
bool "HDMI CEC support"
---help---
@@ -175,7 +166,6 @@ config TTPCI_EEPROM
source "drivers/media/dvb-core/Kconfig"
comment "Media drivers"
-source "drivers/media/rc/Kconfig"
#
# V4L platform/mem2mem drivers
diff --git a/drivers/media/cec/Makefile b/drivers/media/cec/Makefile
index eaf408e64669..3353c1741961 100644
--- a/drivers/media/cec/Makefile
+++ b/drivers/media/cec/Makefile
@@ -4,4 +4,8 @@ ifeq ($(CONFIG_CEC_NOTIFIER),y)
cec-objs += cec-notifier.o
endif
+ifeq ($(CONFIG_CEC_PIN),y)
+ cec-objs += cec-pin.o
+endif
+
obj-$(CONFIG_CEC_CORE) += cec.o
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
index bf45977b2823..dd769e40416f 100644
--- a/drivers/media/cec/cec-adap.c
+++ b/drivers/media/cec/cec-adap.c
@@ -78,42 +78,62 @@ static unsigned int cec_log_addr2dev(const struct cec_adapter *adap, u8 log_addr
* Queue a new event for this filehandle. If ts == 0, then set it
* to the current time.
*
- * The two events that are currently defined do not need to keep track
- * of intermediate events, so no actual queue of events is needed,
- * instead just store the latest state and the total number of lost
- * messages.
- *
- * Should new events be added in the future that require intermediate
- * results to be queued as well, then a proper queue data structure is
- * required. But until then, just keep it simple.
+ * We keep a queue of at most max_event events where max_event differs
+ * per event. If the queue becomes full, then drop the oldest event and
+ * keep track of how many events we've dropped.
*/
void cec_queue_event_fh(struct cec_fh *fh,
const struct cec_event *new_ev, u64 ts)
{
- struct cec_event *ev = &fh->events[new_ev->event - 1];
+ static const u8 max_events[CEC_NUM_EVENTS] = {
+ 1, 1, 64, 64,
+ };
+ struct cec_event_entry *entry;
+ unsigned int ev_idx = new_ev->event - 1;
+
+ if (WARN_ON(ev_idx >= ARRAY_SIZE(fh->events)))
+ return;
if (ts == 0)
ts = ktime_get_ns();
mutex_lock(&fh->lock);
- if (new_ev->event == CEC_EVENT_LOST_MSGS &&
- fh->pending_events & (1 << new_ev->event)) {
- /*
- * If there is already a lost_msgs event, then just
- * update the lost_msgs count. This effectively
- * merges the old and new events into one.
- */
- ev->lost_msgs.lost_msgs += new_ev->lost_msgs.lost_msgs;
- goto unlock;
- }
+ if (ev_idx < CEC_NUM_CORE_EVENTS)
+ entry = &fh->core_events[ev_idx];
+ else
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (entry) {
+ if (new_ev->event == CEC_EVENT_LOST_MSGS &&
+ fh->queued_events[ev_idx]) {
+ entry->ev.lost_msgs.lost_msgs +=
+ new_ev->lost_msgs.lost_msgs;
+ goto unlock;
+ }
+ entry->ev = *new_ev;
+ entry->ev.ts = ts;
+
+ if (fh->queued_events[ev_idx] < max_events[ev_idx]) {
+ /* Add new msg at the end of the queue */
+ list_add_tail(&entry->list, &fh->events[ev_idx]);
+ fh->queued_events[ev_idx]++;
+ fh->total_queued_events++;
+ goto unlock;
+ }
- /*
- * Intermediate states are not interesting, so just
- * overwrite any older event.
- */
- *ev = *new_ev;
- ev->ts = ts;
- fh->pending_events |= 1 << new_ev->event;
+ if (ev_idx >= CEC_NUM_CORE_EVENTS) {
+ list_add_tail(&entry->list, &fh->events[ev_idx]);
+ /* drop the oldest event */
+ entry = list_first_entry(&fh->events[ev_idx],
+ struct cec_event_entry, list);
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ }
+ /* Mark that events were lost */
+ entry = list_first_entry_or_null(&fh->events[ev_idx],
+ struct cec_event_entry, list);
+ if (entry)
+ entry->ev.flags |= CEC_EVENT_FL_DROPPED_EVENTS;
unlock:
mutex_unlock(&fh->lock);
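cec_queue_event_fh() now keeps a real per-event-type queue: append while below max_events, otherwise drop the oldest entry and flag the remaining oldest one so userspace can tell that events were lost. A ring-buffer model of that drop-oldest policy in plain C (array-backed here purely for brevity; the driver uses linked lists and kmalloc'ed entries):

#include <stdio.h>

#define MAX_EVENTS 4

struct ev { int code; int dropped_before; };

static struct ev queue[MAX_EVENTS];
static unsigned int head, count;

/* append an event; if full, drop the oldest and flag the loss */
static void queue_event(int code)
{
	if (count == MAX_EVENTS) {
		head = (head + 1) % MAX_EVENTS;		/* drop oldest */
		count--;
		queue[head].dropped_before = 1;		/* mark new oldest */
	}
	unsigned int tail = (head + count) % MAX_EVENTS;
	queue[tail].code = code;
	queue[tail].dropped_before = 0;
	count++;
}

int main(void)
{
	for (int i = 0; i < 6; i++)
		queue_event(i);
	for (unsigned int i = 0; i < count; i++) {
		struct ev *e = &queue[(head + i) % MAX_EVENTS];
		printf("event %d%s\n", e->code,
		       e->dropped_before ? " (events were dropped before this)" : "");
	}
	return 0;
}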
@@ -133,47 +153,68 @@ static void cec_queue_event(struct cec_adapter *adap,
mutex_unlock(&adap->devnode.lock);
}
+/* Notify userspace that the CEC pin changed state at the given time. */
+void cec_queue_pin_cec_event(struct cec_adapter *adap, bool is_high, ktime_t ts)
+{
+ struct cec_event ev = {
+ .event = is_high ? CEC_EVENT_PIN_CEC_HIGH :
+ CEC_EVENT_PIN_CEC_LOW,
+ };
+ struct cec_fh *fh;
+
+ mutex_lock(&adap->devnode.lock);
+ list_for_each_entry(fh, &adap->devnode.fhs, list)
+ if (fh->mode_follower == CEC_MODE_MONITOR_PIN)
+ cec_queue_event_fh(fh, &ev, ktime_to_ns(ts));
+ mutex_unlock(&adap->devnode.lock);
+}
+EXPORT_SYMBOL_GPL(cec_queue_pin_cec_event);
+
/*
- * Queue a new message for this filehandle. If there is no more room
- * in the queue, then send the LOST_MSGS event instead.
+ * Queue a new message for this filehandle.
+ *
+ * We keep a queue of at most CEC_MAX_MSG_RX_QUEUE_SZ messages. If the
+ * queue becomes full, then drop the oldest message and keep track
+ * of how many messages we've dropped.
*/
static void cec_queue_msg_fh(struct cec_fh *fh, const struct cec_msg *msg)
{
- static const struct cec_event ev_lost_msg = {
- .ts = 0,
+ static const struct cec_event ev_lost_msgs = {
.event = CEC_EVENT_LOST_MSGS,
- .flags = 0,
- {
- .lost_msgs.lost_msgs = 1,
- },
+ .lost_msgs.lost_msgs = 1,
};
struct cec_msg_entry *entry;
mutex_lock(&fh->lock);
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- goto lost_msgs;
-
- entry->msg = *msg;
- /* Add new msg at the end of the queue */
- list_add_tail(&entry->list, &fh->msgs);
+ if (entry) {
+ entry->msg = *msg;
+ /* Add new msg at the end of the queue */
+ list_add_tail(&entry->list, &fh->msgs);
+
+ if (fh->queued_msgs < CEC_MAX_MSG_RX_QUEUE_SZ) {
+ /* All is fine if there is enough room */
+ fh->queued_msgs++;
+ mutex_unlock(&fh->lock);
+ wake_up_interruptible(&fh->wait);
+ return;
+ }
- /*
- * if the queue now has more than CEC_MAX_MSG_RX_QUEUE_SZ
- * messages, drop the oldest one and send a lost message event.
- */
- if (fh->queued_msgs == CEC_MAX_MSG_RX_QUEUE_SZ) {
+ /*
+ * if the message queue is full, then drop the oldest one and
+ * send a lost message event.
+ */
+ entry = list_first_entry(&fh->msgs, struct cec_msg_entry, list);
list_del(&entry->list);
- goto lost_msgs;
+ kfree(entry);
}
- fh->queued_msgs++;
mutex_unlock(&fh->lock);
- wake_up_interruptible(&fh->wait);
- return;
-lost_msgs:
- mutex_unlock(&fh->lock);
- cec_queue_event_fh(fh, &ev_lost_msg, 0);
+ /*
+ * We lost a message, either because kmalloc failed or the queue
+ * was full.
+ */
+ cec_queue_event_fh(fh, &ev_lost_msgs, ktime_get_ns());
}
/*
@@ -394,13 +435,17 @@ int cec_thread_func(void *_adap)
if (adap->transmitting && timeout) {
/*
- * If we timeout, then log that. This really shouldn't
- * happen and is an indication of a faulty CEC adapter
- * driver, or the CEC bus is in some weird state.
+ * If we timeout, then log that. Normally this does
+ * not happen and it is an indication of a faulty CEC
+ * adapter driver, or the CEC bus is in some weird
+ * state. On rare occasions it can happen if there is
+ * so much traffic on the bus that the adapter was
+ * unable to transmit for CEC_XFER_TIMEOUT_MS (2.1s).
*/
- dprintk(0, "%s: message %*ph timed out!\n", __func__,
+ dprintk(1, "%s: message %*ph timed out\n", __func__,
adap->transmitting->msg.len,
adap->transmitting->msg.msg);
+ adap->tx_timeouts++;
/* Just give up on this. */
cec_data_cancel(adap->transmitting);
goto unlock;
@@ -467,14 +512,19 @@ unlock:
/*
* Called by the CEC adapter if a transmit finished.
*/
-void cec_transmit_done(struct cec_adapter *adap, u8 status, u8 arb_lost_cnt,
- u8 nack_cnt, u8 low_drive_cnt, u8 error_cnt)
+void cec_transmit_done_ts(struct cec_adapter *adap, u8 status,
+ u8 arb_lost_cnt, u8 nack_cnt, u8 low_drive_cnt,
+ u8 error_cnt, ktime_t ts)
{
struct cec_data *data;
struct cec_msg *msg;
- u64 ts = ktime_get_ns();
+ unsigned int attempts_made = arb_lost_cnt + nack_cnt +
+ low_drive_cnt + error_cnt;
dprintk(2, "%s: status %02x\n", __func__, status);
+ if (attempts_made < 1)
+ attempts_made = 1;
+
mutex_lock(&adap->lock);
data = adap->transmitting;
if (!data) {
@@ -492,7 +542,7 @@ void cec_transmit_done(struct cec_adapter *adap, u8 status, u8 arb_lost_cnt,
/* Drivers must fill in the status! */
WARN_ON(status == 0);
- msg->tx_ts = ts;
+ msg->tx_ts = ktime_to_ns(ts);
msg->tx_status |= status;
msg->tx_arb_lost_cnt += arb_lost_cnt;
msg->tx_nack_cnt += nack_cnt;
@@ -507,10 +557,10 @@ void cec_transmit_done(struct cec_adapter *adap, u8 status, u8 arb_lost_cnt,
* the hardware didn't signal that it retried itself (by setting
* CEC_TX_STATUS_MAX_RETRIES), then we will retry ourselves.
*/
- if (data->attempts > 1 &&
+ if (data->attempts > attempts_made &&
!(status & (CEC_TX_STATUS_MAX_RETRIES | CEC_TX_STATUS_OK))) {
/* Retry this message */
- data->attempts--;
+ data->attempts -= attempts_made;
if (msg->timeout)
dprintk(2, "retransmit: %*ph (attempts: %d, wait for 0x%02x)\n",
msg->len, msg->msg, data->attempts, msg->reply);
@@ -555,25 +605,26 @@ wake_thread:
unlock:
mutex_unlock(&adap->lock);
}
-EXPORT_SYMBOL_GPL(cec_transmit_done);
+EXPORT_SYMBOL_GPL(cec_transmit_done_ts);
-void cec_transmit_attempt_done(struct cec_adapter *adap, u8 status)
+void cec_transmit_attempt_done_ts(struct cec_adapter *adap,
+ u8 status, ktime_t ts)
{
- switch (status) {
+ switch (status & ~CEC_TX_STATUS_MAX_RETRIES) {
case CEC_TX_STATUS_OK:
- cec_transmit_done(adap, status, 0, 0, 0, 0);
+ cec_transmit_done_ts(adap, status, 0, 0, 0, 0, ts);
return;
case CEC_TX_STATUS_ARB_LOST:
- cec_transmit_done(adap, status, 1, 0, 0, 0);
+ cec_transmit_done_ts(adap, status, 1, 0, 0, 0, ts);
return;
case CEC_TX_STATUS_NACK:
- cec_transmit_done(adap, status, 0, 1, 0, 0);
+ cec_transmit_done_ts(adap, status, 0, 1, 0, 0, ts);
return;
case CEC_TX_STATUS_LOW_DRIVE:
- cec_transmit_done(adap, status, 0, 0, 1, 0);
+ cec_transmit_done_ts(adap, status, 0, 0, 1, 0, ts);
return;
case CEC_TX_STATUS_ERROR:
- cec_transmit_done(adap, status, 0, 0, 0, 1);
+ cec_transmit_done_ts(adap, status, 0, 0, 0, 1, ts);
return;
default:
/* Should never happen */
@@ -581,7 +632,7 @@ void cec_transmit_attempt_done(struct cec_adapter *adap, u8 status)
return;
}
}
-EXPORT_SYMBOL_GPL(cec_transmit_attempt_done);
+EXPORT_SYMBOL_GPL(cec_transmit_attempt_done_ts);
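cec_transmit_done_ts() now subtracts the number of attempts the hardware actually made (the sum of the arb_lost, nack, low_drive and error counts, floored at 1) from the retry budget instead of always charging one. In isolation the accounting looks like this (attempts_made() is a local helper written for the example):

#include <stdio.h>

/* return how many attempts to charge for one transmit_done report */
static unsigned int attempts_made(unsigned int arb_lost, unsigned int nack,
				  unsigned int low_drive, unsigned int error)
{
	unsigned int n = arb_lost + nack + low_drive + error;

	return n ? n : 1;
}

int main(void)
{
	unsigned int attempts = 4;	/* budget for this message */

	/* hardware reports it already retried twice (two NACKs) before giving up */
	unsigned int made = attempts_made(0, 2, 0, 0);

	if (attempts > made) {
		attempts -= made;
		printf("retrying, %u attempts left\n", attempts);
	} else {
		printf("out of attempts, giving up\n");
	}
	return 0;
}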
/*
* Called when waiting for a reply times out.
@@ -630,9 +681,7 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
msg->tx_nack_cnt = 0;
msg->tx_low_drive_cnt = 0;
msg->tx_error_cnt = 0;
- msg->sequence = ++adap->sequence;
- if (!msg->sequence)
- msg->sequence = ++adap->sequence;
+ msg->sequence = 0;
if (msg->reply && msg->timeout == 0) {
/* Make sure the timeout isn't 0. */
@@ -671,6 +720,9 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
msg->tx_status = CEC_TX_STATUS_NACK |
CEC_TX_STATUS_MAX_RETRIES;
msg->tx_nack_cnt = 1;
+ msg->sequence = ++adap->sequence;
+ if (!msg->sequence)
+ msg->sequence = ++adap->sequence;
return 0;
}
}
@@ -705,6 +757,10 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
if (!data)
return -ENOMEM;
+ msg->sequence = ++adap->sequence;
+ if (!msg->sequence)
+ msg->sequence = ++adap->sequence;
+
if (msg->len > 1 && msg->msg[1] == CEC_MSG_CDC_MESSAGE) {
msg->msg[2] = adap->phys_addr >> 8;
msg->msg[3] = adap->phys_addr & 0xff;
@@ -712,7 +768,8 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
if (msg->timeout)
dprintk(2, "%s: %*ph (wait for 0x%02x%s)\n",
- __func__, msg->len, msg->msg, msg->reply, !block ? ", nb" : "");
+ __func__, msg->len, msg->msg, msg->reply,
+ !block ? ", nb" : "");
else
dprintk(2, "%s: %*ph%s\n",
__func__, msg->len, msg->msg, !block ? " (nb)" : "");
@@ -909,7 +966,8 @@ static const u8 cec_msg_size[256] = {
};
/* Called by the CEC adapter if a message is received */
-void cec_received_msg(struct cec_adapter *adap, struct cec_msg *msg)
+void cec_received_msg_ts(struct cec_adapter *adap,
+ struct cec_msg *msg, ktime_t ts)
{
struct cec_data *data;
u8 msg_init = cec_msg_initiator(msg);
@@ -937,7 +995,7 @@ void cec_received_msg(struct cec_adapter *adap, struct cec_msg *msg)
cec_has_log_addr(adap, msg_init))
return;
- msg->rx_ts = ktime_get_ns();
+ msg->rx_ts = ktime_to_ns(ts);
msg->rx_status = CEC_RX_STATUS_OK;
msg->sequence = msg->reply = msg->timeout = 0;
msg->tx_status = 0;
@@ -1102,7 +1160,7 @@ void cec_received_msg(struct cec_adapter *adap, struct cec_msg *msg)
*/
cec_receive_notify(adap, msg, is_reply);
}
-EXPORT_SYMBOL_GPL(cec_received_msg);
+EXPORT_SYMBOL_GPL(cec_received_msg_ts);
/* Logical Address Handling */
@@ -1390,7 +1448,9 @@ static void cec_claim_log_addrs(struct cec_adapter *adap, bool block)
*/
void __cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block)
{
- if (phys_addr == adap->phys_addr || adap->devnode.unregistered)
+ if (phys_addr == adap->phys_addr)
+ return;
+ if (phys_addr != CEC_PHYS_ADDR_INVALID && adap->devnode.unregistered)
return;
dprintk(1, "new physical address %x.%x.%x.%x\n",
@@ -1471,8 +1531,13 @@ int __cec_s_log_addrs(struct cec_adapter *adap,
return -ENODEV;
if (!log_addrs || log_addrs->num_log_addrs == 0) {
- adap->log_addrs.num_log_addrs = 0;
cec_adap_unconfigure(adap);
+ adap->log_addrs.num_log_addrs = 0;
+ for (i = 0; i < CEC_MAX_LOG_ADDRS; i++)
+ adap->log_addrs.log_addr[i] = CEC_LOG_ADDR_INVALID;
+ adap->log_addrs.osd_name[0] = '\0';
+ adap->log_addrs.vendor_id = CEC_VENDOR_ID_NONE;
+ adap->log_addrs.cec_version = CEC_OP_CEC_VERSION_2_0;
return 0;
}
@@ -1704,6 +1769,9 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
int la_idx = cec_log_addr2idx(adap, dest_laddr);
bool from_unregistered = init_laddr == 0xf;
struct cec_msg tx_cec_msg = { };
+#ifdef CONFIG_MEDIA_CEC_RC
+ int scancode;
+#endif
dprintk(2, "%s: %*ph\n", __func__, msg->len, msg->msg);
@@ -1792,11 +1860,9 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
*/
case 0x60:
if (msg->len == 2)
- rc_keydown(adap->rc, RC_TYPE_CEC,
- msg->msg[2], 0);
+ scancode = msg->msg[2];
else
- rc_keydown(adap->rc, RC_TYPE_CEC,
- msg->msg[2] << 8 | msg->msg[3], 0);
+ scancode = msg->msg[2] << 8 | msg->msg[3];
break;
/*
* Other function messages that are not handled.
@@ -1809,11 +1875,54 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
*/
case 0x56: case 0x57:
case 0x67: case 0x68: case 0x69: case 0x6a:
+ scancode = -1;
break;
default:
- rc_keydown(adap->rc, RC_TYPE_CEC, msg->msg[2], 0);
+ scancode = msg->msg[2];
break;
}
+
+ /* Was repeating, but keypress timed out */
+ if (adap->rc_repeating && !adap->rc->keypressed) {
+ adap->rc_repeating = false;
+ adap->rc_last_scancode = -1;
+ }
+ /* Different keypress from last time, ends repeat mode */
+ if (adap->rc_last_scancode != scancode) {
+ rc_keyup(adap->rc);
+ adap->rc_repeating = false;
+ }
+ /* We can't handle this scancode */
+ if (scancode < 0) {
+ adap->rc_last_scancode = scancode;
+ break;
+ }
+
+ /* Send key press */
+ rc_keydown(adap->rc, RC_PROTO_CEC, scancode, 0);
+
+ /* When in repeating mode, we're done */
+ if (adap->rc_repeating)
+ break;
+
+ /*
+ * We are not repeating, but the new scancode is
+ * the same as the last one, and this second key press is
+ * within 550 ms (the 'Follower Safety Timeout') from the
+ * previous key press, so we now enable the repeating mode.
+ */
+ if (adap->rc_last_scancode == scancode &&
+ msg->rx_ts - adap->rc_last_keypress < 550 * NSEC_PER_MSEC) {
+ adap->rc_repeating = true;
+ break;
+ }
+ /*
+ * Not in repeating mode, so avoid triggering repeat mode
+ * by calling keyup.
+ */
+ rc_keyup(adap->rc);
+ adap->rc_last_scancode = scancode;
+ adap->rc_last_keypress = msg->rx_ts;
#endif
break;
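The RC passthrough code above only enters repeat mode when the same scancode arrives again within the 550 ms "Follower Safety Timeout"; otherwise every press is reported as a keydown followed by a keyup. A self-contained sketch of that decision (timestamps and scancodes are sample values):

#include <stdio.h>
#include <stdbool.h>

#define FOLLOWER_SAFETY_TIMEOUT_NS (550ULL * 1000 * 1000)

static int last_scancode = -1;
static unsigned long long last_keypress_ns;
static bool repeating;

/* decide whether a new "User Control Pressed" enters repeat mode */
static void key_pressed(int scancode, unsigned long long ts_ns)
{
	if (last_scancode != scancode)
		repeating = false;

	if (!repeating && last_scancode == scancode &&
	    ts_ns - last_keypress_ns < FOLLOWER_SAFETY_TIMEOUT_NS)
		repeating = true;

	printf("scancode 0x%02x at %llu ms: %s\n", scancode, ts_ns / 1000000,
	       repeating ? "repeating" : "single press");

	last_scancode = scancode;
	last_keypress_ns = ts_ns;
}

int main(void)
{
	key_pressed(0x41, 0);			/* first press */
	key_pressed(0x41, 200 * 1000000ULL);	/* same key, 200 ms later -> repeat */
	key_pressed(0x42, 900 * 1000000ULL);	/* different key -> single press */
	return 0;
}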
@@ -1823,6 +1932,8 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
break;
#ifdef CONFIG_MEDIA_CEC_RC
rc_keyup(adap->rc);
+ adap->rc_repeating = false;
+ adap->rc_last_scancode = -1;
#endif
break;
@@ -1941,6 +2052,11 @@ int cec_adap_status(struct seq_file *file, void *priv)
if (adap->monitor_all_cnt)
seq_printf(file, "file handles in Monitor All mode: %u\n",
adap->monitor_all_cnt);
+ if (adap->tx_timeouts) {
+ seq_printf(file, "transmit timeouts: %u\n",
+ adap->tx_timeouts);
+ adap->tx_timeouts = 0;
+ }
data = adap->transmitting;
if (data)
seq_printf(file, "transmitting message: %*ph (reply: %02x, timeout: %ums)\n",
diff --git a/drivers/media/cec/cec-api.c b/drivers/media/cec/cec-api.c
index f7eb4c54a354..a079f7fe018c 100644
--- a/drivers/media/cec/cec-api.c
+++ b/drivers/media/cec/cec-api.c
@@ -30,6 +30,7 @@
#include <linux/uaccess.h>
#include <linux/version.h>
+#include <media/cec-pin.h>
#include "cec-priv.h"
static inline struct cec_devnode *cec_devnode_data(struct file *filp)
@@ -57,7 +58,7 @@ static unsigned int cec_poll(struct file *filp,
res |= POLLOUT | POLLWRNORM;
if (fh->queued_msgs)
res |= POLLIN | POLLRDNORM;
- if (fh->pending_events)
+ if (fh->total_queued_events)
res |= POLLPRI;
poll_wait(filp, &fh->wait, poll);
mutex_unlock(&adap->lock);
@@ -289,15 +290,17 @@ static long cec_receive(struct cec_adapter *adap, struct cec_fh *fh,
static long cec_dqevent(struct cec_adapter *adap, struct cec_fh *fh,
bool block, struct cec_event __user *parg)
{
- struct cec_event *ev = NULL;
+ struct cec_event_entry *ev = NULL;
u64 ts = ~0ULL;
unsigned int i;
+ unsigned int ev_idx;
long err = 0;
mutex_lock(&fh->lock);
- while (!fh->pending_events && block) {
+ while (!fh->total_queued_events && block) {
mutex_unlock(&fh->lock);
- err = wait_event_interruptible(fh->wait, fh->pending_events);
+ err = wait_event_interruptible(fh->wait,
+ fh->total_queued_events);
if (err)
return err;
mutex_lock(&fh->lock);
@@ -305,23 +308,29 @@ static long cec_dqevent(struct cec_adapter *adap, struct cec_fh *fh,
/* Find the oldest event */
for (i = 0; i < CEC_NUM_EVENTS; i++) {
- if (fh->pending_events & (1 << (i + 1)) &&
- fh->events[i].ts <= ts) {
- ev = &fh->events[i];
- ts = ev->ts;
+ struct cec_event_entry *entry =
+ list_first_entry_or_null(&fh->events[i],
+ struct cec_event_entry, list);
+
+ if (entry && entry->ev.ts <= ts) {
+ ev = entry;
+ ev_idx = i;
+ ts = ev->ev.ts;
}
}
+
if (!ev) {
err = -EAGAIN;
goto unlock;
}
+ list_del(&ev->list);
- if (copy_to_user(parg, ev, sizeof(*ev))) {
+ if (copy_to_user(parg, &ev->ev, sizeof(ev->ev)))
err = -EFAULT;
- goto unlock;
- }
-
- fh->pending_events &= ~(1 << ev->event);
+ if (ev_idx >= CEC_NUM_CORE_EVENTS)
+ kfree(ev);
+ fh->queued_events[ev_idx]--;
+ fh->total_queued_events--;
unlock:
mutex_unlock(&fh->lock);
@@ -348,33 +357,50 @@ static long cec_s_mode(struct cec_adapter *adap, struct cec_fh *fh,
if (copy_from_user(&mode, parg, sizeof(mode)))
return -EFAULT;
- if (mode & ~(CEC_MODE_INITIATOR_MSK | CEC_MODE_FOLLOWER_MSK))
+ if (mode & ~(CEC_MODE_INITIATOR_MSK | CEC_MODE_FOLLOWER_MSK)) {
+ dprintk(1, "%s: invalid mode bits set\n", __func__);
return -EINVAL;
+ }
mode_initiator = mode & CEC_MODE_INITIATOR_MSK;
mode_follower = mode & CEC_MODE_FOLLOWER_MSK;
if (mode_initiator > CEC_MODE_EXCL_INITIATOR ||
- mode_follower > CEC_MODE_MONITOR_ALL)
+ mode_follower > CEC_MODE_MONITOR_ALL) {
+ dprintk(1, "%s: unknown mode\n", __func__);
return -EINVAL;
+ }
if (mode_follower == CEC_MODE_MONITOR_ALL &&
- !(adap->capabilities & CEC_CAP_MONITOR_ALL))
+ !(adap->capabilities & CEC_CAP_MONITOR_ALL)) {
+ dprintk(1, "%s: MONITOR_ALL not supported\n", __func__);
return -EINVAL;
+ }
+
+ if (mode_follower == CEC_MODE_MONITOR_PIN &&
+ !(adap->capabilities & CEC_CAP_MONITOR_PIN)) {
+ dprintk(1, "%s: MONITOR_PIN not supported\n", __func__);
+ return -EINVAL;
+ }
/* Follower modes should always be able to send CEC messages */
if ((mode_initiator == CEC_MODE_NO_INITIATOR ||
!(adap->capabilities & CEC_CAP_TRANSMIT)) &&
mode_follower >= CEC_MODE_FOLLOWER &&
- mode_follower <= CEC_MODE_EXCL_FOLLOWER_PASSTHRU)
+ mode_follower <= CEC_MODE_EXCL_FOLLOWER_PASSTHRU) {
+ dprintk(1, "%s: cannot transmit\n", __func__);
return -EINVAL;
+ }
/* Monitor modes require CEC_MODE_NO_INITIATOR */
- if (mode_initiator && mode_follower >= CEC_MODE_MONITOR)
+ if (mode_initiator && mode_follower >= CEC_MODE_MONITOR_PIN) {
+ dprintk(1, "%s: monitor modes require NO_INITIATOR\n",
+ __func__);
return -EINVAL;
+ }
/* Monitor modes require CAP_NET_ADMIN */
- if (mode_follower >= CEC_MODE_MONITOR && !capable(CAP_NET_ADMIN))
+ if (mode_follower >= CEC_MODE_MONITOR_PIN && !capable(CAP_NET_ADMIN))
return -EPERM;
mutex_lock(&adap->lock);
@@ -413,8 +439,20 @@ static long cec_s_mode(struct cec_adapter *adap, struct cec_fh *fh,
if (fh->mode_follower == CEC_MODE_FOLLOWER)
adap->follower_cnt--;
+ if (fh->mode_follower == CEC_MODE_MONITOR_PIN)
+ adap->monitor_pin_cnt--;
if (mode_follower == CEC_MODE_FOLLOWER)
adap->follower_cnt++;
+ if (mode_follower == CEC_MODE_MONITOR_PIN) {
+ struct cec_event ev = {
+ .flags = CEC_EVENT_FL_INITIAL_STATE,
+ };
+
+ ev.event = adap->cec_pin_is_high ? CEC_EVENT_PIN_CEC_HIGH :
+ CEC_EVENT_PIN_CEC_LOW;
+ cec_queue_event_fh(fh, &ev, 0);
+ adap->monitor_pin_cnt++;
+ }
if (mode_follower == CEC_MODE_EXCL_FOLLOWER ||
mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU) {
adap->passthrough =
@@ -495,6 +533,7 @@ static int cec_open(struct inode *inode, struct file *filp)
.event = CEC_EVENT_STATE_CHANGE,
.flags = CEC_EVENT_FL_INITIAL_STATE,
};
+ unsigned int i;
int err;
if (!fh)
@@ -502,6 +541,8 @@ static int cec_open(struct inode *inode, struct file *filp)
INIT_LIST_HEAD(&fh->msgs);
INIT_LIST_HEAD(&fh->xfer_list);
+ for (i = 0; i < CEC_NUM_EVENTS; i++)
+ INIT_LIST_HEAD(&fh->events[i]);
mutex_init(&fh->lock);
init_waitqueue_head(&fh->wait);
@@ -544,6 +585,7 @@ static int cec_release(struct inode *inode, struct file *filp)
struct cec_devnode *devnode = cec_devnode_data(filp);
struct cec_adapter *adap = to_cec_adapter(devnode);
struct cec_fh *fh = filp->private_data;
+ unsigned int i;
mutex_lock(&adap->lock);
if (adap->cec_initiator == fh)
@@ -554,6 +596,8 @@ static int cec_release(struct inode *inode, struct file *filp)
}
if (fh->mode_follower == CEC_MODE_FOLLOWER)
adap->follower_cnt--;
+ if (fh->mode_follower == CEC_MODE_MONITOR_PIN)
+ adap->monitor_pin_cnt--;
if (fh->mode_follower == CEC_MODE_MONITOR_ALL)
cec_monitor_all_cnt_dec(adap);
mutex_unlock(&adap->lock);
@@ -585,6 +629,16 @@ static int cec_release(struct inode *inode, struct file *filp)
list_del(&entry->list);
kfree(entry);
}
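+ /* Only the non-core events are dynamically allocated and need freeing */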
+ for (i = CEC_NUM_CORE_EVENTS; i < CEC_NUM_EVENTS; i++) {
+ while (!list_empty(&fh->events[i])) {
+ struct cec_event_entry *entry =
+ list_first_entry(&fh->events[i],
+ struct cec_event_entry, list);
+
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ }
kfree(fh);
cec_put_device(devnode);
diff --git a/drivers/media/cec/cec-core.c b/drivers/media/cec/cec-core.c
index b516d599d6c4..648136e552d5 100644
--- a/drivers/media/cec/cec-core.c
+++ b/drivers/media/cec/cec-core.c
@@ -227,6 +227,7 @@ struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
return ERR_PTR(-ENOMEM);
strlcpy(adap->name, name, sizeof(adap->name));
adap->phys_addr = CEC_PHYS_ADDR_INVALID;
+ adap->cec_pin_is_high = true;
adap->log_addrs.cec_version = CEC_OP_CEC_VERSION_2_0;
adap->log_addrs.vendor_id = CEC_VENDOR_ID_NONE;
adap->capabilities = caps;
@@ -263,22 +264,24 @@ struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
return ERR_PTR(-ENOMEM);
}
- snprintf(adap->input_name, sizeof(adap->input_name),
+ snprintf(adap->device_name, sizeof(adap->device_name),
"RC for %s", name);
snprintf(adap->input_phys, sizeof(adap->input_phys),
"%s/input0", name);
- adap->rc->input_name = adap->input_name;
+ adap->rc->device_name = adap->device_name;
adap->rc->input_phys = adap->input_phys;
adap->rc->input_id.bustype = BUS_CEC;
adap->rc->input_id.vendor = 0;
adap->rc->input_id.product = 0;
adap->rc->input_id.version = 1;
adap->rc->driver_name = CEC_NAME;
- adap->rc->allowed_protocols = RC_BIT_CEC;
+ adap->rc->allowed_protocols = RC_PROTO_BIT_CEC;
+ adap->rc->enabled_protocols = RC_PROTO_BIT_CEC;
adap->rc->priv = adap;
adap->rc->map_name = RC_MAP_CEC;
adap->rc->timeout = MS_TO_NS(100);
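+ /* -1 means that there is no previous scancode */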
+ adap->rc_last_scancode = -1;
#endif
return adap;
}
@@ -310,6 +313,17 @@ int cec_register_adapter(struct cec_adapter *adap,
adap->rc = NULL;
return res;
}
+ /*
+ * The REP_DELAY for CEC is really the time between the initial
+ * 'User Control Pressed' message and the second. The first
+ * keypress is always seen as non-repeating, the second
+ * (provided it has the same UI Command) will start the 'Press
+ * and Hold' (aka repeat) behavior. By setting REP_DELAY to the
+ * same value as REP_PERIOD the expected CEC behavior is
+ * reproduced.
+ */
+ adap->rc->input_dev->rep[REP_DELAY] =
+ adap->rc->input_dev->rep[REP_PERIOD];
}
#endif
@@ -374,6 +388,8 @@ void cec_delete_adapter(struct cec_adapter *adap)
kthread_stop(adap->kthread);
if (adap->kthread_config)
kthread_stop(adap->kthread_config);
+ if (adap->ops->adap_free)
+ adap->ops->adap_free(adap);
#ifdef CONFIG_MEDIA_CEC_RC
rc_free_device(adap->rc);
#endif
@@ -386,11 +402,8 @@ EXPORT_SYMBOL_GPL(cec_delete_adapter);
*/
static int __init cec_devnode_init(void)
{
- int ret;
+ int ret = alloc_chrdev_region(&cec_dev_t, 0, CEC_NUM_DEVICES, CEC_NAME);
- pr_info("Linux cec interface: v0.10\n");
- ret = alloc_chrdev_region(&cec_dev_t, 0, CEC_NUM_DEVICES,
- CEC_NAME);
if (ret < 0) {
pr_warn("cec: unable to allocate major\n");
return ret;
diff --git a/drivers/media/cec/cec-notifier.c b/drivers/media/cec/cec-notifier.c
index 74dc1c32080e..08b619d0ea1e 100644
--- a/drivers/media/cec/cec-notifier.c
+++ b/drivers/media/cec/cec-notifier.c
@@ -87,6 +87,9 @@ EXPORT_SYMBOL_GPL(cec_notifier_put);
void cec_notifier_set_phys_addr(struct cec_notifier *n, u16 pa)
{
+ if (n == NULL)
+ return;
+
mutex_lock(&n->lock);
n->phys_addr = pa;
if (n->callback)
@@ -100,6 +103,9 @@ void cec_notifier_set_phys_addr_from_edid(struct cec_notifier *n,
{
u16 pa = CEC_PHYS_ADDR_INVALID;
+ if (n == NULL)
+ return;
+
if (edid && edid->extensions)
pa = cec_get_edid_phys_addr((const u8 *)edid,
EDID_LENGTH * (edid->extensions + 1), NULL);
diff --git a/drivers/media/cec/cec-pin.c b/drivers/media/cec/cec-pin.c
new file mode 100644
index 000000000000..c003b8eac617
--- /dev/null
+++ b/drivers/media/cec/cec-pin.c
@@ -0,0 +1,802 @@
+/*
+ * Copyright 2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/sched/types.h>
+
+#include <media/cec-pin.h>
+
+/* All timings are in microseconds */
+
+/* start bit timings */
+#define CEC_TIM_START_BIT_LOW 3700
+#define CEC_TIM_START_BIT_LOW_MIN 3500
+#define CEC_TIM_START_BIT_LOW_MAX 3900
+#define CEC_TIM_START_BIT_TOTAL 4500
+#define CEC_TIM_START_BIT_TOTAL_MIN 4300
+#define CEC_TIM_START_BIT_TOTAL_MAX 4700
+
+/* data bit timings */
+#define CEC_TIM_DATA_BIT_0_LOW 1500
+#define CEC_TIM_DATA_BIT_0_LOW_MIN 1300
+#define CEC_TIM_DATA_BIT_0_LOW_MAX 1700
+#define CEC_TIM_DATA_BIT_1_LOW 600
+#define CEC_TIM_DATA_BIT_1_LOW_MIN 400
+#define CEC_TIM_DATA_BIT_1_LOW_MAX 800
+#define CEC_TIM_DATA_BIT_TOTAL 2400
+#define CEC_TIM_DATA_BIT_TOTAL_MIN 2050
+#define CEC_TIM_DATA_BIT_TOTAL_MAX 2750
+/* earliest safe time to sample the bit state */
+#define CEC_TIM_DATA_BIT_SAMPLE 850
+/* earliest time the bit is back to 1 (T7 + 50) */
+#define CEC_TIM_DATA_BIT_HIGH 1750
+
+/* when idle, sample once per millisecond */
+#define CEC_TIM_IDLE_SAMPLE 1000
+/* when processing the start bit, sample twice per millisecond */
+#define CEC_TIM_START_BIT_SAMPLE 500
+/* when polling for a state change, sample once every 50 microseconds */
+#define CEC_TIM_SAMPLE 50
+
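+/* a receiver signals an error (low drive) by holding the line low for 1.5 nominal data bit periods */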
+#define CEC_TIM_LOW_DRIVE_ERROR (1.5 * CEC_TIM_DATA_BIT_TOTAL)
+
+struct cec_state {
+ const char * const name;
+ unsigned int usecs;
+};
+
+static const struct cec_state states[CEC_PIN_STATES] = {
+ { "Off", 0 },
+ { "Idle", CEC_TIM_IDLE_SAMPLE },
+ { "Tx Wait", CEC_TIM_SAMPLE },
+ { "Tx Wait for High", CEC_TIM_IDLE_SAMPLE },
+ { "Tx Start Bit Low", CEC_TIM_START_BIT_LOW },
+ { "Tx Start Bit High", CEC_TIM_START_BIT_TOTAL - CEC_TIM_START_BIT_LOW },
+ { "Tx Data 0 Low", CEC_TIM_DATA_BIT_0_LOW },
+ { "Tx Data 0 High", CEC_TIM_DATA_BIT_TOTAL - CEC_TIM_DATA_BIT_0_LOW },
+ { "Tx Data 1 Low", CEC_TIM_DATA_BIT_1_LOW },
+ { "Tx Data 1 High", CEC_TIM_DATA_BIT_TOTAL - CEC_TIM_DATA_BIT_1_LOW },
+ { "Tx Data 1 Pre Sample", CEC_TIM_DATA_BIT_SAMPLE - CEC_TIM_DATA_BIT_1_LOW },
+ { "Tx Data 1 Post Sample", CEC_TIM_DATA_BIT_TOTAL - CEC_TIM_DATA_BIT_SAMPLE },
+ { "Rx Start Bit Low", CEC_TIM_SAMPLE },
+ { "Rx Start Bit High", CEC_TIM_SAMPLE },
+ { "Rx Data Sample", CEC_TIM_DATA_BIT_SAMPLE },
+ { "Rx Data Post Sample", CEC_TIM_DATA_BIT_HIGH - CEC_TIM_DATA_BIT_SAMPLE },
+ { "Rx Data High", CEC_TIM_SAMPLE },
+ { "Rx Ack Low", CEC_TIM_DATA_BIT_0_LOW },
+ { "Rx Ack Low Post", CEC_TIM_DATA_BIT_HIGH - CEC_TIM_DATA_BIT_0_LOW },
+ { "Rx Ack High Post", CEC_TIM_DATA_BIT_HIGH },
+ { "Rx Ack Finish", CEC_TIM_DATA_BIT_TOTAL_MIN - CEC_TIM_DATA_BIT_HIGH },
+ { "Rx Low Drive", CEC_TIM_LOW_DRIVE_ERROR },
+ { "Rx Irq", 0 },
+};
+
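+/*
+ * Queue a pin event for the worker thread. Events are stored in a small
+ * circular buffer; if that buffer is full the new event is dropped.
+ */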
+static void cec_pin_update(struct cec_pin *pin, bool v, bool force)
+{
+ if (!force && v == pin->adap->cec_pin_is_high)
+ return;
+
+ pin->adap->cec_pin_is_high = v;
+ if (atomic_read(&pin->work_pin_events) < CEC_NUM_PIN_EVENTS) {
+ pin->work_pin_is_high[pin->work_pin_events_wr] = v;
+ pin->work_pin_ts[pin->work_pin_events_wr] = ktime_get();
+ pin->work_pin_events_wr =
+ (pin->work_pin_events_wr + 1) % CEC_NUM_PIN_EVENTS;
+ atomic_inc(&pin->work_pin_events);
+ }
+ wake_up_interruptible(&pin->kthread_waitq);
+}
+
+static bool cec_pin_read(struct cec_pin *pin)
+{
+ bool v = pin->ops->read(pin->adap);
+
+ cec_pin_update(pin, v, false);
+ return v;
+}
+
+static void cec_pin_low(struct cec_pin *pin)
+{
+ pin->ops->low(pin->adap);
+ cec_pin_update(pin, false, false);
+}
+
+static bool cec_pin_high(struct cec_pin *pin)
+{
+ pin->ops->high(pin->adap);
+ return cec_pin_read(pin);
+}
+
+static void cec_pin_to_idle(struct cec_pin *pin)
+{
+ /*
+ * Reset all status fields, release the bus and
+ * go to idle state.
+ */
+ pin->rx_bit = pin->tx_bit = 0;
+ pin->rx_msg.len = 0;
+ memset(pin->rx_msg.msg, 0, sizeof(pin->rx_msg.msg));
+ pin->state = CEC_ST_IDLE;
+ pin->ts = 0;
+}
+
+/*
+ * Handle Transmit-related states
+ *
+ * Basic state changes when transmitting:
+ *
+ * Idle -> Tx Wait (waiting for the end of signal free time) ->
+ * Tx Start Bit Low -> Tx Start Bit High ->
+ *
+ * Regular data bits + EOM:
+ * Tx Data 0 Low -> Tx Data 0 High ->
+ * or:
+ * Tx Data 1 Low -> Tx Data 1 High ->
+ *
+ * First 4 data bits or Ack bit:
+ * Tx Data 0 Low -> Tx Data 0 High ->
+ * or:
+ * Tx Data 1 Low -> Tx Data 1 High -> Tx Data 1 Pre Sample ->
+ * Tx Data 1 Post Sample ->
+ *
+ * After the last Ack go to Idle.
+ *
+ * If it detects a Low Drive condition then:
+ * Tx Wait For High -> Idle
+ *
+ * If it loses arbitration, then it switches to state Rx Data Post Sample.
+ */
+static void cec_pin_tx_states(struct cec_pin *pin, ktime_t ts)
+{
+ bool v;
+ bool is_ack_bit, ack;
+
+ switch (pin->state) {
+ case CEC_ST_TX_WAIT_FOR_HIGH:
+ if (cec_pin_read(pin))
+ cec_pin_to_idle(pin);
+ break;
+
+ case CEC_ST_TX_START_BIT_LOW:
+ pin->state = CEC_ST_TX_START_BIT_HIGH;
+ /* Generate start bit */
+ cec_pin_high(pin);
+ break;
+
+ case CEC_ST_TX_DATA_BIT_1_HIGH_POST_SAMPLE:
+ /* If the read value is 1, then all is OK */
+ if (!cec_pin_read(pin)) {
+ /*
+ * It's 0, so someone detected an error and pulled the
+ * line low for 1.5 times the nominal bit period.
+ */
+ pin->tx_msg.len = 0;
+ pin->work_tx_ts = ts;
+ pin->work_tx_status = CEC_TX_STATUS_LOW_DRIVE;
+ pin->state = CEC_ST_TX_WAIT_FOR_HIGH;
+ wake_up_interruptible(&pin->kthread_waitq);
+ break;
+ }
+ if (pin->tx_nacked) {
+ cec_pin_to_idle(pin);
+ pin->tx_msg.len = 0;
+ pin->work_tx_ts = ts;
+ pin->work_tx_status = CEC_TX_STATUS_NACK;
+ wake_up_interruptible(&pin->kthread_waitq);
+ break;
+ }
+ /* fall through */
+ case CEC_ST_TX_DATA_BIT_0_HIGH:
+ case CEC_ST_TX_DATA_BIT_1_HIGH:
+ pin->tx_bit++;
+ /* fall through */
+ case CEC_ST_TX_START_BIT_HIGH:
+ if (pin->tx_bit / 10 >= pin->tx_msg.len) {
+ cec_pin_to_idle(pin);
+ pin->tx_msg.len = 0;
+ pin->work_tx_ts = ts;
+ pin->work_tx_status = CEC_TX_STATUS_OK;
+ wake_up_interruptible(&pin->kthread_waitq);
+ break;
+ }
+
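+ /*
+ * Each byte on the bus takes 10 bit periods: 8 data bits followed
+ * by the EOM bit and the ACK bit. So tx_bit / 10 is the byte index
+ * and tx_bit % 10 the bit within that byte.
+ */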
+ switch (pin->tx_bit % 10) {
+ default:
+ v = pin->tx_msg.msg[pin->tx_bit / 10] &
+ (1 << (7 - (pin->tx_bit % 10)));
+ pin->state = v ? CEC_ST_TX_DATA_BIT_1_LOW :
+ CEC_ST_TX_DATA_BIT_0_LOW;
+ break;
+ case 8:
+ v = pin->tx_bit / 10 == pin->tx_msg.len - 1;
+ pin->state = v ? CEC_ST_TX_DATA_BIT_1_LOW :
+ CEC_ST_TX_DATA_BIT_0_LOW;
+ break;
+ case 9:
+ pin->state = CEC_ST_TX_DATA_BIT_1_LOW;
+ break;
+ }
+ cec_pin_low(pin);
+ break;
+
+ case CEC_ST_TX_DATA_BIT_0_LOW:
+ case CEC_ST_TX_DATA_BIT_1_LOW:
+ v = pin->state == CEC_ST_TX_DATA_BIT_1_LOW;
+ pin->state = v ? CEC_ST_TX_DATA_BIT_1_HIGH :
+ CEC_ST_TX_DATA_BIT_0_HIGH;
+ is_ack_bit = pin->tx_bit % 10 == 9;
+ if (v && (pin->tx_bit < 4 || is_ack_bit))
+ pin->state = CEC_ST_TX_DATA_BIT_1_HIGH_PRE_SAMPLE;
+ cec_pin_high(pin);
+ break;
+
+ case CEC_ST_TX_DATA_BIT_1_HIGH_PRE_SAMPLE:
+ /* Read the CEC value at the sample time */
+ v = cec_pin_read(pin);
+ is_ack_bit = pin->tx_bit % 10 == 9;
+ /*
+ * If v == 0 and we're within the first 4 bits
+ * of the initiator, then someone else started
+ * transmitting and we lost the arbitration
+ * (i.e. the logical address of the other
+ * transmitter has more leading 0 bits in the
+ * initiator).
+ */
+ if (!v && !is_ack_bit) {
+ pin->tx_msg.len = 0;
+ pin->work_tx_ts = ts;
+ pin->work_tx_status = CEC_TX_STATUS_ARB_LOST;
+ wake_up_interruptible(&pin->kthread_waitq);
+ pin->rx_bit = pin->tx_bit;
+ pin->tx_bit = 0;
+ memset(pin->rx_msg.msg, 0, sizeof(pin->rx_msg.msg));
+ pin->rx_msg.msg[0] = pin->tx_msg.msg[0];
+ pin->rx_msg.msg[0] &= ~(1 << (7 - pin->rx_bit));
+ pin->rx_msg.len = 0;
+ pin->state = CEC_ST_RX_DATA_POST_SAMPLE;
+ pin->rx_bit++;
+ break;
+ }
+ pin->state = CEC_ST_TX_DATA_BIT_1_HIGH_POST_SAMPLE;
+ if (!is_ack_bit)
+ break;
+ /* Was the message ACKed? */
+ ack = cec_msg_is_broadcast(&pin->tx_msg) ? v : !v;
+ if (!ack) {
+ /*
+ * Note: the CEC spec is ambiguous regarding
+ * what action to take when a NACK appears
+ * before the last byte of the payload was
+ * transmitted: either stop transmitting
+ * immediately, or wait until the last byte
+ * was transmitted.
+ *
+ * Most CEC implementations appear to stop
+ * immediately, and that's what we do here
+ * as well.
+ */
+ pin->tx_nacked = true;
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+/*
+ * Handle Receive-related states
+ *
+ * Basic state changes when receiving:
+ *
+ * Rx Start Bit Low -> Rx Start Bit High ->
+ * Regular data bits + EOM:
+ * Rx Data Sample -> Rx Data Post Sample -> Rx Data High ->
+ * Ack bit 0:
+ * Rx Ack Low -> Rx Ack Low Post -> Rx Data High ->
+ * Ack bit 1:
+ * Rx Ack High Post -> Rx Data High ->
+ * Ack bit 0 && EOM:
+ * Rx Ack Low -> Rx Ack Low Post -> Rx Ack Finish -> Idle
+ */
+static void cec_pin_rx_states(struct cec_pin *pin, ktime_t ts)
+{
+ s32 delta;
+ bool v;
+ bool ack;
+ bool bcast, for_us;
+ u8 dest;
+
+ switch (pin->state) {
+ /* Receive states */
+ case CEC_ST_RX_START_BIT_LOW:
+ v = cec_pin_read(pin);
+ if (!v)
+ break;
+ pin->state = CEC_ST_RX_START_BIT_HIGH;
+ delta = ktime_us_delta(ts, pin->ts);
+ pin->ts = ts;
+ /* Start bit low is too short, go back to idle */
+ if (delta < CEC_TIM_START_BIT_LOW_MIN -
+ CEC_TIM_IDLE_SAMPLE) {
+ cec_pin_to_idle(pin);
+ }
+ break;
+
+ case CEC_ST_RX_START_BIT_HIGH:
+ v = cec_pin_read(pin);
+ delta = ktime_us_delta(ts, pin->ts);
+ if (v && delta > CEC_TIM_START_BIT_TOTAL_MAX -
+ CEC_TIM_START_BIT_LOW_MIN) {
+ cec_pin_to_idle(pin);
+ break;
+ }
+ if (v)
+ break;
+ pin->state = CEC_ST_RX_DATA_SAMPLE;
+ pin->ts = ts;
+ pin->rx_eom = false;
+ break;
+
+ case CEC_ST_RX_DATA_SAMPLE:
+ v = cec_pin_read(pin);
+ pin->state = CEC_ST_RX_DATA_POST_SAMPLE;
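+ /* Bits 0-7 are data bits, bit 8 is the EOM bit, bit 9 the ACK bit */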
+ switch (pin->rx_bit % 10) {
+ default:
+ if (pin->rx_bit / 10 < CEC_MAX_MSG_SIZE)
+ pin->rx_msg.msg[pin->rx_bit / 10] |=
+ v << (7 - (pin->rx_bit % 10));
+ break;
+ case 8:
+ pin->rx_eom = v;
+ pin->rx_msg.len = pin->rx_bit / 10 + 1;
+ break;
+ case 9:
+ break;
+ }
+ pin->rx_bit++;
+ break;
+
+ case CEC_ST_RX_DATA_POST_SAMPLE:
+ pin->state = CEC_ST_RX_DATA_HIGH;
+ break;
+
+ case CEC_ST_RX_DATA_HIGH:
+ v = cec_pin_read(pin);
+ delta = ktime_us_delta(ts, pin->ts);
+ if (v && delta > CEC_TIM_DATA_BIT_TOTAL_MAX) {
+ cec_pin_to_idle(pin);
+ break;
+ }
+ if (v)
+ break;
+ /*
+ * Go to low drive state when the total bit time is
+ * too short.
+ */
+ if (delta < CEC_TIM_DATA_BIT_TOTAL_MIN) {
+ cec_pin_low(pin);
+ pin->state = CEC_ST_LOW_DRIVE;
+ break;
+ }
+ pin->ts = ts;
+ if (pin->rx_bit % 10 != 9) {
+ pin->state = CEC_ST_RX_DATA_SAMPLE;
+ break;
+ }
+
+ dest = cec_msg_destination(&pin->rx_msg);
+ bcast = dest == CEC_LOG_ADDR_BROADCAST;
+ /* for_us == broadcast or directed to us */
+ for_us = bcast || (pin->la_mask & (1 << dest));
+ /*
+ * ACK bit value to put on the bus: 0 acknowledges a directed
+ * message. For broadcast messages the polarity is inverted:
+ * the bit stays 1 unless a follower rejects the message.
+ */
+ ack = bcast ? 1 : !for_us;
+
+ if (ack) {
+ /* No need to write to the bus, just wait */
+ pin->state = CEC_ST_RX_ACK_HIGH_POST;
+ break;
+ }
+ cec_pin_low(pin);
+ pin->state = CEC_ST_RX_ACK_LOW;
+ break;
+
+ case CEC_ST_RX_ACK_LOW:
+ cec_pin_high(pin);
+ pin->state = CEC_ST_RX_ACK_LOW_POST;
+ break;
+
+ case CEC_ST_RX_ACK_LOW_POST:
+ case CEC_ST_RX_ACK_HIGH_POST:
+ v = cec_pin_read(pin);
+ if (v && pin->rx_eom) {
+ pin->work_rx_msg = pin->rx_msg;
+ pin->work_rx_msg.rx_ts = ts;
+ wake_up_interruptible(&pin->kthread_waitq);
+ pin->ts = ts;
+ pin->state = CEC_ST_RX_ACK_FINISH;
+ break;
+ }
+ pin->rx_bit++;
+ pin->state = CEC_ST_RX_DATA_HIGH;
+ break;
+
+ case CEC_ST_RX_ACK_FINISH:
+ cec_pin_to_idle(pin);
+ break;
+
+ default:
+ break;
+ }
+}
+
+/*
+ * Main timer function
+ */
+static enum hrtimer_restart cec_pin_timer(struct hrtimer *timer)
+{
+ struct cec_pin *pin = container_of(timer, struct cec_pin, timer);
+ struct cec_adapter *adap = pin->adap;
+ ktime_t ts;
+ s32 delta;
+
+ ts = ktime_get();
+ if (pin->timer_ts) {
+ delta = ktime_us_delta(ts, pin->timer_ts);
+ pin->timer_cnt++;
+ if (delta > 100 && pin->state != CEC_ST_IDLE) {
+ /* Keep track of timer overruns */
+ pin->timer_sum_overrun += delta;
+ pin->timer_100ms_overruns++;
+ if (delta > 300)
+ pin->timer_300ms_overruns++;
+ if (delta > pin->timer_max_overrun)
+ pin->timer_max_overrun = delta;
+ }
+ }
+ if (adap->monitor_pin_cnt)
+ cec_pin_read(pin);
+
+ if (pin->wait_usecs) {
+ /*
+ * If we are monitoring the pin, then we have to
+ * sample at regular intervals.
+ */
+ if (pin->wait_usecs > 150) {
+ pin->wait_usecs -= 100;
+ pin->timer_ts = ktime_add_us(ts, 100);
+ hrtimer_forward_now(timer, 100000);
+ return HRTIMER_RESTART;
+ }
+ if (pin->wait_usecs > 100) {
+ pin->wait_usecs /= 2;
+ pin->timer_ts = ktime_add_us(ts, pin->wait_usecs);
+ hrtimer_forward_now(timer, pin->wait_usecs * 1000);
+ return HRTIMER_RESTART;
+ }
+ pin->timer_ts = ktime_add_us(ts, pin->wait_usecs);
+ hrtimer_forward_now(timer, pin->wait_usecs * 1000);
+ pin->wait_usecs = 0;
+ return HRTIMER_RESTART;
+ }
+
+ switch (pin->state) {
+ /* Transmit states */
+ case CEC_ST_TX_WAIT_FOR_HIGH:
+ case CEC_ST_TX_START_BIT_LOW:
+ case CEC_ST_TX_DATA_BIT_1_HIGH_POST_SAMPLE:
+ case CEC_ST_TX_DATA_BIT_0_HIGH:
+ case CEC_ST_TX_DATA_BIT_1_HIGH:
+ case CEC_ST_TX_START_BIT_HIGH:
+ case CEC_ST_TX_DATA_BIT_0_LOW:
+ case CEC_ST_TX_DATA_BIT_1_LOW:
+ case CEC_ST_TX_DATA_BIT_1_HIGH_PRE_SAMPLE:
+ cec_pin_tx_states(pin, ts);
+ break;
+
+ /* Receive states */
+ case CEC_ST_RX_START_BIT_LOW:
+ case CEC_ST_RX_START_BIT_HIGH:
+ case CEC_ST_RX_DATA_SAMPLE:
+ case CEC_ST_RX_DATA_POST_SAMPLE:
+ case CEC_ST_RX_DATA_HIGH:
+ case CEC_ST_RX_ACK_LOW:
+ case CEC_ST_RX_ACK_LOW_POST:
+ case CEC_ST_RX_ACK_HIGH_POST:
+ case CEC_ST_RX_ACK_FINISH:
+ cec_pin_rx_states(pin, ts);
+ break;
+
+ case CEC_ST_IDLE:
+ case CEC_ST_TX_WAIT:
+ if (!cec_pin_high(pin)) {
+ /* Start bit, switch to receive state */
+ pin->ts = ts;
+ pin->state = CEC_ST_RX_START_BIT_LOW;
+ break;
+ }
+ if (pin->ts == 0)
+ pin->ts = ts;
+ if (pin->tx_msg.len) {
+ /*
+ * Check if the bus has been free for long enough so we
+ * can kick off the pending transmit. The signal free time
+ * is expressed in nominal data bit periods.
+ */
+ delta = ktime_us_delta(ts, pin->ts);
+ if (delta / CEC_TIM_DATA_BIT_TOTAL >
+ pin->tx_signal_free_time) {
+ pin->tx_nacked = false;
+ pin->state = CEC_ST_TX_START_BIT_LOW;
+ /* Generate start bit */
+ cec_pin_low(pin);
+ break;
+ }
+ if (delta / CEC_TIM_DATA_BIT_TOTAL >
+ pin->tx_signal_free_time - 1)
+ pin->state = CEC_ST_TX_WAIT;
+ break;
+ }
+ if (pin->state != CEC_ST_IDLE || pin->ops->enable_irq == NULL ||
+ pin->enable_irq_failed || adap->is_configuring ||
+ adap->is_configured || adap->monitor_all_cnt)
+ break;
+ /* Switch to interrupt mode */
+ atomic_set(&pin->work_irq_change, CEC_PIN_IRQ_ENABLE);
+ pin->state = CEC_ST_RX_IRQ;
+ wake_up_interruptible(&pin->kthread_waitq);
+ return HRTIMER_NORESTART;
+
+ case CEC_ST_LOW_DRIVE:
+ cec_pin_to_idle(pin);
+ break;
+
+ default:
+ break;
+ }
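+ /*
+ * If the pin is not being monitored, or the period of the next state
+ * is short, then program the full delay. Otherwise wake up again after
+ * roughly 100 microseconds so pin changes are still sampled.
+ */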
+ if (!adap->monitor_pin_cnt || states[pin->state].usecs <= 150) {
+ pin->wait_usecs = 0;
+ pin->timer_ts = ktime_add_us(ts, states[pin->state].usecs);
+ hrtimer_forward_now(timer, states[pin->state].usecs * 1000);
+ return HRTIMER_RESTART;
+ }
+ pin->wait_usecs = states[pin->state].usecs - 100;
+ pin->timer_ts = ktime_add_us(ts, 100);
+ hrtimer_forward_now(timer, 100000);
+ return HRTIMER_RESTART;
+}
+
+static int cec_pin_thread_func(void *_adap)
+{
+ struct cec_adapter *adap = _adap;
+ struct cec_pin *pin = adap->pin;
+
+ for (;;) {
+ wait_event_interruptible(pin->kthread_waitq,
+ kthread_should_stop() ||
+ pin->work_rx_msg.len ||
+ pin->work_tx_status ||
+ atomic_read(&pin->work_irq_change) ||
+ atomic_read(&pin->work_pin_events));
+
+ if (pin->work_rx_msg.len) {
+ cec_received_msg_ts(adap, &pin->work_rx_msg,
+ pin->work_rx_msg.rx_ts);
+ pin->work_rx_msg.len = 0;
+ }
+ if (pin->work_tx_status) {
+ unsigned int tx_status = pin->work_tx_status;
+
+ pin->work_tx_status = 0;
+ cec_transmit_attempt_done_ts(adap, tx_status,
+ pin->work_tx_ts);
+ }
+
+ while (atomic_read(&pin->work_pin_events)) {
+ unsigned int idx = pin->work_pin_events_rd;
+
+ cec_queue_pin_cec_event(adap,
+ pin->work_pin_is_high[idx],
+ pin->work_pin_ts[idx]);
+ pin->work_pin_events_rd = (idx + 1) % CEC_NUM_PIN_EVENTS;
+ atomic_dec(&pin->work_pin_events);
+ }
+
+ switch (atomic_xchg(&pin->work_irq_change,
+ CEC_PIN_IRQ_UNCHANGED)) {
+ case CEC_PIN_IRQ_DISABLE:
+ pin->ops->disable_irq(adap);
+ cec_pin_high(pin);
+ cec_pin_to_idle(pin);
+ hrtimer_start(&pin->timer, 0, HRTIMER_MODE_REL);
+ break;
+ case CEC_PIN_IRQ_ENABLE:
+ pin->enable_irq_failed = !pin->ops->enable_irq(adap);
+ if (pin->enable_irq_failed) {
+ cec_pin_to_idle(pin);
+ hrtimer_start(&pin->timer, 0, HRTIMER_MODE_REL);
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (kthread_should_stop())
+ break;
+ }
+ return 0;
+}
+
+static int cec_pin_adap_enable(struct cec_adapter *adap, bool enable)
+{
+ struct cec_pin *pin = adap->pin;
+
+ pin->enabled = enable;
+ if (enable) {
+ atomic_set(&pin->work_pin_events, 0);
+ pin->work_pin_events_rd = pin->work_pin_events_wr = 0;
+ cec_pin_read(pin);
+ cec_pin_to_idle(pin);
+ pin->tx_msg.len = 0;
+ pin->timer_ts = 0;
+ atomic_set(&pin->work_irq_change, CEC_PIN_IRQ_UNCHANGED);
+ pin->kthread = kthread_run(cec_pin_thread_func, adap,
+ "cec-pin");
+ if (IS_ERR(pin->kthread)) {
+ pr_err("cec-pin: kernel_thread() failed\n");
+ return PTR_ERR(pin->kthread);
+ }
+ hrtimer_start(&pin->timer, 0, HRTIMER_MODE_REL);
+ } else {
+ if (pin->ops->disable_irq)
+ pin->ops->disable_irq(adap);
+ hrtimer_cancel(&pin->timer);
+ kthread_stop(pin->kthread);
+ cec_pin_read(pin);
+ cec_pin_to_idle(pin);
+ pin->state = CEC_ST_OFF;
+ }
+ return 0;
+}
+
+static int cec_pin_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
+{
+ struct cec_pin *pin = adap->pin;
+
+ if (log_addr == CEC_LOG_ADDR_INVALID)
+ pin->la_mask = 0;
+ else
+ pin->la_mask |= (1 << log_addr);
+ return 0;
+}
+
+static int cec_pin_adap_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+{
+ struct cec_pin *pin = adap->pin;
+
+ pin->tx_signal_free_time = signal_free_time;
+ pin->tx_msg = *msg;
+ pin->work_tx_status = 0;
+ pin->tx_bit = 0;
+ if (pin->state == CEC_ST_RX_IRQ) {
+ atomic_set(&pin->work_irq_change, CEC_PIN_IRQ_UNCHANGED);
+ pin->ops->disable_irq(adap);
+ cec_pin_high(pin);
+ cec_pin_to_idle(pin);
+ hrtimer_start(&pin->timer, 0, HRTIMER_MODE_REL);
+ }
+ return 0;
+}
+
+static void cec_pin_adap_status(struct cec_adapter *adap,
+ struct seq_file *file)
+{
+ struct cec_pin *pin = adap->pin;
+
+ seq_printf(file, "state: %s\n", states[pin->state].name);
+ seq_printf(file, "tx_bit: %d\n", pin->tx_bit);
+ seq_printf(file, "rx_bit: %d\n", pin->rx_bit);
+ seq_printf(file, "cec pin: %d\n", pin->ops->read(adap));
+ seq_printf(file, "irq failed: %d\n", pin->enable_irq_failed);
+ if (pin->timer_100ms_overruns) {
+ seq_printf(file, "timer overruns > 100ms: %u of %u\n",
+ pin->timer_100ms_overruns, pin->timer_cnt);
+ seq_printf(file, "timer overruns > 300ms: %u of %u\n",
+ pin->timer_300ms_overruns, pin->timer_cnt);
+ seq_printf(file, "max timer overrun: %u usecs\n",
+ pin->timer_max_overrun);
+ seq_printf(file, "avg timer overrun: %u usecs\n",
+ pin->timer_sum_overrun / pin->timer_100ms_overruns);
+ }
+ pin->timer_cnt = 0;
+ pin->timer_100ms_overruns = 0;
+ pin->timer_300ms_overruns = 0;
+ pin->timer_max_overrun = 0;
+ pin->timer_sum_overrun = 0;
+ if (pin->ops->status)
+ pin->ops->status(adap, file);
+}
+
+static int cec_pin_adap_monitor_all_enable(struct cec_adapter *adap,
+ bool enable)
+{
+ struct cec_pin *pin = adap->pin;
+
+ pin->monitor_all = enable;
+ return 0;
+}
+
+static void cec_pin_adap_free(struct cec_adapter *adap)
+{
+ struct cec_pin *pin = adap->pin;
+
+ if (pin->ops->free)
+ pin->ops->free(adap);
+ adap->pin = NULL;
+ kfree(pin);
+}
+
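+/*
+ * Called by the pin driver when it detects a change on the CEC pin.
+ * If the pin went low while the adapter is configured, configuring or
+ * monitoring all traffic, then switch from interrupt mode back to the
+ * timer-driven state machine.
+ */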
+void cec_pin_changed(struct cec_adapter *adap, bool value)
+{
+ struct cec_pin *pin = adap->pin;
+
+ cec_pin_update(pin, value, false);
+ if (!value && (adap->is_configuring || adap->is_configured ||
+ adap->monitor_all_cnt))
+ atomic_set(&pin->work_irq_change, CEC_PIN_IRQ_DISABLE);
+}
+EXPORT_SYMBOL_GPL(cec_pin_changed);
+
+static const struct cec_adap_ops cec_pin_adap_ops = {
+ .adap_enable = cec_pin_adap_enable,
+ .adap_monitor_all_enable = cec_pin_adap_monitor_all_enable,
+ .adap_log_addr = cec_pin_adap_log_addr,
+ .adap_transmit = cec_pin_adap_transmit,
+ .adap_status = cec_pin_adap_status,
+ .adap_free = cec_pin_adap_free,
+};
+
+struct cec_adapter *cec_pin_allocate_adapter(const struct cec_pin_ops *pin_ops,
+ void *priv, const char *name, u32 caps)
+{
+ struct cec_adapter *adap;
+ struct cec_pin *pin = kzalloc(sizeof(*pin), GFP_KERNEL);
+
+ if (pin == NULL)
+ return ERR_PTR(-ENOMEM);
+ pin->ops = pin_ops;
+ hrtimer_init(&pin->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ pin->timer.function = cec_pin_timer;
+ init_waitqueue_head(&pin->kthread_waitq);
+
+ adap = cec_allocate_adapter(&cec_pin_adap_ops, priv, name,
+ caps | CEC_CAP_MONITOR_ALL | CEC_CAP_MONITOR_PIN,
+ CEC_MAX_LOG_ADDRS);
+
+ if (PTR_ERR_OR_ZERO(adap)) {
+ kfree(pin);
+ return adap;
+ }
+
+ adap->pin = pin;
+ pin->adap = adap;
+ cec_pin_update(pin, cec_pin_high(pin), true);
+ return adap;
+}
+EXPORT_SYMBOL_GPL(cec_pin_allocate_adapter);
diff --git a/drivers/media/common/saa7146/saa7146_i2c.c b/drivers/media/common/saa7146/saa7146_i2c.c
index 239a2db35068..75897f95e4b4 100644
--- a/drivers/media/common/saa7146/saa7146_i2c.c
+++ b/drivers/media/common/saa7146/saa7146_i2c.c
@@ -395,7 +395,7 @@ static int saa7146_i2c_xfer(struct i2c_adapter* adapter, struct i2c_msg *msg, in
/* i2c-adapter helper functions */
/* exported algorithm data */
-static struct i2c_algorithm saa7146_algo = {
+static const struct i2c_algorithm saa7146_algo = {
.master_xfer = saa7146_i2c_xfer,
.functionality = saa7146_i2c_func,
};
diff --git a/drivers/media/common/saa7146/saa7146_vbi.c b/drivers/media/common/saa7146/saa7146_vbi.c
index 3553ac4cba5c..d79e4d7ecd9f 100644
--- a/drivers/media/common/saa7146/saa7146_vbi.c
+++ b/drivers/media/common/saa7146/saa7146_vbi.c
@@ -308,7 +308,7 @@ static void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
saa7146_dma_free(dev,q,buf);
}
-static struct videobuf_queue_ops vbi_qops = {
+static const struct videobuf_queue_ops vbi_qops = {
.buf_setup = buffer_setup,
.buf_prepare = buffer_prepare,
.buf_queue = buffer_queue,
diff --git a/drivers/media/common/saa7146/saa7146_video.c b/drivers/media/common/saa7146/saa7146_video.c
index b3b29d4f36ed..37b4654dc21c 100644
--- a/drivers/media/common/saa7146/saa7146_video.c
+++ b/drivers/media/common/saa7146/saa7146_video.c
@@ -1187,7 +1187,7 @@ static void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
release_all_pagetables(dev, buf);
}
-static struct videobuf_queue_ops video_qops = {
+static const struct videobuf_queue_ops video_qops = {
.buf_setup = buffer_setup,
.buf_prepare = buffer_prepare,
.buf_queue = buffer_queue,
diff --git a/drivers/media/common/siano/smsir.c b/drivers/media/common/siano/smsir.c
index 7c898b06d85c..e77bb0c95e69 100644
--- a/drivers/media/common/siano/smsir.c
+++ b/drivers/media/common/siano/smsir.c
@@ -73,7 +73,7 @@ int sms_ir_init(struct smscore_device_t *coredev)
strlcpy(coredev->ir.phys, coredev->devpath, sizeof(coredev->ir.phys));
strlcat(coredev->ir.phys, "/ir0", sizeof(coredev->ir.phys));
- dev->input_name = coredev->ir.name;
+ dev->device_name = coredev->ir.name;
dev->input_phys = coredev->ir.phys;
dev->dev.parent = coredev->device;
@@ -86,12 +86,12 @@ int sms_ir_init(struct smscore_device_t *coredev)
#endif
dev->priv = coredev;
- dev->allowed_protocols = RC_BIT_ALL_IR_DECODER;
+ dev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
dev->map_name = sms_get_board(board_id)->rc_codes;
dev->driver_name = MODULE_NAME;
pr_debug("Input device (IR) %s is set for key events\n",
- dev->input_name);
+ dev->device_name);
err = rc_register_device(dev);
if (err < 0) {
diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-colors.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-colors.c
index 9bcbd318489b..5b5f95c38fe1 100644
--- a/drivers/media/common/v4l2-tpg/v4l2-tpg-colors.c
+++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-colors.c
@@ -646,14 +646,14 @@ const struct color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFER_FUNC_S
[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_DCI_P3][5] = { 3248, 944, 1094 },
[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_DCI_P3][6] = { 1017, 967, 3168 },
[V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 },
- [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][0] = { 3798, 3798, 3798 },
- [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][1] = { 3802, 3805, 2602 },
- [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][2] = { 0, 3806, 3797 },
- [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][3] = { 1780, 3812, 2592 },
- [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][4] = { 3820, 2215, 3796 },
- [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][5] = { 3824, 2409, 2574 },
- [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][6] = { 2491, 2435, 3795 },
- [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][7] = { 2563, 2563, 2563 },
+ [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][0] = { 1812, 1812, 1812 },
+ [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][1] = { 1815, 1818, 910 },
+ [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][2] = { 0, 1819, 1811 },
+ [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][3] = { 472, 1825, 904 },
+ [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][4] = { 1832, 686, 1810 },
+ [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][5] = { 1835, 794, 893 },
+ [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][6] = { 843, 809, 1810 },
+ [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE2084][7] = { 886, 886, 886 },
[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 },
[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_709][1] = { 2953, 2963, 586 },
[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_709][2] = { 0, 2967, 2937 },
@@ -702,14 +702,14 @@ const struct color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFER_FUNC_S
[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_DCI_P3][5] = { 3248, 944, 1094 },
[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_DCI_P3][6] = { 1017, 967, 3168 },
[V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 },
- [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][0] = { 3798, 3798, 3798 },
- [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][1] = { 3802, 3805, 2602 },
- [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][2] = { 0, 3806, 3797 },
- [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][3] = { 1780, 3812, 2592 },
- [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][4] = { 3820, 2215, 3796 },
- [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][5] = { 3824, 2409, 2574 },
- [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][6] = { 2491, 2435, 3795 },
- [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][7] = { 2563, 2563, 2563 },
+ [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][0] = { 1812, 1812, 1812 },
+ [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][1] = { 1815, 1818, 910 },
+ [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][2] = { 0, 1819, 1811 },
+ [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][3] = { 472, 1825, 904 },
+ [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][4] = { 1832, 686, 1810 },
+ [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][5] = { 1835, 794, 893 },
+ [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][6] = { 843, 809, 1810 },
+ [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE2084][7] = { 886, 886, 886 },
[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 },
[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_709][1] = { 2939, 2939, 547 },
[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_709][2] = { 547, 2939, 2939 },
@@ -758,14 +758,14 @@ const struct color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFER_FUNC_S
[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_DCI_P3][5] = { 3175, 1084, 1084 },
[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_DCI_P3][6] = { 1084, 1084, 3175 },
[V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 },
- [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][0] = { 3798, 3798, 3798 },
- [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][1] = { 3798, 3798, 2563 },
- [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][2] = { 2563, 3798, 3798 },
- [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][3] = { 2563, 3798, 2563 },
- [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][4] = { 3798, 2563, 3798 },
- [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][5] = { 3798, 2563, 2563 },
- [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][6] = { 2563, 2563, 3798 },
- [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][7] = { 2563, 2563, 2563 },
+ [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][0] = { 1812, 1812, 1812 },
+ [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][1] = { 1812, 1812, 886 },
+ [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][2] = { 886, 1812, 1812 },
+ [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][3] = { 886, 1812, 886 },
+ [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][4] = { 1812, 886, 1812 },
+ [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][5] = { 1812, 886, 886 },
+ [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][6] = { 886, 886, 1812 },
+ [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE2084][7] = { 886, 886, 886 },
[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 },
[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_709][1] = { 2892, 3034, 910 },
[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_709][2] = { 1715, 2916, 2914 },
@@ -814,14 +814,14 @@ const struct color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFER_FUNC_S
[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_DCI_P3][5] = { 2765, 1182, 1190 },
[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_DCI_P3][6] = { 1270, 0, 3094 },
[V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 },
- [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][0] = { 3798, 3798, 3798 },
- [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][1] = { 3784, 3825, 2879 },
- [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][2] = { 3351, 3791, 3790 },
- [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][3] = { 3311, 3819, 2815 },
- [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][4] = { 3659, 1900, 3777 },
- [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][5] = { 3640, 2662, 2669 },
- [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][6] = { 2743, 0, 3769 },
- [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][7] = { 2563, 2563, 2563 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][0] = { 1812, 1812, 1812 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][1] = { 1800, 1836, 1090 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][2] = { 1436, 1806, 1805 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][3] = { 1405, 1830, 1047 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][4] = { 1691, 527, 1793 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][5] = { 1674, 947, 952 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][6] = { 1000, 0, 1786 },
+ [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE2084][7] = { 886, 886, 886 },
[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 },
[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_709][1] = { 2939, 2939, 464 },
[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_709][2] = { 786, 2939, 2939 },
@@ -870,14 +870,14 @@ const struct color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFER_FUNC_S
[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_DCI_P3][5] = { 3126, 1084, 1084 },
[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_DCI_P3][6] = { 1084, 1084, 3188 },
[V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 },
- [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][0] = { 3798, 3798, 3798 },
- [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][1] = { 3798, 3798, 2476 },
- [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][2] = { 2782, 3798, 3798 },
- [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][3] = { 2782, 3798, 2476 },
- [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][4] = { 3780, 2563, 3803 },
- [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][5] = { 3780, 2563, 2563 },
- [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][6] = { 2563, 2563, 3803 },
- [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][7] = { 2563, 2563, 2563 },
+ [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][0] = { 1812, 1812, 1812 },
+ [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][1] = { 1812, 1812, 833 },
+ [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][2] = { 1025, 1812, 1812 },
+ [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][3] = { 1025, 1812, 833 },
+ [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][4] = { 1796, 886, 1816 },
+ [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][5] = { 1796, 886, 886 },
+ [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][6] = { 886, 886, 1816 },
+ [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE2084][7] = { 886, 886, 886 },
[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 },
[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_709][1] = { 2939, 2939, 547 },
[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_709][2] = { 547, 2939, 2939 },
@@ -926,14 +926,14 @@ const struct color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFER_FUNC_S
[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_DCI_P3][5] = { 3175, 1084, 1084 },
[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_DCI_P3][6] = { 1084, 1084, 3175 },
[V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 },
- [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][0] = { 3798, 3798, 3798 },
- [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][1] = { 3798, 3798, 2563 },
- [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][2] = { 2563, 3798, 3798 },
- [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][3] = { 2563, 3798, 2563 },
- [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][4] = { 3798, 2563, 3798 },
- [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][5] = { 3798, 2563, 2563 },
- [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][6] = { 2563, 2563, 3798 },
- [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][7] = { 2563, 2563, 2563 },
+ [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][0] = { 1812, 1812, 1812 },
+ [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][1] = { 1812, 1812, 886 },
+ [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][2] = { 886, 1812, 1812 },
+ [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][3] = { 886, 1812, 886 },
+ [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][4] = { 1812, 886, 1812 },
+ [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][5] = { 1812, 886, 886 },
+ [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][6] = { 886, 886, 1812 },
+ [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][7] = { 886, 886, 886 },
[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 },
[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][1] = { 2939, 2939, 781 },
[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][2] = { 1622, 2939, 2939 },
@@ -982,14 +982,14 @@ const struct color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFER_FUNC_S
[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][5] = { 2816, 1084, 1084 },
[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][6] = { 1084, 1084, 3127 },
[V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 },
- [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][0] = { 3798, 3798, 3798 },
- [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][1] = { 3798, 3798, 2778 },
- [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][2] = { 3306, 3798, 3798 },
- [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][3] = { 3306, 3798, 2778 },
- [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][4] = { 3661, 2563, 3781 },
- [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][5] = { 3661, 2563, 2563 },
- [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][6] = { 2563, 2563, 3781 },
- [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][7] = { 2563, 2563, 2563 },
+ [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][0] = { 1812, 1812, 1812 },
+ [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][1] = { 1812, 1812, 1022 },
+ [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][2] = { 1402, 1812, 1812 },
+ [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][3] = { 1402, 1812, 1022 },
+ [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][4] = { 1692, 886, 1797 },
+ [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][5] = { 1692, 886, 886 },
+ [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][6] = { 886, 886, 1797 },
+ [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][7] = { 886, 886, 886 },
[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 },
[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_709][1] = { 2877, 2923, 1058 },
[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_709][2] = { 1837, 2840, 2916 },
@@ -1038,14 +1038,14 @@ const struct color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFER_FUNC_S
[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_DCI_P3][5] = { 2690, 1431, 1182 },
[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_DCI_P3][6] = { 1318, 1153, 3051 },
[V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 },
- [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][0] = { 3798, 3798, 3798 },
- [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][1] = { 3780, 3793, 2984 },
- [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][2] = { 3406, 3768, 3791 },
- [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][3] = { 3359, 3763, 2939 },
- [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][4] = { 3636, 2916, 3760 },
- [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][5] = { 3609, 2880, 2661 },
- [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][6] = { 2786, 2633, 3753 },
- [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][7] = { 2563, 2563, 2563 },
+ [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][0] = { 1812, 1812, 1812 },
+ [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][1] = { 1796, 1808, 1163 },
+ [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][2] = { 1480, 1786, 1806 },
+ [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][3] = { 1443, 1781, 1131 },
+ [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][4] = { 1670, 1116, 1778 },
+ [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][5] = { 1648, 1091, 947 },
+ [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][6] = { 1028, 929, 1772 },
+ [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE2084][7] = { 886, 886, 886 },
[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 },
[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_709][1] = { 2936, 2934, 992 },
[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_709][2] = { 1159, 2890, 2916 },
@@ -1094,14 +1094,14 @@ const struct color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFER_FUNC_S
[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_DCI_P3][5] = { 3018, 1276, 1184 },
[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_DCI_P3][6] = { 1100, 1107, 3071 },
[V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 },
- [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][0] = { 3798, 3798, 3798 },
- [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][1] = { 3797, 3796, 2938 },
- [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][2] = { 3049, 3783, 3791 },
- [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][3] = { 3044, 3782, 2887 },
- [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][4] = { 3741, 2765, 3768 },
- [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][5] = { 3740, 2749, 2663 },
- [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][6] = { 2580, 2587, 3760 },
- [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][7] = { 2563, 2563, 2563 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][0] = { 1812, 1812, 1812 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][1] = { 1811, 1810, 1131 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][2] = { 1210, 1799, 1806 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][3] = { 1206, 1798, 1096 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][4] = { 1762, 1014, 1785 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][5] = { 1761, 1004, 948 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][6] = { 896, 901, 1778 },
+ [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE2084][7] = { 886, 886, 886 },
};
#else
@@ -1225,6 +1225,12 @@ static double transfer_rgb_to_smpte2084(double v)
const double c2 = 32.0 * 2413.0 / 4096.0;
const double c3 = 32.0 * 2392.0 / 4096.0;
+ /*
+ * The RGB input maps to the luminance range 0-100 cd/m^2, while
+ * SMPTE-2084 maps values to the luminance range of 0-10000 cd/m^2.
+ * Hence the factor 100.
+ */
+ v /= 100.0;
v = pow(v, m1);
return pow((c1 + c2 * v) / (1 + c3 * v), m2);
}
diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
index 3dd22da7e17d..a772976cfe26 100644
--- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
+++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
@@ -615,7 +615,7 @@ static void color_to_ycbcr(struct tpg_data *tpg, int r, int g, int b,
static const int bt2020_full[3][3] = {
{ COEFF(0.2627, 255), COEFF(0.6780, 255), COEFF(0.0593, 255) },
{ COEFF(-0.1396, 255), COEFF(-0.3604, 255), COEFF(0.5, 255) },
- { COEFF(0.5, 255), COEFF(-0.4698, 255), COEFF(-0.0402, 255) },
+ { COEFF(0.5, 255), COEFF(-0.4598, 255), COEFF(-0.0402, 255) },
};
static const int bt2020c[4] = {
COEFF(1.0 / 1.9404, 224), COEFF(1.0 / 1.5816, 224),
diff --git a/drivers/media/dvb-core/demux.h b/drivers/media/dvb-core/demux.h
index f854309ba8a5..c4df6cee48e6 100644
--- a/drivers/media/dvb-core/demux.h
+++ b/drivers/media/dvb-core/demux.h
@@ -210,7 +210,7 @@ struct dmx_section_feed {
* the start of the first undelivered TS packet within a circular buffer.
* The @buffer2 buffer parameter is normally NULL, except when the received
* TS packets have crossed the last address of the circular buffer and
- * ”wrapped” to the beginning of the buffer. In the latter case the @buffer1
+ * "wrapped" to the beginning of the buffer. In the latter case the @buffer1
* parameter would contain an address within the circular buffer, while the
* @buffer2 parameter would contain the first address of the circular buffer.
* The number of bytes delivered with this function (i.e. @buffer1_length +
diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c
index 45e91add73ba..18e4230865be 100644
--- a/drivers/media/dvb-core/dmxdev.c
+++ b/drivers/media/dvb-core/dmxdev.c
@@ -562,7 +562,7 @@ static int dvb_dmxdev_start_feed(struct dmxdev *dmxdev,
{
ktime_t timeout = 0;
struct dmx_pes_filter_params *para = &filter->params.pes;
- dmx_output_t otype;
+ enum dmx_output otype;
int ret;
int ts_type;
enum dmx_ts_pes ts_pes;
@@ -787,7 +787,7 @@ static int dvb_dmxdev_filter_free(struct dmxdev *dmxdev,
return 0;
}
-static inline void invert_mode(dmx_filter_t *filter)
+static inline void invert_mode(struct dmx_filter *filter)
{
int i;
@@ -1025,26 +1025,6 @@ static int dvb_demux_do_ioctl(struct file *file,
dmxdev->demux->get_pes_pids(dmxdev->demux, parg);
break;
-#if 0
- /* Not used upstream and never documented */
-
- case DMX_GET_CAPS:
- if (!dmxdev->demux->get_caps) {
- ret = -EINVAL;
- break;
- }
- ret = dmxdev->demux->get_caps(dmxdev->demux, parg);
- break;
-
- case DMX_SET_SOURCE:
- if (!dmxdev->demux->set_source) {
- ret = -EINVAL;
- break;
- }
- ret = dmxdev->demux->set_source(dmxdev->demux, parg);
- break;
-#endif
-
case DMX_GET_STC:
if (!dmxdev->demux->get_stc) {
ret = -EINVAL;
diff --git a/drivers/media/dvb-core/dvb-usb-ids.h b/drivers/media/dvb-core/dvb-usb-ids.h
index e200aa6f2d2f..5b6041d462bc 100644
--- a/drivers/media/dvb-core/dvb-usb-ids.h
+++ b/drivers/media/dvb-core/dvb-usb-ids.h
@@ -279,6 +279,7 @@
#define USB_PID_TERRATEC_H7 0x10b4
#define USB_PID_TERRATEC_H7_2 0x10a3
#define USB_PID_TERRATEC_H7_3 0x10a5
+#define USB_PID_TERRATEC_T1 0x10ae
#define USB_PID_TERRATEC_T3 0x10a0
#define USB_PID_TERRATEC_T5 0x10a1
#define USB_PID_NOXON_DAB_STICK 0x00b3
diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c
index af694f2066a2..95b3723282f4 100644
--- a/drivers/media/dvb-core/dvb_ca_en50221.c
+++ b/drivers/media/dvb-core/dvb_ca_en50221.c
@@ -76,8 +76,6 @@ MODULE_PARM_DESC(cam_debug, "enable verbose debug messages");
#define STATUSREG_WE 2 /* write error */
#define STATUSREG_FR 0x40 /* module free */
#define STATUSREG_DA 0x80 /* data available */
-#define STATUSREG_TXERR (STATUSREG_RE|STATUSREG_WE) /* general transfer error */
-
#define DVB_CA_SLOTSTATE_NONE 0
#define DVB_CA_SLOTSTATE_UNINITIALISED 1
@@ -88,10 +86,8 @@ MODULE_PARM_DESC(cam_debug, "enable verbose debug messages");
#define DVB_CA_SLOTSTATE_WAITFR 6
#define DVB_CA_SLOTSTATE_LINKINIT 7
-
/* Information on a CA slot */
struct dvb_ca_slot {
-
/* current state of the CAM */
int slot_state;
@@ -157,7 +153,10 @@ struct dvb_ca_private {
/* Delay the main thread should use */
unsigned long delay;
- /* Slot to start looking for data to read from in the next user-space read operation */
+ /*
+ * Slot to start looking for data to read from in the next user-space
+ * read operation
+ */
int next_read_slot;
/* mutex serializing ioctls */
@@ -178,7 +177,9 @@ static void dvb_ca_private_free(struct dvb_ca_private *ca)
static void dvb_ca_private_release(struct kref *ref)
{
- struct dvb_ca_private *ca = container_of(ref, struct dvb_ca_private, refcount);
+ struct dvb_ca_private *ca;
+
+ ca = container_of(ref, struct dvb_ca_private, refcount);
dvb_ca_private_free(ca);
}
@@ -198,7 +199,6 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot,
static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot,
u8 *ebuf, int ecount);
-
/**
* Safely find needle in haystack.
*
@@ -223,25 +223,22 @@ static char *findstr(char *haystack, int hlen, char *needle, int nlen)
return NULL;
}
-
-
-/* ******************************************************************************** */
+/* ************************************************************************** */
/* EN50221 physical interface functions */
-
/**
* dvb_ca_en50221_check_camstatus - Check CAM status.
*/
static int dvb_ca_en50221_check_camstatus(struct dvb_ca_private *ca, int slot)
{
+ struct dvb_ca_slot *sl = &ca->slot_info[slot];
int slot_status;
int cam_present_now;
int cam_changed;
/* IRQ mode */
- if (ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE) {
- return (atomic_read(&ca->slot_info[slot].camchange_count) != 0);
- }
+ if (ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE)
+ return (atomic_read(&sl->camchange_count) != 0);
/* poll mode */
slot_status = ca->pub->poll_slot_status(ca->pub, slot, ca->open);
@@ -249,29 +246,28 @@ static int dvb_ca_en50221_check_camstatus(struct dvb_ca_private *ca, int slot)
cam_present_now = (slot_status & DVB_CA_EN50221_POLL_CAM_PRESENT) ? 1 : 0;
cam_changed = (slot_status & DVB_CA_EN50221_POLL_CAM_CHANGED) ? 1 : 0;
if (!cam_changed) {
- int cam_present_old = (ca->slot_info[slot].slot_state != DVB_CA_SLOTSTATE_NONE);
+ int cam_present_old = (sl->slot_state != DVB_CA_SLOTSTATE_NONE);
+
cam_changed = (cam_present_now != cam_present_old);
}
if (cam_changed) {
- if (!cam_present_now) {
- ca->slot_info[slot].camchange_type = DVB_CA_EN50221_CAMCHANGE_REMOVED;
- } else {
- ca->slot_info[slot].camchange_type = DVB_CA_EN50221_CAMCHANGE_INSERTED;
- }
- atomic_set(&ca->slot_info[slot].camchange_count, 1);
+ if (!cam_present_now)
+ sl->camchange_type = DVB_CA_EN50221_CAMCHANGE_REMOVED;
+ else
+ sl->camchange_type = DVB_CA_EN50221_CAMCHANGE_INSERTED;
+ atomic_set(&sl->camchange_count, 1);
} else {
- if ((ca->slot_info[slot].slot_state == DVB_CA_SLOTSTATE_WAITREADY) &&
+ if ((sl->slot_state == DVB_CA_SLOTSTATE_WAITREADY) &&
(slot_status & DVB_CA_EN50221_POLL_CAM_READY)) {
- // move to validate state if reset is completed
- ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_VALIDATE;
+ /* move to validate state if reset is completed */
+ sl->slot_state = DVB_CA_SLOTSTATE_VALIDATE;
}
}
return cam_changed;
}
-
/**
* dvb_ca_en50221_wait_if_status - Wait for flags to become set on the STATUS
* register on a CAM interface, checking for errors and timeout.
@@ -295,8 +291,10 @@ static int dvb_ca_en50221_wait_if_status(struct dvb_ca_private *ca, int slot,
start = jiffies;
timeout = jiffies + timeout_hz;
while (1) {
+ int res;
+
/* read the status and check for error */
- int res = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS);
+ res = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS);
if (res < 0)
return -EIO;
@@ -308,12 +306,11 @@ static int dvb_ca_en50221_wait_if_status(struct dvb_ca_private *ca, int slot,
}
/* check for timeout */
- if (time_after(jiffies, timeout)) {
+ if (time_after(jiffies, timeout))
break;
- }
/* wait for a bit */
- msleep(1);
+ usleep_range(1000, 1100);
}
dprintk("%s failed timeout:%lu\n", __func__, jiffies - start);
@@ -322,7 +319,6 @@ static int dvb_ca_en50221_wait_if_status(struct dvb_ca_private *ca, int slot,
return -ETIMEDOUT;
}
-
/**
* dvb_ca_en50221_link_init - Initialise the link layer connection to a CAM.
*
@@ -333,6 +329,7 @@ static int dvb_ca_en50221_wait_if_status(struct dvb_ca_private *ca, int slot,
*/
static int dvb_ca_en50221_link_init(struct dvb_ca_private *ca, int slot)
{
+ struct dvb_ca_slot *sl = &ca->slot_info[slot];
int ret;
int buf_size;
u8 buf[2];
@@ -340,39 +337,54 @@ static int dvb_ca_en50221_link_init(struct dvb_ca_private *ca, int slot)
dprintk("%s\n", __func__);
/* we'll be determining these during this function */
- ca->slot_info[slot].da_irq_supported = 0;
+ sl->da_irq_supported = 0;
- /* set the host link buffer size temporarily. it will be overwritten with the
- * real negotiated size later. */
- ca->slot_info[slot].link_buf_size = 2;
+ /*
+ * set the host link buffer size temporarily. it will be overwritten
+ * with the real negotiated size later.
+ */
+ sl->link_buf_size = 2;
/* read the buffer size from the CAM */
- if ((ret = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND, IRQEN | CMDREG_SR)) != 0)
+ ret = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND,
+ IRQEN | CMDREG_SR);
+ if (ret)
return ret;
- if ((ret = dvb_ca_en50221_wait_if_status(ca, slot, STATUSREG_DA, HZ / 10)) != 0)
+ ret = dvb_ca_en50221_wait_if_status(ca, slot, STATUSREG_DA, HZ);
+ if (ret)
return ret;
- if ((ret = dvb_ca_en50221_read_data(ca, slot, buf, 2)) != 2)
+ ret = dvb_ca_en50221_read_data(ca, slot, buf, 2);
+ if (ret != 2)
return -EIO;
- if ((ret = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND, IRQEN)) != 0)
+ ret = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND, IRQEN);
+ if (ret)
return ret;
- /* store it, and choose the minimum of our buffer and the CAM's buffer size */
+ /*
+ * store it, and choose the minimum of our buffer and the CAM's buffer
+ * size
+ */
buf_size = (buf[0] << 8) | buf[1];
if (buf_size > HOST_LINK_BUF_SIZE)
buf_size = HOST_LINK_BUF_SIZE;
- ca->slot_info[slot].link_buf_size = buf_size;
+ sl->link_buf_size = buf_size;
buf[0] = buf_size >> 8;
buf[1] = buf_size & 0xff;
dprintk("Chosen link buffer size of %i\n", buf_size);
/* write the buffer size to the CAM */
- if ((ret = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND, IRQEN | CMDREG_SW)) != 0)
+ ret = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND,
+ IRQEN | CMDREG_SW);
+ if (ret)
return ret;
- if ((ret = dvb_ca_en50221_wait_if_status(ca, slot, STATUSREG_FR, HZ / 10)) != 0)
+ ret = dvb_ca_en50221_wait_if_status(ca, slot, STATUSREG_FR, HZ / 10);
+ if (ret)
return ret;
- if ((ret = dvb_ca_en50221_write_data(ca, slot, buf, 2)) != 2)
+ ret = dvb_ca_en50221_write_data(ca, slot, buf, 2);
+ if (ret != 2)
return -EIO;
- if ((ret = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND, IRQEN)) != 0)
+ ret = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND, IRQEN);
+ if (ret)
return ret;
/* success */
@@ -392,47 +404,50 @@ static int dvb_ca_en50221_link_init(struct dvb_ca_private *ca, int slot)
* @return 0 on success, nonzero on error.
*/
static int dvb_ca_en50221_read_tuple(struct dvb_ca_private *ca, int slot,
- int *address, int *tupleType,
- int *tupleLength, u8 *tuple)
+ int *address, int *tuple_type,
+ int *tuple_length, u8 *tuple)
{
int i;
- int _tupleType;
- int _tupleLength;
+ int _tuple_type;
+ int _tuple_length;
int _address = *address;
/* grab the next tuple length and type */
- if ((_tupleType = ca->pub->read_attribute_mem(ca->pub, slot, _address)) < 0)
- return _tupleType;
- if (_tupleType == 0xff) {
- dprintk("END OF CHAIN TUPLE type:0x%x\n", _tupleType);
+ _tuple_type = ca->pub->read_attribute_mem(ca->pub, slot, _address);
+ if (_tuple_type < 0)
+ return _tuple_type;
+ if (_tuple_type == 0xff) {
+ dprintk("END OF CHAIN TUPLE type:0x%x\n", _tuple_type);
*address += 2;
- *tupleType = _tupleType;
- *tupleLength = 0;
+ *tuple_type = _tuple_type;
+ *tuple_length = 0;
return 0;
}
- if ((_tupleLength = ca->pub->read_attribute_mem(ca->pub, slot, _address + 2)) < 0)
- return _tupleLength;
+ _tuple_length = ca->pub->read_attribute_mem(ca->pub, slot,
+ _address + 2);
+ if (_tuple_length < 0)
+ return _tuple_length;
_address += 4;
- dprintk("TUPLE type:0x%x length:%i\n", _tupleType, _tupleLength);
+ dprintk("TUPLE type:0x%x length:%i\n", _tuple_type, _tuple_length);
/* read in the whole tuple */
- for (i = 0; i < _tupleLength; i++) {
- tuple[i] = ca->pub->read_attribute_mem(ca->pub, slot, _address + (i * 2));
+ for (i = 0; i < _tuple_length; i++) {
+ tuple[i] = ca->pub->read_attribute_mem(ca->pub, slot,
+ _address + (i * 2));
dprintk(" 0x%02x: 0x%02x %c\n",
i, tuple[i] & 0xff,
((tuple[i] > 31) && (tuple[i] < 127)) ? tuple[i] : '.');
}
- _address += (_tupleLength * 2);
+ _address += (_tuple_length * 2);
- // success
- *tupleType = _tupleType;
- *tupleLength = _tupleLength;
+ /* success */
+ *tuple_type = _tuple_type;
+ *tuple_length = _tuple_length;
*address = _address;
return 0;
}
-
/**
* dvb_ca_en50221_parse_attributes - Parse attribute memory of a CAM module,
* extracting Config register, and checking it is a DVB CAM module.
@@ -444,9 +459,10 @@ static int dvb_ca_en50221_read_tuple(struct dvb_ca_private *ca, int slot,
*/
static int dvb_ca_en50221_parse_attributes(struct dvb_ca_private *ca, int slot)
{
+ struct dvb_ca_slot *sl;
int address = 0;
- int tupleLength;
- int tupleType;
+ int tuple_length;
+ int tuple_type;
u8 tuple[257];
char *dvb_str;
int rasz;
@@ -457,70 +473,66 @@ static int dvb_ca_en50221_parse_attributes(struct dvb_ca_private *ca, int slot)
u16 manfid = 0;
u16 devid = 0;
-
- // CISTPL_DEVICE_0A
- if ((status =
- dvb_ca_en50221_read_tuple(ca, slot, &address, &tupleType, &tupleLength, tuple)) < 0)
+ /* CISTPL_DEVICE_0A */
+ status = dvb_ca_en50221_read_tuple(ca, slot, &address, &tuple_type,
+ &tuple_length, tuple);
+ if (status < 0)
return status;
- if (tupleType != 0x1D)
+ if (tuple_type != 0x1D)
return -EINVAL;
-
-
- // CISTPL_DEVICE_0C
- if ((status =
- dvb_ca_en50221_read_tuple(ca, slot, &address, &tupleType, &tupleLength, tuple)) < 0)
+ /* CISTPL_DEVICE_0C */
+ status = dvb_ca_en50221_read_tuple(ca, slot, &address, &tuple_type,
+ &tuple_length, tuple);
+ if (status < 0)
return status;
- if (tupleType != 0x1C)
+ if (tuple_type != 0x1C)
return -EINVAL;
-
-
- // CISTPL_VERS_1
- if ((status =
- dvb_ca_en50221_read_tuple(ca, slot, &address, &tupleType, &tupleLength, tuple)) < 0)
+ /* CISTPL_VERS_1 */
+ status = dvb_ca_en50221_read_tuple(ca, slot, &address, &tuple_type,
+ &tuple_length, tuple);
+ if (status < 0)
return status;
- if (tupleType != 0x15)
+ if (tuple_type != 0x15)
return -EINVAL;
-
-
- // CISTPL_MANFID
- if ((status = dvb_ca_en50221_read_tuple(ca, slot, &address, &tupleType,
- &tupleLength, tuple)) < 0)
+ /* CISTPL_MANFID */
+ status = dvb_ca_en50221_read_tuple(ca, slot, &address, &tuple_type,
+ &tuple_length, tuple);
+ if (status < 0)
return status;
- if (tupleType != 0x20)
+ if (tuple_type != 0x20)
return -EINVAL;
- if (tupleLength != 4)
+ if (tuple_length != 4)
return -EINVAL;
manfid = (tuple[1] << 8) | tuple[0];
devid = (tuple[3] << 8) | tuple[2];
-
-
- // CISTPL_CONFIG
- if ((status = dvb_ca_en50221_read_tuple(ca, slot, &address, &tupleType,
- &tupleLength, tuple)) < 0)
+ /* CISTPL_CONFIG */
+ status = dvb_ca_en50221_read_tuple(ca, slot, &address, &tuple_type,
+ &tuple_length, tuple);
+ if (status < 0)
return status;
- if (tupleType != 0x1A)
+ if (tuple_type != 0x1A)
return -EINVAL;
- if (tupleLength < 3)
+ if (tuple_length < 3)
return -EINVAL;
/* extract the configbase */
rasz = tuple[0] & 3;
- if (tupleLength < (3 + rasz + 14))
+ if (tuple_length < (3 + rasz + 14))
return -EINVAL;
- ca->slot_info[slot].config_base = 0;
- for (i = 0; i < rasz + 1; i++) {
- ca->slot_info[slot].config_base |= (tuple[2 + i] << (8 * i));
- }
+ sl = &ca->slot_info[slot];
+ sl->config_base = 0;
+ for (i = 0; i < rasz + 1; i++)
+ sl->config_base |= (tuple[2 + i] << (8 * i));
/* check it contains the correct DVB string */
- dvb_str = findstr((char *)tuple, tupleLength, "DVB_CI_V", 8);
- if (dvb_str == NULL)
+ dvb_str = findstr((char *)tuple, tuple_length, "DVB_CI_V", 8);
+ if (!dvb_str)
return -EINVAL;
- if (tupleLength < ((dvb_str - (char *) tuple) + 12))
+ if (tuple_length < ((dvb_str - (char *)tuple) + 12))
return -EINVAL;
/* is it a version we support? */
@@ -533,12 +545,14 @@ static int dvb_ca_en50221_parse_attributes(struct dvb_ca_private *ca, int slot)
/* process the CFTABLE_ENTRY tuples, and any after those */
while ((!end_chain) && (address < 0x1000)) {
- if ((status = dvb_ca_en50221_read_tuple(ca, slot, &address, &tupleType,
- &tupleLength, tuple)) < 0)
+ status = dvb_ca_en50221_read_tuple(ca, slot, &address,
+ &tuple_type, &tuple_length,
+ tuple);
+ if (status < 0)
return status;
- switch (tupleType) {
- case 0x1B: // CISTPL_CFTABLE_ENTRY
- if (tupleLength < (2 + 11 + 17))
+ switch (tuple_type) {
+ case 0x1B: /* CISTPL_CFTABLE_ENTRY */
+ if (tuple_length < (2 + 11 + 17))
break;
/* if we've already parsed one, just use it */
@@ -546,26 +560,28 @@ static int dvb_ca_en50221_parse_attributes(struct dvb_ca_private *ca, int slot)
break;
/* get the config option */
- ca->slot_info[slot].config_option = tuple[0] & 0x3f;
+ sl->config_option = tuple[0] & 0x3f;
/* OK, check it contains the correct strings */
- if ((findstr((char *)tuple, tupleLength, "DVB_HOST", 8) == NULL) ||
- (findstr((char *)tuple, tupleLength, "DVB_CI_MODULE", 13) == NULL))
+ if (!findstr((char *)tuple, tuple_length,
+ "DVB_HOST", 8) ||
+ !findstr((char *)tuple, tuple_length,
+ "DVB_CI_MODULE", 13))
break;
got_cftableentry = 1;
break;
- case 0x14: // CISTPL_NO_LINK
+ case 0x14: /* CISTPL_NO_LINK */
break;
- case 0xFF: // CISTPL_END
+ case 0xFF: /* CISTPL_END */
end_chain = 1;
break;
- default: /* Unknown tuple type - just skip this tuple and move to the next one */
+ default: /* Unknown tuple type - just skip this tuple */
dprintk("dvb_ca: Skipping unknown tuple type:0x%x length:0x%x\n",
- tupleType, tupleLength);
+ tuple_type, tuple_length);
break;
}
}
@@ -574,14 +590,12 @@ static int dvb_ca_en50221_parse_attributes(struct dvb_ca_private *ca, int slot)
return -EINVAL;
dprintk("Valid DVB CAM detected MANID:%x DEVID:%x CONFIGBASE:0x%x CONFIGOPTION:0x%x\n",
- manfid, devid, ca->slot_info[slot].config_base,
- ca->slot_info[slot].config_option);
+ manfid, devid, sl->config_base, sl->config_option);
- // success!
+ /* success! */
return 0;
}
-
/**
* dvb_ca_en50221_set_configoption - Set CAM's configoption correctly.
*
@@ -590,26 +604,25 @@ static int dvb_ca_en50221_parse_attributes(struct dvb_ca_private *ca, int slot)
*/
static int dvb_ca_en50221_set_configoption(struct dvb_ca_private *ca, int slot)
{
+ struct dvb_ca_slot *sl = &ca->slot_info[slot];
int configoption;
dprintk("%s\n", __func__);
/* set the config option */
- ca->pub->write_attribute_mem(ca->pub, slot,
- ca->slot_info[slot].config_base,
- ca->slot_info[slot].config_option);
+ ca->pub->write_attribute_mem(ca->pub, slot, sl->config_base,
+ sl->config_option);
/* check it */
- configoption = ca->pub->read_attribute_mem(ca->pub, slot, ca->slot_info[slot].config_base);
+ configoption = ca->pub->read_attribute_mem(ca->pub, slot,
+ sl->config_base);
dprintk("Set configoption 0x%x, read configoption 0x%x\n",
- ca->slot_info[slot].config_option, configoption & 0x3f);
+ sl->config_option, configoption & 0x3f);
/* fine! */
return 0;
-
}
-
/**
* dvb_ca_en50221_read_data - This function talks to an EN50221 CAM control
* interface. It reads a buffer of data from the CAM. The data can either
@@ -627,6 +640,7 @@ static int dvb_ca_en50221_set_configoption(struct dvb_ca_private *ca, int slot)
static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot,
u8 *ebuf, int ecount)
{
+ struct dvb_ca_slot *sl = &ca->slot_info[slot];
int bytes_read;
int status;
u8 buf[HOST_LINK_BUF_SIZE];
@@ -635,90 +649,118 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot,
dprintk("%s\n", __func__);
/* check if we have space for a link buf in the rx_buffer */
- if (ebuf == NULL) {
+ if (!ebuf) {
int buf_free;
- if (ca->slot_info[slot].rx_buffer.data == NULL) {
+ if (!sl->rx_buffer.data) {
status = -EIO;
goto exit;
}
- buf_free = dvb_ringbuffer_free(&ca->slot_info[slot].rx_buffer);
+ buf_free = dvb_ringbuffer_free(&sl->rx_buffer);
- if (buf_free < (ca->slot_info[slot].link_buf_size + DVB_RINGBUFFER_PKTHDRSIZE)) {
+ if (buf_free < (sl->link_buf_size +
+ DVB_RINGBUFFER_PKTHDRSIZE)) {
status = -EAGAIN;
goto exit;
}
}
- /* check if there is data available */
- if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS)) < 0)
- goto exit;
- if (!(status & STATUSREG_DA)) {
- /* no data */
- status = 0;
- goto exit;
- }
-
- /* read the amount of data */
- if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_SIZE_HIGH)) < 0)
- goto exit;
- bytes_read = status << 8;
- if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_SIZE_LOW)) < 0)
- goto exit;
- bytes_read |= status;
-
- /* check it will fit */
- if (ebuf == NULL) {
- if (bytes_read > ca->slot_info[slot].link_buf_size) {
- pr_err("dvb_ca adapter %d: CAM tried to send a buffer larger than the link buffer size (%i > %i)!\n",
- ca->dvbdev->adapter->num, bytes_read,
- ca->slot_info[slot].link_buf_size);
- ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_LINKINIT;
- status = -EIO;
- goto exit;
- }
- if (bytes_read < 2) {
- pr_err("dvb_ca adapter %d: CAM sent a buffer that was less than 2 bytes!\n",
- ca->dvbdev->adapter->num);
- ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_LINKINIT;
- status = -EIO;
+ if (ca->pub->read_data &&
+ (sl->slot_state != DVB_CA_SLOTSTATE_LINKINIT)) {
+ if (!ebuf)
+ status = ca->pub->read_data(ca->pub, slot, buf,
+ sizeof(buf));
+ else
+ status = ca->pub->read_data(ca->pub, slot, buf, ecount);
+ if (status < 0)
+ return status;
+ bytes_read = status;
+ if (status == 0)
goto exit;
- }
} else {
- if (bytes_read > ecount) {
- pr_err("dvb_ca adapter %d: CAM tried to send a buffer larger than the ecount size!\n",
- ca->dvbdev->adapter->num);
- status = -EIO;
+ /* check if there is data available */
+ status = ca->pub->read_cam_control(ca->pub, slot,
+ CTRLIF_STATUS);
+ if (status < 0)
+ goto exit;
+ if (!(status & STATUSREG_DA)) {
+ /* no data */
+ status = 0;
goto exit;
}
- }
- /* fill the buffer */
- for (i = 0; i < bytes_read; i++) {
- /* read byte and check */
- if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_DATA)) < 0)
+ /* read the amount of data */
+ status = ca->pub->read_cam_control(ca->pub, slot,
+ CTRLIF_SIZE_HIGH);
+ if (status < 0)
goto exit;
+ bytes_read = status << 8;
+ status = ca->pub->read_cam_control(ca->pub, slot,
+ CTRLIF_SIZE_LOW);
+ if (status < 0)
+ goto exit;
+ bytes_read |= status;
+
+ /* check it will fit */
+ if (!ebuf) {
+ if (bytes_read > sl->link_buf_size) {
+ pr_err("dvb_ca adapter %d: CAM tried to send a buffer larger than the link buffer size (%i > %i)!\n",
+ ca->dvbdev->adapter->num, bytes_read,
+ sl->link_buf_size);
+ sl->slot_state = DVB_CA_SLOTSTATE_LINKINIT;
+ status = -EIO;
+ goto exit;
+ }
+ if (bytes_read < 2) {
+ pr_err("dvb_ca adapter %d: CAM sent a buffer that was less than 2 bytes!\n",
+ ca->dvbdev->adapter->num);
+ sl->slot_state = DVB_CA_SLOTSTATE_LINKINIT;
+ status = -EIO;
+ goto exit;
+ }
+ } else {
+ if (bytes_read > ecount) {
+ pr_err("dvb_ca adapter %d: CAM tried to send a buffer larger than the ecount size!\n",
+ ca->dvbdev->adapter->num);
+ status = -EIO;
+ goto exit;
+ }
+ }
- /* OK, store it in the buffer */
- buf[i] = status;
- }
+ /* fill the buffer */
+ for (i = 0; i < bytes_read; i++) {
+ /* read byte and check */
+ status = ca->pub->read_cam_control(ca->pub, slot,
+ CTRLIF_DATA);
+ if (status < 0)
+ goto exit;
- /* check for read error (RE should now be 0) */
- if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS)) < 0)
- goto exit;
- if (status & STATUSREG_RE) {
- ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_LINKINIT;
- status = -EIO;
- goto exit;
+ /* OK, store it in the buffer */
+ buf[i] = status;
+ }
+
+ /* check for read error (RE should now be 0) */
+ status = ca->pub->read_cam_control(ca->pub, slot,
+ CTRLIF_STATUS);
+ if (status < 0)
+ goto exit;
+ if (status & STATUSREG_RE) {
+ sl->slot_state = DVB_CA_SLOTSTATE_LINKINIT;
+ status = -EIO;
+ goto exit;
+ }
}
- /* OK, add it to the receive buffer, or copy into external buffer if supplied */
- if (ebuf == NULL) {
- if (ca->slot_info[slot].rx_buffer.data == NULL) {
+ /*
+ * OK, add it to the receive buffer, or copy into external buffer if
+ * supplied
+ */
+ if (!ebuf) {
+ if (!sl->rx_buffer.data) {
status = -EIO;
goto exit;
}
- dvb_ringbuffer_pkt_write(&ca->slot_info[slot].rx_buffer, buf, bytes_read);
+ dvb_ringbuffer_pkt_write(&sl->rx_buffer, buf, bytes_read);
} else {
memcpy(ebuf, buf, bytes_read);
}
@@ -727,16 +769,15 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot,
buf[0], (buf[1] & 0x80) == 0, bytes_read);
/* wake up readers when a last_fragment is received */
- if ((buf[1] & 0x80) == 0x00) {
+ if ((buf[1] & 0x80) == 0x00)
wake_up_interruptible(&ca->wait_queue);
- }
+
status = bytes_read;
exit:
return status;
}
-
/**
* dvb_ca_en50221_write_data - This function talks to an EN50221 CAM control
* interface. It writes a buffer of data to a CAM.
@@ -752,21 +793,28 @@ exit:
static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot,
u8 *buf, int bytes_write)
{
+ struct dvb_ca_slot *sl = &ca->slot_info[slot];
int status;
int i;
dprintk("%s\n", __func__);
-
/* sanity check */
- if (bytes_write > ca->slot_info[slot].link_buf_size)
+ if (bytes_write > sl->link_buf_size)
return -EINVAL;
- /* it is possible we are dealing with a single buffer implementation,
- thus if there is data available for read or if there is even a read
- already in progress, we do nothing but awake the kernel thread to
- process the data if necessary. */
- if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS)) < 0)
+ if (ca->pub->write_data &&
+ (sl->slot_state != DVB_CA_SLOTSTATE_LINKINIT))
+ return ca->pub->write_data(ca->pub, slot, buf, bytes_write);
+
+ /*
+ * it is possible we are dealing with a single buffer implementation,
+ * thus if there is data available for read or if there is even a read
+ * already in progress, we do nothing but awake the kernel thread to
+ * process the data if necessary.
+ */
+ status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS);
+ if (status < 0)
goto exitnowrite;
if (status & (STATUSREG_DA | STATUSREG_RE)) {
if (status & STATUSREG_DA)
@@ -777,12 +825,14 @@ static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot,
}
/* OK, set HC bit */
- if ((status = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND,
- IRQEN | CMDREG_HC)) != 0)
+ status = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND,
+ IRQEN | CMDREG_HC);
+ if (status)
goto exit;
/* check if interface is still free */
- if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS)) < 0)
+ status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS);
+ if (status < 0)
goto exit;
if (!(status & STATUSREG_FR)) {
/* it wasn't free => try again later */
@@ -814,23 +864,29 @@ static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot,
}
/* send the amount of data */
- if ((status = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_SIZE_HIGH, bytes_write >> 8)) != 0)
+ status = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_SIZE_HIGH,
+ bytes_write >> 8);
+ if (status)
goto exit;
- if ((status = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_SIZE_LOW,
- bytes_write & 0xff)) != 0)
+ status = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_SIZE_LOW,
+ bytes_write & 0xff);
+ if (status)
goto exit;
/* send the buffer */
for (i = 0; i < bytes_write; i++) {
- if ((status = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_DATA, buf[i])) != 0)
+ status = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_DATA,
+ buf[i]);
+ if (status)
goto exit;
}
/* check for write error (WE should now be 0) */
- if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS)) < 0)
+ status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS);
+ if (status < 0)
goto exit;
if (status & STATUSREG_WE) {
- ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_LINKINIT;
+ sl->slot_state = DVB_CA_SLOTSTATE_LINKINIT;
status = -EIO;
goto exit;
}
@@ -846,12 +902,9 @@ exitnowrite:
return status;
}
-
-
-/* ******************************************************************************** */
+/* ************************************************************************** */
/* EN50221 higher level functions */
-
/**
* dvb_ca_en50221_slot_shutdown - A CAM has been removed => shut it down.
*
@@ -865,8 +918,10 @@ static int dvb_ca_en50221_slot_shutdown(struct dvb_ca_private *ca, int slot)
ca->pub->slot_shutdown(ca->pub, slot);
ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_NONE;
- /* need to wake up all processes to check if they're now
- trying to write to a defunct CAM */
+ /*
+ * need to wake up all processes to check if they're now trying to
+ * write to a defunct CAM
+ */
wake_up_interruptible(&ca->wait_queue);
dprintk("Slot %i shutdown\n", slot);
@@ -875,7 +930,6 @@ static int dvb_ca_en50221_slot_shutdown(struct dvb_ca_private *ca, int slot)
return 0;
}
-
/**
* dvb_ca_en50221_camchange_irq - A CAMCHANGE IRQ has occurred.
*
@@ -883,9 +937,11 @@ static int dvb_ca_en50221_slot_shutdown(struct dvb_ca_private *ca, int slot)
* @slot: Slot concerned.
* @change_type: One of the DVB_CA_CAMCHANGE_* values.
*/
-void dvb_ca_en50221_camchange_irq(struct dvb_ca_en50221 *pubca, int slot, int change_type)
+void dvb_ca_en50221_camchange_irq(struct dvb_ca_en50221 *pubca, int slot,
+ int change_type)
{
struct dvb_ca_private *ca = pubca->private;
+ struct dvb_ca_slot *sl = &ca->slot_info[slot];
dprintk("CAMCHANGE IRQ slot:%i change_type:%i\n", slot, change_type);
@@ -898,13 +954,12 @@ void dvb_ca_en50221_camchange_irq(struct dvb_ca_en50221 *pubca, int slot, int ch
return;
}
- ca->slot_info[slot].camchange_type = change_type;
- atomic_inc(&ca->slot_info[slot].camchange_count);
+ sl->camchange_type = change_type;
+ atomic_inc(&sl->camchange_count);
dvb_ca_en50221_thread_wakeup(ca);
}
EXPORT_SYMBOL(dvb_ca_en50221_camchange_irq);
-
/**
* dvb_ca_en50221_camready_irq - A CAMREADY IRQ has occurred.
*
@@ -914,17 +969,17 @@ EXPORT_SYMBOL(dvb_ca_en50221_camchange_irq);
void dvb_ca_en50221_camready_irq(struct dvb_ca_en50221 *pubca, int slot)
{
struct dvb_ca_private *ca = pubca->private;
+ struct dvb_ca_slot *sl = &ca->slot_info[slot];
dprintk("CAMREADY IRQ slot:%i\n", slot);
- if (ca->slot_info[slot].slot_state == DVB_CA_SLOTSTATE_WAITREADY) {
- ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_VALIDATE;
+ if (sl->slot_state == DVB_CA_SLOTSTATE_WAITREADY) {
+ sl->slot_state = DVB_CA_SLOTSTATE_VALIDATE;
dvb_ca_en50221_thread_wakeup(ca);
}
}
EXPORT_SYMBOL(dvb_ca_en50221_camready_irq);
-
/**
* dvb_ca_en50221_frda_irq - An FR or DA IRQ has occurred.
*
@@ -934,16 +989,17 @@ EXPORT_SYMBOL(dvb_ca_en50221_camready_irq);
void dvb_ca_en50221_frda_irq(struct dvb_ca_en50221 *pubca, int slot)
{
struct dvb_ca_private *ca = pubca->private;
+ struct dvb_ca_slot *sl = &ca->slot_info[slot];
int flags;
dprintk("FR/DA IRQ slot:%i\n", slot);
- switch (ca->slot_info[slot].slot_state) {
+ switch (sl->slot_state) {
case DVB_CA_SLOTSTATE_LINKINIT:
flags = ca->pub->read_cam_control(pubca, slot, CTRLIF_STATUS);
if (flags & STATUSREG_DA) {
dprintk("CAM supports DA IRQ\n");
- ca->slot_info[slot].da_irq_supported = 1;
+ sl->da_irq_supported = 1;
}
break;
@@ -955,8 +1011,7 @@ void dvb_ca_en50221_frda_irq(struct dvb_ca_en50221 *pubca, int slot)
}
EXPORT_SYMBOL(dvb_ca_en50221_frda_irq);
-
-/* ******************************************************************************** */
+/* ************************************************************************** */
/* EN50221 thread functions */
/**
@@ -966,7 +1021,6 @@ EXPORT_SYMBOL(dvb_ca_en50221_frda_irq);
*/
static void dvb_ca_en50221_thread_wakeup(struct dvb_ca_private *ca)
{
-
dprintk("%s\n", __func__);
ca->wakeup = 1;
@@ -985,11 +1039,14 @@ static void dvb_ca_en50221_thread_update_delay(struct dvb_ca_private *ca)
int curdelay = 100000000;
int slot;
- /* Beware of too high polling frequency, because one polling
+ /*
+ * Beware of too high polling frequency, because one polling
* call might take several hundred milliseconds until timeout!
*/
for (slot = 0; slot < ca->slot_count; slot++) {
- switch (ca->slot_info[slot].slot_state) {
+ struct dvb_ca_slot *sl = &ca->slot_info[slot];
+
+ switch (sl->slot_state) {
default:
case DVB_CA_SLOTSTATE_NONE:
delay = HZ * 60; /* 60s */
@@ -1015,7 +1072,7 @@ static void dvb_ca_en50221_thread_update_delay(struct dvb_ca_private *ca)
if (!(ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE))
delay = HZ / 10; /* 100ms */
if (ca->open) {
- if ((!ca->slot_info[slot].da_irq_supported) ||
+ if ((!sl->da_irq_supported) ||
(!(ca->flags & DVB_CA_EN50221_FLAG_IRQ_DA)))
delay = HZ / 10; /* 100ms */
}
@@ -1029,214 +1086,250 @@ static void dvb_ca_en50221_thread_update_delay(struct dvb_ca_private *ca)
ca->delay = curdelay;
}
+/**
+ * Poll if the CAM is gone.
+ *
+ * @ca: CA instance.
+ * @slot: Slot to process.
+ * @return: 0 .. no change
+ * 1 .. CAM state changed
+ */
+
+static int dvb_ca_en50221_poll_cam_gone(struct dvb_ca_private *ca, int slot)
+{
+ int changed = 0;
+ int status;
+ /*
+ * we need this extra check for annoying interfaces like the
+ * budget-av
+ */
+ if ((!(ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE)) &&
+ (ca->pub->poll_slot_status)) {
+ status = ca->pub->poll_slot_status(ca->pub, slot, 0);
+ if (!(status &
+ DVB_CA_EN50221_POLL_CAM_PRESENT)) {
+ ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_NONE;
+ dvb_ca_en50221_thread_update_delay(ca);
+ changed = 1;
+ }
+ }
+ return changed;
+}
/**
- * Kernel thread which monitors CA slots for CAM changes, and performs data transfers.
+ * Thread state machine for one CA slot to perform the data transfer.
+ *
+ * @ca: CA instance.
+ * @slot: Slot to process.
*/
-static int dvb_ca_en50221_thread(void *data)
+static void dvb_ca_en50221_thread_state_machine(struct dvb_ca_private *ca,
+ int slot)
{
- struct dvb_ca_private *ca = data;
- int slot;
+ struct dvb_ca_slot *sl = &ca->slot_info[slot];
int flags;
- int status;
int pktcount;
void *rxbuf;
- dprintk("%s\n", __func__);
+ mutex_lock(&sl->slot_lock);
- /* choose the correct initial delay */
- dvb_ca_en50221_thread_update_delay(ca);
+ /* check the cam status + deal with CAMCHANGEs */
+ while (dvb_ca_en50221_check_camstatus(ca, slot)) {
+ /* clear down an old CI slot if necessary */
+ if (sl->slot_state != DVB_CA_SLOTSTATE_NONE)
+ dvb_ca_en50221_slot_shutdown(ca, slot);
- /* main loop */
- while (!kthread_should_stop()) {
- /* sleep for a bit */
- if (!ca->wakeup) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(ca->delay);
- if (kthread_should_stop())
- return 0;
+ /* if a CAM is NOW present, initialise it */
+ if (sl->camchange_type == DVB_CA_EN50221_CAMCHANGE_INSERTED)
+ sl->slot_state = DVB_CA_SLOTSTATE_UNINITIALISED;
+
+ /* we've handled one CAMCHANGE */
+ dvb_ca_en50221_thread_update_delay(ca);
+ atomic_dec(&sl->camchange_count);
+ }
+
+ /* CAM state machine */
+ switch (sl->slot_state) {
+ case DVB_CA_SLOTSTATE_NONE:
+ case DVB_CA_SLOTSTATE_INVALID:
+ /* no action needed */
+ break;
+
+ case DVB_CA_SLOTSTATE_UNINITIALISED:
+ sl->slot_state = DVB_CA_SLOTSTATE_WAITREADY;
+ ca->pub->slot_reset(ca->pub, slot);
+ sl->timeout = jiffies + (INIT_TIMEOUT_SECS * HZ);
+ break;
+
+ case DVB_CA_SLOTSTATE_WAITREADY:
+ if (time_after(jiffies, sl->timeout)) {
+ pr_err("dvb_ca adaptor %d: PC card did not respond :(\n",
+ ca->dvbdev->adapter->num);
+ sl->slot_state = DVB_CA_SLOTSTATE_INVALID;
+ dvb_ca_en50221_thread_update_delay(ca);
+ break;
}
- ca->wakeup = 0;
+ /*
+ * no other action needed; will automatically change state when
+ * ready
+ */
+ break;
- /* go through all the slots processing them */
- for (slot = 0; slot < ca->slot_count; slot++) {
+ case DVB_CA_SLOTSTATE_VALIDATE:
+ if (dvb_ca_en50221_parse_attributes(ca, slot) != 0) {
+ if (dvb_ca_en50221_poll_cam_gone(ca, slot))
+ break;
- mutex_lock(&ca->slot_info[slot].slot_lock);
+ pr_err("dvb_ca adapter %d: Invalid PC card inserted :(\n",
+ ca->dvbdev->adapter->num);
+ sl->slot_state = DVB_CA_SLOTSTATE_INVALID;
+ dvb_ca_en50221_thread_update_delay(ca);
+ break;
+ }
+ if (dvb_ca_en50221_set_configoption(ca, slot) != 0) {
+ pr_err("dvb_ca adapter %d: Unable to initialise CAM :(\n",
+ ca->dvbdev->adapter->num);
+ sl->slot_state = DVB_CA_SLOTSTATE_INVALID;
+ dvb_ca_en50221_thread_update_delay(ca);
+ break;
+ }
+ if (ca->pub->write_cam_control(ca->pub, slot,
+ CTRLIF_COMMAND,
+ CMDREG_RS) != 0) {
+ pr_err("dvb_ca adapter %d: Unable to reset CAM IF\n",
+ ca->dvbdev->adapter->num);
+ sl->slot_state = DVB_CA_SLOTSTATE_INVALID;
+ dvb_ca_en50221_thread_update_delay(ca);
+ break;
+ }
+ dprintk("DVB CAM validated successfully\n");
- // check the cam status + deal with CAMCHANGEs
- while (dvb_ca_en50221_check_camstatus(ca, slot)) {
- /* clear down an old CI slot if necessary */
- if (ca->slot_info[slot].slot_state != DVB_CA_SLOTSTATE_NONE)
- dvb_ca_en50221_slot_shutdown(ca, slot);
+ sl->timeout = jiffies + (INIT_TIMEOUT_SECS * HZ);
+ sl->slot_state = DVB_CA_SLOTSTATE_WAITFR;
+ ca->wakeup = 1;
+ break;
- /* if a CAM is NOW present, initialise it */
- if (ca->slot_info[slot].camchange_type == DVB_CA_EN50221_CAMCHANGE_INSERTED) {
- ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_UNINITIALISED;
- }
+ case DVB_CA_SLOTSTATE_WAITFR:
+ if (time_after(jiffies, sl->timeout)) {
+ pr_err("dvb_ca adapter %d: DVB CAM did not respond :(\n",
+ ca->dvbdev->adapter->num);
+ sl->slot_state = DVB_CA_SLOTSTATE_INVALID;
+ dvb_ca_en50221_thread_update_delay(ca);
+ break;
+ }
- /* we've handled one CAMCHANGE */
- dvb_ca_en50221_thread_update_delay(ca);
- atomic_dec(&ca->slot_info[slot].camchange_count);
- }
+ flags = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS);
+ if (flags & STATUSREG_FR) {
+ sl->slot_state = DVB_CA_SLOTSTATE_LINKINIT;
+ ca->wakeup = 1;
+ }
+ break;
- // CAM state machine
- switch (ca->slot_info[slot].slot_state) {
- case DVB_CA_SLOTSTATE_NONE:
- case DVB_CA_SLOTSTATE_INVALID:
- // no action needed
+ case DVB_CA_SLOTSTATE_LINKINIT:
+ if (dvb_ca_en50221_link_init(ca, slot) != 0) {
+ if (dvb_ca_en50221_poll_cam_gone(ca, slot))
break;
- case DVB_CA_SLOTSTATE_UNINITIALISED:
- ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_WAITREADY;
- ca->pub->slot_reset(ca->pub, slot);
- ca->slot_info[slot].timeout = jiffies + (INIT_TIMEOUT_SECS * HZ);
- break;
+ pr_err("dvb_ca adapter %d: DVB CAM link initialisation failed :(\n",
+ ca->dvbdev->adapter->num);
+ sl->slot_state = DVB_CA_SLOTSTATE_UNINITIALISED;
+ dvb_ca_en50221_thread_update_delay(ca);
+ break;
+ }
- case DVB_CA_SLOTSTATE_WAITREADY:
- if (time_after(jiffies, ca->slot_info[slot].timeout)) {
- pr_err("dvb_ca adaptor %d: PC card did not respond :(\n",
- ca->dvbdev->adapter->num);
- ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID;
- dvb_ca_en50221_thread_update_delay(ca);
- break;
- }
- // no other action needed; will automatically change state when ready
+ if (!sl->rx_buffer.data) {
+ rxbuf = vmalloc(RX_BUFFER_SIZE);
+ if (!rxbuf) {
+ pr_err("dvb_ca adapter %d: Unable to allocate CAM rx buffer :(\n",
+ ca->dvbdev->adapter->num);
+ sl->slot_state = DVB_CA_SLOTSTATE_INVALID;
+ dvb_ca_en50221_thread_update_delay(ca);
break;
+ }
+ dvb_ringbuffer_init(&sl->rx_buffer, rxbuf,
+ RX_BUFFER_SIZE);
+ }
- case DVB_CA_SLOTSTATE_VALIDATE:
- if (dvb_ca_en50221_parse_attributes(ca, slot) != 0) {
- /* we need this extra check for annoying interfaces like the budget-av */
- if ((!(ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE)) &&
- (ca->pub->poll_slot_status)) {
- status = ca->pub->poll_slot_status(ca->pub, slot, 0);
- if (!(status & DVB_CA_EN50221_POLL_CAM_PRESENT)) {
- ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_NONE;
- dvb_ca_en50221_thread_update_delay(ca);
- break;
- }
- }
-
- pr_err("dvb_ca adapter %d: Invalid PC card inserted :(\n",
- ca->dvbdev->adapter->num);
- ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID;
- dvb_ca_en50221_thread_update_delay(ca);
- break;
- }
- if (dvb_ca_en50221_set_configoption(ca, slot) != 0) {
- pr_err("dvb_ca adapter %d: Unable to initialise CAM :(\n",
- ca->dvbdev->adapter->num);
- ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID;
- dvb_ca_en50221_thread_update_delay(ca);
- break;
- }
- if (ca->pub->write_cam_control(ca->pub, slot,
- CTRLIF_COMMAND, CMDREG_RS) != 0) {
- pr_err("dvb_ca adapter %d: Unable to reset CAM IF\n",
- ca->dvbdev->adapter->num);
- ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID;
- dvb_ca_en50221_thread_update_delay(ca);
- break;
- }
- dprintk("DVB CAM validated successfully\n");
-
- ca->slot_info[slot].timeout = jiffies + (INIT_TIMEOUT_SECS * HZ);
- ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_WAITFR;
- ca->wakeup = 1;
- break;
+ ca->pub->slot_ts_enable(ca->pub, slot);
+ sl->slot_state = DVB_CA_SLOTSTATE_RUNNING;
+ dvb_ca_en50221_thread_update_delay(ca);
+ pr_err("dvb_ca adapter %d: DVB CAM detected and initialised successfully\n",
+ ca->dvbdev->adapter->num);
+ break;
- case DVB_CA_SLOTSTATE_WAITFR:
- if (time_after(jiffies, ca->slot_info[slot].timeout)) {
- pr_err("dvb_ca adapter %d: DVB CAM did not respond :(\n",
- ca->dvbdev->adapter->num);
- ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID;
- dvb_ca_en50221_thread_update_delay(ca);
- break;
- }
-
- flags = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS);
- if (flags & STATUSREG_FR) {
- ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_LINKINIT;
- ca->wakeup = 1;
- }
- break;
+ case DVB_CA_SLOTSTATE_RUNNING:
+ if (!ca->open)
+ break;
- case DVB_CA_SLOTSTATE_LINKINIT:
- if (dvb_ca_en50221_link_init(ca, slot) != 0) {
- /* we need this extra check for annoying interfaces like the budget-av */
- if ((!(ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE)) &&
- (ca->pub->poll_slot_status)) {
- status = ca->pub->poll_slot_status(ca->pub, slot, 0);
- if (!(status & DVB_CA_EN50221_POLL_CAM_PRESENT)) {
- ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_NONE;
- dvb_ca_en50221_thread_update_delay(ca);
- break;
- }
- }
-
- pr_err("dvb_ca adapter %d: DVB CAM link initialisation failed :(\n",
- ca->dvbdev->adapter->num);
- ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID;
- dvb_ca_en50221_thread_update_delay(ca);
- break;
- }
-
- if (ca->slot_info[slot].rx_buffer.data == NULL) {
- rxbuf = vmalloc(RX_BUFFER_SIZE);
- if (rxbuf == NULL) {
- pr_err("dvb_ca adapter %d: Unable to allocate CAM rx buffer :(\n",
- ca->dvbdev->adapter->num);
- ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID;
- dvb_ca_en50221_thread_update_delay(ca);
- break;
- }
- dvb_ringbuffer_init(&ca->slot_info[slot].rx_buffer, rxbuf, RX_BUFFER_SIZE);
- }
-
- ca->pub->slot_ts_enable(ca->pub, slot);
- ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_RUNNING;
- dvb_ca_en50221_thread_update_delay(ca);
- pr_err("dvb_ca adapter %d: DVB CAM detected and initialised successfully\n",
- ca->dvbdev->adapter->num);
+ /* poll slots for data */
+ pktcount = 0;
+ while (dvb_ca_en50221_read_data(ca, slot, NULL, 0) > 0) {
+ if (!ca->open)
break;
- case DVB_CA_SLOTSTATE_RUNNING:
- if (!ca->open)
- break;
-
- // poll slots for data
- pktcount = 0;
- while ((status = dvb_ca_en50221_read_data(ca, slot, NULL, 0)) > 0) {
- if (!ca->open)
- break;
-
- /* if a CAMCHANGE occurred at some point, do not do any more processing of this slot */
- if (dvb_ca_en50221_check_camstatus(ca, slot)) {
- // we dont want to sleep on the next iteration so we can handle the cam change
- ca->wakeup = 1;
- break;
- }
-
- /* check if we've hit our limit this time */
- if (++pktcount >= MAX_RX_PACKETS_PER_ITERATION) {
- // dont sleep; there is likely to be more data to read
- ca->wakeup = 1;
- break;
- }
- }
+ /*
+ * if a CAMCHANGE occurred at some point, do not do any
+ * more processing of this slot
+ */
+ if (dvb_ca_en50221_check_camstatus(ca, slot)) {
+ /*
+ * we don't want to sleep on the next iteration
+ * so we can handle the cam change
+ */
+ ca->wakeup = 1;
break;
}
- mutex_unlock(&ca->slot_info[slot].slot_lock);
+ /* check if we've hit our limit this time */
+ if (++pktcount >= MAX_RX_PACKETS_PER_ITERATION) {
+ /*
+ * don't sleep; there is likely to be more data
+ * to read
+ */
+ ca->wakeup = 1;
+ break;
+ }
}
+ break;
}
- return 0;
+ mutex_unlock(&sl->slot_lock);
}
+/**
+ * Kernel thread which monitors CA slots for CAM changes, and performs data
+ * transfers.
+ */
+static int dvb_ca_en50221_thread(void *data)
+{
+ struct dvb_ca_private *ca = data;
+ int slot;
+
+ dprintk("%s\n", __func__);
+ /* choose the correct initial delay */
+ dvb_ca_en50221_thread_update_delay(ca);
-/* ******************************************************************************** */
+ /* main loop */
+ while (!kthread_should_stop()) {
+ /* sleep for a bit */
+ if (!ca->wakeup) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(ca->delay);
+ if (kthread_should_stop())
+ return 0;
+ }
+ ca->wakeup = 0;
+
+ /* go through all the slots processing them */
+ for (slot = 0; slot < ca->slot_count; slot++)
+ dvb_ca_en50221_thread_state_machine(ca, slot);
+ }
+
+ return 0;
+}
+
+/* ************************************************************************** */
/* EN50221 IO interface functions */
/**
@@ -1266,15 +1359,17 @@ static int dvb_ca_en50221_io_do_ioctl(struct file *file,
switch (cmd) {
case CA_RESET:
for (slot = 0; slot < ca->slot_count; slot++) {
- mutex_lock(&ca->slot_info[slot].slot_lock);
- if (ca->slot_info[slot].slot_state != DVB_CA_SLOTSTATE_NONE) {
+ struct dvb_ca_slot *sl = &ca->slot_info[slot];
+
+ mutex_lock(&sl->slot_lock);
+ if (sl->slot_state != DVB_CA_SLOTSTATE_NONE) {
dvb_ca_en50221_slot_shutdown(ca, slot);
if (ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE)
dvb_ca_en50221_camchange_irq(ca->pub,
slot,
DVB_CA_EN50221_CAMCHANGE_INSERTED);
}
- mutex_unlock(&ca->slot_info[slot].slot_lock);
+ mutex_unlock(&sl->slot_lock);
}
ca->next_read_slot = 0;
dvb_ca_en50221_thread_wakeup(ca);
@@ -1292,21 +1387,23 @@ static int dvb_ca_en50221_io_do_ioctl(struct file *file,
case CA_GET_SLOT_INFO: {
struct ca_slot_info *info = parg;
+ struct dvb_ca_slot *sl;
- if ((info->num > ca->slot_count) || (info->num < 0)) {
+ slot = info->num;
+ if ((slot > ca->slot_count) || (slot < 0)) {
err = -EINVAL;
goto out_unlock;
}
info->type = CA_CI_LINK;
info->flags = 0;
- if ((ca->slot_info[info->num].slot_state != DVB_CA_SLOTSTATE_NONE)
- && (ca->slot_info[info->num].slot_state != DVB_CA_SLOTSTATE_INVALID)) {
+ sl = &ca->slot_info[slot];
+ if ((sl->slot_state != DVB_CA_SLOTSTATE_NONE) &&
+ (sl->slot_state != DVB_CA_SLOTSTATE_INVALID)) {
info->flags = CA_CI_MODULE_PRESENT;
}
- if (ca->slot_info[info->num].slot_state == DVB_CA_SLOTSTATE_RUNNING) {
+ if (sl->slot_state == DVB_CA_SLOTSTATE_RUNNING)
info->flags |= CA_CI_MODULE_READY;
- }
break;
}
@@ -1320,7 +1417,6 @@ out_unlock:
return err;
}
-
/**
* Wrapper for ioctl implementation.
*
@@ -1337,7 +1433,6 @@ static long dvb_ca_en50221_io_ioctl(struct file *file,
return dvb_usercopy(file, cmd, arg, dvb_ca_en50221_io_do_ioctl);
}
-
/**
* Implementation of write() syscall.
*
@@ -1354,6 +1449,7 @@ static ssize_t dvb_ca_en50221_io_write(struct file *file,
{
struct dvb_device *dvbdev = file->private_data;
struct dvb_ca_private *ca = dvbdev->priv;
+ struct dvb_ca_slot *sl;
u8 slot, connection_id;
int status;
u8 fragbuf[HOST_LINK_BUF_SIZE];
@@ -1364,7 +1460,10 @@ static ssize_t dvb_ca_en50221_io_write(struct file *file,
dprintk("%s\n", __func__);
- /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
+ /*
+ * Incoming packet has a 2 byte header.
+ * hdr[0] = slot_id, hdr[1] = connection_id
+ */
if (count < 2)
return -EINVAL;
@@ -1375,14 +1474,15 @@ static ssize_t dvb_ca_en50221_io_write(struct file *file,
return -EFAULT;
buf += 2;
count -= 2;
+ sl = &ca->slot_info[slot];
/* check if the slot is actually running */
- if (ca->slot_info[slot].slot_state != DVB_CA_SLOTSTATE_RUNNING)
+ if (sl->slot_state != DVB_CA_SLOTSTATE_RUNNING)
return -EINVAL;
/* fragment the packets & store in the buffer */
while (fragpos < count) {
- fraglen = ca->slot_info[slot].link_buf_size - 2;
+ fraglen = sl->link_buf_size - 2;
if (fraglen < 0)
break;
if (fraglen > HOST_LINK_BUF_SIZE - 2)
@@ -1401,15 +1501,19 @@ static ssize_t dvb_ca_en50221_io_write(struct file *file,
timeout = jiffies + HZ / 2;
written = 0;
while (!time_after(jiffies, timeout)) {
- /* check the CAM hasn't been removed/reset in the meantime */
- if (ca->slot_info[slot].slot_state != DVB_CA_SLOTSTATE_RUNNING) {
+ /*
+ * check the CAM hasn't been removed/reset in the
+ * meantime
+ */
+ if (sl->slot_state != DVB_CA_SLOTSTATE_RUNNING) {
status = -EIO;
goto exit;
}
- mutex_lock(&ca->slot_info[slot].slot_lock);
- status = dvb_ca_en50221_write_data(ca, slot, fragbuf, fraglen + 2);
- mutex_unlock(&ca->slot_info[slot].slot_lock);
+ mutex_lock(&sl->slot_lock);
+ status = dvb_ca_en50221_write_data(ca, slot, fragbuf,
+ fraglen + 2);
+ mutex_unlock(&sl->slot_lock);
if (status == (fraglen + 2)) {
written = 1;
break;
@@ -1417,7 +1521,7 @@ static ssize_t dvb_ca_en50221_io_write(struct file *file,
if (status != -EAGAIN)
goto exit;
- msleep(1);
+ usleep_range(1000, 1100);
}
if (!written) {
status = -EIO;
@@ -1432,7 +1536,6 @@ exit:
return status;
}
-
/**
* Condition for waking up in dvb_ca_en50221_io_read_condition
*/
@@ -1449,25 +1552,28 @@ static int dvb_ca_en50221_io_read_condition(struct dvb_ca_private *ca,
slot = ca->next_read_slot;
while ((slot_count < ca->slot_count) && (!found)) {
- if (ca->slot_info[slot].slot_state != DVB_CA_SLOTSTATE_RUNNING)
+ struct dvb_ca_slot *sl = &ca->slot_info[slot];
+
+ if (sl->slot_state != DVB_CA_SLOTSTATE_RUNNING)
goto nextslot;
- if (ca->slot_info[slot].rx_buffer.data == NULL) {
+ if (!sl->rx_buffer.data)
return 0;
- }
- idx = dvb_ringbuffer_pkt_next(&ca->slot_info[slot].rx_buffer, -1, &fraglen);
+ idx = dvb_ringbuffer_pkt_next(&sl->rx_buffer, -1, &fraglen);
while (idx != -1) {
- dvb_ringbuffer_pkt_read(&ca->slot_info[slot].rx_buffer, idx, 0, hdr, 2);
+ dvb_ringbuffer_pkt_read(&sl->rx_buffer, idx, 0, hdr, 2);
if (connection_id == -1)
connection_id = hdr[0];
- if ((hdr[0] == connection_id) && ((hdr[1] & 0x80) == 0)) {
+ if ((hdr[0] == connection_id) &&
+ ((hdr[1] & 0x80) == 0)) {
*_slot = slot;
found = 1;
break;
}
- idx = dvb_ringbuffer_pkt_next(&ca->slot_info[slot].rx_buffer, idx, &fraglen);
+ idx = dvb_ringbuffer_pkt_next(&sl->rx_buffer, idx,
+ &fraglen);
}
nextslot:
@@ -1479,7 +1585,6 @@ nextslot:
return found;
}
-
/**
* Implementation of read() syscall.
*
@@ -1495,6 +1600,7 @@ static ssize_t dvb_ca_en50221_io_read(struct file *file, char __user *buf,
{
struct dvb_device *dvbdev = file->private_data;
struct dvb_ca_private *ca = dvbdev->priv;
+ struct dvb_ca_slot *sl;
int status;
int result = 0;
u8 hdr[2];
@@ -1508,13 +1614,16 @@ static ssize_t dvb_ca_en50221_io_read(struct file *file, char __user *buf,
dprintk("%s\n", __func__);
- /* Outgoing packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
+ /*
+ * Outgoing packet has a 2 byte header.
+ * hdr[0] = slot_id, hdr[1] = connection_id
+ */
if (count < 2)
return -EINVAL;
/* wait for some data */
- if ((status = dvb_ca_en50221_io_read_condition(ca, &result, &slot)) == 0) {
-
+ status = dvb_ca_en50221_io_read_condition(ca, &result, &slot);
+ if (status == 0) {
/* if we're in nonblocking mode, exit immediately */
if (file->f_flags & O_NONBLOCK)
return -EWOULDBLOCK;
@@ -1530,7 +1639,8 @@ static ssize_t dvb_ca_en50221_io_read(struct file *file, char __user *buf,
return status;
}
- idx = dvb_ringbuffer_pkt_next(&ca->slot_info[slot].rx_buffer, -1, &fraglen);
+ sl = &ca->slot_info[slot];
+ idx = dvb_ringbuffer_pkt_next(&sl->rx_buffer, -1, &fraglen);
pktlen = 2;
do {
if (idx == -1) {
@@ -1540,21 +1650,24 @@ static ssize_t dvb_ca_en50221_io_read(struct file *file, char __user *buf,
goto exit;
}
- dvb_ringbuffer_pkt_read(&ca->slot_info[slot].rx_buffer, idx, 0, hdr, 2);
+ dvb_ringbuffer_pkt_read(&sl->rx_buffer, idx, 0, hdr, 2);
if (connection_id == -1)
connection_id = hdr[0];
if (hdr[0] == connection_id) {
if (pktlen < count) {
- if ((pktlen + fraglen - 2) > count) {
+ if ((pktlen + fraglen - 2) > count)
fraglen = count - pktlen;
- } else {
+ else
fraglen -= 2;
- }
- if ((status = dvb_ringbuffer_pkt_read_user(&ca->slot_info[slot].rx_buffer, idx, 2,
- buf + pktlen, fraglen)) < 0) {
+ status =
+ dvb_ringbuffer_pkt_read_user(&sl->rx_buffer,
+ idx, 2,
+ buf + pktlen,
+ fraglen);
+ if (status < 0)
goto exit;
- }
+
pktlen += fraglen;
}
@@ -1563,9 +1676,9 @@ static ssize_t dvb_ca_en50221_io_read(struct file *file, char __user *buf,
dispose = 1;
}
- idx2 = dvb_ringbuffer_pkt_next(&ca->slot_info[slot].rx_buffer, idx, &fraglen);
+ idx2 = dvb_ringbuffer_pkt_next(&sl->rx_buffer, idx, &fraglen);
if (dispose)
- dvb_ringbuffer_pkt_dispose(&ca->slot_info[slot].rx_buffer, idx);
+ dvb_ringbuffer_pkt_dispose(&sl->rx_buffer, idx);
idx = idx2;
dispose = 0;
} while (!last_fragment);
@@ -1583,7 +1696,6 @@ exit:
return status;
}
-
/**
* Implementation of file open syscall.
*
@@ -1611,12 +1723,16 @@ static int dvb_ca_en50221_io_open(struct inode *inode, struct file *file)
}
for (i = 0; i < ca->slot_count; i++) {
-
- if (ca->slot_info[i].slot_state == DVB_CA_SLOTSTATE_RUNNING) {
- if (ca->slot_info[i].rx_buffer.data != NULL) {
- /* it is safe to call this here without locks because
- * ca->open == 0. Data is not read in this case */
- dvb_ringbuffer_flush(&ca->slot_info[i].rx_buffer);
+ struct dvb_ca_slot *sl = &ca->slot_info[i];
+
+ if (sl->slot_state == DVB_CA_SLOTSTATE_RUNNING) {
+ if (!sl->rx_buffer.data) {
+ /*
+ * it is safe to call this here without locks
+ * because ca->open == 0. Data is not read in
+ * this case
+ */
+ dvb_ringbuffer_flush(&sl->rx_buffer);
}
}
}
@@ -1630,7 +1746,6 @@ static int dvb_ca_en50221_io_open(struct inode *inode, struct file *file)
return 0;
}
-
/**
* Implementation of file close syscall.
*
@@ -1660,7 +1775,6 @@ static int dvb_ca_en50221_io_release(struct inode *inode, struct file *file)
return err;
}
-
/**
* Implementation of poll() syscall.
*
@@ -1679,9 +1793,8 @@ static unsigned int dvb_ca_en50221_io_poll(struct file *file, poll_table *wait)
dprintk("%s\n", __func__);
- if (dvb_ca_en50221_io_read_condition(ca, &result, &slot) == 1) {
+ if (dvb_ca_en50221_io_read_condition(ca, &result, &slot) == 1)
mask |= POLLIN;
- }
/* if there is something, return now */
if (mask)
@@ -1690,14 +1803,11 @@ static unsigned int dvb_ca_en50221_io_poll(struct file *file, poll_table *wait)
/* wait for something to happen */
poll_wait(file, &ca->wait_queue, wait);
- if (dvb_ca_en50221_io_read_condition(ca, &result, &slot) == 1) {
+ if (dvb_ca_en50221_io_read_condition(ca, &result, &slot) == 1)
mask |= POLLIN;
- }
return mask;
}
-EXPORT_SYMBOL(dvb_ca_en50221_init);
-
static const struct file_operations dvb_ca_fops = {
.owner = THIS_MODULE,
@@ -1721,10 +1831,9 @@ static const struct dvb_device dvbdev_ca = {
.fops = &dvb_ca_fops,
};
-/* ******************************************************************************** */
+/* ************************************************************************** */
/* Initialisation/shutdown functions */
-
/**
* Initialise a new DVB CA EN50221 interface device.
*
@@ -1748,7 +1857,8 @@ int dvb_ca_en50221_init(struct dvb_adapter *dvb_adapter,
return -EINVAL;
/* initialise the system data */
- if ((ca = kzalloc(sizeof(struct dvb_ca_private), GFP_KERNEL)) == NULL) {
+ ca = kzalloc(sizeof(*ca), GFP_KERNEL);
+ if (!ca) {
ret = -ENOMEM;
goto exit;
}
@@ -1756,7 +1866,9 @@ int dvb_ca_en50221_init(struct dvb_adapter *dvb_adapter,
ca->pub = pubca;
ca->flags = flags;
ca->slot_count = slot_count;
- if ((ca->slot_info = kcalloc(slot_count, sizeof(struct dvb_ca_slot), GFP_KERNEL)) == NULL) {
+ ca->slot_info = kcalloc(slot_count, sizeof(struct dvb_ca_slot),
+ GFP_KERNEL);
+ if (!ca->slot_info) {
ret = -ENOMEM;
goto free_ca;
}
@@ -1767,17 +1879,20 @@ int dvb_ca_en50221_init(struct dvb_adapter *dvb_adapter,
pubca->private = ca;
/* register the DVB device */
- ret = dvb_register_device(dvb_adapter, &ca->dvbdev, &dvbdev_ca, ca, DVB_DEVICE_CA, 0);
+ ret = dvb_register_device(dvb_adapter, &ca->dvbdev, &dvbdev_ca, ca,
+ DVB_DEVICE_CA, 0);
if (ret)
goto free_slot_info;
/* now initialise each slot */
for (i = 0; i < slot_count; i++) {
- memset(&ca->slot_info[i], 0, sizeof(struct dvb_ca_slot));
- ca->slot_info[i].slot_state = DVB_CA_SLOTSTATE_NONE;
- atomic_set(&ca->slot_info[i].camchange_count, 0);
- ca->slot_info[i].camchange_type = DVB_CA_EN50221_CAMCHANGE_REMOVED;
- mutex_init(&ca->slot_info[i].slot_lock);
+ struct dvb_ca_slot *sl = &ca->slot_info[i];
+
+ memset(sl, 0, sizeof(struct dvb_ca_slot));
+ sl->slot_state = DVB_CA_SLOTSTATE_NONE;
+ atomic_set(&sl->camchange_count, 0);
+ sl->camchange_type = DVB_CA_EN50221_CAMCHANGE_REMOVED;
+ mutex_init(&sl->slot_lock);
}
mutex_init(&ca->ioctl_mutex);
@@ -1809,9 +1924,7 @@ exit:
pubca->private = NULL;
return ret;
}
-EXPORT_SYMBOL(dvb_ca_en50221_release);
-
-
+EXPORT_SYMBOL(dvb_ca_en50221_init);
/**
* Release a DVB CA EN50221 interface device.
@@ -1829,10 +1942,11 @@ void dvb_ca_en50221_release(struct dvb_ca_en50221 *pubca)
/* shutdown the thread if there was one */
kthread_stop(ca->thread);
- for (i = 0; i < ca->slot_count; i++) {
+ for (i = 0; i < ca->slot_count; i++)
dvb_ca_en50221_slot_shutdown(ca, i);
- }
+
dvb_remove_device(ca->dvbdev);
dvb_ca_private_put(ca);
pubca->private = NULL;
}
+EXPORT_SYMBOL(dvb_ca_en50221_release);
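Most of the churn in this file is checkpatch-style cleanup, but the structural change worth noting is that the old monolithic kthread loop is split: dvb_ca_en50221_thread() now only sleeps and walks the slots, while all per-slot work moved into dvb_ca_en50221_thread_state_machine(), run under that slot's lock. A standalone sketch of that pattern follows; the types and names are simplified stand-ins for illustration, not the kernel structures.

#include <stdbool.h>

/* Simplified stand-ins for dvb_ca_private / dvb_ca_slot, illustration only. */
enum slot_state { SLOT_NONE, SLOT_UNINITIALISED, SLOT_RUNNING };

struct slot {
	enum slot_state state;
};

struct ca {
	struct slot slots[4];
	int slot_count;
	bool wakeup;
};

/* All per-slot work lives here, as in the new thread_state_machine(). */
static void slot_state_machine(struct ca *ca, int idx)
{
	struct slot *sl = &ca->slots[idx];

	switch (sl->state) {
	case SLOT_NONE:
		break;				/* nothing inserted */
	case SLOT_UNINITIALISED:
		sl->state = SLOT_RUNNING;	/* reset + link init would go here */
		break;
	case SLOT_RUNNING:
		break;				/* poll the CAM for data here */
	}
}

/* The thread body shrinks to "sleep, then dispatch each slot". */
static void ca_thread_iteration(struct ca *ca)
{
	int i;

	ca->wakeup = false;
	for (i = 0; i < ca->slot_count; i++)
		slot_state_machine(ca, i);
}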
diff --git a/drivers/media/dvb-core/dvb_ca_en50221.h b/drivers/media/dvb-core/dvb_ca_en50221.h
index 1e4bbbd34d91..367687d2b41a 100644
--- a/drivers/media/dvb-core/dvb_ca_en50221.h
+++ b/drivers/media/dvb-core/dvb_ca_en50221.h
@@ -41,6 +41,8 @@
* @write_attribute_mem: function for writing attribute memory on the CAM
* @read_cam_control: function for reading the control interface on the CAM
* @write_cam_control: function for reading the control interface on the CAM
+ * @read_data: function for reading data (block mode)
+ * @write_data: function for writing data (block mode)
* @slot_reset: function to reset the CAM slot
* @slot_shutdown: function to shutdown a CAM slot
* @slot_ts_enable: function to enable the Transport Stream on a CAM slot
@@ -66,6 +68,11 @@ struct dvb_ca_en50221 {
int (*write_cam_control)(struct dvb_ca_en50221 *ca,
int slot, u8 address, u8 value);
+ int (*read_data)(struct dvb_ca_en50221 *ca,
+ int slot, u8 *ebuf, int ecount);
+ int (*write_data)(struct dvb_ca_en50221 *ca,
+ int slot, u8 *ebuf, int ecount);
+
int (*slot_reset)(struct dvb_ca_en50221 *ca, int slot);
int (*slot_shutdown)(struct dvb_ca_en50221 *ca, int slot);
int (*slot_ts_enable)(struct dvb_ca_en50221 *ca, int slot);
@@ -121,8 +128,8 @@ void dvb_ca_en50221_frda_irq(struct dvb_ca_en50221 *ca, int slot);
*
* @return 0 on success, nonzero on failure
*/
-extern int dvb_ca_en50221_init(struct dvb_adapter *dvb_adapter,
- struct dvb_ca_en50221 *ca, int flags,
+int dvb_ca_en50221_init(struct dvb_adapter *dvb_adapter,
+ struct dvb_ca_en50221 *ca, int flags,
int slot_count);
/**
@@ -130,6 +137,6 @@ extern int dvb_ca_en50221_init(struct dvb_adapter *dvb_adapter,
*
* @ca: The associated dvb_ca instance.
*/
-extern void dvb_ca_en50221_release(struct dvb_ca_en50221 *ca);
+void dvb_ca_en50221_release(struct dvb_ca_en50221 *ca);
#endif
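The two new hooks are optional. A driver that can move whole link-layer buffers in one transaction would wire them up roughly as below; my_ci_read_data(), my_ci_write_data() and my_ca are hypothetical names, not taken from any existing driver, and the return conventions follow the dvb_ca_en50221.c changes above: bytes transferred on success, negative errno on failure.

#include "dvb_ca_en50221.h"

static int my_ci_read_data(struct dvb_ca_en50221 *ca, int slot,
			   u8 *ebuf, int ecount)
{
	/* transfer up to ecount bytes from the CAM into ebuf;
	 * return the number of bytes read, 0 if nothing pending,
	 * or a negative errno */
	return 0;
}

static int my_ci_write_data(struct dvb_ca_en50221 *ca, int slot,
			    u8 *ebuf, int ecount)
{
	/* write ecount bytes to the CAM; return bytes written or -errno */
	return ecount;
}

static struct dvb_ca_en50221 my_ca = {
	.read_data  = my_ci_read_data,
	.write_data = my_ci_write_data,
	/* the mandatory ops (read/write_attribute_mem, slot_reset,
	 * slot_shutdown, slot_ts_enable, ...) still have to be set
	 * exactly as before */
};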
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index e3fff8f64d37..2fcba1616168 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -460,7 +460,7 @@ static int dvb_frontend_swzigzag_autotune(struct dvb_frontend *fe, int check_wra
static void dvb_frontend_swzigzag(struct dvb_frontend *fe)
{
- enum fe_status s = 0;
+ enum fe_status s = FE_NONE;
int retval = 0;
struct dvb_frontend_private *fepriv = fe->frontend_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache, tmp;
@@ -631,7 +631,7 @@ static int dvb_frontend_thread(void *data)
struct dvb_frontend *fe = data;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct dvb_frontend_private *fepriv = fe->frontend_priv;
- enum fe_status s;
+ enum fe_status s = FE_NONE;
enum dvbfe_algo algo;
bool re_tune = false;
bool semheld = false;
@@ -1000,6 +1000,17 @@ static int dvb_frontend_clear_cache(struct dvb_frontend *fe)
.buffer = b \
}
+struct dtv_cmds_h {
+ char *name; /* A display name for debugging purposes */
+
+ __u32 cmd; /* A unique ID */
+
+ /* Flags */
+ __u32 set:1; /* Either a set or get property */
+ __u32 buffer:1; /* Does this property use the buffer? */
+ __u32 reserved:30; /* Align */
+};
+
static struct dtv_cmds_h dtv_cmds[DTV_MAX_COMMAND + 1] = {
_DTV_CMD(DTV_TUNE, 1, 0),
_DTV_CMD(DTV_CLEAR, 1, 0),
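With struct dtv_cmds_h now local to dvb_frontend.c, each _DTV_CMD() entry fills one element of the dtv_cmds[] table. The full macro body sits outside the visible hunk, so the expansion below is an assumption based on the field layout shown above (and on DTV_TUNE coming from the usual uapi frontend header); it is meant only to illustrate what one table entry carries.

/* Assumed shape of the entry produced by _DTV_CMD(DTV_TUNE, 1, 0). */
static const struct dtv_cmds_h example_entry = {
	.name	= "DTV_TUNE",	/* display name for debugging */
	.cmd	= DTV_TUNE,	/* unique property ID */
	.set	= 1,		/* this is a "set" property */
	.buffer	= 0,		/* does not use the property buffer */
};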
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index 3a260b82b3e8..2631d0e0a024 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -28,6 +28,15 @@ config DVB_STV090x
DVB-S/S2/DSS Multistandard Professional/Broadcast demodulators.
Say Y when you want to support these frontends.
+config DVB_STV0910
+ tristate "STV0910 based"
+ depends on DVB_CORE && I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ ST STV0910 DVB-S/S2 demodulator driver.
+
+ Say Y when you want to support these frontends.
+
config DVB_STV6110x
tristate "STV6110/(A) based tuners"
depends on DVB_CORE && I2C
@@ -35,6 +44,24 @@ config DVB_STV6110x
help
A Silicon tuner that supports DVB-S and DVB-S2 modes
+config DVB_STV6111
+ tristate "STV6111 based tuners"
+ depends on DVB_CORE && I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ A Silicon tuner that supports DVB-S and DVB-S2 modes
+
+ Say Y when you want to support these frontends.
+
+config DVB_MXL5XX
+ tristate "MaxLinear MxL5xx based tuner-demodulators"
+ depends on DVB_CORE && I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ MaxLinear MxL5xx family of DVB-S/S2 tuners/demodulators.
+
+ Say Y when you want to support these frontends.
+
config DVB_M88DS3103
tristate "Montage Technology M88DS3103"
depends on DVB_CORE && I2C && I2C_MUX
diff --git a/drivers/media/dvb-frontends/Makefile b/drivers/media/dvb-frontends/Makefile
index 3fccaf34ef52..f45f6a4a4371 100644
--- a/drivers/media/dvb-frontends/Makefile
+++ b/drivers/media/dvb-frontends/Makefile
@@ -110,6 +110,9 @@ obj-$(CONFIG_DVB_CXD2820R) += cxd2820r.o
obj-$(CONFIG_DVB_CXD2841ER) += cxd2841er.o
obj-$(CONFIG_DVB_DRXK) += drxk.o
obj-$(CONFIG_DVB_TDA18271C2DD) += tda18271c2dd.o
+obj-$(CONFIG_DVB_STV0910) += stv0910.o
+obj-$(CONFIG_DVB_STV6111) += stv6111.o
+obj-$(CONFIG_DVB_MXL5XX) += mxl5xx.o
obj-$(CONFIG_DVB_SI2165) += si2165.o
obj-$(CONFIG_DVB_A8293) += a8293.o
obj-$(CONFIG_DVB_SP2) += sp2.o
diff --git a/drivers/media/dvb-frontends/cx24123.c b/drivers/media/dvb-frontends/cx24123.c
index 4ae3d922a8e8..1d59d1d3bd82 100644
--- a/drivers/media/dvb-frontends/cx24123.c
+++ b/drivers/media/dvb-frontends/cx24123.c
@@ -1032,7 +1032,7 @@ static u32 cx24123_tuner_i2c_func(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm cx24123_tuner_i2c_algo = {
+static const struct i2c_algorithm cx24123_tuner_i2c_algo = {
.master_xfer = cx24123_tuner_i2c_tuner_xfer,
.functionality = cx24123_tuner_i2c_func,
};
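The cx24123 change is a plain constification of an ops table that is never modified at runtime, which lets it live in read-only data. A minimal sketch of the same pattern with hypothetical callbacks is shown below; the i2c_algorithm member signatures are quoted from memory of the in-kernel struct, so treat them as an assumption.

#include <linux/i2c.h>

static int my_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	return num;	/* pretend all messages were transferred */
}

static u32 my_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C;
}

/* const: the table lands in rodata and cannot be patched at runtime */
static const struct i2c_algorithm my_i2c_algo = {
	.master_xfer   = my_xfer,
	.functionality = my_func,
};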
diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c
index 08f67d60a7d9..48ee9bc00c06 100644
--- a/drivers/media/dvb-frontends/cxd2841er.c
+++ b/drivers/media/dvb-frontends/cxd2841er.c
@@ -487,6 +487,8 @@ static int cxd2841er_sleep_tc_to_shutdown(struct cxd2841er_priv *priv);
static int cxd2841er_shutdown_to_sleep_tc(struct cxd2841er_priv *priv);
+static int cxd2841er_sleep_tc(struct dvb_frontend *fe);
+
static int cxd2841er_retune_active(struct cxd2841er_priv *priv,
struct dtv_frontend_properties *p)
{
@@ -2178,42 +2180,42 @@ static int cxd2841er_sleep_tc_to_active_t2_band(struct cxd2841er_priv *priv,
u32 iffreq, ifhz;
u8 data[MAX_WRITE_REGSIZE];
- const uint8_t nominalRate8bw[3][5] = {
+ static const uint8_t nominalRate8bw[3][5] = {
/* TRCG Nominal Rate [37:0] */
{0x11, 0xF0, 0x00, 0x00, 0x00}, /* 20.5MHz XTal */
{0x15, 0x00, 0x00, 0x00, 0x00}, /* 24MHz XTal */
{0x11, 0xF0, 0x00, 0x00, 0x00} /* 41MHz XTal */
};
- const uint8_t nominalRate7bw[3][5] = {
+ static const uint8_t nominalRate7bw[3][5] = {
/* TRCG Nominal Rate [37:0] */
{0x14, 0x80, 0x00, 0x00, 0x00}, /* 20.5MHz XTal */
{0x18, 0x00, 0x00, 0x00, 0x00}, /* 24MHz XTal */
{0x14, 0x80, 0x00, 0x00, 0x00} /* 41MHz XTal */
};
- const uint8_t nominalRate6bw[3][5] = {
+ static const uint8_t nominalRate6bw[3][5] = {
/* TRCG Nominal Rate [37:0] */
{0x17, 0xEA, 0xAA, 0xAA, 0xAA}, /* 20.5MHz XTal */
{0x1C, 0x00, 0x00, 0x00, 0x00}, /* 24MHz XTal */
{0x17, 0xEA, 0xAA, 0xAA, 0xAA} /* 41MHz XTal */
};
- const uint8_t nominalRate5bw[3][5] = {
+ static const uint8_t nominalRate5bw[3][5] = {
/* TRCG Nominal Rate [37:0] */
{0x1C, 0xB3, 0x33, 0x33, 0x33}, /* 20.5MHz XTal */
{0x21, 0x99, 0x99, 0x99, 0x99}, /* 24MHz XTal */
{0x1C, 0xB3, 0x33, 0x33, 0x33} /* 41MHz XTal */
};
- const uint8_t nominalRate17bw[3][5] = {
+ static const uint8_t nominalRate17bw[3][5] = {
/* TRCG Nominal Rate [37:0] */
{0x58, 0xE2, 0xAF, 0xE0, 0xBC}, /* 20.5MHz XTal */
{0x68, 0x0F, 0xA2, 0x32, 0xD0}, /* 24MHz XTal */
{0x58, 0xE2, 0xAF, 0xE0, 0xBC} /* 41MHz XTal */
};
- const uint8_t itbCoef8bw[3][14] = {
+ static const uint8_t itbCoef8bw[3][14] = {
{0x26, 0xAF, 0x06, 0xCD, 0x13, 0xBB, 0x28, 0xBA,
0x23, 0xA9, 0x1F, 0xA8, 0x2C, 0xC8}, /* 20.5MHz XTal */
{0x2F, 0xBA, 0x28, 0x9B, 0x28, 0x9D, 0x28, 0xA1,
@@ -2222,7 +2224,7 @@ static int cxd2841er_sleep_tc_to_active_t2_band(struct cxd2841er_priv *priv,
0x23, 0xA9, 0x1F, 0xA8, 0x2C, 0xC8} /* 41MHz XTal */
};
- const uint8_t itbCoef7bw[3][14] = {
+ static const uint8_t itbCoef7bw[3][14] = {
{0x2C, 0xBD, 0x02, 0xCF, 0x04, 0xF8, 0x23, 0xA6,
0x29, 0xB0, 0x26, 0xA9, 0x21, 0xA5}, /* 20.5MHz XTal */
{0x30, 0xB1, 0x29, 0x9A, 0x28, 0x9C, 0x28, 0xA0,
@@ -2231,7 +2233,7 @@ static int cxd2841er_sleep_tc_to_active_t2_band(struct cxd2841er_priv *priv,
0x29, 0xB0, 0x26, 0xA9, 0x21, 0xA5} /* 41MHz XTal */
};
- const uint8_t itbCoef6bw[3][14] = {
+ static const uint8_t itbCoef6bw[3][14] = {
{0x27, 0xA7, 0x28, 0xB3, 0x02, 0xF0, 0x01, 0xE8,
0x00, 0xCF, 0x00, 0xE6, 0x23, 0xA4}, /* 20.5MHz XTal */
{0x31, 0xA8, 0x29, 0x9B, 0x27, 0x9C, 0x28, 0x9E,
@@ -2240,7 +2242,7 @@ static int cxd2841er_sleep_tc_to_active_t2_band(struct cxd2841er_priv *priv,
0x00, 0xCF, 0x00, 0xE6, 0x23, 0xA4} /* 41MHz XTal */
};
- const uint8_t itbCoef5bw[3][14] = {
+ static const uint8_t itbCoef5bw[3][14] = {
{0x27, 0xA7, 0x28, 0xB3, 0x02, 0xF0, 0x01, 0xE8,
0x00, 0xCF, 0x00, 0xE6, 0x23, 0xA4}, /* 20.5MHz XTal */
{0x31, 0xA8, 0x29, 0x9B, 0x27, 0x9C, 0x28, 0x9E,
@@ -2249,7 +2251,7 @@ static int cxd2841er_sleep_tc_to_active_t2_band(struct cxd2841er_priv *priv,
0x00, 0xCF, 0x00, 0xE6, 0x23, 0xA4} /* 41MHz XTal */
};
- const uint8_t itbCoef17bw[3][14] = {
+ static const uint8_t itbCoef17bw[3][14] = {
{0x25, 0xA0, 0x36, 0x8D, 0x2E, 0x94, 0x28, 0x9B,
0x32, 0x90, 0x2C, 0x9D, 0x29, 0x99}, /* 20.5MHz XTal */
{0x33, 0x8E, 0x2B, 0x97, 0x2D, 0x95, 0x37, 0x8B,
@@ -2423,32 +2425,32 @@ static int cxd2841er_sleep_tc_to_active_t_band(
{
u8 data[MAX_WRITE_REGSIZE];
u32 iffreq, ifhz;
- u8 nominalRate8bw[3][5] = {
+ static const u8 nominalRate8bw[3][5] = {
/* TRCG Nominal Rate [37:0] */
{0x11, 0xF0, 0x00, 0x00, 0x00}, /* 20.5MHz XTal */
{0x15, 0x00, 0x00, 0x00, 0x00}, /* 24MHz XTal */
{0x11, 0xF0, 0x00, 0x00, 0x00} /* 41MHz XTal */
};
- u8 nominalRate7bw[3][5] = {
+ static const u8 nominalRate7bw[3][5] = {
/* TRCG Nominal Rate [37:0] */
{0x14, 0x80, 0x00, 0x00, 0x00}, /* 20.5MHz XTal */
{0x18, 0x00, 0x00, 0x00, 0x00}, /* 24MHz XTal */
{0x14, 0x80, 0x00, 0x00, 0x00} /* 41MHz XTal */
};
- u8 nominalRate6bw[3][5] = {
+ static const u8 nominalRate6bw[3][5] = {
/* TRCG Nominal Rate [37:0] */
{0x17, 0xEA, 0xAA, 0xAA, 0xAA}, /* 20.5MHz XTal */
{0x1C, 0x00, 0x00, 0x00, 0x00}, /* 24MHz XTal */
{0x17, 0xEA, 0xAA, 0xAA, 0xAA} /* 41MHz XTal */
};
- u8 nominalRate5bw[3][5] = {
+ static const u8 nominalRate5bw[3][5] = {
/* TRCG Nominal Rate [37:0] */
{0x1C, 0xB3, 0x33, 0x33, 0x33}, /* 20.5MHz XTal */
{0x21, 0x99, 0x99, 0x99, 0x99}, /* 24MHz XTal */
{0x1C, 0xB3, 0x33, 0x33, 0x33} /* 41MHz XTal */
};
- u8 itbCoef8bw[3][14] = {
+ static const u8 itbCoef8bw[3][14] = {
{0x26, 0xAF, 0x06, 0xCD, 0x13, 0xBB, 0x28, 0xBA, 0x23, 0xA9,
0x1F, 0xA8, 0x2C, 0xC8}, /* 20.5MHz XTal */
{0x2F, 0xBA, 0x28, 0x9B, 0x28, 0x9D, 0x28, 0xA1, 0x29, 0xA5,
@@ -2456,7 +2458,7 @@ static int cxd2841er_sleep_tc_to_active_t_band(
{0x26, 0xAF, 0x06, 0xCD, 0x13, 0xBB, 0x28, 0xBA, 0x23, 0xA9,
0x1F, 0xA8, 0x2C, 0xC8} /* 41MHz XTal */
};
- u8 itbCoef7bw[3][14] = {
+ static const u8 itbCoef7bw[3][14] = {
{0x2C, 0xBD, 0x02, 0xCF, 0x04, 0xF8, 0x23, 0xA6, 0x29, 0xB0,
0x26, 0xA9, 0x21, 0xA5}, /* 20.5MHz XTal */
{0x30, 0xB1, 0x29, 0x9A, 0x28, 0x9C, 0x28, 0xA0, 0x29, 0xA2,
@@ -2464,7 +2466,7 @@ static int cxd2841er_sleep_tc_to_active_t_band(
{0x2C, 0xBD, 0x02, 0xCF, 0x04, 0xF8, 0x23, 0xA6, 0x29, 0xB0,
0x26, 0xA9, 0x21, 0xA5} /* 41MHz XTal */
};
- u8 itbCoef6bw[3][14] = {
+ static const u8 itbCoef6bw[3][14] = {
{0x27, 0xA7, 0x28, 0xB3, 0x02, 0xF0, 0x01, 0xE8, 0x00, 0xCF,
0x00, 0xE6, 0x23, 0xA4}, /* 20.5MHz XTal */
{0x31, 0xA8, 0x29, 0x9B, 0x27, 0x9C, 0x28, 0x9E, 0x29, 0xA4,
@@ -2472,7 +2474,7 @@ static int cxd2841er_sleep_tc_to_active_t_band(
{0x27, 0xA7, 0x28, 0xB3, 0x02, 0xF0, 0x01, 0xE8, 0x00, 0xCF,
0x00, 0xE6, 0x23, 0xA4} /* 41MHz XTal */
};
- u8 itbCoef5bw[3][14] = {
+ static const u8 itbCoef5bw[3][14] = {
{0x27, 0xA7, 0x28, 0xB3, 0x02, 0xF0, 0x01, 0xE8, 0x00, 0xCF,
0x00, 0xE6, 0x23, 0xA4}, /* 20.5MHz XTal */
{0x31, 0xA8, 0x29, 0x9B, 0x27, 0x9C, 0x28, 0x9E, 0x29, 0xA4,
@@ -2652,39 +2654,39 @@ static int cxd2841er_sleep_tc_to_active_i_band(
u8 data[3];
/* TRCG Nominal Rate */
- u8 nominalRate8bw[3][5] = {
+ static const u8 nominalRate8bw[3][5] = {
{0x00, 0x00, 0x00, 0x00, 0x00}, /* 20.5MHz XTal */
{0x11, 0xB8, 0x00, 0x00, 0x00}, /* 24MHz XTal */
{0x00, 0x00, 0x00, 0x00, 0x00} /* 41MHz XTal */
};
- u8 nominalRate7bw[3][5] = {
+ static const u8 nominalRate7bw[3][5] = {
{0x00, 0x00, 0x00, 0x00, 0x00}, /* 20.5MHz XTal */
{0x14, 0x40, 0x00, 0x00, 0x00}, /* 24MHz XTal */
{0x00, 0x00, 0x00, 0x00, 0x00} /* 41MHz XTal */
};
- u8 nominalRate6bw[3][5] = {
+ static const u8 nominalRate6bw[3][5] = {
{0x14, 0x2E, 0x00, 0x00, 0x00}, /* 20.5MHz XTal */
{0x17, 0xA0, 0x00, 0x00, 0x00}, /* 24MHz XTal */
{0x14, 0x2E, 0x00, 0x00, 0x00} /* 41MHz XTal */
};
- u8 itbCoef8bw[3][14] = {
+ static const u8 itbCoef8bw[3][14] = {
{0x00}, /* 20.5MHz XTal */
{0x2F, 0xBA, 0x28, 0x9B, 0x28, 0x9D, 0x28, 0xA1, 0x29,
0xA5, 0x2A, 0xAC, 0x29, 0xB5}, /* 24MHz Xtal */
{0x0}, /* 41MHz XTal */
};
- u8 itbCoef7bw[3][14] = {
+ static const u8 itbCoef7bw[3][14] = {
{0x00}, /* 20.5MHz XTal */
{0x30, 0xB1, 0x29, 0x9A, 0x28, 0x9C, 0x28, 0xA0, 0x29,
0xA2, 0x2B, 0xA6, 0x2B, 0xAD}, /* 24MHz Xtal */
{0x00}, /* 41MHz XTal */
};
- u8 itbCoef6bw[3][14] = {
+ static const u8 itbCoef6bw[3][14] = {
{0x27, 0xA7, 0x28, 0xB3, 0x02, 0xF0, 0x01, 0xE8, 0x00,
0xCF, 0x00, 0xE6, 0x23, 0xA4}, /* 20.5MHz XTal */
{0x31, 0xA8, 0x29, 0x9B, 0x27, 0x9C, 0x28, 0x9E, 0x29,
@@ -3279,7 +3281,10 @@ static int cxd2841er_get_frontend(struct dvb_frontend *fe,
else if (priv->state == STATE_ACTIVE_TC)
cxd2841er_read_status_tc(fe, &status);
- cxd2841er_read_signal_strength(fe);
+ if (priv->state == STATE_ACTIVE_TC || priv->state == STATE_ACTIVE_S)
+ cxd2841er_read_signal_strength(fe);
+ else
+ p->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
if (status & FE_HAS_LOCK) {
cxd2841er_read_snr(fe);
@@ -3375,6 +3380,14 @@ static int cxd2841er_set_frontend_tc(struct dvb_frontend *fe)
if (priv->flags & CXD2841ER_EARLY_TUNE)
cxd2841er_tuner_set(fe);
+ /* deconfigure/put demod to sleep on delsys switch if active */
+ if (priv->state == STATE_ACTIVE_TC &&
+ priv->system != p->delivery_system) {
+ dev_dbg(&priv->i2c->dev, "%s(): old_delsys=%d, new_delsys=%d -> sleep\n",
+ __func__, priv->system, p->delivery_system);
+ cxd2841er_sleep_tc(fe);
+ }
+
if (p->delivery_system == SYS_DVBT) {
priv->system = SYS_DVBT;
switch (priv->state) {
@@ -3591,6 +3604,7 @@ static int cxd2841er_sleep_tc(struct dvb_frontend *fe)
struct cxd2841er_priv *priv = fe->demodulator_priv;
dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+
if (priv->state == STATE_ACTIVE_TC) {
switch (priv->system) {
case SYS_DVBT:
@@ -3616,7 +3630,17 @@ static int cxd2841er_sleep_tc(struct dvb_frontend *fe)
__func__, priv->state);
return -EINVAL;
}
- cxd2841er_sleep_tc_to_shutdown(priv);
+ return 0;
+}
+
+static int cxd2841er_shutdown_tc(struct dvb_frontend *fe)
+{
+ struct cxd2841er_priv *priv = fe->demodulator_priv;
+
+ dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
+
+ if (!cxd2841er_sleep_tc(fe))
+ cxd2841er_sleep_tc_to_shutdown(priv);
return 0;
}
@@ -3965,7 +3989,7 @@ static struct dvb_frontend_ops cxd2841er_t_c_ops = {
.symbol_rate_max = 11700000
},
.init = cxd2841er_init_tc,
- .sleep = cxd2841er_sleep_tc,
+ .sleep = cxd2841er_shutdown_tc,
.release = cxd2841er_release,
.set_frontend = cxd2841er_set_frontend_tc,
.get_frontend = cxd2841er_get_frontend,
@@ -3975,6 +3999,6 @@ static struct dvb_frontend_ops cxd2841er_t_c_ops = {
.get_frontend_algo = cxd2841er_get_algo
};
-MODULE_DESCRIPTION("Sony CXD2841ER/CXD2854ER DVB-C/C2/T/T2/S/S2 demodulator driver");
+MODULE_DESCRIPTION("Sony CXD2837/38/41/43/54ER DVB-C/C2/T/T2/S/S2 demodulator driver");
MODULE_AUTHOR("Sergey Kozlov <serjk@netup.ru>, Abylay Ospan <aospan@netup.ru>");
MODULE_LICENSE("GPL");
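[Editor's note] The cxd2841er hunks above split "sleep" from "shutdown": cxd2841er_sleep_tc() now only drops an active demod into sleep (so set_frontend_tc() can call it on a delivery-system switch), while the new cxd2841er_shutdown_tc() wrapper additionally powers the chip down and takes over the .sleep frontend op. The shape of that wrapper pattern, reduced to a sketch with placeholder my_* names (not the driver's symbols):

	#include "dvb_frontend.h"

	/* Sketch only; my_* names are placeholders. */
	static int my_sleep(struct dvb_frontend *fe)
	{
		/* stop demodulation, keep the chip configured (used on delsys switch) */
		return 0;
	}

	static int my_shutdown(struct dvb_frontend *fe)
	{
		int ret = my_sleep(fe);

		if (!ret) {
			/* ...then drop the chip into its shutdown state */
		}
		return ret;
	}

With .sleep pointing at the shutdown wrapper, the DVB core still powers the chip off on sleep, while internal callers get the lighter sleep-only path.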
diff --git a/drivers/media/dvb-frontends/dib0090.c b/drivers/media/dvb-frontends/dib0090.c
index 33af14df27bd..d9d730dfe0b1 100644
--- a/drivers/media/dvb-frontends/dib0090.c
+++ b/drivers/media/dvb-frontends/dib0090.c
@@ -2052,7 +2052,7 @@ int dib0090_update_tuning_table_7090(struct dvb_frontend *fe,
struct dib0090_state *state = fe->tuner_priv;
const struct dib0090_tuning *tune =
dib0090_tuning_table_cband_7090e_sensitivity;
- const struct dib0090_tuning dib0090_tuning_table_cband_7090e_aci[] = {
+ static const struct dib0090_tuning dib0090_tuning_table_cband_7090e_aci[] = {
{ 300000, 0 , 3, 0x8165, 0x2c0, 0x2d12, 0xb84e, EN_CAB },
{ 650000, 0 , 4, 0x815B, 0x280, 0x2d12, 0xb84e, EN_CAB },
{ 860000, 0 , 5, 0x84EF, 0x280, 0x2d12, 0xb84e, EN_CAB },
@@ -2435,14 +2435,7 @@ static int dib0090_tune(struct dvb_frontend *fe)
Den = 1;
if (Rest > 0) {
- if (state->config->analog_output)
- lo6 |= (1 << 2) | 2;
- else {
- if (state->identity.in_soc)
- lo6 |= (1 << 2) | 2;
- else
- lo6 |= (1 << 2) | 2;
- }
+ lo6 |= (1 << 2) | 2;
Den = 255;
}
dib0090_write_reg(state, 0x15, (u16) FBDiv);
diff --git a/drivers/media/dvb-frontends/dib7000p.c b/drivers/media/dvb-frontends/dib7000p.c
index 1caa04d8f60f..0fbaabe43682 100644
--- a/drivers/media/dvb-frontends/dib7000p.c
+++ b/drivers/media/dvb-frontends/dib7000p.c
@@ -2388,7 +2388,7 @@ static u32 dib7000p_i2c_func(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm dib7090_tuner_xfer_algo = {
+static const struct i2c_algorithm dib7090_tuner_xfer_algo = {
.master_xfer = dib7090_tuner_xfer,
.functionality = dib7000p_i2c_func,
};
diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c
index e501ec964df1..5d9381509b07 100644
--- a/drivers/media/dvb-frontends/dib8000.c
+++ b/drivers/media/dvb-frontends/dib8000.c
@@ -1880,7 +1880,7 @@ static u32 dib8096p_i2c_func(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm dib8096p_tuner_xfer_algo = {
+static const struct i2c_algorithm dib8096p_tuner_xfer_algo = {
.master_xfer = dib8096p_tuner_xfer,
.functionality = dib8096p_i2c_func,
};
@@ -4255,23 +4255,6 @@ static int dib8000_set_slave_frontend(struct dvb_frontend *fe, struct dvb_fronte
return -ENOMEM;
}
-static int dib8000_remove_slave_frontend(struct dvb_frontend *fe)
-{
- struct dib8000_state *state = fe->demodulator_priv;
- u8 index_frontend = 1;
-
- while ((index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL))
- index_frontend++;
- if (index_frontend != 1) {
- dprintk("remove slave fe %p (index %i)\n", state->fe[index_frontend-1], index_frontend-1);
- state->fe[index_frontend] = NULL;
- return 0;
- }
-
- dprintk("no frontend to be removed\n");
- return -ENODEV;
-}
-
static struct dvb_frontend *dib8000_get_slave_frontend(struct dvb_frontend *fe, int slave_index)
{
struct dib8000_state *state = fe->demodulator_priv;
@@ -4506,7 +4489,6 @@ void *dib8000_attach(struct dib8000_ops *ops)
ops->get_slave_frontend = dib8000_get_slave_frontend;
ops->set_tune_state = dib8000_set_tune_state;
ops->pid_filter_ctrl = dib8000_pid_filter_ctrl;
- ops->remove_slave_frontend = dib8000_remove_slave_frontend;
ops->get_adc_power = dib8000_get_adc_power;
ops->update_pll = dib8000_update_pll;
ops->tuner_sleep = dib8096p_tuner_sleep;
diff --git a/drivers/media/dvb-frontends/dib8000.h b/drivers/media/dvb-frontends/dib8000.h
index 2b8b4b1656a2..75cc8e47ec8f 100644
--- a/drivers/media/dvb-frontends/dib8000.h
+++ b/drivers/media/dvb-frontends/dib8000.h
@@ -53,7 +53,6 @@ struct dib8000_ops {
enum frontend_tune_state (*get_tune_state)(struct dvb_frontend *fe);
int (*set_tune_state)(struct dvb_frontend *fe, enum frontend_tune_state tune_state);
int (*set_slave_frontend)(struct dvb_frontend *fe, struct dvb_frontend *fe_slave);
- int (*remove_slave_frontend)(struct dvb_frontend *fe);
struct dvb_frontend *(*get_slave_frontend)(struct dvb_frontend *fe, int slave_index);
int (*i2c_enumeration)(struct i2c_adapter *host, int no_of_demods,
u8 default_addr, u8 first_addr, u8 is_dib8096p);
diff --git a/drivers/media/dvb-frontends/dib9000.c b/drivers/media/dvb-frontends/dib9000.c
index c95fff4f9582..1b7a4331af05 100644
--- a/drivers/media/dvb-frontends/dib9000.c
+++ b/drivers/media/dvb-frontends/dib9000.c
@@ -1714,12 +1714,12 @@ static u32 dib9000_i2c_func(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm dib9000_tuner_algo = {
+static const struct i2c_algorithm dib9000_tuner_algo = {
.master_xfer = dib9000_tuner_xfer,
.functionality = dib9000_i2c_func,
};
-static struct i2c_algorithm dib9000_component_bus_algo = {
+static const struct i2c_algorithm dib9000_component_bus_algo = {
.master_xfer = dib9000_fw_component_bus_xfer,
.functionality = dib9000_i2c_func,
};
@@ -2462,24 +2462,6 @@ int dib9000_set_slave_frontend(struct dvb_frontend *fe, struct dvb_frontend *fe_
}
EXPORT_SYMBOL(dib9000_set_slave_frontend);
-int dib9000_remove_slave_frontend(struct dvb_frontend *fe)
-{
- struct dib9000_state *state = fe->demodulator_priv;
- u8 index_frontend = 1;
-
- while ((index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL))
- index_frontend++;
- if (index_frontend != 1) {
- dprintk("remove slave fe %p (index %i)\n", state->fe[index_frontend - 1], index_frontend - 1);
- state->fe[index_frontend] = NULL;
- return 0;
- }
-
- dprintk("no frontend to be removed\n");
- return -ENODEV;
-}
-EXPORT_SYMBOL(dib9000_remove_slave_frontend);
-
struct dvb_frontend *dib9000_get_slave_frontend(struct dvb_frontend *fe, int slave_index)
{
struct dib9000_state *state = fe->demodulator_priv;
diff --git a/drivers/media/dvb-frontends/dib9000.h b/drivers/media/dvb-frontends/dib9000.h
index b10a70aa7c9f..40883b41e66b 100644
--- a/drivers/media/dvb-frontends/dib9000.h
+++ b/drivers/media/dvb-frontends/dib9000.h
@@ -37,7 +37,6 @@ extern int dib9000_fw_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff);
extern int dib9000_fw_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff);
extern int dib9000_firmware_post_pll_init(struct dvb_frontend *fe);
extern int dib9000_set_slave_frontend(struct dvb_frontend *fe, struct dvb_frontend *fe_slave);
-extern int dib9000_remove_slave_frontend(struct dvb_frontend *fe);
extern struct dvb_frontend *dib9000_get_slave_frontend(struct dvb_frontend *fe, int slave_index);
extern struct i2c_adapter *dib9000_get_component_bus_interface(struct dvb_frontend *fe);
extern int dib9000_set_i2c_adapter(struct dvb_frontend *fe, struct i2c_adapter *i2c);
@@ -97,12 +96,6 @@ static inline int dib9000_set_slave_frontend(struct dvb_frontend *fe, struct dvb
return -ENODEV;
}
-static inline int dib9000_remove_slave_frontend(struct dvb_frontend *fe)
-{
- printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
- return -ENODEV;
-}
-
static inline struct dvb_frontend *dib9000_get_slave_frontend(struct dvb_frontend *fe, int slave_index)
{
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
diff --git a/drivers/media/dvb-frontends/drx39xyj/drx_driver.h b/drivers/media/dvb-frontends/drx39xyj/drx_driver.h
index 4442e478db72..cd69e187ba7a 100644
--- a/drivers/media/dvb-frontends/drx39xyj/drx_driver.h
+++ b/drivers/media/dvb-frontends/drx39xyj/drx_driver.h
@@ -307,7 +307,7 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner,
* \def DRX_UNKNOWN
* \brief Generic UNKNOWN value for DRX enumerated types.
*
-* Used to indicate that the parameter value is unknown or not yet initalized.
+* Used to indicate that the parameter value is unknown or not yet initialized.
*/
#ifndef DRX_UNKNOWN
#define DRX_UNKNOWN (254)
@@ -450,19 +450,6 @@ MACROS
((u8)((((u16)x)>>8)&0xFF))
/**
-* \brief Macro to sign extend signed 9 bit value to signed 16 bit value
-*/
-#define DRX_S9TOS16(x) ((((u16)x)&0x100) ? ((s16)((u16)(x)|0xFF00)) : (x))
-
-/**
-* \brief Macro to sign extend signed 9 bit value to signed 16 bit value
-*/
-#define DRX_S24TODRXFREQ(x) ((((u32) x) & 0x00800000UL) ? \
- ((s32) \
- (((u32) x) | 0xFF000000)) : \
- ((s32) x))
-
-/**
* \brief Macro to convert 16 bit register value to a s32
*/
#define DRX_U16TODRXFREQ(x) ((x & 0x8000) ? \
diff --git a/drivers/media/dvb-frontends/drx39xyj/drxj.c b/drivers/media/dvb-frontends/drx39xyj/drxj.c
index 14040c915dbb..499ccff557bf 100644
--- a/drivers/media/dvb-frontends/drx39xyj/drxj.c
+++ b/drivers/media/dvb-frontends/drx39xyj/drxj.c
@@ -5489,7 +5489,7 @@ static int set_vsb_leak_n_gain(struct drx_demod_instance *demod)
struct i2c_device_addr *dev_addr = NULL;
int rc;
- const u8 vsb_ffe_leak_gain_ram0[] = {
+ static const u8 vsb_ffe_leak_gain_ram0[] = {
DRXJ_16TO8(0x8), /* FFETRAINLKRATIO1 */
DRXJ_16TO8(0x8), /* FFETRAINLKRATIO2 */
DRXJ_16TO8(0x8), /* FFETRAINLKRATIO3 */
@@ -5620,7 +5620,7 @@ static int set_vsb_leak_n_gain(struct drx_demod_instance *demod)
DRXJ_16TO8(0x1010) /* FIRRCA1GAIN8 */
};
- const u8 vsb_ffe_leak_gain_ram1[] = {
+ static const u8 vsb_ffe_leak_gain_ram1[] = {
DRXJ_16TO8(0x1010), /* FIRRCA1GAIN9 */
DRXJ_16TO8(0x0808), /* FIRRCA1GAIN10 */
DRXJ_16TO8(0x0808), /* FIRRCA1GAIN11 */
@@ -5710,7 +5710,7 @@ static int set_vsb(struct drx_demod_instance *demod)
struct drxj_data *ext_attr = NULL;
u16 cmd_result = 0;
u16 cmd_param = 0;
- const u8 vsb_taps_re[] = {
+ static const u8 vsb_taps_re[] = {
DRXJ_16TO8(-2), /* re0 */
DRXJ_16TO8(4), /* re1 */
DRXJ_16TO8(1), /* re2 */
@@ -6666,7 +6666,7 @@ static int set_qam16(struct drx_demod_instance *demod)
{
struct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr;
int rc;
- const u8 qam_dq_qual_fun[] = {
+ static const u8 qam_dq_qual_fun[] = {
DRXJ_16TO8(2), /* fun0 */
DRXJ_16TO8(2), /* fun1 */
DRXJ_16TO8(2), /* fun2 */
@@ -6674,7 +6674,7 @@ static int set_qam16(struct drx_demod_instance *demod)
DRXJ_16TO8(3), /* fun4 */
DRXJ_16TO8(3), /* fun5 */
};
- const u8 qam_eq_cma_rad[] = {
+ static const u8 qam_eq_cma_rad[] = {
DRXJ_16TO8(13517), /* RAD0 */
DRXJ_16TO8(13517), /* RAD1 */
DRXJ_16TO8(13517), /* RAD2 */
@@ -6901,7 +6901,7 @@ static int set_qam32(struct drx_demod_instance *demod)
{
struct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr;
int rc;
- const u8 qam_dq_qual_fun[] = {
+ static const u8 qam_dq_qual_fun[] = {
DRXJ_16TO8(3), /* fun0 */
DRXJ_16TO8(3), /* fun1 */
DRXJ_16TO8(3), /* fun2 */
@@ -6909,7 +6909,7 @@ static int set_qam32(struct drx_demod_instance *demod)
DRXJ_16TO8(4), /* fun4 */
DRXJ_16TO8(4), /* fun5 */
};
- const u8 qam_eq_cma_rad[] = {
+ static const u8 qam_eq_cma_rad[] = {
DRXJ_16TO8(6707), /* RAD0 */
DRXJ_16TO8(6707), /* RAD1 */
DRXJ_16TO8(6707), /* RAD2 */
@@ -7136,7 +7136,8 @@ static int set_qam64(struct drx_demod_instance *demod)
{
struct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr;
int rc;
- const u8 qam_dq_qual_fun[] = { /* this is hw reset value. no necessary to re-write */
+ static const u8 qam_dq_qual_fun[] = {
+ /* this is the hw reset value; not necessary to re-write */
DRXJ_16TO8(4), /* fun0 */
DRXJ_16TO8(4), /* fun1 */
DRXJ_16TO8(4), /* fun2 */
@@ -7144,7 +7145,7 @@ static int set_qam64(struct drx_demod_instance *demod)
DRXJ_16TO8(6), /* fun4 */
DRXJ_16TO8(6), /* fun5 */
};
- const u8 qam_eq_cma_rad[] = {
+ static const u8 qam_eq_cma_rad[] = {
DRXJ_16TO8(13336), /* RAD0 */
DRXJ_16TO8(12618), /* RAD1 */
DRXJ_16TO8(11988), /* RAD2 */
@@ -7371,7 +7372,7 @@ static int set_qam128(struct drx_demod_instance *demod)
{
struct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr;
int rc;
- const u8 qam_dq_qual_fun[] = {
+ static const u8 qam_dq_qual_fun[] = {
DRXJ_16TO8(6), /* fun0 */
DRXJ_16TO8(6), /* fun1 */
DRXJ_16TO8(6), /* fun2 */
@@ -7379,7 +7380,7 @@ static int set_qam128(struct drx_demod_instance *demod)
DRXJ_16TO8(9), /* fun4 */
DRXJ_16TO8(9), /* fun5 */
};
- const u8 qam_eq_cma_rad[] = {
+ static const u8 qam_eq_cma_rad[] = {
DRXJ_16TO8(6164), /* RAD0 */
DRXJ_16TO8(6598), /* RAD1 */
DRXJ_16TO8(6394), /* RAD2 */
@@ -7606,7 +7607,7 @@ static int set_qam256(struct drx_demod_instance *demod)
{
struct i2c_device_addr *dev_addr = demod->my_i2c_dev_addr;
int rc;
- const u8 qam_dq_qual_fun[] = {
+ static const u8 qam_dq_qual_fun[] = {
DRXJ_16TO8(8), /* fun0 */
DRXJ_16TO8(8), /* fun1 */
DRXJ_16TO8(8), /* fun2 */
@@ -7614,7 +7615,7 @@ static int set_qam256(struct drx_demod_instance *demod)
DRXJ_16TO8(12), /* fun4 */
DRXJ_16TO8(12), /* fun5 */
};
- const u8 qam_eq_cma_rad[] = {
+ static const u8 qam_eq_cma_rad[] = {
DRXJ_16TO8(12345), /* RAD0 */
DRXJ_16TO8(12345), /* RAD1 */
DRXJ_16TO8(13626), /* RAD2 */
@@ -7862,7 +7863,7 @@ set_qam(struct drx_demod_instance *demod,
/* parameter */ NULL,
/* result */ NULL
};
- const u8 qam_a_taps[] = {
+ static const u8 qam_a_taps[] = {
DRXJ_16TO8(-1), /* re0 */
DRXJ_16TO8(1), /* re1 */
DRXJ_16TO8(1), /* re2 */
@@ -7892,7 +7893,7 @@ set_qam(struct drx_demod_instance *demod,
DRXJ_16TO8(-40), /* re26 */
DRXJ_16TO8(619) /* re27 */
};
- const u8 qam_b64_taps[] = {
+ static const u8 qam_b64_taps[] = {
DRXJ_16TO8(0), /* re0 */
DRXJ_16TO8(-2), /* re1 */
DRXJ_16TO8(1), /* re2 */
@@ -7922,7 +7923,7 @@ set_qam(struct drx_demod_instance *demod,
DRXJ_16TO8(-46), /* re26 */
DRXJ_16TO8(614) /* re27 */
};
- const u8 qam_b256_taps[] = {
+ static const u8 qam_b256_taps[] = {
DRXJ_16TO8(-2), /* re0 */
DRXJ_16TO8(4), /* re1 */
DRXJ_16TO8(1), /* re2 */
@@ -7952,7 +7953,7 @@ set_qam(struct drx_demod_instance *demod,
DRXJ_16TO8(-32), /* re26 */
DRXJ_16TO8(628) /* re27 */
};
- const u8 qam_c_taps[] = {
+ static const u8 qam_c_taps[] = {
DRXJ_16TO8(-3), /* re0 */
DRXJ_16TO8(3), /* re1 */
DRXJ_16TO8(2), /* re2 */
diff --git a/drivers/media/dvb-frontends/drxd_hard.c b/drivers/media/dvb-frontends/drxd_hard.c
index 17638e08835a..7d04400b18dd 100644
--- a/drivers/media/dvb-frontends/drxd_hard.c
+++ b/drivers/media/dvb-frontends/drxd_hard.c
@@ -638,8 +638,10 @@ static int SetCfgIfAgc(struct drxd_state *state, struct SCfgAgc *cfg)
/* == Speed == */
{
const u16 maxRur = 8;
- const u16 slowIncrDecLUT[] = { 3, 4, 4, 5, 6 };
- const u16 fastIncrDecLUT[] = { 14, 15, 15, 16,
+ static const u16 slowIncrDecLUT[] = {
+ 3, 4, 4, 5, 6 };
+ const u16 fastIncrDecLUT[] = {
+ 14, 15, 15, 16,
17, 18, 18, 19,
20, 21, 22, 23,
24, 26, 27, 28,
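[Editor's note] A recurring change in this series (cxd2841er, drxj and drxd above) is adding `static` to function-local `const` tables. Without it the table is an automatic variable that the compiler rebuilds on the stack on every call; with `static const` it is emitted once into .rodata and only a pointer is used at run time. A minimal before/after sketch (use_taps() is a hypothetical consumer, not from any driver):

	#include <linux/types.h>

	void use_taps(const u8 *taps, size_t len);	/* hypothetical consumer */

	void before(void)
	{
		const u8 taps[4] = { 0x11, 0xf0, 0x00, 0x00 };	/* copied to the stack each call */

		use_taps(taps, sizeof(taps));
	}

	void after(void)
	{
		static const u8 taps[4] = { 0x11, 0xf0, 0x00, 0x00 };	/* one read-only copy */

		use_taps(taps, sizeof(taps));
	}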
diff --git a/drivers/media/dvb-frontends/isl6421.c b/drivers/media/dvb-frontends/isl6421.c
index 838b42771a05..3f3487887672 100644
--- a/drivers/media/dvb-frontends/isl6421.c
+++ b/drivers/media/dvb-frontends/isl6421.c
@@ -38,35 +38,101 @@ struct isl6421 {
u8 override_and;
struct i2c_adapter *i2c;
u8 i2c_addr;
+ bool is_off;
};
static int isl6421_set_voltage(struct dvb_frontend *fe,
enum fe_sec_voltage voltage)
{
+ int ret;
+ u8 buf;
+ bool is_off;
struct isl6421 *isl6421 = (struct isl6421 *) fe->sec_priv;
- struct i2c_msg msg = { .addr = isl6421->i2c_addr, .flags = 0,
- .buf = &isl6421->config,
- .len = sizeof(isl6421->config) };
+ struct i2c_msg msg[2] = {
+ {
+ .addr = isl6421->i2c_addr,
+ .flags = 0,
+ .buf = &isl6421->config,
+ .len = 1,
+ }, {
+ .addr = isl6421->i2c_addr,
+ .flags = I2C_M_RD,
+ .buf = &buf,
+ .len = 1,
+ }
+
+ };
isl6421->config &= ~(ISL6421_VSEL1 | ISL6421_EN1);
switch(voltage) {
case SEC_VOLTAGE_OFF:
+ is_off = true;
break;
case SEC_VOLTAGE_13:
+ is_off = false;
isl6421->config |= ISL6421_EN1;
break;
case SEC_VOLTAGE_18:
+ is_off = false;
isl6421->config |= (ISL6421_EN1 | ISL6421_VSEL1);
break;
default:
return -EINVAL;
}
+ /*
+ * If the LNBf was not powered on, disable the dynamic current limit, as,
+ * according to the datasheet, a highly capacitive load on the output may
+ * cause a difficult start-up.
+ */
+ if (isl6421->is_off && !is_off)
+ isl6421->config |= ISL6421_DCL;
+
isl6421->config |= isl6421->override_or;
isl6421->config &= isl6421->override_and;
- return (i2c_transfer(isl6421->i2c, &msg, 1) == 1) ? 0 : -EIO;
+ ret = i2c_transfer(isl6421->i2c, msg, 2);
+ if (ret < 0)
+ return ret;
+ if (ret != 2)
+ return -EIO;
+
+ /* Store the off status now in case future commands fail */
+ isl6421->is_off = is_off;
+
+ /* On overload, the device will try again after 900 ms (typically) */
+ if (!is_off && (buf & ISL6421_OLF1))
+ msleep(1000);
+
+ /* Re-enable dynamic current limit */
+ if ((isl6421->config & ISL6421_DCL) &&
+ !(isl6421->override_or & ISL6421_DCL)) {
+ isl6421->config &= ~ISL6421_DCL;
+
+ ret = i2c_transfer(isl6421->i2c, msg, 2);
+ if (ret < 0)
+ return ret;
+ if (ret != 2)
+ return -EIO;
+ }
+
+ /* Check if overload flag is active. If so, disable power */
+ if (!is_off && (buf & ISL6421_OLF1)) {
+ isl6421->config &= ~(ISL6421_VSEL1 | ISL6421_EN1);
+ ret = i2c_transfer(isl6421->i2c, msg, 1);
+ if (ret < 0)
+ return ret;
+ if (ret != 1)
+ return -EIO;
+ isl6421->is_off = true;
+
+ dev_warn(&isl6421->i2c->dev,
+ "Overload current detected. disabling LNBf power\n");
+ return -EINVAL;
+ }
+
+ return 0;
}
static int isl6421_enable_high_lnb_voltage(struct dvb_frontend *fe, long arg)
@@ -148,6 +214,8 @@ struct dvb_frontend *isl6421_attach(struct dvb_frontend *fe, struct i2c_adapter
return NULL;
}
+ isl6421->is_off = true;
+
/* install release callback */
fe->ops.release_sec = isl6421_release;
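[Editor's note] The reworked isl6421_set_voltage() above relies on a single i2c_transfer() call carrying two messages: a write of the config byte immediately followed by a read of the status byte, so the overload flag (ISL6421_OLF1) is sampled in the same bus transaction that switched the LNB voltage. The write-then-read idiom on its own, as a hedged sketch (address, register layout and lengths are placeholders):

	#include <linux/i2c.h>

	static int write_then_read_status(struct i2c_adapter *adap, u8 addr,
					  u8 config, u8 *status)
	{
		struct i2c_msg msg[2] = {
			{ .addr = addr, .flags = 0,        .buf = &config, .len = 1 },
			{ .addr = addr, .flags = I2C_M_RD, .buf = status,  .len = 1 },
		};
		int ret = i2c_transfer(adap, msg, 2);

		if (ret < 0)
			return ret;		/* bus error */
		return ret == 2 ? 0 : -EIO;	/* partial transfer */
	}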
diff --git a/drivers/media/dvb-frontends/lnbh25.c b/drivers/media/dvb-frontends/lnbh25.c
index ef3021e964be..cb486e879fdd 100644
--- a/drivers/media/dvb-frontends/lnbh25.c
+++ b/drivers/media/dvb-frontends/lnbh25.c
@@ -76,8 +76,8 @@ static int lnbh25_read_vmon(struct lnbh25_priv *priv)
return ret;
}
}
- print_hex_dump_bytes("lnbh25_read_vmon: ",
- DUMP_PREFIX_OFFSET, status, sizeof(status));
+ dev_dbg(&priv->i2c->dev, "%s(): %*ph\n",
+ __func__, (int) sizeof(status), status);
if ((status[0] & (LNBH25_STATUS_OFL | LNBH25_STATUS_VMON)) != 0) {
dev_err(&priv->i2c->dev,
"%s(): voltage in failure state, status reg 0x%x\n",
@@ -178,7 +178,7 @@ struct dvb_frontend *lnbh25_attach(struct dvb_frontend *fe,
fe->ops.release_sec = lnbh25_release;
fe->ops.set_voltage = lnbh25_set_voltage;
- dev_err(&i2c->dev, "%s(): attached at I2C addr 0x%02x\n",
+ dev_info(&i2c->dev, "%s(): attached at I2C addr 0x%02x\n",
__func__, priv->i2c_address);
return fe;
}
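[Editor's note] The lnbh25 hunk replaces print_hex_dump_bytes() with dev_dbg() using the printk "%*ph" extension, which renders a short buffer as hex but, unlike the unconditional hex dump, is subject to the usual dynamic-debug gating. A minimal sketch of the specifier (device and buffer are illustrative only):

	#include <linux/device.h>
	#include <linux/types.h>

	static void dump_status(struct device *dev, const u8 *status, size_t len)
	{
		/* %*ph prints up to 64 bytes as space-separated hex */
		dev_dbg(dev, "status: %*ph\n", (int)len, status);
	}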
diff --git a/drivers/media/dvb-frontends/mb86a16.c b/drivers/media/dvb-frontends/mb86a16.c
index 9bb122c39c1b..dfe322eccaa1 100644
--- a/drivers/media/dvb-frontends/mb86a16.c
+++ b/drivers/media/dvb-frontends/mb86a16.c
@@ -415,27 +415,21 @@ static int signal_det(struct mb86a16_state *state,
int smrt,
unsigned char *SIG)
{
-
- int ret ;
- int smrtd ;
- int wait_sym ;
-
- u32 wait_t;
- unsigned char S[3] ;
- int i ;
+ int ret;
+ int smrtd;
+ unsigned char S[3];
+ int i;
if (*SIG > 45) {
if (CNTM_set(state, 2, 1, 2) < 0) {
dprintk(verbose, MB86A16_ERROR, 1, "CNTM set Error");
return -1;
}
- wait_sym = 40000;
} else {
if (CNTM_set(state, 3, 1, 2) < 0) {
dprintk(verbose, MB86A16_ERROR, 1, "CNTM set Error");
return -1;
}
- wait_sym = 80000;
}
for (i = 0; i < 3; i++) {
if (i == 0)
@@ -447,22 +441,17 @@ static int signal_det(struct mb86a16_state *state,
smrt_info_get(state, smrtd);
smrt_set(state, smrtd);
srst(state);
- wait_t = (wait_sym + 99 * smrtd / 100) / smrtd;
- if (wait_t == 0)
- wait_t = 1;
msleep_interruptible(10);
if (mb86a16_read(state, 0x37, &(S[i])) != 2) {
dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
return -EREMOTEIO;
}
}
- if ((S[1] > S[0] * 112 / 100) &&
- (S[1] > S[2] * 112 / 100)) {
-
+ if ((S[1] > S[0] * 112 / 100) && (S[1] > S[2] * 112 / 100))
ret = 1;
- } else {
+ else
ret = 0;
- }
+
*SIG = S[1];
if (CNTM_set(state, 0, 1, 2) < 0) {
diff --git a/drivers/media/dvb-frontends/mn88472.c b/drivers/media/dvb-frontends/mn88472.c
index f6938f9607ac..5e8fd63832e9 100644
--- a/drivers/media/dvb-frontends/mn88472.c
+++ b/drivers/media/dvb-frontends/mn88472.c
@@ -377,7 +377,9 @@ static int mn88472_set_frontend(struct dvb_frontend *fe)
ret = regmap_write(dev->regmap[1], 0xf6, 0x05);
if (ret)
goto err;
- ret = regmap_write(dev->regmap[2], 0x32, c->stream_id);
+ ret = regmap_write(dev->regmap[2], 0x32,
+ (c->stream_id == NO_STREAM_ID_FILTER) ? 0 :
+ c->stream_id);
if (ret)
goto err;
break;
diff --git a/drivers/media/dvb-frontends/mn88473.c b/drivers/media/dvb-frontends/mn88473.c
index 15874244fd8b..58247432a628 100644
--- a/drivers/media/dvb-frontends/mn88473.c
+++ b/drivers/media/dvb-frontends/mn88473.c
@@ -225,7 +225,9 @@ static int mn88473_set_frontend(struct dvb_frontend *fe)
/* PLP */
if (c->delivery_system == SYS_DVBT2) {
- ret = regmap_write(dev->regmap[2], 0x36, c->stream_id);
+ ret = regmap_write(dev->regmap[2], 0x36,
+ (c->stream_id == NO_STREAM_ID_FILTER) ? 0 :
+ c->stream_id);
if (ret)
goto err;
}
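[Editor's note] Both mn88472 and mn88473 gain the same guard: userspace passes NO_STREAM_ID_FILTER (~0U) to mean "no PLP filtering", and writing that value straight into the 8-bit stream-ID register would truncate it and select a bogus PLP, so the drivers substitute 0 first. Condensed into a helper as a sketch (the helper name is made up; 0 is used as the default PLP, matching the patch):

	#include "dvb_frontend.h"	/* pulls in NO_STREAM_ID_FILTER */

	static u32 plp_reg_value(u32 stream_id)
	{
		/* ~0 means "do not filter"; the register only takes a real PLP id */
		return (stream_id == NO_STREAM_ID_FILTER) ? 0 : stream_id;
	}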
diff --git a/drivers/media/dvb-frontends/mxl5xx.c b/drivers/media/dvb-frontends/mxl5xx.c
new file mode 100644
index 000000000000..676c96c216c3
--- /dev/null
+++ b/drivers/media/dvb-frontends/mxl5xx.c
@@ -0,0 +1,1873 @@
+/*
+ * Driver for the MaxLinear MxL5xx family of tuners/demods
+ *
+ * Copyright (C) 2014-2015 Ralph Metzler <rjkm@metzlerbros.de>
+ * Marcus Metzler <mocm@metzlerbros.de>
+ * developed for Digital Devices GmbH
+ *
+ * based on code:
+ * Copyright (c) 2011-2013 MaxLinear, Inc. All rights reserved
+ * which was released under GPL V2
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/i2c.h>
+#include <linux/version.h>
+#include <linux/mutex.h>
+#include <linux/vmalloc.h>
+#include <asm/div64.h>
+#include <asm/unaligned.h>
+
+#include "dvb_frontend.h"
+#include "mxl5xx.h"
+#include "mxl5xx_regs.h"
+#include "mxl5xx_defs.h"
+
+#define BYTE0(v) ((v >> 0) & 0xff)
+#define BYTE1(v) ((v >> 8) & 0xff)
+#define BYTE2(v) ((v >> 16) & 0xff)
+#define BYTE3(v) ((v >> 24) & 0xff)
+
+LIST_HEAD(mxllist);
+
+struct mxl_base {
+ struct list_head mxllist;
+ struct list_head mxls;
+
+ u8 adr;
+ struct i2c_adapter *i2c;
+
+ u32 count;
+ u32 type;
+ u32 sku_type;
+ u32 chipversion;
+ u32 clock;
+ u32 fwversion;
+
+ u8 *ts_map;
+ u8 can_clkout;
+ u8 chan_bond;
+ u8 demod_num;
+ u8 tuner_num;
+
+ unsigned long next_tune;
+
+ struct mutex i2c_lock;
+ struct mutex status_lock;
+ struct mutex tune_lock;
+
+ u8 buf[MXL_HYDRA_OEM_MAX_CMD_BUFF_LEN];
+
+ u32 cmd_size;
+ u8 cmd_data[MAX_CMD_DATA];
+};
+
+struct mxl {
+ struct list_head mxl;
+
+ struct mxl_base *base;
+ struct dvb_frontend fe;
+ struct device *i2cdev;
+ u32 demod;
+ u32 tuner;
+ u32 tuner_in_use;
+ u8 xbar[3];
+
+ unsigned long tune_time;
+};
+
+static void convert_endian(u8 flag, u32 size, u8 *d)
+{
+ u32 i;
+
+ if (!flag)
+ return;
+ for (i = 0; i < (size & ~3); i += 4) {
+ d[i + 0] ^= d[i + 3];
+ d[i + 3] ^= d[i + 0];
+ d[i + 0] ^= d[i + 3];
+
+ d[i + 1] ^= d[i + 2];
+ d[i + 2] ^= d[i + 1];
+ d[i + 1] ^= d[i + 2];
+ }
+
+ switch (size & 3) {
+ case 0:
+ case 1:
+ /* do nothing */
+ break;
+ case 2:
+ d[i + 0] ^= d[i + 1];
+ d[i + 1] ^= d[i + 0];
+ d[i + 0] ^= d[i + 1];
+ break;
+
+ case 3:
+ d[i + 0] ^= d[i + 2];
+ d[i + 2] ^= d[i + 0];
+ d[i + 0] ^= d[i + 2];
+ break;
+ }
+
+}
+
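[Editor's note] convert_endian() byte-reverses each aligned 32-bit word in place with XOR swaps and then patches up a 2- or 3-byte tail. The aligned part is equivalent to the plainer sketch below, using the kernel's swap() macro (the tail handling the driver still needs is omitted here):

	#include <linux/kernel.h>	/* swap() */
	#include <linux/types.h>

	static void swap_words(u8 *d, u32 size)
	{
		u32 i;

		for (i = 0; i + 4 <= size; i += 4) {
			swap(d[i], d[i + 3]);
			swap(d[i + 1], d[i + 2]);
		}
	}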
+static int i2c_write(struct i2c_adapter *adap, u8 adr,
+ u8 *data, u32 len)
+{
+ struct i2c_msg msg = {.addr = adr, .flags = 0,
+ .buf = data, .len = len};
+
+ return (i2c_transfer(adap, &msg, 1) == 1) ? 0 : -1;
+}
+
+static int i2c_read(struct i2c_adapter *adap, u8 adr,
+ u8 *data, u32 len)
+{
+ struct i2c_msg msg = {.addr = adr, .flags = I2C_M_RD,
+ .buf = data, .len = len};
+
+ return (i2c_transfer(adap, &msg, 1) == 1) ? 0 : -1;
+}
+
+static int i2cread(struct mxl *state, u8 *data, int len)
+{
+ return i2c_read(state->base->i2c, state->base->adr, data, len);
+}
+
+static int i2cwrite(struct mxl *state, u8 *data, int len)
+{
+ return i2c_write(state->base->i2c, state->base->adr, data, len);
+}
+
+static int read_register_unlocked(struct mxl *state, u32 reg, u32 *val)
+{
+ int stat;
+ u8 data[MXL_HYDRA_REG_SIZE_IN_BYTES + MXL_HYDRA_I2C_HDR_SIZE] = {
+ MXL_HYDRA_PLID_REG_READ, 0x04,
+ GET_BYTE(reg, 0), GET_BYTE(reg, 1),
+ GET_BYTE(reg, 2), GET_BYTE(reg, 3),
+ };
+
+ stat = i2cwrite(state, data,
+ MXL_HYDRA_REG_SIZE_IN_BYTES + MXL_HYDRA_I2C_HDR_SIZE);
+ if (stat)
+ dev_err(state->i2cdev, "i2c read error 1\n");
+ if (!stat)
+ stat = i2cread(state, (u8 *) val,
+ MXL_HYDRA_REG_SIZE_IN_BYTES);
+ le32_to_cpus(val);
+ if (stat)
+ dev_err(state->i2cdev, "i2c read error 2\n");
+ return stat;
+}
+
+#define DMA_I2C_INTERRUPT_ADDR 0x8000011C
+#define DMA_INTR_PROT_WR_CMP 0x08
+
+static int send_command(struct mxl *state, u32 size, u8 *buf)
+{
+ int stat;
+ u32 val, count = 10;
+
+ mutex_lock(&state->base->i2c_lock);
+ if (state->base->fwversion > 0x02010109) {
+ read_register_unlocked(state, DMA_I2C_INTERRUPT_ADDR, &val);
+ if (DMA_INTR_PROT_WR_CMP & val)
+ dev_info(state->i2cdev, "%s busy\n", __func__);
+ while ((DMA_INTR_PROT_WR_CMP & val) && --count) {
+ mutex_unlock(&state->base->i2c_lock);
+ usleep_range(1000, 2000);
+ mutex_lock(&state->base->i2c_lock);
+ read_register_unlocked(state, DMA_I2C_INTERRUPT_ADDR,
+ &val);
+ }
+ if (!count) {
+ dev_info(state->i2cdev, "%s busy\n", __func__);
+ mutex_unlock(&state->base->i2c_lock);
+ return -EBUSY;
+ }
+ }
+ stat = i2cwrite(state, buf, size);
+ mutex_unlock(&state->base->i2c_lock);
+ return stat;
+}
+
+static int write_register(struct mxl *state, u32 reg, u32 val)
+{
+ int stat;
+ u8 data[MXL_HYDRA_REG_WRITE_LEN] = {
+ MXL_HYDRA_PLID_REG_WRITE, 0x08,
+ BYTE0(reg), BYTE1(reg), BYTE2(reg), BYTE3(reg),
+ BYTE0(val), BYTE1(val), BYTE2(val), BYTE3(val),
+ };
+ mutex_lock(&state->base->i2c_lock);
+ stat = i2cwrite(state, data, sizeof(data));
+ mutex_unlock(&state->base->i2c_lock);
+ if (stat)
+ dev_err(state->i2cdev, "i2c write error\n");
+ return stat;
+}
+
+static int write_firmware_block(struct mxl *state,
+ u32 reg, u32 size, u8 *reg_data_ptr)
+{
+ int stat;
+ u8 *buf = state->base->buf;
+
+ mutex_lock(&state->base->i2c_lock);
+ buf[0] = MXL_HYDRA_PLID_REG_WRITE;
+ buf[1] = size + 4;
+ buf[2] = GET_BYTE(reg, 0);
+ buf[3] = GET_BYTE(reg, 1);
+ buf[4] = GET_BYTE(reg, 2);
+ buf[5] = GET_BYTE(reg, 3);
+ memcpy(&buf[6], reg_data_ptr, size);
+ stat = i2cwrite(state, buf,
+ MXL_HYDRA_I2C_HDR_SIZE +
+ MXL_HYDRA_REG_SIZE_IN_BYTES + size);
+ mutex_unlock(&state->base->i2c_lock);
+ if (stat)
+ dev_err(state->i2cdev, "fw block write failed\n");
+ return stat;
+}
+
+static int read_register(struct mxl *state, u32 reg, u32 *val)
+{
+ int stat;
+ u8 data[MXL_HYDRA_REG_SIZE_IN_BYTES + MXL_HYDRA_I2C_HDR_SIZE] = {
+ MXL_HYDRA_PLID_REG_READ, 0x04,
+ GET_BYTE(reg, 0), GET_BYTE(reg, 1),
+ GET_BYTE(reg, 2), GET_BYTE(reg, 3),
+ };
+
+ mutex_lock(&state->base->i2c_lock);
+ stat = i2cwrite(state, data,
+ MXL_HYDRA_REG_SIZE_IN_BYTES + MXL_HYDRA_I2C_HDR_SIZE);
+ if (stat)
+ dev_err(state->i2cdev, "i2c read error 1\n");
+ if (!stat)
+ stat = i2cread(state, (u8 *) val,
+ MXL_HYDRA_REG_SIZE_IN_BYTES);
+ mutex_unlock(&state->base->i2c_lock);
+ le32_to_cpus(val);
+ if (stat)
+ dev_err(state->i2cdev, "i2c read error 2\n");
+ return stat;
+}
+
+static int read_register_block(struct mxl *state, u32 reg, u32 size, u8 *data)
+{
+ int stat;
+ u8 *buf = state->base->buf;
+
+ mutex_lock(&state->base->i2c_lock);
+
+ buf[0] = MXL_HYDRA_PLID_REG_READ;
+ buf[1] = size + 4;
+ buf[2] = GET_BYTE(reg, 0);
+ buf[3] = GET_BYTE(reg, 1);
+ buf[4] = GET_BYTE(reg, 2);
+ buf[5] = GET_BYTE(reg, 3);
+ stat = i2cwrite(state, buf,
+ MXL_HYDRA_I2C_HDR_SIZE + MXL_HYDRA_REG_SIZE_IN_BYTES);
+ if (!stat) {
+ stat = i2cread(state, data, size);
+ convert_endian(MXL_ENABLE_BIG_ENDIAN, size, data);
+ }
+ mutex_unlock(&state->base->i2c_lock);
+ return stat;
+}
+
+static int read_by_mnemonic(struct mxl *state,
+ u32 reg, u8 lsbloc, u8 numofbits, u32 *val)
+{
+ u32 data = 0, mask = 0;
+ int stat;
+
+ stat = read_register(state, reg, &data);
+ if (stat)
+ return stat;
+ mask = MXL_GET_REG_MASK_32(lsbloc, numofbits);
+ data &= mask;
+ data >>= lsbloc;
+ *val = data;
+ return 0;
+}
+
+
+static int update_by_mnemonic(struct mxl *state,
+ u32 reg, u8 lsbloc, u8 numofbits, u32 val)
+{
+ u32 data, mask;
+ int stat;
+
+ stat = read_register(state, reg, &data);
+ if (stat)
+ return stat;
+ mask = MXL_GET_REG_MASK_32(lsbloc, numofbits);
+ data = (data & ~mask) | ((val << lsbloc) & mask);
+ stat = write_register(state, reg, data);
+ return stat;
+}
+
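[Editor's note] read_by_mnemonic()/update_by_mnemonic() are read-modify-write helpers for an n-bit field at a given bit position of a 32-bit register; MXL_GET_REG_MASK_32() (defined in the mxl5xx headers, not shown in this hunk) just builds the field mask. The mask arithmetic, spelled out as a standalone sketch (width < 32 assumed):

	#include <linux/types.h>

	/* Replace a width-bit field starting at bit lsb inside a register value. */
	static u32 set_field(u32 reg, u8 lsb, u8 width, u32 val)
	{
		u32 mask = ((1U << width) - 1) << lsb;

		return (reg & ~mask) | ((val << lsb) & mask);
	}

	/* e.g. set_field(0, 23, 1, 1) == 0x00800000, as in the clock-out update further down */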
+static int firmware_is_alive(struct mxl *state)
+{
+ u32 hb0, hb1;
+
+ if (read_register(state, HYDRA_HEAR_BEAT, &hb0))
+ return 0;
+ msleep(20);
+ if (read_register(state, HYDRA_HEAR_BEAT, &hb1))
+ return 0;
+ if (hb1 == hb0)
+ return 0;
+ return 1;
+}
+
+static int init(struct dvb_frontend *fe)
+{
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+
+ /* init fe stats */
+ p->strength.len = 1;
+ p->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->cnr.len = 1;
+ p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->pre_bit_error.len = 1;
+ p->pre_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->pre_bit_count.len = 1;
+ p->pre_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->post_bit_error.len = 1;
+ p->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->post_bit_count.len = 1;
+ p->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+
+ return 0;
+}
+
+static void release(struct dvb_frontend *fe)
+{
+ struct mxl *state = fe->demodulator_priv;
+
+ list_del(&state->mxl);
+ /* Release one frontend, two more shall take its place! */
+ state->base->count--;
+ if (state->base->count == 0) {
+ list_del(&state->base->mxllist);
+ kfree(state->base);
+ }
+ kfree(state);
+}
+
+static int get_algo(struct dvb_frontend *fe)
+{
+ return DVBFE_ALGO_HW;
+}
+
+static int cfg_demod_abort_tune(struct mxl *state)
+{
+ struct MXL_HYDRA_DEMOD_ABORT_TUNE_T abort_tune_cmd;
+ u8 cmd_size = sizeof(abort_tune_cmd);
+ u8 cmd_buff[MXL_HYDRA_OEM_MAX_CMD_BUFF_LEN];
+
+ abort_tune_cmd.demod_id = state->demod;
+ BUILD_HYDRA_CMD(MXL_HYDRA_ABORT_TUNE_CMD, MXL_CMD_WRITE,
+ cmd_size, &abort_tune_cmd, cmd_buff);
+ return send_command(state, cmd_size + MXL_HYDRA_CMD_HEADER_SIZE,
+ &cmd_buff[0]);
+}
+
+static int send_master_cmd(struct dvb_frontend *fe,
+ struct dvb_diseqc_master_cmd *cmd)
+{
+ /*struct mxl *state = fe->demodulator_priv;*/
+
+ return 0; /*CfgDemodAbortTune(state);*/
+}
+
+static int set_parameters(struct dvb_frontend *fe)
+{
+ struct mxl *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ struct MXL_HYDRA_DEMOD_PARAM_T demod_chan_cfg;
+ u8 cmd_size = sizeof(demod_chan_cfg);
+ u8 cmd_buff[MXL_HYDRA_OEM_MAX_CMD_BUFF_LEN];
+ u32 srange = 10;
+ int stat;
+
+ if (p->frequency < 950000 || p->frequency > 2150000)
+ return -EINVAL;
+ if (p->symbol_rate < 1000000 || p->symbol_rate > 45000000)
+ return -EINVAL;
+
+ /* CfgDemodAbortTune(state); */
+
+ switch (p->delivery_system) {
+ case SYS_DSS:
+ demod_chan_cfg.standard = MXL_HYDRA_DSS;
+ demod_chan_cfg.roll_off = MXL_HYDRA_ROLLOFF_AUTO;
+ break;
+ case SYS_DVBS:
+ srange = p->symbol_rate / 1000000;
+ if (srange > 10)
+ srange = 10;
+ demod_chan_cfg.standard = MXL_HYDRA_DVBS;
+ demod_chan_cfg.roll_off = MXL_HYDRA_ROLLOFF_0_35;
+ demod_chan_cfg.modulation_scheme = MXL_HYDRA_MOD_QPSK;
+ demod_chan_cfg.pilots = MXL_HYDRA_PILOTS_OFF;
+ break;
+ case SYS_DVBS2:
+ demod_chan_cfg.standard = MXL_HYDRA_DVBS2;
+ demod_chan_cfg.roll_off = MXL_HYDRA_ROLLOFF_AUTO;
+ demod_chan_cfg.modulation_scheme = MXL_HYDRA_MOD_AUTO;
+ demod_chan_cfg.pilots = MXL_HYDRA_PILOTS_AUTO;
+ /* cfg_scrambler(state); */
+ break;
+ default:
+ return -EINVAL;
+ }
+ demod_chan_cfg.tuner_index = state->tuner;
+ demod_chan_cfg.demod_index = state->demod;
+ demod_chan_cfg.frequency_in_hz = p->frequency * 1000;
+ demod_chan_cfg.symbol_rate_in_hz = p->symbol_rate;
+ demod_chan_cfg.max_carrier_offset_in_mhz = srange;
+ demod_chan_cfg.spectrum_inversion = MXL_HYDRA_SPECTRUM_AUTO;
+ demod_chan_cfg.fec_code_rate = MXL_HYDRA_FEC_AUTO;
+
+ mutex_lock(&state->base->tune_lock);
+ if (time_after(jiffies + msecs_to_jiffies(200),
+ state->base->next_tune))
+ while (time_before(jiffies, state->base->next_tune))
+ usleep_range(10000, 11000);
+ state->base->next_tune = jiffies + msecs_to_jiffies(100);
+ state->tuner_in_use = state->tuner;
+ BUILD_HYDRA_CMD(MXL_HYDRA_DEMOD_SET_PARAM_CMD, MXL_CMD_WRITE,
+ cmd_size, &demod_chan_cfg, cmd_buff);
+ stat = send_command(state, cmd_size + MXL_HYDRA_CMD_HEADER_SIZE,
+ &cmd_buff[0]);
+ mutex_unlock(&state->base->tune_lock);
+ return stat;
+}
+
+static int enable_tuner(struct mxl *state, u32 tuner, u32 enable);
+
+static int sleep(struct dvb_frontend *fe)
+{
+ struct mxl *state = fe->demodulator_priv;
+ struct mxl *p;
+
+ cfg_demod_abort_tune(state);
+ if (state->tuner_in_use != 0xffffffff) {
+ mutex_lock(&state->base->tune_lock);
+ state->tuner_in_use = 0xffffffff;
+ list_for_each_entry(p, &state->base->mxls, mxl) {
+ if (p->tuner_in_use == state->tuner)
+ break;
+ }
+ if (&p->mxl == &state->base->mxls)
+ enable_tuner(state, state->tuner, 0);
+ mutex_unlock(&state->base->tune_lock);
+ }
+ return 0;
+}
+
+static int read_snr(struct dvb_frontend *fe)
+{
+ struct mxl *state = fe->demodulator_priv;
+ int stat;
+ u32 reg_data = 0;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+
+ mutex_lock(&state->base->status_lock);
+ HYDRA_DEMOD_STATUS_LOCK(state, state->demod);
+ stat = read_register(state, (HYDRA_DMD_SNR_ADDR_OFFSET +
+ HYDRA_DMD_STATUS_OFFSET(state->demod)),
+ &reg_data);
+ HYDRA_DEMOD_STATUS_UNLOCK(state, state->demod);
+ mutex_unlock(&state->base->status_lock);
+
+ p->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+ p->cnr.stat[0].svalue = (s16)reg_data * 10;
+
+ return stat;
+}
+
+static int read_ber(struct dvb_frontend *fe)
+{
+ struct mxl *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ u32 reg[8];
+
+ mutex_lock(&state->base->status_lock);
+ HYDRA_DEMOD_STATUS_LOCK(state, state->demod);
+ read_register_block(state,
+ (HYDRA_DMD_DVBS_1ST_CORR_RS_ERRORS_ADDR_OFFSET +
+ HYDRA_DMD_STATUS_OFFSET(state->demod)),
+ (4 * sizeof(u32)),
+ (u8 *) &reg[0]);
+ HYDRA_DEMOD_STATUS_UNLOCK(state, state->demod);
+
+ switch (p->delivery_system) {
+ case SYS_DSS:
+ case SYS_DVBS:
+ p->pre_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+ p->pre_bit_error.stat[0].uvalue = reg[2];
+ p->pre_bit_count.stat[0].scale = FE_SCALE_COUNTER;
+ p->pre_bit_count.stat[0].uvalue = reg[3];
+ break;
+ default:
+ break;
+ }
+
+ read_register_block(state,
+ (HYDRA_DMD_DVBS2_CRC_ERRORS_ADDR_OFFSET +
+ HYDRA_DMD_STATUS_OFFSET(state->demod)),
+ (7 * sizeof(u32)),
+ (u8 *) &reg[0]);
+
+ switch (p->delivery_system) {
+ case SYS_DSS:
+ case SYS_DVBS:
+ p->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+ p->post_bit_error.stat[0].uvalue = reg[5];
+ p->post_bit_count.stat[0].scale = FE_SCALE_COUNTER;
+ p->post_bit_count.stat[0].uvalue = reg[6];
+ break;
+ case SYS_DVBS2:
+ p->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+ p->post_bit_error.stat[0].uvalue = reg[1];
+ p->post_bit_count.stat[0].scale = FE_SCALE_COUNTER;
+ p->post_bit_count.stat[0].uvalue = reg[2];
+ break;
+ default:
+ break;
+ }
+
+ mutex_unlock(&state->base->status_lock);
+
+ return 0;
+}
+
+static int read_signal_strength(struct dvb_frontend *fe)
+{
+ struct mxl *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ int stat;
+ u32 reg_data = 0;
+
+ mutex_lock(&state->base->status_lock);
+ HYDRA_DEMOD_STATUS_LOCK(state, state->demod);
+ stat = read_register(state, (HYDRA_DMD_STATUS_INPUT_POWER_ADDR +
+ HYDRA_DMD_STATUS_OFFSET(state->demod)),
+ &reg_data);
+ HYDRA_DEMOD_STATUS_UNLOCK(state, state->demod);
+ mutex_unlock(&state->base->status_lock);
+
+ p->strength.stat[0].scale = FE_SCALE_DECIBEL;
+ p->strength.stat[0].svalue = (s16) reg_data * 10; /* fix scale */
+
+ return stat;
+}
+
+static int read_status(struct dvb_frontend *fe, enum fe_status *status)
+{
+ struct mxl *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ u32 reg_data = 0;
+
+ mutex_lock(&state->base->status_lock);
+ HYDRA_DEMOD_STATUS_LOCK(state, state->demod);
+ read_register(state, (HYDRA_DMD_LOCK_STATUS_ADDR_OFFSET +
+ HYDRA_DMD_STATUS_OFFSET(state->demod)),
+ &reg_data);
+ HYDRA_DEMOD_STATUS_UNLOCK(state, state->demod);
+ mutex_unlock(&state->base->status_lock);
+
+ *status = (reg_data == 1) ? 0x1f : 0;
+
+ /* signal statistics */
+
+ /* signal strength is always available */
+ read_signal_strength(fe);
+
+ if (*status & FE_HAS_CARRIER)
+ read_snr(fe);
+ else
+ p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+
+ if (*status & FE_HAS_SYNC)
+ read_ber(fe);
+ else {
+ p->pre_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->pre_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ }
+
+ return 0;
+}
+
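[Editor's note] read_status() collapses the firmware's lock word into 0x1f, i.e. every fe_status bit at once, and then backfills the statistics. The magic constant written out with the named flags (equivalent; sketch only):

	#include <linux/dvb/frontend.h>

	static enum fe_status lock_to_status(u32 locked)
	{
		return locked == 1 ? (FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI |
				      FE_HAS_SYNC | FE_HAS_LOCK) : 0;
	}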
+static int tune(struct dvb_frontend *fe, bool re_tune,
+ unsigned int mode_flags,
+ unsigned int *delay, enum fe_status *status)
+{
+ struct mxl *state = fe->demodulator_priv;
+ int r = 0;
+
+ *delay = HZ / 2;
+ if (re_tune) {
+ r = set_parameters(fe);
+ if (r)
+ return r;
+ state->tune_time = jiffies;
+ return 0;
+ }
+ if (*status & FE_HAS_LOCK)
+ return 0;
+
+ r = read_status(fe, status);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static enum fe_code_rate conv_fec(enum MXL_HYDRA_FEC_E fec)
+{
+ enum fe_code_rate fec2fec[11] = {
+ FEC_NONE, FEC_1_2, FEC_3_5, FEC_2_3,
+ FEC_3_4, FEC_4_5, FEC_5_6, FEC_6_7,
+ FEC_7_8, FEC_8_9, FEC_9_10
+ };
+
+ if (fec > MXL_HYDRA_FEC_9_10)
+ return FEC_NONE;
+ return fec2fec[fec];
+}
+
+static int get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *p)
+{
+ struct mxl *state = fe->demodulator_priv;
+ u32 reg_data[MXL_DEMOD_CHAN_PARAMS_BUFF_SIZE];
+ u32 freq;
+
+ mutex_lock(&state->base->status_lock);
+ HYDRA_DEMOD_STATUS_LOCK(state, state->demod);
+ read_register_block(state,
+ (HYDRA_DMD_STANDARD_ADDR_OFFSET +
+ HYDRA_DMD_STATUS_OFFSET(state->demod)),
+ (MXL_DEMOD_CHAN_PARAMS_BUFF_SIZE * 4), /* 25 * 4 bytes */
+ (u8 *) &reg_data[0]);
+ /* read demod channel parameters */
+ read_register_block(state,
+ (HYDRA_DMD_STATUS_CENTER_FREQ_IN_KHZ_ADDR +
+ HYDRA_DMD_STATUS_OFFSET(state->demod)),
+ (4), /* 4 bytes */
+ (u8 *) &freq);
+ HYDRA_DEMOD_STATUS_UNLOCK(state, state->demod);
+ mutex_unlock(&state->base->status_lock);
+
+ dev_dbg(state->i2cdev, "freq=%u delsys=%u srate=%u\n",
+ freq * 1000, reg_data[DMD_STANDARD_ADDR],
+ reg_data[DMD_SYMBOL_RATE_ADDR]);
+ p->symbol_rate = reg_data[DMD_SYMBOL_RATE_ADDR];
+ p->frequency = freq;
+ /*
+ * p->delivery_system =
+ * (MXL_HYDRA_BCAST_STD_E) regData[DMD_STANDARD_ADDR];
+ * p->inversion =
+ * (MXL_HYDRA_SPECTRUM_E) regData[DMD_SPECTRUM_INVERSION_ADDR];
+ * freqSearchRangeKHz =
+ * (regData[DMD_FREQ_SEARCH_RANGE_IN_KHZ_ADDR]);
+ */
+
+ p->fec_inner = conv_fec(reg_data[DMD_FEC_CODE_RATE_ADDR]);
+ switch (p->delivery_system) {
+ case SYS_DSS:
+ break;
+ case SYS_DVBS2:
+ switch ((enum MXL_HYDRA_PILOTS_E)
+ reg_data[DMD_DVBS2_PILOT_ON_OFF_ADDR]) {
+ case MXL_HYDRA_PILOTS_OFF:
+ p->pilot = PILOT_OFF;
+ break;
+ case MXL_HYDRA_PILOTS_ON:
+ p->pilot = PILOT_ON;
+ break;
+ default:
+ break;
+ }
+ case SYS_DVBS:
+ switch ((enum MXL_HYDRA_MODULATION_E)
+ reg_data[DMD_MODULATION_SCHEME_ADDR]) {
+ case MXL_HYDRA_MOD_QPSK:
+ p->modulation = QPSK;
+ break;
+ case MXL_HYDRA_MOD_8PSK:
+ p->modulation = PSK_8;
+ break;
+ default:
+ break;
+ }
+ switch ((enum MXL_HYDRA_ROLLOFF_E)
+ reg_data[DMD_SPECTRUM_ROLL_OFF_ADDR]) {
+ case MXL_HYDRA_ROLLOFF_0_20:
+ p->rolloff = ROLLOFF_20;
+ break;
+ case MXL_HYDRA_ROLLOFF_0_35:
+ p->rolloff = ROLLOFF_35;
+ break;
+ case MXL_HYDRA_ROLLOFF_0_25:
+ p->rolloff = ROLLOFF_25;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int set_input(struct dvb_frontend *fe, int input)
+{
+ struct mxl *state = fe->demodulator_priv;
+
+ state->tuner = input;
+ return 0;
+}
+
+static struct dvb_frontend_ops mxl_ops = {
+ .delsys = { SYS_DVBS, SYS_DVBS2, SYS_DSS },
+ .info = {
+ .name = "MaxLinear MxL5xx DVB-S/S2 tuner-demodulator",
+ .frequency_min = 300000,
+ .frequency_max = 2350000,
+ .frequency_stepsize = 0,
+ .frequency_tolerance = 0,
+ .symbol_rate_min = 1000000,
+ .symbol_rate_max = 45000000,
+ .caps = FE_CAN_INVERSION_AUTO |
+ FE_CAN_FEC_AUTO |
+ FE_CAN_QPSK |
+ FE_CAN_2G_MODULATION
+ },
+ .init = init,
+ .release = release,
+ .get_frontend_algo = get_algo,
+ .tune = tune,
+ .read_status = read_status,
+ .sleep = sleep,
+ .get_frontend = get_frontend,
+ .diseqc_send_master_cmd = send_master_cmd,
+};
+
+static struct mxl_base *match_base(struct i2c_adapter *i2c, u8 adr)
+{
+ struct mxl_base *p;
+
+ list_for_each_entry(p, &mxllist, mxllist)
+ if (p->i2c == i2c && p->adr == adr)
+ return p;
+ return NULL;
+}
+
+static void cfg_dev_xtal(struct mxl *state, u32 freq, u32 cap, u32 enable)
+{
+ if (state->base->can_clkout || !enable)
+ update_by_mnemonic(state, 0x90200054, 23, 1, enable);
+
+ if (freq == 24000000)
+ write_register(state, HYDRA_CRYSTAL_SETTING, 0);
+ else
+ write_register(state, HYDRA_CRYSTAL_SETTING, 1);
+
+ write_register(state, HYDRA_CRYSTAL_CAP, cap);
+}
+
+static u32 get_big_endian(u8 num_of_bits, const u8 buf[])
+{
+ u32 ret_value = 0;
+
+ switch (num_of_bits) {
+ case 24:
+ ret_value = (((u32) buf[0]) << 16) |
+ (((u32) buf[1]) << 8) | buf[2];
+ break;
+ case 32:
+ ret_value = (((u32) buf[0]) << 24) |
+ (((u32) buf[1]) << 16) |
+ (((u32) buf[2]) << 8) | buf[3];
+ break;
+ default:
+ break;
+ }
+
+ return ret_value;
+}
+
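[Editor's note] get_big_endian() hand-assembles 24- and 32-bit big-endian values from a byte stream. Since this file already includes <asm/unaligned.h>, the 32-bit case is equivalent to get_unaligned_be32(); the 24-bit case keeps the manual shifts (a dedicated be24 helper may not be available in this kernel). Sketch:

	#include <asm/unaligned.h>
	#include <linux/types.h>

	/* Equivalent to get_big_endian(32, buf) above */
	static u32 be32_from(const u8 *buf)
	{
		return get_unaligned_be32(buf);
	}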
+static int write_fw_segment(struct mxl *state,
+ u32 mem_addr, u32 total_size, u8 *data_ptr)
+{
+ int status;
+ u32 data_count = 0;
+ u32 size = 0;
+ u32 orig_size = 0;
+ u8 *w_buf_ptr = NULL;
+ u32 block_size = ((MXL_HYDRA_OEM_MAX_BLOCK_WRITE_LENGTH -
+ (MXL_HYDRA_I2C_HDR_SIZE +
+ MXL_HYDRA_REG_SIZE_IN_BYTES)) / 4) * 4;
+ u8 w_msg_buffer[MXL_HYDRA_OEM_MAX_BLOCK_WRITE_LENGTH -
+ (MXL_HYDRA_I2C_HDR_SIZE + MXL_HYDRA_REG_SIZE_IN_BYTES)];
+
+ do {
+ size = orig_size = (((u32)(data_count + block_size)) > total_size) ?
+ (total_size - data_count) : block_size;
+
+ if (orig_size & 3)
+ size = (orig_size + 4) & ~3;
+ w_buf_ptr = &w_msg_buffer[0];
+ memset((void *) w_buf_ptr, 0, size);
+ memcpy((void *) w_buf_ptr, (void *) data_ptr, orig_size);
+ convert_endian(1, size, w_buf_ptr);
+ status = write_firmware_block(state, mem_addr, size, w_buf_ptr);
+ if (status)
+ return status;
+ data_count += size;
+ mem_addr += size;
+ data_ptr += size;
+ } while (data_count < total_size);
+
+ return status;
+}
+
+static int do_firmware_download(struct mxl *state, u8 *mbin_buffer_ptr,
+ u32 mbin_buffer_size)
+
+{
+ int status;
+ u32 index = 0;
+ u32 seg_length = 0;
+ u32 seg_address = 0;
+ struct MBIN_FILE_T *mbin_ptr = (struct MBIN_FILE_T *)mbin_buffer_ptr;
+ struct MBIN_SEGMENT_T *segment_ptr;
+ enum MXL_BOOL_E xcpu_fw_flag = MXL_FALSE;
+
+ if (mbin_ptr->header.id != MBIN_FILE_HEADER_ID) {
+ dev_err(state->i2cdev, "%s: Invalid file header ID (%c)\n",
+ __func__, mbin_ptr->header.id);
+ return -EINVAL;
+ }
+ status = write_register(state, FW_DL_SIGN_ADDR, 0);
+ if (status)
+ return status;
+ segment_ptr = (struct MBIN_SEGMENT_T *) (&mbin_ptr->data[0]);
+ for (index = 0; index < mbin_ptr->header.num_segments; index++) {
+ if (segment_ptr->header.id != MBIN_SEGMENT_HEADER_ID) {
+ dev_err(state->i2cdev, "%s: Invalid segment header ID (%c)\n",
+ __func__, segment_ptr->header.id);
+ return -EINVAL;
+ }
+ seg_length = get_big_endian(24,
+ &(segment_ptr->header.len24[0]));
+ seg_address = get_big_endian(32,
+ &(segment_ptr->header.address[0]));
+
+ if (state->base->type == MXL_HYDRA_DEVICE_568) {
+ if ((((seg_address & 0x90760000) == 0x90760000) ||
+ ((seg_address & 0x90740000) == 0x90740000)) &&
+ (xcpu_fw_flag == MXL_FALSE)) {
+ update_by_mnemonic(state, 0x8003003C, 0, 1, 1);
+ msleep(200);
+ write_register(state, 0x90720000, 0);
+ usleep_range(10000, 11000);
+ xcpu_fw_flag = MXL_TRUE;
+ }
+ status = write_fw_segment(state, seg_address,
+ seg_length,
+ (u8 *) segment_ptr->data);
+ } else {
+ if (((seg_address & 0x90760000) != 0x90760000) &&
+ ((seg_address & 0x90740000) != 0x90740000))
+ status = write_fw_segment(state, seg_address,
+ seg_length, (u8 *) segment_ptr->data);
+ }
+ if (status)
+ return status;
+ segment_ptr = (struct MBIN_SEGMENT_T *)
+ &(segment_ptr->data[((seg_length + 3) / 4) * 4]);
+ }
+ return status;
+}
+
+static int check_fw(struct mxl *state, u8 *mbin, u32 mbin_len)
+{
+ struct MBIN_FILE_HEADER_T *fh = (struct MBIN_FILE_HEADER_T *) mbin;
+ u32 flen = (fh->image_size24[0] << 16) |
+ (fh->image_size24[1] << 8) | fh->image_size24[2];
+ u8 *fw, cs = 0;
+ u32 i;
+
+ if (fh->id != 'M' || fh->fmt_version != '1' || flen > 0x3FFF0) {
+ dev_info(state->i2cdev, "Invalid FW Header\n");
+ return -1;
+ }
+ fw = mbin + sizeof(struct MBIN_FILE_HEADER_T);
+ for (i = 0; i < flen; i += 1)
+ cs += fw[i];
+ if (cs != fh->image_checksum) {
+ dev_info(state->i2cdev, "Invalid FW Checksum\n");
+ return -1;
+ }
+ return 0;
+}
+
+static int firmware_download(struct mxl *state, u8 *mbin, u32 mbin_len)
+{
+ int status;
+ u32 reg_data = 0;
+ struct MXL_HYDRA_SKU_COMMAND_T dev_sku_cfg;
+ u8 cmd_size = sizeof(struct MXL_HYDRA_SKU_COMMAND_T);
+ u8 cmd_buff[sizeof(struct MXL_HYDRA_SKU_COMMAND_T) + 6];
+
+ if (check_fw(state, mbin, mbin_len))
+ return -1;
+
+ /* put CPU into reset */
+ status = update_by_mnemonic(state, 0x8003003C, 0, 1, 0);
+ if (status)
+ return status;
+ usleep_range(1000, 2000);
+
+ /* Reset TX FIFO's, BBAND, XBAR */
+ status = write_register(state, HYDRA_RESET_TRANSPORT_FIFO_REG,
+ HYDRA_RESET_TRANSPORT_FIFO_DATA);
+ if (status)
+ return status;
+ status = write_register(state, HYDRA_RESET_BBAND_REG,
+ HYDRA_RESET_BBAND_DATA);
+ if (status)
+ return status;
+ status = write_register(state, HYDRA_RESET_XBAR_REG,
+ HYDRA_RESET_XBAR_DATA);
+ if (status)
+ return status;
+
+ /* Disable clock to Baseband, Wideband, SerDes,
+ * Alias ext & Transport modules
+ */
+ status = write_register(state, HYDRA_MODULES_CLK_2_REG,
+ HYDRA_DISABLE_CLK_2);
+ if (status)
+ return status;
+ /* Clear Software & Host interrupt status - (Clear on read) */
+ status = read_register(state, HYDRA_PRCM_ROOT_CLK_REG, &reg_data);
+ if (status)
+ return status;
+ status = do_firmware_download(state, mbin, mbin_len);
+ if (status)
+ return status;
+
+ if (state->base->type == MXL_HYDRA_DEVICE_568) {
+ usleep_range(10000, 11000);
+
+ /* bring XCPU out of reset */
+ status = write_register(state, 0x90720000, 1);
+ if (status)
+ return status;
+ msleep(500);
+
+ /* Enable XCPU UART message processing in MCPU */
+ status = write_register(state, 0x9076B510, 1);
+ if (status)
+ return status;
+ } else {
+ /* Bring CPU out of reset */
+ status = update_by_mnemonic(state, 0x8003003C, 0, 1, 1);
+ if (status)
+ return status;
+ /* Wait until FW boots */
+ msleep(150);
+ }
+
+ /* Initialize XPT XBAR */
+ status = write_register(state, XPT_DMD0_BASEADDR, 0x76543210);
+ if (status)
+ return status;
+
+ if (!firmware_is_alive(state))
+ return -1;
+
+ dev_info(state->i2cdev, "Hydra FW alive. Hail!\n");
+
+ /* sometimes register values are wrong shortly
+ * after the first heartbeats
+ */
+ msleep(50);
+
+ dev_sku_cfg.sku_type = state->base->sku_type;
+ BUILD_HYDRA_CMD(MXL_HYDRA_DEV_CFG_SKU_CMD, MXL_CMD_WRITE,
+ cmd_size, &dev_sku_cfg, cmd_buff);
+ status = send_command(state, cmd_size + MXL_HYDRA_CMD_HEADER_SIZE,
+ &cmd_buff[0]);
+
+ return status;
+}
+
+static int cfg_ts_pad_mux(struct mxl *state, enum MXL_BOOL_E enable_serial_ts)
+{
+ int status = 0;
+ u32 pad_mux_value = 0;
+
+ if (enable_serial_ts == MXL_TRUE) {
+ pad_mux_value = 0;
+ if ((state->base->type == MXL_HYDRA_DEVICE_541) ||
+ (state->base->type == MXL_HYDRA_DEVICE_541S))
+ pad_mux_value = 2;
+ } else {
+ if ((state->base->type == MXL_HYDRA_DEVICE_581) ||
+ (state->base->type == MXL_HYDRA_DEVICE_581S))
+ pad_mux_value = 2;
+ else
+ pad_mux_value = 3;
+ }
+
+ switch (state->base->type) {
+ case MXL_HYDRA_DEVICE_561:
+ case MXL_HYDRA_DEVICE_581:
+ case MXL_HYDRA_DEVICE_541:
+ case MXL_HYDRA_DEVICE_541S:
+ case MXL_HYDRA_DEVICE_561S:
+ case MXL_HYDRA_DEVICE_581S:
+ status |= update_by_mnemonic(state, 0x90000170, 24, 3,
+ pad_mux_value);
+ status |= update_by_mnemonic(state, 0x90000170, 28, 3,
+ pad_mux_value);
+ status |= update_by_mnemonic(state, 0x90000174, 0, 3,
+ pad_mux_value);
+ status |= update_by_mnemonic(state, 0x90000174, 4, 3,
+ pad_mux_value);
+ status |= update_by_mnemonic(state, 0x90000174, 8, 3,
+ pad_mux_value);
+ status |= update_by_mnemonic(state, 0x90000174, 12, 3,
+ pad_mux_value);
+ status |= update_by_mnemonic(state, 0x90000174, 16, 3,
+ pad_mux_value);
+ status |= update_by_mnemonic(state, 0x90000174, 20, 3,
+ pad_mux_value);
+ status |= update_by_mnemonic(state, 0x90000174, 24, 3,
+ pad_mux_value);
+ status |= update_by_mnemonic(state, 0x90000174, 28, 3,
+ pad_mux_value);
+ status |= update_by_mnemonic(state, 0x90000178, 0, 3,
+ pad_mux_value);
+ status |= update_by_mnemonic(state, 0x90000178, 4, 3,
+ pad_mux_value);
+ status |= update_by_mnemonic(state, 0x90000178, 8, 3,
+ pad_mux_value);
+ break;
+
+ case MXL_HYDRA_DEVICE_544:
+ case MXL_HYDRA_DEVICE_542:
+ status |= update_by_mnemonic(state, 0x9000016C, 4, 3, 1);
+ status |= update_by_mnemonic(state, 0x9000016C, 8, 3, 0);
+ status |= update_by_mnemonic(state, 0x9000016C, 12, 3, 0);
+ status |= update_by_mnemonic(state, 0x9000016C, 16, 3, 0);
+ status |= update_by_mnemonic(state, 0x90000170, 0, 3, 0);
+ status |= update_by_mnemonic(state, 0x90000178, 12, 3, 1);
+ status |= update_by_mnemonic(state, 0x90000178, 16, 3, 1);
+ status |= update_by_mnemonic(state, 0x90000178, 20, 3, 1);
+ status |= update_by_mnemonic(state, 0x90000178, 24, 3, 1);
+ status |= update_by_mnemonic(state, 0x9000017C, 0, 3, 1);
+ status |= update_by_mnemonic(state, 0x9000017C, 4, 3, 1);
+ if (enable_serial_ts == MXL_ENABLE) {
+ status |= update_by_mnemonic(state,
+ 0x90000170, 4, 3, 0);
+ status |= update_by_mnemonic(state,
+ 0x90000170, 8, 3, 0);
+ status |= update_by_mnemonic(state,
+ 0x90000170, 12, 3, 0);
+ status |= update_by_mnemonic(state,
+ 0x90000170, 16, 3, 0);
+ status |= update_by_mnemonic(state,
+ 0x90000170, 20, 3, 1);
+ status |= update_by_mnemonic(state,
+ 0x90000170, 24, 3, 1);
+ status |= update_by_mnemonic(state,
+ 0x90000170, 28, 3, 2);
+ status |= update_by_mnemonic(state,
+ 0x90000174, 0, 3, 2);
+ status |= update_by_mnemonic(state,
+ 0x90000174, 4, 3, 2);
+ status |= update_by_mnemonic(state,
+ 0x90000174, 8, 3, 2);
+ status |= update_by_mnemonic(state,
+ 0x90000174, 12, 3, 2);
+ status |= update_by_mnemonic(state,
+ 0x90000174, 16, 3, 2);
+ status |= update_by_mnemonic(state,
+ 0x90000174, 20, 3, 2);
+ status |= update_by_mnemonic(state,
+ 0x90000174, 24, 3, 2);
+ status |= update_by_mnemonic(state,
+ 0x90000174, 28, 3, 2);
+ status |= update_by_mnemonic(state,
+ 0x90000178, 0, 3, 2);
+ status |= update_by_mnemonic(state,
+ 0x90000178, 4, 3, 2);
+ status |= update_by_mnemonic(state,
+ 0x90000178, 8, 3, 2);
+ } else {
+ status |= update_by_mnemonic(state,
+ 0x90000170, 4, 3, 3);
+ status |= update_by_mnemonic(state,
+ 0x90000170, 8, 3, 3);
+ status |= update_by_mnemonic(state,
+ 0x90000170, 12, 3, 3);
+ status |= update_by_mnemonic(state,
+ 0x90000170, 16, 3, 3);
+ status |= update_by_mnemonic(state,
+ 0x90000170, 20, 3, 3);
+ status |= update_by_mnemonic(state,
+ 0x90000170, 24, 3, 3);
+ status |= update_by_mnemonic(state,
+ 0x90000170, 28, 3, 3);
+ status |= update_by_mnemonic(state,
+ 0x90000174, 0, 3, 3);
+ status |= update_by_mnemonic(state,
+ 0x90000174, 4, 3, 3);
+ status |= update_by_mnemonic(state,
+ 0x90000174, 8, 3, 3);
+ status |= update_by_mnemonic(state,
+ 0x90000174, 12, 3, 3);
+ status |= update_by_mnemonic(state,
+ 0x90000174, 16, 3, 3);
+ status |= update_by_mnemonic(state,
+ 0x90000174, 20, 3, 1);
+ status |= update_by_mnemonic(state,
+ 0x90000174, 24, 3, 1);
+ status |= update_by_mnemonic(state,
+ 0x90000174, 28, 3, 1);
+ status |= update_by_mnemonic(state,
+ 0x90000178, 0, 3, 1);
+ status |= update_by_mnemonic(state,
+ 0x90000178, 4, 3, 1);
+ status |= update_by_mnemonic(state,
+ 0x90000178, 8, 3, 1);
+ }
+ break;
+
+ case MXL_HYDRA_DEVICE_568:
+ if (enable_serial_ts == MXL_FALSE) {
+ status |= update_by_mnemonic(state,
+ 0x9000016C, 8, 3, 5);
+ status |= update_by_mnemonic(state,
+ 0x9000016C, 12, 3, 5);
+ status |= update_by_mnemonic(state,
+ 0x9000016C, 16, 3, 5);
+ status |= update_by_mnemonic(state,
+ 0x9000016C, 20, 3, 5);
+ status |= update_by_mnemonic(state,
+ 0x9000016C, 24, 3, 5);
+ status |= update_by_mnemonic(state,
+ 0x9000016C, 28, 3, 5);
+ status |= update_by_mnemonic(state,
+ 0x90000170, 0, 3, 5);
+ status |= update_by_mnemonic(state,
+ 0x90000170, 4, 3, 5);
+ status |= update_by_mnemonic(state,
+ 0x90000170, 8, 3, 5);
+ status |= update_by_mnemonic(state,
+ 0x90000170, 12, 3, 5);
+ status |= update_by_mnemonic(state,
+ 0x90000170, 16, 3, 5);
+ status |= update_by_mnemonic(state,
+ 0x90000170, 20, 3, 5);
+
+ status |= update_by_mnemonic(state,
+ 0x90000170, 24, 3, pad_mux_value);
+ status |= update_by_mnemonic(state,
+ 0x90000174, 0, 3, pad_mux_value);
+ status |= update_by_mnemonic(state,
+ 0x90000174, 4, 3, pad_mux_value);
+ status |= update_by_mnemonic(state,
+ 0x90000174, 8, 3, pad_mux_value);
+ status |= update_by_mnemonic(state,
+ 0x90000174, 12, 3, pad_mux_value);
+ status |= update_by_mnemonic(state,
+ 0x90000174, 16, 3, pad_mux_value);
+ status |= update_by_mnemonic(state,
+ 0x90000174, 20, 3, pad_mux_value);
+ status |= update_by_mnemonic(state,
+ 0x90000174, 24, 3, pad_mux_value);
+ status |= update_by_mnemonic(state,
+ 0x90000174, 28, 3, pad_mux_value);
+ status |= update_by_mnemonic(state,
+ 0x90000178, 0, 3, pad_mux_value);
+ status |= update_by_mnemonic(state,
+ 0x90000178, 4, 3, pad_mux_value);
+
+ status |= update_by_mnemonic(state,
+ 0x90000178, 8, 3, 5);
+ status |= update_by_mnemonic(state,
+ 0x90000178, 12, 3, 5);
+ status |= update_by_mnemonic(state,
+ 0x90000178, 16, 3, 5);
+ status |= update_by_mnemonic(state,
+ 0x90000178, 20, 3, 5);
+ status |= update_by_mnemonic(state,
+ 0x90000178, 24, 3, 5);
+ status |= update_by_mnemonic(state,
+ 0x90000178, 28, 3, 5);
+ status |= update_by_mnemonic(state,
+ 0x9000017C, 0, 3, 5);
+ status |= update_by_mnemonic(state,
+ 0x9000017C, 4, 3, 5);
+ } else {
+ status |= update_by_mnemonic(state,
+ 0x90000170, 4, 3, pad_mux_value);
+ status |= update_by_mnemonic(state,
+ 0x90000170, 8, 3, pad_mux_value);
+ status |= update_by_mnemonic(state,
+ 0x90000170, 12, 3, pad_mux_value);
+ status |= update_by_mnemonic(state,
+ 0x90000170, 16, 3, pad_mux_value);
+ status |= update_by_mnemonic(state,
+ 0x90000170, 20, 3, pad_mux_value);
+ status |= update_by_mnemonic(state,
+ 0x90000170, 24, 3, pad_mux_value);
+ status |= update_by_mnemonic(state,
+ 0x90000170, 28, 3, pad_mux_value);
+ status |= update_by_mnemonic(state,
+ 0x90000174, 0, 3, pad_mux_value);
+ status |= update_by_mnemonic(state,
+ 0x90000174, 4, 3, pad_mux_value);
+ status |= update_by_mnemonic(state,
+ 0x90000174, 8, 3, pad_mux_value);
+ status |= update_by_mnemonic(state,
+ 0x90000174, 12, 3, pad_mux_value);
+ }
+ break;
+
+
+ case MXL_HYDRA_DEVICE_584:
+ default:
+ status |= update_by_mnemonic(state,
+ 0x90000170, 4, 3, pad_mux_value);
+ status |= update_by_mnemonic(state,
+ 0x90000170, 8, 3, pad_mux_value);
+ status |= update_by_mnemonic(state,
+ 0x90000170, 12, 3, pad_mux_value);
+ status |= update_by_mnemonic(state,
+ 0x90000170, 16, 3, pad_mux_value);
+ status |= update_by_mnemonic(state,
+ 0x90000170, 20, 3, pad_mux_value);
+ status |= update_by_mnemonic(state,
+ 0x90000170, 24, 3, pad_mux_value);
+ status |= update_by_mnemonic(state,
+ 0x90000170, 28, 3, pad_mux_value);
+ status |= update_by_mnemonic(state,
+ 0x90000174, 0, 3, pad_mux_value);
+ status |= update_by_mnemonic(state,
+ 0x90000174, 4, 3, pad_mux_value);
+ status |= update_by_mnemonic(state,
+ 0x90000174, 8, 3, pad_mux_value);
+ status |= update_by_mnemonic(state,
+ 0x90000174, 12, 3, pad_mux_value);
+ break;
+ }
+ return status;
+}
+
+static int set_drive_strength(struct mxl *state,
+ enum MXL_HYDRA_TS_DRIVE_STRENGTH_E ts_drive_strength)
+{
+ int stat = 0;
+ u32 val;
+
+ read_register(state, 0x90000194, &val);
+ dev_info(state->i2cdev, "DIGIO = %08x\n", val);
+ dev_info(state->i2cdev, "set drive_strength = %u\n", ts_drive_strength);
+
+
+ stat |= update_by_mnemonic(state, 0x90000194, 0, 3, ts_drive_strength);
+ stat |= update_by_mnemonic(state, 0x90000194, 20, 3, ts_drive_strength);
+ stat |= update_by_mnemonic(state, 0x90000194, 24, 3, ts_drive_strength);
+ stat |= update_by_mnemonic(state, 0x90000198, 12, 3, ts_drive_strength);
+ stat |= update_by_mnemonic(state, 0x90000198, 16, 3, ts_drive_strength);
+ stat |= update_by_mnemonic(state, 0x90000198, 20, 3, ts_drive_strength);
+ stat |= update_by_mnemonic(state, 0x90000198, 24, 3, ts_drive_strength);
+ stat |= update_by_mnemonic(state, 0x9000019C, 0, 3, ts_drive_strength);
+ stat |= update_by_mnemonic(state, 0x9000019C, 4, 3, ts_drive_strength);
+ stat |= update_by_mnemonic(state, 0x9000019C, 8, 3, ts_drive_strength);
+ stat |= update_by_mnemonic(state, 0x9000019C, 24, 3, ts_drive_strength);
+ stat |= update_by_mnemonic(state, 0x9000019C, 28, 3, ts_drive_strength);
+ stat |= update_by_mnemonic(state, 0x900001A0, 0, 3, ts_drive_strength);
+ stat |= update_by_mnemonic(state, 0x900001A0, 4, 3, ts_drive_strength);
+ stat |= update_by_mnemonic(state, 0x900001A0, 20, 3, ts_drive_strength);
+ stat |= update_by_mnemonic(state, 0x900001A0, 24, 3, ts_drive_strength);
+ stat |= update_by_mnemonic(state, 0x900001A0, 28, 3, ts_drive_strength);
+
+ return stat;
+}
+
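+/* The tuner activate command completes asynchronously: poll bit <tuner>
+ * of HYDRA_TUNER_ENABLE_COMPLETE (up to 10 times, 20 ms apart) until it
+ * reflects the requested enable state.
+ */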
+static int enable_tuner(struct mxl *state, u32 tuner, u32 enable)
+{
+ int stat = 0;
+ struct MXL_HYDRA_TUNER_CMD ctrl_tuner_cmd;
+ u8 cmd_size = sizeof(ctrl_tuner_cmd);
+ u8 cmd_buff[MXL_HYDRA_OEM_MAX_CMD_BUFF_LEN];
+ u32 val, count = 10;
+
+ ctrl_tuner_cmd.tuner_id = tuner;
+ ctrl_tuner_cmd.enable = enable;
+ BUILD_HYDRA_CMD(MXL_HYDRA_TUNER_ACTIVATE_CMD, MXL_CMD_WRITE,
+ cmd_size, &ctrl_tuner_cmd, cmd_buff);
+ stat = send_command(state, cmd_size + MXL_HYDRA_CMD_HEADER_SIZE,
+ &cmd_buff[0]);
+ if (stat)
+ return stat;
+ read_register(state, HYDRA_TUNER_ENABLE_COMPLETE, &val);
+ while (--count && ((val >> tuner) & 1) != enable) {
+ msleep(20);
+ read_register(state, HYDRA_TUNER_ENABLE_COMPLETE, &val);
+ }
+ if (!count)
+ return -1;
+ read_register(state, HYDRA_TUNER_ENABLE_COMPLETE, &val);
+ dev_dbg(state->i2cdev, "tuner %u ready = %u\n",
+ tuner, (val >> tuner) & 1);
+
+ return 0;
+}
+
+
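+/* Configure the transport-stream output of one demod: NCO clock rate,
+ * continuous vs. gapped clock, sync/valid/clock polarity, clock phase,
+ * bit order, sync pulse width and error signalling, using the per-demod
+ * register field tables below.
+ */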
+static int config_ts(struct mxl *state, enum MXL_HYDRA_DEMOD_ID_E demod_id,
+ struct MXL_HYDRA_MPEGOUT_PARAM_T *mpeg_out_param_ptr)
+{
+ int status = 0;
+ u32 nco_count_min = 0;
+ u32 clk_type = 0;
+
+ struct MXL_REG_FIELD_T xpt_sync_polarity[MXL_HYDRA_DEMOD_MAX] = {
+ {0x90700010, 8, 1}, {0x90700010, 9, 1},
+ {0x90700010, 10, 1}, {0x90700010, 11, 1},
+ {0x90700010, 12, 1}, {0x90700010, 13, 1},
+ {0x90700010, 14, 1}, {0x90700010, 15, 1} };
+ struct MXL_REG_FIELD_T xpt_clock_polarity[MXL_HYDRA_DEMOD_MAX] = {
+ {0x90700010, 16, 1}, {0x90700010, 17, 1},
+ {0x90700010, 18, 1}, {0x90700010, 19, 1},
+ {0x90700010, 20, 1}, {0x90700010, 21, 1},
+ {0x90700010, 22, 1}, {0x90700010, 23, 1} };
+ struct MXL_REG_FIELD_T xpt_valid_polarity[MXL_HYDRA_DEMOD_MAX] = {
+ {0x90700014, 0, 1}, {0x90700014, 1, 1},
+ {0x90700014, 2, 1}, {0x90700014, 3, 1},
+ {0x90700014, 4, 1}, {0x90700014, 5, 1},
+ {0x90700014, 6, 1}, {0x90700014, 7, 1} };
+ struct MXL_REG_FIELD_T xpt_ts_clock_phase[MXL_HYDRA_DEMOD_MAX] = {
+ {0x90700018, 0, 3}, {0x90700018, 4, 3},
+ {0x90700018, 8, 3}, {0x90700018, 12, 3},
+ {0x90700018, 16, 3}, {0x90700018, 20, 3},
+ {0x90700018, 24, 3}, {0x90700018, 28, 3} };
+ struct MXL_REG_FIELD_T xpt_lsb_first[MXL_HYDRA_DEMOD_MAX] = {
+ {0x9070000C, 16, 1}, {0x9070000C, 17, 1},
+ {0x9070000C, 18, 1}, {0x9070000C, 19, 1},
+ {0x9070000C, 20, 1}, {0x9070000C, 21, 1},
+ {0x9070000C, 22, 1}, {0x9070000C, 23, 1} };
+ struct MXL_REG_FIELD_T xpt_sync_byte[MXL_HYDRA_DEMOD_MAX] = {
+ {0x90700010, 0, 1}, {0x90700010, 1, 1},
+ {0x90700010, 2, 1}, {0x90700010, 3, 1},
+ {0x90700010, 4, 1}, {0x90700010, 5, 1},
+ {0x90700010, 6, 1}, {0x90700010, 7, 1} };
+ struct MXL_REG_FIELD_T xpt_enable_output[MXL_HYDRA_DEMOD_MAX] = {
+ {0x9070000C, 0, 1}, {0x9070000C, 1, 1},
+ {0x9070000C, 2, 1}, {0x9070000C, 3, 1},
+ {0x9070000C, 4, 1}, {0x9070000C, 5, 1},
+ {0x9070000C, 6, 1}, {0x9070000C, 7, 1} };
+ struct MXL_REG_FIELD_T xpt_err_replace_sync[MXL_HYDRA_DEMOD_MAX] = {
+ {0x9070000C, 24, 1}, {0x9070000C, 25, 1},
+ {0x9070000C, 26, 1}, {0x9070000C, 27, 1},
+ {0x9070000C, 28, 1}, {0x9070000C, 29, 1},
+ {0x9070000C, 30, 1}, {0x9070000C, 31, 1} };
+ struct MXL_REG_FIELD_T xpt_err_replace_valid[MXL_HYDRA_DEMOD_MAX] = {
+ {0x90700014, 8, 1}, {0x90700014, 9, 1},
+ {0x90700014, 10, 1}, {0x90700014, 11, 1},
+ {0x90700014, 12, 1}, {0x90700014, 13, 1},
+ {0x90700014, 14, 1}, {0x90700014, 15, 1} };
+ struct MXL_REG_FIELD_T xpt_continuous_clock[MXL_HYDRA_DEMOD_MAX] = {
+ {0x907001D4, 0, 1}, {0x907001D4, 1, 1},
+ {0x907001D4, 2, 1}, {0x907001D4, 3, 1},
+ {0x907001D4, 4, 1}, {0x907001D4, 5, 1},
+ {0x907001D4, 6, 1}, {0x907001D4, 7, 1} };
+ struct MXL_REG_FIELD_T xpt_nco_clock_rate[MXL_HYDRA_DEMOD_MAX] = {
+ {0x90700044, 16, 80}, {0x90700044, 16, 81},
+ {0x90700044, 16, 82}, {0x90700044, 16, 83},
+ {0x90700044, 16, 84}, {0x90700044, 16, 85},
+ {0x90700044, 16, 86}, {0x90700044, 16, 87} };
+
+ demod_id = state->base->ts_map[demod_id];
+
+ if (mpeg_out_param_ptr->enable == MXL_ENABLE) {
+ if (mpeg_out_param_ptr->mpeg_mode ==
+ MXL_HYDRA_MPEG_MODE_PARALLEL) {
+ } else {
+ cfg_ts_pad_mux(state, MXL_TRUE);
+ update_by_mnemonic(state,
+ 0x90700010, 27, 1, MXL_FALSE);
+ }
+ }
+
+ nco_count_min =
+ (u32)(MXL_HYDRA_NCO_CLK / mpeg_out_param_ptr->max_mpeg_clk_rate);
+
+ if (state->base->chipversion >= 2) {
+ status |= update_by_mnemonic(state,
+ xpt_nco_clock_rate[demod_id].reg_addr, /* Reg Addr */
+ xpt_nco_clock_rate[demod_id].lsb_pos, /* LSB pos */
+ xpt_nco_clock_rate[demod_id].num_of_bits, /* Num of bits */
+ nco_count_min); /* Data */
+ } else
+ update_by_mnemonic(state, 0x90700044, 16, 8, nco_count_min);
+
+ if (mpeg_out_param_ptr->mpeg_clk_type == MXL_HYDRA_MPEG_CLK_CONTINUOUS)
+ clk_type = 1;
+
+ if (mpeg_out_param_ptr->mpeg_mode < MXL_HYDRA_MPEG_MODE_PARALLEL) {
+ status |= update_by_mnemonic(state,
+ xpt_continuous_clock[demod_id].reg_addr,
+ xpt_continuous_clock[demod_id].lsb_pos,
+ xpt_continuous_clock[demod_id].num_of_bits,
+ clk_type);
+ } else
+ update_by_mnemonic(state, 0x907001D4, 8, 1, clk_type);
+
+ status |= update_by_mnemonic(state,
+ xpt_sync_polarity[demod_id].reg_addr,
+ xpt_sync_polarity[demod_id].lsb_pos,
+ xpt_sync_polarity[demod_id].num_of_bits,
+ mpeg_out_param_ptr->mpeg_sync_pol);
+
+ status |= update_by_mnemonic(state,
+ xpt_valid_polarity[demod_id].reg_addr,
+ xpt_valid_polarity[demod_id].lsb_pos,
+ xpt_valid_polarity[demod_id].num_of_bits,
+ mpeg_out_param_ptr->mpeg_valid_pol);
+
+ status |= update_by_mnemonic(state,
+ xpt_clock_polarity[demod_id].reg_addr,
+ xpt_clock_polarity[demod_id].lsb_pos,
+ xpt_clock_polarity[demod_id].num_of_bits,
+ mpeg_out_param_ptr->mpeg_clk_pol);
+
+ status |= update_by_mnemonic(state,
+ xpt_sync_byte[demod_id].reg_addr,
+ xpt_sync_byte[demod_id].lsb_pos,
+ xpt_sync_byte[demod_id].num_of_bits,
+ mpeg_out_param_ptr->mpeg_sync_pulse_width);
+
+ status |= update_by_mnemonic(state,
+ xpt_ts_clock_phase[demod_id].reg_addr,
+ xpt_ts_clock_phase[demod_id].lsb_pos,
+ xpt_ts_clock_phase[demod_id].num_of_bits,
+ mpeg_out_param_ptr->mpeg_clk_phase);
+
+ status |= update_by_mnemonic(state,
+ xpt_lsb_first[demod_id].reg_addr,
+ xpt_lsb_first[demod_id].lsb_pos,
+ xpt_lsb_first[demod_id].num_of_bits,
+ mpeg_out_param_ptr->lsb_or_msb_first);
+
+ switch (mpeg_out_param_ptr->mpeg_error_indication) {
+ case MXL_HYDRA_MPEG_ERR_REPLACE_SYNC:
+ status |= update_by_mnemonic(state,
+ xpt_err_replace_sync[demod_id].reg_addr,
+ xpt_err_replace_sync[demod_id].lsb_pos,
+ xpt_err_replace_sync[demod_id].num_of_bits,
+ MXL_TRUE);
+ status |= update_by_mnemonic(state,
+ xpt_err_replace_valid[demod_id].reg_addr,
+ xpt_err_replace_valid[demod_id].lsb_pos,
+ xpt_err_replace_valid[demod_id].num_of_bits,
+ MXL_FALSE);
+ break;
+
+ case MXL_HYDRA_MPEG_ERR_REPLACE_VALID:
+ status |= update_by_mnemonic(state,
+ xpt_err_replace_sync[demod_id].reg_addr,
+ xpt_err_replace_sync[demod_id].lsb_pos,
+ xpt_err_replace_sync[demod_id].num_of_bits,
+ MXL_FALSE);
+
+ status |= update_by_mnemonic(state,
+ xpt_err_replace_valid[demod_id].reg_addr,
+ xpt_err_replace_valid[demod_id].lsb_pos,
+ xpt_err_replace_valid[demod_id].num_of_bits,
+ MXL_TRUE);
+ break;
+
+ case MXL_HYDRA_MPEG_ERR_INDICATION_DISABLED:
+ default:
+ status |= update_by_mnemonic(state,
+ xpt_err_replace_sync[demod_id].reg_addr,
+ xpt_err_replace_sync[demod_id].lsb_pos,
+ xpt_err_replace_sync[demod_id].num_of_bits,
+ MXL_FALSE);
+
+ status |= update_by_mnemonic(state,
+ xpt_err_replace_valid[demod_id].reg_addr,
+ xpt_err_replace_valid[demod_id].lsb_pos,
+ xpt_err_replace_valid[demod_id].num_of_bits,
+ MXL_FALSE);
+
+ break;
+
+ }
+
+ if (mpeg_out_param_ptr->mpeg_mode != MXL_HYDRA_MPEG_MODE_PARALLEL) {
+ status |= update_by_mnemonic(state,
+ xpt_enable_output[demod_id].reg_addr,
+ xpt_enable_output[demod_id].lsb_pos,
+ xpt_enable_output[demod_id].num_of_bits,
+ mpeg_out_param_ptr->enable);
+ }
+ return status;
+}
+
+static int config_mux(struct mxl *state)
+{
+ update_by_mnemonic(state, 0x9070000C, 0, 1, 0);
+ update_by_mnemonic(state, 0x9070000C, 1, 1, 0);
+ update_by_mnemonic(state, 0x9070000C, 2, 1, 0);
+ update_by_mnemonic(state, 0x9070000C, 3, 1, 0);
+ update_by_mnemonic(state, 0x9070000C, 4, 1, 0);
+ update_by_mnemonic(state, 0x9070000C, 5, 1, 0);
+ update_by_mnemonic(state, 0x9070000C, 6, 1, 0);
+ update_by_mnemonic(state, 0x9070000C, 7, 1, 0);
+ update_by_mnemonic(state, 0x90700008, 0, 2, 1);
+ update_by_mnemonic(state, 0x90700008, 2, 2, 1);
+ return 0;
+}
+
+static int load_fw(struct mxl *state, struct mxl5xx_cfg *cfg)
+{
+ int stat = 0;
+ u8 *buf;
+
+ if (cfg->fw)
+ return firmware_download(state, cfg->fw, cfg->fw_len);
+
+ if (!cfg->fw_read)
+ return -1;
+
+ buf = vmalloc(0x40000);
+ if (!buf)
+ return -ENOMEM;
+
+ cfg->fw_read(cfg->fw_priv, buf, 0x40000);
+ stat = firmware_download(state, buf, 0x40000);
+ vfree(buf);
+
+ return stat;
+}
+
+static int validate_sku(struct mxl *state)
+{
+ u32 pad_mux_bond = 0, prcm_chip_id = 0, prcm_so_cid = 0;
+ int status;
+ u32 type = state->base->type;
+
+ status = read_by_mnemonic(state, 0x90000190, 0, 3, &pad_mux_bond);
+ status |= read_by_mnemonic(state, 0x80030000, 0, 12, &prcm_chip_id);
+ status |= read_by_mnemonic(state, 0x80030004, 24, 8, &prcm_so_cid);
+ if (status)
+ return -1;
+
+ dev_info(state->i2cdev, "padMuxBond=%08x, prcmChipId=%08x, prcmSoCId=%08x\n",
+ pad_mux_bond, prcm_chip_id, prcm_so_cid);
+
+ if (prcm_chip_id != 0x560) {
+ switch (pad_mux_bond) {
+ case MXL_HYDRA_SKU_ID_581:
+ if (type == MXL_HYDRA_DEVICE_581)
+ return 0;
+ if (type == MXL_HYDRA_DEVICE_581S) {
+ state->base->type = MXL_HYDRA_DEVICE_581;
+ return 0;
+ }
+ break;
+ case MXL_HYDRA_SKU_ID_584:
+ if (type == MXL_HYDRA_DEVICE_584)
+ return 0;
+ break;
+ case MXL_HYDRA_SKU_ID_544:
+ if (type == MXL_HYDRA_DEVICE_544)
+ return 0;
+ if (type == MXL_HYDRA_DEVICE_542)
+ return 0;
+ break;
+ case MXL_HYDRA_SKU_ID_582:
+ if (type == MXL_HYDRA_DEVICE_582)
+ return 0;
+ break;
+ default:
+ return -1;
+ }
+ } else {
+
+ }
+ return -1;
+}
+
+static int get_fwinfo(struct mxl *state)
+{
+ int status;
+ u32 val = 0;
+
+ status = read_by_mnemonic(state, 0x90000190, 0, 3, &val);
+ if (status)
+ return status;
+ dev_info(state->i2cdev, "chipID=%08x\n", val);
+
+ status = read_by_mnemonic(state, 0x80030004, 8, 8, &val);
+ if (status)
+ return status;
+ dev_info(state->i2cdev, "chipVer=%08x\n", val);
+
+ status = read_register(state, HYDRA_FIRMWARE_VERSION, &val);
+ if (status)
+ return status;
+ dev_info(state->i2cdev, "FWVer=%08x\n", val);
+
+ state->base->fwversion = val;
+ return status;
+}
+
+
+static u8 ts_map1_to_1[MXL_HYDRA_DEMOD_MAX] = {
+ MXL_HYDRA_DEMOD_ID_0,
+ MXL_HYDRA_DEMOD_ID_1,
+ MXL_HYDRA_DEMOD_ID_2,
+ MXL_HYDRA_DEMOD_ID_3,
+ MXL_HYDRA_DEMOD_ID_4,
+ MXL_HYDRA_DEMOD_ID_5,
+ MXL_HYDRA_DEMOD_ID_6,
+ MXL_HYDRA_DEMOD_ID_7,
+};
+
+static u8 ts_map54x[MXL_HYDRA_DEMOD_MAX] = {
+ MXL_HYDRA_DEMOD_ID_2,
+ MXL_HYDRA_DEMOD_ID_3,
+ MXL_HYDRA_DEMOD_ID_4,
+ MXL_HYDRA_DEMOD_ID_5,
+ MXL_HYDRA_DEMOD_MAX,
+ MXL_HYDRA_DEMOD_MAX,
+ MXL_HYDRA_DEMOD_MAX,
+ MXL_HYDRA_DEMOD_MAX,
+};
+
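+/* One-time device setup: fill in the per-SKU demod/tuner counts and TS
+ * map, validate the SKU against the pad-mux bonding and chip-id
+ * registers, load the firmware if no heartbeat is detected, then
+ * configure the TS mux and program every demod for 3-wire serial TS
+ * output.
+ */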
+static int probe(struct mxl *state, struct mxl5xx_cfg *cfg)
+{
+ u32 chipver;
+ int fw, status, j;
+ struct MXL_HYDRA_MPEGOUT_PARAM_T mpeg_interface_cfg;
+
+ state->base->ts_map = ts_map1_to_1;
+
+ switch (state->base->type) {
+ case MXL_HYDRA_DEVICE_581:
+ case MXL_HYDRA_DEVICE_581S:
+ state->base->can_clkout = 1;
+ state->base->demod_num = 8;
+ state->base->tuner_num = 1;
+ state->base->sku_type = MXL_HYDRA_SKU_TYPE_581;
+ break;
+ case MXL_HYDRA_DEVICE_582:
+ state->base->can_clkout = 1;
+ state->base->demod_num = 8;
+ state->base->tuner_num = 3;
+ state->base->sku_type = MXL_HYDRA_SKU_TYPE_582;
+ break;
+ case MXL_HYDRA_DEVICE_585:
+ state->base->can_clkout = 0;
+ state->base->demod_num = 8;
+ state->base->tuner_num = 4;
+ state->base->sku_type = MXL_HYDRA_SKU_TYPE_585;
+ break;
+ case MXL_HYDRA_DEVICE_544:
+ state->base->can_clkout = 0;
+ state->base->demod_num = 4;
+ state->base->tuner_num = 4;
+ state->base->sku_type = MXL_HYDRA_SKU_TYPE_544;
+ state->base->ts_map = ts_map54x;
+ break;
+ case MXL_HYDRA_DEVICE_541:
+ case MXL_HYDRA_DEVICE_541S:
+ state->base->can_clkout = 0;
+ state->base->demod_num = 4;
+ state->base->tuner_num = 1;
+ state->base->sku_type = MXL_HYDRA_SKU_TYPE_541;
+ state->base->ts_map = ts_map54x;
+ break;
+ case MXL_HYDRA_DEVICE_561:
+ case MXL_HYDRA_DEVICE_561S:
+ state->base->can_clkout = 0;
+ state->base->demod_num = 6;
+ state->base->tuner_num = 1;
+ state->base->sku_type = MXL_HYDRA_SKU_TYPE_561;
+ break;
+ case MXL_HYDRA_DEVICE_568:
+ state->base->can_clkout = 0;
+ state->base->demod_num = 8;
+ state->base->tuner_num = 1;
+ state->base->chan_bond = 1;
+ state->base->sku_type = MXL_HYDRA_SKU_TYPE_568;
+ break;
+ case MXL_HYDRA_DEVICE_542:
+ state->base->can_clkout = 1;
+ state->base->demod_num = 4;
+ state->base->tuner_num = 3;
+ state->base->sku_type = MXL_HYDRA_SKU_TYPE_542;
+ state->base->ts_map = ts_map54x;
+ break;
+ case MXL_HYDRA_DEVICE_TEST:
+ case MXL_HYDRA_DEVICE_584:
+ default:
+ state->base->can_clkout = 0;
+ state->base->demod_num = 8;
+ state->base->tuner_num = 4;
+ state->base->sku_type = MXL_HYDRA_SKU_TYPE_584;
+ break;
+ }
+
+ status = validate_sku(state);
+ if (status)
+ return status;
+
+ update_by_mnemonic(state, 0x80030014, 9, 1, 1);
+ update_by_mnemonic(state, 0x8003003C, 12, 1, 1);
+ status = read_by_mnemonic(state, 0x80030000, 12, 4, &chipver);
+ if (status)
+ state->base->chipversion = 0;
+ else
+ state->base->chipversion = (chipver == 2) ? 2 : 1;
+ dev_info(state->i2cdev, "Hydra chip version %u\n",
+ state->base->chipversion);
+
+ cfg_dev_xtal(state, cfg->clk, cfg->cap, 0);
+
+ fw = firmware_is_alive(state);
+ if (!fw) {
+ status = load_fw(state, cfg);
+ if (status)
+ return status;
+ }
+ get_fwinfo(state);
+
+ config_mux(state);
+ mpeg_interface_cfg.enable = MXL_ENABLE;
+ mpeg_interface_cfg.lsb_or_msb_first = MXL_HYDRA_MPEG_SERIAL_MSB_1ST;
+ /* supports only 0-104 MHz and 139 MHz */
+ if (cfg->ts_clk)
+ mpeg_interface_cfg.max_mpeg_clk_rate = cfg->ts_clk;
+ else
+ mpeg_interface_cfg.max_mpeg_clk_rate = 69; /* 139; */
+ mpeg_interface_cfg.mpeg_clk_phase = MXL_HYDRA_MPEG_CLK_PHASE_SHIFT_0_DEG;
+ mpeg_interface_cfg.mpeg_clk_pol = MXL_HYDRA_MPEG_CLK_IN_PHASE;
+ /* MXL_HYDRA_MPEG_CLK_GAPPED; */
+ mpeg_interface_cfg.mpeg_clk_type = MXL_HYDRA_MPEG_CLK_CONTINUOUS;
+ mpeg_interface_cfg.mpeg_error_indication =
+ MXL_HYDRA_MPEG_ERR_INDICATION_DISABLED;
+ mpeg_interface_cfg.mpeg_mode = MXL_HYDRA_MPEG_MODE_SERIAL_3_WIRE;
+ mpeg_interface_cfg.mpeg_sync_pol = MXL_HYDRA_MPEG_ACTIVE_HIGH;
+ mpeg_interface_cfg.mpeg_sync_pulse_width = MXL_HYDRA_MPEG_SYNC_WIDTH_BIT;
+ mpeg_interface_cfg.mpeg_valid_pol = MXL_HYDRA_MPEG_ACTIVE_HIGH;
+
+ for (j = 0; j < state->base->demod_num; j++) {
+ status = config_ts(state, (enum MXL_HYDRA_DEMOD_ID_E) j,
+ &mpeg_interface_cfg);
+ if (status)
+ return status;
+ }
+ set_drive_strength(state, 1);
+ return 0;
+}
+
+struct dvb_frontend *mxl5xx_attach(struct i2c_adapter *i2c,
+ struct mxl5xx_cfg *cfg, u32 demod, u32 tuner,
+ int (**fn_set_input)(struct dvb_frontend *, int))
+{
+ struct mxl *state;
+ struct mxl_base *base;
+
+ state = kzalloc(sizeof(struct mxl), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ state->demod = demod;
+ state->tuner = tuner;
+ state->tuner_in_use = 0xffffffff;
+ state->i2cdev = &i2c->dev;
+
+ base = match_base(i2c, cfg->adr);
+ if (base) {
+ base->count++;
+ if (base->count > base->demod_num)
+ goto fail;
+ state->base = base;
+ } else {
+ base = kzalloc(sizeof(struct mxl_base), GFP_KERNEL);
+ if (!base)
+ goto fail;
+ base->i2c = i2c;
+ base->adr = cfg->adr;
+ base->type = cfg->type;
+ base->count = 1;
+ mutex_init(&base->i2c_lock);
+ mutex_init(&base->status_lock);
+ mutex_init(&base->tune_lock);
+ INIT_LIST_HEAD(&base->mxls);
+
+ state->base = base;
+ if (probe(state, cfg) < 0) {
+ kfree(base);
+ goto fail;
+ }
+ list_add(&base->mxllist, &mxllist);
+ }
+ state->fe.ops = mxl_ops;
+ state->xbar[0] = 4;
+ state->xbar[1] = demod;
+ state->xbar[2] = 8;
+ state->fe.demodulator_priv = state;
+ *fn_set_input = set_input;
+
+ list_add(&state->mxl, &base->mxls);
+ return &state->fe;
+
+fail:
+ kfree(state);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(mxl5xx_attach);
+
+MODULE_DESCRIPTION("MaxLinear MxL5xx DVB-S/S2 tuner-demodulator driver");
+MODULE_AUTHOR("Ralph and Marcus Metzler, Metzler Brothers Systementwicklung GbR");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb-frontends/mxl5xx.h b/drivers/media/dvb-frontends/mxl5xx.h
new file mode 100644
index 000000000000..532e08111537
--- /dev/null
+++ b/drivers/media/dvb-frontends/mxl5xx.h
@@ -0,0 +1,41 @@
+#ifndef _MXL5XX_H_
+#define _MXL5XX_H_
+
+#include <linux/types.h>
+#include <linux/i2c.h>
+
+#include "dvb_frontend.h"
+
+struct mxl5xx_cfg {
+ u8 adr;
+ u8 type;
+ u32 cap;
+ u32 clk;
+ u32 ts_clk;
+
+ u8 *fw;
+ u32 fw_len;
+
+ int (*fw_read)(void *priv, u8 *buf, u32 len);
+ void *fw_priv;
+};
+
+#if IS_REACHABLE(CONFIG_DVB_MXL5XX)
+
+extern struct dvb_frontend *mxl5xx_attach(struct i2c_adapter *i2c,
+ struct mxl5xx_cfg *cfg, u32 demod, u32 tuner,
+ int (**fn_set_input)(struct dvb_frontend *, int));
+
+#else
+
+static inline struct dvb_frontend *mxl5xx_attach(struct i2c_adapter *i2c,
+ struct mxl5xx_cfg *cfg, u32 demod, u32 tuner,
+ int (**fn_set_input)(struct dvb_frontend *, int))
+{
+ pr_warn("%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+
+#endif /* CONFIG_DVB_MXL5XX */
+
+#endif /* _MXL5XX_H_ */
diff --git a/drivers/media/dvb-frontends/mxl5xx_defs.h b/drivers/media/dvb-frontends/mxl5xx_defs.h
new file mode 100644
index 000000000000..fd9e61e0188f
--- /dev/null
+++ b/drivers/media/dvb-frontends/mxl5xx_defs.h
@@ -0,0 +1,731 @@
+/*
+ * Defines for the MaxLinear MxL58x family of tuners/demods
+ *
+ * Copyright (C) 2014 Digital Devices GmbH
+ *
+ * based on code:
+ * Copyright (c) 2011-2013 MaxLinear, Inc. All rights reserved
+ * which was released under GPL V2
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, as published by the Free Software Foundation.
+ */
+
+enum MXL_BOOL_E {
+ MXL_DISABLE = 0,
+ MXL_ENABLE = 1,
+
+ MXL_FALSE = 0,
+ MXL_TRUE = 1,
+
+ MXL_INVALID = 0,
+ MXL_VALID = 1,
+
+ MXL_NO = 0,
+ MXL_YES = 1,
+
+ MXL_OFF = 0,
+ MXL_ON = 1
+};
+
+/* Firmware-Host Command IDs */
+enum MXL_HYDRA_HOST_CMD_ID_E {
+ /* --Device command IDs-- */
+ MXL_HYDRA_DEV_NO_OP_CMD = 0, /* No OP */
+
+ MXL_HYDRA_DEV_SET_POWER_MODE_CMD = 1,
+ MXL_HYDRA_DEV_SET_OVERWRITE_DEF_CMD = 2,
+
+ /* Host-used CMD, not used by firmware */
+ MXL_HYDRA_DEV_FIRMWARE_DOWNLOAD_CMD = 3,
+
+ /* Additional CONTROL types from DTV */
+ MXL_HYDRA_DEV_SET_BROADCAST_PID_STB_ID_CMD = 4,
+ MXL_HYDRA_DEV_GET_PMM_SLEEP_CMD = 5,
+
+ /* --Tuner command IDs-- */
+ MXL_HYDRA_TUNER_TUNE_CMD = 6,
+ MXL_HYDRA_TUNER_GET_STATUS_CMD = 7,
+
+ /* --Demod command IDs-- */
+ MXL_HYDRA_DEMOD_SET_PARAM_CMD = 8,
+ MXL_HYDRA_DEMOD_GET_STATUS_CMD = 9,
+
+ MXL_HYDRA_DEMOD_RESET_FEC_COUNTER_CMD = 10,
+
+ MXL_HYDRA_DEMOD_SET_PKT_NUM_CMD = 11,
+
+ MXL_HYDRA_DEMOD_SET_IQ_SOURCE_CMD = 12,
+ MXL_HYDRA_DEMOD_GET_IQ_DATA_CMD = 13,
+
+ MXL_HYDRA_DEMOD_GET_M68HC05_VER_CMD = 14,
+
+ MXL_HYDRA_DEMOD_SET_ERROR_COUNTER_MODE_CMD = 15,
+
+ /* --- ABORT channel tune */
+ MXL_HYDRA_ABORT_TUNE_CMD = 16, /* Abort current tune command. */
+
+ /* --SWM/FSK command IDs-- */
+ MXL_HYDRA_FSK_RESET_CMD = 17,
+ MXL_HYDRA_FSK_MSG_CMD = 18,
+ MXL_HYDRA_FSK_SET_OP_MODE_CMD = 19,
+
+ /* --DiSeqC command IDs-- */
+ MXL_HYDRA_DISEQC_MSG_CMD = 20,
+ MXL_HYDRA_DISEQC_COPY_MSG_TO_MAILBOX = 21,
+ MXL_HYDRA_DISEQC_CFG_MSG_CMD = 22,
+
+ /* --- FFT Debug Command IDs-- */
+ MXL_HYDRA_REQ_FFT_SPECTRUM_CMD = 23,
+
+ /* -- Demod scramble code */
+ MXL_HYDRA_DEMOD_SCRAMBLE_CODE_CMD = 24,
+
+ /* ---For host to know how many commands in total */
+ MXL_HYDRA_LAST_HOST_CMD = 25,
+
+ MXL_HYDRA_DEMOD_INTR_TYPE_CMD = 47,
+ MXL_HYDRA_DEV_INTR_CLEAR_CMD = 48,
+ MXL_HYDRA_TUNER_SPECTRUM_REQ_CMD = 53,
+ MXL_HYDRA_TUNER_ACTIVATE_CMD = 55,
+ MXL_HYDRA_DEV_CFG_POWER_MODE_CMD = 56,
+ MXL_HYDRA_DEV_XTAL_CAP_CMD = 57,
+ MXL_HYDRA_DEV_CFG_SKU_CMD = 58,
+ MXL_HYDRA_TUNER_SPECTRUM_MIN_GAIN_CMD = 59,
+ MXL_HYDRA_DISEQC_CONT_TONE_CFG = 60,
+ MXL_HYDRA_DEV_RF_WAKE_UP_CMD = 61,
+ MXL_HYDRA_DEMOD_CFG_EQ_CTRL_PARAM_CMD = 62,
+ MXL_HYDRA_DEMOD_FREQ_OFFSET_SEARCH_RANGE_CMD = 63,
+ MXL_HYDRA_DEV_REQ_PWR_FROM_ADCRSSI_CMD = 64,
+
+ MXL_XCPU_PID_FLT_CFG_CMD = 65,
+ MXL_XCPU_SHMEM_TEST_CMD = 66,
+ MXL_XCPU_ABORT_TUNE_CMD = 67,
+ MXL_XCPU_CHAN_TUNE_CMD = 68,
+ MXL_XCPU_FLT_BOND_HDRS_CMD = 69,
+
+ MXL_HYDRA_DEV_BROADCAST_WAKE_UP_CMD = 70,
+ MXL_HYDRA_FSK_CFG_FSK_FREQ_CMD = 71,
+ MXL_HYDRA_FSK_POWER_DOWN_CMD = 72,
+ MXL_XCPU_CLEAR_CB_STATS_CMD = 73,
+ MXL_XCPU_CHAN_BOND_RESTART_CMD = 74
+};
+
+#define MXL_ENABLE_BIG_ENDIAN (0)
+
+#define MXL_HYDRA_OEM_MAX_BLOCK_WRITE_LENGTH 248
+
+#define MXL_HYDRA_OEM_MAX_CMD_BUFF_LEN (248)
+
+#define MXL_HYDRA_CAP_MIN 10
+#define MXL_HYDRA_CAP_MAX 33
+
+#define MXL_HYDRA_PLID_REG_READ 0xFB /* Read register PLID */
+#define MXL_HYDRA_PLID_REG_WRITE 0xFC /* Write register PLID */
+
+#define MXL_HYDRA_PLID_CMD_READ 0xFD /* Command Read PLID */
+#define MXL_HYDRA_PLID_CMD_WRITE 0xFE /* Command Write PLID */
+
+#define MXL_HYDRA_REG_SIZE_IN_BYTES 4 /* Hydra register size in bytes */
+#define MXL_HYDRA_I2C_HDR_SIZE (2 * sizeof(u8)) /* PLID + LEN(0xFF) */
+#define MXL_HYDRA_CMD_HEADER_SIZE (MXL_HYDRA_REG_SIZE_IN_BYTES + MXL_HYDRA_I2C_HDR_SIZE)
+
+#define MXL_HYDRA_SKU_ID_581 0
+#define MXL_HYDRA_SKU_ID_584 1
+#define MXL_HYDRA_SKU_ID_585 2
+#define MXL_HYDRA_SKU_ID_544 3
+#define MXL_HYDRA_SKU_ID_561 4
+#define MXL_HYDRA_SKU_ID_582 5
+#define MXL_HYDRA_SKU_ID_568 6
+
+/* macro for register write data buffer size
+ * (PLID + LEN (0xFF) + RegAddr + RegData)
+ */
+#define MXL_HYDRA_REG_WRITE_LEN (MXL_HYDRA_I2C_HDR_SIZE + (2 * MXL_HYDRA_REG_SIZE_IN_BYTES))
+
+/* macro to extract a single byte from 4-byte(32-bit) data */
+#define GET_BYTE(x, n) (((x) >> (8*(n))) & 0xFF)
+
+#define MAX_CMD_DATA 512
+
+#define MXL_GET_REG_MASK_32(lsb_loc, num_of_bits) ((0xFFFFFFFF >> (32 - (num_of_bits))) << (lsb_loc))
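+/* e.g. GET_BYTE(0x12345678, 2) == 0x34 and
+ * MXL_GET_REG_MASK_32(4, 3) == 0x00000070
+ */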
+
+#define FW_DL_SIGN (0xDEADBEEF)
+
+#define MBIN_FORMAT_VERSION '1'
+#define MBIN_FILE_HEADER_ID 'M'
+#define MBIN_SEGMENT_HEADER_ID 'S'
+#define MBIN_MAX_FILE_LENGTH (1<<23)
+
+struct MBIN_FILE_HEADER_T {
+ u8 id;
+ u8 fmt_version;
+ u8 header_len;
+ u8 num_segments;
+ u8 entry_address[4];
+ u8 image_size24[3];
+ u8 image_checksum;
+ u8 reserved[4];
+};
+
+struct MBIN_FILE_T {
+ struct MBIN_FILE_HEADER_T header;
+ u8 data[1];
+};
+
+struct MBIN_SEGMENT_HEADER_T {
+ u8 id;
+ u8 len24[3];
+ u8 address[4];
+};
+
+struct MBIN_SEGMENT_T {
+ struct MBIN_SEGMENT_HEADER_T header;
+ u8 data[1];
+};
+
+enum MXL_CMD_TYPE_E { MXL_CMD_WRITE = 0, MXL_CMD_READ };
+
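+/* Command buffer layout produced by BUILD_HYDRA_CMD:
+ * byte 0: PLID (command read or write)
+ * byte 1: transfer length (size + 4, or 0xff if size exceeds 251)
+ * byte 2: payload size
+ * byte 3: command id
+ * bytes 4-5: padding (zero)
+ * bytes 6..: payload, passed through convert_endian() first
+ */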
+#define BUILD_HYDRA_CMD(cmd_id, req_type, size, data_ptr, cmd_buff) \
+ do { \
+ cmd_buff[0] = ((req_type == MXL_CMD_WRITE) ? MXL_HYDRA_PLID_CMD_WRITE : MXL_HYDRA_PLID_CMD_READ); \
+ cmd_buff[1] = (size > 251) ? 0xff : (u8) (size + 4); \
+ cmd_buff[2] = size; \
+ cmd_buff[3] = cmd_id; \
+ cmd_buff[4] = 0x00; \
+ cmd_buff[5] = 0x00; \
+ convert_endian(MXL_ENABLE_BIG_ENDIAN, size, (u8 *)data_ptr); \
+ memcpy((void *)&cmd_buff[6], data_ptr, size); \
+ } while (0)
+
+struct MXL_REG_FIELD_T {
+ u32 reg_addr;
+ u8 lsb_pos;
+ u8 num_of_bits;
+};
+
+struct MXL_DEV_CMD_DATA_T {
+ u32 data_size;
+ u8 data[MAX_CMD_DATA];
+};
+
+enum MXL_HYDRA_SKU_TYPE_E {
+ MXL_HYDRA_SKU_TYPE_MIN = 0x00,
+ MXL_HYDRA_SKU_TYPE_581 = 0x00,
+ MXL_HYDRA_SKU_TYPE_584 = 0x01,
+ MXL_HYDRA_SKU_TYPE_585 = 0x02,
+ MXL_HYDRA_SKU_TYPE_544 = 0x03,
+ MXL_HYDRA_SKU_TYPE_561 = 0x04,
+ MXL_HYDRA_SKU_TYPE_5XX = 0x05,
+ MXL_HYDRA_SKU_TYPE_5YY = 0x06,
+ MXL_HYDRA_SKU_TYPE_511 = 0x07,
+ MXL_HYDRA_SKU_TYPE_561_DE = 0x08,
+ MXL_HYDRA_SKU_TYPE_582 = 0x09,
+ MXL_HYDRA_SKU_TYPE_541 = 0x0A,
+ MXL_HYDRA_SKU_TYPE_568 = 0x0B,
+ MXL_HYDRA_SKU_TYPE_542 = 0x0C,
+ MXL_HYDRA_SKU_TYPE_MAX = 0x0D,
+};
+
+struct MXL_HYDRA_SKU_COMMAND_T {
+ enum MXL_HYDRA_SKU_TYPE_E sku_type;
+};
+
+enum MXL_HYDRA_DEMOD_ID_E {
+ MXL_HYDRA_DEMOD_ID_0 = 0,
+ MXL_HYDRA_DEMOD_ID_1,
+ MXL_HYDRA_DEMOD_ID_2,
+ MXL_HYDRA_DEMOD_ID_3,
+ MXL_HYDRA_DEMOD_ID_4,
+ MXL_HYDRA_DEMOD_ID_5,
+ MXL_HYDRA_DEMOD_ID_6,
+ MXL_HYDRA_DEMOD_ID_7,
+ MXL_HYDRA_DEMOD_MAX
+};
+
+#define MXL_DEMOD_SCRAMBLE_SEQ_LEN 12
+
+#define MAX_STEP_SIZE_24_XTAL_102_05_KHZ 195
+#define MAX_STEP_SIZE_24_XTAL_204_10_KHZ 215
+#define MAX_STEP_SIZE_24_XTAL_306_15_KHZ 203
+#define MAX_STEP_SIZE_24_XTAL_408_20_KHZ 177
+
+#define MAX_STEP_SIZE_27_XTAL_102_05_KHZ 195
+#define MAX_STEP_SIZE_27_XTAL_204_10_KHZ 215
+#define MAX_STEP_SIZE_27_XTAL_306_15_KHZ 203
+#define MAX_STEP_SIZE_27_XTAL_408_20_KHZ 177
+
+#define MXL_HYDRA_SPECTRUM_MIN_FREQ_KHZ 300000
+#define MXL_HYDRA_SPECTRUM_MAX_FREQ_KHZ 2350000
+
+enum MXL_DEMOD_CHAN_PARAMS_OFFSET_E {
+ DMD_STANDARD_ADDR = 0,
+ DMD_SPECTRUM_INVERSION_ADDR,
+ DMD_SPECTRUM_ROLL_OFF_ADDR,
+ DMD_SYMBOL_RATE_ADDR,
+ DMD_MODULATION_SCHEME_ADDR,
+ DMD_FEC_CODE_RATE_ADDR,
+ DMD_SNR_ADDR,
+ DMD_FREQ_OFFSET_ADDR,
+ DMD_CTL_FREQ_OFFSET_ADDR,
+ DMD_STR_FREQ_OFFSET_ADDR,
+ DMD_FTL_FREQ_OFFSET_ADDR,
+ DMD_STR_NBC_SYNC_LOCK_ADDR,
+ DMD_CYCLE_SLIP_COUNT_ADDR,
+ DMD_DISPLAY_IQ_ADDR,
+ DMD_DVBS2_CRC_ERRORS_ADDR,
+ DMD_DVBS2_PER_COUNT_ADDR,
+ DMD_DVBS2_PER_WINDOW_ADDR,
+ DMD_DVBS_CORR_RS_ERRORS_ADDR,
+ DMD_DVBS_UNCORR_RS_ERRORS_ADDR,
+ DMD_DVBS_BER_COUNT_ADDR,
+ DMD_DVBS_BER_WINDOW_ADDR,
+ DMD_TUNER_ID_ADDR,
+ DMD_DVBS2_PILOT_ON_OFF_ADDR,
+ DMD_FREQ_SEARCH_RANGE_IN_KHZ_ADDR,
+
+ MXL_DEMOD_CHAN_PARAMS_BUFF_SIZE,
+};
+
+enum MXL_HYDRA_TUNER_ID_E {
+ MXL_HYDRA_TUNER_ID_0 = 0,
+ MXL_HYDRA_TUNER_ID_1,
+ MXL_HYDRA_TUNER_ID_2,
+ MXL_HYDRA_TUNER_ID_3,
+ MXL_HYDRA_TUNER_MAX
+};
+
+enum MXL_HYDRA_BCAST_STD_E {
+ MXL_HYDRA_DSS = 0,
+ MXL_HYDRA_DVBS,
+ MXL_HYDRA_DVBS2,
+};
+
+enum MXL_HYDRA_FEC_E {
+ MXL_HYDRA_FEC_AUTO = 0,
+ MXL_HYDRA_FEC_1_2,
+ MXL_HYDRA_FEC_3_5,
+ MXL_HYDRA_FEC_2_3,
+ MXL_HYDRA_FEC_3_4,
+ MXL_HYDRA_FEC_4_5,
+ MXL_HYDRA_FEC_5_6,
+ MXL_HYDRA_FEC_6_7,
+ MXL_HYDRA_FEC_7_8,
+ MXL_HYDRA_FEC_8_9,
+ MXL_HYDRA_FEC_9_10,
+};
+
+enum MXL_HYDRA_MODULATION_E {
+ MXL_HYDRA_MOD_AUTO = 0,
+ MXL_HYDRA_MOD_QPSK,
+ MXL_HYDRA_MOD_8PSK
+};
+
+enum MXL_HYDRA_SPECTRUM_E {
+ MXL_HYDRA_SPECTRUM_AUTO = 0,
+ MXL_HYDRA_SPECTRUM_INVERTED,
+ MXL_HYDRA_SPECTRUM_NON_INVERTED,
+};
+
+enum MXL_HYDRA_ROLLOFF_E {
+ MXL_HYDRA_ROLLOFF_AUTO = 0,
+ MXL_HYDRA_ROLLOFF_0_20,
+ MXL_HYDRA_ROLLOFF_0_25,
+ MXL_HYDRA_ROLLOFF_0_35
+};
+
+enum MXL_HYDRA_PILOTS_E {
+ MXL_HYDRA_PILOTS_OFF = 0,
+ MXL_HYDRA_PILOTS_ON,
+ MXL_HYDRA_PILOTS_AUTO
+};
+
+enum MXL_HYDRA_CONSTELLATION_SRC_E {
+ MXL_HYDRA_FORMATTER = 0,
+ MXL_HYDRA_LEGACY_FEC,
+ MXL_HYDRA_FREQ_RECOVERY,
+ MXL_HYDRA_NBC,
+ MXL_HYDRA_CTL,
+ MXL_HYDRA_EQ,
+};
+
+struct MXL_HYDRA_DEMOD_LOCK_T {
+ int agc_lock; /* AGC lock info */
+ int fec_lock; /* Demod FEC block lock info */
+};
+
+struct MXL_HYDRA_DEMOD_STATUS_DVBS_T {
+ u32 rs_errors; /* RS decoder err counter */
+ u32 ber_window; /* Ber Windows */
+ u32 ber_count; /* BER count */
+ u32 ber_window_iter1; /* Ber Windows - post viterbi */
+ u32 ber_count_iter1; /* BER count - post viterbi */
+};
+
+struct MXL_HYDRA_DEMOD_STATUS_DSS_T {
+ u32 rs_errors; /* RS decoder err counter */
+ u32 ber_window; /* Ber Windows */
+ u32 ber_count; /* BER count */
+};
+
+struct MXL_HYDRA_DEMOD_STATUS_DVBS2_T {
+ u32 crc_errors; /* CRC error counter */
+ u32 packet_error_count; /* Number of packet errors */
+ u32 total_packets; /* Total packets */
+};
+
+struct MXL_HYDRA_DEMOD_STATUS_T {
+ enum MXL_HYDRA_BCAST_STD_E standard_mask; /* Standard DVB-S, DVB-S2 or DSS */
+
+ union {
+ struct MXL_HYDRA_DEMOD_STATUS_DVBS_T demod_status_dvbs; /* DVB-S demod status */
+ struct MXL_HYDRA_DEMOD_STATUS_DVBS2_T demod_status_dvbs2; /* DVB-S2 demod status */
+ struct MXL_HYDRA_DEMOD_STATUS_DSS_T demod_status_dss; /* DSS demod status */
+ } u;
+};
+
+struct MXL_HYDRA_DEMOD_SIG_OFFSET_INFO_T {
+ s32 carrier_offset_in_hz; /* CRL offset info */
+ s32 symbol_offset_in_symbol; /* SRL offset info */
+};
+
+struct MXL_HYDRA_DEMOD_SCRAMBLE_INFO_T {
+ u8 scramble_sequence[MXL_DEMOD_SCRAMBLE_SEQ_LEN]; /* scramble sequence */
+ u32 scramble_code; /* scramble gold code */
+};
+
+enum MXL_HYDRA_SPECTRUM_STEP_SIZE_E {
+ MXL_HYDRA_STEP_SIZE_24_XTAL_102_05KHZ, /* 102.05 KHz for 24 MHz XTAL */
+ MXL_HYDRA_STEP_SIZE_24_XTAL_204_10KHZ, /* 204.10 KHz for 24 MHz XTAL */
+ MXL_HYDRA_STEP_SIZE_24_XTAL_306_15KHZ, /* 306.15 KHz for 24 MHz XTAL */
+ MXL_HYDRA_STEP_SIZE_24_XTAL_408_20KHZ, /* 408.20 KHz for 24 MHz XTAL */
+
+ MXL_HYDRA_STEP_SIZE_27_XTAL_102_05KHZ, /* 102.05 KHz for 27 MHz XTAL */
+ MXL_HYDRA_STEP_SIZE_27_XTAL_204_35KHZ, /* 204.35 KHz for 27 MHz XTAL */
+ MXL_HYDRA_STEP_SIZE_27_XTAL_306_52KHZ, /* 306.52 KHz for 27 MHz XTAL */
+ MXL_HYDRA_STEP_SIZE_27_XTAL_408_69KHZ, /* 408.69 KHz for 27 MHz XTAL */
+};
+
+enum MXL_HYDRA_SPECTRUM_RESOLUTION_E {
+ MXL_HYDRA_SPECTRUM_RESOLUTION_00_1_DB, /* 0.1 dB */
+ MXL_HYDRA_SPECTRUM_RESOLUTION_01_0_DB, /* 1.0 dB */
+ MXL_HYDRA_SPECTRUM_RESOLUTION_05_0_DB, /* 5.0 dB */
+ MXL_HYDRA_SPECTRUM_RESOLUTION_10_0_DB, /* 10 dB */
+};
+
+enum MXL_HYDRA_SPECTRUM_ERROR_CODE_E {
+ MXL_SPECTRUM_NO_ERROR,
+ MXL_SPECTRUM_INVALID_PARAMETER,
+ MXL_SPECTRUM_INVALID_STEP_SIZE,
+ MXL_SPECTRUM_BW_CANNOT_BE_COVERED,
+ MXL_SPECTRUM_DEMOD_BUSY,
+ MXL_SPECTRUM_TUNER_NOT_ENABLED,
+};
+
+struct MXL_HYDRA_SPECTRUM_REQ_T {
+ u32 tuner_index; /* TUNER Ctrl: one of MXL58x_TUNER_ID_E */
+ u32 demod_index; /* DEMOD Ctrl: one of MXL58x_DEMOD_ID_E */
+ enum MXL_HYDRA_SPECTRUM_STEP_SIZE_E step_size_in_khz;
+ u32 starting_freq_ink_hz;
+ u32 total_steps;
+ enum MXL_HYDRA_SPECTRUM_RESOLUTION_E spectrum_division;
+};
+
+enum MXL_HYDRA_SEARCH_FREQ_OFFSET_TYPE_E {
+ MXL_HYDRA_SEARCH_MAX_OFFSET = 0, /* DMD searches for max freq offset (i.e. 5MHz) */
+ MXL_HYDRA_SEARCH_BW_PLUS_ROLLOFF, /* DMD searches for BW + ROLLOFF/2 */
+};
+
+struct MXL58X_CFG_FREQ_OFF_SEARCH_RANGE_T {
+ u32 demod_index;
+ enum MXL_HYDRA_SEARCH_FREQ_OFFSET_TYPE_E search_type;
+};
+
+/* there are two slices
+ * slice0 - TS0, TS1, TS2 & TS3
+ * slice1 - TS4, TS5, TS6 & TS7
+ */
+#define MXL_HYDRA_TS_SLICE_MAX 2
+
+#define MAX_FIXED_PID_NUM 32
+
+#define MXL_HYDRA_NCO_CLK 418 /* 418 MHz */
+
+#define MXL_HYDRA_MAX_TS_CLOCK 139 /* 139 MHz */
+
+#define MXL_HYDRA_TS_FIXED_PID_FILT_SIZE 32
+
+#define MXL_HYDRA_SHARED_PID_FILT_SIZE_DEFAULT 33 /* Shared PID filter size in 1-1 mux mode */
+#define MXL_HYDRA_SHARED_PID_FILT_SIZE_2_TO_1 66 /* Shared PID filter size in 2-1 mux mode */
+#define MXL_HYDRA_SHARED_PID_FILT_SIZE_4_TO_1 132 /* Shared PID filter size in 4-1 mux mode */
+
+enum MXL_HYDRA_PID_BANK_TYPE_E {
+ MXL_HYDRA_SOFTWARE_PID_BANK = 0,
+ MXL_HYDRA_HARDWARE_PID_BANK,
+};
+
+enum MXL_HYDRA_TS_MUX_MODE_E {
+ MXL_HYDRA_TS_MUX_PID_REMAP = 0,
+ MXL_HYDRA_TS_MUX_PREFIX_EXTRA_HEADER = 1,
+};
+
+enum MXL_HYDRA_TS_MUX_TYPE_E {
+ MXL_HYDRA_TS_MUX_DISABLE = 0, /* No Mux ( 1 TSIF to 1 TSIF) */
+ MXL_HYDRA_TS_MUX_2_TO_1, /* Mux 2 TSIF to 1 TSIF */
+ MXL_HYDRA_TS_MUX_4_TO_1, /* Mux 4 TSIF to 1 TSIF */
+};
+
+enum MXL_HYDRA_TS_GROUP_E {
+ MXL_HYDRA_TS_GROUP_0_3 = 0, /* TS group 0 to 3 (TS0, TS1, TS2 & TS3) */
+ MXL_HYDRA_TS_GROUP_4_7, /* TS group 4 to 7 (TS4, TS5, TS6 & TS7) */
+};
+
+enum MXL_HYDRA_TS_PID_FLT_CTRL_E {
+ MXL_HYDRA_TS_PIDS_ALLOW_ALL = 0, /* Allow all pids */
+ MXL_HYDRA_TS_PIDS_DROP_ALL, /* Drop all pids */
+ MXL_HYDRA_TS_INVALIDATE_PID_FILTER, /* Delete current PID filter in the device */
+};
+
+enum MXL_HYDRA_TS_PID_TYPE_E {
+ MXL_HYDRA_TS_PID_FIXED = 0,
+ MXL_HYDRA_TS_PID_REGULAR,
+};
+
+struct MXL_HYDRA_TS_PID_T {
+ u16 original_pid; /* pid from TS */
+ u16 remapped_pid; /* remapped pid */
+ enum MXL_BOOL_E enable; /* enable or disable pid */
+ enum MXL_BOOL_E allow_or_drop; /* allow or drop pid */
+ enum MXL_BOOL_E enable_pid_remap; /* enable or disable pid remap */
+ u8 bond_id; /* Bond ID in A0 always 0 - Only for 568 Sku */
+ u8 dest_id; /* Output port ID for the PID - Only for 568 Sku */
+};
+
+struct MXL_HYDRA_TS_MUX_PREFIX_HEADER_T {
+ enum MXL_BOOL_E enable;
+ u8 num_byte;
+ u8 header[12];
+};
+
+enum MXL_HYDRA_PID_FILTER_BANK_E {
+ MXL_HYDRA_PID_BANK_A = 0,
+ MXL_HYDRA_PID_BANK_B,
+};
+
+enum MXL_HYDRA_MPEG_DATA_FMT_E {
+ MXL_HYDRA_MPEG_SERIAL_MSB_1ST = 0,
+ MXL_HYDRA_MPEG_SERIAL_LSB_1ST,
+
+ MXL_HYDRA_MPEG_SYNC_WIDTH_BIT = 0,
+ MXL_HYDRA_MPEG_SYNC_WIDTH_BYTE
+};
+
+enum MXL_HYDRA_MPEG_MODE_E {
+ MXL_HYDRA_MPEG_MODE_SERIAL_4_WIRE = 0, /* MPEG 4 Wire serial mode */
+ MXL_HYDRA_MPEG_MODE_SERIAL_3_WIRE, /* MPEG 3 Wire serial mode */
+ MXL_HYDRA_MPEG_MODE_SERIAL_2_WIRE, /* MPEG 2 Wire serial mode */
+ MXL_HYDRA_MPEG_MODE_PARALLEL /* MPEG parallel mode - valid only for MxL581 */
+};
+
+enum MXL_HYDRA_MPEG_CLK_TYPE_E {
+ MXL_HYDRA_MPEG_CLK_CONTINUOUS = 0, /* Continuous MPEG clock */
+ MXL_HYDRA_MPEG_CLK_GAPPED, /* Gapped (gated) MPEG clock */
+};
+
+enum MXL_HYDRA_MPEG_CLK_FMT_E {
+ MXL_HYDRA_MPEG_ACTIVE_LOW = 0,
+ MXL_HYDRA_MPEG_ACTIVE_HIGH,
+
+ MXL_HYDRA_MPEG_CLK_NEGATIVE = 0,
+ MXL_HYDRA_MPEG_CLK_POSITIVE,
+
+ MXL_HYDRA_MPEG_CLK_IN_PHASE = 0,
+ MXL_HYDRA_MPEG_CLK_INVERTED,
+};
+
+enum MXL_HYDRA_MPEG_CLK_PHASE_E {
+ MXL_HYDRA_MPEG_CLK_PHASE_SHIFT_0_DEG = 0,
+ MXL_HYDRA_MPEG_CLK_PHASE_SHIFT_90_DEG,
+ MXL_HYDRA_MPEG_CLK_PHASE_SHIFT_180_DEG,
+ MXL_HYDRA_MPEG_CLK_PHASE_SHIFT_270_DEG
+};
+
+enum MXL_HYDRA_MPEG_ERR_INDICATION_E {
+ MXL_HYDRA_MPEG_ERR_REPLACE_SYNC = 0,
+ MXL_HYDRA_MPEG_ERR_REPLACE_VALID,
+ MXL_HYDRA_MPEG_ERR_INDICATION_DISABLED
+};
+
+struct MXL_HYDRA_MPEGOUT_PARAM_T {
+ int enable; /* Enable or Disable MPEG OUT */
+ enum MXL_HYDRA_MPEG_CLK_TYPE_E mpeg_clk_type; /* Continuous or gapped */
+ enum MXL_HYDRA_MPEG_CLK_FMT_E mpeg_clk_pol; /* MPEG Clk polarity */
+ u8 max_mpeg_clk_rate; /* Max MPEG Clk rate (0 - 104 MHz, 139 MHz) */
+ enum MXL_HYDRA_MPEG_CLK_PHASE_E mpeg_clk_phase; /* MPEG Clk phase */
+ enum MXL_HYDRA_MPEG_DATA_FMT_E lsb_or_msb_first; /* LSB first or MSB first in TS transmission */
+ enum MXL_HYDRA_MPEG_DATA_FMT_E mpeg_sync_pulse_width; /* MPEG SYNC pulse width (1-bit or 1-byte) */
+ enum MXL_HYDRA_MPEG_CLK_FMT_E mpeg_valid_pol; /* MPEG VALID polarity */
+ enum MXL_HYDRA_MPEG_CLK_FMT_E mpeg_sync_pol; /* MPEG SYNC polarity */
+ enum MXL_HYDRA_MPEG_MODE_E mpeg_mode; /* config 4/3/2-wire serial or parallel TS out */
+ enum MXL_HYDRA_MPEG_ERR_INDICATION_E mpeg_error_indication; /* Enable or Disable MPEG error indication */
+};
+
+enum MXL_HYDRA_EXT_TS_IN_ID_E {
+ MXL_HYDRA_EXT_TS_IN_0 = 0,
+ MXL_HYDRA_EXT_TS_IN_1,
+ MXL_HYDRA_EXT_TS_IN_2,
+ MXL_HYDRA_EXT_TS_IN_3,
+ MXL_HYDRA_EXT_TS_IN_MAX
+};
+
+enum MXL_HYDRA_TS_OUT_ID_E {
+ MXL_HYDRA_TS_OUT_0 = 0,
+ MXL_HYDRA_TS_OUT_1,
+ MXL_HYDRA_TS_OUT_2,
+ MXL_HYDRA_TS_OUT_3,
+ MXL_HYDRA_TS_OUT_4,
+ MXL_HYDRA_TS_OUT_5,
+ MXL_HYDRA_TS_OUT_6,
+ MXL_HYDRA_TS_OUT_7,
+ MXL_HYDRA_TS_OUT_MAX
+};
+
+enum MXL_HYDRA_TS_DRIVE_STRENGTH_E {
+ MXL_HYDRA_TS_DRIVE_STRENGTH_1X = 0,
+ MXL_HYDRA_TS_DRIVE_STRENGTH_2X,
+ MXL_HYDRA_TS_DRIVE_STRENGTH_3X,
+ MXL_HYDRA_TS_DRIVE_STRENGTH_4X,
+ MXL_HYDRA_TS_DRIVE_STRENGTH_5X,
+ MXL_HYDRA_TS_DRIVE_STRENGTH_6X,
+ MXL_HYDRA_TS_DRIVE_STRENGTH_7X,
+ MXL_HYDRA_TS_DRIVE_STRENGTH_8X
+};
+
+enum MXL_HYDRA_DEVICE_E {
+ MXL_HYDRA_DEVICE_581 = 0,
+ MXL_HYDRA_DEVICE_584,
+ MXL_HYDRA_DEVICE_585,
+ MXL_HYDRA_DEVICE_544,
+ MXL_HYDRA_DEVICE_561,
+ MXL_HYDRA_DEVICE_TEST,
+ MXL_HYDRA_DEVICE_582,
+ MXL_HYDRA_DEVICE_541,
+ MXL_HYDRA_DEVICE_568,
+ MXL_HYDRA_DEVICE_542,
+ MXL_HYDRA_DEVICE_541S,
+ MXL_HYDRA_DEVICE_561S,
+ MXL_HYDRA_DEVICE_581S,
+ MXL_HYDRA_DEVICE_MAX
+};
+
+/* Demod IQ data */
+struct MXL_HYDRA_DEMOD_IQ_SRC_T {
+ u32 demod_id;
+ u32 source_of_iq; /* == 0, it means I/Q comes from Formatter
+ * == 1, Legacy FEC
+ * == 2, Frequency Recovery
+ * == 3, NBC
+ * == 4, CTL
+ * == 5, EQ
+ * == 6, FPGA
+ */
+};
+
+struct MXL_HYDRA_DEMOD_ABORT_TUNE_T {
+ u32 demod_id;
+};
+
+struct MXL_HYDRA_TUNER_CMD {
+ u8 tuner_id;
+ u8 enable;
+};
+
+/* Demod Para for Channel Tune */
+struct MXL_HYDRA_DEMOD_PARAM_T {
+ u32 tuner_index;
+ u32 demod_index;
+ u32 frequency_in_hz; /* Frequency */
+ u32 standard; /* one of MXL_HYDRA_BCAST_STD_E */
+ u32 spectrum_inversion; /* Input : Spectrum inversion. */
+ u32 roll_off; /* rollOff (alpha) factor */
+ u32 symbol_rate_in_hz; /* Symbol rate */
+ u32 pilots; /* TRUE = pilots enabled */
+ u32 modulation_scheme; /* Input : Modulation Scheme is one of MXL_HYDRA_MODULATION_E */
+ u32 fec_code_rate; /* Input : Forward error correction rate. Is one of MXL_HYDRA_FEC_E */
+ u32 max_carrier_offset_in_mhz; /* Maximum carrier freq offset in MHz. Same as freqSearchRangeKHz, but in units of MHz. */
+};
+
+struct MXL_HYDRA_DEMOD_SCRAMBLE_CODE_T {
+ u32 demod_index;
+ u8 scramble_sequence[12]; /* scramble sequence */
+ u32 scramble_code; /* scramble gold code */
+};
+
+struct MXL_INTR_CFG_T {
+ u32 intr_type;
+ u32 intr_duration_in_nano_secs;
+ u32 intr_mask;
+};
+
+struct MXL_HYDRA_POWER_MODE_CMD {
+ u8 power_mode; /* enumeration values are defined in MXL_HYDRA_PWR_MODE_E (device API.h) */
+};
+
+struct MXL_HYDRA_RF_WAKEUP_PARAM_T {
+ u32 time_interval_in_seconds; /* in seconds */
+ u32 tuner_index;
+ s32 rssi_threshold;
+};
+
+struct MXL_HYDRA_RF_WAKEUP_CFG_T {
+ u32 tuner_count;
+ struct MXL_HYDRA_RF_WAKEUP_PARAM_T params;
+};
+
+enum MXL_HYDRA_AUX_CTRL_MODE_E {
+ MXL_HYDRA_AUX_CTRL_MODE_FSK = 0, /* Select FSK controller */
+ MXL_HYDRA_AUX_CTRL_MODE_DISEQC, /* Select DiSEqC controller */
+};
+
+enum MXL_HYDRA_DISEQC_OPMODE_E {
+ MXL_HYDRA_DISEQC_ENVELOPE_MODE = 0,
+ MXL_HYDRA_DISEQC_TONE_MODE,
+};
+
+enum MXL_HYDRA_DISEQC_VER_E {
+ MXL_HYDRA_DISEQC_1_X = 0, /* Config DiSEqC 1.x mode */
+ MXL_HYDRA_DISEQC_2_X, /* Config DiSEqC 2.x mode */
+ MXL_HYDRA_DISEQC_DISABLE /* Disable DiSEqC */
+};
+
+enum MXL_HYDRA_DISEQC_CARRIER_FREQ_E {
+ MXL_HYDRA_DISEQC_CARRIER_FREQ_22KHZ = 0, /* DiSEqC signal frequency of 22 KHz */
+ MXL_HYDRA_DISEQC_CARRIER_FREQ_33KHZ, /* DiSEqC signal frequency of 33 KHz */
+ MXL_HYDRA_DISEQC_CARRIER_FREQ_44KHZ /* DiSEqC signal frequency of 44 KHz */
+};
+
+enum MXL_HYDRA_DISEQC_ID_E {
+ MXL_HYDRA_DISEQC_ID_0 = 0,
+ MXL_HYDRA_DISEQC_ID_1,
+ MXL_HYDRA_DISEQC_ID_2,
+ MXL_HYDRA_DISEQC_ID_3
+};
+
+enum MXL_HYDRA_FSK_OP_MODE_E {
+ MXL_HYDRA_FSK_CFG_TYPE_39KPBS = 0, /* 39.0kbps */
+ MXL_HYDRA_FSK_CFG_TYPE_39_017KPBS, /* 39.017kbps */
+ MXL_HYDRA_FSK_CFG_TYPE_115_2KPBS /* 115.2kbps */
+};
+
+struct MXL58X_DSQ_OP_MODE_T {
+ u32 diseqc_id; /* DSQ 0, 1, 2 or 3 */
+ u32 op_mode; /* Envelope mode (0) or internal tone mode (1) */
+ u32 version; /* 0: DiSEqC 1.x, 1: DiSEqC 2.x, 2: Disable */
+ u32 center_freq; /* 0: 22KHz, 1: 33KHz and 2: 44 KHz */
+};
+
+struct MXL_HYDRA_DISEQC_CFG_CONT_TONE_T {
+ u32 diseqc_id;
+ u32 cont_tone_flag; /* 1: Enable , 0: Disable */
+};
diff --git a/drivers/media/dvb-frontends/mxl5xx_regs.h b/drivers/media/dvb-frontends/mxl5xx_regs.h
new file mode 100644
index 000000000000..5001dafe1ba8
--- /dev/null
+++ b/drivers/media/dvb-frontends/mxl5xx_regs.h
@@ -0,0 +1,367 @@
+/*
+ * Copyright (c) 2011-2013 MaxLinear, Inc. All rights reserved
+ *
+ * License type: GPLv2
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License as published by the Free Software
+ * Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+ *
+ * This program may alternatively be licensed under a proprietary license from
+ * MaxLinear, Inc.
+ *
+ */
+
+#ifndef __MXL58X_REGISTERS_H__
+#define __MXL58X_REGISTERS_H__
+
+#define HYDRA_INTR_STATUS_REG 0x80030008
+#define HYDRA_INTR_MASK_REG 0x8003000C
+
+#define HYDRA_CRYSTAL_SETTING 0x3FFFC5F0 /* 0 - 24 MHz & 1 - 27 MHz */
+#define HYDRA_CRYSTAL_CAP 0x3FFFEDA4 /* 0 - 24 MHz & 1 - 27 MHz */
+
+#define HYDRA_CPU_RESET_REG 0x8003003C
+#define HYDRA_CPU_RESET_DATA 0x00000400
+
+#define HYDRA_RESET_TRANSPORT_FIFO_REG 0x80030028
+#define HYDRA_RESET_TRANSPORT_FIFO_DATA 0x00000000
+
+#define HYDRA_RESET_BBAND_REG 0x80030024
+#define HYDRA_RESET_BBAND_DATA 0x00000000
+
+#define HYDRA_RESET_XBAR_REG 0x80030020
+#define HYDRA_RESET_XBAR_DATA 0x00000000
+
+#define HYDRA_MODULES_CLK_1_REG 0x80030014
+#define HYDRA_DISABLE_CLK_1 0x00000000
+
+#define HYDRA_MODULES_CLK_2_REG 0x8003001C
+#define HYDRA_DISABLE_CLK_2 0x0000000B
+
+#define HYDRA_PRCM_ROOT_CLK_REG 0x80030018
+#define HYDRA_PRCM_ROOT_CLK_DISABLE 0x00000000
+
+#define HYDRA_CPU_RESET_CHECK_REG 0x80030008
+#define HYDRA_CPU_RESET_CHECK_OFFSET 0x40000000 /* <bit 30> */
+
+#define HYDRA_SKU_ID_REG 0x90000190
+
+#define FW_DL_SIGN_ADDR 0x3FFFEAE0
+
+/* Register to check if FW is running or not */
+#define HYDRA_HEAR_BEAT 0x3FFFEDDC
+
+/* Firmware version */
+#define HYDRA_FIRMWARE_VERSION 0x3FFFEDB8
+#define HYDRA_FW_RC_VERSION 0x3FFFCFAC
+
+/* Firmware patch version */
+#define HYDRA_FIRMWARE_PATCH_VERSION 0x3FFFEDC2
+
+/* SOC operating temperature in C */
+#define HYDRA_TEMPARATURE 0x3FFFEDB4
+
+/* Demod & Tuner status registers */
+/* Demod 0 status base address */
+#define HYDRA_DEMOD_0_BASE_ADDR 0x3FFFC64C
+
+/* Tuner 0 status base address */
+#define HYDRA_TUNER_0_BASE_ADDR 0x3FFFCE4C
+
+#define POWER_FROM_ADCRSSI_READBACK 0x3FFFEB6C
+
+/* Macros to determine base address of respective demod or tuner */
+#define HYDRA_DMD_STATUS_OFFSET(demodID) ((demodID) * 0x100)
+#define HYDRA_TUNER_STATUS_OFFSET(tunerID) ((tunerID) * 0x40)
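+/* e.g. the lock status word of demod 2 is at
+ * HYDRA_DMD_LOCK_STATUS_ADDR_OFFSET + HYDRA_DMD_STATUS_OFFSET(2)
+ * = 0x3FFFC650 + 0x200 = 0x3FFFC850
+ */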
+
+/* Demod status address offset from respective demod's base address */
+#define HYDRA_DMD_AGC_DIG_LEVEL_ADDR_OFFSET 0x3FFFC64C
+#define HYDRA_DMD_LOCK_STATUS_ADDR_OFFSET 0x3FFFC650
+#define HYDRA_DMD_ACQ_STATUS_ADDR_OFFSET 0x3FFFC654
+
+#define HYDRA_DMD_STANDARD_ADDR_OFFSET 0x3FFFC658
+#define HYDRA_DMD_SPECTRUM_INVERSION_ADDR_OFFSET 0x3FFFC65C
+#define HYDRA_DMD_SPECTRUM_ROLL_OFF_ADDR_OFFSET 0x3FFFC660
+#define HYDRA_DMD_SYMBOL_RATE_ADDR_OFFSET 0x3FFFC664
+#define HYDRA_DMD_MODULATION_SCHEME_ADDR_OFFSET 0x3FFFC668
+#define HYDRA_DMD_FEC_CODE_RATE_ADDR_OFFSET 0x3FFFC66C
+
+#define HYDRA_DMD_SNR_ADDR_OFFSET 0x3FFFC670
+#define HYDRA_DMD_FREQ_OFFSET_ADDR_OFFSET 0x3FFFC674
+#define HYDRA_DMD_CTL_FREQ_OFFSET_ADDR_OFFSET 0x3FFFC678
+#define HYDRA_DMD_STR_FREQ_OFFSET_ADDR_OFFSET 0x3FFFC67C
+#define HYDRA_DMD_FTL_FREQ_OFFSET_ADDR_OFFSET 0x3FFFC680
+#define HYDRA_DMD_STR_NBC_SYNC_LOCK_ADDR_OFFSET 0x3FFFC684
+#define HYDRA_DMD_CYCLE_SLIP_COUNT_ADDR_OFFSET 0x3FFFC688
+
+#define HYDRA_DMD_DISPLAY_I_ADDR_OFFSET 0x3FFFC68C
+#define HYDRA_DMD_DISPLAY_Q_ADDR_OFFSET 0x3FFFC68E
+
+#define HYDRA_DMD_DVBS2_CRC_ERRORS_ADDR_OFFSET 0x3FFFC690
+#define HYDRA_DMD_DVBS2_PER_COUNT_ADDR_OFFSET 0x3FFFC694
+#define HYDRA_DMD_DVBS2_PER_WINDOW_ADDR_OFFSET 0x3FFFC698
+
+#define HYDRA_DMD_DVBS_CORR_RS_ERRORS_ADDR_OFFSET 0x3FFFC69C
+#define HYDRA_DMD_DVBS_UNCORR_RS_ERRORS_ADDR_OFFSET 0x3FFFC6A0
+#define HYDRA_DMD_DVBS_BER_COUNT_ADDR_OFFSET 0x3FFFC6A4
+#define HYDRA_DMD_DVBS_BER_WINDOW_ADDR_OFFSET 0x3FFFC6A8
+
+/* Debug-purpose DVB-S DMD 0 */
+#define HYDRA_DMD_DVBS_1ST_CORR_RS_ERRORS_ADDR_OFFSET 0x3FFFC6C8 /* corrected RS Errors: 1st iteration */
+#define HYDRA_DMD_DVBS_1ST_UNCORR_RS_ERRORS_ADDR_OFFSET 0x3FFFC6CC /* uncorrected RS Errors: 1st iteration */
+#define HYDRA_DMD_DVBS_BER_COUNT_1ST_ADDR_OFFSET 0x3FFFC6D0
+#define HYDRA_DMD_DVBS_BER_WINDOW_1ST_ADDR_OFFSET 0x3FFFC6D4
+
+#define HYDRA_DMD_TUNER_ID_ADDR_OFFSET 0x3FFFC6AC
+#define HYDRA_DMD_DVBS2_PILOT_ON_OFF_ADDR_OFFSET 0x3FFFC6B0
+#define HYDRA_DMD_FREQ_SEARCH_RANGE_KHZ_ADDR_OFFSET 0x3FFFC6B4
+#define HYDRA_DMD_STATUS_LOCK_ADDR_OFFSET 0x3FFFC6B8
+#define HYDRA_DMD_STATUS_CENTER_FREQ_IN_KHZ_ADDR 0x3FFFC704
+#define HYDRA_DMD_STATUS_INPUT_POWER_ADDR 0x3FFFC708
+
+/* DVB-S new scaled_BER_count for a new BER API, see HYDRA-1343 "DVB-S post viterbi information" */
+#define DMD0_STATUS_DVBS_1ST_SCALED_BER_COUNT_ADDR 0x3FFFC710 /* DMD 0: 1st iteration BER count scaled by HYDRA_BER_COUNT_SCALING_FACTOR */
+#define DMD0_STATUS_DVBS_SCALED_BER_COUNT_ADDR 0x3FFFC714 /* DMD 0: 2nd iteration BER count scaled by HYDRA_BER_COUNT_SCALING_FACTOR */
+
+#define DMD0_SPECTRUM_MIN_GAIN_STATUS 0x3FFFC73C
+#define DMD0_SPECTRUM_MIN_GAIN_WB_SAGC_VALUE 0x3FFFC740
+#define DMD0_SPECTRUM_MIN_GAIN_NB_SAGC_VALUE 0x3FFFC744
+
+#define HYDRA_DMD_STATUS_END_ADDR_OFFSET 0x3FFFC748
+
+/* Tuner status address offset from respective tuner's base address */
+#define HYDRA_TUNER_DEMOD_ID_ADDR_OFFSET 0x3FFFCE4C
+#define HYDRA_TUNER_AGC_LOCK_OFFSET 0x3FFFCE50
+#define HYDRA_TUNER_SPECTRUM_STATUS_OFFSET 0x3FFFCE54
+#define HYDRA_TUNER_SPECTRUM_BIN_SIZE_OFFSET 0x3FFFCE58
+#define HYDRA_TUNER_SPECTRUM_ADDRESS_OFFSET 0x3FFFCE5C
+#define HYDRA_TUNER_ENABLE_COMPLETE 0x3FFFEB78
+
+#define HYDRA_DEMOD_STATUS_LOCK(devId, demodId) write_register(devId, (HYDRA_DMD_STATUS_LOCK_ADDR_OFFSET + HYDRA_DMD_STATUS_OFFSET(demodId)), MXL_YES)
+#define HYDRA_DEMOD_STATUS_UNLOCK(devId, demodId) write_register(devId, (HYDRA_DMD_STATUS_LOCK_ADDR_OFFSET + HYDRA_DMD_STATUS_OFFSET(demodId)), MXL_NO)
+
+#define HYDRA_VERSION 0x3FFFEDB8
+#define HYDRA_DEMOD0_VERSION 0x3FFFEDBC
+#define HYDRA_DEMOD1_VERSION 0x3FFFEDC0
+#define HYDRA_DEMOD2_VERSION 0x3FFFEDC4
+#define HYDRA_DEMOD3_VERSION 0x3FFFEDC8
+#define HYDRA_DEMOD4_VERSION 0x3FFFEDCC
+#define HYDRA_DEMOD5_VERSION 0x3FFFEDD0
+#define HYDRA_DEMOD6_VERSION 0x3FFFEDD4
+#define HYDRA_DEMOD7_VERSION 0x3FFFEDD8
+#define HYDRA_HEAR_BEAT 0x3FFFEDDC
+#define HYDRA_SKU_MGMT 0x3FFFEBC0
+
+#define MXL_HYDRA_FPGA_A_ADDRESS 0x91C00000
+#define MXL_HYDRA_FPGA_B_ADDRESS 0x91D00000
+
+/* TS control base address */
+#define HYDRA_TS_CTRL_BASE_ADDR 0x90700000
+
+#define MPEG_MUX_MODE_SLICE0_REG (HYDRA_TS_CTRL_BASE_ADDR + 0x08)
+
+#define MPEG_MUX_MODE_SLICE1_REG (HYDRA_TS_CTRL_BASE_ADDR + 0x08)
+
+#define PID_BANK_SEL_SLICE0_REG (HYDRA_TS_CTRL_BASE_ADDR + 0x190)
+#define PID_BANK_SEL_SLICE1_REG (HYDRA_TS_CTRL_BASE_ADDR + 0x1B0)
+
+#define MPEG_CLK_GATED_REG (HYDRA_TS_CTRL_BASE_ADDR + 0x20)
+
+#define MPEG_CLK_ALWAYS_ON_REG (HYDRA_TS_CTRL_BASE_ADDR + 0x1D4)
+
+#define HYDRA_REGULAR_PID_BANK_A_REG (HYDRA_TS_CTRL_BASE_ADDR + 0x190)
+
+#define HYDRA_FIXED_PID_BANK_A_REG (HYDRA_TS_CTRL_BASE_ADDR + 0x190)
+
+#define HYDRA_REGULAR_PID_BANK_B_REG (HYDRA_TS_CTRL_BASE_ADDR + 0x1B0)
+
+#define HYDRA_FIXED_PID_BANK_B_REG (HYDRA_TS_CTRL_BASE_ADDR + 0x1B0)
+
+#define FIXED_PID_TBL_REG_ADDRESS_0 (HYDRA_TS_CTRL_BASE_ADDR + 0x9000)
+#define FIXED_PID_TBL_REG_ADDRESS_1 (HYDRA_TS_CTRL_BASE_ADDR + 0x9100)
+#define FIXED_PID_TBL_REG_ADDRESS_2 (HYDRA_TS_CTRL_BASE_ADDR + 0x9200)
+#define FIXED_PID_TBL_REG_ADDRESS_3 (HYDRA_TS_CTRL_BASE_ADDR + 0x9300)
+
+#define FIXED_PID_TBL_REG_ADDRESS_4 (HYDRA_TS_CTRL_BASE_ADDR + 0xB000)
+#define FIXED_PID_TBL_REG_ADDRESS_5 (HYDRA_TS_CTRL_BASE_ADDR + 0xB100)
+#define FIXED_PID_TBL_REG_ADDRESS_6 (HYDRA_TS_CTRL_BASE_ADDR + 0xB200)
+#define FIXED_PID_TBL_REG_ADDRESS_7 (HYDRA_TS_CTRL_BASE_ADDR + 0xB300)
+
+#define REGULAR_PID_TBL_REG_ADDRESS_0 (HYDRA_TS_CTRL_BASE_ADDR + 0x8000)
+#define REGULAR_PID_TBL_REG_ADDRESS_1 (HYDRA_TS_CTRL_BASE_ADDR + 0x8200)
+#define REGULAR_PID_TBL_REG_ADDRESS_2 (HYDRA_TS_CTRL_BASE_ADDR + 0x8400)
+#define REGULAR_PID_TBL_REG_ADDRESS_3 (HYDRA_TS_CTRL_BASE_ADDR + 0x8600)
+
+#define REGULAR_PID_TBL_REG_ADDRESS_4 (HYDRA_TS_CTRL_BASE_ADDR + 0xA000)
+#define REGULAR_PID_TBL_REG_ADDRESS_5 (HYDRA_TS_CTRL_BASE_ADDR + 0xA200)
+#define REGULAR_PID_TBL_REG_ADDRESS_6 (HYDRA_TS_CTRL_BASE_ADDR + 0xA400)
+#define REGULAR_PID_TBL_REG_ADDRESS_7 (HYDRA_TS_CTRL_BASE_ADDR + 0xA600)
+
+/***************************************************************************/
+
+#define PAD_MUX_GPIO_00_SYNC_BASEADDR 0x90000188
+
+
+#define PAD_MUX_UART_RX_C_PINMUX_BASEADDR 0x9000001C
+
+#define XPT_PACKET_GAP_MIN_BASEADDR 0x90700044
+#define XPT_NCO_COUNT_BASEADDR 0x90700238
+
+#define XPT_NCO_COUNT_BASEADDR1 0x9070023C
+
+/* V2 DigRF status register */
+
+#define XPT_PID_BASEADDR 0x90708000
+
+#define XPT_PID_REMAP_BASEADDR 0x90708004
+
+#define XPT_KNOWN_PID_BASEADDR 0x90709000
+
+#define XPT_PID_BASEADDR1 0x9070A000
+
+#define XPT_PID_REMAP_BASEADDR1 0x9070A004
+
+#define XPT_KNOWN_PID_BASEADDR1 0x9070B000
+
+#define XPT_BERT_LOCK_BASEADDR 0x907000B8
+
+#define XPT_BERT_BASEADDR 0x907000BC
+
+#define XPT_BERT_INVERT_BASEADDR 0x907000C0
+
+#define XPT_BERT_HEADER_BASEADDR 0x907000C4
+
+#define XPT_BERT_BASEADDR1 0x907000C8
+
+#define XPT_BERT_BIT_COUNT0_BASEADDR 0x907000CC
+
+#define XPT_BERT_BIT_COUNT0_BASEADDR1 0x907000D0
+
+#define XPT_BERT_BIT_COUNT1_BASEADDR 0x907000D4
+
+#define XPT_BERT_BIT_COUNT1_BASEADDR1 0x907000D8
+
+#define XPT_BERT_BIT_COUNT2_BASEADDR 0x907000DC
+
+#define XPT_BERT_BIT_COUNT2_BASEADDR1 0x907000E0
+
+#define XPT_BERT_BIT_COUNT3_BASEADDR 0x907000E4
+
+#define XPT_BERT_BIT_COUNT3_BASEADDR1 0x907000E8
+
+#define XPT_BERT_BIT_COUNT4_BASEADDR 0x907000EC
+
+#define XPT_BERT_BIT_COUNT4_BASEADDR1 0x907000F0
+
+#define XPT_BERT_BIT_COUNT5_BASEADDR 0x907000F4
+
+#define XPT_BERT_BIT_COUNT5_BASEADDR1 0x907000F8
+
+#define XPT_BERT_BIT_COUNT6_BASEADDR 0x907000FC
+
+#define XPT_BERT_BIT_COUNT6_BASEADDR1 0x90700100
+
+#define XPT_BERT_BIT_COUNT7_BASEADDR 0x90700104
+
+#define XPT_BERT_BIT_COUNT7_BASEADDR1 0x90700108
+
+#define XPT_BERT_ERR_COUNT0_BASEADDR 0x9070010C
+
+#define XPT_BERT_ERR_COUNT0_BASEADDR1 0x90700110
+
+#define XPT_BERT_ERR_COUNT1_BASEADDR 0x90700114
+
+#define XPT_BERT_ERR_COUNT1_BASEADDR1 0x90700118
+
+#define XPT_BERT_ERR_COUNT2_BASEADDR 0x9070011C
+
+#define XPT_BERT_ERR_COUNT2_BASEADDR1 0x90700120
+
+#define XPT_BERT_ERR_COUNT3_BASEADDR 0x90700124
+
+#define XPT_BERT_ERR_COUNT3_BASEADDR1 0x90700128
+
+#define XPT_BERT_ERR_COUNT4_BASEADDR 0x9070012C
+
+#define XPT_BERT_ERR_COUNT4_BASEADDR1 0x90700130
+
+#define XPT_BERT_ERR_COUNT5_BASEADDR 0x90700134
+
+#define XPT_BERT_ERR_COUNT5_BASEADDR1 0x90700138
+
+#define XPT_BERT_ERR_COUNT6_BASEADDR 0x9070013C
+
+#define XPT_BERT_ERR_COUNT6_BASEADDR1 0x90700140
+
+#define XPT_BERT_ERR_COUNT7_BASEADDR 0x90700144
+
+#define XPT_BERT_ERR_COUNT7_BASEADDR1 0x90700148
+
+#define XPT_BERT_ERROR_BASEADDR 0x9070014C
+
+#define XPT_BERT_ANALYZER_BASEADDR 0x90700150
+
+#define XPT_BERT_ANALYZER_BASEADDR1 0x90700154
+
+#define XPT_BERT_ANALYZER_BASEADDR2 0x90700158
+
+#define XPT_BERT_ANALYZER_BASEADDR3 0x9070015C
+
+#define XPT_BERT_ANALYZER_BASEADDR4 0x90700160
+
+#define XPT_BERT_ANALYZER_BASEADDR5 0x90700164
+
+#define XPT_BERT_ANALYZER_BASEADDR6 0x90700168
+
+#define XPT_BERT_ANALYZER_BASEADDR7 0x9070016C
+
+#define XPT_BERT_ANALYZER_BASEADDR8 0x90700170
+
+#define XPT_BERT_ANALYZER_BASEADDR9 0x90700174
+
+#define XPT_DMD0_BASEADDR 0x9070024C
+
+/* V2 AGC Gain Freeze & step */
+#define DBG_ENABLE_DISABLE_AGC (0x3FFFCF60) /* 1: DISABLE, 0: ENABLE */
+#define WB_DFE0_DFE_FB_RF1_BASEADDR 0x903004A4
+
+#define WB_DFE1_DFE_FB_RF1_BASEADDR 0x904004A4
+
+#define WB_DFE2_DFE_FB_RF1_BASEADDR 0x905004A4
+
+#define WB_DFE3_DFE_FB_RF1_BASEADDR 0x906004A4
+
+#define AFE_REG_D2A_TA_RFFE_LNA_BO_1P8_BASEADDR 0x90200104
+
+#define AFE_REG_AFE_REG_SPARE_BASEADDR 0x902000A0
+
+#define AFE_REG_AFE_REG_SPARE_BASEADDR1 0x902000B4
+
+#define AFE_REG_AFE_REG_SPARE_BASEADDR2 0x902000C4
+
+#define AFE_REG_AFE_REG_SPARE_BASEADDR3 0x902000D4
+
+#define WB_DFE0_DFE_FB_AGC_BASEADDR 0x90300498
+
+#define WB_DFE1_DFE_FB_AGC_BASEADDR 0x90400498
+
+#define WB_DFE2_DFE_FB_AGC_BASEADDR 0x90500498
+
+#define WB_DFE3_DFE_FB_AGC_BASEADDR 0x90600498
+
+#define WDT_WD_INT_BASEADDR 0x8002000C
+
+#define FSK_TX_FTM_BASEADDR 0x80090000
+
+#define FSK_TX_FTM_TX_CNT_BASEADDR 0x80090018
+
+#define AFE_REG_D2A_FSK_BIAS_BASEADDR 0x90200040
+
+#define DMD_TEI_BASEADDR 0x3FFFEBE0
+
+#endif /* __MXL58X_REGISTERS_H__ */
diff --git a/drivers/media/dvb-frontends/s5h1420.c b/drivers/media/dvb-frontends/s5h1420.c
index cba9bff05b12..fd427a29c001 100644
--- a/drivers/media/dvb-frontends/s5h1420.c
+++ b/drivers/media/dvb-frontends/s5h1420.c
@@ -864,7 +864,7 @@ static int s5h1420_tuner_i2c_tuner_xfer(struct i2c_adapter *i2c_adap, struct i2c
return i2c_transfer(state->i2c, m, 1 + num) == 1 + num ? num : -EIO;
}
-static struct i2c_algorithm s5h1420_tuner_i2c_algo = {
+static const struct i2c_algorithm s5h1420_tuner_i2c_algo = {
.master_xfer = s5h1420_tuner_i2c_tuner_xfer,
.functionality = s5h1420_tuner_i2c_func,
};
diff --git a/drivers/media/dvb-frontends/stv0367.c b/drivers/media/dvb-frontends/stv0367.c
index e726c2e00460..f3529df8211d 100644
--- a/drivers/media/dvb-frontends/stv0367.c
+++ b/drivers/media/dvb-frontends/stv0367.c
@@ -25,6 +25,8 @@
#include <linux/slab.h>
#include <linux/i2c.h>
+#include "dvb_math.h"
+
#include "stv0367.h"
#include "stv0367_defs.h"
#include "stv0367_regs.h"
@@ -1437,7 +1439,7 @@ static int stv0367ter_get_frontend(struct dvb_frontend *fe,
return 0;
}
-static int stv0367ter_read_snr(struct dvb_frontend *fe, u16 *snr)
+static u32 stv0367ter_snr_readreg(struct dvb_frontend *fe)
{
struct stv0367_state *state = fe->demodulator_priv;
u32 snru32 = 0;
@@ -1453,10 +1455,16 @@ static int stv0367ter_read_snr(struct dvb_frontend *fe, u16 *snr)
cpt++;
}
-
snru32 /= 10;/*average on 10 values*/
- *snr = snru32 / 1000;
+ return snru32;
+}
+
+static int stv0367ter_read_snr(struct dvb_frontend *fe, u16 *snr)
+{
+ u32 snrval = stv0367ter_snr_readreg(fe);
+
+ *snr = snrval / 1000;
return 0;
}
@@ -1501,7 +1509,8 @@ static int stv0367ter_read_status(struct dvb_frontend *fe,
*status = 0;
if (stv0367_readbits(state, F367TER_LK)) {
- *status |= FE_HAS_LOCK;
+ *status = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI
+ | FE_HAS_SYNC | FE_HAS_LOCK;
dprintk("%s: stv0367 has locked\n", __func__);
}
@@ -2140,6 +2149,71 @@ static u32 stv0367cab_GetSymbolRate(struct stv0367_state *state, u32 mclk_hz)
return regsym;
}
+static u32 stv0367cab_fsm_status(struct stv0367_state *state)
+{
+ return stv0367_readbits(state, F367CAB_FSM_STATUS);
+}
+
+static u32 stv0367cab_qamfec_lock(struct stv0367_state *state)
+{
+ return stv0367_readbits(state,
+ (state->cab_state->qamfec_status_reg ?
+ state->cab_state->qamfec_status_reg :
+ F367CAB_QAMFEC_LOCK));
+}
+
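+/*
+ * Map the raw QAM_FSM_STATUS acquisition state to the corresponding
+ * stv0367_cab_signal_type, as previously done inline in stv0367cab_algo().
+ */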
+static
+enum stv0367_cab_signal_type stv0367cab_fsm_signaltype(u32 qam_fsm_status)
+{
+ enum stv0367_cab_signal_type signaltype = FE_CAB_NOAGC;
+
+ switch (qam_fsm_status) {
+ case 1:
+ signaltype = FE_CAB_NOAGC;
+ break;
+ case 2:
+ signaltype = FE_CAB_NOTIMING;
+ break;
+ case 3:
+ signaltype = FE_CAB_TIMINGOK;
+ break;
+ case 4:
+ signaltype = FE_CAB_NOCARRIER;
+ break;
+ case 5:
+ signaltype = FE_CAB_CARRIEROK;
+ break;
+ case 7:
+ signaltype = FE_CAB_NOBLIND;
+ break;
+ case 8:
+ signaltype = FE_CAB_BLINDOK;
+ break;
+ case 10:
+ signaltype = FE_CAB_NODEMOD;
+ break;
+ case 11:
+ signaltype = FE_CAB_DEMODOK;
+ break;
+ case 12:
+ signaltype = FE_CAB_DEMODOK;
+ break;
+ case 13:
+ signaltype = FE_CAB_NODEMOD;
+ break;
+ case 14:
+ signaltype = FE_CAB_NOBLIND;
+ break;
+ case 15:
+ signaltype = FE_CAB_NOSIGNAL;
+ break;
+ default:
+ break;
+ }
+
+ return signaltype;
+}
+
static int stv0367cab_read_status(struct dvb_frontend *fe,
enum fe_status *status)
{
@@ -2149,10 +2223,26 @@ static int stv0367cab_read_status(struct dvb_frontend *fe,
*status = 0;
- if (stv0367_readbits(state, (state->cab_state->qamfec_status_reg ?
- state->cab_state->qamfec_status_reg : F367CAB_QAMFEC_LOCK))) {
- *status |= FE_HAS_LOCK;
+ /* update cab_state->state from QAM_FSM_STATUS */
+ state->cab_state->state = stv0367cab_fsm_signaltype(
+ stv0367cab_fsm_status(state));
+
+ if (stv0367cab_qamfec_lock(state)) {
+ *status = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI
+ | FE_HAS_SYNC | FE_HAS_LOCK;
dprintk("%s: stv0367 has locked\n", __func__);
+ } else {
+ if (state->cab_state->state > FE_CAB_NOSIGNAL)
+ *status |= FE_HAS_SIGNAL;
+
+ if (state->cab_state->state > FE_CAB_NOCARRIER)
+ *status |= FE_HAS_CARRIER;
+
+ if (state->cab_state->state >= FE_CAB_DEMODOK)
+ *status |= FE_HAS_VITERBI;
+
+ if (state->cab_state->state >= FE_CAB_DATAOK)
+ *status |= FE_HAS_SYNC;
}
return 0;
@@ -2353,7 +2443,7 @@ enum stv0367_cab_signal_type stv0367cab_algo(struct stv0367_state *state,
LockTime = 0;
stv0367_writereg(state, R367CAB_CTRL_1, 0x00);
do {
- QAM_Lock = stv0367_readbits(state, F367CAB_FSM_STATUS);
+ QAM_Lock = stv0367cab_fsm_status(state);
if ((LockTime >= (DemodTimeOut - EQLTimeOut)) &&
(QAM_Lock == 0x04))
/*
@@ -2414,10 +2504,7 @@ enum stv0367_cab_signal_type stv0367cab_algo(struct stv0367_state *state,
do {
usleep_range(5000, 7000);
LockTime += 5;
- QAMFEC_Lock = stv0367_readbits(state,
- (state->cab_state->qamfec_status_reg ?
- state->cab_state->qamfec_status_reg :
- F367CAB_QAMFEC_LOCK));
+ QAMFEC_Lock = stv0367cab_qamfec_lock(state);
} while (!QAMFEC_Lock && (LockTime < FECTimeOut));
} else
QAMFEC_Lock = 0;
@@ -2453,52 +2540,8 @@ enum stv0367_cab_signal_type stv0367cab_algo(struct stv0367_state *state,
cab_state->locked = 1;
/* stv0367_setbits(state, F367CAB_AGC_ACCUMRSTSEL,7);*/
- } else {
- switch (QAM_Lock) {
- case 1:
- signalType = FE_CAB_NOAGC;
- break;
- case 2:
- signalType = FE_CAB_NOTIMING;
- break;
- case 3:
- signalType = FE_CAB_TIMINGOK;
- break;
- case 4:
- signalType = FE_CAB_NOCARRIER;
- break;
- case 5:
- signalType = FE_CAB_CARRIEROK;
- break;
- case 7:
- signalType = FE_CAB_NOBLIND;
- break;
- case 8:
- signalType = FE_CAB_BLINDOK;
- break;
- case 10:
- signalType = FE_CAB_NODEMOD;
- break;
- case 11:
- signalType = FE_CAB_DEMODOK;
- break;
- case 12:
- signalType = FE_CAB_DEMODOK;
- break;
- case 13:
- signalType = FE_CAB_NODEMOD;
- break;
- case 14:
- signalType = FE_CAB_NOBLIND;
- break;
- case 15:
- signalType = FE_CAB_NOSIGNAL;
- break;
- default:
- break;
- }
-
- }
+ } else
+ signalType = stv0367cab_fsm_signaltype(QAM_Lock);
/* Set the AGC control values to tracking values */
stv0367_writebits(state, F367CAB_AGC_ACCUMRSTSEL, TrackAGCAccum);
@@ -2702,51 +2745,61 @@ static int stv0367cab_read_strength(struct dvb_frontend *fe, u16 *strength)
return 0;
}
-static int stv0367cab_read_snr(struct dvb_frontend *fe, u16 *snr)
+static int stv0367cab_snr_power(struct dvb_frontend *fe)
{
struct stv0367_state *state = fe->demodulator_priv;
- u32 noisepercentage;
enum stv0367cab_mod QAMSize;
- u32 regval = 0, temp = 0;
- int power, i;
QAMSize = stv0367_readbits(state, F367CAB_QAM_MODE);
switch (QAMSize) {
case FE_CAB_MOD_QAM4:
- power = 21904;
- break;
+ return 21904;
case FE_CAB_MOD_QAM16:
- power = 20480;
- break;
+ return 20480;
case FE_CAB_MOD_QAM32:
- power = 23040;
- break;
+ return 23040;
case FE_CAB_MOD_QAM64:
- power = 21504;
- break;
+ return 21504;
case FE_CAB_MOD_QAM128:
- power = 23616;
- break;
+ return 23616;
case FE_CAB_MOD_QAM256:
- power = 21760;
- break;
- case FE_CAB_MOD_QAM512:
- power = 1;
- break;
+ return 21760;
case FE_CAB_MOD_QAM1024:
- power = 21280;
- break;
+ return 21280;
default:
- power = 1;
break;
}
+ return 1;
+}
+
+static int stv0367cab_snr_readreg(struct dvb_frontend *fe, int avgdiv)
+{
+ struct stv0367_state *state = fe->demodulator_priv;
+ u32 regval = 0;
+ int i;
+
for (i = 0; i < 10; i++) {
regval += (stv0367_readbits(state, F367CAB_SNR_LO)
+ 256 * stv0367_readbits(state, F367CAB_SNR_HI));
}
- regval /= 10; /*for average over 10 times in for loop above*/
+ if (avgdiv)
+ regval /= 10;
+
+ return regval;
+}
+
+static int stv0367cab_read_snr(struct dvb_frontend *fe, u16 *snr)
+{
+ struct stv0367_state *state = fe->demodulator_priv;
+ u32 noisepercentage;
+ u32 regval = 0, temp = 0;
+ int power;
+
+ power = stv0367cab_snr_power(fe);
+ regval = stv0367cab_snr_readreg(fe, 1);
+
if (regval != 0) {
temp = power
* (1 << (3 + stv0367_readbits(state, F367CAB_SNR_PER)));
@@ -2980,21 +3033,117 @@ static int stv0367ddb_set_frontend(struct dvb_frontend *fe)
return -EINVAL;
}
+static void stv0367ddb_read_signal_strength(struct dvb_frontend *fe)
+{
+ struct stv0367_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ s32 signalstrength;
+
+ switch (state->activedemod) {
+ case demod_cab:
+ signalstrength = stv0367cab_get_rf_lvl(state) * 1000;
+ break;
+ default:
+ p->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ return;
+ }
+
+ p->strength.stat[0].scale = FE_SCALE_DECIBEL;
+ p->strength.stat[0].uvalue = signalstrength;
+}
+
+static void stv0367ddb_read_snr(struct dvb_frontend *fe)
+{
+ struct stv0367_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ int cab_pwr;
+ u32 regval, tmpval, snrval = 0;
+
+ switch (state->activedemod) {
+ case demod_ter:
+ snrval = stv0367ter_snr_readreg(fe);
+ break;
+ case demod_cab:
+ cab_pwr = stv0367cab_snr_power(fe);
+ regval = stv0367cab_snr_readreg(fe, 0);
+
+ /* prevent division by zero */
+ if (!regval) {
+ snrval = 0;
+ break;
+ }
+
+ tmpval = (cab_pwr * 320) / regval;
+ snrval = ((tmpval != 0) ? (intlog2(tmpval) / 5581) : 0);
+ break;
+ default:
+ p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ return;
+ }
+
+ p->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+ p->cnr.stat[0].uvalue = snrval;
+}
+
+static void stv0367ddb_read_ucblocks(struct dvb_frontend *fe)
+{
+ struct stv0367_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ u32 ucblocks = 0;
+
+ switch (state->activedemod) {
+ case demod_ter:
+ stv0367ter_read_ucblocks(fe, &ucblocks);
+ break;
+ case demod_cab:
+ stv0367cab_read_ucblcks(fe, &ucblocks);
+ break;
+ default:
+ p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ return;
+ }
+
+ p->block_error.stat[0].scale = FE_SCALE_COUNTER;
+ p->block_error.stat[0].uvalue = ucblocks;
+}
+
static int stv0367ddb_read_status(struct dvb_frontend *fe,
enum fe_status *status)
{
struct stv0367_state *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ int ret = 0;
switch (state->activedemod) {
case demod_ter:
- return stv0367ter_read_status(fe, status);
+ ret = stv0367ter_read_status(fe, status);
+ break;
case demod_cab:
- return stv0367cab_read_status(fe, status);
+ ret = stv0367cab_read_status(fe, status);
+ break;
default:
break;
}
- return -EINVAL;
+ /* stop and report on *_read_status failure */
+ if (ret)
+ return ret;
+
+ stv0367ddb_read_signal_strength(fe);
+
+ /* read carrier/noise when a carrier is detected */
+ if (*status & FE_HAS_CARRIER)
+ stv0367ddb_read_snr(fe);
+ else
+ p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+
+ /* read uncorrected blocks on FE_HAS_LOCK */
+ if (*status & FE_HAS_LOCK)
+ stv0367ddb_read_ucblocks(fe);
+ else
+ p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+
+ return 0;
}
static int stv0367ddb_get_frontend(struct dvb_frontend *fe,
@@ -3011,7 +3160,7 @@ static int stv0367ddb_get_frontend(struct dvb_frontend *fe,
break;
}
- return -EINVAL;
+ return 0;
}
static int stv0367ddb_sleep(struct dvb_frontend *fe)
@@ -3035,6 +3184,7 @@ static int stv0367ddb_sleep(struct dvb_frontend *fe)
static int stv0367ddb_init(struct stv0367_state *state)
{
struct stv0367ter_state *ter_state = state->ter_state;
+ struct dtv_frontend_properties *p = &state->fe.dtv_property_cache;
stv0367_writereg(state, R367TER_TOPCTRL, 0x10);
@@ -3109,6 +3259,13 @@ static int stv0367ddb_init(struct stv0367_state *state)
ter_state->first_lock = 0;
ter_state->unlock_counter = 2;
+ p->strength.len = 1;
+ p->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->cnr.len = 1;
+ p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->block_error.len = 1;
+ p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+
return 0;
}
@@ -3126,15 +3283,12 @@ static const struct dvb_frontend_ops stv0367ddb_ops = {
0x400 |/* FE_CAN_QAM_4 */
FE_CAN_QAM_16 | FE_CAN_QAM_32 |
FE_CAN_QAM_64 | FE_CAN_QAM_128 |
- FE_CAN_QAM_256 | FE_CAN_FEC_AUTO |
+ FE_CAN_QAM_256 |
/* DVB-T */
- FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 |
- FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 |
- FE_CAN_FEC_AUTO |
- FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 |
- FE_CAN_QAM_128 | FE_CAN_QAM_256 | FE_CAN_QAM_AUTO |
- FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_RECOVER |
- FE_CAN_INVERSION_AUTO |
+ FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
+ FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
+ FE_CAN_QPSK | FE_CAN_TRANSMISSION_MODE_AUTO |
+ FE_CAN_RECOVER | FE_CAN_INVERSION_AUTO |
FE_CAN_MUTE_TS
},
.release = stv0367_release,
diff --git a/drivers/media/dvb-frontends/stv0910.c b/drivers/media/dvb-frontends/stv0910.c
new file mode 100644
index 000000000000..8bf855c301f5
--- /dev/null
+++ b/drivers/media/dvb-frontends/stv0910.c
@@ -0,0 +1,1813 @@
+/*
+ * Driver for the ST STV0910 DVB-S/S2 demodulator.
+ *
+ * Copyright (C) 2014-2015 Ralph Metzler <rjkm@metzlerbros.de>
+ * Marcus Metzler <mocm@metzlerbros.de>
+ * developed for Digital Devices GmbH
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 only, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/i2c.h>
+#include <asm/div64.h>
+
+#include "dvb_math.h"
+#include "dvb_frontend.h"
+#include "stv0910.h"
+#include "stv0910_regs.h"
+
+#define EXT_CLOCK 30000000
+#define TUNING_DELAY 200
+#define BER_SRC_S 0x20
+#define BER_SRC_S2 0x20
+
+static LIST_HEAD(stvlist);
+
+enum receive_mode { RCVMODE_NONE, RCVMODE_DVBS, RCVMODE_DVBS2, RCVMODE_AUTO };
+
+enum dvbs2_fectype { DVBS2_64K, DVBS2_16K };
+
+enum dvbs2_mod_cod {
+ DVBS2_DUMMY_PLF, DVBS2_QPSK_1_4, DVBS2_QPSK_1_3, DVBS2_QPSK_2_5,
+ DVBS2_QPSK_1_2, DVBS2_QPSK_3_5, DVBS2_QPSK_2_3, DVBS2_QPSK_3_4,
+ DVBS2_QPSK_4_5, DVBS2_QPSK_5_6, DVBS2_QPSK_8_9, DVBS2_QPSK_9_10,
+ DVBS2_8PSK_3_5, DVBS2_8PSK_2_3, DVBS2_8PSK_3_4, DVBS2_8PSK_5_6,
+ DVBS2_8PSK_8_9, DVBS2_8PSK_9_10, DVBS2_16APSK_2_3, DVBS2_16APSK_3_4,
+ DVBS2_16APSK_4_5, DVBS2_16APSK_5_6, DVBS2_16APSK_8_9, DVBS2_16APSK_9_10,
+ DVBS2_32APSK_3_4, DVBS2_32APSK_4_5, DVBS2_32APSK_5_6, DVBS2_32APSK_8_9,
+ DVBS2_32APSK_9_10
+};
+
+enum fe_stv0910_mod_cod {
+ FE_DUMMY_PLF, FE_QPSK_14, FE_QPSK_13, FE_QPSK_25,
+ FE_QPSK_12, FE_QPSK_35, FE_QPSK_23, FE_QPSK_34,
+ FE_QPSK_45, FE_QPSK_56, FE_QPSK_89, FE_QPSK_910,
+ FE_8PSK_35, FE_8PSK_23, FE_8PSK_34, FE_8PSK_56,
+ FE_8PSK_89, FE_8PSK_910, FE_16APSK_23, FE_16APSK_34,
+ FE_16APSK_45, FE_16APSK_56, FE_16APSK_89, FE_16APSK_910,
+ FE_32APSK_34, FE_32APSK_45, FE_32APSK_56, FE_32APSK_89,
+ FE_32APSK_910
+};
+
+enum fe_stv0910_roll_off { FE_SAT_35, FE_SAT_25, FE_SAT_20, FE_SAT_15 };
+
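+/*
+ * Multiply two 32-bit values and divide by a third using a 64-bit
+ * intermediate, so a * b cannot overflow (used e.g. when scaling the
+ * symbol rate against the master clock).
+ */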
+static inline u32 muldiv32(u32 a, u32 b, u32 c)
+{
+ u64 tmp64;
+
+ tmp64 = (u64)a * (u64)b;
+ do_div(tmp64, c);
+
+ return (u32)tmp64;
+}
+
+struct stv_base {
+ struct list_head stvlist;
+
+ u8 adr;
+ struct i2c_adapter *i2c;
+ struct mutex i2c_lock; /* shared I2C access protect */
+ struct mutex reg_lock; /* shared register write protect */
+ int count;
+
+ u32 extclk;
+ u32 mclk;
+};
+
+struct stv {
+ struct stv_base *base;
+ struct dvb_frontend fe;
+ int nr;
+ u16 regoff;
+ u8 i2crpt;
+ u8 tscfgh;
+ u8 tsgeneral;
+ u8 tsspeed;
+ u8 single;
+ unsigned long tune_time;
+
+ s32 search_range;
+ u32 started;
+ u32 demod_lock_time;
+ enum receive_mode receive_mode;
+ u32 demod_timeout;
+ u32 fec_timeout;
+ u32 first_time_lock;
+ u8 demod_bits;
+ u32 symbol_rate;
+
+ u8 last_viterbi_rate;
+ enum fe_code_rate puncture_rate;
+ enum fe_stv0910_mod_cod mod_cod;
+ enum dvbs2_fectype fectype;
+ u32 pilots;
+ enum fe_stv0910_roll_off feroll_off;
+
+ int is_standard_broadcast;
+ int is_vcm;
+
+ u32 cur_scrambling_code;
+
+ u32 last_bernumerator;
+ u32 last_berdenominator;
+ u8 berscale;
+
+ u8 vth[6];
+};
+
+struct sinit_table {
+ u16 address;
+ u8 data;
+};
+
+struct slookup {
+ s16 value;
+ u32 reg_value;
+};
+
+static inline int i2c_write(struct i2c_adapter *adap, u8 adr,
+ u8 *data, int len)
+{
+ struct i2c_msg msg = {.addr = adr, .flags = 0,
+ .buf = data, .len = len};
+
+ if (i2c_transfer(adap, &msg, 1) != 1) {
+ dev_warn(&adap->dev, "i2c write error ([%02x] %04x: %02x)\n",
+ adr, (data[0] << 8) | data[1],
+ (len > 2 ? data[2] : 0));
+ return -EREMOTEIO;
+ }
+ return 0;
+}
+
+static int i2c_write_reg16(struct i2c_adapter *adap, u8 adr, u16 reg, u8 val)
+{
+ u8 msg[3] = {reg >> 8, reg & 0xff, val};
+
+ return i2c_write(adap, adr, msg, 3);
+}
+
+static int write_reg(struct stv *state, u16 reg, u8 val)
+{
+ return i2c_write_reg16(state->base->i2c, state->base->adr, reg, val);
+}
+
+static inline int i2c_read_regs16(struct i2c_adapter *adapter, u8 adr,
+ u16 reg, u8 *val, int count)
+{
+ u8 msg[2] = {reg >> 8, reg & 0xff};
+ struct i2c_msg msgs[2] = {{.addr = adr, .flags = 0,
+ .buf = msg, .len = 2},
+ {.addr = adr, .flags = I2C_M_RD,
+ .buf = val, .len = count } };
+
+ if (i2c_transfer(adapter, msgs, 2) != 2) {
+ dev_warn(&adapter->dev, "i2c read error ([%02x] %04x)\n",
+ adr, reg);
+ return -EREMOTEIO;
+ }
+ return 0;
+}
+
+static int read_reg(struct stv *state, u16 reg, u8 *val)
+{
+ return i2c_read_regs16(state->base->i2c, state->base->adr,
+ reg, val, 1);
+}
+
+static int read_regs(struct stv *state, u16 reg, u8 *val, int len)
+{
+ return i2c_read_regs16(state->base->i2c, state->base->adr,
+ reg, val, len);
+}
+
+static int write_shared_reg(struct stv *state, u16 reg, u8 mask, u8 val)
+{
+ int status;
+ u8 tmp;
+
+ mutex_lock(&state->base->reg_lock);
+ status = read_reg(state, reg, &tmp);
+ if (!status)
+ status = write_reg(state, reg, (tmp & ~mask) | (val & mask));
+ mutex_unlock(&state->base->reg_lock);
+ return status;
+}
+
+static const struct slookup s1_sn_lookup[] = {
+ { 0, 9242 }, /* C/N= 0dB */
+ { 5, 9105 }, /* C/N= 0.5dB */
+ { 10, 8950 }, /* C/N= 1.0dB */
+ { 15, 8780 }, /* C/N= 1.5dB */
+ { 20, 8566 }, /* C/N= 2.0dB */
+ { 25, 8366 }, /* C/N= 2.5dB */
+ { 30, 8146 }, /* C/N= 3.0dB */
+ { 35, 7908 }, /* C/N= 3.5dB */
+ { 40, 7666 }, /* C/N= 4.0dB */
+ { 45, 7405 }, /* C/N= 4.5dB */
+ { 50, 7136 }, /* C/N= 5.0dB */
+ { 55, 6861 }, /* C/N= 5.5dB */
+ { 60, 6576 }, /* C/N= 6.0dB */
+ { 65, 6330 }, /* C/N= 6.5dB */
+ { 70, 6048 }, /* C/N= 7.0dB */
+ { 75, 5768 }, /* C/N= 7.5dB */
+ { 80, 5492 }, /* C/N= 8.0dB */
+ { 85, 5224 }, /* C/N= 8.5dB */
+ { 90, 4959 }, /* C/N= 9.0dB */
+ { 95, 4709 }, /* C/N= 9.5dB */
+ { 100, 4467 }, /* C/N=10.0dB */
+ { 105, 4236 }, /* C/N=10.5dB */
+ { 110, 4013 }, /* C/N=11.0dB */
+ { 115, 3800 }, /* C/N=11.5dB */
+ { 120, 3598 }, /* C/N=12.0dB */
+ { 125, 3406 }, /* C/N=12.5dB */
+ { 130, 3225 }, /* C/N=13.0dB */
+ { 135, 3052 }, /* C/N=13.5dB */
+ { 140, 2889 }, /* C/N=14.0dB */
+ { 145, 2733 }, /* C/N=14.5dB */
+ { 150, 2587 }, /* C/N=15.0dB */
+ { 160, 2318 }, /* C/N=16.0dB */
+ { 170, 2077 }, /* C/N=17.0dB */
+ { 180, 1862 }, /* C/N=18.0dB */
+ { 190, 1670 }, /* C/N=19.0dB */
+ { 200, 1499 }, /* C/N=20.0dB */
+ { 210, 1347 }, /* C/N=21.0dB */
+ { 220, 1213 }, /* C/N=22.0dB */
+ { 230, 1095 }, /* C/N=23.0dB */
+ { 240, 992 }, /* C/N=24.0dB */
+ { 250, 900 }, /* C/N=25.0dB */
+ { 260, 826 }, /* C/N=26.0dB */
+ { 270, 758 }, /* C/N=27.0dB */
+ { 280, 702 }, /* C/N=28.0dB */
+ { 290, 653 }, /* C/N=29.0dB */
+ { 300, 613 }, /* C/N=30.0dB */
+ { 310, 579 }, /* C/N=31.0dB */
+ { 320, 550 }, /* C/N=32.0dB */
+ { 330, 526 }, /* C/N=33.0dB */
+ { 350, 490 }, /* C/N=35.0dB */
+ { 400, 445 }, /* C/N=40.0dB */
+ { 450, 430 }, /* C/N=45.0dB */
+ { 500, 426 }, /* C/N=50.0dB */
+ { 510, 425 } /* C/N=51.0dB */
+};
+
+static const struct slookup s2_sn_lookup[] = {
+ { -30, 13950 }, /* C/N=-3.0dB */
+ { -25, 13580 }, /* C/N=-2.5dB */
+ { -20, 13150 }, /* C/N=-2.0dB */
+ { -15, 12760 }, /* C/N=-1.5dB */
+ { -10, 12345 }, /* C/N=-1.0dB */
+ { -5, 11900 }, /* C/N=-0.5dB */
+ { 0, 11520 }, /* C/N= 0dB */
+ { 5, 11080 }, /* C/N= 0.5dB */
+ { 10, 10630 }, /* C/N= 1.0dB */
+ { 15, 10210 }, /* C/N= 1.5dB */
+ { 20, 9790 }, /* C/N= 2.0dB */
+ { 25, 9390 }, /* C/N= 2.5dB */
+ { 30, 8970 }, /* C/N= 3.0dB */
+ { 35, 8575 }, /* C/N= 3.5dB */
+ { 40, 8180 }, /* C/N= 4.0dB */
+ { 45, 7800 }, /* C/N= 4.5dB */
+ { 50, 7430 }, /* C/N= 5.0dB */
+ { 55, 7080 }, /* C/N= 5.5dB */
+ { 60, 6720 }, /* C/N= 6.0dB */
+ { 65, 6320 }, /* C/N= 6.5dB */
+ { 70, 6060 }, /* C/N= 7.0dB */
+ { 75, 5760 }, /* C/N= 7.5dB */
+ { 80, 5480 }, /* C/N= 8.0dB */
+ { 85, 5200 }, /* C/N= 8.5dB */
+ { 90, 4930 }, /* C/N= 9.0dB */
+ { 95, 4680 }, /* C/N= 9.5dB */
+ { 100, 4425 }, /* C/N=10.0dB */
+ { 105, 4210 }, /* C/N=10.5dB */
+ { 110, 3980 }, /* C/N=11.0dB */
+ { 115, 3765 }, /* C/N=11.5dB */
+ { 120, 3570 }, /* C/N=12.0dB */
+ { 125, 3315 }, /* C/N=12.5dB */
+ { 130, 3140 }, /* C/N=13.0dB */
+ { 135, 2980 }, /* C/N=13.5dB */
+ { 140, 2820 }, /* C/N=14.0dB */
+ { 145, 2670 }, /* C/N=14.5dB */
+ { 150, 2535 }, /* C/N=15.0dB */
+ { 160, 2270 }, /* C/N=16.0dB */
+ { 170, 2035 }, /* C/N=17.0dB */
+ { 180, 1825 }, /* C/N=18.0dB */
+ { 190, 1650 }, /* C/N=19.0dB */
+ { 200, 1485 }, /* C/N=20.0dB */
+ { 210, 1340 }, /* C/N=21.0dB */
+ { 220, 1212 }, /* C/N=22.0dB */
+ { 230, 1100 }, /* C/N=23.0dB */
+ { 240, 1000 }, /* C/N=24.0dB */
+ { 250, 910 }, /* C/N=25.0dB */
+ { 260, 836 }, /* C/N=26.0dB */
+ { 270, 772 }, /* C/N=27.0dB */
+ { 280, 718 }, /* C/N=28.0dB */
+ { 290, 671 }, /* C/N=29.0dB */
+ { 300, 635 }, /* C/N=30.0dB */
+ { 310, 602 }, /* C/N=31.0dB */
+ { 320, 575 }, /* C/N=32.0dB */
+ { 330, 550 }, /* C/N=33.0dB */
+ { 350, 517 }, /* C/N=35.0dB */
+ { 400, 480 }, /* C/N=40.0dB */
+ { 450, 466 }, /* C/N=45.0dB */
+ { 500, 464 }, /* C/N=50.0dB */
+ { 510, 463 }, /* C/N=51.0dB */
+};
+
+static const struct slookup padc_lookup[] = {
+ { 0, 118000 }, /* PADC= +0dBm */
+ { -100, 93600 }, /* PADC= -1dBm */
+ { -200, 74500 }, /* PADC= -2dBm */
+ { -300, 59100 }, /* PADC= -3dBm */
+ { -400, 47000 }, /* PADC= -4dBm */
+ { -500, 37300 }, /* PADC= -5dBm */
+ { -600, 29650 }, /* PADC= -6dBm */
+ { -700, 23520 }, /* PADC= -7dBm */
+ { -900, 14850 }, /* PADC= -9dBm */
+ { -1100, 9380 }, /* PADC=-11dBm */
+ { -1300, 5910 }, /* PADC=-13dBm */
+ { -1500, 3730 }, /* PADC=-15dBm */
+ { -1700, 2354 }, /* PADC=-17dBm */
+ { -1900, 1485 }, /* PADC=-19dBm */
+ { -2000, 1179 }, /* PADC=-20dBm */
+ { -2100, 1000 }, /* PADC=-21dBm */
+};
+
+/*********************************************************************
+ * Tracking carrier loop carrier QPSK 1/4 to 8PSK 9/10 long Frame
+ *********************************************************************/
+static const u8 s2car_loop[] = {
+ /*
+ * Modcod 2MPon 2MPoff 5MPon 5MPoff 10MPon 10MPoff
+ * 20MPon 20MPoff 30MPon 30MPoff
+ */
+
+ /* FE_QPSK_14 */
+ 0x0C, 0x3C, 0x0B, 0x3C, 0x2A, 0x2C, 0x2A, 0x1C, 0x3A, 0x3B,
+ /* FE_QPSK_13 */
+ 0x0C, 0x3C, 0x0B, 0x3C, 0x2A, 0x2C, 0x3A, 0x0C, 0x3A, 0x2B,
+ /* FE_QPSK_25 */
+ 0x1C, 0x3C, 0x1B, 0x3C, 0x3A, 0x1C, 0x3A, 0x3B, 0x3A, 0x2B,
+ /* FE_QPSK_12 */
+ 0x0C, 0x1C, 0x2B, 0x1C, 0x0B, 0x2C, 0x0B, 0x0C, 0x2A, 0x2B,
+ /* FE_QPSK_35 */
+ 0x1C, 0x1C, 0x2B, 0x1C, 0x0B, 0x2C, 0x0B, 0x0C, 0x2A, 0x2B,
+ /* FE_QPSK_23 */
+ 0x2C, 0x2C, 0x2B, 0x1C, 0x0B, 0x2C, 0x0B, 0x0C, 0x2A, 0x2B,
+ /* FE_QPSK_34 */
+ 0x3C, 0x2C, 0x3B, 0x2C, 0x1B, 0x1C, 0x1B, 0x3B, 0x3A, 0x1B,
+ /* FE_QPSK_45 */
+ 0x0D, 0x3C, 0x3B, 0x2C, 0x1B, 0x1C, 0x1B, 0x3B, 0x3A, 0x1B,
+ /* FE_QPSK_56 */
+ 0x1D, 0x3C, 0x0C, 0x2C, 0x2B, 0x1C, 0x1B, 0x3B, 0x0B, 0x1B,
+ /* FE_QPSK_89 */
+ 0x3D, 0x0D, 0x0C, 0x2C, 0x2B, 0x0C, 0x2B, 0x2B, 0x0B, 0x0B,
+ /* FE_QPSK_910 */
+ 0x1E, 0x0D, 0x1C, 0x2C, 0x3B, 0x0C, 0x2B, 0x2B, 0x1B, 0x0B,
+ /* FE_8PSK_35 */
+ 0x28, 0x09, 0x28, 0x09, 0x28, 0x09, 0x28, 0x08, 0x28, 0x27,
+ /* FE_8PSK_23 */
+ 0x19, 0x29, 0x19, 0x29, 0x19, 0x29, 0x38, 0x19, 0x28, 0x09,
+ /* FE_8PSK_34 */
+ 0x1A, 0x0B, 0x1A, 0x3A, 0x0A, 0x2A, 0x39, 0x2A, 0x39, 0x1A,
+ /* FE_8PSK_56 */
+ 0x2B, 0x2B, 0x1B, 0x1B, 0x0B, 0x1B, 0x1A, 0x0B, 0x1A, 0x1A,
+ /* FE_8PSK_89 */
+ 0x0C, 0x0C, 0x3B, 0x3B, 0x1B, 0x1B, 0x2A, 0x0B, 0x2A, 0x2A,
+ /* FE_8PSK_910 */
+ 0x0C, 0x1C, 0x0C, 0x3B, 0x2B, 0x1B, 0x3A, 0x0B, 0x2A, 0x2A,
+
+ /**********************************************************************
+ * Tracking carrier loop carrier 16APSK 2/3 to 32APSK 9/10 long Frame
+ **********************************************************************/
+
+ /*
+ * Modcod 2MPon 2MPoff 5MPon 5MPoff 10MPon 10MPoff 20MPon
+ * 20MPoff 30MPon 30MPoff
+ */
+
+ /* FE_16APSK_23 */
+ 0x0A, 0x0A, 0x0A, 0x0A, 0x1A, 0x0A, 0x39, 0x0A, 0x29, 0x0A,
+ /* FE_16APSK_34 */
+ 0x0A, 0x0A, 0x0A, 0x0A, 0x0B, 0x0A, 0x2A, 0x0A, 0x1A, 0x0A,
+ /* FE_16APSK_45 */
+ 0x0A, 0x0A, 0x0A, 0x0A, 0x1B, 0x0A, 0x3A, 0x0A, 0x2A, 0x0A,
+ /* FE_16APSK_56 */
+ 0x0A, 0x0A, 0x0A, 0x0A, 0x1B, 0x0A, 0x3A, 0x0A, 0x2A, 0x0A,
+ /* FE_16APSK_89 */
+ 0x0A, 0x0A, 0x0A, 0x0A, 0x2B, 0x0A, 0x0B, 0x0A, 0x3A, 0x0A,
+ /* FE_16APSK_910 */
+ 0x0A, 0x0A, 0x0A, 0x0A, 0x2B, 0x0A, 0x0B, 0x0A, 0x3A, 0x0A,
+ /* FE_32APSK_34 */
+ 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09,
+ /* FE_32APSK_45 */
+ 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09,
+ /* FE_32APSK_56 */
+ 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09,
+ /* FE_32APSK_89 */
+ 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09,
+ /* FE_32APSK_910 */
+ 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09,
+};
+
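+/*
+ * Pick the tracking carrier loop coefficient from s2car_loop[]: each
+ * MODCOD occupies one row of 10 bytes, indexed by symbol rate band
+ * (<=3, <=7, <=15, <=25, >25 Msps), with the pilots-on value in the
+ * even column and the pilots-off value in the odd column.
+ */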
+static u8 get_optim_cloop(struct stv *state,
+ enum fe_stv0910_mod_cod mod_cod, u32 pilots)
+{
+ int i = 0;
+
+ if (mod_cod >= FE_32APSK_910)
+ i = ((int)FE_32APSK_910 - (int)FE_QPSK_14) * 10;
+ else if (mod_cod >= FE_QPSK_14)
+ i = ((int)mod_cod - (int)FE_QPSK_14) * 10;
+
+ if (state->symbol_rate <= 3000000)
+ i += 0;
+ else if (state->symbol_rate <= 7000000)
+ i += 2;
+ else if (state->symbol_rate <= 15000000)
+ i += 4;
+ else if (state->symbol_rate <= 25000000)
+ i += 6;
+ else
+ i += 8;
+
+ if (!pilots)
+ i += 1;
+
+ return s2car_loop[i];
+}
+
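+/*
+ * Current symbol rate = SFR register value scaled by mclk / 2^32, plus
+ * the sign-extended timing loop offset (TMGREG) scaled by the symbol
+ * rate / 2^29.
+ */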
+static int get_cur_symbol_rate(struct stv *state, u32 *p_symbol_rate)
+{
+ int status = 0;
+ u8 symb_freq0;
+ u8 symb_freq1;
+ u8 symb_freq2;
+ u8 symb_freq3;
+ u8 tim_offs0;
+ u8 tim_offs1;
+ u8 tim_offs2;
+ u32 symbol_rate;
+ s32 timing_offset;
+
+ *p_symbol_rate = 0;
+ if (!state->started)
+ return status;
+
+ read_reg(state, RSTV0910_P2_SFR3 + state->regoff, &symb_freq3);
+ read_reg(state, RSTV0910_P2_SFR2 + state->regoff, &symb_freq2);
+ read_reg(state, RSTV0910_P2_SFR1 + state->regoff, &symb_freq1);
+ read_reg(state, RSTV0910_P2_SFR0 + state->regoff, &symb_freq0);
+ read_reg(state, RSTV0910_P2_TMGREG2 + state->regoff, &tim_offs2);
+ read_reg(state, RSTV0910_P2_TMGREG1 + state->regoff, &tim_offs1);
+ read_reg(state, RSTV0910_P2_TMGREG0 + state->regoff, &tim_offs0);
+
+ symbol_rate = ((u32)symb_freq3 << 24) | ((u32)symb_freq2 << 16) |
+ ((u32)symb_freq1 << 8) | (u32)symb_freq0;
+ timing_offset = ((u32)tim_offs2 << 16) | ((u32)tim_offs1 << 8) |
+ (u32)tim_offs0;
+
+ if ((timing_offset & (1 << 23)) != 0)
+ timing_offset |= 0xFF000000; /* Sign extend */
+
+ symbol_rate = (u32)(((u64)symbol_rate * state->base->mclk) >> 32);
+ timing_offset = (s32)(((s64)symbol_rate * (s64)timing_offset) >> 29);
+
+ *p_symbol_rate = symbol_rate + timing_offset;
+
+ return 0;
+}
+
+static int get_signal_parameters(struct stv *state)
+{
+ u8 tmp;
+
+ if (!state->started)
+ return -EINVAL;
+
+ if (state->receive_mode == RCVMODE_DVBS2) {
+ read_reg(state, RSTV0910_P2_DMDMODCOD + state->regoff, &tmp);
+ state->mod_cod = (enum fe_stv0910_mod_cod)((tmp & 0x7c) >> 2);
+ state->pilots = (tmp & 0x01) != 0;
+ state->fectype = (enum dvbs2_fectype)((tmp & 0x02) >> 1);
+
+ } else if (state->receive_mode == RCVMODE_DVBS) {
+ read_reg(state, RSTV0910_P2_VITCURPUN + state->regoff, &tmp);
+ state->puncture_rate = FEC_NONE;
+ switch (tmp & 0x1F) {
+ case 0x0d:
+ state->puncture_rate = FEC_1_2;
+ break;
+ case 0x12:
+ state->puncture_rate = FEC_2_3;
+ break;
+ case 0x15:
+ state->puncture_rate = FEC_3_4;
+ break;
+ case 0x18:
+ state->puncture_rate = FEC_5_6;
+ break;
+ case 0x1a:
+ state->puncture_rate = FEC_7_8;
+ break;
+ }
+ state->is_vcm = 0;
+ state->is_standard_broadcast = 1;
+ state->feroll_off = FE_SAT_35;
+ }
+ return 0;
+}
+
+static int tracking_optimization(struct stv *state)
+{
+ u32 symbol_rate = 0;
+ u8 tmp;
+
+ get_cur_symbol_rate(state, &symbol_rate);
+ read_reg(state, RSTV0910_P2_DMDCFGMD + state->regoff, &tmp);
+ tmp &= ~0xC0;
+
+ switch (state->receive_mode) {
+ case RCVMODE_DVBS:
+ tmp |= 0x40;
+ break;
+ case RCVMODE_DVBS2:
+ tmp |= 0x80;
+ break;
+ default:
+ tmp |= 0xC0;
+ break;
+ }
+ write_reg(state, RSTV0910_P2_DMDCFGMD + state->regoff, tmp);
+
+ if (state->receive_mode == RCVMODE_DVBS2) {
+ /* Disable Reed-Solomon */
+ write_shared_reg(state,
+ RSTV0910_TSTTSRS, state->nr ? 0x02 : 0x01,
+ 0x03);
+
+ if (state->fectype == DVBS2_64K) {
+ u8 aclc = get_optim_cloop(state, state->mod_cod,
+ state->pilots);
+
+ if (state->mod_cod <= FE_QPSK_910) {
+ write_reg(state, RSTV0910_P2_ACLC2S2Q +
+ state->regoff, aclc);
+ } else if (state->mod_cod <= FE_8PSK_910) {
+ write_reg(state, RSTV0910_P2_ACLC2S2Q +
+ state->regoff, 0x2a);
+ write_reg(state, RSTV0910_P2_ACLC2S28 +
+ state->regoff, aclc);
+ } else if (state->mod_cod <= FE_16APSK_910) {
+ write_reg(state, RSTV0910_P2_ACLC2S2Q +
+ state->regoff, 0x2a);
+ write_reg(state, RSTV0910_P2_ACLC2S216A +
+ state->regoff, aclc);
+ } else if (state->mod_cod <= FE_32APSK_910) {
+ write_reg(state, RSTV0910_P2_ACLC2S2Q +
+ state->regoff, 0x2a);
+ write_reg(state, RSTV0910_P2_ACLC2S232A +
+ state->regoff, aclc);
+ }
+ }
+ }
+ return 0;
+}
+
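+/*
+ * Binary search in a lookup table sorted by decreasing reg_value, with
+ * linear interpolation between the two neighbouring entries. E.g. a raw
+ * noise reading of 4467 in s1_sn_lookup[] maps to 100, i.e. C/N = 10.0 dB.
+ */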
+static s32 table_lookup(const struct slookup *table,
+ int table_size, u32 reg_value)
+{
+ s32 value;
+ int imin = 0;
+ int imax = table_size - 1;
+ int i;
+ s32 reg_diff;
+
+ /* Assumes table[0].reg_value > table[imax].reg_value */
+ if (reg_value >= table[0].reg_value) {
+ value = table[0].value;
+ } else if (reg_value <= table[imax].reg_value) {
+ value = table[imax].value;
+ } else {
+ while ((imax - imin) > 1) {
+ i = (imax + imin) / 2;
+ if ((table[imin].reg_value >= reg_value) &&
+ (reg_value >= table[i].reg_value))
+ imax = i;
+ else
+ imin = i;
+ }
+
+ reg_diff = table[imax].reg_value - table[imin].reg_value;
+ value = table[imin].value;
+ if (reg_diff != 0)
+ value += ((s32)(reg_value - table[imin].reg_value) *
+ (s32)(table[imax].value
+ - table[imin].value))
+ / (reg_diff);
+ }
+
+ return value;
+}
+
+static int get_signal_to_noise(struct stv *state, s32 *signal_to_noise)
+{
+ u8 data0;
+ u8 data1;
+ u16 data;
+ int n_lookup;
+ const struct slookup *lookup;
+
+ *signal_to_noise = 0;
+
+ if (!state->started)
+ return -EINVAL;
+
+ if (state->receive_mode == RCVMODE_DVBS2) {
+ read_reg(state, RSTV0910_P2_NNOSPLHT1 + state->regoff,
+ &data1);
+ read_reg(state, RSTV0910_P2_NNOSPLHT0 + state->regoff,
+ &data0);
+ n_lookup = ARRAY_SIZE(s2_sn_lookup);
+ lookup = s2_sn_lookup;
+ } else {
+ read_reg(state, RSTV0910_P2_NNOSDATAT1 + state->regoff,
+ &data1);
+ read_reg(state, RSTV0910_P2_NNOSDATAT0 + state->regoff,
+ &data0);
+ n_lookup = ARRAY_SIZE(s1_sn_lookup);
+ lookup = s1_sn_lookup;
+ }
+ data = (((u16)data1) << 8) | (u16)data0;
+ *signal_to_noise = table_lookup(lookup, n_lookup, data);
+ return 0;
+}
+
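+/*
+ * DVB-S pre-RS bit error rate: ERRCNT12..10 hold a 24-bit error count,
+ * and the cached values are only updated while bit 7 of the high byte is
+ * clear; the denominator is 2^(2 * berscale + 13) bits. The observation
+ * window (berscale) is widened or narrowed to keep the error count
+ * between 256 and 1024.
+ */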
+static int get_bit_error_rate_s(struct stv *state, u32 *bernumerator,
+ u32 *berdenominator)
+{
+ u8 regs[3];
+
+ int status = read_regs(state,
+ RSTV0910_P2_ERRCNT12 + state->regoff,
+ regs, 3);
+
+ if (status)
+ return -EINVAL;
+
+ if ((regs[0] & 0x80) == 0) {
+ state->last_berdenominator = 1 << ((state->berscale * 2) +
+ 10 + 3);
+ state->last_bernumerator = ((u32)(regs[0] & 0x7F) << 16) |
+ ((u32)regs[1] << 8) | regs[2];
+ if (state->last_bernumerator < 256 && state->berscale < 6) {
+ state->berscale += 1;
+ status = write_reg(state, RSTV0910_P2_ERRCTRL1 +
+ state->regoff,
+ 0x20 | state->berscale);
+ } else if (state->last_bernumerator > 1024 &&
+ state->berscale > 2) {
+ state->berscale -= 1;
+ status = write_reg(state, RSTV0910_P2_ERRCTRL1 +
+ state->regoff, 0x20 |
+ state->berscale);
+ }
+ }
+ *bernumerator = state->last_bernumerator;
+ *berdenominator = state->last_berdenominator;
+ return 0;
+}
+
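+/*
+ * Number of BCH-encoded bits (Nbch) per DVB-S2 FEC frame for each MODCOD,
+ * for normal (64k) and short (16k) frames; used as the denominator when
+ * converting the pre-BCH error count into a bit error rate.
+ */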
+static u32 dvbs2_nbch(enum dvbs2_mod_cod mod_cod, enum dvbs2_fectype fectype)
+{
+ static const u32 nbch[][2] = {
+ { 0, 0}, /* DUMMY_PLF */
+ {16200, 3240}, /* QPSK_1_4, */
+ {21600, 5400}, /* QPSK_1_3, */
+ {25920, 6480}, /* QPSK_2_5, */
+ {32400, 7200}, /* QPSK_1_2, */
+ {38880, 9720}, /* QPSK_3_5, */
+ {43200, 10800}, /* QPSK_2_3, */
+ {48600, 11880}, /* QPSK_3_4, */
+ {51840, 12600}, /* QPSK_4_5, */
+ {54000, 13320}, /* QPSK_5_6, */
+ {57600, 14400}, /* QPSK_8_9, */
+ {58320, 16000}, /* QPSK_9_10, */
+ {43200, 9720}, /* 8PSK_3_5, */
+ {48600, 10800}, /* 8PSK_2_3, */
+ {51840, 11880}, /* 8PSK_3_4, */
+ {54000, 13320}, /* 8PSK_5_6, */
+ {57600, 14400}, /* 8PSK_8_9, */
+ {58320, 16000}, /* 8PSK_9_10, */
+ {43200, 10800}, /* 16APSK_2_3, */
+ {48600, 11880}, /* 16APSK_3_4, */
+ {51840, 12600}, /* 16APSK_4_5, */
+ {54000, 13320}, /* 16APSK_5_6, */
+ {57600, 14400}, /* 16APSK_8_9, */
+ {58320, 16000}, /* 16APSK_9_10 */
+ {48600, 11880}, /* 32APSK_3_4, */
+ {51840, 12600}, /* 32APSK_4_5, */
+ {54000, 13320}, /* 32APSK_5_6, */
+ {57600, 14400}, /* 32APSK_8_9, */
+ {58320, 16000}, /* 32APSK_9_10 */
+ };
+
+ if (mod_cod >= DVBS2_QPSK_1_4 &&
+ mod_cod <= DVBS2_32APSK_9_10 && fectype <= DVBS2_16K)
+ return nbch[mod_cod][fectype];
+ return 64800;
+}
+
+static int get_bit_error_rate_s2(struct stv *state, u32 *bernumerator,
+ u32 *berdenominator)
+{
+ u8 regs[3];
+
+ int status = read_regs(state, RSTV0910_P2_ERRCNT12 + state->regoff,
+ regs, 3);
+
+ if (status)
+ return -EINVAL;
+
+ if ((regs[0] & 0x80) == 0) {
+ state->last_berdenominator =
+ dvbs2_nbch((enum dvbs2_mod_cod)state->mod_cod,
+ state->fectype) <<
+ (state->berscale * 2);
+ state->last_bernumerator = (((u32)regs[0] & 0x7F) << 16) |
+ ((u32)regs[1] << 8) | regs[2];
+ if (state->last_bernumerator < 256 && state->berscale < 6) {
+ state->berscale += 1;
+ write_reg(state, RSTV0910_P2_ERRCTRL1 + state->regoff,
+ 0x20 | state->berscale);
+ } else if (state->last_bernumerator > 1024 &&
+ state->berscale > 2) {
+ state->berscale -= 1;
+ write_reg(state, RSTV0910_P2_ERRCTRL1 + state->regoff,
+ 0x20 | state->berscale);
+ }
+ }
+ *bernumerator = state->last_bernumerator;
+ *berdenominator = state->last_berdenominator;
+ return status;
+}
+
+static int get_bit_error_rate(struct stv *state, u32 *bernumerator,
+ u32 *berdenominator)
+{
+ *bernumerator = 0;
+ *berdenominator = 1;
+
+ switch (state->receive_mode) {
+ case RCVMODE_DVBS:
+ return get_bit_error_rate_s(state,
+ bernumerator, berdenominator);
+ case RCVMODE_DVBS2:
+ return get_bit_error_rate_s2(state,
+ bernumerator, berdenominator);
+ default:
+ break;
+ }
+ return 0;
+}
+
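+/*
+ * Program the PLL for the requested master clock: NDIV is derived from
+ * the target frequency (master_clock * ODF * IDF / extclk), the charge
+ * pump code from the NDIV range, and the resulting mclk is
+ * fvco / (2 * ODF) with fvco = extclk * 2 * NDIV / IDF.
+ */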
+static int set_mclock(struct stv *state, u32 master_clock)
+{
+ u32 idf = 1;
+ u32 odf = 4;
+ u32 quartz = state->base->extclk / 1000000;
+ u32 fphi = master_clock / 1000000;
+ u32 ndiv = (fphi * odf * idf) / quartz;
+ u32 cp = 7;
+ u32 fvco;
+
+ if (ndiv >= 7 && ndiv <= 71)
+ cp = 7;
+ else if (ndiv >= 72 && ndiv <= 79)
+ cp = 8;
+ else if (ndiv >= 80 && ndiv <= 87)
+ cp = 9;
+ else if (ndiv >= 88 && ndiv <= 95)
+ cp = 10;
+ else if (ndiv >= 96 && ndiv <= 103)
+ cp = 11;
+ else if (ndiv >= 104 && ndiv <= 111)
+ cp = 12;
+ else if (ndiv >= 112 && ndiv <= 119)
+ cp = 13;
+ else if (ndiv >= 120 && ndiv <= 127)
+ cp = 14;
+ else if (ndiv >= 128 && ndiv <= 135)
+ cp = 15;
+ else if (ndiv >= 136 && ndiv <= 143)
+ cp = 16;
+ else if (ndiv >= 144 && ndiv <= 151)
+ cp = 17;
+ else if (ndiv >= 152 && ndiv <= 159)
+ cp = 18;
+ else if (ndiv >= 160 && ndiv <= 167)
+ cp = 19;
+ else if (ndiv >= 168 && ndiv <= 175)
+ cp = 20;
+ else if (ndiv >= 176 && ndiv <= 183)
+ cp = 21;
+ else if (ndiv >= 184 && ndiv <= 191)
+ cp = 22;
+ else if (ndiv >= 192 && ndiv <= 199)
+ cp = 23;
+ else if (ndiv >= 200 && ndiv <= 207)
+ cp = 24;
+ else if (ndiv >= 208 && ndiv <= 215)
+ cp = 25;
+ else if (ndiv >= 216 && ndiv <= 223)
+ cp = 26;
+ else if (ndiv >= 224 && ndiv <= 225)
+ cp = 27;
+
+ write_reg(state, RSTV0910_NCOARSE, (cp << 3) | idf);
+ write_reg(state, RSTV0910_NCOARSE2, odf);
+ write_reg(state, RSTV0910_NCOARSE1, ndiv);
+
+ fvco = (quartz * 2 * ndiv) / idf;
+ state->base->mclk = fvco / (2 * odf) * 1000000;
+
+ return 0;
+}
+
+static int stop(struct stv *state)
+{
+ if (state->started) {
+ u8 tmp;
+
+ write_reg(state, RSTV0910_P2_TSCFGH + state->regoff,
+ state->tscfgh | 0x01);
+ read_reg(state, RSTV0910_P2_PDELCTRL1 + state->regoff, &tmp);
+ tmp &= ~0x01; /* release reset of the DVBS2 packet delineator */
+ write_reg(state, RSTV0910_P2_PDELCTRL1 + state->regoff, tmp);
+ /* Blind optim*/
+ write_reg(state, RSTV0910_P2_AGC2O + state->regoff, 0x5B);
+ /* Stop the demod */
+ write_reg(state, RSTV0910_P2_DMDISTATE + state->regoff, 0x5c);
+ state->started = 0;
+ }
+ state->receive_mode = RCVMODE_NONE;
+ return 0;
+}
+
+static int init_search_param(struct stv *state)
+{
+ u8 tmp;
+
+ read_reg(state, RSTV0910_P2_PDELCTRL1 + state->regoff, &tmp);
+ tmp |= 0x20; /* Filter_en (no effect if SIS=non-MIS) */
+ write_reg(state, RSTV0910_P2_PDELCTRL1 + state->regoff, tmp);
+
+ read_reg(state, RSTV0910_P2_PDELCTRL2 + state->regoff, &tmp);
+ tmp &= ~0x02; /* frame mode = 0 */
+ write_reg(state, RSTV0910_P2_PDELCTRL2 + state->regoff, tmp);
+
+ write_reg(state, RSTV0910_P2_UPLCCST0 + state->regoff, 0xe0);
+ write_reg(state, RSTV0910_P2_ISIBITENA + state->regoff, 0x00);
+
+ read_reg(state, RSTV0910_P2_TSSTATEM + state->regoff, &tmp);
+ tmp &= ~0x01; /* nosync = 0, in case next signal is standard TS */
+ write_reg(state, RSTV0910_P2_TSSTATEM + state->regoff, tmp);
+
+ read_reg(state, RSTV0910_P2_TSCFGL + state->regoff, &tmp);
+ tmp &= ~0x04; /* embindvb = 0 */
+ write_reg(state, RSTV0910_P2_TSCFGL + state->regoff, tmp);
+
+ read_reg(state, RSTV0910_P2_TSINSDELH + state->regoff, &tmp);
+ tmp &= ~0x80; /* syncbyte = 0 */
+ write_reg(state, RSTV0910_P2_TSINSDELH + state->regoff, tmp);
+
+ read_reg(state, RSTV0910_P2_TSINSDELM + state->regoff, &tmp);
+ tmp &= ~0x08; /* token = 0 */
+ write_reg(state, RSTV0910_P2_TSINSDELM + state->regoff, tmp);
+
+ read_reg(state, RSTV0910_P2_TSDLYSET2 + state->regoff, &tmp);
+ tmp &= ~0x30; /* hysteresis threshold = 0 */
+ write_reg(state, RSTV0910_P2_TSDLYSET2 + state->regoff, tmp);
+
+ read_reg(state, RSTV0910_P2_PDELCTRL0 + state->regoff, &tmp);
+ tmp = (tmp & ~0x30) | 0x10; /* isi obs mode = 1, observe min ISI */
+ write_reg(state, RSTV0910_P2_PDELCTRL0 + state->regoff, tmp);
+
+ return 0;
+}
+
+static int enable_puncture_rate(struct stv *state, enum fe_code_rate rate)
+{
+ switch (rate) {
+ case FEC_1_2:
+ return write_reg(state,
+ RSTV0910_P2_PRVIT + state->regoff, 0x01);
+ case FEC_2_3:
+ return write_reg(state,
+ RSTV0910_P2_PRVIT + state->regoff, 0x02);
+ case FEC_3_4:
+ return write_reg(state,
+ RSTV0910_P2_PRVIT + state->regoff, 0x04);
+ case FEC_5_6:
+ return write_reg(state,
+ RSTV0910_P2_PRVIT + state->regoff, 0x08);
+ case FEC_7_8:
+ return write_reg(state,
+ RSTV0910_P2_PRVIT + state->regoff, 0x20);
+ case FEC_NONE:
+ default:
+ return write_reg(state,
+ RSTV0910_P2_PRVIT + state->regoff, 0x2f);
+ }
+}
+
+static int set_vth_default(struct stv *state)
+{
+ state->vth[0] = 0xd7;
+ state->vth[1] = 0x85;
+ state->vth[2] = 0x58;
+ state->vth[3] = 0x3a;
+ state->vth[4] = 0x34;
+ state->vth[5] = 0x28;
+ write_reg(state, RSTV0910_P2_VTH12 + state->regoff + 0, state->vth[0]);
+ write_reg(state, RSTV0910_P2_VTH12 + state->regoff + 1, state->vth[1]);
+ write_reg(state, RSTV0910_P2_VTH12 + state->regoff + 2, state->vth[2]);
+ write_reg(state, RSTV0910_P2_VTH12 + state->regoff + 3, state->vth[3]);
+ write_reg(state, RSTV0910_P2_VTH12 + state->regoff + 4, state->vth[4]);
+ write_reg(state, RSTV0910_P2_VTH12 + state->regoff + 5, state->vth[5]);
+ return 0;
+}
+
+static int set_vth(struct stv *state)
+{
+ static const struct slookup vthlookup_table[] = {
+ {250, 8780}, /* C/N= 1.5dB */
+ {100, 7405}, /* C/N= 4.5dB */
+ {40, 6330}, /* C/N= 6.5dB */
+ {12, 5224}, /* C/N= 8.5dB */
+ {5, 4236} /* C/N=10.5dB */
+ };
+
+ int i;
+ u8 tmp[2];
+ int status = read_regs(state,
+ RSTV0910_P2_NNOSDATAT1 + state->regoff,
+ tmp, 2);
+ u16 reg_value = (tmp[0] << 8) | tmp[1];
+ s32 vth = table_lookup(vthlookup_table, ARRAY_SIZE(vthlookup_table),
+ reg_value);
+
+ for (i = 0; i < 6; i += 1)
+ if (state->vth[i] > vth)
+ state->vth[i] = vth;
+
+ write_reg(state, RSTV0910_P2_VTH12 + state->regoff + 0, state->vth[0]);
+ write_reg(state, RSTV0910_P2_VTH12 + state->regoff + 1, state->vth[1]);
+ write_reg(state, RSTV0910_P2_VTH12 + state->regoff + 2, state->vth[2]);
+ write_reg(state, RSTV0910_P2_VTH12 + state->regoff + 3, state->vth[3]);
+ write_reg(state, RSTV0910_P2_VTH12 + state->regoff + 4, state->vth[4]);
+ write_reg(state, RSTV0910_P2_VTH12 + state->regoff + 5, state->vth[5]);
+ return status;
+}
+
+static int start(struct stv *state, struct dtv_frontend_properties *p)
+{
+ s32 freq;
+ u8 reg_dmdcfgmd;
+ u16 symb;
+ u32 scrambling_code = 1;
+
+ if (p->symbol_rate < 100000 || p->symbol_rate > 70000000)
+ return -EINVAL;
+
+ state->receive_mode = RCVMODE_NONE;
+ state->demod_lock_time = 0;
+
+ /* Demod Stop */
+ if (state->started)
+ write_reg(state, RSTV0910_P2_DMDISTATE + state->regoff, 0x5C);
+
+ init_search_param(state);
+
+ if (p->stream_id != NO_STREAM_ID_FILTER) {
+ /*
+ * Backwards compatibility to "crazy" API.
+ * PRBS X root cannot be 0, so this should always work.
+ */
+ if (p->stream_id & 0xffffff00)
+ scrambling_code = p->stream_id >> 8;
+ write_reg(state, RSTV0910_P2_ISIENTRY + state->regoff,
+ p->stream_id & 0xff);
+ write_reg(state, RSTV0910_P2_ISIBITENA + state->regoff,
+ 0xff);
+ }
+
+ if (scrambling_code != state->cur_scrambling_code) {
+ write_reg(state, RSTV0910_P2_PLROOT0 + state->regoff,
+ scrambling_code & 0xff);
+ write_reg(state, RSTV0910_P2_PLROOT1 + state->regoff,
+ (scrambling_code >> 8) & 0xff);
+ write_reg(state, RSTV0910_P2_PLROOT2 + state->regoff,
+ (scrambling_code >> 16) & 0x0f);
+ state->cur_scrambling_code = scrambling_code;
+ }
+
+ if (p->symbol_rate <= 1000000) { /* SR <=1Msps */
+ state->demod_timeout = 3000;
+ state->fec_timeout = 2000;
+ } else if (p->symbol_rate <= 2000000) { /* 1Msps < SR <=2Msps */
+ state->demod_timeout = 2500;
+ state->fec_timeout = 1300;
+ } else if (p->symbol_rate <= 5000000) { /* 2Msps< SR <=5Msps */
+ state->demod_timeout = 1000;
+ state->fec_timeout = 650;
+ } else if (p->symbol_rate <= 10000000) { /* 5Msps< SR <=10Msps */
+ state->demod_timeout = 700;
+ state->fec_timeout = 350;
+ } else if (p->symbol_rate < 20000000) { /* 10Msps < SR < 20Msps */
+ state->demod_timeout = 400;
+ state->fec_timeout = 200;
+ } else { /* SR >=20Msps */
+ state->demod_timeout = 300;
+ state->fec_timeout = 200;
+ }
+
+ /* Set the Init Symbol rate */
+ symb = muldiv32(p->symbol_rate, 65536, state->base->mclk);
+ write_reg(state, RSTV0910_P2_SFRINIT1 + state->regoff,
+ ((symb >> 8) & 0x7F));
+ write_reg(state, RSTV0910_P2_SFRINIT0 + state->regoff, (symb & 0xFF));
+
+ state->demod_bits |= 0x80;
+ write_reg(state, RSTV0910_P2_DEMOD + state->regoff, state->demod_bits);
+
+ /* FE_STV0910_SetSearchStandard */
+ read_reg(state, RSTV0910_P2_DMDCFGMD + state->regoff, &reg_dmdcfgmd);
+ write_reg(state, RSTV0910_P2_DMDCFGMD + state->regoff,
+ reg_dmdcfgmd |= 0xC0);
+
+ write_shared_reg(state,
+ RSTV0910_TSTTSRS, state->nr ? 0x02 : 0x01, 0x00);
+
+ /* Disable DSS */
+ write_reg(state, RSTV0910_P2_FECM + state->regoff, 0x00);
+ write_reg(state, RSTV0910_P2_PRVIT + state->regoff, 0x2F);
+
+ enable_puncture_rate(state, FEC_NONE);
+
+ /* 8PSK 3/5, 8PSK 2/3 Poff tracking optimization WA */
+ write_reg(state, RSTV0910_P2_ACLC2S2Q + state->regoff, 0x0B);
+ write_reg(state, RSTV0910_P2_ACLC2S28 + state->regoff, 0x0A);
+ write_reg(state, RSTV0910_P2_BCLC2S2Q + state->regoff, 0x84);
+ write_reg(state, RSTV0910_P2_BCLC2S28 + state->regoff, 0x84);
+ write_reg(state, RSTV0910_P2_CARHDR + state->regoff, 0x1C);
+ write_reg(state, RSTV0910_P2_CARFREQ + state->regoff, 0x79);
+
+ write_reg(state, RSTV0910_P2_ACLC2S216A + state->regoff, 0x29);
+ write_reg(state, RSTV0910_P2_ACLC2S232A + state->regoff, 0x09);
+ write_reg(state, RSTV0910_P2_BCLC2S216A + state->regoff, 0x84);
+ write_reg(state, RSTV0910_P2_BCLC2S232A + state->regoff, 0x84);
+
+ /*
+ * Reset CAR3, bug DVBS2->DVBS1 lock
+ * Note: The bit is only pulsed -> no lock on shared register needed
+ */
+ write_reg(state, RSTV0910_TSTRES0, state->nr ? 0x04 : 0x08);
+ write_reg(state, RSTV0910_TSTRES0, 0);
+
+ set_vth_default(state);
+ /* Reset demod */
+ write_reg(state, RSTV0910_P2_DMDISTATE + state->regoff, 0x1F);
+
+ write_reg(state, RSTV0910_P2_CARCFG + state->regoff, 0x46);
+
+ if (p->symbol_rate <= 5000000)
+ freq = (state->search_range / 2000) + 80;
+ else
+ freq = (state->search_range / 2000) + 1600;
+ freq = (freq << 16) / (state->base->mclk / 1000);
+
+ write_reg(state, RSTV0910_P2_CFRUP1 + state->regoff,
+ (freq >> 8) & 0xff);
+ write_reg(state, RSTV0910_P2_CFRUP0 + state->regoff, (freq & 0xff));
+ /* CFR Low Setting */
+ freq = -freq;
+ write_reg(state, RSTV0910_P2_CFRLOW1 + state->regoff,
+ (freq >> 8) & 0xff);
+ write_reg(state, RSTV0910_P2_CFRLOW0 + state->regoff, (freq & 0xff));
+
+ /* init the demod frequency offset to 0 */
+ write_reg(state, RSTV0910_P2_CFRINIT1 + state->regoff, 0);
+ write_reg(state, RSTV0910_P2_CFRINIT0 + state->regoff, 0);
+
+ write_reg(state, RSTV0910_P2_DMDISTATE + state->regoff, 0x1F);
+ /* Trigger acq */
+ write_reg(state, RSTV0910_P2_DMDISTATE + state->regoff, 0x15);
+
+ state->demod_lock_time += TUNING_DELAY;
+ state->started = 1;
+
+ return 0;
+}
+
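+/*
+ * Set up DiSEqC: reset the transmitter and program the F22 divider so
+ * that the 22 kHz tone is derived from mclk / (32 * 22000), rounded to
+ * the nearest integer.
+ */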
+static int init_diseqc(struct stv *state)
+{
+ u16 offs = state->nr ? 0x40 : 0; /* Address offset */
+ u8 freq = ((state->base->mclk + 11000 * 32) / (22000 * 32));
+
+ /* Disable receiver */
+ write_reg(state, RSTV0910_P1_DISRXCFG + offs, 0x00);
+ write_reg(state, RSTV0910_P1_DISTXCFG + offs, 0xBA); /* Reset = 1 */
+ write_reg(state, RSTV0910_P1_DISTXCFG + offs, 0x3A); /* Reset = 0 */
+ write_reg(state, RSTV0910_P1_DISTXF22 + offs, freq);
+ return 0;
+}
+
+static int probe(struct stv *state)
+{
+ u8 id;
+
+ state->receive_mode = RCVMODE_NONE;
+ state->started = 0;
+
+ if (read_reg(state, RSTV0910_MID, &id) < 0)
+ return -ENODEV;
+
+ if (id != 0x51)
+ return -EINVAL;
+
+ /* Configure the I2C repeater to off */
+ write_reg(state, RSTV0910_P1_I2CRPT, 0x24);
+ /* Configure the I2C repeater to off */
+ write_reg(state, RSTV0910_P2_I2CRPT, 0x24);
+ /* Set the I2C to oversampling ratio */
+ write_reg(state, RSTV0910_I2CCFG, 0x88); /* state->i2ccfg */
+
+ write_reg(state, RSTV0910_OUTCFG, 0x00); /* OUTCFG */
+ write_reg(state, RSTV0910_PADCFG, 0x05); /* RFAGC Pads Dev = 05 */
+ write_reg(state, RSTV0910_SYNTCTRL, 0x02); /* SYNTCTRL */
+ write_reg(state, RSTV0910_TSGENERAL, state->tsgeneral); /* TSGENERAL */
+ write_reg(state, RSTV0910_CFGEXT, 0x02); /* CFGEXT */
+
+ if (state->single)
+ write_reg(state, RSTV0910_GENCFG, 0x14); /* GENCFG */
+ else
+ write_reg(state, RSTV0910_GENCFG, 0x15); /* GENCFG */
+
+ write_reg(state, RSTV0910_P1_TNRCFG2, 0x02); /* IQSWAP = 0 */
+ write_reg(state, RSTV0910_P2_TNRCFG2, 0x82); /* IQSWAP = 1 */
+
+ write_reg(state, RSTV0910_P1_CAR3CFG, 0x02);
+ write_reg(state, RSTV0910_P2_CAR3CFG, 0x02);
+ write_reg(state, RSTV0910_P1_DMDCFG4, 0x04);
+ write_reg(state, RSTV0910_P2_DMDCFG4, 0x04);
+
+ write_reg(state, RSTV0910_TSTRES0, 0x80); /* LDPC Reset */
+ write_reg(state, RSTV0910_TSTRES0, 0x00);
+
+ write_reg(state, RSTV0910_P1_TSPIDFLT1, 0x00);
+ write_reg(state, RSTV0910_P2_TSPIDFLT1, 0x00);
+
+ write_reg(state, RSTV0910_P1_TMGCFG2, 0x80);
+ write_reg(state, RSTV0910_P2_TMGCFG2, 0x80);
+
+ set_mclock(state, 135000000);
+
+ /* TS output */
+ write_reg(state, RSTV0910_P1_TSCFGH, state->tscfgh | 0x01);
+ write_reg(state, RSTV0910_P1_TSCFGH, state->tscfgh);
+ write_reg(state, RSTV0910_P1_TSCFGM, 0xC0); /* Manual speed */
+ write_reg(state, RSTV0910_P1_TSCFGL, 0x20);
+
+ /* Speed = 67.5 MHz */
+ write_reg(state, RSTV0910_P1_TSSPEED, state->tsspeed);
+
+ write_reg(state, RSTV0910_P2_TSCFGH, state->tscfgh | 0x01);
+ write_reg(state, RSTV0910_P2_TSCFGH, state->tscfgh);
+ write_reg(state, RSTV0910_P2_TSCFGM, 0xC0); /* Manual speed */
+ write_reg(state, RSTV0910_P2_TSCFGL, 0x20);
+
+ /* Speed = 67.5 MHz */
+ write_reg(state, RSTV0910_P2_TSSPEED, state->tsspeed);
+
+ /* Reset stream merger */
+ write_reg(state, RSTV0910_P1_TSCFGH, state->tscfgh | 0x01);
+ write_reg(state, RSTV0910_P2_TSCFGH, state->tscfgh | 0x01);
+ write_reg(state, RSTV0910_P1_TSCFGH, state->tscfgh);
+ write_reg(state, RSTV0910_P2_TSCFGH, state->tscfgh);
+
+ write_reg(state, RSTV0910_P1_I2CRPT, state->i2crpt);
+ write_reg(state, RSTV0910_P2_I2CRPT, state->i2crpt);
+
+ init_diseqc(state);
+ return 0;
+}
+
+static int gate_ctrl(struct dvb_frontend *fe, int enable)
+{
+ struct stv *state = fe->demodulator_priv;
+ u8 i2crpt = state->i2crpt & ~0x86;
+
+ /*
+ * mutex_lock note: Concurrent I2C gate bus accesses must be
+ * prevented (STV0910 = dual demod on a single IC with a single I2C
+ * gate/bus, and two tuners attached), similar to most (if not all)
+ * other I2C host interfaces/busses.
+ *
+ * enable=1 (open I2C gate) will grab the lock
+ * enable=0 (close I2C gate) releases the lock
+ */
+
+ if (enable) {
+ mutex_lock(&state->base->i2c_lock);
+ i2crpt |= 0x80;
+ } else {
+ i2crpt |= 0x02;
+ }
+
+ if (write_reg(state, state->nr ? RSTV0910_P2_I2CRPT :
+ RSTV0910_P1_I2CRPT, i2crpt) < 0) {
+ /* don't hold the I2C bus lock on failure */
+ mutex_unlock(&state->base->i2c_lock);
+ dev_err(&state->base->i2c->dev,
+ "%s() write_reg failure (enable=%d)\n",
+ __func__, enable);
+ return -EIO;
+ }
+
+ state->i2crpt = i2crpt;
+
+ if (!enable)
+ mutex_unlock(&state->base->i2c_lock);
+ return 0;
+}
+
+static void release(struct dvb_frontend *fe)
+{
+ struct stv *state = fe->demodulator_priv;
+
+ state->base->count--;
+ if (state->base->count == 0) {
+ list_del(&state->base->stvlist);
+ kfree(state->base);
+ }
+ kfree(state);
+}
+
+static int set_parameters(struct dvb_frontend *fe)
+{
+ int stat = 0;
+ struct stv *state = fe->demodulator_priv;
+ u32 iffreq;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+
+ stop(state);
+ if (fe->ops.tuner_ops.set_params)
+ fe->ops.tuner_ops.set_params(fe);
+ if (fe->ops.tuner_ops.get_if_frequency)
+ fe->ops.tuner_ops.get_if_frequency(fe, &iffreq);
+ state->symbol_rate = p->symbol_rate;
+ stat = start(state, p);
+ return stat;
+}
+
+static int manage_matype_info(struct stv *state)
+{
+ if (!state->started)
+ return -EINVAL;
+ if (state->receive_mode == RCVMODE_DVBS2) {
+ u8 bbheader[2];
+
+ read_regs(state, RSTV0910_P2_MATSTR1 + state->regoff,
+ bbheader, 2);
+ state->feroll_off =
+ (enum fe_stv0910_roll_off)(bbheader[0] & 0x03);
+ state->is_vcm = (bbheader[0] & 0x10) == 0;
+ state->is_standard_broadcast = (bbheader[0] & 0xFC) == 0xF0;
+ } else if (state->receive_mode == RCVMODE_DVBS) {
+ state->is_vcm = 0;
+ state->is_standard_broadcast = 1;
+ state->feroll_off = FE_SAT_35;
+ }
+ return 0;
+}
+
+static int read_snr(struct dvb_frontend *fe)
+{
+ struct stv *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ s32 snrval;
+
+ if (!get_signal_to_noise(state, &snrval)) {
+ p->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+ p->cnr.stat[0].uvalue = 100 * snrval; /* fix scale */
+ } else {
+ p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ }
+
+ return 0;
+}
+
+static int read_ber(struct dvb_frontend *fe)
+{
+ struct stv *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ u32 n, d;
+
+ get_bit_error_rate(state, &n, &d);
+
+ p->pre_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+ p->pre_bit_error.stat[0].uvalue = n;
+ p->pre_bit_count.stat[0].scale = FE_SCALE_COUNTER;
+ p->pre_bit_count.stat[0].uvalue = d;
+
+ return 0;
+}
+
+static void read_signal_strength(struct dvb_frontend *fe)
+{
+ struct stv *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *p = &state->fe.dtv_property_cache;
+ u8 reg[2];
+ u16 agc;
+ s32 padc, power = 0;
+ int i;
+
+ read_regs(state, RSTV0910_P2_AGCIQIN1 + state->regoff, reg, 2);
+
+ agc = (((u32)reg[0]) << 8) | reg[1];
+
+ for (i = 0; i < 5; i += 1) {
+ read_regs(state, RSTV0910_P2_POWERI + state->regoff, reg, 2);
+ power += (u32)reg[0] * (u32)reg[0]
+ + (u32)reg[1] * (u32)reg[1];
+ usleep_range(3000, 4000);
+ }
+ power /= 5;
+
+ padc = table_lookup(padc_lookup, ARRAY_SIZE(padc_lookup), power) + 352;
+
+ p->strength.stat[0].scale = FE_SCALE_DECIBEL;
+ p->strength.stat[0].svalue = (padc - agc);
+}
+
+static int read_status(struct dvb_frontend *fe, enum fe_status *status)
+{
+ struct stv *state = fe->demodulator_priv;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ u8 dmd_state = 0;
+ u8 dstatus = 0;
+ enum receive_mode cur_receive_mode = RCVMODE_NONE;
+ u32 feclock = 0;
+
+ *status = 0;
+
+ read_reg(state, RSTV0910_P2_DMDSTATE + state->regoff, &dmd_state);
+
+ if (dmd_state & 0x40) {
+ read_reg(state, RSTV0910_P2_DSTATUS + state->regoff, &dstatus);
+ if (dstatus & 0x08)
+ cur_receive_mode = (dmd_state & 0x20) ?
+ RCVMODE_DVBS : RCVMODE_DVBS2;
+ }
+ if (cur_receive_mode == RCVMODE_NONE) {
+ set_vth(state);
+
+ /* reset signal statistics */
+ p->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->pre_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->pre_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+
+ return 0;
+ }
+
+ *status |= (FE_HAS_SIGNAL
+ | FE_HAS_CARRIER
+ | FE_HAS_VITERBI
+ | FE_HAS_SYNC);
+
+ if (state->receive_mode == RCVMODE_NONE) {
+ state->receive_mode = cur_receive_mode;
+ state->demod_lock_time = jiffies;
+ state->first_time_lock = 1;
+
+ get_signal_parameters(state);
+ tracking_optimization(state);
+
+ write_reg(state, RSTV0910_P2_TSCFGH + state->regoff,
+ state->tscfgh);
+ usleep_range(3000, 4000);
+ write_reg(state, RSTV0910_P2_TSCFGH + state->regoff,
+ state->tscfgh | 0x01);
+ write_reg(state, RSTV0910_P2_TSCFGH + state->regoff,
+ state->tscfgh);
+ }
+ if (dmd_state & 0x40) {
+ if (state->receive_mode == RCVMODE_DVBS2) {
+ u8 pdelstatus;
+
+ read_reg(state,
+ RSTV0910_P2_PDELSTATUS1 + state->regoff,
+ &pdelstatus);
+ feclock = (pdelstatus & 0x02) != 0;
+ } else {
+ u8 vstatus;
+
+ read_reg(state,
+ RSTV0910_P2_VSTATUSVIT + state->regoff,
+ &vstatus);
+ feclock = (vstatus & 0x08) != 0;
+ }
+ }
+
+ if (feclock) {
+ *status |= FE_HAS_LOCK;
+
+ if (state->first_time_lock) {
+ u8 tmp;
+
+ state->first_time_lock = 0;
+
+ manage_matype_info(state);
+
+ if (state->receive_mode == RCVMODE_DVBS2) {
+ /*
+ * FSTV0910_P2_MANUALSX_ROLLOFF,
+ * FSTV0910_P2_MANUALS2_ROLLOFF = 0
+ */
+ state->demod_bits &= ~0x84;
+ write_reg(state,
+ RSTV0910_P2_DEMOD + state->regoff,
+ state->demod_bits);
+ read_reg(state,
+ RSTV0910_P2_PDELCTRL2 + state->regoff,
+ &tmp);
+ /* reset the DVB-S2 packet delineator error counter */
+ tmp |= 0x40;
+ write_reg(state,
+ RSTV0910_P2_PDELCTRL2 + state->regoff,
+ tmp);
+ /* release the delineator error counter reset */
+ tmp &= ~0x40;
+ write_reg(state,
+ RSTV0910_P2_PDELCTRL2 + state->regoff,
+ tmp);
+
+ state->berscale = 2;
+ state->last_bernumerator = 0;
+ state->last_berdenominator = 1;
+ /* force to PRE BCH Rate */
+ write_reg(state,
+ RSTV0910_P2_ERRCTRL1 + state->regoff,
+ BER_SRC_S2 | state->berscale);
+ } else {
+ state->berscale = 2;
+ state->last_bernumerator = 0;
+ state->last_berdenominator = 1;
+ /* force to PRE RS Rate */
+ write_reg(state,
+ RSTV0910_P2_ERRCTRL1 + state->regoff,
+ BER_SRC_S | state->berscale);
+ }
+ /* Reset the Total packet counter */
+ write_reg(state,
+ RSTV0910_P2_FBERCPT4 + state->regoff, 0x00);
+ /*
+ * Reset the packet error counter 2 (and set it to
+ * infinite error count mode)
+ */
+ write_reg(state,
+ RSTV0910_P2_ERRCTRL2 + state->regoff, 0xc1);
+
+ set_vth_default(state);
+ if (state->receive_mode == RCVMODE_DVBS)
+ enable_puncture_rate(state,
+ state->puncture_rate);
+ }
+ }
+
+ /* read signal statistics */
+
+ /* read signal strength */
+ read_signal_strength(fe);
+
+ /* read carrier/noise on FE_HAS_CARRIER */
+ if (*status & FE_HAS_CARRIER)
+ read_snr(fe);
+ else
+ p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+
+ /* read ber */
+ if (*status & FE_HAS_VITERBI) {
+ read_ber(fe);
+ } else {
+ p->pre_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->pre_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ }
+
+ return 0;
+}
+
+static int get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *p)
+{
+ struct stv *state = fe->demodulator_priv;
+ u8 tmp;
+
+ if (state->receive_mode == RCVMODE_DVBS2) {
+ u32 mc;
+ const enum fe_modulation modcod2mod[0x20] = {
+ QPSK, QPSK, QPSK, QPSK,
+ QPSK, QPSK, QPSK, QPSK,
+ QPSK, QPSK, QPSK, QPSK,
+ PSK_8, PSK_8, PSK_8, PSK_8,
+ PSK_8, PSK_8, APSK_16, APSK_16,
+ APSK_16, APSK_16, APSK_16, APSK_16,
+ APSK_32, APSK_32, APSK_32, APSK_32,
+ APSK_32,
+ };
+ const enum fe_code_rate modcod2fec[0x20] = {
+ FEC_NONE, FEC_NONE, FEC_NONE, FEC_2_5,
+ FEC_1_2, FEC_3_5, FEC_2_3, FEC_3_4,
+ FEC_4_5, FEC_5_6, FEC_8_9, FEC_9_10,
+ FEC_3_5, FEC_2_3, FEC_3_4, FEC_5_6,
+ FEC_8_9, FEC_9_10, FEC_2_3, FEC_3_4,
+ FEC_4_5, FEC_5_6, FEC_8_9, FEC_9_10,
+ FEC_3_4, FEC_4_5, FEC_5_6, FEC_8_9,
+ FEC_9_10
+ };
+ read_reg(state, RSTV0910_P2_DMDMODCOD + state->regoff, &tmp);
+ mc = ((tmp & 0x7c) >> 2);
+ p->pilot = (tmp & 0x01) ? PILOT_ON : PILOT_OFF;
+ p->modulation = modcod2mod[mc];
+ p->fec_inner = modcod2fec[mc];
+ } else if (state->receive_mode == RCVMODE_DVBS) {
+ read_reg(state, RSTV0910_P2_VITCURPUN + state->regoff, &tmp);
+ switch (tmp & 0x1F) {
+ case 0x0d:
+ p->fec_inner = FEC_1_2;
+ break;
+ case 0x12:
+ p->fec_inner = FEC_2_3;
+ break;
+ case 0x15:
+ p->fec_inner = FEC_3_4;
+ break;
+ case 0x18:
+ p->fec_inner = FEC_5_6;
+ break;
+ case 0x1a:
+ p->fec_inner = FEC_7_8;
+ break;
+ default:
+ p->fec_inner = FEC_NONE;
+ break;
+ }
+ p->rolloff = ROLLOFF_35;
+ }
+
+ return 0;
+}
+
+static int tune(struct dvb_frontend *fe, bool re_tune,
+ unsigned int mode_flags,
+ unsigned int *delay, enum fe_status *status)
+{
+ struct stv *state = fe->demodulator_priv;
+ int r;
+
+ if (re_tune) {
+ r = set_parameters(fe);
+ if (r)
+ return r;
+ state->tune_time = jiffies;
+ }
+
+ r = read_status(fe, status);
+ if (r)
+ return r;
+
+ if (*status & FE_HAS_LOCK)
+ return 0;
+ *delay = HZ;
+
+ return 0;
+}
+
+static int get_algo(struct dvb_frontend *fe)
+{
+ return DVBFE_ALGO_HW;
+}
+
+static int set_tone(struct dvb_frontend *fe, enum fe_sec_tone_mode tone)
+{
+ struct stv *state = fe->demodulator_priv;
+ u16 offs = state->nr ? 0x40 : 0;
+
+ switch (tone) {
+ case SEC_TONE_ON:
+ return write_reg(state, RSTV0910_P1_DISTXCFG + offs, 0x38);
+ case SEC_TONE_OFF:
+ return write_reg(state, RSTV0910_P1_DISTXCFG + offs, 0x3a);
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+static int wait_dis(struct stv *state, u8 flag, u8 val)
+{
+ int i;
+ u8 stat;
+ u16 offs = state->nr ? 0x40 : 0;
+
+ for (i = 0; i < 10; i++) {
+ read_reg(state, RSTV0910_P1_DISTXSTATUS + offs, &stat);
+ if ((stat & flag) == val)
+ return 0;
+ usleep_range(10000, 11000);
+ }
+ return -ETIMEDOUT;
+}
+
+static int send_master_cmd(struct dvb_frontend *fe,
+ struct dvb_diseqc_master_cmd *cmd)
+{
+ struct stv *state = fe->demodulator_priv;
+ u16 offs = state->nr ? 0x40 : 0;
+ int i;
+
+ write_reg(state, RSTV0910_P1_DISTXCFG + offs, 0x3E);
+ for (i = 0; i < cmd->msg_len; i++) {
+ wait_dis(state, 0x40, 0x00);
+ write_reg(state, RSTV0910_P1_DISTXFIFO + offs, cmd->msg[i]);
+ }
+ write_reg(state, RSTV0910_P1_DISTXCFG + offs, 0x3A);
+ wait_dis(state, 0x20, 0x20);
+ return 0;
+}
+
+static int send_burst(struct dvb_frontend *fe, enum fe_sec_mini_cmd burst)
+{
+ struct stv *state = fe->demodulator_priv;
+ u16 offs = state->nr ? 0x40 : 0;
+ u8 value;
+
+ if (burst == SEC_MINI_A) {
+ write_reg(state, RSTV0910_P1_DISTXCFG + offs, 0x3F);
+ value = 0x00;
+ } else {
+ write_reg(state, RSTV0910_P1_DISTXCFG + offs, 0x3E);
+ value = 0xFF;
+ }
+ wait_dis(state, 0x40, 0x00);
+ write_reg(state, RSTV0910_P1_DISTXFIFO + offs, value);
+ write_reg(state, RSTV0910_P1_DISTXCFG + offs, 0x3A);
+ wait_dis(state, 0x20, 0x20);
+
+ return 0;
+}
+
+static int sleep(struct dvb_frontend *fe)
+{
+ struct stv *state = fe->demodulator_priv;
+
+ stop(state);
+ return 0;
+}
+
+static const struct dvb_frontend_ops stv0910_ops = {
+ .delsys = { SYS_DVBS, SYS_DVBS2, SYS_DSS },
+ .info = {
+ .name = "ST STV0910",
+ .frequency_min = 950000,
+ .frequency_max = 2150000,
+ .frequency_stepsize = 0,
+ .frequency_tolerance = 0,
+ .symbol_rate_min = 100000,
+ .symbol_rate_max = 70000000,
+ .caps = FE_CAN_INVERSION_AUTO |
+ FE_CAN_FEC_AUTO |
+ FE_CAN_QPSK |
+ FE_CAN_2G_MODULATION |
+ FE_CAN_MULTISTREAM
+ },
+ .sleep = sleep,
+ .release = release,
+ .i2c_gate_ctrl = gate_ctrl,
+ .set_frontend = set_parameters,
+ .get_frontend_algo = get_algo,
+ .get_frontend = get_frontend,
+ .tune = tune,
+ .read_status = read_status,
+ .set_tone = set_tone,
+
+ .diseqc_send_master_cmd = send_master_cmd,
+ .diseqc_send_burst = send_burst,
+};
+
+static struct stv_base *match_base(struct i2c_adapter *i2c, u8 adr)
+{
+ struct stv_base *p;
+
+ list_for_each_entry(p, &stvlist, stvlist)
+ if (p->i2c == i2c && p->adr == adr)
+ return p;
+ return NULL;
+}
+
+static void stv0910_init_stats(struct stv *state)
+{
+ struct dtv_frontend_properties *p = &state->fe.dtv_property_cache;
+
+ p->strength.len = 1;
+ p->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->cnr.len = 1;
+ p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->pre_bit_error.len = 1;
+ p->pre_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+ p->pre_bit_count.len = 1;
+ p->pre_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+}
+
+struct dvb_frontend *stv0910_attach(struct i2c_adapter *i2c,
+ struct stv0910_cfg *cfg,
+ int nr)
+{
+ struct stv *state;
+ struct stv_base *base;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ state->tscfgh = 0x20 | (cfg->parallel ? 0 : 0x40);
+ state->tsgeneral = (cfg->parallel == 2) ? 0x02 : 0x00;
+ state->i2crpt = 0x0A | ((cfg->rptlvl & 0x07) << 4);
+ state->tsspeed = 0x28;
+ state->nr = nr;
+ state->regoff = state->nr ? 0 : 0x200;
+ state->search_range = 16000000;
+ state->demod_bits = 0x10; /* Inversion : Auto with reset to 0 */
+ state->receive_mode = RCVMODE_NONE;
+ state->cur_scrambling_code = (~0U);
+ state->single = cfg->single ? 1 : 0;
+
+ base = match_base(i2c, cfg->adr);
+ if (base) {
+ base->count++;
+ state->base = base;
+ } else {
+ base = kzalloc(sizeof(*base), GFP_KERNEL);
+ if (!base)
+ goto fail;
+ base->i2c = i2c;
+ base->adr = cfg->adr;
+ base->count = 1;
+ base->extclk = cfg->clk ? cfg->clk : 30000000;
+
+ mutex_init(&base->i2c_lock);
+ mutex_init(&base->reg_lock);
+ state->base = base;
+ if (probe(state) < 0) {
+ dev_info(&i2c->dev, "No demod found at adr %02X on %s\n",
+ cfg->adr, dev_name(&i2c->dev));
+ kfree(base);
+ goto fail;
+ }
+ list_add(&base->stvlist, &stvlist);
+ }
+ state->fe.ops = stv0910_ops;
+ state->fe.demodulator_priv = state;
+ state->nr = nr;
+
+ dev_info(&i2c->dev, "%s demod found at adr %02X on %s\n",
+ state->fe.ops.info.name, cfg->adr, dev_name(&i2c->dev));
+
+ stv0910_init_stats(state);
+
+ return &state->fe;
+
+fail:
+ kfree(state);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(stv0910_attach);
+
+MODULE_DESCRIPTION("ST STV0910 multistandard frontend driver");
+MODULE_AUTHOR("Ralph and Marcus Metzler, Manfred Voelkel");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb-frontends/stv0910.h b/drivers/media/dvb-frontends/stv0910.h
new file mode 100644
index 000000000000..fccd8d9b665f
--- /dev/null
+++ b/drivers/media/dvb-frontends/stv0910.h
@@ -0,0 +1,32 @@
+#ifndef _STV0910_H_
+#define _STV0910_H_
+
+#include <linux/types.h>
+#include <linux/i2c.h>
+
+struct stv0910_cfg {
+ u32 clk;
+ u8 adr;
+ u8 parallel;
+ u8 rptlvl;
+ u8 single;
+};
+
+#if IS_REACHABLE(CONFIG_DVB_STV0910)
+
+struct dvb_frontend *stv0910_attach(struct i2c_adapter *i2c,
+ struct stv0910_cfg *cfg, int nr);
+
+#else
+
+static inline struct dvb_frontend *stv0910_attach(struct i2c_adapter *i2c,
+ struct stv0910_cfg *cfg,
+ int nr)
+{
+ pr_warn("%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+
+#endif /* CONFIG_DVB_STV0910 */
+
+#endif /* _STV0910_H_ */
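
For context on how this header is meant to be consumed, the sketch below fills a struct stv0910_cfg and attaches the first demodulator path. The address, clock, TS mode and repeater level are illustrative example values only, not taken from any real board; per the attach code, a clk of 0 falls back to the driver's 30 MHz default.

#include "stv0910.h"

static struct dvb_frontend *example_attach(struct i2c_adapter *i2c)
{
	/* Example configuration values; adjust for the actual hardware. */
	static struct stv0910_cfg cfg = {
		.adr      = 0x68,	/* demod I2C address (example) */
		.clk      = 30000000,	/* external clock in Hz; 0 selects the 30 MHz default */
		.parallel = 0,		/* TS output mode (example) */
		.rptlvl   = 4,		/* I2C repeater level; only the low 3 bits are used */
		.single   = 0,		/* both demod paths in use */
	};

	/* nr selects the demodulator path: 0 for the first, 1 for the second */
	return stv0910_attach(i2c, &cfg, 0);
}
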
diff --git a/drivers/media/dvb-frontends/stv0910_regs.h b/drivers/media/dvb-frontends/stv0910_regs.h
new file mode 100644
index 000000000000..32ced4eaf296
--- /dev/null
+++ b/drivers/media/dvb-frontends/stv0910_regs.h
@@ -0,0 +1,4760 @@
+/*
+ * DVB-S/DVB-S2 STMicroelectronics STV0910 register definitions
+ * Author Manfred Voelkel, August 2013
+ * (c) 2013 Digital Devices GmbH Germany. All rights reserved
+ *
+ * =======================================================================
+ * Registers Declaration (Internal ST, All Applications)
+ * -------------------------
+ * Each register (RSTV0910_XXXXX) is defined by its address (2 bytes).
+ *
+ * Each field (FSTV0910_XXXXX) is defined as follows:
+ * [register address -- 2 bytes][field sign -- 1 byte][field mask -- 1 byte]
+ * ======================================================================
+ */
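
To make the packing described in the comment above concrete, the macros below split a field constant back into its parts; they are purely illustrative helpers invented for this note, not definitions used by the driver.

/* Decode the [address (2 bytes)][sign (1 byte)][mask (1 byte)] packing. */
#define STV0910_FIELD_REG(f)	(((f) >> 16) & 0xffff)	/* register address */
#define STV0910_FIELD_SIGN(f)	(((f) >> 8) & 0xff)	/* nonzero: signed field */
#define STV0910_FIELD_MASK(f)	((f) & 0xff)		/* bit mask within the register */

/*
 * For example, FSTV0910_P1_I2CT_ON (0xf12a0080) decodes to register
 * 0xf12a, unsigned, mask 0x80, while FSTV0910_P2_I_SYMBOL (0xf20201ff)
 * decodes to register 0xf202, signed, mask 0xff.
 */
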
+
+/* MID */
+#define RSTV0910_MID 0xf100
+#define FSTV0910_MCHIP_IDENT 0xf10000f0
+#define FSTV0910_MRELEASE 0xf100000f
+
+/* DID */
+#define RSTV0910_DID 0xf101
+#define FSTV0910_DEVICE_ID 0xf10100ff
+
+/* DACR1 */
+#define RSTV0910_DACR1 0xf113
+#define FSTV0910_DAC_MODE 0xf11300e0
+#define FSTV0910_DAC_VALUE1 0xf113000f
+
+/* DACR2 */
+#define RSTV0910_DACR2 0xf114
+#define FSTV0910_DAC_VALUE0 0xf11400ff
+
+/* PADCFG */
+#define RSTV0910_PADCFG 0xf11a
+#define FSTV0910_AGCRF2_OPD 0xf11a0008
+#define FSTV0910_AGCRF2_XOR 0xf11a0004
+#define FSTV0910_AGCRF1_OPD 0xf11a0002
+#define FSTV0910_AGCRF1_XOR 0xf11a0001
+
+/* OUTCFG2 */
+#define RSTV0910_OUTCFG2 0xf11b
+#define FSTV0910_TS2_ERROR_XOR 0xf11b0080
+#define FSTV0910_TS2_DPN_XOR 0xf11b0040
+#define FSTV0910_TS2_STROUT_XOR 0xf11b0020
+#define FSTV0910_TS2_CLOCKOUT_XOR 0xf11b0010
+#define FSTV0910_TS1_ERROR_XOR 0xf11b0008
+#define FSTV0910_TS1_DPN_XOR 0xf11b0004
+#define FSTV0910_TS1_STROUT_XOR 0xf11b0002
+#define FSTV0910_TS1_CLOCKOUT_XOR 0xf11b0001
+
+/* OUTCFG */
+#define RSTV0910_OUTCFG 0xf11c
+#define FSTV0910_TS2_OUTSER_HZ 0xf11c0020
+#define FSTV0910_TS1_OUTSER_HZ 0xf11c0010
+#define FSTV0910_TS2_OUTPAR_HZ 0xf11c0008
+#define FSTV0910_TS1_OUTPAR_HZ 0xf11c0004
+#define FSTV0910_TS_SERDATA0 0xf11c0002
+
+/* IRQSTATUS3 */
+#define RSTV0910_IRQSTATUS3 0xf120
+#define FSTV0910_SPLL_LOCK 0xf1200020
+#define FSTV0910_SSTREAM_LCK_1 0xf1200010
+#define FSTV0910_SSTREAM_LCK_2 0xf1200008
+#define FSTV0910_SDVBS1_PRF_2 0xf1200002
+#define FSTV0910_SDVBS1_PRF_1 0xf1200001
+
+/* IRQSTATUS2 */
+#define RSTV0910_IRQSTATUS2 0xf121
+#define FSTV0910_SSPY_ENDSIM_1 0xf1210080
+#define FSTV0910_SSPY_ENDSIM_2 0xf1210040
+#define FSTV0910_SPKTDEL_ERROR_2 0xf1210010
+#define FSTV0910_SPKTDEL_LOCKB_2 0xf1210008
+#define FSTV0910_SPKTDEL_LOCK_2 0xf1210004
+#define FSTV0910_SPKTDEL_ERROR_1 0xf1210002
+#define FSTV0910_SPKTDEL_LOCKB_1 0xf1210001
+
+/* IRQSTATUS1 */
+#define RSTV0910_IRQSTATUS1 0xf122
+#define FSTV0910_SPKTDEL_LOCK_1 0xf1220080
+#define FSTV0910_SFEC_LOCKB_2 0xf1220040
+#define FSTV0910_SFEC_LOCK_2 0xf1220020
+#define FSTV0910_SFEC_LOCKB_1 0xf1220010
+#define FSTV0910_SFEC_LOCK_1 0xf1220008
+#define FSTV0910_SDEMOD_LOCKB_2 0xf1220004
+#define FSTV0910_SDEMOD_LOCK_2 0xf1220002
+#define FSTV0910_SDEMOD_IRQ_2 0xf1220001
+
+/* IRQSTATUS0 */
+#define RSTV0910_IRQSTATUS0 0xf123
+#define FSTV0910_SDEMOD_LOCKB_1 0xf1230080
+#define FSTV0910_SDEMOD_LOCK_1 0xf1230040
+#define FSTV0910_SDEMOD_IRQ_1 0xf1230020
+#define FSTV0910_SBCH_ERRFLAG 0xf1230010
+#define FSTV0910_SDISEQC2_IRQ 0xf1230004
+#define FSTV0910_SDISEQC1_IRQ 0xf1230001
+
+/* IRQMASK3 */
+#define RSTV0910_IRQMASK3 0xf124
+#define FSTV0910_MPLL_LOCK 0xf1240020
+#define FSTV0910_MSTREAM_LCK_1 0xf1240010
+#define FSTV0910_MSTREAM_LCK_2 0xf1240008
+#define FSTV0910_MDVBS1_PRF_2 0xf1240002
+#define FSTV0910_MDVBS1_PRF_1 0xf1240001
+
+/* IRQMASK2 */
+#define RSTV0910_IRQMASK2 0xf125
+#define FSTV0910_MSPY_ENDSIM_1 0xf1250080
+#define FSTV0910_MSPY_ENDSIM_2 0xf1250040
+#define FSTV0910_MPKTDEL_ERROR_2 0xf1250010
+#define FSTV0910_MPKTDEL_LOCKB_2 0xf1250008
+#define FSTV0910_MPKTDEL_LOCK_2 0xf1250004
+#define FSTV0910_MPKTDEL_ERROR_1 0xf1250002
+#define FSTV0910_MPKTDEL_LOCKB_1 0xf1250001
+
+/* IRQMASK1 */
+#define RSTV0910_IRQMASK1 0xf126
+#define FSTV0910_MPKTDEL_LOCK_1 0xf1260080
+#define FSTV0910_MFEC_LOCKB_2 0xf1260040
+#define FSTV0910_MFEC_LOCK_2 0xf1260020
+#define FSTV0910_MFEC_LOCKB_1 0xf1260010
+#define FSTV0910_MFEC_LOCK_1 0xf1260008
+#define FSTV0910_MDEMOD_LOCKB_2 0xf1260004
+#define FSTV0910_MDEMOD_LOCK_2 0xf1260002
+#define FSTV0910_MDEMOD_IRQ_2 0xf1260001
+
+/* IRQMASK0 */
+#define RSTV0910_IRQMASK0 0xf127
+#define FSTV0910_MDEMOD_LOCKB_1 0xf1270080
+#define FSTV0910_MDEMOD_LOCK_1 0xf1270040
+#define FSTV0910_MDEMOD_IRQ_1 0xf1270020
+#define FSTV0910_MBCH_ERRFLAG 0xf1270010
+#define FSTV0910_MDISEQC2_IRQ 0xf1270004
+#define FSTV0910_MDISEQC1_IRQ 0xf1270001
+
+/* I2CCFG */
+#define RSTV0910_I2CCFG 0xf129
+#define FSTV0910_I2C_FASTMODE 0xf1290008
+#define FSTV0910_I2CADDR_INC 0xf1290003
+
+/* P1_I2CRPT */
+#define RSTV0910_P1_I2CRPT 0xf12a
+#define FSTV0910_P1_I2CT_ON 0xf12a0080
+#define FSTV0910_P1_ENARPT_LEVEL 0xf12a0070
+#define FSTV0910_P1_SCLT_DELAY 0xf12a0008
+#define FSTV0910_P1_STOP_ENABLE 0xf12a0004
+#define FSTV0910_P1_STOP_SDAT2SDA 0xf12a0002
+
+/* P2_I2CRPT */
+#define RSTV0910_P2_I2CRPT 0xf12b
+#define FSTV0910_P2_I2CT_ON 0xf12b0080
+#define FSTV0910_P2_ENARPT_LEVEL 0xf12b0070
+#define FSTV0910_P2_SCLT_DELAY 0xf12b0008
+#define FSTV0910_P2_STOP_ENABLE 0xf12b0004
+#define FSTV0910_P2_STOP_SDAT2SDA 0xf12b0002
+
+/* GPIO0CFG */
+#define RSTV0910_GPIO0CFG 0xf140
+#define FSTV0910_GPIO0_OPD 0xf1400080
+#define FSTV0910_GPIO0_CONFIG 0xf140007e
+#define FSTV0910_GPIO0_XOR 0xf1400001
+
+/* GPIO1CFG */
+#define RSTV0910_GPIO1CFG 0xf141
+#define FSTV0910_GPIO1_OPD 0xf1410080
+#define FSTV0910_GPIO1_CONFIG 0xf141007e
+#define FSTV0910_GPIO1_XOR 0xf1410001
+
+/* GPIO2CFG */
+#define RSTV0910_GPIO2CFG 0xf142
+#define FSTV0910_GPIO2_OPD 0xf1420080
+#define FSTV0910_GPIO2_CONFIG 0xf142007e
+#define FSTV0910_GPIO2_XOR 0xf1420001
+
+/* GPIO3CFG */
+#define RSTV0910_GPIO3CFG 0xf143
+#define FSTV0910_GPIO3_OPD 0xf1430080
+#define FSTV0910_GPIO3_CONFIG 0xf143007e
+#define FSTV0910_GPIO3_XOR 0xf1430001
+
+/* GPIO4CFG */
+#define RSTV0910_GPIO4CFG 0xf144
+#define FSTV0910_GPIO4_OPD 0xf1440080
+#define FSTV0910_GPIO4_CONFIG 0xf144007e
+#define FSTV0910_GPIO4_XOR 0xf1440001
+
+/* GPIO5CFG */
+#define RSTV0910_GPIO5CFG 0xf145
+#define FSTV0910_GPIO5_OPD 0xf1450080
+#define FSTV0910_GPIO5_CONFIG 0xf145007e
+#define FSTV0910_GPIO5_XOR 0xf1450001
+
+/* GPIO6CFG */
+#define RSTV0910_GPIO6CFG 0xf146
+#define FSTV0910_GPIO6_OPD 0xf1460080
+#define FSTV0910_GPIO6_CONFIG 0xf146007e
+#define FSTV0910_GPIO6_XOR 0xf1460001
+
+/* GPIO7CFG */
+#define RSTV0910_GPIO7CFG 0xf147
+#define FSTV0910_GPIO7_OPD 0xf1470080
+#define FSTV0910_GPIO7_CONFIG 0xf147007e
+#define FSTV0910_GPIO7_XOR 0xf1470001
+
+/* GPIO8CFG */
+#define RSTV0910_GPIO8CFG 0xf148
+#define FSTV0910_GPIO8_OPD 0xf1480080
+#define FSTV0910_GPIO8_CONFIG 0xf148007e
+#define FSTV0910_GPIO8_XOR 0xf1480001
+
+/* GPIO9CFG */
+#define RSTV0910_GPIO9CFG 0xf149
+#define FSTV0910_GPIO9_OPD 0xf1490080
+#define FSTV0910_GPIO9_CONFIG 0xf149007e
+#define FSTV0910_GPIO9_XOR 0xf1490001
+
+/* GPIO10CFG */
+#define RSTV0910_GPIO10CFG 0xf14a
+#define FSTV0910_GPIO10_OPD 0xf14a0080
+#define FSTV0910_GPIO10_CONFIG 0xf14a007e
+#define FSTV0910_GPIO10_XOR 0xf14a0001
+
+/* GPIO11CFG */
+#define RSTV0910_GPIO11CFG 0xf14b
+#define FSTV0910_GPIO11_OPD 0xf14b0080
+#define FSTV0910_GPIO11_CONFIG 0xf14b007e
+#define FSTV0910_GPIO11_XOR 0xf14b0001
+
+/* GPIO12CFG */
+#define RSTV0910_GPIO12CFG 0xf14c
+#define FSTV0910_GPIO12_OPD 0xf14c0080
+#define FSTV0910_GPIO12_CONFIG 0xf14c007e
+#define FSTV0910_GPIO12_XOR 0xf14c0001
+
+/* GPIO13CFG */
+#define RSTV0910_GPIO13CFG 0xf14d
+#define FSTV0910_GPIO13_OPD 0xf14d0080
+#define FSTV0910_GPIO13_CONFIG 0xf14d007e
+#define FSTV0910_GPIO13_XOR 0xf14d0001
+
+/* GPIO14CFG */
+#define RSTV0910_GPIO14CFG 0xf14e
+#define FSTV0910_GPIO14_OPD 0xf14e0080
+#define FSTV0910_GPIO14_CONFIG 0xf14e007e
+#define FSTV0910_GPIO14_XOR 0xf14e0001
+
+/* GPIO15CFG */
+#define RSTV0910_GPIO15CFG 0xf14f
+#define FSTV0910_GPIO15_OPD 0xf14f0080
+#define FSTV0910_GPIO15_CONFIG 0xf14f007e
+#define FSTV0910_GPIO15_XOR 0xf14f0001
+
+/* GPIO16CFG */
+#define RSTV0910_GPIO16CFG 0xf150
+#define FSTV0910_GPIO16_OPD 0xf1500080
+#define FSTV0910_GPIO16_CONFIG 0xf150007e
+#define FSTV0910_GPIO16_XOR 0xf1500001
+
+/* GPIO17CFG */
+#define RSTV0910_GPIO17CFG 0xf151
+#define FSTV0910_GPIO17_OPD 0xf1510080
+#define FSTV0910_GPIO17_CONFIG 0xf151007e
+#define FSTV0910_GPIO17_XOR 0xf1510001
+
+/* GPIO18CFG */
+#define RSTV0910_GPIO18CFG 0xf152
+#define FSTV0910_GPIO18_OPD 0xf1520080
+#define FSTV0910_GPIO18_CONFIG 0xf152007e
+#define FSTV0910_GPIO18_XOR 0xf1520001
+
+/* GPIO19CFG */
+#define RSTV0910_GPIO19CFG 0xf153
+#define FSTV0910_GPIO19_OPD 0xf1530080
+#define FSTV0910_GPIO19_CONFIG 0xf153007e
+#define FSTV0910_GPIO19_XOR 0xf1530001
+
+/* GPIO20CFG */
+#define RSTV0910_GPIO20CFG 0xf154
+#define FSTV0910_GPIO20_OPD 0xf1540080
+#define FSTV0910_GPIO20_CONFIG 0xf154007e
+#define FSTV0910_GPIO20_XOR 0xf1540001
+
+/* GPIO21CFG */
+#define RSTV0910_GPIO21CFG 0xf155
+#define FSTV0910_GPIO21_OPD 0xf1550080
+#define FSTV0910_GPIO21_CONFIG 0xf155007e
+#define FSTV0910_GPIO21_XOR 0xf1550001
+
+/* GPIO22CFG */
+#define RSTV0910_GPIO22CFG 0xf156
+#define FSTV0910_GPIO22_OPD 0xf1560080
+#define FSTV0910_GPIO22_CONFIG 0xf156007e
+#define FSTV0910_GPIO22_XOR 0xf1560001
+
+/* STRSTATUS1 */
+#define RSTV0910_STRSTATUS1 0xf16a
+#define FSTV0910_STRSTATUS_SEL2 0xf16a00f0
+#define FSTV0910_STRSTATUS_SEL1 0xf16a000f
+
+/* STRSTATUS2 */
+#define RSTV0910_STRSTATUS2 0xf16b
+#define FSTV0910_STRSTATUS_SEL4 0xf16b00f0
+#define FSTV0910_STRSTATUS_SEL3 0xf16b000f
+
+/* STRSTATUS3 */
+#define RSTV0910_STRSTATUS3 0xf16c
+#define FSTV0910_STRSTATUS_SEL6 0xf16c00f0
+#define FSTV0910_STRSTATUS_SEL5 0xf16c000f
+
+/* FSKTFC2 */
+#define RSTV0910_FSKTFC2 0xf170
+#define FSTV0910_FSKT_KMOD 0xf17000fc
+#define FSTV0910_FSKT_CAR2 0xf1700003
+
+/* FSKTFC1 */
+#define RSTV0910_FSKTFC1 0xf171
+#define FSTV0910_FSKT_CAR1 0xf17100ff
+
+/* FSKTFC0 */
+#define RSTV0910_FSKTFC0 0xf172
+#define FSTV0910_FSKT_CAR0 0xf17200ff
+
+/* FSKTDELTAF1 */
+#define RSTV0910_FSKTDELTAF1 0xf173
+#define FSTV0910_FSKT_DELTAF1 0xf173000f
+
+/* FSKTDELTAF0 */
+#define RSTV0910_FSKTDELTAF0 0xf174
+#define FSTV0910_FSKT_DELTAF0 0xf17400ff
+
+/* FSKTCTRL */
+#define RSTV0910_FSKTCTRL 0xf175
+#define FSTV0910_FSKT_PINSEL 0xf1750080
+#define FSTV0910_FSKT_EN_SGN 0xf1750040
+#define FSTV0910_FSKT_MOD_SGN 0xf1750020
+#define FSTV0910_FSKT_MOD_EN 0xf175001c
+#define FSTV0910_FSKT_DACMODE 0xf1750003
+
+/* FSKRFC2 */
+#define RSTV0910_FSKRFC2 0xf176
+#define FSTV0910_FSKR_DETSGN 0xf1760040
+#define FSTV0910_FSKR_OUTSGN 0xf1760020
+#define FSTV0910_FSKR_KAGC 0xf176001c
+#define FSTV0910_FSKR_CAR2 0xf1760003
+
+/* FSKRFC1 */
+#define RSTV0910_FSKRFC1 0xf177
+#define FSTV0910_FSKR_CAR1 0xf17700ff
+
+/* FSKRFC0 */
+#define RSTV0910_FSKRFC0 0xf178
+#define FSTV0910_FSKR_CAR0 0xf17800ff
+
+/* FSKRK1 */
+#define RSTV0910_FSKRK1 0xf179
+#define FSTV0910_FSKR_K1_EXP 0xf17900e0
+#define FSTV0910_FSKR_K1_MANT 0xf179001f
+
+/* FSKRK2 */
+#define RSTV0910_FSKRK2 0xf17a
+#define FSTV0910_FSKR_K2_EXP 0xf17a00e0
+#define FSTV0910_FSKR_K2_MANT 0xf17a001f
+
+/* FSKRAGCR */
+#define RSTV0910_FSKRAGCR 0xf17b
+#define FSTV0910_FSKR_OUTCTL 0xf17b00c0
+#define FSTV0910_FSKR_AGC_REF 0xf17b003f
+
+/* FSKRAGC */
+#define RSTV0910_FSKRAGC 0xf17c
+#define FSTV0910_FSKR_AGC_ACCU 0xf17c00ff
+
+/* FSKRALPHA */
+#define RSTV0910_FSKRALPHA 0xf17d
+#define FSTV0910_FSKR_ALPHA_EXP 0xf17d001c
+#define FSTV0910_FSKR_ALPHA_M 0xf17d0003
+
+/* FSKRPLTH1 */
+#define RSTV0910_FSKRPLTH1 0xf17e
+#define FSTV0910_FSKR_BETA 0xf17e00f0
+#define FSTV0910_FSKR_PLL_TRESH1 0xf17e000f
+
+/* FSKRPLTH0 */
+#define RSTV0910_FSKRPLTH0 0xf17f
+#define FSTV0910_FSKR_PLL_TRESH0 0xf17f00ff
+
+/* FSKRDF1 */
+#define RSTV0910_FSKRDF1 0xf180
+#define FSTV0910_FSKR_OUT 0xf1800080
+#define FSTV0910_FSKR_STATE 0xf1800060
+#define FSTV0910_FSKR_DELTAF1 0xf180001f
+
+/* FSKRDF0 */
+#define RSTV0910_FSKRDF0 0xf181
+#define FSTV0910_FSKR_DELTAF0 0xf18100ff
+
+/* FSKRSTEPP */
+#define RSTV0910_FSKRSTEPP 0xf182
+#define FSTV0910_FSKR_STEP_PLUS 0xf18200ff
+
+/* FSKRSTEPM */
+#define RSTV0910_FSKRSTEPM 0xf183
+#define FSTV0910_FSKR_STEP_MINUS 0xf18300ff
+
+/* FSKRDET1 */
+#define RSTV0910_FSKRDET1 0xf184
+#define FSTV0910_FSKR_DETECT 0xf1840080
+#define FSTV0910_FSKR_CARDET_ACCU1 0xf184000f
+
+/* FSKRDET0 */
+#define RSTV0910_FSKRDET0 0xf185
+#define FSTV0910_FSKR_CARDET_ACCU0 0xf18500ff
+
+/* FSKRDTH1 */
+#define RSTV0910_FSKRDTH1 0xf186
+#define FSTV0910_FSKR_CARLOSS_THRESH1 0xf18600f0
+#define FSTV0910_FSKR_CARDET_THRESH1 0xf186000f
+
+/* FSKRDTH0 */
+#define RSTV0910_FSKRDTH0 0xf187
+#define FSTV0910_FSKR_CARDET_THRESH0 0xf18700ff
+
+/* FSKRLOSS */
+#define RSTV0910_FSKRLOSS 0xf188
+#define FSTV0910_FSKR_CARLOSS_THRESH0 0xf18800ff
+
+/* NCOARSE */
+#define RSTV0910_NCOARSE 0xf1b3
+#define FSTV0910_CP 0xf1b300f8
+#define FSTV0910_IDF 0xf1b30007
+
+/* NCOARSE1 */
+#define RSTV0910_NCOARSE1 0xf1b4
+#define FSTV0910_N_DIV 0xf1b400ff
+
+/* NCOARSE2 */
+#define RSTV0910_NCOARSE2 0xf1b5
+#define FSTV0910_ODF 0xf1b5003f
+
+/* SYNTCTRL */
+#define RSTV0910_SYNTCTRL 0xf1b6
+#define FSTV0910_STANDBY 0xf1b60080
+#define FSTV0910_BYPASSPLLCORE 0xf1b60040
+#define FSTV0910_STOP_PLL 0xf1b60008
+#define FSTV0910_OSCI_E 0xf1b60002
+
+/* FILTCTRL */
+#define RSTV0910_FILTCTRL 0xf1b7
+#define FSTV0910_INV_CLKFSK 0xf1b70002
+#define FSTV0910_BYPASS_APPLI 0xf1b70001
+
+/* PLLSTAT */
+#define RSTV0910_PLLSTAT 0xf1b8
+#define FSTV0910_PLLLOCK 0xf1b80001
+
+/* STOPCLK1 */
+#define RSTV0910_STOPCLK1 0xf1c2
+#define FSTV0910_INV_CLKADCI2 0xf1c20004
+#define FSTV0910_INV_CLKADCI1 0xf1c20001
+
+/* STOPCLK2 */
+#define RSTV0910_STOPCLK2 0xf1c3
+#define FSTV0910_STOP_DVBS2FEC2 0xf1c30020
+#define FSTV0910_STOP_DVBS2FEC 0xf1c30010
+#define FSTV0910_STOP_DVBS1FEC2 0xf1c30008
+#define FSTV0910_STOP_DVBS1FEC 0xf1c30004
+#define FSTV0910_STOP_DEMOD2 0xf1c30002
+#define FSTV0910_STOP_DEMOD 0xf1c30001
+
+/* PREGCTL */
+#define RSTV0910_PREGCTL 0xf1c8
+#define FSTV0910_REG3V3TO2V5_POFF 0xf1c80080
+
+/* TSTTNR0 */
+#define RSTV0910_TSTTNR0 0xf1df
+#define FSTV0910_FSK_PON 0xf1df0004
+
+/* TSTTNR1 */
+#define RSTV0910_TSTTNR1 0xf1e0
+#define FSTV0910_ADC1_PON 0xf1e00002
+
+/* TSTTNR2 */
+#define RSTV0910_TSTTNR2 0xf1e1
+#define FSTV0910_I2C_DISEQC_PON 0xf1e10020
+#define FSTV0910_DISEQC_CLKDIV 0xf1e1000f
+
+/* TSTTNR3 */
+#define RSTV0910_TSTTNR3 0xf1e2
+#define FSTV0910_ADC2_PON 0xf1e20002
+
+/* P2_IQCONST */
+#define RSTV0910_P2_IQCONST 0xf200
+#define FSTV0910_P2_CONSTEL_SELECT 0xf2000060
+#define FSTV0910_P2_IQSYMB_SEL 0xf200001f
+
+/* P2_NOSCFG */
+#define RSTV0910_P2_NOSCFG 0xf201
+#define FSTV0910_P2_DUMMYPL_NOSDATA 0xf2010020
+#define FSTV0910_P2_NOSPLH_BETA 0xf2010018
+#define FSTV0910_P2_NOSDATA_BETA 0xf2010007
+
+/* P2_ISYMB */
+#define RSTV0910_P2_ISYMB 0xf202
+#define FSTV0910_P2_I_SYMBOL 0xf20201ff
+
+/* P2_QSYMB */
+#define RSTV0910_P2_QSYMB 0xf203
+#define FSTV0910_P2_Q_SYMBOL 0xf20301ff
+
+/* P2_AGC1CFG */
+#define RSTV0910_P2_AGC1CFG 0xf204
+#define FSTV0910_P2_DC_FROZEN 0xf2040080
+#define FSTV0910_P2_DC_CORRECT 0xf2040040
+#define FSTV0910_P2_AMM_FROZEN 0xf2040020
+#define FSTV0910_P2_AMM_CORRECT 0xf2040010
+#define FSTV0910_P2_QUAD_FROZEN 0xf2040008
+#define FSTV0910_P2_QUAD_CORRECT 0xf2040004
+
+/* P2_AGC1CN */
+#define RSTV0910_P2_AGC1CN 0xf206
+#define FSTV0910_P2_AGC1_LOCKED 0xf2060080
+#define FSTV0910_P2_AGC1_MINPOWER 0xf2060010
+#define FSTV0910_P2_AGCOUT_FAST 0xf2060008
+#define FSTV0910_P2_AGCIQ_BETA 0xf2060007
+
+/* P2_AGC1REF */
+#define RSTV0910_P2_AGC1REF 0xf207
+#define FSTV0910_P2_AGCIQ_REF 0xf20700ff
+
+/* P2_IDCCOMP */
+#define RSTV0910_P2_IDCCOMP 0xf208
+#define FSTV0910_P2_IAVERAGE_ADJ 0xf20801ff
+
+/* P2_QDCCOMP */
+#define RSTV0910_P2_QDCCOMP 0xf209
+#define FSTV0910_P2_QAVERAGE_ADJ 0xf20901ff
+
+/* P2_POWERI */
+#define RSTV0910_P2_POWERI 0xf20a
+#define FSTV0910_P2_POWER_I 0xf20a00ff
+
+/* P2_POWERQ */
+#define RSTV0910_P2_POWERQ 0xf20b
+#define FSTV0910_P2_POWER_Q 0xf20b00ff
+
+/* P2_AGC1AMM */
+#define RSTV0910_P2_AGC1AMM 0xf20c
+#define FSTV0910_P2_AMM_VALUE 0xf20c00ff
+
+/* P2_AGC1QUAD */
+#define RSTV0910_P2_AGC1QUAD 0xf20d
+#define FSTV0910_P2_QUAD_VALUE 0xf20d01ff
+
+/* P2_AGCIQIN1 */
+#define RSTV0910_P2_AGCIQIN1 0xf20e
+#define FSTV0910_P2_AGCIQ_VALUE1 0xf20e00ff
+
+/* P2_AGCIQIN0 */
+#define RSTV0910_P2_AGCIQIN0 0xf20f
+#define FSTV0910_P2_AGCIQ_VALUE0 0xf20f00ff
+
+/* P2_DEMOD */
+#define RSTV0910_P2_DEMOD 0xf210
+#define FSTV0910_P2_MANUALS2_ROLLOFF 0xf2100080
+#define FSTV0910_P2_SPECINV_CONTROL 0xf2100030
+#define FSTV0910_P2_MANUALSX_ROLLOFF 0xf2100004
+#define FSTV0910_P2_ROLLOFF_CONTROL 0xf2100003
+
+/* P2_DMDMODCOD */
+#define RSTV0910_P2_DMDMODCOD 0xf211
+#define FSTV0910_P2_MANUAL_MODCOD 0xf2110080
+#define FSTV0910_P2_DEMOD_MODCOD 0xf211007c
+#define FSTV0910_P2_DEMOD_TYPE 0xf2110003
+
+/* P2_DSTATUS */
+#define RSTV0910_P2_DSTATUS 0xf212
+#define FSTV0910_P2_CAR_LOCK 0xf2120080
+#define FSTV0910_P2_TMGLOCK_QUALITY 0xf2120060
+#define FSTV0910_P2_LOCK_DEFINITIF 0xf2120008
+#define FSTV0910_P2_OVADC_DETECT 0xf2120001
+
+/* P2_DSTATUS2 */
+#define RSTV0910_P2_DSTATUS2 0xf213
+#define FSTV0910_P2_DEMOD_DELOCK 0xf2130080
+#define FSTV0910_P2_MODCODRQ_SYNCTAG 0xf2130020
+#define FSTV0910_P2_POLYPH_SATEVENT 0xf2130010
+#define FSTV0910_P2_AGC1_NOSIGNALACK 0xf2130008
+#define FSTV0910_P2_AGC2_OVERFLOW 0xf2130004
+#define FSTV0910_P2_CFR_OVERFLOW 0xf2130002
+#define FSTV0910_P2_GAMMA_OVERUNDER 0xf2130001
+
+/* P2_DMDCFGMD */
+#define RSTV0910_P2_DMDCFGMD 0xf214
+#define FSTV0910_P2_DVBS2_ENABLE 0xf2140080
+#define FSTV0910_P2_DVBS1_ENABLE 0xf2140040
+#define FSTV0910_P2_SCAN_ENABLE 0xf2140010
+#define FSTV0910_P2_CFR_AUTOSCAN 0xf2140008
+#define FSTV0910_P2_TUN_RNG 0xf2140003
+
+/* P2_DMDCFG2 */
+#define RSTV0910_P2_DMDCFG2 0xf215
+#define FSTV0910_P2_S1S2_SEQUENTIAL 0xf2150040
+#define FSTV0910_P2_INFINITE_RELOCK 0xf2150010
+
+/* P2_DMDISTATE */
+#define RSTV0910_P2_DMDISTATE 0xf216
+#define FSTV0910_P2_I2C_NORESETDMODE 0xf2160080
+#define FSTV0910_P2_I2C_DEMOD_MODE 0xf216001f
+
+/* P2_DMDT0M */
+#define RSTV0910_P2_DMDT0M 0xf217
+#define FSTV0910_P2_DMDT0_MIN 0xf21700ff
+
+/* P2_DMDSTATE */
+#define RSTV0910_P2_DMDSTATE 0xf21b
+#define FSTV0910_P2_HEADER_MODE 0xf21b0060
+
+/* P2_DMDFLYW */
+#define RSTV0910_P2_DMDFLYW 0xf21c
+#define FSTV0910_P2_I2C_IRQVAL 0xf21c00f0
+#define FSTV0910_P2_FLYWHEEL_CPT 0xf21c000f
+
+/* P2_DSTATUS3 */
+#define RSTV0910_P2_DSTATUS3 0xf21d
+#define FSTV0910_P2_CFR_ZIGZAG 0xf21d0080
+#define FSTV0910_P2_DEMOD_CFGMODE 0xf21d0060
+#define FSTV0910_P2_GAMMA_LOWBAUDRATE 0xf21d0010
+
+/* P2_DMDCFG3 */
+#define RSTV0910_P2_DMDCFG3 0xf21e
+#define FSTV0910_P2_NOSTOP_FIFOFULL 0xf21e0008
+
+/* P2_DMDCFG4 */
+#define RSTV0910_P2_DMDCFG4 0xf21f
+#define FSTV0910_P2_DIS_VITLOCK 0xf21f0080
+#define FSTV0910_P2_DIS_CLKENABLE 0xf21f0004
+
+/* P2_CORRELMANT */
+#define RSTV0910_P2_CORRELMANT 0xf220
+#define FSTV0910_P2_CORREL_MANT 0xf22000ff
+
+/* P2_CORRELABS */
+#define RSTV0910_P2_CORRELABS 0xf221
+#define FSTV0910_P2_CORREL_ABS 0xf22100ff
+
+/* P2_CORRELEXP */
+#define RSTV0910_P2_CORRELEXP 0xf222
+#define FSTV0910_P2_CORREL_ABSEXP 0xf22200f0
+#define FSTV0910_P2_CORREL_EXP 0xf222000f
+
+/* P2_PLHMODCOD */
+#define RSTV0910_P2_PLHMODCOD 0xf224
+#define FSTV0910_P2_SPECINV_DEMOD 0xf2240080
+#define FSTV0910_P2_PLH_MODCOD 0xf224007c
+#define FSTV0910_P2_PLH_TYPE 0xf2240003
+
+/* P2_DMDREG */
+#define RSTV0910_P2_DMDREG 0xf225
+#define FSTV0910_P2_DECIM_PLFRAMES 0xf2250001
+
+/* P2_AGCNADJ */
+#define RSTV0910_P2_AGCNADJ 0xf226
+#define FSTV0910_P2_RADJOFF_AGC2 0xf2260080
+#define FSTV0910_P2_RADJOFF_AGC1 0xf2260040
+#define FSTV0910_P2_AGC_NADJ 0xf226013f
+
+/* P2_AGCKS */
+#define RSTV0910_P2_AGCKS 0xf227
+#define FSTV0910_P2_RSADJ_MANUALCFG 0xf2270080
+#define FSTV0910_P2_RSADJ_CCMMODE 0xf2270040
+#define FSTV0910_P2_RADJ_SPSK 0xf227013f
+
+/* P2_AGCKQ */
+#define RSTV0910_P2_AGCKQ 0xf228
+#define FSTV0910_P2_RADJON_DVBS1 0xf2280040
+#define FSTV0910_P2_RADJ_QPSK 0xf228013f
+
+/* P2_AGCK8 */
+#define RSTV0910_P2_AGCK8 0xf229
+#define FSTV0910_P2_RADJ_8PSK 0xf229013f
+
+/* P2_AGCK16 */
+#define RSTV0910_P2_AGCK16 0xf22a
+#define FSTV0910_P2_R2ADJOFF_16APSK 0xf22a0040
+#define FSTV0910_P2_R1ADJOFF_16APSK 0xf22a0020
+#define FSTV0910_P2_RADJ_16APSK 0xf22a011f
+
+/* P2_AGCK32 */
+#define RSTV0910_P2_AGCK32 0xf22b
+#define FSTV0910_P2_R3ADJOFF_32APSK 0xf22b0080
+#define FSTV0910_P2_R2ADJOFF_32APSK 0xf22b0040
+#define FSTV0910_P2_R1ADJOFF_32APSK 0xf22b0020
+#define FSTV0910_P2_RADJ_32APSK 0xf22b011f
+
+/* P2_AGC2O */
+#define RSTV0910_P2_AGC2O 0xf22c
+#define FSTV0910_P2_CSTENV_MODE 0xf22c00c0
+#define FSTV0910_P2_AGC2_COEF 0xf22c0007
+
+/* P2_AGC2REF */
+#define RSTV0910_P2_AGC2REF 0xf22d
+#define FSTV0910_P2_AGC2_REF 0xf22d00ff
+
+/* P2_AGC1ADJ */
+#define RSTV0910_P2_AGC1ADJ 0xf22e
+#define FSTV0910_P2_AGC1_ADJUSTED 0xf22e007f
+
+/* P2_AGCRSADJ */
+#define RSTV0910_P2_AGCRSADJ 0xf22f
+#define FSTV0910_P2_RS_ADJUSTED 0xf22f007f
+
+/* P2_AGCRQADJ */
+#define RSTV0910_P2_AGCRQADJ 0xf230
+#define FSTV0910_P2_RQ_ADJUSTED 0xf230007f
+
+/* P2_AGCR8ADJ */
+#define RSTV0910_P2_AGCR8ADJ 0xf231
+#define FSTV0910_P2_R8_ADJUSTED 0xf231007f
+
+/* P2_AGCR1ADJ */
+#define RSTV0910_P2_AGCR1ADJ 0xf232
+#define FSTV0910_P2_R1_ADJUSTED 0xf232007f
+
+/* P2_AGCR2ADJ */
+#define RSTV0910_P2_AGCR2ADJ 0xf233
+#define FSTV0910_P2_R2_ADJUSTED 0xf233007f
+
+/* P2_AGCR3ADJ */
+#define RSTV0910_P2_AGCR3ADJ 0xf234
+#define FSTV0910_P2_R3_ADJUSTED 0xf234007f
+
+/* P2_AGCREFADJ */
+#define RSTV0910_P2_AGCREFADJ 0xf235
+#define FSTV0910_P2_AGC2REF_ADJUSTED 0xf235007f
+
+/* P2_AGC2I1 */
+#define RSTV0910_P2_AGC2I1 0xf236
+#define FSTV0910_P2_AGC2_INTEGRATOR1 0xf23600ff
+
+/* P2_AGC2I0 */
+#define RSTV0910_P2_AGC2I0 0xf237
+#define FSTV0910_P2_AGC2_INTEGRATOR0 0xf23700ff
+
+/* P2_CARCFG */
+#define RSTV0910_P2_CARCFG 0xf238
+#define FSTV0910_P2_ROTAON 0xf2380004
+#define FSTV0910_P2_PH_DET_ALGO 0xf2380003
+
+/* P2_ACLC */
+#define RSTV0910_P2_ACLC 0xf239
+#define FSTV0910_P2_CAR_ALPHA_MANT 0xf2390030
+#define FSTV0910_P2_CAR_ALPHA_EXP 0xf239000f
+
+/* P2_BCLC */
+#define RSTV0910_P2_BCLC 0xf23a
+#define FSTV0910_P2_CAR_BETA_MANT 0xf23a0030
+#define FSTV0910_P2_CAR_BETA_EXP 0xf23a000f
+
+/* P2_ACLCS2 */
+#define RSTV0910_P2_ACLCS2 0xf23b
+#define FSTV0910_P2_CARS2_APLHA_MANTISSE 0xf23b0030
+#define FSTV0910_P2_CARS2_ALPHA_EXP 0xf23b000f
+
+/* P2_BCLCS2 */
+#define RSTV0910_P2_BCLCS2 0xf23c
+#define FSTV0910_P2_CARS2_BETA_MANTISSE 0xf23c0030
+#define FSTV0910_P2_CARS2_BETA_EXP 0xf23c000f
+
+/* P2_CARFREQ */
+#define RSTV0910_P2_CARFREQ 0xf23d
+#define FSTV0910_P2_KC_COARSE_EXP 0xf23d00f0
+#define FSTV0910_P2_BETA_FREQ 0xf23d000f
+
+/* P2_CARHDR */
+#define RSTV0910_P2_CARHDR 0xf23e
+#define FSTV0910_P2_K_FREQ_HDR 0xf23e00ff
+
+/* P2_LDT */
+#define RSTV0910_P2_LDT 0xf23f
+#define FSTV0910_P2_CARLOCK_THRES 0xf23f01ff
+
+/* P2_LDT2 */
+#define RSTV0910_P2_LDT2 0xf240
+#define FSTV0910_P2_CARLOCK_THRES2 0xf24001ff
+
+/* P2_CFRICFG */
+#define RSTV0910_P2_CFRICFG 0xf241
+#define FSTV0910_P2_NEG_CFRSTEP 0xf2410001
+
+/* P2_CFRUP1 */
+#define RSTV0910_P2_CFRUP1 0xf242
+#define FSTV0910_P2_CFR_UP1 0xf24201ff
+
+/* P2_CFRUP0 */
+#define RSTV0910_P2_CFRUP0 0xf243
+#define FSTV0910_P2_CFR_UP0 0xf24300ff
+
+/* P2_CFRIBASE1 */
+#define RSTV0910_P2_CFRIBASE1 0xf244
+#define FSTV0910_P2_CFRINIT_BASE1 0xf24400ff
+
+/* P2_CFRIBASE0 */
+#define RSTV0910_P2_CFRIBASE0 0xf245
+#define FSTV0910_P2_CFRINIT_BASE0 0xf24500ff
+
+/* P2_CFRLOW1 */
+#define RSTV0910_P2_CFRLOW1 0xf246
+#define FSTV0910_P2_CFR_LOW1 0xf24601ff
+
+/* P2_CFRLOW0 */
+#define RSTV0910_P2_CFRLOW0 0xf247
+#define FSTV0910_P2_CFR_LOW0 0xf24700ff
+
+/* P2_CFRINIT1 */
+#define RSTV0910_P2_CFRINIT1 0xf248
+#define FSTV0910_P2_CFR_INIT1 0xf24801ff
+
+/* P2_CFRINIT0 */
+#define RSTV0910_P2_CFRINIT0 0xf249
+#define FSTV0910_P2_CFR_INIT0 0xf24900ff
+
+/* P2_CFRINC1 */
+#define RSTV0910_P2_CFRINC1 0xf24a
+#define FSTV0910_P2_MANUAL_CFRINC 0xf24a0080
+#define FSTV0910_P2_CFR_INC1 0xf24a003f
+
+/* P2_CFRINC0 */
+#define RSTV0910_P2_CFRINC0 0xf24b
+#define FSTV0910_P2_CFR_INC0 0xf24b00ff
+
+/* P2_CFR2 */
+#define RSTV0910_P2_CFR2 0xf24c
+#define FSTV0910_P2_CAR_FREQ2 0xf24c01ff
+
+/* P2_CFR1 */
+#define RSTV0910_P2_CFR1 0xf24d
+#define FSTV0910_P2_CAR_FREQ1 0xf24d00ff
+
+/* P2_CFR0 */
+#define RSTV0910_P2_CFR0 0xf24e
+#define FSTV0910_P2_CAR_FREQ0 0xf24e00ff
+
+/* P2_LDI */
+#define RSTV0910_P2_LDI 0xf24f
+#define FSTV0910_P2_LOCK_DET_INTEGR 0xf24f01ff
+
+/* P2_TMGCFG */
+#define RSTV0910_P2_TMGCFG 0xf250
+#define FSTV0910_P2_TMGLOCK_BETA 0xf25000c0
+#define FSTV0910_P2_DO_TIMING_CORR 0xf2500010
+#define FSTV0910_P2_TMG_MINFREQ 0xf2500003
+
+/* P2_RTC */
+#define RSTV0910_P2_RTC 0xf251
+#define FSTV0910_P2_TMGALPHA_EXP 0xf25100f0
+#define FSTV0910_P2_TMGBETA_EXP 0xf251000f
+
+/* P2_RTCS2 */
+#define RSTV0910_P2_RTCS2 0xf252
+#define FSTV0910_P2_TMGALPHAS2_EXP 0xf25200f0
+#define FSTV0910_P2_TMGBETAS2_EXP 0xf252000f
+
+/* P2_TMGTHRISE */
+#define RSTV0910_P2_TMGTHRISE 0xf253
+#define FSTV0910_P2_TMGLOCK_THRISE 0xf25300ff
+
+/* P2_TMGTHFALL */
+#define RSTV0910_P2_TMGTHFALL 0xf254
+#define FSTV0910_P2_TMGLOCK_THFALL 0xf25400ff
+
+/* P2_SFRUPRATIO */
+#define RSTV0910_P2_SFRUPRATIO 0xf255
+#define FSTV0910_P2_SFR_UPRATIO 0xf25500ff
+
+/* P2_SFRLOWRATIO */
+#define RSTV0910_P2_SFRLOWRATIO 0xf256
+#define FSTV0910_P2_SFR_LOWRATIO 0xf25600ff
+
+/* P2_KTTMG */
+#define RSTV0910_P2_KTTMG 0xf257
+#define FSTV0910_P2_KT_TMG_EXP 0xf25700f0
+
+/* P2_KREFTMG */
+#define RSTV0910_P2_KREFTMG 0xf258
+#define FSTV0910_P2_KREF_TMG 0xf25800ff
+
+/* P2_SFRSTEP */
+#define RSTV0910_P2_SFRSTEP 0xf259
+#define FSTV0910_P2_SFR_SCANSTEP 0xf25900f0
+#define FSTV0910_P2_SFR_CENTERSTEP 0xf259000f
+
+/* P2_TMGCFG2 */
+#define RSTV0910_P2_TMGCFG2 0xf25a
+#define FSTV0910_P2_DIS_AUTOSAMP 0xf25a0008
+#define FSTV0910_P2_SFRRATIO_FINE 0xf25a0001
+
+/* P2_KREFTMG2 */
+#define RSTV0910_P2_KREFTMG2 0xf25b
+#define FSTV0910_P2_KREF_TMG2 0xf25b00ff
+
+/* P2_TMGCFG3 */
+#define RSTV0910_P2_TMGCFG3 0xf25d
+#define FSTV0910_P2_CONT_TMGCENTER 0xf25d0008
+#define FSTV0910_P2_AUTO_GUP 0xf25d0004
+#define FSTV0910_P2_AUTO_GLOW 0xf25d0002
+
+/* P2_SFRINIT1 */
+#define RSTV0910_P2_SFRINIT1 0xf25e
+#define FSTV0910_P2_SFR_INIT1 0xf25e00ff
+
+/* P2_SFRINIT0 */
+#define RSTV0910_P2_SFRINIT0 0xf25f
+#define FSTV0910_P2_SFR_INIT0 0xf25f00ff
+
+/* P2_SFRUP1 */
+#define RSTV0910_P2_SFRUP1 0xf260
+#define FSTV0910_P2_SYMB_FREQ_UP1 0xf26000ff
+
+/* P2_SFRUP0 */
+#define RSTV0910_P2_SFRUP0 0xf261
+#define FSTV0910_P2_SYMB_FREQ_UP0 0xf26100ff
+
+/* P2_SFRLOW1 */
+#define RSTV0910_P2_SFRLOW1 0xf262
+#define FSTV0910_P2_SYMB_FREQ_LOW1 0xf26200ff
+
+/* P2_SFRLOW0 */
+#define RSTV0910_P2_SFRLOW0 0xf263
+#define FSTV0910_P2_SYMB_FREQ_LOW0 0xf26300ff
+
+/* P2_SFR3 */
+#define RSTV0910_P2_SFR3 0xf264
+#define FSTV0910_P2_SYMB_FREQ3 0xf26400ff
+
+/* P2_SFR2 */
+#define RSTV0910_P2_SFR2 0xf265
+#define FSTV0910_P2_SYMB_FREQ2 0xf26500ff
+
+/* P2_SFR1 */
+#define RSTV0910_P2_SFR1 0xf266
+#define FSTV0910_P2_SYMB_FREQ1 0xf26600ff
+
+/* P2_SFR0 */
+#define RSTV0910_P2_SFR0 0xf267
+#define FSTV0910_P2_SYMB_FREQ0 0xf26700ff
+
+/* P2_TMGREG2 */
+#define RSTV0910_P2_TMGREG2 0xf268
+#define FSTV0910_P2_TMGREG2 0xf26800ff
+
+/* P2_TMGREG1 */
+#define RSTV0910_P2_TMGREG1 0xf269
+#define FSTV0910_P2_TMGREG1 0xf26900ff
+
+/* P2_TMGREG0 */
+#define RSTV0910_P2_TMGREG0 0xf26a
+#define FSTV0910_P2_TMGREG0 0xf26a00ff
+
+/* P2_TMGLOCK1 */
+#define RSTV0910_P2_TMGLOCK1 0xf26b
+#define FSTV0910_P2_TMGLOCK_LEVEL1 0xf26b01ff
+
+/* P2_TMGLOCK0 */
+#define RSTV0910_P2_TMGLOCK0 0xf26c
+#define FSTV0910_P2_TMGLOCK_LEVEL0 0xf26c00ff
+
+/* P2_TMGOBS */
+#define RSTV0910_P2_TMGOBS 0xf26d
+#define FSTV0910_P2_ROLLOFF_STATUS 0xf26d00c0
+
+/* P2_EQUALCFG */
+#define RSTV0910_P2_EQUALCFG 0xf26f
+#define FSTV0910_P2_EQUAL_ON 0xf26f0040
+#define FSTV0910_P2_MU_EQUALDFE 0xf26f0007
+
+/* P2_EQUAI1 */
+#define RSTV0910_P2_EQUAI1 0xf270
+#define FSTV0910_P2_EQUA_ACCI1 0xf27001ff
+
+/* P2_EQUAQ1 */
+#define RSTV0910_P2_EQUAQ1 0xf271
+#define FSTV0910_P2_EQUA_ACCQ1 0xf27101ff
+
+/* P2_EQUAI2 */
+#define RSTV0910_P2_EQUAI2 0xf272
+#define FSTV0910_P2_EQUA_ACCI2 0xf27201ff
+
+/* P2_EQUAQ2 */
+#define RSTV0910_P2_EQUAQ2 0xf273
+#define FSTV0910_P2_EQUA_ACCQ2 0xf27301ff
+
+/* P2_EQUAI3 */
+#define RSTV0910_P2_EQUAI3 0xf274
+#define FSTV0910_P2_EQUA_ACCI3 0xf27401ff
+
+/* P2_EQUAQ3 */
+#define RSTV0910_P2_EQUAQ3 0xf275
+#define FSTV0910_P2_EQUA_ACCQ3 0xf27501ff
+
+/* P2_EQUAI4 */
+#define RSTV0910_P2_EQUAI4 0xf276
+#define FSTV0910_P2_EQUA_ACCI4 0xf27601ff
+
+/* P2_EQUAQ4 */
+#define RSTV0910_P2_EQUAQ4 0xf277
+#define FSTV0910_P2_EQUA_ACCQ4 0xf27701ff
+
+/* P2_EQUAI5 */
+#define RSTV0910_P2_EQUAI5 0xf278
+#define FSTV0910_P2_EQUA_ACCI5 0xf27801ff
+
+/* P2_EQUAQ5 */
+#define RSTV0910_P2_EQUAQ5 0xf279
+#define FSTV0910_P2_EQUA_ACCQ5 0xf27901ff
+
+/* P2_EQUAI6 */
+#define RSTV0910_P2_EQUAI6 0xf27a
+#define FSTV0910_P2_EQUA_ACCI6 0xf27a01ff
+
+/* P2_EQUAQ6 */
+#define RSTV0910_P2_EQUAQ6 0xf27b
+#define FSTV0910_P2_EQUA_ACCQ6 0xf27b01ff
+
+/* P2_EQUAI7 */
+#define RSTV0910_P2_EQUAI7 0xf27c
+#define FSTV0910_P2_EQUA_ACCI7 0xf27c01ff
+
+/* P2_EQUAQ7 */
+#define RSTV0910_P2_EQUAQ7 0xf27d
+#define FSTV0910_P2_EQUA_ACCQ7 0xf27d01ff
+
+/* P2_EQUAI8 */
+#define RSTV0910_P2_EQUAI8 0xf27e
+#define FSTV0910_P2_EQUA_ACCI8 0xf27e01ff
+
+/* P2_EQUAQ8 */
+#define RSTV0910_P2_EQUAQ8 0xf27f
+#define FSTV0910_P2_EQUA_ACCQ8 0xf27f01ff
+
+/* P2_NNOSDATAT1 */
+#define RSTV0910_P2_NNOSDATAT1 0xf280
+#define FSTV0910_P2_NOSDATAT_NORMED1 0xf28000ff
+
+/* P2_NNOSDATAT0 */
+#define RSTV0910_P2_NNOSDATAT0 0xf281
+#define FSTV0910_P2_NOSDATAT_NORMED0 0xf28100ff
+
+/* P2_NNOSDATA1 */
+#define RSTV0910_P2_NNOSDATA1 0xf282
+#define FSTV0910_P2_NOSDATA_NORMED1 0xf28200ff
+
+/* P2_NNOSDATA0 */
+#define RSTV0910_P2_NNOSDATA0 0xf283
+#define FSTV0910_P2_NOSDATA_NORMED0 0xf28300ff
+
+/* P2_NNOSPLHT1 */
+#define RSTV0910_P2_NNOSPLHT1 0xf284
+#define FSTV0910_P2_NOSPLHT_NORMED1 0xf28400ff
+
+/* P2_NNOSPLHT0 */
+#define RSTV0910_P2_NNOSPLHT0 0xf285
+#define FSTV0910_P2_NOSPLHT_NORMED0 0xf28500ff
+
+/* P2_NNOSPLH1 */
+#define RSTV0910_P2_NNOSPLH1 0xf286
+#define FSTV0910_P2_NOSPLH_NORMED1 0xf28600ff
+
+/* P2_NNOSPLH0 */
+#define RSTV0910_P2_NNOSPLH0 0xf287
+#define FSTV0910_P2_NOSPLH_NORMED0 0xf28700ff
+
+/* P2_NOSDATAT1 */
+#define RSTV0910_P2_NOSDATAT1 0xf288
+#define FSTV0910_P2_NOSDATAT_UNNORMED1 0xf28800ff
+
+/* P2_NOSDATAT0 */
+#define RSTV0910_P2_NOSDATAT0 0xf289
+#define FSTV0910_P2_NOSDATAT_UNNORMED0 0xf28900ff
+
+/* P2_NNOSFRAME1 */
+#define RSTV0910_P2_NNOSFRAME1 0xf28a
+#define FSTV0910_P2_NOSFRAME_NORMED1 0xf28a00ff
+
+/* P2_NNOSFRAME0 */
+#define RSTV0910_P2_NNOSFRAME0 0xf28b
+#define FSTV0910_P2_NOSFRAME_NORMED0 0xf28b00ff
+
+/* P2_NNOSRAD1 */
+#define RSTV0910_P2_NNOSRAD1 0xf28c
+#define FSTV0910_P2_NOSRADIAL_NORMED1 0xf28c00ff
+
+/* P2_NNOSRAD0 */
+#define RSTV0910_P2_NNOSRAD0 0xf28d
+#define FSTV0910_P2_NOSRADIAL_NORMED0 0xf28d00ff
+
+/* P2_NOSCFGF1 */
+#define RSTV0910_P2_NOSCFGF1 0xf28e
+#define FSTV0910_P2_LOWNOISE_MESURE 0xf28e0080
+#define FSTV0910_P2_NOS_DELFRAME 0xf28e0040
+#define FSTV0910_P2_NOSDATA_MODE 0xf28e0030
+#define FSTV0910_P2_FRAMESEL_TYPESEL 0xf28e000c
+#define FSTV0910_P2_FRAMESEL_TYPE 0xf28e0003
+
+/* P2_NOSCFGF2 */
+#define RSTV0910_P2_NOSCFGF2 0xf28f
+#define FSTV0910_P2_DIS_NOSPILOTS 0xf28f0080
+#define FSTV0910_P2_FRAMESEL_MODCODSEL 0xf28f0060
+#define FSTV0910_P2_FRAMESEL_MODCOD 0xf28f001f
+
+/* P2_CAR2CFG */
+#define RSTV0910_P2_CAR2CFG 0xf290
+#define FSTV0910_P2_ROTA2ON 0xf2900004
+#define FSTV0910_P2_PH_DET_ALGO2 0xf2900003
+
+/* P2_CFR2CFR1 */
+#define RSTV0910_P2_CFR2CFR1 0xf291
+#define FSTV0910_P2_EN_S2CAR2CENTER 0xf2910020
+#define FSTV0910_P2_CFR2TOCFR1_BETA 0xf2910007
+
+/* P2_CAR3CFG */
+#define RSTV0910_P2_CAR3CFG 0xf292
+#define FSTV0910_P2_CARRIER23_MODE 0xf29200c0
+#define FSTV0910_P2_CAR3INTERM_DVBS1 0xf2920020
+#define FSTV0910_P2_ABAMPLIF_MODE 0xf2920018
+#define FSTV0910_P2_CARRIER3_ALPHA3DL 0xf2920007
+
+/* P2_CFR22 */
+#define RSTV0910_P2_CFR22 0xf293
+#define FSTV0910_P2_CAR2_FREQ2 0xf29301ff
+
+/* P2_CFR21 */
+#define RSTV0910_P2_CFR21 0xf294
+#define FSTV0910_P2_CAR2_FREQ1 0xf29400ff
+
+/* P2_CFR20 */
+#define RSTV0910_P2_CFR20 0xf295
+#define FSTV0910_P2_CAR2_FREQ0 0xf29500ff
+
+/* P2_ACLC2S2Q */
+#define RSTV0910_P2_ACLC2S2Q 0xf297
+#define FSTV0910_P2_ENAB_SPSKSYMB 0xf2970080
+#define FSTV0910_P2_CAR2S2_Q_ALPH_M 0xf2970030
+#define FSTV0910_P2_CAR2S2_Q_ALPH_E 0xf297000f
+
+/* P2_ACLC2S28 */
+#define RSTV0910_P2_ACLC2S28 0xf298
+#define FSTV0910_P2_CAR2S2_8_ALPH_M 0xf2980030
+#define FSTV0910_P2_CAR2S2_8_ALPH_E 0xf298000f
+
+/* P2_ACLC2S216A */
+#define RSTV0910_P2_ACLC2S216A 0xf299
+#define FSTV0910_P2_CAR2S2_16A_ALPH_M 0xf2990030
+#define FSTV0910_P2_CAR2S2_16A_ALPH_E 0xf299000f
+
+/* P2_ACLC2S232A */
+#define RSTV0910_P2_ACLC2S232A 0xf29a
+#define FSTV0910_P2_CAR2S2_32A_ALPH_M 0xf29a0030
+#define FSTV0910_P2_CAR2S2_32A_ALPH_E 0xf29a000f
+
+/* P2_BCLC2S2Q */
+#define RSTV0910_P2_BCLC2S2Q 0xf29c
+#define FSTV0910_P2_CAR2S2_Q_BETA_M 0xf29c0030
+#define FSTV0910_P2_CAR2S2_Q_BETA_E 0xf29c000f
+
+/* P2_BCLC2S28 */
+#define RSTV0910_P2_BCLC2S28 0xf29d
+#define FSTV0910_P2_CAR2S2_8_BETA_M 0xf29d0030
+#define FSTV0910_P2_CAR2S2_8_BETA_E 0xf29d000f
+
+/* P2_BCLC2S216A */
+#define RSTV0910_P2_BCLC2S216A 0xf29e
+#define FSTV0910_P2_DVBS2S216A_NIP 0xf29e0080
+#define FSTV0910_P2_CAR2S2_16A_BETA_M 0xf29e0030
+#define FSTV0910_P2_CAR2S2_16A_BETA_E 0xf29e000f
+
+/* P2_BCLC2S232A */
+#define RSTV0910_P2_BCLC2S232A 0xf29f
+#define FSTV0910_P2_DVBS2S232A_NIP 0xf29f0080
+#define FSTV0910_P2_CAR2S2_32A_BETA_M 0xf29f0030
+#define FSTV0910_P2_CAR2S2_32A_BETA_E 0xf29f000f
+
+/* P2_PLROOT2 */
+#define RSTV0910_P2_PLROOT2 0xf2ac
+#define FSTV0910_P2_PLSCRAMB_MODE 0xf2ac000c
+#define FSTV0910_P2_PLSCRAMB_ROOT2 0xf2ac0003
+
+/* P2_PLROOT1 */
+#define RSTV0910_P2_PLROOT1 0xf2ad
+#define FSTV0910_P2_PLSCRAMB_ROOT1 0xf2ad00ff
+
+/* P2_PLROOT0 */
+#define RSTV0910_P2_PLROOT0 0xf2ae
+#define FSTV0910_P2_PLSCRAMB_ROOT0 0xf2ae00ff
+
+/* P2_MODCODLST0 */
+#define RSTV0910_P2_MODCODLST0 0xf2b0
+#define FSTV0910_P2_NACCES_MODCODCH 0xf2b00001
+
+/* P2_MODCODLST1 */
+#define RSTV0910_P2_MODCODLST1 0xf2b1
+#define FSTV0910_P2_SYMBRATE_FILTER 0xf2b10008
+#define FSTV0910_P2_NRESET_MODCODLST 0xf2b10004
+#define FSTV0910_P2_DIS_32PSK_9_10 0xf2b10003
+
+/* P2_MODCODLST2 */
+#define RSTV0910_P2_MODCODLST2 0xf2b2
+#define FSTV0910_P2_DIS_32PSK_8_9 0xf2b200f0
+#define FSTV0910_P2_DIS_32PSK_5_6 0xf2b2000f
+
+/* P2_MODCODLST3 */
+#define RSTV0910_P2_MODCODLST3 0xf2b3
+#define FSTV0910_P2_DIS_32PSK_4_5 0xf2b300f0
+#define FSTV0910_P2_DIS_32PSK_3_4 0xf2b3000f
+
+/* P2_MODCODLST4 */
+#define RSTV0910_P2_MODCODLST4 0xf2b4
+#define FSTV0910_P2_DUMMYPL_PILOT 0xf2b40080
+#define FSTV0910_P2_DUMMYPL_NOPILOT 0xf2b40040
+#define FSTV0910_P2_DIS_16PSK_9_10 0xf2b40030
+#define FSTV0910_P2_DIS_16PSK_8_9 0xf2b4000f
+
+/* P2_MODCODLST5 */
+#define RSTV0910_P2_MODCODLST5 0xf2b5
+#define FSTV0910_P2_DIS_16PSK_5_6 0xf2b500f0
+#define FSTV0910_P2_DIS_16PSK_4_5 0xf2b5000f
+
+/* P2_MODCODLST6 */
+#define RSTV0910_P2_MODCODLST6 0xf2b6
+#define FSTV0910_P2_DIS_16PSK_3_4 0xf2b600f0
+#define FSTV0910_P2_DIS_16PSK_2_3 0xf2b6000f
+
+/* P2_MODCODLST7 */
+#define RSTV0910_P2_MODCODLST7 0xf2b7
+#define FSTV0910_P2_MODCOD_NNOSFILTER 0xf2b70080
+#define FSTV0910_P2_DIS_8PSK_9_10 0xf2b70030
+#define FSTV0910_P2_DIS_8PSK_8_9 0xf2b7000f
+
+/* P2_MODCODLST8 */
+#define RSTV0910_P2_MODCODLST8 0xf2b8
+#define FSTV0910_P2_DIS_8PSK_5_6 0xf2b800f0
+#define FSTV0910_P2_DIS_8PSK_3_4 0xf2b8000f
+
+/* P2_MODCODLST9 */
+#define RSTV0910_P2_MODCODLST9 0xf2b9
+#define FSTV0910_P2_DIS_8PSK_2_3 0xf2b900f0
+#define FSTV0910_P2_DIS_8PSK_3_5 0xf2b9000f
+
+/* P2_MODCODLSTA */
+#define RSTV0910_P2_MODCODLSTA 0xf2ba
+#define FSTV0910_P2_NOSFILTER_LIMITE 0xf2ba0080
+#define FSTV0910_P2_DIS_QPSK_9_10 0xf2ba0030
+#define FSTV0910_P2_DIS_QPSK_8_9 0xf2ba000f
+
+/* P2_MODCODLSTB */
+#define RSTV0910_P2_MODCODLSTB 0xf2bb
+#define FSTV0910_P2_DIS_QPSK_5_6 0xf2bb00f0
+#define FSTV0910_P2_DIS_QPSK_4_5 0xf2bb000f
+
+/* P2_MODCODLSTC */
+#define RSTV0910_P2_MODCODLSTC 0xf2bc
+#define FSTV0910_P2_DIS_QPSK_3_4 0xf2bc00f0
+#define FSTV0910_P2_DIS_QPSK_2_3 0xf2bc000f
+
+/* P2_MODCODLSTD */
+#define RSTV0910_P2_MODCODLSTD 0xf2bd
+#define FSTV0910_P2_DIS_QPSK_3_5 0xf2bd00f0
+#define FSTV0910_P2_DIS_QPSK_1_2 0xf2bd000f
+
+/* P2_MODCODLSTE */
+#define RSTV0910_P2_MODCODLSTE 0xf2be
+#define FSTV0910_P2_DIS_QPSK_2_5 0xf2be00f0
+#define FSTV0910_P2_DIS_QPSK_1_3 0xf2be000f
+
+/* P2_MODCODLSTF */
+#define RSTV0910_P2_MODCODLSTF 0xf2bf
+#define FSTV0910_P2_DIS_QPSK_1_4 0xf2bf00f0
+#define FSTV0910_P2_DEMOD_INVMODLST 0xf2bf0008
+#define FSTV0910_P2_DEMODOUT_ENABLE 0xf2bf0004
+#define FSTV0910_P2_DDEMOD_NSET 0xf2bf0002
+#define FSTV0910_P2_MODCOD_NSTOCK 0xf2bf0001
+
+/* P2_GAUSSR0 */
+#define RSTV0910_P2_GAUSSR0 0xf2c0
+#define FSTV0910_P2_EN_CCIMODE 0xf2c00080
+#define FSTV0910_P2_R0_GAUSSIEN 0xf2c0007f
+
+/* P2_CCIR0 */
+#define RSTV0910_P2_CCIR0 0xf2c1
+#define FSTV0910_P2_CCIDETECT_PLHONLY 0xf2c10080
+#define FSTV0910_P2_R0_CCI 0xf2c1007f
+
+/* P2_CCIQUANT */
+#define RSTV0910_P2_CCIQUANT 0xf2c2
+#define FSTV0910_P2_CCI_BETA 0xf2c200e0
+#define FSTV0910_P2_CCI_QUANT 0xf2c2001f
+
+/* P2_CCITHRES */
+#define RSTV0910_P2_CCITHRES 0xf2c3
+#define FSTV0910_P2_CCI_THRESHOLD 0xf2c300ff
+
+/* P2_CCIACC */
+#define RSTV0910_P2_CCIACC 0xf2c4
+#define FSTV0910_P2_CCI_VALUE 0xf2c400ff
+
+/* P2_DSTATUS4 */
+#define RSTV0910_P2_DSTATUS4 0xf2c5
+#define FSTV0910_P2_RAINFADE_DETECT 0xf2c50080
+#define FSTV0910_P2_NOTHRES2_FAIL 0xf2c50040
+#define FSTV0910_P2_NOTHRES1_FAIL 0xf2c50020
+#define FSTV0910_P2_DMDPROG_ERROR 0xf2c50004
+#define FSTV0910_P2_CSTENV_DETECT 0xf2c50002
+#define FSTV0910_P2_DETECTION_TRIAX 0xf2c50001
+
+/* P2_DMDRESCFG */
+#define RSTV0910_P2_DMDRESCFG 0xf2c6
+#define FSTV0910_P2_DMDRES_RESET 0xf2c60080
+#define FSTV0910_P2_DMDRES_STRALL 0xf2c60008
+#define FSTV0910_P2_DMDRES_NEWONLY 0xf2c60004
+#define FSTV0910_P2_DMDRES_NOSTORE 0xf2c60002
+
+/* P2_DMDRESADR */
+#define RSTV0910_P2_DMDRESADR 0xf2c7
+#define FSTV0910_P2_DMDRES_VALIDCFR 0xf2c70040
+#define FSTV0910_P2_DMDRES_MEMFULL 0xf2c70030
+#define FSTV0910_P2_DMDRES_RESNBR 0xf2c7000f
+
+/* P2_DMDRESDATA7 */
+#define RSTV0910_P2_DMDRESDATA7 0xf2c8
+#define FSTV0910_P2_DMDRES_DATA7 0xf2c800ff
+
+/* P2_DMDRESDATA6 */
+#define RSTV0910_P2_DMDRESDATA6 0xf2c9
+#define FSTV0910_P2_DMDRES_DATA6 0xf2c900ff
+
+/* P2_DMDRESDATA5 */
+#define RSTV0910_P2_DMDRESDATA5 0xf2ca
+#define FSTV0910_P2_DMDRES_DATA5 0xf2ca00ff
+
+/* P2_DMDRESDATA4 */
+#define RSTV0910_P2_DMDRESDATA4 0xf2cb
+#define FSTV0910_P2_DMDRES_DATA4 0xf2cb00ff
+
+/* P2_DMDRESDATA3 */
+#define RSTV0910_P2_DMDRESDATA3 0xf2cc
+#define FSTV0910_P2_DMDRES_DATA3 0xf2cc00ff
+
+/* P2_DMDRESDATA2 */
+#define RSTV0910_P2_DMDRESDATA2 0xf2cd
+#define FSTV0910_P2_DMDRES_DATA2 0xf2cd00ff
+
+/* P2_DMDRESDATA1 */
+#define RSTV0910_P2_DMDRESDATA1 0xf2ce
+#define FSTV0910_P2_DMDRES_DATA1 0xf2ce00ff
+
+/* P2_DMDRESDATA0 */
+#define RSTV0910_P2_DMDRESDATA0 0xf2cf
+#define FSTV0910_P2_DMDRES_DATA0 0xf2cf00ff
+
+/* P2_FFEI1 */
+#define RSTV0910_P2_FFEI1 0xf2d0
+#define FSTV0910_P2_FFE_ACCI1 0xf2d001ff
+
+/* P2_FFEQ1 */
+#define RSTV0910_P2_FFEQ1 0xf2d1
+#define FSTV0910_P2_FFE_ACCQ1 0xf2d101ff
+
+/* P2_FFEI2 */
+#define RSTV0910_P2_FFEI2 0xf2d2
+#define FSTV0910_P2_FFE_ACCI2 0xf2d201ff
+
+/* P2_FFEQ2 */
+#define RSTV0910_P2_FFEQ2 0xf2d3
+#define FSTV0910_P2_FFE_ACCQ2 0xf2d301ff
+
+/* P2_FFEI3 */
+#define RSTV0910_P2_FFEI3 0xf2d4
+#define FSTV0910_P2_FFE_ACCI3 0xf2d401ff
+
+/* P2_FFEQ3 */
+#define RSTV0910_P2_FFEQ3 0xf2d5
+#define FSTV0910_P2_FFE_ACCQ3 0xf2d501ff
+
+/* P2_FFEI4 */
+#define RSTV0910_P2_FFEI4 0xf2d6
+#define FSTV0910_P2_FFE_ACCI4 0xf2d601ff
+
+/* P2_FFEQ4 */
+#define RSTV0910_P2_FFEQ4 0xf2d7
+#define FSTV0910_P2_FFE_ACCQ4 0xf2d701ff
+
+/* P2_FFECFG */
+#define RSTV0910_P2_FFECFG 0xf2d8
+#define FSTV0910_P2_EQUALFFE_ON 0xf2d80040
+#define FSTV0910_P2_EQUAL_USEDSYMB 0xf2d80030
+#define FSTV0910_P2_MU_EQUALFFE 0xf2d80007
+
+/* P2_TNRCFG2 */
+#define RSTV0910_P2_TNRCFG2 0xf2e1
+#define FSTV0910_P2_TUN_IQSWAP 0xf2e10080
+
+/* P2_SMAPCOEF7 */
+#define RSTV0910_P2_SMAPCOEF7 0xf300
+#define FSTV0910_P2_DIS_QSCALE 0xf3000080
+#define FSTV0910_P2_SMAPCOEF_Q_LLR12 0xf300017f
+
+/* P2_SMAPCOEF6 */
+#define RSTV0910_P2_SMAPCOEF6 0xf301
+#define FSTV0910_P2_DIS_AGC2SCALE 0xf3010080
+#define FSTV0910_P2_ADJ_8PSKLLR1 0xf3010004
+#define FSTV0910_P2_OLD_8PSKLLR1 0xf3010002
+#define FSTV0910_P2_DIS_AB8PSK 0xf3010001
+
+/* P2_SMAPCOEF5 */
+#define RSTV0910_P2_SMAPCOEF5 0xf302
+#define FSTV0910_P2_DIS_8SCALE 0xf3020080
+#define FSTV0910_P2_SMAPCOEF_8P_LLR23 0xf302017f
+
+/* P2_SMAPCOEF4 */
+#define RSTV0910_P2_SMAPCOEF4 0xf303
+#define FSTV0910_P2_SMAPCOEF_16APSK_LLR12 0xf303017f
+
+/* P2_SMAPCOEF3 */
+#define RSTV0910_P2_SMAPCOEF3 0xf304
+#define FSTV0910_P2_SMAPCOEF_16APSK_LLR34 0xf304017f
+
+/* P2_SMAPCOEF2 */
+#define RSTV0910_P2_SMAPCOEF2 0xf305
+#define FSTV0910_P2_SMAPCOEF_32APSK_R2R3 0xf30501f0
+#define FSTV0910_P2_SMAPCOEF_32APSK_LLR2 0xf305010f
+
+/* P2_SMAPCOEF1 */
+#define RSTV0910_P2_SMAPCOEF1 0xf306
+#define FSTV0910_P2_DIS_16SCALE 0xf3060080
+#define FSTV0910_P2_SMAPCOEF_32_LLR34 0xf306017f
+
+/* P2_SMAPCOEF0 */
+#define RSTV0910_P2_SMAPCOEF0 0xf307
+#define FSTV0910_P2_DIS_32SCALE 0xf3070080
+#define FSTV0910_P2_SMAPCOEF_32_LLR15 0xf307017f
+
+/* P2_NOSTHRES1 */
+#define RSTV0910_P2_NOSTHRES1 0xf309
+#define FSTV0910_P2_NOS_THRESHOLD1 0xf30900ff
+
+/* P2_NOSTHRES2 */
+#define RSTV0910_P2_NOSTHRES2 0xf30a
+#define FSTV0910_P2_NOS_THRESHOLD2 0xf30a00ff
+
+/* P2_NOSDIFF1 */
+#define RSTV0910_P2_NOSDIFF1 0xf30b
+#define FSTV0910_P2_NOSTHRES1_DIFF 0xf30b00ff
+
+/* P2_RAINFADE */
+#define RSTV0910_P2_RAINFADE 0xf30c
+#define FSTV0910_P2_NOSTHRES_DATAT 0xf30c0080
+#define FSTV0910_P2_RAINFADE_CNLIMIT 0xf30c0070
+#define FSTV0910_P2_RAINFADE_TIMEOUT 0xf30c0007
+
+/* P2_NOSRAMCFG */
+#define RSTV0910_P2_NOSRAMCFG 0xf30d
+#define FSTV0910_P2_NOSRAM_ACTIVATION 0xf30d0030
+#define FSTV0910_P2_NOSRAM_CNRONLY 0xf30d0008
+#define FSTV0910_P2_NOSRAM_LGNCNR1 0xf30d0007
+
+/* P2_NOSRAMPOS */
+#define RSTV0910_P2_NOSRAMPOS 0xf30e
+#define FSTV0910_P2_NOSRAM_LGNCNR0 0xf30e00f0
+#define FSTV0910_P2_NOSRAM_VALIDE 0xf30e0004
+#define FSTV0910_P2_NOSRAM_CNRVAL1 0xf30e0003
+
+/* P2_NOSRAMVAL */
+#define RSTV0910_P2_NOSRAMVAL 0xf30f
+#define FSTV0910_P2_NOSRAM_CNRVAL0 0xf30f00ff
+
+/* P2_DMDPLHSTAT */
+#define RSTV0910_P2_DMDPLHSTAT 0xf320
+#define FSTV0910_P2_PLH_STATISTIC 0xf32000ff
+
+/* P2_LOCKTIME3 */
+#define RSTV0910_P2_LOCKTIME3 0xf322
+#define FSTV0910_P2_DEMOD_LOCKTIME3 0xf32200ff
+
+/* P2_LOCKTIME2 */
+#define RSTV0910_P2_LOCKTIME2 0xf323
+#define FSTV0910_P2_DEMOD_LOCKTIME2 0xf32300ff
+
+/* P2_LOCKTIME1 */
+#define RSTV0910_P2_LOCKTIME1 0xf324
+#define FSTV0910_P2_DEMOD_LOCKTIME1 0xf32400ff
+
+/* P2_LOCKTIME0 */
+#define RSTV0910_P2_LOCKTIME0 0xf325
+#define FSTV0910_P2_DEMOD_LOCKTIME0 0xf32500ff
+
+/* P2_VITSCALE */
+#define RSTV0910_P2_VITSCALE 0xf332
+#define FSTV0910_P2_NVTH_NOSRANGE 0xf3320080
+#define FSTV0910_P2_VERROR_MAXMODE 0xf3320040
+#define FSTV0910_P2_NSLOWSN_LOCKED 0xf3320008
+#define FSTV0910_P2_DIS_RSFLOCK 0xf3320002
+
+/* P2_FECM */
+#define RSTV0910_P2_FECM 0xf333
+#define FSTV0910_P2_DSS_DVB 0xf3330080
+#define FSTV0910_P2_DSS_SRCH 0xf3330010
+#define FSTV0910_P2_SYNCVIT 0xf3330002
+#define FSTV0910_P2_IQINV 0xf3330001
+
+/* P2_VTH12 */
+#define RSTV0910_P2_VTH12 0xf334
+#define FSTV0910_P2_VTH12 0xf33400ff
+
+/* P2_VTH23 */
+#define RSTV0910_P2_VTH23 0xf335
+#define FSTV0910_P2_VTH23 0xf33500ff
+
+/* P2_VTH34 */
+#define RSTV0910_P2_VTH34 0xf336
+#define FSTV0910_P2_VTH34 0xf33600ff
+
+/* P2_VTH56 */
+#define RSTV0910_P2_VTH56 0xf337
+#define FSTV0910_P2_VTH56 0xf33700ff
+
+/* P2_VTH67 */
+#define RSTV0910_P2_VTH67 0xf338
+#define FSTV0910_P2_VTH67 0xf33800ff
+
+/* P2_VTH78 */
+#define RSTV0910_P2_VTH78 0xf339
+#define FSTV0910_P2_VTH78 0xf33900ff
+
+/* P2_VITCURPUN */
+#define RSTV0910_P2_VITCURPUN 0xf33a
+#define FSTV0910_P2_VIT_CURPUN 0xf33a001f
+
+/* P2_VERROR */
+#define RSTV0910_P2_VERROR 0xf33b
+#define FSTV0910_P2_REGERR_VIT 0xf33b00ff
+
+/* P2_PRVIT */
+#define RSTV0910_P2_PRVIT 0xf33c
+#define FSTV0910_P2_DIS_VTHLOCK 0xf33c0040
+#define FSTV0910_P2_E7_8VIT 0xf33c0020
+#define FSTV0910_P2_E6_7VIT 0xf33c0010
+#define FSTV0910_P2_E5_6VIT 0xf33c0008
+#define FSTV0910_P2_E3_4VIT 0xf33c0004
+#define FSTV0910_P2_E2_3VIT 0xf33c0002
+#define FSTV0910_P2_E1_2VIT 0xf33c0001
+
+/* P2_VAVSRVIT */
+#define RSTV0910_P2_VAVSRVIT 0xf33d
+#define FSTV0910_P2_AMVIT 0xf33d0080
+#define FSTV0910_P2_FROZENVIT 0xf33d0040
+#define FSTV0910_P2_SNVIT 0xf33d0030
+#define FSTV0910_P2_TOVVIT 0xf33d000c
+#define FSTV0910_P2_HYPVIT 0xf33d0003
+
+/* P2_VSTATUSVIT */
+#define RSTV0910_P2_VSTATUSVIT 0xf33e
+#define FSTV0910_P2_PRFVIT 0xf33e0010
+#define FSTV0910_P2_LOCKEDVIT 0xf33e0008
+
+/* P2_VTHINUSE */
+#define RSTV0910_P2_VTHINUSE 0xf33f
+#define FSTV0910_P2_VIT_INUSE 0xf33f00ff
+
+/* P2_KDIV12 */
+#define RSTV0910_P2_KDIV12 0xf340
+#define FSTV0910_P2_K_DIVIDER_12 0xf340007f
+
+/* P2_KDIV23 */
+#define RSTV0910_P2_KDIV23 0xf341
+#define FSTV0910_P2_K_DIVIDER_23 0xf341007f
+
+/* P2_KDIV34 */
+#define RSTV0910_P2_KDIV34 0xf342
+#define FSTV0910_P2_K_DIVIDER_34 0xf342007f
+
+/* P2_KDIV56 */
+#define RSTV0910_P2_KDIV56 0xf343
+#define FSTV0910_P2_K_DIVIDER_56 0xf343007f
+
+/* P2_KDIV67 */
+#define RSTV0910_P2_KDIV67 0xf344
+#define FSTV0910_P2_K_DIVIDER_67 0xf344007f
+
+/* P2_KDIV78 */
+#define RSTV0910_P2_KDIV78 0xf345
+#define FSTV0910_P2_K_DIVIDER_78 0xf345007f
+
+/* P2_TSPIDFLT1 */
+#define RSTV0910_P2_TSPIDFLT1 0xf346
+#define FSTV0910_P2_PIDFLT_ADDR 0xf34600ff
+
+/* P2_TSPIDFLT0 */
+#define RSTV0910_P2_TSPIDFLT0 0xf347
+#define FSTV0910_P2_PIDFLT_DATA 0xf34700ff
+
+/* P2_PDELCTRL0 */
+#define RSTV0910_P2_PDELCTRL0 0xf34f
+#define FSTV0910_P2_ISIOBS_MODE 0xf34f0030
+
+/* P2_PDELCTRL1 */
+#define RSTV0910_P2_PDELCTRL1 0xf350
+#define FSTV0910_P2_INV_MISMASK 0xf3500080
+#define FSTV0910_P2_FILTER_EN 0xf3500020
+#define FSTV0910_P2_HYSTEN 0xf3500008
+#define FSTV0910_P2_HYSTSWRST 0xf3500004
+#define FSTV0910_P2_EN_MIS00 0xf3500002
+#define FSTV0910_P2_ALGOSWRST 0xf3500001
+
+/* P2_PDELCTRL2 */
+#define RSTV0910_P2_PDELCTRL2 0xf351
+#define FSTV0910_P2_FORCE_CONTINUOUS 0xf3510080
+#define FSTV0910_P2_RESET_UPKO_COUNT 0xf3510040
+#define FSTV0910_P2_USER_PKTDELIN_NB 0xf3510020
+#define FSTV0910_P2_FRAME_MODE 0xf3510002
+
+/* P2_HYSTTHRESH */
+#define RSTV0910_P2_HYSTTHRESH 0xf354
+#define FSTV0910_P2_DELIN_LOCKTHRES 0xf35400f0
+#define FSTV0910_P2_DELIN_UNLOCKTHRES 0xf354000f
+
+/* P2_UPLCCST0 */
+#define RSTV0910_P2_UPLCCST0 0xf358
+#define FSTV0910_P2_UPL_CST0 0xf35800f8
+#define FSTV0910_P2_UPL_MODE 0xf3580007
+
+/* P2_ISIENTRY */
+#define RSTV0910_P2_ISIENTRY 0xf35e
+#define FSTV0910_P2_ISI_ENTRY 0xf35e00ff
+
+/* P2_ISIBITENA */
+#define RSTV0910_P2_ISIBITENA 0xf35f
+#define FSTV0910_P2_ISI_BIT_EN 0xf35f00ff
+
+/* P2_MATSTR1 */
+#define RSTV0910_P2_MATSTR1 0xf360
+#define FSTV0910_P2_MATYPE_CURRENT1 0xf36000ff
+
+/* P2_MATSTR0 */
+#define RSTV0910_P2_MATSTR0 0xf361
+#define FSTV0910_P2_MATYPE_CURRENT0 0xf36100ff
+
+/* P2_UPLSTR1 */
+#define RSTV0910_P2_UPLSTR1 0xf362
+#define FSTV0910_P2_UPL_CURRENT1 0xf36200ff
+
+/* P2_UPLSTR0 */
+#define RSTV0910_P2_UPLSTR0 0xf363
+#define FSTV0910_P2_UPL_CURRENT0 0xf36300ff
+
+/* P2_DFLSTR1 */
+#define RSTV0910_P2_DFLSTR1 0xf364
+#define FSTV0910_P2_DFL_CURRENT1 0xf36400ff
+
+/* P2_DFLSTR0 */
+#define RSTV0910_P2_DFLSTR0 0xf365
+#define FSTV0910_P2_DFL_CURRENT0 0xf36500ff
+
+/* P2_SYNCSTR */
+#define RSTV0910_P2_SYNCSTR 0xf366
+#define FSTV0910_P2_SYNC_CURRENT 0xf36600ff
+
+/* P2_SYNCDSTR1 */
+#define RSTV0910_P2_SYNCDSTR1 0xf367
+#define FSTV0910_P2_SYNCD_CURRENT1 0xf36700ff
+
+/* P2_SYNCDSTR0 */
+#define RSTV0910_P2_SYNCDSTR0 0xf368
+#define FSTV0910_P2_SYNCD_CURRENT0 0xf36800ff
+
+/* P2_PDELSTATUS1 */
+#define RSTV0910_P2_PDELSTATUS1 0xf369
+#define FSTV0910_P2_PKTDELIN_DELOCK 0xf3690080
+#define FSTV0910_P2_SYNCDUPDFL_BADDFL 0xf3690040
+#define FSTV0910_P2_UNACCEPTED_STREAM 0xf3690010
+#define FSTV0910_P2_BCH_ERROR_FLAG 0xf3690008
+#define FSTV0910_P2_PKTDELIN_LOCK 0xf3690002
+#define FSTV0910_P2_FIRST_LOCK 0xf3690001
+
+/* P2_PDELSTATUS2 */
+#define RSTV0910_P2_PDELSTATUS2 0xf36a
+#define FSTV0910_P2_FRAME_MODCOD 0xf36a007c
+#define FSTV0910_P2_FRAME_TYPE 0xf36a0003
+
+/* P2_BBFCRCKO1 */
+#define RSTV0910_P2_BBFCRCKO1 0xf36b
+#define FSTV0910_P2_BBHCRC_KOCNT1 0xf36b00ff
+
+/* P2_BBFCRCKO0 */
+#define RSTV0910_P2_BBFCRCKO0 0xf36c
+#define FSTV0910_P2_BBHCRC_KOCNT0 0xf36c00ff
+
+/* P2_UPCRCKO1 */
+#define RSTV0910_P2_UPCRCKO1 0xf36d
+#define FSTV0910_P2_PKTCRC_KOCNT1 0xf36d00ff
+
+/* P2_UPCRCKO0 */
+#define RSTV0910_P2_UPCRCKO0 0xf36e
+#define FSTV0910_P2_PKTCRC_KOCNT0 0xf36e00ff
+
+/* P2_PDELCTRL3 */
+#define RSTV0910_P2_PDELCTRL3 0xf36f
+#define FSTV0910_P2_NOFIFO_BCHERR 0xf36f0020
+#define FSTV0910_P2_PKTDELIN_DELACMERR 0xf36f0010
+
+/* P2_TSSTATEM */
+#define RSTV0910_P2_TSSTATEM 0xf370
+#define FSTV0910_P2_TSDIL_ON 0xf3700080
+#define FSTV0910_P2_TSRS_ON 0xf3700020
+#define FSTV0910_P2_TSDESCRAMB_ON 0xf3700010
+#define FSTV0910_P2_TSFRAME_MODE 0xf3700008
+#define FSTV0910_P2_TS_DISABLE 0xf3700004
+#define FSTV0910_P2_TSACM_MODE 0xf3700002
+#define FSTV0910_P2_TSOUT_NOSYNC 0xf3700001
+
+/* P2_TSSTATEL */
+#define RSTV0910_P2_TSSTATEL 0xf371
+#define FSTV0910_P2_TSNOSYNCBYTE 0xf3710080
+#define FSTV0910_P2_TSPARITY_ON 0xf3710040
+#define FSTV0910_P2_TSISSYI_ON 0xf3710008
+#define FSTV0910_P2_TSNPD_ON 0xf3710004
+#define FSTV0910_P2_TSCRC8_ON 0xf3710002
+#define FSTV0910_P2_TSDSS_PACKET 0xf3710001
+
+/* P2_TSCFGH */
+#define RSTV0910_P2_TSCFGH 0xf372
+#define FSTV0910_P2_TSFIFO_DVBCI 0xf3720080
+#define FSTV0910_P2_TSFIFO_SERIAL 0xf3720040
+#define FSTV0910_P2_TSFIFO_TEIUPDATE 0xf3720020
+#define FSTV0910_P2_TSFIFO_DUTY50 0xf3720010
+#define FSTV0910_P2_TSFIFO_HSGNLOUT 0xf3720008
+#define FSTV0910_P2_TSFIFO_ERRMODE 0xf3720006
+#define FSTV0910_P2_RST_HWARE 0xf3720001
+
+/* P2_TSCFGM */
+#define RSTV0910_P2_TSCFGM 0xf373
+#define FSTV0910_P2_TSFIFO_MANSPEED 0xf37300c0
+#define FSTV0910_P2_TSFIFO_PERMDATA 0xf3730020
+#define FSTV0910_P2_TSFIFO_NONEWSGNL 0xf3730010
+#define FSTV0910_P2_TSFIFO_INVDATA 0xf3730001
+
+/* P2_TSCFGL */
+#define RSTV0910_P2_TSCFGL 0xf374
+#define FSTV0910_P2_TSFIFO_BCLKDEL1CK 0xf37400c0
+#define FSTV0910_P2_BCHERROR_MODE 0xf3740030
+#define FSTV0910_P2_TSFIFO_NSGNL2DATA 0xf3740008
+#define FSTV0910_P2_TSFIFO_EMBINDVB 0xf3740004
+#define FSTV0910_P2_TSFIFO_BITSPEED 0xf3740003
+
+/* P2_TSSYNC */
+#define RSTV0910_P2_TSSYNC 0xf375
+#define FSTV0910_P2_TSFIFO_SYNCMODE 0xf3750018
+
+/* P2_TSINSDELH */
+#define RSTV0910_P2_TSINSDELH 0xf376
+#define FSTV0910_P2_TSDEL_SYNCBYTE 0xf3760080
+#define FSTV0910_P2_TSDEL_XXHEADER 0xf3760040
+#define FSTV0910_P2_TSDEL_DATAFIELD 0xf3760010
+#define FSTV0910_P2_TSINSDEL_RSPARITY 0xf3760002
+#define FSTV0910_P2_TSINSDEL_CRC8 0xf3760001
+
+/* P2_TSINSDELM */
+#define RSTV0910_P2_TSINSDELM 0xf377
+#define FSTV0910_P2_TSINS_EMODCOD 0xf3770010
+#define FSTV0910_P2_TSINS_TOKEN 0xf3770008
+#define FSTV0910_P2_TSINS_XXXERR 0xf3770004
+#define FSTV0910_P2_TSINS_MATYPE 0xf3770002
+#define FSTV0910_P2_TSINS_UPL 0xf3770001
+
+/* P2_TSINSDELL */
+#define RSTV0910_P2_TSINSDELL 0xf378
+#define FSTV0910_P2_TSINS_DFL 0xf3780080
+#define FSTV0910_P2_TSINS_SYNCD 0xf3780040
+#define FSTV0910_P2_TSINS_BLOCLEN 0xf3780020
+#define FSTV0910_P2_TSINS_SIGPCOUNT 0xf3780010
+#define FSTV0910_P2_TSINS_FIFO 0xf3780008
+#define FSTV0910_P2_TSINS_REALPACK 0xf3780004
+#define FSTV0910_P2_TSINS_TSCONFIG 0xf3780002
+#define FSTV0910_P2_TSINS_LATENCY 0xf3780001
+
+/* P2_TSDIVN */
+#define RSTV0910_P2_TSDIVN 0xf379
+#define FSTV0910_P2_TSFIFO_SPEEDMODE 0xf37900c0
+#define FSTV0910_P2_TSFIFO_RISEOK 0xf3790007
+
+/* P2_TSCFG4 */
+#define RSTV0910_P2_TSCFG4 0xf37a
+#define FSTV0910_P2_TSFIFO_TSSPEEDMODE 0xf37a00c0
+
+/* P2_TSSPEED */
+#define RSTV0910_P2_TSSPEED 0xf380
+#define FSTV0910_P2_TSFIFO_OUTSPEED 0xf38000ff
+
+/* P2_TSSTATUS */
+#define RSTV0910_P2_TSSTATUS 0xf381
+#define FSTV0910_P2_TSFIFO_LINEOK 0xf3810080
+#define FSTV0910_P2_TSFIFO_ERROR 0xf3810040
+#define FSTV0910_P2_TSFIFO_NOSYNC 0xf3810010
+#define FSTV0910_P2_TSREGUL_ERROR 0xf3810004
+#define FSTV0910_P2_DIL_READY 0xf3810001
+
+/* P2_TSSTATUS2 */
+#define RSTV0910_P2_TSSTATUS2 0xf382
+#define FSTV0910_P2_TSFIFO_DEMODSEL 0xf3820080
+#define FSTV0910_P2_TSFIFOSPEED_STORE 0xf3820040
+#define FSTV0910_P2_DILXX_RESET 0xf3820020
+#define FSTV0910_P2_SCRAMBDETECT 0xf3820002
+
+/* P2_TSBITRATE1 */
+#define RSTV0910_P2_TSBITRATE1 0xf383
+#define FSTV0910_P2_TSFIFO_BITRATE1 0xf38300ff
+
+/* P2_TSBITRATE0 */
+#define RSTV0910_P2_TSBITRATE0 0xf384
+#define FSTV0910_P2_TSFIFO_BITRATE0 0xf38400ff
+
+/* P2_TSPACKLEN1 */
+#define RSTV0910_P2_TSPACKLEN1 0xf385
+#define FSTV0910_P2_TSFIFO_PACKCPT 0xf38500e0
+
+/* P2_TSDLY2 */
+#define RSTV0910_P2_TSDLY2 0xf389
+#define FSTV0910_P2_SOFFIFO_LATENCY2 0xf389000f
+
+/* P2_TSDLY1 */
+#define RSTV0910_P2_TSDLY1 0xf38a
+#define FSTV0910_P2_SOFFIFO_LATENCY1 0xf38a00ff
+
+/* P2_TSDLY0 */
+#define RSTV0910_P2_TSDLY0 0xf38b
+#define FSTV0910_P2_SOFFIFO_LATENCY0 0xf38b00ff
+
+/* P2_TSNPDAV */
+#define RSTV0910_P2_TSNPDAV 0xf38c
+#define FSTV0910_P2_TSNPD_AVERAGE 0xf38c00ff
+
+/* P2_TSBUFSTAT2 */
+#define RSTV0910_P2_TSBUFSTAT2 0xf38d
+#define FSTV0910_P2_TSISCR_3BYTES 0xf38d0080
+#define FSTV0910_P2_TSISCR_NEWDATA 0xf38d0040
+#define FSTV0910_P2_TSISCR_BUFSTAT2 0xf38d003f
+
+/* P2_TSBUFSTAT1 */
+#define RSTV0910_P2_TSBUFSTAT1 0xf38e
+#define FSTV0910_P2_TSISCR_BUFSTAT1 0xf38e00ff
+
+/* P2_TSBUFSTAT0 */
+#define RSTV0910_P2_TSBUFSTAT0 0xf38f
+#define FSTV0910_P2_TSISCR_BUFSTAT0 0xf38f00ff
+
+/* P2_TSDEBUGL */
+#define RSTV0910_P2_TSDEBUGL 0xf391
+#define FSTV0910_P2_TSFIFO_ERROR_EVNT 0xf3910004
+#define FSTV0910_P2_TSFIFO_OVERFLOWM 0xf3910001
+
+/* P2_TSDLYSET2 */
+#define RSTV0910_P2_TSDLYSET2 0xf392
+#define FSTV0910_P2_SOFFIFO_OFFSET 0xf39200c0
+#define FSTV0910_P2_HYSTERESIS_THRESHOLD 0xf3920030
+#define FSTV0910_P2_SOFFIFO_SYMBOFFS2 0xf392000f
+
+/* P2_TSDLYSET1 */
+#define RSTV0910_P2_TSDLYSET1 0xf393
+#define FSTV0910_P2_SOFFIFO_SYMBOFFS1 0xf39300ff
+
+/* P2_TSDLYSET0 */
+#define RSTV0910_P2_TSDLYSET0 0xf394
+#define FSTV0910_P2_SOFFIFO_SYMBOFFS0 0xf39400ff
+
+/* P2_ERRCTRL1 */
+#define RSTV0910_P2_ERRCTRL1 0xf398
+#define FSTV0910_P2_ERR_SOURCE1 0xf39800f0
+#define FSTV0910_P2_NUM_EVENT1 0xf3980007
+
+/* P2_ERRCNT12 */
+#define RSTV0910_P2_ERRCNT12 0xf399
+#define FSTV0910_P2_ERRCNT1_OLDVALUE 0xf3990080
+#define FSTV0910_P2_ERR_CNT12 0xf399007f
+
+/* P2_ERRCNT11 */
+#define RSTV0910_P2_ERRCNT11 0xf39a
+#define FSTV0910_P2_ERR_CNT11 0xf39a00ff
+
+/* P2_ERRCNT10 */
+#define RSTV0910_P2_ERRCNT10 0xf39b
+#define FSTV0910_P2_ERR_CNT10 0xf39b00ff
+
+/* P2_ERRCTRL2 */
+#define RSTV0910_P2_ERRCTRL2 0xf39c
+#define FSTV0910_P2_ERR_SOURCE2 0xf39c00f0
+#define FSTV0910_P2_NUM_EVENT2 0xf39c0007
+
+/* P2_ERRCNT22 */
+#define RSTV0910_P2_ERRCNT22 0xf39d
+#define FSTV0910_P2_ERRCNT2_OLDVALUE 0xf39d0080
+#define FSTV0910_P2_ERR_CNT22 0xf39d007f
+
+/* P2_ERRCNT21 */
+#define RSTV0910_P2_ERRCNT21 0xf39e
+#define FSTV0910_P2_ERR_CNT21 0xf39e00ff
+
+/* P2_ERRCNT20 */
+#define RSTV0910_P2_ERRCNT20 0xf39f
+#define FSTV0910_P2_ERR_CNT20 0xf39f00ff
+
+/* P2_FECSPY */
+#define RSTV0910_P2_FECSPY 0xf3a0
+#define FSTV0910_P2_SPY_ENABLE 0xf3a00080
+#define FSTV0910_P2_NO_SYNCBYTE 0xf3a00040
+#define FSTV0910_P2_SERIAL_MODE 0xf3a00020
+#define FSTV0910_P2_UNUSUAL_PACKET 0xf3a00010
+#define FSTV0910_P2_BERMETER_DATAMODE 0xf3a0000c
+#define FSTV0910_P2_BERMETER_LMODE 0xf3a00002
+#define FSTV0910_P2_BERMETER_RESET 0xf3a00001
+
+/* P2_FSPYCFG */
+#define RSTV0910_P2_FSPYCFG 0xf3a1
+#define FSTV0910_P2_FECSPY_INPUT 0xf3a100c0
+#define FSTV0910_P2_RST_ON_ERROR 0xf3a10020
+#define FSTV0910_P2_ONE_SHOT 0xf3a10010
+#define FSTV0910_P2_I2C_MODE 0xf3a1000c
+#define FSTV0910_P2_SPY_HYSTERESIS 0xf3a10003
+
+/* P2_FSPYDATA */
+#define RSTV0910_P2_FSPYDATA 0xf3a2
+#define FSTV0910_P2_SPY_STUFFING 0xf3a20080
+#define FSTV0910_P2_SPY_CNULLPKT 0xf3a20020
+#define FSTV0910_P2_SPY_OUTDATA_MODE 0xf3a2001f
+
+/* P2_FSPYOUT */
+#define RSTV0910_P2_FSPYOUT 0xf3a3
+#define FSTV0910_P2_FSPY_DIRECT 0xf3a30080
+#define FSTV0910_P2_STUFF_MODE 0xf3a30007
+
+/* P2_FSTATUS */
+#define RSTV0910_P2_FSTATUS 0xf3a4
+#define FSTV0910_P2_SPY_ENDSIM 0xf3a40080
+#define FSTV0910_P2_VALID_SIM 0xf3a40040
+#define FSTV0910_P2_FOUND_SIGNAL 0xf3a40020
+#define FSTV0910_P2_DSS_SYNCBYTE 0xf3a40010
+#define FSTV0910_P2_RESULT_STATE 0xf3a4000f
+
+/* P2_FBERCPT4 */
+#define RSTV0910_P2_FBERCPT4 0xf3a8
+#define FSTV0910_P2_FBERMETER_CPT4 0xf3a800ff
+
+/* P2_FBERCPT3 */
+#define RSTV0910_P2_FBERCPT3 0xf3a9
+#define FSTV0910_P2_FBERMETER_CPT3 0xf3a900ff
+
+/* P2_FBERCPT2 */
+#define RSTV0910_P2_FBERCPT2 0xf3aa
+#define FSTV0910_P2_FBERMETER_CPT2 0xf3aa00ff
+
+/* P2_FBERCPT1 */
+#define RSTV0910_P2_FBERCPT1 0xf3ab
+#define FSTV0910_P2_FBERMETER_CPT1 0xf3ab00ff
+
+/* P2_FBERCPT0 */
+#define RSTV0910_P2_FBERCPT0 0xf3ac
+#define FSTV0910_P2_FBERMETER_CPT0 0xf3ac00ff
+
+/* P2_FBERERR2 */
+#define RSTV0910_P2_FBERERR2 0xf3ad
+#define FSTV0910_P2_FBERMETER_ERR2 0xf3ad00ff
+
+/* P2_FBERERR1 */
+#define RSTV0910_P2_FBERERR1 0xf3ae
+#define FSTV0910_P2_FBERMETER_ERR1 0xf3ae00ff
+
+/* P2_FBERERR0 */
+#define RSTV0910_P2_FBERERR0 0xf3af
+#define FSTV0910_P2_FBERMETER_ERR0 0xf3af00ff
+
+/* P2_FSPYBER */
+#define RSTV0910_P2_FSPYBER 0xf3b2
+#define FSTV0910_P2_FSPYBER_SYNCBYTE 0xf3b20010
+#define FSTV0910_P2_FSPYBER_UNSYNC 0xf3b20008
+#define FSTV0910_P2_FSPYBER_CTIME 0xf3b20007
+
+/* P2_SFERROR */
+#define RSTV0910_P2_SFERROR 0xf3c1
+#define FSTV0910_P2_SFEC_REGERR_VIT 0xf3c100ff
+
+/* P2_SFECSTATUS */
+#define RSTV0910_P2_SFECSTATUS 0xf3c3
+#define FSTV0910_P2_SFEC_ON 0xf3c30080
+#define FSTV0910_P2_SFEC_OFF 0xf3c30040
+#define FSTV0910_P2_LOCKEDSFEC 0xf3c30008
+#define FSTV0910_P2_SFEC_DELOCK 0xf3c30004
+#define FSTV0910_P2_SFEC_DEMODSEL 0xf3c30002
+#define FSTV0910_P2_SFEC_OVFON 0xf3c30001
+
+/* P2_SFKDIV12 */
+#define RSTV0910_P2_SFKDIV12 0xf3c4
+#define FSTV0910_P2_SFECKDIV12_MAN 0xf3c40080
+
+/* P2_SFKDIV23 */
+#define RSTV0910_P2_SFKDIV23 0xf3c5
+#define FSTV0910_P2_SFECKDIV23_MAN 0xf3c50080
+
+/* P2_SFKDIV34 */
+#define RSTV0910_P2_SFKDIV34 0xf3c6
+#define FSTV0910_P2_SFECKDIV34_MAN 0xf3c60080
+
+/* P2_SFKDIV56 */
+#define RSTV0910_P2_SFKDIV56 0xf3c7
+#define FSTV0910_P2_SFECKDIV56_MAN 0xf3c70080
+
+/* P2_SFKDIV67 */
+#define RSTV0910_P2_SFKDIV67 0xf3c8
+#define FSTV0910_P2_SFECKDIV67_MAN 0xf3c80080
+
+/* P2_SFKDIV78 */
+#define RSTV0910_P2_SFKDIV78 0xf3c9
+#define FSTV0910_P2_SFECKDIV78_MAN 0xf3c90080
+
+/* P2_SFSTATUS */
+#define RSTV0910_P2_SFSTATUS 0xf3cc
+#define FSTV0910_P2_SFEC_LINEOK 0xf3cc0080
+#define FSTV0910_P2_SFEC_ERROR 0xf3cc0040
+#define FSTV0910_P2_SFEC_DATA7 0xf3cc0020
+#define FSTV0910_P2_SFEC_PKTDNBRFAIL 0xf3cc0010
+#define FSTV0910_P2_TSSFEC_DEMODSEL 0xf3cc0008
+#define FSTV0910_P2_SFEC_NOSYNC 0xf3cc0004
+#define FSTV0910_P2_SFEC_UNREGULA 0xf3cc0002
+#define FSTV0910_P2_SFEC_READY 0xf3cc0001
+
+/* P2_SFDLYSET2 */
+#define RSTV0910_P2_SFDLYSET2 0xf3d0
+#define FSTV0910_P2_SFEC_DISABLE 0xf3d00002
+
+/* P2_SFERRCTRL */
+#define RSTV0910_P2_SFERRCTRL 0xf3d8
+#define FSTV0910_P2_SFEC_ERR_SOURCE 0xf3d800f0
+#define FSTV0910_P2_SFEC_NUM_EVENT 0xf3d80007
+
+/* P2_SFERRCNT2 */
+#define RSTV0910_P2_SFERRCNT2 0xf3d9
+#define FSTV0910_P2_SFERRC_OLDVALUE 0xf3d90080
+#define FSTV0910_P2_SFEC_ERR_CNT2 0xf3d9007f
+
+/* P2_SFERRCNT1 */
+#define RSTV0910_P2_SFERRCNT1 0xf3da
+#define FSTV0910_P2_SFEC_ERR_CNT1 0xf3da00ff
+
+/* P2_SFERRCNT0 */
+#define RSTV0910_P2_SFERRCNT0 0xf3db
+#define FSTV0910_P2_SFEC_ERR_CNT0 0xf3db00ff
+
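+/*
+ * The P1_* definitions below parallel the P2_* definitions above, with the
+ * register addresses shifted by +0x200 (e.g. RSTV0910_P2_NOSTHRES2 at
+ * 0xf30a versus RSTV0910_P1_NOSTHRES2 at 0xf50a); only the P1/P2 prefix in
+ * the macro names changes.
+ */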
+/* P1_IQCONST */
+#define RSTV0910_P1_IQCONST 0xf400
+#define FSTV0910_P1_CONSTEL_SELECT 0xf4000060
+#define FSTV0910_P1_IQSYMB_SEL 0xf400001f
+
+/* P1_NOSCFG */
+#define RSTV0910_P1_NOSCFG 0xf401
+#define FSTV0910_P1_DUMMYPL_NOSDATA 0xf4010020
+#define FSTV0910_P1_NOSPLH_BETA 0xf4010018
+#define FSTV0910_P1_NOSDATA_BETA 0xf4010007
+
+/* P1_ISYMB */
+#define RSTV0910_P1_ISYMB 0xf402
+#define FSTV0910_P1_I_SYMBOL 0xf40201ff
+
+/* P1_QSYMB */
+#define RSTV0910_P1_QSYMB 0xf403
+#define FSTV0910_P1_Q_SYMBOL 0xf40301ff
+
+/* P1_AGC1CFG */
+#define RSTV0910_P1_AGC1CFG 0xf404
+#define FSTV0910_P1_DC_FROZEN 0xf4040080
+#define FSTV0910_P1_DC_CORRECT 0xf4040040
+#define FSTV0910_P1_AMM_FROZEN 0xf4040020
+#define FSTV0910_P1_AMM_CORRECT 0xf4040010
+#define FSTV0910_P1_QUAD_FROZEN 0xf4040008
+#define FSTV0910_P1_QUAD_CORRECT 0xf4040004
+
+/* P1_AGC1CN */
+#define RSTV0910_P1_AGC1CN 0xf406
+#define FSTV0910_P1_AGC1_LOCKED 0xf4060080
+#define FSTV0910_P1_AGC1_MINPOWER 0xf4060010
+#define FSTV0910_P1_AGCOUT_FAST 0xf4060008
+#define FSTV0910_P1_AGCIQ_BETA 0xf4060007
+
+/* P1_AGC1REF */
+#define RSTV0910_P1_AGC1REF 0xf407
+#define FSTV0910_P1_AGCIQ_REF 0xf40700ff
+
+/* P1_IDCCOMP */
+#define RSTV0910_P1_IDCCOMP 0xf408
+#define FSTV0910_P1_IAVERAGE_ADJ 0xf40801ff
+
+/* P1_QDCCOMP */
+#define RSTV0910_P1_QDCCOMP 0xf409
+#define FSTV0910_P1_QAVERAGE_ADJ 0xf40901ff
+
+/* P1_POWERI */
+#define RSTV0910_P1_POWERI 0xf40a
+#define FSTV0910_P1_POWER_I 0xf40a00ff
+
+/* P1_POWERQ */
+#define RSTV0910_P1_POWERQ 0xf40b
+#define FSTV0910_P1_POWER_Q 0xf40b00ff
+
+/* P1_AGC1AMM */
+#define RSTV0910_P1_AGC1AMM 0xf40c
+#define FSTV0910_P1_AMM_VALUE 0xf40c00ff
+
+/* P1_AGC1QUAD */
+#define RSTV0910_P1_AGC1QUAD 0xf40d
+#define FSTV0910_P1_QUAD_VALUE 0xf40d01ff
+
+/* P1_AGCIQIN1 */
+#define RSTV0910_P1_AGCIQIN1 0xf40e
+#define FSTV0910_P1_AGCIQ_VALUE1 0xf40e00ff
+
+/* P1_AGCIQIN0 */
+#define RSTV0910_P1_AGCIQIN0 0xf40f
+#define FSTV0910_P1_AGCIQ_VALUE0 0xf40f00ff
+
+/* P1_DEMOD */
+#define RSTV0910_P1_DEMOD 0xf410
+#define FSTV0910_P1_MANUALS2_ROLLOFF 0xf4100080
+#define FSTV0910_P1_SPECINV_CONTROL 0xf4100030
+#define FSTV0910_P1_MANUALSX_ROLLOFF 0xf4100004
+#define FSTV0910_P1_ROLLOFF_CONTROL 0xf4100003
+
+/* P1_DMDMODCOD */
+#define RSTV0910_P1_DMDMODCOD 0xf411
+#define FSTV0910_P1_MANUAL_MODCOD 0xf4110080
+#define FSTV0910_P1_DEMOD_MODCOD 0xf411007c
+#define FSTV0910_P1_DEMOD_TYPE 0xf4110003
+
+/* P1_DSTATUS */
+#define RSTV0910_P1_DSTATUS 0xf412
+#define FSTV0910_P1_CAR_LOCK 0xf4120080
+#define FSTV0910_P1_TMGLOCK_QUALITY 0xf4120060
+#define FSTV0910_P1_LOCK_DEFINITIF 0xf4120008
+#define FSTV0910_P1_OVADC_DETECT 0xf4120001
+
+/* P1_DSTATUS2 */
+#define RSTV0910_P1_DSTATUS2 0xf413
+#define FSTV0910_P1_DEMOD_DELOCK 0xf4130080
+#define FSTV0910_P1_MODCODRQ_SYNCTAG 0xf4130020
+#define FSTV0910_P1_POLYPH_SATEVENT 0xf4130010
+#define FSTV0910_P1_AGC1_NOSIGNALACK 0xf4130008
+#define FSTV0910_P1_AGC2_OVERFLOW 0xf4130004
+#define FSTV0910_P1_CFR_OVERFLOW 0xf4130002
+#define FSTV0910_P1_GAMMA_OVERUNDER 0xf4130001
+
+/* P1_DMDCFGMD */
+#define RSTV0910_P1_DMDCFGMD 0xf414
+#define FSTV0910_P1_DVBS2_ENABLE 0xf4140080
+#define FSTV0910_P1_DVBS1_ENABLE 0xf4140040
+#define FSTV0910_P1_SCAN_ENABLE 0xf4140010
+#define FSTV0910_P1_CFR_AUTOSCAN 0xf4140008
+#define FSTV0910_P1_TUN_RNG 0xf4140003
+
+/* P1_DMDCFG2 */
+#define RSTV0910_P1_DMDCFG2 0xf415
+#define FSTV0910_P1_S1S2_SEQUENTIAL 0xf4150040
+#define FSTV0910_P1_INFINITE_RELOCK 0xf4150010
+
+/* P1_DMDISTATE */
+#define RSTV0910_P1_DMDISTATE 0xf416
+#define FSTV0910_P1_I2C_NORESETDMODE 0xf4160080
+#define FSTV0910_P1_I2C_DEMOD_MODE 0xf416001f
+
+/* P1_DMDT0M */
+#define RSTV0910_P1_DMDT0M 0xf417
+#define FSTV0910_P1_DMDT0_MIN 0xf41700ff
+
+/* P1_DMDSTATE */
+#define RSTV0910_P1_DMDSTATE 0xf41b
+#define FSTV0910_P1_HEADER_MODE 0xf41b0060
+
+/* P1_DMDFLYW */
+#define RSTV0910_P1_DMDFLYW 0xf41c
+#define FSTV0910_P1_I2C_IRQVAL 0xf41c00f0
+#define FSTV0910_P1_FLYWHEEL_CPT 0xf41c000f
+
+/* P1_DSTATUS3 */
+#define RSTV0910_P1_DSTATUS3 0xf41d
+#define FSTV0910_P1_CFR_ZIGZAG 0xf41d0080
+#define FSTV0910_P1_DEMOD_CFGMODE 0xf41d0060
+#define FSTV0910_P1_GAMMA_LOWBAUDRATE 0xf41d0010
+
+/* P1_DMDCFG3 */
+#define RSTV0910_P1_DMDCFG3 0xf41e
+#define FSTV0910_P1_NOSTOP_FIFOFULL 0xf41e0008
+
+/* P1_DMDCFG4 */
+#define RSTV0910_P1_DMDCFG4 0xf41f
+#define FSTV0910_P1_DIS_VITLOCK 0xf41f0080
+#define FSTV0910_P1_DIS_CLKENABLE 0xf41f0004
+
+/* P1_CORRELMANT */
+#define RSTV0910_P1_CORRELMANT 0xf420
+#define FSTV0910_P1_CORREL_MANT 0xf42000ff
+
+/* P1_CORRELABS */
+#define RSTV0910_P1_CORRELABS 0xf421
+#define FSTV0910_P1_CORREL_ABS 0xf42100ff
+
+/* P1_CORRELEXP */
+#define RSTV0910_P1_CORRELEXP 0xf422
+#define FSTV0910_P1_CORREL_ABSEXP 0xf42200f0
+#define FSTV0910_P1_CORREL_EXP 0xf422000f
+
+/* P1_PLHMODCOD */
+#define RSTV0910_P1_PLHMODCOD 0xf424
+#define FSTV0910_P1_SPECINV_DEMOD 0xf4240080
+#define FSTV0910_P1_PLH_MODCOD 0xf424007c
+#define FSTV0910_P1_PLH_TYPE 0xf4240003
+
+/* P1_DMDREG */
+#define RSTV0910_P1_DMDREG 0xf425
+#define FSTV0910_P1_DECIM_PLFRAMES 0xf4250001
+
+/* P1_AGCNADJ */
+#define RSTV0910_P1_AGCNADJ 0xf426
+#define FSTV0910_P1_RADJOFF_AGC2 0xf4260080
+#define FSTV0910_P1_RADJOFF_AGC1 0xf4260040
+#define FSTV0910_P1_AGC_NADJ 0xf426013f
+
+/* P1_AGCKS */
+#define RSTV0910_P1_AGCKS 0xf427
+#define FSTV0910_P1_RSADJ_MANUALCFG 0xf4270080
+#define FSTV0910_P1_RSADJ_CCMMODE 0xf4270040
+#define FSTV0910_P1_RADJ_SPSK 0xf427013f
+
+/* P1_AGCKQ */
+#define RSTV0910_P1_AGCKQ 0xf428
+#define FSTV0910_P1_RADJON_DVBS1 0xf4280040
+#define FSTV0910_P1_RADJ_QPSK 0xf428013f
+
+/* P1_AGCK8 */
+#define RSTV0910_P1_AGCK8 0xf429
+#define FSTV0910_P1_RADJ_8PSK 0xf429013f
+
+/* P1_AGCK16 */
+#define RSTV0910_P1_AGCK16 0xf42a
+#define FSTV0910_P1_R2ADJOFF_16APSK 0xf42a0040
+#define FSTV0910_P1_R1ADJOFF_16APSK 0xf42a0020
+#define FSTV0910_P1_RADJ_16APSK 0xf42a011f
+
+/* P1_AGCK32 */
+#define RSTV0910_P1_AGCK32 0xf42b
+#define FSTV0910_P1_R3ADJOFF_32APSK 0xf42b0080
+#define FSTV0910_P1_R2ADJOFF_32APSK 0xf42b0040
+#define FSTV0910_P1_R1ADJOFF_32APSK 0xf42b0020
+#define FSTV0910_P1_RADJ_32APSK 0xf42b011f
+
+/* P1_AGC2O */
+#define RSTV0910_P1_AGC2O 0xf42c
+#define FSTV0910_P1_CSTENV_MODE 0xf42c00c0
+#define FSTV0910_P1_AGC2_COEF 0xf42c0007
+
+/* P1_AGC2REF */
+#define RSTV0910_P1_AGC2REF 0xf42d
+#define FSTV0910_P1_AGC2_REF 0xf42d00ff
+
+/* P1_AGC1ADJ */
+#define RSTV0910_P1_AGC1ADJ 0xf42e
+#define FSTV0910_P1_AGC1_ADJUSTED 0xf42e007f
+
+/* P1_AGCRSADJ */
+#define RSTV0910_P1_AGCRSADJ 0xf42f
+#define FSTV0910_P1_RS_ADJUSTED 0xf42f007f
+
+/* P1_AGCRQADJ */
+#define RSTV0910_P1_AGCRQADJ 0xf430
+#define FSTV0910_P1_RQ_ADJUSTED 0xf430007f
+
+/* P1_AGCR8ADJ */
+#define RSTV0910_P1_AGCR8ADJ 0xf431
+#define FSTV0910_P1_R8_ADJUSTED 0xf431007f
+
+/* P1_AGCR1ADJ */
+#define RSTV0910_P1_AGCR1ADJ 0xf432
+#define FSTV0910_P1_R1_ADJUSTED 0xf432007f
+
+/* P1_AGCR2ADJ */
+#define RSTV0910_P1_AGCR2ADJ 0xf433
+#define FSTV0910_P1_R2_ADJUSTED 0xf433007f
+
+/* P1_AGCR3ADJ */
+#define RSTV0910_P1_AGCR3ADJ 0xf434
+#define FSTV0910_P1_R3_ADJUSTED 0xf434007f
+
+/* P1_AGCREFADJ */
+#define RSTV0910_P1_AGCREFADJ 0xf435
+#define FSTV0910_P1_AGC2REF_ADJUSTED 0xf435007f
+
+/* P1_AGC2I1 */
+#define RSTV0910_P1_AGC2I1 0xf436
+#define FSTV0910_P1_AGC2_INTEGRATOR1 0xf43600ff
+
+/* P1_AGC2I0 */
+#define RSTV0910_P1_AGC2I0 0xf437
+#define FSTV0910_P1_AGC2_INTEGRATOR0 0xf43700ff
+
+/* P1_CARCFG */
+#define RSTV0910_P1_CARCFG 0xf438
+#define FSTV0910_P1_ROTAON 0xf4380004
+#define FSTV0910_P1_PH_DET_ALGO 0xf4380003
+
+/* P1_ACLC */
+#define RSTV0910_P1_ACLC 0xf439
+#define FSTV0910_P1_CAR_ALPHA_MANT 0xf4390030
+#define FSTV0910_P1_CAR_ALPHA_EXP 0xf439000f
+
+/* P1_BCLC */
+#define RSTV0910_P1_BCLC 0xf43a
+#define FSTV0910_P1_CAR_BETA_MANT 0xf43a0030
+#define FSTV0910_P1_CAR_BETA_EXP 0xf43a000f
+
+/* P1_ACLCS2 */
+#define RSTV0910_P1_ACLCS2 0xf43b
+#define FSTV0910_P1_CARS2_APLHA_MANTISSE 0xf43b0030
+#define FSTV0910_P1_CARS2_ALPHA_EXP 0xf43b000f
+
+/* P1_BCLCS2 */
+#define RSTV0910_P1_BCLCS2 0xf43c
+#define FSTV0910_P1_CARS2_BETA_MANTISSE 0xf43c0030
+#define FSTV0910_P1_CARS2_BETA_EXP 0xf43c000f
+
+/* P1_CARFREQ */
+#define RSTV0910_P1_CARFREQ 0xf43d
+#define FSTV0910_P1_KC_COARSE_EXP 0xf43d00f0
+#define FSTV0910_P1_BETA_FREQ 0xf43d000f
+
+/* P1_CARHDR */
+#define RSTV0910_P1_CARHDR 0xf43e
+#define FSTV0910_P1_K_FREQ_HDR 0xf43e00ff
+
+/* P1_LDT */
+#define RSTV0910_P1_LDT 0xf43f
+#define FSTV0910_P1_CARLOCK_THRES 0xf43f01ff
+
+/* P1_LDT2 */
+#define RSTV0910_P1_LDT2 0xf440
+#define FSTV0910_P1_CARLOCK_THRES2 0xf44001ff
+
+/* P1_CFRICFG */
+#define RSTV0910_P1_CFRICFG 0xf441
+#define FSTV0910_P1_NEG_CFRSTEP 0xf4410001
+
+/* P1_CFRUP1 */
+#define RSTV0910_P1_CFRUP1 0xf442
+#define FSTV0910_P1_CFR_UP1 0xf44201ff
+
+/* P1_CFRUP0 */
+#define RSTV0910_P1_CFRUP0 0xf443
+#define FSTV0910_P1_CFR_UP0 0xf44300ff
+
+/* P1_CFRIBASE1 */
+#define RSTV0910_P1_CFRIBASE1 0xf444
+#define FSTV0910_P1_CFRINIT_BASE1 0xf44400ff
+
+/* P1_CFRIBASE0 */
+#define RSTV0910_P1_CFRIBASE0 0xf445
+#define FSTV0910_P1_CFRINIT_BASE0 0xf44500ff
+
+/* P1_CFRLOW1 */
+#define RSTV0910_P1_CFRLOW1 0xf446
+#define FSTV0910_P1_CFR_LOW1 0xf44601ff
+
+/* P1_CFRLOW0 */
+#define RSTV0910_P1_CFRLOW0 0xf447
+#define FSTV0910_P1_CFR_LOW0 0xf44700ff
+
+/* P1_CFRINIT1 */
+#define RSTV0910_P1_CFRINIT1 0xf448
+#define FSTV0910_P1_CFR_INIT1 0xf44801ff
+
+/* P1_CFRINIT0 */
+#define RSTV0910_P1_CFRINIT0 0xf449
+#define FSTV0910_P1_CFR_INIT0 0xf44900ff
+
+/* P1_CFRINC1 */
+#define RSTV0910_P1_CFRINC1 0xf44a
+#define FSTV0910_P1_MANUAL_CFRINC 0xf44a0080
+#define FSTV0910_P1_CFR_INC1 0xf44a003f
+
+/* P1_CFRINC0 */
+#define RSTV0910_P1_CFRINC0 0xf44b
+#define FSTV0910_P1_CFR_INC0 0xf44b00ff
+
+/* P1_CFR2 */
+#define RSTV0910_P1_CFR2 0xf44c
+#define FSTV0910_P1_CAR_FREQ2 0xf44c01ff
+
+/* P1_CFR1 */
+#define RSTV0910_P1_CFR1 0xf44d
+#define FSTV0910_P1_CAR_FREQ1 0xf44d00ff
+
+/* P1_CFR0 */
+#define RSTV0910_P1_CFR0 0xf44e
+#define FSTV0910_P1_CAR_FREQ0 0xf44e00ff
+
+/* P1_LDI */
+#define RSTV0910_P1_LDI 0xf44f
+#define FSTV0910_P1_LOCK_DET_INTEGR 0xf44f01ff
+
+/* P1_TMGCFG */
+#define RSTV0910_P1_TMGCFG 0xf450
+#define FSTV0910_P1_TMGLOCK_BETA 0xf45000c0
+#define FSTV0910_P1_DO_TIMING_CORR 0xf4500010
+#define FSTV0910_P1_TMG_MINFREQ 0xf4500003
+
+/* P1_RTC */
+#define RSTV0910_P1_RTC 0xf451
+#define FSTV0910_P1_TMGALPHA_EXP 0xf45100f0
+#define FSTV0910_P1_TMGBETA_EXP 0xf451000f
+
+/* P1_RTCS2 */
+#define RSTV0910_P1_RTCS2 0xf452
+#define FSTV0910_P1_TMGALPHAS2_EXP 0xf45200f0
+#define FSTV0910_P1_TMGBETAS2_EXP 0xf452000f
+
+/* P1_TMGTHRISE */
+#define RSTV0910_P1_TMGTHRISE 0xf453
+#define FSTV0910_P1_TMGLOCK_THRISE 0xf45300ff
+
+/* P1_TMGTHFALL */
+#define RSTV0910_P1_TMGTHFALL 0xf454
+#define FSTV0910_P1_TMGLOCK_THFALL 0xf45400ff
+
+/* P1_SFRUPRATIO */
+#define RSTV0910_P1_SFRUPRATIO 0xf455
+#define FSTV0910_P1_SFR_UPRATIO 0xf45500ff
+
+/* P1_SFRLOWRATIO */
+#define RSTV0910_P1_SFRLOWRATIO 0xf456
+#define FSTV0910_P1_SFR_LOWRATIO 0xf45600ff
+
+/* P1_KTTMG */
+#define RSTV0910_P1_KTTMG 0xf457
+#define FSTV0910_P1_KT_TMG_EXP 0xf45700f0
+
+/* P1_KREFTMG */
+#define RSTV0910_P1_KREFTMG 0xf458
+#define FSTV0910_P1_KREF_TMG 0xf45800ff
+
+/* P1_SFRSTEP */
+#define RSTV0910_P1_SFRSTEP 0xf459
+#define FSTV0910_P1_SFR_SCANSTEP 0xf45900f0
+#define FSTV0910_P1_SFR_CENTERSTEP 0xf459000f
+
+/* P1_TMGCFG2 */
+#define RSTV0910_P1_TMGCFG2 0xf45a
+#define FSTV0910_P1_DIS_AUTOSAMP 0xf45a0008
+#define FSTV0910_P1_SFRRATIO_FINE 0xf45a0001
+
+/* P1_KREFTMG2 */
+#define RSTV0910_P1_KREFTMG2 0xf45b
+#define FSTV0910_P1_KREF_TMG2 0xf45b00ff
+
+/* P1_TMGCFG3 */
+#define RSTV0910_P1_TMGCFG3 0xf45d
+#define FSTV0910_P1_CONT_TMGCENTER 0xf45d0008
+#define FSTV0910_P1_AUTO_GUP 0xf45d0004
+#define FSTV0910_P1_AUTO_GLOW 0xf45d0002
+
+/* P1_SFRINIT1 */
+#define RSTV0910_P1_SFRINIT1 0xf45e
+#define FSTV0910_P1_SFR_INIT1 0xf45e00ff
+
+/* P1_SFRINIT0 */
+#define RSTV0910_P1_SFRINIT0 0xf45f
+#define FSTV0910_P1_SFR_INIT0 0xf45f00ff
+
+/* P1_SFRUP1 */
+#define RSTV0910_P1_SFRUP1 0xf460
+#define FSTV0910_P1_SYMB_FREQ_UP1 0xf46000ff
+
+/* P1_SFRUP0 */
+#define RSTV0910_P1_SFRUP0 0xf461
+#define FSTV0910_P1_SYMB_FREQ_UP0 0xf46100ff
+
+/* P1_SFRLOW1 */
+#define RSTV0910_P1_SFRLOW1 0xf462
+#define FSTV0910_P1_SYMB_FREQ_LOW1 0xf46200ff
+
+/* P1_SFRLOW0 */
+#define RSTV0910_P1_SFRLOW0 0xf463
+#define FSTV0910_P1_SYMB_FREQ_LOW0 0xf46300ff
+
+/* P1_SFR3 */
+#define RSTV0910_P1_SFR3 0xf464
+#define FSTV0910_P1_SYMB_FREQ3 0xf46400ff
+
+/* P1_SFR2 */
+#define RSTV0910_P1_SFR2 0xf465
+#define FSTV0910_P1_SYMB_FREQ2 0xf46500ff
+
+/* P1_SFR1 */
+#define RSTV0910_P1_SFR1 0xf466
+#define FSTV0910_P1_SYMB_FREQ1 0xf46600ff
+
+/* P1_SFR0 */
+#define RSTV0910_P1_SFR0 0xf467
+#define FSTV0910_P1_SYMB_FREQ0 0xf46700ff
+
+/* P1_TMGREG2 */
+#define RSTV0910_P1_TMGREG2 0xf468
+#define FSTV0910_P1_TMGREG2 0xf46800ff
+
+/* P1_TMGREG1 */
+#define RSTV0910_P1_TMGREG1 0xf469
+#define FSTV0910_P1_TMGREG1 0xf46900ff
+
+/* P1_TMGREG0 */
+#define RSTV0910_P1_TMGREG0 0xf46a
+#define FSTV0910_P1_TMGREG0 0xf46a00ff
+
+/* P1_TMGLOCK1 */
+#define RSTV0910_P1_TMGLOCK1 0xf46b
+#define FSTV0910_P1_TMGLOCK_LEVEL1 0xf46b01ff
+
+/* P1_TMGLOCK0 */
+#define RSTV0910_P1_TMGLOCK0 0xf46c
+#define FSTV0910_P1_TMGLOCK_LEVEL0 0xf46c00ff
+
+/* P1_TMGOBS */
+#define RSTV0910_P1_TMGOBS 0xf46d
+#define FSTV0910_P1_ROLLOFF_STATUS 0xf46d00c0
+
+/* P1_EQUALCFG */
+#define RSTV0910_P1_EQUALCFG 0xf46f
+#define FSTV0910_P1_EQUAL_ON 0xf46f0040
+#define FSTV0910_P1_MU_EQUALDFE 0xf46f0007
+
+/* P1_EQUAI1 */
+#define RSTV0910_P1_EQUAI1 0xf470
+#define FSTV0910_P1_EQUA_ACCI1 0xf47001ff
+
+/* P1_EQUAQ1 */
+#define RSTV0910_P1_EQUAQ1 0xf471
+#define FSTV0910_P1_EQUA_ACCQ1 0xf47101ff
+
+/* P1_EQUAI2 */
+#define RSTV0910_P1_EQUAI2 0xf472
+#define FSTV0910_P1_EQUA_ACCI2 0xf47201ff
+
+/* P1_EQUAQ2 */
+#define RSTV0910_P1_EQUAQ2 0xf473
+#define FSTV0910_P1_EQUA_ACCQ2 0xf47301ff
+
+/* P1_EQUAI3 */
+#define RSTV0910_P1_EQUAI3 0xf474
+#define FSTV0910_P1_EQUA_ACCI3 0xf47401ff
+
+/* P1_EQUAQ3 */
+#define RSTV0910_P1_EQUAQ3 0xf475
+#define FSTV0910_P1_EQUA_ACCQ3 0xf47501ff
+
+/* P1_EQUAI4 */
+#define RSTV0910_P1_EQUAI4 0xf476
+#define FSTV0910_P1_EQUA_ACCI4 0xf47601ff
+
+/* P1_EQUAQ4 */
+#define RSTV0910_P1_EQUAQ4 0xf477
+#define FSTV0910_P1_EQUA_ACCQ4 0xf47701ff
+
+/* P1_EQUAI5 */
+#define RSTV0910_P1_EQUAI5 0xf478
+#define FSTV0910_P1_EQUA_ACCI5 0xf47801ff
+
+/* P1_EQUAQ5 */
+#define RSTV0910_P1_EQUAQ5 0xf479
+#define FSTV0910_P1_EQUA_ACCQ5 0xf47901ff
+
+/* P1_EQUAI6 */
+#define RSTV0910_P1_EQUAI6 0xf47a
+#define FSTV0910_P1_EQUA_ACCI6 0xf47a01ff
+
+/* P1_EQUAQ6 */
+#define RSTV0910_P1_EQUAQ6 0xf47b
+#define FSTV0910_P1_EQUA_ACCQ6 0xf47b01ff
+
+/* P1_EQUAI7 */
+#define RSTV0910_P1_EQUAI7 0xf47c
+#define FSTV0910_P1_EQUA_ACCI7 0xf47c01ff
+
+/* P1_EQUAQ7 */
+#define RSTV0910_P1_EQUAQ7 0xf47d
+#define FSTV0910_P1_EQUA_ACCQ7 0xf47d01ff
+
+/* P1_EQUAI8 */
+#define RSTV0910_P1_EQUAI8 0xf47e
+#define FSTV0910_P1_EQUA_ACCI8 0xf47e01ff
+
+/* P1_EQUAQ8 */
+#define RSTV0910_P1_EQUAQ8 0xf47f
+#define FSTV0910_P1_EQUA_ACCQ8 0xf47f01ff
+
+/* P1_NNOSDATAT1 */
+#define RSTV0910_P1_NNOSDATAT1 0xf480
+#define FSTV0910_P1_NOSDATAT_NORMED1 0xf48000ff
+
+/* P1_NNOSDATAT0 */
+#define RSTV0910_P1_NNOSDATAT0 0xf481
+#define FSTV0910_P1_NOSDATAT_NORMED0 0xf48100ff
+
+/* P1_NNOSDATA1 */
+#define RSTV0910_P1_NNOSDATA1 0xf482
+#define FSTV0910_P1_NOSDATA_NORMED1 0xf48200ff
+
+/* P1_NNOSDATA0 */
+#define RSTV0910_P1_NNOSDATA0 0xf483
+#define FSTV0910_P1_NOSDATA_NORMED0 0xf48300ff
+
+/* P1_NNOSPLHT1 */
+#define RSTV0910_P1_NNOSPLHT1 0xf484
+#define FSTV0910_P1_NOSPLHT_NORMED1 0xf48400ff
+
+/* P1_NNOSPLHT0 */
+#define RSTV0910_P1_NNOSPLHT0 0xf485
+#define FSTV0910_P1_NOSPLHT_NORMED0 0xf48500ff
+
+/* P1_NNOSPLH1 */
+#define RSTV0910_P1_NNOSPLH1 0xf486
+#define FSTV0910_P1_NOSPLH_NORMED1 0xf48600ff
+
+/* P1_NNOSPLH0 */
+#define RSTV0910_P1_NNOSPLH0 0xf487
+#define FSTV0910_P1_NOSPLH_NORMED0 0xf48700ff
+
+/* P1_NOSDATAT1 */
+#define RSTV0910_P1_NOSDATAT1 0xf488
+#define FSTV0910_P1_NOSDATAT_UNNORMED1 0xf48800ff
+
+/* P1_NOSDATAT0 */
+#define RSTV0910_P1_NOSDATAT0 0xf489
+#define FSTV0910_P1_NOSDATAT_UNNORMED0 0xf48900ff
+
+/* P1_NNOSFRAME1 */
+#define RSTV0910_P1_NNOSFRAME1 0xf48a
+#define FSTV0910_P1_NOSFRAME_NORMED1 0xf48a00ff
+
+/* P1_NNOSFRAME0 */
+#define RSTV0910_P1_NNOSFRAME0 0xf48b
+#define FSTV0910_P1_NOSFRAME_NORMED0 0xf48b00ff
+
+/* P1_NNOSRAD1 */
+#define RSTV0910_P1_NNOSRAD1 0xf48c
+#define FSTV0910_P1_NOSRADIAL_NORMED1 0xf48c00ff
+
+/* P1_NNOSRAD0 */
+#define RSTV0910_P1_NNOSRAD0 0xf48d
+#define FSTV0910_P1_NOSRADIAL_NORMED0 0xf48d00ff
+
+/* P1_NOSCFGF1 */
+#define RSTV0910_P1_NOSCFGF1 0xf48e
+#define FSTV0910_P1_LOWNOISE_MESURE 0xf48e0080
+#define FSTV0910_P1_NOS_DELFRAME 0xf48e0040
+#define FSTV0910_P1_NOSDATA_MODE 0xf48e0030
+#define FSTV0910_P1_FRAMESEL_TYPESEL 0xf48e000c
+#define FSTV0910_P1_FRAMESEL_TYPE 0xf48e0003
+
+/* P1_NOSCFGF2 */
+#define RSTV0910_P1_NOSCFGF2 0xf48f
+#define FSTV0910_P1_DIS_NOSPILOTS 0xf48f0080
+#define FSTV0910_P1_FRAMESEL_MODCODSEL 0xf48f0060
+#define FSTV0910_P1_FRAMESEL_MODCOD 0xf48f001f
+
+/* P1_CAR2CFG */
+#define RSTV0910_P1_CAR2CFG 0xf490
+#define FSTV0910_P1_ROTA2ON 0xf4900004
+#define FSTV0910_P1_PH_DET_ALGO2 0xf4900003
+
+/* P1_CFR2CFR1 */
+#define RSTV0910_P1_CFR2CFR1 0xf491
+#define FSTV0910_P1_EN_S2CAR2CENTER 0xf4910020
+#define FSTV0910_P1_CFR2TOCFR1_BETA 0xf4910007
+
+/* P1_CAR3CFG */
+#define RSTV0910_P1_CAR3CFG 0xf492
+#define FSTV0910_P1_CARRIER23_MODE 0xf49200c0
+#define FSTV0910_P1_CAR3INTERM_DVBS1 0xf4920020
+#define FSTV0910_P1_ABAMPLIF_MODE 0xf4920018
+#define FSTV0910_P1_CARRIER3_ALPHA3DL 0xf4920007
+
+/* P1_CFR22 */
+#define RSTV0910_P1_CFR22 0xf493
+#define FSTV0910_P1_CAR2_FREQ2 0xf49301ff
+
+/* P1_CFR21 */
+#define RSTV0910_P1_CFR21 0xf494
+#define FSTV0910_P1_CAR2_FREQ1 0xf49400ff
+
+/* P1_CFR20 */
+#define RSTV0910_P1_CFR20 0xf495
+#define FSTV0910_P1_CAR2_FREQ0 0xf49500ff
+
+/* P1_ACLC2S2Q */
+#define RSTV0910_P1_ACLC2S2Q 0xf497
+#define FSTV0910_P1_ENAB_SPSKSYMB 0xf4970080
+#define FSTV0910_P1_CAR2S2_Q_ALPH_M 0xf4970030
+#define FSTV0910_P1_CAR2S2_Q_ALPH_E 0xf497000f
+
+/* P1_ACLC2S28 */
+#define RSTV0910_P1_ACLC2S28 0xf498
+#define FSTV0910_P1_CAR2S2_8_ALPH_M 0xf4980030
+#define FSTV0910_P1_CAR2S2_8_ALPH_E 0xf498000f
+
+/* P1_ACLC2S216A */
+#define RSTV0910_P1_ACLC2S216A 0xf499
+#define FSTV0910_P1_CAR2S2_16A_ALPH_M 0xf4990030
+#define FSTV0910_P1_CAR2S2_16A_ALPH_E 0xf499000f
+
+/* P1_ACLC2S232A */
+#define RSTV0910_P1_ACLC2S232A 0xf49a
+#define FSTV0910_P1_CAR2S2_32A_ALPH_M 0xf49a0030
+#define FSTV0910_P1_CAR2S2_32A_ALPH_E 0xf49a000f
+
+/* P1_BCLC2S2Q */
+#define RSTV0910_P1_BCLC2S2Q 0xf49c
+#define FSTV0910_P1_CAR2S2_Q_BETA_M 0xf49c0030
+#define FSTV0910_P1_CAR2S2_Q_BETA_E 0xf49c000f
+
+/* P1_BCLC2S28 */
+#define RSTV0910_P1_BCLC2S28 0xf49d
+#define FSTV0910_P1_CAR2S2_8_BETA_M 0xf49d0030
+#define FSTV0910_P1_CAR2S2_8_BETA_E 0xf49d000f
+
+/* P1_BCLC2S216A */
+#define RSTV0910_P1_BCLC2S216A 0xf49e
+#define FSTV0910_P1_DVBS2S216A_NIP 0xf49e0080
+#define FSTV0910_P1_CAR2S2_16A_BETA_M 0xf49e0030
+#define FSTV0910_P1_CAR2S2_16A_BETA_E 0xf49e000f
+
+/* P1_BCLC2S232A */
+#define RSTV0910_P1_BCLC2S232A 0xf49f
+#define FSTV0910_P1_DVBS2S232A_NIP 0xf49f0080
+#define FSTV0910_P1_CAR2S2_32A_BETA_M 0xf49f0030
+#define FSTV0910_P1_CAR2S2_32A_BETA_E 0xf49f000f
+
+/* P1_PLROOT2 */
+#define RSTV0910_P1_PLROOT2 0xf4ac
+#define FSTV0910_P1_PLSCRAMB_MODE 0xf4ac000c
+#define FSTV0910_P1_PLSCRAMB_ROOT2 0xf4ac0003
+
+/* P1_PLROOT1 */
+#define RSTV0910_P1_PLROOT1 0xf4ad
+#define FSTV0910_P1_PLSCRAMB_ROOT1 0xf4ad00ff
+
+/* P1_PLROOT0 */
+#define RSTV0910_P1_PLROOT0 0xf4ae
+#define FSTV0910_P1_PLSCRAMB_ROOT0 0xf4ae00ff
+
+/* P1_MODCODLST0 */
+#define RSTV0910_P1_MODCODLST0 0xf4b0
+#define FSTV0910_P1_NACCES_MODCODCH 0xf4b00001
+
+/* P1_MODCODLST1 */
+#define RSTV0910_P1_MODCODLST1 0xf4b1
+#define FSTV0910_P1_SYMBRATE_FILTER 0xf4b10008
+#define FSTV0910_P1_NRESET_MODCODLST 0xf4b10004
+#define FSTV0910_P1_DIS_32PSK_9_10 0xf4b10003
+
+/* P1_MODCODLST2 */
+#define RSTV0910_P1_MODCODLST2 0xf4b2
+#define FSTV0910_P1_DIS_32PSK_8_9 0xf4b200f0
+#define FSTV0910_P1_DIS_32PSK_5_6 0xf4b2000f
+
+/* P1_MODCODLST3 */
+#define RSTV0910_P1_MODCODLST3 0xf4b3
+#define FSTV0910_P1_DIS_32PSK_4_5 0xf4b300f0
+#define FSTV0910_P1_DIS_32PSK_3_4 0xf4b3000f
+
+/* P1_MODCODLST4 */
+#define RSTV0910_P1_MODCODLST4 0xf4b4
+#define FSTV0910_P1_DUMMYPL_PILOT 0xf4b40080
+#define FSTV0910_P1_DUMMYPL_NOPILOT 0xf4b40040
+#define FSTV0910_P1_DIS_16PSK_9_10 0xf4b40030
+#define FSTV0910_P1_DIS_16PSK_8_9 0xf4b4000f
+
+/* P1_MODCODLST5 */
+#define RSTV0910_P1_MODCODLST5 0xf4b5
+#define FSTV0910_P1_DIS_16PSK_5_6 0xf4b500f0
+#define FSTV0910_P1_DIS_16PSK_4_5 0xf4b5000f
+
+/* P1_MODCODLST6 */
+#define RSTV0910_P1_MODCODLST6 0xf4b6
+#define FSTV0910_P1_DIS_16PSK_3_4 0xf4b600f0
+#define FSTV0910_P1_DIS_16PSK_2_3 0xf4b6000f
+
+/* P1_MODCODLST7 */
+#define RSTV0910_P1_MODCODLST7 0xf4b7
+#define FSTV0910_P1_MODCOD_NNOSFILTER 0xf4b70080
+#define FSTV0910_P1_DIS_8PSK_9_10 0xf4b70030
+#define FSTV0910_P1_DIS_8PSK_8_9 0xf4b7000f
+
+/* P1_MODCODLST8 */
+#define RSTV0910_P1_MODCODLST8 0xf4b8
+#define FSTV0910_P1_DIS_8PSK_5_6 0xf4b800f0
+#define FSTV0910_P1_DIS_8PSK_3_4 0xf4b8000f
+
+/* P1_MODCODLST9 */
+#define RSTV0910_P1_MODCODLST9 0xf4b9
+#define FSTV0910_P1_DIS_8PSK_2_3 0xf4b900f0
+#define FSTV0910_P1_DIS_8PSK_3_5 0xf4b9000f
+
+/* P1_MODCODLSTA */
+#define RSTV0910_P1_MODCODLSTA 0xf4ba
+#define FSTV0910_P1_NOSFILTER_LIMITE 0xf4ba0080
+#define FSTV0910_P1_DIS_QPSK_9_10 0xf4ba0030
+#define FSTV0910_P1_DIS_QPSK_8_9 0xf4ba000f
+
+/* P1_MODCODLSTB */
+#define RSTV0910_P1_MODCODLSTB 0xf4bb
+#define FSTV0910_P1_DIS_QPSK_5_6 0xf4bb00f0
+#define FSTV0910_P1_DIS_QPSK_4_5 0xf4bb000f
+
+/* P1_MODCODLSTC */
+#define RSTV0910_P1_MODCODLSTC 0xf4bc
+#define FSTV0910_P1_DIS_QPSK_3_4 0xf4bc00f0
+#define FSTV0910_P1_DIS_QPSK_2_3 0xf4bc000f
+
+/* P1_MODCODLSTD */
+#define RSTV0910_P1_MODCODLSTD 0xf4bd
+#define FSTV0910_P1_DIS_QPSK_3_5 0xf4bd00f0
+#define FSTV0910_P1_DIS_QPSK_1_2 0xf4bd000f
+
+/* P1_MODCODLSTE */
+#define RSTV0910_P1_MODCODLSTE 0xf4be
+#define FSTV0910_P1_DIS_QPSK_2_5 0xf4be00f0
+#define FSTV0910_P1_DIS_QPSK_1_3 0xf4be000f
+
+/* P1_MODCODLSTF */
+#define RSTV0910_P1_MODCODLSTF 0xf4bf
+#define FSTV0910_P1_DIS_QPSK_1_4 0xf4bf00f0
+#define FSTV0910_P1_DEMOD_INVMODLST 0xf4bf0008
+#define FSTV0910_P1_DEMODOUT_ENABLE 0xf4bf0004
+#define FSTV0910_P1_DDEMOD_NSET 0xf4bf0002
+#define FSTV0910_P1_MODCOD_NSTOCK 0xf4bf0001
+
+/* P1_GAUSSR0 */
+#define RSTV0910_P1_GAUSSR0 0xf4c0
+#define FSTV0910_P1_EN_CCIMODE 0xf4c00080
+#define FSTV0910_P1_R0_GAUSSIEN 0xf4c0007f
+
+/* P1_CCIR0 */
+#define RSTV0910_P1_CCIR0 0xf4c1
+#define FSTV0910_P1_CCIDETECT_PLHONLY 0xf4c10080
+#define FSTV0910_P1_R0_CCI 0xf4c1007f
+
+/* P1_CCIQUANT */
+#define RSTV0910_P1_CCIQUANT 0xf4c2
+#define FSTV0910_P1_CCI_BETA 0xf4c200e0
+#define FSTV0910_P1_CCI_QUANT 0xf4c2001f
+
+/* P1_CCITHRES */
+#define RSTV0910_P1_CCITHRES 0xf4c3
+#define FSTV0910_P1_CCI_THRESHOLD 0xf4c300ff
+
+/* P1_CCIACC */
+#define RSTV0910_P1_CCIACC 0xf4c4
+#define FSTV0910_P1_CCI_VALUE 0xf4c400ff
+
+/* P1_DSTATUS4 */
+#define RSTV0910_P1_DSTATUS4 0xf4c5
+#define FSTV0910_P1_RAINFADE_DETECT 0xf4c50080
+#define FSTV0910_P1_NOTHRES2_FAIL 0xf4c50040
+#define FSTV0910_P1_NOTHRES1_FAIL 0xf4c50020
+#define FSTV0910_P1_DMDPROG_ERROR 0xf4c50004
+#define FSTV0910_P1_CSTENV_DETECT 0xf4c50002
+#define FSTV0910_P1_DETECTION_TRIAX 0xf4c50001
+
+/* P1_DMDRESCFG */
+#define RSTV0910_P1_DMDRESCFG 0xf4c6
+#define FSTV0910_P1_DMDRES_RESET 0xf4c60080
+#define FSTV0910_P1_DMDRES_STRALL 0xf4c60008
+#define FSTV0910_P1_DMDRES_NEWONLY 0xf4c60004
+#define FSTV0910_P1_DMDRES_NOSTORE 0xf4c60002
+
+/* P1_DMDRESADR */
+#define RSTV0910_P1_DMDRESADR 0xf4c7
+#define FSTV0910_P1_DMDRES_VALIDCFR 0xf4c70040
+#define FSTV0910_P1_DMDRES_MEMFULL 0xf4c70030
+#define FSTV0910_P1_DMDRES_RESNBR 0xf4c7000f
+
+/* P1_DMDRESDATA7 */
+#define RSTV0910_P1_DMDRESDATA7 0xf4c8
+#define FSTV0910_P1_DMDRES_DATA7 0xf4c800ff
+
+/* P1_DMDRESDATA6 */
+#define RSTV0910_P1_DMDRESDATA6 0xf4c9
+#define FSTV0910_P1_DMDRES_DATA6 0xf4c900ff
+
+/* P1_DMDRESDATA5 */
+#define RSTV0910_P1_DMDRESDATA5 0xf4ca
+#define FSTV0910_P1_DMDRES_DATA5 0xf4ca00ff
+
+/* P1_DMDRESDATA4 */
+#define RSTV0910_P1_DMDRESDATA4 0xf4cb
+#define FSTV0910_P1_DMDRES_DATA4 0xf4cb00ff
+
+/* P1_DMDRESDATA3 */
+#define RSTV0910_P1_DMDRESDATA3 0xf4cc
+#define FSTV0910_P1_DMDRES_DATA3 0xf4cc00ff
+
+/* P1_DMDRESDATA2 */
+#define RSTV0910_P1_DMDRESDATA2 0xf4cd
+#define FSTV0910_P1_DMDRES_DATA2 0xf4cd00ff
+
+/* P1_DMDRESDATA1 */
+#define RSTV0910_P1_DMDRESDATA1 0xf4ce
+#define FSTV0910_P1_DMDRES_DATA1 0xf4ce00ff
+
+/* P1_DMDRESDATA0 */
+#define RSTV0910_P1_DMDRESDATA0 0xf4cf
+#define FSTV0910_P1_DMDRES_DATA0 0xf4cf00ff
+
+/* P1_FFEI1 */
+#define RSTV0910_P1_FFEI1 0xf4d0
+#define FSTV0910_P1_FFE_ACCI1 0xf4d001ff
+
+/* P1_FFEQ1 */
+#define RSTV0910_P1_FFEQ1 0xf4d1
+#define FSTV0910_P1_FFE_ACCQ1 0xf4d101ff
+
+/* P1_FFEI2 */
+#define RSTV0910_P1_FFEI2 0xf4d2
+#define FSTV0910_P1_FFE_ACCI2 0xf4d201ff
+
+/* P1_FFEQ2 */
+#define RSTV0910_P1_FFEQ2 0xf4d3
+#define FSTV0910_P1_FFE_ACCQ2 0xf4d301ff
+
+/* P1_FFEI3 */
+#define RSTV0910_P1_FFEI3 0xf4d4
+#define FSTV0910_P1_FFE_ACCI3 0xf4d401ff
+
+/* P1_FFEQ3 */
+#define RSTV0910_P1_FFEQ3 0xf4d5
+#define FSTV0910_P1_FFE_ACCQ3 0xf4d501ff
+
+/* P1_FFEI4 */
+#define RSTV0910_P1_FFEI4 0xf4d6
+#define FSTV0910_P1_FFE_ACCI4 0xf4d601ff
+
+/* P1_FFEQ4 */
+#define RSTV0910_P1_FFEQ4 0xf4d7
+#define FSTV0910_P1_FFE_ACCQ4 0xf4d701ff
+
+/* P1_FFECFG */
+#define RSTV0910_P1_FFECFG 0xf4d8
+#define FSTV0910_P1_EQUALFFE_ON 0xf4d80040
+#define FSTV0910_P1_EQUAL_USEDSYMB 0xf4d80030
+#define FSTV0910_P1_MU_EQUALFFE 0xf4d80007
+
+/* P1_TNRCFG2 */
+#define RSTV0910_P1_TNRCFG2 0xf4e1
+#define FSTV0910_P1_TUN_IQSWAP 0xf4e10080
+
+/* P1_SMAPCOEF7 */
+#define RSTV0910_P1_SMAPCOEF7 0xf500
+#define FSTV0910_P1_DIS_QSCALE 0xf5000080
+#define FSTV0910_P1_SMAPCOEF_Q_LLR12 0xf500017f
+
+/* P1_SMAPCOEF6 */
+#define RSTV0910_P1_SMAPCOEF6 0xf501
+#define FSTV0910_P1_DIS_AGC2SCALE 0xf5010080
+#define FSTV0910_P1_ADJ_8PSKLLR1 0xf5010004
+#define FSTV0910_P1_OLD_8PSKLLR1 0xf5010002
+#define FSTV0910_P1_DIS_AB8PSK 0xf5010001
+
+/* P1_SMAPCOEF5 */
+#define RSTV0910_P1_SMAPCOEF5 0xf502
+#define FSTV0910_P1_DIS_8SCALE 0xf5020080
+#define FSTV0910_P1_SMAPCOEF_8P_LLR23 0xf502017f
+
+/* P1_SMAPCOEF4 */
+#define RSTV0910_P1_SMAPCOEF4 0xf503
+#define FSTV0910_P1_SMAPCOEF_16APSK_LLR12 0xf503017f
+
+/* P1_SMAPCOEF3 */
+#define RSTV0910_P1_SMAPCOEF3 0xf504
+#define FSTV0910_P1_SMAPCOEF_16APSK_LLR34 0xf504017f
+
+/* P1_SMAPCOEF2 */
+#define RSTV0910_P1_SMAPCOEF2 0xf505
+#define FSTV0910_P1_SMAPCOEF_32APSK_R2R3 0xf50501f0
+#define FSTV0910_P1_SMAPCOEF_32APSK_LLR2 0xf505010f
+
+/* P1_SMAPCOEF1 */
+#define RSTV0910_P1_SMAPCOEF1 0xf506
+#define FSTV0910_P1_DIS_16SCALE 0xf5060080
+#define FSTV0910_P1_SMAPCOEF_32_LLR34 0xf506017f
+
+/* P1_SMAPCOEF0 */
+#define RSTV0910_P1_SMAPCOEF0 0xf507
+#define FSTV0910_P1_DIS_32SCALE 0xf5070080
+#define FSTV0910_P1_SMAPCOEF_32_LLR15 0xf507017f
+
+/* P1_NOSTHRES1 */
+#define RSTV0910_P1_NOSTHRES1 0xf509
+#define FSTV0910_P1_NOS_THRESHOLD1 0xf50900ff
+
+/* P1_NOSTHRES2 */
+#define RSTV0910_P1_NOSTHRES2 0xf50a
+#define FSTV0910_P1_NOS_THRESHOLD2 0xf50a00ff
+
+/* P1_NOSDIFF1 */
+#define RSTV0910_P1_NOSDIFF1 0xf50b
+#define FSTV0910_P1_NOSTHRES1_DIFF 0xf50b00ff
+
+/* P1_RAINFADE */
+#define RSTV0910_P1_RAINFADE 0xf50c
+#define FSTV0910_P1_NOSTHRES_DATAT 0xf50c0080
+#define FSTV0910_P1_RAINFADE_CNLIMIT 0xf50c0070
+#define FSTV0910_P1_RAINFADE_TIMEOUT 0xf50c0007
+
+/* P1_NOSRAMCFG */
+#define RSTV0910_P1_NOSRAMCFG 0xf50d
+#define FSTV0910_P1_NOSRAM_ACTIVATION 0xf50d0030
+#define FSTV0910_P1_NOSRAM_CNRONLY 0xf50d0008
+#define FSTV0910_P1_NOSRAM_LGNCNR1 0xf50d0007
+
+/* P1_NOSRAMPOS */
+#define RSTV0910_P1_NOSRAMPOS 0xf50e
+#define FSTV0910_P1_NOSRAM_LGNCNR0 0xf50e00f0
+#define FSTV0910_P1_NOSRAM_VALIDE 0xf50e0004
+#define FSTV0910_P1_NOSRAM_CNRVAL1 0xf50e0003
+
+/* P1_NOSRAMVAL */
+#define RSTV0910_P1_NOSRAMVAL 0xf50f
+#define FSTV0910_P1_NOSRAM_CNRVAL0 0xf50f00ff
+
+/* P1_DMDPLHSTAT */
+#define RSTV0910_P1_DMDPLHSTAT 0xf520
+#define FSTV0910_P1_PLH_STATISTIC 0xf52000ff
+
+/* P1_LOCKTIME3 */
+#define RSTV0910_P1_LOCKTIME3 0xf522
+#define FSTV0910_P1_DEMOD_LOCKTIME3 0xf52200ff
+
+/* P1_LOCKTIME2 */
+#define RSTV0910_P1_LOCKTIME2 0xf523
+#define FSTV0910_P1_DEMOD_LOCKTIME2 0xf52300ff
+
+/* P1_LOCKTIME1 */
+#define RSTV0910_P1_LOCKTIME1 0xf524
+#define FSTV0910_P1_DEMOD_LOCKTIME1 0xf52400ff
+
+/* P1_LOCKTIME0 */
+#define RSTV0910_P1_LOCKTIME0 0xf525
+#define FSTV0910_P1_DEMOD_LOCKTIME0 0xf52500ff
+
+/* P1_VITSCALE */
+#define RSTV0910_P1_VITSCALE 0xf532
+#define FSTV0910_P1_NVTH_NOSRANGE 0xf5320080
+#define FSTV0910_P1_VERROR_MAXMODE 0xf5320040
+#define FSTV0910_P1_NSLOWSN_LOCKED 0xf5320008
+#define FSTV0910_P1_DIS_RSFLOCK 0xf5320002
+
+/* P1_FECM */
+#define RSTV0910_P1_FECM 0xf533
+#define FSTV0910_P1_DSS_DVB 0xf5330080
+#define FSTV0910_P1_DSS_SRCH 0xf5330010
+#define FSTV0910_P1_SYNCVIT 0xf5330002
+#define FSTV0910_P1_IQINV 0xf5330001
+
+/* P1_VTH12 */
+#define RSTV0910_P1_VTH12 0xf534
+#define FSTV0910_P1_VTH12 0xf53400ff
+
+/* P1_VTH23 */
+#define RSTV0910_P1_VTH23 0xf535
+#define FSTV0910_P1_VTH23 0xf53500ff
+
+/* P1_VTH34 */
+#define RSTV0910_P1_VTH34 0xf536
+#define FSTV0910_P1_VTH34 0xf53600ff
+
+/* P1_VTH56 */
+#define RSTV0910_P1_VTH56 0xf537
+#define FSTV0910_P1_VTH56 0xf53700ff
+
+/* P1_VTH67 */
+#define RSTV0910_P1_VTH67 0xf538
+#define FSTV0910_P1_VTH67 0xf53800ff
+
+/* P1_VTH78 */
+#define RSTV0910_P1_VTH78 0xf539
+#define FSTV0910_P1_VTH78 0xf53900ff
+
+/* P1_VITCURPUN */
+#define RSTV0910_P1_VITCURPUN 0xf53a
+#define FSTV0910_P1_VIT_CURPUN 0xf53a001f
+
+/* P1_VERROR */
+#define RSTV0910_P1_VERROR 0xf53b
+#define FSTV0910_P1_REGERR_VIT 0xf53b00ff
+
+/* P1_PRVIT */
+#define RSTV0910_P1_PRVIT 0xf53c
+#define FSTV0910_P1_DIS_VTHLOCK 0xf53c0040
+#define FSTV0910_P1_E7_8VIT 0xf53c0020
+#define FSTV0910_P1_E6_7VIT 0xf53c0010
+#define FSTV0910_P1_E5_6VIT 0xf53c0008
+#define FSTV0910_P1_E3_4VIT 0xf53c0004
+#define FSTV0910_P1_E2_3VIT 0xf53c0002
+#define FSTV0910_P1_E1_2VIT 0xf53c0001
+
+/* P1_VAVSRVIT */
+#define RSTV0910_P1_VAVSRVIT 0xf53d
+#define FSTV0910_P1_AMVIT 0xf53d0080
+#define FSTV0910_P1_FROZENVIT 0xf53d0040
+#define FSTV0910_P1_SNVIT 0xf53d0030
+#define FSTV0910_P1_TOVVIT 0xf53d000c
+#define FSTV0910_P1_HYPVIT 0xf53d0003
+
+/* P1_VSTATUSVIT */
+#define RSTV0910_P1_VSTATUSVIT 0xf53e
+#define FSTV0910_P1_PRFVIT 0xf53e0010
+#define FSTV0910_P1_LOCKEDVIT 0xf53e0008
+
+/* P1_VTHINUSE */
+#define RSTV0910_P1_VTHINUSE 0xf53f
+#define FSTV0910_P1_VIT_INUSE 0xf53f00ff
+
+/* P1_KDIV12 */
+#define RSTV0910_P1_KDIV12 0xf540
+#define FSTV0910_P1_K_DIVIDER_12 0xf540007f
+
+/* P1_KDIV23 */
+#define RSTV0910_P1_KDIV23 0xf541
+#define FSTV0910_P1_K_DIVIDER_23 0xf541007f
+
+/* P1_KDIV34 */
+#define RSTV0910_P1_KDIV34 0xf542
+#define FSTV0910_P1_K_DIVIDER_34 0xf542007f
+
+/* P1_KDIV56 */
+#define RSTV0910_P1_KDIV56 0xf543
+#define FSTV0910_P1_K_DIVIDER_56 0xf543007f
+
+/* P1_KDIV67 */
+#define RSTV0910_P1_KDIV67 0xf544
+#define FSTV0910_P1_K_DIVIDER_67 0xf544007f
+
+/* P1_KDIV78 */
+#define RSTV0910_P1_KDIV78 0xf545
+#define FSTV0910_P1_K_DIVIDER_78 0xf545007f
+
+/* P1_TSPIDFLT1 */
+#define RSTV0910_P1_TSPIDFLT1 0xf546
+#define FSTV0910_P1_PIDFLT_ADDR 0xf54600ff
+
+/* P1_TSPIDFLT0 */
+#define RSTV0910_P1_TSPIDFLT0 0xf547
+#define FSTV0910_P1_PIDFLT_DATA 0xf54700ff
+
+/* P1_PDELCTRL0 */
+#define RSTV0910_P1_PDELCTRL0 0xf54f
+#define FSTV0910_P1_ISIOBS_MODE 0xf54f0030
+
+/* P1_PDELCTRL1 */
+#define RSTV0910_P1_PDELCTRL1 0xf550
+#define FSTV0910_P1_INV_MISMASK 0xf5500080
+#define FSTV0910_P1_FILTER_EN 0xf5500020
+#define FSTV0910_P1_HYSTEN 0xf5500008
+#define FSTV0910_P1_HYSTSWRST 0xf5500004
+#define FSTV0910_P1_EN_MIS00 0xf5500002
+#define FSTV0910_P1_ALGOSWRST 0xf5500001
+
+/* P1_PDELCTRL2 */
+#define RSTV0910_P1_PDELCTRL2 0xf551
+#define FSTV0910_P1_FORCE_CONTINUOUS 0xf5510080
+#define FSTV0910_P1_RESET_UPKO_COUNT 0xf5510040
+#define FSTV0910_P1_USER_PKTDELIN_NB 0xf5510020
+#define FSTV0910_P1_FRAME_MODE 0xf5510002
+
+/* P1_HYSTTHRESH */
+#define RSTV0910_P1_HYSTTHRESH 0xf554
+#define FSTV0910_P1_DELIN_LOCKTHRES 0xf55400f0
+#define FSTV0910_P1_DELIN_UNLOCKTHRES 0xf554000f
+
+/* P1_UPLCCST0 */
+#define RSTV0910_P1_UPLCCST0 0xf558
+#define FSTV0910_P1_UPL_CST0 0xf55800f8
+#define FSTV0910_P1_UPL_MODE 0xf5580007
+
+/* P1_ISIENTRY */
+#define RSTV0910_P1_ISIENTRY 0xf55e
+#define FSTV0910_P1_ISI_ENTRY 0xf55e00ff
+
+/* P1_ISIBITENA */
+#define RSTV0910_P1_ISIBITENA 0xf55f
+#define FSTV0910_P1_ISI_BIT_EN 0xf55f00ff
+
+/* P1_MATSTR1 */
+#define RSTV0910_P1_MATSTR1 0xf560
+#define FSTV0910_P1_MATYPE_CURRENT1 0xf56000ff
+
+/* P1_MATSTR0 */
+#define RSTV0910_P1_MATSTR0 0xf561
+#define FSTV0910_P1_MATYPE_CURRENT0 0xf56100ff
+
+/* P1_UPLSTR1 */
+#define RSTV0910_P1_UPLSTR1 0xf562
+#define FSTV0910_P1_UPL_CURRENT1 0xf56200ff
+
+/* P1_UPLSTR0 */
+#define RSTV0910_P1_UPLSTR0 0xf563
+#define FSTV0910_P1_UPL_CURRENT0 0xf56300ff
+
+/* P1_DFLSTR1 */
+#define RSTV0910_P1_DFLSTR1 0xf564
+#define FSTV0910_P1_DFL_CURRENT1 0xf56400ff
+
+/* P1_DFLSTR0 */
+#define RSTV0910_P1_DFLSTR0 0xf565
+#define FSTV0910_P1_DFL_CURRENT0 0xf56500ff
+
+/* P1_SYNCSTR */
+#define RSTV0910_P1_SYNCSTR 0xf566
+#define FSTV0910_P1_SYNC_CURRENT 0xf56600ff
+
+/* P1_SYNCDSTR1 */
+#define RSTV0910_P1_SYNCDSTR1 0xf567
+#define FSTV0910_P1_SYNCD_CURRENT1 0xf56700ff
+
+/* P1_SYNCDSTR0 */
+#define RSTV0910_P1_SYNCDSTR0 0xf568
+#define FSTV0910_P1_SYNCD_CURRENT0 0xf56800ff
+
+/* P1_PDELSTATUS1 */
+#define RSTV0910_P1_PDELSTATUS1 0xf569
+#define FSTV0910_P1_PKTDELIN_DELOCK 0xf5690080
+#define FSTV0910_P1_SYNCDUPDFL_BADDFL 0xf5690040
+#define FSTV0910_P1_UNACCEPTED_STREAM 0xf5690010
+#define FSTV0910_P1_BCH_ERROR_FLAG 0xf5690008
+#define FSTV0910_P1_PKTDELIN_LOCK 0xf5690002
+#define FSTV0910_P1_FIRST_LOCK 0xf5690001
+
+/* P1_PDELSTATUS2 */
+#define RSTV0910_P1_PDELSTATUS2 0xf56a
+#define FSTV0910_P1_FRAME_MODCOD 0xf56a007c
+#define FSTV0910_P1_FRAME_TYPE 0xf56a0003
+
+/* P1_BBFCRCKO1 */
+#define RSTV0910_P1_BBFCRCKO1 0xf56b
+#define FSTV0910_P1_BBHCRC_KOCNT1 0xf56b00ff
+
+/* P1_BBFCRCKO0 */
+#define RSTV0910_P1_BBFCRCKO0 0xf56c
+#define FSTV0910_P1_BBHCRC_KOCNT0 0xf56c00ff
+
+/* P1_UPCRCKO1 */
+#define RSTV0910_P1_UPCRCKO1 0xf56d
+#define FSTV0910_P1_PKTCRC_KOCNT1 0xf56d00ff
+
+/* P1_UPCRCKO0 */
+#define RSTV0910_P1_UPCRCKO0 0xf56e
+#define FSTV0910_P1_PKTCRC_KOCNT0 0xf56e00ff
+
+/* P1_PDELCTRL3 */
+#define RSTV0910_P1_PDELCTRL3 0xf56f
+#define FSTV0910_P1_NOFIFO_BCHERR 0xf56f0020
+#define FSTV0910_P1_PKTDELIN_DELACMERR 0xf56f0010
+
+/* P1_TSSTATEM */
+#define RSTV0910_P1_TSSTATEM 0xf570
+#define FSTV0910_P1_TSDIL_ON 0xf5700080
+#define FSTV0910_P1_TSRS_ON 0xf5700020
+#define FSTV0910_P1_TSDESCRAMB_ON 0xf5700010
+#define FSTV0910_P1_TSFRAME_MODE 0xf5700008
+#define FSTV0910_P1_TS_DISABLE 0xf5700004
+#define FSTV0910_P1_TSACM_MODE 0xf5700002
+#define FSTV0910_P1_TSOUT_NOSYNC 0xf5700001
+
+/* P1_TSSTATEL */
+#define RSTV0910_P1_TSSTATEL 0xf571
+#define FSTV0910_P1_TSNOSYNCBYTE 0xf5710080
+#define FSTV0910_P1_TSPARITY_ON 0xf5710040
+#define FSTV0910_P1_TSISSYI_ON 0xf5710008
+#define FSTV0910_P1_TSNPD_ON 0xf5710004
+#define FSTV0910_P1_TSCRC8_ON 0xf5710002
+#define FSTV0910_P1_TSDSS_PACKET 0xf5710001
+
+/* P1_TSCFGH */
+#define RSTV0910_P1_TSCFGH 0xf572
+#define FSTV0910_P1_TSFIFO_DVBCI 0xf5720080
+#define FSTV0910_P1_TSFIFO_SERIAL 0xf5720040
+#define FSTV0910_P1_TSFIFO_TEIUPDATE 0xf5720020
+#define FSTV0910_P1_TSFIFO_DUTY50 0xf5720010
+#define FSTV0910_P1_TSFIFO_HSGNLOUT 0xf5720008
+#define FSTV0910_P1_TSFIFO_ERRMODE 0xf5720006
+#define FSTV0910_P1_RST_HWARE 0xf5720001
+
+/* P1_TSCFGM */
+#define RSTV0910_P1_TSCFGM 0xf573
+#define FSTV0910_P1_TSFIFO_MANSPEED 0xf57300c0
+#define FSTV0910_P1_TSFIFO_PERMDATA 0xf5730020
+#define FSTV0910_P1_TSFIFO_NONEWSGNL 0xf5730010
+#define FSTV0910_P1_TSFIFO_INVDATA 0xf5730001
+
+/* P1_TSCFGL */
+#define RSTV0910_P1_TSCFGL 0xf574
+#define FSTV0910_P1_TSFIFO_BCLKDEL1CK 0xf57400c0
+#define FSTV0910_P1_BCHERROR_MODE 0xf5740030
+#define FSTV0910_P1_TSFIFO_NSGNL2DATA 0xf5740008
+#define FSTV0910_P1_TSFIFO_EMBINDVB 0xf5740004
+#define FSTV0910_P1_TSFIFO_BITSPEED 0xf5740003
+
+/* P1_TSSYNC */
+#define RSTV0910_P1_TSSYNC 0xf575
+#define FSTV0910_P1_TSFIFO_SYNCMODE 0xf5750018
+
+/* P1_TSINSDELH */
+#define RSTV0910_P1_TSINSDELH 0xf576
+#define FSTV0910_P1_TSDEL_SYNCBYTE 0xf5760080
+#define FSTV0910_P1_TSDEL_XXHEADER 0xf5760040
+#define FSTV0910_P1_TSDEL_DATAFIELD 0xf5760010
+#define FSTV0910_P1_TSINSDEL_RSPARITY 0xf5760002
+#define FSTV0910_P1_TSINSDEL_CRC8 0xf5760001
+
+/* P1_TSINSDELM */
+#define RSTV0910_P1_TSINSDELM 0xf577
+#define FSTV0910_P1_TSINS_EMODCOD 0xf5770010
+#define FSTV0910_P1_TSINS_TOKEN 0xf5770008
+#define FSTV0910_P1_TSINS_XXXERR 0xf5770004
+#define FSTV0910_P1_TSINS_MATYPE 0xf5770002
+#define FSTV0910_P1_TSINS_UPL 0xf5770001
+
+/* P1_TSINSDELL */
+#define RSTV0910_P1_TSINSDELL 0xf578
+#define FSTV0910_P1_TSINS_DFL 0xf5780080
+#define FSTV0910_P1_TSINS_SYNCD 0xf5780040
+#define FSTV0910_P1_TSINS_BLOCLEN 0xf5780020
+#define FSTV0910_P1_TSINS_SIGPCOUNT 0xf5780010
+#define FSTV0910_P1_TSINS_FIFO 0xf5780008
+#define FSTV0910_P1_TSINS_REALPACK 0xf5780004
+#define FSTV0910_P1_TSINS_TSCONFIG 0xf5780002
+#define FSTV0910_P1_TSINS_LATENCY 0xf5780001
+
+/* P1_TSDIVN */
+#define RSTV0910_P1_TSDIVN 0xf579
+#define FSTV0910_P1_TSFIFO_SPEEDMODE 0xf57900c0
+#define FSTV0910_P1_TSFIFO_RISEOK 0xf5790007
+
+/* P1_TSCFG4 */
+#define RSTV0910_P1_TSCFG4 0xf57a
+#define FSTV0910_P1_TSFIFO_TSSPEEDMODE 0xf57a00c0
+
+/* P1_TSSPEED */
+#define RSTV0910_P1_TSSPEED 0xf580
+#define FSTV0910_P1_TSFIFO_OUTSPEED 0xf58000ff
+
+/* P1_TSSTATUS */
+#define RSTV0910_P1_TSSTATUS 0xf581
+#define FSTV0910_P1_TSFIFO_LINEOK 0xf5810080
+#define FSTV0910_P1_TSFIFO_ERROR 0xf5810040
+#define FSTV0910_P1_TSFIFO_NOSYNC 0xf5810010
+#define FSTV0910_P1_TSREGUL_ERROR 0xf5810004
+#define FSTV0910_P1_DIL_READY 0xf5810001
+
+/* P1_TSSTATUS2 */
+#define RSTV0910_P1_TSSTATUS2 0xf582
+#define FSTV0910_P1_TSFIFO_DEMODSEL 0xf5820080
+#define FSTV0910_P1_TSFIFOSPEED_STORE 0xf5820040
+#define FSTV0910_P1_DILXX_RESET 0xf5820020
+#define FSTV0910_P1_SCRAMBDETECT 0xf5820002
+
+/* P1_TSBITRATE1 */
+#define RSTV0910_P1_TSBITRATE1 0xf583
+#define FSTV0910_P1_TSFIFO_BITRATE1 0xf58300ff
+
+/* P1_TSBITRATE0 */
+#define RSTV0910_P1_TSBITRATE0 0xf584
+#define FSTV0910_P1_TSFIFO_BITRATE0 0xf58400ff
+
+/* P1_TSPACKLEN1 */
+#define RSTV0910_P1_TSPACKLEN1 0xf585
+#define FSTV0910_P1_TSFIFO_PACKCPT 0xf58500e0
+
+/* P1_TSDLY2 */
+#define RSTV0910_P1_TSDLY2 0xf589
+#define FSTV0910_P1_SOFFIFO_LATENCY2 0xf589000f
+
+/* P1_TSDLY1 */
+#define RSTV0910_P1_TSDLY1 0xf58a
+#define FSTV0910_P1_SOFFIFO_LATENCY1 0xf58a00ff
+
+/* P1_TSDLY0 */
+#define RSTV0910_P1_TSDLY0 0xf58b
+#define FSTV0910_P1_SOFFIFO_LATENCY0 0xf58b00ff
+
+/* P1_TSNPDAV */
+#define RSTV0910_P1_TSNPDAV 0xf58c
+#define FSTV0910_P1_TSNPD_AVERAGE 0xf58c00ff
+
+/* P1_TSBUFSTAT2 */
+#define RSTV0910_P1_TSBUFSTAT2 0xf58d
+#define FSTV0910_P1_TSISCR_3BYTES 0xf58d0080
+#define FSTV0910_P1_TSISCR_NEWDATA 0xf58d0040
+#define FSTV0910_P1_TSISCR_BUFSTAT2 0xf58d003f
+
+/* P1_TSBUFSTAT1 */
+#define RSTV0910_P1_TSBUFSTAT1 0xf58e
+#define FSTV0910_P1_TSISCR_BUFSTAT1 0xf58e00ff
+
+/* P1_TSBUFSTAT0 */
+#define RSTV0910_P1_TSBUFSTAT0 0xf58f
+#define FSTV0910_P1_TSISCR_BUFSTAT0 0xf58f00ff
+
+/* P1_TSDEBUGL */
+#define RSTV0910_P1_TSDEBUGL 0xf591
+#define FSTV0910_P1_TSFIFO_ERROR_EVNT 0xf5910004
+#define FSTV0910_P1_TSFIFO_OVERFLOWM 0xf5910001
+
+/* P1_TSDLYSET2 */
+#define RSTV0910_P1_TSDLYSET2 0xf592
+#define FSTV0910_P1_SOFFIFO_OFFSET 0xf59200c0
+#define FSTV0910_P1_HYSTERESIS_THRESHOLD 0xf5920030
+#define FSTV0910_P1_SOFFIFO_SYMBOFFS2 0xf592000f
+
+/* P1_TSDLYSET1 */
+#define RSTV0910_P1_TSDLYSET1 0xf593
+#define FSTV0910_P1_SOFFIFO_SYMBOFFS1 0xf59300ff
+
+/* P1_TSDLYSET0 */
+#define RSTV0910_P1_TSDLYSET0 0xf594
+#define FSTV0910_P1_SOFFIFO_SYMBOFFS0 0xf59400ff
+
+/* P1_ERRCTRL1 */
+#define RSTV0910_P1_ERRCTRL1 0xf598
+#define FSTV0910_P1_ERR_SOURCE1 0xf59800f0
+#define FSTV0910_P1_NUM_EVENT1 0xf5980007
+
+/* P1_ERRCNT12 */
+#define RSTV0910_P1_ERRCNT12 0xf599
+#define FSTV0910_P1_ERRCNT1_OLDVALUE 0xf5990080
+#define FSTV0910_P1_ERR_CNT12 0xf599007f
+
+/* P1_ERRCNT11 */
+#define RSTV0910_P1_ERRCNT11 0xf59a
+#define FSTV0910_P1_ERR_CNT11 0xf59a00ff
+
+/* P1_ERRCNT10 */
+#define RSTV0910_P1_ERRCNT10 0xf59b
+#define FSTV0910_P1_ERR_CNT10 0xf59b00ff
+
+/* P1_ERRCTRL2 */
+#define RSTV0910_P1_ERRCTRL2 0xf59c
+#define FSTV0910_P1_ERR_SOURCE2 0xf59c00f0
+#define FSTV0910_P1_NUM_EVENT2 0xf59c0007
+
+/* P1_ERRCNT22 */
+#define RSTV0910_P1_ERRCNT22 0xf59d
+#define FSTV0910_P1_ERRCNT2_OLDVALUE 0xf59d0080
+#define FSTV0910_P1_ERR_CNT22 0xf59d007f
+
+/* P1_ERRCNT21 */
+#define RSTV0910_P1_ERRCNT21 0xf59e
+#define FSTV0910_P1_ERR_CNT21 0xf59e00ff
+
+/* P1_ERRCNT20 */
+#define RSTV0910_P1_ERRCNT20 0xf59f
+#define FSTV0910_P1_ERR_CNT20 0xf59f00ff
+
+/* P1_FECSPY */
+#define RSTV0910_P1_FECSPY 0xf5a0
+#define FSTV0910_P1_SPY_ENABLE 0xf5a00080
+#define FSTV0910_P1_NO_SYNCBYTE 0xf5a00040
+#define FSTV0910_P1_SERIAL_MODE 0xf5a00020
+#define FSTV0910_P1_UNUSUAL_PACKET 0xf5a00010
+#define FSTV0910_P1_BERMETER_DATAMODE 0xf5a0000c
+#define FSTV0910_P1_BERMETER_LMODE 0xf5a00002
+#define FSTV0910_P1_BERMETER_RESET 0xf5a00001
+
+/* P1_FSPYCFG */
+#define RSTV0910_P1_FSPYCFG 0xf5a1
+#define FSTV0910_P1_FECSPY_INPUT 0xf5a100c0
+#define FSTV0910_P1_RST_ON_ERROR 0xf5a10020
+#define FSTV0910_P1_ONE_SHOT 0xf5a10010
+#define FSTV0910_P1_I2C_MODE 0xf5a1000c
+#define FSTV0910_P1_SPY_HYSTERESIS 0xf5a10003
+
+/* P1_FSPYDATA */
+#define RSTV0910_P1_FSPYDATA 0xf5a2
+#define FSTV0910_P1_SPY_STUFFING 0xf5a20080
+#define FSTV0910_P1_SPY_CNULLPKT 0xf5a20020
+#define FSTV0910_P1_SPY_OUTDATA_MODE 0xf5a2001f
+
+/* P1_FSPYOUT */
+#define RSTV0910_P1_FSPYOUT 0xf5a3
+#define FSTV0910_P1_FSPY_DIRECT 0xf5a30080
+#define FSTV0910_P1_STUFF_MODE 0xf5a30007
+
+/* P1_FSTATUS */
+#define RSTV0910_P1_FSTATUS 0xf5a4
+#define FSTV0910_P1_SPY_ENDSIM 0xf5a40080
+#define FSTV0910_P1_VALID_SIM 0xf5a40040
+#define FSTV0910_P1_FOUND_SIGNAL 0xf5a40020
+#define FSTV0910_P1_DSS_SYNCBYTE 0xf5a40010
+#define FSTV0910_P1_RESULT_STATE 0xf5a4000f
+
+/* P1_FBERCPT4 */
+#define RSTV0910_P1_FBERCPT4 0xf5a8
+#define FSTV0910_P1_FBERMETER_CPT4 0xf5a800ff
+
+/* P1_FBERCPT3 */
+#define RSTV0910_P1_FBERCPT3 0xf5a9
+#define FSTV0910_P1_FBERMETER_CPT3 0xf5a900ff
+
+/* P1_FBERCPT2 */
+#define RSTV0910_P1_FBERCPT2 0xf5aa
+#define FSTV0910_P1_FBERMETER_CPT2 0xf5aa00ff
+
+/* P1_FBERCPT1 */
+#define RSTV0910_P1_FBERCPT1 0xf5ab
+#define FSTV0910_P1_FBERMETER_CPT1 0xf5ab00ff
+
+/* P1_FBERCPT0 */
+#define RSTV0910_P1_FBERCPT0 0xf5ac
+#define FSTV0910_P1_FBERMETER_CPT0 0xf5ac00ff
+
+/* P1_FBERERR2 */
+#define RSTV0910_P1_FBERERR2 0xf5ad
+#define FSTV0910_P1_FBERMETER_ERR2 0xf5ad00ff
+
+/* P1_FBERERR1 */
+#define RSTV0910_P1_FBERERR1 0xf5ae
+#define FSTV0910_P1_FBERMETER_ERR1 0xf5ae00ff
+
+/* P1_FBERERR0 */
+#define RSTV0910_P1_FBERERR0 0xf5af
+#define FSTV0910_P1_FBERMETER_ERR0 0xf5af00ff
+
+/* P1_FSPYBER */
+#define RSTV0910_P1_FSPYBER 0xf5b2
+#define FSTV0910_P1_FSPYBER_SYNCBYTE 0xf5b20010
+#define FSTV0910_P1_FSPYBER_UNSYNC 0xf5b20008
+#define FSTV0910_P1_FSPYBER_CTIME 0xf5b20007
+
+/* P1_SFERROR */
+#define RSTV0910_P1_SFERROR 0xf5c1
+#define FSTV0910_P1_SFEC_REGERR_VIT 0xf5c100ff
+
+/* P1_SFECSTATUS */
+#define RSTV0910_P1_SFECSTATUS 0xf5c3
+#define FSTV0910_P1_SFEC_ON 0xf5c30080
+#define FSTV0910_P1_SFEC_OFF 0xf5c30040
+#define FSTV0910_P1_LOCKEDSFEC 0xf5c30008
+#define FSTV0910_P1_SFEC_DELOCK 0xf5c30004
+#define FSTV0910_P1_SFEC_DEMODSEL 0xf5c30002
+#define FSTV0910_P1_SFEC_OVFON 0xf5c30001
+
+/* P1_SFKDIV12 */
+#define RSTV0910_P1_SFKDIV12 0xf5c4
+#define FSTV0910_P1_SFECKDIV12_MAN 0xf5c40080
+
+/* P1_SFKDIV23 */
+#define RSTV0910_P1_SFKDIV23 0xf5c5
+#define FSTV0910_P1_SFECKDIV23_MAN 0xf5c50080
+
+/* P1_SFKDIV34 */
+#define RSTV0910_P1_SFKDIV34 0xf5c6
+#define FSTV0910_P1_SFECKDIV34_MAN 0xf5c60080
+
+/* P1_SFKDIV56 */
+#define RSTV0910_P1_SFKDIV56 0xf5c7
+#define FSTV0910_P1_SFECKDIV56_MAN 0xf5c70080
+
+/* P1_SFKDIV67 */
+#define RSTV0910_P1_SFKDIV67 0xf5c8
+#define FSTV0910_P1_SFECKDIV67_MAN 0xf5c80080
+
+/* P1_SFKDIV78 */
+#define RSTV0910_P1_SFKDIV78 0xf5c9
+#define FSTV0910_P1_SFECKDIV78_MAN 0xf5c90080
+
+/* P1_SFSTATUS */
+#define RSTV0910_P1_SFSTATUS 0xf5cc
+#define FSTV0910_P1_SFEC_LINEOK 0xf5cc0080
+#define FSTV0910_P1_SFEC_ERROR 0xf5cc0040
+#define FSTV0910_P1_SFEC_DATA7 0xf5cc0020
+#define FSTV0910_P1_SFEC_PKTDNBRFAIL 0xf5cc0010
+#define FSTV0910_P1_TSSFEC_DEMODSEL 0xf5cc0008
+#define FSTV0910_P1_SFEC_NOSYNC 0xf5cc0004
+#define FSTV0910_P1_SFEC_UNREGULA 0xf5cc0002
+#define FSTV0910_P1_SFEC_READY 0xf5cc0001
+
+/* P1_SFDLYSET2 */
+#define RSTV0910_P1_SFDLYSET2 0xf5d0
+#define FSTV0910_P1_SFEC_DISABLE 0xf5d00002
+
+/* P1_SFERRCTRL */
+#define RSTV0910_P1_SFERRCTRL 0xf5d8
+#define FSTV0910_P1_SFEC_ERR_SOURCE 0xf5d800f0
+#define FSTV0910_P1_SFEC_NUM_EVENT 0xf5d80007
+
+/* P1_SFERRCNT2 */
+#define RSTV0910_P1_SFERRCNT2 0xf5d9
+#define FSTV0910_P1_SFERRC_OLDVALUE 0xf5d90080
+#define FSTV0910_P1_SFEC_ERR_CNT2 0xf5d9007f
+
+/* P1_SFERRCNT1 */
+#define RSTV0910_P1_SFERRCNT1 0xf5da
+#define FSTV0910_P1_SFEC_ERR_CNT1 0xf5da00ff
+
+/* P1_SFERRCNT0 */
+#define RSTV0910_P1_SFERRCNT0 0xf5db
+#define FSTV0910_P1_SFEC_ERR_CNT0 0xf5db00ff
+
+/* RCCFG2 */
+#define RSTV0910_RCCFG2 0xf600
+#define FSTV0910_TSRCFIFO_DVBCI 0xf6000080
+#define FSTV0910_TSRCFIFO_SERIAL 0xf6000040
+#define FSTV0910_TSRCFIFO_DISABLE 0xf6000020
+#define FSTV0910_TSFIFO_2TORC 0xf6000010
+#define FSTV0910_TSRCFIFO_HSGNLOUT 0xf6000008
+#define FSTV0910_TSRCFIFO_ERRMODE 0xf6000006
+
+/* RCCFG1 */
+#define RSTV0910_RCCFG1 0xf601
+#define FSTV0910_TSRCFIFO_MANSPEED 0xf60100c0
+#define FSTV0910_TSRCFIFO_PERMDATA 0xf6010020
+#define FSTV0910_TSRCFIFO_NONEWSGNL 0xf6010010
+#define FSTV0910_TSRCFIFO_INVDATA 0xf6010001
+
+/* RCCFG0 */
+#define RSTV0910_RCCFG0 0xf602
+#define FSTV0910_TSRCFIFO_BCLKDEL1CK 0xf60200c0
+#define FSTV0910_TSRCFIFO_DUTY50 0xf6020010
+#define FSTV0910_TSRCFIFO_NSGNL2DATA 0xf6020008
+#define FSTV0910_TSRCFIFO_NPDSGNL 0xf6020004
+
+/* RCINSDEL2 */
+#define RSTV0910_RCINSDEL2 0xf603
+#define FSTV0910_TSRCDEL_SYNCBYTE 0xf6030080
+#define FSTV0910_TSRCDEL_XXHEADER 0xf6030040
+#define FSTV0910_TSRCDEL_BBHEADER 0xf6030020
+#define FSTV0910_TSRCDEL_DATAFIELD 0xf6030010
+#define FSTV0910_TSRCINSDEL_ISCR 0xf6030008
+#define FSTV0910_TSRCINSDEL_NPD 0xf6030004
+#define FSTV0910_TSRCINSDEL_RSPARITY 0xf6030002
+#define FSTV0910_TSRCINSDEL_CRC8 0xf6030001
+
+/* RCINSDEL1 */
+#define RSTV0910_RCINSDEL1 0xf604
+#define FSTV0910_TSRCINS_BBPADDING 0xf6040080
+#define FSTV0910_TSRCINS_BCHFEC 0xf6040040
+#define FSTV0910_TSRCINS_EMODCOD 0xf6040010
+#define FSTV0910_TSRCINS_TOKEN 0xf6040008
+#define FSTV0910_TSRCINS_XXXERR 0xf6040004
+#define FSTV0910_TSRCINS_MATYPE 0xf6040002
+#define FSTV0910_TSRCINS_UPL 0xf6040001
+
+/* RCINSDEL0 */
+#define RSTV0910_RCINSDEL0 0xf605
+#define FSTV0910_TSRCINS_DFL 0xf6050080
+#define FSTV0910_TSRCINS_SYNCD 0xf6050040
+#define FSTV0910_TSRCINS_BLOCLEN 0xf6050020
+#define FSTV0910_TSRCINS_SIGPCOUNT 0xf6050010
+#define FSTV0910_TSRCINS_FIFO 0xf6050008
+#define FSTV0910_TSRCINS_REALPACK 0xf6050004
+#define FSTV0910_TSRCINS_TSCONFIG 0xf6050002
+#define FSTV0910_TSRCINS_LATENCY 0xf6050001
+
+/* RCSTATUS */
+#define RSTV0910_RCSTATUS 0xf606
+#define FSTV0910_TSRCFIFO_LINEOK 0xf6060080
+#define FSTV0910_TSRCFIFO_ERROR 0xf6060040
+#define FSTV0910_TSRCREGUL_ERROR 0xf6060010
+#define FSTV0910_TSRCFIFO_DEMODSEL 0xf6060008
+#define FSTV0910_TSRCFIFOSPEED_STORE 0xf6060004
+#define FSTV0910_TSRCSPEED_IMPOSSIBLE 0xf6060001
+
+/* RCSPEED */
+#define RSTV0910_RCSPEED 0xf607
+#define FSTV0910_TSRCFIFO_OUTSPEED 0xf60700ff
+
+/* TSGENERAL */
+#define RSTV0910_TSGENERAL 0xf630
+#define FSTV0910_TSFIFO_DISTS2PAR 0xf6300040
+#define FSTV0910_MUXSTREAM_OUTMODE 0xf6300008
+#define FSTV0910_TSFIFO_PERMPARAL 0xf6300006
+
+/* P1_DISIRQCFG */
+#define RSTV0910_P1_DISIRQCFG 0xf700
+#define FSTV0910_P1_ENRXEND 0xf7000040
+#define FSTV0910_P1_ENRXFIFO8B 0xf7000020
+#define FSTV0910_P1_ENTRFINISH 0xf7000010
+#define FSTV0910_P1_ENTIMEOUT 0xf7000008
+#define FSTV0910_P1_ENTXEND 0xf7000004
+#define FSTV0910_P1_ENTXFIFO64B 0xf7000002
+#define FSTV0910_P1_ENGAPBURST 0xf7000001
+
+/* P1_DISIRQSTAT */
+#define RSTV0910_P1_DISIRQSTAT 0xf701
+#define FSTV0910_P1_IRQRXEND 0xf7010040
+#define FSTV0910_P1_IRQRXFIFO8B 0xf7010020
+#define FSTV0910_P1_IRQTRFINISH 0xf7010010
+#define FSTV0910_P1_IRQTIMEOUT 0xf7010008
+#define FSTV0910_P1_IRQTXEND 0xf7010004
+#define FSTV0910_P1_IRQTXFIFO64B 0xf7010002
+#define FSTV0910_P1_IRQGAPBURST 0xf7010001
+
+/* P1_DISTXCFG */
+#define RSTV0910_P1_DISTXCFG 0xf702
+#define FSTV0910_P1_DISTX_RESET 0xf7020080
+#define FSTV0910_P1_TIM_OFF 0xf7020040
+#define FSTV0910_P1_TIM_CMD 0xf7020030
+#define FSTV0910_P1_ENVELOP 0xf7020008
+#define FSTV0910_P1_DIS_PRECHARGE 0xf7020004
+#define FSTV0910_P1_DISEQC_MODE 0xf7020003
+
+/* P1_DISTXSTATUS */
+#define RSTV0910_P1_DISTXSTATUS 0xf703
+#define FSTV0910_P1_TX_FIFO_FULL 0xf7030040
+#define FSTV0910_P1_TX_IDLE 0xf7030020
+#define FSTV0910_P1_GAP_BURST 0xf7030010
+#define FSTV0910_P1_TX_FIFO64B 0xf7030008
+#define FSTV0910_P1_TX_END 0xf7030004
+#define FSTV0910_P1_TR_TIMEOUT 0xf7030002
+#define FSTV0910_P1_TR_FINISH 0xf7030001
+
+/* P1_DISTXBYTES */
+#define RSTV0910_P1_DISTXBYTES 0xf704
+#define FSTV0910_P1_TXFIFO_BYTES 0xf70400ff
+
+/* P1_DISTXFIFO */
+#define RSTV0910_P1_DISTXFIFO 0xf705
+#define FSTV0910_P1_DISEQC_TX_FIFO 0xf70500ff
+
+/* P1_DISTXF22 */
+#define RSTV0910_P1_DISTXF22 0xf706
+#define FSTV0910_P1_F22TX 0xf70600ff
+
+/* P1_DISTIMEOCFG */
+#define RSTV0910_P1_DISTIMEOCFG 0xf708
+#define FSTV0910_P1_RXCHOICE 0xf7080006
+#define FSTV0910_P1_TIMEOUT_OFF 0xf7080001
+
+/* P1_DISTIMEOUT */
+#define RSTV0910_P1_DISTIMEOUT 0xf709
+#define FSTV0910_P1_TIMEOUT_COUNT 0xf70900ff
+
+/* P1_DISRXCFG */
+#define RSTV0910_P1_DISRXCFG 0xf70a
+#define FSTV0910_P1_DISRX_RESET 0xf70a0080
+#define FSTV0910_P1_EXTENVELOP 0xf70a0040
+#define FSTV0910_P1_PINSELECT 0xf70a0038
+#define FSTV0910_P1_IGNORE_SHORT22K 0xf70a0004
+#define FSTV0910_P1_SIGNED_RXIN 0xf70a0002
+#define FSTV0910_P1_DISRX_ON 0xf70a0001
+
+/* P1_DISRXSTAT1 */
+#define RSTV0910_P1_DISRXSTAT1 0xf70b
+#define FSTV0910_P1_RXEND 0xf70b0080
+#define FSTV0910_P1_RXACTIVE 0xf70b0040
+#define FSTV0910_P1_RXDETECT 0xf70b0020
+#define FSTV0910_P1_CONTTONE 0xf70b0010
+#define FSTV0910_P1_8BFIFOREADY 0xf70b0008
+#define FSTV0910_P1_FIFOEMPTY 0xf70b0004
+
+/* P1_DISRXSTAT0 */
+#define RSTV0910_P1_DISRXSTAT0 0xf70c
+#define FSTV0910_P1_RXFAIL 0xf70c0080
+#define FSTV0910_P1_FIFOPFAIL 0xf70c0040
+#define FSTV0910_P1_RXNONBYTE 0xf70c0020
+#define FSTV0910_P1_FIFOOVF 0xf70c0010
+#define FSTV0910_P1_SHORT22K 0xf70c0008
+#define FSTV0910_P1_RXMSGLOST 0xf70c0004
+
+/* P1_DISRXBYTES */
+#define RSTV0910_P1_DISRXBYTES 0xf70d
+#define FSTV0910_P1_RXFIFO_BYTES 0xf70d001f
+
+/* P1_DISRXPARITY1 */
+#define RSTV0910_P1_DISRXPARITY1 0xf70e
+#define FSTV0910_P1_DISRX_PARITY1 0xf70e00ff
+
+/* P1_DISRXPARITY0 */
+#define RSTV0910_P1_DISRXPARITY0 0xf70f
+#define FSTV0910_P1_DISRX_PARITY0 0xf70f00ff
+
+/* P1_DISRXFIFO */
+#define RSTV0910_P1_DISRXFIFO 0xf710
+#define FSTV0910_P1_DISEQC_RX_FIFO 0xf71000ff
+
+/* P1_DISRXDC1 */
+#define RSTV0910_P1_DISRXDC1 0xf711
+#define FSTV0910_P1_DC_VALUE1 0xf7110103
+
+/* P1_DISRXDC0 */
+#define RSTV0910_P1_DISRXDC0 0xf712
+#define FSTV0910_P1_DC_VALUE0 0xf71200ff
+
+/* P1_DISRXF221 */
+#define RSTV0910_P1_DISRXF221 0xf714
+#define FSTV0910_P1_F22RX1 0xf714000f
+
+/* P1_DISRXF220 */
+#define RSTV0910_P1_DISRXF220 0xf715
+#define FSTV0910_P1_F22RX0 0xf71500ff
+
+/* P1_DISRXF100 */
+#define RSTV0910_P1_DISRXF100 0xf716
+#define FSTV0910_P1_F100RX 0xf71600ff
+
+/* P1_DISRXSHORT22K */
+#define RSTV0910_P1_DISRXSHORT22K 0xf71c
+#define FSTV0910_P1_SHORT22K_LENGTH 0xf71c001f
+
+/* P1_ACRPRESC */
+#define RSTV0910_P1_ACRPRESC 0xf71e
+#define FSTV0910_P1_ACR_PRESC 0xf71e0007
+
+/* P1_ACRDIV */
+#define RSTV0910_P1_ACRDIV 0xf71f
+#define FSTV0910_P1_ACR_DIV 0xf71f00ff
+
+/* P2_DISIRQCFG */
+#define RSTV0910_P2_DISIRQCFG 0xf740
+#define FSTV0910_P2_ENRXEND 0xf7400040
+#define FSTV0910_P2_ENRXFIFO8B 0xf7400020
+#define FSTV0910_P2_ENTRFINISH 0xf7400010
+#define FSTV0910_P2_ENTIMEOUT 0xf7400008
+#define FSTV0910_P2_ENTXEND 0xf7400004
+#define FSTV0910_P2_ENTXFIFO64B 0xf7400002
+#define FSTV0910_P2_ENGAPBURST 0xf7400001
+
+/* P2_DISIRQSTAT */
+#define RSTV0910_P2_DISIRQSTAT 0xf741
+#define FSTV0910_P2_IRQRXEND 0xf7410040
+#define FSTV0910_P2_IRQRXFIFO8B 0xf7410020
+#define FSTV0910_P2_IRQTRFINISH 0xf7410010
+#define FSTV0910_P2_IRQTIMEOUT 0xf7410008
+#define FSTV0910_P2_IRQTXEND 0xf7410004
+#define FSTV0910_P2_IRQTXFIFO64B 0xf7410002
+#define FSTV0910_P2_IRQGAPBURST 0xf7410001
+
+/* P2_DISTXCFG */
+#define RSTV0910_P2_DISTXCFG 0xf742
+#define FSTV0910_P2_DISTX_RESET 0xf7420080
+#define FSTV0910_P2_TIM_OFF 0xf7420040
+#define FSTV0910_P2_TIM_CMD 0xf7420030
+#define FSTV0910_P2_ENVELOP 0xf7420008
+#define FSTV0910_P2_DIS_PRECHARGE 0xf7420004
+#define FSTV0910_P2_DISEQC_MODE 0xf7420003
+
+/* P2_DISTXSTATUS */
+#define RSTV0910_P2_DISTXSTATUS 0xf743
+#define FSTV0910_P2_TX_FIFO_FULL 0xf7430040
+#define FSTV0910_P2_TX_IDLE 0xf7430020
+#define FSTV0910_P2_GAP_BURST 0xf7430010
+#define FSTV0910_P2_TX_FIFO64B 0xf7430008
+#define FSTV0910_P2_TX_END 0xf7430004
+#define FSTV0910_P2_TR_TIMEOUT 0xf7430002
+#define FSTV0910_P2_TR_FINISH 0xf7430001
+
+/* P2_DISTXBYTES */
+#define RSTV0910_P2_DISTXBYTES 0xf744
+#define FSTV0910_P2_TXFIFO_BYTES 0xf74400ff
+
+/* P2_DISTXFIFO */
+#define RSTV0910_P2_DISTXFIFO 0xf745
+#define FSTV0910_P2_DISEQC_TX_FIFO 0xf74500ff
+
+/* P2_DISTXF22 */
+#define RSTV0910_P2_DISTXF22 0xf746
+#define FSTV0910_P2_F22TX 0xf74600ff
+
+/* P2_DISTIMEOCFG */
+#define RSTV0910_P2_DISTIMEOCFG 0xf748
+#define FSTV0910_P2_RXCHOICE 0xf7480006
+#define FSTV0910_P2_TIMEOUT_OFF 0xf7480001
+
+/* P2_DISTIMEOUT */
+#define RSTV0910_P2_DISTIMEOUT 0xf749
+#define FSTV0910_P2_TIMEOUT_COUNT 0xf74900ff
+
+/* P2_DISRXCFG */
+#define RSTV0910_P2_DISRXCFG 0xf74a
+#define FSTV0910_P2_DISRX_RESET 0xf74a0080
+#define FSTV0910_P2_EXTENVELOP 0xf74a0040
+#define FSTV0910_P2_PINSELECT 0xf74a0038
+#define FSTV0910_P2_IGNORE_SHORT22K 0xf74a0004
+#define FSTV0910_P2_SIGNED_RXIN 0xf74a0002
+#define FSTV0910_P2_DISRX_ON 0xf74a0001
+
+/* P2_DISRXSTAT1 */
+#define RSTV0910_P2_DISRXSTAT1 0xf74b
+#define FSTV0910_P2_RXEND 0xf74b0080
+#define FSTV0910_P2_RXACTIVE 0xf74b0040
+#define FSTV0910_P2_RXDETECT 0xf74b0020
+#define FSTV0910_P2_CONTTONE 0xf74b0010
+#define FSTV0910_P2_8BFIFOREADY 0xf74b0008
+#define FSTV0910_P2_FIFOEMPTY 0xf74b0004
+
+/* P2_DISRXSTAT0 */
+#define RSTV0910_P2_DISRXSTAT0 0xf74c
+#define FSTV0910_P2_RXFAIL 0xf74c0080
+#define FSTV0910_P2_FIFOPFAIL 0xf74c0040
+#define FSTV0910_P2_RXNONBYTE 0xf74c0020
+#define FSTV0910_P2_FIFOOVF 0xf74c0010
+#define FSTV0910_P2_SHORT22K 0xf74c0008
+#define FSTV0910_P2_RXMSGLOST 0xf74c0004
+
+/* P2_DISRXBYTES */
+#define RSTV0910_P2_DISRXBYTES 0xf74d
+#define FSTV0910_P2_RXFIFO_BYTES 0xf74d001f
+
+/* P2_DISRXPARITY1 */
+#define RSTV0910_P2_DISRXPARITY1 0xf74e
+#define FSTV0910_P2_DISRX_PARITY1 0xf74e00ff
+
+/* P2_DISRXPARITY0 */
+#define RSTV0910_P2_DISRXPARITY0 0xf74f
+#define FSTV0910_P2_DISRX_PARITY0 0xf74f00ff
+
+/* P2_DISRXFIFO */
+#define RSTV0910_P2_DISRXFIFO 0xf750
+#define FSTV0910_P2_DISEQC_RX_FIFO 0xf75000ff
+
+/* P2_DISRXDC1 */
+#define RSTV0910_P2_DISRXDC1 0xf751
+#define FSTV0910_P2_DC_VALUE1 0xf7510103
+
+/* P2_DISRXDC0 */
+#define RSTV0910_P2_DISRXDC0 0xf752
+#define FSTV0910_P2_DC_VALUE0 0xf75200ff
+
+/* P2_DISRXF221 */
+#define RSTV0910_P2_DISRXF221 0xf754
+#define FSTV0910_P2_F22RX1 0xf754000f
+
+/* P2_DISRXF220 */
+#define RSTV0910_P2_DISRXF220 0xf755
+#define FSTV0910_P2_F22RX0 0xf75500ff
+
+/* P2_DISRXF100 */
+#define RSTV0910_P2_DISRXF100 0xf756
+#define FSTV0910_P2_F100RX 0xf75600ff
+
+/* P2_DISRXSHORT22K */
+#define RSTV0910_P2_DISRXSHORT22K 0xf75c
+#define FSTV0910_P2_SHORT22K_LENGTH 0xf75c001f
+
+/* P2_ACRPRESC */
+#define RSTV0910_P2_ACRPRESC 0xf75e
+#define FSTV0910_P2_ACR_PRESC 0xf75e0007
+
+/* P2_ACRDIV */
+#define RSTV0910_P2_ACRDIV 0xf75f
+#define FSTV0910_P2_ACR_DIV 0xf75f00ff
+
+/* P1_NBITER_NF1 */
+#define RSTV0910_P1_NBITER_NF1 0xfa00
+#define FSTV0910_P1_NBITER_NF_QPSK_1_4 0xfa0000ff
+
+/* P1_NBITER_NF2 */
+#define RSTV0910_P1_NBITER_NF2 0xfa01
+#define FSTV0910_P1_NBITER_NF_QPSK_1_3 0xfa0100ff
+
+/* P1_NBITER_NF3 */
+#define RSTV0910_P1_NBITER_NF3 0xfa02
+#define FSTV0910_P1_NBITER_NF_QPSK_2_5 0xfa0200ff
+
+/* P1_NBITER_NF4 */
+#define RSTV0910_P1_NBITER_NF4 0xfa03
+#define FSTV0910_P1_NBITER_NF_QPSK_1_2 0xfa0300ff
+
+/* P1_NBITER_NF5 */
+#define RSTV0910_P1_NBITER_NF5 0xfa04
+#define FSTV0910_P1_NBITER_NF_QPSK_3_5 0xfa0400ff
+
+/* P1_NBITER_NF6 */
+#define RSTV0910_P1_NBITER_NF6 0xfa05
+#define FSTV0910_P1_NBITER_NF_QPSK_2_3 0xfa0500ff
+
+/* P1_NBITER_NF7 */
+#define RSTV0910_P1_NBITER_NF7 0xfa06
+#define FSTV0910_P1_NBITER_NF_QPSK_3_4 0xfa0600ff
+
+/* P1_NBITER_NF8 */
+#define RSTV0910_P1_NBITER_NF8 0xfa07
+#define FSTV0910_P1_NBITER_NF_QPSK_4_5 0xfa0700ff
+
+/* P1_NBITER_NF9 */
+#define RSTV0910_P1_NBITER_NF9 0xfa08
+#define FSTV0910_P1_NBITER_NF_QPSK_5_6 0xfa0800ff
+
+/* P1_NBITER_NF10 */
+#define RSTV0910_P1_NBITER_NF10 0xfa09
+#define FSTV0910_P1_NBITER_NF_QPSK_8_9 0xfa0900ff
+
+/* P1_NBITER_NF11 */
+#define RSTV0910_P1_NBITER_NF11 0xfa0a
+#define FSTV0910_P1_NBITER_NF_QPSK_9_10 0xfa0a00ff
+
+/* P1_NBITER_NF12 */
+#define RSTV0910_P1_NBITER_NF12 0xfa0b
+#define FSTV0910_P1_NBITER_NF_8PSK_3_5 0xfa0b00ff
+
+/* P1_NBITER_NF13 */
+#define RSTV0910_P1_NBITER_NF13 0xfa0c
+#define FSTV0910_P1_NBITER_NF_8PSK_2_3 0xfa0c00ff
+
+/* P1_NBITER_NF14 */
+#define RSTV0910_P1_NBITER_NF14 0xfa0d
+#define FSTV0910_P1_NBITER_NF_8PSK_3_4 0xfa0d00ff
+
+/* P1_NBITER_NF15 */
+#define RSTV0910_P1_NBITER_NF15 0xfa0e
+#define FSTV0910_P1_NBITER_NF_8PSK_5_6 0xfa0e00ff
+
+/* P1_NBITER_NF16 */
+#define RSTV0910_P1_NBITER_NF16 0xfa0f
+#define FSTV0910_P1_NBITER_NF_8PSK_8_9 0xfa0f00ff
+
+/* P1_NBITER_NF17 */
+#define RSTV0910_P1_NBITER_NF17 0xfa10
+#define FSTV0910_P1_NBITER_NF_8PSK_9_10 0xfa1000ff
+
+/* P1_NBITER_NF18 */
+#define RSTV0910_P1_NBITER_NF18 0xfa11
+#define FSTV0910_P1_NBITER_NF_16APSK_2_3 0xfa1100ff
+
+/* P1_NBITER_NF19 */
+#define RSTV0910_P1_NBITER_NF19 0xfa12
+#define FSTV0910_P1_NBITER_NF_16APSK_3_4 0xfa1200ff
+
+/* P1_NBITER_NF20 */
+#define RSTV0910_P1_NBITER_NF20 0xfa13
+#define FSTV0910_P1_NBITER_NF_16APSK_4_5 0xfa1300ff
+
+/* P1_NBITER_NF21 */
+#define RSTV0910_P1_NBITER_NF21 0xfa14
+#define FSTV0910_P1_NBITER_NF_16APSK_5_6 0xfa1400ff
+
+/* P1_NBITER_NF22 */
+#define RSTV0910_P1_NBITER_NF22 0xfa15
+#define FSTV0910_P1_NBITER_NF_16APSK_8_9 0xfa1500ff
+
+/* P1_NBITER_NF23 */
+#define RSTV0910_P1_NBITER_NF23 0xfa16
+#define FSTV0910_P1_NBITER_NF_16APSK_9_10 0xfa1600ff
+
+/* P1_NBITER_NF24 */
+#define RSTV0910_P1_NBITER_NF24 0xfa17
+#define FSTV0910_P1_NBITER_NF_32APSK_3_4 0xfa1700ff
+
+/* P1_NBITER_NF25 */
+#define RSTV0910_P1_NBITER_NF25 0xfa18
+#define FSTV0910_P1_NBITER_NF_32APSK_4_5 0xfa1800ff
+
+/* P1_NBITER_NF26 */
+#define RSTV0910_P1_NBITER_NF26 0xfa19
+#define FSTV0910_P1_NBITER_NF_32APSK_5_6 0xfa1900ff
+
+/* P1_NBITER_NF27 */
+#define RSTV0910_P1_NBITER_NF27 0xfa1a
+#define FSTV0910_P1_NBITER_NF_32APSK_8_9 0xfa1a00ff
+
+/* P1_NBITER_NF28 */
+#define RSTV0910_P1_NBITER_NF28 0xfa1b
+#define FSTV0910_P1_NBITER_NF_32APSK_9_10 0xfa1b00ff
+
+/* P1_NBITER_SF1 */
+#define RSTV0910_P1_NBITER_SF1 0xfa1c
+#define FSTV0910_P1_NBITER_SF_QPSK_1_4 0xfa1c00ff
+
+/* P1_NBITER_SF2 */
+#define RSTV0910_P1_NBITER_SF2 0xfa1d
+#define FSTV0910_P1_NBITER_SF_QPSK_1_3 0xfa1d00ff
+
+/* P1_NBITER_SF3 */
+#define RSTV0910_P1_NBITER_SF3 0xfa1e
+#define FSTV0910_P1_NBITER_SF_QPSK_2_5 0xfa1e00ff
+
+/* P1_NBITER_SF4 */
+#define RSTV0910_P1_NBITER_SF4 0xfa1f
+#define FSTV0910_P1_NBITER_SF_QPSK_1_2 0xfa1f00ff
+
+/* P1_NBITER_SF5 */
+#define RSTV0910_P1_NBITER_SF5 0xfa20
+#define FSTV0910_P1_NBITER_SF_QPSK_3_5 0xfa2000ff
+
+/* P1_NBITER_SF6 */
+#define RSTV0910_P1_NBITER_SF6 0xfa21
+#define FSTV0910_P1_NBITER_SF_QPSK_2_3 0xfa2100ff
+
+/* P1_NBITER_SF7 */
+#define RSTV0910_P1_NBITER_SF7 0xfa22
+#define FSTV0910_P1_NBITER_SF_QPSK_3_4 0xfa2200ff
+
+/* P1_NBITER_SF8 */
+#define RSTV0910_P1_NBITER_SF8 0xfa23
+#define FSTV0910_P1_NBITER_SF_QPSK_4_5 0xfa2300ff
+
+/* P1_NBITER_SF9 */
+#define RSTV0910_P1_NBITER_SF9 0xfa24
+#define FSTV0910_P1_NBITER_SF_QPSK_5_6 0xfa2400ff
+
+/* P1_NBITER_SF10 */
+#define RSTV0910_P1_NBITER_SF10 0xfa25
+#define FSTV0910_P1_NBITER_SF_QPSK_8_9 0xfa2500ff
+
+/* P1_NBITER_SF12 */
+#define RSTV0910_P1_NBITER_SF12 0xfa26
+#define FSTV0910_P1_NBITER_SF_8PSK_3_5 0xfa2600ff
+
+/* P1_NBITER_SF13 */
+#define RSTV0910_P1_NBITER_SF13 0xfa27
+#define FSTV0910_P1_NBITER_SF_8PSK_2_3 0xfa2700ff
+
+/* P1_NBITER_SF14 */
+#define RSTV0910_P1_NBITER_SF14 0xfa28
+#define FSTV0910_P1_NBITER_SF_8PSK_3_4 0xfa2800ff
+
+/* P1_NBITER_SF15 */
+#define RSTV0910_P1_NBITER_SF15 0xfa29
+#define FSTV0910_P1_NBITER_SF_8PSK_5_6 0xfa2900ff
+
+/* P1_NBITER_SF16 */
+#define RSTV0910_P1_NBITER_SF16 0xfa2a
+#define FSTV0910_P1_NBITER_SF_8PSK_8_9 0xfa2a00ff
+
+/* P1_NBITER_SF18 */
+#define RSTV0910_P1_NBITER_SF18 0xfa2b
+#define FSTV0910_P1_NBITER_SF_16APSK_2_3 0xfa2b00ff
+
+/* P1_NBITER_SF19 */
+#define RSTV0910_P1_NBITER_SF19 0xfa2c
+#define FSTV0910_P1_NBITER_SF_16APSK_3_4 0xfa2c00ff
+
+/* P1_NBITER_SF20 */
+#define RSTV0910_P1_NBITER_SF20 0xfa2d
+#define FSTV0910_P1_NBITER_SF_16APSK_4_5 0xfa2d00ff
+
+/* P1_NBITER_SF21 */
+#define RSTV0910_P1_NBITER_SF21 0xfa2e
+#define FSTV0910_P1_NBITER_SF_16APSK_5_6 0xfa2e00ff
+
+/* P1_NBITER_SF22 */
+#define RSTV0910_P1_NBITER_SF22 0xfa2f
+#define FSTV0910_P1_NBITER_SF_16APSK_8_9 0xfa2f00ff
+
+/* P1_NBITER_SF24 */
+#define RSTV0910_P1_NBITER_SF24 0xfa30
+#define FSTV0910_P1_NBITER_SF_32APSK_3_4 0xfa3000ff
+
+/* P1_NBITER_SF25 */
+#define RSTV0910_P1_NBITER_SF25 0xfa31
+#define FSTV0910_P1_NBITER_SF_32APSK_4_5 0xfa3100ff
+
+/* P1_NBITER_SF26 */
+#define RSTV0910_P1_NBITER_SF26 0xfa32
+#define FSTV0910_P1_NBITER_SF_32APSK_5_6 0xfa3200ff
+
+/* P1_NBITER_SF27 */
+#define RSTV0910_P1_NBITER_SF27 0xfa33
+#define FSTV0910_P1_NBITER_SF_32APSK_8_9 0xfa3300ff
+
+/* SELSATUR6 */
+#define RSTV0910_SELSATUR6 0xfa34
+#define FSTV0910_SSAT_SF27 0xfa340008
+#define FSTV0910_SSAT_SF26 0xfa340004
+#define FSTV0910_SSAT_SF25 0xfa340002
+#define FSTV0910_SSAT_SF24 0xfa340001
+
+/* SELSATUR5 */
+#define RSTV0910_SELSATUR5 0xfa35
+#define FSTV0910_SSAT_SF22 0xfa350080
+#define FSTV0910_SSAT_SF21 0xfa350040
+#define FSTV0910_SSAT_SF20 0xfa350020
+#define FSTV0910_SSAT_SF19 0xfa350010
+#define FSTV0910_SSAT_SF18 0xfa350008
+#define FSTV0910_SSAT_SF16 0xfa350004
+#define FSTV0910_SSAT_SF15 0xfa350002
+#define FSTV0910_SSAT_SF14 0xfa350001
+
+/* SELSATUR4 */
+#define RSTV0910_SELSATUR4 0xfa36
+#define FSTV0910_SSAT_SF13 0xfa360080
+#define FSTV0910_SSAT_SF12 0xfa360040
+#define FSTV0910_SSAT_SF10 0xfa360020
+#define FSTV0910_SSAT_SF9 0xfa360010
+#define FSTV0910_SSAT_SF8 0xfa360008
+#define FSTV0910_SSAT_SF7 0xfa360004
+#define FSTV0910_SSAT_SF6 0xfa360002
+#define FSTV0910_SSAT_SF5 0xfa360001
+
+/* SELSATUR3 */
+#define RSTV0910_SELSATUR3 0xfa37
+#define FSTV0910_SSAT_SF4 0xfa370080
+#define FSTV0910_SSAT_SF3 0xfa370040
+#define FSTV0910_SSAT_SF2 0xfa370020
+#define FSTV0910_SSAT_SF1 0xfa370010
+#define FSTV0910_SSAT_NF28 0xfa370008
+#define FSTV0910_SSAT_NF27 0xfa370004
+#define FSTV0910_SSAT_NF26 0xfa370002
+#define FSTV0910_SSAT_NF25 0xfa370001
+
+/* SELSATUR2 */
+#define RSTV0910_SELSATUR2 0xfa38
+#define FSTV0910_SSAT_NF24 0xfa380080
+#define FSTV0910_SSAT_NF23 0xfa380040
+#define FSTV0910_SSAT_NF22 0xfa380020
+#define FSTV0910_SSAT_NF21 0xfa380010
+#define FSTV0910_SSAT_NF20 0xfa380008
+#define FSTV0910_SSAT_NF19 0xfa380004
+#define FSTV0910_SSAT_NF18 0xfa380002
+#define FSTV0910_SSAT_NF17 0xfa380001
+
+/* SELSATUR1 */
+#define RSTV0910_SELSATUR1 0xfa39
+#define FSTV0910_SSAT_NF16 0xfa390080
+#define FSTV0910_SSAT_NF15 0xfa390040
+#define FSTV0910_SSAT_NF14 0xfa390020
+#define FSTV0910_SSAT_NF13 0xfa390010
+#define FSTV0910_SSAT_NF12 0xfa390008
+#define FSTV0910_SSAT_NF11 0xfa390004
+#define FSTV0910_SSAT_NF10 0xfa390002
+#define FSTV0910_SSAT_NF9 0xfa390001
+
+/* SELSATUR0 */
+#define RSTV0910_SELSATUR0 0xfa3a
+#define FSTV0910_SSAT_NF8 0xfa3a0080
+#define FSTV0910_SSAT_NF7 0xfa3a0040
+#define FSTV0910_SSAT_NF6 0xfa3a0020
+#define FSTV0910_SSAT_NF5 0xfa3a0010
+#define FSTV0910_SSAT_NF4 0xfa3a0008
+#define FSTV0910_SSAT_NF3 0xfa3a0004
+#define FSTV0910_SSAT_NF2 0xfa3a0002
+#define FSTV0910_SSAT_NF1 0xfa3a0001
+
+/* GAINLLR_NF1 */
+#define RSTV0910_GAINLLR_NF1 0xfa40
+#define FSTV0910_GAINLLR_NF_QPSK_1_4 0xfa40007f
+
+/* GAINLLR_NF2 */
+#define RSTV0910_GAINLLR_NF2 0xfa41
+#define FSTV0910_GAINLLR_NF_QPSK_1_3 0xfa41007f
+
+/* GAINLLR_NF3 */
+#define RSTV0910_GAINLLR_NF3 0xfa42
+#define FSTV0910_GAINLLR_NF_QPSK_2_5 0xfa42007f
+
+/* GAINLLR_NF4 */
+#define RSTV0910_GAINLLR_NF4 0xfa43
+#define FSTV0910_GAINLLR_NF_QPSK_1_2 0xfa43007f
+
+/* GAINLLR_NF5 */
+#define RSTV0910_GAINLLR_NF5 0xfa44
+#define FSTV0910_GAINLLR_NF_QPSK_3_5 0xfa44007f
+
+/* GAINLLR_NF6 */
+#define RSTV0910_GAINLLR_NF6 0xfa45
+#define FSTV0910_GAINLLR_NF_QPSK_2_3 0xfa45007f
+
+/* GAINLLR_NF7 */
+#define RSTV0910_GAINLLR_NF7 0xfa46
+#define FSTV0910_GAINLLR_NF_QPSK_3_4 0xfa46007f
+
+/* GAINLLR_NF8 */
+#define RSTV0910_GAINLLR_NF8 0xfa47
+#define FSTV0910_GAINLLR_NF_QPSK_4_5 0xfa47007f
+
+/* GAINLLR_NF9 */
+#define RSTV0910_GAINLLR_NF9 0xfa48
+#define FSTV0910_GAINLLR_NF_QPSK_5_6 0xfa48007f
+
+/* GAINLLR_NF10 */
+#define RSTV0910_GAINLLR_NF10 0xfa49
+#define FSTV0910_GAINLLR_NF_QPSK_8_9 0xfa49007f
+
+/* GAINLLR_NF11 */
+#define RSTV0910_GAINLLR_NF11 0xfa4a
+#define FSTV0910_GAINLLR_NF_QPSK_9_10 0xfa4a007f
+
+/* GAINLLR_NF12 */
+#define RSTV0910_GAINLLR_NF12 0xfa4b
+#define FSTV0910_GAINLLR_NF_8PSK_3_5 0xfa4b007f
+
+/* GAINLLR_NF13 */
+#define RSTV0910_GAINLLR_NF13 0xfa4c
+#define FSTV0910_GAINLLR_NF_8PSK_2_3 0xfa4c007f
+
+/* GAINLLR_NF14 */
+#define RSTV0910_GAINLLR_NF14 0xfa4d
+#define FSTV0910_GAINLLR_NF_8PSK_3_4 0xfa4d007f
+
+/* GAINLLR_NF15 */
+#define RSTV0910_GAINLLR_NF15 0xfa4e
+#define FSTV0910_GAINLLR_NF_8PSK_5_6 0xfa4e007f
+
+/* GAINLLR_NF16 */
+#define RSTV0910_GAINLLR_NF16 0xfa4f
+#define FSTV0910_GAINLLR_NF_8PSK_8_9 0xfa4f007f
+
+/* GAINLLR_NF17 */
+#define RSTV0910_GAINLLR_NF17 0xfa50
+#define FSTV0910_GAINLLR_NF_8PSK_9_10 0xfa50007f
+
+/* GAINLLR_NF18 */
+#define RSTV0910_GAINLLR_NF18 0xfa51
+#define FSTV0910_GAINLLR_NF_16APSK_2_3 0xfa51007f
+
+/* GAINLLR_NF19 */
+#define RSTV0910_GAINLLR_NF19 0xfa52
+#define FSTV0910_GAINLLR_NF_16APSK_3_4 0xfa52007f
+
+/* GAINLLR_NF20 */
+#define RSTV0910_GAINLLR_NF20 0xfa53
+#define FSTV0910_GAINLLR_NF_16APSK_4_5 0xfa53007f
+
+/* GAINLLR_NF21 */
+#define RSTV0910_GAINLLR_NF21 0xfa54
+#define FSTV0910_GAINLLR_NF_16APSK_5_6 0xfa54007f
+
+/* GAINLLR_NF22 */
+#define RSTV0910_GAINLLR_NF22 0xfa55
+#define FSTV0910_GAINLLR_NF_16APSK_8_9 0xfa55007f
+
+/* GAINLLR_NF23 */
+#define RSTV0910_GAINLLR_NF23 0xfa56
+#define FSTV0910_GAINLLR_NF_16APSK_9_10 0xfa56007f
+
+/* GAINLLR_NF24 */
+#define RSTV0910_GAINLLR_NF24 0xfa57
+#define FSTV0910_GAINLLR_NF_32APSK_3_4 0xfa57007f
+
+/* GAINLLR_NF25 */
+#define RSTV0910_GAINLLR_NF25 0xfa58
+#define FSTV0910_GAINLLR_NF_32APSK_4_5 0xfa58007f
+
+/* GAINLLR_NF26 */
+#define RSTV0910_GAINLLR_NF26 0xfa59
+#define FSTV0910_GAINLLR_NF_32APSK_5_6 0xfa59007f
+
+/* GAINLLR_NF27 */
+#define RSTV0910_GAINLLR_NF27 0xfa5a
+#define FSTV0910_GAINLLR_NF_32APSK_8_9 0xfa5a007f
+
+/* GAINLLR_NF28 */
+#define RSTV0910_GAINLLR_NF28 0xfa5b
+#define FSTV0910_GAINLLR_NF_32APSK_9_10 0xfa5b007f
+
+/* GAINLLR_SF1 */
+#define RSTV0910_GAINLLR_SF1 0xfa5c
+#define FSTV0910_GAINLLR_SF_QPSK_1_4 0xfa5c007f
+
+/* GAINLLR_SF2 */
+#define RSTV0910_GAINLLR_SF2 0xfa5d
+#define FSTV0910_GAINLLR_SF_QPSK_1_3 0xfa5d007f
+
+/* GAINLLR_SF3 */
+#define RSTV0910_GAINLLR_SF3 0xfa5e
+#define FSTV0910_GAINLLR_SF_QPSK_2_5 0xfa5e007f
+
+/* GAINLLR_SF4 */
+#define RSTV0910_GAINLLR_SF4 0xfa5f
+#define FSTV0910_GAINLLR_SF_QPSK_1_2 0xfa5f007f
+
+/* GAINLLR_SF5 */
+#define RSTV0910_GAINLLR_SF5 0xfa60
+#define FSTV0910_GAINLLR_SF_QPSK_3_5 0xfa60007f
+
+/* GAINLLR_SF6 */
+#define RSTV0910_GAINLLR_SF6 0xfa61
+#define FSTV0910_GAINLLR_SF_QPSK_2_3 0xfa61007f
+
+/* GAINLLR_SF7 */
+#define RSTV0910_GAINLLR_SF7 0xfa62
+#define FSTV0910_GAINLLR_SF_QPSK_3_4 0xfa62007f
+
+/* GAINLLR_SF8 */
+#define RSTV0910_GAINLLR_SF8 0xfa63
+#define FSTV0910_GAINLLR_SF_QPSK_4_5 0xfa63007f
+
+/* GAINLLR_SF9 */
+#define RSTV0910_GAINLLR_SF9 0xfa64
+#define FSTV0910_GAINLLR_SF_QPSK_5_6 0xfa64007f
+
+/* GAINLLR_SF10 */
+#define RSTV0910_GAINLLR_SF10 0xfa65
+#define FSTV0910_GAINLLR_SF_QPSK_8_9 0xfa65007f
+
+/* GAINLLR_SF12 */
+#define RSTV0910_GAINLLR_SF12 0xfa66
+#define FSTV0910_GAINLLR_SF_8PSK_3_5 0xfa66007f
+
+/* GAINLLR_SF13 */
+#define RSTV0910_GAINLLR_SF13 0xfa67
+#define FSTV0910_GAINLLR_SF_8PSK_2_3 0xfa67007f
+
+/* GAINLLR_SF14 */
+#define RSTV0910_GAINLLR_SF14 0xfa68
+#define FSTV0910_GAINLLR_SF_8PSK_3_4 0xfa68007f
+
+/* GAINLLR_SF15 */
+#define RSTV0910_GAINLLR_SF15 0xfa69
+#define FSTV0910_GAINLLR_SF_8PSK_5_6 0xfa69007f
+
+/* GAINLLR_SF16 */
+#define RSTV0910_GAINLLR_SF16 0xfa6a
+#define FSTV0910_GAINLLR_SF_8PSK_8_9 0xfa6a007f
+
+/* GAINLLR_SF18 */
+#define RSTV0910_GAINLLR_SF18 0xfa6b
+#define FSTV0910_GAINLLR_SF_16APSK_2_3 0xfa6b007f
+
+/* GAINLLR_SF19 */
+#define RSTV0910_GAINLLR_SF19 0xfa6c
+#define FSTV0910_GAINLLR_SF_16APSK_3_4 0xfa6c007f
+
+/* GAINLLR_SF20 */
+#define RSTV0910_GAINLLR_SF20 0xfa6d
+#define FSTV0910_GAINLLR_SF_16APSK_4_5 0xfa6d007f
+
+/* GAINLLR_SF21 */
+#define RSTV0910_GAINLLR_SF21 0xfa6e
+#define FSTV0910_GAINLLR_SF_16APSK_5_6 0xfa6e007f
+
+/* GAINLLR_SF22 */
+#define RSTV0910_GAINLLR_SF22 0xfa6f
+#define FSTV0910_GAINLLR_SF_16APSK_8_9 0xfa6f007f
+
+/* GAINLLR_SF24 */
+#define RSTV0910_GAINLLR_SF24 0xfa70
+#define FSTV0910_GAINLLR_SF_32APSK_3_4 0xfa70007f
+
+/* GAINLLR_SF25 */
+#define RSTV0910_GAINLLR_SF25 0xfa71
+#define FSTV0910_GAINLLR_SF_32APSK_4_5 0xfa71007f
+
+/* GAINLLR_SF26 */
+#define RSTV0910_GAINLLR_SF26 0xfa72
+#define FSTV0910_GAINLLR_SF_32APSK_5_6 0xfa72007f
+
+/* GAINLLR_SF27 */
+#define RSTV0910_GAINLLR_SF27 0xfa73
+#define FSTV0910_GAINLLR_SF_32APSK_8_9 0xfa73007f
+
+/* CFGEXT */
+#define RSTV0910_CFGEXT 0xfa80
+#define FSTV0910_BYPBCH 0xfa800040
+#define FSTV0910_BYPLDPC 0xfa800020
+#define FSTV0910_SHORTMULT 0xfa800004
+
+/* GENCFG */
+#define RSTV0910_GENCFG 0xfa86
+#define FSTV0910_BROADCAST 0xfa860010
+#define FSTV0910_CROSSINPUT 0xfa860002
+#define FSTV0910_DDEMOD 0xfa860001
+
+/* LDPCERR1 */
+#define RSTV0910_LDPCERR1 0xfa96
+#define FSTV0910_LDPC_ERRORS1 0xfa9600ff
+
+/* LDPCERR0 */
+#define RSTV0910_LDPCERR0 0xfa97
+#define FSTV0910_LDPC_ERRORS0 0xfa9700ff
+
+/* BCHERR */
+#define RSTV0910_BCHERR 0xfa98
+#define FSTV0910_ERRORFLAG 0xfa980010
+#define FSTV0910_BCH_ERRORS_COUNTER 0xfa98000f
+
+/* P1_MAXEXTRAITER */
+#define RSTV0910_P1_MAXEXTRAITER 0xfab1
+#define FSTV0910_P1_MAX_EXTRA_ITER 0xfab100ff
+
+/* P2_MAXEXTRAITER */
+#define RSTV0910_P2_MAXEXTRAITER 0xfab6
+#define FSTV0910_P2_MAX_EXTRA_ITER 0xfab600ff
+
+/* P1_STATUSITER */
+#define RSTV0910_P1_STATUSITER 0xfabc
+#define FSTV0910_P1_STATUS_ITER 0xfabc00ff
+
+/* P1_STATUSMAXITER */
+#define RSTV0910_P1_STATUSMAXITER 0xfabd
+#define FSTV0910_P1_STATUS_MAX_ITER 0xfabd00ff
+
+/* P2_STATUSITER */
+#define RSTV0910_P2_STATUSITER 0xfabe
+#define FSTV0910_P2_STATUS_ITER 0xfabe00ff
+
+/* P2_STATUSMAXITER */
+#define RSTV0910_P2_STATUSMAXITER 0xfabf
+#define FSTV0910_P2_STATUS_MAX_ITER 0xfabf00ff
+
+/* P2_NBITER_NF1 */
+#define RSTV0910_P2_NBITER_NF1 0xfac0
+#define FSTV0910_P2_NBITER_NF_QPSK_1_4 0xfac000ff
+
+/* P2_NBITER_NF2 */
+#define RSTV0910_P2_NBITER_NF2 0xfac1
+#define FSTV0910_P2_NBITER_NF_QPSK_1_3 0xfac100ff
+
+/* P2_NBITER_NF3 */
+#define RSTV0910_P2_NBITER_NF3 0xfac2
+#define FSTV0910_P2_NBITER_NF_QPSK_2_5 0xfac200ff
+
+/* P2_NBITER_NF4 */
+#define RSTV0910_P2_NBITER_NF4 0xfac3
+#define FSTV0910_P2_NBITER_NF_QPSK_1_2 0xfac300ff
+
+/* P2_NBITER_NF5 */
+#define RSTV0910_P2_NBITER_NF5 0xfac4
+#define FSTV0910_P2_NBITER_NF_QPSK_3_5 0xfac400ff
+
+/* P2_NBITER_NF6 */
+#define RSTV0910_P2_NBITER_NF6 0xfac5
+#define FSTV0910_P2_NBITER_NF_QPSK_2_3 0xfac500ff
+
+/* P2_NBITER_NF7 */
+#define RSTV0910_P2_NBITER_NF7 0xfac6
+#define FSTV0910_P2_NBITER_NF_QPSK_3_4 0xfac600ff
+
+/* P2_NBITER_NF8 */
+#define RSTV0910_P2_NBITER_NF8 0xfac7
+#define FSTV0910_P2_NBITER_NF_QPSK_4_5 0xfac700ff
+
+/* P2_NBITER_NF9 */
+#define RSTV0910_P2_NBITER_NF9 0xfac8
+#define FSTV0910_P2_NBITER_NF_QPSK_5_6 0xfac800ff
+
+/* P2_NBITER_NF10 */
+#define RSTV0910_P2_NBITER_NF10 0xfac9
+#define FSTV0910_P2_NBITER_NF_QPSK_8_9 0xfac900ff
+
+/* P2_NBITER_NF11 */
+#define RSTV0910_P2_NBITER_NF11 0xfaca
+#define FSTV0910_P2_NBITER_NF_QPSK_9_10 0xfaca00ff
+
+/* P2_NBITER_NF12 */
+#define RSTV0910_P2_NBITER_NF12 0xfacb
+#define FSTV0910_P2_NBITER_NF_8PSK_3_5 0xfacb00ff
+
+/* P2_NBITER_NF13 */
+#define RSTV0910_P2_NBITER_NF13 0xfacc
+#define FSTV0910_P2_NBITER_NF_8PSK_2_3 0xfacc00ff
+
+/* P2_NBITER_NF14 */
+#define RSTV0910_P2_NBITER_NF14 0xfacd
+#define FSTV0910_P2_NBITER_NF_8PSK_3_4 0xfacd00ff
+
+/* P2_NBITER_NF15 */
+#define RSTV0910_P2_NBITER_NF15 0xface
+#define FSTV0910_P2_NBITER_NF_8PSK_5_6 0xface00ff
+
+/* P2_NBITER_NF16 */
+#define RSTV0910_P2_NBITER_NF16 0xfacf
+#define FSTV0910_P2_NBITER_NF_8PSK_8_9 0xfacf00ff
+
+/* P2_NBITER_NF17 */
+#define RSTV0910_P2_NBITER_NF17 0xfad0
+#define FSTV0910_P2_NBITER_NF_8PSK_9_10 0xfad000ff
+
+/* P2_NBITER_NF18 */
+#define RSTV0910_P2_NBITER_NF18 0xfad1
+#define FSTV0910_P2_NBITER_NF_16APSK_2_3 0xfad100ff
+
+/* P2_NBITER_NF19 */
+#define RSTV0910_P2_NBITER_NF19 0xfad2
+#define FSTV0910_P2_NBITER_NF_16APSK_3_4 0xfad200ff
+
+/* P2_NBITER_NF20 */
+#define RSTV0910_P2_NBITER_NF20 0xfad3
+#define FSTV0910_P2_NBITER_NF_16APSK_4_5 0xfad300ff
+
+/* P2_NBITER_NF21 */
+#define RSTV0910_P2_NBITER_NF21 0xfad4
+#define FSTV0910_P2_NBITER_NF_16APSK_5_6 0xfad400ff
+
+/* P2_NBITER_NF22 */
+#define RSTV0910_P2_NBITER_NF22 0xfad5
+#define FSTV0910_P2_NBITER_NF_16APSK_8_9 0xfad500ff
+
+/* P2_NBITER_NF23 */
+#define RSTV0910_P2_NBITER_NF23 0xfad6
+#define FSTV0910_P2_NBITER_NF_16APSK_9_10 0xfad600ff
+
+/* P2_NBITER_NF24 */
+#define RSTV0910_P2_NBITER_NF24 0xfad7
+#define FSTV0910_P2_NBITER_NF_32APSK_3_4 0xfad700ff
+
+/* P2_NBITER_NF25 */
+#define RSTV0910_P2_NBITER_NF25 0xfad8
+#define FSTV0910_P2_NBITER_NF_32APSK_4_5 0xfad800ff
+
+/* P2_NBITER_NF26 */
+#define RSTV0910_P2_NBITER_NF26 0xfad9
+#define FSTV0910_P2_NBITER_NF_32APSK_5_6 0xfad900ff
+
+/* P2_NBITER_NF27 */
+#define RSTV0910_P2_NBITER_NF27 0xfada
+#define FSTV0910_P2_NBITER_NF_32APSK_8_9 0xfada00ff
+
+/* P2_NBITER_NF28 */
+#define RSTV0910_P2_NBITER_NF28 0xfadb
+#define FSTV0910_P2_NBITER_NF_32APSK_9_10 0xfadb00ff
+
+/* P2_NBITER_SF1 */
+#define RSTV0910_P2_NBITER_SF1 0xfadc
+#define FSTV0910_P2_NBITER_SF_QPSK_1_4 0xfadc00ff
+
+/* P2_NBITER_SF2 */
+#define RSTV0910_P2_NBITER_SF2 0xfadd
+#define FSTV0910_P2_NBITER_SF_QPSK_1_3 0xfadd00ff
+
+/* P2_NBITER_SF3 */
+#define RSTV0910_P2_NBITER_SF3 0xfade
+#define FSTV0910_P2_NBITER_SF_QPSK_2_5 0xfade00ff
+
+/* P2_NBITER_SF4 */
+#define RSTV0910_P2_NBITER_SF4 0xfadf
+#define FSTV0910_P2_NBITER_SF_QPSK_1_2 0xfadf00ff
+
+/* P2_NBITER_SF5 */
+#define RSTV0910_P2_NBITER_SF5 0xfae0
+#define FSTV0910_P2_NBITER_SF_QPSK_3_5 0xfae000ff
+
+/* P2_NBITER_SF6 */
+#define RSTV0910_P2_NBITER_SF6 0xfae1
+#define FSTV0910_P2_NBITER_SF_QPSK_2_3 0xfae100ff
+
+/* P2_NBITER_SF7 */
+#define RSTV0910_P2_NBITER_SF7 0xfae2
+#define FSTV0910_P2_NBITER_SF_QPSK_3_4 0xfae200ff
+
+/* P2_NBITER_SF8 */
+#define RSTV0910_P2_NBITER_SF8 0xfae3
+#define FSTV0910_P2_NBITER_SF_QPSK_4_5 0xfae300ff
+
+/* P2_NBITER_SF9 */
+#define RSTV0910_P2_NBITER_SF9 0xfae4
+#define FSTV0910_P2_NBITER_SF_QPSK_5_6 0xfae400ff
+
+/* P2_NBITER_SF10 */
+#define RSTV0910_P2_NBITER_SF10 0xfae5
+#define FSTV0910_P2_NBITER_SF_QPSK_8_9 0xfae500ff
+
+/* P2_NBITER_SF12 */
+#define RSTV0910_P2_NBITER_SF12 0xfae6
+#define FSTV0910_P2_NBITER_SF_8PSK_3_5 0xfae600ff
+
+/* P2_NBITER_SF13 */
+#define RSTV0910_P2_NBITER_SF13 0xfae7
+#define FSTV0910_P2_NBITER_SF_8PSK_2_3 0xfae700ff
+
+/* P2_NBITER_SF14 */
+#define RSTV0910_P2_NBITER_SF14 0xfae8
+#define FSTV0910_P2_NBITER_SF_8PSK_3_4 0xfae800ff
+
+/* P2_NBITER_SF15 */
+#define RSTV0910_P2_NBITER_SF15 0xfae9
+#define FSTV0910_P2_NBITER_SF_8PSK_5_6 0xfae900ff
+
+/* P2_NBITER_SF16 */
+#define RSTV0910_P2_NBITER_SF16 0xfaea
+#define FSTV0910_P2_NBITER_SF_8PSK_8_9 0xfaea00ff
+
+/* P2_NBITER_SF18 */
+#define RSTV0910_P2_NBITER_SF18 0xfaeb
+#define FSTV0910_P2_NBITER_SF_16APSK_2_3 0xfaeb00ff
+
+/* P2_NBITER_SF19 */
+#define RSTV0910_P2_NBITER_SF19 0xfaec
+#define FSTV0910_P2_NBITER_SF_16APSK_3_4 0xfaec00ff
+
+/* P2_NBITER_SF20 */
+#define RSTV0910_P2_NBITER_SF20 0xfaed
+#define FSTV0910_P2_NBITER_SF_16APSK_4_5 0xfaed00ff
+
+/* P2_NBITER_SF21 */
+#define RSTV0910_P2_NBITER_SF21 0xfaee
+#define FSTV0910_P2_NBITER_SF_16APSK_5_6 0xfaee00ff
+
+/* P2_NBITER_SF22 */
+#define RSTV0910_P2_NBITER_SF22 0xfaef
+#define FSTV0910_P2_NBITER_SF_16APSK_8_9 0xfaef00ff
+
+/* P2_NBITER_SF24 */
+#define RSTV0910_P2_NBITER_SF24 0xfaf0
+#define FSTV0910_P2_NBITER_SF_32APSK_3_4 0xfaf000ff
+
+/* P2_NBITER_SF25 */
+#define RSTV0910_P2_NBITER_SF25 0xfaf1
+#define FSTV0910_P2_NBITER_SF_32APSK_4_5 0xfaf100ff
+
+/* P2_NBITER_SF26 */
+#define RSTV0910_P2_NBITER_SF26 0xfaf2
+#define FSTV0910_P2_NBITER_SF_32APSK_5_6 0xfaf200ff
+
+/* P2_NBITER_SF27 */
+#define RSTV0910_P2_NBITER_SF27 0xfaf3
+#define FSTV0910_P2_NBITER_SF_32APSK_8_9 0xfaf300ff
+
+/* TSTRES0 */
+#define RSTV0910_TSTRES0 0xff11
+#define FSTV0910_FRESFEC 0xff110080
+#define FSTV0910_FRESSYM1 0xff110008
+#define FSTV0910_FRESSYM2 0xff110004
+
+/* TSTOUT */
+#define RSTV0910_TSTOUT 0xff12
+#define FSTV0910_TS 0xff12003e
+#define FSTV0910_TEST_OUT 0xff120001
+
+/* TSTIN */
+#define RSTV0910_TSTIN 0xff13
+#define FSTV0910_TEST_IN 0xff130080
+
+/* P2_TSTDMD */
+#define RSTV0910_P2_TSTDMD 0xff20
+#define FSTV0910_P2_CFRINIT_INVZIGZAG 0xff200008
+
+/* P2_TCTL1 */
+#define RSTV0910_P2_TCTL1 0xff24
+#define FSTV0910_P2_TST_IQSYMBSEL 0xff24001f
+
+/* P2_TCTL4 */
+#define RSTV0910_P2_TCTL4 0xff28
+#define FSTV0910_P2_CFR2TOCFR1_DVBS1 0xff2800c0
+
+/* P2_TPKTDELIN */
+#define RSTV0910_P2_TPKTDELIN 0xff37
+#define FSTV0910_P2_CFG_RSPARITYON 0xff370080
+
+/* P1_TSTDMD */
+#define RSTV0910_P1_TSTDMD 0xff40
+#define FSTV0910_P1_CFRINIT_INVZIGZAG 0xff400008
+
+/* P1_TCTL1 */
+#define RSTV0910_P1_TCTL1 0xff44
+#define FSTV0910_P1_TST_IQSYMBSEL 0xff44001f
+
+/* P1_TCTL4 */
+#define RSTV0910_P1_TCTL4 0xff48
+#define FSTV0910_P1_CFR2TOCFR1_DVBS1 0xff4800c0
+
+/* P1_TPKTDELIN */
+#define RSTV0910_P1_TPKTDELIN 0xff57
+#define FSTV0910_P1_CFG_RSPARITYON 0xff570080
+
+/* TSTTSRS */
+#define RSTV0910_TSTTSRS 0xff6d
+#define FSTV0910_TSTRS_DISRS2 0xff6d0002
+#define FSTV0910_TSTRS_DISRS1 0xff6d0001
+
+#define STV0910_NBREGS 975
+#define STV0910_NBFIELDS 1818
diff --git a/drivers/media/dvb-frontends/stv6111.c b/drivers/media/dvb-frontends/stv6111.c
new file mode 100644
index 000000000000..e3e90070e293
--- /dev/null
+++ b/drivers/media/dvb-frontends/stv6111.c
@@ -0,0 +1,681 @@
+/*
+ * Driver for the ST STV6111 tuner
+ *
+ * Copyright (C) 2014 Digital Devices GmbH
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 only, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/i2c.h>
+#include <asm/div64.h>
+
+#include "stv6111.h"
+
+#include "dvb_frontend.h"
+
+struct stv {
+ struct i2c_adapter *i2c;
+ u8 adr;
+
+ u8 reg[11];
+ u32 ref_freq;
+ u32 frequency;
+};
+
+struct slookup {
+ s16 value;
+ u16 reg_value;
+};
+
+static const struct slookup lnagain_nf_lookup[] = {
+ /* Gain *100dB // Reg */
+ { 2572, 0 },
+ { 2575, 1 },
+ { 2580, 2 },
+ { 2588, 3 },
+ { 2596, 4 },
+ { 2611, 5 },
+ { 2633, 6 },
+ { 2664, 7 },
+ { 2701, 8 },
+ { 2753, 9 },
+ { 2816, 10 },
+ { 2902, 11 },
+ { 2995, 12 },
+ { 3104, 13 },
+ { 3215, 14 },
+ { 3337, 15 },
+ { 3492, 16 },
+ { 3614, 17 },
+ { 3731, 18 },
+ { 3861, 19 },
+ { 3988, 20 },
+ { 4124, 21 },
+ { 4253, 22 },
+ { 4386, 23 },
+ { 4505, 24 },
+ { 4623, 25 },
+ { 4726, 26 },
+ { 4821, 27 },
+ { 4903, 28 },
+ { 4979, 29 },
+ { 5045, 30 },
+ { 5102, 31 }
+};
+
+static const struct slookup lnagain_iip3_lookup[] = {
+ /* Gain *100dB // reg */
+ { 1548, 0 },
+ { 1552, 1 },
+ { 1569, 2 },
+ { 1565, 3 },
+ { 1577, 4 },
+ { 1594, 5 },
+ { 1627, 6 },
+ { 1656, 7 },
+ { 1700, 8 },
+ { 1748, 9 },
+ { 1805, 10 },
+ { 1896, 11 },
+ { 1995, 12 },
+ { 2113, 13 },
+ { 2233, 14 },
+ { 2366, 15 },
+ { 2543, 16 },
+ { 2687, 17 },
+ { 2842, 18 },
+ { 2999, 19 },
+ { 3167, 20 },
+ { 3342, 21 },
+ { 3507, 22 },
+ { 3679, 23 },
+ { 3827, 24 },
+ { 3970, 25 },
+ { 4094, 26 },
+ { 4210, 27 },
+ { 4308, 28 },
+ { 4396, 29 },
+ { 4468, 30 },
+ { 4535, 31 }
+};
+
+static const struct slookup gain_rfagc_lookup[] = {
+ /* Gain *100dB // reg */
+ { 4870, 0x3000 },
+ { 4850, 0x3C00 },
+ { 4800, 0x4500 },
+ { 4750, 0x4800 },
+ { 4700, 0x4B00 },
+ { 4650, 0x4D00 },
+ { 4600, 0x4F00 },
+ { 4550, 0x5100 },
+ { 4500, 0x5200 },
+ { 4420, 0x5500 },
+ { 4316, 0x5800 },
+ { 4200, 0x5B00 },
+ { 4119, 0x5D00 },
+ { 3999, 0x6000 },
+ { 3950, 0x6100 },
+ { 3876, 0x6300 },
+ { 3755, 0x6600 },
+ { 3641, 0x6900 },
+ { 3567, 0x6B00 },
+ { 3425, 0x6F00 },
+ { 3350, 0x7100 },
+ { 3236, 0x7400 },
+ { 3118, 0x7700 },
+ { 3004, 0x7A00 },
+ { 2917, 0x7C00 },
+ { 2776, 0x7F00 },
+ { 2635, 0x8200 },
+ { 2516, 0x8500 },
+ { 2406, 0x8800 },
+ { 2290, 0x8B00 },
+ { 2170, 0x8E00 },
+ { 2073, 0x9100 },
+ { 1949, 0x9400 },
+ { 1836, 0x9700 },
+ { 1712, 0x9A00 },
+ { 1631, 0x9C00 },
+ { 1515, 0x9F00 },
+ { 1400, 0xA200 },
+ { 1323, 0xA400 },
+ { 1203, 0xA700 },
+ { 1091, 0xAA00 },
+ { 1011, 0xAC00 },
+ { 904, 0xAF00 },
+ { 787, 0xB200 },
+ { 685, 0xB500 },
+ { 571, 0xB800 },
+ { 464, 0xBB00 },
+ { 374, 0xBE00 },
+ { 275, 0xC200 },
+ { 181, 0xC600 },
+ { 102, 0xCC00 },
+ { 49, 0xD900 }
+};
+
+/*
+ * This table is 6 dB too low compared to the others (probably created with
+ * a different BB_MAG setting)
+ */
+static const struct slookup gain_channel_agc_nf_lookup[] = {
+ /* Gain *100dB // reg */
+ { 7082, 0x3000 },
+ { 7052, 0x4000 },
+ { 7007, 0x4600 },
+ { 6954, 0x4A00 },
+ { 6909, 0x4D00 },
+ { 6833, 0x5100 },
+ { 6753, 0x5400 },
+ { 6659, 0x5700 },
+ { 6561, 0x5A00 },
+ { 6472, 0x5C00 },
+ { 6366, 0x5F00 },
+ { 6259, 0x6100 },
+ { 6151, 0x6400 },
+ { 6026, 0x6700 },
+ { 5920, 0x6900 },
+ { 5835, 0x6B00 },
+ { 5770, 0x6C00 },
+ { 5681, 0x6E00 },
+ { 5596, 0x7000 },
+ { 5503, 0x7200 },
+ { 5429, 0x7300 },
+ { 5319, 0x7500 },
+ { 5220, 0x7700 },
+ { 5111, 0x7900 },
+ { 4983, 0x7B00 },
+ { 4876, 0x7D00 },
+ { 4755, 0x7F00 },
+ { 4635, 0x8100 },
+ { 4499, 0x8300 },
+ { 4405, 0x8500 },
+ { 4323, 0x8600 },
+ { 4233, 0x8800 },
+ { 4156, 0x8A00 },
+ { 4038, 0x8C00 },
+ { 3935, 0x8E00 },
+ { 3823, 0x9000 },
+ { 3712, 0x9200 },
+ { 3601, 0x9500 },
+ { 3511, 0x9700 },
+ { 3413, 0x9900 },
+ { 3309, 0x9B00 },
+ { 3213, 0x9D00 },
+ { 3088, 0x9F00 },
+ { 2992, 0xA100 },
+ { 2878, 0xA400 },
+ { 2769, 0xA700 },
+ { 2645, 0xAA00 },
+ { 2538, 0xAD00 },
+ { 2441, 0xB000 },
+ { 2350, 0xB600 },
+ { 2237, 0xBA00 },
+ { 2137, 0xBF00 },
+ { 2039, 0xC500 },
+ { 1938, 0xDF00 },
+ { 1927, 0xFF00 }
+};
+
+static const struct slookup gain_channel_agc_iip3_lookup[] = {
+ /* Gain *100dB // reg */
+ { 7070, 0x3000 },
+ { 7028, 0x4000 },
+ { 7019, 0x4600 },
+ { 6900, 0x4A00 },
+ { 6811, 0x4D00 },
+ { 6763, 0x5100 },
+ { 6690, 0x5400 },
+ { 6644, 0x5700 },
+ { 6617, 0x5A00 },
+ { 6598, 0x5C00 },
+ { 6462, 0x5F00 },
+ { 6348, 0x6100 },
+ { 6197, 0x6400 },
+ { 6154, 0x6700 },
+ { 6098, 0x6900 },
+ { 5893, 0x6B00 },
+ { 5812, 0x6C00 },
+ { 5773, 0x6E00 },
+ { 5723, 0x7000 },
+ { 5661, 0x7200 },
+ { 5579, 0x7300 },
+ { 5460, 0x7500 },
+ { 5308, 0x7700 },
+ { 5099, 0x7900 },
+ { 4910, 0x7B00 },
+ { 4800, 0x7D00 },
+ { 4785, 0x7F00 },
+ { 4635, 0x8100 },
+ { 4466, 0x8300 },
+ { 4314, 0x8500 },
+ { 4295, 0x8600 },
+ { 4144, 0x8800 },
+ { 3920, 0x8A00 },
+ { 3889, 0x8C00 },
+ { 3771, 0x8E00 },
+ { 3655, 0x9000 },
+ { 3446, 0x9200 },
+ { 3298, 0x9500 },
+ { 3083, 0x9700 },
+ { 3015, 0x9900 },
+ { 2833, 0x9B00 },
+ { 2746, 0x9D00 },
+ { 2632, 0x9F00 },
+ { 2598, 0xA100 },
+ { 2480, 0xA400 },
+ { 2236, 0xA700 },
+ { 2171, 0xAA00 },
+ { 2060, 0xAD00 },
+ { 1999, 0xB000 },
+ { 1974, 0xB600 },
+ { 1820, 0xBA00 },
+ { 1741, 0xBF00 },
+ { 1655, 0xC500 },
+ { 1444, 0xDF00 },
+ { 1325, 0xFF00 },
+};
+
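+/*
+ * Compute a * b / c with a 64-bit intermediate product so that callers
+ * such as set_lof() (frac * 0x40000) do not overflow 32 bits.
+ */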
+static inline u32 muldiv32(u32 a, u32 b, u32 c)
+{
+ u64 tmp64;
+
+ tmp64 = (u64)a * (u64)b;
+ do_div(tmp64, c);
+
+ return (u32)tmp64;
+}
+
+static int i2c_read(struct i2c_adapter *adap,
+ u8 adr, u8 *msg, int len, u8 *answ, int alen)
+{
+ struct i2c_msg msgs[2] = { { .addr = adr, .flags = 0,
+ .buf = msg, .len = len},
+ { .addr = adr, .flags = I2C_M_RD,
+ .buf = answ, .len = alen } };
+ if (i2c_transfer(adap, msgs, 2) != 2) {
+ dev_err(&adap->dev, "i2c read error\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+static int i2c_write(struct i2c_adapter *adap, u8 adr, u8 *data, int len)
+{
+ struct i2c_msg msg = {.addr = adr, .flags = 0,
+ .buf = data, .len = len};
+
+ if (i2c_transfer(adap, &msg, 1) != 1) {
+ dev_err(&adap->dev, "i2c write error\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+static int write_regs(struct stv *state, int reg, int len)
+{
+ u8 d[12];
+
+ memcpy(&d[1], &state->reg[reg], len);
+ d[0] = reg;
+ return i2c_write(state->i2c, state->adr, d, len + 1);
+}
+
+static int write_reg(struct stv *state, u8 reg, u8 val)
+{
+ u8 d[2] = {reg, val};
+
+ return i2c_write(state->i2c, state->adr, d, 2);
+}
+
+static int read_reg(struct stv *state, u8 reg, u8 *val)
+{
+ return i2c_read(state->i2c, state->adr, &reg, 1, val, 1);
+}
+
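+/*
+ * Poll the calibration status register (reg 9) until the bits in 'mask'
+ * have cleared, giving up after 10 attempts of a few milliseconds each.
+ */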
+static int wait_for_call_done(struct stv *state, u8 mask)
+{
+ int status = 0;
+ u32 lock_retry_count = 10;
+
+ while (lock_retry_count > 0) {
+ u8 regval;
+
+ status = read_reg(state, 9, &regval);
+ if (status < 0)
+ return status;
+
+ if ((regval & mask) == 0)
+ break;
+ usleep_range(4000, 6000);
+ lock_retry_count -= 1;
+
+ status = -EIO;
+ }
+ return status;
+}
+
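+/*
+ * Load the power-up register defaults and apply the default clock divider,
+ * AGC mode and AGC reference values.
+ */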
+static void init_state(struct stv *state)
+{
+ u32 clkdiv = 0;
+ u32 agcmode = 0;
+ u32 agcref = 2;
+ u32 agcset = 0xffffffff;
+ u32 bbmode = 0xffffffff;
+
+ state->reg[0] = 0x08;
+ state->reg[1] = 0x41;
+ state->reg[2] = 0x8f;
+ state->reg[3] = 0x00;
+ state->reg[4] = 0xce;
+ state->reg[5] = 0x54;
+ state->reg[6] = 0x55;
+ state->reg[7] = 0x45;
+ state->reg[8] = 0x46;
+ state->reg[9] = 0xbd;
+ state->reg[10] = 0x11;
+
+ state->ref_freq = 16000;
+
+ if (clkdiv <= 3)
+ state->reg[0x00] |= (clkdiv & 0x03);
+ if (agcmode <= 3) {
+ state->reg[0x03] |= (agcmode << 5);
+ if (agcmode == 0x01)
+ state->reg[0x01] |= 0x30;
+ }
+ if (bbmode <= 3)
+ state->reg[0x01] = (state->reg[0x01] & ~0x30) | (bbmode << 4);
+ if (agcref <= 7)
+ state->reg[0x03] |= agcref;
+ if (agcset <= 31)
+ state->reg[0x02] = (state->reg[0x02] & ~0x1F) | agcset | 0x40;
+}
+
+static int attach_init(struct stv *state)
+{
+ if (write_regs(state, 0, 11))
+ return -ENODEV;
+ return 0;
+}
+
+static void release(struct dvb_frontend *fe)
+{
+ kfree(fe->tuner_priv);
+ fe->tuner_priv = NULL;
+}
+
+static int set_bandwidth(struct dvb_frontend *fe, u32 cutoff_frequency)
+{
+ struct stv *state = fe->tuner_priv;
+ u32 index = (cutoff_frequency + 999999) / 1000000;
+
+ if (index < 6)
+ index = 6;
+ if (index > 50)
+ index = 50;
+ if ((state->reg[0x08] & ~0xFC) == ((index - 6) << 2))
+ return 0;
+
+ state->reg[0x08] = (state->reg[0x08] & ~0xFC) | ((index - 6) << 2);
+ state->reg[0x09] = (state->reg[0x09] & ~0x0C) | 0x08;
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+ write_regs(state, 0x08, 2);
+ wait_for_call_done(state, 0x08);
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+ return 0;
+}
+
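+/*
+ * Program the synthesizer for the requested local oscillator frequency:
+ * the VCO runs at p times the LO (p = 4 below 1.3 GHz, otherwise 2), the
+ * integer divider and 18-bit fractional part (scaled by 0x40000) are
+ * derived from the reference clock, the charge-pump current is chosen
+ * from the resulting VCO band, and a VCO/CF calibration is started.
+ */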
+static int set_lof(struct stv *state, u32 local_frequency, u32 cutoff_frequency)
+{
+ u32 index = (cutoff_frequency + 999999) / 1000000;
+ u32 frequency = (local_frequency + 500) / 1000;
+ u32 p = 1, psel = 0, fvco, div, frac;
+ u8 icp, tmp;
+
+ if (index < 6)
+ index = 6;
+ if (index > 50)
+ index = 50;
+
+ if (frequency <= 1300000) {
+ p = 4;
+ psel = 1;
+ } else {
+ p = 2;
+ psel = 0;
+ }
+ fvco = frequency * p;
+ div = fvco / state->ref_freq;
+ frac = fvco % state->ref_freq;
+ frac = muldiv32(frac, 0x40000, state->ref_freq);
+
+ icp = 0;
+ if (fvco < 2700000)
+ icp = 0;
+ else if (fvco < 2950000)
+ icp = 1;
+ else if (fvco < 3300000)
+ icp = 2;
+ else if (fvco < 3700000)
+ icp = 3;
+ else if (fvco < 4200000)
+ icp = 5;
+ else if (fvco < 4800000)
+ icp = 6;
+ else
+ icp = 7;
+
+ state->reg[0x02] |= 0x80; /* LNA IIP3 Mode */
+
+ state->reg[0x03] = (state->reg[0x03] & ~0x80) | (psel << 7);
+ state->reg[0x04] = (div & 0xFF);
+ state->reg[0x05] = (((div >> 8) & 0x01) | ((frac & 0x7F) << 1)) & 0xff;
+ state->reg[0x06] = ((frac >> 7) & 0xFF);
+ state->reg[0x07] = (state->reg[0x07] & ~0x07) | ((frac >> 15) & 0x07);
+ state->reg[0x07] = (state->reg[0x07] & ~0xE0) | (icp << 5);
+
+ state->reg[0x08] = (state->reg[0x08] & ~0xFC) | ((index - 6) << 2);
+ /* Start cal vco,CF */
+ state->reg[0x09] = (state->reg[0x09] & ~0x0C) | 0x0C;
+ write_regs(state, 2, 8);
+
+ wait_for_call_done(state, 0x0C);
+
+ usleep_range(10000, 12000);
+
+ read_reg(state, 0x03, &tmp);
+ if (tmp & 0x10) {
+ state->reg[0x02] &= ~0x80; /* LNA NF Mode */
+ write_regs(state, 2, 1);
+ }
+ read_reg(state, 0x08, &tmp);
+
+ state->frequency = frequency;
+
+ return 0;
+}
+
+static int set_params(struct dvb_frontend *fe)
+{
+ struct stv *state = fe->tuner_priv;
+ struct dtv_frontend_properties *p = &fe->dtv_property_cache;
+ u32 freq, cutoff;
+
+ if (p->delivery_system != SYS_DVBS && p->delivery_system != SYS_DVBS2)
+ return -EINVAL;
+
+ freq = p->frequency * 1000;
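+ /* Cutoff of roughly symbol_rate * (1 + 0.35) / 2 plus a 5 MHz margin */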
+ cutoff = 5000000 + muldiv32(p->symbol_rate, 135, 200);
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+ set_lof(state, freq, cutoff);
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+ return 0;
+}
+
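+/*
+ * Return the gain for reg_value from a table sorted by ascending reg_value:
+ * binary-search for the two surrounding entries and interpolate linearly
+ * between them.
+ */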
+static s32 table_lookup(const struct slookup *table,
+ int table_size, u16 reg_value)
+{
+ s32 gain;
+ s32 reg_diff;
+ int imin = 0;
+ int imax = table_size - 1;
+ int i;
+
+ /* Assumes table[0].reg_value < table[imax].reg_value */
+ if (reg_value <= table[0].reg_value) {
+ gain = table[0].value;
+ } else if (reg_value >= table[imax].reg_value) {
+ gain = table[imax].value;
+ } else {
+ while ((imax - imin) > 1) {
+ i = (imax + imin) / 2;
+ if ((table[imin].reg_value <= reg_value) &&
+ (reg_value <= table[i].reg_value))
+ imax = i;
+ else
+ imin = i;
+ }
+ reg_diff = table[imax].reg_value - table[imin].reg_value;
+ gain = table[imin].value;
+ if (reg_diff != 0)
+ gain += ((s32)(reg_value - table[imin].reg_value) *
+ (s32)(table[imax].value
+ - table[imin].value)) / reg_diff;
+ }
+ return gain;
+}
+
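+/*
+ * Estimate the tuner gain (in 0.01 dB steps) from the RF or channel AGC
+ * value passed in *st and the lookup tables above, then report
+ * 10000 - gain as the relative signal strength.
+ */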
+static int get_rf_strength(struct dvb_frontend *fe, u16 *st)
+{
+ struct stv *state = fe->tuner_priv;
+ u16 rfagc = *st;
+ s32 gain;
+
+ if ((state->reg[0x03] & 0x60) == 0) {
+ /* RF Mode, Read AGC ADC */
+ u8 reg = 0;
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+ write_reg(state, 0x02, state->reg[0x02] | 0x20);
+ read_reg(state, 2, &reg);
+ if (reg & 0x20)
+ read_reg(state, 2, &reg);
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+
+ if ((state->reg[0x02] & 0x80) == 0)
+ /* NF */
+ gain = table_lookup(lnagain_nf_lookup,
+ ARRAY_SIZE(lnagain_nf_lookup),
+ reg & 0x1F);
+ else
+ /* IIP3 */
+ gain = table_lookup(lnagain_iip3_lookup,
+ ARRAY_SIZE(lnagain_iip3_lookup),
+ reg & 0x1F);
+
+ gain += table_lookup(gain_rfagc_lookup,
+ ARRAY_SIZE(gain_rfagc_lookup), rfagc);
+
+ gain -= 2400;
+ } else {
+ /* Channel Mode */
+ if ((state->reg[0x02] & 0x80) == 0) {
+ /* NF */
+ gain = table_lookup(
+ gain_channel_agc_nf_lookup,
+ ARRAY_SIZE(gain_channel_agc_nf_lookup), rfagc);
+
+ gain += 600;
+ } else {
+ /* IIP3 */
+ gain = table_lookup(
+ gain_channel_agc_iip3_lookup,
+ ARRAY_SIZE(gain_channel_agc_iip3_lookup),
+ rfagc);
+ }
+ }
+
+ if (state->frequency > 0)
+ /* Tilt correction ( 0.00016 dB/MHz ) */
+ gain -= ((((s32)(state->frequency / 1000) - 1550) * 2) / 12);
+
+ /* + (BBGain * 10); */
+ gain += (s32)((state->reg[0x01] & 0xC0) >> 6) * 600 - 1300;
+
+ if (gain < 0)
+ gain = 0;
+ else if (gain > 10000)
+ gain = 10000;
+
+ *st = 10000 - gain;
+
+ return 0;
+}
+
+static const struct dvb_tuner_ops tuner_ops = {
+ .info = {
+ .name = "ST STV6111",
+ .frequency_min = 950000,
+ .frequency_max = 2150000,
+ .frequency_step = 0
+ },
+ .set_params = set_params,
+ .release = release,
+ .get_rf_strength = get_rf_strength,
+ .set_bandwidth = set_bandwidth,
+};
+
+struct dvb_frontend *stv6111_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c, u8 adr)
+{
+ struct stv *state;
+ int stat;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+ state->adr = adr;
+ state->i2c = i2c;
+ memcpy(&fe->ops.tuner_ops, &tuner_ops, sizeof(struct dvb_tuner_ops));
+ init_state(state);
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+ stat = attach_init(state);
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+ if (stat < 0) {
+ kfree(state);
+ return NULL;
+ }
+ fe->tuner_priv = state;
+ return fe;
+}
+EXPORT_SYMBOL_GPL(stv6111_attach);
+
+MODULE_DESCRIPTION("ST STV6111 satellite tuner driver");
+MODULE_AUTHOR("Ralph Metzler, Manfred Voelkel");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb-frontends/stv6111.h b/drivers/media/dvb-frontends/stv6111.h
new file mode 100644
index 000000000000..5bc1228dc9bd
--- /dev/null
+++ b/drivers/media/dvb-frontends/stv6111.h
@@ -0,0 +1,21 @@
+#ifndef _STV6111_H_
+#define _STV6111_H_
+
+#if IS_REACHABLE(CONFIG_DVB_STV6111)
+
+struct dvb_frontend *stv6111_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c, u8 adr);
+
+#else
+
+static inline struct dvb_frontend *stv6111_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c,
+ u8 adr)
+{
+ pr_warn("%s: Driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+
+#endif /* CONFIG_DVB_STV6111 */
+
+#endif /* _STV6111_H_ */
diff --git a/drivers/media/dvb-frontends/zd1301_demod.c b/drivers/media/dvb-frontends/zd1301_demod.c
index fcf5f69de0c5..84a2b25a574a 100644
--- a/drivers/media/dvb-frontends/zd1301_demod.c
+++ b/drivers/media/dvb-frontends/zd1301_demod.c
@@ -445,7 +445,7 @@ static u32 zd1301_demod_i2c_functionality(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm zd1301_demod_i2c_algorithm = {
+static const struct i2c_algorithm zd1301_demod_i2c_algorithm = {
.master_xfer = zd1301_demod_i2c_master_xfer,
.functionality = zd1301_demod_i2c_functionality,
};
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 121b3b5394cb..94153895fcd4 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -204,6 +204,18 @@ config VIDEO_ADV7183
To compile this driver as a module, choose M here: the
module will be called adv7183.
+config VIDEO_ADV748X
+ tristate "Analog Devices ADV748x decoder"
+ depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
+ depends on OF
+ select REGMAP_I2C
+ ---help---
+ V4L2 subdevice driver for the Analog Devices
+ ADV7481 and ADV7482 HDMI/Analog video decoders.
+
+ To compile this driver as a module, choose M here: the
+ module will be called adv748x.
+
config VIDEO_ADV7604
tristate "Analog Devices ADV7604 decoder"
depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
@@ -593,6 +605,30 @@ config VIDEO_OV5647
To compile this driver as a module, choose M here: the
module will be called ov5647.
+config VIDEO_OV6650
+ tristate "OmniVision OV6650 sensor support"
+ depends on I2C && VIDEO_V4L2
+ depends on MEDIA_CAMERA_SUPPORT
+ ---help---
+ This is a Video4Linux2 sensor-level driver for the OmniVision
+ OV6650 camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ov6650.
+
+config VIDEO_OV5670
+ tristate "OmniVision OV5670 sensor support"
+ depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on MEDIA_CAMERA_SUPPORT
+ depends on MEDIA_CONTROLLER
+ select V4L2_FWNODE
+ ---help---
+ This is a Video4Linux2 sensor-level driver for the OmniVision
+ OV5670 camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ov5670.
+
config VIDEO_OV7640
tristate "OmniVision OV7640 sensor support"
depends on I2C && VIDEO_V4L2
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index 2c0868fa6034..c843c181dfb9 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_VIDEO_ADV7180) += adv7180.o
obj-$(CONFIG_VIDEO_ADV7183) += adv7183.o
obj-$(CONFIG_VIDEO_ADV7343) += adv7343.o
obj-$(CONFIG_VIDEO_ADV7393) += adv7393.o
+obj-$(CONFIG_VIDEO_ADV748X) += adv748x/
obj-$(CONFIG_VIDEO_ADV7604) += adv7604.o
obj-$(CONFIG_VIDEO_ADV7842) += adv7842.o
obj-$(CONFIG_VIDEO_AD9389B) += ad9389b.o
@@ -62,6 +63,8 @@ obj-$(CONFIG_VIDEO_OV2640) += ov2640.o
obj-$(CONFIG_VIDEO_OV5640) += ov5640.o
obj-$(CONFIG_VIDEO_OV5645) += ov5645.o
obj-$(CONFIG_VIDEO_OV5647) += ov5647.o
+obj-$(CONFIG_VIDEO_OV5670) += ov5670.o
+obj-$(CONFIG_VIDEO_OV6650) += ov6650.o
obj-$(CONFIG_VIDEO_OV7640) += ov7640.o
obj-$(CONFIG_VIDEO_OV7670) += ov7670.o
obj-$(CONFIG_VIDEO_OV9650) += ov9650.o
diff --git a/drivers/media/i2c/ad9389b.c b/drivers/media/i2c/ad9389b.c
index 50f354144ee7..a056d6cdaaaa 100644
--- a/drivers/media/i2c/ad9389b.c
+++ b/drivers/media/i2c/ad9389b.c
@@ -1208,7 +1208,7 @@ static int ad9389b_remove(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
-static struct i2c_device_id ad9389b_id[] = {
+static const struct i2c_device_id ad9389b_id[] = {
{ "ad9389b", 0 },
{ "ad9889b", 0 },
{ }
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index 78de7ddf5081..3df28f2f9b38 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -1402,6 +1402,8 @@ static int adv7180_remove(struct i2c_client *client)
static const struct i2c_device_id adv7180_id[] = {
{ "adv7180", (kernel_ulong_t)&adv7180_info },
+ { "adv7180cp", (kernel_ulong_t)&adv7180_info },
+ { "adv7180st", (kernel_ulong_t)&adv7180_info },
{ "adv7182", (kernel_ulong_t)&adv7182_info },
{ "adv7280", (kernel_ulong_t)&adv7280_info },
{ "adv7280-m", (kernel_ulong_t)&adv7280_m_info },
diff --git a/drivers/media/i2c/adv748x/Makefile b/drivers/media/i2c/adv748x/Makefile
new file mode 100644
index 000000000000..c0711e076f1d
--- /dev/null
+++ b/drivers/media/i2c/adv748x/Makefile
@@ -0,0 +1,7 @@
+adv748x-objs := \
+ adv748x-afe.o \
+ adv748x-core.o \
+ adv748x-csi2.o \
+ adv748x-hdmi.o
+
+obj-$(CONFIG_VIDEO_ADV748X) += adv748x.o
diff --git a/drivers/media/i2c/adv748x/adv748x-afe.c b/drivers/media/i2c/adv748x/adv748x-afe.c
new file mode 100644
index 000000000000..b33ccfc08708
--- /dev/null
+++ b/drivers/media/i2c/adv748x/adv748x-afe.c
@@ -0,0 +1,552 @@
+/*
+ * Driver for Analog Devices ADV748X 8 channel analog front end (AFE) receiver
+ * with standard definition processor (SDP)
+ *
+ * Copyright (C) 2017 Renesas Electronics Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/v4l2-dv-timings.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-dv-timings.h>
+#include <media/v4l2-ioctl.h>
+
+#include "adv748x.h"
+
+/* -----------------------------------------------------------------------------
+ * SDP
+ */
+
+#define ADV748X_AFE_STD_AD_PAL_BG_NTSC_J_SECAM 0x0
+#define ADV748X_AFE_STD_AD_PAL_BG_NTSC_J_SECAM_PED 0x1
+#define ADV748X_AFE_STD_AD_PAL_N_NTSC_J_SECAM 0x2
+#define ADV748X_AFE_STD_AD_PAL_N_NTSC_M_SECAM 0x3
+#define ADV748X_AFE_STD_NTSC_J 0x4
+#define ADV748X_AFE_STD_NTSC_M 0x5
+#define ADV748X_AFE_STD_PAL60 0x6
+#define ADV748X_AFE_STD_NTSC_443 0x7
+#define ADV748X_AFE_STD_PAL_BG 0x8
+#define ADV748X_AFE_STD_PAL_N 0x9
+#define ADV748X_AFE_STD_PAL_M 0xa
+#define ADV748X_AFE_STD_PAL_M_PED 0xb
+#define ADV748X_AFE_STD_PAL_COMB_N 0xc
+#define ADV748X_AFE_STD_PAL_COMB_N_PED 0xd
+#define ADV748X_AFE_STD_PAL_SECAM 0xe
+#define ADV748X_AFE_STD_PAL_SECAM_PED 0xf
+
+static int adv748x_afe_read_ro_map(struct adv748x_state *state, u8 reg)
+{
+ int ret;
+
+ /* Select SDP Read-Only Main Map */
+ ret = sdp_write(state, ADV748X_SDP_MAP_SEL,
+ ADV748X_SDP_MAP_SEL_RO_MAIN);
+ if (ret < 0)
+ return ret;
+
+ return sdp_read(state, reg);
+}
+
+static int adv748x_afe_status(struct adv748x_afe *afe, u32 *signal,
+ v4l2_std_id *std)
+{
+ struct adv748x_state *state = adv748x_afe_to_state(afe);
+ int info;
+
+ /* Read status from reg 0x10 of SDP RO Map */
+ info = adv748x_afe_read_ro_map(state, ADV748X_SDP_RO_10);
+ if (info < 0)
+ return info;
+
+ if (signal)
+ *signal = info & ADV748X_SDP_RO_10_IN_LOCK ?
+ 0 : V4L2_IN_ST_NO_SIGNAL;
+
+ if (!std)
+ return 0;
+
+ /* Standard not valid if there is no signal */
+ if (!(info & ADV748X_SDP_RO_10_IN_LOCK)) {
+ *std = V4L2_STD_UNKNOWN;
+ return 0;
+ }
+
+ switch (info & 0x70) {
+ case 0x00:
+ *std = V4L2_STD_NTSC;
+ break;
+ case 0x10:
+ *std = V4L2_STD_NTSC_443;
+ break;
+ case 0x20:
+ *std = V4L2_STD_PAL_M;
+ break;
+ case 0x30:
+ *std = V4L2_STD_PAL_60;
+ break;
+ case 0x40:
+ *std = V4L2_STD_PAL;
+ break;
+ case 0x50:
+ *std = V4L2_STD_SECAM;
+ break;
+ case 0x60:
+ *std = V4L2_STD_PAL_Nc | V4L2_STD_PAL_N;
+ break;
+ case 0x70:
+ *std = V4L2_STD_SECAM;
+ break;
+ default:
+ *std = V4L2_STD_UNKNOWN;
+ break;
+ }
+
+ return 0;
+}
+
+static void adv748x_afe_fill_format(struct adv748x_afe *afe,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ memset(fmt, 0, sizeof(*fmt));
+
+ fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
+ fmt->colorspace = V4L2_COLORSPACE_SMPTE170M;
+ fmt->field = V4L2_FIELD_ALTERNATE;
+
+ fmt->width = 720;
+ fmt->height = afe->curr_norm & V4L2_STD_525_60 ? 480 : 576;
+
+ /* The AFE outputs alternate fields, so halve to the field height */
+ fmt->height /= 2;
+}
+
+static int adv748x_afe_std(v4l2_std_id std)
+{
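+ /* Match the exact single standards first, then the wider groups */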
+ if (std == V4L2_STD_PAL_60)
+ return ADV748X_AFE_STD_PAL60;
+ if (std == V4L2_STD_NTSC_443)
+ return ADV748X_AFE_STD_NTSC_443;
+ if (std == V4L2_STD_PAL_N)
+ return ADV748X_AFE_STD_PAL_N;
+ if (std == V4L2_STD_PAL_M)
+ return ADV748X_AFE_STD_PAL_M;
+ if (std == V4L2_STD_PAL_Nc)
+ return ADV748X_AFE_STD_PAL_COMB_N;
+ if (std & V4L2_STD_NTSC)
+ return ADV748X_AFE_STD_NTSC_M;
+ if (std & V4L2_STD_PAL)
+ return ADV748X_AFE_STD_PAL_BG;
+ if (std & V4L2_STD_SECAM)
+ return ADV748X_AFE_STD_PAL_SECAM;
+
+ return -EINVAL;
+}
+
+static void adv748x_afe_set_video_standard(struct adv748x_state *state,
+ int sdpstd)
+{
+ sdp_clrset(state, ADV748X_SDP_VID_SEL, ADV748X_SDP_VID_SEL_MASK,
+ (sdpstd & 0xf) << ADV748X_SDP_VID_SEL_SHIFT);
+}
+
+static int adv748x_afe_s_input(struct adv748x_afe *afe, unsigned int input)
+{
+ struct adv748x_state *state = adv748x_afe_to_state(afe);
+
+ return sdp_write(state, ADV748X_SDP_INSEL, input);
+}
+
+static int adv748x_afe_g_pixelaspect(struct v4l2_subdev *sd,
+ struct v4l2_fract *aspect)
+{
+ struct adv748x_afe *afe = adv748x_sd_to_afe(sd);
+
+ if (afe->curr_norm & V4L2_STD_525_60) {
+ aspect->numerator = 11;
+ aspect->denominator = 10;
+ } else {
+ aspect->numerator = 54;
+ aspect->denominator = 59;
+ }
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * v4l2_subdev_video_ops
+ */
+
+static int adv748x_afe_g_std(struct v4l2_subdev *sd, v4l2_std_id *norm)
+{
+ struct adv748x_afe *afe = adv748x_sd_to_afe(sd);
+
+ *norm = afe->curr_norm;
+
+ return 0;
+}
+
+static int adv748x_afe_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
+{
+ struct adv748x_afe *afe = adv748x_sd_to_afe(sd);
+ struct adv748x_state *state = adv748x_afe_to_state(afe);
+ int afe_std = adv748x_afe_std(std);
+
+ if (afe_std < 0)
+ return afe_std;
+
+ mutex_lock(&state->mutex);
+
+ adv748x_afe_set_video_standard(state, afe_std);
+ afe->curr_norm = std;
+
+ mutex_unlock(&state->mutex);
+
+ return 0;
+}
+
+static int adv748x_afe_querystd(struct v4l2_subdev *sd, v4l2_std_id *std)
+{
+ struct adv748x_afe *afe = adv748x_sd_to_afe(sd);
+ struct adv748x_state *state = adv748x_afe_to_state(afe);
+ int ret;
+
+ mutex_lock(&state->mutex);
+
+ if (afe->streaming) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ /* Set auto detect mode */
+ adv748x_afe_set_video_standard(state,
+ ADV748X_AFE_STD_AD_PAL_BG_NTSC_J_SECAM);
+
+ msleep(100);
+
+ /* Read detected standard */
+ ret = adv748x_afe_status(afe, NULL, std);
+
+ /* Restore original state */
+ adv748x_afe_set_video_standard(state, afe->curr_norm);
+
+unlock:
+ mutex_unlock(&state->mutex);
+
+ return ret;
+}
+
+static int adv748x_afe_g_tvnorms(struct v4l2_subdev *sd, v4l2_std_id *norm)
+{
+ *norm = V4L2_STD_ALL;
+
+ return 0;
+}
+
+static int adv748x_afe_g_input_status(struct v4l2_subdev *sd, u32 *status)
+{
+ struct adv748x_afe *afe = adv748x_sd_to_afe(sd);
+ struct adv748x_state *state = adv748x_afe_to_state(afe);
+ int ret;
+
+ mutex_lock(&state->mutex);
+
+ ret = adv748x_afe_status(afe, status, NULL);
+
+ mutex_unlock(&state->mutex);
+ return ret;
+}
+
+static int adv748x_afe_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct adv748x_afe *afe = adv748x_sd_to_afe(sd);
+ struct adv748x_state *state = adv748x_afe_to_state(afe);
+ int ret, signal = V4L2_IN_ST_NO_SIGNAL;
+
+ mutex_lock(&state->mutex);
+
+ if (enable) {
+ ret = adv748x_afe_s_input(afe, afe->input);
+ if (ret)
+ goto unlock;
+ }
+
+ ret = adv748x_txb_power(state, enable);
+ if (ret)
+ goto unlock;
+
+ afe->streaming = enable;
+
+ adv748x_afe_status(afe, &signal, NULL);
+ if (signal != V4L2_IN_ST_NO_SIGNAL)
+ adv_dbg(state, "Detected SDP signal\n");
+ else
+ adv_dbg(state, "Couldn't detect SDP video signal\n");
+
+unlock:
+ mutex_unlock(&state->mutex);
+
+ return ret;
+}
+
+static const struct v4l2_subdev_video_ops adv748x_afe_video_ops = {
+ .g_std = adv748x_afe_g_std,
+ .s_std = adv748x_afe_s_std,
+ .querystd = adv748x_afe_querystd,
+ .g_tvnorms = adv748x_afe_g_tvnorms,
+ .g_input_status = adv748x_afe_g_input_status,
+ .s_stream = adv748x_afe_s_stream,
+ .g_pixelaspect = adv748x_afe_g_pixelaspect,
+};
+
+/* -----------------------------------------------------------------------------
+ * v4l2_subdev_pad_ops
+ */
+
+static int adv748x_afe_propagate_pixelrate(struct adv748x_afe *afe)
+{
+ struct v4l2_subdev *tx;
+ unsigned int width, height, fps;
+
+ tx = adv748x_get_remote_sd(&afe->pads[ADV748X_AFE_SOURCE]);
+ if (!tx)
+ return -ENOLINK;
+
+ width = 720;
+ height = afe->curr_norm & V4L2_STD_525_60 ? 480 : 576;
+ fps = afe->curr_norm & V4L2_STD_525_60 ? 30 : 25;
+
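+ /*
+ * Editorial note: both cases resolve to the same active pixel rate,
+ * 720 * 480 * 30 = 720 * 576 * 25 = 10368000 pixels per second.
+ */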
+ return adv748x_csi2_set_pixelrate(tx, width * height * fps);
+}
+
+static int adv748x_afe_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index != 0)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_UYVY8_2X8;
+
+ return 0;
+}
+
+static int adv748x_afe_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct adv748x_afe *afe = adv748x_sd_to_afe(sd);
+ struct v4l2_mbus_framefmt *mbusformat;
+
+ /* It makes no sense to get the format of the analog sink pads */
+ if (sdformat->pad != ADV748X_AFE_SOURCE)
+ return -EINVAL;
+
+ if (sdformat->which == V4L2_SUBDEV_FORMAT_TRY) {
+ mbusformat = v4l2_subdev_get_try_format(sd, cfg, sdformat->pad);
+ sdformat->format = *mbusformat;
+ } else {
+ adv748x_afe_fill_format(afe, &sdformat->format);
+ adv748x_afe_propagate_pixelrate(afe);
+ }
+
+ return 0;
+}
+
+static int adv748x_afe_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct v4l2_mbus_framefmt *mbusformat;
+
+ /* It makes no sense to set the format of the analog sink pads */
+ if (sdformat->pad != ADV748X_AFE_SOURCE)
+ return -EINVAL;
+
+ if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ return adv748x_afe_get_format(sd, cfg, sdformat);
+
+ mbusformat = v4l2_subdev_get_try_format(sd, cfg, sdformat->pad);
+ *mbusformat = sdformat->format;
+
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops adv748x_afe_pad_ops = {
+ .enum_mbus_code = adv748x_afe_enum_mbus_code,
+ .set_fmt = adv748x_afe_set_format,
+ .get_fmt = adv748x_afe_get_format,
+};
+
+/* -----------------------------------------------------------------------------
+ * v4l2_subdev_ops
+ */
+
+static const struct v4l2_subdev_ops adv748x_afe_ops = {
+ .video = &adv748x_afe_video_ops,
+ .pad = &adv748x_afe_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * Controls
+ */
+
+static const char * const afe_ctrl_frp_menu[] = {
+ "Disabled",
+ "Solid Blue",
+ "Color Bars",
+ "Grey Ramp",
+ "Cb Ramp",
+ "Cr Ramp",
+ "Boundary"
+};
+
+static int adv748x_afe_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct adv748x_afe *afe = adv748x_ctrl_to_afe(ctrl);
+ struct adv748x_state *state = adv748x_afe_to_state(afe);
+ bool enable;
+ int ret;
+
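+ /*
+ * Editorial note: register 0x0e appears to be the SDP map select;
+ * writing 0 reselects the main map before the control registers
+ * below are updated.
+ */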
+ ret = sdp_write(state, 0x0e, 0x00);
+ if (ret < 0)
+ return ret;
+
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ ret = sdp_write(state, ADV748X_SDP_BRI, ctrl->val);
+ break;
+ case V4L2_CID_HUE:
+ /* Hue is inverted according to HSL chart */
+ ret = sdp_write(state, ADV748X_SDP_HUE, -ctrl->val);
+ break;
+ case V4L2_CID_CONTRAST:
+ ret = sdp_write(state, ADV748X_SDP_CON, ctrl->val);
+ break;
+ case V4L2_CID_SATURATION:
+ ret = sdp_write(state, ADV748X_SDP_SD_SAT_U, ctrl->val);
+ if (ret)
+ break;
+ ret = sdp_write(state, ADV748X_SDP_SD_SAT_V, ctrl->val);
+ break;
+ case V4L2_CID_TEST_PATTERN:
+ enable = !!ctrl->val;
+
+ /* Enable/Disable Color bar test patterns */
+ ret = sdp_clrset(state, ADV748X_SDP_DEF, ADV748X_SDP_DEF_VAL_EN,
+ enable);
+ if (ret)
+ break;
+ ret = sdp_clrset(state, ADV748X_SDP_FRP, ADV748X_SDP_FRP_MASK,
+ enable ? ctrl->val - 1 : 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops adv748x_afe_ctrl_ops = {
+ .s_ctrl = adv748x_afe_s_ctrl,
+};
+
+static int adv748x_afe_init_controls(struct adv748x_afe *afe)
+{
+ struct adv748x_state *state = adv748x_afe_to_state(afe);
+
+ v4l2_ctrl_handler_init(&afe->ctrl_hdl, 5);
+
+ /* Use our mutex for the controls */
+ afe->ctrl_hdl.lock = &state->mutex;
+
+ v4l2_ctrl_new_std(&afe->ctrl_hdl, &adv748x_afe_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, ADV748X_SDP_BRI_MIN,
+ ADV748X_SDP_BRI_MAX, 1, ADV748X_SDP_BRI_DEF);
+ v4l2_ctrl_new_std(&afe->ctrl_hdl, &adv748x_afe_ctrl_ops,
+ V4L2_CID_CONTRAST, ADV748X_SDP_CON_MIN,
+ ADV748X_SDP_CON_MAX, 1, ADV748X_SDP_CON_DEF);
+ v4l2_ctrl_new_std(&afe->ctrl_hdl, &adv748x_afe_ctrl_ops,
+ V4L2_CID_SATURATION, ADV748X_SDP_SAT_MIN,
+ ADV748X_SDP_SAT_MAX, 1, ADV748X_SDP_SAT_DEF);
+ v4l2_ctrl_new_std(&afe->ctrl_hdl, &adv748x_afe_ctrl_ops,
+ V4L2_CID_HUE, ADV748X_SDP_HUE_MIN,
+ ADV748X_SDP_HUE_MAX, 1, ADV748X_SDP_HUE_DEF);
+
+ v4l2_ctrl_new_std_menu_items(&afe->ctrl_hdl, &adv748x_afe_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(afe_ctrl_frp_menu) - 1,
+ 0, 0, afe_ctrl_frp_menu);
+
+ afe->sd.ctrl_handler = &afe->ctrl_hdl;
+ if (afe->ctrl_hdl.error) {
+ v4l2_ctrl_handler_free(&afe->ctrl_hdl);
+ return afe->ctrl_hdl.error;
+ }
+
+ return v4l2_ctrl_handler_setup(&afe->ctrl_hdl);
+}
+
+int adv748x_afe_init(struct adv748x_afe *afe)
+{
+ struct adv748x_state *state = adv748x_afe_to_state(afe);
+ int ret;
+ unsigned int i;
+
+ afe->input = 0;
+ afe->streaming = false;
+ afe->curr_norm = V4L2_STD_NTSC_M;
+
+ adv748x_subdev_init(&afe->sd, state, &adv748x_afe_ops,
+ MEDIA_ENT_F_ATV_DECODER, "afe");
+
+ /* Identify the first connector found as a default input if set */
+ for (i = ADV748X_PORT_AIN0; i <= ADV748X_PORT_AIN7; i++) {
+ /* Inputs and ports are 1-indexed to match the data sheet */
+ if (state->endpoints[i]) {
+ afe->input = i;
+ break;
+ }
+ }
+
+ adv748x_afe_s_input(afe, afe->input);
+
+ adv_dbg(state, "AFE Default input set to %d\n", afe->input);
+
+ /* Entity pads and sinks are 0-indexed to match the pads */
+ for (i = ADV748X_AFE_SINK_AIN0; i <= ADV748X_AFE_SINK_AIN7; i++)
+ afe->pads[i].flags = MEDIA_PAD_FL_SINK;
+
+ afe->pads[ADV748X_AFE_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&afe->sd.entity, ADV748X_AFE_NR_PADS,
+ afe->pads);
+ if (ret)
+ return ret;
+
+ ret = adv748x_afe_init_controls(afe);
+ if (ret)
+ goto error;
+
+ return 0;
+
+error:
+ media_entity_cleanup(&afe->sd.entity);
+
+ return ret;
+}
+
+void adv748x_afe_cleanup(struct adv748x_afe *afe)
+{
+ v4l2_device_unregister_subdev(&afe->sd);
+ media_entity_cleanup(&afe->sd.entity);
+ v4l2_ctrl_handler_free(&afe->ctrl_hdl);
+}
diff --git a/drivers/media/i2c/adv748x/adv748x-core.c b/drivers/media/i2c/adv748x/adv748x-core.c
new file mode 100644
index 000000000000..5ee14f2c2747
--- /dev/null
+++ b/drivers/media/i2c/adv748x/adv748x-core.c
@@ -0,0 +1,833 @@
+/*
+ * Driver for Analog Devices ADV748X HDMI receiver with AFE
+ *
+ * Copyright (C) 2017 Renesas Electronics Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * Authors:
+ * Koji Matsuoka <koji.matsuoka.xm@renesas.com>
+ * Niklas Söderlund <niklas.soderlund@ragnatech.se>
+ * Kieran Bingham <kieran.bingham@ideasonboard.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_graph.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/v4l2-dv-timings.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-dv-timings.h>
+#include <media/v4l2-ioctl.h>
+
+#include "adv748x.h"
+
+/* -----------------------------------------------------------------------------
+ * Register manipulation
+ */
+
+static const struct regmap_config adv748x_regmap_cnf[] = {
+ {
+ .name = "io",
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 0xff,
+ .cache_type = REGCACHE_NONE,
+ },
+ {
+ .name = "dpll",
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 0xff,
+ .cache_type = REGCACHE_NONE,
+ },
+ {
+ .name = "cp",
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 0xff,
+ .cache_type = REGCACHE_NONE,
+ },
+ {
+ .name = "hdmi",
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 0xff,
+ .cache_type = REGCACHE_NONE,
+ },
+ {
+ .name = "edid",
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 0xff,
+ .cache_type = REGCACHE_NONE,
+ },
+ {
+ .name = "repeater",
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 0xff,
+ .cache_type = REGCACHE_NONE,
+ },
+ {
+ .name = "infoframe",
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 0xff,
+ .cache_type = REGCACHE_NONE,
+ },
+ {
+ .name = "cec",
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 0xff,
+ .cache_type = REGCACHE_NONE,
+ },
+ {
+ .name = "sdp",
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 0xff,
+ .cache_type = REGCACHE_NONE,
+ },
+
+ {
+ .name = "txb",
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 0xff,
+ .cache_type = REGCACHE_NONE,
+ },
+ {
+ .name = "txa",
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 0xff,
+ .cache_type = REGCACHE_NONE,
+ },
+};
+
+static int adv748x_configure_regmap(struct adv748x_state *state, int region)
+{
+ int err;
+
+ if (!state->i2c_clients[region])
+ return -ENODEV;
+
+ state->regmap[region] =
+ devm_regmap_init_i2c(state->i2c_clients[region],
+ &adv748x_regmap_cnf[region]);
+
+ if (IS_ERR(state->regmap[region])) {
+ err = PTR_ERR(state->regmap[region]);
+ adv_err(state,
+ "Error initializing regmap %d with error %d\n",
+ region, err);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Default addresses for the I2C pages */
+static int adv748x_i2c_addresses[ADV748X_PAGE_MAX] = {
+ ADV748X_I2C_IO,
+ ADV748X_I2C_DPLL,
+ ADV748X_I2C_CP,
+ ADV748X_I2C_HDMI,
+ ADV748X_I2C_EDID,
+ ADV748X_I2C_REPEATER,
+ ADV748X_I2C_INFOFRAME,
+ ADV748X_I2C_CEC,
+ ADV748X_I2C_SDP,
+ ADV748X_I2C_TXB,
+ ADV748X_I2C_TXA,
+};
+
+static int adv748x_read_check(struct adv748x_state *state,
+ int client_page, u8 reg)
+{
+ struct i2c_client *client = state->i2c_clients[client_page];
+ int err;
+ unsigned int val;
+
+ err = regmap_read(state->regmap[client_page], reg, &val);
+
+ if (err) {
+ adv_err(state, "error reading %02x, %02x\n",
+ client->addr, reg);
+ return err;
+ }
+
+ return val;
+}
+
+int adv748x_read(struct adv748x_state *state, u8 page, u8 reg)
+{
+ return adv748x_read_check(state, page, reg);
+}
+
+int adv748x_write(struct adv748x_state *state, u8 page, u8 reg, u8 value)
+{
+ return regmap_write(state->regmap[page], reg, value);
+}
+
+/* adv748x_write_block(): Write raw data with a maximum of I2C_SMBUS_BLOCK_MAX
+ * size to one or more registers.
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
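+ *
+ * Usage sketch (editorial note, error handling omitted): buffers larger
+ * than I2C_SMBUS_BLOCK_MAX must be written in chunks by the caller, e.g.
+ *
+ *   for (i = 0; i < 128; i += I2C_SMBUS_BLOCK_MAX)
+ *       adv748x_write_block(state, ADV748X_PAGE_EDID, i,
+ *                           edid + i, 128 - i);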
+ */
+int adv748x_write_block(struct adv748x_state *state, int client_page,
+ unsigned int init_reg, const void *val,
+ size_t val_len)
+{
+ struct regmap *regmap = state->regmap[client_page];
+
+ if (val_len > I2C_SMBUS_BLOCK_MAX)
+ val_len = I2C_SMBUS_BLOCK_MAX;
+
+ return regmap_raw_write(regmap, init_reg, val, val_len);
+}
+
+static struct i2c_client *adv748x_dummy_client(struct adv748x_state *state,
+ u8 addr, u8 io_reg)
+{
+ struct i2c_client *client = state->client;
+
+ if (addr)
+ io_write(state, io_reg, addr << 1);
+
+ return i2c_new_dummy(client->adapter, io_read(state, io_reg) >> 1);
+}
+
+static void adv748x_unregister_clients(struct adv748x_state *state)
+{
+ unsigned int i;
+
+ for (i = 1; i < ARRAY_SIZE(state->i2c_clients); ++i) {
+ if (state->i2c_clients[i])
+ i2c_unregister_device(state->i2c_clients[i]);
+ }
+}
+
+static int adv748x_initialise_clients(struct adv748x_state *state)
+{
+ int i;
+ int ret;
+
+ for (i = ADV748X_PAGE_DPLL; i < ADV748X_PAGE_MAX; ++i) {
+ state->i2c_clients[i] =
+ adv748x_dummy_client(state, adv748x_i2c_addresses[i],
+ ADV748X_IO_SLAVE_ADDR_BASE + i);
+ if (state->i2c_clients[i] == NULL) {
+ adv_err(state, "failed to create i2c client %u\n", i);
+ return -ENOMEM;
+ }
+
+ ret = adv748x_configure_regmap(state, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * struct adv748x_reg_value - Register write instruction
+ * @page: Regmap page identifier
+ * @reg: I2C register
+ * @value: value to write to @page at @reg
+ */
+struct adv748x_reg_value {
+ u8 page;
+ u8 reg;
+ u8 value;
+};
+
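+/*
+ * Editorial sketch (hypothetical table): entries are applied in order until
+ * an ADV748X_PAGE_EOR entry is reached, and ADV748X_PAGE_WAIT entries turn
+ * their value field into a delay in milliseconds:
+ *
+ *   { ADV748X_PAGE_IO,   0x00, 0x40 },   write 0x40 to IO map register 0x00
+ *   { ADV748X_PAGE_WAIT, 0x00, 0x05 },   sleep for 5 ms
+ *   { ADV748X_PAGE_EOR,  0xff, 0xff },   end of table
+ */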
+static int adv748x_write_regs(struct adv748x_state *state,
+ const struct adv748x_reg_value *regs)
+{
+ int ret;
+
+ while (regs->page != ADV748X_PAGE_EOR) {
+ if (regs->page == ADV748X_PAGE_WAIT) {
+ msleep(regs->value);
+ } else {
+ ret = adv748x_write(state, regs->page, regs->reg,
+ regs->value);
+ if (ret < 0) {
+ adv_err(state,
+ "Error regs page: 0x%02x reg: 0x%02x\n",
+ regs->page, regs->reg);
+ return ret;
+ }
+ }
+ regs++;
+ }
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * TXA and TXB
+ */
+
+static const struct adv748x_reg_value adv748x_power_up_txa_4lane[] = {
+
+ {ADV748X_PAGE_TXA, 0x00, 0x84}, /* Enable 4-lane MIPI */
+ {ADV748X_PAGE_TXA, 0x00, 0xa4}, /* Set Auto DPHY Timing */
+
+ {ADV748X_PAGE_TXA, 0x31, 0x82}, /* ADI Required Write */
+ {ADV748X_PAGE_TXA, 0x1e, 0x40}, /* ADI Required Write */
+ {ADV748X_PAGE_TXA, 0xda, 0x01}, /* i2c_mipi_pll_en - 1'b1 */
+ {ADV748X_PAGE_WAIT, 0x00, 0x02},/* delay 2 */
+ {ADV748X_PAGE_TXA, 0x00, 0x24 },/* Power-up CSI-TX */
+ {ADV748X_PAGE_WAIT, 0x00, 0x01},/* delay 1 */
+ {ADV748X_PAGE_TXA, 0xc1, 0x2b}, /* ADI Required Write */
+ {ADV748X_PAGE_WAIT, 0x00, 0x01},/* delay 1 */
+ {ADV748X_PAGE_TXA, 0x31, 0x80}, /* ADI Required Write */
+
+ {ADV748X_PAGE_EOR, 0xff, 0xff} /* End of register table */
+};
+
+static const struct adv748x_reg_value adv748x_power_down_txa_4lane[] = {
+
+ {ADV748X_PAGE_TXA, 0x31, 0x82}, /* ADI Required Write */
+ {ADV748X_PAGE_TXA, 0x1e, 0x00}, /* ADI Required Write */
+ {ADV748X_PAGE_TXA, 0x00, 0x84}, /* Enable 4-lane MIPI */
+ {ADV748X_PAGE_TXA, 0xda, 0x01}, /* i2c_mipi_pll_en - 1'b1 */
+ {ADV748X_PAGE_TXA, 0xc1, 0x3b}, /* ADI Required Write */
+
+ {ADV748X_PAGE_EOR, 0xff, 0xff} /* End of register table */
+};
+
+static const struct adv748x_reg_value adv748x_power_up_txb_1lane[] = {
+
+ {ADV748X_PAGE_TXB, 0x00, 0x81}, /* Enable 1-lane MIPI */
+ {ADV748X_PAGE_TXB, 0x00, 0xa1}, /* Set Auto DPHY Timing */
+
+ {ADV748X_PAGE_TXB, 0x31, 0x82}, /* ADI Required Write */
+ {ADV748X_PAGE_TXB, 0x1e, 0x40}, /* ADI Required Write */
+ {ADV748X_PAGE_TXB, 0xda, 0x01}, /* i2c_mipi_pll_en - 1'b1 */
+ {ADV748X_PAGE_WAIT, 0x00, 0x02},/* delay 2 */
+ {ADV748X_PAGE_TXB, 0x00, 0x21 },/* Power-up CSI-TX */
+ {ADV748X_PAGE_WAIT, 0x00, 0x01},/* delay 1 */
+ {ADV748X_PAGE_TXB, 0xc1, 0x2b}, /* ADI Required Write */
+ {ADV748X_PAGE_WAIT, 0x00, 0x01},/* delay 1 */
+ {ADV748X_PAGE_TXB, 0x31, 0x80}, /* ADI Required Write */
+
+ {ADV748X_PAGE_EOR, 0xff, 0xff} /* End of register table */
+};
+
+static const struct adv748x_reg_value adv748x_power_down_txb_1lane[] = {
+
+ {ADV748X_PAGE_TXB, 0x31, 0x82}, /* ADI Required Write */
+ {ADV748X_PAGE_TXB, 0x1e, 0x00}, /* ADI Required Write */
+ {ADV748X_PAGE_TXB, 0x00, 0x81}, /* Enable 1-lane MIPI */
+ {ADV748X_PAGE_TXB, 0xda, 0x01}, /* i2c_mipi_pll_en - 1'b1 */
+ {ADV748X_PAGE_TXB, 0xc1, 0x3b}, /* ADI Required Write */
+
+ {ADV748X_PAGE_EOR, 0xff, 0xff} /* End of register table */
+};
+
+int adv748x_txa_power(struct adv748x_state *state, bool on)
+{
+ int val;
+
+ val = txa_read(state, ADV748X_CSI_FS_AS_LS);
+ if (val < 0)
+ return val;
+
+ /*
+ * This test against BIT(6) is not documented by the datasheet, but was
+ * specified in the downstream driver.
+ * Track with a WARN_ONCE to determine if it is ever set by HW.
+ */
+ WARN_ONCE((on && val & ADV748X_CSI_FS_AS_LS_UNKNOWN),
+ "Enabling with unknown bit set");
+
+ if (on)
+ return adv748x_write_regs(state, adv748x_power_up_txa_4lane);
+
+ return adv748x_write_regs(state, adv748x_power_down_txa_4lane);
+}
+
+int adv748x_txb_power(struct adv748x_state *state, bool on)
+{
+ int val;
+
+ val = txb_read(state, ADV748X_CSI_FS_AS_LS);
+ if (val < 0)
+ return val;
+
+ /*
+ * This test against BIT(6) is not documented by the datasheet, but was
+ * specified in the downstream driver.
+ * Track with a WARN_ONCE to determine if it is ever set by HW.
+ */
+ WARN_ONCE((on && val & ADV748X_CSI_FS_AS_LS_UNKNOWN),
+ "Enabling with unknown bit set");
+
+ if (on)
+ return adv748x_write_regs(state, adv748x_power_up_txb_1lane);
+
+ return adv748x_write_regs(state, adv748x_power_down_txb_1lane);
+}
+
+/* -----------------------------------------------------------------------------
+ * Media Operations
+ */
+
+static const struct media_entity_operations adv748x_media_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/* -----------------------------------------------------------------------------
+ * HW setup
+ */
+
+static const struct adv748x_reg_value adv748x_sw_reset[] = {
+
+ {ADV748X_PAGE_IO, 0xff, 0xff}, /* SW reset */
+ {ADV748X_PAGE_WAIT, 0x00, 0x05},/* delay 5 */
+ {ADV748X_PAGE_IO, 0x01, 0x76}, /* ADI Required Write */
+ {ADV748X_PAGE_IO, 0xf2, 0x01}, /* Enable I2C Read Auto-Increment */
+ {ADV748X_PAGE_EOR, 0xff, 0xff} /* End of register table */
+};
+
+static const struct adv748x_reg_value adv748x_set_slave_address[] = {
+ {ADV748X_PAGE_IO, 0xf3, ADV748X_I2C_DPLL << 1},
+ {ADV748X_PAGE_IO, 0xf4, ADV748X_I2C_CP << 1},
+ {ADV748X_PAGE_IO, 0xf5, ADV748X_I2C_HDMI << 1},
+ {ADV748X_PAGE_IO, 0xf6, ADV748X_I2C_EDID << 1},
+ {ADV748X_PAGE_IO, 0xf7, ADV748X_I2C_REPEATER << 1},
+ {ADV748X_PAGE_IO, 0xf8, ADV748X_I2C_INFOFRAME << 1},
+ {ADV748X_PAGE_IO, 0xfa, ADV748X_I2C_CEC << 1},
+ {ADV748X_PAGE_IO, 0xfb, ADV748X_I2C_SDP << 1},
+ {ADV748X_PAGE_IO, 0xfc, ADV748X_I2C_TXB << 1},
+ {ADV748X_PAGE_IO, 0xfd, ADV748X_I2C_TXA << 1},
+ {ADV748X_PAGE_EOR, 0xff, 0xff} /* End of register table */
+};
+
+/* Supported Formats For Script Below */
+/* - 01-29 HDMI to MIPI TxA CSI 4-Lane - RGB888: */
+static const struct adv748x_reg_value adv748x_init_txa_4lane[] = {
+ /* Disable chip powerdown & Enable HDMI Rx block */
+ {ADV748X_PAGE_IO, 0x00, 0x40},
+
+ {ADV748X_PAGE_REPEATER, 0x40, 0x83}, /* Enable HDCP 1.1 */
+
+ {ADV748X_PAGE_HDMI, 0x00, 0x08},/* Foreground Channel = A */
+ {ADV748X_PAGE_HDMI, 0x98, 0xff},/* ADI Required Write */
+ {ADV748X_PAGE_HDMI, 0x99, 0xa3},/* ADI Required Write */
+ {ADV748X_PAGE_HDMI, 0x9a, 0x00},/* ADI Required Write */
+ {ADV748X_PAGE_HDMI, 0x9b, 0x0a},/* ADI Required Write */
+ {ADV748X_PAGE_HDMI, 0x9d, 0x40},/* ADI Required Write */
+ {ADV748X_PAGE_HDMI, 0xcb, 0x09},/* ADI Required Write */
+ {ADV748X_PAGE_HDMI, 0x3d, 0x10},/* ADI Required Write */
+ {ADV748X_PAGE_HDMI, 0x3e, 0x7b},/* ADI Required Write */
+ {ADV748X_PAGE_HDMI, 0x3f, 0x5e},/* ADI Required Write */
+ {ADV748X_PAGE_HDMI, 0x4e, 0xfe},/* ADI Required Write */
+ {ADV748X_PAGE_HDMI, 0x4f, 0x18},/* ADI Required Write */
+ {ADV748X_PAGE_HDMI, 0x57, 0xa3},/* ADI Required Write */
+ {ADV748X_PAGE_HDMI, 0x58, 0x04},/* ADI Required Write */
+ {ADV748X_PAGE_HDMI, 0x85, 0x10},/* ADI Required Write */
+
+ {ADV748X_PAGE_HDMI, 0x83, 0x00},/* Enable All Terminations */
+ {ADV748X_PAGE_HDMI, 0xa3, 0x01},/* ADI Required Write */
+ {ADV748X_PAGE_HDMI, 0xbe, 0x00},/* ADI Required Write */
+
+ {ADV748X_PAGE_HDMI, 0x6c, 0x01},/* HPA Manual Enable */
+ {ADV748X_PAGE_HDMI, 0xf8, 0x01},/* HPA Asserted */
+ {ADV748X_PAGE_HDMI, 0x0f, 0x00},/* Audio Mute Speed Set to Fastest */
+ /* (Smallest Step Size) */
+
+ {ADV748X_PAGE_IO, 0x04, 0x02}, /* RGB Out of CP */
+ {ADV748X_PAGE_IO, 0x12, 0xf0}, /* CSC Depends on ip Packets, SDR 444 */
+ {ADV748X_PAGE_IO, 0x17, 0x80}, /* Luma & Chroma can reach 254d */
+ {ADV748X_PAGE_IO, 0x03, 0x86}, /* CP-Insert_AV_Code */
+
+ {ADV748X_PAGE_CP, 0x7c, 0x00}, /* ADI Required Write */
+
+ {ADV748X_PAGE_IO, 0x0c, 0xe0}, /* Enable LLC_DLL & Double LLC Timing */
+ {ADV748X_PAGE_IO, 0x0e, 0xdd}, /* LLC/PIX/SPI PINS TRISTATED AUD */
+ /* Outputs Enabled */
+ {ADV748X_PAGE_IO, 0x10, 0xa0}, /* Enable 4-lane CSI Tx & Pixel Port */
+
+ {ADV748X_PAGE_TXA, 0x00, 0x84}, /* Enable 4-lane MIPI */
+ {ADV748X_PAGE_TXA, 0x00, 0xa4}, /* Set Auto DPHY Timing */
+ {ADV748X_PAGE_TXA, 0xdb, 0x10}, /* ADI Required Write */
+ {ADV748X_PAGE_TXA, 0xd6, 0x07}, /* ADI Required Write */
+ {ADV748X_PAGE_TXA, 0xc4, 0x0a}, /* ADI Required Write */
+ {ADV748X_PAGE_TXA, 0x71, 0x33}, /* ADI Required Write */
+ {ADV748X_PAGE_TXA, 0x72, 0x11}, /* ADI Required Write */
+ {ADV748X_PAGE_TXA, 0xf0, 0x00}, /* i2c_dphy_pwdn - 1'b0 */
+
+ {ADV748X_PAGE_TXA, 0x31, 0x82}, /* ADI Required Write */
+ {ADV748X_PAGE_TXA, 0x1e, 0x40}, /* ADI Required Write */
+ {ADV748X_PAGE_TXA, 0xda, 0x01}, /* i2c_mipi_pll_en - 1'b1 */
+ {ADV748X_PAGE_WAIT, 0x00, 0x02},/* delay 2 */
+ {ADV748X_PAGE_TXA, 0x00, 0x24 },/* Power-up CSI-TX */
+ {ADV748X_PAGE_WAIT, 0x00, 0x01},/* delay 1 */
+ {ADV748X_PAGE_TXA, 0xc1, 0x2b}, /* ADI Required Write */
+ {ADV748X_PAGE_WAIT, 0x00, 0x01},/* delay 1 */
+ {ADV748X_PAGE_TXA, 0x31, 0x80}, /* ADI Required Write */
+
+ {ADV748X_PAGE_EOR, 0xff, 0xff} /* End of register table */
+};
+
+/* 02-01 Analog CVBS to MIPI TX-B CSI 1-Lane - */
+/* Autodetect CVBS Single Ended In Ain 1 - MIPI Out */
+static const struct adv748x_reg_value adv748x_init_txb_1lane[] = {
+
+ {ADV748X_PAGE_IO, 0x00, 0x30}, /* Disable chip powerdown Rx */
+ {ADV748X_PAGE_IO, 0xf2, 0x01}, /* Enable I2C Read Auto-Increment */
+
+ {ADV748X_PAGE_IO, 0x0e, 0xff}, /* LLC/PIX/AUD/SPI PINS TRISTATED */
+
+ {ADV748X_PAGE_SDP, 0x0f, 0x00}, /* Exit Power Down Mode */
+ {ADV748X_PAGE_SDP, 0x52, 0xcd}, /* ADI Required Write */
+
+ {ADV748X_PAGE_SDP, 0x0e, 0x80}, /* ADI Required Write */
+ {ADV748X_PAGE_SDP, 0x9c, 0x00}, /* ADI Required Write */
+ {ADV748X_PAGE_SDP, 0x9c, 0xff}, /* ADI Required Write */
+ {ADV748X_PAGE_SDP, 0x0e, 0x00}, /* ADI Required Write */
+
+ /* ADI recommended writes for improved video quality */
+ {ADV748X_PAGE_SDP, 0x80, 0x51}, /* ADI Required Write */
+ {ADV748X_PAGE_SDP, 0x81, 0x51}, /* ADI Required Write */
+ {ADV748X_PAGE_SDP, 0x82, 0x68}, /* ADI Required Write */
+
+ {ADV748X_PAGE_SDP, 0x03, 0x42}, /* Tri-S Output , PwrDwn 656 pads */
+ {ADV748X_PAGE_SDP, 0x04, 0xb5}, /* ITU-R BT.656-4 compatible */
+ {ADV748X_PAGE_SDP, 0x13, 0x00}, /* ADI Required Write */
+
+ {ADV748X_PAGE_SDP, 0x17, 0x41}, /* Select SH1 */
+ {ADV748X_PAGE_SDP, 0x31, 0x12}, /* ADI Required Write */
+ {ADV748X_PAGE_SDP, 0xe6, 0x4f}, /* V bit end pos manually in NTSC */
+
+ /* Enable 1-Lane MIPI Tx, */
+ /* enable pixel output and route SD through Pixel port */
+ {ADV748X_PAGE_IO, 0x10, 0x70},
+
+ {ADV748X_PAGE_TXB, 0x00, 0x81}, /* Enable 1-lane MIPI */
+ {ADV748X_PAGE_TXB, 0x00, 0xa1}, /* Set Auto DPHY Timing */
+ {ADV748X_PAGE_TXB, 0xd2, 0x40}, /* ADI Required Write */
+ {ADV748X_PAGE_TXB, 0xc4, 0x0a}, /* ADI Required Write */
+ {ADV748X_PAGE_TXB, 0x71, 0x33}, /* ADI Required Write */
+ {ADV748X_PAGE_TXB, 0x72, 0x11}, /* ADI Required Write */
+ {ADV748X_PAGE_TXB, 0xf0, 0x00}, /* i2c_dphy_pwdn - 1'b0 */
+ {ADV748X_PAGE_TXB, 0x31, 0x82}, /* ADI Required Write */
+ {ADV748X_PAGE_TXB, 0x1e, 0x40}, /* ADI Required Write */
+ {ADV748X_PAGE_TXB, 0xda, 0x01}, /* i2c_mipi_pll_en - 1'b1 */
+
+ {ADV748X_PAGE_WAIT, 0x00, 0x02},/* delay 2 */
+ {ADV748X_PAGE_TXB, 0x00, 0x21 },/* Power-up CSI-TX */
+ {ADV748X_PAGE_WAIT, 0x00, 0x01},/* delay 1 */
+ {ADV748X_PAGE_TXB, 0xc1, 0x2b}, /* ADI Required Write */
+ {ADV748X_PAGE_WAIT, 0x00, 0x01},/* delay 1 */
+ {ADV748X_PAGE_TXB, 0x31, 0x80}, /* ADI Required Write */
+
+ {ADV748X_PAGE_EOR, 0xff, 0xff} /* End of register table */
+};
+
+static int adv748x_reset(struct adv748x_state *state)
+{
+ int ret;
+
+ ret = adv748x_write_regs(state, adv748x_sw_reset);
+ if (ret < 0)
+ return ret;
+
+ ret = adv748x_write_regs(state, adv748x_set_slave_address);
+ if (ret < 0)
+ return ret;
+
+ /* Init and power down TXA */
+ ret = adv748x_write_regs(state, adv748x_init_txa_4lane);
+ if (ret)
+ return ret;
+
+ adv748x_txa_power(state, 0);
+
+ /* Init and power down TXB */
+ ret = adv748x_write_regs(state, adv748x_init_txb_1lane);
+ if (ret)
+ return ret;
+
+ adv748x_txb_power(state, 0);
+
+ /* Disable chip powerdown & Enable HDMI Rx block */
+ io_write(state, ADV748X_IO_PD, ADV748X_IO_PD_RX_EN);
+
+ /* Enable 4-lane CSI Tx & Pixel Port */
+ io_write(state, ADV748X_IO_10, ADV748X_IO_10_CSI4_EN |
+ ADV748X_IO_10_CSI1_EN |
+ ADV748X_IO_10_PIX_OUT_EN);
+
+ /* Use vid_std and v_freq as freerun resolution for CP */
+ cp_clrset(state, ADV748X_CP_CLMP_POS, ADV748X_CP_CLMP_POS_DIS_AUTO,
+ ADV748X_CP_CLMP_POS_DIS_AUTO);
+
+ return 0;
+}
+
+static int adv748x_identify_chip(struct adv748x_state *state)
+{
+ int msb, lsb;
+
+ lsb = io_read(state, ADV748X_IO_CHIP_REV_ID_1);
+ msb = io_read(state, ADV748X_IO_CHIP_REV_ID_2);
+
+ if (lsb < 0 || msb < 0) {
+ adv_err(state, "Failed to read chip revision\n");
+ return -EIO;
+ }
+
+ adv_info(state, "chip found @ 0x%02x revision %02x%02x\n",
+ state->client->addr << 1, lsb, msb);
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * i2c driver
+ */
+
+void adv748x_subdev_init(struct v4l2_subdev *sd, struct adv748x_state *state,
+ const struct v4l2_subdev_ops *ops, u32 function,
+ const char *ident)
+{
+ v4l2_subdev_init(sd, ops);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ /* the owner is the same as the i2c_client's driver owner */
+ sd->owner = state->dev->driver->owner;
+ sd->dev = state->dev;
+
+ v4l2_set_subdevdata(sd, state);
+
+ /* initialize name */
+ snprintf(sd->name, sizeof(sd->name), "%s %d-%04x %s",
+ state->dev->driver->name,
+ i2c_adapter_id(state->client->adapter),
+ state->client->addr, ident);
+
+ sd->entity.function = function;
+ sd->entity.ops = &adv748x_media_ops;
+}
+
+static int adv748x_parse_dt(struct adv748x_state *state)
+{
+ struct device_node *ep_np = NULL;
+ struct of_endpoint ep;
+ bool found = false;
+
+ for_each_endpoint_of_node(state->dev->of_node, ep_np) {
+ of_graph_parse_endpoint(ep_np, &ep);
+ adv_info(state, "Endpoint %s on port %d",
+ of_node_full_name(ep.local_node),
+ ep.port);
+
+ if (ep.port >= ADV748X_PORT_MAX) {
+ adv_err(state, "Invalid endpoint %s on port %d",
+ of_node_full_name(ep.local_node),
+ ep.port);
+
+ continue;
+ }
+
+ if (state->endpoints[ep.port]) {
+ adv_err(state,
+ "Multiple port endpoints are not supported");
+ continue;
+ }
+
+ of_node_get(ep_np);
+ state->endpoints[ep.port] = ep_np;
+
+ found = true;
+ }
+
+ return found ? 0 : -ENODEV;
+}
+
+static void adv748x_dt_cleanup(struct adv748x_state *state)
+{
+ unsigned int i;
+
+ for (i = 0; i < ADV748X_PORT_MAX; i++)
+ of_node_put(state->endpoints[i]);
+}
+
+static int adv748x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct adv748x_state *state;
+ int ret;
+
+ /* Check if the adapter supports the needed features */
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -EIO;
+
+ state = kzalloc(sizeof(struct adv748x_state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ mutex_init(&state->mutex);
+
+ state->dev = &client->dev;
+ state->client = client;
+ state->i2c_clients[ADV748X_PAGE_IO] = client;
+ i2c_set_clientdata(client, state);
+
+ /* Discover and process ports declared by the Device tree endpoints */
+ ret = adv748x_parse_dt(state);
+ if (ret) {
+ adv_err(state, "Failed to parse device tree");
+ goto err_free_mutex;
+ }
+
+ /* Configure IO Regmap region */
+ ret = adv748x_configure_regmap(state, ADV748X_PAGE_IO);
+ if (ret) {
+ adv_err(state, "Error configuring IO regmap region");
+ goto err_cleanup_dt;
+ }
+
+ ret = adv748x_identify_chip(state);
+ if (ret) {
+ adv_err(state, "Failed to identify chip");
+ goto err_cleanup_clients;
+ }
+
+ /* Configure remaining pages as I2C clients with regmap access */
+ ret = adv748x_initialise_clients(state);
+ if (ret) {
+ adv_err(state, "Failed to setup client regmap pages");
+ goto err_cleanup_clients;
+ }
+
+ /* SW reset ADV748X to its default values */
+ ret = adv748x_reset(state);
+ if (ret) {
+ adv_err(state, "Failed to reset hardware");
+ goto err_cleanup_clients;
+ }
+
+ /* Initialise HDMI */
+ ret = adv748x_hdmi_init(&state->hdmi);
+ if (ret) {
+ adv_err(state, "Failed to probe HDMI");
+ goto err_cleanup_clients;
+ }
+
+ /* Initialise AFE */
+ ret = adv748x_afe_init(&state->afe);
+ if (ret) {
+ adv_err(state, "Failed to probe AFE");
+ goto err_cleanup_hdmi;
+ }
+
+ /* Initialise TXA */
+ ret = adv748x_csi2_init(state, &state->txa);
+ if (ret) {
+ adv_err(state, "Failed to probe TXA");
+ goto err_cleanup_afe;
+ }
+
+ /* Initialise TXB */
+ ret = adv748x_csi2_init(state, &state->txb);
+ if (ret) {
+ adv_err(state, "Failed to probe TXB");
+ goto err_cleanup_txa;
+ }
+
+ return 0;
+
+err_cleanup_txa:
+ adv748x_csi2_cleanup(&state->txa);
+err_cleanup_afe:
+ adv748x_afe_cleanup(&state->afe);
+err_cleanup_hdmi:
+ adv748x_hdmi_cleanup(&state->hdmi);
+err_cleanup_clients:
+ adv748x_unregister_clients(state);
+err_cleanup_dt:
+ adv748x_dt_cleanup(state);
+err_free_mutex:
+ mutex_destroy(&state->mutex);
+ kfree(state);
+
+ return ret;
+}
+
+static int adv748x_remove(struct i2c_client *client)
+{
+ struct adv748x_state *state = i2c_get_clientdata(client);
+
+ adv748x_afe_cleanup(&state->afe);
+ adv748x_hdmi_cleanup(&state->hdmi);
+
+ adv748x_csi2_cleanup(&state->txa);
+ adv748x_csi2_cleanup(&state->txb);
+
+ adv748x_unregister_clients(state);
+ adv748x_dt_cleanup(state);
+ mutex_destroy(&state->mutex);
+
+ kfree(state);
+
+ return 0;
+}
+
+static const struct i2c_device_id adv748x_id[] = {
+ { "adv7481", 0 },
+ { "adv7482", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, adv748x_id);
+
+static const struct of_device_id adv748x_of_table[] = {
+ { .compatible = "adi,adv7481", },
+ { .compatible = "adi,adv7482", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, adv748x_of_table);
+
+static struct i2c_driver adv748x_driver = {
+ .driver = {
+ .name = "adv748x",
+ .of_match_table = adv748x_of_table,
+ },
+ .probe = adv748x_probe,
+ .remove = adv748x_remove,
+ .id_table = adv748x_id,
+};
+
+module_i2c_driver(adv748x_driver);
+
+MODULE_AUTHOR("Kieran Bingham <kieran.bingham@ideasonboard.com>");
+MODULE_DESCRIPTION("ADV748X video decoder");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/adv748x/adv748x-csi2.c b/drivers/media/i2c/adv748x/adv748x-csi2.c
new file mode 100644
index 000000000000..979825d4a419
--- /dev/null
+++ b/drivers/media/i2c/adv748x/adv748x-csi2.c
@@ -0,0 +1,326 @@
+/*
+ * Driver for Analog Devices ADV748X CSI-2 Transmitter
+ *
+ * Copyright (C) 2017 Renesas Electronics Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/mutex.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+
+#include "adv748x.h"
+
+static bool is_txa(struct adv748x_csi2 *tx)
+{
+ return tx == &tx->state->txa;
+}
+
+static int adv748x_csi2_set_virtual_channel(struct adv748x_csi2 *tx,
+ unsigned int vc)
+{
+ return tx_write(tx, ADV748X_CSI_VC_REF, vc << ADV748X_CSI_VC_REF_SHIFT);
+}
+
+/**
+ * adv748x_csi2_register_link : Register and link internal entities
+ *
+ * @tx: CSI2 private entity
+ * @v4l2_dev: Video registration device
+ * @src: Source subdevice to establish link
+ * @src_pad: Pad number of source to link to this @tx
+ *
+ * Ensure that the subdevice is registered against the v4l2_device, and link the
+ * source pad to the sink pad of the CSI2 bus entity.
+ */
+static int adv748x_csi2_register_link(struct adv748x_csi2 *tx,
+ struct v4l2_device *v4l2_dev,
+ struct v4l2_subdev *src,
+ unsigned int src_pad)
+{
+ int enabled = MEDIA_LNK_FL_ENABLED;
+ int ret;
+
+ /*
+ * Dynamic linking of the AFE is not supported.
+ * Register the links as immutable.
+ */
+ enabled |= MEDIA_LNK_FL_IMMUTABLE;
+
+ if (!src->v4l2_dev) {
+ ret = v4l2_device_register_subdev(v4l2_dev, src);
+ if (ret)
+ return ret;
+ }
+
+ return media_create_pad_link(&src->entity, src_pad,
+ &tx->sd.entity, ADV748X_CSI2_SINK,
+ enabled);
+}
+
+/* -----------------------------------------------------------------------------
+ * v4l2_subdev_internal_ops
+ *
+ * We use the internal registered operation to be able to ensure that our
+ * incremental subdevices (not connected in the forward path) can be registered
+ * against the resulting video path and media device.
+ */
+
+static int adv748x_csi2_registered(struct v4l2_subdev *sd)
+{
+ struct adv748x_csi2 *tx = adv748x_sd_to_csi2(sd);
+ struct adv748x_state *state = tx->state;
+
+ adv_dbg(state, "Registered %s (%s)", is_txa(tx) ? "TXA":"TXB",
+ sd->name);
+
+ /*
+ * The adv748x hardware allows the AFE to route through the TXA, but this
+ * is not currently supported by this driver.
+ *
+ * Link HDMI->TXA, and AFE->TXB directly.
+ */
+ if (is_txa(tx)) {
+ return adv748x_csi2_register_link(tx, sd->v4l2_dev,
+ &state->hdmi.sd,
+ ADV748X_HDMI_SOURCE);
+ } else {
+ return adv748x_csi2_register_link(tx, sd->v4l2_dev,
+ &state->afe.sd,
+ ADV748X_AFE_SOURCE);
+ }
+}
+
+static const struct v4l2_subdev_internal_ops adv748x_csi2_internal_ops = {
+ .registered = adv748x_csi2_registered,
+};
+
+/* -----------------------------------------------------------------------------
+ * v4l2_subdev_video_ops
+ */
+
+static int adv748x_csi2_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct adv748x_csi2 *tx = adv748x_sd_to_csi2(sd);
+ struct v4l2_subdev *src;
+
+ src = adv748x_get_remote_sd(&tx->pads[ADV748X_CSI2_SINK]);
+ if (!src)
+ return -EPIPE;
+
+ return v4l2_subdev_call(src, video, s_stream, enable);
+}
+
+static const struct v4l2_subdev_video_ops adv748x_csi2_video_ops = {
+ .s_stream = adv748x_csi2_s_stream,
+};
+
+/* -----------------------------------------------------------------------------
+ * v4l2_subdev_pad_ops
+ *
+ * The CSI2 bus pads are agnostic to the data sizes and formats, but we
+ * must support setting the pad formats so that they can be propagated
+ * through the pipeline.
+ */
+
+static struct v4l2_mbus_framefmt *
+adv748x_csi2_get_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ struct adv748x_csi2 *tx = adv748x_sd_to_csi2(sd);
+
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(sd, cfg, pad);
+
+ return &tx->format;
+}
+
+static int adv748x_csi2_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct adv748x_csi2 *tx = adv748x_sd_to_csi2(sd);
+ struct adv748x_state *state = tx->state;
+ struct v4l2_mbus_framefmt *mbusformat;
+
+ mbusformat = adv748x_csi2_get_pad_format(sd, cfg, sdformat->pad,
+ sdformat->which);
+ if (!mbusformat)
+ return -EINVAL;
+
+ mutex_lock(&state->mutex);
+
+ sdformat->format = *mbusformat;
+
+ mutex_unlock(&state->mutex);
+
+ return 0;
+}
+
+static int adv748x_csi2_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct adv748x_csi2 *tx = adv748x_sd_to_csi2(sd);
+ struct adv748x_state *state = tx->state;
+ struct v4l2_mbus_framefmt *mbusformat;
+ int ret = 0;
+
+ mbusformat = adv748x_csi2_get_pad_format(sd, cfg, sdformat->pad,
+ sdformat->which);
+ if (!mbusformat)
+ return -EINVAL;
+
+ mutex_lock(&state->mutex);
+
+ if (sdformat->pad == ADV748X_CSI2_SOURCE) {
+ const struct v4l2_mbus_framefmt *sink_fmt;
+
+ sink_fmt = adv748x_csi2_get_pad_format(sd, cfg,
+ ADV748X_CSI2_SINK,
+ sdformat->which);
+
+ if (!sink_fmt) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ sdformat->format = *sink_fmt;
+ }
+
+ *mbusformat = sdformat->format;
+
+unlock:
+ mutex_unlock(&state->mutex);
+
+ return ret;
+}
+
+static const struct v4l2_subdev_pad_ops adv748x_csi2_pad_ops = {
+ .get_fmt = adv748x_csi2_get_format,
+ .set_fmt = adv748x_csi2_set_format,
+};
+
+/* -----------------------------------------------------------------------------
+ * v4l2_subdev_ops
+ */
+
+static const struct v4l2_subdev_ops adv748x_csi2_ops = {
+ .video = &adv748x_csi2_video_ops,
+ .pad = &adv748x_csi2_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * Subdev module and controls
+ */
+
+int adv748x_csi2_set_pixelrate(struct v4l2_subdev *sd, s64 rate)
+{
+ struct v4l2_ctrl *ctrl;
+
+ ctrl = v4l2_ctrl_find(sd->ctrl_handler, V4L2_CID_PIXEL_RATE);
+ if (!ctrl)
+ return -EINVAL;
+
+ return v4l2_ctrl_s_ctrl_int64(ctrl, rate);
+}
+
+static int adv748x_csi2_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ switch (ctrl->id) {
+ case V4L2_CID_PIXEL_RATE:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct v4l2_ctrl_ops adv748x_csi2_ctrl_ops = {
+ .s_ctrl = adv748x_csi2_s_ctrl,
+};
+
+static int adv748x_csi2_init_controls(struct adv748x_csi2 *tx)
+{
+
+ v4l2_ctrl_handler_init(&tx->ctrl_hdl, 1);
+
+ v4l2_ctrl_new_std(&tx->ctrl_hdl, &adv748x_csi2_ctrl_ops,
+ V4L2_CID_PIXEL_RATE, 1, INT_MAX, 1, 1);
+
+ tx->sd.ctrl_handler = &tx->ctrl_hdl;
+ if (tx->ctrl_hdl.error) {
+ v4l2_ctrl_handler_free(&tx->ctrl_hdl);
+ return tx->ctrl_hdl.error;
+ }
+
+ return v4l2_ctrl_handler_setup(&tx->ctrl_hdl);
+}
+
+int adv748x_csi2_init(struct adv748x_state *state, struct adv748x_csi2 *tx)
+{
+ struct device_node *ep;
+ int ret;
+
+ /* We cannot use container_of to get back to the state with two TXs */
+ tx->state = state;
+ tx->page = is_txa(tx) ? ADV748X_PAGE_TXA : ADV748X_PAGE_TXB;
+
+ ep = state->endpoints[is_txa(tx) ? ADV748X_PORT_TXA : ADV748X_PORT_TXB];
+ if (!ep) {
+ adv_err(state, "No endpoint found for %s\n",
+ is_txa(tx) ? "txa" : "txb");
+ return -ENODEV;
+ }
+
+ /* Initialise the virtual channel */
+ adv748x_csi2_set_virtual_channel(tx, 0);
+
+ adv748x_subdev_init(&tx->sd, state, &adv748x_csi2_ops,
+ MEDIA_ENT_F_UNKNOWN,
+ is_txa(tx) ? "txa" : "txb");
+
+ /* Ensure that matching is based upon the endpoint fwnodes */
+ tx->sd.fwnode = of_fwnode_handle(ep);
+
+ /* Register internal ops for incremental subdev registration */
+ tx->sd.internal_ops = &adv748x_csi2_internal_ops;
+
+ tx->pads[ADV748X_CSI2_SINK].flags = MEDIA_PAD_FL_SINK;
+ tx->pads[ADV748X_CSI2_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&tx->sd.entity, ADV748X_CSI2_NR_PADS,
+ tx->pads);
+ if (ret)
+ return ret;
+
+ ret = adv748x_csi2_init_controls(tx);
+ if (ret)
+ goto err_free_media;
+
+ ret = v4l2_async_register_subdev(&tx->sd);
+ if (ret)
+ goto err_free_ctrl;
+
+ return 0;
+
+err_free_ctrl:
+ v4l2_ctrl_handler_free(&tx->ctrl_hdl);
+err_free_media:
+ media_entity_cleanup(&tx->sd.entity);
+
+ return ret;
+}
+
+void adv748x_csi2_cleanup(struct adv748x_csi2 *tx)
+{
+ v4l2_async_unregister_subdev(&tx->sd);
+ media_entity_cleanup(&tx->sd.entity);
+ v4l2_ctrl_handler_free(&tx->ctrl_hdl);
+}
diff --git a/drivers/media/i2c/adv748x/adv748x-hdmi.c b/drivers/media/i2c/adv748x/adv748x-hdmi.c
new file mode 100644
index 000000000000..4da4253553fc
--- /dev/null
+++ b/drivers/media/i2c/adv748x/adv748x-hdmi.c
@@ -0,0 +1,768 @@
+/*
+ * Driver for Analog Devices ADV748X HDMI receiver and Component Processor (CP)
+ *
+ * Copyright (C) 2017 Renesas Electronics Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/mutex.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-dv-timings.h>
+#include <media/v4l2-ioctl.h>
+
+#include <uapi/linux/v4l2-dv-timings.h>
+
+#include "adv748x.h"
+
+/* -----------------------------------------------------------------------------
+ * HDMI and CP
+ */
+
+#define ADV748X_HDMI_MIN_WIDTH 640
+#define ADV748X_HDMI_MAX_WIDTH 1920
+#define ADV748X_HDMI_MIN_HEIGHT 480
+#define ADV748X_HDMI_MAX_HEIGHT 1200
+
+/* V4L2_DV_BT_CEA_720X480I59_94 - 0.5 MHz */
+#define ADV748X_HDMI_MIN_PIXELCLOCK 13000000
+/* V4L2_DV_BT_DMT_1600X1200P60 */
+#define ADV748X_HDMI_MAX_PIXELCLOCK 162000000
+
+static const struct v4l2_dv_timings_cap adv748x_hdmi_timings_cap = {
+ .type = V4L2_DV_BT_656_1120,
+ /* keep this initialization for compatibility with GCC < 4.4.6 */
+ .reserved = { 0 },
+
+ V4L2_INIT_BT_TIMINGS(ADV748X_HDMI_MIN_WIDTH, ADV748X_HDMI_MAX_WIDTH,
+ ADV748X_HDMI_MIN_HEIGHT, ADV748X_HDMI_MAX_HEIGHT,
+ ADV748X_HDMI_MIN_PIXELCLOCK,
+ ADV748X_HDMI_MAX_PIXELCLOCK,
+ V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT,
+ V4L2_DV_BT_CAP_PROGRESSIVE)
+};
+
+struct adv748x_hdmi_video_standards {
+ struct v4l2_dv_timings timings;
+ u8 vid_std;
+ u8 v_freq;
+};
+
+static const struct adv748x_hdmi_video_standards
+adv748x_hdmi_video_standards[] = {
+ { V4L2_DV_BT_CEA_720X480P59_94, 0x4a, 0x00 },
+ { V4L2_DV_BT_CEA_720X576P50, 0x4b, 0x00 },
+ { V4L2_DV_BT_CEA_1280X720P60, 0x53, 0x00 },
+ { V4L2_DV_BT_CEA_1280X720P50, 0x53, 0x01 },
+ { V4L2_DV_BT_CEA_1280X720P30, 0x53, 0x02 },
+ { V4L2_DV_BT_CEA_1280X720P25, 0x53, 0x03 },
+ { V4L2_DV_BT_CEA_1280X720P24, 0x53, 0x04 },
+ { V4L2_DV_BT_CEA_1920X1080P60, 0x5e, 0x00 },
+ { V4L2_DV_BT_CEA_1920X1080P50, 0x5e, 0x01 },
+ { V4L2_DV_BT_CEA_1920X1080P30, 0x5e, 0x02 },
+ { V4L2_DV_BT_CEA_1920X1080P25, 0x5e, 0x03 },
+ { V4L2_DV_BT_CEA_1920X1080P24, 0x5e, 0x04 },
+ /* SVGA */
+ { V4L2_DV_BT_DMT_800X600P56, 0x80, 0x00 },
+ { V4L2_DV_BT_DMT_800X600P60, 0x81, 0x00 },
+ { V4L2_DV_BT_DMT_800X600P72, 0x82, 0x00 },
+ { V4L2_DV_BT_DMT_800X600P75, 0x83, 0x00 },
+ { V4L2_DV_BT_DMT_800X600P85, 0x84, 0x00 },
+ /* SXGA */
+ { V4L2_DV_BT_DMT_1280X1024P60, 0x85, 0x00 },
+ { V4L2_DV_BT_DMT_1280X1024P75, 0x86, 0x00 },
+ /* VGA */
+ { V4L2_DV_BT_DMT_640X480P60, 0x88, 0x00 },
+ { V4L2_DV_BT_DMT_640X480P72, 0x89, 0x00 },
+ { V4L2_DV_BT_DMT_640X480P75, 0x8a, 0x00 },
+ { V4L2_DV_BT_DMT_640X480P85, 0x8b, 0x00 },
+ /* XGA */
+ { V4L2_DV_BT_DMT_1024X768P60, 0x8c, 0x00 },
+ { V4L2_DV_BT_DMT_1024X768P70, 0x8d, 0x00 },
+ { V4L2_DV_BT_DMT_1024X768P75, 0x8e, 0x00 },
+ { V4L2_DV_BT_DMT_1024X768P85, 0x8f, 0x00 },
+ /* UXGA */
+ { V4L2_DV_BT_DMT_1600X1200P60, 0x96, 0x00 },
+};
+
+static void adv748x_hdmi_fill_format(struct adv748x_hdmi *hdmi,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ memset(fmt, 0, sizeof(*fmt));
+
+ fmt->code = MEDIA_BUS_FMT_RGB888_1X24;
+ fmt->field = hdmi->timings.bt.interlaced ?
+ V4L2_FIELD_ALTERNATE : V4L2_FIELD_NONE;
+
+ /* TODO: The colorspace depends on the AVI InfoFrame contents */
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+
+ fmt->width = hdmi->timings.bt.width;
+ fmt->height = hdmi->timings.bt.height;
+}
+
+static void adv748x_fill_optional_dv_timings(struct v4l2_dv_timings *timings)
+{
+ v4l2_find_dv_timings_cap(timings, &adv748x_hdmi_timings_cap,
+ 250000, NULL, NULL);
+}
+
+static bool adv748x_hdmi_has_signal(struct adv748x_state *state)
+{
+ int val;
+
+ /* Check that VERT_FILTER and DE_REGEN is locked */
+ val = hdmi_read(state, ADV748X_HDMI_LW1);
+ return (val & ADV748X_HDMI_LW1_VERT_FILTER) &&
+ (val & ADV748X_HDMI_LW1_DE_REGEN);
+}
+
+static int adv748x_hdmi_read_pixelclock(struct adv748x_state *state)
+{
+ int a, b;
+
+ a = hdmi_read(state, ADV748X_HDMI_TMDS_1);
+ b = hdmi_read(state, ADV748X_HDMI_TMDS_2);
+ if (a < 0 || b < 0)
+ return -ENODATA;
+
+ /*
+ * The high 9 bits store TMDS frequency measurement in MHz
+ * The low 7 bits of TMDS_2 store the 7-bit TMDS fractional frequency
+ * measurement in 1/128 MHz
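+ *
+ * Editorial example (hypothetical readings): TMDS_1 = 0x4a and TMDS_2 =
+ * 0x40 give (0x4a << 1) | 0 = 148 MHz plus 64/128 = 0.5 MHz, i.e.
+ * 148500000 Hz, the nominal 1080p60 pixel clock.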
+ */
+ return ((a << 1) | (b >> 7)) * 1000000 + (b & 0x7f) * 1000000 / 128;
+}
+
+/*
+ * adv748x_hdmi_set_de_timings: Adjust horizontal picture offset through DE
+ *
+ * HDMI CP uses a Data Enable synchronisation timing reference
+ *
+ * Vary the leading and trailing edge position of the DE signal output by the CP
+ * core. Values are stored as signed two's complement in one-pixel-clock units.
+ *
+ * The start and end are shifted equally by the 10-bit shift value.
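+ *
+ * Editorial example (hypothetical shift): shift = -40 is encoded as the
+ * 10-bit two's complement value 0x3d8, so bits [9:8] (0x3) go into the
+ * POS_HIGH register and bits [7:0] (0xd8) into the start/end low bytes.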
+ */
+static void adv748x_hdmi_set_de_timings(struct adv748x_state *state, int shift)
+{
+ u8 high, low;
+
+ /* POS_HIGH stores bits 8 and 9 of both the start and end */
+ high = ADV748X_CP_DE_POS_HIGH_SET;
+ high |= (shift & 0x300) >> 8;
+ low = shift & 0xff;
+
+ /* The sequence of the writes is important and must be followed */
+ cp_write(state, ADV748X_CP_DE_POS_HIGH, high);
+ cp_write(state, ADV748X_CP_DE_POS_END_LOW, low);
+
+ high |= (shift & 0x300) >> 6;
+
+ cp_write(state, ADV748X_CP_DE_POS_HIGH, high);
+ cp_write(state, ADV748X_CP_DE_POS_START_LOW, low);
+}
+
+static int adv748x_hdmi_set_video_timings(struct adv748x_state *state,
+ const struct v4l2_dv_timings *timings)
+{
+ const struct adv748x_hdmi_video_standards *stds =
+ adv748x_hdmi_video_standards;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(adv748x_hdmi_video_standards); i++) {
+ if (v4l2_match_dv_timings(timings, &stds[i].timings, 250000,
+ false))
+ break;
+ }
+
+ if (i >= ARRAY_SIZE(adv748x_hdmi_video_standards))
+ return -EINVAL;
+
+ /*
+ * When setting cp_vid_std to either 720p, 1080i, or 1080p, the video
+ * will get shifted horizontally to the left in active video mode.
+ * The de_h_start and de_h_end controls are used to centre the picture
+ * correctly.
+ */
+ switch (stds[i].vid_std) {
+ case 0x53: /* 720p */
+ adv748x_hdmi_set_de_timings(state, -40);
+ break;
+ case 0x54: /* 1080i */
+ case 0x5e: /* 1080p */
+ adv748x_hdmi_set_de_timings(state, -44);
+ break;
+ default:
+ adv748x_hdmi_set_de_timings(state, 0);
+ break;
+ }
+
+ io_write(state, ADV748X_IO_VID_STD, stds[i].vid_std);
+ io_clrset(state, ADV748X_IO_DATAPATH, ADV748X_IO_DATAPATH_VFREQ_M,
+ stds[i].v_freq << ADV748X_IO_DATAPATH_VFREQ_SHIFT);
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * v4l2_subdev_video_ops
+ */
+
+static int adv748x_hdmi_s_dv_timings(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings *timings)
+{
+ struct adv748x_hdmi *hdmi = adv748x_sd_to_hdmi(sd);
+ struct adv748x_state *state = adv748x_hdmi_to_state(hdmi);
+ int ret;
+
+ if (!timings)
+ return -EINVAL;
+
+ if (v4l2_match_dv_timings(&hdmi->timings, timings, 0, false))
+ return 0;
+
+ if (!v4l2_valid_dv_timings(timings, &adv748x_hdmi_timings_cap,
+ NULL, NULL))
+ return -ERANGE;
+
+ adv748x_fill_optional_dv_timings(timings);
+
+ mutex_lock(&state->mutex);
+
+ ret = adv748x_hdmi_set_video_timings(state, timings);
+ if (ret)
+ goto error;
+
+ hdmi->timings = *timings;
+
+ cp_clrset(state, ADV748X_CP_VID_ADJ_2, ADV748X_CP_VID_ADJ_2_INTERLACED,
+ timings->bt.interlaced ?
+ ADV748X_CP_VID_ADJ_2_INTERLACED : 0);
+
+ mutex_unlock(&state->mutex);
+
+ return 0;
+
+error:
+ mutex_unlock(&state->mutex);
+ return ret;
+}
+
+static int adv748x_hdmi_g_dv_timings(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings *timings)
+{
+ struct adv748x_hdmi *hdmi = adv748x_sd_to_hdmi(sd);
+ struct adv748x_state *state = adv748x_hdmi_to_state(hdmi);
+
+ mutex_lock(&state->mutex);
+
+ *timings = hdmi->timings;
+
+ mutex_unlock(&state->mutex);
+
+ return 0;
+}
+
+static int adv748x_hdmi_query_dv_timings(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings *timings)
+{
+ struct adv748x_hdmi *hdmi = adv748x_sd_to_hdmi(sd);
+ struct adv748x_state *state = adv748x_hdmi_to_state(hdmi);
+ struct v4l2_bt_timings *bt = &timings->bt;
+ int pixelclock;
+ int polarity;
+
+ if (!timings)
+ return -EINVAL;
+
+ memset(timings, 0, sizeof(struct v4l2_dv_timings));
+
+ if (!adv748x_hdmi_has_signal(state))
+ return -ENOLINK;
+
+ pixelclock = adv748x_hdmi_read_pixelclock(state);
+ if (pixelclock < 0)
+ return -ENODATA;
+
+ timings->type = V4L2_DV_BT_656_1120;
+
+ bt->pixelclock = pixelclock;
+ bt->interlaced = hdmi_read(state, ADV748X_HDMI_F1H1) &
+ ADV748X_HDMI_F1H1_INTERLACED ?
+ V4L2_DV_INTERLACED : V4L2_DV_PROGRESSIVE;
+ bt->width = hdmi_read16(state, ADV748X_HDMI_LW1,
+ ADV748X_HDMI_LW1_WIDTH_MASK);
+ bt->height = hdmi_read16(state, ADV748X_HDMI_F0H1,
+ ADV748X_HDMI_F0H1_HEIGHT_MASK);
+ bt->hfrontporch = hdmi_read16(state, ADV748X_HDMI_HFRONT_PORCH,
+ ADV748X_HDMI_HFRONT_PORCH_MASK);
+ bt->hsync = hdmi_read16(state, ADV748X_HDMI_HSYNC_WIDTH,
+ ADV748X_HDMI_HSYNC_WIDTH_MASK);
+ bt->hbackporch = hdmi_read16(state, ADV748X_HDMI_HBACK_PORCH,
+ ADV748X_HDMI_HBACK_PORCH_MASK);
+ bt->vfrontporch = hdmi_read16(state, ADV748X_HDMI_VFRONT_PORCH,
+ ADV748X_HDMI_VFRONT_PORCH_MASK) / 2;
+ bt->vsync = hdmi_read16(state, ADV748X_HDMI_VSYNC_WIDTH,
+ ADV748X_HDMI_VSYNC_WIDTH_MASK) / 2;
+ bt->vbackporch = hdmi_read16(state, ADV748X_HDMI_VBACK_PORCH,
+ ADV748X_HDMI_VBACK_PORCH_MASK) / 2;
+
+ polarity = hdmi_read(state, 0x05);
+ bt->polarities = (polarity & BIT(4) ? V4L2_DV_VSYNC_POS_POL : 0) |
+ (polarity & BIT(5) ? V4L2_DV_HSYNC_POS_POL : 0);
+
+ if (bt->interlaced == V4L2_DV_INTERLACED) {
+ bt->height += hdmi_read16(state, 0x0b, 0x1fff);
+ bt->il_vfrontporch = hdmi_read16(state, 0x2c, 0x3fff) / 2;
+ bt->il_vsync = hdmi_read16(state, 0x30, 0x3fff) / 2;
+ bt->il_vbackporch = hdmi_read16(state, 0x34, 0x3fff) / 2;
+ }
+
+ adv748x_fill_optional_dv_timings(timings);
+
+ /*
+ * No interrupt handling is implemented yet.
+ * An interrupt should fire when a cable is plugged in, at which point the
+ * new timings should be read out and stored in the state.
+ */
+ hdmi->timings = *timings;
+
+ return 0;
+}
+
+static int adv748x_hdmi_g_input_status(struct v4l2_subdev *sd, u32 *status)
+{
+ struct adv748x_hdmi *hdmi = adv748x_sd_to_hdmi(sd);
+ struct adv748x_state *state = adv748x_hdmi_to_state(hdmi);
+
+ mutex_lock(&state->mutex);
+
+ *status = adv748x_hdmi_has_signal(state) ? 0 : V4L2_IN_ST_NO_SIGNAL;
+
+ mutex_unlock(&state->mutex);
+
+ return 0;
+}
+
+static int adv748x_hdmi_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct adv748x_hdmi *hdmi = adv748x_sd_to_hdmi(sd);
+ struct adv748x_state *state = adv748x_hdmi_to_state(hdmi);
+ int ret;
+
+ mutex_lock(&state->mutex);
+
+ ret = adv748x_txa_power(state, enable);
+ if (ret)
+ goto done;
+
+ if (adv748x_hdmi_has_signal(state))
+ adv_dbg(state, "Detected HDMI signal\n");
+ else
+ adv_dbg(state, "Couldn't detect HDMI video signal\n");
+
+done:
+ mutex_unlock(&state->mutex);
+ return ret;
+}
+
+static int adv748x_hdmi_g_pixelaspect(struct v4l2_subdev *sd,
+ struct v4l2_fract *aspect)
+{
+ aspect->numerator = 1;
+ aspect->denominator = 1;
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops adv748x_video_ops_hdmi = {
+ .s_dv_timings = adv748x_hdmi_s_dv_timings,
+ .g_dv_timings = adv748x_hdmi_g_dv_timings,
+ .query_dv_timings = adv748x_hdmi_query_dv_timings,
+ .g_input_status = adv748x_hdmi_g_input_status,
+ .s_stream = adv748x_hdmi_s_stream,
+ .g_pixelaspect = adv748x_hdmi_g_pixelaspect,
+};
+
+/* -----------------------------------------------------------------------------
+ * v4l2_subdev_pad_ops
+ */
+
+static int adv748x_hdmi_propagate_pixelrate(struct adv748x_hdmi *hdmi)
+{
+ struct v4l2_subdev *tx;
+ struct v4l2_dv_timings timings;
+ struct v4l2_bt_timings *bt = &timings.bt;
+ unsigned int fps;
+
+ tx = adv748x_get_remote_sd(&hdmi->pads[ADV748X_HDMI_SOURCE]);
+ if (!tx)
+ return -ENOLINK;
+
+ adv748x_hdmi_query_dv_timings(&hdmi->sd, &timings);
+
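+ /*
+ * Frame rate = pixel clock / total frame size (including blanking);
+ * the CSI-2 pixel rate is then active width * height * fps.
+ */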
+ fps = DIV_ROUND_CLOSEST_ULL(bt->pixelclock,
+ V4L2_DV_BT_FRAME_WIDTH(bt) *
+ V4L2_DV_BT_FRAME_HEIGHT(bt));
+
+ return adv748x_csi2_set_pixelrate(tx, bt->width * bt->height * fps);
+}
+
+static int adv748x_hdmi_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index != 0)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_RGB888_1X24;
+
+ return 0;
+}
+
+static int adv748x_hdmi_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct adv748x_hdmi *hdmi = adv748x_sd_to_hdmi(sd);
+ struct v4l2_mbus_framefmt *mbusformat;
+
+ if (sdformat->pad != ADV748X_HDMI_SOURCE)
+ return -EINVAL;
+
+ if (sdformat->which == V4L2_SUBDEV_FORMAT_TRY) {
+ mbusformat = v4l2_subdev_get_try_format(sd, cfg, sdformat->pad);
+ sdformat->format = *mbusformat;
+ } else {
+ adv748x_hdmi_fill_format(hdmi, &sdformat->format);
+ adv748x_hdmi_propagate_pixelrate(hdmi);
+ }
+
+ return 0;
+}
+
+static int adv748x_hdmi_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct v4l2_mbus_framefmt *mbusformat;
+
+ if (sdformat->pad != ADV748X_HDMI_SOURCE)
+ return -EINVAL;
+
+ if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ return adv748x_hdmi_get_format(sd, cfg, sdformat);
+
+ mbusformat = v4l2_subdev_get_try_format(sd, cfg, sdformat->pad);
+ *mbusformat = sdformat->format;
+
+ return 0;
+}
+
+static int adv748x_hdmi_get_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
+{
+ struct adv748x_hdmi *hdmi = adv748x_sd_to_hdmi(sd);
+
+ memset(edid->reserved, 0, sizeof(edid->reserved));
+
+ if (!hdmi->edid.present)
+ return -ENODATA;
+
+ if (edid->start_block == 0 && edid->blocks == 0) {
+ edid->blocks = hdmi->edid.blocks;
+ return 0;
+ }
+
+ if (edid->start_block >= hdmi->edid.blocks)
+ return -EINVAL;
+
+ if (edid->start_block + edid->blocks > hdmi->edid.blocks)
+ edid->blocks = hdmi->edid.blocks - edid->start_block;
+
+ memcpy(edid->edid, hdmi->edid.edid + edid->start_block * 128,
+ edid->blocks * 128);
+
+ return 0;
+}
+
+static inline int adv748x_hdmi_edid_write_block(struct adv748x_hdmi *hdmi,
+ unsigned int total_len, const u8 *val)
+{
+ struct adv748x_state *state = adv748x_hdmi_to_state(hdmi);
+ int err = 0;
+ int i = 0;
+ int len = 0;
+
+ adv_dbg(state, "%s: write EDID block (%d byte)\n",
+ __func__, total_len);
+
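+ /* Write the EDID in chunks of at most I2C_SMBUS_BLOCK_MAX bytes. */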
+ while (!err && i < total_len) {
+ len = (total_len - i) > I2C_SMBUS_BLOCK_MAX ?
+ I2C_SMBUS_BLOCK_MAX :
+ (total_len - i);
+
+ err = adv748x_write_block(state, ADV748X_PAGE_EDID,
+ i, val + i, len);
+ i += len;
+ }
+
+ return err;
+}
+
+static int adv748x_hdmi_set_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
+{
+ struct adv748x_hdmi *hdmi = adv748x_sd_to_hdmi(sd);
+ struct adv748x_state *state = adv748x_hdmi_to_state(hdmi);
+ int err;
+
+ memset(edid->reserved, 0, sizeof(edid->reserved));
+
+ if (edid->start_block != 0)
+ return -EINVAL;
+
+ if (edid->blocks == 0) {
+ hdmi->edid.blocks = 0;
+ hdmi->edid.present = 0;
+
+ /* Fall back to a 16:9 aspect ratio */
+ hdmi->aspect_ratio.numerator = 16;
+ hdmi->aspect_ratio.denominator = 9;
+
+ /* Disable the EDID */
+ repeater_write(state, ADV748X_REPEATER_EDID_SZ,
+ edid->blocks << ADV748X_REPEATER_EDID_SZ_SHIFT);
+
+ repeater_write(state, ADV748X_REPEATER_EDID_CTL, 0);
+
+ return 0;
+ }
+
+ if (edid->blocks > 4) {
+ edid->blocks = 4;
+ return -E2BIG;
+ }
+
+ memcpy(hdmi->edid.edid, edid->edid, 128 * edid->blocks);
+ hdmi->edid.blocks = edid->blocks;
+ hdmi->edid.present = true;
+
+ hdmi->aspect_ratio = v4l2_calc_aspect_ratio(edid->edid[0x15],
+ edid->edid[0x16]);
+
+ err = adv748x_hdmi_edid_write_block(hdmi, 128 * edid->blocks,
+ hdmi->edid.edid);
+ if (err < 0) {
+ v4l2_err(sd, "error %d writing edid pad %d\n", err, edid->pad);
+ return err;
+ }
+
+ repeater_write(state, ADV748X_REPEATER_EDID_SZ,
+ edid->blocks << ADV748X_REPEATER_EDID_SZ_SHIFT);
+
+ repeater_write(state, ADV748X_REPEATER_EDID_CTL,
+ ADV748X_REPEATER_EDID_CTL_EN);
+
+ return 0;
+}
+
+static bool adv748x_hdmi_check_dv_timings(const struct v4l2_dv_timings *timings,
+ void *hdl)
+{
+ const struct adv748x_hdmi_video_standards *stds =
+ adv748x_hdmi_video_standards;
+ unsigned int i;
+
+ for (i = 0; stds[i].timings.bt.width; i++)
+ if (v4l2_match_dv_timings(timings, &stds[i].timings, 0, false))
+ return true;
+
+ return false;
+}
+
+static int adv748x_hdmi_enum_dv_timings(struct v4l2_subdev *sd,
+ struct v4l2_enum_dv_timings *timings)
+{
+ return v4l2_enum_dv_timings_cap(timings, &adv748x_hdmi_timings_cap,
+ adv748x_hdmi_check_dv_timings, NULL);
+}
+
+static int adv748x_hdmi_dv_timings_cap(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings_cap *cap)
+{
+ *cap = adv748x_hdmi_timings_cap;
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops adv748x_pad_ops_hdmi = {
+ .enum_mbus_code = adv748x_hdmi_enum_mbus_code,
+ .set_fmt = adv748x_hdmi_set_format,
+ .get_fmt = adv748x_hdmi_get_format,
+ .get_edid = adv748x_hdmi_get_edid,
+ .set_edid = adv748x_hdmi_set_edid,
+ .dv_timings_cap = adv748x_hdmi_dv_timings_cap,
+ .enum_dv_timings = adv748x_hdmi_enum_dv_timings,
+};
+
+/* -----------------------------------------------------------------------------
+ * v4l2_subdev_ops
+ */
+
+static const struct v4l2_subdev_ops adv748x_ops_hdmi = {
+ .video = &adv748x_video_ops_hdmi,
+ .pad = &adv748x_pad_ops_hdmi,
+};
+
+/* -----------------------------------------------------------------------------
+ * Controls
+ */
+
+static const char * const hdmi_ctrl_patgen_menu[] = {
+ "Disabled",
+ "Solid Color",
+ "Color Bars",
+ "Ramp Grey",
+ "Ramp Blue",
+ "Ramp Red",
+ "Checkered"
+};
+
+static int adv748x_hdmi_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct adv748x_hdmi *hdmi = adv748x_ctrl_to_hdmi(ctrl);
+ struct adv748x_state *state = adv748x_hdmi_to_state(hdmi);
+ int ret;
+ u8 pattern;
+
+ /* Enable video adjustment first */
+ ret = cp_clrset(state, ADV748X_CP_VID_ADJ,
+ ADV748X_CP_VID_ADJ_ENABLE,
+ ADV748X_CP_VID_ADJ_ENABLE);
+ if (ret < 0)
+ return ret;
+
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ ret = cp_write(state, ADV748X_CP_BRI, ctrl->val);
+ break;
+ case V4L2_CID_HUE:
+ ret = cp_write(state, ADV748X_CP_HUE, ctrl->val);
+ break;
+ case V4L2_CID_CONTRAST:
+ ret = cp_write(state, ADV748X_CP_CON, ctrl->val);
+ break;
+ case V4L2_CID_SATURATION:
+ ret = cp_write(state, ADV748X_CP_SAT, ctrl->val);
+ break;
+ case V4L2_CID_TEST_PATTERN:
+ pattern = ctrl->val;
+
+ /* Pattern is 0-indexed. Ctrl Menu is 1-indexed */
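+ /*
+ * e.g. menu entry 1 ("Solid Color") becomes hardware pattern 0 with
+ * ADV748X_CP_PAT_GEN_EN set, while entry 0 ("Disabled") writes 0 and
+ * leaves the pattern generator off.
+ */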
+ if (pattern) {
+ pattern--;
+ pattern |= ADV748X_CP_PAT_GEN_EN;
+ }
+
+ ret = cp_write(state, ADV748X_CP_PAT_GEN, pattern);
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops adv748x_hdmi_ctrl_ops = {
+ .s_ctrl = adv748x_hdmi_s_ctrl,
+};
+
+static int adv748x_hdmi_init_controls(struct adv748x_hdmi *hdmi)
+{
+ struct adv748x_state *state = adv748x_hdmi_to_state(hdmi);
+
+ v4l2_ctrl_handler_init(&hdmi->ctrl_hdl, 5);
+
+ /* Use our mutex for the controls */
+ hdmi->ctrl_hdl.lock = &state->mutex;
+
+ v4l2_ctrl_new_std(&hdmi->ctrl_hdl, &adv748x_hdmi_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, ADV748X_CP_BRI_MIN,
+ ADV748X_CP_BRI_MAX, 1, ADV748X_CP_BRI_DEF);
+ v4l2_ctrl_new_std(&hdmi->ctrl_hdl, &adv748x_hdmi_ctrl_ops,
+ V4L2_CID_CONTRAST, ADV748X_CP_CON_MIN,
+ ADV748X_CP_CON_MAX, 1, ADV748X_CP_CON_DEF);
+ v4l2_ctrl_new_std(&hdmi->ctrl_hdl, &adv748x_hdmi_ctrl_ops,
+ V4L2_CID_SATURATION, ADV748X_CP_SAT_MIN,
+ ADV748X_CP_SAT_MAX, 1, ADV748X_CP_SAT_DEF);
+ v4l2_ctrl_new_std(&hdmi->ctrl_hdl, &adv748x_hdmi_ctrl_ops,
+ V4L2_CID_HUE, ADV748X_CP_HUE_MIN,
+ ADV748X_CP_HUE_MAX, 1, ADV748X_CP_HUE_DEF);
+
+ /*
+ * Todo: V4L2_CID_DV_RX_POWER_PRESENT should also be supported when
+ * interrupts are handled correctly
+ */
+
+ v4l2_ctrl_new_std_menu_items(&hdmi->ctrl_hdl, &adv748x_hdmi_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(hdmi_ctrl_patgen_menu) - 1,
+ 0, 0, hdmi_ctrl_patgen_menu);
+
+ hdmi->sd.ctrl_handler = &hdmi->ctrl_hdl;
+ if (hdmi->ctrl_hdl.error) {
+ v4l2_ctrl_handler_free(&hdmi->ctrl_hdl);
+ return hdmi->ctrl_hdl.error;
+ }
+
+ return v4l2_ctrl_handler_setup(&hdmi->ctrl_hdl);
+}
+
+int adv748x_hdmi_init(struct adv748x_hdmi *hdmi)
+{
+ struct adv748x_state *state = adv748x_hdmi_to_state(hdmi);
+ static const struct v4l2_dv_timings cea1280x720 =
+ V4L2_DV_BT_CEA_1280X720P30;
+ int ret;
+
+ hdmi->timings = cea1280x720;
+
+ /* Initialise a default 16:9 aspect ratio */
+ hdmi->aspect_ratio.numerator = 16;
+ hdmi->aspect_ratio.denominator = 9;
+
+ adv748x_subdev_init(&hdmi->sd, state, &adv748x_ops_hdmi,
+ MEDIA_ENT_F_IO_DTV, "hdmi");
+
+ hdmi->pads[ADV748X_HDMI_SINK].flags = MEDIA_PAD_FL_SINK;
+ hdmi->pads[ADV748X_HDMI_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&hdmi->sd.entity,
+ ADV748X_HDMI_NR_PADS, hdmi->pads);
+ if (ret)
+ return ret;
+
+ ret = adv748x_hdmi_init_controls(hdmi);
+ if (ret)
+ goto err_free_media;
+
+ return 0;
+
+err_free_media:
+ media_entity_cleanup(&hdmi->sd.entity);
+
+ return ret;
+}
+
+void adv748x_hdmi_cleanup(struct adv748x_hdmi *hdmi)
+{
+ v4l2_device_unregister_subdev(&hdmi->sd);
+ media_entity_cleanup(&hdmi->sd.entity);
+ v4l2_ctrl_handler_free(&hdmi->ctrl_hdl);
+}
diff --git a/drivers/media/i2c/adv748x/adv748x.h b/drivers/media/i2c/adv748x/adv748x.h
new file mode 100644
index 000000000000..cc4151b5b31e
--- /dev/null
+++ b/drivers/media/i2c/adv748x/adv748x.h
@@ -0,0 +1,425 @@
+/*
+ * Driver for Analog Devices ADV748X video decoder and HDMI receiver
+ *
+ * Copyright (C) 2017 Renesas Electronics Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * Authors:
+ * Koji Matsuoka <koji.matsuoka.xm@renesas.com>
+ * Niklas Söderlund <niklas.soderlund@ragnatech.se>
+ * Kieran Bingham <kieran.bingham@ideasonboard.com>
+ *
+ * The ADV748x range of receivers has the following configurations:
+ *
+ *    ADV7480: HDMI in, MHL in, 4-lane CSI
+ *    ADV7481: Analog in, HDMI in, MHL in, 4-lane CSI, 1-lane CSI
+ *    ADV7482: Analog in, HDMI in, 4-lane CSI, 1-lane CSI
+ */
+
+#include <linux/i2c.h>
+
+#ifndef _ADV748X_H_
+#define _ADV748X_H_
+
+/* I2C slave addresses */
+#define ADV748X_I2C_IO 0x70 /* IO Map */
+#define ADV748X_I2C_DPLL 0x26 /* DPLL Map */
+#define ADV748X_I2C_CP 0x22 /* CP Map */
+#define ADV748X_I2C_HDMI 0x34 /* HDMI Map */
+#define ADV748X_I2C_EDID 0x36 /* EDID Map */
+#define ADV748X_I2C_REPEATER 0x32 /* HDMI RX Repeater Map */
+#define ADV748X_I2C_INFOFRAME 0x31 /* HDMI RX InfoFrame Map */
+#define ADV748X_I2C_CEC 0x41 /* CEC Map */
+#define ADV748X_I2C_SDP 0x79 /* SDP Map */
+#define ADV748X_I2C_TXB 0x48 /* CSI-TXB Map */
+#define ADV748X_I2C_TXA 0x4a /* CSI-TXA Map */
+
+enum adv748x_page {
+ ADV748X_PAGE_IO,
+ ADV748X_PAGE_DPLL,
+ ADV748X_PAGE_CP,
+ ADV748X_PAGE_HDMI,
+ ADV748X_PAGE_EDID,
+ ADV748X_PAGE_REPEATER,
+ ADV748X_PAGE_INFOFRAME,
+ ADV748X_PAGE_CEC,
+ ADV748X_PAGE_SDP,
+ ADV748X_PAGE_TXB,
+ ADV748X_PAGE_TXA,
+ ADV748X_PAGE_MAX,
+
+ /* Fake pages for register sequences */
+ ADV748X_PAGE_WAIT, /* Wait x msec */
+ ADV748X_PAGE_EOR, /* End Mark */
+};
+
+/**
+ * enum adv748x_ports - Device tree port number definitions
+ *
+ * The ADV748X ports define the mapping between subdevices
+ * and the device tree specification
+ */
+enum adv748x_ports {
+ ADV748X_PORT_AIN0 = 0,
+ ADV748X_PORT_AIN1 = 1,
+ ADV748X_PORT_AIN2 = 2,
+ ADV748X_PORT_AIN3 = 3,
+ ADV748X_PORT_AIN4 = 4,
+ ADV748X_PORT_AIN5 = 5,
+ ADV748X_PORT_AIN6 = 6,
+ ADV748X_PORT_AIN7 = 7,
+ ADV748X_PORT_HDMI = 8,
+ ADV748X_PORT_TTL = 9,
+ ADV748X_PORT_TXA = 10,
+ ADV748X_PORT_TXB = 11,
+ ADV748X_PORT_MAX = 12,
+};
+
+enum adv748x_csi2_pads {
+ ADV748X_CSI2_SINK,
+ ADV748X_CSI2_SOURCE,
+ ADV748X_CSI2_NR_PADS,
+};
+
+/* CSI2 transmitters can have 2 internal connections, HDMI/AFE */
+#define ADV748X_CSI2_MAX_SUBDEVS 2
+
+struct adv748x_csi2 {
+ struct adv748x_state *state;
+ struct v4l2_mbus_framefmt format;
+ unsigned int page;
+
+ struct media_pad pads[ADV748X_CSI2_NR_PADS];
+ struct v4l2_ctrl_handler ctrl_hdl;
+ struct v4l2_subdev sd;
+};
+
+#define notifier_to_csi2(n) container_of(n, struct adv748x_csi2, notifier)
+#define adv748x_sd_to_csi2(sd) container_of(sd, struct adv748x_csi2, sd)
+
+enum adv748x_hdmi_pads {
+ ADV748X_HDMI_SINK,
+ ADV748X_HDMI_SOURCE,
+ ADV748X_HDMI_NR_PADS,
+};
+
+struct adv748x_hdmi {
+ struct media_pad pads[ADV748X_HDMI_NR_PADS];
+ struct v4l2_ctrl_handler ctrl_hdl;
+ struct v4l2_subdev sd;
+ struct v4l2_mbus_framefmt format;
+
+ struct v4l2_dv_timings timings;
+ struct v4l2_fract aspect_ratio;
+
+ struct {
+ u8 edid[512];
+ u32 present;
+ unsigned int blocks;
+ } edid;
+};
+
+#define adv748x_ctrl_to_hdmi(ctrl) \
+ container_of(ctrl->handler, struct adv748x_hdmi, ctrl_hdl)
+#define adv748x_sd_to_hdmi(sd) container_of(sd, struct adv748x_hdmi, sd)
+
+enum adv748x_afe_pads {
+ ADV748X_AFE_SINK_AIN0,
+ ADV748X_AFE_SINK_AIN1,
+ ADV748X_AFE_SINK_AIN2,
+ ADV748X_AFE_SINK_AIN3,
+ ADV748X_AFE_SINK_AIN4,
+ ADV748X_AFE_SINK_AIN5,
+ ADV748X_AFE_SINK_AIN6,
+ ADV748X_AFE_SINK_AIN7,
+ ADV748X_AFE_SOURCE,
+ ADV748X_AFE_NR_PADS,
+};
+
+struct adv748x_afe {
+ struct media_pad pads[ADV748X_AFE_NR_PADS];
+ struct v4l2_ctrl_handler ctrl_hdl;
+ struct v4l2_subdev sd;
+ struct v4l2_mbus_framefmt format;
+
+ bool streaming;
+ v4l2_std_id curr_norm;
+ unsigned int input;
+};
+
+#define adv748x_ctrl_to_afe(ctrl) \
+ container_of(ctrl->handler, struct adv748x_afe, ctrl_hdl)
+#define adv748x_sd_to_afe(sd) container_of(sd, struct adv748x_afe, sd)
+
+/**
+ * struct adv748x_state - State of ADV748X
+ * @dev: (OF) device
+ * @client: I2C client
+ * @mutex: protect global state
+ *
+ * @endpoints: parsed device node endpoints for each port
+ *
+ * @i2c_clients: I2C clients for the page accesses
+ * @regmap: regmap configuration pages
+ *
+ * @hdmi: state of HDMI receiver context
+ * @afe: state of AFE receiver context
+ * @txa: state of TXA transmitter context
+ * @txb: state of TXB transmitter context
+ */
+struct adv748x_state {
+ struct device *dev;
+ struct i2c_client *client;
+ struct mutex mutex;
+
+ struct device_node *endpoints[ADV748X_PORT_MAX];
+
+ struct i2c_client *i2c_clients[ADV748X_PAGE_MAX];
+ struct regmap *regmap[ADV748X_PAGE_MAX];
+
+ struct adv748x_hdmi hdmi;
+ struct adv748x_afe afe;
+ struct adv748x_csi2 txa;
+ struct adv748x_csi2 txb;
+};
+
+#define adv748x_hdmi_to_state(h) container_of(h, struct adv748x_state, hdmi)
+#define adv748x_afe_to_state(a) container_of(a, struct adv748x_state, afe)
+
+#define adv_err(a, fmt, arg...) dev_err(a->dev, fmt, ##arg)
+#define adv_info(a, fmt, arg...) dev_info(a->dev, fmt, ##arg)
+#define adv_dbg(a, fmt, arg...) dev_dbg(a->dev, fmt, ##arg)
+
+/* Register Mappings */
+
+/* IO Map */
+#define ADV748X_IO_PD 0x00 /* power down controls */
+#define ADV748X_IO_PD_RX_EN BIT(6)
+
+#define ADV748X_IO_REG_04 0x04
+#define ADV748X_IO_REG_04_FORCE_FR BIT(0) /* Force CP free-run */
+
+#define ADV748X_IO_DATAPATH 0x03 /* datapath cntrl */
+#define ADV748X_IO_DATAPATH_VFREQ_M 0x70
+#define ADV748X_IO_DATAPATH_VFREQ_SHIFT 4
+
+#define ADV748X_IO_VID_STD 0x05
+
+#define ADV748X_IO_10 0x10 /* io_reg_10 */
+#define ADV748X_IO_10_CSI4_EN BIT(7)
+#define ADV748X_IO_10_CSI1_EN BIT(6)
+#define ADV748X_IO_10_PIX_OUT_EN BIT(5)
+
+#define ADV748X_IO_CHIP_REV_ID_1 0xdf
+#define ADV748X_IO_CHIP_REV_ID_2 0xe0
+
+#define ADV748X_IO_SLAVE_ADDR_BASE 0xf2
+
+/* HDMI RX Map */
+#define ADV748X_HDMI_LW1 0x07 /* line width_1 */
+#define ADV748X_HDMI_LW1_VERT_FILTER BIT(7)
+#define ADV748X_HDMI_LW1_DE_REGEN BIT(5)
+#define ADV748X_HDMI_LW1_WIDTH_MASK 0x1fff
+
+#define ADV748X_HDMI_F0H1 0x09 /* field0 height_1 */
+#define ADV748X_HDMI_F0H1_HEIGHT_MASK 0x1fff
+
+#define ADV748X_HDMI_F1H1 0x0b /* field1 height_1 */
+#define ADV748X_HDMI_F1H1_INTERLACED BIT(5)
+
+#define ADV748X_HDMI_HFRONT_PORCH 0x20 /* hsync_front_porch_1 */
+#define ADV748X_HDMI_HFRONT_PORCH_MASK 0x1fff
+
+#define ADV748X_HDMI_HSYNC_WIDTH 0x22 /* hsync_pulse_width_1 */
+#define ADV748X_HDMI_HSYNC_WIDTH_MASK 0x1fff
+
+#define ADV748X_HDMI_HBACK_PORCH 0x24 /* hsync_back_porch_1 */
+#define ADV748X_HDMI_HBACK_PORCH_MASK 0x1fff
+
+#define ADV748X_HDMI_VFRONT_PORCH 0x2a /* field0_vs_front_porch_1 */
+#define ADV748X_HDMI_VFRONT_PORCH_MASK 0x3fff
+
+#define ADV748X_HDMI_VSYNC_WIDTH 0x2e /* field0_vs_pulse_width_1 */
+#define ADV748X_HDMI_VSYNC_WIDTH_MASK 0x3fff
+
+#define ADV748X_HDMI_VBACK_PORCH 0x32 /* field0_vs_back_porch_1 */
+#define ADV748X_HDMI_VBACK_PORCH_MASK 0x3fff
+
+#define ADV748X_HDMI_TMDS_1 0x51 /* hdmi_reg_51 */
+#define ADV748X_HDMI_TMDS_2 0x52 /* hdmi_reg_52 */
+
+/* HDMI RX Repeater Map */
+#define ADV748X_REPEATER_EDID_SZ 0x70 /* primary_edid_size */
+#define ADV748X_REPEATER_EDID_SZ_SHIFT 4
+
+#define ADV748X_REPEATER_EDID_CTL 0x74 /* hdcp edid controls */
+#define ADV748X_REPEATER_EDID_CTL_EN BIT(0) /* man_edid_a_enable */
+
+/* SDP Main Map */
+#define ADV748X_SDP_INSEL 0x00 /* user_map_rw_reg_00 */
+
+#define ADV748X_SDP_VID_SEL 0x02 /* user_map_rw_reg_02 */
+#define ADV748X_SDP_VID_SEL_MASK 0xf0
+#define ADV748X_SDP_VID_SEL_SHIFT 4
+
+/* Contrast - Unsigned */
+#define ADV748X_SDP_CON 0x08 /* user_map_rw_reg_08 */
+#define ADV748X_SDP_CON_MIN 0
+#define ADV748X_SDP_CON_DEF 128
+#define ADV748X_SDP_CON_MAX 255
+
+/* Brightness - Signed */
+#define ADV748X_SDP_BRI 0x0a /* user_map_rw_reg_0a */
+#define ADV748X_SDP_BRI_MIN -128
+#define ADV748X_SDP_BRI_DEF 0
+#define ADV748X_SDP_BRI_MAX 127
+
+/* Hue - Signed, inverted */
+#define ADV748X_SDP_HUE 0x0b /* user_map_rw_reg_0b */
+#define ADV748X_SDP_HUE_MIN -127
+#define ADV748X_SDP_HUE_DEF 0
+#define ADV748X_SDP_HUE_MAX 128
+
+/* Test Patterns / Default Values */
+#define ADV748X_SDP_DEF 0x0c /* user_map_rw_reg_0c */
+#define ADV748X_SDP_DEF_VAL_EN BIT(0) /* Force free run mode */
+#define ADV748X_SDP_DEF_VAL_AUTO_EN BIT(1) /* Free run when no signal */
+
+#define ADV748X_SDP_MAP_SEL 0x0e /* user_map_rw_reg_0e */
+#define ADV748X_SDP_MAP_SEL_RO_MAIN 1
+
+/* Free run pattern select */
+#define ADV748X_SDP_FRP 0x14
+#define ADV748X_SDP_FRP_MASK GENMASK(3, 1)
+
+/* Saturation */
+#define ADV748X_SDP_SD_SAT_U 0xe3 /* user_map_rw_reg_e3 */
+#define ADV748X_SDP_SD_SAT_V 0xe4 /* user_map_rw_reg_e4 */
+#define ADV748X_SDP_SAT_MIN 0
+#define ADV748X_SDP_SAT_DEF 128
+#define ADV748X_SDP_SAT_MAX 255
+
+/* SDP RO Main Map */
+#define ADV748X_SDP_RO_10 0x10
+#define ADV748X_SDP_RO_10_IN_LOCK BIT(0)
+
+/* CP Map */
+#define ADV748X_CP_PAT_GEN 0x37 /* int_pat_gen_1 */
+#define ADV748X_CP_PAT_GEN_EN BIT(7)
+
+/* Contrast Control - Unsigned */
+#define ADV748X_CP_CON 0x3a /* contrast_cntrl */
+#define ADV748X_CP_CON_MIN 0 /* Minimum contrast */
+#define ADV748X_CP_CON_DEF 128 /* Default */
+#define ADV748X_CP_CON_MAX 255 /* Maximum contrast */
+
+/* Saturation Control - Unsigned */
+#define ADV748X_CP_SAT 0x3b /* saturation_cntrl */
+#define ADV748X_CP_SAT_MIN 0 /* Minimum saturation */
+#define ADV748X_CP_SAT_DEF 128 /* Default */
+#define ADV748X_CP_SAT_MAX 255 /* Maximum saturation */
+
+/* Brightness Control - Signed */
+#define ADV748X_CP_BRI 0x3c /* brightness_cntrl */
+#define ADV748X_CP_BRI_MIN -128 /* Luma is -512d */
+#define ADV748X_CP_BRI_DEF 0 /* Luma is 0 */
+#define ADV748X_CP_BRI_MAX 127 /* Luma is 508d */
+
+/* Hue Control */
+#define ADV748X_CP_HUE 0x3d /* hue_cntrl */
+#define ADV748X_CP_HUE_MIN 0 /* -90 degree */
+#define ADV748X_CP_HUE_DEF 0 /* -90 degree */
+#define ADV748X_CP_HUE_MAX 255 /* +90 degree */
+
+#define ADV748X_CP_VID_ADJ 0x3e /* vid_adj_0 */
+#define ADV748X_CP_VID_ADJ_ENABLE BIT(7) /* Enable colour controls */
+
+#define ADV748X_CP_DE_POS_HIGH 0x8b /* de_pos_adj_6 */
+#define ADV748X_CP_DE_POS_HIGH_SET BIT(6)
+#define ADV748X_CP_DE_POS_END_LOW 0x8c /* de_pos_adj_7 */
+#define ADV748X_CP_DE_POS_START_LOW 0x8d /* de_pos_adj_8 */
+
+#define ADV748X_CP_VID_ADJ_2 0x91
+#define ADV748X_CP_VID_ADJ_2_INTERLACED BIT(6)
+#define ADV748X_CP_VID_ADJ_2_INTERLACED_3D BIT(4)
+
+#define ADV748X_CP_CLMP_POS 0xc9 /* clmp_pos_cntrl_4 */
+#define ADV748X_CP_CLMP_POS_DIS_AUTO BIT(0) /* dis_auto_param_buff */
+
+/* CSI : TXA/TXB Maps */
+#define ADV748X_CSI_VC_REF 0x0d /* csi_tx_top_reg_0d */
+#define ADV748X_CSI_VC_REF_SHIFT 6
+
+#define ADV748X_CSI_FS_AS_LS 0x1e /* csi_tx_top_reg_1e */
+#define ADV748X_CSI_FS_AS_LS_UNKNOWN BIT(6) /* Undocumented bit */
+
+/* Register handling */
+
+int adv748x_read(struct adv748x_state *state, u8 addr, u8 reg);
+int adv748x_write(struct adv748x_state *state, u8 page, u8 reg, u8 value);
+int adv748x_write_block(struct adv748x_state *state, int client_page,
+ unsigned int init_reg, const void *val,
+ size_t val_len);
+
+#define io_read(s, r) adv748x_read(s, ADV748X_PAGE_IO, r)
+#define io_write(s, r, v) adv748x_write(s, ADV748X_PAGE_IO, r, v)
+#define io_clrset(s, r, m, v) io_write(s, r, (io_read(s, r) & ~m) | v)
+
+#define hdmi_read(s, r) adv748x_read(s, ADV748X_PAGE_HDMI, r)
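+/*
+ * hdmi_read16() combines two consecutive 8-bit HDMI map registers into a
+ * single value and masks it to the field width, e.g. the 13-bit line width
+ * at ADV748X_HDMI_LW1 with ADV748X_HDMI_LW1_WIDTH_MASK.
+ */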
+#define hdmi_read16(s, r, m) (((hdmi_read(s, r) << 8) | hdmi_read(s, r+1)) & m)
+#define hdmi_write(s, r, v) adv748x_write(s, ADV748X_PAGE_HDMI, r, v)
+
+#define repeater_read(s, r) adv748x_read(s, ADV748X_PAGE_REPEATER, r)
+#define repeater_write(s, r, v) adv748x_write(s, ADV748X_PAGE_REPEATER, r, v)
+
+#define sdp_read(s, r) adv748x_read(s, ADV748X_PAGE_SDP, r)
+#define sdp_write(s, r, v) adv748x_write(s, ADV748X_PAGE_SDP, r, v)
+#define sdp_clrset(s, r, m, v) sdp_write(s, r, (sdp_read(s, r) & ~m) | v)
+
+#define cp_read(s, r) adv748x_read(s, ADV748X_PAGE_CP, r)
+#define cp_write(s, r, v) adv748x_write(s, ADV748X_PAGE_CP, r, v)
+#define cp_clrset(s, r, m, v) cp_write(s, r, (cp_read(s, r) & ~m) | v)
+
+#define txa_read(s, r) adv748x_read(s, ADV748X_PAGE_TXA, r)
+#define txb_read(s, r) adv748x_read(s, ADV748X_PAGE_TXB, r)
+
+#define tx_read(t, r) adv748x_read(t->state, t->page, r)
+#define tx_write(t, r, v) adv748x_write(t->state, t->page, r, v)
+
+static inline struct v4l2_subdev *adv748x_get_remote_sd(struct media_pad *pad)
+{
+ pad = media_entity_remote_pad(pad);
+ if (!pad)
+ return NULL;
+
+ return media_entity_to_v4l2_subdev(pad->entity);
+}
+
+void adv748x_subdev_init(struct v4l2_subdev *sd, struct adv748x_state *state,
+ const struct v4l2_subdev_ops *ops, u32 function,
+ const char *ident);
+
+int adv748x_register_subdevs(struct adv748x_state *state,
+ struct v4l2_device *v4l2_dev);
+
+int adv748x_txa_power(struct adv748x_state *state, bool on);
+int adv748x_txb_power(struct adv748x_state *state, bool on);
+
+int adv748x_afe_init(struct adv748x_afe *afe);
+void adv748x_afe_cleanup(struct adv748x_afe *afe);
+
+int adv748x_csi2_init(struct adv748x_state *state, struct adv748x_csi2 *tx);
+void adv748x_csi2_cleanup(struct adv748x_csi2 *tx);
+int adv748x_csi2_set_pixelrate(struct v4l2_subdev *sd, s64 rate);
+
+int adv748x_hdmi_init(struct adv748x_hdmi *hdmi);
+void adv748x_hdmi_cleanup(struct adv748x_hdmi *hdmi);
+
+#endif /* _ADV748X_H_ */
diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
index ccc478605643..2817bafc67bf 100644
--- a/drivers/media/i2c/adv7511.c
+++ b/drivers/media/i2c/adv7511.c
@@ -1927,8 +1927,7 @@ static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *
#if IS_ENABLED(CONFIG_VIDEO_ADV7511_CEC)
state->cec_adap = cec_allocate_adapter(&adv7511_cec_adap_ops,
- state, dev_name(&client->dev), CEC_CAP_TRANSMIT |
- CEC_CAP_LOG_ADDRS | CEC_CAP_PASSTHROUGH | CEC_CAP_RC,
+ state, dev_name(&client->dev), CEC_CAP_DEFAULTS,
ADV7511_MAX_ADDRS);
err = PTR_ERR_OR_ZERO(state->cec_adap);
if (err) {
@@ -1986,7 +1985,7 @@ static int adv7511_remove(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
-static struct i2c_device_id adv7511_id[] = {
+static const struct i2c_device_id adv7511_id[] = {
{ "adv7511", 0 },
{ }
};
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index 660bacb8f7d9..f289b8aca1da 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -618,7 +618,7 @@ static int adv76xx_read_reg(struct v4l2_subdev *sd, unsigned int reg)
unsigned int val;
int err;
- if (!(BIT(page) & state->info->page_mask))
+ if (page >= ADV76XX_PAGE_MAX || !(BIT(page) & state->info->page_mask))
return -EINVAL;
reg &= 0xff;
@@ -633,7 +633,7 @@ static int adv76xx_write_reg(struct v4l2_subdev *sd, unsigned int reg, u8 val)
struct adv76xx_state *state = to_state(sd);
unsigned int page = reg >> 8;
- if (!(BIT(page) & state->info->page_mask))
+ if (page >= ADV76XX_PAGE_MAX || !(BIT(page) & state->info->page_mask))
return -EINVAL;
reg &= 0xff;
@@ -3515,8 +3515,7 @@ static int adv76xx_probe(struct i2c_client *client,
#if IS_ENABLED(CONFIG_VIDEO_ADV7604_CEC)
state->cec_adap = cec_allocate_adapter(&adv76xx_cec_adap_ops,
state, dev_name(&client->dev),
- CEC_CAP_TRANSMIT | CEC_CAP_LOG_ADDRS |
- CEC_CAP_PASSTHROUGH | CEC_CAP_RC, ADV76XX_MAX_ADDRS);
+ CEC_CAP_DEFAULTS, ADV76XX_MAX_ADDRS);
err = PTR_ERR_OR_ZERO(state->cec_adap);
if (err)
goto err_entity;
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index 303effda1a2e..65f34e7e146f 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -3568,8 +3568,7 @@ static int adv7842_probe(struct i2c_client *client,
#if IS_ENABLED(CONFIG_VIDEO_ADV7842_CEC)
state->cec_adap = cec_allocate_adapter(&adv7842_cec_adap_ops,
state, dev_name(&client->dev),
- CEC_CAP_TRANSMIT | CEC_CAP_LOG_ADDRS |
- CEC_CAP_PASSTHROUGH | CEC_CAP_RC, ADV7842_MAX_ADDRS);
+ CEC_CAP_DEFAULTS, ADV7842_MAX_ADDRS);
err = PTR_ERR_OR_ZERO(state->cec_adap);
if (err)
goto err_entity;
@@ -3608,7 +3607,7 @@ static int adv7842_remove(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
-static struct i2c_device_id adv7842_id[] = {
+static const struct i2c_device_id adv7842_id[] = {
{ "adv7842", 0 },
{ }
};
diff --git a/drivers/media/i2c/dw9714.c b/drivers/media/i2c/dw9714.c
index 6a607d7f82de..95af4fc99cd0 100644
--- a/drivers/media/i2c/dw9714.c
+++ b/drivers/media/i2c/dw9714.c
@@ -11,7 +11,6 @@
* GNU General Public License for more details.
*/
-#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/module.h>
@@ -147,8 +146,7 @@ static int dw9714_init_controls(struct dw9714_device *dev_vcm)
return hdl->error;
}
-static int dw9714_probe(struct i2c_client *client,
- const struct i2c_device_id *devid)
+static int dw9714_probe(struct i2c_client *client)
{
struct dw9714_device *dw9714_dev;
int rval;
@@ -250,20 +248,18 @@ static int __maybe_unused dw9714_vcm_resume(struct device *dev)
return 0;
}
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id dw9714_acpi_match[] = {
- {},
-};
-MODULE_DEVICE_TABLE(acpi, dw9714_acpi_match);
-#endif
-
static const struct i2c_device_id dw9714_id_table[] = {
- {DW9714_NAME, 0},
- {}
+ { DW9714_NAME, 0 },
+ { { 0 } }
};
-
MODULE_DEVICE_TABLE(i2c, dw9714_id_table);
+static const struct of_device_id dw9714_of_table[] = {
+ { .compatible = "dongwoon,dw9714" },
+ { { 0 } }
+};
+MODULE_DEVICE_TABLE(of, dw9714_of_table);
+
static const struct dev_pm_ops dw9714_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(dw9714_vcm_suspend, dw9714_vcm_resume)
SET_RUNTIME_PM_OPS(dw9714_vcm_suspend, dw9714_vcm_resume, NULL)
@@ -273,9 +269,9 @@ static struct i2c_driver dw9714_i2c_driver = {
.driver = {
.name = DW9714_NAME,
.pm = &dw9714_pm_ops,
- .acpi_match_table = ACPI_PTR(dw9714_acpi_match),
+ .of_match_table = dw9714_of_table,
},
- .probe = dw9714_probe,
+ .probe_new = dw9714_probe,
.remove = dw9714_remove,
.id_table = dw9714_id_table,
};
diff --git a/drivers/media/i2c/et8ek8/et8ek8_driver.c b/drivers/media/i2c/et8ek8/et8ek8_driver.c
index 6e313d5243a0..c14f0fd6ded3 100644
--- a/drivers/media/i2c/et8ek8/et8ek8_driver.c
+++ b/drivers/media/i2c/et8ek8/et8ek8_driver.c
@@ -43,7 +43,7 @@
#define ET8EK8_NAME "et8ek8"
#define ET8EK8_PRIV_MEM_SIZE 128
-#define ET8EK8_MAX_MSG 48
+#define ET8EK8_MAX_MSG 8
struct et8ek8_sensor {
struct v4l2_subdev subdev;
@@ -220,7 +220,8 @@ static void et8ek8_i2c_create_msg(struct i2c_client *client, u16 len, u16 reg,
/*
* A buffered write method that puts the wanted register write
- * commands in a message list and passes the list to the i2c framework
+ * commands in a smaller number of message lists and passes the lists to
+ * the i2c framework
*/
static int et8ek8_i2c_buffered_write_regs(struct i2c_client *client,
const struct et8ek8_reg *wnext,
@@ -231,11 +232,7 @@ static int et8ek8_i2c_buffered_write_regs(struct i2c_client *client,
int wcnt = 0;
u16 reg, data_length;
u32 val;
-
- if (WARN_ONCE(cnt > ET8EK8_MAX_MSG,
- ET8EK8_NAME ": %s: too many messages.\n", __func__)) {
- return -EINVAL;
- }
+ int rval;
/* Create new write messages for all writes */
while (wcnt < cnt) {
@@ -249,10 +246,21 @@ static int et8ek8_i2c_buffered_write_regs(struct i2c_client *client,
/* Update write count */
wcnt++;
+
+ if (wcnt < ET8EK8_MAX_MSG)
+ continue;
+
+ rval = i2c_transfer(client->adapter, msg, wcnt);
+ if (rval < 0)
+ return rval;
+
+ cnt -= wcnt;
+ wcnt = 0;
}
- /* Now we send everything ... */
- return i2c_transfer(client->adapter, msg, wcnt);
+ rval = i2c_transfer(client->adapter, msg, wcnt);
+
+ return rval < 0 ? rval : 0;
}
/*
@@ -1496,7 +1504,6 @@ MODULE_DEVICE_TABLE(i2c, et8ek8_id_table);
static const struct dev_pm_ops et8ek8_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(et8ek8_suspend, et8ek8_resume)
};
-MODULE_DEVICE_TABLE(of, et8ek8_of_table);
static struct i2c_driver et8ek8_i2c_driver = {
.driver = {
diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c
index cee7fd9cf08b..a374e2a0ac3d 100644
--- a/drivers/media/i2c/ir-kbd-i2c.c
+++ b/drivers/media/i2c/ir-kbd-i2c.c
@@ -59,8 +59,8 @@ module_param(debug, int, 0644); /* debug level (0,1,2) */
/* ----------------------------------------------------------------------- */
-static int get_key_haup_common(struct IR_i2c *ir, enum rc_type *protocol,
- u32 *scancode, u8 *ptoggle, int size)
+static int get_key_haup_common(struct IR_i2c *ir, enum rc_proto *protocol,
+ u32 *scancode, u8 *ptoggle, int size)
{
unsigned char buf[6];
int start, range, toggle, dev, code, ircode, vendor;
@@ -99,7 +99,7 @@ static int get_key_haup_common(struct IR_i2c *ir, enum rc_type *protocol,
dprintk(1, "ir hauppauge (rc5): s%d r%d t%d dev=%d code=%d\n",
start, range, toggle, dev, code);
- *protocol = RC_TYPE_RC5;
+ *protocol = RC_PROTO_RC5;
*scancode = RC_SCANCODE_RC5(dev, code);
*ptoggle = toggle;
@@ -111,13 +111,13 @@ static int get_key_haup_common(struct IR_i2c *ir, enum rc_type *protocol,
if (vendor == 0x800f) {
*ptoggle = (dev & 0x80) != 0;
- *protocol = RC_TYPE_RC6_MCE;
+ *protocol = RC_PROTO_RC6_MCE;
dev &= 0x7f;
dprintk(1, "ir hauppauge (rc6-mce): t%d vendor=%d dev=%d code=%d\n",
*ptoggle, vendor, dev, code);
} else {
*ptoggle = 0;
- *protocol = RC_TYPE_RC6_6A_32;
+ *protocol = RC_PROTO_RC6_6A_32;
dprintk(1, "ir hauppauge (rc6-6a-32): vendor=%d dev=%d code=%d\n",
vendor, dev, code);
}
@@ -130,13 +130,13 @@ static int get_key_haup_common(struct IR_i2c *ir, enum rc_type *protocol,
return 0;
}
-static int get_key_haup(struct IR_i2c *ir, enum rc_type *protocol,
+static int get_key_haup(struct IR_i2c *ir, enum rc_proto *protocol,
u32 *scancode, u8 *toggle)
{
return get_key_haup_common(ir, protocol, scancode, toggle, 3);
}
-static int get_key_haup_xvr(struct IR_i2c *ir, enum rc_type *protocol,
+static int get_key_haup_xvr(struct IR_i2c *ir, enum rc_proto *protocol,
u32 *scancode, u8 *toggle)
{
int ret;
@@ -155,7 +155,7 @@ static int get_key_haup_xvr(struct IR_i2c *ir, enum rc_type *protocol,
return get_key_haup_common(ir, protocol, scancode, toggle, 6);
}
-static int get_key_pixelview(struct IR_i2c *ir, enum rc_type *protocol,
+static int get_key_pixelview(struct IR_i2c *ir, enum rc_proto *protocol,
u32 *scancode, u8 *toggle)
{
unsigned char b;
@@ -166,13 +166,13 @@ static int get_key_pixelview(struct IR_i2c *ir, enum rc_type *protocol,
return -EIO;
}
- *protocol = RC_TYPE_OTHER;
+ *protocol = RC_PROTO_OTHER;
*scancode = b;
*toggle = 0;
return 1;
}
-static int get_key_fusionhdtv(struct IR_i2c *ir, enum rc_type *protocol,
+static int get_key_fusionhdtv(struct IR_i2c *ir, enum rc_proto *protocol,
u32 *scancode, u8 *toggle)
{
unsigned char buf[4];
@@ -191,13 +191,13 @@ static int get_key_fusionhdtv(struct IR_i2c *ir, enum rc_type *protocol,
if(buf[0] != 0x1 || buf[1] != 0xfe)
return 0;
- *protocol = RC_TYPE_UNKNOWN;
+ *protocol = RC_PROTO_UNKNOWN;
*scancode = buf[2];
*toggle = 0;
return 1;
}
-static int get_key_knc1(struct IR_i2c *ir, enum rc_type *protocol,
+static int get_key_knc1(struct IR_i2c *ir, enum rc_proto *protocol,
u32 *scancode, u8 *toggle)
{
unsigned char b;
@@ -221,13 +221,13 @@ static int get_key_knc1(struct IR_i2c *ir, enum rc_type *protocol,
/* keep old data */
return 1;
- *protocol = RC_TYPE_UNKNOWN;
+ *protocol = RC_PROTO_UNKNOWN;
*scancode = b;
*toggle = 0;
return 1;
}
-static int get_key_avermedia_cardbus(struct IR_i2c *ir, enum rc_type *protocol,
+static int get_key_avermedia_cardbus(struct IR_i2c *ir, enum rc_proto *protocol,
u32 *scancode, u8 *toggle)
{
unsigned char subaddr, key, keygroup;
@@ -262,7 +262,7 @@ static int get_key_avermedia_cardbus(struct IR_i2c *ir, enum rc_type *protocol,
}
key |= (keygroup & 1) << 6;
- *protocol = RC_TYPE_UNKNOWN;
+ *protocol = RC_PROTO_UNKNOWN;
*scancode = key;
if (ir->c->addr == 0x41) /* AVerMedia EM78P153 */
*scancode |= keygroup << 8;
@@ -274,7 +274,7 @@ static int get_key_avermedia_cardbus(struct IR_i2c *ir, enum rc_type *protocol,
static int ir_key_poll(struct IR_i2c *ir)
{
- enum rc_type protocol;
+ enum rc_proto protocol;
u32 scancode;
u8 toggle;
int rc;
@@ -315,7 +315,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
char *ir_codes = NULL;
const char *name = NULL;
- u64 rc_type = RC_BIT_UNKNOWN;
+ u64 rc_proto = RC_PROTO_BIT_UNKNOWN;
struct IR_i2c *ir;
struct rc_dev *rc = NULL;
struct i2c_adapter *adap = client->adapter;
@@ -334,7 +334,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
case 0x64:
name = "Pixelview";
ir->get_key = get_key_pixelview;
- rc_type = RC_BIT_OTHER;
+ rc_proto = RC_PROTO_BIT_OTHER;
ir_codes = RC_MAP_EMPTY;
break;
case 0x18:
@@ -342,38 +342,39 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
case 0x1a:
name = "Hauppauge";
ir->get_key = get_key_haup;
- rc_type = RC_BIT_RC5;
+ rc_proto = RC_PROTO_BIT_RC5;
ir_codes = RC_MAP_HAUPPAUGE;
break;
case 0x30:
name = "KNC One";
ir->get_key = get_key_knc1;
- rc_type = RC_BIT_OTHER;
+ rc_proto = RC_PROTO_BIT_OTHER;
ir_codes = RC_MAP_EMPTY;
break;
case 0x6b:
name = "FusionHDTV";
ir->get_key = get_key_fusionhdtv;
- rc_type = RC_BIT_UNKNOWN;
+ rc_proto = RC_PROTO_BIT_UNKNOWN;
ir_codes = RC_MAP_FUSIONHDTV_MCE;
break;
case 0x40:
name = "AVerMedia Cardbus remote";
ir->get_key = get_key_avermedia_cardbus;
- rc_type = RC_BIT_OTHER;
+ rc_proto = RC_PROTO_BIT_OTHER;
ir_codes = RC_MAP_AVERMEDIA_CARDBUS;
break;
case 0x41:
name = "AVerMedia EM78P153";
ir->get_key = get_key_avermedia_cardbus;
- rc_type = RC_BIT_OTHER;
+ rc_proto = RC_PROTO_BIT_OTHER;
/* RM-KV remote, seems to be same as RM-K6 */
ir_codes = RC_MAP_AVERMEDIA_M733A_RM_K6;
break;
case 0x71:
name = "Hauppauge/Zilog Z8";
ir->get_key = get_key_haup_xvr;
- rc_type = RC_BIT_RC5 | RC_BIT_RC6_MCE | RC_BIT_RC6_6A_32;
+ rc_proto = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE |
+ RC_PROTO_BIT_RC6_6A_32;
ir_codes = RC_MAP_HAUPPAUGE;
break;
}
@@ -388,7 +389,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
name = init_data->name;
if (init_data->type)
- rc_type = init_data->type;
+ rc_proto = init_data->type;
if (init_data->polling_interval)
ir->polling_interval = init_data->polling_interval;
@@ -431,7 +432,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
ir->rc = rc;
/* Make sure we are all setup before going on */
- if (!name || !ir->get_key || !rc_type || !ir_codes) {
+ if (!name || !ir->get_key || !rc_proto || !ir_codes) {
dprintk(1, ": Unsupported device at address 0x%02x\n",
addr);
err = -ENODEV;
@@ -452,14 +453,14 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
*/
rc->input_id.bustype = BUS_I2C;
rc->input_phys = ir->phys;
- rc->input_name = ir->name;
+ rc->device_name = ir->name;
/*
* Initialize the other fields of rc_dev
*/
rc->map_name = ir->ir_codes;
- rc->allowed_protocols = rc_type;
- rc->enabled_protocols = rc_type;
+ rc->allowed_protocols = rc_proto;
+ rc->enabled_protocols = rc_proto;
if (!rc->driver_name)
rc->driver_name = MODULE_NAME;
diff --git a/drivers/media/i2c/m5mols/m5mols_core.c b/drivers/media/i2c/m5mols/m5mols_core.c
index 9ccb5ee55fa9..463534d44756 100644
--- a/drivers/media/i2c/m5mols/m5mols_core.c
+++ b/drivers/media/i2c/m5mols/m5mols_core.c
@@ -457,7 +457,7 @@ static int m5mols_get_version(struct v4l2_subdev *sd)
v4l2_info(sd, "Manufacturer\t[%s]\n",
is_manufacturer(info, REG_SAMSUNG_ELECTRO) ?
- "Samsung Electro-Machanics" :
+ "Samsung Electro-Mechanics" :
is_manufacturer(info, REG_SAMSUNG_OPTICS) ?
"Samsung Fiber-Optics" :
is_manufacturer(info, REG_SAMSUNG_TECHWIN) ?
diff --git a/drivers/media/i2c/max2175.c b/drivers/media/i2c/max2175.c
index a4736a8a7792..bf0e821a2b93 100644
--- a/drivers/media/i2c/max2175.c
+++ b/drivers/media/i2c/max2175.c
@@ -1319,7 +1319,7 @@ static int max2175_probe(struct i2c_client *client,
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
dev_err(&client->dev, "cannot get clock %d\n", ret);
- return -ENODEV;
+ return ret;
}
regmap = devm_regmap_init_i2c(client, &max2175_regmap_config);
diff --git a/drivers/media/i2c/mt9m111.c b/drivers/media/i2c/mt9m111.c
index 72e71b762827..99b992e46702 100644
--- a/drivers/media/i2c/mt9m111.c
+++ b/drivers/media/i2c/mt9m111.c
@@ -835,7 +835,7 @@ static const struct v4l2_ctrl_ops mt9m111_ctrl_ops = {
.s_ctrl = mt9m111_s_ctrl,
};
-static struct v4l2_subdev_core_ops mt9m111_subdev_core_ops = {
+static const struct v4l2_subdev_core_ops mt9m111_subdev_core_ops = {
.s_power = mt9m111_s_power,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = mt9m111_g_register,
@@ -865,7 +865,7 @@ static int mt9m111_g_mbus_config(struct v4l2_subdev *sd,
return 0;
}
-static struct v4l2_subdev_video_ops mt9m111_subdev_video_ops = {
+static const struct v4l2_subdev_video_ops mt9m111_subdev_video_ops = {
.g_mbus_config = mt9m111_g_mbus_config,
};
@@ -877,7 +877,7 @@ static const struct v4l2_subdev_pad_ops mt9m111_subdev_pad_ops = {
.set_fmt = mt9m111_set_fmt,
};
-static struct v4l2_subdev_ops mt9m111_subdev_ops = {
+static const struct v4l2_subdev_ops mt9m111_subdev_ops = {
.core = &mt9m111_subdev_core_ops,
.video = &mt9m111_subdev_video_ops,
.pad = &mt9m111_subdev_pad_ops,
diff --git a/drivers/media/i2c/mt9t001.c b/drivers/media/i2c/mt9t001.c
index 842017fa4aab..9d981d9f5686 100644
--- a/drivers/media/i2c/mt9t001.c
+++ b/drivers/media/i2c/mt9t001.c
@@ -822,15 +822,15 @@ static int mt9t001_close(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
return mt9t001_set_power(subdev, 0);
}
-static struct v4l2_subdev_core_ops mt9t001_subdev_core_ops = {
+static const struct v4l2_subdev_core_ops mt9t001_subdev_core_ops = {
.s_power = mt9t001_set_power,
};
-static struct v4l2_subdev_video_ops mt9t001_subdev_video_ops = {
+static const struct v4l2_subdev_video_ops mt9t001_subdev_video_ops = {
.s_stream = mt9t001_s_stream,
};
-static struct v4l2_subdev_pad_ops mt9t001_subdev_pad_ops = {
+static const struct v4l2_subdev_pad_ops mt9t001_subdev_pad_ops = {
.enum_mbus_code = mt9t001_enum_mbus_code,
.enum_frame_size = mt9t001_enum_frame_size,
.get_fmt = mt9t001_get_format,
@@ -839,7 +839,7 @@ static struct v4l2_subdev_pad_ops mt9t001_subdev_pad_ops = {
.set_selection = mt9t001_set_selection,
};
-static struct v4l2_subdev_ops mt9t001_subdev_ops = {
+static const struct v4l2_subdev_ops mt9t001_subdev_ops = {
.core = &mt9t001_subdev_core_ops,
.video = &mt9t001_subdev_video_ops,
.pad = &mt9t001_subdev_pad_ops,
diff --git a/drivers/media/i2c/ov13858.c b/drivers/media/i2c/ov13858.c
index 86550d8ddfee..af7af0d14c69 100644
--- a/drivers/media/i2c/ov13858.c
+++ b/drivers/media/i2c/ov13858.c
@@ -57,16 +57,14 @@
#define OV13858_VTS_30FPS 0x0c8e /* 30 fps */
#define OV13858_VTS_60FPS 0x0648 /* 60 fps */
#define OV13858_VTS_MAX 0x7fff
-#define OV13858_VBLANK_MIN 56
/* HBLANK control - read only */
-#define OV13858_PPL_540MHZ 2244
-#define OV13858_PPL_1080MHZ 4488
+#define OV13858_PPL_270MHZ 2244
+#define OV13858_PPL_540MHZ 4488
/* Exposure control */
#define OV13858_REG_EXPOSURE 0x3500
#define OV13858_EXPOSURE_MIN 4
-#define OV13858_EXPOSURE_MAX (OV13858_VTS_MAX - 8)
#define OV13858_EXPOSURE_STEP 1
#define OV13858_EXPOSURE_DEFAULT 0x640
@@ -78,13 +76,13 @@
#define OV13858_ANA_GAIN_DEFAULT 0x80
/* Digital gain control */
-#define OV13858_REG_DIGITAL_GAIN 0x350a
-#define OV13858_DGTL_GAIN_MASK 0xf3
-#define OV13858_DGTL_GAIN_SHIFT 2
-#define OV13858_DGTL_GAIN_MIN 1
-#define OV13858_DGTL_GAIN_MAX 4
-#define OV13858_DGTL_GAIN_STEP 1
-#define OV13858_DGTL_GAIN_DEFAULT 1
+#define OV13858_REG_B_MWB_GAIN 0x5100
+#define OV13858_REG_G_MWB_GAIN 0x5102
+#define OV13858_REG_R_MWB_GAIN 0x5104
+#define OV13858_DGTL_GAIN_MIN 0
+#define OV13858_DGTL_GAIN_MAX 16384 /* Max = 16 X */
+#define OV13858_DGTL_GAIN_DEFAULT 1024 /* Default gain = 1 X */
+#define OV13858_DGTL_GAIN_STEP 1 /* Each step = 1/1024 */
/* Test Pattern Control */
#define OV13858_REG_TEST_PATTERN 0x4503
@@ -121,7 +119,8 @@ struct ov13858_mode {
u32 height;
/* V-timing */
- u32 vts;
+ u32 vts_def;
+ u32 vts_min;
/* Index of Link frequency config to be used */
u32 link_freq_index;
@@ -944,31 +943,33 @@ static const char * const ov13858_test_pattern_menu[] = {
/* Configurations for supported link frequencies */
#define OV13858_NUM_OF_LINK_FREQS 2
-#define OV13858_LINK_FREQ_1080MBPS 1080000000
-#define OV13858_LINK_FREQ_540MBPS 540000000
+#define OV13858_LINK_FREQ_540MHZ 540000000ULL
+#define OV13858_LINK_FREQ_270MHZ 270000000ULL
#define OV13858_LINK_FREQ_INDEX_0 0
#define OV13858_LINK_FREQ_INDEX_1 1
/* Menu items for LINK_FREQ V4L2 control */
static const s64 link_freq_menu_items[OV13858_NUM_OF_LINK_FREQS] = {
- OV13858_LINK_FREQ_1080MBPS,
- OV13858_LINK_FREQ_540MBPS
+ OV13858_LINK_FREQ_540MHZ,
+ OV13858_LINK_FREQ_270MHZ
};
/* Link frequency configs */
static const struct ov13858_link_freq_config
link_freq_configs[OV13858_NUM_OF_LINK_FREQS] = {
{
- .pixel_rate = 864000000,
- .pixels_per_line = OV13858_PPL_1080MHZ,
+ /* pixel_rate = link_freq * 2 * nr_of_lanes / bits_per_sample */
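+ /* e.g. (540000000 * 2 * 4) / 10 = 432000000 pixels per second */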
+ .pixel_rate = (OV13858_LINK_FREQ_540MHZ * 2 * 4) / 10,
+ .pixels_per_line = OV13858_PPL_540MHZ,
.reg_list = {
.num_of_regs = ARRAY_SIZE(mipi_data_rate_1080mbps),
.regs = mipi_data_rate_1080mbps,
}
},
{
- .pixel_rate = 432000000,
- .pixels_per_line = OV13858_PPL_540MHZ,
+ /* pixel_rate = link_freq * 2 * nr_of_lanes / bits_per_sample */
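+ /* e.g. (270000000 * 2 * 4) / 10 = 216000000 pixels per second */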
+ .pixel_rate = (OV13858_LINK_FREQ_270MHZ * 2 * 4) / 10,
+ .pixels_per_line = OV13858_PPL_270MHZ,
.reg_list = {
.num_of_regs = ARRAY_SIZE(mipi_data_rate_540mbps),
.regs = mipi_data_rate_540mbps,
@@ -981,7 +982,8 @@ static const struct ov13858_mode supported_modes[] = {
{
.width = 4224,
.height = 3136,
- .vts = OV13858_VTS_30FPS,
+ .vts_def = OV13858_VTS_30FPS,
+ .vts_min = OV13858_VTS_30FPS,
.reg_list = {
.num_of_regs = ARRAY_SIZE(mode_4224x3136_regs),
.regs = mode_4224x3136_regs,
@@ -991,7 +993,8 @@ static const struct ov13858_mode supported_modes[] = {
{
.width = 2112,
.height = 1568,
- .vts = OV13858_VTS_30FPS,
+ .vts_def = OV13858_VTS_30FPS,
+ .vts_min = 1608,
.reg_list = {
.num_of_regs = ARRAY_SIZE(mode_2112x1568_regs),
.regs = mode_2112x1568_regs,
@@ -1001,7 +1004,8 @@ static const struct ov13858_mode supported_modes[] = {
{
.width = 2112,
.height = 1188,
- .vts = OV13858_VTS_30FPS,
+ .vts_def = OV13858_VTS_30FPS,
+ .vts_min = 1608,
.reg_list = {
.num_of_regs = ARRAY_SIZE(mode_2112x1188_regs),
.regs = mode_2112x1188_regs,
@@ -1011,7 +1015,8 @@ static const struct ov13858_mode supported_modes[] = {
{
.width = 1056,
.height = 784,
- .vts = OV13858_VTS_30FPS,
+ .vts_def = OV13858_VTS_30FPS,
+ .vts_min = 804,
.reg_list = {
.num_of_regs = ARRAY_SIZE(mode_1056x784_regs),
.regs = mode_1056x784_regs,
@@ -1161,21 +1166,21 @@ static int ov13858_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
static int ov13858_update_digital_gain(struct ov13858 *ov13858, u32 d_gain)
{
int ret;
- u32 val;
- if (d_gain == 3)
- return -EINVAL;
+ ret = ov13858_write_reg(ov13858, OV13858_REG_B_MWB_GAIN,
+ OV13858_REG_VALUE_16BIT, d_gain);
+ if (ret)
+ return ret;
- ret = ov13858_read_reg(ov13858, OV13858_REG_DIGITAL_GAIN,
- OV13858_REG_VALUE_08BIT, &val);
+ ret = ov13858_write_reg(ov13858, OV13858_REG_G_MWB_GAIN,
+ OV13858_REG_VALUE_16BIT, d_gain);
if (ret)
return ret;
- val &= OV13858_DGTL_GAIN_MASK;
- val |= (d_gain - 1) << OV13858_DGTL_GAIN_SHIFT;
+ ret = ov13858_write_reg(ov13858, OV13858_REG_R_MWB_GAIN,
+ OV13858_REG_VALUE_16BIT, d_gain);
- return ov13858_write_reg(ov13858, OV13858_REG_DIGITAL_GAIN,
- OV13858_REG_VALUE_08BIT, val);
+ return ret;
}
static int ov13858_enable_test_pattern(struct ov13858 *ov13858, u32 pattern)
@@ -1377,6 +1382,8 @@ ov13858_set_pad_format(struct v4l2_subdev *sd,
struct ov13858 *ov13858 = to_ov13858(sd);
const struct ov13858_mode *mode;
struct v4l2_mbus_framefmt *framefmt;
+ s32 vblank_def;
+ s32 vblank_min;
s64 h_blank;
mutex_lock(&ov13858->mutex);
@@ -1397,10 +1404,15 @@ ov13858_set_pad_format(struct v4l2_subdev *sd,
ov13858->pixel_rate,
link_freq_configs[mode->link_freq_index].pixel_rate);
/* Update limits and set FPS to default */
+ vblank_def = ov13858->cur_mode->vts_def -
+ ov13858->cur_mode->height;
+ vblank_min = ov13858->cur_mode->vts_min -
+ ov13858->cur_mode->height;
__v4l2_ctrl_modify_range(
- ov13858->vblank, OV13858_VBLANK_MIN,
+ ov13858->vblank, vblank_min,
OV13858_VTS_MAX - ov13858->cur_mode->height, 1,
- ov13858->cur_mode->vts - ov13858->cur_mode->height);
+ vblank_def);
+ __v4l2_ctrl_s_ctrl(ov13858->vblank, vblank_def);
h_blank =
link_freq_configs[mode->link_freq_index].pixels_per_line
- ov13858->cur_mode->width;
@@ -1602,6 +1614,9 @@ static int ov13858_init_controls(struct ov13858 *ov13858)
{
struct i2c_client *client = v4l2_get_subdevdata(&ov13858->sd);
struct v4l2_ctrl_handler *ctrl_hdlr;
+ s64 exposure_max;
+ s64 vblank_def;
+ s64 vblank_min;
int ret;
ctrl_hdlr = &ov13858->ctrl_handler;
@@ -1625,25 +1640,27 @@ static int ov13858_init_controls(struct ov13858 *ov13858)
link_freq_configs[0].pixel_rate, 1,
link_freq_configs[0].pixel_rate);
+ vblank_def = ov13858->cur_mode->vts_def - ov13858->cur_mode->height;
+ vblank_min = ov13858->cur_mode->vts_min - ov13858->cur_mode->height;
ov13858->vblank = v4l2_ctrl_new_std(
ctrl_hdlr, &ov13858_ctrl_ops, V4L2_CID_VBLANK,
- OV13858_VBLANK_MIN,
+ vblank_min,
OV13858_VTS_MAX - ov13858->cur_mode->height, 1,
- ov13858->cur_mode->vts
- - ov13858->cur_mode->height);
+ vblank_def);
ov13858->hblank = v4l2_ctrl_new_std(
ctrl_hdlr, &ov13858_ctrl_ops, V4L2_CID_HBLANK,
- OV13858_PPL_1080MHZ - ov13858->cur_mode->width,
- OV13858_PPL_1080MHZ - ov13858->cur_mode->width,
+ OV13858_PPL_540MHZ - ov13858->cur_mode->width,
+ OV13858_PPL_540MHZ - ov13858->cur_mode->width,
1,
- OV13858_PPL_1080MHZ - ov13858->cur_mode->width);
+ OV13858_PPL_540MHZ - ov13858->cur_mode->width);
ov13858->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ exposure_max = ov13858->cur_mode->vts_def - 8;
ov13858->exposure = v4l2_ctrl_new_std(
ctrl_hdlr, &ov13858_ctrl_ops,
V4L2_CID_EXPOSURE, OV13858_EXPOSURE_MIN,
- OV13858_EXPOSURE_MAX, OV13858_EXPOSURE_STEP,
+ exposure_max, OV13858_EXPOSURE_STEP,
OV13858_EXPOSURE_DEFAULT);
v4l2_ctrl_new_std(ctrl_hdlr, &ov13858_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
index 1f5b483cf334..39a2269c0bee 100644
--- a/drivers/media/i2c/ov5640.c
+++ b/drivers/media/i2c/ov5640.c
@@ -1524,8 +1524,7 @@ static int ov5640_restore_mode(struct ov5640_dev *sensor)
static void ov5640_power(struct ov5640_dev *sensor, bool enable)
{
- if (sensor->pwdn_gpio)
- gpiod_set_value(sensor->pwdn_gpio, enable ? 0 : 1);
+ gpiod_set_value(sensor->pwdn_gpio, enable ? 0 : 1);
}
static void ov5640_reset(struct ov5640_dev *sensor)
diff --git a/drivers/media/i2c/ov5645.c b/drivers/media/i2c/ov5645.c
index d1e844f7f03f..d28845f7356f 100644
--- a/drivers/media/i2c/ov5645.c
+++ b/drivers/media/i2c/ov5645.c
@@ -80,6 +80,8 @@ struct ov5645_mode_info {
u32 height;
const struct reg_value *data;
u32 data_size;
+ u32 pixel_clock;
+ u32 link_freq;
};
struct ov5645 {
@@ -99,6 +101,8 @@ struct ov5645 {
const struct ov5645_mode_info *current_mode;
struct v4l2_ctrl_handler ctrls;
+ struct v4l2_ctrl *pixel_clock;
+ struct v4l2_ctrl *link_freq;
/* Cached register values */
u8 aec_pk_manual;
@@ -505,24 +509,35 @@ static const struct reg_value ov5645_setting_full[] = {
{ 0x4202, 0x00 }
};
+static const s64 link_freq[] = {
+ 222880000,
+ 334320000
+};
+
static const struct ov5645_mode_info ov5645_mode_info_data[] = {
{
.width = 1280,
.height = 960,
.data = ov5645_setting_sxga,
- .data_size = ARRAY_SIZE(ov5645_setting_sxga)
+ .data_size = ARRAY_SIZE(ov5645_setting_sxga),
+ .pixel_clock = 111440000,
+ .link_freq = 0 /* an index in link_freq[] */
},
{
.width = 1920,
.height = 1080,
.data = ov5645_setting_1080p,
- .data_size = ARRAY_SIZE(ov5645_setting_1080p)
+ .data_size = ARRAY_SIZE(ov5645_setting_1080p),
+ .pixel_clock = 167160000,
+ .link_freq = 1 /* an index in link_freq[] */
},
{
.width = 2592,
.height = 1944,
.data = ov5645_setting_full,
- .data_size = ARRAY_SIZE(ov5645_setting_full)
+ .data_size = ARRAY_SIZE(ov5645_setting_full),
+ .pixel_clock = 167160000,
+ .link_freq = 1 /* an index in link_freq[] */
},
};
@@ -969,6 +984,7 @@ static int ov5645_set_format(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *__format;
struct v4l2_rect *__crop;
const struct ov5645_mode_info *new_mode;
+ int ret;
__crop = __ov5645_get_pad_crop(ov5645, cfg, format->pad,
format->which);
@@ -978,8 +994,19 @@ static int ov5645_set_format(struct v4l2_subdev *sd,
__crop->width = new_mode->width;
__crop->height = new_mode->height;
- if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ ret = v4l2_ctrl_s_ctrl_int64(ov5645->pixel_clock,
+ new_mode->pixel_clock);
+ if (ret < 0)
+ return ret;
+
+ ret = v4l2_ctrl_s_ctrl(ov5645->link_freq,
+ new_mode->link_freq);
+ if (ret < 0)
+ return ret;
+
ov5645->current_mode = new_mode;
+ }
__format = __ov5645_get_pad_format(ov5645, cfg, format->pad,
format->which);
@@ -1197,7 +1224,7 @@ static int ov5645_probe(struct i2c_client *client,
mutex_init(&ov5645->power_lock);
- v4l2_ctrl_handler_init(&ov5645->ctrls, 7);
+ v4l2_ctrl_handler_init(&ov5645->ctrls, 9);
v4l2_ctrl_new_std(&ov5645->ctrls, &ov5645_ctrl_ops,
V4L2_CID_SATURATION, -4, 4, 1, 0);
v4l2_ctrl_new_std(&ov5645->ctrls, &ov5645_ctrl_ops,
@@ -1215,6 +1242,17 @@ static int ov5645_probe(struct i2c_client *client,
V4L2_CID_TEST_PATTERN,
ARRAY_SIZE(ov5645_test_pattern_menu) - 1,
0, 0, ov5645_test_pattern_menu);
+ ov5645->pixel_clock = v4l2_ctrl_new_std(&ov5645->ctrls,
+ &ov5645_ctrl_ops,
+ V4L2_CID_PIXEL_RATE,
+ 1, INT_MAX, 1, 1);
+ ov5645->link_freq = v4l2_ctrl_new_int_menu(&ov5645->ctrls,
+ &ov5645_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ ARRAY_SIZE(link_freq) - 1,
+ 0, link_freq);
+ if (ov5645->link_freq)
+ ov5645->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
ov5645->sd.ctrl_handler = &ov5645->ctrls;
@@ -1229,6 +1267,7 @@ static int ov5645_probe(struct i2c_client *client,
ov5645->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
ov5645->pad.flags = MEDIA_PAD_FL_SOURCE;
ov5645->sd.dev = &client->dev;
+ ov5645->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
ret = media_entity_pads_init(&ov5645->sd.entity, 1, &ov5645->pad);
if (ret < 0) {
diff --git a/drivers/media/i2c/ov5670.c b/drivers/media/i2c/ov5670.c
new file mode 100644
index 000000000000..6f7a1d6d2200
--- /dev/null
+++ b/drivers/media/i2c/ov5670.c
@@ -0,0 +1,2601 @@
+/*
+ * Copyright (c) 2017 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/acpi.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+
+#define OV5670_REG_CHIP_ID 0x300a
+#define OV5670_CHIP_ID 0x005670
+
+#define OV5670_REG_MODE_SELECT 0x0100
+#define OV5670_MODE_STANDBY 0x00
+#define OV5670_MODE_STREAMING 0x01
+
+#define OV5670_REG_SOFTWARE_RST 0x0103
+#define OV5670_SOFTWARE_RST 0x01
+
+/* vertical-timings from sensor */
+#define OV5670_REG_VTS 0x380e
+#define OV5670_VTS_30FPS 0x0808 /* default for 30 fps */
+#define OV5670_VTS_MAX 0xffff
+
+/* horizontal-timings from sensor */
+#define OV5670_REG_HTS 0x380c
+
+/*
+ * Pixels-per-line (PPL) = Time-per-line * pixel-rate
+ * In OV5670, Time-per-line = HTS/SCLK.
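+ * That is, PPL = (HTS / SCLK) * pixel-rate.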
+ * HTS is fixed for all resolutions, not recommended to change.
+ */
+#define OV5670_FIXED_PPL 2724 /* Pixels per line */
+
+/* Exposure controls from sensor */
+#define OV5670_REG_EXPOSURE 0x3500
+#define OV5670_EXPOSURE_MIN 4
+#define OV5670_EXPOSURE_STEP 1
+
+/* Analog gain controls from sensor */
+#define OV5670_REG_ANALOG_GAIN 0x3508
+#define ANALOG_GAIN_MIN 0
+#define ANALOG_GAIN_MAX 8191
+#define ANALOG_GAIN_STEP 1
+#define ANALOG_GAIN_DEFAULT 128
+
+/* Digital gain controls from sensor */
+#define OV5670_REG_R_DGTL_GAIN 0x5032
+#define OV5670_REG_G_DGTL_GAIN 0x5034
+#define OV5670_REG_B_DGTL_GAIN 0x5036
+#define OV5670_DGTL_GAIN_MIN 0
+#define OV5670_DGTL_GAIN_MAX 4095
+#define OV5670_DGTL_GAIN_STEP 1
+#define OV5670_DGTL_GAIN_DEFAULT 1024
+
+/* Test Pattern Control */
+#define OV5670_REG_TEST_PATTERN 0x4303
+#define OV5670_TEST_PATTERN_ENABLE BIT(3)
+#define OV5670_REG_TEST_PATTERN_CTRL 0x4320
+
+#define OV5670_REG_VALUE_08BIT 1
+#define OV5670_REG_VALUE_16BIT 2
+#define OV5670_REG_VALUE_24BIT 3
+
+/* Initial number of frames to skip to avoid possible garbage */
+#define OV5670_NUM_OF_SKIP_FRAMES 2
+
+struct ov5670_reg {
+ u16 address;
+ u8 val;
+};
+
+struct ov5670_reg_list {
+ u32 num_of_regs;
+ const struct ov5670_reg *regs;
+};
+
+struct ov5670_link_freq_config {
+ u32 pixel_rate;
+ const struct ov5670_reg_list reg_list;
+};
+
+struct ov5670_mode {
+ /* Frame width in pixels */
+ u32 width;
+
+ /* Frame height in pixels */
+ u32 height;
+
+ /* Default vertical timing size */
+ u32 vts_def;
+
+ /* Min vertical timing size */
+ u32 vts_min;
+
+ /* Link frequency needed for this resolution */
+ u32 link_freq_index;
+
+ /* Sensor register settings for this resolution */
+ const struct ov5670_reg_list reg_list;
+};
+
+static const struct ov5670_reg mipi_data_rate_840mbps[] = {
+ {0x0300, 0x04},
+ {0x0301, 0x00},
+ {0x0302, 0x84},
+ {0x0303, 0x00},
+ {0x0304, 0x03},
+ {0x0305, 0x01},
+ {0x0306, 0x01},
+ {0x030a, 0x00},
+ {0x030b, 0x00},
+ {0x030c, 0x00},
+ {0x030d, 0x26},
+ {0x030e, 0x00},
+ {0x030f, 0x06},
+ {0x0312, 0x01},
+ {0x3031, 0x0a},
+};
+
+static const struct ov5670_reg mode_2592x1944_regs[] = {
+ {0x3000, 0x00},
+ {0x3002, 0x21},
+ {0x3005, 0xf0},
+ {0x3007, 0x00},
+ {0x3015, 0x0f},
+ {0x3018, 0x32},
+ {0x301a, 0xf0},
+ {0x301b, 0xf0},
+ {0x301c, 0xf0},
+ {0x301d, 0xf0},
+ {0x301e, 0xf0},
+ {0x3030, 0x00},
+ {0x3031, 0x0a},
+ {0x303c, 0xff},
+ {0x303e, 0xff},
+ {0x3040, 0xf0},
+ {0x3041, 0x00},
+ {0x3042, 0xf0},
+ {0x3106, 0x11},
+ {0x3500, 0x00},
+ {0x3501, 0x80},
+ {0x3502, 0x00},
+ {0x3503, 0x04},
+ {0x3504, 0x03},
+ {0x3505, 0x83},
+ {0x3508, 0x04},
+ {0x3509, 0x00},
+ {0x350e, 0x04},
+ {0x350f, 0x00},
+ {0x3510, 0x00},
+ {0x3511, 0x02},
+ {0x3512, 0x00},
+ {0x3601, 0xc8},
+ {0x3610, 0x88},
+ {0x3612, 0x48},
+ {0x3614, 0x5b},
+ {0x3615, 0x96},
+ {0x3621, 0xd0},
+ {0x3622, 0x00},
+ {0x3623, 0x00},
+ {0x3633, 0x13},
+ {0x3634, 0x13},
+ {0x3635, 0x13},
+ {0x3636, 0x13},
+ {0x3645, 0x13},
+ {0x3646, 0x82},
+ {0x3650, 0x00},
+ {0x3652, 0xff},
+ {0x3655, 0x20},
+ {0x3656, 0xff},
+ {0x365a, 0xff},
+ {0x365e, 0xff},
+ {0x3668, 0x00},
+ {0x366a, 0x07},
+ {0x366e, 0x10},
+ {0x366d, 0x00},
+ {0x366f, 0x80},
+ {0x3700, 0x28},
+ {0x3701, 0x10},
+ {0x3702, 0x3a},
+ {0x3703, 0x19},
+ {0x3704, 0x10},
+ {0x3705, 0x00},
+ {0x3706, 0x66},
+ {0x3707, 0x08},
+ {0x3708, 0x34},
+ {0x3709, 0x40},
+ {0x370a, 0x01},
+ {0x370b, 0x1b},
+ {0x3714, 0x24},
+ {0x371a, 0x3e},
+ {0x3733, 0x00},
+ {0x3734, 0x00},
+ {0x373a, 0x05},
+ {0x373b, 0x06},
+ {0x373c, 0x0a},
+ {0x373f, 0xa0},
+ {0x3755, 0x00},
+ {0x3758, 0x00},
+ {0x375b, 0x0e},
+ {0x3766, 0x5f},
+ {0x3768, 0x00},
+ {0x3769, 0x22},
+ {0x3773, 0x08},
+ {0x3774, 0x1f},
+ {0x3776, 0x06},
+ {0x37a0, 0x88},
+ {0x37a1, 0x5c},
+ {0x37a7, 0x88},
+ {0x37a8, 0x70},
+ {0x37aa, 0x88},
+ {0x37ab, 0x48},
+ {0x37b3, 0x66},
+ {0x37c2, 0x04},
+ {0x37c5, 0x00},
+ {0x37c8, 0x00},
+ {0x3800, 0x00},
+ {0x3801, 0x0c},
+ {0x3802, 0x00},
+ {0x3803, 0x04},
+ {0x3804, 0x0a},
+ {0x3805, 0x33},
+ {0x3806, 0x07},
+ {0x3807, 0xa3},
+ {0x3808, 0x0a},
+ {0x3809, 0x20},
+ {0x380a, 0x07},
+ {0x380b, 0x98},
+ {0x380c, 0x06},
+ {0x380d, 0x90},
+ {0x380e, 0x08},
+ {0x380f, 0x08},
+ {0x3811, 0x04},
+ {0x3813, 0x02},
+ {0x3814, 0x01},
+ {0x3815, 0x01},
+ {0x3816, 0x00},
+ {0x3817, 0x00},
+ {0x3818, 0x00},
+ {0x3819, 0x00},
+ {0x3820, 0x84},
+ {0x3821, 0x46},
+ {0x3822, 0x48},
+ {0x3826, 0x00},
+ {0x3827, 0x08},
+ {0x382a, 0x01},
+ {0x382b, 0x01},
+ {0x3830, 0x08},
+ {0x3836, 0x02},
+ {0x3837, 0x00},
+ {0x3838, 0x10},
+ {0x3841, 0xff},
+ {0x3846, 0x48},
+ {0x3861, 0x00},
+ {0x3862, 0x04},
+ {0x3863, 0x06},
+ {0x3a11, 0x01},
+ {0x3a12, 0x78},
+ {0x3b00, 0x00},
+ {0x3b02, 0x00},
+ {0x3b03, 0x00},
+ {0x3b04, 0x00},
+ {0x3b05, 0x00},
+ {0x3c00, 0x89},
+ {0x3c01, 0xab},
+ {0x3c02, 0x01},
+ {0x3c03, 0x00},
+ {0x3c04, 0x00},
+ {0x3c05, 0x03},
+ {0x3c06, 0x00},
+ {0x3c07, 0x05},
+ {0x3c0c, 0x00},
+ {0x3c0d, 0x00},
+ {0x3c0e, 0x00},
+ {0x3c0f, 0x00},
+ {0x3c40, 0x00},
+ {0x3c41, 0xa3},
+ {0x3c43, 0x7d},
+ {0x3c45, 0xd7},
+ {0x3c47, 0xfc},
+ {0x3c50, 0x05},
+ {0x3c52, 0xaa},
+ {0x3c54, 0x71},
+ {0x3c56, 0x80},
+ {0x3d85, 0x17},
+ {0x3f03, 0x00},
+ {0x3f0a, 0x00},
+ {0x3f0b, 0x00},
+ {0x4001, 0x60},
+ {0x4009, 0x0d},
+ {0x4020, 0x00},
+ {0x4021, 0x00},
+ {0x4022, 0x00},
+ {0x4023, 0x00},
+ {0x4024, 0x00},
+ {0x4025, 0x00},
+ {0x4026, 0x00},
+ {0x4027, 0x00},
+ {0x4028, 0x00},
+ {0x4029, 0x00},
+ {0x402a, 0x00},
+ {0x402b, 0x00},
+ {0x402c, 0x00},
+ {0x402d, 0x00},
+ {0x402e, 0x00},
+ {0x402f, 0x00},
+ {0x4040, 0x00},
+ {0x4041, 0x03},
+ {0x4042, 0x00},
+ {0x4043, 0x7A},
+ {0x4044, 0x00},
+ {0x4045, 0x7A},
+ {0x4046, 0x00},
+ {0x4047, 0x7A},
+ {0x4048, 0x00},
+ {0x4049, 0x7A},
+ {0x4307, 0x30},
+ {0x4500, 0x58},
+ {0x4501, 0x04},
+ {0x4502, 0x40},
+ {0x4503, 0x10},
+ {0x4508, 0xaa},
+ {0x4509, 0xaa},
+ {0x450a, 0x00},
+ {0x450b, 0x00},
+ {0x4600, 0x01},
+ {0x4601, 0x03},
+ {0x4700, 0xa4},
+ {0x4800, 0x4c},
+ {0x4816, 0x53},
+ {0x481f, 0x40},
+ {0x4837, 0x13},
+ {0x5000, 0x56},
+ {0x5001, 0x01},
+ {0x5002, 0x28},
+ {0x5004, 0x0c},
+ {0x5006, 0x0c},
+ {0x5007, 0xe0},
+ {0x5008, 0x01},
+ {0x5009, 0xb0},
+ {0x5901, 0x00},
+ {0x5a01, 0x00},
+ {0x5a03, 0x00},
+ {0x5a04, 0x0c},
+ {0x5a05, 0xe0},
+ {0x5a06, 0x09},
+ {0x5a07, 0xb0},
+ {0x5a08, 0x06},
+ {0x5e00, 0x00},
+ {0x3734, 0x40},
+ {0x5b00, 0x01},
+ {0x5b01, 0x10},
+ {0x5b02, 0x01},
+ {0x5b03, 0xdb},
+ {0x3d8c, 0x71},
+ {0x3d8d, 0xea},
+ {0x4017, 0x08},
+ {0x3618, 0x2a},
+ {0x5780, 0x3e},
+ {0x5781, 0x0f},
+ {0x5782, 0x44},
+ {0x5783, 0x02},
+ {0x5784, 0x01},
+ {0x5785, 0x01},
+ {0x5786, 0x00},
+ {0x5787, 0x04},
+ {0x5788, 0x02},
+ {0x5789, 0x0f},
+ {0x578a, 0xfd},
+ {0x578b, 0xf5},
+ {0x578c, 0xf5},
+ {0x578d, 0x03},
+ {0x578e, 0x08},
+ {0x578f, 0x0c},
+ {0x5790, 0x08},
+ {0x5791, 0x06},
+ {0x5792, 0x00},
+ {0x5793, 0x52},
+ {0x5794, 0xa3},
+ {0x3503, 0x00}
+};
+
+static const struct ov5670_reg mode_1296x972_regs[] = {
+ {0x3000, 0x00},
+ {0x3002, 0x21},
+ {0x3005, 0xf0},
+ {0x3007, 0x00},
+ {0x3015, 0x0f},
+ {0x3018, 0x32},
+ {0x301a, 0xf0},
+ {0x301b, 0xf0},
+ {0x301c, 0xf0},
+ {0x301d, 0xf0},
+ {0x301e, 0xf0},
+ {0x3030, 0x00},
+ {0x3031, 0x0a},
+ {0x303c, 0xff},
+ {0x303e, 0xff},
+ {0x3040, 0xf0},
+ {0x3041, 0x00},
+ {0x3042, 0xf0},
+ {0x3106, 0x11},
+ {0x3500, 0x00},
+ {0x3501, 0x80},
+ {0x3502, 0x00},
+ {0x3503, 0x04},
+ {0x3504, 0x03},
+ {0x3505, 0x83},
+ {0x3508, 0x07},
+ {0x3509, 0x80},
+ {0x350e, 0x04},
+ {0x350f, 0x00},
+ {0x3510, 0x00},
+ {0x3511, 0x02},
+ {0x3512, 0x00},
+ {0x3601, 0xc8},
+ {0x3610, 0x88},
+ {0x3612, 0x48},
+ {0x3614, 0x5b},
+ {0x3615, 0x96},
+ {0x3621, 0xd0},
+ {0x3622, 0x00},
+ {0x3623, 0x00},
+ {0x3633, 0x13},
+ {0x3634, 0x13},
+ {0x3635, 0x13},
+ {0x3636, 0x13},
+ {0x3645, 0x13},
+ {0x3646, 0x82},
+ {0x3650, 0x00},
+ {0x3652, 0xff},
+ {0x3655, 0x20},
+ {0x3656, 0xff},
+ {0x365a, 0xff},
+ {0x365e, 0xff},
+ {0x3668, 0x00},
+ {0x366a, 0x07},
+ {0x366e, 0x08},
+ {0x366d, 0x00},
+ {0x366f, 0x80},
+ {0x3700, 0x28},
+ {0x3701, 0x10},
+ {0x3702, 0x3a},
+ {0x3703, 0x19},
+ {0x3704, 0x10},
+ {0x3705, 0x00},
+ {0x3706, 0x66},
+ {0x3707, 0x08},
+ {0x3708, 0x34},
+ {0x3709, 0x40},
+ {0x370a, 0x01},
+ {0x370b, 0x1b},
+ {0x3714, 0x24},
+ {0x371a, 0x3e},
+ {0x3733, 0x00},
+ {0x3734, 0x00},
+ {0x373a, 0x05},
+ {0x373b, 0x06},
+ {0x373c, 0x0a},
+ {0x373f, 0xa0},
+ {0x3755, 0x00},
+ {0x3758, 0x00},
+ {0x375b, 0x0e},
+ {0x3766, 0x5f},
+ {0x3768, 0x00},
+ {0x3769, 0x22},
+ {0x3773, 0x08},
+ {0x3774, 0x1f},
+ {0x3776, 0x06},
+ {0x37a0, 0x88},
+ {0x37a1, 0x5c},
+ {0x37a7, 0x88},
+ {0x37a8, 0x70},
+ {0x37aa, 0x88},
+ {0x37ab, 0x48},
+ {0x37b3, 0x66},
+ {0x37c2, 0x04},
+ {0x37c5, 0x00},
+ {0x37c8, 0x00},
+ {0x3800, 0x00},
+ {0x3801, 0x0c},
+ {0x3802, 0x00},
+ {0x3803, 0x04},
+ {0x3804, 0x0a},
+ {0x3805, 0x33},
+ {0x3806, 0x07},
+ {0x3807, 0xa3},
+ {0x3808, 0x05},
+ {0x3809, 0x10},
+ {0x380a, 0x03},
+ {0x380b, 0xcc},
+ {0x380c, 0x06},
+ {0x380d, 0x90},
+ {0x380e, 0x08},
+ {0x380f, 0x08},
+ {0x3811, 0x04},
+ {0x3813, 0x04},
+ {0x3814, 0x03},
+ {0x3815, 0x01},
+ {0x3816, 0x00},
+ {0x3817, 0x00},
+ {0x3818, 0x00},
+ {0x3819, 0x00},
+ {0x3820, 0x94},
+ {0x3821, 0x47},
+ {0x3822, 0x48},
+ {0x3826, 0x00},
+ {0x3827, 0x08},
+ {0x382a, 0x03},
+ {0x382b, 0x01},
+ {0x3830, 0x08},
+ {0x3836, 0x02},
+ {0x3837, 0x00},
+ {0x3838, 0x10},
+ {0x3841, 0xff},
+ {0x3846, 0x48},
+ {0x3861, 0x00},
+ {0x3862, 0x04},
+ {0x3863, 0x06},
+ {0x3a11, 0x01},
+ {0x3a12, 0x78},
+ {0x3b00, 0x00},
+ {0x3b02, 0x00},
+ {0x3b03, 0x00},
+ {0x3b04, 0x00},
+ {0x3b05, 0x00},
+ {0x3c00, 0x89},
+ {0x3c01, 0xab},
+ {0x3c02, 0x01},
+ {0x3c03, 0x00},
+ {0x3c04, 0x00},
+ {0x3c05, 0x03},
+ {0x3c06, 0x00},
+ {0x3c07, 0x05},
+ {0x3c0c, 0x00},
+ {0x3c0d, 0x00},
+ {0x3c0e, 0x00},
+ {0x3c0f, 0x00},
+ {0x3c40, 0x00},
+ {0x3c41, 0xa3},
+ {0x3c43, 0x7d},
+ {0x3c45, 0xd7},
+ {0x3c47, 0xfc},
+ {0x3c50, 0x05},
+ {0x3c52, 0xaa},
+ {0x3c54, 0x71},
+ {0x3c56, 0x80},
+ {0x3d85, 0x17},
+ {0x3f03, 0x00},
+ {0x3f0a, 0x00},
+ {0x3f0b, 0x00},
+ {0x4001, 0x60},
+ {0x4009, 0x05},
+ {0x4020, 0x00},
+ {0x4021, 0x00},
+ {0x4022, 0x00},
+ {0x4023, 0x00},
+ {0x4024, 0x00},
+ {0x4025, 0x00},
+ {0x4026, 0x00},
+ {0x4027, 0x00},
+ {0x4028, 0x00},
+ {0x4029, 0x00},
+ {0x402a, 0x00},
+ {0x402b, 0x00},
+ {0x402c, 0x00},
+ {0x402d, 0x00},
+ {0x402e, 0x00},
+ {0x402f, 0x00},
+ {0x4040, 0x00},
+ {0x4041, 0x03},
+ {0x4042, 0x00},
+ {0x4043, 0x7A},
+ {0x4044, 0x00},
+ {0x4045, 0x7A},
+ {0x4046, 0x00},
+ {0x4047, 0x7A},
+ {0x4048, 0x00},
+ {0x4049, 0x7A},
+ {0x4307, 0x30},
+ {0x4500, 0x58},
+ {0x4501, 0x04},
+ {0x4502, 0x48},
+ {0x4503, 0x10},
+ {0x4508, 0x55},
+ {0x4509, 0x55},
+ {0x450a, 0x00},
+ {0x450b, 0x00},
+ {0x4600, 0x00},
+ {0x4601, 0x81},
+ {0x4700, 0xa4},
+ {0x4800, 0x4c},
+ {0x4816, 0x53},
+ {0x481f, 0x40},
+ {0x4837, 0x13},
+ {0x5000, 0x56},
+ {0x5001, 0x01},
+ {0x5002, 0x28},
+ {0x5004, 0x0c},
+ {0x5006, 0x0c},
+ {0x5007, 0xe0},
+ {0x5008, 0x01},
+ {0x5009, 0xb0},
+ {0x5901, 0x00},
+ {0x5a01, 0x00},
+ {0x5a03, 0x00},
+ {0x5a04, 0x0c},
+ {0x5a05, 0xe0},
+ {0x5a06, 0x09},
+ {0x5a07, 0xb0},
+ {0x5a08, 0x06},
+ {0x5e00, 0x00},
+ {0x3734, 0x40},
+ {0x5b00, 0x01},
+ {0x5b01, 0x10},
+ {0x5b02, 0x01},
+ {0x5b03, 0xdb},
+ {0x3d8c, 0x71},
+ {0x3d8d, 0xea},
+ {0x4017, 0x10},
+ {0x3618, 0x2a},
+ {0x5780, 0x3e},
+ {0x5781, 0x0f},
+ {0x5782, 0x44},
+ {0x5783, 0x02},
+ {0x5784, 0x01},
+ {0x5785, 0x01},
+ {0x5786, 0x00},
+ {0x5787, 0x04},
+ {0x5788, 0x02},
+ {0x5789, 0x0f},
+ {0x578a, 0xfd},
+ {0x578b, 0xf5},
+ {0x578c, 0xf5},
+ {0x578d, 0x03},
+ {0x578e, 0x08},
+ {0x578f, 0x0c},
+ {0x5790, 0x08},
+ {0x5791, 0x04},
+ {0x5792, 0x00},
+ {0x5793, 0x52},
+ {0x5794, 0xa3},
+ {0x3503, 0x00}
+};
+
+static const struct ov5670_reg mode_648x486_regs[] = {
+ {0x3000, 0x00},
+ {0x3002, 0x21},
+ {0x3005, 0xf0},
+ {0x3007, 0x00},
+ {0x3015, 0x0f},
+ {0x3018, 0x32},
+ {0x301a, 0xf0},
+ {0x301b, 0xf0},
+ {0x301c, 0xf0},
+ {0x301d, 0xf0},
+ {0x301e, 0xf0},
+ {0x3030, 0x00},
+ {0x3031, 0x0a},
+ {0x303c, 0xff},
+ {0x303e, 0xff},
+ {0x3040, 0xf0},
+ {0x3041, 0x00},
+ {0x3042, 0xf0},
+ {0x3106, 0x11},
+ {0x3500, 0x00},
+ {0x3501, 0x80},
+ {0x3502, 0x00},
+ {0x3503, 0x04},
+ {0x3504, 0x03},
+ {0x3505, 0x83},
+ {0x3508, 0x04},
+ {0x3509, 0x00},
+ {0x350e, 0x04},
+ {0x350f, 0x00},
+ {0x3510, 0x00},
+ {0x3511, 0x02},
+ {0x3512, 0x00},
+ {0x3601, 0xc8},
+ {0x3610, 0x88},
+ {0x3612, 0x48},
+ {0x3614, 0x5b},
+ {0x3615, 0x96},
+ {0x3621, 0xd0},
+ {0x3622, 0x00},
+ {0x3623, 0x04},
+ {0x3633, 0x13},
+ {0x3634, 0x13},
+ {0x3635, 0x13},
+ {0x3636, 0x13},
+ {0x3645, 0x13},
+ {0x3646, 0x82},
+ {0x3650, 0x00},
+ {0x3652, 0xff},
+ {0x3655, 0x20},
+ {0x3656, 0xff},
+ {0x365a, 0xff},
+ {0x365e, 0xff},
+ {0x3668, 0x00},
+ {0x366a, 0x07},
+ {0x366e, 0x08},
+ {0x366d, 0x00},
+ {0x366f, 0x80},
+ {0x3700, 0x28},
+ {0x3701, 0x10},
+ {0x3702, 0x3a},
+ {0x3703, 0x19},
+ {0x3704, 0x10},
+ {0x3705, 0x00},
+ {0x3706, 0x66},
+ {0x3707, 0x08},
+ {0x3708, 0x34},
+ {0x3709, 0x40},
+ {0x370a, 0x01},
+ {0x370b, 0x1b},
+ {0x3714, 0x24},
+ {0x371a, 0x3e},
+ {0x3733, 0x00},
+ {0x3734, 0x00},
+ {0x373a, 0x05},
+ {0x373b, 0x06},
+ {0x373c, 0x0a},
+ {0x373f, 0xa0},
+ {0x3755, 0x00},
+ {0x3758, 0x00},
+ {0x375b, 0x0e},
+ {0x3766, 0x5f},
+ {0x3768, 0x00},
+ {0x3769, 0x22},
+ {0x3773, 0x08},
+ {0x3774, 0x1f},
+ {0x3776, 0x06},
+ {0x37a0, 0x88},
+ {0x37a1, 0x5c},
+ {0x37a7, 0x88},
+ {0x37a8, 0x70},
+ {0x37aa, 0x88},
+ {0x37ab, 0x48},
+ {0x37b3, 0x66},
+ {0x37c2, 0x04},
+ {0x37c5, 0x00},
+ {0x37c8, 0x00},
+ {0x3800, 0x00},
+ {0x3801, 0x0c},
+ {0x3802, 0x00},
+ {0x3803, 0x04},
+ {0x3804, 0x0a},
+ {0x3805, 0x33},
+ {0x3806, 0x07},
+ {0x3807, 0xa3},
+ {0x3808, 0x02},
+ {0x3809, 0x88},
+ {0x380a, 0x01},
+ {0x380b, 0xe6},
+ {0x380c, 0x06},
+ {0x380d, 0x90},
+ {0x380e, 0x08},
+ {0x380f, 0x08},
+ {0x3811, 0x04},
+ {0x3813, 0x02},
+ {0x3814, 0x07},
+ {0x3815, 0x01},
+ {0x3816, 0x00},
+ {0x3817, 0x00},
+ {0x3818, 0x00},
+ {0x3819, 0x00},
+ {0x3820, 0x94},
+ {0x3821, 0xc6},
+ {0x3822, 0x48},
+ {0x3826, 0x00},
+ {0x3827, 0x08},
+ {0x382a, 0x07},
+ {0x382b, 0x01},
+ {0x3830, 0x08},
+ {0x3836, 0x02},
+ {0x3837, 0x00},
+ {0x3838, 0x10},
+ {0x3841, 0xff},
+ {0x3846, 0x48},
+ {0x3861, 0x00},
+ {0x3862, 0x04},
+ {0x3863, 0x06},
+ {0x3a11, 0x01},
+ {0x3a12, 0x78},
+ {0x3b00, 0x00},
+ {0x3b02, 0x00},
+ {0x3b03, 0x00},
+ {0x3b04, 0x00},
+ {0x3b05, 0x00},
+ {0x3c00, 0x89},
+ {0x3c01, 0xab},
+ {0x3c02, 0x01},
+ {0x3c03, 0x00},
+ {0x3c04, 0x00},
+ {0x3c05, 0x03},
+ {0x3c06, 0x00},
+ {0x3c07, 0x05},
+ {0x3c0c, 0x00},
+ {0x3c0d, 0x00},
+ {0x3c0e, 0x00},
+ {0x3c0f, 0x00},
+ {0x3c40, 0x00},
+ {0x3c41, 0xa3},
+ {0x3c43, 0x7d},
+ {0x3c45, 0xd7},
+ {0x3c47, 0xfc},
+ {0x3c50, 0x05},
+ {0x3c52, 0xaa},
+ {0x3c54, 0x71},
+ {0x3c56, 0x80},
+ {0x3d85, 0x17},
+ {0x3f03, 0x00},
+ {0x3f0a, 0x00},
+ {0x3f0b, 0x00},
+ {0x4001, 0x60},
+ {0x4009, 0x05},
+ {0x4020, 0x00},
+ {0x4021, 0x00},
+ {0x4022, 0x00},
+ {0x4023, 0x00},
+ {0x4024, 0x00},
+ {0x4025, 0x00},
+ {0x4026, 0x00},
+ {0x4027, 0x00},
+ {0x4028, 0x00},
+ {0x4029, 0x00},
+ {0x402a, 0x00},
+ {0x402b, 0x00},
+ {0x402c, 0x00},
+ {0x402d, 0x00},
+ {0x402e, 0x00},
+ {0x402f, 0x00},
+ {0x4040, 0x00},
+ {0x4041, 0x03},
+ {0x4042, 0x00},
+ {0x4043, 0x7A},
+ {0x4044, 0x00},
+ {0x4045, 0x7A},
+ {0x4046, 0x00},
+ {0x4047, 0x7A},
+ {0x4048, 0x00},
+ {0x4049, 0x7A},
+ {0x4307, 0x30},
+ {0x4500, 0x58},
+ {0x4501, 0x04},
+ {0x4502, 0x40},
+ {0x4503, 0x10},
+ {0x4508, 0x55},
+ {0x4509, 0x55},
+ {0x450a, 0x02},
+ {0x450b, 0x00},
+ {0x4600, 0x00},
+ {0x4601, 0x40},
+ {0x4700, 0xa4},
+ {0x4800, 0x4c},
+ {0x4816, 0x53},
+ {0x481f, 0x40},
+ {0x4837, 0x13},
+ {0x5000, 0x56},
+ {0x5001, 0x01},
+ {0x5002, 0x28},
+ {0x5004, 0x0c},
+ {0x5006, 0x0c},
+ {0x5007, 0xe0},
+ {0x5008, 0x01},
+ {0x5009, 0xb0},
+ {0x5901, 0x00},
+ {0x5a01, 0x00},
+ {0x5a03, 0x00},
+ {0x5a04, 0x0c},
+ {0x5a05, 0xe0},
+ {0x5a06, 0x09},
+ {0x5a07, 0xb0},
+ {0x5a08, 0x06},
+ {0x5e00, 0x00},
+ {0x3734, 0x40},
+ {0x5b00, 0x01},
+ {0x5b01, 0x10},
+ {0x5b02, 0x01},
+ {0x5b03, 0xdb},
+ {0x3d8c, 0x71},
+ {0x3d8d, 0xea},
+ {0x4017, 0x10},
+ {0x3618, 0x2a},
+ {0x5780, 0x3e},
+ {0x5781, 0x0f},
+ {0x5782, 0x44},
+ {0x5783, 0x02},
+ {0x5784, 0x01},
+ {0x5785, 0x01},
+ {0x5786, 0x00},
+ {0x5787, 0x04},
+ {0x5788, 0x02},
+ {0x5789, 0x0f},
+ {0x578a, 0xfd},
+ {0x578b, 0xf5},
+ {0x578c, 0xf5},
+ {0x578d, 0x03},
+ {0x578e, 0x08},
+ {0x578f, 0x0c},
+ {0x5790, 0x08},
+ {0x5791, 0x06},
+ {0x5792, 0x00},
+ {0x5793, 0x52},
+ {0x5794, 0xa3},
+ {0x3503, 0x00}
+};
+
+static const struct ov5670_reg mode_2560x1440_regs[] = {
+ {0x3000, 0x00},
+ {0x3002, 0x21},
+ {0x3005, 0xf0},
+ {0x3007, 0x00},
+ {0x3015, 0x0f},
+ {0x3018, 0x32},
+ {0x301a, 0xf0},
+ {0x301b, 0xf0},
+ {0x301c, 0xf0},
+ {0x301d, 0xf0},
+ {0x301e, 0xf0},
+ {0x3030, 0x00},
+ {0x3031, 0x0a},
+ {0x303c, 0xff},
+ {0x303e, 0xff},
+ {0x3040, 0xf0},
+ {0x3041, 0x00},
+ {0x3042, 0xf0},
+ {0x3106, 0x11},
+ {0x3500, 0x00},
+ {0x3501, 0x80},
+ {0x3502, 0x00},
+ {0x3503, 0x04},
+ {0x3504, 0x03},
+ {0x3505, 0x83},
+ {0x3508, 0x04},
+ {0x3509, 0x00},
+ {0x350e, 0x04},
+ {0x350f, 0x00},
+ {0x3510, 0x00},
+ {0x3511, 0x02},
+ {0x3512, 0x00},
+ {0x3601, 0xc8},
+ {0x3610, 0x88},
+ {0x3612, 0x48},
+ {0x3614, 0x5b},
+ {0x3615, 0x96},
+ {0x3621, 0xd0},
+ {0x3622, 0x00},
+ {0x3623, 0x00},
+ {0x3633, 0x13},
+ {0x3634, 0x13},
+ {0x3635, 0x13},
+ {0x3636, 0x13},
+ {0x3645, 0x13},
+ {0x3646, 0x82},
+ {0x3650, 0x00},
+ {0x3652, 0xff},
+ {0x3655, 0x20},
+ {0x3656, 0xff},
+ {0x365a, 0xff},
+ {0x365e, 0xff},
+ {0x3668, 0x00},
+ {0x366a, 0x07},
+ {0x366e, 0x10},
+ {0x366d, 0x00},
+ {0x366f, 0x80},
+ {0x3700, 0x28},
+ {0x3701, 0x10},
+ {0x3702, 0x3a},
+ {0x3703, 0x19},
+ {0x3704, 0x10},
+ {0x3705, 0x00},
+ {0x3706, 0x66},
+ {0x3707, 0x08},
+ {0x3708, 0x34},
+ {0x3709, 0x40},
+ {0x370a, 0x01},
+ {0x370b, 0x1b},
+ {0x3714, 0x24},
+ {0x371a, 0x3e},
+ {0x3733, 0x00},
+ {0x3734, 0x00},
+ {0x373a, 0x05},
+ {0x373b, 0x06},
+ {0x373c, 0x0a},
+ {0x373f, 0xa0},
+ {0x3755, 0x00},
+ {0x3758, 0x00},
+ {0x375b, 0x0e},
+ {0x3766, 0x5f},
+ {0x3768, 0x00},
+ {0x3769, 0x22},
+ {0x3773, 0x08},
+ {0x3774, 0x1f},
+ {0x3776, 0x06},
+ {0x37a0, 0x88},
+ {0x37a1, 0x5c},
+ {0x37a7, 0x88},
+ {0x37a8, 0x70},
+ {0x37aa, 0x88},
+ {0x37ab, 0x48},
+ {0x37b3, 0x66},
+ {0x37c2, 0x04},
+ {0x37c5, 0x00},
+ {0x37c8, 0x00},
+ {0x3800, 0x00},
+ {0x3801, 0x0c},
+ {0x3802, 0x00},
+ {0x3803, 0x04},
+ {0x3804, 0x0a},
+ {0x3805, 0x33},
+ {0x3806, 0x07},
+ {0x3807, 0xa3},
+ {0x3808, 0x0a},
+ {0x3809, 0x00},
+ {0x380a, 0x05},
+ {0x380b, 0xa0},
+ {0x380c, 0x06},
+ {0x380d, 0x90},
+ {0x380e, 0x08},
+ {0x380f, 0x08},
+ {0x3811, 0x04},
+ {0x3813, 0x02},
+ {0x3814, 0x01},
+ {0x3815, 0x01},
+ {0x3816, 0x00},
+ {0x3817, 0x00},
+ {0x3818, 0x00},
+ {0x3819, 0x00},
+ {0x3820, 0x84},
+ {0x3821, 0x46},
+ {0x3822, 0x48},
+ {0x3826, 0x00},
+ {0x3827, 0x08},
+ {0x382a, 0x01},
+ {0x382b, 0x01},
+ {0x3830, 0x08},
+ {0x3836, 0x02},
+ {0x3837, 0x00},
+ {0x3838, 0x10},
+ {0x3841, 0xff},
+ {0x3846, 0x48},
+ {0x3861, 0x00},
+ {0x3862, 0x04},
+ {0x3863, 0x06},
+ {0x3a11, 0x01},
+ {0x3a12, 0x78},
+ {0x3b00, 0x00},
+ {0x3b02, 0x00},
+ {0x3b03, 0x00},
+ {0x3b04, 0x00},
+ {0x3b05, 0x00},
+ {0x3c00, 0x89},
+ {0x3c01, 0xab},
+ {0x3c02, 0x01},
+ {0x3c03, 0x00},
+ {0x3c04, 0x00},
+ {0x3c05, 0x03},
+ {0x3c06, 0x00},
+ {0x3c07, 0x05},
+ {0x3c0c, 0x00},
+ {0x3c0d, 0x00},
+ {0x3c0e, 0x00},
+ {0x3c0f, 0x00},
+ {0x3c40, 0x00},
+ {0x3c41, 0xa3},
+ {0x3c43, 0x7d},
+ {0x3c45, 0xd7},
+ {0x3c47, 0xfc},
+ {0x3c50, 0x05},
+ {0x3c52, 0xaa},
+ {0x3c54, 0x71},
+ {0x3c56, 0x80},
+ {0x3d85, 0x17},
+ {0x3f03, 0x00},
+ {0x3f0a, 0x00},
+ {0x3f0b, 0x00},
+ {0x4001, 0x60},
+ {0x4009, 0x0d},
+ {0x4020, 0x00},
+ {0x4021, 0x00},
+ {0x4022, 0x00},
+ {0x4023, 0x00},
+ {0x4024, 0x00},
+ {0x4025, 0x00},
+ {0x4026, 0x00},
+ {0x4027, 0x00},
+ {0x4028, 0x00},
+ {0x4029, 0x00},
+ {0x402a, 0x00},
+ {0x402b, 0x00},
+ {0x402c, 0x00},
+ {0x402d, 0x00},
+ {0x402e, 0x00},
+ {0x402f, 0x00},
+ {0x4040, 0x00},
+ {0x4041, 0x03},
+ {0x4042, 0x00},
+ {0x4043, 0x7A},
+ {0x4044, 0x00},
+ {0x4045, 0x7A},
+ {0x4046, 0x00},
+ {0x4047, 0x7A},
+ {0x4048, 0x00},
+ {0x4049, 0x7A},
+ {0x4307, 0x30},
+ {0x4500, 0x58},
+ {0x4501, 0x04},
+ {0x4502, 0x40},
+ {0x4503, 0x10},
+ {0x4508, 0xaa},
+ {0x4509, 0xaa},
+ {0x450a, 0x00},
+ {0x450b, 0x00},
+ {0x4600, 0x01},
+ {0x4601, 0x00},
+ {0x4700, 0xa4},
+ {0x4800, 0x4c},
+ {0x4816, 0x53},
+ {0x481f, 0x40},
+ {0x4837, 0x13},
+ {0x5000, 0x56},
+ {0x5001, 0x01},
+ {0x5002, 0x28},
+ {0x5004, 0x0c},
+ {0x5006, 0x0c},
+ {0x5007, 0xe0},
+ {0x5008, 0x01},
+ {0x5009, 0xb0},
+ {0x5901, 0x00},
+ {0x5a01, 0x00},
+ {0x5a03, 0x00},
+ {0x5a04, 0x0c},
+ {0x5a05, 0xe0},
+ {0x5a06, 0x09},
+ {0x5a07, 0xb0},
+ {0x5a08, 0x06},
+ {0x5e00, 0x00},
+ {0x3734, 0x40},
+ {0x5b00, 0x01},
+ {0x5b01, 0x10},
+ {0x5b02, 0x01},
+ {0x5b03, 0xdb},
+ {0x3d8c, 0x71},
+ {0x3d8d, 0xea},
+ {0x4017, 0x08},
+ {0x3618, 0x2a},
+ {0x5780, 0x3e},
+ {0x5781, 0x0f},
+ {0x5782, 0x44},
+ {0x5783, 0x02},
+ {0x5784, 0x01},
+ {0x5785, 0x01},
+ {0x5786, 0x00},
+ {0x5787, 0x04},
+ {0x5788, 0x02},
+ {0x5789, 0x0f},
+ {0x578a, 0xfd},
+ {0x578b, 0xf5},
+ {0x578c, 0xf5},
+ {0x578d, 0x03},
+ {0x578e, 0x08},
+ {0x578f, 0x0c},
+ {0x5790, 0x08},
+ {0x5791, 0x06},
+ {0x5792, 0x00},
+ {0x5793, 0x52},
+ {0x5794, 0xa3}
+};
+
+static const struct ov5670_reg mode_1280x720_regs[] = {
+ {0x3000, 0x00},
+ {0x3002, 0x21},
+ {0x3005, 0xf0},
+ {0x3007, 0x00},
+ {0x3015, 0x0f},
+ {0x3018, 0x32},
+ {0x301a, 0xf0},
+ {0x301b, 0xf0},
+ {0x301c, 0xf0},
+ {0x301d, 0xf0},
+ {0x301e, 0xf0},
+ {0x3030, 0x00},
+ {0x3031, 0x0a},
+ {0x303c, 0xff},
+ {0x303e, 0xff},
+ {0x3040, 0xf0},
+ {0x3041, 0x00},
+ {0x3042, 0xf0},
+ {0x3106, 0x11},
+ {0x3500, 0x00},
+ {0x3501, 0x80},
+ {0x3502, 0x00},
+ {0x3503, 0x04},
+ {0x3504, 0x03},
+ {0x3505, 0x83},
+ {0x3508, 0x04},
+ {0x3509, 0x00},
+ {0x350e, 0x04},
+ {0x350f, 0x00},
+ {0x3510, 0x00},
+ {0x3511, 0x02},
+ {0x3512, 0x00},
+ {0x3601, 0xc8},
+ {0x3610, 0x88},
+ {0x3612, 0x48},
+ {0x3614, 0x5b},
+ {0x3615, 0x96},
+ {0x3621, 0xd0},
+ {0x3622, 0x00},
+ {0x3623, 0x00},
+ {0x3633, 0x13},
+ {0x3634, 0x13},
+ {0x3635, 0x13},
+ {0x3636, 0x13},
+ {0x3645, 0x13},
+ {0x3646, 0x82},
+ {0x3650, 0x00},
+ {0x3652, 0xff},
+ {0x3655, 0x20},
+ {0x3656, 0xff},
+ {0x365a, 0xff},
+ {0x365e, 0xff},
+ {0x3668, 0x00},
+ {0x366a, 0x07},
+ {0x366e, 0x08},
+ {0x366d, 0x00},
+ {0x366f, 0x80},
+ {0x3700, 0x28},
+ {0x3701, 0x10},
+ {0x3702, 0x3a},
+ {0x3703, 0x19},
+ {0x3704, 0x10},
+ {0x3705, 0x00},
+ {0x3706, 0x66},
+ {0x3707, 0x08},
+ {0x3708, 0x34},
+ {0x3709, 0x40},
+ {0x370a, 0x01},
+ {0x370b, 0x1b},
+ {0x3714, 0x24},
+ {0x371a, 0x3e},
+ {0x3733, 0x00},
+ {0x3734, 0x00},
+ {0x373a, 0x05},
+ {0x373b, 0x06},
+ {0x373c, 0x0a},
+ {0x373f, 0xa0},
+ {0x3755, 0x00},
+ {0x3758, 0x00},
+ {0x375b, 0x0e},
+ {0x3766, 0x5f},
+ {0x3768, 0x00},
+ {0x3769, 0x22},
+ {0x3773, 0x08},
+ {0x3774, 0x1f},
+ {0x3776, 0x06},
+ {0x37a0, 0x88},
+ {0x37a1, 0x5c},
+ {0x37a7, 0x88},
+ {0x37a8, 0x70},
+ {0x37aa, 0x88},
+ {0x37ab, 0x48},
+ {0x37b3, 0x66},
+ {0x37c2, 0x04},
+ {0x37c5, 0x00},
+ {0x37c8, 0x00},
+ {0x3800, 0x00},
+ {0x3801, 0x0c},
+ {0x3802, 0x00},
+ {0x3803, 0x04},
+ {0x3804, 0x0a},
+ {0x3805, 0x33},
+ {0x3806, 0x07},
+ {0x3807, 0xa3},
+ {0x3808, 0x05},
+ {0x3809, 0x00},
+ {0x380a, 0x02},
+ {0x380b, 0xd0},
+ {0x380c, 0x06},
+ {0x380d, 0x90},
+ {0x380e, 0x08},
+ {0x380f, 0x08},
+ {0x3811, 0x04},
+ {0x3813, 0x02},
+ {0x3814, 0x03},
+ {0x3815, 0x01},
+ {0x3816, 0x00},
+ {0x3817, 0x00},
+ {0x3818, 0x00},
+ {0x3819, 0x00},
+ {0x3820, 0x94},
+ {0x3821, 0x47},
+ {0x3822, 0x48},
+ {0x3826, 0x00},
+ {0x3827, 0x08},
+ {0x382a, 0x03},
+ {0x382b, 0x01},
+ {0x3830, 0x08},
+ {0x3836, 0x02},
+ {0x3837, 0x00},
+ {0x3838, 0x10},
+ {0x3841, 0xff},
+ {0x3846, 0x48},
+ {0x3861, 0x00},
+ {0x3862, 0x04},
+ {0x3863, 0x06},
+ {0x3a11, 0x01},
+ {0x3a12, 0x78},
+ {0x3b00, 0x00},
+ {0x3b02, 0x00},
+ {0x3b03, 0x00},
+ {0x3b04, 0x00},
+ {0x3b05, 0x00},
+ {0x3c00, 0x89},
+ {0x3c01, 0xab},
+ {0x3c02, 0x01},
+ {0x3c03, 0x00},
+ {0x3c04, 0x00},
+ {0x3c05, 0x03},
+ {0x3c06, 0x00},
+ {0x3c07, 0x05},
+ {0x3c0c, 0x00},
+ {0x3c0d, 0x00},
+ {0x3c0e, 0x00},
+ {0x3c0f, 0x00},
+ {0x3c40, 0x00},
+ {0x3c41, 0xa3},
+ {0x3c43, 0x7d},
+ {0x3c45, 0xd7},
+ {0x3c47, 0xfc},
+ {0x3c50, 0x05},
+ {0x3c52, 0xaa},
+ {0x3c54, 0x71},
+ {0x3c56, 0x80},
+ {0x3d85, 0x17},
+ {0x3f03, 0x00},
+ {0x3f0a, 0x00},
+ {0x3f0b, 0x00},
+ {0x4001, 0x60},
+ {0x4009, 0x05},
+ {0x4020, 0x00},
+ {0x4021, 0x00},
+ {0x4022, 0x00},
+ {0x4023, 0x00},
+ {0x4024, 0x00},
+ {0x4025, 0x00},
+ {0x4026, 0x00},
+ {0x4027, 0x00},
+ {0x4028, 0x00},
+ {0x4029, 0x00},
+ {0x402a, 0x00},
+ {0x402b, 0x00},
+ {0x402c, 0x00},
+ {0x402d, 0x00},
+ {0x402e, 0x00},
+ {0x402f, 0x00},
+ {0x4040, 0x00},
+ {0x4041, 0x03},
+ {0x4042, 0x00},
+ {0x4043, 0x7A},
+ {0x4044, 0x00},
+ {0x4045, 0x7A},
+ {0x4046, 0x00},
+ {0x4047, 0x7A},
+ {0x4048, 0x00},
+ {0x4049, 0x7A},
+ {0x4307, 0x30},
+ {0x4500, 0x58},
+ {0x4501, 0x04},
+ {0x4502, 0x48},
+ {0x4503, 0x10},
+ {0x4508, 0x55},
+ {0x4509, 0x55},
+ {0x450a, 0x00},
+ {0x450b, 0x00},
+ {0x4600, 0x00},
+ {0x4601, 0x80},
+ {0x4700, 0xa4},
+ {0x4800, 0x4c},
+ {0x4816, 0x53},
+ {0x481f, 0x40},
+ {0x4837, 0x13},
+ {0x5000, 0x56},
+ {0x5001, 0x01},
+ {0x5002, 0x28},
+ {0x5004, 0x0c},
+ {0x5006, 0x0c},
+ {0x5007, 0xe0},
+ {0x5008, 0x01},
+ {0x5009, 0xb0},
+ {0x5901, 0x00},
+ {0x5a01, 0x00},
+ {0x5a03, 0x00},
+ {0x5a04, 0x0c},
+ {0x5a05, 0xe0},
+ {0x5a06, 0x09},
+ {0x5a07, 0xb0},
+ {0x5a08, 0x06},
+ {0x5e00, 0x00},
+ {0x3734, 0x40},
+ {0x5b00, 0x01},
+ {0x5b01, 0x10},
+ {0x5b02, 0x01},
+ {0x5b03, 0xdb},
+ {0x3d8c, 0x71},
+ {0x3d8d, 0xea},
+ {0x4017, 0x10},
+ {0x3618, 0x2a},
+ {0x5780, 0x3e},
+ {0x5781, 0x0f},
+ {0x5782, 0x44},
+ {0x5783, 0x02},
+ {0x5784, 0x01},
+ {0x5785, 0x01},
+ {0x5786, 0x00},
+ {0x5787, 0x04},
+ {0x5788, 0x02},
+ {0x5789, 0x0f},
+ {0x578a, 0xfd},
+ {0x578b, 0xf5},
+ {0x578c, 0xf5},
+ {0x578d, 0x03},
+ {0x578e, 0x08},
+ {0x578f, 0x0c},
+ {0x5790, 0x08},
+ {0x5791, 0x06},
+ {0x5792, 0x00},
+ {0x5793, 0x52},
+ {0x5794, 0xa3},
+ {0x3503, 0x00}
+};
+
+static const struct ov5670_reg mode_640x360_regs[] = {
+ {0x3000, 0x00},
+ {0x3002, 0x21},
+ {0x3005, 0xf0},
+ {0x3007, 0x00},
+ {0x3015, 0x0f},
+ {0x3018, 0x32},
+ {0x301a, 0xf0},
+ {0x301b, 0xf0},
+ {0x301c, 0xf0},
+ {0x301d, 0xf0},
+ {0x301e, 0xf0},
+ {0x3030, 0x00},
+ {0x3031, 0x0a},
+ {0x303c, 0xff},
+ {0x303e, 0xff},
+ {0x3040, 0xf0},
+ {0x3041, 0x00},
+ {0x3042, 0xf0},
+ {0x3106, 0x11},
+ {0x3500, 0x00},
+ {0x3501, 0x80},
+ {0x3502, 0x00},
+ {0x3503, 0x04},
+ {0x3504, 0x03},
+ {0x3505, 0x83},
+ {0x3508, 0x04},
+ {0x3509, 0x00},
+ {0x350e, 0x04},
+ {0x350f, 0x00},
+ {0x3510, 0x00},
+ {0x3511, 0x02},
+ {0x3512, 0x00},
+ {0x3601, 0xc8},
+ {0x3610, 0x88},
+ {0x3612, 0x48},
+ {0x3614, 0x5b},
+ {0x3615, 0x96},
+ {0x3621, 0xd0},
+ {0x3622, 0x00},
+ {0x3623, 0x04},
+ {0x3633, 0x13},
+ {0x3634, 0x13},
+ {0x3635, 0x13},
+ {0x3636, 0x13},
+ {0x3645, 0x13},
+ {0x3646, 0x82},
+ {0x3650, 0x00},
+ {0x3652, 0xff},
+ {0x3655, 0x20},
+ {0x3656, 0xff},
+ {0x365a, 0xff},
+ {0x365e, 0xff},
+ {0x3668, 0x00},
+ {0x366a, 0x07},
+ {0x366e, 0x08},
+ {0x366d, 0x00},
+ {0x366f, 0x80},
+ {0x3700, 0x28},
+ {0x3701, 0x10},
+ {0x3702, 0x3a},
+ {0x3703, 0x19},
+ {0x3704, 0x10},
+ {0x3705, 0x00},
+ {0x3706, 0x66},
+ {0x3707, 0x08},
+ {0x3708, 0x34},
+ {0x3709, 0x40},
+ {0x370a, 0x01},
+ {0x370b, 0x1b},
+ {0x3714, 0x24},
+ {0x371a, 0x3e},
+ {0x3733, 0x00},
+ {0x3734, 0x00},
+ {0x373a, 0x05},
+ {0x373b, 0x06},
+ {0x373c, 0x0a},
+ {0x373f, 0xa0},
+ {0x3755, 0x00},
+ {0x3758, 0x00},
+ {0x375b, 0x0e},
+ {0x3766, 0x5f},
+ {0x3768, 0x00},
+ {0x3769, 0x22},
+ {0x3773, 0x08},
+ {0x3774, 0x1f},
+ {0x3776, 0x06},
+ {0x37a0, 0x88},
+ {0x37a1, 0x5c},
+ {0x37a7, 0x88},
+ {0x37a8, 0x70},
+ {0x37aa, 0x88},
+ {0x37ab, 0x48},
+ {0x37b3, 0x66},
+ {0x37c2, 0x04},
+ {0x37c5, 0x00},
+ {0x37c8, 0x00},
+ {0x3800, 0x00},
+ {0x3801, 0x0c},
+ {0x3802, 0x00},
+ {0x3803, 0x04},
+ {0x3804, 0x0a},
+ {0x3805, 0x33},
+ {0x3806, 0x07},
+ {0x3807, 0xa3},
+ {0x3808, 0x02},
+ {0x3809, 0x80},
+ {0x380a, 0x01},
+ {0x380b, 0x68},
+ {0x380c, 0x06},
+ {0x380d, 0x90},
+ {0x380e, 0x08},
+ {0x380f, 0x08},
+ {0x3811, 0x04},
+ {0x3813, 0x02},
+ {0x3814, 0x07},
+ {0x3815, 0x01},
+ {0x3816, 0x00},
+ {0x3817, 0x00},
+ {0x3818, 0x00},
+ {0x3819, 0x00},
+ {0x3820, 0x94},
+ {0x3821, 0xc6},
+ {0x3822, 0x48},
+ {0x3826, 0x00},
+ {0x3827, 0x08},
+ {0x382a, 0x07},
+ {0x382b, 0x01},
+ {0x3830, 0x08},
+ {0x3836, 0x02},
+ {0x3837, 0x00},
+ {0x3838, 0x10},
+ {0x3841, 0xff},
+ {0x3846, 0x48},
+ {0x3861, 0x00},
+ {0x3862, 0x04},
+ {0x3863, 0x06},
+ {0x3a11, 0x01},
+ {0x3a12, 0x78},
+ {0x3b00, 0x00},
+ {0x3b02, 0x00},
+ {0x3b03, 0x00},
+ {0x3b04, 0x00},
+ {0x3b05, 0x00},
+ {0x3c00, 0x89},
+ {0x3c01, 0xab},
+ {0x3c02, 0x01},
+ {0x3c03, 0x00},
+ {0x3c04, 0x00},
+ {0x3c05, 0x03},
+ {0x3c06, 0x00},
+ {0x3c07, 0x05},
+ {0x3c0c, 0x00},
+ {0x3c0d, 0x00},
+ {0x3c0e, 0x00},
+ {0x3c0f, 0x00},
+ {0x3c40, 0x00},
+ {0x3c41, 0xa3},
+ {0x3c43, 0x7d},
+ {0x3c45, 0xd7},
+ {0x3c47, 0xfc},
+ {0x3c50, 0x05},
+ {0x3c52, 0xaa},
+ {0x3c54, 0x71},
+ {0x3c56, 0x80},
+ {0x3d85, 0x17},
+ {0x3f03, 0x00},
+ {0x3f0a, 0x00},
+ {0x3f0b, 0x00},
+ {0x4001, 0x60},
+ {0x4009, 0x05},
+ {0x4020, 0x00},
+ {0x4021, 0x00},
+ {0x4022, 0x00},
+ {0x4023, 0x00},
+ {0x4024, 0x00},
+ {0x4025, 0x00},
+ {0x4026, 0x00},
+ {0x4027, 0x00},
+ {0x4028, 0x00},
+ {0x4029, 0x00},
+ {0x402a, 0x00},
+ {0x402b, 0x00},
+ {0x402c, 0x00},
+ {0x402d, 0x00},
+ {0x402e, 0x00},
+ {0x402f, 0x00},
+ {0x4040, 0x00},
+ {0x4041, 0x03},
+ {0x4042, 0x00},
+ {0x4043, 0x7A},
+ {0x4044, 0x00},
+ {0x4045, 0x7A},
+ {0x4046, 0x00},
+ {0x4047, 0x7A},
+ {0x4048, 0x00},
+ {0x4049, 0x7A},
+ {0x4307, 0x30},
+ {0x4500, 0x58},
+ {0x4501, 0x04},
+ {0x4502, 0x40},
+ {0x4503, 0x10},
+ {0x4508, 0x55},
+ {0x4509, 0x55},
+ {0x450a, 0x02},
+ {0x450b, 0x00},
+ {0x4600, 0x00},
+ {0x4601, 0x40},
+ {0x4700, 0xa4},
+ {0x4800, 0x4c},
+ {0x4816, 0x53},
+ {0x481f, 0x40},
+ {0x4837, 0x13},
+ {0x5000, 0x56},
+ {0x5001, 0x01},
+ {0x5002, 0x28},
+ {0x5004, 0x0c},
+ {0x5006, 0x0c},
+ {0x5007, 0xe0},
+ {0x5008, 0x01},
+ {0x5009, 0xb0},
+ {0x5901, 0x00},
+ {0x5a01, 0x00},
+ {0x5a03, 0x00},
+ {0x5a04, 0x0c},
+ {0x5a05, 0xe0},
+ {0x5a06, 0x09},
+ {0x5a07, 0xb0},
+ {0x5a08, 0x06},
+ {0x5e00, 0x00},
+ {0x3734, 0x40},
+ {0x5b00, 0x01},
+ {0x5b01, 0x10},
+ {0x5b02, 0x01},
+ {0x5b03, 0xdb},
+ {0x3d8c, 0x71},
+ {0x3d8d, 0xea},
+ {0x4017, 0x10},
+ {0x3618, 0x2a},
+ {0x5780, 0x3e},
+ {0x5781, 0x0f},
+ {0x5782, 0x44},
+ {0x5783, 0x02},
+ {0x5784, 0x01},
+ {0x5785, 0x01},
+ {0x5786, 0x00},
+ {0x5787, 0x04},
+ {0x5788, 0x02},
+ {0x5789, 0x0f},
+ {0x578a, 0xfd},
+ {0x578b, 0xf5},
+ {0x578c, 0xf5},
+ {0x578d, 0x03},
+ {0x578e, 0x08},
+ {0x578f, 0x0c},
+ {0x5790, 0x08},
+ {0x5791, 0x06},
+ {0x5792, 0x00},
+ {0x5793, 0x52},
+ {0x5794, 0xa3},
+ {0x3503, 0x00}
+};
+
+static const char * const ov5670_test_pattern_menu[] = {
+ "Disabled",
+ "Vertical Color Bar Type 1",
+};
+
+/* Supported link frequencies */
+#define OV5670_LINK_FREQ_422MHZ 422400000
+#define OV5670_LINK_FREQ_422MHZ_INDEX 0
+static const struct ov5670_link_freq_config link_freq_configs[] = {
+ {
+ /* pixel_rate = link_freq * 2 * nr_of_lanes / bits_per_sample */
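+ /*
+ * Here link_freq = 422.4 MHz, nr_of_lanes = 2 and bits_per_sample = 10,
+ * so pixel_rate = (422400000 * 2 * 2) / 10 = 168960000 pixel/s.
+ */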
+ .pixel_rate = (OV5670_LINK_FREQ_422MHZ * 2 * 2) / 10,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mipi_data_rate_840mbps),
+ .regs = mipi_data_rate_840mbps,
+ }
+ }
+};
+
+static const s64 link_freq_menu_items[] = {
+ OV5670_LINK_FREQ_422MHZ
+};
+
+/*
+ * The OV5670 sensor supports the following resolutions with full FOV:
+ * 4:3 ==> {2592x1944, 1296x972, 648x486}
+ * 16:9 ==> {2560x1440, 1280x720, 640x360}
+ */
+static const struct ov5670_mode supported_modes[] = {
+ {
+ .width = 2592,
+ .height = 1944,
+ .vts_def = OV5670_VTS_30FPS,
+ .vts_min = OV5670_VTS_30FPS,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_2592x1944_regs),
+ .regs = mode_2592x1944_regs,
+ },
+ .link_freq_index = OV5670_LINK_FREQ_422MHZ_INDEX,
+ },
+ {
+ .width = 1296,
+ .height = 972,
+ .vts_def = OV5670_VTS_30FPS,
+ .vts_min = 996,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_1296x972_regs),
+ .regs = mode_1296x972_regs,
+ },
+ .link_freq_index = OV5670_LINK_FREQ_422MHZ_INDEX,
+ },
+ {
+ .width = 648,
+ .height = 486,
+ .vts_def = OV5670_VTS_30FPS,
+ .vts_min = 516,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_648x486_regs),
+ .regs = mode_648x486_regs,
+ },
+ .link_freq_index = OV5670_LINK_FREQ_422MHZ_INDEX,
+ },
+ {
+ .width = 2560,
+ .height = 1440,
+ .vts_def = OV5670_VTS_30FPS,
+ .vts_min = OV5670_VTS_30FPS,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_2560x1440_regs),
+ .regs = mode_2560x1440_regs,
+ },
+ .link_freq_index = OV5670_LINK_FREQ_422MHZ_INDEX,
+ },
+ {
+ .width = 1280,
+ .height = 720,
+ .vts_def = OV5670_VTS_30FPS,
+ .vts_min = 1020,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_1280x720_regs),
+ .regs = mode_1280x720_regs,
+ },
+ .link_freq_index = OV5670_LINK_FREQ_422MHZ_INDEX,
+ },
+ {
+ .width = 640,
+ .height = 360,
+ .vts_def = OV5670_VTS_30FPS,
+ .vts_min = 510,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_640x360_regs),
+ .regs = mode_640x360_regs,
+ },
+ .link_freq_index = OV5670_LINK_FREQ_422MHZ_INDEX,
+ }
+};
+
+struct ov5670 {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+
+ struct v4l2_ctrl_handler ctrl_handler;
+ /* V4L2 Controls */
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *pixel_rate;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *exposure;
+
+ /* Current mode */
+ const struct ov5670_mode *cur_mode;
+
+ /* To serialize asynchronous callbacks */
+ struct mutex mutex;
+
+ /* Streaming on/off */
+ bool streaming;
+};
+
+#define to_ov5670(_sd) container_of(_sd, struct ov5670, sd)
+
+/* Read registers up to 4 at a time */
+static int ov5670_read_reg(struct ov5670 *ov5670, u16 reg, unsigned int len,
+ u32 *val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov5670->sd);
+ struct i2c_msg msgs[2];
+ u8 *data_be_p;
+ u32 data_be = 0;
+ u16 reg_addr_be = cpu_to_be16(reg);
+ int ret;
+
+ if (len > 4)
+ return -EINVAL;
+
+ data_be_p = (u8 *)&data_be;
+ /* Write register address */
+ msgs[0].addr = client->addr;
+ msgs[0].flags = 0;
+ msgs[0].len = 2;
+ msgs[0].buf = (u8 *)&reg_addr_be;
+
+ /* Read data from register */
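+ /*
+ * The sensor returns data in big-endian order; reading into the tail
+ * of data_be (&data_be_p[4 - len]) keeps the bytes at its least
+ * significant end, so be32_to_cpu() below right-aligns the value in
+ * *val for any len <= 4.
+ */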
+ msgs[1].addr = client->addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = len;
+ msgs[1].buf = &data_be_p[4 - len];
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret != ARRAY_SIZE(msgs))
+ return -EIO;
+
+ *val = be32_to_cpu(data_be);
+
+ return 0;
+}
+
+/* Write registers up to 4 at a time */
+static int ov5670_write_reg(struct ov5670 *ov5670, u16 reg, unsigned int len,
+ u32 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov5670->sd);
+ int buf_i;
+ int val_i;
+ u8 buf[6];
+ u8 *val_p;
+
+ if (len > 4)
+ return -EINVAL;
+
+ buf[0] = reg >> 8;
+ buf[1] = reg & 0xff;
+
+ val = cpu_to_be32(val);
+ val_p = (u8 *)&val;
+ buf_i = 2;
+ val_i = 4 - len;
+
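+ /*
+ * val is big-endian at this point; copy only its 'len' least
+ * significant bytes into the buffer, right after the 2-byte register
+ * address.
+ */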
+ while (val_i < 4)
+ buf[buf_i++] = val_p[val_i++];
+
+ if (i2c_master_send(client, buf, len + 2) != len + 2)
+ return -EIO;
+
+ return 0;
+}
+
+/* Write a list of registers */
+static int ov5670_write_regs(struct ov5670 *ov5670,
+ const struct ov5670_reg *regs, unsigned int len)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov5670->sd);
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < len; i++) {
+ ret = ov5670_write_reg(ov5670, regs[i].address, 1, regs[i].val);
+ if (ret) {
+ dev_err_ratelimited(
+ &client->dev,
+ "Failed to write reg 0x%4.4x. error = %d\n",
+ regs[i].address, ret);
+
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ov5670_write_reg_list(struct ov5670 *ov5670,
+ const struct ov5670_reg_list *r_list)
+{
+ return ov5670_write_regs(ov5670, r_list->regs, r_list->num_of_regs);
+}
+
+/* Open sub-device */
+static int ov5670_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct ov5670 *ov5670 = to_ov5670(sd);
+ struct v4l2_mbus_framefmt *try_fmt =
+ v4l2_subdev_get_try_format(sd, fh->pad, 0);
+
+ mutex_lock(&ov5670->mutex);
+
+ /* Initialize try_fmt */
+ try_fmt->width = ov5670->cur_mode->width;
+ try_fmt->height = ov5670->cur_mode->height;
+ try_fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ try_fmt->field = V4L2_FIELD_NONE;
+
+ /* No crop or compose */
+ mutex_unlock(&ov5670->mutex);
+
+ return 0;
+}
+
+static int ov5670_update_digital_gain(struct ov5670 *ov5670, u32 d_gain)
+{
+ int ret;
+
+ ret = ov5670_write_reg(ov5670, OV5670_REG_R_DGTL_GAIN,
+ OV5670_REG_VALUE_16BIT, d_gain);
+ if (ret)
+ return ret;
+
+ ret = ov5670_write_reg(ov5670, OV5670_REG_G_DGTL_GAIN,
+ OV5670_REG_VALUE_16BIT, d_gain);
+ if (ret)
+ return ret;
+
+ return ov5670_write_reg(ov5670, OV5670_REG_B_DGTL_GAIN,
+ OV5670_REG_VALUE_16BIT, d_gain);
+}
+
+static int ov5670_enable_test_pattern(struct ov5670 *ov5670, u32 pattern)
+{
+ u32 val;
+ int ret;
+
+ /* Set the Bayer order that we support */
+ ret = ov5670_write_reg(ov5670, OV5670_REG_TEST_PATTERN_CTRL,
+ OV5670_REG_VALUE_08BIT, 0);
+ if (ret)
+ return ret;
+
+ ret = ov5670_read_reg(ov5670, OV5670_REG_TEST_PATTERN,
+ OV5670_REG_VALUE_08BIT, &val);
+ if (ret)
+ return ret;
+
+ if (pattern)
+ val |= OV5670_TEST_PATTERN_ENABLE;
+ else
+ val &= ~OV5670_TEST_PATTERN_ENABLE;
+
+ return ov5670_write_reg(ov5670, OV5670_REG_TEST_PATTERN,
+ OV5670_REG_VALUE_08BIT, val);
+}
+
+/* Apply a V4L2 control value to the sensor */
+static int ov5670_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ov5670 *ov5670 = container_of(ctrl->handler,
+ struct ov5670, ctrl_handler);
+ struct i2c_client *client = v4l2_get_subdevdata(&ov5670->sd);
+ s64 max;
+ int ret = 0;
+
+ /* Propagate change of current control to all related controls */
+ switch (ctrl->id) {
+ case V4L2_CID_VBLANK:
+ /* Update max exposure while meeting expected vblanking */
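+ /* i.e. exposure may be at most VTS (height + vblank) - 8 lines */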
+ max = ov5670->cur_mode->height + ctrl->val - 8;
+ __v4l2_ctrl_modify_range(ov5670->exposure,
+ ov5670->exposure->minimum, max,
+ ov5670->exposure->step, max);
+ break;
+ }
+
+ /* V4L2 control values will be applied only when power is already up */
+ if (pm_runtime_get_if_in_use(&client->dev) <= 0)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_ANALOGUE_GAIN:
+ ret = ov5670_write_reg(ov5670, OV5670_REG_ANALOG_GAIN,
+ OV5670_REG_VALUE_16BIT, ctrl->val);
+ break;
+ case V4L2_CID_DIGITAL_GAIN:
+ ret = ov5670_update_digital_gain(ov5670, ctrl->val);
+ break;
+ case V4L2_CID_EXPOSURE:
+ /* 4 least significant bits of exposure are fractional part */
+ ret = ov5670_write_reg(ov5670, OV5670_REG_EXPOSURE,
+ OV5670_REG_VALUE_24BIT, ctrl->val << 4);
+ break;
+ case V4L2_CID_VBLANK:
+ /* Update VTS that meets expected vertical blanking */
+ ret = ov5670_write_reg(ov5670, OV5670_REG_VTS,
+ OV5670_REG_VALUE_16BIT,
+ ov5670->cur_mode->height + ctrl->val);
+ break;
+ case V4L2_CID_TEST_PATTERN:
+ ret = ov5670_enable_test_pattern(ov5670, ctrl->val);
+ break;
+ default:
+ dev_info(&client->dev, "%s Unhandled id:0x%x, val:0x%x\n",
+ __func__, ctrl->id, ctrl->val);
+ break;
+ }
+
+ pm_runtime_put(&client->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops ov5670_ctrl_ops = {
+ .s_ctrl = ov5670_set_ctrl,
+};
+
+/* Initialize control handlers */
+static int ov5670_init_controls(struct ov5670 *ov5670)
+{
+ struct v4l2_ctrl_handler *ctrl_hdlr;
+ s64 vblank_max;
+ s64 vblank_def;
+ s64 vblank_min;
+ s64 exposure_max;
+ int ret;
+
+ ctrl_hdlr = &ov5670->ctrl_handler;
+ ret = v4l2_ctrl_handler_init(ctrl_hdlr, 8);
+ if (ret)
+ return ret;
+
+ ctrl_hdlr->lock = &ov5670->mutex;
+ ov5670->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr,
+ &ov5670_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ 0, 0, link_freq_menu_items);
+ if (ov5670->link_freq)
+ ov5670->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ /* By default, V4L2_CID_PIXEL_RATE is read only */
+ ov5670->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov5670_ctrl_ops,
+ V4L2_CID_PIXEL_RATE, 0,
+ link_freq_configs[0].pixel_rate,
+ 1,
+ link_freq_configs[0].pixel_rate);
+
+ vblank_max = OV5670_VTS_MAX - ov5670->cur_mode->height;
+ vblank_def = ov5670->cur_mode->vts_def - ov5670->cur_mode->height;
+ vblank_min = ov5670->cur_mode->vts_min - ov5670->cur_mode->height;
+ ov5670->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov5670_ctrl_ops,
+ V4L2_CID_VBLANK, vblank_min,
+ vblank_max, 1, vblank_def);
+
+ ov5670->hblank = v4l2_ctrl_new_std(
+ ctrl_hdlr, &ov5670_ctrl_ops, V4L2_CID_HBLANK,
+ OV5670_FIXED_PPL - ov5670->cur_mode->width,
+ OV5670_FIXED_PPL - ov5670->cur_mode->width, 1,
+ OV5670_FIXED_PPL - ov5670->cur_mode->width);
+ if (ov5670->hblank)
+ ov5670->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ /* Get min, max, step, default from sensor */
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov5670_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
+ ANALOG_GAIN_MIN, ANALOG_GAIN_MAX, ANALOG_GAIN_STEP,
+ ANALOG_GAIN_DEFAULT);
+
+ /* Digital gain */
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov5670_ctrl_ops, V4L2_CID_DIGITAL_GAIN,
+ OV5670_DGTL_GAIN_MIN, OV5670_DGTL_GAIN_MAX,
+ OV5670_DGTL_GAIN_STEP, OV5670_DGTL_GAIN_DEFAULT);
+
+ /* Get min, max, step, default from sensor */
+ exposure_max = ov5670->cur_mode->vts_def - 8;
+ ov5670->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &ov5670_ctrl_ops,
+ V4L2_CID_EXPOSURE,
+ OV5670_EXPOSURE_MIN,
+ exposure_max, OV5670_EXPOSURE_STEP,
+ exposure_max);
+
+ v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &ov5670_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(ov5670_test_pattern_menu) - 1,
+ 0, 0, ov5670_test_pattern_menu);
+
+ if (ctrl_hdlr->error) {
+ ret = ctrl_hdlr->error;
+ goto error;
+ }
+
+ ov5670->sd.ctrl_handler = ctrl_hdlr;
+
+ return 0;
+
+error:
+ v4l2_ctrl_handler_free(ctrl_hdlr);
+
+ return ret;
+}
+
+static int ov5670_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ /* Only one Bayer order (GRBG) is supported */
+ if (code->index > 0)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+
+ return 0;
+}
+
+static int ov5670_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->index >= ARRAY_SIZE(supported_modes))
+ return -EINVAL;
+
+ if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
+ return -EINVAL;
+
+ fse->min_width = supported_modes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->min_height = supported_modes[fse->index].height;
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
+/* Calculate resolution distance */
+static int ov5670_get_reso_dist(const struct ov5670_mode *mode,
+ struct v4l2_mbus_framefmt *framefmt)
+{
+ return abs(mode->width - framefmt->width) +
+ abs(mode->height - framefmt->height);
+}
+
+/* Find the closest supported resolution to the requested resolution */
+static const struct ov5670_mode *ov5670_find_best_fit(
+ struct ov5670 *ov5670,
+ struct v4l2_subdev_format *fmt)
+{
+ struct v4l2_mbus_framefmt *framefmt = &fmt->format;
+ int dist;
+ int cur_best_fit = 0;
+ int cur_best_fit_dist = -1;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(supported_modes); i++) {
+ dist = ov5670_get_reso_dist(&supported_modes[i], framefmt);
+ if (cur_best_fit_dist == -1 || dist < cur_best_fit_dist) {
+ cur_best_fit_dist = dist;
+ cur_best_fit = i;
+ }
+ }
+
+ return &supported_modes[cur_best_fit];
+}
+
+static void ov5670_update_pad_format(const struct ov5670_mode *mode,
+ struct v4l2_subdev_format *fmt)
+{
+ fmt->format.width = mode->width;
+ fmt->format.height = mode->height;
+ fmt->format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ fmt->format.field = V4L2_FIELD_NONE;
+}
+
+static int ov5670_do_get_pad_format(struct ov5670 *ov5670,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ fmt->format = *v4l2_subdev_get_try_format(&ov5670->sd, cfg,
+ fmt->pad);
+ else
+ ov5670_update_pad_format(ov5670->cur_mode, fmt);
+
+ return 0;
+}
+
+static int ov5670_get_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct ov5670 *ov5670 = to_ov5670(sd);
+ int ret;
+
+ mutex_lock(&ov5670->mutex);
+ ret = ov5670_do_get_pad_format(ov5670, cfg, fmt);
+ mutex_unlock(&ov5670->mutex);
+
+ return ret;
+}
+
+static int ov5670_set_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct ov5670 *ov5670 = to_ov5670(sd);
+ const struct ov5670_mode *mode;
+ s32 vblank_def;
+ s32 h_blank;
+
+ mutex_lock(&ov5670->mutex);
+
+ fmt->format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
+
+ mode = ov5670_find_best_fit(ov5670, fmt);
+ ov5670_update_pad_format(mode, fmt);
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ *v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format;
+ } else {
+ ov5670->cur_mode = mode;
+ __v4l2_ctrl_s_ctrl(ov5670->link_freq, mode->link_freq_index);
+ __v4l2_ctrl_s_ctrl_int64(
+ ov5670->pixel_rate,
+ link_freq_configs[mode->link_freq_index].pixel_rate);
+ /* Update limits and set FPS to default */
+ vblank_def = ov5670->cur_mode->vts_def -
+ ov5670->cur_mode->height;
+ __v4l2_ctrl_modify_range(
+ ov5670->vblank,
+ ov5670->cur_mode->vts_min - ov5670->cur_mode->height,
+ OV5670_VTS_MAX - ov5670->cur_mode->height, 1,
+ vblank_def);
+ __v4l2_ctrl_s_ctrl(ov5670->vblank, vblank_def);
+ h_blank = OV5670_FIXED_PPL - ov5670->cur_mode->width;
+ __v4l2_ctrl_modify_range(ov5670->hblank, h_blank, h_blank, 1,
+ h_blank);
+ }
+
+ mutex_unlock(&ov5670->mutex);
+
+ return 0;
+}
+
+static int ov5670_get_skip_frames(struct v4l2_subdev *sd, u32 *frames)
+{
+ *frames = OV5670_NUM_OF_SKIP_FRAMES;
+
+ return 0;
+}
+
+/* Write default and customized values, then start streaming */
+static int ov5670_start_streaming(struct ov5670 *ov5670)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov5670->sd);
+ const struct ov5670_reg_list *reg_list;
+ int link_freq_index;
+ int ret;
+
+ /* Get out of software reset */
+ ret = ov5670_write_reg(ov5670, OV5670_REG_SOFTWARE_RST,
+ OV5670_REG_VALUE_08BIT, OV5670_SOFTWARE_RST);
+ if (ret) {
+ dev_err(&client->dev, "%s failed to set powerup registers\n",
+ __func__);
+ return ret;
+ }
+
+ /* Setup PLL */
+ link_freq_index = ov5670->cur_mode->link_freq_index;
+ reg_list = &link_freq_configs[link_freq_index].reg_list;
+ ret = ov5670_write_reg_list(ov5670, reg_list);
+ if (ret) {
+ dev_err(&client->dev, "%s failed to set plls\n", __func__);
+ return ret;
+ }
+
+ /* Apply default values of current mode */
+ reg_list = &ov5670->cur_mode->reg_list;
+ ret = ov5670_write_reg_list(ov5670, reg_list);
+ if (ret) {
+ dev_err(&client->dev, "%s failed to set mode\n", __func__);
+ return ret;
+ }
+
+ ret = __v4l2_ctrl_handler_setup(ov5670->sd.ctrl_handler);
+ if (ret)
+ return ret;
+
+ /* Write stream on list */
+ ret = ov5670_write_reg(ov5670, OV5670_REG_MODE_SELECT,
+ OV5670_REG_VALUE_08BIT, OV5670_MODE_STREAMING);
+ if (ret) {
+ dev_err(&client->dev, "%s failed to set stream\n", __func__);
+ return ret;
+ }
+
+ ov5670->streaming = true;
+
+ return 0;
+}
+
+static int ov5670_stop_streaming(struct ov5670 *ov5670)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov5670->sd);
+ int ret;
+
+ ret = ov5670_write_reg(ov5670, OV5670_REG_MODE_SELECT,
+ OV5670_REG_VALUE_08BIT, OV5670_MODE_STANDBY);
+ if (ret)
+ dev_err(&client->dev, "%s failed to set stream\n", __func__);
+
+ ov5670->streaming = false;
+
+ /* Return success even if it was an error, as there is nothing the
+ * caller can do about it.
+ */
+ return 0;
+}
+
+static int ov5670_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct ov5670 *ov5670 = to_ov5670(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ mutex_lock(&ov5670->mutex);
+ if (ov5670->streaming == enable)
+ goto unlock_and_return;
+
+ if (enable) {
+ ret = pm_runtime_get_sync(&client->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&client->dev);
+ goto unlock_and_return;
+ }
+
+ ret = ov5670_start_streaming(ov5670);
+ if (ret)
+ goto error;
+ } else {
+ ret = ov5670_stop_streaming(ov5670);
+ pm_runtime_put(&client->dev);
+ }
+ goto unlock_and_return;
+
+error:
+ pm_runtime_put(&client->dev);
+
+unlock_and_return:
+ mutex_unlock(&ov5670->mutex);
+
+ return ret;
+}
+
+static int __maybe_unused ov5670_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov5670 *ov5670 = to_ov5670(sd);
+
+ if (ov5670->streaming)
+ ov5670_stop_streaming(ov5670);
+
+ return 0;
+}
+
+static int __maybe_unused ov5670_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov5670 *ov5670 = to_ov5670(sd);
+ int ret;
+
+ if (ov5670->streaming) {
+ ret = ov5670_start_streaming(ov5670);
+ if (ret) {
+ ov5670_stop_streaming(ov5670);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/* Verify chip ID */
+static int ov5670_identify_module(struct ov5670 *ov5670)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov5670->sd);
+ int ret;
+ u32 val;
+
+ ret = ov5670_read_reg(ov5670, OV5670_REG_CHIP_ID,
+ OV5670_REG_VALUE_24BIT, &val);
+ if (ret)
+ return ret;
+
+ if (val != OV5670_CHIP_ID) {
+ dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
+ OV5670_CHIP_ID, val);
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops ov5670_video_ops = {
+ .s_stream = ov5670_set_stream,
+};
+
+static const struct v4l2_subdev_pad_ops ov5670_pad_ops = {
+ .enum_mbus_code = ov5670_enum_mbus_code,
+ .get_fmt = ov5670_get_pad_format,
+ .set_fmt = ov5670_set_pad_format,
+ .enum_frame_size = ov5670_enum_frame_size,
+};
+
+static const struct v4l2_subdev_sensor_ops ov5670_sensor_ops = {
+ .g_skip_frames = ov5670_get_skip_frames,
+};
+
+static const struct v4l2_subdev_ops ov5670_subdev_ops = {
+ .video = &ov5670_video_ops,
+ .pad = &ov5670_pad_ops,
+ .sensor = &ov5670_sensor_ops,
+};
+
+static const struct media_entity_operations ov5670_subdev_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_internal_ops ov5670_internal_ops = {
+ .open = ov5670_open,
+};
+
+static int ov5670_probe(struct i2c_client *client)
+{
+ struct ov5670 *ov5670;
+ const char *err_msg;
+ u32 input_clk = 0;
+ int ret;
+
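+ /* Only a 19.2 MHz external clock is supported; refuse anything else */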
+ device_property_read_u32(&client->dev, "clock-frequency", &input_clk);
+ if (input_clk != 19200000)
+ return -EINVAL;
+
+ ov5670 = devm_kzalloc(&client->dev, sizeof(*ov5670), GFP_KERNEL);
+ if (!ov5670) {
+ ret = -ENOMEM;
+ err_msg = "devm_kzalloc() error";
+ goto error_print;
+ }
+
+ /* Initialize subdev */
+ v4l2_i2c_subdev_init(&ov5670->sd, client, &ov5670_subdev_ops);
+
+ /* Check module identity */
+ ret = ov5670_identify_module(ov5670);
+ if (ret) {
+ err_msg = "ov5670_identify_module() error";
+ goto error_print;
+ }
+
+ mutex_init(&ov5670->mutex);
+
+ /* Set default mode to max resolution */
+ ov5670->cur_mode = &supported_modes[0];
+
+ ret = ov5670_init_controls(ov5670);
+ if (ret) {
+ err_msg = "ov5670_init_controls() error";
+ goto error_mutex_destroy;
+ }
+
+ ov5670->sd.internal_ops = &ov5670_internal_ops;
+ ov5670->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ ov5670->sd.entity.ops = &ov5670_subdev_entity_ops;
+ ov5670->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+
+ /* Source pad initialization */
+ ov5670->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&ov5670->sd.entity, 1, &ov5670->pad);
+ if (ret) {
+ err_msg = "media_entity_pads_init() error";
+ goto error_handler_free;
+ }
+
+ /* Async register for subdev */
+ ret = v4l2_async_register_subdev(&ov5670->sd);
+ if (ret < 0) {
+ err_msg = "v4l2_async_register_subdev() error";
+ goto error_entity_cleanup;
+ }
+
+ ov5670->streaming = false;
+
+ /*
+ * Device is already turned on by i2c-core with ACPI domain PM.
+ * Enable runtime PM and turn off the device.
+ */
+ pm_runtime_get_noresume(&client->dev);
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+ pm_runtime_put(&client->dev);
+
+ return 0;
+
+error_entity_cleanup:
+ media_entity_cleanup(&ov5670->sd.entity);
+
+error_handler_free:
+ v4l2_ctrl_handler_free(ov5670->sd.ctrl_handler);
+
+error_mutex_destroy:
+ mutex_destroy(&ov5670->mutex);
+
+error_print:
+ dev_err(&client->dev, "%s: %s %d\n", __func__, err_msg, ret);
+
+ return ret;
+}
+
+static int ov5670_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov5670 *ov5670 = to_ov5670(sd);
+
+ v4l2_async_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+ mutex_destroy(&ov5670->mutex);
+
+ /*
+ * Disable runtime PM but keep the device turned on.
+ * i2c-core with ACPI domain PM will turn off the device.
+ */
+ pm_runtime_get_sync(&client->dev);
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+ pm_runtime_put_noidle(&client->dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops ov5670_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ov5670_suspend, ov5670_resume)
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id ov5670_acpi_ids[] = {
+ {"INT3479"},
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(acpi, ov5670_acpi_ids);
+#endif
+
+static struct i2c_driver ov5670_i2c_driver = {
+ .driver = {
+ .name = "ov5670",
+ .pm = &ov5670_pm_ops,
+ .acpi_match_table = ACPI_PTR(ov5670_acpi_ids),
+ },
+ .probe_new = ov5670_probe,
+ .remove = ov5670_remove,
+};
+
+module_i2c_driver(ov5670_i2c_driver);
+
+MODULE_AUTHOR("Rapolu, Chiranjeevi <chiranjeevi.rapolu@intel.com>");
+MODULE_AUTHOR("Yang, Hyungwoo <hyungwoo.yang@intel.com>");
+MODULE_DESCRIPTION("Omnivision ov5670 sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/soc_camera/ov6650.c b/drivers/media/i2c/ov6650.c
index d2be64d54b22..768f2950ea36 100644
--- a/drivers/media/i2c/soc_camera/ov6650.c
+++ b/drivers/media/i2c/ov6650.c
@@ -1,5 +1,5 @@
/*
- * V4L2 SoC Camera driver for OmniVision OV6650 Camera Sensor
+ * V4L2 subdevice driver for OmniVision OV6650 Camera Sensor
*
* Copyright (C) 2010 Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>
*
@@ -31,9 +31,9 @@
#include <linux/v4l2-mediabus.h>
#include <linux/module.h>
-#include <media/soc_camera.h>
#include <media/v4l2-clk.h>
#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
/* Register definitions */
#define REG_GAIN 0x00 /* range 00 - 3F */
@@ -426,10 +426,15 @@ static int ov6650_set_register(struct v4l2_subdev *sd,
static int ov6650_s_power(struct v4l2_subdev *sd, int on)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
struct ov6650 *priv = to_ov6650(client);
+ int ret = 0;
- return soc_camera_set_power(&client->dev, ssdd, priv->clk, on);
+ if (on)
+ ret = v4l2_clk_enable(priv->clk);
+ else
+ v4l2_clk_disable(priv->clk);
+
+ return ret;
}
static int ov6650_get_selection(struct v4l2_subdev *sd,
@@ -471,14 +476,13 @@ static int ov6650_set_selection(struct v4l2_subdev *sd,
sel->target != V4L2_SEL_TGT_CROP)
return -EINVAL;
- rect.left = ALIGN(rect.left, 2);
- rect.width = ALIGN(rect.width, 2);
- rect.top = ALIGN(rect.top, 2);
- rect.height = ALIGN(rect.height, 2);
- soc_camera_limit_side(&rect.left, &rect.width,
- DEF_HSTRT << 1, 2, W_CIF);
- soc_camera_limit_side(&rect.top, &rect.height,
- DEF_VSTRT << 1, 2, H_CIF);
+ v4l_bound_align_image(&rect.width, 2, W_CIF, 1,
+ &rect.height, 2, H_CIF, 1, 0);
+ v4l_bound_align_image(&rect.left, DEF_HSTRT << 1,
+ (DEF_HSTRT << 1) + W_CIF - (__s32)rect.width, 1,
+ &rect.top, DEF_VSTRT << 1,
+ (DEF_VSTRT << 1) + H_CIF - (__s32)rect.height, 1,
+ 0);
ret = ov6650_reg_write(client, REG_HSTRT, rect.left >> 1);
if (!ret) {
@@ -547,8 +551,6 @@ static u8 to_clkrc(struct v4l2_fract *timeperframe,
static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_device *icd = v4l2_get_subdev_hostdata(sd);
- struct soc_camera_sense *sense = icd->sense;
struct ov6650 *priv = to_ov6650(client);
bool half_scale = !is_unscaled_ok(mf->width, mf->height, &priv->rect);
struct v4l2_subdev_selection sel = {
@@ -640,32 +642,10 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
}
priv->half_scale = half_scale;
- if (sense) {
- if (sense->master_clock == 8000000) {
- dev_dbg(&client->dev, "8MHz input clock\n");
- clkrc = CLKRC_6MHz;
- } else if (sense->master_clock == 12000000) {
- dev_dbg(&client->dev, "12MHz input clock\n");
- clkrc = CLKRC_12MHz;
- } else if (sense->master_clock == 16000000) {
- dev_dbg(&client->dev, "16MHz input clock\n");
- clkrc = CLKRC_16MHz;
- } else if (sense->master_clock == 24000000) {
- dev_dbg(&client->dev, "24MHz input clock\n");
- clkrc = CLKRC_24MHz;
- } else {
- dev_err(&client->dev,
- "unsupported input clock, check platform data\n");
- return -EINVAL;
- }
- mclk = sense->master_clock;
- priv->pclk_limit = sense->pixel_clock_max;
- } else {
- clkrc = CLKRC_24MHz;
- mclk = 24000000;
- priv->pclk_limit = 0;
- dev_dbg(&client->dev, "using default 24MHz input clock\n");
- }
+ clkrc = CLKRC_12MHz;
+ mclk = 12000000;
+ priv->pclk_limit = 1334000;
+ dev_dbg(&client->dev, "using 12MHz input clock\n");
clkrc |= to_clkrc(&priv->tpf, priv->pclk_limit, priv->pclk_max);
@@ -899,8 +879,6 @@ static const struct v4l2_subdev_core_ops ov6650_core_ops = {
static int ov6650_g_mbus_config(struct v4l2_subdev *sd,
struct v4l2_mbus_config *cfg)
{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
cfg->flags = V4L2_MBUS_MASTER |
V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING |
@@ -908,7 +886,6 @@ static int ov6650_g_mbus_config(struct v4l2_subdev *sd,
V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_LOW |
V4L2_MBUS_DATA_ACTIVE_HIGH;
cfg->type = V4L2_MBUS_PARALLEL;
- cfg->flags = soc_camera_apply_board_flags(ssdd, cfg);
return 0;
}
@@ -918,25 +895,23 @@ static int ov6650_s_mbus_config(struct v4l2_subdev *sd,
const struct v4l2_mbus_config *cfg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
- unsigned long flags = soc_camera_apply_board_flags(ssdd, cfg);
int ret;
- if (flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
+ if (cfg->flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
ret = ov6650_reg_rmw(client, REG_COMJ, COMJ_PCLK_RISING, 0);
else
ret = ov6650_reg_rmw(client, REG_COMJ, 0, COMJ_PCLK_RISING);
if (ret)
return ret;
- if (flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
+ if (cfg->flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
ret = ov6650_reg_rmw(client, REG_COMF, COMF_HREF_LOW, 0);
else
ret = ov6650_reg_rmw(client, REG_COMF, 0, COMF_HREF_LOW);
if (ret)
return ret;
- if (flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
+ if (cfg->flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
ret = ov6650_reg_rmw(client, REG_COMJ, COMJ_VSYNC_HIGH, 0);
else
ret = ov6650_reg_rmw(client, REG_COMJ, 0, COMJ_VSYNC_HIGH);
@@ -973,14 +948,8 @@ static int ov6650_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
struct ov6650 *priv;
- struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client);
int ret;
- if (!ssdd) {
- dev_err(&client->dev, "Missing platform_data for driver\n");
- return -EINVAL;
- }
-
priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
if (!priv) {
dev_err(&client->dev,
diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c
index 7270c68ed18a..e88549f0e704 100644
--- a/drivers/media/i2c/ov7670.c
+++ b/drivers/media/i2c/ov7670.c
@@ -1614,8 +1614,10 @@ static int ov7670_probe(struct i2c_client *client,
info->clk = devm_clk_get(&client->dev, "xclk");
if (IS_ERR(info->clk))
- return -EPROBE_DEFER;
- clk_prepare_enable(info->clk);
+ return PTR_ERR(info->clk);
+ ret = clk_prepare_enable(info->clk);
+ if (ret)
+ return ret;
ret = ov7670_init_gpio(client, info);
if (ret)
diff --git a/drivers/media/i2c/ov9650.c b/drivers/media/i2c/ov9650.c
index 2de2fbb13b85..6ffb460e8589 100644
--- a/drivers/media/i2c/ov9650.c
+++ b/drivers/media/i2c/ov9650.c
@@ -484,6 +484,7 @@ static int ov965x_set_default_gamma_curve(struct ov965x *ov965x)
for (i = 0; i < ARRAY_SIZE(gamma_curve); i++) {
int ret = ov965x_write(ov965x->client, addr, gamma_curve[i]);
+
if (ret < 0)
return ret;
addr++;
@@ -503,6 +504,7 @@ static int ov965x_set_color_matrix(struct ov965x *ov965x)
for (i = 0; i < ARRAY_SIZE(mtx); i++) {
int ret = ov965x_write(ov965x->client, addr, mtx[i]);
+
if (ret < 0)
return ret;
addr++;
@@ -611,7 +613,7 @@ static int ov965x_set_banding_filter(struct ov965x *ov965x, int value)
}
if (value == V4L2_CID_POWER_LINE_FREQUENCY_DISABLED)
return 0;
- if (WARN_ON(ov965x->fiv == NULL))
+ if (WARN_ON(!ov965x->fiv))
return -EINVAL;
/* Set minimal exposure time for 50/60 HZ lighting */
if (value == V4L2_CID_POWER_LINE_FREQUENCY_50HZ)
@@ -999,44 +1001,47 @@ static int ov965x_initialize_controls(struct ov965x *ov965x)
/* Auto/manual white balance */
ctrls->auto_wb = v4l2_ctrl_new_std(hdl, ops,
- V4L2_CID_AUTO_WHITE_BALANCE,
- 0, 1, 1, 1);
+ V4L2_CID_AUTO_WHITE_BALANCE,
+ 0, 1, 1, 1);
ctrls->blue_balance = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_BLUE_BALANCE,
0, 0xff, 1, 0x80);
ctrls->red_balance = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_RED_BALANCE,
- 0, 0xff, 1, 0x80);
+ 0, 0xff, 1, 0x80);
/* Auto/manual exposure */
- ctrls->auto_exp = v4l2_ctrl_new_std_menu(hdl, ops,
- V4L2_CID_EXPOSURE_AUTO,
- V4L2_EXPOSURE_MANUAL, 0, V4L2_EXPOSURE_AUTO);
+ ctrls->auto_exp =
+ v4l2_ctrl_new_std_menu(hdl, ops,
+ V4L2_CID_EXPOSURE_AUTO,
+ V4L2_EXPOSURE_MANUAL, 0,
+ V4L2_EXPOSURE_AUTO);
/* Exposure time, in 100 us units. min/max is updated dynamically. */
ctrls->exposure = v4l2_ctrl_new_std(hdl, ops,
- V4L2_CID_EXPOSURE_ABSOLUTE,
- 2, 1500, 1, 500);
+ V4L2_CID_EXPOSURE_ABSOLUTE,
+ 2, 1500, 1, 500);
/* Auto/manual gain */
ctrls->auto_gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_AUTOGAIN,
- 0, 1, 1, 1);
+ 0, 1, 1, 1);
ctrls->gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_GAIN,
- 16, 64 * (16 + 15), 1, 64 * 16);
+ 16, 64 * (16 + 15), 1, 64 * 16);
ctrls->saturation = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_SATURATION,
- -2, 2, 1, 0);
+ -2, 2, 1, 0);
ctrls->brightness = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_BRIGHTNESS,
- -3, 3, 1, 0);
+ -3, 3, 1, 0);
ctrls->sharpness = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_SHARPNESS,
- 0, 32, 1, 6);
+ 0, 32, 1, 6);
ctrls->hflip = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_HFLIP, 0, 1, 1, 0);
ctrls->vflip = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_VFLIP, 0, 1, 1, 0);
- ctrls->light_freq = v4l2_ctrl_new_std_menu(hdl, ops,
- V4L2_CID_POWER_LINE_FREQUENCY,
- V4L2_CID_POWER_LINE_FREQUENCY_60HZ, ~0x7,
- V4L2_CID_POWER_LINE_FREQUENCY_50HZ);
+ ctrls->light_freq =
+ v4l2_ctrl_new_std_menu(hdl, ops,
+ V4L2_CID_POWER_LINE_FREQUENCY,
+ V4L2_CID_POWER_LINE_FREQUENCY_60HZ, ~0x7,
+ V4L2_CID_POWER_LINE_FREQUENCY_50HZ);
v4l2_ctrl_new_std_menu_items(hdl, ops, V4L2_CID_TEST_PATTERN,
- ARRAY_SIZE(test_pattern_menu) - 1, 0, 0,
- test_pattern_menu);
+ ARRAY_SIZE(test_pattern_menu) - 1, 0, 0,
+ test_pattern_menu);
if (hdl->error) {
ret = hdl->error;
v4l2_ctrl_handler_free(hdl);
@@ -1121,7 +1126,6 @@ static int __ov965x_set_frame_interval(struct ov965x *ov965x,
u64 req_int, err, min_err = ~0ULL;
unsigned int i;
-
if (fi->interval.denominator == 0)
return -EINVAL;
@@ -1165,7 +1169,8 @@ static int ov965x_s_frame_interval(struct v4l2_subdev *sd,
return ret;
}
-static int ov965x_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+static int ov965x_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct ov965x *ov965x = to_ov965x(sd);
@@ -1209,7 +1214,8 @@ static void __ov965x_try_frame_size(struct v4l2_mbus_framefmt *mf,
*size = match;
}
-static int ov965x_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
+static int ov965x_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
unsigned int index = ARRAY_SIZE(ov965x_formats);
@@ -1231,7 +1237,7 @@ static int ov965x_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config
mutex_lock(&ov965x->lock);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- if (cfg != NULL) {
+ if (cfg) {
mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
*mf = fmt->format;
}
@@ -1362,7 +1368,8 @@ static int ov965x_s_stream(struct v4l2_subdev *sd, int on)
*/
static int ov965x_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
- struct v4l2_mbus_framefmt *mf = v4l2_subdev_get_try_format(sd, fh->pad, 0);
+ struct v4l2_mbus_framefmt *mf =
+ v4l2_subdev_get_try_format(sd, fh->pad, 0);
ov965x_get_default_format(mf);
return 0;
@@ -1470,7 +1477,7 @@ static int ov965x_probe(struct i2c_client *client,
struct ov965x *ov965x;
int ret;
- if (pdata == NULL) {
+ if (!pdata) {
dev_err(&client->dev, "platform data not specified\n");
return -EINVAL;
}
@@ -1498,13 +1505,13 @@ static int ov965x_probe(struct i2c_client *client,
ret = ov965x_configure_gpios(ov965x, pdata);
if (ret < 0)
- return ret;
+ goto err_mutex;
ov965x->pad.flags = MEDIA_PAD_FL_SOURCE;
sd->entity.function = MEDIA_ENT_F_CAM_SENSOR;
ret = media_entity_pads_init(&sd->entity, 1, &ov965x->pad);
if (ret < 0)
- return ret;
+ goto err_mutex;
ret = ov965x_initialize_controls(ov965x);
if (ret < 0)
@@ -1530,16 +1537,20 @@ err_ctrls:
v4l2_ctrl_handler_free(sd->ctrl_handler);
err_me:
media_entity_cleanup(&sd->entity);
+err_mutex:
+ mutex_destroy(&ov965x->lock);
return ret;
}
static int ov965x_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov965x *ov965x = to_ov965x(sd);
v4l2_async_unregister_subdev(sd);
v4l2_ctrl_handler_free(sd->ctrl_handler);
media_entity_cleanup(&sd->entity);
+ mutex_destroy(&ov965x->lock);
return 0;
}
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
index f434fb2ee6fc..cdc4f2392ef9 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
@@ -1635,8 +1635,7 @@ static int s5c73m3_get_platform_data(struct s5c73m3 *state)
node_ep = of_graph_get_next_endpoint(node, NULL);
if (!node_ep) {
- dev_warn(dev, "no endpoint defined for node: %s\n",
- node->full_name);
+ dev_warn(dev, "no endpoint defined for node: %pOF\n", node);
return 0;
}
diff --git a/drivers/media/i2c/s5k5baf.c b/drivers/media/i2c/s5k5baf.c
index 962051b9939d..ff46d2c96cea 100644
--- a/drivers/media/i2c/s5k5baf.c
+++ b/drivers/media/i2c/s5k5baf.c
@@ -1374,7 +1374,7 @@ static int s5k5baf_get_selection(struct v4l2_subdev *sd,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel)
{
- static enum selection_rect rtype;
+ enum selection_rect rtype;
struct s5k5baf *state = to_s5k5baf(sd);
rtype = s5k5baf_get_sel_rect(sel->pad, sel->target);
@@ -1863,8 +1863,7 @@ static int s5k5baf_parse_device_node(struct s5k5baf *state, struct device *dev)
node_ep = of_graph_get_next_endpoint(node, NULL);
if (!node_ep) {
- dev_err(dev, "no endpoint defined at node %s\n",
- node->full_name);
+ dev_err(dev, "no endpoint defined at node %pOF\n", node);
return -EINVAL;
}
@@ -1882,8 +1881,8 @@ static int s5k5baf_parse_device_node(struct s5k5baf *state, struct device *dev)
case V4L2_MBUS_PARALLEL:
break;
default:
- dev_err(dev, "unsupported bus in endpoint defined at node %s\n",
- node->full_name);
+ dev_err(dev, "unsupported bus in endpoint defined at node %pOF\n",
+ node);
return -EINVAL;
}
diff --git a/drivers/media/i2c/saa7127.c b/drivers/media/i2c/saa7127.c
index 99c303002e90..01784d441ae6 100644
--- a/drivers/media/i2c/saa7127.c
+++ b/drivers/media/i2c/saa7127.c
@@ -806,7 +806,7 @@ static int saa7127_remove(struct i2c_client *client)
/* ----------------------------------------------------------------------- */
-static struct i2c_device_id saa7127_id[] = {
+static const struct i2c_device_id saa7127_id[] = {
{ "saa7127_auto", 0 }, /* auto-detection */
{ "saa7126", SAA7127 },
{ "saa7127", SAA7127 },
diff --git a/drivers/media/i2c/saa717x.c b/drivers/media/i2c/saa717x.c
index e1f6bc219c64..102467e00fb3 100644
--- a/drivers/media/i2c/saa717x.c
+++ b/drivers/media/i2c/saa717x.c
@@ -1069,7 +1069,7 @@ static int saa717x_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
struct saa717x_state *decoder = to_state(sd);
v4l2_dbg(1, debug, sd, "decoder set norm ");
- v4l2_dbg(1, debug, sd, "(not yet implementd)\n");
+ v4l2_dbg(1, debug, sd, "(not yet implemented)\n");
decoder->radio = 0;
decoder->std = std;
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index e0b0c032c4ac..700f433261d0 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -841,6 +841,8 @@ static int smiapp_get_mbus_formats(struct smiapp_sensor *sensor)
&client->dev,
compressed_max_bpp - sensor->compressed_min_bpp + 1,
sizeof(*sensor->valid_link_freqs), GFP_KERNEL);
+ if (!sensor->valid_link_freqs)
+ return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(smiapp_csi_data_formats); i++) {
const struct smiapp_csi_data_format *f =
@@ -2809,13 +2811,19 @@ static struct smiapp_hwconfig *smiapp_get_hwconfig(struct device *dev)
switch (bus_cfg->bus_type) {
case V4L2_MBUS_CSI2:
hwcfg->csi_signalling_mode = SMIAPP_CSI_SIGNALLING_MODE_CSI2;
+ hwcfg->lanes = bus_cfg->bus.mipi_csi2.num_data_lanes;
+ break;
+ case V4L2_MBUS_CCP2:
+ hwcfg->csi_signalling_mode = (bus_cfg->bus.mipi_csi1.strobe) ?
+ SMIAPP_CSI_SIGNALLING_MODE_CCP2_DATA_STROBE :
+ SMIAPP_CSI_SIGNALLING_MODE_CCP2_DATA_CLOCK;
+ hwcfg->lanes = 1;
break;
- /* FIXME: add CCP2 support. */
default:
+ dev_err(dev, "unsupported bus %u\n", bus_cfg->bus_type);
goto out_err;
}
- hwcfg->lanes = bus_cfg->bus.mipi_csi2.num_data_lanes;
dev_dbg(dev, "lanes %u\n", hwcfg->lanes);
/* NVM size is not mandatory */
@@ -2828,8 +2836,8 @@ static struct smiapp_hwconfig *smiapp_get_hwconfig(struct device *dev)
goto out_err;
}
- dev_dbg(dev, "nvm %d, clk %d, csi %d\n", hwcfg->nvm_size,
- hwcfg->ext_clk, hwcfg->csi_signalling_mode);
+ dev_dbg(dev, "nvm %d, clk %d, mode %d\n",
+ hwcfg->nvm_size, hwcfg->ext_clk, hwcfg->csi_signalling_mode);
if (!bus_cfg->nr_of_link_frequencies) {
dev_warn(dev, "no link frequencies defined\n");
diff --git a/drivers/media/i2c/smiapp/smiapp-quirk.c b/drivers/media/i2c/smiapp/smiapp-quirk.c
index cb128eae9c54..95c0272bb014 100644
--- a/drivers/media/i2c/smiapp/smiapp-quirk.c
+++ b/drivers/media/i2c/smiapp/smiapp-quirk.c
@@ -71,7 +71,7 @@ static int jt8ew9_limits(struct smiapp_sensor *sensor)
static int jt8ew9_post_poweron(struct smiapp_sensor *sensor)
{
- const struct smiapp_reg_8 regs[] = {
+ static const struct smiapp_reg_8 regs[] = {
{ 0x30a3, 0xd8 }, /* Output port control : LVDS ports only */
{ 0x30ae, 0x00 }, /* 0x0307 pll_multiplier maximum value on PLL input 9.6MHz ( 19.2MHz is divided on pre_pll_div) */
{ 0x30af, 0xd0 }, /* 0x0307 pll_multiplier maximum value on PLL input 9.6MHz ( 19.2MHz is divided on pre_pll_div) */
@@ -115,7 +115,7 @@ const struct smiapp_quirk smiapp_jt8ew9_quirk = {
static int imx125es_post_poweron(struct smiapp_sensor *sensor)
{
/* Taken from v02. No idea what the other two are. */
- const struct smiapp_reg_8 regs[] = {
+ static const struct smiapp_reg_8 regs[] = {
/*
* 0x3302: clk during frame blanking:
* 0x00 - HS mode, 0x01 - LP11
@@ -145,7 +145,7 @@ static int jt8ev1_post_poweron(struct smiapp_sensor *sensor)
{
struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
int rval;
- const struct smiapp_reg_8 regs[] = {
+ static const struct smiapp_reg_8 regs[] = {
{ 0x3031, 0xcd }, /* For digital binning (EQ_MONI) */
{ 0x30a3, 0xd0 }, /* FLASH STROBE enable */
{ 0x3237, 0x00 }, /* For control of pulse timing for ADC */
@@ -166,7 +166,7 @@ static int jt8ev1_post_poweron(struct smiapp_sensor *sensor)
{ 0x33cf, 0xec }, /* For Black sun */
{ 0x3328, 0x80 }, /* Ugh. No idea what's this. */
};
- const struct smiapp_reg_8 regs_96[] = {
+ static const struct smiapp_reg_8 regs_96[] = {
{ 0x30ae, 0x00 }, /* For control of ADC clock */
{ 0x30af, 0xd0 },
{ 0x30b0, 0x01 },
diff --git a/drivers/media/i2c/soc_camera/Kconfig b/drivers/media/i2c/soc_camera/Kconfig
index 96859f37cb1c..72b369895b37 100644
--- a/drivers/media/i2c/soc_camera/Kconfig
+++ b/drivers/media/i2c/soc_camera/Kconfig
@@ -47,12 +47,6 @@ config SOC_CAMERA_OV5642
help
This is a V4L2 camera driver for the OmniVision OV5642 sensor
-config SOC_CAMERA_OV6650
- tristate "ov6650 sensor support"
- depends on SOC_CAMERA && I2C
- ---help---
- This is a V4L2 SoC camera driver for the OmniVision OV6650 sensor
-
config SOC_CAMERA_OV772X
tristate "ov772x camera support"
depends on SOC_CAMERA && I2C
diff --git a/drivers/media/i2c/soc_camera/Makefile b/drivers/media/i2c/soc_camera/Makefile
index 974bdb721dbe..78532a7fb8e2 100644
--- a/drivers/media/i2c/soc_camera/Makefile
+++ b/drivers/media/i2c/soc_camera/Makefile
@@ -4,7 +4,6 @@ obj-$(CONFIG_SOC_CAMERA_MT9T031) += mt9t031.o
obj-$(CONFIG_SOC_CAMERA_MT9T112) += mt9t112.o
obj-$(CONFIG_SOC_CAMERA_MT9V022) += mt9v022.o
obj-$(CONFIG_SOC_CAMERA_OV5642) += ov5642.o
-obj-$(CONFIG_SOC_CAMERA_OV6650) += ov6650.o
obj-$(CONFIG_SOC_CAMERA_OV772X) += ov772x.o
obj-$(CONFIG_SOC_CAMERA_OV9640) += ov9640.o
obj-$(CONFIG_SOC_CAMERA_OV9740) += ov9740.o
diff --git a/drivers/media/i2c/soc_camera/mt9t031.c b/drivers/media/i2c/soc_camera/mt9t031.c
index 714fb3555b34..4802d30e47de 100644
--- a/drivers/media/i2c/soc_camera/mt9t031.c
+++ b/drivers/media/i2c/soc_camera/mt9t031.c
@@ -592,7 +592,7 @@ static const struct dev_pm_ops mt9t031_dev_pm_ops = {
.runtime_resume = mt9t031_runtime_resume,
};
-static struct device_type mt9t031_dev_type = {
+static const struct device_type mt9t031_dev_type = {
.name = "MT9T031",
.pm = &mt9t031_dev_pm_ops,
};
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index 5788af238b86..e6f5c363ccab 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -2013,7 +2013,7 @@ static int tc358743_remove(struct i2c_client *client)
return 0;
}
-static struct i2c_device_id tc358743_id[] = {
+static const struct i2c_device_id tc358743_id[] = {
{"tc358743", 0},
{}
};
diff --git a/drivers/media/i2c/ths8200.c b/drivers/media/i2c/ths8200.c
index 42340e364cea..498ad2368cbc 100644
--- a/drivers/media/i2c/ths8200.c
+++ b/drivers/media/i2c/ths8200.c
@@ -483,7 +483,7 @@ static int ths8200_remove(struct i2c_client *client)
return 0;
}
-static struct i2c_device_id ths8200_id[] = {
+static const struct i2c_device_id ths8200_id[] = {
{ "ths8200", 0 },
{},
};
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index 9da4bf4f2c7a..7b79a7498751 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -659,7 +659,7 @@ static int tvp5150_set_vbi(struct v4l2_subdev *sd,
struct tvp5150 *decoder = to_tvp5150(sd);
v4l2_std_id std = decoder->norm;
u8 reg;
- int pos=0;
+ int pos = 0;
if (std == V4L2_STD_ALL) {
dev_err(sd->dev, "VBI can't be configured without knowing number of lines\n");
@@ -669,33 +669,30 @@ static int tvp5150_set_vbi(struct v4l2_subdev *sd,
line += 3;
}
- if (line<6||line>27)
+ if (line < 6 || line > 27)
return 0;
- while (regs->reg != (u16)-1 ) {
+ while (regs->reg != (u16)-1) {
if ((type & regs->type.vbi_type) &&
- (line>=regs->type.ini_line) &&
- (line<=regs->type.end_line)) {
- type=regs->type.vbi_type;
+ (line >= regs->type.ini_line) &&
+ (line <= regs->type.end_line))
break;
- }
regs++;
pos++;
}
+
if (regs->reg == (u16)-1)
return 0;
- type=pos | (flags & 0xf0);
- reg=((line-6)<<1)+TVP5150_LINE_MODE_INI;
+ type = pos | (flags & 0xf0);
+ reg = ((line - 6) << 1) + TVP5150_LINE_MODE_INI;
- if (fields&1) {
+ if (fields & 1)
tvp5150_write(sd, reg, type);
- }
- if (fields&2) {
- tvp5150_write(sd, reg+1, type);
- }
+ if (fields & 2)
+ tvp5150_write(sd, reg + 1, type);
return type;
}
diff --git a/drivers/media/i2c/vs6624.c b/drivers/media/i2c/vs6624.c
index f0741ab338df..560738213c00 100644
--- a/drivers/media/i2c/vs6624.c
+++ b/drivers/media/i2c/vs6624.c
@@ -58,7 +58,7 @@ static const struct vs6624_format {
},
};
-static struct v4l2_mbus_framefmt vs6624_default_fmt = {
+static const struct v4l2_mbus_framefmt vs6624_default_fmt = {
.width = VGA_WIDTH,
.height = VGA_HEIGHT,
.code = MEDIA_BUS_FMT_UYVY8_2X8,
diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
index 760e3e424e23..e79f72b8b858 100644
--- a/drivers/media/media-device.c
+++ b/drivers/media/media-device.c
@@ -69,9 +69,9 @@ static int media_device_get_info(struct media_device *dev,
strlcpy(info->serial, dev->serial, sizeof(info->serial));
strlcpy(info->bus_info, dev->bus_info, sizeof(info->bus_info));
- info->media_version = MEDIA_API_VERSION;
+ info->media_version = LINUX_VERSION_CODE;
+ info->driver_version = info->media_version;
info->hw_revision = dev->hw_revision;
- info->driver_version = dev->driver_version;
return 0;
}
@@ -537,9 +537,9 @@ static DEVICE_ATTR(model, S_IRUGO, show_model, NULL);
* Registration/unregistration
*/
-static void media_device_release(struct media_devnode *mdev)
+static void media_device_release(struct media_devnode *devnode)
{
- dev_dbg(mdev->parent, "Media device released\n");
+ dev_dbg(devnode->parent, "Media device released\n");
}
/**
@@ -591,9 +591,8 @@ int __must_check media_device_register_entity(struct media_device *mdev,
&entity->pads[i].graph_obj);
/* invoke entity_notify callbacks */
- list_for_each_entry_safe(notify, next, &mdev->entity_notify, list) {
- (notify)->notify(entity, notify->notify_data);
- }
+ list_for_each_entry_safe(notify, next, &mdev->entity_notify, list)
+ notify->notify(entity, notify->notify_data);
if (mdev->entity_internal_idx_max
>= mdev->pm_count_walk.ent_enum.idx_max) {
@@ -834,8 +833,6 @@ void media_device_pci_init(struct media_device *mdev,
mdev->hw_revision = (pci_dev->subsystem_vendor << 16)
| pci_dev->subsystem_device;
- mdev->driver_version = LINUX_VERSION_CODE;
-
media_device_init(mdev);
}
EXPORT_SYMBOL_GPL(media_device_pci_init);
@@ -863,7 +860,6 @@ void __media_device_usb_init(struct media_device *mdev,
strlcpy(mdev->serial, udev->serial, sizeof(mdev->serial));
usb_make_path(udev, mdev->bus_info, sizeof(mdev->bus_info));
mdev->hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
- mdev->driver_version = LINUX_VERSION_CODE;
media_device_init(mdev);
}
diff --git a/drivers/media/media-entity.c b/drivers/media/media-entity.c
index dd0f0ead9516..2ace0410d277 100644
--- a/drivers/media/media-entity.c
+++ b/drivers/media/media-entity.c
@@ -917,7 +917,7 @@ media_entity_find_link(struct media_pad *source, struct media_pad *sink)
}
EXPORT_SYMBOL_GPL(media_entity_find_link);
-struct media_pad *media_entity_remote_pad(struct media_pad *pad)
+struct media_pad *media_entity_remote_pad(const struct media_pad *pad)
{
struct media_link *link;
diff --git a/drivers/media/pci/b2c2/flexcop-pci.c b/drivers/media/pci/b2c2/flexcop-pci.c
index 6e60decb2198..cc6527e35537 100644
--- a/drivers/media/pci/b2c2/flexcop-pci.c
+++ b/drivers/media/pci/b2c2/flexcop-pci.c
@@ -415,7 +415,7 @@ static void flexcop_pci_remove(struct pci_dev *pdev)
flexcop_device_kfree(fc_pci->fc_dev);
}
-static struct pci_device_id flexcop_pci_tbl[] = {
+static const struct pci_device_id flexcop_pci_tbl[] = {
{ PCI_DEVICE(0x13d0, 0x2103) },
{ },
};
diff --git a/drivers/media/pci/bt8xx/bt878.c b/drivers/media/pci/bt8xx/bt878.c
index 8aa726651630..a5f52137d306 100644
--- a/drivers/media/pci/bt8xx/bt878.c
+++ b/drivers/media/pci/bt8xx/bt878.c
@@ -383,7 +383,7 @@ EXPORT_SYMBOL(bt878_device_control);
.driver_data = (unsigned long) name \
}
-static struct pci_device_id bt878_pci_tbl[] = {
+static const struct pci_device_id bt878_pci_tbl[] = {
BROOKTREE_878_DEVICE(0x0071, 0x0101, "Nebula Electronics DigiTV"),
BROOKTREE_878_DEVICE(0x1461, 0x0761, "AverMedia AverTV DVB-T 761"),
BROOKTREE_878_DEVICE(0x11bd, 0x001c, "Pinnacle PCTV Sat"),
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index ed319f18ba48..227086a2e99c 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -1702,7 +1702,7 @@ static void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
bttv_dma_free(q,fh->btv,buf);
}
-static struct videobuf_queue_ops bttv_video_qops = {
+static const struct videobuf_queue_ops bttv_video_qops = {
.buf_setup = buffer_setup,
.buf_prepare = buffer_prepare,
.buf_queue = buffer_queue,
@@ -4388,7 +4388,7 @@ static int bttv_resume(struct pci_dev *pci_dev)
}
#endif
-static struct pci_device_id bttv_pci_tbl[] = {
+static const struct pci_device_id bttv_pci_tbl[] = {
{PCI_VDEVICE(BROOKTREE, PCI_DEVICE_ID_BT848), 0},
{PCI_VDEVICE(BROOKTREE, PCI_DEVICE_ID_BT849), 0},
{PCI_VDEVICE(BROOKTREE, PCI_DEVICE_ID_BT878), 0},
diff --git a/drivers/media/pci/bt8xx/bttv-i2c.c b/drivers/media/pci/bt8xx/bttv-i2c.c
index 274fd036b306..eccd1e3d717a 100644
--- a/drivers/media/pci/bt8xx/bttv-i2c.c
+++ b/drivers/media/pci/bt8xx/bttv-i2c.c
@@ -97,7 +97,7 @@ static int bttv_bit_getsda(void *data)
return state;
}
-static struct i2c_algo_bit_data bttv_i2c_algo_bit_template = {
+static const struct i2c_algo_bit_data bttv_i2c_algo_bit_template = {
.setsda = bttv_bit_setsda,
.setscl = bttv_bit_setscl,
.getsda = bttv_bit_getsda,
diff --git a/drivers/media/pci/bt8xx/bttv-input.c b/drivers/media/pci/bt8xx/bttv-input.c
index 2fd07a8afcd2..73d655d073d6 100644
--- a/drivers/media/pci/bt8xx/bttv-input.c
+++ b/drivers/media/pci/bt8xx/bttv-input.c
@@ -69,12 +69,13 @@ static void ir_handle_key(struct bttv *btv)
if ((ir->mask_keydown && (gpio & ir->mask_keydown)) ||
(ir->mask_keyup && !(gpio & ir->mask_keyup))) {
- rc_keydown_notimeout(ir->dev, RC_TYPE_UNKNOWN, data, 0);
+ rc_keydown_notimeout(ir->dev, RC_PROTO_UNKNOWN, data, 0);
} else {
/* HACK: Probably, ir->mask_keydown is missing
for this board */
if (btv->c.type == BTTV_BOARD_WINFAST2000)
- rc_keydown_notimeout(ir->dev, RC_TYPE_UNKNOWN, data, 0);
+ rc_keydown_notimeout(ir->dev, RC_PROTO_UNKNOWN, data,
+ 0);
rc_keyup(ir->dev);
}
@@ -99,7 +100,7 @@ static void ir_enltv_handle_key(struct bttv *btv)
gpio, data,
(gpio & ir->mask_keyup) ? " up" : "up/down");
- rc_keydown_notimeout(ir->dev, RC_TYPE_UNKNOWN, data, 0);
+ rc_keydown_notimeout(ir->dev, RC_PROTO_UNKNOWN, data, 0);
if (keyup)
rc_keyup(ir->dev);
} else {
@@ -113,7 +114,8 @@ static void ir_enltv_handle_key(struct bttv *btv)
if (keyup)
rc_keyup(ir->dev);
else
- rc_keydown_notimeout(ir->dev, RC_TYPE_UNKNOWN, data, 0);
+ rc_keydown_notimeout(ir->dev, RC_PROTO_UNKNOWN, data,
+ 0);
}
ir->last_gpio = data | keyup;
@@ -235,7 +237,7 @@ static void bttv_rc5_timer_end(unsigned long data)
}
scancode = RC_SCANCODE_RC5(system, command);
- rc_keydown(ir->dev, RC_TYPE_RC5, scancode, toggle);
+ rc_keydown(ir->dev, RC_PROTO_RC5, scancode, toggle);
dprintk("scancode %x, toggle %x\n", scancode, toggle);
}
@@ -327,7 +329,7 @@ static void bttv_ir_stop(struct bttv *btv)
* Get_key functions used by I2C remotes
*/
-static int get_key_pv951(struct IR_i2c *ir, enum rc_type *protocol,
+static int get_key_pv951(struct IR_i2c *ir, enum rc_proto *protocol,
u32 *scancode, u8 *toggle)
{
unsigned char b;
@@ -355,7 +357,7 @@ static int get_key_pv951(struct IR_i2c *ir, enum rc_type *protocol,
* the device is bound to the vendor-provided RC.
*/
- *protocol = RC_TYPE_UNKNOWN;
+ *protocol = RC_PROTO_UNKNOWN;
*scancode = b;
*toggle = 0;
return 1;
@@ -535,7 +537,7 @@ int bttv_input_init(struct bttv *btv)
snprintf(ir->phys, sizeof(ir->phys), "pci-%s/ir0",
pci_name(btv->c.pci));
- rc->input_name = ir->name;
+ rc->device_name = ir->name;
rc->input_phys = ir->phys;
rc->input_id.bustype = BUS_PCI;
rc->input_id.version = 1;
diff --git a/drivers/media/pci/bt8xx/dst_ca.c b/drivers/media/pci/bt8xx/dst_ca.c
index 90f4263452d3..530b3e9764ce 100644
--- a/drivers/media/pci/bt8xx/dst_ca.c
+++ b/drivers/media/pci/bt8xx/dst_ca.c
@@ -57,20 +57,6 @@ static unsigned int verbose = 5;
module_param(verbose, int, 0644);
MODULE_PARM_DESC(verbose, "verbose startup messages, default is 1 (yes)");
-/* Need some more work */
-static int ca_set_slot_descr(void)
-{
- /* We could make this more graceful ? */
- return -EOPNOTSUPP;
-}
-
-/* Need some more work */
-static int ca_set_pid(void)
-{
- /* We could make this more graceful ? */
- return -EOPNOTSUPP;
-}
-
static void put_command_and_length(u8 *data, int command, int length)
{
data[0] = (command >> 16) & 0xff;
@@ -144,7 +130,7 @@ static int dst_put_ci(struct dst_state *state, u8 *data, int len, u8 *ca_string,
}
if(dst_ca_comm_err == RETRIES)
- return -1;
+ return -EIO;
return 0;
}
@@ -159,7 +145,7 @@ static int ca_get_app_info(struct dst_state *state)
put_checksum(&command[0], command[0]);
if ((dst_put_ci(state, command, sizeof(command), state->messages, GET_REPLY)) < 0) {
dprintk(verbose, DST_CA_ERROR, 1, " -->dst_put_ci FAILED !");
- return -1;
+ return -EIO;
}
dprintk(verbose, DST_CA_INFO, 1, " -->dst_put_ci SUCCESS !");
dprintk(verbose, DST_CA_INFO, 1, " ================================ CI Module Application Info ======================================");
@@ -198,7 +184,7 @@ static int ca_get_ca_info(struct dst_state *state)
put_checksum(&slot_command[0], slot_command[0]);
if ((dst_put_ci(state, slot_command, sizeof (slot_command), state->messages, GET_REPLY)) < 0) {
dprintk(verbose, DST_CA_ERROR, 1, " -->dst_put_ci FAILED !");
- return -1;
+ return -EIO;
}
dprintk(verbose, DST_CA_INFO, 1, " -->dst_put_ci SUCCESS !");
@@ -242,7 +228,7 @@ static int ca_get_slot_caps(struct dst_state *state, struct ca_caps *p_ca_caps,
put_checksum(&slot_command[0], slot_command[0]);
if ((dst_put_ci(state, slot_command, sizeof (slot_command), slot_cap, GET_REPLY)) < 0) {
dprintk(verbose, DST_CA_ERROR, 1, " -->dst_put_ci FAILED !");
- return -1;
+ return -EIO;
}
dprintk(verbose, DST_CA_NOTICE, 1, " -->dst_put_ci SUCCESS !");
@@ -282,7 +268,7 @@ static int ca_get_slot_info(struct dst_state *state, struct ca_slot_info *p_ca_s
put_checksum(&slot_command[0], 7);
if ((dst_put_ci(state, slot_command, sizeof (slot_command), slot_info, GET_REPLY)) < 0) {
dprintk(verbose, DST_CA_ERROR, 1, " -->dst_put_ci FAILED !");
- return -1;
+ return -EIO;
}
dprintk(verbose, DST_CA_INFO, 1, " -->dst_put_ci SUCCESS !");
@@ -354,7 +340,7 @@ static int handle_dst_tag(struct dst_state *state, struct ca_msg *p_ca_message,
} else {
if (length > 247) {
dprintk(verbose, DST_CA_ERROR, 1, " Message too long ! *** Bailing Out *** !");
- return -1;
+ return -EIO;
}
hw_buffer->msg[0] = (length & 0xff) + 7;
hw_buffer->msg[1] = 0x40;
@@ -380,7 +366,7 @@ static int write_to_8820(struct dst_state *state, struct ca_msg *hw_buffer, u8 l
dprintk(verbose, DST_CA_ERROR, 1, " DST-CI Command failed.");
dprintk(verbose, DST_CA_NOTICE, 1, " Resetting DST.");
rdc_reset_state(state);
- return -1;
+ return -EIO;
}
dprintk(verbose, DST_CA_NOTICE, 1, " DST-CI Command success.");
@@ -453,7 +439,7 @@ static int dst_check_ca_pmt(struct dst_state *state, struct ca_msg *p_ca_message
if (ca_pmt_reply_test) {
if ((ca_set_pmt(state, p_ca_message, hw_buffer, 1, GET_REPLY)) < 0) {
dprintk(verbose, DST_CA_ERROR, 1, " ca_set_pmt.. failed !");
- return -1;
+ return -EIO;
}
/* Process CA PMT Reply */
@@ -464,7 +450,7 @@ static int dst_check_ca_pmt(struct dst_state *state, struct ca_msg *p_ca_message
if (!ca_pmt_reply_test) {
if ((ca_set_pmt(state, p_ca_message, hw_buffer, 0, NO_REPLY)) < 0) {
dprintk(verbose, DST_CA_ERROR, 1, " ca_set_pmt.. failed !");
- return -1;
+ return -EIO;
}
dprintk(verbose, DST_CA_NOTICE, 1, " ca_set_pmt.. success !");
/* put a dummy message */
@@ -573,17 +559,18 @@ static long dst_ca_ioctl(struct file *file, unsigned int cmd, unsigned long ioct
switch (cmd) {
case CA_SEND_MSG:
dprintk(verbose, DST_CA_INFO, 1, " Sending message");
- if ((ca_send_message(state, p_ca_message, arg)) < 0) {
+ result = ca_send_message(state, p_ca_message, arg);
+
+ if (result < 0) {
dprintk(verbose, DST_CA_ERROR, 1, " -->CA_SEND_MSG Failed !");
- result = -1;
goto free_mem_and_exit;
}
break;
case CA_GET_MSG:
dprintk(verbose, DST_CA_INFO, 1, " Getting message");
- if ((ca_get_message(state, p_ca_message, arg)) < 0) {
+ result = ca_get_message(state, p_ca_message, arg);
+ if (result < 0) {
dprintk(verbose, DST_CA_ERROR, 1, " -->CA_GET_MSG Failed !");
- result = -1;
goto free_mem_and_exit;
}
dprintk(verbose, DST_CA_INFO, 1, " -->CA_GET_MSG Success !");
@@ -595,7 +582,8 @@ static long dst_ca_ioctl(struct file *file, unsigned int cmd, unsigned long ioct
break;
case CA_GET_SLOT_INFO:
dprintk(verbose, DST_CA_INFO, 1, " Getting Slot info");
- if ((ca_get_slot_info(state, p_ca_slot_info, arg)) < 0) {
+ result = ca_get_slot_info(state, p_ca_slot_info, arg);
+ if (result < 0) {
dprintk(verbose, DST_CA_ERROR, 1, " -->CA_GET_SLOT_INFO Failed !");
result = -1;
goto free_mem_and_exit;
@@ -604,40 +592,22 @@ static long dst_ca_ioctl(struct file *file, unsigned int cmd, unsigned long ioct
break;
case CA_GET_CAP:
dprintk(verbose, DST_CA_INFO, 1, " Getting Slot capabilities");
- if ((ca_get_slot_caps(state, p_ca_caps, arg)) < 0) {
+ result = ca_get_slot_caps(state, p_ca_caps, arg);
+ if (result < 0) {
dprintk(verbose, DST_CA_ERROR, 1, " -->CA_GET_CAP Failed !");
- result = -1;
goto free_mem_and_exit;
}
dprintk(verbose, DST_CA_INFO, 1, " -->CA_GET_CAP Success !");
break;
case CA_GET_DESCR_INFO:
dprintk(verbose, DST_CA_INFO, 1, " Getting descrambler description");
- if ((ca_get_slot_descr(state, p_ca_message, arg)) < 0) {
+ result = ca_get_slot_descr(state, p_ca_message, arg);
+ if (result < 0) {
dprintk(verbose, DST_CA_ERROR, 1, " -->CA_GET_DESCR_INFO Failed !");
- result = -1;
goto free_mem_and_exit;
}
dprintk(verbose, DST_CA_INFO, 1, " -->CA_GET_DESCR_INFO Success !");
break;
- case CA_SET_DESCR:
- dprintk(verbose, DST_CA_INFO, 1, " Setting descrambler");
- if ((ca_set_slot_descr()) < 0) {
- dprintk(verbose, DST_CA_ERROR, 1, " -->CA_SET_DESCR Failed !");
- result = -1;
- goto free_mem_and_exit;
- }
- dprintk(verbose, DST_CA_INFO, 1, " -->CA_SET_DESCR Success !");
- break;
- case CA_SET_PID:
- dprintk(verbose, DST_CA_INFO, 1, " Setting PID");
- if ((ca_set_pid()) < 0) {
- dprintk(verbose, DST_CA_ERROR, 1, " -->CA_SET_PID Failed !");
- result = -1;
- goto free_mem_and_exit;
- }
- dprintk(verbose, DST_CA_INFO, 1, " -->CA_SET_PID Success !");
- break;
default:
result = -EOPNOTSUPP;
}
diff --git a/drivers/media/pci/cobalt/cobalt-alsa-pcm.c b/drivers/media/pci/cobalt/cobalt-alsa-pcm.c
index 49013c6b8646..b69b258d39b9 100644
--- a/drivers/media/pci/cobalt/cobalt-alsa-pcm.c
+++ b/drivers/media/pci/cobalt/cobalt-alsa-pcm.c
@@ -43,7 +43,7 @@ MODULE_PARM_DESC(pcm_debug, "enable debug messages for pcm");
pr_info("cobalt-alsa-pcm %s: " fmt, __func__, ##arg); \
} while (0)
-static struct snd_pcm_hardware snd_cobalt_hdmi_capture = {
+static const struct snd_pcm_hardware snd_cobalt_hdmi_capture = {
.info = SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_INTERLEAVED |
@@ -64,7 +64,7 @@ static struct snd_pcm_hardware snd_cobalt_hdmi_capture = {
.periods_max = 4,
};
-static struct snd_pcm_hardware snd_cobalt_playback = {
+static const struct snd_pcm_hardware snd_cobalt_playback = {
.info = SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_INTERLEAVED |
diff --git a/drivers/media/pci/cobalt/cobalt-driver.c b/drivers/media/pci/cobalt/cobalt-driver.c
index f8e173f3e9e2..98b6cb9505d1 100644
--- a/drivers/media/pci/cobalt/cobalt-driver.c
+++ b/drivers/media/pci/cobalt/cobalt-driver.c
@@ -36,7 +36,7 @@
#include "cobalt-omnitek.h"
/* add your revision and whatnot here */
-static struct pci_device_id cobalt_pci_tbl[] = {
+static const struct pci_device_id cobalt_pci_tbl[] = {
{PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_COBALT,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0,}
diff --git a/drivers/media/pci/cobalt/cobalt-i2c.c b/drivers/media/pci/cobalt/cobalt-i2c.c
index ad16b89b8d0c..1a5c55673ea8 100644
--- a/drivers/media/pci/cobalt/cobalt-i2c.c
+++ b/drivers/media/pci/cobalt/cobalt-i2c.c
@@ -301,7 +301,7 @@ static u32 cobalt_func(struct i2c_adapter *adap)
}
/* template for i2c-bit-algo */
-static struct i2c_adapter cobalt_i2c_adap_template = {
+static const struct i2c_adapter cobalt_i2c_adap_template = {
.name = "cobalt i2c driver",
.algo = NULL, /* set by i2c-algo-bit */
.algo_data = NULL, /* filled from template */
diff --git a/drivers/media/pci/cx18/cx18-alsa-mixer.c b/drivers/media/pci/cx18/cx18-alsa-mixer.c
index 06b066bc9301..cb04c3d820e2 100644
--- a/drivers/media/pci/cx18/cx18-alsa-mixer.c
+++ b/drivers/media/pci/cx18/cx18-alsa-mixer.c
@@ -161,7 +161,7 @@ int __init snd_cx18_mixer_create(struct snd_cx18_card *cxsc)
strlcpy(sc->mixername, "CX23418 Mixer", sizeof(sc->mixername));
- ret = snd_ctl_add(sc, snd_ctl_new1(snd_cx18_mixer_tv_vol, cxsc));
+ ret = snd_ctl_add(sc, snd_ctl_new1(&snd_cx18_mixer_tv_vol, cxsc));
if (ret) {
CX18_ALSA_WARN("%s: failed to add %s control, err %d\n",
__func__, snd_cx18_mixer_tv_vol.name, ret);
diff --git a/drivers/media/pci/cx18/cx18-alsa-pcm.c b/drivers/media/pci/cx18/cx18-alsa-pcm.c
index f68ee57a9ae2..aadd76466aec 100644
--- a/drivers/media/pci/cx18/cx18-alsa-pcm.c
+++ b/drivers/media/pci/cx18/cx18-alsa-pcm.c
@@ -44,7 +44,7 @@ MODULE_PARM_DESC(pcm_debug, "enable debug messages for pcm");
__func__, ##arg); \
} while (0)
-static struct snd_pcm_hardware snd_cx18_hw_capture = {
+static const struct snd_pcm_hardware snd_cx18_hw_capture = {
.info = SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_INTERLEAVED |
diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c
index 8bce49cdad46..8654710464cc 100644
--- a/drivers/media/pci/cx18/cx18-driver.c
+++ b/drivers/media/pci/cx18/cx18-driver.c
@@ -48,7 +48,7 @@ int (*cx18_ext_init)(struct cx18 *);
EXPORT_SYMBOL(cx18_ext_init);
/* add your revision and whatnot here */
-static struct pci_device_id cx18_pci_tbl[] = {
+static const struct pci_device_id cx18_pci_tbl[] = {
{PCI_VENDOR_ID_CX, PCI_DEVICE_ID_CX23418,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0,}
diff --git a/drivers/media/pci/cx18/cx18-i2c.c b/drivers/media/pci/cx18/cx18-i2c.c
index eabdd4c5520a..7f588eeac60f 100644
--- a/drivers/media/pci/cx18/cx18-i2c.c
+++ b/drivers/media/pci/cx18/cx18-i2c.c
@@ -93,8 +93,8 @@ static int cx18_i2c_new_ir(struct cx18 *cx, struct i2c_adapter *adap, u32 hw,
case CX18_HW_Z8F0811_IR_RX_HAUP:
init_data->ir_codes = RC_MAP_HAUPPAUGE;
init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
- init_data->type = RC_BIT_RC5 | RC_BIT_RC6_MCE |
- RC_BIT_RC6_6A_32;
+ init_data->type = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE |
+ RC_PROTO_BIT_RC6_6A_32;
init_data->name = cx->card_name;
info.platform_data = init_data;
break;
@@ -206,7 +206,7 @@ static int cx18_getsda(void *data)
}
/* template for i2c-bit-algo */
-static struct i2c_adapter cx18_i2c_adap_template = {
+static const struct i2c_adapter cx18_i2c_adap_template = {
.name = "cx18 i2c driver",
.algo = NULL, /* set by i2c-algo-bit */
.algo_data = NULL, /* filled from template */
@@ -216,7 +216,7 @@ static struct i2c_adapter cx18_i2c_adap_template = {
#define CX18_SCL_PERIOD (10) /* usecs. 10 usec is period for a 100 KHz clock */
#define CX18_ALGO_BIT_TIMEOUT (2) /* seconds */
-static struct i2c_algo_bit_data cx18_i2c_algo_template = {
+static const struct i2c_algo_bit_data cx18_i2c_algo_template = {
.setsda = cx18_setsda,
.setscl = cx18_setscl,
.getsda = cx18_getsda,
diff --git a/drivers/media/pci/cx18/cx18-streams.c b/drivers/media/pci/cx18/cx18-streams.c
index 3c45e0071530..8385411af641 100644
--- a/drivers/media/pci/cx18/cx18-streams.c
+++ b/drivers/media/pci/cx18/cx18-streams.c
@@ -31,7 +31,7 @@
#define CX18_DSP0_INTERRUPT_MASK 0xd0004C
-static struct v4l2_file_operations cx18_v4l2_enc_fops = {
+static const struct v4l2_file_operations cx18_v4l2_enc_fops = {
.owner = THIS_MODULE,
.read = cx18_v4l2_read,
.open = cx18_v4l2_open,
@@ -240,7 +240,7 @@ static void buffer_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
list_add_tail(&buf->vb.queue, &s->vb_capture);
}
-static struct videobuf_queue_ops cx18_videobuf_qops = {
+static const struct videobuf_queue_ops cx18_videobuf_qops = {
.buf_setup = buffer_setup,
.buf_prepare = buffer_prepare,
.buf_queue = buffer_queue,
diff --git a/drivers/media/pci/cx23885/cx23885-417.c b/drivers/media/pci/cx23885/cx23885-417.c
index 2ff1d1e274be..a71f3c7569ce 100644
--- a/drivers/media/pci/cx23885/cx23885-417.c
+++ b/drivers/media/pci/cx23885/cx23885-417.c
@@ -1416,7 +1416,7 @@ static int vidioc_log_status(struct file *file, void *priv)
return 0;
}
-static struct v4l2_file_operations mpeg_fops = {
+static const struct v4l2_file_operations mpeg_fops = {
.owner = THIS_MODULE,
.open = v4l2_fh_open,
.release = vb2_fop_release,
diff --git a/drivers/media/pci/cx23885/cx23885-alsa.c b/drivers/media/pci/cx23885/cx23885-alsa.c
index c148f9a4a9ac..d8c3637e492e 100644
--- a/drivers/media/pci/cx23885/cx23885-alsa.c
+++ b/drivers/media/pci/cx23885/cx23885-alsa.c
@@ -293,7 +293,7 @@ static int dsp_buffer_free(struct cx23885_audio_dev *chip)
*/
#define DEFAULT_FIFO_SIZE 4096
-static struct snd_pcm_hardware snd_cx23885_digital_hw = {
+static const struct snd_pcm_hardware snd_cx23885_digital_hw = {
.info = SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c
index c48fa8e25a70..78a8836d03e4 100644
--- a/drivers/media/pci/cx23885/cx23885-cards.c
+++ b/drivers/media/pci/cx23885/cx23885-cards.c
@@ -1278,6 +1278,12 @@ static void hauppauge_eeprom(struct cx23885_dev *dev, u8 *eeprom_data)
case 85721:
/* WinTV-HVR1290 (PCIe, OEM, RCA in, IR,
Dual channel ATSC and Basic analog */
+ case 121019:
+ /* WinTV-HVR4400 (PCIe, DVB-S2, DVB-C/T) */
+ break;
+ case 121029:
+ /* WinTV-HVR5500 (PCIe, DVB-S2, DVB-C/T) */
+ break;
case 150329:
/* WinTV-HVR5525 (PCIe, DVB-S/S2, DVB-T/T2/C) */
break;
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index 02b5ec549369..8f63df1cb418 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -2056,7 +2056,7 @@ static void cx23885_finidev(struct pci_dev *pci_dev)
kfree(dev);
}
-static struct pci_device_id cx23885_pci_tbl[] = {
+static const struct pci_device_id cx23885_pci_tbl[] = {
{
/* CX23885 */
.vendor = 0x14f1,
diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c
index 979b66627f60..e795ddeb7fe2 100644
--- a/drivers/media/pci/cx23885/cx23885-dvb.c
+++ b/drivers/media/pci/cx23885/cx23885-dvb.c
@@ -2637,6 +2637,11 @@ int cx23885_dvb_unregister(struct cx23885_tsport *port)
struct vb2_dvb_frontend *fe0;
struct i2c_client *client;
+ fe0 = vb2_dvb_get_frontend(&port->frontends, 1);
+
+ if (fe0 && fe0->dvb.frontend)
+ vb2_dvb_unregister_bus(&port->frontends);
+
/* remove I2C client for CI */
client = port->i2c_client_ci;
if (client) {
@@ -2665,11 +2670,6 @@ int cx23885_dvb_unregister(struct cx23885_tsport *port)
i2c_unregister_device(client);
}
- fe0 = vb2_dvb_get_frontend(&port->frontends, 1);
-
- if (fe0 && fe0->dvb.frontend)
- vb2_dvb_unregister_bus(&port->frontends);
-
switch (port->dev->board) {
case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
netup_ci_exit(port);
diff --git a/drivers/media/pci/cx23885/cx23885-i2c.c b/drivers/media/pci/cx23885/cx23885-i2c.c
index 8528032090f2..0f21467ae88e 100644
--- a/drivers/media/pci/cx23885/cx23885-i2c.c
+++ b/drivers/media/pci/cx23885/cx23885-i2c.c
@@ -264,7 +264,7 @@ static const struct i2c_algorithm cx23885_i2c_algo_template = {
/* ----------------------------------------------------------------------- */
-static struct i2c_adapter cx23885_i2c_adap_template = {
+static const struct i2c_adapter cx23885_i2c_adap_template = {
.name = "cx23885",
.owner = THIS_MODULE,
.algo = &cx23885_i2c_algo_template,
diff --git a/drivers/media/pci/cx23885/cx23885-input.c b/drivers/media/pci/cx23885/cx23885-input.c
index 4367cb3162b6..944b70831f12 100644
--- a/drivers/media/pci/cx23885/cx23885-input.c
+++ b/drivers/media/pci/cx23885/cx23885-input.c
@@ -284,32 +284,32 @@ int cx23885_input_init(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_HVR1290:
case CX23885_BOARD_HAUPPAUGE_HVR1250:
/* Integrated CX2388[58] IR controller */
- allowed_protos = RC_BIT_ALL_IR_DECODER;
+ allowed_protos = RC_PROTO_BIT_ALL_IR_DECODER;
/* The grey Hauppauge RC-5 remote */
rc_map = RC_MAP_HAUPPAUGE;
break;
case CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL:
/* Integrated CX23885 IR controller */
- allowed_protos = RC_BIT_ALL_IR_DECODER;
+ allowed_protos = RC_PROTO_BIT_ALL_IR_DECODER;
/* The grey Terratec remote with orange buttons */
rc_map = RC_MAP_NEC_TERRATEC_CINERGY_XS;
break;
case CX23885_BOARD_TEVII_S470:
/* Integrated CX23885 IR controller */
- allowed_protos = RC_BIT_ALL_IR_DECODER;
+ allowed_protos = RC_PROTO_BIT_ALL_IR_DECODER;
/* A guess at the remote */
rc_map = RC_MAP_TEVII_NEC;
break;
case CX23885_BOARD_MYGICA_X8507:
/* Integrated CX23885 IR controller */
- allowed_protos = RC_BIT_ALL_IR_DECODER;
+ allowed_protos = RC_PROTO_BIT_ALL_IR_DECODER;
/* A guess at the remote */
rc_map = RC_MAP_TOTAL_MEDIA_IN_HAND_02;
break;
case CX23885_BOARD_TBS_6980:
case CX23885_BOARD_TBS_6981:
/* Integrated CX23885 IR controller */
- allowed_protos = RC_BIT_ALL_IR_DECODER;
+ allowed_protos = RC_PROTO_BIT_ALL_IR_DECODER;
/* A guess at the remote */
rc_map = RC_MAP_TBS_NEC;
break;
@@ -320,12 +320,12 @@ int cx23885_input_init(struct cx23885_dev *dev)
case CX23885_BOARD_DVBSKY_S952:
case CX23885_BOARD_DVBSKY_T982:
/* Integrated CX23885 IR controller */
- allowed_protos = RC_BIT_ALL_IR_DECODER;
+ allowed_protos = RC_PROTO_BIT_ALL_IR_DECODER;
rc_map = RC_MAP_DVBSKY;
break;
case CX23885_BOARD_TT_CT2_4500_CI:
/* Integrated CX23885 IR controller */
- allowed_protos = RC_BIT_ALL_IR_DECODER;
+ allowed_protos = RC_PROTO_BIT_ALL_IR_DECODER;
rc_map = RC_MAP_TT_1500;
break;
default:
@@ -351,7 +351,7 @@ int cx23885_input_init(struct cx23885_dev *dev)
}
kernel_ir->rc = rc;
- rc->input_name = kernel_ir->name;
+ rc->device_name = kernel_ir->name;
rc->input_phys = kernel_ir->phys;
rc->input_id.bustype = BUS_PCI;
rc->input_id.version = 1;
diff --git a/drivers/media/pci/cx25821/cx25821-alsa.c b/drivers/media/pci/cx25821/cx25821-alsa.c
index 519b81c0c837..2b34990e86f2 100644
--- a/drivers/media/pci/cx25821/cx25821-alsa.c
+++ b/drivers/media/pci/cx25821/cx25821-alsa.c
@@ -428,7 +428,7 @@ static int dsp_buffer_free(struct cx25821_audio_dev *chip)
* Digital hardware definition
*/
#define DEFAULT_FIFO_SIZE 384
-static struct snd_pcm_hardware snd_cx25821_digital_hw = {
+static const struct snd_pcm_hardware snd_cx25821_digital_hw = {
.info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
diff --git a/drivers/media/pci/cx25821/cx25821-core.c b/drivers/media/pci/cx25821/cx25821-core.c
index fbc0229183bd..04aa4a68a0ae 100644
--- a/drivers/media/pci/cx25821/cx25821-core.c
+++ b/drivers/media/pci/cx25821/cx25821-core.c
@@ -1390,10 +1390,7 @@ static struct pci_driver cx25821_pci_driver = {
static int __init cx25821_init(void)
{
- pr_info("driver version %d.%d.%d loaded\n",
- (CX25821_VERSION_CODE >> 16) & 0xff,
- (CX25821_VERSION_CODE >> 8) & 0xff,
- CX25821_VERSION_CODE & 0xff);
+ pr_info("driver loaded\n");
return pci_register_driver(&cx25821_pci_driver);
}
diff --git a/drivers/media/pci/cx25821/cx25821-i2c.c b/drivers/media/pci/cx25821/cx25821-i2c.c
index 263a1cf36ef1..000049d3c71b 100644
--- a/drivers/media/pci/cx25821/cx25821-i2c.c
+++ b/drivers/media/pci/cx25821/cx25821-i2c.c
@@ -285,7 +285,7 @@ static const struct i2c_algorithm cx25821_i2c_algo_template = {
#endif
};
-static struct i2c_adapter cx25821_i2c_adap_template = {
+static const struct i2c_adapter cx25821_i2c_adap_template = {
.name = "cx25821",
.owner = THIS_MODULE,
.algo = &cx25821_i2c_algo_template,
diff --git a/drivers/media/pci/cx25821/cx25821.h b/drivers/media/pci/cx25821/cx25821.h
index 0f20e89b0cde..b3eb2dabb30b 100644
--- a/drivers/media/pci/cx25821/cx25821.h
+++ b/drivers/media/pci/cx25821/cx25821.h
@@ -41,8 +41,6 @@
#include <linux/version.h>
#include <linux/mutex.h>
-#define CX25821_VERSION_CODE KERNEL_VERSION(0, 0, 106)
-
#define UNSET (-1U)
#define NO_SYNC_LINE (-1U)
diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c
index c81fe4681d14..9740326bc93f 100644
--- a/drivers/media/pci/cx88/cx88-alsa.c
+++ b/drivers/media/pci/cx88/cx88-alsa.c
@@ -799,7 +799,7 @@ static int snd_cx88_alc_put(struct snd_kcontrol *kcontrol,
return 0;
}
-static struct snd_kcontrol_new snd_cx88_alc_switch = {
+static const struct snd_kcontrol_new snd_cx88_alc_switch = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Line-In ALC Switch",
.info = snd_ctl_boolean_mono_info,
diff --git a/drivers/media/pci/cx88/cx88-blackbird.c b/drivers/media/pci/cx88/cx88-blackbird.c
index aa49c9597d9c..e3101f04941c 100644
--- a/drivers/media/pci/cx88/cx88-blackbird.c
+++ b/drivers/media/pci/cx88/cx88-blackbird.c
@@ -1075,7 +1075,7 @@ static const struct v4l2_ioctl_ops mpeg_ioctl_ops = {
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
-static struct video_device cx8802_mpeg_template = {
+static const struct video_device cx8802_mpeg_template = {
.name = "cx8802",
.fops = &mpeg_fops,
.ioctl_ops = &mpeg_ioctl_ops,
diff --git a/drivers/media/pci/cx88/cx88-input.c b/drivers/media/pci/cx88/cx88-input.c
index 01f2e472a2a0..e02449bf2041 100644
--- a/drivers/media/pci/cx88/cx88-input.c
+++ b/drivers/media/pci/cx88/cx88-input.c
@@ -132,7 +132,7 @@ static void cx88_ir_handle_key(struct cx88_IR *ir)
data = (data << 4) | ((gpio_key & 0xf0) >> 4);
- rc_keydown(ir->dev, RC_TYPE_UNKNOWN, data, 0);
+ rc_keydown(ir->dev, RC_PROTO_UNKNOWN, data, 0);
} else if (ir->core->boardnr == CX88_BOARD_PROLINK_PLAYTVPVR ||
ir->core->boardnr == CX88_BOARD_PIXELVIEW_PLAYTV_ULTRA_PRO) {
@@ -146,7 +146,7 @@ static void cx88_ir_handle_key(struct cx88_IR *ir)
scancode = RC_SCANCODE_NECX(addr, cmd);
if (0 == (gpio & ir->mask_keyup))
- rc_keydown_notimeout(ir->dev, RC_TYPE_NECX, scancode,
+ rc_keydown_notimeout(ir->dev, RC_PROTO_NECX, scancode,
0);
else
rc_keyup(ir->dev);
@@ -154,20 +154,22 @@ static void cx88_ir_handle_key(struct cx88_IR *ir)
} else if (ir->mask_keydown) {
/* bit set on keydown */
if (gpio & ir->mask_keydown)
- rc_keydown_notimeout(ir->dev, RC_TYPE_UNKNOWN, data, 0);
+ rc_keydown_notimeout(ir->dev, RC_PROTO_UNKNOWN, data,
+ 0);
else
rc_keyup(ir->dev);
} else if (ir->mask_keyup) {
/* bit cleared on keydown */
if (0 == (gpio & ir->mask_keyup))
- rc_keydown_notimeout(ir->dev, RC_TYPE_UNKNOWN, data, 0);
+ rc_keydown_notimeout(ir->dev, RC_PROTO_UNKNOWN, data,
+ 0);
else
rc_keyup(ir->dev);
} else {
/* can't distinguish keydown/up :-/ */
- rc_keydown_notimeout(ir->dev, RC_TYPE_UNKNOWN, data, 0);
+ rc_keydown_notimeout(ir->dev, RC_PROTO_UNKNOWN, data, 0);
rc_keyup(ir->dev);
}
}
@@ -267,7 +269,7 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
struct cx88_IR *ir;
struct rc_dev *dev;
char *ir_codes = NULL;
- u64 rc_type = RC_BIT_OTHER;
+ u64 rc_proto = RC_PROTO_BIT_OTHER;
int err = -ENOMEM;
u32 hardware_mask = 0; /* For devices with a hardware mask, when
* used with a full-code IR table
@@ -348,7 +350,7 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
* 002-T mini RC, provided with newer PV hardware
*/
ir_codes = RC_MAP_PIXELVIEW_MK12;
- rc_type = RC_BIT_NECX;
+ rc_proto = RC_PROTO_BIT_NECX;
ir->gpio_addr = MO_GP1_IO;
ir->mask_keyup = 0x80;
ir->polling = 10; /* ms */
@@ -464,7 +466,7 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
snprintf(ir->name, sizeof(ir->name), "cx88 IR (%s)", core->board.name);
snprintf(ir->phys, sizeof(ir->phys), "pci-%s/ir0", pci_name(pci));
- dev->input_name = ir->name;
+ dev->device_name = ir->name;
dev->input_phys = ir->phys;
dev->input_id.bustype = BUS_PCI;
dev->input_id.version = 1;
@@ -487,7 +489,7 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
dev->timeout = 10 * 1000 * 1000; /* 10 ms */
} else {
dev->driver_type = RC_DRIVER_SCANCODE;
- dev->allowed_protocols = rc_type;
+ dev->allowed_protocols = rc_proto;
}
ir->core = core;
@@ -557,7 +559,7 @@ void cx88_ir_irq(struct cx88_core *core)
ir_raw_event_handle(ir->dev);
}
-static int get_key_pvr2000(struct IR_i2c *ir, enum rc_type *protocol,
+static int get_key_pvr2000(struct IR_i2c *ir, enum rc_proto *protocol,
u32 *scancode, u8 *toggle)
{
int flags, code;
@@ -582,7 +584,7 @@ static int get_key_pvr2000(struct IR_i2c *ir, enum rc_type *protocol,
dprintk("IR Key/Flags: (0x%02x/0x%02x)\n",
code & 0xff, flags & 0xff);
- *protocol = RC_TYPE_UNKNOWN;
+ *protocol = RC_PROTO_UNKNOWN;
*scancode = code & 0xff;
*toggle = 0;
return 1;
@@ -612,7 +614,7 @@ void cx88_i2c_init_ir(struct cx88_core *core)
case CX88_BOARD_LEADTEK_PVR2000:
addr_list = pvr2000_addr_list;
core->init_data.name = "cx88 Leadtek PVR 2000 remote";
- core->init_data.type = RC_BIT_UNKNOWN;
+ core->init_data.type = RC_PROTO_BIT_UNKNOWN;
core->init_data.get_key = get_key_pvr2000;
core->init_data.ir_codes = RC_MAP_EMPTY;
break;
@@ -633,8 +635,8 @@ void cx88_i2c_init_ir(struct cx88_core *core)
/* Hauppauge XVR */
core->init_data.name = "cx88 Hauppauge XVR remote";
core->init_data.ir_codes = RC_MAP_HAUPPAUGE;
- core->init_data.type = RC_BIT_RC5 | RC_BIT_RC6_MCE |
- RC_BIT_RC6_6A_32;
+ core->init_data.type = RC_PROTO_BIT_RC5 |
+ RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_RC6_6A_32;
core->init_data.internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
info.platform_data = &core->init_data;
diff --git a/drivers/media/pci/ddbridge/Kconfig b/drivers/media/pci/ddbridge/Kconfig
index ffed78c2ffb4..f43d0b83fc0c 100644
--- a/drivers/media/pci/ddbridge/Kconfig
+++ b/drivers/media/pci/ddbridge/Kconfig
@@ -8,7 +8,11 @@ config DVB_DDBRIDGE
select DVB_TDA18271C2DD if MEDIA_SUBDRV_AUTOSELECT
select DVB_STV0367 if MEDIA_SUBDRV_AUTOSELECT
select DVB_CXD2841ER if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_STV0910 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_STV6111 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_LNBH25 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_TDA18212 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_MXL5XX if MEDIA_SUBDRV_AUTOSELECT
---help---
Support for cards with the Digital Devices PCI express bridge:
- Octopus PCIe Bridge
@@ -20,5 +24,22 @@ config DVB_DDBRIDGE
- CineCTv6 and DuoFlex CT (STV0367-based)
- CineCTv7 and DuoFlex CT2/C2T2/C2T2I (Sony CXD28xx-based)
- MaxA8 series
+ - CineS2 V7/V7A and DuoFlex S2 V4 (ST STV0910-based)
+ - Max S4/8
Say Y if you own such a card and want to use it.
+
+config DVB_DDBRIDGE_MSIENABLE
+ bool "Enable Message Signaled Interrupts (MSI) per default (EXPERIMENTAL)"
+ depends on DVB_DDBRIDGE
+ depends on PCI_MSI
+ default n
+ ---help---
+	  Use PCI MSI (Message Signaled Interrupts) by default. Enabling this
+	  might lead to I2C errors originating from the bridge in conjunction
+	  with certain SATA controllers, requiring a reload of the ddbridge
+	  module. MSI can still be disabled by passing msi=0 as a module
+	  option; this switch merely changes the option's default value.
+
+	  If you're unsure, are concerned about stability, and don't want to
+	  pass module options in case of trouble, say N.
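
Note: the new DVB_DDBRIDGE_MSIENABLE switch only selects the default of the driver's msi module option, as the help text above describes. A minimal sketch of how that could be wired up in ddbridge-main.c follows; the variable name, default handling and permission bits are illustrative assumptions, not part of this patch.

	#include <linux/module.h>

	/* Hypothetical sketch: let the Kconfig switch pick the default of "msi". */
	#ifdef CONFIG_DVB_DDBRIDGE_MSIENABLE
	static int msi = 1;
	#else
	static int msi;
	#endif
	module_param(msi, int, 0444);
	MODULE_PARM_DESC(msi, "Control MSI interrupts: 0-disable, 1-enable (default set by Kconfig)");

With such a setup, "modprobe ddbridge msi=0" still disables MSI regardless of the compiled-in default.
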
diff --git a/drivers/media/pci/ddbridge/Makefile b/drivers/media/pci/ddbridge/Makefile
index 7446c8b677b5..09703312a3f1 100644
--- a/drivers/media/pci/ddbridge/Makefile
+++ b/drivers/media/pci/ddbridge/Makefile
@@ -2,7 +2,8 @@
# Makefile for the ddbridge device driver
#
-ddbridge-objs := ddbridge-core.o
+ddbridge-objs := ddbridge-main.o ddbridge-core.o ddbridge-hw.o \
+ ddbridge-i2c.o ddbridge-maxs8.o
obj-$(CONFIG_DVB_DDBRIDGE) += ddbridge.o
diff --git a/drivers/media/pci/ddbridge/ddbridge-core.c b/drivers/media/pci/ddbridge/ddbridge-core.c
index 9420479bee9a..f4bd4908acdd 100644
--- a/drivers/media/pci/ddbridge/ddbridge-core.c
+++ b/drivers/media/pci/ddbridge/ddbridge-core.c
@@ -1,7 +1,10 @@
/*
- * ddbridge.c: Digital Devices PCIe bridge driver
+ * ddbridge-core.c: Digital Devices bridge core functions
+ *
+ * Copyright (C) 2010-2017 Digital Devices GmbH
+ * Marcus Metzler <mocm@metzlerbros.de>
+ * Ralph Metzler <rjkm@metzlerbros.de>
*
- * Copyright (C) 2010-2011 Digital Devices GmbH
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -30,9 +33,12 @@
#include <linux/i2c.h>
#include <linux/swab.h>
#include <linux/vmalloc.h>
-#include "ddbridge.h"
+#include "ddbridge.h"
+#include "ddbridge-i2c.h"
#include "ddbridge-regs.h"
+#include "ddbridge-maxs8.h"
+#include "ddbridge-io.h"
#include "tda18271c2dd.h"
#include "stv6110x.h"
@@ -43,433 +49,511 @@
#include "stv0367_priv.h"
#include "cxd2841er.h"
#include "tda18212.h"
+#include "stv0910.h"
+#include "stv6111.h"
+#include "lnbh25.h"
+#include "cxd2099.h"
-static int xo2_speed = 2;
-module_param(xo2_speed, int, 0444);
-MODULE_PARM_DESC(xo2_speed, "default transfer speed for xo2 based duoflex, 0=55,1=75,2=90,3=104 MBit/s, default=2, use attribute to change for individual cards");
+/****************************************************************************/
-DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
+#define DDB_MAX_ADAPTER 64
-/* MSI had problems with lost interrupts, fixed but needs testing */
-#undef CONFIG_PCI_MSI
+/****************************************************************************/
-/******************************************************************************/
+DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
-static int i2c_io(struct i2c_adapter *adapter, u8 adr,
- u8 *wbuf, u32 wlen, u8 *rbuf, u32 rlen)
-{
- struct i2c_msg msgs[2] = {{.addr = adr, .flags = 0,
- .buf = wbuf, .len = wlen },
- {.addr = adr, .flags = I2C_M_RD,
- .buf = rbuf, .len = rlen } };
- return (i2c_transfer(adapter, msgs, 2) == 2) ? 0 : -1;
-}
+static int adapter_alloc;
+module_param(adapter_alloc, int, 0444);
+MODULE_PARM_DESC(adapter_alloc,
+ "0-one adapter per io, 1-one per tab with io, 2-one per tab, 3-one for all");
-static int i2c_write(struct i2c_adapter *adap, u8 adr, u8 *data, int len)
-{
- struct i2c_msg msg = {.addr = adr, .flags = 0,
- .buf = data, .len = len};
-
- return (i2c_transfer(adap, &msg, 1) == 1) ? 0 : -1;
-}
+/****************************************************************************/
-static int i2c_read(struct i2c_adapter *adapter, u8 adr, u8 *val)
-{
- struct i2c_msg msgs[1] = {{.addr = adr, .flags = I2C_M_RD,
- .buf = val, .len = 1 } };
- return (i2c_transfer(adapter, msgs, 1) == 1) ? 0 : -1;
-}
+static DEFINE_MUTEX(redirect_lock);
-static int i2c_read_regs(struct i2c_adapter *adapter,
- u8 adr, u8 reg, u8 *val, u8 len)
-{
- struct i2c_msg msgs[2] = {{.addr = adr, .flags = 0,
- .buf = &reg, .len = 1 },
- {.addr = adr, .flags = I2C_M_RD,
- .buf = val, .len = len } };
- return (i2c_transfer(adapter, msgs, 2) == 2) ? 0 : -1;
-}
+struct workqueue_struct *ddb_wq;
-static int i2c_read_reg(struct i2c_adapter *adapter, u8 adr, u8 reg, u8 *val)
-{
- return i2c_read_regs(adapter, adr, reg, val, 1);
-}
+static struct ddb *ddbs[DDB_MAX_ADAPTER];
-static int i2c_read_reg16(struct i2c_adapter *adapter, u8 adr,
- u16 reg, u8 *val)
-{
- u8 msg[2] = {reg>>8, reg&0xff};
- struct i2c_msg msgs[2] = {{.addr = adr, .flags = 0,
- .buf = msg, .len = 2},
- {.addr = adr, .flags = I2C_M_RD,
- .buf = val, .len = 1} };
- return (i2c_transfer(adapter, msgs, 2) == 2) ? 0 : -1;
-}
-
-static int i2c_write_reg(struct i2c_adapter *adap, u8 adr,
- u8 reg, u8 val)
-{
- u8 msg[2] = {reg, val};
-
- return i2c_write(adap, adr, msg, 2);
-}
+/****************************************************************************/
+/****************************************************************************/
+/****************************************************************************/
-static int ddb_i2c_cmd(struct ddb_i2c *i2c, u32 adr, u32 cmd)
+static void ddb_set_dma_table(struct ddb_io *io)
{
- struct ddb *dev = i2c->dev;
- long stat;
- u32 val;
+ struct ddb *dev = io->port->dev;
+ struct ddb_dma *dma = io->dma;
+ u32 i;
+ u64 mem;
- i2c->done = 0;
- ddbwritel((adr << 9) | cmd, i2c->regs + I2C_COMMAND);
- stat = wait_event_timeout(i2c->wq, i2c->done == 1, HZ);
- if (stat == 0) {
- printk(KERN_ERR "I2C timeout\n");
- { /* MSI debugging*/
- u32 istat = ddbreadl(INTERRUPT_STATUS);
- printk(KERN_ERR "IRS %08x\n", istat);
- ddbwritel(istat, INTERRUPT_ACK);
- }
- return -EIO;
+ if (!dma)
+ return;
+ for (i = 0; i < dma->num; i++) {
+ mem = dma->pbuf[i];
+ ddbwritel(dev, mem & 0xffffffff, dma->bufregs + i * 8);
+ ddbwritel(dev, mem >> 32, dma->bufregs + i * 8 + 4);
}
- val = ddbreadl(i2c->regs+I2C_COMMAND);
- if (val & 0x70000)
- return -EIO;
- return 0;
+ dma->bufval = ((dma->div & 0x0f) << 16) |
+ ((dma->num & 0x1f) << 11) |
+ ((dma->size >> 7) & 0x7ff);
}
-static int ddb_i2c_master_xfer(struct i2c_adapter *adapter,
- struct i2c_msg msg[], int num)
+static void ddb_set_dma_tables(struct ddb *dev)
{
- struct ddb_i2c *i2c = (struct ddb_i2c *)i2c_get_adapdata(adapter);
- struct ddb *dev = i2c->dev;
- u8 addr = 0;
-
- if (num)
- addr = msg[0].addr;
-
- if (num == 2 && msg[1].flags & I2C_M_RD &&
- !(msg[0].flags & I2C_M_RD)) {
- memcpy_toio(dev->regs + I2C_TASKMEM_BASE + i2c->wbuf,
- msg[0].buf, msg[0].len);
- ddbwritel(msg[0].len|(msg[1].len << 16),
- i2c->regs+I2C_TASKLENGTH);
- if (!ddb_i2c_cmd(i2c, addr, 1)) {
- memcpy_fromio(msg[1].buf,
- dev->regs + I2C_TASKMEM_BASE + i2c->rbuf,
- msg[1].len);
- return num;
- }
- }
+ u32 i;
- if (num == 1 && !(msg[0].flags & I2C_M_RD)) {
- ddbcpyto(I2C_TASKMEM_BASE + i2c->wbuf, msg[0].buf, msg[0].len);
- ddbwritel(msg[0].len, i2c->regs + I2C_TASKLENGTH);
- if (!ddb_i2c_cmd(i2c, addr, 2))
- return num;
+ for (i = 0; i < DDB_MAX_PORT; i++) {
+ if (dev->port[i].input[0])
+ ddb_set_dma_table(dev->port[i].input[0]);
+ if (dev->port[i].input[1])
+ ddb_set_dma_table(dev->port[i].input[1]);
+ if (dev->port[i].output)
+ ddb_set_dma_table(dev->port[i].output);
}
- if (num == 1 && (msg[0].flags & I2C_M_RD)) {
- ddbwritel(msg[0].len << 16, i2c->regs + I2C_TASKLENGTH);
- if (!ddb_i2c_cmd(i2c, addr, 3)) {
- ddbcpyfrom(msg[0].buf,
- I2C_TASKMEM_BASE + i2c->rbuf, msg[0].len);
- return num;
- }
- }
- return -EIO;
}
-static u32 ddb_i2c_functionality(struct i2c_adapter *adap)
-{
- return I2C_FUNC_SMBUS_EMUL;
-}
-
-static struct i2c_algorithm ddb_i2c_algo = {
- .master_xfer = ddb_i2c_master_xfer,
- .functionality = ddb_i2c_functionality,
-};
+/****************************************************************************/
+/****************************************************************************/
+/****************************************************************************/
-static void ddb_i2c_release(struct ddb *dev)
+static void ddb_redirect_dma(struct ddb *dev,
+ struct ddb_dma *sdma,
+ struct ddb_dma *ddma)
{
- int i;
- struct ddb_i2c *i2c;
- struct i2c_adapter *adap;
+ u32 i, base;
+ u64 mem;
- for (i = 0; i < dev->info->port_num; i++) {
- i2c = &dev->i2c[i];
- adap = &i2c->adap;
- i2c_del_adapter(adap);
+ sdma->bufval = ddma->bufval;
+ base = sdma->bufregs;
+ for (i = 0; i < ddma->num; i++) {
+ mem = ddma->pbuf[i];
+ ddbwritel(dev, mem & 0xffffffff, base + i * 8);
+ ddbwritel(dev, mem >> 32, base + i * 8 + 4);
}
}
-static int ddb_i2c_init(struct ddb *dev)
+static int ddb_unredirect(struct ddb_port *port)
{
- int i, j, stat = 0;
- struct ddb_i2c *i2c;
- struct i2c_adapter *adap;
+ struct ddb_input *oredi, *iredi = NULL;
+ struct ddb_output *iredo = NULL;
- for (i = 0; i < dev->info->port_num; i++) {
- i2c = &dev->i2c[i];
- i2c->dev = dev;
- i2c->nr = i;
- i2c->wbuf = i * (I2C_TASKMEM_SIZE / 4);
- i2c->rbuf = i2c->wbuf + (I2C_TASKMEM_SIZE / 8);
- i2c->regs = 0x80 + i * 0x20;
- ddbwritel(I2C_SPEED_100, i2c->regs + I2C_TIMING);
- ddbwritel((i2c->rbuf << 16) | i2c->wbuf,
- i2c->regs + I2C_TASKADDRESS);
- init_waitqueue_head(&i2c->wq);
-
- adap = &i2c->adap;
- i2c_set_adapdata(adap, i2c);
-#ifdef I2C_ADAP_CLASS_TV_DIGITAL
- adap->class = I2C_ADAP_CLASS_TV_DIGITAL|I2C_CLASS_TV_ANALOG;
-#else
-#ifdef I2C_CLASS_TV_ANALOG
- adap->class = I2C_CLASS_TV_ANALOG;
-#endif
-#endif
- strcpy(adap->name, "ddbridge");
- adap->algo = &ddb_i2c_algo;
- adap->algo_data = (void *)i2c;
- adap->dev.parent = &dev->pdev->dev;
- stat = i2c_add_adapter(adap);
- if (stat)
- break;
+ /* dev_info(port->dev->dev,
+ * "unredirect %d.%d\n", port->dev->nr, port->nr);
+ */
+ mutex_lock(&redirect_lock);
+ if (port->output->dma->running) {
+ mutex_unlock(&redirect_lock);
+ return -EBUSY;
}
- if (stat)
- for (j = 0; j < i; j++) {
- i2c = &dev->i2c[j];
- adap = &i2c->adap;
- i2c_del_adapter(adap);
+ oredi = port->output->redi;
+ if (!oredi)
+ goto done;
+ if (port->input[0]) {
+ iredi = port->input[0]->redi;
+ iredo = port->input[0]->redo;
+
+ if (iredo) {
+ iredo->port->output->redi = oredi;
+ if (iredo->port->input[0]) {
+ iredo->port->input[0]->redi = iredi;
+ ddb_redirect_dma(oredi->port->dev,
+ oredi->dma, iredo->dma);
+ }
+ port->input[0]->redo = NULL;
+ ddb_set_dma_table(port->input[0]);
}
- return stat;
+ oredi->redi = iredi;
+ port->input[0]->redi = NULL;
+ }
+ oredi->redo = NULL;
+ port->output->redi = NULL;
+
+ ddb_set_dma_table(oredi);
+done:
+ mutex_unlock(&redirect_lock);
+ return 0;
}
+static int ddb_redirect(u32 i, u32 p)
+{
+ struct ddb *idev = ddbs[(i >> 4) & 0x3f];
+ struct ddb_input *input, *input2;
+ struct ddb *pdev = ddbs[(p >> 4) & 0x3f];
+ struct ddb_port *port;
+
+ if (!idev || !pdev)
+ return -EINVAL;
+ if (!idev->has_dma || !pdev->has_dma)
+ return -EINVAL;
-/******************************************************************************/
-/******************************************************************************/
-/******************************************************************************/
+ port = &pdev->port[p & 0x0f];
+ if (!port->output)
+ return -EINVAL;
+ if (ddb_unredirect(port))
+ return -EBUSY;
-#if 0
-static void set_table(struct ddb *dev, u32 off,
- dma_addr_t *pbuf, u32 num)
-{
- u32 i, base;
- u64 mem;
+ if (i == 8)
+ return 0;
- base = DMA_BASE_ADDRESS_TABLE + off;
- for (i = 0; i < num; i++) {
- mem = pbuf[i];
- ddbwritel(mem & 0xffffffff, base + i * 8);
- ddbwritel(mem >> 32, base + i * 8 + 4);
- }
-}
-#endif
+ input = &idev->input[i & 7];
+ if (!input)
+ return -EINVAL;
-static void ddb_address_table(struct ddb *dev)
-{
- u32 i, j, base;
- u64 mem;
- dma_addr_t *pbuf;
-
- for (i = 0; i < dev->info->port_num * 2; i++) {
- base = DMA_BASE_ADDRESS_TABLE + i * 0x100;
- pbuf = dev->input[i].pbuf;
- for (j = 0; j < dev->input[i].dma_buf_num; j++) {
- mem = pbuf[j];
- ddbwritel(mem & 0xffffffff, base + j * 8);
- ddbwritel(mem >> 32, base + j * 8 + 4);
- }
+ mutex_lock(&redirect_lock);
+ if (port->output->dma->running || input->dma->running) {
+ mutex_unlock(&redirect_lock);
+ return -EBUSY;
}
- for (i = 0; i < dev->info->port_num; i++) {
- base = DMA_BASE_ADDRESS_TABLE + 0x800 + i * 0x100;
- pbuf = dev->output[i].pbuf;
- for (j = 0; j < dev->output[i].dma_buf_num; j++) {
- mem = pbuf[j];
- ddbwritel(mem & 0xffffffff, base + j * 8);
- ddbwritel(mem >> 32, base + j * 8 + 4);
- }
+ input2 = port->input[0];
+ if (input2) {
+ if (input->redi) {
+ input2->redi = input->redi;
+ input->redi = NULL;
+ } else
+ input2->redi = input;
}
+ input->redo = port->output;
+ port->output->redi = input;
+
+ ddb_redirect_dma(input->port->dev, input->dma, port->output->dma);
+ mutex_unlock(&redirect_lock);
+ return 0;
}
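
[Editor's note] The i and p arguments of ddb_redirect() are packed handles rather than plain indices; i == 8 appears to act as a "disconnect only" value, since the function returns right after ddb_unredirect() in that case. Decoding them as the masks above suggest, in a standalone sketch with hypothetical names:

#include <stdint.h>

/* Hypothetical decoder mirroring the masks used in ddb_redirect(). */
static void ddb_decode_redirect(uint32_t i, uint32_t p)
{
	uint32_t src_card  = (i >> 4) & 0x3f; /* index into ddbs[] for the source */
	uint32_t src_input = i & 7;           /* input number on that card */
	uint32_t dst_card  = (p >> 4) & 0x3f; /* index into ddbs[] for the target */
	uint32_t dst_port  = p & 0x0f;        /* port whose output receives the TS */

	(void)src_card; (void)src_input; (void)dst_card; (void)dst_port;
}
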
-static void io_free(struct pci_dev *pdev, u8 **vbuf,
- dma_addr_t *pbuf, u32 size, int num)
+/****************************************************************************/
+/****************************************************************************/
+/****************************************************************************/
+
+static void dma_free(struct pci_dev *pdev, struct ddb_dma *dma, int dir)
{
int i;
- for (i = 0; i < num; i++) {
- if (vbuf[i]) {
- pci_free_consistent(pdev, size, vbuf[i], pbuf[i]);
- vbuf[i] = NULL;
+ if (!dma)
+ return;
+ for (i = 0; i < dma->num; i++) {
+ if (dma->vbuf[i]) {
+ if (alt_dma) {
+ dma_unmap_single(&pdev->dev, dma->pbuf[i],
+ dma->size,
+ dir ? DMA_TO_DEVICE :
+ DMA_FROM_DEVICE);
+ kfree(dma->vbuf[i]);
+ dma->vbuf[i] = NULL;
+ } else {
+ dma_free_coherent(&pdev->dev, dma->size,
+ dma->vbuf[i], dma->pbuf[i]);
+ }
+
+ dma->vbuf[i] = NULL;
}
}
}
-static int io_alloc(struct pci_dev *pdev, u8 **vbuf,
- dma_addr_t *pbuf, u32 size, int num)
+static int dma_alloc(struct pci_dev *pdev, struct ddb_dma *dma, int dir)
{
int i;
- for (i = 0; i < num; i++) {
- vbuf[i] = pci_alloc_consistent(pdev, size, &pbuf[i]);
- if (!vbuf[i])
- return -ENOMEM;
+ if (!dma)
+ return 0;
+ for (i = 0; i < dma->num; i++) {
+ if (alt_dma) {
+ dma->vbuf[i] = kmalloc(dma->size, __GFP_RETRY_MAYFAIL);
+ if (!dma->vbuf[i])
+ return -ENOMEM;
+ dma->pbuf[i] = dma_map_single(&pdev->dev,
+ dma->vbuf[i],
+ dma->size,
+ dir ? DMA_TO_DEVICE :
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, dma->pbuf[i])) {
+ kfree(dma->vbuf[i]);
+ dma->vbuf[i] = NULL;
+ return -ENOMEM;
+ }
+ } else {
+ dma->vbuf[i] = dma_alloc_coherent(&pdev->dev,
+ dma->size,
+ &dma->pbuf[i],
+ GFP_KERNEL);
+ if (!dma->vbuf[i])
+ return -ENOMEM;
+ }
}
return 0;
}
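
[Editor's note] dma_alloc()/dma_free() support two buffer strategies selected by the alt_dma module parameter: coherent buffers from dma_alloc_coherent(), or plain kmalloc() memory mapped with the streaming DMA API, in which case the read/write paths further below must call dma_sync_single_for_cpu()/_for_device() explicitly. A condensed sketch of the streaming path follows (simplified, not driver code); note that the driver passes only the __GFP_RETRY_MAYFAIL modifier to kmalloc(), while the sketch adds GFP_KERNEL, the usual base flag for process-context allocations:

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/slab.h>

/* Condensed streaming-DMA allocation pattern (sketch only). */
static int alloc_streaming_buf(struct pci_dev *pdev, size_t size, int dir,
			       void **vbuf, dma_addr_t *pbuf)
{
	*vbuf = kmalloc(size, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!*vbuf)
		return -ENOMEM;
	*pbuf = dma_map_single(&pdev->dev, *vbuf, size,
			       dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, *pbuf)) {
		kfree(*vbuf);
		*vbuf = NULL;
		return -ENOMEM;
	}
	return 0;
}
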
-static int ddb_buffers_alloc(struct ddb *dev)
+int ddb_buffers_alloc(struct ddb *dev)
{
int i;
struct ddb_port *port;
- for (i = 0; i < dev->info->port_num; i++) {
+ for (i = 0; i < dev->port_num; i++) {
port = &dev->port[i];
switch (port->class) {
case DDB_PORT_TUNER:
- if (io_alloc(dev->pdev, port->input[0]->vbuf,
- port->input[0]->pbuf,
- port->input[0]->dma_buf_size,
- port->input[0]->dma_buf_num) < 0)
- return -1;
- if (io_alloc(dev->pdev, port->input[1]->vbuf,
- port->input[1]->pbuf,
- port->input[1]->dma_buf_size,
- port->input[1]->dma_buf_num) < 0)
- return -1;
+ if (port->input[0]->dma)
+ if (dma_alloc(dev->pdev, port->input[0]->dma, 0)
+ < 0)
+ return -1;
+ if (port->input[1]->dma)
+ if (dma_alloc(dev->pdev, port->input[1]->dma, 0)
+ < 0)
+ return -1;
break;
case DDB_PORT_CI:
- if (io_alloc(dev->pdev, port->input[0]->vbuf,
- port->input[0]->pbuf,
- port->input[0]->dma_buf_size,
- port->input[0]->dma_buf_num) < 0)
- return -1;
- if (io_alloc(dev->pdev, port->output->vbuf,
- port->output->pbuf,
- port->output->dma_buf_size,
- port->output->dma_buf_num) < 0)
- return -1;
+ case DDB_PORT_LOOP:
+ if (port->input[0]->dma)
+ if (dma_alloc(dev->pdev, port->input[0]->dma, 0)
+ < 0)
+ return -1;
+ if (port->output->dma)
+ if (dma_alloc(dev->pdev, port->output->dma, 1)
+ < 0)
+ return -1;
break;
default:
break;
}
}
- ddb_address_table(dev);
+ ddb_set_dma_tables(dev);
return 0;
}
-static void ddb_buffers_free(struct ddb *dev)
+void ddb_buffers_free(struct ddb *dev)
{
int i;
struct ddb_port *port;
- for (i = 0; i < dev->info->port_num; i++) {
+ for (i = 0; i < dev->port_num; i++) {
port = &dev->port[i];
- io_free(dev->pdev, port->input[0]->vbuf,
- port->input[0]->pbuf,
- port->input[0]->dma_buf_size,
- port->input[0]->dma_buf_num);
- io_free(dev->pdev, port->input[1]->vbuf,
- port->input[1]->pbuf,
- port->input[1]->dma_buf_size,
- port->input[1]->dma_buf_num);
- io_free(dev->pdev, port->output->vbuf,
- port->output->pbuf,
- port->output->dma_buf_size,
- port->output->dma_buf_num);
+
+ if (port->input[0] && port->input[0]->dma)
+ dma_free(dev->pdev, port->input[0]->dma, 0);
+ if (port->input[1] && port->input[1]->dma)
+ dma_free(dev->pdev, port->input[1]->dma, 0);
+ if (port->output && port->output->dma)
+ dma_free(dev->pdev, port->output->dma, 1);
}
}
-static void ddb_input_start(struct ddb_input *input)
+static void calc_con(struct ddb_output *output, u32 *con, u32 *con2, u32 flags)
{
- struct ddb *dev = input->port->dev;
+ struct ddb *dev = output->port->dev;
+ u32 bitrate = output->port->obr, max_bitrate = 72000;
+ u32 gap = 4, nco = 0;
+
+ *con = 0x1c;
+ if (output->port->gap != 0xffffffff) {
+ flags |= 1;
+ gap = output->port->gap;
+ max_bitrate = 0;
+ }
+ if (dev->link[0].info->type == DDB_OCTOPUS_CI && output->port->nr > 1) {
+ *con = 0x10c;
+ if (dev->link[0].ids.regmapid >= 0x10003 && !(flags & 1)) {
+ if (!(flags & 2)) {
+ /* NCO */
+ max_bitrate = 0;
+ gap = 0;
+ if (bitrate != 72000) {
+ if (bitrate >= 96000)
+ *con |= 0x800;
+ else {
+ *con |= 0x1000;
+ nco = (bitrate * 8192 + 71999)
+ / 72000;
+ }
+ }
+ } else {
+ /* Divider and gap */
+ *con |= 0x1810;
+ if (bitrate <= 64000) {
+ max_bitrate = 64000;
+ nco = 8;
+ } else if (bitrate <= 72000) {
+ max_bitrate = 72000;
+ nco = 7;
+ } else {
+ max_bitrate = 96000;
+ nco = 5;
+ }
+ }
+ } else {
+ if (bitrate > 72000) {
+ *con |= 0x810; /* 96 MBit/s and gap */
+ max_bitrate = 96000;
+ }
+ *con |= 0x10; /* enable gap */
+ }
+ }
+ if (max_bitrate > 0) {
+ if (bitrate > max_bitrate)
+ bitrate = max_bitrate;
+ if (bitrate < 31000)
+ bitrate = 31000;
+ gap = ((max_bitrate - bitrate) * 94) / bitrate;
+ if (gap < 2)
+ *con &= ~0x10; /* Disable gap */
+ else
+ gap -= 2;
+ if (gap > 127)
+ gap = 127;
+ }
+
+ *con2 = (nco << 16) | gap;
+}
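
[Editor's note] The gap arithmetic in calc_con() is easier to follow with concrete numbers. With the default max_bitrate of 72000 (apparently kbit/s, given the "96 MBit/s" comment) and a requested output bitrate of 36000, the gap is ((72000 - 36000) * 94) / 36000 = 94, reduced by 2 to 92 and well under the 127 clamp. A minimal standalone check of just that arithmetic; the helper name is hypothetical:

#include <stdint.h>

/* Reproduces only the final gap computation from calc_con() above. */
static uint32_t calc_gap(uint32_t bitrate, uint32_t max_bitrate)
{
	uint32_t gap;

	if (bitrate > max_bitrate)
		bitrate = max_bitrate;
	if (bitrate < 31000)
		bitrate = 31000;
	gap = ((max_bitrate - bitrate) * 94) / bitrate;
	if (gap < 2)
		return 0;	/* real code clears the gap-enable bit instead */
	gap -= 2;
	return gap > 127 ? 127 : gap;
}
/* calc_gap(36000, 72000) == 92 */
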
+
+static void ddb_output_start(struct ddb_output *output)
+{
+ struct ddb *dev = output->port->dev;
+ u32 con = 0x11c, con2 = 0;
+
+ if (output->dma) {
+ spin_lock_irq(&output->dma->lock);
+ output->dma->cbuf = 0;
+ output->dma->coff = 0;
+ output->dma->stat = 0;
+ ddbwritel(dev, 0, DMA_BUFFER_CONTROL(output->dma));
+ }
+
+ if (output->port->input[0]->port->class == DDB_PORT_LOOP)
+ con = (1UL << 13) | 0x14;
+ else
+ calc_con(output, &con, &con2, 0);
+
+ ddbwritel(dev, 0, TS_CONTROL(output));
+ ddbwritel(dev, 2, TS_CONTROL(output));
+ ddbwritel(dev, 0, TS_CONTROL(output));
+ ddbwritel(dev, con, TS_CONTROL(output));
+ ddbwritel(dev, con2, TS_CONTROL2(output));
+
+ if (output->dma) {
+ ddbwritel(dev, output->dma->bufval,
+ DMA_BUFFER_SIZE(output->dma));
+ ddbwritel(dev, 0, DMA_BUFFER_ACK(output->dma));
+ ddbwritel(dev, 1, DMA_BASE_READ);
+ ddbwritel(dev, 7, DMA_BUFFER_CONTROL(output->dma));
+ }
+
+ ddbwritel(dev, con | 1, TS_CONTROL(output));
+
+ if (output->dma) {
+ output->dma->running = 1;
+ spin_unlock_irq(&output->dma->lock);
+ }
+}
- spin_lock_irq(&input->lock);
- input->cbuf = 0;
- input->coff = 0;
+static void ddb_output_stop(struct ddb_output *output)
+{
+ struct ddb *dev = output->port->dev;
- /* reset */
- ddbwritel(0, TS_INPUT_CONTROL(input->nr));
- ddbwritel(2, TS_INPUT_CONTROL(input->nr));
- ddbwritel(0, TS_INPUT_CONTROL(input->nr));
+ if (output->dma)
+ spin_lock_irq(&output->dma->lock);
- ddbwritel((1 << 16) |
- (input->dma_buf_num << 11) |
- (input->dma_buf_size >> 7),
- DMA_BUFFER_SIZE(input->nr));
- ddbwritel(0, DMA_BUFFER_ACK(input->nr));
+ ddbwritel(dev, 0, TS_CONTROL(output));
- ddbwritel(1, DMA_BASE_WRITE);
- ddbwritel(3, DMA_BUFFER_CONTROL(input->nr));
- ddbwritel(9, TS_INPUT_CONTROL(input->nr));
- input->running = 1;
- spin_unlock_irq(&input->lock);
+ if (output->dma) {
+ ddbwritel(dev, 0, DMA_BUFFER_CONTROL(output->dma));
+ output->dma->running = 0;
+ spin_unlock_irq(&output->dma->lock);
+ }
}
static void ddb_input_stop(struct ddb_input *input)
{
struct ddb *dev = input->port->dev;
+ u32 tag = DDB_LINK_TAG(input->port->lnr);
+
+ if (input->dma)
+ spin_lock_irq(&input->dma->lock);
+ ddbwritel(dev, 0, tag | TS_CONTROL(input));
+ if (input->dma) {
+ ddbwritel(dev, 0, DMA_BUFFER_CONTROL(input->dma));
+ input->dma->running = 0;
+ spin_unlock_irq(&input->dma->lock);
+ }
+}
+
+static void ddb_input_start(struct ddb_input *input)
+{
+ struct ddb *dev = input->port->dev;
+
+ if (input->dma) {
+ spin_lock_irq(&input->dma->lock);
+ input->dma->cbuf = 0;
+ input->dma->coff = 0;
+ input->dma->stat = 0;
+ ddbwritel(dev, 0, DMA_BUFFER_CONTROL(input->dma));
+ }
+ ddbwritel(dev, 0, TS_CONTROL(input));
+ ddbwritel(dev, 2, TS_CONTROL(input));
+ ddbwritel(dev, 0, TS_CONTROL(input));
+
+ if (input->dma) {
+ ddbwritel(dev, input->dma->bufval,
+ DMA_BUFFER_SIZE(input->dma));
+ ddbwritel(dev, 0, DMA_BUFFER_ACK(input->dma));
+ ddbwritel(dev, 1, DMA_BASE_WRITE);
+ ddbwritel(dev, 3, DMA_BUFFER_CONTROL(input->dma));
+ }
+
+ ddbwritel(dev, 0x09, TS_CONTROL(input));
- spin_lock_irq(&input->lock);
- ddbwritel(0, TS_INPUT_CONTROL(input->nr));
- ddbwritel(0, DMA_BUFFER_CONTROL(input->nr));
- input->running = 0;
- spin_unlock_irq(&input->lock);
+ if (input->dma) {
+ input->dma->running = 1;
+ spin_unlock_irq(&input->dma->lock);
+ }
}
-static void ddb_output_start(struct ddb_output *output)
+
+static void ddb_input_start_all(struct ddb_input *input)
{
- struct ddb *dev = output->port->dev;
+ struct ddb_input *i = input;
+ struct ddb_output *o;
- spin_lock_irq(&output->lock);
- output->cbuf = 0;
- output->coff = 0;
- ddbwritel(0, TS_OUTPUT_CONTROL(output->nr));
- ddbwritel(2, TS_OUTPUT_CONTROL(output->nr));
- ddbwritel(0, TS_OUTPUT_CONTROL(output->nr));
- ddbwritel(0x3c, TS_OUTPUT_CONTROL(output->nr));
- ddbwritel((1 << 16) |
- (output->dma_buf_num << 11) |
- (output->dma_buf_size >> 7),
- DMA_BUFFER_SIZE(output->nr + 8));
- ddbwritel(0, DMA_BUFFER_ACK(output->nr + 8));
-
- ddbwritel(1, DMA_BASE_READ);
- ddbwritel(3, DMA_BUFFER_CONTROL(output->nr + 8));
- /* ddbwritel(0xbd, TS_OUTPUT_CONTROL(output->nr)); */
- ddbwritel(0x1d, TS_OUTPUT_CONTROL(output->nr));
- output->running = 1;
- spin_unlock_irq(&output->lock);
+ mutex_lock(&redirect_lock);
+ while (i && (o = i->redo)) {
+ ddb_output_start(o);
+ i = o->port->input[0];
+ if (i)
+ ddb_input_start(i);
+ }
+ ddb_input_start(input);
+ mutex_unlock(&redirect_lock);
}
-static void ddb_output_stop(struct ddb_output *output)
+static void ddb_input_stop_all(struct ddb_input *input)
{
- struct ddb *dev = output->port->dev;
+ struct ddb_input *i = input;
+ struct ddb_output *o;
- spin_lock_irq(&output->lock);
- ddbwritel(0, TS_OUTPUT_CONTROL(output->nr));
- ddbwritel(0, DMA_BUFFER_CONTROL(output->nr + 8));
- output->running = 0;
- spin_unlock_irq(&output->lock);
+ mutex_lock(&redirect_lock);
+ ddb_input_stop(input);
+ while (i && (o = i->redo)) {
+ ddb_output_stop(o);
+ i = o->port->input[0];
+ if (i)
+ ddb_input_stop(i);
+ }
+ mutex_unlock(&redirect_lock);
}
static u32 ddb_output_free(struct ddb_output *output)
{
- u32 idx, off, stat = output->stat;
+ u32 idx, off, stat = output->dma->stat;
s32 diff;
idx = (stat >> 11) & 0x1f;
off = (stat & 0x7ff) << 7;
- if (output->cbuf != idx) {
- if ((((output->cbuf + 1) % output->dma_buf_num) == idx) &&
- (output->dma_buf_size - output->coff <= 188))
+ if (output->dma->cbuf != idx) {
+ if ((((output->dma->cbuf + 1) % output->dma->num) == idx) &&
+ (output->dma->size - output->dma->coff <= 188))
return 0;
return 188;
}
- diff = off - output->coff;
+ diff = off - output->dma->coff;
if (diff <= 0 || diff > 188)
return 188;
return 0;
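
[Editor's note] Both ddb_output_free() above and ddb_input_avail() further below decode the DMA progress word cached in dma->stat: bits 11-15 hold the buffer index the hardware is currently on, bits 0-10 the offset within that buffer counted in 128-byte units. The same packing, in the other direction, is what the DMA_BUFFER_ACK writes use ((cbuf << 11) | (coff >> 7)). A standalone sketch of the decoding, names hypothetical:

#include <stdint.h>

/* Decodes the progress word as ddb_output_free()/ddb_input_avail() do. */
static void ddb_decode_stat(uint32_t stat, uint32_t *idx, uint32_t *off)
{
	*idx = (stat >> 11) & 0x1f;	/* current buffer index, 0..31 */
	*off = (stat & 0x7ff) << 7;	/* byte offset within that buffer */
}
/* Example: stat = (3 << 11) | 10  ->  idx = 3, off = 10 * 128 = 1280 bytes. */
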
@@ -479,46 +563,51 @@ static ssize_t ddb_output_write(struct ddb_output *output,
const __user u8 *buf, size_t count)
{
struct ddb *dev = output->port->dev;
- u32 idx, off, stat = output->stat;
+ u32 idx, off, stat = output->dma->stat;
u32 left = count, len;
idx = (stat >> 11) & 0x1f;
off = (stat & 0x7ff) << 7;
while (left) {
- len = output->dma_buf_size - output->coff;
- if ((((output->cbuf + 1) % output->dma_buf_num) == idx) &&
+ len = output->dma->size - output->dma->coff;
+ if ((((output->dma->cbuf + 1) % output->dma->num) == idx) &&
(off == 0)) {
if (len <= 188)
break;
len -= 188;
}
- if (output->cbuf == idx) {
- if (off > output->coff) {
-#if 1
- len = off - output->coff;
+ if (output->dma->cbuf == idx) {
+ if (off > output->dma->coff) {
+ len = off - output->dma->coff;
len -= (len % 188);
if (len <= 188)
-
-#endif
break;
len -= 188;
}
}
if (len > left)
len = left;
- if (copy_from_user(output->vbuf[output->cbuf] + output->coff,
+ if (copy_from_user(output->dma->vbuf[output->dma->cbuf] +
+ output->dma->coff,
buf, len))
return -EIO;
+ if (alt_dma)
+ dma_sync_single_for_device(dev->dev,
+ output->dma->pbuf[output->dma->cbuf],
+ output->dma->size, DMA_TO_DEVICE);
left -= len;
buf += len;
- output->coff += len;
- if (output->coff == output->dma_buf_size) {
- output->coff = 0;
- output->cbuf = ((output->cbuf + 1) % output->dma_buf_num);
+ output->dma->coff += len;
+ if (output->dma->coff == output->dma->size) {
+ output->dma->coff = 0;
+ output->dma->cbuf = ((output->dma->cbuf + 1) %
+ output->dma->num);
}
- ddbwritel((output->cbuf << 11) | (output->coff >> 7),
- DMA_BUFFER_ACK(output->nr + 8));
+ ddbwritel(dev,
+ (output->dma->cbuf << 11) |
+ (output->dma->coff >> 7),
+ DMA_BUFFER_ACK(output->dma));
}
return count - left;
}
@@ -526,81 +615,229 @@ static ssize_t ddb_output_write(struct ddb_output *output,
static u32 ddb_input_avail(struct ddb_input *input)
{
struct ddb *dev = input->port->dev;
- u32 idx, off, stat = input->stat;
- u32 ctrl = ddbreadl(DMA_BUFFER_CONTROL(input->nr));
+ u32 idx, off, stat = input->dma->stat;
+ u32 ctrl = ddbreadl(dev, DMA_BUFFER_CONTROL(input->dma));
idx = (stat >> 11) & 0x1f;
off = (stat & 0x7ff) << 7;
if (ctrl & 4) {
- printk(KERN_ERR "IA %d %d %08x\n", idx, off, ctrl);
- ddbwritel(input->stat, DMA_BUFFER_ACK(input->nr));
+ dev_err(dev->dev, "IA %d %d %08x\n", idx, off, ctrl);
+ ddbwritel(dev, stat, DMA_BUFFER_ACK(input->dma));
return 0;
}
- if (input->cbuf != idx)
+ if (input->dma->cbuf != idx)
return 188;
return 0;
}
-static ssize_t ddb_input_read(struct ddb_input *input, __user u8 *buf, size_t count)
+static ssize_t ddb_input_read(struct ddb_input *input,
+ __user u8 *buf, size_t count)
{
struct ddb *dev = input->port->dev;
u32 left = count;
- u32 idx, free, stat = input->stat;
+ u32 idx, free, stat = input->dma->stat;
int ret;
idx = (stat >> 11) & 0x1f;
while (left) {
- if (input->cbuf == idx)
+ if (input->dma->cbuf == idx)
return count - left;
- free = input->dma_buf_size - input->coff;
+ free = input->dma->size - input->dma->coff;
if (free > left)
free = left;
- ret = copy_to_user(buf, input->vbuf[input->cbuf] +
- input->coff, free);
+ if (alt_dma)
+ dma_sync_single_for_cpu(dev->dev,
+ input->dma->pbuf[input->dma->cbuf],
+ input->dma->size, DMA_FROM_DEVICE);
+ ret = copy_to_user(buf, input->dma->vbuf[input->dma->cbuf] +
+ input->dma->coff, free);
if (ret)
return -EFAULT;
- input->coff += free;
- if (input->coff == input->dma_buf_size) {
- input->coff = 0;
- input->cbuf = (input->cbuf+1) % input->dma_buf_num;
+ input->dma->coff += free;
+ if (input->dma->coff == input->dma->size) {
+ input->dma->coff = 0;
+ input->dma->cbuf = (input->dma->cbuf + 1) %
+ input->dma->num;
}
left -= free;
- ddbwritel((input->cbuf << 11) | (input->coff >> 7),
- DMA_BUFFER_ACK(input->nr));
+ buf += free;
+ ddbwritel(dev,
+ (input->dma->cbuf << 11) | (input->dma->coff >> 7),
+ DMA_BUFFER_ACK(input->dma));
}
return count;
}
-/******************************************************************************/
-/******************************************************************************/
-/******************************************************************************/
+/****************************************************************************/
+/****************************************************************************/
-#if 0
-static struct ddb_input *fe2input(struct ddb *dev, struct dvb_frontend *fe)
+static ssize_t ts_write(struct file *file, const __user char *buf,
+ size_t count, loff_t *ppos)
{
- int i;
+ struct dvb_device *dvbdev = file->private_data;
+ struct ddb_output *output = dvbdev->priv;
+ struct ddb *dev = output->port->dev;
+ size_t left = count;
+ int stat;
- for (i = 0; i < dev->info->port_num * 2; i++) {
- if (dev->input[i].fe == fe)
- return &dev->input[i];
+ if (!dev->has_dma)
+ return -EINVAL;
+ while (left) {
+ if (ddb_output_free(output) < 188) {
+ if (file->f_flags & O_NONBLOCK)
+ break;
+ if (wait_event_interruptible(
+ output->dma->wq,
+ ddb_output_free(output) >= 188) < 0)
+ break;
+ }
+ stat = ddb_output_write(output, buf, left);
+ if (stat < 0)
+ return stat;
+ buf += stat;
+ left -= stat;
}
- return NULL;
+ return (left == count) ? -EAGAIN : (count - left);
}
-#endif
-static int drxk_gate_ctrl(struct dvb_frontend *fe, int enable)
+static ssize_t ts_read(struct file *file, __user char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dvb_device *dvbdev = file->private_data;
+ struct ddb_output *output = dvbdev->priv;
+ struct ddb_input *input = output->port->input[0];
+ struct ddb *dev = output->port->dev;
+ size_t left = count;
+ int stat;
+
+ if (!dev->has_dma)
+ return -EINVAL;
+ while (left) {
+ if (ddb_input_avail(input) < 188) {
+ if (file->f_flags & O_NONBLOCK)
+ break;
+ if (wait_event_interruptible(
+ input->dma->wq,
+ ddb_input_avail(input) >= 188) < 0)
+ break;
+ }
+ stat = ddb_input_read(input, buf, left);
+ if (stat < 0)
+ return stat;
+ left -= stat;
+ buf += stat;
+ }
+ return (count && (left == count)) ? -EAGAIN : (count - left);
+}
+
+static unsigned int ts_poll(struct file *file, poll_table *wait)
+{
+ struct dvb_device *dvbdev = file->private_data;
+ struct ddb_output *output = dvbdev->priv;
+ struct ddb_input *input = output->port->input[0];
+
+ unsigned int mask = 0;
+
+ poll_wait(file, &input->dma->wq, wait);
+ poll_wait(file, &output->dma->wq, wait);
+ if (ddb_input_avail(input) >= 188)
+ mask |= POLLIN | POLLRDNORM;
+ if (ddb_output_free(output) >= 188)
+ mask |= POLLOUT | POLLWRNORM;
+ return mask;
+}
+
+static int ts_release(struct inode *inode, struct file *file)
+{
+ struct dvb_device *dvbdev = file->private_data;
+ struct ddb_output *output = NULL;
+ struct ddb_input *input = NULL;
+
+ if (dvbdev) {
+ output = dvbdev->priv;
+ input = output->port->input[0];
+ }
+
+ if ((file->f_flags & O_ACCMODE) == O_RDONLY) {
+ if (!input)
+ return -EINVAL;
+ ddb_input_stop(input);
+ } else if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
+ if (!output)
+ return -EINVAL;
+ ddb_output_stop(output);
+ }
+ return dvb_generic_release(inode, file);
+}
+
+static int ts_open(struct inode *inode, struct file *file)
+{
+ int err;
+ struct dvb_device *dvbdev = file->private_data;
+ struct ddb_output *output = NULL;
+ struct ddb_input *input = NULL;
+
+ if (dvbdev) {
+ output = dvbdev->priv;
+ input = output->port->input[0];
+ }
+
+ if ((file->f_flags & O_ACCMODE) == O_RDONLY) {
+ if (!input)
+ return -EINVAL;
+ if (input->redo || input->redi)
+ return -EBUSY;
+ } else if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
+ if (!output)
+ return -EINVAL;
+ } else
+ return -EINVAL;
+ err = dvb_generic_open(inode, file);
+ if (err < 0)
+ return err;
+ if ((file->f_flags & O_ACCMODE) == O_RDONLY)
+ ddb_input_start(input);
+ else if ((file->f_flags & O_ACCMODE) == O_WRONLY)
+ ddb_output_start(output);
+ return err;
+}
+
+static const struct file_operations ci_fops = {
+ .owner = THIS_MODULE,
+ .read = ts_read,
+ .write = ts_write,
+ .open = ts_open,
+ .release = ts_release,
+ .poll = ts_poll,
+ .mmap = NULL,
+};
+
+static struct dvb_device dvbdev_ci = {
+ .priv = NULL,
+ .readers = 1,
+ .writers = 1,
+ .users = 2,
+ .fops = &ci_fops,
+};
+
+
+/****************************************************************************/
+/****************************************************************************/
+
+static int locked_gate_ctrl(struct dvb_frontend *fe, int enable)
{
struct ddb_input *input = fe->sec_priv;
struct ddb_port *port = input->port;
+ struct ddb_dvb *dvb = &port->dvb[input->nr & 1];
int status;
if (enable) {
mutex_lock(&port->i2c_gate_lock);
- status = input->gate_ctrl(fe, 1);
+ status = dvb->i2c_gate_ctrl(fe, 1);
} else {
- status = input->gate_ctrl(fe, 0);
+ status = dvb->i2c_gate_ctrl(fe, 0);
mutex_unlock(&port->i2c_gate_lock);
}
return status;
@@ -609,39 +846,42 @@ static int drxk_gate_ctrl(struct dvb_frontend *fe, int enable)
static int demod_attach_drxk(struct ddb_input *input)
{
struct i2c_adapter *i2c = &input->port->i2c->adap;
+ struct ddb_dvb *dvb = &input->port->dvb[input->nr & 1];
+ struct device *dev = input->port->dev->dev;
struct dvb_frontend *fe;
struct drxk_config config;
memset(&config, 0, sizeof(config));
- config.microcode_name = "drxk_a3.mc";
- config.qam_demod_parameter_count = 4;
config.adr = 0x29 + (input->nr & 1);
+ config.microcode_name = "drxk_a3.mc";
- fe = input->fe = dvb_attach(drxk_attach, &config, i2c);
- if (!input->fe) {
- printk(KERN_ERR "No DRXK found!\n");
+ fe = dvb->fe = dvb_attach(drxk_attach, &config, i2c);
+ if (!fe) {
+ dev_err(dev, "No DRXK found!\n");
return -ENODEV;
}
fe->sec_priv = input;
- input->gate_ctrl = fe->ops.i2c_gate_ctrl;
- fe->ops.i2c_gate_ctrl = drxk_gate_ctrl;
+ dvb->i2c_gate_ctrl = fe->ops.i2c_gate_ctrl;
+ fe->ops.i2c_gate_ctrl = locked_gate_ctrl;
return 0;
}
static int tuner_attach_tda18271(struct ddb_input *input)
{
struct i2c_adapter *i2c = &input->port->i2c->adap;
+ struct ddb_dvb *dvb = &input->port->dvb[input->nr & 1];
+ struct device *dev = input->port->dev->dev;
struct dvb_frontend *fe;
- if (input->fe->ops.i2c_gate_ctrl)
- input->fe->ops.i2c_gate_ctrl(input->fe, 1);
- fe = dvb_attach(tda18271c2dd_attach, input->fe, i2c, 0x60);
+ if (dvb->fe->ops.i2c_gate_ctrl)
+ dvb->fe->ops.i2c_gate_ctrl(dvb->fe, 1);
+ fe = dvb_attach(tda18271c2dd_attach, dvb->fe, i2c, 0x60);
+ if (dvb->fe->ops.i2c_gate_ctrl)
+ dvb->fe->ops.i2c_gate_ctrl(dvb->fe, 0);
if (!fe) {
- printk(KERN_ERR "No TDA18271 found!\n");
+ dev_err(dev, "No TDA18271 found!\n");
return -ENODEV;
}
- if (input->fe->ops.i2c_gate_ctrl)
- input->fe->ops.i2c_gate_ctrl(input->fe, 0);
return 0;
}
@@ -670,40 +910,43 @@ static struct stv0367_config ddb_stv0367_config[] = {
static int demod_attach_stv0367(struct ddb_input *input)
{
struct i2c_adapter *i2c = &input->port->i2c->adap;
+ struct ddb_dvb *dvb = &input->port->dvb[input->nr & 1];
+ struct device *dev = input->port->dev->dev;
+ struct dvb_frontend *fe;
/* attach frontend */
- input->fe = dvb_attach(stv0367ddb_attach,
+ fe = dvb->fe = dvb_attach(stv0367ddb_attach,
&ddb_stv0367_config[(input->nr & 1)], i2c);
- if (!input->fe) {
- printk(KERN_ERR "stv0367ddb_attach failed (not found?)\n");
+ if (!dvb->fe) {
+ dev_err(dev, "No stv0367 found!\n");
return -ENODEV;
}
-
- input->fe->sec_priv = input;
- input->gate_ctrl = input->fe->ops.i2c_gate_ctrl;
- input->fe->ops.i2c_gate_ctrl = drxk_gate_ctrl;
-
+ fe->sec_priv = input;
+ dvb->i2c_gate_ctrl = fe->ops.i2c_gate_ctrl;
+ fe->ops.i2c_gate_ctrl = locked_gate_ctrl;
return 0;
}
static int tuner_tda18212_ping(struct ddb_input *input, unsigned short adr)
{
struct i2c_adapter *adapter = &input->port->i2c->adap;
+ struct ddb_dvb *dvb = &input->port->dvb[input->nr & 1];
+ struct device *dev = input->port->dev->dev;
u8 tda_id[2];
u8 subaddr = 0x00;
- printk(KERN_DEBUG "stv0367-tda18212 tuner ping\n");
- if (input->fe->ops.i2c_gate_ctrl)
- input->fe->ops.i2c_gate_ctrl(input->fe, 1);
+ dev_dbg(dev, "stv0367-tda18212 tuner ping\n");
+ if (dvb->fe->ops.i2c_gate_ctrl)
+ dvb->fe->ops.i2c_gate_ctrl(dvb->fe, 1);
if (i2c_read_regs(adapter, adr, subaddr, tda_id, sizeof(tda_id)) < 0)
- printk(KERN_DEBUG "tda18212 ping 1 fail\n");
+ dev_dbg(dev, "tda18212 ping 1 fail\n");
if (i2c_read_regs(adapter, adr, subaddr, tda_id, sizeof(tda_id)) < 0)
- printk(KERN_DEBUG "tda18212 ping 2 fail\n");
+ dev_warn(dev, "tda18212 ping failed, expect problems\n");
- if (input->fe->ops.i2c_gate_ctrl)
- input->fe->ops.i2c_gate_ctrl(input->fe, 0);
+ if (dvb->fe->ops.i2c_gate_ctrl)
+ dvb->fe->ops.i2c_gate_ctrl(dvb->fe, 0);
return 0;
}
@@ -711,6 +954,9 @@ static int tuner_tda18212_ping(struct ddb_input *input, unsigned short adr)
static int demod_attach_cxd28xx(struct ddb_input *input, int par, int osc24)
{
struct i2c_adapter *i2c = &input->port->i2c->adap;
+ struct ddb_dvb *dvb = &input->port->dvb[input->nr & 1];
+ struct device *dev = input->port->dev->dev;
+ struct dvb_frontend *fe;
struct cxd2841er_config cfg;
/* the cxd2841er driver expects 8bit/shifted I2C addresses */
@@ -725,26 +971,26 @@ static int demod_attach_cxd28xx(struct ddb_input *input, int par, int osc24)
cfg.flags |= CXD2841ER_TS_SERIAL;
/* attach frontend */
- input->fe = dvb_attach(cxd2841er_attach_t_c, &cfg, i2c);
+ fe = dvb->fe = dvb_attach(cxd2841er_attach_t_c, &cfg, i2c);
- if (!input->fe) {
- printk(KERN_ERR "No Sony CXD28xx found!\n");
+ if (!dvb->fe) {
+ dev_err(dev, "No cxd2837/38/43/54 found!\n");
return -ENODEV;
}
-
- input->fe->sec_priv = input;
- input->gate_ctrl = input->fe->ops.i2c_gate_ctrl;
- input->fe->ops.i2c_gate_ctrl = drxk_gate_ctrl;
-
+ fe->sec_priv = input;
+ dvb->i2c_gate_ctrl = fe->ops.i2c_gate_ctrl;
+ fe->ops.i2c_gate_ctrl = locked_gate_ctrl;
return 0;
}
static int tuner_attach_tda18212(struct ddb_input *input, u32 porttype)
{
struct i2c_adapter *adapter = &input->port->i2c->adap;
+ struct ddb_dvb *dvb = &input->port->dvb[input->nr & 1];
+ struct device *dev = input->port->dev->dev;
struct i2c_client *client;
struct tda18212_config config = {
- .fe = input->fe,
+ .fe = dvb->fe,
.if_dvbt_6 = 3550,
.if_dvbt_7 = 3700,
.if_dvbt_8 = 4150,
@@ -782,17 +1028,17 @@ static int tuner_attach_tda18212(struct ddb_input *input, u32 porttype)
goto err;
}
- input->i2c_client[0] = client;
+ dvb->i2c_client[0] = client;
return 0;
err:
- printk(KERN_INFO "TDA18212 tuner not found. Device is not fully operational.\n");
+ dev_notice(dev, "TDA18212 tuner not found. Device is not fully operational.\n");
return -ENODEV;
}
-/******************************************************************************/
-/******************************************************************************/
-/******************************************************************************/
+/****************************************************************************/
+/****************************************************************************/
+/****************************************************************************/
static struct stv090x_config stv0900 = {
.device = STV0900,
@@ -805,6 +1051,9 @@ static struct stv090x_config stv0900 = {
.ts1_mode = STV090x_TSMODE_SERIAL_PUNCTURED,
.ts2_mode = STV090x_TSMODE_SERIAL_PUNCTURED,
+ .ts1_tei = 1,
+ .ts2_tei = 1,
+
.repeater_level = STV090x_RPTLEVEL_16,
.adc1_range = STV090x_ADC_1Vpp,
@@ -824,6 +1073,9 @@ static struct stv090x_config stv0900_aa = {
.ts1_mode = STV090x_TSMODE_SERIAL_PUNCTURED,
.ts2_mode = STV090x_TSMODE_SERIAL_PUNCTURED,
+ .ts1_tei = 1,
+ .ts2_tei = 1,
+
.repeater_level = STV090x_RPTLEVEL_16,
.adc1_range = STV090x_ADC_1Vpp,
@@ -848,18 +1100,20 @@ static int demod_attach_stv0900(struct ddb_input *input, int type)
{
struct i2c_adapter *i2c = &input->port->i2c->adap;
struct stv090x_config *feconf = type ? &stv0900_aa : &stv0900;
-
- input->fe = dvb_attach(stv090x_attach, feconf, i2c,
- (input->nr & 1) ? STV090x_DEMODULATOR_1
- : STV090x_DEMODULATOR_0);
- if (!input->fe) {
- printk(KERN_ERR "No STV0900 found!\n");
+ struct ddb_dvb *dvb = &input->port->dvb[input->nr & 1];
+ struct device *dev = input->port->dev->dev;
+
+ dvb->fe = dvb_attach(stv090x_attach, feconf, i2c,
+ (input->nr & 1) ? STV090x_DEMODULATOR_1
+ : STV090x_DEMODULATOR_0);
+ if (!dvb->fe) {
+ dev_err(dev, "No STV0900 found!\n");
return -ENODEV;
}
- if (!dvb_attach(lnbh24_attach, input->fe, i2c, 0,
+ if (!dvb_attach(lnbh24_attach, dvb->fe, i2c, 0,
0, (input->nr & 1) ?
(0x09 - type) : (0x0b - type))) {
- printk(KERN_ERR "No LNBH24 found!\n");
+ dev_err(dev, "No LNBH24 found!\n");
return -ENODEV;
}
return 0;
@@ -868,18 +1122,20 @@ static int demod_attach_stv0900(struct ddb_input *input, int type)
static int tuner_attach_stv6110(struct ddb_input *input, int type)
{
struct i2c_adapter *i2c = &input->port->i2c->adap;
+ struct ddb_dvb *dvb = &input->port->dvb[input->nr & 1];
+ struct device *dev = input->port->dev->dev;
struct stv090x_config *feconf = type ? &stv0900_aa : &stv0900;
struct stv6110x_config *tunerconf = (input->nr & 1) ?
&stv6110b : &stv6110a;
const struct stv6110x_devctl *ctl;
- ctl = dvb_attach(stv6110x_attach, input->fe, tunerconf, i2c);
+ ctl = dvb_attach(stv6110x_attach, dvb->fe, tunerconf, i2c);
if (!ctl) {
- printk(KERN_ERR "No STV6110X found!\n");
+ dev_err(dev, "No STV6110X found!\n");
return -ENODEV;
}
- printk(KERN_INFO "attach tuner input %d adr %02x\n",
- input->nr, tunerconf->addr);
+ dev_info(dev, "attach tuner input %d adr %02x\n",
+ input->nr, tunerconf->addr);
feconf->tuner_init = ctl->tuner_init;
feconf->tuner_sleep = ctl->tuner_sleep;
@@ -896,416 +1152,1096 @@ static int tuner_attach_stv6110(struct ddb_input *input, int type)
return 0;
}
-static int my_dvb_dmx_ts_card_init(struct dvb_demux *dvbdemux, char *id,
- int (*start_feed)(struct dvb_demux_feed *),
- int (*stop_feed)(struct dvb_demux_feed *),
- void *priv)
+static const struct stv0910_cfg stv0910_p = {
+ .adr = 0x68,
+ .parallel = 1,
+ .rptlvl = 4,
+ .clk = 30000000,
+};
+
+static const struct lnbh25_config lnbh25_cfg = {
+ .i2c_address = 0x0c << 1,
+ .data2_config = LNBH25_TEN
+};
+
+static int demod_attach_stv0910(struct ddb_input *input, int type)
{
- dvbdemux->priv = priv;
+ struct i2c_adapter *i2c = &input->port->i2c->adap;
+ struct ddb_dvb *dvb = &input->port->dvb[input->nr & 1];
+ struct device *dev = input->port->dev->dev;
+ struct stv0910_cfg cfg = stv0910_p;
+ struct lnbh25_config lnbcfg = lnbh25_cfg;
+
+ if (stv0910_single)
+ cfg.single = 1;
+
+ if (type)
+ cfg.parallel = 2;
+ dvb->fe = dvb_attach(stv0910_attach, i2c, &cfg, (input->nr & 1));
+ if (!dvb->fe) {
+ cfg.adr = 0x6c;
+ dvb->fe = dvb_attach(stv0910_attach, i2c,
+ &cfg, (input->nr & 1));
+ }
+ if (!dvb->fe) {
+ dev_err(dev, "No STV0910 found!\n");
+ return -ENODEV;
+ }
- dvbdemux->filternum = 256;
- dvbdemux->feednum = 256;
- dvbdemux->start_feed = start_feed;
- dvbdemux->stop_feed = stop_feed;
- dvbdemux->write_to_decoder = NULL;
- dvbdemux->dmx.capabilities = (DMX_TS_FILTERING |
- DMX_SECTION_FILTERING |
- DMX_MEMORY_BASED_FILTERING);
- return dvb_dmx_init(dvbdemux);
+ /* attach lnbh25 - leftshift by one as the lnbh25 driver expects 8bit
+ * i2c addresses
+ */
+ lnbcfg.i2c_address = (((input->nr & 1) ? 0x0d : 0x0c) << 1);
+ if (!dvb_attach(lnbh25_attach, dvb->fe, &lnbcfg, i2c)) {
+ lnbcfg.i2c_address = (((input->nr & 1) ? 0x09 : 0x08) << 1);
+ if (!dvb_attach(lnbh25_attach, dvb->fe, &lnbcfg, i2c)) {
+ dev_err(dev, "No LNBH25 found!\n");
+ return -ENODEV;
+ }
+ }
+
+ return 0;
}
-static int my_dvb_dmxdev_ts_card_init(struct dmxdev *dmxdev,
- struct dvb_demux *dvbdemux,
- struct dmx_frontend *hw_frontend,
- struct dmx_frontend *mem_frontend,
- struct dvb_adapter *dvb_adapter)
+static int tuner_attach_stv6111(struct ddb_input *input, int type)
{
- int ret;
-
- dmxdev->filternum = 256;
- dmxdev->demux = &dvbdemux->dmx;
- dmxdev->capabilities = 0;
- ret = dvb_dmxdev_init(dmxdev, dvb_adapter);
- if (ret < 0)
- return ret;
+ struct i2c_adapter *i2c = &input->port->i2c->adap;
+ struct ddb_dvb *dvb = &input->port->dvb[input->nr & 1];
+ struct device *dev = input->port->dev->dev;
+ struct dvb_frontend *fe;
+ u8 adr = (type ? 0 : 4) + ((input->nr & 1) ? 0x63 : 0x60);
- hw_frontend->source = DMX_FRONTEND_0;
- dvbdemux->dmx.add_frontend(&dvbdemux->dmx, hw_frontend);
- mem_frontend->source = DMX_MEMORY_FE;
- dvbdemux->dmx.add_frontend(&dvbdemux->dmx, mem_frontend);
- return dvbdemux->dmx.connect_frontend(&dvbdemux->dmx, hw_frontend);
+ fe = dvb_attach(stv6111_attach, dvb->fe, i2c, adr);
+ if (!fe) {
+ fe = dvb_attach(stv6111_attach, dvb->fe, i2c, adr & ~4);
+ if (!fe) {
+ dev_err(dev, "No STV6111 found at 0x%02x!\n", adr);
+ return -ENODEV;
+ }
+ }
+ return 0;
}
static int start_feed(struct dvb_demux_feed *dvbdmxfeed)
{
struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
struct ddb_input *input = dvbdmx->priv;
+ struct ddb_dvb *dvb = &input->port->dvb[input->nr & 1];
- if (!input->users)
- ddb_input_start(input);
+ if (!dvb->users)
+ ddb_input_start_all(input);
- return ++input->users;
+ return ++dvb->users;
}
static int stop_feed(struct dvb_demux_feed *dvbdmxfeed)
{
struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
struct ddb_input *input = dvbdmx->priv;
+ struct ddb_dvb *dvb = &input->port->dvb[input->nr & 1];
- if (--input->users)
- return input->users;
+ if (--dvb->users)
+ return dvb->users;
- ddb_input_stop(input);
+ ddb_input_stop_all(input);
return 0;
}
-
static void dvb_input_detach(struct ddb_input *input)
{
- struct dvb_adapter *adap = &input->adap;
- struct dvb_demux *dvbdemux = &input->demux;
+ struct ddb_dvb *dvb = &input->port->dvb[input->nr & 1];
+ struct dvb_demux *dvbdemux = &dvb->demux;
struct i2c_client *client;
- switch (input->attached) {
- case 5:
- client = input->i2c_client[0];
+ switch (dvb->attached) {
+ case 0x31:
+ if (dvb->fe2)
+ dvb_unregister_frontend(dvb->fe2);
+ if (dvb->fe)
+ dvb_unregister_frontend(dvb->fe);
+ /* fallthrough */
+ case 0x30:
+ if (dvb->fe2)
+ dvb_frontend_detach(dvb->fe2);
+ if (dvb->fe)
+ dvb_frontend_detach(dvb->fe);
+ dvb->fe = dvb->fe2 = NULL;
+ /* fallthrough */
+ case 0x20:
+ client = dvb->i2c_client[0];
if (client) {
module_put(client->dev.driver->owner);
i2c_unregister_device(client);
}
- if (input->fe2) {
- dvb_unregister_frontend(input->fe2);
- input->fe2 = NULL;
- }
- if (input->fe) {
- dvb_unregister_frontend(input->fe);
- dvb_frontend_detach(input->fe);
- input->fe = NULL;
- }
- /* fall-through */
- case 4:
- dvb_net_release(&input->dvbnet);
- /* fall-through */
- case 3:
- dvbdemux->dmx.close(&dvbdemux->dmx);
+
+ dvb_net_release(&dvb->dvbnet);
+ /* fallthrough */
+ case 0x12:
dvbdemux->dmx.remove_frontend(&dvbdemux->dmx,
- &input->hw_frontend);
+ &dvb->hw_frontend);
dvbdemux->dmx.remove_frontend(&dvbdemux->dmx,
- &input->mem_frontend);
- dvb_dmxdev_release(&input->dmxdev);
- /* fall-through */
- case 2:
- dvb_dmx_release(&input->demux);
- /* fall-through */
- case 1:
- dvb_unregister_adapter(adap);
+ &dvb->mem_frontend);
+ /* fallthrough */
+ case 0x11:
+ dvb_dmxdev_release(&dvb->dmxdev);
+ /* fallthrough */
+ case 0x10:
+ dvb_dmx_release(&dvb->demux);
+ /* fallthrough */
+ case 0x01:
+ break;
+ }
+ dvb->attached = 0x00;
+}
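
[Editor's note] dvb_input_attach() further below records how far registration got in dvb->attached, and the switch in dvb_input_detach() above uses those codes as fall-through entry points so teardown starts at exactly the stage that was reached. The stages, read off the attach path; the enum and its names are purely illustrative, the driver stores the plain hex values:

/* Illustrative legend only, not driver identifiers. */
enum ddb_attach_stage {
	DDB_ATT_NONE   = 0x00,	/* nothing attached */
	DDB_ATT_BEGIN  = 0x01,	/* attach started */
	DDB_ATT_DMX    = 0x10,	/* dvb_dmx_init() done */
	DDB_ATT_DMXDEV = 0x11,	/* dvb_dmxdev_init() done */
	DDB_ATT_FRONTS = 0x12,	/* demux frontends added and connected */
	DDB_ATT_NET    = 0x20,	/* dvb_net_init() done */
	DDB_ATT_FE     = 0x30,	/* demod/tuner drivers attached */
	DDB_ATT_FE_REG = 0x31,	/* frontends registered with the adapter */
};
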
+
+static int dvb_register_adapters(struct ddb *dev)
+{
+ int i, ret = 0;
+ struct ddb_port *port;
+ struct dvb_adapter *adap;
+
+ if (adapter_alloc == 3) {
+ port = &dev->port[0];
+ adap = port->dvb[0].adap;
+ ret = dvb_register_adapter(adap, "DDBridge", THIS_MODULE,
+ port->dev->dev,
+ adapter_nr);
+ if (ret < 0)
+ return ret;
+ port->dvb[0].adap_registered = 1;
+ for (i = 0; i < dev->port_num; i++) {
+ port = &dev->port[i];
+ port->dvb[0].adap = adap;
+ port->dvb[1].adap = adap;
+ }
+ return 0;
+ }
+
+ for (i = 0; i < dev->port_num; i++) {
+ port = &dev->port[i];
+ switch (port->class) {
+ case DDB_PORT_TUNER:
+ adap = port->dvb[0].adap;
+ ret = dvb_register_adapter(adap, "DDBridge",
+ THIS_MODULE,
+ port->dev->dev,
+ adapter_nr);
+ if (ret < 0)
+ return ret;
+ port->dvb[0].adap_registered = 1;
+
+ if (adapter_alloc > 0) {
+ port->dvb[1].adap = port->dvb[0].adap;
+ break;
+ }
+ adap = port->dvb[1].adap;
+ ret = dvb_register_adapter(adap, "DDBridge",
+ THIS_MODULE,
+ port->dev->dev,
+ adapter_nr);
+ if (ret < 0)
+ return ret;
+ port->dvb[1].adap_registered = 1;
+ break;
+
+ case DDB_PORT_CI:
+ case DDB_PORT_LOOP:
+ adap = port->dvb[0].adap;
+ ret = dvb_register_adapter(adap, "DDBridge",
+ THIS_MODULE,
+ port->dev->dev,
+ adapter_nr);
+ if (ret < 0)
+ return ret;
+ port->dvb[0].adap_registered = 1;
+ break;
+ default:
+ if (adapter_alloc < 2)
+ break;
+ adap = port->dvb[0].adap;
+ ret = dvb_register_adapter(adap, "DDBridge",
+ THIS_MODULE,
+ port->dev->dev,
+ adapter_nr);
+ if (ret < 0)
+ return ret;
+ port->dvb[0].adap_registered = 1;
+ break;
+ }
+ }
+ return ret;
+}
+
+static void dvb_unregister_adapters(struct ddb *dev)
+{
+ int i;
+ struct ddb_port *port;
+ struct ddb_dvb *dvb;
+
+ for (i = 0; i < dev->link[0].info->port_num; i++) {
+ port = &dev->port[i];
+
+ dvb = &port->dvb[0];
+ if (dvb->adap_registered)
+ dvb_unregister_adapter(dvb->adap);
+ dvb->adap_registered = 0;
+
+ dvb = &port->dvb[1];
+ if (dvb->adap_registered)
+ dvb_unregister_adapter(dvb->adap);
+ dvb->adap_registered = 0;
}
- input->attached = 0;
}
static int dvb_input_attach(struct ddb_input *input)
{
- int ret;
+ int ret = 0;
+ struct ddb_dvb *dvb = &input->port->dvb[input->nr & 1];
struct ddb_port *port = input->port;
- struct dvb_adapter *adap = &input->adap;
- struct dvb_demux *dvbdemux = &input->demux;
- int sony_osc24 = 0, sony_tspar = 0;
-
- ret = dvb_register_adapter(adap, "DDBridge", THIS_MODULE,
- &input->port->dev->pdev->dev,
- adapter_nr);
- if (ret < 0) {
- printk(KERN_ERR "ddbridge: Could not register adapter.Check if you enabled enough adapters in dvb-core!\n");
+ struct dvb_adapter *adap = dvb->adap;
+ struct dvb_demux *dvbdemux = &dvb->demux;
+ int par = 0, osc24 = 0;
+
+ dvb->attached = 0x01;
+
+ dvbdemux->priv = input;
+ dvbdemux->dmx.capabilities = DMX_TS_FILTERING |
+ DMX_SECTION_FILTERING | DMX_MEMORY_BASED_FILTERING;
+ dvbdemux->start_feed = start_feed;
+ dvbdemux->stop_feed = stop_feed;
+ dvbdemux->filternum = dvbdemux->feednum = 256;
+ ret = dvb_dmx_init(dvbdemux);
+ if (ret < 0)
return ret;
- }
- input->attached = 1;
+ dvb->attached = 0x10;
- ret = my_dvb_dmx_ts_card_init(dvbdemux, "SW demux",
- start_feed,
- stop_feed, input);
+ dvb->dmxdev.filternum = 256;
+ dvb->dmxdev.demux = &dvbdemux->dmx;
+ ret = dvb_dmxdev_init(&dvb->dmxdev, adap);
if (ret < 0)
return ret;
- input->attached = 2;
+ dvb->attached = 0x11;
- ret = my_dvb_dmxdev_ts_card_init(&input->dmxdev, &input->demux,
- &input->hw_frontend,
- &input->mem_frontend, adap);
+ dvb->mem_frontend.source = DMX_MEMORY_FE;
+ dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->mem_frontend);
+ dvb->hw_frontend.source = DMX_FRONTEND_0;
+ dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->hw_frontend);
+ ret = dvbdemux->dmx.connect_frontend(&dvbdemux->dmx, &dvb->hw_frontend);
if (ret < 0)
return ret;
- input->attached = 3;
+ dvb->attached = 0x12;
- ret = dvb_net_init(adap, &input->dvbnet, input->dmxdev.demux);
+ ret = dvb_net_init(adap, &dvb->dvbnet, dvb->dmxdev.demux);
if (ret < 0)
return ret;
- input->attached = 4;
+ dvb->attached = 0x20;
- input->fe = NULL;
+ dvb->fe = dvb->fe2 = NULL;
switch (port->type) {
+ case DDB_TUNER_MXL5XX:
+ if (fe_attach_mxl5xx(input) < 0)
+ return -ENODEV;
+ break;
case DDB_TUNER_DVBS_ST:
if (demod_attach_stv0900(input, 0) < 0)
return -ENODEV;
if (tuner_attach_stv6110(input, 0) < 0)
return -ENODEV;
- if (input->fe) {
- if (dvb_register_frontend(adap, input->fe) < 0)
- return -ENODEV;
- }
break;
case DDB_TUNER_DVBS_ST_AA:
if (demod_attach_stv0900(input, 1) < 0)
return -ENODEV;
if (tuner_attach_stv6110(input, 1) < 0)
return -ENODEV;
- if (input->fe) {
- if (dvb_register_frontend(adap, input->fe) < 0)
- return -ENODEV;
- }
+ break;
+ case DDB_TUNER_DVBS_STV0910:
+ if (demod_attach_stv0910(input, 0) < 0)
+ return -ENODEV;
+ if (tuner_attach_stv6111(input, 0) < 0)
+ return -ENODEV;
+ break;
+ case DDB_TUNER_DVBS_STV0910_PR:
+ if (demod_attach_stv0910(input, 1) < 0)
+ return -ENODEV;
+ if (tuner_attach_stv6111(input, 1) < 0)
+ return -ENODEV;
+ break;
+ case DDB_TUNER_DVBS_STV0910_P:
+ if (demod_attach_stv0910(input, 0) < 0)
+ return -ENODEV;
+ if (tuner_attach_stv6111(input, 1) < 0)
+ return -ENODEV;
break;
case DDB_TUNER_DVBCT_TR:
if (demod_attach_drxk(input) < 0)
return -ENODEV;
if (tuner_attach_tda18271(input) < 0)
return -ENODEV;
- if (dvb_register_frontend(adap, input->fe) < 0)
- return -ENODEV;
- if (input->fe2) {
- if (dvb_register_frontend(adap, input->fe2) < 0)
- return -ENODEV;
- input->fe2->tuner_priv = input->fe->tuner_priv;
- memcpy(&input->fe2->ops.tuner_ops,
- &input->fe->ops.tuner_ops,
- sizeof(struct dvb_tuner_ops));
- }
break;
case DDB_TUNER_DVBCT_ST:
if (demod_attach_stv0367(input) < 0)
return -ENODEV;
- if (tuner_attach_tda18212(input, port->type) < 0)
+ if (tuner_attach_tda18212(input, port->type) < 0) {
+ if (dvb->fe2)
+ dvb_frontend_detach(dvb->fe2);
+ if (dvb->fe)
+ dvb_frontend_detach(dvb->fe);
return -ENODEV;
- if (input->fe) {
- if (dvb_register_frontend(adap, input->fe) < 0)
- return -ENODEV;
}
break;
case DDB_TUNER_DVBC2T2I_SONY_P:
+ if (input->port->dev->link[input->port->lnr].info->ts_quirks &
+ TS_QUIRK_ALT_OSC)
+ osc24 = 0;
+ else
+ osc24 = 1;
+ /* fall-through */
case DDB_TUNER_DVBCT2_SONY_P:
case DDB_TUNER_DVBC2T2_SONY_P:
case DDB_TUNER_ISDBT_SONY_P:
- if (port->type == DDB_TUNER_DVBC2T2I_SONY_P)
- sony_osc24 = 1;
- if (input->port->dev->info->ts_quirks & TS_QUIRK_ALT_OSC)
- sony_osc24 = 0;
- if (input->port->dev->info->ts_quirks & TS_QUIRK_SERIAL)
- sony_tspar = 0;
+ if (input->port->dev->link[input->port->lnr].info->ts_quirks
+ & TS_QUIRK_SERIAL)
+ par = 0;
else
- sony_tspar = 1;
-
- if (demod_attach_cxd28xx(input, sony_tspar, sony_osc24) < 0)
+ par = 1;
+ if (demod_attach_cxd28xx(input, par, osc24) < 0)
return -ENODEV;
- if (tuner_attach_tda18212(input, port->type) < 0)
+ if (tuner_attach_tda18212(input, port->type) < 0) {
+ if (dvb->fe2)
+ dvb_frontend_detach(dvb->fe2);
+ if (dvb->fe)
+ dvb_frontend_detach(dvb->fe);
return -ENODEV;
- if (input->fe) {
- if (dvb_register_frontend(adap, input->fe) < 0)
- return -ENODEV;
}
break;
- case DDB_TUNER_XO2_DVBC2T2I_SONY:
- case DDB_TUNER_XO2_DVBCT2_SONY:
- case DDB_TUNER_XO2_DVBC2T2_SONY:
- case DDB_TUNER_XO2_ISDBT_SONY:
- if (port->type == DDB_TUNER_XO2_DVBC2T2I_SONY)
- sony_osc24 = 1;
-
- if (demod_attach_cxd28xx(input, 0, sony_osc24) < 0)
+ case DDB_TUNER_DVBC2T2I_SONY:
+ osc24 = 1;
+ /* fall-through */
+ case DDB_TUNER_DVBCT2_SONY:
+ case DDB_TUNER_DVBC2T2_SONY:
+ case DDB_TUNER_ISDBT_SONY:
+ if (demod_attach_cxd28xx(input, 0, osc24) < 0)
return -ENODEV;
- if (tuner_attach_tda18212(input, port->type) < 0)
+ if (tuner_attach_tda18212(input, port->type) < 0) {
+ if (dvb->fe2)
+ dvb_frontend_detach(dvb->fe2);
+ if (dvb->fe)
+ dvb_frontend_detach(dvb->fe);
return -ENODEV;
- if (input->fe) {
- if (dvb_register_frontend(adap, input->fe) < 0)
- return -ENODEV;
}
break;
+ default:
+ return 0;
}
+ dvb->attached = 0x30;
- input->attached = 5;
+ if (dvb->fe) {
+ if (dvb_register_frontend(adap, dvb->fe) < 0)
+ return -ENODEV;
+
+ if (dvb->fe2) {
+ if (dvb_register_frontend(adap, dvb->fe2) < 0)
+ return -ENODEV;
+ dvb->fe2->tuner_priv = dvb->fe->tuner_priv;
+ memcpy(&dvb->fe2->ops.tuner_ops,
+ &dvb->fe->ops.tuner_ops,
+ sizeof(struct dvb_tuner_ops));
+ }
+ }
+
+ dvb->attached = 0x31;
return 0;
}
-/****************************************************************************/
-/****************************************************************************/
+static int port_has_encti(struct ddb_port *port)
+{
+ struct device *dev = port->dev->dev;
+ u8 val;
+ int ret = i2c_read_reg(&port->i2c->adap, 0x20, 0, &val);
-static ssize_t ts_write(struct file *file, const __user char *buf,
- size_t count, loff_t *ppos)
+ if (!ret)
+ dev_info(dev, "[0x20]=0x%02x\n", val);
+ return ret ? 0 : 1;
+}
+
+static int port_has_cxd(struct ddb_port *port, u8 *type)
{
- struct dvb_device *dvbdev = file->private_data;
- struct ddb_output *output = dvbdev->priv;
- size_t left = count;
- int stat;
+ u8 val;
+ u8 probe[4] = { 0xe0, 0x00, 0x00, 0x00 }, data[4];
+ struct i2c_msg msgs[2] = {{ .addr = 0x40, .flags = 0,
+ .buf = probe, .len = 4 },
+ { .addr = 0x40, .flags = I2C_M_RD,
+ .buf = data, .len = 4 } };
+ val = i2c_transfer(&port->i2c->adap, msgs, 2);
+ if (val != 2)
+ return 0;
- while (left) {
- if (ddb_output_free(output) < 188) {
- if (file->f_flags & O_NONBLOCK)
- break;
- if (wait_event_interruptible(
- output->wq, ddb_output_free(output) >= 188) < 0)
- break;
- }
- stat = ddb_output_write(output, buf, left);
- if (stat < 0)
- break;
- buf += stat;
- left -= stat;
+ if (data[0] == 0x02 && data[1] == 0x2b && data[3] == 0x43)
+ *type = 2;
+ else
+ *type = 1;
+ return 1;
+}
+
+static int port_has_xo2(struct ddb_port *port, u8 *type, u8 *id)
+{
+ u8 probe[1] = { 0x00 }, data[4];
+
+ if (i2c_io(&port->i2c->adap, 0x10, probe, 1, data, 4))
+ return 0;
+ if (data[0] == 'D' && data[1] == 'F') {
+ *id = data[2];
+ *type = 1;
+ return 1;
}
- return (left == count) ? -EAGAIN : (count - left);
+ if (data[0] == 'C' && data[1] == 'I') {
+ *id = data[2];
+ *type = 2;
+ return 1;
+ }
+ return 0;
}
-static ssize_t ts_read(struct file *file, __user char *buf,
- size_t count, loff_t *ppos)
+static int port_has_stv0900(struct ddb_port *port)
{
- struct dvb_device *dvbdev = file->private_data;
- struct ddb_output *output = dvbdev->priv;
- struct ddb_input *input = output->port->input[0];
- int left, read;
+ u8 val;
- count -= count % 188;
- left = count;
- while (left) {
- if (ddb_input_avail(input) < 188) {
- if (file->f_flags & O_NONBLOCK)
- break;
- if (wait_event_interruptible(
- input->wq, ddb_input_avail(input) >= 188) < 0)
- break;
- }
- read = ddb_input_read(input, buf, left);
- if (read < 0)
- return read;
- left -= read;
- buf += read;
+ if (i2c_read_reg16(&port->i2c->adap, 0x69, 0xf100, &val) < 0)
+ return 0;
+ return 1;
+}
+
+static int port_has_stv0900_aa(struct ddb_port *port, u8 *id)
+{
+ if (i2c_read_reg16(&port->i2c->adap, 0x68, 0xf100, id) < 0)
+ return 0;
+ return 1;
+}
+
+static int port_has_drxks(struct ddb_port *port)
+{
+ u8 val;
+
+ if (i2c_read(&port->i2c->adap, 0x29, &val) < 0)
+ return 0;
+ if (i2c_read(&port->i2c->adap, 0x2a, &val) < 0)
+ return 0;
+ return 1;
+}
+
+static int port_has_stv0367(struct ddb_port *port)
+{
+ u8 val;
+
+ if (i2c_read_reg16(&port->i2c->adap, 0x1e, 0xf000, &val) < 0)
+ return 0;
+ if (val != 0x60)
+ return 0;
+ if (i2c_read_reg16(&port->i2c->adap, 0x1f, 0xf000, &val) < 0)
+ return 0;
+ if (val != 0x60)
+ return 0;
+ return 1;
+}
+
+static int init_xo2(struct ddb_port *port)
+{
+ struct i2c_adapter *i2c = &port->i2c->adap;
+ struct ddb *dev = port->dev;
+ u8 val, data[2];
+ int res;
+
+ res = i2c_read_regs(i2c, 0x10, 0x04, data, 2);
+ if (res < 0)
+ return res;
+
+ if (data[0] != 0x01) {
+ dev_info(dev->dev, "Port %d: invalid XO2\n", port->nr);
+ return -1;
}
- return (left == count) ? -EAGAIN : (count - left);
+
+ i2c_read_reg(i2c, 0x10, 0x08, &val);
+ if (val != 0) {
+ i2c_write_reg(i2c, 0x10, 0x08, 0x00);
+ msleep(100);
+ }
+ /* Enable tuner power, disable pll, reset demods */
+ i2c_write_reg(i2c, 0x10, 0x08, 0x04);
+ usleep_range(2000, 3000);
+ /* Release demod resets */
+ i2c_write_reg(i2c, 0x10, 0x08, 0x07);
+
+ /* speed: 0=55,1=75,2=90,3=104 MBit/s */
+ i2c_write_reg(i2c, 0x10, 0x09, xo2_speed);
+
+ if (dev->link[port->lnr].info->con_clock) {
+ dev_info(dev->dev, "Setting continuous clock for XO2\n");
+ i2c_write_reg(i2c, 0x10, 0x0a, 0x03);
+ i2c_write_reg(i2c, 0x10, 0x0b, 0x03);
+ } else {
+ i2c_write_reg(i2c, 0x10, 0x0a, 0x01);
+ i2c_write_reg(i2c, 0x10, 0x0b, 0x01);
+ }
+
+ usleep_range(2000, 3000);
+ /* Start XO2 PLL */
+ i2c_write_reg(i2c, 0x10, 0x08, 0x87);
+
+ return 0;
}
-static unsigned int ts_poll(struct file *file, poll_table *wait)
+static int init_xo2_ci(struct ddb_port *port)
{
- /*
- struct dvb_device *dvbdev = file->private_data;
- struct ddb_output *output = dvbdev->priv;
- struct ddb_input *input = output->port->input[0];
- */
- unsigned int mask = 0;
+ struct i2c_adapter *i2c = &port->i2c->adap;
+ struct ddb *dev = port->dev;
+ u8 val, data[2];
+ int res;
-#if 0
- if (data_avail_to_read)
- mask |= POLLIN | POLLRDNORM;
- if (data_avail_to_write)
- mask |= POLLOUT | POLLWRNORM;
+ res = i2c_read_regs(i2c, 0x10, 0x04, data, 2);
+ if (res < 0)
+ return res;
- poll_wait(file, &read_queue, wait);
- poll_wait(file, &write_queue, wait);
-#endif
- return mask;
+ if (data[0] > 1) {
+ dev_info(dev->dev, "Port %d: invalid XO2 CI %02x\n",
+ port->nr, data[0]);
+ return -1;
+ }
+ dev_info(dev->dev, "Port %d: DuoFlex CI %u.%u\n",
+ port->nr, data[0], data[1]);
+
+ i2c_read_reg(i2c, 0x10, 0x08, &val);
+ if (val != 0) {
+ i2c_write_reg(i2c, 0x10, 0x08, 0x00);
+ msleep(100);
+ }
+ /* Enable both CI */
+ i2c_write_reg(i2c, 0x10, 0x08, 3);
+ usleep_range(2000, 3000);
+
+
+ /* speed: 0=55,1=75,2=90,3=104 MBit/s */
+ i2c_write_reg(i2c, 0x10, 0x09, 1);
+
+ i2c_write_reg(i2c, 0x10, 0x08, 0x83);
+ usleep_range(2000, 3000);
+
+ if (dev->link[port->lnr].info->con_clock) {
+ dev_info(dev->dev, "Setting continuous clock for DuoFlex CI\n");
+ i2c_write_reg(i2c, 0x10, 0x0a, 0x03);
+ i2c_write_reg(i2c, 0x10, 0x0b, 0x03);
+ } else {
+ i2c_write_reg(i2c, 0x10, 0x0a, 0x01);
+ i2c_write_reg(i2c, 0x10, 0x0b, 0x01);
+ }
+ return 0;
}
-static const struct file_operations ci_fops = {
- .owner = THIS_MODULE,
- .read = ts_read,
- .write = ts_write,
- .open = dvb_generic_open,
- .release = dvb_generic_release,
- .poll = ts_poll,
+static int port_has_cxd28xx(struct ddb_port *port, u8 *id)
+{
+ struct i2c_adapter *i2c = &port->i2c->adap;
+ int status;
+
+ status = i2c_write_reg(&port->i2c->adap, 0x6e, 0, 0);
+ if (status)
+ return 0;
+ status = i2c_read_reg(i2c, 0x6e, 0xfd, id);
+ if (status)
+ return 0;
+ return 1;
+}
+
+static char *xo2names[] = {
+ "DUAL DVB-S2", "DUAL DVB-C/T/T2",
+ "DUAL DVB-ISDBT", "DUAL DVB-C/C2/T/T2",
+ "DUAL ATSC", "DUAL DVB-C/C2/T/T2,ISDB-T",
+ "", ""
};
-static struct dvb_device dvbdev_ci = {
- .readers = -1,
- .writers = -1,
- .users = -1,
- .fops = &ci_fops,
+static char *xo2types[] = {
+ "DVBS_ST", "DVBCT2_SONY",
+ "ISDBT_SONY", "DVBC2T2_SONY",
+ "ATSC_ST", "DVBC2T2I_SONY"
};
+static void ddb_port_probe(struct ddb_port *port)
+{
+ struct ddb *dev = port->dev;
+ u32 l = port->lnr;
+ u8 id, type;
+
+ port->name = "NO MODULE";
+ port->type_name = "NONE";
+ port->class = DDB_PORT_NONE;
+
+ /* Handle missing ports and ports without I2C */
+
+ if (port->nr == ts_loop) {
+ port->name = "TS LOOP";
+ port->class = DDB_PORT_LOOP;
+ return;
+ }
+
+ if (port->nr == 1 && dev->link[l].info->type == DDB_OCTOPUS_CI &&
+ dev->link[l].info->i2c_mask == 1) {
+ port->name = "NO TAB";
+ port->class = DDB_PORT_NONE;
+ return;
+ }
+
+ if (dev->link[l].info->type == DDB_OCTOPUS_MAX) {
+ port->name = "DUAL DVB-S2 MAX";
+ port->type_name = "MXL5XX";
+ port->class = DDB_PORT_TUNER;
+ port->type = DDB_TUNER_MXL5XX;
+ if (port->i2c)
+ ddbwritel(dev, I2C_SPEED_400,
+ port->i2c->regs + I2C_TIMING);
+ return;
+ }
+
+ if (port->nr > 1 && dev->link[l].info->type == DDB_OCTOPUS_CI) {
+ port->name = "CI internal";
+ port->type_name = "INTERNAL";
+ port->class = DDB_PORT_CI;
+ port->type = DDB_CI_INTERNAL;
+ }
+
+ if (!port->i2c)
+ return;
+
+ /* Probe ports with I2C */
+
+ if (port_has_cxd(port, &id)) {
+ if (id == 1) {
+ port->name = "CI";
+ port->type_name = "CXD2099";
+ port->class = DDB_PORT_CI;
+ port->type = DDB_CI_EXTERNAL_SONY;
+ ddbwritel(dev, I2C_SPEED_400,
+ port->i2c->regs + I2C_TIMING);
+ } else {
+ dev_info(dev->dev, "Port %d: Uninitialized DuoFlex\n",
+ port->nr);
+ return;
+ }
+ } else if (port_has_xo2(port, &type, &id)) {
+ ddbwritel(dev, I2C_SPEED_400, port->i2c->regs + I2C_TIMING);
+ /*dev_info(dev->dev, "XO2 ID %02x\n", id);*/
+ if (type == 2) {
+ port->name = "DuoFlex CI";
+ port->class = DDB_PORT_CI;
+ port->type = DDB_CI_EXTERNAL_XO2;
+ port->type_name = "CI_XO2";
+ init_xo2_ci(port);
+ return;
+ }
+ id >>= 2;
+ if (id > 5) {
+ port->name = "unknown XO2 DuoFlex";
+ port->type_name = "UNKNOWN";
+ } else {
+ port->name = xo2names[id];
+ port->class = DDB_PORT_TUNER;
+ port->type = DDB_TUNER_XO2 + id;
+ port->type_name = xo2types[id];
+ init_xo2(port);
+ }
+ } else if (port_has_cxd28xx(port, &id)) {
+ switch (id) {
+ case 0xa4:
+ port->name = "DUAL DVB-C2T2 CXD2843";
+ port->type = DDB_TUNER_DVBC2T2_SONY_P;
+ port->type_name = "DVBC2T2_SONY";
+ break;
+ case 0xb1:
+ port->name = "DUAL DVB-CT2 CXD2837";
+ port->type = DDB_TUNER_DVBCT2_SONY_P;
+ port->type_name = "DVBCT2_SONY";
+ break;
+ case 0xb0:
+ port->name = "DUAL ISDB-T CXD2838";
+ port->type = DDB_TUNER_ISDBT_SONY_P;
+ port->type_name = "ISDBT_SONY";
+ break;
+ case 0xc1:
+ port->name = "DUAL DVB-C2T2 ISDB-T CXD2854";
+ port->type = DDB_TUNER_DVBC2T2I_SONY_P;
+ port->type_name = "DVBC2T2I_ISDBT_SONY";
+ break;
+ default:
+ return;
+ }
+ port->class = DDB_PORT_TUNER;
+ ddbwritel(dev, I2C_SPEED_400, port->i2c->regs + I2C_TIMING);
+ } else if (port_has_stv0900(port)) {
+ port->name = "DUAL DVB-S2";
+ port->class = DDB_PORT_TUNER;
+ port->type = DDB_TUNER_DVBS_ST;
+ port->type_name = "DVBS_ST";
+ ddbwritel(dev, I2C_SPEED_100, port->i2c->regs + I2C_TIMING);
+ } else if (port_has_stv0900_aa(port, &id)) {
+ port->name = "DUAL DVB-S2";
+ port->class = DDB_PORT_TUNER;
+ if (id == 0x51) {
+ if (port->nr == 0 &&
+ dev->link[l].info->ts_quirks & TS_QUIRK_REVERSED)
+ port->type = DDB_TUNER_DVBS_STV0910_PR;
+ else
+ port->type = DDB_TUNER_DVBS_STV0910_P;
+ port->type_name = "DVBS_ST_0910";
+ } else {
+ port->type = DDB_TUNER_DVBS_ST_AA;
+ port->type_name = "DVBS_ST_AA";
+ }
+ ddbwritel(dev, I2C_SPEED_100, port->i2c->regs + I2C_TIMING);
+ } else if (port_has_drxks(port)) {
+ port->name = "DUAL DVB-C/T";
+ port->class = DDB_PORT_TUNER;
+ port->type = DDB_TUNER_DVBCT_TR;
+ port->type_name = "DVBCT_TR";
+ ddbwritel(dev, I2C_SPEED_400, port->i2c->regs + I2C_TIMING);
+ } else if (port_has_stv0367(port)) {
+ port->name = "DUAL DVB-C/T";
+ port->class = DDB_PORT_TUNER;
+ port->type = DDB_TUNER_DVBCT_ST;
+ port->type_name = "DVBCT_ST";
+ ddbwritel(dev, I2C_SPEED_100, port->i2c->regs + I2C_TIMING);
+ } else if (port_has_encti(port)) {
+ port->name = "ENCTI";
+ port->class = DDB_PORT_LOOP;
+ }
+}
+
+
/****************************************************************************/
/****************************************************************************/
/****************************************************************************/
-static void input_tasklet(unsigned long data)
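+/* Poll for the CI_READY flag in CI_CONTROL, giving up after 10 tries. */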
+static int wait_ci_ready(struct ddb_ci *ci)
{
- struct ddb_input *input = (struct ddb_input *) data;
- struct ddb *dev = input->port->dev;
+ u32 count = 10;
+
+ ndelay(500);
+ do {
+ if (ddbreadl(ci->port->dev,
+ CI_CONTROL(ci->nr)) & CI_READY)
+ break;
+ usleep_range(1, 2);
+ if ((--count) == 0)
+ return -1;
+ } while (1);
+ return 0;
+}
+
+static int read_attribute_mem(struct dvb_ca_en50221 *ca,
+ int slot, int address)
+{
+ struct ddb_ci *ci = ca->data;
+ u32 val, off = (address >> 1) & (CI_BUFFER_SIZE - 1);
+
+ if (address > CI_BUFFER_SIZE)
+ return -1;
+ ddbwritel(ci->port->dev, CI_READ_CMD | (1 << 16) | address,
+ CI_DO_READ_ATTRIBUTES(ci->nr));
+ wait_ci_ready(ci);
+ val = 0xff & ddbreadl(ci->port->dev, CI_BUFFER(ci->nr) + off);
+ return val;
+}
+
+static int write_attribute_mem(struct dvb_ca_en50221 *ca, int slot,
+ int address, u8 value)
+{
+ struct ddb_ci *ci = ca->data;
+
+ ddbwritel(ci->port->dev, CI_WRITE_CMD | (value << 16) | address,
+ CI_DO_ATTRIBUTE_RW(ci->nr));
+ wait_ci_ready(ci);
+ return 0;
+}
+
+static int read_cam_control(struct dvb_ca_en50221 *ca,
+ int slot, u8 address)
+{
+ u32 count = 100;
+ struct ddb_ci *ci = ca->data;
+ u32 res;
+
+ ddbwritel(ci->port->dev, CI_READ_CMD | address,
+ CI_DO_IO_RW(ci->nr));
+ ndelay(500);
+ do {
+ res = ddbreadl(ci->port->dev, CI_READDATA(ci->nr));
+ if (res & CI_READY)
+ break;
+ usleep_range(1, 2);
+ if ((--count) == 0)
+ return -1;
+ } while (1);
+ return 0xff & res;
+}
+
+static int write_cam_control(struct dvb_ca_en50221 *ca, int slot,
+ u8 address, u8 value)
+{
+ struct ddb_ci *ci = ca->data;
+
+ ddbwritel(ci->port->dev, CI_WRITE_CMD | (value << 16) | address,
+ CI_DO_IO_RW(ci->nr));
+ wait_ci_ready(ci);
+ return 0;
+}
- spin_lock(&input->lock);
- if (!input->running) {
- spin_unlock(&input->lock);
+static int slot_reset(struct dvb_ca_en50221 *ca, int slot)
+{
+ struct ddb_ci *ci = ca->data;
+
+ ddbwritel(ci->port->dev, CI_POWER_ON,
+ CI_CONTROL(ci->nr));
+ msleep(100);
+ ddbwritel(ci->port->dev, CI_POWER_ON | CI_RESET_CAM,
+ CI_CONTROL(ci->nr));
+ ddbwritel(ci->port->dev, CI_ENABLE | CI_POWER_ON | CI_RESET_CAM,
+ CI_CONTROL(ci->nr));
+ udelay(20);
+ ddbwritel(ci->port->dev, CI_ENABLE | CI_POWER_ON,
+ CI_CONTROL(ci->nr));
+ return 0;
+}
+
+static int slot_shutdown(struct dvb_ca_en50221 *ca, int slot)
+{
+ struct ddb_ci *ci = ca->data;
+
+ ddbwritel(ci->port->dev, 0, CI_CONTROL(ci->nr));
+ msleep(300);
+ return 0;
+}
+
+static int slot_ts_enable(struct dvb_ca_en50221 *ca, int slot)
+{
+ struct ddb_ci *ci = ca->data;
+ u32 val = ddbreadl(ci->port->dev, CI_CONTROL(ci->nr));
+
+ ddbwritel(ci->port->dev, val | CI_BYPASS_DISABLE,
+ CI_CONTROL(ci->nr));
+ return 0;
+}
+
+static int poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int open)
+{
+ struct ddb_ci *ci = ca->data;
+ u32 val = ddbreadl(ci->port->dev, CI_CONTROL(ci->nr));
+ int stat = 0;
+
+ if (val & CI_CAM_DETECT)
+ stat |= DVB_CA_EN50221_POLL_CAM_PRESENT;
+ if (val & CI_CAM_READY)
+ stat |= DVB_CA_EN50221_POLL_CAM_READY;
+ return stat;
+}
+
+static struct dvb_ca_en50221 en_templ = {
+ .read_attribute_mem = read_attribute_mem,
+ .write_attribute_mem = write_attribute_mem,
+ .read_cam_control = read_cam_control,
+ .write_cam_control = write_cam_control,
+ .slot_reset = slot_reset,
+ .slot_shutdown = slot_shutdown,
+ .slot_ts_enable = slot_ts_enable,
+ .poll_slot_status = poll_slot_status,
+};
+
+static void ci_attach(struct ddb_port *port)
+{
+ struct ddb_ci *ci = NULL;
+
+ ci = kzalloc(sizeof(*ci), GFP_KERNEL);
+ if (!ci)
return;
- }
- input->stat = ddbreadl(DMA_BUFFER_CURRENT(input->nr));
+ memcpy(&ci->en, &en_templ, sizeof(en_templ));
+ ci->en.data = ci;
+ port->en = &ci->en;
+ ci->port = port;
+ ci->nr = port->nr - 2;
+}
+
+/****************************************************************************/
+/****************************************************************************/
+/****************************************************************************/
- if (input->port->class == DDB_PORT_TUNER) {
- if (4&ddbreadl(DMA_BUFFER_CONTROL(input->nr)))
- printk(KERN_ERR "Overflow input %d\n", input->nr);
- while (input->cbuf != ((input->stat >> 11) & 0x1f)
- || (4&ddbreadl(DMA_BUFFER_CONTROL(input->nr)))) {
- dvb_dmx_swfilter_packets(&input->demux,
- input->vbuf[input->cbuf],
- input->dma_buf_size / 188);
+static int write_creg(struct ddb_ci *ci, u8 data, u8 mask)
+{
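+ /* XO2 CI modules answer on I2C address 0x12, the CI_B port (DDB_CI_EXTERNAL_XO2_B) on 0x13 */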
+ struct i2c_adapter *i2c = &ci->port->i2c->adap;
+ u8 adr = (ci->port->type == DDB_CI_EXTERNAL_XO2) ? 0x12 : 0x13;
- input->cbuf = (input->cbuf + 1) % input->dma_buf_num;
- ddbwritel((input->cbuf << 11),
- DMA_BUFFER_ACK(input->nr));
- input->stat = ddbreadl(DMA_BUFFER_CURRENT(input->nr));
- }
- }
- if (input->port->class == DDB_PORT_CI)
- wake_up(&input->wq);
- spin_unlock(&input->lock);
+ ci->port->creg = (ci->port->creg & ~mask) | data;
+ return i2c_write_reg(i2c, adr, 0x02, ci->port->creg);
}
-static void output_tasklet(unsigned long data)
+static int read_attribute_mem_xo2(struct dvb_ca_en50221 *ca,
+ int slot, int address)
{
- struct ddb_output *output = (struct ddb_output *) data;
- struct ddb *dev = output->port->dev;
+ struct ddb_ci *ci = ca->data;
+ struct i2c_adapter *i2c = &ci->port->i2c->adap;
+ u8 adr = (ci->port->type == DDB_CI_EXTERNAL_XO2) ? 0x12 : 0x13;
+ int res;
+ u8 val;
+
+ res = i2c_read_reg16(i2c, adr, 0x8000 | address, &val);
+ return res ? res : val;
+}
+
+static int write_attribute_mem_xo2(struct dvb_ca_en50221 *ca, int slot,
+ int address, u8 value)
+{
+ struct ddb_ci *ci = ca->data;
+ struct i2c_adapter *i2c = &ci->port->i2c->adap;
+ u8 adr = (ci->port->type == DDB_CI_EXTERNAL_XO2) ? 0x12 : 0x13;
+
+ return i2c_write_reg16(i2c, adr, 0x8000 | address, value);
+}
- spin_lock(&output->lock);
- if (!output->running) {
- spin_unlock(&output->lock);
+static int read_cam_control_xo2(struct dvb_ca_en50221 *ca,
+ int slot, u8 address)
+{
+ struct ddb_ci *ci = ca->data;
+ struct i2c_adapter *i2c = &ci->port->i2c->adap;
+ u8 adr = (ci->port->type == DDB_CI_EXTERNAL_XO2) ? 0x12 : 0x13;
+ u8 val;
+ int res;
+
+ res = i2c_read_reg(i2c, adr, 0x20 | (address & 3), &val);
+ return res ? res : val;
+}
+
+static int write_cam_control_xo2(struct dvb_ca_en50221 *ca, int slot,
+ u8 address, u8 value)
+{
+ struct ddb_ci *ci = ca->data;
+ struct i2c_adapter *i2c = &ci->port->i2c->adap;
+ u8 adr = (ci->port->type == DDB_CI_EXTERNAL_XO2) ? 0x12 : 0x13;
+
+ return i2c_write_reg(i2c, adr, 0x20 | (address & 3), value);
+}
+
+static int slot_reset_xo2(struct dvb_ca_en50221 *ca, int slot)
+{
+ struct ddb_ci *ci = ca->data;
+
+ dev_dbg(ci->port->dev->dev, "%s\n", __func__);
+ write_creg(ci, 0x01, 0x01);
+ write_creg(ci, 0x04, 0x04);
+ msleep(20);
+ write_creg(ci, 0x02, 0x02);
+ write_creg(ci, 0x00, 0x04);
+ write_creg(ci, 0x18, 0x18);
+ return 0;
+}
+
+static int slot_shutdown_xo2(struct dvb_ca_en50221 *ca, int slot)
+{
+ struct ddb_ci *ci = ca->data;
+
+ dev_dbg(ci->port->dev->dev, "%s\n", __func__);
+ write_creg(ci, 0x10, 0xff);
+ write_creg(ci, 0x08, 0x08);
+ return 0;
+}
+
+static int slot_ts_enable_xo2(struct dvb_ca_en50221 *ca, int slot)
+{
+ struct ddb_ci *ci = ca->data;
+
+ dev_info(ci->port->dev->dev, "%s\n", __func__);
+ write_creg(ci, 0x00, 0x10);
+ return 0;
+}
+
+static int poll_slot_status_xo2(struct dvb_ca_en50221 *ca, int slot, int open)
+{
+ struct ddb_ci *ci = ca->data;
+ struct i2c_adapter *i2c = &ci->port->i2c->adap;
+ u8 adr = (ci->port->type == DDB_CI_EXTERNAL_XO2) ? 0x12 : 0x13;
+ u8 val = 0;
+ int stat = 0;
+
+ i2c_read_reg(i2c, adr, 0x01, &val);
+
+ if (val & 2)
+ stat |= DVB_CA_EN50221_POLL_CAM_PRESENT;
+ if (val & 1)
+ stat |= DVB_CA_EN50221_POLL_CAM_READY;
+ return stat;
+}
+
+static struct dvb_ca_en50221 en_xo2_templ = {
+ .read_attribute_mem = read_attribute_mem_xo2,
+ .write_attribute_mem = write_attribute_mem_xo2,
+ .read_cam_control = read_cam_control_xo2,
+ .write_cam_control = write_cam_control_xo2,
+ .slot_reset = slot_reset_xo2,
+ .slot_shutdown = slot_shutdown_xo2,
+ .slot_ts_enable = slot_ts_enable_xo2,
+ .poll_slot_status = poll_slot_status_xo2,
+};
+
+static void ci_xo2_attach(struct ddb_port *port)
+{
+ struct ddb_ci *ci;
+
+ ci = kzalloc(sizeof(*ci), GFP_KERNEL);
+ if (!ci)
return;
- }
- output->stat = ddbreadl(DMA_BUFFER_CURRENT(output->nr + 8));
- wake_up(&output->wq);
- spin_unlock(&output->lock);
+ memcpy(&ci->en, &en_xo2_templ, sizeof(en_xo2_templ));
+ ci->en.data = ci;
+ port->en = &ci->en;
+ ci->port = port;
+ ci->nr = port->nr - 2;
+ ci->port->creg = 0;
+ write_creg(ci, 0x10, 0xff);
+ write_creg(ci, 0x08, 0x08);
}
+/****************************************************************************/
+/****************************************************************************/
+/****************************************************************************/
static struct cxd2099_cfg cxd_cfg = {
- .bitrate = 62000,
+ .bitrate = 72000,
.adr = 0x40,
.polarity = 1,
.clock_mode = 1,
+ .max_i2c = 512,
};
static int ddb_ci_attach(struct ddb_port *port)
{
- int ret;
+ switch (port->type) {
+ case DDB_CI_EXTERNAL_SONY:
+ cxd_cfg.bitrate = ci_bitrate;
+ port->en = cxd2099_attach(&cxd_cfg, port, &port->i2c->adap);
+ if (!port->en)
+ return -ENODEV;
+ dvb_ca_en50221_init(port->dvb[0].adap,
+ port->en, 0, 1);
+ break;
- ret = dvb_register_adapter(&port->output->adap,
- "DDBridge",
- THIS_MODULE,
- &port->dev->pdev->dev,
- adapter_nr);
- if (ret < 0)
- return ret;
- port->en = cxd2099_attach(&cxd_cfg, port, &port->i2c->adap);
- if (!port->en) {
- dvb_unregister_adapter(&port->output->adap);
- return -ENODEV;
+ case DDB_CI_EXTERNAL_XO2:
+ case DDB_CI_EXTERNAL_XO2_B:
+ ci_xo2_attach(port);
+ if (!port->en)
+ return -ENODEV;
+ dvb_ca_en50221_init(port->dvb[0].adap, port->en, 0, 1);
+ break;
+
+ case DDB_CI_INTERNAL:
+ ci_attach(port);
+ if (!port->en)
+ return -ENODEV;
+ dvb_ca_en50221_init(port->dvb[0].adap, port->en, 0, 1);
+ break;
}
- ddb_input_start(port->input[0]);
- ddb_output_start(port->output);
- dvb_ca_en50221_init(&port->output->adap,
- port->en, 0, 1);
- ret = dvb_register_device(&port->output->adap, &port->output->dev,
- &dvbdev_ci, (void *) port->output,
- DVB_DEVICE_SEC, 0);
- return ret;
+ return 0;
}
static int ddb_port_attach(struct ddb_port *port)
@@ -1318,370 +2254,429 @@ static int ddb_port_attach(struct ddb_port *port)
if (ret < 0)
break;
ret = dvb_input_attach(port->input[1]);
+ if (ret < 0)
+ break;
+ port->input[0]->redi = port->input[0];
+ port->input[1]->redi = port->input[1];
break;
case DDB_PORT_CI:
ret = ddb_ci_attach(port);
+ if (ret < 0)
+ break;
+ /* fall-through */
+ case DDB_PORT_LOOP:
+ ret = dvb_register_device(port->dvb[0].adap,
+ &port->dvb[0].dev,
+ &dvbdev_ci, (void *) port->output,
+ DVB_DEVICE_SEC, 0);
break;
default:
break;
}
if (ret < 0)
- printk(KERN_ERR "port_attach on port %d failed\n", port->nr);
+ dev_err(port->dev->dev, "port_attach on port %d failed\n",
+ port->nr);
return ret;
}
-static int ddb_ports_attach(struct ddb *dev)
+int ddb_ports_attach(struct ddb *dev)
{
int i, ret = 0;
struct ddb_port *port;
- for (i = 0; i < dev->info->port_num; i++) {
+ if (dev->port_num) {
+ ret = dvb_register_adapters(dev);
+ if (ret < 0) {
+ dev_err(dev->dev, "Registering adapters failed. Check DVB_MAX_ADAPTERS in config.\n");
+ return ret;
+ }
+ }
+ for (i = 0; i < dev->port_num; i++) {
port = &dev->port[i];
ret = ddb_port_attach(port);
- if (ret < 0)
- break;
}
return ret;
}
-static void ddb_ports_detach(struct ddb *dev)
+void ddb_ports_detach(struct ddb *dev)
{
int i;
struct ddb_port *port;
- for (i = 0; i < dev->info->port_num; i++) {
+ for (i = 0; i < dev->port_num; i++) {
port = &dev->port[i];
+
switch (port->class) {
case DDB_PORT_TUNER:
dvb_input_detach(port->input[0]);
dvb_input_detach(port->input[1]);
break;
case DDB_PORT_CI:
- dvb_unregister_device(port->output->dev);
+ case DDB_PORT_LOOP:
+ if (port->dvb[0].dev)
+ dvb_unregister_device(port->dvb[0].dev);
if (port->en) {
- ddb_input_stop(port->input[0]);
- ddb_output_stop(port->output);
dvb_ca_en50221_release(port->en);
kfree(port->en);
port->en = NULL;
- dvb_unregister_adapter(&port->output->adap);
}
break;
}
}
+ dvb_unregister_adapters(dev);
}
-/****************************************************************************/
-/****************************************************************************/
-static int init_xo2(struct ddb_port *port)
+/* Copy input DMA pointers to output DMA and ACK. */
+
+static void input_write_output(struct ddb_input *input,
+ struct ddb_output *output)
{
- struct i2c_adapter *i2c = &port->i2c->adap;
- u8 val, data[2];
- int res;
+ ddbwritel(output->port->dev,
+ input->dma->stat, DMA_BUFFER_ACK(output->dma));
+ output->dma->cbuf = (input->dma->stat >> 11) & 0x1f;
+ output->dma->coff = (input->dma->stat & 0x7ff) << 7;
+}
- res = i2c_read_regs(i2c, 0x10, 0x04, data, 2);
- if (res < 0)
- return res;
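+/* ACK the input's DMA buffers up to where the connected output has consumed them. */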
+static void output_ack_input(struct ddb_output *output,
+ struct ddb_input *input)
+{
+ ddbwritel(input->port->dev,
+ output->dma->stat, DMA_BUFFER_ACK(input->dma));
+}
- if (data[0] != 0x01) {
- pr_info("Port %d: invalid XO2\n", port->nr);
- return -1;
+static void input_write_dvb(struct ddb_input *input,
+ struct ddb_input *input2)
+{
+ struct ddb_dvb *dvb = &input2->port->dvb[input2->nr & 1];
+ struct ddb_dma *dma, *dma2;
+ struct ddb *dev = input->port->dev;
+ int ack = 1;
+
+ dma = dma2 = input->dma;
+ /* If an output is also connected, do not ACK here;
+ * input_write_output() will do the ACK.
+ */
+ if (input->redo) {
+ dma2 = input->redo->dma;
+ ack = 0;
+ }
+ while (dma->cbuf != ((dma->stat >> 11) & 0x1f)
+ || (4 & dma->ctrl)) {
+ if (4 & dma->ctrl) {
+ /* dev_err(dev->dev, "Overflow dma %d\n", dma->nr); */
+ ack = 1;
+ }
+ if (alt_dma)
+ dma_sync_single_for_cpu(dev->dev, dma2->pbuf[dma->cbuf],
+ dma2->size, DMA_FROM_DEVICE);
+ dvb_dmx_swfilter_packets(&dvb->demux,
+ dma2->vbuf[dma->cbuf],
+ dma2->size / 188);
+ dma->cbuf = (dma->cbuf + 1) % dma2->num;
+ if (ack)
+ ddbwritel(dev, (dma->cbuf << 11),
+ DMA_BUFFER_ACK(dma));
+ dma->stat = safe_ddbreadl(dev, DMA_BUFFER_CURRENT(dma));
+ dma->ctrl = safe_ddbreadl(dev, DMA_BUFFER_CONTROL(dma));
}
+}
- i2c_read_reg(i2c, 0x10, 0x08, &val);
- if (val != 0) {
- i2c_write_reg(i2c, 0x10, 0x08, 0x00);
- msleep(100);
+static void input_work(struct work_struct *work)
+{
+ struct ddb_dma *dma = container_of(work, struct ddb_dma, work);
+ struct ddb_input *input = (struct ddb_input *) dma->io;
+ struct ddb *dev = input->port->dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dma->lock, flags);
+ if (!dma->running) {
+ spin_unlock_irqrestore(&dma->lock, flags);
+ return;
}
- /* Enable tuner power, disable pll, reset demods */
- i2c_write_reg(i2c, 0x10, 0x08, 0x04);
- usleep_range(2000, 3000);
- /* Release demod resets */
- i2c_write_reg(i2c, 0x10, 0x08, 0x07);
+ dma->stat = ddbreadl(dev, DMA_BUFFER_CURRENT(dma));
+ dma->ctrl = ddbreadl(dev, DMA_BUFFER_CONTROL(dma));
- /* speed: 0=55,1=75,2=90,3=104 MBit/s */
- i2c_write_reg(i2c, 0x10, 0x09,
- ((xo2_speed >= 0 && xo2_speed <= 3) ? xo2_speed : 2));
+ if (input->redi)
+ input_write_dvb(input, input->redi);
+ if (input->redo)
+ input_write_output(input, input->redo);
+ wake_up(&dma->wq);
+ spin_unlock_irqrestore(&dma->lock, flags);
+}
- i2c_write_reg(i2c, 0x10, 0x0a, 0x01);
- i2c_write_reg(i2c, 0x10, 0x0b, 0x01);
+static void input_handler(unsigned long data)
+{
+ struct ddb_input *input = (struct ddb_input *) data;
+ struct ddb_dma *dma = input->dma;
- usleep_range(2000, 3000);
- /* Start XO2 PLL */
- i2c_write_reg(i2c, 0x10, 0x08, 0x87);
- return 0;
+ /* If there is no input connected, input_work() will
+ * just copy pointers and ACK. So, there is no need to go
+ * through the workqueue.
+ */
+ if (input->redi)
+ queue_work(ddb_wq, &dma->work);
+ else
+ input_work(&dma->work);
}
-static int port_has_xo2(struct ddb_port *port, u8 *type, u8 *id)
+static void output_handler(unsigned long data)
{
- u8 probe[1] = { 0x00 }, data[4];
-
- *type = DDB_XO2_TYPE_NONE;
+ struct ddb_output *output = (struct ddb_output *) data;
+ struct ddb_dma *dma = output->dma;
+ struct ddb *dev = output->port->dev;
- if (i2c_io(&port->i2c->adap, 0x10, probe, 1, data, 4))
- return 0;
- if (data[0] == 'D' && data[1] == 'F') {
- *id = data[2];
- *type = DDB_XO2_TYPE_DUOFLEX;
- return 1;
- }
- if (data[0] == 'C' && data[1] == 'I') {
- *id = data[2];
- *type = DDB_XO2_TYPE_CI;
- return 1;
+ spin_lock(&dma->lock);
+ if (!dma->running) {
+ spin_unlock(&dma->lock);
+ return;
}
- return 0;
+ dma->stat = ddbreadl(dev, DMA_BUFFER_CURRENT(dma));
+ dma->ctrl = ddbreadl(dev, DMA_BUFFER_CONTROL(dma));
+ if (output->redi)
+ output_ack_input(output, output->redi);
+ wake_up(&dma->wq);
+ spin_unlock(&dma->lock);
}
/****************************************************************************/
/****************************************************************************/
-static int port_has_ci(struct ddb_port *port)
+static const struct ddb_regmap *io_regmap(struct ddb_io *io, int link)
{
- u8 val;
- return i2c_read_reg(&port->i2c->adap, 0x40, 0, &val) ? 0 : 1;
-}
+ const struct ddb_info *info;
-static int port_has_stv0900(struct ddb_port *port)
-{
- u8 val;
- if (i2c_read_reg16(&port->i2c->adap, 0x69, 0xf100, &val) < 0)
- return 0;
- return 1;
-}
+ if (link)
+ info = io->port->dev->link[io->port->lnr].info;
+ else
+ info = io->port->dev->link[0].info;
-static int port_has_stv0900_aa(struct ddb_port *port)
-{
- u8 val;
- if (i2c_read_reg16(&port->i2c->adap, 0x68, 0xf100, &val) < 0)
- return 0;
- return 1;
-}
+ if (!info)
+ return NULL;
-static int port_has_drxks(struct ddb_port *port)
-{
- u8 val;
- if (i2c_read(&port->i2c->adap, 0x29, &val) < 0)
- return 0;
- if (i2c_read(&port->i2c->adap, 0x2a, &val) < 0)
- return 0;
- return 1;
+ return info->regmap;
}
-static int port_has_stv0367(struct ddb_port *port)
+static void ddb_dma_init(struct ddb_io *io, int nr, int out)
{
- u8 val;
- if (i2c_read_reg16(&port->i2c->adap, 0x1e, 0xf000, &val) < 0)
- return 0;
- if (val != 0x60)
- return 0;
- if (i2c_read_reg16(&port->i2c->adap, 0x1f, 0xf000, &val) < 0)
- return 0;
- if (val != 0x60)
- return 0;
- return 1;
+ struct ddb_dma *dma;
+ const struct ddb_regmap *rm = io_regmap(io, 0);
+
+ dma = out ? &io->port->dev->odma[nr] : &io->port->dev->idma[nr];
+ io->dma = dma;
+ dma->io = io;
+
+ spin_lock_init(&dma->lock);
+ init_waitqueue_head(&dma->wq);
+ if (out) {
+ dma->regs = rm->odma->base + rm->odma->size * nr;
+ dma->bufregs = rm->odma_buf->base + rm->odma_buf->size * nr;
+ dma->num = OUTPUT_DMA_BUFS;
+ dma->size = OUTPUT_DMA_SIZE;
+ dma->div = OUTPUT_DMA_IRQ_DIV;
+ } else {
+ INIT_WORK(&dma->work, input_work);
+ dma->regs = rm->idma->base + rm->idma->size * nr;
+ dma->bufregs = rm->idma_buf->base + rm->idma_buf->size * nr;
+ dma->num = INPUT_DMA_BUFS;
+ dma->size = INPUT_DMA_SIZE;
+ dma->div = INPUT_DMA_IRQ_DIV;
+ }
+ ddbwritel(io->port->dev, 0, DMA_BUFFER_ACK(dma));
+ dev_dbg(io->port->dev->dev, "init link %u, io %u, dma %u, dmaregs %08x bufregs %08x\n",
+ io->port->lnr, io->nr, nr, dma->regs, dma->bufregs);
}
-static int port_has_cxd28xx(struct ddb_port *port, u8 *id)
+static void ddb_input_init(struct ddb_port *port, int nr, int pnr, int anr)
{
- struct i2c_adapter *i2c = &port->i2c->adap;
- int status;
+ struct ddb *dev = port->dev;
+ struct ddb_input *input = &dev->input[anr];
+ const struct ddb_regmap *rm;
- status = i2c_write_reg(&port->i2c->adap, 0x6e, 0, 0);
- if (status)
- return 0;
- status = i2c_read_reg(i2c, 0x6e, 0xfd, id);
- if (status)
- return 0;
- return 1;
+ port->input[pnr] = input;
+ input->nr = nr;
+ input->port = port;
+ rm = io_regmap(input, 1);
+ input->regs = DDB_LINK_TAG(port->lnr) |
+ (rm->input->base + rm->input->size * nr);
+ dev_dbg(dev->dev, "init link %u, input %u, regs %08x\n",
+ port->lnr, nr, input->regs);
+
+ if (dev->has_dma) {
+ const struct ddb_regmap *rm0 = io_regmap(input, 0);
+ u32 base = rm0->irq_base_idma;
+ u32 dma_nr = nr;
+
+ if (port->lnr)
+ dma_nr += 32 + (port->lnr - 1) * 8;
+
+ dev_dbg(dev->dev, "init link %u, input %u, handler %u\n",
+ port->lnr, nr, dma_nr + base);
+
+ dev->handler[0][dma_nr + base] = input_handler;
+ dev->handler_data[0][dma_nr + base] = (unsigned long) input;
+ ddb_dma_init(input, dma_nr, 0);
+ }
}
-static void ddb_port_probe(struct ddb_port *port)
+static void ddb_output_init(struct ddb_port *port, int nr)
{
struct ddb *dev = port->dev;
- char *modname = "NO MODULE";
- u8 xo2_type, xo2_id, cxd_id;
+ struct ddb_output *output = &dev->output[nr];
+ const struct ddb_regmap *rm;
- port->class = DDB_PORT_NONE;
+ port->output = output;
+ output->nr = nr;
+ output->port = port;
+ rm = io_regmap(output, 1);
+ output->regs = DDB_LINK_TAG(port->lnr) |
+ (rm->output->base + rm->output->size * nr);
- if (port_has_ci(port)) {
- modname = "CI";
- port->class = DDB_PORT_CI;
- ddbwritel(I2C_SPEED_400, port->i2c->regs + I2C_TIMING);
- } else if (port_has_xo2(port, &xo2_type, &xo2_id)) {
- printk(KERN_INFO "Port %d (TAB %d): XO2 type: %d, id: %d\n",
- port->nr, port->nr+1, xo2_type, xo2_id);
+ dev_dbg(dev->dev, "init link %u, output %u, regs %08x\n",
+ port->lnr, nr, output->regs);
- ddbwritel(I2C_SPEED_400, port->i2c->regs + I2C_TIMING);
+ if (dev->has_dma) {
+ const struct ddb_regmap *rm0 = io_regmap(output, 0);
+ u32 base = rm0->irq_base_odma;
- switch (xo2_type) {
- case DDB_XO2_TYPE_DUOFLEX:
- init_xo2(port);
- switch (xo2_id >> 2) {
- case 0:
- modname = "DUAL DVB-S2 (unsupported)";
- port->class = DDB_PORT_NONE;
- port->type = DDB_TUNER_XO2_DVBS_STV0910;
- break;
- case 1:
- modname = "DUAL DVB-C/T/T2";
- port->class = DDB_PORT_TUNER;
- port->type = DDB_TUNER_XO2_DVBCT2_SONY;
- break;
- case 2:
- modname = "DUAL DVB-ISDBT";
- port->class = DDB_PORT_TUNER;
- port->type = DDB_TUNER_XO2_ISDBT_SONY;
- break;
- case 3:
- modname = "DUAL DVB-C/C2/T/T2";
- port->class = DDB_PORT_TUNER;
- port->type = DDB_TUNER_XO2_DVBC2T2_SONY;
- break;
- case 4:
- modname = "DUAL ATSC (unsupported)";
- port->class = DDB_PORT_NONE;
- port->type = DDB_TUNER_XO2_ATSC_ST;
- break;
- case 5:
- modname = "DUAL DVB-C/C2/T/T2/ISDBT";
- port->class = DDB_PORT_TUNER;
- port->type = DDB_TUNER_XO2_DVBC2T2I_SONY;
- break;
- default:
- modname = "Unknown XO2 DuoFlex module\n";
- break;
- }
- break;
- case DDB_XO2_TYPE_CI:
- printk(KERN_INFO "DuoFlex CI modules not supported\n");
- break;
- default:
- printk(KERN_INFO "Unknown XO2 DuoFlex module\n");
- break;
- }
- } else if (port_has_cxd28xx(port, &cxd_id)) {
- switch (cxd_id) {
- case 0xa4:
- modname = "DUAL DVB-C2T2 CXD2843";
- port->class = DDB_PORT_TUNER;
- port->type = DDB_TUNER_DVBC2T2_SONY_P;
- break;
- case 0xb1:
- modname = "DUAL DVB-CT2 CXD2837";
- port->class = DDB_PORT_TUNER;
- port->type = DDB_TUNER_DVBCT2_SONY_P;
- break;
- case 0xb0:
- modname = "DUAL ISDB-T CXD2838";
- port->class = DDB_PORT_TUNER;
- port->type = DDB_TUNER_ISDBT_SONY_P;
- break;
- case 0xc1:
- modname = "DUAL DVB-C2T2 ISDB-T CXD2854";
- port->class = DDB_PORT_TUNER;
- port->type = DDB_TUNER_DVBC2T2I_SONY_P;
- break;
- default:
- modname = "Unknown CXD28xx tuner";
- break;
- }
- ddbwritel(I2C_SPEED_400, port->i2c->regs + I2C_TIMING);
- } else if (port_has_stv0900(port)) {
- modname = "DUAL DVB-S2";
- port->class = DDB_PORT_TUNER;
- port->type = DDB_TUNER_DVBS_ST;
- ddbwritel(I2C_SPEED_100, port->i2c->regs + I2C_TIMING);
- } else if (port_has_stv0900_aa(port)) {
- modname = "DUAL DVB-S2";
- port->class = DDB_PORT_TUNER;
- port->type = DDB_TUNER_DVBS_ST_AA;
- ddbwritel(I2C_SPEED_100, port->i2c->regs + I2C_TIMING);
- } else if (port_has_drxks(port)) {
- modname = "DUAL DVB-C/T";
- port->class = DDB_PORT_TUNER;
- port->type = DDB_TUNER_DVBCT_TR;
- ddbwritel(I2C_SPEED_400, port->i2c->regs + I2C_TIMING);
- } else if (port_has_stv0367(port)) {
- modname = "DUAL DVB-C/T";
- port->class = DDB_PORT_TUNER;
- port->type = DDB_TUNER_DVBCT_ST;
- ddbwritel(I2C_SPEED_100, port->i2c->regs + I2C_TIMING);
+ dev->handler[0][nr + base] = output_handler;
+ dev->handler_data[0][nr + base] = (unsigned long) output;
+ ddb_dma_init(output, nr, 1);
}
-
- printk(KERN_INFO "Port %d (TAB %d): %s\n",
- port->nr, port->nr+1, modname);
}
-static void ddb_input_init(struct ddb_port *port, int nr)
+static int ddb_port_match_i2c(struct ddb_port *port)
{
struct ddb *dev = port->dev;
- struct ddb_input *input = &dev->input[nr];
+ u32 i;
- input->nr = nr;
- input->port = port;
- input->dma_buf_num = INPUT_DMA_BUFS;
- input->dma_buf_size = INPUT_DMA_SIZE;
- ddbwritel(0, TS_INPUT_CONTROL(nr));
- ddbwritel(2, TS_INPUT_CONTROL(nr));
- ddbwritel(0, TS_INPUT_CONTROL(nr));
- ddbwritel(0, DMA_BUFFER_ACK(nr));
- tasklet_init(&input->tasklet, input_tasklet, (unsigned long) input);
- spin_lock_init(&input->lock);
- init_waitqueue_head(&input->wq);
+ for (i = 0; i < dev->i2c_num; i++) {
+ if (dev->i2c[i].link == port->lnr &&
+ dev->i2c[i].nr == port->nr) {
+ port->i2c = &dev->i2c[i];
+ return 1;
+ }
+ }
+ return 0;
}
-static void ddb_output_init(struct ddb_port *port, int nr)
+static int ddb_port_match_link_i2c(struct ddb_port *port)
{
struct ddb *dev = port->dev;
- struct ddb_output *output = &dev->output[nr];
- output->nr = nr;
- output->port = port;
- output->dma_buf_num = OUTPUT_DMA_BUFS;
- output->dma_buf_size = OUTPUT_DMA_SIZE;
+ u32 i;
- ddbwritel(0, TS_OUTPUT_CONTROL(nr));
- ddbwritel(2, TS_OUTPUT_CONTROL(nr));
- ddbwritel(0, TS_OUTPUT_CONTROL(nr));
- tasklet_init(&output->tasklet, output_tasklet, (unsigned long) output);
- init_waitqueue_head(&output->wq);
+ for (i = 0; i < dev->i2c_num; i++) {
+ if (dev->i2c[i].link == port->lnr) {
+ port->i2c = &dev->i2c[i];
+ return 1;
+ }
+ }
+ return 0;
}
-static void ddb_ports_init(struct ddb *dev)
+void ddb_ports_init(struct ddb *dev)
{
- int i;
+ u32 i, l, p;
struct ddb_port *port;
+ const struct ddb_info *info;
+ const struct ddb_regmap *rm;
+
+ for (p = l = 0; l < DDB_MAX_LINK; l++) {
+ info = dev->link[l].info;
+ if (!info)
+ continue;
+ rm = info->regmap;
+ if (!rm)
+ continue;
+ for (i = 0; i < info->port_num; i++, p++) {
+ port = &dev->port[p];
+ port->dev = dev;
+ port->nr = i;
+ port->lnr = l;
+ port->pnr = p;
+ port->gap = 0xffffffff;
+ port->obr = ci_bitrate;
+ mutex_init(&port->i2c_gate_lock);
+
+ if (!ddb_port_match_i2c(port)) {
+ if (info->type == DDB_OCTOPUS_MAX)
+ ddb_port_match_link_i2c(port);
+ }
- for (i = 0; i < dev->info->port_num; i++) {
- port = &dev->port[i];
- port->dev = dev;
- port->nr = i;
- port->i2c = &dev->i2c[i];
- port->input[0] = &dev->input[2 * i];
- port->input[1] = &dev->input[2 * i + 1];
- port->output = &dev->output[i];
+ ddb_port_probe(port);
+
+ port->dvb[0].adap = &dev->adap[2 * p];
+ port->dvb[1].adap = &dev->adap[2 * p + 1];
+
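+ /* An empty port directly following an XO2 DuoFlex CI is its
+ * second CI slot (CI_B); it shares the I2C bus of that port.
+ */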
+ if ((port->class == DDB_PORT_NONE) && i && p &&
+ dev->port[p - 1].type == DDB_CI_EXTERNAL_XO2) {
+ port->class = DDB_PORT_CI;
+ port->type = DDB_CI_EXTERNAL_XO2_B;
+ port->name = "DuoFlex CI_B";
+ port->i2c = dev->port[p - 1].i2c;
+ }
+
+ dev_info(dev->dev, "Port %u: Link %u, Link Port %u (TAB %u): %s\n",
+ port->pnr, port->lnr, port->nr, port->nr + 1,
+ port->name);
+
+ if (port->class == DDB_PORT_CI &&
+ port->type == DDB_CI_EXTERNAL_XO2) {
+ ddb_input_init(port, 2 * i, 0, 2 * i);
+ ddb_output_init(port, i);
+ continue;
+ }
- mutex_init(&port->i2c_gate_lock);
- ddb_port_probe(port);
- ddb_input_init(port, 2 * i);
- ddb_input_init(port, 2 * i + 1);
- ddb_output_init(port, i);
+ if (port->class == DDB_PORT_CI &&
+ port->type == DDB_CI_EXTERNAL_XO2_B) {
+ ddb_input_init(port, 2 * i - 1, 0, 2 * i - 1);
+ ddb_output_init(port, i);
+ continue;
+ }
+
+ if (port->class == DDB_PORT_NONE)
+ continue;
+
+ switch (dev->link[l].info->type) {
+ case DDB_OCTOPUS_CI:
+ if (i >= 2) {
+ ddb_input_init(port, 2 + i, 0, 2 + i);
+ ddb_input_init(port, 4 + i, 1, 4 + i);
+ ddb_output_init(port, i);
+ break;
+ } /* fallthrough */
+ case DDB_OCTOPUS:
+ ddb_input_init(port, 2 * i, 0, 2 * i);
+ ddb_input_init(port, 2 * i + 1, 1, 2 * i + 1);
+ ddb_output_init(port, i);
+ break;
+ case DDB_OCTOPUS_MAX:
+ case DDB_OCTOPUS_MAX_CT:
+ ddb_input_init(port, 2 * i, 0, 2 * p);
+ ddb_input_init(port, 2 * i + 1, 1, 2 * p + 1);
+ break;
+ default:
+ break;
+ }
+ }
}
+ dev->port_num = p;
}
-static void ddb_ports_release(struct ddb *dev)
+void ddb_ports_release(struct ddb *dev)
{
int i;
struct ddb_port *port;
- for (i = 0; i < dev->info->port_num; i++) {
+ for (i = 0; i < dev->port_num; i++) {
port = &dev->port[i];
- port->dev = dev;
- tasklet_kill(&port->input[0]->tasklet);
- tasklet_kill(&port->input[1]->tasklet);
- tasklet_kill(&port->output->tasklet);
+ if (port->input[0] && port->input[0]->dma)
+ cancel_work_sync(&port->input[0]->dma->work);
+ if (port->input[1] && port->input[1]->dma)
+ cancel_work_sync(&port->input[1]->dma->work);
+ if (port->output && port->output->dma)
+ cancel_work_sync(&port->output->dma->work);
}
}
@@ -1689,90 +2684,158 @@ static void ddb_ports_release(struct ddb *dev)
/****************************************************************************/
/****************************************************************************/
-static void irq_handle_i2c(struct ddb *dev, int n)
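+/* Call the handler registered for interrupt status bit _nr if that bit is set. */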
+#define IRQ_HANDLE(_nr) \
+ do { if ((s & (1UL << ((_nr) & 0x1f))) && dev->handler[0][_nr]) \
+ dev->handler[0][_nr](dev->handler_data[0][_nr]); } \
+ while (0)
+
+static void irq_handle_msg(struct ddb *dev, u32 s)
{
- struct ddb_i2c *i2c = &dev->i2c[n];
+ dev->i2c_irq++;
+ IRQ_HANDLE(0);
+ IRQ_HANDLE(1);
+ IRQ_HANDLE(2);
+ IRQ_HANDLE(3);
+}
- i2c->done = 1;
- wake_up(&i2c->wq);
+static void irq_handle_io(struct ddb *dev, u32 s)
+{
+ dev->ts_irq++;
+ if ((s & 0x000000f0)) {
+ IRQ_HANDLE(4);
+ IRQ_HANDLE(5);
+ IRQ_HANDLE(6);
+ IRQ_HANDLE(7);
+ }
+ if ((s & 0x0000ff00)) {
+ IRQ_HANDLE(8);
+ IRQ_HANDLE(9);
+ IRQ_HANDLE(10);
+ IRQ_HANDLE(11);
+ IRQ_HANDLE(12);
+ IRQ_HANDLE(13);
+ IRQ_HANDLE(14);
+ IRQ_HANDLE(15);
+ }
+ if ((s & 0x00ff0000)) {
+ IRQ_HANDLE(16);
+ IRQ_HANDLE(17);
+ IRQ_HANDLE(18);
+ IRQ_HANDLE(19);
+ IRQ_HANDLE(20);
+ IRQ_HANDLE(21);
+ IRQ_HANDLE(22);
+ IRQ_HANDLE(23);
+ }
+ if ((s & 0xff000000)) {
+ IRQ_HANDLE(24);
+ IRQ_HANDLE(25);
+ IRQ_HANDLE(26);
+ IRQ_HANDLE(27);
+ IRQ_HANDLE(28);
+ IRQ_HANDLE(29);
+ IRQ_HANDLE(30);
+ IRQ_HANDLE(31);
+ }
}
-static irqreturn_t irq_handler(int irq, void *dev_id)
+irqreturn_t ddb_irq_handler0(int irq, void *dev_id)
{
struct ddb *dev = (struct ddb *) dev_id;
- u32 s = ddbreadl(INTERRUPT_STATUS);
+ u32 s = ddbreadl(dev, INTERRUPT_STATUS);
- if (!s)
- return IRQ_NONE;
+ do {
+ if (s & 0x80000000)
+ return IRQ_NONE;
+ if (!(s & 0xfffff00))
+ return IRQ_NONE;
+ ddbwritel(dev, s & 0xfffff00, INTERRUPT_ACK);
+ irq_handle_io(dev, s);
+ } while ((s = ddbreadl(dev, INTERRUPT_STATUS)));
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t ddb_irq_handler1(int irq, void *dev_id)
+{
+ struct ddb *dev = (struct ddb *) dev_id;
+ u32 s = ddbreadl(dev, INTERRUPT_STATUS);
do {
- ddbwritel(s, INTERRUPT_ACK);
-
- if (s & 0x00000001)
- irq_handle_i2c(dev, 0);
- if (s & 0x00000002)
- irq_handle_i2c(dev, 1);
- if (s & 0x00000004)
- irq_handle_i2c(dev, 2);
- if (s & 0x00000008)
- irq_handle_i2c(dev, 3);
-
- if (s & 0x00000100)
- tasklet_schedule(&dev->input[0].tasklet);
- if (s & 0x00000200)
- tasklet_schedule(&dev->input[1].tasklet);
- if (s & 0x00000400)
- tasklet_schedule(&dev->input[2].tasklet);
- if (s & 0x00000800)
- tasklet_schedule(&dev->input[3].tasklet);
- if (s & 0x00001000)
- tasklet_schedule(&dev->input[4].tasklet);
- if (s & 0x00002000)
- tasklet_schedule(&dev->input[5].tasklet);
- if (s & 0x00004000)
- tasklet_schedule(&dev->input[6].tasklet);
- if (s & 0x00008000)
- tasklet_schedule(&dev->input[7].tasklet);
-
- if (s & 0x00010000)
- tasklet_schedule(&dev->output[0].tasklet);
- if (s & 0x00020000)
- tasklet_schedule(&dev->output[1].tasklet);
- if (s & 0x00040000)
- tasklet_schedule(&dev->output[2].tasklet);
- if (s & 0x00080000)
- tasklet_schedule(&dev->output[3].tasklet);
-
- /* if (s & 0x000f0000) printk(KERN_DEBUG "%08x\n", istat); */
- } while ((s = ddbreadl(INTERRUPT_STATUS)));
+ if (s & 0x80000000)
+ return IRQ_NONE;
+ if (!(s & 0x0000f))
+ return IRQ_NONE;
+ ddbwritel(dev, s & 0x0000f, INTERRUPT_ACK);
+ irq_handle_msg(dev, s);
+ } while ((s = ddbreadl(dev, INTERRUPT_STATUS)));
return IRQ_HANDLED;
}
-/******************************************************************************/
-/******************************************************************************/
-/******************************************************************************/
+irqreturn_t ddb_irq_handler(int irq, void *dev_id)
+{
+ struct ddb *dev = (struct ddb *) dev_id;
+ u32 s = ddbreadl(dev, INTERRUPT_STATUS);
+ int ret = IRQ_HANDLED;
+
+ if (!s)
+ return IRQ_NONE;
+ do {
+ if (s & 0x80000000)
+ return IRQ_NONE;
+ ddbwritel(dev, s, INTERRUPT_ACK);
+
+ if (s & 0x0000000f)
+ irq_handle_msg(dev, s);
+ if (s & 0x0fffff00)
+ irq_handle_io(dev, s);
+ } while ((s = ddbreadl(dev, INTERRUPT_STATUS)));
+
+ return ret;
+}
+
+/****************************************************************************/
+/****************************************************************************/
+/****************************************************************************/
+
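+/* Wait for the given register bit to clear, giving up after 100 polls. */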
+static int reg_wait(struct ddb *dev, u32 reg, u32 bit)
+{
+ u32 count = 0;
-static int flashio(struct ddb *dev, u8 *wbuf, u32 wlen, u8 *rbuf, u32 rlen)
+ while (safe_ddbreadl(dev, reg) & bit) {
+ ndelay(10);
+ if (++count == 100)
+ return -1;
+ }
+ return 0;
+}
+
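+/* Perform an SPI flash transfer on link lnr: write wlen bytes from
+ * wbuf, then read rlen bytes back into rbuf.
+ */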
+static int flashio(struct ddb *dev, u32 lnr, u8 *wbuf, u32 wlen, u8 *rbuf,
+ u32 rlen)
{
u32 data, shift;
+ u32 tag = DDB_LINK_TAG(lnr);
+ struct ddb_link *link = &dev->link[lnr];
+ mutex_lock(&link->flash_mutex);
if (wlen > 4)
- ddbwritel(1, SPI_CONTROL);
+ ddbwritel(dev, 1, tag | SPI_CONTROL);
while (wlen > 4) {
/* FIXME: check for big-endian */
- data = swab32(*(u32 *)wbuf);
+ data = swab32(*(u32 *) wbuf);
wbuf += 4;
wlen -= 4;
- ddbwritel(data, SPI_DATA);
- while (ddbreadl(SPI_CONTROL) & 0x0004)
- ;
+ ddbwritel(dev, data, tag | SPI_DATA);
+ if (reg_wait(dev, tag | SPI_CONTROL, 4))
+ goto fail;
}
-
if (rlen)
- ddbwritel(0x0001 | ((wlen << (8 + 3)) & 0x1f00), SPI_CONTROL);
+ ddbwritel(dev, 0x0001 | ((wlen << (8 + 3)) & 0x1f00),
+ tag | SPI_CONTROL);
else
- ddbwritel(0x0003 | ((wlen << (8 + 3)) & 0x1f00), SPI_CONTROL);
+ ddbwritel(dev, 0x0003 | ((wlen << (8 + 3)) & 0x1f00),
+ tag | SPI_CONTROL);
data = 0;
shift = ((4 - wlen) * 8);
@@ -1784,33 +2847,34 @@ static int flashio(struct ddb *dev, u8 *wbuf, u32 wlen, u8 *rbuf, u32 rlen)
}
if (shift)
data <<= shift;
- ddbwritel(data, SPI_DATA);
- while (ddbreadl(SPI_CONTROL) & 0x0004)
- ;
+ ddbwritel(dev, data, tag | SPI_DATA);
+ if (reg_wait(dev, tag | SPI_CONTROL, 4))
+ goto fail;
if (!rlen) {
- ddbwritel(0, SPI_CONTROL);
- return 0;
+ ddbwritel(dev, 0, tag | SPI_CONTROL);
+ goto exit;
}
if (rlen > 4)
- ddbwritel(1, SPI_CONTROL);
+ ddbwritel(dev, 1, tag | SPI_CONTROL);
while (rlen > 4) {
- ddbwritel(0xffffffff, SPI_DATA);
- while (ddbreadl(SPI_CONTROL) & 0x0004)
- ;
- data = ddbreadl(SPI_DATA);
+ ddbwritel(dev, 0xffffffff, tag | SPI_DATA);
+ if (reg_wait(dev, tag | SPI_CONTROL, 4))
+ goto fail;
+ data = ddbreadl(dev, tag | SPI_DATA);
*(u32 *) rbuf = swab32(data);
rbuf += 4;
rlen -= 4;
}
- ddbwritel(0x0003 | ((rlen << (8 + 3)) & 0x1F00), SPI_CONTROL);
- ddbwritel(0xffffffff, SPI_DATA);
- while (ddbreadl(SPI_CONTROL) & 0x0004)
- ;
+ ddbwritel(dev, 0x0003 | ((rlen << (8 + 3)) & 0x1F00),
+ tag | SPI_CONTROL);
+ ddbwritel(dev, 0xffffffff, tag | SPI_DATA);
+ if (reg_wait(dev, tag | SPI_CONTROL, 4))
+ goto fail;
- data = ddbreadl(SPI_DATA);
- ddbwritel(0, SPI_CONTROL);
+ data = ddbreadl(dev, tag | SPI_DATA);
+ ddbwritel(dev, 0, tag | SPI_CONTROL);
if (rlen < 4)
data <<= ((4 - rlen) * 8);
@@ -1821,31 +2885,47 @@ static int flashio(struct ddb *dev, u8 *wbuf, u32 wlen, u8 *rbuf, u32 rlen)
rbuf++;
rlen--;
}
+exit:
+ mutex_unlock(&link->flash_mutex);
return 0;
+fail:
+ mutex_unlock(&link->flash_mutex);
+ return -1;
}
-#define DDB_MAGIC 'd'
+int ddbridge_flashread(struct ddb *dev, u32 link, u8 *buf, u32 addr, u32 len)
+{
+ u8 cmd[4] = {0x03, (addr >> 16) & 0xff,
+ (addr >> 8) & 0xff, addr & 0xff};
-struct ddb_flashio {
- __user __u8 *write_buf;
- __u32 write_len;
- __user __u8 *read_buf;
- __u32 read_len;
-};
+ return flashio(dev, link, cmd, 4, buf, len);
+}
-#define IOCTL_DDB_FLASHIO _IOWR(DDB_MAGIC, 0x00, struct ddb_flashio)
+/*
+ * TODO/FIXME: add/implement IOCTLs from upstream driver
+ */
#define DDB_NAME "ddbridge"
static u32 ddb_num;
-static struct ddb *ddbs[32];
-static struct class *ddb_class;
static int ddb_major;
+static DEFINE_MUTEX(ddb_mutex);
+
+static int ddb_release(struct inode *inode, struct file *file)
+{
+ struct ddb *dev = file->private_data;
+
+ dev->ddb_dev_users--;
+ return 0;
+}
static int ddb_open(struct inode *inode, struct file *file)
{
struct ddb *dev = ddbs[iminor(inode)];
+ if (dev->ddb_dev_users)
+ return -EBUSY;
+ dev->ddb_dev_users++;
file->private_data = dev;
return 0;
}
@@ -1853,44 +2933,17 @@ static int ddb_open(struct inode *inode, struct file *file)
static long ddb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct ddb *dev = file->private_data;
- __user void *parg = (__user void *)arg;
- int res;
-
- switch (cmd) {
- case IOCTL_DDB_FLASHIO:
- {
- struct ddb_flashio fio;
- u8 *rbuf, *wbuf;
-
- if (copy_from_user(&fio, parg, sizeof(fio)))
- return -EFAULT;
- if (fio.write_len > 1028 || fio.read_len > 1028)
- return -EINVAL;
- if (fio.write_len + fio.read_len > 1028)
- return -EINVAL;
+ dev_warn(dev->dev, "DDB IOCTLs unsupported (cmd: %d, arg: %lu)\n",
+ cmd, arg);
- wbuf = &dev->iobuf[0];
- rbuf = wbuf + fio.write_len;
-
- if (copy_from_user(wbuf, fio.write_buf, fio.write_len))
- return -EFAULT;
- res = flashio(dev, wbuf, fio.write_len, rbuf, fio.read_len);
- if (res)
- return res;
- if (copy_to_user(fio.read_buf, rbuf, fio.read_len))
- return -EFAULT;
- break;
- }
- default:
- return -ENOTTY;
- }
- return 0;
+ return -ENOTTY;
}
static const struct file_operations ddb_fops = {
.unlocked_ioctl = ddb_ioctl,
.open = ddb_open,
+ .release = ddb_release,
};
static char *ddb_devnode(struct device *device, umode_t *mode)
@@ -1900,369 +2953,684 @@ static char *ddb_devnode(struct device *device, umode_t *mode)
return kasprintf(GFP_KERNEL, "ddbridge/card%d", dev->nr);
}
-static int ddb_class_create(void)
+#define __ATTR_MRO(_name, _show) { \
+ .attr = { .name = __stringify(_name), .mode = 0444 }, \
+ .show = _show, \
+}
+
+#define __ATTR_MWO(_name, _store) { \
+ .attr = { .name = __stringify(_name), .mode = 0222 }, \
+ .store = _store, \
+}
+
+static ssize_t ports_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
- ddb_major = register_chrdev(0, DDB_NAME, &ddb_fops);
- if (ddb_major < 0)
- return ddb_major;
+ struct ddb *dev = dev_get_drvdata(device);
- ddb_class = class_create(THIS_MODULE, DDB_NAME);
- if (IS_ERR(ddb_class)) {
- unregister_chrdev(ddb_major, DDB_NAME);
- return PTR_ERR(ddb_class);
- }
- ddb_class->devnode = ddb_devnode;
- return 0;
+ return sprintf(buf, "%d\n", dev->port_num);
}
-static void ddb_class_destroy(void)
+static ssize_t ts_irq_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
- class_destroy(ddb_class);
- unregister_chrdev(ddb_major, DDB_NAME);
+ struct ddb *dev = dev_get_drvdata(device);
+
+ return sprintf(buf, "%d\n", dev->ts_irq);
}
-static int ddb_device_create(struct ddb *dev)
+static ssize_t i2c_irq_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
- dev->nr = ddb_num++;
- dev->ddb_dev = device_create(ddb_class, NULL,
- MKDEV(ddb_major, dev->nr),
- dev, "ddbridge%d", dev->nr);
- ddbs[dev->nr] = dev;
- if (IS_ERR(dev->ddb_dev))
- return -1;
- return 0;
+ struct ddb *dev = dev_get_drvdata(device);
+
+ return sprintf(buf, "%d\n", dev->i2c_irq);
}
-static void ddb_device_destroy(struct ddb *dev)
+static ssize_t fan_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
- ddb_num--;
- if (IS_ERR(dev->ddb_dev))
- return;
- device_destroy(ddb_class, MKDEV(ddb_major, 0));
+ struct ddb *dev = dev_get_drvdata(device);
+ u32 val;
+
+ val = ddbreadl(dev, GPIO_OUTPUT) & 1;
+ return sprintf(buf, "%d\n", val);
}
+static ssize_t fan_store(struct device *device, struct device_attribute *d,
+ const char *buf, size_t count)
+{
+ struct ddb *dev = dev_get_drvdata(device);
+ u32 val;
-/****************************************************************************/
-/****************************************************************************/
-/****************************************************************************/
+ if (sscanf(buf, "%u\n", &val) != 1)
+ return -EINVAL;
+ ddbwritel(dev, 1, GPIO_DIRECTION);
+ ddbwritel(dev, val & 1, GPIO_OUTPUT);
+ return count;
+}
-static void ddb_unmap(struct ddb *dev)
+static ssize_t fanspeed_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
- if (dev->regs)
- iounmap(dev->regs);
- vfree(dev);
+ struct ddb *dev = dev_get_drvdata(device);
+ int num = attr->attr.name[8] - 0x30;
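+ /* attribute is named "fanspeedN"; the digit at offset 8 selects the link */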
+ struct ddb_link *link = &dev->link[num];
+ u32 spd;
+
+ spd = ddblreadl(link, TEMPMON_FANCONTROL) & 0xff;
+ return sprintf(buf, "%u\n", spd * 100);
}
+static ssize_t temp_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct ddb *dev = dev_get_drvdata(device);
+ struct ddb_link *link = &dev->link[0];
+ struct i2c_adapter *adap;
+ int temp, temp2;
+ u8 tmp[2];
+
+ if (!link->info->temp_num)
+ return sprintf(buf, "no sensor\n");
+ adap = &dev->i2c[link->info->temp_bus].adap;
+ if (i2c_read_regs(adap, 0x48, 0, tmp, 2) < 0)
+ return sprintf(buf, "read_error\n");
+ temp = (tmp[0] << 3) | (tmp[1] >> 5);
+ temp *= 125;
+ if (link->info->temp_num == 2) {
+ if (i2c_read_regs(adap, 0x49, 0, tmp, 2) < 0)
+ return sprintf(buf, "read_error\n");
+ temp2 = (tmp[0] << 3) | (tmp[1] >> 5);
+ temp2 *= 125;
+ return sprintf(buf, "%d %d\n", temp, temp2);
+ }
+ return sprintf(buf, "%d\n", temp);
+}
-static void ddb_remove(struct pci_dev *pdev)
+static ssize_t ctemp_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
- struct ddb *dev = pci_get_drvdata(pdev);
+ struct ddb *dev = dev_get_drvdata(device);
+ struct i2c_adapter *adap;
+ int temp;
+ u8 tmp[2];
+ int num = attr->attr.name[4] - 0x30;
- ddb_ports_detach(dev);
- ddb_i2c_release(dev);
+ adap = &dev->i2c[num].adap;
+ if (!adap)
+ return 0;
+ if (i2c_read_regs(adap, 0x49, 0, tmp, 2) < 0)
+ if (i2c_read_regs(adap, 0x4d, 0, tmp, 2) < 0)
+ return sprintf(buf, "no sensor\n");
+ temp = tmp[0] * 1000;
+ return sprintf(buf, "%d\n", temp);
+}
- ddbwritel(0, INTERRUPT_ENABLE);
- free_irq(dev->pdev->irq, dev);
-#ifdef CONFIG_PCI_MSI
- if (dev->msi)
- pci_disable_msi(dev->pdev);
-#endif
- ddb_ports_release(dev);
- ddb_buffers_free(dev);
- ddb_device_destroy(dev);
+static ssize_t led_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct ddb *dev = dev_get_drvdata(device);
+ int num = attr->attr.name[3] - 0x30;
- ddb_unmap(dev);
- pci_set_drvdata(pdev, NULL);
- pci_disable_device(pdev);
+ return sprintf(buf, "%d\n", dev->leds & (1 << num) ? 1 : 0);
}
-static int ddb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static void ddb_set_led(struct ddb *dev, int num, int val)
{
- struct ddb *dev;
- int stat = 0;
- int irq_flag = IRQF_SHARED;
+ if (!dev->link[0].info->led_num)
+ return;
+ switch (dev->port[num].class) {
+ case DDB_PORT_TUNER:
+ switch (dev->port[num].type) {
+ case DDB_TUNER_DVBS_ST:
+ i2c_write_reg16(&dev->i2c[num].adap,
+ 0x69, 0xf14c, val ? 2 : 0);
+ break;
+ case DDB_TUNER_DVBCT_ST:
+ i2c_write_reg16(&dev->i2c[num].adap,
+ 0x1f, 0xf00e, 0);
+ i2c_write_reg16(&dev->i2c[num].adap,
+ 0x1f, 0xf00f, val ? 1 : 0);
+ break;
+ case DDB_TUNER_XO2 ... DDB_TUNER_DVBC2T2I_SONY:
+ {
+ u8 v;
- if (pci_enable_device(pdev) < 0)
- return -ENODEV;
+ i2c_read_reg(&dev->i2c[num].adap, 0x10, 0x08, &v);
+ v = (v & ~0x10) | (val ? 0x10 : 0);
+ i2c_write_reg(&dev->i2c[num].adap, 0x10, 0x08, v);
+ break;
+ }
+ default:
+ break;
+ }
+ break;
+ }
+}
- dev = vzalloc(sizeof(struct ddb));
- if (dev == NULL)
- return -ENOMEM;
+static ssize_t led_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ddb *dev = dev_get_drvdata(device);
+ int num = attr->attr.name[3] - 0x30;
+ u32 val;
- dev->pdev = pdev;
- pci_set_drvdata(pdev, dev);
- dev->info = (struct ddb_info *) id->driver_data;
- printk(KERN_INFO "DDBridge driver detected: %s\n", dev->info->name);
+ if (sscanf(buf, "%u\n", &val) != 1)
+ return -EINVAL;
+ if (val)
+ dev->leds |= (1 << num);
+ else
+ dev->leds &= ~(1 << num);
+ ddb_set_led(dev, num, val);
+ return count;
+}
- dev->regs = ioremap(pci_resource_start(dev->pdev, 0),
- pci_resource_len(dev->pdev, 0));
- if (!dev->regs) {
- stat = -ENOMEM;
- goto fail;
- }
- printk(KERN_INFO "HW %08x FW %08x\n", ddbreadl(0), ddbreadl(4));
+static ssize_t snr_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct ddb *dev = dev_get_drvdata(device);
+ char snr[32];
+ int num = attr->attr.name[3] - 0x30;
-#ifdef CONFIG_PCI_MSI
- if (pci_msi_enabled())
- stat = pci_enable_msi(dev->pdev);
- if (stat) {
- printk(KERN_INFO ": MSI not available.\n");
+ if (dev->port[num].type >= DDB_TUNER_XO2) {
+ if (i2c_read_regs(&dev->i2c[num].adap, 0x10, 0x10, snr, 16) < 0)
+ return sprintf(buf, "NO SNR\n");
+ snr[16] = 0;
} else {
- irq_flag = 0;
- dev->msi = 1;
- }
-#endif
- stat = request_irq(dev->pdev->irq, irq_handler,
- irq_flag, "DDBridge", (void *) dev);
- if (stat < 0)
- goto fail1;
- ddbwritel(0, DMA_BASE_WRITE);
- ddbwritel(0, DMA_BASE_READ);
- ddbwritel(0xffffffff, INTERRUPT_ACK);
- ddbwritel(0xfff0f, INTERRUPT_ENABLE);
- ddbwritel(0, MSI1_ENABLE);
-
- /* board control */
- if (dev->info->board_control) {
- ddbwritel(0, DDB_LINK_TAG(0) | BOARD_CONTROL);
- msleep(100);
- ddbwritel(dev->info->board_control_2,
- DDB_LINK_TAG(0) | BOARD_CONTROL);
- usleep_range(2000, 3000);
- ddbwritel(dev->info->board_control_2
- | dev->info->board_control,
- DDB_LINK_TAG(0) | BOARD_CONTROL);
- usleep_range(2000, 3000);
+ /* serial number at 0x100-0x11f */
+ if (i2c_read_regs16(&dev->i2c[num].adap,
+ 0x57, 0x100, snr, 32) < 0)
+ if (i2c_read_regs16(&dev->i2c[num].adap,
+ 0x50, 0x100, snr, 32) < 0)
+ return sprintf(buf, "NO SNR\n");
+ snr[31] = 0; /* in case it is not terminated on EEPROM */
}
+ return sprintf(buf, "%s\n", snr);
+}
- if (ddb_i2c_init(dev) < 0)
- goto fail1;
- ddb_ports_init(dev);
- if (ddb_buffers_alloc(dev) < 0) {
- printk(KERN_INFO ": Could not allocate buffer memory\n");
- goto fail2;
- }
- if (ddb_ports_attach(dev) < 0)
- goto fail3;
- ddb_device_create(dev);
+static ssize_t bsnr_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct ddb *dev = dev_get_drvdata(device);
+ char snr[16];
+
+ ddbridge_flashread(dev, 0, snr, 0x10, 15);
+ snr[15] = 0; /* in case it is not terminated on EEPROM */
+ return sprintf(buf, "%s\n", snr);
+}
+
+static ssize_t bpsnr_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct ddb *dev = dev_get_drvdata(device);
+ unsigned char snr[32];
+
+ if (!dev->i2c_num)
+ return 0;
+
+ if (i2c_read_regs16(&dev->i2c[0].adap,
+ 0x50, 0x0000, snr, 32) < 0 ||
+ snr[0] == 0xff)
+ return sprintf(buf, "NO SNR\n");
+ snr[31] = 0; /* in case it is not terminated on EEPROM */
+ return sprintf(buf, "%s\n", snr);
+}
+
+static ssize_t redirect_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
return 0;
+}
-fail3:
- ddb_ports_detach(dev);
- printk(KERN_ERR "fail3\n");
- ddb_ports_release(dev);
-fail2:
- printk(KERN_ERR "fail2\n");
- ddb_buffers_free(dev);
-fail1:
- printk(KERN_ERR "fail1\n");
- if (dev->msi)
- pci_disable_msi(dev->pdev);
- if (stat == 0)
- free_irq(dev->pdev->irq, dev);
-fail:
- printk(KERN_ERR "fail\n");
- ddb_unmap(dev);
- pci_set_drvdata(pdev, NULL);
- pci_disable_device(pdev);
- return -1;
+static ssize_t redirect_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned int i, p;
+ int res;
+
+ if (sscanf(buf, "%x %x\n", &i, &p) != 2)
+ return -EINVAL;
+ res = ddb_redirect(i, p);
+ if (res < 0)
+ return res;
+ dev_info(device, "redirect: %02x, %02x\n", i, p);
+ return count;
}
-/******************************************************************************/
-/******************************************************************************/
-/******************************************************************************/
+static ssize_t gap_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct ddb *dev = dev_get_drvdata(device);
+ int num = attr->attr.name[3] - 0x30;
-static const struct ddb_info ddb_none = {
- .type = DDB_NONE,
- .name = "Digital Devices PCIe bridge",
-};
+ return sprintf(buf, "%d\n", dev->port[num].gap);
-static const struct ddb_info ddb_octopus = {
- .type = DDB_OCTOPUS,
- .name = "Digital Devices Octopus DVB adapter",
- .port_num = 4,
-};
+}
-static const struct ddb_info ddb_octopus_le = {
- .type = DDB_OCTOPUS,
- .name = "Digital Devices Octopus LE DVB adapter",
- .port_num = 2,
-};
+static ssize_t gap_store(struct device *device, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ddb *dev = dev_get_drvdata(device);
+ int num = attr->attr.name[3] - 0x30;
+ unsigned int val;
+
+ if (sscanf(buf, "%u\n", &val) != 1)
+ return -EINVAL;
+ if (val > 128)
+ return -EINVAL;
+ if (val == 128)
+ val = 0xffffffff;
+ dev->port[num].gap = val;
+ return count;
+}
+
+static ssize_t version_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct ddb *dev = dev_get_drvdata(device);
+
+ return sprintf(buf, "%08x %08x\n",
+ dev->link[0].ids.hwid, dev->link[0].ids.regmapid);
+}
+
+static ssize_t hwid_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct ddb *dev = dev_get_drvdata(device);
+
+ return sprintf(buf, "0x%08X\n", dev->link[0].ids.hwid);
+}
+
+static ssize_t regmap_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct ddb *dev = dev_get_drvdata(device);
+
+ return sprintf(buf, "0x%08X\n", dev->link[0].ids.regmapid);
+}
+
+static ssize_t fmode_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ int num = attr->attr.name[5] - 0x30;
+ struct ddb *dev = dev_get_drvdata(device);
+
+ return sprintf(buf, "%u\n", dev->link[num].lnb.fmode);
+}
-static const struct ddb_info ddb_octopus_oem = {
- .type = DDB_OCTOPUS,
- .name = "Digital Devices Octopus OEM",
- .port_num = 4,
+static ssize_t devid_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ int num = attr->attr.name[5] - 0x30;
+ struct ddb *dev = dev_get_drvdata(device);
+
+ return sprintf(buf, "%08x\n", dev->link[num].ids.devid);
+}
+
+static ssize_t fmode_store(struct device *device, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ddb *dev = dev_get_drvdata(device);
+ int num = attr->attr.name[5] - 0x30;
+ unsigned int val;
+
+ if (sscanf(buf, "%u\n", &val) != 1)
+ return -EINVAL;
+ if (val > 3)
+ return -EINVAL;
+ lnb_init_fmode(dev, &dev->link[num], val);
+ return count;
+}
+
+static struct device_attribute ddb_attrs[] = {
+ __ATTR_RO(version),
+ __ATTR_RO(ports),
+ __ATTR_RO(ts_irq),
+ __ATTR_RO(i2c_irq),
+ __ATTR(gap0, 0664, gap_show, gap_store),
+ __ATTR(gap1, 0664, gap_show, gap_store),
+ __ATTR(gap2, 0664, gap_show, gap_store),
+ __ATTR(gap3, 0664, gap_show, gap_store),
+ __ATTR(fmode0, 0664, fmode_show, fmode_store),
+ __ATTR(fmode1, 0664, fmode_show, fmode_store),
+ __ATTR(fmode2, 0664, fmode_show, fmode_store),
+ __ATTR(fmode3, 0664, fmode_show, fmode_store),
+ __ATTR_MRO(devid0, devid_show),
+ __ATTR_MRO(devid1, devid_show),
+ __ATTR_MRO(devid2, devid_show),
+ __ATTR_MRO(devid3, devid_show),
+ __ATTR_RO(hwid),
+ __ATTR_RO(regmap),
+ __ATTR(redirect, 0664, redirect_show, redirect_store),
+ __ATTR_MRO(snr, bsnr_show),
+ __ATTR_RO(bpsnr),
+ __ATTR_NULL,
};
-static const struct ddb_info ddb_octopus_mini = {
- .type = DDB_OCTOPUS,
- .name = "Digital Devices Octopus Mini",
- .port_num = 4,
+static struct device_attribute ddb_attrs_temp[] = {
+ __ATTR_RO(temp),
};
-static const struct ddb_info ddb_v6 = {
- .type = DDB_OCTOPUS,
- .name = "Digital Devices Cine S2 V6 DVB adapter",
- .port_num = 3,
+static struct device_attribute ddb_attrs_fan[] = {
+ __ATTR(fan, 0664, fan_show, fan_store),
};
-static const struct ddb_info ddb_v6_5 = {
- .type = DDB_OCTOPUS,
- .name = "Digital Devices Cine S2 V6.5 DVB adapter",
- .port_num = 4,
+
+static struct device_attribute ddb_attrs_snr[] = {
+ __ATTR_MRO(snr0, snr_show),
+ __ATTR_MRO(snr1, snr_show),
+ __ATTR_MRO(snr2, snr_show),
+ __ATTR_MRO(snr3, snr_show),
};
-static const struct ddb_info ddb_dvbct = {
- .type = DDB_OCTOPUS,
- .name = "Digital Devices DVBCT V6.1 DVB adapter",
- .port_num = 3,
+static struct device_attribute ddb_attrs_ctemp[] = {
+ __ATTR_MRO(temp0, ctemp_show),
+ __ATTR_MRO(temp1, ctemp_show),
+ __ATTR_MRO(temp2, ctemp_show),
+ __ATTR_MRO(temp3, ctemp_show),
};
-static const struct ddb_info ddb_ctv7 = {
- .type = DDB_OCTOPUS,
- .name = "Digital Devices Cine CT V7 DVB adapter",
- .port_num = 4,
- .board_control = 3,
- .board_control_2 = 4,
+static struct device_attribute ddb_attrs_led[] = {
+ __ATTR(led0, 0664, led_show, led_store),
+ __ATTR(led1, 0664, led_show, led_store),
+ __ATTR(led2, 0664, led_show, led_store),
+ __ATTR(led3, 0664, led_show, led_store),
};
-static const struct ddb_info ddb_satixS2v3 = {
- .type = DDB_OCTOPUS,
- .name = "Mystique SaTiX-S2 V3 DVB adapter",
- .port_num = 3,
+static struct device_attribute ddb_attrs_fanspeed[] = {
+ __ATTR_MRO(fanspeed0, fanspeed_show),
+ __ATTR_MRO(fanspeed1, fanspeed_show),
+ __ATTR_MRO(fanspeed2, fanspeed_show),
+ __ATTR_MRO(fanspeed3, fanspeed_show),
};
-static const struct ddb_info ddb_octopusv3 = {
- .type = DDB_OCTOPUS,
- .name = "Digital Devices Octopus V3 DVB adapter",
- .port_num = 4,
+static struct class ddb_class = {
+ .name = "ddbridge",
+ .owner = THIS_MODULE,
+ .devnode = ddb_devnode,
};
-/*** MaxA8 adapters ***********************************************************/
+int ddb_class_create(void)
+{
+ ddb_major = register_chrdev(0, DDB_NAME, &ddb_fops);
+ if (ddb_major < 0)
+ return ddb_major;
+ if (class_register(&ddb_class) < 0) {
+ unregister_chrdev(ddb_major, DDB_NAME);
+ return -1;
+ }
+ return 0;
+}
-static struct ddb_info ddb_ct2_8 = {
- .type = DDB_OCTOPUS_MAX_CT,
- .name = "Digital Devices MAX A8 CT2",
- .port_num = 4,
- .board_control = 0x0ff,
- .board_control_2 = 0xf00,
- .ts_quirks = TS_QUIRK_SERIAL,
-};
+void ddb_class_destroy(void)
+{
+ class_unregister(&ddb_class);
+ unregister_chrdev(ddb_major, DDB_NAME);
+}
-static struct ddb_info ddb_c2t2_8 = {
- .type = DDB_OCTOPUS_MAX_CT,
- .name = "Digital Devices MAX A8 C2T2",
- .port_num = 4,
- .board_control = 0x0ff,
- .board_control_2 = 0xf00,
- .ts_quirks = TS_QUIRK_SERIAL,
-};
+static void ddb_device_attrs_del(struct ddb *dev)
+{
+ int i;
-static struct ddb_info ddb_isdbt_8 = {
- .type = DDB_OCTOPUS_MAX_CT,
- .name = "Digital Devices MAX A8 ISDBT",
- .port_num = 4,
- .board_control = 0x0ff,
- .board_control_2 = 0xf00,
- .ts_quirks = TS_QUIRK_SERIAL,
-};
+ for (i = 0; i < 4; i++)
+ if (dev->link[i].info && dev->link[i].info->tempmon_irq)
+ device_remove_file(dev->ddb_dev,
+ &ddb_attrs_fanspeed[i]);
+ for (i = 0; i < dev->link[0].info->temp_num; i++)
+ device_remove_file(dev->ddb_dev, &ddb_attrs_temp[i]);
+ for (i = 0; i < dev->link[0].info->fan_num; i++)
+ device_remove_file(dev->ddb_dev, &ddb_attrs_fan[i]);
+ for (i = 0; i < dev->i2c_num && i < 4; i++) {
+ if (dev->link[0].info->led_num)
+ device_remove_file(dev->ddb_dev, &ddb_attrs_led[i]);
+ device_remove_file(dev->ddb_dev, &ddb_attrs_snr[i]);
+ device_remove_file(dev->ddb_dev, &ddb_attrs_ctemp[i]);
+ }
+ for (i = 0; ddb_attrs[i].attr.name != NULL; i++)
+ device_remove_file(dev->ddb_dev, &ddb_attrs[i]);
+}
-static struct ddb_info ddb_c2t2i_v0_8 = {
- .type = DDB_OCTOPUS_MAX_CT,
- .name = "Digital Devices MAX A8 C2T2I V0",
- .port_num = 4,
- .board_control = 0x0ff,
- .board_control_2 = 0xf00,
- .ts_quirks = TS_QUIRK_SERIAL | TS_QUIRK_ALT_OSC,
-};
+static int ddb_device_attrs_add(struct ddb *dev)
+{
+ int i;
-static struct ddb_info ddb_c2t2i_8 = {
- .type = DDB_OCTOPUS_MAX_CT,
- .name = "Digital Devices MAX A8 C2T2I",
- .port_num = 4,
- .board_control = 0x0ff,
- .board_control_2 = 0xf00,
- .ts_quirks = TS_QUIRK_SERIAL,
-};
+ for (i = 0; ddb_attrs[i].attr.name != NULL; i++)
+ if (device_create_file(dev->ddb_dev, &ddb_attrs[i]))
+ goto fail;
+ for (i = 0; i < dev->link[0].info->temp_num; i++)
+ if (device_create_file(dev->ddb_dev, &ddb_attrs_temp[i]))
+ goto fail;
+ for (i = 0; i < dev->link[0].info->fan_num; i++)
+ if (device_create_file(dev->ddb_dev, &ddb_attrs_fan[i]))
+ goto fail;
+ for (i = 0; (i < dev->i2c_num) && (i < 4); i++) {
+ if (device_create_file(dev->ddb_dev, &ddb_attrs_snr[i]))
+ goto fail;
+ if (device_create_file(dev->ddb_dev, &ddb_attrs_ctemp[i]))
+ goto fail;
+ if (dev->link[0].info->led_num)
+ if (device_create_file(dev->ddb_dev,
+ &ddb_attrs_led[i]))
+ goto fail;
+ }
+ for (i = 0; i < 4; i++)
+ if (dev->link[i].info && dev->link[i].info->tempmon_irq)
+ if (device_create_file(dev->ddb_dev,
+ &ddb_attrs_fanspeed[i]))
+ goto fail;
+ return 0;
+fail:
+ return -1;
+}
-/******************************************************************************/
+int ddb_device_create(struct ddb *dev)
+{
+ int res = 0;
-#define DDVID 0xdd01 /* Digital Devices Vendor ID */
-
-#define DDB_ID(_vend, _dev, _subvend, _subdev, _driverdata) { \
- .vendor = _vend, .device = _dev, \
- .subvendor = _subvend, .subdevice = _subdev, \
- .driver_data = (unsigned long)&_driverdata }
-
-static const struct pci_device_id ddb_id_tbl[] = {
- DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
- DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
- DDB_ID(DDVID, 0x0005, DDVID, 0x0004, ddb_octopusv3),
- DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
- DDB_ID(DDVID, 0x0003, DDVID, 0x0003, ddb_octopus_oem),
- DDB_ID(DDVID, 0x0003, DDVID, 0x0010, ddb_octopus_mini),
- DDB_ID(DDVID, 0x0005, DDVID, 0x0011, ddb_octopus_mini),
- DDB_ID(DDVID, 0x0003, DDVID, 0x0020, ddb_v6),
- DDB_ID(DDVID, 0x0003, DDVID, 0x0021, ddb_v6_5),
- DDB_ID(DDVID, 0x0003, DDVID, 0x0030, ddb_dvbct),
- DDB_ID(DDVID, 0x0003, DDVID, 0xdb03, ddb_satixS2v3),
- DDB_ID(DDVID, 0x0006, DDVID, 0x0031, ddb_ctv7),
- DDB_ID(DDVID, 0x0006, DDVID, 0x0032, ddb_ctv7),
- DDB_ID(DDVID, 0x0006, DDVID, 0x0033, ddb_ctv7),
- DDB_ID(DDVID, 0x0008, DDVID, 0x0034, ddb_ct2_8),
- DDB_ID(DDVID, 0x0008, DDVID, 0x0035, ddb_c2t2_8),
- DDB_ID(DDVID, 0x0008, DDVID, 0x0036, ddb_isdbt_8),
- DDB_ID(DDVID, 0x0008, DDVID, 0x0037, ddb_c2t2i_v0_8),
- DDB_ID(DDVID, 0x0008, DDVID, 0x0038, ddb_c2t2i_8),
- DDB_ID(DDVID, 0x0006, DDVID, 0x0039, ddb_ctv7),
- /* in case sub-ids got deleted in flash */
- DDB_ID(DDVID, 0x0003, PCI_ANY_ID, PCI_ANY_ID, ddb_none),
- DDB_ID(DDVID, 0x0005, PCI_ANY_ID, PCI_ANY_ID, ddb_none),
- DDB_ID(DDVID, 0x0006, PCI_ANY_ID, PCI_ANY_ID, ddb_none),
- DDB_ID(DDVID, 0x0007, PCI_ANY_ID, PCI_ANY_ID, ddb_none),
- DDB_ID(DDVID, 0x0008, PCI_ANY_ID, PCI_ANY_ID, ddb_none),
- DDB_ID(DDVID, 0x0011, PCI_ANY_ID, PCI_ANY_ID, ddb_none),
- DDB_ID(DDVID, 0x0013, PCI_ANY_ID, PCI_ANY_ID, ddb_none),
- DDB_ID(DDVID, 0x0201, PCI_ANY_ID, PCI_ANY_ID, ddb_none),
- DDB_ID(DDVID, 0x0320, PCI_ANY_ID, PCI_ANY_ID, ddb_none),
- {0}
-};
-MODULE_DEVICE_TABLE(pci, ddb_id_tbl);
+ if (ddb_num == DDB_MAX_ADAPTER)
+ return -ENOMEM;
+ mutex_lock(&ddb_mutex);
+ dev->nr = ddb_num;
+ ddbs[dev->nr] = dev;
+ dev->ddb_dev = device_create(&ddb_class, dev->dev,
+ MKDEV(ddb_major, dev->nr),
+ dev, "ddbridge%d", dev->nr);
+ if (IS_ERR(dev->ddb_dev)) {
+ res = PTR_ERR(dev->ddb_dev);
+ dev_info(dev->dev, "Could not create ddbridge%d\n", dev->nr);
+ goto fail;
+ }
+ res = ddb_device_attrs_add(dev);
+ if (res) {
+ ddb_device_attrs_del(dev);
+ device_destroy(&ddb_class, MKDEV(ddb_major, dev->nr));
+ ddbs[dev->nr] = NULL;
+ dev->ddb_dev = ERR_PTR(-ENODEV);
+ } else
+ ddb_num++;
+fail:
+ mutex_unlock(&ddb_mutex);
+ return res;
+}
+void ddb_device_destroy(struct ddb *dev)
+{
+ if (IS_ERR(dev->ddb_dev))
+ return;
+ ddb_device_attrs_del(dev);
+ device_destroy(&ddb_class, MKDEV(ddb_major, dev->nr));
+}
-static struct pci_driver ddb_pci_driver = {
- .name = "DDBridge",
- .id_table = ddb_id_tbl,
- .probe = ddb_probe,
- .remove = ddb_remove,
-};
+/****************************************************************************/
+/****************************************************************************/
+/****************************************************************************/
-static __init int module_init_ddbridge(void)
+static void tempmon_setfan(struct ddb_link *link)
{
- int ret;
+ u32 temp, temp2, pwm;
+
+ if ((ddblreadl(link, TEMPMON_CONTROL) &
+ TEMPMON_CONTROL_OVERTEMP) != 0) {
+ dev_info(link->dev->dev, "Over temperature condition\n");
+ link->overtemperature_error = 1;
+ }
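+ /* sensors report signed degrees Celsius in bits 15:8; clamp negative readings to 0 */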
+ temp = (ddblreadl(link, TEMPMON_SENSOR0) >> 8) & 0xFF;
+ if (temp & 0x80)
+ temp = 0;
+ temp2 = (ddblreadl(link, TEMPMON_SENSOR1) >> 8) & 0xFF;
+ if (temp2 & 0x80)
+ temp2 = 0;
+ if (temp2 > temp)
+ temp = temp2;
+
+ pwm = (ddblreadl(link, TEMPMON_FANCONTROL) >> 8) & 0x0F;
+ if (pwm > 10)
+ pwm = 10;
+
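+ /* step the fan up as soon as the next threshold is reached; step it down only after dropping below the entry two steps lower (hysteresis) */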
+ if (temp >= link->temp_tab[pwm]) {
+ while (pwm < 10 && temp >= link->temp_tab[pwm + 1])
+ pwm += 1;
+ } else {
+ while (pwm > 1 && temp < link->temp_tab[pwm - 2])
+ pwm -= 1;
+ }
+ ddblwritel(link, (pwm << 8), TEMPMON_FANCONTROL);
+}
- printk(KERN_INFO "Digital Devices PCIE bridge driver, Copyright (C) 2010-11 Digital Devices GmbH\n");
+static void temp_handler(unsigned long data)
+{
+ struct ddb_link *link = (struct ddb_link *) data;
- ret = ddb_class_create();
- if (ret < 0)
- return ret;
- ret = pci_register_driver(&ddb_pci_driver);
- if (ret < 0)
- ddb_class_destroy();
- return ret;
+ spin_lock(&link->temp_lock);
+ tempmon_setfan(link);
+ spin_unlock(&link->temp_lock);
+}
+
+static int tempmon_init(struct ddb_link *link, int first_time)
+{
+ struct ddb *dev = link->dev;
+ int status = 0;
+ u32 l = link->nr;
+
+ spin_lock_irq(&link->temp_lock);
+ if (first_time) {
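+ /* default thresholds in degrees Celsius for fan PWM steps 0..10 */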
+ static u8 temperature_table[11] = {
+ 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80 };
+
+ memcpy(link->temp_tab, temperature_table,
+ sizeof(temperature_table));
+ }
+ dev->handler[l][link->info->tempmon_irq] = temp_handler;
+ dev->handler_data[l][link->info->tempmon_irq] = (unsigned long) link;
+ ddblwritel(link, (TEMPMON_CONTROL_OVERTEMP | TEMPMON_CONTROL_AUTOSCAN |
+ TEMPMON_CONTROL_INTENABLE),
+ TEMPMON_CONTROL);
+ ddblwritel(link, (3 << 8), TEMPMON_FANCONTROL);
+
+ link->overtemperature_error =
+ ((ddblreadl(link, TEMPMON_CONTROL) &
+ TEMPMON_CONTROL_OVERTEMP) != 0);
+ if (link->overtemperature_error) {
+ dev_info(link->dev->dev, "Over temperature condition\n");
+ status = -1;
+ }
+ tempmon_setfan(link);
+ spin_unlock_irq(&link->temp_lock);
+ return status;
}
-static __exit void module_exit_ddbridge(void)
+static int ddb_init_tempmon(struct ddb_link *link)
{
- pci_unregister_driver(&ddb_pci_driver);
- ddb_class_destroy();
+ const struct ddb_info *info = link->info;
+
+ if (!info->tempmon_irq)
+ return 0;
+ if (info->type == DDB_OCTOPUS_MAX_CT)
+ if (link->ids.regmapid < 0x00010002)
+ return 0;
+ spin_lock_init(&link->temp_lock);
+ dev_dbg(link->dev->dev, "init_tempmon\n");
+ return tempmon_init(link, 1);
+}
+
+/****************************************************************************/
+/****************************************************************************/
+/****************************************************************************/
+
+static int ddb_init_boards(struct ddb *dev)
+{
+ const struct ddb_info *info;
+ struct ddb_link *link;
+ u32 l;
+
+ for (l = 0; l < DDB_MAX_LINK; l++) {
+ link = &dev->link[l];
+ info = link->info;
+
+ if (!info)
+ continue;
+ if (info->board_control) {
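+ /* clear the board control register, then apply the board specific bits in two steps */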
+ ddbwritel(dev, 0, DDB_LINK_TAG(l) | BOARD_CONTROL);
+ msleep(100);
+ ddbwritel(dev, info->board_control_2,
+ DDB_LINK_TAG(l) | BOARD_CONTROL);
+ usleep_range(2000, 3000);
+ ddbwritel(dev,
+ info->board_control_2 | info->board_control,
+ DDB_LINK_TAG(l) | BOARD_CONTROL);
+ usleep_range(2000, 3000);
+ }
+ ddb_init_tempmon(link);
+ }
+ return 0;
}
-module_init(module_init_ddbridge);
-module_exit(module_exit_ddbridge);
+int ddb_init(struct ddb *dev)
+{
+ mutex_init(&dev->link[0].lnb.lock);
+ mutex_init(&dev->link[0].flash_mutex);
+ if (no_init) {
+ ddb_device_create(dev);
+ return 0;
+ }
+
+ ddb_init_boards(dev);
+
+ if (ddb_i2c_init(dev) < 0)
+ goto fail;
+ ddb_ports_init(dev);
+ if (ddb_buffers_alloc(dev) < 0) {
+ dev_info(dev->dev, "Could not allocate buffer memory\n");
+ goto fail2;
+ }
+ if (ddb_ports_attach(dev) < 0)
+ goto fail3;
+
+ ddb_device_create(dev);
+
+ if (dev->link[0].info->fan_num) {
+ ddbwritel(dev, 1, GPIO_DIRECTION);
+ ddbwritel(dev, 1, GPIO_OUTPUT);
+ }
+ return 0;
+
+fail3:
+ ddb_ports_detach(dev);
+ dev_err(dev->dev, "fail3\n");
+ ddb_ports_release(dev);
+fail2:
+ dev_err(dev->dev, "fail2\n");
+ ddb_buffers_free(dev);
+ ddb_i2c_release(dev);
+fail:
+ dev_err(dev->dev, "fail1\n");
+ return -1;
+}
-MODULE_DESCRIPTION("Digital Devices PCIe Bridge");
-MODULE_AUTHOR("Ralph Metzler");
-MODULE_LICENSE("GPL");
-MODULE_VERSION("0.5");
+void ddb_unmap(struct ddb *dev)
+{
+ if (dev->regs)
+ iounmap(dev->regs);
+ vfree(dev);
+}
diff --git a/drivers/media/pci/ddbridge/ddbridge-hw.c b/drivers/media/pci/ddbridge/ddbridge-hw.c
new file mode 100644
index 000000000000..48248bcd59c2
--- /dev/null
+++ b/drivers/media/pci/ddbridge/ddbridge-hw.c
@@ -0,0 +1,376 @@
+/*
+ * ddbridge-hw.c: Digital Devices bridge hardware maps
+ *
+ * Copyright (C) 2010-2017 Digital Devices GmbH
+ * Ralph Metzler <rjkm@metzlerbros.de>
+ * Marcus Metzler <mocm@metzlerbros.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 only, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "ddbridge.h"
+#include "ddbridge-hw.h"
+
+/******************************************************************************/
+
+static const struct ddb_regset octopus_input = {
+ .base = 0x200,
+ .num = 0x08,
+ .size = 0x10,
+};
+
+static const struct ddb_regset octopus_output = {
+ .base = 0x280,
+ .num = 0x08,
+ .size = 0x10,
+};
+
+static const struct ddb_regset octopus_idma = {
+ .base = 0x300,
+ .num = 0x08,
+ .size = 0x10,
+};
+
+static const struct ddb_regset octopus_idma_buf = {
+ .base = 0x2000,
+ .num = 0x08,
+ .size = 0x100,
+};
+
+static const struct ddb_regset octopus_odma = {
+ .base = 0x380,
+ .num = 0x04,
+ .size = 0x10,
+};
+
+static const struct ddb_regset octopus_odma_buf = {
+ .base = 0x2800,
+ .num = 0x04,
+ .size = 0x100,
+};
+
+static const struct ddb_regset octopus_i2c = {
+ .base = 0x80,
+ .num = 0x04,
+ .size = 0x20,
+};
+
+static const struct ddb_regset octopus_i2c_buf = {
+ .base = 0x1000,
+ .num = 0x04,
+ .size = 0x200,
+};
+
+/****************************************************************************/
+
+static const struct ddb_regmap octopus_map = {
+ .irq_base_i2c = 0,
+ .irq_base_idma = 8,
+ .irq_base_odma = 16,
+ .i2c = &octopus_i2c,
+ .i2c_buf = &octopus_i2c_buf,
+ .idma = &octopus_idma,
+ .idma_buf = &octopus_idma_buf,
+ .odma = &octopus_odma,
+ .odma_buf = &octopus_odma_buf,
+ .input = &octopus_input,
+ .output = &octopus_output,
+};
+
+/****************************************************************************/
+
+static const struct ddb_info ddb_none = {
+ .type = DDB_NONE,
+ .name = "unknown Digital Devices PCIe card, install newer driver",
+ .regmap = &octopus_map,
+};
+
+static const struct ddb_info ddb_octopus = {
+ .type = DDB_OCTOPUS,
+ .name = "Digital Devices Octopus DVB adapter",
+ .regmap = &octopus_map,
+ .port_num = 4,
+ .i2c_mask = 0x0f,
+};
+
+static const struct ddb_info ddb_octopusv3 = {
+ .type = DDB_OCTOPUS,
+ .name = "Digital Devices Octopus V3 DVB adapter",
+ .regmap = &octopus_map,
+ .port_num = 4,
+ .i2c_mask = 0x0f,
+};
+
+static const struct ddb_info ddb_octopus_le = {
+ .type = DDB_OCTOPUS,
+ .name = "Digital Devices Octopus LE DVB adapter",
+ .regmap = &octopus_map,
+ .port_num = 2,
+ .i2c_mask = 0x03,
+};
+
+static const struct ddb_info ddb_octopus_oem = {
+ .type = DDB_OCTOPUS,
+ .name = "Digital Devices Octopus OEM",
+ .regmap = &octopus_map,
+ .port_num = 4,
+ .i2c_mask = 0x0f,
+ .led_num = 1,
+ .fan_num = 1,
+ .temp_num = 1,
+ .temp_bus = 0,
+};
+
+static const struct ddb_info ddb_octopus_mini = {
+ .type = DDB_OCTOPUS,
+ .name = "Digital Devices Octopus Mini",
+ .regmap = &octopus_map,
+ .port_num = 4,
+ .i2c_mask = 0x0f,
+};
+
+static const struct ddb_info ddb_v6 = {
+ .type = DDB_OCTOPUS,
+ .name = "Digital Devices Cine S2 V6 DVB adapter",
+ .regmap = &octopus_map,
+ .port_num = 3,
+ .i2c_mask = 0x07,
+};
+
+static const struct ddb_info ddb_v6_5 = {
+ .type = DDB_OCTOPUS,
+ .name = "Digital Devices Cine S2 V6.5 DVB adapter",
+ .regmap = &octopus_map,
+ .port_num = 4,
+ .i2c_mask = 0x0f,
+};
+
+static const struct ddb_info ddb_v7 = {
+ .type = DDB_OCTOPUS,
+ .name = "Digital Devices Cine S2 V7 DVB adapter",
+ .regmap = &octopus_map,
+ .port_num = 4,
+ .i2c_mask = 0x0f,
+ .board_control = 2,
+ .board_control_2 = 4,
+ .ts_quirks = TS_QUIRK_REVERSED,
+};
+
+static const struct ddb_info ddb_v7a = {
+ .type = DDB_OCTOPUS,
+ .name = "Digital Devices Cine S2 V7 Advanced DVB adapter",
+ .regmap = &octopus_map,
+ .port_num = 4,
+ .i2c_mask = 0x0f,
+ .board_control = 2,
+ .board_control_2 = 4,
+ .ts_quirks = TS_QUIRK_REVERSED,
+};
+
+static const struct ddb_info ddb_ctv7 = {
+ .type = DDB_OCTOPUS,
+ .name = "Digital Devices Cine CT V7 DVB adapter",
+ .regmap = &octopus_map,
+ .port_num = 4,
+ .i2c_mask = 0x0f,
+ .board_control = 3,
+ .board_control_2 = 4,
+};
+
+static const struct ddb_info ddb_satixS2v3 = {
+ .type = DDB_OCTOPUS,
+ .name = "Mystique SaTiX-S2 V3 DVB adapter",
+ .regmap = &octopus_map,
+ .port_num = 3,
+ .i2c_mask = 0x07,
+};
+
+static const struct ddb_info ddb_ci = {
+ .type = DDB_OCTOPUS_CI,
+ .name = "Digital Devices Octopus CI",
+ .regmap = &octopus_map,
+ .port_num = 4,
+ .i2c_mask = 0x03,
+};
+
+static const struct ddb_info ddb_cis = {
+ .type = DDB_OCTOPUS_CI,
+ .name = "Digital Devices Octopus CI single",
+ .regmap = &octopus_map,
+ .port_num = 3,
+ .i2c_mask = 0x03,
+};
+
+static const struct ddb_info ddb_ci_s2_pro = {
+ .type = DDB_OCTOPUS_CI,
+ .name = "Digital Devices Octopus CI S2 Pro",
+ .regmap = &octopus_map,
+ .port_num = 4,
+ .i2c_mask = 0x01,
+ .board_control = 2,
+ .board_control_2 = 4,
+};
+
+static const struct ddb_info ddb_ci_s2_pro_a = {
+ .type = DDB_OCTOPUS_CI,
+ .name = "Digital Devices Octopus CI S2 Pro Advanced",
+ .regmap = &octopus_map,
+ .port_num = 4,
+ .i2c_mask = 0x01,
+ .board_control = 2,
+ .board_control_2 = 4,
+};
+
+static const struct ddb_info ddb_dvbct = {
+ .type = DDB_OCTOPUS,
+ .name = "Digital Devices DVBCT V6.1 DVB adapter",
+ .regmap = &octopus_map,
+ .port_num = 3,
+ .i2c_mask = 0x07,
+};
+
+/****************************************************************************/
+
+static const struct ddb_info ddb_ct2_8 = {
+ .type = DDB_OCTOPUS_MAX_CT,
+ .name = "Digital Devices MAX A8 CT2",
+ .regmap = &octopus_map,
+ .port_num = 4,
+ .i2c_mask = 0x0f,
+ .board_control = 0x0ff,
+ .board_control_2 = 0xf00,
+ .ts_quirks = TS_QUIRK_SERIAL,
+ .tempmon_irq = 24,
+};
+
+static const struct ddb_info ddb_c2t2_8 = {
+ .type = DDB_OCTOPUS_MAX_CT,
+ .name = "Digital Devices MAX A8 C2T2",
+ .regmap = &octopus_map,
+ .port_num = 4,
+ .i2c_mask = 0x0f,
+ .board_control = 0x0ff,
+ .board_control_2 = 0xf00,
+ .ts_quirks = TS_QUIRK_SERIAL,
+ .tempmon_irq = 24,
+};
+
+static const struct ddb_info ddb_isdbt_8 = {
+ .type = DDB_OCTOPUS_MAX_CT,
+ .name = "Digital Devices MAX A8 ISDBT",
+ .regmap = &octopus_map,
+ .port_num = 4,
+ .i2c_mask = 0x0f,
+ .board_control = 0x0ff,
+ .board_control_2 = 0xf00,
+ .ts_quirks = TS_QUIRK_SERIAL,
+ .tempmon_irq = 24,
+};
+
+static const struct ddb_info ddb_c2t2i_v0_8 = {
+ .type = DDB_OCTOPUS_MAX_CT,
+ .name = "Digital Devices MAX A8 C2T2I V0",
+ .regmap = &octopus_map,
+ .port_num = 4,
+ .i2c_mask = 0x0f,
+ .board_control = 0x0ff,
+ .board_control_2 = 0xf00,
+ .ts_quirks = TS_QUIRK_SERIAL | TS_QUIRK_ALT_OSC,
+ .tempmon_irq = 24,
+};
+
+static const struct ddb_info ddb_c2t2i_8 = {
+ .type = DDB_OCTOPUS_MAX_CT,
+ .name = "Digital Devices MAX A8 C2T2I",
+ .regmap = &octopus_map,
+ .port_num = 4,
+ .i2c_mask = 0x0f,
+ .board_control = 0x0ff,
+ .board_control_2 = 0xf00,
+ .ts_quirks = TS_QUIRK_SERIAL,
+ .tempmon_irq = 24,
+};
+
+/****************************************************************************/
+
+static const struct ddb_info ddb_s2_48 = {
+ .type = DDB_OCTOPUS_MAX,
+ .name = "Digital Devices MAX S8 4/8",
+ .regmap = &octopus_map,
+ .port_num = 4,
+ .i2c_mask = 0x01,
+ .board_control = 1,
+ .tempmon_irq = 24,
+};
+
+/****************************************************************************/
+/****************************************************************************/
+/****************************************************************************/
+
+#define DDB_DEVID(_device, _subdevice, _info) { \
+ .vendor = DDVID, \
+ .device = _device, \
+ .subvendor = DDVID, \
+ .subdevice = _subdevice, \
+ .info = &_info }
+
+static const struct ddb_device_id ddb_device_ids[] = {
+ /* PCIe devices */
+ DDB_DEVID(0x0002, 0x0001, ddb_octopus),
+ DDB_DEVID(0x0003, 0x0001, ddb_octopus),
+ DDB_DEVID(0x0005, 0x0004, ddb_octopusv3),
+ DDB_DEVID(0x0003, 0x0002, ddb_octopus_le),
+ DDB_DEVID(0x0003, 0x0003, ddb_octopus_oem),
+ DDB_DEVID(0x0003, 0x0010, ddb_octopus_mini),
+ DDB_DEVID(0x0005, 0x0011, ddb_octopus_mini),
+ DDB_DEVID(0x0003, 0x0020, ddb_v6),
+ DDB_DEVID(0x0003, 0x0021, ddb_v6_5),
+ DDB_DEVID(0x0006, 0x0022, ddb_v7),
+ DDB_DEVID(0x0006, 0x0024, ddb_v7a),
+ DDB_DEVID(0x0003, 0x0030, ddb_dvbct),
+ DDB_DEVID(0x0003, 0xdb03, ddb_satixS2v3),
+ DDB_DEVID(0x0006, 0x0031, ddb_ctv7),
+ DDB_DEVID(0x0006, 0x0032, ddb_ctv7),
+ DDB_DEVID(0x0006, 0x0033, ddb_ctv7),
+ DDB_DEVID(0x0007, 0x0023, ddb_s2_48),
+ DDB_DEVID(0x0008, 0x0034, ddb_ct2_8),
+ DDB_DEVID(0x0008, 0x0035, ddb_c2t2_8),
+ DDB_DEVID(0x0008, 0x0036, ddb_isdbt_8),
+ DDB_DEVID(0x0008, 0x0037, ddb_c2t2i_v0_8),
+ DDB_DEVID(0x0008, 0x0038, ddb_c2t2i_8),
+ DDB_DEVID(0x0006, 0x0039, ddb_ctv7),
+ DDB_DEVID(0x0011, 0x0040, ddb_ci),
+ DDB_DEVID(0x0011, 0x0041, ddb_cis),
+ DDB_DEVID(0x0012, 0x0042, ddb_ci),
+ DDB_DEVID(0x0013, 0x0043, ddb_ci_s2_pro),
+ DDB_DEVID(0x0013, 0x0044, ddb_ci_s2_pro_a),
+};
+
+/****************************************************************************/
+
+const struct ddb_info *get_ddb_info(u16 vendor, u16 device,
+ u16 subvendor, u16 subdevice)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ddb_device_ids); i++) {
+ const struct ddb_device_id *id = &ddb_device_ids[i];
+
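+ /* a table entry with subdevice 0xffff matches any subdevice */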
+ if (vendor == id->vendor &&
+ device == id->device &&
+ subvendor == id->subvendor &&
+ ((subdevice == id->subdevice) ||
+ (id->subdevice == 0xffff)))
+ return id->info;
+ }
+
+ return &ddb_none;
+}
diff --git a/drivers/media/pci/ddbridge/ddbridge-hw.h b/drivers/media/pci/ddbridge/ddbridge-hw.h
new file mode 100644
index 000000000000..7c142419419c
--- /dev/null
+++ b/drivers/media/pci/ddbridge/ddbridge-hw.h
@@ -0,0 +1,43 @@
+/*
+ * ddbridge-hw.h: Digital Devices bridge hardware maps
+ *
+ * Copyright (C) 2010-2017 Digital Devices GmbH
+ * Ralph Metzler <rjkm@metzlerbros.de>
+ * Marcus Metzler <mocm@metzlerbros.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 only, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DDBRIDGE_HW_H_
+#define _DDBRIDGE_HW_H_
+
+#include "ddbridge.h"
+
+/******************************************************************************/
+
+#define DDVID 0xdd01 /* Digital Devices Vendor ID */
+
+/******************************************************************************/
+
+struct ddb_device_id {
+ u16 vendor;
+ u16 device;
+ u16 subvendor;
+ u16 subdevice;
+ const struct ddb_info *info;
+};
+
+/******************************************************************************/
+
+const struct ddb_info *get_ddb_info(u16 vendor, u16 device,
+ u16 subvendor, u16 subdevice);
+
+#endif /* _DDBRIDGE_HW_H_ */
diff --git a/drivers/media/pci/ddbridge/ddbridge-i2c.c b/drivers/media/pci/ddbridge/ddbridge-i2c.c
new file mode 100644
index 000000000000..e4d39c3270ae
--- /dev/null
+++ b/drivers/media/pci/ddbridge/ddbridge-i2c.c
@@ -0,0 +1,230 @@
+/*
+ * ddbridge-i2c.c: Digital Devices bridge i2c driver
+ *
+ * Copyright (C) 2010-2017 Digital Devices GmbH
+ * Ralph Metzler <rjkm@metzlerbros.de>
+ * Marcus Metzler <mocm@metzlerbros.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 only, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/io.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/timer.h>
+#include <linux/i2c.h>
+#include <linux/swab.h>
+#include <linux/vmalloc.h>
+
+#include "ddbridge.h"
+#include "ddbridge-i2c.h"
+#include "ddbridge-regs.h"
+#include "ddbridge-io.h"
+
+/******************************************************************************/
+
+static int ddb_i2c_cmd(struct ddb_i2c *i2c, u32 adr, u32 cmd)
+{
+ struct ddb *dev = i2c->dev;
+ unsigned long stat;
+ u32 val;
+
+ ddbwritel(dev, (adr << 9) | cmd, i2c->regs + I2C_COMMAND);
+ stat = wait_for_completion_timeout(&i2c->completion, HZ);
+ val = ddbreadl(dev, i2c->regs + I2C_COMMAND);
+ if (stat == 0) {
+ dev_err(dev->dev, "I2C timeout, card %d, port %d, link %u\n",
+ dev->nr, i2c->nr, i2c->link);
+ {
+ u32 istat = ddbreadl(dev, INTERRUPT_STATUS);
+
+ dev_err(dev->dev, "DDBridge IRS %08x\n", istat);
+ if (i2c->link) {
+ u32 listat = ddbreadl(dev,
+ DDB_LINK_TAG(i2c->link) |
+ INTERRUPT_STATUS);
+
+ dev_err(dev->dev, "DDBridge link %u IRS %08x\n",
+ i2c->link, listat);
+ }
+ if (istat & 1) {
+ ddbwritel(dev, istat & 1, INTERRUPT_ACK);
+ } else {
+ u32 mon = ddbreadl(dev,
+ i2c->regs + I2C_MONITOR);
+
+ dev_err(dev->dev, "I2C cmd=%08x mon=%08x\n",
+ val, mon);
+ }
+ }
+ return -EIO;
+ }
+ if (val & 0x70000)
+ return -EIO;
+ return 0;
+}
+
+static int ddb_i2c_master_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg msg[], int num)
+{
+ struct ddb_i2c *i2c = (struct ddb_i2c *) i2c_get_adapdata(adapter);
+ struct ddb *dev = i2c->dev;
+ u8 addr = 0;
+
+ addr = msg[0].addr;
+ if (msg[0].len > i2c->bsize)
+ return -EIO;
+ switch (num) {
+ case 1:
+ if (msg[0].flags & I2C_M_RD) {
+ ddbwritel(dev, msg[0].len << 16,
+ i2c->regs + I2C_TASKLENGTH);
+ if (ddb_i2c_cmd(i2c, addr, 3))
+ break;
+ ddbcpyfrom(dev, msg[0].buf,
+ i2c->rbuf, msg[0].len);
+ return num;
+ }
+ ddbcpyto(dev, i2c->wbuf, msg[0].buf, msg[0].len);
+ ddbwritel(dev, msg[0].len, i2c->regs + I2C_TASKLENGTH);
+ if (ddb_i2c_cmd(i2c, addr, 2))
+ break;
+ return num;
+ case 2:
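+ /* combined write-then-read transfer */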
+ if ((msg[0].flags & I2C_M_RD) == I2C_M_RD)
+ break;
+ if ((msg[1].flags & I2C_M_RD) != I2C_M_RD)
+ break;
+ if (msg[1].len > i2c->bsize)
+ break;
+ ddbcpyto(dev, i2c->wbuf, msg[0].buf, msg[0].len);
+ ddbwritel(dev, msg[0].len | (msg[1].len << 16),
+ i2c->regs + I2C_TASKLENGTH);
+ if (ddb_i2c_cmd(i2c, addr, 1))
+ break;
+ ddbcpyfrom(dev, msg[1].buf,
+ i2c->rbuf,
+ msg[1].len);
+ return num;
+ default:
+ break;
+ }
+ return -EIO;
+}
+
+static u32 ddb_i2c_functionality(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm ddb_i2c_algo = {
+ .master_xfer = ddb_i2c_master_xfer,
+ .functionality = ddb_i2c_functionality,
+};
+
+void ddb_i2c_release(struct ddb *dev)
+{
+ int i;
+ struct ddb_i2c *i2c;
+
+ for (i = 0; i < dev->i2c_num; i++) {
+ i2c = &dev->i2c[i];
+ i2c_del_adapter(&i2c->adap);
+ }
+}
+
+static void i2c_handler(unsigned long priv)
+{
+ struct ddb_i2c *i2c = (struct ddb_i2c *) priv;
+
+ complete(&i2c->completion);
+}
+
+static int ddb_i2c_add(struct ddb *dev, struct ddb_i2c *i2c,
+ const struct ddb_regmap *regmap, int link,
+ int i, int num)
+{
+ struct i2c_adapter *adap;
+
+ i2c->nr = i;
+ i2c->dev = dev;
+ i2c->link = link;
+ i2c->bsize = regmap->i2c_buf->size;
+ i2c->wbuf = DDB_LINK_TAG(link) |
+ (regmap->i2c_buf->base + i2c->bsize * i);
+ i2c->rbuf = i2c->wbuf; /* + i2c->bsize / 2 */
+ i2c->regs = DDB_LINK_TAG(link) |
+ (regmap->i2c->base + regmap->i2c->size * i);
+ ddbwritel(dev, I2C_SPEED_100, i2c->regs + I2C_TIMING);
+ ddbwritel(dev, ((i2c->rbuf & 0xffff) << 16) | (i2c->wbuf & 0xffff),
+ i2c->regs + I2C_TASKADDRESS);
+ init_completion(&i2c->completion);
+
+ adap = &i2c->adap;
+ i2c_set_adapdata(adap, i2c);
+#ifdef I2C_ADAP_CLASS_TV_DIGITAL
+ adap->class = I2C_ADAP_CLASS_TV_DIGITAL|I2C_CLASS_TV_ANALOG;
+#else
+#ifdef I2C_CLASS_TV_ANALOG
+ adap->class = I2C_CLASS_TV_ANALOG;
+#endif
+#endif
+ snprintf(adap->name, I2C_NAME_SIZE, "ddbridge_%02x.%x.%x",
+ dev->nr, i2c->link, i);
+ adap->algo = &ddb_i2c_algo;
+ adap->algo_data = (void *)i2c;
+ adap->dev.parent = dev->dev;
+ return i2c_add_adapter(adap);
+}
+
+int ddb_i2c_init(struct ddb *dev)
+{
+ int stat = 0;
+ u32 i, j, num = 0, l, base;
+ struct ddb_i2c *i2c;
+ struct i2c_adapter *adap;
+ const struct ddb_regmap *regmap;
+
+ for (l = 0; l < DDB_MAX_LINK; l++) {
+ if (!dev->link[l].info)
+ continue;
+ regmap = dev->link[l].info->regmap;
+ if (!regmap || !regmap->i2c)
+ continue;
+ base = regmap->irq_base_i2c;
+ for (i = 0; i < regmap->i2c->num; i++) {
+ if (!(dev->link[l].info->i2c_mask & (1 << i)))
+ continue;
+ i2c = &dev->i2c[num];
+ dev->handler_data[l][i + base] = (unsigned long) i2c;
+ dev->handler[l][i + base] = i2c_handler;
+ stat = ddb_i2c_add(dev, i2c, regmap, l, i, num);
+ if (stat)
+ break;
+ num++;
+ }
+ }
+ if (stat) {
+ for (j = 0; j < num; j++) {
+ i2c = &dev->i2c[j];
+ adap = &i2c->adap;
+ i2c_del_adapter(adap);
+ }
+ } else
+ dev->i2c_num = num;
+ return stat;
+}
diff --git a/drivers/media/pci/ddbridge/ddbridge-i2c.h b/drivers/media/pci/ddbridge/ddbridge-i2c.h
new file mode 100644
index 000000000000..7ed220506c05
--- /dev/null
+++ b/drivers/media/pci/ddbridge/ddbridge-i2c.h
@@ -0,0 +1,112 @@
+/*
+ * ddbridge-i2c.h: Digital Devices bridge i2c driver
+ *
+ * Copyright (C) 2010-2017 Digital Devices GmbH
+ * Ralph Metzler <rjkm@metzlerbros.de>
+ * Marcus Metzler <mocm@metzlerbros.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 only, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DDBRIDGE_I2C_H__
+#define __DDBRIDGE_I2C_H__
+
+#include <linux/i2c.h>
+
+#include "ddbridge.h"
+
+/******************************************************************************/
+
+void ddb_i2c_release(struct ddb *dev);
+int ddb_i2c_init(struct ddb *dev);
+
+/******************************************************************************/
+
+static int __maybe_unused i2c_io(struct i2c_adapter *adapter, u8 adr,
+ u8 *wbuf, u32 wlen, u8 *rbuf, u32 rlen)
+{
+ struct i2c_msg msgs[2] = { { .addr = adr, .flags = 0,
+ .buf = wbuf, .len = wlen },
+ { .addr = adr, .flags = I2C_M_RD,
+ .buf = rbuf, .len = rlen } };
+
+ return (i2c_transfer(adapter, msgs, 2) == 2) ? 0 : -1;
+}
+
+static int __maybe_unused i2c_write(struct i2c_adapter *adap, u8 adr,
+ u8 *data, int len)
+{
+ struct i2c_msg msg = { .addr = adr, .flags = 0,
+ .buf = data, .len = len };
+
+ return (i2c_transfer(adap, &msg, 1) == 1) ? 0 : -1;
+}
+
+static int __maybe_unused i2c_read(struct i2c_adapter *adapter, u8 adr, u8 *val)
+{
+ struct i2c_msg msgs[1] = { { .addr = adr, .flags = I2C_M_RD,
+ .buf = val, .len = 1 } };
+
+ return (i2c_transfer(adapter, msgs, 1) == 1) ? 0 : -1;
+}
+
+static int __maybe_unused i2c_read_regs(struct i2c_adapter *adapter,
+ u8 adr, u8 reg, u8 *val, u8 len)
+{
+ struct i2c_msg msgs[2] = { { .addr = adr, .flags = 0,
+ .buf = &reg, .len = 1 },
+ { .addr = adr, .flags = I2C_M_RD,
+ .buf = val, .len = len } };
+
+ return (i2c_transfer(adapter, msgs, 2) == 2) ? 0 : -1;
+}
+
+static int __maybe_unused i2c_read_regs16(struct i2c_adapter *adapter,
+ u8 adr, u16 reg, u8 *val, u8 len)
+{
+ u8 msg[2] = { reg >> 8, reg & 0xff };
+ struct i2c_msg msgs[2] = { { .addr = adr, .flags = 0,
+ .buf = msg, .len = 2 },
+ { .addr = adr, .flags = I2C_M_RD,
+ .buf = val, .len = len } };
+
+ return (i2c_transfer(adapter, msgs, 2) == 2) ? 0 : -1;
+}
+
+static int __maybe_unused i2c_write_reg16(struct i2c_adapter *adap,
+ u8 adr, u16 reg, u8 val)
+{
+ u8 msg[3] = { reg >> 8, reg & 0xff, val };
+
+ return i2c_write(adap, adr, msg, 3);
+}
+
+static int __maybe_unused i2c_write_reg(struct i2c_adapter *adap,
+ u8 adr, u8 reg, u8 val)
+{
+ u8 msg[2] = { reg, val };
+
+ return i2c_write(adap, adr, msg, 2);
+}
+
+static int __maybe_unused i2c_read_reg16(struct i2c_adapter *adapter,
+ u8 adr, u16 reg, u8 *val)
+{
+ return i2c_read_regs16(adapter, adr, reg, val, 1);
+}
+
+static int __maybe_unused i2c_read_reg(struct i2c_adapter *adapter,
+ u8 adr, u8 reg, u8 *val)
+{
+ return i2c_read_regs(adapter, adr, reg, val, 1);
+}
+
+#endif /* __DDBRIDGE_I2C_H__ */
diff --git a/drivers/media/pci/ddbridge/ddbridge-io.h b/drivers/media/pci/ddbridge/ddbridge-io.h
new file mode 100644
index 000000000000..a4c6bbe09168
--- /dev/null
+++ b/drivers/media/pci/ddbridge/ddbridge-io.h
@@ -0,0 +1,71 @@
+/*
+ * ddbridge-io.h: Digital Devices bridge I/O inline functions
+ *
+ * Copyright (C) 2010-2017 Digital Devices GmbH
+ * Ralph Metzler <rjkm@metzlerbros.de>
+ * Marcus Metzler <mocm@metzlerbros.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 only, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DDBRIDGE_IO_H__
+#define __DDBRIDGE_IO_H__
+
+#include <linux/io.h>
+
+#include "ddbridge.h"
+
+/******************************************************************************/
+
+static inline u32 ddblreadl(struct ddb_link *link, u32 adr)
+{
+ return readl(link->dev->regs + adr);
+}
+
+static inline void ddblwritel(struct ddb_link *link, u32 val, u32 adr)
+{
+ writel(val, link->dev->regs + adr);
+}
+
+static inline u32 ddbreadl(struct ddb *dev, u32 adr)
+{
+ return readl(dev->regs + adr);
+}
+
+static inline void ddbwritel(struct ddb *dev, u32 val, u32 adr)
+{
+ writel(val, dev->regs + adr);
+}
+
+static inline void ddbcpyto(struct ddb *dev, u32 adr, void *src, long count)
+{
+ return memcpy_toio(dev->regs + adr, src, count);
+}
+
+static inline void ddbcpyfrom(struct ddb *dev, void *dst, u32 adr, long count)
+{
+ return memcpy_fromio(dst, dev->regs + adr, count);
+}
+
+static inline u32 safe_ddbreadl(struct ddb *dev, u32 adr)
+{
+ u32 val = ddbreadl(dev, adr);
+
+ /* (ddb)readl returns (uint)-1 (all bits set) on failure, catch that */
+ if (val == ~0) {
+ dev_err(&dev->pdev->dev, "ddbreadl failure, adr=%08x\n", adr);
+ return 0;
+ }
+
+ return val;
+}
+
+#endif /* __DDBRIDGE_IO_H__ */
diff --git a/drivers/media/pci/ddbridge/ddbridge-main.c b/drivers/media/pci/ddbridge/ddbridge-main.c
new file mode 100644
index 000000000000..ccac7fe31336
--- /dev/null
+++ b/drivers/media/pci/ddbridge/ddbridge-main.c
@@ -0,0 +1,346 @@
+/*
+ * ddbridge-main.c: Digital Devices PCIe bridge driver
+ *
+ * Copyright (C) 2010-2017 Digital Devices GmbH
+ * Ralph Metzler <rjkm@metzlerbros.de>
+ * Marcus Metzler <mocm@metzlerbros.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 only, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/io.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/timer.h>
+#include <linux/i2c.h>
+#include <linux/swab.h>
+#include <linux/vmalloc.h>
+
+#include "ddbridge.h"
+#include "ddbridge-i2c.h"
+#include "ddbridge-regs.h"
+#include "ddbridge-hw.h"
+#include "ddbridge-io.h"
+
+/****************************************************************************/
+/* module parameters */
+
+#ifdef CONFIG_PCI_MSI
+#ifdef CONFIG_DVB_DDBRIDGE_MSIENABLE
+static int msi = 1;
+#else
+static int msi;
+#endif
+module_param(msi, int, 0444);
+#ifdef CONFIG_DVB_DDBRIDGE_MSIENABLE
+MODULE_PARM_DESC(msi, "Control MSI interrupts: 0-disable, 1-enable (default)");
+#else
+MODULE_PARM_DESC(msi, "Control MSI interrupts: 0-disable (default), 1-enable");
+#endif
+#endif
+
+int ci_bitrate = 70000;
+module_param(ci_bitrate, int, 0444);
+MODULE_PARM_DESC(ci_bitrate, "Bitrate in kHz for output to CI.");
+
+int ts_loop = -1;
+module_param(ts_loop, int, 0444);
+MODULE_PARM_DESC(ts_loop, "TS in/out test loop on port ts_loop");
+
+int xo2_speed = 2;
+module_param(xo2_speed, int, 0444);
+MODULE_PARM_DESC(xo2_speed, "default transfer speed for xo2 based duoflex, 0=55,1=75,2=90,3=104 MBit/s, default=2, use attribute to change for individual cards");
+
+#ifdef __arm__
+int alt_dma = 1;
+#else
+int alt_dma;
+#endif
+module_param(alt_dma, int, 0444);
+MODULE_PARM_DESC(alt_dma, "use alternative DMA buffer handling");
+
+int no_init;
+module_param(no_init, int, 0444);
+MODULE_PARM_DESC(no_init, "do not initialize most devices");
+
+int stv0910_single;
+module_param(stv0910_single, int, 0444);
+MODULE_PARM_DESC(stv0910_single, "use stv0910 cards as single demods");
+
+/****************************************************************************/
+/****************************************************************************/
+/****************************************************************************/
+
+static void ddb_irq_disable(struct ddb *dev)
+{
+ ddbwritel(dev, 0, INTERRUPT_ENABLE);
+ ddbwritel(dev, 0, MSI1_ENABLE);
+}
+
+static void ddb_irq_exit(struct ddb *dev)
+{
+ ddb_irq_disable(dev);
+ if (dev->msi == 2)
+ free_irq(dev->pdev->irq + 1, dev);
+ free_irq(dev->pdev->irq, dev);
+#ifdef CONFIG_PCI_MSI
+ if (dev->msi)
+ pci_disable_msi(dev->pdev);
+#endif
+}
+
+static void ddb_remove(struct pci_dev *pdev)
+{
+ struct ddb *dev = (struct ddb *) pci_get_drvdata(pdev);
+
+ ddb_device_destroy(dev);
+ ddb_ports_detach(dev);
+ ddb_i2c_release(dev);
+
+ ddb_irq_exit(dev);
+ ddb_ports_release(dev);
+ ddb_buffers_free(dev);
+
+ ddb_unmap(dev);
+ pci_set_drvdata(pdev, NULL);
+ pci_disable_device(pdev);
+}
+
+#ifdef CONFIG_PCI_MSI
+static void ddb_irq_msi(struct ddb *dev, int nr)
+{
+ int stat;
+
+ if (msi && pci_msi_enabled()) {
+ stat = pci_alloc_irq_vectors(dev->pdev, 1, nr, PCI_IRQ_MSI);
+ if (stat >= 1) {
+ dev->msi = stat;
+ dev_info(dev->dev, "using %d MSI interrupt(s)\n",
+ dev->msi);
+ } else
+ dev_info(dev->dev, "MSI not available.\n");
+ }
+}
+#endif
+
+static int ddb_irq_init(struct ddb *dev)
+{
+ int stat;
+ int irq_flag = IRQF_SHARED;
+
+ ddbwritel(dev, 0x00000000, INTERRUPT_ENABLE);
+ ddbwritel(dev, 0x00000000, MSI1_ENABLE);
+ ddbwritel(dev, 0x00000000, MSI2_ENABLE);
+ ddbwritel(dev, 0x00000000, MSI3_ENABLE);
+ ddbwritel(dev, 0x00000000, MSI4_ENABLE);
+ ddbwritel(dev, 0x00000000, MSI5_ENABLE);
+ ddbwritel(dev, 0x00000000, MSI6_ENABLE);
+ ddbwritel(dev, 0x00000000, MSI7_ENABLE);
+
+#ifdef CONFIG_PCI_MSI
+ ddb_irq_msi(dev, 2);
+
+ if (dev->msi)
+ irq_flag = 0;
+ if (dev->msi == 2) {
+ stat = request_irq(dev->pdev->irq, ddb_irq_handler0,
+ irq_flag, "ddbridge", (void *) dev);
+ if (stat < 0)
+ return stat;
+ stat = request_irq(dev->pdev->irq + 1, ddb_irq_handler1,
+ irq_flag, "ddbridge", (void *) dev);
+ if (stat < 0) {
+ free_irq(dev->pdev->irq, dev);
+ return stat;
+ }
+ } else
+#endif
+ {
+ stat = request_irq(dev->pdev->irq, ddb_irq_handler,
+ irq_flag, "ddbridge", (void *) dev);
+ if (stat < 0)
+ return stat;
+ }
+ if (dev->msi == 2) {
+ ddbwritel(dev, 0x0fffff00, INTERRUPT_ENABLE);
+ ddbwritel(dev, 0x0000000f, MSI1_ENABLE);
+ } else {
+ ddbwritel(dev, 0x0fffff0f, INTERRUPT_ENABLE);
+ ddbwritel(dev, 0x00000000, MSI1_ENABLE);
+ }
+ return stat;
+}
+
+static int ddb_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct ddb *dev;
+ int stat = 0;
+
+ if (pci_enable_device(pdev) < 0)
+ return -ENODEV;
+
+ pci_set_master(pdev);
+
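+ /* prefer 64-bit DMA, fall back to 32-bit addressing */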
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
+ return -ENODEV;
+
+ dev = vzalloc(sizeof(struct ddb));
+ if (dev == NULL)
+ return -ENOMEM;
+
+ mutex_init(&dev->mutex);
+ dev->has_dma = 1;
+ dev->pdev = pdev;
+ dev->dev = &pdev->dev;
+ pci_set_drvdata(pdev, dev);
+
+ dev->link[0].ids.vendor = id->vendor;
+ dev->link[0].ids.device = id->device;
+ dev->link[0].ids.subvendor = id->subvendor;
+ dev->link[0].ids.subdevice = pdev->subsystem_device;
+
+ dev->link[0].dev = dev;
+ dev->link[0].info = get_ddb_info(id->vendor, id->device,
+ id->subvendor, pdev->subsystem_device);
+
+ dev_info(&pdev->dev, "detected %s\n", dev->link[0].info->name);
+
+ dev->regs_len = pci_resource_len(dev->pdev, 0);
+ dev->regs = ioremap(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
+
+ if (!dev->regs) {
+ dev_err(&pdev->dev, "not enough memory for register map\n");
+ stat = -ENOMEM;
+ goto fail;
+ }
+ if (ddbreadl(dev, 0) == 0xffffffff) {
+ dev_err(&pdev->dev, "cannot read registers\n");
+ stat = -ENODEV;
+ goto fail;
+ }
+
+ dev->link[0].ids.hwid = ddbreadl(dev, 0);
+ dev->link[0].ids.regmapid = ddbreadl(dev, 4);
+
+ dev_info(&pdev->dev, "HW %08x REGMAP %08x\n",
+ dev->link[0].ids.hwid, dev->link[0].ids.regmapid);
+
+ ddbwritel(dev, 0, DMA_BASE_READ);
+ ddbwritel(dev, 0, DMA_BASE_WRITE);
+
+ stat = ddb_irq_init(dev);
+ if (stat < 0)
+ goto fail0;
+
+ if (ddb_init(dev) == 0)
+ return 0;
+
+ ddb_irq_exit(dev);
+fail0:
+ dev_err(&pdev->dev, "fail0\n");
+ if (dev->msi)
+ pci_disable_msi(dev->pdev);
+fail:
+ dev_err(&pdev->dev, "fail\n");
+
+ ddb_unmap(dev);
+ pci_set_drvdata(pdev, NULL);
+ pci_disable_device(pdev);
+ return -1;
+}
+
+/****************************************************************************/
+/****************************************************************************/
+/****************************************************************************/
+
+#define DDB_DEVICE_ANY(_device) \
+ { PCI_DEVICE_SUB(DDVID, _device, DDVID, PCI_ANY_ID) }
+
+static const struct pci_device_id ddb_id_table[] = {
+ DDB_DEVICE_ANY(0x0002),
+ DDB_DEVICE_ANY(0x0003),
+ DDB_DEVICE_ANY(0x0005),
+ DDB_DEVICE_ANY(0x0006),
+ DDB_DEVICE_ANY(0x0007),
+ DDB_DEVICE_ANY(0x0008),
+ DDB_DEVICE_ANY(0x0011),
+ DDB_DEVICE_ANY(0x0012),
+ DDB_DEVICE_ANY(0x0013),
+ DDB_DEVICE_ANY(0x0201),
+ DDB_DEVICE_ANY(0x0203),
+ DDB_DEVICE_ANY(0x0210),
+ DDB_DEVICE_ANY(0x0220),
+ DDB_DEVICE_ANY(0x0320),
+ DDB_DEVICE_ANY(0x0321),
+ DDB_DEVICE_ANY(0x0322),
+ DDB_DEVICE_ANY(0x0323),
+ DDB_DEVICE_ANY(0x0328),
+ DDB_DEVICE_ANY(0x0329),
+ {0}
+};
+
+MODULE_DEVICE_TABLE(pci, ddb_id_table);
+
+static struct pci_driver ddb_pci_driver = {
+ .name = "ddbridge",
+ .id_table = ddb_id_table,
+ .probe = ddb_probe,
+ .remove = ddb_remove,
+};
+
+static __init int module_init_ddbridge(void)
+{
+ int stat = -1;
+
+ pr_info("Digital Devices PCIE bridge driver "
+ DDBRIDGE_VERSION
+ ", Copyright (C) 2010-17 Digital Devices GmbH\n");
+ if (ddb_class_create() < 0)
+ return -1;
+ ddb_wq = create_workqueue("ddbridge");
+ if (ddb_wq == NULL)
+ goto exit1;
+ stat = pci_register_driver(&ddb_pci_driver);
+ if (stat < 0)
+ goto exit2;
+ return stat;
+exit2:
+ destroy_workqueue(ddb_wq);
+exit1:
+ ddb_class_destroy();
+ return stat;
+}
+
+static __exit void module_exit_ddbridge(void)
+{
+ pci_unregister_driver(&ddb_pci_driver);
+ destroy_workqueue(ddb_wq);
+ ddb_class_destroy();
+}
+
+module_init(module_init_ddbridge);
+module_exit(module_exit_ddbridge);
+
+MODULE_DESCRIPTION("Digital Devices PCIe Bridge");
+MODULE_AUTHOR("Ralph and Marcus Metzler, Metzler Brothers Systementwicklung GbR");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DDBRIDGE_VERSION);
diff --git a/drivers/media/pci/ddbridge/ddbridge-maxs8.c b/drivers/media/pci/ddbridge/ddbridge-maxs8.c
new file mode 100644
index 000000000000..f8a53bc7c86c
--- /dev/null
+++ b/drivers/media/pci/ddbridge/ddbridge-maxs8.c
@@ -0,0 +1,444 @@
+/*
+ * ddbridge-maxs8.c: Digital Devices bridge MaxS4/8 support
+ *
+ * Copyright (C) 2010-2017 Digital Devices GmbH
+ * Ralph Metzler <rjkm@metzlerbros.de>
+ * Marcus Metzler <mocm@metzlerbros.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 only, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/io.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/timer.h>
+#include <linux/i2c.h>
+#include <linux/swab.h>
+#include <linux/vmalloc.h>
+
+#include "ddbridge.h"
+#include "ddbridge-regs.h"
+#include "ddbridge-io.h"
+
+#include "ddbridge-maxs8.h"
+#include "mxl5xx.h"
+
+/******************************************************************************/
+
+/* MaxS4/8 related modparams */
+static int fmode;
+module_param(fmode, int, 0444);
+MODULE_PARM_DESC(fmode, "frontend emulation mode");
+
+static int fmode_sat = -1;
+module_param(fmode_sat, int, 0444);
+MODULE_PARM_DESC(fmode_sat, "set frontend emulation mode sat");
+
+static int old_quattro;
+module_param(old_quattro, int, 0444);
+MODULE_PARM_DESC(old_quattro, "old quattro LNB input order");
+
+/******************************************************************************/
+
+static int lnb_command(struct ddb *dev, u32 link, u32 lnb, u32 cmd)
+{
+ u32 c, v = 0, tag = DDB_LINK_TAG(link);
+
+ v = LNB_TONE & (dev->link[link].lnb.tone << (15 - lnb));
+ ddbwritel(dev, cmd | v, tag | LNB_CONTROL(lnb));
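+ /* poll for up to ~200 ms until the controller has processed the command */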
+ for (c = 0; c < 10; c++) {
+ v = ddbreadl(dev, tag | LNB_CONTROL(lnb));
+ if ((v & LNB_BUSY) == 0)
+ break;
+ msleep(20);
+ }
+ if (c == 10)
+ dev_info(dev->dev, "%s lnb = %08x cmd = %08x\n",
+ __func__, lnb, cmd);
+ return 0;
+}
+
+static int max_send_master_cmd(struct dvb_frontend *fe,
+ struct dvb_diseqc_master_cmd *cmd)
+{
+ struct ddb_input *input = fe->sec_priv;
+ struct ddb_port *port = input->port;
+ struct ddb *dev = port->dev;
+ struct ddb_dvb *dvb = &port->dvb[input->nr & 1];
+ u32 tag = DDB_LINK_TAG(port->lnr);
+ int i;
+ u32 fmode = dev->link[port->lnr].lnb.fmode;
+
+ if (fmode == 2 || fmode == 1)
+ return 0;
+ if (dvb->diseqc_send_master_cmd)
+ dvb->diseqc_send_master_cmd(fe, cmd);
+
+ mutex_lock(&dev->link[port->lnr].lnb.lock);
+ ddbwritel(dev, 0, tag | LNB_BUF_LEVEL(dvb->input));
+ for (i = 0; i < cmd->msg_len; i++)
+ ddbwritel(dev, cmd->msg[i], tag | LNB_BUF_WRITE(dvb->input));
+ lnb_command(dev, port->lnr, dvb->input, LNB_CMD_DISEQC);
+ mutex_unlock(&dev->link[port->lnr].lnb.lock);
+ return 0;
+}
+
+static int lnb_send_diseqc(struct ddb *dev, u32 link, u32 input,
+ struct dvb_diseqc_master_cmd *cmd)
+{
+ u32 tag = DDB_LINK_TAG(link);
+ int i;
+
+ ddbwritel(dev, 0, tag | LNB_BUF_LEVEL(input));
+ for (i = 0; i < cmd->msg_len; i++)
+ ddbwritel(dev, cmd->msg[i], tag | LNB_BUF_WRITE(input));
+ lnb_command(dev, link, input, LNB_CMD_DISEQC);
+ return 0;
+}
+
+static int lnb_set_sat(struct ddb *dev, u32 link, u32 input, u32 sat, u32 band,
+ u32 hor)
+{
+ struct dvb_diseqc_master_cmd cmd = {
+ .msg = {0xe0, 0x10, 0x38, 0xf0, 0x00, 0x00},
+ .msg_len = 4
+ };
+ cmd.msg[3] = 0xf0 | (((sat << 2) & 0x0c) | (band ? 1 : 0) |
+ (hor ? 2 : 0));
+ return lnb_send_diseqc(dev, link, input, &cmd);
+}
+
+static int lnb_set_tone(struct ddb *dev, u32 link, u32 input,
+ enum fe_sec_tone_mode tone)
+{
+ int s = 0;
+ u32 mask = (1ULL << input);
+
+ switch (tone) {
+ case SEC_TONE_OFF:
+ if (!(dev->link[link].lnb.tone & mask))
+ return 0;
+ dev->link[link].lnb.tone &= ~(1ULL << input);
+ break;
+ case SEC_TONE_ON:
+ if (dev->link[link].lnb.tone & mask)
+ return 0;
+ dev->link[link].lnb.tone |= (1ULL << input);
+ break;
+ default:
+ s = -EINVAL;
+ break;
+ }
+ if (!s)
+ s = lnb_command(dev, link, input, LNB_CMD_NOP);
+ return s;
+}
+
+static int lnb_set_voltage(struct ddb *dev, u32 link, u32 input,
+ enum fe_sec_voltage voltage)
+{
+ int s = 0;
+
+ if (dev->link[link].lnb.oldvoltage[input] == voltage)
+ return 0;
+ switch (voltage) {
+ case SEC_VOLTAGE_OFF:
+ if (dev->link[link].lnb.voltage[input])
+ return 0;
+ lnb_command(dev, link, input, LNB_CMD_OFF);
+ break;
+ case SEC_VOLTAGE_13:
+ lnb_command(dev, link, input, LNB_CMD_LOW);
+ break;
+ case SEC_VOLTAGE_18:
+ lnb_command(dev, link, input, LNB_CMD_HIGH);
+ break;
+ default:
+ s = -EINVAL;
+ break;
+ }
+ dev->link[link].lnb.oldvoltage[input] = voltage;
+ return s;
+}
+
+static int max_set_input_unlocked(struct dvb_frontend *fe, int in)
+{
+ struct ddb_input *input = fe->sec_priv;
+ struct ddb_port *port = input->port;
+ struct ddb *dev = port->dev;
+ struct ddb_dvb *dvb = &port->dvb[input->nr & 1];
+ int res = 0;
+
+ if (in > 3)
+ return -EINVAL;
+ if (dvb->input != in) {
+ u32 bit = (1ULL << input->nr);
+ u32 obit =
+ dev->link[port->lnr].lnb.voltage[dvb->input & 3] & bit;
+
+ dev->link[port->lnr].lnb.voltage[dvb->input & 3] &= ~bit;
+ dvb->input = in;
+ dev->link[port->lnr].lnb.voltage[dvb->input & 3] |= obit;
+ }
+ res = dvb->set_input(fe, in);
+ return res;
+}
+
+static int max_set_tone(struct dvb_frontend *fe, enum fe_sec_tone_mode tone)
+{
+ struct ddb_input *input = fe->sec_priv;
+ struct ddb_port *port = input->port;
+ struct ddb *dev = port->dev;
+ struct ddb_dvb *dvb = &port->dvb[input->nr & 1];
+ int tuner = 0;
+ int res = 0;
+ u32 fmode = dev->link[port->lnr].lnb.fmode;
+
+ mutex_lock(&dev->link[port->lnr].lnb.lock);
+ dvb->tone = tone;
+ switch (fmode) {
+ default:
+ case 0:
+ case 3:
+ res = lnb_set_tone(dev, port->lnr, dvb->input, tone);
+ break;
+ case 1:
+ case 2:
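+ /* in the quattro emulation modes band/polarisation are selected by switching the tuner input */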
+ if (old_quattro) {
+ if (dvb->tone == SEC_TONE_ON)
+ tuner |= 2;
+ if (dvb->voltage == SEC_VOLTAGE_18)
+ tuner |= 1;
+ } else {
+ if (dvb->tone == SEC_TONE_ON)
+ tuner |= 1;
+ if (dvb->voltage == SEC_VOLTAGE_18)
+ tuner |= 2;
+ }
+ res = max_set_input_unlocked(fe, tuner);
+ break;
+ }
+ mutex_unlock(&dev->link[port->lnr].lnb.lock);
+ return res;
+}
+
+static int max_set_voltage(struct dvb_frontend *fe, enum fe_sec_voltage voltage)
+{
+ struct ddb_input *input = fe->sec_priv;
+ struct ddb_port *port = input->port;
+ struct ddb *dev = port->dev;
+ struct ddb_dvb *dvb = &port->dvb[input->nr & 1];
+ int tuner = 0;
+ u32 nv, ov = dev->link[port->lnr].lnb.voltages;
+ int res = 0;
+ u32 fmode = dev->link[port->lnr].lnb.fmode;
+
+ mutex_lock(&dev->link[port->lnr].lnb.lock);
+ dvb->voltage = voltage;
+
+ switch (fmode) {
+ case 3:
+ default:
+ case 0:
+ if (fmode == 3)
+ max_set_input_unlocked(fe, 0);
+ if (voltage == SEC_VOLTAGE_OFF)
+ dev->link[port->lnr].lnb.voltage[dvb->input] &=
+ ~(1ULL << input->nr);
+ else
+ dev->link[port->lnr].lnb.voltage[dvb->input] |=
+ (1ULL << input->nr);
+
+ res = lnb_set_voltage(dev, port->lnr, dvb->input, voltage);
+ break;
+ case 1:
+ case 2:
+ if (voltage == SEC_VOLTAGE_OFF)
+ dev->link[port->lnr].lnb.voltages &=
+ ~(1ULL << input->nr);
+ else
+ dev->link[port->lnr].lnb.voltages |=
+ (1ULL << input->nr);
+
+ nv = dev->link[port->lnr].lnb.voltages;
+
+ if (old_quattro) {
+ if (dvb->tone == SEC_TONE_ON)
+ tuner |= 2;
+ if (dvb->voltage == SEC_VOLTAGE_18)
+ tuner |= 1;
+ } else {
+ if (dvb->tone == SEC_TONE_ON)
+ tuner |= 1;
+ if (dvb->voltage == SEC_VOLTAGE_18)
+ tuner |= 2;
+ }
+ res = max_set_input_unlocked(fe, tuner);
+
+ if (nv != ov) {
+ if (nv) {
+ lnb_set_voltage(dev,
+ port->lnr, 0, SEC_VOLTAGE_13);
+ if (fmode == 1) {
+ lnb_set_voltage(dev, port->lnr,
+ 0, SEC_VOLTAGE_13);
+ if (old_quattro) {
+ lnb_set_voltage(dev, port->lnr,
+ 1, SEC_VOLTAGE_18);
+ lnb_set_voltage(dev, port->lnr,
+ 2, SEC_VOLTAGE_13);
+ } else {
+ lnb_set_voltage(dev, port->lnr,
+ 1, SEC_VOLTAGE_13);
+ lnb_set_voltage(dev, port->lnr,
+ 2, SEC_VOLTAGE_18);
+ }
+ lnb_set_voltage(dev, port->lnr,
+ 3, SEC_VOLTAGE_18);
+ }
+ } else {
+ lnb_set_voltage(dev, port->lnr,
+ 0, SEC_VOLTAGE_OFF);
+ if (fmode == 1) {
+ lnb_set_voltage(dev, port->lnr,
+ 1, SEC_VOLTAGE_OFF);
+ lnb_set_voltage(dev, port->lnr,
+ 2, SEC_VOLTAGE_OFF);
+ lnb_set_voltage(dev, port->lnr,
+ 3, SEC_VOLTAGE_OFF);
+ }
+ }
+ }
+ break;
+ }
+ mutex_unlock(&dev->link[port->lnr].lnb.lock);
+ return res;
+}
+
+static int max_enable_high_lnb_voltage(struct dvb_frontend *fe, long arg)
+{
+ return 0;
+}
+
+static int max_send_burst(struct dvb_frontend *fe, enum fe_sec_mini_cmd burst)
+{
+ return 0;
+}
+
+static int mxl_fw_read(void *priv, u8 *buf, u32 len)
+{
+ struct ddb_link *link = priv;
+ struct ddb *dev = link->dev;
+
+ dev_info(dev->dev, "Read mxl_fw from link %u\n", link->nr);
+
+ return ddbridge_flashread(dev, link->nr, buf, 0xc0000, len);
+}
+
+int lnb_init_fmode(struct ddb *dev, struct ddb_link *link, u32 fm)
+{
+ u32 l = link->nr;
+
+ if (link->lnb.fmode == fm)
+ return 0;
+ dev_info(dev->dev, "Set fmode link %u = %u\n", l, fm);
+ mutex_lock(&link->lnb.lock);
+ if (fm == 2 || fm == 1) {
+ if (fmode_sat >= 0) {
+ lnb_set_sat(dev, l, 0, fmode_sat, 0, 0);
+ if (old_quattro) {
+ lnb_set_sat(dev, l, 1, fmode_sat, 0, 1);
+ lnb_set_sat(dev, l, 2, fmode_sat, 1, 0);
+ } else {
+ lnb_set_sat(dev, l, 1, fmode_sat, 1, 0);
+ lnb_set_sat(dev, l, 2, fmode_sat, 0, 1);
+ }
+ lnb_set_sat(dev, l, 3, fmode_sat, 1, 1);
+ }
+ lnb_set_tone(dev, l, 0, SEC_TONE_OFF);
+ if (old_quattro) {
+ lnb_set_tone(dev, l, 1, SEC_TONE_OFF);
+ lnb_set_tone(dev, l, 2, SEC_TONE_ON);
+ } else {
+ lnb_set_tone(dev, l, 1, SEC_TONE_ON);
+ lnb_set_tone(dev, l, 2, SEC_TONE_OFF);
+ }
+ lnb_set_tone(dev, l, 3, SEC_TONE_ON);
+ }
+ link->lnb.fmode = fm;
+ mutex_unlock(&link->lnb.lock);
+ return 0;
+}
+
+static struct mxl5xx_cfg mxl5xx = {
+ .adr = 0x60,
+ .type = 0x01,
+ .clk = 27000000,
+ .ts_clk = 139,
+ .cap = 12,
+ .fw_read = mxl_fw_read,
+};
+
+int fe_attach_mxl5xx(struct ddb_input *input)
+{
+ struct ddb *dev = input->port->dev;
+ struct i2c_adapter *i2c = &input->port->i2c->adap;
+ struct ddb_dvb *dvb = &input->port->dvb[input->nr & 1];
+ struct ddb_port *port = input->port;
+ struct ddb_link *link = &dev->link[port->lnr];
+ struct mxl5xx_cfg cfg;
+ int demod, tuner;
+
+ cfg = mxl5xx;
+ cfg.fw_priv = link;
+ dvb->set_input = NULL;
+
+ demod = input->nr;
+ tuner = demod & 3;
+ if (fmode == 3)
+ tuner = 0;
+
+ dvb->fe = dvb_attach(mxl5xx_attach, i2c, &cfg,
+ demod, tuner, &dvb->set_input);
+
+ if (!dvb->fe) {
+ dev_err(dev->dev, "No MXL5XX found!\n");
+ return -ENODEV;
+ }
+
+ if (!dvb->set_input) {
+ dev_err(dev->dev, "No mxl5xx_set_input function pointer!\n");
+ return -ENODEV;
+ }
+
+ if (input->nr < 4) {
+ lnb_command(dev, port->lnr, input->nr, LNB_CMD_INIT);
+ lnb_set_voltage(dev, port->lnr, input->nr, SEC_VOLTAGE_OFF);
+ }
+ lnb_init_fmode(dev, link, fmode);
+
+ dvb->fe->ops.set_voltage = max_set_voltage;
+ dvb->fe->ops.enable_high_lnb_voltage = max_enable_high_lnb_voltage;
+ dvb->fe->ops.set_tone = max_set_tone;
+ dvb->diseqc_send_master_cmd = dvb->fe->ops.diseqc_send_master_cmd;
+ dvb->fe->ops.diseqc_send_master_cmd = max_send_master_cmd;
+ dvb->fe->ops.diseqc_send_burst = max_send_burst;
+ dvb->fe->sec_priv = input;
+ dvb->input = tuner;
+ return 0;
+}
diff --git a/drivers/media/pci/ddbridge/ddbridge-maxs8.h b/drivers/media/pci/ddbridge/ddbridge-maxs8.h
new file mode 100644
index 000000000000..bb8884811a46
--- /dev/null
+++ b/drivers/media/pci/ddbridge/ddbridge-maxs8.h
@@ -0,0 +1,29 @@
+/*
+ * ddbridge-maxs8.h: Digital Devices bridge MaxS4/8 support
+ *
+ * Copyright (C) 2010-2017 Digital Devices GmbH
+ * Ralph Metzler <rjkm@metzlerbros.de>
+ * Marcus Metzler <mocm@metzlerbros.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 only, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DDBRIDGE_MAXS8_H_
+#define _DDBRIDGE_MAXS8_H_
+
+#include "ddbridge.h"
+
+/******************************************************************************/
+
+int lnb_init_fmode(struct ddb *dev, struct ddb_link *link, u32 fm);
+int fe_attach_mxl5xx(struct ddb_input *input);
+
+#endif /* _DDBRIDGE_MAXS8_H_ */
diff --git a/drivers/media/pci/ddbridge/ddbridge-regs.h b/drivers/media/pci/ddbridge/ddbridge-regs.h
index 98cebb97d64f..9d44f8d3af75 100644
--- a/drivers/media/pci/ddbridge/ddbridge-regs.h
+++ b/drivers/media/pci/ddbridge/ddbridge-regs.h
@@ -1,7 +1,7 @@
/*
* ddbridge-regs.h: Digital Devices PCIe bridge driver
*
- * Copyright (C) 2010-2011 Digital Devices GmbH
+ * Copyright (C) 2010-2017 Digital Devices GmbH
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -17,20 +17,26 @@
* http://www.gnu.org/copyleft/gpl.html
*/
-/* DD-DVBBridgeV1.h 273 2010-09-17 05:03:16Z manfred */
+/* ------------------------------------------------------------------------- */
+/* SPI Controller */
-/* Register Definitions */
+#define SPI_CONTROL 0x10
+#define SPI_DATA 0x14
-#define CUR_REGISTERMAP_VERSION 0x10000
+/* ------------------------------------------------------------------------- */
+/* GPIO */
-#define HARDWARE_VERSION 0x00
-#define REGISTERMAP_VERSION 0x04
+#define GPIO_OUTPUT 0x20
+#define GPIO_INPUT 0x24
+#define GPIO_DIRECTION 0x28
/* ------------------------------------------------------------------------- */
-/* SPI Controller */
+/* MDIO */
-#define SPI_CONTROL 0x10
-#define SPI_DATA 0x14
+#define MDIO_CTRL 0x20
+#define MDIO_ADR 0x24
+#define MDIO_REG 0x28
+#define MDIO_VAL 0x2C
/* ------------------------------------------------------------------------- */
@@ -38,14 +44,14 @@
/* ------------------------------------------------------------------------- */
-/* Interrupt controller */
-/* How many MSI's are available depends on HW (Min 2 max 8) */
-/* How many are usable also depends on Host platform */
+/* Interrupt controller
+ * How many MSI's are available depends on HW (Min 2 max 8)
+ * How many are usable also depends on Host platform
+ */
#define INTERRUPT_BASE (0x40)
#define INTERRUPT_ENABLE (INTERRUPT_BASE + 0x00)
-#define MSI0_ENABLE (INTERRUPT_BASE + 0x00)
#define MSI1_ENABLE (INTERRUPT_BASE + 0x04)
#define MSI2_ENABLE (INTERRUPT_BASE + 0x08)
#define MSI3_ENABLE (INTERRUPT_BASE + 0x0C)
@@ -57,59 +63,31 @@
#define INTERRUPT_STATUS (INTERRUPT_BASE + 0x20)
#define INTERRUPT_ACK (INTERRUPT_BASE + 0x20)
-#define INTMASK_I2C1 (0x00000001)
-#define INTMASK_I2C2 (0x00000002)
-#define INTMASK_I2C3 (0x00000004)
-#define INTMASK_I2C4 (0x00000008)
-
-#define INTMASK_CIRQ1 (0x00000010)
-#define INTMASK_CIRQ2 (0x00000020)
-#define INTMASK_CIRQ3 (0x00000040)
-#define INTMASK_CIRQ4 (0x00000080)
-
-#define INTMASK_TSINPUT1 (0x00000100)
-#define INTMASK_TSINPUT2 (0x00000200)
-#define INTMASK_TSINPUT3 (0x00000400)
-#define INTMASK_TSINPUT4 (0x00000800)
-#define INTMASK_TSINPUT5 (0x00001000)
-#define INTMASK_TSINPUT6 (0x00002000)
-#define INTMASK_TSINPUT7 (0x00004000)
-#define INTMASK_TSINPUT8 (0x00008000)
-
-#define INTMASK_TSOUTPUT1 (0x00010000)
-#define INTMASK_TSOUTPUT2 (0x00020000)
-#define INTMASK_TSOUTPUT3 (0x00040000)
-#define INTMASK_TSOUTPUT4 (0x00080000)
+/* Temperature Monitor (2x LM75A @ 0x90, 0x92 I2C) */
+#define TEMPMON_BASE (0x1c0)
+#define TEMPMON_CONTROL (TEMPMON_BASE + 0x00)
+
+#define TEMPMON_CONTROL_AUTOSCAN (0x00000002)
+#define TEMPMON_CONTROL_INTENABLE (0x00000004)
+#define TEMPMON_CONTROL_OVERTEMP (0x00008000)
+
+/* SHORT Temperature in Celsius x 256 */
+#define TEMPMON_SENSOR0 (TEMPMON_BASE + 0x04)
+#define TEMPMON_SENSOR1 (TEMPMON_BASE + 0x08)
+
+#define TEMPMON_FANCONTROL (TEMPMON_BASE + 0x10)
/* ------------------------------------------------------------------------- */
/* I2C Master Controller */
-#define I2C_BASE (0x80) /* Byte offset */
-
#define I2C_COMMAND (0x00)
#define I2C_TIMING (0x04)
#define I2C_TASKLENGTH (0x08) /* High read, low write */
#define I2C_TASKADDRESS (0x0C) /* High read, low write */
-
#define I2C_MONITOR (0x1C)
-#define I2C_BASE_1 (I2C_BASE + 0x00)
-#define I2C_BASE_2 (I2C_BASE + 0x20)
-#define I2C_BASE_3 (I2C_BASE + 0x40)
-#define I2C_BASE_4 (I2C_BASE + 0x60)
-
-#define I2C_BASE_N(i) (I2C_BASE + (i) * 0x20)
-
-#define I2C_TASKMEM_BASE (0x1000) /* Byte offset */
-#define I2C_TASKMEM_SIZE (0x1000)
-
#define I2C_SPEED_400 (0x04030404)
-#define I2C_SPEED_200 (0x09080909)
-#define I2C_SPEED_154 (0x0C0B0C0C)
#define I2C_SPEED_100 (0x13121313)
-#define I2C_SPEED_77 (0x19181919)
-#define I2C_SPEED_50 (0x27262727)
-
/* ------------------------------------------------------------------------- */
/* DMA Controller */
@@ -117,35 +95,62 @@
#define DMA_BASE_WRITE (0x100)
#define DMA_BASE_READ (0x140)
-#define DMA_CONTROL (0x00) /* 64 */
-#define DMA_ERROR (0x04) /* 65 ( only read instance ) */
-
-#define DMA_DIAG_CONTROL (0x1C) /* 71 */
-#define DMA_DIAG_PACKETCOUNTER_LOW (0x20) /* 72 */
-#define DMA_DIAG_PACKETCOUNTER_HIGH (0x24) /* 73 */
-#define DMA_DIAG_TIMECOUNTER_LOW (0x28) /* 74 */
-#define DMA_DIAG_TIMECOUNTER_HIGH (0x2C) /* 75 */
-#define DMA_DIAG_RECHECKCOUNTER (0x30) /* 76 ( Split completions on read ) */
-#define DMA_DIAG_WAITTIMEOUTINIT (0x34) /* 77 */
-#define DMA_DIAG_WAITOVERFLOWCOUNTER (0x38) /* 78 */
-#define DMA_DIAG_WAITCOUNTER (0x3C) /* 79 */
+#define TS_CONTROL(_io) (_io->regs + 0x00)
+#define TS_CONTROL2(_io) (_io->regs + 0x04)
/* ------------------------------------------------------------------------- */
/* DMA Buffer */
-#define TS_INPUT_BASE (0x200)
-#define TS_INPUT_CONTROL(i) (TS_INPUT_BASE + (i) * 16 + 0x00)
+#define DMA_BUFFER_CONTROL(_dma) (_dma->regs + 0x00)
+#define DMA_BUFFER_ACK(_dma) (_dma->regs + 0x04)
+#define DMA_BUFFER_CURRENT(_dma) (_dma->regs + 0x08)
+#define DMA_BUFFER_SIZE(_dma) (_dma->regs + 0x0c)
+
+/* ------------------------------------------------------------------------- */
+/* CI Interface (only CI-Bridge) */
+
+#define CI_BASE (0x400)
+#define CI_CONTROL(i) (CI_BASE + (i) * 32 + 0x00)
+
+#define CI_DO_ATTRIBUTE_RW(i) (CI_BASE + (i) * 32 + 0x04)
+#define CI_DO_IO_RW(i) (CI_BASE + (i) * 32 + 0x08)
+#define CI_READDATA(i) (CI_BASE + (i) * 32 + 0x0c)
+#define CI_DO_READ_ATTRIBUTES(i) (CI_BASE + (i) * 32 + 0x10)
+
+#define CI_RESET_CAM (0x00000001)
+#define CI_POWER_ON (0x00000002)
+#define CI_ENABLE (0x00000004)
+#define CI_BYPASS_DISABLE (0x00000010)
+
+#define CI_CAM_READY (0x00010000)
+#define CI_CAM_DETECT (0x00020000)
+#define CI_READY (0x80000000)
+
+#define CI_READ_CMD (0x40000000)
+#define CI_WRITE_CMD (0x80000000)
+
+#define CI_BUFFER_BASE (0x3000)
+#define CI_BUFFER_SIZE (0x0800)
+
+#define CI_BUFFER(i) (CI_BUFFER_BASE + (i) * CI_BUFFER_SIZE)
+
+/* ------------------------------------------------------------------------- */
+/* LNB commands (mxl5xx / Max S8) */
-#define TS_OUTPUT_BASE (0x280)
-#define TS_OUTPUT_CONTROL(i) (TS_OUTPUT_BASE + (i) * 16 + 0x00)
+#define LNB_BASE (0x400)
+#define LNB_CONTROL(i) (LNB_BASE + (i) * 0x20 + 0x00)
-#define DMA_BUFFER_BASE (0x300)
+#define LNB_CMD (7ULL << 0)
+#define LNB_CMD_NOP 0
+#define LNB_CMD_INIT 1
+#define LNB_CMD_LOW 3
+#define LNB_CMD_HIGH 4
+#define LNB_CMD_OFF 5
+#define LNB_CMD_DISEQC 6
-#define DMA_BUFFER_CONTROL(i) (DMA_BUFFER_BASE + (i) * 16 + 0x00)
-#define DMA_BUFFER_ACK(i) (DMA_BUFFER_BASE + (i) * 16 + 0x04)
-#define DMA_BUFFER_CURRENT(i) (DMA_BUFFER_BASE + (i) * 16 + 0x08)
-#define DMA_BUFFER_SIZE(i) (DMA_BUFFER_BASE + (i) * 16 + 0x0c)
+#define LNB_BUSY (1ULL << 4)
+#define LNB_TONE (1ULL << 15)
-#define DMA_BASE_ADDRESS_TABLE (0x2000)
-#define DMA_BASE_ADDRESS_TABLE_ENTRIES (512)
+#define LNB_BUF_LEVEL(i) (LNB_BASE + (i) * 0x20 + 0x10)
+#define LNB_BUF_WRITE(i) (LNB_BASE + (i) * 0x20 + 0x14)
diff --git a/drivers/media/pci/ddbridge/ddbridge.h b/drivers/media/pci/ddbridge/ddbridge.h
index 4a0e3283d646..e9afa96bd9df 100644
--- a/drivers/media/pci/ddbridge/ddbridge.h
+++ b/drivers/media/pci/ddbridge/ddbridge.h
@@ -1,7 +1,8 @@
/*
* ddbridge.h: Digital Devices PCIe bridge driver
*
- * Copyright (C) 2010-2011 Digital Devices GmbH
+ * Copyright (C) 2010-2017 Digital Devices GmbH
+ * Ralph Metzler <rmetzler@digitaldevices.de>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -20,15 +21,39 @@
#ifndef _DDBRIDGE_H_
#define _DDBRIDGE_H_
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/io.h>
+#include <linux/pci.h>
+#include <linux/timer.h>
+#include <linux/i2c.h>
+#include <linux/swab.h>
+#include <linux/vmalloc.h>
+#include <linux/workqueue.h>
+#include <linux/kthread.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/spi/spi.h>
+#include <linux/gpio.h>
+#include <linux/completion.h>
+
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
-#include <linux/i2c.h>
#include <linux/mutex.h>
#include <asm/dma.h>
-#include <linux/dvb/frontend.h>
+#include <asm/irq.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
#include <linux/dvb/ca.h>
#include <linux/socket.h>
+#include <linux/device.h>
+#include <linux/io.h>
#include "dmxdev.h"
#include "dvbdev.h"
@@ -37,69 +62,122 @@
#include "dvb_ringbuffer.h"
#include "dvb_ca_en50221.h"
#include "dvb_net.h"
-#include "cxd2099.h"
-#define DDB_MAX_I2C 4
-#define DDB_MAX_PORT 4
-#define DDB_MAX_INPUT 8
-#define DDB_MAX_OUTPUT 4
+#define DDBRIDGE_VERSION "0.9.31intermediate-integrated"
+
+#define DDB_MAX_I2C 32
+#define DDB_MAX_PORT 32
+#define DDB_MAX_INPUT 64
+#define DDB_MAX_OUTPUT 32
#define DDB_MAX_LINK 4
#define DDB_LINK_SHIFT 28
#define DDB_LINK_TAG(_x) (_x << DDB_LINK_SHIFT)
-#define DDB_XO2_TYPE_NONE 0
-#define DDB_XO2_TYPE_DUOFLEX 1
-#define DDB_XO2_TYPE_CI 2
+struct ddb_regset {
+ u32 base;
+ u32 num;
+ u32 size;
+};
+
+struct ddb_regmap {
+ u32 irq_base_i2c;
+ u32 irq_base_idma;
+ u32 irq_base_odma;
+
+ const struct ddb_regset *i2c;
+ const struct ddb_regset *i2c_buf;
+ const struct ddb_regset *idma;
+ const struct ddb_regset *idma_buf;
+ const struct ddb_regset *odma;
+ const struct ddb_regset *odma_buf;
+
+ const struct ddb_regset *input;
+ const struct ddb_regset *output;
+
+ const struct ddb_regset *channel;
+};
+
+struct ddb_ids {
+ u16 vendor;
+ u16 device;
+ u16 subvendor;
+ u16 subdevice;
+
+ u32 hwid;
+ u32 regmapid;
+ u32 devid;
+ u32 mac;
+};
struct ddb_info {
int type;
-#define DDB_NONE 0
-#define DDB_OCTOPUS 1
-#define DDB_OCTOPUS_MAX_CT 6
+#define DDB_NONE 0
+#define DDB_OCTOPUS 1
+#define DDB_OCTOPUS_CI 2
+#define DDB_OCTOPUS_MAX 5
+#define DDB_OCTOPUS_MAX_CT 6
char *name;
- int port_num;
- u32 port_type[DDB_MAX_PORT];
+ u32 i2c_mask;
+ u8 port_num;
+ u8 led_num;
+ u8 fan_num;
+ u8 temp_num;
+ u8 temp_bus;
u32 board_control;
u32 board_control_2;
+ u8 mdio_num;
+ u8 con_clock; /* use a continuous clock */
u8 ts_quirks;
#define TS_QUIRK_SERIAL 1
#define TS_QUIRK_REVERSED 2
#define TS_QUIRK_ALT_OSC 8
+ u32 tempmon_irq;
+ const struct ddb_regmap *regmap;
};
-/* DMA_SIZE MUST be divisible by 188 and 128 !!! */
+/* DMA_SIZE MUST be smaller than 256k and
+ * MUST be divisible by 188 and 128 !!!
+ */
+
+#define DMA_MAX_BUFS 32 /* hardware table limit */
-#define INPUT_DMA_MAX_BUFS 32 /* hardware table limit */
#define INPUT_DMA_BUFS 8
#define INPUT_DMA_SIZE (128*47*21)
+#define INPUT_DMA_IRQ_DIV 1
-#define OUTPUT_DMA_MAX_BUFS 32
#define OUTPUT_DMA_BUFS 8
#define OUTPUT_DMA_SIZE (128*47*21)
+#define OUTPUT_DMA_IRQ_DIV 1
struct ddb;
struct ddb_port;
-struct ddb_input {
- struct ddb_port *port;
- u32 nr;
- int attached;
+struct ddb_dma {
+ void *io;
+ u32 regs;
+ u32 bufregs;
- dma_addr_t pbuf[INPUT_DMA_MAX_BUFS];
- u8 *vbuf[INPUT_DMA_MAX_BUFS];
- u32 dma_buf_num;
- u32 dma_buf_size;
+ dma_addr_t pbuf[DMA_MAX_BUFS];
+ u8 *vbuf[DMA_MAX_BUFS];
+ u32 num;
+ u32 size;
+ u32 div;
+ u32 bufval;
- struct tasklet_struct tasklet;
+ struct work_struct work;
spinlock_t lock;
wait_queue_head_t wq;
int running;
u32 stat;
+ u32 ctrl;
u32 cbuf;
u32 coff;
+};
- struct dvb_adapter adap;
+struct ddb_dvb {
+ struct dvb_adapter *adap;
+ int adap_registered;
struct dvb_device *dev;
struct i2c_client *i2c_client[1];
struct dvb_frontend *fe;
@@ -110,97 +188,210 @@ struct ddb_input {
struct dmx_frontend hw_frontend;
struct dmx_frontend mem_frontend;
int users;
- int (*gate_ctrl)(struct dvb_frontend *, int);
+ u32 attached;
+ u8 input;
+
+ enum fe_sec_tone_mode tone;
+ enum fe_sec_voltage voltage;
+
+ int (*i2c_gate_ctrl)(struct dvb_frontend *, int);
+ int (*set_voltage)(struct dvb_frontend *fe,
+ enum fe_sec_voltage voltage);
+ int (*set_input)(struct dvb_frontend *fe, int input);
+ int (*diseqc_send_master_cmd)(struct dvb_frontend *fe,
+ struct dvb_diseqc_master_cmd *cmd);
};
-struct ddb_output {
+struct ddb_ci {
+ struct dvb_ca_en50221 en;
struct ddb_port *port;
u32 nr;
- dma_addr_t pbuf[OUTPUT_DMA_MAX_BUFS];
- u8 *vbuf[OUTPUT_DMA_MAX_BUFS];
- u32 dma_buf_num;
- u32 dma_buf_size;
- struct tasklet_struct tasklet;
- spinlock_t lock;
- wait_queue_head_t wq;
- int running;
- u32 stat;
- u32 cbuf;
- u32 coff;
+ struct mutex lock;
+};
- struct dvb_adapter adap;
- struct dvb_device *dev;
+struct ddb_io {
+ struct ddb_port *port;
+ u32 nr;
+ u32 regs;
+ struct ddb_dma *dma;
+ struct ddb_io *redo;
+ struct ddb_io *redi;
};
+#define ddb_output ddb_io
+#define ddb_input ddb_io
+
struct ddb_i2c {
struct ddb *dev;
u32 nr;
- struct i2c_adapter adap;
- struct i2c_adapter adap2;
u32 regs;
+ u32 link;
+ struct i2c_adapter adap;
u32 rbuf;
u32 wbuf;
- int done;
- wait_queue_head_t wq;
+ u32 bsize;
+ struct completion completion;
};
struct ddb_port {
struct ddb *dev;
u32 nr;
+ u32 pnr;
+ u32 regs;
+ u32 lnr;
struct ddb_i2c *i2c;
struct mutex i2c_gate_lock;
u32 class;
#define DDB_PORT_NONE 0
#define DDB_PORT_CI 1
#define DDB_PORT_TUNER 2
- u32 type;
-#define DDB_TUNER_NONE 0
-#define DDB_TUNER_DVBS_ST 1
-#define DDB_TUNER_DVBS_ST_AA 2
-#define DDB_TUNER_DVBCT2_SONY_P 7
-#define DDB_TUNER_DVBC2T2_SONY_P 8
-#define DDB_TUNER_ISDBT_SONY_P 9
-#define DDB_TUNER_DVBC2T2I_SONY_P 15
-#define DDB_TUNER_DVBCT_TR 16
-#define DDB_TUNER_DVBCT_ST 17
-#define DDB_TUNER_XO2_DVBS_STV0910 32
-#define DDB_TUNER_XO2_DVBCT2_SONY 33
-#define DDB_TUNER_XO2_ISDBT_SONY 34
-#define DDB_TUNER_XO2_DVBC2T2_SONY 35
-#define DDB_TUNER_XO2_ATSC_ST 36
-#define DDB_TUNER_XO2_DVBC2T2I_SONY 37
-
- u32 adr;
+#define DDB_PORT_LOOP 3
+ char *name;
+ char *type_name;
+ u32 type;
+#define DDB_TUNER_NONE 0
+#define DDB_TUNER_DVBS_ST 1
+#define DDB_TUNER_DVBS_ST_AA 2
+#define DDB_TUNER_DVBCT_TR 3
+#define DDB_TUNER_DVBCT_ST 4
+#define DDB_CI_INTERNAL 5
+#define DDB_CI_EXTERNAL_SONY 6
+#define DDB_TUNER_DVBCT2_SONY_P 7
+#define DDB_TUNER_DVBC2T2_SONY_P 8
+#define DDB_TUNER_ISDBT_SONY_P 9
+#define DDB_TUNER_DVBS_STV0910_P 10
+#define DDB_TUNER_MXL5XX 11
+#define DDB_CI_EXTERNAL_XO2 12
+#define DDB_CI_EXTERNAL_XO2_B 13
+#define DDB_TUNER_DVBS_STV0910_PR 14
+#define DDB_TUNER_DVBC2T2I_SONY_P 15
+
+#define DDB_TUNER_XO2 32
+#define DDB_TUNER_DVBS_STV0910 (DDB_TUNER_XO2 + 0)
+#define DDB_TUNER_DVBCT2_SONY (DDB_TUNER_XO2 + 1)
+#define DDB_TUNER_ISDBT_SONY (DDB_TUNER_XO2 + 2)
+#define DDB_TUNER_DVBC2T2_SONY (DDB_TUNER_XO2 + 3)
+#define DDB_TUNER_ATSC_ST (DDB_TUNER_XO2 + 4)
+#define DDB_TUNER_DVBC2T2I_SONY (DDB_TUNER_XO2 + 5)
struct ddb_input *input[2];
struct ddb_output *output;
struct dvb_ca_en50221 *en;
+ struct ddb_dvb dvb[2];
+ u32 gap;
+ u32 obr;
+ u8 creg;
+};
+
+#define CM_STARTUP_DELAY 2
+#define CM_AVERAGE 20
+#define CM_GAIN 10
+
+#define HW_LSB_SHIFT 12
+#define HW_LSB_MASK 0x1000
+
+#define CM_IDLE 0
+#define CM_STARTUP 1
+#define CM_ADJUST 2
+
+#define TS_CAPTURE_LEN (4096)
+
+struct ddb_lnb {
+ struct mutex lock;
+ u32 tone;
+ enum fe_sec_voltage oldvoltage[4];
+ u32 voltage[4];
+ u32 voltages;
+ u32 fmode;
+};
+
+struct ddb_link {
+ struct ddb *dev;
+ const struct ddb_info *info;
+ u32 nr;
+ u32 regs;
+ spinlock_t lock;
+ struct mutex flash_mutex;
+ struct ddb_lnb lnb;
+ struct tasklet_struct tasklet;
+ struct ddb_ids ids;
+
+ spinlock_t temp_lock;
+ int overtemperature_error;
+ u8 temp_tab[11];
};
struct ddb {
struct pci_dev *pdev;
+ struct platform_device *pfdev;
+ struct device *dev;
+
+ int msi;
+ struct workqueue_struct *wq;
+ u32 has_dma;
+
+ struct ddb_link link[DDB_MAX_LINK];
unsigned char __iomem *regs;
+ u32 regs_len;
+ u32 port_num;
struct ddb_port port[DDB_MAX_PORT];
+ u32 i2c_num;
struct ddb_i2c i2c[DDB_MAX_I2C];
struct ddb_input input[DDB_MAX_INPUT];
struct ddb_output output[DDB_MAX_OUTPUT];
+ struct dvb_adapter adap[DDB_MAX_INPUT];
+ struct ddb_dma idma[DDB_MAX_INPUT];
+ struct ddb_dma odma[DDB_MAX_OUTPUT];
+
+ void (*handler[4][256])(unsigned long);
+ unsigned long handler_data[4][256];
struct device *ddb_dev;
- int nr;
+ u32 ddb_dev_users;
+ u32 nr;
u8 iobuf[1028];
- struct ddb_info *info;
- int msi;
+ u8 leds;
+ u32 ts_irq;
+ u32 i2c_irq;
+
+ struct mutex mutex;
+
+ u8 tsbuf[TS_CAPTURE_LEN];
};
/****************************************************************************/
+/****************************************************************************/
+/****************************************************************************/
-#define ddbwritel(_val, _adr) writel((_val), \
- dev->regs+(_adr))
-#define ddbreadl(_adr) readl(dev->regs+(_adr))
-#define ddbcpyto(_adr, _src, _count) memcpy_toio(dev->regs+(_adr), (_src), (_count))
-#define ddbcpyfrom(_dst, _adr, _count) memcpy_fromio((_dst), dev->regs+(_adr), (_count))
+int ddbridge_flashread(struct ddb *dev, u32 link, u8 *buf, u32 addr, u32 len);
/****************************************************************************/
-#endif
+/* ddbridge-main.c (modparams) */
+extern int ci_bitrate;
+extern int ts_loop;
+extern int xo2_speed;
+extern int alt_dma;
+extern int no_init;
+extern int stv0910_single;
+extern struct workqueue_struct *ddb_wq;
+
+/* ddbridge-core.c */
+void ddb_ports_detach(struct ddb *dev);
+void ddb_ports_release(struct ddb *dev);
+void ddb_buffers_free(struct ddb *dev);
+void ddb_device_destroy(struct ddb *dev);
+irqreturn_t ddb_irq_handler0(int irq, void *dev_id);
+irqreturn_t ddb_irq_handler1(int irq, void *dev_id);
+irqreturn_t ddb_irq_handler(int irq, void *dev_id);
+void ddb_ports_init(struct ddb *dev);
+int ddb_buffers_alloc(struct ddb *dev);
+int ddb_ports_attach(struct ddb *dev);
+int ddb_device_create(struct ddb *dev);
+int ddb_class_create(void);
+void ddb_class_destroy(void);
+int ddb_init(struct ddb *dev);
+void ddb_unmap(struct ddb *dev);
+
+#endif /* _DDBRIDGE_H_ */
diff --git a/drivers/media/pci/dm1105/dm1105.c b/drivers/media/pci/dm1105/dm1105.c
index 1d41934cfaf5..7c3900dec368 100644
--- a/drivers/media/pci/dm1105/dm1105.c
+++ b/drivers/media/pci/dm1105/dm1105.c
@@ -571,7 +571,7 @@ static u32 functionality(struct i2c_adapter *adap)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm dm1105_algo = {
+static const struct i2c_algorithm dm1105_algo = {
.master_xfer = dm1105_i2c_xfer,
.functionality = functionality,
};
@@ -675,7 +675,7 @@ static void dm1105_emit_key(struct work_struct *work)
data = (ircom >> 8) & 0x7f;
/* FIXME: UNKNOWN because we don't generate a full NEC scancode (yet?) */
- rc_keydown(ir->dev, RC_TYPE_UNKNOWN, data, 0);
+ rc_keydown(ir->dev, RC_PROTO_UNKNOWN, data, 0);
}
/* work handler */
@@ -748,7 +748,7 @@ static int dm1105_ir_init(struct dm1105_dev *dm1105)
dev->driver_name = MODULE_NAME;
dev->map_name = RC_MAP_DM1105_NEC;
- dev->input_name = "DVB on-card IR receiver";
+ dev->device_name = "DVB on-card IR receiver";
dev->input_phys = dm1105->ir.input_phys;
dev->input_id.bustype = BUS_PCI;
dev->input_id.version = 1;
@@ -1208,7 +1208,7 @@ static void dm1105_remove(struct pci_dev *pdev)
kfree(dev);
}
-static struct pci_device_id dm1105_id_table[] = {
+static const struct pci_device_id dm1105_id_table[] = {
{
.vendor = PCI_VENDOR_ID_TRIGEM,
.device = PCI_DEVICE_ID_DM1105,
diff --git a/drivers/media/pci/dt3155/dt3155.c b/drivers/media/pci/dt3155/dt3155.c
index 6a219694b225..1775c36891ae 100644
--- a/drivers/media/pci/dt3155/dt3155.c
+++ b/drivers/media/pci/dt3155/dt3155.c
@@ -499,7 +499,7 @@ static int dt3155_init_board(struct dt3155_priv *pd)
return 0;
}
-static struct video_device dt3155_vdev = {
+static const struct video_device dt3155_vdev = {
.name = DT3155_NAME,
.fops = &dt3155_fops,
.ioctl_ops = &dt3155_ioctl_ops,
diff --git a/drivers/media/pci/ivtv/ivtv-alsa-mixer.c b/drivers/media/pci/ivtv/ivtv-alsa-mixer.c
index ba372a23eb5c..aee453fcff37 100644
--- a/drivers/media/pci/ivtv/ivtv-alsa-mixer.c
+++ b/drivers/media/pci/ivtv/ivtv-alsa-mixer.c
@@ -156,7 +156,7 @@ int __init snd_ivtv_mixer_create(struct snd_ivtv_card *itvsc)
strlcpy(sc->mixername, "CX2341[56] Mixer", sizeof(sc->mixername));
- ret = snd_ctl_add(sc, snd_ctl_new1(snd_ivtv_mixer_tv_vol, itvsc));
+ ret = snd_ctl_add(sc, snd_ctl_new1(&snd_ivtv_mixer_tv_vol, itvsc));
if (ret) {
IVTV_ALSA_WARN("%s: failed to add %s control, err %d\n",
__func__, snd_ivtv_mixer_tv_vol.name, ret);
diff --git a/drivers/media/pci/ivtv/ivtv-alsa-pcm.c b/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
index 417d03da01f0..5326d86fa375 100644
--- a/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
+++ b/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
@@ -41,7 +41,7 @@ MODULE_PARM_DESC(pcm_debug, "enable debug messages for pcm");
pr_info("ivtv-alsa-pcm %s: " fmt, __func__, ##arg); \
} while (0)
-static struct snd_pcm_hardware snd_ivtv_hw_capture = {
+static const struct snd_pcm_hardware snd_ivtv_hw_capture = {
.info = SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_INTERLEAVED |
diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
index e8fa99b6c7b4..54dcac4b2229 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.c
+++ b/drivers/media/pci/ivtv/ivtv-driver.c
@@ -73,7 +73,7 @@ int (*ivtv_ext_init)(struct ivtv *);
EXPORT_SYMBOL(ivtv_ext_init);
/* add your revision and whatnot here */
-static struct pci_device_id ivtv_pci_tbl[] = {
+static const struct pci_device_id ivtv_pci_tbl[] = {
{PCI_VENDOR_ID_ICOMP, PCI_DEVICE_ID_IVTV15,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_ICOMP, PCI_DEVICE_ID_IVTV16,
diff --git a/drivers/media/pci/ivtv/ivtv-i2c.c b/drivers/media/pci/ivtv/ivtv-i2c.c
index dea80efd5836..5a35e366f4c0 100644
--- a/drivers/media/pci/ivtv/ivtv-i2c.c
+++ b/drivers/media/pci/ivtv/ivtv-i2c.c
@@ -148,7 +148,7 @@ static const char * const hw_devicenames[] = {
"ir_video", /* IVTV_HW_I2C_IR_RX_ADAPTEC */
};
-static int get_key_adaptec(struct IR_i2c *ir, enum rc_type *protocol,
+static int get_key_adaptec(struct IR_i2c *ir, enum rc_proto *protocol,
u32 *scancode, u8 *toggle)
{
unsigned char keybuf[4];
@@ -168,7 +168,7 @@ static int get_key_adaptec(struct IR_i2c *ir, enum rc_type *protocol,
keybuf[2] &= 0x7f;
keybuf[3] |= 0x80;
- *protocol = RC_TYPE_UNKNOWN;
+ *protocol = RC_PROTO_UNKNOWN;
*scancode = keybuf[3] | keybuf[2] << 8 | keybuf[1] << 16 |keybuf[0] << 24;
*toggle = 0;
return 1;
@@ -201,22 +201,22 @@ static int ivtv_i2c_new_ir(struct ivtv *itv, u32 hw, const char *type, u8 addr)
init_data->ir_codes = RC_MAP_AVERMEDIA_CARDBUS;
init_data->internal_get_key_func =
IR_KBD_GET_KEY_AVERMEDIA_CARDBUS;
- init_data->type = RC_BIT_OTHER;
+ init_data->type = RC_PROTO_BIT_OTHER;
init_data->name = "AVerMedia AVerTV card";
break;
case IVTV_HW_I2C_IR_RX_HAUP_EXT:
case IVTV_HW_I2C_IR_RX_HAUP_INT:
init_data->ir_codes = RC_MAP_HAUPPAUGE;
init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP;
- init_data->type = RC_BIT_RC5;
+ init_data->type = RC_PROTO_BIT_RC5;
init_data->name = itv->card_name;
break;
case IVTV_HW_Z8F0811_IR_RX_HAUP:
/* Default to grey remote */
init_data->ir_codes = RC_MAP_HAUPPAUGE;
init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
- init_data->type = RC_BIT_RC5 | RC_BIT_RC6_MCE |
- RC_BIT_RC6_6A_32;
+ init_data->type = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE |
+ RC_PROTO_BIT_RC6_6A_32;
init_data->name = itv->card_name;
break;
case IVTV_HW_I2C_IR_RX_ADAPTEC:
@@ -224,7 +224,7 @@ static int ivtv_i2c_new_ir(struct ivtv *itv, u32 hw, const char *type, u8 addr)
init_data->name = itv->card_name;
/* FIXME: The protocol and RC_MAP needs to be corrected */
init_data->ir_codes = RC_MAP_EMPTY;
- init_data->type = RC_BIT_UNKNOWN;
+ init_data->type = RC_PROTO_BIT_UNKNOWN;
break;
}
@@ -632,7 +632,7 @@ static const struct i2c_algorithm ivtv_algo = {
};
/* template for our-bit banger */
-static struct i2c_adapter ivtv_i2c_adap_hw_template = {
+static const struct i2c_adapter ivtv_i2c_adap_hw_template = {
.name = "ivtv i2c driver",
.algo = &ivtv_algo,
.algo_data = NULL, /* filled from template */
@@ -682,7 +682,7 @@ static int ivtv_getsda_old(void *data)
}
/* template for i2c-bit-algo */
-static struct i2c_adapter ivtv_i2c_adap_template = {
+static const struct i2c_adapter ivtv_i2c_adap_template = {
.name = "ivtv i2c driver",
.algo = NULL, /* set by i2c-algo-bit */
.algo_data = NULL, /* filled from template */
diff --git a/drivers/media/pci/mantis/hopper_cards.c b/drivers/media/pci/mantis/hopper_cards.c
index 68b5800030b7..11e987860b23 100644
--- a/drivers/media/pci/mantis/hopper_cards.c
+++ b/drivers/media/pci/mantis/hopper_cards.c
@@ -255,7 +255,7 @@ static void hopper_pci_remove(struct pci_dev *pdev)
}
-static struct pci_device_id hopper_pci_table[] = {
+static const struct pci_device_id hopper_pci_table[] = {
MAKE_ENTRY(TWINHAN_TECHNOLOGIES, MANTIS_VP_3028_DVB_T, &vp3028_config,
NULL),
{ }
diff --git a/drivers/media/pci/mantis/mantis_cards.c b/drivers/media/pci/mantis/mantis_cards.c
index cdefffc16d9e..adc980d33711 100644
--- a/drivers/media/pci/mantis/mantis_cards.c
+++ b/drivers/media/pci/mantis/mantis_cards.c
@@ -281,7 +281,7 @@ static void mantis_pci_remove(struct pci_dev *pdev)
return;
}
-static struct pci_device_id mantis_pci_table[] = {
+static const struct pci_device_id mantis_pci_table[] = {
MAKE_ENTRY(TECHNISAT, CABLESTAR_HD2, &vp2040_config,
RC_MAP_TECHNISAT_TS35),
MAKE_ENTRY(TECHNISAT, SKYSTAR_HD2_10, &vp1041_config,
diff --git a/drivers/media/pci/mantis/mantis_common.h b/drivers/media/pci/mantis/mantis_common.h
index d48778a366a9..a664c319ef0a 100644
--- a/drivers/media/pci/mantis/mantis_common.h
+++ b/drivers/media/pci/mantis/mantis_common.h
@@ -176,7 +176,7 @@ struct mantis_pci {
struct work_struct uart_work;
struct rc_dev *rc;
- char input_name[80];
+ char device_name[80];
char input_phys[80];
char *rc_map_name;
};
diff --git a/drivers/media/pci/mantis/mantis_i2c.c b/drivers/media/pci/mantis/mantis_i2c.c
index d72ee47dc6e4..496c10dfc4df 100644
--- a/drivers/media/pci/mantis/mantis_i2c.c
+++ b/drivers/media/pci/mantis/mantis_i2c.c
@@ -212,7 +212,7 @@ static u32 mantis_i2c_func(struct i2c_adapter *adapter)
return I2C_FUNC_SMBUS_EMUL;
}
-static struct i2c_algorithm mantis_algo = {
+static const struct i2c_algorithm mantis_algo = {
.master_xfer = mantis_i2c_xfer,
.functionality = mantis_i2c_func,
};
diff --git a/drivers/media/pci/mantis/mantis_input.c b/drivers/media/pci/mantis/mantis_input.c
index 50d10cb7d49d..7519dcc934dd 100644
--- a/drivers/media/pci/mantis/mantis_input.c
+++ b/drivers/media/pci/mantis/mantis_input.c
@@ -31,7 +31,7 @@
void mantis_input_process(struct mantis_pci *mantis, int scancode)
{
if (mantis->rc)
- rc_keydown(mantis->rc, RC_TYPE_UNKNOWN, scancode, 0);
+ rc_keydown(mantis->rc, RC_PROTO_UNKNOWN, scancode, 0);
}
int mantis_input_init(struct mantis_pci *mantis)
@@ -46,12 +46,12 @@ int mantis_input_init(struct mantis_pci *mantis)
goto out;
}
- snprintf(mantis->input_name, sizeof(mantis->input_name),
+ snprintf(mantis->device_name, sizeof(mantis->device_name),
"Mantis %s IR receiver", mantis->hwconfig->model_name);
snprintf(mantis->input_phys, sizeof(mantis->input_phys),
"pci-%s/ir0", pci_name(mantis->pdev));
- dev->input_name = mantis->input_name;
+ dev->device_name = mantis->device_name;
dev->input_phys = mantis->input_phys;
dev->input_id.bustype = BUS_PCI;
dev->input_id.vendor = mantis->vendor_id;
diff --git a/drivers/media/pci/meye/meye.c b/drivers/media/pci/meye/meye.c
index 9c4a024745de..49e047e4a81e 100644
--- a/drivers/media/pci/meye/meye.c
+++ b/drivers/media/pci/meye/meye.c
@@ -1533,7 +1533,7 @@ static const struct v4l2_ioctl_ops meye_ioctl_ops = {
.vidioc_default = vidioc_default,
};
-static struct video_device meye_template = {
+static const struct video_device meye_template = {
.name = "meye",
.fops = &meye_fops,
.ioctl_ops = &meye_ioctl_ops,
@@ -1801,7 +1801,7 @@ static void meye_remove(struct pci_dev *pcidev)
printk(KERN_INFO "meye: removed\n");
}
-static struct pci_device_id meye_pci_tbl[] = {
+static const struct pci_device_id meye_pci_tbl[] = {
{ PCI_VDEVICE(KAWASAKI, PCI_DEVICE_ID_MCHIP_KL5A72002), 0 },
{ }
};
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
index 5c0a4e614413..60e6cd5b3a03 100644
--- a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
@@ -1014,7 +1014,7 @@ static void netup_unidvb_finidev(struct pci_dev *pci_dev)
}
-static struct pci_device_id netup_unidvb_pci_tbl[] = {
+static const struct pci_device_id netup_unidvb_pci_tbl[] = {
{ PCI_DEVICE(0x1b55, 0x18f6) }, /* hw rev. 1.3 */
{ PCI_DEVICE(0x1b55, 0x18f7) }, /* hw rev. 1.4 */
{ 0, }
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c b/drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c
index b49e4f9788e8..b13e319d24b7 100644
--- a/drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c
@@ -300,7 +300,7 @@ static const struct i2c_algorithm netup_i2c_algorithm = {
.functionality = netup_i2c_func,
};
-static struct i2c_adapter netup_i2c_adapter = {
+static const struct i2c_adapter netup_i2c_adapter = {
.owner = THIS_MODULE,
.name = NETUP_UNIDVB_NAME,
.class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
diff --git a/drivers/media/pci/ngene/ngene-core.c b/drivers/media/pci/ngene/ngene-core.c
index ce69e648b663..8c92cb7f7e72 100644
--- a/drivers/media/pci/ngene/ngene-core.c
+++ b/drivers/media/pci/ngene/ngene-core.c
@@ -336,9 +336,9 @@ int ngene_command(struct ngene *dev, struct ngene_command *com)
{
int result;
- down(&dev->cmd_mutex);
+ mutex_lock(&dev->cmd_mutex);
result = ngene_command_mutex(dev, com);
- up(&dev->cmd_mutex);
+ mutex_unlock(&dev->cmd_mutex);
return result;
}
@@ -560,7 +560,6 @@ static int ngene_command_stream_control(struct ngene *dev, u8 stream,
u16 BsSPI = ((stream & 1) ? 0x9800 : 0x9700);
u16 BsSDO = 0x9B00;
- down(&dev->stream_mutex);
memset(&com, 0, sizeof(com));
com.cmd.hdr.Opcode = CMD_CONTROL;
com.cmd.hdr.Length = sizeof(struct FW_STREAM_CONTROL) - 2;
@@ -586,17 +585,13 @@ static int ngene_command_stream_control(struct ngene *dev, u8 stream,
chan->State = KSSTATE_ACQUIRE;
chan->HWState = HWSTATE_STOP;
spin_unlock_irq(&chan->state_lock);
- if (ngene_command(dev, &com) < 0) {
- up(&dev->stream_mutex);
+ if (ngene_command(dev, &com) < 0)
return -1;
- }
/* clear_buffers(chan); */
flush_buffers(chan);
- up(&dev->stream_mutex);
return 0;
}
spin_unlock_irq(&chan->state_lock);
- up(&dev->stream_mutex);
return 0;
}
@@ -692,11 +687,9 @@ static int ngene_command_stream_control(struct ngene *dev, u8 stream,
chan->HWState = HWSTATE_STARTUP;
spin_unlock_irq(&chan->state_lock);
- if (ngene_command(dev, &com) < 0) {
- up(&dev->stream_mutex);
+ if (ngene_command(dev, &com) < 0)
return -1;
- }
- up(&dev->stream_mutex);
+
return 0;
}
@@ -750,8 +743,11 @@ void set_transfer(struct ngene_channel *chan, int state)
/* else printk(KERN_INFO DEVICE_NAME ": lock=%08x\n",
ngreadl(0x9310)); */
+ mutex_lock(&dev->stream_mutex);
ret = ngene_command_stream_control(dev, chan->number,
control, mode, flags);
+ mutex_unlock(&dev->stream_mutex);
+
if (!ret)
chan->running = state;
else
@@ -1283,7 +1279,7 @@ static int ngene_load_firm(struct ngene *dev)
static void ngene_stop(struct ngene *dev)
{
- down(&dev->cmd_mutex);
+ mutex_destroy(&dev->cmd_mutex);
i2c_del_adapter(&(dev->channel[0].i2c_adapter));
i2c_del_adapter(&(dev->channel[1].i2c_adapter));
ngwritel(0, NGENE_INT_ENABLE);
@@ -1346,10 +1342,10 @@ static int ngene_start(struct ngene *dev)
init_waitqueue_head(&dev->cmd_wq);
init_waitqueue_head(&dev->tx_wq);
init_waitqueue_head(&dev->rx_wq);
- sema_init(&dev->cmd_mutex, 1);
- sema_init(&dev->stream_mutex, 1);
+ mutex_init(&dev->cmd_mutex);
+ mutex_init(&dev->stream_mutex);
sema_init(&dev->pll_mutex, 1);
- sema_init(&dev->i2c_switch_mutex, 1);
+ mutex_init(&dev->i2c_switch_mutex);
spin_lock_init(&dev->cmd_lock);
for (i = 0; i < MAX_STREAM; i++)
spin_lock_init(&dev->channel[i].state_lock);
@@ -1606,10 +1602,10 @@ static void ngene_unlink(struct ngene *dev)
com.in_len = 3;
com.out_len = 1;
- down(&dev->cmd_mutex);
+ mutex_lock(&dev->cmd_mutex);
ngwritel(0, NGENE_INT_ENABLE);
ngene_command_mutex(dev, &com);
- up(&dev->cmd_mutex);
+ mutex_unlock(&dev->cmd_mutex);
}
void ngene_shutdown(struct pci_dev *pdev)
diff --git a/drivers/media/pci/ngene/ngene-i2c.c b/drivers/media/pci/ngene/ngene-i2c.c
index cf39fcf54adf..3004947f300b 100644
--- a/drivers/media/pci/ngene/ngene-i2c.c
+++ b/drivers/media/pci/ngene/ngene-i2c.c
@@ -118,7 +118,7 @@ static int ngene_i2c_master_xfer(struct i2c_adapter *adapter,
(struct ngene_channel *)i2c_get_adapdata(adapter);
struct ngene *dev = chan->dev;
- down(&dev->i2c_switch_mutex);
+ mutex_lock(&dev->i2c_switch_mutex);
ngene_i2c_set_bus(dev, chan->number);
if (num == 2 && msg[1].flags & I2C_M_RD && !(msg[0].flags & I2C_M_RD))
@@ -136,11 +136,11 @@ static int ngene_i2c_master_xfer(struct i2c_adapter *adapter,
msg[0].buf, msg[0].len, 0))
goto done;
- up(&dev->i2c_switch_mutex);
+ mutex_unlock(&dev->i2c_switch_mutex);
return -EIO;
done:
- up(&dev->i2c_switch_mutex);
+ mutex_unlock(&dev->i2c_switch_mutex);
return num;
}
@@ -150,7 +150,7 @@ static u32 ngene_i2c_functionality(struct i2c_adapter *adap)
return I2C_FUNC_SMBUS_EMUL;
}
-static struct i2c_algorithm ngene_i2c_algo = {
+static const struct i2c_algorithm ngene_i2c_algo = {
.master_xfer = ngene_i2c_master_xfer,
.functionality = ngene_i2c_functionality,
};
diff --git a/drivers/media/pci/ngene/ngene.h b/drivers/media/pci/ngene/ngene.h
index 10d8f74c4f0a..7c7cd217333d 100644
--- a/drivers/media/pci/ngene/ngene.h
+++ b/drivers/media/pci/ngene/ngene.h
@@ -762,10 +762,10 @@ struct ngene {
wait_queue_head_t cmd_wq;
int cmd_done;
- struct semaphore cmd_mutex;
- struct semaphore stream_mutex;
+ struct mutex cmd_mutex;
+ struct mutex stream_mutex;
struct semaphore pll_mutex;
- struct semaphore i2c_switch_mutex;
+ struct mutex i2c_switch_mutex;
int i2c_current_channel;
int i2c_current_bus;
spinlock_t cmd_lock;
diff --git a/drivers/media/pci/pluto2/pluto2.c b/drivers/media/pci/pluto2/pluto2.c
index 74838109afe5..39dcba2b620c 100644
--- a/drivers/media/pci/pluto2/pluto2.c
+++ b/drivers/media/pci/pluto2/pluto2.c
@@ -770,7 +770,7 @@ static void pluto2_remove(struct pci_dev *pdev)
#define PCI_DEVICE_ID_PLUTO2 0x0001
#endif
-static struct pci_device_id pluto2_id_table[] = {
+static const struct pci_device_id pluto2_id_table[] = {
{
.vendor = PCI_VENDOR_ID_SCM,
.device = PCI_DEVICE_ID_PLUTO2,
diff --git a/drivers/media/pci/pt1/pt1.c b/drivers/media/pci/pt1/pt1.c
index 3219d2f3271e..b6b1a8d20d86 100644
--- a/drivers/media/pci/pt1/pt1.c
+++ b/drivers/media/pci/pt1/pt1.c
@@ -1202,7 +1202,7 @@ err:
}
-static struct pci_device_id pt1_id_table[] = {
+static const struct pci_device_id pt1_id_table[] = {
{ PCI_DEVICE(0x10ee, 0x211a) },
{ PCI_DEVICE(0x10ee, 0x222a) },
{ },
diff --git a/drivers/media/pci/pt3/pt3.c b/drivers/media/pci/pt3/pt3.c
index e8b5d0992157..34044a45fecc 100644
--- a/drivers/media/pci/pt3/pt3.c
+++ b/drivers/media/pci/pt3/pt3.c
@@ -472,7 +472,6 @@ static int pt3_fetch_thread(void *data)
}
dev_dbg(adap->dvb_adap.device, "PT3: [%s] exited\n",
adap->thread->comm);
- adap->thread = NULL;
return 0;
}
@@ -486,6 +485,7 @@ static int pt3_start_streaming(struct pt3_adapter *adap)
if (IS_ERR(thread)) {
int ret = PTR_ERR(thread);
+ adap->thread = NULL;
dev_warn(adap->dvb_adap.device,
"PT3 (adap:%d, dmx:%d): failed to start kthread\n",
adap->dvb_adap.num, adap->dmxdev.dvbdev->id);
@@ -508,6 +508,7 @@ static int pt3_stop_streaming(struct pt3_adapter *adap)
/* kill the fetching thread */
ret = kthread_stop(adap->thread);
+ adap->thread = NULL;
return ret;
}
@@ -520,14 +521,8 @@ static int pt3_start_feed(struct dvb_demux_feed *feed)
adap = container_of(feed->demux, struct pt3_adapter, demux);
adap->num_feeds++;
- if (adap->thread)
+ if (adap->num_feeds > 1)
return 0;
- if (adap->num_feeds != 1) {
- dev_warn(adap->dvb_adap.device,
- "%s: unmatched start/stop_feed in adap:%i/dmx:%i\n",
- __func__, adap->dvb_adap.num, adap->dmxdev.dvbdev->id);
- adap->num_feeds = 1;
- }
return pt3_start_streaming(adap);
diff --git a/drivers/media/pci/saa7134/saa7134-alsa.c b/drivers/media/pci/saa7134/saa7134-alsa.c
index bf358ec7aca5..c59b69f1af9d 100644
--- a/drivers/media/pci/saa7134/saa7134-alsa.c
+++ b/drivers/media/pci/saa7134/saa7134-alsa.c
@@ -627,7 +627,7 @@ snd_card_saa7134_capture_pointer(struct snd_pcm_substream * substream)
* switching to 32kHz without any frequency translation
*/
-static struct snd_pcm_hardware snd_card_saa7134_capture =
+static const struct snd_pcm_hardware snd_card_saa7134_capture =
{
.info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
diff --git a/drivers/media/pci/saa7134/saa7134-empress.c b/drivers/media/pci/saa7134/saa7134-empress.c
index b1d3648dcba1..66acfd35ffc6 100644
--- a/drivers/media/pci/saa7134/saa7134-empress.c
+++ b/drivers/media/pci/saa7134/saa7134-empress.c
@@ -205,7 +205,7 @@ static const struct v4l2_ioctl_ops ts_ioctl_ops = {
/* ----------------------------------------------------------- */
-static struct video_device saa7134_empress_template = {
+static const struct video_device saa7134_empress_template = {
.name = "saa7134-empress",
.fops = &ts_fops,
.ioctl_ops = &ts_ioctl_ops,
diff --git a/drivers/media/pci/saa7134/saa7134-i2c.c b/drivers/media/pci/saa7134/saa7134-i2c.c
index 9d0e69eae036..8f2ed632840f 100644
--- a/drivers/media/pci/saa7134/saa7134-i2c.c
+++ b/drivers/media/pci/saa7134/saa7134-i2c.c
@@ -339,7 +339,7 @@ static const struct i2c_algorithm saa7134_algo = {
.functionality = functionality,
};
-static struct i2c_adapter saa7134_adap_template = {
+static const struct i2c_adapter saa7134_adap_template = {
.owner = THIS_MODULE,
.name = "saa7134",
.algo = &saa7134_algo,
diff --git a/drivers/media/pci/saa7134/saa7134-input.c b/drivers/media/pci/saa7134/saa7134-input.c
index 78849c19f68a..9337e4615519 100644
--- a/drivers/media/pci/saa7134/saa7134-input.c
+++ b/drivers/media/pci/saa7134/saa7134-input.c
@@ -83,14 +83,16 @@ static int build_key(struct saa7134_dev *dev)
if (data == ir->mask_keycode)
rc_keyup(ir->dev);
else
- rc_keydown_notimeout(ir->dev, RC_TYPE_UNKNOWN, data, 0);
+ rc_keydown_notimeout(ir->dev, RC_PROTO_UNKNOWN, data,
+ 0);
return 0;
}
if (ir->polling) {
if ((ir->mask_keydown && (0 != (gpio & ir->mask_keydown))) ||
(ir->mask_keyup && (0 == (gpio & ir->mask_keyup)))) {
- rc_keydown_notimeout(ir->dev, RC_TYPE_UNKNOWN, data, 0);
+ rc_keydown_notimeout(ir->dev, RC_PROTO_UNKNOWN, data,
+ 0);
} else {
rc_keyup(ir->dev);
}
@@ -98,7 +100,8 @@ static int build_key(struct saa7134_dev *dev)
else { /* IRQ driven mode - handle key press and release in one go */
if ((ir->mask_keydown && (0 != (gpio & ir->mask_keydown))) ||
(ir->mask_keyup && (0 == (gpio & ir->mask_keyup)))) {
- rc_keydown_notimeout(ir->dev, RC_TYPE_UNKNOWN, data, 0);
+ rc_keydown_notimeout(ir->dev, RC_PROTO_UNKNOWN, data,
+ 0);
rc_keyup(ir->dev);
}
}
@@ -108,7 +111,7 @@ static int build_key(struct saa7134_dev *dev)
/* --------------------- Chip specific I2C key builders ----------------- */
-static int get_key_flydvb_trio(struct IR_i2c *ir, enum rc_type *protocol,
+static int get_key_flydvb_trio(struct IR_i2c *ir, enum rc_proto *protocol,
u32 *scancode, u8 *toggle)
{
int gpio;
@@ -154,13 +157,14 @@ static int get_key_flydvb_trio(struct IR_i2c *ir, enum rc_type *protocol,
return -EIO;
}
- *protocol = RC_TYPE_UNKNOWN;
+ *protocol = RC_PROTO_UNKNOWN;
*scancode = b;
*toggle = 0;
return 1;
}
-static int get_key_msi_tvanywhere_plus(struct IR_i2c *ir, enum rc_type *protocol,
+static int get_key_msi_tvanywhere_plus(struct IR_i2c *ir,
+ enum rc_proto *protocol,
u32 *scancode, u8 *toggle)
{
unsigned char b;
@@ -201,14 +205,14 @@ static int get_key_msi_tvanywhere_plus(struct IR_i2c *ir, enum rc_type *protocol
/* Button pressed */
input_dbg("get_key_msi_tvanywhere_plus: Key = 0x%02X\n", b);
- *protocol = RC_TYPE_UNKNOWN;
+ *protocol = RC_PROTO_UNKNOWN;
*scancode = b;
*toggle = 0;
return 1;
}
/* copied and modified from get_key_msi_tvanywhere_plus() */
-static int get_key_kworld_pc150u(struct IR_i2c *ir, enum rc_type *protocol,
+static int get_key_kworld_pc150u(struct IR_i2c *ir, enum rc_proto *protocol,
u32 *scancode, u8 *toggle)
{
unsigned char b;
@@ -249,13 +253,13 @@ static int get_key_kworld_pc150u(struct IR_i2c *ir, enum rc_type *protocol,
/* Button pressed */
input_dbg("get_key_kworld_pc150u: Key = 0x%02X\n", b);
- *protocol = RC_TYPE_UNKNOWN;
+ *protocol = RC_PROTO_UNKNOWN;
*scancode = b;
*toggle = 0;
return 1;
}
-static int get_key_purpletv(struct IR_i2c *ir, enum rc_type *protocol,
+static int get_key_purpletv(struct IR_i2c *ir, enum rc_proto *protocol,
u32 *scancode, u8 *toggle)
{
unsigned char b;
@@ -274,13 +278,13 @@ static int get_key_purpletv(struct IR_i2c *ir, enum rc_type *protocol,
if (b & 0x80)
return 1;
- *protocol = RC_TYPE_UNKNOWN;
+ *protocol = RC_PROTO_UNKNOWN;
*scancode = b;
*toggle = 0;
return 1;
}
-static int get_key_hvr1110(struct IR_i2c *ir, enum rc_type *protocol,
+static int get_key_hvr1110(struct IR_i2c *ir, enum rc_proto *protocol,
u32 *scancode, u8 *toggle)
{
unsigned char buf[5];
@@ -304,14 +308,14 @@ static int get_key_hvr1110(struct IR_i2c *ir, enum rc_type *protocol,
*
* FIXME: start bits could maybe be used...?
*/
- *protocol = RC_TYPE_RC5;
+ *protocol = RC_PROTO_RC5;
*scancode = RC_SCANCODE_RC5(buf[3] & 0x1f, buf[4] >> 2);
*toggle = !!(buf[3] & 0x40);
return 1;
}
-static int get_key_beholdm6xx(struct IR_i2c *ir, enum rc_type *protocol,
+static int get_key_beholdm6xx(struct IR_i2c *ir, enum rc_proto *protocol,
u32 *scancode, u8 *toggle)
{
unsigned char data[12];
@@ -338,7 +342,7 @@ static int get_key_beholdm6xx(struct IR_i2c *ir, enum rc_type *protocol,
if (data[9] != (unsigned char)(~data[8]))
return 0;
- *protocol = RC_TYPE_NECX;
+ *protocol = RC_PROTO_NECX;
*scancode = RC_SCANCODE_NECX(data[11] << 8 | data[10], data[9]);
*toggle = 0;
return 1;
@@ -347,7 +351,7 @@ static int get_key_beholdm6xx(struct IR_i2c *ir, enum rc_type *protocol,
/* Common (grey or coloured) pinnacle PCTV remote handling
*
*/
-static int get_key_pinnacle(struct IR_i2c *ir, enum rc_type *protocol,
+static int get_key_pinnacle(struct IR_i2c *ir, enum rc_proto *protocol,
u32 *scancode, u8 *toggle, int parity_offset,
int marker, int code_modulo)
{
@@ -384,7 +388,7 @@ static int get_key_pinnacle(struct IR_i2c *ir, enum rc_type *protocol,
code %= code_modulo;
- *protocol = RC_TYPE_UNKNOWN;
+ *protocol = RC_PROTO_UNKNOWN;
*scancode = code;
*toggle = 0;
@@ -401,7 +405,7 @@ static int get_key_pinnacle(struct IR_i2c *ir, enum rc_type *protocol,
*
* Sylvain Pasche <sylvain.pasche@gmail.com>
*/
-static int get_key_pinnacle_grey(struct IR_i2c *ir, enum rc_type *protocol,
+static int get_key_pinnacle_grey(struct IR_i2c *ir, enum rc_proto *protocol,
u32 *scancode, u8 *toggle)
{
@@ -413,7 +417,7 @@ static int get_key_pinnacle_grey(struct IR_i2c *ir, enum rc_type *protocol,
*
* Ricardo Cerqueira <v4l@cerqueira.org>
*/
-static int get_key_pinnacle_color(struct IR_i2c *ir, enum rc_type *protocol,
+static int get_key_pinnacle_color(struct IR_i2c *ir, enum rc_proto *protocol,
u32 *scancode, u8 *toggle)
{
/* code_modulo parameter (0x88) is used to reduce code value to fit inside IR_KEYTAB_SIZE
@@ -452,13 +456,6 @@ static void saa7134_input_timer(unsigned long data)
mod_timer(&ir->timer, jiffies + msecs_to_jiffies(ir->polling));
}
-static void ir_raw_decode_timer_end(unsigned long data)
-{
- struct saa7134_dev *dev = (struct saa7134_dev *)data;
-
- ir_raw_event_handle(dev->remote->dev);
-}
-
static int __saa7134_ir_start(void *priv)
{
struct saa7134_dev *dev = priv;
@@ -514,10 +511,6 @@ static int __saa7134_ir_start(void *priv)
(unsigned long)dev);
ir->timer.expires = jiffies + HZ;
add_timer(&ir->timer);
- } else if (ir->raw_decode) {
- /* set timer_end for code completion */
- setup_timer(&ir->timer, ir_raw_decode_timer_end,
- (unsigned long)dev);
}
return 0;
@@ -535,7 +528,7 @@ static void __saa7134_ir_stop(void *priv)
if (!ir->running)
return;
- if (ir->polling || ir->raw_decode)
+ if (ir->polling)
del_timer_sync(&ir->timer);
ir->running = false;
@@ -867,10 +860,12 @@ int saa7134_input_init1(struct saa7134_dev *dev)
rc->priv = dev;
rc->open = saa7134_ir_open;
rc->close = saa7134_ir_close;
- if (raw_decode)
+ if (raw_decode) {
rc->driver_type = RC_DRIVER_IR_RAW;
+ rc->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
+ }
- rc->input_name = ir->name;
+ rc->device_name = ir->name;
rc->input_phys = ir->phys;
rc->input_id.bustype = BUS_PCI;
rc->input_id.version = 1;
@@ -884,6 +879,9 @@ int saa7134_input_init1(struct saa7134_dev *dev)
rc->dev.parent = &dev->pci->dev;
rc->map_name = ir_codes;
rc->driver_name = MODULE_NAME;
+ rc->min_timeout = 1;
+ rc->timeout = IR_DEFAULT_TIMEOUT;
+ rc->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
err = rc_register_device(rc);
if (err)
@@ -1028,7 +1026,7 @@ void saa7134_probe_i2c_ir(struct saa7134_dev *dev)
dev->init_data.name = "BeholdTV";
dev->init_data.get_key = get_key_beholdm6xx;
dev->init_data.ir_codes = RC_MAP_BEHOLD;
- dev->init_data.type = RC_BIT_NECX;
+ dev->init_data.type = RC_PROTO_BIT_NECX;
info.addr = 0x2d;
break;
case SAA7134_BOARD_AVERMEDIA_CARDBUS_501:
@@ -1057,26 +1055,13 @@ void saa7134_probe_i2c_ir(struct saa7134_dev *dev)
static int saa7134_raw_decode_irq(struct saa7134_dev *dev)
{
struct saa7134_card_ir *ir = dev->remote;
- unsigned long timeout;
int space;
/* Generate initial event */
saa_clearb(SAA7134_GPIO_GPMODE3, SAA7134_GPIO_GPRESCAN);
saa_setb(SAA7134_GPIO_GPMODE3, SAA7134_GPIO_GPRESCAN);
space = saa_readl(SAA7134_GPIO_GPSTATUS0 >> 2) & ir->mask_keydown;
- ir_raw_event_store_edge(dev->remote->dev, space ? IR_SPACE : IR_PULSE);
-
- /*
- * Wait 15 ms from the start of the first IR event before processing
- * the event. This time is enough for NEC protocol. May need adjustments
- * to work with other protocols.
- */
- smp_mb();
-
- if (!timer_pending(&ir->timer)) {
- timeout = jiffies + msecs_to_jiffies(15);
- mod_timer(&ir->timer, timeout);
- }
+ ir_raw_event_store_edge(dev->remote->dev, !space);
return 1;
}
diff --git a/drivers/media/pci/saa7146/hexium_gemini.c b/drivers/media/pci/saa7146/hexium_gemini.c
index c889ec9f8a5a..f708cab01fef 100644
--- a/drivers/media/pci/saa7146/hexium_gemini.c
+++ b/drivers/media/pci/saa7146/hexium_gemini.c
@@ -363,7 +363,7 @@ static struct saa7146_pci_extension_data hexium_gemini_dual_4bnc = {
.ext = &hexium_extension,
};
-static struct pci_device_id pci_tbl[] = {
+static const struct pci_device_id pci_tbl[] = {
{
.vendor = PCI_VENDOR_ID_PHILIPS,
.device = PCI_DEVICE_ID_PHILIPS_SAA7146,
diff --git a/drivers/media/pci/saa7146/hexium_orion.c b/drivers/media/pci/saa7146/hexium_orion.c
index c306a92e8909..01f01580c7ca 100644
--- a/drivers/media/pci/saa7146/hexium_orion.c
+++ b/drivers/media/pci/saa7146/hexium_orion.c
@@ -427,7 +427,7 @@ static struct saa7146_pci_extension_data hexium_orion_4bnc = {
.ext = &extension,
};
-static struct pci_device_id pci_tbl[] = {
+static const struct pci_device_id pci_tbl[] = {
{
.vendor = PCI_VENDOR_ID_PHILIPS,
.device = PCI_DEVICE_ID_PHILIPS_SAA7146,
diff --git a/drivers/media/pci/saa7146/mxb.c b/drivers/media/pci/saa7146/mxb.c
index 504d78807639..930218cc2de1 100644
--- a/drivers/media/pci/saa7146/mxb.c
+++ b/drivers/media/pci/saa7146/mxb.c
@@ -819,7 +819,7 @@ static struct saa7146_pci_extension_data mxb = {
.ext = &extension,
};
-static struct pci_device_id pci_tbl[] = {
+static const struct pci_device_id pci_tbl[] = {
{
.vendor = PCI_VENDOR_ID_PHILIPS,
.device = PCI_DEVICE_ID_PHILIPS_SAA7146,
diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c
index 75eed4cc4823..fca36a4910c2 100644
--- a/drivers/media/pci/saa7164/saa7164-core.c
+++ b/drivers/media/pci/saa7164/saa7164-core.c
@@ -1490,7 +1490,7 @@ static void saa7164_finidev(struct pci_dev *pci_dev)
kfree(dev);
}
-static struct pci_device_id saa7164_pci_tbl[] = {
+static const struct pci_device_id saa7164_pci_tbl[] = {
{
/* SAA7164 */
.vendor = 0x1131,
diff --git a/drivers/media/pci/saa7164/saa7164-i2c.c b/drivers/media/pci/saa7164/saa7164-i2c.c
index 430f6789f222..4bcde7c79dc3 100644
--- a/drivers/media/pci/saa7164/saa7164-i2c.c
+++ b/drivers/media/pci/saa7164/saa7164-i2c.c
@@ -78,7 +78,7 @@ static const struct i2c_algorithm saa7164_i2c_algo_template = {
/* ----------------------------------------------------------------------- */
-static struct i2c_adapter saa7164_i2c_adap_template = {
+static const struct i2c_adapter saa7164_i2c_adap_template = {
.name = "saa7164",
.owner = THIS_MODULE,
.algo = &saa7164_i2c_algo_template,
diff --git a/drivers/media/pci/smipcie/smipcie-ir.c b/drivers/media/pci/smipcie/smipcie-ir.c
index d2730c3fdbae..c5595af6b976 100644
--- a/drivers/media/pci/smipcie/smipcie-ir.c
+++ b/drivers/media/pci/smipcie/smipcie-ir.c
@@ -144,7 +144,7 @@ static void smi_ir_decode(struct work_struct *work)
rc5_system = (dwIRCode & 0x7C0) >> 6;
toggle = (dwIRCode & 0x800) ? 1 : 0;
scancode = rc5_system << 8 | rc5_command;
- rc_keydown(rc_dev, RC_TYPE_RC5, scancode, toggle);
+ rc_keydown(rc_dev, RC_PROTO_RC5, scancode, toggle);
}
}
end_ir_decode:
@@ -188,14 +188,14 @@ int smi_ir_init(struct smi_dev *dev)
return -ENOMEM;
/* init input device */
- snprintf(ir->input_name, sizeof(ir->input_name), "IR (%s)",
+ snprintf(ir->device_name, sizeof(ir->device_name), "IR (%s)",
dev->info->name);
snprintf(ir->input_phys, sizeof(ir->input_phys), "pci-%s/ir0",
pci_name(dev->pci_dev));
rc_dev->driver_name = "SMI_PCIe";
rc_dev->input_phys = ir->input_phys;
- rc_dev->input_name = ir->input_name;
+ rc_dev->device_name = ir->device_name;
rc_dev->input_id.bustype = BUS_PCI;
rc_dev->input_id.version = 1;
rc_dev->input_id.vendor = dev->pci_dev->subsystem_vendor;
diff --git a/drivers/media/pci/smipcie/smipcie.h b/drivers/media/pci/smipcie/smipcie.h
index 611e4f02cadd..c8368c78ddd5 100644
--- a/drivers/media/pci/smipcie/smipcie.h
+++ b/drivers/media/pci/smipcie/smipcie.h
@@ -240,7 +240,7 @@ struct smi_rc {
struct smi_dev *dev;
struct rc_dev *rc_dev;
char input_phys[64];
- char input_name[64];
+ char device_name[64];
struct work_struct work;
u8 irData[256];
diff --git a/drivers/media/pci/solo6x10/solo6x10-g723.c b/drivers/media/pci/solo6x10/solo6x10-g723.c
index 3ca947092775..81be1b8df758 100644
--- a/drivers/media/pci/solo6x10/solo6x10-g723.c
+++ b/drivers/media/pci/solo6x10/solo6x10-g723.c
@@ -319,7 +319,7 @@ static int snd_solo_capture_volume_put(struct snd_kcontrol *kcontrol,
return 1;
}
-static struct snd_kcontrol_new snd_solo_capture_volume = {
+static const struct snd_kcontrol_new snd_solo_capture_volume = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Capture Volume",
.info = snd_solo_capture_volume_info,
diff --git a/drivers/media/pci/solo6x10/solo6x10-gpio.c b/drivers/media/pci/solo6x10/solo6x10-gpio.c
index 6d3b4a36bc11..3d0d1aa2f6a8 100644
--- a/drivers/media/pci/solo6x10/solo6x10-gpio.c
+++ b/drivers/media/pci/solo6x10/solo6x10-gpio.c
@@ -57,6 +57,9 @@ static void solo_gpio_mode(struct solo_dev *solo_dev,
ret |= 1 << port;
}
+ /* Enable GPIO[31:16] */
+ ret |= 0xffff0000;
+
solo_reg_write(solo_dev, SOLO_GPIO_CONFIG_1, ret);
}
@@ -90,16 +93,110 @@ static void solo_gpio_config(struct solo_dev *solo_dev)
/* Initially set relay status to 0 */
solo_gpio_clear(solo_dev, 0xff00);
+
+ /* Set input pins direction */
+ solo_gpio_mode(solo_dev, 0xffff0000, 0);
+}
+
+#ifdef CONFIG_GPIOLIB
+/* Pins 0-7 are not exported because, judging from the code above, they are
+ * used for internal purposes. Offset 0 therefore corresponds to pin 8, so
+ * offsets 0-7 are the relay GPIOs and offsets 8-23 are the input GPIOs.
+ */
+static int solo_gpiochip_get_direction(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ int ret, mode;
+ struct solo_dev *solo_dev = gpiochip_get_data(chip);
+
+ if (offset < 8) {
+ ret = solo_reg_read(solo_dev, SOLO_GPIO_CONFIG_0);
+ mode = 3 & (ret >> ((offset + 8) * 2));
+ } else {
+ ret = solo_reg_read(solo_dev, SOLO_GPIO_CONFIG_1);
+ mode = 1 & (ret >> (offset - 8));
+ }
+
+ if (!mode)
+ return 1;
+ else if (mode == 1)
+ return 0;
+
+ return -1;
}
+static int solo_gpiochip_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ return -1;
+}
+
+static int solo_gpiochip_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ return -1;
+}
+
+static int solo_gpiochip_get(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ int ret;
+ struct solo_dev *solo_dev = gpiochip_get_data(chip);
+
+ ret = solo_reg_read(solo_dev, SOLO_GPIO_DATA_IN);
+
+ return 1 & (ret >> (offset + 8));
+}
+
+static void solo_gpiochip_set(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct solo_dev *solo_dev = gpiochip_get_data(chip);
+
+ if (value)
+ solo_gpio_set(solo_dev, 1 << (offset + 8));
+ else
+ solo_gpio_clear(solo_dev, 1 << (offset + 8));
+}
+#endif
+
int solo_gpio_init(struct solo_dev *solo_dev)
{
+ int ret;
+
solo_gpio_config(solo_dev);
+#ifdef CONFIG_GPIOLIB
+ solo_dev->gpio_dev.label = SOLO6X10_NAME"_gpio";
+ solo_dev->gpio_dev.parent = &solo_dev->pdev->dev;
+ solo_dev->gpio_dev.owner = THIS_MODULE;
+ solo_dev->gpio_dev.base = -1;
+ solo_dev->gpio_dev.ngpio = 24;
+ solo_dev->gpio_dev.can_sleep = 0;
+
+ solo_dev->gpio_dev.get_direction = solo_gpiochip_get_direction;
+ solo_dev->gpio_dev.direction_input = solo_gpiochip_direction_input;
+ solo_dev->gpio_dev.direction_output = solo_gpiochip_direction_output;
+ solo_dev->gpio_dev.get = solo_gpiochip_get;
+ solo_dev->gpio_dev.set = solo_gpiochip_set;
+
+ ret = gpiochip_add_data(&solo_dev->gpio_dev, solo_dev);
+
+ if (ret) {
+ solo_dev->gpio_dev.label = NULL;
+ return -1;
+ }
+#endif
return 0;
}
void solo_gpio_exit(struct solo_dev *solo_dev)
{
+#ifdef CONFIG_GPIOLIB
+ if (solo_dev->gpio_dev.label) {
+ gpiochip_remove(&solo_dev->gpio_dev);
+ solo_dev->gpio_dev.label = NULL;
+ }
+#endif
solo_gpio_clear(solo_dev, 0x30);
solo_gpio_config(solo_dev);
}
diff --git a/drivers/media/pci/solo6x10/solo6x10-tw28.c b/drivers/media/pci/solo6x10/solo6x10-tw28.c
index 0632d3f7c73c..7ecb725b6dd2 100644
--- a/drivers/media/pci/solo6x10/solo6x10-tw28.c
+++ b/drivers/media/pci/solo6x10/solo6x10-tw28.c
@@ -532,7 +532,7 @@ static void saa712x_write_regs(struct solo_dev *dev, const u8 *vals,
static void saa712x_setup(struct solo_dev *dev)
{
const int reg_start = 0x26;
- const u8 saa7128_regs_ntsc[] = {
+ static const u8 saa7128_regs_ntsc[] = {
/* :0x26 */
0x0d, 0x00,
/* :0x28 */
@@ -606,6 +606,7 @@ int solo_tw28_init(struct solo_dev *solo_dev)
solo_dev->tw28_cnt++;
break;
case 0x0c:
+ case 0x0d:
solo_dev->tw2864 |= 1 << i;
solo_dev->tw28_cnt++;
break;
diff --git a/drivers/media/pci/solo6x10/solo6x10-v4l2.c b/drivers/media/pci/solo6x10/solo6x10-v4l2.c
index 3266fc21825f..99ffd1ed4a73 100644
--- a/drivers/media/pci/solo6x10/solo6x10-v4l2.c
+++ b/drivers/media/pci/solo6x10/solo6x10-v4l2.c
@@ -630,7 +630,7 @@ static const struct v4l2_ioctl_ops solo_v4l2_ioctl_ops = {
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
-static struct video_device solo_v4l2_template = {
+static const struct video_device solo_v4l2_template = {
.name = SOLO6X10_NAME,
.fops = &solo_v4l2_fops,
.ioctl_ops = &solo_v4l2_ioctl_ops,
diff --git a/drivers/media/pci/solo6x10/solo6x10.h b/drivers/media/pci/solo6x10/solo6x10.h
index 3f8da5e8c430..3a1893ae2dad 100644
--- a/drivers/media/pci/solo6x10/solo6x10.h
+++ b/drivers/media/pci/solo6x10/solo6x10.h
@@ -31,6 +31,7 @@
#include <linux/atomic.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
+#include <linux/gpio/driver.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
@@ -199,6 +200,10 @@ struct solo_dev {
u32 irq_mask;
u32 motion_mask;
struct v4l2_device v4l2_dev;
+#ifdef CONFIG_GPIOLIB
+ /* GPIO */
+ struct gpio_chip gpio_dev;
+#endif
/* tw28xx accounting */
u8 tw2865, tw2864, tw2815;
diff --git a/drivers/media/pci/sta2x11/sta2x11_vip.c b/drivers/media/pci/sta2x11/sta2x11_vip.c
index 6343d24eb1d5..eb5a9eae7c8e 100644
--- a/drivers/media/pci/sta2x11/sta2x11_vip.c
+++ b/drivers/media/pci/sta2x11/sta2x11_vip.c
@@ -754,7 +754,7 @@ static const struct v4l2_ioctl_ops vip_ioctl_ops = {
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
-static struct video_device video_dev_template = {
+static const struct video_device video_dev_template = {
.name = KBUILD_MODNAME,
.release = video_device_release_empty,
.fops = &vip_fops,
diff --git a/drivers/media/pci/ttpci/av7110.c b/drivers/media/pci/ttpci/av7110.c
index f2905bd80366..f46947d8adf8 100644
--- a/drivers/media/pci/ttpci/av7110.c
+++ b/drivers/media/pci/ttpci/av7110.c
@@ -2872,7 +2872,7 @@ MAKE_AV7110_INFO(fsc, "Fujitsu Siemens DVB-C");
MAKE_AV7110_INFO(fss, "Fujitsu Siemens DVB-S rev1.6");
MAKE_AV7110_INFO(gxs_1_3, "Galaxis DVB-S rev1.3");
-static struct pci_device_id pci_tbl[] = {
+static const struct pci_device_id pci_tbl[] = {
MAKE_EXTENSION_PCI(fsc, 0x110a, 0x0000),
MAKE_EXTENSION_PCI(tts_1_X_fsc, 0x13c2, 0x0000),
MAKE_EXTENSION_PCI(ttt_1_X, 0x13c2, 0x0001),
diff --git a/drivers/media/pci/ttpci/av7110.h b/drivers/media/pci/ttpci/av7110.h
index 824c1e262fbb..347827925c14 100644
--- a/drivers/media/pci/ttpci/av7110.h
+++ b/drivers/media/pci/ttpci/av7110.h
@@ -177,7 +177,7 @@ struct av7110 {
/* CA */
- ca_slot_info_t ci_slot[2];
+ struct ca_slot_info ci_slot[2];
enum av7110_video_mode vidmode;
struct dmxdev dmxdev;
diff --git a/drivers/media/pci/ttpci/av7110_ca.c b/drivers/media/pci/ttpci/av7110_ca.c
index f64723aea56b..1fe49171d823 100644
--- a/drivers/media/pci/ttpci/av7110_ca.c
+++ b/drivers/media/pci/ttpci/av7110_ca.c
@@ -119,7 +119,7 @@ static void ci_ll_release(struct dvb_ringbuffer *cirbuf, struct dvb_ringbuffer *
}
static int ci_ll_reset(struct dvb_ringbuffer *cibuf, struct file *file,
- int slots, ca_slot_info_t *slot)
+ int slots, struct ca_slot_info *slot)
{
int i;
int len = 0;
@@ -264,7 +264,7 @@ static int dvb_ca_ioctl(struct file *file, unsigned int cmd, void *parg)
break;
case CA_GET_CAP:
{
- ca_caps_t cap;
+ struct ca_caps cap;
cap.slot_num = 2;
cap.slot_type = (FW_CI_LL_SUPPORT(av7110->arm_app) ?
@@ -277,7 +277,7 @@ static int dvb_ca_ioctl(struct file *file, unsigned int cmd, void *parg)
case CA_GET_SLOT_INFO:
{
- ca_slot_info_t *info=(ca_slot_info_t *)parg;
+ struct ca_slot_info *info=(struct ca_slot_info *)parg;
if (info->num < 0 || info->num > 1) {
mutex_unlock(&av7110->ioctl_mutex);
@@ -286,7 +286,7 @@ static int dvb_ca_ioctl(struct file *file, unsigned int cmd, void *parg)
av7110->ci_slot[info->num].num = info->num;
av7110->ci_slot[info->num].type = FW_CI_LL_SUPPORT(av7110->arm_app) ?
CA_CI_LINK : CA_CI;
- memcpy(info, &av7110->ci_slot[info->num], sizeof(ca_slot_info_t));
+ memcpy(info, &av7110->ci_slot[info->num], sizeof(struct ca_slot_info));
break;
}
@@ -298,7 +298,7 @@ static int dvb_ca_ioctl(struct file *file, unsigned int cmd, void *parg)
case CA_GET_DESCR_INFO:
{
- ca_descr_info_t info;
+ struct ca_descr_info info;
info.num = 16;
info.type = CA_ECD;
@@ -308,7 +308,7 @@ static int dvb_ca_ioctl(struct file *file, unsigned int cmd, void *parg)
case CA_SET_DESCR:
{
- ca_descr_t *descr = (ca_descr_t*) parg;
+ struct ca_descr *descr = (struct ca_descr*) parg;
if (descr->index >= 16 || descr->parity > 1) {
mutex_unlock(&av7110->ioctl_mutex);
diff --git a/drivers/media/pci/ttpci/av7110_v4l.c b/drivers/media/pci/ttpci/av7110_v4l.c
index 397fe146dedd..e4cf42c32284 100644
--- a/drivers/media/pci/ttpci/av7110_v4l.c
+++ b/drivers/media/pci/ttpci/av7110_v4l.c
@@ -218,7 +218,7 @@ static struct saa7146_standard analog_standard[];
static struct saa7146_standard dvb_standard[];
static struct saa7146_standard standard[];
-static struct v4l2_audio msp3400_v4l2_audio = {
+static const struct v4l2_audio msp3400_v4l2_audio = {
.index = 0,
.name = "Television",
.capability = V4L2_AUDCAP_STEREO
diff --git a/drivers/media/pci/ttpci/budget-av.c b/drivers/media/pci/ttpci/budget-av.c
index dc7be8fac9a3..ac83fff9fe0b 100644
--- a/drivers/media/pci/ttpci/budget-av.c
+++ b/drivers/media/pci/ttpci/budget-av.c
@@ -1567,7 +1567,7 @@ MAKE_BUDGET_INFO(cin1200c, "Terratec Cinergy 1200 DVB-C", BUDGET_CIN1200C);
MAKE_BUDGET_INFO(cin1200cmk3, "Terratec Cinergy 1200 DVB-C MK3", BUDGET_CIN1200C_MK3);
MAKE_BUDGET_INFO(cin1200t, "Terratec Cinergy 1200 DVB-T", BUDGET_CIN1200T);
-static struct pci_device_id pci_tbl[] = {
+static const struct pci_device_id pci_tbl[] = {
MAKE_EXTENSION_PCI(knc1s, 0x1131, 0x4f56),
MAKE_EXTENSION_PCI(knc1s, 0x1131, 0x0010),
MAKE_EXTENSION_PCI(knc1s, 0x1894, 0x0010),
diff --git a/drivers/media/pci/ttpci/budget-ci.c b/drivers/media/pci/ttpci/budget-ci.c
index 11b9227307bf..57af11804fd6 100644
--- a/drivers/media/pci/ttpci/budget-ci.c
+++ b/drivers/media/pci/ttpci/budget-ci.c
@@ -158,14 +158,15 @@ static void msp430_ir_interrupt(unsigned long data)
return;
if (budget_ci->ir.full_rc5) {
- rc_keydown(dev, RC_TYPE_RC5,
+ rc_keydown(dev, RC_PROTO_RC5,
RC_SCANCODE_RC5(budget_ci->ir.rc5_device, budget_ci->ir.ir_key),
!!(command & 0x20));
return;
}
/* FIXME: We should generate complete scancodes for all devices */
- rc_keydown(dev, RC_TYPE_UNKNOWN, budget_ci->ir.ir_key, !!(command & 0x20));
+ rc_keydown(dev, RC_PROTO_UNKNOWN, budget_ci->ir.ir_key,
+ !!(command & 0x20));
}
static int msp430_ir_init(struct budget_ci *budget_ci)
@@ -186,7 +187,7 @@ static int msp430_ir_init(struct budget_ci *budget_ci)
"pci-%s/ir0", pci_name(saa->pci));
dev->driver_name = MODULE_NAME;
- dev->input_name = budget_ci->ir.name;
+ dev->device_name = budget_ci->ir.name;
dev->input_phys = budget_ci->ir.phys;
dev->input_id.bustype = BUS_PCI;
dev->input_id.version = 1;
@@ -1538,7 +1539,7 @@ MAKE_BUDGET_INFO(ttc1501, "TT-Budget C-1501 PCI", BUDGET_TT);
MAKE_BUDGET_INFO(tt3200, "TT-Budget S2-3200 PCI", BUDGET_TT);
MAKE_BUDGET_INFO(ttbs1500b, "TT-Budget S-1500B PCI", BUDGET_TT);
-static struct pci_device_id pci_tbl[] = {
+static const struct pci_device_id pci_tbl[] = {
MAKE_EXTENSION_PCI(ttbci, 0x13c2, 0x100c),
MAKE_EXTENSION_PCI(ttbci, 0x13c2, 0x100f),
MAKE_EXTENSION_PCI(ttbcci, 0x13c2, 0x1010),
diff --git a/drivers/media/pci/ttpci/budget-patch.c b/drivers/media/pci/ttpci/budget-patch.c
index 442992372008..a738018cdca8 100644
--- a/drivers/media/pci/ttpci/budget-patch.c
+++ b/drivers/media/pci/ttpci/budget-patch.c
@@ -45,7 +45,7 @@ static struct saa7146_extension budget_extension;
MAKE_BUDGET_INFO(ttbp, "TT-Budget/Patch DVB-S 1.x PCI", BUDGET_PATCH);
//MAKE_BUDGET_INFO(satel,"TT-Budget/Patch SATELCO PCI", BUDGET_TT_HW_DISEQC);
-static struct pci_device_id pci_tbl[] = {
+static const struct pci_device_id pci_tbl[] = {
MAKE_EXTENSION_PCI(ttbp,0x13c2, 0x0000),
// MAKE_EXTENSION_PCI(satel, 0x13c2, 0x1013),
{
diff --git a/drivers/media/pci/ttpci/budget.c b/drivers/media/pci/ttpci/budget.c
index 81fe35cedd10..f59eadb7a5eb 100644
--- a/drivers/media/pci/ttpci/budget.c
+++ b/drivers/media/pci/ttpci/budget.c
@@ -845,7 +845,7 @@ MAKE_BUDGET_INFO(fsact1, "Fujitsu Siemens Activy Budget-T PCI (rev AL/ALPS TDHD1
MAKE_BUDGET_INFO(omicom, "Omicom S2 PCI", BUDGET_TT);
MAKE_BUDGET_INFO(sylt, "Philips Semi Sylt PCI", BUDGET_TT_HW_DISEQC);
-static struct pci_device_id pci_tbl[] = {
+static const struct pci_device_id pci_tbl[] = {
MAKE_EXTENSION_PCI(ttbs, 0x13c2, 0x1003),
MAKE_EXTENSION_PCI(ttbc, 0x13c2, 0x1004),
MAKE_EXTENSION_PCI(ttbt, 0x13c2, 0x1005),
diff --git a/drivers/media/pci/tw5864/tw5864-video.c b/drivers/media/pci/tw5864/tw5864-video.c
index 2a044be729da..e7bd2b8484e3 100644
--- a/drivers/media/pci/tw5864/tw5864-video.c
+++ b/drivers/media/pci/tw5864/tw5864-video.c
@@ -545,6 +545,7 @@ static int tw5864_fmt_vid_cap(struct file *file, void *priv,
switch (input->std) {
default:
WARN_ON_ONCE(1);
+ return -EINVAL;
case STD_NTSC:
f->fmt.pix.height = 480;
break;
diff --git a/drivers/media/pci/tw68/tw68-video.c b/drivers/media/pci/tw68/tw68-video.c
index 58c4dd75bfa1..8c1f4a049764 100644
--- a/drivers/media/pci/tw68/tw68-video.c
+++ b/drivers/media/pci/tw68/tw68-video.c
@@ -916,7 +916,7 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
#endif
};
-static struct video_device tw68_video_template = {
+static const struct video_device tw68_video_template = {
.name = "tw68_video",
.fops = &video_fops,
.ioctl_ops = &video_ioctl_ops,
diff --git a/drivers/media/pci/zoran/zoran_card.c b/drivers/media/pci/zoran/zoran_card.c
index 4680f001653a..a6b9ebd20263 100644
--- a/drivers/media/pci/zoran/zoran_card.c
+++ b/drivers/media/pci/zoran/zoran_card.c
@@ -130,7 +130,7 @@ MODULE_VERSION(ZORAN_VERSION);
.vendor = PCI_VENDOR_ID_ZORAN, .device = PCI_DEVICE_ID_ZORAN_36057, \
.subvendor = (subven), .subdevice = (subdev), .driver_data = (data) }
-static struct pci_device_id zr36067_pci_tbl[] = {
+static const struct pci_device_id zr36067_pci_tbl[] = {
ZR_DEVICE(PCI_VENDOR_ID_MIRO, PCI_DEVICE_ID_MIRO_DC10PLUS, DC10plus),
ZR_DEVICE(PCI_VENDOR_ID_MIRO, PCI_DEVICE_ID_MIRO_DC30PLUS, DC30plus),
ZR_DEVICE(PCI_VENDOR_ID_ELECTRONICDESIGNGMBH, PCI_DEVICE_ID_LML_33R10, LML33R10),
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 1313cd533436..7e7cc49b8674 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -76,7 +76,8 @@ config VIDEO_M32R_AR_M64278
config VIDEO_MUX
tristate "Video Multiplexer"
- depends on OF && VIDEO_V4L2_SUBDEV_API && MEDIA_CONTROLLER
+ select MULTIPLEXER
+ depends on VIDEO_V4L2 && OF && VIDEO_V4L2_SUBDEV_API && MEDIA_CONTROLLER
select REGMAP
help
This driver provides support for N:1 video bus multiplexers.
@@ -109,6 +110,13 @@ config VIDEO_PXA27x
---help---
This is a v4l2 driver for the PXA27x Quick Capture Interface
+config VIDEO_QCOM_CAMSS
+ tristate "Qualcomm 8x16 V4L2 Camera Subsystem driver"
+ depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST
+ select VIDEOBUF2_DMA_SG
+ select V4L2_FWNODE
+
config VIDEO_S3C_CAMIF
tristate "Samsung S3C24XX/S3C64XX SoC Camera Interface driver"
depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
@@ -475,8 +483,8 @@ config VIDEO_QCOM_VENUS
tristate "Qualcomm Venus V4L2 encoder/decoder driver"
depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA
depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST
- select QCOM_MDT_LOADER if (ARM || ARM64)
- select QCOM_SCM if (ARM || ARM64)
+ select QCOM_MDT_LOADER if ARCH_QCOM
+ select QCOM_SCM if ARCH_QCOM
select VIDEOBUF2_DMA_SG
select V4L2_MEM2MEM_DEV
---help---
@@ -536,6 +544,17 @@ menuconfig CEC_PLATFORM_DRIVERS
if CEC_PLATFORM_DRIVERS
+config VIDEO_MESON_AO_CEC
+ tristate "Amlogic Meson AO CEC driver"
+ depends on ARCH_MESON || COMPILE_TEST
+ select CEC_CORE
+ select CEC_NOTIFIER
+ ---help---
+ This is a driver for Amlogic Meson SoCs AO CEC interface. It uses the
+ generic CEC framework interface.
+ CEC bus is present in the HDMI connector and enables communication
+ between compatible devices.
+
config VIDEO_SAMSUNG_S5P_CEC
tristate "Samsung S5P CEC driver"
depends on PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index 9beadc760467..c1ef946bf032 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -85,4 +85,8 @@ obj-$(CONFIG_VIDEO_MEDIATEK_MDP) += mtk-mdp/
obj-$(CONFIG_VIDEO_MEDIATEK_JPEG) += mtk-jpeg/
+obj-$(CONFIG_VIDEO_QCOM_CAMSS) += qcom/camss-8x16/
+
obj-$(CONFIG_VIDEO_QCOM_VENUS) += qcom/venus/
+
+obj-y += meson/
diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c
index 466aba8b0e00..dfcc484cab89 100644
--- a/drivers/media/platform/am437x/am437x-vpfe.c
+++ b/drivers/media/platform/am437x/am437x-vpfe.c
@@ -2490,8 +2490,8 @@ vpfe_get_pdata(struct platform_device *pdev)
rem = of_graph_get_remote_port_parent(endpoint);
if (!rem) {
- dev_err(&pdev->dev, "Remote device at %s not found\n",
- endpoint->full_name);
+ dev_err(&pdev->dev, "Remote device at %pOF not found\n",
+ endpoint);
goto done;
}
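(Editorial aside, not from the patch.) This and several later hunks switch device-tree node reporting from the full_name string to the %pOF printk specifier, which formats the node path straight from the struct device_node pointer. A hedged sketch of the idiom, with a hypothetical helper name:

#include <linux/of.h>
#include <linux/device.h>

static void example_report_endpoint(struct device *dev,
				    struct device_node *endpoint)
{
	/* %pOF takes the node pointer itself; no full_name lookup is needed. */
	dev_dbg(dev, "parsing endpoint %pOF\n", endpoint);
}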
diff --git a/drivers/media/platform/atmel/atmel-isc.c b/drivers/media/platform/atmel/atmel-isc.c
index d6534252cdcd..d7103c5f92c3 100644
--- a/drivers/media/platform/atmel/atmel-isc.c
+++ b/drivers/media/platform/atmel/atmel-isc.c
@@ -873,7 +873,7 @@ static void isc_buffer_queue(struct vb2_buffer *vb)
spin_unlock_irqrestore(&isc->dma_queue_lock, flags);
}
-static struct vb2_ops isc_vb2_ops = {
+static const struct vb2_ops isc_vb2_ops = {
.queue_setup = isc_queue_setup,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
@@ -1700,8 +1700,8 @@ static int isc_parse_dt(struct device *dev, struct isc_device *isc)
rem = of_graph_get_remote_port_parent(epn);
if (!rem) {
- dev_notice(dev, "Remote device at %s not found\n",
- of_node_full_name(epn));
+ dev_notice(dev, "Remote device at %pOF not found\n",
+ epn);
continue;
}
diff --git a/drivers/media/platform/blackfin/bfin_capture.c b/drivers/media/platform/blackfin/bfin_capture.c
index 1c5166df46f5..41f179117fb0 100644
--- a/drivers/media/platform/blackfin/bfin_capture.c
+++ b/drivers/media/platform/blackfin/bfin_capture.c
@@ -375,7 +375,7 @@ static void bcap_stop_streaming(struct vb2_queue *vq)
}
}
-static struct vb2_ops bcap_video_qops = {
+static const struct vb2_ops bcap_video_qops = {
.queue_setup = bcap_queue_setup,
.buf_prepare = bcap_buffer_prepare,
.buf_cleanup = bcap_buffer_cleanup,
@@ -769,7 +769,7 @@ static const struct v4l2_ioctl_ops bcap_ioctl_ops = {
.vidioc_log_status = bcap_log_status,
};
-static struct v4l2_file_operations bcap_fops = {
+static const struct v4l2_file_operations bcap_fops = {
.owner = THIS_MODULE,
.open = v4l2_fh_open,
.release = vb2_fop_release,
diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
index 25cbf9e5ac5a..291c40933935 100644
--- a/drivers/media/platform/coda/coda-bit.c
+++ b/drivers/media/platform/coda/coda-bit.c
@@ -393,8 +393,9 @@ static int coda_alloc_framebuffers(struct coda_ctx *ctx,
int ret;
int i;
- if (ctx->codec && (ctx->codec->src_fourcc == V4L2_PIX_FMT_H264 ||
- ctx->codec->dst_fourcc == V4L2_PIX_FMT_H264)) {
+ if (ctx->codec->src_fourcc == V4L2_PIX_FMT_H264 ||
+ ctx->codec->dst_fourcc == V4L2_PIX_FMT_H264 ||
+ ctx->codec->dst_fourcc == V4L2_PIX_FMT_MPEG4) {
width = round_up(q_data->width, 16);
height = round_up(q_data->height, 16);
} else {
@@ -702,6 +703,8 @@ static u32 coda_supported_firmwares[] = {
CODA_FIRMWARE_VERNUM(CODA_DX6, 2, 2, 5),
CODA_FIRMWARE_VERNUM(CODA_7541, 1, 4, 50),
CODA_FIRMWARE_VERNUM(CODA_960, 2, 1, 5),
+ CODA_FIRMWARE_VERNUM(CODA_960, 2, 3, 10),
+ CODA_FIRMWARE_VERNUM(CODA_960, 3, 1, 1),
};
static bool coda_firmware_supported(u32 vernum)
@@ -1006,7 +1009,7 @@ static int coda_start_encoding(struct coda_ctx *ctx)
break;
}
coda_write(dev, value, CODA_CMD_ENC_SEQ_SLICE_MODE);
- value = ctx->params.gop_size & CODA_GOP_SIZE_MASK;
+ value = ctx->params.gop_size;
coda_write(dev, value, CODA_CMD_ENC_SEQ_GOP_SIZE);
}
@@ -1250,7 +1253,8 @@ static int coda_prepare_encode(struct coda_ctx *ctx)
force_ipicture = ctx->params.force_ipicture;
if (force_ipicture)
ctx->params.force_ipicture = false;
- else if ((src_buf->sequence % ctx->params.gop_size) == 0)
+ else if (ctx->params.gop_size != 0 &&
+ (src_buf->sequence % ctx->params.gop_size) == 0)
force_ipicture = 1;
/*
@@ -1411,6 +1415,7 @@ static void coda_finish_encode(struct coda_ctx *ctx)
}
dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp;
+ dst_buf->field = src_buf->field;
dst_buf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
dst_buf->flags |=
src_buf->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
@@ -1634,9 +1639,6 @@ static int __coda_start_decoding(struct coda_ctx *ctx)
ctx->frm_dis_flg = 0;
coda_write(dev, 0, CODA_REG_BIT_FRM_DIS_FLG(ctx->reg_idx));
- coda_write(dev, CODA_BIT_DEC_SEQ_INIT_ESCAPE,
- CODA_REG_BIT_BIT_STREAM_PARAM);
-
coda_write(dev, bitstream_buf, CODA_CMD_DEC_SEQ_BB_START);
coda_write(dev, bitstream_size / 1024, CODA_CMD_DEC_SEQ_BB_SIZE);
val = 0;
@@ -1652,6 +1654,10 @@ static int __coda_start_decoding(struct coda_ctx *ctx)
ctx->params.codec_mode_aux = CODA_MP4_AUX_MPEG4;
else
ctx->params.codec_mode_aux = 0;
+ if (src_fourcc == V4L2_PIX_FMT_MPEG4) {
+ coda_write(dev, CODA_MP4_CLASS_MPEG4,
+ CODA_CMD_DEC_SEQ_MP4_ASP_CLASS);
+ }
if (src_fourcc == V4L2_PIX_FMT_H264) {
if (dev->devtype->product == CODA_7541) {
coda_write(dev, ctx->psbuf.paddr,
@@ -1667,18 +1673,18 @@ static int __coda_start_decoding(struct coda_ctx *ctx)
if (dev->devtype->product != CODA_960)
coda_write(dev, 0, CODA_CMD_DEC_SEQ_SRC_SIZE);
- if (coda_command_sync(ctx, CODA_COMMAND_SEQ_INIT)) {
+ ctx->bit_stream_param = CODA_BIT_DEC_SEQ_INIT_ESCAPE;
+ ret = coda_command_sync(ctx, CODA_COMMAND_SEQ_INIT);
+ ctx->bit_stream_param = 0;
+ if (ret) {
v4l2_err(&dev->v4l2_dev, "CODA_COMMAND_SEQ_INIT timeout\n");
- coda_write(dev, 0, CODA_REG_BIT_BIT_STREAM_PARAM);
- return -ETIMEDOUT;
+ return ret;
}
ctx->initialized = 1;
/* Update kfifo out pointer from coda bitstream read pointer */
coda_kfifo_sync_from_device(ctx);
- coda_write(dev, 0, CODA_REG_BIT_BIT_STREAM_PARAM);
-
if (coda_read(dev, CODA_RET_DEC_SEQ_SUCCESS) == 0) {
v4l2_err(&dev->v4l2_dev,
"CODA_COMMAND_SEQ_INIT failed, error code = %d\n",
@@ -2153,6 +2159,7 @@ static void coda_finish_decode(struct coda_ctx *ctx)
dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
dst_buf->sequence = ctx->osequence++;
+ dst_buf->field = V4L2_FIELD_NONE;
dst_buf->flags &= ~(V4L2_BUF_FLAG_KEYFRAME |
V4L2_BUF_FLAG_PFRAME |
V4L2_BUF_FLAG_BFRAME);
@@ -2198,7 +2205,7 @@ static void coda_finish_decode(struct coda_ctx *ctx)
ctx->display_idx = display_idx;
}
-static void coda_error_decode(struct coda_ctx *ctx)
+static void coda_decode_timeout(struct coda_ctx *ctx)
{
struct vb2_v4l2_buffer *dst_buf;
@@ -2223,7 +2230,7 @@ const struct coda_context_ops coda_bit_decode_ops = {
.start_streaming = coda_start_decoding,
.prepare_run = coda_prepare_decode,
.finish_run = coda_finish_decode,
- .error_run = coda_error_decode,
+ .run_timeout = coda_decode_timeout,
.seq_end_work = coda_seq_end_work,
.release = coda_bit_release,
};
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index f92cc7df58fb..15eb5dc4dff9 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -73,7 +73,7 @@ MODULE_PARM_DESC(disable_vdoa, "Disable Video Data Order Adapter tiled to raster
static int enable_bwb = 0;
module_param(enable_bwb, int, 0644);
-MODULE_PARM_DESC(enable_bwb, "Enable BWB unit, may crash on certain streams");
+MODULE_PARM_DESC(enable_bwb, "Enable BWB unit for decoding, may crash on certain streams");
void coda_write(struct coda_dev *dev, u32 data, u32 reg)
{
@@ -714,9 +714,10 @@ static int coda_s_fmt(struct coda_ctx *ctx, struct v4l2_format *f,
ctx->tiled_map_type = GDI_TILED_FRAME_MB_RASTER_MAP;
break;
case V4L2_PIX_FMT_NV12:
- ctx->tiled_map_type = GDI_TILED_FRAME_MB_RASTER_MAP;
- if (!disable_tiling)
+ if (!disable_tiling) {
+ ctx->tiled_map_type = GDI_TILED_FRAME_MB_RASTER_MAP;
break;
+ }
/* else fall through */
case V4L2_PIX_FMT_YUV420:
case V4L2_PIX_FMT_YVU420:
@@ -932,7 +933,7 @@ static int coda_encoder_cmd(struct file *file, void *fh,
ctx->bit_stream_param |= CODA_BIT_STREAM_END_FLAG;
/* If there is no buffer in flight, wake up */
- if (ctx->qsequence == ctx->osequence) {
+ if (!ctx->streamon_out || ctx->qsequence == ctx->osequence) {
dst_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
V4L2_BUF_TYPE_VIDEO_CAPTURE);
dst_vq->last_buffer_dequeued = true;
@@ -1164,8 +1165,8 @@ static void coda_pic_run_work(struct work_struct *work)
coda_hw_reset(ctx);
- if (ctx->ops->error_run)
- ctx->ops->error_run(ctx);
+ if (ctx->ops->run_timeout)
+ ctx->ops->run_timeout(ctx);
} else if (!ctx->aborting) {
ctx->ops->finish_run(ctx);
}
@@ -1683,12 +1684,23 @@ static int coda_s_ctrl(struct v4l2_ctrl *ctrl)
ctx->params.h264_deblk_enabled = (ctrl->val ==
V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED);
break;
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+ /* TODO: switch between baseline and constrained baseline */
+ ctx->params.h264_profile_idc = 66;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+ /* nothing to do, this is set by the encoder */
+ break;
case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP:
ctx->params.mpeg4_intra_qp = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP:
ctx->params.mpeg4_inter_qp = ctrl->val;
break;
+ case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
+ /* nothing to do, these are fixed */
+ break;
case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:
ctx->params.slice_mode = ctrl->val;
break;
@@ -1734,10 +1746,12 @@ static const struct v4l2_ctrl_ops coda_ctrl_ops = {
static void coda_encode_ctrls(struct coda_ctx *ctx)
{
+ int max_gop_size = (ctx->dev->devtype->product == CODA_DX6) ? 60 : 99;
+
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_BITRATE, 0, 32767000, 1000, 0);
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
- V4L2_CID_MPEG_VIDEO_GOP_SIZE, 1, 60, 1, 16);
+ V4L2_CID_MPEG_VIDEO_GOP_SIZE, 0, max_gop_size, 1, 16);
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP, 0, 51, 1, 25);
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
@@ -1756,11 +1770,47 @@ static void coda_encode_ctrls(struct coda_ctx *ctx)
V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE,
V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED, 0x0,
V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED);
+ v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_PROFILE,
+ V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE, 0x0,
+ V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE);
+ if (ctx->dev->devtype->product == CODA_7541) {
+ v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_LEVEL,
+ V4L2_MPEG_VIDEO_H264_LEVEL_3_1,
+ ~((1 << V4L2_MPEG_VIDEO_H264_LEVEL_2_0) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_0) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_1)),
+ V4L2_MPEG_VIDEO_H264_LEVEL_3_1);
+ }
+ if (ctx->dev->devtype->product == CODA_960) {
+ v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_LEVEL,
+ V4L2_MPEG_VIDEO_H264_LEVEL_4_0,
+ ~((1 << V4L2_MPEG_VIDEO_H264_LEVEL_2_0) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_0) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_1) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_2) |
+ (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_0)),
+ V4L2_MPEG_VIDEO_H264_LEVEL_4_0);
+ }
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP, 1, 31, 1, 2);
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP, 1, 31, 1, 2);
v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE,
+ V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE, 0x0,
+ V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE);
+ if (ctx->dev->devtype->product == CODA_7541 ||
+ ctx->dev->devtype->product == CODA_960) {
+ v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL,
+ V4L2_MPEG_VIDEO_MPEG4_LEVEL_5,
+ ~(1 << V4L2_MPEG_VIDEO_MPEG4_LEVEL_5),
+ V4L2_MPEG_VIDEO_MPEG4_LEVEL_5);
+ }
+ v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE,
V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES, 0x0,
V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE);
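(Editorial aside, not from the patch.) For readers following the new menu controls above: v4l2_ctrl_new_std_menu() takes the control handler, ops, control ID, the highest allowed menu item, a skip mask and the default. The skip mask has a bit set for every item to hide, which is why the hunks build it as the complement of the allowed levels. A hedged sketch with a hypothetical allowed set (the levels are chosen for illustration only):

	u64 allowed = (1 << V4L2_MPEG_VIDEO_H264_LEVEL_2_0) |
		      (1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_0);

	/* Expose only levels 2.0 and 3.0; every other menu item is skipped. */
	v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
			       V4L2_CID_MPEG_VIDEO_H264_LEVEL,
			       V4L2_MPEG_VIDEO_H264_LEVEL_3_0,
			       ~allowed,
			       V4L2_MPEG_VIDEO_H264_LEVEL_3_0);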
@@ -1938,7 +1988,13 @@ static int coda_open(struct file *file)
ctx->idx = idx;
switch (dev->devtype->product) {
case CODA_960:
- if (enable_bwb)
+ /*
+ * Enabling the BWB when decoding can hang the firmware with
+ * certain streams. The issue was tracked as ENGR00293425 by
+ * Freescale. As a workaround, disable BWB for all decoders.
+ * The enable_bwb module parameter allows to override this.
+ */
+ if (enable_bwb || ctx->inst_type == CODA_INST_ENCODER)
ctx->frame_mem_ctrl = CODA9_FRAME_ENABLE_BWB;
/* fallthrough */
case CODA_7541:
@@ -2142,7 +2198,8 @@ static int coda_hw_init(struct coda_dev *dev)
CODA_REG_BIT_STREAM_CTRL);
}
if (dev->devtype->product == CODA_960)
- coda_write(dev, 1 << 12, CODA_REG_BIT_FRAME_MEM_CTRL);
+ coda_write(dev, CODA9_FRAME_ENABLE_BWB,
+ CODA_REG_BIT_FRAME_MEM_CTRL);
else
coda_write(dev, 0, CODA_REG_BIT_FRAME_MEM_CTRL);
@@ -2386,11 +2443,11 @@ static const struct coda_devtype coda_devdata[] = {
.num_vdevs = ARRAY_SIZE(coda9_video_devices),
.workbuf_size = 80 * 1024,
.tempbuf_size = 204 * 1024,
- .iram_size = 0x20000,
+ .iram_size = 0x1f000, /* leave 4k for suspend code */
},
};
-static struct platform_device_id coda_platform_ids[] = {
+static const struct platform_device_id coda_platform_ids[] = {
{ .name = "coda-imx27", .driver_data = CODA_IMX27 },
{ /* sentinel */ }
};
@@ -2470,7 +2527,8 @@ static int coda_probe(struct platform_device *pdev)
return ret;
}
- dev->rstc = devm_reset_control_get_optional(&pdev->dev, NULL);
+ dev->rstc = devm_reset_control_get_optional_exclusive(&pdev->dev,
+ NULL);
if (IS_ERR(dev->rstc)) {
ret = PTR_ERR(dev->rstc);
dev_err(&pdev->dev, "failed get reset control: %d\n", ret);
diff --git a/drivers/media/platform/coda/coda.h b/drivers/media/platform/coda/coda.h
index 40fe22f0d757..c5f504d8cf67 100644
--- a/drivers/media/platform/coda/coda.h
+++ b/drivers/media/platform/coda/coda.h
@@ -183,7 +183,7 @@ struct coda_context_ops {
int (*start_streaming)(struct coda_ctx *ctx);
int (*prepare_run)(struct coda_ctx *ctx);
void (*finish_run)(struct coda_ctx *ctx);
- void (*error_run)(struct coda_ctx *ctx);
+ void (*run_timeout)(struct coda_ctx *ctx);
void (*seq_end_work)(struct work_struct *work);
void (*release)(struct coda_ctx *ctx);
};
diff --git a/drivers/media/platform/coda/coda_regs.h b/drivers/media/platform/coda/coda_regs.h
index 77ee46a93427..38df5fd9a2fa 100644
--- a/drivers/media/platform/coda/coda_regs.h
+++ b/drivers/media/platform/coda/coda_regs.h
@@ -158,6 +158,7 @@
#define CODA_CMD_DEC_SEQ_PS_BB_START 0x194
#define CODA_CMD_DEC_SEQ_PS_BB_SIZE 0x198
#define CODA_CMD_DEC_SEQ_MP4_ASP_CLASS 0x19c
+#define CODA_MP4_CLASS_MPEG4 0
#define CODA_CMD_DEC_SEQ_X264_MV_EN 0x19c
#define CODA_CMD_DEC_SEQ_SPP_CHUNK_SIZE 0x1a0
diff --git a/drivers/media/platform/coda/imx-vdoa.c b/drivers/media/platform/coda/imx-vdoa.c
index df9b71621420..8eb3e0c05473 100644
--- a/drivers/media/platform/coda/imx-vdoa.c
+++ b/drivers/media/platform/coda/imx-vdoa.c
@@ -314,6 +314,8 @@ static int vdoa_probe(struct platform_device *pdev)
return PTR_ERR(vdoa->regs);
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res)
+ return -EINVAL;
vdoa->irq = devm_request_threaded_irq(&pdev->dev, res->start, NULL,
vdoa_irq_handler, IRQF_ONESHOT,
"vdoa", vdoa);
diff --git a/drivers/media/platform/davinci/ccdc_hw_device.h b/drivers/media/platform/davinci/ccdc_hw_device.h
index 8f6688a7a111..f1b521045d64 100644
--- a/drivers/media/platform/davinci/ccdc_hw_device.h
+++ b/drivers/media/platform/davinci/ccdc_hw_device.h
@@ -42,16 +42,6 @@ struct ccdc_hw_ops {
int (*set_hw_if_params) (struct vpfe_hw_if_param *param);
/* get interface parameters */
int (*get_hw_if_params) (struct vpfe_hw_if_param *param);
- /*
- * Pointer to function to set parameters. Used
- * for implementing VPFE_S_CCDC_PARAMS
- */
- int (*set_params) (void *params);
- /*
- * Pointer to function to get parameter. Used
- * for implementing VPFE_G_CCDC_PARAMS
- */
- int (*get_params) (void *params);
/* Pointer to function to configure ccdc */
int (*configure) (void);
diff --git a/drivers/media/platform/davinci/dm355_ccdc.c b/drivers/media/platform/davinci/dm355_ccdc.c
index 73db166dc338..6d492dc4c3a9 100644
--- a/drivers/media/platform/davinci/dm355_ccdc.c
+++ b/drivers/media/platform/davinci/dm355_ccdc.c
@@ -17,12 +17,7 @@
* This module is for configuring DM355 CCD controller of VPFE to capture
* Raw yuv or Bayer RGB data from a decoder. CCDC has several modules
* such as Defect Pixel Correction, Color Space Conversion etc to
- * pre-process the Bayer RGB data, before writing it to SDRAM. This
- * module also allows application to configure individual
- * module parameters through VPFE_CMD_S_CCDC_RAW_PARAMS IOCTL.
- * To do so, application include dm355_ccdc.h and vpfe_capture.h header
- * files. The setparams() API is called by vpfe_capture driver
- * to configure module parameters
+ * pre-process the Bayer RGB data, before writing it to SDRAM.
*
* TODO: 1) Raw bayer parameter settings and bayer capture
* 2) Split module parameter structure to module specific ioctl structs
@@ -260,90 +255,6 @@ static void ccdc_setwin(struct v4l2_rect *image_win,
dev_dbg(ccdc_cfg.dev, "\nEnd of ccdc_setwin...");
}
-static int validate_ccdc_param(struct ccdc_config_params_raw *ccdcparam)
-{
- if (ccdcparam->datasft < CCDC_DATA_NO_SHIFT ||
- ccdcparam->datasft > CCDC_DATA_SHIFT_6BIT) {
- dev_dbg(ccdc_cfg.dev, "Invalid value of data shift\n");
- return -EINVAL;
- }
-
- if (ccdcparam->mfilt1 < CCDC_NO_MEDIAN_FILTER1 ||
- ccdcparam->mfilt1 > CCDC_MEDIAN_FILTER1) {
- dev_dbg(ccdc_cfg.dev, "Invalid value of median filter1\n");
- return -EINVAL;
- }
-
- if (ccdcparam->mfilt2 < CCDC_NO_MEDIAN_FILTER2 ||
- ccdcparam->mfilt2 > CCDC_MEDIAN_FILTER2) {
- dev_dbg(ccdc_cfg.dev, "Invalid value of median filter2\n");
- return -EINVAL;
- }
-
- if ((ccdcparam->med_filt_thres < 0) ||
- (ccdcparam->med_filt_thres > CCDC_MED_FILT_THRESH)) {
- dev_dbg(ccdc_cfg.dev,
- "Invalid value of median filter threshold\n");
- return -EINVAL;
- }
-
- if (ccdcparam->data_sz < CCDC_DATA_16BITS ||
- ccdcparam->data_sz > CCDC_DATA_8BITS) {
- dev_dbg(ccdc_cfg.dev, "Invalid value of data size\n");
- return -EINVAL;
- }
-
- if (ccdcparam->alaw.enable) {
- if (ccdcparam->alaw.gamma_wd < CCDC_GAMMA_BITS_13_4 ||
- ccdcparam->alaw.gamma_wd > CCDC_GAMMA_BITS_09_0) {
- dev_dbg(ccdc_cfg.dev, "Invalid value of ALAW\n");
- return -EINVAL;
- }
- }
-
- if (ccdcparam->blk_clamp.b_clamp_enable) {
- if (ccdcparam->blk_clamp.sample_pixel < CCDC_SAMPLE_1PIXELS ||
- ccdcparam->blk_clamp.sample_pixel > CCDC_SAMPLE_16PIXELS) {
- dev_dbg(ccdc_cfg.dev,
- "Invalid value of sample pixel\n");
- return -EINVAL;
- }
- if (ccdcparam->blk_clamp.sample_ln < CCDC_SAMPLE_1LINES ||
- ccdcparam->blk_clamp.sample_ln > CCDC_SAMPLE_16LINES) {
- dev_dbg(ccdc_cfg.dev,
- "Invalid value of sample lines\n");
- return -EINVAL;
- }
- }
- return 0;
-}
-
-/* Parameter operations */
-static int ccdc_set_params(void __user *params)
-{
- struct ccdc_config_params_raw ccdc_raw_params;
- int x;
-
- /* only raw module parameters can be set through the IOCTL */
- if (ccdc_cfg.if_type != VPFE_RAW_BAYER)
- return -EINVAL;
-
- x = copy_from_user(&ccdc_raw_params, params, sizeof(ccdc_raw_params));
- if (x) {
- dev_dbg(ccdc_cfg.dev, "ccdc_set_params: error in copying ccdcparams, %d\n",
- x);
- return -EFAULT;
- }
-
- if (!validate_ccdc_param(&ccdc_raw_params)) {
- memcpy(&ccdc_cfg.bayer.config_params,
- &ccdc_raw_params,
- sizeof(ccdc_raw_params));
- return 0;
- }
- return -EINVAL;
-}
-
/* This function will configure CCDC for YCbCr video capture */
static void ccdc_config_ycbcr(void)
{
@@ -939,7 +850,6 @@ static struct ccdc_hw_device ccdc_hw_dev = {
.enable = ccdc_enable,
.enable_out_to_sdram = ccdc_enable_output_to_sdram,
.set_hw_if_params = ccdc_set_hw_if_params,
- .set_params = ccdc_set_params,
.configure = ccdc_configure,
.set_buftype = ccdc_set_buftype,
.get_buftype = ccdc_get_buftype,
diff --git a/drivers/media/platform/davinci/dm644x_ccdc.c b/drivers/media/platform/davinci/dm644x_ccdc.c
index 740fbc7a8c14..3b2d8a9317b8 100644
--- a/drivers/media/platform/davinci/dm644x_ccdc.c
+++ b/drivers/media/platform/davinci/dm644x_ccdc.c
@@ -17,13 +17,9 @@
* This module is for configuring CCD controller of DM6446 VPFE to capture
* Raw yuv or Bayer RGB data from a decoder. CCDC has several modules
* such as Defect Pixel Correction, Color Space Conversion etc to
- * pre-process the Raw Bayer RGB data, before writing it to SDRAM. This
- * module also allows application to configure individual
- * module parameters through VPFE_CMD_S_CCDC_RAW_PARAMS IOCTL.
- * To do so, application includes dm644x_ccdc.h and vpfe_capture.h header
- * files. The setparams() API is called by vpfe_capture driver
- * to configure module parameters. This file is named DM644x so that other
- * variants such DM6443 may be supported using the same module.
+ * pre-process the Raw Bayer RGB data, before writing it to SDRAM.
+ * This file is named DM644x so that other variants such DM6443
+ * may be supported using the same module.
*
* TODO: Test Raw bayer parameter settings and bayer capture
* Split module parameter structure to module specific ioctl structs
@@ -216,96 +212,8 @@ static void ccdc_readregs(void)
dev_notice(ccdc_cfg.dev, "\nReading 0x%x to VERT_LINES...\n", val);
}
-static int validate_ccdc_param(struct ccdc_config_params_raw *ccdcparam)
-{
- if (ccdcparam->alaw.enable) {
- u8 max_gamma = ccdc_gamma_width_max_bit(ccdcparam->alaw.gamma_wd);
- u8 max_data = ccdc_data_size_max_bit(ccdcparam->data_sz);
-
- if ((ccdcparam->alaw.gamma_wd > CCDC_GAMMA_BITS_09_0) ||
- (ccdcparam->alaw.gamma_wd < CCDC_GAMMA_BITS_15_6) ||
- (max_gamma > max_data)) {
- dev_dbg(ccdc_cfg.dev, "\nInvalid data line select");
- return -1;
- }
- }
- return 0;
-}
-
-static int ccdc_update_raw_params(struct ccdc_config_params_raw *raw_params)
-{
- struct ccdc_config_params_raw *config_params =
- &ccdc_cfg.bayer.config_params;
- unsigned int *fpc_virtaddr = NULL;
- unsigned int *fpc_physaddr = NULL;
-
- memcpy(config_params, raw_params, sizeof(*raw_params));
- /*
- * allocate memory for fault pixel table and copy the user
- * values to the table
- */
- if (!config_params->fault_pxl.enable)
- return 0;
-
- fpc_physaddr = (unsigned int *)config_params->fault_pxl.fpc_table_addr;
- fpc_virtaddr = (unsigned int *)phys_to_virt(
- (unsigned long)fpc_physaddr);
- /*
- * Allocate memory for FPC table if current
- * FPC table buffer is not big enough to
- * accommodate FPC Number requested
- */
- if (raw_params->fault_pxl.fp_num != config_params->fault_pxl.fp_num) {
- if (fpc_physaddr != NULL) {
- free_pages((unsigned long)fpc_virtaddr,
- get_order
- (config_params->fault_pxl.fp_num *
- FP_NUM_BYTES));
- }
-
- /* Allocate memory for FPC table */
- fpc_virtaddr =
- (unsigned int *)__get_free_pages(GFP_KERNEL | GFP_DMA,
- get_order(raw_params->
- fault_pxl.fp_num *
- FP_NUM_BYTES));
-
- if (fpc_virtaddr == NULL) {
- dev_dbg(ccdc_cfg.dev,
- "\nUnable to allocate memory for FPC");
- return -EFAULT;
- }
- fpc_physaddr =
- (unsigned int *)virt_to_phys((void *)fpc_virtaddr);
- }
-
- /* Copy number of fault pixels and FPC table */
- config_params->fault_pxl.fp_num = raw_params->fault_pxl.fp_num;
- if (copy_from_user(fpc_virtaddr,
- (void __user *)raw_params->fault_pxl.fpc_table_addr,
- config_params->fault_pxl.fp_num * FP_NUM_BYTES)) {
- dev_dbg(ccdc_cfg.dev, "\n copy_from_user failed");
- return -EFAULT;
- }
- config_params->fault_pxl.fpc_table_addr = (unsigned long)fpc_physaddr;
- return 0;
-}
-
static int ccdc_close(struct device *dev)
{
- struct ccdc_config_params_raw *config_params =
- &ccdc_cfg.bayer.config_params;
- unsigned int *fpc_physaddr = NULL, *fpc_virtaddr = NULL;
-
- fpc_physaddr = (unsigned int *)config_params->fault_pxl.fpc_table_addr;
-
- if (fpc_physaddr != NULL) {
- fpc_virtaddr = (unsigned int *)
- phys_to_virt((unsigned long)fpc_physaddr);
- free_pages((unsigned long)fpc_virtaddr,
- get_order(config_params->fault_pxl.fp_num *
- FP_NUM_BYTES));
- }
return 0;
}
@@ -339,29 +247,6 @@ static void ccdc_sbl_reset(void)
vpss_clear_wbl_overflow(VPSS_PCR_CCDC_WBL_O);
}
-/* Parameter operations */
-static int ccdc_set_params(void __user *params)
-{
- struct ccdc_config_params_raw ccdc_raw_params;
- int x;
-
- if (ccdc_cfg.if_type != VPFE_RAW_BAYER)
- return -EINVAL;
-
- x = copy_from_user(&ccdc_raw_params, params, sizeof(ccdc_raw_params));
- if (x) {
- dev_dbg(ccdc_cfg.dev, "ccdc_set_params: error in copyingccdc params, %d\n",
- x);
- return -EFAULT;
- }
-
- if (!validate_ccdc_param(&ccdc_raw_params)) {
- if (!ccdc_update_raw_params(&ccdc_raw_params))
- return 0;
- }
- return -EINVAL;
-}
-
/*
* ccdc_config_ycbcr()
* This function will configure CCDC for YCbCr video capture
@@ -489,32 +374,6 @@ static void ccdc_config_black_compense(struct ccdc_black_compensation *bcomp)
regw(val, CCDC_BLKCMP);
}
-static void ccdc_config_fpc(struct ccdc_fault_pixel *fpc)
-{
- u32 val;
-
- /* Initially disable FPC */
- val = CCDC_FPC_DISABLE;
- regw(val, CCDC_FPC);
-
- if (!fpc->enable)
- return;
-
- /* Configure Fault pixel if needed */
- regw(fpc->fpc_table_addr, CCDC_FPC_ADDR);
- dev_dbg(ccdc_cfg.dev, "\nWriting 0x%lx to FPC_ADDR...\n",
- (fpc->fpc_table_addr));
- /* Write the FPC params with FPC disable */
- val = fpc->fp_num & CCDC_FPC_FPC_NUM_MASK;
- regw(val, CCDC_FPC);
-
- dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to FPC...\n", val);
- /* read the FPC register */
- val = regr(CCDC_FPC) | CCDC_FPC_ENABLE;
- regw(val, CCDC_FPC);
- dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to FPC...\n", val);
-}
-
/*
* ccdc_config_raw()
* This function will configure CCDC for Raw capture mode
@@ -569,9 +428,6 @@ static void ccdc_config_raw(void)
/* Configure Black level compensation */
ccdc_config_black_compense(&config_params->blk_comp);
- /* Configure Fault Pixel Correction */
- ccdc_config_fpc(&config_params->fault_pxl);
-
/* If data size is 8 bit then pack the data */
if ((config_params->data_sz == CCDC_DATA_8BITS) ||
config_params->alaw.enable)
@@ -929,7 +785,6 @@ static struct ccdc_hw_device ccdc_hw_dev = {
.reset = ccdc_sbl_reset,
.enable = ccdc_enable,
.set_hw_if_params = ccdc_set_hw_if_params,
- .set_params = ccdc_set_params,
.configure = ccdc_configure,
.set_buftype = ccdc_set_buftype,
.get_buftype = ccdc_get_buftype,
diff --git a/drivers/media/platform/davinci/vpbe.c b/drivers/media/platform/davinci/vpbe.c
index 3679b1e7b39e..7f6462562579 100644
--- a/drivers/media/platform/davinci/vpbe.c
+++ b/drivers/media/platform/davinci/vpbe.c
@@ -790,7 +790,7 @@ static void vpbe_deinitialize(struct device *dev, struct vpbe_device *vpbe_dev)
vpss_enable_clock(VPSS_VPBE_CLOCK, 0);
}
-static struct vpbe_device_ops vpbe_dev_ops = {
+static const struct vpbe_device_ops vpbe_dev_ops = {
.g_cropcap = vpbe_g_cropcap,
.enum_outputs = vpbe_enum_outputs,
.set_output = vpbe_set_output,
diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c
index a9bc0175e4d3..13d027031ff0 100644
--- a/drivers/media/platform/davinci/vpbe_display.c
+++ b/drivers/media/platform/davinci/vpbe_display.c
@@ -355,7 +355,7 @@ static void vpbe_stop_streaming(struct vb2_queue *vq)
spin_unlock_irqrestore(&disp->dma_queue_lock, flags);
}
-static struct vb2_ops video_qops = {
+static const struct vb2_ops video_qops = {
.queue_setup = vpbe_buffer_queue_setup,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
@@ -1275,7 +1275,7 @@ static const struct v4l2_ioctl_ops vpbe_ioctl_ops = {
.vidioc_enum_dv_timings = vpbe_display_enum_dv_timings,
};
-static struct v4l2_file_operations vpbe_fops = {
+static const struct v4l2_file_operations vpbe_fops = {
.owner = THIS_MODULE,
.open = vpbe_display_open,
.release = vpbe_display_release,
diff --git a/drivers/media/platform/davinci/vpbe_osd.c b/drivers/media/platform/davinci/vpbe_osd.c
index df042e84a678..66449791c70c 100644
--- a/drivers/media/platform/davinci/vpbe_osd.c
+++ b/drivers/media/platform/davinci/vpbe_osd.c
@@ -37,7 +37,7 @@
#define MODULE_NAME "davinci-vpbe-osd"
-static struct platform_device_id vpbe_osd_devtype[] = {
+static const struct platform_device_id vpbe_osd_devtype[] = {
{
.name = DM644X_VPBE_OSD_SUBDEV_NAME,
.driver_data = VPBE_VERSION_1,
diff --git a/drivers/media/platform/davinci/vpbe_venc.c b/drivers/media/platform/davinci/vpbe_venc.c
index 8bfe90a24681..3a4e78595149 100644
--- a/drivers/media/platform/davinci/vpbe_venc.c
+++ b/drivers/media/platform/davinci/vpbe_venc.c
@@ -36,7 +36,7 @@
#define MODULE_NAME "davinci-vpbe-venc"
-static struct platform_device_id vpbe_venc_devtype[] = {
+static const struct platform_device_id vpbe_venc_devtype[] = {
{
.name = DM644X_VPBE_VENC_SUBDEV_NAME,
.driver_data = VPBE_VERSION_1,
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
index e3fe3e0635aa..6792da16d9c7 100644
--- a/drivers/media/platform/davinci/vpfe_capture.c
+++ b/drivers/media/platform/davinci/vpfe_capture.c
@@ -281,45 +281,6 @@ void vpfe_unregister_ccdc_device(struct ccdc_hw_device *dev)
EXPORT_SYMBOL(vpfe_unregister_ccdc_device);
/*
- * vpfe_get_ccdc_image_format - Get image parameters based on CCDC settings
- */
-static int vpfe_get_ccdc_image_format(struct vpfe_device *vpfe_dev,
- struct v4l2_format *f)
-{
- struct v4l2_rect image_win;
- enum ccdc_buftype buf_type;
- enum ccdc_frmfmt frm_fmt;
-
- memset(f, 0, sizeof(*f));
- f->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
- ccdc_dev->hw_ops.get_image_window(&image_win);
- f->fmt.pix.width = image_win.width;
- f->fmt.pix.height = image_win.height;
- f->fmt.pix.bytesperline = ccdc_dev->hw_ops.get_line_length();
- f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
- f->fmt.pix.height;
- buf_type = ccdc_dev->hw_ops.get_buftype();
- f->fmt.pix.pixelformat = ccdc_dev->hw_ops.get_pixel_format();
- frm_fmt = ccdc_dev->hw_ops.get_frame_format();
- if (frm_fmt == CCDC_FRMFMT_PROGRESSIVE)
- f->fmt.pix.field = V4L2_FIELD_NONE;
- else if (frm_fmt == CCDC_FRMFMT_INTERLACED) {
- if (buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED)
- f->fmt.pix.field = V4L2_FIELD_INTERLACED;
- else if (buf_type == CCDC_BUFTYPE_FLD_SEPARATED)
- f->fmt.pix.field = V4L2_FIELD_SEQ_TB;
- else {
- v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf_type\n");
- return -EINVAL;
- }
- } else {
- v4l2_err(&vpfe_dev->v4l2_dev, "Invalid frm_fmt\n");
- return -EINVAL;
- }
- return 0;
-}
-
-/*
* vpfe_config_ccdc_image_format()
* For a pix format, configure ccdc to setup the capture
*/
@@ -1327,7 +1288,7 @@ static void vpfe_videobuf_release(struct videobuf_queue *vq,
vb->state = VIDEOBUF_NEEDS_INIT;
}
-static struct videobuf_queue_ops vpfe_videobuf_qops = {
+static const struct videobuf_queue_ops vpfe_videobuf_qops = {
.buf_setup = vpfe_videobuf_setup,
.buf_prepare = vpfe_videobuf_prepare,
.buf_queue = vpfe_videobuf_queue,
@@ -1697,59 +1658,6 @@ unlock_out:
return ret;
}
-
-static long vpfe_param_handler(struct file *file, void *priv,
- bool valid_prio, unsigned int cmd, void *param)
-{
- struct vpfe_device *vpfe_dev = video_drvdata(file);
- int ret;
-
- v4l2_dbg(2, debug, &vpfe_dev->v4l2_dev, "vpfe_param_handler\n");
-
- if (vpfe_dev->started) {
- /* only allowed if streaming is not started */
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
- "device already started\n");
- return -EBUSY;
- }
-
- ret = mutex_lock_interruptible(&vpfe_dev->lock);
- if (ret)
- return ret;
-
- switch (cmd) {
- case VPFE_CMD_S_CCDC_RAW_PARAMS:
- v4l2_warn(&vpfe_dev->v4l2_dev,
- "VPFE_CMD_S_CCDC_RAW_PARAMS: experimental ioctl\n");
- if (ccdc_dev->hw_ops.set_params) {
- ret = ccdc_dev->hw_ops.set_params(param);
- if (ret) {
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
- "Error setting parameters in CCDC\n");
- goto unlock_out;
- }
- ret = vpfe_get_ccdc_image_format(vpfe_dev,
- &vpfe_dev->fmt);
- if (ret < 0) {
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
- "Invalid image format at CCDC\n");
- goto unlock_out;
- }
- } else {
- ret = -EINVAL;
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
- "VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n");
- }
- break;
- default:
- ret = -ENOTTY;
- }
-unlock_out:
- mutex_unlock(&vpfe_dev->lock);
- return ret;
-}
-
-
/* vpfe capture ioctl operations */
static const struct v4l2_ioctl_ops vpfe_ioctl_ops = {
.vidioc_querycap = vpfe_querycap,
@@ -1772,7 +1680,6 @@ static const struct v4l2_ioctl_ops vpfe_ioctl_ops = {
.vidioc_cropcap = vpfe_cropcap,
.vidioc_g_selection = vpfe_g_selection,
.vidioc_s_selection = vpfe_s_selection,
- .vidioc_default = vpfe_param_handler,
};
static struct vpfe_device *vpfe_initialize(void)
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index d78580f9e431..0ef36cec21d1 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -312,7 +312,7 @@ static void vpif_stop_streaming(struct vb2_queue *vq)
spin_unlock_irqrestore(&common->irqlock, flags);
}
-static struct vb2_ops video_qops = {
+static const struct vb2_ops video_qops = {
.queue_setup = vpif_buffer_queue_setup,
.buf_prepare = vpif_buffer_prepare,
.start_streaming = vpif_start_streaming,
@@ -1344,7 +1344,7 @@ static const struct v4l2_ioctl_ops vpif_ioctl_ops = {
};
/* vpif file operations */
-static struct v4l2_file_operations vpif_fops = {
+static const struct v4l2_file_operations vpif_fops = {
.owner = THIS_MODULE,
.open = v4l2_fh_open,
.release = vb2_fop_release,
@@ -1397,9 +1397,9 @@ static int vpif_async_bound(struct v4l2_async_notifier *notifier,
vpif_obj.config->chan_config->inputs[i].subdev_name =
(char *)to_of_node(subdev->fwnode)->full_name;
vpif_dbg(2, debug,
- "%s: setting input %d subdev_name = %s\n",
+ "%s: setting input %d subdev_name = %pOF\n",
__func__, i,
- to_of_node(subdev->fwnode)->full_name);
+ to_of_node(subdev->fwnode));
return 0;
}
}
@@ -1557,8 +1557,8 @@ vpif_capture_get_pdata(struct platform_device *pdev)
dev_err(&pdev->dev, "Could not parse the endpoint\n");
goto done;
}
- dev_dbg(&pdev->dev, "Endpoint %s, bus_width = %d\n",
- endpoint->full_name, bus_cfg.bus.parallel.bus_width);
+ dev_dbg(&pdev->dev, "Endpoint %pOF, bus_width = %d\n",
+ endpoint, bus_cfg.bus.parallel.bus_width);
flags = bus_cfg.bus.parallel.flags;
if (flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
@@ -1569,13 +1569,13 @@ vpif_capture_get_pdata(struct platform_device *pdev)
rem = of_graph_get_remote_port_parent(endpoint);
if (!rem) {
- dev_dbg(&pdev->dev, "Remote device at %s not found\n",
- endpoint->full_name);
+ dev_dbg(&pdev->dev, "Remote device at %pOF not found\n",
+ endpoint);
goto done;
}
- dev_dbg(&pdev->dev, "Remote device %s, %s found\n",
- rem->name, rem->full_name);
+ dev_dbg(&pdev->dev, "Remote device %s, %pOF found\n",
+ rem->name, rem);
sdinfo->name = rem->full_name;
pdata->asd[i] = devm_kzalloc(&pdev->dev,
@@ -1593,9 +1593,11 @@ vpif_capture_get_pdata(struct platform_device *pdev)
}
done:
- pdata->asd_sizes[0] = i;
- pdata->subdev_count = i;
- pdata->card_name = "DA850/OMAP-L138 Video Capture";
+ if (pdata) {
+ pdata->asd_sizes[0] = i;
+ pdata->subdev_count = i;
+ pdata->card_name = "DA850/OMAP-L138 Video Capture";
+ }
return pdata;
}
@@ -1719,7 +1721,6 @@ vpif_unregister:
*/
static int vpif_remove(struct platform_device *device)
{
- struct common_obj *common;
struct channel_obj *ch;
int i;
@@ -1730,7 +1731,6 @@ static int vpif_remove(struct platform_device *device)
for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) {
/* Get the pointer to the channel object */
ch = vpif_obj.dev[i];
- common = &ch->common[VPIF_VIDEO_INDEX];
/* Unregister video device */
video_unregister_device(&ch->video_dev);
kfree(vpif_obj.dev[i]);
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index b5ac6ce626b3..56fe4e5b396e 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -290,7 +290,7 @@ static void vpif_stop_streaming(struct vb2_queue *vq)
spin_unlock_irqrestore(&common->irqlock, flags);
}
-static struct vb2_ops video_qops = {
+static const struct vb2_ops video_qops = {
.queue_setup = vpif_buffer_queue_setup,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
@@ -1339,7 +1339,6 @@ vpif_unregister:
*/
static int vpif_remove(struct platform_device *device)
{
- struct common_obj *common;
struct channel_obj *ch;
int i;
@@ -1350,7 +1349,6 @@ static int vpif_remove(struct platform_device *device)
for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++) {
/* Get the pointer to the channel object */
ch = vpif_obj.dev[i];
- common = &ch->common[VPIF_VIDEO_INDEX];
/* Unregister video device */
video_unregister_device(&ch->video_dev);
kfree(vpif_obj.dev[i]);
diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c
index 33611a46ce35..2a2994ef15d5 100644
--- a/drivers/media/platform/exynos-gsc/gsc-m2m.c
+++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c
@@ -747,7 +747,7 @@ static const struct v4l2_file_operations gsc_m2m_fops = {
.mmap = gsc_m2m_mmap,
};
-static struct v4l2_m2m_ops gsc_m2m_ops = {
+static const struct v4l2_m2m_ops gsc_m2m_ops = {
.device_run = gsc_m2m_device_run,
.job_abort = gsc_m2m_job_abort,
};
diff --git a/drivers/media/platform/exynos4-is/fimc-is-i2c.c b/drivers/media/platform/exynos4-is/fimc-is-i2c.c
index 2f559663e51e..70dd4852b2b9 100644
--- a/drivers/media/platform/exynos4-is/fimc-is-i2c.c
+++ b/drivers/media/platform/exynos4-is/fimc-is-i2c.c
@@ -130,7 +130,7 @@ static int fimc_is_i2c_resume(struct device *dev)
}
#endif
-static struct dev_pm_ops fimc_is_i2c_pm_ops = {
+static const struct dev_pm_ops fimc_is_i2c_pm_ops = {
SET_RUNTIME_PM_OPS(fimc_is_i2c_runtime_suspend,
fimc_is_i2c_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(fimc_is_i2c_suspend, fimc_is_i2c_resume)
diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c
index 340d906db370..5ddb2321e9e4 100644
--- a/drivers/media/platform/exynos4-is/fimc-is.c
+++ b/drivers/media/platform/exynos4-is/fimc-is.c
@@ -174,8 +174,8 @@ static int fimc_is_parse_sensor_config(struct fimc_is *is, unsigned int index,
sensor->drvdata = fimc_is_sensor_get_drvdata(node);
if (!sensor->drvdata) {
- dev_err(&is->pdev->dev, "no driver data found for: %s\n",
- node->full_name);
+ dev_err(&is->pdev->dev, "no driver data found for: %pOF\n",
+ node);
return -EINVAL;
}
@@ -191,8 +191,8 @@ static int fimc_is_parse_sensor_config(struct fimc_is *is, unsigned int index,
/* Use MIPI-CSIS channel id to determine the ISP I2C bus index. */
ret = of_property_read_u32(port, "reg", &tmp);
if (ret < 0) {
- dev_err(&is->pdev->dev, "reg property not found at: %s\n",
- port->full_name);
+ dev_err(&is->pdev->dev, "reg property not found at: %pOF\n",
+ port);
of_node_put(port);
return ret;
}
diff --git a/drivers/media/platform/exynos4-is/fimc-isp.c b/drivers/media/platform/exynos4-is/fimc-isp.c
index 8efe9160ab34..fd793d3ac072 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp.c
+++ b/drivers/media/platform/exynos4-is/fimc-isp.c
@@ -433,7 +433,7 @@ static const struct v4l2_subdev_core_ops fimc_is_core_ops = {
.s_power = fimc_isp_subdev_s_power,
};
-static struct v4l2_subdev_ops fimc_is_subdev_ops = {
+static const struct v4l2_subdev_ops fimc_is_subdev_ops = {
.core = &fimc_is_core_ops,
.video = &fimc_is_subdev_video_ops,
.pad = &fimc_is_subdev_pad_ops,
diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c
index 7d3ec5cc6608..4a3c9948ca54 100644
--- a/drivers/media/platform/exynos4-is/fimc-lite.c
+++ b/drivers/media/platform/exynos4-is/fimc-lite.c
@@ -1361,7 +1361,7 @@ static const struct v4l2_subdev_core_ops fimc_lite_core_ops = {
.log_status = fimc_lite_log_status,
};
-static struct v4l2_subdev_ops fimc_lite_subdev_ops = {
+static const struct v4l2_subdev_ops fimc_lite_subdev_ops = {
.core = &fimc_lite_core_ops,
.video = &fimc_lite_subdev_video_ops,
.pad = &fimc_lite_subdev_pad_ops,
@@ -1493,8 +1493,7 @@ static int fimc_lite_probe(struct platform_device *pdev)
if (!drv_data || fimc->index >= drv_data->num_instances ||
fimc->index < 0) {
- dev_err(dev, "Wrong %s node alias\n",
- dev->of_node->full_name);
+ dev_err(dev, "Wrong %pOF node alias\n", dev->of_node);
return -EINVAL;
}
diff --git a/drivers/media/platform/exynos4-is/fimc-m2m.c b/drivers/media/platform/exynos4-is/fimc-m2m.c
index d8724fe9e9da..9027d0b0d2bd 100644
--- a/drivers/media/platform/exynos4-is/fimc-m2m.c
+++ b/drivers/media/platform/exynos4-is/fimc-m2m.c
@@ -704,7 +704,7 @@ static const struct v4l2_file_operations fimc_m2m_fops = {
.mmap = v4l2_m2m_fop_mmap,
};
-static struct v4l2_m2m_ops m2m_ops = {
+static const struct v4l2_m2m_ops m2m_ops = {
.device_run = fimc_device_run,
.job_abort = fimc_job_abort,
};
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index 7d1cf78846c4..d4656d5175d7 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -412,8 +412,8 @@ static int fimc_md_parse_port_node(struct fimc_md *fmd,
rem = of_graph_get_remote_port_parent(ep);
of_node_put(ep);
if (rem == NULL) {
- v4l2_info(&fmd->v4l2_dev, "Remote device at %s not found\n",
- ep->full_name);
+ v4l2_info(&fmd->v4l2_dev, "Remote device at %pOF not found\n",
+ ep);
return 0;
}
@@ -430,8 +430,8 @@ static int fimc_md_parse_port_node(struct fimc_md *fmd,
*/
pd->sensor_bus_type = FIMC_BUS_TYPE_MIPI_CSI2;
} else {
- v4l2_err(&fmd->v4l2_dev, "Wrong port id (%u) at node %s\n",
- endpoint.base.port, rem->full_name);
+ v4l2_err(&fmd->v4l2_dev, "Wrong port id (%u) at node %pOF\n",
+ endpoint.base.port, rem);
}
/*
* For FIMC-IS handled sensors, that are placed under i2c-isp device
diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c
index 98c89873c2dc..560aadabcb11 100644
--- a/drivers/media/platform/exynos4-is/mipi-csis.c
+++ b/drivers/media/platform/exynos4-is/mipi-csis.c
@@ -730,8 +730,8 @@ static int s5pcsis_parse_dt(struct platform_device *pdev,
node = of_graph_get_next_endpoint(node, NULL);
if (!node) {
- dev_err(&pdev->dev, "No port node at %s\n",
- pdev->dev.of_node->full_name);
+ dev_err(&pdev->dev, "No port node at %pOF\n",
+ pdev->dev.of_node);
return -EINVAL;
}
/* Get port node and validate MIPI-CSI channel id. */
diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c
index 97e164b2075a..fb43025df573 100644
--- a/drivers/media/platform/fsl-viu.c
+++ b/drivers/media/platform/fsl-viu.c
@@ -549,7 +549,7 @@ static void buffer_release(struct videobuf_queue *vq,
free_buffer(vq, buf);
}
-static struct videobuf_queue_ops viu_video_qops = {
+static const struct videobuf_queue_ops viu_video_qops = {
.buf_setup = buffer_setup,
.buf_prepare = buffer_prepare,
.buf_queue = buffer_queue,
@@ -1340,7 +1340,7 @@ static int viu_mmap(struct file *file, struct vm_area_struct *vma)
return ret;
}
-static struct v4l2_file_operations viu_fops = {
+static const struct v4l2_file_operations viu_fops = {
.owner = THIS_MODULE,
.open = viu_open,
.release = viu_release,
@@ -1380,7 +1380,7 @@ static const struct v4l2_ioctl_ops viu_ioctl_ops = {
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
-static struct video_device viu_template = {
+static const struct video_device viu_template = {
.name = "FSL viu",
.fops = &viu_fops,
.minor = -1,
diff --git a/drivers/media/platform/m2m-deinterlace.c b/drivers/media/platform/m2m-deinterlace.c
index 980066b8d32a..c8a12493f395 100644
--- a/drivers/media/platform/m2m-deinterlace.c
+++ b/drivers/media/platform/m2m-deinterlace.c
@@ -979,7 +979,7 @@ static const struct v4l2_file_operations deinterlace_fops = {
.mmap = deinterlace_mmap,
};
-static struct video_device deinterlace_videodev = {
+static const struct video_device deinterlace_videodev = {
.name = MEM2MEM_NAME,
.fops = &deinterlace_fops,
.ioctl_ops = &deinterlace_ioctl_ops,
@@ -988,7 +988,7 @@ static struct video_device deinterlace_videodev = {
.vfl_dir = VFL_DIR_M2M,
};
-static struct v4l2_m2m_ops m2m_ops = {
+static const struct v4l2_m2m_ops m2m_ops = {
.device_run = deinterlace_device_run,
.job_ready = deinterlace_job_ready,
.job_abort = deinterlace_job_abort,
diff --git a/drivers/media/platform/marvell-ccic/cafe-driver.c b/drivers/media/platform/marvell-ccic/cafe-driver.c
index 77890bd0deab..57d2c483ad09 100644
--- a/drivers/media/platform/marvell-ccic/cafe-driver.c
+++ b/drivers/media/platform/marvell-ccic/cafe-driver.c
@@ -326,7 +326,7 @@ static u32 cafe_smbus_func(struct i2c_adapter *adapter)
I2C_FUNC_SMBUS_WRITE_BYTE_DATA;
}
-static struct i2c_algorithm cafe_smbus_algo = {
+static const struct i2c_algorithm cafe_smbus_algo = {
.smbus_xfer = cafe_smbus_xfer,
.functionality = cafe_smbus_func
};
@@ -612,7 +612,7 @@ static int cafe_pci_resume(struct pci_dev *pdev)
#endif /* CONFIG_PM */
-static struct pci_device_id cafe_ids[] = {
+static const struct pci_device_id cafe_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL,
PCI_DEVICE_ID_MARVELL_88ALP01_CCIC) },
{ 0, }
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
index 8cac2f202099..b07a251e8857 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.c
+++ b/drivers/media/platform/marvell-ccic/mcam-core.c
@@ -1639,7 +1639,7 @@ static const struct v4l2_file_operations mcam_v4l_fops = {
* This template device holds all of those v4l2 methods; we
* clone it for specific real devices.
*/
-static struct video_device mcam_v4l_template = {
+static const struct video_device mcam_v4l_template = {
.name = "mcam",
.fops = &mcam_v4l_fops,
.ioctl_ops = &mcam_v4l_ioctl_ops,
diff --git a/drivers/media/platform/meson/Makefile b/drivers/media/platform/meson/Makefile
new file mode 100644
index 000000000000..597beb8f34d1
--- /dev/null
+++ b/drivers/media/platform/meson/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_VIDEO_MESON_AO_CEC) += ao-cec.o
diff --git a/drivers/media/platform/meson/ao-cec.c b/drivers/media/platform/meson/ao-cec.c
new file mode 100644
index 000000000000..8040a6285c3f
--- /dev/null
+++ b/drivers/media/platform/meson/ao-cec.c
@@ -0,0 +1,744 @@
+/*
+ * Driver for Amlogic Meson AO CEC Controller
+ *
+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved
+ * Copyright (C) 2017 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/reset.h>
+#include <media/cec.h>
+#include <media/cec-notifier.h>
+
+/* CEC Registers */
+
+/*
+ * [2:1] cntl_clk
+ * - 0 = Disable clk (Power-off mode)
+ * - 1 = Enable gated clock (Normal mode)
+ * - 2 = Enable free-run clk (Debug mode)
+ */
+#define CEC_GEN_CNTL_REG 0x00
+
+#define CEC_GEN_CNTL_RESET BIT(0)
+#define CEC_GEN_CNTL_CLK_DISABLE 0
+#define CEC_GEN_CNTL_CLK_ENABLE 1
+#define CEC_GEN_CNTL_CLK_ENABLE_DBG 2
+#define CEC_GEN_CNTL_CLK_CTRL_MASK GENMASK(2, 1)
+
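(Editorial aside, not added by the patch.) The cntl_clk field described above is a two-bit selector inside CEC_GEN_CNTL_REG; with <linux/bitfield.h> already included, selecting the gated clock for normal operation is a read-modify-write along these lines (base and the helper name are assumptions for illustration):

static void example_cec_enable_clk(void __iomem *base)
{
	u32 reg = readl(base + CEC_GEN_CNTL_REG);

	/* Select the gated clock (normal mode) in the [2:1] cntl_clk field. */
	reg &= ~CEC_GEN_CNTL_CLK_CTRL_MASK;
	reg |= FIELD_PREP(CEC_GEN_CNTL_CLK_CTRL_MASK, CEC_GEN_CNTL_CLK_ENABLE);
	writel(reg, base + CEC_GEN_CNTL_REG);
}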
+/*
+ * [7:0] cec_reg_addr
+ * [15:8] cec_reg_wrdata
+ * [16] cec_reg_wr
+ * - 0 = Read
+ * - 1 = Write
+ * [23] bus free
+ * [31:24] cec_reg_rddata
+ */
+#define CEC_RW_REG 0x04
+
+#define CEC_RW_ADDR GENMASK(7, 0)
+#define CEC_RW_WR_DATA GENMASK(15, 8)
+#define CEC_RW_WRITE_EN BIT(16)
+#define CEC_RW_BUS_BUSY BIT(23)
+#define CEC_RW_RD_DATA GENMASK(31, 24)
+
+/*
+ * [1] tx intr
+ * [2] rx intr
+ */
+#define CEC_INTR_MASKN_REG 0x08
+#define CEC_INTR_CLR_REG 0x0c
+#define CEC_INTR_STAT_REG 0x10
+
+#define CEC_INTR_TX BIT(1)
+#define CEC_INTR_RX BIT(2)
+
+/* CEC Commands */
+
+#define CEC_TX_MSG_0_HEADER 0x00
+#define CEC_TX_MSG_1_OPCODE 0x01
+#define CEC_TX_MSG_2_OP1 0x02
+#define CEC_TX_MSG_3_OP2 0x03
+#define CEC_TX_MSG_4_OP3 0x04
+#define CEC_TX_MSG_5_OP4 0x05
+#define CEC_TX_MSG_6_OP5 0x06
+#define CEC_TX_MSG_7_OP6 0x07
+#define CEC_TX_MSG_8_OP7 0x08
+#define CEC_TX_MSG_9_OP8 0x09
+#define CEC_TX_MSG_A_OP9 0x0A
+#define CEC_TX_MSG_B_OP10 0x0B
+#define CEC_TX_MSG_C_OP11 0x0C
+#define CEC_TX_MSG_D_OP12 0x0D
+#define CEC_TX_MSG_E_OP13 0x0E
+#define CEC_TX_MSG_F_OP14 0x0F
+#define CEC_TX_MSG_LENGTH 0x10
+#define CEC_TX_MSG_CMD 0x11
+#define CEC_TX_WRITE_BUF 0x12
+#define CEC_TX_CLEAR_BUF 0x13
+#define CEC_RX_MSG_CMD 0x14
+#define CEC_RX_CLEAR_BUF 0x15
+#define CEC_LOGICAL_ADDR0 0x16
+#define CEC_LOGICAL_ADDR1 0x17
+#define CEC_LOGICAL_ADDR2 0x18
+#define CEC_LOGICAL_ADDR3 0x19
+#define CEC_LOGICAL_ADDR4 0x1A
+#define CEC_CLOCK_DIV_H 0x1B
+#define CEC_CLOCK_DIV_L 0x1C
+#define CEC_QUIESCENT_25MS_BIT7_0 0x20
+#define CEC_QUIESCENT_25MS_BIT11_8 0x21
+#define CEC_STARTBITMINL2H_3MS5_BIT7_0 0x22
+#define CEC_STARTBITMINL2H_3MS5_BIT8 0x23
+#define CEC_STARTBITMAXL2H_3MS9_BIT7_0 0x24
+#define CEC_STARTBITMAXL2H_3MS9_BIT8 0x25
+#define CEC_STARTBITMINH_0MS6_BIT7_0 0x26
+#define CEC_STARTBITMINH_0MS6_BIT8 0x27
+#define CEC_STARTBITMAXH_1MS0_BIT7_0 0x28
+#define CEC_STARTBITMAXH_1MS0_BIT8 0x29
+#define CEC_STARTBITMINTOT_4MS3_BIT7_0 0x2A
+#define CEC_STARTBITMINTOT_4MS3_BIT9_8 0x2B
+#define CEC_STARTBITMAXTOT_4MS7_BIT7_0 0x2C
+#define CEC_STARTBITMAXTOT_4MS7_BIT9_8 0x2D
+#define CEC_LOGIC1MINL2H_0MS4_BIT7_0 0x2E
+#define CEC_LOGIC1MINL2H_0MS4_BIT8 0x2F
+#define CEC_LOGIC1MAXL2H_0MS8_BIT7_0 0x30
+#define CEC_LOGIC1MAXL2H_0MS8_BIT8 0x31
+#define CEC_LOGIC0MINL2H_1MS3_BIT7_0 0x32
+#define CEC_LOGIC0MINL2H_1MS3_BIT8 0x33
+#define CEC_LOGIC0MAXL2H_1MS7_BIT7_0 0x34
+#define CEC_LOGIC0MAXL2H_1MS7_BIT8 0x35
+#define CEC_LOGICMINTOTAL_2MS05_BIT7_0 0x36
+#define CEC_LOGICMINTOTAL_2MS05_BIT9_8 0x37
+#define CEC_LOGICMAXHIGH_2MS8_BIT7_0 0x38
+#define CEC_LOGICMAXHIGH_2MS8_BIT8 0x39
+#define CEC_LOGICERRLOW_3MS4_BIT7_0 0x3A
+#define CEC_LOGICERRLOW_3MS4_BIT8 0x3B
+#define CEC_NOMSMPPOINT_1MS05 0x3C
+#define CEC_DELCNTR_LOGICERR 0x3E
+#define CEC_TXTIME_17MS_BIT7_0 0x40
+#define CEC_TXTIME_17MS_BIT10_8 0x41
+#define CEC_TXTIME_2BIT_BIT7_0 0x42
+#define CEC_TXTIME_2BIT_BIT10_8 0x43
+#define CEC_TXTIME_4BIT_BIT7_0 0x44
+#define CEC_TXTIME_4BIT_BIT10_8 0x45
+#define CEC_STARTBITNOML2H_3MS7_BIT7_0 0x46
+#define CEC_STARTBITNOML2H_3MS7_BIT8 0x47
+#define CEC_STARTBITNOMH_0MS8_BIT7_0 0x48
+#define CEC_STARTBITNOMH_0MS8_BIT8 0x49
+#define CEC_LOGIC1NOML2H_0MS6_BIT7_0 0x4A
+#define CEC_LOGIC1NOML2H_0MS6_BIT8 0x4B
+#define CEC_LOGIC0NOML2H_1MS5_BIT7_0 0x4C
+#define CEC_LOGIC0NOML2H_1MS5_BIT8 0x4D
+#define CEC_LOGIC1NOMH_1MS8_BIT7_0 0x4E
+#define CEC_LOGIC1NOMH_1MS8_BIT8 0x4F
+#define CEC_LOGIC0NOMH_0MS9_BIT7_0 0x50
+#define CEC_LOGIC0NOMH_0MS9_BIT8 0x51
+#define CEC_LOGICERRLOW_3MS6_BIT7_0 0x52
+#define CEC_LOGICERRLOW_3MS6_BIT8 0x53
+#define CEC_CHKCONTENTION_0MS1 0x54
+#define CEC_PREPARENXTBIT_0MS05_BIT7_0 0x56
+#define CEC_PREPARENXTBIT_0MS05_BIT8 0x57
+#define CEC_NOMSMPACKPOINT_0MS45 0x58
+#define CEC_ACK0NOML2H_1MS5_BIT7_0 0x5A
+#define CEC_ACK0NOML2H_1MS5_BIT8 0x5B
+#define CEC_BUGFIX_DISABLE_0 0x60
+#define CEC_BUGFIX_DISABLE_1 0x61
+#define CEC_RX_MSG_0_HEADER 0x80
+#define CEC_RX_MSG_1_OPCODE 0x81
+#define CEC_RX_MSG_2_OP1 0x82
+#define CEC_RX_MSG_3_OP2 0x83
+#define CEC_RX_MSG_4_OP3 0x84
+#define CEC_RX_MSG_5_OP4 0x85
+#define CEC_RX_MSG_6_OP5 0x86
+#define CEC_RX_MSG_7_OP6 0x87
+#define CEC_RX_MSG_8_OP7 0x88
+#define CEC_RX_MSG_9_OP8 0x89
+#define CEC_RX_MSG_A_OP9 0x8A
+#define CEC_RX_MSG_B_OP10 0x8B
+#define CEC_RX_MSG_C_OP11 0x8C
+#define CEC_RX_MSG_D_OP12 0x8D
+#define CEC_RX_MSG_E_OP13 0x8E
+#define CEC_RX_MSG_F_OP14 0x8F
+#define CEC_RX_MSG_LENGTH 0x90
+#define CEC_RX_MSG_STATUS 0x91
+#define CEC_RX_NUM_MSG 0x92
+#define CEC_TX_MSG_STATUS 0x93
+#define CEC_TX_NUM_MSG 0x94
+
+
+/* CEC_TX_MSG_CMD definition */
+#define TX_NO_OP 0 /* No transaction */
+#define TX_REQ_CURRENT 1 /* Transmit earliest message in buffer */
+#define TX_ABORT 2 /* Abort transmitting earliest message */
+#define TX_REQ_NEXT 3 /* Overwrite earliest msg, transmit next */
+
+/* tx_msg_status definition */
+#define TX_IDLE 0 /* No transaction */
+#define TX_BUSY 1 /* Transmitter is busy */
+#define TX_DONE 2 /* Message successfully transmitted */
+#define TX_ERROR 3 /* Message transmitted with error */
+
+/* rx_msg_cmd */
+#define RX_NO_OP 0 /* No transaction */
+#define RX_ACK_CURRENT 1 /* Read earliest message in buffer */
+#define RX_DISABLE 2 /* Disable receiving latest message */
+#define RX_ACK_NEXT 3 /* Clear earliest msg, read next */
+
+/* rx_msg_status */
+#define RX_IDLE 0 /* No transaction */
+#define RX_BUSY 1 /* Receiver is busy */
+#define RX_DONE 2 /* Message has been received successfully */
+#define RX_ERROR 3 /* Message has been received with error */
+
+/* RX_CLEAR_BUF options */
+#define CLEAR_START 1
+#define CLEAR_STOP 0
+
+/* CEC_LOGICAL_ADDRx options */
+#define LOGICAL_ADDR_MASK 0xf
+#define LOGICAL_ADDR_VALID BIT(4)
+#define LOGICAL_ADDR_DISABLE 0
+
+#define CEC_CLK_RATE 32768
+
+struct meson_ao_cec_device {
+ struct platform_device *pdev;
+ void __iomem *base;
+ struct clk *core;
+ spinlock_t cec_reg_lock;
+ struct cec_notifier *notify;
+ struct cec_adapter *adap;
+ struct cec_msg rx_msg;
+};
+
+#define writel_bits_relaxed(mask, val, addr) \
+ writel_relaxed((readl_relaxed(addr) & ~(mask)) | (val), addr)
+
+static inline int meson_ao_cec_wait_busy(struct meson_ao_cec_device *ao_cec)
+{
+ ktime_t timeout = ktime_add_us(ktime_get(), 5000);
+
+ while (readl_relaxed(ao_cec->base + CEC_RW_REG) & CEC_RW_BUS_BUSY) {
+ if (ktime_compare(ktime_get(), timeout) > 0)
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void meson_ao_cec_read(struct meson_ao_cec_device *ao_cec,
+ unsigned long address, u8 *data,
+ int *res)
+{
+ unsigned long flags;
+ u32 reg = FIELD_PREP(CEC_RW_ADDR, address);
+ int ret = 0;
+
+ if (res && *res)
+ return;
+
+ spin_lock_irqsave(&ao_cec->cec_reg_lock, flags);
+
+ ret = meson_ao_cec_wait_busy(ao_cec);
+ if (ret)
+ goto read_out;
+
+ writel_relaxed(reg, ao_cec->base + CEC_RW_REG);
+
+ ret = meson_ao_cec_wait_busy(ao_cec);
+ if (ret)
+ goto read_out;
+
+ *data = FIELD_GET(CEC_RW_RD_DATA,
+ readl_relaxed(ao_cec->base + CEC_RW_REG));
+
+read_out:
+ spin_unlock_irqrestore(&ao_cec->cec_reg_lock, flags);
+
+ if (res)
+ *res = ret;
+}
+
+static void meson_ao_cec_write(struct meson_ao_cec_device *ao_cec,
+ unsigned long address, u8 data,
+ int *res)
+{
+ unsigned long flags;
+ u32 reg = FIELD_PREP(CEC_RW_ADDR, address) |
+ FIELD_PREP(CEC_RW_WR_DATA, data) |
+ CEC_RW_WRITE_EN;
+ int ret = 0;
+
+ if (res && *res)
+ return;
+
+ spin_lock_irqsave(&ao_cec->cec_reg_lock, flags);
+
+ ret = meson_ao_cec_wait_busy(ao_cec);
+ if (ret)
+ goto write_out;
+
+ writel_relaxed(reg, ao_cec->base + CEC_RW_REG);
+
+write_out:
+ spin_unlock_irqrestore(&ao_cec->cec_reg_lock, flags);
+
+ if (res)
+ *res = ret;
+}
+
+static inline void meson_ao_cec_irq_setup(struct meson_ao_cec_device *ao_cec,
+ bool enable)
+{
+ u32 cfg = CEC_INTR_TX | CEC_INTR_RX;
+
+ writel_bits_relaxed(cfg, enable ? cfg : 0,
+ ao_cec->base + CEC_INTR_MASKN_REG);
+}
+
+static inline int meson_ao_cec_clear(struct meson_ao_cec_device *ao_cec)
+{
+ int ret = 0;
+
+ meson_ao_cec_write(ao_cec, CEC_RX_MSG_CMD, RX_DISABLE, &ret);
+ meson_ao_cec_write(ao_cec, CEC_TX_MSG_CMD, TX_ABORT, &ret);
+ meson_ao_cec_write(ao_cec, CEC_RX_CLEAR_BUF, 1, &ret);
+ meson_ao_cec_write(ao_cec, CEC_TX_CLEAR_BUF, 1, &ret);
+ if (ret)
+ return ret;
+
+ udelay(100);
+
+ meson_ao_cec_write(ao_cec, CEC_RX_CLEAR_BUF, 0, &ret);
+ meson_ao_cec_write(ao_cec, CEC_TX_CLEAR_BUF, 0, &ret);
+ if (ret)
+ return ret;
+
+ udelay(100);
+
+ meson_ao_cec_write(ao_cec, CEC_RX_MSG_CMD, RX_NO_OP, &ret);
+ meson_ao_cec_write(ao_cec, CEC_TX_MSG_CMD, TX_NO_OP, &ret);
+
+ return ret;
+}
+
+static int meson_ao_cec_arbit_bit_time_set(struct meson_ao_cec_device *ao_cec,
+ unsigned int bit_set,
+ unsigned int time_set)
+{
+ int ret = 0;
+
+ switch (bit_set) {
+ case CEC_SIGNAL_FREE_TIME_RETRY:
+ meson_ao_cec_write(ao_cec, CEC_TXTIME_4BIT_BIT7_0,
+ time_set & 0xff, &ret);
+ meson_ao_cec_write(ao_cec, CEC_TXTIME_4BIT_BIT10_8,
+ (time_set >> 8) & 0x7, &ret);
+ break;
+
+ case CEC_SIGNAL_FREE_TIME_NEW_INITIATOR:
+ meson_ao_cec_write(ao_cec, CEC_TXTIME_2BIT_BIT7_0,
+ time_set & 0xff, &ret);
+ meson_ao_cec_write(ao_cec, CEC_TXTIME_2BIT_BIT10_8,
+ (time_set >> 8) & 0x7, &ret);
+ break;
+
+ case CEC_SIGNAL_FREE_TIME_NEXT_XFER:
+ meson_ao_cec_write(ao_cec, CEC_TXTIME_17MS_BIT7_0,
+ time_set & 0xff, &ret);
+ meson_ao_cec_write(ao_cec, CEC_TXTIME_17MS_BIT10_8,
+ (time_set >> 8) & 0x7, &ret);
+ break;
+ }
+
+ return ret;
+}
+
+static irqreturn_t meson_ao_cec_irq(int irq, void *data)
+{
+ struct meson_ao_cec_device *ao_cec = data;
+ u32 stat = readl_relaxed(ao_cec->base + CEC_INTR_STAT_REG);
+
+ if (stat)
+ return IRQ_WAKE_THREAD;
+
+ return IRQ_NONE;
+}
+
+static void meson_ao_cec_irq_tx(struct meson_ao_cec_device *ao_cec)
+{
+ unsigned long tx_status = 0;
+ u8 stat;
+ int ret = 0;
+
+ meson_ao_cec_read(ao_cec, CEC_TX_MSG_STATUS, &stat, &ret);
+ if (ret)
+ goto tx_reg_err;
+
+ switch (stat) {
+ case TX_DONE:
+ tx_status = CEC_TX_STATUS_OK;
+ break;
+
+ case TX_BUSY:
+ tx_status = CEC_TX_STATUS_ARB_LOST;
+ break;
+
+ case TX_IDLE:
+ tx_status = CEC_TX_STATUS_LOW_DRIVE;
+ break;
+
+ case TX_ERROR:
+ default:
+ tx_status = CEC_TX_STATUS_NACK;
+ break;
+ }
+
+ /* Clear Interruption */
+ writel_relaxed(CEC_INTR_TX, ao_cec->base + CEC_INTR_CLR_REG);
+
+ /* Stop TX */
+ meson_ao_cec_write(ao_cec, CEC_TX_MSG_CMD, TX_NO_OP, &ret);
+ if (ret)
+ goto tx_reg_err;
+
+ cec_transmit_attempt_done(ao_cec->adap, tx_status);
+ return;
+
+tx_reg_err:
+ cec_transmit_attempt_done(ao_cec->adap, CEC_TX_STATUS_ERROR);
+}
+
+static void meson_ao_cec_irq_rx(struct meson_ao_cec_device *ao_cec)
+{
+ int i, ret = 0;
+ u8 reg;
+
+ meson_ao_cec_read(ao_cec, CEC_RX_MSG_STATUS, &reg, &ret);
+ if (reg != RX_DONE)
+ goto rx_out;
+
+ meson_ao_cec_read(ao_cec, CEC_RX_NUM_MSG, &reg, &ret);
+ if (reg != 1)
+ goto rx_out;
+
+ meson_ao_cec_read(ao_cec, CEC_RX_MSG_LENGTH, &reg, &ret);
+
+ ao_cec->rx_msg.len = reg + 1;
+ if (ao_cec->rx_msg.len > CEC_MAX_MSG_SIZE)
+ ao_cec->rx_msg.len = CEC_MAX_MSG_SIZE;
+
+ for (i = 0; i < ao_cec->rx_msg.len; i++) {
+ u8 byte;
+
+ meson_ao_cec_read(ao_cec, CEC_RX_MSG_0_HEADER + i, &byte, &ret);
+
+ ao_cec->rx_msg.msg[i] = byte;
+ }
+
+ if (ret)
+ goto rx_out;
+
+ cec_received_msg(ao_cec->adap, &ao_cec->rx_msg);
+
+rx_out:
+ /* Clear Interruption */
+ writel_relaxed(CEC_INTR_RX, ao_cec->base + CEC_INTR_CLR_REG);
+
+ /* Ack RX message */
+ meson_ao_cec_write(ao_cec, CEC_RX_MSG_CMD, RX_ACK_CURRENT, &ret);
+ meson_ao_cec_write(ao_cec, CEC_RX_MSG_CMD, RX_NO_OP, &ret);
+
+ /* Clear RX buffer */
+ meson_ao_cec_write(ao_cec, CEC_RX_CLEAR_BUF, CLEAR_START, &ret);
+ meson_ao_cec_write(ao_cec, CEC_RX_CLEAR_BUF, CLEAR_STOP, &ret);
+}
+
+static irqreturn_t meson_ao_cec_irq_thread(int irq, void *data)
+{
+ struct meson_ao_cec_device *ao_cec = data;
+ u32 stat = readl_relaxed(ao_cec->base + CEC_INTR_STAT_REG);
+
+ if (stat & CEC_INTR_TX)
+ meson_ao_cec_irq_tx(ao_cec);
+
+ meson_ao_cec_irq_rx(ao_cec);
+
+ return IRQ_HANDLED;
+}
+
+static int meson_ao_cec_set_log_addr(struct cec_adapter *adap, u8 logical_addr)
+{
+ struct meson_ao_cec_device *ao_cec = adap->priv;
+ int ret = 0;
+
+ meson_ao_cec_write(ao_cec, CEC_LOGICAL_ADDR0,
+ LOGICAL_ADDR_DISABLE, &ret);
+ if (ret)
+ return ret;
+
+ ret = meson_ao_cec_clear(ao_cec);
+ if (ret)
+ return ret;
+
+ if (logical_addr == CEC_LOG_ADDR_INVALID)
+ return 0;
+
+ meson_ao_cec_write(ao_cec, CEC_LOGICAL_ADDR0,
+ logical_addr & LOGICAL_ADDR_MASK, &ret);
+ if (ret)
+ return ret;
+
+ udelay(100);
+
+ meson_ao_cec_write(ao_cec, CEC_LOGICAL_ADDR0,
+ (logical_addr & LOGICAL_ADDR_MASK) |
+ LOGICAL_ADDR_VALID, &ret);
+
+ return ret;
+}
+
+static int meson_ao_cec_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+{
+ struct meson_ao_cec_device *ao_cec = adap->priv;
+ int i, ret = 0;
+ u8 reg;
+
+ meson_ao_cec_read(ao_cec, CEC_TX_MSG_STATUS, &reg, &ret);
+ if (ret)
+ return ret;
+
+ if (reg == TX_BUSY) {
+ dev_err(&ao_cec->pdev->dev, "%s: busy TX: aborting\n",
+ __func__);
+ meson_ao_cec_write(ao_cec, CEC_TX_MSG_CMD, TX_ABORT, &ret);
+ }
+
+ for (i = 0; i < msg->len; i++) {
+ meson_ao_cec_write(ao_cec, CEC_TX_MSG_0_HEADER + i,
+ msg->msg[i], &ret);
+ }
+
+ meson_ao_cec_write(ao_cec, CEC_TX_MSG_LENGTH, msg->len - 1, &ret);
+ meson_ao_cec_write(ao_cec, CEC_TX_MSG_CMD, TX_REQ_CURRENT, &ret);
+
+ return ret;
+}
+
+static int meson_ao_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+ struct meson_ao_cec_device *ao_cec = adap->priv;
+ int ret;
+
+ meson_ao_cec_irq_setup(ao_cec, false);
+
+ writel_bits_relaxed(CEC_GEN_CNTL_RESET, CEC_GEN_CNTL_RESET,
+ ao_cec->base + CEC_GEN_CNTL_REG);
+
+ if (!enable)
+ return 0;
+
+ /* Enable gated clock (Normal mode). */
+ writel_bits_relaxed(CEC_GEN_CNTL_CLK_CTRL_MASK,
+ FIELD_PREP(CEC_GEN_CNTL_CLK_CTRL_MASK,
+ CEC_GEN_CNTL_CLK_ENABLE),
+ ao_cec->base + CEC_GEN_CNTL_REG);
+
+ udelay(100);
+
+ /* Release Reset */
+ writel_bits_relaxed(CEC_GEN_CNTL_RESET, 0,
+ ao_cec->base + CEC_GEN_CNTL_REG);
+
+ /* Clear buffers */
+ ret = meson_ao_cec_clear(ao_cec);
+ if (ret)
+ return ret;
+
+ /* CEC arbitration 3/5/7 bit time set. */
+ ret = meson_ao_cec_arbit_bit_time_set(ao_cec,
+ CEC_SIGNAL_FREE_TIME_RETRY,
+ 0x118);
+ if (ret)
+ return ret;
+ ret = meson_ao_cec_arbit_bit_time_set(ao_cec,
+ CEC_SIGNAL_FREE_TIME_NEW_INITIATOR,
+ 0x000);
+ if (ret)
+ return ret;
+ ret = meson_ao_cec_arbit_bit_time_set(ao_cec,
+ CEC_SIGNAL_FREE_TIME_NEXT_XFER,
+ 0x2aa);
+ if (ret)
+ return ret;
+
+ meson_ao_cec_irq_setup(ao_cec, true);
+
+ return 0;
+}
+
+static const struct cec_adap_ops meson_ao_cec_ops = {
+ .adap_enable = meson_ao_cec_adap_enable,
+ .adap_log_addr = meson_ao_cec_set_log_addr,
+ .adap_transmit = meson_ao_cec_transmit,
+};
+
+static int meson_ao_cec_probe(struct platform_device *pdev)
+{
+ struct meson_ao_cec_device *ao_cec;
+ struct platform_device *hdmi_dev;
+ struct device_node *np;
+ struct resource *res;
+ int ret, irq;
+
+ np = of_parse_phandle(pdev->dev.of_node, "hdmi-phandle", 0);
+ if (!np) {
+ dev_err(&pdev->dev, "Failed to find hdmi node\n");
+ return -ENODEV;
+ }
+
+ hdmi_dev = of_find_device_by_node(np);
+ if (hdmi_dev == NULL)
+ return -EPROBE_DEFER;
+
+ ao_cec = devm_kzalloc(&pdev->dev, sizeof(*ao_cec), GFP_KERNEL);
+ if (!ao_cec)
+ return -ENOMEM;
+
+ spin_lock_init(&ao_cec->cec_reg_lock);
+
+ ao_cec->notify = cec_notifier_get(&hdmi_dev->dev);
+ if (!ao_cec->notify)
+ return -ENOMEM;
+
+ ao_cec->adap = cec_allocate_adapter(&meson_ao_cec_ops, ao_cec,
+ "meson_ao_cec",
+ CEC_CAP_LOG_ADDRS |
+ CEC_CAP_TRANSMIT |
+ CEC_CAP_RC |
+ CEC_CAP_PASSTHROUGH,
+ 1); /* Use 1 for now */
+ if (IS_ERR(ao_cec->adap)) {
+ ret = PTR_ERR(ao_cec->adap);
+ goto out_probe_notify;
+ }
+
+ ao_cec->adap->owner = THIS_MODULE;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ao_cec->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ao_cec->base)) {
+ ret = PTR_ERR(ao_cec->base);
+ goto out_probe_adapter;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ ret = devm_request_threaded_irq(&pdev->dev, irq,
+ meson_ao_cec_irq,
+ meson_ao_cec_irq_thread,
+ 0, NULL, ao_cec);
+ if (ret) {
+ dev_err(&pdev->dev, "irq request failed\n");
+ goto out_probe_adapter;
+ }
+
+ ao_cec->core = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(ao_cec->core)) {
+ dev_err(&pdev->dev, "core clock request failed\n");
+ ret = PTR_ERR(ao_cec->core);
+ goto out_probe_adapter;
+ }
+
+ ret = clk_prepare_enable(ao_cec->core);
+ if (ret) {
+ dev_err(&pdev->dev, "core clock enable failed\n");
+ goto out_probe_adapter;
+ }
+
+ ret = clk_set_rate(ao_cec->core, CEC_CLK_RATE);
+ if (ret) {
+ dev_err(&pdev->dev, "core clock set rate failed\n");
+ goto out_probe_clk;
+ }
+
+ device_reset_optional(&pdev->dev);
+
+ ao_cec->pdev = pdev;
+ platform_set_drvdata(pdev, ao_cec);
+
+ ret = cec_register_adapter(ao_cec->adap, &pdev->dev);
+ if (ret < 0) {
+ cec_notifier_put(ao_cec->notify);
+ goto out_probe_clk;
+ }
+
+ /* Setup Hardware */
+ writel_relaxed(CEC_GEN_CNTL_RESET,
+ ao_cec->base + CEC_GEN_CNTL_REG);
+
+ cec_register_cec_notifier(ao_cec->adap, ao_cec->notify);
+
+ return 0;
+
+out_probe_clk:
+ clk_disable_unprepare(ao_cec->core);
+
+out_probe_adapter:
+ cec_delete_adapter(ao_cec->adap);
+
+out_probe_notify:
+ cec_notifier_put(ao_cec->notify);
+
+ dev_err(&pdev->dev, "CEC controller registration failed\n");
+
+ return ret;
+}
+
+static int meson_ao_cec_remove(struct platform_device *pdev)
+{
+ struct meson_ao_cec_device *ao_cec = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(ao_cec->core);
+
+ cec_unregister_adapter(ao_cec->adap);
+
+ cec_notifier_put(ao_cec->notify);
+
+ return 0;
+}
+
+static const struct of_device_id meson_ao_cec_of_match[] = {
+ { .compatible = "amlogic,meson-gx-ao-cec", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, meson_ao_cec_of_match);
+
+static struct platform_driver meson_ao_cec_driver = {
+ .probe = meson_ao_cec_probe,
+ .remove = meson_ao_cec_remove,
+ .driver = {
+ .name = "meson-ao-cec",
+ .of_match_table = of_match_ptr(meson_ao_cec_of_match),
+ },
+};
+
+module_platform_driver(meson_ao_cec_driver);
+
+MODULE_DESCRIPTION("Meson AO CEC Controller driver");
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_LICENSE("GPL");
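
The accessors in this new driver use an error-accumulation idiom: each meson_ao_cec_read()/meson_ao_cec_write() takes an int *res, becomes a no-op once *res is already non-zero, and stores its own result back, so a long sequence of register accesses needs only one error check at the end. A minimal sketch of the same idiom detached from the hardware; the names here are illustrative, not part of the driver:

#include <linux/errno.h>

/* Pretend register write that can fail; stands in for a bus access. */
static int hw_write(unsigned long addr, u8 data)
{
	return 0;	/* or e.g. -ETIMEDOUT on a stuck bus */
}

static void chained_write(unsigned long addr, u8 data, int *res)
{
	if (res && *res)	/* an earlier step failed: skip quietly */
		return;

	if (res)
		*res = hw_write(addr, data);
}

static int program_sequence(void)
{
	int ret = 0;

	chained_write(0x16, 0x00, &ret);
	chained_write(0x14, 0x02, &ret);
	chained_write(0x11, 0x02, &ret);

	return ret;	/* a single check covers the whole sequence */
}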
diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
index 451a54039e65..226f90886484 100644
--- a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
+++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
@@ -756,7 +756,7 @@ static void mtk_jpeg_stop_streaming(struct vb2_queue *q)
pm_runtime_put_sync(ctx->jpeg->dev);
}
-static struct vb2_ops mtk_jpeg_qops = {
+static const struct vb2_ops mtk_jpeg_qops = {
.queue_setup = mtk_jpeg_queue_setup,
.buf_prepare = mtk_jpeg_buf_prepare,
.buf_queue = mtk_jpeg_buf_queue,
@@ -865,7 +865,7 @@ static void mtk_jpeg_job_abort(void *priv)
{
}
-static struct v4l2_m2m_ops mtk_jpeg_m2m_ops = {
+static const struct v4l2_m2m_ops mtk_jpeg_m2m_ops = {
.device_run = mtk_jpeg_device_run,
.job_ready = mtk_jpeg_job_ready,
.job_abort = mtk_jpeg_job_abort,
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_comp.c b/drivers/media/platform/mtk-mdp/mtk_mdp_comp.c
index aa8f9fd1f1a2..03aba03a24c8 100644
--- a/drivers/media/platform/mtk-mdp/mtk_mdp_comp.c
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_comp.c
@@ -75,7 +75,7 @@ void mtk_mdp_comp_clock_on(struct device *dev, struct mtk_mdp_comp *comp)
}
for (i = 0; i < ARRAY_SIZE(comp->clk); i++) {
- if (!comp->clk[i])
+ if (IS_ERR(comp->clk[i]))
continue;
err = clk_prepare_enable(comp->clk[i]);
if (err)
@@ -90,7 +90,7 @@ void mtk_mdp_comp_clock_off(struct device *dev, struct mtk_mdp_comp *comp)
int i;
for (i = 0; i < ARRAY_SIZE(comp->clk); i++) {
- if (!comp->clk[i])
+ if (IS_ERR(comp->clk[i]))
continue;
clk_disable_unprepare(comp->clk[i]);
}
@@ -134,15 +134,13 @@ int mtk_mdp_comp_init(struct device *dev, struct device_node *node,
larb_node = of_parse_phandle(node, "mediatek,larb", 0);
if (!larb_node) {
dev_err(dev,
- "Missing mediadek,larb phandle in %s node\n",
- node->full_name);
+ "Missing mediadek,larb phandle in %pOF node\n", node);
return -EINVAL;
}
larb_pdev = of_find_device_by_node(larb_node);
if (!larb_pdev) {
- dev_warn(dev, "Waiting for larb device %s\n",
- larb_node->full_name);
+ dev_warn(dev, "Waiting for larb device %pOF\n", larb_node);
of_node_put(larb_node);
return -EPROBE_DEFER;
}
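
Several hunks in this series replace node->full_name with the %pOF printk specifier: full_name no longer carries the whole path on current kernels, while %pOF asks the printk core to format the device_node itself. A hedged sketch of the conversion; the message text is illustrative:

#include <linux/of.h>
#include <linux/device.h>

static void report_node(struct device *dev, struct device_node *np)
{
	/* Old style: relied on full_name holding the complete path. */
	/* dev_warn(dev, "node %s not usable\n", np->full_name); */

	/* New style: let vsprintf walk the node and print its path. */
	dev_warn(dev, "node %pOF not usable\n", np);
}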
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_core.c b/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
index 81347558b24a..bbb24fb95b95 100644
--- a/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
@@ -137,16 +137,16 @@ static int mtk_mdp_probe(struct platform_device *pdev)
continue;
if (!of_device_is_available(node)) {
- dev_err(dev, "Skipping disabled component %s\n",
- node->full_name);
+ dev_err(dev, "Skipping disabled component %pOF\n",
+ node);
continue;
}
comp_type = (enum mtk_mdp_comp_type)of_id->data;
comp_id = mtk_mdp_comp_get_id(dev, node, comp_type);
if (comp_id < 0) {
- dev_warn(dev, "Skipping unknown component %s\n",
- node->full_name);
+ dev_warn(dev, "Skipping unknown component %pOF\n",
+ node);
continue;
}
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c
index 13afe48b9dc5..583d47724ee8 100644
--- a/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c
@@ -621,7 +621,7 @@ static void mtk_mdp_m2m_buf_queue(struct vb2_buffer *vb)
v4l2_m2m_buf_queue(ctx->m2m_ctx, to_vb2_v4l2_buffer(vb));
}
-static struct vb2_ops mtk_mdp_m2m_qops = {
+static const struct vb2_ops mtk_mdp_m2m_qops = {
.queue_setup = mtk_mdp_m2m_queue_setup,
.buf_prepare = mtk_mdp_m2m_buf_prepare,
.buf_queue = mtk_mdp_m2m_buf_queue,
@@ -1225,7 +1225,7 @@ static const struct v4l2_file_operations mtk_mdp_m2m_fops = {
.mmap = v4l2_m2m_fop_mmap,
};
-static struct v4l2_m2m_ops mtk_mdp_m2m_ops = {
+static const struct v4l2_m2m_ops mtk_mdp_m2m_ops = {
.device_run = mtk_mdp_m2m_device_run,
.job_abort = mtk_mdp_m2m_job_abort,
};
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
index 1daee1207469..bc8349bc2e80 100644
--- a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
@@ -31,6 +31,7 @@
#define MAX_NUM_REF_FRAMES 8
#define VP9_MAX_FRM_BUF_NUM 9
#define VP9_MAX_FRM_BUF_NODE_NUM (VP9_MAX_FRM_BUF_NUM * 2)
+#define VP9_SEG_ID_SZ 0x12000
/**
* struct vp9_dram_buf - contains buffer info for vpu
@@ -132,6 +133,7 @@ struct vp9_sf_ref_fb {
* @frm_num : decoded frame number, include sub-frame count (AP-R, VPU-W)
* @mv_buf : motion vector working buffer (AP-W, VPU-R)
* @frm_refs : maintain three reference buffer info (AP-R/W, VPU-R/W)
+ * @seg_id_buf : segmentation map working buffer (AP-W, VPU-R)
*/
struct vdec_vp9_vsi {
unsigned char sf_bs_buf[VP9_SUPER_FRAME_BS_SZ];
@@ -167,11 +169,14 @@ struct vdec_vp9_vsi {
struct vp9_dram_buf mv_buf;
struct vp9_ref_buf frm_refs[REFS_PER_FRAME];
+ struct vp9_dram_buf seg_id_buf;
+
};
/*
* struct vdec_vp9_inst - vp9 decode instance
* @mv_buf : working buffer for mv
+ * @seg_id_buf : working buffer for segmentation map
* @dec_fb : vdec_fb node to link fb to different fb_xxx_list
* @available_fb_node_list : current available vdec_fb node
* @fb_use_list : current used or referenced vdec_fb
@@ -187,6 +192,7 @@ struct vdec_vp9_vsi {
*/
struct vdec_vp9_inst {
struct mtk_vcodec_mem mv_buf;
+ struct mtk_vcodec_mem seg_id_buf;
struct vdec_fb_node dec_fb[VP9_MAX_FRM_BUF_NODE_NUM];
struct list_head available_fb_node_list;
@@ -388,13 +394,11 @@ static bool vp9_alloc_work_buf(struct vdec_vp9_inst *inst)
vsi->buf_h);
mem = &inst->mv_buf;
-
if (mem->va)
mtk_vcodec_mem_free(inst->ctx, mem);
mem->size = ((vsi->buf_w / 64) *
(vsi->buf_h / 64) + 2) * 36 * 16;
-
result = mtk_vcodec_mem_alloc(inst->ctx, mem);
if (result) {
mem->size = 0;
@@ -406,6 +410,24 @@ static bool vp9_alloc_work_buf(struct vdec_vp9_inst *inst)
vsi->mv_buf.pa = (unsigned long)mem->dma_addr;
vsi->mv_buf.sz = (unsigned int)mem->size;
+
+ mem = &inst->seg_id_buf;
+ if (mem->va)
+ mtk_vcodec_mem_free(inst->ctx, mem);
+
+ mem->size = VP9_SEG_ID_SZ;
+ result = mtk_vcodec_mem_alloc(inst->ctx, mem);
+ if (result) {
+ mem->size = 0;
+ mtk_vcodec_err(inst, "Cannot allocate seg_id_buf");
+ return false;
+ }
+ /* Set the va again */
+ vsi->seg_id_buf.va = (unsigned long)mem->va;
+ vsi->seg_id_buf.pa = (unsigned long)mem->dma_addr;
+ vsi->seg_id_buf.sz = (unsigned int)mem->size;
+
+
vp9_free_all_sf_ref_fb(inst);
vsi->sf_next_ref_fb_idx = vp9_get_sf_ref_fb(inst);
@@ -653,6 +675,12 @@ static void vp9_reset(struct vdec_vp9_inst *inst)
inst->vsi->mv_buf.va = (unsigned long)inst->mv_buf.va;
inst->vsi->mv_buf.pa = (unsigned long)inst->mv_buf.dma_addr;
inst->vsi->mv_buf.sz = (unsigned long)inst->mv_buf.size;
+
+ /* Set the va again, since vpu_dec_reset will clear seg_id_buf in vpu */
+ inst->vsi->seg_id_buf.va = (unsigned long)inst->seg_id_buf.va;
+ inst->vsi->seg_id_buf.pa = (unsigned long)inst->seg_id_buf.dma_addr;
+ inst->vsi->seg_id_buf.sz = (unsigned long)inst->seg_id_buf.size;
+
}
static void init_all_fb_lists(struct vdec_vp9_inst *inst)
@@ -752,6 +780,10 @@ static void vdec_vp9_deinit(unsigned long h_vdec)
if (mem->va)
mtk_vcodec_mem_free(inst->ctx, mem);
+ mem = &inst->seg_id_buf;
+ if (mem->va)
+ mtk_vcodec_mem_free(inst->ctx, mem);
+
vp9_free_all_sf_ref_fb(inst);
vp9_free_inst(inst);
}
@@ -848,6 +880,7 @@ static int vdec_vp9_decode(unsigned long h_vdec, struct mtk_vcodec_mem *bs,
vsi->sf_frm_sz[idx]);
}
}
+ memset(inst->seg_id_buf.va, 0, inst->seg_id_buf.size);
ret = vpu_dec_start(&inst->vpu, data, 3);
if (ret) {
mtk_vcodec_err(inst, "vpu_dec_start failed");
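
The new segmentation-map buffer follows the same handshake as the existing mv_buf: the AP allocates DMA memory, mirrors the va/pa/size into the VSI structure shared with the VPU firmware, and must re-publish those fields after a VPU reset clears them. A consolidated sketch of the allocate-and-publish step, assuming the driver-internal helpers and types shown in the hunk above:

/* Sketch only: mtk_vcodec_mem_alloc(), the inst/vsi layout and
 * VP9_SEG_ID_SZ all come from the driver code above. */
static int alloc_and_publish_seg_id(struct vdec_vp9_inst *inst)
{
	struct mtk_vcodec_mem *mem = &inst->seg_id_buf;
	int err;

	if (mem->va)				/* re-allocation path */
		mtk_vcodec_mem_free(inst->ctx, mem);

	mem->size = VP9_SEG_ID_SZ;
	err = mtk_vcodec_mem_alloc(inst->ctx, mem);
	if (err) {
		mem->size = 0;
		return err;
	}

	/* Publish the buffer to the VPU through the shared VSI. */
	inst->vsi->seg_id_buf.va = (unsigned long)mem->va;
	inst->vsi->seg_id_buf.pa = (unsigned long)mem->dma_addr;
	inst->vsi->seg_id_buf.sz = (unsigned int)mem->size;

	return 0;
}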
diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c
index 03e47e0f778d..4a2b1afa19c4 100644
--- a/drivers/media/platform/mx2_emmaprp.c
+++ b/drivers/media/platform/mx2_emmaprp.c
@@ -873,7 +873,7 @@ static const struct v4l2_file_operations emmaprp_fops = {
.mmap = emmaprp_mmap,
};
-static struct video_device emmaprp_videodev = {
+static const struct video_device emmaprp_videodev = {
.name = MEM2MEM_NAME,
.fops = &emmaprp_fops,
.ioctl_ops = &emmaprp_ioctl_ops,
@@ -882,7 +882,7 @@ static struct video_device emmaprp_videodev = {
.vfl_dir = VFL_DIR_M2M,
};
-static struct v4l2_m2m_ops m2m_ops = {
+static const struct v4l2_m2m_ops m2m_ops = {
.device_run = emmaprp_device_run,
.job_abort = emmaprp_job_abort,
.lock = emmaprp_lock,
@@ -942,6 +942,8 @@ static int emmaprp_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pcdev);
irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
ret = devm_request_irq(&pdev->dev, irq, emmaprp_irq, 0,
dev_name(&pdev->dev), pcdev);
if (ret)
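
The emmaprp fix above closes a common probe-time hole: platform_get_irq() returns a negative errno when the interrupt is missing or deferred, and passing that value straight to devm_request_irq() hides the real failure. A short sketch of the checked pattern, with a placeholder handler:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t example_irq(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
	int irq, ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)		/* includes -EPROBE_DEFER */
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, example_irq, 0,
			       dev_name(&pdev->dev), NULL);
	if (ret)
		return ret;

	return 0;
}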
diff --git a/drivers/media/platform/omap/omap_vout_vrfb.c b/drivers/media/platform/omap/omap_vout_vrfb.c
index 92c4e1826356..123c2b26a933 100644
--- a/drivers/media/platform/omap/omap_vout_vrfb.c
+++ b/drivers/media/platform/omap/omap_vout_vrfb.c
@@ -12,11 +12,11 @@
#include <linux/sched.h>
#include <linux/platform_device.h>
#include <linux/videodev2.h>
+#include <linux/slab.h>
#include <media/videobuf-dma-contig.h>
#include <media/v4l2-device.h>
-#include <linux/omap-dma.h>
#include <video/omapvrfb.h>
#include "omap_voutdef.h"
@@ -63,7 +63,7 @@ static int omap_vout_allocate_vrfb_buffers(struct omap_vout_device *vout,
/*
* Wakes up the application once the DMA transfer to VRFB space is completed.
*/
-static void omap_vout_vrfb_dma_tx_callback(int lch, u16 ch_status, void *data)
+static void omap_vout_vrfb_dma_tx_callback(void *data)
{
struct vid_vrfb_dma *t = (struct vid_vrfb_dma *) data;
@@ -94,6 +94,7 @@ int omap_vout_setup_vrfb_bufs(struct platform_device *pdev, int vid_num,
int ret = 0, i, j;
struct omap_vout_device *vout;
struct video_device *vfd;
+ dma_cap_mask_t mask;
int image_width, image_height;
int vrfb_num_bufs = VRFB_NUM_BUFS;
struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
@@ -131,18 +132,27 @@ int omap_vout_setup_vrfb_bufs(struct platform_device *pdev, int vid_num,
/*
* Request and Initialize DMA, for DMA based VRFB transfer
*/
- vout->vrfb_dma_tx.dev_id = OMAP_DMA_NO_DEVICE;
- vout->vrfb_dma_tx.dma_ch = -1;
- vout->vrfb_dma_tx.req_status = DMA_CHAN_ALLOTED;
- ret = omap_request_dma(vout->vrfb_dma_tx.dev_id, "VRFB DMA TX",
- omap_vout_vrfb_dma_tx_callback,
- (void *) &vout->vrfb_dma_tx, &vout->vrfb_dma_tx.dma_ch);
- if (ret < 0) {
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_INTERLEAVE, mask);
+ vout->vrfb_dma_tx.chan = dma_request_chan_by_mask(&mask);
+ if (IS_ERR(vout->vrfb_dma_tx.chan)) {
vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED;
+ } else {
+ size_t xt_size = sizeof(struct dma_interleaved_template) +
+ sizeof(struct data_chunk);
+
+ vout->vrfb_dma_tx.xt = kzalloc(xt_size, GFP_KERNEL);
+ if (!vout->vrfb_dma_tx.xt) {
+ dma_release_channel(vout->vrfb_dma_tx.chan);
+ vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED;
+ }
+ }
+
+ if (vout->vrfb_dma_tx.req_status == DMA_CHAN_NOT_ALLOTED)
dev_info(&pdev->dev,
": failed to allocate DMA Channel for video%d\n",
vfd->minor);
- }
+
init_waitqueue_head(&vout->vrfb_dma_tx.wait);
/* statically allocated the VRFB buffer is done through
@@ -177,7 +187,9 @@ void omap_vout_release_vrfb(struct omap_vout_device *vout)
if (vout->vrfb_dma_tx.req_status == DMA_CHAN_ALLOTED) {
vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED;
- omap_free_dma(vout->vrfb_dma_tx.dma_ch);
+ kfree(vout->vrfb_dma_tx.xt);
+ dmaengine_terminate_sync(vout->vrfb_dma_tx.chan);
+ dma_release_channel(vout->vrfb_dma_tx.chan);
}
}
@@ -219,70 +231,84 @@ int omap_vout_vrfb_buffer_setup(struct omap_vout_device *vout,
}
int omap_vout_prepare_vrfb(struct omap_vout_device *vout,
- struct videobuf_buffer *vb)
+ struct videobuf_buffer *vb)
{
- dma_addr_t dmabuf;
- struct vid_vrfb_dma *tx;
+ struct dma_async_tx_descriptor *tx;
+ enum dma_ctrl_flags flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
+ struct dma_chan *chan = vout->vrfb_dma_tx.chan;
+ struct dma_device *dmadev = chan->device;
+ struct dma_interleaved_template *xt = vout->vrfb_dma_tx.xt;
+ dma_cookie_t cookie;
+ enum dma_status status;
enum dss_rotation rotation;
- u32 dest_frame_index = 0, src_element_index = 0;
- u32 dest_element_index = 0, src_frame_index = 0;
- u32 elem_count = 0, frame_count = 0, pixsize = 2;
+ size_t dst_icg;
+ u32 pixsize;
if (!is_rotation_enabled(vout))
return 0;
- dmabuf = vout->buf_phy_addr[vb->i];
/* If rotation is enabled, copy input buffer into VRFB
* memory space using DMA. We are copying input buffer
* into VRFB memory space of desired angle and DSS will
* read image VRFB memory for 0 degree angle
*/
+
pixsize = vout->bpp * vout->vrfb_bpp;
- /*
- * DMA transfer in double index mode
- */
+ dst_icg = ((MAX_PIXELS_PER_LINE * pixsize) -
+ (vout->pix.width * vout->bpp)) + 1;
+
+ xt->src_start = vout->buf_phy_addr[vb->i];
+ xt->dst_start = vout->vrfb_context[vb->i].paddr[0];
+
+ xt->numf = vout->pix.height;
+ xt->frame_size = 1;
+ xt->sgl[0].size = vout->pix.width * vout->bpp;
+ xt->sgl[0].icg = dst_icg;
+
+ xt->dir = DMA_MEM_TO_MEM;
+ xt->src_sgl = false;
+ xt->src_inc = true;
+ xt->dst_sgl = true;
+ xt->dst_inc = true;
+
+ tx = dmadev->device_prep_interleaved_dma(chan, xt, flags);
+ if (tx == NULL) {
+ pr_err("%s: DMA interleaved prep error\n", __func__);
+ return -EINVAL;
+ }
- /* Frame index */
- dest_frame_index = ((MAX_PIXELS_PER_LINE * pixsize) -
- (vout->pix.width * vout->bpp)) + 1;
-
- /* Source and destination parameters */
- src_element_index = 0;
- src_frame_index = 0;
- dest_element_index = 1;
- /* Number of elements per frame */
- elem_count = vout->pix.width * vout->bpp;
- frame_count = vout->pix.height;
- tx = &vout->vrfb_dma_tx;
- tx->tx_status = 0;
- omap_set_dma_transfer_params(tx->dma_ch, OMAP_DMA_DATA_TYPE_S32,
- (elem_count / 4), frame_count, OMAP_DMA_SYNC_ELEMENT,
- tx->dev_id, 0x0);
- /* src_port required only for OMAP1 */
- omap_set_dma_src_params(tx->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
- dmabuf, src_element_index, src_frame_index);
- /*set dma source burst mode for VRFB */
- omap_set_dma_src_burst_mode(tx->dma_ch, OMAP_DMA_DATA_BURST_16);
- rotation = calc_rotation(vout);
+ tx->callback = omap_vout_vrfb_dma_tx_callback;
+ tx->callback_param = &vout->vrfb_dma_tx;
+
+ cookie = dmaengine_submit(tx);
+ if (dma_submit_error(cookie)) {
+ pr_err("%s: dmaengine_submit failed (%d)\n", __func__, cookie);
+ return -EINVAL;
+ }
- /* dest_port required only for OMAP1 */
- omap_set_dma_dest_params(tx->dma_ch, 0, OMAP_DMA_AMODE_DOUBLE_IDX,
- vout->vrfb_context[vb->i].paddr[0], dest_element_index,
- dest_frame_index);
- /*set dma dest burst mode for VRFB */
- omap_set_dma_dest_burst_mode(tx->dma_ch, OMAP_DMA_DATA_BURST_16);
- omap_dma_set_global_params(DMA_DEFAULT_ARB_RATE, 0x20, 0);
+ vout->vrfb_dma_tx.tx_status = 0;
+ dma_async_issue_pending(chan);
- omap_start_dma(tx->dma_ch);
- wait_event_interruptible_timeout(tx->wait, tx->tx_status == 1,
+ wait_event_interruptible_timeout(vout->vrfb_dma_tx.wait,
+ vout->vrfb_dma_tx.tx_status == 1,
VRFB_TX_TIMEOUT);
- if (tx->tx_status == 0) {
- omap_stop_dma(tx->dma_ch);
+ status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
+
+ if (vout->vrfb_dma_tx.tx_status == 0) {
+ pr_err("%s: Timeout while waiting for DMA\n", __func__);
+ dmaengine_terminate_sync(chan);
+ return -EINVAL;
+ } else if (status != DMA_COMPLETE) {
+ pr_err("%s: DMA completion %s status\n", __func__,
+ status == DMA_ERROR ? "error" : "busy");
+ dmaengine_terminate_sync(chan);
return -EINVAL;
}
+
/* Store buffers physical address into an array. Addresses
* from this array will be used to configure DSS */
+ rotation = calc_rotation(vout);
vout->queued_buf_addr[vb->i] = (u8 *)
vout->vrfb_context[vb->i].paddr[rotation];
return 0;
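
The VRFB path above drops the legacy OMAP-specific DMA API for a generic dmaengine interleaved transfer: one template describing numf lines, each sgl[0].size bytes long, with an inter-chunk gap (icg) on the destination so every line lands on the VRFB stride. A condensed, hedged sketch of the submit sequence the new code performs (the driver calls the device_prep_interleaved_dma op directly; the dmaengine_prep_interleaved_dma() wrapper used here is equivalent, and error handling is trimmed):

#include <linux/dmaengine.h>

/* Assumes 'chan' came from dma_request_chan_by_mask(DMA_INTERLEAVE)
 * and 'xt' was allocated with room for one struct data_chunk. */
static int vrfb_copy_frame(struct dma_chan *chan,
			   struct dma_interleaved_template *xt,
			   dma_addr_t src, dma_addr_t dst,
			   size_t line_bytes, size_t lines, size_t dst_icg)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	xt->src_start = src;
	xt->dst_start = dst;
	xt->numf = lines;		/* one "frame" per line here */
	xt->frame_size = 1;		/* a single chunk per frame */
	xt->sgl[0].size = line_bytes;
	xt->sgl[0].icg = dst_icg;	/* gap to the next VRFB line */
	xt->dir = DMA_MEM_TO_MEM;
	xt->src_inc = true;
	xt->dst_inc = true;
	xt->src_sgl = false;		/* icg applies to dst only */
	xt->dst_sgl = true;

	tx = dmaengine_prep_interleaved_dma(chan, xt,
					    DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx)
		return -EINVAL;

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -EINVAL;

	dma_async_issue_pending(chan);
	return 0;
}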
diff --git a/drivers/media/platform/omap/omap_voutdef.h b/drivers/media/platform/omap/omap_voutdef.h
index 80c79fabdf95..56b630b1c8b4 100644
--- a/drivers/media/platform/omap/omap_voutdef.h
+++ b/drivers/media/platform/omap/omap_voutdef.h
@@ -14,6 +14,7 @@
#include <media/v4l2-ctrls.h>
#include <video/omapfb_dss.h>
#include <video/omapvrfb.h>
+#include <linux/dmaengine.h>
#define YUYV_BPP 2
#define RGB565_BPP 2
@@ -81,8 +82,9 @@ enum vout_rotaion_type {
* for VRFB hidden buffer
*/
struct vid_vrfb_dma {
- int dev_id;
- int dma_ch;
+ struct dma_chan *chan;
+ struct dma_interleaved_template *xt;
+
int req_status;
int tx_status;
wait_queue_head_t wait;
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 9df64c189883..1a428fe9f070 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -1859,6 +1859,7 @@ static void isp_cleanup_modules(struct isp_device *isp)
omap3isp_ccdc_cleanup(isp);
omap3isp_ccp2_cleanup(isp);
omap3isp_csi2_cleanup(isp);
+ omap3isp_csiphy_cleanup(isp);
}
static int isp_initialize_modules(struct isp_device *isp)
@@ -1868,7 +1869,7 @@ static int isp_initialize_modules(struct isp_device *isp)
ret = omap3isp_csiphy_init(isp);
if (ret < 0) {
dev_err(isp->dev, "CSI PHY initialization failed\n");
- goto error_csiphy;
+ return ret;
}
ret = omap3isp_csi2_init(isp);
@@ -1879,7 +1880,8 @@ static int isp_initialize_modules(struct isp_device *isp)
ret = omap3isp_ccp2_init(isp);
if (ret < 0) {
- dev_err(isp->dev, "CCP2 initialization failed\n");
+ if (ret != -EPROBE_DEFER)
+ dev_err(isp->dev, "CCP2 initialization failed\n");
goto error_ccp2;
}
@@ -1936,7 +1938,8 @@ error_ccdc:
error_ccp2:
omap3isp_csi2_cleanup(isp);
error_csi2:
-error_csiphy:
+ omap3isp_csiphy_cleanup(isp);
+
return ret;
}
@@ -2015,13 +2018,14 @@ static int isp_fwnode_parse(struct device *dev, struct fwnode_handle *fwnode,
struct v4l2_fwnode_endpoint vep;
unsigned int i;
int ret;
+ bool csi1 = false;
ret = v4l2_fwnode_endpoint_parse(fwnode, &vep);
if (ret)
return ret;
- dev_dbg(dev, "parsing endpoint %s, interface %u\n",
- to_of_node(fwnode)->full_name, vep.base.port);
+ dev_dbg(dev, "parsing endpoint %pOF, interface %u\n",
+ to_of_node(fwnode), vep.base.port);
switch (vep.base.port) {
case ISP_OF_PHY_PARALLEL:
@@ -2039,48 +2043,102 @@ static int isp_fwnode_parse(struct device *dev, struct fwnode_handle *fwnode,
!!(vep.bus.parallel.flags & V4L2_MBUS_FIELD_EVEN_LOW);
buscfg->bus.parallel.data_pol =
!!(vep.bus.parallel.flags & V4L2_MBUS_DATA_ACTIVE_LOW);
+ buscfg->bus.parallel.bt656 = vep.bus_type == V4L2_MBUS_BT656;
break;
case ISP_OF_PHY_CSIPHY1:
case ISP_OF_PHY_CSIPHY2:
- /* FIXME: always assume CSI-2 for now. */
+ switch (vep.bus_type) {
+ case V4L2_MBUS_CCP2:
+ case V4L2_MBUS_CSI1:
+ dev_dbg(dev, "CSI-1/CCP-2 configuration\n");
+ csi1 = true;
+ break;
+ case V4L2_MBUS_CSI2:
+ dev_dbg(dev, "CSI-2 configuration\n");
+ csi1 = false;
+ break;
+ default:
+ dev_err(dev, "unsupported bus type %u\n",
+ vep.bus_type);
+ return -EINVAL;
+ }
+
switch (vep.base.port) {
case ISP_OF_PHY_CSIPHY1:
- buscfg->interface = ISP_INTERFACE_CSI2C_PHY1;
+ if (csi1)
+ buscfg->interface = ISP_INTERFACE_CCP2B_PHY1;
+ else
+ buscfg->interface = ISP_INTERFACE_CSI2C_PHY1;
break;
case ISP_OF_PHY_CSIPHY2:
- buscfg->interface = ISP_INTERFACE_CSI2A_PHY2;
+ if (csi1)
+ buscfg->interface = ISP_INTERFACE_CCP2B_PHY2;
+ else
+ buscfg->interface = ISP_INTERFACE_CSI2A_PHY2;
break;
}
- buscfg->bus.csi2.lanecfg.clk.pos = vep.bus.mipi_csi2.clock_lane;
- buscfg->bus.csi2.lanecfg.clk.pol =
- vep.bus.mipi_csi2.lane_polarities[0];
- dev_dbg(dev, "clock lane polarity %u, pos %u\n",
- buscfg->bus.csi2.lanecfg.clk.pol,
- buscfg->bus.csi2.lanecfg.clk.pos);
-
- for (i = 0; i < ISP_CSIPHY2_NUM_DATA_LANES; i++) {
- buscfg->bus.csi2.lanecfg.data[i].pos =
- vep.bus.mipi_csi2.data_lanes[i];
- buscfg->bus.csi2.lanecfg.data[i].pol =
- vep.bus.mipi_csi2.lane_polarities[i + 1];
- dev_dbg(dev, "data lane %u polarity %u, pos %u\n", i,
- buscfg->bus.csi2.lanecfg.data[i].pol,
- buscfg->bus.csi2.lanecfg.data[i].pos);
+ if (csi1) {
+ buscfg->bus.ccp2.lanecfg.clk.pos =
+ vep.bus.mipi_csi1.clock_lane;
+ buscfg->bus.ccp2.lanecfg.clk.pol =
+ vep.bus.mipi_csi1.lane_polarity[0];
+ dev_dbg(dev, "clock lane polarity %u, pos %u\n",
+ buscfg->bus.ccp2.lanecfg.clk.pol,
+ buscfg->bus.ccp2.lanecfg.clk.pos);
+
+ buscfg->bus.ccp2.lanecfg.data[0].pos =
+ vep.bus.mipi_csi1.data_lane;
+ buscfg->bus.ccp2.lanecfg.data[0].pol =
+ vep.bus.mipi_csi1.lane_polarity[1];
+
+ dev_dbg(dev, "data lane polarity %u, pos %u\n",
+ buscfg->bus.ccp2.lanecfg.data[0].pol,
+ buscfg->bus.ccp2.lanecfg.data[0].pos);
+
+ buscfg->bus.ccp2.strobe_clk_pol =
+ vep.bus.mipi_csi1.clock_inv;
+ buscfg->bus.ccp2.phy_layer = vep.bus.mipi_csi1.strobe;
+ buscfg->bus.ccp2.ccp2_mode =
+ vep.bus_type == V4L2_MBUS_CCP2;
+ buscfg->bus.ccp2.vp_clk_pol = 1;
+
+ buscfg->bus.ccp2.crc = 1;
+ } else {
+ buscfg->bus.csi2.lanecfg.clk.pos =
+ vep.bus.mipi_csi2.clock_lane;
+ buscfg->bus.csi2.lanecfg.clk.pol =
+ vep.bus.mipi_csi2.lane_polarities[0];
+ dev_dbg(dev, "clock lane polarity %u, pos %u\n",
+ buscfg->bus.csi2.lanecfg.clk.pol,
+ buscfg->bus.csi2.lanecfg.clk.pos);
+
+ buscfg->bus.csi2.num_data_lanes =
+ vep.bus.mipi_csi2.num_data_lanes;
+
+ for (i = 0; i < buscfg->bus.csi2.num_data_lanes; i++) {
+ buscfg->bus.csi2.lanecfg.data[i].pos =
+ vep.bus.mipi_csi2.data_lanes[i];
+ buscfg->bus.csi2.lanecfg.data[i].pol =
+ vep.bus.mipi_csi2.lane_polarities[i + 1];
+ dev_dbg(dev,
+ "data lane %u polarity %u, pos %u\n", i,
+ buscfg->bus.csi2.lanecfg.data[i].pol,
+ buscfg->bus.csi2.lanecfg.data[i].pos);
+ }
+ /*
+ * FIXME: now we assume the CRC is always there.
+ * Implement a way to obtain this information from the
+ * sensor. Frame descriptors, perhaps?
+ */
+ buscfg->bus.csi2.crc = 1;
}
-
- /*
- * FIXME: now we assume the CRC is always there.
- * Implement a way to obtain this information from the
- * sensor. Frame descriptors, perhaps?
- */
- buscfg->bus.csi2.crc = 1;
break;
default:
- dev_warn(dev, "%s: invalid interface %u\n",
- to_of_node(fwnode)->full_name, vep.base.port);
- break;
+ dev_warn(dev, "%pOF: invalid interface %u\n",
+ to_of_node(fwnode), vep.base.port);
+ return -EINVAL;
}
return 0;
@@ -2105,10 +2163,12 @@ static int isp_fwnodes_parse(struct device *dev,
if (!isd)
goto error;
- notifier->subdevs[notifier->num_subdevs] = &isd->asd;
+ if (isp_fwnode_parse(dev, fwnode, isd)) {
+ devm_kfree(dev, isd);
+ continue;
+ }
- if (isp_fwnode_parse(dev, fwnode, isd))
- goto error;
+ notifier->subdevs[notifier->num_subdevs] = &isd->asd;
isd->asd.match.fwnode.fwnode =
fwnode_graph_get_remote_port_parent(fwnode);
@@ -2128,26 +2188,12 @@ error:
return -EINVAL;
}
-static int isp_subdev_notifier_bound(struct v4l2_async_notifier *async,
- struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
-{
- struct isp_async_subdev *isd =
- container_of(asd, struct isp_async_subdev, asd);
-
- isd->sd = subdev;
- isd->sd->host_priv = &isd->bus;
-
- return 0;
-}
-
static int isp_subdev_notifier_complete(struct v4l2_async_notifier *async)
{
struct isp_device *isp = container_of(async, struct isp_device,
notifier);
struct v4l2_device *v4l2_dev = &isp->v4l2_dev;
struct v4l2_subdev *sd;
- struct isp_bus_cfg *bus;
int ret;
ret = media_entity_enum_init(&isp->crashed, &isp->media_dev);
@@ -2155,13 +2201,13 @@ static int isp_subdev_notifier_complete(struct v4l2_async_notifier *async)
return ret;
list_for_each_entry(sd, &v4l2_dev->subdevs, list) {
- /* Only try to link entities whose interface was set on bound */
- if (sd->host_priv) {
- bus = (struct isp_bus_cfg *)sd->host_priv;
- ret = isp_link_entity(isp, &sd->entity, bus->interface);
- if (ret < 0)
- return ret;
- }
+ if (!sd->asd)
+ continue;
+
+ ret = isp_link_entity(isp, &sd->entity,
+ v4l2_subdev_to_bus_cfg(sd)->interface);
+ if (ret < 0)
+ return ret;
}
ret = v4l2_device_register_subdev_nodes(&isp->v4l2_dev);
@@ -2339,7 +2385,6 @@ static int isp_probe(struct platform_device *pdev)
if (ret < 0)
goto error_register_entities;
- isp->notifier.bound = isp_subdev_notifier_bound;
isp->notifier.complete = isp_subdev_notifier_complete;
ret = v4l2_async_notifier_register(&isp->v4l2_dev, &isp->notifier);
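
The isp_fwnode_parse() rework keys the interface choice off the endpoint's bus_type instead of hard-coding CSI-2: CCP2/CSI-1 endpoints route to the CCP2B interfaces, CSI-2 endpoints to the CSI2 ones, and anything else is rejected. A stripped-down sketch of that dispatch using the same V4L2 fwnode API; only the structure is shown, with the enum names as they appear in the hunk:

#include <linux/device.h>
#include <media/v4l2-fwnode.h>

static int parse_csi_endpoint(struct device *dev,
			      struct fwnode_handle *fwnode, bool *csi1)
{
	struct v4l2_fwnode_endpoint vep;
	int ret;

	ret = v4l2_fwnode_endpoint_parse(fwnode, &vep);
	if (ret)
		return ret;

	switch (vep.bus_type) {
	case V4L2_MBUS_CCP2:
	case V4L2_MBUS_CSI1:
		*csi1 = true;		/* serial CSI-1 / CCP-2 PHY mode */
		break;
	case V4L2_MBUS_CSI2:
		*csi1 = false;		/* CSI-2 D-PHY mode */
		break;
	default:
		dev_err(dev, "unsupported bus type %u\n", vep.bus_type);
		return -EINVAL;
	}

	return 0;
}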
diff --git a/drivers/media/platform/omap3isp/isp.h b/drivers/media/platform/omap3isp/isp.h
index 2f2ae609c548..e528df6efc09 100644
--- a/drivers/media/platform/omap3isp/isp.h
+++ b/drivers/media/platform/omap3isp/isp.h
@@ -226,11 +226,13 @@ struct isp_device {
};
struct isp_async_subdev {
- struct v4l2_subdev *sd;
struct isp_bus_cfg bus;
struct v4l2_async_subdev asd;
};
+#define v4l2_subdev_to_bus_cfg(sd) \
+ (&container_of((sd)->asd, struct isp_async_subdev, asd)->bus)
+
#define v4l2_dev_to_isp_device(dev) \
container_of(dev, struct isp_device, v4l2_dev)
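
With the notifier's bound callback gone, the bus configuration is recovered straight from the async subdev: sd->asd points back into the struct isp_async_subdev that embeds both the v4l2_async_subdev and the isp_bus_cfg, so container_of() walks from one to the other. A small illustration of the same embedding trick on hypothetical types:

#include <linux/kernel.h>	/* container_of() */

struct inner_cookie { int id; };

struct outer {
	int config;
	struct inner_cookie cookie;	/* handed out to a framework */
};

/* Given only the embedded member, recover the enclosing object. */
static int config_from_cookie(struct inner_cookie *c)
{
	struct outer *o = container_of(c, struct outer, cookie);

	return o->config;
}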
diff --git a/drivers/media/platform/omap3isp/ispccdc.c b/drivers/media/platform/omap3isp/ispccdc.c
index 7207558d722c..b66276ab5765 100644
--- a/drivers/media/platform/omap3isp/ispccdc.c
+++ b/drivers/media/platform/omap3isp/ispccdc.c
@@ -1139,15 +1139,11 @@ static void ccdc_configure(struct isp_ccdc_device *ccdc)
pad = media_entity_remote_pad(&ccdc->pads[CCDC_PAD_SINK]);
sensor = media_entity_to_v4l2_subdev(pad->entity);
if (ccdc->input == CCDC_INPUT_PARALLEL) {
- struct v4l2_mbus_config cfg;
- int ret;
+ struct v4l2_subdev *sd =
+ to_isp_pipeline(&ccdc->subdev.entity)->external;
- ret = v4l2_subdev_call(sensor, video, g_mbus_config, &cfg);
- if (!ret)
- ccdc->bt656 = cfg.type == V4L2_MBUS_BT656;
-
- parcfg = &((struct isp_bus_cfg *)sensor->host_priv)
- ->bus.parallel;
+ parcfg = &v4l2_subdev_to_bus_cfg(sd)->bus.parallel;
+ ccdc->bt656 = parcfg->bt656;
}
/* CCDC_PAD_SINK */
@@ -2418,11 +2414,11 @@ static int ccdc_link_validate(struct v4l2_subdev *sd,
/* We've got a parallel sensor here. */
if (ccdc->input == CCDC_INPUT_PARALLEL) {
- struct isp_parallel_cfg *parcfg =
- &((struct isp_bus_cfg *)
- media_entity_to_v4l2_subdev(link->source->entity)
- ->host_priv)->bus.parallel;
- parallel_shift = parcfg->data_lane_shift;
+ struct v4l2_subdev *sd =
+ media_entity_to_v4l2_subdev(link->source->entity);
+ struct isp_bus_cfg *bus_cfg = v4l2_subdev_to_bus_cfg(sd);
+
+ parallel_shift = bus_cfg->bus.parallel.data_lane_shift;
} else {
parallel_shift = 0;
}
diff --git a/drivers/media/platform/omap3isp/ispccp2.c b/drivers/media/platform/omap3isp/ispccp2.c
index ca095238510d..e062939d0d05 100644
--- a/drivers/media/platform/omap3isp/ispccp2.c
+++ b/drivers/media/platform/omap3isp/ispccp2.c
@@ -213,14 +213,17 @@ static int ccp2_phyif_config(struct isp_ccp2_device *ccp2,
struct isp_device *isp = to_isp_device(ccp2);
u32 val;
- /* CCP2B mode */
val = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL) |
- ISPCCP2_CTRL_IO_OUT_SEL | ISPCCP2_CTRL_MODE;
+ ISPCCP2_CTRL_MODE;
/* Data/strobe physical layer */
BIT_SET(val, ISPCCP2_CTRL_PHY_SEL_SHIFT, ISPCCP2_CTRL_PHY_SEL_MASK,
buscfg->phy_layer);
+ BIT_SET(val, ISPCCP2_CTRL_IO_OUT_SEL_SHIFT,
+ ISPCCP2_CTRL_IO_OUT_SEL_MASK, buscfg->ccp2_mode);
BIT_SET(val, ISPCCP2_CTRL_INV_SHIFT, ISPCCP2_CTRL_INV_MASK,
buscfg->strobe_clk_pol);
+ BIT_SET(val, ISPCCP2_CTRL_VP_CLK_POL_SHIFT,
+ ISPCCP2_CTRL_VP_CLK_POL_MASK, buscfg->vp_clk_pol);
isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL);
val = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL);
@@ -347,6 +350,7 @@ static void ccp2_lcx_config(struct isp_ccp2_device *ccp2,
*/
static int ccp2_if_configure(struct isp_ccp2_device *ccp2)
{
+ struct isp_pipeline *pipe = to_isp_pipeline(&ccp2->subdev.entity);
const struct isp_bus_cfg *buscfg;
struct v4l2_mbus_framefmt *format;
struct media_pad *pad;
@@ -358,7 +362,7 @@ static int ccp2_if_configure(struct isp_ccp2_device *ccp2)
pad = media_entity_remote_pad(&ccp2->pads[CCP2_PAD_SINK]);
sensor = media_entity_to_v4l2_subdev(pad->entity);
- buscfg = sensor->host_priv;
+ buscfg = v4l2_subdev_to_bus_cfg(pipe->external);
ret = ccp2_phyif_config(ccp2, &buscfg->bus.ccp2);
if (ret < 0)
@@ -838,7 +842,7 @@ static int ccp2_s_stream(struct v4l2_subdev *sd, int enable)
switch (enable) {
case ISP_PIPELINE_STREAM_CONTINUOUS:
if (ccp2->phy) {
- ret = omap3isp_csiphy_acquire(ccp2->phy);
+ ret = omap3isp_csiphy_acquire(ccp2->phy, &sd->entity);
if (ret < 0)
return ret;
}
@@ -1137,10 +1141,16 @@ int omap3isp_ccp2_init(struct isp_device *isp)
if (isp->revision == ISP_REVISION_2_0) {
ccp2->vdds_csib = devm_regulator_get(isp->dev, "vdds_csib");
if (IS_ERR(ccp2->vdds_csib)) {
+ if (PTR_ERR(ccp2->vdds_csib) == -EPROBE_DEFER) {
+ dev_dbg(isp->dev,
+ "Can't get regulator vdds_csib, deferring probing\n");
+ return -EPROBE_DEFER;
+ }
dev_dbg(isp->dev,
"Could not get regulator vdds_csib\n");
ccp2->vdds_csib = NULL;
}
+ ccp2->phy = &isp->isp_csiphy2;
} else if (isp->revision == ISP_REVISION_15_0) {
ccp2->phy = &isp->isp_csiphy1;
}
diff --git a/drivers/media/platform/omap3isp/ispcsi2.c b/drivers/media/platform/omap3isp/ispcsi2.c
index 7dae2fe0d42d..a4d3d030e81e 100644
--- a/drivers/media/platform/omap3isp/ispcsi2.c
+++ b/drivers/media/platform/omap3isp/ispcsi2.c
@@ -490,7 +490,7 @@ int omap3isp_csi2_reset(struct isp_csi2_device *csi2)
if (!csi2->available)
return -ENODEV;
- if (csi2->phy->phy_in_use)
+ if (csi2->phy->entity)
return -EBUSY;
isp_reg_set(isp, csi2->regs1, ISPCSI2_SYSCONFIG,
@@ -566,7 +566,7 @@ static int csi2_configure(struct isp_csi2_device *csi2)
pad = media_entity_remote_pad(&csi2->pads[CSI2_PAD_SINK]);
sensor = media_entity_to_v4l2_subdev(pad->entity);
- buscfg = sensor->host_priv;
+ buscfg = v4l2_subdev_to_bus_cfg(pipe->external);
csi2->frame_skip = 0;
v4l2_subdev_call(sensor, sensor, g_skip_frames, &csi2->frame_skip);
@@ -1053,7 +1053,7 @@ static int csi2_set_stream(struct v4l2_subdev *sd, int enable)
switch (enable) {
case ISP_PIPELINE_STREAM_CONTINUOUS:
- if (omap3isp_csiphy_acquire(csi2->phy) < 0)
+ if (omap3isp_csiphy_acquire(csi2->phy, &sd->entity) < 0)
return -ENODEV;
if (csi2->output & CSI2_OUTPUT_MEMORY)
omap3isp_sbl_enable(isp, OMAP3_ISP_SBL_CSI2A_WRITE);
diff --git a/drivers/media/platform/omap3isp/ispcsiphy.c b/drivers/media/platform/omap3isp/ispcsiphy.c
index 871d4fe09c7f..a28fb79abaac 100644
--- a/drivers/media/platform/omap3isp/ispcsiphy.c
+++ b/drivers/media/platform/omap3isp/ispcsiphy.c
@@ -164,30 +164,28 @@ static int csiphy_set_power(struct isp_csiphy *phy, u32 power)
static int omap3isp_csiphy_config(struct isp_csiphy *phy)
{
- struct isp_csi2_device *csi2 = phy->csi2;
- struct isp_pipeline *pipe = to_isp_pipeline(&csi2->subdev.entity);
- struct isp_bus_cfg *buscfg = pipe->external->host_priv;
+ struct isp_pipeline *pipe = to_isp_pipeline(phy->entity);
+ struct isp_bus_cfg *buscfg = v4l2_subdev_to_bus_cfg(pipe->external);
struct isp_csiphy_lanes_cfg *lanes;
int csi2_ddrclk_khz;
- unsigned int used_lanes = 0;
+ unsigned int num_data_lanes, used_lanes = 0;
unsigned int i;
u32 reg;
- if (!buscfg) {
- struct isp_async_subdev *isd =
- container_of(pipe->external->asd,
- struct isp_async_subdev, asd);
- buscfg = &isd->bus;
- }
-
if (buscfg->interface == ISP_INTERFACE_CCP2B_PHY1
- || buscfg->interface == ISP_INTERFACE_CCP2B_PHY2)
+ || buscfg->interface == ISP_INTERFACE_CCP2B_PHY2) {
lanes = &buscfg->bus.ccp2.lanecfg;
- else
+ num_data_lanes = 1;
+ } else {
lanes = &buscfg->bus.csi2.lanecfg;
+ num_data_lanes = buscfg->bus.csi2.num_data_lanes;
+ }
+
+ if (num_data_lanes > phy->num_data_lanes)
+ return -EINVAL;
/* Clock and data lanes verification */
- for (i = 0; i < phy->num_data_lanes; i++) {
+ for (i = 0; i < num_data_lanes; i++) {
if (lanes->data[i].pol > 1 || lanes->data[i].pos > 3)
return -EINVAL;
@@ -216,7 +214,7 @@ static int omap3isp_csiphy_config(struct isp_csiphy *phy)
csi2_ddrclk_khz = pipe->external_rate / 1000
/ (2 * hweight32(used_lanes)) * pipe->external_width;
- reg = isp_reg_readl(csi2->isp, phy->phy_regs, ISPCSIPHY_REG0);
+ reg = isp_reg_readl(phy->isp, phy->phy_regs, ISPCSIPHY_REG0);
reg &= ~(ISPCSIPHY_REG0_THS_TERM_MASK |
ISPCSIPHY_REG0_THS_SETTLE_MASK);
@@ -227,9 +225,9 @@ static int omap3isp_csiphy_config(struct isp_csiphy *phy)
reg |= (DIV_ROUND_UP(90 * csi2_ddrclk_khz, 1000000) + 3)
<< ISPCSIPHY_REG0_THS_SETTLE_SHIFT;
- isp_reg_writel(csi2->isp, reg, phy->phy_regs, ISPCSIPHY_REG0);
+ isp_reg_writel(phy->isp, reg, phy->phy_regs, ISPCSIPHY_REG0);
- reg = isp_reg_readl(csi2->isp, phy->phy_regs, ISPCSIPHY_REG1);
+ reg = isp_reg_readl(phy->isp, phy->phy_regs, ISPCSIPHY_REG1);
reg &= ~(ISPCSIPHY_REG1_TCLK_TERM_MASK |
ISPCSIPHY_REG1_TCLK_MISS_MASK |
@@ -238,12 +236,12 @@ static int omap3isp_csiphy_config(struct isp_csiphy *phy)
reg |= TCLK_MISS << ISPCSIPHY_REG1_TCLK_MISS_SHIFT;
reg |= TCLK_SETTLE << ISPCSIPHY_REG1_TCLK_SETTLE_SHIFT;
- isp_reg_writel(csi2->isp, reg, phy->phy_regs, ISPCSIPHY_REG1);
+ isp_reg_writel(phy->isp, reg, phy->phy_regs, ISPCSIPHY_REG1);
/* DPHY lane configuration */
- reg = isp_reg_readl(csi2->isp, phy->cfg_regs, ISPCSI2_PHY_CFG);
+ reg = isp_reg_readl(phy->isp, phy->cfg_regs, ISPCSI2_PHY_CFG);
- for (i = 0; i < phy->num_data_lanes; i++) {
+ for (i = 0; i < num_data_lanes; i++) {
reg &= ~(ISPCSI2_PHY_CFG_DATA_POL_MASK(i + 1) |
ISPCSI2_PHY_CFG_DATA_POSITION_MASK(i + 1));
reg |= (lanes->data[i].pol <<
@@ -257,12 +255,12 @@ static int omap3isp_csiphy_config(struct isp_csiphy *phy)
reg |= lanes->clk.pol << ISPCSI2_PHY_CFG_CLOCK_POL_SHIFT;
reg |= lanes->clk.pos << ISPCSI2_PHY_CFG_CLOCK_POSITION_SHIFT;
- isp_reg_writel(csi2->isp, reg, phy->cfg_regs, ISPCSI2_PHY_CFG);
+ isp_reg_writel(phy->isp, reg, phy->cfg_regs, ISPCSI2_PHY_CFG);
return 0;
}
-int omap3isp_csiphy_acquire(struct isp_csiphy *phy)
+int omap3isp_csiphy_acquire(struct isp_csiphy *phy, struct media_entity *entity)
{
int rval;
@@ -282,20 +280,25 @@ int omap3isp_csiphy_acquire(struct isp_csiphy *phy)
if (rval < 0)
goto done;
+ phy->entity = entity;
+
rval = omap3isp_csiphy_config(phy);
if (rval < 0)
goto done;
- rval = csiphy_set_power(phy, ISPCSI2_PHY_CFG_PWR_CMD_ON);
- if (rval) {
- regulator_disable(phy->vdd);
- goto done;
- }
-
- csiphy_power_autoswitch_enable(phy, true);
- phy->phy_in_use = 1;
+ if (phy->isp->revision == ISP_REVISION_15_0) {
+ rval = csiphy_set_power(phy, ISPCSI2_PHY_CFG_PWR_CMD_ON);
+ if (rval) {
+ regulator_disable(phy->vdd);
+ goto done;
+ }
+ csiphy_power_autoswitch_enable(phy, true);
+ }
done:
+ if (rval < 0)
+ phy->entity = NULL;
+
mutex_unlock(&phy->mutex);
return rval;
}
@@ -303,18 +306,19 @@ done:
void omap3isp_csiphy_release(struct isp_csiphy *phy)
{
mutex_lock(&phy->mutex);
- if (phy->phy_in_use) {
- struct isp_csi2_device *csi2 = phy->csi2;
- struct isp_pipeline *pipe =
- to_isp_pipeline(&csi2->subdev.entity);
- struct isp_bus_cfg *buscfg = pipe->external->host_priv;
+ if (phy->entity) {
+ struct isp_pipeline *pipe = to_isp_pipeline(phy->entity);
+ struct isp_bus_cfg *buscfg =
+ v4l2_subdev_to_bus_cfg(pipe->external);
csiphy_routing_cfg(phy, buscfg->interface, false,
buscfg->bus.ccp2.phy_layer);
- csiphy_power_autoswitch_enable(phy, false);
- csiphy_set_power(phy, ISPCSI2_PHY_CFG_PWR_CMD_OFF);
+ if (phy->isp->revision == ISP_REVISION_15_0) {
+ csiphy_power_autoswitch_enable(phy, false);
+ csiphy_set_power(phy, ISPCSI2_PHY_CFG_PWR_CMD_OFF);
+ }
regulator_disable(phy->vdd);
- phy->phy_in_use = 0;
+ phy->entity = NULL;
}
mutex_unlock(&phy->mutex);
}
@@ -334,14 +338,21 @@ int omap3isp_csiphy_init(struct isp_device *isp)
phy2->phy_regs = OMAP3_ISP_IOMEM_CSIPHY2;
mutex_init(&phy2->mutex);
+ phy1->isp = isp;
+ mutex_init(&phy1->mutex);
+
if (isp->revision == ISP_REVISION_15_0) {
- phy1->isp = isp;
phy1->csi2 = &isp->isp_csi2c;
phy1->num_data_lanes = ISP_CSIPHY1_NUM_DATA_LANES;
phy1->cfg_regs = OMAP3_ISP_IOMEM_CSI2C_REGS1;
phy1->phy_regs = OMAP3_ISP_IOMEM_CSIPHY1;
- mutex_init(&phy1->mutex);
}
return 0;
}
+
+void omap3isp_csiphy_cleanup(struct isp_device *isp)
+{
+ mutex_destroy(&isp->isp_csiphy1.mutex);
+ mutex_destroy(&isp->isp_csiphy2.mutex);
+}
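
The PHY bookkeeping switches from a phy_in_use flag to recording which media entity acquired the PHY, so release can walk back to that entity's pipeline for the bus configuration; the power-command sequence is also restricted to ISP revision 15.0, matching the csiphy1 setup above. A compressed, hedged sketch of the acquire/release ownership pattern only (routing, power and regulator handling elided; omap3isp_csiphy_config() is the driver function from the hunk):

/* Sketch only: the real functions also configure routing and power. */
static int phy_acquire(struct isp_csiphy *phy, struct media_entity *owner)
{
	int ret;

	mutex_lock(&phy->mutex);
	phy->entity = owner;			/* remember who owns us   */
	ret = omap3isp_csiphy_config(phy);	/* needs owner's pipeline */
	if (ret < 0)
		phy->entity = NULL;		/* roll back on failure   */
	mutex_unlock(&phy->mutex);

	return ret;
}

static void phy_release(struct isp_csiphy *phy)
{
	mutex_lock(&phy->mutex);
	phy->entity = NULL;			/* PHY is free again */
	mutex_unlock(&phy->mutex);
}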
diff --git a/drivers/media/platform/omap3isp/ispcsiphy.h b/drivers/media/platform/omap3isp/ispcsiphy.h
index 28b63b28f9f7..91543a09b28a 100644
--- a/drivers/media/platform/omap3isp/ispcsiphy.h
+++ b/drivers/media/platform/omap3isp/ispcsiphy.h
@@ -25,9 +25,10 @@ struct regulator;
struct isp_csiphy {
struct isp_device *isp;
struct mutex mutex; /* serialize csiphy configuration */
- u8 phy_in_use;
struct isp_csi2_device *csi2;
struct regulator *vdd;
+ /* the entity that acquired the phy */
+ struct media_entity *entity;
/* mem resources - enums as defined in enum isp_mem_resources */
unsigned int cfg_regs;
@@ -36,8 +37,10 @@ struct isp_csiphy {
u8 num_data_lanes; /* number of CSI2 Data Lanes supported */
};
-int omap3isp_csiphy_acquire(struct isp_csiphy *phy);
+int omap3isp_csiphy_acquire(struct isp_csiphy *phy,
+ struct media_entity *entity);
void omap3isp_csiphy_release(struct isp_csiphy *phy);
int omap3isp_csiphy_init(struct isp_device *isp);
+void omap3isp_csiphy_cleanup(struct isp_device *isp);
#endif /* OMAP3_ISP_CSI_PHY_H */
diff --git a/drivers/media/platform/omap3isp/ispreg.h b/drivers/media/platform/omap3isp/ispreg.h
index b5ea8da0b904..d08483919a77 100644
--- a/drivers/media/platform/omap3isp/ispreg.h
+++ b/drivers/media/platform/omap3isp/ispreg.h
@@ -87,6 +87,8 @@
#define ISPCCP2_CTRL_PHY_SEL_MASK 0x1
#define ISPCCP2_CTRL_PHY_SEL_SHIFT 1
#define ISPCCP2_CTRL_IO_OUT_SEL (1 << 2)
+#define ISPCCP2_CTRL_IO_OUT_SEL_MASK 0x1
+#define ISPCCP2_CTRL_IO_OUT_SEL_SHIFT 2
#define ISPCCP2_CTRL_MODE (1 << 4)
#define ISPCCP2_CTRL_VP_CLK_FORCE_ON (1 << 9)
#define ISPCCP2_CTRL_INV (1 << 10)
@@ -94,6 +96,8 @@
#define ISPCCP2_CTRL_INV_SHIFT 10
#define ISPCCP2_CTRL_VP_ONLY_EN (1 << 11)
#define ISPCCP2_CTRL_VP_CLK_POL (1 << 12)
+#define ISPCCP2_CTRL_VP_CLK_POL_MASK 0x1
+#define ISPCCP2_CTRL_VP_CLK_POL_SHIFT 12
#define ISPCCP2_CTRL_VPCLK_DIV_SHIFT 15
#define ISPCCP2_CTRL_VPCLK_DIV_MASK 0x1ffff /* [31:15] */
#define ISPCCP2_CTRL_VP_OUT_CTRL_SHIFT 8 /* 3430 bits */
diff --git a/drivers/media/platform/omap3isp/omap3isp.h b/drivers/media/platform/omap3isp/omap3isp.h
index 443e8f7673e2..9fb4d5bce004 100644
--- a/drivers/media/platform/omap3isp/omap3isp.h
+++ b/drivers/media/platform/omap3isp/omap3isp.h
@@ -46,6 +46,7 @@ enum isp_interface_type {
* 0 - Positive, 1 - Negative
* @data_pol: Data polarity
* 0 - Normal, 1 - One's complement
+ * @bt656: Data contains BT.656 embedded synchronization
*/
struct isp_parallel_cfg {
unsigned int data_lane_shift:3;
@@ -54,6 +55,7 @@ struct isp_parallel_cfg {
unsigned int vs_pol:1;
unsigned int fld_pol:1;
unsigned int data_pol:1;
+ unsigned int bt656:1;
};
enum {
@@ -108,16 +110,20 @@ struct isp_ccp2_cfg {
unsigned int ccp2_mode:1;
unsigned int phy_layer:1;
unsigned int vpclk_div:2;
+ unsigned int vp_clk_pol:1;
struct isp_csiphy_lanes_cfg lanecfg;
};
/**
* struct isp_csi2_cfg - CSI2 interface configuration
* @crc: Enable the cyclic redundancy check
+ * @lanecfg: CSI-2 lane configuration
+ * @num_data_lanes: The number of data lanes in use
*/
struct isp_csi2_cfg {
unsigned crc:1;
struct isp_csiphy_lanes_cfg lanecfg;
+ u8 num_data_lanes;
};
struct isp_bus_cfg {
diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c
index 399095170b6e..edca993c2b1f 100644
--- a/drivers/media/platform/pxa_camera.c
+++ b/drivers/media/platform/pxa_camera.c
@@ -638,6 +638,9 @@ static unsigned int pxa_mbus_config_compatible(const struct v4l2_mbus_config *cf
mipi_clock = common_flags & (V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK |
V4L2_MBUS_CSI2_CONTINUOUS_CLOCK);
return (!mipi_lanes || !mipi_clock) ? 0 : common_flags;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
}
return 0;
}
@@ -1557,7 +1560,7 @@ static void pxac_vb2_stop_streaming(struct vb2_queue *vq)
pxa_camera_wakeup(pcdev, buf, VB2_BUF_STATE_ERROR);
}
-static struct vb2_ops pxac_vb2_ops = {
+static const struct vb2_ops pxac_vb2_ops = {
.queue_setup = pxac_vb2_queue_setup,
.buf_init = pxac_vb2_init,
.buf_prepare = pxac_vb2_prepare,
@@ -2097,7 +2100,7 @@ static const struct v4l2_ioctl_ops pxa_camera_ioctl_ops = {
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
-static struct v4l2_clk_ops pxa_camera_mclk_ops = {
+static const struct v4l2_clk_ops pxa_camera_mclk_ops = {
};
static const struct video_device pxa_camera_videodev_template = {
@@ -2328,7 +2331,7 @@ static int pxa_camera_pdata_from_dt(struct device *dev,
asd->match.fwnode.fwnode = of_fwnode_handle(remote);
of_node_put(remote);
} else {
- dev_notice(dev, "no remote for %s\n", of_node_full_name(np));
+ dev_notice(dev, "no remote for %pOF\n", np);
}
out:
diff --git a/drivers/media/platform/qcom/camss-8x16/Makefile b/drivers/media/platform/qcom/camss-8x16/Makefile
new file mode 100644
index 000000000000..3c4024fbb768
--- /dev/null
+++ b/drivers/media/platform/qcom/camss-8x16/Makefile
@@ -0,0 +1,11 @@
+# Makefile for Qualcomm CAMSS driver
+
+qcom-camss-objs += \
+ camss.o \
+ camss-csid.o \
+ camss-csiphy.o \
+ camss-ispif.o \
+ camss-vfe.o \
+ camss-video.o \
+
+obj-$(CONFIG_VIDEO_QCOM_CAMSS) += qcom-camss.o
diff --git a/drivers/media/platform/qcom/camss-8x16/camss-csid.c b/drivers/media/platform/qcom/camss-8x16/camss-csid.c
new file mode 100644
index 000000000000..64df82817de3
--- /dev/null
+++ b/drivers/media/platform/qcom/camss-8x16/camss-csid.c
@@ -0,0 +1,1092 @@
+/*
+ * camss-csid.c
+ *
+ * Qualcomm MSM Camera Subsystem - CSID (CSI Decoder) Module
+ *
+ * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015-2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <media/media-entity.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+
+#include "camss-csid.h"
+#include "camss.h"
+
+#define MSM_CSID_NAME "msm_csid"
+
+#define CAMSS_CSID_HW_VERSION 0x0
+#define CAMSS_CSID_CORE_CTRL_0 0x004
+#define CAMSS_CSID_CORE_CTRL_1 0x008
+#define CAMSS_CSID_RST_CMD 0x00c
+#define CAMSS_CSID_CID_LUT_VC_n(n) (0x010 + 0x4 * (n))
+#define CAMSS_CSID_CID_n_CFG(n) (0x020 + 0x4 * (n))
+#define CAMSS_CSID_IRQ_CLEAR_CMD 0x060
+#define CAMSS_CSID_IRQ_MASK 0x064
+#define CAMSS_CSID_IRQ_STATUS 0x068
+#define CAMSS_CSID_TG_CTRL 0x0a0
+#define CAMSS_CSID_TG_CTRL_DISABLE 0xa06436
+#define CAMSS_CSID_TG_CTRL_ENABLE 0xa06437
+#define CAMSS_CSID_TG_VC_CFG 0x0a4
+#define CAMSS_CSID_TG_VC_CFG_H_BLANKING 0x3ff
+#define CAMSS_CSID_TG_VC_CFG_V_BLANKING 0x7f
+#define CAMSS_CSID_TG_DT_n_CGG_0(n) (0x0ac + 0xc * (n))
+#define CAMSS_CSID_TG_DT_n_CGG_1(n) (0x0b0 + 0xc * (n))
+#define CAMSS_CSID_TG_DT_n_CGG_2(n) (0x0b4 + 0xc * (n))
+
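+/* MIPI CSI-2 data type codes for the formats supported below */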
+#define DATA_TYPE_EMBEDDED_DATA_8BIT 0x12
+#define DATA_TYPE_YUV422_8BIT 0x1e
+#define DATA_TYPE_RAW_6BIT 0x28
+#define DATA_TYPE_RAW_8BIT 0x2a
+#define DATA_TYPE_RAW_10BIT 0x2b
+#define DATA_TYPE_RAW_12BIT 0x2c
+
+#define DECODE_FORMAT_UNCOMPRESSED_6_BIT 0x0
+#define DECODE_FORMAT_UNCOMPRESSED_8_BIT 0x1
+#define DECODE_FORMAT_UNCOMPRESSED_10_BIT 0x2
+#define DECODE_FORMAT_UNCOMPRESSED_12_BIT 0x3
+
+#define CSID_RESET_TIMEOUT_MS 500
+
+struct csid_fmts {
+ u32 code;
+ u8 data_type;
+ u8 decode_format;
+ u8 bpp;
+ u8 spp; /* bus samples per pixel */
+};
+
+static const struct csid_fmts csid_input_fmts[] = {
+ {
+ MEDIA_BUS_FMT_UYVY8_2X8,
+ DATA_TYPE_YUV422_8BIT,
+ DECODE_FORMAT_UNCOMPRESSED_8_BIT,
+ 8,
+ 2,
+ },
+ {
+ MEDIA_BUS_FMT_VYUY8_2X8,
+ DATA_TYPE_YUV422_8BIT,
+ DECODE_FORMAT_UNCOMPRESSED_8_BIT,
+ 8,
+ 2,
+ },
+ {
+ MEDIA_BUS_FMT_YUYV8_2X8,
+ DATA_TYPE_YUV422_8BIT,
+ DECODE_FORMAT_UNCOMPRESSED_8_BIT,
+ 8,
+ 2,
+ },
+ {
+ MEDIA_BUS_FMT_YVYU8_2X8,
+ DATA_TYPE_YUV422_8BIT,
+ DECODE_FORMAT_UNCOMPRESSED_8_BIT,
+ 8,
+ 2,
+ },
+ {
+ MEDIA_BUS_FMT_SBGGR8_1X8,
+ DATA_TYPE_RAW_8BIT,
+ DECODE_FORMAT_UNCOMPRESSED_8_BIT,
+ 8,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SGBRG8_1X8,
+ DATA_TYPE_RAW_8BIT,
+ DECODE_FORMAT_UNCOMPRESSED_8_BIT,
+ 8,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SGRBG8_1X8,
+ DATA_TYPE_RAW_8BIT,
+ DECODE_FORMAT_UNCOMPRESSED_8_BIT,
+ 8,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SRGGB8_1X8,
+ DATA_TYPE_RAW_8BIT,
+ DECODE_FORMAT_UNCOMPRESSED_8_BIT,
+ 8,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ DATA_TYPE_RAW_10BIT,
+ DECODE_FORMAT_UNCOMPRESSED_10_BIT,
+ 10,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SGBRG10_1X10,
+ DATA_TYPE_RAW_10BIT,
+ DECODE_FORMAT_UNCOMPRESSED_10_BIT,
+ 10,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SGRBG10_1X10,
+ DATA_TYPE_RAW_10BIT,
+ DECODE_FORMAT_UNCOMPRESSED_10_BIT,
+ 10,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SRGGB10_1X10,
+ DATA_TYPE_RAW_10BIT,
+ DECODE_FORMAT_UNCOMPRESSED_10_BIT,
+ 10,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SBGGR12_1X12,
+ DATA_TYPE_RAW_12BIT,
+ DECODE_FORMAT_UNCOMPRESSED_12_BIT,
+ 12,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SGBRG12_1X12,
+ DATA_TYPE_RAW_12BIT,
+ DECODE_FORMAT_UNCOMPRESSED_12_BIT,
+ 12,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SGRBG12_1X12,
+ DATA_TYPE_RAW_12BIT,
+ DECODE_FORMAT_UNCOMPRESSED_12_BIT,
+ 12,
+ 1,
+ },
+ {
+ MEDIA_BUS_FMT_SRGGB12_1X12,
+ DATA_TYPE_RAW_12BIT,
+ DECODE_FORMAT_UNCOMPRESSED_12_BIT,
+ 12,
+ 1,
+ }
+};
+
+static const struct csid_fmts *csid_get_fmt_entry(u32 code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(csid_input_fmts); i++)
+ if (code == csid_input_fmts[i].code)
+ return &csid_input_fmts[i];
+
+ WARN(1, "Unknown format\n");
+
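+	/* Fall back to the first supported format */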
+ return &csid_input_fmts[0];
+}
+
+/*
+ * csid_isr - CSID module interrupt handler
+ * @irq: Interrupt line
+ * @dev: CSID device
+ *
+ * Return IRQ_HANDLED on success
+ */
+static irqreturn_t csid_isr(int irq, void *dev)
+{
+ struct csid_device *csid = dev;
+ u32 value;
+
+ value = readl_relaxed(csid->base + CAMSS_CSID_IRQ_STATUS);
+ writel_relaxed(value, csid->base + CAMSS_CSID_IRQ_CLEAR_CMD);
+
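+	/* Bit 11 of the IRQ status reports reset completion; wake up csid_reset() */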
+ if ((value >> 11) & 0x1)
+ complete(&csid->reset_complete);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * csid_set_clock_rates - Calculate and set clock rates on CSID module
+ * @csid: CSID device
+ */
+static int csid_set_clock_rates(struct csid_device *csid)
+{
+ struct device *dev = to_device_index(csid, csid->id);
+ u32 pixel_clock;
+ int i, j;
+ int ret;
+
+ ret = camss_get_pixel_clock(&csid->subdev.entity, &pixel_clock);
+ if (ret)
+ pixel_clock = 0;
+
+ for (i = 0; i < csid->nclocks; i++) {
+ struct camss_clock *clock = &csid->clock[i];
+
+ if (!strcmp(clock->name, "csi0") ||
+ !strcmp(clock->name, "csi1")) {
+ u8 bpp = csid_get_fmt_entry(
+ csid->fmt[MSM_CSIPHY_PAD_SINK].code)->bpp;
+ u8 num_lanes = csid->phy.lane_cnt;
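+			/*
+			 * Minimum csi clock rate, derived from the sensor
+			 * pixel clock, bits per pixel and number of lanes.
+			 */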
+ u64 min_rate = pixel_clock * bpp / (2 * num_lanes * 4);
+ long rate;
+
+ camss_add_clock_margin(&min_rate);
+
+ for (j = 0; j < clock->nfreqs; j++)
+ if (min_rate < clock->freq[j])
+ break;
+
+ if (j == clock->nfreqs) {
+ dev_err(dev,
+ "Pixel clock is too high for CSID\n");
+ return -EINVAL;
+ }
+
+			/*
+			 * If the sensor pixel clock is not available, set
+			 * the highest possible CSID clock rate.
+			 */
+ if (min_rate == 0)
+ j = clock->nfreqs - 1;
+
+ rate = clk_round_rate(clock->clk, clock->freq[j]);
+ if (rate < 0) {
+ dev_err(dev, "clk round rate failed: %ld\n",
+ rate);
+ return -EINVAL;
+ }
+
+ ret = clk_set_rate(clock->clk, rate);
+ if (ret < 0) {
+ dev_err(dev, "clk set rate failed: %d\n", ret);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * csid_reset - Trigger reset on CSID module and wait to complete
+ * @csid: CSID device
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int csid_reset(struct csid_device *csid)
+{
+ unsigned long time;
+
+ reinit_completion(&csid->reset_complete);
+
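+	/*
+	 * Assert every reset bit in the reset command register and wait
+	 * for the reset-done interrupt signalled through csid_isr().
+	 */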
+ writel_relaxed(0x7fff, csid->base + CAMSS_CSID_RST_CMD);
+
+ time = wait_for_completion_timeout(&csid->reset_complete,
+ msecs_to_jiffies(CSID_RESET_TIMEOUT_MS));
+ if (!time) {
+ dev_err(to_device_index(csid, csid->id),
+ "CSID reset timeout\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/*
+ * csid_set_power - Power on/off CSID module
+ * @sd: CSID V4L2 subdevice
+ * @on: Requested power state
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int csid_set_power(struct v4l2_subdev *sd, int on)
+{
+ struct csid_device *csid = v4l2_get_subdevdata(sd);
+ struct device *dev = to_device_index(csid, csid->id);
+ int ret;
+
+ if (on) {
+ u32 hw_version;
+
+ ret = regulator_enable(csid->vdda);
+ if (ret < 0)
+ return ret;
+
+ ret = csid_set_clock_rates(csid);
+ if (ret < 0) {
+ regulator_disable(csid->vdda);
+ return ret;
+ }
+
+ ret = camss_enable_clocks(csid->nclocks, csid->clock, dev);
+ if (ret < 0) {
+ regulator_disable(csid->vdda);
+ return ret;
+ }
+
+ enable_irq(csid->irq);
+
+ ret = csid_reset(csid);
+ if (ret < 0) {
+ disable_irq(csid->irq);
+ camss_disable_clocks(csid->nclocks, csid->clock);
+ regulator_disable(csid->vdda);
+ return ret;
+ }
+
+ hw_version = readl_relaxed(csid->base + CAMSS_CSID_HW_VERSION);
+ dev_dbg(dev, "CSID HW Version = 0x%08x\n", hw_version);
+ } else {
+ disable_irq(csid->irq);
+ camss_disable_clocks(csid->nclocks, csid->clock);
+ ret = regulator_disable(csid->vdda);
+ }
+
+ return ret;
+}
+
+/*
+ * csid_set_stream - Enable/disable streaming on CSID module
+ * @sd: CSID V4L2 subdevice
+ * @enable: Requested streaming state
+ *
+ * Main configuration of CSID module is also done here.
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int csid_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct csid_device *csid = v4l2_get_subdevdata(sd);
+ struct csid_testgen_config *tg = &csid->testgen;
+ u32 val;
+
+ if (enable) {
+ u8 vc = 0; /* Virtual Channel 0 */
+ u8 cid = vc * 4; /* id of Virtual Channel and Data Type set */
+ u8 dt, dt_shift, df;
+ int ret;
+
+ ret = v4l2_ctrl_handler_setup(&csid->ctrls);